-rw-r--r--Makefile.inc122
-rw-r--r--Makefile.libcompat6
-rw-r--r--ObsoleteFiles.inc5
-rw-r--r--RELNOTES24
-rw-r--r--UPDATING53
-rw-r--r--cddl/lib/libicp/Makefile2
-rw-r--r--cddl/lib/libicp_rescue/Makefile2
-rw-r--r--contrib/jemalloc/ChangeLog100
-rwxr-xr-xcontrib/jemalloc/FREEBSD-upgrade254
-rw-r--r--contrib/jemalloc/INSTALL.md849
-rw-r--r--contrib/jemalloc/Makefile.in1389
-rw-r--r--contrib/jemalloc/README20
-rw-r--r--contrib/jemalloc/TUNING.md262
-rw-r--r--contrib/jemalloc/VERSION2
-rwxr-xr-xcontrib/jemalloc/autogen.sh17
-rw-r--r--contrib/jemalloc/bin/jemalloc-config.in83
-rw-r--r--contrib/jemalloc/bin/jemalloc.sh.in9
-rw-r--r--contrib/jemalloc/bin/jeprof.in11352
-rwxr-xr-xcontrib/jemalloc/build-aux/config.guess3167
-rwxr-xr-xcontrib/jemalloc/build-aux/config.sub3684
-rwxr-xr-xcontrib/jemalloc/build-aux/install-sh250
-rw-r--r--contrib/jemalloc/config.stamp.in0
-rw-r--r--contrib/jemalloc/configure.ac5079
-rw-r--r--contrib/jemalloc/doc/html.xsl.in5
-rw-r--r--contrib/jemalloc/doc/jemalloc.xml.in7330
-rw-r--r--contrib/jemalloc/doc/manpages.xsl.in4
-rw-r--r--contrib/jemalloc/doc/stylesheet.xsl10
-rw-r--r--contrib/jemalloc/doc_internal/PROFILING_INTERNALS.md127
-rw-r--r--contrib/jemalloc/doc_internal/jemalloc.svg1
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/activity_callback.h23
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/arena_externs.h77
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/arena_inlines_a.h35
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/arena_inlines_b.h489
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/arena_stats.h227
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/arena_structs.h101
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/arena_structs_a.h11
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/arena_structs_b.h232
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/arena_types.h23
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/atomic.h33
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/atomic_msvc.h158
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/background_thread_externs.h7
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/background_thread_inlines.h14
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/background_thread_structs.h12
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/base.h110
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/base_externs.h22
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/base_inlines.h13
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/base_structs.h59
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/base_types.h33
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/bin.h85
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/bin_info.h50
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/bin_stats.h5
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/bin_types.h2
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/bit_util.h457
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/bitmap.h21
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/buf_writer.h32
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/cache_bin.h625
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/counter.h34
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/ctl.h31
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/decay.h186
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/ecache.h55
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/edata.h698
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/edata_cache.h49
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/ehooks.h412
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/emap.h357
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/emitter.h74
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/eset.h77
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/exp_grow.h50
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/extent.h137
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/extent_externs.h83
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/extent_inlines.h501
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/extent_structs.h256
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/extent_types.h23
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/fb.h373
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/fxp.h126
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/hash.h63
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/hpa.h182
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/hpa_hooks.h17
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/hpa_opts.h74
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/hpdata.h413
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/inspect.h40
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h18
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in797
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h28
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h16
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h82
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h52
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h124
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h13
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h28
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in478
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/large_externs.h26
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/lockedint.h204
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/malloc_io.h11
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/mpsc_queue.h134
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/mutex.h63
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/mutex_pool.h94
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/mutex_prof.h13
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/nstime.h43
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/pa.h243
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/pac.h179
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/pages.h31
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/pai.h95
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/peak.h37
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/peak_event.h24
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/ph.h817
-rwxr-xr-xcontrib/jemalloc/include/jemalloc/internal/private_namespace.sh5
-rwxr-xr-xcontrib/jemalloc/include/jemalloc/internal/private_symbols.sh51
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/prng.h93
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/prof_data.h37
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/prof_externs.h116
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/prof_hook.h21
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/prof_inlines.h261
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/prof_inlines_a.h85
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/prof_inlines_b.h250
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/prof_log.h22
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/prof_recent.h23
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/prof_stats.h17
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/prof_structs.h47
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/prof_sys.h30
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/prof_types.h37
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/psset.h131
-rwxr-xr-xcontrib/jemalloc/include/jemalloc/internal/public_namespace.sh6
-rwxr-xr-xcontrib/jemalloc/include/jemalloc/internal/public_unnamespace.sh6
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/ql.h129
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/qr.h130
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/quantum.h12
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/rb.h920
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/rtree.h520
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/rtree_tsd.h24
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/safety_check.h7
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/san.h191
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/san_bump.h52
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/sc.h78
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/sec.h120
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/sec_opts.h59
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/slab_data.h12
-rwxr-xr-xcontrib/jemalloc/include/jemalloc/internal/smoothstep.sh101
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/stats.h29
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/sz.h97
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/tcache_externs.h60
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/tcache_inlines.h156
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/tcache_structs.h62
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/tcache_types.h40
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/test_hooks.h16
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/thread_event.h301
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/ticker.h92
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/tsd.h243
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/tsd_generic.h23
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h2
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/tsd_types.h2
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/tsd_win.h282
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/typed_list.h55
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/util.h56
-rw-r--r--contrib/jemalloc/include/jemalloc/internal/witness.h183
-rwxr-xr-xcontrib/jemalloc/include/jemalloc/jemalloc.sh27
-rw-r--r--contrib/jemalloc/include/jemalloc/jemalloc_defs.h.in106
-rw-r--r--contrib/jemalloc/include/jemalloc/jemalloc_macros.h.in282
-rwxr-xr-xcontrib/jemalloc/include/jemalloc/jemalloc_mangle.sh45
-rw-r--r--contrib/jemalloc/include/jemalloc/jemalloc_protos.h.in141
-rwxr-xr-xcontrib/jemalloc/include/jemalloc/jemalloc_rename.sh22
-rw-r--r--contrib/jemalloc/include/jemalloc/jemalloc_typedefs.h.in77
-rw-r--r--contrib/jemalloc/include/msvc_compat/C99/stdbool.h20
-rw-r--r--contrib/jemalloc/include/msvc_compat/C99/stdint.h247
-rw-r--r--contrib/jemalloc/include/msvc_compat/strings.h58
-rw-r--r--contrib/jemalloc/include/msvc_compat/windows_extra.h6
-rw-r--r--contrib/jemalloc/jemalloc.pc.in12
-rw-r--r--contrib/jemalloc/m4/ax_cxx_compile_stdcxx.m41517
-rwxr-xr-xcontrib/jemalloc/run_tests.sh1
-rwxr-xr-xcontrib/jemalloc/scripts/check-formatting.sh28
-rw-r--r--contrib/jemalloc/scripts/freebsd/before_install.sh3
-rw-r--r--contrib/jemalloc/scripts/freebsd/before_script.sh10
-rw-r--r--contrib/jemalloc/scripts/freebsd/script.sh3
-rwxr-xr-xcontrib/jemalloc/scripts/gen_run_tests.py260
-rwxr-xr-xcontrib/jemalloc/scripts/gen_travis.py480
-rw-r--r--contrib/jemalloc/scripts/linux/before_install.sh13
-rw-r--r--contrib/jemalloc/scripts/windows/before_install.sh83
-rw-r--r--contrib/jemalloc/scripts/windows/before_script.sh20
-rw-r--r--contrib/jemalloc/scripts/windows/script.sh10
-rw-r--r--contrib/jemalloc/src/arena.c2063
-rw-r--r--contrib/jemalloc/src/background_thread.c335
-rw-r--r--contrib/jemalloc/src/base.c193
-rw-r--r--contrib/jemalloc/src/bin.c30
-rw-r--r--contrib/jemalloc/src/bin_info.c30
-rw-r--r--contrib/jemalloc/src/bitmap.c1
-rw-r--r--contrib/jemalloc/src/buf_writer.c144
-rw-r--r--contrib/jemalloc/src/cache_bin.c99
-rw-r--r--contrib/jemalloc/src/ckh.c7
-rw-r--r--contrib/jemalloc/src/counter.c30
-rw-r--r--contrib/jemalloc/src/ctl.c1687
-rw-r--r--contrib/jemalloc/src/decay.c295
-rw-r--r--contrib/jemalloc/src/ecache.c35
-rw-r--r--contrib/jemalloc/src/edata.c6
-rw-r--r--contrib/jemalloc/src/edata_cache.c154
-rw-r--r--contrib/jemalloc/src/ehooks.c275
-rw-r--r--contrib/jemalloc/src/emap.c386
-rw-r--r--contrib/jemalloc/src/eset.c282
-rw-r--r--contrib/jemalloc/src/exp_grow.c8
-rw-r--r--contrib/jemalloc/src/extent.c2484
-rw-r--r--contrib/jemalloc/src/extent_dss.c42
-rw-r--r--contrib/jemalloc/src/extent_mmap.c1
-rw-r--r--contrib/jemalloc/src/fxp.c124
-rw-r--r--contrib/jemalloc/src/hash.c3
-rw-r--r--contrib/jemalloc/src/hook.c6
-rw-r--r--contrib/jemalloc/src/hpa.c1044
-rw-r--r--contrib/jemalloc/src/hpa_hooks.c63
-rw-r--r--contrib/jemalloc/src/hpdata.c325
-rw-r--r--contrib/jemalloc/src/inspect.c77
-rw-r--r--contrib/jemalloc/src/jemalloc.c2155
-rw-r--r--contrib/jemalloc/src/jemalloc_cpp.cpp399
-rw-r--r--contrib/jemalloc/src/large.c299
-rw-r--r--contrib/jemalloc/src/malloc_io.c46
-rw-r--r--contrib/jemalloc/src/mutex.c21
-rw-r--r--contrib/jemalloc/src/mutex_pool.c18
-rw-r--r--contrib/jemalloc/src/nstime.c127
-rw-r--r--contrib/jemalloc/src/pa.c277
-rw-r--r--contrib/jemalloc/src/pa_extra.c191
-rw-r--r--contrib/jemalloc/src/pac.c587
-rw-r--r--contrib/jemalloc/src/pages.c209
-rw-r--r--contrib/jemalloc/src/pai.c31
-rw-r--r--contrib/jemalloc/src/peak_event.c82
-rw-r--r--contrib/jemalloc/src/prng.c3
-rw-r--r--contrib/jemalloc/src/prof.c2923
-rw-r--r--contrib/jemalloc/src/prof_data.c1447
-rw-r--r--contrib/jemalloc/src/prof_log.c717
-rw-r--r--contrib/jemalloc/src/prof_recent.c600
-rw-r--r--contrib/jemalloc/src/prof_stats.c57
-rw-r--r--contrib/jemalloc/src/prof_sys.c669
-rw-r--r--contrib/jemalloc/src/psset.c385
-rw-r--r--contrib/jemalloc/src/rtree.c75
-rw-r--r--contrib/jemalloc/src/safety_check.c16
-rw-r--r--contrib/jemalloc/src/san.c208
-rw-r--r--contrib/jemalloc/src/san_bump.c104
-rw-r--r--contrib/jemalloc/src/sc.c17
-rw-r--r--contrib/jemalloc/src/sec.c422
-rw-r--r--contrib/jemalloc/src/stats.c794
-rw-r--r--contrib/jemalloc/src/sz.c52
-rw-r--r--contrib/jemalloc/src/tcache.c1137
-rw-r--r--contrib/jemalloc/src/thread_event.c343
-rw-r--r--contrib/jemalloc/src/ticker.c31
-rwxr-xr-xcontrib/jemalloc/src/ticker.py15
-rw-r--r--contrib/jemalloc/src/tsd.c75
-rw-r--r--contrib/jemalloc/src/witness.c44
-rw-r--r--contrib/jemalloc/src/zone.c469
-rw-r--r--contrib/kyua/doc/kyuafile.5.in10
-rw-r--r--contrib/kyua/drivers/report_junit_test.cpp3
-rw-r--r--contrib/kyua/engine/atf_list.cpp2
-rw-r--r--contrib/kyua/engine/requirements.cpp57
-rw-r--r--contrib/kyua/engine/requirements.hpp26
-rw-r--r--contrib/kyua/integration/cmd_report_junit_test.sh4
-rw-r--r--contrib/kyua/integration/cmd_report_test.sh1
-rw-r--r--contrib/kyua/model/metadata.cpp16
-rw-r--r--contrib/kyua/model/metadata.hpp4
-rw-r--r--contrib/kyua/model/metadata_test.cpp6
-rw-r--r--contrib/kyua/model/test_case_test.cpp2
-rw-r--r--contrib/kyua/model/test_program_test.cpp8
-rw-r--r--contrib/kyua/os/freebsd/main.cpp11
-rw-r--r--contrib/kyua/os/freebsd/reqs_checker_kmods.cpp50
-rw-r--r--contrib/kyua/os/freebsd/reqs_checker_kmods.hpp54
-rw-r--r--contrib/lua/Makefile2
-rw-r--r--contrib/lua/README2
-rw-r--r--contrib/lua/doc/contents.html14
-rw-r--r--contrib/lua/doc/lua.16
-rw-r--r--contrib/lua/doc/lua.css1
-rw-r--r--contrib/lua/doc/manual.html50
-rw-r--r--contrib/lua/doc/readme.html48
-rw-r--r--contrib/lua/src/lapi.c6
-rw-r--r--contrib/lua/src/lauxlib.c28
-rw-r--r--contrib/lua/src/lcode.c38
-rw-r--r--contrib/lua/src/lcode.h3
-rw-r--r--contrib/lua/src/ldebug.c231
-rw-r--r--contrib/lua/src/ldebug.h1
-rw-r--r--contrib/lua/src/ldo.c27
-rw-r--r--contrib/lua/src/ldo.h2
-rw-r--r--contrib/lua/src/lgc.c20
-rw-r--r--contrib/lua/src/liolib.c27
-rw-r--r--contrib/lua/src/lmathlib.c31
-rw-r--r--contrib/lua/src/loadlib.c9
-rw-r--r--contrib/lua/src/lobject.c2
-rw-r--r--contrib/lua/src/lobject.h18
-rw-r--r--contrib/lua/src/lopcodes.h8
-rw-r--r--contrib/lua/src/loslib.c2
-rw-r--r--contrib/lua/src/lparser.c19
-rw-r--r--contrib/lua/src/lstate.c11
-rw-r--r--contrib/lua/src/lstate.h3
-rw-r--r--contrib/lua/src/lstring.c13
-rw-r--r--contrib/lua/src/ltable.c39
-rw-r--r--contrib/lua/src/ltable.h2
-rw-r--r--contrib/lua/src/ltm.h5
-rw-r--r--contrib/lua/src/lua.c28
-rw-r--r--contrib/lua/src/lua.h8
-rw-r--r--contrib/lua/src/luaconf.h9
-rw-r--r--contrib/lua/src/lundump.c4
-rw-r--r--contrib/lua/src/lundump.h3
-rw-r--r--contrib/lua/src/lvm.c81
-rw-r--r--contrib/mandoc/Makefile2
-rw-r--r--contrib/mandoc/TODO11
-rw-r--r--contrib/mandoc/catman.8238
-rw-r--r--contrib/mandoc/catman.c231
-rw-r--r--contrib/mandoc/gmdiff4
-rw-r--r--contrib/mandoc/man.7151
-rw-r--r--contrib/mandoc/man.options.19
-rw-r--r--contrib/mandoc/man_html.c19
-rw-r--r--contrib/mandoc/man_term.c133
-rw-r--r--contrib/mandoc/man_validate.c18
-rw-r--r--contrib/mandoc/mandoc.131
-rw-r--r--contrib/mandoc/mandoc.css4
-rw-r--r--contrib/mandoc/mandocd.826
-rw-r--r--contrib/mandoc/mandocd.c34
-rw-r--r--contrib/mandoc/manpath.c11
-rw-r--r--contrib/mandoc/mdoc_html.c15
-rw-r--r--contrib/mandoc/mdoc_man.c5
-rw-r--r--contrib/mandoc/mdoc_markdown.c10
-rw-r--r--contrib/mandoc/mdoc_term.c122
-rw-r--r--contrib/mandoc/mdoc_validate.c10
-rw-r--r--contrib/mandoc/out.c63
-rw-r--r--contrib/mandoc/out.h24
-rw-r--r--contrib/mandoc/roff.7195
-rw-r--r--contrib/mandoc/roff_term.c31
-rw-r--r--contrib/mandoc/tbl.h16
-rw-r--r--contrib/mandoc/tbl_html.c34
-rw-r--r--contrib/mandoc/tbl_layout.c10
-rw-r--r--contrib/mandoc/tbl_term.c287
-rw-r--r--contrib/mandoc/term.c201
-rw-r--r--contrib/mandoc/term.h38
-rw-r--r--contrib/mandoc/term_ascii.c91
-rw-r--r--contrib/mandoc/term_ps.c18
-rw-r--r--contrib/mandoc/term_tab.c27
-rw-r--r--contrib/one-true-awk/FIXES8
-rw-r--r--contrib/one-true-awk/main.c2
-rw-r--r--contrib/one-true-awk/run.c4
-rw-r--r--contrib/tcpdump/print-pfsync.c12
-rw-r--r--contrib/tzcode/localtime.c341
-rw-r--r--crypto/krb5/src/lib/gssapi/krb5/util_crypt.c10
-rw-r--r--crypto/krb5/src/lib/gssapi/krb5/verify_mic.c11
-rw-r--r--crypto/openssl/BSDmakefile99
-rwxr-xr-xcrypto/openssl/apps/CA.pl2
-rw-r--r--crypto/openssl/apps/progs.c11
-rw-r--r--crypto/openssl/apps/progs.h4
-rwxr-xr-xcrypto/openssl/configdata.pm2295
-rw-r--r--crypto/openssl/exporters/libcrypto.pc13
-rw-r--r--crypto/openssl/exporters/libssl.pc11
-rw-r--r--crypto/openssl/exporters/openssl.pc9
-rw-r--r--crypto/openssl/freebsd/dump_version_from_configdata.pl21
-rw-r--r--crypto/openssl/freebsd/include/crypto/bn_conf.h27
-rw-r--r--crypto/openssl/freebsd/include/openssl/configuration.h38
-rw-r--r--crypto/openssl/include/crypto/bn_conf.h27
-rw-r--r--crypto/openssl/include/openssl/configuration.h53
-rw-r--r--crypto/openssl/include/openssl/fipskey.h2
-rw-r--r--crypto/openssl/include/openssl/opensslv.h2
-rw-r--r--crypto/openssl/libcrypto.pc13
-rw-r--r--crypto/openssl/libssl.pc11
-rw-r--r--crypto/openssl/openssl.pc9
-rwxr-xr-xcrypto/openssl/tools/c_rehash6
-rwxr-xr-xcrypto/openssl/util/shlib_wrap.sh4
-rwxr-xr-xcrypto/openssl/util/wrap.pl8
-rw-r--r--etc/mtree/BSD.root.dist6
-rw-r--r--etc/mtree/BSD.usr.dist2
-rw-r--r--include/Makefile2
-rw-r--r--include/rpc/auth_des.h79
-rw-r--r--kerberos5/Makefile.inc2
-rw-r--r--kerberos5/lib/Makefile2
-rw-r--r--kerberos5/libexec/Makefile2
-rw-r--r--krb5/Makefile2
-rw-r--r--krb5/Makefile.et2
-rw-r--r--krb5/Makefile.inc5
-rw-r--r--krb5/Makefile.pc (renamed from krb5/libdata/Makefile)30
-rw-r--r--krb5/include/Makefile4
-rw-r--r--krb5/include/Makefile.inc4
-rw-r--r--krb5/include/autoconf.h4
-rw-r--r--krb5/include/gssapi/Makefile2
-rw-r--r--krb5/include/gssrpc/Makefile2
-rw-r--r--krb5/include/krb5/Makefile2
-rw-r--r--krb5/lib/Makefile2
-rw-r--r--krb5/lib/Makefile.inc4
-rw-r--r--krb5/lib/apputils/Makefile2
-rw-r--r--krb5/lib/crypto/Makefile2
-rw-r--r--krb5/lib/crypto/builtin/Makefile.inc2
-rw-r--r--krb5/lib/crypto/builtin/aes/Makefile.inc2
-rw-r--r--krb5/lib/crypto/builtin/camellia/Makefile.inc2
-rw-r--r--krb5/lib/crypto/builtin/des/Makefile.inc2
-rw-r--r--krb5/lib/crypto/builtin/enc_provider/Makefile.inc2
-rw-r--r--krb5/lib/crypto/builtin/hash_provider/Makefile.inc2
-rw-r--r--krb5/lib/crypto/builtin/md4/Makefile.inc2
-rw-r--r--krb5/lib/crypto/builtin/md5/Makefile.inc2
-rw-r--r--krb5/lib/crypto/builtin/sha1/Makefile.inc2
-rw-r--r--krb5/lib/crypto/builtin/sha2/Makefile.inc2
-rw-r--r--krb5/lib/crypto/krb/Makefile.inc2
-rw-r--r--krb5/lib/crypto/openssl/Makefile.inc2
-rw-r--r--krb5/lib/crypto/openssl/des/Makefile.inc2
-rw-r--r--krb5/lib/crypto/openssl/enc_provider/Makefile.inc2
-rw-r--r--krb5/lib/crypto/openssl/hash_provider/Makefile.inc2
-rw-r--r--krb5/lib/gssapi/Makefile15
-rw-r--r--krb5/lib/gssapi/generic/Makefile.et2
-rw-r--r--krb5/lib/gssapi/generic/Makefile.inc2
-rw-r--r--krb5/lib/gssapi/krb5/Makefile.et2
-rw-r--r--krb5/lib/gssapi/krb5/Makefile.inc2
-rw-r--r--krb5/lib/gssapi/mechglue/Makefile.inc2
-rw-r--r--krb5/lib/gssapi/spnego/Makefile.inc2
-rw-r--r--krb5/lib/kadm5clnt/Makefile15
-rw-r--r--krb5/lib/kadm5clnt/clnt/Makefile.inc2
-rw-r--r--krb5/lib/kadm5srv/Makefile15
-rw-r--r--krb5/lib/kadm5srv/srv/Makefile.inc2
-rw-r--r--krb5/lib/kadmin_common/Makefile2
-rw-r--r--krb5/lib/kdb/Makefile15
-rw-r--r--krb5/lib/kprop_util/Makefile2
-rw-r--r--krb5/lib/krad/Makefile2
-rw-r--r--krb5/lib/krb5/Makefile16
-rw-r--r--krb5/lib/krb5/asn.1/Makefile.inc2
-rw-r--r--krb5/lib/krb5/ccache/Makefile.inc2
-rw-r--r--krb5/lib/krb5/docs/Makefile.inc2
-rw-r--r--krb5/lib/krb5/error_tables/Makefile.inc2
-rw-r--r--krb5/lib/krb5/keytab/Makefile.inc2
-rw-r--r--krb5/lib/krb5/krb/Makefile.inc2
-rw-r--r--krb5/lib/krb5/os/Makefile.inc2
-rw-r--r--krb5/lib/krb5/rcache/Makefile.inc2
-rw-r--r--krb5/lib/krb5/unicode/Makefile.inc2
-rw-r--r--krb5/lib/rpc/Makefile15
-rw-r--r--krb5/libexec/Makefile4
-rw-r--r--krb5/libexec/Makefile.inc4
-rw-r--r--krb5/libexec/kadmind/Makefile2
-rw-r--r--krb5/libexec/kdc/Makefile2
-rw-r--r--krb5/libexec/kprop/Makefile2
-rw-r--r--krb5/libexec/kpropd/Makefile2
-rw-r--r--krb5/libexec/kproplog/Makefile2
-rw-r--r--krb5/plugins/Makefile2
-rw-r--r--krb5/plugins/Makefile.inc2
-rw-r--r--krb5/plugins/audit/Makefile2
-rw-r--r--krb5/plugins/k5tls/Makefile2
-rw-r--r--krb5/plugins/kdb/Makefile2
-rw-r--r--krb5/plugins/kdb/Makefile.inc2
-rw-r--r--krb5/plugins/kdb/db2/Makefile2
-rw-r--r--krb5/plugins/kdb/db2/libdb2/Makefile.inc2
-rw-r--r--krb5/plugins/kdb/db2/libdb2/btree/Makefile.inc2
-rw-r--r--krb5/plugins/kdb/db2/libdb2/db/Makefile.inc2
-rw-r--r--krb5/plugins/kdb/db2/libdb2/hash/Makefile.inc2
-rw-r--r--krb5/plugins/kdb/db2/libdb2/include/Makefile.inc2
-rw-r--r--krb5/plugins/kdb/db2/libdb2/mpool/Makefile.inc2
-rw-r--r--krb5/plugins/kdb/db2/libdb2/recno/Makefile.inc2
-rw-r--r--krb5/plugins/preauth/Makefile2
-rw-r--r--krb5/plugins/preauth/Makefile.inc2
-rw-r--r--krb5/plugins/preauth/otp/Makefile2
-rw-r--r--krb5/plugins/preauth/pkinit/Makefile2
-rw-r--r--krb5/plugins/preauth/spake/Makefile2
-rw-r--r--krb5/plugins/preauth/test/Makefile2
-rw-r--r--krb5/usr.bin/Makefile2
-rw-r--r--krb5/usr.bin/Makefile.inc3
-rw-r--r--krb5/usr.bin/gss-client/Makefile2
-rw-r--r--krb5/usr.bin/kadmin/Makefile2
-rw-r--r--krb5/usr.bin/kdestroy/Makefile2
-rw-r--r--krb5/usr.bin/kinit/Makefile2
-rw-r--r--krb5/usr.bin/klist/Makefile2
-rw-r--r--krb5/usr.bin/kpasswd/Makefile2
-rw-r--r--krb5/usr.bin/ksu/Makefile2
-rw-r--r--krb5/usr.bin/kswitch/Makefile2
-rw-r--r--krb5/usr.bin/ktutil/Makefile2
-rw-r--r--krb5/usr.bin/kvno/Makefile2
-rw-r--r--krb5/usr.bin/sclient/Makefile2
-rw-r--r--krb5/usr.bin/sim_client/Makefile2
-rw-r--r--krb5/usr.sbin/Makefile4
-rw-r--r--krb5/usr.sbin/Makefile.inc2
-rw-r--r--krb5/usr.sbin/gss-server/Makefile4
-rw-r--r--krb5/usr.sbin/kadmin.local/Makefile2
-rw-r--r--krb5/usr.sbin/kdb5_util/Makefile2
-rw-r--r--krb5/usr.sbin/sim_server/Makefile4
-rw-r--r--krb5/usr.sbin/sserver/Makefile4
-rw-r--r--krb5/util/Makefile2
-rw-r--r--krb5/util/Makefile.inc4
-rw-r--r--krb5/util/build-tools/Makefile5
-rwxr-xr-xkrb5/util/build-tools/krb5-config.sh2
-rw-r--r--krb5/util/compile_et/Makefile5
-rw-r--r--krb5/util/et/Makefile14
-rw-r--r--krb5/util/profile/Makefile4
-rw-r--r--krb5/util/ss/Makefile4
-rw-r--r--krb5/util/support/Makefile4
-rw-r--r--krb5/util/verto/Makefile4
-rw-r--r--lib/Makefile6
-rw-r--r--lib/libc/i386/gen/makecontext.c2
-rw-r--r--lib/libc/net/gai_strerror.c44
-rw-r--r--lib/libc/rpc/Symbol.map19
-rw-r--r--lib/libc/rpc/auth_des.c455
-rw-r--r--lib/libc/rpc/authdes_prot.c44
-rw-r--r--lib/libc/rpc/key_call.c424
-rw-r--r--lib/libc/rpc/publickey.540
-rw-r--r--lib/libc/rpc/rpc_secure.3177
-rw-r--r--lib/libc/rpc/rpc_soc.313
-rw-r--r--lib/libc/rpc/rpc_soc.c31
-rw-r--r--lib/libc/rpc/svc_auth.c8
-rw-r--r--lib/libc/rpc/svc_auth_des.c460
-rw-r--r--lib/libc/stdlib/malloc/jemalloc/Makefile.inc14
-rw-r--r--lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h91
-rw-r--r--lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/jemalloc_preamble.h54
-rw-r--r--lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/private_namespace.h481
-rw-r--r--lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/public_namespace.h4
-rw-r--r--lib/libc/stdlib/malloc/jemalloc/include/jemalloc/jemalloc.h69
-rw-r--r--lib/libc/stdlib/malloc/jemalloc/include/jemalloc/jemalloc_FreeBSD.h10
-rw-r--r--lib/libc/stdlib/malloc/jemalloc/include/jemalloc/jemalloc_defs.h55
-rw-r--r--lib/libc/tests/stdtime/detect_tz_changes_test.c61
-rw-r--r--lib/libcrypt/Makefile2
-rw-r--r--lib/libelftc/Makefile1
-rw-r--r--lib/libifconfig/Makefile1
-rw-r--r--lib/libifconfig/Symbol.map2
-rw-r--r--lib/libifconfig/libifconfig.h1
-rw-r--r--lib/libiscsiutil/Makefile1
-rw-r--r--lib/libmilter/Makefile2
-rw-r--r--lib/libnetbsd/Makefile1
-rw-r--r--lib/libnvmf/Makefile1
-rw-r--r--lib/libopenbsd/Makefile1
-rw-r--r--lib/libpfctl/Makefile1
-rw-r--r--lib/libpfctl/libpfctl.c22
-rw-r--r--lib/libpmcstat/Makefile1
-rw-r--r--lib/libprocstat/libprocstat.c3
-rw-r--r--lib/librpcsvc/Makefile2
-rw-r--r--lib/librpcsvc/yp_update.c199
-rw-r--r--lib/libsm/Makefile1
-rw-r--r--lib/libsmdb/Makefile1
-rw-r--r--lib/libsmutil/Makefile1
-rw-r--r--lib/libsys/copy_file_range.235
-rw-r--r--lib/libsys/stat.25
-rw-r--r--lib/libtelnet/Makefile1
-rw-r--r--lib/libutil/mntopts.c1
-rw-r--r--libexec/nuageinit/nuage.lua20
-rwxr-xr-xlibexec/nuageinit/nuageinit238
-rw-r--r--libexec/nuageinit/nuageinit.7104
-rw-r--r--libexec/nuageinit/tests/Makefile1
-rw-r--r--libexec/nuageinit/tests/nuage.sh9
-rw-r--r--libexec/nuageinit/tests/nuageinit.sh4
-rw-r--r--libexec/nuageinit/tests/settimezone.lua5
-rw-r--r--libexec/rc/rc.conf1
-rw-r--r--libexec/rc/rc.d/Makefile3
-rwxr-xr-xlibexec/rc/rc.d/ypupdated35
-rw-r--r--libexec/rtld-elf/rtld.c19
-rwxr-xr-xrelease/packages/generate-ucl.lua7
-rwxr-xr-xrelease/packages/generate-ucl.sh3
-rw-r--r--release/packages/ucl/dma-all.ucl12
-rw-r--r--release/packages/ucl/kerberos-lib-all.ucl4
-rw-r--r--release/packages/ucl/libmilter-all.ucl7
-rw-r--r--release/packages/ucl/openssl-lib-all.ucl4
-rw-r--r--release/packages/ucl/sendmail-all.ucl15
-rw-r--r--sbin/pfctl/pfctl.824
-rw-r--r--sbin/pfctl/pfctl_radix.c3
-rw-r--r--secure/lib/libcrypto/Makefile6
-rw-r--r--secure/lib/libcrypto/Makefile.common2
-rw-r--r--secure/lib/libcrypto/Makefile.inc6
-rw-r--r--secure/lib/libcrypto/Makefile.version2
-rw-r--r--secure/lib/libcrypto/engines/Makefile.inc3
-rw-r--r--secure/lib/libcrypto/modules/Makefile.inc3
-rw-r--r--secure/lib/libssl/Makefile3
-rw-r--r--secure/libexec/sshd-session/Makefile2
-rw-r--r--secure/ssh.mk3
-rw-r--r--secure/usr.bin/ssh/Makefile2
-rw-r--r--secure/usr.sbin/sshd/Makefile2
-rw-r--r--share/man/man4/Makefile3
-rw-r--r--share/man/man4/ata.44
-rw-r--r--share/man/man4/iflib.414
-rw-r--r--share/man/man4/ioat.42
-rw-r--r--share/man/man4/iwlwifi.44
-rw-r--r--share/man/man4/iwx.42
-rw-r--r--share/man/man4/man4.aarch64/armv8crypto.42
-rw-r--r--share/man/man4/man4.aarch64/enetc.42
-rw-r--r--share/man/man4/man4.aarch64/felix.42
-rw-r--r--share/man/man4/man4.aarch64/rk_gpio.42
-rw-r--r--share/man/man4/man4.aarch64/rk_grf.42
-rw-r--r--share/man/man4/man4.aarch64/rk_grf_gpio.42
-rw-r--r--share/man/man4/man4.aarch64/rk_i2c.42
-rw-r--r--share/man/man4/man4.aarch64/rk_pinctrl.42
-rw-r--r--share/man/man4/man4.arm/am335x_dmtpps.42
-rw-r--r--share/man/man4/man4.arm/aw_gpio.42
-rw-r--r--share/man/man4/man4.arm/aw_mmc.42
-rw-r--r--share/man/man4/man4.arm/aw_rtc.42
-rw-r--r--share/man/man4/man4.arm/aw_sid.42
-rw-r--r--share/man/man4/man4.arm/aw_spi.42
-rw-r--r--share/man/man4/man4.arm/aw_syscon.42
-rw-r--r--share/man/man4/man4.arm/bcm283x_pwm.42
-rw-r--r--share/man/man4/man4.arm/devcfg.42
-rw-r--r--share/man/man4/man4.arm/imx6_ahci.42
-rw-r--r--share/man/man4/man4.arm/imx6_snvs.42
-rw-r--r--share/man/man4/man4.arm/imx_spi.42
-rw-r--r--share/man/man4/man4.arm/imx_wdog.42
-rw-r--r--share/man/man4/man4.arm/mge.42
-rw-r--r--share/man/man4/man4.arm/ti_adc.42
-rw-r--r--share/man/man4/man4.powerpc/abtn.42
-rw-r--r--share/man/man4/man4.powerpc/adb.42
-rw-r--r--share/man/man4/man4.powerpc/akbd.42
-rw-r--r--share/man/man4/man4.powerpc/ams.42
-rw-r--r--share/man/man4/man4.powerpc/cuda.42
-rw-r--r--share/man/man4/man4.powerpc/dtsec.42
-rw-r--r--share/man/man4/man4.powerpc/llan.42
-rw-r--r--share/man/man4/man4.powerpc/pmu.42
-rw-r--r--share/man/man4/man4.powerpc/smu.42
-rw-r--r--share/man/man4/man4.powerpc/snd_ai2s.42
-rw-r--r--share/man/man4/man4.powerpc/snd_davbus.42
-rw-r--r--share/man/man4/man4.powerpc/tsec.42
-rw-r--r--share/man/man4/nvdimm.42
-rw-r--r--share/man/man4/qlnxe.42
-rw-r--r--share/man/man4/qlxgb.42
-rw-r--r--share/man/man4/qlxgbe.42
-rw-r--r--share/man/man4/qlxge.42
-rw-r--r--share/man/man4/sfxge.42
-rw-r--r--share/man/man4/smartpqi.42
-rw-r--r--share/man/man4/sume.42
-rw-r--r--share/man/man4/tap.415
-rw-r--r--share/man/man4/tun.415
-rw-r--r--share/man/man4/u2f.496
-rw-r--r--share/man/man4/umb.428
-rw-r--r--share/man/man4/vtnet.4142
-rw-r--r--share/man/man5/rc.conf.59
-rw-r--r--share/man/man5/src.conf.529
-rw-r--r--share/man/man7/hier.717
-rw-r--r--share/mk/Makefile1
-rw-r--r--share/mk/bsd.README1
-rw-r--r--share/mk/bsd.compat.mk1
-rw-r--r--share/mk/bsd.debug.mk68
-rw-r--r--share/mk/bsd.endian.mk13
-rw-r--r--share/mk/bsd.lib.mk101
-rw-r--r--share/mk/bsd.opts.mk1
-rw-r--r--share/mk/bsd.own.mk7
-rw-r--r--share/mk/bsd.prog.mk55
-rw-r--r--share/mk/local.dirdeps.mk2
-rw-r--r--share/mk/src.opts.mk4
-rw-r--r--share/mk/src.sys.mk2
-rw-r--r--stand/common/dev_net.c41
-rw-r--r--stand/common/gfx_fb.c63
-rw-r--r--stand/common/gfx_fb.h1
-rw-r--r--stand/common/misc.c13
-rw-r--r--stand/common/modinfo.c2
-rw-r--r--stand/defs.mk2
-rw-r--r--stand/efi/include/amd64/pe.h2
-rw-r--r--stand/efi/include/i386/pe.h2
-rw-r--r--stand/efi/libefi/efi_console.c23
-rw-r--r--stand/i386/libi386/vidconsole.c21
-rw-r--r--stand/kboot/kboot/main.c49
-rw-r--r--stand/libsa/bzipfs.c1
-rw-r--r--stand/libsa/cd9660.c1
-rw-r--r--stand/libsa/dosfs.c1
-rw-r--r--stand/libsa/environment.c17
-rw-r--r--stand/libsa/ext2fs.c1
-rw-r--r--stand/libsa/globals.c1
-rw-r--r--stand/libsa/gzipfs.c1
-rw-r--r--stand/libsa/libsa.34
-rw-r--r--stand/libsa/mount.c5
-rw-r--r--stand/libsa/net.h1
-rw-r--r--stand/libsa/nfs.c1
-rw-r--r--stand/libsa/open.c41
-rw-r--r--stand/libsa/pkgfs.c1
-rw-r--r--stand/libsa/splitfs.c1
-rw-r--r--stand/libsa/stand.h7
-rw-r--r--stand/libsa/tftp.c154
-rw-r--r--stand/libsa/ufs.c7
-rw-r--r--stand/libsa/ufsread.c7
-rw-r--r--stand/libsa/zfs/zfsimpl.c227
-rw-r--r--stand/loader.mk2
-rw-r--r--sys/amd64/conf/GENERIC4
-rw-r--r--sys/arm/arm/pmu_fdt.c4
-rw-r--r--sys/arm/conf/GENERIC1
-rw-r--r--sys/arm64/arm64/elf32_machdep.c2
-rw-r--r--sys/arm64/conf/std.dev1
-rw-r--r--sys/cddl/boot/zfs/zfsimpl.h4
-rw-r--r--sys/compat/linux/linux_netlink.c76
-rw-r--r--sys/compat/linuxkpi/common/include/linux/ieee80211.h16
-rw-r--r--sys/compat/linuxkpi/common/include/net/cfg80211.h15
-rw-r--r--sys/compat/linuxkpi/common/include/net/mac80211.h6
-rw-r--r--sys/compat/linuxkpi/common/src/linux_80211.c4
-rw-r--r--sys/conf/NOTES4
-rw-r--r--sys/conf/files4
-rw-r--r--sys/conf/files.amd647
-rw-r--r--sys/conf/files.arm642
-rw-r--r--sys/conf/kern.post.mk19
-rw-r--r--sys/conf/kern.pre.mk3
-rw-r--r--sys/conf/kmod.mk23
-rw-r--r--sys/conf/newvers.sh8
-rw-r--r--sys/conf/options6
-rw-r--r--sys/contrib/dev/acpica/changes.txt28
-rw-r--r--sys/contrib/dev/acpica/common/adisasm.c12
-rw-r--r--sys/contrib/dev/acpica/common/ahtable.c1
-rw-r--r--sys/contrib/dev/acpica/common/dmtable.c1
-rw-r--r--sys/contrib/dev/acpica/common/dmtbdump2.c2
-rw-r--r--sys/contrib/dev/acpica/common/dmtbinfo2.c2
-rw-r--r--sys/contrib/dev/acpica/common/dmtbinfo3.c2
-rw-r--r--sys/contrib/dev/acpica/compiler/aslanalyze.c16
-rw-r--r--sys/contrib/dev/acpica/compiler/aslrestype2s.c2
-rw-r--r--sys/contrib/dev/acpica/compiler/dttable2.c77
-rw-r--r--sys/contrib/dev/acpica/compiler/dttemplate.c24
-rw-r--r--sys/contrib/dev/acpica/compiler/dttemplate.h24
-rw-r--r--sys/contrib/dev/acpica/compiler/dtutils.c1
-rw-r--r--sys/contrib/dev/acpica/components/disassembler/dmresrcl2.c2
-rw-r--r--sys/contrib/dev/acpica/components/dispatcher/dsmethod.c29
-rw-r--r--sys/contrib/dev/acpica/components/dispatcher/dsmthdat.c1
-rw-r--r--sys/contrib/dev/acpica/components/events/evglock.c5
-rw-r--r--sys/contrib/dev/acpica/components/executer/extrace.c66
-rw-r--r--sys/contrib/dev/acpica/components/parser/psopinfo.c8
-rw-r--r--sys/contrib/dev/acpica/components/tables/tbprint.c8
-rw-r--r--sys/contrib/dev/acpica/components/utilities/utnonansi.c2
-rw-r--r--sys/contrib/dev/acpica/include/acdebug.h2
-rw-r--r--sys/contrib/dev/acpica/include/acexcep.h9
-rw-r--r--sys/contrib/dev/acpica/include/acinterp.h4
-rw-r--r--sys/contrib/dev/acpica/include/acpixf.h8
-rw-r--r--sys/contrib/dev/acpica/include/actbl.h2
-rw-r--r--sys/contrib/dev/acpica/include/actbl1.h2
-rw-r--r--sys/contrib/dev/acpica/include/actbl2.h25
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh2
-rwxr-xr-xsys/contrib/openzfs/.github/workflows/scripts/qemu-4-build-vm.sh23
-rw-r--r--sys/contrib/openzfs/.github/workflows/zfs-qemu-packages.yml13
-rw-r--r--sys/contrib/openzfs/.mailmap4
-rw-r--r--sys/contrib/openzfs/AUTHORS13
-rw-r--r--sys/contrib/openzfs/META2
-rw-r--r--sys/contrib/openzfs/cmd/zdb/zdb.c61
-rw-r--r--sys/contrib/openzfs/config/kernel-mkdir.m42
-rw-r--r--sys/contrib/openzfs/config/toolchain-simd.m444
-rw-r--r--sys/contrib/openzfs/contrib/debian/control8
-rw-r--r--sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/LICENSE253
-rw-r--r--sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/README11
-rw-r--r--sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/aes-gcm-avx2-x86_64-linux.S1328
-rw-r--r--sys/contrib/openzfs/contrib/initramfs/scripts/zfs3
-rw-r--r--sys/contrib/openzfs/include/os/linux/kernel/linux/simd_x86.h26
-rw-r--r--sys/contrib/openzfs/include/os/linux/zfs/sys/trace_zil.h6
-rw-r--r--sys/contrib/openzfs/include/sys/spa.h2
-rw-r--r--sys/contrib/openzfs/include/sys/zil_impl.h18
-rw-r--r--sys/contrib/openzfs/include/sys/zio.h29
-rw-r--r--sys/contrib/openzfs/include/sys/zvol_impl.h5
-rw-r--r--sys/contrib/openzfs/lib/libicp/Makefile.am1
-rw-r--r--sys/contrib/openzfs/lib/libspl/include/sys/simd.h28
-rw-r--r--sys/contrib/openzfs/lib/libzpool/kernel.c75
-rw-r--r--sys/contrib/openzfs/lib/libzutil/zutil_import.c39
-rw-r--r--sys/contrib/openzfs/man/man4/zfs.44
-rw-r--r--sys/contrib/openzfs/man/man8/zdb.814
-rw-r--r--sys/contrib/openzfs/man/man8/zfs-send.824
-rw-r--r--sys/contrib/openzfs/module/Kbuild.in1
-rw-r--r--sys/contrib/openzfs/module/icp/algs/modes/gcm.c371
-rw-r--r--sys/contrib/openzfs/module/icp/algs/modes/modes.c2
-rw-r--r--sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl253
-rw-r--r--sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl.descrip1
-rw-r--r--sys/contrib/openzfs/module/icp/asm-x86_64/modes/aesni-gcm-avx2-vaes.S1323
-rw-r--r--sys/contrib/openzfs/module/icp/include/modes/modes.h13
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c7
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c44
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c8
-rw-r--r--sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c210
-rw-r--r--sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c153
-rw-r--r--sys/contrib/openzfs/module/zcommon/simd_stat.c4
-rw-r--r--sys/contrib/openzfs/module/zfs/dbuf.c7
-rw-r--r--sys/contrib/openzfs/module/zfs/multilist.c4
-rw-r--r--sys/contrib/openzfs/module/zfs/spa_config.c115
-rw-r--r--sys/contrib/openzfs/module/zfs/spa_misc.c8
-rw-r--r--sys/contrib/openzfs/module/zfs/zil.c349
-rw-r--r--sys/contrib/openzfs/module/zfs/zio.c51
-rw-r--r--sys/contrib/openzfs/module/zfs/zvol.c281
-rw-r--r--sys/contrib/openzfs/module/zstd/zfs_zstd.c8
-rwxr-xr-xsys/contrib/openzfs/scripts/spdxcheck.pl1
-rw-r--r--sys/contrib/openzfs/tests/runfiles/common.run2
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/cmd/crypto_test.c2
-rw-r--r--sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am1
-rwxr-xr-xsys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_stress/zvol_stress_destroy.ksh66
-rw-r--r--sys/crypto/ccp/ccp.c2
-rw-r--r--sys/dev/acpica/acpi_powerres.c274
-rw-r--r--sys/dev/acpica/acpivar.h1
-rw-r--r--sys/dev/bnxt/bnxt_re/qplib_res.c4
-rw-r--r--sys/dev/e1000/e1000_phy.c5
-rw-r--r--sys/dev/e1000/if_em.c44
-rw-r--r--sys/dev/fdt/fdt_common.c2
-rw-r--r--sys/dev/fdt/fdt_common.h7
-rw-r--r--sys/dev/gpio/acpi_gpiobus.c165
-rw-r--r--sys/dev/gpio/acpi_gpiobusvar.h6
-rw-r--r--sys/dev/gpio/gpioaei.c204
-rw-r--r--sys/dev/gpio/gpiobus.c19
-rw-r--r--sys/dev/gpio/gpiobus_internal.h2
-rw-r--r--sys/dev/gpio/ofw_gpiobus.c14
-rw-r--r--sys/dev/hid/hidbus.c41
-rw-r--r--sys/dev/hid/hidquirk.h1
-rw-r--r--sys/dev/hid/hidraw.c12
-rw-r--r--sys/dev/hid/hkbd.c19
-rw-r--r--sys/dev/hid/ietp.c31
-rw-r--r--sys/dev/hid/u2f.c590
-rw-r--r--sys/dev/ice/ice_bitops.h4
-rw-r--r--sys/dev/ice/ice_lan_tx_rx.h2
-rw-r--r--sys/dev/ice/ice_lib.h2
-rw-r--r--sys/dev/ice/ice_protocol_type.h2
-rw-r--r--sys/dev/iicbus/iichid.c3
-rw-r--r--sys/dev/isci/scil/intel_sata.h2
-rw-r--r--sys/dev/ixgbe/if_ix.c231
-rw-r--r--sys/dev/ixgbe/if_ixv.c6
-rw-r--r--sys/dev/ixgbe/ixgbe.h11
-rw-r--r--sys/dev/ixgbe/ixgbe_api.c16
-rw-r--r--sys/dev/ixgbe/ixgbe_api.h1
-rw-r--r--sys/dev/ixgbe/ixgbe_common.c25
-rw-r--r--sys/dev/ixgbe/ixgbe_e610.c5567
-rw-r--r--sys/dev/ixgbe/ixgbe_e610.h224
-rw-r--r--sys/dev/ixgbe/ixgbe_osdep.c26
-rw-r--r--sys/dev/ixgbe/ixgbe_osdep.h31
-rw-r--r--sys/dev/ixgbe/ixgbe_type.h69
-rw-r--r--sys/dev/ixgbe/ixgbe_type_e610.h2278
-rw-r--r--sys/dev/ixgbe/ixgbe_vf.c3
-rw-r--r--sys/dev/ixl/if_ixl.c25
-rw-r--r--sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c2
-rw-r--r--sys/dev/netmap/if_ptnet.c6
-rw-r--r--sys/dev/psci/smccc_trng.c2
-rw-r--r--sys/dev/qcom_rnd/qcom_rnd.c2
-rw-r--r--sys/dev/random/armv8rng.c2
-rw-r--r--sys/dev/random/darn.c2
-rw-r--r--sys/dev/random/ivy.c2
-rw-r--r--sys/dev/random/nehemiah.c2
-rw-r--r--sys/dev/random/random_harvestq.c12
-rw-r--r--sys/dev/random/randomdev.h4
-rw-r--r--sys/dev/ufshci/ufshci.h104
-rw-r--r--sys/dev/ufshci/ufshci_ctrlr.c33
-rw-r--r--sys/dev/ufshci/ufshci_ctrlr_cmd.c26
-rw-r--r--sys/dev/ufshci/ufshci_private.h36
-rw-r--r--sys/dev/ufshci/ufshci_req_queue.c123
-rw-r--r--sys/dev/ufshci/ufshci_req_sdb.c133
-rw-r--r--sys/dev/usb/input/uhid.c6
-rw-r--r--sys/dev/usb/input/usbhid.c8
-rw-r--r--sys/dev/usb/usb_device.c48
-rw-r--r--sys/dev/usb/usb_generic.c37
-rw-r--r--sys/dev/usb/usbdi.h3
-rw-r--r--sys/dev/virtio/mmio/virtio_mmio.c48
-rw-r--r--sys/dev/virtio/mmio/virtio_mmio.h1
-rw-r--r--sys/dev/virtio/mmio/virtio_mmio_fdt.c47
-rw-r--r--sys/dev/virtio/mmio/virtio_mmio_if.m99
-rw-r--r--sys/dev/virtio/network/if_vtnet.c6
-rw-r--r--sys/dev/virtio/random/virtio_random.c2
-rw-r--r--sys/dev/virtio/virtio_bus_if.m4
-rw-r--r--sys/dev/virtio/virtqueue.c2
-rw-r--r--sys/fs/cd9660/cd9660_vnops.c14
-rw-r--r--sys/fs/ext2fs/ext2_vnops.c16
-rw-r--r--sys/fs/fuse/fuse_vnops.c2
-rw-r--r--sys/fs/nfs/nfsport.h14
-rw-r--r--sys/fs/nfsclient/nfs_clport.c6
-rw-r--r--sys/fs/nfsclient/nfs_clvnops.c4
-rw-r--r--sys/fs/nfsserver/nfs_nfsdport.c1
-rw-r--r--sys/fs/nfsserver/nfs_nfsdstate.c6
-rw-r--r--sys/fs/p9fs/p9fs_vnops.c2
-rw-r--r--sys/fs/tarfs/tarfs_vnops.c5
-rw-r--r--sys/fs/tmpfs/tmpfs_subr.c2
-rw-r--r--sys/fs/tmpfs/tmpfs_vnops.c17
-rw-r--r--sys/geom/part/g_part.c6
-rw-r--r--sys/i386/conf/GENERIC1
-rw-r--r--sys/isa/isareg.h2
-rw-r--r--sys/kern/imgact_elf.c54
-rw-r--r--sys/kern/kern_prot.c5
-rw-r--r--sys/kern/uipc_usrreq.c11
-rw-r--r--sys/kern/vfs_bio.c2
-rw-r--r--sys/kern/vfs_syscalls.c2
-rw-r--r--sys/kern/vfs_vnops.c2
-rw-r--r--sys/libkern/qsort.c14
-rw-r--r--sys/modules/e6000sw/Makefile2
-rw-r--r--sys/modules/etherswitch/Makefile2
-rw-r--r--sys/modules/evdev/Makefile2
-rw-r--r--sys/modules/gpio/gpioaei/Makefile2
-rw-r--r--sys/modules/gve/Makefile2
-rw-r--r--sys/modules/hid/Makefile1
-rw-r--r--sys/modules/hid/u2f/Makefile8
-rw-r--r--sys/modules/if_infiniband/Makefile3
-rw-r--r--sys/modules/if_vlan/Makefile2
-rw-r--r--sys/modules/ix/Makefile2
-rw-r--r--sys/modules/ixv/Makefile2
-rw-r--r--sys/modules/linux64/Makefile1
-rw-r--r--sys/modules/md/Makefile2
-rw-r--r--sys/modules/miiproxy/Makefile2
-rw-r--r--sys/modules/mlx5/Makefile2
-rw-r--r--sys/modules/mlx5en/Makefile2
-rw-r--r--sys/modules/netgraph/ksocket/Makefile2
-rw-r--r--sys/modules/nvmf/nvmf/Makefile3
-rw-r--r--sys/modules/ossl/Makefile1
-rw-r--r--sys/modules/qlnx/qlnxev/Makefile1
-rw-r--r--sys/modules/rtw88/Makefile1
-rw-r--r--sys/modules/rtw89/Makefile1
-rw-r--r--sys/modules/uinput/Makefile2
-rw-r--r--sys/modules/usb/usie/Makefile2
-rw-r--r--sys/modules/usb/wmt/Makefile2
-rw-r--r--sys/modules/zfs/Makefile7
-rw-r--r--sys/modules/zfs/zfs_config.h16
-rw-r--r--sys/modules/zfs/zfs_gitrev.h2
-rw-r--r--sys/net/if_bridgevar.h2
-rw-r--r--sys/net/if_clone.h2
-rw-r--r--sys/net/if_pfsync.h4
-rw-r--r--sys/net/if_tap.h2
-rw-r--r--sys/net/if_tun.h2
-rw-r--r--sys/net/if_tuntap.c88
-rw-r--r--sys/net/iflib.c96
-rw-r--r--sys/net/pfvar.h69
-rw-r--r--sys/netinet/ip_icmp.c3
-rw-r--r--sys/netinet/udp_usrreq.c2
-rw-r--r--sys/netinet6/udp6_usrreq.c2
-rw-r--r--sys/netlink/netlink_io.c13
-rw-r--r--sys/netlink/netlink_linux.h2
-rw-r--r--sys/netlink/route/iface.c1
-rw-r--r--sys/netlink/route/iface_drivers.c18
-rw-r--r--sys/netlink/route/route_var.h1
-rw-r--r--sys/netpfil/pf/if_pfsync.c12
-rw-r--r--sys/netpfil/pf/pf.c38
-rw-r--r--sys/netpfil/pf/pf_nl.c2
-rw-r--r--sys/powerpc/conf/GENERIC641
-rw-r--r--sys/powerpc/conf/GENERIC64LE1
-rw-r--r--sys/riscv/conf/GENERIC1
-rw-r--r--sys/riscv/starfive/jh7110_pcie.c12
-rw-r--r--sys/rpc/auth.h26
-rw-r--r--sys/security/mac_do/mac_do.c48
-rw-r--r--sys/sys/conf.h1
-rw-r--r--sys/sys/param.h2
-rw-r--r--sys/sys/vnode.h9
-rw-r--r--sys/tools/amd64_ia32_vdso.sh2
-rw-r--r--sys/tools/amd64_vdso.sh2
-rw-r--r--sys/ufs/ffs/ffs_vnops.c12
-rw-r--r--sys/ufs/ufs/ufs_vnops.c26
-rw-r--r--sys/vm/vnode_pager.c3
-rw-r--r--tests/sys/fs/fusefs/mockfs.cc2
-rw-r--r--tests/sys/net/Makefile1
-rwxr-xr-xtests/sys/net/if_tun_test.sh22
-rw-r--r--tests/sys/net/transient_tuntap.c54
-rw-r--r--tests/sys/netpfil/pf/table.sh24
-rw-r--r--tools/build/Makefile1
-rwxr-xr-xtools/build/depend-cleanup.sh64
-rw-r--r--tools/build/mk/OptionalObsoleteFiles.inc3511
-rw-r--r--tools/build/options/WITHOUT_GSSAPI1
-rw-r--r--tools/build/options/WITHOUT_KERBEROS2
-rw-r--r--tools/build/options/WITH_CLEAN4
-rwxr-xr-xtools/test/netfibs/reflector.sh2
-rw-r--r--tools/tools/usbtest/usb_msc_test.c4
-rw-r--r--usr.bin/Makefile2
-rw-r--r--usr.bin/kyua/Makefile3
-rw-r--r--usr.bin/localedef/localedef.12
-rw-r--r--usr.bin/tcopy/tcopy.cc2
-rw-r--r--usr.sbin/Makefile5
-rw-r--r--usr.sbin/certctl/certctl.820
-rw-r--r--usr.sbin/certctl/certctl.c107
-rw-r--r--usr.sbin/certctl/tests/certctl_test.sh155
-rw-r--r--usr.sbin/freebsd-update/freebsd-update.sh2
-rw-r--r--usr.sbin/jail/config.c2
-rwxr-xr-xusr.sbin/jail/tests/jail_basic_test.sh15
-rw-r--r--usr.sbin/pw/pw.89
-rw-r--r--usr.sbin/pw/pw.c1
-rw-r--r--usr.sbin/pw/pw_user.c18
-rw-r--r--usr.sbin/pw/pwupd.h1
-rw-r--r--usr.sbin/rpc.ypupdated/Makefile32
-rw-r--r--usr.sbin/rpc.ypupdated/Makefile.depend18
-rw-r--r--usr.sbin/rpc.ypupdated/update.c328
-rw-r--r--usr.sbin/rpc.ypupdated/yp_dbdelete.c68
-rw-r--r--usr.sbin/rpc.ypupdated/yp_dbupdate.c147
-rwxr-xr-xusr.sbin/rpc.ypupdated/ypupdate32
-rw-r--r--usr.sbin/rpc.ypupdated/ypupdated_extern.h32
-rw-r--r--usr.sbin/rpc.ypupdated/ypupdated_main.c287
-rw-r--r--usr.sbin/rpc.ypupdated/ypupdated_server.c227
-rw-r--r--usr.sbin/services_mkdb/services2
-rw-r--r--usr.sbin/syslogd/syslogd.c5
-rwxr-xr-xusr.sbin/unbound/setup/local-unbound-setup.sh2
-rw-r--r--usr.sbin/watch/watch.82
945 files changed, 91605 insertions, 21644 deletions
diff --git a/Makefile.inc1 b/Makefile.inc1
index cf32248b6b9d..d899f994a40d 100644
--- a/Makefile.inc1
+++ b/Makefile.inc1
@@ -1021,7 +1021,8 @@ IMAKE_MTREE= MTREE_CMD="${MTREE_CMD} ${MTREEFLAGS}"
.endif
.if make(distributeworld)
-CERTCTLDESTDIR= ${DESTDIR}/${DISTDIR}/base
+CERTCTLDESTDIR= ${DESTDIR}/${DISTDIR}
+CERTCTLFLAGS+= -d /base
.else
CERTCTLDESTDIR= ${DESTDIR}
.endif
@@ -1050,7 +1051,7 @@ DESTDIR_MTREE= ${DISTR_MTREECMD} ${DESTDIR_MTREEFLAGS}
METALOG_SORT_CMD= env -i LC_COLLATE=C sort
# kernel stage
-KMAKEENV= ${WMAKEENV:NSYSROOT=*}
+KMAKEENV= ${WMAKEENV}
KMAKE= ${TIME_ENV} ${KMAKEENV} ${MAKE} ${.MAKEFLAGS} ${KERNEL_FLAGS} KERNEL=${INSTKERNNAME}
#
@@ -1416,7 +1417,7 @@ _sysctl=sysctl
.endif
ITOOLS= [ awk cap_mkdb cat chflags chmod chown cmp cp \
- date echo egrep find grep id install ${_install-info} \
+ date echo egrep find grep id install \
ln make mkdir mtree mv pwd_mkdb \
rm sed services_mkdb sh sort strip ${_sysctl} test time true uname wc
@@ -2708,8 +2709,14 @@ _basic_bootstrap_tools+=sbin/md5
_basic_bootstrap_tools+=usr.sbin/tzsetup
.endif
-# certctl is needed as an install tool
+# certctl is needed as an install tool. libcrypto is rather heavy, so we'll
+# build that alongside it only for platforms that don't expose headers for
+# OpenSSL, like macOS.
.if ${MK_CAROOT} != "no" && ${MK_OPENSSL} != "no"
+.if ${.MAKE.OS} == "Darwin"
+_bootstrap_libcrypto=secure/lib/libcrypto
+${_bt}-usr.sbin/certctl: ${_bt}-secure/lib/libcrypto
+.endif
_certctl=usr.sbin/certctl
.endif
@@ -2776,6 +2783,7 @@ bootstrap-tools: ${_bt}-links .PHONY
${_strfile} \
usr.bin/dtc \
${_cat} \
+ ${_bootstrap_libcrypto} \
${_certctl} \
${_kbdcontrol} \
${_elftoolchain_libs} \
@@ -3379,7 +3387,7 @@ secure/lib/libssh__L: lib/libz__L secure/lib/libcrypto__L lib/libcrypt__L
secure/lib/libssh__L: lib/libldns__L
.endif
-.if ${MK_GSSAPI} != "no" && ${MK_KERBEROS_SUPPORT} != "no"
+.if ${MK_KERBEROS_SUPPORT} != "no"
.if ${MK_MITKRB5} != "no"
secure/lib/libssh__L: krb5/lib/gssapi__L krb5/lib/krb5__L \
krb5/lib/crypto__L krb5/util/et__L lib/libmd__L krb5/util/support__L
@@ -3438,11 +3446,9 @@ kerberos5/lib/libheimipcc__L: kerberos5/lib/libroken__L kerberos5/lib/libheimbas
lib/libsqlite3__L: lib/libthr__L
-.if ${MK_GSSAPI} != "no"
-.if ${MK_MITKRB5} == "no"
+.if ${MK_KERBEROS} != "no" && ${MK_MITKRB5} == "no"
_lib_libgssapi= lib/libgssapi
.endif
-.endif
.if ${MK_KERBEROS} != "no"
.if ${MK_MITKRB5} != "no"
diff --git a/Makefile.libcompat b/Makefile.libcompat
index 82ef4e9df0ac..e132a9f8442f 100644
--- a/Makefile.libcompat
+++ b/Makefile.libcompat
@@ -56,6 +56,7 @@ build${libcompat}: .PHONY
-p ${WORLDTMP}/usr/lib/debug/usr >/dev/null
.endif
${_+_}cd ${.CURDIR}; \
+ PATH=${BPATH:Q}:${PATH:Q} \
WORLDTMP=${WORLDTMP} \
MAKEFLAGS="-m ${.CURDIR}/tools/build/mk ${.MAKEFLAGS}" \
${MAKE} -f Makefile.inc1 \
@@ -93,6 +94,11 @@ _LC_LIBDIRS.${MK_CDDL:tl}+= cddl/lib
_LC_LIBDIRS.${MK_CRYPT:tl}+= secure/lib
.if ${MK_MITKRB5} != "no"
_LC_LIBDIRS.${MK_KERBEROS:tl}+= krb5/lib
+# Only include the parts of krb5/util that build libraries, not executables.
+_LC_LIBDIRS.${MK_KERBEROS:tl}+= krb5/util/et
+_LC_LIBDIRS.${MK_KERBEROS:tl}+= krb5/util/profile
+_LC_LIBDIRS.${MK_KERBEROS:tl}+= krb5/util/support
+_LC_LIBDIRS.${MK_KERBEROS:tl}+= krb5/util/verto
.else
_LC_LIBDIRS.${MK_KERBEROS:tl}+= kerberos5/lib
.endif
diff --git a/ObsoleteFiles.inc b/ObsoleteFiles.inc
index 4db0704d88ef..95d4c06fe07e 100644
--- a/ObsoleteFiles.inc
+++ b/ObsoleteFiles.inc
@@ -54,10 +54,6 @@
# 20250812: Remove a bogus manlink
OLD_FILES+=usr/share/man/man3/quota_statfs.3.gz
-# 20250810: Removal of remaining Secure RPC (DES) bits
-OLD_FILES+=usr/sbin/rpc.ypupdated
-OLD_FILES+=etc/rc.d/ypupdated
-
# 20250808: nvmfd removed from base install
OLD_FILES+=usr/sbin/nvmfd
OLD_FILES+=usr/share/man/man8/nvmfd.8.gz
@@ -80,7 +76,6 @@ OLD_FILES+=usr/lib/libopencsd.so
# 20250801: Move compile_et to /usr/sbin
OLD_FILES+=usr/share/et/compile_et
-OLD_DIRS+=usr/share/et
# 20250728: zfsboot (MBR) removed
OLD_FILES+=boot/zfsboot
diff --git a/RELNOTES b/RELNOTES
index c11e8543746c..3aec631bc15e 100644
--- a/RELNOTES
+++ b/RELNOTES
@@ -10,18 +10,20 @@ newline. Entries should be separated by a newline.
Changes to this file should not be MFCed.
+dc5ba6b8b4f0:
+ The WITHOUT_GSSAPI src.conf(5) option has been removed. The GSSAPI
+ libraries are now always built unless WITHOUT_KERBEROS is set.
+
+c43cad871720:
+ jemalloc 5.3.0 has landed. See contrib/jemalloc/ChangeLog
+ for the long list of changes.
+
cce64f2e6851:
Add support for the NFSv4.2 Clone operation, which uses
block cloning to "copy on write" files on an NFS server.
This only works for exported ZFS file systems that have
block cloning enabled, at this time.
-7ac276298b72, 7b8c9de17448, 1271b1d747a7, 9dcb984251b3:
- Support for Secure RPC DES authentication has been removed. This
- includes publickey(5), keyserv(8) and the rpc_secure(3) routines which
- rely on keyserv. The libc symbols are still present for backward
- compatibility, but all functions will unconditionally return an error.
-
37b2cb5ecb0f:
Add support to VOP_COPY_FILE_RANGE() for block cloning.
At this time, ZFS is the only local file system that supports
@@ -87,6 +89,16 @@ f1f230439fa4:
and obj NFSv4 mounted, the total RPC count dropped from
5461286 to 945643, with a 20% drop in elapsed time.
+c3fc0db3bc50
+ The default value of the sysctl variable
+ net.inet.tcp.nolocaltimewait has changed from 1 to 0. This means
+ that FreeBSD does not skip the TIME_WAIT state anymore for
+ endpoints for which the remote address is local. The new sysctl
+ variable net.inet.tcp.msl_local can be used to control the time
+ these endpoints stay in the TIME_WAIT state. The sysctl variable
+ net.inet.tcp.nolocaltimewait is deprecated and intended to be
+ removed in FreeBSD 16.
+
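(For example, the TIME_WAIT handling for local endpoints can be inspected
and tuned from the shell. The values below are illustrative, and the unit
of net.inet.tcp.msl_local is assumed to match net.inet.tcp.msl,
i.e. milliseconds; "sysctl -d" gives the authoritative description:

    sysctl net.inet.tcp.nolocaltimewait          # deprecated, now defaults to 0
    sysctl net.inet.tcp.msl_local=5000           # shorten TIME_WAIT for local peers
)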
cd240957d7ba
Making a connection to INADDR_ANY (i.e., using INADDR_ANY as an alias
for localhost) is now disabled by default. This functionality can be
diff --git a/UPDATING b/UPDATING
index 82399310d299..ddb2e7603b2a 100644
--- a/UPDATING
+++ b/UPDATING
@@ -27,6 +27,59 @@ NOTE TO PEOPLE WHO THINK THAT FreeBSD 15.x IS SLOW:
world, or to merely disable the most expensive debugging functionality
at runtime, run "ln -s 'abort:false,junk:false' /etc/malloc.conf".)
+20250823:
+ The set of pkgbase packages for Kerberos and OpenSSL has changed.
+ After updating past 250e77d4f0a5, you should check 'pkg orphans'
+ and remove any orphaned packages, then examine the output of
+ 'pkg install -r base -g FreeBSD-openssl\* FreeBSD-kerberos\*'
+ and install any new packages you need.
+
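(A sketch of that sequence, using the commands given above and assuming
the stock "base" pkg repository is configured; review each step's output
before removing or installing anything:

    pkg orphans
    pkg install -r base -g 'FreeBSD-openssl*' 'FreeBSD-kerberos*'
)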
+20250823:
+ The size of struct ifbreq has changed to restore bridge(4)'s ABI
+ compatibility with ifconfig from 14.x and earlier. You must update
+ the kernel and /sbin/ifconfig at the same time, or you will not be
+ able to add interfaces to a bridge. This is particularly important
+ if you rely on a bridge for network access during boot; in that
+ case, install the new kernel and the new /sbin/ifconfig before
+ rebooting.
+
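(One ordering that satisfies this, sketched under the assumption of a
standard source upgrade with world already built; your local procedure
may differ:

    cd /usr/src
    make installkernel
    make -C sbin/ifconfig install    # install the matching /sbin/ifconfig
    shutdown -r now
)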
+20250820:
+ The WITHOUT_GSSAPI src.conf knob has been removed. This was already
+ a no-op for MIT Kerberos, so this only affects builds which set
+ WITHOUT_MITKRB5=1, in which case you now always get libgssapi.
+
+ Also, Heimdal's compile_et and libcom_err are now controlled by
+ WITHOUT_KERBEROS, not WITHOUT_KERBEROS_SUPPORT. This matches
+ the behaviour of the MIT Kerberos versions.
+
+20250820:
+ Commits 08c7dd2fbe4f and a4197ea47777 have changed the ABI between
+ libvmmapi and the vmm device. If using a custom kernel configuration,
+ please ensure that the COMPAT_FREEBSD14 option is included so older
+ versions of libvmmapi continue working as expected.
+
+20250819:
+ The CLEAN option has been switched back from default-on to default-off.
+ This reverts the 20250808 change below, which had reverted the 20240729
+ change before it. Note that some src.conf(5) options are known to break
+ ABI or compatibility in ways that may require a clean build initially
+ when switched.
+
+20250816:
+ Sendmail's libmilter has been moved to its own package. If you want
+ to compile applications that use libmilter, you should install the
+ FreeBSD-libmilter* packages.
+
+ If you only have FreeBSD-sendmail installed for applications that
+ require libmilter, you can now remove it.
+
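(For example, assuming the stock pkgbase repository name:

    pkg install -r base -g 'FreeBSD-libmilter*'    # headers and library
    pkg remove FreeBSD-sendmail                    # only if nothing else needs it
)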
+20250815:
+ jemalloc 5.3.0 has been committed to the tree.
+
+20250815:
+ The removal of Secure RPC DES authentication noted in 20250810
+ has been reverted. (However, it is still non-functional.)
+
20250813:
Commit cce64f2e6851 changed the internal KAPI between the NFS
modules. As such, all of them need to be rebuilt from sources.
diff --git a/cddl/lib/libicp/Makefile b/cddl/lib/libicp/Makefile
index f097e7e6ff58..8e801246215f 100644
--- a/cddl/lib/libicp/Makefile
+++ b/cddl/lib/libicp/Makefile
@@ -11,6 +11,7 @@ ASM_SOURCES_AS = \
asm-x86_64/aes/aes_aesni.S \
asm-x86_64/modes/gcm_pclmulqdq.S \
asm-x86_64/modes/aesni-gcm-x86_64.S \
+ asm-x86_64/modes/aesni-gcm-avx2-vaes.S \
asm-x86_64/modes/ghash-x86_64.S \
asm-x86_64/sha2/sha256-x86_64.S \
asm-x86_64/sha2/sha512-x86_64.S \
@@ -112,6 +113,7 @@ CFLAGS.aes_amd64.S+= -DLOCORE
CFLAGS.aes_aesni.S+= -DLOCORE
CFLAGS.gcm_pclmulqdq.S+= -DLOCORE
CFLAGS.aesni-gcm-x86_64.S+= -DLOCORE
+CFLAGS.aesni-gcm-avx2-vaes.S+= -DLOCORE
CFLAGS.ghash-x86_64.S+= -DLOCORE
CFLAGS.sha256-x86_64.S+= -DLOCORE
CFLAGS.sha512-x86_64.S+= -DLOCORE
diff --git a/cddl/lib/libicp_rescue/Makefile b/cddl/lib/libicp_rescue/Makefile
index 3a8b6746fe61..0a5a81f4ab7f 100644
--- a/cddl/lib/libicp_rescue/Makefile
+++ b/cddl/lib/libicp_rescue/Makefile
@@ -11,6 +11,7 @@ ASM_SOURCES_AS = \
asm-x86_64/aes/aes_aesni.S \
asm-x86_64/modes/gcm_pclmulqdq.S \
asm-x86_64/modes/aesni-gcm-x86_64.S \
+ asm-x86_64/modes/aesni-gcm-avx2-vaes.S \
asm-x86_64/sha2/sha256-x86_64.S \
asm-x86_64/sha2/sha512-x86_64.S \
asm-x86_64/blake3/blake3_avx2.S \
@@ -109,6 +110,7 @@ CFLAGS.aes_amd64.S+= -DLOCORE
CFLAGS.aes_aesni.S+= -DLOCORE
CFLAGS.gcm_pclmulqdq.S+= -DLOCORE
CFLAGS.aesni-gcm-x86_64.S+= -DLOCORE
+CFLAGS.aesni-gcm-avx2-vaes.S+= -DLOCORE
CFLAGS.ghash-x86_64.S+= -DLOCORE
CFLAGS.sha256-x86_64.S+= -DLOCORE
CFLAGS.sha512-x86_64.S+= -DLOCORE
diff --git a/contrib/jemalloc/ChangeLog b/contrib/jemalloc/ChangeLog
index e55813b7becc..32fde56247f6 100644
--- a/contrib/jemalloc/ChangeLog
+++ b/contrib/jemalloc/ChangeLog
@@ -4,6 +4,106 @@ brevity. Much more detail can be found in the git revision history:
https://github.com/jemalloc/jemalloc
+* 5.3.0 (May 6, 2022)
+
+ This release contains many speed and space optimizations, from micro
+ optimizations on common paths to rework of internal data structures and
+ locking schemes, and many more changes too detailed to list below.
+ System-level metric improvements of several percent were measured in
+ tested production workloads. The release has gone through large-scale
+ production testing.
+
+ New features:
+ - Add the thread.idle mallctl which hints that the calling thread will be
+ idle for a nontrivial period of time. (@davidtgoldblatt)
+ - Allow small size classes to be the maximum size class to cache in the
+ thread-specific cache, through the opt.[lg_]tcache_max option. (@interwq,
+ @jordalgo)
+ - Make the behavior of realloc(ptr, 0) configurable with opt.zero_realloc.
+ (@davidtgoldblatt)
+ - Add 'make uninstall' support. (@sangshuduo, @Lapenkov)
+ - Support C++17 over-aligned allocation. (@marksantaniello)
+ - Add the thread.peak mallctl for approximate per-thread peak memory tracking.
+ (@davidtgoldblatt)
+ - Add interval-based stats output opt.stats_interval. (@interwq)
+ - Add prof.prefix to override filename prefixes for dumps. (@zhxchen17)
+ - Add high resolution timestamp support for profiling. (@tyroguru)
+ - Add the --collapsed flag to jeprof for flamegraph generation.
+ (@igorwwwwwwwwwwwwwwwwwwww)
+ - Add the --debug-syms-by-id option to jeprof for debug symbols discovery.
+ (@DeannaGelbart)
+ - Add the opt.prof_leak_error option to exit with error code when leak is
+ detected using opt.prof_final. (@yunxuo)
+ - Add opt.cache_oblivious as a runtime alternative to config.cache_oblivious.
+ (@interwq)
+ - Add mallctl interfaces:
+ + opt.zero_realloc (@davidtgoldblatt)
+ + opt.cache_oblivious (@interwq)
+ + opt.prof_leak_error (@yunxuo)
+ + opt.stats_interval (@interwq)
+ + opt.stats_interval_opts (@interwq)
+ + opt.tcache_max (@interwq)
+ + opt.trust_madvise (@azat)
+ + prof.prefix (@zhxchen17)
+ + stats.zero_reallocs (@davidtgoldblatt)
+ + thread.idle (@davidtgoldblatt)
+ + thread.peak.{read,reset} (@davidtgoldblatt)
+
+ Bug fixes:
+ - Fix the synchronization around explicit tcache creation which could cause
+ invalid tcache identifiers. This regression was first released in 5.0.0.
+ (@yoshinorim, @davidtgoldblatt)
+ - Fix a profiling biasing issue which could cause incorrect heap usage and
+ object counts. This issue existed in all previous releases with the heap
+ profiling feature. (@davidtgoldblatt)
+ - Fix the order of stats counter updating on large realloc which could cause
+ failed assertions. This regression was first released in 5.0.0. (@azat)
+ - Fix the locking on the arena destroy mallctl, which could cause concurrent
+ arena creations to fail. This functionality was first introduced in 5.0.0.
+ (@interwq)
+
+ Portability improvements:
+ - Remove nothrow from system function declarations on macOS and FreeBSD.
+ (@davidtgoldblatt, @fredemmott, @leres)
+ - Improve overcommit and page alignment settings on NetBSD. (@zoulasc)
+ - Improve CPU affinity support on BSD platforms. (@devnexen)
+ - Improve utrace detection and support. (@devnexen)
+ - Improve QEMU support with MADV_DONTNEED zeroed pages detection. (@azat)
+ - Add memcntl support on Solaris / illumos. (@devnexen)
+ - Improve CPU_SPINWAIT on ARM. (@AWSjswinney)
+ - Improve TSD cleanup on FreeBSD. (@Lapenkov)
+ - Disable percpu_arena if the CPU count cannot be reliably detected. (@azat)
+ - Add malloc_size(3) override support. (@devnexen)
+ - Add mmap VM_MAKE_TAG support. (@devnexen)
+ - Add support for MADV_[NO]CORE. (@devnexen)
+ - Add support for DragonFlyBSD. (@devnexen)
+ - Fix the QUANTUM setting on MIPS64. (@brooksdavis)
+ - Add the QUANTUM setting for ARC. (@vineetgarc)
+ - Add the QUANTUM setting for LoongArch. (@wangjl-uos)
+ - Add QNX support. (@jqian-aurora)
+ - Avoid atexit(3) calls unless the relevant profiling features are enabled.
+ (@BusyJay, @laiwei-rice, @interwq)
+ - Fix unknown option detection when using Clang. (@Lapenkov)
+ - Fix symbol conflict with musl libc. (@georgthegreat)
+ - Add -Wimplicit-fallthrough checks. (@nickdesaulniers)
+ - Add __forceinline support on MSVC. (@santagada)
+ - Improve FreeBSD and Windows CI support. (@Lapenkov)
+ - Add CI support for PPC64LE architecture. (@ezeeyahoo)
+
+ Incompatible changes:
+ - Maximum size class allowed in tcache (opt.[lg_]tcache_max) now has an upper
+ bound of 8MiB. (@interwq)
+
+ Optimizations and refactors (@davidtgoldblatt, @Lapenkov, @interwq):
+ - Optimize the common cases of the thread cache operations.
+ - Optimize internal data structures, including RB tree and pairing heap.
+ - Optimize the internal locking on extent management.
+ - Extract and refactor the internal page allocator and interface modules.
+
+ Documentation:
+ - Fix doc build with --with-install-suffix. (@lawmurray, @interwq)
+ - Add PROFILING_INTERNALS.md. (@davidtgoldblatt)
+ - Ensure the proper order of doc building and installation. (@Mingli-Yu)
+
* 5.2.1 (August 5, 2019)
This release is primarily about Windows. A critical virtual memory leak is
diff --git a/contrib/jemalloc/FREEBSD-upgrade b/contrib/jemalloc/FREEBSD-upgrade
index d3173b9d1f36..fcb66ea71786 100755
--- a/contrib/jemalloc/FREEBSD-upgrade
+++ b/contrib/jemalloc/FREEBSD-upgrade
@@ -1,189 +1,91 @@
#!/bin/sh
-#
-# Usage: cd /usr/src/contrib/jemalloc
-# ./FREEBSD-upgrade <command> [args]
-#
-# At least the following ports are required when importing jemalloc:
-# - devel/autoconf
-# - devel/git
-# - devel/gmake
-# - textproc/docbook-xsl
-# - textproc/libxslt
-#
-# The normal workflow for importing a new release is:
-#
-# cd /usr/src/contrib/jemalloc
-#
-# Merge local changes that were made since the previous import:
-#
-# ./FREEBSD-upgrade merge-changes
-# ./FREEBSD-upgrade rediff
-#
-# Extract latest jemalloc release.
-#
-# ./FREEBSD-upgrade extract <rev>
-#
-# Fix patch conflicts as necessary, then regenerate diffs to update line
-# offsets:
-#
-# ./FREEBSD-upgrade rediff
-# ./FREEBSD-upgrade extract <rev>
-#
-# Do multiple buildworld/installworld rounds. If problems arise and patches
-# are needed, edit the code in ${work} as necessary, then:
-#
-# ./FREEBSD-upgrade rediff
-# ./FREEBSD-upgrade extract <rev>
-#
-# The rediff/extract order is important because rediff saves the local
-# changes, then extract blows away the work tree and re-creates it with the
-# diffs applied.
-#
-# Finally, to clean up:
-#
-# ./FREEBSD-upgrade clean
-set -e
-set -x
+# Note: you need docbook installed, as well as gmake (it is needed to
+# generate private_namespace.h).
-if [ ! -x "FREEBSD-upgrade" ] ; then
- echo "Run from within src/contrib/jemalloc/" >&2
- exit 1
-fi
+# Merge the vendor branch via git subtree (not committed at this time):
+git subtree merge -P contrib/jemalloc vendor/jemalloc
+cd contrib/jemalloc
-if [ "x${JEMALLOC_REPO}" = "x" ] ; then
- JEMALLOC_REPO=https://github.com/jemalloc/jemalloc.git
-fi
+# Gut the tests, since they take up too much space.
+# Everything else can stay, but if not, add more to trim (there's
+# always a trade-off between time and saved size).
+git rm -rf test msvc
+git commit --amend
-src=`pwd`
+# kill the tests with empty files so we don't have to hack configure.ac
+mkdir -p test/include/test
+touch test/include/test/jemalloc_test_defs.h.in
+touch test/include/test/jemalloc_test.h.in
+echo 'exit 0' > test/test.sh.in
-jemalloc_tmp="jemalloc.tmp"
-tmpdir="${src}/../${jemalloc_tmp}"
-bare_repo="${tmpdir}/jemalloc_bare.git"
-work="jemalloc_work.git"
-work_repo="${tmpdir}/${work}"
-namespace_repo="${tmpdir}/jemalloc_namespace.git"
-changes="${src}/FREEBSD-changes"
-
-do_fetch() {
- local rev=$1
- if [ ! -d "${bare_repo}" ] ; then
- mkdir -p "${bare_repo}"
- git clone --bare ${JEMALLOC_REPO} ${bare_repo}
- fi
- (
- cd ${bare_repo}
- git fetch origin ${rev}
- )
-}
+# Reconfigure -- needed only to regenerate the .h files... We don't
+# use all the files generated.
+#
+# Also note: 5.2 lacks --with-lg-page-sizes, but 5.3 has it.
+# There ought to be a way to avoid hard-wiring the version / hash.
+./autogen.sh --enable-xmalloc --enable-fill --enable-lazy-lock --enable-stats \
+ --enable-utrace --with-malloc-conf=abort_conf:false \
+ --with-xslroot=/usr/local/share/xsl/docbook --with-private-namespace=__ \
+ --with-lg-page-sizes=12,13,14,15,16 \
+ --with-version=5.3.0-0-g54eaed1d8b56b1aa528be3bdd1877e59c56fa90c
-do_extract_helper() {
- local rev=$1
- local repo=$2
- do_fetch ${rev}
- rm -rf ${repo}
- git clone ${bare_repo} ${repo}
- (
- cd ${repo}
- if [ "x${rev}" != "x" ] ; then
- # Use optional rev argument to check out a revision other than HEAD on
- # master.
- git checkout ${rev}
- fi
- )
-}
+# Copy over the important generated .h files in configure
+cp ./include/jemalloc/jemalloc.h ../../lib/libc/stdlib/malloc/jemalloc/include/jemalloc
+git add ../../lib/libc/stdlib/malloc/jemalloc/include/jemalloc/jemalloc.h
+cp ./include/jemalloc/jemalloc_defs.h ../../lib/libc/stdlib/malloc/jemalloc/include/jemalloc
+git add ../../lib/libc/stdlib/malloc/jemalloc/include/jemalloc/jemalloc_defs.h
-do_autogen() {
- ./autogen.sh --enable-xmalloc --enable-utrace \
- --with-malloc-conf=abort_conf:false \
- --with-xslroot=/usr/local/share/xsl/docbook --with-private-namespace=__ \
- --with-lg-page-sizes=12,13,14,16
-}
+# Generate the namespace .h files and copy a small subset into the tree.
+# These are super-awkward to generate at buildworld time. Also, we assume we
+# only have to make one set of these (currently true, since the dependencies
+# are unlikely to change).
+gmake include/jemalloc/internal/private_namespace.h
+for i in private_namespace.h jemalloc_internal_defs.h public_namespace.h jemalloc_preamble.h; do
+ cp include/jemalloc/internal/$i ../../lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/
+ git add ../../lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/$i
+ rm include/jemalloc/internal/$i
+done
+# OK, commit all the generated files
+git add VERSION
+git commit --amend
-do_extract_diff() {
- local rev=$1
- local repo=$2
- do_extract_helper ${rev} ${repo}
- (
- cd ${repo}
- # Apply diffs before generating files.
- patch -p1 < "${src}/FREEBSD-diffs"
- find . -name '*.orig' -delete
- # Generate files.
- do_autogen
- gmake dist
- )
-}
+# Clean up the mess
+git clean -f .
-do_extract_namespace() {
- local rev=$1
- local repo=$2
- do_extract_helper ${rev} ${repo}
- (
- cd ${repo}
- # Generate files.
- do_autogen
- gmake include/jemalloc/internal/private_namespace.h
- )
-}
+# Save the cheat sheet
+cp ~/jemalloc-upd FREEBSD-upgrade
+git add FREEBSD-upgrade
+git commit --amend
-do_extract() {
- local rev=$1
- do_fetch ${rev}
- do_extract_diff ${rev} ${work_repo}
- do_extract_namespace ${rev} ${namespace_repo}
-}
+# Remove hash.c from lib/libc/stdlib/malloc/jemalloc/Makefile.inc
+# mutex_pool.c prng.c
+# Add
+# bin_info.c san.c san_bump.c counter.c prof_data.c prof_log.c prof_recent.c prof_stats.c prof_sys.c
+# emap.c edata.c edata_cache.c pa.c pa_extra.c pac.c decay.c hpa.c hpa_hooks.c fxp.c hpdata.c pai.c
+# ecache.c ehooks.c eset.c sec.c cache_bin.c peak_event.c psset.c inspect.c exp_grow.c thread_event.c
+#
-do_diff() {
- (
- cd ${work_repo}
- find . -name '*.orig' -delete
- find . -name '*.rej' -delete
- git add -A
- git diff --cached
- ) > FREEBSD-diffs
-}
+# Manually comment out the following in lib/libc/stdlib/malloc/jemalloc/include/jemalloc/jemalloc.h
+# /* #define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF */
+# Add
+# #define JEMALLOC_OVERRIDE_VALLOC
+# and #include "jemalloc_FreeBSD.h"
+# with some adjustments to the old jemalloc_FreeBSD.h; git can help here.
-command=$1
-shift
-case "${command}" in
- merge-changes) # Merge local changes that were made since the previous import.
- rev=`cat VERSION |tr 'g' ' ' |awk '{print $2}'`
- # Extract code corresponding to most recent import.
- do_extract ${rev}
- # Compute local differences to the upstream+patches and apply them.
- (
- cd ${tmpdir}
- diff -ru -X ${src}/FREEBSD-Xlist ${work} ../jemalloc > ${changes} || true
- )
- (
- cd ${work_repo}
- patch -p1 < ${changes} || true
- find . -name '*.orig' -delete
- )
- # Update diff.
- do_diff
- ;;
- extract) # Extract upstream sources, apply patches, copy to contrib/jemalloc.
- rev=$1
- do_extract ${rev}
- # Delete existing files so that cruft doesn't silently remain.
- rm -rf ChangeLog COPYING VERSION doc include src
- # Copy files over.
- tar cf - -C ${work_repo} -X FREEBSD-Xlist . |tar xvf -
- internal_dir="include/jemalloc/internal"
- grep -v ' isthreaded ' \
- "${namespace_repo}/${internal_dir}/private_namespace.h" \
- > "${internal_dir}/private_namespace.h"
- ;;
- rediff) # Regenerate diffs based on working tree.
- do_diff
- ;;
- clean) # Remove working tree and temporary files.
- rm -rf ${tmpdir} ${changes}
- ;;
- *)
- echo "Unsupported command: \"${command}\"" >&2
- exit 1
- ;;
-esac
+# Had to manually remove
+# -#define __malloc_options_1_0 JEMALLOC_N(__malloc_options_1_0)
+# -#define _malloc_first_thread JEMALLOC_N(_malloc_first_thread)
+# -#define __malloc_message_1_0 JEMALLOC_N(__malloc_message_1_0)
+# -#define isthreaded JEMALLOC_N(isthreaded)
+#
+# Also had to remove the following to fix jemalloc 3 ABI compat
+# -#define je_allocm JEMALLOC_N(je_allocm)
+# -#define je_dallocm JEMALLOC_N(je_dallocm)
+# -#define je_nallocm JEMALLOC_N(je_nallocm)
+# -#define je_rallocm JEMALLOC_N(je_rallocm)
+# -#define je_sallocm JEMALLOC_N(je_sallocm)
+# Without the diff you end up with non-exported _je_je*allocm symbols. With
+# it, you get symbols of the form:
+# 365: 000000000018e2a0 406 FUNC WEAK DEFAULT 14 rallocm@FBSD_1.3 (5)
+# 657: 000000000018e2a0 406 FUNC GLOBAL DEFAULT 14 __rallocm@FBSD_1.3 (5)
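+# Symbol tables like the above can be inspected with e.g. (illustrative path):
+#   readelf -sW /lib/libc.so.7 | grep allocm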
+#
diff --git a/contrib/jemalloc/INSTALL.md b/contrib/jemalloc/INSTALL.md
new file mode 100644
index 000000000000..9701364041c8
--- /dev/null
+++ b/contrib/jemalloc/INSTALL.md
@@ -0,0 +1,849 @@
+Building and installing a packaged release of jemalloc can be as simple as
+typing the following while in the root directory of the source tree:
+
+ ./configure
+ make
+ make install
+
+If building from unpackaged developer sources, the simplest command sequence
+that might work is:
+
+ ./autogen.sh
+ make
+ make install
+
+You can uninstall the installed build artifacts like this:
+
+ make uninstall
+
+Notes:
+ - "autoconf" needs to be installed.
+ - Documentation is built by the default target only when xsltproc is
+   available. The build will warn but not stop if the dependency is missing.
+
+
+## Advanced configuration
+
+The 'configure' script supports numerous options that allow control of which
+functionality is enabled, where jemalloc is installed, etc. Optionally, pass
+any of the following arguments (not a definitive list) to 'configure':
+
+* `--help`
+
+ Print a definitive list of options.
+
+* `--prefix=<install-root-dir>`
+
+ Set the base directory in which to install. For example:
+
+ ./configure --prefix=/usr/local
+
+ will cause files to be installed into /usr/local/include, /usr/local/lib,
+ and /usr/local/man.
+
+* `--with-version=(<major>.<minor>.<bugfix>-<nrev>-g<gid>|VERSION)`
+
+ The VERSION file is mandatory for successful configuration, and the
+ following steps are taken to assure its presence:
+ 1) If --with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid> is specified,
+ generate VERSION using the specified value.
+ 2) If --with-version is not specified in either form and the source
+ directory is inside a git repository, try to generate VERSION via 'git
+ describe' invocations that pattern-match release tags.
+ 3) If VERSION is missing, generate it with a bogus version:
+ 0.0.0-0-g0000000000000000000000000000000000000000
+
+ Note that --with-version=VERSION bypasses (1) and (2), which simplifies
+ VERSION configuration when embedding a jemalloc release into another
+ project's git repository.
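+
+ For example, an illustrative invocation using the bogus placeholder hash:
+
+   ./configure --with-version=5.3.0-0-g0000000000000000000000000000000000000000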
+
+* `--with-rpath=<colon-separated-rpath>`
+
+ Embed one or more library paths, so that libjemalloc can find the libraries
+ it is linked to. This works only on ELF-based systems.
+
+* `--with-mangling=<map>`
+
+ Mangle public symbols specified in <map> which is a comma-separated list of
+ name:mangled pairs.
+
+ For example, to use ld's --wrap option as an alternative method for
+ overriding libc's malloc implementation, specify something like:
+
+ --with-mangling=malloc:__wrap_malloc,free:__wrap_free[...]
+
+ Note that mangling happens prior to application of the prefix specified by
+ --with-jemalloc-prefix, and mangled symbols are then ignored when applying
+ the prefix.
+
+* `--with-jemalloc-prefix=<prefix>`
+
+ Prefix all public APIs with <prefix>. For example, if <prefix> is
+ "prefix_", API changes like the following occur:
+
+ malloc() --> prefix_malloc()
+ malloc_conf --> prefix_malloc_conf
+ /etc/malloc.conf --> /etc/prefix_malloc.conf
+ MALLOC_CONF --> PREFIX_MALLOC_CONF
+
+ This makes it possible to use jemalloc at the same time as the system
+ allocator, or even to use multiple copies of jemalloc simultaneously.
+
+ By default, the prefix is "", except on OS X, where it is "je_". On OS X,
+ jemalloc overlays the default malloc zone, but makes no attempt to actually
+ replace the "malloc", "calloc", etc. symbols.
+
+* `--without-export`
+
+ Don't export public APIs. This can be useful when building jemalloc as a
+ static library, or to avoid exporting public APIs when using the zone
+ allocator on OSX.
+
+* `--with-private-namespace=<prefix>`
+
+ Prefix all library-private APIs with <prefix>je_. For shared libraries,
+ symbol visibility mechanisms prevent these symbols from being exported, but
+ for static libraries, naming collisions are a real possibility. By
+ default, <prefix> is empty, which results in a symbol prefix of je_.
+
+* `--with-install-suffix=<suffix>`
+
+ Append <suffix> to the base name of all installed files, such that multiple
+ versions of jemalloc can coexist in the same installation directory. For
+ example, libjemalloc.so.0 becomes libjemalloc<suffix>.so.0.
+
+* `--with-malloc-conf=<malloc_conf>`
+
+ Embed `<malloc_conf>` as a run-time options string that is processed prior to
+ the malloc_conf global variable, the /etc/malloc.conf symlink, and the
+ MALLOC_CONF environment variable. For example, to change the default decay
+ time to 30 seconds:
+
+ --with-malloc-conf=decay_ms:30000
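+
+ The same setting could instead be supplied at run time, e.g. (a sketch;
+ ./app stands for any program):
+
+   MALLOC_CONF=decay_ms:30000 ./app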
+
+* `--enable-debug`
+
+ Enable assertions and validation code. This incurs a substantial
+ performance hit, but is very useful during application development.
+
+* `--disable-stats`
+
+ Disable statistics gathering functionality. See the "opt.stats_print"
+ option documentation for usage details.
+
+* `--enable-prof`
+
+ Enable heap profiling and leak detection functionality. See the "opt.prof"
+ option documentation for usage details. When enabled, there are several
+ approaches to backtracing, and the configure script chooses the first one
+ in the following list that appears to function correctly:
+
+ + libunwind (requires --enable-prof-libunwind)
+ + libgcc (unless --disable-prof-libgcc)
+ + gcc intrinsics (unless --disable-prof-gcc)
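+
+ A sketch of how these options combine (invocation and program name are
+ illustrative):
+
+   ./configure --enable-prof --enable-prof-libunwind
+   make
+   MALLOC_CONF=prof:true,prof_final:true ./app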
+
+* `--enable-prof-libunwind`
+
+ Use the libunwind library (http://www.nongnu.org/libunwind/) for stack
+ backtracing.
+
+* `--disable-prof-libgcc`
+
+ Disable the use of libgcc's backtracing functionality.
+
+* `--disable-prof-gcc`
+
+ Disable the use of gcc intrinsics for backtracing.
+
+* `--with-static-libunwind=<libunwind.a>`
+
+ Statically link against the specified libunwind.a rather than dynamically
+ linking with -lunwind.
+
+* `--disable-fill`
+
+ Disable support for junk/zero filling of memory. See the "opt.junk" and
+ "opt.zero" option documentation for usage details.
+
+* `--disable-zone-allocator`
+
+ Disable zone allocator for Darwin. This means jemalloc won't be hooked as
+ the default allocator on OSX/iOS.
+
+* `--enable-utrace`
+
+ Enable utrace(2)-based allocation tracing. This feature is not broadly
+ portable (FreeBSD has it, but Linux and OS X do not).
+
+* `--enable-xmalloc`
+
+ Enable support for optional immediate termination due to out-of-memory
+ errors, as is commonly implemented by the "xmalloc" wrapper for malloc.
+ See the "opt.xmalloc" option documentation for usage details.
+
+* `--enable-lazy-lock`
+
+ Enable code that wraps pthread_create() to detect when an application
+ switches from single-threaded to multi-threaded mode, so that it can avoid
+ mutex locking/unlocking operations while in single-threaded mode. In
+ practice, this feature usually has little impact on performance unless
+ thread-specific caching is disabled.
+
+* `--disable-cache-oblivious`
+
+ Disable cache-oblivious large allocation alignment by default, for large
+ allocation requests with no alignment constraints. If this feature is
+ disabled, all large allocations are page-aligned as an implementation
+ artifact, which can severely harm CPU cache utilization. However, the
+ cache-oblivious layout comes at the cost of one extra page per large
+ allocation, which in the most extreme case increases physical memory usage
+ for the 16 KiB size class to 20 KiB.
+
+* `--disable-syscall`
+
+ Disable use of syscall(2) in favor of {open,read,write,close}(2). This is
+ intended as a workaround for systems that place security limitations on
+ syscall(2).
+
+* `--disable-cxx`
+
+ Disable C++ integration. This will cause new and delete operator
+ implementations to be omitted.
+
+* `--with-xslroot=<path>`
+
+ Specify where to find DocBook XSL stylesheets when building the
+ documentation.
+
+* `--with-lg-page=<lg-page>`
+
+ Specify the base 2 log of the allocator page size, which must in turn be at
+ least as large as the system page size. By default the configure script
+ determines the host's page size and sets the allocator page size equal to
+ the system page size, so this option need not be specified unless the
+ system page size may change between configuration and execution, e.g. when
+ cross compiling.
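+
+ For example, an illustrative invocation for a target with 16 KiB (2^14
+ byte) pages:
+
+   ./configure --with-lg-page=14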
+
+* `--with-lg-hugepage=<lg-hugepage>`
+
+ Specify the base 2 log of the system huge page size. This option is useful
+ when cross compiling, or when overriding the default for systems that do
+ not explicitly support huge pages.
+
+* `--with-lg-quantum=<lg-quantum>`
+
+ Specify the base 2 log of the minimum allocation alignment. jemalloc needs
+ to know the minimum alignment that meets the following C standard
+ requirement (quoted from the April 12, 2011 draft of the C11 standard):
+
+ > The pointer returned if the allocation succeeds is suitably aligned so
+ that it may be assigned to a pointer to any type of object with a
+ fundamental alignment requirement and then used to access such an object
+ or an array of such objects in the space allocated [...]
+
+ This setting is architecture-specific, and although jemalloc includes known
+ safe values for the most commonly used modern architectures, there is a
+ wrinkle related to GNU libc (glibc) that may impact your choice of
+ <lg-quantum>. On most modern architectures, this mandates 16-byte
+ alignment (<lg-quantum>=4), but the glibc developers chose not to meet this
+ requirement for performance reasons. An old discussion can be found at
+ <https://sourceware.org/bugzilla/show_bug.cgi?id=206> . Unlike glibc,
+ jemalloc does follow the C standard by default (caveat: jemalloc
+ technically cheats for size classes smaller than the quantum), but the fact
+ that Linux systems already work around this allocator noncompliance means
+ that it is generally safe in practice to let jemalloc's minimum alignment
+ follow glibc's lead. If you specify `--with-lg-quantum=3` during
+ configuration, jemalloc will provide additional size classes that are not
+ 16-byte-aligned (24, 40, and 56).
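+
+ For example (illustrative), the 16-byte default corresponds to
+ <lg-quantum>=4 since 2^4 = 16, whereas
+
+   ./configure --with-lg-quantum=3
+
+ sets the quantum to 2^3 = 8 bytes, enabling the extra size classes above.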
+
+* `--with-lg-vaddr=<lg-vaddr>`
+
+ Specify the number of significant virtual address bits. By default, the
+ configure script attempts to detect virtual address size on those platforms
+ where it knows how, and picks a default otherwise. This option may be
+ useful when cross-compiling.
+
+* `--disable-initial-exec-tls`
+
+ Disable the initial-exec TLS model for jemalloc's internal thread-local
+ storage (on those platforms that support explicit settings). This can allow
+ jemalloc to be dynamically loaded after program startup (e.g. using dlopen).
+ Note that in this case, there will be two malloc implementations operating
+ in the same process, which will almost certainly result in confusing runtime
+ crashes if pointers leak from one implementation to the other.
+
+* `--disable-libdl`
+
+ Disable the usage of libdl, namely dlsym(3) which is required by the lazy
+ lock option. This can allow building static binaries.
+
+The following environment variables (not a definitive list) impact configure's
+behavior:
+
+* `CFLAGS="?"`
+* `CXXFLAGS="?"`
+
+ Pass these flags to the C/C++ compiler. Any flags set by the configure
+ script are prepended, which means explicitly set flags generally take
+ precedence. Take care when specifying flags such as -Werror, because
+ configure tests may be affected in undesirable ways.
+
+* `EXTRA_CFLAGS="?"`
+* `EXTRA_CXXFLAGS="?"`
+
+ Append these flags to CFLAGS/CXXFLAGS, without passing them to the
+ compiler(s) during configuration. This makes it possible to add flags such
+ as -Werror, while allowing the configure script to determine what other
+ flags are appropriate for the specified configuration.
+
+* `CPPFLAGS="?"`
+
+ Pass these flags to the C preprocessor. Note that CFLAGS is not passed to
+ 'cpp' when 'configure' is looking for include files, so you must use
+ CPPFLAGS instead if you need to help 'configure' find header files.
+
+* `LD_LIBRARY_PATH="?"`
+
+ 'ld' uses this colon-separated list to find libraries.
+
+* `LDFLAGS="?"`
+
+ Pass these flags when linking.
+
+* `PATH="?"`
+
+ 'configure' uses this to find programs.
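+
+For example (illustrative values):
+
+   CC=clang CFLAGS="-O2" EXTRA_CFLAGS="-Werror" ./configure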
+
+In some cases it may be necessary to work around configuration results that do
+not match reality. For example, Linux 4.5 added support for the MADV_FREE flag
+to madvise(2), which can cause problems if building on a host with MADV_FREE
+support and deploying to a target without. To work around this, use a cache
+file to override the relevant configuration variable defined in configure.ac,
+e.g.:
+
+ echo "je_cv_madv_free=no" > config.cache && ./configure -C
+
+
+## Advanced compilation
+
+To build only parts of jemalloc, use the following targets:
+
+ build_lib_shared
+ build_lib_static
+ build_lib
+ build_doc_html
+ build_doc_man
+ build_doc
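+
+For example, to build just the shared library (a minimal sketch):
+
+   make build_lib_shared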
+
+To install only parts of jemalloc, use the following targets:
+
+ install_bin
+ install_include
+ install_lib_shared
+ install_lib_static
+ install_lib_pc
+ install_lib
+ install_doc_html
+ install_doc_man
+ install_doc
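+
+For example, a sketch that stages only the libraries and headers into a
+package root (path illustrative):
+
+   make DESTDIR=/tmp/stage install_lib install_include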
+
+To clean up build results to varying degrees, use the following make targets:
+
+ clean
+ distclean
+ relclean
+
+
+## Advanced installation
+
+Optionally, define make variables when invoking make, including (not
+exclusively):
+
+* `INCLUDEDIR="?"`
+
+ Use this as the installation prefix for header files.
+
+* `LIBDIR="?"`
+
+ Use this as the installation prefix for libraries.
+
+* `MANDIR="?"`
+
+ Use this as the installation prefix for man pages.
+
+* `DESTDIR="?"`
+
+ Prepend DESTDIR to INCLUDEDIR, LIBDIR, DATADIR, and MANDIR. This is useful
+ when installing to a different path than was specified via --prefix.
+
+* `CC="?"`
+
+ Use this to invoke the C compiler.
+
+* `CFLAGS="?"`
+
+ Pass these flags to the compiler.
+
+* `CPPFLAGS="?"`
+
+ Pass these flags to the C preprocessor.
+
+* `LDFLAGS="?"`
+
+ Pass these flags when linking.
+
+* `PATH="?"`
+
+ Use this to search for programs used during configuration and building.
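+
+For example, combining several of these (illustrative paths):
+
+   make DESTDIR=/tmp/stage MANDIR=/usr/share/man install_doc_man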
+
+
+## Development
+
+If you intend to make non-trivial changes to jemalloc, use the 'autogen.sh'
+script rather than 'configure'. This re-generates 'configure', enables
+configuration dependency rules, and enables re-generation of automatically
+generated source files.
+
+The build system supports using an object directory separate from the source
+tree. For example, you can create an 'obj' directory, and from within that
+directory, issue configuration and build commands:
+
+ autoconf
+ mkdir obj
+ cd obj
+ ../configure --enable-autogen
+ make
+
+
+## Documentation
+
+The manual page is generated in both html and roff formats. Any web browser
+can be used to view the html manual. The roff manual page can be formatted
+prior to installation via the following command:
+
+ nroff -man -t doc/jemalloc.3
diff --git a/contrib/jemalloc/Makefile.in b/contrib/jemalloc/Makefile.in
new file mode 100644
index 000000000000..121297702bcc
--- /dev/null
+++ b/contrib/jemalloc/Makefile.in
@@ -0,0 +1,1389 @@
+# Clear out all vpaths, then set just one (default vpath) for the main build
+# directory.
+vpath
+vpath % .
+
+# Clear the default suffixes, so that built-in rules are not used.
+.SUFFIXES :
+
+SHELL := /bin/sh
+
+CC := @CC@
+CXX := @CXX@
+
+# Configuration parameters.
+DESTDIR =
+BINDIR := $(DESTDIR)@BINDIR@
+INCLUDEDIR := $(DESTDIR)@INCLUDEDIR@
+LIBDIR := $(DESTDIR)@LIBDIR@
+DATADIR := $(DESTDIR)@DATADIR@
+MANDIR := $(DESTDIR)@MANDIR@
+srcroot := @srcroot@
+objroot := @objroot@
+abs_srcroot := @abs_srcroot@
+abs_objroot := @abs_objroot@
+
+# Build parameters.
+CPPFLAGS := @CPPFLAGS@ -I$(objroot)include -I$(srcroot)include
+CONFIGURE_CFLAGS := @CONFIGURE_CFLAGS@
+SPECIFIED_CFLAGS := @SPECIFIED_CFLAGS@
+EXTRA_CFLAGS := @EXTRA_CFLAGS@
+CFLAGS := $(strip $(CONFIGURE_CFLAGS) $(SPECIFIED_CFLAGS) $(EXTRA_CFLAGS))
+CONFIGURE_CXXFLAGS := @CONFIGURE_CXXFLAGS@
+SPECIFIED_CXXFLAGS := @SPECIFIED_CXXFLAGS@
+EXTRA_CXXFLAGS := @EXTRA_CXXFLAGS@
+CXXFLAGS := $(strip $(CONFIGURE_CXXFLAGS) $(SPECIFIED_CXXFLAGS) $(EXTRA_CXXFLAGS))
+LDFLAGS := @LDFLAGS@
+EXTRA_LDFLAGS := @EXTRA_LDFLAGS@
+LIBS := @LIBS@
+RPATH_EXTRA := @RPATH_EXTRA@
+SO := @so@
+IMPORTLIB := @importlib@
+O := @o@
+A := @a@
+EXE := @exe@
+LIBPREFIX := @libprefix@
+REV := @rev@
+install_suffix := @install_suffix@
+ABI := @abi@
+XSLTPROC := @XSLTPROC@
+XSLROOT := @XSLROOT@
+AUTOCONF := @AUTOCONF@
+_RPATH = @RPATH@
+RPATH = $(if $(1),$(call _RPATH,$(1)))
+cfghdrs_in := $(addprefix $(srcroot),@cfghdrs_in@)
+cfghdrs_out := @cfghdrs_out@
+cfgoutputs_in := $(addprefix $(srcroot),@cfgoutputs_in@)
+cfgoutputs_out := @cfgoutputs_out@
+enable_autogen := @enable_autogen@
+enable_doc := @enable_doc@
+enable_shared := @enable_shared@
+enable_static := @enable_static@
+enable_prof := @enable_prof@
+enable_zone_allocator := @enable_zone_allocator@
+enable_experimental_smallocx := @enable_experimental_smallocx@
+MALLOC_CONF := @JEMALLOC_CPREFIX@MALLOC_CONF
+link_whole_archive := @link_whole_archive@
+DSO_LDFLAGS = @DSO_LDFLAGS@
+SOREV = @SOREV@
+PIC_CFLAGS = @PIC_CFLAGS@
+CTARGET = @CTARGET@
+LDTARGET = @LDTARGET@
+TEST_LD_MODE = @TEST_LD_MODE@
+MKLIB = @MKLIB@
+AR = @AR@
+ARFLAGS = @ARFLAGS@
+DUMP_SYMS = @DUMP_SYMS@
+AWK := @AWK@
+CC_MM = @CC_MM@
+LM := @LM@
+INSTALL = @INSTALL@
+
+ifeq (macho, $(ABI))
+TEST_LIBRARY_PATH := DYLD_FALLBACK_LIBRARY_PATH="$(objroot)lib"
+else
+ifeq (pecoff, $(ABI))
+TEST_LIBRARY_PATH := PATH="$(PATH):$(objroot)lib"
+else
+TEST_LIBRARY_PATH :=
+endif
+endif
+
+LIBJEMALLOC := $(LIBPREFIX)jemalloc$(install_suffix)
+
+# Lists of files.
+BINS := $(objroot)bin/jemalloc-config $(objroot)bin/jemalloc.sh $(objroot)bin/jeprof
+C_HDRS := $(objroot)include/jemalloc/jemalloc$(install_suffix).h
+C_SRCS := $(srcroot)src/jemalloc.c \
+ $(srcroot)src/arena.c \
+ $(srcroot)src/background_thread.c \
+ $(srcroot)src/base.c \
+ $(srcroot)src/bin.c \
+ $(srcroot)src/bin_info.c \
+ $(srcroot)src/bitmap.c \
+ $(srcroot)src/buf_writer.c \
+ $(srcroot)src/cache_bin.c \
+ $(srcroot)src/ckh.c \
+ $(srcroot)src/counter.c \
+ $(srcroot)src/ctl.c \
+ $(srcroot)src/decay.c \
+ $(srcroot)src/div.c \
+ $(srcroot)src/ecache.c \
+ $(srcroot)src/edata.c \
+ $(srcroot)src/edata_cache.c \
+ $(srcroot)src/ehooks.c \
+ $(srcroot)src/emap.c \
+ $(srcroot)src/eset.c \
+ $(srcroot)src/exp_grow.c \
+ $(srcroot)src/extent.c \
+ $(srcroot)src/extent_dss.c \
+ $(srcroot)src/extent_mmap.c \
+ $(srcroot)src/fxp.c \
+ $(srcroot)src/san.c \
+ $(srcroot)src/san_bump.c \
+ $(srcroot)src/hook.c \
+ $(srcroot)src/hpa.c \
+ $(srcroot)src/hpa_hooks.c \
+ $(srcroot)src/hpdata.c \
+ $(srcroot)src/inspect.c \
+ $(srcroot)src/large.c \
+ $(srcroot)src/log.c \
+ $(srcroot)src/malloc_io.c \
+ $(srcroot)src/mutex.c \
+ $(srcroot)src/nstime.c \
+ $(srcroot)src/pa.c \
+ $(srcroot)src/pa_extra.c \
+ $(srcroot)src/pai.c \
+ $(srcroot)src/pac.c \
+ $(srcroot)src/pages.c \
+ $(srcroot)src/peak_event.c \
+ $(srcroot)src/prof.c \
+ $(srcroot)src/prof_data.c \
+ $(srcroot)src/prof_log.c \
+ $(srcroot)src/prof_recent.c \
+ $(srcroot)src/prof_stats.c \
+ $(srcroot)src/prof_sys.c \
+ $(srcroot)src/psset.c \
+ $(srcroot)src/rtree.c \
+ $(srcroot)src/safety_check.c \
+ $(srcroot)src/sc.c \
+ $(srcroot)src/sec.c \
+ $(srcroot)src/stats.c \
+ $(srcroot)src/sz.c \
+ $(srcroot)src/tcache.c \
+ $(srcroot)src/test_hooks.c \
+ $(srcroot)src/thread_event.c \
+ $(srcroot)src/ticker.c \
+ $(srcroot)src/tsd.c \
+ $(srcroot)src/witness.c
+ifeq ($(enable_zone_allocator), 1)
+C_SRCS += $(srcroot)src/zone.c
+endif
+ifeq ($(IMPORTLIB),$(SO))
+STATIC_LIBS := $(objroot)lib/$(LIBJEMALLOC).$(A)
+endif
+ifdef PIC_CFLAGS
+STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_pic.$(A)
+else
+STATIC_LIBS += $(objroot)lib/$(LIBJEMALLOC)_s.$(A)
+endif
+DSOS := $(objroot)lib/$(LIBJEMALLOC).$(SOREV)
+ifneq ($(SOREV),$(SO))
+DSOS += $(objroot)lib/$(LIBJEMALLOC).$(SO)
+endif
+ifeq (1, $(link_whole_archive))
+LJEMALLOC := -Wl,--whole-archive -L$(objroot)lib -l$(LIBJEMALLOC) -Wl,--no-whole-archive
+else
+LJEMALLOC := $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
+endif
+PC := $(objroot)jemalloc.pc
+DOCS_XML := $(objroot)doc/jemalloc$(install_suffix).xml
+DOCS_HTML := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.html)
+DOCS_MAN3 := $(DOCS_XML:$(objroot)%.xml=$(objroot)%.3)
+DOCS := $(DOCS_HTML) $(DOCS_MAN3)
+C_TESTLIB_SRCS := $(srcroot)test/src/btalloc.c $(srcroot)test/src/btalloc_0.c \
+ $(srcroot)test/src/btalloc_1.c $(srcroot)test/src/math.c \
+ $(srcroot)test/src/mtx.c $(srcroot)test/src/sleep.c \
+ $(srcroot)test/src/SFMT.c $(srcroot)test/src/test.c \
+ $(srcroot)test/src/thd.c $(srcroot)test/src/timer.c
+ifeq (1, $(link_whole_archive))
+C_UTIL_INTEGRATION_SRCS :=
+C_UTIL_CPP_SRCS :=
+else
+C_UTIL_INTEGRATION_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c \
+ $(srcroot)src/ticker.c
+C_UTIL_CPP_SRCS := $(srcroot)src/nstime.c $(srcroot)src/malloc_io.c
+endif
+TESTS_UNIT := \
+ $(srcroot)test/unit/a0.c \
+ $(srcroot)test/unit/arena_decay.c \
+ $(srcroot)test/unit/arena_reset.c \
+ $(srcroot)test/unit/atomic.c \
+ $(srcroot)test/unit/background_thread.c \
+ $(srcroot)test/unit/background_thread_enable.c \
+ $(srcroot)test/unit/base.c \
+ $(srcroot)test/unit/batch_alloc.c \
+ $(srcroot)test/unit/binshard.c \
+ $(srcroot)test/unit/bitmap.c \
+ $(srcroot)test/unit/bit_util.c \
+ $(srcroot)test/unit/buf_writer.c \
+ $(srcroot)test/unit/cache_bin.c \
+ $(srcroot)test/unit/ckh.c \
+ $(srcroot)test/unit/counter.c \
+ $(srcroot)test/unit/decay.c \
+ $(srcroot)test/unit/div.c \
+ $(srcroot)test/unit/double_free.c \
+ $(srcroot)test/unit/edata_cache.c \
+ $(srcroot)test/unit/emitter.c \
+ $(srcroot)test/unit/extent_quantize.c \
+ ${srcroot}test/unit/fb.c \
+ $(srcroot)test/unit/fork.c \
+ ${srcroot}test/unit/fxp.c \
+ ${srcroot}test/unit/san.c \
+ ${srcroot}test/unit/san_bump.c \
+ $(srcroot)test/unit/hash.c \
+ $(srcroot)test/unit/hook.c \
+ $(srcroot)test/unit/hpa.c \
+ $(srcroot)test/unit/hpa_background_thread.c \
+ $(srcroot)test/unit/hpdata.c \
+ $(srcroot)test/unit/huge.c \
+ $(srcroot)test/unit/inspect.c \
+ $(srcroot)test/unit/junk.c \
+ $(srcroot)test/unit/junk_alloc.c \
+ $(srcroot)test/unit/junk_free.c \
+ $(srcroot)test/unit/log.c \
+ $(srcroot)test/unit/mallctl.c \
+ $(srcroot)test/unit/malloc_conf_2.c \
+ $(srcroot)test/unit/malloc_io.c \
+ $(srcroot)test/unit/math.c \
+ $(srcroot)test/unit/mpsc_queue.c \
+ $(srcroot)test/unit/mq.c \
+ $(srcroot)test/unit/mtx.c \
+ $(srcroot)test/unit/nstime.c \
+ $(srcroot)test/unit/oversize_threshold.c \
+ $(srcroot)test/unit/pa.c \
+ $(srcroot)test/unit/pack.c \
+ $(srcroot)test/unit/pages.c \
+ $(srcroot)test/unit/peak.c \
+ $(srcroot)test/unit/ph.c \
+ $(srcroot)test/unit/prng.c \
+ $(srcroot)test/unit/prof_accum.c \
+ $(srcroot)test/unit/prof_active.c \
+ $(srcroot)test/unit/prof_gdump.c \
+ $(srcroot)test/unit/prof_hook.c \
+ $(srcroot)test/unit/prof_idump.c \
+ $(srcroot)test/unit/prof_log.c \
+ $(srcroot)test/unit/prof_mdump.c \
+ $(srcroot)test/unit/prof_recent.c \
+ $(srcroot)test/unit/prof_reset.c \
+ $(srcroot)test/unit/prof_stats.c \
+ $(srcroot)test/unit/prof_tctx.c \
+ $(srcroot)test/unit/prof_thread_name.c \
+ $(srcroot)test/unit/prof_sys_thread_name.c \
+ $(srcroot)test/unit/psset.c \
+ $(srcroot)test/unit/ql.c \
+ $(srcroot)test/unit/qr.c \
+ $(srcroot)test/unit/rb.c \
+ $(srcroot)test/unit/retained.c \
+ $(srcroot)test/unit/rtree.c \
+ $(srcroot)test/unit/safety_check.c \
+ $(srcroot)test/unit/sc.c \
+ $(srcroot)test/unit/sec.c \
+ $(srcroot)test/unit/seq.c \
+ $(srcroot)test/unit/SFMT.c \
+ $(srcroot)test/unit/size_check.c \
+ $(srcroot)test/unit/size_classes.c \
+ $(srcroot)test/unit/slab.c \
+ $(srcroot)test/unit/smoothstep.c \
+ $(srcroot)test/unit/spin.c \
+ $(srcroot)test/unit/stats.c \
+ $(srcroot)test/unit/stats_print.c \
+ $(srcroot)test/unit/sz.c \
+ $(srcroot)test/unit/tcache_max.c \
+ $(srcroot)test/unit/test_hooks.c \
+ $(srcroot)test/unit/thread_event.c \
+ $(srcroot)test/unit/ticker.c \
+ $(srcroot)test/unit/tsd.c \
+ $(srcroot)test/unit/uaf.c \
+ $(srcroot)test/unit/witness.c \
+ $(srcroot)test/unit/zero.c \
+ $(srcroot)test/unit/zero_realloc_abort.c \
+ $(srcroot)test/unit/zero_realloc_free.c \
+ $(srcroot)test/unit/zero_realloc_alloc.c \
+ $(srcroot)test/unit/zero_reallocs.c
+ifeq (@enable_prof@, 1)
+TESTS_UNIT += \
+ $(srcroot)test/unit/arena_reset_prof.c \
+ $(srcroot)test/unit/batch_alloc_prof.c
+endif
+TESTS_INTEGRATION := $(srcroot)test/integration/aligned_alloc.c \
+ $(srcroot)test/integration/allocated.c \
+ $(srcroot)test/integration/extent.c \
+ $(srcroot)test/integration/malloc.c \
+ $(srcroot)test/integration/mallocx.c \
+ $(srcroot)test/integration/MALLOCX_ARENA.c \
+ $(srcroot)test/integration/overflow.c \
+ $(srcroot)test/integration/posix_memalign.c \
+ $(srcroot)test/integration/rallocx.c \
+ $(srcroot)test/integration/sdallocx.c \
+ $(srcroot)test/integration/slab_sizes.c \
+ $(srcroot)test/integration/thread_arena.c \
+ $(srcroot)test/integration/thread_tcache_enabled.c \
+ $(srcroot)test/integration/xallocx.c
+ifeq (@enable_experimental_smallocx@, 1)
+TESTS_INTEGRATION += \
+ $(srcroot)test/integration/smallocx.c
+endif
+ifeq (@enable_cxx@, 1)
+CPP_SRCS := $(srcroot)src/jemalloc_cpp.cpp
+TESTS_INTEGRATION_CPP := $(srcroot)test/integration/cpp/basic.cpp \
+ $(srcroot)test/integration/cpp/infallible_new_true.cpp \
+ $(srcroot)test/integration/cpp/infallible_new_false.cpp
+else
+CPP_SRCS :=
+TESTS_INTEGRATION_CPP :=
+endif
+TESTS_ANALYZE := $(srcroot)test/analyze/prof_bias.c \
+ $(srcroot)test/analyze/rand.c \
+ $(srcroot)test/analyze/sizes.c
+TESTS_STRESS := $(srcroot)test/stress/batch_alloc.c \
+ $(srcroot)test/stress/fill_flush.c \
+ $(srcroot)test/stress/hookbench.c \
+ $(srcroot)test/stress/large_microbench.c \
+ $(srcroot)test/stress/mallctl.c \
+ $(srcroot)test/stress/microbench.c
+
+
+TESTS := $(TESTS_UNIT) $(TESTS_INTEGRATION) $(TESTS_INTEGRATION_CPP) \
+ $(TESTS_ANALYZE) $(TESTS_STRESS)
+
+PRIVATE_NAMESPACE_HDRS := $(objroot)include/jemalloc/internal/private_namespace.h $(objroot)include/jemalloc/internal/private_namespace_jet.h
+PRIVATE_NAMESPACE_GEN_HDRS := $(PRIVATE_NAMESPACE_HDRS:%.h=%.gen.h)
+C_SYM_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.sym.$(O))
+C_SYMS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.sym)
+C_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.$(O))
+CPP_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.$(O))
+C_PIC_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.pic.$(O))
+CPP_PIC_OBJS := $(CPP_SRCS:$(srcroot)%.cpp=$(objroot)%.pic.$(O))
+C_JET_SYM_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.sym.$(O))
+C_JET_SYMS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.sym)
+C_JET_OBJS := $(C_SRCS:$(srcroot)%.c=$(objroot)%.jet.$(O))
+C_TESTLIB_UNIT_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.unit.$(O))
+C_TESTLIB_INTEGRATION_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O))
+C_UTIL_INTEGRATION_OBJS := $(C_UTIL_INTEGRATION_SRCS:$(srcroot)%.c=$(objroot)%.integration.$(O))
+C_TESTLIB_ANALYZE_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.analyze.$(O))
+C_TESTLIB_STRESS_OBJS := $(C_TESTLIB_SRCS:$(srcroot)%.c=$(objroot)%.stress.$(O))
+C_TESTLIB_OBJS := $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) \
+ $(C_UTIL_INTEGRATION_OBJS) $(C_TESTLIB_ANALYZE_OBJS) \
+ $(C_TESTLIB_STRESS_OBJS)
+
+TESTS_UNIT_OBJS := $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%.$(O))
+TESTS_INTEGRATION_OBJS := $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%.$(O))
+TESTS_INTEGRATION_CPP_OBJS := $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%.$(O))
+TESTS_ANALYZE_OBJS := $(TESTS_ANALYZE:$(srcroot)%.c=$(objroot)%.$(O))
+TESTS_STRESS_OBJS := $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%.$(O))
+TESTS_OBJS := $(TESTS_UNIT_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_ANALYZE_OBJS) \
+ $(TESTS_STRESS_OBJS)
+TESTS_CPP_OBJS := $(TESTS_INTEGRATION_CPP_OBJS)
+
+.PHONY: all dist build_doc_html build_doc_man build_doc
+.PHONY: install_bin install_include install_lib
+.PHONY: install_doc_html install_doc_man install_doc install
+.PHONY: tests check clean distclean relclean
+
+.SECONDARY : $(PRIVATE_NAMESPACE_GEN_HDRS) $(TESTS_OBJS) $(TESTS_CPP_OBJS)
+
+# Default target.
+all: build_lib
+
+dist: build_doc
+
+$(objroot)doc/%$(install_suffix).html : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/html.xsl
+ifneq ($(XSLROOT),)
+ $(XSLTPROC) -o $@ $(objroot)doc/html.xsl $<
+else
+ifeq ($(wildcard $(DOCS_HTML)),)
+ @echo "<p>Missing xsltproc. Doc not built.</p>" > $@
+endif
+ @echo "Missing xsltproc. "$@" not (re)built."
+endif
+
+$(objroot)doc/%$(install_suffix).3 : $(objroot)doc/%.xml $(srcroot)doc/stylesheet.xsl $(objroot)doc/manpages.xsl
+ifneq ($(XSLROOT),)
+ $(XSLTPROC) -o $@ $(objroot)doc/manpages.xsl $<
+# The -o option (output filename) of xsltproc may not work (it uses the
+# <refname> in the .xml file). Manually add the suffix if so.
+ ifneq ($(install_suffix),)
+ @if [ -f $(objroot)doc/jemalloc.3 ]; then \
+ mv $(objroot)doc/jemalloc.3 $(objroot)doc/jemalloc$(install_suffix).3 ; \
+ fi
+ endif
+else
+ifeq ($(wildcard $(DOCS_MAN3)),)
+ @echo "Missing xsltproc. Doc not built." > $@
+endif
+ @echo "Missing xsltproc. "$@" not (re)built."
+endif
+
+build_doc_html: $(DOCS_HTML)
+build_doc_man: $(DOCS_MAN3)
+build_doc: $(DOCS)
+
+#
+# Include generated dependency files.
+#
+ifdef CC_MM
+-include $(C_SYM_OBJS:%.$(O)=%.d)
+-include $(C_OBJS:%.$(O)=%.d)
+-include $(CPP_OBJS:%.$(O)=%.d)
+-include $(C_PIC_OBJS:%.$(O)=%.d)
+-include $(CPP_PIC_OBJS:%.$(O)=%.d)
+-include $(C_JET_SYM_OBJS:%.$(O)=%.d)
+-include $(C_JET_OBJS:%.$(O)=%.d)
+-include $(C_TESTLIB_OBJS:%.$(O)=%.d)
+-include $(TESTS_OBJS:%.$(O)=%.d)
+-include $(TESTS_CPP_OBJS:%.$(O)=%.d)
+endif
+
+$(C_SYM_OBJS): $(objroot)src/%.sym.$(O): $(srcroot)src/%.c
+$(C_SYM_OBJS): CPPFLAGS += -DJEMALLOC_NO_PRIVATE_NAMESPACE
+$(C_SYMS): $(objroot)src/%.sym: $(objroot)src/%.sym.$(O)
+$(C_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.c
+$(CPP_OBJS): $(objroot)src/%.$(O): $(srcroot)src/%.cpp
+$(C_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.c
+$(C_PIC_OBJS): CFLAGS += $(PIC_CFLAGS)
+$(CPP_PIC_OBJS): $(objroot)src/%.pic.$(O): $(srcroot)src/%.cpp
+$(CPP_PIC_OBJS): CXXFLAGS += $(PIC_CFLAGS)
+$(C_JET_SYM_OBJS): $(objroot)src/%.jet.sym.$(O): $(srcroot)src/%.c
+$(C_JET_SYM_OBJS): CPPFLAGS += -DJEMALLOC_JET -DJEMALLOC_NO_PRIVATE_NAMESPACE
+$(C_JET_SYMS): $(objroot)src/%.jet.sym: $(objroot)src/%.jet.sym.$(O)
+$(C_JET_OBJS): $(objroot)src/%.jet.$(O): $(srcroot)src/%.c
+$(C_JET_OBJS): CPPFLAGS += -DJEMALLOC_JET
+$(C_TESTLIB_UNIT_OBJS): $(objroot)test/src/%.unit.$(O): $(srcroot)test/src/%.c
+$(C_TESTLIB_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
+$(C_TESTLIB_INTEGRATION_OBJS): $(objroot)test/src/%.integration.$(O): $(srcroot)test/src/%.c
+$(C_TESTLIB_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST
+$(C_UTIL_INTEGRATION_OBJS): $(objroot)src/%.integration.$(O): $(srcroot)src/%.c
+$(C_TESTLIB_ANALYZE_OBJS): $(objroot)test/src/%.analyze.$(O): $(srcroot)test/src/%.c
+$(C_TESTLIB_ANALYZE_OBJS): CPPFLAGS += -DJEMALLOC_ANALYZE_TEST
+$(C_TESTLIB_STRESS_OBJS): $(objroot)test/src/%.stress.$(O): $(srcroot)test/src/%.c
+$(C_TESTLIB_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST -DJEMALLOC_STRESS_TESTLIB
+$(C_TESTLIB_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
+$(TESTS_UNIT_OBJS): CPPFLAGS += -DJEMALLOC_UNIT_TEST
+$(TESTS_INTEGRATION_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_TEST
+$(TESTS_INTEGRATION_CPP_OBJS): CPPFLAGS += -DJEMALLOC_INTEGRATION_CPP_TEST
+$(TESTS_ANALYZE_OBJS): CPPFLAGS += -DJEMALLOC_ANALYZE_TEST
+$(TESTS_STRESS_OBJS): CPPFLAGS += -DJEMALLOC_STRESS_TEST
+$(TESTS_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.c
+$(TESTS_CPP_OBJS): $(objroot)test/%.$(O): $(srcroot)test/%.cpp
+$(TESTS_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
+$(TESTS_CPP_OBJS): CPPFLAGS += -I$(srcroot)test/include -I$(objroot)test/include
+ifneq ($(IMPORTLIB),$(SO))
+$(CPP_OBJS) $(C_SYM_OBJS) $(C_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS): CPPFLAGS += -DDLLEXPORT
+endif
+
+# Dependencies.
+ifndef CC_MM
+HEADER_DIRS = $(srcroot)include/jemalloc/internal \
+ $(objroot)include/jemalloc $(objroot)include/jemalloc/internal
+HEADERS = $(filter-out $(PRIVATE_NAMESPACE_HDRS),$(wildcard $(foreach dir,$(HEADER_DIRS),$(dir)/*.h)))
+$(C_SYM_OBJS) $(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS) $(TESTS_CPP_OBJS): $(HEADERS)
+$(TESTS_OBJS) $(TESTS_CPP_OBJS): $(objroot)test/include/test/jemalloc_test.h
+endif
+
+$(C_OBJS) $(CPP_OBJS) $(C_PIC_OBJS) $(CPP_PIC_OBJS) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(TESTS_INTEGRATION_OBJS) $(TESTS_INTEGRATION_CPP_OBJS): $(objroot)include/jemalloc/internal/private_namespace.h
+$(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS) $(C_TESTLIB_ANALYZE_OBJS) $(C_TESTLIB_STRESS_OBJS) $(TESTS_UNIT_OBJS) $(TESTS_ANALYZE_OBJS) $(TESTS_STRESS_OBJS): $(objroot)include/jemalloc/internal/private_namespace_jet.h
+
+$(C_SYM_OBJS) $(C_OBJS) $(C_PIC_OBJS) $(C_JET_SYM_OBJS) $(C_JET_OBJS) $(C_TESTLIB_OBJS) $(TESTS_OBJS): %.$(O):
+ @mkdir -p $(@D)
+ $(CC) $(CFLAGS) -c $(CPPFLAGS) $(CTARGET) $<
+ifdef CC_MM
+ @$(CC) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $<
+endif
+
+$(C_SYMS): %.sym:
+ @mkdir -p $(@D)
+ $(DUMP_SYMS) $< | $(AWK) -f $(objroot)include/jemalloc/internal/private_symbols.awk > $@
+
+$(C_JET_SYMS): %.sym:
+ @mkdir -p $(@D)
+ $(DUMP_SYMS) $< | $(AWK) -f $(objroot)include/jemalloc/internal/private_symbols_jet.awk > $@
+
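+# Generate the private namespace headers from the collected symbol lists;
+# each extracted symbol is wrapped in a #define mapping it to its prefixed
+# internal name.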
+$(objroot)include/jemalloc/internal/private_namespace.gen.h: $(C_SYMS)
+ $(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@
+
+$(objroot)include/jemalloc/internal/private_namespace_jet.gen.h: $(C_JET_SYMS)
+ $(SHELL) $(srcroot)include/jemalloc/internal/private_namespace.sh $^ > $@
+
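+# Refresh %.h from %.gen.h only when the contents actually differ, so that
+# dependent objects are not rebuilt needlessly.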
+%.h: %.gen.h
+ @if ! `cmp -s $< $@` ; then echo "cp $< $@"; cp $< $@ ; fi
+
+$(CPP_OBJS) $(CPP_PIC_OBJS) $(TESTS_CPP_OBJS): %.$(O):
+ @mkdir -p $(@D)
+ $(CXX) $(CXXFLAGS) -c $(CPPFLAGS) $(CTARGET) $<
+ifdef CC_MM
+ @$(CXX) -MM $(CPPFLAGS) -MT $@ -o $(@:%.$(O)=%.d) $<
+endif
+
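+# When the shared library name is versioned, provide the unversioned name as
+# a symlink to the versioned library.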
+ifneq ($(SOREV),$(SO))
+%.$(SO) : %.$(SOREV)
+ @mkdir -p $(@D)
+ ln -sf $(<F) $@
+endif
+
+$(objroot)lib/$(LIBJEMALLOC).$(SOREV) : $(if $(PIC_CFLAGS),$(C_PIC_OBJS),$(C_OBJS)) $(if $(PIC_CFLAGS),$(CPP_PIC_OBJS),$(CPP_OBJS))
+ @mkdir -p $(@D)
+ $(CC) $(DSO_LDFLAGS) $(call RPATH,$(RPATH_EXTRA)) $(LDTARGET) $+ $(LDFLAGS) $(LIBS) $(EXTRA_LDFLAGS)
+
+$(objroot)lib/$(LIBJEMALLOC)_pic.$(A) : $(C_PIC_OBJS) $(CPP_PIC_OBJS)
+$(objroot)lib/$(LIBJEMALLOC).$(A) : $(C_OBJS) $(CPP_OBJS)
+$(objroot)lib/$(LIBJEMALLOC)_s.$(A) : $(C_OBJS) $(CPP_OBJS)
+
+$(STATIC_LIBS):
+ @mkdir -p $(@D)
+ $(AR) $(ARFLAGS)@AROUT@ $+
+
+$(objroot)test/unit/%$(EXE): $(objroot)test/unit/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_UNIT_OBJS)
+ @mkdir -p $(@D)
+ $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS)
+
+$(objroot)test/integration/%$(EXE): $(objroot)test/integration/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
+ @mkdir -p $(@D)
+ $(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LJEMALLOC) $(LDFLAGS) $(filter-out -lm,$(filter -lrt -pthread -lstdc++,$(LIBS))) $(LM) $(EXTRA_LDFLAGS)
+
+$(objroot)test/integration/cpp/%$(EXE): $(objroot)test/integration/cpp/%.$(O) $(C_TESTLIB_INTEGRATION_OBJS) $(C_UTIL_INTEGRATION_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
+ @mkdir -p $(@D)
+ $(CXX) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) -lm $(EXTRA_LDFLAGS)
+
+$(objroot)test/analyze/%$(EXE): $(objroot)test/analyze/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_ANALYZE_OBJS)
+ @mkdir -p $(@D)
+ $(CC) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS)
+
+$(objroot)test/stress/%$(EXE): $(objroot)test/stress/%.$(O) $(C_JET_OBJS) $(C_TESTLIB_STRESS_OBJS) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB)
+ @mkdir -p $(@D)
+ $(CC) $(TEST_LD_MODE) $(LDTARGET) $(filter %.$(O),$^) $(call RPATH,$(objroot)lib) $(objroot)lib/$(LIBJEMALLOC).$(IMPORTLIB) $(LDFLAGS) $(filter-out -lm,$(LIBS)) $(LM) $(EXTRA_LDFLAGS)
+
+build_lib_shared: $(DSOS)
+build_lib_static: $(STATIC_LIBS)
+ifeq ($(enable_shared), 1)
+build_lib: build_lib_shared
+endif
+ifeq ($(enable_static), 1)
+build_lib: build_lib_static
+endif
+
+install_bin:
+ $(INSTALL) -d $(BINDIR)
+ @for b in $(BINS); do \
+ $(INSTALL) -v -m 755 $$b $(BINDIR); \
+done
+
+install_include:
+ $(INSTALL) -d $(INCLUDEDIR)/jemalloc
+ @for h in $(C_HDRS); do \
+ $(INSTALL) -v -m 644 $$h $(INCLUDEDIR)/jemalloc; \
+done
+
+install_lib_shared: $(DSOS)
+ $(INSTALL) -d $(LIBDIR)
+ $(INSTALL) -v -m 755 $(objroot)lib/$(LIBJEMALLOC).$(SOREV) $(LIBDIR)
+ifneq ($(SOREV),$(SO))
+ ln -sf $(LIBJEMALLOC).$(SOREV) $(LIBDIR)/$(LIBJEMALLOC).$(SO)
+endif
+
+install_lib_static: $(STATIC_LIBS)
+ $(INSTALL) -d $(LIBDIR)
+ @for l in $(STATIC_LIBS); do \
+ $(INSTALL) -v -m 755 $$l $(LIBDIR); \
+done
+
+install_lib_pc: $(PC)
+ $(INSTALL) -d $(LIBDIR)/pkgconfig
+ @for l in $(PC); do \
+ $(INSTALL) -v -m 644 $$l $(LIBDIR)/pkgconfig; \
+done
+
+ifeq ($(enable_shared), 1)
+install_lib: install_lib_shared
+endif
+ifeq ($(enable_static), 1)
+install_lib: install_lib_static
+endif
+install_lib: install_lib_pc
+
+install_doc_html: build_doc_html
+ $(INSTALL) -d $(DATADIR)/doc/jemalloc$(install_suffix)
+ @for d in $(DOCS_HTML); do \
+ $(INSTALL) -v -m 644 $$d $(DATADIR)/doc/jemalloc$(install_suffix); \
+done
+
+install_doc_man: build_doc_man
+ $(INSTALL) -d $(MANDIR)/man3
+ @for d in $(DOCS_MAN3); do \
+ $(INSTALL) -v -m 644 $$d $(MANDIR)/man3; \
+done
+
+install_doc: install_doc_html install_doc_man
+
+install: install_bin install_include install_lib
+
+ifeq ($(enable_doc), 1)
+install: install_doc
+endif
+
+uninstall_bin:
+ $(RM) -v $(foreach b,$(notdir $(BINS)),$(BINDIR)/$(b))
+
+uninstall_include:
+ $(RM) -v $(foreach h,$(notdir $(C_HDRS)),$(INCLUDEDIR)/jemalloc/$(h))
+ rmdir -v $(INCLUDEDIR)/jemalloc
+
+uninstall_lib_shared:
+ $(RM) -v $(LIBDIR)/$(LIBJEMALLOC).$(SOREV)
+ifneq ($(SOREV),$(SO))
+ $(RM) -v $(LIBDIR)/$(LIBJEMALLOC).$(SO)
+endif
+
+uninstall_lib_static:
+ $(RM) -v $(foreach l,$(notdir $(STATIC_LIBS)),$(LIBDIR)/$(l))
+
+uninstall_lib_pc:
+ $(RM) -v $(foreach p,$(notdir $(PC)),$(LIBDIR)/pkgconfig/$(p))
+
+ifeq ($(enable_shared), 1)
+uninstall_lib: uninstall_lib_shared
+endif
+ifeq ($(enable_static), 1)
+uninstall_lib: uninstall_lib_static
+endif
+uninstall_lib: uninstall_lib_pc
+
+uninstall_doc_html:
+ $(RM) -v $(foreach d,$(notdir $(DOCS_HTML)),$(DATADIR)/doc/jemalloc$(install_suffix)/$(d))
+ rmdir -v $(DATADIR)/doc/jemalloc$(install_suffix)
+
+uninstall_doc_man:
+ $(RM) -v $(foreach d,$(notdir $(DOCS_MAN3)),$(MANDIR)/man3/$(d))
+
+uninstall_doc: uninstall_doc_html uninstall_doc_man
+
+uninstall: uninstall_bin uninstall_include uninstall_lib
+
+ifeq ($(enable_doc), 1)
+uninstall: uninstall_doc
+endif
+
+tests_unit: $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%$(EXE))
+tests_integration: $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%$(EXE)) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%$(EXE))
+tests_analyze: $(TESTS_ANALYZE:$(srcroot)%.c=$(objroot)%$(EXE))
+tests_stress: $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%$(EXE))
+tests: tests_unit tests_integration tests_analyze tests_stress
+
+check_unit_dir:
+ @mkdir -p $(objroot)test/unit
+check_integration_dir:
+ @mkdir -p $(objroot)test/integration
+analyze_dir:
+ @mkdir -p $(objroot)test/analyze
+stress_dir:
+ @mkdir -p $(objroot)test/stress
+check_dir: check_unit_dir check_integration_dir
+
+check_unit: tests_unit check_unit_dir
+ $(SHELL) $(objroot)test/test.sh $(TESTS_UNIT:$(srcroot)%.c=$(objroot)%)
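+# Re-run the integration tests with profiling enabled, and again with
+# profiling built in but inactive, via the configured MALLOC_CONF variable.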
+check_integration_prof: tests_integration check_integration_dir
+ifeq ($(enable_prof), 1)
+ $(MALLOC_CONF)="prof:true" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
+ $(MALLOC_CONF)="prof:true,prof_active:false" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
+endif
+check_integration_decay: tests_integration check_integration_dir
+ $(MALLOC_CONF)="dirty_decay_ms:-1,muzzy_decay_ms:-1" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
+ $(MALLOC_CONF)="dirty_decay_ms:0,muzzy_decay_ms:0" $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
+check_integration: tests_integration check_integration_dir
+ $(SHELL) $(objroot)test/test.sh $(TESTS_INTEGRATION:$(srcroot)%.c=$(objroot)%) $(TESTS_INTEGRATION_CPP:$(srcroot)%.cpp=$(objroot)%)
+analyze: tests_analyze analyze_dir
+ifeq ($(enable_prof), 1)
+ $(MALLOC_CONF)="prof:true" $(SHELL) $(objroot)test/test.sh $(TESTS_ANALYZE:$(srcroot)%.c=$(objroot)%)
+else
+ $(SHELL) $(objroot)test/test.sh $(TESTS_ANALYZE:$(srcroot)%.c=$(objroot)%)
+endif
+stress: tests_stress stress_dir
+ $(SHELL) $(objroot)test/test.sh $(TESTS_STRESS:$(srcroot)%.c=$(objroot)%)
+check: check_unit check_integration check_integration_decay check_integration_prof
+
+clean:
+ rm -f $(PRIVATE_NAMESPACE_HDRS)
+ rm -f $(PRIVATE_NAMESPACE_GEN_HDRS)
+ rm -f $(C_SYM_OBJS)
+ rm -f $(C_SYMS)
+ rm -f $(C_OBJS)
+ rm -f $(CPP_OBJS)
+ rm -f $(C_PIC_OBJS)
+ rm -f $(CPP_PIC_OBJS)
+ rm -f $(C_JET_SYM_OBJS)
+ rm -f $(C_JET_SYMS)
+ rm -f $(C_JET_OBJS)
+ rm -f $(C_TESTLIB_OBJS)
+ rm -f $(C_SYM_OBJS:%.$(O)=%.d)
+ rm -f $(C_OBJS:%.$(O)=%.d)
+ rm -f $(CPP_OBJS:%.$(O)=%.d)
+ rm -f $(C_PIC_OBJS:%.$(O)=%.d)
+ rm -f $(CPP_PIC_OBJS:%.$(O)=%.d)
+ rm -f $(C_JET_SYM_OBJS:%.$(O)=%.d)
+ rm -f $(C_JET_OBJS:%.$(O)=%.d)
+ rm -f $(C_TESTLIB_OBJS:%.$(O)=%.d)
+ rm -f $(TESTS_OBJS:%.$(O)=%$(EXE))
+ rm -f $(TESTS_OBJS)
+ rm -f $(TESTS_OBJS:%.$(O)=%.d)
+ rm -f $(TESTS_OBJS:%.$(O)=%.out)
+ rm -f $(TESTS_CPP_OBJS:%.$(O)=%$(EXE))
+ rm -f $(TESTS_CPP_OBJS)
+ rm -f $(TESTS_CPP_OBJS:%.$(O)=%.d)
+ rm -f $(TESTS_CPP_OBJS:%.$(O)=%.out)
+ rm -f $(DSOS) $(STATIC_LIBS)
+
+distclean: clean
+ rm -f $(objroot)bin/jemalloc-config
+ rm -f $(objroot)bin/jemalloc.sh
+ rm -f $(objroot)bin/jeprof
+ rm -f $(objroot)config.log
+ rm -f $(objroot)config.status
+ rm -f $(objroot)config.stamp
+ rm -f $(cfghdrs_out)
+ rm -f $(cfgoutputs_out)
+
+relclean: distclean
+ rm -f $(objroot)configure
+ rm -f $(objroot)VERSION
+ rm -f $(DOCS_HTML)
+ rm -f $(DOCS_MAN3)
+
+#===============================================================================
+# Re-configuration rules.
+
+ifeq ($(enable_autogen), 1)
+$(srcroot)configure : $(srcroot)configure.ac
+ cd ./$(srcroot) && $(AUTOCONF)
+
+$(objroot)config.status : $(srcroot)configure
+ ./$(objroot)config.status --recheck
+
+$(srcroot)config.stamp.in : $(srcroot)configure.ac
+ echo stamp > $(srcroot)config.stamp.in
+
+$(objroot)config.stamp : $(cfgoutputs_in) $(cfghdrs_in) $(srcroot)configure
+ ./$(objroot)config.status
+ @touch $@
+
+# There must be some action in order for make to re-read Makefile when it is
+# out of date.
+$(cfgoutputs_out) $(cfghdrs_out) : $(objroot)config.stamp
+ @true
+endif
diff --git a/contrib/jemalloc/README b/contrib/jemalloc/README
new file mode 100644
index 000000000000..3a6e0d272502
--- /dev/null
+++ b/contrib/jemalloc/README
@@ -0,0 +1,20 @@
+jemalloc is a general purpose malloc(3) implementation that emphasizes
+fragmentation avoidance and scalable concurrency support. jemalloc first came
+into use as the FreeBSD libc allocator in 2005, and since then it has found its
+way into numerous applications that rely on its predictable behavior. In 2010
+jemalloc development efforts broadened to include developer support features
+such as heap profiling and extensive monitoring/tuning hooks. Modern jemalloc
+releases continue to be integrated back into FreeBSD, and therefore versatility
+remains critical. Ongoing development efforts trend toward making jemalloc
+among the best allocators for a broad range of demanding applications, and
+eliminating/mitigating weaknesses that have practical repercussions for real
+world applications.
+
+The COPYING file contains copyright and licensing information.
+
+The INSTALL file contains information on how to configure, build, and install
+jemalloc.
+
+The ChangeLog file contains a brief summary of changes for each release.
+
+URL: http://jemalloc.net/
diff --git a/contrib/jemalloc/TUNING.md b/contrib/jemalloc/TUNING.md
new file mode 100644
index 000000000000..500541dd52d0
--- /dev/null
+++ b/contrib/jemalloc/TUNING.md
@@ -0,0 +1,262 @@
+This document summarizes the common approaches for performance fine tuning with
+jemalloc (as of 5.3.0). The default configuration of jemalloc tends to work
+reasonably well in practice, and most applications should not have to tune any
+options. However, in order to cover a wide range of applications and avoid
+pathological cases, the default setting is sometimes kept conservative and
+suboptimal, even for many common workloads. When jemalloc is properly tuned for
+a specific application / workload, it is common to improve system level metrics
+by a few percent, or make favorable trade-offs.
+
+
+## Notable runtime options for performance tuning
+
+Runtime options can be set via
+[malloc_conf](http://jemalloc.net/jemalloc.3.html#tuning).
+
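+As a minimal sketch (the option string here is purely illustrative), options
+can also be baked in at build time through the documented `malloc_conf`
+global:
+
+```c
+/* Read once by jemalloc during initialization, before the first allocation. */
+const char *malloc_conf = "background_thread:true,metadata_thp:auto";
+```
+
+The same string may instead be supplied at run time through the `MALLOC_CONF`
+environment variable (possibly carrying a build-configured prefix).
+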
+* [background_thread](http://jemalloc.net/jemalloc.3.html#background_thread)
+
+ Enabling jemalloc background threads generally improves the tail latency for
+ application threads, since unused memory purging is shifted to the dedicated
+ background threads. In addition, unintended purging delay caused by
+ application inactivity is avoided with background threads.
+
+  Suggested: `background_thread:true` when jemalloc-managed threads can be
+ allowed.
+
+* [metadata_thp](http://jemalloc.net/jemalloc.3.html#opt.metadata_thp)
+
+ Allowing jemalloc to utilize transparent huge pages for its internal
+ metadata usually reduces TLB misses significantly, especially for programs
+ with large memory footprint and frequent allocation / deallocation
+ activities. Metadata memory usage may increase due to the use of huge
+ pages.
+
+ Suggested for allocation intensive programs: `metadata_thp:auto` or
+ `metadata_thp:always`, which is expected to improve CPU utilization at a
+ small memory cost.
+
+* [dirty_decay_ms](http://jemalloc.net/jemalloc.3.html#opt.dirty_decay_ms) and
+ [muzzy_decay_ms](http://jemalloc.net/jemalloc.3.html#opt.muzzy_decay_ms)
+
+ Decay time determines how fast jemalloc returns unused pages back to the
+ operating system, and therefore provides a fairly straightforward trade-off
+ between CPU and memory usage. Shorter decay time purges unused pages faster
+ to reduces memory usage (usually at the cost of more CPU cycles spent on
+ purging), and vice versa.
+
+ Suggested: tune the values based on the desired trade-offs.
+
+* [narenas](http://jemalloc.net/jemalloc.3.html#opt.narenas)
+
+ By default jemalloc uses multiple arenas to reduce internal lock contention.
+  However, a high arena count may also increase overall memory fragmentation,
+  since arenas manage memory independently. When a high degree of parallelism
+  is not expected at the allocator level, a lower number of arenas often
+  improves memory usage.
+
+  Suggested: if low parallelism is expected, try a lower arena count while
+ monitoring CPU and memory usage.
+
+* [percpu_arena](http://jemalloc.net/jemalloc.3.html#opt.percpu_arena)
+
+  Enables dynamic thread-to-arena association based on the running CPU. This
+  has the potential to improve locality, e.g. when thread-to-CPU affinity is
+ present.
+
+ Suggested: try `percpu_arena:percpu` or `percpu_arena:phycpu` if
+ thread migration between processors is expected to be infrequent.
+
+Examples:
+
+* High resource consumption application, prioritizing CPU utilization:
+
+ `background_thread:true,metadata_thp:auto` combined with relaxed decay time
+ (increased `dirty_decay_ms` and / or `muzzy_decay_ms`,
+ e.g. `dirty_decay_ms:30000,muzzy_decay_ms:30000`).
+
+* High resource consumption application, prioritizing memory usage:
+
+ `background_thread:true,tcache_max:4096` combined with shorter decay time
+ (decreased `dirty_decay_ms` and / or `muzzy_decay_ms`,
+ e.g. `dirty_decay_ms:5000,muzzy_decay_ms:5000`), and lower arena count
+ (e.g. number of CPUs).
+
+* Low resource consumption application:
+
+ `narenas:1,tcache_max:1024` combined with shorter decay time (decreased
+  `dirty_decay_ms` and / or `muzzy_decay_ms`, e.g.
+ `dirty_decay_ms:1000,muzzy_decay_ms:0`).
+
+* Extremely conservative -- minimize memory usage at all costs, only suitable
+  when allocation activity is very rare:
+
+ `narenas:1,tcache:false,dirty_decay_ms:0,muzzy_decay_ms:0`
+
+Note that it is recommended to combine these options with `abort_conf:true`,
+which aborts immediately on illegal options.
+
+## Beyond runtime options
+
+In addition to the runtime options, there are a number of programmatic ways to
+improve application performance with jemalloc.
+
+* [Explicit arenas](http://jemalloc.net/jemalloc.3.html#arenas.create)
+
+ Manually created arenas can help performance in various ways, e.g. by
+ managing locality and contention for specific usages. For example,
+ applications can explicitly allocate frequently accessed objects from a
+ dedicated arena with
+ [mallocx()](http://jemalloc.net/jemalloc.3.html#MALLOCX_ARENA) to improve
+ locality. In addition, explicit arenas often benefit from individually
+ tuned options, e.g. relaxed [decay
+ time](http://jemalloc.net/jemalloc.3.html#arena.i.dirty_decay_ms) if
+  frequent reuse is expected (see the sketch after this list).
+
+* [Extent hooks](http://jemalloc.net/jemalloc.3.html#arena.i.extent_hooks)
+
+  Extent hooks allow customization of how the underlying memory is managed.
+  One performance-oriented use case is to utilize huge pages -- for example,
+ [HHVM](https://github.com/facebook/hhvm/blob/master/hphp/util/alloc.cpp)
+ uses explicit arenas with customized extent hooks to manage 1GB huge pages
+ for frequently accessed data, which reduces TLB misses significantly.
+
+* [Explicit thread-to-arena
+ binding](http://jemalloc.net/jemalloc.3.html#thread.arena)
+
+ It is common for some threads in an application to have different memory
+ access / allocation patterns. Threads with heavy workloads often benefit
+ from explicit binding, e.g. binding very active threads to dedicated arenas
+ may reduce contention at the allocator level.
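+
+A minimal sketch combining explicit arena creation, `MALLOCX_ARENA()`
+allocation, and thread binding (error handling abbreviated; the size and the
+function name are illustrative):
+
+```c
+#include <stddef.h>
+#include <jemalloc/jemalloc.h>
+
+void use_dedicated_arena(void) {
+    /* Create a dedicated arena and obtain its index. */
+    unsigned arena;
+    size_t sz = sizeof(arena);
+    mallctl("arenas.create", &arena, &sz, NULL, 0);
+
+    /* Allocate a frequently accessed object from that arena. */
+    void *hot = mallocx(4096, MALLOCX_ARENA(arena));
+
+    /* Optionally bind the calling thread to the same arena. */
+    mallctl("thread.arena", NULL, NULL, &arena, sizeof(arena));
+
+    /* Free with the matching flags. */
+    dallocx(hot, MALLOCX_ARENA(arena));
+}
+```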
diff --git a/contrib/jemalloc/VERSION b/contrib/jemalloc/VERSION
index 428ded8c071f..1dcfea03fc1a 100644
--- a/contrib/jemalloc/VERSION
+++ b/contrib/jemalloc/VERSION
@@ -1 +1 @@
-5.2.1-0-gea6b3e973b477b8061e0076bb257dbd7f3faa756
+5.3.0-0-g54eaed1d8b56b1aa528be3bdd1877e59c56fa90c
diff --git a/contrib/jemalloc/autogen.sh b/contrib/jemalloc/autogen.sh
new file mode 100755
index 000000000000..75f32da6873c
--- /dev/null
+++ b/contrib/jemalloc/autogen.sh
@@ -0,0 +1,17 @@
+#!/bin/sh
+
+for i in autoconf; do
+  echo "$i"
+  $i
+  # Capture the exit status before the test overwrites $?, so the error
+  # message reports the real status instead of that of the test itself.
+  ret=$?
+  if [ $ret -ne 0 ]; then
+    echo "Error $ret in $i"
+    exit 1
+  fi
+done
+
+echo "./configure --enable-autogen $@"
+./configure --enable-autogen "$@"
+ret=$?
+if [ $ret -ne 0 ]; then
+  echo "Error $ret in ./configure"
+  exit 1
+fi
diff --git a/contrib/jemalloc/bin/jemalloc-config.in b/contrib/jemalloc/bin/jemalloc-config.in
new file mode 100644
index 000000000000..80eca2e6437e
--- /dev/null
+++ b/contrib/jemalloc/bin/jemalloc-config.in
@@ -0,0 +1,83 @@
+#!/bin/sh
+
+usage() {
+ cat <<EOF
+Usage:
+ @BINDIR@/jemalloc-config <option>
+Options:
+ --help | -h : Print usage.
+ --version : Print jemalloc version.
+ --revision : Print shared library revision number.
+ --config : Print configure options used to build jemalloc.
+ --prefix : Print installation directory prefix.
+ --bindir : Print binary installation directory.
+ --datadir : Print data installation directory.
+ --includedir : Print include installation directory.
+ --libdir : Print library installation directory.
+ --mandir : Print manual page installation directory.
+ --cc : Print compiler used to build jemalloc.
+ --cflags : Print compiler flags used to build jemalloc.
+ --cppflags : Print preprocessor flags used to build jemalloc.
+ --cxxflags : Print C++ compiler flags used to build jemalloc.
+ --ldflags : Print library flags used to build jemalloc.
+ --libs : Print libraries jemalloc was linked against.
+EOF
+}
+
+prefix="@prefix@"
+exec_prefix="@exec_prefix@"
+
+case "$1" in
+--help | -h)
+ usage
+ exit 0
+ ;;
+--version)
+ echo "@jemalloc_version@"
+ ;;
+--revision)
+ echo "@rev@"
+ ;;
+--config)
+ echo "@CONFIG@"
+ ;;
+--prefix)
+ echo "@PREFIX@"
+ ;;
+--bindir)
+ echo "@BINDIR@"
+ ;;
+--datadir)
+ echo "@DATADIR@"
+ ;;
+--includedir)
+ echo "@INCLUDEDIR@"
+ ;;
+--libdir)
+ echo "@LIBDIR@"
+ ;;
+--mandir)
+ echo "@MANDIR@"
+ ;;
+--cc)
+ echo "@CC@"
+ ;;
+--cflags)
+ echo "@CFLAGS@"
+ ;;
+--cppflags)
+ echo "@CPPFLAGS@"
+ ;;
+--cxxflags)
+ echo "@CXXFLAGS@"
+ ;;
+--ldflags)
+ echo "@LDFLAGS@ @EXTRA_LDFLAGS@"
+ ;;
+--libs)
+ echo "@LIBS@"
+ ;;
+*)
+ usage
+ exit 1
+esac
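+
+# Example (illustrative): compile and link against an installed jemalloc
+# using the flags reported by this script, e.g.
+#   cc app.c $(jemalloc-config --cppflags) $(jemalloc-config --ldflags) \
+#     -ljemalloc $(jemalloc-config --libs)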
diff --git a/contrib/jemalloc/bin/jemalloc.sh.in b/contrib/jemalloc/bin/jemalloc.sh.in
new file mode 100644
index 000000000000..cdf36737591a
--- /dev/null
+++ b/contrib/jemalloc/bin/jemalloc.sh.in
@@ -0,0 +1,9 @@
+#!/bin/sh
+
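+# Usage sketch: run a program with jemalloc preloaded, e.g.
+#   jemalloc.sh /path/to/program args...
+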
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+
+@LD_PRELOAD_VAR@=${libdir}/libjemalloc.@SOREV@
+export @LD_PRELOAD_VAR@
+exec "$@"
diff --git a/contrib/jemalloc/bin/jeprof.in b/contrib/jemalloc/bin/jeprof.in
new file mode 100644
index 000000000000..dc2c3ea877b8
--- /dev/null
+++ b/contrib/jemalloc/bin/jeprof.in
@@ -0,0 +1,11352 @@
+#! /usr/bin/env perl
+
+# Copyright (c) 1998-2007, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# ---
+# Program for printing the profile generated by common/profiler.cc,
+# or by the heap profiler (common/debugallocation.cc)
+#
+# The profile contains a sequence of entries of the form:
+# <count> <stack trace>
+# This program parses the profile, and generates user-readable
+# output.
+#
+# Examples:
+#
+# % tools/jeprof "program" "profile"
+# Enters "interactive" mode
+#
+# % tools/jeprof --text "program" "profile"
+# Generates one line per procedure
+#
+# % tools/jeprof --gv "program" "profile"
+# Generates annotated call-graph and displays via "gv"
+#
+# % tools/jeprof --gv --focus=Mutex "program" "profile"
+# Restrict to code paths that involve an entry that matches "Mutex"
+#
+# % tools/jeprof --gv --focus=Mutex --ignore=string "program" "profile"
+# Restrict to code paths that involve an entry that matches "Mutex"
+# and does not match "string"
+#
+# % tools/jeprof --list=IBF_CheckDocid "program" "profile"
+# Generates disassembly listing of all routines with at least one
+# sample that match the --list=<regexp> pattern. The listing is
+# annotated with the flat and cumulative sample counts at each line.
+#
+# % tools/jeprof --disasm=IBF_CheckDocid "program" "profile"
+# Generates disassembly listing of all routines with at least one
+# sample that match the --disasm=<regexp> pattern. The listing is
+# annotated with the flat and cumulative sample counts at each PC value.
+#
+# TODO: Use color to indicate files?
+
+use strict;
+use warnings;
+use Getopt::Long;
+use Cwd;
+
+my $JEPROF_VERSION = "@jemalloc_version@";
+my $PPROF_VERSION = "2.0";
+
+# These are the object tools we use which can come from a
+# user-specified location using --tools, from the JEPROF_TOOLS
+# environment variable, or from the environment.
+my %obj_tool_map = (
+ "objdump" => "objdump",
+ "nm" => "nm",
+ "addr2line" => "addr2line",
+ "c++filt" => "c++filt",
+ ## ConfigureObjTools may add architecture-specific entries:
+ #"nm_pdb" => "nm-pdb", # for reading windows (PDB-format) executables
+ #"addr2line_pdb" => "addr2line-pdb", # ditto
+ #"otool" => "otool", # equivalent of objdump on OS X
+);
+# NOTE: these are lists, so you can put in commandline flags if you want.
+my @DOT = ("dot"); # leave non-absolute, since it may be in /usr/local
+my @GV = ("gv");
+my @EVINCE = ("evince"); # could also be xpdf or perhaps acroread
+my @KCACHEGRIND = ("kcachegrind");
+my @PS2PDF = ("ps2pdf");
+# These are used for dynamic profiles
+my @URL_FETCHER = ("curl", "-s", "--fail");
+
+# These are the web pages that servers need to support for dynamic profiles
+my $HEAP_PAGE = "/pprof/heap";
+my $PROFILE_PAGE = "/pprof/profile"; # must support cgi-param "?seconds=#"
+my $PMUPROFILE_PAGE = "/pprof/pmuprofile(?:\\?.*)?"; # must support cgi-param
+ # ?seconds=#&event=x&period=n
+my $GROWTH_PAGE = "/pprof/growth";
+my $CONTENTION_PAGE = "/pprof/contention";
+my $WALL_PAGE = "/pprof/wall(?:\\?.*)?"; # accepts options like namefilter
+my $FILTEREDPROFILE_PAGE = "/pprof/filteredprofile(?:\\?.*)?";
+my $CENSUSPROFILE_PAGE = "/pprof/censusprofile(?:\\?.*)?"; # must support cgi-param
+ # "?seconds=#",
+ # "?tags_regexp=#" and
+ # "?type=#".
+my $SYMBOL_PAGE = "/pprof/symbol"; # must support symbol lookup via POST
+my $PROGRAM_NAME_PAGE = "/pprof/cmdline";
+
+# These are the web pages that can be named on the command line.
+# All the alternatives must begin with /.
+my $PROFILES = "($HEAP_PAGE|$PROFILE_PAGE|$PMUPROFILE_PAGE|" .
+ "$GROWTH_PAGE|$CONTENTION_PAGE|$WALL_PAGE|" .
+ "$FILTEREDPROFILE_PAGE|$CENSUSPROFILE_PAGE)";
+
+# default binary name
+my $UNKNOWN_BINARY = "(unknown)";
+
+# There is a pervasive dependency on the length (in hex characters,
+# i.e., nibbles) of an address, distinguishing between 32-bit and
+# 64-bit profiles. To err on the safe side, default to 64-bit here:
+my $address_length = 16;
+
+my $dev_null = "/dev/null";
+if (! -e $dev_null && $^O =~ /MSWin/) { # $^O is the OS perl was built for
+ $dev_null = "nul";
+}
+
+# A list of paths to search for shared object files
+my @prefix_list = ();
+
+# Special routine name that should not have any symbols.
+# Used as separator to parse "addr2line -i" output.
+my $sep_symbol = '_fini';
+my $sep_address = undef;
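+# Roughly: when several PCs are sent to "addr2line -i" in one batch, the
+# address of this symbol is interleaved between them, and its name in the
+# output marks the boundary between the symbol groups for each PC.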
+
+##### Argument parsing #####
+
+sub usage_string {
+ return <<EOF;
+Usage:
+jeprof [options] <program> <profiles>
+ <profiles> is a space separated list of profile names.
+jeprof [options] <symbolized-profiles>
+ <symbolized-profiles> is a list of profile files where each file contains
+ the necessary symbol mappings as well as profile data (likely generated
+ with --raw).
+jeprof [options] <profile>
+ <profile> is a remote form. Symbols are obtained from host:port$SYMBOL_PAGE
+
+ Each name can be:
+ /path/to/profile - a path to a profile file
+ host:port[/<service>] - a location of a service to get profile from
+
+ The /<service> can be $HEAP_PAGE, $PROFILE_PAGE, /pprof/pmuprofile,
+ $GROWTH_PAGE, $CONTENTION_PAGE, /pprof/wall,
+ $CENSUSPROFILE_PAGE, or /pprof/filteredprofile.
+ For instance:
+ jeprof http://myserver.com:80$HEAP_PAGE
+ If /<service> is omitted, the service defaults to $PROFILE_PAGE (cpu profiling).
+jeprof --symbols <program>
+ Maps addresses to symbol names. In this mode, stdin should be a
+ list of library mappings, in the same format as is found in the heap-
+ and cpu-profile files (this loosely matches that of /proc/self/maps
+   on Linux), followed by a list of hex addresses to map, one per line.
+
+ For more help with querying remote servers, including how to add the
+ necessary server-side support code, see this filename (or one like it):
+
+ /usr/doc/gperftools-$PPROF_VERSION/pprof_remote_servers.html
+
+Options:
+ --cum Sort by cumulative data
+ --base=<base> Subtract <base> from <profile> before display
+ --interactive Run in interactive mode (interactive "help" gives help) [default]
+ --seconds=<n> Length of time for dynamic profiles [default=30 secs]
+ --add_lib=<file> Read additional symbols and line info from the given library
+ --lib_prefix=<dir> Comma separated list of library path prefixes
+
+Reporting Granularity:
+ --addresses Report at address level
+ --lines Report at source line level
+ --functions Report at function level [default]
+ --files Report at source file level
+
+Output type:
+ --text Generate text report
+ --callgrind Generate callgrind format to stdout
+ --gv Generate Postscript and display
+ --evince Generate PDF and display
+ --web Generate SVG and display
+ --list=<regexp> Generate source listing of matching routines
+ --disasm=<regexp> Generate disassembly of matching routines
+ --symbols Print demangled symbol names found at given addresses
+ --dot Generate DOT file to stdout
+  --ps               Generate Postscript to stdout
+ --pdf Generate PDF to stdout
+ --svg Generate SVG to stdout
+ --gif Generate GIF to stdout
+ --raw Generate symbolized jeprof data (useful with remote fetch)
+ --collapsed Generate collapsed stacks for building flame graphs
+ (see http://www.brendangregg.com/flamegraphs.html)
+
+Heap-Profile Options:
+ --inuse_space Display in-use (mega)bytes [default]
+ --inuse_objects Display in-use objects
+ --alloc_space Display allocated (mega)bytes
+ --alloc_objects Display allocated objects
+ --show_bytes Display space in bytes
+ --drop_negative Ignore negative differences
+
+Contention-profile options:
+ --total_delay Display total delay at each region [default]
+ --contentions Display number of delays at each region
+ --mean_delay Display mean delay at each region
+
+Call-graph Options:
+  --nodecount=<n>     Show at most <n> nodes [default=80]
+ --nodefraction=<f> Hide nodes below <f>*total [default=.005]
+ --edgefraction=<f> Hide edges below <f>*total [default=.001]
+ --maxdegree=<n> Max incoming/outgoing edges per node [default=8]
+ --focus=<regexp> Focus on backtraces with nodes matching <regexp>
+ --thread=<n> Show profile for thread <n>
+ --ignore=<regexp> Ignore backtraces with nodes matching <regexp>
+ --scale=<n> Set GV scaling [default=0]
+ --heapcheck Make nodes with non-0 object counts
+ (i.e. direct leak generators) more visible
+ --retain=<regexp> Retain only nodes that match <regexp>
+ --exclude=<regexp> Exclude all nodes that match <regexp>
+
+Miscellaneous:
+ --tools=<prefix or binary:fullpath>[,...] \$PATH for object tool pathnames
+ --test Run unit tests
+ --help This message
+ --version Version information
+ --debug-syms-by-id (Linux only) Find debug symbol files by build ID as well as by name
+
+Environment Variables:
+ JEPROF_TMPDIR Profiles directory. Defaults to \$HOME/jeprof
+ JEPROF_TOOLS Prefix for object tools pathnames
+
+Examples:
+
+jeprof /bin/ls ls.prof
+ Enters "interactive" mode
+jeprof --text /bin/ls ls.prof
+ Outputs one line per procedure
+jeprof --web /bin/ls ls.prof
+ Displays annotated call-graph in web browser
+jeprof --gv /bin/ls ls.prof
+ Displays annotated call-graph via 'gv'
+jeprof --gv --focus=Mutex /bin/ls ls.prof
+ Restricts to code paths including a .*Mutex.* entry
+jeprof --gv --focus=Mutex --ignore=string /bin/ls ls.prof
+ Code paths including Mutex but not string
+jeprof --list=getdir /bin/ls ls.prof
+ (Per-line) annotated source listing for getdir()
+jeprof --disasm=getdir /bin/ls ls.prof
+ (Per-PC) annotated disassembly for getdir()
+
+jeprof http://localhost:1234/
+ Enters "interactive" mode
+jeprof --text localhost:1234
+ Outputs one line per procedure for localhost:1234
+jeprof --raw localhost:1234 > ./local.raw
+jeprof --text ./local.raw
+ Fetches a remote profile for later analysis and then
+ analyzes it in text mode.
+EOF
+}
+
+sub version_string {
+ return <<EOF
+jeprof (part of jemalloc $JEPROF_VERSION)
+based on pprof (part of gperftools $PPROF_VERSION)
+
+Copyright 1998-2007 Google Inc.
+
+This is BSD licensed software; see the source for copying conditions
+and license information.
+There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE.
+EOF
+}
+
+sub usage {
+ my $msg = shift;
+ print STDERR "$msg\n\n";
+ print STDERR usage_string();
+ print STDERR "\nFATAL ERROR: $msg\n"; # just as a reminder
+ exit(1);
+}
+
+sub Init() {
+  # Set up tmp-file names and a handler to clean them up.
+  # We do this at the very beginning so that we can use
+  # the error() and cleanup() functions from here on.
+ $main::tmpfile_sym = "/tmp/jeprof$$.sym";
+ $main::tmpfile_ps = "/tmp/jeprof$$";
+ $main::next_tmpfile = 0;
+ $SIG{'INT'} = \&sighandler;
+
+ # Cache from filename/linenumber to source code
+  %main::source_cache = ();
+
+ $main::opt_help = 0;
+ $main::opt_version = 0;
+
+ $main::opt_cum = 0;
+ $main::opt_base = '';
+ $main::opt_addresses = 0;
+ $main::opt_lines = 0;
+ $main::opt_functions = 0;
+ $main::opt_files = 0;
+ $main::opt_lib_prefix = "";
+
+ $main::opt_text = 0;
+ $main::opt_callgrind = 0;
+ $main::opt_list = "";
+ $main::opt_disasm = "";
+ $main::opt_symbols = 0;
+ $main::opt_gv = 0;
+ $main::opt_evince = 0;
+ $main::opt_web = 0;
+ $main::opt_dot = 0;
+ $main::opt_ps = 0;
+ $main::opt_pdf = 0;
+ $main::opt_gif = 0;
+ $main::opt_svg = 0;
+ $main::opt_raw = 0;
+ $main::opt_collapsed = 0;
+
+ $main::opt_nodecount = 80;
+ $main::opt_nodefraction = 0.005;
+ $main::opt_edgefraction = 0.001;
+ $main::opt_maxdegree = 8;
+ $main::opt_focus = '';
+ $main::opt_thread = undef;
+ $main::opt_ignore = '';
+ $main::opt_scale = 0;
+ $main::opt_heapcheck = 0;
+ $main::opt_retain = '';
+ $main::opt_exclude = '';
+ $main::opt_seconds = 30;
+ $main::opt_lib = "";
+
+ $main::opt_inuse_space = 0;
+ $main::opt_inuse_objects = 0;
+ $main::opt_alloc_space = 0;
+ $main::opt_alloc_objects = 0;
+ $main::opt_show_bytes = 0;
+ $main::opt_drop_negative = 0;
+ $main::opt_interactive = 0;
+
+ $main::opt_total_delay = 0;
+ $main::opt_contentions = 0;
+ $main::opt_mean_delay = 0;
+
+ $main::opt_tools = "";
+ $main::opt_debug = 0;
+ $main::opt_test = 0;
+ $main::opt_debug_syms_by_id = 0;
+
+ # These are undocumented flags used only by unittests.
+ $main::opt_test_stride = 0;
+
+ # Are we using $SYMBOL_PAGE?
+ $main::use_symbol_page = 0;
+
+ # Files returned by TempName.
+ %main::tempnames = ();
+
+ # Type of profile we are dealing with
+ # Supported types:
+ # cpu
+ # heap
+ # growth
+ # contention
+ $main::profile_type = ''; # Empty type means "unknown"
+
+ GetOptions("help!" => \$main::opt_help,
+ "version!" => \$main::opt_version,
+ "cum!" => \$main::opt_cum,
+ "base=s" => \$main::opt_base,
+ "seconds=i" => \$main::opt_seconds,
+ "add_lib=s" => \$main::opt_lib,
+ "lib_prefix=s" => \$main::opt_lib_prefix,
+ "functions!" => \$main::opt_functions,
+ "lines!" => \$main::opt_lines,
+ "addresses!" => \$main::opt_addresses,
+ "files!" => \$main::opt_files,
+ "text!" => \$main::opt_text,
+ "callgrind!" => \$main::opt_callgrind,
+ "list=s" => \$main::opt_list,
+ "disasm=s" => \$main::opt_disasm,
+ "symbols!" => \$main::opt_symbols,
+ "gv!" => \$main::opt_gv,
+ "evince!" => \$main::opt_evince,
+ "web!" => \$main::opt_web,
+ "dot!" => \$main::opt_dot,
+ "ps!" => \$main::opt_ps,
+ "pdf!" => \$main::opt_pdf,
+ "svg!" => \$main::opt_svg,
+ "gif!" => \$main::opt_gif,
+ "raw!" => \$main::opt_raw,
+ "collapsed!" => \$main::opt_collapsed,
+ "interactive!" => \$main::opt_interactive,
+ "nodecount=i" => \$main::opt_nodecount,
+ "nodefraction=f" => \$main::opt_nodefraction,
+ "edgefraction=f" => \$main::opt_edgefraction,
+ "maxdegree=i" => \$main::opt_maxdegree,
+ "focus=s" => \$main::opt_focus,
+ "thread=s" => \$main::opt_thread,
+ "ignore=s" => \$main::opt_ignore,
+ "scale=i" => \$main::opt_scale,
+ "heapcheck" => \$main::opt_heapcheck,
+ "retain=s" => \$main::opt_retain,
+ "exclude=s" => \$main::opt_exclude,
+ "inuse_space!" => \$main::opt_inuse_space,
+ "inuse_objects!" => \$main::opt_inuse_objects,
+ "alloc_space!" => \$main::opt_alloc_space,
+ "alloc_objects!" => \$main::opt_alloc_objects,
+ "show_bytes!" => \$main::opt_show_bytes,
+ "drop_negative!" => \$main::opt_drop_negative,
+ "total_delay!" => \$main::opt_total_delay,
+ "contentions!" => \$main::opt_contentions,
+ "mean_delay!" => \$main::opt_mean_delay,
+ "tools=s" => \$main::opt_tools,
+ "test!" => \$main::opt_test,
+ "debug!" => \$main::opt_debug,
+ "debug-syms-by-id!" => \$main::opt_debug_syms_by_id,
+ # Undocumented flags used only by unittests:
+ "test_stride=i" => \$main::opt_test_stride,
+ ) || usage("Invalid option(s)");
+
+ # Deal with the standard --help and --version
+ if ($main::opt_help) {
+ print usage_string();
+ exit(0);
+ }
+
+ if ($main::opt_version) {
+ print version_string();
+ exit(0);
+ }
+
+ # Disassembly/listing/symbols mode requires address-level info
+ if ($main::opt_disasm || $main::opt_list || $main::opt_symbols) {
+ $main::opt_functions = 0;
+ $main::opt_lines = 0;
+ $main::opt_addresses = 1;
+ $main::opt_files = 0;
+ }
+
+ # Check heap-profiling flags
+ if ($main::opt_inuse_space +
+ $main::opt_inuse_objects +
+ $main::opt_alloc_space +
+ $main::opt_alloc_objects > 1) {
+ usage("Specify at most on of --inuse/--alloc options");
+ }
+
+ # Check output granularities
+ my $grains =
+ $main::opt_functions +
+ $main::opt_lines +
+ $main::opt_addresses +
+ $main::opt_files +
+ 0;
+ if ($grains > 1) {
+ usage("Only specify one output granularity option");
+ }
+ if ($grains == 0) {
+ $main::opt_functions = 1;
+ }
+
+ # Check output modes
+ my $modes =
+ $main::opt_text +
+ $main::opt_callgrind +
+ ($main::opt_list eq '' ? 0 : 1) +
+ ($main::opt_disasm eq '' ? 0 : 1) +
+ ($main::opt_symbols == 0 ? 0 : 1) +
+ $main::opt_gv +
+ $main::opt_evince +
+ $main::opt_web +
+ $main::opt_dot +
+ $main::opt_ps +
+ $main::opt_pdf +
+ $main::opt_svg +
+ $main::opt_gif +
+ $main::opt_raw +
+ $main::opt_collapsed +
+ $main::opt_interactive +
+ 0;
+ if ($modes > 1) {
+ usage("Only specify one output mode");
+ }
+ if ($modes == 0) {
+ if (-t STDOUT) { # If STDOUT is a tty, activate interactive mode
+ $main::opt_interactive = 1;
+ } else {
+ $main::opt_text = 1;
+ }
+ }
+
+ if ($main::opt_test) {
+ RunUnitTests();
+ # Should not return
+ exit(1);
+ }
+
+ # Binary name and profile arguments list
+ $main::prog = "";
+ @main::pfile_args = ();
+
+ # Remote profiling without a binary (using $SYMBOL_PAGE instead)
+ if (@ARGV > 0) {
+ if (IsProfileURL($ARGV[0])) {
+ $main::use_symbol_page = 1;
+ } elsif (IsSymbolizedProfileFile($ARGV[0])) {
+ $main::use_symbolized_profile = 1;
+ $main::prog = $UNKNOWN_BINARY; # will be set later from the profile file
+ }
+ }
+
+ if ($main::use_symbol_page || $main::use_symbolized_profile) {
+ # We don't need a binary!
+ my %disabled = ('--lines' => $main::opt_lines,
+ '--disasm' => $main::opt_disasm);
+ for my $option (keys %disabled) {
+ usage("$option cannot be used without a binary") if $disabled{$option};
+ }
+ # Set $main::prog later...
+ scalar(@ARGV) || usage("Did not specify profile file");
+ } elsif ($main::opt_symbols) {
+ # --symbols needs a binary-name (to run nm on, etc) but not profiles
+ $main::prog = shift(@ARGV) || usage("Did not specify program");
+ } else {
+ $main::prog = shift(@ARGV) || usage("Did not specify program");
+ scalar(@ARGV) || usage("Did not specify profile file");
+ }
+
+ # Parse profile file/location arguments
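+  # A "host@N/path" argument fans out to N numbered profile sources; e.g.
+  # the hypothetical "myhost:8000@3/pprof/heap" expands to
+  # "0.myhost:8000/pprof/heap" through "2.myhost:8000/pprof/heap".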
+ foreach my $farg (@ARGV) {
+ if ($farg =~ m/(.*)\@([0-9]+)(|\/.*)$/ ) {
+ my $machine = $1;
+ my $num_machines = $2;
+ my $path = $3;
+ for (my $i = 0; $i < $num_machines; $i++) {
+ unshift(@main::pfile_args, "$i.$machine$path");
+ }
+ } else {
+ unshift(@main::pfile_args, $farg);
+ }
+ }
+
+ if ($main::use_symbol_page) {
+ unless (IsProfileURL($main::pfile_args[0])) {
+ error("The first profile should be a remote form to use $SYMBOL_PAGE\n");
+ }
+ CheckSymbolPage();
+ $main::prog = FetchProgramName();
+ } elsif (!$main::use_symbolized_profile) { # may not need objtools!
+ ConfigureObjTools($main::prog)
+ }
+
+ # Break the opt_lib_prefix into the prefix_list array
+ @prefix_list = split (',', $main::opt_lib_prefix);
+
+  # Remove trailing / from the prefixes in the list, to prevent
+  # searching paths like /my/path//lib/mylib.so
+ foreach (@prefix_list) {
+ s|/+$||;
+ }
+
+ # Flag to prevent us from trying over and over to use
+ # elfutils if it's not installed (used only with
+ # --debug-syms-by-id option).
+ $main::gave_up_on_elfutils = 0;
+}
+
+sub FilterAndPrint {
+ my ($profile, $symbols, $libs, $thread) = @_;
+
+ # Get total data in profile
+ my $total = TotalProfile($profile);
+
+  # Remove uninteresting stack items
+ $profile = RemoveUninterestingFrames($symbols, $profile);
+
+ # Focus?
+ if ($main::opt_focus ne '') {
+ $profile = FocusProfile($symbols, $profile, $main::opt_focus);
+ }
+
+ # Ignore?
+ if ($main::opt_ignore ne '') {
+ $profile = IgnoreProfile($symbols, $profile, $main::opt_ignore);
+ }
+
+ my $calls = ExtractCalls($symbols, $profile);
+
+ # Reduce profiles to required output granularity, and also clean
+ # each stack trace so a given entry exists at most once.
+ my $reduced = ReduceProfile($symbols, $profile);
+
+ # Get derived profiles
+ my $flat = FlatProfile($reduced);
+ my $cumulative = CumulativeProfile($reduced);
+
+ # Print
+ if (!$main::opt_interactive) {
+ if ($main::opt_disasm) {
+ PrintDisassembly($libs, $flat, $cumulative, $main::opt_disasm);
+ } elsif ($main::opt_list) {
+ PrintListing($total, $libs, $flat, $cumulative, $main::opt_list, 0);
+ } elsif ($main::opt_text) {
+      # Make sure the output is empty when we have nothing to report
+      # (this only matters when --heapcheck is given, but we must stay
+      # compatible with old branches that did not always pass --heapcheck):
+ if ($total != 0) {
+ printf("Total%s: %s %s\n",
+ (defined($thread) ? " (t$thread)" : ""),
+ Unparse($total), Units());
+ }
+ PrintText($symbols, $flat, $cumulative, -1);
+ } elsif ($main::opt_raw) {
+ PrintSymbolizedProfile($symbols, $profile, $main::prog);
+ } elsif ($main::opt_collapsed) {
+ PrintCollapsedStacks($symbols, $profile);
+ } elsif ($main::opt_callgrind) {
+ PrintCallgrind($calls);
+ } else {
+ if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) {
+ if ($main::opt_gv) {
+ RunGV(TempName($main::next_tmpfile, "ps"), "");
+ } elsif ($main::opt_evince) {
+ RunEvince(TempName($main::next_tmpfile, "pdf"), "");
+ } elsif ($main::opt_web) {
+ my $tmp = TempName($main::next_tmpfile, "svg");
+ RunWeb($tmp);
+ # The command we run might hand the file name off
+ # to an already running browser instance and then exit.
+ # Normally, we'd remove $tmp on exit (right now),
+ # but fork a child to remove $tmp a little later, so that the
+ # browser has time to load it first.
+ delete $main::tempnames{$tmp};
+ if (fork() == 0) {
+ sleep 5;
+ unlink($tmp);
+ exit(0);
+ }
+ }
+ } else {
+ cleanup();
+ exit(1);
+ }
+ }
+ } else {
+ InteractiveMode($profile, $symbols, $libs, $total);
+ }
+}
+
+sub Main() {
+ Init();
+ $main::collected_profile = undef;
+ @main::profile_files = ();
+ $main::op_time = time();
+
+  # Printing symbols is special and requires a lot less info than most modes.
+ if ($main::opt_symbols) {
+ PrintSymbols(*STDIN); # Get /proc/maps and symbols output from stdin
+ return;
+ }
+
+ # Fetch all profile data
+ FetchDynamicProfiles();
+
+ # this will hold symbols that we read from the profile files
+ my $symbol_map = {};
+
+ # Read one profile, pick the last item on the list
+ my $data = ReadProfile($main::prog, pop(@main::profile_files));
+ my $profile = $data->{profile};
+ my $pcs = $data->{pcs};
+ my $libs = $data->{libs}; # Info about main program and shared libraries
+ $symbol_map = MergeSymbols($symbol_map, $data->{symbols});
+
+ # Add additional profiles, if available.
+ if (scalar(@main::profile_files) > 0) {
+ foreach my $pname (@main::profile_files) {
+ my $data2 = ReadProfile($main::prog, $pname);
+ $profile = AddProfile($profile, $data2->{profile});
+ $pcs = AddPcs($pcs, $data2->{pcs});
+ $symbol_map = MergeSymbols($symbol_map, $data2->{symbols});
+ }
+ }
+
+ # Subtract base from profile, if specified
+ if ($main::opt_base ne '') {
+ my $base = ReadProfile($main::prog, $main::opt_base);
+ $profile = SubtractProfile($profile, $base->{profile});
+ $pcs = AddPcs($pcs, $base->{pcs});
+ $symbol_map = MergeSymbols($symbol_map, $base->{symbols});
+ }
+
+ # Collect symbols
+ my $symbols;
+ if ($main::use_symbolized_profile) {
+ $symbols = FetchSymbols($pcs, $symbol_map);
+ } elsif ($main::use_symbol_page) {
+ $symbols = FetchSymbols($pcs);
+ } else {
+ # TODO(csilvers): $libs uses the /proc/self/maps data from profile1,
+ # which may differ from the data from subsequent profiles, especially
+ # if they were run on different machines. Use appropriate libs for
+ # each pc somehow.
+ $symbols = ExtractSymbols($libs, $pcs);
+ }
+
+ if (!defined($main::opt_thread)) {
+ FilterAndPrint($profile, $symbols, $libs);
+ }
+ if (defined($data->{threads})) {
+ foreach my $thread (sort { $a <=> $b } keys(%{$data->{threads}})) {
+ if (defined($main::opt_thread) &&
+ ($main::opt_thread eq '*' || $main::opt_thread == $thread)) {
+ my $thread_profile = $data->{threads}{$thread};
+ FilterAndPrint($thread_profile, $symbols, $libs, $thread);
+ }
+ }
+ }
+
+ cleanup();
+ exit(0);
+}
+
+##### Entry Point #####
+
+Main();
+
+# Temporary code to detect if we're running on a Goobuntu system.
+# These systems don't have the right stuff installed for the special
+# Readline libraries to work, so as a temporary workaround, we default
+# to using the normal stdio code, rather than the fancier readline-based
+# code.
+sub ReadlineMightFail {
+ if (-e '/lib/libtermcap.so.2') {
+ return 0; # libtermcap exists, so readline should be okay
+ } else {
+ return 1;
+ }
+}
+
+sub RunGV {
+ my $fname = shift;
+ my $bg = shift; # "" or " &" if we should run in background
+ if (!system(ShellEscape(@GV, "--version") . " >$dev_null 2>&1")) {
+ # Options using double dash are supported by this gv version.
+ # Also, turn on noantialias to better handle bug in gv for
+ # postscript files with large dimensions.
+ # TODO: Maybe we should not pass the --noantialias flag
+ # if the gv version is known to work properly without the flag.
+ system(ShellEscape(@GV, "--scale=$main::opt_scale", "--noantialias", $fname)
+ . $bg);
+ } else {
+ # Old gv version - only supports options that use single dash.
+ print STDERR ShellEscape(@GV, "-scale", $main::opt_scale) . "\n";
+ system(ShellEscape(@GV, "-scale", "$main::opt_scale", $fname) . $bg);
+ }
+}
+
+sub RunEvince {
+ my $fname = shift;
+ my $bg = shift; # "" or " &" if we should run in background
+ system(ShellEscape(@EVINCE, $fname) . $bg);
+}
+
+sub RunWeb {
+ my $fname = shift;
+ print STDERR "Loading web page file:///$fname\n";
+
+ if (`uname` =~ /Darwin/) {
+ # OS X: open will use standard preference for SVG files.
+ system("/usr/bin/open", $fname);
+ return;
+ }
+
+ # Some kind of Unix; try generic symlinks, then specific browsers.
+ # (Stop once we find one.)
+ # Works best if the browser is already running.
+ my @alt = (
+ "/etc/alternatives/gnome-www-browser",
+ "/etc/alternatives/x-www-browser",
+ "google-chrome",
+ "firefox",
+ );
+ foreach my $b (@alt) {
+ if (system($b, $fname) == 0) {
+ return;
+ }
+ }
+
+ print STDERR "Could not load web browser.\n";
+}
+
+sub RunKcachegrind {
+ my $fname = shift;
+ my $bg = shift; # "" or " &" if we should run in background
+ print STDERR "Starting '@KCACHEGRIND " . $fname . $bg . "'\n";
+ system(ShellEscape(@KCACHEGRIND, $fname) . $bg);
+}
+
+
+##### Interactive helper routines #####
+
+sub InteractiveMode {
+ $| = 1; # Make output unbuffered for interactive mode
+ my ($orig_profile, $symbols, $libs, $total) = @_;
+
+ print STDERR "Welcome to jeprof! For help, type 'help'.\n";
+
+ # Use ReadLine if it's installed and input comes from a console.
+ if ( -t STDIN &&
+ !ReadlineMightFail() &&
+ defined(eval {require Term::ReadLine}) ) {
+    my $term = Term::ReadLine->new('jeprof');
+ while ( defined ($_ = $term->readline('(jeprof) '))) {
+ $term->addhistory($_) if /\S/;
+ if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) {
+ last; # exit when we get an interactive command to quit
+ }
+ }
+ } else { # don't have readline
+ while (1) {
+ print STDERR "(jeprof) ";
+ $_ = <STDIN>;
+ last if ! defined $_ ;
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+
+ # Save some flags that might be reset by InteractiveCommand()
+ my $save_opt_lines = $main::opt_lines;
+
+ if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) {
+ last; # exit when we get an interactive command to quit
+ }
+
+ # Restore flags
+ $main::opt_lines = $save_opt_lines;
+ }
+ }
+}
+
+# Takes the original profile, symbols, libs, total count, and the
+# command to run.
+# Returns 1 if we should keep going, or 0 if we were asked to quit.
+sub InteractiveCommand {
+ my($orig_profile, $symbols, $libs, $total, $command) = @_;
+ $_ = $command; # just to make future m//'s easier
+ if (!defined($_)) {
+ print STDERR "\n";
+ return 0;
+ }
+ if (m/^\s*quit/) {
+ return 0;
+ }
+ if (m/^\s*help/) {
+ InteractiveHelpMessage();
+ return 1;
+ }
+ # Clear all the mode options -- mode is controlled by "$command"
+ $main::opt_text = 0;
+ $main::opt_callgrind = 0;
+ $main::opt_disasm = 0;
+ $main::opt_list = 0;
+ $main::opt_gv = 0;
+ $main::opt_evince = 0;
+ $main::opt_cum = 0;
+
+ if (m/^\s*(text|top)(\d*)\s*(.*)/) {
+ $main::opt_text = 1;
+
+ my $line_limit = ($2 ne "") ? int($2) : 10;
+
+ my $routine;
+ my $ignore;
+ ($routine, $ignore) = ParseInteractiveArgs($3);
+
+ my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
+ my $reduced = ReduceProfile($symbols, $profile);
+
+ # Get derived profiles
+ my $flat = FlatProfile($reduced);
+ my $cumulative = CumulativeProfile($reduced);
+
+ PrintText($symbols, $flat, $cumulative, $line_limit);
+ return 1;
+ }
+ if (m/^\s*callgrind\s*([^ \n]*)/) {
+ $main::opt_callgrind = 1;
+
+ # Get derived profiles
+ my $calls = ExtractCalls($symbols, $orig_profile);
+    # Test $1 once and remember the result: the regexps inside
+    # PrintCallgrind() overwrite $1 before the second test below.
+    my $filename = $1;
+    my $use_tempfile = ($filename eq '');
+    if ($use_tempfile) {
+      $filename = TempName($main::next_tmpfile, "callgrind");
+    }
+    PrintCallgrind($calls, $filename);
+    if ($use_tempfile) {
+      RunKcachegrind($filename, " & ");
+      $main::next_tmpfile++;
+    }
+
+ return 1;
+ }
+ if (m/^\s*(web)?list\s*(.+)/) {
+ my $html = (defined($1) && ($1 eq "web"));
+ $main::opt_list = 1;
+
+ my $routine;
+ my $ignore;
+ ($routine, $ignore) = ParseInteractiveArgs($2);
+
+ my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
+ my $reduced = ReduceProfile($symbols, $profile);
+
+ # Get derived profiles
+ my $flat = FlatProfile($reduced);
+ my $cumulative = CumulativeProfile($reduced);
+
+ PrintListing($total, $libs, $flat, $cumulative, $routine, $html);
+ return 1;
+ }
+ if (m/^\s*disasm\s*(.+)/) {
+ $main::opt_disasm = 1;
+
+ my $routine;
+ my $ignore;
+ ($routine, $ignore) = ParseInteractiveArgs($1);
+
+ # Process current profile to account for various settings
+ my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
+ my $reduced = ReduceProfile($symbols, $profile);
+
+ # Get derived profiles
+ my $flat = FlatProfile($reduced);
+ my $cumulative = CumulativeProfile($reduced);
+
+ PrintDisassembly($libs, $flat, $cumulative, $routine);
+ return 1;
+ }
+ if (m/^\s*(gv|web|evince)\s*(.*)/) {
+ $main::opt_gv = 0;
+ $main::opt_evince = 0;
+ $main::opt_web = 0;
+ if ($1 eq "gv") {
+ $main::opt_gv = 1;
+ } elsif ($1 eq "evince") {
+ $main::opt_evince = 1;
+ } elsif ($1 eq "web") {
+ $main::opt_web = 1;
+ }
+
+ my $focus;
+ my $ignore;
+ ($focus, $ignore) = ParseInteractiveArgs($2);
+
+ # Process current profile to account for various settings
+ my $profile = ProcessProfile($total, $orig_profile, $symbols,
+ $focus, $ignore);
+ my $reduced = ReduceProfile($symbols, $profile);
+
+ # Get derived profiles
+ my $flat = FlatProfile($reduced);
+ my $cumulative = CumulativeProfile($reduced);
+
+ if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) {
+ if ($main::opt_gv) {
+ RunGV(TempName($main::next_tmpfile, "ps"), " &");
+ } elsif ($main::opt_evince) {
+ RunEvince(TempName($main::next_tmpfile, "pdf"), " &");
+ } elsif ($main::opt_web) {
+ RunWeb(TempName($main::next_tmpfile, "svg"));
+ }
+ $main::next_tmpfile++;
+ }
+ return 1;
+ }
+ if (m/^\s*$/) {
+ return 1;
+ }
+ print STDERR "Unknown command: try 'help'.\n";
+ return 1;
+}
+
+
+sub ProcessProfile {
+ my $total_count = shift;
+ my $orig_profile = shift;
+ my $symbols = shift;
+ my $focus = shift;
+ my $ignore = shift;
+
+ # Process current profile to account for various settings
+ my $profile = $orig_profile;
+ printf("Total: %s %s\n", Unparse($total_count), Units());
+ if ($focus ne '') {
+ $profile = FocusProfile($symbols, $profile, $focus);
+ my $focus_count = TotalProfile($profile);
+ printf("After focusing on '%s': %s %s of %s (%0.1f%%)\n",
+ $focus,
+ Unparse($focus_count), Units(),
+ Unparse($total_count), ($focus_count*100.0) / $total_count);
+ }
+ if ($ignore ne '') {
+ $profile = IgnoreProfile($symbols, $profile, $ignore);
+ my $ignore_count = TotalProfile($profile);
+ printf("After ignoring '%s': %s %s of %s (%0.1f%%)\n",
+ $ignore,
+ Unparse($ignore_count), Units(),
+ Unparse($total_count),
+ ($ignore_count*100.0) / $total_count);
+ }
+
+ return $profile;
+}
+
+sub InteractiveHelpMessage {
+ print STDERR <<ENDOFHELP;
+Interactive jeprof mode
+
+Commands:
+ gv
+ gv [focus] [-ignore1] [-ignore2]
+ Show graphical hierarchical display of current profile. Without
+ any arguments, shows all samples in the profile. With the optional
+ "focus" argument, restricts the samples shown to just those where
+ the "focus" regular expression matches a routine name on the stack
+ trace.
+
+ web
+ web [focus] [-ignore1] [-ignore2]
+    Like gv, but displays the profile in your web browser instead of using
+ Ghostview. Works best if your web browser is already running.
+ To change the browser that gets used:
+ On Linux, set the /etc/alternatives/gnome-www-browser symlink.
+ On OS X, change the Finder association for SVG files.
+
+ list [routine_regexp] [-ignore1] [-ignore2]
+ Show source listing of routines whose names match "routine_regexp"
+
+ weblist [routine_regexp] [-ignore1] [-ignore2]
+ Displays a source listing of routines whose names match "routine_regexp"
+ in a web browser. You can click on source lines to view the
+ corresponding disassembly.
+
+ top [--cum] [-ignore1] [-ignore2]
+ top20 [--cum] [-ignore1] [-ignore2]
+ top37 [--cum] [-ignore1] [-ignore2]
+ Show top lines ordered by flat profile count, or cumulative count
+ if --cum is specified. If a number is present after 'top', the
+ top K routines will be shown (defaults to showing the top 10)
+
+ disasm [routine_regexp] [-ignore1] [-ignore2]
+ Show disassembly of routines whose names match "routine_regexp",
+ annotated with sample counts.
+
+ callgrind
+ callgrind [filename]
+ Generates callgrind file. If no filename is given, kcachegrind is called.
+
+ help - This listing
+ quit or ^D - End jeprof
+
+For commands that accept optional -ignore tags, samples where any routine in
+the stack trace matches the regular expression in any of the -ignore
+parameters will be ignored.
+
+Further pprof details are available at this location (or one similar):
+
+ /usr/doc/gperftools-$PPROF_VERSION/cpu_profiler.html
+ /usr/doc/gperftools-$PPROF_VERSION/heap_profiler.html
+
+ENDOFHELP
+}
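+
+# Split interactive-command arguments into a focus pattern and an ignore
+# pattern: "-foo" terms are ORed into the ignore regexp, bare terms into
+# the focus regexp, and -lines/-cum set the corresponding global options.
+# For example, "Mutex -string" yields ("Mutex", "string").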
+sub ParseInteractiveArgs {
+ my $args = shift;
+ my $focus = "";
+ my $ignore = "";
+ my @x = split(/ +/, $args);
+  foreach my $arg (@x) {
+    if ($arg =~ m/^(--|-)lines$/) {
+      $main::opt_lines = 1;
+    } elsif ($arg =~ m/^(--|-)cum$/) {
+      $main::opt_cum = 1;
+    } elsif ($arg =~ m/^-(.*)/) {
+      $ignore .= (($ignore ne "") ? "|" : "" ) . $1;
+    } else {
+      $focus .= (($focus ne "") ? "|" : "" ) . $arg;
+    }
+  }
+ if ($ignore ne "") {
+ print STDERR "Ignoring samples in call stacks that match '$ignore'\n";
+ }
+ return ($focus, $ignore);
+}
+
+##### Output code #####
+
+sub TempName {
+ my $fnum = shift;
+ my $ext = shift;
+ my $file = "$main::tmpfile_ps.$fnum.$ext";
+ $main::tempnames{$file} = 1;
+ return $file;
+}
+
+# Print profile data in packed binary format (64-bit) to standard out
+sub PrintProfileData {
+ my $profile = shift;
+
+ # print header (64-bit style)
+ # (zero) (header-size) (version) (sample-period) (zero)
+ print pack('L*', 0, 0, 3, 0, 0, 0, 1, 0, 0, 0);
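+  # On a little-endian machine the ten 32-bit words above pair up into
+  # five 64-bit values: a leading zero, header size 3, format version 0,
+  # sample period 1, and a trailing zero.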
+
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ if ($#addrs >= 0) {
+ my $depth = $#addrs + 1;
+ # int(foo / 2**32) is the only reliable way to get rid of bottom
+ # 32 bits on both 32- and 64-bit systems.
+ print pack('L*', $count & 0xFFFFFFFF, int($count / 2**32));
+ print pack('L*', $depth & 0xFFFFFFFF, int($depth / 2**32));
+
+ foreach my $full_addr (@addrs) {
+ my $addr = $full_addr;
+ $addr =~ s/0x0*//; # strip off leading 0x, zeroes
+ if (length($addr) > 16) {
+ print STDERR "Invalid address in profile: $full_addr\n";
+ next;
+ }
+ my $low_addr = substr($addr, -8); # get last 8 hex chars
+ my $high_addr = substr($addr, -16, 8); # get up to 8 more hex chars
+ print pack('L*', hex('0x' . $low_addr), hex('0x' . $high_addr));
+ }
+ }
+ }
+}
+
+# Print symbols and profile data
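+# The output looks roughly like this (a sketch, with hypothetical values):
+#   --- symbol
+#   binary=/bin/ls
+#   0x00000000004004f0 main
+#   ---
+#   --- heap
+#   ...the collected profile data...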
+sub PrintSymbolizedProfile {
+ my $symbols = shift;
+ my $profile = shift;
+ my $prog = shift;
+
+ $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $symbol_marker = $&;
+
+ print '--- ', $symbol_marker, "\n";
+ if (defined($prog)) {
+ print 'binary=', $prog, "\n";
+ }
+ while (my ($pc, $name) = each(%{$symbols})) {
+ my $sep = ' ';
+ print '0x', $pc;
+ # We have a list of function names, which include the inlined
+ # calls. They are separated (and terminated) by --, which is
+ # illegal in function names.
+ for (my $j = 2; $j <= $#{$name}; $j += 3) {
+ print $sep, $name->[$j];
+ $sep = '--';
+ }
+ print "\n";
+ }
+ print '---', "\n";
+
+ my $profile_marker;
+ if ($main::profile_type eq 'heap') {
+ $HEAP_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ $profile_marker = $&;
+ } elsif ($main::profile_type eq 'growth') {
+ $GROWTH_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ $profile_marker = $&;
+ } elsif ($main::profile_type eq 'contention') {
+ $CONTENTION_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ $profile_marker = $&;
+ } else { # elsif ($main::profile_type eq 'cpu')
+ $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ $profile_marker = $&;
+ }
+
+ print '--- ', $profile_marker, "\n";
+ if (defined($main::collected_profile)) {
+ # if used with remote fetch, simply dump the collected profile to output.
+ open(SRC, "<$main::collected_profile");
+ while (<SRC>) {
+ print $_;
+ }
+ close(SRC);
+ } else {
+ # --raw/http: For everything to work correctly for non-remote profiles, we
+ # would need to extend PrintProfileData() to handle all possible profile
+ # types, re-enable the code that is currently disabled in ReadCPUProfile()
+ # and FixCallerAddresses(), and remove the remote profile dumping code in
+ # the block above.
+ die "--raw/http: jeprof can only dump remote profiles for --raw\n";
+ # dump a cpu-format profile to standard out
+ PrintProfileData($profile);
+ }
+}
+
+# Print text output
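+# Each row shows: flat count, flat percentage, the running flat sum as a
+# percentage of the total, cumulative count, cumulative percentage, and
+# the symbol name.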
+sub PrintText {
+ my $symbols = shift;
+ my $flat = shift;
+ my $cumulative = shift;
+ my $line_limit = shift;
+
+ my $total = TotalProfile($flat);
+
+ # Which profile to sort by?
+ my $s = $main::opt_cum ? $cumulative : $flat;
+
+ my $running_sum = 0;
+ my $lines = 0;
+ foreach my $k (sort { GetEntry($s, $b) <=> GetEntry($s, $a) || $a cmp $b }
+ keys(%{$cumulative})) {
+ my $f = GetEntry($flat, $k);
+ my $c = GetEntry($cumulative, $k);
+ $running_sum += $f;
+
+ my $sym = $k;
+ if (exists($symbols->{$k})) {
+ $sym = $symbols->{$k}->[0] . " " . $symbols->{$k}->[1];
+ if ($main::opt_addresses) {
+ $sym = $k . " " . $sym;
+ }
+ }
+
+ if ($f != 0 || $c != 0) {
+ printf("%8s %6s %6s %8s %6s %s\n",
+ Unparse($f),
+ Percent($f, $total),
+ Percent($running_sum, $total),
+ Unparse($c),
+ Percent($c, $total),
+ $sym);
+ }
+ $lines++;
+ last if ($line_limit >= 0 && $lines >= $line_limit);
+ }
+}
+
+# Callgrind format has a compression for repeated function and file
+# names. You show the name the first time, and just use its number
+# subsequently. This can cut down the file to about a third or a
+# quarter of its uncompressed size. $key and $val are the key/value
+# pair that would normally be printed by callgrind; $map is a map from
+# value to number.
+sub CompressedCGName {
+ my($key, $val, $map) = @_;
+ my $idx = $map->{$val};
+ # For very short keys, providing an index hurts rather than helps.
+ if (length($val) <= 3) {
+ return "$key=$val\n";
+ } elsif (defined($idx)) {
+ return "$key=($idx)\n";
+ } else {
+    # scalar(keys(%{$map})) gives the number of items in the map.
+ $idx = scalar(keys(%{$map})) + 1;
+ $map->{$val} = $idx;
+ return "$key=($idx) $val\n";
+ }
+}
+
+# Print the call graph in a way that's suitable for callgrind.
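+# A sketch of the emitted format for one call site (indices and counts
+# are hypothetical):
+#   events: Hits
+#   fl=(1) /foo/bar.cc
+#   fn=(1) Caller
+#   cfl=(2) /foo/baz.cc
+#   cfn=(2) Callee
+#   calls=7 42
+#   131 7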
+sub PrintCallgrind {
+ my $calls = shift;
+ my $filename;
+ my %filename_to_index_map;
+ my %fnname_to_index_map;
+
+ if ($main::opt_interactive) {
+ $filename = shift;
+ print STDERR "Writing callgrind file to '$filename'.\n"
+ } else {
+ $filename = "&STDOUT";
+ }
+ open(CG, ">$filename");
+ printf CG ("events: Hits\n\n");
+  foreach my $call ( map { $_->[0] }
+                     sort { $a->[1] cmp $b->[1] ||
+                            $a->[2] <=> $b->[2] }
+ map { /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/;
+ [$_, $1, $2] }
+ keys %$calls ) {
+ my $count = int($calls->{$call});
+ $call =~ /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/;
+ my ( $caller_file, $caller_line, $caller_function,
+ $callee_file, $callee_line, $callee_function ) =
+ ( $1, $2, $3, $5, $6, $7 );
+
+ # TODO(csilvers): for better compression, collect all the
+ # caller/callee_files and functions first, before printing
+ # anything, and only compress those referenced more than once.
+ printf CG CompressedCGName("fl", $caller_file, \%filename_to_index_map);
+ printf CG CompressedCGName("fn", $caller_function, \%fnname_to_index_map);
+ if (defined $6) {
+ printf CG CompressedCGName("cfl", $callee_file, \%filename_to_index_map);
+ printf CG CompressedCGName("cfn", $callee_function, \%fnname_to_index_map);
+ printf CG ("calls=$count $callee_line\n");
+ }
+ printf CG ("$caller_line $count\n\n");
+ }
+}
+
+# Print disassembly for all routines that match $main::opt_disasm
+sub PrintDisassembly {
+ my $libs = shift;
+ my $flat = shift;
+ my $cumulative = shift;
+ my $disasm_opts = shift;
+
+ my $total = TotalProfile($flat);
+
+ foreach my $lib (@{$libs}) {
+ my $symbol_table = GetProcedureBoundaries($lib->[0], $disasm_opts);
+ my $offset = AddressSub($lib->[1], $lib->[3]);
+ foreach my $routine (sort ByName keys(%{$symbol_table})) {
+ my $start_addr = $symbol_table->{$routine}->[0];
+ my $end_addr = $symbol_table->{$routine}->[1];
+ # See if there are any samples in this routine
+ my $length = hex(AddressSub($end_addr, $start_addr));
+ my $addr = AddressAdd($start_addr, $offset);
+ for (my $i = 0; $i < $length; $i++) {
+ if (defined($cumulative->{$addr})) {
+ PrintDisassembledFunction($lib->[0], $offset,
+ $routine, $flat, $cumulative,
+ $start_addr, $end_addr, $total);
+ last;
+ }
+ $addr = AddressInc($addr);
+ }
+ }
+ }
+}
+
+# Return reference to array of tuples of the form:
+# [start_address, filename, linenumber, instruction, limit_address]
+# E.g.,
+# ["0x806c43d", "/foo/bar.cc", 131, "ret", "0x806c440"]
+sub Disassemble {
+ my $prog = shift;
+ my $offset = shift;
+ my $start_addr = shift;
+ my $end_addr = shift;
+
+ my $objdump = $obj_tool_map{"objdump"};
+ my $cmd = ShellEscape($objdump, "-C", "-d", "-l", "--no-show-raw-insn",
+ "--start-address=0x$start_addr",
+ "--stop-address=0x$end_addr", $prog);
+ open(OBJDUMP, "$cmd |") || error("$cmd: $!\n");
+ my @result = ();
+ my $filename = "";
+ my $linenumber = -1;
+ my $last = ["", "", "", ""];
+ while (<OBJDUMP>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ chop;
+ if (m|\s*([^:\s]+):(\d+)\s*$|) {
+ # Location line of the form:
+ # <filename>:<linenumber>
+ $filename = $1;
+ $linenumber = $2;
+ } elsif (m/^ +([0-9a-f]+):\s*(.*)/) {
+ # Disassembly line -- zero-extend address to full length
+ my $addr = HexExtend($1);
+ my $k = AddressAdd($addr, $offset);
+ $last->[4] = $k; # Store ending address for previous instruction
+ $last = [$k, $filename, $linenumber, $2, $end_addr];
+ push(@result, $last);
+ }
+ }
+ close(OBJDUMP);
+ return @result;
+}
+
+# The input file should contain lines of the form /proc/maps-like
+# output (same format as expected from the profiles) or that looks
+# like hex addresses (like "0xDEADBEEF"). We will parse all
+# /proc/maps output, and for all the hex addresses, we will output
+# "short" symbol names, one per line, in the same order as the input.
+sub PrintSymbols {
+ my $maps_and_symbols_file = shift;
+
+ # ParseLibraries expects pcs to be in a set. Fine by us...
+  my @pclist = ();      # pcs in input order
+ my $pcs = {};
+ my $map = "";
+ foreach my $line (<$maps_and_symbols_file>) {
+ $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines
+ if ($line =~ /\b(0x[0-9a-f]+)\b/i) {
+ push(@pclist, HexExtend($1));
+ $pcs->{$pclist[-1]} = 1;
+ } else {
+ $map .= $line;
+ }
+ }
+
+ my $libs = ParseLibraries($main::prog, $map, $pcs);
+ my $symbols = ExtractSymbols($libs, $pcs);
+
+ foreach my $pc (@pclist) {
+ # ->[0] is the shortname, ->[2] is the full name
+ print(($symbols->{$pc}->[0] || "??") . "\n");
+ }
+}
+
+
+# For sorting functions by name
+sub ByName {
+ return ShortFunctionName($a) cmp ShortFunctionName($b);
+}
+
+# Print source-listing for all routines that match $list_opts
+sub PrintListing {
+ my $total = shift;
+ my $libs = shift;
+ my $flat = shift;
+ my $cumulative = shift;
+ my $list_opts = shift;
+ my $html = shift;
+
+ my $output = \*STDOUT;
+ my $fname = "";
+
+ if ($html) {
+ # Arrange to write the output to a temporary file
+ $fname = TempName($main::next_tmpfile, "html");
+ $main::next_tmpfile++;
+ if (!open(TEMP, ">$fname")) {
+ print STDERR "$fname: $!\n";
+ return;
+ }
+ $output = \*TEMP;
+ print $output HtmlListingHeader();
+ printf $output ("<div class=\"legend\">%s<br>Total: %s %s</div>\n",
+ $main::prog, Unparse($total), Units());
+ }
+
+ my $listed = 0;
+ foreach my $lib (@{$libs}) {
+ my $symbol_table = GetProcedureBoundaries($lib->[0], $list_opts);
+ my $offset = AddressSub($lib->[1], $lib->[3]);
+ foreach my $routine (sort ByName keys(%{$symbol_table})) {
+ # Print if there are any samples in this routine
+ my $start_addr = $symbol_table->{$routine}->[0];
+ my $end_addr = $symbol_table->{$routine}->[1];
+ my $length = hex(AddressSub($end_addr, $start_addr));
+ my $addr = AddressAdd($start_addr, $offset);
+ for (my $i = 0; $i < $length; $i++) {
+ if (defined($cumulative->{$addr})) {
+ $listed += PrintSource(
+ $lib->[0], $offset,
+ $routine, $flat, $cumulative,
+ $start_addr, $end_addr,
+ $html,
+ $output);
+ last;
+ }
+ $addr = AddressInc($addr);
+ }
+ }
+ }
+
+ if ($html) {
+ if ($listed > 0) {
+ print $output HtmlListingFooter();
+ close($output);
+ RunWeb($fname);
+ } else {
+ close($output);
+ unlink($fname);
+ }
+ }
+}
+
+sub HtmlListingHeader {
+ return <<'EOF';
+<!DOCTYPE html>
+<html>
+<head>
+<title>Pprof listing</title>
+<style type="text/css">
+body {
+ font-family: sans-serif;
+}
+h1 {
+ font-size: 1.5em;
+ margin-bottom: 4px;
+}
+.legend {
+ font-size: 1.25em;
+}
+.line {
+ color: #aaaaaa;
+}
+.nop {
+ color: #aaaaaa;
+}
+.unimportant {
+ color: #cccccc;
+}
+.disasmloc {
+ color: #000000;
+}
+.deadsrc {
+ cursor: pointer;
+}
+.deadsrc:hover {
+ background-color: #eeeeee;
+}
+.livesrc {
+ color: #0000ff;
+ cursor: pointer;
+}
+.livesrc:hover {
+ background-color: #eeeeee;
+}
+.asm {
+ color: #008800;
+ display: none;
+}
+</style>
+<script type="text/javascript">
+function jeprof_toggle_asm(e) {
+ var target;
+ if (!e) e = window.event;
+ if (e.target) target = e.target;
+ else if (e.srcElement) target = e.srcElement;
+
+ if (target) {
+ var asm = target.nextSibling;
+ if (asm && asm.className == "asm") {
+ asm.style.display = (asm.style.display == "block" ? "" : "block");
+ e.preventDefault();
+ return false;
+ }
+ }
+}
+</script>
+</head>
+<body>
+EOF
+}
+
+sub HtmlListingFooter {
+ return <<'EOF';
+</body>
+</html>
+EOF
+}
+
+sub HtmlEscape {
+ my $text = shift;
+ $text =~ s/&/&amp;/g;
+ $text =~ s/</&lt;/g;
+ $text =~ s/>/&gt;/g;
+ return $text;
+}
+
+# Returns the indentation of the line, if it has any non-whitespace
+# characters. Otherwise, returns -1.
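+# For example, Indentation("  x = 1;") returns 2, while Indentation("   ")
+# and Indentation("") both return -1.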
+sub Indentation {
+ my $line = shift;
+  if ($line =~ m/^(\s*)\S/) {
+ return length($1);
+ } else {
+ return -1;
+ }
+}
+
+# If the symbol table contains inlining info, Disassemble() may tag an
+# instruction with a location inside an inlined function. But for
+# source listings, we prefer to use the location in the function we
+# are listing. So use MapToSymbols() to fetch full location
+# information for each instruction and then pick out the first
+# location from a location list (location list contains callers before
+# callees in case of inlining).
+#
+# After this routine has run, each entry in $instructions contains:
+# [0] start address
+# [1] filename for function we are listing
+# [2] line number for function we are listing
+# [3] disassembly
+# [4] limit address
+# [5] most specific filename (may be different from [1] due to inlining)
+# [6] most specific line number (may be different from [2] due to inlining)
+sub GetTopLevelLineNumbers {
+ my ($lib, $offset, $instructions) = @_;
+ my $pcs = [];
+ for (my $i = 0; $i <= $#{$instructions}; $i++) {
+ push(@{$pcs}, $instructions->[$i]->[0]);
+ }
+ my $symbols = {};
+ MapToSymbols($lib, $offset, $pcs, $symbols);
+ for (my $i = 0; $i <= $#{$instructions}; $i++) {
+ my $e = $instructions->[$i];
+ push(@{$e}, $e->[1]);
+ push(@{$e}, $e->[2]);
+ my $addr = $e->[0];
+ my $sym = $symbols->{$addr};
+ if (defined($sym)) {
+ if ($#{$sym} >= 2 && $sym->[1] =~ m/^(.*):(\d+)$/) {
+ $e->[1] = $1; # File name
+ $e->[2] = $2; # Line number
+ }
+ }
+ }
+}
+
+# Print source-listing for one routine
+sub PrintSource {
+ my $prog = shift;
+ my $offset = shift;
+ my $routine = shift;
+ my $flat = shift;
+ my $cumulative = shift;
+ my $start_addr = shift;
+ my $end_addr = shift;
+ my $html = shift;
+ my $output = shift;
+
+ # Disassemble all instructions (just to get line numbers)
+ my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr);
+ GetTopLevelLineNumbers($prog, $offset, \@instructions);
+
+ # Hack 1: assume that the first source file encountered in the
+ # disassembly contains the routine
+ my $filename = undef;
+ for (my $i = 0; $i <= $#instructions; $i++) {
+ if ($instructions[$i]->[2] >= 0) {
+ $filename = $instructions[$i]->[1];
+ last;
+ }
+ }
+ if (!defined($filename)) {
+ print STDERR "no filename found in $routine\n";
+ return 0;
+ }
+
+ # Hack 2: assume that the largest line number from $filename is the
+ # end of the procedure. This is typically safe since if P1 contains
+ # an inlined call to P2, then P2 usually occurs earlier in the
+ # source file. If this does not work, we might have to compute a
+ # density profile or just print all regions we find.
+ my $lastline = 0;
+ for (my $i = 0; $i <= $#instructions; $i++) {
+ my $f = $instructions[$i]->[1];
+ my $l = $instructions[$i]->[2];
+ if (($f eq $filename) && ($l > $lastline)) {
+ $lastline = $l;
+ }
+ }
+
+ # Hack 3: assume the first source location from "filename" is the start of
+ # the source code.
+ my $firstline = 1;
+ for (my $i = 0; $i <= $#instructions; $i++) {
+ if ($instructions[$i]->[1] eq $filename) {
+ $firstline = $instructions[$i]->[2];
+ last;
+ }
+ }
+
+ # Hack 4: Extend last line forward until its indentation is less than
+ # the indentation we saw on $firstline
+ my $oldlastline = $lastline;
+ {
+ if (!open(FILE, "<$filename")) {
+ print STDERR "$filename: $!\n";
+ return 0;
+ }
+ my $l = 0;
+ my $first_indentation = -1;
+ while (<FILE>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ $l++;
+ my $indent = Indentation($_);
+ if ($l >= $firstline) {
+ if ($first_indentation < 0 && $indent >= 0) {
+ $first_indentation = $indent;
+ last if ($first_indentation == 0);
+ }
+ }
+ if ($l >= $lastline && $indent >= 0) {
+ if ($indent >= $first_indentation) {
+ $lastline = $l+1;
+ } else {
+ last;
+ }
+ }
+ }
+ close(FILE);
+ }
+
+  # Assign all samples to the range $firstline..$lastline.
+  # Hack 5: If an instruction does not occur in the range, its samples
+  # are moved to the next instruction that occurs in the range.
+ my $samples1 = {}; # Map from line number to flat count
+ my $samples2 = {}; # Map from line number to cumulative count
+ my $running1 = 0; # Unassigned flat counts
+ my $running2 = 0; # Unassigned cumulative counts
+ my $total1 = 0; # Total flat counts
+ my $total2 = 0; # Total cumulative counts
+ my %disasm = (); # Map from line number to disassembly
+ my $running_disasm = ""; # Unassigned disassembly
+ my $skip_marker = "---\n";
+ if ($html) {
+ $skip_marker = "";
+ for (my $l = $firstline; $l <= $lastline; $l++) {
+ $disasm{$l} = "";
+ }
+ }
+ my $last_dis_filename = '';
+ my $last_dis_linenum = -1;
+ my $last_touched_line = -1; # To detect gaps in disassembly for a line
+ foreach my $e (@instructions) {
+    # Add up counts for all addresses that fall inside this instruction
+ my $c1 = 0;
+ my $c2 = 0;
+ for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) {
+ $c1 += GetEntry($flat, $a);
+ $c2 += GetEntry($cumulative, $a);
+ }
+
+ if ($html) {
+ my $dis = sprintf(" %6s %6s \t\t%8s: %s ",
+ HtmlPrintNumber($c1),
+ HtmlPrintNumber($c2),
+ UnparseAddress($offset, $e->[0]),
+ CleanDisassembly($e->[3]));
+
+ # Append the most specific source line associated with this instruction
+ if (length($dis) < 80) { $dis .= (' ' x (80 - length($dis))) };
+ $dis = HtmlEscape($dis);
+ my $f = $e->[5];
+ my $l = $e->[6];
+ if ($f ne $last_dis_filename) {
+ $dis .= sprintf("<span class=disasmloc>%s:%d</span>",
+ HtmlEscape(CleanFileName($f)), $l);
+ } elsif ($l ne $last_dis_linenum) {
+ # De-emphasize the unchanged file name portion
+ $dis .= sprintf("<span class=unimportant>%s</span>" .
+ "<span class=disasmloc>:%d</span>",
+ HtmlEscape(CleanFileName($f)), $l);
+ } else {
+ # De-emphasize the entire location
+ $dis .= sprintf("<span class=unimportant>%s:%d</span>",
+ HtmlEscape(CleanFileName($f)), $l);
+ }
+ $last_dis_filename = $f;
+ $last_dis_linenum = $l;
+ $running_disasm .= $dis;
+ $running_disasm .= "\n";
+ }
+
+ $running1 += $c1;
+ $running2 += $c2;
+ $total1 += $c1;
+ $total2 += $c2;
+ my $file = $e->[1];
+ my $line = $e->[2];
+ if (($file eq $filename) &&
+ ($line >= $firstline) &&
+ ($line <= $lastline)) {
+ # Assign all accumulated samples to this line
+ AddEntry($samples1, $line, $running1);
+ AddEntry($samples2, $line, $running2);
+ $running1 = 0;
+ $running2 = 0;
+ if ($html) {
+ if ($line != $last_touched_line && $disasm{$line} ne '') {
+ $disasm{$line} .= "\n";
+ }
+ $disasm{$line} .= $running_disasm;
+ $running_disasm = '';
+ $last_touched_line = $line;
+ }
+ }
+ }
+
+ # Assign any leftover samples to $lastline
+ AddEntry($samples1, $lastline, $running1);
+ AddEntry($samples2, $lastline, $running2);
+ if ($html) {
+ if ($lastline != $last_touched_line && $disasm{$lastline} ne '') {
+ $disasm{$lastline} .= "\n";
+ }
+ $disasm{$lastline} .= $running_disasm;
+ }
+
+ if ($html) {
+ printf $output (
+ "<h1>%s</h1>%s\n<pre onClick=\"jeprof_toggle_asm()\">\n" .
+ "Total:%6s %6s (flat / cumulative %s)\n",
+ HtmlEscape(ShortFunctionName($routine)),
+ HtmlEscape(CleanFileName($filename)),
+ Unparse($total1),
+ Unparse($total2),
+ Units());
+ } else {
+ printf $output (
+ "ROUTINE ====================== %s in %s\n" .
+ "%6s %6s Total %s (flat / cumulative)\n",
+ ShortFunctionName($routine),
+ CleanFileName($filename),
+ Unparse($total1),
+ Unparse($total2),
+ Units());
+ }
+ if (!open(FILE, "<$filename")) {
+ print STDERR "$filename: $!\n";
+ return 0;
+ }
+ my $l = 0;
+ while (<FILE>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ $l++;
+ if ($l >= $firstline - 5 &&
+ (($l <= $oldlastline + 5) || ($l <= $lastline))) {
+ chop;
+ my $text = $_;
+ if ($l == $firstline) { print $output $skip_marker; }
+ my $n1 = GetEntry($samples1, $l);
+ my $n2 = GetEntry($samples2, $l);
+ if ($html) {
+ # Emit a span that has one of the following classes:
+ # livesrc -- has samples
+ # deadsrc -- has disassembly, but with no samples
+        #    nop      -- has no matching disassembly
+ # Also emit an optional span containing disassembly.
+ my $dis = $disasm{$l};
+ my $asm = "";
+ if (defined($dis) && $dis ne '') {
+ $asm = "<span class=\"asm\">" . $dis . "</span>";
+ }
+ my $source_class = (($n1 + $n2 > 0)
+ ? "livesrc"
+ : (($asm ne "") ? "deadsrc" : "nop"));
+ printf $output (
+ "<span class=\"line\">%5d</span> " .
+ "<span class=\"%s\">%6s %6s %s</span>%s\n",
+ $l, $source_class,
+ HtmlPrintNumber($n1),
+ HtmlPrintNumber($n2),
+ HtmlEscape($text),
+ $asm);
+ } else {
+ printf $output(
+ "%6s %6s %4d: %s\n",
+ UnparseAlt($n1),
+ UnparseAlt($n2),
+ $l,
+ $text);
+ }
+ if ($l == $lastline) { print $output $skip_marker; }
+ };
+ }
+ close(FILE);
+ if ($html) {
+ print $output "</pre>\n";
+ }
+ return 1;
+}
+
+# Return the source line for the specified file/linenumber.
+# Returns undef if not found.
+sub SourceLine {
+ my $file = shift;
+ my $line = shift;
+
+ # Look in cache
+ if (!defined($main::source_cache{$file})) {
+ if (100 < scalar keys(%main::source_cache)) {
+ # Clear the cache when it gets too big
+      %main::source_cache = ();
+ }
+
+ # Read all lines from the file
+ if (!open(FILE, "<$file")) {
+ print STDERR "$file: $!\n";
+ $main::source_cache{$file} = []; # Cache the negative result
+ return undef;
+ }
+ my $lines = [];
+ push(@{$lines}, ""); # So we can use 1-based line numbers as indices
+ while (<FILE>) {
+ push(@{$lines}, $_);
+ }
+ close(FILE);
+
+ # Save the lines in the cache
+ $main::source_cache{$file} = $lines;
+ }
+
+ my $lines = $main::source_cache{$file};
+ if (($line < 0) || ($line > $#{$lines})) {
+ return undef;
+ } else {
+ return $lines->[$line];
+ }
+}
+
+# Print disassembly for one routine with interspersed source if available
+sub PrintDisassembledFunction {
+ my $prog = shift;
+ my $offset = shift;
+ my $routine = shift;
+ my $flat = shift;
+ my $cumulative = shift;
+ my $start_addr = shift;
+ my $end_addr = shift;
+ my $total = shift;
+
+ # Disassemble all instructions
+ my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr);
+
+ # Make array of counts per instruction
+ my @flat_count = ();
+ my @cum_count = ();
+ my $flat_total = 0;
+ my $cum_total = 0;
+ foreach my $e (@instructions) {
+    # Add up counts for all addresses that fall inside this instruction
+ my $c1 = 0;
+ my $c2 = 0;
+ for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) {
+ $c1 += GetEntry($flat, $a);
+ $c2 += GetEntry($cumulative, $a);
+ }
+ push(@flat_count, $c1);
+ push(@cum_count, $c2);
+ $flat_total += $c1;
+ $cum_total += $c2;
+ }
+
+ # Print header with total counts
+ printf("ROUTINE ====================== %s\n" .
+ "%6s %6s %s (flat, cumulative) %.1f%% of total\n",
+ ShortFunctionName($routine),
+ Unparse($flat_total),
+ Unparse($cum_total),
+ Units(),
+ ($cum_total * 100.0) / $total);
+
+ # Process instructions in order
+ my $current_file = "";
+ for (my $i = 0; $i <= $#instructions; ) {
+ my $e = $instructions[$i];
+
+ # Print the new file name whenever we switch files
+ if ($e->[1] ne $current_file) {
+ $current_file = $e->[1];
+ my $fname = $current_file;
+ $fname =~ s|^\./||; # Trim leading "./"
+
+ # Shorten long file names
+ if (length($fname) >= 58) {
+ $fname = "..." . substr($fname, -55);
+ }
+ printf("-------------------- %s\n", $fname);
+ }
+
+ # TODO: Compute range of lines to print together to deal with
+ # small reorderings.
+ my $first_line = $e->[2];
+ my $last_line = $first_line;
+ my %flat_sum = ();
+ my %cum_sum = ();
+ for (my $l = $first_line; $l <= $last_line; $l++) {
+ $flat_sum{$l} = 0;
+ $cum_sum{$l} = 0;
+ }
+
+ # Find run of instructions for this range of source lines
+ my $first_inst = $i;
+ while (($i <= $#instructions) &&
+ ($instructions[$i]->[2] >= $first_line) &&
+ ($instructions[$i]->[2] <= $last_line)) {
+ $e = $instructions[$i];
+ $flat_sum{$e->[2]} += $flat_count[$i];
+ $cum_sum{$e->[2]} += $cum_count[$i];
+ $i++;
+ }
+ my $last_inst = $i - 1;
+
+ # Print source lines
+ for (my $l = $first_line; $l <= $last_line; $l++) {
+ my $line = SourceLine($current_file, $l);
+      if (!defined($line)) {
+        # No source available; show a placeholder so the sample counts
+        # for this line still appear in the listing.
+        $line = "?\n";
+ } else {
+ $line =~ s/^\s+//;
+ }
+ printf("%6s %6s %5d: %s",
+ UnparseAlt($flat_sum{$l}),
+ UnparseAlt($cum_sum{$l}),
+ $l,
+ $line);
+ }
+
+ # Print disassembly
+ for (my $x = $first_inst; $x <= $last_inst; $x++) {
+ my $e = $instructions[$x];
+ printf("%6s %6s %8s: %6s\n",
+ UnparseAlt($flat_count[$x]),
+ UnparseAlt($cum_count[$x]),
+ UnparseAddress($offset, $e->[0]),
+ CleanDisassembly($e->[3]));
+ }
+ }
+}
+
+# Print DOT graph
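+# A sketch of the emitted graph (all names and numbers hypothetical):
+#   digraph "prog; 100 samples" {
+#     Legend [fontsize=24,shape=plaintext,label="..."];
+#     N1 [label="foo\n10 (10.0%)\r",shape=box,fontsize=13.0];
+#     N1 -> N2 [...];    # edges follow, weighted by sample count
+#   }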
+sub PrintDot {
+ my $prog = shift;
+ my $symbols = shift;
+ my $raw = shift;
+ my $flat = shift;
+ my $cumulative = shift;
+ my $overall_total = shift;
+
+ # Get total
+ my $local_total = TotalProfile($flat);
+ my $nodelimit = int($main::opt_nodefraction * $local_total);
+ my $edgelimit = int($main::opt_edgefraction * $local_total);
+ my $nodecount = $main::opt_nodecount;
+
+ # Find nodes to include
+ my @list = (sort { abs(GetEntry($cumulative, $b)) <=>
+ abs(GetEntry($cumulative, $a))
+ || $a cmp $b }
+ keys(%{$cumulative}));
+ my $last = $nodecount - 1;
+ if ($last > $#list) {
+ $last = $#list;
+ }
+ while (($last >= 0) &&
+ (abs(GetEntry($cumulative, $list[$last])) <= $nodelimit)) {
+ $last--;
+ }
+ if ($last < 0) {
+ print STDERR "No nodes to print\n";
+ return 0;
+ }
+
+ if ($nodelimit > 0 || $edgelimit > 0) {
+ printf STDERR ("Dropping nodes with <= %s %s; edges with <= %s abs(%s)\n",
+ Unparse($nodelimit), Units(),
+ Unparse($edgelimit), Units());
+ }
+
+ # Open DOT output file
+ my $output;
+ my $escaped_dot = ShellEscape(@DOT);
+ my $escaped_ps2pdf = ShellEscape(@PS2PDF);
+ if ($main::opt_gv) {
+ my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "ps"));
+ $output = "| $escaped_dot -Tps2 >$escaped_outfile";
+ } elsif ($main::opt_evince) {
+ my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "pdf"));
+ $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - $escaped_outfile";
+ } elsif ($main::opt_ps) {
+ $output = "| $escaped_dot -Tps2";
+ } elsif ($main::opt_pdf) {
+ $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - -";
+ } elsif ($main::opt_web || $main::opt_svg) {
+ # We need to post-process the SVG, so write to a temporary file always.
+ my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "svg"));
+ $output = "| $escaped_dot -Tsvg >$escaped_outfile";
+ } elsif ($main::opt_gif) {
+ $output = "| $escaped_dot -Tgif";
+ } else {
+ $output = ">&STDOUT";
+ }
+ open(DOT, $output) || error("$output: $!\n");
+
+ # Title
+ printf DOT ("digraph \"%s; %s %s\" {\n",
+ $prog,
+ Unparse($overall_total),
+ Units());
+ if ($main::opt_pdf) {
+ # The output is more printable if we set the page size for dot.
+ printf DOT ("size=\"8,11\"\n");
+ }
+ printf DOT ("node [width=0.375,height=0.25];\n");
+
+ # Print legend
+ printf DOT ("Legend [shape=box,fontsize=24,shape=plaintext," .
+ "label=\"%s\\l%s\\l%s\\l%s\\l%s\\l\"];\n",
+ $prog,
+ sprintf("Total %s: %s", Units(), Unparse($overall_total)),
+ sprintf("Focusing on: %s", Unparse($local_total)),
+ sprintf("Dropped nodes with <= %s abs(%s)",
+ Unparse($nodelimit), Units()),
+ sprintf("Dropped edges with <= %s %s",
+ Unparse($edgelimit), Units())
+ );
+
+ # Print nodes
+ my %node = ();
+ my $nextnode = 1;
+ foreach my $a (@list[0..$last]) {
+ # Pick font size
+ my $f = GetEntry($flat, $a);
+ my $c = GetEntry($cumulative, $a);
+
+ my $fs = 8;
+ if ($local_total > 0) {
+ $fs = 8 + (50.0 * sqrt(abs($f * 1.0 / $local_total)));
+ }
+
+ $node{$a} = $nextnode++;
+ my $sym = $a;
+ $sym =~ s/\s+/\\n/g;
+ $sym =~ s/::/\\n/g;
+
+ # Extra cumulative info to print for non-leaves
+ my $extra = "";
+ if ($f != $c) {
+ $extra = sprintf("\\rof %s (%s)",
+ Unparse($c),
+ Percent($c, $local_total));
+ }
+ my $style = "";
+ if ($main::opt_heapcheck) {
+ if ($f > 0) {
+ # make leak-causing nodes more visible (add a background)
+ $style = ",style=filled,fillcolor=gray"
+ } elsif ($f < 0) {
+ # make anti-leak-causing nodes (which almost never occur)
+ # stand out as well (triple border)
+ $style = ",peripheries=3"
+ }
+ }
+
+ printf DOT ("N%d [label=\"%s\\n%s (%s)%s\\r" .
+ "\",shape=box,fontsize=%.1f%s];\n",
+ $node{$a},
+ $sym,
+ Unparse($f),
+ Percent($f, $local_total),
+ $extra,
+ $fs,
+ $style,
+ );
+ }
+
+ # Get edges and counts per edge
+ my %edge = ();
+ my $n;
+ my $fullname_to_shortname_map = {};
+ FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map);
+ foreach my $k (keys(%{$raw})) {
+ # TODO: omit low %age edges
+ $n = $raw->{$k};
+ my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k);
+ for (my $i = 1; $i <= $#translated; $i++) {
+ my $src = $translated[$i];
+ my $dst = $translated[$i-1];
+ #next if ($src eq $dst); # Avoid self-edges?
+ if (exists($node{$src}) && exists($node{$dst})) {
+ my $edge_label = "$src\001$dst";
+ if (!exists($edge{$edge_label})) {
+ $edge{$edge_label} = 0;
+ }
+ $edge{$edge_label} += $n;
+ }
+ }
+ }
+
+ # Print edges (process in order of decreasing counts)
+ my %indegree = (); # Number of incoming edges added per node so far
+ my %outdegree = (); # Number of outgoing edges added per node so far
+ foreach my $e (sort { $edge{$b} <=> $edge{$a} } keys(%edge)) {
+ my @x = split(/\001/, $e);
+ $n = $edge{$e};
+
+ # Initialize degree of kept incoming and outgoing edges if necessary
+ my $src = $x[0];
+ my $dst = $x[1];
+ if (!exists($outdegree{$src})) { $outdegree{$src} = 0; }
+ if (!exists($indegree{$dst})) { $indegree{$dst} = 0; }
+
+ my $keep;
+ if ($indegree{$dst} == 0) {
+ # Keep edge if needed for reachability
+ $keep = 1;
+ } elsif (abs($n) <= $edgelimit) {
+ # Drop if we are below --edgefraction
+ $keep = 0;
+ } elsif ($outdegree{$src} >= $main::opt_maxdegree ||
+ $indegree{$dst} >= $main::opt_maxdegree) {
+ # Keep limited number of in/out edges per node
+ $keep = 0;
+ } else {
+ $keep = 1;
+ }
+
+ if ($keep) {
+ $outdegree{$src}++;
+ $indegree{$dst}++;
+
+ # Compute line width based on edge count
+ my $fraction = abs($local_total ? (3 * ($n / $local_total)) : 0);
+ if ($fraction > 1) { $fraction = 1; }
+ my $w = $fraction * 2;
+ if ($w < 1 && ($main::opt_web || $main::opt_svg)) {
+ # SVG output treats line widths < 1 poorly.
+ $w = 1;
+ }
+
+ # Dot sometimes segfaults if given edge weights that are too large, so
+ # we cap the weights at a large value
+ my $edgeweight = abs($n) ** 0.7;
+ if ($edgeweight > 100000) { $edgeweight = 100000; }
+ $edgeweight = int($edgeweight);
+
+ my $style = sprintf("setlinewidth(%f)", $w);
+ if ($x[1] =~ m/\(inline\)/) {
+ $style .= ",dashed";
+ }
+
+ # Use a slightly squashed function of the edge count as the weight
+ printf DOT ("N%s -> N%s [label=%s, weight=%d, style=\"%s\"];\n",
+ $node{$x[0]},
+ $node{$x[1]},
+ Unparse($n),
+ $edgeweight,
+ $style);
+ }
+ }
+
+ print DOT ("}\n");
+ close(DOT);
+
+ if ($main::opt_web || $main::opt_svg) {
+ # Rewrite SVG to be more usable inside web browser.
+ RewriteSvg(TempName($main::next_tmpfile, "svg"));
+ }
+
+ return 1;
+}
+
+sub RewriteSvg {
+ my $svgfile = shift;
+
+ open(SVG, $svgfile) || die "open temp svg: $!";
+ my @svg = <SVG>;
+ close(SVG);
+ unlink $svgfile;
+ my $svg = join('', @svg);
+
+ # Dot's SVG output is
+ #
+ # <svg width="___" height="___"
+ # viewBox="___" xmlns=...>
+ # <g id="graph0" transform="...">
+ # ...
+ # </g>
+ # </svg>
+ #
+ # Change it to
+ #
+ # <svg width="100%" height="100%"
+ # xmlns=...>
+ # $svg_javascript
+ # <g id="viewport" transform="translate(0,0)">
+ # <g id="graph0" transform="...">
+ # ...
+ # </g>
+ # </g>
+ # </svg>
+
+ # Fix width, height; drop viewBox.
+ $svg =~ s/(?s)<svg width="[^"]+" height="[^"]+"(.*?)viewBox="[^"]+"/<svg width="100%" height="100%"$1/;
+
+ # Insert script, viewport <g> above first <g>
+ my $svg_javascript = SvgJavascript();
+ my $viewport = "<g id=\"viewport\" transform=\"translate(0,0)\">\n";
+ $svg =~ s/<g id="graph\d"/$svg_javascript$viewport$&/;
+
+ # Insert final </g> above </svg>.
+ $svg =~ s/(.*)(<\/svg>)/$1<\/g>$2/;
+ $svg =~ s/<g id="graph\d"(.*?)/<g id="viewport"$1/;
+
+ if ($main::opt_svg) {
+ # --svg: write to standard output.
+ print $svg;
+ } else {
+ # Write back to temporary file.
+ open(SVG, ">$svgfile") || die "open $svgfile: $!";
+ print SVG $svg;
+ close(SVG);
+ }
+}
+
+sub SvgJavascript {
+ return <<'EOF';
+<script type="text/ecmascript"><![CDATA[
+// SVGPan
+// http://www.cyberz.org/blog/2009/12/08/svgpan-a-javascript-svg-panzoomdrag-library/
+// Local modification: if(true || ...) below to force panning, never moving.
+
+/**
+ * SVGPan library 1.2
+ * ====================
+ *
+ * Given a unique existing element with id "viewport", including
+ * the library into any SVG adds the following capabilities:
+ *
+ * - Mouse panning
+ * - Mouse zooming (using the wheel)
+ * - Object dragging
+ *
+ * Known issues:
+ *
+ * - Zooming (while panning) on Safari still has some issues
+ *
+ * Releases:
+ *
+ * 1.2, Sat Mar 20 08:42:50 GMT 2010, Zeng Xiaohui
+ * Fixed a bug with browser mouse handler interaction
+ *
+ * 1.1, Wed Feb 3 17:39:33 GMT 2010, Zeng Xiaohui
+ * Updated the zoom code to support the mouse wheel on Safari/Chrome
+ *
+ * 1.0, Andrea Leofreddi
+ * First release
+ *
+ * This code is licensed under the following BSD license:
+ *
+ * Copyright 2009-2010 Andrea Leofreddi <a.leofreddi@itcharm.com>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Andrea Leofreddi ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Andrea Leofreddi OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are those of the
+ * authors and should not be interpreted as representing official policies, either expressed
+ * or implied, of Andrea Leofreddi.
+ */
+
+var root = document.documentElement;
+
+var state = 'none', stateTarget, stateOrigin, stateTf;
+
+setupHandlers(root);
+
+/**
+ * Register handlers
+ */
+function setupHandlers(root){
+ setAttributes(root, {
+ "onmouseup" : "add(evt)",
+ "onmousedown" : "handleMouseDown(evt)",
+ "onmousemove" : "handleMouseMove(evt)",
+ "onmouseup" : "handleMouseUp(evt)",
+ //"onmouseout" : "handleMouseUp(evt)", // Decomment this to stop the pan functionality when dragging out of the SVG element
+ });
+
+ if(navigator.userAgent.toLowerCase().indexOf('webkit') >= 0)
+ window.addEventListener('mousewheel', handleMouseWheel, false); // Chrome/Safari
+ else
+ window.addEventListener('DOMMouseScroll', handleMouseWheel, false); // Others
+
+ // Make the root <svg> element fill the window. (The original code
+ // referenced "svgDoc", which is not defined in this scope.)
+ root.setAttribute("width", "100%");
+ root.setAttribute("height", "100%");
+}
+
+/**
+ * Instance an SVGPoint object with given event coordinates.
+ */
+function getEventPoint(evt) {
+ var p = root.createSVGPoint();
+
+ p.x = evt.clientX;
+ p.y = evt.clientY;
+
+ return p;
+}
+
+/**
+ * Sets the current transform matrix of an element.
+ */
+function setCTM(element, matrix) {
+ var s = "matrix(" + matrix.a + "," + matrix.b + "," + matrix.c + "," + matrix.d + "," + matrix.e + "," + matrix.f + ")";
+
+ element.setAttribute("transform", s);
+}
+
+/**
+ * Dumps a matrix to a string (useful for debug).
+ */
+function dumpMatrix(matrix) {
+ var s = "[ " + matrix.a + ", " + matrix.c + ", " + matrix.e + "\n " + matrix.b + ", " + matrix.d + ", " + matrix.f + "\n 0, 0, 1 ]";
+
+ return s;
+}
+
+/**
+ * Sets attributes of an element.
+ */
+function setAttributes(element, attributes){
+ for (i in attributes)
+ element.setAttributeNS(null, i, attributes[i]);
+}
+
+/**
+ * Handle mouse wheel event.
+ */
+function handleMouseWheel(evt) {
+ if(evt.preventDefault)
+ evt.preventDefault();
+
+ evt.returnValue = false;
+
+ var svgDoc = evt.target.ownerDocument;
+
+ var delta;
+
+ if(evt.wheelDelta)
+ delta = evt.wheelDelta / 3600; // Chrome/Safari
+ else
+ delta = evt.detail / -90; // Mozilla
+
+ var z = 1 + delta; // Zoom factor (about 0.97/1.03 per wheel step)
+
+ var g = svgDoc.getElementById("viewport");
+
+ var p = getEventPoint(evt);
+
+ p = p.matrixTransform(g.getCTM().inverse());
+
+ // Compute new scale matrix in current mouse position
+ var k = root.createSVGMatrix().translate(p.x, p.y).scale(z).translate(-p.x, -p.y);
+
+ setCTM(g, g.getCTM().multiply(k));
+
+ stateTf = stateTf.multiply(k.inverse());
+}
+
+/**
+ * Handle mouse move event.
+ */
+function handleMouseMove(evt) {
+ if(evt.preventDefault)
+ evt.preventDefault();
+
+ evt.returnValue = false;
+
+ var svgDoc = evt.target.ownerDocument;
+
+ var g = svgDoc.getElementById("viewport");
+
+ if(state == 'pan') {
+ // Pan mode
+ var p = getEventPoint(evt).matrixTransform(stateTf);
+
+ setCTM(g, stateTf.inverse().translate(p.x - stateOrigin.x, p.y - stateOrigin.y));
+ } else if(state == 'move') {
+ // Move mode
+ var p = getEventPoint(evt).matrixTransform(g.getCTM().inverse());
+
+ setCTM(stateTarget, root.createSVGMatrix().translate(p.x - stateOrigin.x, p.y - stateOrigin.y).multiply(g.getCTM().inverse()).multiply(stateTarget.getCTM()));
+
+ stateOrigin = p;
+ }
+}
+
+/**
+ * Handle mouse button press event.
+ */
+function handleMouseDown(evt) {
+ if(evt.preventDefault)
+ evt.preventDefault();
+
+ evt.returnValue = false;
+
+ var svgDoc = evt.target.ownerDocument;
+
+ var g = svgDoc.getElementById("viewport");
+
+ if(true || evt.target.tagName == "svg") {
+ // Pan mode
+ state = 'pan';
+
+ stateTf = g.getCTM().inverse();
+
+ stateOrigin = getEventPoint(evt).matrixTransform(stateTf);
+ } else {
+ // Move mode
+ state = 'move';
+
+ stateTarget = evt.target;
+
+ stateTf = g.getCTM().inverse();
+
+ stateOrigin = getEventPoint(evt).matrixTransform(stateTf);
+ }
+}
+
+/**
+ * Handle mouse button release event.
+ */
+function handleMouseUp(evt) {
+ if(evt.preventDefault)
+ evt.preventDefault();
+
+ evt.returnValue = false;
+
+ var svgDoc = evt.target.ownerDocument;
+
+ if(state == 'pan' || state == 'move') {
+ // Quit pan/move mode
+ state = 'none';
+ }
+}
+
+]]></script>
+EOF
+}
+
+# Provides a map from fullname to shortname for cases where the
+# shortname is ambiguous. The symlist has both the fullname and
+# shortname for all symbols, which is usually fine, but sometimes --
+# such as overloaded functions -- two different fullnames can map to
+# the same shortname. In that case, we use the address of the
+# function to disambiguate the two. This function fills in a map that
+# maps fullnames to modified shortnames in such cases. If a fullname
+# is not present in the map, the 'normal' shortname provided by the
+# symlist is the appropriate one to use.
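+# For example (illustrative): overloads sharing the shortname "Foo::bar"
+# with fullnames "Foo::bar<00401000>" and "Foo::bar<00402000>" map to
+# "Foo::bar@401000" and "Foo::bar@402000".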
+sub FillFullnameToShortnameMap {
+ my $symbols = shift;
+ my $fullname_to_shortname_map = shift;
+ my $shortnames_seen_once = {};
+ my $shortnames_seen_more_than_once = {};
+
+ foreach my $symlist (values(%{$symbols})) {
+ # TODO(csilvers): deal with inlined symbols too.
+ my $shortname = $symlist->[0];
+ my $fullname = $symlist->[2];
+ if ($fullname !~ /<[0-9a-fA-F]+>$/) { # fullname doesn't end in an address
+ next; # the only collisions we care about are when addresses differ
+ }
+ if (defined($shortnames_seen_once->{$shortname}) &&
+ $shortnames_seen_once->{$shortname} ne $fullname) {
+ $shortnames_seen_more_than_once->{$shortname} = 1;
+ } else {
+ $shortnames_seen_once->{$shortname} = $fullname;
+ }
+ }
+
+ foreach my $symlist (values(%{$symbols})) {
+ my $shortname = $symlist->[0];
+ my $fullname = $symlist->[2];
+ # TODO(csilvers): take in a list of addresses we care about, and only
+ # store in the map if $symlist->[1] is in that list. Saves space.
+ next if defined($fullname_to_shortname_map->{$fullname});
+ if (defined($shortnames_seen_more_than_once->{$shortname})) {
+ if ($fullname =~ /<0*([^>]*)>$/) { # fullname has address at end of it
+ $fullname_to_shortname_map->{$fullname} = "$shortname\@$1";
+ }
+ }
+ }
+}
+
+# Return a small number that identifies the argument.
+# Multiple calls with the same argument will return the same number.
+# Calls with different arguments will return different numbers.
+sub ShortIdFor {
+ my $key = shift;
+ my $id = $main::uniqueid{$key};
+ if (!defined($id)) {
+ $id = keys(%main::uniqueid) + 1;
+ $main::uniqueid{$key} = $id;
+ }
+ return $id;
+}
+
+# Translate a stack of addresses into a stack of symbols
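+# For example (illustrative): with --functions, a two-address key might
+# translate to ("foo (inline)", "bar", "main"), where foo was inlined
+# into bar; callees appear before callers.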
+sub TranslateStack {
+ my $symbols = shift;
+ my $fullname_to_shortname_map = shift;
+ my $k = shift;
+
+ my @addrs = split(/\n/, $k);
+ my @result = ();
+ for (my $i = 0; $i <= $#addrs; $i++) {
+ my $a = $addrs[$i];
+
+ # Skip large addresses since they sometimes show up as fake entries on RH9
+ if (length($a) > 8 && $a gt "7fffffffffffffff") {
+ next;
+ }
+
+ if ($main::opt_disasm || $main::opt_list) {
+ # We want just the address for the key
+ push(@result, $a);
+ next;
+ }
+
+ my $symlist = $symbols->{$a};
+ if (!defined($symlist)) {
+ $symlist = [$a, "", $a];
+ }
+
+ # We can have a sequence of symbols for a particular entry
+ # (more than one symbol in the case of inlining). Callers
+ # come before callees in symlist, so walk backwards since
+ # the translated stack should contain callees before callers.
+ for (my $j = $#{$symlist}; $j >= 2; $j -= 3) {
+ my $func = $symlist->[$j-2];
+ my $fileline = $symlist->[$j-1];
+ my $fullfunc = $symlist->[$j];
+ if (defined($fullname_to_shortname_map->{$fullfunc})) {
+ $func = $fullname_to_shortname_map->{$fullfunc};
+ }
+ if ($j > 2) {
+ $func = "$func (inline)";
+ }
+
+ # Do not merge nodes corresponding to Callback::Run since that
+ # causes confusing cycles in dot display. Instead, we synthesize
+ # a unique name for this frame per caller.
+ if ($func =~ m/Callback.*::Run$/) {
+ my $caller = ($i > 0) ? $addrs[$i-1] : 0;
+ $func = "Run#" . ShortIdFor($caller);
+ }
+
+ if ($main::opt_addresses) {
+ push(@result, "$a $func $fileline");
+ } elsif ($main::opt_lines) {
+ if ($func eq '??' && $fileline eq '??:0') {
+ push(@result, "$a");
+ } else {
+ push(@result, "$func $fileline");
+ }
+ } elsif ($main::opt_functions) {
+ if ($func eq '??') {
+ push(@result, "$a");
+ } else {
+ push(@result, $func);
+ }
+ } elsif ($main::opt_files) {
+ if ($fileline eq '??:0' || $fileline eq '') {
+ push(@result, "$a");
+ } else {
+ my $f = $fileline;
+ $f =~ s/:\d+$//;
+ push(@result, $f);
+ }
+ } else {
+ push(@result, $a);
+ last; # Do not print inlined info
+ }
+ }
+ }
+
+ # print join(",", @addrs), " => ", join(",", @result), "\n";
+ return @result;
+}
+
+# Generate percent string for a number and a total
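+# e.g. Percent(1, 8) returns "12.5%", and Percent(3, 0) returns "+inf".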
+sub Percent {
+ my $num = shift;
+ my $tot = shift;
+ if ($tot != 0) {
+ return sprintf("%.1f%%", $num * 100.0 / $tot);
+ } else {
+ return ($num == 0) ? "nan" : (($num > 0) ? "+inf" : "-inf");
+ }
+}
+
+# Generate pretty-printed form of number
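+# e.g. for heap profiles in the default MB mode, Unparse(2097152)
+# returns "2.0"; for CPU profiles the raw sample count is returned.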
+sub Unparse {
+ my $num = shift;
+ if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
+ if ($main::opt_inuse_objects || $main::opt_alloc_objects) {
+ return sprintf("%d", $num);
+ } else {
+ if ($main::opt_show_bytes) {
+ return sprintf("%d", $num);
+ } else {
+ return sprintf("%.1f", $num / 1048576.0);
+ }
+ }
+ } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) {
+ return sprintf("%.3f", $num / 1e9); # Convert nanoseconds to seconds
+ } else {
+ return sprintf("%d", $num);
+ }
+}
+
+# Alternate pretty-printed form: 0 maps to "."
+sub UnparseAlt {
+ my $num = shift;
+ if ($num == 0) {
+ return ".";
+ } else {
+ return Unparse($num);
+ }
+}
+
+# Alternate pretty-printed form: 0 maps to ""
+sub HtmlPrintNumber {
+ my $num = shift;
+ if ($num == 0) {
+ return "";
+ } else {
+ return Unparse($num);
+ }
+}
+
+# Return output units
+sub Units {
+ if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
+ if ($main::opt_inuse_objects || $main::opt_alloc_objects) {
+ return "objects";
+ } else {
+ if ($main::opt_show_bytes) {
+ return "B";
+ } else {
+ return "MB";
+ }
+ }
+ } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) {
+ return "seconds";
+ } else {
+ return "samples";
+ }
+}
+
+##### Profile manipulation code #####
+
+# Generate flattened profile:
+# If count is charged to stack [a,b,c,d], in generated profile,
+# it will be charged to [a]
+sub FlatProfile {
+ my $profile = shift;
+ my $result = {};
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ if ($#addrs >= 0) {
+ AddEntry($result, $addrs[0], $count);
+ }
+ }
+ return $result;
+}
+
+# Generate cumulative profile:
+# If count is charged to stack [a,b,c,d], in generated profile,
+# it will be charged to [a], [b], [c], [d]
+sub CumulativeProfile {
+ my $profile = shift;
+ my $result = {};
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ foreach my $a (@addrs) {
+ AddEntry($result, $a, $count);
+ }
+ }
+ return $result;
+}
+
+# If the second-youngest PC on the stack is always the same, returns
+# that pc. Otherwise, returns undef.
+sub IsSecondPcAlwaysTheSame {
+ my $profile = shift;
+
+ my $second_pc = undef;
+ foreach my $k (keys(%{$profile})) {
+ my @addrs = split(/\n/, $k);
+ if ($#addrs < 1) {
+ return undef;
+ }
+ if (not defined $second_pc) {
+ $second_pc = $addrs[1];
+ } else {
+ if ($second_pc ne $addrs[1]) {
+ return undef;
+ }
+ }
+ }
+ return $second_pc;
+}
+
+sub ExtractSymbolNameInlineStack {
+ my $symbols = shift;
+ my $address = shift;
+
+ my @stack = ();
+
+ if (exists $symbols->{$address}) {
+ my @localinlinestack = @{$symbols->{$address}};
+ for (my $i = $#localinlinestack; $i > 0; $i-=3) {
+ my $file = $localinlinestack[$i-1];
+ my $fn = $localinlinestack[$i-0];
+
+ if ($file eq "?" || $file eq ":0") {
+ $file = "??:0";
+ }
+ if ($fn eq '??') {
+ # If we can't get the symbol name, at least use the file information.
+ $fn = $file;
+ }
+ my $suffix = "[inline]";
+ if ($i == 2) {
+ $suffix = "";
+ }
+ push (@stack, $fn.$suffix);
+ }
+ }
+ else {
+ # If we can't get a symbol name, at least fill in the address.
+ push (@stack, $address);
+ }
+
+ return @stack;
+}
+
+sub ExtractSymbolLocation {
+ my $symbols = shift;
+ my $address = shift;
+ # 'addr2line' outputs "??:0" for unknown locations; we do the
+ # same to be consistent.
+ my $location = "??:0:unknown";
+ if (exists $symbols->{$address}) {
+ my $file = $symbols->{$address}->[1];
+ if ($file eq "?") {
+ $file = "??:0"
+ }
+ $location = $file . ":" . $symbols->{$address}->[0];
+ }
+ return $location;
+}
+
+# Extracts a graph of calls.
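+# Each key in the result is either a location ("file:line:name") or a
+# call edge ("source -> destination"); the values are sample counts.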
+sub ExtractCalls {
+ my $symbols = shift;
+ my $profile = shift;
+
+ my $calls = {};
+ while( my ($stack_trace, $count) = each %$profile ) {
+ my @address = split(/\n/, $stack_trace);
+ my $destination = ExtractSymbolLocation($symbols, $address[0]);
+ AddEntry($calls, $destination, $count);
+ for (my $i = 1; $i <= $#address; $i++) {
+ my $source = ExtractSymbolLocation($symbols, $address[$i]);
+ my $call = "$source -> $destination";
+ AddEntry($calls, $call, $count);
+ $destination = $source;
+ }
+ }
+
+ return $calls;
+}
+
+sub FilterFrames {
+ my $symbols = shift;
+ my $profile = shift;
+
+ if ($main::opt_retain eq '' && $main::opt_exclude eq '') {
+ return $profile;
+ }
+
+ my $result = {};
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ my @path = ();
+ foreach my $a (@addrs) {
+ my $sym;
+ if (exists($symbols->{$a})) {
+ $sym = $symbols->{$a}->[0];
+ } else {
+ $sym = $a;
+ }
+ if ($main::opt_retain ne '' && $sym !~ m/$main::opt_retain/) {
+ next;
+ }
+ if ($main::opt_exclude ne '' && $sym =~ m/$main::opt_exclude/) {
+ next;
+ }
+ push(@path, $a);
+ }
+ if (scalar(@path) > 0) {
+ my $reduced_path = join("\n", @path);
+ AddEntry($result, $reduced_path, $count);
+ }
+ }
+
+ return $result;
+}
+
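+# Print stacks in the "folded" one-line-per-stack format consumed by
+# flame-graph tools, e.g. "main;foo;bar 42" (illustrative).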
+sub PrintCollapsedStacks {
+ my $symbols = shift;
+ my $profile = shift;
+
+ while (my ($stack_trace, $count) = each %$profile) {
+ my @address = split(/\n/, $stack_trace);
+ my @names = reverse ( map { ExtractSymbolNameInlineStack($symbols, $_) } @address );
+ printf("%s %d\n", join(";", @names), $count);
+ }
+}
+
+sub RemoveUninterestingFrames {
+ my $symbols = shift;
+ my $profile = shift;
+
+ # List of function names to skip
+ my %skip = ();
+ my $skip_regexp = 'NOMATCH';
+ if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
+ foreach my $name ('@JEMALLOC_PREFIX@calloc',
+ 'cfree',
+ '@JEMALLOC_PREFIX@malloc',
+ 'newImpl',
+ 'void* newImpl',
+ '@JEMALLOC_PREFIX@free',
+ '@JEMALLOC_PREFIX@memalign',
+ '@JEMALLOC_PREFIX@posix_memalign',
+ '@JEMALLOC_PREFIX@aligned_alloc',
+ 'pvalloc',
+ '@JEMALLOC_PREFIX@valloc',
+ '@JEMALLOC_PREFIX@realloc',
+ '@JEMALLOC_PREFIX@mallocx',
+ '@JEMALLOC_PREFIX@rallocx',
+ '@JEMALLOC_PREFIX@xallocx',
+ '@JEMALLOC_PREFIX@dallocx',
+ '@JEMALLOC_PREFIX@sdallocx',
+ '@JEMALLOC_PREFIX@sdallocx_noflags',
+ 'tc_calloc',
+ 'tc_cfree',
+ 'tc_malloc',
+ 'tc_free',
+ 'tc_memalign',
+ 'tc_posix_memalign',
+ 'tc_pvalloc',
+ 'tc_valloc',
+ 'tc_realloc',
+ 'tc_new',
+ 'tc_delete',
+ 'tc_newarray',
+ 'tc_deletearray',
+ 'tc_new_nothrow',
+ 'tc_newarray_nothrow',
+ 'do_malloc',
+ '::do_malloc', # new name -- got moved to an unnamed ns
+ '::do_malloc_or_cpp_alloc',
+ 'DoSampledAllocation',
+ 'simple_alloc::allocate',
+ '__malloc_alloc_template::allocate',
+ '__builtin_delete',
+ '__builtin_new',
+ '__builtin_vec_delete',
+ '__builtin_vec_new',
+ 'operator new',
+ 'operator new[]',
+ # The entry to our memory-allocation routines on OS X
+ 'malloc_zone_malloc',
+ 'malloc_zone_calloc',
+ 'malloc_zone_valloc',
+ 'malloc_zone_realloc',
+ 'malloc_zone_memalign',
+ 'malloc_zone_free',
+ # These mark the beginning/end of our custom sections
+ '__start_google_malloc',
+ '__stop_google_malloc',
+ '__start_malloc_hook',
+ '__stop_malloc_hook') {
+ $skip{$name} = 1;
+ $skip{"_" . $name} = 1; # Mach (OS X) adds a _ prefix to everything
+ }
+ # TODO: Remove TCMalloc once everything has been
+ # moved into the tcmalloc:: namespace and we have flushed
+ # old code out of the system.
+ $skip_regexp = "TCMalloc|^tcmalloc::";
+ } elsif ($main::profile_type eq 'contention') {
+ foreach my $vname ('base::RecordLockProfileData',
+ 'base::SubmitMutexProfileData',
+ 'base::SubmitSpinLockProfileData',
+ 'Mutex::Unlock',
+ 'Mutex::UnlockSlow',
+ 'Mutex::ReaderUnlock',
+ 'MutexLock::~MutexLock',
+ 'SpinLock::Unlock',
+ 'SpinLock::SlowUnlock',
+ 'SpinLockHolder::~SpinLockHolder') {
+ $skip{$vname} = 1;
+ }
+ } elsif ($main::profile_type eq 'cpu') {
+ # Drop signal handlers used for CPU profile collection
+ # TODO(dpeng): this should not be necessary; it's taken
+ # care of by the general 2nd-pc mechanism below.
+ foreach my $name ('ProfileData::Add', # historical
+ 'ProfileData::prof_handler', # historical
+ 'CpuProfiler::prof_handler',
+ '__FRAME_END__',
+ '__pthread_sighandler',
+ '__restore') {
+ $skip{$name} = 1;
+ }
+ } else {
+ # Nothing skipped for unknown types
+ }
+
+ if ($main::profile_type eq 'cpu') {
+ # If all the second-youngest program counters are the same,
+ # this STRONGLY suggests that it is an artifact of measurement,
+ # i.e., stack frames pushed by the CPU profiler signal handler.
+ # Hence, we delete them.
+ # (The topmost PC is read from the signal structure, not from
+ # the stack, so it does not get involved.)
+ while (my $second_pc = IsSecondPcAlwaysTheSame($profile)) {
+ my $result = {};
+ if (exists($symbols->{$second_pc})) {
+ $second_pc = $symbols->{$second_pc}->[0];
+ }
+ print STDERR "Removing $second_pc from all stack traces.\n";
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ splice @addrs, 1, 1;
+ my $reduced_path = join("\n", @addrs);
+ AddEntry($result, $reduced_path, $count);
+ }
+ $profile = $result;
+ }
+ }
+
+ my $result = {};
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ my @path = ();
+ foreach my $a (@addrs) {
+ if (exists($symbols->{$a})) {
+ my $func = $symbols->{$a}->[0];
+ if ($skip{$func} || ($func =~ m/$skip_regexp/)) {
+ # Throw away the portion of the backtrace seen so far, under the
+ # assumption that previous frames were for functions internal to the
+ # allocator.
+ @path = ();
+ next;
+ }
+ }
+ push(@path, $a);
+ }
+ my $reduced_path = join("\n", @path);
+ AddEntry($result, $reduced_path, $count);
+ }
+
+ $result = FilterFrames($symbols, $result);
+
+ return $result;
+}
+
+# Reduce profile to granularity given by user
+sub ReduceProfile {
+ my $symbols = shift;
+ my $profile = shift;
+ my $result = {};
+ my $fullname_to_shortname_map = {};
+ FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map);
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k);
+ my @path = ();
+ my %seen = ();
+ $seen{''} = 1; # So that empty keys are skipped
+ foreach my $e (@translated) {
+ # To avoid double-counting due to recursion, skip a stack-trace
+ # entry if it has already been seen
+ if (!$seen{$e}) {
+ $seen{$e} = 1;
+ push(@path, $e);
+ }
+ }
+ my $reduced_path = join("\n", @path);
+ AddEntry($result, $reduced_path, $count);
+ }
+ return $result;
+}
+
+# Does the specified symbol array match the regexp?
+sub SymbolMatches {
+ my $sym = shift;
+ my $re = shift;
+ if (defined($sym)) {
+ for (my $i = 0; $i < $#{$sym}; $i += 3) {
+ if ($sym->[$i] =~ m/$re/ || $sym->[$i+1] =~ m/$re/) {
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+# Focus only on paths involving specified regexps
+sub FocusProfile {
+ my $symbols = shift;
+ my $profile = shift;
+ my $focus = shift;
+ my $result = {};
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ foreach my $a (@addrs) {
+ # Keep this stack if any frame matches the address/shortname/fileline
+ if (($a =~ m/$focus/) || SymbolMatches($symbols->{$a}, $focus)) {
+ AddEntry($result, $k, $count);
+ last;
+ }
+ }
+ }
+ return $result;
+}
+
+# Focus only on paths not involving specified regexps
+sub IgnoreProfile {
+ my $symbols = shift;
+ my $profile = shift;
+ my $ignore = shift;
+ my $result = {};
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ my $matched = 0;
+ foreach my $a (@addrs) {
+ # Mark the stack as matched if any frame matches the address/shortname/fileline
+ if (($a =~ m/$ignore/) || SymbolMatches($symbols->{$a}, $ignore)) {
+ $matched = 1;
+ last;
+ }
+ }
+ if (!$matched) {
+ AddEntry($result, $k, $count);
+ }
+ }
+ return $result;
+}
+
+# Get total count in profile
+sub TotalProfile {
+ my $profile = shift;
+ my $result = 0;
+ foreach my $k (keys(%{$profile})) {
+ $result += $profile->{$k};
+ }
+ return $result;
+}
+
+# Add A to B
+sub AddProfile {
+ my $A = shift;
+ my $B = shift;
+
+ my $R = {};
+ # add all keys in A
+ foreach my $k (keys(%{$A})) {
+ my $v = $A->{$k};
+ AddEntry($R, $k, $v);
+ }
+ # add all keys in B
+ foreach my $k (keys(%{$B})) {
+ my $v = $B->{$k};
+ AddEntry($R, $k, $v);
+ }
+ return $R;
+}
+
+# Merges symbol maps
+sub MergeSymbols {
+ my $A = shift;
+ my $B = shift;
+
+ my $R = {};
+ foreach my $k (keys(%{$A})) {
+ $R->{$k} = $A->{$k};
+ }
+ if (defined($B)) {
+ foreach my $k (keys(%{$B})) {
+ $R->{$k} = $B->{$k};
+ }
+ }
+ return $R;
+}
+
+
+# Add A to B
+sub AddPcs {
+ my $A = shift;
+ my $B = shift;
+
+ my $R = {};
+ # add all keys in A
+ foreach my $k (keys(%{$A})) {
+ $R->{$k} = 1
+ }
+ # add all keys in B
+ foreach my $k (keys(%{$B})) {
+ $R->{$k} = 1
+ }
+ return $R;
+}
+
+# Subtract B from A
+sub SubtractProfile {
+ my $A = shift;
+ my $B = shift;
+
+ my $R = {};
+ foreach my $k (keys(%{$A})) {
+ my $v = $A->{$k} - GetEntry($B, $k);
+ if ($v < 0 && $main::opt_drop_negative) {
+ $v = 0;
+ }
+ AddEntry($R, $k, $v);
+ }
+ if (!$main::opt_drop_negative) {
+ # Take care of when subtracted profile has more entries
+ foreach my $k (keys(%{$B})) {
+ if (!exists($A->{$k})) {
+ AddEntry($R, $k, 0 - $B->{$k});
+ }
+ }
+ }
+ return $R;
+}
+
+# Get entry from profile; zero if not present
+sub GetEntry {
+ my $profile = shift;
+ my $k = shift;
+ if (exists($profile->{$k})) {
+ return $profile->{$k};
+ } else {
+ return 0;
+ }
+}
+
+# Add entry to specified profile
+sub AddEntry {
+ my $profile = shift;
+ my $k = shift;
+ my $n = shift;
+ if (!exists($profile->{$k})) {
+ $profile->{$k} = 0;
+ }
+ $profile->{$k} += $n;
+}
+
+# Add a stack of entries to specified profile, and add them to the $pcs
+# list.
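+# e.g. (illustrative) AddEntries($profile, $pcs, "40a1 40b2", 3) charges
+# 3 samples to the two-frame stack and records both PCs in $pcs.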
+sub AddEntries {
+ my $profile = shift;
+ my $pcs = shift;
+ my $stack = shift;
+ my $count = shift;
+ my @k = ();
+
+ foreach my $e (split(/\s+/, $stack)) {
+ my $pc = HexExtend($e);
+ $pcs->{$pc} = 1;
+ push @k, $pc;
+ }
+ AddEntry($profile, (join "\n", @k), $count);
+}
+
+##### Code to profile a server dynamically #####
+
+sub CheckSymbolPage {
+ my $url = SymbolPageURL();
+ my $command = ShellEscape(@URL_FETCHER, $url);
+ open(SYMBOL, "$command |") or error($command);
+ my $line = <SYMBOL>;
+ $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines
+ close(SYMBOL);
+ unless (defined($line)) {
+ error("$url doesn't exist\n");
+ }
+
+ if ($line =~ /^num_symbols:\s+(\d+)$/) {
+ if ($1 == 0) {
+ error("Stripped binary. No symbols available.\n");
+ }
+ } else {
+ error("Failed to get the number of symbols from $url\n");
+ }
+}
+
+sub IsProfileURL {
+ my $profile_name = shift;
+ if (-f $profile_name) {
+ printf STDERR "Using local file $profile_name.\n";
+ return 0;
+ }
+ return 1;
+}
+
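+# Split a profile argument into (host, baseurl, path), e.g.
+# (illustrative) "http://myhost:8080/pprof/heap" yields
+# ("myhost", "http://myhost:8080", "/pprof/heap").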
+sub ParseProfileURL {
+ my $profile_name = shift;
+
+ if (!defined($profile_name) || $profile_name eq "") {
+ return ();
+ }
+
+ # Split profile URL - matches all non-empty strings, so no test.
+ $profile_name =~ m,^(https?://)?([^/]+)(.*?)(/|$PROFILES)?$,;
+
+ my $proto = $1 || "http://";
+ my $hostport = $2;
+ my $prefix = $3;
+ my $profile = $4 || "/";
+
+ my $host = $hostport;
+ $host =~ s/:.*//;
+
+ my $baseurl = "$proto$hostport$prefix";
+ return ($host, $baseurl, $profile);
+}
+
+# We fetch symbols from the first profile argument.
+sub SymbolPageURL {
+ my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
+ return "$baseURL$SYMBOL_PAGE";
+}
+
+sub FetchProgramName() {
+ my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
+ my $url = "$baseURL$PROGRAM_NAME_PAGE";
+ my $command_line = ShellEscape(@URL_FETCHER, $url);
+ open(CMDLINE, "$command_line |") or error($command_line);
+ my $cmdline = <CMDLINE>;
+ $cmdline =~ s/\r//g; # turn windows-looking lines into unix-looking lines
+ close(CMDLINE);
+ error("Failed to get program name from $url\n") unless defined($cmdline);
+ $cmdline =~ s/\x00.+//; # Remove argv[1] and later arguments.
+ $cmdline =~ s!\n!!g; # Remove LFs.
+ return $cmdline;
+}
+
+# curl's -L (--location) option is unreliable, at least as of version
+# 7.12.3: curl forgets to post data when it follows a redirection.
+# This function works around that by resolving any redirection before
+# the real request. Redirection happens on borg hosts.
+sub ResolveRedirectionForCurl {
+ my $url = shift;
+ my $command_line = ShellEscape(@URL_FETCHER, "--head", $url);
+ open(CMDLINE, "$command_line |") or error($command_line);
+ while (<CMDLINE>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ if (/^Location: (.*)/) {
+ $url = $1;
+ }
+ }
+ close(CMDLINE);
+ return $url;
+}
+
+# Add a timeout flag to the URL fetcher command. Returns a new list.
+sub AddFetchTimeout {
+ my $timeout = shift;
+ my @fetcher = @_;
+ if (defined($timeout)) {
+ if (join(" ", @fetcher) =~ m/\bcurl -s/) {
+ push(@fetcher, "--max-time", sprintf("%d", $timeout));
+ } elsif (join(" ", @fetcher) =~ m/\brpcget\b/) {
+ push(@fetcher, sprintf("--deadline=%d", $timeout));
+ }
+ }
+ return @fetcher;
+}
+
+# Reads a symbol map from the file handle name given as $1, returning
+# the resulting symbol map. Also processes variables relating to symbols.
+# Currently, the only variable processed is 'binary=<value>' which updates
+# $main::prog to have the correct program name.
+sub ReadSymbols {
+ my $in = shift;
+ my $map = {};
+ while (<$in>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ # Removes all the leading zeroes from the symbols, see comment below.
+ if (m/^0x0*([0-9a-f]+)\s+(.+)/) {
+ $map->{$1} = $2;
+ } elsif (m/^---/) {
+ last;
+ } elsif (m/^([a-z][^=]*)=(.*)$/ ) {
+ my ($variable, $value) = ($1, $2);
+ for ($variable, $value) {
+ s/^\s+//;
+ s/\s+$//;
+ }
+ if ($variable eq "binary") {
+ if ($main::prog ne $UNKNOWN_BINARY && $main::prog ne $value) {
+ printf STDERR ("Warning: Mismatched binary name '%s', using '%s'.\n",
+ $main::prog, $value);
+ }
+ $main::prog = $value;
+ } else {
+ printf STDERR ("Ignoring unknown variable in symbols list: " .
+ "'%s' = '%s'\n", $variable, $value);
+ }
+ }
+ }
+ return $map;
+}
+
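+# Percent-encode characters outside the unreserved set,
+# e.g. URLEncode("a b") returns "a%20b".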
+sub URLEncode {
+ my $str = shift;
+ $str =~ s/([^A-Za-z0-9\-_.!~*'()])/ sprintf "%%%02x", ord $1 /eg;
+ return $str;
+}
+
+sub AppendSymbolFilterParams {
+ my $url = shift;
+ my @params = ();
+ if ($main::opt_retain ne '') {
+ push(@params, sprintf("retain=%s", URLEncode($main::opt_retain)));
+ }
+ if ($main::opt_exclude ne '') {
+ push(@params, sprintf("exclude=%s", URLEncode($main::opt_exclude)));
+ }
+ if (scalar @params > 0) {
+ $url = sprintf("%s?%s", $url, join("&", @params));
+ }
+ return $url;
+}
+
+# Fetches and processes symbols to prepare them for use in the profile output
+# code. If the optional 'symbol_map' arg is not given, fetches symbols from
+# $SYMBOL_PAGE for all PC values found in profile. Otherwise, the raw symbols
+# are assumed to have already been fetched into 'symbol_map' and are simply
+# extracted and processed.
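+# The symbol request body is the sorted, "0x"-prefixed PC list joined
+# with "+", e.g. "0x40a1+0x40b2" (illustrative addresses).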
+sub FetchSymbols {
+ my $pcset = shift;
+ my $symbol_map = shift;
+
+ my %seen = ();
+ my @pcs = grep { !$seen{$_}++ } keys(%$pcset); # uniq
+
+ if (!defined($symbol_map)) {
+ my $post_data = join("+", sort((map {"0x" . "$_"} @pcs)));
+
+ open(POSTFILE, ">$main::tmpfile_sym");
+ print POSTFILE $post_data;
+ close(POSTFILE);
+
+ my $url = SymbolPageURL();
+
+ my $command_line;
+ if (join(" ", @URL_FETCHER) =~ m/\bcurl -s/) {
+ $url = ResolveRedirectionForCurl($url);
+ $url = AppendSymbolFilterParams($url);
+ $command_line = ShellEscape(@URL_FETCHER, "-d", "\@$main::tmpfile_sym",
+ $url);
+ } else {
+ $url = AppendSymbolFilterParams($url);
+ $command_line = (ShellEscape(@URL_FETCHER, "--post", $url)
+ . " < " . ShellEscape($main::tmpfile_sym));
+ }
+ # We use c++filt in case $SYMBOL_PAGE gives us mangled symbols.
+ my $escaped_cppfilt = ShellEscape($obj_tool_map{"c++filt"});
+ open(SYMBOL, "$command_line | $escaped_cppfilt |") or error($command_line);
+ $symbol_map = ReadSymbols(*SYMBOL{IO});
+ close(SYMBOL);
+ }
+
+ my $symbols = {};
+ foreach my $pc (@pcs) {
+ my $fullname;
+ # For 64-bit binaries, symbols are extracted with 8 leading zeroes.
+ # Then /symbol reads the long symbols in as uint64, and outputs
+ # the result with a "0x%08llx" format which gets rid of the zeroes.
+ # By removing all the leading zeroes in both $pc and the symbols from
+ # /symbol, the symbols match and are retrievable from the map.
+ my $shortpc = $pc;
+ $shortpc =~ s/^0*//;
+ # Each line may have a list of names, which includes the function
+ # and also other functions it has inlined. They are separated (in
+ # PrintSymbolizedProfile) by "--", which is illegal in function names.
+ my $fullnames;
+ if (defined($symbol_map->{$shortpc})) {
+ $fullnames = $symbol_map->{$shortpc};
+ } else {
+ $fullnames = "0x" . $pc; # Just use addresses
+ }
+ my $sym = [];
+ $symbols->{$pc} = $sym;
+ foreach my $fullname (split("--", $fullnames)) {
+ my $name = ShortFunctionName($fullname);
+ push(@{$sym}, $name, "?", $fullname);
+ }
+ }
+ return $symbols;
+}
+
+sub BaseName {
+ my $file_name = shift;
+ $file_name =~ s!^.*/!!; # Remove directory name
+ return $file_name;
+}
+
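+# Compose the base file name for a saved profile as
+# "<binary>.<time>.<host>", e.g. "myprog.2024-01-01.myhost"
+# (illustrative; the middle component is $main::op_time).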
+sub MakeProfileBaseName {
+ my ($binary_name, $profile_name) = @_;
+ my ($host, $baseURL, $path) = ParseProfileURL($profile_name);
+ my $binary_shortname = BaseName($binary_name);
+ return sprintf("%s.%s.%s",
+ $binary_shortname, $main::op_time, $host);
+}
+
+sub FetchDynamicProfile {
+ my $binary_name = shift;
+ my $profile_name = shift;
+ my $fetch_name_only = shift;
+ my $encourage_patience = shift;
+
+ if (!IsProfileURL($profile_name)) {
+ return $profile_name;
+ } else {
+ my ($host, $baseURL, $path) = ParseProfileURL($profile_name);
+ if ($path eq "" || $path eq "/") {
+ # Missing type specifier defaults to cpu-profile
+ $path = $PROFILE_PAGE;
+ }
+
+ my $profile_file = MakeProfileBaseName($binary_name, $profile_name);
+
+ my $url = "$baseURL$path";
+ my $fetch_timeout = undef;
+ if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE/) {
+ if ($path =~ m/[?]/) {
+ $url .= "&";
+ } else {
+ $url .= "?";
+ }
+ $url .= sprintf("seconds=%d", $main::opt_seconds);
+ $fetch_timeout = $main::opt_seconds * 1.01 + 60;
+ # Set $profile_type for consumption by PrintSymbolizedProfile.
+ $main::profile_type = 'cpu';
+ } else {
+ # For non-CPU profiles, we add a type-extension to
+ # the target profile file name.
+ my $suffix = $path;
+ $suffix =~ s,/,.,g;
+ $profile_file .= $suffix;
+ # Set $profile_type for consumption by PrintSymbolizedProfile.
+ if ($path =~ m/$HEAP_PAGE/) {
+ $main::profile_type = 'heap';
+ } elsif ($path =~ m/$GROWTH_PAGE/) {
+ $main::profile_type = 'growth';
+ } elsif ($path =~ m/$CONTENTION_PAGE/) {
+ $main::profile_type = 'contention';
+ }
+ }
+
+ my $profile_dir = $ENV{"JEPROF_TMPDIR"} || ($ENV{HOME} . "/jeprof");
+ if (! -d $profile_dir) {
+ mkdir($profile_dir)
+ || die("Unable to create profile directory $profile_dir: $!\n");
+ }
+ my $tmp_profile = "$profile_dir/.tmp.$profile_file";
+ my $real_profile = "$profile_dir/$profile_file";
+
+ if ($fetch_name_only > 0) {
+ return $real_profile;
+ }
+
+ my @fetcher = AddFetchTimeout($fetch_timeout, @URL_FETCHER);
+ my $cmd = ShellEscape(@fetcher, $url) . " > " . ShellEscape($tmp_profile);
+ if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE|$CENSUSPROFILE_PAGE/){
+ print STDERR "Gathering CPU profile from $url for $main::opt_seconds seconds to\n ${real_profile}\n";
+ if ($encourage_patience) {
+ print STDERR "Be patient...\n";
+ }
+ } else {
+ print STDERR "Fetching $path profile from $url to\n ${real_profile}\n";
+ }
+
+ (system($cmd) == 0) || error("Failed to get profile: $cmd: $!\n");
+ (system("mv", $tmp_profile, $real_profile) == 0) || error("Unable to rename profile\n");
+ print STDERR "Wrote profile to $real_profile\n";
+ $main::collected_profile = $real_profile;
+ return $main::collected_profile;
+ }
+}
+
+# Collect profiles in parallel
+sub FetchDynamicProfiles {
+ my $items = scalar(@main::pfile_args);
+ my $levels = log($items) / log(2);
+
+ if ($items == 1) {
+ $main::profile_files[0] = FetchDynamicProfile($main::prog, $main::pfile_args[0], 0, 1);
+ } else {
+ # log() above can round down; bump $levels if 2**$levels falls short
+ if ((2 ** $levels) < $items) {
+ $levels++;
+ }
+ my $count = scalar(@main::pfile_args);
+ for (my $i = 0; $i < $count; $i++) {
+ $main::profile_files[$i] = FetchDynamicProfile($main::prog, $main::pfile_args[$i], 1, 0);
+ }
+ print STDERR "Fetching $count profiles, Be patient...\n";
+ FetchDynamicProfilesRecurse($levels, 0, 0);
+ $main::collected_profile = join(" \\\n ", @main::profile_files);
+ }
+}
+
+# Recursively fork a process to get enough processes
+# collecting profiles
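+# Each level of recursion doubles the number of processes; $position
+# accumulates one bit per level, so each leaf ends up with a distinct
+# index into @main::pfile_args.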
+sub FetchDynamicProfilesRecurse {
+ my $maxlevel = shift;
+ my $level = shift;
+ my $position = shift;
+
+ if (my $pid = fork()) {
+ $position = 0 | ($position << 1);
+ TryCollectProfile($maxlevel, $level, $position);
+ wait;
+ } else {
+ $position = 1 | ($position << 1);
+ TryCollectProfile($maxlevel, $level, $position);
+ cleanup();
+ exit(0);
+ }
+}
+
+# Collect a single profile
+sub TryCollectProfile {
+ my $maxlevel = shift;
+ my $level = shift;
+ my $position = shift;
+
+ if ($level >= ($maxlevel - 1)) {
+ if ($position < scalar(@main::pfile_args)) {
+ FetchDynamicProfile($main::prog, $main::pfile_args[$position], 0, 0);
+ }
+ } else {
+ FetchDynamicProfilesRecurse($maxlevel, $level+1, $position);
+ }
+}
+
+##### Parsing code #####
+
+# Provide a small streaming-read module to handle very large
+# cpu-profile files. Stream in chunks along a sliding window.
+# Provides an interface to get one 'slot', correctly handling
+# endian-ness differences. A slot is one 32-bit or 64-bit word
+# (depending on the input profile). We tell endianness and bit-size
+# for the profile by looking at the first 8 bytes: in cpu profiles,
+# the second slot is always 3 (we'll accept anything that's not 0).
+BEGIN {
+ package CpuProfileStream;
+
+ sub new {
+ my ($class, $file, $fname) = @_;
+ my $self = { file => $file,
+ base => 0,
+ stride => 512 * 1024, # must be a multiple of bitsize/8
+ slots => [],
+ unpack_code => "", # N for big-endian, V for little
+ perl_is_64bit => 1, # matters if profile is 64-bit
+ };
+ bless $self, $class;
+ # Let unittests adjust the stride
+ if ($main::opt_test_stride > 0) {
+ $self->{stride} = $main::opt_test_stride;
+ }
+ # Read the first two slots to figure out bitsize and endianness.
+ my $slots = $self->{slots};
+ my $str;
+ read($self->{file}, $str, 8);
+ # Set the global $address_length based on what we see here.
+ # 8 is 32-bit (8 hexadecimal chars); 16 is 64-bit (16 hexadecimal chars).
+ $address_length = ($str eq (chr(0)x8)) ? 16 : 8;
+ if ($address_length == 8) {
+ if (substr($str, 6, 2) eq chr(0)x2) {
+ $self->{unpack_code} = 'V'; # Little-endian.
+ } elsif (substr($str, 4, 2) eq chr(0)x2) {
+ $self->{unpack_code} = 'N'; # Big-endian
+ } else {
+ ::error("$fname: header size >= 2**16\n");
+ }
+ @$slots = unpack($self->{unpack_code} . "*", $str);
+ } else {
+ # If we're a 64-bit profile, check if we're a 64-bit-capable
+ # perl. Otherwise, each slot will be represented as a float
+ # instead of an int64, losing precision and making all the
+ # 64-bit addresses wrong. We won't complain yet, but will
+ # later if we ever see a value that doesn't fit in 32 bits.
+ my $has_q = 0;
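+ # pack("Q") dies on perls built without 64-bit integer support,
+ # leaving $has_q at 0.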
+ eval { $has_q = pack("Q", "1") ? 1 : 1; };
+ if (!$has_q) {
+ $self->{perl_is_64bit} = 0;
+ }
+ read($self->{file}, $str, 8);
+ if (substr($str, 4, 4) eq chr(0)x4) {
+ # We'd love to use 'Q', but it's a) not universal, b) not endian-proof.
+ $self->{unpack_code} = 'V'; # Little-endian.
+ } elsif (substr($str, 0, 4) eq chr(0)x4) {
+ $self->{unpack_code} = 'N'; # Big-endian
+ } else {
+ ::error("$fname: header size >= 2**32\n");
+ }
+ my @pair = unpack($self->{unpack_code} . "*", $str);
+ # Since we know one of the pair is 0, it's fine to just add them.
+ @$slots = (0, $pair[0] + $pair[1]);
+ }
+ return $self;
+ }
+
+ # Load more data when we access slots->get(X) which is not yet in memory.
+ sub overflow {
+ my ($self) = @_;
+ my $slots = $self->{slots};
+ $self->{base} += $#$slots + 1; # skip over data we're replacing
+ my $str;
+ read($self->{file}, $str, $self->{stride});
+ if ($address_length == 8) { # the 32-bit case
+ # This is the easy case: unpack provides 32-bit unpacking primitives.
+ @$slots = unpack($self->{unpack_code} . "*", $str);
+ } else {
+ # We need to unpack 32 bits at a time and combine.
+ my @b32_values = unpack($self->{unpack_code} . "*", $str);
+ my @b64_values = ();
+ for (my $i = 0; $i < $#b32_values; $i += 2) {
+ # TODO(csilvers): if this is a 32-bit perl, the math below
+ # could end up in a too-large int, which perl will promote
+ # to a double, losing necessary precision. Deal with that.
+ # Right now, we just die.
+ my ($lo, $hi) = ($b32_values[$i], $b32_values[$i+1]);
+ if ($self->{unpack_code} eq 'N') { # big-endian
+ ($lo, $hi) = ($hi, $lo);
+ }
+ my $value = $lo + $hi * (2**32);
+ if (!$self->{perl_is_64bit} && # check value is exactly represented
+ (($value % (2**32)) != $lo || int($value / (2**32)) != $hi)) {
+ ::error("Need a 64-bit perl to process this 64-bit profile.\n");
+ }
+ push(@b64_values, $value);
+ }
+ @$slots = @b64_values;
+ }
+ }
+
+ # Access the i-th long in the file (logically), or -1 at EOF.
+ sub get {
+ my ($self, $idx) = @_;
+ my $slots = $self->{slots};
+ while ($#$slots >= 0) {
+ if ($idx < $self->{base}) {
+ # The only time we expect a reference to $slots[$i - something]
+ # after referencing $slots[$i] is reading the very first header.
+ # Since $stride > |header|, that shouldn't cause any lookback
+ # errors. And everything after the header is sequential.
+ print STDERR "Unexpected look-back reading CPU profile";
+ return -1; # shrug, don't know what better to return
+ } elsif ($idx > $self->{base} + $#$slots) {
+ $self->overflow();
+ } else {
+ return $slots->[$idx - $self->{base}];
+ }
+ }
+ # If we get here, $slots is [], which means we've reached EOF
+ return -1; # unique since slots is supposed to hold unsigned numbers
+ }
+}
+
+# Reads the top, 'header' section of a profile, and returns the last
+# line of the header, commonly called a 'header line'. The header
+# section of a profile consists of zero or more 'command' lines that
+# are instructions to jeprof, which jeprof executes when reading the
+# header. All 'command' lines start with a %. After the command
+# lines is the 'header line', which is a profile-specific line that
+# indicates what type of profile it is, and perhaps other global
+# information about the profile. For instance, here's a header line
+# for a heap profile:
+# heap profile: 53: 38236 [ 5525: 1284029] @ heapprofile
+# For historical reasons, the CPU profile does not contain a text-
+# readable header line. If the profile looks like a CPU profile,
+# this function returns "". If no header line could be found, this
+# function returns undef.
+#
+# The following commands are recognized:
+# %warn -- emit the rest of this line to stderr, prefixed by 'WARNING:'
+#
+# The input file should be in binmode.
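+# For example, a profile beginning with (illustrative):
+#   %warn this profile is stale
+#   heap profile: 1: 16 [ 1: 16] @ heapprofile
+# prints "WARNING: this profile is stale" to stderr and returns the
+# "heap profile:" line.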
+sub ReadProfileHeader {
+ local *PROFILE = shift;
+ my $firstchar = "";
+ my $line = "";
+ read(PROFILE, $firstchar, 1);
+ seek(PROFILE, -1, 1); # unread the firstchar
+ if ($firstchar !~ /[[:print:]]/) { # is not a text character
+ return "";
+ }
+ while (defined($line = <PROFILE>)) {
+ $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines
+ if ($line =~ /^%warn\s+(.*)/) { # 'warn' command
+ # Note this matches both '%warn blah\n' and '%warn\n'.
+ print STDERR "WARNING: $1\n"; # print the rest of the line
+ } elsif ($line =~ /^%/) {
+ print STDERR "Ignoring unknown command from profile header: $line";
+ } else {
+ # End of commands, must be the header line.
+ return $line;
+ }
+ }
+ return undef; # got to EOF without seeing a header line
+}
+
+sub IsSymbolizedProfileFile {
+ my $file_name = shift;
+ if (!(-e $file_name) || !(-r $file_name)) {
+ return 0;
+ }
+ # Check if the file contains a symbol-section marker.
+ open(TFILE, "<$file_name");
+ binmode TFILE;
+ my $firstline = ReadProfileHeader(*TFILE);
+ close(TFILE);
+ if (!$firstline) {
+ return 0;
+ }
+ $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $symbol_marker = $&;
+ return $firstline =~ /^--- *$symbol_marker/;
+}
+
+# Parse profile generated by common/profiler.cc and return a reference
+# to a map:
+# $result->{version} Version number of profile file
+# $result->{period} Sampling period (in microseconds)
+# $result->{profile} Profile object
+# $result->{threads} Map of thread IDs to profile objects
+# $result->{map} Memory map info from profile
+# $result->{pcs} Hash of all PC values seen, key is hex address
+sub ReadProfile {
+ my $prog = shift;
+ my $fname = shift;
+ my $result; # return value
+
+ $CONTENTION_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $contention_marker = $&;
+ $GROWTH_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $growth_marker = $&;
+ $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $symbol_marker = $&;
+ $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $profile_marker = $&;
+ $HEAP_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $heap_marker = $&;
+
+ # Look at first line to see if it is a heap or a CPU profile.
+ # CPU profile may start with no header at all, and just binary data
+ # (starting with \0\0\0\0) -- in that case, don't try to read the
+ # whole firstline, since it may be gigabytes(!) of data.
+ open(PROFILE, "<$fname") || error("$fname: $!\n");
+ binmode PROFILE; # New perls do UTF-8 processing
+ my $header = ReadProfileHeader(*PROFILE);
+ if (!defined($header)) { # means "at EOF"
+ error("Profile is empty.\n");
+ }
+
+ my $symbols;
+ if ($header =~ m/^--- *$symbol_marker/o) {
+ # Verify that the user asked for a symbolized profile
+ if (!$main::use_symbolized_profile) {
+ # we have both a binary and symbolized profiles, abort
+ error("FATAL ERROR: Symbolized profile\n $fname\ncannot be used with " .
+ "a binary arg. Try again without passing\n $prog\n");
+ }
+ # Read the symbol section of the symbolized profile file.
+ $symbols = ReadSymbols(*PROFILE{IO});
+ # Read the next line to get the header for the remaining profile.
+ $header = ReadProfileHeader(*PROFILE) || "";
+ }
+
+ if ($header =~ m/^--- *($heap_marker|$growth_marker)/o) {
+ # Skip "--- ..." line for profile types that have their own headers.
+ $header = ReadProfileHeader(*PROFILE) || "";
+ }
+
+ $main::profile_type = '';
+
+ if ($header =~ m/^heap profile:.*$growth_marker/o) {
+ $main::profile_type = 'growth';
+ $result = ReadHeapProfile($prog, *PROFILE, $header);
+ } elsif ($header =~ m/^heap profile:/) {
+ $main::profile_type = 'heap';
+ $result = ReadHeapProfile($prog, *PROFILE, $header);
+ } elsif ($header =~ m/^heap/) {
+ $main::profile_type = 'heap';
+ $result = ReadThreadedHeapProfile($prog, $fname, $header);
+ } elsif ($header =~ m/^--- *$contention_marker/o) {
+ $main::profile_type = 'contention';
+ $result = ReadSynchProfile($prog, *PROFILE);
+ } elsif ($header =~ m/^--- *Stacks:/) {
+ print STDERR
+ "Old format contention profile: mistakenly reports " .
+ "condition variable signals as lock contentions.\n";
+ $main::profile_type = 'contention';
+ $result = ReadSynchProfile($prog, *PROFILE);
+ } elsif ($header =~ m/^--- *$profile_marker/) {
+ # the binary cpu profile data starts immediately after this line
+ $main::profile_type = 'cpu';
+ $result = ReadCPUProfile($prog, $fname, *PROFILE);
+ } else {
+ if (defined($symbols)) {
+ # a symbolized profile contains a format we don't recognize, bail out
+ error("$fname: Cannot recognize profile section after symbols.\n");
+ }
+ # no ascii header present -- must be a CPU profile
+ $main::profile_type = 'cpu';
+ $result = ReadCPUProfile($prog, $fname, *PROFILE);
+ }
+
+ close(PROFILE);
+
+ # if we got symbols along with the profile, return those as well
+ if (defined($symbols)) {
+ $result->{symbols} = $symbols;
+ }
+
+ return $result;
+}
+
+# Subtract one from caller pc so we map back to call instr.
+# However, don't do this if we're reading a symbolized profile
+# file, in which case the subtract-one was done when the file
+# was written.
+#
+# We apply the same logic to all readers, though ReadCPUProfile uses an
+# independent implementation.
+sub FixCallerAddresses {
+ my $stack = shift;
+ # --raw/http: Always subtract one from pc's, because PrintSymbolizedProfile()
+ # dumps unadjusted profiles.
+ {
+ $stack =~ /(\s)/;
+ my $delimiter = $1;
+ my @addrs = split(' ', $stack);
+ my @fixedaddrs;
+ $#fixedaddrs = $#addrs;
+ if ($#addrs >= 0) {
+ $fixedaddrs[0] = $addrs[0];
+ }
+ for (my $i = 1; $i <= $#addrs; $i++) {
+ $fixedaddrs[$i] = AddressSub($addrs[$i], "0x1");
+ }
+ return join $delimiter, @fixedaddrs;
+ }
+}
+
+# CPU profile reader
+sub ReadCPUProfile {
+ my $prog = shift;
+ my $fname = shift; # just used for logging
+ local *PROFILE = shift;
+ my $version;
+ my $period;
+ my $i;
+ my $profile = {};
+ my $pcs = {};
+
+ # Parse string into array of slots.
+ my $slots = CpuProfileStream->new(*PROFILE, $fname);
+
+ # Read header. The current header version is a 5-element structure
+ # containing:
+ # 0: header count (always 0)
+ # 1: header "words" (after this one: 3)
+ # 2: format version (0)
+ # 3: sampling period (usec)
+ # 4: unused padding (always 0)
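+ # e.g. a profile sampled every 10ms begins with the five slots
+ # [0, 3, 0, 10000, 0] (illustrative).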
+ if ($slots->get(0) != 0 ) {
+ error("$fname: not a profile file, or old format profile file\n");
+ }
+ $i = 2 + $slots->get(1);
+ $version = $slots->get(2);
+ $period = $slots->get(3);
+ # Do some sanity checking on these header values.
+ if ($version > (2**32) || $period > (2**32) || $i > (2**32) || $i < 5) {
+ error("$fname: not a profile file, or corrupted profile file\n");
+ }
+
+ # Parse profile
+ while ($slots->get($i) != -1) {
+ my $n = $slots->get($i++);
+ my $d = $slots->get($i++);
+ if ($d > (2**16)) { # TODO(csilvers): what's a reasonable max-stack-depth?
+ my $addr = sprintf("0%o", $i * ($address_length == 8 ? 4 : 8));
+ print STDERR "At index $i (address $addr):\n";
+ error("$fname: stack trace depth >= 2**32\n");
+ }
+ if ($slots->get($i) == 0) {
+ # End of profile data marker
+ $i += $d;
+ last;
+ }
+
+ # Make key out of the stack entries
+ my @k = ();
+ for (my $j = 0; $j < $d; $j++) {
+ my $pc = $slots->get($i+$j);
+ # Subtract one from caller pc so we map back to call instr.
+ $pc--;
+ $pc = sprintf("%0*x", $address_length, $pc);
+ $pcs->{$pc} = 1;
+ push @k, $pc;
+ }
+
+ AddEntry($profile, (join "\n", @k), $n);
+ $i += $d;
+ }
+
+ # Parse map
+ my $map = '';
+ seek(PROFILE, $i * 4, 0);
+ read(PROFILE, $map, (stat PROFILE)[7]);
+
+ my $r = {};
+ $r->{version} = $version;
+ $r->{period} = $period;
+ $r->{profile} = $profile;
+ $r->{libs} = ParseLibraries($prog, $map, $pcs);
+ $r->{pcs} = $pcs;
+
+ return $r;
+}
+
+sub HeapProfileIndex {
+ my $index = 1;
+ if ($main::opt_inuse_space) {
+ $index = 1;
+ } elsif ($main::opt_inuse_objects) {
+ $index = 0;
+ } elsif ($main::opt_alloc_space) {
+ $index = 3;
+ } elsif ($main::opt_alloc_objects) {
+ $index = 2;
+ }
+ return $index;
+}
+
+sub ReadMappedLibraries {
+ my $fh = shift;
+ my $map = "";
+ # Read the /proc/self/maps data
+ while (<$fh>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ $map .= $_;
+ }
+ return $map;
+}
+
+sub ReadMemoryMap {
+ my $fh = shift;
+ my $map = "";
+ # Read /proc/self/maps data as formatted by DumpAddressMap()
+ my $buildvar = "";
+ while (<$fh>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ # Parse "build=<dir>" specification if supplied
+ if (m/^\s*build=(.*)\n/) {
+ $buildvar = $1;
+ }
+
+ # Expand "$build" variable if available
+ $_ =~ s/\$build\b/$buildvar/g;
+
+ $map .= $_;
+ }
+ return $map;
+}
+
+sub AdjustSamples {
+ my ($sample_adjustment, $sampling_algorithm, $n1, $s1, $n2, $s2) = @_;
+ if ($sample_adjustment) {
+ if ($sampling_algorithm == 2) {
+ # Remote-heap version 2
+ # The sampling frequency is the rate of a Poisson process.
+ # This means that the probability of sampling an allocation of
+ # size X with sampling rate Y is 1 - exp(-X/Y)
+ if ($n1 != 0) {
+ my $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
+ my $scale_factor = 1/(1 - exp(-$ratio));
+ $n1 *= $scale_factor;
+ $s1 *= $scale_factor;
+ }
+ if ($n2 != 0) {
+ my $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
+ my $scale_factor = 1/(1 - exp(-$ratio));
+ $n2 *= $scale_factor;
+ $s2 *= $scale_factor;
+ }
+ } else {
+ # Remote-heap version 1
+ my $ratio;
+ $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
+ if ($ratio < 1) {
+ $n1 /= $ratio;
+ $s1 /= $ratio;
+ }
+ $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
+ if ($ratio < 1) {
+ $n2 /= $ratio;
+ $s2 /= $ratio;
+ }
+ }
+ }
+ return ($n1, $s1, $n2, $s2);
+}
+
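+# Worked example of the v2 adjustment above, with a (hypothetical)
+# sampling rate of 524288 bytes and an entry of 10 samples / 81920 bytes:
+#   ratio        = (81920 / 10) / 524288  = 0.015625
+#   scale_factor = 1 / (1 - exp(-0.015625)) ~= 64.5
+# so the entry is reported as roughly 645 allocations / ~5.3 MB.
+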
+sub ReadHeapProfile {
+ my $prog = shift;
+ local *PROFILE = shift;
+ my $header = shift;
+
+ my $index = HeapProfileIndex();
+
+ # Find the type of this profile. The header line looks like:
+ # heap profile: 1246: 8800744 [ 1246: 8800744] @ <heap-url>/266053
+ # There are two pairs <count: size>, the first inuse objects/space, and the
+ # second allocated objects/space. This is followed optionally by a profile
+ # type, and if that is present, optionally by a sampling frequency.
+ # For remote heap profiles (v1):
+ # The interpretation of the sampling frequency is that the profiler, for
+ # each sample, calculates a uniformly distributed random integer less than
+ # the given value, and records the next sample after that many bytes have
+ # been allocated. Therefore, the expected sample interval is half of the
+ # given frequency. By default, if not specified, the expected sample
+ # interval is 128KB. Only remote-heap-page profiles are adjusted for
+ # sample size.
+ # For remote heap profiles (v2):
+ # The sampling frequency is the rate of a Poisson process. This means that
+ # the probability of sampling an allocation of size X with sampling rate Y
+ # is 1 - exp(-X/Y)
+ # For version 2, a typical header line might look like this:
+ # heap profile: 1922: 127792360 [ 1922: 127792360] @ <heap-url>_v2/524288
+ # the trailing number (524288) is the sampling rate. (Version 1 showed
+ # double the 'rate' here)
+ my $sampling_algorithm = 0;
+ my $sample_adjustment = 0;
+ chomp($header);
+ my $type = "unknown";
+ if ($header =~ m"^heap profile:\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\](\s*@\s*([^/]*)(/(\d+))?)?") {
+ if (defined($6) && ($6 ne '')) {
+ $type = $6;
+ my $sample_period = $8;
+ # $type is "heapprofile" for profiles generated by the
+ # heap-profiler, and either "heap" or "heap_v2" for profiles
+ # generated by sampling directly within tcmalloc. It can also
+ # be "growth" for heap-growth profiles. The first is typically
+ # found for profiles generated locally, and the others for
+ # remote profiles.
+ if (($type eq "heapprofile") || ($type !~ /heap/) ) {
+ # No need to adjust for the sampling rate with heap-profiler-derived data
+ $sampling_algorithm = 0;
+ } elsif ($type =~ /_v2/) {
+ $sampling_algorithm = 2; # version 2 sampling
+ if (defined($sample_period) && ($sample_period ne '')) {
+ $sample_adjustment = int($sample_period);
+ }
+ } else {
+ $sampling_algorithm = 1; # version 1 sampling
+ if (defined($sample_period) && ($sample_period ne '')) {
+ $sample_adjustment = int($sample_period)/2;
+ }
+ }
+ } else {
+ # We detect whether or not this is a remote-heap profile by checking
+ # that the total-allocated stats ($n2,$s2) are exactly the
+ # same as the in-use stats ($n1,$s1). It is remotely conceivable
+ # that a non-remote-heap profile may pass this check, but it is hard
+ # to imagine how that could happen.
+ # In this case it's so old it's guaranteed to be remote-heap version 1.
+ my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
+ if (($n1 == $n2) && ($s1 == $s2)) {
+ # This is likely to be a remote-heap based sample profile
+ $sampling_algorithm = 1;
+ }
+ }
+ }
+
+ if ($sampling_algorithm > 0) {
+ # For remote-heap generated profiles, adjust the counts and sizes to
+ # account for the sample rate (we sample once every 128KB by default).
+ if ($sample_adjustment == 0) {
+ # Turn on profile adjustment.
+ $sample_adjustment = 128*1024;
+ print STDERR "Adjusting heap profiles for 1-in-128KB sampling rate\n";
+ } else {
+ printf STDERR ("Adjusting heap profiles for 1-in-%d sampling rate\n",
+ $sample_adjustment);
+ }
+ if ($sampling_algorithm > 1) {
+ # We don't bother printing anything for the original version (version 1)
+ printf STDERR "Heap version $sampling_algorithm\n";
+ }
+ }
+
+ my $profile = {};
+ my $pcs = {};
+ my $map = "";
+
+ while (<PROFILE>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ if (/^MAPPED_LIBRARIES:/) {
+ $map .= ReadMappedLibraries(*PROFILE);
+ last;
+ }
+
+ if (/^--- Memory map:/) {
+ $map .= ReadMemoryMap(*PROFILE);
+ last;
+ }
+
+ # Read entry of the form:
+ # <count1>: <bytes1> [<count2>: <bytes2>] @ a1 a2 a3 ... an
+ s/^\s*//;
+ s/\s*$//;
+ if (m/^\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]\s+@\s+(.*)$/) {
+ my $stack = $5;
+ my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
+ my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
+ $n1, $s1, $n2, $s2);
+ AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
+ }
+ }
+
+ my $r = {};
+ $r->{version} = "heap";
+ $r->{period} = 1;
+ $r->{profile} = $profile;
+ $r->{libs} = ParseLibraries($prog, $map, $pcs);
+ $r->{pcs} = $pcs;
+ return $r;
+}
+
+sub ReadThreadedHeapProfile {
+ my ($prog, $fname, $header) = @_;
+
+ my $index = HeapProfileIndex();
+ my $sampling_algorithm = 0;
+ my $sample_adjustment = 0;
+ chomp($header);
+ my $type = "unknown";
+ # Assuming a very specific type of header for now.
+ if ($header =~ m"^heap_v2/(\d+)") {
+ $type = "_v2";
+ $sampling_algorithm = 2;
+ $sample_adjustment = int($1);
+ }
+ if ($type ne "_v2" || !defined($sample_adjustment)) {
+ die "Threaded heap profiles require v2 sampling with a sample rate\n";
+ }
+
+ my $profile = {};
+ my $thread_profiles = {};
+ my $pcs = {};
+ my $map = "";
+ my $stack = "";
+
+ while (<PROFILE>) {
+ s/\r//g;
+ if (/^MAPPED_LIBRARIES:/) {
+ $map .= ReadMappedLibraries(*PROFILE);
+ last;
+ }
+
+ if (/^--- Memory map:/) {
+ $map .= ReadMemoryMap(*PROFILE);
+ last;
+ }
+
+ # Read entry of the form:
+ # @ a1 a2 ... an
+ # t*: <count1>: <bytes1> [<count2>: <bytes2>]
+ # t1: <count1>: <bytes1> [<count2>: <bytes2>]
+ # ...
+ # tn: <count1>: <bytes1> [<count2>: <bytes2>]
+ s/^\s*//;
+ s/\s*$//;
+ if (m/^@\s+(.*)$/) {
+ $stack = $1;
+ } elsif (m/^\s*(t(\*|\d+)):\s+(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]$/) {
+ if ($stack eq "") {
+ # Still in the header, so this is just a per-thread summary.
+ next;
+ }
+ my $thread = $2;
+ my ($n1, $s1, $n2, $s2) = ($3, $4, $5, $6);
+ my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
+ $n1, $s1, $n2, $s2);
+ if ($thread eq "*") {
+ AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
+ } else {
+ if (!exists($thread_profiles->{$thread})) {
+ $thread_profiles->{$thread} = {};
+ }
+ AddEntries($thread_profiles->{$thread}, $pcs,
+ FixCallerAddresses($stack), $counts[$index]);
+ }
+ }
+ }
+
+ my $r = {};
+ $r->{version} = "heap";
+ $r->{period} = 1;
+ $r->{profile} = $profile;
+ $r->{threads} = $thread_profiles;
+ $r->{libs} = ParseLibraries($prog, $map, $pcs);
+ $r->{pcs} = $pcs;
+ return $r;
+}
+
+sub ReadSynchProfile {
+ my $prog = shift;
+ local *PROFILE = shift;
+ my $header = shift;
+
+ my $map = '';
+ my $profile = {};
+ my $pcs = {};
+ my $sampling_period = 1;
+ my $cyclespernanosec = 2.8; # Default assumption for old binaries
+ my $seen_clockrate = 0;
+ my $line;
+
+ my $index = 0;
+ if ($main::opt_total_delay) {
+ $index = 0;
+ } elsif ($main::opt_contentions) {
+ $index = 1;
+ } elsif ($main::opt_mean_delay) {
+ $index = 2;
+ }
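+
+  # $index selects among the per-entry values computed below:
+  #   0 = total delay (ns), 1 = contention count, 2 = mean delay (ns).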
+
+ while ( $line = <PROFILE> ) {
+ $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines
+ if ( $line =~ /^\s*(\d+)\s+(\d+) \@\s*(.*?)\s*$/ ) {
+ my ($cycles, $count, $stack) = ($1, $2, $3);
+
+ # Convert cycles to nanoseconds
+ $cycles /= $cyclespernanosec;
+
+ # Adjust for sampling done by application
+ $cycles *= $sampling_period;
+ $count *= $sampling_period;
+
+ my @values = ($cycles, $count, $cycles / $count);
+ AddEntries($profile, $pcs, FixCallerAddresses($stack), $values[$index]);
+
+ } elsif ( $line =~ /^(slow release).*thread \d+ \@\s*(.*?)\s*$/ ||
+ $line =~ /^\s*(\d+) \@\s*(.*?)\s*$/ ) {
+ my ($cycles, $stack) = ($1, $2);
+ if ($cycles !~ /^\d+$/) {
+ next;
+ }
+
+ # Convert cycles to nanoseconds
+ $cycles /= $cyclespernanosec;
+
+ # Adjust for sampling done by application
+ $cycles *= $sampling_period;
+
+ AddEntries($profile, $pcs, FixCallerAddresses($stack), $cycles);
+
+ } elsif ( $line =~ m/^([a-z][^=]*)=(.*)$/ ) {
+ my ($variable, $value) = ($1,$2);
+ for ($variable, $value) {
+ s/^\s+//;
+ s/\s+$//;
+ }
+ if ($variable eq "cycles/second") {
+ $cyclespernanosec = $value / 1e9;
+ $seen_clockrate = 1;
+ } elsif ($variable eq "sampling period") {
+ $sampling_period = $value;
+ } elsif ($variable eq "ms since reset") {
+ # Currently nothing is done with this value in jeprof
+ # So we just silently ignore it for now
+ } elsif ($variable eq "discarded samples") {
+ # Currently nothing is done with this value in jeprof
+ # So we just silently ignore it for now
+ } else {
+ printf STDERR ("Ignoring unnknown variable in /contention output: " .
+ "'%s' = '%s'\n",$variable,$value);
+ }
+ } else {
+ # Memory map entry
+ $map .= $line;
+ }
+ }
+
+ if (!$seen_clockrate) {
+ printf STDERR ("No cycles/second entry in profile; Guessing %.1f GHz\n",
+ $cyclespernanosec);
+ }
+
+ my $r = {};
+ $r->{version} = 0;
+ $r->{period} = $sampling_period;
+ $r->{profile} = $profile;
+ $r->{libs} = ParseLibraries($prog, $map, $pcs);
+ $r->{pcs} = $pcs;
+ return $r;
+}
+
+# Given a hex value in the form "0x1abcd" or "1abcd", return either
+# "0001abcd" or "000000000001abcd", depending on the current (global)
+# address length.
+sub HexExtend {
+ my $addr = shift;
+
+ $addr =~ s/^(0x)?0*//;
+ my $zeros_needed = $address_length - length($addr);
+ if ($zeros_needed < 0) {
+ printf STDERR "Warning: address $addr is longer than address length $address_length\n";
+ return $addr;
+ }
+ return ("0" x $zeros_needed) . $addr;
+}
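+
+# For example, with a 16-nibble $address_length,
+# HexExtend("0x1abcd") returns "000000000001abcd".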
+
+##### Symbol extraction #####
+
+# Aggressively search the lib_prefix values for the given library
+# If all else fails, just return the name of the library unmodified.
+# If the lib_prefix is "/my/path,/other/path" and $file is "/lib/dir/mylib.so"
+# it will search the following locations in this order, until it finds a file:
+# /my/path/lib/dir/mylib.so
+# /other/path/lib/dir/mylib.so
+# /my/path/dir/mylib.so
+# /other/path/dir/mylib.so
+# /my/path/mylib.so
+# /other/path/mylib.so
+# /lib/dir/mylib.so (returned as last resort)
+sub FindLibrary {
+ my $file = shift;
+ my $suffix = $file;
+
+ # Search for the library as described above
+ do {
+ foreach my $prefix (@prefix_list) {
+ my $fullpath = $prefix . $suffix;
+ if (-e $fullpath) {
+ return $fullpath;
+ }
+ }
+ } while ($suffix =~ s|^/[^/]+/|/|);
+ return $file;
+}
+
+# Return path to library with debugging symbols.
+# For libc libraries, the copy in /usr/lib/debug contains debugging symbols
+sub DebuggingLibrary {
+ my $file = shift;
+
+ if ($file !~ m|^/|) {
+ return undef;
+ }
+
+ # Find debug symbol file if it's named after the library's name.
+
+ if (-f "/usr/lib/debug$file") {
+ if($main::opt_debug) { print STDERR "found debug info for $file in /usr/lib/debug$file\n"; }
+ return "/usr/lib/debug$file";
+ } elsif (-f "/usr/lib/debug$file.debug") {
+ if($main::opt_debug) { print STDERR "found debug info for $file in /usr/lib/debug$file.debug\n"; }
+ return "/usr/lib/debug$file.debug";
+ }
+
+ if(!$main::opt_debug_syms_by_id) {
+ if($main::opt_debug) { print STDERR "no debug symbols found for $file\n" };
+ return undef;
+ }
+
+ # Find debug file if it's named after the library's build ID.
+
+ my $readelf = '';
+ if (!$main::gave_up_on_elfutils) {
+ $readelf = qx/eu-readelf -n ${file}/;
+ if ($?) {
+ print STDERR "Cannot run eu-readelf. To use --debug-syms-by-id you must be on Linux, with elfutils installed.\n";
+ $main::gave_up_on_elfutils = 1;
+ return undef;
+ }
+    my $buildID;
+    $buildID = $1 if $readelf =~ /Build ID: ([A-Fa-f0-9]+)/s;
+ if (defined $buildID && length $buildID > 0) {
+ my $symbolFile = '/usr/lib/debug/.build-id/' . substr($buildID, 0, 2) . '/' . substr($buildID, 2) . '.debug';
+ if (-e $symbolFile) {
+ if($main::opt_debug) { print STDERR "found debug symbol file $symbolFile for $file\n" };
+ return $symbolFile;
+ } else {
+ if($main::opt_debug) { print STDERR "no debug symbol file found for $file, build ID: $buildID\n" };
+ return undef;
+ }
+ }
+ }
+
+ if($main::opt_debug) { print STDERR "no debug symbols found for $file, build ID unknown\n" };
+ return undef;
+}
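+
+# Example of the build-ID scheme above: a (hypothetical) Build ID of
+# "0123456789abcdef" maps to
+# /usr/lib/debug/.build-id/01/23456789abcdef.debug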
+
+
+# Parse text section header of a library using objdump
+sub ParseTextSectionHeaderFromObjdump {
+ my $lib = shift;
+
+ my $size = undef;
+ my $vma;
+ my $file_offset;
+ # Get objdump output from the library file to figure out how to
+ # map between mapped addresses and addresses in the library.
+ my $cmd = ShellEscape($obj_tool_map{"objdump"}, "-h", $lib);
+ open(OBJDUMP, "$cmd |") || error("$cmd: $!\n");
+ while (<OBJDUMP>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ # Idx Name Size VMA LMA File off Algn
+ # 10 .text 00104b2c 420156f0 420156f0 000156f0 2**4
+ # For 64-bit objects, VMA and LMA will be 16 hex digits, size and file
+ # offset may still be 8. But AddressSub below will still handle that.
+ my @x = split;
+ if (($#x >= 6) && ($x[1] eq '.text')) {
+ $size = $x[2];
+ $vma = $x[3];
+ $file_offset = $x[5];
+ last;
+ }
+ }
+ close(OBJDUMP);
+
+ if (!defined($size)) {
+ return undef;
+ }
+
+ my $r = {};
+ $r->{size} = $size;
+ $r->{vma} = $vma;
+ $r->{file_offset} = $file_offset;
+
+ return $r;
+}
+
+# Parse text section header of a library using otool (on OS X)
+sub ParseTextSectionHeaderFromOtool {
+ my $lib = shift;
+
+ my $size = undef;
+ my $vma = undef;
+ my $file_offset = undef;
+ # Get otool output from the library file to figure out how to
+ # map between mapped addresses and addresses in the library.
+ my $command = ShellEscape($obj_tool_map{"otool"}, "-l", $lib);
+ open(OTOOL, "$command |") || error("$command: $!\n");
+ my $cmd = "";
+ my $sectname = "";
+ my $segname = "";
+ foreach my $line (<OTOOL>) {
+ $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines
+ # Load command <#>
+ # cmd LC_SEGMENT
+ # [...]
+ # Section
+ # sectname __text
+ # segname __TEXT
+ # addr 0x000009f8
+ # size 0x00018b9e
+ # offset 2552
+ # align 2^2 (4)
+ # We will need to strip off the leading 0x from the hex addresses,
+ # and convert the offset into hex.
+ if ($line =~ /Load command/) {
+ $cmd = "";
+ $sectname = "";
+ $segname = "";
+ } elsif ($line =~ /Section/) {
+ $sectname = "";
+ $segname = "";
+ } elsif ($line =~ /cmd (\w+)/) {
+ $cmd = $1;
+ } elsif ($line =~ /sectname (\w+)/) {
+ $sectname = $1;
+ } elsif ($line =~ /segname (\w+)/) {
+ $segname = $1;
+ } elsif (!(($cmd eq "LC_SEGMENT" || $cmd eq "LC_SEGMENT_64") &&
+ $sectname eq "__text" &&
+ $segname eq "__TEXT")) {
+ next;
+ } elsif ($line =~ /\baddr 0x([0-9a-fA-F]+)/) {
+ $vma = $1;
+ } elsif ($line =~ /\bsize 0x([0-9a-fA-F]+)/) {
+ $size = $1;
+ } elsif ($line =~ /\boffset ([0-9]+)/) {
+ $file_offset = sprintf("%016x", $1);
+ }
+ if (defined($vma) && defined($size) && defined($file_offset)) {
+ last;
+ }
+ }
+ close(OTOOL);
+
+ if (!defined($vma) || !defined($size) || !defined($file_offset)) {
+ return undef;
+ }
+
+ my $r = {};
+ $r->{size} = $size;
+ $r->{vma} = $vma;
+ $r->{file_offset} = $file_offset;
+
+ return $r;
+}
+
+sub ParseTextSectionHeader {
+ # obj_tool_map("otool") is only defined if we're in a Mach-O environment
+ if (defined($obj_tool_map{"otool"})) {
+ my $r = ParseTextSectionHeaderFromOtool(@_);
+ if (defined($r)){
+ return $r;
+ }
+ }
+ # If otool doesn't work, or we don't have it, fall back to objdump
+ return ParseTextSectionHeaderFromObjdump(@_);
+}
+
+# Split /proc/pid/maps dump into a list of libraries
+sub ParseLibraries {
+ return if $main::use_symbol_page; # We don't need libraries info.
+ my $prog = Cwd::abs_path(shift);
+ my $map = shift;
+ my $pcs = shift;
+
+ my $result = [];
+ my $h = "[a-f0-9]+";
+ my $zero_offset = HexExtend("0");
+
+ my $buildvar = "";
+ foreach my $l (split("\n", $map)) {
+ if ($l =~ m/^\s*build=(.*)$/) {
+ $buildvar = $1;
+ }
+
+ my $start;
+ my $finish;
+ my $offset;
+ my $lib;
+ if ($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+\.(so|dll|dylib|bundle)((\.\d+)+\w*(\.\d+){0,3})?)$/i) {
+ # Full line from /proc/self/maps. Example:
+ # 40000000-40015000 r-xp 00000000 03:01 12845071 /lib/ld-2.3.2.so
+ $start = HexExtend($1);
+ $finish = HexExtend($2);
+ $offset = HexExtend($3);
+ $lib = $4;
+ $lib =~ s|\\|/|g; # turn windows-style paths into unix-style paths
+ } elsif ($l =~ /^\s*($h)-($h):\s*(\S+\.so(\.\d+)*)/) {
+ # Cooked line from DumpAddressMap. Example:
+ # 40000000-40015000: /lib/ld-2.3.2.so
+ $start = HexExtend($1);
+ $finish = HexExtend($2);
+ $offset = $zero_offset;
+ $lib = $3;
+ } elsif (($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+)$/i) && ($4 eq $prog)) {
+ # PIEs and address space randomization do not play well with our
+ # default assumption that main executable is at lowest
+ # addresses. So we're detecting main executable in
+ # /proc/self/maps as well.
+ $start = HexExtend($1);
+ $finish = HexExtend($2);
+ $offset = HexExtend($3);
+ $lib = $4;
+ $lib =~ s|\\|/|g; # turn windows-style paths into unix-style paths
+ }
+ # FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in
+ # function procfs_doprocmap (sys/fs/procfs/procfs_map.c)
+ #
+ # Example:
+    # 0x800600000 0x80061a000 26 0 0xfffff800035a0000 r-x 75 33 0x1004 COW NC vnode /libexec/ld-elf.so.1 NCH -1
+ elsif ($l =~ /^(0x$h)\s(0x$h)\s\d+\s\d+\s0x$h\sr-x\s\d+\s\d+\s0x\d+\s(COW|NCO)\s(NC|NNC)\svnode\s(\S+\.so(\.\d+)*)/) {
+ $start = HexExtend($1);
+ $finish = HexExtend($2);
+ $offset = $zero_offset;
+ $lib = FindLibrary($5);
+
+ } else {
+ next;
+ }
+
+ # Expand "$build" variable if available
+ $lib =~ s/\$build\b/$buildvar/g;
+
+ $lib = FindLibrary($lib);
+
+ # Check for pre-relocated libraries, which use pre-relocated symbol tables
+ # and thus require adjusting the offset that we'll use to translate
+ # VM addresses into symbol table addresses.
+ # Only do this if we're not going to fetch the symbol table from a
+ # debugging copy of the library.
+ if (!DebuggingLibrary($lib)) {
+ my $text = ParseTextSectionHeader($lib);
+ if (defined($text)) {
+ my $vma_offset = AddressSub($text->{vma}, $text->{file_offset});
+ $offset = AddressAdd($offset, $vma_offset);
+ }
+ }
+
+ if($main::opt_debug) { printf STDERR "$start:$finish ($offset) $lib\n"; }
+ push(@{$result}, [$lib, $start, $finish, $offset]);
+ }
+
+ # Append special entry for additional library (not relocated)
+ if ($main::opt_lib ne "") {
+ my $text = ParseTextSectionHeader($main::opt_lib);
+ if (defined($text)) {
+ my $start = $text->{vma};
+ my $finish = AddressAdd($start, $text->{size});
+
+ push(@{$result}, [$main::opt_lib, $start, $finish, $start]);
+ }
+ }
+
+ # Append special entry for the main program. This covers
+  # 0..max_pc_value_seen, so that pc values not found in one of the
+  # library ranges are treated as coming from the main program
+  # binary.
+ my $min_pc = HexExtend("0");
+ my $max_pc = $min_pc; # find the maximal PC value in any sample
+ foreach my $pc (keys(%{$pcs})) {
+ if (HexExtend($pc) gt $max_pc) { $max_pc = HexExtend($pc); }
+ }
+ push(@{$result}, [$prog, $min_pc, $max_pc, $zero_offset]);
+
+ return $result;
+}
+
+# Add two hex addresses of length $address_length.
+# Run jeprof --test for unit test if this is changed.
+sub AddressAdd {
+ my $addr1 = shift;
+ my $addr2 = shift;
+ my $sum;
+
+ if ($address_length == 8) {
+ # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
+ $sum = (hex($addr1)+hex($addr2)) % (0x10000000 * 16);
+ return sprintf("%08x", $sum);
+
+ } else {
+ # Do the addition in 7-nibble chunks to trivialize carry handling.
+
+ if ($main::opt_debug and $main::opt_test) {
+ print STDERR "AddressAdd $addr1 + $addr2 = ";
+ }
+
+ my $a1 = substr($addr1,-7);
+ $addr1 = substr($addr1,0,-7);
+ my $a2 = substr($addr2,-7);
+ $addr2 = substr($addr2,0,-7);
+ $sum = hex($a1) + hex($a2);
+ my $c = 0;
+ if ($sum > 0xfffffff) {
+ $c = 1;
+ $sum -= 0x10000000;
+ }
+ my $r = sprintf("%07x", $sum);
+
+ $a1 = substr($addr1,-7);
+ $addr1 = substr($addr1,0,-7);
+ $a2 = substr($addr2,-7);
+ $addr2 = substr($addr2,0,-7);
+ $sum = hex($a1) + hex($a2) + $c;
+ $c = 0;
+ if ($sum > 0xfffffff) {
+ $c = 1;
+ $sum -= 0x10000000;
+ }
+ $r = sprintf("%07x", $sum) . $r;
+
+ $sum = hex($addr1) + hex($addr2) + $c;
+ if ($sum > 0xff) { $sum -= 0x100; }
+ $r = sprintf("%02x", $sum) . $r;
+
+ if ($main::opt_debug and $main::opt_test) { print STDERR "$r\n"; }
+
+ return $r;
+ }
+}
+
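+# Worked example of the chunked path above, with $address_length == 16:
+#   AddressAdd("00000000ffffffff", "0000000000000001") eq "0000000100000000"
+# The low 7-nibble chunks sum to 0x10000000, which is reduced to "0000000"
+# with a carry into the middle chunk.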
+
+# Subtract two hex addresses of length $address_length.
+# Run jeprof --test for unit test if this is changed.
+sub AddressSub {
+ my $addr1 = shift;
+ my $addr2 = shift;
+ my $diff;
+
+ if ($address_length == 8) {
+ # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
+ $diff = (hex($addr1)-hex($addr2)) % (0x10000000 * 16);
+ return sprintf("%08x", $diff);
+
+ } else {
+ # Do the addition in 7-nibble chunks to trivialize borrow handling.
+ # if ($main::opt_debug) { print STDERR "AddressSub $addr1 - $addr2 = "; }
+
+ my $a1 = hex(substr($addr1,-7));
+ $addr1 = substr($addr1,0,-7);
+ my $a2 = hex(substr($addr2,-7));
+ $addr2 = substr($addr2,0,-7);
+ my $b = 0;
+ if ($a2 > $a1) {
+ $b = 1;
+ $a1 += 0x10000000;
+ }
+ $diff = $a1 - $a2;
+ my $r = sprintf("%07x", $diff);
+
+ $a1 = hex(substr($addr1,-7));
+ $addr1 = substr($addr1,0,-7);
+ $a2 = hex(substr($addr2,-7)) + $b;
+ $addr2 = substr($addr2,0,-7);
+ $b = 0;
+ if ($a2 > $a1) {
+ $b = 1;
+ $a1 += 0x10000000;
+ }
+ $diff = $a1 - $a2;
+ $r = sprintf("%07x", $diff) . $r;
+
+ $a1 = hex($addr1);
+ $a2 = hex($addr2) + $b;
+ if ($a2 > $a1) { $a1 += 0x100; }
+ $diff = $a1 - $a2;
+ $r = sprintf("%02x", $diff) . $r;
+
+ # if ($main::opt_debug) { print STDERR "$r\n"; }
+
+ return $r;
+ }
+}
+
+# Increment a hex address of length $address_length.
+# Run jeprof --test for unit test if this is changed.
+sub AddressInc {
+ my $addr = shift;
+ my $sum;
+
+ if ($address_length == 8) {
+ # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
+ $sum = (hex($addr)+1) % (0x10000000 * 16);
+ return sprintf("%08x", $sum);
+
+ } else {
+ # Do the addition in 7-nibble chunks to trivialize carry handling.
+ # We are always doing this to step through the addresses in a function,
+ # and will almost never overflow the first chunk, so we check for this
+ # case and exit early.
+
+ # if ($main::opt_debug) { print STDERR "AddressInc $addr1 = "; }
+
+ my $a1 = substr($addr,-7);
+ $addr = substr($addr,0,-7);
+ $sum = hex($a1) + 1;
+ my $r = sprintf("%07x", $sum);
+ if ($sum <= 0xfffffff) {
+ $r = $addr . $r;
+ # if ($main::opt_debug) { print STDERR "$r\n"; }
+ return HexExtend($r);
+ } else {
+ $r = "0000000";
+ }
+
+ $a1 = substr($addr,-7);
+ $addr = substr($addr,0,-7);
+ $sum = hex($a1) + 1;
+ $r = sprintf("%07x", $sum) . $r;
+ if ($sum <= 0xfffffff) {
+ $r = $addr . $r;
+ # if ($main::opt_debug) { print STDERR "$r\n"; }
+ return HexExtend($r);
+ } else {
+ $r = "00000000000000";
+ }
+
+ $sum = hex($addr) + 1;
+ if ($sum > 0xff) { $sum -= 0x100; }
+ $r = sprintf("%02x", $sum) . $r;
+
+ # if ($main::opt_debug) { print STDERR "$r\n"; }
+ return $r;
+ }
+}
+
+# Extract symbols for all PC values found in profile
+sub ExtractSymbols {
+ my $libs = shift;
+ my $pcset = shift;
+
+ my $symbols = {};
+
+ # Map each PC value to the containing library. To make this faster,
+ # we sort libraries by their starting pc value (highest first), and
+ # advance through the libraries as we advance the pc. Sometimes the
+ # addresses of libraries may overlap with the addresses of the main
+ # binary, so to make sure the libraries 'win', we iterate over the
+ # libraries in reverse order (which assumes the binary doesn't start
+ # in the middle of a library, which seems a fair assumption).
+ my @pcs = (sort { $a cmp $b } keys(%{$pcset})); # pcset is 0-extended strings
+ foreach my $lib (sort {$b->[1] cmp $a->[1]} @{$libs}) {
+ my $libname = $lib->[0];
+ my $start = $lib->[1];
+ my $finish = $lib->[2];
+ my $offset = $lib->[3];
+
+ # Use debug library if it exists
+ my $debug_libname = DebuggingLibrary($libname);
+ if ($debug_libname) {
+ $libname = $debug_libname;
+ }
+
+ # Get list of pcs that belong in this library.
+ my $contained = [];
+ my ($start_pc_index, $finish_pc_index);
+ # Find smallest finish_pc_index such that $finish < $pc[$finish_pc_index].
+ for ($finish_pc_index = $#pcs + 1; $finish_pc_index > 0;
+ $finish_pc_index--) {
+ last if $pcs[$finish_pc_index - 1] le $finish;
+ }
+ # Find smallest start_pc_index such that $start <= $pc[$start_pc_index].
+ for ($start_pc_index = $finish_pc_index; $start_pc_index > 0;
+ $start_pc_index--) {
+ last if $pcs[$start_pc_index - 1] lt $start;
+ }
+ # This keeps PC values higher than $pc[$finish_pc_index] in @pcs,
+ # in case there are overlaps in libraries and the main binary.
+ @{$contained} = splice(@pcs, $start_pc_index,
+ $finish_pc_index - $start_pc_index);
+ # Map to symbols
+ MapToSymbols($libname, AddressSub($start, $offset), $contained, $symbols);
+ }
+
+ return $symbols;
+}
+
+# Map list of PC values to symbols for a given image
+sub MapToSymbols {
+ my $image = shift;
+ my $offset = shift;
+ my $pclist = shift;
+ my $symbols = shift;
+
+ my $debug = 0;
+
+ # Ignore empty binaries
+ if ($#{$pclist} < 0) { return; }
+
+ # Figure out the addr2line command to use
+ my $addr2line = $obj_tool_map{"addr2line"};
+ my $cmd = ShellEscape($addr2line, "-f", "-C", "-e", $image);
+ if (exists $obj_tool_map{"addr2line_pdb"}) {
+ $addr2line = $obj_tool_map{"addr2line_pdb"};
+ $cmd = ShellEscape($addr2line, "--demangle", "-f", "-C", "-e", $image);
+ }
+
+ # If "addr2line" isn't installed on the system at all, just use
+ # nm to get what info we can (function names, but not line numbers).
+ if (system(ShellEscape($addr2line, "--help") . " >$dev_null 2>&1") != 0) {
+ MapSymbolsWithNM($image, $offset, $pclist, $symbols);
+ return;
+ }
+
+ # "addr2line -i" can produce a variable number of lines per input
+ # address, with no separator that allows us to tell when data for
+ # the next address starts. So we find the address for a special
+ # symbol (_fini) and interleave this address between all real
+ # addresses passed to addr2line. The name of this special symbol
+ # can then be used as a separator.
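+  # For example, if _fini lives at (hypothetical) address 4001c0, the
+  # temp file fed to addr2line looks like:
+  #   00000000004005d4
+  #   00000000004001c0
+  #   00000000004007f2
+  #   00000000004001c0
+  # and each "_fini" line in the output closes one address's entries.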
+ $sep_address = undef; # May be filled in by MapSymbolsWithNM()
+ my $nm_symbols = {};
+ MapSymbolsWithNM($image, $offset, $pclist, $nm_symbols);
+ if (defined($sep_address)) {
+ # Only add " -i" to addr2line if the binary supports it.
+ # addr2line --help returns 0, but not if it sees an unknown flag first.
+ if (system("$cmd -i --help >$dev_null 2>&1") == 0) {
+ $cmd .= " -i";
+ } else {
+ $sep_address = undef; # no need for sep_address if we don't support -i
+ }
+ }
+
+ # Make file with all PC values with intervening 'sep_address' so
+ # that we can reliably detect the end of inlined function list
+ open(ADDRESSES, ">$main::tmpfile_sym") || error("$main::tmpfile_sym: $!\n");
+ if ($debug) { print("---- $image ---\n"); }
+ for (my $i = 0; $i <= $#{$pclist}; $i++) {
+ # addr2line always reads hex addresses, and does not need '0x' prefix.
+ if ($debug) { printf STDERR ("%s\n", $pclist->[$i]); }
+ printf ADDRESSES ("%s\n", AddressSub($pclist->[$i], $offset));
+ if (defined($sep_address)) {
+ printf ADDRESSES ("%s\n", $sep_address);
+ }
+ }
+ close(ADDRESSES);
+ if ($debug) {
+ print("----\n");
+ system("cat", $main::tmpfile_sym);
+ print("----\n");
+ system("$cmd < " . ShellEscape($main::tmpfile_sym));
+ print("----\n");
+ }
+
+ open(SYMBOLS, "$cmd <" . ShellEscape($main::tmpfile_sym) . " |")
+ || error("$cmd: $!\n");
+ my $count = 0; # Index in pclist
+ while (<SYMBOLS>) {
+ # Read fullfunction and filelineinfo from next pair of lines
+ s/\r?\n$//g;
+ my $fullfunction = $_;
+ $_ = <SYMBOLS>;
+ s/\r?\n$//g;
+ my $filelinenum = $_;
+
+ if (defined($sep_address) && $fullfunction eq $sep_symbol) {
+ # Terminating marker for data for this address
+ $count++;
+ next;
+ }
+
+ $filelinenum =~ s|\\|/|g; # turn windows-style paths into unix-style paths
+
+ my $pcstr = $pclist->[$count];
+ my $function = ShortFunctionName($fullfunction);
+ my $nms = $nm_symbols->{$pcstr};
+ if (defined($nms)) {
+ if ($fullfunction eq '??') {
+ # nm found a symbol for us.
+ $function = $nms->[0];
+ $fullfunction = $nms->[2];
+ } else {
+ # MapSymbolsWithNM tags each routine with its starting address,
+ # useful in case the image has multiple occurrences of this
+ # routine. (It uses a syntax that resembles template parameters,
+ # that are automatically stripped out by ShortFunctionName().)
+ # addr2line does not provide the same information. So we check
+ # if nm disambiguated our symbol, and if so take the annotated
+ # (nm) version of the routine-name. TODO(csilvers): this won't
+ # catch overloaded, inlined symbols, which nm doesn't see.
+ # Better would be to do a check similar to nm's, in this fn.
+ if ($nms->[2] =~ m/^\Q$function\E/) { # sanity check it's the right fn
+ $function = $nms->[0];
+ $fullfunction = $nms->[2];
+ }
+ }
+ }
+
+ # Prepend to accumulated symbols for pcstr
+ # (so that caller comes before callee)
+ my $sym = $symbols->{$pcstr};
+ if (!defined($sym)) {
+ $sym = [];
+ $symbols->{$pcstr} = $sym;
+ }
+ unshift(@{$sym}, $function, $filelinenum, $fullfunction);
+ if ($debug) { printf STDERR ("%s => [%s]\n", $pcstr, join(" ", @{$sym})); }
+ if (!defined($sep_address)) {
+ # Inlining is off, so this entry ends immediately
+ $count++;
+ }
+ }
+ close(SYMBOLS);
+}
+
+# Use nm to map the list of referenced PCs to symbols. Return true iff we
+# are able to read procedure information via nm.
+sub MapSymbolsWithNM {
+ my $image = shift;
+ my $offset = shift;
+ my $pclist = shift;
+ my $symbols = shift;
+
+ # Get nm output sorted by increasing address
+ my $symbol_table = GetProcedureBoundaries($image, ".");
+ if (!%{$symbol_table}) {
+ return 0;
+ }
+ # Start addresses are already the right length (8 or 16 hex digits).
+ my @names = sort { $symbol_table->{$a}->[0] cmp $symbol_table->{$b}->[0] }
+ keys(%{$symbol_table});
+
+ if ($#names < 0) {
+ # No symbols: just use addresses
+ foreach my $pc (@{$pclist}) {
+ my $pcstr = "0x" . $pc;
+ $symbols->{$pc} = [$pcstr, "?", $pcstr];
+ }
+ return 0;
+ }
+
+ # Sort addresses so we can do a join against nm output
+ my $index = 0;
+ my $fullname = $names[0];
+ my $name = ShortFunctionName($fullname);
+ foreach my $pc (sort { $a cmp $b } @{$pclist}) {
+ # Adjust for mapped offset
+ my $mpc = AddressSub($pc, $offset);
+ while (($index < $#names) && ($mpc ge $symbol_table->{$fullname}->[1])){
+ $index++;
+ $fullname = $names[$index];
+ $name = ShortFunctionName($fullname);
+ }
+ if ($mpc lt $symbol_table->{$fullname}->[1]) {
+ $symbols->{$pc} = [$name, "?", $fullname];
+ } else {
+ my $pcstr = "0x" . $pc;
+ $symbols->{$pc} = [$pcstr, "?", $pcstr];
+ }
+ }
+ return 1;
+}
+
+sub ShortFunctionName {
+ my $function = shift;
+ while ($function =~ s/\([^()]*\)(\s*const)?//g) { } # Argument types
+ while ($function =~ s/<[^<>]*>//g) { } # Remove template arguments
+ $function =~ s/^.*\s+(\w+::)/$1/; # Remove leading type
+ return $function;
+}
+
+# Trim overly long symbols found in disassembler output
+sub CleanDisassembly {
+ my $d = shift;
+ while ($d =~ s/\([^()%]*\)(\s*const)?//g) { } # Argument types, not (%rax)
+ while ($d =~ s/(\w+)<[^<>]*>/$1/g) { } # Remove template arguments
+ return $d;
+}
+
+# Clean file name for display
+sub CleanFileName {
+ my ($f) = @_;
+ $f =~ s|^/proc/self/cwd/||;
+ $f =~ s|^\./||;
+ return $f;
+}
+
+# Make address relative to section and clean up for display
+sub UnparseAddress {
+ my ($offset, $address) = @_;
+ $address = AddressSub($address, $offset);
+ $address =~ s/^0x//;
+ $address =~ s/^0*//;
+ return $address;
+}
+
+##### Miscellaneous #####
+
+# Find the right versions of the above object tools to use. The
+# argument is the program file being analyzed, and should be an ELF
+# 32-bit or ELF 64-bit executable file. The location of the tools
+# is determined by considering the following options in this order:
+# 1) --tools option, if set
+# 2) JEPROF_TOOLS environment variable, if set
+# 3) the environment
+sub ConfigureObjTools {
+ my $prog_file = shift;
+
+ # Check for the existence of $prog_file because /usr/bin/file does not
+ # predictably return error status in prod.
+ (-e $prog_file) || error("$prog_file does not exist.\n");
+
+ my $file_type = undef;
+ if (-e "/usr/bin/file") {
+ # Follow symlinks (at least for systems where "file" supports that).
+ my $escaped_prog_file = ShellEscape($prog_file);
+ $file_type = `/usr/bin/file -L $escaped_prog_file 2>$dev_null ||
+ /usr/bin/file $escaped_prog_file`;
+  } elsif ($^O eq "MSWin32") {
+ $file_type = "MS Windows";
+ } else {
+ print STDERR "WARNING: Can't determine the file type of $prog_file";
+ }
+
+ if ($file_type =~ /64-bit/) {
+ # Change $address_length to 16 if the program file is ELF 64-bit.
+ # We can't detect this from many (most?) heap or lock contention
+ # profiles, since the actual addresses referenced are generally in low
+ # memory even for 64-bit programs.
+ $address_length = 16;
+ }
+
+ if ($file_type =~ /MS Windows/) {
+ # For windows, we provide a version of nm and addr2line as part of
+ # the opensource release, which is capable of parsing
+ # Windows-style PDB executables. It should live in the path, or
+ # in the same directory as jeprof.
+ $obj_tool_map{"nm_pdb"} = "nm-pdb";
+ $obj_tool_map{"addr2line_pdb"} = "addr2line-pdb";
+ }
+
+ if ($file_type =~ /Mach-O/) {
+ # OS X uses otool to examine Mach-O files, rather than objdump.
+ $obj_tool_map{"otool"} = "otool";
+ $obj_tool_map{"addr2line"} = "false"; # no addr2line
+ $obj_tool_map{"objdump"} = "false"; # no objdump
+ }
+
+ # Go fill in %obj_tool_map with the pathnames to use:
+ foreach my $tool (keys %obj_tool_map) {
+ $obj_tool_map{$tool} = ConfigureTool($obj_tool_map{$tool});
+ }
+}
+
+# Returns the path of a caller-specified object tool. If --tools or
+# JEPROF_TOOLS are specified, then returns the full path to the tool
+# with that prefix. Otherwise, returns the path unmodified (which
+# means we will look for it on PATH).
+sub ConfigureTool {
+ my $tool = shift;
+ my $path;
+
+ # --tools (or $JEPROF_TOOLS) is a comma separated list, where each
+ # item is either a) a pathname prefix, or b) a map of the form
+ # <tool>:<path>. First we look for an entry of type (b) for our
+ # tool. If one is found, we use it. Otherwise, we consider all the
+ # pathname prefixes in turn, until one yields an existing file. If
+ # none does, we use a default path.
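+  # For example (hypothetical): --tools=/opt/cross/bin/,nm:/usr/bin/gnm
+  # uses /usr/bin/gnm for nm and the /opt/cross/bin/ prefix for the rest.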
+ my $tools = $main::opt_tools || $ENV{"JEPROF_TOOLS"} || "";
+ if ($tools =~ m/(,|^)\Q$tool\E:([^,]*)/) {
+ $path = $2;
+ # TODO(csilvers): sanity-check that $path exists? Hard if it's relative.
+ } elsif ($tools ne '') {
+ foreach my $prefix (split(',', $tools)) {
+ next if ($prefix =~ /:/); # ignore "tool:fullpath" entries in the list
+ if (-x $prefix . $tool) {
+ $path = $prefix . $tool;
+ last;
+ }
+ }
+ if (!$path) {
+ error("No '$tool' found with prefix specified by " .
+ "--tools (or \$JEPROF_TOOLS) '$tools'\n");
+ }
+ } else {
+ # ... otherwise use the version that exists in the same directory as
+ # jeprof. If there's nothing there, use $PATH.
+ $0 =~ m,[^/]*$,; # this is everything after the last slash
+ my $dirname = $`; # this is everything up to and including the last slash
+ if (-x "$dirname$tool") {
+ $path = "$dirname$tool";
+ } else {
+ $path = $tool;
+ }
+ }
+ if ($main::opt_debug) { print STDERR "Using '$path' for '$tool'.\n"; }
+ return $path;
+}
+
+sub ShellEscape {
+ my @escaped_words = ();
+ foreach my $word (@_) {
+ my $escaped_word = $word;
+ if ($word =~ m![^a-zA-Z0-9/.,_=-]!) { # check for anything not in whitelist
+      $escaped_word =~ s/'/'\\''/g;
+ $escaped_word = "'$escaped_word'";
+ }
+ push(@escaped_words, $escaped_word);
+ }
+ return join(" ", @escaped_words);
+}
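+
+# For example, ShellEscape("nm", "-n", "/tmp/my lib.so") returns
+# "nm -n '/tmp/my lib.so'": only words containing non-whitelisted
+# characters get quoted.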
+
+sub cleanup {
+ unlink($main::tmpfile_sym);
+ unlink(keys %main::tempnames);
+
+ # We leave any collected profiles in $HOME/jeprof in case the user wants
+ # to look at them later. We print a message informing them of this.
+ if ((scalar(@main::profile_files) > 0) &&
+ defined($main::collected_profile)) {
+ if (scalar(@main::profile_files) == 1) {
+ print STDERR "Dynamically gathered profile is in $main::collected_profile\n";
+ }
+ print STDERR "If you want to investigate this profile further, you can do:\n";
+ print STDERR "\n";
+ print STDERR " jeprof \\\n";
+ print STDERR " $main::prog \\\n";
+ print STDERR " $main::collected_profile\n";
+ print STDERR "\n";
+ }
+}
+
+sub sighandler {
+ cleanup();
+ exit(1);
+}
+
+sub error {
+ my $msg = shift;
+ print STDERR $msg;
+ cleanup();
+ exit(1);
+}
+
+
+# Run $nm_command and return all the resulting procedure boundaries whose
+# names match "$regexp", in a hashtable mapping from procedure name to a
+# two-element vector of [start address, end address]
+sub GetProcedureBoundariesViaNm {
+ my $escaped_nm_command = shift; # shell-escaped
+ my $regexp = shift;
+
+ my $symbol_table = {};
+ open(NM, "$escaped_nm_command |") || error("$escaped_nm_command: $!\n");
+ my $last_start = "0";
+ my $routine = "";
+ while (<NM>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ if (m/^\s*([0-9a-f]+) (.) (..*)/) {
+ my $start_val = $1;
+ my $type = $2;
+ my $this_routine = $3;
+
+ # It's possible for two symbols to share the same address, if
+ # one is a zero-length variable (like __start_google_malloc) or
+ # one symbol is a weak alias to another (like __libc_malloc).
+ # In such cases, we want to ignore all values except for the
+ # actual symbol, which in nm-speak has type "T". The logic
+ # below does this, though it's a bit tricky: what happens when
+ # we have a series of lines with the same address, is the first
+ # one gets queued up to be processed. However, it won't
+ # *actually* be processed until later, when we read a line with
+ # a different address. That means that as long as we're reading
+ # lines with the same address, we have a chance to replace that
+ # item in the queue, which we do whenever we see a 'T' entry --
+ # that is, a line with type 'T'. If we never see a 'T' entry,
+ # we'll just go ahead and process the first entry (which never
+ # got touched in the queue), and ignore the others.
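+      # For example, given (hypothetical) nm output:
+      #   0000000000400440 W __libc_malloc
+      #   0000000000400440 T malloc
+      # the weak alias queued first is replaced by the 'T' entry, so the
+      # range starting at 400440 is attributed to malloc.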
+ if ($start_val eq $last_start && $type =~ /t/i) {
+ # We are the 'T' symbol at this address, replace previous symbol.
+ $routine = $this_routine;
+ next;
+ } elsif ($start_val eq $last_start) {
+ # We're not the 'T' symbol at this address, so ignore us.
+ next;
+ }
+
+ if ($this_routine eq $sep_symbol) {
+ $sep_address = HexExtend($start_val);
+ }
+
+ # Tag this routine with the starting address in case the image
+ # has multiple occurrences of this routine. We use a syntax
+ # that resembles template parameters that are automatically
+ # stripped out by ShortFunctionName()
+ $this_routine .= "<$start_val>";
+
+ if (defined($routine) && $routine =~ m/$regexp/) {
+ $symbol_table->{$routine} = [HexExtend($last_start),
+ HexExtend($start_val)];
+ }
+ $last_start = $start_val;
+ $routine = $this_routine;
+ } elsif (m/^Loaded image name: (.+)/) {
+ # The win32 nm workalike emits information about the binary it is using.
+ if ($main::opt_debug) { print STDERR "Using Image $1\n"; }
+ } elsif (m/^PDB file name: (.+)/) {
+ # The win32 nm workalike emits information about the pdb it is using.
+ if ($main::opt_debug) { print STDERR "Using PDB $1\n"; }
+ }
+ }
+ close(NM);
+ # Handle the last line in the nm output. Unfortunately, we don't know
+ # how big this last symbol is, because we don't know how big the file
+ # is. For now, we just give it a size of 0.
+ # TODO(csilvers): do better here.
+ if (defined($routine) && $routine =~ m/$regexp/) {
+ $symbol_table->{$routine} = [HexExtend($last_start),
+ HexExtend($last_start)];
+ }
+ return $symbol_table;
+}
+
+# Gets the procedure boundaries for all routines in "$image" whose names
+# match "$regexp" and returns them in a hashtable mapping from procedure
+# name to a two-element vector of [start address, end address].
+# Will return an empty map if nm is not installed or not working properly.
+sub GetProcedureBoundaries {
+ my $image = shift;
+ my $regexp = shift;
+
+ # If $image doesn't start with /, then put ./ in front of it. This works
+ # around an obnoxious bug in our probing of nm -f behavior.
+ # "nm -f $image" is supposed to fail on GNU nm, but if:
+ #
+ # a. $image starts with [BbSsPp] (for example, bin/foo/bar), AND
+ # b. you have a.out in your current directory (a not uncommon occurrence)
+ #
+ # then "nm -f $image" succeeds because -f only looks at the first letter of
+ # the argument, which looks valid because it's [BbSsPp], and then since
+ # there's no image provided, it looks for a.out and finds it.
+ #
+ # This regex makes sure that $image starts with . or /, forcing the -f
+ # parsing to fail since . and / are not valid formats.
+ $image =~ s#^[^/]#./$&#;
+
+ # For libc libraries, the copy in /usr/lib/debug contains debugging symbols
+ my $debugging = DebuggingLibrary($image);
+ if ($debugging) {
+ $image = $debugging;
+ }
+
+ my $nm = $obj_tool_map{"nm"};
+ my $cppfilt = $obj_tool_map{"c++filt"};
+
+ # nm can fail for two reasons: 1) $image isn't a debug library; 2) nm
+ # binary doesn't support --demangle. In addition, for OS X we need
+ # to use the -f flag to get 'flat' nm output (otherwise we don't sort
+ # properly and get incorrect results). Unfortunately, GNU nm uses -f
+ # in an incompatible way. So first we test whether our nm supports
+ # --demangle and -f.
+ my $demangle_flag = "";
+ my $cppfilt_flag = "";
+ my $to_devnull = ">$dev_null 2>&1";
+ if (system(ShellEscape($nm, "--demangle", $image) . $to_devnull) == 0) {
+ # In this mode, we do "nm --demangle <foo>"
+ $demangle_flag = "--demangle";
+ $cppfilt_flag = "";
+ } elsif (system(ShellEscape($cppfilt, $image) . $to_devnull) == 0) {
+ # In this mode, we do "nm <foo> | c++filt"
+ $cppfilt_flag = " | " . ShellEscape($cppfilt);
+  }
+ my $flatten_flag = "";
+ if (system(ShellEscape($nm, "-f", $image) . $to_devnull) == 0) {
+ $flatten_flag = "-f";
+ }
+
+  # Finally, in the case $image isn't a debug library, we try again with
+ # -D to at least get *exported* symbols. If we can't use --demangle,
+ # we use c++filt instead, if it exists on this system.
+ my @nm_commands = (ShellEscape($nm, "-n", $flatten_flag, $demangle_flag,
+ $image) . " 2>$dev_null $cppfilt_flag",
+ ShellEscape($nm, "-D", "-n", $flatten_flag, $demangle_flag,
+ $image) . " 2>$dev_null $cppfilt_flag",
+ # 6nm is for Go binaries
+ ShellEscape("6nm", "$image") . " 2>$dev_null | sort",
+ );
+
+ # If the executable is an MS Windows PDB-format executable, we'll
+ # have set up obj_tool_map("nm_pdb"). In this case, we actually
+ # want to use both unix nm and windows-specific nm_pdb, since
+ # PDB-format executables can apparently include dwarf .o files.
+ if (exists $obj_tool_map{"nm_pdb"}) {
+ push(@nm_commands,
+ ShellEscape($obj_tool_map{"nm_pdb"}, "--demangle", $image)
+ . " 2>$dev_null");
+ }
+
+ foreach my $nm_command (@nm_commands) {
+ my $symbol_table = GetProcedureBoundariesViaNm($nm_command, $regexp);
+ return $symbol_table if (%{$symbol_table});
+ }
+ my $symbol_table = {};
+ return $symbol_table;
+}
+
+
+# The test vectors for AddressAdd/Sub/Inc are 8-16-nibble hex strings.
+# To make them more readable, we add underscores at interesting places.
+# This routine removes the underscores, producing the canonical representation
+# used by jeprof to represent addresses, particularly in the tested routines.
+sub CanonicalHex {
+ my $arg = shift;
+ return join '', (split '_',$arg);
+}
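+
+# For example, CanonicalHex("00_000000f_afafafa") returns "00000000fafafafa".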
+
+
+# Unit test for AddressAdd:
+sub AddressAddUnitTest {
+ my $test_data_8 = shift;
+ my $test_data_16 = shift;
+ my $error_count = 0;
+ my $fail_count = 0;
+ my $pass_count = 0;
+ # print STDERR "AddressAddUnitTest: ", 1+$#{$test_data_8}, " tests\n";
+
+ # First a few 8-nibble addresses. Note that this implementation uses
+ # plain old arithmetic, so a quick sanity check along with verifying what
+ # happens to overflow (we want it to wrap):
+ $address_length = 8;
+ foreach my $row (@{$test_data_8}) {
+ if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+ my $sum = AddressAdd ($row->[0], $row->[1]);
+ if ($sum ne $row->[2]) {
+ printf STDERR "ERROR: %s != %s + %s = %s\n", $sum,
+ $row->[0], $row->[1], $row->[2];
+ ++$fail_count;
+ } else {
+ ++$pass_count;
+ }
+ }
+ printf STDERR "AddressAdd 32-bit tests: %d passes, %d failures\n",
+ $pass_count, $fail_count;
+ $error_count = $fail_count;
+ $fail_count = 0;
+ $pass_count = 0;
+
+ # Now 16-nibble addresses.
+ $address_length = 16;
+ foreach my $row (@{$test_data_16}) {
+ if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+ my $sum = AddressAdd (CanonicalHex($row->[0]), CanonicalHex($row->[1]));
+    my $expected = CanonicalHex($row->[2]);
+    if ($sum ne $expected) {
+ printf STDERR "ERROR: %s != %s + %s = %s\n", $sum,
+ $row->[0], $row->[1], $row->[2];
+ ++$fail_count;
+ } else {
+ ++$pass_count;
+ }
+ }
+ printf STDERR "AddressAdd 64-bit tests: %d passes, %d failures\n",
+ $pass_count, $fail_count;
+ $error_count += $fail_count;
+
+ return $error_count;
+}
+
+
+# Unit test for AddressSub:
+sub AddressSubUnitTest {
+ my $test_data_8 = shift;
+ my $test_data_16 = shift;
+ my $error_count = 0;
+ my $fail_count = 0;
+ my $pass_count = 0;
+ # print STDERR "AddressSubUnitTest: ", 1+$#{$test_data_8}, " tests\n";
+
+ # First a few 8-nibble addresses. Note that this implementation uses
+ # plain old arithmetic, so a quick sanity check along with verifying what
+ # happens to overflow (we want it to wrap):
+ $address_length = 8;
+ foreach my $row (@{$test_data_8}) {
+ if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+ my $sum = AddressSub ($row->[0], $row->[1]);
+ if ($sum ne $row->[3]) {
+ printf STDERR "ERROR: %s != %s - %s = %s\n", $sum,
+ $row->[0], $row->[1], $row->[3];
+ ++$fail_count;
+ } else {
+ ++$pass_count;
+ }
+ }
+ printf STDERR "AddressSub 32-bit tests: %d passes, %d failures\n",
+ $pass_count, $fail_count;
+ $error_count = $fail_count;
+ $fail_count = 0;
+ $pass_count = 0;
+
+ # Now 16-nibble addresses.
+ $address_length = 16;
+ foreach my $row (@{$test_data_16}) {
+ if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+ my $sum = AddressSub (CanonicalHex($row->[0]), CanonicalHex($row->[1]));
+ if ($sum ne CanonicalHex($row->[3])) {
+ printf STDERR "ERROR: %s != %s - %s = %s\n", $sum,
+ $row->[0], $row->[1], $row->[3];
+ ++$fail_count;
+ } else {
+ ++$pass_count;
+ }
+ }
+ printf STDERR "AddressSub 64-bit tests: %d passes, %d failures\n",
+ $pass_count, $fail_count;
+ $error_count += $fail_count;
+
+ return $error_count;
+}
+
+
+# Unit test for AddressInc:
+sub AddressIncUnitTest {
+ my $test_data_8 = shift;
+ my $test_data_16 = shift;
+ my $error_count = 0;
+ my $fail_count = 0;
+ my $pass_count = 0;
+ # print STDERR "AddressIncUnitTest: ", 1+$#{$test_data_8}, " tests\n";
+
+ # First a few 8-nibble addresses. Note that this implementation uses
+ # plain old arithmetic, so a quick sanity check along with verifying what
+ # happens to overflow (we want it to wrap):
+ $address_length = 8;
+ foreach my $row (@{$test_data_8}) {
+ if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+ my $sum = AddressInc ($row->[0]);
+ if ($sum ne $row->[4]) {
+ printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum,
+ $row->[0], $row->[4];
+ ++$fail_count;
+ } else {
+ ++$pass_count;
+ }
+ }
+ printf STDERR "AddressInc 32-bit tests: %d passes, %d failures\n",
+ $pass_count, $fail_count;
+ $error_count = $fail_count;
+ $fail_count = 0;
+ $pass_count = 0;
+
+ # Now 16-nibble addresses.
+ $address_length = 16;
+ foreach my $row (@{$test_data_16}) {
+ if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+ my $sum = AddressInc (CanonicalHex($row->[0]));
+ if ($sum ne CanonicalHex($row->[4])) {
+ printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum,
+ $row->[0], $row->[4];
+ ++$fail_count;
+ } else {
+ ++$pass_count;
+ }
+ }
+ printf STDERR "AddressInc 64-bit tests: %d passes, %d failures\n",
+ $pass_count, $fail_count;
+ $error_count += $fail_count;
+
+ return $error_count;
+}
+
+
+# Driver for unit tests.
+# Currently just the address add/subtract/increment routines for 64-bit.
+sub RunUnitTests {
+ my $error_count = 0;
+
+ # This is a list of tuples [a, b, a+b, a-b, a+1]
+ my $unit_test_data_8 = [
+ [qw(aaaaaaaa 50505050 fafafafa 5a5a5a5a aaaaaaab)],
+ [qw(50505050 aaaaaaaa fafafafa a5a5a5a6 50505051)],
+ [qw(ffffffff aaaaaaaa aaaaaaa9 55555555 00000000)],
+ [qw(00000001 ffffffff 00000000 00000002 00000002)],
+ [qw(00000001 fffffff0 fffffff1 00000011 00000002)],
+ ];
+ my $unit_test_data_16 = [
+ # The implementation handles data in 7-nibble chunks, so those are the
+ # interesting boundaries.
+ [qw(aaaaaaaa 50505050
+ 00_000000f_afafafa 00_0000005_a5a5a5a 00_000000a_aaaaaab)],
+ [qw(50505050 aaaaaaaa
+ 00_000000f_afafafa ff_ffffffa_5a5a5a6 00_0000005_0505051)],
+ [qw(ffffffff aaaaaaaa
+ 00_000001a_aaaaaa9 00_0000005_5555555 00_0000010_0000000)],
+ [qw(00000001 ffffffff
+ 00_0000010_0000000 ff_ffffff0_0000002 00_0000000_0000002)],
+ [qw(00000001 fffffff0
+ 00_000000f_ffffff1 ff_ffffff0_0000011 00_0000000_0000002)],
+
+ [qw(00_a00000a_aaaaaaa 50505050
+ 00_a00000f_afafafa 00_a000005_a5a5a5a 00_a00000a_aaaaaab)],
+ [qw(0f_fff0005_0505050 aaaaaaaa
+ 0f_fff000f_afafafa 0f_ffefffa_5a5a5a6 0f_fff0005_0505051)],
+ [qw(00_000000f_fffffff 01_800000a_aaaaaaa
+ 01_800001a_aaaaaa9 fe_8000005_5555555 00_0000010_0000000)],
+ [qw(00_0000000_0000001 ff_fffffff_fffffff
+ 00_0000000_0000000 00_0000000_0000002 00_0000000_0000002)],
+ [qw(00_0000000_0000001 ff_fffffff_ffffff0
+ ff_fffffff_ffffff1 00_0000000_0000011 00_0000000_0000002)],
+ ];
+
+ $error_count += AddressAddUnitTest($unit_test_data_8, $unit_test_data_16);
+ $error_count += AddressSubUnitTest($unit_test_data_8, $unit_test_data_16);
+ $error_count += AddressIncUnitTest($unit_test_data_8, $unit_test_data_16);
+ if ($error_count > 0) {
+ print STDERR $error_count, " errors: FAILED\n";
+ } else {
+ print STDERR "PASS\n";
+ }
+ exit ($error_count);
+}
+||||||| dec341af7695
+=======
+#! /usr/bin/env perl
+
+# Copyright (c) 1998-2007, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# ---
+# Program for printing the profile generated by common/profiler.cc,
+# or by the heap profiler (common/debugallocation.cc)
+#
+# The profile contains a sequence of entries of the form:
+# <count> <stack trace>
+# This program parses the profile, and generates user-readable
+# output.
+#
+# Examples:
+#
+# % tools/jeprof "program" "profile"
+# Enters "interactive" mode
+#
+# % tools/jeprof --text "program" "profile"
+# Generates one line per procedure
+#
+# % tools/jeprof --gv "program" "profile"
+# Generates annotated call-graph and displays via "gv"
+#
+# % tools/jeprof --gv --focus=Mutex "program" "profile"
+# Restrict to code paths that involve an entry that matches "Mutex"
+#
+# % tools/jeprof --gv --focus=Mutex --ignore=string "program" "profile"
+# Restrict to code paths that involve an entry that matches "Mutex"
+# and does not match "string"
+#
+# % tools/jeprof --list=IBF_CheckDocid "program" "profile"
+# Generates disassembly listing of all routines with at least one
+# sample that match the --list=<regexp> pattern. The listing is
+# annotated with the flat and cumulative sample counts at each line.
+#
+# % tools/jeprof --disasm=IBF_CheckDocid "program" "profile"
+# Generates disassembly listing of all routines with at least one
+# sample that match the --disasm=<regexp> pattern. The listing is
+# annotated with the flat and cumulative sample counts at each PC value.
+#
+# TODO: Use color to indicate files?
+
+use strict;
+use warnings;
+use Getopt::Long;
+use Cwd;
+
+my $JEPROF_VERSION = "@jemalloc_version@";
+my $PPROF_VERSION = "2.0";
+
+# These are the object tools we use which can come from a
+# user-specified location using --tools, from the JEPROF_TOOLS
+# environment variable, or from the environment.
+my %obj_tool_map = (
+ "objdump" => "objdump",
+ "nm" => "nm",
+ "addr2line" => "addr2line",
+ "c++filt" => "c++filt",
+ ## ConfigureObjTools may add architecture-specific entries:
+ #"nm_pdb" => "nm-pdb", # for reading windows (PDB-format) executables
+ #"addr2line_pdb" => "addr2line-pdb", # ditto
+ #"otool" => "otool", # equivalent of objdump on OS X
+);
+# NOTE: these are lists, so you can put in commandline flags if you want.
+my @DOT = ("dot"); # leave non-absolute, since it may be in /usr/local
+my @GV = ("gv");
+my @EVINCE = ("evince"); # could also be xpdf or perhaps acroread
+my @KCACHEGRIND = ("kcachegrind");
+my @PS2PDF = ("ps2pdf");
+# These are used for dynamic profiles
+my @URL_FETCHER = ("curl", "-s", "--fail");
+
+# These are the web pages that servers need to support for dynamic profiles
+my $HEAP_PAGE = "/pprof/heap";
+my $PROFILE_PAGE = "/pprof/profile"; # must support cgi-param "?seconds=#"
+my $PMUPROFILE_PAGE = "/pprof/pmuprofile(?:\\?.*)?"; # must support cgi-param
+ # ?seconds=#&event=x&period=n
+my $GROWTH_PAGE = "/pprof/growth";
+my $CONTENTION_PAGE = "/pprof/contention";
+my $WALL_PAGE = "/pprof/wall(?:\\?.*)?"; # accepts options like namefilter
+my $FILTEREDPROFILE_PAGE = "/pprof/filteredprofile(?:\\?.*)?";
+my $CENSUSPROFILE_PAGE = "/pprof/censusprofile(?:\\?.*)?"; # must support cgi-param
+ # "?seconds=#",
+ # "?tags_regexp=#" and
+ # "?type=#".
+my $SYMBOL_PAGE = "/pprof/symbol"; # must support symbol lookup via POST
+my $PROGRAM_NAME_PAGE = "/pprof/cmdline";
+
+# These are the web pages that can be named on the command line.
+# All the alternatives must begin with /.
+my $PROFILES = "($HEAP_PAGE|$PROFILE_PAGE|$PMUPROFILE_PAGE|" .
+ "$GROWTH_PAGE|$CONTENTION_PAGE|$WALL_PAGE|" .
+ "$FILTEREDPROFILE_PAGE|$CENSUSPROFILE_PAGE)";
+
+# default binary name
+my $UNKNOWN_BINARY = "(unknown)";
+
+# There is a pervasive dependency on the length (in hex characters,
+# i.e., nibbles) of an address, distinguishing between 32-bit and
+# 64-bit profiles. To err on the safe side, default to 64-bit here:
+my $address_length = 16;
+
+my $dev_null = "/dev/null";
+if (! -e $dev_null && $^O =~ /MSWin/) { # $^O is the OS perl was built for
+ $dev_null = "nul";
+}
+
+# A list of paths to search for shared object files
+my @prefix_list = ();
+
+# Special routine name that should not have any symbols.
+# Used as separator to parse "addr2line -i" output.
+my $sep_symbol = '_fini';
+my $sep_address = undef;
+
+##### Argument parsing #####
+
+sub usage_string {
+ return <<EOF;
+Usage:
+jeprof [options] <program> <profiles>
+ <profiles> is a space separated list of profile names.
+jeprof [options] <symbolized-profiles>
+ <symbolized-profiles> is a list of profile files where each file contains
+ the necessary symbol mappings as well as profile data (likely generated
+ with --raw).
+jeprof [options] <profile>
+ <profile> is a remote form. Symbols are obtained from host:port$SYMBOL_PAGE
+
+ Each name can be:
+ /path/to/profile - a path to a profile file
+ host:port[/<service>] - a location of a service to get profile from
+
+ The /<service> can be $HEAP_PAGE, $PROFILE_PAGE, /pprof/pmuprofile,
+ $GROWTH_PAGE, $CONTENTION_PAGE, /pprof/wall,
+ $CENSUSPROFILE_PAGE, or /pprof/filteredprofile.
+ For instance:
+ jeprof http://myserver.com:80$HEAP_PAGE
+ If /<service> is omitted, the service defaults to $PROFILE_PAGE (cpu profiling).
+jeprof --symbols <program>
+ Maps addresses to symbol names. In this mode, stdin should be a
+ list of library mappings, in the same format as is found in the heap-
+ and cpu-profile files (this loosely matches that of /proc/self/maps
+ on Linux), followed by a list of hex addresses to map, one per line.
+
+ For more help with querying remote servers, including how to add the
+ necessary server-side support code, see this filename (or one like it):
+
+ /usr/doc/gperftools-$PPROF_VERSION/pprof_remote_servers.html
+
+Options:
+ --cum Sort by cumulative data
+ --base=<base> Subtract <base> from <profile> before display
+ --interactive Run in interactive mode (interactive "help" gives help) [default]
+ --seconds=<n> Length of time for dynamic profiles [default=30 secs]
+ --add_lib=<file> Read additional symbols and line info from the given library
+ --lib_prefix=<dir> Comma-separated list of library path prefixes
+
+Reporting Granularity:
+ --addresses Report at address level
+ --lines Report at source line level
+ --functions Report at function level [default]
+ --files Report at source file level
+
+Output type:
+ --text Generate text report
+ --callgrind Generate callgrind format to stdout
+ --gv Generate Postscript and display
+ --evince Generate PDF and display
+ --web Generate SVG and display
+ --list=<regexp> Generate source listing of matching routines
+ --disasm=<regexp> Generate disassembly of matching routines
+ --symbols Print demangled symbol names found at given addresses
+ --dot Generate DOT file to stdout
+ --ps Generate Postscript to stdout
+ --pdf Generate PDF to stdout
+ --svg Generate SVG to stdout
+ --gif Generate GIF to stdout
+ --raw Generate symbolized jeprof data (useful with remote fetch)
+
+Heap-Profile Options:
+ --inuse_space Display in-use (mega)bytes [default]
+ --inuse_objects Display in-use objects
+ --alloc_space Display allocated (mega)bytes
+ --alloc_objects Display allocated objects
+ --show_bytes Display space in bytes
+ --drop_negative Ignore negative differences
+
+Contention-profile options:
+ --total_delay Display total delay at each region [default]
+ --contentions Display number of delays at each region
+ --mean_delay Display mean delay at each region
+
+Call-graph Options:
+ --nodecount=<n> Show at most <n> nodes [default=80]
+ --nodefraction=<f> Hide nodes below <f>*total [default=.005]
+ --edgefraction=<f> Hide edges below <f>*total [default=.001]
+ --maxdegree=<n> Max incoming/outgoing edges per node [default=8]
+ --focus=<regexp> Focus on backtraces with nodes matching <regexp>
+ --thread=<n> Show profile for thread <n>
+ --ignore=<regexp> Ignore backtraces with nodes matching <regexp>
+ --scale=<n> Set GV scaling [default=0]
+ --heapcheck Make nodes with non-0 object counts
+ (i.e. direct leak generators) more visible
+ --retain=<regexp> Retain only nodes that match <regexp>
+ --exclude=<regexp> Exclude all nodes that match <regexp>
+
+Miscellaneous:
+ --tools=<prefix or binary:fullpath>[,...] \$PATH for object tool pathnames
+ --test Run unit tests
+ --help This message
+ --version Version information
+
+Environment Variables:
+ JEPROF_TMPDIR Profiles directory. Defaults to \$HOME/jeprof
+ JEPROF_TOOLS Prefix for object tools pathnames
+
+Examples:
+
+jeprof /bin/ls ls.prof
+ Enters "interactive" mode
+jeprof --text /bin/ls ls.prof
+ Outputs one line per procedure
+jeprof --web /bin/ls ls.prof
+ Displays annotated call-graph in web browser
+jeprof --gv /bin/ls ls.prof
+ Displays annotated call-graph via 'gv'
+jeprof --gv --focus=Mutex /bin/ls ls.prof
+ Restricts to code paths including a .*Mutex.* entry
+jeprof --gv --focus=Mutex --ignore=string /bin/ls ls.prof
+ Code paths including Mutex but not string
+jeprof --list=getdir /bin/ls ls.prof
+ (Per-line) annotated source listing for getdir()
+jeprof --disasm=getdir /bin/ls ls.prof
+ (Per-PC) annotated disassembly for getdir()
+
+jeprof http://localhost:1234/
+ Enters "interactive" mode
+jeprof --text localhost:1234
+ Outputs one line per procedure for localhost:1234
+jeprof --raw localhost:1234 > ./local.raw
+jeprof --text ./local.raw
+ Fetches a remote profile for later analysis and then
+ analyzes it in text mode.
+EOF
+}
+
+sub version_string {
+ return <<EOF
+jeprof (part of jemalloc $JEPROF_VERSION)
+based on pprof (part of gperftools $PPROF_VERSION)
+
+Copyright 1998-2007 Google Inc.
+
+This is BSD licensed software; see the source for copying conditions
+and license information.
+There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A
+PARTICULAR PURPOSE.
+EOF
+}
+
+sub usage {
+ my $msg = shift;
+ print STDERR "$msg\n\n";
+ print STDERR usage_string();
+ print STDERR "\nFATAL ERROR: $msg\n"; # just as a reminder
+ exit(1);
+}
+
+sub Init() {
+ # Setup tmp-file name and handler to clean it up.
+ # We do this in the very beginning so that we can use
+ # error() and cleanup() functions anytime hereafter.
+ $main::tmpfile_sym = "/tmp/jeprof$$.sym";
+ $main::tmpfile_ps = "/tmp/jeprof$$";
+ $main::next_tmpfile = 0;
+ $SIG{'INT'} = \&sighandler;
+
+ # Cache from filename/linenumber to source code
+ %main::source_cache = ();
+
+ $main::opt_help = 0;
+ $main::opt_version = 0;
+
+ $main::opt_cum = 0;
+ $main::opt_base = '';
+ $main::opt_addresses = 0;
+ $main::opt_lines = 0;
+ $main::opt_functions = 0;
+ $main::opt_files = 0;
+ $main::opt_lib_prefix = "";
+
+ $main::opt_text = 0;
+ $main::opt_callgrind = 0;
+ $main::opt_list = "";
+ $main::opt_disasm = "";
+ $main::opt_symbols = 0;
+ $main::opt_gv = 0;
+ $main::opt_evince = 0;
+ $main::opt_web = 0;
+ $main::opt_dot = 0;
+ $main::opt_ps = 0;
+ $main::opt_pdf = 0;
+ $main::opt_gif = 0;
+ $main::opt_svg = 0;
+ $main::opt_raw = 0;
+
+ $main::opt_nodecount = 80;
+ $main::opt_nodefraction = 0.005;
+ $main::opt_edgefraction = 0.001;
+ $main::opt_maxdegree = 8;
+ $main::opt_focus = '';
+ $main::opt_thread = undef;
+ $main::opt_ignore = '';
+ $main::opt_scale = 0;
+ $main::opt_heapcheck = 0;
+ $main::opt_retain = '';
+ $main::opt_exclude = '';
+ $main::opt_seconds = 30;
+ $main::opt_lib = "";
+
+ $main::opt_inuse_space = 0;
+ $main::opt_inuse_objects = 0;
+ $main::opt_alloc_space = 0;
+ $main::opt_alloc_objects = 0;
+ $main::opt_show_bytes = 0;
+ $main::opt_drop_negative = 0;
+ $main::opt_interactive = 0;
+
+ $main::opt_total_delay = 0;
+ $main::opt_contentions = 0;
+ $main::opt_mean_delay = 0;
+
+ $main::opt_tools = "";
+ $main::opt_debug = 0;
+ $main::opt_test = 0;
+
+ # These are undocumented flags used only by unittests.
+ $main::opt_test_stride = 0;
+
+ # Are we using $SYMBOL_PAGE?
+ $main::use_symbol_page = 0;
+
+ # Files returned by TempName.
+ %main::tempnames = ();
+
+ # Type of profile we are dealing with
+ # Supported types:
+ # cpu
+ # heap
+ # growth
+ # contention
+ $main::profile_type = ''; # Empty type means "unknown"
+
+ GetOptions("help!" => \$main::opt_help,
+ "version!" => \$main::opt_version,
+ "cum!" => \$main::opt_cum,
+ "base=s" => \$main::opt_base,
+ "seconds=i" => \$main::opt_seconds,
+ "add_lib=s" => \$main::opt_lib,
+ "lib_prefix=s" => \$main::opt_lib_prefix,
+ "functions!" => \$main::opt_functions,
+ "lines!" => \$main::opt_lines,
+ "addresses!" => \$main::opt_addresses,
+ "files!" => \$main::opt_files,
+ "text!" => \$main::opt_text,
+ "callgrind!" => \$main::opt_callgrind,
+ "list=s" => \$main::opt_list,
+ "disasm=s" => \$main::opt_disasm,
+ "symbols!" => \$main::opt_symbols,
+ "gv!" => \$main::opt_gv,
+ "evince!" => \$main::opt_evince,
+ "web!" => \$main::opt_web,
+ "dot!" => \$main::opt_dot,
+ "ps!" => \$main::opt_ps,
+ "pdf!" => \$main::opt_pdf,
+ "svg!" => \$main::opt_svg,
+ "gif!" => \$main::opt_gif,
+ "raw!" => \$main::opt_raw,
+ "interactive!" => \$main::opt_interactive,
+ "nodecount=i" => \$main::opt_nodecount,
+ "nodefraction=f" => \$main::opt_nodefraction,
+ "edgefraction=f" => \$main::opt_edgefraction,
+ "maxdegree=i" => \$main::opt_maxdegree,
+ "focus=s" => \$main::opt_focus,
+ "thread=s" => \$main::opt_thread,
+ "ignore=s" => \$main::opt_ignore,
+ "scale=i" => \$main::opt_scale,
+ "heapcheck" => \$main::opt_heapcheck,
+ "retain=s" => \$main::opt_retain,
+ "exclude=s" => \$main::opt_exclude,
+ "inuse_space!" => \$main::opt_inuse_space,
+ "inuse_objects!" => \$main::opt_inuse_objects,
+ "alloc_space!" => \$main::opt_alloc_space,
+ "alloc_objects!" => \$main::opt_alloc_objects,
+ "show_bytes!" => \$main::opt_show_bytes,
+ "drop_negative!" => \$main::opt_drop_negative,
+ "total_delay!" => \$main::opt_total_delay,
+ "contentions!" => \$main::opt_contentions,
+ "mean_delay!" => \$main::opt_mean_delay,
+ "tools=s" => \$main::opt_tools,
+ "test!" => \$main::opt_test,
+ "debug!" => \$main::opt_debug,
+ # Undocumented flags used only by unittests:
+ "test_stride=i" => \$main::opt_test_stride,
+ ) || usage("Invalid option(s)");
+
+ # Deal with the standard --help and --version
+ if ($main::opt_help) {
+ print usage_string();
+ exit(0);
+ }
+
+ if ($main::opt_version) {
+ print version_string();
+ exit(0);
+ }
+
+ # Disassembly/listing/symbols mode requires address-level info
+ if ($main::opt_disasm || $main::opt_list || $main::opt_symbols) {
+ $main::opt_functions = 0;
+ $main::opt_lines = 0;
+ $main::opt_addresses = 1;
+ $main::opt_files = 0;
+ }
+
+ # Check heap-profiling flags
+ if ($main::opt_inuse_space +
+ $main::opt_inuse_objects +
+ $main::opt_alloc_space +
+ $main::opt_alloc_objects > 1) {
+ usage("Specify at most on of --inuse/--alloc options");
+ }
+
+ # Check output granularities
+ my $grains =
+ $main::opt_functions +
+ $main::opt_lines +
+ $main::opt_addresses +
+ $main::opt_files +
+ 0;
+ if ($grains > 1) {
+ usage("Only specify one output granularity option");
+ }
+ if ($grains == 0) {
+ $main::opt_functions = 1;
+ }
+
+ # Check output modes
+ my $modes =
+ $main::opt_text +
+ $main::opt_callgrind +
+ ($main::opt_list eq '' ? 0 : 1) +
+ ($main::opt_disasm eq '' ? 0 : 1) +
+ ($main::opt_symbols == 0 ? 0 : 1) +
+ $main::opt_gv +
+ $main::opt_evince +
+ $main::opt_web +
+ $main::opt_dot +
+ $main::opt_ps +
+ $main::opt_pdf +
+ $main::opt_svg +
+ $main::opt_gif +
+ $main::opt_raw +
+ $main::opt_interactive +
+ 0;
+ if ($modes > 1) {
+ usage("Only specify one output mode");
+ }
+ if ($modes == 0) {
+ if (-t STDOUT) { # If STDOUT is a tty, activate interactive mode
+ $main::opt_interactive = 1;
+ } else {
+ $main::opt_text = 1;
+ }
+ }
+
+ if ($main::opt_test) {
+ RunUnitTests();
+ # Should not return
+ exit(1);
+ }
+
+ # Binary name and profile arguments list
+ $main::prog = "";
+ @main::pfile_args = ();
+
+ # Remote profiling without a binary (using $SYMBOL_PAGE instead)
+ if (@ARGV > 0) {
+ if (IsProfileURL($ARGV[0])) {
+ $main::use_symbol_page = 1;
+ } elsif (IsSymbolizedProfileFile($ARGV[0])) {
+ $main::use_symbolized_profile = 1;
+ $main::prog = $UNKNOWN_BINARY; # will be set later from the profile file
+ }
+ }
+
+ if ($main::use_symbol_page || $main::use_symbolized_profile) {
+ # We don't need a binary!
+ my %disabled = ('--lines' => $main::opt_lines,
+ '--disasm' => $main::opt_disasm);
+ for my $option (keys %disabled) {
+ usage("$option cannot be used without a binary") if $disabled{$option};
+ }
+ # Set $main::prog later...
+ scalar(@ARGV) || usage("Did not specify profile file");
+ } elsif ($main::opt_symbols) {
+ # --symbols needs a binary-name (to run nm on, etc) but not profiles
+ $main::prog = shift(@ARGV) || usage("Did not specify program");
+ } else {
+ $main::prog = shift(@ARGV) || usage("Did not specify program");
+ scalar(@ARGV) || usage("Did not specify profile file");
+ }
+
+ # Parse profile file/location arguments
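+ # A (hypothetical) argument "bigserver@3/pprof/heap" fans out to the
+ # three per-machine profiles "0.bigserver/pprof/heap",
+ # "1.bigserver/pprof/heap" and "2.bigserver/pprof/heap".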
+ foreach my $farg (@ARGV) {
+ if ($farg =~ m/(.*)\@([0-9]+)(|\/.*)$/ ) {
+ my $machine = $1;
+ my $num_machines = $2;
+ my $path = $3;
+ for (my $i = 0; $i < $num_machines; $i++) {
+ unshift(@main::pfile_args, "$i.$machine$path");
+ }
+ } else {
+ unshift(@main::pfile_args, $farg);
+ }
+ }
+
+ if ($main::use_symbol_page) {
+ unless (IsProfileURL($main::pfile_args[0])) {
+ error("The first profile should be a remote form to use $SYMBOL_PAGE\n");
+ }
+ CheckSymbolPage();
+ $main::prog = FetchProgramName();
+ } elsif (!$main::use_symbolized_profile) { # may not need objtools!
+ ConfigureObjTools($main::prog)
+ }
+
+ # Break the opt_lib_prefix into the prefix_list array
+ @prefix_list = split (',', $main::opt_lib_prefix);
+
+ # Remove trailing / from the prefixes in the list, to prevent
+ # searching things like /my/path//lib/mylib.so
+ foreach (@prefix_list) {
+ s|/+$||;
+ }
+}
+
+sub FilterAndPrint {
+ my ($profile, $symbols, $libs, $thread) = @_;
+
+ # Get total data in profile
+ my $total = TotalProfile($profile);
+
+ # Remove uninteresting stack items
+ $profile = RemoveUninterestingFrames($symbols, $profile);
+
+ # Focus?
+ if ($main::opt_focus ne '') {
+ $profile = FocusProfile($symbols, $profile, $main::opt_focus);
+ }
+
+ # Ignore?
+ if ($main::opt_ignore ne '') {
+ $profile = IgnoreProfile($symbols, $profile, $main::opt_ignore);
+ }
+
+ my $calls = ExtractCalls($symbols, $profile);
+
+ # Reduce profiles to required output granularity, and also clean
+ # each stack trace so a given entry exists at most once.
+ my $reduced = ReduceProfile($symbols, $profile);
+
+ # Get derived profiles
+ my $flat = FlatProfile($reduced);
+ my $cumulative = CumulativeProfile($reduced);
+
+ # Print
+ if (!$main::opt_interactive) {
+ if ($main::opt_disasm) {
+ PrintDisassembly($libs, $flat, $cumulative, $main::opt_disasm);
+ } elsif ($main::opt_list) {
+ PrintListing($total, $libs, $flat, $cumulative, $main::opt_list, 0);
+ } elsif ($main::opt_text) {
+ # Make sure the output is empty when we have nothing to report
+ # (this only matters when --heapcheck is given, but we must stay
+ # compatible with old branches that did not always pass --heapcheck):
+ if ($total != 0) {
+ printf("Total%s: %s %s\n",
+ (defined($thread) ? " (t$thread)" : ""),
+ Unparse($total), Units());
+ }
+ PrintText($symbols, $flat, $cumulative, -1);
+ } elsif ($main::opt_raw) {
+ PrintSymbolizedProfile($symbols, $profile, $main::prog);
+ } elsif ($main::opt_callgrind) {
+ PrintCallgrind($calls);
+ } else {
+ if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) {
+ if ($main::opt_gv) {
+ RunGV(TempName($main::next_tmpfile, "ps"), "");
+ } elsif ($main::opt_evince) {
+ RunEvince(TempName($main::next_tmpfile, "pdf"), "");
+ } elsif ($main::opt_web) {
+ my $tmp = TempName($main::next_tmpfile, "svg");
+ RunWeb($tmp);
+ # The command we run might hand the file name off
+ # to an already running browser instance and then exit.
+ # Normally we'd remove $tmp on exit (right now); instead we fork a
+ # child to remove $tmp a little later, so that the browser has time
+ # to load it first.
+ delete $main::tempnames{$tmp};
+ if (fork() == 0) {
+ sleep 5;
+ unlink($tmp);
+ exit(0);
+ }
+ }
+ } else {
+ cleanup();
+ exit(1);
+ }
+ }
+ } else {
+ InteractiveMode($profile, $symbols, $libs, $total);
+ }
+}
+
+sub Main() {
+ Init();
+ $main::collected_profile = undef;
+ @main::profile_files = ();
+ $main::op_time = time();
+
+ # Printing symbols is special and requires a lot less info than most.
+ if ($main::opt_symbols) {
+ PrintSymbols(*STDIN); # Get /proc/maps and symbols output from stdin
+ return;
+ }
+
+ # Fetch all profile data
+ FetchDynamicProfiles();
+
+ # this will hold symbols that we read from the profile files
+ my $symbol_map = {};
+
+ # Read one profile, pick the last item on the list
+ my $data = ReadProfile($main::prog, pop(@main::profile_files));
+ my $profile = $data->{profile};
+ my $pcs = $data->{pcs};
+ my $libs = $data->{libs}; # Info about main program and shared libraries
+ $symbol_map = MergeSymbols($symbol_map, $data->{symbols});
+
+ # Add additional profiles, if available.
+ if (scalar(@main::profile_files) > 0) {
+ foreach my $pname (@main::profile_files) {
+ my $data2 = ReadProfile($main::prog, $pname);
+ $profile = AddProfile($profile, $data2->{profile});
+ $pcs = AddPcs($pcs, $data2->{pcs});
+ $symbol_map = MergeSymbols($symbol_map, $data2->{symbols});
+ }
+ }
+
+ # Subtract base from profile, if specified
+ if ($main::opt_base ne '') {
+ my $base = ReadProfile($main::prog, $main::opt_base);
+ $profile = SubtractProfile($profile, $base->{profile});
+ $pcs = AddPcs($pcs, $base->{pcs});
+ $symbol_map = MergeSymbols($symbol_map, $base->{symbols});
+ }
+
+ # Collect symbols
+ my $symbols;
+ if ($main::use_symbolized_profile) {
+ $symbols = FetchSymbols($pcs, $symbol_map);
+ } elsif ($main::use_symbol_page) {
+ $symbols = FetchSymbols($pcs);
+ } else {
+ # TODO(csilvers): $libs uses the /proc/self/maps data from profile1,
+ # which may differ from the data from subsequent profiles, especially
+ # if they were run on different machines. Use appropriate libs for
+ # each pc somehow.
+ $symbols = ExtractSymbols($libs, $pcs);
+ }
+
+ if (!defined($main::opt_thread)) {
+ FilterAndPrint($profile, $symbols, $libs);
+ }
+ if (defined($data->{threads})) {
+ foreach my $thread (sort { $a <=> $b } keys(%{$data->{threads}})) {
+ if (defined($main::opt_thread) &&
+ ($main::opt_thread eq '*' || $main::opt_thread == $thread)) {
+ my $thread_profile = $data->{threads}{$thread};
+ FilterAndPrint($thread_profile, $symbols, $libs, $thread);
+ }
+ }
+ }
+
+ cleanup();
+ exit(0);
+}
+
+##### Entry Point #####
+
+Main();
+
+# Temporary code to detect if we're running on a Goobuntu system.
+# These systems don't have the right stuff installed for the special
+# Readline libraries to work, so as a temporary workaround, we default
+# to using the normal stdio code, rather than the fancier readline-based
+# code.
+sub ReadlineMightFail {
+ if (-e '/lib/libtermcap.so.2') {
+ return 0; # libtermcap exists, so readline should be okay
+ } else {
+ return 1;
+ }
+}
+
+sub RunGV {
+ my $fname = shift;
+ my $bg = shift; # "" or " &" if we should run in background
+ if (!system(ShellEscape(@GV, "--version") . " >$dev_null 2>&1")) {
+ # Options using double dash are supported by this gv version.
+ # Also, turn on noantialias to better handle bug in gv for
+ # postscript files with large dimensions.
+ # TODO: Maybe we should not pass the --noantialias flag
+ # if the gv version is known to work properly without the flag.
+ system(ShellEscape(@GV, "--scale=$main::opt_scale", "--noantialias", $fname)
+ . $bg);
+ } else {
+ # Old gv version - only supports options that use single dash.
+ print STDERR ShellEscape(@GV, "-scale", $main::opt_scale) . "\n";
+ system(ShellEscape(@GV, "-scale", "$main::opt_scale", $fname) . $bg);
+ }
+}
+
+sub RunEvince {
+ my $fname = shift;
+ my $bg = shift; # "" or " &" if we should run in background
+ system(ShellEscape(@EVINCE, $fname) . $bg);
+}
+
+sub RunWeb {
+ my $fname = shift;
+ print STDERR "Loading web page file:///$fname\n";
+
+ if (`uname` =~ /Darwin/) {
+ # OS X: open will use standard preference for SVG files.
+ system("/usr/bin/open", $fname);
+ return;
+ }
+
+ # Some kind of Unix; try generic symlinks, then specific browsers.
+ # (Stop once we find one.)
+ # Works best if the browser is already running.
+ my @alt = (
+ "/etc/alternatives/gnome-www-browser",
+ "/etc/alternatives/x-www-browser",
+ "google-chrome",
+ "firefox",
+ );
+ foreach my $b (@alt) {
+ if (system($b, $fname) == 0) {
+ return;
+ }
+ }
+
+ print STDERR "Could not load web browser.\n";
+}
+
+sub RunKcachegrind {
+ my $fname = shift;
+ my $bg = shift; # "" or " &" if we should run in background
+ print STDERR "Starting '@KCACHEGRIND " . $fname . $bg . "'\n";
+ system(ShellEscape(@KCACHEGRIND, $fname) . $bg);
+}
+
+
+##### Interactive helper routines #####
+
+sub InteractiveMode {
+ $| = 1; # Make output unbuffered for interactive mode
+ my ($orig_profile, $symbols, $libs, $total) = @_;
+
+ print STDERR "Welcome to jeprof! For help, type 'help'.\n";
+
+ # Use ReadLine if it's installed and input comes from a console.
+ if ( -t STDIN &&
+ !ReadlineMightFail() &&
+ defined(eval {require Term::ReadLine}) ) {
+ my $term = new Term::ReadLine 'jeprof';
+ while ( defined ($_ = $term->readline('(jeprof) '))) {
+ $term->addhistory($_) if /\S/;
+ if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) {
+ last; # exit when we get an interactive command to quit
+ }
+ }
+ } else { # don't have readline
+ while (1) {
+ print STDERR "(jeprof) ";
+ $_ = <STDIN>;
+ last if ! defined $_ ;
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+
+ # Save some flags that might be reset by InteractiveCommand()
+ my $save_opt_lines = $main::opt_lines;
+
+ if (!InteractiveCommand($orig_profile, $symbols, $libs, $total, $_)) {
+ last; # exit when we get an interactive command to quit
+ }
+
+ # Restore flags
+ $main::opt_lines = $save_opt_lines;
+ }
+ }
+}
+
+# Takes two args: orig profile, and command to run.
+# Returns 1 if we should keep going, or 0 if we were asked to quit
+sub InteractiveCommand {
+ my($orig_profile, $symbols, $libs, $total, $command) = @_;
+ $_ = $command; # just to make future m//'s easier
+ if (!defined($_)) {
+ print STDERR "\n";
+ return 0;
+ }
+ if (m/^\s*quit/) {
+ return 0;
+ }
+ if (m/^\s*help/) {
+ InteractiveHelpMessage();
+ return 1;
+ }
+ # Clear all the mode options -- mode is controlled by "$command"
+ $main::opt_text = 0;
+ $main::opt_callgrind = 0;
+ $main::opt_disasm = 0;
+ $main::opt_list = 0;
+ $main::opt_gv = 0;
+ $main::opt_evince = 0;
+ $main::opt_cum = 0;
+
+ if (m/^\s*(text|top)(\d*)\s*(.*)/) {
+ $main::opt_text = 1;
+
+ my $line_limit = ($2 ne "") ? int($2) : 10;
+
+ my $routine;
+ my $ignore;
+ ($routine, $ignore) = ParseInteractiveArgs($3);
+
+ my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
+ my $reduced = ReduceProfile($symbols, $profile);
+
+ # Get derived profiles
+ my $flat = FlatProfile($reduced);
+ my $cumulative = CumulativeProfile($reduced);
+
+ PrintText($symbols, $flat, $cumulative, $line_limit);
+ return 1;
+ }
+ if (m/^\s*callgrind\s*([^ \n]*)/) {
+ $main::opt_callgrind = 1;
+
+ # Get derived profiles
+ my $calls = ExtractCalls($symbols, $orig_profile);
+ my $filename = $1;
+ if ( $1 eq '' ) {
+ $filename = TempName($main::next_tmpfile, "callgrind");
+ }
+ PrintCallgrind($calls, $filename);
+ if ( $1 eq '' ) {
+ RunKcachegrind($filename, " & ");
+ $main::next_tmpfile++;
+ }
+
+ return 1;
+ }
+ if (m/^\s*(web)?list\s*(.+)/) {
+ my $html = (defined($1) && ($1 eq "web"));
+ $main::opt_list = 1;
+
+ my $routine;
+ my $ignore;
+ ($routine, $ignore) = ParseInteractiveArgs($2);
+
+ my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
+ my $reduced = ReduceProfile($symbols, $profile);
+
+ # Get derived profiles
+ my $flat = FlatProfile($reduced);
+ my $cumulative = CumulativeProfile($reduced);
+
+ PrintListing($total, $libs, $flat, $cumulative, $routine, $html);
+ return 1;
+ }
+ if (m/^\s*disasm\s*(.+)/) {
+ $main::opt_disasm = 1;
+
+ my $routine;
+ my $ignore;
+ ($routine, $ignore) = ParseInteractiveArgs($1);
+
+ # Process current profile to account for various settings
+ my $profile = ProcessProfile($total, $orig_profile, $symbols, "", $ignore);
+ my $reduced = ReduceProfile($symbols, $profile);
+
+ # Get derived profiles
+ my $flat = FlatProfile($reduced);
+ my $cumulative = CumulativeProfile($reduced);
+
+ PrintDisassembly($libs, $flat, $cumulative, $routine);
+ return 1;
+ }
+ if (m/^\s*(gv|web|evince)\s*(.*)/) {
+ $main::opt_gv = 0;
+ $main::opt_evince = 0;
+ $main::opt_web = 0;
+ if ($1 eq "gv") {
+ $main::opt_gv = 1;
+ } elsif ($1 eq "evince") {
+ $main::opt_evince = 1;
+ } elsif ($1 eq "web") {
+ $main::opt_web = 1;
+ }
+
+ my $focus;
+ my $ignore;
+ ($focus, $ignore) = ParseInteractiveArgs($2);
+
+ # Process current profile to account for various settings
+ my $profile = ProcessProfile($total, $orig_profile, $symbols,
+ $focus, $ignore);
+ my $reduced = ReduceProfile($symbols, $profile);
+
+ # Get derived profiles
+ my $flat = FlatProfile($reduced);
+ my $cumulative = CumulativeProfile($reduced);
+
+ if (PrintDot($main::prog, $symbols, $profile, $flat, $cumulative, $total)) {
+ if ($main::opt_gv) {
+ RunGV(TempName($main::next_tmpfile, "ps"), " &");
+ } elsif ($main::opt_evince) {
+ RunEvince(TempName($main::next_tmpfile, "pdf"), " &");
+ } elsif ($main::opt_web) {
+ RunWeb(TempName($main::next_tmpfile, "svg"));
+ }
+ $main::next_tmpfile++;
+ }
+ return 1;
+ }
+ if (m/^\s*$/) {
+ return 1;
+ }
+ print STDERR "Unknown command: try 'help'.\n";
+ return 1;
+}
+
+
+sub ProcessProfile {
+ my $total_count = shift;
+ my $orig_profile = shift;
+ my $symbols = shift;
+ my $focus = shift;
+ my $ignore = shift;
+
+ # Process current profile to account for various settings
+ my $profile = $orig_profile;
+ printf("Total: %s %s\n", Unparse($total_count), Units());
+ if ($focus ne '') {
+ $profile = FocusProfile($symbols, $profile, $focus);
+ my $focus_count = TotalProfile($profile);
+ printf("After focusing on '%s': %s %s of %s (%0.1f%%)\n",
+ $focus,
+ Unparse($focus_count), Units(),
+ Unparse($total_count), ($focus_count*100.0) / $total_count);
+ }
+ if ($ignore ne '') {
+ $profile = IgnoreProfile($symbols, $profile, $ignore);
+ my $ignore_count = TotalProfile($profile);
+ printf("After ignoring '%s': %s %s of %s (%0.1f%%)\n",
+ $ignore,
+ Unparse($ignore_count), Units(),
+ Unparse($total_count),
+ ($ignore_count*100.0) / $total_count);
+ }
+
+ return $profile;
+}
+
+sub InteractiveHelpMessage {
+ print STDERR <<ENDOFHELP;
+Interactive jeprof mode
+
+Commands:
+ gv
+ gv [focus] [-ignore1] [-ignore2]
+ Show graphical hierarchical display of current profile. Without
+ any arguments, shows all samples in the profile. With the optional
+ "focus" argument, restricts the samples shown to just those where
+ the "focus" regular expression matches a routine name on the stack
+ trace.
+
+ web
+ web [focus] [-ignore1] [-ignore2]
+ Like GV, but displays profile in your web browser instead of using
+ Ghostview. Works best if your web browser is already running.
+ To change the browser that gets used:
+ On Linux, set the /etc/alternatives/gnome-www-browser symlink.
+ On OS X, change the Finder association for SVG files.
+
+ list [routine_regexp] [-ignore1] [-ignore2]
+ Show source listing of routines whose names match "routine_regexp"
+
+ weblist [routine_regexp] [-ignore1] [-ignore2]
+ Displays a source listing of routines whose names match "routine_regexp"
+ in a web browser. You can click on source lines to view the
+ corresponding disassembly.
+
+ top [--cum] [-ignore1] [-ignore2]
+ top20 [--cum] [-ignore1] [-ignore2]
+ top37 [--cum] [-ignore1] [-ignore2]
+ Show top lines ordered by flat profile count, or cumulative count
+ if --cum is specified. If a number is present after 'top', the
+ top K routines will be shown (defaults to showing the top 10)
+
+ disasm [routine_regexp] [-ignore1] [-ignore2]
+ Show disassembly of routines whose names match "routine_regexp",
+ annotated with sample counts.
+
+ callgrind
+ callgrind [filename]
+ Generates callgrind file. If no filename is given, kcachegrind is called.
+
+ help - This listing
+ quit or ^D - End jeprof
+
+For commands that accept optional -ignore tags, samples where any routine in
+the stack trace matches the regular expression in any of the -ignore
+parameters will be ignored.
+
+Further pprof details are available at this location (or one similar):
+
+ /usr/doc/gperftools-$PPROF_VERSION/cpu_profiler.html
+ /usr/doc/gperftools-$PPROF_VERSION/heap_profiler.html
+
+ENDOFHELP
+}
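+
+# Split an interactive argument string into a (focus, ignore) regexp
+# pair. For example, "Mutex -string --cum" yields focus "Mutex" and
+# ignore "string", and sets $main::opt_cum as a side effect.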
+sub ParseInteractiveArgs {
+ my $args = shift;
+ my $focus = "";
+ my $ignore = "";
+ my @x = split(/ +/, $args);
+ foreach $a (@x) {
+ if ($a =~ m/^(--|-)lines$/) {
+ $main::opt_lines = 1;
+ } elsif ($a =~ m/^(--|-)cum$/) {
+ $main::opt_cum = 1;
+ } elsif ($a =~ m/^-(.*)/) {
+ $ignore .= (($ignore ne "") ? "|" : "" ) . $1;
+ } else {
+ $focus .= (($focus ne "") ? "|" : "" ) . $a;
+ }
+ }
+ if ($ignore ne "") {
+ print STDERR "Ignoring samples in call stacks that match '$ignore'\n";
+ }
+ return ($focus, $ignore);
+}
+
+##### Output code #####
+
+sub TempName {
+ my $fnum = shift;
+ my $ext = shift;
+ my $file = "$main::tmpfile_ps.$fnum.$ext";
+ $main::tempnames{$file} = 1;
+ return $file;
+}
+
+# Print profile data in packed binary format (64-bit) to standard out
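+# Each record is <count> <depth> <pc1> ... <pcN>, with every 64-bit
+# value emitted as a pair of native-endian 32-bit words via pack('L*').
+# For example, a (hypothetical) two-frame stack sampled 3 times is
+# emitted as the word pairs 3,0 2,0 lo/hi(pc1) lo/hi(pc2).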
+sub PrintProfileData {
+ my $profile = shift;
+
+ # print header (64-bit style)
+ # (zero) (header-size) (version) (sample-period) (zero)
+ print pack('L*', 0, 0, 3, 0, 0, 0, 1, 0, 0, 0);
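+ # i.e. the five 64-bit header values 0 (zero), 3 (header size),
+ # 0 (version), 1 (sample period) and 0 (zero).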
+
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ if ($#addrs >= 0) {
+ my $depth = $#addrs + 1;
+ # int(foo / 2**32) is the only reliable way to get rid of bottom
+ # 32 bits on both 32- and 64-bit systems.
+ print pack('L*', $count & 0xFFFFFFFF, int($count / 2**32));
+ print pack('L*', $depth & 0xFFFFFFFF, int($depth / 2**32));
+
+ foreach my $full_addr (@addrs) {
+ my $addr = $full_addr;
+ $addr =~ s/0x0*//; # strip off leading 0x, zeroes
+ if (length($addr) > 16) {
+ print STDERR "Invalid address in profile: $full_addr\n";
+ next;
+ }
+ my $low_addr = substr($addr, -8); # get last 8 hex chars
+ my $high_addr = substr($addr, -16, 8); # get up to 8 more hex chars
+ print pack('L*', hex('0x' . $low_addr), hex('0x' . $high_addr));
+ }
+ }
+ }
+}
+
+# Print symbols and profile data
+sub PrintSymbolizedProfile {
+ my $symbols = shift;
+ my $profile = shift;
+ my $prog = shift;
+
+ $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $symbol_marker = $&;
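+ # e.g. "/pprof/symbol" yields the marker "symbol".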
+
+ print '--- ', $symbol_marker, "\n";
+ if (defined($prog)) {
+ print 'binary=', $prog, "\n";
+ }
+ while (my ($pc, $name) = each(%{$symbols})) {
+ my $sep = ' ';
+ print '0x', $pc;
+ # We have a list of function names, which include the inlined
+ # calls. They are separated (and terminated) by --, which is
+ # illegal in function names.
+ for (my $j = 2; $j <= $#{$name}; $j += 3) {
+ print $sep, $name->[$j];
+ $sep = '--';
+ }
+ print "\n";
+ }
+ print '---', "\n";
+
+ my $profile_marker;
+ if ($main::profile_type eq 'heap') {
+ $HEAP_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ $profile_marker = $&;
+ } elsif ($main::profile_type eq 'growth') {
+ $GROWTH_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ $profile_marker = $&;
+ } elsif ($main::profile_type eq 'contention') {
+ $CONTENTION_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ $profile_marker = $&;
+ } else { # elsif ($main::profile_type eq 'cpu')
+ $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ $profile_marker = $&;
+ }
+
+ print '--- ', $profile_marker, "\n";
+ if (defined($main::collected_profile)) {
+ # if used with remote fetch, simply dump the collected profile to output.
+ open(SRC, "<$main::collected_profile");
+ while (<SRC>) {
+ print $_;
+ }
+ close(SRC);
+ } else {
+ # --raw/http: For everything to work correctly for non-remote profiles, we
+ # would need to extend PrintProfileData() to handle all possible profile
+ # types, re-enable the code that is currently disabled in ReadCPUProfile()
+ # and FixCallerAddresses(), and remove the remote profile dumping code in
+ # the block above.
+ die "--raw/http: jeprof can only dump remote profiles for --raw\n";
+ # dump a cpu-format profile to standard out
+ PrintProfileData($profile);
+ }
+}
+
+# Print text output
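+# Columns are: flat count, flat %, running sum of flat %, cumulative
+# count, cumulative %, and symbol name.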
+sub PrintText {
+ my $symbols = shift;
+ my $flat = shift;
+ my $cumulative = shift;
+ my $line_limit = shift;
+
+ my $total = TotalProfile($flat);
+
+ # Which profile to sort by?
+ my $s = $main::opt_cum ? $cumulative : $flat;
+
+ my $running_sum = 0;
+ my $lines = 0;
+ foreach my $k (sort { GetEntry($s, $b) <=> GetEntry($s, $a) || $a cmp $b }
+ keys(%{$cumulative})) {
+ my $f = GetEntry($flat, $k);
+ my $c = GetEntry($cumulative, $k);
+ $running_sum += $f;
+
+ my $sym = $k;
+ if (exists($symbols->{$k})) {
+ $sym = $symbols->{$k}->[0] . " " . $symbols->{$k}->[1];
+ if ($main::opt_addresses) {
+ $sym = $k . " " . $sym;
+ }
+ }
+
+ if ($f != 0 || $c != 0) {
+ printf("%8s %6s %6s %8s %6s %s\n",
+ Unparse($f),
+ Percent($f, $total),
+ Percent($running_sum, $total),
+ Unparse($c),
+ Percent($c, $total),
+ $sym);
+ }
+ $lines++;
+ last if ($line_limit >= 0 && $lines >= $line_limit);
+ }
+}
+
+# Callgrind format has a compression for repeated function and file
+# names. You show the name the first time, and just use its number
+# subsequently. This can cut down the file to about a third or a
+# quarter of its uncompressed size. $key and $val are the key/value
+# pair that would normally be printed by callgrind; $map is a map from
+# value to number.
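+# For example (hypothetical names), CompressedCGName("fl", "foo.cc", \%m)
+# returns "fl=(1) foo.cc\n" on the first call and "fl=(1)\n" on later
+# calls with the same value; a short value such as "a.c" is always
+# emitted verbatim as "fl=a.c\n".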
+sub CompressedCGName {
+ my($key, $val, $map) = @_;
+ my $idx = $map->{$val};
+ # For very short keys, providing an index hurts rather than helps.
+ if (length($val) <= 3) {
+ return "$key=$val\n";
+ } elsif (defined($idx)) {
+ return "$key=($idx)\n";
+ } else {
+ # scalar(keys(%{$map})) gives the number of items in the map.
+ $idx = scalar(keys(%{$map})) + 1;
+ $map->{$val} = $idx;
+ return "$key=($idx) $val\n";
+ }
+}
+
+# Print the call graph in a way that's suitable for callgrind.
+sub PrintCallgrind {
+ my $calls = shift;
+ my $filename;
+ my %filename_to_index_map;
+ my %fnname_to_index_map;
+
+ if ($main::opt_interactive) {
+ $filename = shift;
+ print STDERR "Writing callgrind file to '$filename'.\n"
+ } else {
+ $filename = "&STDOUT";
+ }
+ open(CG, ">$filename");
+ printf CG ("events: Hits\n\n");
+ foreach my $call ( map { $_->[0] }
+ sort { $a->[1] cmp $b ->[1] ||
+ $a->[2] <=> $b->[2] }
+ map { /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/;
+ [$_, $1, $2] }
+ keys %$calls ) {
+ my $count = int($calls->{$call});
+ $call =~ /([^:]+):(\d+):([^ ]+)( -> ([^:]+):(\d+):(.+))?/;
+ my ( $caller_file, $caller_line, $caller_function,
+ $callee_file, $callee_line, $callee_function ) =
+ ( $1, $2, $3, $5, $6, $7 );
+
+ # TODO(csilvers): for better compression, collect all the
+ # caller/callee_files and functions first, before printing
+ # anything, and only compress those referenced more than once.
+ printf CG CompressedCGName("fl", $caller_file, \%filename_to_index_map);
+ printf CG CompressedCGName("fn", $caller_function, \%fnname_to_index_map);
+ if (defined $6) {
+ printf CG CompressedCGName("cfl", $callee_file, \%filename_to_index_map);
+ printf CG CompressedCGName("cfn", $callee_function, \%fnname_to_index_map);
+ printf CG ("calls=$count $callee_line\n");
+ }
+ printf CG ("$caller_line $count\n\n");
+ }
+}
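+
+# A minimal sketch of the emitted format, for one hypothetical call site
+# foo.cc:42:foo() -> bar.cc:10:bar() with 3 hits:
+#
+#   events: Hits
+#
+#   fl=(1) foo.cc
+#   fn=(1) foo()
+#   cfl=(2) bar.cc
+#   cfn=(2) bar()
+#   calls=3 10
+#   42 3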
+
+# Print disassembly for all routines that match $main::opt_disasm
+sub PrintDisassembly {
+ my $libs = shift;
+ my $flat = shift;
+ my $cumulative = shift;
+ my $disasm_opts = shift;
+
+ my $total = TotalProfile($flat);
+
+ foreach my $lib (@{$libs}) {
+ my $symbol_table = GetProcedureBoundaries($lib->[0], $disasm_opts);
+ my $offset = AddressSub($lib->[1], $lib->[3]);
+ foreach my $routine (sort ByName keys(%{$symbol_table})) {
+ my $start_addr = $symbol_table->{$routine}->[0];
+ my $end_addr = $symbol_table->{$routine}->[1];
+ # See if there are any samples in this routine
+ my $length = hex(AddressSub($end_addr, $start_addr));
+ my $addr = AddressAdd($start_addr, $offset);
+ for (my $i = 0; $i < $length; $i++) {
+ if (defined($cumulative->{$addr})) {
+ PrintDisassembledFunction($lib->[0], $offset,
+ $routine, $flat, $cumulative,
+ $start_addr, $end_addr, $total);
+ last;
+ }
+ $addr = AddressInc($addr);
+ }
+ }
+ }
+}
+
+# Return reference to array of tuples of the form:
+# [start_address, filename, linenumber, instruction, limit_address]
+# E.g.,
+# ["0x806c43d", "/foo/bar.cc", 131, "ret", "0x806c440"]
+sub Disassemble {
+ my $prog = shift;
+ my $offset = shift;
+ my $start_addr = shift;
+ my $end_addr = shift;
+
+ my $objdump = $obj_tool_map{"objdump"};
+ my $cmd = ShellEscape($objdump, "-C", "-d", "-l", "--no-show-raw-insn",
+ "--start-address=0x$start_addr",
+ "--stop-address=0x$end_addr", $prog);
+ open(OBJDUMP, "$cmd |") || error("$cmd: $!\n");
+ my @result = ();
+ my $filename = "";
+ my $linenumber = -1;
+ my $last = ["", "", "", ""];
+ while (<OBJDUMP>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ chop;
+ if (m|\s*([^:\s]+):(\d+)\s*$|) {
+ # Location line of the form:
+ # <filename>:<linenumber>
+ $filename = $1;
+ $linenumber = $2;
+ } elsif (m/^ +([0-9a-f]+):\s*(.*)/) {
+ # Disassembly line -- zero-extend address to full length
+ my $addr = HexExtend($1);
+ my $k = AddressAdd($addr, $offset);
+ $last->[4] = $k; # Store ending address for previous instruction
+ $last = [$k, $filename, $linenumber, $2, $end_addr];
+ push(@result, $last);
+ }
+ }
+ close(OBJDUMP);
+ return @result;
+}
+
+# The input file should contain lines of the form /proc/maps-like
+# output (same format as expected from the profiles) or that looks
+# like hex addresses (like "0xDEADBEEF"). We will parse all
+# /proc/maps output, and for all the hex addresses, we will output
+# "short" symbol names, one per line, in the same order as the input.
+sub PrintSymbols {
+ my $maps_and_symbols_file = shift;
+
+ # ParseLibraries expects pcs to be in a set. Fine by us...
+ my @pclist = (); # pcs in sorted order
+ my $pcs = {};
+ my $map = "";
+ foreach my $line (<$maps_and_symbols_file>) {
+ $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines
+ if ($line =~ /\b(0x[0-9a-f]+)\b/i) {
+ push(@pclist, HexExtend($1));
+ $pcs->{$pclist[-1]} = 1;
+ } else {
+ $map .= $line;
+ }
+ }
+
+ my $libs = ParseLibraries($main::prog, $map, $pcs);
+ my $symbols = ExtractSymbols($libs, $pcs);
+
+ foreach my $pc (@pclist) {
+ # ->[0] is the shortname, ->[2] is the full name
+ print(($symbols->{$pc}->[0] || "??") . "\n");
+ }
+}
+
+
+# For sorting functions by name
+sub ByName {
+ return ShortFunctionName($a) cmp ShortFunctionName($b);
+}
+
+# Print source listing for all routines that match $list_opts
+sub PrintListing {
+ my $total = shift;
+ my $libs = shift;
+ my $flat = shift;
+ my $cumulative = shift;
+ my $list_opts = shift;
+ my $html = shift;
+
+ my $output = \*STDOUT;
+ my $fname = "";
+
+ if ($html) {
+ # Arrange to write the output to a temporary file
+ $fname = TempName($main::next_tmpfile, "html");
+ $main::next_tmpfile++;
+ if (!open(TEMP, ">$fname")) {
+ print STDERR "$fname: $!\n";
+ return;
+ }
+ $output = \*TEMP;
+ print $output HtmlListingHeader();
+ printf $output ("<div class=\"legend\">%s<br>Total: %s %s</div>\n",
+ $main::prog, Unparse($total), Units());
+ }
+
+ my $listed = 0;
+ foreach my $lib (@{$libs}) {
+ my $symbol_table = GetProcedureBoundaries($lib->[0], $list_opts);
+ my $offset = AddressSub($lib->[1], $lib->[3]);
+ foreach my $routine (sort ByName keys(%{$symbol_table})) {
+ # Print if there are any samples in this routine
+ my $start_addr = $symbol_table->{$routine}->[0];
+ my $end_addr = $symbol_table->{$routine}->[1];
+ my $length = hex(AddressSub($end_addr, $start_addr));
+ my $addr = AddressAdd($start_addr, $offset);
+ for (my $i = 0; $i < $length; $i++) {
+ if (defined($cumulative->{$addr})) {
+ $listed += PrintSource(
+ $lib->[0], $offset,
+ $routine, $flat, $cumulative,
+ $start_addr, $end_addr,
+ $html,
+ $output);
+ last;
+ }
+ $addr = AddressInc($addr);
+ }
+ }
+ }
+
+ if ($html) {
+ if ($listed > 0) {
+ print $output HtmlListingFooter();
+ close($output);
+ RunWeb($fname);
+ } else {
+ close($output);
+ unlink($fname);
+ }
+ }
+}
+
+sub HtmlListingHeader {
+ return <<'EOF';
+<!DOCTYPE html>
+<html>
+<head>
+<title>Pprof listing</title>
+<style type="text/css">
+body {
+ font-family: sans-serif;
+}
+h1 {
+ font-size: 1.5em;
+ margin-bottom: 4px;
+}
+.legend {
+ font-size: 1.25em;
+}
+.line {
+ color: #aaaaaa;
+}
+.nop {
+ color: #aaaaaa;
+}
+.unimportant {
+ color: #cccccc;
+}
+.disasmloc {
+ color: #000000;
+}
+.deadsrc {
+ cursor: pointer;
+}
+.deadsrc:hover {
+ background-color: #eeeeee;
+}
+.livesrc {
+ color: #0000ff;
+ cursor: pointer;
+}
+.livesrc:hover {
+ background-color: #eeeeee;
+}
+.asm {
+ color: #008800;
+ display: none;
+}
+</style>
+<script type="text/javascript">
+function jeprof_toggle_asm(e) {
+ var target;
+ if (!e) e = window.event;
+ if (e.target) target = e.target;
+ else if (e.srcElement) target = e.srcElement;
+
+ if (target) {
+ var asm = target.nextSibling;
+ if (asm && asm.className == "asm") {
+ asm.style.display = (asm.style.display == "block" ? "" : "block");
+ e.preventDefault();
+ return false;
+ }
+ }
+}
+</script>
+</head>
+<body>
+EOF
+}
+
+sub HtmlListingFooter {
+ return <<'EOF';
+</body>
+</html>
+EOF
+}
+
+sub HtmlEscape {
+ my $text = shift;
+ $text =~ s/&/&amp;/g;
+ $text =~ s/</&lt;/g;
+ $text =~ s/>/&gt;/g;
+ return $text;
+}
+
+# Returns the indentation of the line, if it has any non-whitespace
+# characters. Otherwise, returns -1.
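+# For example, Indentation("  x") returns 2, while Indentation("   ")
+# returns -1.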
+sub Indentation {
+ my $line = shift;
+ if ($line =~ m/^(\s*)\S/) {
+ return length($1);
+ } else {
+ return -1;
+ }
+}
+
+# If the symbol table contains inlining info, Disassemble() may tag an
+# instruction with a location inside an inlined function. But for
+# source listings, we prefer to use the location in the function we
+# are listing. So use MapToSymbols() to fetch full location
+# information for each instruction and then pick out the first
+# location from a location list (location list contains callers before
+# callees in case of inlining).
+#
+# After this routine has run, each entry in $instructions contains:
+# [0] start address
+# [1] filename for function we are listing
+# [2] line number for function we are listing
+# [3] disassembly
+# [4] limit address
+# [5] most specific filename (may be different from [1] due to inlining)
+# [6] most specific line number (may be different from [2] due to inlining)
+sub GetTopLevelLineNumbers {
+ my ($lib, $offset, $instructions) = @_;
+ my $pcs = [];
+ for (my $i = 0; $i <= $#{$instructions}; $i++) {
+ push(@{$pcs}, $instructions->[$i]->[0]);
+ }
+ my $symbols = {};
+ MapToSymbols($lib, $offset, $pcs, $symbols);
+ for (my $i = 0; $i <= $#{$instructions}; $i++) {
+ my $e = $instructions->[$i];
+ push(@{$e}, $e->[1]);
+ push(@{$e}, $e->[2]);
+ my $addr = $e->[0];
+ my $sym = $symbols->{$addr};
+ if (defined($sym)) {
+ if ($#{$sym} >= 2 && $sym->[1] =~ m/^(.*):(\d+)$/) {
+ $e->[1] = $1; # File name
+ $e->[2] = $2; # Line number
+ }
+ }
+ }
+}
+
+# Print source-listing for one routine
+sub PrintSource {
+ my $prog = shift;
+ my $offset = shift;
+ my $routine = shift;
+ my $flat = shift;
+ my $cumulative = shift;
+ my $start_addr = shift;
+ my $end_addr = shift;
+ my $html = shift;
+ my $output = shift;
+
+ # Disassemble all instructions (just to get line numbers)
+ my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr);
+ GetTopLevelLineNumbers($prog, $offset, \@instructions);
+
+ # Hack 1: assume that the first source file encountered in the
+ # disassembly contains the routine
+ my $filename = undef;
+ for (my $i = 0; $i <= $#instructions; $i++) {
+ if ($instructions[$i]->[2] >= 0) {
+ $filename = $instructions[$i]->[1];
+ last;
+ }
+ }
+ if (!defined($filename)) {
+ print STDERR "no filename found in $routine\n";
+ return 0;
+ }
+
+ # Hack 2: assume that the largest line number from $filename is the
+ # end of the procedure. This is typically safe since if P1 contains
+ # an inlined call to P2, then P2 usually occurs earlier in the
+ # source file. If this does not work, we might have to compute a
+ # density profile or just print all regions we find.
+ my $lastline = 0;
+ for (my $i = 0; $i <= $#instructions; $i++) {
+ my $f = $instructions[$i]->[1];
+ my $l = $instructions[$i]->[2];
+ if (($f eq $filename) && ($l > $lastline)) {
+ $lastline = $l;
+ }
+ }
+
+ # Hack 3: assume the first source location from "filename" is the start of
+ # the source code.
+ my $firstline = 1;
+ for (my $i = 0; $i <= $#instructions; $i++) {
+ if ($instructions[$i]->[1] eq $filename) {
+ $firstline = $instructions[$i]->[2];
+ last;
+ }
+ }
+
+ # Hack 4: Extend last line forward until its indentation is less than
+ # the indentation we saw on $firstline
+ my $oldlastline = $lastline;
+ {
+ if (!open(FILE, "<$filename")) {
+ print STDERR "$filename: $!\n";
+ return 0;
+ }
+ my $l = 0;
+ my $first_indentation = -1;
+ while (<FILE>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ $l++;
+ my $indent = Indentation($_);
+ if ($l >= $firstline) {
+ if ($first_indentation < 0 && $indent >= 0) {
+ $first_indentation = $indent;
+ last if ($first_indentation == 0);
+ }
+ }
+ if ($l >= $lastline && $indent >= 0) {
+ if ($indent >= $first_indentation) {
+ $lastline = $l+1;
+ } else {
+ last;
+ }
+ }
+ }
+ close(FILE);
+ }
+
+ # Assign all samples to the range $firstline..$lastline.
+ # Hack 5: If an instruction does not occur in the range, its samples
+ # are moved to the next instruction that occurs in the range.
+ my $samples1 = {}; # Map from line number to flat count
+ my $samples2 = {}; # Map from line number to cumulative count
+ my $running1 = 0; # Unassigned flat counts
+ my $running2 = 0; # Unassigned cumulative counts
+ my $total1 = 0; # Total flat counts
+ my $total2 = 0; # Total cumulative counts
+ my %disasm = (); # Map from line number to disassembly
+ my $running_disasm = ""; # Unassigned disassembly
+ my $skip_marker = "---\n";
+ if ($html) {
+ $skip_marker = "";
+ for (my $l = $firstline; $l <= $lastline; $l++) {
+ $disasm{$l} = "";
+ }
+ }
+ my $last_dis_filename = '';
+ my $last_dis_linenum = -1;
+ my $last_touched_line = -1; # To detect gaps in disassembly for a line
+ foreach my $e (@instructions) {
+ # Add up counts for all addresses that fall inside this instruction
+ my $c1 = 0;
+ my $c2 = 0;
+ for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) {
+ $c1 += GetEntry($flat, $a);
+ $c2 += GetEntry($cumulative, $a);
+ }
+
+ if ($html) {
+ my $dis = sprintf(" %6s %6s \t\t%8s: %s ",
+ HtmlPrintNumber($c1),
+ HtmlPrintNumber($c2),
+ UnparseAddress($offset, $e->[0]),
+ CleanDisassembly($e->[3]));
+
+ # Append the most specific source line associated with this instruction
+ if (length($dis) < 80) { $dis .= (' ' x (80 - length($dis))) };
+ $dis = HtmlEscape($dis);
+ my $f = $e->[5];
+ my $l = $e->[6];
+ if ($f ne $last_dis_filename) {
+ $dis .= sprintf("<span class=disasmloc>%s:%d</span>",
+ HtmlEscape(CleanFileName($f)), $l);
+ } elsif ($l ne $last_dis_linenum) {
+ # De-emphasize the unchanged file name portion
+ $dis .= sprintf("<span class=unimportant>%s</span>" .
+ "<span class=disasmloc>:%d</span>",
+ HtmlEscape(CleanFileName($f)), $l);
+ } else {
+ # De-emphasize the entire location
+ $dis .= sprintf("<span class=unimportant>%s:%d</span>",
+ HtmlEscape(CleanFileName($f)), $l);
+ }
+ $last_dis_filename = $f;
+ $last_dis_linenum = $l;
+ $running_disasm .= $dis;
+ $running_disasm .= "\n";
+ }
+
+ $running1 += $c1;
+ $running2 += $c2;
+ $total1 += $c1;
+ $total2 += $c2;
+ my $file = $e->[1];
+ my $line = $e->[2];
+ if (($file eq $filename) &&
+ ($line >= $firstline) &&
+ ($line <= $lastline)) {
+ # Assign all accumulated samples to this line
+ AddEntry($samples1, $line, $running1);
+ AddEntry($samples2, $line, $running2);
+ $running1 = 0;
+ $running2 = 0;
+ if ($html) {
+ if ($line != $last_touched_line && $disasm{$line} ne '') {
+ $disasm{$line} .= "\n";
+ }
+ $disasm{$line} .= $running_disasm;
+ $running_disasm = '';
+ $last_touched_line = $line;
+ }
+ }
+ }
+
+ # Assign any leftover samples to $lastline
+ AddEntry($samples1, $lastline, $running1);
+ AddEntry($samples2, $lastline, $running2);
+ if ($html) {
+ if ($lastline != $last_touched_line && $disasm{$lastline} ne '') {
+ $disasm{$lastline} .= "\n";
+ }
+ $disasm{$lastline} .= $running_disasm;
+ }
+
+ if ($html) {
+ printf $output (
+ "<h1>%s</h1>%s\n<pre onClick=\"jeprof_toggle_asm()\">\n" .
+ "Total:%6s %6s (flat / cumulative %s)\n",
+ HtmlEscape(ShortFunctionName($routine)),
+ HtmlEscape(CleanFileName($filename)),
+ Unparse($total1),
+ Unparse($total2),
+ Units());
+ } else {
+ printf $output (
+ "ROUTINE ====================== %s in %s\n" .
+ "%6s %6s Total %s (flat / cumulative)\n",
+ ShortFunctionName($routine),
+ CleanFileName($filename),
+ Unparse($total1),
+ Unparse($total2),
+ Units());
+ }
+ if (!open(FILE, "<$filename")) {
+ print STDERR "$filename: $!\n";
+ return 0;
+ }
+ my $l = 0;
+ while (<FILE>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ $l++;
+ if ($l >= $firstline - 5 &&
+ (($l <= $oldlastline + 5) || ($l <= $lastline))) {
+ chop;
+ my $text = $_;
+ if ($l == $firstline) { print $output $skip_marker; }
+ my $n1 = GetEntry($samples1, $l);
+ my $n2 = GetEntry($samples2, $l);
+ if ($html) {
+ # Emit a span that has one of the following classes:
+ # livesrc -- has samples
+ # deadsrc -- has disassembly, but with no samples
+ # nop -- has no matching disassembly
+ # Also emit an optional span containing disassembly.
+ my $dis = $disasm{$l};
+ my $asm = "";
+ if (defined($dis) && $dis ne '') {
+ $asm = "<span class=\"asm\">" . $dis . "</span>";
+ }
+ my $source_class = (($n1 + $n2 > 0)
+ ? "livesrc"
+ : (($asm ne "") ? "deadsrc" : "nop"));
+ printf $output (
+ "<span class=\"line\">%5d</span> " .
+ "<span class=\"%s\">%6s %6s %s</span>%s\n",
+ $l, $source_class,
+ HtmlPrintNumber($n1),
+ HtmlPrintNumber($n2),
+ HtmlEscape($text),
+ $asm);
+ } else {
+ printf $output (
+ "%6s %6s %4d: %s\n",
+ UnparseAlt($n1),
+ UnparseAlt($n2),
+ $l,
+ $text);
+ }
+ if ($l == $lastline) { print $output $skip_marker; }
+ };
+ }
+ close(FILE);
+ if ($html) {
+ print $output "</pre>\n";
+ }
+ return 1;
+}
+
+# Return the source line for the specified file/linenumber.
+# Returns undef if not found.
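+# For example, SourceLine("foo.cc", 12) returns the 12th (1-based) line
+# of foo.cc, or undef if the file cannot be read or has fewer lines.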
+sub SourceLine {
+ my $file = shift;
+ my $line = shift;
+
+ # Look in cache
+ if (!defined($main::source_cache{$file})) {
+ if (100 < scalar keys(%main::source_cache)) {
+ # Clear the cache when it gets too big
+ %main::source_cache = ();
+ }
+
+ # Read all lines from the file
+ if (!open(FILE, "<$file")) {
+ print STDERR "$file: $!\n";
+ $main::source_cache{$file} = []; # Cache the negative result
+ return undef;
+ }
+ my $lines = [];
+ push(@{$lines}, ""); # So we can use 1-based line numbers as indices
+ while (<FILE>) {
+ push(@{$lines}, $_);
+ }
+ close(FILE);
+
+ # Save the lines in the cache
+ $main::source_cache{$file} = $lines;
+ }
+
+ my $lines = $main::source_cache{$file};
+ if (($line < 0) || ($line > $#{$lines})) {
+ return undef;
+ } else {
+ return $lines->[$line];
+ }
+}
+
+# Print disassembly for one routine with interspersed source if available
+sub PrintDisassembledFunction {
+ my $prog = shift;
+ my $offset = shift;
+ my $routine = shift;
+ my $flat = shift;
+ my $cumulative = shift;
+ my $start_addr = shift;
+ my $end_addr = shift;
+ my $total = shift;
+
+ # Disassemble all instructions
+ my @instructions = Disassemble($prog, $offset, $start_addr, $end_addr);
+
+ # Make array of counts per instruction
+ my @flat_count = ();
+ my @cum_count = ();
+ my $flat_total = 0;
+ my $cum_total = 0;
+ foreach my $e (@instructions) {
+ # Add up counts for all addresses that fall inside this instruction
+ my $c1 = 0;
+ my $c2 = 0;
+ for (my $a = $e->[0]; $a lt $e->[4]; $a = AddressInc($a)) {
+ $c1 += GetEntry($flat, $a);
+ $c2 += GetEntry($cumulative, $a);
+ }
+ push(@flat_count, $c1);
+ push(@cum_count, $c2);
+ $flat_total += $c1;
+ $cum_total += $c2;
+ }
+
+ # Print header with total counts
+ printf("ROUTINE ====================== %s\n" .
+ "%6s %6s %s (flat, cumulative) %.1f%% of total\n",
+ ShortFunctionName($routine),
+ Unparse($flat_total),
+ Unparse($cum_total),
+ Units(),
+ ($cum_total * 100.0) / $total);
+
+ # Process instructions in order
+ my $current_file = "";
+ for (my $i = 0; $i <= $#instructions; ) {
+ my $e = $instructions[$i];
+
+ # Print the new file name whenever we switch files
+ if ($e->[1] ne $current_file) {
+ $current_file = $e->[1];
+ my $fname = $current_file;
+ $fname =~ s|^\./||; # Trim leading "./"
+
+ # Shorten long file names
+ if (length($fname) >= 58) {
+ $fname = "..." . substr($fname, -55);
+ }
+ printf("-------------------- %s\n", $fname);
+ }
+
+ # TODO: Compute range of lines to print together to deal with
+ # small reorderings.
+ my $first_line = $e->[2];
+ my $last_line = $first_line;
+ my %flat_sum = ();
+ my %cum_sum = ();
+ for (my $l = $first_line; $l <= $last_line; $l++) {
+ $flat_sum{$l} = 0;
+ $cum_sum{$l} = 0;
+ }
+
+ # Find run of instructions for this range of source lines
+ my $first_inst = $i;
+ while (($i <= $#instructions) &&
+ ($instructions[$i]->[2] >= $first_line) &&
+ ($instructions[$i]->[2] <= $last_line)) {
+ $e = $instructions[$i];
+ $flat_sum{$e->[2]} += $flat_count[$i];
+ $cum_sum{$e->[2]} += $cum_count[$i];
+ $i++;
+ }
+ my $last_inst = $i - 1;
+
+ # Print source lines
+ for (my $l = $first_line; $l <= $last_line; $l++) {
+ my $line = SourceLine($current_file, $l);
+ if (!defined($line)) {
+ $line = "?\n";
+ next;
+ } else {
+ $line =~ s/^\s+//;
+ }
+ printf("%6s %6s %5d: %s",
+ UnparseAlt($flat_sum{$l}),
+ UnparseAlt($cum_sum{$l}),
+ $l,
+ $line);
+ }
+
+ # Print disassembly
+ for (my $x = $first_inst; $x <= $last_inst; $x++) {
+ my $e = $instructions[$x];
+ printf("%6s %6s %8s: %6s\n",
+ UnparseAlt($flat_count[$x]),
+ UnparseAlt($cum_count[$x]),
+ UnparseAddress($offset, $e->[0]),
+ CleanDisassembly($e->[3]));
+ }
+ }
+}
+
+# Print DOT graph
+sub PrintDot {
+ my $prog = shift;
+ my $symbols = shift;
+ my $raw = shift;
+ my $flat = shift;
+ my $cumulative = shift;
+ my $overall_total = shift;
+
+ # Get total
+ my $local_total = TotalProfile($flat);
+ my $nodelimit = int($main::opt_nodefraction * $local_total);
+ my $edgelimit = int($main::opt_edgefraction * $local_total);
+ my $nodecount = $main::opt_nodecount;
+
+ # Find nodes to include
+ my @list = (sort { abs(GetEntry($cumulative, $b)) <=>
+ abs(GetEntry($cumulative, $a))
+ || $a cmp $b }
+ keys(%{$cumulative}));
+ my $last = $nodecount - 1;
+ if ($last > $#list) {
+ $last = $#list;
+ }
+ while (($last >= 0) &&
+ (abs(GetEntry($cumulative, $list[$last])) <= $nodelimit)) {
+ $last--;
+ }
+ if ($last < 0) {
+ print STDERR "No nodes to print\n";
+ return 0;
+ }
+
+ if ($nodelimit > 0 || $edgelimit > 0) {
+ printf STDERR ("Dropping nodes with <= %s %s; edges with <= %s abs(%s)\n",
+ Unparse($nodelimit), Units(),
+ Unparse($edgelimit), Units());
+ }
+
+ # Open DOT output file
+ my $output;
+ my $escaped_dot = ShellEscape(@DOT);
+ my $escaped_ps2pdf = ShellEscape(@PS2PDF);
+ if ($main::opt_gv) {
+ my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "ps"));
+ $output = "| $escaped_dot -Tps2 >$escaped_outfile";
+ } elsif ($main::opt_evince) {
+ my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "pdf"));
+ $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - $escaped_outfile";
+ } elsif ($main::opt_ps) {
+ $output = "| $escaped_dot -Tps2";
+ } elsif ($main::opt_pdf) {
+ $output = "| $escaped_dot -Tps2 | $escaped_ps2pdf - -";
+ } elsif ($main::opt_web || $main::opt_svg) {
+ # We need to post-process the SVG, so write to a temporary file always.
+ my $escaped_outfile = ShellEscape(TempName($main::next_tmpfile, "svg"));
+ $output = "| $escaped_dot -Tsvg >$escaped_outfile";
+ } elsif ($main::opt_gif) {
+ $output = "| $escaped_dot -Tgif";
+ } else {
+ $output = ">&STDOUT";
+ }
+ open(DOT, $output) || error("$output: $!\n");
+
+ # Title
+ printf DOT ("digraph \"%s; %s %s\" {\n",
+ $prog,
+ Unparse($overall_total),
+ Units());
+ if ($main::opt_pdf) {
+ # The output is more printable if we set the page size for dot.
+ printf DOT ("size=\"8,11\"\n");
+ }
+ printf DOT ("node [width=0.375,height=0.25];\n");
+
+ # Print legend
+ printf DOT ("Legend [shape=box,fontsize=24,shape=plaintext," .
+ "label=\"%s\\l%s\\l%s\\l%s\\l%s\\l\"];\n",
+ $prog,
+ sprintf("Total %s: %s", Units(), Unparse($overall_total)),
+ sprintf("Focusing on: %s", Unparse($local_total)),
+ sprintf("Dropped nodes with <= %s abs(%s)",
+ Unparse($nodelimit), Units()),
+ sprintf("Dropped edges with <= %s %s",
+ Unparse($edgelimit), Units())
+ );
+
+ # Print nodes
+ my %node = ();
+ my $nextnode = 1;
+ foreach my $a (@list[0..$last]) {
+ # Pick font size
+ my $f = GetEntry($flat, $a);
+ my $c = GetEntry($cumulative, $a);
+
+ my $fs = 8;
+ if ($local_total > 0) {
+ $fs = 8 + (50.0 * sqrt(abs($f * 1.0 / $local_total)));
+ }
+
+ $node{$a} = $nextnode++;
+ my $sym = $a;
+ $sym =~ s/\s+/\\n/g;
+ $sym =~ s/::/\\n/g;
+
+ # Extra cumulative info to print for non-leaves
+ my $extra = "";
+ if ($f != $c) {
+ $extra = sprintf("\\rof %s (%s)",
+ Unparse($c),
+ Percent($c, $local_total));
+ }
+ my $style = "";
+ if ($main::opt_heapcheck) {
+ if ($f > 0) {
+ # make leak-causing nodes more visible (add a background)
+ $style = ",style=filled,fillcolor=gray"
+ } elsif ($f < 0) {
+ # make anti-leak-causing nodes (which almost never occur)
+ # stand out as well (triple border)
+ $style = ",peripheries=3"
+ }
+ }
+
+ printf DOT ("N%d [label=\"%s\\n%s (%s)%s\\r" .
+ "\",shape=box,fontsize=%.1f%s];\n",
+ $node{$a},
+ $sym,
+ Unparse($f),
+ Percent($f, $local_total),
+ $extra,
+ $fs,
+ $style,
+ );
+ }
+
+ # Get edges and counts per edge
+ my %edge = ();
+ my $n;
+ my $fullname_to_shortname_map = {};
+ FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map);
+ foreach my $k (keys(%{$raw})) {
+ # TODO: omit low %age edges
+ $n = $raw->{$k};
+ my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k);
+ for (my $i = 1; $i <= $#translated; $i++) {
+ my $src = $translated[$i];
+ my $dst = $translated[$i-1];
+ #next if ($src eq $dst); # Avoid self-edges?
+ if (exists($node{$src}) && exists($node{$dst})) {
+ my $edge_label = "$src\001$dst";
+ if (!exists($edge{$edge_label})) {
+ $edge{$edge_label} = 0;
+ }
+ $edge{$edge_label} += $n;
+ }
+ }
+ }
+
+ # Print edges (process in order of decreasing counts)
+ my %indegree = (); # Number of incoming edges added per node so far
+ my %outdegree = (); # Number of outgoing edges added per node so far
+ foreach my $e (sort { $edge{$b} <=> $edge{$a} } keys(%edge)) {
+ my @x = split(/\001/, $e);
+ $n = $edge{$e};
+
+ # Initialize degree of kept incoming and outgoing edges if necessary
+ my $src = $x[0];
+ my $dst = $x[1];
+ if (!exists($outdegree{$src})) { $outdegree{$src} = 0; }
+ if (!exists($indegree{$dst})) { $indegree{$dst} = 0; }
+
+ my $keep;
+ if ($indegree{$dst} == 0) {
+ # Keep edge if needed for reachability
+ $keep = 1;
+ } elsif (abs($n) <= $edgelimit) {
+ # Drop if we are below --edgefraction
+ $keep = 0;
+ } elsif ($outdegree{$src} >= $main::opt_maxdegree ||
+ $indegree{$dst} >= $main::opt_maxdegree) {
+ # Keep limited number of in/out edges per node
+ $keep = 0;
+ } else {
+ $keep = 1;
+ }
+
+ if ($keep) {
+ $outdegree{$src}++;
+ $indegree{$dst}++;
+
+ # Compute line width based on edge count
+ my $fraction = abs($local_total ? (3 * ($n / $local_total)) : 0);
+ if ($fraction > 1) { $fraction = 1; }
+ my $w = $fraction * 2;
+ if ($w < 1 && ($main::opt_web || $main::opt_svg)) {
+ # SVG output treats line widths < 1 poorly.
+ $w = 1;
+ }
+
+ # Dot sometimes segfaults if given edge weights that are too large, so
+ # we cap the weights at a large value
+ my $edgeweight = abs($n) ** 0.7;
+ if ($edgeweight > 100000) { $edgeweight = 100000; }
+ $edgeweight = int($edgeweight);
+
+ my $style = sprintf("setlinewidth(%f)", $w);
+ if ($x[1] =~ m/\(inline\)/) {
+ $style .= ",dashed";
+ }
+
+ # Use a slightly squashed function of the edge count as the weight
+ printf DOT ("N%s -> N%s [label=%s, weight=%d, style=\"%s\"];\n",
+ $node{$x[0]},
+ $node{$x[1]},
+ Unparse($n),
+ $edgeweight,
+ $style);
+ }
+ }
+
+ print DOT ("}\n");
+ close(DOT);
+
+ if ($main::opt_web || $main::opt_svg) {
+ # Rewrite SVG to be more usable inside web browser.
+ RewriteSvg(TempName($main::next_tmpfile, "svg"));
+ }
+
+ return 1;
+}
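+
+# Sketch of the emitted DOT, with hypothetical names and counts (not taken
+# from a real profile); a node with flat=10 and cum=100 out of 100 total
+# samples would come out roughly as:
+#   digraph "prog; 100 samples" {
+#   N1 [label="main\n10 (10.0%)\rof 100 (100.0%)\r",shape=box,fontsize=23.8];
+#   N1 -> N2 [label=90, weight=23, style="setlinewidth(2.000000)"];
+#   }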
+
+sub RewriteSvg {
+ my $svgfile = shift;
+
+ open(SVG, $svgfile) || die "open temp svg: $!";
+ my @svg = <SVG>;
+ close(SVG);
+ unlink $svgfile;
+ my $svg = join('', @svg);
+
+ # Dot's SVG output is
+ #
+ # <svg width="___" height="___"
+ # viewBox="___" xmlns=...>
+ # <g id="graph0" transform="...">
+ # ...
+ # </g>
+ # </svg>
+ #
+ # Change it to
+ #
+ # <svg width="100%" height="100%"
+ # xmlns=...>
+ # $svg_javascript
+ # <g id="viewport" transform="translate(0,0)">
+ # <g id="graph0" transform="...">
+ # ...
+ # </g>
+ # </g>
+ # </svg>
+
+ # Fix width, height; drop viewBox.
+ $svg =~ s/(?s)<svg width="[^"]+" height="[^"]+"(.*?)viewBox="[^"]+"/<svg width="100%" height="100%"$1/;
+
+ # Insert script, viewport <g> above first <g>
+ my $svg_javascript = SvgJavascript();
+ my $viewport = "<g id=\"viewport\" transform=\"translate(0,0)\">\n";
+ $svg =~ s/<g id="graph\d"/$svg_javascript$viewport$&/;
+
+ # Insert final </g> above </svg>.
+ $svg =~ s/(.*)(<\/svg>)/$1<\/g>$2/;
+ $svg =~ s/<g id="graph\d"(.*?)/<g id="viewport"$1/;
+
+ if ($main::opt_svg) {
+ # --svg: write to standard output.
+ print $svg;
+ } else {
+ # Write back to temporary file.
+ open(SVG, ">$svgfile") || die "open $svgfile: $!";
+ print SVG $svg;
+ close(SVG);
+ }
+}
+
+sub SvgJavascript {
+ return <<'EOF';
+<script type="text/ecmascript"><![CDATA[
+// SVGPan
+// http://www.cyberz.org/blog/2009/12/08/svgpan-a-javascript-svg-panzoomdrag-library/
+// Local modification: if(true || ...) below to force panning, never moving.
+
+/**
+ * SVGPan library 1.2
+ * ====================
+ *
+ * Given a unique existing element with id "viewport", including
+ * the library into any SVG adds the following capabilities:
+ *
+ * - Mouse panning
+ * - Mouse zooming (using the wheel)
+ *  - Object dragging
+ *
+ * Known issues:
+ *
+ * - Zooming (while panning) on Safari has still some issues
+ *
+ * Releases:
+ *
+ * 1.2, Sat Mar 20 08:42:50 GMT 2010, Zeng Xiaohui
+ * Fixed a bug with browser mouse handler interaction
+ *
+ * 1.1, Wed Feb 3 17:39:33 GMT 2010, Zeng Xiaohui
+ * Updated the zoom code to support the mouse wheel on Safari/Chrome
+ *
+ * 1.0, Andrea Leofreddi
+ * First release
+ *
+ * This code is licensed under the following BSD license:
+ *
+ * Copyright 2009-2010 Andrea Leofreddi <a.leofreddi@itcharm.com>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without modification, are
+ * permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this list of
+ * conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice, this list
+ * of conditions and the following disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Andrea Leofreddi ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
+ * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL Andrea Leofreddi OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * The views and conclusions contained in the software and documentation are those of the
+ * authors and should not be interpreted as representing official policies, either expressed
+ * or implied, of Andrea Leofreddi.
+ */
+
+var root = document.documentElement;
+
+var state = 'none', stateTarget, stateOrigin, stateTf;
+
+setupHandlers(root);
+
+/**
+ * Register handlers
+ */
+function setupHandlers(root){
+ setAttributes(root, {
+ "onmouseup" : "add(evt)",
+ "onmousedown" : "handleMouseDown(evt)",
+ "onmousemove" : "handleMouseMove(evt)",
+ "onmouseup" : "handleMouseUp(evt)",
+ //"onmouseout" : "handleMouseUp(evt)", // Decomment this to stop the pan functionality when dragging out of the SVG element
+ });
+
+ if(navigator.userAgent.toLowerCase().indexOf('webkit') >= 0)
+ window.addEventListener('mousewheel', handleMouseWheel, false); // Chrome/Safari
+ else
+ window.addEventListener('DOMMouseScroll', handleMouseWheel, false); // Others
+
+ var g = svgDoc.getElementById("svg");
+ g.width = "100%";
+ g.height = "100%";
+}
+
+/**
+ * Instantiate an SVGPoint object with the given event coordinates.
+ */
+function getEventPoint(evt) {
+ var p = root.createSVGPoint();
+
+ p.x = evt.clientX;
+ p.y = evt.clientY;
+
+ return p;
+}
+
+/**
+ * Sets the current transform matrix of an element.
+ */
+function setCTM(element, matrix) {
+ var s = "matrix(" + matrix.a + "," + matrix.b + "," + matrix.c + "," + matrix.d + "," + matrix.e + "," + matrix.f + ")";
+
+ element.setAttribute("transform", s);
+}
+
+/**
+ * Dumps a matrix to a string (useful for debugging).
+ */
+function dumpMatrix(matrix) {
+ var s = "[ " + matrix.a + ", " + matrix.c + ", " + matrix.e + "\n " + matrix.b + ", " + matrix.d + ", " + matrix.f + "\n 0, 0, 1 ]";
+
+ return s;
+}
+
+/**
+ * Sets attributes of an element.
+ */
+function setAttributes(element, attributes){
+ for (i in attributes)
+ element.setAttributeNS(null, i, attributes[i]);
+}
+
+/**
+ * Handle mouse wheel event.
+ */
+function handleMouseWheel(evt) {
+ if(evt.preventDefault)
+ evt.preventDefault();
+
+ evt.returnValue = false;
+
+ var svgDoc = evt.target.ownerDocument;
+
+ var delta;
+
+ if(evt.wheelDelta)
+ delta = evt.wheelDelta / 3600; // Chrome/Safari
+ else
+ delta = evt.detail / -90; // Mozilla
+
+ var z = 1 + delta; // Zoom factor: 0.9/1.1
+
+ var g = svgDoc.getElementById("viewport");
+
+ var p = getEventPoint(evt);
+
+ p = p.matrixTransform(g.getCTM().inverse());
+
+ // Compute new scale matrix in current mouse position
+ var k = root.createSVGMatrix().translate(p.x, p.y).scale(z).translate(-p.x, -p.y);
+
+ setCTM(g, g.getCTM().multiply(k));
+
+ stateTf = stateTf.multiply(k.inverse());
+}
+
+/**
+ * Handle mouse move event.
+ */
+function handleMouseMove(evt) {
+ if(evt.preventDefault)
+ evt.preventDefault();
+
+ evt.returnValue = false;
+
+ var svgDoc = evt.target.ownerDocument;
+
+ var g = svgDoc.getElementById("viewport");
+
+ if(state == 'pan') {
+ // Pan mode
+ var p = getEventPoint(evt).matrixTransform(stateTf);
+
+ setCTM(g, stateTf.inverse().translate(p.x - stateOrigin.x, p.y - stateOrigin.y));
+ } else if(state == 'move') {
+ // Move mode
+ var p = getEventPoint(evt).matrixTransform(g.getCTM().inverse());
+
+ setCTM(stateTarget, root.createSVGMatrix().translate(p.x - stateOrigin.x, p.y - stateOrigin.y).multiply(g.getCTM().inverse()).multiply(stateTarget.getCTM()));
+
+ stateOrigin = p;
+ }
+}
+
+/**
+ * Handle mouse button press event.
+ */
+function handleMouseDown(evt) {
+ if(evt.preventDefault)
+ evt.preventDefault();
+
+ evt.returnValue = false;
+
+ var svgDoc = evt.target.ownerDocument;
+
+ var g = svgDoc.getElementById("viewport");
+
+ if(true || evt.target.tagName == "svg") {
+ // Pan mode
+ state = 'pan';
+
+ stateTf = g.getCTM().inverse();
+
+ stateOrigin = getEventPoint(evt).matrixTransform(stateTf);
+ } else {
+ // Move mode
+ state = 'move';
+
+ stateTarget = evt.target;
+
+ stateTf = g.getCTM().inverse();
+
+ stateOrigin = getEventPoint(evt).matrixTransform(stateTf);
+ }
+}
+
+/**
+ * Handle mouse button release event.
+ */
+function handleMouseUp(evt) {
+ if(evt.preventDefault)
+ evt.preventDefault();
+
+ evt.returnValue = false;
+
+ var svgDoc = evt.target.ownerDocument;
+
+ if(state == 'pan' || state == 'move') {
+ // Quit pan mode
+ state = '';
+ }
+}
+
+]]></script>
+EOF
+}
+
+# Provides a map from fullname to shortname for cases where the
+# shortname is ambiguous. The symlist has both the fullname and
+# shortname for all symbols, which is usually fine, but sometimes --
+# such as with overloaded functions -- two different fullnames can map to
+# the same shortname. In that case, we use the address of the
+# function to disambiguate the two. This function fills in a map that
+# maps fullnames to modified shortnames in such cases. If a fullname
+# is not present in the map, the 'normal' shortname provided by the
+# symlist is the appropriate one to use.
+sub FillFullnameToShortnameMap {
+ my $symbols = shift;
+ my $fullname_to_shortname_map = shift;
+ my $shortnames_seen_once = {};
+ my $shortnames_seen_more_than_once = {};
+
+ foreach my $symlist (values(%{$symbols})) {
+ # TODO(csilvers): deal with inlined symbols too.
+ my $shortname = $symlist->[0];
+ my $fullname = $symlist->[2];
+ if ($fullname !~ /<[0-9a-fA-F]+>$/) { # fullname doesn't end in an address
+ next; # the only collisions we care about are when addresses differ
+ }
+ if (defined($shortnames_seen_once->{$shortname}) &&
+ $shortnames_seen_once->{$shortname} ne $fullname) {
+ $shortnames_seen_more_than_once->{$shortname} = 1;
+ } else {
+ $shortnames_seen_once->{$shortname} = $fullname;
+ }
+ }
+
+ foreach my $symlist (values(%{$symbols})) {
+ my $shortname = $symlist->[0];
+ my $fullname = $symlist->[2];
+ # TODO(csilvers): take in a list of addresses we care about, and only
+ # store in the map if $symlist->[1] is in that list. Saves space.
+ next if defined($fullname_to_shortname_map->{$fullname});
+ if (defined($shortnames_seen_more_than_once->{$shortname})) {
+ if ($fullname =~ /<0*([^>]*)>$/) { # fullname has address at end of it
+ $fullname_to_shortname_map->{$fullname} = "$shortname\@$1";
+ }
+ }
+ }
+}
+
+# Return a small number that identifies the argument.
+# Multiple calls with the same argument will return the same number.
+# Calls with different arguments will return different numbers.
+sub ShortIdFor {
+ my $key = shift;
+ my $id = $main::uniqueid{$key};
+ if (!defined($id)) {
+ $id = keys(%main::uniqueid) + 1;
+ $main::uniqueid{$key} = $id;
+ }
+ return $id;
+}
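+
+# Usage sketch (hypothetical keys, assuming %main::uniqueid starts empty):
+#   ShortIdFor("0x4007f0");   # -> 1, first key seen
+#   ShortIdFor("0x400b2c");   # -> 2
+#   ShortIdFor("0x4007f0");   # -> 1 again; ids are stable per key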
+
+# Translate a stack of addresses into a stack of symbols
+sub TranslateStack {
+ my $symbols = shift;
+ my $fullname_to_shortname_map = shift;
+ my $k = shift;
+
+ my @addrs = split(/\n/, $k);
+ my @result = ();
+ for (my $i = 0; $i <= $#addrs; $i++) {
+ my $a = $addrs[$i];
+
+ # Skip large addresses since they sometimes show up as fake entries on RH9
+ if (length($a) > 8 && $a gt "7fffffffffffffff") {
+ next;
+ }
+
+ if ($main::opt_disasm || $main::opt_list) {
+ # We want just the address for the key
+ push(@result, $a);
+ next;
+ }
+
+ my $symlist = $symbols->{$a};
+ if (!defined($symlist)) {
+ $symlist = [$a, "", $a];
+ }
+
+ # We can have a sequence of symbols for a particular entry
+ # (more than one symbol in the case of inlining). Callers
+ # come before callees in symlist, so walk backwards since
+ # the translated stack should contain callees before callers.
+ for (my $j = $#{$symlist}; $j >= 2; $j -= 3) {
+ my $func = $symlist->[$j-2];
+ my $fileline = $symlist->[$j-1];
+ my $fullfunc = $symlist->[$j];
+ if (defined($fullname_to_shortname_map->{$fullfunc})) {
+ $func = $fullname_to_shortname_map->{$fullfunc};
+ }
+ if ($j > 2) {
+ $func = "$func (inline)";
+ }
+
+ # Do not merge nodes corresponding to Callback::Run since that
+ # causes confusing cycles in dot display. Instead, we synthesize
+ # a unique name for this frame per caller.
+ if ($func =~ m/Callback.*::Run$/) {
+ my $caller = ($i > 0) ? $addrs[$i-1] : 0;
+ $func = "Run#" . ShortIdFor($caller);
+ }
+
+ if ($main::opt_addresses) {
+ push(@result, "$a $func $fileline");
+ } elsif ($main::opt_lines) {
+ if ($func eq '??' && $fileline eq '??:0') {
+ push(@result, "$a");
+ } else {
+ push(@result, "$func $fileline");
+ }
+ } elsif ($main::opt_functions) {
+ if ($func eq '??') {
+ push(@result, "$a");
+ } else {
+ push(@result, $func);
+ }
+ } elsif ($main::opt_files) {
+ if ($fileline eq '??:0' || $fileline eq '') {
+ push(@result, "$a");
+ } else {
+ my $f = $fileline;
+ $f =~ s/:\d+$//;
+ push(@result, $f);
+ }
+ } else {
+ push(@result, $a);
+ last; # Do not print inlined info
+ }
+ }
+ }
+
+ # print join(",", @addrs), " => ", join(",", @result), "\n";
+ return @result;
+}
+
+# Generate percent string for a number and a total
+sub Percent {
+ my $num = shift;
+ my $tot = shift;
+ if ($tot != 0) {
+ return sprintf("%.1f%%", $num * 100.0 / $tot);
+ } else {
+ return ($num == 0) ? "nan" : (($num > 0) ? "+inf" : "-inf");
+ }
+}
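+
+# For example, Percent(37, 148) returns "25.0%", Percent(0, 0) returns
+# "nan", and Percent(5, 0) returns "+inf".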
+
+# Generate pretty-printed form of number
+sub Unparse {
+ my $num = shift;
+ if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
+ if ($main::opt_inuse_objects || $main::opt_alloc_objects) {
+ return sprintf("%d", $num);
+ } else {
+ if ($main::opt_show_bytes) {
+ return sprintf("%d", $num);
+ } else {
+ return sprintf("%.1f", $num / 1048576.0);
+ }
+ }
+ } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) {
+ return sprintf("%.3f", $num / 1e9); # Convert nanoseconds to seconds
+ } else {
+ return sprintf("%d", $num);
+ }
+}
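+
+# For example, with $main::profile_type eq 'heap' and default byte
+# reporting, Unparse(2097152) yields "2.0" (MB); with --show_bytes it
+# yields "2097152".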
+
+# Alternate pretty-printed form: 0 maps to "."
+sub UnparseAlt {
+ my $num = shift;
+ if ($num == 0) {
+ return ".";
+ } else {
+ return Unparse($num);
+ }
+}
+
+# Alternate pretty-printed form: 0 maps to ""
+sub HtmlPrintNumber {
+ my $num = shift;
+ if ($num == 0) {
+ return "";
+ } else {
+ return Unparse($num);
+ }
+}
+
+# Return output units
+sub Units {
+ if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
+ if ($main::opt_inuse_objects || $main::opt_alloc_objects) {
+ return "objects";
+ } else {
+ if ($main::opt_show_bytes) {
+ return "B";
+ } else {
+ return "MB";
+ }
+ }
+ } elsif ($main::profile_type eq 'contention' && !$main::opt_contentions) {
+ return "seconds";
+ } else {
+ return "samples";
+ }
+}
+
+##### Profile manipulation code #####
+
+# Generate flattened profile:
+# If a count is charged to stack [a,b,c,d], then in the generated
+# profile it is charged to [a] only.
+sub FlatProfile {
+ my $profile = shift;
+ my $result = {};
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ if ($#addrs >= 0) {
+ AddEntry($result, $addrs[0], $count);
+ }
+ }
+ return $result;
+}
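+
+# Sketch with hypothetical stack keys: FlatProfile({"a\nb\nc" => 10,
+# "a\nd" => 5}) returns {"a" => 15}; only the leaf frame is charged.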
+
+# Generate cumulative profile:
+# If a count is charged to stack [a,b,c,d], then in the generated
+# profile it is charged to each of [a], [b], [c], [d].
+sub CumulativeProfile {
+ my $profile = shift;
+ my $result = {};
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ foreach my $a (@addrs) {
+ AddEntry($result, $a, $count);
+ }
+ }
+ return $result;
+}
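+
+# Sketch with hypothetical stack keys: CumulativeProfile({"a\nb" => 10})
+# returns {"a" => 10, "b" => 10}; every frame on the stack is charged.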
+
+# If the second-youngest PC on the stack is always the same, returns
+# that pc. Otherwise, returns undef.
+sub IsSecondPcAlwaysTheSame {
+ my $profile = shift;
+
+ my $second_pc = undef;
+ foreach my $k (keys(%{$profile})) {
+ my @addrs = split(/\n/, $k);
+ if ($#addrs < 1) {
+ return undef;
+ }
+ if (not defined $second_pc) {
+ $second_pc = $addrs[1];
+ } else {
+ if ($second_pc ne $addrs[1]) {
+ return undef;
+ }
+ }
+ }
+ return $second_pc;
+}
+
+sub ExtractSymbolLocation {
+ my $symbols = shift;
+ my $address = shift;
+ # 'addr2line' outputs "??:0" for unknown locations; we do the
+ # same to be consistent.
+ my $location = "??:0:unknown";
+ if (exists $symbols->{$address}) {
+ my $file = $symbols->{$address}->[1];
+ if ($file eq "?") {
+ $file = "??:0"
+ }
+ $location = $file . ":" . $symbols->{$address}->[0];
+ }
+ return $location;
+}
+
+# Extracts a graph of calls.
+sub ExtractCalls {
+ my $symbols = shift;
+ my $profile = shift;
+
+ my $calls = {};
+ while( my ($stack_trace, $count) = each %$profile ) {
+ my @address = split(/\n/, $stack_trace);
+ my $destination = ExtractSymbolLocation($symbols, $address[0]);
+ AddEntry($calls, $destination, $count);
+ for (my $i = 1; $i <= $#address; $i++) {
+ my $source = ExtractSymbolLocation($symbols, $address[$i]);
+ my $call = "$source -> $destination";
+ AddEntry($calls, $call, $count);
+ $destination = $source;
+ }
+ }
+
+ return $calls;
+}
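+
+# Sketch (hypothetical two-frame stack with count 4): the leaf location
+# itself is charged 4, and one "source -> destination" edge of the form
+# "file:func -> file:func" (caller to callee) is also charged 4.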
+
+sub FilterFrames {
+ my $symbols = shift;
+ my $profile = shift;
+
+ if ($main::opt_retain eq '' && $main::opt_exclude eq '') {
+ return $profile;
+ }
+
+ my $result = {};
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ my @path = ();
+ foreach my $a (@addrs) {
+ my $sym;
+ if (exists($symbols->{$a})) {
+ $sym = $symbols->{$a}->[0];
+ } else {
+ $sym = $a;
+ }
+ if ($main::opt_retain ne '' && $sym !~ m/$main::opt_retain/) {
+ next;
+ }
+ if ($main::opt_exclude ne '' && $sym =~ m/$main::opt_exclude/) {
+ next;
+ }
+ push(@path, $a);
+ }
+ if (scalar(@path) > 0) {
+ my $reduced_path = join("\n", @path);
+ AddEntry($result, $reduced_path, $count);
+ }
+ }
+
+ return $result;
+}
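+
+# Sketch: with --retain='^foo', only frames whose symbol matches ^foo stay
+# on each path; with --exclude='libc', any matching frame is dropped.
+# Stacks that end up empty are omitted from the result.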
+
+sub RemoveUninterestingFrames {
+ my $symbols = shift;
+ my $profile = shift;
+
+ # List of function names to skip
+ my %skip = ();
+ my $skip_regexp = 'NOMATCH';
+ if ($main::profile_type eq 'heap' || $main::profile_type eq 'growth') {
+ foreach my $name ('@JEMALLOC_PREFIX@calloc',
+ 'cfree',
+ '@JEMALLOC_PREFIX@malloc',
+ 'newImpl',
+ 'void* newImpl',
+ '@JEMALLOC_PREFIX@free',
+ '@JEMALLOC_PREFIX@memalign',
+ '@JEMALLOC_PREFIX@posix_memalign',
+ '@JEMALLOC_PREFIX@aligned_alloc',
+ 'pvalloc',
+ '@JEMALLOC_PREFIX@valloc',
+ '@JEMALLOC_PREFIX@realloc',
+ '@JEMALLOC_PREFIX@mallocx',
+ '@JEMALLOC_PREFIX@rallocx',
+ '@JEMALLOC_PREFIX@xallocx',
+ '@JEMALLOC_PREFIX@dallocx',
+ '@JEMALLOC_PREFIX@sdallocx',
+ '@JEMALLOC_PREFIX@sdallocx_noflags',
+ 'tc_calloc',
+ 'tc_cfree',
+ 'tc_malloc',
+ 'tc_free',
+ 'tc_memalign',
+ 'tc_posix_memalign',
+ 'tc_pvalloc',
+ 'tc_valloc',
+ 'tc_realloc',
+ 'tc_new',
+ 'tc_delete',
+ 'tc_newarray',
+ 'tc_deletearray',
+ 'tc_new_nothrow',
+ 'tc_newarray_nothrow',
+ 'do_malloc',
+ '::do_malloc', # new name -- got moved to an unnamed ns
+ '::do_malloc_or_cpp_alloc',
+ 'DoSampledAllocation',
+ 'simple_alloc::allocate',
+ '__malloc_alloc_template::allocate',
+ '__builtin_delete',
+ '__builtin_new',
+ '__builtin_vec_delete',
+ '__builtin_vec_new',
+ 'operator new',
+ 'operator new[]',
+ # The entry to our memory-allocation routines on OS X
+ 'malloc_zone_malloc',
+ 'malloc_zone_calloc',
+ 'malloc_zone_valloc',
+ 'malloc_zone_realloc',
+ 'malloc_zone_memalign',
+ 'malloc_zone_free',
+ # These mark the beginning/end of our custom sections
+ '__start_google_malloc',
+ '__stop_google_malloc',
+ '__start_malloc_hook',
+ '__stop_malloc_hook') {
+ $skip{$name} = 1;
+ $skip{"_" . $name} = 1; # Mach (OS X) adds a _ prefix to everything
+ }
+ # TODO: Remove TCMalloc once everything has been
+ # moved into the tcmalloc:: namespace and we have flushed
+ # old code out of the system.
+ $skip_regexp = "TCMalloc|^tcmalloc::";
+ } elsif ($main::profile_type eq 'contention') {
+ foreach my $vname ('base::RecordLockProfileData',
+ 'base::SubmitMutexProfileData',
+ 'base::SubmitSpinLockProfileData',
+ 'Mutex::Unlock',
+ 'Mutex::UnlockSlow',
+ 'Mutex::ReaderUnlock',
+ 'MutexLock::~MutexLock',
+ 'SpinLock::Unlock',
+ 'SpinLock::SlowUnlock',
+ 'SpinLockHolder::~SpinLockHolder') {
+ $skip{$vname} = 1;
+ }
+ } elsif ($main::profile_type eq 'cpu') {
+ # Drop signal handlers used for CPU profile collection
+ # TODO(dpeng): this should not be necessary; it's taken
+ # care of by the general 2nd-pc mechanism below.
+ foreach my $name ('ProfileData::Add', # historical
+ 'ProfileData::prof_handler', # historical
+ 'CpuProfiler::prof_handler',
+ '__FRAME_END__',
+ '__pthread_sighandler',
+ '__restore') {
+ $skip{$name} = 1;
+ }
+ } else {
+ # Nothing skipped for unknown types
+ }
+
+ if ($main::profile_type eq 'cpu') {
+ # If all the second-youngest program counters are the same,
+ # this STRONGLY suggests that it is an artifact of measurement,
+ # i.e., stack frames pushed by the CPU profiler signal handler.
+ # Hence, we delete them.
+ # (The topmost PC is read from the signal structure, not from
+ # the stack, so it does not get involved.)
+ while (my $second_pc = IsSecondPcAlwaysTheSame($profile)) {
+ my $result = {};
+ my $func = '';
+ if (exists($symbols->{$second_pc})) {
+ $second_pc = $symbols->{$second_pc}->[0];
+ }
+ print STDERR "Removing $second_pc from all stack traces.\n";
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ splice @addrs, 1, 1;
+ my $reduced_path = join("\n", @addrs);
+ AddEntry($result, $reduced_path, $count);
+ }
+ $profile = $result;
+ }
+ }
+
+ my $result = {};
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ my @path = ();
+ foreach my $a (@addrs) {
+ if (exists($symbols->{$a})) {
+ my $func = $symbols->{$a}->[0];
+ if ($skip{$func} || ($func =~ m/$skip_regexp/)) {
+ # Throw away the portion of the backtrace seen so far, under the
+ # assumption that previous frames were for functions internal to the
+ # allocator.
+ @path = ();
+ next;
+ }
+ }
+ push(@path, $a);
+ }
+ my $reduced_path = join("\n", @path);
+ AddEntry($result, $reduced_path, $count);
+ }
+
+ $result = FilterFrames($symbols, $result);
+
+ return $result;
+}
+
+# Reduce profile to granularity given by user
+sub ReduceProfile {
+ my $symbols = shift;
+ my $profile = shift;
+ my $result = {};
+ my $fullname_to_shortname_map = {};
+ FillFullnameToShortnameMap($symbols, $fullname_to_shortname_map);
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @translated = TranslateStack($symbols, $fullname_to_shortname_map, $k);
+ my @path = ();
+ my %seen = ();
+ $seen{''} = 1; # So that empty keys are skipped
+ foreach my $e (@translated) {
+ # To avoid double-counting due to recursion, skip a stack-trace
+ # entry if it has already been seen
+ if (!$seen{$e}) {
+ $seen{$e} = 1;
+ push(@path, $e);
+ }
+ }
+ my $reduced_path = join("\n", @path);
+ AddEntry($result, $reduced_path, $count);
+ }
+ return $result;
+}
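+
+# Sketch: a recursive translated stack such as ("fib", "fib", "main")
+# reduces to the key "fib\nmain", so each symbol is counted at most once
+# per stack.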
+
+# Does the specified symbol array match the regexp?
+sub SymbolMatches {
+ my $sym = shift;
+ my $re = shift;
+ if (defined($sym)) {
+ for (my $i = 0; $i < $#{$sym}; $i += 3) {
+ if ($sym->[$i] =~ m/$re/ || $sym->[$i+1] =~ m/$re/) {
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
+# Focus only on paths involving specified regexps
+sub FocusProfile {
+ my $symbols = shift;
+ my $profile = shift;
+ my $focus = shift;
+ my $result = {};
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ foreach my $a (@addrs) {
+      # Keep the entry if it matches the address, shortname, or fileline
+ if (($a =~ m/$focus/) || SymbolMatches($symbols->{$a}, $focus)) {
+ AddEntry($result, $k, $count);
+ last;
+ }
+ }
+ }
+ return $result;
+}
+
+# Focus only on paths not involving specified regexps
+sub IgnoreProfile {
+ my $symbols = shift;
+ my $profile = shift;
+ my $ignore = shift;
+ my $result = {};
+ foreach my $k (keys(%{$profile})) {
+ my $count = $profile->{$k};
+ my @addrs = split(/\n/, $k);
+ my $matched = 0;
+ foreach my $a (@addrs) {
+      # Keep the entry if it matches the address, shortname, or fileline
+ if (($a =~ m/$ignore/) || SymbolMatches($symbols->{$a}, $ignore)) {
+ $matched = 1;
+ last;
+ }
+ }
+ if (!$matched) {
+ AddEntry($result, $k, $count);
+ }
+ }
+ return $result;
+}
+
+# Get total count in profile
+sub TotalProfile {
+ my $profile = shift;
+ my $result = 0;
+ foreach my $k (keys(%{$profile})) {
+ $result += $profile->{$k};
+ }
+ return $result;
+}
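+
+# E.g. TotalProfile({"a\nb" => 7, "c" => 3}) returns 10.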
+
+# Return a new profile containing the sum of profiles A and B
+sub AddProfile {
+ my $A = shift;
+ my $B = shift;
+
+ my $R = {};
+ # add all keys in A
+ foreach my $k (keys(%{$A})) {
+ my $v = $A->{$k};
+ AddEntry($R, $k, $v);
+ }
+ # add all keys in B
+ foreach my $k (keys(%{$B})) {
+ my $v = $B->{$k};
+ AddEntry($R, $k, $v);
+ }
+ return $R;
+}
+
+# Merges symbol maps
+sub MergeSymbols {
+ my $A = shift;
+ my $B = shift;
+
+ my $R = {};
+ foreach my $k (keys(%{$A})) {
+ $R->{$k} = $A->{$k};
+ }
+ if (defined($B)) {
+ foreach my $k (keys(%{$B})) {
+ $R->{$k} = $B->{$k};
+ }
+ }
+ return $R;
+}
+
+
+# Return the union of the PC sets A and B
+sub AddPcs {
+ my $A = shift;
+ my $B = shift;
+
+ my $R = {};
+ # add all keys in A
+ foreach my $k (keys(%{$A})) {
+ $R->{$k} = 1
+ }
+ # add all keys in B
+ foreach my $k (keys(%{$B})) {
+ $R->{$k} = 1
+ }
+ return $R;
+}
+
+# Subtract B from A
+sub SubtractProfile {
+ my $A = shift;
+ my $B = shift;
+
+ my $R = {};
+ foreach my $k (keys(%{$A})) {
+ my $v = $A->{$k} - GetEntry($B, $k);
+ if ($v < 0 && $main::opt_drop_negative) {
+ $v = 0;
+ }
+ AddEntry($R, $k, $v);
+ }
+ if (!$main::opt_drop_negative) {
+ # Take care of when subtracted profile has more entries
+ foreach my $k (keys(%{$B})) {
+ if (!exists($A->{$k})) {
+ AddEntry($R, $k, 0 - $B->{$k});
+ }
+ }
+ }
+ return $R;
+}
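+
+# Sketch: SubtractProfile({"k" => 5}, {"k" => 2, "m" => 1}) returns
+# {"k" => 3, "m" => -1}; with --drop_negative, negative values clamp to 0
+# and entries present only in B are dropped.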
+
+# Get entry from profile; zero if not present
+sub GetEntry {
+ my $profile = shift;
+ my $k = shift;
+ if (exists($profile->{$k})) {
+ return $profile->{$k};
+ } else {
+ return 0;
+ }
+}
+
+# Add entry to specified profile
+sub AddEntry {
+ my $profile = shift;
+ my $k = shift;
+ my $n = shift;
+ if (!exists($profile->{$k})) {
+ $profile->{$k} = 0;
+ }
+ $profile->{$k} += $n;
+}
+
+# Add a stack of entries to specified profile, and add them to the $pcs
+# list.
+sub AddEntries {
+ my $profile = shift;
+ my $pcs = shift;
+ my $stack = shift;
+ my $count = shift;
+ my @k = ();
+
+ foreach my $e (split(/\s+/, $stack)) {
+ my $pc = HexExtend($e);
+ $pcs->{$pc} = 1;
+ push @k, $pc;
+ }
+ AddEntry($profile, (join "\n", @k), $count);
+}
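+
+# Sketch (hypothetical addresses): AddEntries($profile, $pcs,
+# "4004f0 4008a2", 3) records both zero-extended pcs in $pcs and charges 3
+# to the two-frame stack key joined with "\n".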
+
+##### Code to profile a server dynamically #####
+
+sub CheckSymbolPage {
+ my $url = SymbolPageURL();
+ my $command = ShellEscape(@URL_FETCHER, $url);
+ open(SYMBOL, "$command |") or error($command);
+ my $line = <SYMBOL>;
+ $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines
+ close(SYMBOL);
+ unless (defined($line)) {
+ error("$url doesn't exist\n");
+ }
+
+ if ($line =~ /^num_symbols:\s+(\d+)$/) {
+ if ($1 == 0) {
+ error("Stripped binary. No symbols available.\n");
+ }
+ } else {
+ error("Failed to get the number of symbols from $url\n");
+ }
+}
+
+sub IsProfileURL {
+ my $profile_name = shift;
+ if (-f $profile_name) {
+ printf STDERR "Using local file $profile_name.\n";
+ return 0;
+ }
+ return 1;
+}
+
+sub ParseProfileURL {
+ my $profile_name = shift;
+
+ if (!defined($profile_name) || $profile_name eq "") {
+ return ();
+ }
+
+  # Split profile URL; the pattern matches all non-empty strings, so the
+  # match cannot fail and needs no test.
+ $profile_name =~ m,^(https?://)?([^/]+)(.*?)(/|$PROFILES)?$,;
+
+ my $proto = $1 || "http://";
+ my $hostport = $2;
+ my $prefix = $3;
+ my $profile = $4 || "/";
+
+ my $host = $hostport;
+ $host =~ s/:.*//;
+
+ my $baseurl = "$proto$hostport$prefix";
+ return ($host, $baseurl, $profile);
+}
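+
+# Sketch (hypothetical host; assumes $PROFILES matches the standard
+# handler paths): "http://myhost:8080/pprof/heap" parses to
+# ("myhost", "http://myhost:8080", "/pprof/heap").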
+
+# We fetch symbols from the first profile argument.
+sub SymbolPageURL {
+ my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
+ return "$baseURL$SYMBOL_PAGE";
+}
+
+sub FetchProgramName() {
+ my ($host, $baseURL, $path) = ParseProfileURL($main::pfile_args[0]);
+ my $url = "$baseURL$PROGRAM_NAME_PAGE";
+ my $command_line = ShellEscape(@URL_FETCHER, $url);
+ open(CMDLINE, "$command_line |") or error($command_line);
+ my $cmdline = <CMDLINE>;
+ $cmdline =~ s/\r//g; # turn windows-looking lines into unix-looking lines
+ close(CMDLINE);
+ error("Failed to get program name from $url\n") unless defined($cmdline);
+  $cmdline =~ s/\x00.+//;  # Remove argv[1] and later arguments.
+ $cmdline =~ s!\n!!g; # Remove LFs.
+ return $cmdline;
+}
+
+# curl's -L (--location) option is unreliable, at least as of version
+# 7.12.3: curl forgets to post data when it follows a redirection. This
+# function resolves the redirect up front as a workaround for curl.
+# Redirection happens on borg hosts.
+sub ResolveRedirectionForCurl {
+ my $url = shift;
+ my $command_line = ShellEscape(@URL_FETCHER, "--head", $url);
+ open(CMDLINE, "$command_line |") or error($command_line);
+ while (<CMDLINE>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ if (/^Location: (.*)/) {
+ $url = $1;
+ }
+ }
+ close(CMDLINE);
+ return $url;
+}
+
+# Add a timeout flag to URL_FETCHER. Returns a new list.
+sub AddFetchTimeout {
+ my $timeout = shift;
+ my @fetcher = @_;
+ if (defined($timeout)) {
+ if (join(" ", @fetcher) =~ m/\bcurl -s/) {
+ push(@fetcher, "--max-time", sprintf("%d", $timeout));
+ } elsif (join(" ", @fetcher) =~ m/\brpcget\b/) {
+ push(@fetcher, sprintf("--deadline=%d", $timeout));
+ }
+ }
+ return @fetcher;
+}
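+
+# E.g. AddFetchTimeout(30, "curl", "-s") returns
+# ("curl", "-s", "--max-time", "30"); fetchers that are not recognized
+# pass through unchanged.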
+
+# Reads a symbol map from the file handle given as the first argument and
+# returns the resulting symbol map. Also processes variables relating to symbols.
+# Currently, the only variable processed is 'binary=<value>' which updates
+# $main::prog to have the correct program name.
+sub ReadSymbols {
+ my $in = shift;
+ my $map = {};
+ while (<$in>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ # Removes all the leading zeroes from the symbols, see comment below.
+ if (m/^0x0*([0-9a-f]+)\s+(.+)/) {
+ $map->{$1} = $2;
+ } elsif (m/^---/) {
+ last;
+ } elsif (m/^([a-z][^=]*)=(.*)$/ ) {
+ my ($variable, $value) = ($1, $2);
+ for ($variable, $value) {
+ s/^\s+//;
+ s/\s+$//;
+ }
+ if ($variable eq "binary") {
+ if ($main::prog ne $UNKNOWN_BINARY && $main::prog ne $value) {
+ printf STDERR ("Warning: Mismatched binary name '%s', using '%s'.\n",
+ $main::prog, $value);
+ }
+ $main::prog = $value;
+ } else {
+ printf STDERR ("Ignoring unknown variable in symbols list: " .
+ "'%s' = '%s'\n", $variable, $value);
+ }
+ }
+ }
+ return $map;
+}
+
+sub URLEncode {
+ my $str = shift;
+ $str =~ s/([^A-Za-z0-9\-_.!~*'()])/ sprintf "%%%02x", ord $1 /eg;
+ return $str;
+}
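+
+# E.g. URLEncode("a b+c") returns "a%20b%2bc"; unreserved characters are
+# left as-is.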
+
+sub AppendSymbolFilterParams {
+ my $url = shift;
+ my @params = ();
+ if ($main::opt_retain ne '') {
+ push(@params, sprintf("retain=%s", URLEncode($main::opt_retain)));
+ }
+ if ($main::opt_exclude ne '') {
+ push(@params, sprintf("exclude=%s", URLEncode($main::opt_exclude)));
+ }
+ if (scalar @params > 0) {
+ $url = sprintf("%s?%s", $url, join("&", @params));
+ }
+ return $url;
+}
+
+# Fetches and processes symbols to prepare them for use in the profile output
+# code. If the optional 'symbol_map' arg is not given, fetches symbols from
+# $SYMBOL_PAGE for all PC values found in profile. Otherwise, the raw symbols
+# are assumed to have already been fetched into 'symbol_map' and are simply
+# extracted and processed.
+sub FetchSymbols {
+ my $pcset = shift;
+ my $symbol_map = shift;
+
+ my %seen = ();
+ my @pcs = grep { !$seen{$_}++ } keys(%$pcset); # uniq
+
+ if (!defined($symbol_map)) {
+ my $post_data = join("+", sort((map {"0x" . "$_"} @pcs)));
+
+ open(POSTFILE, ">$main::tmpfile_sym");
+ print POSTFILE $post_data;
+ close(POSTFILE);
+
+ my $url = SymbolPageURL();
+
+ my $command_line;
+ if (join(" ", @URL_FETCHER) =~ m/\bcurl -s/) {
+ $url = ResolveRedirectionForCurl($url);
+ $url = AppendSymbolFilterParams($url);
+ $command_line = ShellEscape(@URL_FETCHER, "-d", "\@$main::tmpfile_sym",
+ $url);
+ } else {
+ $url = AppendSymbolFilterParams($url);
+ $command_line = (ShellEscape(@URL_FETCHER, "--post", $url)
+ . " < " . ShellEscape($main::tmpfile_sym));
+ }
+ # We use c++filt in case $SYMBOL_PAGE gives us mangled symbols.
+ my $escaped_cppfilt = ShellEscape($obj_tool_map{"c++filt"});
+ open(SYMBOL, "$command_line | $escaped_cppfilt |") or error($command_line);
+ $symbol_map = ReadSymbols(*SYMBOL{IO});
+ close(SYMBOL);
+ }
+
+ my $symbols = {};
+ foreach my $pc (@pcs) {
+ my $fullname;
+ # For 64 bits binaries, symbols are extracted with 8 leading zeroes.
+ # Then /symbol reads the long symbols in as uint64, and outputs
+    # the result with a "0x%08llx" format, which gets rid of the zeroes.
+ # By removing all the leading zeroes in both $pc and the symbols from
+ # /symbol, the symbols match and are retrievable from the map.
+ my $shortpc = $pc;
+ $shortpc =~ s/^0*//;
+ # Each line may have a list of names, which includes the function
+ # and also other functions it has inlined. They are separated (in
+    # PrintSymbolizedProfile) by '--', which is illegal in function names.
+ my $fullnames;
+ if (defined($symbol_map->{$shortpc})) {
+ $fullnames = $symbol_map->{$shortpc};
+ } else {
+ $fullnames = "0x" . $pc; # Just use addresses
+ }
+ my $sym = [];
+ $symbols->{$pc} = $sym;
+ foreach my $fullname (split("--", $fullnames)) {
+ my $name = ShortFunctionName($fullname);
+ push(@{$sym}, $name, "?", $fullname);
+ }
+ }
+ return $symbols;
+}
+
+sub BaseName {
+ my $file_name = shift;
+ $file_name =~ s!^.*/!!; # Remove directory name
+ return $file_name;
+}
+
+sub MakeProfileBaseName {
+ my ($binary_name, $profile_name) = @_;
+ my ($host, $baseURL, $path) = ParseProfileURL($profile_name);
+ my $binary_shortname = BaseName($binary_name);
+ return sprintf("%s.%s.%s",
+ $binary_shortname, $main::op_time, $host);
+}
+
+sub FetchDynamicProfile {
+ my $binary_name = shift;
+ my $profile_name = shift;
+ my $fetch_name_only = shift;
+ my $encourage_patience = shift;
+
+ if (!IsProfileURL($profile_name)) {
+ return $profile_name;
+ } else {
+ my ($host, $baseURL, $path) = ParseProfileURL($profile_name);
+ if ($path eq "" || $path eq "/") {
+ # Missing type specifier defaults to cpu-profile
+ $path = $PROFILE_PAGE;
+ }
+
+ my $profile_file = MakeProfileBaseName($binary_name, $profile_name);
+
+ my $url = "$baseURL$path";
+ my $fetch_timeout = undef;
+ if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE/) {
+ if ($path =~ m/[?]/) {
+ $url .= "&";
+ } else {
+ $url .= "?";
+ }
+ $url .= sprintf("seconds=%d", $main::opt_seconds);
+ $fetch_timeout = $main::opt_seconds * 1.01 + 60;
+ # Set $profile_type for consumption by PrintSymbolizedProfile.
+ $main::profile_type = 'cpu';
+ } else {
+ # For non-CPU profiles, we add a type-extension to
+ # the target profile file name.
+ my $suffix = $path;
+ $suffix =~ s,/,.,g;
+ $profile_file .= $suffix;
+ # Set $profile_type for consumption by PrintSymbolizedProfile.
+ if ($path =~ m/$HEAP_PAGE/) {
+ $main::profile_type = 'heap';
+ } elsif ($path =~ m/$GROWTH_PAGE/) {
+ $main::profile_type = 'growth';
+ } elsif ($path =~ m/$CONTENTION_PAGE/) {
+ $main::profile_type = 'contention';
+ }
+ }
+
+ my $profile_dir = $ENV{"JEPROF_TMPDIR"} || ($ENV{HOME} . "/jeprof");
+ if (! -d $profile_dir) {
+ mkdir($profile_dir)
+ || die("Unable to create profile directory $profile_dir: $!\n");
+ }
+ my $tmp_profile = "$profile_dir/.tmp.$profile_file";
+ my $real_profile = "$profile_dir/$profile_file";
+
+ if ($fetch_name_only > 0) {
+ return $real_profile;
+ }
+
+ my @fetcher = AddFetchTimeout($fetch_timeout, @URL_FETCHER);
+ my $cmd = ShellEscape(@fetcher, $url) . " > " . ShellEscape($tmp_profile);
+ if ($path =~ m/$PROFILE_PAGE|$PMUPROFILE_PAGE|$CENSUSPROFILE_PAGE/){
+ print STDERR "Gathering CPU profile from $url for $main::opt_seconds seconds to\n ${real_profile}\n";
+ if ($encourage_patience) {
+ print STDERR "Be patient...\n";
+ }
+ } else {
+ print STDERR "Fetching $path profile from $url to\n ${real_profile}\n";
+ }
+
+ (system($cmd) == 0) || error("Failed to get profile: $cmd: $!\n");
+ (system("mv", $tmp_profile, $real_profile) == 0) || error("Unable to rename profile\n");
+ print STDERR "Wrote profile to $real_profile\n";
+ $main::collected_profile = $real_profile;
+ return $main::collected_profile;
+ }
+}
+
+# Collect profiles in parallel
+sub FetchDynamicProfiles {
+ my $items = scalar(@main::pfile_args);
+ my $levels = log($items) / log(2);
+
+ if ($items == 1) {
+ $main::profile_files[0] = FetchDynamicProfile($main::prog, $main::pfile_args[0], 0, 1);
+ } else {
+    # Guard against floating-point rounding in the log() computation above.
+ if ((2 ** $levels) < $items) {
+ $levels++;
+ }
+ my $count = scalar(@main::pfile_args);
+ for (my $i = 0; $i < $count; $i++) {
+ $main::profile_files[$i] = FetchDynamicProfile($main::prog, $main::pfile_args[$i], 1, 0);
+ }
+ print STDERR "Fetching $count profiles, Be patient...\n";
+ FetchDynamicProfilesRecurse($levels, 0, 0);
+ $main::collected_profile = join(" \\\n ", @main::profile_files);
+ }
+}
+
+# Recursively fork a process to get enough processes
+# collecting profiles
+sub FetchDynamicProfilesRecurse {
+ my $maxlevel = shift;
+ my $level = shift;
+ my $position = shift;
+
+ if (my $pid = fork()) {
+ $position = 0 | ($position << 1);
+ TryCollectProfile($maxlevel, $level, $position);
+ wait;
+ } else {
+ $position = 1 | ($position << 1);
+ TryCollectProfile($maxlevel, $level, $position);
+ cleanup();
+ exit(0);
+ }
+}
+
+# Collect a single profile
+sub TryCollectProfile {
+ my $maxlevel = shift;
+ my $level = shift;
+ my $position = shift;
+
+ if ($level >= ($maxlevel - 1)) {
+ if ($position < scalar(@main::pfile_args)) {
+ FetchDynamicProfile($main::prog, $main::pfile_args[$position], 0, 0);
+ }
+ } else {
+ FetchDynamicProfilesRecurse($maxlevel, $level+1, $position);
+ }
+}
+
+##### Parsing code #####
+
+# Provide a small streaming-read module to handle very large
+# cpu-profile files. Stream in chunks along a sliding window.
+# Provides an interface to get one 'slot', correctly handling
+# endian-ness differences. A slot is one 32-bit or 64-bit word
+# (depending on the input profile). We determine the profile's endianness
+# and word size by looking at the first 8 bytes: in cpu profiles,
+# the second slot is always 3 (we'll accept anything that's not 0).
+BEGIN {
+ package CpuProfileStream;
+
+ sub new {
+ my ($class, $file, $fname) = @_;
+ my $self = { file => $file,
+ base => 0,
+ stride => 512 * 1024, # must be a multiple of bitsize/8
+ slots => [],
+ unpack_code => "", # N for big-endian, V for little
+ perl_is_64bit => 1, # matters if profile is 64-bit
+ };
+ bless $self, $class;
+ # Let unittests adjust the stride
+ if ($main::opt_test_stride > 0) {
+ $self->{stride} = $main::opt_test_stride;
+ }
+ # Read the first two slots to figure out bitsize and endianness.
+ my $slots = $self->{slots};
+ my $str;
+ read($self->{file}, $str, 8);
+ # Set the global $address_length based on what we see here.
+ # 8 is 32-bit (8 hexadecimal chars); 16 is 64-bit (16 hexadecimal chars).
+ $address_length = ($str eq (chr(0)x8)) ? 16 : 8;
+ if ($address_length == 8) {
+ if (substr($str, 6, 2) eq chr(0)x2) {
+ $self->{unpack_code} = 'V'; # Little-endian.
+ } elsif (substr($str, 4, 2) eq chr(0)x2) {
+ $self->{unpack_code} = 'N'; # Big-endian
+ } else {
+ ::error("$fname: header size >= 2**16\n");
+ }
+ @$slots = unpack($self->{unpack_code} . "*", $str);
+ } else {
+ # If we're a 64-bit profile, check if we're a 64-bit-capable
+ # perl. Otherwise, each slot will be represented as a float
+ # instead of an int64, losing precision and making all the
+ # 64-bit addresses wrong. We won't complain yet, but will
+ # later if we ever see a value that doesn't fit in 32 bits.
+ my $has_q = 0;
+ eval { $has_q = pack("Q", "1") ? 1 : 1; };
+ if (!$has_q) {
+ $self->{perl_is_64bit} = 0;
+ }
+ read($self->{file}, $str, 8);
+ if (substr($str, 4, 4) eq chr(0)x4) {
+ # We'd love to use 'Q', but it's a) not universal, b) not endian-proof.
+ $self->{unpack_code} = 'V'; # Little-endian.
+ } elsif (substr($str, 0, 4) eq chr(0)x4) {
+ $self->{unpack_code} = 'N'; # Big-endian
+ } else {
+ ::error("$fname: header size >= 2**32\n");
+ }
+ my @pair = unpack($self->{unpack_code} . "*", $str);
+ # Since we know one of the pair is 0, it's fine to just add them.
+ @$slots = (0, $pair[0] + $pair[1]);
+ }
+ return $self;
+ }
+
+ # Load more data when we access slots->get(X) which is not yet in memory.
+ sub overflow {
+ my ($self) = @_;
+ my $slots = $self->{slots};
+ $self->{base} += $#$slots + 1; # skip over data we're replacing
+ my $str;
+ read($self->{file}, $str, $self->{stride});
+ if ($address_length == 8) { # the 32-bit case
+ # This is the easy case: unpack provides 32-bit unpacking primitives.
+ @$slots = unpack($self->{unpack_code} . "*", $str);
+ } else {
+ # We need to unpack 32 bits at a time and combine.
+ my @b32_values = unpack($self->{unpack_code} . "*", $str);
+ my @b64_values = ();
+ for (my $i = 0; $i < $#b32_values; $i += 2) {
+ # TODO(csilvers): if this is a 32-bit perl, the math below
+ # could end up in a too-large int, which perl will promote
+ # to a double, losing necessary precision. Deal with that.
+ # Right now, we just die.
+ my ($lo, $hi) = ($b32_values[$i], $b32_values[$i+1]);
+ if ($self->{unpack_code} eq 'N') { # big-endian
+ ($lo, $hi) = ($hi, $lo);
+ }
+ my $value = $lo + $hi * (2**32);
+ if (!$self->{perl_is_64bit} && # check value is exactly represented
+ (($value % (2**32)) != $lo || int($value / (2**32)) != $hi)) {
+ ::error("Need a 64-bit perl to process this 64-bit profile.\n");
+ }
+ push(@b64_values, $value);
+ }
+ @$slots = @b64_values;
+ }
+ }
+
+ # Access the i-th long in the file (logically), or -1 at EOF.
+ sub get {
+ my ($self, $idx) = @_;
+ my $slots = $self->{slots};
+ while ($#$slots >= 0) {
+ if ($idx < $self->{base}) {
+ # The only time we expect a reference to $slots[$i - something]
+ # after referencing $slots[$i] is reading the very first header.
+ # Since $stride > |header|, that shouldn't cause any lookback
+ # errors. And everything after the header is sequential.
+ print STDERR "Unexpected look-back reading CPU profile";
+ return -1; # shrug, don't know what better to return
+ } elsif ($idx > $self->{base} + $#$slots) {
+ $self->overflow();
+ } else {
+ return $slots->[$idx - $self->{base}];
+ }
+ }
+ # If we get here, $slots is [], which means we've reached EOF
+ return -1; # unique since slots is supposed to hold unsigned numbers
+ }
+}
+
+# Reads the top, 'header' section of a profile, and returns the last
+# line of the header, commonly called a 'header line'. The header
+# section of a profile consists of zero or more 'command' lines that
+# are instructions to jeprof, which jeprof executes when reading the
+# header. All 'command' lines start with a %. After the command
+# lines is the 'header line', which is a profile-specific line that
+# indicates what type of profile it is, and perhaps other global
+# information about the profile. For instance, here's a header line
+# for a heap profile:
+# heap profile: 53: 38236 [ 5525: 1284029] @ heapprofile
+# For historical reasons, the CPU profile does not contain a text-
+# readable header line. If the profile looks like a CPU profile,
+# this function returns "". If no header line could be found, this
+# function returns undef.
+#
+# The following commands are recognized:
+# %warn -- emit the rest of this line to stderr, prefixed by 'WARNING:'
+#
+# The input file should be in binmode.
+sub ReadProfileHeader {
+ local *PROFILE = shift;
+ my $firstchar = "";
+ my $line = "";
+ read(PROFILE, $firstchar, 1);
+ seek(PROFILE, -1, 1); # unread the firstchar
+ if ($firstchar !~ /[[:print:]]/) { # is not a text character
+ return "";
+ }
+ while (defined($line = <PROFILE>)) {
+ $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines
+ if ($line =~ /^%warn\s+(.*)/) { # 'warn' command
+ # Note this matches both '%warn blah\n' and '%warn\n'.
+ print STDERR "WARNING: $1\n"; # print the rest of the line
+ } elsif ($line =~ /^%/) {
+ print STDERR "Ignoring unknown command from profile header: $line";
+ } else {
+ # End of commands, must be the header line.
+ return $line;
+ }
+ }
+ return undef; # got to EOF without seeing a header line
+}
+
+sub IsSymbolizedProfileFile {
+ my $file_name = shift;
+ if (!(-e $file_name) || !(-r $file_name)) {
+ return 0;
+ }
+ # Check if the file contains a symbol-section marker.
+ open(TFILE, "<$file_name");
+ binmode TFILE;
+ my $firstline = ReadProfileHeader(*TFILE);
+ close(TFILE);
+ if (!$firstline) {
+ return 0;
+ }
+ $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $symbol_marker = $&;
+ return $firstline =~ /^--- *$symbol_marker/;
+}
+
+# Parse profile generated by common/profiler.cc and return a reference
+# to a map:
+# $result->{version} Version number of profile file
+# $result->{period} Sampling period (in microseconds)
+# $result->{profile} Profile object
+# $result->{threads} Map of thread IDs to profile objects
+# $result->{map} Memory map info from profile
+# $result->{pcs} Hash of all PC values seen, key is hex address
+sub ReadProfile {
+ my $prog = shift;
+ my $fname = shift;
+ my $result; # return value
+
+ $CONTENTION_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $contention_marker = $&;
+ $GROWTH_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $growth_marker = $&;
+ $SYMBOL_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $symbol_marker = $&;
+ $PROFILE_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $profile_marker = $&;
+ $HEAP_PAGE =~ m,[^/]+$,; # matches everything after the last slash
+ my $heap_marker = $&;
+
+ # Look at first line to see if it is a heap or a CPU profile.
+ # CPU profile may start with no header at all, and just binary data
+ # (starting with \0\0\0\0) -- in that case, don't try to read the
+ # whole firstline, since it may be gigabytes(!) of data.
+ open(PROFILE, "<$fname") || error("$fname: $!\n");
+ binmode PROFILE; # New perls do UTF-8 processing
+ my $header = ReadProfileHeader(*PROFILE);
+ if (!defined($header)) { # means "at EOF"
+ error("Profile is empty.\n");
+ }
+
+ my $symbols;
+ if ($header =~ m/^--- *$symbol_marker/o) {
+ # Verify that the user asked for a symbolized profile
+ if (!$main::use_symbolized_profile) {
+      # we have both a binary and a symbolized profile; abort
+ error("FATAL ERROR: Symbolized profile\n $fname\ncannot be used with " .
+ "a binary arg. Try again without passing\n $prog\n");
+ }
+ # Read the symbol section of the symbolized profile file.
+ $symbols = ReadSymbols(*PROFILE{IO});
+ # Read the next line to get the header for the remaining profile.
+ $header = ReadProfileHeader(*PROFILE) || "";
+ }
+
+ if ($header =~ m/^--- *($heap_marker|$growth_marker)/o) {
+ # Skip "--- ..." line for profile types that have their own headers.
+ $header = ReadProfileHeader(*PROFILE) || "";
+ }
+
+ $main::profile_type = '';
+
+ if ($header =~ m/^heap profile:.*$growth_marker/o) {
+ $main::profile_type = 'growth';
+ $result = ReadHeapProfile($prog, *PROFILE, $header);
+ } elsif ($header =~ m/^heap profile:/) {
+ $main::profile_type = 'heap';
+ $result = ReadHeapProfile($prog, *PROFILE, $header);
+ } elsif ($header =~ m/^heap/) {
+ $main::profile_type = 'heap';
+ $result = ReadThreadedHeapProfile($prog, $fname, $header);
+ } elsif ($header =~ m/^--- *$contention_marker/o) {
+ $main::profile_type = 'contention';
+ $result = ReadSynchProfile($prog, *PROFILE);
+ } elsif ($header =~ m/^--- *Stacks:/) {
+ print STDERR
+ "Old format contention profile: mistakenly reports " .
+ "condition variable signals as lock contentions.\n";
+ $main::profile_type = 'contention';
+ $result = ReadSynchProfile($prog, *PROFILE);
+ } elsif ($header =~ m/^--- *$profile_marker/) {
+ # the binary cpu profile data starts immediately after this line
+ $main::profile_type = 'cpu';
+ $result = ReadCPUProfile($prog, $fname, *PROFILE);
+ } else {
+ if (defined($symbols)) {
+ # a symbolized profile contains a format we don't recognize, bail out
+ error("$fname: Cannot recognize profile section after symbols.\n");
+ }
+ # no ascii header present -- must be a CPU profile
+ $main::profile_type = 'cpu';
+ $result = ReadCPUProfile($prog, $fname, *PROFILE);
+ }
+
+ close(PROFILE);
+
+ # if we got symbols along with the profile, return those as well
+ if (defined($symbols)) {
+ $result->{symbols} = $symbols;
+ }
+
+ return $result;
+}
+
+# Subtract one from caller pc so we map back to call instr.
+# However, don't do this if we're reading a symbolized profile
+# file, in which case the subtract-one was done when the file
+# was written.
+#
+# We apply the same logic to all readers, though ReadCPUProfile uses an
+# independent implementation.
+sub FixCallerAddresses {
+ my $stack = shift;
+ # --raw/http: Always subtract one from pc's, because PrintSymbolizedProfile()
+ # dumps unadjusted profiles.
+ {
+ $stack =~ /(\s)/;
+ my $delimiter = $1;
+ my @addrs = split(' ', $stack);
+ my @fixedaddrs;
+ $#fixedaddrs = $#addrs;
+ if ($#addrs >= 0) {
+ $fixedaddrs[0] = $addrs[0];
+ }
+ for (my $i = 1; $i <= $#addrs; $i++) {
+ $fixedaddrs[$i] = AddressSub($addrs[$i], "0x1");
+ }
+ return join $delimiter, @fixedaddrs;
+ }
+}
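+
+# Sketch (hypothetical pcs): given "pc0 pc1 pc2", pc0 (the leaf) is kept
+# as-is while pc1 and pc2 are each decremented by one via AddressSub,
+# mapping return addresses back onto their call instructions.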
+
+# CPU profile reader
+sub ReadCPUProfile {
+ my $prog = shift;
+ my $fname = shift; # just used for logging
+ local *PROFILE = shift;
+ my $version;
+ my $period;
+ my $i;
+ my $profile = {};
+ my $pcs = {};
+
+ # Parse string into array of slots.
+ my $slots = CpuProfileStream->new(*PROFILE, $fname);
+
+ # Read header. The current header version is a 5-element structure
+ # containing:
+ # 0: header count (always 0)
+ # 1: header "words" (after this one: 3)
+ # 2: format version (0)
+ # 3: sampling period (usec)
+ # 4: unused padding (always 0)
+ if ($slots->get(0) != 0 ) {
+ error("$fname: not a profile file, or old format profile file\n");
+ }
+ $i = 2 + $slots->get(1);
+ $version = $slots->get(2);
+ $period = $slots->get(3);
+ # Do some sanity checking on these header values.
+ if ($version > (2**32) || $period > (2**32) || $i > (2**32) || $i < 5) {
+ error("$fname: not a profile file, or corrupted profile file\n");
+ }
+
+ # Parse profile
+ while ($slots->get($i) != -1) {
+ my $n = $slots->get($i++);
+ my $d = $slots->get($i++);
+ if ($d > (2**16)) { # TODO(csilvers): what's a reasonable max-stack-depth?
+ my $addr = sprintf("0%o", $i * ($address_length == 8 ? 4 : 8));
+ print STDERR "At index $i (address $addr):\n";
+ error("$fname: stack trace depth >= 2**32\n");
+ }
+ if ($slots->get($i) == 0) {
+ # End of profile data marker
+ $i += $d;
+ last;
+ }
+
+ # Make key out of the stack entries
+ my @k = ();
+ for (my $j = 0; $j < $d; $j++) {
+ my $pc = $slots->get($i+$j);
+ # Subtract one from caller pc so we map back to call instr.
+ $pc--;
+ $pc = sprintf("%0*x", $address_length, $pc);
+ $pcs->{$pc} = 1;
+ push @k, $pc;
+ }
+
+ AddEntry($profile, (join "\n", @k), $n);
+ $i += $d;
+ }
+
+ # Parse map
+ my $map = '';
+ seek(PROFILE, $i * 4, 0);
+ read(PROFILE, $map, (stat PROFILE)[7]);
+
+ my $r = {};
+ $r->{version} = $version;
+ $r->{period} = $period;
+ $r->{profile} = $profile;
+ $r->{libs} = ParseLibraries($prog, $map, $pcs);
+ $r->{pcs} = $pcs;
+
+ return $r;
+}
+
+sub HeapProfileIndex {
+ my $index = 1;
+ if ($main::opt_inuse_space) {
+ $index = 1;
+ } elsif ($main::opt_inuse_objects) {
+ $index = 0;
+ } elsif ($main::opt_alloc_space) {
+ $index = 3;
+ } elsif ($main::opt_alloc_objects) {
+ $index = 2;
+ }
+ return $index;
+}
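+
+# The index selects a column from the (count1, size1, count2, size2)
+# tuples parsed below: 0 = in-use objects, 1 = in-use space,
+# 2 = allocated objects, 3 = allocated space.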
+
+sub ReadMappedLibraries {
+ my $fh = shift;
+ my $map = "";
+ # Read the /proc/self/maps data
+ while (<$fh>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ $map .= $_;
+ }
+ return $map;
+}
+
+sub ReadMemoryMap {
+ my $fh = shift;
+ my $map = "";
+ # Read /proc/self/maps data as formatted by DumpAddressMap()
+ my $buildvar = "";
+ while (<PROFILE>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ # Parse "build=<dir>" specification if supplied
+ if (m/^\s*build=(.*)\n/) {
+ $buildvar = $1;
+ }
+
+ # Expand "$build" variable if available
+ $_ =~ s/\$build\b/$buildvar/g;
+
+ $map .= $_;
+ }
+ return $map;
+}
+
+sub AdjustSamples {
+ my ($sample_adjustment, $sampling_algorithm, $n1, $s1, $n2, $s2) = @_;
+ if ($sample_adjustment) {
+ if ($sampling_algorithm == 2) {
+ # Remote-heap version 2
+ # The sampling frequency is the rate of a Poisson process.
+ # This means that the probability of sampling an allocation of
+ # size X with sampling rate Y is 1 - exp(-X/Y)
+ if ($n1 != 0) {
+ my $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
+ my $scale_factor = 1/(1 - exp(-$ratio));
+ $n1 *= $scale_factor;
+ $s1 *= $scale_factor;
+ }
+ if ($n2 != 0) {
+ my $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
+ my $scale_factor = 1/(1 - exp(-$ratio));
+ $n2 *= $scale_factor;
+ $s2 *= $scale_factor;
+ }
+ } else {
+ # Remote-heap version 1
+ my $ratio;
+ $ratio = (($s1*1.0)/$n1)/($sample_adjustment);
+ if ($ratio < 1) {
+ $n1 /= $ratio;
+ $s1 /= $ratio;
+ }
+ $ratio = (($s2*1.0)/$n2)/($sample_adjustment);
+ if ($ratio < 1) {
+ $n2 /= $ratio;
+ $s2 /= $ratio;
+ }
+ }
+ }
+ return ($n1, $s1, $n2, $s2);
+}
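+
+# Worked sketch for version 2 (hypothetical bucket): with a 512KB sampling
+# rate, one sampled allocation of 524288 bytes gives ratio 1.0 and scale
+# factor 1/(1 - exp(-1)) ~= 1.582, so the bucket becomes roughly 1.58
+# allocations / 829,000 bytes.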
+
+sub ReadHeapProfile {
+ my $prog = shift;
+ local *PROFILE = shift;
+ my $header = shift;
+
+ my $index = HeapProfileIndex();
+
+ # Find the type of this profile. The header line looks like:
+ # heap profile: 1246: 8800744 [ 1246: 8800744] @ <heap-url>/266053
+ # There are two pairs <count: size>, the first inuse objects/space, and the
+ # second allocated objects/space. This is followed optionally by a profile
+ # type, and if that is present, optionally by a sampling frequency.
+ # For remote heap profiles (v1):
+ # The interpretation of the sampling frequency is that the profiler, for
+ # each sample, calculates a uniformly distributed random integer less than
+ # the given value, and records the next sample after that many bytes have
+ # been allocated. Therefore, the expected sample interval is half of the
+ # given frequency. By default, if not specified, the expected sample
+ # interval is 128KB. Only remote-heap-page profiles are adjusted for
+ # sample size.
+ # For remote heap profiles (v2):
+ # The sampling frequency is the rate of a Poisson process. This means that
+ # the probability of sampling an allocation of size X with sampling rate Y
+ # is 1 - exp(-X/Y)
+ # For version 2, a typical header line might look like this:
+ # heap profile: 1922: 127792360 [ 1922: 127792360] @ <heap-url>_v2/524288
+ # the trailing number (524288) is the sampling rate. (Version 1 showed
+ # double the 'rate' here)
+ my $sampling_algorithm = 0;
+ my $sample_adjustment = 0;
+ chomp($header);
+ my $type = "unknown";
+ if ($header =~ m"^heap profile:\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\](\s*@\s*([^/]*)(/(\d+))?)?") {
+ if (defined($6) && ($6 ne '')) {
+ $type = $6;
+ my $sample_period = $8;
+ # $type is "heapprofile" for profiles generated by the
+ # heap-profiler, and either "heap" or "heap_v2" for profiles
+ # generated by sampling directly within tcmalloc. It can also
+ # be "growth" for heap-growth profiles. The first is typically
+ # found for profiles generated locally, and the others for
+ # remote profiles.
+ if (($type eq "heapprofile") || ($type !~ /heap/) ) {
+ # No need to adjust for the sampling rate with heap-profiler-derived data
+ $sampling_algorithm = 0;
+ } elsif ($type =~ /_v2/) {
+ $sampling_algorithm = 2; # version 2 sampling
+ if (defined($sample_period) && ($sample_period ne '')) {
+ $sample_adjustment = int($sample_period);
+ }
+ } else {
+ $sampling_algorithm = 1; # version 1 sampling
+ if (defined($sample_period) && ($sample_period ne '')) {
+ $sample_adjustment = int($sample_period)/2;
+ }
+ }
+ } else {
+ # We detect whether or not this is a remote-heap profile by checking
+ # that the total-allocated stats ($n2,$s2) are exactly the
+ # same as the in-use stats ($n1,$s1). It is remotely conceivable
+ # that a non-remote-heap profile may pass this check, but it is hard
+ # to imagine how that could happen.
+ # In this case it's so old it's guaranteed to be remote-heap version 1.
+ my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
+ if (($n1 == $n2) && ($s1 == $s2)) {
+ # This is likely to be a remote-heap based sample profile
+ $sampling_algorithm = 1;
+ }
+ }
+ }
+
+ if ($sampling_algorithm > 0) {
+ # For remote-heap generated profiles, adjust the counts and sizes to
+ # account for the sample rate (we sample once every 128KB by default).
+ if ($sample_adjustment == 0) {
+ # Turn on profile adjustment.
+ $sample_adjustment = 128*1024;
+ print STDERR "Adjusting heap profiles for 1-in-128KB sampling rate\n";
+ } else {
+ printf STDERR ("Adjusting heap profiles for 1-in-%d sampling rate\n",
+ $sample_adjustment);
+ }
+ if ($sampling_algorithm > 1) {
+ # We don't bother printing anything for the original version (version 1)
+      print STDERR "Heap version $sampling_algorithm\n";
+ }
+ }
+
+ my $profile = {};
+ my $pcs = {};
+ my $map = "";
+
+ while (<PROFILE>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ if (/^MAPPED_LIBRARIES:/) {
+ $map .= ReadMappedLibraries(*PROFILE);
+ last;
+ }
+
+ if (/^--- Memory map:/) {
+ $map .= ReadMemoryMap(*PROFILE);
+ last;
+ }
+
+ # Read entry of the form:
+ # <count1>: <bytes1> [<count2>: <bytes2>] @ a1 a2 a3 ... an
+ s/^\s*//;
+ s/\s*$//;
+ if (m/^\s*(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]\s+@\s+(.*)$/) {
+ my $stack = $5;
+ my ($n1, $s1, $n2, $s2) = ($1, $2, $3, $4);
+ my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
+ $n1, $s1, $n2, $s2);
+ AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
+ }
+ }
+
+ my $r = {};
+ $r->{version} = "heap";
+ $r->{period} = 1;
+ $r->{profile} = $profile;
+ $r->{libs} = ParseLibraries($prog, $map, $pcs);
+ $r->{pcs} = $pcs;
+ return $r;
+}
+
+sub ReadThreadedHeapProfile {
+ my ($prog, $fname, $header) = @_;
+
+ my $index = HeapProfileIndex();
+ my $sampling_algorithm = 0;
+ my $sample_adjustment = 0;
+ chomp($header);
+ my $type = "unknown";
+ # Assuming a very specific type of header for now.
+ if ($header =~ m"^heap_v2/(\d+)") {
+ $type = "_v2";
+ $sampling_algorithm = 2;
+ $sample_adjustment = int($1);
+ }
+ if ($type ne "_v2" || !defined($sample_adjustment)) {
+ die "Threaded heap profiles require v2 sampling with a sample rate\n";
+ }
+
+ my $profile = {};
+ my $thread_profiles = {};
+ my $pcs = {};
+ my $map = "";
+ my $stack = "";
+
+ while (<PROFILE>) {
+ s/\r//g;
+ if (/^MAPPED_LIBRARIES:/) {
+ $map .= ReadMappedLibraries(*PROFILE);
+ last;
+ }
+
+ if (/^--- Memory map:/) {
+ $map .= ReadMemoryMap(*PROFILE);
+ last;
+ }
+
+ # Read entry of the form:
+ # @ a1 a2 ... an
+ # t*: <count1>: <bytes1> [<count2>: <bytes2>]
+ # t1: <count1>: <bytes1> [<count2>: <bytes2>]
+ # ...
+ # tn: <count1>: <bytes1> [<count2>: <bytes2>]
+ s/^\s*//;
+ s/\s*$//;
+ if (m/^@\s+(.*)$/) {
+ $stack = $1;
+ } elsif (m/^\s*(t(\*|\d+)):\s+(\d+):\s+(\d+)\s+\[\s*(\d+):\s+(\d+)\]$/) {
+ if ($stack eq "") {
+ # Still in the header, so this is just a per-thread summary.
+ next;
+ }
+ my $thread = $2;
+ my ($n1, $s1, $n2, $s2) = ($3, $4, $5, $6);
+ my @counts = AdjustSamples($sample_adjustment, $sampling_algorithm,
+ $n1, $s1, $n2, $s2);
+ if ($thread eq "*") {
+ AddEntries($profile, $pcs, FixCallerAddresses($stack), $counts[$index]);
+ } else {
+ if (!exists($thread_profiles->{$thread})) {
+ $thread_profiles->{$thread} = {};
+ }
+ AddEntries($thread_profiles->{$thread}, $pcs,
+ FixCallerAddresses($stack), $counts[$index]);
+ }
+ }
+ }
+
+ my $r = {};
+ $r->{version} = "heap";
+ $r->{period} = 1;
+ $r->{profile} = $profile;
+ $r->{threads} = $thread_profiles;
+ $r->{libs} = ParseLibraries($prog, $map, $pcs);
+ $r->{pcs} = $pcs;
+ return $r;
+}
+
+sub ReadSynchProfile {
+ my $prog = shift;
+ local *PROFILE = shift;
+ my $header = shift;
+
+ my $map = '';
+ my $profile = {};
+ my $pcs = {};
+ my $sampling_period = 1;
+ my $cyclespernanosec = 2.8; # Default assumption for old binaries
+ my $seen_clockrate = 0;
+ my $line;
+
+ my $index = 0;
+ if ($main::opt_total_delay) {
+ $index = 0;
+ } elsif ($main::opt_contentions) {
+ $index = 1;
+ } elsif ($main::opt_mean_delay) {
+ $index = 2;
+ }
+
+ while ( $line = <PROFILE> ) {
+ $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines
+ if ( $line =~ /^\s*(\d+)\s+(\d+) \@\s*(.*?)\s*$/ ) {
+ my ($cycles, $count, $stack) = ($1, $2, $3);
+
+ # Convert cycles to nanoseconds
+ $cycles /= $cyclespernanosec;
+
+ # Adjust for sampling done by application
+ $cycles *= $sampling_period;
+ $count *= $sampling_period;
+
+ my @values = ($cycles, $count, $cycles / $count);
+ AddEntries($profile, $pcs, FixCallerAddresses($stack), $values[$index]);
+
+ } elsif ( $line =~ /^(slow release).*thread \d+ \@\s*(.*?)\s*$/ ||
+ $line =~ /^\s*(\d+) \@\s*(.*?)\s*$/ ) {
+ my ($cycles, $stack) = ($1, $2);
+ if ($cycles !~ /^\d+$/) {
+ next;
+ }
+
+ # Convert cycles to nanoseconds
+ $cycles /= $cyclespernanosec;
+
+ # Adjust for sampling done by application
+ $cycles *= $sampling_period;
+
+ AddEntries($profile, $pcs, FixCallerAddresses($stack), $cycles);
+
+ } elsif ( $line =~ m/^([a-z][^=]*)=(.*)$/ ) {
+ my ($variable, $value) = ($1,$2);
+ for ($variable, $value) {
+ s/^\s+//;
+ s/\s+$//;
+ }
+ if ($variable eq "cycles/second") {
+ $cyclespernanosec = $value / 1e9;
+ $seen_clockrate = 1;
+ } elsif ($variable eq "sampling period") {
+ $sampling_period = $value;
+ } elsif ($variable eq "ms since reset") {
+ # Currently nothing is done with this value in jeprof
+ # So we just silently ignore it for now
+ } elsif ($variable eq "discarded samples") {
+ # Currently nothing is done with this value in jeprof
+ # So we just silently ignore it for now
+ } else {
+        printf STDERR ("Ignoring unknown variable in /contention output: " .
+                       "'%s' = '%s'\n", $variable, $value);
+ }
+ } else {
+ # Memory map entry
+ $map .= $line;
+ }
+ }
+
+ if (!$seen_clockrate) {
+ printf STDERR ("No cycles/second entry in profile; Guessing %.1f GHz\n",
+ $cyclespernanosec);
+ }
+
+ my $r = {};
+ $r->{version} = 0;
+ $r->{period} = $sampling_period;
+ $r->{profile} = $profile;
+ $r->{libs} = ParseLibraries($prog, $map, $pcs);
+ $r->{pcs} = $pcs;
+ return $r;
+}
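+
+# A contention entry parsed above looks like (illustrative values):
+#   19845 2 @ 0x401a32 0x4019f0
+# i.e. <cycles> <count> @ <stack>. Metadata arrives as "name = value"
+# lines, e.g. "cycles/second = 2800000000", which sets $cyclespernanosec
+# to 2.8.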
+
+# Given a hex value in the form "0x1abcd" or "1abcd", return either
+# "0001abcd" or "000000000001abcd", depending on the current (global)
+# address length.
+sub HexExtend {
+ my $addr = shift;
+
+ $addr =~ s/^(0x)?0*//;
+ my $zeros_needed = $address_length - length($addr);
+ if ($zeros_needed < 0) {
+ printf STDERR "Warning: address $addr is longer than address length $address_length\n";
+ return $addr;
+ }
+ return ("0" x $zeros_needed) . $addr;
+}
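+
+# Usage sketch (illustrative): with $address_length == 8, both
+# HexExtend("0x1abcd") and HexExtend("1abcd") return "0001abcd"; with
+# $address_length == 16 they return "000000000001abcd".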
+
+##### Symbol extraction #####
+
+# Aggressively search the lib_prefix values for the given library
+# If all else fails, just return the name of the library unmodified.
+# If the lib_prefix is "/my/path,/other/path" and $file is "/lib/dir/mylib.so"
+# it will search the following locations in this order, until it finds a file:
+# /my/path/lib/dir/mylib.so
+# /other/path/lib/dir/mylib.so
+# /my/path/dir/mylib.so
+# /other/path/dir/mylib.so
+# /my/path/mylib.so
+# /other/path/mylib.so
+# /lib/dir/mylib.so (returned as last resort)
+sub FindLibrary {
+ my $file = shift;
+ my $suffix = $file;
+
+ # Search for the library as described above
+ do {
+ foreach my $prefix (@prefix_list) {
+ my $fullpath = $prefix . $suffix;
+ if (-e $fullpath) {
+ return $fullpath;
+ }
+ }
+ } while ($suffix =~ s|^/[^/]+/|/|);
+ return $file;
+}
+
+# Return path to library with debugging symbols.
+# For libc libraries, the copy in /usr/lib/debug contains debugging symbols
+sub DebuggingLibrary {
+ my $file = shift;
+ if ($file =~ m|^/|) {
+ if (-f "/usr/lib/debug$file") {
+ return "/usr/lib/debug$file";
+ } elsif (-f "/usr/lib/debug$file.debug") {
+ return "/usr/lib/debug$file.debug";
+ }
+ }
+ return undef;
+}
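+
+# e.g. (illustrative path): for "/lib/libc.so.7" this probes
+# "/usr/lib/debug/lib/libc.so.7" first and then
+# "/usr/lib/debug/lib/libc.so.7.debug", returning whichever exists.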
+
+# Parse text section header of a library using objdump
+sub ParseTextSectionHeaderFromObjdump {
+ my $lib = shift;
+
+ my $size = undef;
+ my $vma;
+ my $file_offset;
+ # Get objdump output from the library file to figure out how to
+ # map between mapped addresses and addresses in the library.
+ my $cmd = ShellEscape($obj_tool_map{"objdump"}, "-h", $lib);
+ open(OBJDUMP, "$cmd |") || error("$cmd: $!\n");
+ while (<OBJDUMP>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ # Idx Name Size VMA LMA File off Algn
+ # 10 .text 00104b2c 420156f0 420156f0 000156f0 2**4
+ # For 64-bit objects, VMA and LMA will be 16 hex digits, size and file
+ # offset may still be 8. But AddressSub below will still handle that.
+ my @x = split;
+ if (($#x >= 6) && ($x[1] eq '.text')) {
+ $size = $x[2];
+ $vma = $x[3];
+ $file_offset = $x[5];
+ last;
+ }
+ }
+ close(OBJDUMP);
+
+ if (!defined($size)) {
+ return undef;
+ }
+
+ my $r = {};
+ $r->{size} = $size;
+ $r->{vma} = $vma;
+ $r->{file_offset} = $file_offset;
+
+ return $r;
+}
+
+# Parse text section header of a library using otool (on OS X)
+sub ParseTextSectionHeaderFromOtool {
+ my $lib = shift;
+
+ my $size = undef;
+ my $vma = undef;
+ my $file_offset = undef;
+ # Get otool output from the library file to figure out how to
+ # map between mapped addresses and addresses in the library.
+ my $command = ShellEscape($obj_tool_map{"otool"}, "-l", $lib);
+ open(OTOOL, "$command |") || error("$command: $!\n");
+ my $cmd = "";
+ my $sectname = "";
+ my $segname = "";
+ foreach my $line (<OTOOL>) {
+ $line =~ s/\r//g; # turn windows-looking lines into unix-looking lines
+ # Load command <#>
+ # cmd LC_SEGMENT
+ # [...]
+ # Section
+ # sectname __text
+ # segname __TEXT
+ # addr 0x000009f8
+ # size 0x00018b9e
+ # offset 2552
+ # align 2^2 (4)
+ # We will need to strip off the leading 0x from the hex addresses,
+ # and convert the offset into hex.
+ if ($line =~ /Load command/) {
+ $cmd = "";
+ $sectname = "";
+ $segname = "";
+ } elsif ($line =~ /Section/) {
+ $sectname = "";
+ $segname = "";
+ } elsif ($line =~ /cmd (\w+)/) {
+ $cmd = $1;
+ } elsif ($line =~ /sectname (\w+)/) {
+ $sectname = $1;
+ } elsif ($line =~ /segname (\w+)/) {
+ $segname = $1;
+ } elsif (!(($cmd eq "LC_SEGMENT" || $cmd eq "LC_SEGMENT_64") &&
+ $sectname eq "__text" &&
+ $segname eq "__TEXT")) {
+ next;
+ } elsif ($line =~ /\baddr 0x([0-9a-fA-F]+)/) {
+ $vma = $1;
+ } elsif ($line =~ /\bsize 0x([0-9a-fA-F]+)/) {
+ $size = $1;
+ } elsif ($line =~ /\boffset ([0-9]+)/) {
+ $file_offset = sprintf("%016x", $1);
+ }
+ if (defined($vma) && defined($size) && defined($file_offset)) {
+ last;
+ }
+ }
+ close(OTOOL);
+
+ if (!defined($vma) || !defined($size) || !defined($file_offset)) {
+ return undef;
+ }
+
+ my $r = {};
+ $r->{size} = $size;
+ $r->{vma} = $vma;
+ $r->{file_offset} = $file_offset;
+
+ return $r;
+}
+
+sub ParseTextSectionHeader {
+ # obj_tool_map("otool") is only defined if we're in a Mach-O environment
+ if (defined($obj_tool_map{"otool"})) {
+ my $r = ParseTextSectionHeaderFromOtool(@_);
+ if (defined($r)){
+ return $r;
+ }
+ }
+ # If otool doesn't work, or we don't have it, fall back to objdump
+ return ParseTextSectionHeaderFromObjdump(@_);
+}
+
+# Split /proc/pid/maps dump into a list of libraries
+sub ParseLibraries {
+ return if $main::use_symbol_page; # We don't need libraries info.
+ my $prog = Cwd::abs_path(shift);
+ my $map = shift;
+ my $pcs = shift;
+
+ my $result = [];
+ my $h = "[a-f0-9]+";
+ my $zero_offset = HexExtend("0");
+
+ my $buildvar = "";
+ foreach my $l (split("\n", $map)) {
+ if ($l =~ m/^\s*build=(.*)$/) {
+ $buildvar = $1;
+ }
+
+ my $start;
+ my $finish;
+ my $offset;
+ my $lib;
+ if ($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+\.(so|dll|dylib|bundle)((\.\d+)+\w*(\.\d+){0,3})?)$/i) {
+ # Full line from /proc/self/maps. Example:
+ # 40000000-40015000 r-xp 00000000 03:01 12845071 /lib/ld-2.3.2.so
+ $start = HexExtend($1);
+ $finish = HexExtend($2);
+ $offset = HexExtend($3);
+ $lib = $4;
+ $lib =~ s|\\|/|g; # turn windows-style paths into unix-style paths
+ } elsif ($l =~ /^\s*($h)-($h):\s*(\S+\.so(\.\d+)*)/) {
+ # Cooked line from DumpAddressMap. Example:
+ # 40000000-40015000: /lib/ld-2.3.2.so
+ $start = HexExtend($1);
+ $finish = HexExtend($2);
+ $offset = $zero_offset;
+ $lib = $3;
+ } elsif (($l =~ /^($h)-($h)\s+..x.\s+($h)\s+\S+:\S+\s+\d+\s+(\S+)$/i) && ($4 eq $prog)) {
+ # PIEs and address space randomization do not play well with our
+ # default assumption that main executable is at lowest
+ # addresses. So we're detecting main executable in
+ # /proc/self/maps as well.
+ $start = HexExtend($1);
+ $finish = HexExtend($2);
+ $offset = HexExtend($3);
+ $lib = $4;
+ $lib =~ s|\\|/|g; # turn windows-style paths into unix-style paths
+ }
+ # FreeBSD 10.0 virtual memory map /proc/curproc/map as defined in
+ # function procfs_doprocmap (sys/fs/procfs/procfs_map.c)
+ #
+ # Example:
+ # 0x800600000 0x80061a000 26 0 0xfffff800035a0000 r-x 75 33 0x1004 COW NC vnode /libexec/ld-elf.s
+ # o.1 NCH -1
+ elsif ($l =~ /^(0x$h)\s(0x$h)\s\d+\s\d+\s0x$h\sr-x\s\d+\s\d+\s0x\d+\s(COW|NCO)\s(NC|NNC)\svnode\s(\S+\.so(\.\d+)*)/) {
+ $start = HexExtend($1);
+ $finish = HexExtend($2);
+ $offset = $zero_offset;
+ $lib = FindLibrary($5);
+
+ } else {
+ next;
+ }
+
+ # Expand "$build" variable if available
+ $lib =~ s/\$build\b/$buildvar/g;
+
+ $lib = FindLibrary($lib);
+
+ # Check for pre-relocated libraries, which use pre-relocated symbol tables
+ # and thus require adjusting the offset that we'll use to translate
+ # VM addresses into symbol table addresses.
+ # Only do this if we're not going to fetch the symbol table from a
+ # debugging copy of the library.
+ if (!DebuggingLibrary($lib)) {
+ my $text = ParseTextSectionHeader($lib);
+ if (defined($text)) {
+ my $vma_offset = AddressSub($text->{vma}, $text->{file_offset});
+ $offset = AddressAdd($offset, $vma_offset);
+ }
+ }
+
+ if($main::opt_debug) { printf STDERR "$start:$finish ($offset) $lib\n"; }
+ push(@{$result}, [$lib, $start, $finish, $offset]);
+ }
+
+ # Append special entry for additional library (not relocated)
+ if ($main::opt_lib ne "") {
+ my $text = ParseTextSectionHeader($main::opt_lib);
+ if (defined($text)) {
+ my $start = $text->{vma};
+ my $finish = AddressAdd($start, $text->{size});
+
+ push(@{$result}, [$main::opt_lib, $start, $finish, $start]);
+ }
+ }
+
+ # Append special entry for the main program. This covers
+ # 0..max_pc_value_seen, so that we assume pc values not found in one
+ # of the library ranges will be treated as coming from the main
+ # program binary.
+ my $min_pc = HexExtend("0");
+ my $max_pc = $min_pc; # find the maximal PC value in any sample
+ foreach my $pc (keys(%{$pcs})) {
+ if (HexExtend($pc) gt $max_pc) { $max_pc = HexExtend($pc); }
+ }
+ push(@{$result}, [$prog, $min_pc, $max_pc, $zero_offset]);
+
+ return $result;
+}
+
+# Add two hex addresses of length $address_length.
+# Run jeprof --test for unit test if this is changed.
+sub AddressAdd {
+ my $addr1 = shift;
+ my $addr2 = shift;
+ my $sum;
+
+ if ($address_length == 8) {
+ # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
+ $sum = (hex($addr1)+hex($addr2)) % (0x10000000 * 16);
+ return sprintf("%08x", $sum);
+
+ } else {
+ # Do the addition in 7-nibble chunks to trivialize carry handling.
+
+ if ($main::opt_debug and $main::opt_test) {
+ print STDERR "AddressAdd $addr1 + $addr2 = ";
+ }
+
+ my $a1 = substr($addr1,-7);
+ $addr1 = substr($addr1,0,-7);
+ my $a2 = substr($addr2,-7);
+ $addr2 = substr($addr2,0,-7);
+ $sum = hex($a1) + hex($a2);
+ my $c = 0;
+ if ($sum > 0xfffffff) {
+ $c = 1;
+ $sum -= 0x10000000;
+ }
+ my $r = sprintf("%07x", $sum);
+
+ $a1 = substr($addr1,-7);
+ $addr1 = substr($addr1,0,-7);
+ $a2 = substr($addr2,-7);
+ $addr2 = substr($addr2,0,-7);
+ $sum = hex($a1) + hex($a2) + $c;
+ $c = 0;
+ if ($sum > 0xfffffff) {
+ $c = 1;
+ $sum -= 0x10000000;
+ }
+ $r = sprintf("%07x", $sum) . $r;
+
+ $sum = hex($addr1) + hex($addr2) + $c;
+ if ($sum > 0xff) { $sum -= 0x100; }
+ $r = sprintf("%02x", $sum) . $r;
+
+ if ($main::opt_debug and $main::opt_test) { print STDERR "$r\n"; }
+
+ return $r;
+ }
+}
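+
+# Chunking sketch (illustrative): a 16-nibble operand such as
+# "00a00000aaaaaaaa" is consumed right to left as "aaaaaaa" (low chunk),
+# "a00000a" (middle chunk) and "00" (top byte). Each 7-nibble chunk fits
+# in Perl's native integer range, and $c (here) or $b (in AddressSub
+# below) carries or borrows between chunks; the vectors in RunUnitTests()
+# exercise exactly these boundaries.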
+
+
+# Subtract two hex addresses of length $address_length.
+# Run jeprof --test for unit test if this is changed.
+sub AddressSub {
+ my $addr1 = shift;
+ my $addr2 = shift;
+ my $diff;
+
+ if ($address_length == 8) {
+ # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
+ $diff = (hex($addr1)-hex($addr2)) % (0x10000000 * 16);
+ return sprintf("%08x", $diff);
+
+ } else {
+ # Do the addition in 7-nibble chunks to trivialize borrow handling.
+ # if ($main::opt_debug) { print STDERR "AddressSub $addr1 - $addr2 = "; }
+
+ my $a1 = hex(substr($addr1,-7));
+ $addr1 = substr($addr1,0,-7);
+ my $a2 = hex(substr($addr2,-7));
+ $addr2 = substr($addr2,0,-7);
+ my $b = 0;
+ if ($a2 > $a1) {
+ $b = 1;
+ $a1 += 0x10000000;
+ }
+ $diff = $a1 - $a2;
+ my $r = sprintf("%07x", $diff);
+
+ $a1 = hex(substr($addr1,-7));
+ $addr1 = substr($addr1,0,-7);
+ $a2 = hex(substr($addr2,-7)) + $b;
+ $addr2 = substr($addr2,0,-7);
+ $b = 0;
+ if ($a2 > $a1) {
+ $b = 1;
+ $a1 += 0x10000000;
+ }
+ $diff = $a1 - $a2;
+ $r = sprintf("%07x", $diff) . $r;
+
+ $a1 = hex($addr1);
+ $a2 = hex($addr2) + $b;
+ if ($a2 > $a1) { $a1 += 0x100; }
+ $diff = $a1 - $a2;
+ $r = sprintf("%02x", $diff) . $r;
+
+ # if ($main::opt_debug) { print STDERR "$r\n"; }
+
+ return $r;
+ }
+}
+
+# Increment a hex addresses of length $address_length.
+# Run jeprof --test for unit test if this is changed.
+sub AddressInc {
+ my $addr = shift;
+ my $sum;
+
+ if ($address_length == 8) {
+ # Perl doesn't cope with wraparound arithmetic, so do it explicitly:
+ $sum = (hex($addr)+1) % (0x10000000 * 16);
+ return sprintf("%08x", $sum);
+
+ } else {
+ # Do the addition in 7-nibble chunks to trivialize carry handling.
+ # We are always doing this to step through the addresses in a function,
+ # and will almost never overflow the first chunk, so we check for this
+ # case and exit early.
+
+ # if ($main::opt_debug) { print STDERR "AddressInc $addr1 = "; }
+
+ my $a1 = substr($addr,-7);
+ $addr = substr($addr,0,-7);
+ $sum = hex($a1) + 1;
+ my $r = sprintf("%07x", $sum);
+ if ($sum <= 0xfffffff) {
+ $r = $addr . $r;
+ # if ($main::opt_debug) { print STDERR "$r\n"; }
+ return HexExtend($r);
+ } else {
+ $r = "0000000";
+ }
+
+ $a1 = substr($addr,-7);
+ $addr = substr($addr,0,-7);
+ $sum = hex($a1) + 1;
+ $r = sprintf("%07x", $sum) . $r;
+ if ($sum <= 0xfffffff) {
+ $r = $addr . $r;
+ # if ($main::opt_debug) { print STDERR "$r\n"; }
+ return HexExtend($r);
+ } else {
+ $r = "00000000000000";
+ }
+
+ $sum = hex($addr) + 1;
+ if ($sum > 0xff) { $sum -= 0x100; }
+ $r = sprintf("%02x", $sum) . $r;
+
+ # if ($main::opt_debug) { print STDERR "$r\n"; }
+ return $r;
+ }
+}
+
+# Extract symbols for all PC values found in profile
+sub ExtractSymbols {
+ my $libs = shift;
+ my $pcset = shift;
+
+ my $symbols = {};
+
+ # Map each PC value to the containing library. To make this faster,
+ # we sort libraries by their starting pc value (highest first), and
+ # advance through the libraries as we advance the pc. Sometimes the
+ # addresses of libraries may overlap with the addresses of the main
+ # binary, so to make sure the libraries 'win', we iterate over the
+ # libraries in reverse order (which assumes the binary doesn't start
+ # in the middle of a library, which seems a fair assumption).
+ my @pcs = (sort { $a cmp $b } keys(%{$pcset})); # pcset is 0-extended strings
+ foreach my $lib (sort {$b->[1] cmp $a->[1]} @{$libs}) {
+ my $libname = $lib->[0];
+ my $start = $lib->[1];
+ my $finish = $lib->[2];
+ my $offset = $lib->[3];
+
+ # Use debug library if it exists
+ my $debug_libname = DebuggingLibrary($libname);
+ if ($debug_libname) {
+ $libname = $debug_libname;
+ }
+
+ # Get list of pcs that belong in this library.
+ my $contained = [];
+ my ($start_pc_index, $finish_pc_index);
+ # Find smallest finish_pc_index such that $finish < $pc[$finish_pc_index].
+ for ($finish_pc_index = $#pcs + 1; $finish_pc_index > 0;
+ $finish_pc_index--) {
+ last if $pcs[$finish_pc_index - 1] le $finish;
+ }
+ # Find smallest start_pc_index such that $start <= $pc[$start_pc_index].
+ for ($start_pc_index = $finish_pc_index; $start_pc_index > 0;
+ $start_pc_index--) {
+ last if $pcs[$start_pc_index - 1] lt $start;
+ }
+ # This keeps PC values higher than $pc[$finish_pc_index] in @pcs,
+ # in case there are overlaps in libraries and the main binary.
+ @{$contained} = splice(@pcs, $start_pc_index,
+ $finish_pc_index - $start_pc_index);
+ # Map to symbols
+ MapToSymbols($libname, AddressSub($start, $offset), $contained, $symbols);
+ }
+
+ return $symbols;
+}
+
+# Map list of PC values to symbols for a given image
+sub MapToSymbols {
+ my $image = shift;
+ my $offset = shift;
+ my $pclist = shift;
+ my $symbols = shift;
+
+ my $debug = 0;
+
+ # Ignore empty binaries
+ if ($#{$pclist} < 0) { return; }
+
+ # Figure out the addr2line command to use
+ my $addr2line = $obj_tool_map{"addr2line"};
+ my $cmd = ShellEscape($addr2line, "-f", "-C", "-e", $image);
+ if (exists $obj_tool_map{"addr2line_pdb"}) {
+ $addr2line = $obj_tool_map{"addr2line_pdb"};
+ $cmd = ShellEscape($addr2line, "--demangle", "-f", "-C", "-e", $image);
+ }
+
+ # If "addr2line" isn't installed on the system at all, just use
+ # nm to get what info we can (function names, but not line numbers).
+ if (system(ShellEscape($addr2line, "--help") . " >$dev_null 2>&1") != 0) {
+ MapSymbolsWithNM($image, $offset, $pclist, $symbols);
+ return;
+ }
+
+ # "addr2line -i" can produce a variable number of lines per input
+ # address, with no separator that allows us to tell when data for
+ # the next address starts. So we find the address for a special
+ # symbol (_fini) and interleave this address between all real
+ # addresses passed to addr2line. The name of this special symbol
+ # can then be used as a separator.
+ $sep_address = undef; # May be filled in by MapSymbolsWithNM()
+ my $nm_symbols = {};
+ MapSymbolsWithNM($image, $offset, $pclist, $nm_symbols);
+ if (defined($sep_address)) {
+ # Only add " -i" to addr2line if the binary supports it.
+ # addr2line --help returns 0, but not if it sees an unknown flag first.
+ if (system("$cmd -i --help >$dev_null 2>&1") == 0) {
+ $cmd .= " -i";
+ } else {
+ $sep_address = undef; # no need for sep_address if we don't support -i
+ }
+ }
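+
+  # Sketch of the interleaved input this produces (illustrative
+  # addresses, with _fini at 0x400640):
+  #   00000000004005d0   <- first pc
+  #   0000000000400640   <- $sep_address
+  #   00000000004007a2   <- second pc
+  #   0000000000400640   <- $sep_address
+  # addr2line -i may print several function/file pairs per pc; seeing
+  # $sep_symbol in its output marks the end of that pc's inline chain.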
+
+ # Make file with all PC values with intervening 'sep_address' so
+ # that we can reliably detect the end of inlined function list
+ open(ADDRESSES, ">$main::tmpfile_sym") || error("$main::tmpfile_sym: $!\n");
+ if ($debug) { print("---- $image ---\n"); }
+ for (my $i = 0; $i <= $#{$pclist}; $i++) {
+ # addr2line always reads hex addresses, and does not need '0x' prefix.
+ if ($debug) { printf STDERR ("%s\n", $pclist->[$i]); }
+ printf ADDRESSES ("%s\n", AddressSub($pclist->[$i], $offset));
+ if (defined($sep_address)) {
+ printf ADDRESSES ("%s\n", $sep_address);
+ }
+ }
+ close(ADDRESSES);
+ if ($debug) {
+ print("----\n");
+ system("cat", $main::tmpfile_sym);
+ print("----\n");
+ system("$cmd < " . ShellEscape($main::tmpfile_sym));
+ print("----\n");
+ }
+
+ open(SYMBOLS, "$cmd <" . ShellEscape($main::tmpfile_sym) . " |")
+ || error("$cmd: $!\n");
+ my $count = 0; # Index in pclist
+ while (<SYMBOLS>) {
+ # Read fullfunction and filelineinfo from next pair of lines
+ s/\r?\n$//g;
+ my $fullfunction = $_;
+ $_ = <SYMBOLS>;
+ s/\r?\n$//g;
+ my $filelinenum = $_;
+
+ if (defined($sep_address) && $fullfunction eq $sep_symbol) {
+ # Terminating marker for data for this address
+ $count++;
+ next;
+ }
+
+ $filelinenum =~ s|\\|/|g; # turn windows-style paths into unix-style paths
+
+ my $pcstr = $pclist->[$count];
+ my $function = ShortFunctionName($fullfunction);
+ my $nms = $nm_symbols->{$pcstr};
+ if (defined($nms)) {
+ if ($fullfunction eq '??') {
+ # nm found a symbol for us.
+ $function = $nms->[0];
+ $fullfunction = $nms->[2];
+ } else {
+ # MapSymbolsWithNM tags each routine with its starting address,
+ # useful in case the image has multiple occurrences of this
+        # routine.  (It uses a syntax that resembles template parameters,
+        # which are automatically stripped out by ShortFunctionName().)
+ # addr2line does not provide the same information. So we check
+ # if nm disambiguated our symbol, and if so take the annotated
+ # (nm) version of the routine-name. TODO(csilvers): this won't
+ # catch overloaded, inlined symbols, which nm doesn't see.
+ # Better would be to do a check similar to nm's, in this fn.
+ if ($nms->[2] =~ m/^\Q$function\E/) { # sanity check it's the right fn
+ $function = $nms->[0];
+ $fullfunction = $nms->[2];
+ }
+ }
+ }
+
+ # Prepend to accumulated symbols for pcstr
+ # (so that caller comes before callee)
+ my $sym = $symbols->{$pcstr};
+ if (!defined($sym)) {
+ $sym = [];
+ $symbols->{$pcstr} = $sym;
+ }
+ unshift(@{$sym}, $function, $filelinenum, $fullfunction);
+ if ($debug) { printf STDERR ("%s => [%s]\n", $pcstr, join(" ", @{$sym})); }
+ if (!defined($sep_address)) {
+ # Inlining is off, so this entry ends immediately
+ $count++;
+ }
+ }
+ close(SYMBOLS);
+}
+
+# Use nm to map the list of referenced PCs to symbols. Return true iff we
+# are able to read procedure information via nm.
+sub MapSymbolsWithNM {
+ my $image = shift;
+ my $offset = shift;
+ my $pclist = shift;
+ my $symbols = shift;
+
+ # Get nm output sorted by increasing address
+ my $symbol_table = GetProcedureBoundaries($image, ".");
+ if (!%{$symbol_table}) {
+ return 0;
+ }
+ # Start addresses are already the right length (8 or 16 hex digits).
+ my @names = sort { $symbol_table->{$a}->[0] cmp $symbol_table->{$b}->[0] }
+ keys(%{$symbol_table});
+
+ if ($#names < 0) {
+ # No symbols: just use addresses
+ foreach my $pc (@{$pclist}) {
+ my $pcstr = "0x" . $pc;
+ $symbols->{$pc} = [$pcstr, "?", $pcstr];
+ }
+ return 0;
+ }
+
+ # Sort addresses so we can do a join against nm output
+ my $index = 0;
+ my $fullname = $names[0];
+ my $name = ShortFunctionName($fullname);
+ foreach my $pc (sort { $a cmp $b } @{$pclist}) {
+ # Adjust for mapped offset
+ my $mpc = AddressSub($pc, $offset);
+ while (($index < $#names) && ($mpc ge $symbol_table->{$fullname}->[1])){
+ $index++;
+ $fullname = $names[$index];
+ $name = ShortFunctionName($fullname);
+ }
+ if ($mpc lt $symbol_table->{$fullname}->[1]) {
+ $symbols->{$pc} = [$name, "?", $fullname];
+ } else {
+ my $pcstr = "0x" . $pc;
+ $symbols->{$pc} = [$pcstr, "?", $pcstr];
+ }
+ }
+ return 1;
+}
+
+sub ShortFunctionName {
+ my $function = shift;
+ while ($function =~ s/\([^()]*\)(\s*const)?//g) { } # Argument types
+ while ($function =~ s/<[^<>]*>//g) { } # Remove template arguments
+ $function =~ s/^.*\s+(\w+::)/$1/; # Remove leading type
+ return $function;
+}
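+
+# e.g. (illustrative): "void Foo<int>::bar(int) const" reduces to
+# "Foo::bar"; the argument list, template arguments and leading return
+# type are stripped in that order.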
+
+# Trim overly long symbols found in disassembler output
+sub CleanDisassembly {
+ my $d = shift;
+ while ($d =~ s/\([^()%]*\)(\s*const)?//g) { } # Argument types, not (%rax)
+ while ($d =~ s/(\w+)<[^<>]*>/$1/g) { } # Remove template arguments
+ return $d;
+}
+
+# Clean file name for display
+sub CleanFileName {
+ my ($f) = @_;
+ $f =~ s|^/proc/self/cwd/||;
+ $f =~ s|^\./||;
+ return $f;
+}
+
+# Make address relative to section and clean up for display
+sub UnparseAddress {
+ my ($offset, $address) = @_;
+ $address = AddressSub($address, $offset);
+ $address =~ s/^0x//;
+ $address =~ s/^0*//;
+ return $address;
+}
+
+##### Miscellaneous #####
+
+# Find the right versions of the above object tools to use. The
+# argument is the program file being analyzed, and should be an ELF
+# 32-bit or ELF 64-bit executable file. The location of the tools
+# is determined by considering the following options in this order:
+# 1) --tools option, if set
+# 2) JEPROF_TOOLS environment variable, if set
+# 3) the environment
+sub ConfigureObjTools {
+ my $prog_file = shift;
+
+ # Check for the existence of $prog_file because /usr/bin/file does not
+ # predictably return error status in prod.
+ (-e $prog_file) || error("$prog_file does not exist.\n");
+
+ my $file_type = undef;
+ if (-e "/usr/bin/file") {
+ # Follow symlinks (at least for systems where "file" supports that).
+ my $escaped_prog_file = ShellEscape($prog_file);
+ $file_type = `/usr/bin/file -L $escaped_prog_file 2>$dev_null ||
+ /usr/bin/file $escaped_prog_file`;
+  } elsif ($^O eq "MSWin32") {
+ $file_type = "MS Windows";
+ } else {
+    print STDERR "WARNING: Can't determine the file type of $prog_file\n";
+ }
+
+ if ($file_type =~ /64-bit/) {
+ # Change $address_length to 16 if the program file is ELF 64-bit.
+ # We can't detect this from many (most?) heap or lock contention
+ # profiles, since the actual addresses referenced are generally in low
+ # memory even for 64-bit programs.
+ $address_length = 16;
+ }
+
+ if ($file_type =~ /MS Windows/) {
+ # For windows, we provide a version of nm and addr2line as part of
+ # the opensource release, which is capable of parsing
+ # Windows-style PDB executables. It should live in the path, or
+ # in the same directory as jeprof.
+ $obj_tool_map{"nm_pdb"} = "nm-pdb";
+ $obj_tool_map{"addr2line_pdb"} = "addr2line-pdb";
+ }
+
+ if ($file_type =~ /Mach-O/) {
+ # OS X uses otool to examine Mach-O files, rather than objdump.
+ $obj_tool_map{"otool"} = "otool";
+ $obj_tool_map{"addr2line"} = "false"; # no addr2line
+ $obj_tool_map{"objdump"} = "false"; # no objdump
+ }
+
+ # Go fill in %obj_tool_map with the pathnames to use:
+ foreach my $tool (keys %obj_tool_map) {
+ $obj_tool_map{$tool} = ConfigureTool($obj_tool_map{$tool});
+ }
+}
+
+# Returns the path of a caller-specified object tool. If --tools or
+# JEPROF_TOOLS are specified, then returns the full path to the tool
+# with that prefix. Otherwise, returns the path unmodified (which
+# means we will look for it on PATH).
+sub ConfigureTool {
+ my $tool = shift;
+ my $path;
+
+ # --tools (or $JEPROF_TOOLS) is a comma separated list, where each
+ # item is either a) a pathname prefix, or b) a map of the form
+ # <tool>:<path>. First we look for an entry of type (b) for our
+ # tool. If one is found, we use it. Otherwise, we consider all the
+ # pathname prefixes in turn, until one yields an existing file. If
+ # none does, we use a default path.
+ my $tools = $main::opt_tools || $ENV{"JEPROF_TOOLS"} || "";
+ if ($tools =~ m/(,|^)\Q$tool\E:([^,]*)/) {
+ $path = $2;
+ # TODO(csilvers): sanity-check that $path exists? Hard if it's relative.
+ } elsif ($tools ne '') {
+ foreach my $prefix (split(',', $tools)) {
+ next if ($prefix =~ /:/); # ignore "tool:fullpath" entries in the list
+ if (-x $prefix . $tool) {
+ $path = $prefix . $tool;
+ last;
+ }
+ }
+ if (!$path) {
+ error("No '$tool' found with prefix specified by " .
+ "--tools (or \$JEPROF_TOOLS) '$tools'\n");
+ }
+ } else {
+ # ... otherwise use the version that exists in the same directory as
+ # jeprof. If there's nothing there, use $PATH.
+ $0 =~ m,[^/]*$,; # this is everything after the last slash
+ my $dirname = $`; # this is everything up to and including the last slash
+ if (-x "$dirname$tool") {
+ $path = "$dirname$tool";
+ } else {
+ $path = $tool;
+ }
+ }
+ if ($main::opt_debug) { print STDERR "Using '$path' for '$tool'.\n"; }
+ return $path;
+}
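+
+# e.g. (illustrative): --tools=/alt/cross-bin/,nm:/usr/local/bin/gnm
+# resolves "nm" to /usr/local/bin/gnm via the <tool>:<path> form, and any
+# other tool to /alt/cross-bin/<tool> when that file is executable.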
+
+sub ShellEscape {
+ my @escaped_words = ();
+ foreach my $word (@_) {
+ my $escaped_word = $word;
+ if ($word =~ m![^a-zA-Z0-9/.,_=-]!) { # check for anything not in whitelist
+      $escaped_word =~ s/'/'\\''/g;  # escape every embedded quote, not just the first
+ $escaped_word = "'$escaped_word'";
+ }
+ push(@escaped_words, $escaped_word);
+ }
+ return join(" ", @escaped_words);
+}
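+
+# Usage sketch (illustrative): ShellEscape("nm", "-n", "/tmp/my lib.so")
+# returns the single string
+#   nm -n '/tmp/my lib.so'
+# since only the argument containing a space falls outside the whitelist.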
+
+sub cleanup {
+ unlink($main::tmpfile_sym);
+ unlink(keys %main::tempnames);
+
+ # We leave any collected profiles in $HOME/jeprof in case the user wants
+ # to look at them later. We print a message informing them of this.
+ if ((scalar(@main::profile_files) > 0) &&
+ defined($main::collected_profile)) {
+ if (scalar(@main::profile_files) == 1) {
+ print STDERR "Dynamically gathered profile is in $main::collected_profile\n";
+ }
+ print STDERR "If you want to investigate this profile further, you can do:\n";
+ print STDERR "\n";
+ print STDERR " jeprof \\\n";
+ print STDERR " $main::prog \\\n";
+ print STDERR " $main::collected_profile\n";
+ print STDERR "\n";
+ }
+}
+
+sub sighandler {
+ cleanup();
+ exit(1);
+}
+
+sub error {
+ my $msg = shift;
+ print STDERR $msg;
+ cleanup();
+ exit(1);
+}
+
+
+# Run $nm_command and get all the resulting procedure boundaries whose
+# names match "$regexp" and returns them in a hashtable mapping from
+# procedure name to a two-element vector of [start address, end address]
+sub GetProcedureBoundariesViaNm {
+ my $escaped_nm_command = shift; # shell-escaped
+ my $regexp = shift;
+
+ my $symbol_table = {};
+ open(NM, "$escaped_nm_command |") || error("$escaped_nm_command: $!\n");
+ my $last_start = "0";
+ my $routine = "";
+ while (<NM>) {
+ s/\r//g; # turn windows-looking lines into unix-looking lines
+ if (m/^\s*([0-9a-f]+) (.) (..*)/) {
+ my $start_val = $1;
+ my $type = $2;
+ my $this_routine = $3;
+
+ # It's possible for two symbols to share the same address, if
+ # one is a zero-length variable (like __start_google_malloc) or
+ # one symbol is a weak alias to another (like __libc_malloc).
+ # In such cases, we want to ignore all values except for the
+ # actual symbol, which in nm-speak has type "T". The logic
+ # below does this, though it's a bit tricky: what happens when
+ # we have a series of lines with the same address, is the first
+ # one gets queued up to be processed. However, it won't
+ # *actually* be processed until later, when we read a line with
+ # a different address. That means that as long as we're reading
+ # lines with the same address, we have a chance to replace that
+ # item in the queue, which we do whenever we see a 'T' entry --
+ # that is, a line with type 'T'. If we never see a 'T' entry,
+ # we'll just go ahead and process the first entry (which never
+ # got touched in the queue), and ignore the others.
+ if ($start_val eq $last_start && $type =~ /t/i) {
+ # We are the 'T' symbol at this address, replace previous symbol.
+ $routine = $this_routine;
+ next;
+ } elsif ($start_val eq $last_start) {
+ # We're not the 'T' symbol at this address, so ignore us.
+ next;
+ }
+
+ if ($this_routine eq $sep_symbol) {
+ $sep_address = HexExtend($start_val);
+ }
+
+ # Tag this routine with the starting address in case the image
+ # has multiple occurrences of this routine. We use a syntax
+ # that resembles template parameters that are automatically
+ # stripped out by ShortFunctionName()
+ $this_routine .= "<$start_val>";
+
+ if (defined($routine) && $routine =~ m/$regexp/) {
+ $symbol_table->{$routine} = [HexExtend($last_start),
+ HexExtend($start_val)];
+ }
+ $last_start = $start_val;
+ $routine = $this_routine;
+ } elsif (m/^Loaded image name: (.+)/) {
+ # The win32 nm workalike emits information about the binary it is using.
+ if ($main::opt_debug) { print STDERR "Using Image $1\n"; }
+ } elsif (m/^PDB file name: (.+)/) {
+ # The win32 nm workalike emits information about the pdb it is using.
+ if ($main::opt_debug) { print STDERR "Using PDB $1\n"; }
+ }
+ }
+ close(NM);
+ # Handle the last line in the nm output. Unfortunately, we don't know
+ # how big this last symbol is, because we don't know how big the file
+ # is. For now, we just give it a size of 0.
+ # TODO(csilvers): do better here.
+ if (defined($routine) && $routine =~ m/$regexp/) {
+ $symbol_table->{$routine} = [HexExtend($last_start),
+ HexExtend($last_start)];
+ }
+ return $symbol_table;
+}
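+
+# The nm lines matched above look like (illustrative):
+#   0000000000401000 T main
+#   0000000000401000 W __main_weak_alias
+# i.e. <address> <type> <name>; when two symbols share an address, the
+# "T" entry wins, as described in the queueing comment above.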
+
+# Gets the procedure boundaries for all routines in "$image" whose names
+# match "$regexp" and returns them in a hashtable mapping from procedure
+# name to a two-element vector of [start address, end address].
+# Will return an empty map if nm is not installed or not working properly.
+sub GetProcedureBoundaries {
+ my $image = shift;
+ my $regexp = shift;
+
+ # If $image doesn't start with /, then put ./ in front of it. This works
+ # around an obnoxious bug in our probing of nm -f behavior.
+ # "nm -f $image" is supposed to fail on GNU nm, but if:
+ #
+ # a. $image starts with [BbSsPp] (for example, bin/foo/bar), AND
+  #    b. you have a.out in your current directory (a not uncommon occurrence)
+ #
+ # then "nm -f $image" succeeds because -f only looks at the first letter of
+ # the argument, which looks valid because it's [BbSsPp], and then since
+ # there's no image provided, it looks for a.out and finds it.
+ #
+ # This regex makes sure that $image starts with . or /, forcing the -f
+ # parsing to fail since . and / are not valid formats.
+ $image =~ s#^[^/]#./$&#;
+
+ # For libc libraries, the copy in /usr/lib/debug contains debugging symbols
+ my $debugging = DebuggingLibrary($image);
+ if ($debugging) {
+ $image = $debugging;
+ }
+
+ my $nm = $obj_tool_map{"nm"};
+ my $cppfilt = $obj_tool_map{"c++filt"};
+
+ # nm can fail for two reasons: 1) $image isn't a debug library; 2) nm
+ # binary doesn't support --demangle. In addition, for OS X we need
+ # to use the -f flag to get 'flat' nm output (otherwise we don't sort
+ # properly and get incorrect results). Unfortunately, GNU nm uses -f
+ # in an incompatible way. So first we test whether our nm supports
+ # --demangle and -f.
+ my $demangle_flag = "";
+ my $cppfilt_flag = "";
+ my $to_devnull = ">$dev_null 2>&1";
+ if (system(ShellEscape($nm, "--demangle", $image) . $to_devnull) == 0) {
+ # In this mode, we do "nm --demangle <foo>"
+ $demangle_flag = "--demangle";
+ $cppfilt_flag = "";
+ } elsif (system(ShellEscape($cppfilt, $image) . $to_devnull) == 0) {
+ # In this mode, we do "nm <foo> | c++filt"
+ $cppfilt_flag = " | " . ShellEscape($cppfilt);
+  }
+ my $flatten_flag = "";
+ if (system(ShellEscape($nm, "-f", $image) . $to_devnull) == 0) {
+ $flatten_flag = "-f";
+ }
+
+  # Finally, in case $image isn't a debug library, we try again with
+ # -D to at least get *exported* symbols. If we can't use --demangle,
+ # we use c++filt instead, if it exists on this system.
+ my @nm_commands = (ShellEscape($nm, "-n", $flatten_flag, $demangle_flag,
+ $image) . " 2>$dev_null $cppfilt_flag",
+ ShellEscape($nm, "-D", "-n", $flatten_flag, $demangle_flag,
+ $image) . " 2>$dev_null $cppfilt_flag",
+ # 6nm is for Go binaries
+ ShellEscape("6nm", "$image") . " 2>$dev_null | sort",
+ );
+
+ # If the executable is an MS Windows PDB-format executable, we'll
+ # have set up obj_tool_map("nm_pdb"). In this case, we actually
+ # want to use both unix nm and windows-specific nm_pdb, since
+ # PDB-format executables can apparently include dwarf .o files.
+ if (exists $obj_tool_map{"nm_pdb"}) {
+ push(@nm_commands,
+ ShellEscape($obj_tool_map{"nm_pdb"}, "--demangle", $image)
+ . " 2>$dev_null");
+ }
+
+ foreach my $nm_command (@nm_commands) {
+ my $symbol_table = GetProcedureBoundariesViaNm($nm_command, $regexp);
+ return $symbol_table if (%{$symbol_table});
+ }
+ my $symbol_table = {};
+ return $symbol_table;
+}
+
+
+# The test vectors for AddressAdd/Sub/Inc are 8-16-nibble hex strings.
+# To make them more readable, we add underscores at interesting places.
+# This routine removes the underscores, producing the canonical representation
+# used by jeprof to represent addresses, particularly in the tested routines.
+sub CanonicalHex {
+ my $arg = shift;
+ return join '', (split '_',$arg);
+}
+
+
+# Unit test for AddressAdd:
+sub AddressAddUnitTest {
+ my $test_data_8 = shift;
+ my $test_data_16 = shift;
+ my $error_count = 0;
+ my $fail_count = 0;
+ my $pass_count = 0;
+ # print STDERR "AddressAddUnitTest: ", 1+$#{$test_data_8}, " tests\n";
+
+ # First a few 8-nibble addresses. Note that this implementation uses
+ # plain old arithmetic, so a quick sanity check along with verifying what
+ # happens to overflow (we want it to wrap):
+ $address_length = 8;
+ foreach my $row (@{$test_data_8}) {
+ if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+ my $sum = AddressAdd ($row->[0], $row->[1]);
+ if ($sum ne $row->[2]) {
+ printf STDERR "ERROR: %s != %s + %s = %s\n", $sum,
+ $row->[0], $row->[1], $row->[2];
+ ++$fail_count;
+ } else {
+ ++$pass_count;
+ }
+ }
+ printf STDERR "AddressAdd 32-bit tests: %d passes, %d failures\n",
+ $pass_count, $fail_count;
+ $error_count = $fail_count;
+ $fail_count = 0;
+ $pass_count = 0;
+
+ # Now 16-nibble addresses.
+ $address_length = 16;
+ foreach my $row (@{$test_data_16}) {
+ if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+ my $sum = AddressAdd (CanonicalHex($row->[0]), CanonicalHex($row->[1]));
+    my $expected = CanonicalHex($row->[2]);
+    if ($sum ne $expected) {
+ printf STDERR "ERROR: %s != %s + %s = %s\n", $sum,
+ $row->[0], $row->[1], $row->[2];
+ ++$fail_count;
+ } else {
+ ++$pass_count;
+ }
+ }
+ printf STDERR "AddressAdd 64-bit tests: %d passes, %d failures\n",
+ $pass_count, $fail_count;
+ $error_count += $fail_count;
+
+ return $error_count;
+}
+
+
+# Unit test for AddressSub:
+sub AddressSubUnitTest {
+ my $test_data_8 = shift;
+ my $test_data_16 = shift;
+ my $error_count = 0;
+ my $fail_count = 0;
+ my $pass_count = 0;
+ # print STDERR "AddressSubUnitTest: ", 1+$#{$test_data_8}, " tests\n";
+
+ # First a few 8-nibble addresses. Note that this implementation uses
+ # plain old arithmetic, so a quick sanity check along with verifying what
+ # happens to overflow (we want it to wrap):
+ $address_length = 8;
+ foreach my $row (@{$test_data_8}) {
+ if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+ my $sum = AddressSub ($row->[0], $row->[1]);
+ if ($sum ne $row->[3]) {
+ printf STDERR "ERROR: %s != %s - %s = %s\n", $sum,
+ $row->[0], $row->[1], $row->[3];
+ ++$fail_count;
+ } else {
+ ++$pass_count;
+ }
+ }
+ printf STDERR "AddressSub 32-bit tests: %d passes, %d failures\n",
+ $pass_count, $fail_count;
+ $error_count = $fail_count;
+ $fail_count = 0;
+ $pass_count = 0;
+
+ # Now 16-nibble addresses.
+ $address_length = 16;
+ foreach my $row (@{$test_data_16}) {
+ if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+ my $sum = AddressSub (CanonicalHex($row->[0]), CanonicalHex($row->[1]));
+ if ($sum ne CanonicalHex($row->[3])) {
+ printf STDERR "ERROR: %s != %s - %s = %s\n", $sum,
+ $row->[0], $row->[1], $row->[3];
+ ++$fail_count;
+ } else {
+ ++$pass_count;
+ }
+ }
+ printf STDERR "AddressSub 64-bit tests: %d passes, %d failures\n",
+ $pass_count, $fail_count;
+ $error_count += $fail_count;
+
+ return $error_count;
+}
+
+
+# Unit test for AddressInc:
+sub AddressIncUnitTest {
+ my $test_data_8 = shift;
+ my $test_data_16 = shift;
+ my $error_count = 0;
+ my $fail_count = 0;
+ my $pass_count = 0;
+ # print STDERR "AddressIncUnitTest: ", 1+$#{$test_data_8}, " tests\n";
+
+ # First a few 8-nibble addresses. Note that this implementation uses
+ # plain old arithmetic, so a quick sanity check along with verifying what
+ # happens to overflow (we want it to wrap):
+ $address_length = 8;
+ foreach my $row (@{$test_data_8}) {
+ if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+ my $sum = AddressInc ($row->[0]);
+ if ($sum ne $row->[4]) {
+ printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum,
+ $row->[0], $row->[4];
+ ++$fail_count;
+ } else {
+ ++$pass_count;
+ }
+ }
+ printf STDERR "AddressInc 32-bit tests: %d passes, %d failures\n",
+ $pass_count, $fail_count;
+ $error_count = $fail_count;
+ $fail_count = 0;
+ $pass_count = 0;
+
+ # Now 16-nibble addresses.
+ $address_length = 16;
+ foreach my $row (@{$test_data_16}) {
+ if ($main::opt_debug and $main::opt_test) { print STDERR "@{$row}\n"; }
+ my $sum = AddressInc (CanonicalHex($row->[0]));
+ if ($sum ne CanonicalHex($row->[4])) {
+ printf STDERR "ERROR: %s != %s + 1 = %s\n", $sum,
+ $row->[0], $row->[4];
+ ++$fail_count;
+ } else {
+ ++$pass_count;
+ }
+ }
+ printf STDERR "AddressInc 64-bit tests: %d passes, %d failures\n",
+ $pass_count, $fail_count;
+ $error_count += $fail_count;
+
+ return $error_count;
+}
+
+
+# Driver for unit tests.
+# Currently just the address add/subtract/increment routines for 64-bit.
+sub RunUnitTests {
+ my $error_count = 0;
+
+ # This is a list of tuples [a, b, a+b, a-b, a+1]
+ my $unit_test_data_8 = [
+ [qw(aaaaaaaa 50505050 fafafafa 5a5a5a5a aaaaaaab)],
+ [qw(50505050 aaaaaaaa fafafafa a5a5a5a6 50505051)],
+ [qw(ffffffff aaaaaaaa aaaaaaa9 55555555 00000000)],
+ [qw(00000001 ffffffff 00000000 00000002 00000002)],
+ [qw(00000001 fffffff0 fffffff1 00000011 00000002)],
+ ];
+ my $unit_test_data_16 = [
+ # The implementation handles data in 7-nibble chunks, so those are the
+ # interesting boundaries.
+ [qw(aaaaaaaa 50505050
+ 00_000000f_afafafa 00_0000005_a5a5a5a 00_000000a_aaaaaab)],
+ [qw(50505050 aaaaaaaa
+ 00_000000f_afafafa ff_ffffffa_5a5a5a6 00_0000005_0505051)],
+ [qw(ffffffff aaaaaaaa
+ 00_000001a_aaaaaa9 00_0000005_5555555 00_0000010_0000000)],
+ [qw(00000001 ffffffff
+ 00_0000010_0000000 ff_ffffff0_0000002 00_0000000_0000002)],
+ [qw(00000001 fffffff0
+ 00_000000f_ffffff1 ff_ffffff0_0000011 00_0000000_0000002)],
+
+ [qw(00_a00000a_aaaaaaa 50505050
+ 00_a00000f_afafafa 00_a000005_a5a5a5a 00_a00000a_aaaaaab)],
+ [qw(0f_fff0005_0505050 aaaaaaaa
+ 0f_fff000f_afafafa 0f_ffefffa_5a5a5a6 0f_fff0005_0505051)],
+ [qw(00_000000f_fffffff 01_800000a_aaaaaaa
+ 01_800001a_aaaaaa9 fe_8000005_5555555 00_0000010_0000000)],
+ [qw(00_0000000_0000001 ff_fffffff_fffffff
+ 00_0000000_0000000 00_0000000_0000002 00_0000000_0000002)],
+ [qw(00_0000000_0000001 ff_fffffff_ffffff0
+ ff_fffffff_ffffff1 00_0000000_0000011 00_0000000_0000002)],
+ ];
+
+ $error_count += AddressAddUnitTest($unit_test_data_8, $unit_test_data_16);
+ $error_count += AddressSubUnitTest($unit_test_data_8, $unit_test_data_16);
+ $error_count += AddressIncUnitTest($unit_test_data_8, $unit_test_data_16);
+ if ($error_count > 0) {
+ print STDERR $error_count, " errors: FAILED\n";
+ } else {
+ print STDERR "PASS\n";
+ }
+ exit ($error_count);
+}
+>>>>>>> main
diff --git a/contrib/jemalloc/build-aux/config.guess b/contrib/jemalloc/build-aux/config.guess
new file mode 100755
index 000000000000..e8241c60f184
--- /dev/null
+++ b/contrib/jemalloc/build-aux/config.guess
@@ -0,0 +1,3167 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+# Copyright 1992-2021 Free Software Foundation, Inc.
+
+timestamp='2021-01-01'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <https://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
+#
+# Originally written by Per Bothner; maintained since 2000 by Ben Elliston.
+#
+# You can get the latest version of this script from:
+# https://git.savannah.gnu.org/cgit/config.git/plain/config.guess
+#
+# Please send patches to <config-patches@gnu.org>.
+
+
+me=$(echo "$0" | sed -e 's,.*/,,')
+
+usage="\
+Usage: $0 [OPTION]
+
+Output the configuration name of the system \`$me' is run on.
+
+Options:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.guess ($timestamp)
+
+Originally written by Per Bothner.
+Copyright 1992-2021 Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help" >&2
+ exit 1 ;;
+ * )
+ break ;;
+ esac
+done
+
+if test $# != 0; then
+ echo "$me: too many arguments$help" >&2
+ exit 1
+fi
+
+# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
+# compiler to aid in system detection is discouraged as it requires
+# temporary files to be created and, as you can see below, it is a
+# headache to deal with in a portable fashion.
+
+# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
+# use `HOST_CC' if defined, but it is deprecated.
+
+# Portable tmp directory creation inspired by the Autoconf team.
+
+tmp=
+# shellcheck disable=SC2172
+trap 'test -z "$tmp" || rm -fr "$tmp"' 0 1 2 13 15
+
+set_cc_for_build() {
+ # prevent multiple calls if $tmp is already set
+ test "$tmp" && return 0
+ : "${TMPDIR=/tmp}"
+ # shellcheck disable=SC2039
+ { tmp=$( (umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null) && test -n "$tmp" && test -d "$tmp" ; } ||
+ { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir "$tmp" 2>/dev/null) ; } ||
+ { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir "$tmp" 2>/dev/null) && echo "Warning: creating insecure temp directory" >&2 ; } ||
+ { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; }
+ dummy=$tmp/dummy
+ case ${CC_FOR_BUILD-},${HOST_CC-},${CC-} in
+ ,,) echo "int x;" > "$dummy.c"
+ for driver in cc gcc c89 c99 ; do
+ if ($driver -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then
+ CC_FOR_BUILD="$driver"
+ break
+ fi
+ done
+ if test x"$CC_FOR_BUILD" = x ; then
+ CC_FOR_BUILD=no_compiler_found
+ fi
+ ;;
+ ,,*) CC_FOR_BUILD=$CC ;;
+ ,*,*) CC_FOR_BUILD=$HOST_CC ;;
+ esac
+}
+
+# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
+# (ghazi@noc.rutgers.edu 1994-08-24)
+if test -f /.attbin/uname ; then
+ PATH=$PATH:/.attbin ; export PATH
+fi
+
+UNAME_MACHINE=$( (uname -m) 2>/dev/null) || UNAME_MACHINE=unknown
+UNAME_RELEASE=$( (uname -r) 2>/dev/null) || UNAME_RELEASE=unknown
+UNAME_SYSTEM=$( (uname -s) 2>/dev/null) || UNAME_SYSTEM=unknown
+UNAME_VERSION=$( (uname -v) 2>/dev/null) || UNAME_VERSION=unknown
+
+case "$UNAME_SYSTEM" in
+Linux|GNU|GNU/*)
+ LIBC=unknown
+
+ set_cc_for_build
+ cat <<-EOF > "$dummy.c"
+ #include <features.h>
+ #if defined(__UCLIBC__)
+ LIBC=uclibc
+ #elif defined(__dietlibc__)
+ LIBC=dietlibc
+ #elif defined(__GLIBC__)
+ LIBC=gnu
+ #else
+ #include <stdarg.h>
+ /* First heuristic to detect musl libc. */
+ #ifdef __DEFINED_va_list
+ LIBC=musl
+ #endif
+ #endif
+ EOF
+ eval "$($CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g')"
+
+ # Second heuristic to detect musl libc.
+ if [ "$LIBC" = unknown ] &&
+ command -v ldd >/dev/null &&
+ ldd --version 2>&1 | grep -q ^musl; then
+ LIBC=musl
+ fi
+
+ # If the system lacks a compiler, then just pick glibc.
+ # We could probably try harder.
+ if [ "$LIBC" = unknown ]; then
+ LIBC=gnu
+ fi
+ ;;
+esac
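+
+# e.g. (illustrative): on a glibc system the preprocessed heredoc above
+# leaves the literal line "LIBC=gnu" in the compiler output; the
+# grep/sed/eval pipeline then executes that line as a shell assignment.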
+
+# Note: order is significant - the case branches are not exclusive.
+
+case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in
+ *:NetBSD:*:*)
+ # NetBSD (nbsd) targets should (where applicable) match one or
+ # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*,
+ # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
+ # switched to ELF, *-*-netbsd* would select the old
+ # object file format. This provides both forward
+ # compatibility and a consistent mechanism for selecting the
+ # object file format.
+ #
+ # Note: NetBSD doesn't particularly care about the vendor
+ # portion of the name. We always set it to "unknown".
+ sysctl="sysctl -n hw.machine_arch"
+ UNAME_MACHINE_ARCH=$( (uname -p 2>/dev/null || \
+ "/sbin/$sysctl" 2>/dev/null || \
+ "/usr/sbin/$sysctl" 2>/dev/null || \
+ echo unknown))
+ case "$UNAME_MACHINE_ARCH" in
+ aarch64eb) machine=aarch64_be-unknown ;;
+ armeb) machine=armeb-unknown ;;
+ arm*) machine=arm-unknown ;;
+ sh3el) machine=shl-unknown ;;
+ sh3eb) machine=sh-unknown ;;
+ sh5el) machine=sh5le-unknown ;;
+ earmv*)
+ arch=$(echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,')
+ endian=$(echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p')
+ machine="${arch}${endian}"-unknown
+ ;;
+ *) machine="$UNAME_MACHINE_ARCH"-unknown ;;
+ esac
+ # The Operating System including object format, if it has switched
+ # to ELF recently (or will in the future) and ABI.
+ case "$UNAME_MACHINE_ARCH" in
+ earm*)
+ os=netbsdelf
+ ;;
+ arm*|i386|m68k|ns32k|sh3*|sparc|vax)
+ set_cc_for_build
+ if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ELF__
+ then
+ # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
+ # Return netbsd for either. FIX?
+ os=netbsd
+ else
+ os=netbsdelf
+ fi
+ ;;
+ *)
+ os=netbsd
+ ;;
+ esac
+ # Determine ABI tags.
+ case "$UNAME_MACHINE_ARCH" in
+ earm*)
+ expr='s/^earmv[0-9]/-eabi/;s/eb$//'
+ abi=$(echo "$UNAME_MACHINE_ARCH" | sed -e "$expr")
+ ;;
+ esac
+ # The OS release
+ # Debian GNU/NetBSD machines have a different userland, and
+ # thus, need a distinct triplet. However, they do not need
+ # kernel version information, so it can be replaced with a
+ # suitable tag, in the style of linux-gnu.
+ case "$UNAME_VERSION" in
+ Debian*)
+ release='-gnu'
+ ;;
+ *)
+ release=$(echo "$UNAME_RELEASE" | sed -e 's/[-_].*//' | cut -d. -f1,2)
+ ;;
+ esac
+ # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
+ # contains redundant information, the shorter form:
+ # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
+ echo "$machine-${os}${release}${abi-}"
+ exit ;;
+ *:Bitrig:*:*)
+ UNAME_MACHINE_ARCH=$(arch | sed 's/Bitrig.//')
+ echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE"
+ exit ;;
+ *:OpenBSD:*:*)
+ UNAME_MACHINE_ARCH=$(arch | sed 's/OpenBSD.//')
+ echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE"
+ exit ;;
+ *:LibertyBSD:*:*)
+ UNAME_MACHINE_ARCH=$(arch | sed 's/^.*BSD\.//')
+ echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE"
+ exit ;;
+ *:MidnightBSD:*:*)
+ echo "$UNAME_MACHINE"-unknown-midnightbsd"$UNAME_RELEASE"
+ exit ;;
+ *:ekkoBSD:*:*)
+ echo "$UNAME_MACHINE"-unknown-ekkobsd"$UNAME_RELEASE"
+ exit ;;
+ *:SolidBSD:*:*)
+ echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE"
+ exit ;;
+ *:OS108:*:*)
+ echo "$UNAME_MACHINE"-unknown-os108_"$UNAME_RELEASE"
+ exit ;;
+ macppc:MirBSD:*:*)
+ echo powerpc-unknown-mirbsd"$UNAME_RELEASE"
+ exit ;;
+ *:MirBSD:*:*)
+ echo "$UNAME_MACHINE"-unknown-mirbsd"$UNAME_RELEASE"
+ exit ;;
+ *:Sortix:*:*)
+ echo "$UNAME_MACHINE"-unknown-sortix
+ exit ;;
+ *:Twizzler:*:*)
+ echo "$UNAME_MACHINE"-unknown-twizzler
+ exit ;;
+ *:Redox:*:*)
+ echo "$UNAME_MACHINE"-unknown-redox
+ exit ;;
+ mips:OSF1:*.*)
+ echo mips-dec-osf1
+ exit ;;
+ alpha:OSF1:*:*)
+ case $UNAME_RELEASE in
+ *4.0)
+ UNAME_RELEASE=$(/usr/sbin/sizer -v | awk '{print $3}')
+ ;;
+ *5.*)
+ UNAME_RELEASE=$(/usr/sbin/sizer -v | awk '{print $4}')
+ ;;
+ esac
+ # According to Compaq, /usr/sbin/psrinfo has been available on
+ # OSF/1 and Tru64 systems produced since 1995. I hope that
+ # covers most systems running today. This code pipes the CPU
+ # types through head -n 1, so we only detect the type of CPU 0.
+ ALPHA_CPU_TYPE=$(/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1)
+ case "$ALPHA_CPU_TYPE" in
+ "EV4 (21064)")
+ UNAME_MACHINE=alpha ;;
+ "EV4.5 (21064)")
+ UNAME_MACHINE=alpha ;;
+ "LCA4 (21066/21068)")
+ UNAME_MACHINE=alpha ;;
+ "EV5 (21164)")
+ UNAME_MACHINE=alphaev5 ;;
+ "EV5.6 (21164A)")
+ UNAME_MACHINE=alphaev56 ;;
+ "EV5.6 (21164PC)")
+ UNAME_MACHINE=alphapca56 ;;
+ "EV5.7 (21164PC)")
+ UNAME_MACHINE=alphapca57 ;;
+ "EV6 (21264)")
+ UNAME_MACHINE=alphaev6 ;;
+ "EV6.7 (21264A)")
+ UNAME_MACHINE=alphaev67 ;;
+ "EV6.8CB (21264C)")
+ UNAME_MACHINE=alphaev68 ;;
+ "EV6.8AL (21264B)")
+ UNAME_MACHINE=alphaev68 ;;
+ "EV6.8CX (21264D)")
+ UNAME_MACHINE=alphaev68 ;;
+ "EV6.9A (21264/EV69A)")
+ UNAME_MACHINE=alphaev69 ;;
+ "EV7 (21364)")
+ UNAME_MACHINE=alphaev7 ;;
+ "EV7.9 (21364A)")
+ UNAME_MACHINE=alphaev79 ;;
+ esac
+ # A Pn.n version is a patched version.
+ # A Vn.n version is a released version.
+ # A Tn.n version is a released field test version.
+ # A Xn.n version is an unreleased experimental baselevel.
+ # 1.2 uses "1.2" for uname -r.
+ echo "$UNAME_MACHINE"-dec-osf"$(echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz)"
+ # Reset EXIT trap before exiting to avoid spurious non-zero exit code.
+ exitcode=$?
+ trap '' 0
+ exit $exitcode ;;
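+    # e.g. for a hypothetical EV6 machine whose sizer reports "V4.0",
+    # the sed strips the leading status letter:
+    #   $ ./config.guess
+    #   => alphaev6-dec-osf4.0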
+ Amiga*:UNIX_System_V:4.0:*)
+ echo m68k-unknown-sysv4
+ exit ;;
+ *:[Aa]miga[Oo][Ss]:*:*)
+ echo "$UNAME_MACHINE"-unknown-amigaos
+ exit ;;
+ *:[Mm]orph[Oo][Ss]:*:*)
+ echo "$UNAME_MACHINE"-unknown-morphos
+ exit ;;
+ *:OS/390:*:*)
+ echo i370-ibm-openedition
+ exit ;;
+ *:z/VM:*:*)
+ echo s390-ibm-zvmoe
+ exit ;;
+ *:OS400:*:*)
+ echo powerpc-ibm-os400
+ exit ;;
+ arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
+ echo arm-acorn-riscix"$UNAME_RELEASE"
+ exit ;;
+ arm*:riscos:*:*|arm*:RISCOS:*:*)
+ echo arm-unknown-riscos
+ exit ;;
+ SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
+ echo hppa1.1-hitachi-hiuxmpp
+ exit ;;
+ Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
+ # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE.
+ if test "$( (/bin/universe) 2>/dev/null)" = att ; then
+ echo pyramid-pyramid-sysv3
+ else
+ echo pyramid-pyramid-bsd
+ fi
+ exit ;;
+ NILE*:*:*:dcosx)
+ echo pyramid-pyramid-svr4
+ exit ;;
+ DRS?6000:unix:4.0:6*)
+ echo sparc-icl-nx6
+ exit ;;
+ DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
+ case $(/usr/bin/uname -p) in
+ sparc) echo sparc-icl-nx7; exit ;;
+ esac ;;
+ s390x:SunOS:*:*)
+ echo "$UNAME_MACHINE"-ibm-solaris2"$(echo "$UNAME_RELEASE" | sed -e 's/[^.]*//')"
+ exit ;;
+ sun4H:SunOS:5.*:*)
+ echo sparc-hal-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')"
+ exit ;;
+ sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
+ echo sparc-sun-solaris2"$(echo "$UNAME_RELEASE" | sed -e 's/[^.]*//')"
+ exit ;;
+ i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*)
+ echo i386-pc-auroraux"$UNAME_RELEASE"
+ exit ;;
+ i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*)
+ set_cc_for_build
+ SUN_ARCH=i386
+ # If there is a compiler, see if it is configured for 64-bit objects.
+ # Note that the Sun cc does not turn __LP64__ into 1 like gcc does.
+ # This test works for both compilers.
+ if test "$CC_FOR_BUILD" != no_compiler_found; then
+ if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ SUN_ARCH=x86_64
+ fi
+ fi
+ echo "$SUN_ARCH"-pc-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')"
+ exit ;;
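+    # The probe above only runs the preprocessor; if __amd64 is
+    # defined, the IS_64BIT_ARCH marker survives. A standalone sketch
+    # of the same technique (cc assumed to be the build compiler):
+    #   $ (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \
+    #       cc -E - | grep IS_64BIT_ARCH
+    #   => IS_64BIT_ARCH   (printed only by 64-bit-default compilers)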
+ sun4*:SunOS:6*:*)
+ # According to config.sub, this is the proper way to canonicalize
+ # SunOS6. Hard to guess exactly what SunOS6 will be like, but
+ # it's likely to be more like Solaris than SunOS4.
+ echo sparc-sun-solaris3"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')"
+ exit ;;
+ sun4*:SunOS:*:*)
+ case "$(/usr/bin/arch -k)" in
+ Series*|S4*)
+ UNAME_RELEASE=$(uname -v)
+ ;;
+ esac
+ # Japanese Language versions have a version number like `4.1.3-JL'.
+ echo sparc-sun-sunos"$(echo "$UNAME_RELEASE"|sed -e 's/-/_/')"
+ exit ;;
+ sun3*:SunOS:*:*)
+ echo m68k-sun-sunos"$UNAME_RELEASE"
+ exit ;;
+ sun*:*:4.2BSD:*)
+ UNAME_RELEASE=$( (sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null)
+ test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3
+ case "$(/bin/arch)" in
+ sun3)
+ echo m68k-sun-sunos"$UNAME_RELEASE"
+ ;;
+ sun4)
+ echo sparc-sun-sunos"$UNAME_RELEASE"
+ ;;
+ esac
+ exit ;;
+ aushp:SunOS:*:*)
+ echo sparc-auspex-sunos"$UNAME_RELEASE"
+ exit ;;
+    # The situation for MiNT is a little confusing. The machine name
+    # can be virtually anything (anything that is not "atarist" or
+    # "atariste" should at least have a processor > m68000). The
+    # system name ranges from "MiNT" through "FreeMiNT" to the
+    # lowercase versions "mint" and "freemint". Finally, the system
+    # name "TOS" denotes a system which is actually not MiNT. But
+    # MiNT is downward compatible with TOS, so this should be no
+    # problem.
+ atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint"$UNAME_RELEASE"
+ exit ;;
+ atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
+ echo m68k-atari-mint"$UNAME_RELEASE"
+ exit ;;
+ *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
+ echo m68k-atari-mint"$UNAME_RELEASE"
+ exit ;;
+ milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
+ echo m68k-milan-mint"$UNAME_RELEASE"
+ exit ;;
+ hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
+ echo m68k-hades-mint"$UNAME_RELEASE"
+ exit ;;
+ *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
+ echo m68k-unknown-mint"$UNAME_RELEASE"
+ exit ;;
+ m68k:machten:*:*)
+ echo m68k-apple-machten"$UNAME_RELEASE"
+ exit ;;
+ powerpc:machten:*:*)
+ echo powerpc-apple-machten"$UNAME_RELEASE"
+ exit ;;
+ RISC*:Mach:*:*)
+ echo mips-dec-mach_bsd4.3
+ exit ;;
+ RISC*:ULTRIX:*:*)
+ echo mips-dec-ultrix"$UNAME_RELEASE"
+ exit ;;
+ VAX*:ULTRIX*:*:*)
+ echo vax-dec-ultrix"$UNAME_RELEASE"
+ exit ;;
+ 2020:CLIX:*:* | 2430:CLIX:*:*)
+ echo clipper-intergraph-clix"$UNAME_RELEASE"
+ exit ;;
+ mips:*:*:UMIPS | mips:*:*:RISCos)
+ set_cc_for_build
+ sed 's/^ //' << EOF > "$dummy.c"
+#ifdef __cplusplus
+#include <stdio.h> /* for printf() prototype */
+ int main (int argc, char *argv[]) {
+#else
+ int main (argc, argv) int argc; char *argv[]; {
+#endif
+ #if defined (host_mips) && defined (MIPSEB)
+ #if defined (SYSTYPE_SYSV)
+ printf ("mips-mips-riscos%ssysv\\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_SVR4)
+ printf ("mips-mips-riscos%ssvr4\\n", argv[1]); exit (0);
+ #endif
+ #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
+ printf ("mips-mips-riscos%sbsd\\n", argv[1]); exit (0);
+ #endif
+ #endif
+ exit (-1);
+ }
+EOF
+ $CC_FOR_BUILD -o "$dummy" "$dummy.c" &&
+ dummyarg=$(echo "$UNAME_RELEASE" | sed -n 's/\([0-9]*\).*/\1/p') &&
+ SYSTEM_NAME=$("$dummy" "$dummyarg") &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo mips-mips-riscos"$UNAME_RELEASE"
+ exit ;;
+ Motorola:PowerMAX_OS:*:*)
+ echo powerpc-motorola-powermax
+ exit ;;
+ Motorola:*:4.3:PL8-*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
+ echo powerpc-harris-powermax
+ exit ;;
+ Night_Hawk:Power_UNIX:*:*)
+ echo powerpc-harris-powerunix
+ exit ;;
+ m88k:CX/UX:7*:*)
+ echo m88k-harris-cxux7
+ exit ;;
+ m88k:*:4*:R4*)
+ echo m88k-motorola-sysv4
+ exit ;;
+ m88k:*:3*:R3*)
+ echo m88k-motorola-sysv3
+ exit ;;
+ AViiON:dgux:*:*)
+ # DG/UX returns AViiON for all architectures
+ UNAME_PROCESSOR=$(/usr/bin/uname -p)
+ if test "$UNAME_PROCESSOR" = mc88100 || test "$UNAME_PROCESSOR" = mc88110
+ then
+ if test "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx || \
+ test "$TARGET_BINARY_INTERFACE"x = x
+ then
+ echo m88k-dg-dgux"$UNAME_RELEASE"
+ else
+ echo m88k-dg-dguxbcs"$UNAME_RELEASE"
+ fi
+ else
+ echo i586-dg-dgux"$UNAME_RELEASE"
+ fi
+ exit ;;
+ M88*:DolphinOS:*:*) # DolphinOS (SVR3)
+ echo m88k-dolphin-sysv3
+ exit ;;
+ M88*:*:R3*:*)
+ # Delta 88k system running SVR3
+ echo m88k-motorola-sysv3
+ exit ;;
+ XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
+ echo m88k-tektronix-sysv3
+ exit ;;
+ Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
+ echo m68k-tektronix-bsd
+ exit ;;
+ *:IRIX*:*:*)
+ echo mips-sgi-irix"$(echo "$UNAME_RELEASE"|sed -e 's/-/_/g')"
+ exit ;;
+ ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
+ echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
+ exit ;; # Note that: echo "'$(uname -s)'" gives 'AIX '
+ i*86:AIX:*:*)
+ echo i386-ibm-aix
+ exit ;;
+ ia64:AIX:*:*)
+ if test -x /usr/bin/oslevel ; then
+ IBM_REV=$(/usr/bin/oslevel)
+ else
+ IBM_REV="$UNAME_VERSION.$UNAME_RELEASE"
+ fi
+ echo "$UNAME_MACHINE"-ibm-aix"$IBM_REV"
+ exit ;;
+ *:AIX:2:3)
+ if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
+ set_cc_for_build
+ sed 's/^ //' << EOF > "$dummy.c"
+ #include <sys/systemcfg.h>
+
+ main()
+ {
+ if (!__power_pc())
+ exit(1);
+ puts("powerpc-ibm-aix3.2.5");
+ exit(0);
+ }
+EOF
+ if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=$("$dummy")
+ then
+ echo "$SYSTEM_NAME"
+ else
+ echo rs6000-ibm-aix3.2.5
+ fi
+ elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
+ echo rs6000-ibm-aix3.2.4
+ else
+ echo rs6000-ibm-aix3.2
+ fi
+ exit ;;
+ *:AIX:*:[4567])
+ IBM_CPU_ID=$(/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }')
+ if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then
+ IBM_ARCH=rs6000
+ else
+ IBM_ARCH=powerpc
+ fi
+ if test -x /usr/bin/lslpp ; then
+ IBM_REV=$(/usr/bin/lslpp -Lqc bos.rte.libc |
+ awk -F: '{ print $3 }' | sed s/[0-9]*$/0/)
+ else
+ IBM_REV="$UNAME_VERSION.$UNAME_RELEASE"
+ fi
+ echo "$IBM_ARCH"-ibm-aix"$IBM_REV"
+ exit ;;
+ *:AIX:*:*)
+ echo rs6000-ibm-aix
+ exit ;;
+ ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*)
+ echo romp-ibm-bsd4.4
+ exit ;;
+ ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
+ echo romp-ibm-bsd"$UNAME_RELEASE" # 4.3 with uname added to
+ exit ;; # report: romp-ibm BSD 4.3
+ *:BOSX:*:*)
+ echo rs6000-bull-bosx
+ exit ;;
+ DPX/2?00:B.O.S.:*:*)
+ echo m68k-bull-sysv3
+ exit ;;
+ 9000/[34]??:4.3bsd:1.*:*)
+ echo m68k-hp-bsd
+ exit ;;
+ hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
+ echo m68k-hp-bsd4.4
+ exit ;;
+ 9000/[34678]??:HP-UX:*:*)
+ HPUX_REV=$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//')
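+	# e.g. a hypothetical release of "B.11.23": the sed strips the
+	# leading "B." (plus any zero padding), leaving HPUX_REV=11.23.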
+ case "$UNAME_MACHINE" in
+ 9000/31?) HP_ARCH=m68000 ;;
+ 9000/[34]??) HP_ARCH=m68k ;;
+ 9000/[678][0-9][0-9])
+ if test -x /usr/bin/getconf; then
+ sc_cpu_version=$(/usr/bin/getconf SC_CPU_VERSION 2>/dev/null)
+ sc_kernel_bits=$(/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null)
+ case "$sc_cpu_version" in
+ 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0
+ 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1
+ 532) # CPU_PA_RISC2_0
+ case "$sc_kernel_bits" in
+ 32) HP_ARCH=hppa2.0n ;;
+ 64) HP_ARCH=hppa2.0w ;;
+ '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20
+ esac ;;
+ esac
+ fi
+ if test "$HP_ARCH" = ""; then
+ set_cc_for_build
+ sed 's/^ //' << EOF > "$dummy.c"
+
+ #define _HPUX_SOURCE
+ #include <stdlib.h>
+ #include <unistd.h>
+
+ int main ()
+ {
+ #if defined(_SC_KERNEL_BITS)
+ long bits = sysconf(_SC_KERNEL_BITS);
+ #endif
+ long cpu = sysconf (_SC_CPU_VERSION);
+
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
+ case CPU_PA_RISC2_0:
+ #if defined(_SC_KERNEL_BITS)
+ switch (bits)
+ {
+ case 64: puts ("hppa2.0w"); break;
+ case 32: puts ("hppa2.0n"); break;
+ default: puts ("hppa2.0"); break;
+ } break;
+ #else /* !defined(_SC_KERNEL_BITS) */
+ puts ("hppa2.0"); break;
+ #endif
+ default: puts ("hppa1.0"); break;
+ }
+ exit (0);
+ }
+EOF
+ (CCOPTS="" $CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null) && HP_ARCH=$("$dummy")
+ test -z "$HP_ARCH" && HP_ARCH=hppa
+ fi ;;
+ esac
+ if test "$HP_ARCH" = hppa2.0w
+ then
+ set_cc_for_build
+
+ # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+ # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
+ # generating 64-bit code. GNU and HP use different nomenclature:
+ #
+ # $ CC_FOR_BUILD=cc ./config.guess
+ # => hppa2.0w-hp-hpux11.23
+ # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
+ # => hppa64-hp-hpux11.23
+
+ if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) |
+ grep -q __LP64__
+ then
+ HP_ARCH=hppa2.0w
+ else
+ HP_ARCH=hppa64
+ fi
+ fi
+ echo "$HP_ARCH"-hp-hpux"$HPUX_REV"
+ exit ;;
+ ia64:HP-UX:*:*)
+ HPUX_REV=$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//')
+ echo ia64-hp-hpux"$HPUX_REV"
+ exit ;;
+ 3050*:HI-UX:*:*)
+ set_cc_for_build
+ sed 's/^ //' << EOF > "$dummy.c"
+ #include <unistd.h>
+ int
+ main ()
+ {
+ long cpu = sysconf (_SC_CPU_VERSION);
+ /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+ true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
+ results, however. */
+ if (CPU_IS_PA_RISC (cpu))
+ {
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+ default: puts ("hppa-hitachi-hiuxwe2"); break;
+ }
+ }
+ else if (CPU_IS_HP_MC68K (cpu))
+ puts ("m68k-hitachi-hiuxwe2");
+ else puts ("unknown-hitachi-hiuxwe2");
+ exit (0);
+ }
+EOF
+ $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=$("$dummy") &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo unknown-hitachi-hiuxwe2
+ exit ;;
+ 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*)
+ echo hppa1.1-hp-bsd
+ exit ;;
+ 9000/8??:4.3bsd:*:*)
+ echo hppa1.0-hp-bsd
+ exit ;;
+ *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
+ echo hppa1.0-hp-mpeix
+ exit ;;
+ hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*)
+ echo hppa1.1-hp-osf
+ exit ;;
+ hp8??:OSF1:*:*)
+ echo hppa1.0-hp-osf
+ exit ;;
+ i*86:OSF1:*:*)
+ if test -x /usr/sbin/sysversion ; then
+ echo "$UNAME_MACHINE"-unknown-osf1mk
+ else
+ echo "$UNAME_MACHINE"-unknown-osf1
+ fi
+ exit ;;
+ parisc*:Lites*:*:*)
+ echo hppa1.1-hp-lites
+ exit ;;
+ C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+ echo c1-convex-bsd
+ exit ;;
+ C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit ;;
+ C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+ echo c34-convex-bsd
+ exit ;;
+ C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+ echo c38-convex-bsd
+ exit ;;
+ C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+ echo c4-convex-bsd
+ exit ;;
+ CRAY*Y-MP:*:*:*)
+ echo ymp-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*[A-Z]90:*:*:*)
+ echo "$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \
+ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
+ -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*TS:*:*:*)
+ echo t90-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*T3E:*:*:*)
+ echo alphaev5-cray-unicosmk"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*SV1:*:*:*)
+ echo sv1-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ *:UNICOS/mp:*:*)
+ echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
+ FUJITSU_PROC=$(uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz)
+ FUJITSU_SYS=$(uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///')
+ FUJITSU_REL=$(echo "$UNAME_RELEASE" | sed -e 's/ /_/')
+ echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ 5000:UNIX_System_V:4.*:*)
+ FUJITSU_SYS=$(uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///')
+ FUJITSU_REL=$(echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/')
+ echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+ echo "$UNAME_MACHINE"-pc-bsdi"$UNAME_RELEASE"
+ exit ;;
+ sparc*:BSD/OS:*:*)
+ echo sparc-unknown-bsdi"$UNAME_RELEASE"
+ exit ;;
+ *:BSD/OS:*:*)
+ echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE"
+ exit ;;
+ arm:FreeBSD:*:*)
+ UNAME_PROCESSOR=$(uname -p)
+ set_cc_for_build
+ if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_PCS_VFP
+ then
+ echo "${UNAME_PROCESSOR}"-unknown-freebsd"$(echo ${UNAME_RELEASE}|sed -e 's/[-(].*//')"-gnueabi
+ else
+ echo "${UNAME_PROCESSOR}"-unknown-freebsd"$(echo ${UNAME_RELEASE}|sed -e 's/[-(].*//')"-gnueabihf
+ fi
+ exit ;;
+ *:FreeBSD:*:*)
+ UNAME_PROCESSOR=$(/usr/bin/uname -p)
+ case "$UNAME_PROCESSOR" in
+ amd64)
+ UNAME_PROCESSOR=x86_64 ;;
+ i386)
+ UNAME_PROCESSOR=i586 ;;
+ esac
+ echo "$UNAME_PROCESSOR"-unknown-freebsd"$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')"
+ exit ;;
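+    # e.g. a hypothetical amd64 box running "13.0-RELEASE-p4": the
+    # sed drops everything from the first "-" or "(", so this prints
+    # x86_64-unknown-freebsd13.0.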
+ i*:CYGWIN*:*)
+ echo "$UNAME_MACHINE"-pc-cygwin
+ exit ;;
+ *:MINGW64*:*)
+ echo "$UNAME_MACHINE"-pc-mingw64
+ exit ;;
+ *:MINGW*:*)
+ echo "$UNAME_MACHINE"-pc-mingw32
+ exit ;;
+ *:MSYS*:*)
+ echo "$UNAME_MACHINE"-pc-msys
+ exit ;;
+ i*:PW*:*)
+ echo "$UNAME_MACHINE"-pc-pw32
+ exit ;;
+ *:Interix*:*)
+ case "$UNAME_MACHINE" in
+ x86)
+ echo i586-pc-interix"$UNAME_RELEASE"
+ exit ;;
+ authenticamd | genuineintel | EM64T)
+ echo x86_64-unknown-interix"$UNAME_RELEASE"
+ exit ;;
+ IA64)
+ echo ia64-unknown-interix"$UNAME_RELEASE"
+ exit ;;
+ esac ;;
+ i*:UWIN*:*)
+ echo "$UNAME_MACHINE"-pc-uwin
+ exit ;;
+ amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
+ echo x86_64-pc-cygwin
+ exit ;;
+ prep*:SunOS:5.*:*)
+ echo powerpcle-unknown-solaris2"$(echo "$UNAME_RELEASE"|sed -e 's/[^.]*//')"
+ exit ;;
+ *:GNU:*:*)
+ # the GNU system
+ echo "$(echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,')-unknown-$LIBC$(echo "$UNAME_RELEASE"|sed -e 's,/.*$,,')"
+ exit ;;
+ *:GNU/*:*:*)
+ # other systems with GNU libc and userland
+ echo "$UNAME_MACHINE-unknown-$(echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]")$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')-$LIBC"
+ exit ;;
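+    # e.g. a hypothetical x86_64 GNU/kFreeBSD 9.0 host with glibc
+    # would fall into the second arm and print
+    # x86_64-unknown-kfreebsd9.0-gnu.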
+ *:Minix:*:*)
+ echo "$UNAME_MACHINE"-unknown-minix
+ exit ;;
+ aarch64:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ aarch64_be:Linux:*:*)
+ UNAME_MACHINE=aarch64_be
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ alpha:Linux:*:*)
+ case $(sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' /proc/cpuinfo 2>/dev/null) in
+ EV5) UNAME_MACHINE=alphaev5 ;;
+ EV56) UNAME_MACHINE=alphaev56 ;;
+ PCA56) UNAME_MACHINE=alphapca56 ;;
+ PCA57) UNAME_MACHINE=alphapca56 ;;
+ EV6) UNAME_MACHINE=alphaev6 ;;
+ EV67) UNAME_MACHINE=alphaev67 ;;
+ EV68*) UNAME_MACHINE=alphaev68 ;;
+ esac
+ objdump --private-headers /bin/sh | grep -q ld.so.1
+ if test "$?" = 0 ; then LIBC=gnulibc1 ; fi
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ arc:Linux:*:* | arceb:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ arm*:Linux:*:*)
+ set_cc_for_build
+ if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_EABI__
+ then
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ else
+ if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_PCS_VFP
+ then
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabi
+ else
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabihf
+ fi
+ fi
+ exit ;;
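+    # If __ARM_EABI__ survives preprocessing it is undefined, i.e. an
+    # old-ABI toolchain; otherwise __ARM_PCS_VFP picks the float ABI.
+    # e.g. a hypothetical armv7l hard-float glibc host prints
+    # armv7l-unknown-linux-gnueabihf.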
+ avr32*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ cris:Linux:*:*)
+ echo "$UNAME_MACHINE"-axis-linux-"$LIBC"
+ exit ;;
+ crisv32:Linux:*:*)
+ echo "$UNAME_MACHINE"-axis-linux-"$LIBC"
+ exit ;;
+ e2k:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ frv:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ hexagon:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ i*86:Linux:*:*)
+ echo "$UNAME_MACHINE"-pc-linux-"$LIBC"
+ exit ;;
+ ia64:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ k1om:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ loongarch32:Linux:*:* | loongarch64:Linux:*:* | loongarchx32:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ m32r*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ m68*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ mips:Linux:*:* | mips64:Linux:*:*)
+ set_cc_for_build
+ IS_GLIBC=0
+ test x"${LIBC}" = xgnu && IS_GLIBC=1
+ sed 's/^ //' << EOF > "$dummy.c"
+ #undef CPU
+ #undef mips
+ #undef mipsel
+ #undef mips64
+ #undef mips64el
+ #if ${IS_GLIBC} && defined(_ABI64)
+ LIBCABI=gnuabi64
+ #else
+ #if ${IS_GLIBC} && defined(_ABIN32)
+ LIBCABI=gnuabin32
+ #else
+ LIBCABI=${LIBC}
+ #endif
+ #endif
+
+ #if ${IS_GLIBC} && defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6
+ CPU=mipsisa64r6
+ #else
+ #if ${IS_GLIBC} && !defined(__mips64) && defined(__mips_isa_rev) && __mips_isa_rev>=6
+ CPU=mipsisa32r6
+ #else
+ #if defined(__mips64)
+ CPU=mips64
+ #else
+ CPU=mips
+ #endif
+ #endif
+ #endif
+
+ #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+ MIPS_ENDIAN=el
+ #else
+ #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+ MIPS_ENDIAN=
+ #else
+ MIPS_ENDIAN=
+ #endif
+ #endif
+EOF
+ eval "$($CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU\|^MIPS_ENDIAN\|^LIBCABI')"
+ test "x$CPU" != x && { echo "$CPU${MIPS_ENDIAN}-unknown-linux-$LIBCABI"; exit; }
+ ;;
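+    # The heredoc above is only preprocessed, never compiled:
+    # whichever CPU=/MIPS_ENDIAN=/LIBCABI= assignments survive the
+    # #if maze are eval'd back into the shell. e.g. a hypothetical
+    # little-endian mips64 glibc host built with the n64 ABI would
+    # print mips64el-unknown-linux-gnuabi64.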
+ mips64el:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ openrisc*:Linux:*:*)
+ echo or1k-unknown-linux-"$LIBC"
+ exit ;;
+ or32:Linux:*:* | or1k*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ padre:Linux:*:*)
+ echo sparc-unknown-linux-"$LIBC"
+ exit ;;
+ parisc64:Linux:*:* | hppa64:Linux:*:*)
+ echo hppa64-unknown-linux-"$LIBC"
+ exit ;;
+ parisc:Linux:*:* | hppa:Linux:*:*)
+ # Look for CPU level
+ case $(grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2) in
+ PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;;
+ PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;;
+ *) echo hppa-unknown-linux-"$LIBC" ;;
+ esac
+ exit ;;
+ ppc64:Linux:*:*)
+ echo powerpc64-unknown-linux-"$LIBC"
+ exit ;;
+ ppc:Linux:*:*)
+ echo powerpc-unknown-linux-"$LIBC"
+ exit ;;
+ ppc64le:Linux:*:*)
+ echo powerpc64le-unknown-linux-"$LIBC"
+ exit ;;
+ ppcle:Linux:*:*)
+ echo powerpcle-unknown-linux-"$LIBC"
+ exit ;;
+ riscv32:Linux:*:* | riscv32be:Linux:*:* | riscv64:Linux:*:* | riscv64be:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ s390:Linux:*:* | s390x:Linux:*:*)
+ echo "$UNAME_MACHINE"-ibm-linux-"$LIBC"
+ exit ;;
+ sh64*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ sh*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ sparc:Linux:*:* | sparc64:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ tile*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ vax:Linux:*:*)
+ echo "$UNAME_MACHINE"-dec-linux-"$LIBC"
+ exit ;;
+ x86_64:Linux:*:*)
+ set_cc_for_build
+ LIBCABI=$LIBC
+ if test "$CC_FOR_BUILD" != no_compiler_found; then
+ if (echo '#ifdef __ILP32__'; echo IS_X32; echo '#endif') | \
+ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_X32 >/dev/null
+ then
+ LIBCABI="$LIBC"x32
+ fi
+ fi
+ echo "$UNAME_MACHINE"-pc-linux-"$LIBCABI"
+ exit ;;
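+    # Same preprocessor trick as above: __ILP32__ marks the x32 ABI,
+    # so a hypothetical x32 userland would print
+    # x86_64-pc-linux-gnux32 instead of x86_64-pc-linux-gnu.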
+ xtensa*:Linux:*:*)
+ echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"
+ exit ;;
+ i*86:DYNIX/ptx:4*:*)
+ # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+ # earlier versions are messed up and put the nodename in both
+ # sysname and nodename.
+ echo i386-sequent-sysv4
+ exit ;;
+ i*86:UNIX_SV:4.2MP:2.*)
+ # Unixware is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this,
+ # I just have to hope. -- rms.
+ # Use sysv4.2uw... so that sysv4* matches it.
+ echo "$UNAME_MACHINE"-pc-sysv4.2uw"$UNAME_VERSION"
+ exit ;;
+ i*86:OS/2:*:*)
+ # If we were able to find `uname', then EMX Unix compatibility
+ # is probably installed.
+ echo "$UNAME_MACHINE"-pc-os2-emx
+ exit ;;
+ i*86:XTS-300:*:STOP)
+ echo "$UNAME_MACHINE"-unknown-stop
+ exit ;;
+ i*86:atheos:*:*)
+ echo "$UNAME_MACHINE"-unknown-atheos
+ exit ;;
+ i*86:syllable:*:*)
+ echo "$UNAME_MACHINE"-pc-syllable
+ exit ;;
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
+ echo i386-unknown-lynxos"$UNAME_RELEASE"
+ exit ;;
+ i*86:*DOS:*:*)
+ echo "$UNAME_MACHINE"-pc-msdosdjgpp
+ exit ;;
+ i*86:*:4.*:*)
+ UNAME_REL=$(echo "$UNAME_RELEASE" | sed 's/\/MP$//')
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL"
+ else
+ echo "$UNAME_MACHINE"-pc-sysv"$UNAME_REL"
+ fi
+ exit ;;
+ i*86:*:5:[678]*)
+ # UnixWare 7.x, OpenUNIX and OpenServer 6.
+ case $(/bin/uname -X | grep "^Machine") in
+ *486*) UNAME_MACHINE=i486 ;;
+ *Pentium) UNAME_MACHINE=i586 ;;
+ *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+ esac
+ echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}"
+ exit ;;
+ i*86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=$(sed -n 's/.*Version //p' </usr/options/cb.name)
+ echo "$UNAME_MACHINE"-pc-isc"$UNAME_REL"
+ elif /bin/uname -X 2>/dev/null >/dev/null ; then
+ UNAME_REL=$( (/bin/uname -X|grep Release|sed -e 's/.*= //'))
+ (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+ (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+ && UNAME_MACHINE=i586
+ (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+ && UNAME_MACHINE=i686
+ (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+ && UNAME_MACHINE=i686
+ echo "$UNAME_MACHINE"-pc-sco"$UNAME_REL"
+ else
+ echo "$UNAME_MACHINE"-pc-sysv32
+ fi
+ exit ;;
+ pc:*:*:*)
+ # Left here for compatibility:
+    # for DJGPP, uname -m always prints 'pc' and says nothing about
+    # the processor, so we play safe by assuming i586.
+ # Note: whatever this is, it MUST be the same as what config.sub
+ # prints for the "djgpp" host, or else GDB configure will decide that
+ # this is a cross-build.
+ echo i586-pc-msdosdjgpp
+ exit ;;
+ Intel:Mach:3*:*)
+ echo i386-pc-mach3
+ exit ;;
+ paragon:*:*:*)
+ echo i860-intel-osf1
+ exit ;;
+ i860:*:4.*:*) # i860-SVR4
+ if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+ echo i860-stardent-sysv"$UNAME_RELEASE" # Stardent Vistra i860-SVR4
+ else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv"$UNAME_RELEASE" # Unknown i860-SVR4
+ fi
+ exit ;;
+ mini*:CTIX:SYS*5:*)
+ # "miniframe"
+ echo m68010-convergent-sysv
+ exit ;;
+ mc68k:UNIX:SYSTEM5:3.51m)
+ echo m68k-convergent-sysv
+ exit ;;
+ M680?0:D-NIX:5.3:*)
+ echo m68k-diab-dnix
+ exit ;;
+ M68*:*:R3V[5678]*:*)
+ test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
+ 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
+ OS_REL=''
+ test -r /etc/.relid \
+ && OS_REL=.$(sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid)
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3"$OS_REL"; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;;
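+    # e.g. with a hypothetical /etc/.relid whose third field is "34",
+    # the sed yields OS_REL=.34 and a Pentium box prints
+    # i586-ncr-sysv4.3.34.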
+ 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4; exit; } ;;
+ NCR*:*:4.2:* | MPRAS*:*:4.2:*)
+ OS_REL='.3'
+ test -r /etc/.relid \
+ && OS_REL=.$(sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid)
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3"$OS_REL"; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
+ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;;
+ m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
+ echo m68k-unknown-lynxos"$UNAME_RELEASE"
+ exit ;;
+ mc68030:UNIX_System_V:4.*:*)
+ echo m68k-atari-sysv4
+ exit ;;
+ TSUNAMI:LynxOS:2.*:*)
+ echo sparc-unknown-lynxos"$UNAME_RELEASE"
+ exit ;;
+ rs6000:LynxOS:2.*:*)
+ echo rs6000-unknown-lynxos"$UNAME_RELEASE"
+ exit ;;
+ PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
+ echo powerpc-unknown-lynxos"$UNAME_RELEASE"
+ exit ;;
+ SM[BE]S:UNIX_SV:*:*)
+ echo mips-dde-sysv"$UNAME_RELEASE"
+ exit ;;
+ RM*:ReliantUNIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ RM*:SINIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ *:SINIX-*:*:*)
+ if uname -p 2>/dev/null >/dev/null ; then
+ UNAME_MACHINE=$( (uname -p) 2>/dev/null)
+ echo "$UNAME_MACHINE"-sni-sysv4
+ else
+ echo ns32k-sni-sysv
+ fi
+ exit ;;
+ PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+ # says <Richard.M.Bartel@ccMail.Census.GOV>
+ echo i586-unisys-sysv4
+ exit ;;
+ *:UNIX_System_V:4*:FTX*)
+ # From Gerald Hewes <hewes@openmarket.com>.
+ # How about differentiating between stratus architectures? -djm
+ echo hppa1.1-stratus-sysv4
+ exit ;;
+ *:*:*:FTX*)
+ # From seanf@swdc.stratus.com.
+ echo i860-stratus-sysv4
+ exit ;;
+ i*86:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo "$UNAME_MACHINE"-stratus-vos
+ exit ;;
+ *:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo hppa1.1-stratus-vos
+ exit ;;
+ mc68*:A/UX:*:*)
+ echo m68k-apple-aux"$UNAME_RELEASE"
+ exit ;;
+ news*:NEWS-OS:6*:*)
+ echo mips-sony-newsos6
+ exit ;;
+ R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+ if test -d /usr/nec; then
+ echo mips-nec-sysv"$UNAME_RELEASE"
+ else
+ echo mips-unknown-sysv"$UNAME_RELEASE"
+ fi
+ exit ;;
+ BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
+ echo powerpc-be-beos
+ exit ;;
+ BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
+ echo powerpc-apple-beos
+ exit ;;
+ BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
+ echo i586-pc-beos
+ exit ;;
+ BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
+ echo i586-pc-haiku
+ exit ;;
+ x86_64:Haiku:*:*)
+ echo x86_64-unknown-haiku
+ exit ;;
+ SX-4:SUPER-UX:*:*)
+ echo sx4-nec-superux"$UNAME_RELEASE"
+ exit ;;
+ SX-5:SUPER-UX:*:*)
+ echo sx5-nec-superux"$UNAME_RELEASE"
+ exit ;;
+ SX-6:SUPER-UX:*:*)
+ echo sx6-nec-superux"$UNAME_RELEASE"
+ exit ;;
+ SX-7:SUPER-UX:*:*)
+ echo sx7-nec-superux"$UNAME_RELEASE"
+ exit ;;
+ SX-8:SUPER-UX:*:*)
+ echo sx8-nec-superux"$UNAME_RELEASE"
+ exit ;;
+ SX-8R:SUPER-UX:*:*)
+ echo sx8r-nec-superux"$UNAME_RELEASE"
+ exit ;;
+ SX-ACE:SUPER-UX:*:*)
+ echo sxace-nec-superux"$UNAME_RELEASE"
+ exit ;;
+ Power*:Rhapsody:*:*)
+ echo powerpc-apple-rhapsody"$UNAME_RELEASE"
+ exit ;;
+ *:Rhapsody:*:*)
+ echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE"
+ exit ;;
+ arm64:Darwin:*:*)
+ echo aarch64-apple-darwin"$UNAME_RELEASE"
+ exit ;;
+ *:Darwin:*:*)
+ UNAME_PROCESSOR=$(uname -p)
+ case $UNAME_PROCESSOR in
+ unknown) UNAME_PROCESSOR=powerpc ;;
+ esac
+ if command -v xcode-select > /dev/null 2> /dev/null && \
+ ! xcode-select --print-path > /dev/null 2> /dev/null ; then
+ # Avoid executing cc if there is no toolchain installed as
+ # cc will be a stub that puts up a graphical alert
+ # prompting the user to install developer tools.
+ CC_FOR_BUILD=no_compiler_found
+ else
+ set_cc_for_build
+ fi
+ if test "$CC_FOR_BUILD" != no_compiler_found; then
+ if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ case $UNAME_PROCESSOR in
+ i386) UNAME_PROCESSOR=x86_64 ;;
+ powerpc) UNAME_PROCESSOR=powerpc64 ;;
+ esac
+ fi
+ # On 10.4-10.6 one might compile for PowerPC via gcc -arch ppc
+ if (echo '#ifdef __POWERPC__'; echo IS_PPC; echo '#endif') | \
+ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_PPC >/dev/null
+ then
+ UNAME_PROCESSOR=powerpc
+ fi
+ elif test "$UNAME_PROCESSOR" = i386 ; then
+ # uname -m returns i386 or x86_64
+ UNAME_PROCESSOR=$UNAME_MACHINE
+ fi
+ echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE"
+ exit ;;
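+    # e.g. a hypothetical Apple Silicon machine is caught earlier by
+    # the arm64:Darwin arm (aarch64-apple-darwin20.3.0), while an
+    # Intel Mac falls through to here and prints
+    # x86_64-apple-darwin20.3.0.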
+ *:procnto*:*:* | *:QNX:[0123456789]*:*)
+ UNAME_PROCESSOR=$(uname -p)
+ if test "$UNAME_PROCESSOR" = x86; then
+ UNAME_PROCESSOR=i386
+ UNAME_MACHINE=pc
+ fi
+ echo "$UNAME_PROCESSOR"-"$UNAME_MACHINE"-nto-qnx"$UNAME_RELEASE"
+ exit ;;
+ *:QNX:*:4*)
+ echo i386-pc-qnx
+ exit ;;
+ NEO-*:NONSTOP_KERNEL:*:*)
+ echo neo-tandem-nsk"$UNAME_RELEASE"
+ exit ;;
+ NSE-*:NONSTOP_KERNEL:*:*)
+ echo nse-tandem-nsk"$UNAME_RELEASE"
+ exit ;;
+ NSR-*:NONSTOP_KERNEL:*:*)
+ echo nsr-tandem-nsk"$UNAME_RELEASE"
+ exit ;;
+ NSV-*:NONSTOP_KERNEL:*:*)
+ echo nsv-tandem-nsk"$UNAME_RELEASE"
+ exit ;;
+ NSX-*:NONSTOP_KERNEL:*:*)
+ echo nsx-tandem-nsk"$UNAME_RELEASE"
+ exit ;;
+ *:NonStop-UX:*:*)
+ echo mips-compaq-nonstopux
+ exit ;;
+ BS2000:POSIX*:*:*)
+ echo bs2000-siemens-sysv
+ exit ;;
+ DS/*:UNIX_System_V:*:*)
+ echo "$UNAME_MACHINE"-"$UNAME_SYSTEM"-"$UNAME_RELEASE"
+ exit ;;
+ *:Plan9:*:*)
+ # "uname -m" is not consistent, so use $cputype instead. 386
+ # is converted to i386 for consistency with other x86
+ # operating systems.
+ # shellcheck disable=SC2154
+ if test "$cputype" = 386; then
+ UNAME_MACHINE=i386
+ else
+ UNAME_MACHINE="$cputype"
+ fi
+ echo "$UNAME_MACHINE"-unknown-plan9
+ exit ;;
+ *:TOPS-10:*:*)
+ echo pdp10-unknown-tops10
+ exit ;;
+ *:TENEX:*:*)
+ echo pdp10-unknown-tenex
+ exit ;;
+ KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+ echo pdp10-dec-tops20
+ exit ;;
+ XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+ echo pdp10-xkl-tops20
+ exit ;;
+ *:TOPS-20:*:*)
+ echo pdp10-unknown-tops20
+ exit ;;
+ *:ITS:*:*)
+ echo pdp10-unknown-its
+ exit ;;
+ SEI:*:*:SEIUX)
+ echo mips-sei-seiux"$UNAME_RELEASE"
+ exit ;;
+ *:DragonFly:*:*)
+ echo "$UNAME_MACHINE"-unknown-dragonfly"$(echo "$UNAME_RELEASE"|sed -e 's/[-(].*//')"
+ exit ;;
+ *:*VMS:*:*)
+ UNAME_MACHINE=$( (uname -p) 2>/dev/null)
+ case "$UNAME_MACHINE" in
+ A*) echo alpha-dec-vms ; exit ;;
+ I*) echo ia64-dec-vms ; exit ;;
+ V*) echo vax-dec-vms ; exit ;;
+ esac ;;
+ *:XENIX:*:SysV)
+ echo i386-pc-xenix
+ exit ;;
+ i*86:skyos:*:*)
+ echo "$UNAME_MACHINE"-pc-skyos"$(echo "$UNAME_RELEASE" | sed -e 's/ .*$//')"
+ exit ;;
+ i*86:rdos:*:*)
+ echo "$UNAME_MACHINE"-pc-rdos
+ exit ;;
+ i*86:AROS:*:*)
+ echo "$UNAME_MACHINE"-pc-aros
+ exit ;;
+ x86_64:VMkernel:*:*)
+ echo "$UNAME_MACHINE"-unknown-esx
+ exit ;;
+ amd64:Isilon\ OneFS:*:*)
+ echo x86_64-unknown-onefs
+ exit ;;
+ *:Unleashed:*:*)
+ echo "$UNAME_MACHINE"-unknown-unleashed"$UNAME_RELEASE"
+ exit ;;
+esac
+
+# No uname command or uname output not recognized.
+set_cc_for_build
+cat > "$dummy.c" <<EOF
+#ifdef _SEQUENT_
+#include <sys/types.h>
+#include <sys/utsname.h>
+#endif
+#if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__)
+#if defined (vax) || defined (__vax) || defined (__vax__) || defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__)
+#include <signal.h>
+#if defined(_SIZE_T_) || defined(SIGLOST)
+#include <sys/utsname.h>
+#endif
+#endif
+#endif
+main ()
+{
+#if defined (sony)
+#if defined (MIPSEB)
+ /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
+ I don't know.... */
+ printf ("mips-sony-bsd\n"); exit (0);
+#else
+#include <sys/param.h>
+ printf ("m68k-sony-newsos%s\n",
+#ifdef NEWSOS4
+ "4"
+#else
+ ""
+#endif
+ ); exit (0);
+#endif
+#endif
+
+#if defined (NeXT)
+#if !defined (__ARCHITECTURE__)
+#define __ARCHITECTURE__ "m68k"
+#endif
+ int version;
+ version=$( (hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null);
+ if (version < 4)
+ printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
+ else
+ printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
+ exit (0);
+#endif
+
+#if defined (MULTIMAX) || defined (n16)
+#if defined (UMAXV)
+ printf ("ns32k-encore-sysv\n"); exit (0);
+#else
+#if defined (CMU)
+ printf ("ns32k-encore-mach\n"); exit (0);
+#else
+ printf ("ns32k-encore-bsd\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (__386BSD__)
+ printf ("i386-pc-bsd\n"); exit (0);
+#endif
+
+#if defined (sequent)
+#if defined (i386)
+ printf ("i386-sequent-dynix\n"); exit (0);
+#endif
+#if defined (ns32000)
+ printf ("ns32k-sequent-dynix\n"); exit (0);
+#endif
+#endif
+
+#if defined (_SEQUENT_)
+ struct utsname un;
+
+ uname(&un);
+ if (strncmp(un.version, "V2", 2) == 0) {
+ printf ("i386-sequent-ptx2\n"); exit (0);
+ }
+ if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
+ printf ("i386-sequent-ptx1\n"); exit (0);
+ }
+ printf ("i386-sequent-ptx\n"); exit (0);
+#endif
+
+#if defined (vax)
+#if !defined (ultrix)
+#include <sys/param.h>
+#if defined (BSD)
+#if BSD == 43
+ printf ("vax-dec-bsd4.3\n"); exit (0);
+#else
+#if BSD == 199006
+ printf ("vax-dec-bsd4.3reno\n"); exit (0);
+#else
+ printf ("vax-dec-bsd\n"); exit (0);
+#endif
+#endif
+#else
+ printf ("vax-dec-bsd\n"); exit (0);
+#endif
+#else
+#if defined(_SIZE_T_) || defined(SIGLOST)
+ struct utsname un;
+ uname (&un);
+ printf ("vax-dec-ultrix%s\n", un.release); exit (0);
+#else
+ printf ("vax-dec-ultrix\n"); exit (0);
+#endif
+#endif
+#endif
+#if defined(ultrix) || defined(_ultrix) || defined(__ultrix) || defined(__ultrix__)
+#if defined(mips) || defined(__mips) || defined(__mips__) || defined(MIPS) || defined(__MIPS__)
+#if defined(_SIZE_T_) || defined(SIGLOST)
+	struct utsname un;
+ uname (&un);
+ printf ("mips-dec-ultrix%s\n", un.release); exit (0);
+#else
+ printf ("mips-dec-ultrix\n"); exit (0);
+#endif
+#endif
+#endif
+
+#if defined (alliant) && defined (i860)
+ printf ("i860-alliant-bsd\n"); exit (0);
+#endif
+
+ exit (1);
+}
+EOF
+
+$CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null && SYSTEM_NAME=$($dummy) &&
+ { echo "$SYSTEM_NAME"; exit; }
+
+# Apollos put the system type in the environment.
+test -d /usr/apollo && { echo "$ISP-apollo-$SYSTYPE"; exit; }
+
+echo "$0: unable to guess system type" >&2
+
+case "$UNAME_MACHINE:$UNAME_SYSTEM" in
+ mips:Linux | mips64:Linux)
+ # If we got here on MIPS GNU/Linux, output extra information.
+ cat >&2 <<EOF
+
+NOTE: MIPS GNU/Linux systems require a C compiler to fully recognize
+the system type. Please install a C compiler and try again.
+EOF
+ ;;
+esac
+
+cat >&2 <<EOF
+
+This script (version $timestamp) has failed to recognize the
+operating system you are using. If your copy of the script is old,
+overwrite *all* copies of config.guess and config.sub with the latest
+versions from:
+
+ https://git.savannah.gnu.org/cgit/config.git/plain/config.guess
+and
+ https://git.savannah.gnu.org/cgit/config.git/plain/config.sub
+EOF
+
+year=$(echo $timestamp | sed 's,-.*,,')
+# shellcheck disable=SC2003
+if test "$(expr "$(date +%Y)" - "$year")" -lt 3 ; then
+ cat >&2 <<EOF
+
+If $0 has already been updated, send the following data, plus any
+other information you think might be pertinent, to
+config-patches@gnu.org so that your system can be handled correctly.
+
+config.guess timestamp = $timestamp
+
+uname -m = $( (uname -m) 2>/dev/null || echo unknown)
+uname -r = $( (uname -r) 2>/dev/null || echo unknown)
+uname -s = $( (uname -s) 2>/dev/null || echo unknown)
+uname -v = $( (uname -v) 2>/dev/null || echo unknown)
+
+/usr/bin/uname -p = $( (/usr/bin/uname -p) 2>/dev/null)
+/bin/uname -X = $( (/bin/uname -X) 2>/dev/null)
+
+hostinfo = $( (hostinfo) 2>/dev/null)
+/bin/universe = $( (/bin/universe) 2>/dev/null)
+/usr/bin/arch -k = $( (/usr/bin/arch -k) 2>/dev/null)
+/bin/arch = $( (/bin/arch) 2>/dev/null)
+/usr/bin/oslevel = $( (/usr/bin/oslevel) 2>/dev/null)
+/usr/convex/getsysinfo = $( (/usr/convex/getsysinfo) 2>/dev/null)
+
+UNAME_MACHINE = "$UNAME_MACHINE"
+UNAME_RELEASE = "$UNAME_RELEASE"
+UNAME_SYSTEM = "$UNAME_SYSTEM"
+UNAME_VERSION = "$UNAME_VERSION"
+EOF
+fi
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'before-save-hook 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
+ then
+ eval $set_cc_for_build
+
+ # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
+ # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
+ # generating 64-bit code. GNU and HP use different nomenclature:
+ #
+ # $ CC_FOR_BUILD=cc ./config.guess
+ # => hppa2.0w-hp-hpux11.23
+ # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
+ # => hppa64-hp-hpux11.23
+
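+	    # A macro name fed to the preprocessor survives only if it is
+	    # not defined, so a surviving __LP64__ means the compiler is
+	    # generating 32-bit code.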
+ if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) |
+ grep -q __LP64__
+ then
+ HP_ARCH=hppa2.0w
+ else
+ HP_ARCH=hppa64
+ fi
+ fi
+ echo ${HP_ARCH}-hp-hpux${HPUX_REV}
+ exit ;;
+ ia64:HP-UX:*:*)
+ HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
+ echo ia64-hp-hpux${HPUX_REV}
+ exit ;;
+ 3050*:HI-UX:*:*)
+ eval $set_cc_for_build
+ sed 's/^ //' << EOF >$dummy.c
+ #include <unistd.h>
+ int
+ main ()
+ {
+ long cpu = sysconf (_SC_CPU_VERSION);
+ /* The order matters, because CPU_IS_HP_MC68K erroneously returns
+ true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
+ results, however. */
+ if (CPU_IS_PA_RISC (cpu))
+ {
+ switch (cpu)
+ {
+ case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
+ case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
+ default: puts ("hppa-hitachi-hiuxwe2"); break;
+ }
+ }
+ else if (CPU_IS_HP_MC68K (cpu))
+ puts ("m68k-hitachi-hiuxwe2");
+ else puts ("unknown-hitachi-hiuxwe2");
+ exit (0);
+ }
+EOF
+ $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
+ { echo "$SYSTEM_NAME"; exit; }
+ echo unknown-hitachi-hiuxwe2
+ exit ;;
+ 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
+ echo hppa1.1-hp-bsd
+ exit ;;
+ 9000/8??:4.3bsd:*:*)
+ echo hppa1.0-hp-bsd
+ exit ;;
+ *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
+ echo hppa1.0-hp-mpeix
+ exit ;;
+ hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
+ echo hppa1.1-hp-osf
+ exit ;;
+ hp8??:OSF1:*:*)
+ echo hppa1.0-hp-osf
+ exit ;;
+ i*86:OSF1:*:*)
+ if [ -x /usr/sbin/sysversion ] ; then
+ echo ${UNAME_MACHINE}-unknown-osf1mk
+ else
+ echo ${UNAME_MACHINE}-unknown-osf1
+ fi
+ exit ;;
+ parisc*:Lites*:*:*)
+ echo hppa1.1-hp-lites
+ exit ;;
+ C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
+ echo c1-convex-bsd
+ exit ;;
+ C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
+ if getsysinfo -f scalar_acc
+ then echo c32-convex-bsd
+ else echo c2-convex-bsd
+ fi
+ exit ;;
+ C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
+ echo c34-convex-bsd
+ exit ;;
+ C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
+ echo c38-convex-bsd
+ exit ;;
+ C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
+ echo c4-convex-bsd
+ exit ;;
+ CRAY*Y-MP:*:*:*)
+ echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*[A-Z]90:*:*:*)
+ echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
+ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
+ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
+ -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*TS:*:*:*)
+ echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*T3E:*:*:*)
+ echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ CRAY*SV1:*:*:*)
+ echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ *:UNICOS/mp:*:*)
+ echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
+ exit ;;
+ F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
+ FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`
+ FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
+ echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ 5000:UNIX_System_V:4.*:*)
+ FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'`
+ FUJITSU_REL=`echo ${UNAME_RELEASE} | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'`
+ echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
+ exit ;;
+ i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
+ echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
+ exit ;;
+ sparc*:BSD/OS:*:*)
+ echo sparc-unknown-bsdi${UNAME_RELEASE}
+ exit ;;
+ *:BSD/OS:*:*)
+ echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
+ exit ;;
+ *:FreeBSD:*:*)
+ UNAME_PROCESSOR=`/usr/bin/uname -p`
+ case ${UNAME_PROCESSOR} in
+ amd64)
+ echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ *)
+ echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
+ esac
+ exit ;;
+ i*:CYGWIN*:*)
+ echo ${UNAME_MACHINE}-pc-cygwin
+ exit ;;
+ *:MINGW64*:*)
+ echo ${UNAME_MACHINE}-pc-mingw64
+ exit ;;
+ *:MINGW*:*)
+ echo ${UNAME_MACHINE}-pc-mingw32
+ exit ;;
+ *:MSYS*:*)
+ echo ${UNAME_MACHINE}-pc-msys
+ exit ;;
+ i*:windows32*:*)
+ # uname -m includes "-pc" on this system.
+ echo ${UNAME_MACHINE}-mingw32
+ exit ;;
+ i*:PW*:*)
+ echo ${UNAME_MACHINE}-pc-pw32
+ exit ;;
+ *:Interix*:*)
+ case ${UNAME_MACHINE} in
+ x86)
+ echo i586-pc-interix${UNAME_RELEASE}
+ exit ;;
+ authenticamd | genuineintel | EM64T)
+ echo x86_64-unknown-interix${UNAME_RELEASE}
+ exit ;;
+ IA64)
+ echo ia64-unknown-interix${UNAME_RELEASE}
+ exit ;;
+ esac ;;
+ [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
+ echo i${UNAME_MACHINE}-pc-mks
+ exit ;;
+ 8664:Windows_NT:*)
+ echo x86_64-pc-mks
+ exit ;;
+ i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
+ # How do we know it's Interix rather than the generic POSIX subsystem?
+	# It also conflicts with pre-2.0 versions of AT&T UWIN. Should we set
+	# UNAME_MACHINE based on the output of uname instead of i386?
+ echo i586-pc-interix
+ exit ;;
+ i*:UWIN*:*)
+ echo ${UNAME_MACHINE}-pc-uwin
+ exit ;;
+ amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
+ echo x86_64-unknown-cygwin
+ exit ;;
+ p*:CYGWIN*:*)
+ echo powerpcle-unknown-cygwin
+ exit ;;
+ prep*:SunOS:5.*:*)
+ echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
+ exit ;;
+ *:GNU:*:*)
+ # the GNU system
+ echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
+ exit ;;
+ *:GNU/*:*:*)
+ # other systems with GNU libc and userland
+ echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC}
+ exit ;;
+ i*86:Minix:*:*)
+ echo ${UNAME_MACHINE}-pc-minix
+ exit ;;
+ aarch64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ aarch64_be:Linux:*:*)
+ UNAME_MACHINE=aarch64_be
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ alpha:Linux:*:*)
+ case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
+ EV5) UNAME_MACHINE=alphaev5 ;;
+ EV56) UNAME_MACHINE=alphaev56 ;;
+ PCA56) UNAME_MACHINE=alphapca56 ;;
+ PCA57) UNAME_MACHINE=alphapca56 ;;
+ EV6) UNAME_MACHINE=alphaev6 ;;
+ EV67) UNAME_MACHINE=alphaev67 ;;
+ EV68*) UNAME_MACHINE=alphaev68 ;;
+ esac
+ objdump --private-headers /bin/sh | grep -q ld.so.1
+ if test "$?" = 0 ; then LIBC=gnulibc1 ; fi
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ arc:Linux:*:* | arceb:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ arm*:Linux:*:*)
+ eval $set_cc_for_build
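+	# Preprocessor probe: a surviving __ARM_EABI__ token means the
+	# macro is not defined (old ABI); otherwise a surviving
+	# __ARM_PCS_VFP selects the soft-float eabi suffix over eabihf.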
+ if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_EABI__
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ else
+ if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \
+ | grep -q __ARM_PCS_VFP
+ then
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi
+ else
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf
+ fi
+ fi
+ exit ;;
+ avr32*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ cris:Linux:*:*)
+ echo ${UNAME_MACHINE}-axis-linux-${LIBC}
+ exit ;;
+ crisv32:Linux:*:*)
+ echo ${UNAME_MACHINE}-axis-linux-${LIBC}
+ exit ;;
+ e2k:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ frv:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ hexagon:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ i*86:Linux:*:*)
+ echo ${UNAME_MACHINE}-pc-linux-${LIBC}
+ exit ;;
+ ia64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ k1om:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ m32r*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ m68*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ mips:Linux:*:* | mips64:Linux:*:*)
+ eval $set_cc_for_build
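+	# Let the preprocessor pick the endianness: one of the CPU=...
+	# assignments below survives preprocessing and is eval'ed back
+	# into the shell.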
+ sed 's/^ //' << EOF >$dummy.c
+ #undef CPU
+ #undef ${UNAME_MACHINE}
+ #undef ${UNAME_MACHINE}el
+ #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
+ CPU=${UNAME_MACHINE}el
+ #else
+ #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
+ CPU=${UNAME_MACHINE}
+ #else
+ CPU=
+ #endif
+ #endif
+EOF
+ eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'`
+ test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; }
+ ;;
+ mips64el:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ openrisc*:Linux:*:*)
+ echo or1k-unknown-linux-${LIBC}
+ exit ;;
+ or32:Linux:*:* | or1k*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ padre:Linux:*:*)
+ echo sparc-unknown-linux-${LIBC}
+ exit ;;
+ parisc64:Linux:*:* | hppa64:Linux:*:*)
+ echo hppa64-unknown-linux-${LIBC}
+ exit ;;
+ parisc:Linux:*:* | hppa:Linux:*:*)
+ # Look for CPU level
+ case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
+ PA7*) echo hppa1.1-unknown-linux-${LIBC} ;;
+ PA8*) echo hppa2.0-unknown-linux-${LIBC} ;;
+ *) echo hppa-unknown-linux-${LIBC} ;;
+ esac
+ exit ;;
+ ppc64:Linux:*:*)
+ echo powerpc64-unknown-linux-${LIBC}
+ exit ;;
+ ppc:Linux:*:*)
+ echo powerpc-unknown-linux-${LIBC}
+ exit ;;
+ ppc64le:Linux:*:*)
+ echo powerpc64le-unknown-linux-${LIBC}
+ exit ;;
+ ppcle:Linux:*:*)
+ echo powerpcle-unknown-linux-${LIBC}
+ exit ;;
+ riscv32:Linux:*:* | riscv64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ s390:Linux:*:* | s390x:Linux:*:*)
+ echo ${UNAME_MACHINE}-ibm-linux-${LIBC}
+ exit ;;
+ sh64*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ sh*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ sparc:Linux:*:* | sparc64:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ tile*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ vax:Linux:*:*)
+ echo ${UNAME_MACHINE}-dec-linux-${LIBC}
+ exit ;;
+ x86_64:Linux:*:*)
+ echo ${UNAME_MACHINE}-pc-linux-${LIBC}
+ exit ;;
+ xtensa*:Linux:*:*)
+ echo ${UNAME_MACHINE}-unknown-linux-${LIBC}
+ exit ;;
+ i*86:DYNIX/ptx:4*:*)
+ # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
+	# Earlier versions are messed up and put the nodename in both
+	# sysname and nodename.
+ echo i386-sequent-sysv4
+ exit ;;
+ i*86:UNIX_SV:4.2MP:2.*)
+ # Unixware is an offshoot of SVR4, but it has its own version
+ # number series starting with 2...
+ # I am not positive that other SVR4 systems won't match this,
+ # I just have to hope. -- rms.
+ # Use sysv4.2uw... so that sysv4* matches it.
+ echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
+ exit ;;
+ i*86:OS/2:*:*)
+ # If we were able to find `uname', then EMX Unix compatibility
+ # is probably installed.
+ echo ${UNAME_MACHINE}-pc-os2-emx
+ exit ;;
+ i*86:XTS-300:*:STOP)
+ echo ${UNAME_MACHINE}-unknown-stop
+ exit ;;
+ i*86:atheos:*:*)
+ echo ${UNAME_MACHINE}-unknown-atheos
+ exit ;;
+ i*86:syllable:*:*)
+ echo ${UNAME_MACHINE}-pc-syllable
+ exit ;;
+ i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*)
+ echo i386-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ i*86:*DOS:*:*)
+ echo ${UNAME_MACHINE}-pc-msdosdjgpp
+ exit ;;
+ i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
+ UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
+ if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
+ echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
+ else
+ echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
+ fi
+ exit ;;
+ i*86:*:5:[678]*)
+ # UnixWare 7.x, OpenUNIX and OpenServer 6.
+ case `/bin/uname -X | grep "^Machine"` in
+ *486*) UNAME_MACHINE=i486 ;;
+ *Pentium) UNAME_MACHINE=i586 ;;
+ *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
+ esac
+ echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
+ exit ;;
+ i*86:*:3.2:*)
+ if test -f /usr/options/cb.name; then
+ UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
+ echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
+ elif /bin/uname -X 2>/dev/null >/dev/null ; then
+ UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
+ (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
+ (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
+ && UNAME_MACHINE=i586
+ (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
+ && UNAME_MACHINE=i686
+ (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
+ && UNAME_MACHINE=i686
+ echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
+ else
+ echo ${UNAME_MACHINE}-pc-sysv32
+ fi
+ exit ;;
+ pc:*:*:*)
+ # Left here for compatibility:
+	# uname -m on DJGPP always prints 'pc' and says nothing about
+	# the processor, so we play it safe by assuming i586.
+ # Note: whatever this is, it MUST be the same as what config.sub
+ # prints for the "djgpp" host, or else GDB configure will decide that
+ # this is a cross-build.
+ echo i586-pc-msdosdjgpp
+ exit ;;
+ Intel:Mach:3*:*)
+ echo i386-pc-mach3
+ exit ;;
+ paragon:*:*:*)
+ echo i860-intel-osf1
+ exit ;;
+ i860:*:4.*:*) # i860-SVR4
+ if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
+ echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
+ else # Add other i860-SVR4 vendors below as they are discovered.
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
+ fi
+ exit ;;
+ mini*:CTIX:SYS*5:*)
+ # "miniframe"
+ echo m68010-convergent-sysv
+ exit ;;
+ mc68k:UNIX:SYSTEM5:3.51m)
+ echo m68k-convergent-sysv
+ exit ;;
+ M680?0:D-NIX:5.3:*)
+ echo m68k-diab-dnix
+ exit ;;
+ M68*:*:R3V[5678]*:*)
+ test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
+ 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
+ OS_REL=''
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+ 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4; exit; } ;;
+ NCR*:*:4.2:* | MPRAS*:*:4.2:*)
+ OS_REL='.3'
+ test -r /etc/.relid \
+ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
+ /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
+ && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; }
+ /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \
+ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
+ m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
+ echo m68k-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ mc68030:UNIX_System_V:4.*:*)
+ echo m68k-atari-sysv4
+ exit ;;
+ TSUNAMI:LynxOS:2.*:*)
+ echo sparc-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ rs6000:LynxOS:2.*:*)
+ echo rs6000-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*)
+ echo powerpc-unknown-lynxos${UNAME_RELEASE}
+ exit ;;
+ SM[BE]S:UNIX_SV:*:*)
+ echo mips-dde-sysv${UNAME_RELEASE}
+ exit ;;
+ RM*:ReliantUNIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ RM*:SINIX-*:*:*)
+ echo mips-sni-sysv4
+ exit ;;
+ *:SINIX-*:*:*)
+ if uname -p 2>/dev/null >/dev/null ; then
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ echo ${UNAME_MACHINE}-sni-sysv4
+ else
+ echo ns32k-sni-sysv
+ fi
+ exit ;;
+ PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
+ # says <Richard.M.Bartel@ccMail.Census.GOV>
+ echo i586-unisys-sysv4
+ exit ;;
+ *:UNIX_System_V:4*:FTX*)
+ # From Gerald Hewes <hewes@openmarket.com>.
+ # How about differentiating between stratus architectures? -djm
+ echo hppa1.1-stratus-sysv4
+ exit ;;
+ *:*:*:FTX*)
+ # From seanf@swdc.stratus.com.
+ echo i860-stratus-sysv4
+ exit ;;
+ i*86:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo ${UNAME_MACHINE}-stratus-vos
+ exit ;;
+ *:VOS:*:*)
+ # From Paul.Green@stratus.com.
+ echo hppa1.1-stratus-vos
+ exit ;;
+ mc68*:A/UX:*:*)
+ echo m68k-apple-aux${UNAME_RELEASE}
+ exit ;;
+ news*:NEWS-OS:6*:*)
+ echo mips-sony-newsos6
+ exit ;;
+ R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
+ if [ -d /usr/nec ]; then
+ echo mips-nec-sysv${UNAME_RELEASE}
+ else
+ echo mips-unknown-sysv${UNAME_RELEASE}
+ fi
+ exit ;;
+ BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
+ echo powerpc-be-beos
+ exit ;;
+ BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
+ echo powerpc-apple-beos
+ exit ;;
+ BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
+ echo i586-pc-beos
+ exit ;;
+ BePC:Haiku:*:*) # Haiku running on Intel PC compatible.
+ echo i586-pc-haiku
+ exit ;;
+ x86_64:Haiku:*:*)
+ echo x86_64-unknown-haiku
+ exit ;;
+ SX-4:SUPER-UX:*:*)
+ echo sx4-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-5:SUPER-UX:*:*)
+ echo sx5-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-6:SUPER-UX:*:*)
+ echo sx6-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-7:SUPER-UX:*:*)
+ echo sx7-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-8:SUPER-UX:*:*)
+ echo sx8-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-8R:SUPER-UX:*:*)
+ echo sx8r-nec-superux${UNAME_RELEASE}
+ exit ;;
+ SX-ACE:SUPER-UX:*:*)
+ echo sxace-nec-superux${UNAME_RELEASE}
+ exit ;;
+ Power*:Rhapsody:*:*)
+ echo powerpc-apple-rhapsody${UNAME_RELEASE}
+ exit ;;
+ *:Rhapsody:*:*)
+ echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
+ exit ;;
+ *:Darwin:*:*)
+ UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
+ eval $set_cc_for_build
+ if test "$UNAME_PROCESSOR" = unknown ; then
+ UNAME_PROCESSOR=powerpc
+ fi
+ if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then
+ if [ "$CC_FOR_BUILD" != no_compiler_found ]; then
+ if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \
+ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \
+ grep IS_64BIT_ARCH >/dev/null
+ then
+ case $UNAME_PROCESSOR in
+ i386) UNAME_PROCESSOR=x86_64 ;;
+ powerpc) UNAME_PROCESSOR=powerpc64 ;;
+ esac
+ fi
+ fi
+ elif test "$UNAME_PROCESSOR" = i386 ; then
+ # Avoid executing cc on OS X 10.9, as it ships with a stub
+ # that puts up a graphical alert prompting to install
+ # developer tools. Any system running Mac OS X 10.7 or
+ # later (Darwin 11 and later) is required to have a 64-bit
+ # processor. This is not true of the ARM version of Darwin
+ # that Apple uses in portable devices.
+ UNAME_PROCESSOR=x86_64
+ fi
+ echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
+ exit ;;
+ *:procnto*:*:* | *:QNX:[0123456789]*:*)
+ UNAME_PROCESSOR=`uname -p`
+ if test "$UNAME_PROCESSOR" = x86; then
+ UNAME_PROCESSOR=i386
+ UNAME_MACHINE=pc
+ fi
+ echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
+ exit ;;
+ *:QNX:*:4*)
+ echo i386-pc-qnx
+ exit ;;
+ NEO-?:NONSTOP_KERNEL:*:*)
+ echo neo-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSE-*:NONSTOP_KERNEL:*:*)
+ echo nse-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ NSR-?:NONSTOP_KERNEL:*:*)
+ echo nsr-tandem-nsk${UNAME_RELEASE}
+ exit ;;
+ *:NonStop-UX:*:*)
+ echo mips-compaq-nonstopux
+ exit ;;
+ BS2000:POSIX*:*:*)
+ echo bs2000-siemens-sysv
+ exit ;;
+ DS/*:UNIX_System_V:*:*)
+ echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
+ exit ;;
+ *:Plan9:*:*)
+ # "uname -m" is not consistent, so use $cputype instead. 386
+ # is converted to i386 for consistency with other x86
+ # operating systems.
+ if test "$cputype" = 386; then
+ UNAME_MACHINE=i386
+ else
+ UNAME_MACHINE="$cputype"
+ fi
+ echo ${UNAME_MACHINE}-unknown-plan9
+ exit ;;
+ *:TOPS-10:*:*)
+ echo pdp10-unknown-tops10
+ exit ;;
+ *:TENEX:*:*)
+ echo pdp10-unknown-tenex
+ exit ;;
+ KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
+ echo pdp10-dec-tops20
+ exit ;;
+ XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
+ echo pdp10-xkl-tops20
+ exit ;;
+ *:TOPS-20:*:*)
+ echo pdp10-unknown-tops20
+ exit ;;
+ *:ITS:*:*)
+ echo pdp10-unknown-its
+ exit ;;
+ SEI:*:*:SEIUX)
+ echo mips-sei-seiux${UNAME_RELEASE}
+ exit ;;
+ *:DragonFly:*:*)
+ echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
+ exit ;;
+ *:*VMS:*:*)
+ UNAME_MACHINE=`(uname -p) 2>/dev/null`
+ case "${UNAME_MACHINE}" in
+ A*) echo alpha-dec-vms ; exit ;;
+ I*) echo ia64-dec-vms ; exit ;;
+ V*) echo vax-dec-vms ; exit ;;
+ esac ;;
+ *:XENIX:*:SysV)
+ echo i386-pc-xenix
+ exit ;;
+ i*86:skyos:*:*)
+ echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE} | sed -e 's/ .*$//'`
+ exit ;;
+ i*86:rdos:*:*)
+ echo ${UNAME_MACHINE}-pc-rdos
+ exit ;;
+ i*86:AROS:*:*)
+ echo ${UNAME_MACHINE}-pc-aros
+ exit ;;
+ x86_64:VMkernel:*:*)
+ echo ${UNAME_MACHINE}-unknown-esx
+ exit ;;
+ amd64:Isilon\ OneFS:*:*)
+ echo x86_64-unknown-onefs
+ exit ;;
+esac
+
+cat >&2 <<EOF
+$0: unable to guess system type
+
+This script (version $timestamp) has failed to recognize the
+operating system you are using. If your script is old, overwrite
+config.guess and config.sub with the latest versions from:
+
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess
+and
+ http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub
+
+If $0 has already been updated, send the following data and any other
+information you think might be pertinent to config-patches@gnu.org so
+that support for your system can be added.
+
+config.guess timestamp = $timestamp
+
+uname -m = `(uname -m) 2>/dev/null || echo unknown`
+uname -r = `(uname -r) 2>/dev/null || echo unknown`
+uname -s = `(uname -s) 2>/dev/null || echo unknown`
+uname -v = `(uname -v) 2>/dev/null || echo unknown`
+
+/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
+/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
+
+hostinfo = `(hostinfo) 2>/dev/null`
+/bin/universe = `(/bin/universe) 2>/dev/null`
+/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
+/bin/arch = `(/bin/arch) 2>/dev/null`
+/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
+/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
+
+UNAME_MACHINE = ${UNAME_MACHINE}
+UNAME_RELEASE = ${UNAME_RELEASE}
+UNAME_SYSTEM = ${UNAME_SYSTEM}
+UNAME_VERSION = ${UNAME_VERSION}
+EOF
+
+exit 1
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
+>>>>>>> main
diff --git a/contrib/jemalloc/build-aux/config.sub b/contrib/jemalloc/build-aux/config.sub
new file mode 100755
index 000000000000..099dab005a9a
--- /dev/null
+++ b/contrib/jemalloc/build-aux/config.sub
@@ -0,0 +1,3684 @@
+<<<<<<< HEAD
+#! /bin/sh
+# Configuration validation subroutine script.
+# Copyright 1992-2021 Free Software Foundation, Inc.
+
+timestamp='2021-01-07'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <https://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
+
+
+# Please send patches to <config-patches@gnu.org>.
+#
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# You can get the latest version of this script from:
+# https://git.savannah.gnu.org/cgit/config.git/plain/config.sub
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support. The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
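+#
+# For example (illustrative inputs; the outputs follow from the tables
+# below):
+#   ./config.sub sun4            => sparc-sun-sunos4.1.1
+#   ./config.sub i686-linux      => i686-pc-linux-gnu
+#   ./config.sub decstation-3100 => mips-dec-ultrix4.2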
+
+me=$(echo "$0" | sed -e 's,.*/,,')
+
+usage="\
+Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS
+
+Canonicalize a configuration name.
+
+Options:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.sub ($timestamp)
+
+Copyright 1992-2021 Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help" >&2
+ exit 1 ;;
+
+ *local*)
+ # First pass through any local machine types.
+ echo "$1"
+ exit ;;
+
+ * )
+ break ;;
+ esac
+done
+
+case $# in
+ 0) echo "$me: missing argument$help" >&2
+ exit 1;;
+ 1) ;;
+ *) echo "$me: too many arguments$help" >&2
+ exit 1;;
+esac
+
+# Split fields of configuration type
+# shellcheck disable=SC2162
+IFS="-" read field1 field2 field3 field4 <<EOF
+$1
+EOF
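+
+# (For instance, "x86_64-pc-linux-gnu" splits into field1=x86_64,
+# field2=pc, field3=linux, field4=gnu; shorter names leave the trailing
+# fields empty.)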
+
+# Separate into logical components for further validation
+case $1 in
+ *-*-*-*-*)
+ echo Invalid configuration \`"$1"\': more than four components >&2
+ exit 1
+ ;;
+ *-*-*-*)
+ basic_machine=$field1-$field2
+ basic_os=$field3-$field4
+ ;;
+ *-*-*)
+		# Ambiguous: either COMPANY is present, or it was skipped and
+		# KERNEL-OS takes up two parts.
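+		# (e.g. "i386-linux-gnu" has maybe_os=linux-gnu, which matches
+		# linux-* below, so basic_machine=i386 and basic_os=linux-gnu)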
+ maybe_os=$field2-$field3
+ case $maybe_os in
+ nto-qnx* | linux-* | uclinux-uclibc* \
+ | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \
+ | netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \
+ | storm-chaos* | os2-emx* | rtmk-nova*)
+ basic_machine=$field1
+ basic_os=$maybe_os
+ ;;
+ android-linux)
+ basic_machine=$field1-unknown
+ basic_os=linux-android
+ ;;
+ *)
+ basic_machine=$field1-$field2
+ basic_os=$field3
+ ;;
+ esac
+ ;;
+ *-*)
+		# A lone config we happen to match that fits no other pattern
+ case $field1-$field2 in
+ decstation-3100)
+ basic_machine=mips-dec
+ basic_os=
+ ;;
+ *-*)
+			# The second component is usually, but not always, the OS
+ case $field2 in
+ # Prevent following clause from handling this valid os
+ sun*os*)
+ basic_machine=$field1
+ basic_os=$field2
+ ;;
+ # Manufacturers
+ dec* | mips* | sequent* | encore* | pc533* | sgi* | sony* \
+ | att* | 7300* | 3300* | delta* | motorola* | sun[234]* \
+ | unicom* | ibm* | next | hp | isi* | apollo | altos* \
+ | convergent* | ncr* | news | 32* | 3600* | 3100* \
+ | hitachi* | c[123]* | convex* | sun | crds | omron* | dg \
+ | ultra | tti* | harris | dolphin | highlevel | gould \
+ | cbm | ns | masscomp | apple | axis | knuth | cray \
+ | microblaze* | sim | cisco \
+ | oki | wec | wrs | winbond)
+ basic_machine=$field1-$field2
+ basic_os=
+ ;;
+ *)
+ basic_machine=$field1
+ basic_os=$field2
+ ;;
+ esac
+ ;;
+ esac
+ ;;
+ *)
+ # Convert single-component short-hands not valid as part of
+ # multi-component configurations.
+ case $field1 in
+ 386bsd)
+ basic_machine=i386-pc
+ basic_os=bsd
+ ;;
+ a29khif)
+ basic_machine=a29k-amd
+ basic_os=udi
+ ;;
+ adobe68k)
+ basic_machine=m68010-adobe
+ basic_os=scout
+ ;;
+ alliant)
+ basic_machine=fx80-alliant
+ basic_os=
+ ;;
+ altos | altos3068)
+ basic_machine=m68k-altos
+ basic_os=
+ ;;
+ am29k)
+ basic_machine=a29k-none
+ basic_os=bsd
+ ;;
+ amdahl)
+ basic_machine=580-amdahl
+ basic_os=sysv
+ ;;
+ amiga)
+ basic_machine=m68k-unknown
+ basic_os=
+ ;;
+ amigaos | amigados)
+ basic_machine=m68k-unknown
+ basic_os=amigaos
+ ;;
+ amigaunix | amix)
+ basic_machine=m68k-unknown
+ basic_os=sysv4
+ ;;
+ apollo68)
+ basic_machine=m68k-apollo
+ basic_os=sysv
+ ;;
+ apollo68bsd)
+ basic_machine=m68k-apollo
+ basic_os=bsd
+ ;;
+ aros)
+ basic_machine=i386-pc
+ basic_os=aros
+ ;;
+ aux)
+ basic_machine=m68k-apple
+ basic_os=aux
+ ;;
+ balance)
+ basic_machine=ns32k-sequent
+ basic_os=dynix
+ ;;
+ blackfin)
+ basic_machine=bfin-unknown
+ basic_os=linux
+ ;;
+ cegcc)
+ basic_machine=arm-unknown
+ basic_os=cegcc
+ ;;
+ convex-c1)
+ basic_machine=c1-convex
+ basic_os=bsd
+ ;;
+ convex-c2)
+ basic_machine=c2-convex
+ basic_os=bsd
+ ;;
+ convex-c32)
+ basic_machine=c32-convex
+ basic_os=bsd
+ ;;
+ convex-c34)
+ basic_machine=c34-convex
+ basic_os=bsd
+ ;;
+ convex-c38)
+ basic_machine=c38-convex
+ basic_os=bsd
+ ;;
+ cray)
+ basic_machine=j90-cray
+ basic_os=unicos
+ ;;
+ crds | unos)
+ basic_machine=m68k-crds
+ basic_os=
+ ;;
+ da30)
+ basic_machine=m68k-da30
+ basic_os=
+ ;;
+ decstation | pmax | pmin | dec3100 | decstatn)
+ basic_machine=mips-dec
+ basic_os=
+ ;;
+ delta88)
+ basic_machine=m88k-motorola
+ basic_os=sysv3
+ ;;
+ dicos)
+ basic_machine=i686-pc
+ basic_os=dicos
+ ;;
+ djgpp)
+ basic_machine=i586-pc
+ basic_os=msdosdjgpp
+ ;;
+ ebmon29k)
+ basic_machine=a29k-amd
+ basic_os=ebmon
+ ;;
+ es1800 | OSE68k | ose68k | ose | OSE)
+ basic_machine=m68k-ericsson
+ basic_os=ose
+ ;;
+ gmicro)
+ basic_machine=tron-gmicro
+ basic_os=sysv
+ ;;
+ go32)
+ basic_machine=i386-pc
+ basic_os=go32
+ ;;
+ h8300hms)
+ basic_machine=h8300-hitachi
+ basic_os=hms
+ ;;
+ h8300xray)
+ basic_machine=h8300-hitachi
+ basic_os=xray
+ ;;
+ h8500hms)
+ basic_machine=h8500-hitachi
+ basic_os=hms
+ ;;
+ harris)
+ basic_machine=m88k-harris
+ basic_os=sysv3
+ ;;
+ hp300 | hp300hpux)
+ basic_machine=m68k-hp
+ basic_os=hpux
+ ;;
+ hp300bsd)
+ basic_machine=m68k-hp
+ basic_os=bsd
+ ;;
+ hppaosf)
+ basic_machine=hppa1.1-hp
+ basic_os=osf
+ ;;
+ hppro)
+ basic_machine=hppa1.1-hp
+ basic_os=proelf
+ ;;
+ i386mach)
+ basic_machine=i386-mach
+ basic_os=mach
+ ;;
+ isi68 | isi)
+ basic_machine=m68k-isi
+ basic_os=sysv
+ ;;
+ m68knommu)
+ basic_machine=m68k-unknown
+ basic_os=linux
+ ;;
+ magnum | m3230)
+ basic_machine=mips-mips
+ basic_os=sysv
+ ;;
+ merlin)
+ basic_machine=ns32k-utek
+ basic_os=sysv
+ ;;
+ mingw64)
+ basic_machine=x86_64-pc
+ basic_os=mingw64
+ ;;
+ mingw32)
+ basic_machine=i686-pc
+ basic_os=mingw32
+ ;;
+ mingw32ce)
+ basic_machine=arm-unknown
+ basic_os=mingw32ce
+ ;;
+ monitor)
+ basic_machine=m68k-rom68k
+ basic_os=coff
+ ;;
+ morphos)
+ basic_machine=powerpc-unknown
+ basic_os=morphos
+ ;;
+ moxiebox)
+ basic_machine=moxie-unknown
+ basic_os=moxiebox
+ ;;
+ msdos)
+ basic_machine=i386-pc
+ basic_os=msdos
+ ;;
+ msys)
+ basic_machine=i686-pc
+ basic_os=msys
+ ;;
+ mvs)
+ basic_machine=i370-ibm
+ basic_os=mvs
+ ;;
+ nacl)
+ basic_machine=le32-unknown
+ basic_os=nacl
+ ;;
+ ncr3000)
+ basic_machine=i486-ncr
+ basic_os=sysv4
+ ;;
+ netbsd386)
+ basic_machine=i386-pc
+ basic_os=netbsd
+ ;;
+ netwinder)
+ basic_machine=armv4l-rebel
+ basic_os=linux
+ ;;
+ news | news700 | news800 | news900)
+ basic_machine=m68k-sony
+ basic_os=newsos
+ ;;
+ news1000)
+ basic_machine=m68030-sony
+ basic_os=newsos
+ ;;
+ necv70)
+ basic_machine=v70-nec
+ basic_os=sysv
+ ;;
+ nh3000)
+ basic_machine=m68k-harris
+ basic_os=cxux
+ ;;
+ nh[45]000)
+ basic_machine=m88k-harris
+ basic_os=cxux
+ ;;
+ nindy960)
+ basic_machine=i960-intel
+ basic_os=nindy
+ ;;
+ mon960)
+ basic_machine=i960-intel
+ basic_os=mon960
+ ;;
+ nonstopux)
+ basic_machine=mips-compaq
+ basic_os=nonstopux
+ ;;
+ os400)
+ basic_machine=powerpc-ibm
+ basic_os=os400
+ ;;
+ OSE68000 | ose68000)
+ basic_machine=m68000-ericsson
+ basic_os=ose
+ ;;
+ os68k)
+ basic_machine=m68k-none
+ basic_os=os68k
+ ;;
+ paragon)
+ basic_machine=i860-intel
+ basic_os=osf
+ ;;
+ parisc)
+ basic_machine=hppa-unknown
+ basic_os=linux
+ ;;
+ psp)
+ basic_machine=mipsallegrexel-sony
+ basic_os=psp
+ ;;
+ pw32)
+ basic_machine=i586-unknown
+ basic_os=pw32
+ ;;
+ rdos | rdos64)
+ basic_machine=x86_64-pc
+ basic_os=rdos
+ ;;
+ rdos32)
+ basic_machine=i386-pc
+ basic_os=rdos
+ ;;
+ rom68k)
+ basic_machine=m68k-rom68k
+ basic_os=coff
+ ;;
+ sa29200)
+ basic_machine=a29k-amd
+ basic_os=udi
+ ;;
+ sei)
+ basic_machine=mips-sei
+ basic_os=seiux
+ ;;
+ sequent)
+ basic_machine=i386-sequent
+ basic_os=
+ ;;
+ sps7)
+ basic_machine=m68k-bull
+ basic_os=sysv2
+ ;;
+ st2000)
+ basic_machine=m68k-tandem
+ basic_os=
+ ;;
+ stratus)
+ basic_machine=i860-stratus
+ basic_os=sysv4
+ ;;
+ sun2)
+ basic_machine=m68000-sun
+ basic_os=
+ ;;
+ sun2os3)
+ basic_machine=m68000-sun
+ basic_os=sunos3
+ ;;
+ sun2os4)
+ basic_machine=m68000-sun
+ basic_os=sunos4
+ ;;
+ sun3)
+ basic_machine=m68k-sun
+ basic_os=
+ ;;
+ sun3os3)
+ basic_machine=m68k-sun
+ basic_os=sunos3
+ ;;
+ sun3os4)
+ basic_machine=m68k-sun
+ basic_os=sunos4
+ ;;
+ sun4)
+ basic_machine=sparc-sun
+ basic_os=
+ ;;
+ sun4os3)
+ basic_machine=sparc-sun
+ basic_os=sunos3
+ ;;
+ sun4os4)
+ basic_machine=sparc-sun
+ basic_os=sunos4
+ ;;
+ sun4sol2)
+ basic_machine=sparc-sun
+ basic_os=solaris2
+ ;;
+ sun386 | sun386i | roadrunner)
+ basic_machine=i386-sun
+ basic_os=
+ ;;
+ sv1)
+ basic_machine=sv1-cray
+ basic_os=unicos
+ ;;
+ symmetry)
+ basic_machine=i386-sequent
+ basic_os=dynix
+ ;;
+ t3e)
+ basic_machine=alphaev5-cray
+ basic_os=unicos
+ ;;
+ t90)
+ basic_machine=t90-cray
+ basic_os=unicos
+ ;;
+ toad1)
+ basic_machine=pdp10-xkl
+ basic_os=tops20
+ ;;
+ tpf)
+ basic_machine=s390x-ibm
+ basic_os=tpf
+ ;;
+ udi29k)
+ basic_machine=a29k-amd
+ basic_os=udi
+ ;;
+ ultra3)
+ basic_machine=a29k-nyu
+ basic_os=sym1
+ ;;
+ v810 | necv810)
+ basic_machine=v810-nec
+ basic_os=none
+ ;;
+ vaxv)
+ basic_machine=vax-dec
+ basic_os=sysv
+ ;;
+ vms)
+ basic_machine=vax-dec
+ basic_os=vms
+ ;;
+ vsta)
+ basic_machine=i386-pc
+ basic_os=vsta
+ ;;
+ vxworks960)
+ basic_machine=i960-wrs
+ basic_os=vxworks
+ ;;
+ vxworks68)
+ basic_machine=m68k-wrs
+ basic_os=vxworks
+ ;;
+ vxworks29k)
+ basic_machine=a29k-wrs
+ basic_os=vxworks
+ ;;
+ xbox)
+ basic_machine=i686-pc
+ basic_os=mingw32
+ ;;
+ ymp)
+ basic_machine=ymp-cray
+ basic_os=unicos
+ ;;
+ *)
+ basic_machine=$1
+ basic_os=
+ ;;
+ esac
+ ;;
+esac
+
+# Decode 1-component or ad-hoc basic machines
+case $basic_machine in
+ # Here we handle the default manufacturer of certain CPU types. It is in
+ # some cases the only manufacturer, in others, it is the most popular.
+ w89k)
+ cpu=hppa1.1
+ vendor=winbond
+ ;;
+ op50n)
+ cpu=hppa1.1
+ vendor=oki
+ ;;
+ op60c)
+ cpu=hppa1.1
+ vendor=oki
+ ;;
+ ibm*)
+ cpu=i370
+ vendor=ibm
+ ;;
+ orion105)
+ cpu=clipper
+ vendor=highlevel
+ ;;
+ mac | mpw | mac-mpw)
+ cpu=m68k
+ vendor=apple
+ ;;
+ pmac | pmac-mpw)
+ cpu=powerpc
+ vendor=apple
+ ;;
+
+ # Recognize the various machine names and aliases which stand
+ # for a CPU type and a company and sometimes even an OS.
+ 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
+ cpu=m68000
+ vendor=att
+ ;;
+ 3b*)
+ cpu=we32k
+ vendor=att
+ ;;
+ bluegene*)
+ cpu=powerpc
+ vendor=ibm
+ basic_os=cnk
+ ;;
+ decsystem10* | dec10*)
+ cpu=pdp10
+ vendor=dec
+ basic_os=tops10
+ ;;
+ decsystem20* | dec20*)
+ cpu=pdp10
+ vendor=dec
+ basic_os=tops20
+ ;;
+ delta | 3300 | motorola-3300 | motorola-delta \
+ | 3300-motorola | delta-motorola)
+ cpu=m68k
+ vendor=motorola
+ ;;
+ dpx2*)
+ cpu=m68k
+ vendor=bull
+ basic_os=sysv3
+ ;;
+ encore | umax | mmax)
+ cpu=ns32k
+ vendor=encore
+ ;;
+ elxsi)
+ cpu=elxsi
+ vendor=elxsi
+ basic_os=${basic_os:-bsd}
+ ;;
+ fx2800)
+ cpu=i860
+ vendor=alliant
+ ;;
+ genix)
+ cpu=ns32k
+ vendor=ns
+ ;;
+ h3050r* | hiux*)
+ cpu=hppa1.1
+ vendor=hitachi
+ basic_os=hiuxwe2
+ ;;
+ hp3k9[0-9][0-9] | hp9[0-9][0-9])
+ cpu=hppa1.0
+ vendor=hp
+ ;;
+ hp9k2[0-9][0-9] | hp9k31[0-9])
+ cpu=m68000
+ vendor=hp
+ ;;
+ hp9k3[2-9][0-9])
+ cpu=m68k
+ vendor=hp
+ ;;
+ hp9k6[0-9][0-9] | hp6[0-9][0-9])
+ cpu=hppa1.0
+ vendor=hp
+ ;;
+ hp9k7[0-79][0-9] | hp7[0-79][0-9])
+ cpu=hppa1.1
+ vendor=hp
+ ;;
+ hp9k78[0-9] | hp78[0-9])
+ # FIXME: really hppa2.0-hp
+ cpu=hppa1.1
+ vendor=hp
+ ;;
+ hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
+ # FIXME: really hppa2.0-hp
+ cpu=hppa1.1
+ vendor=hp
+ ;;
+ hp9k8[0-9][13679] | hp8[0-9][13679])
+ cpu=hppa1.1
+ vendor=hp
+ ;;
+ hp9k8[0-9][0-9] | hp8[0-9][0-9])
+ cpu=hppa1.0
+ vendor=hp
+ ;;
+ i*86v32)
+ cpu=$(echo "$1" | sed -e 's/86.*/86/')
+ vendor=pc
+ basic_os=sysv32
+ ;;
+ i*86v4*)
+ cpu=$(echo "$1" | sed -e 's/86.*/86/')
+ vendor=pc
+ basic_os=sysv4
+ ;;
+ i*86v)
+ cpu=$(echo "$1" | sed -e 's/86.*/86/')
+ vendor=pc
+ basic_os=sysv
+ ;;
+ i*86sol2)
+ cpu=$(echo "$1" | sed -e 's/86.*/86/')
+ vendor=pc
+ basic_os=solaris2
+ ;;
+ j90 | j90-cray)
+ cpu=j90
+ vendor=cray
+ basic_os=${basic_os:-unicos}
+ ;;
+ iris | iris4d)
+ cpu=mips
+ vendor=sgi
+ case $basic_os in
+ irix*)
+ ;;
+ *)
+ basic_os=irix4
+ ;;
+ esac
+ ;;
+ miniframe)
+ cpu=m68000
+ vendor=convergent
+ ;;
+ *mint | mint[0-9]* | *MiNT | *MiNT[0-9]*)
+ cpu=m68k
+ vendor=atari
+ basic_os=mint
+ ;;
+ news-3600 | risc-news)
+ cpu=mips
+ vendor=sony
+ basic_os=newsos
+ ;;
+ next | m*-next)
+ cpu=m68k
+ vendor=next
+ case $basic_os in
+ openstep*)
+ ;;
+ nextstep*)
+ ;;
+ ns2*)
+ basic_os=nextstep2
+ ;;
+ *)
+ basic_os=nextstep3
+ ;;
+ esac
+ ;;
+ np1)
+ cpu=np1
+ vendor=gould
+ ;;
+ op50n-* | op60c-*)
+ cpu=hppa1.1
+ vendor=oki
+ basic_os=proelf
+ ;;
+ pa-hitachi)
+ cpu=hppa1.1
+ vendor=hitachi
+ basic_os=hiuxwe2
+ ;;
+ pbd)
+ cpu=sparc
+ vendor=tti
+ ;;
+ pbb)
+ cpu=m68k
+ vendor=tti
+ ;;
+ pc532)
+ cpu=ns32k
+ vendor=pc532
+ ;;
+ pn)
+ cpu=pn
+ vendor=gould
+ ;;
+ power)
+ cpu=power
+ vendor=ibm
+ ;;
+ ps2)
+ cpu=i386
+ vendor=ibm
+ ;;
+ rm[46]00)
+ cpu=mips
+ vendor=siemens
+ ;;
+ rtpc | rtpc-*)
+ cpu=romp
+ vendor=ibm
+ ;;
+ sde)
+ cpu=mipsisa32
+ vendor=sde
+ basic_os=${basic_os:-elf}
+ ;;
+ simso-wrs)
+ cpu=sparclite
+ vendor=wrs
+ basic_os=vxworks
+ ;;
+ tower | tower-32)
+ cpu=m68k
+ vendor=ncr
+ ;;
+ vpp*|vx|vx-*)
+ cpu=f301
+ vendor=fujitsu
+ ;;
+ w65)
+ cpu=w65
+ vendor=wdc
+ ;;
+ w89k-*)
+ cpu=hppa1.1
+ vendor=winbond
+ basic_os=proelf
+ ;;
+ none)
+ cpu=none
+ vendor=none
+ ;;
+ leon|leon[3-9])
+ cpu=sparc
+ vendor=$basic_machine
+ ;;
+ leon-*|leon[3-9]-*)
+ cpu=sparc
+ vendor=$(echo "$basic_machine" | sed 's/-.*//')
+ ;;
+
+ *-*)
+ # shellcheck disable=SC2162
+ IFS="-" read cpu vendor <<EOF
+$basic_machine
+EOF
+ ;;
+ # We use `pc' rather than `unknown'
+ # because (1) that's what they normally are, and
+ # (2) the word "unknown" tends to confuse beginning users.
+ i*86 | x86_64)
+ cpu=$basic_machine
+ vendor=pc
+ ;;
+	# These rules are duplicated from below for the sake of the special case
+	# above; i.e. things that normalize to x86 arches should also default to "pc"
+ pc98)
+ cpu=i386
+ vendor=pc
+ ;;
+ x64 | amd64)
+ cpu=x86_64
+ vendor=pc
+ ;;
+ # Recognize the basic CPU types without company name.
+ *)
+ cpu=$basic_machine
+ vendor=unknown
+ ;;
+esac
+
+unset -v basic_machine
+
+# Decode basic machines in the full and proper CPU-Company form.
+case $cpu-$vendor in
+ # Here we handle the default manufacturer of certain CPU types in canonical form. It is in
+ # some cases the only manufacturer, in others, it is the most popular.
+ craynv-unknown)
+ vendor=cray
+ basic_os=${basic_os:-unicosmp}
+ ;;
+ c90-unknown | c90-cray)
+ vendor=cray
+		basic_os=${basic_os:-unicos}
+ ;;
+ fx80-unknown)
+ vendor=alliant
+ ;;
+ romp-unknown)
+ vendor=ibm
+ ;;
+ mmix-unknown)
+ vendor=knuth
+ ;;
+ microblaze-unknown | microblazeel-unknown)
+ vendor=xilinx
+ ;;
+ rs6000-unknown)
+ vendor=ibm
+ ;;
+ vax-unknown)
+ vendor=dec
+ ;;
+ pdp11-unknown)
+ vendor=dec
+ ;;
+ we32k-unknown)
+ vendor=att
+ ;;
+ cydra-unknown)
+ vendor=cydrome
+ ;;
+ i370-ibm*)
+ vendor=ibm
+ ;;
+ orion-unknown)
+ vendor=highlevel
+ ;;
+ xps-unknown | xps100-unknown)
+ cpu=xps100
+ vendor=honeywell
+ ;;
+
+ # Here we normalize CPU types with a missing or matching vendor
+ dpx20-unknown | dpx20-bull)
+ cpu=rs6000
+ vendor=bull
+ basic_os=${basic_os:-bosx}
+ ;;
+
+ # Here we normalize CPU types irrespective of the vendor
+ amd64-*)
+ cpu=x86_64
+ ;;
+ blackfin-*)
+ cpu=bfin
+ basic_os=linux
+ ;;
+ c54x-*)
+ cpu=tic54x
+ ;;
+ c55x-*)
+ cpu=tic55x
+ ;;
+ c6x-*)
+ cpu=tic6x
+ ;;
+ e500v[12]-*)
+ cpu=powerpc
+ basic_os=${basic_os}"spe"
+ ;;
+ mips3*-*)
+ cpu=mips64
+ ;;
+ ms1-*)
+ cpu=mt
+ ;;
+ m68knommu-*)
+ cpu=m68k
+ basic_os=linux
+ ;;
+ m9s12z-* | m68hcs12z-* | hcs12z-* | s12z-*)
+ cpu=s12z
+ ;;
+ openrisc-*)
+ cpu=or32
+ ;;
+ parisc-*)
+ cpu=hppa
+ basic_os=linux
+ ;;
+ pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
+ cpu=i586
+ ;;
+ pentiumpro-* | p6-* | 6x86-* | athlon-* | athalon_*-*)
+ cpu=i686
+ ;;
+ pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
+ cpu=i686
+ ;;
+ pentium4-*)
+ cpu=i786
+ ;;
+ pc98-*)
+ cpu=i386
+ ;;
+ ppc-* | ppcbe-*)
+ cpu=powerpc
+ ;;
+ ppcle-* | powerpclittle-*)
+ cpu=powerpcle
+ ;;
+ ppc64-*)
+ cpu=powerpc64
+ ;;
+ ppc64le-* | powerpc64little-*)
+ cpu=powerpc64le
+ ;;
+ sb1-*)
+ cpu=mipsisa64sb1
+ ;;
+ sb1el-*)
+ cpu=mipsisa64sb1el
+ ;;
+ sh5e[lb]-*)
+ cpu=$(echo "$cpu" | sed 's/^\(sh.\)e\(.\)$/\1\2e/')
+ ;;
+ spur-*)
+ cpu=spur
+ ;;
+ strongarm-* | thumb-*)
+ cpu=arm
+ ;;
+ tx39-*)
+ cpu=mipstx39
+ ;;
+ tx39el-*)
+ cpu=mipstx39el
+ ;;
+ x64-*)
+ cpu=x86_64
+ ;;
+ xscale-* | xscalee[bl]-*)
+ cpu=$(echo "$cpu" | sed 's/^xscale/arm/')
+ ;;
+ arm64-*)
+ cpu=aarch64
+ ;;
+
+ # Recognize the canonical CPU Types that limit and/or modify the
+ # company names they are paired with.
+ cr16-*)
+ basic_os=${basic_os:-elf}
+ ;;
+ crisv32-* | etraxfs*-*)
+ cpu=crisv32
+ vendor=axis
+ ;;
+ cris-* | etrax*-*)
+ cpu=cris
+ vendor=axis
+ ;;
+ crx-*)
+ basic_os=${basic_os:-elf}
+ ;;
+ neo-tandem)
+ cpu=neo
+ vendor=tandem
+ ;;
+ nse-tandem)
+ cpu=nse
+ vendor=tandem
+ ;;
+ nsr-tandem)
+ cpu=nsr
+ vendor=tandem
+ ;;
+ nsv-tandem)
+ cpu=nsv
+ vendor=tandem
+ ;;
+ nsx-tandem)
+ cpu=nsx
+ vendor=tandem
+ ;;
+ mipsallegrexel-sony)
+ cpu=mipsallegrexel
+ vendor=sony
+ ;;
+ tile*-*)
+ basic_os=${basic_os:-linux-gnu}
+ ;;
+
+ *)
+ # Recognize the canonical CPU types that are allowed with any
+ # company name.
+ case $cpu in
+ 1750a | 580 \
+ | a29k \
+ | aarch64 | aarch64_be \
+ | abacus \
+ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] \
+ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] \
+ | alphapca5[67] | alpha64pca5[67] \
+ | am33_2.0 \
+ | amdgcn \
+ | arc | arceb \
+ | arm | arm[lb]e | arme[lb] | armv* \
+ | avr | avr32 \
+ | asmjs \
+ | ba \
+ | be32 | be64 \
+ | bfin | bpf | bs2000 \
+ | c[123]* | c30 | [cjt]90 | c4x \
+ | c8051 | clipper | craynv | csky | cydra \
+ | d10v | d30v | dlx | dsp16xx \
+ | e2k | elxsi | epiphany \
+ | f30[01] | f700 | fido | fr30 | frv | ft32 | fx80 \
+ | h8300 | h8500 \
+ | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+ | hexagon \
+ | i370 | i*86 | i860 | i960 | ia16 | ia64 \
+ | ip2k | iq2000 \
+ | k1om \
+ | le32 | le64 \
+ | lm32 \
+ | loongarch32 | loongarch64 | loongarchx32 \
+ | m32c | m32r | m32rle \
+ | m5200 | m68000 | m680[012346]0 | m68360 | m683?2 | m68k \
+ | m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x \
+ | m88110 | m88k | maxq | mb | mcore | mep | metag \
+ | microblaze | microblazeel \
+ | mips | mipsbe | mipseb | mipsel | mipsle \
+ | mips16 \
+ | mips64 | mips64eb | mips64el \
+ | mips64octeon | mips64octeonel \
+ | mips64orion | mips64orionel \
+ | mips64r5900 | mips64r5900el \
+ | mips64vr | mips64vrel \
+ | mips64vr4100 | mips64vr4100el \
+ | mips64vr4300 | mips64vr4300el \
+ | mips64vr5000 | mips64vr5000el \
+ | mips64vr5900 | mips64vr5900el \
+ | mipsisa32 | mipsisa32el \
+ | mipsisa32r2 | mipsisa32r2el \
+ | mipsisa32r6 | mipsisa32r6el \
+ | mipsisa64 | mipsisa64el \
+ | mipsisa64r2 | mipsisa64r2el \
+ | mipsisa64r6 | mipsisa64r6el \
+ | mipsisa64sb1 | mipsisa64sb1el \
+ | mipsisa64sr71k | mipsisa64sr71kel \
+ | mipsr5900 | mipsr5900el \
+ | mipstx39 | mipstx39el \
+ | mmix \
+ | mn10200 | mn10300 \
+ | moxie \
+ | mt \
+ | msp430 \
+ | nds32 | nds32le | nds32be \
+ | nfp \
+ | nios | nios2 | nios2eb | nios2el \
+ | none | np1 | ns16k | ns32k | nvptx \
+ | open8 \
+ | or1k* \
+ | or32 \
+ | orion \
+ | picochip \
+ | pdp10 | pdp11 | pj | pjl | pn | power \
+ | powerpc | powerpc64 | powerpc64le | powerpcle | powerpcspe \
+ | pru \
+ | pyramid \
+ | riscv | riscv32 | riscv32be | riscv64 | riscv64be \
+ | rl78 | romp | rs6000 | rx \
+ | s390 | s390x \
+ | score \
+ | sh | shl \
+ | sh[1234] | sh[24]a | sh[24]ae[lb] | sh[23]e | she[lb] | sh[lb]e \
+ | sh[1234]e[lb] | sh[12345][lb]e | sh[23]ele | sh64 | sh64le \
+ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet \
+ | sparclite \
+ | sparcv8 | sparcv9 | sparcv9b | sparcv9v | sv1 | sx* \
+ | spu \
+ | tahoe \
+ | thumbv7* \
+ | tic30 | tic4x | tic54x | tic55x | tic6x | tic80 \
+ | tron \
+ | ubicom32 \
+ | v70 | v850 | v850e | v850e1 | v850es | v850e2 | v850e2v3 \
+ | vax \
+ | visium \
+ | w65 \
+ | wasm32 | wasm64 \
+ | we32k \
+ | x86 | x86_64 | xc16x | xgate | xps100 \
+ | xstormy16 | xtensa* \
+ | ymp \
+ | z8k | z80)
+ ;;
+
+ *)
+ echo Invalid configuration \`"$1"\': machine \`"$cpu-$vendor"\' not recognized 1>&2
+ exit 1
+ ;;
+ esac
+ ;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $vendor in
+ digital*)
+ vendor=dec
+ ;;
+ commodore*)
+ vendor=cbm
+ ;;
+ *)
+ ;;
+esac
+
+# Decode manufacturer-specific aliases for certain operating systems.
+
+if test x$basic_os != x
+then
+
+# First recognize some ad-hoc cases, or perhaps split kernel-os, or else just
+# set os.
+case $basic_os in
+ gnu/linux*)
+ kernel=linux
+ os=$(echo $basic_os | sed -e 's|gnu/linux|gnu|')
+ ;;
+ os2-emx)
+ kernel=os2
+ os=$(echo $basic_os | sed -e 's|os2-emx|emx|')
+ ;;
+ nto-qnx*)
+ kernel=nto
+ os=$(echo $basic_os | sed -e 's|nto-qnx|qnx|')
+ ;;
+ *-*)
+ # shellcheck disable=SC2162
+ IFS="-" read kernel os <<EOF
+$basic_os
+EOF
+ ;;
+	# Default OS when only a kernel was specified
+ nto*)
+ kernel=nto
+ os=$(echo $basic_os | sed -e 's|nto|qnx|')
+ ;;
+ linux*)
+ kernel=linux
+ os=$(echo $basic_os | sed -e 's|linux|gnu|')
+ ;;
+ *)
+ kernel=
+ os=$basic_os
+ ;;
+esac
+
+# Now, normalize the OS (knowing we just have one component, it's not a kernel,
+# etc.)
+case $os in
+ # First match some system type aliases that might get confused
+ # with valid system types.
+ # solaris* is a basic system type, with this one exception.
+ auroraux)
+ os=auroraux
+ ;;
+ bluegene*)
+ os=cnk
+ ;;
+ solaris1 | solaris1.*)
+ os=$(echo $os | sed -e 's|solaris1|sunos4|')
+ ;;
+ solaris)
+ os=solaris2
+ ;;
+ unixware*)
+ os=sysv4.2uw
+ ;;
+ # es1800 is here to avoid being matched by es* (a different OS)
+ es1800*)
+ os=ose
+ ;;
+ # Some version numbers need modification
+ chorusos*)
+ os=chorusos
+ ;;
+ isc)
+ os=isc2.2
+ ;;
+ sco6)
+ os=sco5v6
+ ;;
+ sco5)
+ os=sco3.2v5
+ ;;
+ sco4)
+ os=sco3.2v4
+ ;;
+ sco3.2.[4-9]*)
+ os=$(echo $os | sed -e 's/sco3.2./sco3.2v/')
+ ;;
+ sco*v* | scout)
+ # Don't match below
+ ;;
+ sco*)
+ os=sco3.2v2
+ ;;
+ psos*)
+ os=psos
+ ;;
+ qnx*)
+ os=qnx
+ ;;
+ hiux*)
+ os=hiuxwe2
+ ;;
+ lynx*178)
+ os=lynxos178
+ ;;
+ lynx*5)
+ os=lynxos5
+ ;;
+ lynxos*)
+ # don't get caught up in next wildcard
+ ;;
+ lynx*)
+ os=lynxos
+ ;;
+ mac[0-9]*)
+ os=$(echo "$os" | sed -e 's|mac|macos|')
+ ;;
+ opened*)
+ os=openedition
+ ;;
+ os400*)
+ os=os400
+ ;;
+ sunos5*)
+ os=$(echo "$os" | sed -e 's|sunos5|solaris2|')
+ ;;
+ sunos6*)
+ os=$(echo "$os" | sed -e 's|sunos6|solaris3|')
+ ;;
+ wince*)
+ os=wince
+ ;;
+ utek*)
+ os=bsd
+ ;;
+ dynix*)
+ os=bsd
+ ;;
+ acis*)
+ os=aos
+ ;;
+ atheos*)
+ os=atheos
+ ;;
+ syllable*)
+ os=syllable
+ ;;
+ 386bsd)
+ os=bsd
+ ;;
+ ctix* | uts*)
+ os=sysv
+ ;;
+ nova*)
+ os=rtmk-nova
+ ;;
+ ns2)
+ os=nextstep2
+ ;;
+ # Preserve the version number of sinix5.
+ sinix5.*)
+ os=$(echo $os | sed -e 's|sinix|sysv|')
+ ;;
+ sinix*)
+ os=sysv4
+ ;;
+ tpf*)
+ os=tpf
+ ;;
+ triton*)
+ os=sysv3
+ ;;
+ oss*)
+ os=sysv3
+ ;;
+ svr4*)
+ os=sysv4
+ ;;
+ svr3)
+ os=sysv3
+ ;;
+ sysvr4)
+ os=sysv4
+ ;;
+ ose*)
+ os=ose
+ ;;
+ *mint | mint[0-9]* | *MiNT | MiNT[0-9]*)
+ os=mint
+ ;;
+ dicos*)
+ os=dicos
+ ;;
+ pikeos*)
+		# Until a real need for OS-specific support of
+		# particular features comes up, bare-metal
+		# configurations are quite functional.
+ case $cpu in
+ arm*)
+ os=eabi
+ ;;
+ *)
+ os=elf
+ ;;
+ esac
+ ;;
+ *)
+		# No normalization here; whether the OS is accepted is checked below.
+ ;;
+esac
+
+else
+
+# Here we handle the default operating systems that come with various machines.
+# The value should be what the vendor currently ships out the door with their
+# machine, or, put another way, the most popular OS provided with the machine.
+
+# Note that if you're going to try to match "-MANUFACTURER" here (say,
+# "-sun"), then you have to tell the case statement up towards the top
+# that MANUFACTURER isn't an operating system. Otherwise, code above
+# will signal an error saying that MANUFACTURER isn't an operating
+# system, and we'll never get to this point.
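+#
+# (A bare "sparc-sun", for instance, reaches this point with an empty
+# $basic_os and picks up sunos4.1.1 from the table below.)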
+
+kernel=
+case $cpu-$vendor in
+ score-*)
+ os=elf
+ ;;
+ spu-*)
+ os=elf
+ ;;
+ *-acorn)
+ os=riscix1.2
+ ;;
+ arm*-rebel)
+ kernel=linux
+ os=gnu
+ ;;
+ arm*-semi)
+ os=aout
+ ;;
+ c4x-* | tic4x-*)
+ os=coff
+ ;;
+ c8051-*)
+ os=elf
+ ;;
+ clipper-intergraph)
+ os=clix
+ ;;
+ hexagon-*)
+ os=elf
+ ;;
+ tic54x-*)
+ os=coff
+ ;;
+ tic55x-*)
+ os=coff
+ ;;
+ tic6x-*)
+ os=coff
+ ;;
+ # This must come before the *-dec entry.
+ pdp10-*)
+ os=tops20
+ ;;
+ pdp11-*)
+ os=none
+ ;;
+ *-dec | vax-*)
+ os=ultrix4.2
+ ;;
+ m68*-apollo)
+ os=domain
+ ;;
+ i386-sun)
+ os=sunos4.0.2
+ ;;
+ m68000-sun)
+ os=sunos3
+ ;;
+ m68*-cisco)
+ os=aout
+ ;;
+ mep-*)
+ os=elf
+ ;;
+ mips*-cisco)
+ os=elf
+ ;;
+ mips*-*)
+ os=elf
+ ;;
+ or32-*)
+ os=coff
+ ;;
+ *-tti) # must be before sparc entry or we get the wrong os.
+ os=sysv3
+ ;;
+ sparc-* | *-sun)
+ os=sunos4.1.1
+ ;;
+ pru-*)
+ os=elf
+ ;;
+ *-be)
+ os=beos
+ ;;
+ *-ibm)
+ os=aix
+ ;;
+ *-knuth)
+ os=mmixware
+ ;;
+ *-wec)
+ os=proelf
+ ;;
+ *-winbond)
+ os=proelf
+ ;;
+ *-oki)
+ os=proelf
+ ;;
+ *-hp)
+ os=hpux
+ ;;
+ *-hitachi)
+ os=hiux
+ ;;
+ i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
+ os=sysv
+ ;;
+ *-cbm)
+ os=amigaos
+ ;;
+ *-dg)
+ os=dgux
+ ;;
+ *-dolphin)
+ os=sysv3
+ ;;
+ m68k-ccur)
+ os=rtu
+ ;;
+ m88k-omron*)
+ os=luna
+ ;;
+ *-next)
+ os=nextstep
+ ;;
+ *-sequent)
+ os=ptx
+ ;;
+ *-crds)
+ os=unos
+ ;;
+ *-ns)
+ os=genix
+ ;;
+ i370-*)
+ os=mvs
+ ;;
+ *-gould)
+ os=sysv
+ ;;
+ *-highlevel)
+ os=bsd
+ ;;
+ *-encore)
+ os=bsd
+ ;;
+ *-sgi)
+ os=irix
+ ;;
+ *-siemens)
+ os=sysv4
+ ;;
+ *-masscomp)
+ os=rtu
+ ;;
+ f30[01]-fujitsu | f700-fujitsu)
+ os=uxpv
+ ;;
+ *-rom68k)
+ os=coff
+ ;;
+ *-*bug)
+ os=coff
+ ;;
+ *-apple)
+ os=macos
+ ;;
+ *-atari*)
+ os=mint
+ ;;
+ *-wrs)
+ os=vxworks
+ ;;
+ *)
+ os=none
+ ;;
+esac
+
+fi
+
+# Now, validate our (potentially fixed-up) OS.
+case $os in
+ # Sometimes we do "kernel-abi", so those need to count as OSes.
+ musl* | newlib* | uclibc*)
+ ;;
+ # Likewise for "kernel-libc"
+ eabi* | gnueabi*)
+ ;;
+ # Now accept the basic system types.
+	# The portable systems come first.
+ # Each alternative MUST end in a * to match a version number.
+ gnu* | android* | bsd* | mach* | minix* | genix* | ultrix* | irix* \
+ | *vms* | esix* | aix* | cnk* | sunos | sunos[34]* \
+ | hpux* | unos* | osf* | luna* | dgux* | auroraux* | solaris* \
+ | sym* | plan9* | psp* | sim* | xray* | os68k* | v88r* \
+ | hiux* | abug | nacl* | netware* | windows* \
+ | os9* | macos* | osx* | ios* \
+ | mpw* | magic* | mmixware* | mon960* | lnews* \
+ | amigaos* | amigados* | msdos* | newsos* | unicos* | aof* \
+ | aos* | aros* | cloudabi* | sortix* | twizzler* \
+ | nindy* | vxsim* | vxworks* | ebmon* | hms* | mvs* \
+ | clix* | riscos* | uniplus* | iris* | isc* | rtu* | xenix* \
+ | mirbsd* | netbsd* | dicos* | openedition* | ose* \
+ | bitrig* | openbsd* | solidbsd* | libertybsd* | os108* \
+ | ekkobsd* | freebsd* | riscix* | lynxos* | os400* \
+ | bosx* | nextstep* | cxux* | aout* | elf* | oabi* \
+ | ptx* | coff* | ecoff* | winnt* | domain* | vsta* \
+ | udi* | lites* | ieee* | go32* | aux* | hcos* \
+ | chorusrdb* | cegcc* | glidix* \
+ | cygwin* | msys* | pe* | moss* | proelf* | rtems* \
+ | midipix* | mingw32* | mingw64* | mint* \
+ | uxpv* | beos* | mpeix* | udk* | moxiebox* \
+ | interix* | uwin* | mks* | rhapsody* | darwin* \
+ | openstep* | oskit* | conix* | pw32* | nonstopux* \
+ | storm-chaos* | tops10* | tenex* | tops20* | its* \
+ | os2* | vos* | palmos* | uclinux* | nucleus* | morphos* \
+ | scout* | superux* | sysv* | rtmk* | tpf* | windiss* \
+ | powermax* | dnix* | nx6 | nx7 | sei* | dragonfly* \
+ | skyos* | haiku* | rdos* | toppers* | drops* | es* \
+ | onefs* | tirtos* | phoenix* | fuchsia* | redox* | bme* \
+ | midnightbsd* | amdhsa* | unleashed* | emscripten* | wasi* \
+ | nsk* | powerunix* | genode* | zvmoe* | qnx* | emx*)
+ ;;
+ # This one is extra strict with allowed versions
+ sco3.2v2 | sco3.2v[4-9]* | sco5v6*)
+ # Don't forget version if it is 3.2v4 or newer.
+ ;;
+ none)
+ ;;
+ *)
+ echo Invalid configuration \`"$1"\': OS \`"$os"\' not recognized 1>&2
+ exit 1
+ ;;
+esac
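+
+# For example (illustrative): `x86_64-unknown-freebsd13.1' passes the check
+# above via the `freebsd*' alternative, while a made-up
+# `x86_64-unknown-nosuchos' is rejected here with "OS `nosuchos' not
+# recognized".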
+
+# As a final step for OS-related things, validate the OS-kernel combination
+# (given a valid OS), if there is a kernel.
+case $kernel-$os in
+ linux-gnu* | linux-dietlibc* | linux-android* | linux-newlib* | linux-musl* | linux-uclibc* )
+ ;;
+ uclinux-uclibc* )
+ ;;
+ -dietlibc* | -newlib* | -musl* | -uclibc* )
+ # These are just libc implementations, not actual OSes, and thus
+ # require a kernel.
+ echo "Invalid configuration \`$1': libc \`$os' needs explicit kernel." 1>&2
+ exit 1
+ ;;
+ kfreebsd*-gnu* | kopensolaris*-gnu*)
+ ;;
+ nto-qnx*)
+ ;;
+ os2-emx)
+ ;;
+ *-eabi* | *-gnueabi*)
+ ;;
+ -*)
+ # Blank kernel with real OS is always fine.
+ ;;
+ *-*)
+ echo "Invalid configuration \`$1': Kernel \`$kernel' not known to work with OS \`$os'." 1>&2
+ exit 1
+ ;;
+esac
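+
+# For example (illustrative): `x86_64-pc-linux-musl' is accepted here with
+# kernel=linux and os=musl, while a bare `x86_64-pc-musl' is rejected above
+# because musl alone names a libc and needs an explicit kernel.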
+
+# Here we handle the case where we know the OS and the CPU type, but not the
+# manufacturer.  We pick the logical manufacturer.
+case $vendor in
+ unknown)
+ case $cpu-$os in
+ *-riscix*)
+ vendor=acorn
+ ;;
+ *-sunos*)
+ vendor=sun
+ ;;
+ *-cnk* | *-aix*)
+ vendor=ibm
+ ;;
+ *-beos*)
+ vendor=be
+ ;;
+ *-hpux*)
+ vendor=hp
+ ;;
+ *-mpeix*)
+ vendor=hp
+ ;;
+ *-hiux*)
+ vendor=hitachi
+ ;;
+ *-unos*)
+ vendor=crds
+ ;;
+ *-dgux*)
+ vendor=dg
+ ;;
+ *-luna*)
+ vendor=omron
+ ;;
+ *-genix*)
+ vendor=ns
+ ;;
+ *-clix*)
+ vendor=intergraph
+ ;;
+ *-mvs* | *-opened*)
+ vendor=ibm
+ ;;
+ *-os400*)
+ vendor=ibm
+ ;;
+ s390-* | s390x-*)
+ vendor=ibm
+ ;;
+ *-ptx*)
+ vendor=sequent
+ ;;
+ *-tpf*)
+ vendor=ibm
+ ;;
+ *-vxsim* | *-vxworks* | *-windiss*)
+ vendor=wrs
+ ;;
+ *-aux*)
+ vendor=apple
+ ;;
+ *-hms*)
+ vendor=hitachi
+ ;;
+ *-mpw* | *-macos*)
+ vendor=apple
+ ;;
+ *-*mint | *-mint[0-9]* | *-*MiNT | *-MiNT[0-9]*)
+ vendor=atari
+ ;;
+ *-vos*)
+ vendor=stratus
+ ;;
+ esac
+ ;;
+esac
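+
+# For example (illustrative): a result of cpu=sparc vendor=unknown
+# os=sunos4.1.1 has its vendor rewritten to `sun' by the `*-sunos*' entry
+# above, while an OS with no entry leaves the vendor as `unknown'.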
+
+echo "$cpu-$vendor-${kernel:+$kernel-}$os"
+exit
+
+# Local variables:
+# eval: (add-hook 'before-save-hook 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
+||||||| dec341af7695
+=======
+#! /bin/sh
+# Configuration validation subroutine script.
+# Copyright 1992-2016 Free Software Foundation, Inc.
+
+timestamp='2016-11-04'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that
+# program. This Exception is an additional permission under section 7
+# of the GNU General Public License, version 3 ("GPLv3").
+
+
+# Please send patches to <config-patches@gnu.org>.
+#
+# Configuration subroutine to validate and canonicalize a configuration type.
+# Supply the specified configuration type as an argument.
+# If it is invalid, we print an error message on stderr and exit with code 1.
+# Otherwise, we print the canonical config type on stdout and succeed.
+
+# You can get the latest version of this script from:
+# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub
+
+# This file is supposed to be the same for all GNU packages
+# and recognize all the CPU types, system types and aliases
+# that are meaningful with *any* GNU software.
+# Each package is responsible for reporting which valid configurations
+# it does not support. The user should be able to distinguish
+# a failure to support a valid configuration from a meaningless
+# configuration.
+
+# The goal of this file is to map all the various variations of a given
+# machine specification into a single specification in the form:
+# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
+# or in some cases, the newer four-part form:
+# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
+# It is wrong to echo any other type of specification.
+
+me=`echo "$0" | sed -e 's,.*/,,'`
+
+usage="\
+Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS
+
+Canonicalize a configuration name.
+
+Operation modes:
+ -h, --help print this help, then exit
+ -t, --time-stamp print date of last modification, then exit
+ -v, --version print version number, then exit
+
+Report bugs and patches to <config-patches@gnu.org>."
+
+version="\
+GNU config.sub ($timestamp)
+
+Copyright 1992-2016 Free Software Foundation, Inc.
+
+This is free software; see the source for copying conditions. There is NO
+warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
+
+help="
+Try \`$me --help' for more information."
+
+# Parse command line
+while test $# -gt 0 ; do
+ case $1 in
+ --time-stamp | --time* | -t )
+ echo "$timestamp" ; exit ;;
+ --version | -v )
+ echo "$version" ; exit ;;
+ --help | --h* | -h )
+ echo "$usage"; exit ;;
+ -- ) # Stop option processing
+ shift; break ;;
+ - ) # Use stdin as input.
+ break ;;
+ -* )
+ echo "$me: invalid option $1$help"
+ exit 1 ;;
+
+ *local*)
+ # First pass through any local machine types.
+ echo $1
+ exit ;;
+
+ * )
+ break ;;
+ esac
+done
+
+case $# in
+ 0) echo "$me: missing argument$help" >&2
+ exit 1;;
+ 1) ;;
+ *) echo "$me: too many arguments$help" >&2
+ exit 1;;
+esac
+
+# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
+# Here we must recognize all the valid KERNEL-OS combinations.
+maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
+case $maybe_os in
+ nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
+ linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
+ knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \
+ kopensolaris*-gnu* | cloudabi*-eabi* | \
+ storm-chaos* | os2-emx* | rtmk-nova*)
+ os=-$maybe_os
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
+ ;;
+ android-linux)
+ os=-linux-android
+ basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown
+ ;;
+ *)
+ basic_machine=`echo $1 | sed 's/-[^-]*$//'`
+ if [ $basic_machine != $1 ]
+ then os=`echo $1 | sed 's/.*-/-/'`
+ else os=; fi
+ ;;
+esac
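+
+# For example (illustrative): for $1 = i686-pc-linux-gnu the KERNEL-OS tail
+# matches `linux-gnu*' above, giving basic_machine=i686-pc and
+# os=-linux-gnu; a bare alias such as `decstation' leaves os empty.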
+
+### Let's recognize common machines as not being operating systems so
+### that things like config.sub decstation-3100 work. We also
+### recognize some manufacturers as not being operating systems, so we
+### can provide default operating systems below.
+case $os in
+ -sun*os*)
+ # Prevent following clause from handling this invalid input.
+ ;;
+ -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
+ -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
+ -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
+ -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
+ -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
+ -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
+ -apple | -axis | -knuth | -cray | -microblaze*)
+ os=
+ basic_machine=$1
+ ;;
+ -bluegene*)
+ os=-cnk
+ ;;
+ -sim | -cisco | -oki | -wec | -winbond)
+ os=
+ basic_machine=$1
+ ;;
+ -scout)
+ ;;
+ -wrs)
+ os=-vxworks
+ basic_machine=$1
+ ;;
+ -chorusos*)
+ os=-chorusos
+ basic_machine=$1
+ ;;
+ -chorusrdb)
+ os=-chorusrdb
+ basic_machine=$1
+ ;;
+ -hiux*)
+ os=-hiuxwe2
+ ;;
+ -sco6)
+ os=-sco5v6
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco5)
+ os=-sco3.2v5
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco4)
+ os=-sco3.2v4
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2.[4-9]*)
+ os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco3.2v[4-9]*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco5v6*)
+ # Don't forget version if it is 3.2v4 or newer.
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -sco*)
+ os=-sco3.2v2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -udk*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -isc)
+ os=-isc2.2
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -clix*)
+ basic_machine=clipper-intergraph
+ ;;
+ -isc*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
+ ;;
+ -lynx*178)
+ os=-lynxos178
+ ;;
+ -lynx*5)
+ os=-lynxos5
+ ;;
+ -lynx*)
+ os=-lynxos
+ ;;
+ -ptx*)
+ basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
+ ;;
+ -windowsnt*)
+ os=`echo $os | sed -e 's/windowsnt/winnt/'`
+ ;;
+ -psos*)
+ os=-psos
+ ;;
+ -mint | -mint[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+esac
+
+# Decode aliases for certain CPU-COMPANY combinations.
+case $basic_machine in
+ # Recognize the basic CPU types without company name.
+ # Some are omitted here because they have special meanings below.
+ 1750a | 580 \
+ | a29k \
+ | aarch64 | aarch64_be \
+ | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
+ | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
+ | am33_2.0 \
+ | arc | arceb \
+ | arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \
+ | avr | avr32 \
+ | ba \
+ | be32 | be64 \
+ | bfin \
+ | c4x | c8051 | clipper \
+ | d10v | d30v | dlx | dsp16xx \
+ | e2k | epiphany \
+ | fido | fr30 | frv | ft32 \
+ | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
+ | hexagon \
+ | i370 | i860 | i960 | ia64 \
+ | ip2k | iq2000 \
+ | k1om \
+ | le32 | le64 \
+ | lm32 \
+ | m32c | m32r | m32rle | m68000 | m68k | m88k \
+ | maxq | mb | microblaze | microblazeel | mcore | mep | metag \
+ | mips | mipsbe | mipseb | mipsel | mipsle \
+ | mips16 \
+ | mips64 | mips64el \
+ | mips64octeon | mips64octeonel \
+ | mips64orion | mips64orionel \
+ | mips64r5900 | mips64r5900el \
+ | mips64vr | mips64vrel \
+ | mips64vr4100 | mips64vr4100el \
+ | mips64vr4300 | mips64vr4300el \
+ | mips64vr5000 | mips64vr5000el \
+ | mips64vr5900 | mips64vr5900el \
+ | mipsisa32 | mipsisa32el \
+ | mipsisa32r2 | mipsisa32r2el \
+ | mipsisa32r6 | mipsisa32r6el \
+ | mipsisa64 | mipsisa64el \
+ | mipsisa64r2 | mipsisa64r2el \
+ | mipsisa64r6 | mipsisa64r6el \
+ | mipsisa64sb1 | mipsisa64sb1el \
+ | mipsisa64sr71k | mipsisa64sr71kel \
+ | mipsr5900 | mipsr5900el \
+ | mipstx39 | mipstx39el \
+ | mn10200 | mn10300 \
+ | moxie \
+ | mt \
+ | msp430 \
+ | nds32 | nds32le | nds32be \
+ | nios | nios2 | nios2eb | nios2el \
+ | ns16k | ns32k \
+ | open8 | or1k | or1knd | or32 \
+ | pdp10 | pdp11 | pj | pjl \
+ | powerpc | powerpc64 | powerpc64le | powerpcle \
+ | pru \
+ | pyramid \
+ | riscv32 | riscv64 \
+ | rl78 | rx \
+ | score \
+ | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
+ | sh64 | sh64le \
+ | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
+ | sparcv8 | sparcv9 | sparcv9b | sparcv9v \
+ | spu \
+ | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
+ | ubicom32 \
+ | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
+ | visium \
+ | we32k \
+ | x86 | xc16x | xstormy16 | xtensa \
+ | z8k | z80)
+ basic_machine=$basic_machine-unknown
+ ;;
+ c54x)
+ basic_machine=tic54x-unknown
+ ;;
+ c55x)
+ basic_machine=tic55x-unknown
+ ;;
+ c6x)
+ basic_machine=tic6x-unknown
+ ;;
+ leon|leon[3-9])
+ basic_machine=sparc-$basic_machine
+ ;;
+ m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip)
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
+ ;;
+ ms1)
+ basic_machine=mt-unknown
+ ;;
+
+ strongarm | thumb | xscale)
+ basic_machine=arm-unknown
+ ;;
+ xgate)
+ basic_machine=$basic_machine-unknown
+ os=-none
+ ;;
+ xscaleeb)
+ basic_machine=armeb-unknown
+ ;;
+
+ xscaleel)
+ basic_machine=armel-unknown
+ ;;
+
+ # We use `pc' rather than `unknown'
+ # because (1) that's what they normally are, and
+ # (2) the word "unknown" tends to confuse beginning users.
+ i*86 | x86_64)
+ basic_machine=$basic_machine-pc
+ ;;
+ # Object if more than one company name word.
+ *-*-*)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+ # Recognize the basic CPU types with company name.
+ 580-* \
+ | a29k-* \
+ | aarch64-* | aarch64_be-* \
+ | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
+ | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
+ | alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \
+ | arm-* | armbe-* | armle-* | armeb-* | armv*-* \
+ | avr-* | avr32-* \
+ | ba-* \
+ | be32-* | be64-* \
+ | bfin-* | bs2000-* \
+ | c[123]* | c30-* | [cjt]90-* | c4x-* \
+ | c8051-* | clipper-* | craynv-* | cydra-* \
+ | d10v-* | d30v-* | dlx-* \
+ | e2k-* | elxsi-* \
+ | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
+ | h8300-* | h8500-* \
+ | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
+ | hexagon-* \
+ | i*86-* | i860-* | i960-* | ia64-* \
+ | ip2k-* | iq2000-* \
+ | k1om-* \
+ | le32-* | le64-* \
+ | lm32-* \
+ | m32c-* | m32r-* | m32rle-* \
+ | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
+ | m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
+ | microblaze-* | microblazeel-* \
+ | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
+ | mips16-* \
+ | mips64-* | mips64el-* \
+ | mips64octeon-* | mips64octeonel-* \
+ | mips64orion-* | mips64orionel-* \
+ | mips64r5900-* | mips64r5900el-* \
+ | mips64vr-* | mips64vrel-* \
+ | mips64vr4100-* | mips64vr4100el-* \
+ | mips64vr4300-* | mips64vr4300el-* \
+ | mips64vr5000-* | mips64vr5000el-* \
+ | mips64vr5900-* | mips64vr5900el-* \
+ | mipsisa32-* | mipsisa32el-* \
+ | mipsisa32r2-* | mipsisa32r2el-* \
+ | mipsisa32r6-* | mipsisa32r6el-* \
+ | mipsisa64-* | mipsisa64el-* \
+ | mipsisa64r2-* | mipsisa64r2el-* \
+ | mipsisa64r6-* | mipsisa64r6el-* \
+ | mipsisa64sb1-* | mipsisa64sb1el-* \
+ | mipsisa64sr71k-* | mipsisa64sr71kel-* \
+ | mipsr5900-* | mipsr5900el-* \
+ | mipstx39-* | mipstx39el-* \
+ | mmix-* \
+ | mt-* \
+ | msp430-* \
+ | nds32-* | nds32le-* | nds32be-* \
+ | nios-* | nios2-* | nios2eb-* | nios2el-* \
+ | none-* | np1-* | ns16k-* | ns32k-* \
+ | open8-* \
+ | or1k*-* \
+ | orion-* \
+ | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
+ | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
+ | pru-* \
+ | pyramid-* \
+ | riscv32-* | riscv64-* \
+ | rl78-* | romp-* | rs6000-* | rx-* \
+ | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
+ | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
+ | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
+ | sparclite-* \
+ | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \
+ | tahoe-* \
+ | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
+ | tile*-* \
+ | tron-* \
+ | ubicom32-* \
+ | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
+ | vax-* \
+ | visium-* \
+ | we32k-* \
+ | x86-* | x86_64-* | xc16x-* | xps100-* \
+ | xstormy16-* | xtensa*-* \
+ | ymp-* \
+ | z8k-* | z80-*)
+ ;;
+ # Recognize the basic CPU types without company name, with glob match.
+ xtensa*)
+ basic_machine=$basic_machine-unknown
+ ;;
+ # Recognize the various machine names and aliases which stand
+ # for a CPU type and a company and sometimes even an OS.
+ 386bsd)
+ basic_machine=i386-unknown
+ os=-bsd
+ ;;
+ 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
+ basic_machine=m68000-att
+ ;;
+ 3b*)
+ basic_machine=we32k-att
+ ;;
+ a29khif)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ abacus)
+ basic_machine=abacus-unknown
+ ;;
+ adobe68k)
+ basic_machine=m68010-adobe
+ os=-scout
+ ;;
+ alliant | fx80)
+ basic_machine=fx80-alliant
+ ;;
+ altos | altos3068)
+ basic_machine=m68k-altos
+ ;;
+ am29k)
+ basic_machine=a29k-none
+ os=-bsd
+ ;;
+ amd64)
+ basic_machine=x86_64-pc
+ ;;
+ amd64-*)
+ basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ amdahl)
+ basic_machine=580-amdahl
+ os=-sysv
+ ;;
+ amiga | amiga-*)
+ basic_machine=m68k-unknown
+ ;;
+ amigaos | amigados)
+ basic_machine=m68k-unknown
+ os=-amigaos
+ ;;
+ amigaunix | amix)
+ basic_machine=m68k-unknown
+ os=-sysv4
+ ;;
+ apollo68)
+ basic_machine=m68k-apollo
+ os=-sysv
+ ;;
+ apollo68bsd)
+ basic_machine=m68k-apollo
+ os=-bsd
+ ;;
+ aros)
+ basic_machine=i386-pc
+ os=-aros
+ ;;
+ asmjs)
+ basic_machine=asmjs-unknown
+ ;;
+ aux)
+ basic_machine=m68k-apple
+ os=-aux
+ ;;
+ balance)
+ basic_machine=ns32k-sequent
+ os=-dynix
+ ;;
+ blackfin)
+ basic_machine=bfin-unknown
+ os=-linux
+ ;;
+ blackfin-*)
+ basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ bluegene*)
+ basic_machine=powerpc-ibm
+ os=-cnk
+ ;;
+ c54x-*)
+ basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c55x-*)
+ basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c6x-*)
+ basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ c90)
+ basic_machine=c90-cray
+ os=-unicos
+ ;;
+ cegcc)
+ basic_machine=arm-unknown
+ os=-cegcc
+ ;;
+ convex-c1)
+ basic_machine=c1-convex
+ os=-bsd
+ ;;
+ convex-c2)
+ basic_machine=c2-convex
+ os=-bsd
+ ;;
+ convex-c32)
+ basic_machine=c32-convex
+ os=-bsd
+ ;;
+ convex-c34)
+ basic_machine=c34-convex
+ os=-bsd
+ ;;
+ convex-c38)
+ basic_machine=c38-convex
+ os=-bsd
+ ;;
+ cray | j90)
+ basic_machine=j90-cray
+ os=-unicos
+ ;;
+ craynv)
+ basic_machine=craynv-cray
+ os=-unicosmp
+ ;;
+ cr16 | cr16-*)
+ basic_machine=cr16-unknown
+ os=-elf
+ ;;
+ crds | unos)
+ basic_machine=m68k-crds
+ ;;
+ crisv32 | crisv32-* | etraxfs*)
+ basic_machine=crisv32-axis
+ ;;
+ cris | cris-* | etrax*)
+ basic_machine=cris-axis
+ ;;
+ crx)
+ basic_machine=crx-unknown
+ os=-elf
+ ;;
+ da30 | da30-*)
+ basic_machine=m68k-da30
+ ;;
+ decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
+ basic_machine=mips-dec
+ ;;
+ decsystem10* | dec10*)
+ basic_machine=pdp10-dec
+ os=-tops10
+ ;;
+ decsystem20* | dec20*)
+ basic_machine=pdp10-dec
+ os=-tops20
+ ;;
+ delta | 3300 | motorola-3300 | motorola-delta \
+ | 3300-motorola | delta-motorola)
+ basic_machine=m68k-motorola
+ ;;
+ delta88)
+ basic_machine=m88k-motorola
+ os=-sysv3
+ ;;
+ dicos)
+ basic_machine=i686-pc
+ os=-dicos
+ ;;
+ djgpp)
+ basic_machine=i586-pc
+ os=-msdosdjgpp
+ ;;
+ dpx20 | dpx20-*)
+ basic_machine=rs6000-bull
+ os=-bosx
+ ;;
+ dpx2* | dpx2*-bull)
+ basic_machine=m68k-bull
+ os=-sysv3
+ ;;
+ e500v[12])
+ basic_machine=powerpc-unknown
+ os=$os"spe"
+ ;;
+ e500v[12]-*)
+ basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=$os"spe"
+ ;;
+ ebmon29k)
+ basic_machine=a29k-amd
+ os=-ebmon
+ ;;
+ elxsi)
+ basic_machine=elxsi-elxsi
+ os=-bsd
+ ;;
+ encore | umax | mmax)
+ basic_machine=ns32k-encore
+ ;;
+ es1800 | OSE68k | ose68k | ose | OSE)
+ basic_machine=m68k-ericsson
+ os=-ose
+ ;;
+ fx2800)
+ basic_machine=i860-alliant
+ ;;
+ genix)
+ basic_machine=ns32k-ns
+ ;;
+ gmicro)
+ basic_machine=tron-gmicro
+ os=-sysv
+ ;;
+ go32)
+ basic_machine=i386-pc
+ os=-go32
+ ;;
+ h3050r* | hiux*)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ h8300hms)
+ basic_machine=h8300-hitachi
+ os=-hms
+ ;;
+ h8300xray)
+ basic_machine=h8300-hitachi
+ os=-xray
+ ;;
+ h8500hms)
+ basic_machine=h8500-hitachi
+ os=-hms
+ ;;
+ harris)
+ basic_machine=m88k-harris
+ os=-sysv3
+ ;;
+ hp300-*)
+ basic_machine=m68k-hp
+ ;;
+ hp300bsd)
+ basic_machine=m68k-hp
+ os=-bsd
+ ;;
+ hp300hpux)
+ basic_machine=m68k-hp
+ os=-hpux
+ ;;
+ hp3k9[0-9][0-9] | hp9[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k2[0-9][0-9] | hp9k31[0-9])
+ basic_machine=m68000-hp
+ ;;
+ hp9k3[2-9][0-9])
+ basic_machine=m68k-hp
+ ;;
+ hp9k6[0-9][0-9] | hp6[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hp9k7[0-79][0-9] | hp7[0-79][0-9])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k78[0-9] | hp78[0-9])
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
+ # FIXME: really hppa2.0-hp
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][13679] | hp8[0-9][13679])
+ basic_machine=hppa1.1-hp
+ ;;
+ hp9k8[0-9][0-9] | hp8[0-9][0-9])
+ basic_machine=hppa1.0-hp
+ ;;
+ hppa-next)
+ os=-nextstep3
+ ;;
+ hppaosf)
+ basic_machine=hppa1.1-hp
+ os=-osf
+ ;;
+ hppro)
+ basic_machine=hppa1.1-hp
+ os=-proelf
+ ;;
+ i370-ibm* | ibm*)
+ basic_machine=i370-ibm
+ ;;
+ i*86v32)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv32
+ ;;
+ i*86v4*)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv4
+ ;;
+ i*86v)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-sysv
+ ;;
+ i*86sol2)
+ basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
+ os=-solaris2
+ ;;
+ i386mach)
+ basic_machine=i386-mach
+ os=-mach
+ ;;
+ i386-vsta | vsta)
+ basic_machine=i386-unknown
+ os=-vsta
+ ;;
+ iris | iris4d)
+ basic_machine=mips-sgi
+ case $os in
+ -irix*)
+ ;;
+ *)
+ os=-irix4
+ ;;
+ esac
+ ;;
+ isi68 | isi)
+ basic_machine=m68k-isi
+ os=-sysv
+ ;;
+ leon-*|leon[3-9]-*)
+ basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'`
+ ;;
+ m68knommu)
+ basic_machine=m68k-unknown
+ os=-linux
+ ;;
+ m68knommu-*)
+ basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ m88k-omron*)
+ basic_machine=m88k-omron
+ ;;
+ magnum | m3230)
+ basic_machine=mips-mips
+ os=-sysv
+ ;;
+ merlin)
+ basic_machine=ns32k-utek
+ os=-sysv
+ ;;
+ microblaze*)
+ basic_machine=microblaze-xilinx
+ ;;
+ mingw64)
+ basic_machine=x86_64-pc
+ os=-mingw64
+ ;;
+ mingw32)
+ basic_machine=i686-pc
+ os=-mingw32
+ ;;
+ mingw32ce)
+ basic_machine=arm-unknown
+ os=-mingw32ce
+ ;;
+ miniframe)
+ basic_machine=m68000-convergent
+ ;;
+ *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
+ basic_machine=m68k-atari
+ os=-mint
+ ;;
+ mips3*-*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
+ ;;
+ mips3*)
+ basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
+ ;;
+ monitor)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ morphos)
+ basic_machine=powerpc-unknown
+ os=-morphos
+ ;;
+ moxiebox)
+ basic_machine=moxie-unknown
+ os=-moxiebox
+ ;;
+ msdos)
+ basic_machine=i386-pc
+ os=-msdos
+ ;;
+ ms1-*)
+ basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
+ ;;
+ msys)
+ basic_machine=i686-pc
+ os=-msys
+ ;;
+ mvs)
+ basic_machine=i370-ibm
+ os=-mvs
+ ;;
+ nacl)
+ basic_machine=le32-unknown
+ os=-nacl
+ ;;
+ ncr3000)
+ basic_machine=i486-ncr
+ os=-sysv4
+ ;;
+ netbsd386)
+ basic_machine=i386-unknown
+ os=-netbsd
+ ;;
+ netwinder)
+ basic_machine=armv4l-rebel
+ os=-linux
+ ;;
+ news | news700 | news800 | news900)
+ basic_machine=m68k-sony
+ os=-newsos
+ ;;
+ news1000)
+ basic_machine=m68030-sony
+ os=-newsos
+ ;;
+ news-3600 | risc-news)
+ basic_machine=mips-sony
+ os=-newsos
+ ;;
+ necv70)
+ basic_machine=v70-nec
+ os=-sysv
+ ;;
+ next | m*-next )
+ basic_machine=m68k-next
+ case $os in
+ -nextstep* )
+ ;;
+ -ns2*)
+ os=-nextstep2
+ ;;
+ *)
+ os=-nextstep3
+ ;;
+ esac
+ ;;
+ nh3000)
+ basic_machine=m68k-harris
+ os=-cxux
+ ;;
+ nh[45]000)
+ basic_machine=m88k-harris
+ os=-cxux
+ ;;
+ nindy960)
+ basic_machine=i960-intel
+ os=-nindy
+ ;;
+ mon960)
+ basic_machine=i960-intel
+ os=-mon960
+ ;;
+ nonstopux)
+ basic_machine=mips-compaq
+ os=-nonstopux
+ ;;
+ np1)
+ basic_machine=np1-gould
+ ;;
+ neo-tandem)
+ basic_machine=neo-tandem
+ ;;
+ nse-tandem)
+ basic_machine=nse-tandem
+ ;;
+ nsr-tandem)
+ basic_machine=nsr-tandem
+ ;;
+ op50n-* | op60c-*)
+ basic_machine=hppa1.1-oki
+ os=-proelf
+ ;;
+ openrisc | openrisc-*)
+ basic_machine=or32-unknown
+ ;;
+ os400)
+ basic_machine=powerpc-ibm
+ os=-os400
+ ;;
+ OSE68000 | ose68000)
+ basic_machine=m68000-ericsson
+ os=-ose
+ ;;
+ os68k)
+ basic_machine=m68k-none
+ os=-os68k
+ ;;
+ pa-hitachi)
+ basic_machine=hppa1.1-hitachi
+ os=-hiuxwe2
+ ;;
+ paragon)
+ basic_machine=i860-intel
+ os=-osf
+ ;;
+ parisc)
+ basic_machine=hppa-unknown
+ os=-linux
+ ;;
+ parisc-*)
+ basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
+ os=-linux
+ ;;
+ pbd)
+ basic_machine=sparc-tti
+ ;;
+ pbb)
+ basic_machine=m68k-tti
+ ;;
+ pc532 | pc532-*)
+ basic_machine=ns32k-pc532
+ ;;
+ pc98)
+ basic_machine=i386-pc
+ ;;
+ pc98-*)
+ basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentium | p5 | k5 | k6 | nexgen | viac3)
+ basic_machine=i586-pc
+ ;;
+ pentiumpro | p6 | 6x86 | athlon | athlon_*)
+ basic_machine=i686-pc
+ ;;
+ pentiumii | pentium2 | pentiumiii | pentium3)
+ basic_machine=i686-pc
+ ;;
+ pentium4)
+ basic_machine=i786-pc
+ ;;
+ pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
+ basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumpro-* | p6-* | 6x86-* | athlon-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
+ basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pentium4-*)
+ basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ pn)
+ basic_machine=pn-gould
+ ;;
+ power) basic_machine=power-ibm
+ ;;
+ ppc | ppcbe) basic_machine=powerpc-unknown
+ ;;
+ ppc-* | ppcbe-*)
+ basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppcle | powerpclittle)
+ basic_machine=powerpcle-unknown
+ ;;
+ ppcle-* | powerpclittle-*)
+ basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64) basic_machine=powerpc64-unknown
+ ;;
+ ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ppc64le | powerpc64little)
+ basic_machine=powerpc64le-unknown
+ ;;
+ ppc64le-* | powerpc64little-*)
+ basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ ps2)
+ basic_machine=i386-ibm
+ ;;
+ pw32)
+ basic_machine=i586-unknown
+ os=-pw32
+ ;;
+ rdos | rdos64)
+ basic_machine=x86_64-pc
+ os=-rdos
+ ;;
+ rdos32)
+ basic_machine=i386-pc
+ os=-rdos
+ ;;
+ rom68k)
+ basic_machine=m68k-rom68k
+ os=-coff
+ ;;
+ rm[46]00)
+ basic_machine=mips-siemens
+ ;;
+ rtpc | rtpc-*)
+ basic_machine=romp-ibm
+ ;;
+ s390 | s390-*)
+ basic_machine=s390-ibm
+ ;;
+ s390x | s390x-*)
+ basic_machine=s390x-ibm
+ ;;
+ sa29200)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ sb1)
+ basic_machine=mipsisa64sb1-unknown
+ ;;
+ sb1el)
+ basic_machine=mipsisa64sb1el-unknown
+ ;;
+ sde)
+ basic_machine=mipsisa32-sde
+ os=-elf
+ ;;
+ sei)
+ basic_machine=mips-sei
+ os=-seiux
+ ;;
+ sequent)
+ basic_machine=i386-sequent
+ ;;
+ sh)
+ basic_machine=sh-hitachi
+ os=-hms
+ ;;
+ sh5el)
+ basic_machine=sh5le-unknown
+ ;;
+ sh64)
+ basic_machine=sh64-unknown
+ ;;
+ sparclite-wrs | simso-wrs)
+ basic_machine=sparclite-wrs
+ os=-vxworks
+ ;;
+ sps7)
+ basic_machine=m68k-bull
+ os=-sysv2
+ ;;
+ spur)
+ basic_machine=spur-unknown
+ ;;
+ st2000)
+ basic_machine=m68k-tandem
+ ;;
+ stratus)
+ basic_machine=i860-stratus
+ os=-sysv4
+ ;;
+ strongarm-* | thumb-*)
+ basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'`
+ ;;
+ sun2)
+ basic_machine=m68000-sun
+ ;;
+ sun2os3)
+ basic_machine=m68000-sun
+ os=-sunos3
+ ;;
+ sun2os4)
+ basic_machine=m68000-sun
+ os=-sunos4
+ ;;
+ sun3os3)
+ basic_machine=m68k-sun
+ os=-sunos3
+ ;;
+ sun3os4)
+ basic_machine=m68k-sun
+ os=-sunos4
+ ;;
+ sun4os3)
+ basic_machine=sparc-sun
+ os=-sunos3
+ ;;
+ sun4os4)
+ basic_machine=sparc-sun
+ os=-sunos4
+ ;;
+ sun4sol2)
+ basic_machine=sparc-sun
+ os=-solaris2
+ ;;
+ sun3 | sun3-*)
+ basic_machine=m68k-sun
+ ;;
+ sun4)
+ basic_machine=sparc-sun
+ ;;
+ sun386 | sun386i | roadrunner)
+ basic_machine=i386-sun
+ ;;
+ sv1)
+ basic_machine=sv1-cray
+ os=-unicos
+ ;;
+ symmetry)
+ basic_machine=i386-sequent
+ os=-dynix
+ ;;
+ t3e)
+ basic_machine=alphaev5-cray
+ os=-unicos
+ ;;
+ t90)
+ basic_machine=t90-cray
+ os=-unicos
+ ;;
+ tile*)
+ basic_machine=$basic_machine-unknown
+ os=-linux-gnu
+ ;;
+ tx39)
+ basic_machine=mipstx39-unknown
+ ;;
+ tx39el)
+ basic_machine=mipstx39el-unknown
+ ;;
+ toad1)
+ basic_machine=pdp10-xkl
+ os=-tops20
+ ;;
+ tower | tower-32)
+ basic_machine=m68k-ncr
+ ;;
+ tpf)
+ basic_machine=s390x-ibm
+ os=-tpf
+ ;;
+ udi29k)
+ basic_machine=a29k-amd
+ os=-udi
+ ;;
+ ultra3)
+ basic_machine=a29k-nyu
+ os=-sym1
+ ;;
+ v810 | necv810)
+ basic_machine=v810-nec
+ os=-none
+ ;;
+ vaxv)
+ basic_machine=vax-dec
+ os=-sysv
+ ;;
+ vms)
+ basic_machine=vax-dec
+ os=-vms
+ ;;
+ vpp*|vx|vx-*)
+ basic_machine=f301-fujitsu
+ ;;
+ vxworks960)
+ basic_machine=i960-wrs
+ os=-vxworks
+ ;;
+ vxworks68)
+ basic_machine=m68k-wrs
+ os=-vxworks
+ ;;
+ vxworks29k)
+ basic_machine=a29k-wrs
+ os=-vxworks
+ ;;
+ w65*)
+ basic_machine=w65-wdc
+ os=-none
+ ;;
+ w89k-*)
+ basic_machine=hppa1.1-winbond
+ os=-proelf
+ ;;
+ xbox)
+ basic_machine=i686-pc
+ os=-mingw32
+ ;;
+ xps | xps100)
+ basic_machine=xps100-honeywell
+ ;;
+ xscale-* | xscalee[bl]-*)
+ basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'`
+ ;;
+ ymp)
+ basic_machine=ymp-cray
+ os=-unicos
+ ;;
+ z8k-*-coff)
+ basic_machine=z8k-unknown
+ os=-sim
+ ;;
+ z80-*-coff)
+ basic_machine=z80-unknown
+ os=-sim
+ ;;
+ none)
+ basic_machine=none-none
+ os=-none
+ ;;
+
+# Here we handle the default manufacturer of certain CPU types. It is in
+# some cases the only manufacturer, in others, it is the most popular.
+ w89k)
+ basic_machine=hppa1.1-winbond
+ ;;
+ op50n)
+ basic_machine=hppa1.1-oki
+ ;;
+ op60c)
+ basic_machine=hppa1.1-oki
+ ;;
+ romp)
+ basic_machine=romp-ibm
+ ;;
+ mmix)
+ basic_machine=mmix-knuth
+ ;;
+ rs6000)
+ basic_machine=rs6000-ibm
+ ;;
+ vax)
+ basic_machine=vax-dec
+ ;;
+ pdp10)
+ # there are many clones, so DEC is not a safe bet
+ basic_machine=pdp10-unknown
+ ;;
+ pdp11)
+ basic_machine=pdp11-dec
+ ;;
+ we32k)
+ basic_machine=we32k-att
+ ;;
+ sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
+ basic_machine=sh-unknown
+ ;;
+ sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
+ basic_machine=sparc-sun
+ ;;
+ cydra)
+ basic_machine=cydra-cydrome
+ ;;
+ orion)
+ basic_machine=orion-highlevel
+ ;;
+ orion105)
+ basic_machine=clipper-highlevel
+ ;;
+ mac | mpw | mac-mpw)
+ basic_machine=m68k-apple
+ ;;
+ pmac | pmac-mpw)
+ basic_machine=powerpc-apple
+ ;;
+ *-unknown)
+ # Make sure to match an already-canonicalized machine name.
+ ;;
+ *)
+ echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+
+# Here we canonicalize certain aliases for manufacturers.
+case $basic_machine in
+ *-digital*)
+ basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
+ ;;
+ *-commodore*)
+ basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
+ ;;
+ *)
+ ;;
+esac
+
+# Decode manufacturer-specific aliases for certain operating systems.
+
+if [ x"$os" != x"" ]
+then
+case $os in
+ # First match some system type aliases
+ # that might get confused with valid system types.
+ # -solaris* is a basic system type, with this one exception.
+ -auroraux)
+ os=-auroraux
+ ;;
+ -solaris1 | -solaris1.*)
+ os=`echo $os | sed -e 's|solaris1|sunos4|'`
+ ;;
+ -solaris)
+ os=-solaris2
+ ;;
+ -svr4*)
+ os=-sysv4
+ ;;
+ -unixware*)
+ os=-sysv4.2uw
+ ;;
+ -gnu/linux*)
+ os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
+ ;;
+ # First accept the basic system types.
+ # The portable systems come first.
+ # Each alternative MUST END IN A *, to match a version number.
+ # -sysv* is not here because it comes later, after sysvr4.
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
+ | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
+ | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
+ | -sym* | -kopensolaris* | -plan9* \
+ | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
+ | -aos* | -aros* | -cloudabi* | -sortix* \
+ | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
+ | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
+ | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
+ | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \
+ | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
+ | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
+ | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
+ | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
+ | -chorusos* | -chorusrdb* | -cegcc* \
+ | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
+ | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
+ | -linux-newlib* | -linux-musl* | -linux-uclibc* \
+ | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \
+ | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
+ | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
+ | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
+ | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
+ | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
+ | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
+ | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \
+ | -onefs* | -tirtos* | -phoenix* | -fuchsia*)
+ # Remember, each alternative MUST END IN *, to match a version number.
+ ;;
+ -qnx*)
+ case $basic_machine in
+ x86-* | i*86-*)
+ ;;
+ *)
+ os=-nto$os
+ ;;
+ esac
+ ;;
+ -nto-qnx*)
+ ;;
+ -nto*)
+ os=`echo $os | sed -e 's|nto|nto-qnx|'`
+ ;;
+ -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
+ | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
+ | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
+ ;;
+ -mac*)
+ os=`echo $os | sed -e 's|mac|macos|'`
+ ;;
+ -linux-dietlibc)
+ os=-linux-dietlibc
+ ;;
+ -linux*)
+ os=`echo $os | sed -e 's|linux|linux-gnu|'`
+ ;;
+ -sunos5*)
+ os=`echo $os | sed -e 's|sunos5|solaris2|'`
+ ;;
+ -sunos6*)
+ os=`echo $os | sed -e 's|sunos6|solaris3|'`
+ ;;
+ -opened*)
+ os=-openedition
+ ;;
+ -os400*)
+ os=-os400
+ ;;
+ -wince*)
+ os=-wince
+ ;;
+ -osfrose*)
+ os=-osfrose
+ ;;
+ -osf*)
+ os=-osf
+ ;;
+ -utek*)
+ os=-bsd
+ ;;
+ -dynix*)
+ os=-bsd
+ ;;
+ -acis*)
+ os=-aos
+ ;;
+ -atheos*)
+ os=-atheos
+ ;;
+ -syllable*)
+ os=-syllable
+ ;;
+ -386bsd)
+ os=-bsd
+ ;;
+ -ctix* | -uts*)
+ os=-sysv
+ ;;
+ -nova*)
+ os=-rtmk-nova
+ ;;
+ -ns2 )
+ os=-nextstep2
+ ;;
+ -nsk*)
+ os=-nsk
+ ;;
+ # Preserve the version number of sinix5.
+ -sinix5.*)
+ os=`echo $os | sed -e 's|sinix|sysv|'`
+ ;;
+ -sinix*)
+ os=-sysv4
+ ;;
+ -tpf*)
+ os=-tpf
+ ;;
+ -triton*)
+ os=-sysv3
+ ;;
+ -oss*)
+ os=-sysv3
+ ;;
+ -svr4)
+ os=-sysv4
+ ;;
+ -svr3)
+ os=-sysv3
+ ;;
+ -sysvr4)
+ os=-sysv4
+ ;;
+ # This must come after -sysvr4.
+ -sysv*)
+ ;;
+ -ose*)
+ os=-ose
+ ;;
+ -es1800*)
+ os=-ose
+ ;;
+ -xenix)
+ os=-xenix
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ os=-mint
+ ;;
+ -aros*)
+ os=-aros
+ ;;
+ -zvmoe)
+ os=-zvmoe
+ ;;
+ -dicos*)
+ os=-dicos
+ ;;
+ -nacl*)
+ ;;
+ -ios)
+ ;;
+ -none)
+ ;;
+ *)
+ # Get rid of the `-' at the beginning of $os.
+ os=`echo $os | sed 's/[^-]*-//'`
+ echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
+ exit 1
+ ;;
+esac
+else
+
+# Here we handle the default operating systems that come with various machines.
+# The value should be what the vendor currently ships out the door with their
+# machine, or, put another way, the most popular OS provided with the machine.
+
+# Note that if you're going to try to match "-MANUFACTURER" here (say,
+# "-sun"), then you have to tell the case statement up towards the top
+# that MANUFACTURER isn't an operating system. Otherwise, code above
+# will signal an error saying that MANUFACTURER isn't an operating
+# system, and we'll never get to this point.
+
+case $basic_machine in
+ score-*)
+ os=-elf
+ ;;
+ spu-*)
+ os=-elf
+ ;;
+ *-acorn)
+ os=-riscix1.2
+ ;;
+ arm*-rebel)
+ os=-linux
+ ;;
+ arm*-semi)
+ os=-aout
+ ;;
+ c4x-* | tic4x-*)
+ os=-coff
+ ;;
+ c8051-*)
+ os=-elf
+ ;;
+ hexagon-*)
+ os=-elf
+ ;;
+ tic54x-*)
+ os=-coff
+ ;;
+ tic55x-*)
+ os=-coff
+ ;;
+ tic6x-*)
+ os=-coff
+ ;;
+ # This must come before the *-dec entry.
+ pdp10-*)
+ os=-tops20
+ ;;
+ pdp11-*)
+ os=-none
+ ;;
+ *-dec | vax-*)
+ os=-ultrix4.2
+ ;;
+ m68*-apollo)
+ os=-domain
+ ;;
+ i386-sun)
+ os=-sunos4.0.2
+ ;;
+ m68000-sun)
+ os=-sunos3
+ ;;
+ m68*-cisco)
+ os=-aout
+ ;;
+ mep-*)
+ os=-elf
+ ;;
+ mips*-cisco)
+ os=-elf
+ ;;
+ mips*-*)
+ os=-elf
+ ;;
+ or32-*)
+ os=-coff
+ ;;
+ *-tti) # must be before sparc entry or we get the wrong os.
+ os=-sysv3
+ ;;
+ sparc-* | *-sun)
+ os=-sunos4.1.1
+ ;;
+ *-be)
+ os=-beos
+ ;;
+ *-haiku)
+ os=-haiku
+ ;;
+ *-ibm)
+ os=-aix
+ ;;
+ *-knuth)
+ os=-mmixware
+ ;;
+ *-wec)
+ os=-proelf
+ ;;
+ *-winbond)
+ os=-proelf
+ ;;
+ *-oki)
+ os=-proelf
+ ;;
+ *-hp)
+ os=-hpux
+ ;;
+ *-hitachi)
+ os=-hiux
+ ;;
+ i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
+ os=-sysv
+ ;;
+ *-cbm)
+ os=-amigaos
+ ;;
+ *-dg)
+ os=-dgux
+ ;;
+ *-dolphin)
+ os=-sysv3
+ ;;
+ m68k-ccur)
+ os=-rtu
+ ;;
+ m88k-omron*)
+ os=-luna
+ ;;
+ *-next )
+ os=-nextstep
+ ;;
+ *-sequent)
+ os=-ptx
+ ;;
+ *-crds)
+ os=-unos
+ ;;
+ *-ns)
+ os=-genix
+ ;;
+ i370-*)
+ os=-mvs
+ ;;
+ *-next)
+ os=-nextstep3
+ ;;
+ *-gould)
+ os=-sysv
+ ;;
+ *-highlevel)
+ os=-bsd
+ ;;
+ *-encore)
+ os=-bsd
+ ;;
+ *-sgi)
+ os=-irix
+ ;;
+ *-siemens)
+ os=-sysv4
+ ;;
+ *-masscomp)
+ os=-rtu
+ ;;
+ f30[01]-fujitsu | f700-fujitsu)
+ os=-uxpv
+ ;;
+ *-rom68k)
+ os=-coff
+ ;;
+ *-*bug)
+ os=-coff
+ ;;
+ *-apple)
+ os=-macos
+ ;;
+ *-atari*)
+ os=-mint
+ ;;
+ *)
+ os=-none
+ ;;
+esac
+fi
+
+# Here we handle the case where we know the OS and the CPU type, but not the
+# manufacturer.  We pick the logical manufacturer.
+vendor=unknown
+case $basic_machine in
+ *-unknown)
+ case $os in
+ -riscix*)
+ vendor=acorn
+ ;;
+ -sunos*)
+ vendor=sun
+ ;;
+ -cnk*|-aix*)
+ vendor=ibm
+ ;;
+ -beos*)
+ vendor=be
+ ;;
+ -hpux*)
+ vendor=hp
+ ;;
+ -mpeix*)
+ vendor=hp
+ ;;
+ -hiux*)
+ vendor=hitachi
+ ;;
+ -unos*)
+ vendor=crds
+ ;;
+ -dgux*)
+ vendor=dg
+ ;;
+ -luna*)
+ vendor=omron
+ ;;
+ -genix*)
+ vendor=ns
+ ;;
+ -mvs* | -opened*)
+ vendor=ibm
+ ;;
+ -os400*)
+ vendor=ibm
+ ;;
+ -ptx*)
+ vendor=sequent
+ ;;
+ -tpf*)
+ vendor=ibm
+ ;;
+ -vxsim* | -vxworks* | -windiss*)
+ vendor=wrs
+ ;;
+ -aux*)
+ vendor=apple
+ ;;
+ -hms*)
+ vendor=hitachi
+ ;;
+ -mpw* | -macos*)
+ vendor=apple
+ ;;
+ -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
+ vendor=atari
+ ;;
+ -vos*)
+ vendor=stratus
+ ;;
+ esac
+ basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
+ ;;
+esac
+
+echo $basic_machine$os
+exit
+
+# Local variables:
+# eval: (add-hook 'write-file-hooks 'time-stamp)
+# time-stamp-start: "timestamp='"
+# time-stamp-format: "%:y-%02m-%02d"
+# time-stamp-end: "'"
+# End:
+>>>>>>> main
diff --git a/contrib/jemalloc/build-aux/install-sh b/contrib/jemalloc/build-aux/install-sh
new file mode 100755
index 000000000000..ebc66913e940
--- /dev/null
+++ b/contrib/jemalloc/build-aux/install-sh
@@ -0,0 +1,250 @@
+#! /bin/sh
+#
+# install - install a program, script, or datafile
+# This comes from X11R5 (mit/util/scripts/install.sh).
+#
+# Copyright 1991 by the Massachusetts Institute of Technology
+#
+# Permission to use, copy, modify, distribute, and sell this software and its
+# documentation for any purpose is hereby granted without fee, provided that
+# the above copyright notice appear in all copies and that both that
+# copyright notice and this permission notice appear in supporting
+# documentation, and that the name of M.I.T. not be used in advertising or
+# publicity pertaining to distribution of the software without specific,
+# written prior permission. M.I.T. makes no representations about the
+# suitability of this software for any purpose. It is provided "as is"
+# without express or implied warranty.
+#
+# Calling this script install-sh is preferred over install.sh, to prevent
+# `make' implicit rules from creating a file called install from it
+# when there is no Makefile.
+#
+# This script is compatible with the BSD install script, but was written
+# from scratch. It can only install one file at a time, a restriction
+# shared with many OSes' install programs.
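+#
+# Typical invocations (illustrative):
+#   install-sh -c -m 0644 foo.h /usr/local/include/foo.h
+#   install-sh -d /usr/local/lib/newdir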
+
+
+# set DOITPROG to echo to test this script
+
+# Don't use :- since 4.3BSD and earlier shells don't like it.
+doit="${DOITPROG-}"
+
+
+# put in absolute paths if you don't have them in your path; or use env. vars.
+
+mvprog="${MVPROG-mv}"
+cpprog="${CPPROG-cp}"
+chmodprog="${CHMODPROG-chmod}"
+chownprog="${CHOWNPROG-chown}"
+chgrpprog="${CHGRPPROG-chgrp}"
+stripprog="${STRIPPROG-strip}"
+rmprog="${RMPROG-rm}"
+mkdirprog="${MKDIRPROG-mkdir}"
+
+transformbasename=""
+transformarg=""
+instcmd="$mvprog"
+chmodcmd="$chmodprog 0755"
+chowncmd=""
+chgrpcmd=""
+stripcmd=""
+rmcmd="$rmprog -f"
+mvcmd="$mvprog"
+src=""
+dst=""
+dir_arg=""
+
+while [ x"$1" != x ]; do
+ case $1 in
+ -c) instcmd="$cpprog"
+ shift
+ continue;;
+
+ -d) dir_arg=true
+ shift
+ continue;;
+
+ -m) chmodcmd="$chmodprog $2"
+ shift
+ shift
+ continue;;
+
+ -o) chowncmd="$chownprog $2"
+ shift
+ shift
+ continue;;
+
+ -g) chgrpcmd="$chgrpprog $2"
+ shift
+ shift
+ continue;;
+
+ -s) stripcmd="$stripprog"
+ shift
+ continue;;
+
+ -t=*) transformarg=`echo $1 | sed 's/-t=//'`
+ shift
+ continue;;
+
+ -b=*) transformbasename=`echo $1 | sed 's/-b=//'`
+ shift
+ continue;;
+
+ *) if [ x"$src" = x ]
+ then
+ src=$1
+ else
+ # this colon is to work around a 386BSD /bin/sh bug
+ :
+ dst=$1
+ fi
+ shift
+ continue;;
+ esac
+done
+
+if [ x"$src" = x ]
+then
+ echo "install: no input file specified"
+ exit 1
+else
+ true
+fi
+
+if [ x"$dir_arg" != x ]; then
+ dst=$src
+ src=""
+
+ if [ -d $dst ]; then
+ instcmd=:
+ else
+ instcmd=mkdir
+ fi
+else
+
+# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
+# might cause directories to be created, which would be especially bad
+# if $src (and thus $dsttmp) contains '*'.
+
+ if [ -f $src -o -d $src ]
+ then
+ true
+ else
+ echo "install: $src does not exist"
+ exit 1
+ fi
+
+ if [ x"$dst" = x ]
+ then
+ echo "install: no destination specified"
+ exit 1
+ else
+ true
+ fi
+
+# If destination is a directory, append the input filename; if your system
+# does not like double slashes in filenames, you may need to add some logic.
+
+ if [ -d $dst ]
+ then
+ dst="$dst"/`basename $src`
+ else
+ true
+ fi
+fi
+
+## this sed command emulates the dirname command
+dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
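+# e.g. (illustrative) dst=/usr/local/bin/foo yields dstdir=/usr/local/bin,
+# and a bare dst=foo yields dstdir=. via the final substitution.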
+
+# Make sure that the destination directory exists.
+# this part is taken from Noah Friedman's mkinstalldirs script
+
+# Skip lots of stat calls in the usual case.
+if [ ! -d "$dstdir" ]; then
+defaultIFS='
+'
+IFS="${IFS-${defaultIFS}}"
+
+oIFS="${IFS}"
+# Some sh's can't handle IFS=/ for some reason.
+IFS='%'
+set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'`
+IFS="${oIFS}"
+
+pathcomp=''
+
+while [ $# -ne 0 ] ; do
+ pathcomp="${pathcomp}${1}"
+ shift
+
+ if [ ! -d "${pathcomp}" ] ;
+ then
+ $mkdirprog "${pathcomp}"
+ else
+ true
+ fi
+
+ pathcomp="${pathcomp}/"
+done
+fi
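+
+# e.g. (illustrative) for dstdir=/a/b/c the loop above splits the path on
+# `%' and runs $mkdirprog for /a, /a/b and /a/b/c in turn.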
+
+if [ x"$dir_arg" != x ]
+then
+ $doit $instcmd $dst &&
+
+ if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi &&
+ if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi &&
+ if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi &&
+ if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi
+else
+
+# If we're going to rename the final executable, determine the name now.
+
+ if [ x"$transformarg" = x ]
+ then
+ dstfile=`basename $dst`
+ else
+ dstfile=`basename $dst $transformbasename |
+ sed $transformarg`$transformbasename
+ fi
+
+# don't allow the sed command to completely eliminate the filename
+
+ if [ x"$dstfile" = x ]
+ then
+ dstfile=`basename $dst`
+ else
+ true
+ fi
+
+# Make a temp file name in the proper directory.
+
+ dsttmp=$dstdir/#inst.$$#
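+# e.g. (illustrative) installing into /usr/local/bin from shell PID 12345
+# stages the file as /usr/local/bin/#inst.12345#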
+
+# Move or copy the file name to the temp name
+
+ $doit $instcmd $src $dsttmp &&
+
+ trap "rm -f ${dsttmp}" 0 &&
+
+# and set any options; do chmod last to preserve setuid bits
+
+# If any of these fail, we abort the whole thing. If we want to
+# ignore errors from any of these, just make sure not to ignore
+# errors from the above "$doit $instcmd $src $dsttmp" command.
+
+ if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi &&
+ if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi &&
+ if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi &&
+ if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi &&
+
+# Now rename the file to the real destination.
+
+ $doit $rmcmd -f $dstdir/$dstfile &&
+ $doit $mvcmd $dsttmp $dstdir/$dstfile
+
+fi &&
+
+
+exit 0
diff --git a/contrib/jemalloc/config.stamp.in b/contrib/jemalloc/config.stamp.in
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/contrib/jemalloc/config.stamp.in
diff --git a/contrib/jemalloc/configure.ac b/contrib/jemalloc/configure.ac
new file mode 100644
index 000000000000..f7285a6154b9
--- /dev/null
+++ b/contrib/jemalloc/configure.ac
@@ -0,0 +1,5079 @@
+<<<<<<< HEAD
+dnl Process this file with autoconf to produce a configure script.
+AC_PREREQ(2.68)
+AC_INIT([Makefile.in])
+
+AC_CONFIG_AUX_DIR([build-aux])
+
+dnl ============================================================================
+dnl Custom macro definitions.
+
+dnl JE_CONCAT_VVV(r, a, b)
+dnl
+dnl Set $r to the concatenation of $a and $b, with a space separating them iff
+dnl both $a and $b are non-empty.
+AC_DEFUN([JE_CONCAT_VVV],
+if test "x[$]{$2}" = "x" -o "x[$]{$3}" = "x" ; then
+ $1="[$]{$2}[$]{$3}"
+else
+ $1="[$]{$2} [$]{$3}"
+fi
+)
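+
+dnl For example (illustrative): JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS,
+dnl SPECIFIED_CFLAGS) yields CFLAGS="-O2 -g" when the inputs are "-O2" and
+dnl "-g", and CFLAGS="-O2" when SPECIFIED_CFLAGS is empty (no stray space).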
+
+dnl JE_APPEND_VS(a, b)
+dnl
+dnl Set $a to the concatenation of $a and b, with a space separating them iff
+dnl both $a and b are non-empty.
+AC_DEFUN([JE_APPEND_VS],
+ T_APPEND_V=$2
+ JE_CONCAT_VVV($1, $1, T_APPEND_V)
+)
+
+CONFIGURE_CFLAGS=
+SPECIFIED_CFLAGS="${CFLAGS}"
+dnl JE_CFLAGS_ADD(cflag)
+dnl
+dnl CFLAGS is the concatenation of CONFIGURE_CFLAGS and SPECIFIED_CFLAGS
+dnl (ignoring EXTRA_CFLAGS, which does not impact configure tests). This macro
+dnl appends to CONFIGURE_CFLAGS and regenerates CFLAGS.
+AC_DEFUN([JE_CFLAGS_ADD],
+[
+AC_MSG_CHECKING([whether compiler supports $1])
+T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
+JE_APPEND_VS(CONFIGURE_CFLAGS, $1)
+JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
+[[
+]], [[
+ return 0;
+]])],
+ [je_cv_cflags_added=$1]
+ AC_MSG_RESULT([yes]),
+ [je_cv_cflags_added=]
+ AC_MSG_RESULT([no])
+ [CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"]
+)
+JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
+])
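+
+dnl For example (illustrative): JE_CFLAGS_ADD([-Wall]) compiles an empty
+dnl program with the candidate flag appended; if the probe fails,
+dnl CONFIGURE_CFLAGS is rolled back to the value saved above.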
+
+dnl JE_CFLAGS_SAVE()
+dnl JE_CFLAGS_RESTORE()
+dnl
+dnl Save/restore CFLAGS. Nesting is not supported.
+AC_DEFUN([JE_CFLAGS_SAVE],
+SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
+)
+AC_DEFUN([JE_CFLAGS_RESTORE],
+CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
+JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
+)
+
+CONFIGURE_CXXFLAGS=
+SPECIFIED_CXXFLAGS="${CXXFLAGS}"
+dnl JE_CXXFLAGS_ADD(cxxflag)
+AC_DEFUN([JE_CXXFLAGS_ADD],
+[
+AC_MSG_CHECKING([whether compiler supports $1])
+T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
+JE_APPEND_VS(CONFIGURE_CXXFLAGS, $1)
+JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS)
+AC_LANG_PUSH([C++])
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
+[[
+]], [[
+ return 0;
+]])],
+ [je_cv_cxxflags_added=$1]
+ AC_MSG_RESULT([yes]),
+ [je_cv_cxxflags_added=]
+ AC_MSG_RESULT([no])
+ [CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"]
+)
+AC_LANG_POP([C++])
+JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS)
+])
+
+dnl JE_COMPILABLE(label, hcode, mcode, rvar)
+dnl
+dnl Use AC_LINK_IFELSE() rather than AC_COMPILE_IFELSE() so that linker errors
+dnl cause failure.
+AC_DEFUN([JE_COMPILABLE],
+[
+AC_CACHE_CHECK([whether $1 is compilable],
+ [$4],
+ [AC_LINK_IFELSE([AC_LANG_PROGRAM([$2],
+ [$3])],
+ [$4=yes],
+ [$4=no])])
+])
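+
+dnl For example (illustrative, mirroring a use further below):
+dnl JE_COMPILABLE([pause instruction], [],
+dnl               [[__asm__ volatile("pause"); return 0;]], [je_cv_pause])
+dnl caches the link-test result in $je_cv_pause.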
+
+dnl ============================================================================
+
+CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'`
+AC_SUBST([CONFIG])
+
+dnl Library revision.
+rev=2
+AC_SUBST([rev])
+
+srcroot=$srcdir
+if test "x${srcroot}" = "x." ; then
+ srcroot=""
+else
+ srcroot="${srcroot}/"
+fi
+AC_SUBST([srcroot])
+abs_srcroot="`cd \"${srcdir}\"; pwd`/"
+AC_SUBST([abs_srcroot])
+
+objroot=""
+AC_SUBST([objroot])
+abs_objroot="`pwd`/"
+AC_SUBST([abs_objroot])
+
+dnl Munge install path variables.
+case "$prefix" in
+ *\ * ) AC_MSG_ERROR([Prefix should not contain spaces]) ;;
+ "NONE" ) prefix="/usr/local" ;;
+esac
+case "$exec_prefix" in
+ *\ * ) AC_MSG_ERROR([Exec prefix should not contain spaces]) ;;
+ "NONE" ) exec_prefix=$prefix ;;
+esac
+PREFIX=$prefix
+AC_SUBST([PREFIX])
+BINDIR=`eval echo $bindir`
+BINDIR=`eval echo $BINDIR`
+AC_SUBST([BINDIR])
+INCLUDEDIR=`eval echo $includedir`
+INCLUDEDIR=`eval echo $INCLUDEDIR`
+AC_SUBST([INCLUDEDIR])
+LIBDIR=`eval echo $libdir`
+LIBDIR=`eval echo $LIBDIR`
+AC_SUBST([LIBDIR])
+DATADIR=`eval echo $datadir`
+DATADIR=`eval echo $DATADIR`
+AC_SUBST([DATADIR])
+MANDIR=`eval echo $mandir`
+MANDIR=`eval echo $MANDIR`
+AC_SUBST([MANDIR])
+
+dnl Support for building documentation.
+AC_PATH_PROG([XSLTPROC], [xsltproc], [false], [$PATH])
+if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then
+ DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl"
+elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then
+ DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets"
+else
+ dnl Documentation building will fail if this default gets used.
+ DEFAULT_XSLROOT=""
+fi
+AC_ARG_WITH([xslroot],
+ [AS_HELP_STRING([--with-xslroot=<path>], [XSL stylesheet root path])], [
+if test "x$with_xslroot" = "xno" ; then
+ XSLROOT="${DEFAULT_XSLROOT}"
+else
+ XSLROOT="${with_xslroot}"
+fi
+],
+ XSLROOT="${DEFAULT_XSLROOT}"
+)
+if test "x$XSLTPROC" = "xfalse" ; then
+ XSLROOT=""
+fi
+AC_SUBST([XSLROOT])
+
+dnl If CFLAGS isn't defined, set CFLAGS to something reasonable. Otherwise,
+dnl just prevent autoconf from molesting CFLAGS.
+CFLAGS=$CFLAGS
+AC_PROG_CC
+
+if test "x$GCC" != "xyes" ; then
+ AC_CACHE_CHECK([whether compiler is MSVC],
+ [je_cv_msvc],
+ [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
+ [
+#ifndef _MSC_VER
+ int fail[-1];
+#endif
+])],
+ [je_cv_msvc=yes],
+ [je_cv_msvc=no])])
+fi
+
+dnl check if a cray prgenv wrapper compiler is being used
+je_cv_cray_prgenv_wrapper=""
+if test "x${PE_ENV}" != "x" ; then
+ case "${CC}" in
+ CC|cc)
+ je_cv_cray_prgenv_wrapper="yes"
+ ;;
+ *)
+ ;;
+ esac
+fi
+
+AC_CACHE_CHECK([whether compiler is cray],
+ [je_cv_cray],
+ [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
+ [
+#ifndef _CRAYC
+ int fail[-1];
+#endif
+])],
+ [je_cv_cray=yes],
+ [je_cv_cray=no])])
+
+if test "x${je_cv_cray}" = "xyes" ; then
+ AC_CACHE_CHECK([whether cray compiler version is 8.4],
+ [je_cv_cray_84],
+ [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
+ [
+#if !(_RELEASE_MAJOR == 8 && _RELEASE_MINOR == 4)
+ int fail[-1];
+#endif
+])],
+ [je_cv_cray_84=yes],
+ [je_cv_cray_84=no])])
+fi
+
+if test "x$GCC" = "xyes" ; then
+ JE_CFLAGS_ADD([-std=gnu11])
+ if test "x$je_cv_cflags_added" = "x-std=gnu11" ; then
+ AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT], [ ], [ ])
+ else
+ JE_CFLAGS_ADD([-std=gnu99])
+ if test "x$je_cv_cflags_added" = "x-std=gnu99" ; then
+ AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT], [ ], [ ])
+ fi
+ fi
+ JE_CFLAGS_ADD([-Werror=unknown-warning-option])
+ JE_CFLAGS_ADD([-Wall])
+ JE_CFLAGS_ADD([-Wextra])
+ JE_CFLAGS_ADD([-Wshorten-64-to-32])
+ JE_CFLAGS_ADD([-Wsign-compare])
+ JE_CFLAGS_ADD([-Wundef])
+ JE_CFLAGS_ADD([-Wno-format-zero-length])
+ JE_CFLAGS_ADD([-Wpointer-arith])
+ dnl This warning triggers on the use of the universal zero initializer, which
+ dnl is a very handy idiom for things like the tcache static initializer (which
+ dnl has lots of nested structs). See the discussion at
+ dnl https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53119
+ JE_CFLAGS_ADD([-Wno-missing-braces])
+ dnl This one too.
+ JE_CFLAGS_ADD([-Wno-missing-field-initializers])
+ JE_CFLAGS_ADD([-Wno-missing-attributes])
+ JE_CFLAGS_ADD([-pipe])
+ JE_CFLAGS_ADD([-g3])
+elif test "x$je_cv_msvc" = "xyes" ; then
+ CC="$CC -nologo"
+ JE_CFLAGS_ADD([-Zi])
+ JE_CFLAGS_ADD([-MT])
+ JE_CFLAGS_ADD([-W3])
+ JE_CFLAGS_ADD([-FS])
+ JE_APPEND_VS(CPPFLAGS, -I${srcdir}/include/msvc_compat)
+fi
+if test "x$je_cv_cray" = "xyes" ; then
+ dnl cray compiler 8.4 has an inlining bug
+ if test "x$je_cv_cray_84" = "xyes" ; then
+ JE_CFLAGS_ADD([-hipa2])
+ JE_CFLAGS_ADD([-hnognu])
+ fi
+ dnl ignore unreachable code warning
+ JE_CFLAGS_ADD([-hnomessage=128])
+ dnl ignore redefinition of "malloc", "free", etc warning
+ JE_CFLAGS_ADD([-hnomessage=1357])
+fi
+AC_SUBST([CONFIGURE_CFLAGS])
+AC_SUBST([SPECIFIED_CFLAGS])
+AC_SUBST([EXTRA_CFLAGS])
+AC_PROG_CPP
+
+AC_ARG_ENABLE([cxx],
+ [AS_HELP_STRING([--disable-cxx], [Disable C++ integration])],
+if test "x$enable_cxx" = "xno" ; then
+ enable_cxx="0"
+else
+ enable_cxx="1"
+fi
+,
+enable_cxx="1"
+)
+if test "x$enable_cxx" = "x1" ; then
+ dnl Require at least C++14, which is the first version to support sized
+ dnl deallocation. C++ support is not compiled otherwise.
+ m4_include([m4/ax_cxx_compile_stdcxx.m4])
+ AX_CXX_COMPILE_STDCXX([17], [noext], [optional])
+ if test "x${HAVE_CXX17}" != "x1"; then
+ AX_CXX_COMPILE_STDCXX([14], [noext], [optional])
+ fi
+ if test "x${HAVE_CXX14}" = "x1" -o "x${HAVE_CXX17}" = "x1"; then
+ JE_CXXFLAGS_ADD([-Wall])
+ JE_CXXFLAGS_ADD([-Wextra])
+ JE_CXXFLAGS_ADD([-g3])
+
+ SAVED_LIBS="${LIBS}"
+ JE_APPEND_VS(LIBS, -lstdc++)
+ JE_COMPILABLE([libstdc++ linkage], [
+#include <stdlib.h>
+], [[
+ int *arr = (int *)malloc(sizeof(int) * 42);
+ if (arr == NULL)
+ return 1;
+]], [je_cv_libstdcxx])
+ if test "x${je_cv_libstdcxx}" = "xno" ; then
+ LIBS="${SAVED_LIBS}"
+ fi
+ else
+ enable_cxx="0"
+ fi
+fi
+if test "x$enable_cxx" = "x1"; then
+ AC_DEFINE([JEMALLOC_ENABLE_CXX], [ ], [ ])
+fi
+AC_SUBST([enable_cxx])
+AC_SUBST([CONFIGURE_CXXFLAGS])
+AC_SUBST([SPECIFIED_CXXFLAGS])
+AC_SUBST([EXTRA_CXXFLAGS])
+
+AC_C_BIGENDIAN([ac_cv_big_endian=1], [ac_cv_big_endian=0])
+if test "x${ac_cv_big_endian}" = "x1" ; then
+ AC_DEFINE_UNQUOTED([JEMALLOC_BIG_ENDIAN], [ ], [ ])
+fi
+
+if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then
+ JE_APPEND_VS(CPPFLAGS, -I${srcdir}/include/msvc_compat/C99)
+fi
+
+if test "x${je_cv_msvc}" = "xyes" ; then
+ LG_SIZEOF_PTR=LG_SIZEOF_PTR_WIN
+ AC_MSG_RESULT([Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit])
+else
+ AC_CHECK_SIZEOF([void *])
+ if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
+ LG_SIZEOF_PTR=3
+ elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then
+ LG_SIZEOF_PTR=2
+ else
+ AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}])
+ fi
+fi
+AC_DEFINE_UNQUOTED([LG_SIZEOF_PTR], [$LG_SIZEOF_PTR], [ ])
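+
+dnl Illustrative: LG_SIZEOF_PTR is log2 of the pointer size, so an 8-byte
+dnl pointer gives 3 (1 << 3 == 8) and a 4-byte pointer gives 2.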
+
+AC_CHECK_SIZEOF([int])
+if test "x${ac_cv_sizeof_int}" = "x8" ; then
+ LG_SIZEOF_INT=3
+elif test "x${ac_cv_sizeof_int}" = "x4" ; then
+ LG_SIZEOF_INT=2
+else
+ AC_MSG_ERROR([Unsupported int size: ${ac_cv_sizeof_int}])
+fi
+AC_DEFINE_UNQUOTED([LG_SIZEOF_INT], [$LG_SIZEOF_INT], [ ])
+
+AC_CHECK_SIZEOF([long])
+if test "x${ac_cv_sizeof_long}" = "x8" ; then
+ LG_SIZEOF_LONG=3
+elif test "x${ac_cv_sizeof_long}" = "x4" ; then
+ LG_SIZEOF_LONG=2
+else
+ AC_MSG_ERROR([Unsupported long size: ${ac_cv_sizeof_long}])
+fi
+AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG], [$LG_SIZEOF_LONG], [ ])
+
+AC_CHECK_SIZEOF([long long])
+if test "x${ac_cv_sizeof_long_long}" = "x8" ; then
+ LG_SIZEOF_LONG_LONG=3
+elif test "x${ac_cv_sizeof_long_long}" = "x4" ; then
+ LG_SIZEOF_LONG_LONG=2
+else
+ AC_MSG_ERROR([Unsupported long long size: ${ac_cv_sizeof_long_long}])
+fi
+AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG_LONG], [$LG_SIZEOF_LONG_LONG], [ ])
+
+AC_CHECK_SIZEOF([intmax_t])
+if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then
+ LG_SIZEOF_INTMAX_T=4
+elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then
+ LG_SIZEOF_INTMAX_T=3
+elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then
+ LG_SIZEOF_INTMAX_T=2
+else
+ AC_MSG_ERROR([Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}])
+fi
+AC_DEFINE_UNQUOTED([LG_SIZEOF_INTMAX_T], [$LG_SIZEOF_INTMAX_T], [ ])
+
+AC_CANONICAL_HOST
+dnl CPU-specific settings.
+CPU_SPINWAIT=""
+case "${host_cpu}" in
+ i686|x86_64)
+ HAVE_CPU_SPINWAIT=1
+ if test "x${je_cv_msvc}" = "xyes" ; then
+ AC_CACHE_VAL([je_cv_pause_msvc],
+ [JE_COMPILABLE([pause instruction MSVC], [],
+ [[_mm_pause(); return 0;]],
+ [je_cv_pause_msvc])])
+ if test "x${je_cv_pause_msvc}" = "xyes" ; then
+ CPU_SPINWAIT='_mm_pause()'
+ fi
+ else
+ AC_CACHE_VAL([je_cv_pause],
+ [JE_COMPILABLE([pause instruction], [],
+ [[__asm__ volatile("pause"); return 0;]],
+ [je_cv_pause])])
+ if test "x${je_cv_pause}" = "xyes" ; then
+ CPU_SPINWAIT='__asm__ volatile("pause")'
+ fi
+ fi
+ ;;
+ aarch64|arm*)
+ HAVE_CPU_SPINWAIT=1
+ dnl On ARM, isb is the closest equivalent to the x86 pause instruction.
+ AC_CACHE_VAL([je_cv_isb],
+ [JE_COMPILABLE([isb instruction], [],
+ [[__asm__ volatile("isb"); return 0;]],
+ [je_cv_isb])])
+ if test "x${je_cv_isb}" = "xyes" ; then
+ CPU_SPINWAIT='__asm__ volatile("isb")'
+ fi
+ ;;
+ *)
+ HAVE_CPU_SPINWAIT=0
+ ;;
+esac
+AC_DEFINE_UNQUOTED([HAVE_CPU_SPINWAIT], [$HAVE_CPU_SPINWAIT], [ ])
+AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT], [ ])
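+dnl Sketch of the assumed consumer pattern (illustrative only; the real spin
+dnl loops live in the sources):
+dnl   while (!done) {
+dnl     CPU_SPINWAIT; /* _mm_pause()/"pause"/"isb", or empty if unsupported */
+dnl   }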
+
+AC_ARG_WITH([lg_vaddr],
+ [AS_HELP_STRING([--with-lg-vaddr=<lg-vaddr>], [Number of significant virtual address bits])],
+ [LG_VADDR="$with_lg_vaddr"], [LG_VADDR="detect"])
+
+case "${host_cpu}" in
+ aarch64)
+ if test "x$LG_VADDR" = "xdetect"; then
+ AC_MSG_CHECKING([number of significant virtual address bits])
+ if test "x${LG_SIZEOF_PTR}" = "x2" ; then
+ #aarch64 ILP32
+ LG_VADDR=32
+ else
+ #aarch64 LP64
+ LG_VADDR=48
+ fi
+ AC_MSG_RESULT([$LG_VADDR])
+ fi
+ ;;
+ x86_64)
+ if test "x$LG_VADDR" = "xdetect"; then
+ AC_CACHE_CHECK([number of significant virtual address bits],
+ [je_cv_lg_vaddr],
+ AC_RUN_IFELSE([AC_LANG_PROGRAM(
+[[
+#include <stdio.h>
+#ifdef _WIN32
+#include <limits.h>
+#include <intrin.h>
+typedef unsigned __int32 uint32_t;
+#else
+#include <stdint.h>
+#endif
+]], [[
+ uint32_t r[[4]];
+ uint32_t eax_in = 0x80000008U;
+#ifdef _WIN32
+ __cpuid((int *)r, (int)eax_in);
+#else
+ asm volatile ("cpuid"
+ : "=a" (r[[0]]), "=b" (r[[1]]), "=c" (r[[2]]), "=d" (r[[3]])
+ : "a" (eax_in), "c" (0)
+ );
+#endif
+ uint32_t eax_out = r[[0]];
+ uint32_t vaddr = ((eax_out & 0x0000ff00U) >> 8);
+ FILE *f = fopen("conftest.out", "w");
+ if (f == NULL) {
+ return 1;
+ }
+ if (vaddr > (sizeof(void *) << 3)) {
+ vaddr = sizeof(void *) << 3;
+ }
+ fprintf(f, "%u", vaddr);
+ fclose(f);
+ return 0;
+]])],
+ [je_cv_lg_vaddr=`cat conftest.out`],
+ [je_cv_lg_vaddr=error],
+ [je_cv_lg_vaddr=57]))
+ if test "x${je_cv_lg_vaddr}" != "x" ; then
+ LG_VADDR="${je_cv_lg_vaddr}"
+ fi
+ if test "x${LG_VADDR}" != "xerror" ; then
+ AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR], [ ])
+ else
+ AC_MSG_ERROR([cannot determine number of significant virtual address bits])
+ fi
+ fi
+ ;;
+ *)
+ if test "x$LG_VADDR" = "xdetect"; then
+ AC_MSG_CHECKING([number of significant virtual address bits])
+ if test "x${LG_SIZEOF_PTR}" = "x3" ; then
+ LG_VADDR=64
+ elif test "x${LG_SIZEOF_PTR}" = "x2" ; then
+ LG_VADDR=32
+ elif test "x${LG_SIZEOF_PTR}" = "xLG_SIZEOF_PTR_WIN" ; then
+ LG_VADDR="(1U << (LG_SIZEOF_PTR_WIN+3))"
+ else
+ AC_MSG_ERROR([Unsupported lg(pointer size): ${LG_SIZEOF_PTR}])
+ fi
+ AC_MSG_RESULT([$LG_VADDR])
+ fi
+ ;;
+esac
+AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR], [ ])
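+dnl Worked example (illustrative only): on x86_64, CPUID leaf 0x80000008
+dnl reports the virtual address width in EAX bits 15:8, commonly 48 (4-level
+dnl paging) or 57 (5-level paging); the probe above clamps it to
+dnl sizeof(void *) * 8, and cross compilation falls back to 57.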
+
+LD_PRELOAD_VAR="LD_PRELOAD"
+so="so"
+importlib="${so}"
+o="$ac_objext"
+a="a"
+exe="$ac_exeext"
+libprefix="lib"
+link_whole_archive="0"
+DSO_LDFLAGS='-shared -Wl,-soname,$(@F)'
+RPATH='-Wl,-rpath,$(1)'
+SOREV="${so}.${rev}"
+PIC_CFLAGS='-fPIC -DPIC'
+CTARGET='-o $@'
+LDTARGET='-o $@'
+TEST_LD_MODE=
+EXTRA_LDFLAGS=
+ARFLAGS='crus'
+AROUT=' $@'
+CC_MM=1
+
+if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
+ TEST_LD_MODE='-dynamic'
+fi
+
+if test "x${je_cv_cray}" = "xyes" ; then
+ CC_MM=
+fi
+
+AN_MAKEVAR([AR], [AC_PROG_AR])
+AN_PROGRAM([ar], [AC_PROG_AR])
+AC_DEFUN([AC_PROG_AR], [AC_CHECK_TOOL(AR, ar, :)])
+AC_PROG_AR
+
+AN_MAKEVAR([NM], [AC_PROG_NM])
+AN_PROGRAM([nm], [AC_PROG_NM])
+AC_DEFUN([AC_PROG_NM], [AC_CHECK_TOOL(NM, nm, :)])
+AC_PROG_NM
+
+AC_PROG_AWK
+
+dnl ============================================================================
+dnl jemalloc version.
+dnl
+
+AC_ARG_WITH([version],
+ [AS_HELP_STRING([--with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid>],
+ [Version string])],
+ [
+ echo "${with_version}" | grep ['^[0-9]\+\.[0-9]\+\.[0-9]\+-[0-9]\+-g[0-9a-f]\+$'] 2>&1 1>/dev/null
+ if test $? -eq 0 ; then
+ echo "$with_version" > "${objroot}VERSION"
+ else
+ echo "${with_version}" | grep ['^VERSION$'] 2>&1 1>/dev/null
+ if test $? -ne 0 ; then
+ AC_MSG_ERROR([${with_version} does not match <major>.<minor>.<bugfix>-<nrev>-g<gid> or VERSION])
+ fi
+ fi
+ ], [
+ dnl Set VERSION if source directory is inside a git repository.
+ if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
+ dnl Pattern globs aren't powerful enough to match both single- and
+ dnl double-digit version numbers, so iterate over patterns to support up
+ dnl to version 99.99.99 without any accidental matches.
+ for pattern in ['[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \
+ '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \
+ '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \
+ '[0-9][0-9].[0-9][0-9].[0-9]' \
+ '[0-9][0-9].[0-9][0-9].[0-9][0-9]']; do
+ (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null
+ if test $? -eq 0 ; then
+ mv "${objroot}VERSION.tmp" "${objroot}VERSION"
+ break
+ fi
+ done
+ fi
+ rm -f "${objroot}VERSION.tmp"
+ ])
+
+if test ! -e "${objroot}VERSION" ; then
+ if test ! -e "${srcroot}VERSION" ; then
+ AC_MSG_RESULT(
+ [Missing VERSION file, and unable to generate it; creating bogus VERSION])
+ echo "0.0.0-0-g000000missing_version_try_git_fetch_tags" > "${objroot}VERSION"
+ else
+ cp ${srcroot}VERSION ${objroot}VERSION
+ fi
+fi
+jemalloc_version=`cat "${objroot}VERSION"`
+jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]1}'`
+jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]2}'`
+jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]3}'`
+jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]4}'`
+jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]5}'`
+AC_SUBST([jemalloc_version])
+AC_SUBST([jemalloc_version_major])
+AC_SUBST([jemalloc_version_minor])
+AC_SUBST([jemalloc_version_bugfix])
+AC_SUBST([jemalloc_version_nrev])
+AC_SUBST([jemalloc_version_gid])
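+dnl Worked example (illustrative only, hypothetical gid): a git-derived
+dnl VERSION such as "5.3.0-0-g0123abcd" splits on '.', '-', and 'g' into
+dnl major=5, minor=3, bugfix=0, nrev=0, and gid="0123abcd".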
+
+dnl Platform-specific settings. abi and RPATH can probably be determined
+dnl programmatically, but doing so is error-prone, which makes it generally
+dnl not worth the trouble.
+dnl
+dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
+dnl definitions need to be seen before any headers are included, which is a pain
+dnl to make happen otherwise.
+default_retain="0"
+zero_realloc_default_free="0"
+maps_coalesce="1"
+DUMP_SYMS="${NM} -a"
+SYM_PREFIX=""
+case "${host}" in
+ *-*-darwin* | *-*-ios*)
+ abi="macho"
+ RPATH=""
+ LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES"
+ so="dylib"
+ importlib="${so}"
+ force_tls="0"
+ DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)'
+ SOREV="${rev}.${so}"
+ sbrk_deprecated="1"
+ SYM_PREFIX="_"
+ ;;
+ *-*-freebsd*)
+ JE_APPEND_VS(CPPFLAGS, -D_BSD_SOURCE)
+ abi="elf"
+ AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ], [ ])
+ force_lazy_lock="1"
+ ;;
+ *-*-dragonfly*)
+ abi="elf"
+ ;;
+ *-*-openbsd*)
+ abi="elf"
+ force_tls="0"
+ ;;
+ *-*-bitrig*)
+ abi="elf"
+ ;;
+ *-*-linux-android*)
+ dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
+ JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
+ abi="elf"
+ glibc="0"
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS], [ ], [ ])
+ AC_DEFINE([JEMALLOC_HAS_ALLOCA_H], [ ], [ ])
+ AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ], [ ])
+ AC_DEFINE([JEMALLOC_THREADED_INIT], [ ], [ ])
+ AC_DEFINE([JEMALLOC_C11_ATOMICS], [ ], [ ])
+ force_tls="0"
+ if test "${LG_SIZEOF_PTR}" = "3"; then
+ default_retain="1"
+ fi
+ zero_realloc_default_free="1"
+ ;;
+ *-*-linux*)
+ dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
+ JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
+ abi="elf"
+ glibc="1"
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS], [ ], [ ])
+ AC_DEFINE([JEMALLOC_HAS_ALLOCA_H], [ ], [ ])
+ AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ], [ ])
+ AC_DEFINE([JEMALLOC_THREADED_INIT], [ ], [ ])
+ AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ], [ ])
+ if test "${LG_SIZEOF_PTR}" = "3"; then
+ default_retain="1"
+ fi
+ zero_realloc_default_free="1"
+ ;;
+ *-*-kfreebsd*)
+ dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
+ JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
+ abi="elf"
+ AC_DEFINE([JEMALLOC_HAS_ALLOCA_H], [ ], [ ])
+ AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ], [ ])
+ AC_DEFINE([JEMALLOC_THREADED_INIT], [ ], [ ])
+ AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ], [ ])
+ ;;
+ *-*-netbsd*)
+ AC_MSG_CHECKING([ABI])
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
+[[#ifdef __ELF__
+/* ELF */
+#else
+#error aout
+#endif
+]])],
+ [abi="elf"],
+ [abi="aout"])
+ AC_MSG_RESULT([$abi])
+ ;;
+ *-*-solaris2*)
+ abi="elf"
+ RPATH='-Wl,-R,$(1)'
+ dnl Solaris needs this for sigwait().
+ JE_APPEND_VS(CPPFLAGS, -D_POSIX_PTHREAD_SEMANTICS)
+ JE_APPEND_VS(LIBS, -lposix4 -lsocket -lnsl)
+ ;;
+ *-ibm-aix*)
+ if test "${LG_SIZEOF_PTR}" = "3"; then
+ dnl 64bit AIX
+ LD_PRELOAD_VAR="LDR_PRELOAD64"
+ else
+ dnl 32bit AIX
+ LD_PRELOAD_VAR="LDR_PRELOAD"
+ fi
+ abi="xcoff"
+ ;;
+ *-*-mingw* | *-*-cygwin*)
+ abi="pecoff"
+ force_tls="0"
+ maps_coalesce="0"
+ RPATH=""
+ so="dll"
+ if test "x$je_cv_msvc" = "xyes" ; then
+ importlib="lib"
+ DSO_LDFLAGS="-LD"
+ EXTRA_LDFLAGS="-link -DEBUG"
+ CTARGET='-Fo$@'
+ LDTARGET='-Fe$@'
+ AR='lib'
+ ARFLAGS='-nologo -out:'
+ AROUT='$@'
+ CC_MM=
+ else
+ importlib="${so}"
+ DSO_LDFLAGS="-shared"
+ link_whole_archive="1"
+ fi
+ case "${host}" in
+ *-*-cygwin*)
+ DUMP_SYMS="dumpbin /SYMBOLS"
+ ;;
+ *)
+ ;;
+ esac
+ a="lib"
+ libprefix=""
+ SOREV="${so}"
+ PIC_CFLAGS=""
+ if test "${LG_SIZEOF_PTR}" = "3"; then
+ default_retain="1"
+ fi
+ zero_realloc_default_free="1"
+ ;;
+ *-*-nto-qnx)
+ abi="elf"
+ force_tls="0"
+ AC_DEFINE([JEMALLOC_HAS_ALLOCA_H], [ ], [ ])
+ ;;
+ *)
+ AC_MSG_RESULT([Unsupported operating system: ${host}])
+ abi="elf"
+ ;;
+esac
+
+JEMALLOC_USABLE_SIZE_CONST=const
+AC_CHECK_HEADERS([malloc.h], [
+ AC_MSG_CHECKING([whether malloc_usable_size definition can use const argument])
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
+ [#include <malloc.h>
+ #include <stddef.h>
+ size_t malloc_usable_size(const void *ptr);
+ ],
+ [])],[
+ AC_MSG_RESULT([yes])
+ ],[
+ JEMALLOC_USABLE_SIZE_CONST=
+ AC_MSG_RESULT([no])
+ ])
+])
+AC_DEFINE_UNQUOTED([JEMALLOC_USABLE_SIZE_CONST], [$JEMALLOC_USABLE_SIZE_CONST], [ ])
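+dnl Illustrative effect (an assumed consumer prototype, not generated here):
+dnl the macro lets the public declaration expand to either
+dnl   size_t malloc_usable_size(const void *ptr);
+dnl or, when the const-qualified check above fails,
+dnl   size_t malloc_usable_size(void *ptr);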
+AC_SUBST([abi])
+AC_SUBST([RPATH])
+AC_SUBST([LD_PRELOAD_VAR])
+AC_SUBST([so])
+AC_SUBST([importlib])
+AC_SUBST([o])
+AC_SUBST([a])
+AC_SUBST([exe])
+AC_SUBST([libprefix])
+AC_SUBST([link_whole_archive])
+AC_SUBST([DSO_LDFLAGS])
+AC_SUBST([EXTRA_LDFLAGS])
+AC_SUBST([SOREV])
+AC_SUBST([PIC_CFLAGS])
+AC_SUBST([CTARGET])
+AC_SUBST([LDTARGET])
+AC_SUBST([TEST_LD_MODE])
+AC_SUBST([MKLIB])
+AC_SUBST([ARFLAGS])
+AC_SUBST([AROUT])
+AC_SUBST([DUMP_SYMS])
+AC_SUBST([CC_MM])
+
+dnl Determine whether libm must be linked to use e.g. log(3).
+AC_SEARCH_LIBS([log], [m], , [AC_MSG_ERROR([Missing math functions])])
+if test "x$ac_cv_search_log" != "xnone required" ; then
+ LM="$ac_cv_search_log"
+else
+ LM=
+fi
+AC_SUBST(LM)
+
+JE_COMPILABLE([__attribute__ syntax],
+ [static __attribute__((unused)) void foo(void){}],
+ [],
+ [je_cv_attribute])
+if test "x${je_cv_attribute}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR], [ ], [ ])
+ if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then
+ JE_CFLAGS_ADD([-fvisibility=hidden])
+ JE_CXXFLAGS_ADD([-fvisibility=hidden])
+ fi
+fi
+dnl Check for tls_model attribute support (clang 3.0 still lacks support).
+JE_CFLAGS_SAVE()
+JE_CFLAGS_ADD([-Werror])
+JE_CFLAGS_ADD([-herror_on_warning])
+JE_COMPILABLE([tls_model attribute], [],
+ [static __thread int
+ __attribute__((tls_model("initial-exec"), unused)) foo;
+ foo = 0;],
+ [je_cv_tls_model])
+JE_CFLAGS_RESTORE()
+dnl (Setting of JEMALLOC_TLS_MODEL is done later, after we've checked for
+dnl --disable-initial-exec-tls)
+
+dnl Check for alloc_size attribute support.
+JE_CFLAGS_SAVE()
+JE_CFLAGS_ADD([-Werror])
+JE_CFLAGS_ADD([-herror_on_warning])
+JE_COMPILABLE([alloc_size attribute], [#include <stdlib.h>],
+ [void *foo(size_t size) __attribute__((alloc_size(1)));],
+ [je_cv_alloc_size])
+JE_CFLAGS_RESTORE()
+if test "x${je_cv_alloc_size}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR_ALLOC_SIZE], [ ], [ ])
+fi
+dnl Check for format(gnu_printf, ...) attribute support.
+JE_CFLAGS_SAVE()
+JE_CFLAGS_ADD([-Werror])
+JE_CFLAGS_ADD([-herror_on_warning])
+JE_COMPILABLE([format(gnu_printf, ...) attribute], [#include <stdlib.h>],
+ [void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));],
+ [je_cv_format_gnu_printf])
+JE_CFLAGS_RESTORE()
+if test "x${je_cv_format_gnu_printf}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF], [ ], [ ])
+fi
+dnl Check for format(printf, ...) attribute support.
+JE_CFLAGS_SAVE()
+JE_CFLAGS_ADD([-Werror])
+JE_CFLAGS_ADD([-herror_on_warning])
+JE_COMPILABLE([format(printf, ...) attribute], [#include <stdlib.h>],
+ [void *foo(const char *format, ...) __attribute__((format(printf, 1, 2)));],
+ [je_cv_format_printf])
+JE_CFLAGS_RESTORE()
+if test "x${je_cv_format_printf}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_PRINTF], [ ], [ ])
+fi
+
+dnl Check for format_arg(...) attribute support.
+JE_CFLAGS_SAVE()
+JE_CFLAGS_ADD([-Werror])
+JE_CFLAGS_ADD([-herror_on_warning])
+JE_COMPILABLE([format_arg(...) attribute], [#include <stdlib.h>],
+ [const char * __attribute__((__format_arg__(1))) foo(const char *format);],
+ [je_cv_format_arg])
+JE_CFLAGS_RESTORE()
+if test "x${je_cv_format_arg}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_ARG], [ ], [ ])
+fi
+
+dnl Check for fallthrough attribute support.
+JE_CFLAGS_SAVE()
+JE_CFLAGS_ADD([-Wimplicit-fallthrough])
+JE_COMPILABLE([fallthrough attribute],
+ [#if !__has_attribute(fallthrough)
+ #error "foo"
+ #endif],
+ [int x = 0;
+ switch (x) {
+ case 0: __attribute__((__fallthrough__));
+ case 1: return 1;
+ }],
+ [je_cv_fallthrough])
+JE_CFLAGS_RESTORE()
+if test "x${je_cv_fallthrough}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR_FALLTHROUGH], [ ], [ ])
+ JE_CFLAGS_ADD([-Wimplicit-fallthrough])
+ JE_CXXFLAGS_ADD([-Wimplicit-fallthrough])
+fi
+
+dnl Check for cold attribute support.
+JE_CFLAGS_SAVE()
+JE_CFLAGS_ADD([-Werror])
+JE_CFLAGS_ADD([-herror_on_warning])
+JE_COMPILABLE([cold attribute], [],
+ [__attribute__((__cold__)) void foo();],
+ [je_cv_cold])
+JE_CFLAGS_RESTORE()
+if test "x${je_cv_cold}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR_COLD], [ ], [ ])
+fi
+
+dnl Check for VM_MAKE_TAG for mmap support.
+JE_COMPILABLE([vm_make_tag],
+ [#include <sys/mman.h>
+ #include <mach/vm_statistics.h>],
+ [void *p;
+ p = mmap(0, 16, PROT_READ, MAP_ANON|MAP_PRIVATE, VM_MAKE_TAG(1), 0);
+ munmap(p, 16);],
+ [je_cv_vm_make_tag])
+if test "x${je_cv_vm_make_tag}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_VM_MAKE_TAG], [ ], [ ])
+fi
+
+dnl Support optional additions to rpath.
+AC_ARG_WITH([rpath],
+ [AS_HELP_STRING([--with-rpath=<rpath>], [Colon-separated rpath (ELF systems only)])],
+if test "x$with_rpath" = "xno" ; then
+ RPATH_EXTRA=
+else
+ RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`"
+fi,
+ RPATH_EXTRA=
+)
+AC_SUBST([RPATH_EXTRA])
+
+dnl Disable rules that do automatic regeneration of configure output by default.
+AC_ARG_ENABLE([autogen],
+ [AS_HELP_STRING([--enable-autogen], [Automatically regenerate configure output])],
+if test "x$enable_autogen" = "xno" ; then
+ enable_autogen="0"
+else
+ enable_autogen="1"
+fi
+,
+enable_autogen="0"
+)
+AC_SUBST([enable_autogen])
+
+AC_PROG_INSTALL
+AC_PROG_RANLIB
+AC_PATH_PROG([LD], [ld], [false], [$PATH])
+AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH])
+
+dnl Enable documentation
+AC_ARG_ENABLE([doc],
+ [AS_HELP_STRING([--enable-doc], [Build documentation])],
+if test "x$enable_doc" = "xno" ; then
+ enable_doc="0"
+else
+ enable_doc="1"
+fi
+,
+enable_doc="1"
+)
+AC_SUBST([enable_doc])
+
+dnl Enable shared libs
+AC_ARG_ENABLE([shared],
+ [AS_HELP_STRING([--enable-shared], [Build shared libraries])],
+if test "x$enable_shared" = "xno" ; then
+ enable_shared="0"
+else
+ enable_shared="1"
+fi
+,
+enable_shared="1"
+)
+AC_SUBST([enable_shared])
+
+dnl Enable static libs
+AC_ARG_ENABLE([static],
+ [AS_HELP_STRING([--enable-static], [Build static libraries])],
+if test "x$enable_static" = "xno" ; then
+ enable_static="0"
+else
+ enable_static="1"
+fi
+,
+enable_static="1"
+)
+AC_SUBST([enable_static])
+
+if test "$enable_shared$enable_static" = "00" ; then
+ AC_MSG_ERROR([Please enable at least one of shared or static builds])
+fi
+
+dnl Perform no name mangling by default.
+AC_ARG_WITH([mangling],
+ [AS_HELP_STRING([--with-mangling=<map>], [Mangle symbols in <map>])],
+ [mangling_map="$with_mangling"], [mangling_map=""])
+
+dnl Do not prefix public APIs by default.
+AC_ARG_WITH([jemalloc_prefix],
+ [AS_HELP_STRING([--with-jemalloc-prefix=<prefix>], [Prefix to prepend to all public APIs])],
+ [JEMALLOC_PREFIX="$with_jemalloc_prefix"],
+ [if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then
+ JEMALLOC_PREFIX=""
+else
+ JEMALLOC_PREFIX="je_"
+fi]
+)
+if test "x$JEMALLOC_PREFIX" = "x" ; then
+ AC_DEFINE([JEMALLOC_IS_MALLOC], [ ], [ ])
+else
+ JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"`
+ AC_DEFINE_UNQUOTED([JEMALLOC_PREFIX], ["$JEMALLOC_PREFIX"], [ ])
+ AC_DEFINE_UNQUOTED([JEMALLOC_CPREFIX], ["$JEMALLOC_CPREFIX"], [ ])
+fi
+AC_SUBST([JEMALLOC_PREFIX])
+AC_SUBST([JEMALLOC_CPREFIX])
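+dnl Worked example (illustrative only): configuring with
+dnl --with-jemalloc-prefix=je_ renames the public entry points, e.g.
+dnl malloc/free become je_malloc/je_free, and JEMALLOC_CPREFIX becomes "JE_".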
+
+AC_ARG_WITH([export],
+ [AS_HELP_STRING([--without-export], [disable exporting jemalloc public APIs])],
+ [if test "x$with_export" = "xno"; then
+ AC_DEFINE([JEMALLOC_EXPORT],[], [ ])
+fi]
+)
+
+public_syms="aligned_alloc calloc dallocx free mallctl mallctlbymib mallctlnametomib malloc malloc_conf malloc_conf_2_conf_harder malloc_message malloc_stats_print malloc_usable_size mallocx smallocx_${jemalloc_version_gid} nallocx posix_memalign rallocx realloc sallocx sdallocx xallocx"
+dnl Check for additional platform-specific public API functions.
+AC_CHECK_FUNC([memalign],
+ [AC_DEFINE([JEMALLOC_OVERRIDE_MEMALIGN], [ ], [ ])
+ public_syms="${public_syms} memalign"])
+AC_CHECK_FUNC([valloc],
+ [AC_DEFINE([JEMALLOC_OVERRIDE_VALLOC], [ ], [ ])
+ public_syms="${public_syms} valloc"])
+AC_CHECK_FUNC([malloc_size],
+ [AC_DEFINE([JEMALLOC_HAVE_MALLOC_SIZE], [ ], [ ])
+ public_syms="${public_syms} malloc_size"])
+
+dnl Check for allocator-related functions that should be wrapped.
+wrap_syms=
+if test "x${JEMALLOC_PREFIX}" = "x" ; then
+ AC_CHECK_FUNC([__libc_calloc],
+ [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_CALLOC], [ ], [ ])
+ wrap_syms="${wrap_syms} __libc_calloc"])
+ AC_CHECK_FUNC([__libc_free],
+ [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_FREE], [ ], [ ])
+ wrap_syms="${wrap_syms} __libc_free"])
+ AC_CHECK_FUNC([__libc_malloc],
+ [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_MALLOC], [ ], [ ])
+ wrap_syms="${wrap_syms} __libc_malloc"])
+ AC_CHECK_FUNC([__libc_memalign],
+ [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_MEMALIGN], [ ], [ ])
+ wrap_syms="${wrap_syms} __libc_memalign"])
+ AC_CHECK_FUNC([__libc_realloc],
+ [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_REALLOC], [ ], [ ])
+ wrap_syms="${wrap_syms} __libc_realloc"])
+ AC_CHECK_FUNC([__libc_valloc],
+ [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_VALLOC], [ ], [ ])
+ wrap_syms="${wrap_syms} __libc_valloc"])
+ AC_CHECK_FUNC([__posix_memalign],
+ [AC_DEFINE([JEMALLOC_OVERRIDE___POSIX_MEMALIGN], [ ], [ ])
+ wrap_syms="${wrap_syms} __posix_memalign"])
+fi
+
+case "${host}" in
+ *-*-mingw* | *-*-cygwin*)
+ wrap_syms="${wrap_syms} tls_callback"
+ ;;
+ *)
+ ;;
+esac
+
+dnl Mangle library-private APIs.
+AC_ARG_WITH([private_namespace],
+ [AS_HELP_STRING([--with-private-namespace=<prefix>], [Prefix to prepend to all library-private APIs])],
+ [JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_"],
+ [JEMALLOC_PRIVATE_NAMESPACE="je_"]
+)
+AC_DEFINE_UNQUOTED([JEMALLOC_PRIVATE_NAMESPACE], [$JEMALLOC_PRIVATE_NAMESPACE], [ ])
+private_namespace="$JEMALLOC_PRIVATE_NAMESPACE"
+AC_SUBST([private_namespace])
+
+dnl Do not add suffix to installed files by default.
+AC_ARG_WITH([install_suffix],
+ [AS_HELP_STRING([--with-install-suffix=<suffix>], [Suffix to append to all installed files])],
+ [case "$with_install_suffix" in
+ *\ * ) AC_MSG_ERROR([Install suffix should not contain spaces]) ;;
+ * ) INSTALL_SUFFIX="$with_install_suffix" ;;
+esac],
+ [INSTALL_SUFFIX=]
+)
+install_suffix="$INSTALL_SUFFIX"
+AC_SUBST([install_suffix])
+
+dnl Specify default malloc_conf.
+AC_ARG_WITH([malloc_conf],
+ [AS_HELP_STRING([--with-malloc-conf=<malloc_conf>], [config.malloc_conf options string])],
+ [JEMALLOC_CONFIG_MALLOC_CONF="$with_malloc_conf"],
+ [JEMALLOC_CONFIG_MALLOC_CONF=""]
+)
+config_malloc_conf="$JEMALLOC_CONFIG_MALLOC_CONF"
+AC_DEFINE_UNQUOTED([JEMALLOC_CONFIG_MALLOC_CONF], ["$config_malloc_conf"], [ ])
+
+dnl Substitute @je_@ in jemalloc_protos.h.in, primarily to make generation of
+dnl jemalloc_protos_jet.h easy.
+je_="je_"
+AC_SUBST([je_])
+
+cfgoutputs_in="Makefile.in"
+cfgoutputs_in="${cfgoutputs_in} jemalloc.pc.in"
+cfgoutputs_in="${cfgoutputs_in} doc/html.xsl.in"
+cfgoutputs_in="${cfgoutputs_in} doc/manpages.xsl.in"
+cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_preamble.h.in"
+cfgoutputs_in="${cfgoutputs_in} test/test.sh.in"
+cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in"
+
+cfgoutputs_out="Makefile"
+cfgoutputs_out="${cfgoutputs_out} jemalloc.pc"
+cfgoutputs_out="${cfgoutputs_out} doc/html.xsl"
+cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl"
+cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_preamble.h"
+cfgoutputs_out="${cfgoutputs_out} test/test.sh"
+cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h"
+
+cfgoutputs_tup="Makefile"
+cfgoutputs_tup="${cfgoutputs_tup} jemalloc.pc:jemalloc.pc.in"
+cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in"
+cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in"
+cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_preamble.h"
+cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in"
+cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in"
+
+cfghdrs_in="include/jemalloc/jemalloc_defs.h.in"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_rename.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_mangle.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc.sh"
+cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in"
+
+cfghdrs_out="include/jemalloc/jemalloc_defs.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols.awk"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols_jet.awk"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h"
+cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h"
+
+cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in"
+cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in"
+cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in"
+
+dnl ============================================================================
+dnl jemalloc build options.
+dnl
+
+dnl Do not compile with debugging by default.
+AC_ARG_ENABLE([debug],
+ [AS_HELP_STRING([--enable-debug],
+ [Build debugging code])],
+[if test "x$enable_debug" = "xno" ; then
+ enable_debug="0"
+else
+ enable_debug="1"
+fi
+],
+[enable_debug="0"]
+)
+if test "x$enable_debug" = "x1" ; then
+ AC_DEFINE([JEMALLOC_DEBUG], [ ], [ ])
+fi
+AC_SUBST([enable_debug])
+
+dnl Only optimize if not debugging.
+if test "x$enable_debug" = "x0" ; then
+ if test "x$GCC" = "xyes" ; then
+ JE_CFLAGS_ADD([-O3])
+ JE_CXXFLAGS_ADD([-O3])
+ JE_CFLAGS_ADD([-funroll-loops])
+ elif test "x$je_cv_msvc" = "xyes" ; then
+ JE_CFLAGS_ADD([-O2])
+ JE_CXXFLAGS_ADD([-O2])
+ else
+ JE_CFLAGS_ADD([-O])
+ JE_CXXFLAGS_ADD([-O])
+ fi
+fi
+
+dnl Enable statistics calculation by default.
+AC_ARG_ENABLE([stats],
+ [AS_HELP_STRING([--disable-stats],
+ [Disable statistics calculation/reporting])],
+[if test "x$enable_stats" = "xno" ; then
+ enable_stats="0"
+else
+ enable_stats="1"
+fi
+],
+[enable_stats="1"]
+)
+if test "x$enable_stats" = "x1" ; then
+ AC_DEFINE([JEMALLOC_STATS], [ ], [ ])
+fi
+AC_SUBST([enable_stats])
+
+dnl Do not enable smallocx by default.
+AC_ARG_ENABLE([experimental_smallocx],
+ [AS_HELP_STRING([--enable-experimental-smallocx], [Enable experimental smallocx API])],
+[if test "x$enable_experimental_smallocx" = "xno" ; then
+enable_experimental_smallocx="0"
+else
+enable_experimental_smallocx="1"
+fi
+],
+[enable_experimental_smallocx="0"]
+)
+if test "x$enable_experimental_smallocx" = "x1" ; then
+ AC_DEFINE([JEMALLOC_EXPERIMENTAL_SMALLOCX_API], [ ], [ ])
+fi
+AC_SUBST([enable_experimental_smallocx])
+
+dnl Do not enable profiling by default.
+AC_ARG_ENABLE([prof],
+ [AS_HELP_STRING([--enable-prof], [Enable allocation profiling])],
+[if test "x$enable_prof" = "xno" ; then
+ enable_prof="0"
+else
+ enable_prof="1"
+fi
+],
+[enable_prof="0"]
+)
+if test "x$enable_prof" = "x1" ; then
+ backtrace_method=""
+else
+ backtrace_method="N/A"
+fi
+
+AC_ARG_ENABLE([prof-libunwind],
+ [AS_HELP_STRING([--enable-prof-libunwind], [Use libunwind for backtracing])],
+[if test "x$enable_prof_libunwind" = "xno" ; then
+ enable_prof_libunwind="0"
+else
+ enable_prof_libunwind="1"
+ if test "x$enable_prof" = "x0" ; then
+ AC_MSG_ERROR([--enable-prof-libunwind should only be used with --enable-prof])
+ fi
+fi
+],
+[enable_prof_libunwind="0"]
+)
+AC_ARG_WITH([static_libunwind],
+ [AS_HELP_STRING([--with-static-libunwind=<libunwind.a>],
+ [Path to static libunwind library; use rather than dynamically linking])],
+if test "x$with_static_libunwind" = "xno" ; then
+ LUNWIND="-lunwind"
+else
+ if test ! -f "$with_static_libunwind" ; then
+ AC_MSG_ERROR([Static libunwind not found: $with_static_libunwind])
+ fi
+ LUNWIND="$with_static_libunwind"
+fi,
+ LUNWIND="-lunwind"
+)
+if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then
+ AC_CHECK_HEADERS([libunwind.h], , [enable_prof_libunwind="0"])
+ if test "x$LUNWIND" = "x-lunwind" ; then
+ AC_CHECK_LIB([unwind], [unw_backtrace], [JE_APPEND_VS(LIBS, $LUNWIND)],
+ [enable_prof_libunwind="0"])
+ else
+ JE_APPEND_VS(LIBS, $LUNWIND)
+ fi
+ if test "x${enable_prof_libunwind}" = "x1" ; then
+ backtrace_method="libunwind"
+ AC_DEFINE([JEMALLOC_PROF_LIBUNWIND], [ ], [ ])
+ fi
+fi
+
+AC_ARG_ENABLE([prof-libgcc],
+ [AS_HELP_STRING([--disable-prof-libgcc],
+ [Do not use libgcc for backtracing])],
+[if test "x$enable_prof_libgcc" = "xno" ; then
+ enable_prof_libgcc="0"
+else
+ enable_prof_libgcc="1"
+fi
+],
+[enable_prof_libgcc="1"]
+)
+if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \
+ -a "x$GCC" = "xyes" ; then
+ AC_CHECK_HEADERS([unwind.h], , [enable_prof_libgcc="0"])
+ if test "x${enable_prof_libgcc}" = "x1" ; then
+ AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [JE_APPEND_VS(LIBS, -lgcc)], [enable_prof_libgcc="0"])
+ fi
+ if test "x${enable_prof_libgcc}" = "x1" ; then
+ backtrace_method="libgcc"
+ AC_DEFINE([JEMALLOC_PROF_LIBGCC], [ ], [ ])
+ fi
+else
+ enable_prof_libgcc="0"
+fi
+
+AC_ARG_ENABLE([prof-gcc],
+ [AS_HELP_STRING([--disable-prof-gcc],
+ [Do not use gcc intrinsics for backtracing])],
+[if test "x$enable_prof_gcc" = "xno" ; then
+ enable_prof_gcc="0"
+else
+ enable_prof_gcc="1"
+fi
+],
+[enable_prof_gcc="1"]
+)
+if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \
+ -a "x$GCC" = "xyes" ; then
+ JE_CFLAGS_ADD([-fno-omit-frame-pointer])
+ backtrace_method="gcc intrinsics"
+ AC_DEFINE([JEMALLOC_PROF_GCC], [ ], [ ])
+else
+ enable_prof_gcc="0"
+fi
+
+if test "x$backtrace_method" = "x" ; then
+ backtrace_method="none (disabling profiling)"
+ enable_prof="0"
+fi
+AC_MSG_CHECKING([configured backtracing method])
+AC_MSG_RESULT([$backtrace_method])
+if test "x$enable_prof" = "x1" ; then
+ dnl Heap profiling uses the log(3) function.
+ JE_APPEND_VS(LIBS, $LM)
+
+ AC_DEFINE([JEMALLOC_PROF], [ ], [ ])
+fi
+AC_SUBST([enable_prof])
+
+dnl Indicate whether adjacent virtual memory mappings automatically coalesce
+dnl (and fragment on demand).
+if test "x${maps_coalesce}" = "x1" ; then
+ AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ], [ ])
+fi
+
+dnl Indicate whether to retain memory (rather than using munmap()) by default.
+if test "x$default_retain" = "x1" ; then
+ AC_DEFINE([JEMALLOC_RETAIN], [ ], [ ])
+fi
+
+dnl Indicate whether realloc(ptr, 0) defaults to the "free" behavior.
+if test "x$zero_realloc_default_free" = "x1" ; then
+ AC_DEFINE([JEMALLOC_ZERO_REALLOC_DEFAULT_FREE], [ ], [ ])
+fi
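+dnl For illustration: with the "free" default, realloc(ptr, 0) behaves like
+dnl free(ptr) and returns NULL; with the "alloc" default it instead returns a
+dnl minimal-size allocation.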
+
+dnl Enable allocation from DSS if supported by the OS.
+have_dss="1"
+dnl Check whether the BSD/SUSv1 sbrk() exists. If not, disable DSS support.
+AC_CHECK_FUNC([sbrk], [have_sbrk="1"], [have_sbrk="0"])
+if test "x$have_sbrk" = "x1" ; then
+ if test "x$sbrk_deprecated" = "x1" ; then
+ AC_MSG_RESULT([Disabling dss allocation because sbrk is deprecated])
+ have_dss="0"
+ fi
+else
+ have_dss="0"
+fi
+
+if test "x$have_dss" = "x1" ; then
+ AC_DEFINE([JEMALLOC_DSS], [ ], [ ])
+fi
+
+dnl Support the junk/zero filling option by default.
+AC_ARG_ENABLE([fill],
+ [AS_HELP_STRING([--disable-fill], [Disable support for junk/zero filling])],
+[if test "x$enable_fill" = "xno" ; then
+ enable_fill="0"
+else
+ enable_fill="1"
+fi
+],
+[enable_fill="1"]
+)
+if test "x$enable_fill" = "x1" ; then
+ AC_DEFINE([JEMALLOC_FILL], [ ], [ ])
+fi
+AC_SUBST([enable_fill])
+
+dnl Disable utrace(2)-based tracing by default.
+AC_ARG_ENABLE([utrace],
+ [AS_HELP_STRING([--enable-utrace], [Enable utrace(2)-based tracing])],
+[if test "x$enable_utrace" = "xno" ; then
+ enable_utrace="0"
+else
+ enable_utrace="1"
+fi
+],
+[enable_utrace="0"]
+)
+JE_COMPILABLE([utrace(2)], [
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+#include <sys/ktrace.h>
+], [
+ utrace((void *)0, 0);
+], [je_cv_utrace])
+if test "x${je_cv_utrace}" = "xno" ; then
+ JE_COMPILABLE([utrace(2) with label], [
+ #include <sys/types.h>
+ #include <sys/param.h>
+ #include <sys/time.h>
+ #include <sys/uio.h>
+ #include <sys/ktrace.h>
+ ], [
+ utrace((void *)0, (void *)0, 0);
+ ], [je_cv_utrace_label])
+ if test "x${je_cv_utrace_label}" = "xno"; then
+ enable_utrace="0"
+ fi
+ if test "x$enable_utrace" = "x1" ; then
+ AC_DEFINE([JEMALLOC_UTRACE_LABEL], [ ], [ ])
+ fi
+else
+ if test "x$enable_utrace" = "x1" ; then
+ AC_DEFINE([JEMALLOC_UTRACE], [ ], [ ])
+ fi
+fi
+AC_SUBST([enable_utrace])
+
+dnl Do not support the xmalloc option by default.
+AC_ARG_ENABLE([xmalloc],
+ [AS_HELP_STRING([--enable-xmalloc], [Support xmalloc option])],
+[if test "x$enable_xmalloc" = "xno" ; then
+ enable_xmalloc="0"
+else
+ enable_xmalloc="1"
+fi
+],
+[enable_xmalloc="0"]
+)
+if test "x$enable_xmalloc" = "x1" ; then
+ AC_DEFINE([JEMALLOC_XMALLOC], [ ], [ ])
+fi
+AC_SUBST([enable_xmalloc])
+
+dnl Support cache-oblivious allocation alignment by default.
+AC_ARG_ENABLE([cache-oblivious],
+ [AS_HELP_STRING([--disable-cache-oblivious],
+ [Disable support for cache-oblivious allocation alignment])],
+[if test "x$enable_cache_oblivious" = "xno" ; then
+ enable_cache_oblivious="0"
+else
+ enable_cache_oblivious="1"
+fi
+],
+[enable_cache_oblivious="1"]
+)
+if test "x$enable_cache_oblivious" = "x1" ; then
+ AC_DEFINE([JEMALLOC_CACHE_OBLIVIOUS], [ ], [ ])
+fi
+AC_SUBST([enable_cache_oblivious])
+
+dnl Do not log by default.
+AC_ARG_ENABLE([log],
+ [AS_HELP_STRING([--enable-log], [Support debug logging])],
+[if test "x$enable_log" = "xno" ; then
+ enable_log="0"
+else
+ enable_log="1"
+fi
+],
+[enable_log="0"]
+)
+if test "x$enable_log" = "x1" ; then
+ AC_DEFINE([JEMALLOC_LOG], [ ], [ ])
+fi
+AC_SUBST([enable_log])
+
+dnl Do not use readlinkat by default
+AC_ARG_ENABLE([readlinkat],
+ [AS_HELP_STRING([--enable-readlinkat], [Use readlinkat over readlink])],
+[if test "x$enable_readlinkat" = "xno" ; then
+ enable_readlinkat="0"
+else
+ enable_readlinkat="1"
+fi
+],
+[enable_readlinkat="0"]
+)
+if test "x$enable_readlinkat" = "x1" ; then
+ AC_DEFINE([JEMALLOC_READLINKAT], [ ], [ ])
+fi
+AC_SUBST([enable_readlinkat])
+
+dnl Avoid extra safety checks by default
+AC_ARG_ENABLE([opt-safety-checks],
+ [AS_HELP_STRING([--enable-opt-safety-checks],
+ [Perform certain low-overhead checks, even in opt mode])],
+[if test "x$enable_opt_safety_checks" = "xno" ; then
+ enable_opt_safety_checks="0"
+else
+ enable_opt_safety_checks="1"
+fi
+],
+[enable_opt_safety_checks="0"]
+)
+if test "x$enable_opt_safety_checks" = "x1" ; then
+ AC_DEFINE([JEMALLOC_OPT_SAFETY_CHECKS], [ ], [ ])
+fi
+AC_SUBST([enable_opt_safety_checks])
+
+dnl Look for sized-deallocation bugs while otherwise being in opt mode.
+AC_ARG_ENABLE([opt-size-checks],
+ [AS_HELP_STRING([--enable-opt-size-checks],
+ [Perform sized-deallocation argument checks, even in opt mode])],
+[if test "x$enable_opt_size_checks" = "xno" ; then
+ enable_opt_size_checks="0"
+else
+ enable_opt_size_checks="1"
+fi
+],
+[enable_opt_size_checks="0"]
+)
+if test "x$enable_opt_size_checks" = "x1" ; then
+ AC_DEFINE([JEMALLOC_OPT_SIZE_CHECKS], [ ], [ ])
+fi
+AC_SUBST([enable_opt_size_checks])
+
+dnl Do not check for use-after-free by default.
+AC_ARG_ENABLE([uaf-detection],
+ [AS_HELP_STRING([--enable-uaf-detection],
+ [Allow sampled junk-filling on deallocation to detect use-after-free])],
+[if test "x$enable_uaf_detection" = "xno" ; then
+ enable_uaf_detection="0"
+else
+ enable_uaf_detection="1"
+fi
+],
+[enable_uaf_detection="0"]
+)
+if test "x$enable_uaf_detection" = "x1" ; then
+ AC_DEFINE([JEMALLOC_UAF_DETECTION], [ ], [ ])
+fi
+AC_SUBST([enable_uaf_detection])
+
+JE_COMPILABLE([a program using __builtin_unreachable], [
+void foo (void) {
+ __builtin_unreachable();
+}
+], [
+ {
+ foo();
+ }
+], [je_cv_gcc_builtin_unreachable])
+if test "x${je_cv_gcc_builtin_unreachable}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [__builtin_unreachable], [ ])
+else
+ AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [abort], [ ])
+fi
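+dnl Sketch of the assumed internal use (illustrative only):
+dnl   switch (x) { ... default: JEMALLOC_INTERNAL_UNREACHABLE(); }
+dnl Impossible paths either optimize away (__builtin_unreachable) or fail
+dnl loudly (abort).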
+
+dnl ============================================================================
+dnl Check for __builtin_ffsl(), then ffsl(3), and fail if neither are found.
+dnl One of those two functions should (theoretically) exist on all platforms
+dnl that jemalloc currently has a chance of functioning on without modification.
+dnl We additionally assume ffs[ll]() or __builtin_ffs[ll]() are defined if
+dnl ffsl() or __builtin_ffsl() are defined, respectively.
+JE_COMPILABLE([a program using __builtin_ffsl], [
+#include <stdio.h>
+#include <strings.h>
+#include <string.h>
+], [
+ {
+ int rv = __builtin_ffsl(0x08);
+ printf("%d\n", rv);
+ }
+], [je_cv_gcc_builtin_ffsl])
+if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [__builtin_ffsll], [ ])
+ AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [__builtin_ffsl], [ ])
+ AC_DEFINE([JEMALLOC_INTERNAL_FFS], [__builtin_ffs], [ ])
+else
+ JE_COMPILABLE([a program using ffsl], [
+ #include <stdio.h>
+ #include <strings.h>
+ #include <string.h>
+ ], [
+ {
+ int rv = ffsl(0x08);
+ printf("%d\n", rv);
+ }
+ ], [je_cv_function_ffsl])
+ if test "x${je_cv_function_ffsl}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [ffsll], [ ])
+ AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [ffsl], [ ])
+ AC_DEFINE([JEMALLOC_INTERNAL_FFS], [ffs], [ ])
+ else
+ AC_MSG_ERROR([Cannot build without ffsl(3) or __builtin_ffsl()])
+ fi
+fi
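+dnl Worked example (illustrative only): ffsl()/__builtin_ffsl() return the
+dnl one-based index of the least significant set bit, so the test value 0x08
+dnl yields 4, and an argument of 0 yields 0.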
+
+JE_COMPILABLE([a program using __builtin_popcountl], [
+#include <stdio.h>
+#include <strings.h>
+#include <string.h>
+], [
+ {
+ int rv = __builtin_popcountl(0x08);
+ printf("%d\n", rv);
+ }
+], [je_cv_gcc_builtin_popcountl])
+if test "x${je_cv_gcc_builtin_popcountl}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_INTERNAL_POPCOUNT], [__builtin_popcount], [ ])
+ AC_DEFINE([JEMALLOC_INTERNAL_POPCOUNTL], [__builtin_popcountl], [ ])
+ AC_DEFINE([JEMALLOC_INTERNAL_POPCOUNTLL], [__builtin_popcountll], [ ])
+fi
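+dnl Worked example (illustrative only): __builtin_popcountl counts set bits,
+dnl so the test value 0x08 yields 1, and 0xff would yield 8.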
+
+AC_ARG_WITH([lg_quantum],
+ [AS_HELP_STRING([--with-lg-quantum=<lg-quantum>],
+ [Base 2 log of minimum allocation alignment])])
+if test "x$with_lg_quantum" != "x" ; then
+ AC_DEFINE_UNQUOTED([LG_QUANTUM], [$with_lg_quantum], [ ])
+fi
+
+AC_ARG_WITH([lg_slab_maxregs],
+ [AS_HELP_STRING([--with-lg-slab-maxregs=<lg-slab-maxregs>],
+ [Base 2 log of maximum number of regions in a slab (used with malloc_conf slab_sizes)])],
+ [CONFIG_LG_SLAB_MAXREGS="$with_lg_slab_maxregs"],
+ [CONFIG_LG_SLAB_MAXREGS=""])
+if test "x$with_lg_slab_maxregs" != "x" ; then
+ AC_DEFINE_UNQUOTED([CONFIG_LG_SLAB_MAXREGS], [$with_lg_slab_maxregs], [ ])
+fi
+
+AC_ARG_WITH([lg_page],
+ [AS_HELP_STRING([--with-lg-page=<lg-page>], [Base 2 log of system page size])],
+ [LG_PAGE="$with_lg_page"], [LG_PAGE="detect"])
+case "${host}" in
+ aarch64-apple-darwin*)
+ dnl When cross-compiling for Apple M1 and no page size is specified, use
+ dnl the default and skip detecting the page size (detection would likely
+ dnl be incorrect).
+ if test "x${host}" != "x${build}" -a "x$LG_PAGE" = "xdetect"; then
+ LG_PAGE=14
+ fi
+ ;;
+esac
+if test "x$LG_PAGE" = "xdetect"; then
+ AC_CACHE_CHECK([LG_PAGE],
+ [je_cv_lg_page],
+ AC_RUN_IFELSE([AC_LANG_PROGRAM(
+[[
+#include <strings.h>
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <unistd.h>
+#endif
+#include <stdio.h>
+]],
+[[
+ int result;
+ FILE *f;
+
+#ifdef _WIN32
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ result = si.dwPageSize;
+#else
+ result = sysconf(_SC_PAGESIZE);
+#endif
+ if (result == -1) {
+ return 1;
+ }
+ result = JEMALLOC_INTERNAL_FFSL(result) - 1;
+
+ f = fopen("conftest.out", "w");
+ if (f == NULL) {
+ return 1;
+ }
+ fprintf(f, "%d", result);
+ fclose(f);
+
+ return 0;
+]])],
+ [je_cv_lg_page=`cat conftest.out`],
+ [je_cv_lg_page=undefined],
+ [je_cv_lg_page=12]))
+fi
+if test "x${je_cv_lg_page}" != "x" ; then
+ LG_PAGE="${je_cv_lg_page}"
+fi
+if test "x${LG_PAGE}" != "xundefined" ; then
+ AC_DEFINE_UNQUOTED([LG_PAGE], [$LG_PAGE], [ ])
+else
+ AC_MSG_ERROR([cannot determine value for LG_PAGE])
+fi
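+dnl Worked example (illustrative only): 4096-byte pages give
+dnl ffsl(4096) - 1 == 12, i.e. LG_PAGE == 12; Apple M1's 16 KiB pages give
+dnl LG_PAGE == 14, matching the cross-compile default above.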
+
+AC_ARG_WITH([lg_hugepage],
+ [AS_HELP_STRING([--with-lg-hugepage=<lg-hugepage>],
+ [Base 2 log of system huge page size])],
+ [je_cv_lg_hugepage="${with_lg_hugepage}"],
+ [je_cv_lg_hugepage=""])
+if test "x${je_cv_lg_hugepage}" = "x" ; then
+ dnl Look in /proc/meminfo (Linux-specific) for information on the default huge
+ dnl page size, if any. The relevant line looks like:
+ dnl
+ dnl Hugepagesize: 2048 kB
+ if test -e "/proc/meminfo" ; then
+ hpsk=[`cat /proc/meminfo 2>/dev/null | \
+ grep -e '^Hugepagesize:[[:space:]]\+[0-9]\+[[:space:]]kB$' | \
+ awk '{print $2}'`]
+ if test "x${hpsk}" != "x" ; then
+ je_cv_lg_hugepage=10
+ while test "${hpsk}" -gt 1 ; do
+ hpsk="$((hpsk / 2))"
+ je_cv_lg_hugepage="$((je_cv_lg_hugepage + 1))"
+ done
+ fi
+ fi
+
+ dnl Set default if unable to automatically configure.
+ if test "x${je_cv_lg_hugepage}" = "x" ; then
+ je_cv_lg_hugepage=21
+ fi
+fi
+if test "x${LG_PAGE}" != "xundefined" -a \
+ "${je_cv_lg_hugepage}" -lt "${LG_PAGE}" ; then
+ AC_MSG_ERROR([Huge page size (2^${je_cv_lg_hugepage}) must be at least page size (2^${LG_PAGE})])
+fi
+AC_DEFINE_UNQUOTED([LG_HUGEPAGE], [${je_cv_lg_hugepage}], [ ])
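+dnl Worked example (illustrative only): "Hugepagesize: 2048 kB" starts from
+dnl lg(1 kB) == 10 and halves 2048 eleven times, so
+dnl LG_HUGEPAGE == 10 + 11 == 21, i.e. 2 MiB huge pages.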
+
+dnl ============================================================================
+dnl Enable libdl by default.
+AC_ARG_ENABLE([libdl],
+ [AS_HELP_STRING([--disable-libdl],
+ [Do not use libdl])],
+[if test "x$enable_libdl" = "xno" ; then
+ enable_libdl="0"
+else
+ enable_libdl="1"
+fi
+],
+[enable_libdl="1"]
+)
+AC_SUBST([enable_libdl])
+
+dnl ============================================================================
+dnl Configure pthreads.
+
+if test "x$abi" != "xpecoff" ; then
+ AC_DEFINE([JEMALLOC_HAVE_PTHREAD], [ ], [ ])
+ AC_CHECK_HEADERS([pthread.h], , [AC_MSG_ERROR([pthread.h is missing])])
+ dnl Some systems may embed pthreads functionality in libc; check for libpthread
+ dnl first, but try libc too before failing.
+ AC_CHECK_LIB([pthread], [pthread_create], [JE_APPEND_VS(LIBS, -pthread)],
+ [AC_SEARCH_LIBS([pthread_create], , ,
+ AC_MSG_ERROR([libpthread is missing]))])
+ wrap_syms="${wrap_syms} pthread_create"
+ have_pthread="1"
+
+dnl Check if we have dlsym support.
+ if test "x$enable_libdl" = "x1" ; then
+ have_dlsym="1"
+ AC_CHECK_HEADERS([dlfcn.h],
+ AC_CHECK_FUNC([dlsym], [],
+ [AC_CHECK_LIB([dl], [dlsym], [LIBS="$LIBS -ldl"], [have_dlsym="0"])]),
+ [have_dlsym="0"])
+ if test "x$have_dlsym" = "x1" ; then
+ AC_DEFINE([JEMALLOC_HAVE_DLSYM], [ ], [ ])
+ fi
+ else
+ have_dlsym="0"
+ fi
+
+ JE_COMPILABLE([pthread_atfork(3)], [
+#include <pthread.h>
+], [
+ pthread_atfork((void *)0, (void *)0, (void *)0);
+], [je_cv_pthread_atfork])
+ if test "x${je_cv_pthread_atfork}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_PTHREAD_ATFORK], [ ], [ ])
+ fi
+ dnl Check if pthread_setname_np is available with the expected API.
+ JE_COMPILABLE([pthread_setname_np(3)], [
+#include <pthread.h>
+], [
+ pthread_setname_np(pthread_self(), "setname_test");
+], [je_cv_pthread_setname_np])
+ if test "x${je_cv_pthread_setname_np}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_PTHREAD_SETNAME_NP], [ ], [ ])
+ fi
+ dnl pthread_getname_np is not necessarily present even when its
+ dnl pthread_setname_np counterpart is, so check for it separately.
+ JE_COMPILABLE([pthread_getname_np(3)], [
+#include <pthread.h>
+#include <stdlib.h>
+], [
+ {
+ char *name = malloc(16);
+ pthread_getname_np(pthread_self(), name, 16);
+ free(name);
+ }
+], [je_cv_pthread_getname_np])
+ if test "x${je_cv_pthread_getname_np}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_PTHREAD_GETNAME_NP], [ ], [ ])
+ fi
+ dnl pthread_get_name_np is not necessarily present even when its
+ dnl pthread_set_name_np counterpart is, so check for it separately.
+ JE_COMPILABLE([pthread_get_name_np(3)], [
+#include <pthread.h>
+#include <pthread_np.h>
+#include <stdlib.h>
+], [
+ {
+ char *name = malloc(16);
+ pthread_get_name_np(pthread_self(), name, 16);
+ free(name);
+ }
+], [je_cv_pthread_get_name_np])
+ if test "x${je_cv_pthread_get_name_np}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_PTHREAD_GET_NAME_NP], [ ], [ ])
+ fi
+fi
+
+JE_APPEND_VS(CPPFLAGS, -D_REENTRANT)
+
+dnl Check whether clock_gettime(2) is in libc or librt.
+AC_SEARCH_LIBS([clock_gettime], [rt])
+
+dnl The Cray wrapper compiler often adds `-lrt` when using `-static`. Check
+dnl with `-dynamic` as well, in case a user tries to dynamically link in
+dnl jemalloc.
+if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
+ if test "$ac_cv_search_clock_gettime" != "-lrt"; then
+ JE_CFLAGS_SAVE()
+
+ unset ac_cv_search_clock_gettime
+ JE_CFLAGS_ADD([-dynamic])
+ AC_SEARCH_LIBS([clock_gettime], [rt])
+
+ JE_CFLAGS_RESTORE()
+ fi
+fi
+
+dnl check for CLOCK_MONOTONIC_COARSE (Linux-specific).
+JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC_COARSE, ...)], [
+#include <time.h>
+], [
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
+], [je_cv_clock_monotonic_coarse])
+if test "x${je_cv_clock_monotonic_coarse}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE], [ ], [ ])
+fi
+
+dnl check for CLOCK_MONOTONIC.
+JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC, ...)], [
+#include <unistd.h>
+#include <time.h>
+], [
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+#if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0
+# error _POSIX_MONOTONIC_CLOCK missing/invalid
+#endif
+], [je_cv_clock_monotonic])
+if test "x${je_cv_clock_monotonic}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC], [ ], [ ])
+fi
+
+dnl Check for mach_absolute_time().
+JE_COMPILABLE([mach_absolute_time()], [
+#include <mach/mach_time.h>
+], [
+ mach_absolute_time();
+], [je_cv_mach_absolute_time])
+if test "x${je_cv_mach_absolute_time}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_MACH_ABSOLUTE_TIME], [ ], [ ])
+fi
+
+dnl check for CLOCK_REALTIME (always should be available on Linux)
+JE_COMPILABLE([clock_gettime(CLOCK_REALTIME, ...)], [
+#include <time.h>
+], [
+ struct timespec ts;
+
+ clock_gettime(CLOCK_REALTIME, &ts);
+], [je_cv_clock_realtime])
+if test "x${je_cv_clock_realtime}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_CLOCK_REALTIME], [ ], [ ])
+fi
+
+dnl Use syscall(2) (if available) by default.
+AC_ARG_ENABLE([syscall],
+ [AS_HELP_STRING([--disable-syscall], [Disable use of syscall(2)])],
+[if test "x$enable_syscall" = "xno" ; then
+ enable_syscall="0"
+else
+ enable_syscall="1"
+fi
+],
+[enable_syscall="1"]
+)
+if test "x$enable_syscall" = "x1" ; then
+ dnl Check if syscall(2) is usable. Treat warnings as errors, so that e.g. OS
+ dnl X 10.12's deprecation warning prevents use.
+ JE_CFLAGS_SAVE()
+ JE_CFLAGS_ADD([-Werror])
+ JE_COMPILABLE([syscall(2)], [
+#include <sys/syscall.h>
+#include <unistd.h>
+], [
+ syscall(SYS_write, 2, "hello", 5);
+],
+ [je_cv_syscall])
+ JE_CFLAGS_RESTORE()
+ if test "x$je_cv_syscall" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_USE_SYSCALL], [ ], [ ])
+ fi
+fi
+
+dnl Check if the GNU-specific secure_getenv function exists.
+AC_CHECK_FUNC([secure_getenv],
+ [have_secure_getenv="1"],
+ [have_secure_getenv="0"]
+ )
+if test "x$have_secure_getenv" = "x1" ; then
+ AC_DEFINE([JEMALLOC_HAVE_SECURE_GETENV], [ ], [ ])
+fi
+
+dnl Check if the GNU-specific sched_getcpu function exists.
+AC_CHECK_FUNC([sched_getcpu],
+ [have_sched_getcpu="1"],
+ [have_sched_getcpu="0"]
+ )
+if test "x$have_sched_getcpu" = "x1" ; then
+ AC_DEFINE([JEMALLOC_HAVE_SCHED_GETCPU], [ ], [ ])
+fi
+
+dnl Check if the GNU-specific sched_setaffinity function exists.
+AC_CHECK_FUNC([sched_setaffinity],
+ [have_sched_setaffinity="1"],
+ [have_sched_setaffinity="0"]
+ )
+if test "x$have_sched_setaffinity" = "x1" ; then
+ AC_DEFINE([JEMALLOC_HAVE_SCHED_SETAFFINITY], [ ], [ ])
+fi
+
+dnl Check if the Solaris/BSD issetugid function exists.
+AC_CHECK_FUNC([issetugid],
+ [have_issetugid="1"],
+ [have_issetugid="0"]
+ )
+if test "x$have_issetugid" = "x1" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ISSETUGID], [ ], [ ])
+fi
+
+dnl Check whether the BSD-specific _malloc_thread_cleanup() exists. If so, use
+dnl it rather than pthreads TSD cleanup functions to support cleanup during
+dnl thread exit, in order to avoid pthreads library recursion during
+dnl bootstrapping.
+AC_CHECK_FUNC([_malloc_thread_cleanup],
+ [have__malloc_thread_cleanup="1"],
+ [have__malloc_thread_cleanup="0"]
+ )
+if test "x$have__malloc_thread_cleanup" = "x1" ; then
+ AC_DEFINE([JEMALLOC_MALLOC_THREAD_CLEANUP], [ ], [ ])
+ wrap_syms="${wrap_syms} _malloc_thread_cleanup _malloc_tsd_cleanup_register"
+ force_tls="1"
+fi
+
+dnl Check whether the BSD-specific _pthread_mutex_init_calloc_cb() exists. If
+dnl so, mutex initialization causes allocation, and we need to implement this
+dnl callback function in order to prevent recursive allocation.
+AC_CHECK_FUNC([_pthread_mutex_init_calloc_cb],
+ [have__pthread_mutex_init_calloc_cb="1"],
+ [have__pthread_mutex_init_calloc_cb="0"]
+ )
+if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then
+ AC_DEFINE([JEMALLOC_MUTEX_INIT_CB], [ ], [ ])
+ wrap_syms="${wrap_syms} _malloc_prefork _malloc_postfork"
+fi
+
+AC_CHECK_FUNC([memcntl],
+ [have_memcntl="1"],
+ [have_memcntl="0"],
+ )
+if test "x$have_memcntl" = "x1" ; then
+ AC_DEFINE([JEMALLOC_HAVE_MEMCNTL], [ ], [ ])
+fi
+
+dnl Disable lazy locking by default.
+AC_ARG_ENABLE([lazy_lock],
+ [AS_HELP_STRING([--enable-lazy-lock],
+ [Enable lazy locking (only lock when multi-threaded)])],
+[if test "x$enable_lazy_lock" = "xno" ; then
+ enable_lazy_lock="0"
+else
+ enable_lazy_lock="1"
+fi
+],
+[enable_lazy_lock=""]
+)
+if test "x${enable_lazy_lock}" = "x" ; then
+ if test "x${force_lazy_lock}" = "x1" ; then
+ AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues])
+ enable_lazy_lock="1"
+ else
+ enable_lazy_lock="0"
+ fi
+fi
+if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then
+ AC_MSG_RESULT([Forcing no lazy-lock because thread creation monitoring is unimplemented])
+ enable_lazy_lock="0"
+fi
+if test "x$enable_lazy_lock" = "x1" ; then
+ if test "x$have_dlsym" = "x1" ; then
+ AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ], [ ])
+ else
+ AC_MSG_ERROR([Missing dlsym support: lazy-lock cannot be enabled.])
+ fi
+fi
+AC_SUBST([enable_lazy_lock])
+
+dnl Automatically configure TLS.
+if test "x${force_tls}" = "x1" ; then
+ enable_tls="1"
+elif test "x${force_tls}" = "x0" ; then
+ enable_tls="0"
+else
+ enable_tls="1"
+fi
+if test "x${enable_tls}" = "x1" ; then
+AC_MSG_CHECKING([for TLS])
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
+[[
+ __thread int x;
+]], [[
+ x = 42;
+
+ return 0;
+]])],
+ AC_MSG_RESULT([yes]),
+ AC_MSG_RESULT([no])
+ enable_tls="0")
+else
+ enable_tls="0"
+fi
+AC_SUBST([enable_tls])
+if test "x${enable_tls}" = "x1" ; then
+ AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ], [ ])
+fi
+
+dnl ============================================================================
+dnl Check for C11 atomics.
+
+JE_COMPILABLE([C11 atomics], [
+#include <stdint.h>
+#if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
+#include <stdatomic.h>
+#else
+#error Atomics not available
+#endif
+], [
+ uint64_t *p = (uint64_t *)0;
+ uint64_t x = 1;
+ volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+ uint64_t r = atomic_fetch_add(a, x) + x;
+ return r == 0;
+], [je_cv_c11_atomics])
+if test "x${je_cv_c11_atomics}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_C11_ATOMICS], [ ], [ ])
+fi
+
+dnl ============================================================================
+dnl Check for GCC-style __atomic atomics.
+
+JE_COMPILABLE([GCC __atomic atomics], [
+], [
+ int x = 0;
+ int val = 1;
+ int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED);
+ int after_add = x;
+ return after_add == 1;
+], [je_cv_gcc_atomic_atomics])
+if test "x${je_cv_gcc_atomic_atomics}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_GCC_ATOMIC_ATOMICS], [ ], [ ])
+
+ dnl check for 8-bit atomic support
+ JE_COMPILABLE([GCC 8-bit __atomic atomics], [
+ ], [
+ unsigned char x = 0;
+ int val = 1;
+ int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED);
+ int after_add = (int)x;
+ return after_add == 1;
+ ], [je_cv_gcc_u8_atomic_atomics])
+ if test "x${je_cv_gcc_u8_atomic_atomics}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_GCC_U8_ATOMIC_ATOMICS], [ ], [ ])
+ fi
+fi
+
+dnl ============================================================================
+dnl Check for GCC-style __sync atomics.
+
+JE_COMPILABLE([GCC __sync atomics], [
+], [
+ int x = 0;
+ int before_add = __sync_fetch_and_add(&x, 1);
+ int after_add = x;
+ return (before_add == 0) && (after_add == 1);
+], [je_cv_gcc_sync_atomics])
+if test "x${je_cv_gcc_sync_atomics}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_GCC_SYNC_ATOMICS], [ ], [ ])
+
+ dnl check for 8-bit atomic support
+ JE_COMPILABLE([GCC 8-bit __sync atomics], [
+ ], [
+ unsigned char x = 0;
+ int before_add = __sync_fetch_and_add(&x, 1);
+ int after_add = (int)x;
+ return (before_add == 0) && (after_add == 1);
+ ], [je_cv_gcc_u8_sync_atomics])
+ if test "x${je_cv_gcc_u8_sync_atomics}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_GCC_U8_SYNC_ATOMICS], [ ], [ ])
+ fi
+fi
+
+dnl ============================================================================
+dnl Check for atomic(3) operations as provided on Darwin.
+dnl We need this not for the atomic operations (which are provided above), but
+dnl rather for the OS_unfair_lock type it exposes.
+
+JE_COMPILABLE([Darwin OSAtomic*()], [
+#include <libkern/OSAtomic.h>
+#include <inttypes.h>
+], [
+ {
+ int32_t x32 = 0;
+ volatile int32_t *x32p = &x32;
+ OSAtomicAdd32(1, x32p);
+ }
+ {
+ int64_t x64 = 0;
+ volatile int64_t *x64p = &x64;
+ OSAtomicAdd64(1, x64p);
+ }
+], [je_cv_osatomic])
+if test "x${je_cv_osatomic}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_OSATOMIC], [ ], [ ])
+fi
+
+dnl ============================================================================
+dnl Check for madvise(2).
+
+JE_COMPILABLE([madvise(2)], [
+#include <sys/mman.h>
+], [
+ madvise((void *)0, 0, 0);
+], [je_cv_madvise])
+if test "x${je_cv_madvise}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_MADVISE], [ ], [ ])
+
+ dnl Check for madvise(..., MADV_FREE).
+ JE_COMPILABLE([madvise(..., MADV_FREE)], [
+#include <sys/mman.h>
+], [
+ madvise((void *)0, 0, MADV_FREE);
+], [je_cv_madv_free])
+ if test "x${je_cv_madv_free}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ], [ ])
+ elif test "x${je_cv_madvise}" = "xyes" ; then
+ case "${host_cpu}" in i686|x86_64)
+ case "${host}" in *-*-linux*)
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ], [ ])
+ AC_DEFINE([JEMALLOC_DEFINE_MADVISE_FREE], [ ], [ ])
+ ;;
+ esac
+ ;;
+ esac
+ fi
+
+ dnl Check for madvise(..., MADV_DONTNEED).
+ JE_COMPILABLE([madvise(..., MADV_DONTNEED)], [
+#include <sys/mman.h>
+], [
+ madvise((void *)0, 0, MADV_DONTNEED);
+], [je_cv_madv_dontneed])
+ if test "x${je_cv_madv_dontneed}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ], [ ])
+ fi
+
+ dnl Check for madvise(..., MADV_DO[NT]DUMP).
+ JE_COMPILABLE([madvise(..., MADV_DO[[NT]]DUMP)], [
+#include <sys/mman.h>
+], [
+ madvise((void *)0, 0, MADV_DONTDUMP);
+ madvise((void *)0, 0, MADV_DODUMP);
+], [je_cv_madv_dontdump])
+ if test "x${je_cv_madv_dontdump}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_MADVISE_DONTDUMP], [ ], [ ])
+ fi
+
+ dnl Check for madvise(..., MADV_[NO]HUGEPAGE).
+ JE_COMPILABLE([madvise(..., MADV_[[NO]]HUGEPAGE)], [
+#include <sys/mman.h>
+], [
+ madvise((void *)0, 0, MADV_HUGEPAGE);
+ madvise((void *)0, 0, MADV_NOHUGEPAGE);
+], [je_cv_thp])
+ dnl Check for madvise(..., MADV_[NO]CORE).
+ JE_COMPILABLE([madvise(..., MADV_[[NO]]CORE)], [
+#include <sys/mman.h>
+], [
+ madvise((void *)0, 0, MADV_NOCORE);
+ madvise((void *)0, 0, MADV_CORE);
+], [je_cv_madv_nocore])
+ if test "x${je_cv_madv_nocore}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_MADVISE_NOCORE], [ ], [ ])
+ fi
+case "${host_cpu}" in
+ arm*)
+ ;;
+ *)
+ if test "x${je_cv_thp}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_MADVISE_HUGE], [ ], [ ])
+ fi
+ ;;
+esac
+else
+ dnl Check for posix_madvise.
+ JE_COMPILABLE([posix_madvise], [
+ #include <sys/mman.h>
+ ], [
+ posix_madvise((void *)0, 0, 0);
+ ], [je_cv_posix_madvise])
+ if test "x${je_cv_posix_madvise}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_POSIX_MADVISE], [ ], [ ])
+
+ dnl Check for posix_madvise(..., POSIX_MADV_DONTNEED).
+ JE_COMPILABLE([posix_madvise(..., POSIX_MADV_DONTNEED)], [
+ #include <sys/mman.h>
+ ], [
+ posix_madvise((void *)0, 0, POSIX_MADV_DONTNEED);
+ ], [je_cv_posix_madv_dontneed])
+ if test "x${je_cv_posix_madv_dontneed}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED], [ ], [ ])
+ fi
+ fi
+fi
+
+dnl ============================================================================
+dnl Check for mprotect(2).
+
+JE_COMPILABLE([mprotect(2)], [
+#include <sys/mman.h>
+], [
+ mprotect((void *)0, 0, PROT_NONE);
+], [je_cv_mprotect])
+if test "x${je_cv_mprotect}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_MPROTECT], [ ], [ ])
+fi
+
+dnl ============================================================================
+dnl Check for __builtin_clz(), __builtin_clzl(), and __builtin_clzll().
+
+AC_CACHE_CHECK([for __builtin_clz],
+ [je_cv_builtin_clz],
+ [AC_LINK_IFELSE([AC_LANG_PROGRAM([],
+ [
+ {
+ unsigned x = 0;
+ int y = __builtin_clz(x);
+ }
+ {
+ unsigned long x = 0;
+ int y = __builtin_clzl(x);
+ }
+ {
+ unsigned long long x = 0;
+ int y = __builtin_clzll(x);
+ }
+ ])],
+ [je_cv_builtin_clz=yes],
+ [je_cv_builtin_clz=no])])
+
+if test "x${je_cv_builtin_clz}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_BUILTIN_CLZ], [ ], [ ])
+fi
+
+dnl ============================================================================
+dnl Check for os_unfair_lock operations as provided on Darwin.
+
+JE_COMPILABLE([Darwin os_unfair_lock_*()], [
+#include <os/lock.h>
+#include <AvailabilityMacros.h>
+], [
+ #if MAC_OS_X_VERSION_MIN_REQUIRED < 101200
+ #error "os_unfair_lock is not supported"
+ #else
+ os_unfair_lock lock = OS_UNFAIR_LOCK_INIT;
+ os_unfair_lock_lock(&lock);
+ os_unfair_lock_unlock(&lock);
+ #endif
+], [je_cv_os_unfair_lock])
+if test "x${je_cv_os_unfair_lock}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_OS_UNFAIR_LOCK], [ ], [ ])
+fi
+
+dnl ============================================================================
+dnl Darwin-related configuration.
+
+AC_ARG_ENABLE([zone-allocator],
+ [AS_HELP_STRING([--disable-zone-allocator],
+ [Disable zone allocator for Darwin])],
+[if test "x$enable_zone_allocator" = "xno" ; then
+ enable_zone_allocator="0"
+else
+ enable_zone_allocator="1"
+fi
+],
+[if test "x${abi}" = "xmacho"; then
+ enable_zone_allocator="1"
+fi
+]
+)
+AC_SUBST([enable_zone_allocator])
+
+if test "x${enable_zone_allocator}" = "x1" ; then
+ if test "x${abi}" != "xmacho"; then
+ AC_MSG_ERROR([--enable-zone-allocator is only supported on Darwin])
+ fi
+ AC_DEFINE([JEMALLOC_ZONE], [ ], [ ])
+fi
+
+dnl ============================================================================
+dnl Use initial-exec TLS by default.
+AC_ARG_ENABLE([initial-exec-tls],
+ [AS_HELP_STRING([--disable-initial-exec-tls],
+ [Disable the initial-exec tls model])],
+[if test "x$enable_initial_exec_tls" = "xno" ; then
+ enable_initial_exec_tls="0"
+else
+ enable_initial_exec_tls="1"
+fi
+],
+[enable_initial_exec_tls="1"]
+)
+AC_SUBST([enable_initial_exec_tls])
+
+if test "x${je_cv_tls_model}" = "xyes" -a \
+ "x${enable_initial_exec_tls}" = "x1" ; then
+ AC_DEFINE([JEMALLOC_TLS_MODEL],
+ [__attribute__((tls_model("initial-exec")))],
+ [ ])
+else
+ AC_DEFINE([JEMALLOC_TLS_MODEL], [ ], [ ])
+fi
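+dnl For illustration (hypothetical declaration): JEMALLOC_TLS_MODEL annotates
+dnl thread-local definitions, e.g.
+dnl   static __thread tsd_t JEMALLOC_TLS_MODEL tsd_tls;
+dnl so that, when supported, accesses use the cheaper initial-exec TLS
+dnl sequence rather than the general-dynamic one.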
+
+dnl ============================================================================
+dnl Enable background threads if possible.
+
+if test "x${have_pthread}" = "x1" -a "x${je_cv_os_unfair_lock}" != "xyes" -a \
+ "x${abi}" != "xmacho" ; then
+ AC_DEFINE([JEMALLOC_BACKGROUND_THREAD], [ ], [ ])
+fi
+
+dnl ============================================================================
+dnl Check for glibc malloc hooks
+
+if test "x$glibc" = "x1" ; then
+ JE_COMPILABLE([glibc malloc hook], [
+ #include <stddef.h>
+
+ extern void (* __free_hook)(void *ptr);
+ extern void *(* __malloc_hook)(size_t size);
+ extern void *(* __realloc_hook)(void *ptr, size_t size);
+], [
+ void *ptr = 0L;
+ if (__malloc_hook) ptr = __malloc_hook(1);
+ if (__realloc_hook) ptr = __realloc_hook(ptr, 2);
+ if (__free_hook && ptr) __free_hook(ptr);
+], [je_cv_glibc_malloc_hook])
+ if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then
+ if test "x${JEMALLOC_PREFIX}" = "x" ; then
+ AC_DEFINE([JEMALLOC_GLIBC_MALLOC_HOOK], [ ], [ ])
+ wrap_syms="${wrap_syms} __free_hook __malloc_hook __realloc_hook"
+ fi
+ fi
+
+ JE_COMPILABLE([glibc memalign hook], [
+ #include <stddef.h>
+
+ extern void *(* __memalign_hook)(size_t alignment, size_t size);
+], [
+ void *ptr = 0L;
+ if (__memalign_hook) ptr = __memalign_hook(16, 7);
+], [je_cv_glibc_memalign_hook])
+ if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then
+ if test "x${JEMALLOC_PREFIX}" = "x" ; then
+ AC_DEFINE([JEMALLOC_GLIBC_MEMALIGN_HOOK], [ ], [ ])
+ wrap_syms="${wrap_syms} __memalign_hook"
+ fi
+ fi
+fi
+
+JE_COMPILABLE([pthreads adaptive mutexes], [
+#include <pthread.h>
+], [
+ pthread_mutexattr_t attr;
+ pthread_mutexattr_init(&attr);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
+ pthread_mutexattr_destroy(&attr);
+], [je_cv_pthread_mutex_adaptive_np])
+if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP], [ ], [ ])
+fi
+
+JE_CFLAGS_SAVE()
+JE_CFLAGS_ADD([-D_GNU_SOURCE])
+JE_CFLAGS_ADD([-Werror])
+JE_CFLAGS_ADD([-herror_on_warning])
+JE_COMPILABLE([strerror_r returns char with gnu source], [
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+], [
+ char *buffer = (char *) malloc(100);
+ char *error = strerror_r(EINVAL, buffer, 100);
+ printf("%s\n", error);
+], [je_cv_strerror_r_returns_char_with_gnu_source])
+JE_CFLAGS_RESTORE()
+if test "x${je_cv_strerror_r_returns_char_with_gnu_source}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE], [ ], [ ])
+fi
+
+dnl ============================================================================
+dnl Check for typedefs, structures, and compiler characteristics.
+AC_HEADER_STDBOOL
+
+dnl ============================================================================
+dnl Define commands that generate output files.
+
+AC_CONFIG_COMMANDS([include/jemalloc/internal/public_symbols.txt], [
+ f="${objroot}include/jemalloc/internal/public_symbols.txt"
+ mkdir -p "${objroot}include/jemalloc/internal"
+ cp /dev/null "${f}"
+ for nm in `echo ${mangling_map} |tr ',' ' '` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $[]1}'`
+ m=`echo ${nm} |tr ':' ' ' |awk '{print $[]2}'`
+ echo "${n}:${m}" >> "${f}"
+ dnl Remove name from public_syms so that it isn't redefined later.
+ public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '`
+ done
+ for sym in ${public_syms} ; do
+ n="${sym}"
+ m="${JEMALLOC_PREFIX}${sym}"
+ echo "${n}:${m}" >> "${f}"
+ done
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+ mangling_map="${mangling_map}"
+ public_syms="${public_syms}"
+ JEMALLOC_PREFIX="${JEMALLOC_PREFIX}"
+])
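+dnl For illustration: each public_symbols.txt line maps an unprefixed name to
+dnl its installed name, e.g. "malloc:je_malloc" when JEMALLOC_PREFIX="je_";
+dnl --with-mangling entries are written first and removed from public_syms,
+dnl so the computed prefix mapping cannot override them.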
+AC_CONFIG_COMMANDS([include/jemalloc/internal/private_symbols.awk], [
+ f="${objroot}include/jemalloc/internal/private_symbols.awk"
+ mkdir -p "${objroot}include/jemalloc/internal"
+ export_syms=`for sym in ${public_syms}; do echo "${JEMALLOC_PREFIX}${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;`
+ "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols.awk"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+ public_syms="${public_syms}"
+ wrap_syms="${wrap_syms}"
+ SYM_PREFIX="${SYM_PREFIX}"
+ JEMALLOC_PREFIX="${JEMALLOC_PREFIX}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/internal/private_symbols_jet.awk], [
+ f="${objroot}include/jemalloc/internal/private_symbols_jet.awk"
+ mkdir -p "${objroot}include/jemalloc/internal"
+ export_syms=`for sym in ${public_syms}; do echo "jet_${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;`
+ "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols_jet.awk"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+ public_syms="${public_syms}"
+ wrap_syms="${wrap_syms}"
+ SYM_PREFIX="${SYM_PREFIX}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/internal/public_namespace.h], [
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/internal/public_unnamespace.h], [
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_protos_jet.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_rename.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle_jet.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+ install_suffix="${install_suffix}"
+])
+
+dnl Process .in files.
+AC_SUBST([cfghdrs_in])
+AC_SUBST([cfghdrs_out])
+AC_CONFIG_HEADERS([$cfghdrs_tup])
+
+dnl ============================================================================
+dnl Generate outputs.
+
+AC_CONFIG_FILES([$cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof])
+AC_SUBST([cfgoutputs_in])
+AC_SUBST([cfgoutputs_out])
+AC_OUTPUT
+
+dnl ============================================================================
+dnl Print out the results of configuration.
+AC_MSG_RESULT([===============================================================================])
+AC_MSG_RESULT([jemalloc version : ${jemalloc_version}])
+AC_MSG_RESULT([library revision : ${rev}])
+AC_MSG_RESULT([])
+AC_MSG_RESULT([CONFIG : ${CONFIG}])
+AC_MSG_RESULT([CC : ${CC}])
+AC_MSG_RESULT([CONFIGURE_CFLAGS : ${CONFIGURE_CFLAGS}])
+AC_MSG_RESULT([SPECIFIED_CFLAGS : ${SPECIFIED_CFLAGS}])
+AC_MSG_RESULT([EXTRA_CFLAGS : ${EXTRA_CFLAGS}])
+AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}])
+AC_MSG_RESULT([CXX : ${CXX}])
+AC_MSG_RESULT([CONFIGURE_CXXFLAGS : ${CONFIGURE_CXXFLAGS}])
+AC_MSG_RESULT([SPECIFIED_CXXFLAGS : ${SPECIFIED_CXXFLAGS}])
+AC_MSG_RESULT([EXTRA_CXXFLAGS : ${EXTRA_CXXFLAGS}])
+AC_MSG_RESULT([LDFLAGS : ${LDFLAGS}])
+AC_MSG_RESULT([EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}])
+AC_MSG_RESULT([DSO_LDFLAGS : ${DSO_LDFLAGS}])
+AC_MSG_RESULT([LIBS : ${LIBS}])
+AC_MSG_RESULT([RPATH_EXTRA : ${RPATH_EXTRA}])
+AC_MSG_RESULT([])
+AC_MSG_RESULT([XSLTPROC : ${XSLTPROC}])
+AC_MSG_RESULT([XSLROOT : ${XSLROOT}])
+AC_MSG_RESULT([])
+AC_MSG_RESULT([PREFIX : ${PREFIX}])
+AC_MSG_RESULT([BINDIR : ${BINDIR}])
+AC_MSG_RESULT([DATADIR : ${DATADIR}])
+AC_MSG_RESULT([INCLUDEDIR : ${INCLUDEDIR}])
+AC_MSG_RESULT([LIBDIR : ${LIBDIR}])
+AC_MSG_RESULT([MANDIR : ${MANDIR}])
+AC_MSG_RESULT([])
+AC_MSG_RESULT([srcroot : ${srcroot}])
+AC_MSG_RESULT([abs_srcroot : ${abs_srcroot}])
+AC_MSG_RESULT([objroot : ${objroot}])
+AC_MSG_RESULT([abs_objroot : ${abs_objroot}])
+AC_MSG_RESULT([])
+AC_MSG_RESULT([JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}])
+AC_MSG_RESULT([JEMALLOC_PRIVATE_NAMESPACE])
+AC_MSG_RESULT([ : ${JEMALLOC_PRIVATE_NAMESPACE}])
+AC_MSG_RESULT([install_suffix : ${install_suffix}])
+AC_MSG_RESULT([malloc_conf : ${config_malloc_conf}])
+AC_MSG_RESULT([documentation : ${enable_doc}])
+AC_MSG_RESULT([shared libs : ${enable_shared}])
+AC_MSG_RESULT([static libs : ${enable_static}])
+AC_MSG_RESULT([autogen : ${enable_autogen}])
+AC_MSG_RESULT([debug : ${enable_debug}])
+AC_MSG_RESULT([stats : ${enable_stats}])
+AC_MSG_RESULT([experimental_smallocx : ${enable_experimental_smallocx}])
+AC_MSG_RESULT([prof : ${enable_prof}])
+AC_MSG_RESULT([prof-libunwind : ${enable_prof_libunwind}])
+AC_MSG_RESULT([prof-libgcc : ${enable_prof_libgcc}])
+AC_MSG_RESULT([prof-gcc : ${enable_prof_gcc}])
+AC_MSG_RESULT([fill : ${enable_fill}])
+AC_MSG_RESULT([utrace : ${enable_utrace}])
+AC_MSG_RESULT([xmalloc : ${enable_xmalloc}])
+AC_MSG_RESULT([log : ${enable_log}])
+AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}])
+AC_MSG_RESULT([cache-oblivious : ${enable_cache_oblivious}])
+AC_MSG_RESULT([cxx : ${enable_cxx}])
+AC_MSG_RESULT([===============================================================================])
+||||||| dec341af7695
+=======
+dnl Process this file with autoconf to produce a configure script.
+AC_PREREQ(2.68)
+AC_INIT([Makefile.in])
+
+AC_CONFIG_AUX_DIR([build-aux])
+
+dnl ============================================================================
+dnl Custom macro definitions.
+
+dnl JE_CONCAT_VVV(r, a, b)
+dnl
+dnl Set $r to the concatenation of $a and $b, with a space separating them iff
+dnl both $a and $b are non-empty.
+AC_DEFUN([JE_CONCAT_VVV],
+if test "x[$]{$2}" = "x" -o "x[$]{$3}" = "x" ; then
+ $1="[$]{$2}[$]{$3}"
+else
+ $1="[$]{$2} [$]{$3}"
+fi
+)
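+dnl For illustration (not part of the macro): a later call such as
+dnl   JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
+dnl with CONFIGURE_CFLAGS="-O3 -Wall" and SPECIFIED_CFLAGS="-g" yields
+dnl CFLAGS="-O3 -Wall -g"; when either input is empty the other is used
+dnl verbatim, with no stray separating space.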
+
+dnl JE_APPEND_VS(a, b)
+dnl
+dnl Set $a to the concatenation of $a and b, with a space separating them iff
+dnl both $a and b are non-empty.
+AC_DEFUN([JE_APPEND_VS],
+ T_APPEND_V=$2
+ JE_CONCAT_VVV($1, $1, T_APPEND_V)
+)
+
+CONFIGURE_CFLAGS=
+SPECIFIED_CFLAGS="${CFLAGS}"
+dnl JE_CFLAGS_ADD(cflag)
+dnl
+dnl CFLAGS is the concatenation of CONFIGURE_CFLAGS and SPECIFIED_CFLAGS
+dnl (ignoring EXTRA_CFLAGS, which does not impact configure tests). This macro
+dnl appends to CONFIGURE_CFLAGS and regenerates CFLAGS.
+AC_DEFUN([JE_CFLAGS_ADD],
+[
+AC_MSG_CHECKING([whether compiler supports $1])
+T_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
+JE_APPEND_VS(CONFIGURE_CFLAGS, $1)
+JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
+[[
+]], [[
+ return 0;
+]])],
+ [je_cv_cflags_added=$1]
+ AC_MSG_RESULT([yes]),
+ [je_cv_cflags_added=]
+ AC_MSG_RESULT([no])
+ [CONFIGURE_CFLAGS="${T_CONFIGURE_CFLAGS}"]
+)
+JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
+])
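+dnl For illustration: a typical call below is JE_CFLAGS_ADD([-Wall]). The
+dnl flag is tentatively appended, an empty program is compiled, and on
+dnl failure CONFIGURE_CFLAGS is rolled back; je_cv_cflags_added records the
+dnl flag actually kept, so callers can test for it afterwards.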
+
+dnl JE_CFLAGS_SAVE()
+dnl JE_CFLAGS_RESTORE()
+dnl
+dnl Save/restore CFLAGS. Nesting is not supported.
+AC_DEFUN([JE_CFLAGS_SAVE],
+SAVED_CONFIGURE_CFLAGS="${CONFIGURE_CFLAGS}"
+)
+AC_DEFUN([JE_CFLAGS_RESTORE],
+CONFIGURE_CFLAGS="${SAVED_CONFIGURE_CFLAGS}"
+JE_CONCAT_VVV(CFLAGS, CONFIGURE_CFLAGS, SPECIFIED_CFLAGS)
+)
+
+CONFIGURE_CXXFLAGS=
+SPECIFIED_CXXFLAGS="${CXXFLAGS}"
+dnl JE_CXXFLAGS_ADD(cxxflag)
+AC_DEFUN([JE_CXXFLAGS_ADD],
+[
+AC_MSG_CHECKING([whether compiler supports $1])
+T_CONFIGURE_CXXFLAGS="${CONFIGURE_CXXFLAGS}"
+JE_APPEND_VS(CONFIGURE_CXXFLAGS, $1)
+JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS)
+AC_LANG_PUSH([C++])
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
+[[
+]], [[
+ return 0;
+]])],
+ [je_cv_cxxflags_added=$1]
+ AC_MSG_RESULT([yes]),
+ [je_cv_cxxflags_added=]
+ AC_MSG_RESULT([no])
+ [CONFIGURE_CXXFLAGS="${T_CONFIGURE_CXXFLAGS}"]
+)
+AC_LANG_POP([C++])
+JE_CONCAT_VVV(CXXFLAGS, CONFIGURE_CXXFLAGS, SPECIFIED_CXXFLAGS)
+])
+
+dnl JE_COMPILABLE(label, hcode, mcode, rvar)
+dnl
+dnl Use AC_LINK_IFELSE() rather than AC_COMPILE_IFELSE() so that linker errors
+dnl cause failure.
+AC_DEFUN([JE_COMPILABLE],
+[
+AC_CACHE_CHECK([whether $1 is compilable],
+ [$4],
+ [AC_LINK_IFELSE([AC_LANG_PROGRAM([$2],
+ [$3])],
+ [$4=yes],
+ [$4=no])])
+])
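+dnl For illustration, a typical invocation elsewhere in this script is
+dnl   JE_COMPILABLE([madvise(2)], [#include <sys/mman.h>],
+dnl                 [madvise((void *)0, 0, 0);], [je_cv_madvise])
+dnl which leaves ${je_cv_madvise} set to "yes" only if the probe both
+dnl compiles and links.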
+
+dnl ============================================================================
+
+CONFIG=`echo ${ac_configure_args} | sed -e 's#'"'"'\([^ ]*\)'"'"'#\1#g'`
+AC_SUBST([CONFIG])
+
+dnl Library revision.
+rev=2
+AC_SUBST([rev])
+
+srcroot=$srcdir
+if test "x${srcroot}" = "x." ; then
+ srcroot=""
+else
+ srcroot="${srcroot}/"
+fi
+AC_SUBST([srcroot])
+abs_srcroot="`cd \"${srcdir}\"; pwd`/"
+AC_SUBST([abs_srcroot])
+
+objroot=""
+AC_SUBST([objroot])
+abs_objroot="`pwd`/"
+AC_SUBST([abs_objroot])
+
+dnl Munge install path variables.
+if test "x$prefix" = "xNONE" ; then
+ prefix="/usr/local"
+fi
+if test "x$exec_prefix" = "xNONE" ; then
+ exec_prefix=$prefix
+fi
+PREFIX=$prefix
+AC_SUBST([PREFIX])
+BINDIR=`eval echo $bindir`
+BINDIR=`eval echo $BINDIR`
+AC_SUBST([BINDIR])
+INCLUDEDIR=`eval echo $includedir`
+INCLUDEDIR=`eval echo $INCLUDEDIR`
+AC_SUBST([INCLUDEDIR])
+LIBDIR=`eval echo $libdir`
+LIBDIR=`eval echo $LIBDIR`
+AC_SUBST([LIBDIR])
+DATADIR=`eval echo $datadir`
+DATADIR=`eval echo $DATADIR`
+AC_SUBST([DATADIR])
+MANDIR=`eval echo $mandir`
+MANDIR=`eval echo $MANDIR`
+AC_SUBST([MANDIR])
+
+dnl Support for building documentation.
+AC_PATH_PROG([XSLTPROC], [xsltproc], [false], [$PATH])
+if test -d "/usr/share/xml/docbook/stylesheet/docbook-xsl" ; then
+ DEFAULT_XSLROOT="/usr/share/xml/docbook/stylesheet/docbook-xsl"
+elif test -d "/usr/share/sgml/docbook/xsl-stylesheets" ; then
+ DEFAULT_XSLROOT="/usr/share/sgml/docbook/xsl-stylesheets"
+else
+ dnl Documentation building will fail if this default gets used.
+ DEFAULT_XSLROOT=""
+fi
+AC_ARG_WITH([xslroot],
+ [AS_HELP_STRING([--with-xslroot=<path>], [XSL stylesheet root path])], [
+if test "x$with_xslroot" = "xno" ; then
+ XSLROOT="${DEFAULT_XSLROOT}"
+else
+ XSLROOT="${with_xslroot}"
+fi
+],
+ XSLROOT="${DEFAULT_XSLROOT}"
+)
+if test "x$XSLTPROC" = "xfalse" ; then
+ XSLROOT=""
+fi
+AC_SUBST([XSLROOT])
+
+dnl If CFLAGS isn't defined, set CFLAGS to something reasonable. Otherwise,
+dnl just prevent autoconf from clobbering the user-specified CFLAGS.
+CFLAGS=$CFLAGS
+AC_PROG_CC
+
+if test "x$GCC" != "xyes" ; then
+ AC_CACHE_CHECK([whether compiler is MSVC],
+ [je_cv_msvc],
+ [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
+ [
+#ifndef _MSC_VER
+ int fail[-1];
+#endif
+])],
+ [je_cv_msvc=yes],
+ [je_cv_msvc=no])])
+fi
+
+dnl check if a cray prgenv wrapper compiler is being used
+je_cv_cray_prgenv_wrapper=""
+if test "x${PE_ENV}" != "x" ; then
+ case "${CC}" in
+ CC|cc)
+ je_cv_cray_prgenv_wrapper="yes"
+ ;;
+ *)
+ ;;
+ esac
+fi
+
+AC_CACHE_CHECK([whether compiler is cray],
+ [je_cv_cray],
+ [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
+ [
+#ifndef _CRAYC
+ int fail[-1];
+#endif
+])],
+ [je_cv_cray=yes],
+ [je_cv_cray=no])])
+
+if test "x${je_cv_cray}" = "xyes" ; then
+ AC_CACHE_CHECK([whether cray compiler version is 8.4],
+ [je_cv_cray_84],
+ [AC_COMPILE_IFELSE([AC_LANG_PROGRAM([],
+ [
+#if !(_RELEASE_MAJOR == 8 && _RELEASE_MINOR == 4)
+ int fail[-1];
+#endif
+])],
+ [je_cv_cray_84=yes],
+ [je_cv_cray_84=no])])
+fi
+
+if test "x$GCC" = "xyes" ; then
+ JE_CFLAGS_ADD([-std=gnu11])
+ if test "x$je_cv_cflags_added" = "x-std=gnu11" ; then
+ AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT])
+ else
+ JE_CFLAGS_ADD([-std=gnu99])
+ if test "x$je_cv_cflags_added" = "x-std=gnu99" ; then
+ AC_DEFINE_UNQUOTED([JEMALLOC_HAS_RESTRICT])
+ fi
+ fi
+ JE_CFLAGS_ADD([-Wall])
+ JE_CFLAGS_ADD([-Wextra])
+ JE_CFLAGS_ADD([-Wshorten-64-to-32])
+ JE_CFLAGS_ADD([-Wsign-compare])
+ JE_CFLAGS_ADD([-Wundef])
+ JE_CFLAGS_ADD([-Wno-format-zero-length])
+ JE_CFLAGS_ADD([-pipe])
+ JE_CFLAGS_ADD([-g3])
+elif test "x$je_cv_msvc" = "xyes" ; then
+ CC="$CC -nologo"
+ JE_CFLAGS_ADD([-Zi])
+ JE_CFLAGS_ADD([-MT])
+ JE_CFLAGS_ADD([-W3])
+ JE_CFLAGS_ADD([-FS])
+ JE_APPEND_VS(CPPFLAGS, -I${srcdir}/include/msvc_compat)
+fi
+if test "x$je_cv_cray" = "xyes" ; then
+ dnl cray compiler 8.4 has an inlining bug
+ if test "x$je_cv_cray_84" = "xyes" ; then
+ JE_CFLAGS_ADD([-hipa2])
+ JE_CFLAGS_ADD([-hnognu])
+ fi
+ dnl ignore unreachable code warning
+ JE_CFLAGS_ADD([-hnomessage=128])
+ dnl ignore redefinition of "malloc", "free", etc warning
+ JE_CFLAGS_ADD([-hnomessage=1357])
+fi
+AC_SUBST([CONFIGURE_CFLAGS])
+AC_SUBST([SPECIFIED_CFLAGS])
+AC_SUBST([EXTRA_CFLAGS])
+AC_PROG_CPP
+
+AC_ARG_ENABLE([cxx],
+ [AS_HELP_STRING([--disable-cxx], [Disable C++ integration])],
+if test "x$enable_cxx" = "xno" ; then
+ enable_cxx="0"
+else
+ enable_cxx="1"
+fi
+,
+enable_cxx="1"
+)
+if test "x$enable_cxx" = "x1" ; then
+ dnl Require at least c++14, which is the first version to support sized
+ dnl deallocation. C++ support is not compiled otherwise.
+ m4_include([m4/ax_cxx_compile_stdcxx.m4])
+ AX_CXX_COMPILE_STDCXX([14], [noext], [optional])
+ if test "x${HAVE_CXX14}" = "x1" ; then
+ JE_CXXFLAGS_ADD([-Wall])
+ JE_CXXFLAGS_ADD([-Wextra])
+ JE_CXXFLAGS_ADD([-g3])
+
+ SAVED_LIBS="${LIBS}"
+ JE_APPEND_VS(LIBS, -lstdc++)
+ JE_COMPILABLE([libstdc++ linkage], [
+#include <stdlib.h>
+], [[
+ int *arr = (int *)malloc(sizeof(int) * 42);
+ if (arr == NULL)
+ return 1;
+]], [je_cv_libstdcxx])
+ if test "x${je_cv_libstdcxx}" = "xno" ; then
+ LIBS="${SAVED_LIBS}"
+ fi
+ else
+ enable_cxx="0"
+ fi
+fi
+AC_SUBST([enable_cxx])
+AC_SUBST([CONFIGURE_CXXFLAGS])
+AC_SUBST([SPECIFIED_CXXFLAGS])
+AC_SUBST([EXTRA_CXXFLAGS])
+
+AC_C_BIGENDIAN([ac_cv_big_endian=1], [ac_cv_big_endian=0])
+if test "x${ac_cv_big_endian}" = "x1" ; then
+ AC_DEFINE_UNQUOTED([JEMALLOC_BIG_ENDIAN], [ ])
+fi
+
+if test "x${je_cv_msvc}" = "xyes" -a "x${ac_cv_header_inttypes_h}" = "xno"; then
+ JE_APPEND_VS(CPPFLAGS, -I${srcdir}/include/msvc_compat/C99)
+fi
+
+if test "x${je_cv_msvc}" = "xyes" ; then
+ LG_SIZEOF_PTR=LG_SIZEOF_PTR_WIN
+ AC_MSG_RESULT([Using a predefined value for sizeof(void *): 4 for 32-bit, 8 for 64-bit])
+else
+ AC_CHECK_SIZEOF([void *])
+ if test "x${ac_cv_sizeof_void_p}" = "x8" ; then
+ LG_SIZEOF_PTR=3
+ elif test "x${ac_cv_sizeof_void_p}" = "x4" ; then
+ LG_SIZEOF_PTR=2
+ else
+ AC_MSG_ERROR([Unsupported pointer size: ${ac_cv_sizeof_void_p}])
+ fi
+fi
+AC_DEFINE_UNQUOTED([LG_SIZEOF_PTR], [$LG_SIZEOF_PTR])
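+dnl For illustration: on an LP64 target sizeof(void *) is 8 == 2^3, so
+dnl LG_SIZEOF_PTR is 3; the LG_SIZEOF_* values below use the same lg(bytes)
+dnl encoding.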
+
+AC_CHECK_SIZEOF([int])
+if test "x${ac_cv_sizeof_int}" = "x8" ; then
+ LG_SIZEOF_INT=3
+elif test "x${ac_cv_sizeof_int}" = "x4" ; then
+ LG_SIZEOF_INT=2
+else
+ AC_MSG_ERROR([Unsupported int size: ${ac_cv_sizeof_int}])
+fi
+AC_DEFINE_UNQUOTED([LG_SIZEOF_INT], [$LG_SIZEOF_INT])
+
+AC_CHECK_SIZEOF([long])
+if test "x${ac_cv_sizeof_long}" = "x8" ; then
+ LG_SIZEOF_LONG=3
+elif test "x${ac_cv_sizeof_long}" = "x4" ; then
+ LG_SIZEOF_LONG=2
+else
+ AC_MSG_ERROR([Unsupported long size: ${ac_cv_sizeof_long}])
+fi
+AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG], [$LG_SIZEOF_LONG])
+
+AC_CHECK_SIZEOF([long long])
+if test "x${ac_cv_sizeof_long_long}" = "x8" ; then
+ LG_SIZEOF_LONG_LONG=3
+elif test "x${ac_cv_sizeof_long_long}" = "x4" ; then
+ LG_SIZEOF_LONG_LONG=2
+else
+ AC_MSG_ERROR([Unsupported long long size: ${ac_cv_sizeof_long_long}])
+fi
+AC_DEFINE_UNQUOTED([LG_SIZEOF_LONG_LONG], [$LG_SIZEOF_LONG_LONG])
+
+AC_CHECK_SIZEOF([intmax_t])
+if test "x${ac_cv_sizeof_intmax_t}" = "x16" ; then
+ LG_SIZEOF_INTMAX_T=4
+elif test "x${ac_cv_sizeof_intmax_t}" = "x8" ; then
+ LG_SIZEOF_INTMAX_T=3
+elif test "x${ac_cv_sizeof_intmax_t}" = "x4" ; then
+ LG_SIZEOF_INTMAX_T=2
+else
+ AC_MSG_ERROR([Unsupported intmax_t size: ${ac_cv_sizeof_intmax_t}])
+fi
+AC_DEFINE_UNQUOTED([LG_SIZEOF_INTMAX_T], [$LG_SIZEOF_INTMAX_T])
+
+AC_CANONICAL_HOST
+dnl CPU-specific settings.
+CPU_SPINWAIT=""
+case "${host_cpu}" in
+ i686|x86_64)
+ HAVE_CPU_SPINWAIT=1
+ if test "x${je_cv_msvc}" = "xyes" ; then
+ AC_CACHE_VAL([je_cv_pause_msvc],
+ [JE_COMPILABLE([pause instruction MSVC], [],
+ [[_mm_pause(); return 0;]],
+ [je_cv_pause_msvc])])
+ if test "x${je_cv_pause_msvc}" = "xyes" ; then
+ CPU_SPINWAIT='_mm_pause()'
+ fi
+ else
+ AC_CACHE_VAL([je_cv_pause],
+ [JE_COMPILABLE([pause instruction], [],
+ [[__asm__ volatile("pause"); return 0;]],
+ [je_cv_pause])])
+ if test "x${je_cv_pause}" = "xyes" ; then
+ CPU_SPINWAIT='__asm__ volatile("pause")'
+ fi
+ fi
+ ;;
+ *)
+ HAVE_CPU_SPINWAIT=0
+ ;;
+esac
+AC_DEFINE_UNQUOTED([HAVE_CPU_SPINWAIT], [$HAVE_CPU_SPINWAIT])
+AC_DEFINE_UNQUOTED([CPU_SPINWAIT], [$CPU_SPINWAIT])
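+dnl For illustration (generic sketch): spin loops issue the hint as
+dnl   while (!flag) { CPU_SPINWAIT; }
+dnl where on i686/x86_64 it is the pause instruction probed above, and on
+dnl other CPUs it expands to nothing (HAVE_CPU_SPINWAIT is 0).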
+
+AC_ARG_WITH([lg_vaddr],
+ [AS_HELP_STRING([--with-lg-vaddr=<lg-vaddr>], [Number of significant virtual address bits])],
+ [LG_VADDR="$with_lg_vaddr"], [LG_VADDR="detect"])
+
+case "${host_cpu}" in
+ aarch64)
+ if test "x$LG_VADDR" = "xdetect"; then
+ AC_MSG_CHECKING([number of significant virtual address bits])
+ if test "x${LG_SIZEOF_PTR}" = "x2" ; then
+ #aarch64 ILP32
+ LG_VADDR=32
+ else
+ #aarch64 LP64
+ LG_VADDR=48
+ fi
+ AC_MSG_RESULT([$LG_VADDR])
+ fi
+ ;;
+ x86_64)
+ if test "x$LG_VADDR" = "xdetect"; then
+ AC_CACHE_CHECK([number of significant virtual address bits],
+ [je_cv_lg_vaddr],
+ AC_RUN_IFELSE([AC_LANG_PROGRAM(
+[[
+#include <stdio.h>
+#ifdef _WIN32
+#include <limits.h>
+#include <intrin.h>
+typedef unsigned __int32 uint32_t;
+#else
+#include <stdint.h>
+#endif
+]], [[
+ uint32_t r[[4]];
+ uint32_t eax_in = 0x80000008U;
+#ifdef _WIN32
+ __cpuid((int *)r, (int)eax_in);
+#else
+ asm volatile ("cpuid"
+ : "=a" (r[[0]]), "=b" (r[[1]]), "=c" (r[[2]]), "=d" (r[[3]])
+ : "a" (eax_in), "c" (0)
+ );
+#endif
+ uint32_t eax_out = r[[0]];
+ uint32_t vaddr = ((eax_out & 0x0000ff00U) >> 8);
+ FILE *f = fopen("conftest.out", "w");
+ if (f == NULL) {
+ return 1;
+ }
+ if (vaddr > (sizeof(void *) << 3)) {
+ vaddr = sizeof(void *) << 3;
+ }
+ fprintf(f, "%u", vaddr);
+ fclose(f);
+ return 0;
+]])],
+ [je_cv_lg_vaddr=`cat conftest.out`],
+ [je_cv_lg_vaddr=error],
+ [je_cv_lg_vaddr=57]))
+ if test "x${je_cv_lg_vaddr}" != "x" ; then
+ LG_VADDR="${je_cv_lg_vaddr}"
+ fi
+ if test "x${LG_VADDR}" != "xerror" ; then
+ AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR])
+ else
+ AC_MSG_ERROR([cannot determine number of significant virtual address bits])
+ fi
+ fi
+ ;;
+ *)
+ if test "x$LG_VADDR" = "xdetect"; then
+ AC_MSG_CHECKING([number of significant virtual address bits])
+ if test "x${LG_SIZEOF_PTR}" = "x3" ; then
+ LG_VADDR=64
+ elif test "x${LG_SIZEOF_PTR}" = "x2" ; then
+ LG_VADDR=32
+ elif test "x${LG_SIZEOF_PTR}" = "xLG_SIZEOF_PTR_WIN" ; then
+ LG_VADDR="(1U << (LG_SIZEOF_PTR_WIN+3))"
+ else
+ AC_MSG_ERROR([Unsupported lg(pointer size): ${LG_SIZEOF_PTR}])
+ fi
+ AC_MSG_RESULT([$LG_VADDR])
+ fi
+ ;;
+esac
+AC_DEFINE_UNQUOTED([LG_VADDR], [$LG_VADDR])
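+dnl For reference: CPUID leaf 0x80000008 reports the supported virtual
+dnl address width in EAX bits 15:8 (bits 7:0 carry the physical width), so
+dnl typical x86_64 parts yield LG_VADDR=48 and five-level-paging parts 57,
+dnl which is also the fallback used above when cross-compiling.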
+
+LD_PRELOAD_VAR="LD_PRELOAD"
+so="so"
+importlib="${so}"
+o="$ac_objext"
+a="a"
+exe="$ac_exeext"
+libprefix="lib"
+link_whole_archive="0"
+DSO_LDFLAGS='-shared -Wl,-soname,$(@F)'
+RPATH='-Wl,-rpath,$(1)'
+SOREV="${so}.${rev}"
+PIC_CFLAGS='-fPIC -DPIC'
+CTARGET='-o $@'
+LDTARGET='-o $@'
+TEST_LD_MODE=
+EXTRA_LDFLAGS=
+ARFLAGS='crus'
+AROUT=' $@'
+CC_MM=1
+
+if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
+ TEST_LD_MODE='-dynamic'
+fi
+
+if test "x${je_cv_cray}" = "xyes" ; then
+ CC_MM=
+fi
+
+AN_MAKEVAR([AR], [AC_PROG_AR])
+AN_PROGRAM([ar], [AC_PROG_AR])
+AC_DEFUN([AC_PROG_AR], [AC_CHECK_TOOL(AR, ar, :)])
+AC_PROG_AR
+
+AN_MAKEVAR([NM], [AC_PROG_NM])
+AN_PROGRAM([nm], [AC_PROG_NM])
+AC_DEFUN([AC_PROG_NM], [AC_CHECK_TOOL(NM, nm, :)])
+AC_PROG_NM
+
+AC_PROG_AWK
+
+dnl ============================================================================
+dnl jemalloc version.
+dnl
+
+AC_ARG_WITH([version],
+ [AS_HELP_STRING([--with-version=<major>.<minor>.<bugfix>-<nrev>-g<gid>],
+ [Version string])],
+ [
+ echo "${with_version}" | grep ['^[0-9]\+\.[0-9]\+\.[0-9]\+-[0-9]\+-g[0-9a-f]\+$'] 2>&1 1>/dev/null
+ if test $? -eq 0 ; then
+ echo "$with_version" > "${objroot}VERSION"
+ else
+ echo "${with_version}" | grep ['^VERSION$'] 2>&1 1>/dev/null
+ if test $? -ne 0 ; then
+ AC_MSG_ERROR([${with_version} does not match <major>.<minor>.<bugfix>-<nrev>-g<gid> or VERSION])
+ fi
+ fi
+ ], [
+ dnl Set VERSION if source directory is inside a git repository.
+ if test "x`test ! \"${srcroot}\" && cd \"${srcroot}\"; git rev-parse --is-inside-work-tree 2>/dev/null`" = "xtrue" ; then
+ dnl Pattern globs aren't powerful enough to match both single- and
+ dnl double-digit version numbers, so iterate over patterns to support up
+ dnl to version 99.99.99 without any accidental matches.
+ for pattern in ['[0-9].[0-9].[0-9]' '[0-9].[0-9].[0-9][0-9]' \
+ '[0-9].[0-9][0-9].[0-9]' '[0-9].[0-9][0-9].[0-9][0-9]' \
+ '[0-9][0-9].[0-9].[0-9]' '[0-9][0-9].[0-9].[0-9][0-9]' \
+ '[0-9][0-9].[0-9][0-9].[0-9]' \
+ '[0-9][0-9].[0-9][0-9].[0-9][0-9]']; do
+ (test ! "${srcroot}" && cd "${srcroot}"; git describe --long --abbrev=40 --match="${pattern}") > "${objroot}VERSION.tmp" 2>/dev/null
+ if test $? -eq 0 ; then
+ mv "${objroot}VERSION.tmp" "${objroot}VERSION"
+ break
+ fi
+ done
+ fi
+ rm -f "${objroot}VERSION.tmp"
+ ])
+
+if test ! -e "${objroot}VERSION" ; then
+ if test ! -e "${srcroot}VERSION" ; then
+ AC_MSG_RESULT(
+ [Missing VERSION file, and unable to generate it; creating bogus VERSION])
+ echo "0.0.0-0-g0000000000000000000000000000000000000000" > "${objroot}VERSION"
+ else
+ cp ${srcroot}VERSION ${objroot}VERSION
+ fi
+fi
+jemalloc_version=`cat "${objroot}VERSION"`
+jemalloc_version_major=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]1}'`
+jemalloc_version_minor=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]2}'`
+jemalloc_version_bugfix=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]3}'`
+jemalloc_version_nrev=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]4}'`
+jemalloc_version_gid=`echo ${jemalloc_version} | tr ".g-" " " | awk '{print [$]5}'`
+AC_SUBST([jemalloc_version])
+AC_SUBST([jemalloc_version_major])
+AC_SUBST([jemalloc_version_minor])
+AC_SUBST([jemalloc_version_bugfix])
+AC_SUBST([jemalloc_version_nrev])
+AC_SUBST([jemalloc_version_gid])
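+dnl For illustration: a VERSION string such as "5.2.1-0-g<gid>" is split by
+dnl mapping '.', 'g', and '-' to spaces, yielding major=5, minor=2,
+dnl bugfix=1, nrev=0, and gid=<gid> (git hashes contain no 'g', so the gid
+dnl field survives intact).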
+
+dnl Platform-specific settings. abi and RPATH can probably be determined
+dnl programmatically, but doing so is error-prone, which makes it generally
+dnl not worth the trouble.
+dnl
+dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
+dnl definitions need to be seen before any headers are included, which is a pain
+dnl to make happen otherwise.
+default_retain="0"
+maps_coalesce="1"
+DUMP_SYMS="${NM} -a"
+SYM_PREFIX=""
+case "${host}" in
+ *-*-darwin* | *-*-ios*)
+ abi="macho"
+ RPATH=""
+ LD_PRELOAD_VAR="DYLD_INSERT_LIBRARIES"
+ so="dylib"
+ importlib="${so}"
+ force_tls="0"
+ DSO_LDFLAGS='-shared -Wl,-install_name,$(LIBDIR)/$(@F)'
+ SOREV="${rev}.${so}"
+ sbrk_deprecated="1"
+ SYM_PREFIX="_"
+ ;;
+ *-*-freebsd*)
+ abi="elf"
+ AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ])
+ force_lazy_lock="1"
+ ;;
+ *-*-dragonfly*)
+ abi="elf"
+ ;;
+ *-*-openbsd*)
+ abi="elf"
+ force_tls="0"
+ ;;
+ *-*-bitrig*)
+ abi="elf"
+ ;;
+ *-*-linux-android)
+ dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
+ JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
+ abi="elf"
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS], [ ])
+ AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
+ AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
+ AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
+ AC_DEFINE([JEMALLOC_C11_ATOMICS])
+ force_tls="0"
+ if test "${LG_SIZEOF_PTR}" = "3"; then
+ default_retain="1"
+ fi
+ ;;
+ *-*-linux*)
+ dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
+ JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
+ abi="elf"
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS], [ ])
+ AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
+ AC_DEFINE([JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY], [ ])
+ AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
+ AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ])
+ if test "${LG_SIZEOF_PTR}" = "3"; then
+ default_retain="1"
+ fi
+ ;;
+ *-*-kfreebsd*)
+ dnl syscall(2) and secure_getenv(3) are exposed by _GNU_SOURCE.
+ JE_APPEND_VS(CPPFLAGS, -D_GNU_SOURCE)
+ abi="elf"
+ AC_DEFINE([JEMALLOC_HAS_ALLOCA_H])
+ AC_DEFINE([JEMALLOC_SYSCTL_VM_OVERCOMMIT], [ ])
+ AC_DEFINE([JEMALLOC_THREADED_INIT], [ ])
+ AC_DEFINE([JEMALLOC_USE_CXX_THROW], [ ])
+ ;;
+ *-*-netbsd*)
+ AC_MSG_CHECKING([ABI])
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
+[[#ifdef __ELF__
+/* ELF */
+#else
+#error aout
+#endif
+]])],
+ [abi="elf"],
+ [abi="aout"])
+ AC_MSG_RESULT([$abi])
+ ;;
+ *-*-solaris2*)
+ abi="elf"
+ RPATH='-Wl,-R,$(1)'
+ dnl Solaris needs this for sigwait().
+ JE_APPEND_VS(CPPFLAGS, -D_POSIX_PTHREAD_SEMANTICS)
+ JE_APPEND_VS(LIBS, -lposix4 -lsocket -lnsl)
+ ;;
+ *-ibm-aix*)
+ if test "${LG_SIZEOF_PTR}" = "3"; then
+ dnl 64bit AIX
+ LD_PRELOAD_VAR="LDR_PRELOAD64"
+ else
+ dnl 32bit AIX
+ LD_PRELOAD_VAR="LDR_PRELOAD"
+ fi
+ abi="xcoff"
+ ;;
+ *-*-mingw* | *-*-cygwin*)
+ abi="pecoff"
+ force_tls="0"
+ maps_coalesce="0"
+ RPATH=""
+ so="dll"
+ if test "x$je_cv_msvc" = "xyes" ; then
+ importlib="lib"
+ DSO_LDFLAGS="-LD"
+ EXTRA_LDFLAGS="-link -DEBUG"
+ CTARGET='-Fo$@'
+ LDTARGET='-Fe$@'
+ AR='lib'
+ ARFLAGS='-nologo -out:'
+ AROUT='$@'
+ CC_MM=
+ else
+ importlib="${so}"
+ DSO_LDFLAGS="-shared"
+ link_whole_archive="1"
+ fi
+ case "${host}" in
+ *-*-cygwin*)
+ DUMP_SYMS="dumpbin /SYMBOLS"
+ ;;
+ *)
+ ;;
+ esac
+ a="lib"
+ libprefix=""
+ SOREV="${so}"
+ PIC_CFLAGS=""
+ if test "${LG_SIZEOF_PTR}" = "3"; then
+ default_retain="1"
+ fi
+ ;;
+ *)
+ AC_MSG_RESULT([Unsupported operating system: ${host}])
+ abi="elf"
+ ;;
+esac
+
+JEMALLOC_USABLE_SIZE_CONST=const
+AC_CHECK_HEADERS([malloc.h], [
+ AC_MSG_CHECKING([whether malloc_usable_size definition can use const argument])
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
+ [#include <malloc.h>
+ #include <stddef.h>
+ size_t malloc_usable_size(const void *ptr);
+ ],
+ [])],[
+ AC_MSG_RESULT([yes])
+ ],[
+ JEMALLOC_USABLE_SIZE_CONST=
+ AC_MSG_RESULT([no])
+ ])
+])
+AC_DEFINE_UNQUOTED([JEMALLOC_USABLE_SIZE_CONST], [$JEMALLOC_USABLE_SIZE_CONST])
+AC_SUBST([abi])
+AC_SUBST([RPATH])
+AC_SUBST([LD_PRELOAD_VAR])
+AC_SUBST([so])
+AC_SUBST([importlib])
+AC_SUBST([o])
+AC_SUBST([a])
+AC_SUBST([exe])
+AC_SUBST([libprefix])
+AC_SUBST([link_whole_archive])
+AC_SUBST([DSO_LDFLAGS])
+AC_SUBST([EXTRA_LDFLAGS])
+AC_SUBST([SOREV])
+AC_SUBST([PIC_CFLAGS])
+AC_SUBST([CTARGET])
+AC_SUBST([LDTARGET])
+AC_SUBST([TEST_LD_MODE])
+AC_SUBST([MKLIB])
+AC_SUBST([ARFLAGS])
+AC_SUBST([AROUT])
+AC_SUBST([DUMP_SYMS])
+AC_SUBST([CC_MM])
+
+dnl Determine whether libm must be linked to use e.g. log(3).
+AC_SEARCH_LIBS([log], [m], , [AC_MSG_ERROR([Missing math functions])])
+if test "x$ac_cv_search_log" != "xnone required" ; then
+ LM="$ac_cv_search_log"
+else
+ LM=
+fi
+AC_SUBST(LM)
+
+JE_COMPILABLE([__attribute__ syntax],
+ [static __attribute__((unused)) void foo(void){}],
+ [],
+ [je_cv_attribute])
+if test "x${je_cv_attribute}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR], [ ])
+ if test "x${GCC}" = "xyes" -a "x${abi}" = "xelf"; then
+ JE_CFLAGS_ADD([-fvisibility=hidden])
+ JE_CXXFLAGS_ADD([-fvisibility=hidden])
+ fi
+fi
+dnl Check for tls_model attribute support (clang 3.0 still lacks support).
+JE_CFLAGS_SAVE()
+JE_CFLAGS_ADD([-Werror])
+JE_CFLAGS_ADD([-herror_on_warning])
+JE_COMPILABLE([tls_model attribute], [],
+ [static __thread int
+ __attribute__((tls_model("initial-exec"), unused)) foo;
+ foo = 0;],
+ [je_cv_tls_model])
+JE_CFLAGS_RESTORE()
+dnl (Setting of JEMALLOC_TLS_MODEL is done later, after we've checked for
+dnl --disable-initial-exec-tls)
+
+dnl Check for alloc_size attribute support.
+JE_CFLAGS_SAVE()
+JE_CFLAGS_ADD([-Werror])
+JE_CFLAGS_ADD([-herror_on_warning])
+JE_COMPILABLE([alloc_size attribute], [#include <stdlib.h>],
+ [void *foo(size_t size) __attribute__((alloc_size(1)));],
+ [je_cv_alloc_size])
+JE_CFLAGS_RESTORE()
+if test "x${je_cv_alloc_size}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR_ALLOC_SIZE], [ ])
+fi
+dnl Check for format(gnu_printf, ...) attribute support.
+JE_CFLAGS_SAVE()
+JE_CFLAGS_ADD([-Werror])
+JE_CFLAGS_ADD([-herror_on_warning])
+JE_COMPILABLE([format(gnu_printf, ...) attribute], [#include <stdlib.h>],
+ [void *foo(const char *format, ...) __attribute__((format(gnu_printf, 1, 2)));],
+ [je_cv_format_gnu_printf])
+JE_CFLAGS_RESTORE()
+if test "x${je_cv_format_gnu_printf}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF], [ ])
+fi
+dnl Check for format(printf, ...) attribute support.
+JE_CFLAGS_SAVE()
+JE_CFLAGS_ADD([-Werror])
+JE_CFLAGS_ADD([-herror_on_warning])
+JE_COMPILABLE([format(printf, ...) attribute], [#include <stdlib.h>],
+ [void *foo(const char *format, ...) __attribute__((format(printf, 1, 2)));],
+ [je_cv_format_printf])
+JE_CFLAGS_RESTORE()
+if test "x${je_cv_format_printf}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_PRINTF], [ ])
+fi
+
+dnl Check for format_arg(...) attribute support.
+JE_CFLAGS_SAVE()
+JE_CFLAGS_ADD([-Werror])
+JE_CFLAGS_ADD([-herror_on_warning])
+JE_COMPILABLE([format_arg(...) attribute], [#include <stdlib.h>],
+ [const char * __attribute__((__format_arg__(1))) foo(const char *format);],
+ [je_cv_format_arg])
+JE_CFLAGS_RESTORE()
+if test "x${je_cv_format_arg}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ATTR_FORMAT_ARG], [ ])
+fi
+
+dnl Support optional additions to rpath.
+AC_ARG_WITH([rpath],
+ [AS_HELP_STRING([--with-rpath=<rpath>], [Colon-separated rpath (ELF systems only)])],
+if test "x$with_rpath" = "xno" ; then
+ RPATH_EXTRA=
+else
+ RPATH_EXTRA="`echo $with_rpath | tr \":\" \" \"`"
+fi,
+ RPATH_EXTRA=
+)
+AC_SUBST([RPATH_EXTRA])
+
+dnl Disable rules that do automatic regeneration of configure output by default.
+AC_ARG_ENABLE([autogen],
+ [AS_HELP_STRING([--enable-autogen], [Automatically regenerate configure output])],
+if test "x$enable_autogen" = "xno" ; then
+ enable_autogen="0"
+else
+ enable_autogen="1"
+fi
+,
+enable_autogen="0"
+)
+AC_SUBST([enable_autogen])
+
+AC_PROG_INSTALL
+AC_PROG_RANLIB
+AC_PATH_PROG([LD], [ld], [false], [$PATH])
+AC_PATH_PROG([AUTOCONF], [autoconf], [false], [$PATH])
+
+dnl Enable documentation
+AC_ARG_ENABLE([doc],
+  [AS_HELP_STRING([--enable-doc], [Build documentation])],
+if test "x$enable_doc" = "xno" ; then
+ enable_doc="0"
+else
+ enable_doc="1"
+fi
+,
+enable_doc="1"
+)
+AC_SUBST([enable_doc])
+
+dnl Enable shared libs
+AC_ARG_ENABLE([shared],
+  [AS_HELP_STRING([--enable-shared], [Build shared libraries])],
+if test "x$enable_shared" = "xno" ; then
+ enable_shared="0"
+else
+ enable_shared="1"
+fi
+,
+enable_shared="1"
+)
+AC_SUBST([enable_shared])
+
+dnl Enable static libs
+AC_ARG_ENABLE([static],
+  [AS_HELP_STRING([--enable-static], [Build static libraries])],
+if test "x$enable_static" = "xno" ; then
+ enable_static="0"
+else
+ enable_static="1"
+fi
+,
+enable_static="1"
+)
+AC_SUBST([enable_static])
+
+if test "$enable_shared$enable_static" = "00" ; then
+ AC_MSG_ERROR([Please enable one of shared or static builds])
+fi
+
+dnl Perform no name mangling by default.
+AC_ARG_WITH([mangling],
+ [AS_HELP_STRING([--with-mangling=<map>], [Mangle symbols in <map>])],
+ [mangling_map="$with_mangling"], [mangling_map=""])
+
+dnl Do not prefix public APIs by default.
+AC_ARG_WITH([jemalloc_prefix],
+ [AS_HELP_STRING([--with-jemalloc-prefix=<prefix>], [Prefix to prepend to all public APIs])],
+ [JEMALLOC_PREFIX="$with_jemalloc_prefix"],
+ [if test "x$abi" != "xmacho" -a "x$abi" != "xpecoff"; then
+ JEMALLOC_PREFIX=""
+else
+ JEMALLOC_PREFIX="je_"
+fi]
+)
+if test "x$JEMALLOC_PREFIX" = "x" ; then
+ AC_DEFINE([JEMALLOC_IS_MALLOC])
+else
+ JEMALLOC_CPREFIX=`echo ${JEMALLOC_PREFIX} | tr "a-z" "A-Z"`
+ AC_DEFINE_UNQUOTED([JEMALLOC_PREFIX], ["$JEMALLOC_PREFIX"])
+ AC_DEFINE_UNQUOTED([JEMALLOC_CPREFIX], ["$JEMALLOC_CPREFIX"])
+fi
+AC_SUBST([JEMALLOC_PREFIX])
+AC_SUBST([JEMALLOC_CPREFIX])
+
+AC_ARG_WITH([export],
+ [AS_HELP_STRING([--without-export], [disable exporting jemalloc public APIs])],
+ [if test "x$with_export" = "xno"; then
+ AC_DEFINE([JEMALLOC_EXPORT],[])
+fi]
+)
+
+public_syms="aligned_alloc calloc dallocx free mallctl mallctlbymib mallctlnametomib malloc malloc_conf malloc_message malloc_stats_print malloc_usable_size mallocx smallocx_${jemalloc_version_gid} nallocx posix_memalign rallocx realloc sallocx sdallocx xallocx"
+dnl Check for additional platform-specific public API functions.
+AC_CHECK_FUNC([memalign],
+ [AC_DEFINE([JEMALLOC_OVERRIDE_MEMALIGN], [ ])
+ public_syms="${public_syms} memalign"])
+AC_CHECK_FUNC([valloc],
+ [AC_DEFINE([JEMALLOC_OVERRIDE_VALLOC], [ ])
+ public_syms="${public_syms} valloc"])
+
+dnl Check for allocator-related functions that should be wrapped.
+wrap_syms=
+if test "x${JEMALLOC_PREFIX}" = "x" ; then
+ AC_CHECK_FUNC([__libc_calloc],
+ [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_CALLOC], [ ])
+ wrap_syms="${wrap_syms} __libc_calloc"])
+ AC_CHECK_FUNC([__libc_free],
+ [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_FREE], [ ])
+ wrap_syms="${wrap_syms} __libc_free"])
+ AC_CHECK_FUNC([__libc_malloc],
+ [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_MALLOC], [ ])
+ wrap_syms="${wrap_syms} __libc_malloc"])
+ AC_CHECK_FUNC([__libc_memalign],
+ [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_MEMALIGN], [ ])
+ wrap_syms="${wrap_syms} __libc_memalign"])
+ AC_CHECK_FUNC([__libc_realloc],
+ [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_REALLOC], [ ])
+ wrap_syms="${wrap_syms} __libc_realloc"])
+ AC_CHECK_FUNC([__libc_valloc],
+ [AC_DEFINE([JEMALLOC_OVERRIDE___LIBC_VALLOC], [ ])
+ wrap_syms="${wrap_syms} __libc_valloc"])
+ AC_CHECK_FUNC([__posix_memalign],
+ [AC_DEFINE([JEMALLOC_OVERRIDE___POSIX_MEMALIGN], [ ])
+ wrap_syms="${wrap_syms} __posix_memalign"])
+fi
+
+case "${host}" in
+ *-*-mingw* | *-*-cygwin*)
+ wrap_syms="${wrap_syms} tls_callback"
+ ;;
+ *)
+ ;;
+esac
+
+dnl Mangle library-private APIs.
+AC_ARG_WITH([private_namespace],
+ [AS_HELP_STRING([--with-private-namespace=<prefix>], [Prefix to prepend to all library-private APIs])],
+ [JEMALLOC_PRIVATE_NAMESPACE="${with_private_namespace}je_"],
+ [JEMALLOC_PRIVATE_NAMESPACE="je_"]
+)
+AC_DEFINE_UNQUOTED([JEMALLOC_PRIVATE_NAMESPACE], [$JEMALLOC_PRIVATE_NAMESPACE])
+private_namespace="$JEMALLOC_PRIVATE_NAMESPACE"
+AC_SUBST([private_namespace])
+
+dnl Do not add suffix to installed files by default.
+AC_ARG_WITH([install_suffix],
+ [AS_HELP_STRING([--with-install-suffix=<suffix>], [Suffix to append to all installed files])],
+ [INSTALL_SUFFIX="$with_install_suffix"],
+ [INSTALL_SUFFIX=]
+)
+install_suffix="$INSTALL_SUFFIX"
+AC_SUBST([install_suffix])
+
+dnl Specify default malloc_conf.
+AC_ARG_WITH([malloc_conf],
+ [AS_HELP_STRING([--with-malloc-conf=<malloc_conf>], [config.malloc_conf options string])],
+ [JEMALLOC_CONFIG_MALLOC_CONF="$with_malloc_conf"],
+ [JEMALLOC_CONFIG_MALLOC_CONF=""]
+)
+config_malloc_conf="$JEMALLOC_CONFIG_MALLOC_CONF"
+AC_DEFINE_UNQUOTED([JEMALLOC_CONFIG_MALLOC_CONF], ["$config_malloc_conf"])
+
+dnl Substitute @je_@ in jemalloc_protos.h.in, primarily to make generation of
+dnl jemalloc_protos_jet.h easy.
+je_="je_"
+AC_SUBST([je_])
+
+cfgoutputs_in="Makefile.in"
+cfgoutputs_in="${cfgoutputs_in} jemalloc.pc.in"
+cfgoutputs_in="${cfgoutputs_in} doc/html.xsl.in"
+cfgoutputs_in="${cfgoutputs_in} doc/manpages.xsl.in"
+cfgoutputs_in="${cfgoutputs_in} doc/jemalloc.xml.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_macros.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_protos.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/jemalloc_typedefs.h.in"
+cfgoutputs_in="${cfgoutputs_in} include/jemalloc/internal/jemalloc_preamble.h.in"
+cfgoutputs_in="${cfgoutputs_in} test/test.sh.in"
+cfgoutputs_in="${cfgoutputs_in} test/include/test/jemalloc_test.h.in"
+
+cfgoutputs_out="Makefile"
+cfgoutputs_out="${cfgoutputs_out} jemalloc.pc"
+cfgoutputs_out="${cfgoutputs_out} doc/html.xsl"
+cfgoutputs_out="${cfgoutputs_out} doc/manpages.xsl"
+cfgoutputs_out="${cfgoutputs_out} doc/jemalloc.xml"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_macros.h"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_protos.h"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/jemalloc_typedefs.h"
+cfgoutputs_out="${cfgoutputs_out} include/jemalloc/internal/jemalloc_preamble.h"
+cfgoutputs_out="${cfgoutputs_out} test/test.sh"
+cfgoutputs_out="${cfgoutputs_out} test/include/test/jemalloc_test.h"
+
+cfgoutputs_tup="Makefile"
+cfgoutputs_tup="${cfgoutputs_tup} jemalloc.pc:jemalloc.pc.in"
+cfgoutputs_tup="${cfgoutputs_tup} doc/html.xsl:doc/html.xsl.in"
+cfgoutputs_tup="${cfgoutputs_tup} doc/manpages.xsl:doc/manpages.xsl.in"
+cfgoutputs_tup="${cfgoutputs_tup} doc/jemalloc.xml:doc/jemalloc.xml.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_macros.h:include/jemalloc/jemalloc_macros.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_protos.h:include/jemalloc/jemalloc_protos.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/jemalloc_typedefs.h:include/jemalloc/jemalloc_typedefs.h.in"
+cfgoutputs_tup="${cfgoutputs_tup} include/jemalloc/internal/jemalloc_preamble.h"
+cfgoutputs_tup="${cfgoutputs_tup} test/test.sh:test/test.sh.in"
+cfgoutputs_tup="${cfgoutputs_tup} test/include/test/jemalloc_test.h:test/include/test/jemalloc_test.h.in"
+
+cfghdrs_in="include/jemalloc/jemalloc_defs.h.in"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/jemalloc_internal_defs.h.in"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_symbols.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/private_namespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_namespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/internal/public_unnamespace.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_rename.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc_mangle.sh"
+cfghdrs_in="${cfghdrs_in} include/jemalloc/jemalloc.sh"
+cfghdrs_in="${cfghdrs_in} test/include/test/jemalloc_test_defs.h.in"
+
+cfghdrs_out="include/jemalloc/jemalloc_defs.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc${install_suffix}.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols.awk"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/private_symbols_jet.awk"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_symbols.txt"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_namespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/public_unnamespace.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_protos_jet.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_rename.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/jemalloc_mangle_jet.h"
+cfghdrs_out="${cfghdrs_out} include/jemalloc/internal/jemalloc_internal_defs.h"
+cfghdrs_out="${cfghdrs_out} test/include/test/jemalloc_test_defs.h"
+
+cfghdrs_tup="include/jemalloc/jemalloc_defs.h:include/jemalloc/jemalloc_defs.h.in"
+cfghdrs_tup="${cfghdrs_tup} include/jemalloc/internal/jemalloc_internal_defs.h:include/jemalloc/internal/jemalloc_internal_defs.h.in"
+cfghdrs_tup="${cfghdrs_tup} test/include/test/jemalloc_test_defs.h:test/include/test/jemalloc_test_defs.h.in"
+
+dnl ============================================================================
+dnl jemalloc build options.
+dnl
+
+dnl Do not compile with debugging by default.
+AC_ARG_ENABLE([debug],
+ [AS_HELP_STRING([--enable-debug],
+ [Build debugging code])],
+[if test "x$enable_debug" = "xno" ; then
+ enable_debug="0"
+else
+ enable_debug="1"
+fi
+],
+[enable_debug="0"]
+)
+if test "x$enable_debug" = "x1" ; then
+ AC_DEFINE([JEMALLOC_DEBUG], [ ])
+fi
+if test "x$enable_debug" = "x1" ; then
+ AC_DEFINE([JEMALLOC_DEBUG], [ ])
+fi
+AC_SUBST([enable_debug])
+
+dnl Only optimize if not debugging.
+if test "x$enable_debug" = "x0" ; then
+ if test "x$GCC" = "xyes" ; then
+ JE_CFLAGS_ADD([-O3])
+ JE_CXXFLAGS_ADD([-O3])
+ JE_CFLAGS_ADD([-funroll-loops])
+ elif test "x$je_cv_msvc" = "xyes" ; then
+ JE_CFLAGS_ADD([-O2])
+ JE_CXXFLAGS_ADD([-O2])
+ else
+ JE_CFLAGS_ADD([-O])
+ JE_CXXFLAGS_ADD([-O])
+ fi
+fi
+
+dnl Enable statistics calculation by default.
+AC_ARG_ENABLE([stats],
+ [AS_HELP_STRING([--disable-stats],
+ [Disable statistics calculation/reporting])],
+[if test "x$enable_stats" = "xno" ; then
+ enable_stats="0"
+else
+ enable_stats="1"
+fi
+],
+[enable_stats="1"]
+)
+if test "x$enable_stats" = "x1" ; then
+ AC_DEFINE([JEMALLOC_STATS], [ ])
+fi
+AC_SUBST([enable_stats])
+
+dnl Do not enable smallocx by default.
+AC_ARG_ENABLE([experimental_smallocx],
+ [AS_HELP_STRING([--enable-experimental-smallocx], [Enable experimental smallocx API])],
+[if test "x$enable_experimental_smallocx" = "xno" ; then
+enable_experimental_smallocx="0"
+else
+enable_experimental_smallocx="1"
+fi
+],
+[enable_experimental_smallocx="0"]
+)
+if test "x$enable_experimental_smallocx" = "x1" ; then
+ AC_DEFINE([JEMALLOC_EXPERIMENTAL_SMALLOCX_API])
+fi
+AC_SUBST([enable_experimental_smallocx])
+
+dnl Do not enable profiling by default.
+AC_ARG_ENABLE([prof],
+ [AS_HELP_STRING([--enable-prof], [Enable allocation profiling])],
+[if test "x$enable_prof" = "xno" ; then
+ enable_prof="0"
+else
+ enable_prof="1"
+fi
+],
+[enable_prof="0"]
+)
+if test "x$enable_prof" = "x1" ; then
+ backtrace_method=""
+else
+ backtrace_method="N/A"
+fi
+
+AC_ARG_ENABLE([prof-libunwind],
+ [AS_HELP_STRING([--enable-prof-libunwind], [Use libunwind for backtracing])],
+[if test "x$enable_prof_libunwind" = "xno" ; then
+ enable_prof_libunwind="0"
+else
+ enable_prof_libunwind="1"
+fi
+],
+[enable_prof_libunwind="0"]
+)
+AC_ARG_WITH([static_libunwind],
+ [AS_HELP_STRING([--with-static-libunwind=<libunwind.a>],
+ [Path to static libunwind library; use rather than dynamically linking])],
+if test "x$with_static_libunwind" = "xno" ; then
+ LUNWIND="-lunwind"
+else
+ if test ! -f "$with_static_libunwind" ; then
+ AC_MSG_ERROR([Static libunwind not found: $with_static_libunwind])
+ fi
+ LUNWIND="$with_static_libunwind"
+fi,
+ LUNWIND="-lunwind"
+)
+if test "x$backtrace_method" = "x" -a "x$enable_prof_libunwind" = "x1" ; then
+ AC_CHECK_HEADERS([libunwind.h], , [enable_prof_libunwind="0"])
+ if test "x$LUNWIND" = "x-lunwind" ; then
+ AC_CHECK_LIB([unwind], [unw_backtrace], [JE_APPEND_VS(LIBS, $LUNWIND)],
+ [enable_prof_libunwind="0"])
+ else
+ JE_APPEND_VS(LIBS, $LUNWIND)
+ fi
+ if test "x${enable_prof_libunwind}" = "x1" ; then
+ backtrace_method="libunwind"
+ AC_DEFINE([JEMALLOC_PROF_LIBUNWIND], [ ])
+ fi
+fi
+
+AC_ARG_ENABLE([prof-libgcc],
+ [AS_HELP_STRING([--disable-prof-libgcc],
+ [Do not use libgcc for backtracing])],
+[if test "x$enable_prof_libgcc" = "xno" ; then
+ enable_prof_libgcc="0"
+else
+ enable_prof_libgcc="1"
+fi
+],
+[enable_prof_libgcc="1"]
+)
+if test "x$backtrace_method" = "x" -a "x$enable_prof_libgcc" = "x1" \
+ -a "x$GCC" = "xyes" ; then
+ AC_CHECK_HEADERS([unwind.h], , [enable_prof_libgcc="0"])
+ if test "x${enable_prof_libgcc}" = "x1" ; then
+ AC_CHECK_LIB([gcc], [_Unwind_Backtrace], [JE_APPEND_VS(LIBS, -lgcc)], [enable_prof_libgcc="0"])
+ fi
+ if test "x${enable_prof_libgcc}" = "x1" ; then
+ backtrace_method="libgcc"
+ AC_DEFINE([JEMALLOC_PROF_LIBGCC], [ ])
+ fi
+else
+ enable_prof_libgcc="0"
+fi
+
+AC_ARG_ENABLE([prof-gcc],
+ [AS_HELP_STRING([--disable-prof-gcc],
+ [Do not use gcc intrinsics for backtracing])],
+[if test "x$enable_prof_gcc" = "xno" ; then
+ enable_prof_gcc="0"
+else
+ enable_prof_gcc="1"
+fi
+],
+[enable_prof_gcc="1"]
+)
+if test "x$backtrace_method" = "x" -a "x$enable_prof_gcc" = "x1" \
+ -a "x$GCC" = "xyes" ; then
+ JE_CFLAGS_ADD([-fno-omit-frame-pointer])
+ backtrace_method="gcc intrinsics"
+ AC_DEFINE([JEMALLOC_PROF_GCC], [ ])
+else
+ enable_prof_gcc="0"
+fi
+
+if test "x$backtrace_method" = "x" ; then
+ backtrace_method="none (disabling profiling)"
+ enable_prof="0"
+fi
+AC_MSG_CHECKING([configured backtracing method])
+AC_MSG_RESULT([$backtrace_method])
+if test "x$enable_prof" = "x1" ; then
+ dnl Heap profiling uses the log(3) function.
+ JE_APPEND_VS(LIBS, $LM)
+
+ AC_DEFINE([JEMALLOC_PROF], [ ])
+fi
+AC_SUBST([enable_prof])
+
+dnl Indicate whether adjacent virtual memory mappings automatically coalesce
+dnl (and fragment on demand).
+if test "x${maps_coalesce}" = "x1" ; then
+ AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ])
+fi
+
+dnl Indicate whether to retain memory (rather than using munmap()) by default.
+if test "x$default_retain" = "x1" ; then
+ AC_DEFINE([JEMALLOC_RETAIN], [ ])
+fi
+
+dnl Enable allocation from DSS if supported by the OS.
+have_dss="1"
+dnl Check whether the BSD/SUSv1 sbrk() exists. If not, disable DSS support.
+AC_CHECK_FUNC([sbrk], [have_sbrk="1"], [have_sbrk="0"])
+if test "x$have_sbrk" = "x1" ; then
+ if test "x$sbrk_deprecated" = "x1" ; then
+ AC_MSG_RESULT([Disabling dss allocation because sbrk is deprecated])
+ have_dss="0"
+ fi
+else
+ have_dss="0"
+fi
+
+if test "x$have_dss" = "x1" ; then
+ AC_DEFINE([JEMALLOC_DSS], [ ])
+fi
+
+dnl Support the junk/zero filling option by default.
+AC_ARG_ENABLE([fill],
+ [AS_HELP_STRING([--disable-fill], [Disable support for junk/zero filling])],
+[if test "x$enable_fill" = "xno" ; then
+ enable_fill="0"
+else
+ enable_fill="1"
+fi
+],
+[enable_fill="1"]
+)
+if test "x$enable_fill" = "x1" ; then
+ AC_DEFINE([JEMALLOC_FILL], [ ])
+fi
+AC_SUBST([enable_fill])
+
+dnl Disable utrace(2)-based tracing by default.
+AC_ARG_ENABLE([utrace],
+ [AS_HELP_STRING([--enable-utrace], [Enable utrace(2)-based tracing])],
+[if test "x$enable_utrace" = "xno" ; then
+ enable_utrace="0"
+else
+ enable_utrace="1"
+fi
+],
+[enable_utrace="0"]
+)
+JE_COMPILABLE([utrace(2)], [
+#include <sys/types.h>
+#include <sys/param.h>
+#include <sys/time.h>
+#include <sys/uio.h>
+#include <sys/ktrace.h>
+], [
+ utrace((void *)0, 0);
+], [je_cv_utrace])
+if test "x${je_cv_utrace}" = "xno" ; then
+ enable_utrace="0"
+fi
+if test "x$enable_utrace" = "x1" ; then
+ AC_DEFINE([JEMALLOC_UTRACE], [ ])
+fi
+AC_SUBST([enable_utrace])
+
+dnl Do not support the xmalloc option by default.
+AC_ARG_ENABLE([xmalloc],
+ [AS_HELP_STRING([--enable-xmalloc], [Support xmalloc option])],
+[if test "x$enable_xmalloc" = "xno" ; then
+ enable_xmalloc="0"
+else
+ enable_xmalloc="1"
+fi
+],
+[enable_xmalloc="0"]
+)
+if test "x$enable_xmalloc" = "x1" ; then
+ AC_DEFINE([JEMALLOC_XMALLOC], [ ])
+fi
+AC_SUBST([enable_xmalloc])
+
+dnl Support cache-oblivious allocation alignment by default.
+AC_ARG_ENABLE([cache-oblivious],
+ [AS_HELP_STRING([--disable-cache-oblivious],
+ [Disable support for cache-oblivious allocation alignment])],
+[if test "x$enable_cache_oblivious" = "xno" ; then
+ enable_cache_oblivious="0"
+else
+ enable_cache_oblivious="1"
+fi
+],
+[enable_cache_oblivious="1"]
+)
+if test "x$enable_cache_oblivious" = "x1" ; then
+ AC_DEFINE([JEMALLOC_CACHE_OBLIVIOUS], [ ])
+fi
+AC_SUBST([enable_cache_oblivious])
+
+dnl Do not log by default.
+AC_ARG_ENABLE([log],
+ [AS_HELP_STRING([--enable-log], [Support debug logging])],
+[if test "x$enable_log" = "xno" ; then
+ enable_log="0"
+else
+ enable_log="1"
+fi
+],
+[enable_log="0"]
+)
+if test "x$enable_log" = "x1" ; then
+ AC_DEFINE([JEMALLOC_LOG], [ ])
+fi
+AC_SUBST([enable_log])
+
+dnl Do not use readlinkat by default
+AC_ARG_ENABLE([readlinkat],
+ [AS_HELP_STRING([--enable-readlinkat], [Use readlinkat over readlink])],
+[if test "x$enable_readlinkat" = "xno" ; then
+ enable_readlinkat="0"
+else
+ enable_readlinkat="1"
+fi
+],
+[enable_readlinkat="0"]
+)
+if test "x$enable_readlinkat" = "x1" ; then
+ AC_DEFINE([JEMALLOC_READLINKAT], [ ])
+fi
+AC_SUBST([enable_readlinkat])
+
+dnl Avoid extra safety checks by default
+AC_ARG_ENABLE([opt-safety-checks],
+ [AS_HELP_STRING([--enable-opt-safety-checks],
+ [Perform certain low-overhead checks, even in opt mode])],
+[if test "x$enable_opt_safety_checks" = "xno" ; then
+ enable_opt_safety_checks="0"
+else
+ enable_opt_safety_checks="1"
+fi
+],
+[enable_opt_safety_checks="0"]
+)
+if test "x$enable_opt_safety_checks" = "x1" ; then
+ AC_DEFINE([JEMALLOC_OPT_SAFETY_CHECKS], [ ])
+fi
+AC_SUBST([enable_opt_safety_checks])
+
+JE_COMPILABLE([a program using __builtin_unreachable], [
+void foo (void) {
+ __builtin_unreachable();
+}
+], [
+ {
+ foo();
+ }
+], [je_cv_gcc_builtin_unreachable])
+if test "x${je_cv_gcc_builtin_unreachable}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [__builtin_unreachable])
+else
+ AC_DEFINE([JEMALLOC_INTERNAL_UNREACHABLE], [abort])
+fi
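+dnl For illustration (hypothetical wrapper name): internal code can wrap the
+dnl macro as
+dnl   #define unreachable() JEMALLOC_INTERNAL_UNREACHABLE()
+dnl so provably dead paths become a compiler hint where the builtin exists
+dnl and a hard abort() where it does not.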
+
+dnl ============================================================================
+dnl Check for __builtin_ffsl(), then ffsl(3), and fail if neither is found.
+dnl One of those two functions should (theoretically) exist on all platforms
+dnl that jemalloc currently has a chance of functioning on without modification.
+dnl We additionally assume ffs[ll]() or __builtin_ffs[ll]() are defined if
+dnl ffsl() or __builtin_ffsl() are defined, respectively.
+JE_COMPILABLE([a program using __builtin_ffsl], [
+#include <stdio.h>
+#include <strings.h>
+#include <string.h>
+], [
+ {
+ int rv = __builtin_ffsl(0x08);
+ printf("%d\n", rv);
+ }
+], [je_cv_gcc_builtin_ffsl])
+if test "x${je_cv_gcc_builtin_ffsl}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [__builtin_ffsll])
+ AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [__builtin_ffsl])
+ AC_DEFINE([JEMALLOC_INTERNAL_FFS], [__builtin_ffs])
+else
+ JE_COMPILABLE([a program using ffsl], [
+ #include <stdio.h>
+ #include <strings.h>
+ #include <string.h>
+ ], [
+ {
+ int rv = ffsl(0x08);
+ printf("%d\n", rv);
+ }
+ ], [je_cv_function_ffsl])
+ if test "x${je_cv_function_ffsl}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_INTERNAL_FFSLL], [ffsll])
+ AC_DEFINE([JEMALLOC_INTERNAL_FFSL], [ffsl])
+ AC_DEFINE([JEMALLOC_INTERNAL_FFS], [ffs])
+ else
+ AC_MSG_ERROR([Cannot build without ffsl(3) or __builtin_ffsl()])
+ fi
+fi
+
+JE_COMPILABLE([a program using __builtin_popcountl], [
+#include <stdio.h>
+#include <strings.h>
+#include <string.h>
+], [
+ {
+ int rv = __builtin_popcountl(0x08);
+ printf("%d\n", rv);
+ }
+], [je_cv_gcc_builtin_popcountl])
+if test "x${je_cv_gcc_builtin_popcountl}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_INTERNAL_POPCOUNT], [__builtin_popcount])
+ AC_DEFINE([JEMALLOC_INTERNAL_POPCOUNTL], [__builtin_popcountl])
+fi
+
+AC_ARG_WITH([lg_quantum],
+ [AS_HELP_STRING([--with-lg-quantum=<lg-quantum>],
+ [Base 2 log of minimum allocation alignment])],
+ [LG_QUANTA="$with_lg_quantum"],
+ [LG_QUANTA="3 4"])
+if test "x$with_lg_quantum" != "x" ; then
+ AC_DEFINE_UNQUOTED([LG_QUANTUM], [$with_lg_quantum])
+fi
+
+AC_ARG_WITH([lg_page],
+ [AS_HELP_STRING([--with-lg-page=<lg-page>], [Base 2 log of system page size])],
+ [LG_PAGE="$with_lg_page"], [LG_PAGE="detect"])
+if test "x$LG_PAGE" = "xdetect"; then
+ AC_CACHE_CHECK([LG_PAGE],
+ [je_cv_lg_page],
+ AC_RUN_IFELSE([AC_LANG_PROGRAM(
+[[
+#include <strings.h>
+#ifdef _WIN32
+#include <windows.h>
+#else
+#include <unistd.h>
+#endif
+#include <stdio.h>
+]],
+[[
+ int result;
+ FILE *f;
+
+#ifdef _WIN32
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ result = si.dwPageSize;
+#else
+ result = sysconf(_SC_PAGESIZE);
+#endif
+ if (result == -1) {
+ return 1;
+ }
+ result = JEMALLOC_INTERNAL_FFSL(result) - 1;
+
+ f = fopen("conftest.out", "w");
+ if (f == NULL) {
+ return 1;
+ }
+ fprintf(f, "%d", result);
+ fclose(f);
+
+ return 0;
+]])],
+ [je_cv_lg_page=`cat conftest.out`],
+ [je_cv_lg_page=undefined],
+ [je_cv_lg_page=12]))
+fi
+if test "x${je_cv_lg_page}" != "x" ; then
+ LG_PAGE="${je_cv_lg_page}"
+fi
+if test "x${LG_PAGE}" != "xundefined" ; then
+ AC_DEFINE_UNQUOTED([LG_PAGE], [$LG_PAGE])
+else
+ AC_MSG_ERROR([cannot determine value for LG_PAGE])
+fi
+
+AC_ARG_WITH([lg_hugepage],
+ [AS_HELP_STRING([--with-lg-hugepage=<lg-hugepage>],
+ [Base 2 log of system huge page size])],
+ [je_cv_lg_hugepage="${with_lg_hugepage}"],
+ [je_cv_lg_hugepage=""])
+if test "x${je_cv_lg_hugepage}" = "x" ; then
+ dnl Look in /proc/meminfo (Linux-specific) for information on the default huge
+ dnl page size, if any. The relevant line looks like:
+ dnl
+ dnl Hugepagesize: 2048 kB
+ if test -e "/proc/meminfo" ; then
+ hpsk=[`cat /proc/meminfo 2>/dev/null | \
+ grep -e '^Hugepagesize:[[:space:]]\+[0-9]\+[[:space:]]kB$' | \
+ awk '{print $2}'`]
+ if test "x${hpsk}" != "x" ; then
+ je_cv_lg_hugepage=10
+ while test "${hpsk}" -gt 1 ; do
+ hpsk="$((hpsk / 2))"
+ je_cv_lg_hugepage="$((je_cv_lg_hugepage + 1))"
+ done
+ fi
+ fi
+
+ dnl Set default if unable to automatically configure.
+ if test "x${je_cv_lg_hugepage}" = "x" ; then
+ je_cv_lg_hugepage=21
+ fi
+fi
+if test "x${LG_PAGE}" != "xundefined" -a \
+ "${je_cv_lg_hugepage}" -lt "${LG_PAGE}" ; then
+ AC_MSG_ERROR([Huge page size (2^${je_cv_lg_hugepage}) must be at least page size (2^${LG_PAGE})])
+fi
+AC_DEFINE_UNQUOTED([LG_HUGEPAGE], [${je_cv_lg_hugepage}])
+
+dnl ============================================================================
+dnl Enable libdl by default.
+AC_ARG_ENABLE([libdl],
+ [AS_HELP_STRING([--disable-libdl],
+ [Do not use libdl])],
+[if test "x$enable_libdl" = "xno" ; then
+ enable_libdl="0"
+else
+ enable_libdl="1"
+fi
+],
+[enable_libdl="1"]
+)
+AC_SUBST([libdl])
+
+dnl ============================================================================
+dnl Configure pthreads.
+
+if test "x$abi" != "xpecoff" ; then
+ AC_DEFINE([JEMALLOC_HAVE_PTHREAD], [ ])
+ AC_CHECK_HEADERS([pthread.h], , [AC_MSG_ERROR([pthread.h is missing])])
+ dnl Some systems may embed pthreads functionality in libc; check for libpthread
+ dnl first, but try libc too before failing.
+ AC_CHECK_LIB([pthread], [pthread_create], [JE_APPEND_VS(LIBS, -pthread)],
+ [AC_SEARCH_LIBS([pthread_create], , ,
+ AC_MSG_ERROR([libpthread is missing]))])
+ wrap_syms="${wrap_syms} pthread_create"
+ have_pthread="1"
+
+dnl Check if we have dlsym support.
+ if test "x$enable_libdl" = "x1" ; then
+ have_dlsym="1"
+ AC_CHECK_HEADERS([dlfcn.h],
+ AC_CHECK_FUNC([dlsym], [],
+ [AC_CHECK_LIB([dl], [dlsym], [LIBS="$LIBS -ldl"], [have_dlsym="0"])]),
+ [have_dlsym="0"])
+ if test "x$have_dlsym" = "x1" ; then
+ AC_DEFINE([JEMALLOC_HAVE_DLSYM], [ ])
+ fi
+ else
+ have_dlsym="0"
+ fi
+
+ JE_COMPILABLE([pthread_atfork(3)], [
+#include <pthread.h>
+], [
+ pthread_atfork((void *)0, (void *)0, (void *)0);
+], [je_cv_pthread_atfork])
+ if test "x${je_cv_pthread_atfork}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_PTHREAD_ATFORK], [ ])
+ fi
+ dnl Check if pthread_setname_np is available with the expected API.
+ JE_COMPILABLE([pthread_setname_np(3)], [
+#include <pthread.h>
+], [
+ pthread_setname_np(pthread_self(), "setname_test");
+], [je_cv_pthread_setname_np])
+ if test "x${je_cv_pthread_setname_np}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_PTHREAD_SETNAME_NP], [ ])
+ fi
+fi
+
+JE_APPEND_VS(CPPFLAGS, -D_REENTRANT)
+
+dnl Check whether clock_gettime(2) is in libc or librt.
+AC_SEARCH_LIBS([clock_gettime], [rt])
+
+dnl The Cray wrapper compiler often adds `-lrt` when using `-static`. Check
+dnl with `-dynamic` as well in case a user tries to link jemalloc dynamically.
+if test "x$je_cv_cray_prgenv_wrapper" = "xyes" ; then
+ if test "$ac_cv_search_clock_gettime" != "-lrt"; then
+ JE_CFLAGS_SAVE()
+
+ unset ac_cv_search_clock_gettime
+ JE_CFLAGS_ADD([-dynamic])
+ AC_SEARCH_LIBS([clock_gettime], [rt])
+
+ JE_CFLAGS_RESTORE()
+ fi
+fi
+
+dnl check for CLOCK_MONOTONIC_COARSE (Linux-specific).
+JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC_COARSE, ...)], [
+#include <time.h>
+], [
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC_COARSE, &ts);
+], [je_cv_clock_monotonic_coarse])
+if test "x${je_cv_clock_monotonic_coarse}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE])
+fi
+
+dnl check for CLOCK_MONOTONIC.
+JE_COMPILABLE([clock_gettime(CLOCK_MONOTONIC, ...)], [
+#include <unistd.h>
+#include <time.h>
+], [
+ struct timespec ts;
+
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+#if !defined(_POSIX_MONOTONIC_CLOCK) || _POSIX_MONOTONIC_CLOCK < 0
+# error _POSIX_MONOTONIC_CLOCK missing/invalid
+#endif
+], [je_cv_clock_monotonic])
+if test "x${je_cv_clock_monotonic}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_CLOCK_MONOTONIC])
+fi
+
+dnl Check for mach_absolute_time().
+JE_COMPILABLE([mach_absolute_time()], [
+#include <mach/mach_time.h>
+], [
+ mach_absolute_time();
+], [je_cv_mach_absolute_time])
+if test "x${je_cv_mach_absolute_time}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_MACH_ABSOLUTE_TIME])
+fi
+
+dnl Use syscall(2) (if available) by default.
+AC_ARG_ENABLE([syscall],
+ [AS_HELP_STRING([--disable-syscall], [Disable use of syscall(2)])],
+[if test "x$enable_syscall" = "xno" ; then
+ enable_syscall="0"
+else
+ enable_syscall="1"
+fi
+],
+[enable_syscall="1"]
+)
+if test "x$enable_syscall" = "x1" ; then
+ dnl Check if syscall(2) is usable. Treat warnings as errors, so that e.g. OS
+ dnl X 10.12's deprecation warning prevents use.
+ JE_CFLAGS_SAVE()
+ JE_CFLAGS_ADD([-Werror])
+ JE_COMPILABLE([syscall(2)], [
+#include <sys/syscall.h>
+#include <unistd.h>
+], [
+ syscall(SYS_write, 2, "hello", 5);
+],
+ [je_cv_syscall])
+ JE_CFLAGS_RESTORE()
+ if test "x$je_cv_syscall" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_USE_SYSCALL], [ ])
+ fi
+fi
+
+dnl Check if the GNU-specific secure_getenv function exists.
+AC_CHECK_FUNC([secure_getenv],
+ [have_secure_getenv="1"],
+ [have_secure_getenv="0"]
+ )
+if test "x$have_secure_getenv" = "x1" ; then
+ AC_DEFINE([JEMALLOC_HAVE_SECURE_GETENV], [ ])
+fi
+
+dnl Check if the GNU-specific sched_getcpu function exists.
+AC_CHECK_FUNC([sched_getcpu],
+ [have_sched_getcpu="1"],
+ [have_sched_getcpu="0"]
+ )
+if test "x$have_sched_getcpu" = "x1" ; then
+ AC_DEFINE([JEMALLOC_HAVE_SCHED_GETCPU], [ ])
+fi
+
+dnl Check if the GNU-specific sched_setaffinity function exists.
+AC_CHECK_FUNC([sched_setaffinity],
+ [have_sched_setaffinity="1"],
+ [have_sched_setaffinity="0"]
+ )
+if test "x$have_sched_setaffinity" = "x1" ; then
+ AC_DEFINE([JEMALLOC_HAVE_SCHED_SETAFFINITY], [ ])
+fi
+
+dnl Check if the Solaris/BSD issetugid function exists.
+AC_CHECK_FUNC([issetugid],
+ [have_issetugid="1"],
+ [have_issetugid="0"]
+ )
+if test "x$have_issetugid" = "x1" ; then
+ AC_DEFINE([JEMALLOC_HAVE_ISSETUGID], [ ])
+fi
+
+dnl Check whether the BSD-specific _malloc_thread_cleanup() exists. If so, use
+dnl it rather than pthreads TSD cleanup functions to support cleanup during
+dnl thread exit, in order to avoid pthreads library recursion during
+dnl bootstrapping.
+AC_CHECK_FUNC([_malloc_thread_cleanup],
+ [have__malloc_thread_cleanup="1"],
+ [have__malloc_thread_cleanup="0"]
+ )
+if test "x$have__malloc_thread_cleanup" = "x1" ; then
+ AC_DEFINE([JEMALLOC_MALLOC_THREAD_CLEANUP], [ ])
+ wrap_syms="${wrap_syms} _malloc_thread_cleanup"
+ force_tls="1"
+fi
+
+dnl Check whether the BSD-specific _pthread_mutex_init_calloc_cb() exists. If
+dnl so, mutex initialization causes allocation, and we need to implement this
+dnl callback function in order to prevent recursive allocation.
+AC_CHECK_FUNC([_pthread_mutex_init_calloc_cb],
+ [have__pthread_mutex_init_calloc_cb="1"],
+ [have__pthread_mutex_init_calloc_cb="0"]
+ )
+if test "x$have__pthread_mutex_init_calloc_cb" = "x1" ; then
+ AC_DEFINE([JEMALLOC_MUTEX_INIT_CB])
+ wrap_syms="${wrap_syms} _malloc_prefork _malloc_postfork"
+fi
+
+dnl Disable lazy locking by default.
+AC_ARG_ENABLE([lazy_lock],
+ [AS_HELP_STRING([--enable-lazy-lock],
+ [Enable lazy locking (only lock when multi-threaded)])],
+[if test "x$enable_lazy_lock" = "xno" ; then
+ enable_lazy_lock="0"
+else
+ enable_lazy_lock="1"
+fi
+],
+[enable_lazy_lock=""]
+)
+if test "x${enable_lazy_lock}" = "x" ; then
+ if test "x${force_lazy_lock}" = "x1" ; then
+ AC_MSG_RESULT([Forcing lazy-lock to avoid allocator/threading bootstrap issues])
+ enable_lazy_lock="1"
+ else
+ enable_lazy_lock="0"
+ fi
+fi
+if test "x${enable_lazy_lock}" = "x1" -a "x${abi}" = "xpecoff" ; then
+ AC_MSG_RESULT([Forcing no lazy-lock because thread creation monitoring is unimplemented])
+ enable_lazy_lock="0"
+fi
+if test "x$enable_lazy_lock" = "x1" ; then
+ if test "x$have_dlsym" = "x1" ; then
+ AC_DEFINE([JEMALLOC_LAZY_LOCK], [ ])
+ else
+ AC_MSG_ERROR([Missing dlsym support: lazy-lock cannot be enabled.])
+ fi
+fi
+AC_SUBST([enable_lazy_lock])
+
+dnl Automatically configure TLS.
+if test "x${force_tls}" = "x1" ; then
+ enable_tls="1"
+elif test "x${force_tls}" = "x0" ; then
+ enable_tls="0"
+else
+ enable_tls="1"
+fi
+if test "x${enable_tls}" = "x1" ; then
+AC_MSG_CHECKING([for TLS])
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM(
+[[
+ __thread int x;
+]], [[
+ x = 42;
+
+ return 0;
+]])],
+ AC_MSG_RESULT([yes]),
+ AC_MSG_RESULT([no])
+ enable_tls="0")
+else
+ enable_tls="0"
+fi
+AC_SUBST([enable_tls])
+if test "x${enable_tls}" = "x1" ; then
+ AC_DEFINE_UNQUOTED([JEMALLOC_TLS], [ ])
+fi
+
+dnl ============================================================================
+dnl Check for C11 atomics.
+
+JE_COMPILABLE([C11 atomics], [
+#include <stdint.h>
+#if (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
+#include <stdatomic.h>
+#else
+#error Atomics not available
+#endif
+], [
+ uint64_t *p = (uint64_t *)0;
+ uint64_t x = 1;
+ volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p;
+ uint64_t r = atomic_fetch_add(a, x) + x;
+ return r == 0;
+], [je_cv_c11_atomics])
+if test "x${je_cv_c11_atomics}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_C11_ATOMICS])
+fi
+
+dnl ============================================================================
+dnl Check for GCC-style __atomic atomics.
+
+JE_COMPILABLE([GCC __atomic atomics], [
+], [
+ int x = 0;
+ int val = 1;
+ int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED);
+ int after_add = x;
+ return after_add == 1;
+], [je_cv_gcc_atomic_atomics])
+if test "x${je_cv_gcc_atomic_atomics}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_GCC_ATOMIC_ATOMICS])
+
+ dnl check for 8-bit atomic support
+ JE_COMPILABLE([GCC 8-bit __atomic atomics], [
+ ], [
+ unsigned char x = 0;
+ int val = 1;
+ int y = __atomic_fetch_add(&x, val, __ATOMIC_RELAXED);
+ int after_add = (int)x;
+ return after_add == 1;
+ ], [je_cv_gcc_u8_atomic_atomics])
+ if test "x${je_cv_gcc_u8_atomic_atomics}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_GCC_U8_ATOMIC_ATOMICS])
+ fi
+fi
+
+dnl ============================================================================
+dnl Check for GCC-style __sync atomics.
+
+JE_COMPILABLE([GCC __sync atomics], [
+], [
+ int x = 0;
+ int before_add = __sync_fetch_and_add(&x, 1);
+ int after_add = x;
+ return (before_add == 0) && (after_add == 1);
+], [je_cv_gcc_sync_atomics])
+if test "x${je_cv_gcc_sync_atomics}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_GCC_SYNC_ATOMICS])
+
+ dnl check for 8-bit atomic support
+ JE_COMPILABLE([GCC 8-bit __sync atomics], [
+ ], [
+ unsigned char x = 0;
+ int before_add = __sync_fetch_and_add(&x, 1);
+ int after_add = (int)x;
+ return (before_add == 0) && (after_add == 1);
+ ], [je_cv_gcc_u8_sync_atomics])
+ if test "x${je_cv_gcc_u8_sync_atomics}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_GCC_U8_SYNC_ATOMICS])
+ fi
+fi
+
+dnl ============================================================================
+dnl Check for atomic(3) operations as provided on Darwin.
+dnl We need this not for the atomic operations (which are provided above), but
+dnl rather for the OS_unfair_lock type it exposes.
+
+JE_COMPILABLE([Darwin OSAtomic*()], [
+#include <libkern/OSAtomic.h>
+#include <inttypes.h>
+], [
+ {
+ int32_t x32 = 0;
+ volatile int32_t *x32p = &x32;
+ OSAtomicAdd32(1, x32p);
+ }
+ {
+ int64_t x64 = 0;
+ volatile int64_t *x64p = &x64;
+ OSAtomicAdd64(1, x64p);
+ }
+], [je_cv_osatomic])
+if test "x${je_cv_osatomic}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_OSATOMIC], [ ])
+fi
+
+dnl ============================================================================
+dnl Check for madvise(2).
+
+JE_COMPILABLE([madvise(2)], [
+#include <sys/mman.h>
+], [
+ madvise((void *)0, 0, 0);
+], [je_cv_madvise])
+if test "x${je_cv_madvise}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_MADVISE], [ ])
+
+ dnl Check for madvise(..., MADV_FREE).
+ JE_COMPILABLE([madvise(..., MADV_FREE)], [
+#include <sys/mman.h>
+], [
+ madvise((void *)0, 0, MADV_FREE);
+], [je_cv_madv_free])
+ if test "x${je_cv_madv_free}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
+ elif test "x${je_cv_madvise}" = "xyes" ; then
+ case "${host_cpu}" in i686|x86_64)
+ case "${host}" in *-*-linux*)
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_FREE], [ ])
+ AC_DEFINE([JEMALLOC_DEFINE_MADVISE_FREE], [ ])
+ ;;
+ esac
+ ;;
+ esac
+ fi
+
+ dnl Check for madvise(..., MADV_DONTNEED).
+ JE_COMPILABLE([madvise(..., MADV_DONTNEED)], [
+#include <sys/mman.h>
+], [
+ madvise((void *)0, 0, MADV_DONTNEED);
+], [je_cv_madv_dontneed])
+ if test "x${je_cv_madv_dontneed}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_PURGE_MADVISE_DONTNEED], [ ])
+ fi
+
+ dnl Check for madvise(..., MADV_DO[NT]DUMP).
+ JE_COMPILABLE([madvise(..., MADV_DO[[NT]]DUMP)], [
+#include <sys/mman.h>
+], [
+ madvise((void *)0, 0, MADV_DONTDUMP);
+ madvise((void *)0, 0, MADV_DODUMP);
+], [je_cv_madv_dontdump])
+ if test "x${je_cv_madv_dontdump}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_MADVISE_DONTDUMP], [ ])
+ fi
+
+ dnl Check for madvise(..., MADV_[NO]HUGEPAGE).
+ JE_COMPILABLE([madvise(..., MADV_[[NO]]HUGEPAGE)], [
+#include <sys/mman.h>
+], [
+ madvise((void *)0, 0, MADV_HUGEPAGE);
+ madvise((void *)0, 0, MADV_NOHUGEPAGE);
+], [je_cv_thp])
+case "${host_cpu}" in
+ arm*)
+ ;;
+ *)
+ if test "x${je_cv_thp}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_MADVISE_HUGE], [ ])
+ fi
+ ;;
+esac
+fi
+
+dnl ============================================================================
+dnl Check for __builtin_clz() and __builtin_clzl().
+
+AC_CACHE_CHECK([for __builtin_clz],
+ [je_cv_builtin_clz],
+ [AC_LINK_IFELSE([AC_LANG_PROGRAM([],
+ [
+ {
+ unsigned x = 0;
+ int y = __builtin_clz(x);
+ }
+ {
+ unsigned long x = 0;
+ int y = __builtin_clzl(x);
+ }
+ ])],
+ [je_cv_builtin_clz=yes],
+ [je_cv_builtin_clz=no])])
+
+if test "x${je_cv_builtin_clz}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_BUILTIN_CLZ], [ ])
+fi
+
+dnl ============================================================================
+dnl Check for os_unfair_lock operations as provided on Darwin.
+
+JE_COMPILABLE([Darwin os_unfair_lock_*()], [
+#include <os/lock.h>
+#include <AvailabilityMacros.h>
+], [
+ #if MAC_OS_X_VERSION_MIN_REQUIRED < 101200
+ #error "os_unfair_lock is not supported"
+ #else
+ os_unfair_lock lock = OS_UNFAIR_LOCK_INIT;
+ os_unfair_lock_lock(&lock);
+ os_unfair_lock_unlock(&lock);
+ #endif
+], [je_cv_os_unfair_lock])
+if test "x${je_cv_os_unfair_lock}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_OS_UNFAIR_LOCK], [ ])
+fi
+
+dnl ============================================================================
+dnl Darwin-related configuration.
+
+AC_ARG_ENABLE([zone-allocator],
+ [AS_HELP_STRING([--disable-zone-allocator],
+ [Disable zone allocator for Darwin])],
+[if test "x$enable_zone_allocator" = "xno" ; then
+ enable_zone_allocator="0"
+else
+ enable_zone_allocator="1"
+fi
+],
+[if test "x${abi}" = "xmacho"; then
+ enable_zone_allocator="1"
+fi
+]
+)
+AC_SUBST([enable_zone_allocator])
+
+if test "x${enable_zone_allocator}" = "x1" ; then
+ if test "x${abi}" != "xmacho"; then
+ AC_MSG_ERROR([--enable-zone-allocator is only supported on Darwin])
+ fi
+ AC_DEFINE([JEMALLOC_ZONE], [ ])
+fi
+
+dnl ============================================================================
+dnl Use initial-exec TLS by default.
+AC_ARG_ENABLE([initial-exec-tls],
+ [AS_HELP_STRING([--disable-initial-exec-tls],
+ [Disable the initial-exec tls model])],
+[if test "x$enable_initial_exec_tls" = "xno" ; then
+ enable_initial_exec_tls="0"
+else
+ enable_initial_exec_tls="1"
+fi
+],
+[enable_initial_exec_tls="1"]
+)
+AC_SUBST([enable_initial_exec_tls])
+
+if test "x${je_cv_tls_model}" = "xyes" -a \
+ "x${enable_initial_exec_tls}" = "x1" ; then
+ AC_DEFINE([JEMALLOC_TLS_MODEL],
+ [__attribute__((tls_model("initial-exec")))])
+else
+ AC_DEFINE([JEMALLOC_TLS_MODEL], [ ])
+fi
+
+dnl ============================================================================
+dnl Enable background threads if possible.
+
+if test "x${have_pthread}" = "x1" -a "x${je_cv_os_unfair_lock}" != "xyes" ; then
+ AC_DEFINE([JEMALLOC_BACKGROUND_THREAD])
+fi
+
+dnl ============================================================================
+dnl Check for glibc malloc hooks
+
+JE_COMPILABLE([glibc malloc hook], [
+#include <stddef.h>
+
+extern void (* __free_hook)(void *ptr);
+extern void *(* __malloc_hook)(size_t size);
+extern void *(* __realloc_hook)(void *ptr, size_t size);
+], [
+ void *ptr = 0L;
+ if (__malloc_hook) ptr = __malloc_hook(1);
+ if (__realloc_hook) ptr = __realloc_hook(ptr, 2);
+ if (__free_hook && ptr) __free_hook(ptr);
+], [je_cv_glibc_malloc_hook])
+if test "x${je_cv_glibc_malloc_hook}" = "xyes" ; then
+ if test "x${JEMALLOC_PREFIX}" = "x" ; then
+ AC_DEFINE([JEMALLOC_GLIBC_MALLOC_HOOK], [ ])
+ wrap_syms="${wrap_syms} __free_hook __malloc_hook __realloc_hook"
+ fi
+fi
+
+JE_COMPILABLE([glibc memalign hook], [
+#include <stddef.h>
+
+extern void *(* __memalign_hook)(size_t alignment, size_t size);
+], [
+ void *ptr = 0L;
+ if (__memalign_hook) ptr = __memalign_hook(16, 7);
+], [je_cv_glibc_memalign_hook])
+if test "x${je_cv_glibc_memalign_hook}" = "xyes" ; then
+ if test "x${JEMALLOC_PREFIX}" = "x" ; then
+ AC_DEFINE([JEMALLOC_GLIBC_MEMALIGN_HOOK], [ ])
+ wrap_syms="${wrap_syms} __memalign_hook"
+ fi
+fi
+
+JE_COMPILABLE([pthreads adaptive mutexes], [
+#include <pthread.h>
+], [
+ pthread_mutexattr_t attr;
+ pthread_mutexattr_init(&attr);
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ADAPTIVE_NP);
+ pthread_mutexattr_destroy(&attr);
+], [je_cv_pthread_mutex_adaptive_np])
+if test "x${je_cv_pthread_mutex_adaptive_np}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP], [ ])
+fi
+
+JE_CFLAGS_SAVE()
+JE_CFLAGS_ADD([-D_GNU_SOURCE])
+JE_CFLAGS_ADD([-Werror])
+JE_CFLAGS_ADD([-herror_on_warning])
+JE_COMPILABLE([strerror_r returns char with gnu source], [
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+], [
+ char *buffer = (char *) malloc(100);
+ char *error = strerror_r(EINVAL, buffer, 100);
+ printf("%s\n", error);
+], [je_cv_strerror_r_returns_char_with_gnu_source])
+JE_CFLAGS_RESTORE()
+if test "x${je_cv_strerror_r_returns_char_with_gnu_source}" = "xyes" ; then
+ AC_DEFINE([JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE], [ ])
+fi
+
+dnl ============================================================================
+dnl Check for typedefs, structures, and compiler characteristics.
+AC_HEADER_STDBOOL
+
+dnl ============================================================================
+dnl Define commands that generate output files.
+
+AC_CONFIG_COMMANDS([include/jemalloc/internal/public_symbols.txt], [
+ f="${objroot}include/jemalloc/internal/public_symbols.txt"
+ mkdir -p "${objroot}include/jemalloc/internal"
+ cp /dev/null "${f}"
+ for nm in `echo ${mangling_map} |tr ',' ' '` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $[]1}'`
+ m=`echo ${nm} |tr ':' ' ' |awk '{print $[]2}'`
+ echo "${n}:${m}" >> "${f}"
+ dnl Remove name from public_syms so that it isn't redefined later.
+ public_syms=`for sym in ${public_syms}; do echo "${sym}"; done |grep -v "^${n}\$" |tr '\n' ' '`
+ done
+ for sym in ${public_syms} ; do
+ n="${sym}"
+ m="${JEMALLOC_PREFIX}${sym}"
+ echo "${n}:${m}" >> "${f}"
+ done
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+ mangling_map="${mangling_map}"
+ public_syms="${public_syms}"
+ JEMALLOC_PREFIX="${JEMALLOC_PREFIX}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/internal/private_symbols.awk], [
+ f="${objroot}include/jemalloc/internal/private_symbols.awk"
+ mkdir -p "${objroot}include/jemalloc/internal"
+ export_syms=`for sym in ${public_syms}; do echo "${JEMALLOC_PREFIX}${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;`
+ "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols.awk"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+ public_syms="${public_syms}"
+ wrap_syms="${wrap_syms}"
+ SYM_PREFIX="${SYM_PREFIX}"
+ JEMALLOC_PREFIX="${JEMALLOC_PREFIX}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/internal/private_symbols_jet.awk], [
+ f="${objroot}include/jemalloc/internal/private_symbols_jet.awk"
+ mkdir -p "${objroot}include/jemalloc/internal"
+ export_syms=`for sym in ${public_syms}; do echo "jet_${sym}"; done; for sym in ${wrap_syms}; do echo "${sym}"; done;`
+ "${srcdir}/include/jemalloc/internal/private_symbols.sh" "${SYM_PREFIX}" ${export_syms} > "${objroot}include/jemalloc/internal/private_symbols_jet.awk"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+ public_syms="${public_syms}"
+ wrap_syms="${wrap_syms}"
+ SYM_PREFIX="${SYM_PREFIX}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/internal/public_namespace.h], [
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/public_namespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_namespace.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/internal/public_unnamespace.h], [
+ mkdir -p "${objroot}include/jemalloc/internal"
+ "${srcdir}/include/jemalloc/internal/public_unnamespace.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/internal/public_unnamespace.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_protos_jet.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ cat "${srcdir}/include/jemalloc/jemalloc_protos.h.in" | sed -e 's/@je_@/jet_/g' > "${objroot}include/jemalloc/jemalloc_protos_jet.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_rename.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc_rename.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" > "${objroot}include/jemalloc/jemalloc_rename.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" je_ > "${objroot}include/jemalloc/jemalloc_mangle.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc_mangle_jet.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc_mangle.sh" "${objroot}include/jemalloc/internal/public_symbols.txt" jet_ > "${objroot}include/jemalloc/jemalloc_mangle_jet.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+])
+AC_CONFIG_COMMANDS([include/jemalloc/jemalloc.h], [
+ mkdir -p "${objroot}include/jemalloc"
+ "${srcdir}/include/jemalloc/jemalloc.sh" "${objroot}" > "${objroot}include/jemalloc/jemalloc${install_suffix}.h"
+], [
+ srcdir="${srcdir}"
+ objroot="${objroot}"
+ install_suffix="${install_suffix}"
+])
+
+dnl Process .in files.
+AC_SUBST([cfghdrs_in])
+AC_SUBST([cfghdrs_out])
+AC_CONFIG_HEADERS([$cfghdrs_tup])
+
+dnl ============================================================================
+dnl Generate outputs.
+
+AC_CONFIG_FILES([$cfgoutputs_tup config.stamp bin/jemalloc-config bin/jemalloc.sh bin/jeprof])
+AC_SUBST([cfgoutputs_in])
+AC_SUBST([cfgoutputs_out])
+AC_OUTPUT
+
+dnl ============================================================================
+dnl Print out the results of configuration.
+AC_MSG_RESULT([===============================================================================])
+AC_MSG_RESULT([jemalloc version : ${jemalloc_version}])
+AC_MSG_RESULT([library revision : ${rev}])
+AC_MSG_RESULT([])
+AC_MSG_RESULT([CONFIG : ${CONFIG}])
+AC_MSG_RESULT([CC : ${CC}])
+AC_MSG_RESULT([CONFIGURE_CFLAGS : ${CONFIGURE_CFLAGS}])
+AC_MSG_RESULT([SPECIFIED_CFLAGS : ${SPECIFIED_CFLAGS}])
+AC_MSG_RESULT([EXTRA_CFLAGS : ${EXTRA_CFLAGS}])
+AC_MSG_RESULT([CPPFLAGS : ${CPPFLAGS}])
+AC_MSG_RESULT([CXX : ${CXX}])
+AC_MSG_RESULT([CONFIGURE_CXXFLAGS : ${CONFIGURE_CXXFLAGS}])
+AC_MSG_RESULT([SPECIFIED_CXXFLAGS : ${SPECIFIED_CXXFLAGS}])
+AC_MSG_RESULT([EXTRA_CXXFLAGS : ${EXTRA_CXXFLAGS}])
+AC_MSG_RESULT([LDFLAGS : ${LDFLAGS}])
+AC_MSG_RESULT([EXTRA_LDFLAGS : ${EXTRA_LDFLAGS}])
+AC_MSG_RESULT([DSO_LDFLAGS : ${DSO_LDFLAGS}])
+AC_MSG_RESULT([LIBS : ${LIBS}])
+AC_MSG_RESULT([RPATH_EXTRA : ${RPATH_EXTRA}])
+AC_MSG_RESULT([])
+AC_MSG_RESULT([XSLTPROC : ${XSLTPROC}])
+AC_MSG_RESULT([XSLROOT : ${XSLROOT}])
+AC_MSG_RESULT([])
+AC_MSG_RESULT([PREFIX : ${PREFIX}])
+AC_MSG_RESULT([BINDIR : ${BINDIR}])
+AC_MSG_RESULT([DATADIR : ${DATADIR}])
+AC_MSG_RESULT([INCLUDEDIR : ${INCLUDEDIR}])
+AC_MSG_RESULT([LIBDIR : ${LIBDIR}])
+AC_MSG_RESULT([MANDIR : ${MANDIR}])
+AC_MSG_RESULT([])
+AC_MSG_RESULT([srcroot : ${srcroot}])
+AC_MSG_RESULT([abs_srcroot : ${abs_srcroot}])
+AC_MSG_RESULT([objroot : ${objroot}])
+AC_MSG_RESULT([abs_objroot : ${abs_objroot}])
+AC_MSG_RESULT([])
+AC_MSG_RESULT([JEMALLOC_PREFIX : ${JEMALLOC_PREFIX}])
+AC_MSG_RESULT([JEMALLOC_PRIVATE_NAMESPACE])
+AC_MSG_RESULT([ : ${JEMALLOC_PRIVATE_NAMESPACE}])
+AC_MSG_RESULT([install_suffix : ${install_suffix}])
+AC_MSG_RESULT([malloc_conf : ${config_malloc_conf}])
+AC_MSG_RESULT([documentation : ${enable_doc}])
+AC_MSG_RESULT([shared libs : ${enable_shared}])
+AC_MSG_RESULT([static libs : ${enable_static}])
+AC_MSG_RESULT([autogen : ${enable_autogen}])
+AC_MSG_RESULT([debug : ${enable_debug}])
+AC_MSG_RESULT([stats : ${enable_stats}])
+AC_MSG_RESULT([experimental_smallocx : ${enable_experimental_smallocx}])
+AC_MSG_RESULT([prof : ${enable_prof}])
+AC_MSG_RESULT([prof-libunwind : ${enable_prof_libunwind}])
+AC_MSG_RESULT([prof-libgcc : ${enable_prof_libgcc}])
+AC_MSG_RESULT([prof-gcc : ${enable_prof_gcc}])
+AC_MSG_RESULT([fill : ${enable_fill}])
+AC_MSG_RESULT([utrace : ${enable_utrace}])
+AC_MSG_RESULT([xmalloc : ${enable_xmalloc}])
+AC_MSG_RESULT([log : ${enable_log}])
+AC_MSG_RESULT([lazy_lock : ${enable_lazy_lock}])
+AC_MSG_RESULT([cache-oblivious : ${enable_cache_oblivious}])
+AC_MSG_RESULT([cxx : ${enable_cxx}])
+AC_MSG_RESULT([===============================================================================])
+>>>>>>> main
diff --git a/contrib/jemalloc/doc/html.xsl.in b/contrib/jemalloc/doc/html.xsl.in
new file mode 100644
index 000000000000..ec4fa6552bee
--- /dev/null
+++ b/contrib/jemalloc/doc/html.xsl.in
@@ -0,0 +1,5 @@
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+ <xsl:import href="@XSLROOT@/html/docbook.xsl"/>
+ <xsl:import href="@abs_srcroot@doc/stylesheet.xsl"/>
+ <xsl:output method="xml" encoding="utf-8"/>
+</xsl:stylesheet>
diff --git a/contrib/jemalloc/doc/jemalloc.xml.in b/contrib/jemalloc/doc/jemalloc.xml.in
new file mode 100644
index 000000000000..e2b15de21961
--- /dev/null
+++ b/contrib/jemalloc/doc/jemalloc.xml.in
@@ -0,0 +1,7330 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<?xml-stylesheet type="text/xsl"
+ href="http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl"?>
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.4//EN"
+ "http://www.oasis-open.org/docbook/xml/4.4/docbookx.dtd" [
+]>
+
+<refentry>
+ <refentryinfo>
+ <title>User Manual</title>
+ <productname>jemalloc</productname>
+ <releaseinfo role="version">@jemalloc_version@</releaseinfo>
+ <authorgroup>
+ <author>
+ <firstname>Jason</firstname>
+ <surname>Evans</surname>
+ <personblurb>Author</personblurb>
+ </author>
+ </authorgroup>
+ </refentryinfo>
+ <refmeta>
+ <refentrytitle>JEMALLOC</refentrytitle>
+ <manvolnum>3</manvolnum>
+ </refmeta>
+ <refnamediv>
+ <refdescriptor>jemalloc</refdescriptor>
+ <refname>jemalloc</refname>
+ <!-- Each refname causes a man page file to be created. Only if this were
+ the system malloc(3) implementation would these files be appropriate.
+ <refname>malloc</refname>
+ <refname>calloc</refname>
+ <refname>posix_memalign</refname>
+ <refname>aligned_alloc</refname>
+ <refname>realloc</refname>
+ <refname>free</refname>
+ <refname>mallocx</refname>
+ <refname>rallocx</refname>
+ <refname>xallocx</refname>
+ <refname>sallocx</refname>
+ <refname>dallocx</refname>
+ <refname>sdallocx</refname>
+ <refname>nallocx</refname>
+ <refname>mallctl</refname>
+ <refname>mallctlnametomib</refname>
+ <refname>mallctlbymib</refname>
+ <refname>malloc_stats_print</refname>
+ <refname>malloc_usable_size</refname>
+ -->
+ <refpurpose>general purpose memory allocation functions</refpurpose>
+ </refnamediv>
+ <refsect1 id="library">
+ <title>LIBRARY</title>
+ <para>This manual describes jemalloc @jemalloc_version@. More information
+ can be found at the <ulink
+ url="http://jemalloc.net/">jemalloc website</ulink>.</para>
+
+ <para>The following configuration options are enabled in libc's built-in
+ jemalloc: <option>--enable-fill</option>,
+ <option>--enable-lazy-lock</option>, <option>--enable-stats</option>,
+ <option>--enable-utrace</option>, <option>--enable-xmalloc</option>, and
+ <option>--with-malloc-conf=abort_conf:false</option>.
+ Additionally, <option>--enable-debug</option> is enabled in development
+ versions of FreeBSD (controlled by the
+ <constant>MK_MALLOC_PRODUCTION</constant> make variable).</para>
+
+ </refsect1>
+ <refsynopsisdiv>
+ <title>SYNOPSIS</title>
+ <funcsynopsis>
+ <funcsynopsisinfo>#include &lt;<filename class="headerfile">stdlib.h</filename>&gt;
+#include &lt;<filename class="headerfile">malloc_np.h</filename>&gt;</funcsynopsisinfo>
+ <refsect2>
+ <title>Standard API</title>
+ <funcprototype>
+ <funcdef>void *<function>malloc</function></funcdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void *<function>calloc</function></funcdef>
+ <paramdef>size_t <parameter>number</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>int <function>posix_memalign</function></funcdef>
+ <paramdef>void **<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>alignment</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void *<function>aligned_alloc</function></funcdef>
+ <paramdef>size_t <parameter>alignment</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void *<function>realloc</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void <function>free</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ </funcprototype>
+ </refsect2>
+ <refsect2>
+ <title>Non-standard API</title>
+ <funcprototype>
+ <funcdef>void *<function>mallocx</function></funcdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void *<function>rallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>xallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>extra</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>sallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void <function>dallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void <function>sdallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>nallocx</function></funcdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>int <function>mallctl</function></funcdef>
+ <paramdef>const char *<parameter>name</parameter></paramdef>
+ <paramdef>void *<parameter>oldp</parameter></paramdef>
+ <paramdef>size_t *<parameter>oldlenp</parameter></paramdef>
+ <paramdef>void *<parameter>newp</parameter></paramdef>
+ <paramdef>size_t <parameter>newlen</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>int <function>mallctlnametomib</function></funcdef>
+ <paramdef>const char *<parameter>name</parameter></paramdef>
+ <paramdef>size_t *<parameter>mibp</parameter></paramdef>
+ <paramdef>size_t *<parameter>miblenp</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>int <function>mallctlbymib</function></funcdef>
+ <paramdef>const size_t *<parameter>mib</parameter></paramdef>
+ <paramdef>size_t <parameter>miblen</parameter></paramdef>
+ <paramdef>void *<parameter>oldp</parameter></paramdef>
+ <paramdef>size_t *<parameter>oldlenp</parameter></paramdef>
+ <paramdef>void *<parameter>newp</parameter></paramdef>
+ <paramdef>size_t <parameter>newlen</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void <function>malloc_stats_print</function></funcdef>
+ <paramdef>void <parameter>(*write_cb)</parameter>
+ <funcparams>void *, const char *</funcparams>
+ </paramdef>
+ <paramdef>void *<parameter>cbopaque</parameter></paramdef>
+ <paramdef>const char *<parameter>opts</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>malloc_usable_size</function></funcdef>
+ <paramdef>const void *<parameter>ptr</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void <function>(*malloc_message)</function></funcdef>
+ <paramdef>void *<parameter>cbopaque</parameter></paramdef>
+ <paramdef>const char *<parameter>s</parameter></paramdef>
+ </funcprototype>
+ <para><type>const char *</type><varname>malloc_conf</varname>;</para>
+ </refsect2>
+ </funcsynopsis>
+ </refsynopsisdiv>
+ <refsect1 id="description">
+ <title>DESCRIPTION</title>
+ <refsect2>
+ <title>Standard API</title>
+
+ <para>The <function>malloc()</function> function allocates
+ <parameter>size</parameter> bytes of uninitialized memory. The allocated
+ space is suitably aligned (after possible pointer coercion) for storage
+ of any type of object.</para>
+
+ <para>The <function>calloc()</function> function allocates
+ space for <parameter>number</parameter> objects, each
+ <parameter>size</parameter> bytes in length. The result is identical to
+ calling <function>malloc()</function> with an argument of
+ <parameter>number</parameter> * <parameter>size</parameter>, with the
+ exception that the allocated memory is explicitly initialized to zero
+ bytes.</para>
+
+ <para>The <function>posix_memalign()</function> function
+ allocates <parameter>size</parameter> bytes of memory such that the
+ allocation's base address is a multiple of
+ <parameter>alignment</parameter>, and returns the allocation in the value
+ pointed to by <parameter>ptr</parameter>. The requested
+ <parameter>alignment</parameter> must be a power of 2 at least as large as
+ <code language="C">sizeof(<type>void *</type>)</code>.</para>
+
+ <para>The <function>aligned_alloc()</function> function
+ allocates <parameter>size</parameter> bytes of memory such that the
+ allocation's base address is a multiple of
+ <parameter>alignment</parameter>. The requested
+ <parameter>alignment</parameter> must be a power of 2. Behavior is
+ undefined if <parameter>size</parameter> is not an integral multiple of
+ <parameter>alignment</parameter>.</para>
+
+ <para>The <function>realloc()</function> function changes the
+ size of the previously allocated memory referenced by
+ <parameter>ptr</parameter> to <parameter>size</parameter> bytes. The
+ contents of the memory are unchanged up to the lesser of the new and old
+ sizes. If the new size is larger, the contents of the newly allocated
+ portion of the memory are undefined. Upon success, the memory referenced
+ by <parameter>ptr</parameter> is freed and a pointer to the newly
+ allocated memory is returned. Note that
+ <function>realloc()</function> may move the memory allocation,
+ resulting in a different return value than <parameter>ptr</parameter>.
+ If <parameter>ptr</parameter> is <constant>NULL</constant>, the
+ <function>realloc()</function> function behaves identically to
+ <function>malloc()</function> for the specified size.</para>
+
+ <para>The <function>free()</function> function causes the
+ allocated memory referenced by <parameter>ptr</parameter> to be made
+ available for future allocations. If <parameter>ptr</parameter> is
+ <constant>NULL</constant>, no action occurs.</para>
+ </refsect2>
+ <refsect2>
+ <title>Non-standard API</title>
+ <para>The <function>mallocx()</function>,
+ <function>rallocx()</function>,
+ <function>xallocx()</function>,
+ <function>sallocx()</function>,
+ <function>dallocx()</function>,
+ <function>sdallocx()</function>, and
+ <function>nallocx()</function> functions all have a
+ <parameter>flags</parameter> argument that can be used to specify
+ options. The functions only check the options that are contextually
+ relevant. Use bitwise or (<code language="C">|</code>) operations to
+ specify one or more of the following:
+ <variablelist>
+ <varlistentry id="MALLOCX_LG_ALIGN">
+ <term><constant>MALLOCX_LG_ALIGN(<parameter>la</parameter>)
+ </constant></term>
+
+ <listitem><para>Align the memory allocation to start at an address
+ that is a multiple of <code language="C">(1 &lt;&lt;
+ <parameter>la</parameter>)</code>. This macro does not validate
+ that <parameter>la</parameter> is within the valid
+ range.</para></listitem>
+ </varlistentry>
+ <varlistentry id="MALLOCX_ALIGN">
+ <term><constant>MALLOCX_ALIGN(<parameter>a</parameter>)
+ </constant></term>
+
+ <listitem><para>Align the memory allocation to start at an address
+ that is a multiple of <parameter>a</parameter>, where
+ <parameter>a</parameter> is a power of two. This macro does not
+ validate that <parameter>a</parameter> is a power of 2.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry id="MALLOCX_ZERO">
+ <term><constant>MALLOCX_ZERO</constant></term>
+
+ <listitem><para>Initialize newly allocated memory to contain zero
+ bytes. In the growing reallocation case, the real size prior to
+ reallocation defines the boundary between untouched bytes and those
+ that are initialized to contain zero bytes. If this macro is
+ absent, newly allocated memory is uninitialized.</para></listitem>
+ </varlistentry>
+ <varlistentry id="MALLOCX_TCACHE">
+ <term><constant>MALLOCX_TCACHE(<parameter>tc</parameter>)
+ </constant></term>
+
+ <listitem><para>Use the thread-specific cache (tcache) specified by
+ the identifier <parameter>tc</parameter>, which must have been
+ acquired via the <link
+ linkend="tcache.create"><mallctl>tcache.create</mallctl></link>
+ mallctl. This macro does not validate that
+ <parameter>tc</parameter> specifies a valid
+ identifier.</para></listitem>
+ </varlistentry>
+ <varlistentry id="MALLOC_TCACHE_NONE">
+ <term><constant>MALLOCX_TCACHE_NONE</constant></term>
+
+ <listitem><para>Do not use a thread-specific cache (tcache). Unless
+ <constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant> or
+ <constant>MALLOCX_TCACHE_NONE</constant> is specified, an
+ automatically managed tcache will be used under many circumstances.
+ This macro cannot be used in the same <parameter>flags</parameter>
+ argument as
+ <constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant>.</para></listitem>
+ </varlistentry>
+ <varlistentry id="MALLOCX_ARENA">
+ <term><constant>MALLOCX_ARENA(<parameter>a</parameter>)
+ </constant></term>
+
+ <listitem><para>Use the arena specified by the index
+ <parameter>a</parameter>. This macro has no effect for regions that
+ were allocated via an arena other than the one specified. This
+ macro does not validate that <parameter>a</parameter> specifies an
+ arena index in the valid range.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+
+ <para>The <function>mallocx()</function> function allocates at
+ least <parameter>size</parameter> bytes of memory, and returns a pointer
+ to the base address of the allocation. Behavior is undefined if
+ <parameter>size</parameter> is <constant>0</constant>.</para>
+
+ <para>The <function>rallocx()</function> function resizes the
+ allocation at <parameter>ptr</parameter> to be at least
+ <parameter>size</parameter> bytes, and returns a pointer to the base
+ address of the resulting allocation, which may or may not have moved from
+ its original location. Behavior is undefined if
+ <parameter>size</parameter> is <constant>0</constant>.</para>
+
+ <para>The <function>xallocx()</function> function resizes the
+ allocation at <parameter>ptr</parameter> in place to be at least
+ <parameter>size</parameter> bytes, and returns the real size of the
+ allocation. If <parameter>extra</parameter> is non-zero, an attempt is
+ made to resize the allocation to be at least <code
+ language="C">(<parameter>size</parameter> +
+ <parameter>extra</parameter>)</code> bytes, though inability to allocate
+ the extra byte(s) will not by itself result in failure to resize.
+ Behavior is undefined if <parameter>size</parameter> is
+ <constant>0</constant>, or if <code
+ language="C">(<parameter>size</parameter> + <parameter>extra</parameter>
+ &gt; <constant>SIZE_T_MAX</constant>)</code>.</para>
+
+ <para>The <function>sallocx()</function> function returns the
+ real size of the allocation at <parameter>ptr</parameter>.</para>
+
+ <para>The <function>dallocx()</function> function causes the
+ memory referenced by <parameter>ptr</parameter> to be made available for
+ future allocations.</para>
+
+ <para>The <function>sdallocx()</function> function is an
+ extension of <function>dallocx()</function> with a
+ <parameter>size</parameter> parameter to allow the caller to pass in the
+ allocation size as an optimization. The minimum valid input size is the
+ original requested size of the allocation, and the maximum valid input
+ size is the corresponding value returned by
+ <function>nallocx()</function> or
+ <function>sallocx()</function>.</para>
+
+ <para>The <function>nallocx()</function> function allocates no
+ memory, but it performs the same size computation as the
+ <function>mallocx()</function> function, and returns the real
+ size of the allocation that would result from the equivalent
+ <function>mallocx()</function> function call, or
+ <constant>0</constant> if the inputs exceed the maximum supported size
+ class and/or alignment. Behavior is undefined if
+ <parameter>size</parameter> is <constant>0</constant>.</para>
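+
+ <para>As an illustrative sketch (error checking omitted for brevity), the
+ following fragment combines several of these functions and flags:
+ <programlisting language="C"><![CDATA[
+/* Allocate 4096 bytes, zero-filled, aligned to a 4096-byte boundary. */
+void *p = mallocx(4096, MALLOCX_ALIGN(4096) | MALLOCX_ZERO);
+
+/* Grow the allocation; the result may or may not have moved. */
+p = rallocx(p, 8192, MALLOCX_ALIGN(4096));
+
+/* Deallocate, passing the size as an optimization. */
+sdallocx(p, 8192, 0);]]></programlisting></para>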
+
+ <para>The <function>mallctl()</function> function provides a
+ general interface for introspecting the memory allocator, as well as
+ setting modifiable parameters and triggering actions. The
+ period-separated <parameter>name</parameter> argument specifies a
+ location in a tree-structured namespace; see the <xref
+ linkend="mallctl_namespace" xrefstyle="template:%t"/> section for
+ documentation on the tree contents. To read a value, pass a pointer via
+ <parameter>oldp</parameter> to adequate space to contain the value, and a
+ pointer to its length via <parameter>oldlenp</parameter>; otherwise pass
+ <constant>NULL</constant> and <constant>NULL</constant>. Similarly, to
+ write a value, pass a pointer to the value via
+ <parameter>newp</parameter>, and its length via
+ <parameter>newlen</parameter>; otherwise pass <constant>NULL</constant>
+ and <constant>0</constant>.</para>
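+
+ <para>For example, the following sketch (which assumes statistics were
+ enabled via <option>--enable-stats</option>) refreshes the statistics epoch
+ and then reads the total number of bytes allocated by the application:
+ <programlisting language="C"><![CDATA[
+uint64_t epoch = 1;
+size_t sz = sizeof(epoch);
+/* Writing "epoch" refreshes the cached statistics that reads observe. */
+mallctl("epoch", (void *)&epoch, &sz, (void *)&epoch, sizeof(epoch));
+
+size_t allocated;
+size_t len = sizeof(allocated);
+mallctl("stats.allocated", (void *)&allocated, &len, NULL, 0);]]></programlisting></para>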
+
+ <para>The <function>mallctlnametomib()</function> function
+ provides a way to avoid repeated name lookups for applications that
+ repeatedly query the same portion of the namespace, by translating a name
+ to a <quote>Management Information Base</quote> (MIB) that can be passed
+ repeatedly to <function>mallctlbymib()</function>. Upon
+ successful return from <function>mallctlnametomib()</function>,
+ <parameter>mibp</parameter> contains an array of
+ <parameter>*miblenp</parameter> integers, where
+ <parameter>*miblenp</parameter> is the lesser of the number of components
+ in <parameter>name</parameter> and the input value of
+ <parameter>*miblenp</parameter>. Thus it is possible to pass a
+ <parameter>*miblenp</parameter> that is smaller than the number of
+ period-separated name components, which results in a partial MIB that can
+ be used as the basis for constructing a complete MIB. For name
+ components that are integers (e.g. the 2 in
+ <link
+ linkend="arenas.bin.i.size"><mallctl>arenas.bin.2.size</mallctl></link>),
+ the corresponding MIB component will always be that integer. Therefore,
+ it is legitimate to construct code like the following: <programlisting
+ language="C"><![CDATA[
+unsigned nbins, i;
+size_t mib[4];
+size_t len, miblen;
+
+len = sizeof(nbins);
+mallctl("arenas.nbins", &nbins, &len, NULL, 0);
+
+miblen = 4;
+mallctlnametomib("arenas.bin.0.size", mib, &miblen);
+for (i = 0; i < nbins; i++) {
+ size_t bin_size;
+
+ mib[2] = i;
+ len = sizeof(bin_size);
+ mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0);
+ /* Do something with bin_size... */
+}]]></programlisting></para>
+
+ <varlistentry id="malloc_stats_print_opts">
+ </varlistentry>
+ <para>The <function>malloc_stats_print()</function> function writes
+ summary statistics via the <parameter>write_cb</parameter> callback
+ function pointer and <parameter>cbopaque</parameter> data passed to
+ <parameter>write_cb</parameter>, or <function>malloc_message()</function>
+ if <parameter>write_cb</parameter> is <constant>NULL</constant>. The
+ statistics are presented in human-readable form unless <quote>J</quote> is
+ specified as a character within the <parameter>opts</parameter> string, in
+ which case the statistics are presented in <ulink
+ url="http://www.json.org/">JSON format</ulink>. This function can be
+ called repeatedly. General information that never changes during
+ execution can be omitted by specifying <quote>g</quote> as a character
+ within the <parameter>opts</parameter> string. Note that
+ <function>malloc_stats_print()</function> uses the
+ <function>mallctl*()</function> functions internally, so inconsistent
+ statistics can be reported if multiple threads use these functions
+ simultaneously. If <option>--enable-stats</option> is specified during
+ configuration, <quote>m</quote>, <quote>d</quote>, and <quote>a</quote>
+ can be specified to omit merged arena, destroyed merged arena, and per
+ arena statistics, respectively; <quote>b</quote> and <quote>l</quote> can
+ be specified to omit per size class statistics for bins and large objects,
+ respectively; <quote>x</quote> can be specified to omit all mutex
+ statistics; <quote>e</quote> can be used to omit extent statistics.
+ Unrecognized characters are silently ignored. Note that thread caching
+ may prevent some statistics from being completely up to date, since extra
+ locking would be required to merge counters that track thread cache
+ operations.</para>
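+
+ <para>For example, the following call emits statistics in JSON format while
+ omitting general information that never changes during execution:
+ <programlisting language="C"><![CDATA[
+/* "J": JSON output; "g": omit general (unchanging) information. */
+malloc_stats_print(NULL, NULL, "Jg");]]></programlisting></para>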
+
+ <para>The <function>malloc_usable_size()</function> function
+ returns the usable size of the allocation pointed to by
+ <parameter>ptr</parameter>. The return value may be larger than the size
+ that was requested during allocation. The
+ <function>malloc_usable_size()</function> function is not a
+ mechanism for in-place <function>realloc()</function>; rather
+ it is provided solely as a tool for introspection purposes. Any
+ discrepancy between the requested allocation size and the size reported
+ by <function>malloc_usable_size()</function> should not be
+ depended on, since such behavior is entirely implementation-dependent.
+ </para>
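+
+ <para>For instance, in the following sketch the reported usable size is at
+ least the requested size, but no particular value should be relied upon:
+ <programlisting language="C"><![CDATA[
+void *p = malloc(9);
+size_t usable = malloc_usable_size(p); /* >= 9; e.g. 16 here. */
+free(p);]]></programlisting></para>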
+ </refsect2>
+ </refsect1>
+ <refsect1 id="tuning">
+ <title>TUNING</title>
+ <para>When the first call is made to one of the memory allocation
+ routines, the allocator initializes its internals based in part on various
+ options that can be specified at compile- or run-time.</para>
+
+ <para>The string specified via <option>--with-malloc-conf</option>, the
+ string pointed to by the global variable <varname>malloc_conf</varname>, the
+ <quote>name</quote> of the file referenced by the symbolic link named
+ <filename class="symlink">/etc/malloc.conf</filename>, and the value of the
+ environment variable <envar>MALLOC_CONF</envar>, will be interpreted, in
+ that order, from left to right as options. Note that
+ <varname>malloc_conf</varname> may be read before
+ <function>main()</function> is entered, so the declaration of
+ <varname>malloc_conf</varname> should specify an initializer that contains
+ the final value to be read by jemalloc. <option>--with-malloc-conf</option>
+ and <varname>malloc_conf</varname> are compile-time mechanisms, whereas
+ <filename class="symlink">/etc/malloc.conf</filename> and
+ <envar>MALLOC_CONF</envar> can be safely set any time prior to program
+ invocation.</para>
+
+ <para>An options string is a comma-separated list of option:value pairs.
+ There is one key corresponding to each <link
+ linkend="opt.abort"><mallctl>opt.*</mallctl></link> mallctl (see the <xref
+ linkend="mallctl_namespace" xrefstyle="template:%t"/> section for options
+ documentation). For example, <literal>abort:true,narenas:1</literal> sets
+ the <link linkend="opt.abort"><mallctl>opt.abort</mallctl></link> and <link
+ linkend="opt.narenas"><mallctl>opt.narenas</mallctl></link> options. Some
+ options have boolean values (true/false), others have integer values (base
+ 8, 10, or 16, depending on prefix), and yet others have raw string
+ values.</para>
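+
+ <para>For example, an application can embed a compile-time default that a
+ mechanism interpreted later, such as the <envar>MALLOC_CONF</envar>
+ environment variable, may override (a sketch; the option values shown are
+ illustrative): <programlisting language="C"><![CDATA[
+/* Compile-time default: abort on invalid options, use one arena. */
+const char *malloc_conf = "abort_conf:true,narenas:1";
+
+/*
+ * Because MALLOC_CONF is interpreted after malloc_conf, running the
+ * program as `MALLOC_CONF="narenas:4" ./a.out' overrides narenas.
+ */]]></programlisting></para>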
+ </refsect1>
+ <refsect1 id="implementation_notes">
+ <title>IMPLEMENTATION NOTES</title>
+ <para>Traditionally, allocators have used
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> to obtain memory, which is
+ suboptimal for several reasons, including race conditions, increased
+ fragmentation, and artificial limitations on maximum usable memory. If
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> is supported by the operating
+ system, this allocator uses both
+ <citerefentry><refentrytitle>mmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> and
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>, in that order of preference;
+ otherwise only <citerefentry><refentrytitle>mmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> is used.</para>
+
+ <para>This allocator uses multiple arenas in order to reduce lock
+ contention for threaded programs on multi-processor systems. This works
+ well with regard to threading scalability, but incurs some costs. There is
+ a small fixed per-arena overhead, and additionally, arenas manage memory
+ completely independently of each other, which means a small fixed increase
+ in overall memory fragmentation. These overheads are not generally an
+ issue, given the number of arenas normally used. Note that using
+ substantially more arenas than the default is not likely to improve
+ performance, mainly due to reduced cache performance. However, it may make
+ sense to reduce the number of arenas if an application does not make much
+ use of the allocation functions.</para>
+
+ <para>In addition to multiple arenas, this allocator supports
+ thread-specific caching, in order to make it possible to completely avoid
+ synchronization for most allocation requests. Such caching allows very fast
+ allocation in the common case, but it increases memory usage and
+ fragmentation, since a bounded number of objects can remain allocated in
+ each thread cache.</para>
+
+ <para>Memory is conceptually broken into extents. Extents are always
+ aligned to multiples of the page size. This alignment makes it possible to
+ find metadata for user objects quickly. User objects are broken into two
+ categories according to size: small and large. Contiguous small objects
+ comprise a slab, which resides within a single extent, whereas large objects
+ each have their own extents backing them.</para>
+
+ <para>Small objects are managed in groups by slabs. Each slab maintains
+ a bitmap to track which regions are in use. Allocation requests that are no
+ more than half the quantum (8 or 16, depending on architecture) are rounded
+ up to the nearest power of two that is at least <code
+ language="C">sizeof(<type>double</type>)</code>. All other object size
+ classes are multiples of the quantum, spaced such that there are four size
+ classes for each doubling in size, which limits internal fragmentation to
+ approximately 20% for all but the smallest size classes. Small size classes
+ are smaller than four times the page size, and large size classes extend
+ from four times the page size up to the largest size class that does not
+ exceed <constant>PTRDIFF_MAX</constant>.</para>
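+
+ <para>The effect of this spacing can be observed via
+ <function>nallocx()</function>; for example, assuming a 16-byte quantum
+ (see the size class table below): <programlisting language="C"><![CDATA[
+size_t sz;
+sz = nallocx(9, 0);   /* 16: rounded up to the 16-byte size class. */
+sz = nallocx(65, 0);  /* 80: classes in this range are spaced 16 apart. */
+sz = nallocx(170, 0); /* 192: the spacing doubles to 32 in this range. */]]></programlisting></para>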
+
+ <para>Allocations are packed tightly together, which can be an issue for
+ multi-threaded applications. If you need to ensure that allocations do not
+ suffer from cacheline sharing, round your allocation requests up to the
+ nearest multiple of the cacheline size, or specify cacheline alignment when
+ allocating.</para>
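+
+ <para>For example, assuming a 64-byte cacheline (the actual size is
+ architecture-dependent), either approach below avoids cacheline sharing:
+ <programlisting language="C"><![CDATA[
+#define CACHELINE ((size_t)64) /* Assumed cacheline size. */
+
+size_t sz = 100;
+
+/* Round the request up to a multiple of the cacheline size... */
+void *a = malloc((sz + CACHELINE - 1) & ~(CACHELINE - 1));
+
+/* ...or request cacheline alignment explicitly. */
+void *b = mallocx(sz, MALLOCX_ALIGN(CACHELINE));]]></programlisting></para>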
+
+ <para>The <function>realloc()</function>,
+ <function>rallocx()</function>, and
+ <function>xallocx()</function> functions may resize allocations
+ without moving them under limited circumstances. Unlike the
+ <function>*allocx()</function> API, the standard API does not
+ officially round up the usable size of an allocation to the nearest size
+ class, so technically it is necessary to call
+ <function>realloc()</function> to grow e.g. a 9-byte allocation to
+ 16 bytes, or shrink a 16-byte allocation to 9 bytes. Growth and shrinkage
+ trivially succeeds in place as long as the pre-size and post-size both round
+ up to the same size class. No other API guarantees are made regarding
+ in-place resizing, but the current implementation also tries to resize large
+ allocations in place, as long as the pre-size and post-size are both large.
+ For shrinkage to succeed, the extent allocator must support splitting (see
+ <link
+ linkend="arena.i.extent_hooks"><mallctl>arena.&lt;i&gt;.extent_hooks</mallctl></link>).
+ Growth only succeeds if the trailing memory is currently available, and the
+ extent allocator supports merging.</para>
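+
+  <para>For illustration only, a minimal sketch that probes for in-place
+  growth with <function>xallocx()</function>, which never moves the
+  allocation, and falls back to <function>rallocx()</function> otherwise:
+  <programlisting language="C"><![CDATA[
+void *p = mallocx(4096, 0);
+/* xallocx() returns the resulting usable size; if it is still less
+ * than the requested size, the in-place attempt did not succeed. */
+if (xallocx(p, 8192, 0, 0) < 8192) {
+	p = rallocx(p, 8192, 0);    /* may move the allocation */
+}
+dallocx(p, 0);]]></programlisting>
+  </para>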
+
+ <para>Assuming 4 KiB pages and a 16-byte quantum on a 64-bit system, the
+ size classes in each category are as shown in <xref linkend="size_classes"
+ xrefstyle="template:Table %n"/>.</para>
+
+ <table xml:id="size_classes" frame="all">
+ <title>Size classes</title>
+ <tgroup cols="3" colsep="1" rowsep="1">
+ <colspec colname="c1" align="left"/>
+ <colspec colname="c2" align="right"/>
+ <colspec colname="c3" align="left"/>
+ <thead>
+ <row>
+ <entry>Category</entry>
+ <entry>Spacing</entry>
+ <entry>Size</entry>
+ </row>
+ </thead>
+ <tbody>
+ <row>
+ <entry morerows="8">Small</entry>
+ <entry>lg</entry>
+ <entry>[8]</entry>
+ </row>
+ <row>
+ <entry>16</entry>
+ <entry>[16, 32, 48, 64, 80, 96, 112, 128]</entry>
+ </row>
+ <row>
+ <entry>32</entry>
+ <entry>[160, 192, 224, 256]</entry>
+ </row>
+ <row>
+ <entry>64</entry>
+ <entry>[320, 384, 448, 512]</entry>
+ </row>
+ <row>
+ <entry>128</entry>
+ <entry>[640, 768, 896, 1024]</entry>
+ </row>
+ <row>
+ <entry>256</entry>
+ <entry>[1280, 1536, 1792, 2048]</entry>
+ </row>
+ <row>
+ <entry>512</entry>
+ <entry>[2560, 3072, 3584, 4096]</entry>
+ </row>
+ <row>
+ <entry>1 KiB</entry>
+ <entry>[5 KiB, 6 KiB, 7 KiB, 8 KiB]</entry>
+ </row>
+ <row>
+ <entry>2 KiB</entry>
+ <entry>[10 KiB, 12 KiB, 14 KiB]</entry>
+ </row>
+ <row>
+ <entry morerows="15">Large</entry>
+ <entry>2 KiB</entry>
+ <entry>[16 KiB]</entry>
+ </row>
+ <row>
+ <entry>4 KiB</entry>
+ <entry>[20 KiB, 24 KiB, 28 KiB, 32 KiB]</entry>
+ </row>
+ <row>
+ <entry>8 KiB</entry>
+ <entry>[40 KiB, 48 KiB, 56 KiB, 64 KiB]</entry>
+ </row>
+ <row>
+ <entry>16 KiB</entry>
+ <entry>[80 KiB, 96 KiB, 112 KiB, 128 KiB]</entry>
+ </row>
+ <row>
+ <entry>32 KiB</entry>
+ <entry>[160 KiB, 192 KiB, 224 KiB, 256 KiB]</entry>
+ </row>
+ <row>
+ <entry>64 KiB</entry>
+ <entry>[320 KiB, 384 KiB, 448 KiB, 512 KiB]</entry>
+ </row>
+ <row>
+ <entry>128 KiB</entry>
+ <entry>[640 KiB, 768 KiB, 896 KiB, 1 MiB]</entry>
+ </row>
+ <row>
+ <entry>256 KiB</entry>
+ <entry>[1280 KiB, 1536 KiB, 1792 KiB, 2 MiB]</entry>
+ </row>
+ <row>
+ <entry>512 KiB</entry>
+ <entry>[2560 KiB, 3 MiB, 3584 KiB, 4 MiB]</entry>
+ </row>
+ <row>
+ <entry>1 MiB</entry>
+ <entry>[5 MiB, 6 MiB, 7 MiB, 8 MiB]</entry>
+ </row>
+ <row>
+ <entry>2 MiB</entry>
+ <entry>[10 MiB, 12 MiB, 14 MiB, 16 MiB]</entry>
+ </row>
+ <row>
+ <entry>4 MiB</entry>
+ <entry>[20 MiB, 24 MiB, 28 MiB, 32 MiB]</entry>
+ </row>
+ <row>
+ <entry>8 MiB</entry>
+ <entry>[40 MiB, 48 MiB, 56 MiB, 64 MiB]</entry>
+ </row>
+ <row>
+ <entry>...</entry>
+ <entry>...</entry>
+ </row>
+ <row>
+ <entry>512 PiB</entry>
+ <entry>[2560 PiB, 3 EiB, 3584 PiB, 4 EiB]</entry>
+ </row>
+ <row>
+ <entry>1 EiB</entry>
+ <entry>[5 EiB, 6 EiB, 7 EiB]</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ </refsect1>
+ <refsect1 id="mallctl_namespace">
+ <title>MALLCTL NAMESPACE</title>
+ <para>The following names are defined in the namespace accessible via the
+ <function>mallctl*()</function> functions. Value types are specified in
+ parentheses, their readable/writable statuses are encoded as
+ <literal>rw</literal>, <literal>r-</literal>, <literal>-w</literal>, or
+ <literal>--</literal>, and required build configuration flags follow, if
+ any. A name element encoded as <literal>&lt;i&gt;</literal> or
+ <literal>&lt;j&gt;</literal> indicates an integer component, where the
+ integer varies from 0 to some upper value that must be determined via
+ introspection. In the case of <mallctl>stats.arenas.&lt;i&gt;.*</mallctl>
+ and <mallctl>arena.&lt;i&gt;.{initialized,purge,decay,dss}</mallctl>,
+ <literal>&lt;i&gt;</literal> equal to
+ <constant>MALLCTL_ARENAS_ALL</constant> can be used to operate on all arenas
+ or access the summation of statistics from all arenas; similarly
+ <literal>&lt;i&gt;</literal> equal to
+ <constant>MALLCTL_ARENAS_DESTROYED</constant> can be used to access the
+ summation of statistics from all destroyed arenas. These constants can be
+ utilized either via <function>mallctlnametomib()</function> followed by
+ <function>mallctlbymib()</function>, or via code such as the following:
+ <programlisting language="C"><![CDATA[
+#define STRINGIFY_HELPER(x) #x
+#define STRINGIFY(x) STRINGIFY_HELPER(x)
+
+mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
+ NULL, NULL, NULL, 0);]]></programlisting>
+ Take special note of the <link
+ linkend="epoch"><mallctl>epoch</mallctl></link> mallctl, which controls
+ refreshing of cached dynamic statistics.</para>
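+
+  <para>Equivalently, an illustrative sketch of the
+  <function>mallctlnametomib()</function> and
+  <function>mallctlbymib()</function> route mentioned above:
+  <programlisting language="C"><![CDATA[
+size_t mib[3];
+size_t miblen = sizeof(mib) / sizeof(mib[0]);
+
+/* Translate the name once, then patch in the arena index. */
+mallctlnametomib("arena.0.decay", mib, &miblen);
+mib[1] = MALLCTL_ARENAS_ALL;
+mallctlbymib(mib, miblen, NULL, NULL, NULL, 0);]]></programlisting>
+  </para>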
+
+ <variablelist>
+ <varlistentry id="version">
+ <term>
+ <mallctl>version</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Return the jemalloc version string.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="epoch">
+ <term>
+ <mallctl>epoch</mallctl>
+ (<type>uint64_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>If a value is passed in, refresh the data from which
+ the <function>mallctl*()</function> functions report values,
+ and increment the epoch. Return the current epoch. This is useful for
+      detecting whether another thread caused a refresh.</para>
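+
+      <para>An illustrative sketch of the conventional refresh idiom
+      before reading statistics:
+      <programlisting language="C"><![CDATA[
+uint64_t epoch = 1;
+size_t sz = sizeof(epoch);
+/* Writing any value advances the epoch and refreshes cached stats. */
+mallctl("epoch", &epoch, &sz, &epoch, sz);]]></programlisting>
+      </para></listitem>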
+ </varlistentry>
+
+ <varlistentry id="background_thread">
+ <term>
+ <mallctl>background_thread</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Enable/disable internal background worker threads. When
+ set to true, background threads are created on demand (the number of
+ background threads will be no more than the number of CPUs or active
+ arenas). Threads run periodically, and handle <link
+ linkend="arena.i.decay">purging</link> asynchronously. When switching
+        off, background threads are terminated synchronously. Note that after
+        a call to
+        <citerefentry><refentrytitle>fork</refentrytitle><manvolnum>2</manvolnum></citerefentry>,
+        the state in the child process will be disabled regardless of the
+        state in the parent process. See <link
+ linkend="stats.background_thread.num_threads"><mallctl>stats.background_thread</mallctl></link>
+ for related stats. <link
+ linkend="opt.background_thread"><mallctl>opt.background_thread</mallctl></link>
+ can be used to set the default option. This option is only available on
+        selected pthread-based platforms.</para>
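+
+        <para>For illustration, enabling background threads at run time:
+        <programlisting language="C"><![CDATA[
+bool enable = true;
+/* Worker threads are created on demand once this call succeeds. */
+mallctl("background_thread", NULL, NULL, &enable, sizeof(enable));]]></programlisting>
+        </para></listitem>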
+ </varlistentry>
+
+ <varlistentry id="max_background_threads">
+ <term>
+ <mallctl>max_background_threads</mallctl>
+ (<type>size_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Maximum number of background worker threads that will
+ be created. This value is capped at <link
+ linkend="opt.max_background_threads"><mallctl>opt.max_background_threads</mallctl></link> at
+ startup.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.cache_oblivious">
+ <term>
+ <mallctl>config.cache_oblivious</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-cache-oblivious</option> was specified
+ during build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.debug">
+ <term>
+ <mallctl>config.debug</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-debug</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.fill">
+ <term>
+ <mallctl>config.fill</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-fill</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.lazy_lock">
+ <term>
+ <mallctl>config.lazy_lock</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-lazy-lock</option> was specified
+ during build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.malloc_conf">
+ <term>
+ <mallctl>config.malloc_conf</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Embedded configure-time-specified run-time options
+ string, empty unless <option>--with-malloc-conf</option> was specified
+ during build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.prof">
+ <term>
+ <mallctl>config.prof</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-prof</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.prof_libgcc">
+ <term>
+ <mallctl>config.prof_libgcc</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--disable-prof-libgcc</option> was not
+ specified during build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.prof_libunwind">
+ <term>
+ <mallctl>config.prof_libunwind</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-prof-libunwind</option> was specified
+ during build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.stats">
+ <term>
+ <mallctl>config.stats</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-stats</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.utrace">
+ <term>
+ <mallctl>config.utrace</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-utrace</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.xmalloc">
+ <term>
+ <mallctl>config.xmalloc</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-xmalloc</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.abort">
+ <term>
+ <mallctl>opt.abort</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Abort-on-warning enabled/disabled. If true, most
+ warnings are fatal. Note that runtime option warnings are not included
+ (see <link
+ linkend="opt.abort_conf"><mallctl>opt.abort_conf</mallctl></link> for
+ that). The process will call
+ <citerefentry><refentrytitle>abort</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry> in these cases. This option is
+ disabled by default unless <option>--enable-debug</option> is
+ specified during configuration, in which case it is enabled by default.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.confirm_conf">
+ <term>
+ <mallctl>opt.confirm_conf</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Confirm-runtime-options-when-program-starts
+ enabled/disabled. If true, the string specified via
+ <option>--with-malloc-conf</option>, the string pointed to by the
+ global variable <varname>malloc_conf</varname>, the <quote>name</quote>
+ of the file referenced by the symbolic link named
+ <filename class="symlink">/etc/malloc.conf</filename>, and the value of
+ the environment variable <envar>MALLOC_CONF</envar>, will be printed in
+ order. Then, each option being set will be individually printed. This
+ option is disabled by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.abort_conf">
+ <term>
+ <mallctl>opt.abort_conf</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Abort-on-invalid-configuration enabled/disabled. If
+ true, invalid runtime options are fatal. The process will call
+ <citerefentry><refentrytitle>abort</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry> in these cases. This option is
+ disabled by default unless <option>--enable-debug</option> is
+ specified during configuration, in which case it is enabled by default.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.cache_oblivious">
+ <term>
+ <mallctl>opt.cache_oblivious</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+        <listitem><para>Enable/disable cache-oblivious large allocation
+ alignment, for large requests with no alignment constraints. If this
+ feature is disabled, all large allocations are page-aligned as an
+ implementation artifact, which can severely harm CPU cache utilization.
+ However, the cache-oblivious layout comes at the cost of one extra page
+ per large allocation, which in the most extreme case increases physical
+ memory usage for the 16 KiB size class to 20 KiB. This option is enabled
+ by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.metadata_thp">
+ <term>
+ <mallctl>opt.metadata_thp</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+        <listitem><para>Controls whether jemalloc is allowed to use transparent
+        huge pages (THP) for internal metadata (see <link
+        linkend="stats.metadata">stats.metadata</link>). <quote>always</quote>
+        allows such usage. <quote>auto</quote> uses no THP initially, but may
+        begin to do so when metadata usage reaches a certain level. The default
+        is <quote>disabled</quote>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.trust_madvise">
+ <term>
+ <mallctl>opt.trust_madvise</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+        <listitem><para>If true, do not perform a runtime check that
+        <constant>MADV_DONTNEED</constant> actually zeros pages. The default
+        is disabled on Linux and enabled elsewhere.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.retain">
+ <term>
+ <mallctl>opt.retain</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>If true, retain unused virtual memory for later reuse
+ rather than discarding it by calling
+ <citerefentry><refentrytitle>munmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> or equivalent (see <link
+ linkend="stats.retained">stats.retained</link> for related details).
+ It also makes jemalloc use <citerefentry>
+ <refentrytitle>mmap</refentrytitle><manvolnum>2</manvolnum>
+ </citerefentry> or equivalent in a more greedy way, mapping larger
+ chunks in one go. This option is disabled by default unless discarding
+ virtual memory is known to trigger platform-specific performance
+ problems, namely 1) for [64-bit] Linux, which has a quirk in its virtual
+ memory allocation algorithm that causes semi-permanent VM map holes
+ under normal jemalloc operation; and 2) for [64-bit] Windows, which
+ disallows split / merged regions with
+ <parameter><constant>MEM_RELEASE</constant></parameter>. Although the
+        same issues may be present on 32-bit platforms as well, retaining
+        virtual memory for 32-bit Linux and Windows is disabled by default due
+        to the practical possibility of address space exhaustion.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.dss">
+ <term>
+ <mallctl>opt.dss</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>dss (<citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>) allocation precedence as
+ related to <citerefentry><refentrytitle>mmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> allocation. The following
+ settings are supported if
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> is supported by the operating
+ system: <quote>disabled</quote>, <quote>primary</quote>, and
+ <quote>secondary</quote>; otherwise only <quote>disabled</quote> is
+ supported. The default is <quote>secondary</quote> if
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> is supported by the operating
+ system; <quote>disabled</quote> otherwise.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.narenas">
+ <term>
+ <mallctl>opt.narenas</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Maximum number of arenas to use for automatic
+ multiplexing of threads and arenas. The default is four times the
+ number of CPUs, or one if there is a single CPU.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.oversize_threshold">
+ <term>
+ <mallctl>opt.oversize_threshold</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+        <listitem><para>The threshold in bytes above which requests are
+        considered oversize. Allocation requests with greater sizes are
+        fulfilled from a dedicated arena (automatically managed, though not
+        counted within <literal>narenas</literal>), in order to reduce
+        fragmentation by not mixing huge allocations with small ones. In
+        addition, the decay API guarantees for extents greater than the
+        specified threshold may be overridden. Note that requests with an
+        arena index specified via <constant>MALLOCX_ARENA</constant>, or from
+        threads associated with explicit arenas, will not be considered. The
+        default threshold is 8 MiB. Values not within large size classes
+        disable this feature.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.percpu_arena">
+ <term>
+ <mallctl>opt.percpu_arena</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+        <listitem><para>Per-CPU arena mode. Use the <quote>percpu</quote>
+        setting to enable this feature, which uses the number of CPUs to
+        determine the number of arenas and binds threads to arenas
+        dynamically, based on the CPU the thread is currently running on. The
+        <quote>phycpu</quote> setting uses one arena per physical CPU, so the
+        two hyperthreads on the same CPU share one arena. Note that no
+        runtime check for the availability of hyperthreading is currently
+        performed. When set to <quote>disabled</quote>, narenas and the
+        thread-to-arena association are not affected by this option. The
+        default is <quote>disabled</quote>.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.background_thread">
+ <term>
+ <mallctl>opt.background_thread</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+        <listitem><para>Internal background worker threads enabled/disabled.
+        Because of potential circular dependencies, enabling background
+        threads via this option may cause a crash or deadlock during
+        initialization. For a reliable way to use this feature, see <link
+        linkend="background_thread">background_thread</link> for dynamic
+        control options and details. This option is disabled by
+        default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.max_background_threads">
+ <term>
+ <mallctl>opt.max_background_threads</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+        <listitem><para>Maximum number of background threads that will be
+        created if <link linkend="background_thread">background_thread</link>
+        is set. Defaults to the number of CPUs.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.dirty_decay_ms">
+ <term>
+ <mallctl>opt.dirty_decay_ms</mallctl>
+ (<type>ssize_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Approximate time in milliseconds from the creation of a
+ set of unused dirty pages until an equivalent set of unused dirty pages
+ is purged (i.e. converted to muzzy via e.g.
+ <function>madvise(<parameter>...</parameter><parameter><constant>MADV_FREE</constant></parameter>)</function>
+ if supported by the operating system, or converted to clean otherwise)
+ and/or reused. Dirty pages are defined as previously having been
+ potentially written to by the application, and therefore consuming
+ physical memory, yet having no current use. The pages are incrementally
+ purged according to a sigmoidal decay curve that starts and ends with
+ zero purge rate. A decay time of 0 causes all unused dirty pages to be
+ purged immediately upon creation. A decay time of -1 disables purging.
+ The default decay time is 10 seconds. See <link
+ linkend="arenas.dirty_decay_ms"><mallctl>arenas.dirty_decay_ms</mallctl></link>
+ and <link
+ linkend="arena.i.dirty_decay_ms"><mallctl>arena.&lt;i&gt;.dirty_decay_ms</mallctl></link>
+ for related dynamic control options. See <link
+ linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
+        for a description of muzzy pages. Note that when the <link
+        linkend="opt.oversize_threshold"><mallctl>oversize_threshold</mallctl></link>
+        feature is enabled, the arenas reserved for oversize requests may have
+        their own default decay settings.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.muzzy_decay_ms">
+ <term>
+ <mallctl>opt.muzzy_decay_ms</mallctl>
+ (<type>ssize_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Approximate time in milliseconds from the creation of a
+ set of unused muzzy pages until an equivalent set of unused muzzy pages
+ is purged (i.e. converted to clean) and/or reused. Muzzy pages are
+ defined as previously having been unused dirty pages that were
+ subsequently purged in a manner that left them subject to the
+ reclamation whims of the operating system (e.g.
+ <function>madvise(<parameter>...</parameter><parameter><constant>MADV_FREE</constant></parameter>)</function>),
+ and therefore in an indeterminate state. The pages are incrementally
+ purged according to a sigmoidal decay curve that starts and ends with
+ zero purge rate. A decay time of 0 causes all unused muzzy pages to be
+ purged immediately upon creation. A decay time of -1 disables purging.
+ The default decay time is 10 seconds. See <link
+ linkend="arenas.muzzy_decay_ms"><mallctl>arenas.muzzy_decay_ms</mallctl></link>
+ and <link
+ linkend="arena.i.muzzy_decay_ms"><mallctl>arena.&lt;i&gt;.muzzy_decay_ms</mallctl></link>
+ for related dynamic control options.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.lg_extent_max_active_fit">
+ <term>
+ <mallctl>opt.lg_extent_max_active_fit</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>When reusing dirty extents, this determines the (log
+ base 2 of the) maximum ratio between the size of the active extent
+ selected (to split off from) and the size of the requested allocation.
+ This prevents the splitting of large active extents for smaller
+ allocations, which can reduce fragmentation over the long run
+        (especially for non-active extents). A lower value may reduce
+ fragmentation, at the cost of extra active extents. The default value
+ is 6, which gives a maximum ratio of 64 (2^6).</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.stats_print">
+ <term>
+ <mallctl>opt.stats_print</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Enable/disable statistics printing at exit. If
+ enabled, the <function>malloc_stats_print()</function>
+ function is called at program exit via an
+ <citerefentry><refentrytitle>atexit</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry> function. <link
+ linkend="opt.stats_print_opts"><mallctl>opt.stats_print_opts</mallctl></link>
+ can be combined to specify output options. If
+ <option>--enable-stats</option> is specified during configuration, this
+ has the potential to cause deadlock for a multi-threaded process that
+ exits while one or more threads are executing in the memory allocation
+ functions. Furthermore, <function>atexit()</function> may
+ allocate memory during application initialization and then deadlock
+ internally when jemalloc in turn calls
+ <function>atexit()</function>, so this option is not
+ universally usable (though the application can register its own
+ <function>atexit()</function> function with equivalent
+ functionality). Therefore, this option should only be used with care;
+ it is primarily intended as a performance tuning aid during application
+ development. This option is disabled by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.stats_print_opts">
+ <term>
+ <mallctl>opt.stats_print_opts</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Options (the <parameter>opts</parameter> string) to pass
+        to <function>malloc_stats_print()</function> at exit (enabled
+ through <link
+ linkend="opt.stats_print"><mallctl>opt.stats_print</mallctl></link>). See
+ available options in <link
+ linkend="malloc_stats_print_opts"><function>malloc_stats_print()</function></link>.
+ Has no effect unless <link
+ linkend="opt.stats_print"><mallctl>opt.stats_print</mallctl></link> is
+ enabled. The default is <quote></quote>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.stats_interval">
+ <term>
+ <mallctl>opt.stats_interval</mallctl>
+ (<type>int64_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Average interval between statistics outputs, as measured
+ in bytes of allocation activity. The actual interval may be sporadic
+ because decentralized event counters are used to avoid synchronization
+ bottlenecks. The output may be triggered on any thread, which then
+ calls <function>malloc_stats_print()</function>. <link
+ linkend="opt.stats_interval_opts"><mallctl>opt.stats_interval_opts</mallctl></link>
+ can be combined to specify output options. By default,
+ interval-triggered stats output is disabled (encoded as
+ -1).</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.stats_interval_opts">
+ <term>
+ <mallctl>opt.stats_interval_opts</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Options (the <parameter>opts</parameter> string) to pass
+        to <function>malloc_stats_print()</function> for interval-based
+ statistics printing (enabled
+ through <link
+ linkend="opt.stats_interval"><mallctl>opt.stats_interval</mallctl></link>). See
+ available options in <link
+ linkend="malloc_stats_print_opts"><function>malloc_stats_print()</function></link>.
+ Has no effect unless <link
+ linkend="opt.stats_interval"><mallctl>opt.stats_interval</mallctl></link> is
+ enabled. The default is <quote></quote>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.junk">
+ <term>
+ <mallctl>opt.junk</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ [<option>--enable-fill</option>]
+ </term>
+ <listitem><para>Junk filling. If set to <quote>alloc</quote>, each byte
+ of uninitialized allocated memory will be initialized to
+ <literal>0xa5</literal>. If set to <quote>free</quote>, all deallocated
+ memory will be initialized to <literal>0x5a</literal>. If set to
+ <quote>true</quote>, both allocated and deallocated memory will be
+        initialized, and if set to <quote>false</quote>, junk filling will be
+ disabled entirely. This is intended for debugging and will impact
+ performance negatively. This option is <quote>false</quote> by default
+ unless <option>--enable-debug</option> is specified during
+ configuration, in which case it is <quote>true</quote> by
+ default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.zero">
+ <term>
+ <mallctl>opt.zero</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-fill</option>]
+ </term>
+ <listitem><para>Zero filling enabled/disabled. If enabled, each byte
+ of uninitialized allocated memory will be initialized to 0. Note that
+ this initialization only happens once for each byte, so
+ <function>realloc()</function> and
+ <function>rallocx()</function> calls do not zero memory that
+ was previously allocated. This is intended for debugging and will
+ impact performance negatively. This option is disabled by default.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.utrace">
+ <term>
+ <mallctl>opt.utrace</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-utrace</option>]
+ </term>
+ <listitem><para>Allocation tracing based on
+ <citerefentry><refentrytitle>utrace</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> enabled/disabled. This option
+ is disabled by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.xmalloc">
+ <term>
+ <mallctl>opt.xmalloc</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-xmalloc</option>]
+ </term>
+ <listitem><para>Abort-on-out-of-memory enabled/disabled. If enabled,
+ rather than returning failure for any allocation function, display a
+ diagnostic message on <constant>STDERR_FILENO</constant> and cause the
+ program to drop core (using
+ <citerefentry><refentrytitle>abort</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry>). If an application is
+ designed to depend on this behavior, set the option at compile time by
+ including the following in the source code:
+ <programlisting language="C"><![CDATA[
+malloc_conf = "xmalloc:true";]]></programlisting>
+ This option is disabled by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.tcache">
+ <term>
+ <mallctl>opt.tcache</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Thread-specific caching (tcache) enabled/disabled. When
+ there are multiple threads, each thread uses a tcache for objects up to
+ a certain size. Thread-specific caching allows many allocations to be
+ satisfied without performing any thread synchronization, at the cost of
+ increased memory use. See the <link
+ linkend="opt.tcache_max"><mallctl>opt.tcache_max</mallctl></link>
+ option for related tuning information. This option is enabled by
+ default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.tcache_max">
+ <term>
+ <mallctl>opt.tcache_max</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Maximum size class to cache in the thread-specific cache
+ (tcache). At a minimum, the first size class is cached; and at a
+ maximum, size classes up to 8 MiB can be cached. The default maximum is
+ 32 KiB (2^15). As a convenience, this may also be set by specifying
+ lg_tcache_max, which will be taken to be the base-2 logarithm of the
+ setting of tcache_max.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.thp">
+ <term>
+ <mallctl>opt.thp</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Transparent hugepage (THP) mode. Settings "always",
+ "never" and "default" are available if THP is supported by the operating
+ system. The "always" setting enables transparent hugepage for all user
+ memory mappings with
+ <parameter><constant>MADV_HUGEPAGE</constant></parameter>; "never"
+ ensures no transparent hugepage with
+ <parameter><constant>MADV_NOHUGEPAGE</constant></parameter>; the default
+ setting "default" makes no changes. Note that: this option does not
+ affect THP for jemalloc internal metadata (see <link
+ linkend="opt.metadata_thp"><mallctl>opt.metadata_thp</mallctl></link>);
+ in addition, for arenas with customized <link
+ linkend="arena.i.extent_hooks"><mallctl>extent_hooks</mallctl></link>,
+ this option is bypassed as it is implemented as part of the default
+ extent hooks.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof">
+ <term>
+ <mallctl>opt.prof</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Memory profiling enabled/disabled. If enabled, profile
+ memory allocation activity. See the <link
+ linkend="opt.prof_active"><mallctl>opt.prof_active</mallctl></link>
+ option for on-the-fly activation/deactivation. See the <link
+ linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>
+ option for probabilistic sampling control. See the <link
+ linkend="opt.prof_accum"><mallctl>opt.prof_accum</mallctl></link>
+ option for control of cumulative sample reporting. See the <link
+ linkend="opt.lg_prof_interval"><mallctl>opt.lg_prof_interval</mallctl></link>
+ option for information on interval-triggered profile dumping, the <link
+ linkend="opt.prof_gdump"><mallctl>opt.prof_gdump</mallctl></link>
+ option for information on high-water-triggered profile dumping, and the
+ <link linkend="opt.prof_final"><mallctl>opt.prof_final</mallctl></link>
+ option for final profile dumping. Profile output is compatible with
+ the <command>jeprof</command> command, which is based on the
+ <command>pprof</command> that is developed as part of the <ulink
+ url="http://code.google.com/p/gperftools/">gperftools
+ package</ulink>. See <link linkend="heap_profile_format">HEAP PROFILE
+ FORMAT</link> for heap profile format documentation.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_prefix">
+ <term>
+ <mallctl>opt.prof_prefix</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Filename prefix for profile dumps. If the prefix is
+ set to the empty string, no automatic dumps will occur; this is
+ primarily useful for disabling the automatic final heap dump (which
+ also disables leak reporting, if enabled). The default prefix is
+ <filename>jeprof</filename>. This prefix value can be overridden by
+ <link linkend="prof.prefix"><mallctl>prof.prefix</mallctl></link>.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_active">
+ <term>
+ <mallctl>opt.prof_active</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Profiling activated/deactivated. This is a secondary
+ control mechanism that makes it possible to start the application with
+ profiling enabled (see the <link
+ linkend="opt.prof"><mallctl>opt.prof</mallctl></link> option) but
+ inactive, then toggle profiling at any time during program execution
+ with the <link
+ linkend="prof.active"><mallctl>prof.active</mallctl></link> mallctl.
+ This option is enabled by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_thread_active_init">
+ <term>
+ <mallctl>opt.prof_thread_active_init</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Initial setting for <link
+ linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
+ in newly created threads. The initial setting for newly created threads
+ can also be changed during execution via the <link
+ linkend="prof.thread_active_init"><mallctl>prof.thread_active_init</mallctl></link>
+ mallctl. This option is enabled by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.lg_prof_sample">
+ <term>
+ <mallctl>opt.lg_prof_sample</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Average interval (log base 2) between allocation
+ samples, as measured in bytes of allocation activity. Increasing the
+ sampling interval decreases profile fidelity, but also decreases the
+ computational overhead. The default sample interval is 512 KiB (2^19
+ B).</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_accum">
+ <term>
+ <mallctl>opt.prof_accum</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Reporting of cumulative object/byte counts in profile
+ dumps enabled/disabled. If this option is enabled, every unique
+ backtrace must be stored for the duration of execution. Depending on
+ the application, this can impose a large memory overhead, and the
+ cumulative counts are not always of interest. This option is disabled
+ by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.lg_prof_interval">
+ <term>
+ <mallctl>opt.lg_prof_interval</mallctl>
+ (<type>ssize_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Average interval (log base 2) between memory profile
+ dumps, as measured in bytes of allocation activity. The actual
+ interval between dumps may be sporadic because decentralized allocation
+ counters are used to avoid synchronization bottlenecks. Profiles are
+ dumped to files named according to the pattern
+ <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.i&lt;iseq&gt;.heap</filename>,
+ where <literal>&lt;prefix&gt;</literal> is controlled by the
+ <link
+ linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link> and
+ <link linkend="prof.prefix"><mallctl>prof.prefix</mallctl></link>
+ options. By default, interval-triggered profile dumping is disabled
+ (encoded as -1).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_gdump">
+ <term>
+ <mallctl>opt.prof_gdump</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Set the initial state of <link
+ linkend="prof.gdump"><mallctl>prof.gdump</mallctl></link>, which when
+ enabled triggers a memory profile dump every time the total virtual
+ memory exceeds the previous maximum. This option is disabled by
+ default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_final">
+ <term>
+ <mallctl>opt.prof_final</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Use an
+ <citerefentry><refentrytitle>atexit</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry> function to dump final memory
+ usage to a file named according to the pattern
+ <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.f.heap</filename>,
+ where <literal>&lt;prefix&gt;</literal> is controlled by the <link
+ linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link> and
+ <link linkend="prof.prefix"><mallctl>prof.prefix</mallctl></link>
+ options. Note that <function>atexit()</function> may allocate
+ memory during application initialization and then deadlock internally
+ when jemalloc in turn calls <function>atexit()</function>, so
+ this option is not universally usable (though the application can
+ register its own <function>atexit()</function> function with
+ equivalent functionality). This option is disabled by
+ default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_leak">
+ <term>
+ <mallctl>opt.prof_leak</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Leak reporting enabled/disabled. If enabled, use an
+ <citerefentry><refentrytitle>atexit</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry> function to report memory leaks
+ detected by allocation sampling. See the
+ <link linkend="opt.prof"><mallctl>opt.prof</mallctl></link> option for
+ information on analyzing heap profile output. Works only when combined
+ with <link linkend="opt.prof_final"><mallctl>opt.prof_final</mallctl>
+ </link>, otherwise does nothing. This option is disabled by default.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_leak_error">
+ <term>
+ <mallctl>opt.prof_leak_error</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Similar to <link linkend="opt.prof_leak"><mallctl>
+ opt.prof_leak</mallctl></link>, but makes the process exit with error
+ code 1 if a memory leak is detected. This option supersedes
+ <link linkend="opt.prof_leak"><mallctl>opt.prof_leak</mallctl></link>,
+ meaning that if both are specified, this option takes precedence. When
+ enabled, also enables <link linkend="opt.prof_leak"><mallctl>
+ opt.prof_leak</mallctl></link>. Works only when combined with
+ <link linkend="opt.prof_final"><mallctl>opt.prof_final</mallctl></link>,
+ otherwise does nothing. This option is disabled by default.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.zero_realloc">
+ <term>
+ <mallctl>opt.zero_realloc</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para> Determines the behavior of
+ <function>realloc()</function> when passed a value of zero for the new
+ size. <quote>alloc</quote> treats this as an allocation of size zero
+ (and returns a non-null result except in case of resource exhaustion).
+ <quote>free</quote> treats this as a deallocation of the pointer, and
+ returns <constant>NULL</constant> without setting
+ <varname>errno</varname>. <quote>abort</quote> aborts the process if
+ zero is passed. The default is <quote>free</quote> on Linux and
+ Windows, and <quote>alloc</quote> elsewhere.</para>
+
+ <para>There is considerable divergence of behaviors across
+ implementations in handling this case. Many have the behavior of
+ <quote>free</quote>. This can introduce security vulnerabilities, since
+ a <constant>NULL</constant> return value indicates failure, and the
+ continued validity of the passed-in pointer (per POSIX and C11).
+ <quote>alloc</quote> is safe, but can cause leaks in programs that
+ expect the common behavior. Programs intended to be portable and
+ leak-free cannot assume either behavior, and must therefore never call
+        realloc with a size of 0. The <quote>abort</quote> option enables
+        testing for this behavior.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.arena">
+ <term>
+ <mallctl>thread.arena</mallctl>
+ (<type>unsigned</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Get or set the arena associated with the calling
+ thread. If the specified arena was not initialized beforehand (see the
+ <link
+ linkend="arena.i.initialized"><mallctl>arena.i.initialized</mallctl></link>
+ mallctl), it will be automatically initialized as a side effect of
+        calling this interface.</para>
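+
+        <para>A minimal illustrative sketch of reading, and then overriding,
+        the calling thread's arena binding (the index 0 here is arbitrary):
+        <programlisting language="C"><![CDATA[
+unsigned arena;
+size_t sz = sizeof(arena);
+mallctl("thread.arena", &arena, &sz, NULL, 0);    /* read current */
+
+unsigned new_arena = 0;
+mallctl("thread.arena", NULL, NULL, &new_arena,   /* rebind thread */
+    sizeof(new_arena));]]></programlisting>
+        </para></listitem>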
+ </varlistentry>
+
+ <varlistentry id="thread.allocated">
+ <term>
+ <mallctl>thread.allocated</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Get the total number of bytes ever allocated by the
+ calling thread. This counter has the potential to wrap around; it is
+ up to the application to appropriately interpret the counter in such
+ cases.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.allocatedp">
+ <term>
+ <mallctl>thread.allocatedp</mallctl>
+ (<type>uint64_t *</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+        <listitem><para>Get a pointer to the value that is returned by the
+ <link
+ linkend="thread.allocated"><mallctl>thread.allocated</mallctl></link>
+ mallctl. This is useful for avoiding the overhead of repeated
+ <function>mallctl*()</function> calls. Note that the underlying counter
+        should not be modified by the application.</para>
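+
+        <para>For illustration, caching the pointer once and then reading the
+        counter directly thereafter:
+        <programlisting language="C"><![CDATA[
+uint64_t *allocatedp;
+size_t sz = sizeof(allocatedp);
+mallctl("thread.allocatedp", &allocatedp, &sz, NULL, 0);
+/* Later, on the same thread, read *allocatedp without mallctl()
+ * overhead; never write through the pointer. */
+uint64_t total = *allocatedp;]]></programlisting>
+        </para></listitem>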
+ </varlistentry>
+
+ <varlistentry id="thread.deallocated">
+ <term>
+ <mallctl>thread.deallocated</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Get the total number of bytes ever deallocated by the
+ calling thread. This counter has the potential to wrap around; it is
+ up to the application to appropriately interpret the counter in such
+ cases.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.deallocatedp">
+ <term>
+ <mallctl>thread.deallocatedp</mallctl>
+ (<type>uint64_t *</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+        <listitem><para>Get a pointer to the value that is returned by the
+ <link
+ linkend="thread.deallocated"><mallctl>thread.deallocated</mallctl></link>
+ mallctl. This is useful for avoiding the overhead of repeated
+ <function>mallctl*()</function> calls. Note that the underlying counter
+ should not be modified by the application.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.peak.read">
+ <term>
+ <mallctl>thread.peak.read</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Get an approximation of the maximum value of the
+ difference between the number of bytes allocated and the number of bytes
+ deallocated by the calling thread since the last call to <link
+ linkend="thread.peak.reset"><mallctl>thread.peak.reset</mallctl></link>,
+ or since the thread's creation if it has not called <link
+ linkend="thread.peak.reset"><mallctl>thread.peak.reset</mallctl></link>.
+ No guarantees are made about the quality of the approximation, but
+ jemalloc currently endeavors to maintain accuracy to within one hundred
+ kilobytes.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.peak.reset">
+ <term>
+ <mallctl>thread.peak.reset</mallctl>
+ (<type>void</type>)
+ <literal>--</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Resets the counter for net bytes allocated in the calling
+ thread to zero. This affects subsequent calls to <link
+ linkend="thread.peak.read"><mallctl>thread.peak.read</mallctl></link>,
+ but not the values returned by <link
+ linkend="thread.allocated"><mallctl>thread.allocated</mallctl></link>
+ or <link
+ linkend="thread.deallocated"><mallctl>thread.deallocated</mallctl></link>.
+        </para>
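+
+        <para>A sketch of bracketing a phase of work to measure its peak net
+        allocation; <function>do_work()</function> is a hypothetical
+        placeholder:
+        <programlisting language="C"><![CDATA[
+mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
+do_work();
+uint64_t peak;
+size_t sz = sizeof(peak);
+mallctl("thread.peak.read", &peak, &sz, NULL, 0);]]></programlisting>
+        </para></listitem>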
+ </varlistentry>
+
+ <varlistentry id="thread.tcache.enabled">
+ <term>
+ <mallctl>thread.tcache.enabled</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Enable/disable calling thread's tcache. The tcache is
+ implicitly flushed as a side effect of becoming
+ disabled (see <link
+ linkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.tcache.flush">
+ <term>
+ <mallctl>thread.tcache.flush</mallctl>
+ (<type>void</type>)
+ <literal>--</literal>
+ </term>
+ <listitem><para>Flush calling thread's thread-specific cache (tcache).
+ This interface releases all cached objects and internal data structures
+ associated with the calling thread's tcache. Ordinarily, this interface
+ need not be called, since automatic periodic incremental garbage
+ collection occurs, and the thread cache is automatically discarded when
+ a thread exits. However, garbage collection is triggered by allocation
+ activity, so it is possible for a thread that stops
+ allocating/deallocating to retain its cache indefinitely, in which case
+ the developer may find manual flushing useful.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.prof.name">
+ <term>
+ <mallctl>thread.prof.name</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal> or
+ <literal>-w</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Get/set the descriptive name associated with the calling
+ thread in memory profile dumps. An internal copy of the name string is
+ created, so the input string need not be maintained after this interface
+ completes execution. The output string of this interface should be
+ copied for non-ephemeral uses, because multiple implementation details
+ can cause asynchronous string deallocation. Furthermore, each
+ invocation of this interface can only read or write; simultaneous
+ read/write is not supported due to string lifetime limitations. The
+ name string must be nil-terminated and comprised only of characters in
+ the sets recognized
+ by <citerefentry><refentrytitle>isgraph</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry> and
+ <citerefentry><refentrytitle>isblank</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.prof.active">
+ <term>
+ <mallctl>thread.prof.active</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Control whether sampling is currently active for the
+ calling thread. This is an activation mechanism in addition to <link
+ linkend="prof.active"><mallctl>prof.active</mallctl></link>; both must
+ be active for the calling thread to sample. This flag is enabled by
+ default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.idle">
+ <term>
+ <mallctl>thread.idle</mallctl>
+ (<type>void</type>)
+ <literal>--</literal>
+ </term>
+ <listitem><para>Hints to jemalloc that the calling thread will be idle
+ for some nontrivial period of time (say, on the order of seconds), and
+ that doing some cleanup operations may be beneficial. There are no
+ guarantees as to what specific operations will be performed; currently
+ this flushes the caller's tcache and may (according to some heuristic)
+ purge its associated arena.</para>
+ <para>This is not intended to be a general-purpose background activity
+ mechanism, and threads should not wake up multiple times solely to call
+ it. Rather, a thread waiting for a task should do a timed wait first,
+ call <link linkend="thread.idle"><mallctl>thread.idle</mallctl></link>
+ if no task appears in the timeout interval, and then do an untimed wait.
+ For such a background activity mechanism, see
+ <link linkend="background_thread"><mallctl>background_thread</mallctl></link>.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="tcache.create">
+ <term>
+ <mallctl>tcache.create</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Create an explicit thread-specific cache (tcache) and
+ return an identifier that can be passed to the <link
+ linkend="MALLOCX_TCACHE"><constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant></link>
+ macro to explicitly use the specified cache rather than the
+ automatically managed one that is used by default. Each explicit cache
+ can be used by only one thread at a time; the application must assure
+ that this constraint holds.
+ </para>
+
+ <para>If the amount of space supplied for storing the thread-specific
+ cache identifier does not equal
+ <code language="C">sizeof(<type>unsigned</type>)</code>, no
+ thread-specific cache will be created, no data will be written to the
+ space pointed by <parameter>oldp</parameter>, and
+ <parameter>*oldlenp</parameter> will be set to 0.
+        </para>
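+
+        <para>For illustration, the full lifecycle of an explicit tcache:
+        <programlisting language="C"><![CDATA[
+unsigned tc;
+size_t sz = sizeof(tc);
+mallctl("tcache.create", &tc, &sz, NULL, 0);
+
+/* Route allocation and deallocation through the explicit tcache. */
+void *p = mallocx(64, MALLOCX_TCACHE(tc));
+dallocx(p, MALLOCX_TCACHE(tc));
+
+mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));]]></programlisting>
+        </para></listitem>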
+
+ </varlistentry>
+
+ <varlistentry id="tcache.flush">
+ <term>
+ <mallctl>tcache.flush</mallctl>
+ (<type>unsigned</type>)
+ <literal>-w</literal>
+ </term>
+ <listitem><para>Flush the specified thread-specific cache (tcache). The
+ same considerations apply to this interface as to <link
+ linkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>,
+ except that the tcache will never be automatically discarded.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="tcache.destroy">
+ <term>
+ <mallctl>tcache.destroy</mallctl>
+ (<type>unsigned</type>)
+ <literal>-w</literal>
+ </term>
+ <listitem><para>Flush the specified thread-specific cache (tcache) and
+ make the identifier available for use during a future tcache creation.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.initialized">
+ <term>
+ <mallctl>arena.&lt;i&gt;.initialized</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Get whether the specified arena's statistics are
+ initialized (i.e. the arena was initialized prior to the current epoch).
+ This interface can also be nominally used to query whether the merged
+ statistics corresponding to <constant>MALLCTL_ARENAS_ALL</constant> are
+ initialized (always true).</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.decay">
+ <term>
+ <mallctl>arena.&lt;i&gt;.decay</mallctl>
+ (<type>void</type>)
+ <literal>--</literal>
+ </term>
+ <listitem><para>Trigger decay-based purging of unused dirty/muzzy pages
+ for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
+ <constant>MALLCTL_ARENAS_ALL</constant>. The proportion of unused
+ dirty/muzzy pages to be purged depends on the current time; see <link
+ linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
+ and <link
+ linkend="opt.muzzy_decay_ms"><mallctl>opt.muzy_decay_ms</mallctl></link>
+ for details.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.purge">
+ <term>
+ <mallctl>arena.&lt;i&gt;.purge</mallctl>
+ (<type>void</type>)
+ <literal>--</literal>
+ </term>
+ <listitem><para>Purge all unused dirty pages for arena &lt;i&gt;, or for
+ all arenas if &lt;i&gt; equals <constant>MALLCTL_ARENAS_ALL</constant>.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.reset">
+ <term>
+ <mallctl>arena.&lt;i&gt;.reset</mallctl>
+ (<type>void</type>)
+ <literal>--</literal>
+ </term>
+ <listitem><para>Discard all of the arena's extant allocations. This
+ interface can only be used with arenas explicitly created via <link
+ linkend="arenas.create"><mallctl>arenas.create</mallctl></link>. None
+        of the arena's discarded/cached allocations may be accessed afterward.
+        As
+ part of this requirement, all thread caches which were used to
+ allocate/deallocate in conjunction with the arena must be flushed
+ beforehand.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.destroy">
+ <term>
+ <mallctl>arena.&lt;i&gt;.destroy</mallctl>
+ (<type>void</type>)
+ <literal>--</literal>
+ </term>
+ <listitem><para>Destroy the arena. Discard all of the arena's extant
+ allocations using the same mechanism as for <link
+ linkend="arena.i.reset"><mallctl>arena.&lt;i&gt;.reset</mallctl></link>
+ (with all the same constraints and side effects), merge the arena stats
+ into those accessible at arena index
+ <constant>MALLCTL_ARENAS_DESTROYED</constant>, and then completely
+ discard all metadata associated with the arena. Future calls to <link
+ linkend="arenas.create"><mallctl>arenas.create</mallctl></link> may
+ recycle the arena index. Destruction will fail if any threads are
+ currently associated with the arena as a result of calls to <link
+ linkend="thread.arena"><mallctl>thread.arena</mallctl></link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.dss">
+ <term>
+ <mallctl>arena.&lt;i&gt;.dss</mallctl>
+ (<type>const char *</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Set the precedence of dss allocation as related to mmap
+ allocation for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
+ <constant>MALLCTL_ARENAS_ALL</constant>. See <link
+ linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
+ settings.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.dirty_decay_ms">
+ <term>
+ <mallctl>arena.&lt;i&gt;.dirty_decay_ms</mallctl>
+ (<type>ssize_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Current per-arena approximate time in milliseconds from
+ the creation of a set of unused dirty pages until an equivalent set of
+ unused dirty pages is purged and/or reused. Each time this interface is
+ set, all currently unused dirty pages are considered to have fully
+ decayed, which causes immediate purging of all unused dirty pages unless
+ the decay time is set to -1 (i.e. purging disabled). See <link
+ linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
+        for additional information.</para>
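+
+        <para>For illustration, forcing arena 0 to treat all unused dirty
+        pages as fully decayed (and thus purge them immediately) by setting
+        the decay time to 0:
+        <programlisting language="C"><![CDATA[
+ssize_t decay_ms = 0;
+mallctl("arena.0.dirty_decay_ms", NULL, NULL, &decay_ms,
+    sizeof(decay_ms));]]></programlisting>
+        </para></listitem>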
+ </varlistentry>
+
+ <varlistentry id="arena.i.muzzy_decay_ms">
+ <term>
+ <mallctl>arena.&lt;i&gt;.muzzy_decay_ms</mallctl>
+ (<type>ssize_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Current per-arena approximate time in milliseconds from
+ the creation of a set of unused muzzy pages until an equivalent set of
+ unused muzzy pages is purged and/or reused. Each time this interface is
+ set, all currently unused muzzy pages are considered to have fully
+ decayed, which causes immediate purging of all unused muzzy pages unless
+ the decay time is set to -1 (i.e. purging disabled). See <link
+ linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
+ for additional information.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.retain_grow_limit">
+ <term>
+ <mallctl>arena.&lt;i&gt;.retain_grow_limit</mallctl>
+ (<type>size_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Maximum size to grow retained region (only relevant when
+ <link linkend="opt.retain"><mallctl>opt.retain</mallctl></link> is
+ enabled). This controls the maximum increment to expand virtual memory,
+ or allocation through <link
+ linkend="arena.i.extent_hooks"><mallctl>arena.&lt;i&gt;extent_hooks</mallctl></link>.
+ In particular, if customized extent hooks reserve physical memory
+ (e.g. 1G huge pages), this is useful to control the allocation hook's
+ input size. The default is no limit.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.extent_hooks">
+ <term>
+ <mallctl>arena.&lt;i&gt;.extent_hooks</mallctl>
+ (<type>extent_hooks_t *</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Get or set the extent management hook functions for
+ arena &lt;i&gt;. The functions must be capable of operating on all
+ extant extents associated with arena &lt;i&gt;, usually by passing
+ unknown extents to the replaced functions. In practice, it is feasible
+ to control allocation for arenas explicitly created via <link
+ linkend="arenas.create"><mallctl>arenas.create</mallctl></link> such
+ that all extents originate from an application-supplied extent allocator
+ (by specifying the custom extent hook functions during arena creation).
+ However, the API guarantees for the automatically created arenas may be
+ relaxed -- hooks set there may be called in a "best effort" fashion; in
+ addition there may be extents created prior to the application having an
+ opportunity to take over extent allocation.</para>
+
+ <programlisting language="C"><![CDATA[
+typedef struct extent_hooks_s extent_hooks_t;
+struct extent_hooks_s {
+ extent_alloc_t *alloc;
+ extent_dalloc_t *dalloc;
+ extent_destroy_t *destroy;
+ extent_commit_t *commit;
+ extent_decommit_t *decommit;
+ extent_purge_t *purge_lazy;
+ extent_purge_t *purge_forced;
+ extent_split_t *split;
+ extent_merge_t *merge;
+};]]></programlisting>
+ <para>The <type>extent_hooks_t</type> structure comprises function
+ pointers which are described individually below. jemalloc uses these
+ functions to manage extent lifetime, which starts off with allocation of
+ mapped committed memory, in the simplest case followed by deallocation.
+ However, there are performance and platform reasons to retain extents
+ for later reuse. Cleanup attempts cascade from deallocation to decommit
+ to forced purging to lazy purging, which gives the extent management
+ functions opportunities to reject the most permanent cleanup operations
+ in favor of less permanent (and often less costly) operations. All
+ operations except allocation can be universally opted out of by setting
+ the hook pointers to <constant>NULL</constant>, or selectively opted out
+ of by returning failure. Note that once the extent hook is set, the
+ structure is accessed directly by the associated arenas, so it must
+ remain valid for the entire lifetime of the arenas.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef void *<function>(extent_alloc_t)</function></funcdef>
+ <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
+ <paramdef>void *<parameter>new_addr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>alignment</parameter></paramdef>
+ <paramdef>bool *<parameter>zero</parameter></paramdef>
+ <paramdef>bool *<parameter>commit</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>An extent allocation function conforms to the
+ <type>extent_alloc_t</type> type and upon success returns a pointer to
+ <parameter>size</parameter> bytes of mapped memory on behalf of arena
+ <parameter>arena_ind</parameter> such that the extent's base address is
+ a multiple of <parameter>alignment</parameter>, as well as setting
+ <parameter>*zero</parameter> to indicate whether the extent is zeroed
+ and <parameter>*commit</parameter> to indicate whether the extent is
+ committed. Upon error the function returns <constant>NULL</constant>
+ and leaves <parameter>*zero</parameter> and
+ <parameter>*commit</parameter> unmodified. The
+ <parameter>size</parameter> parameter is always a multiple of the page
+ size. The <parameter>alignment</parameter> parameter is always a power
+ of two at least as large as the page size. Zeroing is mandatory if
+ <parameter>*zero</parameter> is true upon function entry. Committing is
+ mandatory if <parameter>*commit</parameter> is true upon function entry.
+ If <parameter>new_addr</parameter> is not <constant>NULL</constant>, the
+ returned pointer must be <parameter>new_addr</parameter> on success or
+ <constant>NULL</constant> on error. Committed memory may be committed
+ in absolute terms as on a system that does not overcommit, or in
+ implicit terms as on a system that overcommits and satisfies physical
+ memory needs on demand via soft page faults. Note that replacing the
+ default extent allocation function makes the arena's <link
+ linkend="arena.i.dss"><mallctl>arena.&lt;i&gt;.dss</mallctl></link>
+ setting irrelevant.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(extent_dalloc_t)</function></funcdef>
+ <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
+ <paramdef>void *<parameter>addr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>bool <parameter>committed</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>
+ An extent deallocation function conforms to the
+ <type>extent_dalloc_t</type> type and deallocates an extent at given
+ <parameter>addr</parameter> and <parameter>size</parameter> with
+ <parameter>committed</parameter>/decommitted memory as indicated, on
+ behalf of arena <parameter>arena_ind</parameter>, returning false upon
+ success. If the function returns true, this indicates opt-out from
+ deallocation; the virtual memory mapping associated with the extent
+ remains mapped, in the same commit state, and available for future use,
+ in which case it will be automatically retained for later reuse.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef void <function>(extent_destroy_t)</function></funcdef>
+ <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
+ <paramdef>void *<parameter>addr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>bool <parameter>committed</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>
+ An extent destruction function conforms to the
+ <type>extent_destroy_t</type> type and unconditionally destroys an
+ extent at given <parameter>addr</parameter> and
+ <parameter>size</parameter> with
+ <parameter>committed</parameter>/decommitted memory as indicated, on
+ behalf of arena <parameter>arena_ind</parameter>. This function may be
+ called to destroy retained extents during arena destruction (see <link
+ linkend="arena.i.destroy"><mallctl>arena.&lt;i&gt;.destroy</mallctl></link>).</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(extent_commit_t)</function></funcdef>
+ <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
+ <paramdef>void *<parameter>addr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>offset</parameter></paramdef>
+ <paramdef>size_t <parameter>length</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>An extent commit function conforms to the
+ <type>extent_commit_t</type> type and commits zeroed physical memory to
+ back pages within an extent at given <parameter>addr</parameter> and
+ <parameter>size</parameter> at <parameter>offset</parameter> bytes,
+ extending for <parameter>length</parameter> on behalf of arena
+ <parameter>arena_ind</parameter>, returning false upon success.
+ Committed memory may be committed in absolute terms as on a system that
+ does not overcommit, or in implicit terms as on a system that
+ overcommits and satisfies physical memory needs on demand via soft page
+ faults. If the function returns true, this indicates insufficient
+ physical memory to satisfy the request.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(extent_decommit_t)</function></funcdef>
+ <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
+ <paramdef>void *<parameter>addr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>offset</parameter></paramdef>
+ <paramdef>size_t <parameter>length</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>An extent decommit function conforms to the
+ <type>extent_decommit_t</type> type and decommits any physical memory
+ that is backing pages within an extent at given
+ <parameter>addr</parameter> and <parameter>size</parameter> at
+ <parameter>offset</parameter> bytes, extending for
+ <parameter>length</parameter> on behalf of arena
+ <parameter>arena_ind</parameter>, returning false upon success, in which
+ case the pages will be committed via the extent commit function before
+ being reused. If the function returns true, this indicates opt-out from
+ decommit; the memory remains committed and available for future use, in
+ which case it will be automatically retained for later reuse.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(extent_purge_t)</function></funcdef>
+ <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
+ <paramdef>void *<parameter>addr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>offset</parameter></paramdef>
+ <paramdef>size_t <parameter>length</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>An extent purge function conforms to the
+ <type>extent_purge_t</type> type and discards physical pages
+ within the virtual memory mapping associated with an extent at given
+ <parameter>addr</parameter> and <parameter>size</parameter> at
+ <parameter>offset</parameter> bytes, extending for
+ <parameter>length</parameter> on behalf of arena
+ <parameter>arena_ind</parameter>. A lazy extent purge function (e.g.
+ implemented via
+ <function>madvise(<parameter>...</parameter><parameter><constant>MADV_FREE</constant></parameter>)</function>)
+ can delay purging indefinitely and leave the pages within the purged
+ virtual memory range in an indeterminate state, whereas a forced extent
+ purge function immediately purges, and the pages within the virtual
+ memory range will be zero-filled the next time they are accessed. If
+ the function returns true, this indicates failure to purge.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(extent_split_t)</function></funcdef>
+ <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
+ <paramdef>void *<parameter>addr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>size_a</parameter></paramdef>
+ <paramdef>size_t <parameter>size_b</parameter></paramdef>
+ <paramdef>bool <parameter>committed</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>An extent split function conforms to the
+ <type>extent_split_t</type> type and optionally splits an extent at
+ given <parameter>addr</parameter> and <parameter>size</parameter> into
+ two adjacent extents, the first of <parameter>size_a</parameter> bytes,
+ and the second of <parameter>size_b</parameter> bytes, operating on
+ <parameter>committed</parameter>/decommitted memory as indicated, on
+ behalf of arena <parameter>arena_ind</parameter>, returning false upon
+ success. If the function returns true, this indicates that the extent
+ remains unsplit and therefore should continue to be operated on as a
+ whole.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(extent_merge_t)</function></funcdef>
+ <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
+ <paramdef>void *<parameter>addr_a</parameter></paramdef>
+ <paramdef>size_t <parameter>size_a</parameter></paramdef>
+ <paramdef>void *<parameter>addr_b</parameter></paramdef>
+ <paramdef>size_t <parameter>size_b</parameter></paramdef>
+ <paramdef>bool <parameter>committed</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>An extent merge function conforms to the
+ <type>extent_merge_t</type> type and optionally merges adjacent extents,
+ at given <parameter>addr_a</parameter> and <parameter>size_a</parameter>
+ with given <parameter>addr_b</parameter> and
+ <parameter>size_b</parameter> into one contiguous extent, operating on
+ <parameter>committed</parameter>/decommitted memory as indicated, on
+ behalf of arena <parameter>arena_ind</parameter>, returning false upon
+ success. If the function returns true, this indicates that the extents
+ remain distinct mappings and therefore should continue to be operated on
+ independently.</para>
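+
+ <para>The following sketch shows the mechanics under simplifying
+ assumptions (the hook body and arena setup are illustrative
+ application code, not part of jemalloc): all extent allocation for
+ one explicitly created arena is routed through
+ <citerefentry><refentrytitle>mmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>, and the optional hooks are
+ opted out of via <constant>NULL</constant>:</para>
+
+ <programlisting language="C"><![CDATA[
+#include <stdbool.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <jemalloc/jemalloc.h>
+
+static void *
+sketch_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
+    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
+    (void)extent_hooks; (void)arena_ind;
+    /* Decline fixed-address and large-alignment requests; a production
+     * hook would over-allocate and trim to honor any alignment. */
+    if (new_addr != NULL || alignment > (size_t)sysconf(_SC_PAGESIZE))
+        return NULL;
+    void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
+        MAP_PRIVATE | MAP_ANON, -1, 0);
+    if (p == MAP_FAILED)
+        return NULL;
+    *zero = true;   /* Anonymous mappings are zero-filled... */
+    *commit = true; /* ...and committed (modulo overcommit). */
+    return p;
+}
+
+/* Must remain valid for the entire lifetime of any arena using it. */
+static extent_hooks_t sketch_hooks = {
+    sketch_alloc,
+    NULL, /* dalloc: opt out; extents are retained */
+    NULL, /* destroy */
+    NULL, /* commit */
+    NULL, /* decommit */
+    NULL, /* purge_lazy */
+    NULL, /* purge_forced */
+    NULL, /* split */
+    NULL  /* merge */
+};
+
+int
+create_hooked_arena(unsigned *arena_ind) {
+    size_t sz = sizeof(*arena_ind);
+    extent_hooks_t *hooks = &sketch_hooks;
+    /* Supplying hooks at creation ensures every extent in the new
+     * arena originates from sketch_alloc(). */
+    return mallctl("arenas.create", arena_ind, &sz, &hooks, sizeof(hooks));
+}]]></programlisting>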
+ </listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.narenas">
+ <term>
+ <mallctl>arenas.narenas</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Current limit on number of arenas.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.dirty_decay_ms">
+ <term>
+ <mallctl>arenas.dirty_decay_ms</mallctl>
+ (<type>ssize_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Current default per-arena approximate time in
+ milliseconds from the creation of a set of unused dirty pages until an
+ equivalent set of unused dirty pages is purged and/or reused, used to
+ initialize <link
+ linkend="arena.i.dirty_decay_ms"><mallctl>arena.&lt;i&gt;.dirty_decay_ms</mallctl></link>
+ during arena creation. See <link
+ linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
+ for additional information.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.muzzy_decay_ms">
+ <term>
+ <mallctl>arenas.muzzy_decay_ms</mallctl>
+ (<type>ssize_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Current default per-arena approximate time in
+ milliseconds from the creation of a set of unused muzzy pages until an
+ equivalent set of unused muzzy pages is purged and/or reused, used to
+ initialize <link
+ linkend="arena.i.muzzy_decay_ms"><mallctl>arena.&lt;i&gt;.muzzy_decay_ms</mallctl></link>
+ during arena creation. See <link
+ linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
+ for additional information.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.quantum">
+ <term>
+ <mallctl>arenas.quantum</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Quantum size.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.page">
+ <term>
+ <mallctl>arenas.page</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Page size.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.tcache_max">
+ <term>
+ <mallctl>arenas.tcache_max</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Maximum thread-cached size class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.nbins">
+ <term>
+ <mallctl>arenas.nbins</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of bin size classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.nhbins">
+ <term>
+ <mallctl>arenas.nhbins</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Total number of thread cache bin size
+ classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.bin.i.size">
+ <term>
+ <mallctl>arenas.bin.&lt;i&gt;.size</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Maximum size supported by size class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.bin.i.nregs">
+ <term>
+ <mallctl>arenas.bin.&lt;i&gt;.nregs</mallctl>
+ (<type>uint32_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of regions per slab.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.bin.i.slab_size">
+ <term>
+ <mallctl>arenas.bin.&lt;i&gt;.slab_size</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of bytes per slab.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.nlextents">
+ <term>
+ <mallctl>arenas.nlextents</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Total number of large size classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.lextent.i.size">
+ <term>
+ <mallctl>arenas.lextent.&lt;i&gt;.size</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Maximum size supported by this large size
+ class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.create">
+ <term>
+ <mallctl>arenas.create</mallctl>
+ (<type>unsigned</type>, <type>extent_hooks_t *</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Explicitly create a new arena outside the range of
+ automatically managed arenas, with optionally specified extent hooks,
+ and return the new arena index.</para>
+
+ <para>If the amount of space supplied for storing the arena index does
+ not equal <code language="C">sizeof(<type>unsigned</type>)</code>, no
+ arena will be created, no data will be written to the space pointed to
+ by <parameter>oldp</parameter>, and <parameter>*oldlenp</parameter>
+ will be set to 0.
+ </para>
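+
+ <para>A minimal sketch of correct use, checking both the return value
+ and the written length:</para>
+
+ <programlisting language="C"><![CDATA[
+#include <stdlib.h>
+#include <jemalloc/jemalloc.h>
+
+unsigned
+new_arena(void) {
+    unsigned arena_ind;
+    size_t sz = sizeof(arena_ind); /* anything else fails */
+    if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0 ||
+        sz != sizeof(arena_ind))
+        abort();
+    return arena_ind;
+}]]></programlisting>
+ </listitem>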
+ </varlistentry>
+
+ <varlistentry id="arenas.lookup">
+ <term>
+ <mallctl>arenas.lookup</mallctl>
+ (<type>unsigned</type>, <type>void*</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Index of the arena to which an allocation belongs.</para>
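+
+ <para>The allocation address is passed in via
+ <parameter>newp</parameter> and the owning arena's index is returned
+ via <parameter>oldp</parameter>, as in this sketch:</para>
+
+ <programlisting language="C"><![CDATA[
+#include <jemalloc/jemalloc.h>
+
+unsigned
+arena_of(void *ptr) {
+    unsigned arena_ind;
+    size_t sz = sizeof(arena_ind);
+    mallctl("arenas.lookup", &arena_ind, &sz, &ptr, sizeof(ptr));
+    return arena_ind;
+}]]></programlisting>
+ </listitem>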
+ </varlistentry>
+
+ <varlistentry id="prof.thread_active_init">
+ <term>
+ <mallctl>prof.thread_active_init</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Control the initial setting for <link
+ linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
+ in newly created threads. See the <link
+ linkend="opt.prof_thread_active_init"><mallctl>opt.prof_thread_active_init</mallctl></link>
+ option for additional information.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.active">
+ <term>
+ <mallctl>prof.active</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Control whether sampling is currently active. See the
+ <link
+ linkend="opt.prof_active"><mallctl>opt.prof_active</mallctl></link>
+ option for additional information, as well as the interrelated <link
+ linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
+ mallctl.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.dump">
+ <term>
+ <mallctl>prof.dump</mallctl>
+ (<type>const char *</type>)
+ <literal>-w</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Dump a memory profile to the specified file, or if NULL
+ is specified, to a file according to the pattern
+ <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.m&lt;mseq&gt;.heap</filename>,
+ where <literal>&lt;prefix&gt;</literal> is controlled by the
+ <link linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
+ and <link linkend="prof.prefix"><mallctl>prof.prefix</mallctl></link>
+ options.</para>
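+
+ <para>For example, a dump to an explicitly named file (the filename
+ here is hypothetical) can be triggered as follows:</para>
+
+ <programlisting language="C"><![CDATA[
+#include <jemalloc/jemalloc.h>
+
+void
+dump_heap_profile(void) {
+    const char *fname = "myapp.heap"; /* hypothetical path */
+    mallctl("prof.dump", NULL, NULL, &fname, sizeof(fname));
+}]]></programlisting>
+ </listitem>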
+ </varlistentry>
+
+ <varlistentry id="prof.prefix">
+ <term>
+ <mallctl>prof.prefix</mallctl>
+ (<type>const char *</type>)
+ <literal>-w</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Set the filename prefix for profile dumps. See
+ <link
+ linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
+ for the default setting. This can be useful for differentiating
+ profile dumps, e.g. those from forked processes.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.gdump">
+ <term>
+ <mallctl>prof.gdump</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>When enabled, trigger a memory profile dump every time
+ the total virtual memory exceeds the previous maximum. Profiles are
+ dumped to files named according to the pattern
+ <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.u&lt;useq&gt;.heap</filename>,
+ where <literal>&lt;prefix&gt;</literal> is controlled by the <link
+ linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link> and
+ <link linkend="prof.prefix"><mallctl>prof.prefix</mallctl></link>
+ options.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.reset">
+ <term>
+ <mallctl>prof.reset</mallctl>
+ (<type>size_t</type>)
+ <literal>-w</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Reset all memory profile statistics, and optionally
+ update the sample rate (see <link
+ linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>
+ and <link
+ linkend="prof.lg_sample"><mallctl>prof.lg_sample</mallctl></link>).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.lg_sample">
+ <term>
+ <mallctl>prof.lg_sample</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Get the current sample rate (see <link
+ linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.interval">
+ <term>
+ <mallctl>prof.interval</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Average number of bytes allocated between
+ interval-based profile dumps. See the
+ <link
+ linkend="opt.lg_prof_interval"><mallctl>opt.lg_prof_interval</mallctl></link>
+ option for additional information.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.allocated">
+ <term>
+ <mallctl>stats.allocated</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Total number of bytes allocated by the
+ application.</para>
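+
+ <para>Statistics of this kind are cached snapshots, refreshed by
+ writing to the <mallctl>epoch</mallctl> mallctl; a typical read
+ therefore looks like the following sketch:</para>
+
+ <programlisting language="C"><![CDATA[
+#include <stdint.h>
+#include <jemalloc/jemalloc.h>
+
+size_t
+current_allocated(void) {
+    uint64_t epoch = 1;
+    size_t sz = sizeof(epoch);
+    mallctl("epoch", &epoch, &sz, &epoch, sz); /* refresh stats */
+    size_t allocated;
+    sz = sizeof(allocated);
+    mallctl("stats.allocated", &allocated, &sz, NULL, 0);
+    return allocated;
+}]]></programlisting>
+ </listitem>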
+ </varlistentry>
+
+ <varlistentry id="stats.active">
+ <term>
+ <mallctl>stats.active</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Total number of bytes in active pages allocated by the
+ application. This is a multiple of the page size, and greater than or
+ equal to <link
+ linkend="stats.allocated"><mallctl>stats.allocated</mallctl></link>.
+ This does not include <link linkend="stats.arenas.i.pdirty">
+ <mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl></link>,
+ <link linkend="stats.arenas.i.pmuzzy">
+ <mallctl>stats.arenas.&lt;i&gt;.pmuzzy</mallctl></link>, or pages
+ entirely devoted to allocator metadata.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.metadata">
+ <term>
+ <mallctl>stats.metadata</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Total number of bytes dedicated to metadata, which
+ comprise base allocations used for bootstrap-sensitive allocator
+ metadata structures (see <link
+ linkend="stats.arenas.i.base"><mallctl>stats.arenas.&lt;i&gt;.base</mallctl></link>)
+ and internal allocations (see <link
+ linkend="stats.arenas.i.internal"><mallctl>stats.arenas.&lt;i&gt;.internal</mallctl></link>).
+ Transparent huge page (enabled with <link
+ linkend="opt.metadata_thp">opt.metadata_thp</link>) usage is not
+ considered.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.metadata_thp">
+ <term>
+ <mallctl>stats.metadata_thp</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of transparent huge pages (THP) used for
+ metadata. See <link
+ linkend="stats.metadata"><mallctl>stats.metadata</mallctl></link> and
+ <link linkend="opt.metadata_thp">opt.metadata_thp</link>) for
+ details.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.resident">
+ <term>
+ <mallctl>stats.resident</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Maximum number of bytes in physically resident data
+ pages mapped by the allocator, comprising all pages dedicated to
+ allocator metadata, pages backing active allocations, and unused dirty
+ pages. This is a maximum rather than precise because pages may not
+ actually be physically resident if they correspond to demand-zeroed
+ virtual memory that has not yet been touched. This is a multiple of the
+ page size, and is larger than <link
+ linkend="stats.active"><mallctl>stats.active</mallctl></link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.mapped">
+ <term>
+ <mallctl>stats.mapped</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Total number of bytes in active extents mapped by the
+ allocator. This is larger than <link
+ linkend="stats.active"><mallctl>stats.active</mallctl></link>. This
+ does not include inactive extents, even those that contain unused dirty
+ pages, which means that there is no strict ordering between this and
+ <link
+ linkend="stats.resident"><mallctl>stats.resident</mallctl></link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.retained">
+ <term>
+ <mallctl>stats.retained</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Total number of bytes in virtual memory mappings that
+ were retained rather than being returned to the operating system via
+ e.g. <citerefentry><refentrytitle>munmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> or similar. Retained virtual
+ memory is typically untouched, decommitted, or purged, so it has no
+ strongly associated physical memory (see <link
+ linkend="arena.i.extent_hooks">extent hooks</link> for details).
+ Retained memory is excluded from mapped memory statistics, e.g. <link
+ linkend="stats.mapped"><mallctl>stats.mapped</mallctl></link>.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.zero_reallocs">
+ <term>
+ <mallctl>stats.zero_reallocs</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of times that <function>realloc()</function>
+ was called with a non-<constant>NULL</constant> pointer argument and a
+ <constant>0</constant> size argument. This is a fundamentally unsafe
+ pattern in portable programs; see <link linkend="opt.zero_realloc">
+ <mallctl>opt.zero_realloc</mallctl></link> for details.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.background_thread.num_threads">
+ <term>
+ <mallctl>stats.background_thread.num_threads</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para> Number of <link linkend="background_thread">background
+ threads</link> running currently.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.background_thread.num_runs">
+ <term>
+ <mallctl>stats.background_thread.num_runs</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para> Total number of runs from all <link
+ linkend="background_thread">background threads</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.background_thread.run_interval">
+ <term>
+ <mallctl>stats.background_thread.run_interval</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para> Average run interval in nanoseconds of <link
+ linkend="background_thread">background threads</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.mutexes.ctl">
+ <term>
+ <mallctl>stats.mutexes.ctl.{counter}</mallctl>
+ (<type>counter specific type</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>ctl</varname> mutex (global
+ scope; mallctl related). <mallctl>{counter}</mallctl> is one of the
+ counters below:</para>
+ <varlistentry id="mutex_counters">
+ <listitem><para><varname>num_ops</varname> (<type>uint64_t</type>):
+ Total number of lock acquisition operations on this mutex.</para>
+
+ <para><varname>num_spin_acq</varname> (<type>uint64_t</type>): Number
+ of times the mutex was spin-acquired. When the mutex is locked and
+ cannot be acquired immediately, jemalloc performs a short period of
+ spin-retry. Acquisition through spinning generally means the
+ contention was lightweight and did not cause context
+ switches.</para>
+
+ <para><varname>num_wait</varname> (<type>uint64_t</type>): Number of
+ times the mutex was wait-acquired, which means the contention was not
+ resolved by spin-retry and a blocking operation was likely required to
+ acquire the mutex. This event generally implies higher cost and longer
+ delay, and should be investigated if it happens
+ often.</para>
+
+ <para><varname>max_wait_time</varname> (<type>uint64_t</type>):
+ Maximum length of time in nanoseconds spent on a single wait-acquired
+ lock operation. Note that to avoid profiling overhead on the common
+ path, this does not consider spin-acquired cases.</para>
+
+ <para><varname>total_wait_time</varname> (<type>uint64_t</type>):
+ Cumulative time in nanoseconds spent on wait-acquired lock operations.
+ Similarly, spin-acquired cases are not considered.</para>
+
+ <para><varname>max_num_thds</varname> (<type>uint32_t</type>): Maximum
+ number of threads waiting on this mutex simultaneously. Similarly,
+ spin-acquired cases are not considered.</para>
+
+ <para><varname>num_owner_switch</varname> (<type>uint64_t</type>):
+ Number of times the current mutex owner is different from the previous
+ one. This event does not generally imply an issue; rather it is an
+ indicator of how often the protected data are accessed by different
+ threads.
+ </para>
+ </listitem>
+ </varlistentry>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.mutexes.background_thread">
+ <term>
+ <mallctl>stats.mutexes.background_thread.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>background_thread</varname> mutex
+ (global scope; <link
+ linkend="background_thread"><mallctl>background_thread</mallctl></link>
+ related). <mallctl>{counter}</mallctl> is one of the counters in <link
+ linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.mutexes.prof">
+ <term>
+ <mallctl>stats.mutexes.prof.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>prof</varname> mutex (global
+ scope; profiling related). <mallctl>{counter}</mallctl> is one of the
+ counters in <link linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.mutexes.prof_thds_data">
+ <term>
+ <mallctl>stats.mutexes.prof_thds_data.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>prof</varname> threads data mutex
+ (global scope; profiling related). <mallctl>{counter}</mallctl> is one
+ of the counters in <link linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.mutexes.prof_dump">
+ <term>
+ <mallctl>stats.mutexes.prof_dump.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>prof</varname> dumping mutex
+ (global scope; profiling related). <mallctl>{counter}</mallctl> is one
+ of the counters in <link linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.mutexes.reset">
+ <term>
+ <mallctl>stats.mutexes.reset</mallctl>
+ (<type>void</type>) <literal>--</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Reset all mutex profile statistics, including global
+ mutexes, arena mutexes and bin mutexes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.dss">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.dss</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>dss (<citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>) allocation precedence as
+ related to <citerefentry><refentrytitle>mmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> allocation. See <link
+ linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for details.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.dirty_decay_ms">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.dirty_decay_ms</mallctl>
+ (<type>ssize_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Approximate time in milliseconds from the creation of a
+ set of unused dirty pages until an equivalent set of unused dirty pages
+ is purged and/or reused. See <link
+ linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
+ for details.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.muzzy_decay_ms">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.muzzy_decay_ms</mallctl>
+ (<type>ssize_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Approximate time in milliseconds from the creation of a
+ set of unused muzzy pages until an equivalent set of unused muzzy pages
+ is purged and/or reused. See <link
+ linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
+ for details.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.nthreads">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.nthreads</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of threads currently assigned to
+ arena.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.uptime">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.uptime</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Time elapsed (in nanoseconds) since the arena was
+ created. If &lt;i&gt; equals <constant>0</constant> or
+ <constant>MALLCTL_ARENAS_ALL</constant>, this is the uptime since malloc
+ initialization.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.pactive">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.pactive</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of pages in active extents.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.pdirty">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of pages within unused extents that are
+ potentially dirty, and for which <function>madvise()</function> or
+ similar has not been called. See <link
+ linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
+ for a description of dirty pages.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.pmuzzy">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.pmuzzy</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of pages within unused extents that are muzzy.
+ See <link
+ linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
+ for a description of muzzy pages.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mapped">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mapped</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of mapped bytes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.retained">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.retained</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of retained bytes. See <link
+ linkend="stats.retained"><mallctl>stats.retained</mallctl></link> for
+ details.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.extent_avail">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.extent_avail</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of allocated (but unused) extent structs in this
+ arena.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.base">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.base</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>
+ Number of bytes dedicated to bootstrap-sensitive allocator metadata
+ structures.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.internal">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.internal</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of bytes dedicated to internal allocations.
+ Internal allocations differ from application-originated allocations in
+ that they are for internal use, and that they are omitted from heap
+ profiles.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.metadata_thp">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.metadata_thp</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of transparent huge pages (THP) used for
+ metadata. See <link linkend="opt.metadata_thp">opt.metadata_thp</link>
+ for details.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.resident">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.resident</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Maximum number of bytes in physically resident data
+ pages mapped by the arena, comprising all pages dedicated to allocator
+ metadata, pages backing active allocations, and unused dirty pages.
+ This is a maximum rather than precise because pages may not actually be
+ physically resident if they correspond to demand-zeroed virtual memory
+ that has not yet been touched. This is a multiple of the page
+ size.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.dirty_npurge">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.dirty_npurge</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of dirty page purge sweeps performed.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.dirty_nmadvise">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.dirty_nmadvise</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of <function>madvise()</function> or similar
+ calls made to purge dirty pages.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.dirty_purged">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.dirty_purged</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of dirty pages purged.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.muzzy_npurge">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.muzzy_npurge</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of muzzy page purge sweeps performed.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.muzzy_nmadvise">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.muzzy_nmadvise</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of <function>madvise()</function> or similar
+ calls made to purge muzzy pages.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.muzzy_purged">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.muzzy_purged</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of muzzy pages purged.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.small.allocated">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.small.allocated</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of bytes currently allocated by small objects.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.small.nmalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.small.nmalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times a small allocation was
+ requested from the arena's bins, whether to fill the relevant tcache if
+ <link linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is
+ enabled, or to directly satisfy an allocation request
+ otherwise.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.small.ndalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.small.ndalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times a small allocation was
+ returned to the arena's bins, whether to flush the relevant tcache if
+ <link linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is
+ enabled, or to directly deallocate an allocation
+ otherwise.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.small.nrequests">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.small.nrequests</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of allocation requests satisfied by
+ all bin size classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.small.nfills">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.small.nfills</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of tcache fills by all small size
+ classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.small.nflushes">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.small.nflushes</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of tcache flushes by all small size
+ classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.large.allocated">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.large.allocated</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of bytes currently allocated by large objects.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.large.nmalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.large.nmalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times a large extent was allocated
+ from the arena, whether to fill the relevant tcache if <link
+ linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled and
+ the size class is within the range being cached, or to directly satisfy
+ an allocation request otherwise.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.large.ndalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.large.ndalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times a large extent was returned
+ to the arena, whether to flush the relevant tcache if <link
+ linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled and
+ the size class is within the range being cached, or to directly
+ deallocate an allocation otherwise.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.large.nrequests">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.large.nrequests</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of allocation requests satisfied by
+ all large size classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.large.nfills">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.large.nfills</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of tcache fills by all large size
+ classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.large.nflushes">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.large.nflushes</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of tcache flushes by all large size
+ classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nmalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nmalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times a bin region of the
+ corresponding size class was allocated from the arena, whether to fill
+ the relevant tcache if <link
+ linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled, or
+ to directly satisfy an allocation request otherwise.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.ndalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.ndalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times a bin region of the
+ corresponding size class was returned to the arena, whether to flush the
+ relevant tcache if <link
+ linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled, or
+ to directly deallocate an allocation otherwise.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nrequests">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nrequests</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of allocation requests satisfied by
+ bin regions of the corresponding size class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.curregs">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curregs</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Current number of regions for this size
+ class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nfills">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nfills</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Cumulative number of tcache fills.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nflushes">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nflushes</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Cumulative number of tcache flushes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nslabs">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nslabs</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of slabs created.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nreslabs">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nreslabs</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times the current slab from which
+ to allocate changed.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.curslabs">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curslabs</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Current number of slabs.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nonfull_slabs">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nonfull_slabs</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Current number of nonfull slabs.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.mutex">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.mutex.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on
+ <varname>arena.&lt;i&gt;.bins.&lt;j&gt;</varname> mutex (arena bin
+ scope; bin operation related). <mallctl>{counter}</mallctl> is one of
+ the counters in <link linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.extents.n">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.extents.&lt;j&gt;.n{extent_type}</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para> Number of extents of the given type in this arena in
+ the bucket corresponding to page size index &lt;j&gt;. The extent type
+ is one of dirty, muzzy, or retained.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.extents.bytes">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.extents.&lt;j&gt;.{extent_type}_bytes</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para> Sum of the bytes managed by extents of the given type
+ in this arena in the bucket corresponding to page size index &lt;j&gt;.
+ The extent type is one of dirty, muzzy, or retained.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.lextents.j.nmalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.nmalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times a large extent of the
+ corresponding size class was allocated from the arena, whether to fill
+ the relevant tcache if <link
+ linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled and
+ the size class is within the range being cached, or to directly satisfy
+ an allocation request otherwise.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.lextents.j.ndalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.ndalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times a large extent of the
+ corresponding size class was returned to the arena, whether to flush the
+ relevant tcache if <link
+ linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled and
+ the size class is within the range being cached, or to directly
+ deallocate an allocation otherwise.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.lextents.j.nrequests">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.nrequests</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of allocation requests satisfied by
+ large extents of the corresponding size class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.lextents.j.curlextents">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.curlextents</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Current number of large allocations for this size class.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mutexes.large">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mutexes.large.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>arena.&lt;i&gt;.large</varname>
+ mutex (arena scope; large allocation related).
+ <mallctl>{counter}</mallctl> is one of the counters in <link
+ linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mutexes.extent_avail">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mutexes.extent_avail.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>arena.&lt;i&gt;.extent_avail
+ </varname> mutex (arena scope; extent avail related).
+ <mallctl>{counter}</mallctl> is one of the counters in <link
+ linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mutexes.extents_dirty">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mutexes.extents_dirty.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>arena.&lt;i&gt;.extents_dirty
+ </varname> mutex (arena scope; dirty extents related).
+ <mallctl>{counter}</mallctl> is one of the counters in <link
+ linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mutexes.extents_muzzy">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mutexes.extents_muzzy.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>arena.&lt;i&gt;.extents_muzzy
+ </varname> mutex (arena scope; muzzy extents related).
+ <mallctl>{counter}</mallctl> is one of the counters in <link
+ linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mutexes.extents_retained">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mutexes.extents_retained.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>arena.&lt;i&gt;.extents_retained
+ </varname> mutex (arena scope; retained extents related).
+ <mallctl>{counter}</mallctl> is one of the counters in <link
+ linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mutexes.decay_dirty">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mutexes.decay_dirty.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>arena.&lt;i&gt;.decay_dirty
+ </varname> mutex (arena scope; decay for dirty pages related).
+ <mallctl>{counter}</mallctl> is one of the counters in <link
+ linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mutexes.decay_muzzy">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mutexes.decay_muzzy.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>arena.&lt;i&gt;.decay_muzzy
+ </varname> mutex (arena scope; decay for muzzy pages related).
+ <mallctl>{counter}</mallctl> is one of the counters in <link
+ linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mutexes.base">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mutexes.base.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>arena.&lt;i&gt;.base</varname>
+ mutex (arena scope; base allocator related).
+ <mallctl>{counter}</mallctl> is one of the counters in <link
+ linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mutexes.tcache_list">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mutexes.tcache_list.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on
+ <varname>arena.&lt;i&gt;.tcache_list</varname> mutex (arena scope;
+ tcache to arena association related). This mutex is expected to be
+ accessed less often. <mallctl>{counter}</mallctl> is one of the
+ counters in <link linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ </variablelist>
+ </refsect1>
+ <refsect1 id="heap_profile_format">
+ <title>HEAP PROFILE FORMAT</title>
+ <para>Although the heap profiling functionality was originally designed to
+ be compatible with the
+ <command>pprof</command> command that is developed as part of the <ulink
+ url="http://code.google.com/p/gperftools/">gperftools
+ package</ulink>, the addition of per thread heap profiling functionality
+ required a different heap profile format. The <command>jeprof</command>
+ command is derived from <command>pprof</command>, with enhancements to
+ support the heap profile format described here.</para>
+
+ <para>In the following hypothetical heap profile, <constant>[...]</constant>
+ indicates elision for the sake of compactness. <programlisting><![CDATA[
+heap_v2/524288
+ t*: 28106: 56637512 [0: 0]
+ [...]
+ t3: 352: 16777344 [0: 0]
+ [...]
+ t99: 17754: 29341640 [0: 0]
+ [...]
+@ 0x5f86da8 0x5f5a1dc [...] 0x29e4d4e 0xa200316 0xabb2988 [...]
+ t*: 13: 6688 [0: 0]
+ t3: 12: 6496 [0: 0]
+ t99: 1: 192 [0: 0]
+[...]
+
+MAPPED_LIBRARIES:
+[...]]]></programlisting> The following matches the above heap profile, but most
+tokens are replaced with <constant>&lt;description&gt;</constant> to indicate
+descriptions of the corresponding fields. <programlisting><![CDATA[
+<heap_profile_format_version>/<mean_sample_interval>
+ <aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+ [...]
+ <thread_3_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+ [...]
+ <thread_99_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+ [...]
+@ <top_frame> <frame> [...] <frame> <frame> <frame> [...]
+ <backtrace_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+ <backtrace_thread_3>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+ <backtrace_thread_99>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+[...]
+
+MAPPED_LIBRARIES:
+</proc/<pid>/maps>]]></programlisting></para>
+ </refsect1>
+
+ <refsect1 id="debugging_malloc_problems">
+ <title>DEBUGGING MALLOC PROBLEMS</title>
+ <para>When debugging, it is a good idea to configure/build jemalloc with
+ the <option>--enable-debug</option> and <option>--enable-fill</option>
+ options, and recompile the program with suitable options and symbols for
+ debugger support. When so configured, jemalloc incorporates a wide variety
+ of run-time assertions that catch application errors such as double-free,
+ write-after-free, etc.</para>
+
+ <para>Programs often accidentally depend on <quote>uninitialized</quote>
+ memory actually being filled with zero bytes. Junk filling
+ (see the <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link>
+ option) tends to expose such bugs in the form of obviously incorrect
+ results and/or coredumps. Conversely, zero
+ filling (see the <link
+ linkend="opt.zero"><mallctl>opt.zero</mallctl></link> option) eliminates
+ the symptoms of such bugs. Between these two options, it is usually
+ possible to quickly detect, diagnose, and eliminate such bugs.</para>
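+
+ <para>For example, junk filling can be enabled for a single run via the
+ environment (an illustrative invocation; the program name is a
+ placeholder):
+ <screen>MALLOC_CONF="junk:true" ./a.out</screen></para>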
+
+ <para>This implementation does not provide much detail about the problems
+ it detects, because the performance impact for storing such information
+ would be prohibitive.</para>
+ </refsect1>
+ <refsect1 id="diagnostic_messages">
+ <title>DIAGNOSTIC MESSAGES</title>
+ <para>If any of the memory allocation/deallocation functions detect an
+ error or warning condition, a message will be printed to file descriptor
+ <constant>STDERR_FILENO</constant>. Errors will result in the process
+ dumping core. If the <link
+ linkend="opt.abort"><mallctl>opt.abort</mallctl></link> option is set, most
+ warnings are treated as errors.</para>
+
+ <para>The <varname>malloc_message</varname> variable allows the programmer
+ to override the function which emits the text strings forming the errors
+ and warnings if for some reason the <constant>STDERR_FILENO</constant> file
+ descriptor is not suitable for this.
+ <function>malloc_message()</function> takes the
+ <parameter>cbopaque</parameter> pointer argument that is
+ <constant>NULL</constant> unless overridden by the arguments in a call to
+ <function>malloc_stats_print()</function>, followed by a string
+ pointer. Please note that doing anything which tries to allocate memory in
+ this function is likely to result in a crash or deadlock.</para>
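+
+ <para>As a minimal sketch (the handler name is illustrative), a
+ replacement that forwards messages with
+ <citerefentry><refentrytitle>write</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>, which allocates no memory:
+ <programlisting language="C"><![CDATA[
+#include <string.h>
+#include <unistd.h>
+#include <malloc_np.h>
+
+static void
+quiet_message(void *cbopaque, const char *s)
+{
+	/* Must not allocate memory; write(2) is allocation-free. */
+	(void)cbopaque;
+	(void)write(STDERR_FILENO, s, strlen(s));
+}
+
+int
+main(void)
+{
+	malloc_message = quiet_message;
+	/* ... */
+	return (0);
+}]]></programlisting></para>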
+
+ <para>All messages are prefixed by
+ <quote><computeroutput>&lt;jemalloc&gt;: </computeroutput></quote>.</para>
+ </refsect1>
+ <refsect1 id="return_values">
+ <title>RETURN VALUES</title>
+ <refsect2>
+ <title>Standard API</title>
+ <para>The <function>malloc()</function> and
+ <function>calloc()</function> functions return a pointer to the
+ allocated memory if successful; otherwise a <constant>NULL</constant>
+ pointer is returned and <varname>errno</varname> is set to
+ <errorname>ENOMEM</errorname>.</para>
+
+ <para>The <function>posix_memalign()</function> function
+ returns the value 0 if successful; otherwise it returns an error value.
+ The <function>posix_memalign()</function> function will fail
+ if:
+ <variablelist>
+ <varlistentry>
+ <term><errorname>EINVAL</errorname></term>
+
+ <listitem><para>The <parameter>alignment</parameter> parameter is
+ not a power of 2 at least as large as
+ <code language="C">sizeof(<type>void *</type>)</code>.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorname>ENOMEM</errorname></term>
+
+ <listitem><para>Memory allocation error.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
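+
+ <para>For example (an illustrative sketch), the error value is returned
+ directly and <varname>errno</varname> is not set:
+ <programlisting language="C"><![CDATA[
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+void *p;
+/* 64 is a power of 2 at least as large as sizeof(void *). */
+int err = posix_memalign(&p, 64, 1024);
+if (err != 0) {
+	/* err is EINVAL or ENOMEM. */
+	fprintf(stderr, "posix_memalign: %s\n", strerror(err));
+}]]></programlisting></para>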
+
+ <para>The <function>aligned_alloc()</function> function returns
+ a pointer to the allocated memory if successful; otherwise a
+ <constant>NULL</constant> pointer is returned and
+ <varname>errno</varname> is set. The
+ <function>aligned_alloc()</function> function will fail if:
+ <variablelist>
+ <varlistentry>
+ <term><errorname>EINVAL</errorname></term>
+
+ <listitem><para>The <parameter>alignment</parameter> parameter is
+ not a power of 2.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorname>ENOMEM</errorname></term>
+
+ <listitem><para>Memory allocation error.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+
+ <para>The <function>realloc()</function> function returns a
+ pointer, possibly identical to <parameter>ptr</parameter>, to the
+ allocated memory if successful; otherwise a <constant>NULL</constant>
+ pointer is returned, and <varname>errno</varname> is set to
+ <errorname>ENOMEM</errorname> if the error was the result of an
+ allocation failure. The <function>realloc()</function>
+ function always leaves the original buffer intact when an error occurs.
+ </para>
+
+ <para>The <function>free()</function> function returns no
+ value.</para>
+ </refsect2>
+ <refsect2>
+ <title>Non-standard API</title>
+ <para>The <function>mallocx()</function> and
+ <function>rallocx()</function> functions return a pointer to
+ the allocated memory if successful; otherwise a <constant>NULL</constant>
+ pointer is returned to indicate insufficient contiguous memory was
+ available to service the allocation request. </para>
+
+ <para>The <function>xallocx()</function> function returns the
+ real size of the resulting resized allocation pointed to by
+ <parameter>ptr</parameter>, which is a value less than
+ <parameter>size</parameter> if the allocation could not be adequately
+ grown in place. </para>
+
+ <para>The <function>sallocx()</function> function returns the
+ real size of the allocation pointed to by <parameter>ptr</parameter>.
+ </para>
+
+ <para>The <function>nallocx()</function> function returns the real size
+ that would result from a successful equivalent
+ <function>mallocx()</function> function call, or zero if
+ insufficient memory is available to perform the size computation. </para>
+
+ <para>The <function>mallctl()</function>,
+ <function>mallctlnametomib()</function>, and
+ <function>mallctlbymib()</function> functions return 0 on
+ success; otherwise they return an error value. The functions will fail
+ if:
+ <variablelist>
+ <varlistentry>
+ <term><errorname>EINVAL</errorname></term>
+
+ <listitem><para><parameter>newp</parameter> is not
+ <constant>NULL</constant>, and <parameter>newlen</parameter> is too
+ large or too small. Alternatively, <parameter>*oldlenp</parameter>
+ is too large or too small; when this happens, except for the few
+ cases explicitly documented otherwise, as much data as possible
+ is read despite the error, with the amount of data read being
+ recorded in <parameter>*oldlenp</parameter>.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorname>ENOENT</errorname></term>
+
+ <listitem><para><parameter>name</parameter> or
+ <parameter>mib</parameter> specifies an unknown/invalid
+ value.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorname>EPERM</errorname></term>
+
+ <listitem><para>Attempt to read or write a void value, or attempt to
+ write a read-only value.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorname>EAGAIN</errorname></term>
+
+ <listitem><para>A memory allocation failure
+ occurred.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorname>EFAULT</errorname></term>
+
+ <listitem><para>An interface with side effects failed in some way
+ not directly related to <function>mallctl*()</function>
+ read/write processing.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
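+
+ <para>For example (an illustrative sketch), checking the return value
+ rather than <varname>errno</varname>:
+ <programlisting language="C"><![CDATA[
+#include <malloc_np.h>
+
+unsigned narenas;
+size_t len = sizeof(narenas);
+int err = mallctl("arenas.narenas", &narenas, &len, NULL, 0);
+if (err != 0) {
+	/* err is one of the error values listed above. */
+}]]></programlisting></para>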
+
+ <para>The <function>malloc_usable_size()</function> function
+ returns the usable size of the allocation pointed to by
+ <parameter>ptr</parameter>. </para>
+ </refsect2>
+ </refsect1>
+ <refsect1 id="environment">
+ <title>ENVIRONMENT</title>
+ <para>The following environment variable affects the execution of the
+ allocation functions:
+ <variablelist>
+ <varlistentry>
+ <term><envar>MALLOC_CONF</envar></term>
+
+ <listitem><para>If the environment variable
+ <envar>MALLOC_CONF</envar> is set, the characters it contains
+ will be interpreted as options.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
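+
+ <para>For example, to enable abort-on-warning for a single program
+ invocation (illustrative; the program name is a placeholder):
+ <screen>MALLOC_CONF="abort:true" ./a.out</screen></para>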
+ </refsect1>
+ <refsect1 id="examples">
+ <title>EXAMPLES</title>
+ <para>To dump core whenever a problem occurs:
+ <screen>ln -s 'abort:true' /etc/malloc.conf</screen>
+ </para>
+ <para>To specify in the source that only one arena should be automatically
+ created:
+ <programlisting language="C"><![CDATA[
+malloc_conf = "narenas:1";]]></programlisting></para>
+ </refsect1>
+ <refsect1 id="see_also">
+ <title>SEE ALSO</title>
+ <para><citerefentry><refentrytitle>madvise</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>mmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>utrace</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>alloca</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>atexit</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>getpagesize</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry></para>
+ </refsect1>
+ <refsect1 id="standards">
+ <title>STANDARDS</title>
+ <para>The <function>malloc()</function>,
+ <function>calloc()</function>,
+ <function>realloc()</function>, and
+ <function>free()</function> functions conform to ISO/IEC
+ 9899:1990 (<quote>ISO C90</quote>).</para>
+
+ <para>The <function>posix_memalign()</function> function conforms
+ to IEEE Std 1003.1-2001 (<quote>POSIX.1</quote>).</para>
+ </refsect1>
+ <refsect1 id="history">
+ <title>HISTORY</title>
+ <para>The <function>malloc_usable_size()</function> and
+ <function>posix_memalign()</function> functions first appeared in FreeBSD
+ 7.0.</para>
+
+ <para>The <function>aligned_alloc()</function>,
+ <function>malloc_stats_print()</function>, and
+ <function>mallctl*()</function> functions first appeared in FreeBSD
+ 10.0.</para>
+
+ <para>The <function>*allocx()</function> functions first appeared in FreeBSD
+ 11.0.</para>
+ </refsect1>
+</refentry>
+||||||| dec341af7695
+=======
+<?xml version='1.0' encoding='UTF-8'?>
+<?xml-stylesheet type="text/xsl"
+ href="http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl"?>
+<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.4//EN"
+ "http://www.oasis-open.org/docbook/xml/4.4/docbookx.dtd" [
+]>
+
+<refentry>
+ <refentryinfo>
+ <title>User Manual</title>
+ <productname>jemalloc</productname>
+ <releaseinfo role="version">@jemalloc_version@</releaseinfo>
+ <authorgroup>
+ <author>
+ <firstname>Jason</firstname>
+ <surname>Evans</surname>
+ <personblurb>Author</personblurb>
+ </author>
+ </authorgroup>
+ </refentryinfo>
+ <refmeta>
+ <refentrytitle>JEMALLOC</refentrytitle>
+ <manvolnum>3</manvolnum>
+ </refmeta>
+ <refnamediv>
+ <refdescriptor>jemalloc</refdescriptor>
+ <refname>jemalloc</refname>
+ <!-- Each refname causes a man page file to be created. Only if this were
+ the system malloc(3) implementation would these files be appropriate.
+ <refname>malloc</refname>
+ <refname>calloc</refname>
+ <refname>posix_memalign</refname>
+ <refname>aligned_alloc</refname>
+ <refname>realloc</refname>
+ <refname>free</refname>
+ <refname>mallocx</refname>
+ <refname>rallocx</refname>
+ <refname>xallocx</refname>
+ <refname>sallocx</refname>
+ <refname>dallocx</refname>
+ <refname>sdallocx</refname>
+ <refname>nallocx</refname>
+ <refname>mallctl</refname>
+ <refname>mallctlnametomib</refname>
+ <refname>mallctlbymib</refname>
+ <refname>malloc_stats_print</refname>
+ <refname>malloc_usable_size</refname>
+ -->
+ <refpurpose>general purpose memory allocation functions</refpurpose>
+ </refnamediv>
+ <refsect1 id="library">
+ <title>LIBRARY</title>
+ <para>This manual describes jemalloc @jemalloc_version@. More information
+ can be found at the <ulink
+ url="http://jemalloc.net/">jemalloc website</ulink>.</para>
+
+ <para>The following configuration options are enabled in libc's built-in
+ jemalloc: <option>--enable-fill</option>,
+ <option>--enable-lazy-lock</option>, <option>--enable-stats</option>,
+ <option>--enable-utrace</option>, <option>--enable-xmalloc</option>, and
+ <option>--with-malloc-conf=abort_conf:false</option>.
+ Additionally, <option>--enable-debug</option> is enabled in development
+ versions of FreeBSD (controlled by the
+ <constant>MK_MALLOC_PRODUCTION</constant> make variable).</para>
+
+ </refsect1>
+ <refsynopsisdiv>
+ <title>SYNOPSIS</title>
+ <funcsynopsis>
+ <funcsynopsisinfo>#include &lt;<filename class="headerfile">stdlib.h</filename>&gt;
+#include &lt;<filename class="headerfile">malloc_np.h</filename>&gt;</funcsynopsisinfo>
+ <refsect2>
+ <title>Standard API</title>
+ <funcprototype>
+ <funcdef>void *<function>malloc</function></funcdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void *<function>calloc</function></funcdef>
+ <paramdef>size_t <parameter>number</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>int <function>posix_memalign</function></funcdef>
+ <paramdef>void **<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>alignment</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void *<function>aligned_alloc</function></funcdef>
+ <paramdef>size_t <parameter>alignment</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void *<function>realloc</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void <function>free</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ </funcprototype>
+ </refsect2>
+ <refsect2>
+ <title>Non-standard API</title>
+ <funcprototype>
+ <funcdef>void *<function>mallocx</function></funcdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void *<function>rallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>xallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>extra</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>sallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void <function>dallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void <function>sdallocx</function></funcdef>
+ <paramdef>void *<parameter>ptr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>nallocx</function></funcdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>int <parameter>flags</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>int <function>mallctl</function></funcdef>
+ <paramdef>const char *<parameter>name</parameter></paramdef>
+ <paramdef>void *<parameter>oldp</parameter></paramdef>
+ <paramdef>size_t *<parameter>oldlenp</parameter></paramdef>
+ <paramdef>void *<parameter>newp</parameter></paramdef>
+ <paramdef>size_t <parameter>newlen</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>int <function>mallctlnametomib</function></funcdef>
+ <paramdef>const char *<parameter>name</parameter></paramdef>
+ <paramdef>size_t *<parameter>mibp</parameter></paramdef>
+ <paramdef>size_t *<parameter>miblenp</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>int <function>mallctlbymib</function></funcdef>
+ <paramdef>const size_t *<parameter>mib</parameter></paramdef>
+ <paramdef>size_t <parameter>miblen</parameter></paramdef>
+ <paramdef>void *<parameter>oldp</parameter></paramdef>
+ <paramdef>size_t *<parameter>oldlenp</parameter></paramdef>
+ <paramdef>void *<parameter>newp</parameter></paramdef>
+ <paramdef>size_t <parameter>newlen</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void <function>malloc_stats_print</function></funcdef>
+ <paramdef>void <parameter>(*write_cb)</parameter>
+ <funcparams>void *, const char *</funcparams>
+ </paramdef>
+ <paramdef>void *<parameter>cbopaque</parameter></paramdef>
+ <paramdef>const char *<parameter>opts</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>size_t <function>malloc_usable_size</function></funcdef>
+ <paramdef>const void *<parameter>ptr</parameter></paramdef>
+ </funcprototype>
+ <funcprototype>
+ <funcdef>void <function>(*malloc_message)</function></funcdef>
+ <paramdef>void *<parameter>cbopaque</parameter></paramdef>
+ <paramdef>const char *<parameter>s</parameter></paramdef>
+ </funcprototype>
+ <para><type>const char *</type><varname>malloc_conf</varname>;</para>
+ </refsect2>
+ </funcsynopsis>
+ </refsynopsisdiv>
+ <refsect1 id="description">
+ <title>DESCRIPTION</title>
+ <refsect2>
+ <title>Standard API</title>
+
+ <para>The <function>malloc()</function> function allocates
+ <parameter>size</parameter> bytes of uninitialized memory. The allocated
+ space is suitably aligned (after possible pointer coercion) for storage
+ of any type of object.</para>
+
+ <para>The <function>calloc()</function> function allocates
+ space for <parameter>number</parameter> objects, each
+ <parameter>size</parameter> bytes in length. The result is identical to
+ calling <function>malloc()</function> with an argument of
+ <parameter>number</parameter> * <parameter>size</parameter>, with the
+ exception that the allocated memory is explicitly initialized to zero
+ bytes.</para>
+
+ <para>The <function>posix_memalign()</function> function
+ allocates <parameter>size</parameter> bytes of memory such that the
+ allocation's base address is a multiple of
+ <parameter>alignment</parameter>, and returns the allocation in the value
+ pointed to by <parameter>ptr</parameter>. The requested
+ <parameter>alignment</parameter> must be a power of 2 at least as large as
+ <code language="C">sizeof(<type>void *</type>)</code>.</para>
+
+ <para>The <function>aligned_alloc()</function> function
+ allocates <parameter>size</parameter> bytes of memory such that the
+ allocation's base address is a multiple of
+ <parameter>alignment</parameter>. The requested
+ <parameter>alignment</parameter> must be a power of 2. Behavior is
+ undefined if <parameter>size</parameter> is not an integral multiple of
+ <parameter>alignment</parameter>.</para>
+
+ <para>The <function>realloc()</function> function changes the
+ size of the previously allocated memory referenced by
+ <parameter>ptr</parameter> to <parameter>size</parameter> bytes. The
+ contents of the memory are unchanged up to the lesser of the new and old
+ sizes. If the new size is larger, the contents of the newly allocated
+ portion of the memory are undefined. Upon success, the memory referenced
+ by <parameter>ptr</parameter> is freed and a pointer to the newly
+ allocated memory is returned. Note that
+ <function>realloc()</function> may move the memory allocation,
+ resulting in a different return value than <parameter>ptr</parameter>.
+ If <parameter>ptr</parameter> is <constant>NULL</constant>, the
+ <function>realloc()</function> function behaves identically to
+ <function>malloc()</function> for the specified size.</para>
+
+ <para>The <function>free()</function> function causes the
+ allocated memory referenced by <parameter>ptr</parameter> to be made
+ available for future allocations. If <parameter>ptr</parameter> is
+ <constant>NULL</constant>, no action occurs.</para>
+ </refsect2>
+ <refsect2>
+ <title>Non-standard API</title>
+ <para>The <function>mallocx()</function>,
+ <function>rallocx()</function>,
+ <function>xallocx()</function>,
+ <function>sallocx()</function>,
+ <function>dallocx()</function>,
+ <function>sdallocx()</function>, and
+ <function>nallocx()</function> functions all have a
+ <parameter>flags</parameter> argument that can be used to specify
+ options. The functions only check the options that are contextually
+ relevant. Use bitwise or (<code language="C">|</code>) operations to
+ specify one or more of the following:
+ <variablelist>
+ <varlistentry id="MALLOCX_LG_ALIGN">
+ <term><constant>MALLOCX_LG_ALIGN(<parameter>la</parameter>)
+ </constant></term>
+
+ <listitem><para>Align the memory allocation to start at an address
+ that is a multiple of <code language="C">(1 &lt;&lt;
+ <parameter>la</parameter>)</code>. This macro does not validate
+ that <parameter>la</parameter> is within the valid
+ range.</para></listitem>
+ </varlistentry>
+ <varlistentry id="MALLOCX_ALIGN">
+ <term><constant>MALLOCX_ALIGN(<parameter>a</parameter>)
+ </constant></term>
+
+ <listitem><para>Align the memory allocation to start at an address
+ that is a multiple of <parameter>a</parameter>, where
+ <parameter>a</parameter> is a power of two. This macro does not
+ validate that <parameter>a</parameter> is a power of 2.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry id="MALLOCX_ZERO">
+ <term><constant>MALLOCX_ZERO</constant></term>
+
+ <listitem><para>Initialize newly allocated memory to contain zero
+ bytes. In the growing reallocation case, the real size prior to
+ reallocation defines the boundary between untouched bytes and those
+ that are initialized to contain zero bytes. If this macro is
+ absent, newly allocated memory is uninitialized.</para></listitem>
+ </varlistentry>
+ <varlistentry id="MALLOCX_TCACHE">
+ <term><constant>MALLOCX_TCACHE(<parameter>tc</parameter>)
+ </constant></term>
+
+ <listitem><para>Use the thread-specific cache (tcache) specified by
+ the identifier <parameter>tc</parameter>, which must have been
+ acquired via the <link
+ linkend="tcache.create"><mallctl>tcache.create</mallctl></link>
+ mallctl. This macro does not validate that
+ <parameter>tc</parameter> specifies a valid
+ identifier.</para></listitem>
+ </varlistentry>
+ <varlistentry id="MALLOC_TCACHE_NONE">
+ <term><constant>MALLOCX_TCACHE_NONE</constant></term>
+
+ <listitem><para>Do not use a thread-specific cache (tcache). Unless
+ <constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant> or
+ <constant>MALLOCX_TCACHE_NONE</constant> is specified, an
+ automatically managed tcache will be used under many circumstances.
+ This macro cannot be used in the same <parameter>flags</parameter>
+ argument as
+ <constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant>.</para></listitem>
+ </varlistentry>
+ <varlistentry id="MALLOCX_ARENA">
+ <term><constant>MALLOCX_ARENA(<parameter>a</parameter>)
+ </constant></term>
+
+ <listitem><para>Use the arena specified by the index
+ <parameter>a</parameter>. This macro has no effect for regions that
+ were allocated via an arena other than the one specified. This
+ macro does not validate that <parameter>a</parameter> specifies an
+ arena index in the valid range.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
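+
+ <para>For illustration, a sketch combining the flags described above to
+ request a 64-byte-aligned, zero-filled allocation:
+ <programlisting language="C"><![CDATA[
+#include <malloc_np.h>
+
+/* At least 4096 bytes, 64-byte-aligned, zero-filled. */
+void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
+if (p == NULL) {
+	/* Insufficient memory. */
+}]]></programlisting></para>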
+
+ <para>The <function>mallocx()</function> function allocates at
+ least <parameter>size</parameter> bytes of memory, and returns a pointer
+ to the base address of the allocation. Behavior is undefined if
+ <parameter>size</parameter> is <constant>0</constant>.</para>
+
+ <para>The <function>rallocx()</function> function resizes the
+ allocation at <parameter>ptr</parameter> to be at least
+ <parameter>size</parameter> bytes, and returns a pointer to the base
+ address of the resulting allocation, which may or may not have moved from
+ its original location. Behavior is undefined if
+ <parameter>size</parameter> is <constant>0</constant>.</para>
+
+ <para>The <function>xallocx()</function> function resizes the
+ allocation at <parameter>ptr</parameter> in place to be at least
+ <parameter>size</parameter> bytes, and returns the real size of the
+ allocation. If <parameter>extra</parameter> is non-zero, an attempt is
+ made to resize the allocation to be at least <code
+ language="C">(<parameter>size</parameter> +
+ <parameter>extra</parameter>)</code> bytes, though inability to allocate
+ the extra byte(s) will not by itself result in failure to resize.
+ Behavior is undefined if <parameter>size</parameter> is
+ <constant>0</constant>, or if <code
+ language="C">(<parameter>size</parameter> + <parameter>extra</parameter>
+ &gt; <constant>SIZE_T_MAX</constant>)</code>.</para>
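+
+ <para>For example (a sketch; <varname>p</varname> and
+ <varname>new_size</varname> are assumed to exist), falling back to
+ <function>rallocx()</function> when in-place growth fails:
+ <programlisting language="C"><![CDATA[
+size_t usable = xallocx(p, new_size, 0, 0);
+if (usable < new_size) {
+	/* Could not grow in place; move the allocation instead. */
+	p = rallocx(p, new_size, 0);
+}]]></programlisting></para>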
+
+ <para>The <function>sallocx()</function> function returns the
+ real size of the allocation at <parameter>ptr</parameter>.</para>
+
+ <para>The <function>dallocx()</function> function causes the
+ memory referenced by <parameter>ptr</parameter> to be made available for
+ future allocations.</para>
+
+ <para>The <function>sdallocx()</function> function is an
+ extension of <function>dallocx()</function> with a
+ <parameter>size</parameter> parameter to allow the caller to pass in the
+ allocation size as an optimization. The minimum valid input size is the
+ original requested size of the allocation, and the maximum valid input
+ size is the corresponding value returned by
+ <function>nallocx()</function> or
+ <function>sallocx()</function>.</para>
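+
+ <para>A sketch of the intended pairing, passing the original request
+ size back at deallocation time:
+ <programlisting language="C"><![CDATA[
+void *p = mallocx(128, 0);
+/* ... */
+/* 128 is the original request, hence a valid size argument. */
+sdallocx(p, 128, 0);]]></programlisting></para>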
+
+ <para>The <function>nallocx()</function> function allocates no
+ memory, but it performs the same size computation as the
+ <function>mallocx()</function> function, and returns the real
+ size of the allocation that would result from the equivalent
+ <function>mallocx()</function> function call, or
+ <constant>0</constant> if the inputs exceed the maximum supported size
+ class and/or alignment. Behavior is undefined if
+ <parameter>size</parameter> is <constant>0</constant>.</para>
+
+ <para>The <function>mallctl()</function> function provides a
+ general interface for introspecting the memory allocator, as well as
+ setting modifiable parameters and triggering actions. The
+ period-separated <parameter>name</parameter> argument specifies a
+ location in a tree-structured namespace; see the <xref
+ linkend="mallctl_namespace" xrefstyle="template:%t"/> section for
+ documentation on the tree contents. To read a value, pass a pointer via
+ <parameter>oldp</parameter> to adequate space to contain the value, and a
+ pointer to its length via <parameter>oldlenp</parameter>; otherwise pass
+ <constant>NULL</constant> and <constant>NULL</constant>. Similarly, to
+ write a value, pass a pointer to the value via
+ <parameter>newp</parameter>, and its length via
+ <parameter>newlen</parameter>; otherwise pass <constant>NULL</constant>
+ and <constant>0</constant>.</para>
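+
+ <para>For example (an illustrative sketch), reading a statistic and
+ writing an option:
+ <programlisting language="C"><![CDATA[
+#include <stdbool.h>
+#include <malloc_np.h>
+
+/* Read: pass storage and its length via oldp/oldlenp. */
+size_t allocated;
+size_t len = sizeof(allocated);
+mallctl("stats.allocated", &allocated, &len, NULL, 0);
+
+/* Write: pass the new value and its length via newp/newlen. */
+bool enable = true;
+mallctl("background_thread", NULL, NULL, &enable, sizeof(enable));]]></programlisting></para>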
+
+ <para>The <function>mallctlnametomib()</function> function
+ provides a way to avoid repeated name lookups for applications that
+ repeatedly query the same portion of the namespace, by translating a name
+ to a <quote>Management Information Base</quote> (MIB) that can be passed
+ repeatedly to <function>mallctlbymib()</function>. Upon
+ successful return from <function>mallctlnametomib()</function>,
+ <parameter>mibp</parameter> contains an array of
+ <parameter>*miblenp</parameter> integers, where
+ <parameter>*miblenp</parameter> is the lesser of the number of components
+ in <parameter>name</parameter> and the input value of
+ <parameter>*miblenp</parameter>. Thus it is possible to pass a
+ <parameter>*miblenp</parameter> that is smaller than the number of
+ period-separated name components, which results in a partial MIB that can
+ be used as the basis for constructing a complete MIB. For name
+ components that are integers (e.g. the 2 in
+ <link
+ linkend="arenas.bin.i.size"><mallctl>arenas.bin.2.size</mallctl></link>),
+ the corresponding MIB component will always be that integer. Therefore,
+ it is legitimate to construct code like the following: <programlisting
+ language="C"><![CDATA[
+unsigned nbins, i;
+size_t mib[4];
+size_t len, miblen;
+
+len = sizeof(nbins);
+mallctl("arenas.nbins", &nbins, &len, NULL, 0);
+
+miblen = 4;
+mallctlnametomib("arenas.bin.0.size", mib, &miblen);
+for (i = 0; i < nbins; i++) {
+ size_t bin_size;
+
+ mib[2] = i;
+ len = sizeof(bin_size);
+ mallctlbymib(mib, miblen, (void *)&bin_size, &len, NULL, 0);
+ /* Do something with bin_size... */
+}]]></programlisting></para>
+
+ <varlistentry id="malloc_stats_print_opts">
+ </varlistentry>
+ <para>The <function>malloc_stats_print()</function> function writes
+ summary statistics via the <parameter>write_cb</parameter> callback
+ function pointer and <parameter>cbopaque</parameter> data passed to
+ <parameter>write_cb</parameter>, or <function>malloc_message()</function>
+ if <parameter>write_cb</parameter> is <constant>NULL</constant>. The
+ statistics are presented in human-readable form unless <quote>J</quote> is
+ specified as a character within the <parameter>opts</parameter> string, in
+ which case the statistics are presented in <ulink
+ url="http://www.json.org/">JSON format</ulink>. This function can be
+ called repeatedly. General information that never changes during
+ execution can be omitted by specifying <quote>g</quote> as a character
+ within the <parameter>opts</parameter> string. Note that
+ <function>malloc_stats_print()</function> uses the
+ <function>mallctl*()</function> functions internally, so inconsistent
+ statistics can be reported if multiple threads use these functions
+ simultaneously. If <option>--enable-stats</option> is specified during
+ configuration, <quote>m</quote>, <quote>d</quote>, and <quote>a</quote>
+ can be specified to omit merged arena, destroyed merged arena, and per
+ arena statistics, respectively; <quote>b</quote> and <quote>l</quote> can
+ be specified to omit per size class statistics for bins and large objects,
+ respectively; <quote>x</quote> can be specified to omit all mutex
+ statistics; <quote>e</quote> can be used to omit extent statistics.
+ Unrecognized characters are silently ignored. Note that thread caching
+ may prevent some statistics from being completely up to date, since extra
+ locking would be required to merge counters that track thread cache
+ operations.</para>
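+
+ <para>For example (illustrative), two common invocations:
+ <programlisting language="C"><![CDATA[
+/* Human-readable output, omitting unchanging general information. */
+malloc_stats_print(NULL, NULL, "g");
+
+/* JSON output via the default malloc_message() sink. */
+malloc_stats_print(NULL, NULL, "J");]]></programlisting></para>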
+
+ <para>The <function>malloc_usable_size()</function> function
+ returns the usable size of the allocation pointed to by
+ <parameter>ptr</parameter>. The return value may be larger than the size
+ that was requested during allocation. The
+ <function>malloc_usable_size()</function> function is not a
+ mechanism for in-place <function>realloc()</function>; rather
+ it is provided solely as a tool for introspection purposes. Any
+ discrepancy between the requested allocation size and the size reported
+ by <function>malloc_usable_size()</function> should not be
+ depended on, since such behavior is entirely implementation-dependent.
+ </para>
+ </refsect2>
+ </refsect1>
+ <refsect1 id="tuning">
+ <title>TUNING</title>
+ <para>The allocator initializes its internals once, when the first call
+ is made to one of the memory allocation routines; initialization is
+ based in part on various options that can be specified at compile time
+ or run time.</para>
+
+ <para>The string specified via <option>--with-malloc-conf</option>, the
+ string pointed to by the global variable <varname>malloc_conf</varname>, the
+ <quote>name</quote> of the file referenced by the symbolic link named
+ <filename class="symlink">/etc/malloc.conf</filename>, and the value of the
+ environment variable <envar>MALLOC_CONF</envar>, will be interpreted, in
+ that order, from left to right as options. Note that
+ <varname>malloc_conf</varname> may be read before
+ <function>main()</function> is entered, so the declaration of
+ <varname>malloc_conf</varname> should specify an initializer that contains
+ the final value to be read by jemalloc. <option>--with-malloc-conf</option>
+ and <varname>malloc_conf</varname> are compile-time mechanisms, whereas
+ <filename class="symlink">/etc/malloc.conf</filename> and
+ <envar>MALLOC_CONF</envar> can be safely set any time prior to program
+ invocation.</para>
+
+ <para>An options string is a comma-separated list of option:value pairs.
+ There is one key corresponding to each <link
+ linkend="opt.abort"><mallctl>opt.*</mallctl></link> mallctl (see the <xref
+ linkend="mallctl_namespace" xrefstyle="template:%t"/> section for options
+ documentation). For example, <literal>abort:true,narenas:1</literal> sets
+ the <link linkend="opt.abort"><mallctl>opt.abort</mallctl></link> and <link
+ linkend="opt.narenas"><mallctl>opt.narenas</mallctl></link> options. Some
+ options have boolean values (true/false), others have integer values (base
+ 8, 10, or 16, depending on prefix), and yet others have raw string
+ values.</para>
+ </refsect1>
+ <refsect1 id="implementation_notes">
+ <title>IMPLEMENTATION NOTES</title>
+ <para>Traditionally, allocators have used
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> to obtain memory, which is
+ suboptimal for several reasons, including race conditions, increased
+ fragmentation, and artificial limitations on maximum usable memory. If
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> is supported by the operating
+ system, this allocator uses both
+ <citerefentry><refentrytitle>mmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> and
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>, in that order of preference;
+ otherwise only <citerefentry><refentrytitle>mmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> is used.</para>
+
+ <para>This allocator uses multiple arenas in order to reduce lock
+ contention for threaded programs on multi-processor systems. This works
+ well with regard to threading scalability, but incurs some costs. There is
+ a small fixed per-arena overhead, and additionally, arenas manage memory
+ completely independently of each other, which means a small fixed increase
+ in overall memory fragmentation. These overheads are not generally an
+ issue, given the number of arenas normally used. Note that using
+ substantially more arenas than the default is not likely to improve
+ performance, mainly due to reduced cache performance. However, it may make
+ sense to reduce the number of arenas if an application does not make much
+ use of the allocation functions.</para>
+
+ <para>In addition to multiple arenas, this allocator supports
+ thread-specific caching, in order to make it possible to completely avoid
+ synchronization for most allocation requests. Such caching allows very fast
+ allocation in the common case, but it increases memory usage and
+ fragmentation, since a bounded number of objects can remain allocated in
+ each thread cache.</para>
+
+ <para>Memory is conceptually broken into extents. Extents are always
+ aligned to multiples of the page size. This alignment makes it possible to
+ find metadata for user objects quickly. User objects are broken into two
+ categories according to size: small and large. Contiguous small objects
+ comprise a slab, which resides within a single extent, whereas large objects
+ each have their own extents backing them.</para>
+
+ <para>Small objects are managed in groups by slabs. Each slab maintains
+ a bitmap to track which regions are in use. Allocation requests that are no
+ more than half the quantum (8 or 16, depending on architecture) are rounded
+ up to the nearest power of two that is at least <code
+ language="C">sizeof(<type>double</type>)</code>. All other object size
+ classes are multiples of the quantum, spaced such that there are four size
+ classes for each doubling in size, which limits internal fragmentation to
+ approximately 20% for all but the smallest size classes. Small size classes
+ are smaller than four times the page size, and large size classes extend
+ from four times the page size up to the largest size class that does not
+ exceed <constant>PTRDIFF_MAX</constant>.</para>
+
+ <para>Allocations are packed tightly together, which can be an issue for
+ multi-threaded applications. If you need to assure that allocations do not
+ suffer from cacheline sharing, round your allocation requests up to the
+ nearest multiple of the cacheline size, or specify cacheline alignment when
+ allocating.</para>
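+
+ <para>For example (a sketch; the 64-byte cacheline size is an
+ assumption, and <varname>size</varname> is assumed to exist):
+ <programlisting language="C"><![CDATA[
+/* 64 is an assumed cacheline size; query the platform in real code. */
+void *p = mallocx(size, MALLOCX_ALIGN(64));]]></programlisting></para>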
+
+ <para>The <function>realloc()</function>,
+ <function>rallocx()</function>, and
+ <function>xallocx()</function> functions may resize allocations
+ without moving them under limited circumstances. Unlike the
+ <function>*allocx()</function> API, the standard API does not
+ officially round up the usable size of an allocation to the nearest size
+ class, so technically it is necessary to call
+ <function>realloc()</function> to grow e.g. a 9-byte allocation to
+ 16 bytes, or shrink a 16-byte allocation to 9 bytes. Growth and shrinkage
+ trivially succeeds in place as long as the pre-size and post-size both round
+ up to the same size class. No other API guarantees are made regarding
+ in-place resizing, but the current implementation also tries to resize large
+ allocations in place, as long as the pre-size and post-size are both large.
+ For shrinkage to succeed, the extent allocator must support splitting (see
+ <link
+ linkend="arena.i.extent_hooks"><mallctl>arena.&lt;i&gt;.extent_hooks</mallctl></link>).
+ Growth only succeeds if the trailing memory is currently available, and the
+ extent allocator supports merging.</para>
+
+ <para>Assuming 4 KiB pages and a 16-byte quantum on a 64-bit system, the
+ size classes in each category are as shown in <xref linkend="size_classes"
+ xrefstyle="template:Table %n"/>.</para>
+
+ <table xml:id="size_classes" frame="all">
+ <title>Size classes</title>
+ <tgroup cols="3" colsep="1" rowsep="1">
+ <colspec colname="c1" align="left"/>
+ <colspec colname="c2" align="right"/>
+ <colspec colname="c3" align="left"/>
+ <thead>
+ <row>
+ <entry>Category</entry>
+ <entry>Spacing</entry>
+ <entry>Size</entry>
+ </row>
+ </thead>
+ <tbody>
+ <row>
+ <entry morerows="8">Small</entry>
+ <entry>lg</entry>
+ <entry>[8]</entry>
+ </row>
+ <row>
+ <entry>16</entry>
+ <entry>[16, 32, 48, 64, 80, 96, 112, 128]</entry>
+ </row>
+ <row>
+ <entry>32</entry>
+ <entry>[160, 192, 224, 256]</entry>
+ </row>
+ <row>
+ <entry>64</entry>
+ <entry>[320, 384, 448, 512]</entry>
+ </row>
+ <row>
+ <entry>128</entry>
+ <entry>[640, 768, 896, 1024]</entry>
+ </row>
+ <row>
+ <entry>256</entry>
+ <entry>[1280, 1536, 1792, 2048]</entry>
+ </row>
+ <row>
+ <entry>512</entry>
+ <entry>[2560, 3072, 3584, 4096]</entry>
+ </row>
+ <row>
+ <entry>1 KiB</entry>
+ <entry>[5 KiB, 6 KiB, 7 KiB, 8 KiB]</entry>
+ </row>
+ <row>
+ <entry>2 KiB</entry>
+ <entry>[10 KiB, 12 KiB, 14 KiB]</entry>
+ </row>
+ <row>
+ <entry morerows="15">Large</entry>
+ <entry>2 KiB</entry>
+ <entry>[16 KiB]</entry>
+ </row>
+ <row>
+ <entry>4 KiB</entry>
+ <entry>[20 KiB, 24 KiB, 28 KiB, 32 KiB]</entry>
+ </row>
+ <row>
+ <entry>8 KiB</entry>
+ <entry>[40 KiB, 48 KiB, 56 KiB, 64 KiB]</entry>
+ </row>
+ <row>
+ <entry>16 KiB</entry>
+ <entry>[80 KiB, 96 KiB, 112 KiB, 128 KiB]</entry>
+ </row>
+ <row>
+ <entry>32 KiB</entry>
+ <entry>[160 KiB, 192 KiB, 224 KiB, 256 KiB]</entry>
+ </row>
+ <row>
+ <entry>64 KiB</entry>
+ <entry>[320 KiB, 384 KiB, 448 KiB, 512 KiB]</entry>
+ </row>
+ <row>
+ <entry>128 KiB</entry>
+ <entry>[640 KiB, 768 KiB, 896 KiB, 1 MiB]</entry>
+ </row>
+ <row>
+ <entry>256 KiB</entry>
+ <entry>[1280 KiB, 1536 KiB, 1792 KiB, 2 MiB]</entry>
+ </row>
+ <row>
+ <entry>512 KiB</entry>
+ <entry>[2560 KiB, 3 MiB, 3584 KiB, 4 MiB]</entry>
+ </row>
+ <row>
+ <entry>1 MiB</entry>
+ <entry>[5 MiB, 6 MiB, 7 MiB, 8 MiB]</entry>
+ </row>
+ <row>
+ <entry>2 MiB</entry>
+ <entry>[10 MiB, 12 MiB, 14 MiB, 16 MiB]</entry>
+ </row>
+ <row>
+ <entry>4 MiB</entry>
+ <entry>[20 MiB, 24 MiB, 28 MiB, 32 MiB]</entry>
+ </row>
+ <row>
+ <entry>8 MiB</entry>
+ <entry>[40 MiB, 48 MiB, 56 MiB, 64 MiB]</entry>
+ </row>
+ <row>
+ <entry>...</entry>
+ <entry>...</entry>
+ </row>
+ <row>
+ <entry>512 PiB</entry>
+ <entry>[2560 PiB, 3 EiB, 3584 PiB, 4 EiB]</entry>
+ </row>
+ <row>
+ <entry>1 EiB</entry>
+ <entry>[5 EiB, 6 EiB, 7 EiB]</entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </table>
+ </refsect1>
+ <refsect1 id="mallctl_namespace">
+ <title>MALLCTL NAMESPACE</title>
+ <para>The following names are defined in the namespace accessible via the
+ <function>mallctl*()</function> functions. Value types are specified in
+ parentheses, their readable/writable statuses are encoded as
+ <literal>rw</literal>, <literal>r-</literal>, <literal>-w</literal>, or
+ <literal>--</literal>, and required build configuration flags follow, if
+ any. A name element encoded as <literal>&lt;i&gt;</literal> or
+ <literal>&lt;j&gt;</literal> indicates an integer component, where the
+ integer varies from 0 to some upper value that must be determined via
+ introspection. In the case of <mallctl>stats.arenas.&lt;i&gt;.*</mallctl>
+ and <mallctl>arena.&lt;i&gt;.{initialized,purge,decay,dss}</mallctl>,
+ <literal>&lt;i&gt;</literal> equal to
+ <constant>MALLCTL_ARENAS_ALL</constant> can be used to operate on all arenas
+ or access the summation of statistics from all arenas; similarly
+ <literal>&lt;i&gt;</literal> equal to
+ <constant>MALLCTL_ARENAS_DESTROYED</constant> can be used to access the
+ summation of statistics from all destroyed arenas. These constants can be
+ utilized either via <function>mallctlnametomib()</function> followed by
+ <function>mallctlbymib()</function>, or via code such as the following:
+ <programlisting language="C"><![CDATA[
+#define STRINGIFY_HELPER(x) #x
+#define STRINGIFY(x) STRINGIFY_HELPER(x)
+
+mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".decay",
+ NULL, NULL, NULL, 0);]]></programlisting>
+ Take special note of the <link
+ linkend="epoch"><mallctl>epoch</mallctl></link> mallctl, which controls
+ refreshing of cached dynamic statistics.</para>
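+
+ <para>For example, a statistics refresh is conventionally performed by
+ writing (and reading back) the epoch before reading statistics (a
+ sketch):
+ <programlisting language="C"><![CDATA[
+uint64_t epoch = 1;
+size_t sz = sizeof(epoch);
+mallctl("epoch", &epoch, &sz, &epoch, sz);]]></programlisting></para>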
+
+ <variablelist>
+ <varlistentry id="version">
+ <term>
+ <mallctl>version</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Return the jemalloc version string.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="epoch">
+ <term>
+ <mallctl>epoch</mallctl>
+ (<type>uint64_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>If a value is passed in, refresh the data from which
+ the <function>mallctl*()</function> functions report values,
+ and increment the epoch. Return the current epoch. This is useful for
+ detecting whether another thread caused a refresh.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="background_thread">
+ <term>
+ <mallctl>background_thread</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Enable/disable internal background worker threads. When
+ set to true, background threads are created on demand (the number of
+ background threads will be no more than the number of CPUs or active
+ arenas). Threads run periodically, and handle <link
+ linkend="arena.i.decay">purging</link> asynchronously. When switching
+ off, background threads are terminated synchronously. Note that after a
+ <citerefentry><refentrytitle>fork</refentrytitle><manvolnum>2</manvolnum></citerefentry>
+ call, the state in the child process will be disabled regardless of
+ the state in the parent process. See <link
+ linkend="stats.background_thread.num_threads"><mallctl>stats.background_thread</mallctl></link>
+ for related stats. <link
+ linkend="opt.background_thread"><mallctl>opt.background_thread</mallctl></link>
+ can be used to set the default option. This option is only available on
+ selected pthread-based platforms.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="max_background_threads">
+ <term>
+ <mallctl>max_background_threads</mallctl>
+ (<type>size_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Maximum number of background worker threads that will
+ be created. This value is capped at <link
+ linkend="opt.max_background_threads"><mallctl>opt.max_background_threads</mallctl></link> at
+ startup.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.cache_oblivious">
+ <term>
+ <mallctl>config.cache_oblivious</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-cache-oblivious</option> was specified
+ during build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.debug">
+ <term>
+ <mallctl>config.debug</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-debug</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.fill">
+ <term>
+ <mallctl>config.fill</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-fill</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.lazy_lock">
+ <term>
+ <mallctl>config.lazy_lock</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-lazy-lock</option> was specified
+ during build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.malloc_conf">
+ <term>
+ <mallctl>config.malloc_conf</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Embedded configure-time-specified run-time options
+ string, empty unless <option>--with-malloc-conf</option> was specified
+ during build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.prof">
+ <term>
+ <mallctl>config.prof</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-prof</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.prof_libgcc">
+ <term>
+ <mallctl>config.prof_libgcc</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--disable-prof-libgcc</option> was not
+ specified during build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.prof_libunwind">
+ <term>
+ <mallctl>config.prof_libunwind</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-prof-libunwind</option> was specified
+ during build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.stats">
+ <term>
+ <mallctl>config.stats</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-stats</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+
+ <varlistentry id="config.utrace">
+ <term>
+ <mallctl>config.utrace</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-utrace</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="config.xmalloc">
+ <term>
+ <mallctl>config.xmalloc</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para><option>--enable-xmalloc</option> was specified during
+ build configuration.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.abort">
+ <term>
+ <mallctl>opt.abort</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Abort-on-warning enabled/disabled. If true, most
+ warnings are fatal. Note that runtime option warnings are not included
+ (see <link
+ linkend="opt.abort_conf"><mallctl>opt.abort_conf</mallctl></link> for
+ that). The process will call
+ <citerefentry><refentrytitle>abort</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry> in these cases. This option is
+ disabled by default unless <option>--enable-debug</option> is
+ specified during configuration, in which case it is enabled by default.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.confirm_conf">
+ <term>
+ <mallctl>opt.confirm_conf</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Confirm-runtime-options-when-program-starts
+ enabled/disabled. If true, the string specified via
+ <option>--with-malloc-conf</option>, the string pointed to by the
+ global variable <varname>malloc_conf</varname>, the <quote>name</quote>
+ of the file referenced by the symbolic link named
+ <filename class="symlink">/etc/malloc.conf</filename>, and the value of
+ the environment variable <envar>MALLOC_CONF</envar>, will be printed in
+ order. Then, each option being set will be individually printed. This
+ option is disabled by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.abort_conf">
+ <term>
+ <mallctl>opt.abort_conf</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Abort-on-invalid-configuration enabled/disabled. If
+ true, invalid runtime options are fatal. The process will call
+ <citerefentry><refentrytitle>abort</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry> in these cases. This option is
+ disabled by default unless <option>--enable-debug</option> is
+ specified during configuration, in which case it is enabled by default.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.metadata_thp">
+ <term>
+ <mallctl>opt.metadata_thp</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Controls whether to allow jemalloc to use transparent
+ huge page (THP) for internal metadata (see <link
+ linkend="stats.metadata">stats.metadata</link>). <quote>always</quote>
+ allows such usage. <quote>auto</quote> uses no THP initially, but may
+ begin to do so when metadata usage reaches a certain level. The default
+ is <quote>disabled</quote>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.retain">
+ <term>
+ <mallctl>opt.retain</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>If true, retain unused virtual memory for later reuse
+ rather than discarding it by calling
+ <citerefentry><refentrytitle>munmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> or equivalent (see <link
+ linkend="stats.retained">stats.retained</link> for related details).
+ It also makes jemalloc use <citerefentry>
+ <refentrytitle>mmap</refentrytitle><manvolnum>2</manvolnum>
+ </citerefentry> or equivalent in a more greedy way, mapping larger
+ chunks in one go. This option is disabled by default unless discarding
+ virtual memory is known to trigger platform-specific performance
+ problems, namely 1) for [64-bit] Linux, which has a quirk in its virtual
+ memory allocation algorithm that causes semi-permanent VM map holes
+ under normal jemalloc operation; and 2) for [64-bit] Windows, which
+ disallows split / merged regions with
+ <parameter><constant>MEM_RELEASE</constant></parameter>. Although the
+ same issues may present on 32-bit platforms as well, retaining virtual
+ memory for 32-bit Linux and Windows is disabled by default due to the
+ practical possibility of address space exhaustion. </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.dss">
+ <term>
+ <mallctl>opt.dss</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>dss (<citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>) allocation precedence as
+ related to <citerefentry><refentrytitle>mmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> allocation. The following
+ settings are supported if
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> is supported by the operating
+ system: <quote>disabled</quote>, <quote>primary</quote>, and
+ <quote>secondary</quote>; otherwise only <quote>disabled</quote> is
+ supported. The default is <quote>secondary</quote> if
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> is supported by the operating
+ system; <quote>disabled</quote> otherwise.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.narenas">
+ <term>
+ <mallctl>opt.narenas</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Maximum number of arenas to use for automatic
+ multiplexing of threads and arenas. The default is four times the
+ number of CPUs, or one if there is a single CPU.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.oversize_threshold">
+ <term>
+ <mallctl>opt.oversize_threshold</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>The threshold in bytes above which requests are
+ considered oversize. Allocation requests with greater sizes are
+ fulfilled from a dedicated arena (automatically managed, but not within
+ <literal>narenas</literal>), in order to reduce fragmentation by not
+ mixing huge allocations with small ones. In addition, the decay API
+ guarantees on extents greater than the specified threshold may be
+ overridden. Note that requests with an arena index specified via
+ <constant>MALLOCX_ARENA</constant>, or from threads associated with
+ explicit arenas, will not be considered. The default threshold is 8
+ MiB. Values not within large size classes disable this
+ feature.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.percpu_arena">
+ <term>
+ <mallctl>opt.percpu_arena</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+      <listitem><para>Per CPU arena mode. Use the <quote>percpu</quote>
+      setting to enable this feature, which uses the number of CPUs to
+      determine the number of arenas, and binds threads to arenas dynamically
+      based on the CPU the thread currently runs on. The
+      <quote>phycpu</quote> setting uses one arena per physical CPU, meaning
+      the two hyper threads on the same CPU share one arena. Note that no
+      runtime checking regarding the availability of hyper threading is
+      currently done. When set to <quote>disabled</quote>, narenas and the
+      thread-to-arena association are not affected by this option. The
+      default is <quote>disabled</quote>.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.background_thread">
+ <term>
+ <mallctl>opt.background_thread</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+      <listitem><para>Internal background worker threads enabled/disabled.
+      Because of potential circular dependencies, enabling background threads
+      via this option may cause a crash or deadlock during initialization.
+      For a reliable way to use this feature, see <link
+      linkend="background_thread">background_thread</link> for dynamic
+      control options and details. This option is disabled by
+      default.</para>
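+      <para>A minimal sketch of the safer dynamic control mentioned above,
+      enabling background threads after initialization has completed:</para>
+      <programlisting language="C"><![CDATA[
+#include <stdbool.h>
+#include <jemalloc/jemalloc.h>
+
+static int
+enable_background_threads(void) {
+    bool enable = true;
+    /* Writing background_thread at runtime avoids the
+     * initialization-time hazards described above. */
+    return mallctl("background_thread", NULL, NULL, &enable,
+        sizeof(enable));
+}]]></programlisting></listitem>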
+ </varlistentry>
+
+ <varlistentry id="opt.max_background_threads">
+ <term>
+ <mallctl>opt.max_background_threads</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+      <listitem><para>Maximum number of background threads that will be
+      created if <link linkend="background_thread">background_thread</link>
+      is set. Defaults to the number of CPUs.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.dirty_decay_ms">
+ <term>
+ <mallctl>opt.dirty_decay_ms</mallctl>
+ (<type>ssize_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Approximate time in milliseconds from the creation of a
+ set of unused dirty pages until an equivalent set of unused dirty pages
+ is purged (i.e. converted to muzzy via e.g.
+ <function>madvise(<parameter>...</parameter><parameter><constant>MADV_FREE</constant></parameter>)</function>
+ if supported by the operating system, or converted to clean otherwise)
+ and/or reused. Dirty pages are defined as previously having been
+ potentially written to by the application, and therefore consuming
+ physical memory, yet having no current use. The pages are incrementally
+ purged according to a sigmoidal decay curve that starts and ends with
+ zero purge rate. A decay time of 0 causes all unused dirty pages to be
+ purged immediately upon creation. A decay time of -1 disables purging.
+ The default decay time is 10 seconds. See <link
+ linkend="arenas.dirty_decay_ms"><mallctl>arenas.dirty_decay_ms</mallctl></link>
+ and <link
+ linkend="arena.i.dirty_decay_ms"><mallctl>arena.&lt;i&gt;.dirty_decay_ms</mallctl></link>
+ for related dynamic control options. See <link
+ linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
+      for a description of muzzy pages. Note that when the <link
+      linkend="opt.oversize_threshold"><mallctl>oversize_threshold</mallctl></link>
+      feature is enabled, the arenas reserved for oversize requests may have
+      their own default decay settings.</para>
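+      <para>For illustration, a minimal sketch of adjusting the default
+      dirty decay time for subsequently created arenas at runtime, via the
+      related <mallctl>arenas.dirty_decay_ms</mallctl> mallctl:</para>
+      <programlisting language="C"><![CDATA[
+#include <sys/types.h>
+#include <jemalloc/jemalloc.h>
+
+static int
+set_default_dirty_decay(ssize_t decay_ms) {
+    /* E.g. 30000 spreads purging over roughly 30 seconds;
+     * 0 purges immediately; -1 disables purging. */
+    return mallctl("arenas.dirty_decay_ms", NULL, NULL, &decay_ms,
+        sizeof(decay_ms));
+}]]></programlisting></listitem>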
+ </varlistentry>
+
+ <varlistentry id="opt.muzzy_decay_ms">
+ <term>
+ <mallctl>opt.muzzy_decay_ms</mallctl>
+ (<type>ssize_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Approximate time in milliseconds from the creation of a
+ set of unused muzzy pages until an equivalent set of unused muzzy pages
+ is purged (i.e. converted to clean) and/or reused. Muzzy pages are
+ defined as previously having been unused dirty pages that were
+ subsequently purged in a manner that left them subject to the
+ reclamation whims of the operating system (e.g.
+ <function>madvise(<parameter>...</parameter><parameter><constant>MADV_FREE</constant></parameter>)</function>),
+ and therefore in an indeterminate state. The pages are incrementally
+ purged according to a sigmoidal decay curve that starts and ends with
+ zero purge rate. A decay time of 0 causes all unused muzzy pages to be
+ purged immediately upon creation. A decay time of -1 disables purging.
+ The default decay time is 10 seconds. See <link
+ linkend="arenas.muzzy_decay_ms"><mallctl>arenas.muzzy_decay_ms</mallctl></link>
+ and <link
+ linkend="arena.i.muzzy_decay_ms"><mallctl>arena.&lt;i&gt;.muzzy_decay_ms</mallctl></link>
+ for related dynamic control options.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.lg_extent_max_active_fit">
+ <term>
+ <mallctl>opt.lg_extent_max_active_fit</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>When reusing dirty extents, this determines the (log
+ base 2 of the) maximum ratio between the size of the active extent
+ selected (to split off from) and the size of the requested allocation.
+ This prevents the splitting of large active extents for smaller
+ allocations, which can reduce fragmentation over the long run
+ (especially for non-active extents). Lower value may reduce
+ fragmentation, at the cost of extra active extents. The default value
+ is 6, which gives a maximum ratio of 64 (2^6).</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.stats_print">
+ <term>
+ <mallctl>opt.stats_print</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Enable/disable statistics printing at exit. If
+ enabled, the <function>malloc_stats_print()</function>
+ function is called at program exit via an
+ <citerefentry><refentrytitle>atexit</refentrytitle>
+      <manvolnum>3</manvolnum></citerefentry> function. The <link
+      linkend="opt.stats_print_opts"><mallctl>opt.stats_print_opts</mallctl></link>
+      option can be combined with this one to specify output options. If
+ <option>--enable-stats</option> is specified during configuration, this
+ has the potential to cause deadlock for a multi-threaded process that
+ exits while one or more threads are executing in the memory allocation
+ functions. Furthermore, <function>atexit()</function> may
+ allocate memory during application initialization and then deadlock
+ internally when jemalloc in turn calls
+ <function>atexit()</function>, so this option is not
+ universally usable (though the application can register its own
+ <function>atexit()</function> function with equivalent
+ functionality). Therefore, this option should only be used with care;
+ it is primarily intended as a performance tuning aid during application
+      development. This option is disabled by default.</para>
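+      <para>As noted above, an application can register an equivalent
+      <function>atexit()</function> function of its own; a minimal
+      sketch:</para>
+      <programlisting language="C"><![CDATA[
+#include <stdlib.h>
+#include <jemalloc/jemalloc.h>
+
+static void
+dump_stats(void) {
+    /* NULL write callback and cbopaque select the default output
+     * stream; NULL opts selects the default output contents. */
+    malloc_stats_print(NULL, NULL, NULL);
+}
+
+/* Call early in main(), before spawning threads. */
+static void
+install_stats_dump(void) {
+    atexit(dump_stats);
+}]]></programlisting></listitem>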
+ </varlistentry>
+
+ <varlistentry id="opt.stats_print_opts">
+ <term>
+ <mallctl>opt.stats_print_opts</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Options (the <parameter>opts</parameter> string) to pass
+ to the <function>malloc_stats_print()</function> at exit (enabled
+ through <link
+ linkend="opt.stats_print"><mallctl>opt.stats_print</mallctl></link>). See
+ available options in <link
+ linkend="malloc_stats_print_opts"><function>malloc_stats_print()</function></link>.
+ Has no effect unless <link
+ linkend="opt.stats_print"><mallctl>opt.stats_print</mallctl></link> is
+ enabled. The default is <quote></quote>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.junk">
+ <term>
+ <mallctl>opt.junk</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ [<option>--enable-fill</option>]
+ </term>
+ <listitem><para>Junk filling. If set to <quote>alloc</quote>, each byte
+ of uninitialized allocated memory will be initialized to
+ <literal>0xa5</literal>. If set to <quote>free</quote>, all deallocated
+ memory will be initialized to <literal>0x5a</literal>. If set to
+      <quote>true</quote>, both allocated and deallocated memory will be
+      initialized, and if set to <quote>false</quote>, junk filling will be
+      disabled entirely. This is intended for debugging and will impact
+ performance negatively. This option is <quote>false</quote> by default
+ unless <option>--enable-debug</option> is specified during
+ configuration, in which case it is <quote>true</quote> by
+ default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.zero">
+ <term>
+ <mallctl>opt.zero</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-fill</option>]
+ </term>
+ <listitem><para>Zero filling enabled/disabled. If enabled, each byte
+ of uninitialized allocated memory will be initialized to 0. Note that
+ this initialization only happens once for each byte, so
+ <function>realloc()</function> and
+ <function>rallocx()</function> calls do not zero memory that
+ was previously allocated. This is intended for debugging and will
+ impact performance negatively. This option is disabled by default.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.utrace">
+ <term>
+ <mallctl>opt.utrace</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-utrace</option>]
+ </term>
+ <listitem><para>Allocation tracing based on
+ <citerefentry><refentrytitle>utrace</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> enabled/disabled. This option
+ is disabled by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.xmalloc">
+ <term>
+ <mallctl>opt.xmalloc</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-xmalloc</option>]
+ </term>
+ <listitem><para>Abort-on-out-of-memory enabled/disabled. If enabled,
+ rather than returning failure for any allocation function, display a
+ diagnostic message on <constant>STDERR_FILENO</constant> and cause the
+ program to drop core (using
+ <citerefentry><refentrytitle>abort</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry>). If an application is
+ designed to depend on this behavior, set the option at compile time by
+ including the following in the source code:
+ <programlisting language="C"><![CDATA[
+malloc_conf = "xmalloc:true";]]></programlisting>
+ This option is disabled by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.tcache">
+ <term>
+ <mallctl>opt.tcache</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Thread-specific caching (tcache) enabled/disabled. When
+ there are multiple threads, each thread uses a tcache for objects up to
+ a certain size. Thread-specific caching allows many allocations to be
+ satisfied without performing any thread synchronization, at the cost of
+ increased memory use. See the <link
+ linkend="opt.lg_tcache_max"><mallctl>opt.lg_tcache_max</mallctl></link>
+ option for related tuning information. This option is enabled by
+ default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.lg_tcache_max">
+ <term>
+ <mallctl>opt.lg_tcache_max</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Maximum size class (log base 2) to cache in the
+ thread-specific cache (tcache). At a minimum, all small size classes
+ are cached, and at a maximum all large size classes are cached. The
+ default maximum is 32 KiB (2^15).</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.thp">
+ <term>
+ <mallctl>opt.thp</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+      <listitem><para>Transparent hugepage (THP) mode. The settings
+      <quote>always</quote>, <quote>never</quote> and <quote>default</quote>
+      are available if THP is supported by the operating system. The
+      <quote>always</quote> setting enables transparent hugepage for all
+      user memory mappings with
+      <parameter><constant>MADV_HUGEPAGE</constant></parameter>;
+      <quote>never</quote> ensures no transparent hugepage with
+      <parameter><constant>MADV_NOHUGEPAGE</constant></parameter>; the
+      <quote>default</quote> setting makes no changes. Note that this option
+      does not
+ affect THP for jemalloc internal metadata (see <link
+ linkend="opt.metadata_thp"><mallctl>opt.metadata_thp</mallctl></link>);
+ in addition, for arenas with customized <link
+ linkend="arena.i.extent_hooks"><mallctl>extent_hooks</mallctl></link>,
+ this option is bypassed as it is implemented as part of the default
+ extent hooks.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof">
+ <term>
+ <mallctl>opt.prof</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Memory profiling enabled/disabled. If enabled, profile
+ memory allocation activity. See the <link
+ linkend="opt.prof_active"><mallctl>opt.prof_active</mallctl></link>
+ option for on-the-fly activation/deactivation. See the <link
+ linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>
+ option for probabilistic sampling control. See the <link
+ linkend="opt.prof_accum"><mallctl>opt.prof_accum</mallctl></link>
+ option for control of cumulative sample reporting. See the <link
+ linkend="opt.lg_prof_interval"><mallctl>opt.lg_prof_interval</mallctl></link>
+ option for information on interval-triggered profile dumping, the <link
+ linkend="opt.prof_gdump"><mallctl>opt.prof_gdump</mallctl></link>
+ option for information on high-water-triggered profile dumping, and the
+ <link linkend="opt.prof_final"><mallctl>opt.prof_final</mallctl></link>
+ option for final profile dumping. Profile output is compatible with
+ the <command>jeprof</command> command, which is based on the
+ <command>pprof</command> that is developed as part of the <ulink
+ url="http://code.google.com/p/gperftools/">gperftools
+ package</ulink>. See <link linkend="heap_profile_format">HEAP PROFILE
+ FORMAT</link> for heap profile format documentation.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_prefix">
+ <term>
+ <mallctl>opt.prof_prefix</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Filename prefix for profile dumps. If the prefix is
+ set to the empty string, no automatic dumps will occur; this is
+ primarily useful for disabling the automatic final heap dump (which
+ also disables leak reporting, if enabled). The default prefix is
+ <filename>jeprof</filename>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_active">
+ <term>
+ <mallctl>opt.prof_active</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Profiling activated/deactivated. This is a secondary
+ control mechanism that makes it possible to start the application with
+ profiling enabled (see the <link
+ linkend="opt.prof"><mallctl>opt.prof</mallctl></link> option) but
+ inactive, then toggle profiling at any time during program execution
+ with the <link
+ linkend="prof.active"><mallctl>prof.active</mallctl></link> mallctl.
+ This option is enabled by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_thread_active_init">
+ <term>
+ <mallctl>opt.prof_thread_active_init</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Initial setting for <link
+ linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
+ in newly created threads. The initial setting for newly created threads
+ can also be changed during execution via the <link
+ linkend="prof.thread_active_init"><mallctl>prof.thread_active_init</mallctl></link>
+ mallctl. This option is enabled by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.lg_prof_sample">
+ <term>
+ <mallctl>opt.lg_prof_sample</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Average interval (log base 2) between allocation
+ samples, as measured in bytes of allocation activity. Increasing the
+ sampling interval decreases profile fidelity, but also decreases the
+ computational overhead. The default sample interval is 512 KiB (2^19
+ B).</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_accum">
+ <term>
+ <mallctl>opt.prof_accum</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Reporting of cumulative object/byte counts in profile
+ dumps enabled/disabled. If this option is enabled, every unique
+ backtrace must be stored for the duration of execution. Depending on
+ the application, this can impose a large memory overhead, and the
+ cumulative counts are not always of interest. This option is disabled
+ by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.lg_prof_interval">
+ <term>
+ <mallctl>opt.lg_prof_interval</mallctl>
+ (<type>ssize_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Average interval (log base 2) between memory profile
+ dumps, as measured in bytes of allocation activity. The actual
+ interval between dumps may be sporadic because decentralized allocation
+ counters are used to avoid synchronization bottlenecks. Profiles are
+ dumped to files named according to the pattern
+ <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.i&lt;iseq&gt;.heap</filename>,
+ where <literal>&lt;prefix&gt;</literal> is controlled by the
+ <link
+ linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
+ option. By default, interval-triggered profile dumping is disabled
+ (encoded as -1).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_gdump">
+ <term>
+ <mallctl>opt.prof_gdump</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Set the initial state of <link
+ linkend="prof.gdump"><mallctl>prof.gdump</mallctl></link>, which when
+ enabled triggers a memory profile dump every time the total virtual
+ memory exceeds the previous maximum. This option is disabled by
+ default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_final">
+ <term>
+ <mallctl>opt.prof_final</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Use an
+ <citerefentry><refentrytitle>atexit</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry> function to dump final memory
+ usage to a file named according to the pattern
+ <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.f.heap</filename>,
+ where <literal>&lt;prefix&gt;</literal> is controlled by the <link
+ linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
+ option. Note that <function>atexit()</function> may allocate
+ memory during application initialization and then deadlock internally
+ when jemalloc in turn calls <function>atexit()</function>, so
+ this option is not universally usable (though the application can
+ register its own <function>atexit()</function> function with
+ equivalent functionality). This option is disabled by
+ default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="opt.prof_leak">
+ <term>
+ <mallctl>opt.prof_leak</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Leak reporting enabled/disabled. If enabled, use an
+ <citerefentry><refentrytitle>atexit</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry> function to report memory leaks
+ detected by allocation sampling. See the
+ <link linkend="opt.prof"><mallctl>opt.prof</mallctl></link> option for
+ information on analyzing heap profile output. This option is disabled
+ by default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.arena">
+ <term>
+ <mallctl>thread.arena</mallctl>
+ (<type>unsigned</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Get or set the arena associated with the calling
+ thread. If the specified arena was not initialized beforehand (see the
+ <link
+ linkend="arena.i.initialized"><mallctl>arena.i.initialized</mallctl></link>
+ mallctl), it will be automatically initialized as a side effect of
+ calling this interface.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.allocated">
+ <term>
+ <mallctl>thread.allocated</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Get the total number of bytes ever allocated by the
+ calling thread. This counter has the potential to wrap around; it is
+ up to the application to appropriately interpret the counter in such
+ cases.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.allocatedp">
+ <term>
+ <mallctl>thread.allocatedp</mallctl>
+ (<type>uint64_t *</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+        <listitem><para>Get a pointer to the value that is returned by the
+        <link
+        linkend="thread.allocated"><mallctl>thread.allocated</mallctl></link>
+        mallctl. This is useful for avoiding the overhead of repeated
+        <function>mallctl*()</function> calls.</para>
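+        <para>A minimal sketch of the intended pattern (requires
+        <option>--enable-stats</option>; error handling elided); note that
+        the pointer is specific to the calling thread:</para>
+        <programlisting language="C"><![CDATA[
+#include <stdint.h>
+#include <stddef.h>
+#include <jemalloc/jemalloc.h>
+
+static void
+watch_thread_allocated(void) {
+    uint64_t *allocatedp;
+    size_t sz = sizeof(allocatedp);
+    /* Fetch the pointer once; afterward *allocatedp can be read
+     * directly, with no further mallctl() overhead. */
+    if (mallctl("thread.allocatedp", &allocatedp, &sz, NULL, 0) == 0) {
+        uint64_t so_far = *allocatedp;
+        (void)so_far;
+    }
+}]]></programlisting></listitem>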
+ </varlistentry>
+
+ <varlistentry id="thread.deallocated">
+ <term>
+ <mallctl>thread.deallocated</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Get the total number of bytes ever deallocated by the
+ calling thread. This counter has the potential to wrap around; it is
+ up to the application to appropriately interpret the counter in such
+ cases.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.deallocatedp">
+ <term>
+ <mallctl>thread.deallocatedp</mallctl>
+ (<type>uint64_t *</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+        <listitem><para>Get a pointer to the value that is returned by the
+ <link
+ linkend="thread.deallocated"><mallctl>thread.deallocated</mallctl></link>
+ mallctl. This is useful for avoiding the overhead of repeated
+ <function>mallctl*()</function> calls.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.tcache.enabled">
+ <term>
+ <mallctl>thread.tcache.enabled</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Enable/disable calling thread's tcache. The tcache is
+ implicitly flushed as a side effect of becoming
+ disabled (see <link
+ linkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.tcache.flush">
+ <term>
+ <mallctl>thread.tcache.flush</mallctl>
+ (<type>void</type>)
+ <literal>--</literal>
+ </term>
+ <listitem><para>Flush calling thread's thread-specific cache (tcache).
+ This interface releases all cached objects and internal data structures
+ associated with the calling thread's tcache. Ordinarily, this interface
+ need not be called, since automatic periodic incremental garbage
+ collection occurs, and the thread cache is automatically discarded when
+ a thread exits. However, garbage collection is triggered by allocation
+ activity, so it is possible for a thread that stops
+ allocating/deallocating to retain its cache indefinitely, in which case
+ the developer may find manual flushing useful.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.prof.name">
+ <term>
+ <mallctl>thread.prof.name</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal> or
+ <literal>-w</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Get/set the descriptive name associated with the calling
+ thread in memory profile dumps. An internal copy of the name string is
+ created, so the input string need not be maintained after this interface
+ completes execution. The output string of this interface should be
+ copied for non-ephemeral uses, because multiple implementation details
+ can cause asynchronous string deallocation. Furthermore, each
+ invocation of this interface can only read or write; simultaneous
+ read/write is not supported due to string lifetime limitations. The
+ name string must be nil-terminated and comprised only of characters in
+ the sets recognized
+ by <citerefentry><refentrytitle>isgraph</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry> and
+ <citerefentry><refentrytitle>isblank</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="thread.prof.active">
+ <term>
+ <mallctl>thread.prof.active</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Control whether sampling is currently active for the
+ calling thread. This is an activation mechanism in addition to <link
+ linkend="prof.active"><mallctl>prof.active</mallctl></link>; both must
+ be active for the calling thread to sample. This flag is enabled by
+ default.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="tcache.create">
+ <term>
+ <mallctl>tcache.create</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Create an explicit thread-specific cache (tcache) and
+ return an identifier that can be passed to the <link
+ linkend="MALLOCX_TCACHE"><constant>MALLOCX_TCACHE(<parameter>tc</parameter>)</constant></link>
+ macro to explicitly use the specified cache rather than the
+ automatically managed one that is used by default. Each explicit cache
+ can be used by only one thread at a time; the application must assure
+ that this constraint holds.
+        </para>
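+        <para>A minimal sketch of creating, using, and destroying an
+        explicit tcache (error handling partly elided):</para>
+        <programlisting language="C"><![CDATA[
+#include <stddef.h>
+#include <jemalloc/jemalloc.h>
+
+static void
+explicit_tcache_demo(void) {
+    unsigned tc;
+    size_t sz = sizeof(tc);
+    if (mallctl("tcache.create", &tc, &sz, NULL, 0) != 0)
+        return;
+    /* Route allocation and deallocation through the explicit cache. */
+    void *p = mallocx(128, MALLOCX_TCACHE(tc));
+    if (p != NULL)
+        dallocx(p, MALLOCX_TCACHE(tc));
+    /* Release the cache and make its identifier reusable. */
+    mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
+}]]></programlisting></listitem>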
+ </varlistentry>
+
+ <varlistentry id="tcache.flush">
+ <term>
+ <mallctl>tcache.flush</mallctl>
+ (<type>unsigned</type>)
+ <literal>-w</literal>
+ </term>
+ <listitem><para>Flush the specified thread-specific cache (tcache). The
+ same considerations apply to this interface as to <link
+ linkend="thread.tcache.flush"><mallctl>thread.tcache.flush</mallctl></link>,
+ except that the tcache will never be automatically discarded.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="tcache.destroy">
+ <term>
+ <mallctl>tcache.destroy</mallctl>
+ (<type>unsigned</type>)
+ <literal>-w</literal>
+ </term>
+ <listitem><para>Flush the specified thread-specific cache (tcache) and
+ make the identifier available for use during a future tcache creation.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.initialized">
+ <term>
+ <mallctl>arena.&lt;i&gt;.initialized</mallctl>
+ (<type>bool</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Get whether the specified arena's statistics are
+ initialized (i.e. the arena was initialized prior to the current epoch).
+ This interface can also be nominally used to query whether the merged
+ statistics corresponding to <constant>MALLCTL_ARENAS_ALL</constant> are
+ initialized (always true).</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.decay">
+ <term>
+ <mallctl>arena.&lt;i&gt;.decay</mallctl>
+ (<type>void</type>)
+ <literal>--</literal>
+ </term>
+ <listitem><para>Trigger decay-based purging of unused dirty/muzzy pages
+ for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
+ <constant>MALLCTL_ARENAS_ALL</constant>. The proportion of unused
+ dirty/muzzy pages to be purged depends on the current time; see <link
+ linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
+ and <link
+ linkend="opt.muzzy_decay_ms"><mallctl>opt.muzy_decay_ms</mallctl></link>
+ for details.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.purge">
+ <term>
+ <mallctl>arena.&lt;i&gt;.purge</mallctl>
+ (<type>void</type>)
+ <literal>--</literal>
+ </term>
+ <listitem><para>Purge all unused dirty pages for arena &lt;i&gt;, or for
+ all arenas if &lt;i&gt; equals <constant>MALLCTL_ARENAS_ALL</constant>.
+        </para>
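+        <para>Because the arena index is embedded in the mallctl name, a
+        minimal sketch formats the name first
+        (<varname>arena_ind</varname> is assumed to be a previously obtained
+        index):</para>
+        <programlisting language="C"><![CDATA[
+#include <stdio.h>
+#include <jemalloc/jemalloc.h>
+
+static void
+purge_arena(unsigned arena_ind) {
+    char cmd[64];
+    snprintf(cmd, sizeof(cmd), "arena.%u.purge", arena_ind);
+    mallctl(cmd, NULL, NULL, NULL, 0);
+}
+
+static void
+purge_all_arenas(void) {
+    char cmd[64];
+    /* MALLCTL_ARENAS_ALL is a distinguished index covering all arenas. */
+    snprintf(cmd, sizeof(cmd), "arena.%d.purge", MALLCTL_ARENAS_ALL);
+    mallctl(cmd, NULL, NULL, NULL, 0);
+}]]></programlisting></listitem>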
+ </varlistentry>
+
+ <varlistentry id="arena.i.reset">
+ <term>
+ <mallctl>arena.&lt;i&gt;.reset</mallctl>
+ (<type>void</type>)
+ <literal>--</literal>
+ </term>
+ <listitem><para>Discard all of the arena's extant allocations. This
+ interface can only be used with arenas explicitly created via <link
+ linkend="arenas.create"><mallctl>arenas.create</mallctl></link>. None
+        of the arena's discarded/cached allocations may be accessed
+        afterward. As
+ part of this requirement, all thread caches which were used to
+ allocate/deallocate in conjunction with the arena must be flushed
+ beforehand.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.destroy">
+ <term>
+ <mallctl>arena.&lt;i&gt;.destroy</mallctl>
+ (<type>void</type>)
+ <literal>--</literal>
+ </term>
+ <listitem><para>Destroy the arena. Discard all of the arena's extant
+ allocations using the same mechanism as for <link
+ linkend="arena.i.reset"><mallctl>arena.&lt;i&gt;.reset</mallctl></link>
+ (with all the same constraints and side effects), merge the arena stats
+ into those accessible at arena index
+ <constant>MALLCTL_ARENAS_DESTROYED</constant>, and then completely
+ discard all metadata associated with the arena. Future calls to <link
+ linkend="arenas.create"><mallctl>arenas.create</mallctl></link> may
+ recycle the arena index. Destruction will fail if any threads are
+ currently associated with the arena as a result of calls to <link
+ linkend="thread.arena"><mallctl>thread.arena</mallctl></link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.dss">
+ <term>
+ <mallctl>arena.&lt;i&gt;.dss</mallctl>
+ (<type>const char *</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Set the precedence of dss allocation as related to mmap
+ allocation for arena &lt;i&gt;, or for all arenas if &lt;i&gt; equals
+ <constant>MALLCTL_ARENAS_ALL</constant>. See <link
+ linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for supported
+ settings.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.dirty_decay_ms">
+ <term>
+ <mallctl>arena.&lt;i&gt;.dirty_decay_ms</mallctl>
+ (<type>ssize_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Current per-arena approximate time in milliseconds from
+ the creation of a set of unused dirty pages until an equivalent set of
+ unused dirty pages is purged and/or reused. Each time this interface is
+ set, all currently unused dirty pages are considered to have fully
+ decayed, which causes immediate purging of all unused dirty pages unless
+ the decay time is set to -1 (i.e. purging disabled). See <link
+ linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
+ for additional information.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.muzzy_decay_ms">
+ <term>
+ <mallctl>arena.&lt;i&gt;.muzzy_decay_ms</mallctl>
+ (<type>ssize_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Current per-arena approximate time in milliseconds from
+ the creation of a set of unused muzzy pages until an equivalent set of
+ unused muzzy pages is purged and/or reused. Each time this interface is
+ set, all currently unused muzzy pages are considered to have fully
+ decayed, which causes immediate purging of all unused muzzy pages unless
+ the decay time is set to -1 (i.e. purging disabled). See <link
+ linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
+ for additional information.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.retain_grow_limit">
+ <term>
+ <mallctl>arena.&lt;i&gt;.retain_grow_limit</mallctl>
+ (<type>size_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Maximum size to grow retained region (only relevant when
+ <link linkend="opt.retain"><mallctl>opt.retain</mallctl></link> is
+        enabled). This controls the maximum increment by which to expand
+        virtual memory, or the allocation size passed through <link
+        linkend="arena.i.extent_hooks"><mallctl>arena.&lt;i&gt;.extent_hooks</mallctl></link>.
+ In particular, if customized extent hooks reserve physical memory
+ (e.g. 1G huge pages), this is useful to control the allocation hook's
+ input size. The default is no limit.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arena.i.extent_hooks">
+ <term>
+ <mallctl>arena.&lt;i&gt;.extent_hooks</mallctl>
+ (<type>extent_hooks_t *</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Get or set the extent management hook functions for
+ arena &lt;i&gt;. The functions must be capable of operating on all
+ extant extents associated with arena &lt;i&gt;, usually by passing
+ unknown extents to the replaced functions. In practice, it is feasible
+ to control allocation for arenas explicitly created via <link
+ linkend="arenas.create"><mallctl>arenas.create</mallctl></link> such
+ that all extents originate from an application-supplied extent allocator
+ (by specifying the custom extent hook functions during arena creation).
+        However, the API guarantees for the automatically created arenas may
+        be relaxed: hooks set there may be called in a <quote>best
+        effort</quote> fashion; in addition, there may be extents created
+        prior to the application having an
+ opportunity to take over extent allocation.</para>
+
+ <programlisting language="C"><![CDATA[
+typedef struct extent_hooks_s extent_hooks_t;
+struct extent_hooks_s {
+ extent_alloc_t *alloc;
+ extent_dalloc_t *dalloc;
+ extent_destroy_t *destroy;
+ extent_commit_t *commit;
+ extent_decommit_t *decommit;
+ extent_purge_t *purge_lazy;
+ extent_purge_t *purge_forced;
+ extent_split_t *split;
+ extent_merge_t *merge;
+};]]></programlisting>
+ <para>The <type>extent_hooks_t</type> structure comprises function
+ pointers which are described individually below. jemalloc uses these
+ functions to manage extent lifetime, which starts off with allocation of
+ mapped committed memory, in the simplest case followed by deallocation.
+ However, there are performance and platform reasons to retain extents
+ for later reuse. Cleanup attempts cascade from deallocation to decommit
+ to forced purging to lazy purging, which gives the extent management
+ functions opportunities to reject the most permanent cleanup operations
+ in favor of less permanent (and often less costly) operations. All
+ operations except allocation can be universally opted out of by setting
+ the hook pointers to <constant>NULL</constant>, or selectively opted out
+ of by returning failure. Note that once the extent hook is set, the
+ structure is accessed directly by the associated arenas, so it must
+ remain valid for the entire lifetime of the arenas.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef void *<function>(extent_alloc_t)</function></funcdef>
+ <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
+ <paramdef>void *<parameter>new_addr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>alignment</parameter></paramdef>
+ <paramdef>bool *<parameter>zero</parameter></paramdef>
+ <paramdef>bool *<parameter>commit</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>An extent allocation function conforms to the
+ <type>extent_alloc_t</type> type and upon success returns a pointer to
+ <parameter>size</parameter> bytes of mapped memory on behalf of arena
+ <parameter>arena_ind</parameter> such that the extent's base address is
+ a multiple of <parameter>alignment</parameter>, as well as setting
+ <parameter>*zero</parameter> to indicate whether the extent is zeroed
+ and <parameter>*commit</parameter> to indicate whether the extent is
+ committed. Upon error the function returns <constant>NULL</constant>
+ and leaves <parameter>*zero</parameter> and
+ <parameter>*commit</parameter> unmodified. The
+ <parameter>size</parameter> parameter is always a multiple of the page
+ size. The <parameter>alignment</parameter> parameter is always a power
+ of two at least as large as the page size. Zeroing is mandatory if
+ <parameter>*zero</parameter> is true upon function entry. Committing is
+ mandatory if <parameter>*commit</parameter> is true upon function entry.
+ If <parameter>new_addr</parameter> is not <constant>NULL</constant>, the
+ returned pointer must be <parameter>new_addr</parameter> on success or
+ <constant>NULL</constant> on error. Committed memory may be committed
+ in absolute terms as on a system that does not overcommit, or in
+ implicit terms as on a system that overcommits and satisfies physical
+ memory needs on demand via soft page faults. Note that replacing the
+ default extent allocation function makes the arena's <link
+ linkend="arena.i.dss"><mallctl>arena.&lt;i&gt;.dss</mallctl></link>
+ setting irrelevant.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(extent_dalloc_t)</function></funcdef>
+ <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
+ <paramdef>void *<parameter>addr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>bool <parameter>committed</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>
+ An extent deallocation function conforms to the
+ <type>extent_dalloc_t</type> type and deallocates an extent at given
+ <parameter>addr</parameter> and <parameter>size</parameter> with
+    <parameter>committed</parameter>/decommitted memory as indicated, on
+ behalf of arena <parameter>arena_ind</parameter>, returning false upon
+ success. If the function returns true, this indicates opt-out from
+ deallocation; the virtual memory mapping associated with the extent
+ remains mapped, in the same commit state, and available for future use,
+ in which case it will be automatically retained for later reuse.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef void <function>(extent_destroy_t)</function></funcdef>
+ <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
+ <paramdef>void *<parameter>addr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>bool <parameter>committed</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>
+ An extent destruction function conforms to the
+ <type>extent_destroy_t</type> type and unconditionally destroys an
+ extent at given <parameter>addr</parameter> and
+ <parameter>size</parameter> with
+    <parameter>committed</parameter>/decommitted memory as indicated, on
+ behalf of arena <parameter>arena_ind</parameter>. This function may be
+ called to destroy retained extents during arena destruction (see <link
+ linkend="arena.i.destroy"><mallctl>arena.&lt;i&gt;.destroy</mallctl></link>).</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(extent_commit_t)</function></funcdef>
+ <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
+ <paramdef>void *<parameter>addr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>offset</parameter></paramdef>
+ <paramdef>size_t <parameter>length</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>An extent commit function conforms to the
+ <type>extent_commit_t</type> type and commits zeroed physical memory to
+ back pages within an extent at given <parameter>addr</parameter> and
+ <parameter>size</parameter> at <parameter>offset</parameter> bytes,
+ extending for <parameter>length</parameter> on behalf of arena
+ <parameter>arena_ind</parameter>, returning false upon success.
+ Committed memory may be committed in absolute terms as on a system that
+ does not overcommit, or in implicit terms as on a system that
+ overcommits and satisfies physical memory needs on demand via soft page
+ faults. If the function returns true, this indicates insufficient
+ physical memory to satisfy the request.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(extent_decommit_t)</function></funcdef>
+ <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
+ <paramdef>void *<parameter>addr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>offset</parameter></paramdef>
+ <paramdef>size_t <parameter>length</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>An extent decommit function conforms to the
+ <type>extent_decommit_t</type> type and decommits any physical memory
+ that is backing pages within an extent at given
+ <parameter>addr</parameter> and <parameter>size</parameter> at
+ <parameter>offset</parameter> bytes, extending for
+ <parameter>length</parameter> on behalf of arena
+ <parameter>arena_ind</parameter>, returning false upon success, in which
+ case the pages will be committed via the extent commit function before
+ being reused. If the function returns true, this indicates opt-out from
+ decommit; the memory remains committed and available for future use, in
+ which case it will be automatically retained for later reuse.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(extent_purge_t)</function></funcdef>
+ <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
+ <paramdef>void *<parameter>addr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>offset</parameter></paramdef>
+ <paramdef>size_t <parameter>length</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>An extent purge function conforms to the
+ <type>extent_purge_t</type> type and discards physical pages
+ within the virtual memory mapping associated with an extent at given
+ <parameter>addr</parameter> and <parameter>size</parameter> at
+ <parameter>offset</parameter> bytes, extending for
+ <parameter>length</parameter> on behalf of arena
+ <parameter>arena_ind</parameter>. A lazy extent purge function (e.g.
+ implemented via
+ <function>madvise(<parameter>...</parameter><parameter><constant>MADV_FREE</constant></parameter>)</function>)
+ can delay purging indefinitely and leave the pages within the purged
+    virtual memory range in an indeterminate state, whereas a forced extent
+ purge function immediately purges, and the pages within the virtual
+ memory range will be zero-filled the next time they are accessed. If
+ the function returns true, this indicates failure to purge.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(extent_split_t)</function></funcdef>
+ <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
+ <paramdef>void *<parameter>addr</parameter></paramdef>
+ <paramdef>size_t <parameter>size</parameter></paramdef>
+ <paramdef>size_t <parameter>size_a</parameter></paramdef>
+ <paramdef>size_t <parameter>size_b</parameter></paramdef>
+ <paramdef>bool <parameter>committed</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>An extent split function conforms to the
+ <type>extent_split_t</type> type and optionally splits an extent at
+ given <parameter>addr</parameter> and <parameter>size</parameter> into
+ two adjacent extents, the first of <parameter>size_a</parameter> bytes,
+ and the second of <parameter>size_b</parameter> bytes, operating on
+ <parameter>committed</parameter>/decommitted memory as indicated, on
+ behalf of arena <parameter>arena_ind</parameter>, returning false upon
+ success. If the function returns true, this indicates that the extent
+ remains unsplit and therefore should continue to be operated on as a
+ whole.</para>
+
+ <funcsynopsis><funcprototype>
+ <funcdef>typedef bool <function>(extent_merge_t)</function></funcdef>
+ <paramdef>extent_hooks_t *<parameter>extent_hooks</parameter></paramdef>
+ <paramdef>void *<parameter>addr_a</parameter></paramdef>
+ <paramdef>size_t <parameter>size_a</parameter></paramdef>
+ <paramdef>void *<parameter>addr_b</parameter></paramdef>
+ <paramdef>size_t <parameter>size_b</parameter></paramdef>
+ <paramdef>bool <parameter>committed</parameter></paramdef>
+ <paramdef>unsigned <parameter>arena_ind</parameter></paramdef>
+ </funcprototype></funcsynopsis>
+ <literallayout></literallayout>
+ <para>An extent merge function conforms to the
+ <type>extent_merge_t</type> type and optionally merges adjacent extents,
+ at given <parameter>addr_a</parameter> and <parameter>size_a</parameter>
+ with given <parameter>addr_b</parameter> and
+ <parameter>size_b</parameter> into one contiguous extent, operating on
+ <parameter>committed</parameter>/decommitted memory as indicated, on
+ behalf of arena <parameter>arena_ind</parameter>, returning false upon
+ success. If the function returns true, this indicates that the extents
+ remain distinct mappings and therefore should continue to be operated on
+ independently.</para>
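+
+    <para>For orientation, a minimal sketch of supplying custom hooks at
+    arena creation time. The hypothetical <function>my_alloc()</function>
+    delegates to <citerefentry><refentrytitle>mmap</refentrytitle>
+    <manvolnum>2</manvolnum></citerefentry> and leaves every other hook
+    <constant>NULL</constant> (the universal opt-out described above); a
+    production hook must also honor alignments larger than the page
+    size:</para>
+
+    <programlisting language="C"><![CDATA[
+#include <stdbool.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <jemalloc/jemalloc.h>
+
+static void *
+my_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
+    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
+    (void)extent_hooks; (void)alignment; (void)arena_ind; /* unused here */
+    if (new_addr != NULL)
+        return NULL;    /* Decline requests for a specific address. */
+    /* Sketch only: assumes alignment <= the system page size. */
+    void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
+        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+    if (p == MAP_FAILED)
+        return NULL;
+    *zero = true;    /* Fresh anonymous mappings are zero-filled... */
+    *commit = true;  /* ...and committed in the overcommit sense. */
+    return p;
+}
+
+/* All other operations are opted out of by leaving the pointers NULL. */
+static extent_hooks_t my_hooks = { .alloc = my_alloc };
+
+static int
+create_arena_with_hooks(unsigned *arena_ind) {
+    size_t sz = sizeof(*arena_ind);
+    extent_hooks_t *hooks = &my_hooks;
+    return mallctl("arenas.create", arena_ind, &sz, &hooks,
+        sizeof(hooks));
+}]]></programlisting>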
+ </listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.narenas">
+ <term>
+ <mallctl>arenas.narenas</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Current limit on number of arenas.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.dirty_decay_ms">
+ <term>
+ <mallctl>arenas.dirty_decay_ms</mallctl>
+ (<type>ssize_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Current default per-arena approximate time in
+ milliseconds from the creation of a set of unused dirty pages until an
+ equivalent set of unused dirty pages is purged and/or reused, used to
+ initialize <link
+ linkend="arena.i.dirty_decay_ms"><mallctl>arena.&lt;i&gt;.dirty_decay_ms</mallctl></link>
+ during arena creation. See <link
+ linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
+ for additional information.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.muzzy_decay_ms">
+ <term>
+ <mallctl>arenas.muzzy_decay_ms</mallctl>
+ (<type>ssize_t</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Current default per-arena approximate time in
+ milliseconds from the creation of a set of unused muzzy pages until an
+ equivalent set of unused muzzy pages is purged and/or reused, used to
+ initialize <link
+ linkend="arena.i.muzzy_decay_ms"><mallctl>arena.&lt;i&gt;.muzzy_decay_ms</mallctl></link>
+ during arena creation. See <link
+ linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
+ for additional information.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.quantum">
+ <term>
+ <mallctl>arenas.quantum</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Quantum size.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.page">
+ <term>
+ <mallctl>arenas.page</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Page size.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.tcache_max">
+ <term>
+ <mallctl>arenas.tcache_max</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Maximum thread-cached size class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.nbins">
+ <term>
+ <mallctl>arenas.nbins</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of bin size classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.nhbins">
+ <term>
+ <mallctl>arenas.nhbins</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Total number of thread cache bin size
+ classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.bin.i.size">
+ <term>
+ <mallctl>arenas.bin.&lt;i&gt;.size</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Maximum size supported by size class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.bin.i.nregs">
+ <term>
+ <mallctl>arenas.bin.&lt;i&gt;.nregs</mallctl>
+ (<type>uint32_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of regions per slab.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.bin.i.slab_size">
+ <term>
+ <mallctl>arenas.bin.&lt;i&gt;.slab_size</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of bytes per slab.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.nlextents">
+ <term>
+ <mallctl>arenas.nlextents</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Total number of large size classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.lextent.i.size">
+ <term>
+ <mallctl>arenas.lextent.&lt;i&gt;.size</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Maximum size supported by this large size
+ class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="arenas.create">
+ <term>
+ <mallctl>arenas.create</mallctl>
+ (<type>unsigned</type>, <type>extent_hooks_t *</type>)
+ <literal>rw</literal>
+ </term>
+ <listitem><para>Explicitly create a new arena outside the range of
+ automatically managed arenas, with optionally specified extent hooks,
+        and return the new arena index.</para>
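+        <para>A minimal sketch pairing this with
+        <constant>MALLOCX_ARENA</constant> (error handling elided):</para>
+        <programlisting language="C"><![CDATA[
+#include <stddef.h>
+#include <jemalloc/jemalloc.h>
+
+static void *
+alloc_from_new_arena(size_t size) {
+    unsigned arena_ind;
+    size_t sz = sizeof(arena_ind);
+    if (mallctl("arenas.create", &arena_ind, &sz, NULL, 0) != 0)
+        return NULL;
+    /* Bypass the tcache so the allocation comes from the new arena. */
+    return mallocx(size, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
+}]]></programlisting></listitem>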
+ </varlistentry>
+
+ <varlistentry id="arenas.lookup">
+ <term>
+ <mallctl>arenas.lookup</mallctl>
+ (<type>unsigned</type>, <type>void*</type>)
+ <literal>rw</literal>
+ </term>
+        <listitem><para>Index of the arena to which an allocation
+        belongs.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.thread_active_init">
+ <term>
+ <mallctl>prof.thread_active_init</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Control the initial setting for <link
+ linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
+ in newly created threads. See the <link
+ linkend="opt.prof_thread_active_init"><mallctl>opt.prof_thread_active_init</mallctl></link>
+ option for additional information.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.active">
+ <term>
+ <mallctl>prof.active</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Control whether sampling is currently active. See the
+ <link
+ linkend="opt.prof_active"><mallctl>opt.prof_active</mallctl></link>
+ option for additional information, as well as the interrelated <link
+ linkend="thread.prof.active"><mallctl>thread.prof.active</mallctl></link>
+ mallctl.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.dump">
+ <term>
+ <mallctl>prof.dump</mallctl>
+ (<type>const char *</type>)
+ <literal>-w</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Dump a memory profile to the specified file, or if NULL
+ is specified, to a file according to the pattern
+ <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.m&lt;mseq&gt;.heap</filename>,
+ where <literal>&lt;prefix&gt;</literal> is controlled by the
+ <link
+ linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
+        option.</para>
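+        <para>A minimal sketch of triggering a dump to a hypothetical file
+        name (requires a build with <option>--enable-prof</option> and
+        active profiling):</para>
+        <programlisting language="C"><![CDATA[
+#include <jemalloc/jemalloc.h>
+
+static int
+dump_profile(void) {
+    /* The new value has type const char *, so pass its address. */
+    const char *fname = "myprog.heap";    /* hypothetical name */
+    return mallctl("prof.dump", NULL, NULL, &fname, sizeof(fname));
+}]]></programlisting></listitem>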
+ </varlistentry>
+
+ <varlistentry id="prof.gdump">
+ <term>
+ <mallctl>prof.gdump</mallctl>
+ (<type>bool</type>)
+ <literal>rw</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>When enabled, trigger a memory profile dump every time
+ the total virtual memory exceeds the previous maximum. Profiles are
+ dumped to files named according to the pattern
+ <filename>&lt;prefix&gt;.&lt;pid&gt;.&lt;seq&gt;.u&lt;useq&gt;.heap</filename>,
+ where <literal>&lt;prefix&gt;</literal> is controlled by the <link
+ linkend="opt.prof_prefix"><mallctl>opt.prof_prefix</mallctl></link>
+ option.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.reset">
+ <term>
+ <mallctl>prof.reset</mallctl>
+ (<type>size_t</type>)
+ <literal>-w</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Reset all memory profile statistics, and optionally
+ update the sample rate (see <link
+ linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>
+ and <link
+ linkend="prof.lg_sample"><mallctl>prof.lg_sample</mallctl></link>).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.lg_sample">
+ <term>
+ <mallctl>prof.lg_sample</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Get the current sample rate (see <link
+ linkend="opt.lg_prof_sample"><mallctl>opt.lg_prof_sample</mallctl></link>).
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="prof.interval">
+ <term>
+ <mallctl>prof.interval</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-prof</option>]
+ </term>
+ <listitem><para>Average number of bytes allocated between
+ interval-based profile dumps. See the
+ <link
+ linkend="opt.lg_prof_interval"><mallctl>opt.lg_prof_interval</mallctl></link>
+ option for additional information.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.allocated">
+ <term>
+ <mallctl>stats.allocated</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Total number of bytes allocated by the
+        application.</para>
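+        <para>Statistics are cached and refreshed via the
+        <mallctl>epoch</mallctl> mallctl; a minimal sketch of reading this
+        counter (requires <option>--enable-stats</option>):</para>
+        <programlisting language="C"><![CDATA[
+#include <stdint.h>
+#include <stdio.h>
+#include <jemalloc/jemalloc.h>
+
+static void
+print_allocated(void) {
+    uint64_t epoch = 1;
+    size_t esz = sizeof(epoch);
+    /* Advance the epoch so cached statistics are refreshed. */
+    mallctl("epoch", &epoch, &esz, &epoch, esz);
+
+    size_t allocated;
+    size_t sz = sizeof(allocated);
+    if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
+        printf("stats.allocated: %zu\n", allocated);
+}]]></programlisting></listitem>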
+ </varlistentry>
+
+ <varlistentry id="stats.active">
+ <term>
+ <mallctl>stats.active</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Total number of bytes in active pages allocated by the
+ application. This is a multiple of the page size, and greater than or
+ equal to <link
+ linkend="stats.allocated"><mallctl>stats.allocated</mallctl></link>.
+ This does not include <link linkend="stats.arenas.i.pdirty">
+ <mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl></link>,
+ <link linkend="stats.arenas.i.pmuzzy">
+ <mallctl>stats.arenas.&lt;i&gt;.pmuzzy</mallctl></link>, nor pages
+ entirely devoted to allocator metadata.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.metadata">
+ <term>
+ <mallctl>stats.metadata</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Total number of bytes dedicated to metadata, which
+ comprise base allocations used for bootstrap-sensitive allocator
+ metadata structures (see <link
+ linkend="stats.arenas.i.base"><mallctl>stats.arenas.&lt;i&gt;.base</mallctl></link>)
+ and internal allocations (see <link
+ linkend="stats.arenas.i.internal"><mallctl>stats.arenas.&lt;i&gt;.internal</mallctl></link>).
+        Transparent huge page usage (enabled with <link
+        linkend="opt.metadata_thp">opt.metadata_thp</link>) is not
+        counted.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.metadata_thp">
+ <term>
+ <mallctl>stats.metadata_thp</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of transparent huge pages (THP) used for
+ metadata. See <link
+ linkend="stats.metadata"><mallctl>stats.metadata</mallctl></link> and
+ <link linkend="opt.metadata_thp">opt.metadata_thp</link>) for
+ details.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.resident">
+ <term>
+ <mallctl>stats.resident</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Maximum number of bytes in physically resident data
+ pages mapped by the allocator, comprising all pages dedicated to
+ allocator metadata, pages backing active allocations, and unused dirty
+ pages. This is a maximum rather than precise because pages may not
+ actually be physically resident if they correspond to demand-zeroed
+ virtual memory that has not yet been touched. This is a multiple of the
+ page size, and is larger than <link
+ linkend="stats.active"><mallctl>stats.active</mallctl></link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.mapped">
+ <term>
+ <mallctl>stats.mapped</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Total number of bytes in active extents mapped by the
+ allocator. This is larger than <link
+ linkend="stats.active"><mallctl>stats.active</mallctl></link>. This
+ does not include inactive extents, even those that contain unused dirty
+ pages, which means that there is no strict ordering between this and
+ <link
+ linkend="stats.resident"><mallctl>stats.resident</mallctl></link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.retained">
+ <term>
+ <mallctl>stats.retained</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Total number of bytes in virtual memory mappings that
+ were retained rather than being returned to the operating system via
+ e.g. <citerefentry><refentrytitle>munmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> or similar. Retained virtual
+ memory is typically untouched, decommitted, or purged, so it has no
+ strongly associated physical memory (see <link
+ linkend="arena.i.extent_hooks">extent hooks</link> for details).
+ Retained memory is excluded from mapped memory statistics, e.g. <link
+ linkend="stats.mapped"><mallctl>stats.mapped</mallctl></link>.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.background_thread.num_threads">
+ <term>
+ <mallctl>stats.background_thread.num_threads</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of <link linkend="background_thread">background
+ threads</link> currently running.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.background_thread.num_runs">
+ <term>
+ <mallctl>stats.background_thread.num_runs</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para> Total number of runs from all <link
+ linkend="background_thread">background threads</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.background_thread.run_interval">
+ <term>
+ <mallctl>stats.background_thread.run_interval</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para> Average run interval in nanoseconds of <link
+ linkend="background_thread">background threads</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.mutexes.ctl">
+ <term>
+ <mallctl>stats.mutexes.ctl.{counter}</mallctl>
+ (<type>counter specific type</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>ctl</varname> mutex (global
+ scope; mallctl related). <mallctl>{counter}</mallctl> is one of the
+ counters below:</para>
+ <varlistentry id="mutex_counters">
+ <listitem><para><varname>num_ops</varname> (<type>uint64_t</type>):
+ Total number of lock acquisition operations on this mutex.</para>
+
+ <para><varname>num_spin_acq</varname> (<type>uint64_t</type>): Number
+ of times the mutex was spin-acquired. When the mutex is locked and
+ cannot be acquired immediately, jemalloc performs a short period of
+ spin-retry. Acquisition through spinning generally means the
+ contention was lightweight and did not cause context
+ switches.</para>
+
+ <para><varname>num_wait</varname> (<type>uint64_t</type>): Number of
+ times the mutex was wait-acquired, meaning the contention was not
+ resolved by spin-retry and a blocking operation was likely required to
+ acquire the mutex. This event generally implies higher cost and longer
+ delay, and should be investigated if it happens often.</para>
+
+ <para><varname>max_wait_time</varname> (<type>uint64_t</type>):
+ Maximum length of time in nanoseconds spent on a single wait-acquired
+ lock operation. Note that to avoid profiling overhead on the common
+ path, this does not consider spin-acquired cases.</para>
+
+ <para><varname>total_wait_time</varname> (<type>uint64_t</type>):
+ Cumulative time in nanoseconds spent on wait-acquired lock operations.
+ Similarly, spin-acquired cases are not considered.</para>
+
+ <para><varname>max_num_thds</varname> (<type>uint32_t</type>): Maximum
+ number of threads waiting on this mutex simultaneously. Similarly,
+ spin-acquired cases are not considered.</para>
+
+ <para><varname>num_owner_switch</varname> (<type>uint64_t</type>):
+ Number of times the current mutex owner is different from the previous
+ one. This event does not generally imply an issue; rather it is an
+ indicator of how often the protected data are accessed by different
+ threads.
+ </para>
+ </listitem>
+ </varlistentry>
+ </listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.mutexes.background_thread">
+ <term>
+ <mallctl>stats.mutexes.background_thread.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>background_thread</varname> mutex
+ (global scope; <link
+ linkend="background_thread"><mallctl>background_thread</mallctl></link>
+ related). <mallctl>{counter}</mallctl> is one of the counters in <link
+ linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.mutexes.prof">
+ <term>
+ <mallctl>stats.mutexes.prof.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>prof</varname> mutex (global
+ scope; profiling related). <mallctl>{counter}</mallctl> is one of the
+ counters in <link linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.mutexes.reset">
+ <term>
+ <mallctl>stats.mutexes.reset</mallctl>
+ (<type>void</type>) <literal>--</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Reset all mutex profile statistics, including global
+ mutexes, arena mutexes and bin mutexes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.dss">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.dss</mallctl>
+ (<type>const char *</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>dss (<citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>) allocation precedence as
+ related to <citerefentry><refentrytitle>mmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry> allocation. See <link
+ linkend="opt.dss"><mallctl>opt.dss</mallctl></link> for details.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.dirty_decay_ms">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.dirty_decay_ms</mallctl>
+ (<type>ssize_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Approximate time in milliseconds from the creation of a
+ set of unused dirty pages until an equivalent set of unused dirty pages
+ is purged and/or reused. See <link
+ linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
+ for details.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.muzzy_decay_ms">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.muzzy_decay_ms</mallctl>
+ (<type>ssize_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Approximate time in milliseconds from the creation of a
+ set of unused muzzy pages until an equivalent set of unused muzzy pages
+ is purged and/or reused. See <link
+ linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
+ for details.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.nthreads">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.nthreads</mallctl>
+ (<type>unsigned</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of threads currently assigned to
+ arena.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.uptime">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.uptime</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Time elapsed (in nanoseconds) since the arena was
+ created. If &lt;i&gt; equals <constant>0</constant> or
+ <constant>MALLCTL_ARENAS_ALL</constant>, this is the uptime since malloc
+ initialization.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.pactive">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.pactive</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of pages in active extents.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.pdirty">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.pdirty</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of pages within unused extents that are
+ potentially dirty, and for which <function>madvise()</function> or
+ similar has not been called. See <link
+ linkend="opt.dirty_decay_ms"><mallctl>opt.dirty_decay_ms</mallctl></link>
+ for a description of dirty pages.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.pmuzzy">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.pmuzzy</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Number of pages within unused extents that are muzzy.
+ See <link
+ linkend="opt.muzzy_decay_ms"><mallctl>opt.muzzy_decay_ms</mallctl></link>
+ for a description of muzzy pages.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mapped">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mapped</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of mapped bytes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.retained">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.retained</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of retained bytes. See <link
+ linkend="stats.retained"><mallctl>stats.retained</mallctl></link> for
+ details.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.extent_avail">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.extent_avail</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of allocated (but unused) extent structs in this
+ arena.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.base">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.base</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>
+ Number of bytes dedicated to bootstrap-sensitive allocator metadata
+ structures.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.internal">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.internal</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of bytes dedicated to internal allocations.
+ Internal allocations differ from application-originated allocations in
+ that they are for internal use, and that they are omitted from heap
+ profiles.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.metadata_thp">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.metadata_thp</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of transparent huge pages (THP) used for
+ metadata. See <link linkend="opt.metadata_thp">opt.metadata_thp</link>
+ for details.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.resident">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.resident</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Maximum number of bytes in physically resident data
+ pages mapped by the arena, comprising all pages dedicated to allocator
+ metadata, pages backing active allocations, and unused dirty pages.
+ This is a maximum rather than a precise value because pages may not actually be
+ physically resident if they correspond to demand-zeroed virtual memory
+ that has not yet been touched. This is a multiple of the page
+ size.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.dirty_npurge">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.dirty_npurge</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of dirty page purge sweeps performed.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.dirty_nmadvise">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.dirty_nmadvise</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of <function>madvise()</function> or similar
+ calls made to purge dirty pages.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.dirty_purged">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.dirty_purged</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of dirty pages purged.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.muzzy_npurge">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.muzzy_npurge</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of muzzy page purge sweeps performed.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.muzzy_nmadvise">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.muzzy_nmadvise</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of <function>madvise()</function> or similar
+ calls made to purge muzzy pages.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.muzzy_purged">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.muzzy_purged</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of muzzy pages purged.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.small.allocated">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.small.allocated</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of bytes currently allocated by small objects.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.small.nmalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.small.nmalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times a small allocation was
+ requested from the arena's bins, whether to fill the relevant tcache if
+ <link linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is
+ enabled, or to directly satisfy an allocation request
+ otherwise.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.small.ndalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.small.ndalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times a small allocation was
+ returned to the arena's bins, whether to flush the relevant tcache if
+ <link linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is
+ enabled, or to directly deallocate an allocation
+ otherwise.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.small.nrequests">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.small.nrequests</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of allocation requests satisfied by
+ all bin size classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.small.nfills">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.small.nfills</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of tcache fills by all small size
+ classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.small.nflushes">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.small.nflushes</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of tcache flushes by all small size
+ classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.large.allocated">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.large.allocated</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Number of bytes currently allocated by large objects.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.large.nmalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.large.nmalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times a large extent was allocated
+ from the arena, whether to fill the relevant tcache if <link
+ linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled and
+ the size class is within the range being cached, or to directly satisfy
+ an allocation request otherwise.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.large.ndalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.large.ndalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times a large extent was returned
+ to the arena, whether to flush the relevant tcache if <link
+ linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled and
+ the size class is within the range being cached, or to directly
+ deallocate an allocation otherwise.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.large.nrequests">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.large.nrequests</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of allocation requests satisfied by
+ all large size classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.large.nfills">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.large.nfills</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of tcache fills by all large size
+ classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.large.nflushes">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.large.nflushes</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of tcache flushes by all large size
+ classes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nmalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nmalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times a bin region of the
+ corresponding size class was allocated from the arena, whether to fill
+ the relevant tcache if <link
+ linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled, or
+ to directly satisfy an allocation request otherwise.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.ndalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.ndalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times a bin region of the
+ corresponding size class was returned to the arena, whether to flush the
+ relevant tcache if <link
+ linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled, or
+ to directly deallocate an allocation otherwise.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nrequests">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nrequests</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of allocation requests satisfied by
+ bin regions of the corresponding size class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.curregs">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curregs</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Current number of regions for this size
+ class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nfills">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nfills</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Cumulative number of tcache fills.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nflushes">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nflushes</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ </term>
+ <listitem><para>Cumulative number of tcache flushes.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nslabs">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nslabs</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of slabs created.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nreslabs">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nreslabs</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times the current slab from which
+ to allocate changed.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.curslabs">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.curslabs</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Current number of slabs.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.j.nonfull_slabs">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.nonfull_slabs</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Current number of nonfull slabs.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.bins.mutex">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.bins.&lt;j&gt;.mutex.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on
+ <varname>arena.&lt;i&gt;.bins.&lt;j&gt;</varname> mutex (arena bin
+ scope; bin operation related). <mallctl>{counter}</mallctl> is one of
+ the counters in <link linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.extents.n">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.extents.&lt;j&gt;.n{extent_type}</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para> Number of extents of the given type in this arena in
+ the bucket corresponding to page size index &lt;j&gt;. The extent type
+ is one of dirty, muzzy, or retained.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.extents.bytes">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.extents.&lt;j&gt;.{extent_type}_bytes</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para> Sum of the bytes managed by extents of the given type
+ in this arena in the bucket corresponding to page size index &lt;j&gt;.
+ The extent type is one of dirty, muzzy, or retained.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.lextents.j.nmalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.nmalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times a large extent of the
+ corresponding size class was allocated from the arena, whether to fill
+ the relevant tcache if <link
+ linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled and
+ the size class is within the range being cached, or to directly satisfy
+ an allocation request otherwise.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.lextents.j.ndalloc">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.ndalloc</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of times a large extent of the
+ corresponding size class was returned to the arena, whether to flush the
+ relevant tcache if <link
+ linkend="opt.tcache"><mallctl>opt.tcache</mallctl></link> is enabled and
+ the size class is within the range being cached, or to directly
+ deallocate an allocation otherwise.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.lextents.j.nrequests">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.nrequests</mallctl>
+ (<type>uint64_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Cumulative number of allocation requests satisfied by
+ large extents of the corresponding size class.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.lextents.j.curlextents">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.lextents.&lt;j&gt;.curlextents</mallctl>
+ (<type>size_t</type>)
+ <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Current number of large allocations for this size class.
+ </para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mutexes.large">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mutexes.large.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>arena.&lt;i&gt;.large</varname>
+ mutex (arena scope; large allocation related).
+ <mallctl>{counter}</mallctl> is one of the counters in <link
+ linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mutexes.extent_avail">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mutexes.extent_avail.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>arena.&lt;i&gt;.extent_avail
+ </varname> mutex (arena scope; extent avail related).
+ <mallctl>{counter}</mallctl> is one of the counters in <link
+ linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mutexes.extents_dirty">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mutexes.extents_dirty.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>arena.&lt;i&gt;.extents_dirty
+ </varname> mutex (arena scope; dirty extents related).
+ <mallctl>{counter}</mallctl> is one of the counters in <link
+ linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mutexes.extents_muzzy">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mutexes.extents_muzzy.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>arena.&lt;i&gt;.extents_muzzy
+ </varname> mutex (arena scope; muzzy extents related).
+ <mallctl>{counter}</mallctl> is one of the counters in <link
+ linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mutexes.extents_retained">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mutexes.extents_retained.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>arena.&lt;i&gt;.extents_retained
+ </varname> mutex (arena scope; retained extents related).
+ <mallctl>{counter}</mallctl> is one of the counters in <link
+ linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mutexes.decay_dirty">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mutexes.decay_dirty.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>arena.&lt;i&gt;.decay_dirty
+ </varname> mutex (arena scope; decay for dirty pages related).
+ <mallctl>{counter}</mallctl> is one of the counters in <link
+ linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mutexes.decay_muzzy">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mutexes.decay_muzzy.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>arena.&lt;i&gt;.decay_muzzy
+ </varname> mutex (arena scope; decay for muzzy pages related).
+ <mallctl>{counter}</mallctl> is one of the counters in <link
+ linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mutexes.base">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mutexes.base.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on <varname>arena.&lt;i&gt;.base</varname>
+ mutex (arena scope; base allocator related).
+ <mallctl>{counter}</mallctl> is one of the counters in <link
+ linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ <varlistentry id="stats.arenas.i.mutexes.tcache_list">
+ <term>
+ <mallctl>stats.arenas.&lt;i&gt;.mutexes.tcache_list.{counter}</mallctl>
+ (<type>counter specific type</type>) <literal>r-</literal>
+ [<option>--enable-stats</option>]
+ </term>
+ <listitem><para>Statistics on
+ <varname>arena.&lt;i&gt;.tcache_list</varname> mutex (arena scope;
+ tcache to arena association related). This mutex is expected to be
+ accessed less often. <mallctl>{counter}</mallctl> is one of the
+ counters in <link linkend="mutex_counters">mutex profiling
+ counters</link>.</para></listitem>
+ </varlistentry>
+
+ </variablelist>
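+
+ <para>As a brief illustration (a minimal sketch, assuming a jemalloc
+ built with <option>--enable-stats</option>), the statistics above are
+ cached, so a reader typically refreshes them by writing to the
+ <mallctl>epoch</mallctl> mallctl before querying:
+ <programlisting language="C"><![CDATA[
+#include <stdint.h>
+#include <stdio.h>
+#include <malloc_np.h>
+
+int
+main(void)
+{
+    /* Refresh the cached statistics ("epoch" is read-write). */
+    uint64_t epoch = 1;
+    size_t len = sizeof(epoch);
+    if (mallctl("epoch", &epoch, &len, &epoch, len) != 0)
+        return (1);
+
+    size_t allocated, active, resident, sz = sizeof(size_t);
+    mallctl("stats.allocated", &allocated, &sz, NULL, 0);
+    mallctl("stats.active", &active, &sz, NULL, 0);
+    mallctl("stats.resident", &resident, &sz, NULL, 0);
+    printf("allocated %zu, active %zu, resident %zu\n",
+        allocated, active, resident);
+    return (0);
+}]]></programlisting></para>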
+ </refsect1>
+ <refsect1 id="heap_profile_format">
+ <title>HEAP PROFILE FORMAT</title>
+ <para>Although the heap profiling functionality was originally designed to
+ be compatible with the
+ <command>pprof</command> command that is developed as part of the <ulink
+ url="http://code.google.com/p/gperftools/">gperftools
+ package</ulink>, the addition of per thread heap profiling functionality
+ required a different heap profile format. The <command>jeprof</command>
+ command is derived from <command>pprof</command>, with enhancements to
+ support the heap profile format described here.</para>
+
+ <para>In the following hypothetical heap profile, <constant>[...]</constant>
+ indicates elision for the sake of compactness. <programlisting><![CDATA[
+heap_v2/524288
+ t*: 28106: 56637512 [0: 0]
+ [...]
+ t3: 352: 16777344 [0: 0]
+ [...]
+ t99: 17754: 29341640 [0: 0]
+ [...]
+@ 0x5f86da8 0x5f5a1dc [...] 0x29e4d4e 0xa200316 0xabb2988 [...]
+ t*: 13: 6688 [0: 0]
+ t3: 12: 6496 [0: 0]
+ t99: 1: 192 [0: 0]
+[...]
+
+MAPPED_LIBRARIES:
+[...]]]></programlisting> The following matches the above heap profile, but most
+tokens are replaced with <constant>&lt;description&gt;</constant> to indicate
+descriptions of the corresponding fields. <programlisting><![CDATA[
+<heap_profile_format_version>/<mean_sample_interval>
+ <aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+ [...]
+ <thread_3_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+ [...]
+ <thread_99_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+ [...]
+@ <top_frame> <frame> [...] <frame> <frame> <frame> [...]
+ <backtrace_aggregate>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+ <backtrace_thread_3>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+ <backtrace_thread_99>: <curobjs>: <curbytes> [<cumobjs>: <cumbytes>]
+[...]
+
+MAPPED_LIBRARIES:
+</proc/<pid>/maps>]]></programlisting></para>
+ </refsect1>
+
+ <refsect1 id="debugging_malloc_problems">
+ <title>DEBUGGING MALLOC PROBLEMS</title>
+ <para>When debugging, it is a good idea to configure/build jemalloc with
+ the <option>--enable-debug</option> and <option>--enable-fill</option>
+ options, and recompile the program with suitable options and symbols for
+ debugger support. When so configured, jemalloc incorporates a wide variety
+ of run-time assertions that catch application errors such as double-free,
+ write-after-free, etc.</para>
+
+ <para>Programs often accidentally depend on <quote>uninitialized</quote>
+ memory actually being filled with zero bytes. Junk filling
+ (see the <link linkend="opt.junk"><mallctl>opt.junk</mallctl></link>
+ option) tends to expose such bugs in the form of obviously incorrect
+ results and/or coredumps. Conversely, zero
+ filling (see the <link
+ linkend="opt.zero"><mallctl>opt.zero</mallctl></link> option) eliminates
+ the symptoms of such bugs. Between these two options, it is usually
+ possible to quickly detect, diagnose, and eliminate such bugs.</para>
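+
+ <para>For instance (a sketch), junk filling can be requested from the
+ source for a single program:
+ <programlisting language="C"><![CDATA[
+/* Expose reads of uninitialized memory; see opt.junk. */
+malloc_conf = "junk:true";]]></programlisting></para>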
+
+ <para>This implementation does not provide much detail about the problems
+ it detects, because the performance impact for storing such information
+ would be prohibitive.</para>
+ </refsect1>
+ <refsect1 id="diagnostic_messages">
+ <title>DIAGNOSTIC MESSAGES</title>
+ <para>If any of the memory allocation/deallocation functions detect an
+ error or warning condition, a message will be printed to file descriptor
+ <constant>STDERR_FILENO</constant>. Errors will result in the process
+ dumping core. If the <link
+ linkend="opt.abort"><mallctl>opt.abort</mallctl></link> option is set, most
+ warnings are treated as errors.</para>
+
+ <para>The <varname>malloc_message</varname> variable allows the programmer
+ to override the function which emits the text strings forming the errors
+ and warnings if for some reason the <constant>STDERR_FILENO</constant> file
+ descriptor is not suitable for this.
+ <function>malloc_message()</function> takes the
+ <parameter>cbopaque</parameter> pointer argument that is
+ <constant>NULL</constant> unless overridden by the arguments in a call to
+ <function>malloc_stats_print()</function>, followed by a string
+ pointer. Please note that doing anything which tries to allocate memory in
+ this function is likely to result in a crash or deadlock.</para>
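+
+ <para>For example, the following sketch (illustrative only; the log
+ descriptor and installer function are hypothetical, and the
+ <varname>malloc_message</varname> declaration is assumed to come from
+ <filename>&lt;malloc_np.h&gt;</filename>) redirects messages using only
+ <citerefentry><refentrytitle>write</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>, which performs no allocation:
+ <programlisting language="C"><![CDATA[
+#include <string.h>
+#include <unistd.h>
+#include <malloc_np.h>
+
+static int log_fd = STDERR_FILENO;  /* Hypothetical: point at a log file. */
+
+static void
+log_message_cb(void *cbopaque, const char *s)
+{
+    /* Allocating here could crash or deadlock; write(2) is safe. */
+    (void)cbopaque;
+    (void)write(log_fd, s, strlen(s));
+}
+
+static void
+install_malloc_logger(void)
+{
+    malloc_message = log_message_cb;
+}]]></programlisting></para>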
+
+ <para>All messages are prefixed by
+ <quote><computeroutput>&lt;jemalloc&gt;: </computeroutput></quote>.</para>
+ </refsect1>
+ <refsect1 id="return_values">
+ <title>RETURN VALUES</title>
+ <refsect2>
+ <title>Standard API</title>
+ <para>The <function>malloc()</function> and
+ <function>calloc()</function> functions return a pointer to the
+ allocated memory if successful; otherwise a <constant>NULL</constant>
+ pointer is returned and <varname>errno</varname> is set to
+ <errorname>ENOMEM</errorname>.</para>
+
+ <para>The <function>posix_memalign()</function> function
+ returns the value 0 if successful; otherwise it returns an error value.
+ The <function>posix_memalign()</function> function will fail
+ if:
+ <variablelist>
+ <varlistentry>
+ <term><errorname>EINVAL</errorname></term>
+
+ <listitem><para>The <parameter>alignment</parameter> parameter is
+ not a power of 2 at least as large as
+ <code language="C">sizeof(<type>void *</type>)</code>.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorname>ENOMEM</errorname></term>
+
+ <listitem><para>Memory allocation error.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
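+
+ <para>For instance (a sketch), a conforming call passes a power-of-two
+ alignment at least
+ <code language="C">sizeof(<type>void *</type>)</code>:
+ <programlisting language="C"><![CDATA[
+void *p;
+/* 64 is a power of 2 and >= sizeof(void *), so EINVAL cannot occur. */
+if (posix_memalign(&p, 64, 1024) == 0)
+    free(p);]]></programlisting></para>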
+
+ <para>The <function>aligned_alloc()</function> function returns
+ a pointer to the allocated memory if successful; otherwise a
+ <constant>NULL</constant> pointer is returned and
+ <varname>errno</varname> is set. The
+ <function>aligned_alloc()</function> function will fail if:
+ <variablelist>
+ <varlistentry>
+ <term><errorname>EINVAL</errorname></term>
+
+ <listitem><para>The <parameter>alignment</parameter> parameter is
+ not a power of 2.
+ </para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorname>ENOMEM</errorname></term>
+
+ <listitem><para>Memory allocation error.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+
+ <para>The <function>realloc()</function> function returns a
+ pointer, possibly identical to <parameter>ptr</parameter>, to the
+ allocated memory if successful; otherwise a <constant>NULL</constant>
+ pointer is returned, and <varname>errno</varname> is set to
+ <errorname>ENOMEM</errorname> if the error was the result of an
+ allocation failure. The <function>realloc()</function>
+ function always leaves the original buffer intact when an error occurs.
+ </para>
+
+ <para>The <function>free()</function> function returns no
+ value.</para>
+ </refsect2>
+ <refsect2>
+ <title>Non-standard API</title>
+ <para>The <function>mallocx()</function> and
+ <function>rallocx()</function> functions return a pointer to
+ the allocated memory if successful; otherwise a <constant>NULL</constant>
+ pointer is returned to indicate insufficient contiguous memory was
+ available to service the allocation request. </para>
+
+ <para>The <function>xallocx()</function> function returns the
+ real size of the resulting resized allocation pointed to by
+ <parameter>ptr</parameter>, which is a value less than
+ <parameter>size</parameter> if the allocation could not be adequately
+ grown in place. </para>
+
+ <para>The <function>sallocx()</function> function returns the
+ real size of the allocation pointed to by <parameter>ptr</parameter>.
+ </para>
+
+ <para>The <function>nallocx()</function> function returns the real size
+ that would result from a successful equivalent
+ <function>mallocx()</function> function call, or zero if
+ insufficient memory is available to perform the size computation. </para>
+
+ <para>The <function>mallctl()</function>,
+ <function>mallctlnametomib()</function>, and
+ <function>mallctlbymib()</function> functions return 0 on
+ success; otherwise they return an error value. The functions will fail
+ if:
+ <variablelist>
+ <varlistentry>
+ <term><errorname>EINVAL</errorname></term>
+
+ <listitem><para><parameter>newp</parameter> is not
+ <constant>NULL</constant>, and <parameter>newlen</parameter> is too
+ large or too small. Alternatively, <parameter>*oldlenp</parameter>
+ is too large or too small; in this case as much data as possible
+ are read despite the error.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorname>ENOENT</errorname></term>
+
+ <listitem><para><parameter>name</parameter> or
+ <parameter>mib</parameter> specifies an unknown/invalid
+ value.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorname>EPERM</errorname></term>
+
+ <listitem><para>Attempt to read or write void value, or attempt to
+ write read-only value.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorname>EAGAIN</errorname></term>
+
+ <listitem><para>A memory allocation failure
+ occurred.</para></listitem>
+ </varlistentry>
+ <varlistentry>
+ <term><errorname>EFAULT</errorname></term>
+
+ <listitem><para>An interface with side effects failed in some way
+ not directly related to <function>mallctl*()</function>
+ read/write processing.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
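+
+ <para>As an illustration (a minimal sketch, assuming a jemalloc built
+ with <option>--enable-stats</option>; <function>print_pactive()</function>
+ is hypothetical, and its arena count would come from the
+ <mallctl>arenas.narenas</mallctl> mallctl), a MIB can be cached with
+ <function>mallctlnametomib()</function> and reused with a patched arena
+ index, checking the error values above:
+ <programlisting language="C"><![CDATA[
+#include <stdio.h>
+#include <malloc_np.h>
+
+static void
+print_pactive(unsigned narenas)
+{
+    size_t mib[4];
+    size_t miblen = sizeof(mib) / sizeof(mib[0]);
+
+    if (mallctlnametomib("stats.arenas.0.pactive", mib, &miblen) != 0)
+        return;  /* e.g. ENOENT for an unknown name. */
+    for (unsigned i = 0; i < narenas; i++) {
+        size_t pactive, sz = sizeof(pactive);
+
+        mib[2] = (size_t)i;  /* Patch in the arena index. */
+        if (mallctlbymib(mib, miblen, &pactive, &sz, NULL, 0) == 0)
+            printf("arena %u: %zu active pages\n", i, pactive);
+    }
+}]]></programlisting></para>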
+
+ <para>The <function>malloc_usable_size()</function> function
+ returns the usable size of the allocation pointed to by
+ <parameter>ptr</parameter>. </para>
+ </refsect2>
+ </refsect1>
+ <refsect1 id="environment">
+ <title>ENVIRONMENT</title>
+ <para>The following environment variable affects the execution of the
+ allocation functions:
+ <variablelist>
+ <varlistentry>
+ <term><envar>MALLOC_CONF</envar></term>
+
+ <listitem><para>If the environment variable
+ <envar>MALLOC_CONF</envar> is set, the characters it contains
+ will be interpreted as options.</para></listitem>
+ </varlistentry>
+ </variablelist>
+ </para>
+ </refsect1>
+ <refsect1 id="examples">
+ <title>EXAMPLES</title>
+ <para>To dump core whenever a problem occurs:
+ <screen>ln -s 'abort:true' /etc/malloc.conf</screen>
+ </para>
+ <para>To specify in the source that only one arena should be automatically
+ created:
+ <programlisting language="C"><![CDATA[
+malloc_conf = "narenas:1";]]></programlisting></para>
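+ <para>To enable <link
+ linkend="background_thread"><mallctl>background_thread</mallctl></link>
+ at run time (a sketch; error handling elided):
+ <programlisting language="C"><![CDATA[
+bool enable = true;
+mallctl("background_thread", NULL, NULL, &enable, sizeof(enable));]]></programlisting></para>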
+ </refsect1>
+ <refsect1 id="see_also">
+ <title>SEE ALSO</title>
+ <para><citerefentry><refentrytitle>madvise</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>mmap</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>sbrk</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>utrace</refentrytitle>
+ <manvolnum>2</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>alloca</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>atexit</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry>,
+ <citerefentry><refentrytitle>getpagesize</refentrytitle>
+ <manvolnum>3</manvolnum></citerefentry></para>
+ </refsect1>
+ <refsect1 id="standards">
+ <title>STANDARDS</title>
+ <para>The <function>malloc()</function>,
+ <function>calloc()</function>,
+ <function>realloc()</function>, and
+ <function>free()</function> functions conform to ISO/IEC
+ 9899:1990 (<quote>ISO C90</quote>).</para>
+
+ <para>The <function>posix_memalign()</function> function conforms
+ to IEEE Std 1003.1-2001 (<quote>POSIX.1</quote>).</para>
+ </refsect1>
+ <refsect1 id="history">
+ <title>HISTORY</title>
+ <para>The <function>malloc_usable_size()</function> and
+ <function>posix_memalign()</function> functions first appeared in FreeBSD
+ 7.0.</para>
+
+ <para>The <function>aligned_alloc()</function>,
+ <function>malloc_stats_print()</function>, and
+ <function>mallctl*()</function> functions first appeared in FreeBSD
+ 10.0.</para>
+
+ <para>The <function>*allocx()</function> functions first appeared in FreeBSD
+ 11.0.</para>
+ </refsect1>
+</refentry>
diff --git a/contrib/jemalloc/doc/manpages.xsl.in b/contrib/jemalloc/doc/manpages.xsl.in
new file mode 100644
index 000000000000..88b2626b9581
--- /dev/null
+++ b/contrib/jemalloc/doc/manpages.xsl.in
@@ -0,0 +1,4 @@
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+ <xsl:import href="@XSLROOT@/manpages/docbook.xsl"/>
+ <xsl:import href="@abs_srcroot@doc/stylesheet.xsl"/>
+</xsl:stylesheet>
diff --git a/contrib/jemalloc/doc/stylesheet.xsl b/contrib/jemalloc/doc/stylesheet.xsl
new file mode 100644
index 000000000000..619365d825c7
--- /dev/null
+++ b/contrib/jemalloc/doc/stylesheet.xsl
@@ -0,0 +1,10 @@
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+ <xsl:param name="funcsynopsis.style">ansi</xsl:param>
+ <xsl:param name="function.parens" select="0"/>
+ <xsl:template match="function">
+ <xsl:call-template name="inline.monoseq"/>
+ </xsl:template>
+ <xsl:template match="mallctl">
+ <quote><xsl:call-template name="inline.monoseq"/></quote>
+ </xsl:template>
+</xsl:stylesheet>
diff --git a/contrib/jemalloc/doc_internal/PROFILING_INTERNALS.md b/contrib/jemalloc/doc_internal/PROFILING_INTERNALS.md
new file mode 100644
index 000000000000..0a9f31c0c9c0
--- /dev/null
+++ b/contrib/jemalloc/doc_internal/PROFILING_INTERNALS.md
@@ -0,0 +1,127 @@
+# jemalloc profiling
+This describes the mathematical basis behind jemalloc's profiling implementation, as well as the implementation tricks that make it effective. Historically, the jemalloc profiling design simply copied tcmalloc's. The implementation has since diverged, due to both the desire to record additional information, and to correct some biasing bugs.
+
+Note: this document is markdown with embedded LaTeX; different markdown renderers may not produce the expected output. Viewing with `pandoc -s PROFILING_INTERNALS.md -o PROFILING_INTERNALS.pdf` is recommended.
+
+## Some tricks in our implementation toolbag
+
+### Sampling
+Recording our metadata is quite expensive; we need to walk up the stack to get a stack trace. On top of that, we need to allocate storage to record that stack trace, and stick it somewhere where a profile-dumping call can find it. That call might happen on another thread, so we'll probably need to take a lock to do so. These costs are quite large compared to the average cost of an allocation. To manage this, we'll only sample some fraction of allocations. This will miss some of them, so our data will be incomplete, but we'll try to make up for it. We can tune our sampling rate to balance accuracy and performance.
+
+### Fast Bernoulli sampling
+Compared to our fast paths, even a `coinflip(p)` function can be quite expensive. Having to do a random-number generation and some floating point operations would be a sizeable relative cost. However (as pointed out in [[Vitter, 1987](https://dl.acm.org/doi/10.1145/23002.23003)]), if we can orchestrate our algorithm so that many of our `coinflip` calls share their parameter value, we can do better. We can sample from the geometric distribution, and initialize a counter with the result. When the counter hits 0, the `coinflip` function returns true (and reinitializes its internal counter).
+This can let us do a random-number generation once per (logical) coinflip that comes up heads, rather than once per (logical) coinflip. Since we expect to sample relatively rarely, this can be a large win.
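+
+A minimal sketch of the trick in C (illustrative; jemalloc's actual implementation differs in naming and in how it draws its random numbers):
+
+```c
+#include <math.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+/* Events remaining until the next sample; would be seeded with
+ * geometric_draw(p) at startup (zero-initialized here for brevity,
+ * which samples the very first event). */
+static uint64_t wait_count;
+
+/* Draw from Geometric(p) by inverting the CDF: the number of failures
+ * before the first success. */
+static uint64_t
+geometric_draw(double p)
+{
+    double u = (double)rand() / ((double)RAND_MAX + 1.0);
+    return (uint64_t)floor(log(1.0 - u) / log(1.0 - p));
+}
+
+/* Equivalent to flipping a p-weighted coin per event, but only pays for
+ * random-number generation when a flip comes up heads. */
+static bool
+coinflip(double p)
+{
+    if (wait_count > 0) {
+        wait_count--;
+        return false;
+    }
+    wait_count = geometric_draw(p);
+    return true;
+}
+```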
+
+### Fast-path / slow-path thinking
+Most programs have a skewed distribution of allocations. Smaller allocations are much more frequent than large ones, but shorter lived and less common as a fraction of program memory. "Small" and "large" are necessarily sort of fuzzy terms, but if we define "small" as "allocations jemalloc puts into slabs" and "large" as the others, then it's not uncommon for small allocations to be hundreds of times more frequent than large ones, but take up around half the amount of heap space as large ones. Moreover, small allocations tend to be much cheaper than large ones (often by a factor of 20-30): they're more likely to hit in thread caches, less likely to have to do an mmap, and cheaper to fill (by the user) once the allocation has been returned.
+
+## An unbiased estimator of space consumption from (almost) arbitrary sampling strategies
+Suppose we have a sampling strategy that meets the following criteria:
+
+ - One allocation being sampled is independent of other allocations being sampled.
+ - Each allocation has a non-zero probability of being sampled.
+
+We can then estimate the bytes in live allocations through some particular stack trace as:
+
+$$ \sum_i S_i I_i \frac{1}{\mathrm{E}[I_i]} $$
+
+where the sum ranges over some index variable of live allocations from that stack, $S_i$ is the size of the $i$'th allocation, and $I_i$ is an indicator random variable for whether or not the $i$'th allocation is sampled. $S_i$ and $\mathrm{E}[I_i]$ are constants (the program allocations are fixed; the random variables are the sampling decisions), so taking the expectation we get
+
+$$ \sum_i S_i \mathrm{E}[I_i] \frac{1}{\mathrm{E}[I_i]}.$$
+
+This is of course $\sum_i S_i$, as we want (and, a similar calculation could be done for allocation counts as well).
+This is a fairly general strategy; note that while we require that sampling decisions be independent of one another's outcomes, they don't have to be independent of previous allocations, total bytes allocated, etc. You can imagine strategies that:
+
+ - Sample allocations at program startup at a higher rate than subsequent allocations
+ - Sample even-indexed allocations more frequently than odd-indexed ones (so long as no allocation has zero sampling probability)
+ - Let threads declare themselves as high-sampling-priority, and sample their allocations at an increased rate.
+
+These can all be fit into this framework to give an unbiased estimator.
+
+## Evaluating sampling strategies
+Not all strategies for picking allocations to sample are equally good, of course. Among unbiased estimators, the lower the variance, the lower the mean squared error. Using the estimator above, the variance is:
+
+$$
+\begin{aligned}
+& \mathrm{Var}[\sum_i S_i I_i \frac{1}{\mathrm{E}[I_i]}] \\
+=& \sum_i \mathrm{Var}[S_i I_i \frac{1}{\mathrm{E}[I_i]}] \\
+=& \sum_i \frac{S_i^2}{\mathrm{E}[I_i]^2} \mathrm{Var}[I_i] \\
+=& \sum_i \frac{S_i^2}{\mathrm{E}[I_i]^2} \mathrm{E}[I_i](1 - \mathrm{E}[I_i]) \\
+=& \sum_i S_i^2 \frac{1 - \mathrm{E}[I_i]}{\mathrm{E}[I_i]}.
+\end{aligned}
+$$
+
+We can use this formula to compare various strategy choices. All else being equal, lower-variance strategies are better.
+
+## Possible sampling strategies
+Because of the desire to avoid the fast-path costs, we'd like to use our Bernoulli trick if possible. There are two obvious counters to use: a coinflip per allocation, and a coinflip per byte allocated.
+
+### Bernoulli sampling per-allocation
+An obvious strategy is to pick some large $N$, and give each allocation a $1/N$ chance of being sampled. This would let us use our Bernoulli-via-Geometric trick. Using the formula from above, we can compute the variance as:
+
+$$ \sum_i S_i^2 \frac{1 - \frac{1}{N}}{\frac{1}{N}} = (N-1) \sum_i S_i^2.$$
+
+That is, an allocation of size $Z$ contributes a term of $(N-1)Z^2$ to the variance.
+
+### Bernoulli sampling per-byte
+Another option we have is to pick some rate $R$, and give each byte a $1/R$ chance of being picked for sampling (at which point we would sample its contained allocation). The chance of an allocation of size $Z$ being sampled, then, is
+
+$$1-(1-\frac{1}{R})^{Z}$$
+
+and an allocation of size $Z$ contributes a term of
+
+$$Z^2 \frac{(1-\frac{1}{R})^{Z}}{1-(1-\frac{1}{R})^{Z}}.$$
+
+In practical settings, $R$ is large, and so this is well-approximated by
+
+$$Z^2 \frac{e^{-Z/R}}{1 - e^{-Z/R}} .$$
+
+Just to get a sense of the dynamics here, let's look at the behavior for various values of $Z$. When $Z$ is small relative to $R$, we can use $e^x \approx 1 + x$ (with $x = -Z/R$), and conclude that the variance contributed by a small-$Z$ allocation is around
+
+$$Z^2 \frac{1-Z/R}{Z/R} \approx RZ.$$
+
+When $Z$ is comparable to $R$, the variance term is near $Z^2$ (we have $\frac{e^{-Z/R}}{1 - e^{-Z/R}} = 1$ when $Z/R = \ln 2 \approx 0.693$). When $Z$ is large relative to $R$, the variance term goes to zero.
+
+## Picking a sampling strategy
+The fast-path/slow-path dynamics of allocation patterns point us towards the per-byte sampling approach:
+
+ - The quadratic increase in variance per allocation in the first approach is quite costly when heaps have a non-negligible portion of their bytes in those allocations, which is practically often the case.
+ - The Bernoulli-per-byte approach shifts more of its samples towards large allocations, which are already a slow-path.
+ - We drive several tickers (e.g. tcache gc) by bytes allocated, and report bytes-allocated as a user-visible statistic, so we have to do all the necessary bookkeeping anyways.
+
+Indeed, this is the approach we use in jemalloc. Our heap dumps record the size of the allocation and the sampling rate $R$, and jeprof unbiases by dividing by $1 - e^{-Z/R}$. The framework above would suggest dividing by $1-(1-1/R)^Z$; instead, we use the fact that $R$ is large in practical situations, and so $e^{-Z/R}$ is a good approximation (and faster to compute). (Equivalently, we may also see this as the factor that falls out from viewing sampling as a Poisson process directly).
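+
+As a concrete sketch of that step (hedged: `unbiased_bytes` is an illustrative helper, not jemalloc's or jeprof's actual function), the unbiasing of a single sampled record looks like:
+
+```c
+#include <math.h>
+
+/* Expected bytes represented by one sampled allocation of size Z under
+ * per-byte sampling with mean interval R: divide by 1 - e^(-Z/R). */
+static double
+unbiased_bytes(double Z, double R)
+{
+    return Z / (1.0 - exp(-Z / R));
+}
+```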
+
+## Consequences for heap dump consumers
+Using this approach means that there are a few things users need to be aware of.
+
+### Stack counts are not proportional to allocation frequencies
+If one stack appears twice as often as another, this by itself does not imply that it allocates twice as often. Consider the case in which there are only two types of allocating call stacks in a program. Stack A allocates 8 bytes, and occurs a million times in a program. Stack B allocates 8 MB, and occurs just once in a program. If our sampling rate $R$ is about 1MB, we expect stack A to show up about 8 times, and stack B to show up once. Stack A isn't 8 times more frequent than stack B, though; it's a million times more frequent.
+
+### Aggregation must be done after unbiasing samples
+Some tools manually parse heap dump output, and aggregate across stacks (or across program runs) to provide wider-scale data analyses. When doing this aggregation, though, it's important to unbias-and-then-sum, rather than sum-and-then-unbias. Reusing our example from the previous section: suppose we collect heap dumps of the program from a million machines. We then have 8 million occurrences of stack A (each of 8 bytes), and a million occurrences of stack B (each of 8 MB). If we sum first, we'll attribute 64 MB to stack A, and 8 TB to stack B. Unbiasing changes these numbers by an infinitesimal amount, so that sum-then-unbias dramatically underreports the amount of memory allocated by stack A.
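+
+In code terms (a sketch; `struct rec` is a hypothetical parsed dump record, not a jeprof structure), aggregation should apply the unbiasing factor per record before summing:
+
+```c
+#include <math.h>
+#include <stddef.h>
+
+struct rec {
+    double size;  /* Z: sampled allocation size in bytes. */
+    double rate;  /* R: mean sample interval from the dump header. */
+};
+
+/* Unbias each record, then sum -- never the other way around. */
+static double
+aggregate_bytes(const struct rec *recs, size_t n)
+{
+    double total = 0.0;
+    for (size_t i = 0; i < n; i++)
+        total += recs[i].size / (1.0 - exp(-recs[i].size / recs[i].rate));
+    return total;
+}
+```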
+
+## An avenue for future exploration
+While the framework we laid out above is pretty general, as an engineering decision we're only interested in fairly simple approaches (i.e. ones for which the chance of an allocation being sampled depends only on its size). Our job is then: for each size class $Z$, pick a probability $p_Z$ that an allocation of that size will be sampled. We made some handwave-y references to statistical distributions to justify our choices, but there's no reason we need to pick them that way. Any set of non-zero probabilities is a valid choice.
+The real limiting factor in our ability to reduce estimator variance is the fact that sampling is expensive; we want to make sure we only do it on a small fraction of allocations. Our goal, then, is to pick the $p_Z$ to minimize variance given some maximum sampling rate $P$. If we define $a_Z$ to be the fraction of allocations of size $Z$, and $l_Z$ to be the fraction of allocations of size $Z$ still alive at the time of a heap dump, then we can phrase this as an optimization problem over the choices of $p_Z$:
+
+Minimize
+
+$$ \sum_Z Z^2 l_Z \frac{1-p_Z}{p_Z} $$
+
+subject to
+
+$$ \sum_Z a_Z p_Z \leq P $$
+
+Ignoring a term that doesn't depend on $p_Z$, the objective is minimized whenever
+
+$$ \sum_Z Z^2 l_Z \frac{1}{p_Z} $$
+
+is. For a particular program, $l_Z$ and $a_Z$ are just numbers that can be obtained (exactly) from existing stats introspection facilities, and we have a fairly tractable convex optimization problem (it can be framed as a second-order cone program). It would be interesting to evaluate, for various common allocation patterns, how well our current strategy adapts. Do our actual choices for $p_Z$ closely correspond to the optimal ones? How close is the variance of our choices to the variance of the optimal strategy?
+You can imagine an implementation that actually goes all the way, and makes $p_Z$ selections a tuning parameter. I don't think this is a good use of development time for the foreseeable future; but I do wonder about the answers to some of these questions.
+
+## Implementation realities
+
+The nice story above is at least partially a lie. Initially, jeprof (copying its logic from pprof) had the sum-then-unbias error described above. The current version of jemalloc does the unbiasing step on a per-allocation basis internally, so that we're always tracking what the unbiased numbers "should" be. The problem is, actually surfacing those unbiased numbers would require a breaking change to jeprof (and the various already-deployed tools that have copied its logic). Instead, we use a little bit more trickery. Since we know at dump time the numbers we want jeprof to report, we simply choose the values we'll output so that the jeprof numbers will match the true numbers. The math is described in `src/prof_data.c` (where the only cleverness is a change of variables that lets the exponentials fall out).
+
+This has the effect of making the output of jeprof (and related tools) correct, while making its inputs incorrect. This can be annoying to human readers of raw profiling dump output.
diff --git a/contrib/jemalloc/doc_internal/jemalloc.svg b/contrib/jemalloc/doc_internal/jemalloc.svg
new file mode 100644
index 000000000000..5e77327e66b1
--- /dev/null
+++ b/contrib/jemalloc/doc_internal/jemalloc.svg
@@ -0,0 +1 @@
+<svg id="Layer_3" data-name="Layer 3" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 499 184.27"><defs><style>.cls-1,.cls-3{fill:none;}.cls-2{clip-path:url(#clip-path);}.cls-3{stroke:#262262;stroke-linecap:round;stroke-linejoin:round;stroke-width:4px;}</style><clipPath id="clip-path" transform="translate(-100.66 -259.87)"><path class="cls-1" d="M144.57,396c0,18.2-9.37,27.83-37.33,23.55V400.1c11.11,2.14,12.18-.27,12.18-11.5V324.11h25Zm-12.71-78.66c-9,0-15.52-1.48-15.52-12.71S122.9,292,131.86,292s15.52,1.2,15.52,12.58C147.38,315.55,141,317.29,131.86,317.29Zm50.57,76.39c-30.64,0-35.85-18.86-35.85-35.59s5.61-35.32,35.72-35.32c35.32,0,33.44,28,33.44,40.67H170.12c.54,9.5,4,14.05,11.37,14.05,6.83,0,9.64-3.34,10-7.89l24.75.13C215.48,383.38,205.84,393.68,182.43,393.68Zm-1.47-55c-6.69,0-10,2.81-10.84,12h21.41C190.73,341.9,188.18,338.69,181,338.69Zm112.78,53.65V351.4c0-4.15-1.33-8.16-6-8.16-5,0-6,3.75-6,8.16v40.94H256.42V351.4c0-4.15-.81-8.16-5.89-8.16s-6.29,3.75-6.29,8.16v40.94H219.09V324.11h14l4.15,8c2.67-4.69,10.56-9.37,18.86-9.37,7.36,0,16.19,2.14,21,9.1,3.48-5.22,11.11-9.1,20.21-9.1,19.13,0,21.54,11.37,21.54,27.16v42.41Zm83.09,0L372.41,383c-5.48,7.22-13.11,10.7-24.35,10.7-14.85,0-26.75-6-26.75-19.93,0-15.26,12.44-20.88,44.28-23,0-9.5-1.61-12.57-8.83-12.57-6.69,0-8.56,3.48-8.56,9.9H323.45c0-12.85,6.82-25.29,32.64-25.29,30,0,34.65,14.45,34.65,31.17v38.4Zm-21.54-28.63c-6.29.94-8.3,4.28-8.3,7.36,0,4.28,2.41,6.69,8.3,6.69s10.17-4.82,10.17-15.12ZM396,392.34V297.75h24.75v94.59Zm30.77,0V297.75h24.75v94.59Zm62.21,1.34c-28.09,0-34.11-18.6-34.11-35.32s6.29-35.59,34.38-35.59c27.7,0,34.12,19,34.12,35.59C523.33,375.22,516.91,393.68,488.94,393.68Zm.27-50.84c-7.89,0-11.37,4.82-11.37,15.52s3.61,15.39,11.1,15.39c7.9,0,11.38-4.42,11.38-15.39C500.32,347.79,497.24,342.84,489.21,342.84Zm69.17,50.84c-28.9,0-34.52-18.6-34.52-35.32s5.76-35.59,34.12-35.59c21.14,0,34.52,10.84,34.52,31.17H568.42c0-9.23-5.49-11.23-10.17-11.23-7,0-11.11,4.54-11.11,15.38s4,15.52,11.11,15.52c4.81,0,10-2.41,10-10.57H592.5C592.5,383.38,579,393.68,558.38,393.68Z"/></clipPath></defs><title>jemalloc Final Logo</title><g class="cls-2"><line class="cls-3" x1="345" y1="182.27" x2="345" y2="2"/><line class="cls-3" x1="225" y1="182.27" x2="225" y2="2"/><line class="cls-3" x1="105" y1="182.27" x2="105" y2="2"/><line class="cls-3" x1="43" y1="182.27" x2="43" y2="2"/><line class="cls-3" x1="475" y1="182.27" x2="475" y2="2"/><line class="cls-3" x1="195" y1="182.27" x2="195" y2="2"/><line class="cls-3" x1="75" y1="182.27" x2="75" y2="2"/><line class="cls-3" x1="337" y1="182.27" x2="337" y2="2"/><line class="cls-3" x1="215" y1="182.27" x2="215" y2="2"/><line class="cls-3" x1="95" y1="182.27" x2="95" y2="2"/><line class="cls-3" x1="415" y1="182.27" x2="415" y2="2"/><line class="cls-3" x1="385" y1="182.27" x2="385" y2="2"/><line class="cls-3" x1="183" y1="182.27" x2="183" y2="2"/><line class="cls-3" x1="65" y1="182.27" x2="65" y2="2"/><line class="cls-3" x1="173" y1="182.27" x2="173" y2="2"/><line class="cls-3" x1="145" y1="182.27" x2="145" y2="2"/><line class="cls-3" x1="163" y1="182.27" x2="163" y2="2"/><line class="cls-3" x1="460" y1="182.27" x2="460" y2="2"/><line class="cls-3" x1="281" y1="182.27" x2="281" y2="2"/><line class="cls-3" x1="313" y1="182.27" x2="313" y2="2"/><line class="cls-3" x1="252" y1="182.27" x2="252" y2="2"/><line class="cls-3" x1="450" y1="182.27" x2="450" y2="2"/><line class="cls-3" x1="271" y1="182.27" x2="271" y2="2"/><line class="cls-3" x1="332" y1="182.27" x2="332" y2="2"/><line 
class="cls-3" x1="203" y1="182.27" x2="203" y2="2"/><line class="cls-3" x1="13" y1="182.27" x2="13" y2="2"/><line class="cls-3" x1="373" y1="182.27" x2="373" y2="2"/><line class="cls-3" x1="354" y1="182.27" x2="354" y2="2"/><line class="cls-3" x1="235" y1="182.27" x2="235" y2="2"/><line class="cls-3" x1="115" y1="182.27" x2="115" y2="2"/><line class="cls-3" x1="53" y1="182.27" x2="53" y2="2"/><line class="cls-3" x1="484" y1="182.27" x2="484" y2="2"/><line class="cls-3" x1="405" y1="182.27" x2="405" y2="2"/><line class="cls-3" x1="85" y1="182.27" x2="85" y2="2"/><line class="cls-3" x1="225" y1="182.27" x2="225" y2="2"/><line class="cls-3" x1="105" y1="182.27" x2="105" y2="2"/><line class="cls-3" x1="43" y1="182.27" x2="43" y2="2"/><line class="cls-3" x1="435" y1="182.27" x2="435" y2="2"/><line class="cls-3" x1="123" y1="182.27" x2="123" y2="2"/><line class="cls-3" x1="75" y1="182.27" x2="75" y2="2"/><line class="cls-3" x1="183" y1="182.27" x2="183" y2="2"/><line class="cls-3" x1="155" y1="182.27" x2="155" y2="2"/><line class="cls-3" x1="173" y1="182.27" x2="173" y2="2"/><line class="cls-3" x1="145" y1="182.27" x2="145" y2="2"/><line class="cls-3" x1="470" y1="182.27" x2="470" y2="2"/><line class="cls-3" x1="292" y1="182.27" x2="292" y2="2"/><line class="cls-3" x1="262" y1="182.27" x2="262" y2="2"/><line class="cls-3" x1="460" y1="182.27" x2="460" y2="2"/><line class="cls-3" x1="281" y1="182.27" x2="281" y2="2"/><line class="cls-3" x1="313" y1="182.27" x2="313" y2="2"/><line class="cls-3" x1="243" y1="182.27" x2="243" y2="2"/><line class="cls-3" x1="22" y1="182.27" x2="22" y2="2"/><line class="cls-3" x1="383" y1="182.27" x2="383" y2="2"/><line class="cls-3" x1="5" y1="182.27" x2="5" y2="2"/><line class="cls-3" x1="133" y1="182.27" x2="133" y2="2"/><line class="cls-3" x1="362" y1="182.27" x2="362" y2="2"/><line class="cls-3" x1="288" y1="182.27" x2="288" y2="2"/><line class="cls-3" x1="298" y1="182.27" x2="298" y2="2"/><line class="cls-3" x1="423" y1="182.27" x2="423" y2="2"/><line class="cls-3" x1="369" y1="182.27" x2="369" y2="2"/><line class="cls-3" x1="490" y1="182.27" x2="490" y2="2"/><line class="cls-3" x1="2" y1="182.27" x2="2" y2="2"/><line class="cls-3" x1="493" y1="182.27" x2="493" y2="2"/><line class="cls-3" x1="225" y1="182.27" x2="225" y2="2"/><line class="cls-3" x1="105" y1="182.27" x2="105" y2="2"/><line class="cls-3" x1="43" y1="182.27" x2="43" y2="2"/><line class="cls-3" x1="475" y1="182.27" x2="475" y2="2"/><line class="cls-3" x1="195" y1="182.27" x2="195" y2="2"/><line class="cls-3" x1="75" y1="182.27" x2="75" y2="2"/><line class="cls-3" x1="337" y1="182.27" x2="337" y2="2"/><line class="cls-3" x1="215" y1="182.27" x2="215" y2="2"/><line class="cls-3" x1="95" y1="182.27" x2="95" y2="2"/><line class="cls-3" x1="415" y1="182.27" x2="415" y2="2"/><line class="cls-3" x1="385" y1="182.27" x2="385" y2="2"/><line class="cls-3" x1="183" y1="182.27" x2="183" y2="2"/><line class="cls-3" x1="65" y1="182.27" x2="65" y2="2"/><line class="cls-3" x1="173" y1="182.27" x2="173" y2="2"/><line class="cls-3" x1="145" y1="182.27" x2="145" y2="2"/><line class="cls-3" x1="163" y1="182.27" x2="163" y2="2"/><line class="cls-3" x1="460" y1="182.27" x2="460" y2="2"/><line class="cls-3" x1="281" y1="182.27" x2="281" y2="2"/><line class="cls-3" x1="313" y1="182.27" x2="313" y2="2"/><line class="cls-3" x1="252" y1="182.27" x2="252" y2="2"/><line class="cls-3" x1="450" y1="182.27" x2="450" y2="2"/><line class="cls-3" x1="271" y1="182.27" x2="271" y2="2"/><line class="cls-3" x1="306" y1="182.27" x2="306" 
y2="2"/><line class="cls-3" x1="203" y1="182.27" x2="203" y2="2"/><line class="cls-3" x1="13" y1="182.27" x2="13" y2="2"/><line class="cls-3" x1="373" y1="182.27" x2="373" y2="2"/><line class="cls-3" x1="354" y1="182.27" x2="354" y2="2"/><line class="cls-3" x1="235" y1="182.27" x2="235" y2="2"/><line class="cls-3" x1="115" y1="182.27" x2="115" y2="2"/><line class="cls-3" x1="53" y1="182.27" x2="53" y2="2"/><line class="cls-3" x1="484" y1="182.27" x2="484" y2="2"/><line class="cls-3" x1="405" y1="182.27" x2="405" y2="2"/><line class="cls-3" x1="85" y1="182.27" x2="85" y2="2"/><line class="cls-3" x1="225" y1="182.27" x2="225" y2="2"/><line class="cls-3" x1="105" y1="182.27" x2="105" y2="2"/><line class="cls-3" x1="43" y1="182.27" x2="43" y2="2"/><line class="cls-3" x1="435" y1="182.27" x2="435" y2="2"/><line class="cls-3" x1="123" y1="182.27" x2="123" y2="2"/><line class="cls-3" x1="75" y1="182.27" x2="75" y2="2"/><line class="cls-3" x1="183" y1="182.27" x2="183" y2="2"/><line class="cls-3" x1="155" y1="182.27" x2="155" y2="2"/><line class="cls-3" x1="173" y1="182.27" x2="173" y2="2"/><line class="cls-3" x1="145" y1="182.27" x2="145" y2="2"/><line class="cls-3" x1="470" y1="182.27" x2="470" y2="2"/><line class="cls-3" x1="292" y1="182.27" x2="292" y2="2"/><line class="cls-3" x1="262" y1="182.27" x2="262" y2="2"/><line class="cls-3" x1="460" y1="182.27" x2="460" y2="2"/><line class="cls-3" x1="281" y1="182.27" x2="281" y2="2"/><line class="cls-3" x1="328" y1="182.27" x2="328" y2="2"/><line class="cls-3" x1="243" y1="182.27" x2="243" y2="2"/><line class="cls-3" x1="22" y1="182.27" x2="22" y2="2"/><line class="cls-3" x1="383" y1="182.27" x2="383" y2="2"/><line class="cls-3" x1="5" y1="182.27" x2="5" y2="2"/><line class="cls-3" x1="32" y1="182.27" x2="32" y2="2"/><line class="cls-3" x1="133" y1="182.27" x2="133" y2="2"/><line class="cls-3" x1="362" y1="182.27" x2="362" y2="2"/><line class="cls-3" x1="288" y1="182.27" x2="288" y2="2"/><line class="cls-3" x1="298" y1="182.27" x2="298" y2="2"/><line class="cls-3" x1="423" y1="182.27" x2="423" y2="2"/><line class="cls-3" x1="369" y1="182.27" x2="369" y2="2"/><line class="cls-3" x1="490" y1="182.27" x2="490" y2="2"/><line class="cls-3" x1="2" y1="182.27" x2="2" y2="2"/><line class="cls-3" x1="493" y1="182.27" x2="493" y2="2"/><line class="cls-3" x1="349" y1="182.27" x2="349" y2="2"/><line class="cls-3" x1="229" y1="182.27" x2="229" y2="2"/><line class="cls-3" x1="109" y1="182.27" x2="109" y2="2"/><line class="cls-3" x1="47" y1="182.27" x2="47" y2="2"/><line class="cls-3" x1="479" y1="182.27" x2="479" y2="2"/><line class="cls-3" x1="399" y1="182.27" x2="399" y2="2"/><line class="cls-3" x1="199" y1="182.27" x2="199" y2="2"/><line class="cls-3" x1="79" y1="182.27" x2="79" y2="2"/><line class="cls-3" x1="341" y1="182.27" x2="341" y2="2"/><line class="cls-3" x1="219" y1="182.27" x2="219" y2="2"/><line class="cls-3" x1="99" y1="182.27" x2="99" y2="2"/><line class="cls-3" x1="41" y1="182.27" x2="41" y2="2"/><line class="cls-3" x1="419" y1="182.27" x2="419" y2="2"/><line class="cls-3" x1="389" y1="182.27" x2="389" y2="2"/><line class="cls-3" x1="187" y1="182.27" x2="187" y2="2"/><line class="cls-3" x1="69" y1="182.27" x2="69" y2="2"/><line class="cls-3" x1="177" y1="182.27" x2="177" y2="2"/><line class="cls-3" x1="149" y1="182.27" x2="149" y2="2"/><line class="cls-3" x1="464" y1="182.27" x2="464" y2="2"/><line class="cls-3" x1="285" y1="182.27" x2="285" y2="2"/><line class="cls-3" x1="317" y1="182.27" x2="317" y2="2"/><line class="cls-3" x1="454" y1="182.27" 
x2="454" y2="2"/><line class="cls-3" x1="275" y1="182.27" x2="275" y2="2"/><line class="cls-3" x1="308" y1="182.27" x2="308" y2="2"/><line class="cls-3" x1="207" y1="182.27" x2="207" y2="2"/><line class="cls-3" x1="17" y1="182.27" x2="17" y2="2"/><line class="cls-3" x1="377" y1="182.27" x2="377" y2="2"/><line class="cls-3" x1="358" y1="182.27" x2="358" y2="2"/><line class="cls-3" x1="238" y1="182.27" x2="238" y2="2"/><line class="cls-3" x1="119" y1="182.27" x2="119" y2="2"/><line class="cls-3" x1="488" y1="182.27" x2="488" y2="2"/><line class="cls-3" x1="409" y1="182.27" x2="409" y2="2"/><line class="cls-3" x1="229" y1="182.27" x2="229" y2="2"/><line class="cls-3" x1="109" y1="182.27" x2="109" y2="2"/><line class="cls-3" x1="47" y1="182.27" x2="47" y2="2"/><line class="cls-3" x1="439" y1="182.27" x2="439" y2="2"/><line class="cls-3" x1="399" y1="182.27" x2="399" y2="2"/><line class="cls-3" x1="127" y1="182.27" x2="127" y2="2"/><line class="cls-3" x1="79" y1="182.27" x2="79" y2="2"/><line class="cls-3" x1="187" y1="182.27" x2="187" y2="2"/><line class="cls-3" x1="159" y1="182.27" x2="159" y2="2"/><line class="cls-3" x1="177" y1="182.27" x2="177" y2="2"/><line class="cls-3" x1="149" y1="182.27" x2="149" y2="2"/><line class="cls-3" x1="474" y1="182.27" x2="474" y2="2"/><line class="cls-3" x1="266" y1="182.27" x2="266" y2="2"/><line class="cls-3" x1="464" y1="182.27" x2="464" y2="2"/><line class="cls-3" x1="285" y1="182.27" x2="285" y2="2"/><line class="cls-3" x1="317" y1="182.27" x2="317" y2="2"/><line class="cls-3" x1="247" y1="182.27" x2="247" y2="2"/><line class="cls-3" x1="26" y1="182.27" x2="26" y2="2"/><line class="cls-3" x1="387" y1="182.27" x2="387" y2="2"/><line class="cls-3" x1="9" y1="182.27" x2="9" y2="2"/><line class="cls-3" x1="137" y1="182.27" x2="137" y2="2"/><line class="cls-3" x1="292" y1="182.27" x2="292" y2="2"/><line class="cls-3" x1="373" y1="182.27" x2="373" y2="2"/><line class="cls-3" x1="56" y1="182.27" x2="56" y2="2"/><line class="cls-3" x1="494" y1="182.27" x2="494" y2="2"/><line class="cls-3" x1="497" y1="182.27" x2="497" y2="2"/><line class="cls-3" x1="349" y1="182.27" x2="349" y2="2"/><line class="cls-3" x1="229" y1="182.27" x2="229" y2="2"/><line class="cls-3" x1="109" y1="182.27" x2="109" y2="2"/><line class="cls-3" x1="47" y1="182.27" x2="47" y2="2"/><line class="cls-3" x1="479" y1="182.27" x2="479" y2="2"/><line class="cls-3" x1="399" y1="182.27" x2="399" y2="2"/><line class="cls-3" x1="199" y1="182.27" x2="199" y2="2"/><line class="cls-3" x1="79" y1="182.27" x2="79" y2="2"/><line class="cls-3" x1="341" y1="182.27" x2="341" y2="2"/><line class="cls-3" x1="219" y1="182.27" x2="219" y2="2"/><line class="cls-3" x1="99" y1="182.27" x2="99" y2="2"/><line class="cls-3" x1="41" y1="182.27" x2="41" y2="2"/><line class="cls-3" x1="419" y1="182.27" x2="419" y2="2"/><line class="cls-3" x1="389" y1="182.27" x2="389" y2="2"/><line class="cls-3" x1="187" y1="182.27" x2="187" y2="2"/><line class="cls-3" x1="69" y1="182.27" x2="69" y2="2"/><line class="cls-3" x1="177" y1="182.27" x2="177" y2="2"/><line class="cls-3" x1="149" y1="182.27" x2="149" y2="2"/><line class="cls-3" x1="141" y1="182.27" x2="141" y2="2"/><line class="cls-3" x1="464" y1="182.27" x2="464" y2="2"/><line class="cls-3" x1="285" y1="182.27" x2="285" y2="2"/><line class="cls-3" x1="317" y1="182.27" x2="317" y2="2"/><line class="cls-3" x1="454" y1="182.27" x2="454" y2="2"/><line class="cls-3" x1="275" y1="182.27" x2="275" y2="2"/><line class="cls-3" x1="308" y1="182.27" x2="308" y2="2"/><line class="cls-3" 
x1="207" y1="182.27" x2="207" y2="2"/><line class="cls-3" x1="17" y1="182.27" x2="17" y2="2"/><line class="cls-3" x1="377" y1="182.27" x2="377" y2="2"/><line class="cls-3" x1="119" y1="182.27" x2="119" y2="2"/><line class="cls-3" x1="488" y1="182.27" x2="488" y2="2"/><line class="cls-3" x1="409" y1="182.27" x2="409" y2="2"/><line class="cls-3" x1="229" y1="182.27" x2="229" y2="2"/><line class="cls-3" x1="109" y1="182.27" x2="109" y2="2"/><line class="cls-3" x1="47" y1="182.27" x2="47" y2="2"/><line class="cls-3" x1="439" y1="182.27" x2="439" y2="2"/><line class="cls-3" x1="399" y1="182.27" x2="399" y2="2"/><line class="cls-3" x1="127" y1="182.27" x2="127" y2="2"/><line class="cls-3" x1="79" y1="182.27" x2="79" y2="2"/><line class="cls-3" x1="187" y1="182.27" x2="187" y2="2"/><line class="cls-3" x1="159" y1="182.27" x2="159" y2="2"/><line class="cls-3" x1="177" y1="182.27" x2="177" y2="2"/><line class="cls-3" x1="149" y1="182.27" x2="149" y2="2"/><line class="cls-3" x1="474" y1="182.27" x2="474" y2="2"/><line class="cls-3" x1="295" y1="182.27" x2="295" y2="2"/><line class="cls-3" x1="266" y1="182.27" x2="266" y2="2"/><line class="cls-3" x1="464" y1="182.27" x2="464" y2="2"/><line class="cls-3" x1="285" y1="182.27" x2="285" y2="2"/><line class="cls-3" x1="317" y1="182.27" x2="317" y2="2"/><line class="cls-3" x1="247" y1="182.27" x2="247" y2="2"/><line class="cls-3" x1="58" y1="182.27" x2="58" y2="2"/><line class="cls-3" x1="387" y1="182.27" x2="387" y2="2"/><line class="cls-3" x1="9" y1="182.27" x2="9" y2="2"/><line class="cls-3" x1="292" y1="182.27" x2="292" y2="2"/><line class="cls-3" x1="301" y1="182.27" x2="301" y2="2"/><line class="cls-3" x1="428" y1="182.27" x2="428" y2="2"/><line class="cls-3" x1="373" y1="182.27" x2="373" y2="2"/><line class="cls-3" x1="56" y1="182.27" x2="56" y2="2"/><line class="cls-3" x1="494" y1="182.27" x2="494" y2="2"/><line class="cls-3" x1="497" y1="182.27" x2="497" y2="2"/></g></svg> \ No newline at end of file
diff --git a/contrib/jemalloc/include/jemalloc/internal/activity_callback.h b/contrib/jemalloc/include/jemalloc/internal/activity_callback.h
new file mode 100644
index 000000000000..6c2e84e3180d
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/activity_callback.h
@@ -0,0 +1,23 @@
+#ifndef JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H
+#define JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H
+
+/*
+ * The callback to be executed "periodically", in response to some amount of
+ * allocator activity.
+ *
+ * This callback need not compute any sort of peak (although that's the
+ * intended first use case), but we drive it from the peak counter, so it
+ * keeps things tidy to keep it here.
+ *
+ * The calls to this thunk get driven by the peak_event module.
+ */
+#define ACTIVITY_CALLBACK_THUNK_INITIALIZER {NULL, NULL}
+typedef void (*activity_callback_t)(void *uctx, uint64_t allocated,
+ uint64_t deallocated);
+typedef struct activity_callback_thunk_s activity_callback_thunk_t;
+struct activity_callback_thunk_s {
+ activity_callback_t callback;
+ void *uctx;
+};
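+
+/*
+ * Illustrative only (hypothetical names, not part of the real header): a
+ * thunk tracking net activity might look like
+ *
+ *	static void
+ *	peak_cb(void *uctx, uint64_t allocated, uint64_t deallocated) {
+ *		...update *(my_peak_state_t *)uctx from the two counters...
+ *	}
+ *	activity_callback_thunk_t thunk = {peak_cb, &my_peak_state};
+ */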
+
+#endif /* JEMALLOC_INTERNAL_ACTIVITY_CALLBACK_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_externs.h b/contrib/jemalloc/include/jemalloc/internal/arena_externs.h
index a4523ae0c494..e6fceaafea50 100644
--- a/contrib/jemalloc/include/jemalloc/internal/arena_externs.h
+++ b/contrib/jemalloc/include/jemalloc/internal/arena_externs.h
@@ -2,59 +2,67 @@
#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H
#include "jemalloc/internal/bin.h"
+#include "jemalloc/internal/div.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/stats.h"
+/*
+ * When the number of pages to be purged exceeds this threshold, a deferred
+ * purge should happen.
+ */
+#define ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD UINT64_C(1024)
+
extern ssize_t opt_dirty_decay_ms;
extern ssize_t opt_muzzy_decay_ms;
extern percpu_arena_mode_t opt_percpu_arena;
extern const char *percpu_arena_mode_names[];
-extern const uint64_t h_steps[SMOOTHSTEP_NSTEPS];
+extern div_info_t arena_binind_div_info[SC_NBINS];
+
extern malloc_mutex_t arenas_lock;
+extern emap_t arena_emap_global;
extern size_t opt_oversize_threshold;
extern size_t oversize_threshold;
+/*
+ * arena_bin_offsets[binind] is the offset of the first bin shard for size class
+ * binind.
+ */
+extern uint32_t arena_bin_offsets[SC_NBINS];
+
void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
- bin_stats_t *bstats, arena_stats_large_t *lstats,
- arena_stats_extents_t *estats);
-void arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent);
-#ifdef JEMALLOC_JET
-size_t arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr);
-#endif
-extent_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
- size_t usize, size_t alignment, bool *zero);
+ bin_stats_data_t *bstats, arena_stats_large_t *lstats,
+ pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats);
+void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena);
+edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
+ size_t usize, size_t alignment, bool zero);
void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
- extent_t *extent);
+ edata_t *edata);
void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
- extent_t *extent, size_t oldsize);
+ edata_t *edata, size_t oldsize);
void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
- extent_t *extent, size_t oldsize);
-ssize_t arena_dirty_decay_ms_get(arena_t *arena);
-bool arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
-ssize_t arena_muzzy_decay_ms_get(arena_t *arena);
-bool arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_ms);
+ edata_t *edata, size_t oldsize);
+bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
+ ssize_t decay_ms);
+ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state);
void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
bool all);
+uint64_t arena_time_until_deferred(tsdn_t *tsdn, arena_t *arena);
+void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);
-void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
- cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
-void arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info,
- bool zero);
-
-typedef void (arena_dalloc_junk_small_t)(void *, const bin_info_t *);
-extern arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small;
+void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
+ cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
+ const unsigned nfill);
void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
szind_t ind, bool zero);
@@ -63,8 +71,12 @@ void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize);
void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
bool slow_path);
-void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- szind_t binind, extent_t *extent, void *ptr);
+void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab);
+
+void arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena,
+ edata_t *slab, bin_t *bin);
+void arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena,
+ edata_t *slab, bin_t *bin);
void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, bool zero, size_t *newsize);
@@ -72,6 +84,9 @@ void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args);
dss_prec_t arena_dss_prec_get(arena_t *arena);
+ehooks_t *arena_get_ehooks(arena_t *arena);
+extent_hooks_t *arena_set_extent_hooks(tsd_t *tsd, arena_t *arena,
+ extent_hooks_t *extent_hooks);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_dirty_decay_ms_default_get(void);
bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
@@ -82,14 +97,15 @@ bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
-size_t arena_extent_sn_next(arena_t *arena);
-arena_t *arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
+arena_t *arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);
bool arena_init_huge(void);
bool arena_is_huge(unsigned arena_ind);
arena_t *arena_choose_huge(tsd_t *tsd);
-bin_t *arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+bin_t *arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
unsigned *binshard);
-void arena_boot(sc_data_t *sc_data);
+size_t arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+ void **ptrs, size_t nfill, bool zero);
+bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa);
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
@@ -98,6 +114,7 @@ void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork8(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_inlines_a.h b/contrib/jemalloc/include/jemalloc/internal/arena_inlines_a.h
index 9abf7f6ac702..8568358c7fb6 100644
--- a/contrib/jemalloc/include/jemalloc/internal/arena_inlines_a.h
+++ b/contrib/jemalloc/include/jemalloc/internal/arena_inlines_a.h
@@ -3,7 +3,7 @@
static inline unsigned
arena_ind_get(const arena_t *arena) {
- return base_ind_get(arena->base);
+ return arena->ind;
}
static inline void
@@ -21,37 +21,4 @@ arena_internal_get(arena_t *arena) {
return atomic_load_zu(&arena->stats.internal, ATOMIC_RELAXED);
}
-static inline bool
-arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes) {
- cassert(config_prof);
-
- if (likely(prof_interval == 0 || !prof_active_get_unlocked())) {
- return false;
- }
-
- return prof_accum_add(tsdn, &arena->prof_accum, accumbytes);
-}
-
-static inline void
-percpu_arena_update(tsd_t *tsd, unsigned cpu) {
- assert(have_percpu_arena);
- arena_t *oldarena = tsd_arena_get(tsd);
- assert(oldarena != NULL);
- unsigned oldind = arena_ind_get(oldarena);
-
- if (oldind != cpu) {
- unsigned newind = cpu;
- arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
- assert(newarena != NULL);
-
- /* Set new arena/tcache associations. */
- arena_migrate(tsd, oldind, newind);
- tcache_t *tcache = tcache_get(tsd);
- if (tcache != NULL) {
- tcache_arena_reassociate(tsd_tsdn(tsd), tcache,
- newarena);
- }
- }
-}
-
#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_A_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_inlines_b.h b/contrib/jemalloc/include/jemalloc/internal/arena_inlines_b.h
index dd926575fc8a..fa81537c469d 100644
--- a/contrib/jemalloc/include/jemalloc/internal/arena_inlines_b.h
+++ b/contrib/jemalloc/include/jemalloc/internal/arena_inlines_b.h
@@ -1,16 +1,20 @@
#ifndef JEMALLOC_INTERNAL_ARENA_INLINES_B_H
#define JEMALLOC_INTERNAL_ARENA_INLINES_B_H
+#include "jemalloc/internal/div.h"
+#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
-JEMALLOC_ALWAYS_INLINE bool
-arena_has_default_hooks(arena_t *arena) {
- return (extent_hooks_get(arena) == &extent_hooks_default);
+static inline arena_t *
+arena_get_from_edata(edata_t *edata) {
+ return (arena_t *)atomic_load_p(&arenas[edata_arena_ind_get(edata)],
+ ATOMIC_RELAXED);
}
JEMALLOC_ALWAYS_INLINE arena_t *
@@ -34,127 +38,109 @@ arena_choose_maybe_huge(tsd_t *tsd, arena_t *arena, size_t size) {
return arena_choose(tsd, NULL);
}
-JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
+JEMALLOC_ALWAYS_INLINE void
+arena_prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
+ prof_info_t *prof_info, bool reset_recent) {
cassert(config_prof);
assert(ptr != NULL);
+ assert(prof_info != NULL);
+
+ edata_t *edata = NULL;
+ bool is_slab;
/* Static check. */
if (alloc_ctx == NULL) {
- const extent_t *extent = iealloc(tsdn, ptr);
- if (unlikely(!extent_slab_get(extent))) {
- return large_prof_tctx_get(tsdn, extent);
- }
+ edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
+ ptr);
+ is_slab = edata_slab_get(edata);
+ } else if (unlikely(!(is_slab = alloc_ctx->slab))) {
+ edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
+ ptr);
+ }
+
+ if (unlikely(!is_slab)) {
+ /* edata must have been initialized at this point. */
+ assert(edata != NULL);
+ large_prof_info_get(tsd, edata, prof_info, reset_recent);
} else {
- if (unlikely(!alloc_ctx->slab)) {
- return large_prof_tctx_get(tsdn, iealloc(tsdn, ptr));
- }
+ prof_info->alloc_tctx = (prof_tctx_t *)(uintptr_t)1U;
+ /*
+ * No need to set other fields in prof_info; they will never be
+ * accessed if (uintptr_t)alloc_tctx == (uintptr_t)1U.
+ */
}
- return (prof_tctx_t *)(uintptr_t)1U;
}
JEMALLOC_ALWAYS_INLINE void
-arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
- alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
+arena_prof_tctx_reset(tsd_t *tsd, const void *ptr,
+ emap_alloc_ctx_t *alloc_ctx) {
cassert(config_prof);
assert(ptr != NULL);
/* Static check. */
if (alloc_ctx == NULL) {
- extent_t *extent = iealloc(tsdn, ptr);
- if (unlikely(!extent_slab_get(extent))) {
- large_prof_tctx_set(tsdn, extent, tctx);
+ edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
+ &arena_emap_global, ptr);
+ if (unlikely(!edata_slab_get(edata))) {
+ large_prof_tctx_reset(edata);
}
} else {
if (unlikely(!alloc_ctx->slab)) {
- large_prof_tctx_set(tsdn, iealloc(tsdn, ptr), tctx);
+ edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd),
+ &arena_emap_global, ptr);
+ large_prof_tctx_reset(edata);
}
}
}
-static inline void
-arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
+JEMALLOC_ALWAYS_INLINE void
+arena_prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
- extent_t *extent = iealloc(tsdn, ptr);
- assert(!extent_slab_get(extent));
+ edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
+ ptr);
+ assert(!edata_slab_get(edata));
- large_prof_tctx_reset(tsdn, extent);
-}
-
-JEMALLOC_ALWAYS_INLINE nstime_t
-arena_prof_alloc_time_get(tsdn_t *tsdn, const void *ptr,
- alloc_ctx_t *alloc_ctx) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- extent_t *extent = iealloc(tsdn, ptr);
- /*
- * Unlike arena_prof_prof_tctx_{get, set}, we only call this once we're
- * sure we have a sampled allocation.
- */
- assert(!extent_slab_get(extent));
- return large_prof_alloc_time_get(extent);
+ large_prof_tctx_reset(edata);
}
JEMALLOC_ALWAYS_INLINE void
-arena_prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
- nstime_t t) {
+arena_prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx,
+ size_t size) {
cassert(config_prof);
- assert(ptr != NULL);
- extent_t *extent = iealloc(tsdn, ptr);
- assert(!extent_slab_get(extent));
- large_prof_alloc_time_set(extent, t);
+ assert(!edata_slab_get(edata));
+ large_prof_info_set(edata, tctx, size);
}
JEMALLOC_ALWAYS_INLINE void
arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks) {
- tsd_t *tsd;
- ticker_t *decay_ticker;
-
if (unlikely(tsdn_null(tsdn))) {
return;
}
- tsd = tsdn_tsd(tsdn);
- decay_ticker = decay_ticker_get(tsd, arena_ind_get(arena));
- if (unlikely(decay_ticker == NULL)) {
- return;
- }
- if (unlikely(ticker_ticks(decay_ticker, nticks))) {
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ /*
+ * We use the ticker_geom_t to avoid having per-arena state in the tsd.
+ * Instead of having a countdown-until-decay timer running for every
+ * arena in every thread, we flip a coin once per tick, whose
+ * probability of coming up heads is 1/nticks; this is effectively the
+ * operation of the ticker_geom_t. Each arena has the same chance of a
+ * coinflip coming up heads (1/ARENA_DECAY_NTICKS_PER_UPDATE), so we can
+ * use a single ticker for all of them.
+ */
+ ticker_geom_t *decay_ticker = tsd_arena_decay_tickerp_get(tsd);
+ uint64_t *prng_state = tsd_prng_statep_get(tsd);
+ if (unlikely(ticker_geom_ticks(decay_ticker, prng_state, nticks))) {
arena_decay(tsdn, arena, false, false);
}
}
JEMALLOC_ALWAYS_INLINE void
arena_decay_tick(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_assert_not_owner(tsdn, &arena->decay_dirty.mtx);
- malloc_mutex_assert_not_owner(tsdn, &arena->decay_muzzy.mtx);
-
arena_decay_ticks(tsdn, arena, 1);
}
-/* Purge a single extent to retained / unmapped directly. */
-JEMALLOC_ALWAYS_INLINE void
-arena_decay_extent(tsdn_t *tsdn,arena_t *arena, extent_hooks_t **r_extent_hooks,
- extent_t *extent) {
- size_t extent_size = extent_size_get(extent);
- extent_dalloc_wrapper(tsdn, arena,
- r_extent_hooks, extent);
- if (config_stats) {
- /* Update stats accordingly. */
- arena_stats_lock(tsdn, &arena->stats);
- arena_stats_add_u64(tsdn, &arena->stats,
- &arena->decay_dirty.stats->nmadvise, 1);
- arena_stats_add_u64(tsdn, &arena->stats,
- &arena->decay_dirty.stats->purged, extent_size >> LG_PAGE);
- arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
- extent_size);
- arena_stats_unlock(tsdn, &arena->stats);
- }
-}
-
JEMALLOC_ALWAYS_INLINE void *
arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
tcache_t *tcache, bool slow_path) {
@@ -178,21 +164,19 @@ arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
JEMALLOC_ALWAYS_INLINE arena_t *
arena_aalloc(tsdn_t *tsdn, const void *ptr) {
- return extent_arena_get(iealloc(tsdn, ptr));
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
+ unsigned arena_ind = edata_arena_ind_get(edata);
+ return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_RELAXED);
}
JEMALLOC_ALWAYS_INLINE size_t
arena_salloc(tsdn_t *tsdn, const void *ptr) {
assert(ptr != NULL);
+ emap_alloc_ctx_t alloc_ctx;
+ emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
+ assert(alloc_ctx.szind != SC_NSIZES);
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- szind_t szind = rtree_szind_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true);
- assert(szind != SC_NSIZES);
-
- return sz_index2size(szind);
+ return sz_index2size(alloc_ctx.szind);
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -206,26 +190,53 @@ arena_vsalloc(tsdn_t *tsdn, const void *ptr) {
* failure.
*/
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- extent_t *extent;
- szind_t szind;
- if (rtree_extent_szind_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, false, &extent, &szind)) {
+ emap_full_alloc_ctx_t full_alloc_ctx;
+ bool missing = emap_full_alloc_ctx_try_lookup(tsdn, &arena_emap_global,
+ ptr, &full_alloc_ctx);
+ if (missing) {
return 0;
}
- if (extent == NULL) {
+ if (full_alloc_ctx.edata == NULL) {
return 0;
}
- assert(extent_state_get(extent) == extent_state_active);
+ assert(edata_state_get(full_alloc_ctx.edata) == extent_state_active);
/* Only slab members should be looked up via interior pointers. */
- assert(extent_addr_get(extent) == ptr || extent_slab_get(extent));
+ assert(edata_addr_get(full_alloc_ctx.edata) == ptr
+ || edata_slab_get(full_alloc_ctx.edata));
+
+ assert(full_alloc_ctx.szind != SC_NSIZES);
+
+ return sz_index2size(full_alloc_ctx.szind);
+}
- assert(szind != SC_NSIZES);
+JEMALLOC_ALWAYS_INLINE bool
+large_dalloc_safety_checks(edata_t *edata, void *ptr, szind_t szind) {
+ if (!config_opt_safety_checks) {
+ return false;
+ }
+
+ /*
+ * Eagerly detect double free and sized dealloc bugs for large sizes.
+ * The cost is low enough (as edata will be accessed anyway) to be
+ * enabled all the time.
+ */
+ if (unlikely(edata == NULL ||
+ edata_state_get(edata) != extent_state_active)) {
+ safety_check_fail("Invalid deallocation detected: "
+ "pages being freed (%p) not currently active, "
+ "possibly caused by double free bugs.",
+ (uintptr_t)edata_addr_get(edata));
+ return true;
+ }
+ size_t input_size = sz_index2size(szind);
+ if (unlikely(input_size != edata_usize_get(edata))) {
+ safety_check_fail_sized_dealloc(/* current_dealloc */ true, ptr,
+ /* true_size */ edata_usize_get(edata), input_size);
+ return true;
+ }
- return sz_index2size(szind);
+ return false;
}
static inline void
@@ -233,8 +244,13 @@ arena_dalloc_large_no_tcache(tsdn_t *tsdn, void *ptr, szind_t szind) {
if (config_prof && unlikely(szind < SC_NBINS)) {
arena_dalloc_promoted(tsdn, ptr, NULL, true);
} else {
- extent_t *extent = iealloc(tsdn, ptr);
- large_dalloc(tsdn, extent);
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+ ptr);
+ if (large_dalloc_safety_checks(edata, ptr, szind)) {
+ /* See the comment in isfree. */
+ return;
+ }
+ large_dalloc(tsdn, edata);
}
}
@@ -242,27 +258,22 @@ static inline void
arena_dalloc_no_tcache(tsdn_t *tsdn, void *ptr) {
assert(ptr != NULL);
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- szind_t szind;
- bool slab;
- rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
- true, &szind, &slab);
+ emap_alloc_ctx_t alloc_ctx;
+ emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr, &alloc_ctx);
if (config_debug) {
- extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
- rtree_ctx, (uintptr_t)ptr, true);
- assert(szind == extent_szind_get(extent));
- assert(szind < SC_NSIZES);
- assert(slab == extent_slab_get(extent));
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+ ptr);
+ assert(alloc_ctx.szind == edata_szind_get(edata));
+ assert(alloc_ctx.szind < SC_NSIZES);
+ assert(alloc_ctx.slab == edata_slab_get(edata));
}
- if (likely(slab)) {
+ if (likely(alloc_ctx.slab)) {
/* Small allocation. */
arena_dalloc_small(tsdn, ptr);
} else {
- arena_dalloc_large_no_tcache(tsdn, ptr, szind);
+ arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
}
}
@@ -277,14 +288,19 @@ arena_dalloc_large(tsdn_t *tsdn, void *ptr, tcache_t *tcache, szind_t szind,
slow_path);
}
} else {
- extent_t *extent = iealloc(tsdn, ptr);
- large_dalloc(tsdn, extent);
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+ ptr);
+ if (large_dalloc_safety_checks(edata, ptr, szind)) {
+ /* See the comment in isfree. */
+ return;
+ }
+ large_dalloc(tsdn, edata);
}
}
JEMALLOC_ALWAYS_INLINE void
arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
- alloc_ctx_t *alloc_ctx, bool slow_path) {
+ emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
@@ -293,34 +309,30 @@ arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
return;
}
- szind_t szind;
- bool slab;
- rtree_ctx_t *rtree_ctx;
- if (alloc_ctx != NULL) {
- szind = alloc_ctx->szind;
- slab = alloc_ctx->slab;
- assert(szind != SC_NSIZES);
+ emap_alloc_ctx_t alloc_ctx;
+ if (caller_alloc_ctx != NULL) {
+ alloc_ctx = *caller_alloc_ctx;
} else {
- rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
- rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &szind, &slab);
+ util_assume(!tsdn_null(tsdn));
+ emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
+ &alloc_ctx);
}
if (config_debug) {
- rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
- extent_t *extent = rtree_extent_read(tsdn, &extents_rtree,
- rtree_ctx, (uintptr_t)ptr, true);
- assert(szind == extent_szind_get(extent));
- assert(szind < SC_NSIZES);
- assert(slab == extent_slab_get(extent));
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+ ptr);
+ assert(alloc_ctx.szind == edata_szind_get(edata));
+ assert(alloc_ctx.szind < SC_NSIZES);
+ assert(alloc_ctx.slab == edata_slab_get(edata));
}
- if (likely(slab)) {
+ if (likely(alloc_ctx.slab)) {
/* Small allocation. */
- tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
- slow_path);
+ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
+ alloc_ctx.szind, slow_path);
} else {
- arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
+ arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
+ slow_path);
}
}
@@ -329,47 +341,43 @@ arena_sdalloc_no_tcache(tsdn_t *tsdn, void *ptr, size_t size) {
assert(ptr != NULL);
assert(size <= SC_LARGE_MAXCLASS);
- szind_t szind;
- bool slab;
+ emap_alloc_ctx_t alloc_ctx;
if (!config_prof || !opt_prof) {
/*
* There is no risk of being confused by a promoted sampled
* object, so base szind and slab on the given size.
*/
- szind = sz_size2index(size);
- slab = (szind < SC_NBINS);
+ alloc_ctx.szind = sz_size2index(size);
+ alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
}
if ((config_prof && opt_prof) || config_debug) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
- &rtree_ctx_fallback);
-
- rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &szind, &slab);
+ emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
+ &alloc_ctx);
- assert(szind == sz_size2index(size));
- assert((config_prof && opt_prof) || slab == (szind < SC_NBINS));
+ assert(alloc_ctx.szind == sz_size2index(size));
+ assert((config_prof && opt_prof)
+ || alloc_ctx.slab == (alloc_ctx.szind < SC_NBINS));
if (config_debug) {
- extent_t *extent = rtree_extent_read(tsdn,
- &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
- assert(szind == extent_szind_get(extent));
- assert(slab == extent_slab_get(extent));
+ edata_t *edata = emap_edata_lookup(tsdn,
+ &arena_emap_global, ptr);
+ assert(alloc_ctx.szind == edata_szind_get(edata));
+ assert(alloc_ctx.slab == edata_slab_get(edata));
}
}
- if (likely(slab)) {
+ if (likely(alloc_ctx.slab)) {
/* Small allocation. */
arena_dalloc_small(tsdn, ptr);
} else {
- arena_dalloc_large_no_tcache(tsdn, ptr, szind);
+ arena_dalloc_large_no_tcache(tsdn, ptr, alloc_ctx.szind);
}
}
JEMALLOC_ALWAYS_INLINE void
arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- alloc_ctx_t *alloc_ctx, bool slow_path) {
+ emap_alloc_ctx_t *caller_alloc_ctx, bool slow_path) {
assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
assert(size <= SC_LARGE_MAXCLASS);
@@ -379,49 +387,164 @@ arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
return;
}
- szind_t szind;
- bool slab;
- alloc_ctx_t local_ctx;
+ emap_alloc_ctx_t alloc_ctx;
if (config_prof && opt_prof) {
- if (alloc_ctx == NULL) {
+ if (caller_alloc_ctx == NULL) {
/* Uncommon case and should be a static check. */
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
- &rtree_ctx_fallback);
- rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &local_ctx.szind,
- &local_ctx.slab);
- assert(local_ctx.szind == sz_size2index(size));
- alloc_ctx = &local_ctx;
+ emap_alloc_ctx_lookup(tsdn, &arena_emap_global, ptr,
+ &alloc_ctx);
+ assert(alloc_ctx.szind == sz_size2index(size));
+ } else {
+ alloc_ctx = *caller_alloc_ctx;
}
- slab = alloc_ctx->slab;
- szind = alloc_ctx->szind;
} else {
/*
* There is no risk of being confused by a promoted sampled
* object, so base szind and slab on the given size.
*/
- szind = sz_size2index(size);
- slab = (szind < SC_NBINS);
+ alloc_ctx.szind = sz_size2index(size);
+ alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
}
if (config_debug) {
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsdn_tsd(tsdn));
- rtree_szind_slab_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &szind, &slab);
- extent_t *extent = rtree_extent_read(tsdn,
- &extents_rtree, rtree_ctx, (uintptr_t)ptr, true);
- assert(szind == extent_szind_get(extent));
- assert(slab == extent_slab_get(extent));
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global,
+ ptr);
+ assert(alloc_ctx.szind == edata_szind_get(edata));
+ assert(alloc_ctx.slab == edata_slab_get(edata));
}
- if (likely(slab)) {
+ if (likely(alloc_ctx.slab)) {
/* Small allocation. */
- tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr, szind,
- slow_path);
+ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
+ alloc_ctx.szind, slow_path);
} else {
- arena_dalloc_large(tsdn, ptr, tcache, szind, slow_path);
+ arena_dalloc_large(tsdn, ptr, tcache, alloc_ctx.szind,
+ slow_path);
+ }
+}
+
+static inline void
+arena_cache_oblivious_randomize(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
+ size_t alignment) {
+ assert(edata_base_get(edata) == edata_addr_get(edata));
+
+ if (alignment < PAGE) {
+ unsigned lg_range = LG_PAGE -
+ lg_floor(CACHELINE_CEILING(alignment));
+ size_t r;
+ if (!tsdn_null(tsdn)) {
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ r = (size_t)prng_lg_range_u64(
+ tsd_prng_statep_get(tsd), lg_range);
+ } else {
+ uint64_t stack_value = (uint64_t)(uintptr_t)&r;
+ r = (size_t)prng_lg_range_u64(&stack_value, lg_range);
+ }
+ uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
+ lg_range);
+ edata->e_addr = (void *)((uintptr_t)edata->e_addr +
+ random_offset);
+ assert(ALIGNMENT_ADDR2BASE(edata->e_addr, alignment) ==
+ edata->e_addr);
+ }
+}
+
+/*
+ * The dalloc bin info contains just the information that the common paths need
+ * during tcache flushes. By force-inlining these paths, and using local copies
+ * of data (so that the compiler knows it's constant), we avoid a whole bunch of
+ * redundant loads and stores by leaving this information in registers.
+ */
+typedef struct arena_dalloc_bin_locked_info_s arena_dalloc_bin_locked_info_t;
+struct arena_dalloc_bin_locked_info_s {
+ div_info_t div_info;
+ uint32_t nregs;
+ uint64_t ndalloc;
+};
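+
+/*
+ * Sketch of the intended call sequence (see the tcache flush paths for real
+ * usage):
+ *
+ *	arena_dalloc_bin_locked_info_t info;
+ *	arena_dalloc_bin_locked_begin(&info, binind);
+ *	for (each ptr being flushed to this bin) {
+ *		if (arena_dalloc_bin_locked_step(tsdn, arena, bin, &info,
+ *		    binind, slab, ptr)) {
+ *			...call arena_slab_dalloc(tsdn, arena, slab) later,
+ *			once the bin lock has been dropped...
+ *		}
+ *	}
+ *	arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info);
+ */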
+
+JEMALLOC_ALWAYS_INLINE size_t
+arena_slab_regind(arena_dalloc_bin_locked_info_t *info, szind_t binind,
+ edata_t *slab, const void *ptr) {
+ size_t diff, regind;
+
+ /* Freeing a pointer outside the slab can cause assertion failure. */
+ assert((uintptr_t)ptr >= (uintptr_t)edata_addr_get(slab));
+ assert((uintptr_t)ptr < (uintptr_t)edata_past_get(slab));
+ /* Freeing an interior pointer can cause assertion failure. */
+ assert(((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab)) %
+ (uintptr_t)bin_infos[binind].reg_size == 0);
+
+ diff = (size_t)((uintptr_t)ptr - (uintptr_t)edata_addr_get(slab));
+
+ /* Avoid doing division with a variable divisor. */
+ regind = div_compute(&info->div_info, diff);
+
+ assert(regind < bin_infos[binind].nregs);
+
+ return regind;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_dalloc_bin_locked_begin(arena_dalloc_bin_locked_info_t *info,
+ szind_t binind) {
+ info->div_info = arena_binind_div_info[binind];
+ info->nregs = bin_infos[binind].nregs;
+ info->ndalloc = 0;
+}
+
+/*
+ * Does the deallocation work associated with freeing a single pointer (a
+ * "step") in between a arena_dalloc_bin_locked begin and end call.
+ *
+ * Returns true if arena_slab_dalloc must be called on slab. Doesn't do
+ * stats updates, which happen during finish (this lets running counts get left
+ * in a register).
+ */
+JEMALLOC_ALWAYS_INLINE bool
+arena_dalloc_bin_locked_step(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+ arena_dalloc_bin_locked_info_t *info, szind_t binind, edata_t *slab,
+ void *ptr) {
+ const bin_info_t *bin_info = &bin_infos[binind];
+ size_t regind = arena_slab_regind(info, binind, slab, ptr);
+ slab_data_t *slab_data = edata_slab_data_get(slab);
+
+ assert(edata_nfree_get(slab) < bin_info->nregs);
+ /* Freeing an unallocated pointer can cause assertion failure. */
+ assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
+
+ bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
+ edata_nfree_inc(slab);
+
+ if (config_stats) {
+ info->ndalloc++;
+ }
+
+ unsigned nfree = edata_nfree_get(slab);
+ if (nfree == bin_info->nregs) {
+ arena_dalloc_bin_locked_handle_newly_empty(tsdn, arena, slab,
+ bin);
+ return true;
+ } else if (nfree == 1 && slab != bin->slabcur) {
+ arena_dalloc_bin_locked_handle_newly_nonempty(tsdn, arena, slab,
+ bin);
}
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+arena_dalloc_bin_locked_finish(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+ arena_dalloc_bin_locked_info_t *info) {
+ if (config_stats) {
+ bin->stats.ndalloc += info->ndalloc;
+ assert(bin->stats.curregs >= (size_t)info->ndalloc);
+ bin->stats.curregs -= (size_t)info->ndalloc;
+ }
+}
+
+static inline bin_t *
+arena_get_bin(arena_t *arena, szind_t binind, unsigned binshard) {
+ bin_t *shard0 = (bin_t *)((uintptr_t)arena + arena_bin_offsets[binind]);
+ return shard0 + binshard;
}
#endif /* JEMALLOC_INTERNAL_ARENA_INLINES_B_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_stats.h b/contrib/jemalloc/include/jemalloc/internal/arena_stats.h
index 23949ed92616..15f1d345f9e8 100644
--- a/contrib/jemalloc/include/jemalloc/internal/arena_stats.h
+++ b/contrib/jemalloc/include/jemalloc/internal/arena_stats.h
@@ -2,77 +2,41 @@
#define JEMALLOC_INTERNAL_ARENA_STATS_H
#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/lockedint.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
+#include "jemalloc/internal/pa.h"
#include "jemalloc/internal/sc.h"
JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS
-/*
- * In those architectures that support 64-bit atomics, we use atomic updates for
- * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
- * externally.
- */
-#ifdef JEMALLOC_ATOMIC_U64
-typedef atomic_u64_t arena_stats_u64_t;
-#else
-/* Must hold the arena stats mutex while reading atomically. */
-typedef uint64_t arena_stats_u64_t;
-#endif
-
typedef struct arena_stats_large_s arena_stats_large_t;
struct arena_stats_large_s {
/*
* Total number of allocation/deallocation requests served directly by
* the arena.
*/
- arena_stats_u64_t nmalloc;
- arena_stats_u64_t ndalloc;
+ locked_u64_t nmalloc;
+ locked_u64_t ndalloc;
/*
* Number of allocation requests that correspond to this size class.
* This includes requests served by tcache, though tcache only
* periodically merges into this counter.
*/
- arena_stats_u64_t nrequests; /* Partially derived. */
+ locked_u64_t nrequests; /* Partially derived. */
/*
* Number of tcache fills / flushes for large (similarly, periodically
* merged). Note that there is no large tcache batch-fill currently
* (i.e. only fill 1 at a time); however flush may be batched.
*/
- arena_stats_u64_t nfills; /* Partially derived. */
- arena_stats_u64_t nflushes; /* Partially derived. */
+ locked_u64_t nfills; /* Partially derived. */
+ locked_u64_t nflushes; /* Partially derived. */
/* Current number of allocations of this size class. */
size_t curlextents; /* Derived. */
};
-typedef struct arena_stats_decay_s arena_stats_decay_t;
-struct arena_stats_decay_s {
- /* Total number of purge sweeps. */
- arena_stats_u64_t npurge;
- /* Total number of madvise calls made. */
- arena_stats_u64_t nmadvise;
- /* Total number of pages purged. */
- arena_stats_u64_t purged;
-};
-
-typedef struct arena_stats_extents_s arena_stats_extents_t;
-struct arena_stats_extents_s {
- /*
- * Stats for a given index in the range [0, SC_NPSIZES] in an extents_t.
- * We track both bytes and # of extents: two extents in the same bucket
- * may have different sizes if adjacent size classes differ by more than
- * a page, so bytes cannot always be derived from # of extents.
- */
- atomic_zu_t ndirty;
- atomic_zu_t dirty_bytes;
- atomic_zu_t nmuzzy;
- atomic_zu_t muzzy_bytes;
- atomic_zu_t nretained;
- atomic_zu_t retained_bytes;
-};
-
/*
* Arena stats. Note that fields marked "derived" are not directly maintained
* within the arena code; rather their values are derived during stats merge
@@ -80,43 +44,36 @@ struct arena_stats_extents_s {
*/
typedef struct arena_stats_s arena_stats_t;
struct arena_stats_s {
-#ifndef JEMALLOC_ATOMIC_U64
- malloc_mutex_t mtx;
-#endif
-
- /* Number of bytes currently mapped, excluding retained memory. */
- atomic_zu_t mapped; /* Partially derived. */
+ LOCKEDINT_MTX_DECLARE(mtx)
/*
- * Number of unused virtual memory bytes currently retained. Retained
- * bytes are technically mapped (though always decommitted or purged),
- * but they are excluded from the mapped statistic (above).
+ * resident includes the base stats -- that's why it lives here and not
+ * in pa_shard_stats_t.
*/
- atomic_zu_t retained; /* Derived. */
-
- /* Number of extent_t structs allocated by base, but not being used. */
- atomic_zu_t extent_avail;
-
- arena_stats_decay_t decay_dirty;
- arena_stats_decay_t decay_muzzy;
+ size_t base; /* Derived. */
+ size_t resident; /* Derived. */
+ size_t metadata_thp; /* Derived. */
+ size_t mapped; /* Derived. */
- atomic_zu_t base; /* Derived. */
atomic_zu_t internal;
- atomic_zu_t resident; /* Derived. */
- atomic_zu_t metadata_thp;
- atomic_zu_t allocated_large; /* Derived. */
- arena_stats_u64_t nmalloc_large; /* Derived. */
- arena_stats_u64_t ndalloc_large; /* Derived. */
- arena_stats_u64_t nfills_large; /* Derived. */
- arena_stats_u64_t nflushes_large; /* Derived. */
- arena_stats_u64_t nrequests_large; /* Derived. */
+ size_t allocated_large; /* Derived. */
+ uint64_t nmalloc_large; /* Derived. */
+ uint64_t ndalloc_large; /* Derived. */
+ uint64_t nfills_large; /* Derived. */
+ uint64_t nflushes_large; /* Derived. */
+ uint64_t nrequests_large; /* Derived. */
- /* VM space had to be leaked (undocumented). Normally 0. */
- atomic_zu_t abandoned_vm;
+ /*
+ * The stats logically owned by the pa_shard in the same arena. This
+ * lives here only because it's convenient for the purposes of the ctl
+ * module -- it only knows about the single arena_stats.
+ */
+ pa_shard_stats_t pa_shard_stats;
/* Number of bytes cached in tcache associated with this arena. */
- atomic_zu_t tcache_bytes; /* Derived. */
+ size_t tcache_bytes; /* Derived. */
+ size_t tcache_stashed_bytes; /* Derived. */
mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes];
@@ -134,138 +91,24 @@ arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
assert(((char *)arena_stats)[i] == 0);
}
}
-#ifndef JEMALLOC_ATOMIC_U64
- if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
+ if (LOCKEDINT_MTX_INIT(arena_stats->mtx, "arena_stats",
WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
return true;
}
-#endif
/* Memory is zeroed, so there is no need to clear stats. */
return false;
}
static inline void
-arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
-#ifndef JEMALLOC_ATOMIC_U64
- malloc_mutex_lock(tsdn, &arena_stats->mtx);
-#endif
-}
-
-static inline void
-arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
-#ifndef JEMALLOC_ATOMIC_U64
- malloc_mutex_unlock(tsdn, &arena_stats->mtx);
-#endif
-}
-
-static inline uint64_t
-arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
- arena_stats_u64_t *p) {
-#ifdef JEMALLOC_ATOMIC_U64
- return atomic_load_u64(p, ATOMIC_RELAXED);
-#else
- malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
- return *p;
-#endif
-}
-
-static inline void
-arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
- arena_stats_u64_t *p, uint64_t x) {
-#ifdef JEMALLOC_ATOMIC_U64
- atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
-#else
- malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
- *p += x;
-#endif
-}
-
-static inline void
-arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
- arena_stats_u64_t *p, uint64_t x) {
-#ifdef JEMALLOC_ATOMIC_U64
- uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
- assert(r - x <= r);
-#else
- malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
- *p -= x;
- assert(*p + x >= *p);
-#endif
-}
-
-/*
- * Non-atomically sets *dst += src. *dst needs external synchronization.
- * This lets us avoid the cost of a fetch_add when its unnecessary (note that
- * the types here are atomic).
- */
-static inline void
-arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
-#ifdef JEMALLOC_ATOMIC_U64
- uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
- atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
-#else
- *dst += src;
-#endif
-}
-
-static inline size_t
-arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
- atomic_zu_t *p) {
-#ifdef JEMALLOC_ATOMIC_U64
- return atomic_load_zu(p, ATOMIC_RELAXED);
-#else
- malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
- return atomic_load_zu(p, ATOMIC_RELAXED);
-#endif
-}
-
-static inline void
-arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
- atomic_zu_t *p, size_t x) {
-#ifdef JEMALLOC_ATOMIC_U64
- atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
-#else
- malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
- size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
- atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
-#endif
-}
-
-static inline void
-arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats,
- atomic_zu_t *p, size_t x) {
-#ifdef JEMALLOC_ATOMIC_U64
- size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
- assert(r - x <= r);
-#else
- malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
- size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
- atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
-#endif
-}
-
-/* Like the _u64 variant, needs an externally synchronized *dst. */
-static inline void
-arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
- size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
- atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
-}
-
-static inline void
arena_stats_large_flush_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
szind_t szind, uint64_t nrequests) {
- arena_stats_lock(tsdn, arena_stats);
+ LOCKEDINT_MTX_LOCK(tsdn, arena_stats->mtx);
arena_stats_large_t *lstats = &arena_stats->lstats[szind - SC_NBINS];
- arena_stats_add_u64(tsdn, arena_stats, &lstats->nrequests, nrequests);
- arena_stats_add_u64(tsdn, arena_stats, &lstats->nflushes, 1);
- arena_stats_unlock(tsdn, arena_stats);
-}
-
-static inline void
-arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
- arena_stats_lock(tsdn, arena_stats);
- arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
- arena_stats_unlock(tsdn, arena_stats);
+ locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
+ &lstats->nrequests, nrequests);
+ locked_inc_u64(tsdn, LOCKEDINT_MTX(arena_stats->mtx),
+ &lstats->nflushes, 1);
+ LOCKEDINT_MTX_UNLOCK(tsdn, arena_stats->mtx);
}
#endif /* JEMALLOC_INTERNAL_ARENA_STATS_H */
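The hunk above folds the old #ifdef JEMALLOC_ATOMIC_U64 helpers into the locked-integer abstraction: one mutex covers all of an arena's u64 counters, and the LOCKEDINT macros let that mutex compile away where native 64-bit atomics exist. A minimal standalone sketch of the mutex-guarded shape, with hypothetical names (pthread stands in for malloc_mutex_t):

    #include <pthread.h>
    #include <stdint.h>

    typedef struct {
        pthread_mutex_t mtx;        /* LOCKEDINT_MTX analogue */
        uint64_t nrequests;
        uint64_t nflushes;
    } stats_sketch_t;

    static void
    stats_sketch_flush_add(stats_sketch_t *s, uint64_t nrequests) {
        pthread_mutex_lock(&s->mtx);    /* LOCKEDINT_MTX_LOCK */
        s->nrequests += nrequests;      /* locked_inc_u64 */
        s->nflushes += 1;
        pthread_mutex_unlock(&s->mtx);  /* LOCKEDINT_MTX_UNLOCK */
    }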
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_structs.h b/contrib/jemalloc/include/jemalloc/internal/arena_structs.h
new file mode 100644
index 000000000000..e2a5a4087bcc
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/arena_structs.h
@@ -0,0 +1,101 @@
+#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_H
+#define JEMALLOC_INTERNAL_ARENA_STRUCTS_H
+
+#include "jemalloc/internal/arena_stats.h"
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/bin.h"
+#include "jemalloc/internal/bitmap.h"
+#include "jemalloc/internal/counter.h"
+#include "jemalloc/internal/ecache.h"
+#include "jemalloc/internal/edata_cache.h"
+#include "jemalloc/internal/extent_dss.h"
+#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/pa.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/sc.h"
+#include "jemalloc/internal/ticker.h"
+
+struct arena_s {
+ /*
+ * Number of threads currently assigned to this arena. Each thread has
+ * two distinct assignments, one for application-serving allocation, and
+ * the other for internal metadata allocation. Internal metadata must
+ * not be allocated from arenas explicitly created via the arenas.create
+ * mallctl, because the arena.<i>.reset mallctl indiscriminately
+ * discards all allocations for the affected arena.
+ *
+ * 0: Application allocation.
+ * 1: Internal metadata allocation.
+ *
+ * Synchronization: atomic.
+ */
+ atomic_u_t nthreads[2];
+
+ /* Next bin shard for binding new threads. Synchronization: atomic. */
+ atomic_u_t binshard_next;
+
+ /*
+ * When percpu_arena is enabled, to amortize the cost of reading /
+ * updating the current CPU id, track the most recent thread accessing
+ * this arena, and only read the CPU id if there is a mismatch.
+ */
+ tsdn_t *last_thd;
+
+ /* Synchronization: internal. */
+ arena_stats_t stats;
+
+ /*
+ * Lists of tcaches and cache_bin_array_descriptors for extant threads
+ * associated with this arena. Stats from these are merged
+ * incrementally, and at exit if opt_stats_print is enabled.
+ *
+ * Synchronization: tcache_ql_mtx.
+ */
+ ql_head(tcache_slow_t) tcache_ql;
+ ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
+ malloc_mutex_t tcache_ql_mtx;
+
+ /*
+ * Represents a dss_prec_t, but atomically.
+ *
+ * Synchronization: atomic.
+ */
+ atomic_u_t dss_prec;
+
+ /*
+ * Extant large allocations.
+ *
+ * Synchronization: large_mtx.
+ */
+ edata_list_active_t large;
+ /* Synchronizes all large allocation/update/deallocation. */
+ malloc_mutex_t large_mtx;
+
+ /* The page-level allocator shard this arena uses. */
+ pa_shard_t pa_shard;
+
+ /*
+ * A cached copy of base->ind. This can get accessed on hot paths;
+ * looking it up in base requires an extra pointer hop / cache miss.
+ */
+ unsigned ind;
+
+ /*
+ * Base allocator, from which arena metadata are allocated.
+ *
+ * Synchronization: internal.
+ */
+ base_t *base;
+ /* Used to determine uptime. Read-only after initialization. */
+ nstime_t create_time;
+
+ /*
+ * The arena is allocated alongside its bins; really this is a
+ * dynamically sized array determined by the binshard settings.
+ */
+ bin_t bins[0];
+};
+
+#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_H */
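A sketch of how the trailing bins[0] member gets sized: the arena and its dynamically chosen bin count are allocated as one block. Names here are hypothetical; jemalloc derives the count from the binshard settings.

    #include <stdlib.h>

    typedef struct { int placeholder; } bin_sketch_t;

    typedef struct {
        unsigned nbins_total;
        bin_sketch_t bins[];  /* C99 spelling of the bins[0] idiom */
    } arena_sketch_t;

    static arena_sketch_t *
    arena_sketch_new(unsigned nbins_total) {
        arena_sketch_t *a = malloc(sizeof(arena_sketch_t)
            + nbins_total * sizeof(bin_sketch_t));
        if (a != NULL) {
            a->nbins_total = nbins_total;
        }
        return a;
    }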
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_structs_a.h b/contrib/jemalloc/include/jemalloc/internal/arena_structs_a.h
deleted file mode 100644
index 46aa77c884b7..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/arena_structs_a.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
-#define JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H
-
-#include "jemalloc/internal/bitmap.h"
-
-struct arena_slab_data_s {
- /* Per region allocated/deallocated bitmap. */
- bitmap_t bitmap[BITMAP_GROUPS_MAX];
-};
-
-#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_A_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_structs_b.h b/contrib/jemalloc/include/jemalloc/internal/arena_structs_b.h
deleted file mode 100644
index eeab57fd6e5c..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/arena_structs_b.h
+++ /dev/null
@@ -1,232 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
-#define JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H
-
-#include "jemalloc/internal/arena_stats.h"
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/bin.h"
-#include "jemalloc/internal/bitmap.h"
-#include "jemalloc/internal/extent_dss.h"
-#include "jemalloc/internal/jemalloc_internal_types.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/nstime.h"
-#include "jemalloc/internal/ql.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/smoothstep.h"
-#include "jemalloc/internal/ticker.h"
-
-struct arena_decay_s {
- /* Synchronizes all non-atomic fields. */
- malloc_mutex_t mtx;
- /*
- * True if a thread is currently purging the extents associated with
- * this decay structure.
- */
- bool purging;
- /*
- * Approximate time in milliseconds from the creation of a set of unused
- * dirty pages until an equivalent set of unused dirty pages is purged
- * and/or reused.
- */
- atomic_zd_t time_ms;
- /* time / SMOOTHSTEP_NSTEPS. */
- nstime_t interval;
- /*
- * Time at which the current decay interval logically started. We do
- * not actually advance to a new epoch until sometime after it starts
- * because of scheduling and computation delays, and it is even possible
- * to completely skip epochs. In all cases, during epoch advancement we
- * merge all relevant activity into the most recently recorded epoch.
- */
- nstime_t epoch;
- /* Deadline randomness generator. */
- uint64_t jitter_state;
- /*
- * Deadline for current epoch. This is the sum of interval and per
- * epoch jitter which is a uniform random variable in [0..interval).
- * Epochs always advance by precise multiples of interval, but we
- * randomize the deadline to reduce the likelihood of arenas purging in
- * lockstep.
- */
- nstime_t deadline;
- /*
- * Number of unpurged pages at beginning of current epoch. During epoch
- * advancement we use the delta between arena->decay_*.nunpurged and
- * extents_npages_get(&arena->extents_*) to determine how many dirty
- * pages, if any, were generated.
- */
- size_t nunpurged;
- /*
- * Trailing log of how many unused dirty pages were generated during
- * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
- * element is the most recent epoch. Corresponding epoch times are
- * relative to epoch.
- */
- size_t backlog[SMOOTHSTEP_NSTEPS];
-
- /*
- * Pointer to associated stats. These stats are embedded directly in
- * the arena's stats due to how stats structures are shared between the
- * arena and ctl code.
- *
- * Synchronization: Same as associated arena's stats field. */
- arena_stats_decay_t *stats;
- /* Peak number of pages in associated extents. Used for debug only. */
- uint64_t ceil_npages;
-};
-
-struct arena_s {
- /*
- * Number of threads currently assigned to this arena. Each thread has
- * two distinct assignments, one for application-serving allocation, and
- * the other for internal metadata allocation. Internal metadata must
- * not be allocated from arenas explicitly created via the arenas.create
- * mallctl, because the arena.<i>.reset mallctl indiscriminately
- * discards all allocations for the affected arena.
- *
- * 0: Application allocation.
- * 1: Internal metadata allocation.
- *
- * Synchronization: atomic.
- */
- atomic_u_t nthreads[2];
-
- /* Next bin shard for binding new threads. Synchronization: atomic. */
- atomic_u_t binshard_next;
-
- /*
- * When percpu_arena is enabled, to amortize the cost of reading /
- * updating the current CPU id, track the most recent thread accessing
- * this arena, and only read CPU if there is a mismatch.
- */
- tsdn_t *last_thd;
-
- /* Synchronization: internal. */
- arena_stats_t stats;
-
- /*
- * Lists of tcaches and cache_bin_array_descriptors for extant threads
- * associated with this arena. Stats from these are merged
- * incrementally, and at exit if opt_stats_print is enabled.
- *
- * Synchronization: tcache_ql_mtx.
- */
- ql_head(tcache_t) tcache_ql;
- ql_head(cache_bin_array_descriptor_t) cache_bin_array_descriptor_ql;
- malloc_mutex_t tcache_ql_mtx;
-
- /* Synchronization: internal. */
- prof_accum_t prof_accum;
-
- /*
- * PRNG state for cache index randomization of large allocation base
- * pointers.
- *
- * Synchronization: atomic.
- */
- atomic_zu_t offset_state;
-
- /*
- * Extent serial number generator state.
- *
- * Synchronization: atomic.
- */
- atomic_zu_t extent_sn_next;
-
- /*
- * Represents a dss_prec_t, but atomically.
- *
- * Synchronization: atomic.
- */
- atomic_u_t dss_prec;
-
- /*
- * Number of pages in active extents.
- *
- * Synchronization: atomic.
- */
- atomic_zu_t nactive;
-
- /*
- * Extant large allocations.
- *
- * Synchronization: large_mtx.
- */
- extent_list_t large;
- /* Synchronizes all large allocation/update/deallocation. */
- malloc_mutex_t large_mtx;
-
- /*
- * Collections of extents that were previously allocated. These are
- * used when allocating extents, in an attempt to re-use address space.
- *
- * Synchronization: internal.
- */
- extents_t extents_dirty;
- extents_t extents_muzzy;
- extents_t extents_retained;
-
- /*
- * Decay-based purging state, responsible for scheduling extent state
- * transitions.
- *
- * Synchronization: internal.
- */
- arena_decay_t decay_dirty; /* dirty --> muzzy */
- arena_decay_t decay_muzzy; /* muzzy --> retained */
-
- /*
- * Next extent size class in a growing series to use when satisfying a
- * request via the extent hooks (only if opt_retain). This limits the
- * number of disjoint virtual memory ranges so that extent merging can
- * be effective even if multiple arenas' extent allocation requests are
- * highly interleaved.
- *
- * retain_grow_limit is the max allowed size ind to expand (unless the
- * required size is greater). Default is no limit, and controlled
- * through mallctl only.
- *
- * Synchronization: extent_grow_mtx
- */
- pszind_t extent_grow_next;
- pszind_t retain_grow_limit;
- malloc_mutex_t extent_grow_mtx;
-
- /*
- * Available extent structures that were allocated via
- * base_alloc_extent().
- *
- * Synchronization: extent_avail_mtx.
- */
- extent_tree_t extent_avail;
- atomic_zu_t extent_avail_cnt;
- malloc_mutex_t extent_avail_mtx;
-
- /*
- * bins is used to store heaps of free regions.
- *
- * Synchronization: internal.
- */
- bins_t bins[SC_NBINS];
-
- /*
- * Base allocator, from which arena metadata are allocated.
- *
- * Synchronization: internal.
- */
- base_t *base;
- /* Used to determine uptime. Read-only after initialization. */
- nstime_t create_time;
-};
-
-/* Used in conjunction with tsd for fast arena-related context lookup. */
-struct arena_tdata_s {
- ticker_t decay_ticker;
-};
-
-/* Used to pass rtree lookup context down the path. */
-struct alloc_ctx_s {
- szind_t szind;
- bool slab;
-};
-
-#endif /* JEMALLOC_INTERNAL_ARENA_STRUCTS_B_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/arena_types.h b/contrib/jemalloc/include/jemalloc/internal/arena_types.h
index 624937e4f596..d0e1291762a6 100644
--- a/contrib/jemalloc/include/jemalloc/internal/arena_types.h
+++ b/contrib/jemalloc/include/jemalloc/internal/arena_types.h
@@ -3,21 +3,14 @@
#include "jemalloc/internal/sc.h"
-/* Maximum number of regions in one slab. */
-#define LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN)
-#define SLAB_MAXREGS (1U << LG_SLAB_MAXREGS)
-
/* Default decay times in milliseconds. */
#define DIRTY_DECAY_MS_DEFAULT ZD(10 * 1000)
#define MUZZY_DECAY_MS_DEFAULT (0)
/* Number of event ticks between time checks. */
-#define DECAY_NTICKS_PER_UPDATE 1000
+#define ARENA_DECAY_NTICKS_PER_UPDATE 1000
-typedef struct arena_slab_data_s arena_slab_data_t;
typedef struct arena_decay_s arena_decay_t;
typedef struct arena_s arena_t;
-typedef struct arena_tdata_s arena_tdata_t;
-typedef struct alloc_ctx_s alloc_ctx_t;
typedef enum {
percpu_arena_mode_names_base = 0, /* Used for options processing. */
@@ -48,4 +41,18 @@ typedef enum {
*/
#define OVERSIZE_THRESHOLD_DEFAULT (8 << 20)
+struct arena_config_s {
+ /* extent hooks to be used for the arena */
+ extent_hooks_t *extent_hooks;
+
+ /*
+ * Use extent hooks for metadata (base) allocations when true.
+ */
+ bool metadata_use_hooks;
+};
+
+typedef struct arena_config_s arena_config_t;
+
+extern const arena_config_t arena_config_default;
+
#endif /* JEMALLOC_INTERNAL_ARENA_TYPES_H */
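A hedged sketch of filling the new arena_config_t before arena creation; starting from arena_config_default keeps unspecified fields sane. The my_hooks table is a hypothetical caller-provided one, and the creation path consuming the config is internal.

    static arena_config_t
    make_config_sketch(extent_hooks_t *my_hooks) {
        arena_config_t config = arena_config_default;
        config.extent_hooks = my_hooks;    /* custom extent management */
        config.metadata_use_hooks = true;  /* also route base allocations */
        return config;
    }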
diff --git a/contrib/jemalloc/include/jemalloc/internal/atomic.h b/contrib/jemalloc/include/jemalloc/internal/atomic.h
index a76f54cee3f4..c0f73122accd 100644
--- a/contrib/jemalloc/include/jemalloc/internal/atomic.h
+++ b/contrib/jemalloc/include/jemalloc/internal/atomic.h
@@ -52,6 +52,27 @@
#define ATOMIC_SEQ_CST atomic_memory_order_seq_cst
/*
+ * Another convenience -- simple atomic helper functions.
+ */
+#define JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(type, short_type, \
+ lg_size) \
+ JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
+ ATOMIC_INLINE void \
+ atomic_load_add_store_##short_type(atomic_##short_type##_t *a, \
+ type inc) { \
+ type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \
+ type newval = oldval + inc; \
+ atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \
+ } \
+ ATOMIC_INLINE void \
+ atomic_load_sub_store_##short_type(atomic_##short_type##_t *a, \
+ type inc) { \
+ type oldval = atomic_load_##short_type(a, ATOMIC_RELAXED); \
+ type newval = oldval - inc; \
+ atomic_store_##short_type(a, newval, ATOMIC_RELAXED); \
+ }
+
+/*
* Not all platforms have 64-bit atomics. If we do, this #define exposes that
* fact.
*/
@@ -67,18 +88,18 @@ JEMALLOC_GENERATE_ATOMICS(void *, p, LG_SIZEOF_PTR)
*/
JEMALLOC_GENERATE_ATOMICS(bool, b, 0)
-JEMALLOC_GENERATE_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
+JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(unsigned, u, LG_SIZEOF_INT)
-JEMALLOC_GENERATE_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
+JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(size_t, zu, LG_SIZEOF_PTR)
-JEMALLOC_GENERATE_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
+JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(ssize_t, zd, LG_SIZEOF_PTR)
-JEMALLOC_GENERATE_INT_ATOMICS(uint8_t, u8, 0)
+JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint8_t, u8, 0)
-JEMALLOC_GENERATE_INT_ATOMICS(uint32_t, u32, 2)
+JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint32_t, u32, 2)
#ifdef JEMALLOC_ATOMIC_U64
-JEMALLOC_GENERATE_INT_ATOMICS(uint64_t, u64, 3)
+JEMALLOC_GENERATE_EXPANDED_INT_ATOMICS(uint64_t, u64, 3)
#endif
#undef ATOMIC_INLINE
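What the generated atomic_load_add_store_* helpers boil down to: a plain read-modify-write expressed through relaxed atomics, correct only when the caller supplies external synchronization (e.g. holds the stats mutex). A standalone C11 sketch:

    #include <stdatomic.h>
    #include <stdint.h>

    static void
    load_add_store_u64_sketch(_Atomic uint64_t *a, uint64_t inc) {
        /* Not an atomic RMW; racy unless externally serialized. */
        uint64_t oldval = atomic_load_explicit(a, memory_order_relaxed);
        atomic_store_explicit(a, oldval + inc, memory_order_relaxed);
    }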
diff --git a/contrib/jemalloc/include/jemalloc/internal/atomic_msvc.h b/contrib/jemalloc/include/jemalloc/internal/atomic_msvc.h
new file mode 100644
index 000000000000..67057ce50895
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/atomic_msvc.h
@@ -0,0 +1,158 @@
+#ifndef JEMALLOC_INTERNAL_ATOMIC_MSVC_H
+#define JEMALLOC_INTERNAL_ATOMIC_MSVC_H
+
+#define ATOMIC_INIT(...) {__VA_ARGS__}
+
+typedef enum {
+ atomic_memory_order_relaxed,
+ atomic_memory_order_acquire,
+ atomic_memory_order_release,
+ atomic_memory_order_acq_rel,
+ atomic_memory_order_seq_cst
+} atomic_memory_order_t;
+
+typedef char atomic_repr_0_t;
+typedef short atomic_repr_1_t;
+typedef long atomic_repr_2_t;
+typedef __int64 atomic_repr_3_t;
+
+ATOMIC_INLINE void
+atomic_fence(atomic_memory_order_t mo) {
+ _ReadWriteBarrier();
+# if defined(_M_ARM) || defined(_M_ARM64)
+ /* ARM needs a barrier for everything but relaxed. */
+ if (mo != atomic_memory_order_relaxed) {
+ MemoryBarrier();
+ }
+# elif defined(_M_IX86) || defined (_M_X64)
+ /* x86 needs a barrier only for seq_cst. */
+ if (mo == atomic_memory_order_seq_cst) {
+ MemoryBarrier();
+ }
+# else
+# error "Don't know how to create atomics for this platform for MSVC."
+# endif
+ _ReadWriteBarrier();
+}
+
+#define ATOMIC_INTERLOCKED_REPR(lg_size) atomic_repr_ ## lg_size ## _t
+
+#define ATOMIC_CONCAT(a, b) ATOMIC_RAW_CONCAT(a, b)
+#define ATOMIC_RAW_CONCAT(a, b) a ## b
+
+#define ATOMIC_INTERLOCKED_NAME(base_name, lg_size) ATOMIC_CONCAT( \
+ base_name, ATOMIC_INTERLOCKED_SUFFIX(lg_size))
+
+#define ATOMIC_INTERLOCKED_SUFFIX(lg_size) \
+ ATOMIC_CONCAT(ATOMIC_INTERLOCKED_SUFFIX_, lg_size)
+
+#define ATOMIC_INTERLOCKED_SUFFIX_0 8
+#define ATOMIC_INTERLOCKED_SUFFIX_1 16
+#define ATOMIC_INTERLOCKED_SUFFIX_2
+#define ATOMIC_INTERLOCKED_SUFFIX_3 64
+
+#define JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
+typedef struct { \
+ ATOMIC_INTERLOCKED_REPR(lg_size) repr; \
+} atomic_##short_type##_t; \
+ \
+ATOMIC_INLINE type \
+atomic_load_##short_type(const atomic_##short_type##_t *a, \
+ atomic_memory_order_t mo) { \
+ ATOMIC_INTERLOCKED_REPR(lg_size) ret = a->repr; \
+ if (mo != atomic_memory_order_relaxed) { \
+ atomic_fence(atomic_memory_order_acquire); \
+ } \
+ return (type) ret; \
+} \
+ \
+ATOMIC_INLINE void \
+atomic_store_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ if (mo != atomic_memory_order_relaxed) { \
+ atomic_fence(atomic_memory_order_release); \
+ } \
+ a->repr = (ATOMIC_INTERLOCKED_REPR(lg_size)) val; \
+ if (mo == atomic_memory_order_seq_cst) { \
+ atomic_fence(atomic_memory_order_seq_cst); \
+ } \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_exchange_##short_type(atomic_##short_type##_t *a, type val, \
+ atomic_memory_order_t mo) { \
+ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, \
+ lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
+} \
+ \
+ATOMIC_INLINE bool \
+atomic_compare_exchange_weak_##short_type(atomic_##short_type##_t *a, \
+ type *expected, type desired, atomic_memory_order_t success_mo, \
+ atomic_memory_order_t failure_mo) { \
+ ATOMIC_INTERLOCKED_REPR(lg_size) e = \
+ (ATOMIC_INTERLOCKED_REPR(lg_size))*expected; \
+ ATOMIC_INTERLOCKED_REPR(lg_size) d = \
+ (ATOMIC_INTERLOCKED_REPR(lg_size))desired; \
+ ATOMIC_INTERLOCKED_REPR(lg_size) old = \
+ ATOMIC_INTERLOCKED_NAME(_InterlockedCompareExchange, \
+ lg_size)(&a->repr, d, e); \
+ if (old == e) { \
+ return true; \
+ } else { \
+ *expected = (type)old; \
+ return false; \
+ } \
+} \
+ \
+ATOMIC_INLINE bool \
+atomic_compare_exchange_strong_##short_type(atomic_##short_type##_t *a, \
+ type *expected, type desired, atomic_memory_order_t success_mo, \
+ atomic_memory_order_t failure_mo) { \
+	/* The weak version above has strong semantics; delegation is safe. */ \
+ return atomic_compare_exchange_weak_##short_type(a, expected, \
+ desired, success_mo, failure_mo); \
+}
+
+
+#define JEMALLOC_GENERATE_INT_ATOMICS(type, short_type, lg_size) \
+JEMALLOC_GENERATE_ATOMICS(type, short_type, lg_size) \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_add_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedExchangeAdd, \
+ lg_size)(&a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
+} \
+ \
+ATOMIC_INLINE type \
+atomic_fetch_sub_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ /* \
+ * MSVC warns on negation of unsigned operands, but for us it \
+ * gives exactly the right semantics (MAX_TYPE + 1 - operand). \
+ */ \
+ __pragma(warning(push)) \
+ __pragma(warning(disable: 4146)) \
+ return atomic_fetch_add_##short_type(a, -val, mo); \
+ __pragma(warning(pop)) \
+} \
+ATOMIC_INLINE type \
+atomic_fetch_and_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedAnd, lg_size)( \
+ &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
+} \
+ATOMIC_INLINE type \
+atomic_fetch_or_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedOr, lg_size)( \
+ &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
+} \
+ATOMIC_INLINE type \
+atomic_fetch_xor_##short_type(atomic_##short_type##_t *a, \
+ type val, atomic_memory_order_t mo) { \
+ return (type)ATOMIC_INTERLOCKED_NAME(_InterlockedXor, lg_size)( \
+ &a->repr, (ATOMIC_INTERLOCKED_REPR(lg_size))val); \
+}
+
+#endif /* JEMALLOC_INTERNAL_ATOMIC_MSVC_H */
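How the token pasting above resolves for each width, matching MSVC's convention that the unsuffixed Interlocked* intrinsics operate on long:

    /*
     * ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, 0) -> _InterlockedExchange8
     * ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, 1) -> _InterlockedExchange16
     * ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, 2) -> _InterlockedExchange
     * ATOMIC_INTERLOCKED_NAME(_InterlockedExchange, 3) -> _InterlockedExchange64
     */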
diff --git a/contrib/jemalloc/include/jemalloc/internal/background_thread_externs.h b/contrib/jemalloc/include/jemalloc/internal/background_thread_externs.h
index 0f997e18beec..6ae3c8d89d4b 100644
--- a/contrib/jemalloc/include/jemalloc/internal/background_thread_externs.h
+++ b/contrib/jemalloc/include/jemalloc/internal/background_thread_externs.h
@@ -12,8 +12,9 @@ extern background_thread_info_t *background_thread_info;
bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
bool background_threads_enable(tsd_t *tsd);
bool background_threads_disable(tsd_t *tsd);
-void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
- arena_decay_t *decay, size_t npages_new);
+bool background_thread_is_started(background_thread_info_t *info);
+void background_thread_wakeup_early(background_thread_info_t *info,
+ nstime_t *remaining_sleep);
void background_thread_prefork0(tsdn_t *tsdn);
void background_thread_prefork1(tsdn_t *tsdn);
void background_thread_postfork_parent(tsdn_t *tsdn);
@@ -27,6 +28,6 @@ extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *,
void *(*)(void *), void *__restrict);
#endif
bool background_thread_boot0(void);
-bool background_thread_boot1(tsdn_t *tsdn);
+bool background_thread_boot1(tsdn_t *tsdn, base_t *base);
#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/background_thread_inlines.h b/contrib/jemalloc/include/jemalloc/internal/background_thread_inlines.h
index f85e86fa375b..92c5febe70fd 100644
--- a/contrib/jemalloc/include/jemalloc/internal/background_thread_inlines.h
+++ b/contrib/jemalloc/include/jemalloc/internal/background_thread_inlines.h
@@ -45,18 +45,4 @@ background_thread_indefinite_sleep(background_thread_info_t *info) {
return atomic_load_b(&info->indefinite_sleep, ATOMIC_ACQUIRE);
}
-JEMALLOC_ALWAYS_INLINE void
-arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
- bool is_background_thread) {
- if (!background_thread_enabled() || is_background_thread) {
- return;
- }
- background_thread_info_t *info =
- arena_background_thread_info_get(arena);
- if (background_thread_indefinite_sleep(info)) {
- background_thread_interval_check(tsdn, arena,
- &arena->decay_dirty, 0);
- }
-}
-
#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_INLINES_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/background_thread_structs.h b/contrib/jemalloc/include/jemalloc/internal/background_thread_structs.h
index c02aa434c7da..83a919846029 100644
--- a/contrib/jemalloc/include/jemalloc/internal/background_thread_structs.h
+++ b/contrib/jemalloc/include/jemalloc/internal/background_thread_structs.h
@@ -11,6 +11,17 @@
#define MAX_BACKGROUND_THREAD_LIMIT MALLOCX_ARENA_LIMIT
#define DEFAULT_NUM_BACKGROUND_THREAD 4
+/*
+ * These exist only as a transitional state. Eventually, deferral should be
+ * part of the PAI, and each implementation can indicate wait times with more
+ * specificity.
+ */
+#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_UNINITIALIZED (-2)
+#define BACKGROUND_THREAD_HPA_INTERVAL_MAX_DEFAULT_WHEN_ENABLED 5000
+
+#define BACKGROUND_THREAD_DEFERRED_MIN UINT64_C(0)
+#define BACKGROUND_THREAD_DEFERRED_MAX UINT64_MAX
+
typedef enum {
background_thread_stopped,
background_thread_started,
@@ -48,6 +59,7 @@ struct background_thread_stats_s {
size_t num_threads;
uint64_t num_runs;
nstime_t run_interval;
+ mutex_prof_data_t max_counter_per_bg_thd;
};
typedef struct background_thread_stats_s background_thread_stats_t;
diff --git a/contrib/jemalloc/include/jemalloc/internal/base.h b/contrib/jemalloc/include/jemalloc/internal/base.h
new file mode 100644
index 000000000000..9b2c9fb10b99
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/base.h
@@ -0,0 +1,110 @@
+#ifndef JEMALLOC_INTERNAL_BASE_H
+#define JEMALLOC_INTERNAL_BASE_H
+
+#include "jemalloc/internal/edata.h"
+#include "jemalloc/internal/ehooks.h"
+#include "jemalloc/internal/mutex.h"
+
+enum metadata_thp_mode_e {
+ metadata_thp_disabled = 0,
+ /*
+ * Lazily enable hugepage for metadata. To avoid high RSS caused by THP
+ * + low usage arena (i.e. THP becomes a significant percentage), the
+ * "auto" option only starts using THP after a base allocator used up
+ * the first THP region. Starting from the second hugepage (in a single
+ * arena), "auto" behaves the same as "always", i.e. madvise hugepage
+ * right away.
+ */
+ metadata_thp_auto = 1,
+ metadata_thp_always = 2,
+ metadata_thp_mode_limit = 3
+};
+typedef enum metadata_thp_mode_e metadata_thp_mode_t;
+
+#define METADATA_THP_DEFAULT metadata_thp_disabled
+extern metadata_thp_mode_t opt_metadata_thp;
+extern const char *metadata_thp_mode_names[];
+
+
+/* Embedded at the beginning of every block of base-managed virtual memory. */
+typedef struct base_block_s base_block_t;
+struct base_block_s {
+ /* Total size of block's virtual memory mapping. */
+ size_t size;
+
+ /* Next block in list of base's blocks. */
+ base_block_t *next;
+
+ /* Tracks unused trailing space. */
+ edata_t edata;
+};
+
+typedef struct base_s base_t;
+struct base_s {
+ /*
+ * User-configurable extent hook functions.
+ */
+ ehooks_t ehooks;
+
+ /*
+ * User-configurable extent hook functions for metadata allocations.
+ */
+ ehooks_t ehooks_base;
+
+ /* Protects base_alloc() and base_stats_get() operations. */
+ malloc_mutex_t mtx;
+
+ /* Using THP when true (metadata_thp auto mode). */
+ bool auto_thp_switched;
+ /*
+ * Most recent size class in the series of increasingly large base
+ * extents. Logarithmic spacing between subsequent allocations ensures
+ * that the total number of distinct mappings remains small.
+ */
+ pszind_t pind_last;
+
+ /* Serial number generation state. */
+ size_t extent_sn_next;
+
+ /* Chain of all blocks associated with base. */
+ base_block_t *blocks;
+
+ /* Heap of extents that track unused trailing space within blocks. */
+ edata_heap_t avail[SC_NSIZES];
+
+ /* Stats, only maintained if config_stats. */
+ size_t allocated;
+ size_t resident;
+ size_t mapped;
+ /* Number of THP regions touched. */
+ size_t n_thp;
+};
+
+static inline unsigned
+base_ind_get(const base_t *base) {
+ return ehooks_ind_get(&base->ehooks);
+}
+
+static inline bool
+metadata_thp_enabled(void) {
+ return (opt_metadata_thp != metadata_thp_disabled);
+}
+
+base_t *b0get(void);
+base_t *base_new(tsdn_t *tsdn, unsigned ind,
+ const extent_hooks_t *extent_hooks, bool metadata_use_hooks);
+void base_delete(tsdn_t *tsdn, base_t *base);
+ehooks_t *base_ehooks_get(base_t *base);
+ehooks_t *base_ehooks_get_for_metadata(base_t *base);
+extent_hooks_t *base_extent_hooks_set(base_t *base,
+ extent_hooks_t *extent_hooks);
+void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
+edata_t *base_alloc_edata(tsdn_t *tsdn, base_t *base);
+void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
+ size_t *resident, size_t *mapped, size_t *n_thp);
+void base_prefork(tsdn_t *tsdn, base_t *base);
+void base_postfork_parent(tsdn_t *tsdn, base_t *base);
+void base_postfork_child(tsdn_t *tsdn, base_t *base);
+bool base_boot(tsdn_t *tsdn);
+
+#endif /* JEMALLOC_INTERNAL_BASE_H */
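A hedged lifecycle sketch for the consolidated base API (internal; assumes ehooks_default_extent_hooks and CACHELINE from the surrounding headers, and a tsdn supplied by the caller):

    static void
    base_sketch(tsdn_t *tsdn) {
        base_t *base = base_new(tsdn, /* ind */ 0,
            &ehooks_default_extent_hooks, /* metadata_use_hooks */ true);
        if (base == NULL) {
            return;
        }
        /* Alignment must be a power of two. */
        void *meta = base_alloc(tsdn, base, 128, CACHELINE);
        (void)meta;  /* Base memory is never freed individually... */
        base_delete(tsdn, base);  /* ...only unmapped wholesale here. */
    }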
diff --git a/contrib/jemalloc/include/jemalloc/internal/base_externs.h b/contrib/jemalloc/include/jemalloc/internal/base_externs.h
deleted file mode 100644
index 7b705c9b4d1c..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/base_externs.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BASE_EXTERNS_H
-#define JEMALLOC_INTERNAL_BASE_EXTERNS_H
-
-extern metadata_thp_mode_t opt_metadata_thp;
-extern const char *metadata_thp_mode_names[];
-
-base_t *b0get(void);
-base_t *base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
-void base_delete(tsdn_t *tsdn, base_t *base);
-extent_hooks_t *base_extent_hooks_get(base_t *base);
-extent_hooks_t *base_extent_hooks_set(base_t *base,
- extent_hooks_t *extent_hooks);
-void *base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment);
-extent_t *base_alloc_extent(tsdn_t *tsdn, base_t *base);
-void base_stats_get(tsdn_t *tsdn, base_t *base, size_t *allocated,
- size_t *resident, size_t *mapped, size_t *n_thp);
-void base_prefork(tsdn_t *tsdn, base_t *base);
-void base_postfork_parent(tsdn_t *tsdn, base_t *base);
-void base_postfork_child(tsdn_t *tsdn, base_t *base);
-bool base_boot(tsdn_t *tsdn);
-
-#endif /* JEMALLOC_INTERNAL_BASE_EXTERNS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/base_inlines.h b/contrib/jemalloc/include/jemalloc/internal/base_inlines.h
deleted file mode 100644
index aec0e2e1e1c5..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/base_inlines.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BASE_INLINES_H
-#define JEMALLOC_INTERNAL_BASE_INLINES_H
-
-static inline unsigned
-base_ind_get(const base_t *base) {
- return base->ind;
-}
-
-static inline bool
-metadata_thp_enabled(void) {
- return (opt_metadata_thp != metadata_thp_disabled);
-}
-#endif /* JEMALLOC_INTERNAL_BASE_INLINES_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/base_structs.h b/contrib/jemalloc/include/jemalloc/internal/base_structs.h
deleted file mode 100644
index 07f214eb2f2a..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/base_structs.h
+++ /dev/null
@@ -1,59 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BASE_STRUCTS_H
-#define JEMALLOC_INTERNAL_BASE_STRUCTS_H
-
-#include "jemalloc/internal/jemalloc_internal_types.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/sc.h"
-
-/* Embedded at the beginning of every block of base-managed virtual memory. */
-struct base_block_s {
- /* Total size of block's virtual memory mapping. */
- size_t size;
-
- /* Next block in list of base's blocks. */
- base_block_t *next;
-
- /* Tracks unused trailing space. */
- extent_t extent;
-};
-
-struct base_s {
- /* Associated arena's index within the arenas array. */
- unsigned ind;
-
- /*
- * User-configurable extent hook functions. Points to an
- * extent_hooks_t.
- */
- atomic_p_t extent_hooks;
-
- /* Protects base_alloc() and base_stats_get() operations. */
- malloc_mutex_t mtx;
-
- /* Using THP when true (metadata_thp auto mode). */
- bool auto_thp_switched;
- /*
- * Most recent size class in the series of increasingly large base
- * extents. Logarithmic spacing between subsequent allocations ensures
- * that the total number of distinct mappings remains small.
- */
- pszind_t pind_last;
-
- /* Serial number generation state. */
- size_t extent_sn_next;
-
- /* Chain of all blocks associated with base. */
- base_block_t *blocks;
-
- /* Heap of extents that track unused trailing space within blocks. */
- extent_heap_t avail[SC_NSIZES];
-
- /* Stats, only maintained if config_stats. */
- size_t allocated;
- size_t resident;
- size_t mapped;
- /* Number of THP regions touched. */
- size_t n_thp;
-};
-
-#endif /* JEMALLOC_INTERNAL_BASE_STRUCTS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/base_types.h b/contrib/jemalloc/include/jemalloc/internal/base_types.h
deleted file mode 100644
index b6db77df7c6c..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/base_types.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_BASE_TYPES_H
-#define JEMALLOC_INTERNAL_BASE_TYPES_H
-
-typedef struct base_block_s base_block_t;
-typedef struct base_s base_t;
-
-#define METADATA_THP_DEFAULT metadata_thp_disabled
-
-/*
- * In auto mode, arenas switch to huge pages for the base allocator on the
- * second base block. a0 switches to thp on the 5th block (after 20 megabytes
- * of metadata), since more metadata (e.g. rtree nodes) come from a0's base.
- */
-
-#define BASE_AUTO_THP_THRESHOLD 2
-#define BASE_AUTO_THP_THRESHOLD_A0 5
-
-typedef enum {
- metadata_thp_disabled = 0,
- /*
- * Lazily enable hugepage for metadata. To avoid high RSS caused by THP
- * + low usage arena (i.e. THP becomes a significant percentage), the
- * "auto" option only starts using THP after a base allocator used up
- * the first THP region. Starting from the second hugepage (in a single
- * arena), "auto" behaves the same as "always", i.e. madvise hugepage
- * right away.
- */
- metadata_thp_auto = 1,
- metadata_thp_always = 2,
- metadata_thp_mode_limit = 3
-} metadata_thp_mode_t;
-
-#endif /* JEMALLOC_INTERNAL_BASE_TYPES_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/bin.h b/contrib/jemalloc/include/jemalloc/internal/bin.h
index 8547e89309bc..63f97395e91d 100644
--- a/contrib/jemalloc/include/jemalloc/internal/bin.h
+++ b/contrib/jemalloc/include/jemalloc/internal/bin.h
@@ -3,8 +3,7 @@
#include "jemalloc/internal/bin_stats.h"
#include "jemalloc/internal/bin_types.h"
-#include "jemalloc/internal/extent_types.h"
-#include "jemalloc/internal/extent_structs.h"
+#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sc.h"
@@ -12,74 +11,34 @@
* A bin contains a set of extents that are currently being used for slab
* allocations.
*/
-
-/*
- * Read-only information associated with each element of arena_t's bins array
- * is stored separately, partly to reduce memory usage (only one copy, rather
- * than one per arena), but mainly to avoid false cacheline sharing.
- *
- * Each slab has the following layout:
- *
- * /--------------------\
- * | region 0 |
- * |--------------------|
- * | region 1 |
- * |--------------------|
- * | ... |
- * | ... |
- * | ... |
- * |--------------------|
- * | region nregs-1 |
- * \--------------------/
- */
-typedef struct bin_info_s bin_info_t;
-struct bin_info_s {
- /* Size of regions in a slab for this bin's size class. */
- size_t reg_size;
-
- /* Total size of a slab for this bin's size class. */
- size_t slab_size;
-
- /* Total number of regions in a slab for this bin's size class. */
- uint32_t nregs;
-
- /* Number of sharded bins in each arena for this size class. */
- uint32_t n_shards;
-
- /*
- * Metadata used to manipulate bitmaps for slabs associated with this
- * bin.
- */
- bitmap_info_t bitmap_info;
-};
-
-extern bin_info_t bin_infos[SC_NBINS];
-
typedef struct bin_s bin_t;
struct bin_s {
/* All operations on bin_t fields require lock ownership. */
malloc_mutex_t lock;
/*
+ * Bin statistics. These get touched every time the lock is acquired,
+ * so put them close by in the hopes of getting some cache locality.
+ */
+ bin_stats_t stats;
+
+ /*
* Current slab being used to service allocations of this bin's size
* class. slabcur is independent of slabs_{nonfull,full}; whenever
* slabcur is reassigned, the previous slab must be deallocated or
* inserted into slabs_{nonfull,full}.
*/
- extent_t *slabcur;
+ edata_t *slabcur;
/*
* Heap of non-full slabs. This heap is used to assure that new
* allocations come from the non-full slab that is oldest/lowest in
* memory.
*/
- extent_heap_t slabs_nonfull;
+ edata_heap_t slabs_nonfull;
/* List used to track full slabs. */
- extent_list_t slabs_full;
-
- /* Bin statistics. */
- bin_stats_t stats;
+ edata_list_active_t slabs_full;
};
/* A set of sharded bins of the same size class. */
@@ -92,7 +51,6 @@ struct bins_s {
void bin_shard_sizes_boot(unsigned bin_shards[SC_NBINS]);
bool bin_update_shard_size(unsigned bin_shards[SC_NBINS], size_t start_size,
size_t end_size, size_t nshards);
-void bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]);
/* Initializes a bin to empty. Returns true on error. */
bool bin_init(bin_t *bin);
@@ -104,19 +62,20 @@ void bin_postfork_child(tsdn_t *tsdn, bin_t *bin);
/* Stats. */
static inline void
-bin_stats_merge(tsdn_t *tsdn, bin_stats_t *dst_bin_stats, bin_t *bin) {
+bin_stats_merge(tsdn_t *tsdn, bin_stats_data_t *dst_bin_stats, bin_t *bin) {
malloc_mutex_lock(tsdn, &bin->lock);
malloc_mutex_prof_accum(tsdn, &dst_bin_stats->mutex_data, &bin->lock);
- dst_bin_stats->nmalloc += bin->stats.nmalloc;
- dst_bin_stats->ndalloc += bin->stats.ndalloc;
- dst_bin_stats->nrequests += bin->stats.nrequests;
- dst_bin_stats->curregs += bin->stats.curregs;
- dst_bin_stats->nfills += bin->stats.nfills;
- dst_bin_stats->nflushes += bin->stats.nflushes;
- dst_bin_stats->nslabs += bin->stats.nslabs;
- dst_bin_stats->reslabs += bin->stats.reslabs;
- dst_bin_stats->curslabs += bin->stats.curslabs;
- dst_bin_stats->nonfull_slabs += bin->stats.nonfull_slabs;
+ bin_stats_t *stats = &dst_bin_stats->stats_data;
+ stats->nmalloc += bin->stats.nmalloc;
+ stats->ndalloc += bin->stats.ndalloc;
+ stats->nrequests += bin->stats.nrequests;
+ stats->curregs += bin->stats.curregs;
+ stats->nfills += bin->stats.nfills;
+ stats->nflushes += bin->stats.nflushes;
+ stats->nslabs += bin->stats.nslabs;
+ stats->reslabs += bin->stats.reslabs;
+ stats->curslabs += bin->stats.curslabs;
+ stats->nonfull_slabs += bin->stats.nonfull_slabs;
malloc_mutex_unlock(tsdn, &bin->lock);
}
diff --git a/contrib/jemalloc/include/jemalloc/internal/bin_info.h b/contrib/jemalloc/include/jemalloc/internal/bin_info.h
new file mode 100644
index 000000000000..7fe65c866a10
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/bin_info.h
@@ -0,0 +1,50 @@
+#ifndef JEMALLOC_INTERNAL_BIN_INFO_H
+#define JEMALLOC_INTERNAL_BIN_INFO_H
+
+#include "jemalloc/internal/bitmap.h"
+
+/*
+ * Read-only information associated with each element of arena_t's bins array
+ * is stored separately, partly to reduce memory usage (only one copy, rather
+ * than one per arena), but mainly to avoid false cacheline sharing.
+ *
+ * Each slab has the following layout:
+ *
+ * /--------------------\
+ * | region 0 |
+ * |--------------------|
+ * | region 1 |
+ * |--------------------|
+ * | ... |
+ * | ... |
+ * | ... |
+ * |--------------------|
+ * | region nregs-1 |
+ * \--------------------/
+ */
+typedef struct bin_info_s bin_info_t;
+struct bin_info_s {
+ /* Size of regions in a slab for this bin's size class. */
+ size_t reg_size;
+
+ /* Total size of a slab for this bin's size class. */
+ size_t slab_size;
+
+ /* Total number of regions in a slab for this bin's size class. */
+ uint32_t nregs;
+
+ /* Number of sharded bins in each arena for this size class. */
+ uint32_t n_shards;
+
+ /*
+ * Metadata used to manipulate bitmaps for slabs associated with this
+ * bin.
+ */
+ bitmap_info_t bitmap_info;
+};
+
+extern bin_info_t bin_infos[SC_NBINS];
+
+void bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]);
+
+#endif /* JEMALLOC_INTERNAL_BIN_INFO_H */
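A worked instance of the invariant tying the fields together (the numbers are hypothetical; actual values come from the size-class machinery):

    /*
     * A 16-byte size class backed by a one-page (4 KiB) slab:
     *   reg_size  = 16
     *   slab_size = 4096
     *   nregs     = slab_size / reg_size = 256
     * bitmap_info then describes a 256-bit allocated/free bitmap per slab.
     */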
diff --git a/contrib/jemalloc/include/jemalloc/internal/bin_stats.h b/contrib/jemalloc/include/jemalloc/internal/bin_stats.h
index d04519c8244c..0b99297c070a 100644
--- a/contrib/jemalloc/include/jemalloc/internal/bin_stats.h
+++ b/contrib/jemalloc/include/jemalloc/internal/bin_stats.h
@@ -47,8 +47,11 @@ struct bin_stats_s {
/* Current size of nonfull slabs heap in this bin. */
size_t nonfull_slabs;
+};
+typedef struct bin_stats_data_s bin_stats_data_t;
+struct bin_stats_data_s {
+ bin_stats_t stats_data;
mutex_prof_data_t mutex_data;
};
-
#endif /* JEMALLOC_INTERNAL_BIN_STATS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/bin_types.h b/contrib/jemalloc/include/jemalloc/internal/bin_types.h
index 3533606b90c4..945e8326c63d 100644
--- a/contrib/jemalloc/include/jemalloc/internal/bin_types.h
+++ b/contrib/jemalloc/include/jemalloc/internal/bin_types.h
@@ -3,7 +3,7 @@
#include "jemalloc/internal/sc.h"
-#define BIN_SHARDS_MAX (1 << EXTENT_BITS_BINSHARD_WIDTH)
+#define BIN_SHARDS_MAX (1 << EDATA_BITS_BINSHARD_WIDTH)
#define N_BIN_SHARDS_DEFAULT 1
/* Used in TSD static initializer only. Real init in arena_bind(). */
diff --git a/contrib/jemalloc/include/jemalloc/internal/bit_util.h b/contrib/jemalloc/include/jemalloc/internal/bit_util.h
index c045eb86878a..bac59140fbb0 100644
--- a/contrib/jemalloc/include/jemalloc/internal/bit_util.h
+++ b/contrib/jemalloc/include/jemalloc/internal/bit_util.h
@@ -3,144 +3,383 @@
#include "jemalloc/internal/assert.h"
-#define BIT_UTIL_INLINE static inline
-
/* Sanity check. */
#if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \
|| !defined(JEMALLOC_INTERNAL_FFS)
# error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure
#endif
+/*
+ * Unlike the builtins and posix ffs functions, our ffs requires a non-zero
+ * input, and returns the position of the lowest bit set (as opposed to the
+ * posix versions, which return 1 larger than that position and use a return
+ * value of zero as a sentinel). This tends to simplify logic in callers, and
+ * allows for consistency with the builtins we build fls on top of.
+ */
+static inline unsigned
+ffs_llu(unsigned long long x) {
+ util_assume(x != 0);
+ return JEMALLOC_INTERNAL_FFSLL(x) - 1;
+}
-BIT_UTIL_INLINE unsigned
-ffs_llu(unsigned long long bitmap) {
- return JEMALLOC_INTERNAL_FFSLL(bitmap);
+static inline unsigned
+ffs_lu(unsigned long x) {
+ util_assume(x != 0);
+ return JEMALLOC_INTERNAL_FFSL(x) - 1;
}
-BIT_UTIL_INLINE unsigned
-ffs_lu(unsigned long bitmap) {
- return JEMALLOC_INTERNAL_FFSL(bitmap);
+static inline unsigned
+ffs_u(unsigned x) {
+ util_assume(x != 0);
+ return JEMALLOC_INTERNAL_FFS(x) - 1;
}
-BIT_UTIL_INLINE unsigned
-ffs_u(unsigned bitmap) {
- return JEMALLOC_INTERNAL_FFS(bitmap);
+#define DO_FLS_SLOW(x, suffix) do { \
+ util_assume(x != 0); \
+ x |= (x >> 1); \
+ x |= (x >> 2); \
+ x |= (x >> 4); \
+ x |= (x >> 8); \
+ x |= (x >> 16); \
+ if (sizeof(x) > 4) { \
+ /* \
+ * If sizeof(x) is 4, then the expression "x >> 32" \
+ * will generate compiler warnings even if the code \
+ * never executes. This circumvents the warning, and \
+ * gets compiled out in optimized builds. \
+ */ \
+ int constant_32 = sizeof(x) * 4; \
+ x |= (x >> constant_32); \
+ } \
+ x++; \
+ if (x == 0) { \
+ return 8 * sizeof(x) - 1; \
+ } \
+ return ffs_##suffix(x) - 1; \
+} while(0)
+
+static inline unsigned
+fls_llu_slow(unsigned long long x) {
+ DO_FLS_SLOW(x, llu);
}
-#ifdef JEMALLOC_INTERNAL_POPCOUNTL
-BIT_UTIL_INLINE unsigned
+static inline unsigned
+fls_lu_slow(unsigned long x) {
+ DO_FLS_SLOW(x, lu);
+}
+
+static inline unsigned
+fls_u_slow(unsigned x) {
+ DO_FLS_SLOW(x, u);
+}
+
+#undef DO_FLS_SLOW
+
+#ifdef JEMALLOC_HAVE_BUILTIN_CLZ
+static inline unsigned
+fls_llu(unsigned long long x) {
+ util_assume(x != 0);
+ /*
+ * Note that the xor here is more naturally written as subtraction; the
+ * last bit set is the number of bits in the type minus the number of
+ * leading zero bits. But GCC implements that as:
+ * bsr edi, edi
+ * mov eax, 31
+ * xor edi, 31
+ * sub eax, edi
+ * If we write it as xor instead, then we get
+ * bsr eax, edi
+ * as desired.
+ */
+ return (8 * sizeof(x) - 1) ^ __builtin_clzll(x);
+}
+
+static inline unsigned
+fls_lu(unsigned long x) {
+ util_assume(x != 0);
+ return (8 * sizeof(x) - 1) ^ __builtin_clzl(x);
+}
+
+static inline unsigned
+fls_u(unsigned x) {
+ util_assume(x != 0);
+ return (8 * sizeof(x) - 1) ^ __builtin_clz(x);
+}
+#elif defined(_MSC_VER)
+
+#if LG_SIZEOF_PTR == 3
+#define DO_BSR64(bit, x) _BitScanReverse64(&bit, x)
+#else
+/*
+ * This never actually runs; we're just dodging a compiler error for the
+ * never-taken branch where sizeof(void *) == 8.
+ */
+#define DO_BSR64(bit, x) bit = 0; unreachable()
+#endif
+
+#define DO_FLS(x) do { \
+ if (x == 0) { \
+ return 8 * sizeof(x); \
+ } \
+ unsigned long bit; \
+ if (sizeof(x) == 4) { \
+ _BitScanReverse(&bit, (unsigned)x); \
+ return (unsigned)bit; \
+ } \
+ if (sizeof(x) == 8 && sizeof(void *) == 8) { \
+ DO_BSR64(bit, x); \
+ return (unsigned)bit; \
+ } \
+ if (sizeof(x) == 8 && sizeof(void *) == 4) { \
+ /* Dodge a compiler warning, as above. */ \
+ int constant_32 = sizeof(x) * 4; \
+ if (_BitScanReverse(&bit, \
+ (unsigned)(x >> constant_32))) { \
+ return 32 + (unsigned)bit; \
+ } else { \
+ _BitScanReverse(&bit, (unsigned)x); \
+ return (unsigned)bit; \
+ } \
+ } \
+ unreachable(); \
+} while (0)
+
+static inline unsigned
+fls_llu(unsigned long long x) {
+ DO_FLS(x);
+}
+
+static inline unsigned
+fls_lu(unsigned long x) {
+ DO_FLS(x);
+}
+
+static inline unsigned
+fls_u(unsigned x) {
+ DO_FLS(x);
+}
+
+#undef DO_FLS
+#undef DO_BSR64
+#else
+
+static inline unsigned
+fls_llu(unsigned long long x) {
+ return fls_llu_slow(x);
+}
+
+static inline unsigned
+fls_lu(unsigned long x) {
+ return fls_lu_slow(x);
+}
+
+static inline unsigned
+fls_u(unsigned x) {
+ return fls_u_slow(x);
+}
+#endif
+
+#if LG_SIZEOF_LONG_LONG > 3
+# error "Haven't implemented popcount for 16-byte ints."
+#endif
+
+#define DO_POPCOUNT(x, type) do { \
+ /* \
+ * Algorithm from an old AMD optimization reference manual. \
+ * We're putting a little bit more work than you might expect \
+ * into the no-instrinsic case, since we only support the \
+ * GCC intrinsics spelling of popcount (for now). Detecting \
+ * whether or not the popcount builtin is actually useable in \
+ * MSVC is nontrivial. \
+ */ \
+ \
+ type bmul = (type)0x0101010101010101ULL; \
+ \
+ /* \
+ * Replace each 2 bits with the sideways sum of the original \
+ * values. 0x5 = 0b0101. \
+ * \
+ * You might expect this to be: \
+ * x = (x & 0x55...) + ((x >> 1) & 0x55...). \
+ * That costs an extra mask relative to this, though. \
+ */ \
+ x = x - ((x >> 1) & (0x55U * bmul)); \
+	/* Replace each 4 bits with their sideways sum. 0x3 = 0b0011. */\
+ x = (x & (bmul * 0x33U)) + ((x >> 2) & (bmul * 0x33U)); \
+ /* \
+ * Replace each 8 bits with their sideways sum. Note that we \
+ * can't overflow within each 4-bit sum here, so we can skip \
+ * the initial mask. \
+ */ \
+ x = (x + (x >> 4)) & (bmul * 0x0FU); \
+ /* \
+ * None of the partial sums in this multiplication (viewed in \
+ * base-256) can overflow into the next digit. So the least \
+ * significant byte of the product will be the least \
+ * significant byte of the original value, the second least \
+ * significant byte will be the sum of the two least \
+ * significant bytes of the original value, and so on. \
+ * Importantly, the high byte will be the byte-wise sum of all \
+ * the bytes of the original value. \
+ */ \
+ x = x * bmul; \
+ x >>= ((sizeof(x) - 1) * 8); \
+ return (unsigned)x; \
+} while(0)
+
+static inline unsigned
+popcount_u_slow(unsigned bitmap) {
+ DO_POPCOUNT(bitmap, unsigned);
+}
+
+static inline unsigned
+popcount_lu_slow(unsigned long bitmap) {
+ DO_POPCOUNT(bitmap, unsigned long);
+}
+
+static inline unsigned
+popcount_llu_slow(unsigned long long bitmap) {
+ DO_POPCOUNT(bitmap, unsigned long long);
+}
+
+#undef DO_POPCOUNT
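/*
 * Worked instance of the SWAR steps above on one byte, x = 0b11010110 (five
 * bits set):
 *   pair step:   1101 0110 -> 1001 0101   (2-bit counts 2,1,1,1)
 *   nibble step: 1001 0101 -> 0011 0010   (4-bit counts 3,2)
 *   byte step:   0011 0010 -> 0000 0101   (= 5)
 * For wider types, the final bmul multiply then sums every byte's count into
 * the top byte, which the closing shift extracts.
 */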
+
+static inline unsigned
+popcount_u(unsigned bitmap) {
+#ifdef JEMALLOC_INTERNAL_POPCOUNT
+ return JEMALLOC_INTERNAL_POPCOUNT(bitmap);
+#else
+ return popcount_u_slow(bitmap);
+#endif
+}
+
+static inline unsigned
popcount_lu(unsigned long bitmap) {
- return JEMALLOC_INTERNAL_POPCOUNTL(bitmap);
+#ifdef JEMALLOC_INTERNAL_POPCOUNTL
+ return JEMALLOC_INTERNAL_POPCOUNTL(bitmap);
+#else
+ return popcount_lu_slow(bitmap);
+#endif
}
+
+static inline unsigned
+popcount_llu(unsigned long long bitmap) {
+#ifdef JEMALLOC_INTERNAL_POPCOUNTLL
+ return JEMALLOC_INTERNAL_POPCOUNTLL(bitmap);
+#else
+ return popcount_llu_slow(bitmap);
#endif
+}
/*
* Clears first unset bit in bitmap, and returns
* place of bit. bitmap *must not* be 0.
*/
-BIT_UTIL_INLINE size_t
+static inline size_t
cfs_lu(unsigned long* bitmap) {
- size_t bit = ffs_lu(*bitmap) - 1;
+ util_assume(*bitmap != 0);
+ size_t bit = ffs_lu(*bitmap);
*bitmap ^= ZU(1) << bit;
return bit;
}
-BIT_UTIL_INLINE unsigned
-ffs_zu(size_t bitmap) {
+static inline unsigned
+ffs_zu(size_t x) {
#if LG_SIZEOF_PTR == LG_SIZEOF_INT
- return ffs_u(bitmap);
+ return ffs_u(x);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
- return ffs_lu(bitmap);
+ return ffs_lu(x);
#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
- return ffs_llu(bitmap);
+ return ffs_llu(x);
#else
#error No implementation for size_t ffs()
#endif
}
-BIT_UTIL_INLINE unsigned
-ffs_u64(uint64_t bitmap) {
+static inline unsigned
+fls_zu(size_t x) {
+#if LG_SIZEOF_PTR == LG_SIZEOF_INT
+ return fls_u(x);
+#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG
+ return fls_lu(x);
+#elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG
+ return fls_llu(x);
+#else
+#error No implementation for size_t fls()
+#endif
+}
+
+
+static inline unsigned
+ffs_u64(uint64_t x) {
#if LG_SIZEOF_LONG == 3
- return ffs_lu(bitmap);
+ return ffs_lu(x);
#elif LG_SIZEOF_LONG_LONG == 3
- return ffs_llu(bitmap);
+ return ffs_llu(x);
#else
#error No implementation for 64-bit ffs()
#endif
}
-BIT_UTIL_INLINE unsigned
-ffs_u32(uint32_t bitmap) {
+static inline unsigned
+fls_u64(uint64_t x) {
+#if LG_SIZEOF_LONG == 3
+ return fls_lu(x);
+#elif LG_SIZEOF_LONG_LONG == 3
+ return fls_llu(x);
+#else
+#error No implementation for 64-bit fls()
+#endif
+}
+
+static inline unsigned
+ffs_u32(uint32_t x) {
#if LG_SIZEOF_INT == 2
- return ffs_u(bitmap);
+ return ffs_u(x);
#else
#error No implementation for 32-bit ffs()
#endif
- return ffs_u(bitmap);
+ return ffs_u(x);
+}
+
+static inline unsigned
+fls_u32(uint32_t x) {
+#if LG_SIZEOF_INT == 2
+ return fls_u(x);
+#else
+#error No implementation for 32-bit fls()
+#endif
+ return fls_u(x);
}
-BIT_UTIL_INLINE uint64_t
+static inline uint64_t
pow2_ceil_u64(uint64_t x) {
-#if (defined(__amd64__) || defined(__x86_64__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ))
- if(unlikely(x <= 1)) {
+ if (unlikely(x <= 1)) {
return x;
}
- size_t msb_on_index;
-#if (defined(__amd64__) || defined(__x86_64__))
- asm ("bsrq %1, %0"
- : "=r"(msb_on_index) // Outputs.
- : "r"(x-1) // Inputs.
- );
-#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
- msb_on_index = (63 ^ __builtin_clzll(x - 1));
-#endif
+ size_t msb_on_index = fls_u64(x - 1);
+ /*
+ * Range-check; it's on the callers to ensure that the result of this
+ * call won't overflow.
+ */
assert(msb_on_index < 63);
return 1ULL << (msb_on_index + 1);
-#else
- x--;
- x |= x >> 1;
- x |= x >> 2;
- x |= x >> 4;
- x |= x >> 8;
- x |= x >> 16;
- x |= x >> 32;
- x++;
- return x;
-#endif
}
-BIT_UTIL_INLINE uint32_t
+static inline uint32_t
pow2_ceil_u32(uint32_t x) {
-#if ((defined(__i386__) || defined(JEMALLOC_HAVE_BUILTIN_CLZ)) && (!defined(__s390__)))
- if(unlikely(x <= 1)) {
- return x;
+ if (unlikely(x <= 1)) {
+ return x;
}
- size_t msb_on_index;
-#if (defined(__i386__))
- asm ("bsr %1, %0"
- : "=r"(msb_on_index) // Outputs.
- : "r"(x-1) // Inputs.
- );
-#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
- msb_on_index = (31 ^ __builtin_clz(x - 1));
-#endif
+ size_t msb_on_index = fls_u32(x - 1);
+ /* As above. */
assert(msb_on_index < 31);
return 1U << (msb_on_index + 1);
-#else
- x--;
- x |= x >> 1;
- x |= x >> 2;
- x |= x >> 4;
- x |= x >> 8;
- x |= x >> 16;
- x++;
- return x;
-#endif
}
/* Compute the smallest power of 2 that is >= x. */
-BIT_UTIL_INLINE size_t
+static inline size_t
pow2_ceil_zu(size_t x) {
#if (LG_SIZEOF_PTR == 3)
return pow2_ceil_u64(x);
@@ -149,77 +388,21 @@ pow2_ceil_zu(size_t x) {
#endif
}
-#if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__))
-BIT_UTIL_INLINE unsigned
-lg_floor(size_t x) {
- size_t ret;
- assert(x != 0);
-
- asm ("bsr %1, %0"
- : "=r"(ret) // Outputs.
- : "r"(x) // Inputs.
- );
- assert(ret < UINT_MAX);
- return (unsigned)ret;
-}
-#elif (defined(_MSC_VER))
-BIT_UTIL_INLINE unsigned
+static inline unsigned
lg_floor(size_t x) {
- unsigned long ret;
-
- assert(x != 0);
-
+ util_assume(x != 0);
#if (LG_SIZEOF_PTR == 3)
- _BitScanReverse64(&ret, x);
-#elif (LG_SIZEOF_PTR == 2)
- _BitScanReverse(&ret, x);
+ return fls_u64(x);
#else
-# error "Unsupported type size for lg_floor()"
+ return fls_u32(x);
#endif
- assert(ret < UINT_MAX);
- return (unsigned)ret;
}
-#elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ))
-BIT_UTIL_INLINE unsigned
-lg_floor(size_t x) {
- assert(x != 0);
-#if (LG_SIZEOF_PTR == LG_SIZEOF_INT)
- return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x);
-#elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG)
- return ((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x);
-#else
-# error "Unsupported type size for lg_floor()"
-#endif
-}
-#else
-BIT_UTIL_INLINE unsigned
-lg_floor(size_t x) {
- assert(x != 0);
-
- x |= (x >> 1);
- x |= (x >> 2);
- x |= (x >> 4);
- x |= (x >> 8);
- x |= (x >> 16);
-#if (LG_SIZEOF_PTR == 3)
- x |= (x >> 32);
-#endif
- if (x == SIZE_T_MAX) {
- return (8 << LG_SIZEOF_PTR) - 1;
- }
- x++;
- return ffs_zu(x) - 2;
-}
-#endif
-
-BIT_UTIL_INLINE unsigned
+static inline unsigned
lg_ceil(size_t x) {
return lg_floor(x) + ((x & (x - 1)) == 0 ? 0 : 1);
}
-#undef BIT_UTIL_INLINE
-
/* A compile-time version of lg_floor and lg_ceil. */
#define LG_FLOOR_1(x) 0
#define LG_FLOOR_2(x) (x < (1ULL << 1) ? LG_FLOOR_1(x) : 1 + LG_FLOOR_1(x >> 1))
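The new ffs_*/fls_* helpers are 0-indexed and require a non-zero argument; a standalone check of the conventions, using the GCC builtins the header wraps:

    #include <assert.h>

    int
    main(void) {
        unsigned x = 0x50;                   /* bits 4 and 6 set */
        assert(__builtin_ffs(x) - 1 == 4);   /* ffs_u(x): lowest set bit */
        assert(31 - __builtin_clz(x) == 6);  /* fls_u(x): highest set bit */
        return 0;
    }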
diff --git a/contrib/jemalloc/include/jemalloc/internal/bitmap.h b/contrib/jemalloc/include/jemalloc/internal/bitmap.h
index c3f9cb490f67..dc19454d46ee 100644
--- a/contrib/jemalloc/include/jemalloc/internal/bitmap.h
+++ b/contrib/jemalloc/include/jemalloc/internal/bitmap.h
@@ -1,7 +1,6 @@
#ifndef JEMALLOC_INTERNAL_BITMAP_H
#define JEMALLOC_INTERNAL_BITMAP_H
-#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/bit_util.h"
#include "jemalloc/internal/sc.h"
@@ -9,9 +8,9 @@ typedef unsigned long bitmap_t;
#define LG_SIZEOF_BITMAP LG_SIZEOF_LONG
/* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */
-#if LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
+#if SC_LG_SLAB_MAXREGS > LG_CEIL(SC_NSIZES)
/* Maximum bitmap bit count is determined by maximum regions per slab. */
-# define LG_BITMAP_MAXBITS LG_SLAB_MAXREGS
+# define LG_BITMAP_MAXBITS SC_LG_SLAB_MAXREGS
#else
/* Maximum bitmap bit count is determined by number of extent size classes. */
# define LG_BITMAP_MAXBITS LG_CEIL(SC_NSIZES)
@@ -273,7 +272,7 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
}
return bitmap_ffu(bitmap, binfo, sib_base);
}
- bit += ((size_t)(ffs_lu(group_masked) - 1)) <<
+ bit += ((size_t)ffs_lu(group_masked)) <<
(lg_bits_per_group - LG_BITMAP_GROUP_NBITS);
}
assert(bit >= min_bit);
@@ -285,9 +284,9 @@ bitmap_ffu(const bitmap_t *bitmap, const bitmap_info_t *binfo, size_t min_bit) {
- 1);
size_t bit;
do {
- bit = ffs_lu(g);
- if (bit != 0) {
- return (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
+ if (g != 0) {
+ bit = ffs_lu(g);
+ return (i << LG_BITMAP_GROUP_NBITS) + bit;
}
i++;
g = bitmap[i];
@@ -308,20 +307,20 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) {
#ifdef BITMAP_USE_TREE
i = binfo->nlevels - 1;
g = bitmap[binfo->levels[i].group_offset];
- bit = ffs_lu(g) - 1;
+ bit = ffs_lu(g);
while (i > 0) {
i--;
g = bitmap[binfo->levels[i].group_offset + bit];
- bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1);
+ bit = (bit << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
}
#else
i = 0;
g = bitmap[0];
- while ((bit = ffs_lu(g)) == 0) {
+ while (g == 0) {
i++;
g = bitmap[i];
}
- bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
+ bit = (i << LG_BITMAP_GROUP_NBITS) + ffs_lu(g);
#endif
bitmap_set(bitmap, binfo, bit);
return bit;
diff --git a/contrib/jemalloc/include/jemalloc/internal/buf_writer.h b/contrib/jemalloc/include/jemalloc/internal/buf_writer.h
new file mode 100644
index 000000000000..37aa6de5b3c3
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/buf_writer.h
@@ -0,0 +1,32 @@
+#ifndef JEMALLOC_INTERNAL_BUF_WRITER_H
+#define JEMALLOC_INTERNAL_BUF_WRITER_H
+
+/*
+ * Note: when using the buffered writer, cbopaque is passed to write_cb only
+ * when the buffer is flushed. This would make a difference if cbopaque
+ * pointed to something that changes for each write_cb call, or something
+ * that affects write_cb in a way dependent on the content of the output
+ * string. In the most typical usage, however, cbopaque points to some
+ * "option-like" content for the write_cb, so it does not matter.
+ */
+
+typedef struct {
+ write_cb_t *write_cb;
+ void *cbopaque;
+ char *buf;
+ size_t buf_size;
+ size_t buf_end;
+ bool internal_buf;
+} buf_writer_t;
+
+bool buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer,
+ write_cb_t *write_cb, void *cbopaque, char *buf, size_t buf_len);
+void buf_writer_flush(buf_writer_t *buf_writer);
+write_cb_t buf_writer_cb;
+void buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer);
+
+typedef ssize_t (read_cb_t)(void *read_cbopaque, void *buf, size_t limit);
+void buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb,
+ void *read_cbopaque);
+
+#endif /* JEMALLOC_INTERNAL_BUF_WRITER_H */
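
A minimal usage sketch for the API above. It assumes write_cb_t is jemalloc's usual void (*)(void *cbopaque, const char *s) callback type and that the caller has a tsdn handle; my_write and the 512-byte stack buffer are hypothetical:

    #include <stdio.h>

    /* Hypothetical sink; cbopaque is only passed through on flush. */
    static void
    my_write(void *cbopaque, const char *s) {
        fputs(s, (FILE *)cbopaque);
    }

    static void
    emit(tsdn_t *tsdn) {
        buf_writer_t bw;
        char buf[512];
        if (buf_writer_init(tsdn, &bw, my_write, stderr, buf, sizeof(buf))) {
            return;  /* initialization failed */
        }
        buf_writer_cb(&bw, "hello ");  /* buffered, not yet flushed */
        buf_writer_cb(&bw, "world\n");
        buf_writer_terminate(tsdn, &bw);  /* flushes and cleans up */
    }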
diff --git a/contrib/jemalloc/include/jemalloc/internal/cache_bin.h b/contrib/jemalloc/include/jemalloc/internal/cache_bin.h
index d14556a3da8a..caf5be338d6a 100644
--- a/contrib/jemalloc/include/jemalloc/internal/cache_bin.h
+++ b/contrib/jemalloc/include/jemalloc/internal/cache_bin.h
@@ -2,6 +2,7 @@
#define JEMALLOC_INTERNAL_CACHE_BIN_H
#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/sz.h"
/*
* The cache_bins are the mechanism that the tcache and the arena use to
@@ -13,14 +14,38 @@
* of the tcache at all.
*/
+/*
+ * The size in bytes of each cache bin stack. We also use this to indicate
+ * *counts* of individual objects.
+ */
+typedef uint16_t cache_bin_sz_t;
/*
- * The count of the number of cached allocations in a bin. We make this signed
- * so that negative numbers can encode "invalid" states (e.g. a low water mark
- * of -1 for a cache that has been depleted).
+ * Leave a noticeable mark pattern on the cache bin stack boundaries, in case a
+ * bug starts leaking those. Make it look like the junk pattern but be distinct
+ * from it.
*/
-typedef int32_t cache_bin_sz_t;
+static const uintptr_t cache_bin_preceding_junk =
+ (uintptr_t)0x7a7a7a7a7a7a7a7aULL;
+/* Note: a7 vs. 7a above -- this tells you which pointer leaked. */
+static const uintptr_t cache_bin_trailing_junk =
+ (uintptr_t)0xa7a7a7a7a7a7a7a7ULL;
+/*
+ * That implies the following value, for the maximum number of items in any
+ * individual bin. The cache bins track their bounds looking just at the low
+ * bits of a pointer, compared against a cache_bin_sz_t. So that's
+ * 1 << (sizeof(cache_bin_sz_t) * 8)
+ * bytes spread across pointer sized objects to get the maximum.
+ */
+#define CACHE_BIN_NCACHED_MAX (((size_t)1 << sizeof(cache_bin_sz_t) * 8) \
+ / sizeof(void *) - 1)
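
To make the comment's arithmetic concrete: with a 16-bit cache_bin_sz_t and 8-byte pointers, that is 65536 bytes / 8 bytes per pointer - 1 = 8191 items. A quick self-check under those (LP64) assumptions:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    int
    main(void) {
        /* Assumes 8-byte pointers, as on LP64 targets. */
        size_t max = ((size_t)1 << (sizeof(uint16_t) * 8)) / sizeof(void *) - 1;
        assert(max == 8191);
        return 0;
    }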
+
+/*
+ * This lives inside the cache_bin (for locality reasons), and is initialized
+ * alongside it, but is otherwise not modified by any cache bin operations.
+ * It's logically public and maintained by its callers.
+ */
typedef struct cache_bin_stats_s cache_bin_stats_t;
struct cache_bin_stats_s {
/*
@@ -36,34 +61,75 @@ struct cache_bin_stats_s {
*/
typedef struct cache_bin_info_s cache_bin_info_t;
struct cache_bin_info_s {
- /* Upper limit on ncached. */
cache_bin_sz_t ncached_max;
};
+/*
+ * Responsible for caching allocations associated with a single size.
+ *
+ * Several pointers are used to track the stack. To save on metadata bytes,
+ * only the stack_head is a full sized pointer (which is dereferenced on the
+ * fastpath), while the others store only the low 16 bits -- this is correct
+ * because a single stack never takes more space than 2^16 bytes, and at the
+ * same time only equality checks are performed on the low bits.
+ *
+ * (low addr) (high addr)
+ * |------stashed------|------available------|------cached-----|
+ * ^ ^ ^ ^
+ * low_bound(derived) low_bits_full stack_head low_bits_empty
+ */
typedef struct cache_bin_s cache_bin_t;
struct cache_bin_s {
- /* Min # cached since last GC. */
- cache_bin_sz_t low_water;
- /* # of cached objects. */
- cache_bin_sz_t ncached;
/*
- * ncached and stats are both modified frequently. Let's keep them
+ * The stack grows down. Whenever the bin is nonempty, the head points
+ * to an array entry containing a valid allocation. When it is empty,
+ * the head points to one element past the owned array.
+ */
+ void **stack_head;
+ /*
+	 * stack_head and stats are both modified frequently. Let's keep them
* close so that they have a higher chance of being on the same
* cacheline, thus less write-backs.
*/
cache_bin_stats_t tstats;
+
/*
- * Stack of available objects.
+ * The low bits of the address of the first item in the stack that
+ * hasn't been used since the last GC, to track the low water mark (min
+ * # of cached items).
*
- * To make use of adjacent cacheline prefetch, the items in the avail
- * stack goes to higher address for newer allocations. avail points
- * just above the available space, which means that
- * avail[-ncached, ... -1] are available items and the lowest item will
- * be allocated first.
+ * Since the stack grows down, this is a higher address than
+ * low_bits_full.
*/
- void **avail;
+ uint16_t low_bits_low_water;
+
+ /*
+ * The low bits of the value that stack_head will take on when the array
+ * is full (of cached & stashed items). But remember that stack_head
+ * always points to a valid item when the array is nonempty -- this is
+ * in the array.
+ *
+ * Recall that since the stack grows down, this is the lowest available
+ * address in the array for caching. Only adjusted when stashing items.
+ */
+ uint16_t low_bits_full;
+
+ /*
+ * The low bits of the value that stack_head will take on when the array
+ * is empty.
+ *
+ * The stack grows down -- this is one past the highest address in the
+ * array. Immutable after initialization.
+ */
+ uint16_t low_bits_empty;
};
+/*
+ * The cache_bins live inside the tcache, but the arena (by design) isn't
+ * supposed to know much about tcache internals. To let the arena iterate over
+ * associated bins, we keep (with the tcache) a linked list of
+ * cache_bin_array_descriptor_ts that tell the arena how to find the bins.
+ */
typedef struct cache_bin_array_descriptor_s cache_bin_array_descriptor_t;
struct cache_bin_array_descriptor_s {
/*
@@ -72,37 +138,214 @@ struct cache_bin_array_descriptor_s {
*/
ql_elm(cache_bin_array_descriptor_t) link;
/* Pointers to the tcache bins. */
- cache_bin_t *bins_small;
- cache_bin_t *bins_large;
+ cache_bin_t *bins;
};
static inline void
cache_bin_array_descriptor_init(cache_bin_array_descriptor_t *descriptor,
- cache_bin_t *bins_small, cache_bin_t *bins_large) {
+ cache_bin_t *bins) {
ql_elm_new(descriptor, link);
- descriptor->bins_small = bins_small;
- descriptor->bins_large = bins_large;
+ descriptor->bins = bins;
}
-JEMALLOC_ALWAYS_INLINE void *
-cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
- void *ret;
+JEMALLOC_ALWAYS_INLINE bool
+cache_bin_nonfast_aligned(const void *ptr) {
+ if (!config_uaf_detection) {
+ return false;
+ }
+ /*
+ * Currently we use alignment to decide which pointer to junk & stash on
+ * dealloc (for catching use-after-free). In some common cases a
+ * page-aligned check is needed already (sdalloc w/ config_prof), so we
+ * are getting it more or less for free -- no added instructions on
+ * free_fastpath.
+ *
+	 * Another way of deciding which pointer to sample is adding another
+	 * thread_event to pick one every N bytes. That also adds no cost on
+	 * the fastpath; however, it will tend to pick large allocations, which
+	 * is not the desired behavior.
+ */
+ return ((uintptr_t)ptr & san_cache_bin_nonfast_mask) == 0;
+}
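
The check above compiles down to a single mask-and-compare. A standalone illustration, using a hypothetical stand-in for san_cache_bin_nonfast_mask that selects page-aligned (4 KiB) pointers:

    #include <stdint.h>

    /* Hypothetical mask value; the real one is configured at runtime. */
    static const uintptr_t nonfast_mask = (uintptr_t)0xfff;

    static int
    would_sample(const void *ptr) {
        return ((uintptr_t)ptr & nonfast_mask) == 0;
    }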
+
+/* Returns ncached_max: Upper limit on ncached. */
+static inline cache_bin_sz_t
+cache_bin_info_ncached_max(cache_bin_info_t *info) {
+ return info->ncached_max;
+}
+
+/*
+ * Internal.
+ *
+ * Asserts that the pointer associated with earlier is <= the one associated
+ * with later.
+ */
+static inline void
+cache_bin_assert_earlier(cache_bin_t *bin, uint16_t earlier, uint16_t later) {
+ if (earlier > later) {
+ assert(bin->low_bits_full > bin->low_bits_empty);
+ }
+}
- bin->ncached--;
+/*
+ * Internal.
+ *
+ * Does difference calculations that handle wraparound correctly. Earlier must
+ * be associated with the position earlier in memory.
+ */
+static inline uint16_t
+cache_bin_diff(cache_bin_t *bin, uint16_t earlier, uint16_t later, bool racy) {
+ /*
+ * When it's racy, bin->low_bits_full can be modified concurrently. It
+ * can cross the uint16_t max value and become less than
+ * bin->low_bits_empty at the time of the check.
+ */
+ if (!racy) {
+ cache_bin_assert_earlier(bin, earlier, later);
+ }
+ return later - earlier;
+}
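
The wraparound correctness comes from unsigned modular arithmetic: the 16-bit subtraction later - earlier yields the forward distance even when the low bits have wrapped past 0xffff. A standalone check:

    #include <assert.h>
    #include <stdint.h>

    int
    main(void) {
        uint16_t earlier = 0xfff0;
        uint16_t later = 0x0010;  /* wrapped past 0xffff */
        uint16_t diff = (uint16_t)(later - earlier);
        assert(diff == 0x20);     /* 0x10 + (0x10000 - 0xfff0) */
        return 0;
    }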
+/*
+ * Number of items currently cached in the bin, without checking ncached_max.
+ * We require specifying whether the request is racy (i.e. whether concurrent
+ * modifications are possible).
+ */
+static inline cache_bin_sz_t
+cache_bin_ncached_get_internal(cache_bin_t *bin, bool racy) {
+ cache_bin_sz_t diff = cache_bin_diff(bin,
+ (uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty, racy);
+ cache_bin_sz_t n = diff / sizeof(void *);
/*
- * Check for both bin->ncached == 0 and ncached < low_water
- * in a single branch.
+ * We have undefined behavior here; if this function is called from the
+ * arena stats updating code, then stack_head could change from the
+ * first line to the next one. Morally, these loads should be atomic,
+ * but compilers won't currently generate comparisons with in-memory
+ * operands against atomics, and these variables get accessed on the
+ * fast paths. This should still be "safe" in the sense of generating
+ * the correct assembly for the foreseeable future, though.
*/
- if (unlikely(bin->ncached <= bin->low_water)) {
- bin->low_water = bin->ncached;
- if (bin->ncached == -1) {
- bin->ncached = 0;
- *success = false;
- return NULL;
- }
+ assert(n == 0 || *(bin->stack_head) != NULL || racy);
+ return n;
+}
+
+/*
+ * Number of items currently cached in the bin, with checking ncached_max. The
+ * caller must know that no concurrent modification of the cache_bin is
+ * possible.
+ */
+static inline cache_bin_sz_t
+cache_bin_ncached_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
+ cache_bin_sz_t n = cache_bin_ncached_get_internal(bin,
+ /* racy */ false);
+ assert(n <= cache_bin_info_ncached_max(info));
+ return n;
+}
+
+/*
+ * Internal.
+ *
+ * A pointer to the position one past the end of the backing array.
+ *
+ * Do not call if racy, because both 'bin->stack_head' and 'bin->low_bits_full'
+ * are subject to concurrent modifications.
+ */
+static inline void **
+cache_bin_empty_position_get(cache_bin_t *bin) {
+ cache_bin_sz_t diff = cache_bin_diff(bin,
+ (uint16_t)(uintptr_t)bin->stack_head, bin->low_bits_empty,
+ /* racy */ false);
+ uintptr_t empty_bits = (uintptr_t)bin->stack_head + diff;
+ void **ret = (void **)empty_bits;
+
+ assert(ret >= bin->stack_head);
+
+ return ret;
+}
+
+/*
+ * Internal.
+ *
+ * Calculates low bits of the lower bound of the usable cache bin's range (see
+ * cache_bin_t visual representation above).
+ *
+ * No values are concurrently modified, so should be safe to read in a
+ * multithreaded environment. Currently concurrent access happens only during
+ * arena statistics collection.
+ */
+static inline uint16_t
+cache_bin_low_bits_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
+ return (uint16_t)bin->low_bits_empty -
+ info->ncached_max * sizeof(void *);
+}
+
+/*
+ * Internal.
+ *
+ * A pointer to the position with the lowest address of the backing array.
+ */
+static inline void **
+cache_bin_low_bound_get(cache_bin_t *bin, cache_bin_info_t *info) {
+ cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info);
+ void **ret = cache_bin_empty_position_get(bin) - ncached_max;
+ assert(ret <= bin->stack_head);
+
+ return ret;
+}
+
+/*
+ * As the name implies. This is important since it's not correct to try to
+ * batch fill a nonempty cache bin.
+ */
+static inline void
+cache_bin_assert_empty(cache_bin_t *bin, cache_bin_info_t *info) {
+ assert(cache_bin_ncached_get_local(bin, info) == 0);
+ assert(cache_bin_empty_position_get(bin) == bin->stack_head);
+}
+
+/*
+ * Get low water, but without any of the correctness checking we do for the
+ * caller-usable version, if we are temporarily breaking invariants (like
+ * ncached >= low_water during flush).
+ */
+static inline cache_bin_sz_t
+cache_bin_low_water_get_internal(cache_bin_t *bin) {
+ return cache_bin_diff(bin, bin->low_bits_low_water,
+ bin->low_bits_empty, /* racy */ false) / sizeof(void *);
+}
+
+/* Returns the numeric value of low water in [0, ncached]. */
+static inline cache_bin_sz_t
+cache_bin_low_water_get(cache_bin_t *bin, cache_bin_info_t *info) {
+ cache_bin_sz_t low_water = cache_bin_low_water_get_internal(bin);
+ assert(low_water <= cache_bin_info_ncached_max(info));
+ assert(low_water <= cache_bin_ncached_get_local(bin, info));
+
+ cache_bin_assert_earlier(bin, (uint16_t)(uintptr_t)bin->stack_head,
+ bin->low_bits_low_water);
+
+ return low_water;
+}
+
+/*
+ * Indicates that the current cache bin position should be the low water mark
+ * going forward.
+ */
+static inline void
+cache_bin_low_water_set(cache_bin_t *bin) {
+ bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head;
+}
+
+static inline void
+cache_bin_low_water_adjust(cache_bin_t *bin) {
+ if (cache_bin_ncached_get_internal(bin, /* racy */ false)
+ < cache_bin_low_water_get_internal(bin)) {
+ cache_bin_low_water_set(bin);
}
+}
+JEMALLOC_ALWAYS_INLINE void *
+cache_bin_alloc_impl(cache_bin_t *bin, bool *success, bool adjust_low_water) {
/*
* success (instead of ret) should be checked upon the return of this
* function. We avoid checking (ret == NULL) because there is never a
@@ -110,22 +353,318 @@ cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
* and eagerly checking ret would cause pipeline stall (waiting for the
* cacheline).
*/
- *success = true;
- ret = *(bin->avail - (bin->ncached + 1));
- return ret;
+ /*
+	 * This may read from the empty position; however, the loaded value won't
+ * be used. It's safe because the stack has one more slot reserved.
+ */
+ void *ret = *bin->stack_head;
+ uint16_t low_bits = (uint16_t)(uintptr_t)bin->stack_head;
+ void **new_head = bin->stack_head + 1;
+
+ /*
+ * Note that the low water mark is at most empty; if we pass this check,
+ * we know we're non-empty.
+ */
+ if (likely(low_bits != bin->low_bits_low_water)) {
+ bin->stack_head = new_head;
+ *success = true;
+ return ret;
+ }
+ if (!adjust_low_water) {
+ *success = false;
+ return NULL;
+ }
+ /*
+ * In the fast-path case where we call alloc_easy and then alloc, the
+ * previous checking and computation is optimized away -- we didn't
+ * actually commit any of our operations.
+ */
+ if (likely(low_bits != bin->low_bits_empty)) {
+ bin->stack_head = new_head;
+ bin->low_bits_low_water = (uint16_t)(uintptr_t)new_head;
+ *success = true;
+ return ret;
+ }
+ *success = false;
+ return NULL;
+}
+
+/*
+ * Allocate an item out of the bin, failing if we're at the low-water mark.
+ */
+JEMALLOC_ALWAYS_INLINE void *
+cache_bin_alloc_easy(cache_bin_t *bin, bool *success) {
+ /* We don't look at info if we're not adjusting low-water. */
+ return cache_bin_alloc_impl(bin, success, false);
+}
+
+/*
+ * Allocate an item out of the bin, even if we're currently at the low-water
+ * mark (and failing only if the bin is empty).
+ */
+JEMALLOC_ALWAYS_INLINE void *
+cache_bin_alloc(cache_bin_t *bin, bool *success) {
+ return cache_bin_alloc_impl(bin, success, true);
+}
+
+JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
+cache_bin_alloc_batch(cache_bin_t *bin, size_t num, void **out) {
+ cache_bin_sz_t n = cache_bin_ncached_get_internal(bin,
+ /* racy */ false);
+ if (n > num) {
+ n = (cache_bin_sz_t)num;
+ }
+ memcpy(out, bin->stack_head, n * sizeof(void *));
+ bin->stack_head += n;
+ cache_bin_low_water_adjust(bin);
+
+ return n;
}
JEMALLOC_ALWAYS_INLINE bool
-cache_bin_dalloc_easy(cache_bin_t *bin, cache_bin_info_t *bin_info, void *ptr) {
- if (unlikely(bin->ncached == bin_info->ncached_max)) {
+cache_bin_full(cache_bin_t *bin) {
+ return ((uint16_t)(uintptr_t)bin->stack_head == bin->low_bits_full);
+}
+
+/*
+ * Free an object into the given bin. Fails only if the bin is full.
+ */
+JEMALLOC_ALWAYS_INLINE bool
+cache_bin_dalloc_easy(cache_bin_t *bin, void *ptr) {
+ if (unlikely(cache_bin_full(bin))) {
return false;
}
- assert(bin->ncached < bin_info->ncached_max);
- bin->ncached++;
- *(bin->avail - bin->ncached) = ptr;
+
+ bin->stack_head--;
+ *bin->stack_head = ptr;
+ cache_bin_assert_earlier(bin, bin->low_bits_full,
+ (uint16_t)(uintptr_t)bin->stack_head);
return true;
}
+/* Returns false if failed to stash (i.e. bin is full). */
+JEMALLOC_ALWAYS_INLINE bool
+cache_bin_stash(cache_bin_t *bin, void *ptr) {
+ if (cache_bin_full(bin)) {
+ return false;
+ }
+
+ /* Stash at the full position, in the [full, head) range. */
+ uint16_t low_bits_head = (uint16_t)(uintptr_t)bin->stack_head;
+ /* Wraparound handled as well. */
+ uint16_t diff = cache_bin_diff(bin, bin->low_bits_full, low_bits_head,
+ /* racy */ false);
+ *(void **)((uintptr_t)bin->stack_head - diff) = ptr;
+
+ assert(!cache_bin_full(bin));
+ bin->low_bits_full += sizeof(void *);
+ cache_bin_assert_earlier(bin, bin->low_bits_full, low_bits_head);
+
+ return true;
+}
+
+/*
+ * Get the number of stashed pointers.
+ *
+ * When called from a thread not owning the TLS (i.e. racy = true), it's
+ * important to keep in mind that 'bin->stack_head' and 'bin->low_bits_full' can
+ * be modified concurrently, so almost no assertions about their values can
+ * be made.
+ */
+JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
+cache_bin_nstashed_get_internal(cache_bin_t *bin, cache_bin_info_t *info,
+ bool racy) {
+ cache_bin_sz_t ncached_max = cache_bin_info_ncached_max(info);
+ uint16_t low_bits_low_bound = cache_bin_low_bits_low_bound_get(bin,
+ info);
+
+ cache_bin_sz_t n = cache_bin_diff(bin, low_bits_low_bound,
+ bin->low_bits_full, racy) / sizeof(void *);
+ assert(n <= ncached_max);
+
+ if (!racy) {
+ /* Below are for assertions only. */
+ void **low_bound = cache_bin_low_bound_get(bin, info);
+
+ assert((uint16_t)(uintptr_t)low_bound == low_bits_low_bound);
+ void *stashed = *(low_bound + n - 1);
+ bool aligned = cache_bin_nonfast_aligned(stashed);
+#ifdef JEMALLOC_JET
+ /* Allow arbitrary pointers to be stashed in tests. */
+ aligned = true;
+#endif
+ assert(n == 0 || (stashed != NULL && aligned));
+ }
+
+ return n;
+}
+
+JEMALLOC_ALWAYS_INLINE cache_bin_sz_t
+cache_bin_nstashed_get_local(cache_bin_t *bin, cache_bin_info_t *info) {
+ cache_bin_sz_t n = cache_bin_nstashed_get_internal(bin, info,
+ /* racy */ false);
+ assert(n <= cache_bin_info_ncached_max(info));
+ return n;
+}
+
+/*
+ * Obtain a racy view of the number of items currently in the cache bin, in the
+ * presence of possible concurrent modifications.
+ */
+static inline void
+cache_bin_nitems_get_remote(cache_bin_t *bin, cache_bin_info_t *info,
+ cache_bin_sz_t *ncached, cache_bin_sz_t *nstashed) {
+ cache_bin_sz_t n = cache_bin_ncached_get_internal(bin, /* racy */ true);
+ assert(n <= cache_bin_info_ncached_max(info));
+ *ncached = n;
+
+ n = cache_bin_nstashed_get_internal(bin, info, /* racy */ true);
+ assert(n <= cache_bin_info_ncached_max(info));
+ *nstashed = n;
+ /* Note that cannot assert ncached + nstashed <= ncached_max (racy). */
+}
+
+/*
+ * Filling and flushing are done in batch, on arrays of void *s. For filling,
+ * the arrays go forward, and can be accessed with ordinary array arithmetic.
+ * For flushing, we work from the end backwards, and so need to use special
+ * accessors that invert the usual ordering.
+ *
+ * This is important for maintaining first-fit; the arena code fills with
+ * earliest objects first, and so those are the ones we should return first for
+ * cache_bin_alloc calls. When flushing, we should flush the objects that we
+ * wish to return later; those at the end of the array. This is better for the
+ * first-fit heuristic as well as for cache locality; the most recently freed
+ * objects are the ones most likely to still be in cache.
+ *
+ * This all sounds very hand-wavey and theoretical, but reverting the ordering
+ * on one or the other pathway leads to measurable slowdowns.
+ */
+
+typedef struct cache_bin_ptr_array_s cache_bin_ptr_array_t;
+struct cache_bin_ptr_array_s {
+ cache_bin_sz_t n;
+ void **ptr;
+};
+
+/*
+ * Declare a cache_bin_ptr_array_t sufficient for nval items.
+ *
+ * In the current implementation, this could be just part of a
+ * cache_bin_ptr_array_init_... call, since we reuse the cache bin stack memory.
+ * Indirecting behind a macro, though, means experimenting with linked-list
+ * representations is easy (since they'll require an alloca in the calling
+ * frame).
+ */
+#define CACHE_BIN_PTR_ARRAY_DECLARE(name, nval) \
+ cache_bin_ptr_array_t name; \
+ name.n = (nval)
+
+/*
+ * Start a fill. The bin must be empty, and this must be followed by a
+ * finish_fill call before doing any alloc/dalloc operations on the bin.
+ */
+static inline void
+cache_bin_init_ptr_array_for_fill(cache_bin_t *bin, cache_bin_info_t *info,
+ cache_bin_ptr_array_t *arr, cache_bin_sz_t nfill) {
+ cache_bin_assert_empty(bin, info);
+ arr->ptr = cache_bin_empty_position_get(bin) - nfill;
+}
+
+/*
+ * While nfill in cache_bin_init_ptr_array_for_fill is the number we *intend* to
+ * fill, nfilled here is the number we actually filled (which may be less, in
+ * case of OOM).
+ */
+static inline void
+cache_bin_finish_fill(cache_bin_t *bin, cache_bin_info_t *info,
+ cache_bin_ptr_array_t *arr, cache_bin_sz_t nfilled) {
+ cache_bin_assert_empty(bin, info);
+ void **empty_position = cache_bin_empty_position_get(bin);
+ if (nfilled < arr->n) {
+ memmove(empty_position - nfilled, empty_position - arr->n,
+ nfilled * sizeof(void *));
+ }
+ bin->stack_head = empty_position - nfilled;
+}
+
+/*
+ * Same deal, but with flush. Unlike fill (which can fail), the user must flush
+ * everything we give them.
+ */
+static inline void
+cache_bin_init_ptr_array_for_flush(cache_bin_t *bin, cache_bin_info_t *info,
+ cache_bin_ptr_array_t *arr, cache_bin_sz_t nflush) {
+ arr->ptr = cache_bin_empty_position_get(bin) - nflush;
+ assert(cache_bin_ncached_get_local(bin, info) == 0
+ || *arr->ptr != NULL);
+}
+
+static inline void
+cache_bin_finish_flush(cache_bin_t *bin, cache_bin_info_t *info,
+ cache_bin_ptr_array_t *arr, cache_bin_sz_t nflushed) {
+ unsigned rem = cache_bin_ncached_get_local(bin, info) - nflushed;
+ memmove(bin->stack_head + nflushed, bin->stack_head,
+ rem * sizeof(void *));
+ bin->stack_head = bin->stack_head + nflushed;
+ cache_bin_low_water_adjust(bin);
+}
+
+static inline void
+cache_bin_init_ptr_array_for_stashed(cache_bin_t *bin, szind_t binind,
+ cache_bin_info_t *info, cache_bin_ptr_array_t *arr,
+ cache_bin_sz_t nstashed) {
+ assert(nstashed > 0);
+ assert(cache_bin_nstashed_get_local(bin, info) == nstashed);
+
+ void **low_bound = cache_bin_low_bound_get(bin, info);
+ arr->ptr = low_bound;
+ assert(*arr->ptr != NULL);
+}
+
+static inline void
+cache_bin_finish_flush_stashed(cache_bin_t *bin, cache_bin_info_t *info) {
+ void **low_bound = cache_bin_low_bound_get(bin, info);
+
+ /* Reset the bin local full position. */
+ bin->low_bits_full = (uint16_t)(uintptr_t)low_bound;
+ assert(cache_bin_nstashed_get_local(bin, info) == 0);
+}
+
+/*
+ * Initialize a cache_bin_info to represent up to the given number of items in
+ * the cache_bins it is associated with.
+ */
+void cache_bin_info_init(cache_bin_info_t *bin_info,
+ cache_bin_sz_t ncached_max);
+/*
+ * Given an array of initialized cache_bin_info_ts, determine how big an
+ * allocation is required to initialize a full set of cache_bin_ts.
+ */
+void cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
+ size_t *size, size_t *alignment);
+
+/*
+ * Actually initialize some cache bins. Callers should allocate the backing
+ * memory indicated by a call to cache_bin_info_compute_alloc. They should then
+ * preincrement, call init once for each bin and info, and then call
+ * cache_bin_postincrement. *alloc_cur will then point immediately past the end
+ * of the allocation.
+ */
+void cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos,
+ void *alloc, size_t *cur_offset);
+void cache_bin_postincrement(cache_bin_info_t *infos, szind_t ninfos,
+ void *alloc, size_t *cur_offset);
+void cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
+ size_t *cur_offset);
+
+/*
+ * If a cache bin was zero initialized (either because it lives in static or
+ * thread-local storage, or was memset to 0), this function indicates whether or
+ * not cache_bin_init was called on it.
+ */
+bool cache_bin_still_zero_initialized(cache_bin_t *bin);
+
#endif /* JEMALLOC_INTERNAL_CACHE_BIN_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/counter.h b/contrib/jemalloc/include/jemalloc/internal/counter.h
new file mode 100644
index 000000000000..79abf0648b87
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/counter.h
@@ -0,0 +1,34 @@
+#ifndef JEMALLOC_INTERNAL_COUNTER_H
+#define JEMALLOC_INTERNAL_COUNTER_H
+
+#include "jemalloc/internal/mutex.h"
+
+typedef struct counter_accum_s {
+ LOCKEDINT_MTX_DECLARE(mtx)
+ locked_u64_t accumbytes;
+ uint64_t interval;
+} counter_accum_t;
+
+JEMALLOC_ALWAYS_INLINE bool
+counter_accum(tsdn_t *tsdn, counter_accum_t *counter, uint64_t bytes) {
+ uint64_t interval = counter->interval;
+ assert(interval > 0);
+ LOCKEDINT_MTX_LOCK(tsdn, counter->mtx);
+ /*
+ * If the event moves fast enough (and/or if the event handling is slow
+	 * enough), extreme overflow can cause multiple counter triggers to
+	 * coalesce.
+ * This is an intentional mechanism that avoids rate-limiting
+ * allocation.
+ */
+ bool overflow = locked_inc_mod_u64(tsdn, LOCKEDINT_MTX(counter->mtx),
+ &counter->accumbytes, bytes, interval);
+ LOCKEDINT_MTX_UNLOCK(tsdn, counter->mtx);
+ return overflow;
+}
+
+bool counter_accum_init(counter_accum_t *counter, uint64_t interval);
+void counter_prefork(tsdn_t *tsdn, counter_accum_t *counter);
+void counter_postfork_parent(tsdn_t *tsdn, counter_accum_t *counter);
+void counter_postfork_child(tsdn_t *tsdn, counter_accum_t *counter);
+
+#endif /* JEMALLOC_INTERNAL_COUNTER_H */
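
A sketch of typical usage: initialize the counter with the interval at which an event should fire, then feed it byte counts and act when it reports a trigger. Names other than the counter API itself are hypothetical:

    static counter_accum_t alloc_counter;

    static bool
    alloc_counter_boot(void) {
        /* Fire roughly once per 1 MiB accumulated; true means error. */
        return counter_accum_init(&alloc_counter, (uint64_t)1 << 20);
    }

    static void
    on_alloc(tsdn_t *tsdn, uint64_t bytes) {
        if (counter_accum(tsdn, &alloc_counter, bytes)) {
            /* Interval crossed (possibly several times, coalesced). */
        }
    }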
diff --git a/contrib/jemalloc/include/jemalloc/internal/ctl.h b/contrib/jemalloc/include/jemalloc/internal/ctl.h
index 1d1aacc6f417..63d27f8aad37 100644
--- a/contrib/jemalloc/include/jemalloc/internal/ctl.h
+++ b/contrib/jemalloc/include/jemalloc/internal/ctl.h
@@ -42,9 +42,11 @@ typedef struct ctl_arena_stats_s {
uint64_t nfills_small;
uint64_t nflushes_small;
- bin_stats_t bstats[SC_NBINS];
+ bin_stats_data_t bstats[SC_NBINS];
arena_stats_large_t lstats[SC_NSIZES - SC_NBINS];
- arena_stats_extents_t estats[SC_NPSIZES];
+ pac_estats_t estats[SC_NPSIZES];
+ hpa_shard_stats_t hpastats;
+ sec_stats_t secstats;
} ctl_arena_stats_t;
typedef struct ctl_stats_s {
@@ -96,13 +98,17 @@ typedef struct ctl_arenas_s {
int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
int ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp);
-
int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen);
+int ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
+ size_t *miblenp);
+int ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
+ size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
bool ctl_boot(void);
void ctl_prefork(tsdn_t *tsdn);
void ctl_postfork_parent(tsdn_t *tsdn);
void ctl_postfork_child(tsdn_t *tsdn);
+void ctl_mtx_assert_held(tsdn_t *tsdn);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
@@ -131,4 +137,23 @@ void ctl_postfork_child(tsdn_t *tsdn);
} \
} while (0)
+#define xmallctlmibnametomib(mib, miblen, name, miblenp) do { \
+ if (ctl_mibnametomib(tsd_fetch(), mib, miblen, name, miblenp) \
+ != 0) { \
+ malloc_write( \
+ "<jemalloc>: Failure in ctl_mibnametomib()\n"); \
+ abort(); \
+ } \
+} while (0)
+
+#define xmallctlbymibname(mib, miblen, name, miblenp, oldp, oldlenp, \
+ newp, newlen) do { \
+ if (ctl_bymibname(tsd_fetch(), mib, miblen, name, miblenp, \
+ oldp, oldlenp, newp, newlen) != 0) { \
+ malloc_write( \
+ "<jemalloc>: Failure in ctl_bymibname()\n"); \
+ abort(); \
+ } \
+} while (0)
+
#endif /* JEMALLOC_INTERNAL_CTL_H */
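
The new ctl_mibnametomib()/ctl_bymibname() entry points extend the MIB ("management information base") lookup scheme the public mallctl API already exposes, where a dotted name is translated once and then reused. For reference, the public analogue:

    #include <jemalloc/jemalloc.h>

    static size_t
    read_allocated(void) {
        size_t mib[2];
        size_t miblen = 2;
        size_t allocated = 0, sz = sizeof(allocated);

        /* Translate the dotted name once... */
        if (mallctlnametomib("stats.allocated", mib, &miblen) == 0) {
            /* ...then query repeatedly through the cached MIB. */
            mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0);
        }
        return allocated;
    }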
diff --git a/contrib/jemalloc/include/jemalloc/internal/decay.h b/contrib/jemalloc/include/jemalloc/internal/decay.h
new file mode 100644
index 000000000000..cf6a9d22c010
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/decay.h
@@ -0,0 +1,186 @@
+#ifndef JEMALLOC_INTERNAL_DECAY_H
+#define JEMALLOC_INTERNAL_DECAY_H
+
+#include "jemalloc/internal/smoothstep.h"
+
+#define DECAY_UNBOUNDED_TIME_TO_PURGE ((uint64_t)-1)
+
+/*
+ * The decay_t computes the number of pages we should purge at any given time.
+ * Page allocators inform a decay object when pages enter a decay-able state
+ * (i.e. dirty or muzzy), and query it to determine how many pages should be
+ * purged at any given time.
+ *
+ * This is mostly a single-threaded data structure and doesn't care about
+ * synchronization at all; it's the caller's responsibility to manage their
+ * synchronization on their own. There are two exceptions:
+ * 1) It's OK to racily call decay_ms_read (i.e. just the simplest state query).
+ * 2) The mtx and purging fields live (and are initialized) here, but are
+ * logically owned by the page allocator. This is just a convenience (since
+ * those fields would be duplicated for both the dirty and muzzy states
+ * otherwise).
+ */
+typedef struct decay_s decay_t;
+struct decay_s {
+ /* Synchronizes all non-atomic fields. */
+ malloc_mutex_t mtx;
+ /*
+ * True if a thread is currently purging the extents associated with
+ * this decay structure.
+ */
+ bool purging;
+ /*
+ * Approximate time in milliseconds from the creation of a set of unused
+ * dirty pages until an equivalent set of unused dirty pages is purged
+ * and/or reused.
+ */
+ atomic_zd_t time_ms;
+ /* time / SMOOTHSTEP_NSTEPS. */
+ nstime_t interval;
+ /*
+ * Time at which the current decay interval logically started. We do
+ * not actually advance to a new epoch until sometime after it starts
+ * because of scheduling and computation delays, and it is even possible
+ * to completely skip epochs. In all cases, during epoch advancement we
+ * merge all relevant activity into the most recently recorded epoch.
+ */
+ nstime_t epoch;
+ /* Deadline randomness generator. */
+ uint64_t jitter_state;
+ /*
+ * Deadline for current epoch. This is the sum of interval and per
+ * epoch jitter which is a uniform random variable in [0..interval).
+ * Epochs always advance by precise multiples of interval, but we
+ * randomize the deadline to reduce the likelihood of arenas purging in
+ * lockstep.
+ */
+ nstime_t deadline;
+ /*
+ * The number of pages we cap ourselves at in the current epoch, per
+ * decay policies. Updated on an epoch change. After an epoch change,
+ * the caller should take steps to try to purge down to this amount.
+ */
+ size_t npages_limit;
+ /*
+ * Number of unpurged pages at beginning of current epoch. During epoch
+ * advancement we use the delta between arena->decay_*.nunpurged and
+ * ecache_npages_get(&arena->ecache_*) to determine how many dirty pages,
+ * if any, were generated.
+ */
+ size_t nunpurged;
+ /*
+ * Trailing log of how many unused dirty pages were generated during
+ * each of the past SMOOTHSTEP_NSTEPS decay epochs, where the last
+ * element is the most recent epoch. Corresponding epoch times are
+ * relative to epoch.
+ *
+ * Updated only on epoch advance, triggered by
+ * decay_maybe_advance_epoch, below.
+ */
+ size_t backlog[SMOOTHSTEP_NSTEPS];
+
+ /* Peak number of pages in associated extents. Used for debug only. */
+ uint64_t ceil_npages;
+};
+
+/*
+ * The current decay time setting. This is the only public access to a decay_t
+ * that's allowed without holding mtx.
+ */
+static inline ssize_t
+decay_ms_read(const decay_t *decay) {
+ return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
+}
+
+/*
+ * See the comment on the struct field -- the limit on pages we should allow in
+ * this decay state this epoch.
+ */
+static inline size_t
+decay_npages_limit_get(const decay_t *decay) {
+ return decay->npages_limit;
+}
+
+/* How many unused dirty pages were generated during the last epoch. */
+static inline size_t
+decay_epoch_npages_delta(const decay_t *decay) {
+ return decay->backlog[SMOOTHSTEP_NSTEPS - 1];
+}
+
+/*
+ * Current epoch duration, in nanoseconds. Given that new epochs are started
+ * somewhat haphazardly, this is not necessarily exactly the time between any
+ * two calls to decay_maybe_advance_epoch; see the comments on fields in the
+ * decay_t.
+ */
+static inline uint64_t
+decay_epoch_duration_ns(const decay_t *decay) {
+ return nstime_ns(&decay->interval);
+}
+
+static inline bool
+decay_immediately(const decay_t *decay) {
+ ssize_t decay_ms = decay_ms_read(decay);
+ return decay_ms == 0;
+}
+
+static inline bool
+decay_disabled(const decay_t *decay) {
+ ssize_t decay_ms = decay_ms_read(decay);
+ return decay_ms < 0;
+}
+
+/* Returns true if decay is enabled and done gradually. */
+static inline bool
+decay_gradually(const decay_t *decay) {
+ ssize_t decay_ms = decay_ms_read(decay);
+ return decay_ms > 0;
+}
+
+/*
+ * Returns true if the passed in decay time setting is valid.
+ * < -1 : invalid
+ * -1 : never decay
+ * 0 : decay immediately
+ * > 0 : some positive decay time, up to a maximum allowed value of
+ * NSTIME_SEC_MAX * 1000, which corresponds to decaying somewhere in the early
+ * 27th century. By that time, we expect to have implemented alternate purging
+ * strategies.
+ */
+bool decay_ms_valid(ssize_t decay_ms);
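
Under that contract, a plausible implementation is just a pair of range checks (a sketch only; the actual decay.c may differ, and NSTIME_SEC_MAX is assumed to be jemalloc's maximum representable second count):

    bool
    decay_ms_valid(ssize_t decay_ms) {
        if (decay_ms < -1) {
            return false;
        }
        if (decay_ms == -1
            || (uint64_t)decay_ms <= NSTIME_SEC_MAX * (uint64_t)1000) {
            return true;
        }
        return false;
    }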
+
+/*
+ * As a precondition, the decay_t must be zeroed out (as if with memset).
+ *
+ * Returns true on error.
+ */
+bool decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms);
+
+/*
+ * Given an already-initialized decay_t, reinitialize it with the given decay
+ * time. The decay_t must have previously been initialized (and should not then
+ * be zeroed).
+ */
+void decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms);
+
+/*
+ * Compute how many of 'npages_new' pages we would need to purge in 'time'.
+ */
+uint64_t decay_npages_purge_in(decay_t *decay, nstime_t *time,
+ size_t npages_new);
+
+/* Returns true if the epoch advanced and there are pages to purge. */
+bool decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
+ size_t current_npages);
+
+/*
+ * Calculates wait time until a number of pages in the interval
+ * [0.5 * npages_threshold .. 1.5 * npages_threshold] should be purged.
+ *
+ * Returns number of nanoseconds or DECAY_UNBOUNDED_TIME_TO_PURGE in case of
+ * indefinite wait.
+ */
+uint64_t decay_ns_until_purge(decay_t *decay, size_t npages_current,
+ uint64_t npages_threshold);
+
+#endif /* JEMALLOC_INTERNAL_DECAY_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/ecache.h b/contrib/jemalloc/include/jemalloc/internal/ecache.h
new file mode 100644
index 000000000000..71cae3e34c38
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/ecache.h
@@ -0,0 +1,55 @@
+#ifndef JEMALLOC_INTERNAL_ECACHE_H
+#define JEMALLOC_INTERNAL_ECACHE_H
+
+#include "jemalloc/internal/eset.h"
+#include "jemalloc/internal/san.h"
+#include "jemalloc/internal/mutex.h"
+
+typedef struct ecache_s ecache_t;
+struct ecache_s {
+ malloc_mutex_t mtx;
+ eset_t eset;
+ eset_t guarded_eset;
+ /* All stored extents must be in the same state. */
+ extent_state_t state;
+ /* The index of the ehooks the ecache is associated with. */
+ unsigned ind;
+ /*
+ * If true, delay coalescing until eviction; otherwise coalesce during
+ * deallocation.
+ */
+ bool delay_coalesce;
+};
+
+static inline size_t
+ecache_npages_get(ecache_t *ecache) {
+ return eset_npages_get(&ecache->eset) +
+ eset_npages_get(&ecache->guarded_eset);
+}
+
+/* Get the number of extents in the given page size index. */
+static inline size_t
+ecache_nextents_get(ecache_t *ecache, pszind_t ind) {
+ return eset_nextents_get(&ecache->eset, ind) +
+ eset_nextents_get(&ecache->guarded_eset, ind);
+}
+
+/* Get the sum total bytes of the extents in the given page size index. */
+static inline size_t
+ecache_nbytes_get(ecache_t *ecache, pszind_t ind) {
+ return eset_nbytes_get(&ecache->eset, ind) +
+ eset_nbytes_get(&ecache->guarded_eset, ind);
+}
+
+static inline unsigned
+ecache_ind_get(ecache_t *ecache) {
+ return ecache->ind;
+}
+
+bool ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state,
+ unsigned ind, bool delay_coalesce);
+void ecache_prefork(tsdn_t *tsdn, ecache_t *ecache);
+void ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache);
+void ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache);
+
+#endif /* JEMALLOC_INTERNAL_ECACHE_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/edata.h b/contrib/jemalloc/include/jemalloc/internal/edata.h
new file mode 100644
index 000000000000..af039ea734af
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/edata.h
@@ -0,0 +1,698 @@
+#ifndef JEMALLOC_INTERNAL_EDATA_H
+#define JEMALLOC_INTERNAL_EDATA_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/bin_info.h"
+#include "jemalloc/internal/bit_util.h"
+#include "jemalloc/internal/hpdata.h"
+#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/ph.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/sc.h"
+#include "jemalloc/internal/slab_data.h"
+#include "jemalloc/internal/sz.h"
+#include "jemalloc/internal/typed_list.h"
+
+/*
+ * sizeof(edata_t) is 128 bytes on 64-bit architectures. Ensure the alignment
+ * to free up the low bits in the rtree leaf.
+ */
+#define EDATA_ALIGNMENT 128
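
Since 128 is a power of two, every EDATA_ALIGNMENT-aligned edata_t pointer has its low log2(128) = 7 bits clear, and those are the bits the rtree leaf can reuse as tags. A compile-time sanity check of that property:

    /* EDATA_ALIGNMENT == 128, per the definition above. */
    _Static_assert((128 & (128 - 1)) == 0, "must be a power of two");
    _Static_assert(128 == (1 << 7), "7 low bits free for rtree tagging");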
+
+enum extent_state_e {
+ extent_state_active = 0,
+ extent_state_dirty = 1,
+ extent_state_muzzy = 2,
+ extent_state_retained = 3,
+ extent_state_transition = 4, /* States below are intermediate. */
+ extent_state_merging = 5,
+ extent_state_max = 5 /* Sanity checking only. */
+};
+typedef enum extent_state_e extent_state_t;
+
+enum extent_head_state_e {
+ EXTENT_NOT_HEAD,
+ EXTENT_IS_HEAD /* See comments in ehooks_default_merge_impl(). */
+};
+typedef enum extent_head_state_e extent_head_state_t;
+
+/*
+ * Which implementation of the page allocator interface, (PAI, defined in
+ * pai.h) owns the given extent?
+ */
+enum extent_pai_e {
+ EXTENT_PAI_PAC = 0,
+ EXTENT_PAI_HPA = 1
+};
+typedef enum extent_pai_e extent_pai_t;
+
+struct e_prof_info_s {
+ /* Time when this was allocated. */
+ nstime_t e_prof_alloc_time;
+ /* Allocation request size. */
+ size_t e_prof_alloc_size;
+ /* Points to a prof_tctx_t. */
+ atomic_p_t e_prof_tctx;
+ /*
+ * Points to a prof_recent_t for the allocation; NULL
+ * means the recent allocation record no longer exists.
+ * Protected by prof_recent_alloc_mtx.
+ */
+ atomic_p_t e_prof_recent_alloc;
+};
+typedef struct e_prof_info_s e_prof_info_t;
+
+/*
+ * The information about a particular edata that lives in an emap. Space is
+ * more precious there (the information, plus the edata pointer, has to live in
+ * a 64-bit word if we want to enable a packed representation).
+ *
+ * There are two things that are special about the information here:
+ * - It's quicker to access. You have one fewer pointer hop, since finding the
+ * edata_t associated with an item always requires accessing the rtree leaf in
+ * which this data is stored.
+ * - It can be read unsynchronized, and without worrying about lifetime issues.
+ */
+typedef struct edata_map_info_s edata_map_info_t;
+struct edata_map_info_s {
+ bool slab;
+ szind_t szind;
+};
+
+typedef struct edata_cmp_summary_s edata_cmp_summary_t;
+struct edata_cmp_summary_s {
+ uint64_t sn;
+ uintptr_t addr;
+};
+
+/* Extent (span of pages). Use accessor functions for e_* fields. */
+typedef struct edata_s edata_t;
+ph_structs(edata_avail, edata_t);
+ph_structs(edata_heap, edata_t);
+struct edata_s {
+ /*
+ * Bitfield containing several fields:
+ *
+ * a: arena_ind
+ * b: slab
+ * c: committed
+ * p: pai
+ * z: zeroed
+ * g: guarded
+ * t: state
+ * i: szind
+ * f: nfree
+ * s: bin_shard
+ *
+ * 00000000 ... 0000ssss ssffffff ffffiiii iiiitttg zpcbaaaa aaaaaaaa
+ *
+ * arena_ind: Arena from which this extent came, or all 1 bits if
+ * unassociated.
+ *
+ * slab: The slab flag indicates whether the extent is used for a slab
+ * of small regions. This helps differentiate small size classes,
+ * and it indicates whether interior pointers can be looked up via
+ * iealloc().
+ *
+ * committed: The committed flag indicates whether physical memory is
+ * committed to the extent, whether explicitly or implicitly
+ * as on a system that overcommits and satisfies physical
+ * memory needs on demand via soft page faults.
+ *
+ * pai: The pai flag is an extent_pai_t.
+ *
+ * zeroed: The zeroed flag is used by extent recycling code to track
+ * whether memory is zero-filled.
+ *
+	 * guarded: The guarded flag is used by the sanitizer to track whether
+ * the extent has page guards around it.
+ *
+ * state: The state flag is an extent_state_t.
+ *
+ * szind: The szind flag indicates usable size class index for
+ * allocations residing in this extent, regardless of whether the
+ * extent is a slab. Extent size and usable size often differ
+ * even for non-slabs, either due to sz_large_pad or promotion of
+ * sampled small regions.
+ *
+ * nfree: Number of free regions in slab.
+ *
+ * bin_shard: the shard of the bin from which this extent came.
+ */
+ uint64_t e_bits;
+#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
+
+#define EDATA_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
+#define EDATA_BITS_ARENA_SHIFT 0
+#define EDATA_BITS_ARENA_MASK MASK(EDATA_BITS_ARENA_WIDTH, EDATA_BITS_ARENA_SHIFT)
+
+#define EDATA_BITS_SLAB_WIDTH 1
+#define EDATA_BITS_SLAB_SHIFT (EDATA_BITS_ARENA_WIDTH + EDATA_BITS_ARENA_SHIFT)
+#define EDATA_BITS_SLAB_MASK MASK(EDATA_BITS_SLAB_WIDTH, EDATA_BITS_SLAB_SHIFT)
+
+#define EDATA_BITS_COMMITTED_WIDTH 1
+#define EDATA_BITS_COMMITTED_SHIFT (EDATA_BITS_SLAB_WIDTH + EDATA_BITS_SLAB_SHIFT)
+#define EDATA_BITS_COMMITTED_MASK MASK(EDATA_BITS_COMMITTED_WIDTH, EDATA_BITS_COMMITTED_SHIFT)
+
+#define EDATA_BITS_PAI_WIDTH 1
+#define EDATA_BITS_PAI_SHIFT (EDATA_BITS_COMMITTED_WIDTH + EDATA_BITS_COMMITTED_SHIFT)
+#define EDATA_BITS_PAI_MASK MASK(EDATA_BITS_PAI_WIDTH, EDATA_BITS_PAI_SHIFT)
+
+#define EDATA_BITS_ZEROED_WIDTH 1
+#define EDATA_BITS_ZEROED_SHIFT (EDATA_BITS_PAI_WIDTH + EDATA_BITS_PAI_SHIFT)
+#define EDATA_BITS_ZEROED_MASK MASK(EDATA_BITS_ZEROED_WIDTH, EDATA_BITS_ZEROED_SHIFT)
+
+#define EDATA_BITS_GUARDED_WIDTH 1
+#define EDATA_BITS_GUARDED_SHIFT (EDATA_BITS_ZEROED_WIDTH + EDATA_BITS_ZEROED_SHIFT)
+#define EDATA_BITS_GUARDED_MASK MASK(EDATA_BITS_GUARDED_WIDTH, EDATA_BITS_GUARDED_SHIFT)
+
+#define EDATA_BITS_STATE_WIDTH 3
+#define EDATA_BITS_STATE_SHIFT (EDATA_BITS_GUARDED_WIDTH + EDATA_BITS_GUARDED_SHIFT)
+#define EDATA_BITS_STATE_MASK MASK(EDATA_BITS_STATE_WIDTH, EDATA_BITS_STATE_SHIFT)
+
+#define EDATA_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
+#define EDATA_BITS_SZIND_SHIFT (EDATA_BITS_STATE_WIDTH + EDATA_BITS_STATE_SHIFT)
+#define EDATA_BITS_SZIND_MASK MASK(EDATA_BITS_SZIND_WIDTH, EDATA_BITS_SZIND_SHIFT)
+
+#define EDATA_BITS_NFREE_WIDTH (SC_LG_SLAB_MAXREGS + 1)
+#define EDATA_BITS_NFREE_SHIFT (EDATA_BITS_SZIND_WIDTH + EDATA_BITS_SZIND_SHIFT)
+#define EDATA_BITS_NFREE_MASK MASK(EDATA_BITS_NFREE_WIDTH, EDATA_BITS_NFREE_SHIFT)
+
+#define EDATA_BITS_BINSHARD_WIDTH 6
+#define EDATA_BITS_BINSHARD_SHIFT (EDATA_BITS_NFREE_WIDTH + EDATA_BITS_NFREE_SHIFT)
+#define EDATA_BITS_BINSHARD_MASK MASK(EDATA_BITS_BINSHARD_WIDTH, EDATA_BITS_BINSHARD_SHIFT)
+
+#define EDATA_BITS_IS_HEAD_WIDTH 1
+#define EDATA_BITS_IS_HEAD_SHIFT (EDATA_BITS_BINSHARD_WIDTH + EDATA_BITS_BINSHARD_SHIFT)
+#define EDATA_BITS_IS_HEAD_MASK MASK(EDATA_BITS_IS_HEAD_WIDTH, EDATA_BITS_IS_HEAD_SHIFT)
+
+ /* Pointer to the extent that this structure is responsible for. */
+ void *e_addr;
+
+ union {
+ /*
+ * Extent size and serial number associated with the extent
+ * structure (different than the serial number for the extent at
+ * e_addr).
+ *
+ * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
+ */
+ size_t e_size_esn;
+ #define EDATA_SIZE_MASK ((size_t)~(PAGE-1))
+ #define EDATA_ESN_MASK ((size_t)PAGE-1)
+ /* Base extent size, which may not be a multiple of PAGE. */
+ size_t e_bsize;
+ };
+
+ /*
+ * If this edata is a user allocation from an HPA, it comes out of some
+	 * pageslab (we don't yet support hugepage allocations that don't fit
+ * into pageslabs). This tracks it.
+ */
+ hpdata_t *e_ps;
+
+ /*
+ * Serial number. These are not necessarily unique; splitting an extent
+ * results in two extents with the same serial number.
+ */
+ uint64_t e_sn;
+
+ union {
+ /*
+ * List linkage used when the edata_t is active; either in
+ * arena's large allocations or bin_t's slabs_full.
+ */
+ ql_elm(edata_t) ql_link_active;
+ /*
+ * Pairing heap linkage. Used whenever the extent is inactive
+ * (in the page allocators), or when it is active and in
+ * slabs_nonfull, or when the edata_t is unassociated with an
+ * extent and sitting in an edata_cache.
+ */
+ union {
+ edata_heap_link_t heap_link;
+ edata_avail_link_t avail_link;
+ };
+ };
+
+ union {
+ /*
+ * List linkage used when the extent is inactive:
+ * - Stashed dirty extents
+ * - Ecache LRU functionality.
+ */
+ ql_elm(edata_t) ql_link_inactive;
+ /* Small region slab metadata. */
+ slab_data_t e_slab_data;
+
+ /* Profiling data, used for large objects. */
+ e_prof_info_t e_prof_info;
+ };
+};
+
+TYPED_LIST(edata_list_active, edata_t, ql_link_active)
+TYPED_LIST(edata_list_inactive, edata_t, ql_link_inactive)
+
+static inline unsigned
+edata_arena_ind_get(const edata_t *edata) {
+ unsigned arena_ind = (unsigned)((edata->e_bits &
+ EDATA_BITS_ARENA_MASK) >> EDATA_BITS_ARENA_SHIFT);
+ assert(arena_ind < MALLOCX_ARENA_LIMIT);
+
+ return arena_ind;
+}
+
+static inline szind_t
+edata_szind_get_maybe_invalid(const edata_t *edata) {
+ szind_t szind = (szind_t)((edata->e_bits & EDATA_BITS_SZIND_MASK) >>
+ EDATA_BITS_SZIND_SHIFT);
+ assert(szind <= SC_NSIZES);
+ return szind;
+}
+
+static inline szind_t
+edata_szind_get(const edata_t *edata) {
+ szind_t szind = edata_szind_get_maybe_invalid(edata);
+ assert(szind < SC_NSIZES); /* Never call when "invalid". */
+ return szind;
+}
+
+static inline size_t
+edata_usize_get(const edata_t *edata) {
+ return sz_index2size(edata_szind_get(edata));
+}
+
+static inline unsigned
+edata_binshard_get(const edata_t *edata) {
+ unsigned binshard = (unsigned)((edata->e_bits &
+ EDATA_BITS_BINSHARD_MASK) >> EDATA_BITS_BINSHARD_SHIFT);
+ assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
+ return binshard;
+}
+
+static inline uint64_t
+edata_sn_get(const edata_t *edata) {
+ return edata->e_sn;
+}
+
+static inline extent_state_t
+edata_state_get(const edata_t *edata) {
+ return (extent_state_t)((edata->e_bits & EDATA_BITS_STATE_MASK) >>
+ EDATA_BITS_STATE_SHIFT);
+}
+
+static inline bool
+edata_guarded_get(const edata_t *edata) {
+ return (bool)((edata->e_bits & EDATA_BITS_GUARDED_MASK) >>
+ EDATA_BITS_GUARDED_SHIFT);
+}
+
+static inline bool
+edata_zeroed_get(const edata_t *edata) {
+ return (bool)((edata->e_bits & EDATA_BITS_ZEROED_MASK) >>
+ EDATA_BITS_ZEROED_SHIFT);
+}
+
+static inline bool
+edata_committed_get(const edata_t *edata) {
+ return (bool)((edata->e_bits & EDATA_BITS_COMMITTED_MASK) >>
+ EDATA_BITS_COMMITTED_SHIFT);
+}
+
+static inline extent_pai_t
+edata_pai_get(const edata_t *edata) {
+ return (extent_pai_t)((edata->e_bits & EDATA_BITS_PAI_MASK) >>
+ EDATA_BITS_PAI_SHIFT);
+}
+
+static inline bool
+edata_slab_get(const edata_t *edata) {
+ return (bool)((edata->e_bits & EDATA_BITS_SLAB_MASK) >>
+ EDATA_BITS_SLAB_SHIFT);
+}
+
+static inline unsigned
+edata_nfree_get(const edata_t *edata) {
+ assert(edata_slab_get(edata));
+ return (unsigned)((edata->e_bits & EDATA_BITS_NFREE_MASK) >>
+ EDATA_BITS_NFREE_SHIFT);
+}
+
+static inline void *
+edata_base_get(const edata_t *edata) {
+ assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
+ !edata_slab_get(edata));
+ return PAGE_ADDR2BASE(edata->e_addr);
+}
+
+static inline void *
+edata_addr_get(const edata_t *edata) {
+ assert(edata->e_addr == PAGE_ADDR2BASE(edata->e_addr) ||
+ !edata_slab_get(edata));
+ return edata->e_addr;
+}
+
+static inline size_t
+edata_size_get(const edata_t *edata) {
+ return (edata->e_size_esn & EDATA_SIZE_MASK);
+}
+
+static inline size_t
+edata_esn_get(const edata_t *edata) {
+ return (edata->e_size_esn & EDATA_ESN_MASK);
+}
+
+static inline size_t
+edata_bsize_get(const edata_t *edata) {
+ return edata->e_bsize;
+}
+
+static inline hpdata_t *
+edata_ps_get(const edata_t *edata) {
+ assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
+ return edata->e_ps;
+}
+
+static inline void *
+edata_before_get(const edata_t *edata) {
+ return (void *)((uintptr_t)edata_base_get(edata) - PAGE);
+}
+
+static inline void *
+edata_last_get(const edata_t *edata) {
+ return (void *)((uintptr_t)edata_base_get(edata) +
+ edata_size_get(edata) - PAGE);
+}
+
+static inline void *
+edata_past_get(const edata_t *edata) {
+ return (void *)((uintptr_t)edata_base_get(edata) +
+ edata_size_get(edata));
+}
+
+static inline slab_data_t *
+edata_slab_data_get(edata_t *edata) {
+ assert(edata_slab_get(edata));
+ return &edata->e_slab_data;
+}
+
+static inline const slab_data_t *
+edata_slab_data_get_const(const edata_t *edata) {
+ assert(edata_slab_get(edata));
+ return &edata->e_slab_data;
+}
+
+static inline prof_tctx_t *
+edata_prof_tctx_get(const edata_t *edata) {
+ return (prof_tctx_t *)atomic_load_p(&edata->e_prof_info.e_prof_tctx,
+ ATOMIC_ACQUIRE);
+}
+
+static inline const nstime_t *
+edata_prof_alloc_time_get(const edata_t *edata) {
+ return &edata->e_prof_info.e_prof_alloc_time;
+}
+
+static inline size_t
+edata_prof_alloc_size_get(const edata_t *edata) {
+ return edata->e_prof_info.e_prof_alloc_size;
+}
+
+static inline prof_recent_t *
+edata_prof_recent_alloc_get_dont_call_directly(const edata_t *edata) {
+ return (prof_recent_t *)atomic_load_p(
+ &edata->e_prof_info.e_prof_recent_alloc, ATOMIC_RELAXED);
+}
+
+static inline void
+edata_arena_ind_set(edata_t *edata, unsigned arena_ind) {
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_ARENA_MASK) |
+ ((uint64_t)arena_ind << EDATA_BITS_ARENA_SHIFT);
+}
+
+static inline void
+edata_binshard_set(edata_t *edata, unsigned binshard) {
+ /* The assertion assumes szind is set already. */
+ assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_BINSHARD_MASK) |
+ ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT);
+}
+
+static inline void
+edata_addr_set(edata_t *edata, void *addr) {
+ edata->e_addr = addr;
+}
+
+static inline void
+edata_size_set(edata_t *edata, size_t size) {
+ assert((size & ~EDATA_SIZE_MASK) == 0);
+ edata->e_size_esn = size | (edata->e_size_esn & ~EDATA_SIZE_MASK);
+}
+
+static inline void
+edata_esn_set(edata_t *edata, size_t esn) {
+ edata->e_size_esn = (edata->e_size_esn & ~EDATA_ESN_MASK) | (esn &
+ EDATA_ESN_MASK);
+}
+
+static inline void
+edata_bsize_set(edata_t *edata, size_t bsize) {
+ edata->e_bsize = bsize;
+}
+
+static inline void
+edata_ps_set(edata_t *edata, hpdata_t *ps) {
+ assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
+ edata->e_ps = ps;
+}
+
+static inline void
+edata_szind_set(edata_t *edata, szind_t szind) {
+ assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_SZIND_MASK) |
+ ((uint64_t)szind << EDATA_BITS_SZIND_SHIFT);
+}
+
+static inline void
+edata_nfree_set(edata_t *edata, unsigned nfree) {
+ assert(edata_slab_get(edata));
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_NFREE_MASK) |
+ ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
+}
+
+static inline void
+edata_nfree_binshard_set(edata_t *edata, unsigned nfree, unsigned binshard) {
+ /* The assertion assumes szind is set already. */
+ assert(binshard < bin_infos[edata_szind_get(edata)].n_shards);
+ edata->e_bits = (edata->e_bits &
+ (~EDATA_BITS_NFREE_MASK & ~EDATA_BITS_BINSHARD_MASK)) |
+ ((uint64_t)binshard << EDATA_BITS_BINSHARD_SHIFT) |
+ ((uint64_t)nfree << EDATA_BITS_NFREE_SHIFT);
+}
+
+static inline void
+edata_nfree_inc(edata_t *edata) {
+ assert(edata_slab_get(edata));
+ edata->e_bits += ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
+}
+
+static inline void
+edata_nfree_dec(edata_t *edata) {
+ assert(edata_slab_get(edata));
+ edata->e_bits -= ((uint64_t)1U << EDATA_BITS_NFREE_SHIFT);
+}
+
+static inline void
+edata_nfree_sub(edata_t *edata, uint64_t n) {
+ assert(edata_slab_get(edata));
+ edata->e_bits -= (n << EDATA_BITS_NFREE_SHIFT);
+}
+
+static inline void
+edata_sn_set(edata_t *edata, uint64_t sn) {
+ edata->e_sn = sn;
+}
+
+static inline void
+edata_state_set(edata_t *edata, extent_state_t state) {
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_STATE_MASK) |
+ ((uint64_t)state << EDATA_BITS_STATE_SHIFT);
+}
+
+static inline void
+edata_guarded_set(edata_t *edata, bool guarded) {
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_GUARDED_MASK) |
+ ((uint64_t)guarded << EDATA_BITS_GUARDED_SHIFT);
+}
+
+static inline void
+edata_zeroed_set(edata_t *edata, bool zeroed) {
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_ZEROED_MASK) |
+ ((uint64_t)zeroed << EDATA_BITS_ZEROED_SHIFT);
+}
+
+static inline void
+edata_committed_set(edata_t *edata, bool committed) {
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_COMMITTED_MASK) |
+ ((uint64_t)committed << EDATA_BITS_COMMITTED_SHIFT);
+}
+
+static inline void
+edata_pai_set(edata_t *edata, extent_pai_t pai) {
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_PAI_MASK) |
+ ((uint64_t)pai << EDATA_BITS_PAI_SHIFT);
+}
+
+static inline void
+edata_slab_set(edata_t *edata, bool slab) {
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_SLAB_MASK) |
+ ((uint64_t)slab << EDATA_BITS_SLAB_SHIFT);
+}
+
+static inline void
+edata_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
+ atomic_store_p(&edata->e_prof_info.e_prof_tctx, tctx, ATOMIC_RELEASE);
+}
+
+static inline void
+edata_prof_alloc_time_set(edata_t *edata, nstime_t *t) {
+ nstime_copy(&edata->e_prof_info.e_prof_alloc_time, t);
+}
+
+static inline void
+edata_prof_alloc_size_set(edata_t *edata, size_t size) {
+ edata->e_prof_info.e_prof_alloc_size = size;
+}
+
+static inline void
+edata_prof_recent_alloc_set_dont_call_directly(edata_t *edata,
+ prof_recent_t *recent_alloc) {
+ atomic_store_p(&edata->e_prof_info.e_prof_recent_alloc, recent_alloc,
+ ATOMIC_RELAXED);
+}
+
+static inline bool
+edata_is_head_get(edata_t *edata) {
+ return (bool)((edata->e_bits & EDATA_BITS_IS_HEAD_MASK) >>
+ EDATA_BITS_IS_HEAD_SHIFT);
+}
+
+static inline void
+edata_is_head_set(edata_t *edata, bool is_head) {
+ edata->e_bits = (edata->e_bits & ~EDATA_BITS_IS_HEAD_MASK) |
+ ((uint64_t)is_head << EDATA_BITS_IS_HEAD_SHIFT);
+}
+
+static inline bool
+edata_state_in_transition(extent_state_t state) {
+ return state >= extent_state_transition;
+}
+
+/*
+ * Because this function is implemented as a sequence of bitfield modifications,
+ * even though each individual bit is properly initialized, we technically read
+ * uninitialized data within it. This is mostly fine, since most callers get
+ * their edatas from zeroing sources, but callers who make stack edata_ts need
+ * to manually zero them.
+ */
+static inline void
+edata_init(edata_t *edata, unsigned arena_ind, void *addr, size_t size,
+ bool slab, szind_t szind, uint64_t sn, extent_state_t state, bool zeroed,
+ bool committed, extent_pai_t pai, extent_head_state_t is_head) {
+ assert(addr == PAGE_ADDR2BASE(addr) || !slab);
+
+ edata_arena_ind_set(edata, arena_ind);
+ edata_addr_set(edata, addr);
+ edata_size_set(edata, size);
+ edata_slab_set(edata, slab);
+ edata_szind_set(edata, szind);
+ edata_sn_set(edata, sn);
+ edata_state_set(edata, state);
+ edata_guarded_set(edata, false);
+ edata_zeroed_set(edata, zeroed);
+ edata_committed_set(edata, committed);
+ edata_pai_set(edata, pai);
+ edata_is_head_set(edata, is_head == EXTENT_IS_HEAD);
+ if (config_prof) {
+ edata_prof_tctx_set(edata, NULL);
+ }
+}
+
+static inline void
+edata_binit(edata_t *edata, void *addr, size_t bsize, uint64_t sn) {
+ edata_arena_ind_set(edata, (1U << MALLOCX_ARENA_BITS) - 1);
+ edata_addr_set(edata, addr);
+ edata_bsize_set(edata, bsize);
+ edata_slab_set(edata, false);
+ edata_szind_set(edata, SC_NSIZES);
+ edata_sn_set(edata, sn);
+ edata_state_set(edata, extent_state_active);
+ edata_guarded_set(edata, false);
+ edata_zeroed_set(edata, true);
+ edata_committed_set(edata, true);
+ /*
+	 * This isn't strictly true, but base-allocated extents never get
+	 * deallocated and can't be looked up in the emap, so there's no sense
+	 * in wasting a state bit to encode this fact.
+ */
+ edata_pai_set(edata, EXTENT_PAI_PAC);
+}
+
+static inline int
+edata_esn_comp(const edata_t *a, const edata_t *b) {
+ size_t a_esn = edata_esn_get(a);
+ size_t b_esn = edata_esn_get(b);
+
+ return (a_esn > b_esn) - (a_esn < b_esn);
+}
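
The (a > b) - (a < b) idiom used in this and the following comparators returns -1, 0, or 1 without the overflow hazard of computing a - b directly:

    #include <assert.h>
    #include <stddef.h>

    static int
    cmp_size(size_t a, size_t b) {
        return (a > b) - (a < b);
    }

    int
    main(void) {
        assert(cmp_size(1, 2) == -1);
        assert(cmp_size(2, 2) == 0);
        assert(cmp_size(3, 2) == 1);
        return 0;
    }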
+
+static inline int
+edata_ead_comp(const edata_t *a, const edata_t *b) {
+ uintptr_t a_eaddr = (uintptr_t)a;
+ uintptr_t b_eaddr = (uintptr_t)b;
+
+ return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
+}
+
+static inline edata_cmp_summary_t
+edata_cmp_summary_get(const edata_t *edata) {
+ return (edata_cmp_summary_t){edata_sn_get(edata),
+ (uintptr_t)edata_addr_get(edata)};
+}
+
+static inline int
+edata_cmp_summary_comp(edata_cmp_summary_t a, edata_cmp_summary_t b) {
+ int ret;
+ ret = (a.sn > b.sn) - (a.sn < b.sn);
+ if (ret != 0) {
+ return ret;
+ }
+ ret = (a.addr > b.addr) - (a.addr < b.addr);
+ return ret;
+}
+
+static inline int
+edata_snad_comp(const edata_t *a, const edata_t *b) {
+ edata_cmp_summary_t a_cmp = edata_cmp_summary_get(a);
+ edata_cmp_summary_t b_cmp = edata_cmp_summary_get(b);
+
+ return edata_cmp_summary_comp(a_cmp, b_cmp);
+}
+
+static inline int
+edata_esnead_comp(const edata_t *a, const edata_t *b) {
+ int ret;
+
+ ret = edata_esn_comp(a, b);
+ if (ret != 0) {
+ return ret;
+ }
+
+ ret = edata_ead_comp(a, b);
+ return ret;
+}
+
+ph_proto(, edata_avail, edata_t)
+ph_proto(, edata_heap, edata_t)
+
+#endif /* JEMALLOC_INTERNAL_EDATA_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/edata_cache.h b/contrib/jemalloc/include/jemalloc/internal/edata_cache.h
new file mode 100644
index 000000000000..8b6c0ef79499
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/edata_cache.h
@@ -0,0 +1,49 @@
+#ifndef JEMALLOC_INTERNAL_EDATA_CACHE_H
+#define JEMALLOC_INTERNAL_EDATA_CACHE_H
+
+#include "jemalloc/internal/base.h"
+
+/* For tests only. */
+#define EDATA_CACHE_FAST_FILL 4
+
+/*
+ * A cache of edata_t structures allocated via base_alloc_edata (as opposed to
+ * the underlying extents they describe). The contents of returned edata_t
+ * objects are garbage and cannot be relied upon.
+ */
+
+typedef struct edata_cache_s edata_cache_t;
+struct edata_cache_s {
+ edata_avail_t avail;
+ atomic_zu_t count;
+ malloc_mutex_t mtx;
+ base_t *base;
+};
+
+bool edata_cache_init(edata_cache_t *edata_cache, base_t *base);
+edata_t *edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache);
+void edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata);
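+
+/*
+ * Illustrative get/put round trip (a sketch only; `base' and `tsdn' are
+ * supplied by the surrounding arena code):
+ *
+ *     edata_cache_t ec;
+ *     if (!edata_cache_init(&ec, base)) {
+ *         edata_t *e = edata_cache_get(tsdn, &ec);
+ *         if (e != NULL) {
+ *             edata_cache_put(tsdn, &ec, e);
+ *         }
+ *     }
+ */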
+
+void edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache);
+void edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache);
+void edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache);
+
+/*
+ * An edata_cache_fast is like an edata_cache, but it relies on external
+ * synchronization and avoids first-fit strategies.
+ */
+
+typedef struct edata_cache_fast_s edata_cache_fast_t;
+struct edata_cache_fast_s {
+ edata_list_inactive_t list;
+ edata_cache_t *fallback;
+ bool disabled;
+};
+
+void edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback);
+edata_t *edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs);
+void edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs,
+ edata_t *edata);
+void edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs);
+
+#endif /* JEMALLOC_INTERNAL_EDATA_CACHE_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/ehooks.h b/contrib/jemalloc/include/jemalloc/internal/ehooks.h
new file mode 100644
index 000000000000..8d9513e258a8
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/ehooks.h
@@ -0,0 +1,412 @@
+#ifndef JEMALLOC_INTERNAL_EHOOKS_H
+#define JEMALLOC_INTERNAL_EHOOKS_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/extent_mmap.h"
+
+/*
+ * This module is the internal interface to the extent hooks (both
+ * user-specified and external). Eventually, this will give us the flexibility
+ * to use multiple different versions of user-visible extent-hook APIs under a
+ * single user interface.
+ *
+ * Current API expansions (not available to anyone but the default hooks yet):
+ * - Head state tracking. Hooks can decide whether or not to merge two
+ * extents based on whether or not one of them is the head (i.e. was
+ * allocated on its own). The higher-addressed extent loses its "head"
+ * status.
+ */
+
+extern const extent_hooks_t ehooks_default_extent_hooks;
+
+typedef struct ehooks_s ehooks_t;
+struct ehooks_s {
+ /*
+ * The user-visible id that goes with the ehooks (i.e. that of the base
+ * they're a part of, the associated arena's index within the arenas
+ * array).
+ */
+ unsigned ind;
+ /* Logically an extent_hooks_t *. */
+ atomic_p_t ptr;
+};
+
+/*
+ * These are not really part of the public API. Each hook has a fast-path for
+ * the default-hooks case that can avoid various small inefficiencies:
+ * - Forgetting tsd and then calling tsd_get within the hook.
+ * - Getting more state than necessary out of the extent_t.
+ * - Doing arena_ind -> arena -> arena_ind lookups.
+ * By making the calls to these functions visible to the compiler, it can move
+ * those extra bits of computation down below the fast-paths where they get
+ * ignored.
+ */
+void *ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
+bool ehooks_default_dalloc_impl(void *addr, size_t size);
+void ehooks_default_destroy_impl(void *addr, size_t size);
+bool ehooks_default_commit_impl(void *addr, size_t offset, size_t length);
+bool ehooks_default_decommit_impl(void *addr, size_t offset, size_t length);
+#ifdef PAGES_CAN_PURGE_LAZY
+bool ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length);
+#endif
+#ifdef PAGES_CAN_PURGE_FORCED
+bool ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length);
+#endif
+bool ehooks_default_split_impl(void);
+/*
+ * Merge is the only default extent hook we declare -- see the comment in
+ * ehooks_merge.
+ */
+bool ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a,
+ size_t size_a, void *addr_b, size_t size_b, bool committed,
+ unsigned arena_ind);
+bool ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b);
+void ehooks_default_zero_impl(void *addr, size_t size);
+void ehooks_default_guard_impl(void *guard1, void *guard2);
+void ehooks_default_unguard_impl(void *guard1, void *guard2);
+
+/*
+ * We don't officially support reentrancy from within the extent hooks. But
+ * various people who sit within throwing distance of the jemalloc team want
+ * that functionality in certain limited cases. The default reentrancy guards
+ * assert that we're not reentrant from a0 (since it's the bootstrap arena,
+ * where reentrant allocations would be redirected), which we would incorrectly
+ * trigger in cases where a0 has extent hooks (those hooks themselves can't be
+ * reentrant, then, but there are reasonable uses for such functionality, like
+ * putting internal metadata on hugepages). Therefore, we use the raw
+ * reentrancy guards.
+ *
+ * Eventually, we need to think more carefully about whether and where we
+ * support allocating from within extent hooks (and what that means for things
+ * like profiling, stats collection, etc.), and document what the guarantee is.
+ */
+static inline void
+ehooks_pre_reentrancy(tsdn_t *tsdn) {
+ tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
+ tsd_pre_reentrancy_raw(tsd);
+}
+
+static inline void
+ehooks_post_reentrancy(tsdn_t *tsdn) {
+ tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
+ tsd_post_reentrancy_raw(tsd);
+}
+
+/* Beginning of the public API. */
+void ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind);
+
+static inline unsigned
+ehooks_ind_get(const ehooks_t *ehooks) {
+ return ehooks->ind;
+}
+
+static inline void
+ehooks_set_extent_hooks_ptr(ehooks_t *ehooks, extent_hooks_t *extent_hooks) {
+ atomic_store_p(&ehooks->ptr, extent_hooks, ATOMIC_RELEASE);
+}
+
+static inline extent_hooks_t *
+ehooks_get_extent_hooks_ptr(ehooks_t *ehooks) {
+ return (extent_hooks_t *)atomic_load_p(&ehooks->ptr, ATOMIC_ACQUIRE);
+}
+
+static inline bool
+ehooks_are_default(ehooks_t *ehooks) {
+ return ehooks_get_extent_hooks_ptr(ehooks) ==
+ &ehooks_default_extent_hooks;
+}
+
+/*
+ * In some cases, a caller needs to allocate resources before attempting to call
+ * a hook. If that hook is doomed to fail, this is wasteful. We therefore
+ * include some checks for such cases.
+ */
+static inline bool
+ehooks_dalloc_will_fail(ehooks_t *ehooks) {
+ if (ehooks_are_default(ehooks)) {
+ return opt_retain;
+ } else {
+ return ehooks_get_extent_hooks_ptr(ehooks)->dalloc == NULL;
+ }
+}
+
+static inline bool
+ehooks_split_will_fail(ehooks_t *ehooks) {
+ return ehooks_get_extent_hooks_ptr(ehooks)->split == NULL;
+}
+
+static inline bool
+ehooks_merge_will_fail(ehooks_t *ehooks) {
+ return ehooks_get_extent_hooks_ptr(ehooks)->merge == NULL;
+}
+
+static inline bool
+ehooks_guard_will_fail(ehooks_t *ehooks) {
+ /*
+ * Before the guard hooks are officially introduced, limit the use to
+ * the default hooks only.
+ */
+ return !ehooks_are_default(ehooks);
+}
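+
+/*
+ * Sketch of the intended pattern (caller-side, illustrative only):
+ *
+ *     if (!ehooks_dalloc_will_fail(ehooks)) {
+ *         (allocate any bookkeeping, then call ehooks_dalloc())
+ *     }
+ */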
+
+/*
+ * Some hooks are required to return zeroed memory in certain situations. In
+ * debug mode, we do some heuristic checks that they did what they were supposed
+ * to.
+ *
+ * This isn't really ehooks-specific (i.e. anyone can check for zeroed memory).
+ * But incorrect zero information indicates an ehook bug.
+ */
+static inline void
+ehooks_debug_zero_check(void *addr, size_t size) {
+ assert(((uintptr_t)addr & PAGE_MASK) == 0);
+ assert((size & PAGE_MASK) == 0);
+ assert(size > 0);
+ if (config_debug) {
+ /* Check the whole first page. */
+ size_t *p = (size_t *)addr;
+ for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
+ assert(p[i] == 0);
+ }
+ /*
+ * And 4 spots within. There's a tradeoff here; the larger
+ * this number, the more likely it is that we'll catch a bug
+ * where ehooks return a sparsely non-zero range. But
+ * increasing the number of checks also increases the number of
+ * page faults in debug mode. FreeBSD does much of its
+ * day-to-day development work in debug mode, so we don't want
+ * even the debug builds to be too slow.
+ */
+ const size_t nchecks = 4;
+ assert(PAGE >= sizeof(size_t) * nchecks);
+ for (size_t i = 0; i < nchecks; ++i) {
+ assert(p[i * (size / sizeof(size_t) / nchecks)] == 0);
+ }
+ }
+}
+
+static inline void *
+ehooks_alloc(tsdn_t *tsdn, ehooks_t *ehooks, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit) {
+ bool orig_zero = *zero;
+ void *ret;
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
+ if (extent_hooks == &ehooks_default_extent_hooks) {
+ ret = ehooks_default_alloc_impl(tsdn, new_addr, size,
+ alignment, zero, commit, ehooks_ind_get(ehooks));
+ } else {
+ ehooks_pre_reentrancy(tsdn);
+ ret = extent_hooks->alloc(extent_hooks, new_addr, size,
+ alignment, zero, commit, ehooks_ind_get(ehooks));
+ ehooks_post_reentrancy(tsdn);
+ }
+ assert(new_addr == NULL || ret == NULL || new_addr == ret);
+ assert(!orig_zero || *zero);
+ if (*zero && ret != NULL) {
+ ehooks_debug_zero_check(ret, size);
+ }
+ return ret;
+}
+
+static inline bool
+ehooks_dalloc(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
+ bool committed) {
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
+ if (extent_hooks == &ehooks_default_extent_hooks) {
+ return ehooks_default_dalloc_impl(addr, size);
+ } else if (extent_hooks->dalloc == NULL) {
+ return true;
+ } else {
+ ehooks_pre_reentrancy(tsdn);
+ bool err = extent_hooks->dalloc(extent_hooks, addr, size,
+ committed, ehooks_ind_get(ehooks));
+ ehooks_post_reentrancy(tsdn);
+ return err;
+ }
+}
+
+static inline void
+ehooks_destroy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
+ bool committed) {
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
+ if (extent_hooks == &ehooks_default_extent_hooks) {
+ ehooks_default_destroy_impl(addr, size);
+ } else if (extent_hooks->destroy == NULL) {
+ /* Do nothing. */
+ } else {
+ ehooks_pre_reentrancy(tsdn);
+ extent_hooks->destroy(extent_hooks, addr, size, committed,
+ ehooks_ind_get(ehooks));
+ ehooks_post_reentrancy(tsdn);
+ }
+}
+
+static inline bool
+ehooks_commit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
+ size_t offset, size_t length) {
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
+ bool err;
+ if (extent_hooks == &ehooks_default_extent_hooks) {
+ err = ehooks_default_commit_impl(addr, offset, length);
+ } else if (extent_hooks->commit == NULL) {
+ err = true;
+ } else {
+ ehooks_pre_reentrancy(tsdn);
+ err = extent_hooks->commit(extent_hooks, addr, size,
+ offset, length, ehooks_ind_get(ehooks));
+ ehooks_post_reentrancy(tsdn);
+ }
+ if (!err) {
+ ehooks_debug_zero_check(addr, size);
+ }
+ return err;
+}
+
+static inline bool
+ehooks_decommit(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
+ size_t offset, size_t length) {
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
+ if (extent_hooks == &ehooks_default_extent_hooks) {
+ return ehooks_default_decommit_impl(addr, offset, length);
+ } else if (extent_hooks->decommit == NULL) {
+ return true;
+ } else {
+ ehooks_pre_reentrancy(tsdn);
+ bool err = extent_hooks->decommit(extent_hooks, addr, size,
+ offset, length, ehooks_ind_get(ehooks));
+ ehooks_post_reentrancy(tsdn);
+ return err;
+ }
+}
+
+static inline bool
+ehooks_purge_lazy(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
+ size_t offset, size_t length) {
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
+#ifdef PAGES_CAN_PURGE_LAZY
+ if (extent_hooks == &ehooks_default_extent_hooks) {
+ return ehooks_default_purge_lazy_impl(addr, offset, length);
+ }
+#endif
+ if (extent_hooks->purge_lazy == NULL) {
+ return true;
+ } else {
+ ehooks_pre_reentrancy(tsdn);
+ bool err = extent_hooks->purge_lazy(extent_hooks, addr, size,
+ offset, length, ehooks_ind_get(ehooks));
+ ehooks_post_reentrancy(tsdn);
+ return err;
+ }
+}
+
+static inline bool
+ehooks_purge_forced(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
+ size_t offset, size_t length) {
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
+ /*
+ * It would be correct to have an ehooks_debug_zero_check call at the end
+ * of this function; purge_forced is required to zero. But checking
+ * would touch the page in question, which may have performance
+ * consequences (imagine the hooks are using hugepages, with a global
+ * zero page off). Even in debug mode, it's usually a good idea to
+ * avoid cases that can dramatically increase memory consumption.
+ */
+#ifdef PAGES_CAN_PURGE_FORCED
+ if (extent_hooks == &ehooks_default_extent_hooks) {
+ return ehooks_default_purge_forced_impl(addr, offset, length);
+ }
+#endif
+ if (extent_hooks->purge_forced == NULL) {
+ return true;
+ } else {
+ ehooks_pre_reentrancy(tsdn);
+ bool err = extent_hooks->purge_forced(extent_hooks, addr, size,
+ offset, length, ehooks_ind_get(ehooks));
+ ehooks_post_reentrancy(tsdn);
+ return err;
+ }
+}
+
+static inline bool
+ehooks_split(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size,
+ size_t size_a, size_t size_b, bool committed) {
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
+ if (ehooks_are_default(ehooks)) {
+ return ehooks_default_split_impl();
+ } else if (extent_hooks->split == NULL) {
+ return true;
+ } else {
+ ehooks_pre_reentrancy(tsdn);
+ bool err = extent_hooks->split(extent_hooks, addr, size, size_a,
+ size_b, committed, ehooks_ind_get(ehooks));
+ ehooks_post_reentrancy(tsdn);
+ return err;
+ }
+}
+
+static inline bool
+ehooks_merge(tsdn_t *tsdn, ehooks_t *ehooks, void *addr_a, size_t size_a,
+ void *addr_b, size_t size_b, bool committed) {
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
+ if (extent_hooks == &ehooks_default_extent_hooks) {
+ return ehooks_default_merge_impl(tsdn, addr_a, addr_b);
+ } else if (extent_hooks->merge == NULL) {
+ return true;
+ } else {
+ ehooks_pre_reentrancy(tsdn);
+ bool err = extent_hooks->merge(extent_hooks, addr_a, size_a,
+ addr_b, size_b, committed, ehooks_ind_get(ehooks));
+ ehooks_post_reentrancy(tsdn);
+ return err;
+ }
+}
+
+static inline void
+ehooks_zero(tsdn_t *tsdn, ehooks_t *ehooks, void *addr, size_t size) {
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
+ if (extent_hooks == &ehooks_default_extent_hooks) {
+ ehooks_default_zero_impl(addr, size);
+ } else {
+ /*
+ * It would be correct to try using the user-provided purge
+ * hooks (since they are required to have zeroed the extent if
+ * they indicate success), but we don't necessarily know their
+ * cost. We'll be conservative and use memset.
+ */
+ memset(addr, 0, size);
+ }
+}
+
+static inline bool
+ehooks_guard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
+ bool err;
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
+
+ if (extent_hooks == &ehooks_default_extent_hooks) {
+ ehooks_default_guard_impl(guard1, guard2);
+ err = false;
+ } else {
+ err = true;
+ }
+
+ return err;
+}
+
+static inline bool
+ehooks_unguard(tsdn_t *tsdn, ehooks_t *ehooks, void *guard1, void *guard2) {
+ bool err;
+ extent_hooks_t *extent_hooks = ehooks_get_extent_hooks_ptr(ehooks);
+
+ if (extent_hooks == &ehooks_default_extent_hooks) {
+ ehooks_default_unguard_impl(guard1, guard2);
+ err = false;
+ } else {
+ err = true;
+ }
+
+ return err;
+}
+
+#endif /* JEMALLOC_INTERNAL_EHOOKS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/emap.h b/contrib/jemalloc/include/jemalloc/internal/emap.h
new file mode 100644
index 000000000000..847af3278de3
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/emap.h
@@ -0,0 +1,357 @@
+#ifndef JEMALLOC_INTERNAL_EMAP_H
+#define JEMALLOC_INTERNAL_EMAP_H
+
+#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/rtree.h"
+
+/*
+ * Note: ends without a semicolon, so that
+ * EMAP_DECLARE_RTREE_CTX;
+ * in uses will avoid empty-statement warnings.
+ */
+#define EMAP_DECLARE_RTREE_CTX \
+ rtree_ctx_t rtree_ctx_fallback; \
+ rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback)
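+
+/*
+ * Usage sketch (the semicolon comes from the use site, per the note above;
+ * `f' is an illustrative name):
+ *
+ *     static void
+ *     f(tsdn_t *tsdn, emap_t *emap, const void *ptr) {
+ *         EMAP_DECLARE_RTREE_CTX;
+ *         rtree_read(tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr);
+ *     }
+ */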
+
+typedef struct emap_s emap_t;
+struct emap_s {
+ rtree_t rtree;
+};
+
+/* Used to pass rtree lookup context down the path. */
+typedef struct emap_alloc_ctx_t emap_alloc_ctx_t;
+struct emap_alloc_ctx_t {
+ szind_t szind;
+ bool slab;
+};
+
+typedef struct emap_full_alloc_ctx_s emap_full_alloc_ctx_t;
+struct emap_full_alloc_ctx_s {
+ szind_t szind;
+ bool slab;
+ edata_t *edata;
+};
+
+bool emap_init(emap_t *emap, base_t *base, bool zeroed);
+
+void emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
+ bool slab);
+
+void emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+ extent_state_t state);
+
+/*
+ * The two acquire functions below allow accessing neighbor edatas, if it's safe
+ * and valid to do so (i.e. from the same arena, in the same state, etc.). This
+ * is necessary because the ecache locks are state based, and only protect
+ * edatas with the same state. Therefore the neighbor edata's state needs to be
+ * verified first, before chasing the edata pointer. The returned edata will be
+ * in an acquired state, meaning other threads will be prevented from accessing
+ * it, even if technically the edata can still be discovered from the rtree.
+ *
+ * This means that, at any moment when holding pointers to edata, either one of
+ * the state based locks is held (and the edatas are all in the protected
+ * state), or the edatas are in an acquired state (e.g. active or merging). The
+ * acquire operation itself (changing the edata to an acquired state) is done
+ * under the state locks.
+ */
+edata_t *emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap,
+ edata_t *edata, extent_pai_t pai, extent_state_t expected_state,
+ bool forward);
+edata_t *emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap,
+ edata_t *edata, extent_pai_t pai, extent_state_t expected_state);
+void emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+ extent_state_t new_state);
+
+/*
+ * Associate the given edata with its beginning and end address, setting the
+ * szind and slab info appropriately.
+ * Returns true on error (i.e. resource exhaustion).
+ */
+bool emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+ szind_t szind, bool slab);
+
+/*
+ * Does the same thing, but with the interior of the range, for slab
+ * allocations.
+ *
+ * You might wonder why we don't just have a single emap_register function that
+ * does both depending on the value of 'slab'. The answer is twofold:
+ * - As a practical matter, in places like the extract->split->commit pathway,
+ * we defer the interior operation until we're sure that the commit won't fail
+ * (but we have to register the split boundaries there).
+ * - In general, we're trying to move to a world where the page-specific
+ * allocator doesn't know as much about how the pages it allocates will be
+ * used, and passing a 'slab' parameter everywhere makes that more
+ * complicated.
+ *
+ * Unlike the boundary version, this function can't fail; this is because slabs
+ * can't get big enough to touch a new page that neither of the boundaries
+ * touched, so no allocation is necessary to fill the interior once the boundary
+ * has been touched.
+ */
+void emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+ szind_t szind);
+
+void emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
+void emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
+
+typedef struct emap_prepare_s emap_prepare_t;
+struct emap_prepare_s {
+ rtree_leaf_elm_t *lead_elm_a;
+ rtree_leaf_elm_t *lead_elm_b;
+ rtree_leaf_elm_t *trail_elm_a;
+ rtree_leaf_elm_t *trail_elm_b;
+};
+
+/*
+ * These functions implement the emap metadata management for merging,
+ * splitting, and reusing extents. In particular, they set the boundary
+ * mappings from addresses to edatas. If the result is going to be used as a
+ * slab, you still need to call emap_register_interior on it, though.
+ *
+ * Remap simply changes the szind and slab status of an extent's boundary
+ * mappings. If the extent is not a slab, it doesn't bother with updating the
+ * end mapping (since lookups only occur in the interior of an extent for
+ * slabs). Since the szind and slab status only make sense for active extents,
+ * this should only be called while activating or deactivating an extent.
+ *
+ * Split and merge have a "prepare" and a "commit" portion. The prepare portion
+ * does the operations that can be done without exclusive access to the extent
+ * in question, while the commit variant requires exclusive access to maintain
+ * the emap invariants. The only function that can fail is emap_split_prepare,
+ * and it returns true on failure (at which point the caller shouldn't commit).
+ *
+ * In all cases, "lead" refers to the lower-addressed extent, and trail to the
+ * higher-addressed one. It's the caller's responsibility to set the edata
+ * state appropriately.
+ */
+bool emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
+ edata_t *edata, size_t size_a, edata_t *trail, size_t size_b);
+void emap_split_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
+ edata_t *lead, size_t size_a, edata_t *trail, size_t size_b);
+void emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
+ edata_t *lead, edata_t *trail);
+void emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
+ edata_t *lead, edata_t *trail);
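+
+/*
+ * Split sketch (illustrative; on prepare failure the caller simply must not
+ * commit):
+ *
+ *     emap_prepare_t prepare;
+ *     if (!emap_split_prepare(tsdn, emap, &prepare, lead, size_a, trail,
+ *         size_b)) {
+ *         emap_split_commit(tsdn, emap, &prepare, lead, size_a, trail,
+ *             size_b);
+ *     }
+ */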
+
+/* Assert that the emap's view of the given edata matches the edata's view. */
+void emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
+static inline void
+emap_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
+ if (config_debug) {
+ emap_do_assert_mapped(tsdn, emap, edata);
+ }
+}
+
+/* Assert that the given edata isn't in the map. */
+void emap_do_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata);
+static inline void
+emap_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
+ if (config_debug) {
+ emap_do_assert_not_mapped(tsdn, emap, edata);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+emap_edata_in_transition(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
+ assert(config_debug);
+ emap_assert_mapped(tsdn, emap, edata);
+
+ EMAP_DECLARE_RTREE_CTX;
+ rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
+ (uintptr_t)edata_base_get(edata));
+
+ return edata_state_in_transition(contents.metadata.state);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+emap_edata_is_acquired(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
+ if (!config_debug) {
+ /* For assertions only. */
+ return false;
+ }
+
+ /*
+ * The edata is considered acquired if no other threads will attempt to
+ * read / write any fields from it. This includes a few cases:
+ *
+ * 1) edata not hooked into emap yet -- This implies the edata just got
+ * allocated or initialized.
+ *
+ * 2) in an active or transition state -- In both cases, the edata can
+ * be discovered from the emap, however the state tracked in the rtree
+ * will prevent other threads from accessing the actual edata.
+ */
+ EMAP_DECLARE_RTREE_CTX;
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
+ rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true,
+ /* init_missing */ false);
+ if (elm == NULL) {
+ return true;
+ }
+ rtree_contents_t contents = rtree_leaf_elm_read(tsdn, &emap->rtree, elm,
+ /* dependent */ true);
+ if (contents.edata == NULL ||
+ contents.metadata.state == extent_state_active ||
+ edata_state_in_transition(contents.metadata.state)) {
+ return true;
+ }
+
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+extent_assert_can_coalesce(const edata_t *inner, const edata_t *outer) {
+ assert(edata_arena_ind_get(inner) == edata_arena_ind_get(outer));
+ assert(edata_pai_get(inner) == edata_pai_get(outer));
+ assert(edata_committed_get(inner) == edata_committed_get(outer));
+ assert(edata_state_get(inner) == extent_state_active);
+ assert(edata_state_get(outer) == extent_state_merging);
+ assert(!edata_guarded_get(inner) && !edata_guarded_get(outer));
+ assert(edata_base_get(inner) == edata_past_get(outer) ||
+ edata_base_get(outer) == edata_past_get(inner));
+}
+
+JEMALLOC_ALWAYS_INLINE void
+extent_assert_can_expand(const edata_t *original, const edata_t *expand) {
+ assert(edata_arena_ind_get(original) == edata_arena_ind_get(expand));
+ assert(edata_pai_get(original) == edata_pai_get(expand));
+ assert(edata_state_get(original) == extent_state_active);
+ assert(edata_state_get(expand) == extent_state_merging);
+ assert(edata_past_get(original) == edata_base_get(expand));
+}
+
+JEMALLOC_ALWAYS_INLINE edata_t *
+emap_edata_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr) {
+ EMAP_DECLARE_RTREE_CTX;
+
+ return rtree_read(tsdn, &emap->rtree, rtree_ctx, (uintptr_t)ptr).edata;
+}
+
+/* Fills in alloc_ctx with the info in the map. */
+JEMALLOC_ALWAYS_INLINE void
+emap_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
+ emap_alloc_ctx_t *alloc_ctx) {
+ EMAP_DECLARE_RTREE_CTX;
+
+ rtree_metadata_t metadata = rtree_metadata_read(tsdn, &emap->rtree,
+ rtree_ctx, (uintptr_t)ptr);
+ alloc_ctx->szind = metadata.szind;
+ alloc_ctx->slab = metadata.slab;
+}
+
+/* The pointer must be mapped. */
+JEMALLOC_ALWAYS_INLINE void
+emap_full_alloc_ctx_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
+ emap_full_alloc_ctx_t *full_alloc_ctx) {
+ EMAP_DECLARE_RTREE_CTX;
+
+ rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
+ (uintptr_t)ptr);
+ full_alloc_ctx->edata = contents.edata;
+ full_alloc_ctx->szind = contents.metadata.szind;
+ full_alloc_ctx->slab = contents.metadata.slab;
+}
+
+/*
+ * The pointer is allowed to not be mapped.
+ *
+ * Returns true when the pointer is not present.
+ */
+JEMALLOC_ALWAYS_INLINE bool
+emap_full_alloc_ctx_try_lookup(tsdn_t *tsdn, emap_t *emap, const void *ptr,
+ emap_full_alloc_ctx_t *full_alloc_ctx) {
+ EMAP_DECLARE_RTREE_CTX;
+
+ rtree_contents_t contents;
+ bool err = rtree_read_independent(tsdn, &emap->rtree, rtree_ctx,
+ (uintptr_t)ptr, &contents);
+ if (err) {
+ return true;
+ }
+ full_alloc_ctx->edata = contents.edata;
+ full_alloc_ctx->szind = contents.metadata.szind;
+ full_alloc_ctx->slab = contents.metadata.slab;
+ return false;
+}
+
+/*
+ * Only used on the fast path of free. Returns true when the lookup cannot be
+ * fulfilled by the fast path, e.g. when the metadata key is not cached.
+ */
+JEMALLOC_ALWAYS_INLINE bool
+emap_alloc_ctx_try_lookup_fast(tsd_t *tsd, emap_t *emap, const void *ptr,
+ emap_alloc_ctx_t *alloc_ctx) {
+ /* Use the unsafe getter since this may get called during exit. */
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get_unsafe(tsd);
+
+ rtree_metadata_t metadata;
+ bool err = rtree_metadata_try_read_fast(tsd_tsdn(tsd), &emap->rtree,
+ rtree_ctx, (uintptr_t)ptr, &metadata);
+ if (err) {
+ return true;
+ }
+ alloc_ctx->szind = metadata.szind;
+ alloc_ctx->slab = metadata.slab;
+ return false;
+}
+
+/*
+ * We want to do batch lookups out of the cache bins, which use
+ * cache_bin_ptr_array_get to access the i'th element of the bin (since they
+ * invert the usual ordering in deciding what to flush). This lets the emap
+ * avoid caring about its caller's ordering.
+ */
+typedef const void *(*emap_ptr_getter)(void *ctx, size_t ind);
+/*
+ * This allows size-checking assertions, which we can only do while we're in the
+ * process of edata lookups.
+ */
+typedef void (*emap_metadata_visitor)(void *ctx,
+    emap_full_alloc_ctx_t *alloc_ctx);
+
+typedef union emap_batch_lookup_result_u emap_batch_lookup_result_t;
+union emap_batch_lookup_result_u {
+ edata_t *edata;
+ rtree_leaf_elm_t *rtree_leaf;
+};
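+
+/*
+ * A trivial getter/visitor pair (illustrative; a real caller reads out of its
+ * cache bin's pointer array, and the `my_' names are placeholders):
+ *
+ *     static const void *
+ *     my_ptr_getter(void *ctx, size_t ind) {
+ *         return ((const void **)ctx)[ind];
+ *     }
+ *
+ *     static void
+ *     my_metadata_visitor(void *ctx, emap_full_alloc_ctx_t *alloc_ctx) {
+ *         assert(alloc_ctx->edata != NULL);
+ *     }
+ */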
+
+JEMALLOC_ALWAYS_INLINE void
+emap_edata_lookup_batch(tsd_t *tsd, emap_t *emap, size_t nptrs,
+ emap_ptr_getter ptr_getter, void *ptr_getter_ctx,
+ emap_metadata_visitor metadata_visitor, void *metadata_visitor_ctx,
+ emap_batch_lookup_result_t *result) {
+ /* Avoids null-checking tsdn in the loop below. */
+ util_assume(tsd != NULL);
+ rtree_ctx_t *rtree_ctx = tsd_rtree_ctxp_get(tsd);
+
+ for (size_t i = 0; i < nptrs; i++) {
+ const void *ptr = ptr_getter(ptr_getter_ctx, i);
+ /*
+ * Reuse the edatas array as a temp buffer, lying a little about
+ * the types.
+ */
+ result[i].rtree_leaf = rtree_leaf_elm_lookup(tsd_tsdn(tsd),
+ &emap->rtree, rtree_ctx, (uintptr_t)ptr,
+ /* dependent */ true, /* init_missing */ false);
+ }
+
+ for (size_t i = 0; i < nptrs; i++) {
+ rtree_leaf_elm_t *elm = result[i].rtree_leaf;
+ rtree_contents_t contents = rtree_leaf_elm_read(tsd_tsdn(tsd),
+ &emap->rtree, elm, /* dependent */ true);
+ result[i].edata = contents.edata;
+ emap_full_alloc_ctx_t alloc_ctx;
+ /*
+ * Not all these fields are read in practice by the metadata
+ * visitor. But the compiler can easily optimize away the ones
+ * that aren't, so no sense in being incomplete.
+ */
+ alloc_ctx.szind = contents.metadata.szind;
+ alloc_ctx.slab = contents.metadata.slab;
+ alloc_ctx.edata = contents.edata;
+ metadata_visitor(metadata_visitor_ctx, &alloc_ctx);
+ }
+}
+
+#endif /* JEMALLOC_INTERNAL_EMAP_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/emitter.h b/contrib/jemalloc/include/jemalloc/internal/emitter.h
index 542bc79c36d0..9482f68bc5c3 100644
--- a/contrib/jemalloc/include/jemalloc/internal/emitter.h
+++ b/contrib/jemalloc/include/jemalloc/internal/emitter.h
@@ -6,6 +6,7 @@
typedef enum emitter_output_e emitter_output_t;
enum emitter_output_e {
emitter_output_json,
+ emitter_output_json_compact,
emitter_output_table
};
@@ -21,6 +22,7 @@ typedef enum emitter_type_e emitter_type_t;
enum emitter_type_e {
emitter_type_bool,
emitter_type_int,
+ emitter_type_int64,
emitter_type_unsigned,
emitter_type_uint32,
emitter_type_uint64,
@@ -66,7 +68,7 @@ typedef struct emitter_s emitter_t;
struct emitter_s {
emitter_output_t output;
/* The output information. */
- void (*write_cb)(void *, const char *);
+ write_cb_t *write_cb;
void *cbopaque;
int nesting_depth;
/* True if we've already emitted a value at the given depth. */
@@ -75,6 +77,12 @@ struct emitter_s {
bool emitted_key;
};
+static inline bool
+emitter_outputs_json(emitter_t *emitter) {
+ return emitter->output == emitter_output_json ||
+ emitter->output == emitter_output_json_compact;
+}
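+
+/*
+ * Illustrative difference between the two JSON modes, for the same document:
+ *
+ *     emitter_output_json:         {\n\t"a": 1\n}\n
+ *     emitter_output_json_compact: {"a":1}
+ */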
+
/* Internal convenience function. Write to the emitter the given string. */
JEMALLOC_FORMAT_PRINTF(2, 3)
static inline void
@@ -135,13 +143,16 @@ emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
switch (value_type) {
case emitter_type_bool:
- emitter_printf(emitter,
+ emitter_printf(emitter,
emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width),
*(const bool *)value ? "true" : "false");
break;
case emitter_type_int:
EMIT_SIMPLE(int, "%d")
break;
+ case emitter_type_int64:
+ EMIT_SIMPLE(int64_t, "%" FMTd64)
+ break;
case emitter_type_unsigned:
EMIT_SIMPLE(unsigned, "%u")
break;
@@ -159,7 +170,7 @@ emitter_print_value(emitter_t *emitter, emitter_justify_t justify, int width,
* anywhere near the fmt size.
*/
assert(str_written < BUF_SIZE);
- emitter_printf(emitter,
+ emitter_printf(emitter,
emitter_gen_fmt(fmt, FMT_SIZE, "%s", justify, width), buf);
break;
case emitter_type_uint32:
@@ -196,6 +207,7 @@ static inline void
emitter_indent(emitter_t *emitter) {
int amount = emitter->nesting_depth;
const char *indent_str;
+ assert(emitter->output != emitter_output_json_compact);
if (emitter->output == emitter_output_json) {
indent_str = "\t";
} else {
@@ -209,12 +221,18 @@ emitter_indent(emitter_t *emitter) {
static inline void
emitter_json_key_prefix(emitter_t *emitter) {
+ assert(emitter_outputs_json(emitter));
if (emitter->emitted_key) {
emitter->emitted_key = false;
return;
}
- emitter_printf(emitter, "%s\n", emitter->item_at_depth ? "," : "");
- emitter_indent(emitter);
+ if (emitter->item_at_depth) {
+ emitter_printf(emitter, ",");
+ }
+ if (emitter->output != emitter_output_json_compact) {
+ emitter_printf(emitter, "\n");
+ emitter_indent(emitter);
+ }
}
/******************************************************************************/
@@ -222,27 +240,28 @@ emitter_json_key_prefix(emitter_t *emitter) {
static inline void
emitter_init(emitter_t *emitter, emitter_output_t emitter_output,
- void (*write_cb)(void *, const char *), void *cbopaque) {
+ write_cb_t *write_cb, void *cbopaque) {
emitter->output = emitter_output;
emitter->write_cb = write_cb;
emitter->cbopaque = cbopaque;
emitter->item_at_depth = false;
- emitter->emitted_key = false;
+ emitter->emitted_key = false;
emitter->nesting_depth = 0;
}
/******************************************************************************/
/* JSON public API. */
-/*
+/*
* Emits a key (e.g. as appears in an object). The next json entity emitted will
* be the corresponding value.
*/
static inline void
emitter_json_key(emitter_t *emitter, const char *json_key) {
- if (emitter->output == emitter_output_json) {
+ if (emitter_outputs_json(emitter)) {
emitter_json_key_prefix(emitter);
- emitter_printf(emitter, "\"%s\": ", json_key);
+ emitter_printf(emitter, "\"%s\":%s", json_key,
+ emitter->output == emitter_output_json_compact ? "" : " ");
emitter->emitted_key = true;
}
}
@@ -250,7 +269,7 @@ emitter_json_key(emitter_t *emitter, const char *json_key) {
static inline void
emitter_json_value(emitter_t *emitter, emitter_type_t value_type,
const void *value) {
- if (emitter->output == emitter_output_json) {
+ if (emitter_outputs_json(emitter)) {
emitter_json_key_prefix(emitter);
emitter_print_value(emitter, emitter_justify_none, -1,
value_type, value);
@@ -268,7 +287,7 @@ emitter_json_kv(emitter_t *emitter, const char *json_key,
static inline void
emitter_json_array_begin(emitter_t *emitter) {
- if (emitter->output == emitter_output_json) {
+ if (emitter_outputs_json(emitter)) {
emitter_json_key_prefix(emitter);
emitter_printf(emitter, "[");
emitter_nest_inc(emitter);
@@ -284,18 +303,20 @@ emitter_json_array_kv_begin(emitter_t *emitter, const char *json_key) {
static inline void
emitter_json_array_end(emitter_t *emitter) {
- if (emitter->output == emitter_output_json) {
+ if (emitter_outputs_json(emitter)) {
assert(emitter->nesting_depth > 0);
emitter_nest_dec(emitter);
- emitter_printf(emitter, "\n");
- emitter_indent(emitter);
+ if (emitter->output != emitter_output_json_compact) {
+ emitter_printf(emitter, "\n");
+ emitter_indent(emitter);
+ }
emitter_printf(emitter, "]");
}
}
static inline void
emitter_json_object_begin(emitter_t *emitter) {
- if (emitter->output == emitter_output_json) {
+ if (emitter_outputs_json(emitter)) {
emitter_json_key_prefix(emitter);
emitter_printf(emitter, "{");
emitter_nest_inc(emitter);
@@ -311,11 +332,13 @@ emitter_json_object_kv_begin(emitter_t *emitter, const char *json_key) {
static inline void
emitter_json_object_end(emitter_t *emitter) {
- if (emitter->output == emitter_output_json) {
+ if (emitter_outputs_json(emitter)) {
assert(emitter->nesting_depth > 0);
emitter_nest_dec(emitter);
- emitter_printf(emitter, "\n");
- emitter_indent(emitter);
+ if (emitter->output != emitter_output_json_compact) {
+ emitter_printf(emitter, "\n");
+ emitter_indent(emitter);
+ }
emitter_printf(emitter, "}");
}
}
@@ -420,7 +443,7 @@ emitter_kv_note(emitter_t *emitter, const char *json_key, const char *table_key,
emitter_type_t value_type, const void *value,
const char *table_note_key, emitter_type_t table_note_value_type,
const void *table_note_value) {
- if (emitter->output == emitter_output_json) {
+ if (emitter_outputs_json(emitter)) {
emitter_json_key(emitter, json_key);
emitter_json_value(emitter, value_type, value);
} else {
@@ -440,7 +463,7 @@ emitter_kv(emitter_t *emitter, const char *json_key, const char *table_key,
static inline void
emitter_dict_begin(emitter_t *emitter, const char *json_key,
const char *table_header) {
- if (emitter->output == emitter_output_json) {
+ if (emitter_outputs_json(emitter)) {
emitter_json_key(emitter, json_key);
emitter_json_object_begin(emitter);
} else {
@@ -450,7 +473,7 @@ emitter_dict_begin(emitter_t *emitter, const char *json_key,
static inline void
emitter_dict_end(emitter_t *emitter) {
- if (emitter->output == emitter_output_json) {
+ if (emitter_outputs_json(emitter)) {
emitter_json_object_end(emitter);
} else {
emitter_table_dict_end(emitter);
@@ -459,7 +482,7 @@ emitter_dict_end(emitter_t *emitter) {
static inline void
emitter_begin(emitter_t *emitter) {
- if (emitter->output == emitter_output_json) {
+ if (emitter_outputs_json(emitter)) {
assert(emitter->nesting_depth == 0);
emitter_printf(emitter, "{");
emitter_nest_inc(emitter);
@@ -476,10 +499,11 @@ emitter_begin(emitter_t *emitter) {
static inline void
emitter_end(emitter_t *emitter) {
- if (emitter->output == emitter_output_json) {
+ if (emitter_outputs_json(emitter)) {
assert(emitter->nesting_depth == 1);
emitter_nest_dec(emitter);
- emitter_printf(emitter, "\n}\n");
+ emitter_printf(emitter, "%s", emitter->output ==
+ emitter_output_json_compact ? "}" : "\n}\n");
}
}
diff --git a/contrib/jemalloc/include/jemalloc/internal/eset.h b/contrib/jemalloc/include/jemalloc/internal/eset.h
new file mode 100644
index 000000000000..4f689b47d881
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/eset.h
@@ -0,0 +1,77 @@
+#ifndef JEMALLOC_INTERNAL_ESET_H
+#define JEMALLOC_INTERNAL_ESET_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/fb.h"
+#include "jemalloc/internal/edata.h"
+#include "jemalloc/internal/mutex.h"
+
+/*
+ * An eset ("extent set") is a quantized collection of extents, with built-in
+ * LRU queue.
+ *
+ * This class is not thread-safe; synchronization must be done externally if
+ * there are mutating operations. One exception is the stats counters, which
+ * may be read without any locking.
+ */
+
+typedef struct eset_bin_s eset_bin_t;
+struct eset_bin_s {
+ edata_heap_t heap;
+ /*
+ * We do first-fit across multiple size classes. If we compared against
+ * the min element in each heap directly, we'd take a cache miss per
+ * extent we looked at. If we co-locate the edata summaries, we only
+ * take a miss on the edata we're actually going to return (which is
+ * inevitable anyways).
+ * inevitable anyway).
+ edata_cmp_summary_t heap_min;
+};
+
+typedef struct eset_bin_stats_s eset_bin_stats_t;
+struct eset_bin_stats_s {
+ atomic_zu_t nextents;
+ atomic_zu_t nbytes;
+};
+
+typedef struct eset_s eset_t;
+struct eset_s {
+ /* Bitmap in which set bits correspond to non-empty heaps. */
+ fb_group_t bitmap[FB_NGROUPS(SC_NPSIZES + 1)];
+
+ /* Quantized per size class heaps of extents. */
+ eset_bin_t bins[SC_NPSIZES + 1];
+
+ eset_bin_stats_t bin_stats[SC_NPSIZES + 1];
+
+ /* LRU of all extents in heaps. */
+ edata_list_inactive_t lru;
+
+ /* Page sum for all extents in heaps. */
+ atomic_zu_t npages;
+
+ /*
+ * A duplication of the data in the containing ecache. We use this only
+ * for assertions on the states of the passed-in extents.
+ */
+ extent_state_t state;
+};
+
+void eset_init(eset_t *eset, extent_state_t state);
+
+size_t eset_npages_get(eset_t *eset);
+/* Get the number of extents in the given page size index. */
+size_t eset_nextents_get(eset_t *eset, pszind_t ind);
+/* Get the sum total bytes of the extents in the given page size index. */
+size_t eset_nbytes_get(eset_t *eset, pszind_t ind);
+
+void eset_insert(eset_t *eset, edata_t *edata);
+void eset_remove(eset_t *eset, edata_t *edata);
+/*
+ * Select an extent from this eset of the given size and alignment. Returns
+ * NULL if no such item could be found.
+ */
+edata_t *eset_fit(eset_t *eset, size_t esize, size_t alignment, bool exact_only,
+ unsigned lg_max_fit);
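+
+/*
+ * Illustrative first-fit lookup (a sketch; the caller provides the
+ * synchronization and the esize/alignment/lg_max_fit values):
+ *
+ *     edata_t *e = eset_fit(eset, esize, alignment, false, lg_max_fit);
+ *     if (e != NULL) {
+ *         eset_remove(eset, e);
+ *     }
+ */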
+
+#endif /* JEMALLOC_INTERNAL_ESET_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/exp_grow.h b/contrib/jemalloc/include/jemalloc/internal/exp_grow.h
new file mode 100644
index 000000000000..8566b8a4c6a4
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/exp_grow.h
@@ -0,0 +1,50 @@
+#ifndef JEMALLOC_INTERNAL_EXP_GROW_H
+#define JEMALLOC_INTERNAL_EXP_GROW_H
+
+typedef struct exp_grow_s exp_grow_t;
+struct exp_grow_s {
+ /*
+ * Next extent size class in a growing series to use when satisfying a
+ * request via the extent hooks (only if opt_retain). This limits the
+ * number of disjoint virtual memory ranges so that extent merging can
+ * be effective even if multiple arenas' extent allocation requests are
+ * highly interleaved.
+ *
+ * retain_grow_limit is the maximum allowed size index to expand to
+ * (unless the required size is greater). The default is no limit,
+ * controlled through mallctl only.
+ */
+ pszind_t next;
+ pszind_t limit;
+};
+
+static inline bool
+exp_grow_size_prepare(exp_grow_t *exp_grow, size_t alloc_size_min,
+ size_t *r_alloc_size, pszind_t *r_skip) {
+ *r_skip = 0;
+ *r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip);
+ while (*r_alloc_size < alloc_size_min) {
+ (*r_skip)++;
+ if (exp_grow->next + *r_skip >=
+ sz_psz2ind(SC_LARGE_MAXCLASS)) {
+ /* Outside legal range. */
+ return true;
+ }
+ *r_alloc_size = sz_pind2sz(exp_grow->next + *r_skip);
+ }
+ return false;
+}
+
+static inline void
+exp_grow_size_commit(exp_grow_t *exp_grow, pszind_t skip) {
+ if (exp_grow->next + skip + 1 <= exp_grow->limit) {
+ exp_grow->next += skip + 1;
+ } else {
+ exp_grow->next = exp_grow->limit;
+ }
+}
+
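+/*
+ * Usage sketch (mirrors the retained-extent grow path; names other than the
+ * exp_grow_* calls are placeholders):
+ *
+ *     size_t alloc_size;
+ *     pszind_t skip;
+ *     if (!exp_grow_size_prepare(exp_grow, min_size, &alloc_size, &skip)) {
+ *         (attempt an allocation of alloc_size; on success:)
+ *         exp_grow_size_commit(exp_grow, skip);
+ *     }
+ */
+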
+void exp_grow_init(exp_grow_t *exp_grow);
+
+#endif /* JEMALLOC_INTERNAL_EXP_GROW_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/extent.h b/contrib/jemalloc/include/jemalloc/internal/extent.h
new file mode 100644
index 000000000000..1d51d41097e3
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/extent.h
@@ -0,0 +1,137 @@
+#ifndef JEMALLOC_INTERNAL_EXTENT_H
+#define JEMALLOC_INTERNAL_EXTENT_H
+
+#include "jemalloc/internal/ecache.h"
+#include "jemalloc/internal/ehooks.h"
+#include "jemalloc/internal/ph.h"
+#include "jemalloc/internal/rtree.h"
+
+/*
+ * This module contains the page-level allocator. It chooses the addresses that
+ * allocations requested by other modules will inhabit, and updates the global
+ * metadata to reflect allocation/deallocation/purging decisions.
+ */
+
+/*
+ * When reusing (and splitting) an active extent, (1U <<
+ * opt_lg_extent_max_active_fit) is the maximum allowed ratio between the size
+ * of the active extent and that of the new extent.
+ */
+#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
+extern size_t opt_lg_extent_max_active_fit;
+
+edata_t *ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
+ bool zero, bool guarded);
+edata_t *ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
+ bool zero, bool guarded);
+void ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *edata);
+edata_t *ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, size_t npages_min);
+
+void extent_gdump_add(tsdn_t *tsdn, const edata_t *edata);
+void extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
+ edata_t *edata);
+void extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *edata);
+edata_t *extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
+ bool growing_retained);
+void extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *edata);
+void extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *edata);
+bool extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length);
+bool extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length);
+bool extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length);
+bool extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length);
+edata_t *extent_split_wrapper(tsdn_t *tsdn, pac_t *pac,
+ ehooks_t *ehooks, edata_t *edata, size_t size_a, size_t size_b,
+ bool holding_core_locks);
+bool extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *a, edata_t *b);
+bool extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ bool commit, bool zero, bool growing_retained);
+size_t extent_sn_next(pac_t *pac);
+bool extent_boot(void);
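+
+/*
+ * Typical cache-first allocation pairing (a sketch; the pac/ehooks/ecache
+ * handles come from the owning arena):
+ *
+ *     edata_t *e = ecache_alloc(tsdn, pac, ehooks, ecache, NULL, size,
+ *         PAGE, false, false);
+ *     if (e == NULL) {
+ *         e = ecache_alloc_grow(tsdn, pac, ehooks, ecache, NULL, size,
+ *             PAGE, false, false);
+ *     }
+ */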
+
+JEMALLOC_ALWAYS_INLINE bool
+extent_neighbor_head_state_mergeable(bool edata_is_head,
+ bool neighbor_is_head, bool forward) {
+ /*
+ * Head state checking: disallow merging if the higher-addressed extent
+ * is a head extent. This helps preserve first-fit and, more
+ * importantly, ensures that we never merge across arenas.
+ */
+ if (forward) {
+ if (neighbor_is_head) {
+ return false;
+ }
+ } else {
+ if (edata_is_head) {
+ return false;
+ }
+ }
+ return true;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+extent_can_acquire_neighbor(edata_t *edata, rtree_contents_t contents,
+ extent_pai_t pai, extent_state_t expected_state, bool forward,
+ bool expanding) {
+ edata_t *neighbor = contents.edata;
+ if (neighbor == NULL) {
+ return false;
+ }
+ /* It's not safe to access *neighbor yet; must verify states first. */
+ bool neighbor_is_head = contents.metadata.is_head;
+ if (!extent_neighbor_head_state_mergeable(edata_is_head_get(edata),
+ neighbor_is_head, forward)) {
+ return false;
+ }
+ extent_state_t neighbor_state = contents.metadata.state;
+ if (pai == EXTENT_PAI_PAC) {
+ if (neighbor_state != expected_state) {
+ return false;
+ }
+ /* From this point, it's safe to access *neighbor. */
+ if (!expanding && (edata_committed_get(edata) !=
+ edata_committed_get(neighbor))) {
+ /*
+ * Some platforms (e.g. Windows) require an explicit
+ * commit step (and writing to uncommitted memory is not
+ * allowed).
+ */
+ return false;
+ }
+ } else {
+ if (neighbor_state == extent_state_active) {
+ return false;
+ }
+ /* From this point, it's safe to access *neighbor. */
+ }
+
+ assert(edata_pai_get(edata) == pai);
+ if (edata_pai_get(neighbor) != pai) {
+ return false;
+ }
+ if (opt_retain) {
+ assert(edata_arena_ind_get(edata) ==
+ edata_arena_ind_get(neighbor));
+ } else {
+ if (edata_arena_ind_get(edata) !=
+ edata_arena_ind_get(neighbor)) {
+ return false;
+ }
+ }
+ assert(!edata_guarded_get(edata) && !edata_guarded_get(neighbor));
+
+ return true;
+}
+
+#endif /* JEMALLOC_INTERNAL_EXTENT_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/extent_externs.h b/contrib/jemalloc/include/jemalloc/internal/extent_externs.h
deleted file mode 100644
index 8aba57633a34..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/extent_externs.h
+++ /dev/null
@@ -1,83 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
-#define JEMALLOC_INTERNAL_EXTENT_EXTERNS_H
-
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/mutex_pool.h"
-#include "jemalloc/internal/ph.h"
-#include "jemalloc/internal/rtree.h"
-
-extern size_t opt_lg_extent_max_active_fit;
-
-extern rtree_t extents_rtree;
-extern const extent_hooks_t extent_hooks_default;
-extern mutex_pool_t extent_mutex_pool;
-
-extent_t *extent_alloc(tsdn_t *tsdn, arena_t *arena);
-void extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
-
-extent_hooks_t *extent_hooks_get(arena_t *arena);
-extent_hooks_t *extent_hooks_set(tsd_t *tsd, arena_t *arena,
- extent_hooks_t *extent_hooks);
-
-#ifdef JEMALLOC_JET
-size_t extent_size_quantize_floor(size_t size);
-size_t extent_size_quantize_ceil(size_t size);
-#endif
-
-ph_proto(, extent_avail_, extent_tree_t, extent_t)
-ph_proto(, extent_heap_, extent_heap_t, extent_t)
-
-bool extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
- bool delay_coalesce);
-extent_state_t extents_state_get(const extents_t *extents);
-size_t extents_npages_get(extents_t *extents);
-/* Get the number of extents in the given page size index. */
-size_t extents_nextents_get(extents_t *extents, pszind_t ind);
-/* Get the sum total bytes of the extents in the given page size index. */
-size_t extents_nbytes_get(extents_t *extents, pszind_t ind);
-extent_t *extents_alloc(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
- size_t size, size_t pad, size_t alignment, bool slab, szind_t szind,
- bool *zero, bool *commit);
-void extents_dalloc(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent);
-extent_t *extents_evict(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_min);
-void extents_prefork(tsdn_t *tsdn, extents_t *extents);
-void extents_postfork_parent(tsdn_t *tsdn, extents_t *extents);
-void extents_postfork_child(tsdn_t *tsdn, extents_t *extents);
-extent_t *extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
- size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit);
-void extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent);
-void extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent);
-void extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent);
-bool extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length);
-bool extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length);
-bool extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length);
-bool extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length);
-extent_t *extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
- szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b);
-bool extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b);
-
-bool extent_boot(void);
-
-void extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
- size_t *nfree, size_t *nregs, size_t *size);
-void extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
- size_t *nfree, size_t *nregs, size_t *size,
- size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr);
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_EXTERNS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/extent_inlines.h b/contrib/jemalloc/include/jemalloc/internal/extent_inlines.h
deleted file mode 100644
index 77fa4c4a29a7..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/extent_inlines.h
+++ /dev/null
@@ -1,501 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_INLINES_H
-#define JEMALLOC_INTERNAL_EXTENT_INLINES_H
-
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/mutex_pool.h"
-#include "jemalloc/internal/pages.h"
-#include "jemalloc/internal/prng.h"
-#include "jemalloc/internal/ql.h"
-#include "jemalloc/internal/sc.h"
-#include "jemalloc/internal/sz.h"
-
-static inline void
-extent_lock(tsdn_t *tsdn, extent_t *extent) {
- assert(extent != NULL);
- mutex_pool_lock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
-}
-
-static inline void
-extent_unlock(tsdn_t *tsdn, extent_t *extent) {
- assert(extent != NULL);
- mutex_pool_unlock(tsdn, &extent_mutex_pool, (uintptr_t)extent);
-}
-
-static inline void
-extent_lock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
- assert(extent1 != NULL && extent2 != NULL);
- mutex_pool_lock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
- (uintptr_t)extent2);
-}
-
-static inline void
-extent_unlock2(tsdn_t *tsdn, extent_t *extent1, extent_t *extent2) {
- assert(extent1 != NULL && extent2 != NULL);
- mutex_pool_unlock2(tsdn, &extent_mutex_pool, (uintptr_t)extent1,
- (uintptr_t)extent2);
-}
-
-static inline unsigned
-extent_arena_ind_get(const extent_t *extent) {
- unsigned arena_ind = (unsigned)((extent->e_bits &
- EXTENT_BITS_ARENA_MASK) >> EXTENT_BITS_ARENA_SHIFT);
- assert(arena_ind < MALLOCX_ARENA_LIMIT);
-
- return arena_ind;
-}
-
-static inline arena_t *
-extent_arena_get(const extent_t *extent) {
- unsigned arena_ind = extent_arena_ind_get(extent);
-
- return (arena_t *)atomic_load_p(&arenas[arena_ind], ATOMIC_ACQUIRE);
-}
-
-static inline szind_t
-extent_szind_get_maybe_invalid(const extent_t *extent) {
- szind_t szind = (szind_t)((extent->e_bits & EXTENT_BITS_SZIND_MASK) >>
- EXTENT_BITS_SZIND_SHIFT);
- assert(szind <= SC_NSIZES);
- return szind;
-}
-
-static inline szind_t
-extent_szind_get(const extent_t *extent) {
- szind_t szind = extent_szind_get_maybe_invalid(extent);
- assert(szind < SC_NSIZES); /* Never call when "invalid". */
- return szind;
-}
-
-static inline size_t
-extent_usize_get(const extent_t *extent) {
- return sz_index2size(extent_szind_get(extent));
-}
-
-static inline unsigned
-extent_binshard_get(const extent_t *extent) {
- unsigned binshard = (unsigned)((extent->e_bits &
- EXTENT_BITS_BINSHARD_MASK) >> EXTENT_BITS_BINSHARD_SHIFT);
- assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
- return binshard;
-}
-
-static inline size_t
-extent_sn_get(const extent_t *extent) {
- return (size_t)((extent->e_bits & EXTENT_BITS_SN_MASK) >>
- EXTENT_BITS_SN_SHIFT);
-}
-
-static inline extent_state_t
-extent_state_get(const extent_t *extent) {
- return (extent_state_t)((extent->e_bits & EXTENT_BITS_STATE_MASK) >>
- EXTENT_BITS_STATE_SHIFT);
-}
-
-static inline bool
-extent_zeroed_get(const extent_t *extent) {
- return (bool)((extent->e_bits & EXTENT_BITS_ZEROED_MASK) >>
- EXTENT_BITS_ZEROED_SHIFT);
-}
-
-static inline bool
-extent_committed_get(const extent_t *extent) {
- return (bool)((extent->e_bits & EXTENT_BITS_COMMITTED_MASK) >>
- EXTENT_BITS_COMMITTED_SHIFT);
-}
-
-static inline bool
-extent_dumpable_get(const extent_t *extent) {
- return (bool)((extent->e_bits & EXTENT_BITS_DUMPABLE_MASK) >>
- EXTENT_BITS_DUMPABLE_SHIFT);
-}
-
-static inline bool
-extent_slab_get(const extent_t *extent) {
- return (bool)((extent->e_bits & EXTENT_BITS_SLAB_MASK) >>
- EXTENT_BITS_SLAB_SHIFT);
-}
-
-static inline unsigned
-extent_nfree_get(const extent_t *extent) {
- assert(extent_slab_get(extent));
- return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
- EXTENT_BITS_NFREE_SHIFT);
-}
-
-static inline void *
-extent_base_get(const extent_t *extent) {
- assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
- !extent_slab_get(extent));
- return PAGE_ADDR2BASE(extent->e_addr);
-}
-
-static inline void *
-extent_addr_get(const extent_t *extent) {
- assert(extent->e_addr == PAGE_ADDR2BASE(extent->e_addr) ||
- !extent_slab_get(extent));
- return extent->e_addr;
-}
-
-static inline size_t
-extent_size_get(const extent_t *extent) {
- return (extent->e_size_esn & EXTENT_SIZE_MASK);
-}
-
-static inline size_t
-extent_esn_get(const extent_t *extent) {
- return (extent->e_size_esn & EXTENT_ESN_MASK);
-}
-
-static inline size_t
-extent_bsize_get(const extent_t *extent) {
- return extent->e_bsize;
-}
-
-static inline void *
-extent_before_get(const extent_t *extent) {
- return (void *)((uintptr_t)extent_base_get(extent) - PAGE);
-}
-
-static inline void *
-extent_last_get(const extent_t *extent) {
- return (void *)((uintptr_t)extent_base_get(extent) +
- extent_size_get(extent) - PAGE);
-}
-
-static inline void *
-extent_past_get(const extent_t *extent) {
- return (void *)((uintptr_t)extent_base_get(extent) +
- extent_size_get(extent));
-}
-
-static inline arena_slab_data_t *
-extent_slab_data_get(extent_t *extent) {
- assert(extent_slab_get(extent));
- return &extent->e_slab_data;
-}
-
-static inline const arena_slab_data_t *
-extent_slab_data_get_const(const extent_t *extent) {
- assert(extent_slab_get(extent));
- return &extent->e_slab_data;
-}
-
-static inline prof_tctx_t *
-extent_prof_tctx_get(const extent_t *extent) {
- return (prof_tctx_t *)atomic_load_p(&extent->e_prof_tctx,
- ATOMIC_ACQUIRE);
-}
-
-static inline nstime_t
-extent_prof_alloc_time_get(const extent_t *extent) {
- return extent->e_alloc_time;
-}
-
-static inline void
-extent_arena_set(extent_t *extent, arena_t *arena) {
- unsigned arena_ind = (arena != NULL) ? arena_ind_get(arena) : ((1U <<
- MALLOCX_ARENA_BITS) - 1);
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ARENA_MASK) |
- ((uint64_t)arena_ind << EXTENT_BITS_ARENA_SHIFT);
-}
-
-static inline void
-extent_binshard_set(extent_t *extent, unsigned binshard) {
- /* The assertion assumes szind is set already. */
- assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_BINSHARD_MASK) |
- ((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT);
-}
-
-static inline void
-extent_addr_set(extent_t *extent, void *addr) {
- extent->e_addr = addr;
-}
-
-static inline void
-extent_addr_randomize(tsdn_t *tsdn, extent_t *extent, size_t alignment) {
- assert(extent_base_get(extent) == extent_addr_get(extent));
-
- if (alignment < PAGE) {
- unsigned lg_range = LG_PAGE -
- lg_floor(CACHELINE_CEILING(alignment));
- size_t r;
- if (!tsdn_null(tsdn)) {
- tsd_t *tsd = tsdn_tsd(tsdn);
- r = (size_t)prng_lg_range_u64(
- tsd_offset_statep_get(tsd), lg_range);
- } else {
- r = prng_lg_range_zu(
- &extent_arena_get(extent)->offset_state,
- lg_range, true);
- }
- uintptr_t random_offset = ((uintptr_t)r) << (LG_PAGE -
- lg_range);
- extent->e_addr = (void *)((uintptr_t)extent->e_addr +
- random_offset);
- assert(ALIGNMENT_ADDR2BASE(extent->e_addr, alignment) ==
- extent->e_addr);
- }
-}
-
-static inline void
-extent_size_set(extent_t *extent, size_t size) {
- assert((size & ~EXTENT_SIZE_MASK) == 0);
- extent->e_size_esn = size | (extent->e_size_esn & ~EXTENT_SIZE_MASK);
-}
-
-static inline void
-extent_esn_set(extent_t *extent, size_t esn) {
- extent->e_size_esn = (extent->e_size_esn & ~EXTENT_ESN_MASK) | (esn &
- EXTENT_ESN_MASK);
-}
-
-static inline void
-extent_bsize_set(extent_t *extent, size_t bsize) {
- extent->e_bsize = bsize;
-}
-
-static inline void
-extent_szind_set(extent_t *extent, szind_t szind) {
- assert(szind <= SC_NSIZES); /* SC_NSIZES means "invalid". */
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SZIND_MASK) |
- ((uint64_t)szind << EXTENT_BITS_SZIND_SHIFT);
-}
-
-static inline void
-extent_nfree_set(extent_t *extent, unsigned nfree) {
- assert(extent_slab_get(extent));
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
- ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
-}
-
-static inline void
-extent_nfree_binshard_set(extent_t *extent, unsigned nfree, unsigned binshard) {
- /* The assertion assumes szind is set already. */
- assert(binshard < bin_infos[extent_szind_get(extent)].n_shards);
- extent->e_bits = (extent->e_bits &
- (~EXTENT_BITS_NFREE_MASK & ~EXTENT_BITS_BINSHARD_MASK)) |
- ((uint64_t)binshard << EXTENT_BITS_BINSHARD_SHIFT) |
- ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
-}
-
-static inline void
-extent_nfree_inc(extent_t *extent) {
- assert(extent_slab_get(extent));
- extent->e_bits += ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
-}
-
-static inline void
-extent_nfree_dec(extent_t *extent) {
- assert(extent_slab_get(extent));
- extent->e_bits -= ((uint64_t)1U << EXTENT_BITS_NFREE_SHIFT);
-}
-
-static inline void
-extent_nfree_sub(extent_t *extent, uint64_t n) {
- assert(extent_slab_get(extent));
- extent->e_bits -= (n << EXTENT_BITS_NFREE_SHIFT);
-}
-
-static inline void
-extent_sn_set(extent_t *extent, size_t sn) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SN_MASK) |
- ((uint64_t)sn << EXTENT_BITS_SN_SHIFT);
-}
-
-static inline void
-extent_state_set(extent_t *extent, extent_state_t state) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_STATE_MASK) |
- ((uint64_t)state << EXTENT_BITS_STATE_SHIFT);
-}
-
-static inline void
-extent_zeroed_set(extent_t *extent, bool zeroed) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_ZEROED_MASK) |
- ((uint64_t)zeroed << EXTENT_BITS_ZEROED_SHIFT);
-}
-
-static inline void
-extent_committed_set(extent_t *extent, bool committed) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_COMMITTED_MASK) |
- ((uint64_t)committed << EXTENT_BITS_COMMITTED_SHIFT);
-}
-
-static inline void
-extent_dumpable_set(extent_t *extent, bool dumpable) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_DUMPABLE_MASK) |
- ((uint64_t)dumpable << EXTENT_BITS_DUMPABLE_SHIFT);
-}
-
-static inline void
-extent_slab_set(extent_t *extent, bool slab) {
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_SLAB_MASK) |
- ((uint64_t)slab << EXTENT_BITS_SLAB_SHIFT);
-}
-
-static inline void
-extent_prof_tctx_set(extent_t *extent, prof_tctx_t *tctx) {
- atomic_store_p(&extent->e_prof_tctx, tctx, ATOMIC_RELEASE);
-}
-
-static inline void
-extent_prof_alloc_time_set(extent_t *extent, nstime_t t) {
- nstime_copy(&extent->e_alloc_time, &t);
-}
-
-static inline bool
-extent_is_head_get(extent_t *extent) {
- if (maps_coalesce) {
- not_reached();
- }
-
- return (bool)((extent->e_bits & EXTENT_BITS_IS_HEAD_MASK) >>
- EXTENT_BITS_IS_HEAD_SHIFT);
-}
-
-static inline void
-extent_is_head_set(extent_t *extent, bool is_head) {
- if (maps_coalesce) {
- not_reached();
- }
-
- extent->e_bits = (extent->e_bits & ~EXTENT_BITS_IS_HEAD_MASK) |
- ((uint64_t)is_head << EXTENT_BITS_IS_HEAD_SHIFT);
-}
-
-static inline void
-extent_init(extent_t *extent, arena_t *arena, void *addr, size_t size,
- bool slab, szind_t szind, size_t sn, extent_state_t state, bool zeroed,
- bool committed, bool dumpable, extent_head_state_t is_head) {
- assert(addr == PAGE_ADDR2BASE(addr) || !slab);
-
- extent_arena_set(extent, arena);
- extent_addr_set(extent, addr);
- extent_size_set(extent, size);
- extent_slab_set(extent, slab);
- extent_szind_set(extent, szind);
- extent_sn_set(extent, sn);
- extent_state_set(extent, state);
- extent_zeroed_set(extent, zeroed);
- extent_committed_set(extent, committed);
- extent_dumpable_set(extent, dumpable);
- ql_elm_new(extent, ql_link);
- if (!maps_coalesce) {
- extent_is_head_set(extent, (is_head == EXTENT_IS_HEAD) ? true :
- false);
- }
- if (config_prof) {
- extent_prof_tctx_set(extent, NULL);
- }
-}
-
-static inline void
-extent_binit(extent_t *extent, void *addr, size_t bsize, size_t sn) {
- extent_arena_set(extent, NULL);
- extent_addr_set(extent, addr);
- extent_bsize_set(extent, bsize);
- extent_slab_set(extent, false);
- extent_szind_set(extent, SC_NSIZES);
- extent_sn_set(extent, sn);
- extent_state_set(extent, extent_state_active);
- extent_zeroed_set(extent, true);
- extent_committed_set(extent, true);
- extent_dumpable_set(extent, true);
-}
-
-static inline void
-extent_list_init(extent_list_t *list) {
- ql_new(list);
-}
-
-static inline extent_t *
-extent_list_first(const extent_list_t *list) {
- return ql_first(list);
-}
-
-static inline extent_t *
-extent_list_last(const extent_list_t *list) {
- return ql_last(list, ql_link);
-}
-
-static inline void
-extent_list_append(extent_list_t *list, extent_t *extent) {
- ql_tail_insert(list, extent, ql_link);
-}
-
-static inline void
-extent_list_prepend(extent_list_t *list, extent_t *extent) {
- ql_head_insert(list, extent, ql_link);
-}
-
-static inline void
-extent_list_replace(extent_list_t *list, extent_t *to_remove,
- extent_t *to_insert) {
- ql_after_insert(to_remove, to_insert, ql_link);
- ql_remove(list, to_remove, ql_link);
-}
-
-static inline void
-extent_list_remove(extent_list_t *list, extent_t *extent) {
- ql_remove(list, extent, ql_link);
-}
-
-static inline int
-extent_sn_comp(const extent_t *a, const extent_t *b) {
- size_t a_sn = extent_sn_get(a);
- size_t b_sn = extent_sn_get(b);
-
- return (a_sn > b_sn) - (a_sn < b_sn);
-}
-
-static inline int
-extent_esn_comp(const extent_t *a, const extent_t *b) {
- size_t a_esn = extent_esn_get(a);
- size_t b_esn = extent_esn_get(b);
-
- return (a_esn > b_esn) - (a_esn < b_esn);
-}
-
-static inline int
-extent_ad_comp(const extent_t *a, const extent_t *b) {
- uintptr_t a_addr = (uintptr_t)extent_addr_get(a);
- uintptr_t b_addr = (uintptr_t)extent_addr_get(b);
-
- return (a_addr > b_addr) - (a_addr < b_addr);
-}
-
-static inline int
-extent_ead_comp(const extent_t *a, const extent_t *b) {
- uintptr_t a_eaddr = (uintptr_t)a;
- uintptr_t b_eaddr = (uintptr_t)b;
-
- return (a_eaddr > b_eaddr) - (a_eaddr < b_eaddr);
-}
-
-static inline int
-extent_snad_comp(const extent_t *a, const extent_t *b) {
- int ret;
-
- ret = extent_sn_comp(a, b);
- if (ret != 0) {
- return ret;
- }
-
- ret = extent_ad_comp(a, b);
- return ret;
-}
-
-static inline int
-extent_esnead_comp(const extent_t *a, const extent_t *b) {
- int ret;
-
- ret = extent_esn_comp(a, b);
- if (ret != 0) {
- return ret;
- }
-
- ret = extent_ead_comp(a, b);
- return ret;
-}
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_INLINES_H */
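
The deleted accessors above all share one mask-and-shift pattern over the
packed e_bits word. A minimal standalone C sketch of that pattern (the DEMO_*
names and the 12-bit width are invented for illustration; this is not
jemalloc's actual layout):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Same construction as the MASK() macro in extent_structs.h below. */
#define DEMO_MASK(width, shift) (((((uint64_t)1U) << (width)) - 1) << (shift))
#define DEMO_ARENA_WIDTH 12
#define DEMO_ARENA_SHIFT 0
#define DEMO_ARENA_MASK DEMO_MASK(DEMO_ARENA_WIDTH, DEMO_ARENA_SHIFT)

static inline unsigned
demo_arena_get(uint64_t bits) {
    return (unsigned)((bits & DEMO_ARENA_MASK) >> DEMO_ARENA_SHIFT);
}

static inline uint64_t
demo_arena_set(uint64_t bits, unsigned arena_ind) {
    /* Clear the field's slice of the word, then or in the new value. */
    return (bits & ~DEMO_ARENA_MASK) |
        ((uint64_t)arena_ind << DEMO_ARENA_SHIFT);
}

int
main(void) {
    uint64_t bits = 0;
    bits = demo_arena_set(bits, 42);
    assert(demo_arena_get(bits) == 42);
    printf("arena_ind = %u\n", demo_arena_get(bits));
    return 0;
}
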
diff --git a/contrib/jemalloc/include/jemalloc/internal/extent_structs.h b/contrib/jemalloc/include/jemalloc/internal/extent_structs.h
deleted file mode 100644
index 767cd8930fbe..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/extent_structs.h
+++ /dev/null
@@ -1,256 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
-#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
-
-#include "jemalloc/internal/atomic.h"
-#include "jemalloc/internal/bit_util.h"
-#include "jemalloc/internal/bitmap.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/ql.h"
-#include "jemalloc/internal/ph.h"
-#include "jemalloc/internal/sc.h"
-
-typedef enum {
- extent_state_active = 0,
- extent_state_dirty = 1,
- extent_state_muzzy = 2,
- extent_state_retained = 3
-} extent_state_t;
-
-/* Extent (span of pages). Use accessor functions for e_* fields. */
-struct extent_s {
- /*
- * Bitfield containing several fields:
- *
- * a: arena_ind
- * b: slab
- * c: committed
- * d: dumpable
- * z: zeroed
- * t: state
- * i: szind
- * f: nfree
- * s: bin_shard
- * n: sn
- *
- * nnnnnnnn ... nnnnnnss ssssffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
- *
- * arena_ind: Arena from which this extent came, or all 1 bits if
- * unassociated.
- *
- * slab: The slab flag indicates whether the extent is used for a slab
- * of small regions. This helps differentiate small size classes,
- * and it indicates whether interior pointers can be looked up via
- * iealloc().
- *
- * committed: The committed flag indicates whether physical memory is
- * committed to the extent, whether explicitly or implicitly
- * as on a system that overcommits and satisfies physical
- * memory needs on demand via soft page faults.
- *
- * dumpable: The dumpable flag indicates whether or not we've set the
- * memory in question to be dumpable. Note that this
- * interacts somewhat subtly with user-specified extent hooks,
- * since we don't know if *they* are fiddling with
- * dumpability (in which case, we don't want to undo whatever
- * they're doing). To deal with this scenario, we:
- * - Make dumpable false only for memory allocated with the
- * default hooks.
- * - Only allow memory to go from non-dumpable to dumpable,
- * and only once.
- * - Never make the OS call to allow dumping when the
- * dumpable bit is already set.
- * These three constraints mean that we will never
- * accidentally dump user memory that the user meant to set
- * nondumpable with their extent hooks.
- *
- * zeroed: The zeroed flag is used by extent recycling code to track
- * whether memory is zero-filled.
- *
- * state: The state flag is an extent_state_t.
- *
- * szind: The szind flag indicates usable size class index for
- * allocations residing in this extent, regardless of whether the
- * extent is a slab. Extent size and usable size often differ
- * even for non-slabs, either due to sz_large_pad or promotion of
- * sampled small regions.
- *
- * nfree: Number of free regions in slab.
- *
- * bin_shard: the shard of the bin from which this extent came.
- *
- * sn: Serial number (potentially non-unique).
- *
- * Serial numbers may wrap around if !opt_retain, but as long as
- * comparison functions fall back on address comparison for equal
- * serial numbers, stable (if imperfect) ordering is maintained.
- *
- * Serial numbers may not be unique even in the absence of
- * wrap-around, e.g. when splitting an extent and assigning the same
- * serial number to both resulting adjacent extents.
- */
- uint64_t e_bits;
-#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT) ((((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1)) << (CURRENT_FIELD_SHIFT))
-
-#define EXTENT_BITS_ARENA_WIDTH MALLOCX_ARENA_BITS
-#define EXTENT_BITS_ARENA_SHIFT 0
-#define EXTENT_BITS_ARENA_MASK MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT)
-
-#define EXTENT_BITS_SLAB_WIDTH 1
-#define EXTENT_BITS_SLAB_SHIFT (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT)
-#define EXTENT_BITS_SLAB_MASK MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT)
-
-#define EXTENT_BITS_COMMITTED_WIDTH 1
-#define EXTENT_BITS_COMMITTED_SHIFT (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT)
-#define EXTENT_BITS_COMMITTED_MASK MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT)
-
-#define EXTENT_BITS_DUMPABLE_WIDTH 1
-#define EXTENT_BITS_DUMPABLE_SHIFT (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT)
-#define EXTENT_BITS_DUMPABLE_MASK MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT)
-
-#define EXTENT_BITS_ZEROED_WIDTH 1
-#define EXTENT_BITS_ZEROED_SHIFT (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT)
-#define EXTENT_BITS_ZEROED_MASK MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT)
-
-#define EXTENT_BITS_STATE_WIDTH 2
-#define EXTENT_BITS_STATE_SHIFT (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
-#define EXTENT_BITS_STATE_MASK MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)
-
-#define EXTENT_BITS_SZIND_WIDTH LG_CEIL(SC_NSIZES)
-#define EXTENT_BITS_SZIND_SHIFT (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
-#define EXTENT_BITS_SZIND_MASK MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)
-
-#define EXTENT_BITS_NFREE_WIDTH (LG_SLAB_MAXREGS + 1)
-#define EXTENT_BITS_NFREE_SHIFT (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
-#define EXTENT_BITS_NFREE_MASK MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)
-
-#define EXTENT_BITS_BINSHARD_WIDTH 6
-#define EXTENT_BITS_BINSHARD_SHIFT (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
-#define EXTENT_BITS_BINSHARD_MASK MASK(EXTENT_BITS_BINSHARD_WIDTH, EXTENT_BITS_BINSHARD_SHIFT)
-
-#define EXTENT_BITS_IS_HEAD_WIDTH 1
-#define EXTENT_BITS_IS_HEAD_SHIFT (EXTENT_BITS_BINSHARD_WIDTH + EXTENT_BITS_BINSHARD_SHIFT)
-#define EXTENT_BITS_IS_HEAD_MASK MASK(EXTENT_BITS_IS_HEAD_WIDTH, EXTENT_BITS_IS_HEAD_SHIFT)
-
-#define EXTENT_BITS_SN_SHIFT (EXTENT_BITS_IS_HEAD_WIDTH + EXTENT_BITS_IS_HEAD_SHIFT)
-#define EXTENT_BITS_SN_MASK (UINT64_MAX << EXTENT_BITS_SN_SHIFT)
-
- /* Pointer to the extent that this structure is responsible for. */
- void *e_addr;
-
- union {
- /*
- * Extent size and serial number associated with the extent
- * structure (different than the serial number for the extent at
- * e_addr).
- *
- * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
- */
- size_t e_size_esn;
- #define EXTENT_SIZE_MASK ((size_t)~(PAGE-1))
- #define EXTENT_ESN_MASK ((size_t)PAGE-1)
- /* Base extent size, which may not be a multiple of PAGE. */
- size_t e_bsize;
- };
-
- /*
- * List linkage, used by a variety of lists:
- * - bin_t's slabs_full
- * - extents_t's LRU
- * - stashed dirty extents
- * - arena's large allocations
- */
- ql_elm(extent_t) ql_link;
-
- /*
- * Linkage for per size class sn/address-ordered heaps, and
- * for extent_avail
- */
- phn(extent_t) ph_link;
-
- union {
- /* Small region slab metadata. */
- arena_slab_data_t e_slab_data;
-
- /* Profiling data, used for large objects. */
- struct {
- /* Time when this was allocated. */
- nstime_t e_alloc_time;
- /* Points to a prof_tctx_t. */
- atomic_p_t e_prof_tctx;
- };
- };
-};
-typedef ql_head(extent_t) extent_list_t;
-typedef ph(extent_t) extent_tree_t;
-typedef ph(extent_t) extent_heap_t;
-
-/* Quantized collection of extents, with built-in LRU queue. */
-struct extents_s {
- malloc_mutex_t mtx;
-
- /*
- * Quantized per size class heaps of extents.
- *
- * Synchronization: mtx.
- */
- extent_heap_t heaps[SC_NPSIZES + 1];
- atomic_zu_t nextents[SC_NPSIZES + 1];
- atomic_zu_t nbytes[SC_NPSIZES + 1];
-
- /*
- * Bitmap for which set bits correspond to non-empty heaps.
- *
- * Synchronization: mtx.
- */
- bitmap_t bitmap[BITMAP_GROUPS(SC_NPSIZES + 1)];
-
- /*
- * LRU of all extents in heaps.
- *
- * Synchronization: mtx.
- */
- extent_list_t lru;
-
- /*
- * Page sum for all extents in heaps.
- *
- * The synchronization here is a little tricky. Modifications to npages
- * must hold mtx, but reads need not (though, a reader who sees npages
- * without holding the mutex can't assume anything about the rest of the
- * state of the extents_t).
- */
- atomic_zu_t npages;
-
- /* All stored extents must be in the same state. */
- extent_state_t state;
-
- /*
- * If true, delay coalescing until eviction; otherwise coalesce during
- * deallocation.
- */
- bool delay_coalesce;
-};
-
-/*
- * The following two structs are for experimental purposes. See
- * experimental_utilization_query_ctl and
- * experimental_utilization_batch_query_ctl in src/ctl.c.
- */
-
-struct extent_util_stats_s {
- size_t nfree;
- size_t nregs;
- size_t size;
-};
-
-struct extent_util_stats_verbose_s {
- void *slabcur_addr;
- size_t nfree;
- size_t nregs;
- size_t size;
- size_t bin_nfree;
- size_t bin_nregs;
-};
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */
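
The e_size_esn union above works because extent sizes are always multiples of
PAGE, leaving the low lg(PAGE) bits of the word free to carry the structure's
serial number. A standalone sketch of the packing, assuming 4 KiB pages and
hypothetical DEMO_* names:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define DEMO_PAGE ((size_t)4096)
#define DEMO_SIZE_MASK ((size_t)~(DEMO_PAGE - 1))
#define DEMO_ESN_MASK ((size_t)(DEMO_PAGE - 1))

int
main(void) {
    size_t size = 8 * DEMO_PAGE;  /* page-aligned, so the low bits are 0 */
    size_t esn = 123;             /* must fit below the page size */
    assert((size & ~DEMO_SIZE_MASK) == 0 && esn < DEMO_PAGE);
    size_t size_esn = size | esn; /* both values packed into one word */
    printf("size=%zu esn=%zu\n", size_esn & DEMO_SIZE_MASK,
        size_esn & DEMO_ESN_MASK);
    return 0;
}
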
diff --git a/contrib/jemalloc/include/jemalloc/internal/extent_types.h b/contrib/jemalloc/include/jemalloc/internal/extent_types.h
deleted file mode 100644
index 96925cf95887..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/extent_types.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_EXTENT_TYPES_H
-#define JEMALLOC_INTERNAL_EXTENT_TYPES_H
-
-typedef struct extent_s extent_t;
-typedef struct extents_s extents_t;
-
-typedef struct extent_util_stats_s extent_util_stats_t;
-typedef struct extent_util_stats_verbose_s extent_util_stats_verbose_t;
-
-#define EXTENT_HOOKS_INITIALIZER NULL
-
-/*
- * When reusing (and splitting) an active extent, (1U <<
- * opt_lg_extent_max_active_fit) is the max allowed ratio between the size of
- * the active extent and the size of the new extent.
- */
-#define LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT 6
-
-typedef enum {
- EXTENT_NOT_HEAD,
- EXTENT_IS_HEAD /* Only relevant for Windows && opt.retain. */
-} extent_head_state_t;
-
-#endif /* JEMALLOC_INTERNAL_EXTENT_TYPES_H */
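
To make the LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT comment concrete: with the default
of 6, an active extent is eligible for reuse only if it is at most 2^6 = 64
times the requested size. A hypothetical helper in that spirit (demo_fit_ok is
not a function in the tree):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define DEMO_LG_MAX_ACTIVE_FIT 6

static bool
demo_fit_ok(size_t active_size, size_t request_size) {
    /* Reuse only if the active extent is at most 64x the request. */
    return active_size <= (request_size << DEMO_LG_MAX_ACTIVE_FIT);
}

int
main(void) {
    printf("%d\n", demo_fit_ok(64 * 4096, 4096)); /* 1: exactly 64x */
    printf("%d\n", demo_fit_ok(65 * 4096, 4096)); /* 0: too much waste */
    return 0;
}
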
diff --git a/contrib/jemalloc/include/jemalloc/internal/fb.h b/contrib/jemalloc/include/jemalloc/internal/fb.h
new file mode 100644
index 000000000000..90c4091ff6cc
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/fb.h
@@ -0,0 +1,373 @@
+#ifndef JEMALLOC_INTERNAL_FB_H
+#define JEMALLOC_INTERNAL_FB_H
+
+/*
+ * The flat bitmap module. This has a larger API relative to the bitmap module
+ * (supporting things like backwards searches, and searching for both set and
+ * unset bits), at the cost of slower operations for very large bitmaps.
+ *
+ * Initialized flat bitmaps start at all-zeros (all bits unset).
+ */
+
+typedef unsigned long fb_group_t;
+#define FB_GROUP_BITS (ZU(1) << (LG_SIZEOF_LONG + 3))
+#define FB_NGROUPS(nbits) ((nbits) / FB_GROUP_BITS \
+ + ((nbits) % FB_GROUP_BITS == 0 ? 0 : 1))
+
+static inline void
+fb_init(fb_group_t *fb, size_t nbits) {
+ size_t ngroups = FB_NGROUPS(nbits);
+ memset(fb, 0, ngroups * sizeof(fb_group_t));
+}
+
+static inline bool
+fb_empty(fb_group_t *fb, size_t nbits) {
+ size_t ngroups = FB_NGROUPS(nbits);
+ for (size_t i = 0; i < ngroups; i++) {
+ if (fb[i] != 0) {
+ return false;
+ }
+ }
+ return true;
+}
+
+static inline bool
+fb_full(fb_group_t *fb, size_t nbits) {
+ size_t ngroups = FB_NGROUPS(nbits);
+ size_t trailing_bits = nbits % FB_GROUP_BITS;
+ size_t limit = (trailing_bits == 0 ? ngroups : ngroups - 1);
+ for (size_t i = 0; i < limit; i++) {
+ if (fb[i] != ~(fb_group_t)0) {
+ return false;
+ }
+ }
+ if (trailing_bits == 0) {
+ return true;
+ }
+ return fb[ngroups - 1] == ((fb_group_t)1 << trailing_bits) - 1;
+}
+
+static inline bool
+fb_get(fb_group_t *fb, size_t nbits, size_t bit) {
+ assert(bit < nbits);
+ size_t group_ind = bit / FB_GROUP_BITS;
+ size_t bit_ind = bit % FB_GROUP_BITS;
+ return (bool)(fb[group_ind] & ((fb_group_t)1 << bit_ind));
+}
+
+static inline void
+fb_set(fb_group_t *fb, size_t nbits, size_t bit) {
+ assert(bit < nbits);
+ size_t group_ind = bit / FB_GROUP_BITS;
+ size_t bit_ind = bit % FB_GROUP_BITS;
+ fb[group_ind] |= ((fb_group_t)1 << bit_ind);
+}
+
+static inline void
+fb_unset(fb_group_t *fb, size_t nbits, size_t bit) {
+ assert(bit < nbits);
+ size_t group_ind = bit / FB_GROUP_BITS;
+ size_t bit_ind = bit % FB_GROUP_BITS;
+ fb[group_ind] &= ~((fb_group_t)1 << bit_ind);
+}
+
+/*
+ * Some implementation details. This visitation function lets us apply a group
+ * visitor to each group in the bitmap (potentially modifying it). The mask
+ * indicates which bits are logically part of the visitation.
+ */
+typedef void (*fb_group_visitor_t)(void *ctx, fb_group_t *fb, fb_group_t mask);
+JEMALLOC_ALWAYS_INLINE void
+fb_visit_impl(fb_group_t *fb, size_t nbits, fb_group_visitor_t visit, void *ctx,
+ size_t start, size_t cnt) {
+ assert(cnt > 0);
+ assert(start + cnt <= nbits);
+ size_t group_ind = start / FB_GROUP_BITS;
+ size_t start_bit_ind = start % FB_GROUP_BITS;
+ /*
+ * The first group is special; it's the only one we don't start writing
+ * to from bit 0.
+ */
+ size_t first_group_cnt = (start_bit_ind + cnt > FB_GROUP_BITS
+ ? FB_GROUP_BITS - start_bit_ind : cnt);
+ /*
+ * We can basically split affected words into:
+ * - The first group, where we touch only the high bits
+ * - The last group, where we touch only the low bits
+ * - The middle, where we set all the bits to the same thing.
+ * We treat each case individually. The last two could be merged, but
+ * this can lead to bad codegen for those middle words.
+ */
+ /* First group */
+ fb_group_t mask = ((~(fb_group_t)0)
+ >> (FB_GROUP_BITS - first_group_cnt))
+ << start_bit_ind;
+ visit(ctx, &fb[group_ind], mask);
+
+ cnt -= first_group_cnt;
+ group_ind++;
+ /* Middle groups */
+ while (cnt > FB_GROUP_BITS) {
+ visit(ctx, &fb[group_ind], ~(fb_group_t)0);
+ cnt -= FB_GROUP_BITS;
+ group_ind++;
+ }
+ /* Last group */
+ if (cnt != 0) {
+ mask = (~(fb_group_t)0) >> (FB_GROUP_BITS - cnt);
+ visit(ctx, &fb[group_ind], mask);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+fb_assign_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) {
+ bool val = *(bool *)ctx;
+ if (val) {
+ *fb |= mask;
+ } else {
+ *fb &= ~mask;
+ }
+}
+
+/* Sets the cnt bits starting at position start. Must not have a 0 count. */
+static inline void
+fb_set_range(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
+ bool val = true;
+ fb_visit_impl(fb, nbits, &fb_assign_visitor, &val, start, cnt);
+}
+
+/* Unsets the cnt bits starting at position start. Must not have a 0 count. */
+static inline void
+fb_unset_range(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
+ bool val = false;
+ fb_visit_impl(fb, nbits, &fb_assign_visitor, &val, start, cnt);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+fb_scount_visitor(void *ctx, fb_group_t *fb, fb_group_t mask) {
+ size_t *scount = (size_t *)ctx;
+ *scount += popcount_lu(*fb & mask);
+}
+
+/* Finds the number of set bits in the range of length cnt starting at start. */
+JEMALLOC_ALWAYS_INLINE size_t
+fb_scount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
+ size_t scount = 0;
+ fb_visit_impl(fb, nbits, &fb_scount_visitor, &scount, start, cnt);
+ return scount;
+}
+
+/* Finds the number of unset bits in the range of length cnt starting at start. */
+JEMALLOC_ALWAYS_INLINE size_t
+fb_ucount(fb_group_t *fb, size_t nbits, size_t start, size_t cnt) {
+ size_t scount = fb_scount(fb, nbits, start, cnt);
+ return cnt - scount;
+}
+
+/*
+ * An implementation detail; find the first bit at position >= start (or, when
+ * !forward, the last bit at position <= start) with the value val.
+ *
+ * Returns the number of bits in the bitmap (forward) or -1 (backward) if no
+ * such bit exists.
+ */
+JEMALLOC_ALWAYS_INLINE ssize_t
+fb_find_impl(fb_group_t *fb, size_t nbits, size_t start, bool val,
+ bool forward) {
+ assert(start < nbits);
+ size_t ngroups = FB_NGROUPS(nbits);
+ ssize_t group_ind = start / FB_GROUP_BITS;
+ size_t bit_ind = start % FB_GROUP_BITS;
+
+ fb_group_t maybe_invert = (val ? 0 : (fb_group_t)-1);
+
+ fb_group_t group = fb[group_ind];
+ group ^= maybe_invert;
+ if (forward) {
+ /* Only keep ones in bits bit_ind and above. */
+ group &= ~((1LU << bit_ind) - 1);
+ } else {
+ /*
+ * Only keep ones in bits bit_ind and below. You might more
+ * naturally express this as (1 << (bit_ind + 1)) - 1, but
+ * that shifts by an invalid amount if bit_ind is one less than
+ * FB_GROUP_BITS.
+ */
+ group &= ((2LU << bit_ind) - 1);
+ }
+ ssize_t group_ind_bound = forward ? (ssize_t)ngroups : -1;
+ while (group == 0) {
+ group_ind += forward ? 1 : -1;
+ if (group_ind == group_ind_bound) {
+ return forward ? (ssize_t)nbits : (ssize_t)-1;
+ }
+ group = fb[group_ind];
+ group ^= maybe_invert;
+ }
+ assert(group != 0);
+ size_t bit = forward ? ffs_lu(group) : fls_lu(group);
+ size_t pos = group_ind * FB_GROUP_BITS + bit;
+ /*
+ * The high bits of a partially filled last group are zeros, so if we're
+ * looking for zeros we don't want to report an invalid result.
+ */
+ if (forward && !val && pos > nbits) {
+ return nbits;
+ }
+ return pos;
+}
+
+/*
+ * Find the first unset bit in the bitmap with an index >= min_bit. Returns
+ * the number of bits in the bitmap if no such bit exists.
+ */
+static inline size_t
+fb_ffu(fb_group_t *fb, size_t nbits, size_t min_bit) {
+ return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ false,
+ /* forward */ true);
+}
+
+/* The same, but looks for a set bit. */
+static inline size_t
+fb_ffs(fb_group_t *fb, size_t nbits, size_t min_bit) {
+ return (size_t)fb_find_impl(fb, nbits, min_bit, /* val */ true,
+ /* forward */ true);
+}
+
+/*
+ * Find the last unset bit in the bitmap with an index <= max_bit. Returns -1
+ * if no such bit exists.
+ */
+static inline ssize_t
+fb_flu(fb_group_t *fb, size_t nbits, size_t max_bit) {
+ return fb_find_impl(fb, nbits, max_bit, /* val */ false,
+ /* forward */ false);
+}
+
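+/* The same, but looks for a set bit. */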
+static inline ssize_t
+fb_fls(fb_group_t *fb, size_t nbits, size_t max_bit) {
+ return fb_find_impl(fb, nbits, max_bit, /* val */ true,
+ /* forward */ false);
+}
+
+/* Returns whether or not we found a range. */
+JEMALLOC_ALWAYS_INLINE bool
+fb_iter_range_impl(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
+ size_t *r_len, bool val, bool forward) {
+ assert(start < nbits);
+ ssize_t next_range_begin = fb_find_impl(fb, nbits, start, val, forward);
+ if ((forward && next_range_begin == (ssize_t)nbits)
+ || (!forward && next_range_begin == (ssize_t)-1)) {
+ return false;
+ }
+ /* Half open range; the set bits are [begin, end). */
+ ssize_t next_range_end = fb_find_impl(fb, nbits, next_range_begin, !val,
+ forward);
+ if (forward) {
+ *r_begin = next_range_begin;
+ *r_len = next_range_end - next_range_begin;
+ } else {
+ *r_begin = next_range_end + 1;
+ *r_len = next_range_begin - next_range_end;
+ }
+ return true;
+}
+
+/*
+ * Used to iterate through ranges of set bits.
+ *
+ * Tries to find the next contiguous sequence of set bits with a first index >=
+ * start. If one exists, puts the earliest bit of the range in *r_begin, its
+ * length in *r_len, and returns true. Otherwise, returns false (without
+ * touching *r_begin or *r_len).
+ */
+static inline bool
+fb_srange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
+ size_t *r_len) {
+ return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
+ /* val */ true, /* forward */ true);
+}
+
+/*
+ * The same as fb_srange_iter, but searches backwards from start rather than
+ * forwards. (The position returned is still the earliest bit in the range).
+ */
+static inline bool
+fb_srange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
+ size_t *r_len) {
+ return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
+ /* val */ true, /* forward */ false);
+}
+
+/* Similar to fb_srange_iter, but searches for unset bits. */
+static inline bool
+fb_urange_iter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
+ size_t *r_len) {
+ return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
+ /* val */ false, /* forward */ true);
+}
+
+/* Similar to fb_srange_riter, but searches for unset bits. */
+static inline bool
+fb_urange_riter(fb_group_t *fb, size_t nbits, size_t start, size_t *r_begin,
+ size_t *r_len) {
+ return fb_iter_range_impl(fb, nbits, start, r_begin, r_len,
+ /* val */ false, /* forward */ false);
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+fb_range_longest_impl(fb_group_t *fb, size_t nbits, bool val) {
+ size_t begin = 0;
+ size_t longest_len = 0;
+ size_t len = 0;
+ while (begin < nbits && fb_iter_range_impl(fb, nbits, begin, &begin,
+ &len, val, /* forward */ true)) {
+ if (len > longest_len) {
+ longest_len = len;
+ }
+ begin += len;
+ }
+ return longest_len;
+}
+
+static inline size_t
+fb_srange_longest(fb_group_t *fb, size_t nbits) {
+ return fb_range_longest_impl(fb, nbits, /* val */ true);
+}
+
+static inline size_t
+fb_urange_longest(fb_group_t *fb, size_t nbits) {
+ return fb_range_longest_impl(fb, nbits, /* val */ false);
+}
+
+/*
+ * Initializes each bit of dst with the bitwise-AND of the corresponding bits of
+ * src1 and src2. All bitmaps must be the same size.
+ */
+static inline void
+fb_bit_and(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) {
+ size_t ngroups = FB_NGROUPS(nbits);
+ for (size_t i = 0; i < ngroups; i++) {
+ dst[i] = src1[i] & src2[i];
+ }
+}
+
+/* Like fb_bit_and, but with bitwise-OR. */
+static inline void
+fb_bit_or(fb_group_t *dst, fb_group_t *src1, fb_group_t *src2, size_t nbits) {
+ size_t ngroups = FB_NGROUPS(nbits);
+ for (size_t i = 0; i < ngroups; i++) {
+ dst[i] = src1[i] | src2[i];
+ }
+}
+
+/* Initializes dst bit i to the negation of source bit i. */
+static inline void
+fb_bit_not(fb_group_t *dst, fb_group_t *src, size_t nbits) {
+ size_t ngroups = FB_NGROUPS(nbits);
+ for (size_t i = 0; i < ngroups; i++) {
+ dst[i] = ~src[i];
+ }
+}
+
+#endif /* JEMALLOC_INTERNAL_FB_H */
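
All of fb.h rests on the same group arithmetic: bit / FB_GROUP_BITS selects a
word and bit % FB_GROUP_BITS selects a bit within it. A standalone mimic of
fb_init/fb_set/fb_get (demo names; this does not depend on jemalloc's headers):

#include <assert.h>
#include <limits.h>
#include <stddef.h>
#include <stdio.h>

typedef unsigned long demo_grp_t;
#define DEMO_GROUP_BITS (sizeof(demo_grp_t) * CHAR_BIT)
#define DEMO_NGROUPS(nbits) (((nbits) + DEMO_GROUP_BITS - 1) / DEMO_GROUP_BITS)

static void
demo_bit_set(demo_grp_t *fb, size_t bit) {
    fb[bit / DEMO_GROUP_BITS] |= (demo_grp_t)1 << (bit % DEMO_GROUP_BITS);
}

static int
demo_bit_get(const demo_grp_t *fb, size_t bit) {
    return (int)((fb[bit / DEMO_GROUP_BITS] >> (bit % DEMO_GROUP_BITS)) & 1);
}

int
main(void) {
    enum { NBITS = 100 };
    demo_grp_t fb[DEMO_NGROUPS(NBITS)] = {0}; /* all-zeros, like fb_init */
    demo_bit_set(fb, 3);
    demo_bit_set(fb, 70); /* lands in the second group on 64-bit longs */
    assert(demo_bit_get(fb, 3) && demo_bit_get(fb, 70) &&
        !demo_bit_get(fb, 4));
    printf("ok\n");
    return 0;
}
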
diff --git a/contrib/jemalloc/include/jemalloc/internal/fxp.h b/contrib/jemalloc/include/jemalloc/internal/fxp.h
new file mode 100644
index 000000000000..415a982890a1
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/fxp.h
@@ -0,0 +1,126 @@
+#ifndef JEMALLOC_INTERNAL_FXP_H
+#define JEMALLOC_INTERNAL_FXP_H
+
+/*
+ * A simple fixed-point math implementation, supporting only unsigned values
+ * (with overflow being an error).
+ *
+ * It's not in general safe to use floating point in core code, because various
+ * libc implementations we get linked against can assume that malloc won't touch
+ * floating point state and call it with an unusual calling convention.
+ */
+
+/*
+ * High 16 bits are the integer part, low 16 are the fractional part. Or
+ * equivalently, repr == 2**16 * val, where we use "val" to refer to the
+ * (imaginary) fractional representation of the true value.
+ *
+ * We pick a uint32_t here since it's convenient in some places to
+ * double the representation size (i.e. multiplication and division use
+ * 64-bit integer types), and a uint64_t is the largest type we're
+ * certain is available.
+ */
+typedef uint32_t fxp_t;
+#define FXP_INIT_INT(x) ((x) << 16)
+#define FXP_INIT_PERCENT(pct) (((pct) << 16) / 100)
+
+/*
+ * Amount of precision used in parsing and printing numbers. The integer bound
+ * is simply because the integer part of the number gets 16 bits, and so is
+ * bounded by 65536.
+ *
+ * We use a lot of precision for the fractional part, even though most of it
+ * gets rounded off; this lets us get exact values for the important special
+ * case where the denominator is a small power of 2 (for instance,
+ * 1/512 == 0.001953125 is exactly representable even with only 16 bits of
+ * fractional precision). We need to left-shift by 16 before dividing by
+ * 10**precision, so we pick precision to be floor(log10(2**48)) = 14.
+ */
+#define FXP_INTEGER_PART_DIGITS 5
+#define FXP_FRACTIONAL_PART_DIGITS 14
+
+/*
+ * In addition to the integer and fractional parts of the number, we need to
+ * include a null character and (possibly) a decimal point.
+ */
+#define FXP_BUF_SIZE (FXP_INTEGER_PART_DIGITS + FXP_FRACTIONAL_PART_DIGITS + 2)
+
+static inline fxp_t
+fxp_add(fxp_t a, fxp_t b) {
+ return a + b;
+}
+
+static inline fxp_t
+fxp_sub(fxp_t a, fxp_t b) {
+ assert(a >= b);
+ return a - b;
+}
+
+static inline fxp_t
+fxp_mul(fxp_t a, fxp_t b) {
+ uint64_t unshifted = (uint64_t)a * (uint64_t)b;
+ /*
+ * Unshifted is (a.val * 2**16) * (b.val * 2**16)
+ * == (a.val * b.val) * 2**32, but we want
+ * (a.val * b.val) * 2 ** 16.
+ */
+ return (uint32_t)(unshifted >> 16);
+}
+
+static inline fxp_t
+fxp_div(fxp_t a, fxp_t b) {
+ assert(b != 0);
+ uint64_t unshifted = ((uint64_t)a << 32) / (uint64_t)b;
+ /*
+ * Unshifted is (a.val * 2**16) * (2**32) / (b.val * 2**16)
+ * == (a.val / b.val) * (2 ** 32), which again corresponds to a right
+ * shift of 16.
+ */
+ return (uint32_t)(unshifted >> 16);
+}
+
+static inline uint32_t
+fxp_round_down(fxp_t a) {
+ return a >> 16;
+}
+
+static inline uint32_t
+fxp_round_nearest(fxp_t a) {
+ uint32_t fractional_part = (a & ((1U << 16) - 1));
+ uint32_t increment = (uint32_t)(fractional_part >= (1U << 15));
+ return (a >> 16) + increment;
+}
+
+/*
+ * Approximately computes x * frac, without the size limitations that would be
+ * imposed by converting x_orig to an fxp_t.
+ */
+static inline size_t
+fxp_mul_frac(size_t x_orig, fxp_t frac) {
+ assert(frac <= (1U << 16));
+ /*
+ * Work around an over-enthusiastic warning about type limits below (on
+ * 32-bit platforms, a size_t is always less than 1ULL << 48).
+ */
+ uint64_t x = (uint64_t)x_orig;
+ /*
+ * If we can guarantee no overflow, multiply first before shifting, to
+ * preserve some precision. Otherwise, shift first and then multiply.
+ * In the latter case, we only lose the low 16 bits of a 48-bit number,
+ * so we're still accurate to within 1/2**32.
+ */
+ if (x < (1ULL << 48)) {
+ return (size_t)((x * frac) >> 16);
+ } else {
+ return (size_t)((x >> 16) * (uint64_t)frac);
+ }
+}
+
+/*
+ * Returns true on error. Otherwise, returns false and updates *end to point
+ * to the first character not parsed (because it wasn't a digit).
+ */
+bool fxp_parse(fxp_t *a, const char *ptr, char **end);
+void fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]);
+
+#endif /* JEMALLOC_INTERNAL_FXP_H */
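
A standalone sketch of the 16.16 arithmetic above, mimicking fxp_mul and the
FXP_INIT_* macros with demo names rather than the real header; 0.5 * 3.0 should
come out as representation 0x18000, i.e. 1.5:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef uint32_t demo_fxp_t;
#define DEMO_FXP_INT(x) ((demo_fxp_t)((x) << 16))
#define DEMO_FXP_PCT(p) ((demo_fxp_t)((((uint32_t)(p)) << 16) / 100))

static demo_fxp_t
demo_fxp_mul(demo_fxp_t a, demo_fxp_t b) {
    /* (a*2^16)*(b*2^16) == (a*b)*2^32; shift back down to a 2^16 repr. */
    return (demo_fxp_t)(((uint64_t)a * b) >> 16);
}

int
main(void) {
    demo_fxp_t half = DEMO_FXP_PCT(50); /* 0.5 == repr 0x8000 */
    demo_fxp_t three = DEMO_FXP_INT(3); /* 3.0 == repr 0x30000 */
    demo_fxp_t prod = demo_fxp_mul(half, three);
    assert(prod == DEMO_FXP_INT(3) / 2); /* 1.5 == repr 0x18000 */
    printf("0x%" PRIx32 "\n", prod);
    return 0;
}
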
diff --git a/contrib/jemalloc/include/jemalloc/internal/hash.h b/contrib/jemalloc/include/jemalloc/internal/hash.h
index 935ddcfc95a1..7f945679efbc 100644
--- a/contrib/jemalloc/include/jemalloc/internal/hash.h
+++ b/contrib/jemalloc/include/jemalloc/internal/hash.h
@@ -104,8 +104,8 @@ hash_x86_32(const void *key, int len, uint32_t seed) {
uint32_t k1 = 0;
switch (len & 3) {
- case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH
- case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH
+ case 3: k1 ^= tail[2] << 16; JEMALLOC_FALLTHROUGH;
+ case 2: k1 ^= tail[1] << 8; JEMALLOC_FALLTHROUGH;
case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15);
k1 *= c2; h1 ^= k1;
}
@@ -177,26 +177,26 @@ hash_x86_128(const void *key, const int len, uint32_t seed,
uint32_t k4 = 0;
switch (len & 15) {
- case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH
- case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH
+ case 15: k4 ^= tail[14] << 16; JEMALLOC_FALLTHROUGH;
+ case 14: k4 ^= tail[13] << 8; JEMALLOC_FALLTHROUGH;
case 13: k4 ^= tail[12] << 0;
k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4;
- JEMALLOC_FALLTHROUGH
- case 12: k3 ^= tail[11] << 24; JEMALLOC_FALLTHROUGH
- case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH
- case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH
+ JEMALLOC_FALLTHROUGH;
+ case 12: k3 ^= (uint32_t) tail[11] << 24; JEMALLOC_FALLTHROUGH;
+ case 11: k3 ^= tail[10] << 16; JEMALLOC_FALLTHROUGH;
+ case 10: k3 ^= tail[ 9] << 8; JEMALLOC_FALLTHROUGH;
case 9: k3 ^= tail[ 8] << 0;
- k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
- JEMALLOC_FALLTHROUGH
- case 8: k2 ^= tail[ 7] << 24; JEMALLOC_FALLTHROUGH
- case 7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH
- case 6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH
+ k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3;
+ JEMALLOC_FALLTHROUGH;
+ case 8: k2 ^= (uint32_t) tail[ 7] << 24; JEMALLOC_FALLTHROUGH;
+ case 7: k2 ^= tail[ 6] << 16; JEMALLOC_FALLTHROUGH;
+ case 6: k2 ^= tail[ 5] << 8; JEMALLOC_FALLTHROUGH;
case 5: k2 ^= tail[ 4] << 0;
k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2;
- JEMALLOC_FALLTHROUGH
- case 4: k1 ^= tail[ 3] << 24; JEMALLOC_FALLTHROUGH
- case 3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH
- case 2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH
+ JEMALLOC_FALLTHROUGH;
+ case 4: k1 ^= (uint32_t) tail[ 3] << 24; JEMALLOC_FALLTHROUGH;
+ case 3: k1 ^= tail[ 2] << 16; JEMALLOC_FALLTHROUGH;
+ case 2: k1 ^= tail[ 1] << 8; JEMALLOC_FALLTHROUGH;
case 1: k1 ^= tail[ 0] << 0;
k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1;
break;
@@ -261,24 +261,25 @@ hash_x64_128(const void *key, const int len, const uint32_t seed,
uint64_t k2 = 0;
switch (len & 15) {
- case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH
- case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH
- case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH
- case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH
- case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH
- case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH
+ case 15: k2 ^= ((uint64_t)(tail[14])) << 48; JEMALLOC_FALLTHROUGH;
+ case 14: k2 ^= ((uint64_t)(tail[13])) << 40; JEMALLOC_FALLTHROUGH;
+ case 13: k2 ^= ((uint64_t)(tail[12])) << 32; JEMALLOC_FALLTHROUGH;
+ case 12: k2 ^= ((uint64_t)(tail[11])) << 24; JEMALLOC_FALLTHROUGH;
+ case 11: k2 ^= ((uint64_t)(tail[10])) << 16; JEMALLOC_FALLTHROUGH;
+ case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; JEMALLOC_FALLTHROUGH;
case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0;
k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2;
- JEMALLOC_FALLTHROUGH
- case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH
- case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH
- case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH
- case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH
- case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH
- case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH
- case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH
+ JEMALLOC_FALLTHROUGH;
+ case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; JEMALLOC_FALLTHROUGH;
+ case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; JEMALLOC_FALLTHROUGH;
+ case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; JEMALLOC_FALLTHROUGH;
+ case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; JEMALLOC_FALLTHROUGH;
+ case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; JEMALLOC_FALLTHROUGH;
+ case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; JEMALLOC_FALLTHROUGH;
+ case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; JEMALLOC_FALLTHROUGH;
case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0;
k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1;
+ break;
}
}
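
This hash.h hunk is mechanical: it appends the terminating semicolon to each
JEMALLOC_FALLTHROUGH use (plus a few uint32_t casts and a final break). The
semicolon matters once the macro expands to a bare fallthrough attribute, which
forms a complete statement only when the caller terminates it. A sketch with a
hypothetical DEMO_FALLTHROUGH macro (not jemalloc's definition), assuming a
GCC- or Clang-style compiler that understands the attribute:

#include <stdio.h>

#if defined(__GNUC__)
#define DEMO_FALLTHROUGH __attribute__((__fallthrough__))
#else
#define DEMO_FALLTHROUGH do {} while (0)
#endif

static int
demo_tail_sum(int len) {
    int acc = 0;
    switch (len & 3) {
    case 3: acc += 4; DEMO_FALLTHROUGH; /* ';' completes the statement */
    case 2: acc += 2; DEMO_FALLTHROUGH;
    case 1: acc += 1;
    }
    return acc;
}

int
main(void) {
    printf("%d %d %d\n", demo_tail_sum(1), demo_tail_sum(2), demo_tail_sum(3));
    return 0;
}
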
diff --git a/contrib/jemalloc/include/jemalloc/internal/hpa.h b/contrib/jemalloc/include/jemalloc/internal/hpa.h
new file mode 100644
index 000000000000..f3562853e802
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/hpa.h
@@ -0,0 +1,182 @@
+#ifndef JEMALLOC_INTERNAL_HPA_H
+#define JEMALLOC_INTERNAL_HPA_H
+
+#include "jemalloc/internal/exp_grow.h"
+#include "jemalloc/internal/hpa_hooks.h"
+#include "jemalloc/internal/hpa_opts.h"
+#include "jemalloc/internal/pai.h"
+#include "jemalloc/internal/psset.h"
+
+typedef struct hpa_central_s hpa_central_t;
+struct hpa_central_s {
+ /*
+ * The mutex guarding most of the operations on the central data
+ * structure.
+ */
+ malloc_mutex_t mtx;
+ /*
+ * Guards expansion of eden. We separate this from the regular mutex so
+ * that cheaper operations can still continue while we're doing the OS
+ * call.
+ */
+ malloc_mutex_t grow_mtx;
+ /*
+ * Either NULL (if empty), or a hugepage-aligned region spanning an
+ * integer number of hugepages. We carve hugepages off one at a time
+ * to satisfy new pageslab requests.
+ *
+ * Guarded by grow_mtx.
+ */
+ void *eden;
+ size_t eden_len;
+ /* Source for metadata. */
+ base_t *base;
+ /* Number of grow operations done on this hpa_central_t. */
+ uint64_t age_counter;
+
+ /* The HPA hooks. */
+ hpa_hooks_t hooks;
+};
+
+typedef struct hpa_shard_nonderived_stats_s hpa_shard_nonderived_stats_t;
+struct hpa_shard_nonderived_stats_s {
+ /*
+ * The number of times we've purged within a hugepage.
+ *
+ * Guarded by mtx.
+ */
+ uint64_t npurge_passes;
+ /*
+ * The number of individual purge calls we perform (which should always
+ * be bigger than npurge_passes, since each pass purges at least one
+ * extent within a hugepage).
+ *
+ * Guarded by mtx.
+ */
+ uint64_t npurges;
+
+ /*
+ * The number of times we've hugified a pageslab.
+ *
+ * Guarded by mtx.
+ */
+ uint64_t nhugifies;
+ /*
+ * The number of times we've dehugified a pageslab.
+ *
+ * Guarded by mtx.
+ */
+ uint64_t ndehugifies;
+};
+
+/* Completely derived; only used by CTL. */
+typedef struct hpa_shard_stats_s hpa_shard_stats_t;
+struct hpa_shard_stats_s {
+ psset_stats_t psset_stats;
+ hpa_shard_nonderived_stats_t nonderived_stats;
+};
+
+typedef struct hpa_shard_s hpa_shard_t;
+struct hpa_shard_s {
+ /*
+ * pai must be the first member; we cast from a pointer to it to a
+ * pointer to the hpa_shard_t.
+ */
+ pai_t pai;
+
+ /* The central allocator we get our hugepages from. */
+ hpa_central_t *central;
+ /* Protects most of this shard's state. */
+ malloc_mutex_t mtx;
+ /*
+ * Guards the shard's access to the central allocator (preventing
+ * multiple threads operating on this shard from accessing the central
+ * allocator).
+ */
+ malloc_mutex_t grow_mtx;
+ /* The base metadata allocator. */
+ base_t *base;
+
+ /*
+ * This edata cache is the one we use when allocating a small extent
+ * from a pageslab. The pageslab itself comes from the centralized
+ * allocator, and so will use its edata_cache.
+ */
+ edata_cache_fast_t ecf;
+
+ psset_t psset;
+
+ /*
+ * How many grow operations have occurred.
+ *
+ * Guarded by grow_mtx.
+ */
+ uint64_t age_counter;
+
+ /* The arena ind we're associated with. */
+ unsigned ind;
+
+ /*
+ * Our emap. This is just a cache of the emap pointer in the associated
+ * hpa_central.
+ */
+ emap_t *emap;
+
+ /* The configuration choices for this hpa shard. */
+ hpa_shard_opts_t opts;
+
+ /*
+ * How many pages have we started but not yet finished purging in this
+ * hpa shard.
+ */
+ size_t npending_purge;
+
+ /*
+ * Those stats which are copied directly into the CTL-centric hpa shard
+ * stats.
+ */
+ hpa_shard_nonderived_stats_t stats;
+
+ /*
+ * Last time we performed purge on this shard.
+ */
+ nstime_t last_purge;
+};
+
+/*
+ * Whether or not the HPA can be used given the current configuration. This is
+ * not necessarily a guarantee that it backs its allocations by hugepages,
+ * just that it can function properly given the system it's running on.
+ */
+bool hpa_supported();
+bool hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks);
+bool hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
+ base_t *base, edata_cache_t *edata_cache, unsigned ind,
+ const hpa_shard_opts_t *opts);
+
+void hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src);
+void hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
+ hpa_shard_stats_t *dst);
+
+/*
+ * Notify the shard that we won't use it for allocations much longer. Due to
+ * the possibility of races, we don't actually prevent allocations; just flush
+ * and disable the embedded edata_cache_fast.
+ */
+void hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard);
+void hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard);
+
+void hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard,
+ bool deferral_allowed);
+void hpa_shard_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard);
+
+/*
+ * We share the fork ordering with the PA and arena prefork handling; that's why
+ * these are 3 and 4 rather than 0 and 1.
+ */
+void hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard);
+void hpa_shard_prefork4(tsdn_t *tsdn, hpa_shard_t *shard);
+void hpa_shard_postfork_parent(tsdn_t *tsdn, hpa_shard_t *shard);
+void hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard);
+
+#endif /* JEMALLOC_INTERNAL_HPA_H */
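
The comment on the pai member relies on the C guarantee that a pointer to a
struct converts to and from a pointer to its first member. A standalone sketch
of that downcast idiom, using demo types rather than the real pai_t and
hpa_shard_t:

#include <stddef.h>
#include <stdio.h>

typedef struct demo_pai_s demo_pai_t;
struct demo_pai_s {
    int (*alloc)(demo_pai_t *self, size_t size);
};

typedef struct demo_shard_s {
    demo_pai_t pai; /* must stay the first member for the cast below */
    int id;
} demo_shard_t;

static int
demo_shard_alloc(demo_pai_t *self, size_t size) {
    /* Legal: self points at the first member of a demo_shard_t. */
    demo_shard_t *shard = (demo_shard_t *)self;
    printf("shard %d: alloc %zu\n", shard->id, size);
    return 0;
}

int
main(void) {
    demo_shard_t shard = { { demo_shard_alloc }, 7 };
    demo_pai_t *pai = &shard.pai; /* callers see only the interface part */
    return pai->alloc(pai, 4096);
}
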
diff --git a/contrib/jemalloc/include/jemalloc/internal/hpa_hooks.h b/contrib/jemalloc/include/jemalloc/internal/hpa_hooks.h
new file mode 100644
index 000000000000..4ea221cb0b42
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/hpa_hooks.h
@@ -0,0 +1,17 @@
+#ifndef JEMALLOC_INTERNAL_HPA_HOOKS_H
+#define JEMALLOC_INTERNAL_HPA_HOOKS_H
+
+typedef struct hpa_hooks_s hpa_hooks_t;
+struct hpa_hooks_s {
+ void *(*map)(size_t size);
+ void (*unmap)(void *ptr, size_t size);
+ void (*purge)(void *ptr, size_t size);
+ void (*hugify)(void *ptr, size_t size);
+ void (*dehugify)(void *ptr, size_t size);
+ void (*curtime)(nstime_t *r_time, bool first_reading);
+ uint64_t (*ms_since)(nstime_t *r_time);
+};
+
+extern hpa_hooks_t hpa_hooks_default;
+
+#endif /* JEMALLOC_INTERNAL_HPA_HOOKS_H */
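
Since the hooks table is a plain struct of function pointers, a custom backend
is wired up by filling one in. A reduced sketch with a hypothetical two-entry
table (the real hpa_hooks_t above has seven entries); the mmap-based map hook
is illustrative only:

#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

typedef struct demo_hooks_s {
    void *(*map)(size_t size);
    void (*unmap)(void *ptr, size_t size);
} demo_hooks_t;

static void *
demo_map(size_t size) {
    void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANON, -1, 0);
    return (p == MAP_FAILED) ? NULL : p;
}

static void
demo_unmap(void *ptr, size_t size) {
    munmap(ptr, size);
}

static const demo_hooks_t demo_hooks = { demo_map, demo_unmap };

int
main(void) {
    size_t sz = (size_t)2 << 20; /* one 2 MiB "hugepage" worth */
    void *p = demo_hooks.map(sz);
    printf("mapped %p\n", p);
    if (p != NULL) {
        demo_hooks.unmap(p, sz);
    }
    return 0;
}
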
diff --git a/contrib/jemalloc/include/jemalloc/internal/hpa_opts.h b/contrib/jemalloc/include/jemalloc/internal/hpa_opts.h
new file mode 100644
index 000000000000..ee84fea13757
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/hpa_opts.h
@@ -0,0 +1,74 @@
+#ifndef JEMALLOC_INTERNAL_HPA_OPTS_H
+#define JEMALLOC_INTERNAL_HPA_OPTS_H
+
+#include "jemalloc/internal/fxp.h"
+
+/*
+ * This file is morally part of hpa.h, but is split out for header-ordering
+ * reasons.
+ */
+
+typedef struct hpa_shard_opts_s hpa_shard_opts_t;
+struct hpa_shard_opts_s {
+ /*
+ * The largest size we'll allocate out of the shard. For those
+ * allocations refused, the caller (in practice, the PA module) will
+ * fall back to the more general (for now) PAC, which can always handle
+ * any allocation request.
+ */
+ size_t slab_max_alloc;
+
+ /*
+ * When the number of active bytes in a hugepage is >=
+ * hugification_threshold, we force hugify it.
+ */
+ size_t hugification_threshold;
+
+ /*
+ * The HPA purges whenever the number of dirty pages exceeds dirty_mult *
+ * active_pages. This may be set to (fxp_t)-1 to disable purging.
+ */
+ fxp_t dirty_mult;
+
+ /*
+ * Whether or not the PAI methods are allowed to defer work to a
+ * subsequent hpa_shard_do_deferred_work() call. Practically, this
+ * corresponds to background threads being enabled. We track this
+ * ourselves for encapsulation purposes.
+ */
+ bool deferral_allowed;
+
+ /*
+ * How long a hugepage has to be a hugification candidate before it will
+ * actually get hugified.
+ */
+ uint64_t hugify_delay_ms;
+
+ /*
+ * Minimum amount of time between purges.
+ */
+ uint64_t min_purge_interval_ms;
+};
+
+#define HPA_SHARD_OPTS_DEFAULT { \
+ /* slab_max_alloc */ \
+ 64 * 1024, \
+ /* hugification_threshold */ \
+ HUGEPAGE * 95 / 100, \
+ /* dirty_mult */ \
+ FXP_INIT_PERCENT(25), \
+ /* \
+ * deferral_allowed \
+ * \
+ * Really, this is always set by the arena during creation \
+ * or by an hpa_shard_set_deferral_allowed call, so the value \
+ * we put here doesn't matter. \
+ */ \
+ false, \
+ /* hugify_delay_ms */ \
+ 10 * 1000, \
+ /* min_purge_interval_ms */ \
+ 5 * 1000 \
+}
+
+#endif /* JEMALLOC_INTERNAL_HPA_OPTS_H */
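
Tying dirty_mult back to the fxp module: the purge trigger amounts to comparing
the dirty page count against a fixed-point fraction of the active page count. A
standalone sketch using the same 16.16 math as fxp_mul_frac (demo names, not
the real API):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static size_t
demo_mul_frac(size_t x, uint32_t frac /* 16.16 fixed point */) {
    uint64_t wide = (uint64_t)x;
    /* Multiply before shifting when safe, to keep fractional precision. */
    return (wide < ((uint64_t)1 << 48))
        ? (size_t)((wide * frac) >> 16)
        : (size_t)((wide >> 16) * frac);
}

int
main(void) {
    uint32_t dirty_mult = (25 << 16) / 100; /* FXP_INIT_PERCENT(25) */
    size_t active_pages = 1000;
    size_t ndirty = 300;
    /* Purge when dirty pages exceed dirty_mult * active pages. */
    int purge = ndirty > demo_mul_frac(active_pages, dirty_mult);
    printf("threshold=%zu purge=%d\n",
        demo_mul_frac(active_pages, dirty_mult), purge); /* 250, 1 */
    return 0;
}
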
diff --git a/contrib/jemalloc/include/jemalloc/internal/hpdata.h b/contrib/jemalloc/include/jemalloc/internal/hpdata.h
new file mode 100644
index 000000000000..1fb534db016a
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/hpdata.h
@@ -0,0 +1,413 @@
+#ifndef JEMALLOC_INTERNAL_HPDATA_H
+#define JEMALLOC_INTERNAL_HPDATA_H
+
+#include "jemalloc/internal/fb.h"
+#include "jemalloc/internal/ph.h"
+#include "jemalloc/internal/ql.h"
+#include "jemalloc/internal/typed_list.h"
+
+/*
+ * The metadata representation we use for extents in hugepages. While the PAC
+ * uses the edata_t to represent both active and inactive extents, the HPA only
+ * uses the edata_t for active ones; instead, inactive extent state is tracked
+ * within hpdata associated with the enclosing hugepage-sized, hugepage-aligned
+ * region of virtual address space.
+ *
+ * An hpdata need not be "truly" backed by a hugepage (which is not necessarily
+ * an observable property of any given region of address space). It's just
+ * hugepage-sized and hugepage-aligned; it's *potentially* huge.
+ */
+typedef struct hpdata_s hpdata_t;
+ph_structs(hpdata_age_heap, hpdata_t);
+struct hpdata_s {
+ /*
+ * We likewise follow the edata convention of mangling names and forcing
+ * the use of accessors -- this lets us add some consistency checks on
+ * access.
+ */
+
+ /*
+ * The address of the hugepage in question. This can't be named h_addr,
+ * since that conflicts with a macro defined in Windows headers.
+ */
+ void *h_address;
+ /* Its age (measured in psset operations). */
+ uint64_t h_age;
+ /* Whether or not we think the hugepage is mapped that way by the OS. */
+ bool h_huge;
+
+ /*
+ * For some properties, we keep parallel sets of bools; h_foo_allowed
+ * and h_in_psset_foo_container. This is a decoupling mechanism that
+ * keeps the hpa (which manages policies) separate from the psset
+ * (which is the mechanism used to enforce those policies). This allows
+ * all the container management logic to live in one place, without the
+ * HPA needing to know or care how that happens.
+ */
+
+ /*
+ * Whether or not the hpdata is allowed to be used to serve allocations,
+ * and whether or not the psset is currently tracking it as such.
+ */
+ bool h_alloc_allowed;
+ bool h_in_psset_alloc_container;
+
+ /*
+ * The same, but with purging. There's no corresponding
+ * h_in_psset_purge_container, because the psset (currently) always
+ * removes hpdatas from their containers during updates (to implement
+ * LRU for purging).
+ */
+ bool h_purge_allowed;
+
+ /* And with hugifying. */
+ bool h_hugify_allowed;
+ /* When we became a hugification candidate. */
+ nstime_t h_time_hugify_allowed;
+ bool h_in_psset_hugify_container;
+
+ /* Whether or not a purge or hugify is currently happening. */
+ bool h_mid_purge;
+ bool h_mid_hugify;
+
+ /*
+ * Whether or not the hpdata is being updated in the psset (i.e. if
+ * there has been a psset_update_begin call issued without a matching
+ * psset_update_end call). Eventually this will expand to other types
+ * of updates.
+ */
+ bool h_updating;
+
+ /* Whether or not the hpdata is in a psset. */
+ bool h_in_psset;
+
+ union {
+ /* When nonempty (and also nonfull), used by the psset bins. */
+ hpdata_age_heap_link_t age_link;
+ /*
+ * When empty (or not corresponding to any hugepage), list
+ * linkage.
+ */
+ ql_elm(hpdata_t) ql_link_empty;
+ };
+
+ /*
+ * Linkage for the psset to track candidates for purging and hugifying.
+ */
+ ql_elm(hpdata_t) ql_link_purge;
+ ql_elm(hpdata_t) ql_link_hugify;
+
+ /* The length of the largest contiguous sequence of inactive pages. */
+ size_t h_longest_free_range;
+
+ /* Number of active pages. */
+ size_t h_nactive;
+
+ /* A bitmap with bits set in the active pages. */
+ fb_group_t active_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
+
+ /*
+ * Number of dirty or active pages, and a bitmap tracking them. One
+ * way to think of this is as which pages are dirty from the OS's
+ * perspective.
+ */
+ size_t h_ntouched;
+
+ /* The touched pages (using the same definition as above). */
+ fb_group_t touched_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
+};
+
+TYPED_LIST(hpdata_empty_list, hpdata_t, ql_link_empty)
+TYPED_LIST(hpdata_purge_list, hpdata_t, ql_link_purge)
+TYPED_LIST(hpdata_hugify_list, hpdata_t, ql_link_hugify)
+
+ph_proto(, hpdata_age_heap, hpdata_t);
+
+static inline void *
+hpdata_addr_get(const hpdata_t *hpdata) {
+ return hpdata->h_address;
+}
+
+static inline void
+hpdata_addr_set(hpdata_t *hpdata, void *addr) {
+ assert(HUGEPAGE_ADDR2BASE(addr) == addr);
+ hpdata->h_address = addr;
+}
+
+static inline uint64_t
+hpdata_age_get(const hpdata_t *hpdata) {
+ return hpdata->h_age;
+}
+
+static inline void
+hpdata_age_set(hpdata_t *hpdata, uint64_t age) {
+ hpdata->h_age = age;
+}
+
+static inline bool
+hpdata_huge_get(const hpdata_t *hpdata) {
+ return hpdata->h_huge;
+}
+
+static inline bool
+hpdata_alloc_allowed_get(const hpdata_t *hpdata) {
+ return hpdata->h_alloc_allowed;
+}
+
+static inline void
+hpdata_alloc_allowed_set(hpdata_t *hpdata, bool alloc_allowed) {
+ hpdata->h_alloc_allowed = alloc_allowed;
+}
+
+static inline bool
+hpdata_in_psset_alloc_container_get(const hpdata_t *hpdata) {
+ return hpdata->h_in_psset_alloc_container;
+}
+
+static inline void
+hpdata_in_psset_alloc_container_set(hpdata_t *hpdata, bool in_container) {
+ assert(in_container != hpdata->h_in_psset_alloc_container);
+ hpdata->h_in_psset_alloc_container = in_container;
+}
+
+static inline bool
+hpdata_purge_allowed_get(const hpdata_t *hpdata) {
+ return hpdata->h_purge_allowed;
+}
+
+static inline void
+hpdata_purge_allowed_set(hpdata_t *hpdata, bool purge_allowed) {
+ assert(purge_allowed == false || !hpdata->h_mid_purge);
+ hpdata->h_purge_allowed = purge_allowed;
+}
+
+static inline bool
+hpdata_hugify_allowed_get(const hpdata_t *hpdata) {
+ return hpdata->h_hugify_allowed;
+}
+
+static inline void
+hpdata_allow_hugify(hpdata_t *hpdata, nstime_t now) {
+ assert(!hpdata->h_mid_hugify);
+ hpdata->h_hugify_allowed = true;
+ hpdata->h_time_hugify_allowed = now;
+}
+
+static inline nstime_t
+hpdata_time_hugify_allowed(hpdata_t *hpdata) {
+ return hpdata->h_time_hugify_allowed;
+}
+
+static inline void
+hpdata_disallow_hugify(hpdata_t *hpdata) {
+ hpdata->h_hugify_allowed = false;
+}
+
+static inline bool
+hpdata_in_psset_hugify_container_get(const hpdata_t *hpdata) {
+ return hpdata->h_in_psset_hugify_container;
+}
+
+static inline void
+hpdata_in_psset_hugify_container_set(hpdata_t *hpdata, bool in_container) {
+ assert(in_container != hpdata->h_in_psset_hugify_container);
+ hpdata->h_in_psset_hugify_container = in_container;
+}
+
+static inline bool
+hpdata_mid_purge_get(const hpdata_t *hpdata) {
+ return hpdata->h_mid_purge;
+}
+
+static inline void
+hpdata_mid_purge_set(hpdata_t *hpdata, bool mid_purge) {
+ assert(mid_purge != hpdata->h_mid_purge);
+ hpdata->h_mid_purge = mid_purge;
+}
+
+static inline bool
+hpdata_mid_hugify_get(const hpdata_t *hpdata) {
+ return hpdata->h_mid_hugify;
+}
+
+static inline void
+hpdata_mid_hugify_set(hpdata_t *hpdata, bool mid_hugify) {
+ assert(mid_hugify != hpdata->h_mid_hugify);
+ hpdata->h_mid_hugify = mid_hugify;
+}
+
+static inline bool
+hpdata_changing_state_get(const hpdata_t *hpdata) {
+ return hpdata->h_mid_purge || hpdata->h_mid_hugify;
+}
+
+static inline bool
+hpdata_updating_get(const hpdata_t *hpdata) {
+ return hpdata->h_updating;
+}
+
+static inline void
+hpdata_updating_set(hpdata_t *hpdata, bool updating) {
+ assert(updating != hpdata->h_updating);
+ hpdata->h_updating = updating;
+}
+
+static inline bool
+hpdata_in_psset_get(const hpdata_t *hpdata) {
+ return hpdata->h_in_psset;
+}
+
+static inline void
+hpdata_in_psset_set(hpdata_t *hpdata, bool in_psset) {
+ assert(in_psset != hpdata->h_in_psset);
+ hpdata->h_in_psset = in_psset;
+}
+
+static inline size_t
+hpdata_longest_free_range_get(const hpdata_t *hpdata) {
+ return hpdata->h_longest_free_range;
+}
+
+static inline void
+hpdata_longest_free_range_set(hpdata_t *hpdata, size_t longest_free_range) {
+ assert(longest_free_range <= HUGEPAGE_PAGES);
+ hpdata->h_longest_free_range = longest_free_range;
+}
+
+static inline size_t
+hpdata_nactive_get(hpdata_t *hpdata) {
+ return hpdata->h_nactive;
+}
+
+static inline size_t
+hpdata_ntouched_get(hpdata_t *hpdata) {
+ return hpdata->h_ntouched;
+}
+
+static inline size_t
+hpdata_ndirty_get(hpdata_t *hpdata) {
+ return hpdata->h_ntouched - hpdata->h_nactive;
+}
+
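+/* Retained pages are those not currently touched (never used, or since purged). */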
+static inline size_t
+hpdata_nretained_get(hpdata_t *hpdata) {
+ return HUGEPAGE_PAGES - hpdata->h_ntouched;
+}
+
+static inline void
+hpdata_assert_empty(hpdata_t *hpdata) {
+ assert(fb_empty(hpdata->active_pages, HUGEPAGE_PAGES));
+ assert(hpdata->h_nactive == 0);
+}
+
+/*
+ * Only used in tests, and in hpdata_assert_consistent, below. Verifies some
+ * consistency properties of the hpdata (e.g. that cached counts of page stats
+ * match computed ones).
+ */
+static inline bool
+hpdata_consistent(hpdata_t *hpdata) {
+ if (fb_urange_longest(hpdata->active_pages, HUGEPAGE_PAGES)
+ != hpdata_longest_free_range_get(hpdata)) {
+ return false;
+ }
+ if (fb_scount(hpdata->active_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)
+ != hpdata->h_nactive) {
+ return false;
+ }
+ if (fb_scount(hpdata->touched_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES)
+ != hpdata->h_ntouched) {
+ return false;
+ }
+ if (hpdata->h_ntouched < hpdata->h_nactive) {
+ return false;
+ }
+ if (hpdata->h_huge && hpdata->h_ntouched != HUGEPAGE_PAGES) {
+ return false;
+ }
+ if (hpdata_changing_state_get(hpdata)
+ && (hpdata->h_purge_allowed || hpdata->h_hugify_allowed)) {
+ return false;
+ }
+ if (hpdata_hugify_allowed_get(hpdata)
+ != hpdata_in_psset_hugify_container_get(hpdata)) {
+ return false;
+ }
+ return true;
+}
+
+static inline void
+hpdata_assert_consistent(hpdata_t *hpdata) {
+ assert(hpdata_consistent(hpdata));
+}
+
+static inline bool
+hpdata_empty(hpdata_t *hpdata) {
+ return hpdata->h_nactive == 0;
+}
+
+static inline bool
+hpdata_full(hpdata_t *hpdata) {
+ return hpdata->h_nactive == HUGEPAGE_PAGES;
+}
+
+void hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age);
+
+/*
+ * Given an hpdata which can serve an allocation request, pick and reserve an
+ * offset within that hugepage to back the allocation.
+ */
+void *hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz);
+void hpdata_unreserve(hpdata_t *hpdata, void *begin, size_t sz);
+
+/*
+ * The hpdata_purge_state_t allows grabbing the metadata required to purge
+ * subranges of a hugepage while holding a lock, dropping the lock during the
+ * actual purging, and reacquiring it to update the metadata again.
+ */
+typedef struct hpdata_purge_state_s hpdata_purge_state_t;
+struct hpdata_purge_state_s {
+ size_t npurged;
+ size_t ndirty_to_purge;
+ fb_group_t to_purge[FB_NGROUPS(HUGEPAGE_PAGES)];
+ size_t next_purge_search_begin;
+};
+
+/*
+ * Initializes purge state. The access to hpdata must be externally
+ * synchronized with other hpdata_* calls.
+ *
+ * You can tell whether or not a thread is purging or hugifying a given hpdata
+ * via hpdata_changing_state_get(hpdata). Racing hugification or purging
+ * operations aren't allowed.
+ *
+ * Once you begin purging, you have to follow through and call
+ * hpdata_purge_next until you're done, and then call hpdata_purge_end.
+ * Allocating out of an hpdata undergoing purging is not allowed.
+ *
+ * Returns the number of dirty pages that will be purged.
+ */
+size_t hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state);
+
+/*
+ * If there are more extents to purge, sets *r_purge_addr and *r_purge_size to
+ * the address and size of the next range to purge, and returns true.
+ * Otherwise, returns false to indicate that we're done.
+ *
+ * This requires exclusive access to the purge state, but *not* to the hpdata.
+ * In particular, unreserve calls are allowed while purging (i.e. you can dalloc
+ * into one part of the hpdata while purging a different part).
+ */
+bool hpdata_purge_next(hpdata_t *hpdata, hpdata_purge_state_t *purge_state,
+ void **r_purge_addr, size_t *r_purge_size);
+/*
+ * Updates the hpdata metadata after all purging is done. Needs external
+ * synchronization.
+ */
+void hpdata_purge_end(hpdata_t *hpdata, hpdata_purge_state_t *purge_state);
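+
+/*
+ * A minimal usage sketch (not part of this header) of the begin/next/end
+ * protocol, assuming a hypothetical caller-managed mutex mtx guarding the
+ * hpdata, and using pages_purge_forced() as the purging primitive:
+ *
+ *   hpdata_purge_state_t purge_state;
+ *   malloc_mutex_lock(tsdn, &mtx);
+ *   size_t ndirty = hpdata_purge_begin(hpdata, &purge_state);
+ *   malloc_mutex_unlock(tsdn, &mtx);
+ *
+ *   void *addr;
+ *   size_t size;
+ *   while (hpdata_purge_next(hpdata, &purge_state, &addr, &size)) {
+ *           pages_purge_forced(addr, size);
+ *   }
+ *
+ *   malloc_mutex_lock(tsdn, &mtx);
+ *   hpdata_purge_end(hpdata, &purge_state);
+ *   malloc_mutex_unlock(tsdn, &mtx);
+ */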
+
+void hpdata_hugify(hpdata_t *hpdata);
+void hpdata_dehugify(hpdata_t *hpdata);
+
+#endif /* JEMALLOC_INTERNAL_HPDATA_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/inspect.h b/contrib/jemalloc/include/jemalloc/internal/inspect.h
new file mode 100644
index 000000000000..65fef51dfa81
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/inspect.h
@@ -0,0 +1,51 @@
+#ifndef JEMALLOC_INTERNAL_INSPECT_H
+#define JEMALLOC_INTERNAL_INSPECT_H
+
+/*
+ * This module contains the heap introspection capabilities. For now they are
+ * exposed purely through mallctl APIs in the experimental namespace, but this
+ * may change over time.
+ */
+
+/*
+ * The following two structs are for experimental purposes. See
+ * experimental_utilization_query_ctl and
+ * experimental_utilization_batch_query_ctl in src/ctl.c.
+ */
+typedef struct inspect_extent_util_stats_s inspect_extent_util_stats_t;
+struct inspect_extent_util_stats_s {
+ size_t nfree;
+ size_t nregs;
+ size_t size;
+};
+
+typedef struct inspect_extent_util_stats_verbose_s
+ inspect_extent_util_stats_verbose_t;
+
+struct inspect_extent_util_stats_verbose_s {
+ void *slabcur_addr;
+ size_t nfree;
+ size_t nregs;
+ size_t size;
+ size_t bin_nfree;
+ size_t bin_nregs;
+};
+
+void inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
+ size_t *nfree, size_t *nregs, size_t *size);
+void inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
+ size_t *nfree, size_t *nregs, size_t *size,
+ size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr);
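+
+/*
+ * A hypothetical usage sketch via the experimental mallctl namespace (the
+ * mallctl name below is inferred from the ctl symbols mentioned above):
+ *
+ *   inspect_extent_util_stats_t stats;
+ *   size_t sz = sizeof(stats);
+ *   void *ptr = malloc(42);
+ *   mallctl("experimental.utilization.query", &stats, &sz, &ptr,
+ *       sizeof(ptr));
+ */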
+
+#endif /* JEMALLOC_INTERNAL_INSPECT_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
index a0e4f5af0124..d7790be0ad16 100644
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
@@ -1,13 +1,16 @@
#ifndef JEMALLOC_INTERNAL_DECLS_H
#define JEMALLOC_INTERNAL_DECLS_H
+#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
#include "libc_private.h"
#include "namespace.h"
+#endif
#include <math.h>
#ifdef _WIN32
# include <windows.h>
# include "msvc_compat/windows_extra.h"
+# include "msvc_compat/strings.h"
# ifdef _WIN64
# if LG_VADDR <= 32
# error Generate the headers using x64 vcargs
@@ -34,8 +37,12 @@
# include <sys/uio.h>
# endif
# include <pthread.h>
-# ifdef __FreeBSD__
+# if defined(__FreeBSD__) || defined(__DragonFly__)
# include <pthread_np.h>
+# include <sched.h>
+# if defined(__FreeBSD__)
+# define cpu_set_t cpuset_t
+# endif
# endif
# include <signal.h>
# ifdef JEMALLOC_OS_UNFAIR_LOCK
@@ -94,4 +101,13 @@ isblank(int c) {
#endif
#include <fcntl.h>
+/*
+ * The Win32 midl compiler has #define small char; we don't use midl, but
+ * "small" is a nice identifier to have available when talking about size
+ * classes.
+ */
+#ifdef small
+# undef small
+#endif
+
#endif /* JEMALLOC_INTERNAL_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
new file mode 100644
index 000000000000..52b31878a464
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -0,0 +1,435 @@
+#ifndef JEMALLOC_INTERNAL_DEFS_H_
+#define JEMALLOC_INTERNAL_DEFS_H_
+/*
+ * If JEMALLOC_PREFIX is defined via --with-jemalloc-prefix, it will cause all
+ * public APIs to be prefixed. This makes it possible, with some care, to use
+ * multiple allocators simultaneously.
+ */
+#undef JEMALLOC_PREFIX
+#undef JEMALLOC_CPREFIX
+
+/*
+ * Define overrides for non-standard allocator-related functions if they are
+ * present on the system.
+ */
+#undef JEMALLOC_OVERRIDE___LIBC_CALLOC
+#undef JEMALLOC_OVERRIDE___LIBC_FREE
+#undef JEMALLOC_OVERRIDE___LIBC_MALLOC
+#undef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
+#undef JEMALLOC_OVERRIDE___LIBC_REALLOC
+#undef JEMALLOC_OVERRIDE___LIBC_VALLOC
+#undef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
+
+/*
+ * JEMALLOC_PRIVATE_NAMESPACE is used as a prefix for all library-private APIs.
+ * For shared libraries, symbol visibility mechanisms prevent these symbols
+ * from being exported, but for static libraries, naming collisions are a real
+ * possibility.
+ */
+#undef JEMALLOC_PRIVATE_NAMESPACE
+
+/*
+ * Hyper-threaded CPUs may need a special instruction inside spin loops in
+ * order to yield to another virtual CPU.
+ */
+#undef CPU_SPINWAIT
+/* 1 if CPU_SPINWAIT is defined, 0 otherwise. */
+#undef HAVE_CPU_SPINWAIT
+
+/*
+ * Number of significant bits in virtual addresses. This may be less than the
+ * total number of bits in a pointer, e.g. on x64, for which the uppermost 16
+ * bits are the same as bit 47.
+ */
+#undef LG_VADDR
+
+/* Defined if C11 atomics are available. */
+#undef JEMALLOC_C11_ATOMICS
+
+/* Defined if GCC __atomic atomics are available. */
+#undef JEMALLOC_GCC_ATOMIC_ATOMICS
+/* and the 8-bit variant support. */
+#undef JEMALLOC_GCC_U8_ATOMIC_ATOMICS
+
+/* Defined if GCC __sync atomics are available. */
+#undef JEMALLOC_GCC_SYNC_ATOMICS
+/* and the 8-bit variant support. */
+#undef JEMALLOC_GCC_U8_SYNC_ATOMICS
+
+/*
+ * Defined if __builtin_clz() and __builtin_clzl() are available.
+ */
+#undef JEMALLOC_HAVE_BUILTIN_CLZ
+
+/*
+ * Defined if os_unfair_lock_*() functions are available, as provided by Darwin.
+ */
+#undef JEMALLOC_OS_UNFAIR_LOCK
+
+/* Defined if syscall(2) is usable. */
+#undef JEMALLOC_USE_SYSCALL
+
+/*
+ * Defined if secure_getenv(3) is available.
+ */
+#undef JEMALLOC_HAVE_SECURE_GETENV
+
+/*
+ * Defined if issetugid(2) is available.
+ */
+#undef JEMALLOC_HAVE_ISSETUGID
+
+/* Defined if pthread_atfork(3) is available. */
+#undef JEMALLOC_HAVE_PTHREAD_ATFORK
+
+/* Defined if pthread_setname_np(3) is available. */
+#undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
+
+/* Defined if pthread_getname_np(3) is available. */
+#undef JEMALLOC_HAVE_PTHREAD_GETNAME_NP
+
+/* Defined if pthread_get_name_np(3) is available. */
+#undef JEMALLOC_HAVE_PTHREAD_GET_NAME_NP
+
+/*
+ * Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
+ */
+#undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
+
+/*
+ * Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
+ */
+#undef JEMALLOC_HAVE_CLOCK_MONOTONIC
+
+/*
+ * Defined if mach_absolute_time() is available.
+ */
+#undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
+
+/*
+ * Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
+ */
+#undef JEMALLOC_HAVE_CLOCK_REALTIME
+
+/*
+ * Defined if _malloc_thread_cleanup() exists. At least in the case of
+ * FreeBSD, pthread_key_create() allocates, so using it during malloc
+ * bootstrapping causes recursion into the pthreads library. Therefore, if
+ * _malloc_thread_cleanup() exists, use it as the basis for thread cleanup in
+ * malloc_tsd.
+ */
+#undef JEMALLOC_MALLOC_THREAD_CLEANUP
+
+/*
+ * Defined if threaded initialization is known to be safe on this platform.
+ * Among other things, it must be possible to initialize a mutex without
+ * triggering allocation in order for threaded allocation to be safe.
+ */
+#undef JEMALLOC_THREADED_INIT
+
+/*
+ * Defined if the pthreads implementation defines
+ * _pthread_mutex_init_calloc_cb(), in which case the function is used in order
+ * to avoid recursive allocation during mutex initialization.
+ */
+#undef JEMALLOC_MUTEX_INIT_CB
+
+/* Non-empty if the tls_model attribute is supported. */
+#undef JEMALLOC_TLS_MODEL
+
+/*
+ * JEMALLOC_DEBUG enables assertions and other sanity checks, and disables
+ * inline functions.
+ */
+#undef JEMALLOC_DEBUG
+
+/* JEMALLOC_STATS enables statistics calculation. */
+#undef JEMALLOC_STATS
+
+/* JEMALLOC_EXPERIMENTAL_SMALLOCX_API enables experimental smallocx API. */
+#undef JEMALLOC_EXPERIMENTAL_SMALLOCX_API
+
+/* JEMALLOC_PROF enables allocation profiling. */
+#undef JEMALLOC_PROF
+
+/* Use libunwind for profile backtracing if defined. */
+#undef JEMALLOC_PROF_LIBUNWIND
+
+/* Use libgcc for profile backtracing if defined. */
+#undef JEMALLOC_PROF_LIBGCC
+
+/* Use gcc intrinsics for profile backtracing if defined. */
+#undef JEMALLOC_PROF_GCC
+
+/*
+ * JEMALLOC_DSS enables use of sbrk(2) to allocate extents from the data storage
+ * segment (DSS).
+ */
+#undef JEMALLOC_DSS
+
+/* Support memory filling (junk/zero). */
+#undef JEMALLOC_FILL
+
+/* Support utrace(2)-based tracing. */
+#undef JEMALLOC_UTRACE
+
+/* Support utrace(2)-based tracing (label based signature). */
+#undef JEMALLOC_UTRACE_LABEL
+
+/* Support optional abort() on OOM. */
+#undef JEMALLOC_XMALLOC
+
+/* Support lazy locking (avoid locking unless a second thread is launched). */
+#undef JEMALLOC_LAZY_LOCK
+
+/*
+ * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
+ * classes).
+ */
+#undef LG_QUANTUM
+
+/* One page is 2^LG_PAGE bytes. */
+#undef LG_PAGE
+
+/* Base-2 log of the maximum number of regions in a slab. */
+#undef CONFIG_LG_SLAB_MAXREGS
+
+/*
+ * One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
+ * system does not explicitly support huge pages; system calls that require
+ * explicit huge page support are separately configured.
+ */
+#undef LG_HUGEPAGE
+
+/*
+ * If defined, adjacent virtual memory mappings with identical attributes
+ * automatically coalesce, and they fragment when changes are made to subranges.
+ * This is the normal order of things for mmap()/munmap(), but on Windows
+ * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
+ * mappings do *not* coalesce/fragment.
+ */
+#undef JEMALLOC_MAPS_COALESCE
+
+/*
+ * If defined, retain memory for later reuse by default rather than using e.g.
+ * munmap() to unmap freed extents. This is enabled on 64-bit Linux because
+ * common sequences of mmap()/munmap() calls will cause virtual memory map
+ * holes.
+ */
+#undef JEMALLOC_RETAIN
+
+/* TLS is used to map arenas and magazine caches to threads. */
+#undef JEMALLOC_TLS
+
+/*
+ * Used to mark unreachable code to quiet "end of non-void" compiler warnings.
+ * Don't use this directly; instead use unreachable() from util.h.
+ */
+#undef JEMALLOC_INTERNAL_UNREACHABLE
+
+/*
+ * ffs*() functions to use for bitmapping. Don't use these directly; instead,
+ * use ffs_*() from util.h.
+ */
+#undef JEMALLOC_INTERNAL_FFSLL
+#undef JEMALLOC_INTERNAL_FFSL
+#undef JEMALLOC_INTERNAL_FFS
+
+/*
+ * popcount*() functions to use for bitmapping.
+ */
+#undef JEMALLOC_INTERNAL_POPCOUNTL
+#undef JEMALLOC_INTERNAL_POPCOUNT
+
+/*
+ * If defined, explicitly attempt to more uniformly distribute large allocation
+ * pointer alignments across all cache indices.
+ */
+#undef JEMALLOC_CACHE_OBLIVIOUS
+
+/*
+ * If defined, enable logging facilities. We make this a configure option to
+ * avoid taking extra branches everywhere.
+ */
+#undef JEMALLOC_LOG
+
+/*
+ * If defined, use readlinkat() (instead of readlink()) to follow
+ * /etc/malloc_conf.
+ */
+#undef JEMALLOC_READLINKAT
+
+/*
+ * Darwin (OS X) uses zones to work around Mach-O symbol override shortcomings.
+ */
+#undef JEMALLOC_ZONE
+
+/*
+ * Methods for determining whether the OS overcommits.
+ * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
+ * /proc/sys/vm/overcommit_memory file.
+ * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
+ */
+#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
+
+/* Defined if madvise(2) is available. */
+#undef JEMALLOC_HAVE_MADVISE
+
+/*
+ * Defined if transparent huge pages are supported via the MADV_[NO]HUGEPAGE
+ * arguments to madvise(2).
+ */
+#undef JEMALLOC_HAVE_MADVISE_HUGE
+
+/*
+ * Methods for purging unused pages differ between operating systems.
+ *
+ * madvise(..., MADV_FREE) : This marks pages as being unused, such that they
+ * will be discarded rather than swapped out.
+ * madvise(..., MADV_DONTNEED) : If JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS is
+ * defined, this immediately discards pages,
+ * such that new pages will be demand-zeroed if
+ * the address region is later touched;
+ * otherwise this behaves similarly to
+ * MADV_FREE, though typically with higher
+ * system overhead.
+ */
+#undef JEMALLOC_PURGE_MADVISE_FREE
+#undef JEMALLOC_PURGE_MADVISE_DONTNEED
+#undef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
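+
+/*
+ * An illustrative sketch of the two purge flavors described above (assuming
+ * madvise(2) and the corresponding MADV_* flags are available):
+ *
+ *   madvise(addr, size, MADV_FREE);     // lazy; reclaimed under pressure
+ *   madvise(addr, size, MADV_DONTNEED); // eager; discarded immediately
+ */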
+
+/* Defined if madvise(2) is available but MADV_FREE is not (x86 Linux only). */
+#undef JEMALLOC_DEFINE_MADVISE_FREE
+
+/*
+ * Defined if MADV_DO[NT]DUMP is supported as an argument to madvise.
+ */
+#undef JEMALLOC_MADVISE_DONTDUMP
+
+/*
+ * Defined if MADV_[NO]CORE is supported as an argument to madvise.
+ */
+#undef JEMALLOC_MADVISE_NOCORE
+
+/* Defined if mprotect(2) is available. */
+#undef JEMALLOC_HAVE_MPROTECT
+
+/*
+ * Defined if transparent huge pages (THPs) are supported via the
+ * MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
+ */
+#undef JEMALLOC_THP
+
+/* Defined if posix_madvise is available. */
+#undef JEMALLOC_HAVE_POSIX_MADVISE
+
+/*
+ * Method for purging unused pages using posix_madvise.
+ *
+ * posix_madvise(..., POSIX_MADV_DONTNEED)
+ */
+#undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED
+#undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS
+
+/*
+ * Defined if memcntl page admin call is supported
+ */
+#undef JEMALLOC_HAVE_MEMCNTL
+
+/*
+ * Defined if malloc_size is supported
+ */
+#undef JEMALLOC_HAVE_MALLOC_SIZE
+
+/* Define if operating system has alloca.h header. */
+#undef JEMALLOC_HAS_ALLOCA_H
+
+/* C99 restrict keyword supported. */
+#undef JEMALLOC_HAS_RESTRICT
+
+/* For use by hash code. */
+#undef JEMALLOC_BIG_ENDIAN
+
+/* sizeof(int) == 2^LG_SIZEOF_INT. */
+#undef LG_SIZEOF_INT
+
+/* sizeof(long) == 2^LG_SIZEOF_LONG. */
+#undef LG_SIZEOF_LONG
+
+/* sizeof(long long) == 2^LG_SIZEOF_LONG_LONG. */
+#undef LG_SIZEOF_LONG_LONG
+
+/* sizeof(intmax_t) == 2^LG_SIZEOF_INTMAX_T. */
+#undef LG_SIZEOF_INTMAX_T
+
+/* glibc malloc hooks (__malloc_hook, __realloc_hook, __free_hook). */
+#undef JEMALLOC_GLIBC_MALLOC_HOOK
+
+/* glibc memalign hook. */
+#undef JEMALLOC_GLIBC_MEMALIGN_HOOK
+
+/* pthread support */
+#undef JEMALLOC_HAVE_PTHREAD
+
+/* dlsym() support */
+#undef JEMALLOC_HAVE_DLSYM
+
+/* Adaptive mutex support in pthreads. */
+#undef JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
+
+/* GNU specific sched_getcpu support */
+#undef JEMALLOC_HAVE_SCHED_GETCPU
+
+/* GNU specific sched_setaffinity support */
+#undef JEMALLOC_HAVE_SCHED_SETAFFINITY
+
+/*
+ * If defined, all the features necessary for background threads are present.
+ */
+#undef JEMALLOC_BACKGROUND_THREAD
+
+/*
+ * If defined, jemalloc symbols are not exported (doesn't work when
+ * JEMALLOC_PREFIX is not defined).
+ */
+#undef JEMALLOC_EXPORT
+
+/* config.malloc_conf options string. */
+#undef JEMALLOC_CONFIG_MALLOC_CONF
+
+/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
+#undef JEMALLOC_IS_MALLOC
+
+/*
+ * Defined if strerror_r returns char * if _GNU_SOURCE is defined.
+ */
+#undef JEMALLOC_STRERROR_R_RETURNS_CHAR_WITH_GNU_SOURCE
+
+/* Performs additional safety checks when defined. */
+#undef JEMALLOC_OPT_SAFETY_CHECKS
+
+/* Is C++ support being built? */
+#undef JEMALLOC_ENABLE_CXX
+
+/* Performs additional size checks when defined. */
+#undef JEMALLOC_OPT_SIZE_CHECKS
+
+/* Allows sampled junk and stash for checking use-after-free when defined. */
+#undef JEMALLOC_UAF_DETECTION
+
+/* Darwin VM_MAKE_TAG support */
+#undef JEMALLOC_HAVE_VM_MAKE_TAG
+
+/* If defined, realloc(ptr, 0) defaults to "free" instead of "alloc". */
+#undef JEMALLOC_ZERO_REALLOC_DEFAULT_FREE
+
+#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h
index d291170beefa..fc834c67373d 100644
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_externs.h
@@ -2,7 +2,10 @@
#define JEMALLOC_INTERNAL_EXTERNS_H
#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/hpa_opts.h"
+#include "jemalloc/internal/sec_opts.h"
#include "jemalloc/internal/tsd_types.h"
+#include "jemalloc/internal/nstime.h"
/* TSD checks this to set thread local slow state accordingly. */
extern bool malloc_slow;
@@ -10,14 +13,30 @@ extern bool malloc_slow;
/* Run-time options. */
extern bool opt_abort;
extern bool opt_abort_conf;
+extern bool opt_trust_madvise;
extern bool opt_confirm_conf;
+extern bool opt_hpa;
+extern hpa_shard_opts_t opt_hpa_opts;
+extern sec_opts_t opt_hpa_sec_opts;
+
extern const char *opt_junk;
extern bool opt_junk_alloc;
extern bool opt_junk_free;
+extern void (*junk_free_callback)(void *ptr, size_t size);
+extern void (*junk_alloc_callback)(void *ptr, size_t size);
extern bool opt_utrace;
extern bool opt_xmalloc;
+extern bool opt_experimental_infallible_new;
extern bool opt_zero;
extern unsigned opt_narenas;
+extern zero_realloc_action_t opt_zero_realloc_action;
+extern malloc_init_t malloc_init_state;
+extern const char *zero_realloc_mode_names[];
+extern atomic_zu_t zero_realloc_count;
+extern bool opt_cache_oblivious;
+
+/* Escape the free-fastpath when ptr & mask == 0 (for sanitization purposes). */
+extern uintptr_t san_cache_bin_nonfast_mask;
/* Number of CPUs. */
extern unsigned ncpus;
@@ -41,17 +60,16 @@ void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
void arena_set(unsigned ind, arena_t *arena);
unsigned narenas_total_get(void);
-arena_t *arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks);
-arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
+arena_t *arena_init(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);
arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
-void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
+void arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena);
void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
-void arenas_tdata_cleanup(tsd_t *tsd);
+size_t batch_alloc(void **ptrs, size_t num, size_t size, int flags);
void jemalloc_prefork(void);
void jemalloc_postfork_parent(void);
void jemalloc_postfork_child(void);
-bool malloc_initialized(void);
void je_sdallocx_noflags(void *ptr, size_t size);
+void *malloc_default(size_t size);
#endif /* JEMALLOC_INTERNAL_EXTERNS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h
index 437eaa407939..751c112ff4c0 100644
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_includes.h
@@ -10,7 +10,7 @@
* structs, externs, and inlines), and included each header file multiple times
* in this file, picking out the portion we want on each pass using the
* following #defines:
- * JEMALLOC_H_TYPES : Preprocessor-defined constants and psuedo-opaque data
+ * JEMALLOC_H_TYPES : Preprocessor-defined constants and pseudo-opaque data
* types.
* JEMALLOC_H_STRUCTS : Data structures.
* JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
@@ -40,8 +40,6 @@
/* TYPES */
/******************************************************************************/
-#include "jemalloc/internal/extent_types.h"
-#include "jemalloc/internal/base_types.h"
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/tcache_types.h"
#include "jemalloc/internal/prof_types.h"
@@ -50,11 +48,8 @@
/* STRUCTS */
/******************************************************************************/
-#include "jemalloc/internal/arena_structs_a.h"
-#include "jemalloc/internal/extent_structs.h"
-#include "jemalloc/internal/base_structs.h"
#include "jemalloc/internal/prof_structs.h"
-#include "jemalloc/internal/arena_structs_b.h"
+#include "jemalloc/internal/arena_structs.h"
#include "jemalloc/internal/tcache_structs.h"
#include "jemalloc/internal/background_thread_structs.h"
@@ -63,8 +58,6 @@
/******************************************************************************/
#include "jemalloc/internal/jemalloc_internal_externs.h"
-#include "jemalloc/internal/extent_externs.h"
-#include "jemalloc/internal/base_externs.h"
#include "jemalloc/internal/arena_externs.h"
#include "jemalloc/internal/large_externs.h"
#include "jemalloc/internal/tcache_externs.h"
@@ -76,19 +69,16 @@
/******************************************************************************/
#include "jemalloc/internal/jemalloc_internal_inlines_a.h"
-#include "jemalloc/internal/base_inlines.h"
/*
* Include portions of arena code interleaved with tcache code in order to
* resolve circular dependencies.
*/
-#include "jemalloc/internal/prof_inlines_a.h"
#include "jemalloc/internal/arena_inlines_a.h"
-#include "jemalloc/internal/extent_inlines.h"
#include "jemalloc/internal/jemalloc_internal_inlines_b.h"
#include "jemalloc/internal/tcache_inlines.h"
#include "jemalloc/internal/arena_inlines_b.h"
#include "jemalloc/internal/jemalloc_internal_inlines_c.h"
-#include "jemalloc/internal/prof_inlines_b.h"
+#include "jemalloc/internal/prof_inlines.h"
#include "jemalloc/internal/background_thread_inlines.h"
#endif /* JEMALLOC_INTERNAL_INCLUDES_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h
index ddde9b4e63e6..9e27cc3012fd 100644
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_a.h
@@ -56,31 +56,6 @@ percpu_arena_ind_limit(percpu_arena_mode_t mode) {
}
}
-static inline arena_tdata_t *
-arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing) {
- arena_tdata_t *tdata;
- arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
-
- if (unlikely(arenas_tdata == NULL)) {
- /* arenas_tdata hasn't been initialized yet. */
- return arena_tdata_get_hard(tsd, ind);
- }
- if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
- /*
- * ind is invalid, cache is old (too small), or tdata to be
- * initialized.
- */
- return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
- NULL);
- }
-
- tdata = &arenas_tdata[ind];
- if (likely(tdata != NULL) || !refresh_if_missing) {
- return tdata;
- }
- return arena_tdata_get_hard(tsd, ind);
-}
-
static inline arena_t *
arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
arena_t *ret;
@@ -90,36 +65,12 @@ arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing) {
ret = (arena_t *)atomic_load_p(&arenas[ind], ATOMIC_ACQUIRE);
if (unlikely(ret == NULL)) {
if (init_if_missing) {
- ret = arena_init(tsdn, ind,
- (extent_hooks_t *)&extent_hooks_default);
+ ret = arena_init(tsdn, ind, &arena_config_default);
}
}
return ret;
}
-static inline ticker_t *
-decay_ticker_get(tsd_t *tsd, unsigned ind) {
- arena_tdata_t *tdata;
-
- tdata = arena_tdata_get(tsd, ind, true);
- if (unlikely(tdata == NULL)) {
- return NULL;
- }
- return &tdata->decay_ticker;
-}
-
-JEMALLOC_ALWAYS_INLINE cache_bin_t *
-tcache_small_bin_get(tcache_t *tcache, szind_t binind) {
- assert(binind < SC_NBINS);
- return &tcache->bins_small[binind];
-}
-
-JEMALLOC_ALWAYS_INLINE cache_bin_t *
-tcache_large_bin_get(tcache_t *tcache, szind_t binind) {
- assert(binind >= SC_NBINS &&binind < nhbins);
- return &tcache->bins_large[binind - SC_NBINS];
-}
-
JEMALLOC_ALWAYS_INLINE bool
tcache_available(tsd_t *tsd) {
/*
@@ -129,9 +80,9 @@ tcache_available(tsd_t *tsd) {
*/
if (likely(tsd_tcache_enabled_get(tsd))) {
/* Associated arena == NULL implies tcache init in progress. */
- assert(tsd_tcachep_get(tsd)->arena == NULL ||
- tcache_small_bin_get(tsd_tcachep_get(tsd), 0)->avail !=
- NULL);
+ if (config_debug && tsd_tcache_slowp_get(tsd)->arena != NULL) {
+ tcache_assert_initialized(tsd_tcachep_get(tsd));
+ }
return true;
}
@@ -147,28 +98,25 @@ tcache_get(tsd_t *tsd) {
return tsd_tcachep_get(tsd);
}
+JEMALLOC_ALWAYS_INLINE tcache_slow_t *
+tcache_slow_get(tsd_t *tsd) {
+ if (!tcache_available(tsd)) {
+ return NULL;
+ }
+
+ return tsd_tcache_slowp_get(tsd);
+}
+
static inline void
pre_reentrancy(tsd_t *tsd, arena_t *arena) {
/* arena is the current context. Reentry from a0 is not allowed. */
assert(arena != arena_get(tsd_tsdn(tsd), 0, false));
-
- bool fast = tsd_fast(tsd);
- assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
- ++*tsd_reentrancy_levelp_get(tsd);
- if (fast) {
- /* Prepare slow path for reentrancy. */
- tsd_slow_update(tsd);
- assert(tsd_state_get(tsd) == tsd_state_nominal_slow);
- }
+ tsd_pre_reentrancy_raw(tsd);
}
static inline void
post_reentrancy(tsd_t *tsd) {
- int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
- assert(*reentrancy_level > 0);
- if (--*reentrancy_level == 0) {
- tsd_slow_update(tsd);
- }
+ tsd_post_reentrancy_raw(tsd);
}
#endif /* JEMALLOC_INTERNAL_INLINES_A_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h
index 70d6e5788570..152f8a039569 100644
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_b.h
@@ -1,7 +1,35 @@
#ifndef JEMALLOC_INTERNAL_INLINES_B_H
#define JEMALLOC_INTERNAL_INLINES_B_H
-#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/extent.h"
+
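+/*
+ * Rebinds the calling thread's arena (and tcache, if present) to the arena
+ * matching the given CPU, creating that arena on first use.
+ */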
+static inline void
+percpu_arena_update(tsd_t *tsd, unsigned cpu) {
+ assert(have_percpu_arena);
+ arena_t *oldarena = tsd_arena_get(tsd);
+ assert(oldarena != NULL);
+ unsigned oldind = arena_ind_get(oldarena);
+
+ if (oldind != cpu) {
+ unsigned newind = cpu;
+ arena_t *newarena = arena_get(tsd_tsdn(tsd), newind, true);
+ assert(newarena != NULL);
+
+ /* Set new arena/tcache associations. */
+ arena_migrate(tsd, oldarena, newarena);
+ tcache_t *tcache = tcache_get(tsd);
+ if (tcache != NULL) {
+ tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
+ tcache_arena_reassociate(tsd_tsdn(tsd), tcache_slow,
+ tcache, newarena);
+ }
+ }
+}
+
/* Choose an arena based on a per-thread value. */
static inline arena_t *
@@ -22,18 +46,19 @@ arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal) {
ret = arena_choose_hard(tsd, internal);
assert(ret);
if (tcache_available(tsd)) {
- tcache_t *tcache = tcache_get(tsd);
- if (tcache->arena != NULL) {
- /* See comments in tcache_data_init().*/
- assert(tcache->arena ==
+ tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
+ tcache_t *tcache = tsd_tcachep_get(tsd);
+ if (tcache_slow->arena != NULL) {
+ /* See comments in tsd_tcache_data_init().*/
+ assert(tcache_slow->arena ==
arena_get(tsd_tsdn(tsd), 0, false));
- if (tcache->arena != ret) {
+ if (tcache_slow->arena != ret) {
tcache_arena_reassociate(tsd_tsdn(tsd),
- tcache, ret);
+ tcache_slow, tcache, ret);
}
} else {
- tcache_arena_associate(tsd_tsdn(tsd), tcache,
- ret);
+ tcache_arena_associate(tsd_tsdn(tsd),
+ tcache_slow, tcache, ret);
}
}
}
@@ -75,13 +100,4 @@ arena_is_auto(arena_t *arena) {
return (arena_ind_get(arena) < manual_arena_base);
}
-JEMALLOC_ALWAYS_INLINE extent_t *
-iealloc(tsdn_t *tsdn, const void *ptr) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- return rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true);
-}
-
#endif /* JEMALLOC_INTERNAL_INLINES_B_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
index cdb10eb21f73..b0868b7d616b 100644
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_inlines_c.h
@@ -3,7 +3,9 @@
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/log.h"
#include "jemalloc/internal/sz.h"
+#include "jemalloc/internal/thread_event.h"
#include "jemalloc/internal/witness.h"
/*
@@ -101,8 +103,8 @@ ivsalloc(tsdn_t *tsdn, const void *ptr) {
}
JEMALLOC_ALWAYS_INLINE void
-idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, alloc_ctx_t *alloc_ctx,
- bool is_internal, bool slow_path) {
+idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
+ emap_alloc_ctx_t *alloc_ctx, bool is_internal, bool slow_path) {
assert(ptr != NULL);
assert(!is_internal || tcache == NULL);
assert(!is_internal || arena_is_auto(iaalloc(tsdn, ptr)));
@@ -125,7 +127,7 @@ idalloc(tsd_t *tsd, void *ptr) {
JEMALLOC_ALWAYS_INLINE void
isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
- alloc_ctx_t *alloc_ctx, bool slow_path) {
+ emap_alloc_ctx_t *alloc_ctx, bool slow_path) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
arena_sdalloc(tsdn, ptr, size, tcache, alloc_ctx, slow_path);
@@ -219,4 +221,134 @@
newsize);
}
+JEMALLOC_ALWAYS_INLINE void
+fastpath_success_finish(tsd_t *tsd, uint64_t allocated_after,
+ cache_bin_t *bin, void *ret) {
+ thread_allocated_set(tsd, allocated_after);
+ if (config_stats) {
+ bin->tstats.nrequests++;
+ }
+
+ LOG("core.malloc.exit", "result: %p", ret);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+malloc_initialized(void) {
+ return (malloc_init_state == malloc_init_initialized);
+}
+
+/*
+ * malloc() fastpath. Included here so that we can inline it into operator new;
+ * function call overhead there is non-negligible as a fraction of total CPU in
+ * allocation-heavy C++ programs. We take the fallback alloc to allow malloc
+ * (which can return NULL) to differ in its behavior from operator new (which
+ * can't). It matches the signature of malloc / operator new so that we can
+ * tail-call the fallback allocator, allowing us to avoid setting up the call
+ * frame in the common case.
+ *
+ * The fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit the
+ * tcache. If either of these is false, we tail-call to the slowpath,
+ * malloc_default(). Tail-calling is used to avoid having to save any
+ * caller-saved registers.
+ *
+ * The fastpath supports the ticker and profiling, both of which will also
+ * tail-call to the slowpath if they fire.
+ */
+JEMALLOC_ALWAYS_INLINE void *
+imalloc_fastpath(size_t size, void *(fallback_alloc)(size_t)) {
+ LOG("core.malloc.entry", "size: %zu", size);
+ if (tsd_get_allocates() && unlikely(!malloc_initialized())) {
+ return fallback_alloc(size);
+ }
+
+ tsd_t *tsd = tsd_get(false);
+ if (unlikely((size > SC_LOOKUP_MAXCLASS) || tsd == NULL)) {
+ return fallback_alloc(size);
+ }
+ /*
+ * The code below, up to the branch checking the next_event threshold,
+ * may execute before malloc_init(), in which case the threshold is 0,
+ * triggering the slow path and initialization.
+ *
+ * Note that when uninitialized, only the fast-path variants of the sz /
+ * tsd facilities may be called.
+ */
+ szind_t ind;
+ /*
+ * The thread_allocated counter in tsd serves as a general purpose
+ * accumulator for bytes of allocation to trigger different types of
+ * events. usize is always needed to advance thread_allocated, though
+ * it's not always needed in the core allocation logic.
+ */
+ size_t usize;
+ sz_size2index_usize_fastpath(size, &ind, &usize);
+ /* Fast path relies on size being a bin. */
+ assert(ind < SC_NBINS);
+ assert((SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS) &&
+ (size <= SC_SMALL_MAXCLASS));
+
+ uint64_t allocated, threshold;
+ te_malloc_fastpath_ctx(tsd, &allocated, &threshold);
+ uint64_t allocated_after = allocated + usize;
+ /*
+ * The ind and usize might be uninitialized (or only partially so) before
+ * malloc_init(). The assertions check for: 1) full correctness (usize
+ * & ind) when initialized; and 2) guaranteed slow-path (threshold == 0)
+ * when !initialized.
+ */
+ if (!malloc_initialized()) {
+ assert(threshold == 0);
+ } else {
+ assert(ind == sz_size2index(size));
+ assert(usize > 0 && usize == sz_index2size(ind));
+ }
+ /*
+ * Check for events and tsd non-nominal (fast_threshold will be set to
+ * 0) in a single branch.
+ */
+ if (unlikely(allocated_after >= threshold)) {
+ return fallback_alloc(size);
+ }
+ assert(tsd_fast(tsd));
+
+ tcache_t *tcache = tsd_tcachep_get(tsd);
+ assert(tcache == tcache_get(tsd));
+ cache_bin_t *bin = &tcache->bins[ind];
+ bool tcache_success;
+ void *ret;
+
+ /*
+ * We split up the code this way so that redundant low-water
+ * computation doesn't happen in the (more common) case in which we
+ * don't touch the low water mark. The compiler won't do this
+ * duplication on its own.
+ */
+ ret = cache_bin_alloc_easy(bin, &tcache_success);
+ if (tcache_success) {
+ fastpath_success_finish(tsd, allocated_after, bin, ret);
+ return ret;
+ }
+ ret = cache_bin_alloc(bin, &tcache_success);
+ if (tcache_success) {
+ fastpath_success_finish(tsd, allocated_after, bin, ret);
+ return ret;
+ }
+
+ return fallback_alloc(size);
+}
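+
+/*
+ * A hypothetical caller sketch (not part of this header): an operator-new
+ * style wrapper tail-calls into the fastpath, with fallback_new standing in
+ * for the slow-path allocator (both wrapper and fallback names are
+ * assumptions):
+ *
+ *   static void *fallback_new(size_t size);
+ *
+ *   void *
+ *   wrapped_new(size_t size) {
+ *           return imalloc_fastpath(size, &fallback_new);
+ *   }
+ */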
+
#endif /* JEMALLOC_INTERNAL_INLINES_C_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
index d8ea06f6d069..e97b5f90730c 100644
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
@@ -4,7 +4,11 @@
#ifdef JEMALLOC_DEBUG
# define JEMALLOC_ALWAYS_INLINE static inline
#else
-# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
+# ifdef _MSC_VER
+# define JEMALLOC_ALWAYS_INLINE static __forceinline
+# else
+# define JEMALLOC_ALWAYS_INLINE JEMALLOC_ATTR(always_inline) static inline
+# endif
#endif
#ifdef _MSC_VER
# define inline _inline
@@ -40,13 +44,6 @@
#define JEMALLOC_VA_ARGS_HEAD(head, ...) head
#define JEMALLOC_VA_ARGS_TAIL(head, ...) __VA_ARGS__
-#if (defined(__GNUC__) || defined(__GNUG__)) && !defined(__clang__) \
- && defined(JEMALLOC_HAVE_ATTR) && (__GNUC__ >= 7)
-#define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough);
-#else
-#define JEMALLOC_FALLTHROUGH /* falls through */
-#endif
-
/* Diagnostic suppression macros */
#if defined(_MSC_VER) && !defined(__clang__)
# define JEMALLOC_DIAGNOSTIC_PUSH __pragma(warning(push))
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h
index e296c5a7e847..62c2b59c71c2 100644
--- a/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_internal_types.h
@@ -3,15 +3,31 @@
#include "jemalloc/internal/quantum.h"
-/* Page size index type. */
-typedef unsigned pszind_t;
-
-/* Size class index type. */
-typedef unsigned szind_t;
-
/* Processor / core id type. */
typedef int malloc_cpuid_t;
+/* When realloc(non-null-ptr, 0) is called, what happens? */
+enum zero_realloc_action_e {
+ /* Realloc(ptr, 0) is free(ptr); return malloc(0); */
+ zero_realloc_action_alloc = 0,
+ /* Realloc(ptr, 0) is free(ptr); */
+ zero_realloc_action_free = 1,
+ /* Realloc(ptr, 0) aborts. */
+ zero_realloc_action_abort = 2
+};
+typedef enum zero_realloc_action_e zero_realloc_action_t;
+
+/* Signature of write callback. */
+typedef void (write_cb_t)(void *, const char *);
+
+enum malloc_init_e {
+ malloc_init_uninitialized = 3,
+ malloc_init_a0_initialized = 2,
+ malloc_init_recursible = 1,
+ malloc_init_initialized = 0 /* Common case --> jnz. */
+};
+typedef enum malloc_init_e malloc_init_t;
+
/*
* Flags bits:
*
diff --git a/contrib/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in b/contrib/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in
new file mode 100644
index 000000000000..c4fb2d0793bf
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/jemalloc_preamble.h.in
@@ -0,0 +1,262 @@
+#ifndef JEMALLOC_PREAMBLE_H
+#define JEMALLOC_PREAMBLE_H
+
+#include "jemalloc_internal_defs.h"
+#include "jemalloc/internal/jemalloc_internal_decls.h"
+
+#if defined(JEMALLOC_UTRACE) || defined(JEMALLOC_UTRACE_LABEL)
+#include <sys/ktrace.h>
+# if defined(JEMALLOC_UTRACE)
+# define UTRACE_CALL(p, l) utrace(p, l)
+# else
+# define UTRACE_CALL(p, l) utrace("jemalloc_process", p, l)
+# define JEMALLOC_UTRACE
+# endif
+#endif
+
+#ifndef JEMALLOC_PRIVATE_NAMESPACE
+#include "un-namespace.h"
+#include "libc_private.h"
+#endif
+
+#define JEMALLOC_NO_DEMANGLE
+#ifdef JEMALLOC_JET
+# undef JEMALLOC_IS_MALLOC
+# define JEMALLOC_N(n) jet_##n
+# include "jemalloc/internal/public_namespace.h"
+# define JEMALLOC_NO_RENAME
+# include "../jemalloc@install_suffix@.h"
+# undef JEMALLOC_NO_RENAME
+#else
+# define JEMALLOC_N(n) @private_namespace@##n
+# include "../jemalloc@install_suffix@.h"
+#endif
+
+#if defined(JEMALLOC_OSATOMIC)
+#include <libkern/OSAtomic.h>
+#endif
+
+#ifdef JEMALLOC_ZONE
+#include <mach/mach_error.h>
+#include <mach/mach_init.h>
+#include <mach/vm_map.h>
+#endif
+
+#include "jemalloc/internal/jemalloc_internal_macros.h"
+
+/*
+ * Note that the ordering matters here; the hook itself is name-mangled. We
+ * want the inclusion of hooks to happen early, so that we hook as much as
+ * possible.
+ */
+#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
+# ifndef JEMALLOC_JET
+# include "jemalloc/internal/private_namespace.h"
+# else
+# include "jemalloc/internal/private_namespace_jet.h"
+# endif
+#endif
+#include "jemalloc/internal/test_hooks.h"
+
+#ifdef JEMALLOC_DEFINE_MADVISE_FREE
+# define JEMALLOC_MADV_FREE 8
+#endif
+
+static const bool config_debug =
+#ifdef JEMALLOC_DEBUG
+ true
+#else
+ false
+#endif
+ ;
+static const bool have_dss =
+#ifdef JEMALLOC_DSS
+ true
+#else
+ false
+#endif
+ ;
+static const bool have_madvise_huge =
+#ifdef JEMALLOC_HAVE_MADVISE_HUGE
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_fill =
+#ifdef JEMALLOC_FILL
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_lazy_lock = true;
+static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
+static const bool config_prof =
+#ifdef JEMALLOC_PROF
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_prof_libgcc =
+#ifdef JEMALLOC_PROF_LIBGCC
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_prof_libunwind =
+#ifdef JEMALLOC_PROF_LIBUNWIND
+ true
+#else
+ false
+#endif
+ ;
+static const bool maps_coalesce =
+#ifdef JEMALLOC_MAPS_COALESCE
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_stats =
+#ifdef JEMALLOC_STATS
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_tls =
+#ifdef JEMALLOC_TLS
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_utrace =
+#ifdef JEMALLOC_UTRACE
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_xmalloc =
+#ifdef JEMALLOC_XMALLOC
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_cache_oblivious =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+ true
+#else
+ false
+#endif
+ ;
+/*
+ * Undocumented, for jemalloc development use only at the moment. See the note
+ * in jemalloc/internal/log.h.
+ */
+static const bool config_log =
+#ifdef JEMALLOC_LOG
+ true
+#else
+ false
+#endif
+ ;
+/*
+ * Are extra safety checks enabled; things like checking the size of sized
+ * deallocations, double-frees, etc.
+ */
+static const bool config_opt_safety_checks =
+#ifdef JEMALLOC_OPT_SAFETY_CHECKS
+ true
+#elif defined(JEMALLOC_DEBUG)
+ /*
+ * This lets us only guard safety checks by one flag instead of two; fast
+ * checks can guard solely by config_opt_safety_checks and run in debug mode
+ * too.
+ */
+ true
+#else
+ false
+#endif
+ ;
+
+/*
+ * Extra debugging of sized deallocations too onerous to be included in the
+ * general safety checks.
+ */
+static const bool config_opt_size_checks =
+#if defined(JEMALLOC_OPT_SIZE_CHECKS) || defined(JEMALLOC_DEBUG)
+ true
+#else
+ false
+#endif
+ ;
+
+static const bool config_uaf_detection =
+#if defined(JEMALLOC_UAF_DETECTION) || defined(JEMALLOC_DEBUG)
+ true
+#else
+ false
+#endif
+ ;
+
+/* Whether or not the C++ extensions are enabled. */
+static const bool config_enable_cxx =
+#ifdef JEMALLOC_ENABLE_CXX
+ true
+#else
+ false
+#endif
+;
+
+#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
+/* Currently percpu_arena depends on sched_getcpu. */
+#define JEMALLOC_PERCPU_ARENA
+#endif
+static const bool have_percpu_arena =
+#ifdef JEMALLOC_PERCPU_ARENA
+ true
+#else
+ false
+#endif
+ ;
+/*
+ * Undocumented, and not recommended; the application should take full
+ * responsibility for tracking provenance.
+ */
+static const bool force_ivsalloc =
+#ifdef JEMALLOC_FORCE_IVSALLOC
+ true
+#else
+ false
+#endif
+ ;
+static const bool have_background_thread =
+#ifdef JEMALLOC_BACKGROUND_THREAD
+ true
+#else
+ false
+#endif
+ ;
+static const bool config_high_res_timer =
+#ifdef JEMALLOC_HAVE_CLOCK_REALTIME
+ true
+#else
+ false
+#endif
+ ;
+
+static const bool have_memcntl =
+#ifdef JEMALLOC_HAVE_MEMCNTL
+ true
+#else
+ false
+#endif
+ ;
+
+#endif /* JEMALLOC_PREAMBLE_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/large_externs.h b/contrib/jemalloc/include/jemalloc/internal/large_externs.h
index a05019e8a542..8e09122dfb7b 100644
--- a/contrib/jemalloc/include/jemalloc/internal/large_externs.h
+++ b/contrib/jemalloc/include/jemalloc/internal/large_externs.h
@@ -6,27 +6,19 @@
void *large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void *large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero);
-bool large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
+bool large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
size_t usize_max, bool zero);
void *large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args);
-typedef void (large_dalloc_junk_t)(void *, size_t);
-extern large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk;
-
-typedef void (large_dalloc_maybe_junk_t)(void *, size_t);
-extern large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk;
-
-void large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent);
-void large_dalloc_finish(tsdn_t *tsdn, extent_t *extent);
-void large_dalloc(tsdn_t *tsdn, extent_t *extent);
-size_t large_salloc(tsdn_t *tsdn, const extent_t *extent);
-prof_tctx_t *large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent);
-void large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx);
-void large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent);
-
-nstime_t large_prof_alloc_time_get(const extent_t *extent);
-void large_prof_alloc_time_set(extent_t *extent, nstime_t time);
+void large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata);
+void large_dalloc_finish(tsdn_t *tsdn, edata_t *edata);
+void large_dalloc(tsdn_t *tsdn, edata_t *edata);
+size_t large_salloc(tsdn_t *tsdn, const edata_t *edata);
+void large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info,
+ bool reset_recent);
+void large_prof_tctx_reset(edata_t *edata);
+void large_prof_info_set(edata_t *edata, prof_tctx_t *tctx, size_t size);
#endif /* JEMALLOC_INTERNAL_LARGE_EXTERNS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/lockedint.h b/contrib/jemalloc/include/jemalloc/internal/lockedint.h
new file mode 100644
index 000000000000..d020ebec1c4b
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/lockedint.h
@@ -0,0 +1,204 @@
+#ifndef JEMALLOC_INTERNAL_LOCKEDINT_H
+#define JEMALLOC_INTERNAL_LOCKEDINT_H
+
+/*
+ * On architectures that support 64-bit atomics, we use atomic updates for
+ * our 64-bit values. Otherwise, we use a plain uint64_t and synchronize
+ * externally.
+ */
+
+typedef struct locked_u64_s locked_u64_t;
+#ifdef JEMALLOC_ATOMIC_U64
+struct locked_u64_s {
+ atomic_u64_t val;
+};
+#else
+/* Must hold the associated mutex. */
+struct locked_u64_s {
+ uint64_t val;
+};
+#endif
+
+typedef struct locked_zu_s locked_zu_t;
+struct locked_zu_s {
+ atomic_zu_t val;
+};
+
+#ifndef JEMALLOC_ATOMIC_U64
+# define LOCKEDINT_MTX_DECLARE(name) malloc_mutex_t name;
+# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) \
+ malloc_mutex_init(&(mu), name, rank, rank_mode)
+# define LOCKEDINT_MTX(mtx) (&(mtx))
+# define LOCKEDINT_MTX_LOCK(tsdn, mu) malloc_mutex_lock(tsdn, &(mu))
+# define LOCKEDINT_MTX_UNLOCK(tsdn, mu) malloc_mutex_unlock(tsdn, &(mu))
+# define LOCKEDINT_MTX_PREFORK(tsdn, mu) malloc_mutex_prefork(tsdn, &(mu))
+# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu) \
+ malloc_mutex_postfork_parent(tsdn, &(mu))
+# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu) \
+ malloc_mutex_postfork_child(tsdn, &(mu))
+#else
+# define LOCKEDINT_MTX_DECLARE(name)
+# define LOCKEDINT_MTX(mtx) NULL
+# define LOCKEDINT_MTX_INIT(mu, name, rank, rank_mode) false
+# define LOCKEDINT_MTX_LOCK(tsdn, mu)
+# define LOCKEDINT_MTX_UNLOCK(tsdn, mu)
+# define LOCKEDINT_MTX_PREFORK(tsdn, mu)
+# define LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, mu)
+# define LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, mu)
+#endif
+
+#ifdef JEMALLOC_ATOMIC_U64
+# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) assert((mtx) == NULL)
+#else
+# define LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx) \
+ malloc_mutex_assert_owner(tsdn, (mtx))
+#endif
+
+static inline uint64_t
+locked_read_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p) {
+ LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
+#ifdef JEMALLOC_ATOMIC_U64
+ return atomic_load_u64(&p->val, ATOMIC_RELAXED);
+#else
+ return p->val;
+#endif
+}
+
+static inline void
+locked_inc_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
+ uint64_t x) {
+ LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
+#ifdef JEMALLOC_ATOMIC_U64
+ atomic_fetch_add_u64(&p->val, x, ATOMIC_RELAXED);
+#else
+ p->val += x;
+#endif
+}
+
+static inline void
+locked_dec_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
+ uint64_t x) {
+ LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
+#ifdef JEMALLOC_ATOMIC_U64
+ uint64_t r = atomic_fetch_sub_u64(&p->val, x, ATOMIC_RELAXED);
+ assert(r - x <= r);
+#else
+ p->val -= x;
+ assert(p->val + x >= p->val);
+#endif
+}
+
+/* Increment and take modulus. Returns whether the modulo made any change. */
+static inline bool
+locked_inc_mod_u64(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_u64_t *p,
+ const uint64_t x, const uint64_t modulus) {
+ LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
+ uint64_t before, after;
+ bool overflow;
+#ifdef JEMALLOC_ATOMIC_U64
+ before = atomic_load_u64(&p->val, ATOMIC_RELAXED);
+ do {
+ after = before + x;
+ assert(after >= before);
+ overflow = (after >= modulus);
+ if (overflow) {
+ after %= modulus;
+ }
+ } while (!atomic_compare_exchange_weak_u64(&p->val, &before, after,
+ ATOMIC_RELAXED, ATOMIC_RELAXED));
+#else
+ before = p->val;
+ after = before + x;
+ overflow = (after >= modulus);
+ if (overflow) {
+ after %= modulus;
+ }
+ p->val = after;
+#endif
+ return overflow;
+}
+
+/*
+ * Non-atomically sets *dst += src. *dst needs external synchronization.
+ * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
+ * the types here are atomic).
+ */
+static inline void
+locked_inc_u64_unsynchronized(locked_u64_t *dst, uint64_t src) {
+#ifdef JEMALLOC_ATOMIC_U64
+ uint64_t cur_dst = atomic_load_u64(&dst->val, ATOMIC_RELAXED);
+ atomic_store_u64(&dst->val, src + cur_dst, ATOMIC_RELAXED);
+#else
+ dst->val += src;
+#endif
+}
+
+static inline uint64_t
+locked_read_u64_unsynchronized(locked_u64_t *p) {
+#ifdef JEMALLOC_ATOMIC_U64
+ return atomic_load_u64(&p->val, ATOMIC_RELAXED);
+#else
+ return p->val;
+#endif
+}
+
+static inline void
+locked_init_u64_unsynchronized(locked_u64_t *p, uint64_t x) {
+#ifdef JEMALLOC_ATOMIC_U64
+ atomic_store_u64(&p->val, x, ATOMIC_RELAXED);
+#else
+ p->val = x;
+#endif
+}
+
+static inline size_t
+locked_read_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p) {
+ LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
+#ifdef JEMALLOC_ATOMIC_U64
+ return atomic_load_zu(&p->val, ATOMIC_RELAXED);
+#else
+ return atomic_load_zu(&p->val, ATOMIC_RELAXED);
+#endif
+}
+
+static inline void
+locked_inc_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
+ size_t x) {
+ LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
+#ifdef JEMALLOC_ATOMIC_U64
+ atomic_fetch_add_zu(&p->val, x, ATOMIC_RELAXED);
+#else
+ size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED);
+ atomic_store_zu(&p->val, cur + x, ATOMIC_RELAXED);
+#endif
+}
+
+static inline void
+locked_dec_zu(tsdn_t *tsdn, malloc_mutex_t *mtx, locked_zu_t *p,
+ size_t x) {
+ LOCKEDINT_MTX_ASSERT_INTERNAL(tsdn, mtx);
+#ifdef JEMALLOC_ATOMIC_U64
+ size_t r = atomic_fetch_sub_zu(&p->val, x, ATOMIC_RELAXED);
+ assert(r - x <= r);
+#else
+ size_t cur = atomic_load_zu(&p->val, ATOMIC_RELAXED);
+ atomic_store_zu(&p->val, cur - x, ATOMIC_RELAXED);
+#endif
+}
+
+/* Like the _u64 variant, needs an externally synchronized *dst. */
+static inline void
+locked_inc_zu_unsynchronized(locked_zu_t *dst, size_t src) {
+ size_t cur_dst = atomic_load_zu(&dst->val, ATOMIC_RELAXED);
+ atomic_store_zu(&dst->val, src + cur_dst, ATOMIC_RELAXED);
+}
+
+/*
+ * Unlike the _u64 variant, this is safe to call unconditionally.
+ */
+static inline size_t
+locked_read_atomic_zu(locked_zu_t *p) {
+ return atomic_load_zu(&p->val, ATOMIC_RELAXED);
+}
+
+#endif /* JEMALLOC_INTERNAL_LOCKEDINT_H */
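
The lockedint scheme is easy to misread at first: when 64-bit atomics are available the mutex macros compile away entirely, and otherwise every access must happen under the associated mutex. A standalone sketch of the same split, assuming C11 atomics and pthreads (HAVE_ATOMIC_U64 and the counter_* names are illustrative stand-ins, not jemalloc's):

    #include <pthread.h>
    #include <stdint.h>

    #ifdef HAVE_ATOMIC_U64            /* stands in for JEMALLOC_ATOMIC_U64 */
    #include <stdatomic.h>
    typedef struct { _Atomic uint64_t val; } counter_u64_t;
    #define COUNTER_MTX_DECLARE(name) /* no mutex needed */
    static inline void
    counter_inc(counter_u64_t *c, uint64_t x) {
        atomic_fetch_add_explicit(&c->val, x, memory_order_relaxed);
    }
    #else
    /* Plain field; the caller synchronizes with the declared mutex. */
    typedef struct { uint64_t val; } counter_u64_t;
    #define COUNTER_MTX_DECLARE(name) pthread_mutex_t name;
    static inline void
    counter_inc(counter_u64_t *c, uint64_t x) {
        c->val += x;                  /* caller must hold the mutex */
    }
    #endif
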
diff --git a/contrib/jemalloc/include/jemalloc/internal/malloc_io.h b/contrib/jemalloc/include/jemalloc/internal/malloc_io.h
index 1d1a414e0f0b..a375bdae084a 100644
--- a/contrib/jemalloc/include/jemalloc/internal/malloc_io.h
+++ b/contrib/jemalloc/include/jemalloc/internal/malloc_io.h
@@ -1,6 +1,8 @@
#ifndef JEMALLOC_INTERNAL_MALLOC_IO_H
#define JEMALLOC_INTERNAL_MALLOC_IO_H
+#include "jemalloc/internal/jemalloc_internal_types.h"
+
#ifdef _WIN32
# ifdef _WIN64
# define FMT64_PREFIX "ll"
@@ -40,6 +42,7 @@
*/
#define MALLOC_PRINTF_BUFSIZE 4096
+write_cb_t wrtmessage;
int buferror(int err, char *buf, size_t buflen);
uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr,
int base);
@@ -57,10 +60,10 @@ size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
* The caller can set write_cb to null to choose to print with the
* je_malloc_message hook.
*/
-void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, va_list ap);
-void malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4);
+void malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
+ va_list ap);
+void malloc_cprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
+ ...) JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_printf(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2);
static inline ssize_t
diff --git a/contrib/jemalloc/include/jemalloc/internal/mpsc_queue.h b/contrib/jemalloc/include/jemalloc/internal/mpsc_queue.h
new file mode 100644
index 000000000000..316ea9b16edd
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/mpsc_queue.h
@@ -0,0 +1,134 @@
+#ifndef JEMALLOC_INTERNAL_MPSC_QUEUE_H
+#define JEMALLOC_INTERNAL_MPSC_QUEUE_H
+
+#include "jemalloc/internal/atomic.h"
+
+/*
+ * A concurrent implementation of a multi-producer, single-consumer queue. It
+ * supports three concurrent operations:
+ * - Push
+ * - Push batch
+ * - Pop batch
+ *
+ * These operations are all lock-free.
+ *
+ * The implementation is the simple two-stack queue built on a Treiber stack.
+ * It's not terribly efficient, but this isn't expected to be used anywhere
+ * with hot code. In fact, we don't really even need queue semantics in any
+ * anticipated use cases; we could get away with just the stack. But this way
+ * lets us frame the API in terms of the existing list types, which is a nice
+ * convenience. We can save on cache misses by introducing our own (parallel)
+ * single-linked list type here, and dropping FIFO semantics, if we need this to
+ * get faster. Since we're currently providing queue semantics though, we use
+ * the prev field in the link rather than the next field for Treiber-stack
+ * linkage, so that we can preserve order for batch-pushed lists (recall that
+ * the two-stack trick reverses order in the lock-free first stack).
+ */
+
+#define mpsc_queue(a_type) \
+struct { \
+ atomic_p_t tail; \
+}
+
+#define mpsc_queue_proto(a_attr, a_prefix, a_queue_type, a_type, \
+ a_list_type) \
+/* Initialize a queue. */ \
+a_attr void \
+a_prefix##new(a_queue_type *queue); \
+/* Insert all items in src into the queue, clearing src. */ \
+a_attr void \
+a_prefix##push_batch(a_queue_type *queue, a_list_type *src); \
+/* Insert node into the queue. */ \
+a_attr void \
+a_prefix##push(a_queue_type *queue, a_type *node); \
+/* \
+ * Pop all items in the queue into the list at dst. dst should already \
+ * be initialized (and may contain existing items, which then remain \
+ * in dst). \
+ */ \
+a_attr void \
+a_prefix##pop_batch(a_queue_type *queue, a_list_type *dst);
+
+#define mpsc_queue_gen(a_attr, a_prefix, a_queue_type, a_type, \
+ a_list_type, a_link) \
+a_attr void \
+a_prefix##new(a_queue_type *queue) { \
+ atomic_store_p(&queue->tail, NULL, ATOMIC_RELAXED); \
+} \
+a_attr void \
+a_prefix##push_batch(a_queue_type *queue, a_list_type *src) { \
+ /* \
+ * Reuse the ql list next field as the Treiber stack next \
+ * field. \
+ */ \
+ a_type *first = ql_first(src); \
+ a_type *last = ql_last(src, a_link); \
+ void* cur_tail = atomic_load_p(&queue->tail, ATOMIC_RELAXED); \
+ do { \
+ /* \
+ * Note that this breaks the queue ring structure; \
+ * it's not a ring any more! \
+ */ \
+ first->a_link.qre_prev = cur_tail; \
+ /* \
+ * Note: the upcoming CAS doesn't need an atomic; every \
+ * push only needs to synchronize with the next pop, \
+ * which we get from the release sequence rules. \
+ */ \
+ } while (!atomic_compare_exchange_weak_p(&queue->tail, \
+ &cur_tail, last, ATOMIC_RELEASE, ATOMIC_RELAXED)); \
+ ql_new(src); \
+} \
+a_attr void \
+a_prefix##push(a_queue_type *queue, a_type *node) { \
+ ql_elm_new(node, a_link); \
+ a_list_type list; \
+ ql_new(&list); \
+ ql_head_insert(&list, node, a_link); \
+ a_prefix##push_batch(queue, &list); \
+} \
+a_attr void \
+a_prefix##pop_batch(a_queue_type *queue, a_list_type *dst) { \
+ a_type *tail = atomic_load_p(&queue->tail, ATOMIC_RELAXED); \
+ if (tail == NULL) { \
+ /* \
+ * In the common special case where there are no \
+ * pending elements, bail early without a costly RMW. \
+ */ \
+ return; \
+ } \
+ tail = atomic_exchange_p(&queue->tail, NULL, ATOMIC_ACQUIRE); \
+ /* \
+ * It's a single-consumer queue, so if tail started non-NULL, \
+ * it'd better stay non-NULL. \
+ */ \
+ assert(tail != NULL); \
+ /* \
+ * We iterate through the stack and fix up the link \
+ * structure (stack insertion broke the list requirement that \
+ * the list be circularly linked). It's just as efficient at \
+ * this point to make the queue a "real" queue, so do that as \
+ * well. \
+ * If this ever gets to be a hot spot, we can omit this fixup \
+ * and make the queue a bag (i.e. not necessarily ordered), but \
+ * that would mean jettisoning the existing list API as the \
+ * batch pushing/popping interface. \
+ */ \
+ a_list_type reversed; \
+ ql_new(&reversed); \
+ while (tail != NULL) { \
+ /* \
+ * Pop an item off the stack, prepend it onto the list \
+ * (reversing the order). Recall that we use the \
+ * list prev field as the Treiber stack next field to \
+ * preserve order of batch-pushed items when reversed. \
+ */ \
+ a_type *next = tail->a_link.qre_prev; \
+ ql_elm_new(tail, a_link); \
+ ql_head_insert(&reversed, tail, a_link); \
+ tail = next; \
+ } \
+ ql_concat(dst, &reversed, a_link); \
+}
+
+#endif /* JEMALLOC_INTERNAL_MPSC_QUEUE_H */
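
Stripped of the macro machinery and the ql_t bookkeeping, the core of the queue is a Treiber stack that the single consumer drains with one exchange and then reverses to recover FIFO order. A self-contained C11 sketch of that core (node_t and mpsc_t here are illustrative types, not jemalloc's):

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct node_s node_t;
    struct node_s {
        node_t *next;                 /* Treiber-stack link */
    };

    typedef struct {
        _Atomic(node_t *) tail;
    } mpsc_t;

    /* Any thread: lock-free push via CAS onto the stack. */
    static void
    mpsc_push(mpsc_t *q, node_t *n) {
        node_t *cur = atomic_load_explicit(&q->tail, memory_order_relaxed);
        do {
            n->next = cur;
        } while (!atomic_compare_exchange_weak_explicit(&q->tail, &cur, n,
            memory_order_release, memory_order_relaxed));
    }

    /* Single consumer: take the whole stack, then reverse to FIFO order. */
    static node_t *
    mpsc_pop_all(mpsc_t *q) {
        node_t *head = atomic_exchange_explicit(&q->tail, NULL,
            memory_order_acquire);
        node_t *fifo = NULL;
        while (head != NULL) {
            node_t *next = head->next;
            head->next = fifo;        /* prepend: reverses LIFO into FIFO */
            fifo = head;
            head = next;
        }
        return fifo;
    }
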
diff --git a/contrib/jemalloc/include/jemalloc/internal/mutex.h b/contrib/jemalloc/include/jemalloc/internal/mutex.h
index 94af16183649..8468e7bcb864 100644
--- a/contrib/jemalloc/include/jemalloc/internal/mutex.h
+++ b/contrib/jemalloc/include/jemalloc/internal/mutex.h
@@ -6,6 +6,8 @@
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/witness.h"
+extern int64_t opt_mutex_max_spin;
+
typedef enum {
/* Can only acquire one mutex of a given witness rank at a time. */
malloc_mutex_rank_exclusive,
@@ -43,7 +45,7 @@ struct malloc_mutex_s {
#else
pthread_mutex_t lock;
#endif
- /*
+ /*
* Hint flag to avoid exclusive cache line contention
* during spin waiting
*/
@@ -67,12 +69,6 @@ struct malloc_mutex_s {
#endif
};
-/*
- * Based on benchmark results, a fixed spin with this amount of retries works
- * well for our critical sections.
- */
-#define MALLOC_MUTEX_MAX_SPIN 250
-
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
# define MALLOC_MUTEX_LOCK(m) AcquireSRWLockExclusive(&(m)->lock)
@@ -243,22 +239,25 @@ malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) {
witness_assert_not_owner(tsdn_witness_tsdp_get(tsdn), &mutex->witness);
}
-/* Copy the prof data from mutex for processing. */
static inline void
-malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
- malloc_mutex_t *mutex) {
- mutex_prof_data_t *source = &mutex->prof_data;
- /* Can only read holding the mutex. */
- malloc_mutex_assert_owner(tsdn, mutex);
-
+malloc_mutex_prof_copy(mutex_prof_data_t *dst, mutex_prof_data_t *source) {
/*
* Not *really* allowed (we shouldn't be doing non-atomic loads of
* atomic data), but the mutex protection makes this safe, and writing
* a member-for-member copy is tedious for this situation.
*/
- *data = *source;
+ *dst = *source;
/* n_wait_thds is not reported (modified w/o locking). */
- atomic_store_u32(&data->n_waiting_thds, 0, ATOMIC_RELAXED);
+ atomic_store_u32(&dst->n_waiting_thds, 0, ATOMIC_RELAXED);
+}
+
+/* Copy the prof data from mutex for processing. */
+static inline void
+malloc_mutex_prof_read(tsdn_t *tsdn, mutex_prof_data_t *data,
+ malloc_mutex_t *mutex) {
+ /* Can only read holding the mutex. */
+ malloc_mutex_assert_owner(tsdn, mutex);
+ malloc_mutex_prof_copy(data, &mutex->prof_data);
}
static inline void
@@ -283,4 +282,36 @@ malloc_mutex_prof_accum(tsdn_t *tsdn, mutex_prof_data_t *data,
data->n_lock_ops += source->n_lock_ops;
}
+/* Compare the prof data and update to the maximum. */
+static inline void
+malloc_mutex_prof_max_update(tsdn_t *tsdn, mutex_prof_data_t *data,
+ malloc_mutex_t *mutex) {
+ mutex_prof_data_t *source = &mutex->prof_data;
+ /* Can only read holding the mutex. */
+ malloc_mutex_assert_owner(tsdn, mutex);
+
+ if (nstime_compare(&source->tot_wait_time, &data->tot_wait_time) > 0) {
+ nstime_copy(&data->tot_wait_time, &source->tot_wait_time);
+ }
+ if (nstime_compare(&source->max_wait_time, &data->max_wait_time) > 0) {
+ nstime_copy(&data->max_wait_time, &source->max_wait_time);
+ }
+ if (source->n_wait_times > data->n_wait_times) {
+ data->n_wait_times = source->n_wait_times;
+ }
+ if (source->n_spin_acquired > data->n_spin_acquired) {
+ data->n_spin_acquired = source->n_spin_acquired;
+ }
+ if (source->max_n_thds > data->max_n_thds) {
+ data->max_n_thds = source->max_n_thds;
+ }
+ if (source->n_owner_switches > data->n_owner_switches) {
+ data->n_owner_switches = source->n_owner_switches;
+ }
+ if (source->n_lock_ops > data->n_lock_ops) {
+ data->n_lock_ops = source->n_lock_ops;
+ }
+ /* n_wait_thds is not reported. */
+}
+
#endif /* JEMALLOC_INTERNAL_MUTEX_H */
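
malloc_mutex_prof_max_update does an element-wise maximum rather than a sum, so unlike malloc_mutex_prof_accum it answers "what is the worst any one of these mutexes has seen". A hedged sketch of that use, assuming jemalloc's internal headers are in scope; prof_data_reduce_max is an illustrative helper, not part of the API:

    #include <string.h>

    /* Reduce several mutexes' prof data into one element-wise maximum. */
    static void
    prof_data_reduce_max(tsdn_t *tsdn, mutex_prof_data_t *out,
        malloc_mutex_t *mutexes, size_t n) {
        memset(out, 0, sizeof(*out));
        for (size_t i = 0; i < n; i++) {
            /* The reader must own each mutex while copying its data. */
            malloc_mutex_lock(tsdn, &mutexes[i]);
            malloc_mutex_prof_max_update(tsdn, out, &mutexes[i]);
            malloc_mutex_unlock(tsdn, &mutexes[i]);
        }
    }
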
diff --git a/contrib/jemalloc/include/jemalloc/internal/mutex_pool.h b/contrib/jemalloc/include/jemalloc/internal/mutex_pool.h
deleted file mode 100644
index 726cece90bc7..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/mutex_pool.h
+++ /dev/null
@@ -1,94 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_MUTEX_POOL_H
-#define JEMALLOC_INTERNAL_MUTEX_POOL_H
-
-#include "jemalloc/internal/hash.h"
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/witness.h"
-
-/* We do mod reductions by this value, so it should be kept a power of 2. */
-#define MUTEX_POOL_SIZE 256
-
-typedef struct mutex_pool_s mutex_pool_t;
-struct mutex_pool_s {
- malloc_mutex_t mutexes[MUTEX_POOL_SIZE];
-};
-
-bool mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank);
-
-/* Internal helper - not meant to be called outside this module. */
-static inline malloc_mutex_t *
-mutex_pool_mutex(mutex_pool_t *pool, uintptr_t key) {
- size_t hash_result[2];
- hash(&key, sizeof(key), 0xd50dcc1b, hash_result);
- return &pool->mutexes[hash_result[0] % MUTEX_POOL_SIZE];
-}
-
-static inline void
-mutex_pool_assert_not_held(tsdn_t *tsdn, mutex_pool_t *pool) {
- for (int i = 0; i < MUTEX_POOL_SIZE; i++) {
- malloc_mutex_assert_not_owner(tsdn, &pool->mutexes[i]);
- }
-}
-
-/*
- * Note that a mutex pool doesn't work exactly the way an embdedded mutex would.
- * You're not allowed to acquire mutexes in the pool one at a time. You have to
- * acquire all the mutexes you'll need in a single function call, and then
- * release them all in a single function call.
- */
-
-static inline void
-mutex_pool_lock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
- mutex_pool_assert_not_held(tsdn, pool);
-
- malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
- malloc_mutex_lock(tsdn, mutex);
-}
-
-static inline void
-mutex_pool_unlock(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
- malloc_mutex_t *mutex = mutex_pool_mutex(pool, key);
- malloc_mutex_unlock(tsdn, mutex);
-
- mutex_pool_assert_not_held(tsdn, pool);
-}
-
-static inline void
-mutex_pool_lock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
- uintptr_t key2) {
- mutex_pool_assert_not_held(tsdn, pool);
-
- malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
- malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
- if ((uintptr_t)mutex1 < (uintptr_t)mutex2) {
- malloc_mutex_lock(tsdn, mutex1);
- malloc_mutex_lock(tsdn, mutex2);
- } else if ((uintptr_t)mutex1 == (uintptr_t)mutex2) {
- malloc_mutex_lock(tsdn, mutex1);
- } else {
- malloc_mutex_lock(tsdn, mutex2);
- malloc_mutex_lock(tsdn, mutex1);
- }
-}
-
-static inline void
-mutex_pool_unlock2(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key1,
- uintptr_t key2) {
- malloc_mutex_t *mutex1 = mutex_pool_mutex(pool, key1);
- malloc_mutex_t *mutex2 = mutex_pool_mutex(pool, key2);
- if (mutex1 == mutex2) {
- malloc_mutex_unlock(tsdn, mutex1);
- } else {
- malloc_mutex_unlock(tsdn, mutex1);
- malloc_mutex_unlock(tsdn, mutex2);
- }
-
- mutex_pool_assert_not_held(tsdn, pool);
-}
-
-static inline void
-mutex_pool_assert_owner(tsdn_t *tsdn, mutex_pool_t *pool, uintptr_t key) {
- malloc_mutex_assert_owner(tsdn, mutex_pool_mutex(pool, key));
-}
-
-#endif /* JEMALLOC_INTERNAL_MUTEX_POOL_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/mutex_prof.h b/contrib/jemalloc/include/jemalloc/internal/mutex_prof.h
index 2cb8fb0cbf7b..4a526a5aeb86 100644
--- a/contrib/jemalloc/include/jemalloc/internal/mutex_prof.h
+++ b/contrib/jemalloc/include/jemalloc/internal/mutex_prof.h
@@ -7,8 +7,14 @@
#define MUTEX_PROF_GLOBAL_MUTEXES \
OP(background_thread) \
+ OP(max_per_bg_thd) \
OP(ctl) \
- OP(prof)
+ OP(prof) \
+ OP(prof_thds_data) \
+ OP(prof_dump) \
+ OP(prof_recent_alloc) \
+ OP(prof_recent_dump) \
+ OP(prof_stats)
typedef enum {
#define OP(mtx) global_prof_mutex_##mtx,
@@ -26,7 +32,10 @@ typedef enum {
OP(decay_dirty) \
OP(decay_muzzy) \
OP(base) \
- OP(tcache_list)
+ OP(tcache_list) \
+ OP(hpa_shard) \
+ OP(hpa_shard_grow) \
+ OP(hpa_sec)
typedef enum {
#define OP(mtx) arena_prof_mutex_##mtx,
diff --git a/contrib/jemalloc/include/jemalloc/internal/nstime.h b/contrib/jemalloc/include/jemalloc/internal/nstime.h
index 17c177c7f4b3..486e5ccacc73 100644
--- a/contrib/jemalloc/include/jemalloc/internal/nstime.h
+++ b/contrib/jemalloc/include/jemalloc/internal/nstime.h
@@ -3,12 +3,23 @@
/* Maximum supported number of seconds (~584 years). */
#define NSTIME_SEC_MAX KQU(18446744072)
-#define NSTIME_ZERO_INITIALIZER {0}
+
+#define NSTIME_MAGIC ((uint32_t)0xb8a9ce37)
+#ifdef JEMALLOC_DEBUG
+# define NSTIME_ZERO_INITIALIZER {0, NSTIME_MAGIC}
+#else
+# define NSTIME_ZERO_INITIALIZER {0}
+#endif
typedef struct {
uint64_t ns;
+#ifdef JEMALLOC_DEBUG
+ uint32_t magic; /* Tracks if initialized. */
+#endif
} nstime_t;
+static const nstime_t nstime_zero = NSTIME_ZERO_INITIALIZER;
+
void nstime_init(nstime_t *time, uint64_t ns);
void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec);
uint64_t nstime_ns(const nstime_t *time);
@@ -24,11 +35,39 @@ void nstime_isubtract(nstime_t *time, uint64_t subtrahend);
void nstime_imultiply(nstime_t *time, uint64_t multiplier);
void nstime_idivide(nstime_t *time, uint64_t divisor);
uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor);
+uint64_t nstime_ns_since(const nstime_t *past);
typedef bool (nstime_monotonic_t)(void);
extern nstime_monotonic_t *JET_MUTABLE nstime_monotonic;
-typedef bool (nstime_update_t)(nstime_t *);
+typedef void (nstime_update_t)(nstime_t *);
extern nstime_update_t *JET_MUTABLE nstime_update;
+typedef void (nstime_prof_update_t)(nstime_t *);
+extern nstime_prof_update_t *JET_MUTABLE nstime_prof_update;
+
+void nstime_init_update(nstime_t *time);
+void nstime_prof_init_update(nstime_t *time);
+
+enum prof_time_res_e {
+ prof_time_res_default = 0,
+ prof_time_res_high = 1
+};
+typedef enum prof_time_res_e prof_time_res_t;
+
+extern prof_time_res_t opt_prof_time_res;
+extern const char *prof_time_res_mode_names[];
+
+JEMALLOC_ALWAYS_INLINE void
+nstime_init_zero(nstime_t *time) {
+ nstime_copy(time, &nstime_zero);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+nstime_equals_zero(nstime_t *time) {
+ int diff = nstime_compare(time, &nstime_zero);
+ assert(diff >= 0);
+ return diff == 0;
+}
+
#endif /* JEMALLOC_INTERNAL_NSTIME_H */
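
The new NSTIME_MAGIC field is a debug-only initialization canary: nstime_t values gain a magic word in debug builds, and accessors can assert it before trusting ns. A minimal standalone sketch of the idiom, keyed off NDEBUG rather than JEMALLOC_DEBUG (stamp_t and its functions are illustrative, not jemalloc's):

    #include <assert.h>
    #include <stdint.h>

    #define STAMP_MAGIC ((uint32_t)0xb8a9ce37)

    typedef struct {
        uint64_t ns;
    #ifndef NDEBUG
        uint32_t magic;               /* set on init, checked on use */
    #endif
    } stamp_t;

    static void
    stamp_init(stamp_t *t, uint64_t ns) {
        t->ns = ns;
    #ifndef NDEBUG
        t->magic = STAMP_MAGIC;
    #endif
    }

    static uint64_t
    stamp_ns(const stamp_t *t) {
    #ifndef NDEBUG
        assert(t->magic == STAMP_MAGIC);  /* catches uninitialized values */
    #endif
        return t->ns;
    }
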
diff --git a/contrib/jemalloc/include/jemalloc/internal/pa.h b/contrib/jemalloc/include/jemalloc/internal/pa.h
new file mode 100644
index 000000000000..4748a05b691e
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/pa.h
@@ -0,0 +1,243 @@
+#ifndef JEMALLOC_INTERNAL_PA_H
+#define JEMALLOC_INTERNAL_PA_H
+
+#include "jemalloc/internal/base.h"
+#include "jemalloc/internal/decay.h"
+#include "jemalloc/internal/ecache.h"
+#include "jemalloc/internal/edata_cache.h"
+#include "jemalloc/internal/emap.h"
+#include "jemalloc/internal/hpa.h"
+#include "jemalloc/internal/lockedint.h"
+#include "jemalloc/internal/pac.h"
+#include "jemalloc/internal/pai.h"
+#include "jemalloc/internal/sec.h"
+
+/*
+ * The page allocator; responsible for acquiring pages of memory for
+ * allocations. It picks the implementation of the page allocator interface
+ * (i.e. a pai_t) to handle a given page-level allocation request. For now, the
+ * only such implementation is the PAC code ("page allocator classic"), but
+ * others will be coming soon.
+ */
+
+typedef struct pa_central_s pa_central_t;
+struct pa_central_s {
+ hpa_central_t hpa;
+};
+
+/*
+ * The stats for a particular pa_shard. Because of the way the ctl module
+ * handles stats epoch data collection (it has its own arena_stats, and merges
+ * the stats from each arena into it), this needs to live in the arena_stats_t;
+ * hence we define it here and let the pa_shard have a pointer (rather than the
+ * more natural approach of just embedding it in the pa_shard itself).
+ *
+ * We follow the arena_stats_t approach of marking the derived fields. These
+ * are the ones that are not maintained on their own; instead, their values are
+ * derived during those stats merges.
+ */
+typedef struct pa_shard_stats_s pa_shard_stats_t;
+struct pa_shard_stats_s {
+ /* Number of edata_t structs allocated by base, but not being used. */
+ size_t edata_avail; /* Derived. */
+ /*
+ * Stats specific to the PAC. For now, these are the only stats that
+ * exist, but there will eventually be other page allocators. Things
+ * like edata_avail make sense in a cross-PA sense, but things like
+ * npurges don't.
+ */
+ pac_stats_t pac_stats;
+};
+
+/*
+ * The local allocator handle. Keeps the state necessary to satisfy page-sized
+ * allocations.
+ *
+ * The contents are mostly internal to the PA module. The key exception is that
+ * arena decay code is allowed to grab pointers to the dirty and muzzy ecaches
+ * decay_ts, for a couple of queries, passing them back to a PA function, or
+ * acquiring decay.mtx and looking at decay.purging. The reasoning is that,
+ * while PA decides what and how to purge, the arena code decides when and where
+ * (e.g. on what thread). It's allowed to use the presence of another purger to
+ * decide.
+ * (The background thread code also touches some other decay internals, but
+ * that's not fundamental; it's just an artifact of a partial refactoring, and
+ * its accesses could be straightforwardly moved inside the decay module).
+ */
+typedef struct pa_shard_s pa_shard_t;
+struct pa_shard_s {
+ /* The central PA this shard is associated with. */
+ pa_central_t *central;
+
+ /*
+ * Number of pages in active extents.
+ *
+ * Synchronization: atomic.
+ */
+ atomic_zu_t nactive;
+
+ /*
+ * Whether or not we should prefer the hugepage allocator. Atomic since
+ * it may be concurrently modified by a thread setting extent hooks.
+ * Note that we still may do HPA operations in this arena; if use_hpa is
+ * changed from true to false, we'll free back to the hugepage allocator
+ * for those allocations.
+ */
+ atomic_b_t use_hpa;
+
+ /*
+ * If we never used the HPA to begin with, it wasn't initialized, and so
+ * we shouldn't try to e.g. acquire its mutexes during fork. This
+ * tracks that knowledge.
+ */
+ bool ever_used_hpa;
+
+ /* Allocates from a PAC. */
+ pac_t pac;
+
+ /*
+ * We place a small extent cache in front of the HPA, since we intend
+ * these configurations to use many fewer arenas, and therefore have a
+ * higher risk of hot locks.
+ */
+ sec_t hpa_sec;
+ hpa_shard_t hpa_shard;
+
+ /* The source of edata_t objects. */
+ edata_cache_t edata_cache;
+
+ unsigned ind;
+
+ malloc_mutex_t *stats_mtx;
+ pa_shard_stats_t *stats;
+
+ /* The emap this shard is tied to. */
+ emap_t *emap;
+
+ /* The base from which we get the ehooks and allocate metadata. */
+ base_t *base;
+};
+
+static inline bool
+pa_shard_dont_decay_muzzy(pa_shard_t *shard) {
+ return ecache_npages_get(&shard->pac.ecache_muzzy) == 0 &&
+ pac_decay_ms_get(&shard->pac, extent_state_muzzy) <= 0;
+}
+
+static inline ehooks_t *
+pa_shard_ehooks_get(pa_shard_t *shard) {
+ return base_ehooks_get(shard->base);
+}
+
+/* Returns true on error. */
+bool pa_central_init(pa_central_t *central, base_t *base, bool hpa,
+ hpa_hooks_t *hpa_hooks);
+
+/* Returns true on error. */
+bool pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
+ emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats,
+ malloc_mutex_t *stats_mtx, nstime_t *cur_time, size_t oversize_threshold,
+ ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms);
+
+/*
+ * This isn't exposed to users; we allow late enablement of the HPA shard so
+ * that we can boot without worrying about the HPA, then turn it on in a0.
+ */
+bool pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
+ const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts);
+
+/*
+ * We stop using the HPA when custom extent hooks are installed, but still
+ * redirect deallocations to it.
+ */
+void pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard);
+
+/*
+ * This does the PA-specific parts of arena reset (i.e. freeing all active
+ * allocations).
+ */
+void pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard);
+
+/*
+ * Destroy all the remaining retained extents. Should only be called after
+ * decaying all active, dirty, and muzzy extents to the retained state, as the
+ * last step in destroying the shard.
+ */
+void pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard);
+
+/* Gets an edata for the given allocation. */
+edata_t *pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size,
+ size_t alignment, bool slab, szind_t szind, bool zero, bool guarded,
+ bool *deferred_work_generated);
+/* Returns true on error, in which case nothing changed. */
+bool pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
+ size_t new_size, szind_t szind, bool zero, bool *deferred_work_generated);
+/*
+ * The same, but for shrinking. Sets *deferred_work_generated to true if the
+ * operation left deferred work behind, and false otherwise.
+ */
+bool pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
+ size_t new_size, szind_t szind, bool *deferred_work_generated);
+/*
+ * Frees the given edata back to the pa. Sets *deferred_work_generated if the
+ * deallocation left deferred work behind (it always does for now, but this
+ * need not be the case).
+ * (We could make this the return value of course, but an out-parameter is
+ * more consistent with the shrink pathway and our error codes here).
+ */
+void pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
+ bool *deferred_work_generated);
+bool pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state,
+ ssize_t decay_ms, pac_purge_eagerness_t eagerness);
+ssize_t pa_decay_ms_get(pa_shard_t *shard, extent_state_t state);
+
+/*
+ * Do deferred work on this PA shard.
+ *
+ * Morally, this should do both PAC decay and the HPA deferred work. For now,
+ * though, the arena, background thread, and PAC modules are tightly interwoven
+ * in a way that's tricky to extricate, so we only do the HPA-specific parts.
+ */
+void pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard,
+ bool deferral_allowed);
+void pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
+void pa_shard_try_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
+uint64_t pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard);
+
+/******************************************************************************/
+/*
+ * Various bits of "boring" functionality that are still part of this module,
+ * but that we relegate to pa_extra.c, to keep the core logic in pa.c as
+ * readable as possible.
+ */
+
+/*
+ * These fork phases are synchronized with the arena fork phase numbering to
+ * make it easy to keep straight. That's why there's no prefork1.
+ */
+void pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard);
+void pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard);
+void pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard);
+void pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard);
+void pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard);
+void pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard);
+void pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard);
+
+void pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive,
+ size_t *ndirty, size_t *nmuzzy);
+
+void pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
+ pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out,
+ hpa_shard_stats_t *hpa_stats_out, sec_stats_t *sec_stats_out,
+ size_t *resident);
+
+/*
+ * Reads the PA-owned mutex stats into the output stats array, at the
+ * appropriate positions. Morally, these stats should really live in
+ * pa_shard_stats_t, but the indices are sort of baked into the various mutex
+ * prof macros. This would be a good thing to do at some point.
+ */
+void pa_shard_mtx_stats_read(tsdn_t *tsdn, pa_shard_t *shard,
+ mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]);
+
+#endif /* JEMALLOC_INTERNAL_PA_H */
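
Taken together, the declarations above imply a simple round-trip for a page-level allocation: allocate through pa_alloc, free through pa_dalloc, and honor the deferred_work_generated out-flag. A hedged sketch under stated assumptions: the shard is initialized, the extent is a PAGE-sized non-slab allocation, and the szind of 0 is only a placeholder:

    static void
    pa_round_trip_sketch(tsdn_t *tsdn, pa_shard_t *shard) {
        bool deferred = false;
        edata_t *e = pa_alloc(tsdn, shard, PAGE, PAGE, /* slab */ false,
            /* szind */ 0, /* zero */ false, /* guarded */ false, &deferred);
        if (e != NULL) {
            pa_dalloc(tsdn, shard, e, &deferred);
        }
        if (deferred) {
            /* e.g. HPA purging queued up by the dalloc above. */
            pa_shard_do_deferred_work(tsdn, shard);
        }
    }
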
diff --git a/contrib/jemalloc/include/jemalloc/internal/pac.h b/contrib/jemalloc/include/jemalloc/internal/pac.h
new file mode 100644
index 000000000000..01c4e6afabb0
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/pac.h
@@ -0,0 +1,179 @@
+#ifndef JEMALLOC_INTERNAL_PAC_H
+#define JEMALLOC_INTERNAL_PAC_H
+
+#include "jemalloc/internal/exp_grow.h"
+#include "jemalloc/internal/pai.h"
+#include "san_bump.h"
+
+
+/*
+ * Page allocator classic; an implementation of the PAI interface that:
+ * - Can be used for arenas with custom extent hooks.
+ * - Can always satisfy any allocation request (including highly-fragmentary
+ * ones).
+ * - Can use efficient OS-level zeroing primitives for demand-filled pages.
+ */
+
+/* How "eager" decay/purging should be. */
+enum pac_purge_eagerness_e {
+ PAC_PURGE_ALWAYS,
+ PAC_PURGE_NEVER,
+ PAC_PURGE_ON_EPOCH_ADVANCE
+};
+typedef enum pac_purge_eagerness_e pac_purge_eagerness_t;
+
+typedef struct pac_decay_stats_s pac_decay_stats_t;
+struct pac_decay_stats_s {
+ /* Total number of purge sweeps. */
+ locked_u64_t npurge;
+ /* Total number of madvise calls made. */
+ locked_u64_t nmadvise;
+ /* Total number of pages purged. */
+ locked_u64_t purged;
+};
+
+typedef struct pac_estats_s pac_estats_t;
+struct pac_estats_s {
+ /*
+ * Stats for a given index in the range [0, SC_NPSIZES] in the various
+ * ecache_ts.
+ * We track both bytes and # of extents: two extents in the same bucket
+ * may have different sizes if adjacent size classes differ by more than
+ * a page, so bytes cannot always be derived from # of extents.
+ */
+ size_t ndirty;
+ size_t dirty_bytes;
+ size_t nmuzzy;
+ size_t muzzy_bytes;
+ size_t nretained;
+ size_t retained_bytes;
+};
+
+typedef struct pac_stats_s pac_stats_t;
+struct pac_stats_s {
+ pac_decay_stats_t decay_dirty;
+ pac_decay_stats_t decay_muzzy;
+
+ /*
+ * Number of unused virtual memory bytes currently retained. Retained
+ * bytes are technically mapped (though always decommitted or purged),
+ * but they are excluded from the mapped statistic (above).
+ */
+ size_t retained; /* Derived. */
+
+ /*
+ * Number of bytes currently mapped, excluding retained memory (and any
+ * base-allocated memory, which is tracked by the arena stats).
+ *
+ * We name this "pac_mapped" to avoid confusion with the arena_stats
+ * "mapped".
+ */
+ atomic_zu_t pac_mapped;
+
+ /* VM space had to be leaked (undocumented). Normally 0. */
+ atomic_zu_t abandoned_vm;
+};
+
+typedef struct pac_s pac_t;
+struct pac_s {
+ /*
+ * Must be the first member (we convert it to a PAC given only a
+ * pointer). The handle to the allocation interface.
+ */
+ pai_t pai;
+ /*
+ * Collections of extents that were previously allocated. These are
+ * used when allocating extents, in an attempt to re-use address space.
+ *
+ * Synchronization: internal.
+ */
+ ecache_t ecache_dirty;
+ ecache_t ecache_muzzy;
+ ecache_t ecache_retained;
+
+ base_t *base;
+ emap_t *emap;
+ edata_cache_t *edata_cache;
+
+ /* The grow info for the retained ecache. */
+ exp_grow_t exp_grow;
+ malloc_mutex_t grow_mtx;
+
+ /* Special allocator for guarded frequently reused extents. */
+ san_bump_alloc_t sba;
+
+ /* How large extents should be before getting auto-purged. */
+ atomic_zu_t oversize_threshold;
+
+ /*
+ * Decay-based purging state, responsible for scheduling extent state
+ * transitions.
+ *
+ * Synchronization: via the internal mutex.
+ */
+ decay_t decay_dirty; /* dirty --> muzzy */
+ decay_t decay_muzzy; /* muzzy --> retained */
+
+ malloc_mutex_t *stats_mtx;
+ pac_stats_t *stats;
+
+ /* Extent serial number generator state. */
+ atomic_zu_t extent_sn_next;
+};
+
+bool pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
+ edata_cache_t *edata_cache, nstime_t *cur_time, size_t oversize_threshold,
+ ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms, pac_stats_t *pac_stats,
+ malloc_mutex_t *stats_mtx);
+
+static inline size_t
+pac_mapped(pac_t *pac) {
+ return atomic_load_zu(&pac->stats->pac_mapped, ATOMIC_RELAXED);
+}
+
+static inline ehooks_t *
+pac_ehooks_get(pac_t *pac) {
+ return base_ehooks_get(pac->base);
+}
+
+/*
+ * All purging functions require holding decay->mtx. This is one of the few
+ * places external modules are allowed to peek inside pa_shard_t internals.
+ */
+
+/*
+ * Decays the number of pages currently in the ecache. This might not leave the
+ * ecache empty if other threads are inserting dirty objects into it
+ * concurrently with the call.
+ */
+void pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
+ pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay);
+/*
+ * Updates decay settings for the current time, and conditionally purges in
+ * response (depending on decay_purge_setting). Returns whether or not the
+ * epoch advanced.
+ */
+bool pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
+ pac_decay_stats_t *decay_stats, ecache_t *ecache,
+ pac_purge_eagerness_t eagerness);
+
+/*
+ * Gets / sets the maximum amount that we'll grow an arena down the
+ * grow-retained pathways (unless forced to by an allocation request).
+ *
+ * Set new_limit to NULL if it's just a query, or old_limit to NULL if you don't
+ * care about the previous value.
+ *
+ * Returns true on error (if the new limit is not valid).
+ */
+bool pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
+ size_t *new_limit);
+
+bool pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
+ ssize_t decay_ms, pac_purge_eagerness_t eagerness);
+ssize_t pac_decay_ms_get(pac_t *pac, extent_state_t state);
+
+void pac_reset(tsdn_t *tsdn, pac_t *pac);
+void pac_destroy(tsdn_t *tsdn, pac_t *pac);
+
+#endif /* JEMALLOC_INTERNAL_PAC_H */
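
For the decay knobs above, the units are milliseconds, with the two sentinel values jemalloc documents for the opt.*_decay_ms options: -1 disables decay entirely and 0 purges eagerly. A hedged usage sketch; the true-on-error return is an assumption based on the usual jemalloc convention:

    static void
    pac_decay_config_sketch(tsdn_t *tsdn, pac_t *pac) {
        /* Purge dirty pages eagerly; never decay muzzy pages. */
        if (pac_decay_ms_set(tsdn, pac, extent_state_dirty, 0,
            PAC_PURGE_ON_EPOCH_ADVANCE)) {
            return;               /* invalid decay time */
        }
        pac_decay_ms_set(tsdn, pac, extent_state_muzzy, -1,
            PAC_PURGE_ON_EPOCH_ADVANCE);
        assert(pac_decay_ms_get(pac, extent_state_dirty) == 0);
    }
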
diff --git a/contrib/jemalloc/include/jemalloc/internal/pages.h b/contrib/jemalloc/include/jemalloc/internal/pages.h
index 7dae633afe58..ad1f606a8a05 100644
--- a/contrib/jemalloc/include/jemalloc/internal/pages.h
+++ b/contrib/jemalloc/include/jemalloc/internal/pages.h
@@ -13,10 +13,27 @@
/* Return the smallest pagesize multiple that is >= s. */
#define PAGE_CEILING(s) \
(((s) + PAGE_MASK) & ~PAGE_MASK)
+/* Return the largest pagesize multiple that is <= s. */
+#define PAGE_FLOOR(s) \
+ ((s) & ~PAGE_MASK)
/* Huge page size. LG_HUGEPAGE is determined by the configure script. */
#define HUGEPAGE ((size_t)(1U << LG_HUGEPAGE))
#define HUGEPAGE_MASK ((size_t)(HUGEPAGE - 1))
+
+#if LG_HUGEPAGE != 0
+# define HUGEPAGE_PAGES (HUGEPAGE / PAGE)
+#else
+/*
+ * It's convenient to define arrays (or bitmaps) of HUGEPAGE_PAGES lengths. If
+ * we can't autodetect the hugepage size, it gets treated as 0, in which case
+ * we'll trigger a compiler error in those arrays. Avoid this case by ensuring
+ * that this value is at least 1. (We won't ever run in this degraded state;
+ * hpa_supported() returns false in this case.)
+ */
+# define HUGEPAGE_PAGES 1
+#endif
+
/* Return the huge page base address for the huge page containing address a. */
#define HUGEPAGE_ADDR2BASE(a) \
((void *)((uintptr_t)(a) & ~HUGEPAGE_MASK))
@@ -58,6 +75,18 @@ static const bool pages_can_purge_forced =
#endif
;
+#if defined(JEMALLOC_HAVE_MADVISE_HUGE) || defined(JEMALLOC_HAVE_MEMCNTL)
+# define PAGES_CAN_HUGIFY
+#endif
+
+static const bool pages_can_hugify =
+#ifdef PAGES_CAN_HUGIFY
+ true
+#else
+ false
+#endif
+ ;
+
typedef enum {
thp_mode_default = 0, /* Do not change hugepage settings. */
thp_mode_always = 1, /* Always set MADV_HUGEPAGE. */
@@ -84,5 +113,7 @@ bool pages_dontdump(void *addr, size_t size);
bool pages_dodump(void *addr, size_t size);
bool pages_boot(void);
void pages_set_thp_state (void *ptr, size_t size);
+void pages_mark_guards(void *head, void *tail);
+void pages_unmark_guards(void *head, void *tail);
#endif /* JEMALLOC_INTERNAL_PAGES_EXTERNS_H */
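
PAGE_CEILING and the new PAGE_FLOOR are the usual power-of-two rounding tricks: adding PAGE_MASK before masking rounds up, masking alone rounds down. A self-contained check, assuming 4 KiB pages for the sake of the example (the EX_* names are illustrative):

    #include <assert.h>
    #include <stddef.h>

    #define EX_PAGE      ((size_t)4096)   /* assume LG_PAGE == 12 */
    #define EX_PAGE_MASK (EX_PAGE - 1)
    #define EX_PAGE_CEILING(s) (((s) + EX_PAGE_MASK) & ~EX_PAGE_MASK)
    #define EX_PAGE_FLOOR(s)   ((s) & ~EX_PAGE_MASK)

    int
    main(void) {
        assert(EX_PAGE_CEILING(5000) == 8192);   /* round up */
        assert(EX_PAGE_FLOOR(5000) == 4096);     /* round down */
        assert(EX_PAGE_CEILING(4096) == 4096);   /* exact multiples stay put */
        return 0;
    }
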
diff --git a/contrib/jemalloc/include/jemalloc/internal/pai.h b/contrib/jemalloc/include/jemalloc/internal/pai.h
new file mode 100644
index 000000000000..d978cd7d25ec
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/pai.h
@@ -0,0 +1,95 @@
+#ifndef JEMALLOC_INTERNAL_PAI_H
+#define JEMALLOC_INTERNAL_PAI_H
+
+/* An interface for page allocation. */
+
+typedef struct pai_s pai_t;
+struct pai_s {
+ /* Returns NULL on failure. */
+ edata_t *(*alloc)(tsdn_t *tsdn, pai_t *self, size_t size,
+ size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+ bool *deferred_work_generated);
+ /*
+ * Returns the number of extents added to the list (which may be fewer
+ * than requested, in case of OOM). The list should already be
+ * initialized. The only alignment guarantee is page-alignment, and
+ * the results are not necessarily zeroed.
+ */
+ size_t (*alloc_batch)(tsdn_t *tsdn, pai_t *self, size_t size,
+ size_t nallocs, edata_list_active_t *results,
+ bool *deferred_work_generated);
+ bool (*expand)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool zero,
+ bool *deferred_work_generated);
+ bool (*shrink)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool *deferred_work_generated);
+ void (*dalloc)(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ bool *deferred_work_generated);
+ /* This function empties out list as a side-effect of being called. */
+ void (*dalloc_batch)(tsdn_t *tsdn, pai_t *self,
+ edata_list_active_t *list, bool *deferred_work_generated);
+ uint64_t (*time_until_deferred_work)(tsdn_t *tsdn, pai_t *self);
+};
+
+/*
+ * These are just simple convenience functions to avoid having to reference the
+ * same pai_t twice on every invocation.
+ */
+
+static inline edata_t *
+pai_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
+ bool zero, bool guarded, bool frequent_reuse,
+ bool *deferred_work_generated) {
+ return self->alloc(tsdn, self, size, alignment, zero, guarded,
+ frequent_reuse, deferred_work_generated);
+}
+
+static inline size_t
+pai_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
+ edata_list_active_t *results, bool *deferred_work_generated) {
+ return self->alloc_batch(tsdn, self, size, nallocs, results,
+ deferred_work_generated);
+}
+
+static inline bool
+pai_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
+ size_t new_size, bool zero, bool *deferred_work_generated) {
+ return self->expand(tsdn, self, edata, old_size, new_size, zero,
+ deferred_work_generated);
+}
+
+static inline bool
+pai_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
+ size_t new_size, bool *deferred_work_generated) {
+ return self->shrink(tsdn, self, edata, old_size, new_size,
+ deferred_work_generated);
+}
+
+static inline void
+pai_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ bool *deferred_work_generated) {
+ self->dalloc(tsdn, self, edata, deferred_work_generated);
+}
+
+static inline void
+pai_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
+ bool *deferred_work_generated) {
+ self->dalloc_batch(tsdn, self, list, deferred_work_generated);
+}
+
+static inline uint64_t
+pai_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
+ return self->time_until_deferred_work(tsdn, self);
+}
+
+/*
+ * An implementation of batch allocation that simply calls alloc once for
+ * each item in the list.
+ */
+size_t pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size,
+ size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated);
+/* Ditto, for dalloc. */
+void pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self,
+ edata_list_active_t *list, bool *deferred_work_generated);
+
+#endif /* JEMALLOC_INTERNAL_PAI_H */
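
pai_t is a hand-rolled vtable: one struct of function pointers plus thin static inline wrappers so call sites name the object only once, and implementations such as pac_t embed the pai_t as their first member so a pai_t pointer can be converted back. A minimal standalone analogue of the pattern (iface_t and the malloc-backed implementation are illustrative):

    #include <stddef.h>
    #include <stdlib.h>

    typedef struct iface_s iface_t;
    struct iface_s {
        void *(*alloc)(iface_t *self, size_t size);
        void (*dalloc)(iface_t *self, void *ptr);
    };

    /* Convenience wrappers, as pai_alloc/pai_dalloc are above. */
    static inline void *
    iface_alloc(iface_t *self, size_t size) {
        return self->alloc(self, size);
    }
    static inline void
    iface_dalloc(iface_t *self, void *ptr) {
        self->dalloc(self, ptr);
    }

    /* One implementation; others could embed iface_t as their first
     * member, the way pac_t embeds pai. */
    static void *
    malloc_impl_alloc(iface_t *self, size_t size) {
        (void)self;
        return malloc(size);
    }
    static void
    malloc_impl_dalloc(iface_t *self, void *ptr) {
        (void)self;
        free(ptr);
    }
    static iface_t malloc_impl = { malloc_impl_alloc, malloc_impl_dalloc };
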
diff --git a/contrib/jemalloc/include/jemalloc/internal/peak.h b/contrib/jemalloc/include/jemalloc/internal/peak.h
new file mode 100644
index 000000000000..59da3e41b6b7
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/peak.h
@@ -0,0 +1,37 @@
+#ifndef JEMALLOC_INTERNAL_PEAK_H
+#define JEMALLOC_INTERNAL_PEAK_H
+
+typedef struct peak_s peak_t;
+struct peak_s {
+ /* The highest recorded peak value, after adjustment (see below). */
+ uint64_t cur_max;
+ /*
+ * The difference between alloc and dalloc at the last set_zero call;
+ * this lets us cancel out the appropriate amount of excess.
+ */
+ uint64_t adjustment;
+};
+
+#define PEAK_INITIALIZER {0, 0}
+
+static inline uint64_t
+peak_max(peak_t *peak) {
+ return peak->cur_max;
+}
+
+static inline void
+peak_update(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
+ int64_t candidate_max = (int64_t)(alloc - dalloc - peak->adjustment);
+ if (candidate_max > (int64_t)peak->cur_max) {
+ peak->cur_max = candidate_max;
+ }
+}
+
+/* Resets the counter to zero; all peaks are now relative to this point. */
+static inline void
+peak_set_zero(peak_t *peak, uint64_t alloc, uint64_t dalloc) {
+ peak->cur_max = 0;
+ peak->adjustment = alloc - dalloc;
+}
+
+#endif /* JEMALLOC_INTERNAL_PEAK_H */
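
The adjustment field is what makes peak_set_zero cheap: rather than rewriting history, it records the current net (alloc - dalloc) and subtracts it from every later candidate. A small worked example, assuming the definitions above are in scope:

    #include <assert.h>
    #include <stdint.h>

    int
    main(void) {
        peak_t p = PEAK_INITIALIZER;
        peak_update(&p, /* alloc */ 100, /* dalloc */ 30);  /* net 70 */
        assert(peak_max(&p) == 70);
        peak_set_zero(&p, 100, 30);   /* adjustment = 70; peaks now relative */
        peak_update(&p, 150, 40);     /* net 110, minus 70 -> 40 */
        assert(peak_max(&p) == 40);
        return 0;
    }
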
diff --git a/contrib/jemalloc/include/jemalloc/internal/peak_event.h b/contrib/jemalloc/include/jemalloc/internal/peak_event.h
new file mode 100644
index 000000000000..b808ce043299
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/peak_event.h
@@ -0,0 +1,24 @@
+#ifndef JEMALLOC_INTERNAL_PEAK_EVENT_H
+#define JEMALLOC_INTERNAL_PEAK_EVENT_H
+
+/*
+ * While peak.h contains the simple helper struct that tracks state, this
+ * contains the allocator tie-ins (and knows about tsd, the event module, etc.).
+ */
+
+/* Update the peak with current tsd state. */
+void peak_event_update(tsd_t *tsd);
+/* Set current state to zero. */
+void peak_event_zero(tsd_t *tsd);
+uint64_t peak_event_max(tsd_t *tsd);
+
+/* Manual hooks. */
+/* The activity-triggered hooks. */
+uint64_t peak_alloc_new_event_wait(tsd_t *tsd);
+uint64_t peak_alloc_postponed_event_wait(tsd_t *tsd);
+void peak_alloc_event_handler(tsd_t *tsd, uint64_t elapsed);
+uint64_t peak_dalloc_new_event_wait(tsd_t *tsd);
+uint64_t peak_dalloc_postponed_event_wait(tsd_t *tsd);
+void peak_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed);
+
+#endif /* JEMALLOC_INTERNAL_PEAK_EVENT_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/ph.h b/contrib/jemalloc/include/jemalloc/internal/ph.h
index 84d6778a906e..5f091c5fbb0c 100644
--- a/contrib/jemalloc/include/jemalloc/internal/ph.h
+++ b/contrib/jemalloc/include/jemalloc/internal/ph.h
@@ -1,3 +1,6 @@
+#ifndef JEMALLOC_INTERNAL_PH_H
+#define JEMALLOC_INTERNAL_PH_H
+
/*
* A Pairing Heap implementation.
*
@@ -10,382 +13,508 @@
* http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
*
*******************************************************************************
+ *
+ * We include a non-obvious optimization:
+ * - First, we introduce a new pop-and-link operation; pop the two most
+ * recently-inserted items off the aux-list, link them, and push the resulting
+ * heap.
+ * - We maintain a count of the number of insertions since the last time we
+ * merged the aux-list (i.e. via first() or remove_first()). After N inserts,
+ * we do ffs(N) pop-and-link operations.
+ *
+ * One way to think of this is that we're progressively building up a tree in
+ * the aux-list, rather than a linked-list (think of the series of merges that
+ * will be performed as the aux-count grows).
+ *
+ * There are a couple of reasons we benefit from this:
+ * - Ordinarily, after N insertions, the aux-list is of size N. With our
+ * strategy, it's of size O(log(N)). So we decrease the worst-case time of
+ * first() calls, and reduce the average cost of remove_min calls. Since
+ * these almost always occur while holding a lock, we practically reduce the
+ * frequency of unusually long hold times.
+ * - This moves the bulk of the work of merging the aux-list onto the threads
+ * that are inserting into the heap. In some common scenarios, insertions
+ * happen in bulk, from a single thread (think tcache flushing; we potentially
+ * move many slabs from slabs_full to slabs_nonfull). All the nodes in this
+ * case are in the inserting thread's cache, and linking them is very cheap
+ * (cache misses dominate linking cost). Without this optimization, linking
+ * happens on the next call to remove_first. Since that remove_first call
+ * likely happens on a different thread (or at least, after the cache has
+ * gotten cold if done on the same thread), deferring linking trades cheap
+ * link operations now for expensive ones later.
+ *
+ * The ffs trick keeps amortized insert cost at constant time. Similar
+ * strategies based on periodically sorting the list after a batch of operations
+ * perform worse than this in practice, even with various fancy tricks; they
+ * all take the amortized complexity of an insert from O(1) to O(log(n)).
*/
-#ifndef PH_H_
-#define PH_H_
+typedef int (*ph_cmp_t)(void *, void *);
/* Node structure. */
-#define phn(a_type) \
-struct { \
- a_type *phn_prev; \
- a_type *phn_next; \
- a_type *phn_lchild; \
+typedef struct phn_link_s phn_link_t;
+struct phn_link_s {
+ void *prev;
+ void *next;
+ void *lchild;
+};
+
+typedef struct ph_s ph_t;
+struct ph_s {
+ void *root;
+ /*
+ * Inserts done since the last aux-list merge. This is not necessarily
+ * the size of the aux-list, since it's possible that removals have
+ * happened since, and we don't track whether or not those removals are
+ * from the aux list.
+ */
+ size_t auxcount;
+};
+
+JEMALLOC_ALWAYS_INLINE phn_link_t *
+phn_link_get(void *phn, size_t offset) {
+ return (phn_link_t *)(((uintptr_t)phn) + offset);
}
-/* Root structure. */
-#define ph(a_type) \
-struct { \
- a_type *ph_root; \
+JEMALLOC_ALWAYS_INLINE void
+phn_link_init(void *phn, size_t offset) {
+ phn_link_get(phn, offset)->prev = NULL;
+ phn_link_get(phn, offset)->next = NULL;
+ phn_link_get(phn, offset)->lchild = NULL;
}
-/* Internal utility macros. */
-#define phn_lchild_get(a_type, a_field, a_phn) \
- (a_phn->a_field.phn_lchild)
-#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
- a_phn->a_field.phn_lchild = a_lchild; \
-} while (0)
-
-#define phn_next_get(a_type, a_field, a_phn) \
- (a_phn->a_field.phn_next)
-#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
- a_phn->a_field.phn_prev = a_prev; \
-} while (0)
-
-#define phn_prev_get(a_type, a_field, a_phn) \
- (a_phn->a_field.phn_prev)
-#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
- a_phn->a_field.phn_next = a_next; \
-} while (0)
-
-#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
- a_type *phn0child; \
- \
- assert(a_phn0 != NULL); \
- assert(a_phn1 != NULL); \
- assert(a_cmp(a_phn0, a_phn1) <= 0); \
- \
- phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
- phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
- phn_next_set(a_type, a_field, a_phn1, phn0child); \
- if (phn0child != NULL) { \
- phn_prev_set(a_type, a_field, phn0child, a_phn1); \
- } \
- phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
-} while (0)
-
-#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
- if (a_phn0 == NULL) { \
- r_phn = a_phn1; \
- } else if (a_phn1 == NULL) { \
- r_phn = a_phn0; \
- } else if (a_cmp(a_phn0, a_phn1) < 0) { \
- phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
- a_cmp); \
- r_phn = a_phn0; \
- } else { \
- phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
- a_cmp); \
- r_phn = a_phn1; \
- } \
-} while (0)
+/* Internal utility helpers. */
+JEMALLOC_ALWAYS_INLINE void *
+phn_lchild_get(void *phn, size_t offset) {
+ return phn_link_get(phn, offset)->lchild;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+phn_lchild_set(void *phn, void *lchild, size_t offset) {
+ phn_link_get(phn, offset)->lchild = lchild;
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+phn_next_get(void *phn, size_t offset) {
+ return phn_link_get(phn, offset)->next;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+phn_next_set(void *phn, void *next, size_t offset) {
+ phn_link_get(phn, offset)->next = next;
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+phn_prev_get(void *phn, size_t offset) {
+ return phn_link_get(phn, offset)->prev;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+phn_prev_set(void *phn, void *prev, size_t offset) {
+ phn_link_get(phn, offset)->prev = prev;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+phn_merge_ordered(void *phn0, void *phn1, size_t offset,
+ ph_cmp_t cmp) {
+ void *phn0child;
+
+ assert(phn0 != NULL);
+ assert(phn1 != NULL);
+ assert(cmp(phn0, phn1) <= 0);
+
+ phn_prev_set(phn1, phn0, offset);
+ phn0child = phn_lchild_get(phn0, offset);
+ phn_next_set(phn1, phn0child, offset);
+ if (phn0child != NULL) {
+ phn_prev_set(phn0child, phn1, offset);
+ }
+ phn_lchild_set(phn0, phn1, offset);
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+phn_merge(void *phn0, void *phn1, size_t offset, ph_cmp_t cmp) {
+ void *result;
+ if (phn0 == NULL) {
+ result = phn1;
+ } else if (phn1 == NULL) {
+ result = phn0;
+ } else if (cmp(phn0, phn1) < 0) {
+ phn_merge_ordered(phn0, phn1, offset, cmp);
+ result = phn0;
+ } else {
+ phn_merge_ordered(phn1, phn0, offset, cmp);
+ result = phn1;
+ }
+ return result;
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+phn_merge_siblings(void *phn, size_t offset, ph_cmp_t cmp) {
+ void *head = NULL;
+ void *tail = NULL;
+ void *phn0 = phn;
+ void *phn1 = phn_next_get(phn0, offset);
+
+ /*
+ * Multipass merge, wherein the first two elements of a FIFO
+ * are repeatedly merged, and each result is appended to the
+ * singly linked FIFO, until the FIFO contains only a single
+ * element. We start with a sibling list but no reference to
+ * its tail, so we do a single pass over the sibling list to
+ * populate the FIFO.
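+	 *
+	 * For example, a sibling list a-b-c-d-e becomes the FIFO
+	 * [ab, cd, e] after the first pass; the loop then merges ab+cd
+	 * and appends the result, giving [e, abcd], and finally merges
+	 * e+abcd into a single heap.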
+ */
+ if (phn1 != NULL) {
+ void *phnrest = phn_next_get(phn1, offset);
+ if (phnrest != NULL) {
+ phn_prev_set(phnrest, NULL, offset);
+ }
+ phn_prev_set(phn0, NULL, offset);
+ phn_next_set(phn0, NULL, offset);
+ phn_prev_set(phn1, NULL, offset);
+ phn_next_set(phn1, NULL, offset);
+ phn0 = phn_merge(phn0, phn1, offset, cmp);
+ head = tail = phn0;
+ phn0 = phnrest;
+ while (phn0 != NULL) {
+ phn1 = phn_next_get(phn0, offset);
+ if (phn1 != NULL) {
+ phnrest = phn_next_get(phn1, offset);
+ if (phnrest != NULL) {
+ phn_prev_set(phnrest, NULL, offset);
+ }
+ phn_prev_set(phn0, NULL, offset);
+ phn_next_set(phn0, NULL, offset);
+ phn_prev_set(phn1, NULL, offset);
+ phn_next_set(phn1, NULL, offset);
+ phn0 = phn_merge(phn0, phn1, offset, cmp);
+ phn_next_set(tail, phn0, offset);
+ tail = phn0;
+ phn0 = phnrest;
+ } else {
+ phn_next_set(tail, phn0, offset);
+ tail = phn0;
+ phn0 = NULL;
+ }
+ }
+ phn0 = head;
+ phn1 = phn_next_get(phn0, offset);
+ if (phn1 != NULL) {
+ while (true) {
+ head = phn_next_get(phn1, offset);
+ assert(phn_prev_get(phn0, offset) == NULL);
+ phn_next_set(phn0, NULL, offset);
+ assert(phn_prev_get(phn1, offset) == NULL);
+ phn_next_set(phn1, NULL, offset);
+ phn0 = phn_merge(phn0, phn1, offset, cmp);
+ if (head == NULL) {
+ break;
+ }
+ phn_next_set(tail, phn0, offset);
+ tail = phn0;
+ phn0 = head;
+ phn1 = phn_next_get(phn0, offset);
+ }
+ }
+ }
+ return phn0;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+ph_merge_aux(ph_t *ph, size_t offset, ph_cmp_t cmp) {
+ ph->auxcount = 0;
+ void *phn = phn_next_get(ph->root, offset);
+ if (phn != NULL) {
+ phn_prev_set(ph->root, NULL, offset);
+ phn_next_set(ph->root, NULL, offset);
+ phn_prev_set(phn, NULL, offset);
+ phn = phn_merge_siblings(phn, offset, cmp);
+ assert(phn_next_get(phn, offset) == NULL);
+ ph->root = phn_merge(ph->root, phn, offset, cmp);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ph_merge_children(void *phn, size_t offset, ph_cmp_t cmp) {
+ void *result;
+ void *lchild = phn_lchild_get(phn, offset);
+ if (lchild == NULL) {
+ result = NULL;
+ } else {
+ result = phn_merge_siblings(lchild, offset, cmp);
+ }
+ return result;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+ph_new(ph_t *ph) {
+ ph->root = NULL;
+ ph->auxcount = 0;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+ph_empty(ph_t *ph) {
+ return ph->root == NULL;
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ph_first(ph_t *ph, size_t offset, ph_cmp_t cmp) {
+ if (ph->root == NULL) {
+ return NULL;
+ }
+ ph_merge_aux(ph, offset, cmp);
+ return ph->root;
+}
+
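+/*
+ * Return an arbitrary element without merging the aux list: the head of the
+ * aux list if one exists (constant time), otherwise the root.
+ */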
+JEMALLOC_ALWAYS_INLINE void *
+ph_any(ph_t *ph, size_t offset) {
+ if (ph->root == NULL) {
+ return NULL;
+ }
+ void *aux = phn_next_get(ph->root, offset);
+ if (aux != NULL) {
+ return aux;
+ }
+ return ph->root;
+}
+
+/* Returns true if we should stop trying to merge. */
+JEMALLOC_ALWAYS_INLINE bool
+ph_try_aux_merge_pair(ph_t *ph, size_t offset, ph_cmp_t cmp) {
+ assert(ph->root != NULL);
+ void *phn0 = phn_next_get(ph->root, offset);
+ if (phn0 == NULL) {
+ return true;
+ }
+ void *phn1 = phn_next_get(phn0, offset);
+ if (phn1 == NULL) {
+ return true;
+ }
+ void *next_phn1 = phn_next_get(phn1, offset);
+ phn_next_set(phn0, NULL, offset);
+ phn_prev_set(phn0, NULL, offset);
+ phn_next_set(phn1, NULL, offset);
+ phn_prev_set(phn1, NULL, offset);
+ phn0 = phn_merge(phn0, phn1, offset, cmp);
+ phn_next_set(phn0, next_phn1, offset);
+ if (next_phn1 != NULL) {
+ phn_prev_set(next_phn1, phn0, offset);
+ }
+ phn_next_set(ph->root, phn0, offset);
+ phn_prev_set(phn0, ph->root, offset);
+ return next_phn1 == NULL;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+ph_insert(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) {
+ phn_link_init(phn, offset);
-#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
- a_type *head = NULL; \
- a_type *tail = NULL; \
- a_type *phn0 = a_phn; \
- a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
+	/*
+	 * Treat the root as an aux list during insertion, and lazily merge
+	 * during ph_remove_first(). For elements that are inserted, then
+	 * removed via ph_remove() before the aux list is ever processed,
+	 * this makes insert/remove constant-time, whereas eager merging
+	 * would make insert O(log n).
+	 */
+ if (ph->root == NULL) {
+ ph->root = phn;
+ } else {
+		/*
+		 * As a special case, check to see if we can replace the root.
+		 * This is common in practice in some important cases, and lets
+		 * us defer some insertions (hopefully, until the point where
+		 * some of the items in the aux list have been removed, saving
+		 * us from linking them at all).
+		 */
+ if (cmp(phn, ph->root) < 0) {
+ phn_lchild_set(phn, ph->root, offset);
+ phn_prev_set(ph->root, phn, offset);
+ ph->root = phn;
+ ph->auxcount = 0;
+ return;
+ }
+ ph->auxcount++;
+ phn_next_set(phn, phn_next_get(ph->root, offset), offset);
+ if (phn_next_get(ph->root, offset) != NULL) {
+ phn_prev_set(phn_next_get(ph->root, offset), phn,
+ offset);
+ }
+ phn_prev_set(phn, ph->root, offset);
+ phn_next_set(ph->root, phn, offset);
+ }
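+	/*
+	 * Do a number of pairwise aux-list merges equal to the number of
+	 * trailing zero bits in (auxcount - 1), binary-counter style, so
+	 * that the amortized merging cost per insert stays constant.
+	 */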
+ if (ph->auxcount > 1) {
+ unsigned nmerges = ffs_zu(ph->auxcount - 1);
+ bool done = false;
+ for (unsigned i = 0; i < nmerges && !done; i++) {
+ done = ph_try_aux_merge_pair(ph, offset, cmp);
+ }
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void *
+ph_remove_first(ph_t *ph, size_t offset, ph_cmp_t cmp) {
+ void *ret;
+
+ if (ph->root == NULL) {
+ return NULL;
+ }
+ ph_merge_aux(ph, offset, cmp);
+ ret = ph->root;
+ ph->root = ph_merge_children(ph->root, offset, cmp);
+
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+ph_remove(ph_t *ph, void *phn, size_t offset, ph_cmp_t cmp) {
+ void *replace;
+ void *parent;
+
+ if (ph->root == phn) {
+		/*
+		 * We can delete from the aux list without merging it, but we
+		 * need to merge if we are dealing with the root node and it
+		 * has children.
+		 */
+ if (phn_lchild_get(phn, offset) == NULL) {
+ ph->root = phn_next_get(phn, offset);
+ if (ph->root != NULL) {
+ phn_prev_set(ph->root, NULL, offset);
+ }
+ return;
+ }
+ ph_merge_aux(ph, offset, cmp);
+ if (ph->root == phn) {
+ ph->root = ph_merge_children(ph->root, offset, cmp);
+ return;
+ }
+ }
+
+ /* Get parent (if phn is leftmost child) before mutating. */
+ if ((parent = phn_prev_get(phn, offset)) != NULL) {
+ if (phn_lchild_get(parent, offset) != phn) {
+ parent = NULL;
+ }
+ }
+ /* Find a possible replacement node, and link to parent. */
+ replace = ph_merge_children(phn, offset, cmp);
+ /* Set next/prev for sibling linked list. */
+ if (replace != NULL) {
+ if (parent != NULL) {
+ phn_prev_set(replace, parent, offset);
+ phn_lchild_set(parent, replace, offset);
+ } else {
+ phn_prev_set(replace, phn_prev_get(phn, offset),
+ offset);
+ if (phn_prev_get(phn, offset) != NULL) {
+ phn_next_set(phn_prev_get(phn, offset), replace,
+ offset);
+ }
+ }
+ phn_next_set(replace, phn_next_get(phn, offset), offset);
+ if (phn_next_get(phn, offset) != NULL) {
+ phn_prev_set(phn_next_get(phn, offset), replace,
+ offset);
+ }
+ } else {
+ if (parent != NULL) {
+ void *next = phn_next_get(phn, offset);
+ phn_lchild_set(parent, next, offset);
+ if (next != NULL) {
+ phn_prev_set(next, parent, offset);
+ }
+ } else {
+ assert(phn_prev_get(phn, offset) != NULL);
+ phn_next_set(
+ phn_prev_get(phn, offset),
+ phn_next_get(phn, offset), offset);
+ }
+ if (phn_next_get(phn, offset) != NULL) {
+ phn_prev_set(
+ phn_next_get(phn, offset),
+ phn_prev_get(phn, offset), offset);
+ }
+ }
+}
+
+#define ph_structs(a_prefix, a_type) \
+typedef struct { \
+ phn_link_t link; \
+} a_prefix##_link_t; \
\
- /* \
- * Multipass merge, wherein the first two elements of a FIFO \
- * are repeatedly merged, and each result is appended to the \
- * singly linked FIFO, until the FIFO contains only a single \
- * element. We start with a sibling list but no reference to \
- * its tail, so we do a single pass over the sibling list to \
- * populate the FIFO. \
- */ \
- if (phn1 != NULL) { \
- a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
- if (phnrest != NULL) { \
- phn_prev_set(a_type, a_field, phnrest, NULL); \
- } \
- phn_prev_set(a_type, a_field, phn0, NULL); \
- phn_next_set(a_type, a_field, phn0, NULL); \
- phn_prev_set(a_type, a_field, phn1, NULL); \
- phn_next_set(a_type, a_field, phn1, NULL); \
- phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
- head = tail = phn0; \
- phn0 = phnrest; \
- while (phn0 != NULL) { \
- phn1 = phn_next_get(a_type, a_field, phn0); \
- if (phn1 != NULL) { \
- phnrest = phn_next_get(a_type, a_field, \
- phn1); \
- if (phnrest != NULL) { \
- phn_prev_set(a_type, a_field, \
- phnrest, NULL); \
- } \
- phn_prev_set(a_type, a_field, phn0, \
- NULL); \
- phn_next_set(a_type, a_field, phn0, \
- NULL); \
- phn_prev_set(a_type, a_field, phn1, \
- NULL); \
- phn_next_set(a_type, a_field, phn1, \
- NULL); \
- phn_merge(a_type, a_field, phn0, phn1, \
- a_cmp, phn0); \
- phn_next_set(a_type, a_field, tail, \
- phn0); \
- tail = phn0; \
- phn0 = phnrest; \
- } else { \
- phn_next_set(a_type, a_field, tail, \
- phn0); \
- tail = phn0; \
- phn0 = NULL; \
- } \
- } \
- phn0 = head; \
- phn1 = phn_next_get(a_type, a_field, phn0); \
- if (phn1 != NULL) { \
- while (true) { \
- head = phn_next_get(a_type, a_field, \
- phn1); \
- assert(phn_prev_get(a_type, a_field, \
- phn0) == NULL); \
- phn_next_set(a_type, a_field, phn0, \
- NULL); \
- assert(phn_prev_get(a_type, a_field, \
- phn1) == NULL); \
- phn_next_set(a_type, a_field, phn1, \
- NULL); \
- phn_merge(a_type, a_field, phn0, phn1, \
- a_cmp, phn0); \
- if (head == NULL) { \
- break; \
- } \
- phn_next_set(a_type, a_field, tail, \
- phn0); \
- tail = phn0; \
- phn0 = head; \
- phn1 = phn_next_get(a_type, a_field, \
- phn0); \
- } \
- } \
- } \
- r_phn = phn0; \
-} while (0)
-
-#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
- a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
- if (phn != NULL) { \
- phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
- phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
- phn_prev_set(a_type, a_field, phn, NULL); \
- ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
- assert(phn_next_get(a_type, a_field, phn) == NULL); \
- phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
- a_ph->ph_root); \
- } \
-} while (0)
-
-#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
- a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
- if (lchild == NULL) { \
- r_phn = NULL; \
- } else { \
- ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
- r_phn); \
- } \
-} while (0)
+typedef struct { \
+ ph_t ph; \
+} a_prefix##_t;
/*
* The ph_proto() macro generates function prototypes that correspond to the
* functions generated by an equivalently parameterized call to ph_gen().
*/
-#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
-a_attr void a_prefix##new(a_ph_type *ph); \
-a_attr bool a_prefix##empty(a_ph_type *ph); \
-a_attr a_type *a_prefix##first(a_ph_type *ph); \
-a_attr a_type *a_prefix##any(a_ph_type *ph); \
-a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
-a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
-a_attr a_type *a_prefix##remove_any(a_ph_type *ph); \
-a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
+#define ph_proto(a_attr, a_prefix, a_type) \
+ \
+a_attr void a_prefix##_new(a_prefix##_t *ph); \
+a_attr bool a_prefix##_empty(a_prefix##_t *ph); \
+a_attr a_type *a_prefix##_first(a_prefix##_t *ph); \
+a_attr a_type *a_prefix##_any(a_prefix##_t *ph); \
+a_attr void a_prefix##_insert(a_prefix##_t *ph, a_type *phn); \
+a_attr a_type *a_prefix##_remove_first(a_prefix##_t *ph); \
+a_attr void a_prefix##_remove(a_prefix##_t *ph, a_type *phn); \
+a_attr a_type *a_prefix##_remove_any(a_prefix##_t *ph);
-/*
- * The ph_gen() macro generates a type-specific pairing heap implementation,
- * based on the above cpp macros.
- */
-#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
+/* The ph_gen() macro generates a type-specific pairing heap implementation. */
+#define ph_gen(a_attr, a_prefix, a_type, a_field, a_cmp) \
+JEMALLOC_ALWAYS_INLINE int \
+a_prefix##_ph_cmp(void *a, void *b) { \
+ return a_cmp((a_type *)a, (a_type *)b); \
+} \
+ \
a_attr void \
-a_prefix##new(a_ph_type *ph) { \
- memset(ph, 0, sizeof(ph(a_type))); \
+a_prefix##_new(a_prefix##_t *ph) { \
+ ph_new(&ph->ph); \
} \
+ \
a_attr bool \
-a_prefix##empty(a_ph_type *ph) { \
- return (ph->ph_root == NULL); \
+a_prefix##_empty(a_prefix##_t *ph) { \
+ return ph_empty(&ph->ph); \
} \
+ \
a_attr a_type * \
-a_prefix##first(a_ph_type *ph) { \
- if (ph->ph_root == NULL) { \
- return NULL; \
- } \
- ph_merge_aux(a_type, a_field, ph, a_cmp); \
- return ph->ph_root; \
+a_prefix##_first(a_prefix##_t *ph) { \
+ return ph_first(&ph->ph, offsetof(a_type, a_field), \
+ &a_prefix##_ph_cmp); \
} \
+ \
a_attr a_type * \
-a_prefix##any(a_ph_type *ph) { \
- if (ph->ph_root == NULL) { \
- return NULL; \
- } \
- a_type *aux = phn_next_get(a_type, a_field, ph->ph_root); \
- if (aux != NULL) { \
- return aux; \
- } \
- return ph->ph_root; \
+a_prefix##_any(a_prefix##_t *ph) { \
+ return ph_any(&ph->ph, offsetof(a_type, a_field)); \
} \
-a_attr void \
-a_prefix##insert(a_ph_type *ph, a_type *phn) { \
- memset(&phn->a_field, 0, sizeof(phn(a_type))); \
\
- /* \
- * Treat the root as an aux list during insertion, and lazily \
- * merge during a_prefix##remove_first(). For elements that \
- * are inserted, then removed via a_prefix##remove() before the \
- * aux list is ever processed, this makes insert/remove \
- * constant-time, whereas eager merging would make insert \
- * O(log n). \
- */ \
- if (ph->ph_root == NULL) { \
- ph->ph_root = phn; \
- } else { \
- phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
- a_field, ph->ph_root)); \
- if (phn_next_get(a_type, a_field, ph->ph_root) != \
- NULL) { \
- phn_prev_set(a_type, a_field, \
- phn_next_get(a_type, a_field, ph->ph_root), \
- phn); \
- } \
- phn_prev_set(a_type, a_field, phn, ph->ph_root); \
- phn_next_set(a_type, a_field, ph->ph_root, phn); \
- } \
+a_attr void \
+a_prefix##_insert(a_prefix##_t *ph, a_type *phn) { \
+ ph_insert(&ph->ph, phn, offsetof(a_type, a_field), \
+ a_prefix##_ph_cmp); \
} \
-a_attr a_type * \
-a_prefix##remove_first(a_ph_type *ph) { \
- a_type *ret; \
\
- if (ph->ph_root == NULL) { \
- return NULL; \
- } \
- ph_merge_aux(a_type, a_field, ph, a_cmp); \
- \
- ret = ph->ph_root; \
- \
- ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
- ph->ph_root); \
+a_attr a_type * \
+a_prefix##_remove_first(a_prefix##_t *ph) { \
+ return ph_remove_first(&ph->ph, offsetof(a_type, a_field), \
+ a_prefix##_ph_cmp); \
+} \
\
- return ret; \
+a_attr void \
+a_prefix##_remove(a_prefix##_t *ph, a_type *phn) { \
+ ph_remove(&ph->ph, phn, offsetof(a_type, a_field), \
+ a_prefix##_ph_cmp); \
} \
+ \
a_attr a_type * \
-a_prefix##remove_any(a_ph_type *ph) { \
- /* \
- * Remove the most recently inserted aux list element, or the \
- * root if the aux list is empty. This has the effect of \
- * behaving as a LIFO (and insertion/removal is therefore \
- * constant-time) if a_prefix##[remove_]first() are never \
- * called. \
- */ \
- if (ph->ph_root == NULL) { \
- return NULL; \
- } \
- a_type *ret = phn_next_get(a_type, a_field, ph->ph_root); \
+a_prefix##_remove_any(a_prefix##_t *ph) { \
+ a_type *ret = a_prefix##_any(ph); \
if (ret != NULL) { \
- a_type *aux = phn_next_get(a_type, a_field, ret); \
- phn_next_set(a_type, a_field, ph->ph_root, aux); \
- if (aux != NULL) { \
- phn_prev_set(a_type, a_field, aux, \
- ph->ph_root); \
- } \
- return ret; \
+ a_prefix##_remove(ph, ret); \
} \
- ret = ph->ph_root; \
- ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
- ph->ph_root); \
return ret; \
-} \
-a_attr void \
-a_prefix##remove(a_ph_type *ph, a_type *phn) { \
- a_type *replace, *parent; \
- \
- if (ph->ph_root == phn) { \
- /* \
- * We can delete from aux list without merging it, but \
- * we need to merge if we are dealing with the root \
- * node and it has children. \
- */ \
- if (phn_lchild_get(a_type, a_field, phn) == NULL) { \
- ph->ph_root = phn_next_get(a_type, a_field, \
- phn); \
- if (ph->ph_root != NULL) { \
- phn_prev_set(a_type, a_field, \
- ph->ph_root, NULL); \
- } \
- return; \
- } \
- ph_merge_aux(a_type, a_field, ph, a_cmp); \
- if (ph->ph_root == phn) { \
- ph_merge_children(a_type, a_field, ph->ph_root, \
- a_cmp, ph->ph_root); \
- return; \
- } \
- } \
- \
- /* Get parent (if phn is leftmost child) before mutating. */ \
- if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
- if (phn_lchild_get(a_type, a_field, parent) != phn) { \
- parent = NULL; \
- } \
- } \
- /* Find a possible replacement node, and link to parent. */ \
- ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
- /* Set next/prev for sibling linked list. */ \
- if (replace != NULL) { \
- if (parent != NULL) { \
- phn_prev_set(a_type, a_field, replace, parent); \
- phn_lchild_set(a_type, a_field, parent, \
- replace); \
- } else { \
- phn_prev_set(a_type, a_field, replace, \
- phn_prev_get(a_type, a_field, phn)); \
- if (phn_prev_get(a_type, a_field, phn) != \
- NULL) { \
- phn_next_set(a_type, a_field, \
- phn_prev_get(a_type, a_field, phn), \
- replace); \
- } \
- } \
- phn_next_set(a_type, a_field, replace, \
- phn_next_get(a_type, a_field, phn)); \
- if (phn_next_get(a_type, a_field, phn) != NULL) { \
- phn_prev_set(a_type, a_field, \
- phn_next_get(a_type, a_field, phn), \
- replace); \
- } \
- } else { \
- if (parent != NULL) { \
- a_type *next = phn_next_get(a_type, a_field, \
- phn); \
- phn_lchild_set(a_type, a_field, parent, next); \
- if (next != NULL) { \
- phn_prev_set(a_type, a_field, next, \
- parent); \
- } \
- } else { \
- assert(phn_prev_get(a_type, a_field, phn) != \
- NULL); \
- phn_next_set(a_type, a_field, \
- phn_prev_get(a_type, a_field, phn), \
- phn_next_get(a_type, a_field, phn)); \
- } \
- if (phn_next_get(a_type, a_field, phn) != NULL) { \
- phn_prev_set(a_type, a_field, \
- phn_next_get(a_type, a_field, phn), \
- phn_prev_get(a_type, a_field, phn)); \
- } \
- } \
}
-#endif /* PH_H_ */
+#endif /* JEMALLOC_INTERNAL_PH_H */
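As a usage illustration of the new offset-based pairing heap API, here is a
minimal consumer sketch, assuming the jemalloc internal headers are in scope;
node_t, node_heap, and key are hypothetical names, not from this tree:

/* Minimal sketch of a consumer of the new ph.h API (hypothetical names). */
typedef struct node_s node_t;

ph_structs(node_heap, node_t)	/* defines node_heap_link_t and node_heap_t */

struct node_s {
	uint64_t key;
	node_heap_link_t link;	/* intrusive link; wraps phn_link_t */
};

static int
node_cmp(node_t *a, node_t *b) {
	return (a->key > b->key) - (a->key < b->key);
}

ph_proto(static, node_heap, node_t)
ph_gen(static, node_heap, node_t, link, node_cmp)

/*
 * node_heap_t heap;
 * node_heap_new(&heap);
 * node_heap_insert(&heap, &some_node);
 * node_t *min = node_heap_remove_first(&heap);
 */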
diff --git a/contrib/jemalloc/include/jemalloc/internal/private_namespace.sh b/contrib/jemalloc/include/jemalloc/internal/private_namespace.sh
new file mode 100755
index 000000000000..6ef1346a3c1a
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/private_namespace.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+
+for symbol in `cat "$@"` ; do
+ echo "#define ${symbol} JEMALLOC_N(${symbol})"
+done
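For illustration, given an input file listing symbols such as arena_init and
tcache_flush (hypothetical entries), the loop above emits one mangling define
per symbol:

#define arena_init JEMALLOC_N(arena_init)
#define tcache_flush JEMALLOC_N(tcache_flush)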
diff --git a/contrib/jemalloc/include/jemalloc/internal/private_symbols.sh b/contrib/jemalloc/include/jemalloc/internal/private_symbols.sh
new file mode 100755
index 000000000000..442a259fdc89
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/private_symbols.sh
@@ -0,0 +1,51 @@
+#!/bin/sh
+#
+# Generate private_symbols[_jet].awk.
+#
+# Usage: private_symbols.sh <sym_prefix> <sym>*
+#
+# <sym_prefix> is typically "" or "_".
+
+sym_prefix=$1
+shift
+
+cat <<EOF
+#!/usr/bin/env awk -f
+
+BEGIN {
+ sym_prefix = "${sym_prefix}"
+ split("\\
+EOF
+
+for public_sym in "$@" ; do
+ cat <<EOF
+ ${sym_prefix}${public_sym} \\
+EOF
+done
+
+cat <<"EOF"
+ ", exported_symbol_names)
+ # Store exported symbol names as keys in exported_symbols.
+ for (i in exported_symbol_names) {
+ exported_symbols[exported_symbol_names[i]] = 1
+ }
+}
+
+# Process 'nm -a <c_source.o>' output.
+#
+# Handle lines like:
+# 0000000000000008 D opt_junk
+# 0000000000007574 T malloc_initialized
+(NF == 3 && $2 ~ /^[ABCDGRSTVW]$/ && !($3 in exported_symbols) && $3 ~ /^[A-Za-z0-9_]+$/) {
+ print substr($3, 1+length(sym_prefix), length($3)-length(sym_prefix))
+}
+
+# Process 'dumpbin /SYMBOLS <c_source.obj>' output.
+#
+# Handle lines like:
+# 353 00008098 SECT4 notype External | opt_junk
+# 3F1 00000000 SECT7 notype () External | malloc_initialized
+($3 ~ /^SECT[0-9]+/ && $(NF-2) == "External" && !($NF in exported_symbols)) {
+ print $NF
+}
+EOF
diff --git a/contrib/jemalloc/include/jemalloc/internal/prng.h b/contrib/jemalloc/include/jemalloc/internal/prng.h
index 15cc2d18fa4d..14542aa12d46 100644
--- a/contrib/jemalloc/include/jemalloc/internal/prng.h
+++ b/contrib/jemalloc/include/jemalloc/internal/prng.h
@@ -1,7 +1,6 @@
#ifndef JEMALLOC_INTERNAL_PRNG_H
#define JEMALLOC_INTERNAL_PRNG_H
-#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bit_util.h"
/*
@@ -59,66 +58,38 @@ prng_state_next_zu(size_t state) {
/*
* The prng_lg_range functions give a uniform int in the half-open range [0,
- * 2**lg_range). If atomic is true, they do so safely from multiple threads.
- * Multithreaded 64-bit prngs aren't supported.
+ * 2**lg_range).
*/
JEMALLOC_ALWAYS_INLINE uint32_t
-prng_lg_range_u32(atomic_u32_t *state, unsigned lg_range, bool atomic) {
- uint32_t ret, state0, state1;
-
+prng_lg_range_u32(uint32_t *state, unsigned lg_range) {
assert(lg_range > 0);
assert(lg_range <= 32);
- state0 = atomic_load_u32(state, ATOMIC_RELAXED);
-
- if (atomic) {
- do {
- state1 = prng_state_next_u32(state0);
- } while (!atomic_compare_exchange_weak_u32(state, &state0,
- state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
- } else {
- state1 = prng_state_next_u32(state0);
- atomic_store_u32(state, state1, ATOMIC_RELAXED);
- }
- ret = state1 >> (32 - lg_range);
+ *state = prng_state_next_u32(*state);
+ uint32_t ret = *state >> (32 - lg_range);
return ret;
}
JEMALLOC_ALWAYS_INLINE uint64_t
prng_lg_range_u64(uint64_t *state, unsigned lg_range) {
- uint64_t ret, state1;
-
assert(lg_range > 0);
assert(lg_range <= 64);
- state1 = prng_state_next_u64(*state);
- *state = state1;
- ret = state1 >> (64 - lg_range);
+ *state = prng_state_next_u64(*state);
+ uint64_t ret = *state >> (64 - lg_range);
return ret;
}
JEMALLOC_ALWAYS_INLINE size_t
-prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
- size_t ret, state0, state1;
-
+prng_lg_range_zu(size_t *state, unsigned lg_range) {
assert(lg_range > 0);
assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR));
- state0 = atomic_load_zu(state, ATOMIC_RELAXED);
-
- if (atomic) {
- do {
- state1 = prng_state_next_zu(state0);
- } while (atomic_compare_exchange_weak_zu(state, &state0,
- state1, ATOMIC_RELAXED, ATOMIC_RELAXED));
- } else {
- state1 = prng_state_next_zu(state0);
- atomic_store_zu(state, state1, ATOMIC_RELAXED);
- }
- ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
+ *state = prng_state_next_zu(*state);
+ size_t ret = *state >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range);
return ret;
}
@@ -129,18 +100,24 @@ prng_lg_range_zu(atomic_zu_t *state, unsigned lg_range, bool atomic) {
*/
JEMALLOC_ALWAYS_INLINE uint32_t
-prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
- uint32_t ret;
- unsigned lg_range;
-
- assert(range > 1);
+prng_range_u32(uint32_t *state, uint32_t range) {
+ assert(range != 0);
+ /*
+ * If range were 1, lg_range would be 0, so the shift in
+ * prng_lg_range_u32 would be a shift of a 32-bit variable by 32 bits,
+ * which is UB. Just handle this case as a one-off.
+ */
+ if (range == 1) {
+ return 0;
+ }
/* Compute the ceiling of lg(range). */
- lg_range = ffs_u32(pow2_ceil_u32(range)) - 1;
+ unsigned lg_range = ffs_u32(pow2_ceil_u32(range));
/* Generate a result in [0..range) via repeated trial. */
+ uint32_t ret;
do {
- ret = prng_lg_range_u32(state, lg_range, atomic);
+ ret = prng_lg_range_u32(state, lg_range);
} while (ret >= range);
return ret;
@@ -148,15 +125,18 @@ prng_range_u32(atomic_u32_t *state, uint32_t range, bool atomic) {
JEMALLOC_ALWAYS_INLINE uint64_t
prng_range_u64(uint64_t *state, uint64_t range) {
- uint64_t ret;
- unsigned lg_range;
+ assert(range != 0);
- assert(range > 1);
+ /* See the note in prng_range_u32. */
+ if (range == 1) {
+ return 0;
+ }
/* Compute the ceiling of lg(range). */
- lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
+ unsigned lg_range = ffs_u64(pow2_ceil_u64(range));
/* Generate a result in [0..range) via repeated trial. */
+ uint64_t ret;
do {
ret = prng_lg_range_u64(state, lg_range);
} while (ret >= range);
@@ -165,18 +145,21 @@ prng_range_u64(uint64_t *state, uint64_t range) {
}
JEMALLOC_ALWAYS_INLINE size_t
-prng_range_zu(atomic_zu_t *state, size_t range, bool atomic) {
- size_t ret;
- unsigned lg_range;
+prng_range_zu(size_t *state, size_t range) {
+ assert(range != 0);
- assert(range > 1);
+ /* See the note in prng_range_u32. */
+ if (range == 1) {
+ return 0;
+ }
/* Compute the ceiling of lg(range). */
- lg_range = ffs_u64(pow2_ceil_u64(range)) - 1;
+ unsigned lg_range = ffs_u64(pow2_ceil_u64(range));
/* Generate a result in [0..range) via repeated trial. */
+ size_t ret;
do {
- ret = prng_lg_range_zu(state, lg_range, atomic);
+ ret = prng_lg_range_zu(state, lg_range);
} while (ret >= range);
return ret;
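A short sketch of the simplified PRNG API after this change, with a
caller-managed, thread-local state word; the seed value is arbitrary and the
function name hypothetical:

#include <stdint.h>

/* State is plain storage now; callers keep it per-thread instead of atomic. */
static __thread uint64_t prng_state = 0x9e3779b97f4a7c15ULL;

static uint64_t
dice_roll(void) {
	/* Uniform in [0, 6); a range of 1 now legally returns 0. */
	return prng_range_u64(&prng_state, 6);
}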
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_data.h b/contrib/jemalloc/include/jemalloc/internal/prof_data.h
new file mode 100644
index 000000000000..4c8e22c76f20
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/prof_data.h
@@ -0,0 +1,37 @@
+#ifndef JEMALLOC_INTERNAL_PROF_DATA_H
+#define JEMALLOC_INTERNAL_PROF_DATA_H
+
+#include "jemalloc/internal/mutex.h"
+
+extern malloc_mutex_t bt2gctx_mtx;
+extern malloc_mutex_t tdatas_mtx;
+extern malloc_mutex_t prof_dump_mtx;
+
+extern malloc_mutex_t *gctx_locks;
+extern malloc_mutex_t *tdata_locks;
+
+extern size_t prof_unbiased_sz[PROF_SC_NSIZES];
+extern size_t prof_shifted_unbiased_cnt[PROF_SC_NSIZES];
+
+void prof_bt_hash(const void *key, size_t r_hash[2]);
+bool prof_bt_keycomp(const void *k1, const void *k2);
+
+bool prof_data_init(tsd_t *tsd);
+prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
+char *prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);
+int prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name);
+void prof_unbias_map_init();
+void prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque,
+ prof_tdata_t *tdata, bool leakcheck);
+prof_tdata_t * prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid,
+ uint64_t thr_discrim, char *thread_name, bool active);
+void prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata);
+void prof_reset(tsd_t *tsd, size_t lg_sample);
+void prof_tctx_try_destroy(tsd_t *tsd, prof_tctx_t *tctx);
+
+/* Used in unit tests. */
+size_t prof_tdata_count(void);
+size_t prof_bt_count(void);
+void prof_cnt_all(prof_cnt_t *cnt_all);
+
+#endif /* JEMALLOC_INTERNAL_PROF_DATA_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_externs.h b/contrib/jemalloc/include/jemalloc/internal/prof_externs.h
index 094f3e170ae7..bdff1349aeaf 100644
--- a/contrib/jemalloc/include/jemalloc/internal/prof_externs.h
+++ b/contrib/jemalloc/include/jemalloc/internal/prof_externs.h
@@ -2,75 +2,72 @@
#define JEMALLOC_INTERNAL_PROF_EXTERNS_H
#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/prof_hook.h"
-extern malloc_mutex_t bt2gctx_mtx;
-
-extern bool opt_prof;
-extern bool opt_prof_active;
-extern bool opt_prof_thread_active_init;
-extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
-extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
-extern bool opt_prof_gdump; /* High-water memory dumping. */
-extern bool opt_prof_final; /* Final profile dumping. */
-extern bool opt_prof_leak; /* Dump leak summary at exit. */
-extern bool opt_prof_accum; /* Report cumulative bytes. */
-extern bool opt_prof_log; /* Turn logging on at boot. */
-extern char opt_prof_prefix[
+extern bool opt_prof;
+extern bool opt_prof_active;
+extern bool opt_prof_thread_active_init;
+extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */
+extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */
+extern bool opt_prof_gdump; /* High-water memory dumping. */
+extern bool opt_prof_final; /* Final profile dumping. */
+extern bool opt_prof_leak; /* Dump leak summary at exit. */
+extern bool opt_prof_leak_error; /* Exit with error code if memory leaked. */
+extern bool opt_prof_accum; /* Report cumulative bytes. */
+extern bool opt_prof_log; /* Turn logging on at boot. */
+extern char opt_prof_prefix[
/* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
PATH_MAX +
#endif
1];
+extern bool opt_prof_unbias;
+
+/* For recording recent allocations */
+extern ssize_t opt_prof_recent_alloc_max;
+
+/* Whether to use thread name provided by the system or by mallctl. */
+extern bool opt_prof_sys_thread_name;
+
+/* Whether to record per size class counts and request size totals. */
+extern bool opt_prof_stats;
/* Accessed via prof_active_[gs]et{_unlocked,}(). */
-extern bool prof_active;
+extern bool prof_active_state;
/* Accessed via prof_gdump_[gs]et{_unlocked,}(). */
-extern bool prof_gdump_val;
+extern bool prof_gdump_val;
-/*
- * Profile dump interval, measured in bytes allocated. Each arena triggers a
- * profile dump when it reaches this threshold. The effect is that the
- * interval between profile dumps averages prof_interval, though the actual
- * interval between dumps will tend to be sporadic, and the interval will be a
- * maximum of approximately (prof_interval * narenas).
- */
-extern uint64_t prof_interval;
+/* Profile dump interval, measured in bytes allocated. */
+extern uint64_t prof_interval;
/*
* Initialized as opt_lg_prof_sample, and potentially modified during profiling
* resets.
*/
-extern size_t lg_prof_sample;
-
-void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
-void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx);
-void prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize,
- prof_tctx_t *tctx);
-void bt_init(prof_bt_t *bt, void **vec);
-void prof_backtrace(prof_bt_t *bt);
-prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt);
-#ifdef JEMALLOC_JET
-size_t prof_tdata_count(void);
-size_t prof_bt_count(void);
-#endif
-typedef int (prof_dump_open_t)(bool, const char *);
-extern prof_dump_open_t *JET_MUTABLE prof_dump_open;
-
-typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
-extern prof_dump_header_t *JET_MUTABLE prof_dump_header;
-#ifdef JEMALLOC_JET
-void prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
- uint64_t *accumbytes);
-#endif
-bool prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum);
+extern size_t lg_prof_sample;
+
+extern bool prof_booted;
+
+void prof_backtrace_hook_set(prof_backtrace_hook_t hook);
+prof_backtrace_hook_t prof_backtrace_hook_get();
+
+void prof_dump_hook_set(prof_dump_hook_t hook);
+prof_dump_hook_t prof_dump_hook_get();
+
+/* Functions only accessed in prof_inlines.h */
+prof_tdata_t *prof_tdata_init(tsd_t *tsd);
+prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
+
+void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx);
+void prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size,
+ size_t usize, prof_tctx_t *tctx);
+void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_info_t *prof_info);
+prof_tctx_t *prof_tctx_create(tsd_t *tsd);
void prof_idump(tsdn_t *tsdn);
bool prof_mdump(tsd_t *tsd, const char *filename);
void prof_gdump(tsdn_t *tsdn);
-prof_tdata_t *prof_tdata_init(tsd_t *tsd);
-prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
-void prof_reset(tsd_t *tsd, size_t lg_sample);
+
void prof_tdata_cleanup(tsd_t *tsd);
bool prof_active_get(tsdn_t *tsdn);
bool prof_active_set(tsdn_t *tsdn, bool active);
@@ -84,22 +81,15 @@ bool prof_gdump_get(tsdn_t *tsdn);
bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
-bool prof_boot2(tsd_t *tsd);
+bool prof_boot2(tsd_t *tsd, base_t *base);
void prof_prefork0(tsdn_t *tsdn);
void prof_prefork1(tsdn_t *tsdn);
void prof_postfork_parent(tsdn_t *tsdn);
void prof_postfork_child(tsdn_t *tsdn);
-void prof_sample_threshold_update(prof_tdata_t *tdata);
-
-bool prof_log_start(tsdn_t *tsdn, const char *filename);
-bool prof_log_stop(tsdn_t *tsdn);
-#ifdef JEMALLOC_JET
-size_t prof_log_bt_count(void);
-size_t prof_log_alloc_count(void);
-size_t prof_log_thr_count(void);
-bool prof_log_is_logging(void);
-bool prof_log_rep_check(void);
-void prof_log_dummy_set(bool new_value);
-#endif
+
+/* Only accessed by thread event. */
+uint64_t prof_sample_new_event_wait(tsd_t *tsd);
+uint64_t prof_sample_postponed_event_wait(tsd_t *tsd);
+void prof_sample_event_handler(tsd_t *tsd, uint64_t elapsed);
#endif /* JEMALLOC_INTERNAL_PROF_EXTERNS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_hook.h b/contrib/jemalloc/include/jemalloc/internal/prof_hook.h
new file mode 100644
index 000000000000..150d19d3d61c
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/prof_hook.h
@@ -0,0 +1,21 @@
+#ifndef JEMALLOC_INTERNAL_PROF_HOOK_H
+#define JEMALLOC_INTERNAL_PROF_HOOK_H
+
+/*
+ * The hook types declared in this file are experimental and undocumented;
+ * hence the typedefs are located in an 'internal' header.
+ */
+
+/*
+ * A hook to mock out backtrace functionality. This can be handy, since it's
+ * otherwise difficult to guarantee that two allocations are reported as coming
+ * from the exact same stack trace in the presence of an optimizing compiler.
+ */
+typedef void (*prof_backtrace_hook_t)(void **, unsigned *, unsigned);
+
+/*
+ * A callback hook that notifies about a recently dumped heap profile.
+ */
+typedef void (*prof_dump_hook_t)(const char *filename);
+
+#endif /* JEMALLOC_INTERNAL_PROF_HOOK_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_inlines.h b/contrib/jemalloc/include/jemalloc/internal/prof_inlines.h
new file mode 100644
index 000000000000..a8e7e7fb663e
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/prof_inlines.h
@@ -0,0 +1,261 @@
+#ifndef JEMALLOC_INTERNAL_PROF_INLINES_H
+#define JEMALLOC_INTERNAL_PROF_INLINES_H
+
+#include "jemalloc/internal/safety_check.h"
+#include "jemalloc/internal/sz.h"
+#include "jemalloc/internal/thread_event.h"
+
+JEMALLOC_ALWAYS_INLINE void
+prof_active_assert() {
+ cassert(config_prof);
+ /*
+ * If opt_prof is off, then prof_active must always be off, regardless
+ * of whether prof_active_mtx is in effect or not.
+ */
+ assert(opt_prof || !prof_active_state);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+prof_active_get_unlocked(void) {
+ prof_active_assert();
+ /*
+ * Even if opt_prof is true, sampling can be temporarily disabled by
+ * setting prof_active to false. No locking is used when reading
+ * prof_active in the fast path, so there are no guarantees regarding
+ * how long it will take for all threads to notice state changes.
+ */
+ return prof_active_state;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+prof_gdump_get_unlocked(void) {
+ /*
+ * No locking is used when reading prof_gdump_val in the fast path, so
+ * there are no guarantees regarding how long it will take for all
+ * threads to notice state changes.
+ */
+ return prof_gdump_val;
+}
+
+JEMALLOC_ALWAYS_INLINE prof_tdata_t *
+prof_tdata_get(tsd_t *tsd, bool create) {
+ prof_tdata_t *tdata;
+
+ cassert(config_prof);
+
+ tdata = tsd_prof_tdata_get(tsd);
+ if (create) {
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+ if (unlikely(tdata == NULL)) {
+ if (tsd_nominal(tsd)) {
+ tdata = prof_tdata_init(tsd);
+ tsd_prof_tdata_set(tsd, tdata);
+ }
+ } else if (unlikely(tdata->expired)) {
+ tdata = prof_tdata_reinit(tsd, tdata);
+ tsd_prof_tdata_set(tsd, tdata);
+ }
+ assert(tdata == NULL || tdata->attached);
+ }
+
+ return tdata;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_info_get(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx,
+ prof_info_t *prof_info) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+ assert(prof_info != NULL);
+
+ arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, false);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_info_get_and_reset_recent(tsd_t *tsd, const void *ptr,
+ emap_alloc_ctx_t *alloc_ctx, prof_info_t *prof_info) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+ assert(prof_info != NULL);
+
+ arena_prof_info_get(tsd, ptr, alloc_ctx, prof_info, true);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_tctx_reset(tsd_t *tsd, const void *ptr, emap_alloc_ctx_t *alloc_ctx) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ arena_prof_tctx_reset(tsd, ptr, alloc_ctx);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_tctx_reset_sampled(tsd_t *tsd, const void *ptr) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+
+ arena_prof_tctx_reset_sampled(tsd, ptr);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_info_set(tsd_t *tsd, edata_t *edata, prof_tctx_t *tctx, size_t size) {
+ cassert(config_prof);
+ assert(edata != NULL);
+ assert((uintptr_t)tctx > (uintptr_t)1U);
+
+ arena_prof_info_set(tsd, edata, tctx, size);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+prof_sample_should_skip(tsd_t *tsd, bool sample_event) {
+ cassert(config_prof);
+
+ /* Fastpath: no need to load tdata */
+ if (likely(!sample_event)) {
+ return true;
+ }
+
+ /*
+ * sample_event is always obtained from the thread event module, and
+ * whenever it's true, it means that the thread event module has
+ * already checked the reentrancy level.
+ */
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+
+ prof_tdata_t *tdata = prof_tdata_get(tsd, true);
+ if (unlikely(tdata == NULL)) {
+ return true;
+ }
+
+ return !tdata->active;
+}
+
+JEMALLOC_ALWAYS_INLINE prof_tctx_t *
+prof_alloc_prep(tsd_t *tsd, bool prof_active, bool sample_event) {
+ prof_tctx_t *ret;
+
+ if (!prof_active ||
+ likely(prof_sample_should_skip(tsd, sample_event))) {
+ ret = (prof_tctx_t *)(uintptr_t)1U;
+ } else {
+ ret = prof_tctx_create(tsd);
+ }
+
+ return ret;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_malloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
+ emap_alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
+ cassert(config_prof);
+ assert(ptr != NULL);
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr));
+
+ if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
+ prof_malloc_sample_object(tsd, ptr, size, usize, tctx);
+ } else {
+ prof_tctx_reset(tsd, ptr, alloc_ctx);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_realloc(tsd_t *tsd, const void *ptr, size_t size, size_t usize,
+ prof_tctx_t *tctx, bool prof_active, const void *old_ptr, size_t old_usize,
+ prof_info_t *old_prof_info, bool sample_event) {
+ bool sampled, old_sampled, moved;
+
+ cassert(config_prof);
+ assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
+
+ if (prof_active && ptr != NULL) {
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr));
+ if (prof_sample_should_skip(tsd, sample_event)) {
+ /*
+ * Don't sample. The usize passed to prof_alloc_prep()
+ * was larger than what actually got allocated, so a
+ * backtrace was captured for this allocation, even
+ * though its actual usize was insufficient to cross the
+ * sample threshold.
+ */
+ prof_alloc_rollback(tsd, tctx);
+ tctx = (prof_tctx_t *)(uintptr_t)1U;
+ }
+ }
+
+ sampled = ((uintptr_t)tctx > (uintptr_t)1U);
+ old_sampled = ((uintptr_t)old_prof_info->alloc_tctx > (uintptr_t)1U);
+ moved = (ptr != old_ptr);
+
+ if (unlikely(sampled)) {
+ prof_malloc_sample_object(tsd, ptr, size, usize, tctx);
+ } else if (moved) {
+ prof_tctx_reset(tsd, ptr, NULL);
+ } else if (unlikely(old_sampled)) {
+ /*
+ * prof_tctx_reset() would work for the !moved case as well,
+ * but prof_tctx_reset_sampled() is slightly cheaper, and the
+ * proper thing to do here in the presence of explicit
+ * knowledge re: moved state.
+ */
+ prof_tctx_reset_sampled(tsd, ptr);
+ } else {
+ prof_info_t prof_info;
+ prof_info_get(tsd, ptr, NULL, &prof_info);
+ assert((uintptr_t)prof_info.alloc_tctx == (uintptr_t)1U);
+ }
+
+ /*
+ * The prof_free_sampled_object() call must come after the
+ * prof_malloc_sample_object() call, because tctx and old_tctx may be
+ * the same, in which case reversing the call order could cause the tctx
+ * to be prematurely destroyed as a side effect of momentarily zeroed
+ * counters.
+ */
+ if (unlikely(old_sampled)) {
+ prof_free_sampled_object(tsd, old_usize, old_prof_info);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
+prof_sample_align(size_t orig_align) {
+ /*
+ * Enforce page alignment, so that sampled allocations can be identified
+ * w/o metadata lookup.
+ */
+ assert(opt_prof);
+ return (opt_cache_oblivious && orig_align < PAGE) ? PAGE :
+ orig_align;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+prof_sample_aligned(const void *ptr) {
+ return ((uintptr_t)ptr & PAGE_MASK) == 0;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+prof_sampled(tsd_t *tsd, const void *ptr) {
+ prof_info_t prof_info;
+ prof_info_get(tsd, ptr, NULL, &prof_info);
+ bool sampled = (uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U;
+ if (sampled) {
+ assert(prof_sample_aligned(ptr));
+ }
+ return sampled;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+prof_free(tsd_t *tsd, const void *ptr, size_t usize,
+ emap_alloc_ctx_t *alloc_ctx) {
+ prof_info_t prof_info;
+ prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info);
+
+ cassert(config_prof);
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr));
+
+ if (unlikely((uintptr_t)prof_info.alloc_tctx > (uintptr_t)1U)) {
+ assert(prof_sample_aligned(ptr));
+ prof_free_sampled_object(tsd, usize, &prof_info);
+ }
+}
+
+#endif /* JEMALLOC_INTERNAL_PROF_INLINES_H */
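The page-alignment trick used by prof_sample_align()/prof_sample_aligned() in
miniature; the page size below is illustrative, and the real PAGE/PAGE_MASK
values come from jemalloc's platform configuration:

#include <stdint.h>

#define EXAMPLE_PAGE ((uintptr_t)4096)	/* illustrative page size */

/*
 * Sampled allocations are forced to page alignment when allocated, so the
 * free path can rule sampling out with a mask test, no metadata lookup.
 */
static int
maybe_sampled(const void *ptr) {
	return ((uintptr_t)ptr & (EXAMPLE_PAGE - 1)) == 0;
}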
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_inlines_a.h b/contrib/jemalloc/include/jemalloc/internal/prof_inlines_a.h
deleted file mode 100644
index 471d9853cf87..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/prof_inlines_a.h
+++ /dev/null
@@ -1,85 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PROF_INLINES_A_H
-#define JEMALLOC_INTERNAL_PROF_INLINES_A_H
-
-#include "jemalloc/internal/mutex.h"
-
-static inline bool
-prof_accum_add(tsdn_t *tsdn, prof_accum_t *prof_accum,
- uint64_t accumbytes) {
- cassert(config_prof);
-
- bool overflow;
- uint64_t a0, a1;
-
- /*
- * If the application allocates fast enough (and/or if idump is slow
- * enough), extreme overflow here (a1 >= prof_interval * 2) can cause
- * idump trigger coalescing. This is an intentional mechanism that
- * avoids rate-limiting allocation.
- */
-#ifdef JEMALLOC_ATOMIC_U64
- a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
- do {
- a1 = a0 + accumbytes;
- assert(a1 >= a0);
- overflow = (a1 >= prof_interval);
- if (overflow) {
- a1 %= prof_interval;
- }
- } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
- a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
-#else
- malloc_mutex_lock(tsdn, &prof_accum->mtx);
- a0 = prof_accum->accumbytes;
- a1 = a0 + accumbytes;
- overflow = (a1 >= prof_interval);
- if (overflow) {
- a1 %= prof_interval;
- }
- prof_accum->accumbytes = a1;
- malloc_mutex_unlock(tsdn, &prof_accum->mtx);
-#endif
- return overflow;
-}
-
-static inline void
-prof_accum_cancel(tsdn_t *tsdn, prof_accum_t *prof_accum,
- size_t usize) {
- cassert(config_prof);
-
- /*
- * Cancel out as much of the excessive prof_accumbytes increase as
- * possible without underflowing. Interval-triggered dumps occur
- * slightly more often than intended as a result of incomplete
- * canceling.
- */
- uint64_t a0, a1;
-#ifdef JEMALLOC_ATOMIC_U64
- a0 = atomic_load_u64(&prof_accum->accumbytes, ATOMIC_RELAXED);
- do {
- a1 = (a0 >= SC_LARGE_MINCLASS - usize)
- ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
- } while (!atomic_compare_exchange_weak_u64(&prof_accum->accumbytes, &a0,
- a1, ATOMIC_RELAXED, ATOMIC_RELAXED));
-#else
- malloc_mutex_lock(tsdn, &prof_accum->mtx);
- a0 = prof_accum->accumbytes;
- a1 = (a0 >= SC_LARGE_MINCLASS - usize)
- ? a0 - (SC_LARGE_MINCLASS - usize) : 0;
- prof_accum->accumbytes = a1;
- malloc_mutex_unlock(tsdn, &prof_accum->mtx);
-#endif
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-prof_active_get_unlocked(void) {
- /*
- * Even if opt_prof is true, sampling can be temporarily disabled by
- * setting prof_active to false. No locking is used when reading
- * prof_active in the fast path, so there are no guarantees regarding
- * how long it will take for all threads to notice state changes.
- */
- return prof_active;
-}
-
-#endif /* JEMALLOC_INTERNAL_PROF_INLINES_A_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_inlines_b.h b/contrib/jemalloc/include/jemalloc/internal/prof_inlines_b.h
deleted file mode 100644
index 8ba8a1e1ffe7..000000000000
--- a/contrib/jemalloc/include/jemalloc/internal/prof_inlines_b.h
+++ /dev/null
@@ -1,250 +0,0 @@
-#ifndef JEMALLOC_INTERNAL_PROF_INLINES_B_H
-#define JEMALLOC_INTERNAL_PROF_INLINES_B_H
-
-#include "jemalloc/internal/safety_check.h"
-#include "jemalloc/internal/sz.h"
-
-JEMALLOC_ALWAYS_INLINE bool
-prof_gdump_get_unlocked(void) {
- /*
- * No locking is used when reading prof_gdump_val in the fast path, so
- * there are no guarantees regarding how long it will take for all
- * threads to notice state changes.
- */
- return prof_gdump_val;
-}
-
-JEMALLOC_ALWAYS_INLINE prof_tdata_t *
-prof_tdata_get(tsd_t *tsd, bool create) {
- prof_tdata_t *tdata;
-
- cassert(config_prof);
-
- tdata = tsd_prof_tdata_get(tsd);
- if (create) {
- if (unlikely(tdata == NULL)) {
- if (tsd_nominal(tsd)) {
- tdata = prof_tdata_init(tsd);
- tsd_prof_tdata_set(tsd, tdata);
- }
- } else if (unlikely(tdata->expired)) {
- tdata = prof_tdata_reinit(tsd, tdata);
- tsd_prof_tdata_set(tsd, tdata);
- }
- assert(tdata == NULL || tdata->attached);
- }
-
- return tdata;
-}
-
-JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_tctx_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- return arena_prof_tctx_get(tsdn, ptr, alloc_ctx);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
- alloc_ctx_t *alloc_ctx, prof_tctx_t *tctx) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- arena_prof_tctx_set(tsdn, ptr, usize, alloc_ctx, tctx);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_tctx_reset(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- arena_prof_tctx_reset(tsdn, ptr, tctx);
-}
-
-JEMALLOC_ALWAYS_INLINE nstime_t
-prof_alloc_time_get(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- return arena_prof_alloc_time_get(tsdn, ptr, alloc_ctx);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_alloc_time_set(tsdn_t *tsdn, const void *ptr, alloc_ctx_t *alloc_ctx,
- nstime_t t) {
- cassert(config_prof);
- assert(ptr != NULL);
-
- arena_prof_alloc_time_set(tsdn, ptr, alloc_ctx, t);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-prof_sample_check(tsd_t *tsd, size_t usize, bool update) {
- ssize_t check = update ? 0 : usize;
-
- int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd);
- if (update) {
- bytes_until_sample -= usize;
- if (tsd_nominal(tsd)) {
- tsd_bytes_until_sample_set(tsd, bytes_until_sample);
- }
- }
- if (likely(bytes_until_sample >= check)) {
- return true;
- }
-
- return false;
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update,
- prof_tdata_t **tdata_out) {
- prof_tdata_t *tdata;
-
- cassert(config_prof);
-
- /* Fastpath: no need to load tdata */
- if (likely(prof_sample_check(tsd, usize, update))) {
- return true;
- }
-
- bool booted = tsd_prof_tdata_get(tsd);
- tdata = prof_tdata_get(tsd, true);
- if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) {
- tdata = NULL;
- }
-
- if (tdata_out != NULL) {
- *tdata_out = tdata;
- }
-
- if (unlikely(tdata == NULL)) {
- return true;
- }
-
- /*
- * If this was the first creation of tdata, then
- * prof_tdata_get() reset bytes_until_sample, so decrement and
- * check it again
- */
- if (!booted && prof_sample_check(tsd, usize, update)) {
- return true;
- }
-
- if (tsd_reentrancy_level_get(tsd) > 0) {
- return true;
- }
- /* Compute new sample threshold. */
- if (update) {
- prof_sample_threshold_update(tdata);
- }
- return !tdata->active;
-}
-
-JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) {
- prof_tctx_t *ret;
- prof_tdata_t *tdata;
- prof_bt_t bt;
-
- assert(usize == sz_s2u(usize));
-
- if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update,
- &tdata))) {
- ret = (prof_tctx_t *)(uintptr_t)1U;
- } else {
- bt_init(&bt, tdata->vec);
- prof_backtrace(&bt);
- ret = prof_lookup(tsd, &bt);
- }
-
- return ret;
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx,
- prof_tctx_t *tctx) {
- cassert(config_prof);
- assert(ptr != NULL);
- assert(usize == isalloc(tsdn, ptr));
-
- if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
- prof_malloc_sample_object(tsdn, ptr, usize, tctx);
- } else {
- prof_tctx_set(tsdn, ptr, usize, alloc_ctx,
- (prof_tctx_t *)(uintptr_t)1U);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
- bool prof_active, bool updated, const void *old_ptr, size_t old_usize,
- prof_tctx_t *old_tctx) {
- bool sampled, old_sampled, moved;
-
- cassert(config_prof);
- assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
-
- if (prof_active && !updated && ptr != NULL) {
- assert(usize == isalloc(tsd_tsdn(tsd), ptr));
- if (prof_sample_accum_update(tsd, usize, true, NULL)) {
- /*
- * Don't sample. The usize passed to prof_alloc_prep()
- * was larger than what actually got allocated, so a
- * backtrace was captured for this allocation, even
- * though its actual usize was insufficient to cross the
- * sample threshold.
- */
- prof_alloc_rollback(tsd, tctx, true);
- tctx = (prof_tctx_t *)(uintptr_t)1U;
- }
- }
-
- sampled = ((uintptr_t)tctx > (uintptr_t)1U);
- old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
- moved = (ptr != old_ptr);
-
- if (unlikely(sampled)) {
- prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
- } else if (moved) {
- prof_tctx_set(tsd_tsdn(tsd), ptr, usize, NULL,
- (prof_tctx_t *)(uintptr_t)1U);
- } else if (unlikely(old_sampled)) {
- /*
- * prof_tctx_set() would work for the !moved case as well, but
- * prof_tctx_reset() is slightly cheaper, and the proper thing
- * to do here in the presence of explicit knowledge re: moved
- * state.
- */
- prof_tctx_reset(tsd_tsdn(tsd), ptr, tctx);
- } else {
- assert((uintptr_t)prof_tctx_get(tsd_tsdn(tsd), ptr, NULL) ==
- (uintptr_t)1U);
- }
-
- /*
- * The prof_free_sampled_object() call must come after the
- * prof_malloc_sample_object() call, because tctx and old_tctx may be
- * the same, in which case reversing the call order could cause the tctx
- * to be prematurely destroyed as a side effect of momentarily zeroed
- * counters.
- */
- if (unlikely(old_sampled)) {
- prof_free_sampled_object(tsd, ptr, old_usize, old_tctx);
- }
-}
-
-JEMALLOC_ALWAYS_INLINE void
-prof_free(tsd_t *tsd, const void *ptr, size_t usize, alloc_ctx_t *alloc_ctx) {
- prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
-
- cassert(config_prof);
- assert(usize == isalloc(tsd_tsdn(tsd), ptr));
-
- if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) {
- prof_free_sampled_object(tsd, ptr, usize, tctx);
- }
-}
-
-#endif /* JEMALLOC_INTERNAL_PROF_INLINES_B_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_log.h b/contrib/jemalloc/include/jemalloc/internal/prof_log.h
new file mode 100644
index 000000000000..ccb557dde69f
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/prof_log.h
@@ -0,0 +1,22 @@
+#ifndef JEMALLOC_INTERNAL_PROF_LOG_H
+#define JEMALLOC_INTERNAL_PROF_LOG_H
+
+#include "jemalloc/internal/mutex.h"
+
+extern malloc_mutex_t log_mtx;
+
+void prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info);
+bool prof_log_init(tsd_t *tsdn);
+
+/* Used in unit tests. */
+size_t prof_log_bt_count(void);
+size_t prof_log_alloc_count(void);
+size_t prof_log_thr_count(void);
+bool prof_log_is_logging(void);
+bool prof_log_rep_check(void);
+void prof_log_dummy_set(bool new_value);
+
+bool prof_log_start(tsdn_t *tsdn, const char *filename);
+bool prof_log_stop(tsdn_t *tsdn);
+
+#endif /* JEMALLOC_INTERNAL_PROF_LOG_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_recent.h b/contrib/jemalloc/include/jemalloc/internal/prof_recent.h
new file mode 100644
index 000000000000..df4102362699
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/prof_recent.h
@@ -0,0 +1,23 @@
+#ifndef JEMALLOC_INTERNAL_PROF_RECENT_H
+#define JEMALLOC_INTERNAL_PROF_RECENT_H
+
+extern malloc_mutex_t prof_recent_alloc_mtx;
+extern malloc_mutex_t prof_recent_dump_mtx;
+
+bool prof_recent_alloc_prepare(tsd_t *tsd, prof_tctx_t *tctx);
+void prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize);
+void prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata);
+bool prof_recent_init();
+void edata_prof_recent_alloc_init(edata_t *edata);
+
+/* Used in unit tests. */
+typedef ql_head(prof_recent_t) prof_recent_list_t;
+extern prof_recent_list_t prof_recent_alloc_list;
+edata_t *prof_recent_alloc_edata_get_no_lock_test(const prof_recent_t *node);
+prof_recent_t *edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata);
+
+ssize_t prof_recent_alloc_max_ctl_read();
+ssize_t prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max);
+void prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque);
+
+#endif /* JEMALLOC_INTERNAL_PROF_RECENT_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_stats.h b/contrib/jemalloc/include/jemalloc/internal/prof_stats.h
new file mode 100644
index 000000000000..7954e82de796
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/prof_stats.h
@@ -0,0 +1,17 @@
+#ifndef JEMALLOC_INTERNAL_PROF_STATS_H
+#define JEMALLOC_INTERNAL_PROF_STATS_H
+
+typedef struct prof_stats_s prof_stats_t;
+struct prof_stats_s {
+ uint64_t req_sum;
+ uint64_t count;
+};
+
+extern malloc_mutex_t prof_stats_mtx;
+
+void prof_stats_inc(tsd_t *tsd, szind_t ind, size_t size);
+void prof_stats_dec(tsd_t *tsd, szind_t ind, size_t size);
+void prof_stats_get_live(tsd_t *tsd, szind_t ind, prof_stats_t *stats);
+void prof_stats_get_accum(tsd_t *tsd, szind_t ind, prof_stats_t *stats);
+
+#endif /* JEMALLOC_INTERNAL_PROF_STATS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_structs.h b/contrib/jemalloc/include/jemalloc/internal/prof_structs.h
index 34ed4822b672..dd22115f6222 100644
--- a/contrib/jemalloc/include/jemalloc/internal/prof_structs.h
+++ b/contrib/jemalloc/include/jemalloc/internal/prof_structs.h
@@ -2,6 +2,7 @@
#define JEMALLOC_INTERNAL_PROF_STRUCTS_H
#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/edata.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/rb.h"
@@ -15,26 +16,22 @@ struct prof_bt_s {
#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
- prof_bt_t *bt;
+ void **vec;
+ unsigned *len;
unsigned max;
} prof_unwind_data_t;
#endif
-struct prof_accum_s {
-#ifndef JEMALLOC_ATOMIC_U64
- malloc_mutex_t mtx;
- uint64_t accumbytes;
-#else
- atomic_u64_t accumbytes;
-#endif
-};
-
struct prof_cnt_s {
/* Profiling counters. */
uint64_t curobjs;
+ uint64_t curobjs_shifted_unbiased;
uint64_t curbytes;
+ uint64_t curbytes_unbiased;
uint64_t accumobjs;
+ uint64_t accumobjs_shifted_unbiased;
uint64_t accumbytes;
+ uint64_t accumbytes_unbiased;
};
typedef enum {
@@ -55,6 +52,12 @@ struct prof_tctx_s {
uint64_t thr_uid;
uint64_t thr_discrim;
+ /*
+ * Reference count of how many times this tctx object is referenced in
+ * recent allocation / deallocation records, protected by tdata->lock.
+ */
+ uint64_t recent_count;
+
/* Profiling counters, protected by tdata->lock. */
prof_cnt_t cnts;
@@ -96,6 +99,15 @@ struct prof_tctx_s {
};
typedef rb_tree(prof_tctx_t) prof_tctx_tree_t;
+struct prof_info_s {
+ /* Time when the allocation was made. */
+ nstime_t alloc_time;
+ /* Points to the prof_tctx_t corresponding to the allocation. */
+ prof_tctx_t *alloc_tctx;
+ /* Allocation request size. */
+ size_t alloc_size;
+};
+
struct prof_gctx_s {
/* Protects nlimbo, cnt_summed, and tctxs. */
malloc_mutex_t *lock;
@@ -167,9 +179,6 @@ struct prof_tdata_s {
*/
ckh_t bt2tctx;
- /* Sampling state. */
- uint64_t prng_state;
-
/* State used to avoid dumping while operating on prof internals. */
bool enq;
bool enq_idump;
@@ -197,4 +206,16 @@ struct prof_tdata_s {
};
typedef rb_tree(prof_tdata_t) prof_tdata_tree_t;
+struct prof_recent_s {
+ nstime_t alloc_time;
+ nstime_t dalloc_time;
+
+ ql_elm(prof_recent_t) link;
+ size_t size;
+ size_t usize;
+ atomic_p_t alloc_edata; /* NULL means allocation has been freed. */
+ prof_tctx_t *alloc_tctx;
+ prof_tctx_t *dalloc_tctx;
+};
+
#endif /* JEMALLOC_INTERNAL_PROF_STRUCTS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_sys.h b/contrib/jemalloc/include/jemalloc/internal/prof_sys.h
new file mode 100644
index 000000000000..3d25a4295e28
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/prof_sys.h
@@ -0,0 +1,30 @@
+#ifndef JEMALLOC_INTERNAL_PROF_SYS_H
+#define JEMALLOC_INTERNAL_PROF_SYS_H
+
+extern malloc_mutex_t prof_dump_filename_mtx;
+extern base_t *prof_base;
+
+void bt_init(prof_bt_t *bt, void **vec);
+void prof_backtrace(tsd_t *tsd, prof_bt_t *bt);
+void prof_hooks_init(void);
+void prof_unwind_init(void);
+void prof_sys_thread_name_fetch(tsd_t *tsd);
+int prof_getpid(void);
+void prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind);
+bool prof_prefix_set(tsdn_t *tsdn, const char *prefix);
+void prof_fdump_impl(tsd_t *tsd);
+void prof_idump_impl(tsd_t *tsd);
+bool prof_mdump_impl(tsd_t *tsd, const char *filename);
+void prof_gdump_impl(tsd_t *tsd);
+
+/* Used in unit tests. */
+typedef int (prof_sys_thread_name_read_t)(char *buf, size_t limit);
+extern prof_sys_thread_name_read_t *JET_MUTABLE prof_sys_thread_name_read;
+typedef int (prof_dump_open_file_t)(const char *, int);
+extern prof_dump_open_file_t *JET_MUTABLE prof_dump_open_file;
+typedef ssize_t (prof_dump_write_file_t)(int, const void *, size_t);
+extern prof_dump_write_file_t *JET_MUTABLE prof_dump_write_file;
+typedef int (prof_dump_open_maps_t)(void);
+extern prof_dump_open_maps_t *JET_MUTABLE prof_dump_open_maps;
+
+#endif /* JEMALLOC_INTERNAL_PROF_SYS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/prof_types.h b/contrib/jemalloc/include/jemalloc/internal/prof_types.h
index 1eff995ecf0f..ba6286548e04 100644
--- a/contrib/jemalloc/include/jemalloc/internal/prof_types.h
+++ b/contrib/jemalloc/include/jemalloc/internal/prof_types.h
@@ -2,11 +2,12 @@
#define JEMALLOC_INTERNAL_PROF_TYPES_H
typedef struct prof_bt_s prof_bt_t;
-typedef struct prof_accum_s prof_accum_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_tctx_s prof_tctx_t;
+typedef struct prof_info_s prof_info_t;
typedef struct prof_gctx_s prof_gctx_t;
typedef struct prof_tdata_s prof_tdata_t;
+typedef struct prof_recent_s prof_recent_t;
/* Option defaults. */
#ifdef JEMALLOC_PROF
@@ -28,7 +29,23 @@ typedef struct prof_tdata_s prof_tdata_t;
#define PROF_CKH_MINITEMS 64
/* Size of memory buffer to use when writing dump files. */
-#define PROF_DUMP_BUFSIZE 65536
+#ifndef JEMALLOC_PROF
+/* Minimize memory bloat for non-prof builds. */
+# define PROF_DUMP_BUFSIZE 1
+#elif defined(JEMALLOC_DEBUG)
+/* Use a small buffer size in debug build, mainly to facilitate testing. */
+# define PROF_DUMP_BUFSIZE 16
+#else
+# define PROF_DUMP_BUFSIZE 65536
+#endif
+
+/* Size of size-class-related tables. */
+#ifdef JEMALLOC_PROF
+# define PROF_SC_NSIZES SC_NSIZES
+#else
+/* Minimize memory bloat for non-prof builds. */
+# define PROF_SC_NSIZES 1
+#endif
/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128
@@ -45,12 +62,14 @@ typedef struct prof_tdata_s prof_tdata_t;
*/
#define PROF_NTDATA_LOCKS 256
-/*
- * prof_tdata pointers close to NULL are used to encode state information that
- * is used for cleaning up during thread shutdown.
- */
-#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
-#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
-#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY
+/* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+#define PROF_DUMP_FILENAME_LEN (PATH_MAX + 1)
+#else
+#define PROF_DUMP_FILENAME_LEN 1
+#endif
+
+/* Default number of recent allocations to record. */
+#define PROF_RECENT_ALLOC_MAX_DEFAULT 0
#endif /* JEMALLOC_INTERNAL_PROF_TYPES_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/psset.h b/contrib/jemalloc/include/jemalloc/internal/psset.h
new file mode 100644
index 000000000000..e1d64970ee14
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/psset.h
@@ -0,0 +1,131 @@
+#ifndef JEMALLOC_INTERNAL_PSSET_H
+#define JEMALLOC_INTERNAL_PSSET_H
+
+#include "jemalloc/internal/hpdata.h"
+
+/*
+ * A page-slab set. What the eset is to PAC, the psset is to HPA. It maintains
+ * a collection of page-slabs (the intent being that they are backed by
+ * hugepages, or at least could be), and handles allocation and deallocation
+ * requests.
+ */
+
+/*
+ * One more than the maximum pszind_t we will serve out of the HPA.
+ * Practically, we expect only the first few to be actually used. This
+ * corresponds to a maximum size of 512MB on systems with 4k pages and
+ * SC_NGROUP == 4, which is already an unreasonably large maximum. Morally, you
+ * can think of this as being SC_NPSIZES, but there's no sense in wasting that
+ * much space in the arena, making bitmaps that much larger, etc.
+ */
+#define PSSET_NPSIZES 64
+
+/*
+ * We keep two purge lists per page size class; one for hugified hpdatas (at
+ * index 2*pszind), and one for the non-hugified hpdatas (at index 2*pszind +
+ * 1). This lets us implement a preference for purging non-hugified hpdatas
+ * among similarly-dirty ones.
+ * We reserve the last two indices for empty slabs, in that case purging
+ * hugified ones (which are definitionally all waste) before non-hugified ones
+ * (i.e. reversing the order).
+ */
+#define PSSET_NPURGE_LISTS (2 * PSSET_NPSIZES)
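
To make the indexing scheme above concrete, here is a minimal sketch (not part
of the header; the helper name is hypothetical) of where a non-empty slab's
purge list lives:

	/* Hugified hpdatas at 2*pszind, non-hugified at 2*pszind + 1. */
	static inline size_t
	purge_list_ind(pszind_t pszind, bool hugified) {
		return 2 * (size_t)pszind + (hugified ? 0 : 1);
	}

Empty slabs instead occupy the last two indices, with the hugified list
preferred for purging, as described above.
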
+
+typedef struct psset_bin_stats_s psset_bin_stats_t;
+struct psset_bin_stats_s {
+ /* How many pageslabs are in this bin? */
+ size_t npageslabs;
+ /* Of them, how many pages are active? */
+ size_t nactive;
+ /* And how many are dirty? */
+ size_t ndirty;
+};
+
+typedef struct psset_stats_s psset_stats_t;
+struct psset_stats_s {
+ /*
+ * The second index indicates hugeness; nonfull_slabs[pszind][0] contains
+ * stats for the non-huge slabs in bucket pszind, while
+ * nonfull_slabs[pszind][1] contains stats for the huge slabs.
+ */
+ psset_bin_stats_t nonfull_slabs[PSSET_NPSIZES][2];
+
+ /*
+ * Full slabs don't live in any edata heap, but we still track their
+ * stats.
+ */
+ psset_bin_stats_t full_slabs[2];
+
+ /* Empty slabs are similar. */
+ psset_bin_stats_t empty_slabs[2];
+};
+
+typedef struct psset_s psset_t;
+struct psset_s {
+ /*
+ * The pageslabs, quantized by the size class of the largest contiguous
+ * free run of pages in a pageslab.
+ */
+ hpdata_age_heap_t pageslabs[PSSET_NPSIZES];
+ /* Bitmap for which set bits correspond to non-empty heaps. */
+ fb_group_t pageslab_bitmap[FB_NGROUPS(PSSET_NPSIZES)];
+ /*
+ * The sum of all bin stats in stats. This lets us quickly answer
+ * queries for the number of dirty, active, and retained pages in the
+ * entire set.
+ */
+ psset_bin_stats_t merged_stats;
+ psset_stats_t stats;
+ /*
+ * Slabs with no active allocations, but which are allowed to serve new
+ * allocations.
+ */
+ hpdata_empty_list_t empty;
+ /*
+ * Slabs which are available to be purged, ordered by how much we want
+ * to purge them (with later indices indicating slabs we want to purge
+ * more).
+ */
+ hpdata_purge_list_t to_purge[PSSET_NPURGE_LISTS];
+ /* Bitmap for which set bits correspond to non-empty purge lists. */
+ fb_group_t purge_bitmap[FB_NGROUPS(PSSET_NPURGE_LISTS)];
+ /* Slabs which are available to be hugified. */
+ hpdata_hugify_list_t to_hugify;
+};
+
+void psset_init(psset_t *psset);
+void psset_stats_accum(psset_stats_t *dst, psset_stats_t *src);
+
+/*
+ * Begin or end updating the given pageslab's metadata. While the pageslab is
+ * being updated, it won't be returned from psset_pick_alloc calls.
+ */
+void psset_update_begin(psset_t *psset, hpdata_t *ps);
+void psset_update_end(psset_t *psset, hpdata_t *ps);
+
+/* Analogous to eset_fit; pick an hpdata to serve the request. */
+hpdata_t *psset_pick_alloc(psset_t *psset, size_t size);
+/* Pick one to purge. */
+hpdata_t *psset_pick_purge(psset_t *psset);
+/* Pick one to hugify. */
+hpdata_t *psset_pick_hugify(psset_t *psset);
+
+void psset_insert(psset_t *psset, hpdata_t *ps);
+void psset_remove(psset_t *psset, hpdata_t *ps);
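
To make the update protocol concrete, a minimal sketch under the semantics
stated above (the function name and the metadata mutation are hypothetical;
the declarations used are the ones just above):

	static void
	update_example(psset_t *psset, size_t size) {
		hpdata_t *ps = psset_pick_alloc(psset, size);
		if (ps == NULL) {
			return;	/* No pageslab can serve the request. */
		}
		psset_update_begin(psset, ps);	/* Hidden from picks. */
		/* ... mutate ps's metadata here (hypothetical) ... */
		psset_update_end(psset, ps);	/* Requeued, stats refreshed. */
	}
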
+
+static inline size_t
+psset_npageslabs(psset_t *psset) {
+ return psset->merged_stats.npageslabs;
+}
+
+static inline size_t
+psset_nactive(psset_t *psset) {
+ return psset->merged_stats.nactive;
+}
+
+static inline size_t
+psset_ndirty(psset_t *psset) {
+ return psset->merged_stats.ndirty;
+}
+
+#endif /* JEMALLOC_INTERNAL_PSSET_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/public_namespace.sh b/contrib/jemalloc/include/jemalloc/internal/public_namespace.sh
new file mode 100755
index 000000000000..4d415ba01fa8
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/public_namespace.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+# Read colon-delimited records (symbol name in the first field) from the file
+# named by $1 and emit a namespacing #define for each symbol.
+for nm in $(cat "$1") ; do
+  n=$(echo "${nm}" | tr ':' ' ' | awk '{print $1}')
+  echo "#define je_${n} JEMALLOC_N(${n})"
+done
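
A usage sketch (the input file name is hypothetical; any file with one
colon-delimited record per line, symbol name in the first field, behaves the
same way):

	$ printf 'malloc:m\nfree:f\n' > public_symbols.txt
	$ sh public_namespace.sh public_symbols.txt
	#define je_malloc JEMALLOC_N(malloc)
	#define je_free JEMALLOC_N(free)

The companion public_unnamespace.sh below emits the matching #undef lines.
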
diff --git a/contrib/jemalloc/include/jemalloc/internal/public_unnamespace.sh b/contrib/jemalloc/include/jemalloc/internal/public_unnamespace.sh
new file mode 100755
index 000000000000..4239d17754ca
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/public_unnamespace.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+# Read colon-delimited records (symbol name in the first field) from the file
+# named by $1 and emit a matching #undef for each symbol.
+for nm in $(cat "$1") ; do
+  n=$(echo "${nm}" | tr ':' ' ' | awk '{print $1}')
+  echo "#undef je_${n}"
+done
diff --git a/contrib/jemalloc/include/jemalloc/internal/ql.h b/contrib/jemalloc/include/jemalloc/internal/ql.h
index 802904077161..c7f52f862219 100644
--- a/contrib/jemalloc/include/jemalloc/internal/ql.h
+++ b/contrib/jemalloc/include/jemalloc/internal/ql.h
@@ -3,37 +3,85 @@
#include "jemalloc/internal/qr.h"
+/*
+ * A linked-list implementation.
+ *
+ * This is built on top of the ring implementation, but that can be viewed as an
+ * implementation detail (i.e. trying to advance past the tail of the list
+ * doesn't wrap around).
+ *
+ * You define a struct like so:
+ * typedef struct my_s my_t;
+ * struct my_s {
+ * int data;
+ * ql_elm(my_t) my_link;
+ * };
+ *
+ * // We wobble between "list" and "head" for this type; we're now mostly
+ * // heading towards "list".
+ * typedef ql_head(my_t) my_list_t;
+ *
+ * You then pass a my_list_t * for a_head arguments, a my_t * for a_elm
+ * arguments, the token "my_link" for a_field arguments, and the token "my_t"
+ * for a_type arguments.
+ */
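
A minimal sketch of these conventions in use (my_t and my_list_t as defined
above; the assert is purely illustrative):

	static void
	example(void) {
		my_list_t list = ql_head_initializer(list);
		my_t a, b;

		a.data = 1;
		b.data = 2;
		ql_elm_new(&a, my_link);	/* Links must be initialized */
		ql_elm_new(&b, my_link);	/* before insertion. */
		ql_tail_insert(&list, &a, my_link);	/* list = [a] */
		ql_tail_insert(&list, &b, my_link);	/* list = [a, b] */
		assert(ql_first(&list) == &a);
	}
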
+
/* List definitions. */
#define ql_head(a_type) \
struct { \
a_type *qlh_first; \
}
+/* Static initializer for an empty list. */
#define ql_head_initializer(a_head) {NULL}
+/* The field definition. */
#define ql_elm(a_type) qr(a_type)
-/* List functions. */
+/* A pointer to the first element in the list, or NULL if the list is empty. */
+#define ql_first(a_head) ((a_head)->qlh_first)
+
+/* Dynamically initializes a list. */
#define ql_new(a_head) do { \
- (a_head)->qlh_first = NULL; \
+ ql_first(a_head) = NULL; \
} while (0)
-#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
+/*
+ * Sets dest to be the contents of src (overwriting any elements there), leaving
+ * src empty.
+ */
+#define ql_move(a_head_dest, a_head_src) do { \
+ ql_first(a_head_dest) = ql_first(a_head_src); \
+ ql_new(a_head_src); \
+} while (0)
-#define ql_first(a_head) ((a_head)->qlh_first)
+/* True if the list is empty, otherwise false. */
+#define ql_empty(a_head) (ql_first(a_head) == NULL)
+
+/*
+ * Initializes a ql_elm. Must be called even if the field is about to be
+ * overwritten.
+ */
+#define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field)
+/* Obtains the last item in the list. */
#define ql_last(a_head, a_field) \
- ((ql_first(a_head) != NULL) \
- ? qr_prev(ql_first(a_head), a_field) : NULL)
+ (ql_empty(a_head) ? NULL : qr_prev(ql_first(a_head), a_field))
+/*
+ * Gets a pointer to the next/prev element in the list. Trying to advance past
+ * the end or retreat before the beginning of the list returns NULL.
+ */
#define ql_next(a_head, a_elm, a_field) \
((ql_last(a_head, a_field) != (a_elm)) \
? qr_next((a_elm), a_field) : NULL)
-
#define ql_prev(a_head, a_elm, a_field) \
((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \
: NULL)
+/* Inserts a_elm before a_qlelm in the list. */
#define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \
qr_before_insert((a_qlelm), (a_elm), a_field); \
if (ql_first(a_head) == (a_qlelm)) { \
@@ -41,23 +89,41 @@ struct { \
} \
} while (0)
+/* Inserts a_elm after a_qlelm in the list. */
#define ql_after_insert(a_qlelm, a_elm, a_field) \
qr_after_insert((a_qlelm), (a_elm), a_field)
+/* Inserts a_elm as the first item in the list. */
#define ql_head_insert(a_head, a_elm, a_field) do { \
- if (ql_first(a_head) != NULL) { \
+ if (!ql_empty(a_head)) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = (a_elm); \
} while (0)
+/* Inserts a_elm as the last item in the list. */
#define ql_tail_insert(a_head, a_elm, a_field) do { \
- if (ql_first(a_head) != NULL) { \
+ if (!ql_empty(a_head)) { \
qr_before_insert(ql_first(a_head), (a_elm), a_field); \
} \
ql_first(a_head) = qr_next((a_elm), a_field); \
} while (0)
+/*
+ * Given lists a = [a_1, ..., a_n] and b = [b_1, ..., b_n], results in:
+ * a = [a_1, ..., a_n, b_1, ..., b_n] and b = [].
+ */
+#define ql_concat(a_head_a, a_head_b, a_field) do { \
+ if (ql_empty(a_head_a)) { \
+ ql_move(a_head_a, a_head_b); \
+ } else if (!ql_empty(a_head_b)) { \
+ qr_meld(ql_first(a_head_a), ql_first(a_head_b), \
+ a_field); \
+ ql_new(a_head_b); \
+ } \
+} while (0)
+
+/* Removes a_elm from the list. */
#define ql_remove(a_head, a_elm, a_field) do { \
if (ql_first(a_head) == (a_elm)) { \
ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
@@ -65,20 +131,63 @@ struct { \
if (ql_first(a_head) != (a_elm)) { \
qr_remove((a_elm), a_field); \
} else { \
- ql_first(a_head) = NULL; \
+ ql_new(a_head); \
} \
} while (0)
+/* Removes the first item in the list. */
#define ql_head_remove(a_head, a_type, a_field) do { \
a_type *t = ql_first(a_head); \
ql_remove((a_head), t, a_field); \
} while (0)
+/* Removes the last item in the list. */
#define ql_tail_remove(a_head, a_type, a_field) do { \
a_type *t = ql_last(a_head, a_field); \
ql_remove((a_head), t, a_field); \
} while (0)
+/*
+ * Given a = [a_1, a_2, ..., a_n-1, a_n, a_n+1, ...],
+ * ql_split(a, a_n, b, some_field) results in
+ * a = [a_1, a_2, ..., a_n-1]
+ * and replaces b's contents with:
+ * b = [a_n, a_n+1, ...]
+ */
+#define ql_split(a_head_a, a_elm, a_head_b, a_field) do { \
+ if (ql_first(a_head_a) == (a_elm)) { \
+ ql_move(a_head_b, a_head_a); \
+ } else { \
+ qr_split(ql_first(a_head_a), (a_elm), a_field); \
+ ql_first(a_head_b) = (a_elm); \
+ } \
+} while (0)
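
For illustration, ql_concat and ql_split invert each other (my_t/my_list_t as
above; b_first is a my_t * the caller kept to b's former first element):

	/* a = [a_1, ..., a_n], b = [b_1, ..., b_m], b_first == b_1. */
	ql_concat(&a, &b, my_link);	/* a = [a_1 .. a_n, b_1 .. b_m], b = [] */
	ql_split(&a, b_first, &b, my_link);	/* Back to the original a and b. */
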
+
+/*
+ * An optimized version of:
+ * a_type *t = ql_first(a_head);
+ * ql_remove((a_head), t, a_field);
+ * ql_tail_insert((a_head), t, a_field);
+ */
+#define ql_rotate(a_head, a_field) do { \
+ ql_first(a_head) = qr_next(ql_first(a_head), a_field); \
+} while (0)
+
+/*
+ * Helper macro to iterate over each element in a list in order, starting from
+ * the head (or in reverse order, starting from the tail). The usage is
+ * (assuming my_t and my_list_t defined as above).
+ *
+ * int sum(my_list_t *list) {
+ * int sum = 0;
+ * my_t *iter;
+ * ql_foreach(iter, list, my_link) {
+ * sum += iter->data;
+ * }
+ * return sum;
+ * }
+ */
+
#define ql_foreach(a_var, a_head, a_field) \
qr_foreach((a_var), ql_first(a_head), a_field)
diff --git a/contrib/jemalloc/include/jemalloc/internal/qr.h b/contrib/jemalloc/include/jemalloc/internal/qr.h
index 1e1056b38685..ece4f5568600 100644
--- a/contrib/jemalloc/include/jemalloc/internal/qr.h
+++ b/contrib/jemalloc/include/jemalloc/internal/qr.h
@@ -1,6 +1,21 @@
#ifndef JEMALLOC_INTERNAL_QR_H
#define JEMALLOC_INTERNAL_QR_H
+/*
+ * A ring implementation based on an embedded circular doubly-linked list.
+ *
+ * You define your struct like so:
+ *
+ * typedef struct my_s my_t;
+ * struct my_s {
+ * int data;
+ * qr(my_t) my_link;
+ * };
+ *
+ * You then pass a my_t * into macros for a_qr arguments, and the token
+ * "my_link" for a_field arguments.
+ */
+
/* Ring definitions. */
#define qr(a_type) \
struct { \
@@ -8,61 +23,114 @@ struct { \
a_type *qre_prev; \
}
-/* Ring functions. */
+/*
+ * Initialize a qr link. Every link must be initialized before being used, even
+ * if that initialization is going to be immediately overwritten (say, by being
+ * passed into an insertion macro).
+ */
#define qr_new(a_qr, a_field) do { \
(a_qr)->a_field.qre_next = (a_qr); \
(a_qr)->a_field.qre_prev = (a_qr); \
} while (0)
+/*
+ * Go forwards or backwards in the ring. Note that (the ring being circular)
+ * this always succeeds -- you just keep looping around and around the ring if
+ * you chase pointers without end.
+ */
#define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next)
-
#define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev)
-#define qr_before_insert(a_qrelm, a_qr, a_field) do { \
- (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \
- (a_qr)->a_field.qre_next = (a_qrelm); \
- (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \
- (a_qrelm)->a_field.qre_prev = (a_qr); \
+/*
+ * Given two rings:
+ * a -> a_1 -> ... -> a_n --
+ * ^ |
+ * |------------------------
+ *
+ * b -> b_1 -> ... -> b_n --
+ * ^ |
+ * |------------------------
+ *
+ * Results in the ring:
+ * a -> a_1 -> ... -> a_n -> b -> b_1 -> ... -> b_n --
+ * ^ |
+ * |-------------------------------------------------|
+ *
+ * a_qr_a can directly be a qr_next() macro, but a_qr_b cannot.
+ */
+#define qr_meld(a_qr_a, a_qr_b, a_field) do { \
+ (a_qr_b)->a_field.qre_prev->a_field.qre_next = \
+ (a_qr_a)->a_field.qre_prev; \
+ (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
+ (a_qr_b)->a_field.qre_prev = \
+ (a_qr_b)->a_field.qre_prev->a_field.qre_next; \
+ (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
+ (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
} while (0)
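
A minimal sketch of qr_meld joining two two-element rings (my_t and my_link as
in the comment at the top of this header; qr_after_insert is defined just
below):

	my_t a, a1, b, b1;
	qr_new(&a, my_link);
	qr_new(&a1, my_link);
	qr_new(&b, my_link);
	qr_new(&b1, my_link);
	qr_after_insert(&a, &a1, my_link);	/* Ring: a -> a1 -> a. */
	qr_after_insert(&b, &b1, my_link);	/* Ring: b -> b1 -> b. */
	qr_meld(&a, &b, my_link);	/* Ring: a -> a1 -> b -> b1 -> a. */
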
-#define qr_after_insert(a_qrelm, a_qr, a_field) do { \
- (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \
- (a_qr)->a_field.qre_prev = (a_qrelm); \
- (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \
- (a_qrelm)->a_field.qre_next = (a_qr); \
-} while (0)
+/*
+ * Logically, this is just a meld. The intent, though, is that a_qr is a
+ * single-element ring, so that "before" has a more obvious interpretation than
+ * meld.
+ */
+#define qr_before_insert(a_qrelm, a_qr, a_field) \
+ qr_meld((a_qrelm), (a_qr), a_field)
-#define qr_meld(a_qr_a, a_qr_b, a_type, a_field) do { \
- a_type *t; \
- (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \
- (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \
- t = (a_qr_a)->a_field.qre_prev; \
- (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \
- (a_qr_b)->a_field.qre_prev = t; \
-} while (0)
+/* Ditto, but inserting after rather than before. */
+#define qr_after_insert(a_qrelm, a_qr, a_field) \
+ qr_before_insert(qr_next(a_qrelm, a_field), (a_qr), a_field)
/*
+ * Inverts meld; given the ring:
+ * a -> a_1 -> ... -> a_n -> b -> b_1 -> ... -> b_n --
+ * ^ |
+ * |-------------------------------------------------|
+ *
+ * Results in two rings:
+ * a -> a_1 -> ... -> a_n --
+ * ^ |
+ * |------------------------
+ *
+ * b -> b_1 -> ... -> b_n --
+ * ^ |
+ * |------------------------
+ *
* qr_meld() and qr_split() are functionally equivalent, so there's no need to
* have two copies of the code.
*/
-#define qr_split(a_qr_a, a_qr_b, a_type, a_field) \
- qr_meld((a_qr_a), (a_qr_b), a_type, a_field)
+#define qr_split(a_qr_a, a_qr_b, a_field) \
+ qr_meld((a_qr_a), (a_qr_b), a_field)
-#define qr_remove(a_qr, a_field) do { \
- (a_qr)->a_field.qre_prev->a_field.qre_next \
- = (a_qr)->a_field.qre_next; \
- (a_qr)->a_field.qre_next->a_field.qre_prev \
- = (a_qr)->a_field.qre_prev; \
- (a_qr)->a_field.qre_next = (a_qr); \
- (a_qr)->a_field.qre_prev = (a_qr); \
-} while (0)
+/*
+ * Splits off a_qr from the rest of its ring, so that it becomes a
+ * single-element ring.
+ */
+#define qr_remove(a_qr, a_field) \
+ qr_split(qr_next(a_qr, a_field), (a_qr), a_field)
+/*
+ * Helper macro to iterate over each element in a ring exactly once, starting
+ * with a_qr. The usage is (assuming my_t defined as above):
+ *
+ * int sum(my_t *item) {
+ * int sum = 0;
+ * my_t *iter;
+ * qr_foreach(iter, item, my_link) {
+ * sum += iter->data;
+ * }
+ * return sum;
+ * }
+ */
#define qr_foreach(var, a_qr, a_field) \
for ((var) = (a_qr); \
(var) != NULL; \
(var) = (((var)->a_field.qre_next != (a_qr)) \
? (var)->a_field.qre_next : NULL))
+/*
+ * The same (and with the same usage) as qr_foreach, but in the opposite order,
+ * ending with a_qr.
+ */
#define qr_reverse_foreach(var, a_qr, a_field) \
for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \
(var) != NULL; \
diff --git a/contrib/jemalloc/include/jemalloc/internal/quantum.h b/contrib/jemalloc/include/jemalloc/internal/quantum.h
index 821086e992cd..c22d753aa79f 100644
--- a/contrib/jemalloc/include/jemalloc/internal/quantum.h
+++ b/contrib/jemalloc/include/jemalloc/internal/quantum.h
@@ -30,11 +30,18 @@
# ifdef __hppa__
# define LG_QUANTUM 4
# endif
+# ifdef __loongarch__
+# define LG_QUANTUM 4
+# endif
# ifdef __m68k__
# define LG_QUANTUM 3
# endif
# ifdef __mips__
-# define LG_QUANTUM 3
+# if defined(__mips_n32) || defined(__mips_n64)
+# define LG_QUANTUM 4
+# else
+# define LG_QUANTUM 3
+# endif
# endif
# ifdef __nios2__
# define LG_QUANTUM 3
@@ -61,6 +68,9 @@
# ifdef __le32__
# define LG_QUANTUM 4
# endif
+# ifdef __arc__
+# define LG_QUANTUM 3
+# endif
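
For context: LG_QUANTUM is the binary log of the minimum allocation alignment,
so the definitions above yield, e.g., a 16-byte quantum (1 << 4) on loongarch
and on the n32/n64 MIPS ABIs, and an 8-byte quantum (1 << 3) on o32 MIPS,
m68k, nios2, and arc.
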
# ifndef LG_QUANTUM
# error "Unknown minimum alignment for architecture; specify via "
"--with-lg-quantum"
diff --git a/contrib/jemalloc/include/jemalloc/internal/rb.h b/contrib/jemalloc/include/jemalloc/internal/rb.h
index 47fa5ca99bbe..a9a51cb68604 100644
--- a/contrib/jemalloc/include/jemalloc/internal/rb.h
+++ b/contrib/jemalloc/include/jemalloc/internal/rb.h
@@ -1,3 +1,6 @@
+#ifndef JEMALLOC_INTERNAL_RB_H
+#define JEMALLOC_INTERNAL_RB_H
+
/*-
*******************************************************************************
*
@@ -19,13 +22,19 @@
*******************************************************************************
*/
-#ifndef RB_H_
-#define RB_H_
-
#ifndef __PGI
#define RB_COMPACT
#endif
+/*
+ * Each node in the RB tree consumes at least 1 byte of space (for the linkage
+ * if nothing else), so there are at most 2^(sizeof(void *) << 3) rb tree
+ * nodes in any process (and thus, at most that many nodes in any rb tree);
+ * i.e., the binary log of the node count is at most sizeof(void *) << 3.
+ * The choice of algorithm bounds the depth of a tree to twice the binary log
+ * of the number of elements in the tree; the following bound follows.
+ */
+#define RB_MAX_DEPTH (sizeof(void *) << 4)
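
Concretely: with 8-byte pointers, sizeof(void *) << 3 == 64, so a tree can
hold at most 2^64 nodes, and RB_MAX_DEPTH == 8 << 4 == 128 -- small enough
that the insert/remove paths below can keep their path[RB_MAX_DEPTH] arrays on
the stack.
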
+
#ifdef RB_COMPACT
/* Node structure. */
#define rb_node(a_type) \
@@ -159,12 +168,22 @@ struct { \
rbtn_right_set(a_type, a_field, (r_node), (a_node)); \
} while (0)
+#define rb_summarized_only_false(...)
+#define rb_summarized_only_true(...) __VA_ARGS__
+#define rb_empty_summarize(a_node, a_lchild, a_rchild) false
+
/*
- * The rb_proto() macro generates function prototypes that correspond to the
- * functions generated by an equivalently parameterized call to rb_gen().
+ * The rb_proto() and rb_summarized_proto() macros generate function prototypes
+ * that correspond to the functions generated by an equivalently parameterized
+ * call to rb_gen() or rb_summarized_gen(), respectively.
*/
#define rb_proto(a_attr, a_prefix, a_rbt_type, a_type) \
+ rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, false)
+#define rb_summarized_proto(a_attr, a_prefix, a_rbt_type, a_type) \
+ rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, true)
+#define rb_proto_impl(a_attr, a_prefix, a_rbt_type, a_type, \
+ a_is_summarized) \
a_attr void \
a_prefix##new(a_rbt_type *rbtree); \
a_attr bool \
@@ -195,31 +214,94 @@ a_prefix##reverse_iter(a_rbt_type *rbtree, a_type *start, \
a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg); \
a_attr void \
a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
- void *arg);
+ void *arg); \
+/* Extended API */ \
+rb_summarized_only_##a_is_summarized( \
+a_attr void \
+a_prefix##update_summaries(a_rbt_type *rbtree, a_type *node); \
+a_attr bool \
+a_prefix##empty_filtered(a_rbt_type *rbtree, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx); \
+a_attr a_type * \
+a_prefix##first_filtered(a_rbt_type *rbtree, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx); \
+a_attr a_type * \
+a_prefix##last_filtered(a_rbt_type *rbtree, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx); \
+a_attr a_type * \
+a_prefix##next_filtered(a_rbt_type *rbtree, a_type *node, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx); \
+a_attr a_type * \
+a_prefix##prev_filtered(a_rbt_type *rbtree, a_type *node, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx); \
+a_attr a_type * \
+a_prefix##search_filtered(a_rbt_type *rbtree, const a_type *key, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx); \
+a_attr a_type * \
+a_prefix##nsearch_filtered(a_rbt_type *rbtree, const a_type *key, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx); \
+a_attr a_type * \
+a_prefix##psearch_filtered(a_rbt_type *rbtree, const a_type *key, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx); \
+a_attr a_type * \
+a_prefix##iter_filtered(a_rbt_type *rbtree, a_type *start, \
+ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx); \
+a_attr a_type * \
+a_prefix##reverse_iter_filtered(a_rbt_type *rbtree, a_type *start, \
+ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx); \
+)
/*
* The rb_gen() macro generates a type-specific red-black tree implementation,
* based on the above cpp macros.
- *
* Arguments:
*
- * a_attr : Function attribute for generated functions (ex: static).
- * a_prefix : Prefix for generated functions (ex: ex_).
- * a_rb_type : Type for red-black tree data structure (ex: ex_t).
- * a_type : Type for red-black tree node data structure (ex: ex_node_t).
- * a_field : Name of red-black tree node linkage (ex: ex_link).
- * a_cmp : Node comparison function name, with the following prototype:
- * int (a_cmp *)(a_type *a_node, a_type *a_other);
- * ^^^^^^
- * or a_key
- * Interpretation of comparison function return values:
- * -1 : a_node < a_other
- * 0 : a_node == a_other
- * 1 : a_node > a_other
- * In all cases, the a_node or a_key macro argument is the first
- * argument to the comparison function, which makes it possible
- * to write comparison functions that treat the first argument
- * specially.
+ * a_attr:
+ * Function attribute for generated functions (ex: static).
+ * a_prefix:
+ * Prefix for generated functions (ex: ex_).
+ * a_rb_type:
+ * Type for red-black tree data structure (ex: ex_t).
+ * a_type:
+ * Type for red-black tree node data structure (ex: ex_node_t).
+ * a_field:
+ * Name of red-black tree node linkage (ex: ex_link).
+ * a_cmp:
+ * Node comparison function name, with the following prototype:
+ *
+ * int a_cmp(a_type *a_node, a_type *a_other);
+ * ^^^^^^
+ * or a_key
+ * Interpretation of comparison function return values:
+ * -1 : a_node < a_other
+ * 0 : a_node == a_other
+ * 1 : a_node > a_other
+ * In all cases, the a_node or a_key macro argument is the first argument to
+ * the comparison function, which makes it possible to write comparison
+ * functions that treat the first argument specially. a_cmp must be a total
+ * order on values inserted into the tree -- duplicates are not allowed.
*
* Assuming the following setup:
*
@@ -338,8 +420,193 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
* during iteration. There is no way to stop iteration once it
* has begun.
* arg : Opaque pointer passed to cb().
+ *
+ * The rb_summarized_gen() macro generates all the functions above, but has an
+ * expanded interface. It introduces the notion of summarizing subtrees, and of
+ * filtering searches in the tree according to the information contained in
+ * those summaries.
+ * The extra macro argument is:
+ * a_summarize:
+ * Tree summarization function name, with the following prototype:
+ *
+ * bool a_summarize(a_type *a_node, const a_type *a_left_child,
+ * const a_type *a_right_child);
+ *
+ * This function should update a_node with the summary of the subtree rooted
+ * there, using the data contained in it and the summaries in a_left_child
+ * and a_right_child. One or both of them may be NULL. When the tree
+ * changes due to an insertion or removal, it updates the summaries of all
+ * nodes whose subtrees have changed (always updating the summaries of
+ * children before their parents). If the user alters a node in the tree in
+ * a way that may change its summary, they can call the generated
+ * update_summaries function to bubble up the summary changes to the root.
+ * It should return true if the summary changed (or may have changed), and
+ * false if it didn't (which will allow the implementation to terminate
+ * "bubbling up" the summaries early).
+ * As the parameter names indicate, the children are ordered as they are in
+ * the tree, a_left_child, if it is not NULL, compares less than a_node,
+ * which in turn compares less than a_right_child (if a_right_child is not
+ * NULL).
+ *
+ * Using the same setup as above but replacing the macro with
+ * rb_summarized_gen(static, ex_, ex_t, ex_node_t, ex_link, ex_cmp,
+ * ex_summarize)
+ *
+ * Generates all the previous functions, but adds some more:
+ *
+ * static void
+ * ex_update_summaries(ex_t *tree, ex_node_t *node);
+ * Description: Recompute the summaries of node and all of its ancestors.
+ * Args:
+ * tree: Pointer to an initialized red-black tree object.
+ * node: The element of the tree whose summary may have changed.
+ *
+ * For each of ex_empty, ex_first, ex_last, ex_next, ex_prev, ex_search,
+ * ex_nsearch, ex_psearch, ex_iter, and ex_reverse_iter, an additional function
+ * is generated as well, with the suffix _filtered (e.g. ex_empty_filtered,
+ * ex_first_filtered, etc.). These use the concept of a "filter": a binary
+ * property some node either satisfies or does not satisfy. Clever use of the
+ * a_summarize argument to rb_summarized_gen can allow efficient computation of
+ * these predicates across whole subtrees of the tree.
+ * The extended API functions accept three additional arguments after the
+ * arguments to the corresponding non-extended equivalent.
+ *
+ * ex_fn(..., bool (*filter_node)(void *, ex_node_t *),
+ * bool (*filter_subtree)(void *, ex_node_t *), void *filter_ctx);
+ * filter_node : Returns true if the node passes the filter.
+ * filter_subtree : Returns true if some node in the subtree rooted at
+ * node passes the filter.
+ * filter_ctx : A context argument passed to the filters.
+ *
+ * For a more concrete example of summarizing and filtering, suppose we're using
+ * the red-black tree to track a set of integers:
+ *
+ * struct ex_node_s {
+ * rb_node(ex_node_t) ex_link;
+ * unsigned data;
+ * };
+ *
+ * Suppose, for some application-specific reason, we want to be able to quickly
+ * find numbers in the set which are divisible by large powers of 2 (say, for
+ * aligned allocation purposes). We augment the node with a summary field:
+ *
+ * struct ex_node_s {
+ * rb_node(ex_node_t) ex_link;
+ * unsigned data;
+ * unsigned max_subtree_ffs;
+ * };
+ *
+ * and define our summarization function as follows:
+ *
+ * bool
+ * ex_summarize(ex_node_t *node, const ex_node_t *lchild,
+ * const ex_node_t *rchild) {
+ * unsigned new_max_subtree_ffs = ffs(node->data);
+ * if (lchild != NULL && lchild->max_subtree_ffs > new_max_subtree_ffs) {
+ * new_max_subtree_ffs = lchild->max_subtree_ffs;
+ * }
+ * if (rchild != NULL && rchild->max_subtree_ffs > new_max_subtree_ffs) {
+ * new_max_subtree_ffs = rchild->max_subtree_ffs;
+ * }
+ * bool changed = (node->max_subtree_ffs != new_max_subtree_ffs);
+ * node->max_subtree_ffs = new_max_subtree_ffs;
+ * // This could be "return true" without any correctness or big-O
+ * // performance changes; but practically, precisely reporting summary
+ * // changes reduces the amount of work that has to be done when "bubbling
+ * // up" summary changes.
+ * return changed;
+ * }
+ *
+ * We can now implement our filter functions as follows:
+ * bool
+ * ex_filter_node(void *filter_ctx, ex_node_t *node) {
+ * unsigned required_ffs = *(unsigned *)filter_ctx;
+ * return ffs(node->data) >= required_ffs;
+ * }
+ * bool
+ * ex_filter_subtree(void *filter_ctx, ex_node_t *node) {
+ * unsigned required_ffs = *(unsigned *)filter_ctx;
+ * return node->max_subtree_ffs >= required_ffs;
+ * }
+ *
+ * We can now easily search for, e.g., the smallest integer in the set that's
+ * divisible by 128:
+ * ex_node_t *
+ * find_div_128(ex_tree_t *tree) {
+ * unsigned min_ffs = 8;
+ * return ex_first_filtered(tree, &ex_filter_node, &ex_filter_subtree,
+ * &min_ffs);
+ * }
+ *
+ * We could with similar ease:
+ * - Find the next multiple of 128 in the set that's larger than 12345 (with
+ * ex_nsearch_filtered)
+ * - Iterate over just those multiples of 64 that are in the set (with
+ * ex_iter_filtered)
+ * - Determine if the set contains any multiples of 1024 (with
+ * ex_empty_filtered).
+ *
+ * Some possibly subtle API notes:
+ * - The node argument to ex_next_filtered and ex_prev_filtered need not pass
+ * the filter; it will find the next/prev node that passes the filter.
+ * - ex_search_filtered will fail even for a node in the tree, if that node does
+ * not pass the filter. ex_psearch_filtered and ex_nsearch_filtered behave
+ * similarly; they may return a node larger/smaller than the key, even if a
+ * node equivalent to the key is in the tree (but does not pass the filter).
+ * - Similarly, if the start argument to a filtered iteration function does not
+ * pass the filter, the callback won't be invoked on it.
+ *
+ * These should make sense after a moment's reflection; each post-condition is
+ * the same as with the unfiltered version, with the added constraint that the
+ * returned node must pass the filter.
*/
#define rb_gen(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp) \
+ rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp, \
+ rb_empty_summarize, false)
+#define rb_summarized_gen(a_attr, a_prefix, a_rbt_type, a_type, \
+ a_field, a_cmp, a_summarize) \
+ rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, a_field, a_cmp, \
+ a_summarize, true)
+
+#define rb_gen_impl(a_attr, a_prefix, a_rbt_type, a_type, \
+ a_field, a_cmp, a_summarize, a_is_summarized) \
+typedef struct { \
+ a_type *node; \
+ int cmp; \
+} a_prefix##path_entry_t; \
+static inline void \
+a_prefix##summarize_range(a_prefix##path_entry_t *rfirst, \
+ a_prefix##path_entry_t *rlast) { \
+ while ((uintptr_t)rlast >= (uintptr_t)rfirst) { \
+ a_type *node = rlast->node; \
+ /* Avoid a warning when a_summarize is rb_empty_summarize. */ \
+ (void)node; \
+ bool changed = a_summarize(node, rbtn_left_get(a_type, a_field, \
+ node), rbtn_right_get(a_type, a_field, node)); \
+ if (!changed) { \
+ break; \
+ } \
+ rlast--; \
+ } \
+} \
+/* On the remove pathways, we sometimes swap the node being removed */\
+/* and its first successor; in such cases we need to do two range */\
+/* updates; one from the node to its (former) swapped successor, the */\
+/* next from that successor to the root (with either allowed to */\
+/* bail out early if appropriate). */\
+static inline void \
+a_prefix##summarize_swapped_range(a_prefix##path_entry_t *rfirst, \
+ a_prefix##path_entry_t *rlast, a_prefix##path_entry_t *swap_loc) { \
+ if (swap_loc == NULL || rlast <= swap_loc) { \
+ a_prefix##summarize_range(rfirst, rlast); \
+ } else { \
+ a_prefix##summarize_range(swap_loc + 1, rlast); \
+ (void)a_summarize(swap_loc->node, \
+ rbtn_left_get(a_type, a_field, swap_loc->node), \
+ rbtn_right_get(a_type, a_field, swap_loc->node)); \
+ a_prefix##summarize_range(rfirst, swap_loc - 1); \
+ } \
+} \
a_attr void \
a_prefix##new(a_rbt_type *rbtree) { \
rb_new(a_type, a_field, rbtree); \
@@ -465,10 +732,8 @@ a_prefix##psearch(a_rbt_type *rbtree, const a_type *key) { \
} \
a_attr void \
a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
- struct { \
- a_type *node; \
- int cmp; \
- } path[sizeof(void *) << 4], *pathp; \
+ a_prefix##path_entry_t path[RB_MAX_DEPTH]; \
+ a_prefix##path_entry_t *pathp; \
rbt_node_new(a_type, a_field, rbtree, node); \
/* Wind. */ \
path->node = rbtree->rbt_root; \
@@ -484,6 +749,13 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
} \
} \
pathp->node = node; \
+ /* A loop invariant we maintain is that all nodes with */\
+ /* out-of-date summaries live in path[0], path[1], ..., *pathp. */\
+ /* To maintain this, we have to summarize node, since we */\
+ /* decrement pathp before the first iteration. */\
+ assert(rbtn_left_get(a_type, a_field, node) == NULL); \
+ assert(rbtn_right_get(a_type, a_field, node) == NULL); \
+ (void)a_summarize(node, NULL, NULL); \
/* Unwind. */ \
for (pathp--; (uintptr_t)pathp >= (uintptr_t)path; pathp--) { \
a_type *cnode = pathp->node; \
@@ -498,9 +770,13 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
a_type *tnode; \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, cnode, tnode); \
+ (void)a_summarize(cnode, \
+ rbtn_left_get(a_type, a_field, cnode), \
+ rbtn_right_get(a_type, a_field, cnode)); \
cnode = tnode; \
} \
} else { \
+ a_prefix##summarize_range(path, pathp); \
return; \
} \
} else { \
@@ -521,13 +797,20 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
rbtn_rotate_left(a_type, a_field, cnode, tnode); \
rbtn_color_set(a_type, a_field, tnode, tred); \
rbtn_red_set(a_type, a_field, cnode); \
+ (void)a_summarize(cnode, \
+ rbtn_left_get(a_type, a_field, cnode), \
+ rbtn_right_get(a_type, a_field, cnode)); \
cnode = tnode; \
} \
} else { \
+ a_prefix##summarize_range(path, pathp); \
return; \
} \
} \
pathp->node = cnode; \
+ (void)a_summarize(cnode, \
+ rbtn_left_get(a_type, a_field, cnode), \
+ rbtn_right_get(a_type, a_field, cnode)); \
} \
/* Set root, and make it black. */ \
rbtree->rbt_root = path->node; \
@@ -535,12 +818,18 @@ a_prefix##insert(a_rbt_type *rbtree, a_type *node) { \
} \
a_attr void \
a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
- struct { \
- a_type *node; \
- int cmp; \
- } *pathp, *nodep, path[sizeof(void *) << 4]; \
+ a_prefix##path_entry_t path[RB_MAX_DEPTH]; \
+ a_prefix##path_entry_t *pathp; \
+ a_prefix##path_entry_t *nodep; \
+ a_prefix##path_entry_t *swap_loc; \
+ /* This is a "real" sentinel -- NULL means we didn't swap the */\
+ /* node to be pruned with one of its successors, and so */\
+ /* summarization can terminate early whenever some summary */\
+ /* doesn't change. */\
+ swap_loc = NULL; \
+ /* This is just to silence a compiler warning. */ \
+ nodep = NULL; \
/* Wind. */ \
- nodep = NULL; /* Silence compiler warning. */ \
path->node = rbtree->rbt_root; \
for (pathp = path; pathp->node != NULL; pathp++) { \
int cmp = pathp->cmp = a_cmp(node, pathp->node); \
@@ -567,6 +856,7 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
pathp--; \
if (pathp->node != node) { \
/* Swap node with its successor. */ \
+ swap_loc = nodep; \
bool tred = rbtn_red_get(a_type, a_field, pathp->node); \
rbtn_color_set(a_type, a_field, pathp->node, \
rbtn_red_get(a_type, a_field, node)); \
@@ -604,6 +894,9 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_black_set(a_type, a_field, left); \
if (pathp == path) { \
rbtree->rbt_root = left; \
+ /* Nothing to summarize -- the subtree rooted at the */\
+ /* node's left child hasn't changed, and it's now the */\
+ /* root. */\
} else { \
if (pathp[-1].cmp < 0) { \
rbtn_left_set(a_type, a_field, pathp[-1].node, \
@@ -612,6 +905,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
left); \
} \
+ a_prefix##summarize_swapped_range(path, &pathp[-1], \
+ swap_loc); \
} \
return; \
} else if (pathp == path) { \
@@ -620,10 +915,15 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
return; \
} \
} \
+ /* We've now established the invariant that the node has no right */\
+ /* child (well, morally; we didn't bother nulling it out if we */\
+ /* swapped it with its successor), and that the only nodes with */\
+ /* out-of-date summaries live in path[0], path[1], ..., pathp[-1].*/\
if (rbtn_red_get(a_type, a_field, pathp->node)) { \
/* Prune red node, which requires no fixup. */ \
assert(pathp[-1].cmp < 0); \
rbtn_left_set(a_type, a_field, pathp[-1].node, NULL); \
+ a_prefix##summarize_swapped_range(path, &pathp[-1], swap_loc); \
return; \
} \
/* The node to be pruned is black, so unwind until balance is */\
@@ -657,6 +957,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp->node, tnode);\
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
+ (void)a_summarize(pathp->node, \
+ rbtn_left_get(a_type, a_field, pathp->node), \
+ rbtn_right_get(a_type, a_field, pathp->node)); \
+ (void)a_summarize(right, \
+ rbtn_left_get(a_type, a_field, right), \
+ rbtn_right_get(a_type, a_field, right)); \
} else { \
/* || */\
/* pathp(r) */\
@@ -667,7 +973,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* */\
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
+ (void)a_summarize(pathp->node, \
+ rbtn_left_get(a_type, a_field, pathp->node), \
+ rbtn_right_get(a_type, a_field, pathp->node)); \
} \
+ (void)a_summarize(tnode, rbtn_left_get(a_type, a_field, \
+ tnode), rbtn_right_get(a_type, a_field, tnode)); \
/* Balance restored, but rotation modified subtree */\
/* root. */\
assert((uintptr_t)pathp > (uintptr_t)path); \
@@ -678,6 +989,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
+ a_prefix##summarize_swapped_range(path, &pathp[-1], \
+ swap_loc); \
return; \
} else { \
a_type *right = rbtn_right_get(a_type, a_field, \
@@ -698,6 +1011,15 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp->node, tnode);\
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
+ (void)a_summarize(pathp->node, \
+ rbtn_left_get(a_type, a_field, pathp->node), \
+ rbtn_right_get(a_type, a_field, pathp->node)); \
+ (void)a_summarize(right, \
+ rbtn_left_get(a_type, a_field, right), \
+ rbtn_right_get(a_type, a_field, right)); \
+ (void)a_summarize(tnode, \
+ rbtn_left_get(a_type, a_field, tnode), \
+ rbtn_right_get(a_type, a_field, tnode)); \
/* Balance restored, but rotation modified */\
/* subtree root, which may actually be the tree */\
/* root. */\
@@ -712,6 +1034,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, \
pathp[-1].node, tnode); \
} \
+ a_prefix##summarize_swapped_range(path, \
+ &pathp[-1], swap_loc); \
} \
return; \
} else { \
@@ -725,6 +1049,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_red_set(a_type, a_field, pathp->node); \
rbtn_rotate_left(a_type, a_field, pathp->node, \
tnode); \
+ (void)a_summarize(pathp->node, \
+ rbtn_left_get(a_type, a_field, pathp->node), \
+ rbtn_right_get(a_type, a_field, pathp->node)); \
+ (void)a_summarize(tnode, \
+ rbtn_left_get(a_type, a_field, tnode), \
+ rbtn_right_get(a_type, a_field, tnode)); \
pathp->node = tnode; \
} \
} \
@@ -757,6 +1087,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
tnode); \
rbtn_right_set(a_type, a_field, unode, tnode); \
rbtn_rotate_left(a_type, a_field, unode, tnode); \
+ (void)a_summarize(pathp->node, \
+ rbtn_left_get(a_type, a_field, pathp->node), \
+ rbtn_right_get(a_type, a_field, pathp->node)); \
+ (void)a_summarize(unode, \
+ rbtn_left_get(a_type, a_field, unode), \
+ rbtn_right_get(a_type, a_field, unode)); \
} else { \
/* || */\
/* pathp(b) */\
@@ -771,7 +1107,13 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
rbtn_black_set(a_type, a_field, tnode); \
+ (void)a_summarize(pathp->node, \
+ rbtn_left_get(a_type, a_field, pathp->node), \
+ rbtn_right_get(a_type, a_field, pathp->node)); \
} \
+ (void)a_summarize(tnode, \
+ rbtn_left_get(a_type, a_field, tnode), \
+ rbtn_right_get(a_type, a_field, tnode)); \
/* Balance restored, but rotation modified subtree */\
/* root, which may actually be the tree root. */\
if (pathp == path) { \
@@ -785,6 +1127,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
+ a_prefix##summarize_swapped_range(path, &pathp[-1], \
+ swap_loc); \
} \
return; \
} else if (rbtn_red_get(a_type, a_field, pathp->node)) { \
@@ -803,6 +1147,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
+ (void)a_summarize(pathp->node, \
+ rbtn_left_get(a_type, a_field, pathp->node), \
+ rbtn_right_get(a_type, a_field, pathp->node)); \
+ (void)a_summarize(tnode, \
+ rbtn_left_get(a_type, a_field, tnode), \
+ rbtn_right_get(a_type, a_field, tnode)); \
/* Balance restored, but rotation modified */\
/* subtree root. */\
assert((uintptr_t)pathp > (uintptr_t)path); \
@@ -813,6 +1163,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, pathp[-1].node, \
tnode); \
} \
+ a_prefix##summarize_swapped_range(path, &pathp[-1], \
+ swap_loc); \
return; \
} else { \
/* || */\
@@ -824,6 +1176,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_red_set(a_type, a_field, left); \
rbtn_black_set(a_type, a_field, pathp->node); \
/* Balance restored. */ \
+ a_prefix##summarize_swapped_range(path, pathp, \
+ swap_loc); \
return; \
} \
} else { \
@@ -840,6 +1194,12 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_black_set(a_type, a_field, leftleft); \
rbtn_rotate_right(a_type, a_field, pathp->node, \
tnode); \
+ (void)a_summarize(pathp->node, \
+ rbtn_left_get(a_type, a_field, pathp->node), \
+ rbtn_right_get(a_type, a_field, pathp->node)); \
+ (void)a_summarize(tnode, \
+ rbtn_left_get(a_type, a_field, tnode), \
+ rbtn_right_get(a_type, a_field, tnode)); \
/* Balance restored, but rotation modified */\
/* subtree root, which may actually be the tree */\
/* root. */\
@@ -854,6 +1214,8 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
rbtn_right_set(a_type, a_field, \
pathp[-1].node, tnode); \
} \
+ a_prefix##summarize_swapped_range(path, \
+ &pathp[-1], swap_loc); \
} \
return; \
} else { \
@@ -864,6 +1226,9 @@ a_prefix##remove(a_rbt_type *rbtree, a_type *node) { \
/* / */\
/* (b) */\
rbtn_red_set(a_type, a_field, left); \
+ (void)a_summarize(pathp->node, \
+ rbtn_left_get(a_type, a_field, pathp->node), \
+ rbtn_right_get(a_type, a_field, pathp->node)); \
} \
} \
} \
@@ -1001,6 +1366,491 @@ a_prefix##destroy(a_rbt_type *rbtree, void (*cb)(a_type *, void *), \
void *arg) { \
a_prefix##destroy_recurse(rbtree, rbtree->rbt_root, cb, arg); \
rbtree->rbt_root = NULL; \
-}
+} \
+/* BEGIN SUMMARIZED-ONLY IMPLEMENTATION */ \
+rb_summarized_only_##a_is_summarized( \
+static inline a_prefix##path_entry_t * \
+a_prefix##wind(a_rbt_type *rbtree, \
+ a_prefix##path_entry_t path[RB_MAX_DEPTH], a_type *node) { \
+ a_prefix##path_entry_t *pathp; \
+ path->node = rbtree->rbt_root; \
+ for (pathp = path; ; pathp++) { \
+ assert((size_t)(pathp - path) < RB_MAX_DEPTH); \
+ pathp->cmp = a_cmp(node, pathp->node); \
+ if (pathp->cmp < 0) { \
+ pathp[1].node = rbtn_left_get(a_type, a_field, \
+ pathp->node); \
+ } else if (pathp->cmp == 0) { \
+ return pathp; \
+ } else { \
+ pathp[1].node = rbtn_right_get(a_type, a_field, \
+ pathp->node); \
+ } \
+ } \
+ unreachable(); \
+} \
+a_attr void \
+a_prefix##update_summaries(a_rbt_type *rbtree, a_type *node) { \
+ a_prefix##path_entry_t path[RB_MAX_DEPTH]; \
+ a_prefix##path_entry_t *pathp = a_prefix##wind(rbtree, path, node); \
+ a_prefix##summarize_range(path, pathp); \
+} \
+a_attr bool \
+a_prefix##empty_filtered(a_rbt_type *rbtree, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx) { \
+ a_type *node = rbtree->rbt_root; \
+ return node == NULL || !filter_subtree(filter_ctx, node); \
+} \
+static inline a_type * \
+a_prefix##first_filtered_from_node(a_type *node, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx) { \
+ assert(node != NULL && filter_subtree(filter_ctx, node)); \
+ while (true) { \
+ a_type *left = rbtn_left_get(a_type, a_field, node); \
+ a_type *right = rbtn_right_get(a_type, a_field, node); \
+ if (left != NULL && filter_subtree(filter_ctx, left)) { \
+ node = left; \
+ } else if (filter_node(filter_ctx, node)) { \
+ return node; \
+ } else { \
+ assert(right != NULL \
+ && filter_subtree(filter_ctx, right)); \
+ node = right; \
+ } \
+ } \
+ unreachable(); \
+} \
+a_attr a_type * \
+a_prefix##first_filtered(a_rbt_type *rbtree, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx) { \
+ a_type *node = rbtree->rbt_root; \
+ if (node == NULL || !filter_subtree(filter_ctx, node)) { \
+ return NULL; \
+ } \
+ return a_prefix##first_filtered_from_node(node, filter_node, \
+ filter_subtree, filter_ctx); \
+} \
+static inline a_type * \
+a_prefix##last_filtered_from_node(a_type *node, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx) { \
+ assert(node != NULL && filter_subtree(filter_ctx, node)); \
+ while (true) { \
+ a_type *left = rbtn_left_get(a_type, a_field, node); \
+ a_type *right = rbtn_right_get(a_type, a_field, node); \
+ if (right != NULL && filter_subtree(filter_ctx, right)) { \
+ node = right; \
+ } else if (filter_node(filter_ctx, node)) { \
+ return node; \
+ } else { \
+ assert(left != NULL \
+ && filter_subtree(filter_ctx, left)); \
+ node = left; \
+ } \
+ } \
+ unreachable(); \
+} \
+a_attr a_type * \
+a_prefix##last_filtered(a_rbt_type *rbtree, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx) { \
+ a_type *node = rbtree->rbt_root; \
+ if (node == NULL || !filter_subtree(filter_ctx, node)) { \
+ return NULL; \
+ } \
+ return a_prefix##last_filtered_from_node(node, filter_node, \
+ filter_subtree, filter_ctx); \
+} \
+/* Internal implementation function. Search for a node comparing */\
+/* equal to key matching the filter. If such a node is in the tree, */\
+/* return it. Additionally, the caller has the option to ask for */\
+/* bounds on the next / prev node in the tree passing the filter. */\
+/* If nextbound is true, then this function will do one of the */\
+/* following: */\
+/* - Fill in *nextbound_node with the smallest node in the tree */\
+/* greater than key passing the filter, and NULL-out */\
+/* *nextbound_subtree. */\
+/* - Fill in *nextbound_subtree with a parent of that node which is */\
+/* not a parent of the searched-for node, and NULL-out */\
+/* *nextbound_node. */\
+/* - NULL-out both *nextbound_node and *nextbound_subtree, in which */\
+/* case no node greater than key but passing the filter is in the */\
+/* tree. */\
+/* The prevbound case is similar. If the caller knows that key is in */\
+/* the tree and that the subtree rooted at key does not contain a */\
+/* node satisfying the bound being searched for, then they can pass */\
+/* false for include_subtree, in which case we won't bother searching */\
+/* there (risking a cache miss). */\
+/* */\
+/* This API is unfortunately complex; but the logic for filtered */\
+/* searches is very subtle, and otherwise we would have to repeat it */\
+/* multiple times for filtered search, nsearch, psearch, next, and */\
+/* prev. */\
+static inline a_type * \
+a_prefix##search_with_filter_bounds(a_rbt_type *rbtree, \
+ const a_type *key, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx, \
+ bool include_subtree, \
+ bool nextbound, a_type **nextbound_node, a_type **nextbound_subtree, \
+ bool prevbound, a_type **prevbound_node, a_type **prevbound_subtree) {\
+ if (nextbound) { \
+ *nextbound_node = NULL; \
+ *nextbound_subtree = NULL; \
+ } \
+ if (prevbound) { \
+ *prevbound_node = NULL; \
+ *prevbound_subtree = NULL; \
+ } \
+ a_type *tnode = rbtree->rbt_root; \
+ while (tnode != NULL && filter_subtree(filter_ctx, tnode)) { \
+ int cmp = a_cmp(key, tnode); \
+ a_type *tleft = rbtn_left_get(a_type, a_field, tnode); \
+ a_type *tright = rbtn_right_get(a_type, a_field, tnode); \
+ if (cmp < 0) { \
+ if (nextbound) { \
+ if (filter_node(filter_ctx, tnode)) { \
+ *nextbound_node = tnode; \
+ *nextbound_subtree = NULL; \
+ } else if (tright != NULL && filter_subtree( \
+ filter_ctx, tright)) { \
+ *nextbound_node = NULL; \
+ *nextbound_subtree = tright; \
+ } \
+ } \
+ tnode = tleft; \
+ } else if (cmp > 0) { \
+ if (prevbound) { \
+ if (filter_node(filter_ctx, tnode)) { \
+ *prevbound_node = tnode; \
+ *prevbound_subtree = NULL; \
+ } else if (tleft != NULL && filter_subtree( \
+ filter_ctx, tleft)) { \
+ *prevbound_node = NULL; \
+ *prevbound_subtree = tleft; \
+ } \
+ } \
+ tnode = tright; \
+ } else { \
+ if (filter_node(filter_ctx, tnode)) { \
+ return tnode; \
+ } \
+ if (include_subtree) { \
+ if (prevbound && tleft != NULL && filter_subtree( \
+ filter_ctx, tleft)) { \
+ *prevbound_node = NULL; \
+ *prevbound_subtree = tleft; \
+ } \
+ if (nextbound && tright != NULL && filter_subtree( \
+ filter_ctx, tright)) { \
+ *nextbound_node = NULL; \
+ *nextbound_subtree = tright; \
+ } \
+ } \
+ return NULL; \
+ } \
+ } \
+ return NULL; \
+} \
+a_attr a_type * \
+a_prefix##next_filtered(a_rbt_type *rbtree, a_type *node, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx) { \
+ a_type *nright = rbtn_right_get(a_type, a_field, node); \
+ if (nright != NULL && filter_subtree(filter_ctx, nright)) { \
+ return a_prefix##first_filtered_from_node(nright, filter_node, \
+ filter_subtree, filter_ctx); \
+ } \
+ a_type *node_candidate; \
+ a_type *subtree_candidate; \
+ a_type *search_result = a_prefix##search_with_filter_bounds( \
+ rbtree, node, filter_node, filter_subtree, filter_ctx, \
+ /* include_subtree */ false, \
+ /* nextbound */ true, &node_candidate, &subtree_candidate, \
+ /* prevbound */ false, NULL, NULL); \
+ assert(node == search_result \
+ || !filter_node(filter_ctx, node)); \
+ if (node_candidate != NULL) { \
+ return node_candidate; \
+ } \
+ if (subtree_candidate != NULL) { \
+ return a_prefix##first_filtered_from_node( \
+ subtree_candidate, filter_node, filter_subtree, \
+ filter_ctx); \
+ } \
+ return NULL; \
+} \
+a_attr a_type * \
+a_prefix##prev_filtered(a_rbt_type *rbtree, a_type *node, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx) { \
+ a_type *nleft = rbtn_left_get(a_type, a_field, node); \
+ if (nleft != NULL && filter_subtree(filter_ctx, nleft)) { \
+ return a_prefix##last_filtered_from_node(nleft, filter_node, \
+ filter_subtree, filter_ctx); \
+ } \
+ a_type *node_candidate; \
+ a_type *subtree_candidate; \
+ a_type *search_result = a_prefix##search_with_filter_bounds( \
+ rbtree, node, filter_node, filter_subtree, filter_ctx, \
+ /* include_subtree */ false, \
+ /* nextbound */ false, NULL, NULL, \
+ /* prevbound */ true, &node_candidate, &subtree_candidate); \
+ assert(node == search_result \
+ || !filter_node(filter_ctx, node)); \
+ if (node_candidate != NULL) { \
+ return node_candidate; \
+ } \
+ if (subtree_candidate != NULL) { \
+ return a_prefix##last_filtered_from_node( \
+ subtree_candidate, filter_node, filter_subtree, \
+ filter_ctx); \
+ } \
+ return NULL; \
+} \
+a_attr a_type * \
+a_prefix##search_filtered(a_rbt_type *rbtree, const a_type *key, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx) { \
+ a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \
+ filter_node, filter_subtree, filter_ctx, \
+ /* include_subtree */ false, \
+ /* nextbound */ false, NULL, NULL, \
+ /* prevbound */ false, NULL, NULL); \
+ return result; \
+} \
+a_attr a_type * \
+a_prefix##nsearch_filtered(a_rbt_type *rbtree, const a_type *key, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx) { \
+ a_type *node_candidate; \
+ a_type *subtree_candidate; \
+ a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \
+ filter_node, filter_subtree, filter_ctx, \
+ /* include_subtree */ true, \
+ /* nextbound */ true, &node_candidate, &subtree_candidate, \
+ /* prevbound */ false, NULL, NULL); \
+ if (result != NULL) { \
+ return result; \
+ } \
+ if (node_candidate != NULL) { \
+ return node_candidate; \
+ } \
+ if (subtree_candidate != NULL) { \
+ return a_prefix##first_filtered_from_node( \
+ subtree_candidate, filter_node, filter_subtree, \
+ filter_ctx); \
+ } \
+ return NULL; \
+} \
+a_attr a_type * \
+a_prefix##psearch_filtered(a_rbt_type *rbtree, const a_type *key, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx) { \
+ a_type *node_candidate; \
+ a_type *subtree_candidate; \
+ a_type *result = a_prefix##search_with_filter_bounds(rbtree, key, \
+ filter_node, filter_subtree, filter_ctx, \
+ /* include_subtree */ true, \
+ /* nextbound */ false, NULL, NULL, \
+ /* prevbound */ true, &node_candidate, &subtree_candidate); \
+ if (result != NULL) { \
+ return result; \
+ } \
+ if (node_candidate != NULL) { \
+ return node_candidate; \
+ } \
+ if (subtree_candidate != NULL) { \
+ return a_prefix##last_filtered_from_node( \
+ subtree_candidate, filter_node, filter_subtree, \
+ filter_ctx); \
+ } \
+ return NULL; \
+} \
+a_attr a_type * \
+a_prefix##iter_recurse_filtered(a_rbt_type *rbtree, a_type *node, \
+ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx) { \
+ if (node == NULL || !filter_subtree(filter_ctx, node)) { \
+ return NULL; \
+ } \
+ a_type *ret; \
+ a_type *left = rbtn_left_get(a_type, a_field, node); \
+ a_type *right = rbtn_right_get(a_type, a_field, node); \
+ ret = a_prefix##iter_recurse_filtered(rbtree, left, cb, arg, \
+ filter_node, filter_subtree, filter_ctx); \
+ if (ret != NULL) { \
+ return ret; \
+ } \
+ if (filter_node(filter_ctx, node)) { \
+ ret = cb(rbtree, node, arg); \
+ } \
+ if (ret != NULL) { \
+ return ret; \
+ } \
+ return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \
+ filter_node, filter_subtree, filter_ctx); \
+} \
+a_attr a_type * \
+a_prefix##iter_start_filtered(a_rbt_type *rbtree, a_type *start, \
+ a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
+ void *arg, bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx) { \
+ if (!filter_subtree(filter_ctx, node)) { \
+ return NULL; \
+ } \
+ int cmp = a_cmp(start, node); \
+ a_type *ret; \
+ a_type *left = rbtn_left_get(a_type, a_field, node); \
+ a_type *right = rbtn_right_get(a_type, a_field, node); \
+ if (cmp < 0) { \
+ ret = a_prefix##iter_start_filtered(rbtree, start, left, cb, \
+ arg, filter_node, filter_subtree, filter_ctx); \
+ if (ret != NULL) { \
+ return ret; \
+ } \
+ if (filter_node(filter_ctx, node)) { \
+ ret = cb(rbtree, node, arg); \
+ if (ret != NULL) { \
+ return ret; \
+ } \
+ } \
+ return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \
+ filter_node, filter_subtree, filter_ctx); \
+ } else if (cmp > 0) { \
+ return a_prefix##iter_start_filtered(rbtree, start, right, \
+ cb, arg, filter_node, filter_subtree, filter_ctx); \
+ } else { \
+ if (filter_node(filter_ctx, node)) { \
+ ret = cb(rbtree, node, arg); \
+ if (ret != NULL) { \
+ return ret; \
+ } \
+ } \
+ return a_prefix##iter_recurse_filtered(rbtree, right, cb, arg, \
+ filter_node, filter_subtree, filter_ctx); \
+ } \
+} \
+a_attr a_type * \
+a_prefix##iter_filtered(a_rbt_type *rbtree, a_type *start, \
+ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx) { \
+ a_type *ret; \
+ if (start != NULL) { \
+ ret = a_prefix##iter_start_filtered(rbtree, start, \
+ rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \
+ filter_ctx); \
+ } else { \
+ ret = a_prefix##iter_recurse_filtered(rbtree, rbtree->rbt_root, \
+ cb, arg, filter_node, filter_subtree, filter_ctx); \
+ } \
+ return ret; \
+} \
+a_attr a_type * \
+a_prefix##reverse_iter_recurse_filtered(a_rbt_type *rbtree, \
+ a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
+ void *arg, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx) { \
+ if (node == NULL || !filter_subtree(filter_ctx, node)) { \
+ return NULL; \
+ } \
+ a_type *ret; \
+ a_type *left = rbtn_left_get(a_type, a_field, node); \
+ a_type *right = rbtn_right_get(a_type, a_field, node); \
+ ret = a_prefix##reverse_iter_recurse_filtered(rbtree, right, cb, \
+ arg, filter_node, filter_subtree, filter_ctx); \
+ if (ret != NULL) { \
+ return ret; \
+ } \
+ if (filter_node(filter_ctx, node)) { \
+ ret = cb(rbtree, node, arg); \
+ } \
+ if (ret != NULL) { \
+ return ret; \
+ } \
+ return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb, \
+ arg, filter_node, filter_subtree, filter_ctx); \
+} \
+a_attr a_type * \
+a_prefix##reverse_iter_start_filtered(a_rbt_type *rbtree, a_type *start,\
+ a_type *node, a_type *(*cb)(a_rbt_type *, a_type *, void *), \
+ void *arg, bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx) { \
+ if (!filter_subtree(filter_ctx, node)) { \
+ return NULL; \
+ } \
+ int cmp = a_cmp(start, node); \
+ a_type *ret; \
+ a_type *left = rbtn_left_get(a_type, a_field, node); \
+ a_type *right = rbtn_right_get(a_type, a_field, node); \
+ if (cmp > 0) { \
+ ret = a_prefix##reverse_iter_start_filtered(rbtree, start, \
+ right, cb, arg, filter_node, filter_subtree, filter_ctx); \
+ if (ret != NULL) { \
+ return ret; \
+ } \
+ if (filter_node(filter_ctx, node)) { \
+ ret = cb(rbtree, node, arg); \
+ if (ret != NULL) { \
+ return ret; \
+ } \
+ } \
+ return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb,\
+ arg, filter_node, filter_subtree, filter_ctx); \
+ } else if (cmp < 0) { \
+ return a_prefix##reverse_iter_start_filtered(rbtree, start, \
+ left, cb, arg, filter_node, filter_subtree, filter_ctx); \
+ } else { \
+ if (filter_node(filter_ctx, node)) { \
+ ret = cb(rbtree, node, arg); \
+ if (ret != NULL) { \
+ return ret; \
+ } \
+ } \
+ return a_prefix##reverse_iter_recurse_filtered(rbtree, left, cb,\
+ arg, filter_node, filter_subtree, filter_ctx); \
+ } \
+} \
+a_attr a_type * \
+a_prefix##reverse_iter_filtered(a_rbt_type *rbtree, a_type *start, \
+ a_type *(*cb)(a_rbt_type *, a_type *, void *), void *arg, \
+ bool (*filter_node)(void *, a_type *), \
+ bool (*filter_subtree)(void *, a_type *), \
+ void *filter_ctx) { \
+ a_type *ret; \
+ if (start != NULL) { \
+ ret = a_prefix##reverse_iter_start_filtered(rbtree, start, \
+ rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \
+ filter_ctx); \
+ } else { \
+ ret = a_prefix##reverse_iter_recurse_filtered(rbtree, \
+ rbtree->rbt_root, cb, arg, filter_node, filter_subtree, \
+ filter_ctx); \
+ } \
+ return ret; \
+} \
+) /* end rb_summarized_only */
-#endif /* RB_H_ */
+#endif /* JEMALLOC_INTERNAL_RB_H */
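For orientation, a minimal sketch of how a consumer might drive the generated filtered traversal; the ex_ prefix, node type, and filter logic are hypothetical stand-ins for whatever rb_summarized_gen() was actually invoked with:

typedef struct ex_node_s ex_node_t;
struct ex_node_s {
	int key;
	bool live;	/* Per-node filter input. */
	bool any_live;	/* Subtree summary, maintained by the summarize cb. */
	rb_node(ex_node_t) link;
};
typedef rb_tree(ex_node_t) ex_tree_t;

static bool
ex_filter_node(void *ctx, ex_node_t *node) {
	return node->live;	/* Visit only live nodes. */
}

static bool
ex_filter_subtree(void *ctx, ex_node_t *subtree_root) {
	return subtree_root->any_live;	/* Prune subtrees with no live nodes. */
}

/* First live node with key >= key->key, or NULL if none exists. */
static ex_node_t *
ex_first_live_ge(ex_tree_t *tree, const ex_node_t *key) {
	return ex_nsearch_filtered(tree, key, ex_filter_node,
	    ex_filter_subtree, NULL);
}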
diff --git a/contrib/jemalloc/include/jemalloc/internal/rtree.h b/contrib/jemalloc/include/jemalloc/internal/rtree.h
index 16ccbebee7f0..a00adb2982f3 100644
--- a/contrib/jemalloc/include/jemalloc/internal/rtree.h
+++ b/contrib/jemalloc/include/jemalloc/internal/rtree.h
@@ -35,33 +35,52 @@
# define RTREE_LEAF_COMPACT
#endif
-/* Needed for initialization only. */
-#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
-
typedef struct rtree_node_elm_s rtree_node_elm_t;
struct rtree_node_elm_s {
atomic_p_t child; /* (rtree_{node,leaf}_elm_t *) */
};
+typedef struct rtree_metadata_s rtree_metadata_t;
+struct rtree_metadata_s {
+ szind_t szind;
+ extent_state_t state; /* Mirrors edata->state. */
+ bool is_head; /* Mirrors edata->is_head. */
+ bool slab;
+};
+
+typedef struct rtree_contents_s rtree_contents_t;
+struct rtree_contents_s {
+ edata_t *edata;
+ rtree_metadata_t metadata;
+};
+
+#define RTREE_LEAF_STATE_WIDTH EDATA_BITS_STATE_WIDTH
+#define RTREE_LEAF_STATE_SHIFT 2
+#define RTREE_LEAF_STATE_MASK MASK(RTREE_LEAF_STATE_WIDTH, RTREE_LEAF_STATE_SHIFT)
+
struct rtree_leaf_elm_s {
#ifdef RTREE_LEAF_COMPACT
/*
* Single pointer-width field containing all three leaf element fields.
* For example, on a 64-bit x64 system with 48 significant virtual
- * memory address bits, the index, extent, and slab fields are packed as
+ * memory address bits, the index, edata, and slab fields are packed as
* such:
*
* x: index
- * e: extent
+ * e: edata
+ * s: state
+ * h: is_head
* b: slab
*
- * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee eeee000b
+ * 00000000 xxxxxxxx eeeeeeee [...] eeeeeeee e00ssshb
*/
atomic_p_t le_bits;
#else
- atomic_p_t le_extent; /* (extent_t *) */
- atomic_u_t le_szind; /* (szind_t) */
- atomic_b_t le_slab; /* (bool) */
+ atomic_p_t le_edata; /* (edata_t *) */
+ /*
+ * From high to low bits: szind (8 bits), state (4 bits), is_head, slab
+ */
+ atomic_u_t le_metadata;
#endif
};
@@ -78,6 +97,7 @@ struct rtree_level_s {
typedef struct rtree_s rtree_t;
struct rtree_s {
+ base_t *base;
malloc_mutex_t init_lock;
/* Number of elements based on rtree_levels[0].bits. */
#if RTREE_HEIGHT > 1
@@ -109,42 +129,29 @@ static const rtree_level_t rtree_levels[] = {
#endif
};
-bool rtree_new(rtree_t *rtree, bool zeroed);
-
-typedef rtree_node_elm_t *(rtree_node_alloc_t)(tsdn_t *, rtree_t *, size_t);
-extern rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc;
+bool rtree_new(rtree_t *rtree, base_t *base, bool zeroed);
-typedef rtree_leaf_elm_t *(rtree_leaf_alloc_t)(tsdn_t *, rtree_t *, size_t);
-extern rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc;
-
-typedef void (rtree_node_dalloc_t)(tsdn_t *, rtree_t *, rtree_node_elm_t *);
-extern rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc;
-
-typedef void (rtree_leaf_dalloc_t)(tsdn_t *, rtree_t *, rtree_leaf_elm_t *);
-extern rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc;
-#ifdef JEMALLOC_JET
-void rtree_delete(tsdn_t *tsdn, rtree_t *rtree);
-#endif
rtree_leaf_elm_t *rtree_leaf_elm_lookup_hard(tsdn_t *tsdn, rtree_t *rtree,
rtree_ctx_t *rtree_ctx, uintptr_t key, bool dependent, bool init_missing);
-JEMALLOC_ALWAYS_INLINE uintptr_t
-rtree_leafkey(uintptr_t key) {
+JEMALLOC_ALWAYS_INLINE unsigned
+rtree_leaf_maskbits(void) {
unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
rtree_levels[RTREE_HEIGHT-1].bits);
- unsigned maskbits = ptrbits - cumbits;
- uintptr_t mask = ~((ZU(1) << maskbits) - 1);
+ return ptrbits - cumbits;
+}
+
+JEMALLOC_ALWAYS_INLINE uintptr_t
+rtree_leafkey(uintptr_t key) {
+ uintptr_t mask = ~((ZU(1) << rtree_leaf_maskbits()) - 1);
return (key & mask);
}
JEMALLOC_ALWAYS_INLINE size_t
rtree_cache_direct_map(uintptr_t key) {
- unsigned ptrbits = ZU(1) << (LG_SIZEOF_PTR+3);
- unsigned cumbits = (rtree_levels[RTREE_HEIGHT-1].cumbits -
- rtree_levels[RTREE_HEIGHT-1].bits);
- unsigned maskbits = ptrbits - cumbits;
- return (size_t)((key >> maskbits) & (RTREE_CTX_NCACHE - 1));
+ return (size_t)((key >> rtree_leaf_maskbits()) &
+ (RTREE_CTX_NCACHE - 1));
}
JEMALLOC_ALWAYS_INLINE uintptr_t
@@ -176,151 +183,174 @@ rtree_leaf_elm_bits_read(tsdn_t *tsdn, rtree_t *rtree,
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
}
-JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_leaf_elm_bits_extent_get(uintptr_t bits) {
+JEMALLOC_ALWAYS_INLINE uintptr_t
+rtree_leaf_elm_bits_encode(rtree_contents_t contents) {
+ assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
+ uintptr_t edata_bits = (uintptr_t)contents.edata
+ & (((uintptr_t)1 << LG_VADDR) - 1);
+
+ uintptr_t szind_bits = (uintptr_t)contents.metadata.szind << LG_VADDR;
+ uintptr_t slab_bits = (uintptr_t)contents.metadata.slab;
+ uintptr_t is_head_bits = (uintptr_t)contents.metadata.is_head << 1;
+ uintptr_t state_bits = (uintptr_t)contents.metadata.state <<
+ RTREE_LEAF_STATE_SHIFT;
+ uintptr_t metadata_bits = szind_bits | state_bits | is_head_bits |
+ slab_bits;
+ assert((edata_bits & metadata_bits) == 0);
+
+ return edata_bits | metadata_bits;
+}
+
+JEMALLOC_ALWAYS_INLINE rtree_contents_t
+rtree_leaf_elm_bits_decode(uintptr_t bits) {
+ rtree_contents_t contents;
+ /* Do the easy things first. */
+ contents.metadata.szind = bits >> LG_VADDR;
+ contents.metadata.slab = (bool)(bits & 1);
+ contents.metadata.is_head = (bool)(bits & (1 << 1));
+
+ uintptr_t state_bits = (bits & RTREE_LEAF_STATE_MASK) >>
+ RTREE_LEAF_STATE_SHIFT;
+ assert(state_bits <= extent_state_max);
+ contents.metadata.state = (extent_state_t)state_bits;
+
+ uintptr_t low_bit_mask = ~((uintptr_t)EDATA_ALIGNMENT - 1);
# ifdef __aarch64__
/*
* aarch64 doesn't sign extend the highest virtual address bit to set
- * the higher ones. Instead, the high bits gets zeroed.
+ * the higher ones. Instead, the high bits get zeroed.
*/
uintptr_t high_bit_mask = ((uintptr_t)1 << LG_VADDR) - 1;
- /* Mask off the slab bit. */
- uintptr_t low_bit_mask = ~(uintptr_t)1;
+ /* Mask off metadata. */
uintptr_t mask = high_bit_mask & low_bit_mask;
- return (extent_t *)(bits & mask);
+ contents.edata = (edata_t *)(bits & mask);
# else
- /* Restore sign-extended high bits, mask slab bit. */
- return (extent_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB) >>
- RTREE_NHIB) & ~((uintptr_t)0x1));
+ /* Restore sign-extended high bits, mask metadata bits. */
+ contents.edata = (edata_t *)((uintptr_t)((intptr_t)(bits << RTREE_NHIB)
+ >> RTREE_NHIB) & low_bit_mask);
# endif
+ assert((uintptr_t)contents.edata % (uintptr_t)EDATA_ALIGNMENT == 0);
+ return contents;
}
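To make the packing concrete, an illustrative round-trip through the compact encoding, assuming LG_VADDR == 48 and EDATA_ALIGNMENT == 128 (both configuration-dependent; the pointer value is made up):

static void
rtree_compact_encoding_example(void) {
	rtree_contents_t c;
	c.edata = (edata_t *)0x7f1234567800;	/* 128-byte aligned. */
	c.metadata.szind = 42;			/* Lands in bits [48, 56). */
	c.metadata.state = extent_state_active;	/* Bits [2, 2 + state width). */
	c.metadata.is_head = true;		/* Bit 1. */
	c.metadata.slab = true;			/* Bit 0. */

	uintptr_t bits = rtree_leaf_elm_bits_encode(c);
	/* bits == ((uintptr_t)42 << 48) | 0x7f1234567800
	 *     | ((uintptr_t)extent_state_active << RTREE_LEAF_STATE_SHIFT)
	 *     | (1 << 1) | 1 */
	rtree_contents_t back = rtree_leaf_elm_bits_decode(bits);
	assert(back.edata == c.edata);	/* The metadata fields round-trip too. */
}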
-JEMALLOC_ALWAYS_INLINE szind_t
-rtree_leaf_elm_bits_szind_get(uintptr_t bits) {
- return (szind_t)(bits >> LG_VADDR);
-}
-
-JEMALLOC_ALWAYS_INLINE bool
-rtree_leaf_elm_bits_slab_get(uintptr_t bits) {
- return (bool)(bits & (uintptr_t)0x1);
-}
+# endif /* RTREE_LEAF_COMPACT */
-# endif
-
-JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_leaf_elm_extent_read(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, bool dependent) {
+JEMALLOC_ALWAYS_INLINE rtree_contents_t
+rtree_leaf_elm_read(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *elm,
+ bool dependent) {
#ifdef RTREE_LEAF_COMPACT
uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
- return rtree_leaf_elm_bits_extent_get(bits);
+ rtree_contents_t contents = rtree_leaf_elm_bits_decode(bits);
+ return contents;
#else
- extent_t *extent = (extent_t *)atomic_load_p(&elm->le_extent, dependent
+ rtree_contents_t contents;
+ unsigned metadata_bits = atomic_load_u(&elm->le_metadata, dependent
? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
- return extent;
-#endif
-}
+ contents.metadata.slab = (bool)(metadata_bits & 1);
+ contents.metadata.is_head = (bool)(metadata_bits & (1 << 1));
-JEMALLOC_ALWAYS_INLINE szind_t
-rtree_leaf_elm_szind_read(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, bool dependent) {
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
- return rtree_leaf_elm_bits_szind_get(bits);
-#else
- return (szind_t)atomic_load_u(&elm->le_szind, dependent ? ATOMIC_RELAXED
- : ATOMIC_ACQUIRE);
+ uintptr_t state_bits = (metadata_bits & RTREE_LEAF_STATE_MASK) >>
+ RTREE_LEAF_STATE_SHIFT;
+ assert(state_bits <= extent_state_max);
+ contents.metadata.state = (extent_state_t)state_bits;
+ contents.metadata.szind = metadata_bits >> (RTREE_LEAF_STATE_SHIFT +
+ RTREE_LEAF_STATE_WIDTH);
+
+ contents.edata = (edata_t *)atomic_load_p(&elm->le_edata, dependent
+ ? ATOMIC_RELAXED : ATOMIC_ACQUIRE);
+
+ return contents;
#endif
}
-JEMALLOC_ALWAYS_INLINE bool
-rtree_leaf_elm_slab_read(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, bool dependent) {
+JEMALLOC_ALWAYS_INLINE void
+rtree_contents_encode(rtree_contents_t contents, void **bits,
+ unsigned *additional) {
#ifdef RTREE_LEAF_COMPACT
- uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
- return rtree_leaf_elm_bits_slab_get(bits);
+ *bits = (void *)rtree_leaf_elm_bits_encode(contents);
#else
- return atomic_load_b(&elm->le_slab, dependent ? ATOMIC_RELAXED :
- ATOMIC_ACQUIRE);
+ *additional = (unsigned)contents.metadata.slab
+ | ((unsigned)contents.metadata.is_head << 1)
+ | ((unsigned)contents.metadata.state << RTREE_LEAF_STATE_SHIFT)
+ | ((unsigned)contents.metadata.szind << (RTREE_LEAF_STATE_SHIFT +
+ RTREE_LEAF_STATE_WIDTH));
+ *bits = contents.edata;
#endif
}
-static inline void
-rtree_leaf_elm_extent_write(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, extent_t *extent) {
+JEMALLOC_ALWAYS_INLINE void
+rtree_leaf_elm_write_commit(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_leaf_elm_t *elm, void *bits, unsigned additional) {
#ifdef RTREE_LEAF_COMPACT
- uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, true);
- uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
- LG_VADDR) | ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1))
- | ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
- atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
+ atomic_store_p(&elm->le_bits, bits, ATOMIC_RELEASE);
#else
- atomic_store_p(&elm->le_extent, extent, ATOMIC_RELEASE);
+ atomic_store_u(&elm->le_metadata, additional, ATOMIC_RELEASE);
+ /*
+ * Write edata last, since the element is atomically considered valid
+ * as soon as the edata field is non-NULL.
+ */
+ atomic_store_p(&elm->le_edata, bits, ATOMIC_RELEASE);
#endif
}
-static inline void
-rtree_leaf_elm_szind_write(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, szind_t szind) {
- assert(szind <= SC_NSIZES);
+JEMALLOC_ALWAYS_INLINE void
+rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_leaf_elm_t *elm, rtree_contents_t contents) {
+ assert((uintptr_t)contents.edata % EDATA_ALIGNMENT == 0);
+ void *bits;
+ unsigned additional;
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
- true);
- uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
- ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
- (((uintptr_t)0x1 << LG_VADDR) - 1)) |
- ((uintptr_t)rtree_leaf_elm_bits_slab_get(old_bits));
- atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
-#else
- atomic_store_u(&elm->le_szind, szind, ATOMIC_RELEASE);
-#endif
+ rtree_contents_encode(contents, &bits, &additional);
+ rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
}
-static inline void
-rtree_leaf_elm_slab_write(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, bool slab) {
+/* The state field can be updated independently (and more frequently). */
+JEMALLOC_ALWAYS_INLINE void
+rtree_leaf_elm_state_update(tsdn_t *tsdn, rtree_t *rtree,
+ rtree_leaf_elm_t *elm1, rtree_leaf_elm_t *elm2, extent_state_t state) {
+ assert(elm1 != NULL);
#ifdef RTREE_LEAF_COMPACT
- uintptr_t old_bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm,
- true);
- uintptr_t bits = ((uintptr_t)rtree_leaf_elm_bits_szind_get(old_bits) <<
- LG_VADDR) | ((uintptr_t)rtree_leaf_elm_bits_extent_get(old_bits) &
- (((uintptr_t)0x1 << LG_VADDR) - 1)) | ((uintptr_t)slab);
- atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
+ uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm1,
+ /* dependent */ true);
+ bits &= ~RTREE_LEAF_STATE_MASK;
+ bits |= state << RTREE_LEAF_STATE_SHIFT;
+ atomic_store_p(&elm1->le_bits, (void *)bits, ATOMIC_RELEASE);
+ if (elm2 != NULL) {
+ atomic_store_p(&elm2->le_bits, (void *)bits, ATOMIC_RELEASE);
+ }
#else
- atomic_store_b(&elm->le_slab, slab, ATOMIC_RELEASE);
+ unsigned bits = atomic_load_u(&elm1->le_metadata, ATOMIC_RELAXED);
+ bits &= ~RTREE_LEAF_STATE_MASK;
+ bits |= state << RTREE_LEAF_STATE_SHIFT;
+ atomic_store_u(&elm1->le_metadata, bits, ATOMIC_RELEASE);
+ if (elm2 != NULL) {
+ atomic_store_u(&elm2->le_metadata, bits, ATOMIC_RELEASE);
+ }
#endif
}
-static inline void
-rtree_leaf_elm_write(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, extent_t *extent, szind_t szind, bool slab) {
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t bits = ((uintptr_t)szind << LG_VADDR) |
- ((uintptr_t)extent & (((uintptr_t)0x1 << LG_VADDR) - 1)) |
- ((uintptr_t)slab);
- atomic_store_p(&elm->le_bits, (void *)bits, ATOMIC_RELEASE);
-#else
- rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
- rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
- /*
- * Write extent last, since the element is atomically considered valid
- * as soon as the extent field is non-NULL.
- */
- rtree_leaf_elm_extent_write(tsdn, rtree, elm, extent);
-#endif
-}
+/*
+ * Tries to look up the key in the L1 cache, returning false on a hit and
+ * true on a miss.
+ * Key is allowed to be NULL; returns true in this case.
+ */
+JEMALLOC_ALWAYS_INLINE bool
+rtree_leaf_elm_lookup_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, rtree_leaf_elm_t **elm) {
+ size_t slot = rtree_cache_direct_map(key);
+ uintptr_t leafkey = rtree_leafkey(key);
+ assert(leafkey != RTREE_LEAFKEY_INVALID);
-static inline void
-rtree_leaf_elm_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree,
- rtree_leaf_elm_t *elm, szind_t szind, bool slab) {
- assert(!slab || szind < SC_NBINS);
+ if (unlikely(rtree_ctx->cache[slot].leafkey != leafkey)) {
+ return true;
+ }
- /*
- * The caller implicitly assures that it is the only writer to the szind
- * and slab fields, and that the extent field cannot currently change.
- */
- rtree_leaf_elm_slab_write(tsdn, rtree, elm, slab);
- rtree_leaf_elm_szind_write(tsdn, rtree, elm, szind);
+ rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
+ assert(leaf != NULL);
+ uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
+ *elm = &leaf[subkey];
+
+ return false;
}
JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
@@ -382,147 +412,143 @@ rtree_leaf_elm_lookup(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
dependent, init_missing);
}
+/*
+ * Returns true on lookup failure.
+ */
static inline bool
-rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
- extent_t *extent, szind_t szind, bool slab) {
- /* Use rtree_clear() to set the extent to NULL. */
- assert(extent != NULL);
-
+rtree_read_independent(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, rtree_contents_t *r_contents) {
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
- key, false, true);
+ key, /* dependent */ false, /* init_missing */ false);
if (elm == NULL) {
return true;
}
-
- assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) == NULL);
- rtree_leaf_elm_write(tsdn, rtree, elm, extent, szind, slab);
-
+ *r_contents = rtree_leaf_elm_read(tsdn, rtree, elm,
+ /* dependent */ false);
return false;
}
-JEMALLOC_ALWAYS_INLINE rtree_leaf_elm_t *
-rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
- bool dependent) {
+static inline rtree_contents_t
+rtree_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key) {
rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
- key, dependent, false);
- if (!dependent && elm == NULL) {
- return NULL;
- }
+ key, /* dependent */ true, /* init_missing */ false);
assert(elm != NULL);
- return elm;
+ return rtree_leaf_elm_read(tsdn, rtree, elm, /* dependent */ true);
}
-JEMALLOC_ALWAYS_INLINE extent_t *
-rtree_extent_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, bool dependent) {
- rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
- dependent);
- if (!dependent && elm == NULL) {
- return NULL;
- }
- return rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
-}
-
-JEMALLOC_ALWAYS_INLINE szind_t
-rtree_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, bool dependent) {
- rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
- dependent);
- if (!dependent && elm == NULL) {
- return SC_NSIZES;
- }
- return rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
+static inline rtree_metadata_t
+rtree_metadata_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key) {
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
+ key, /* dependent */ true, /* init_missing */ false);
+ assert(elm != NULL);
+ return rtree_leaf_elm_read(tsdn, rtree, elm,
+ /* dependent */ true).metadata;
}
/*
- * rtree_slab_read() is intentionally omitted because slab is always read in
- * conjunction with szind, which makes rtree_szind_slab_read() a better choice.
+ * Returns true when the request cannot be fulfilled by fastpath.
*/
-
-JEMALLOC_ALWAYS_INLINE bool
-rtree_extent_szind_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, bool dependent, extent_t **r_extent, szind_t *r_szind) {
- rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
- dependent);
- if (!dependent && elm == NULL) {
+static inline bool
+rtree_metadata_try_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t key, rtree_metadata_t *r_rtree_metadata) {
+ rtree_leaf_elm_t *elm;
+ /*
+	 * Check the bool return value (lookup success or not) rather than
+	 * elm == NULL, which would cost an extra branch: on a cache hit the
+	 * returned pointer is never NULL, but the compiler has no way of
+	 * knowing that.
+ */
+ if (rtree_leaf_elm_lookup_fast(tsdn, rtree, rtree_ctx, key, &elm)) {
return true;
}
- *r_extent = rtree_leaf_elm_extent_read(tsdn, rtree, elm, dependent);
- *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
+ assert(elm != NULL);
+ *r_rtree_metadata = rtree_leaf_elm_read(tsdn, rtree, elm,
+ /* dependent */ true).metadata;
return false;
}
-/*
- * Try to read szind_slab from the L1 cache. Returns true on a hit,
- * and fills in r_szind and r_slab. Otherwise returns false.
- *
- * Key is allowed to be NULL in order to save an extra branch on the
- * fastpath. returns false in this case.
- */
-JEMALLOC_ALWAYS_INLINE bool
-rtree_szind_slab_read_fast(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, szind_t *r_szind, bool *r_slab) {
- rtree_leaf_elm_t *elm;
-
- size_t slot = rtree_cache_direct_map(key);
- uintptr_t leafkey = rtree_leafkey(key);
- assert(leafkey != RTREE_LEAFKEY_INVALID);
-
- if (likely(rtree_ctx->cache[slot].leafkey == leafkey)) {
- rtree_leaf_elm_t *leaf = rtree_ctx->cache[slot].leaf;
- assert(leaf != NULL);
- uintptr_t subkey = rtree_subkey(key, RTREE_HEIGHT-1);
- elm = &leaf[subkey];
-
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree,
- elm, true);
- *r_szind = rtree_leaf_elm_bits_szind_get(bits);
- *r_slab = rtree_leaf_elm_bits_slab_get(bits);
-#else
- *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, true);
- *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, true);
-#endif
- return true;
- } else {
- return false;
+JEMALLOC_ALWAYS_INLINE void
+rtree_write_range_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t base, uintptr_t end, rtree_contents_t contents, bool clearing) {
+ assert((base & PAGE_MASK) == 0 && (end & PAGE_MASK) == 0);
+ /*
+ * Only used for emap_(de)register_interior, which implies the
+ * boundaries have been registered already. Therefore all the lookups
+ * are dependent w/o init_missing, assuming the range spans across at
+ * most 2 rtree leaf nodes (each covers 1 GiB of vaddr).
+ */
+ void *bits;
+ unsigned additional;
+ rtree_contents_encode(contents, &bits, &additional);
+
+ rtree_leaf_elm_t *elm = NULL; /* Dead store. */
+ for (uintptr_t addr = base; addr <= end; addr += PAGE) {
+ if (addr == base ||
+ (addr & ((ZU(1) << rtree_leaf_maskbits()) - 1)) == 0) {
+ elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
+ /* dependent */ true, /* init_missing */ false);
+ assert(elm != NULL);
+ }
+ assert(elm == rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx, addr,
+ /* dependent */ true, /* init_missing */ false));
+ assert(!clearing || rtree_leaf_elm_read(tsdn, rtree, elm,
+ /* dependent */ true).edata != NULL);
+ rtree_leaf_elm_write_commit(tsdn, rtree, elm, bits, additional);
+ elm++;
}
}
+
+JEMALLOC_ALWAYS_INLINE void
+rtree_write_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t base, uintptr_t end, rtree_contents_t contents) {
+ rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents,
+ /* clearing */ false);
+}
+
JEMALLOC_ALWAYS_INLINE bool
-rtree_szind_slab_read(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, bool dependent, szind_t *r_szind, bool *r_slab) {
- rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key,
- dependent);
- if (!dependent && elm == NULL) {
+rtree_write(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx, uintptr_t key,
+ rtree_contents_t contents) {
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
+ key, /* dependent */ false, /* init_missing */ true);
+ if (elm == NULL) {
return true;
}
-#ifdef RTREE_LEAF_COMPACT
- uintptr_t bits = rtree_leaf_elm_bits_read(tsdn, rtree, elm, dependent);
- *r_szind = rtree_leaf_elm_bits_szind_get(bits);
- *r_slab = rtree_leaf_elm_bits_slab_get(bits);
-#else
- *r_szind = rtree_leaf_elm_szind_read(tsdn, rtree, elm, dependent);
- *r_slab = rtree_leaf_elm_slab_read(tsdn, rtree, elm, dependent);
-#endif
- return false;
-}
-static inline void
-rtree_szind_slab_update(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
- uintptr_t key, szind_t szind, bool slab) {
- assert(!slab || szind < SC_NBINS);
+ rtree_leaf_elm_write(tsdn, rtree, elm, contents);
- rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
- rtree_leaf_elm_szind_slab_update(tsdn, rtree, elm, szind, slab);
+ return false;
}
static inline void
rtree_clear(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
uintptr_t key) {
- rtree_leaf_elm_t *elm = rtree_read(tsdn, rtree, rtree_ctx, key, true);
- assert(rtree_leaf_elm_extent_read(tsdn, rtree, elm, false) !=
- NULL);
- rtree_leaf_elm_write(tsdn, rtree, elm, NULL, SC_NSIZES, false);
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, rtree, rtree_ctx,
+ key, /* dependent */ true, /* init_missing */ false);
+ assert(elm != NULL);
+ assert(rtree_leaf_elm_read(tsdn, rtree, elm,
+ /* dependent */ true).edata != NULL);
+ rtree_contents_t contents;
+ contents.edata = NULL;
+ contents.metadata.szind = SC_NSIZES;
+ contents.metadata.slab = false;
+ contents.metadata.is_head = false;
+ contents.metadata.state = (extent_state_t)0;
+ rtree_leaf_elm_write(tsdn, rtree, elm, contents);
+}
+
+static inline void
+rtree_clear_range(tsdn_t *tsdn, rtree_t *rtree, rtree_ctx_t *rtree_ctx,
+ uintptr_t base, uintptr_t end) {
+ rtree_contents_t contents;
+ contents.edata = NULL;
+ contents.metadata.szind = SC_NSIZES;
+ contents.metadata.slab = false;
+ contents.metadata.is_head = false;
+ contents.metadata.state = (extent_state_t)0;
+ rtree_write_range_impl(tsdn, rtree, rtree_ctx, base, end, contents,
+ /* clearing */ true);
}
#endif /* JEMALLOC_INTERNAL_RTREE_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/rtree_tsd.h b/contrib/jemalloc/include/jemalloc/internal/rtree_tsd.h
index 562e29297a76..e45525c5e6a3 100644
--- a/contrib/jemalloc/include/jemalloc/internal/rtree_tsd.h
+++ b/contrib/jemalloc/include/jemalloc/internal/rtree_tsd.h
@@ -18,16 +18,28 @@
* cache misses if made overly large, plus the cost of linear search in the LRU
* cache.
*/
-#define RTREE_CTX_LG_NCACHE 4
-#define RTREE_CTX_NCACHE (1 << RTREE_CTX_LG_NCACHE)
+#define RTREE_CTX_NCACHE 16
#define RTREE_CTX_NCACHE_L2 8
+/* Needed for initialization only. */
+#define RTREE_LEAFKEY_INVALID ((uintptr_t)1)
+#define RTREE_CTX_CACHE_ELM_INVALID {RTREE_LEAFKEY_INVALID, NULL}
+
+#define RTREE_CTX_INIT_ELM_1 RTREE_CTX_CACHE_ELM_INVALID
+#define RTREE_CTX_INIT_ELM_2 RTREE_CTX_INIT_ELM_1, RTREE_CTX_INIT_ELM_1
+#define RTREE_CTX_INIT_ELM_4 RTREE_CTX_INIT_ELM_2, RTREE_CTX_INIT_ELM_2
+#define RTREE_CTX_INIT_ELM_8 RTREE_CTX_INIT_ELM_4, RTREE_CTX_INIT_ELM_4
+#define RTREE_CTX_INIT_ELM_16 RTREE_CTX_INIT_ELM_8, RTREE_CTX_INIT_ELM_8
+
+#define _RTREE_CTX_INIT_ELM_DATA(n) RTREE_CTX_INIT_ELM_##n
+#define RTREE_CTX_INIT_ELM_DATA(n) _RTREE_CTX_INIT_ELM_DATA(n)
+
/*
- * Zero initializer required for tsd initialization only. Proper initialization
- * done via rtree_ctx_data_init().
+ * Static initializer (to invalidate the cache entries) is required because the
+ * free fastpath may access the rtree cache before a full tsd initialization.
*/
-#define RTREE_CTX_ZERO_INITIALIZER {{{0, 0}}, {{0, 0}}}
-
+#define RTREE_CTX_INITIALIZER {{RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE)}, \
+ {RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE_L2)}}
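Spelling out the doubling trick (the cache/l2_cache field names are assumed from rtree_ctx_t):

/* RTREE_CTX_INIT_ELM_DATA(RTREE_CTX_NCACHE) first expands its argument to
 * 16, then pastes, yielding RTREE_CTX_INIT_ELM_16: sixteen copies of
 * {RTREE_LEAFKEY_INVALID, NULL}.  So the initializer stays in sync with
 * the cache sizes without listing entries by hand: */
rtree_ctx_t rtree_ctx = RTREE_CTX_INITIALIZER;
/* Every rtree_ctx.cache[i] and rtree_ctx.l2_cache[i] slot now holds an
 * invalid leafkey and a NULL leaf pointer. */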
typedef struct rtree_leaf_elm_s rtree_leaf_elm_t;
diff --git a/contrib/jemalloc/include/jemalloc/internal/safety_check.h b/contrib/jemalloc/include/jemalloc/internal/safety_check.h
index 53339ac12f2d..f1a74f174dee 100644
--- a/contrib/jemalloc/include/jemalloc/internal/safety_check.h
+++ b/contrib/jemalloc/include/jemalloc/internal/safety_check.h
@@ -1,9 +1,14 @@
#ifndef JEMALLOC_INTERNAL_SAFETY_CHECK_H
#define JEMALLOC_INTERNAL_SAFETY_CHECK_H
+void safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr,
+ size_t true_size, size_t input_size);
void safety_check_fail(const char *format, ...);
+
+typedef void (*safety_check_abort_hook_t)(const char *message);
+
/* Can set to NULL for a default. */
-void safety_check_set_abort(void (*abort_fn)());
+void safety_check_set_abort(safety_check_abort_hook_t abort_fn);
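A minimal sketch of registering a hook through the now-typed interface (the hook body and names are illustrative):

#include <stdio.h>
#include <stdlib.h>

static void
my_abort_hook(const char *message) {
	fprintf(stderr, "jemalloc safety check: %s\n", message);
	abort();
}

static void
install_hook(void) {
	safety_check_set_abort(my_abort_hook);
	/* safety_check_set_abort(NULL) would restore the default behavior. */
}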
JEMALLOC_ALWAYS_INLINE void
safety_check_set_redzone(void *ptr, size_t usize, size_t bumped_usize) {
diff --git a/contrib/jemalloc/include/jemalloc/internal/san.h b/contrib/jemalloc/include/jemalloc/internal/san.h
new file mode 100644
index 000000000000..8813d6bbe7de
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/san.h
@@ -0,0 +1,191 @@
+#ifndef JEMALLOC_INTERNAL_GUARD_H
+#define JEMALLOC_INTERNAL_GUARD_H
+
+#include "jemalloc/internal/ehooks.h"
+#include "jemalloc/internal/emap.h"
+
+#define SAN_PAGE_GUARD PAGE
+#define SAN_PAGE_GUARDS_SIZE (SAN_PAGE_GUARD * 2)
+
+#define SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT 0
+#define SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT 0
+
+#define SAN_LG_UAF_ALIGN_DEFAULT (-1)
+#define SAN_CACHE_BIN_NONFAST_MASK_DEFAULT (uintptr_t)(-1)
+
+static const uintptr_t uaf_detect_junk = (uintptr_t)0x5b5b5b5b5b5b5b5bULL;
+
+/* 0 means disabled, i.e. never guarded. */
+extern size_t opt_san_guard_large;
+extern size_t opt_san_guard_small;
+/* -1 means disabled, i.e. never check for use-after-free. */
+extern ssize_t opt_lg_san_uaf_align;
+
+void san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ emap_t *emap, bool left, bool right, bool remap);
+void san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ emap_t *emap, bool left, bool right);
+/*
+ * Unguard the extent, but don't modify emap boundaries. Must be called on an
+ * extent that has been erased from emap and shouldn't be placed back.
+ */
+void san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks,
+ edata_t *edata, emap_t *emap);
+void san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize);
+
+void tsd_san_init(tsd_t *tsd);
+void san_init(ssize_t lg_san_uaf_align);
+
+static inline void
+san_guard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ emap_t *emap, bool remap) {
+ san_guard_pages(tsdn, ehooks, edata, emap, true, true, remap);
+}
+
+static inline void
+san_unguard_pages_two_sided(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ emap_t *emap) {
+ san_unguard_pages(tsdn, ehooks, edata, emap, true, true);
+}
+
+static inline size_t
+san_two_side_unguarded_sz(size_t size) {
+ assert(size % PAGE == 0);
+ assert(size >= SAN_PAGE_GUARDS_SIZE);
+ return size - SAN_PAGE_GUARDS_SIZE;
+}
+
+static inline size_t
+san_two_side_guarded_sz(size_t size) {
+ assert(size % PAGE == 0);
+ return size + SAN_PAGE_GUARDS_SIZE;
+}
+
+static inline size_t
+san_one_side_unguarded_sz(size_t size) {
+ assert(size % PAGE == 0);
+ assert(size >= SAN_PAGE_GUARD);
+ return size - SAN_PAGE_GUARD;
+}
+
+static inline size_t
+san_one_side_guarded_sz(size_t size) {
+ assert(size % PAGE == 0);
+ return size + SAN_PAGE_GUARD;
+}
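Concretely, assuming PAGE == 4096, the guarded/unguarded conversions are plain additions and subtractions of the one-page guards:

static void
san_guard_sz_example(void) {
	assert(san_two_side_guarded_sz(16384) == 16384 + 2 * 4096);
	assert(san_two_side_unguarded_sz(16384 + 2 * 4096) == 16384);
	assert(san_one_side_guarded_sz(16384) == 16384 + 4096);
}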
+
+static inline bool
+san_guard_enabled(void) {
+ return (opt_san_guard_large != 0 || opt_san_guard_small != 0);
+}
+
+static inline bool
+san_large_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks, size_t size,
+ size_t alignment) {
+ if (opt_san_guard_large == 0 || ehooks_guard_will_fail(ehooks) ||
+ tsdn_null(tsdn)) {
+ return false;
+ }
+
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ uint64_t n = tsd_san_extents_until_guard_large_get(tsd);
+ assert(n >= 1);
+ if (n > 1) {
+ /*
+ * Subtract conditionally because the guard may not happen due
+ * to alignment or size restriction below.
+ */
+ *tsd_san_extents_until_guard_largep_get(tsd) = n - 1;
+ }
+
+ if (n == 1 && (alignment <= PAGE) &&
+ (san_two_side_guarded_sz(size) <= SC_LARGE_MAXCLASS)) {
+ *tsd_san_extents_until_guard_largep_get(tsd) =
+ opt_san_guard_large;
+ return true;
+ } else {
+ assert(tsd_san_extents_until_guard_large_get(tsd) >= 1);
+ return false;
+ }
+}
+
+static inline bool
+san_slab_extent_decide_guard(tsdn_t *tsdn, ehooks_t *ehooks) {
+ if (opt_san_guard_small == 0 || ehooks_guard_will_fail(ehooks) ||
+ tsdn_null(tsdn)) {
+ return false;
+ }
+
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ uint64_t n = tsd_san_extents_until_guard_small_get(tsd);
+ assert(n >= 1);
+ if (n == 1) {
+ *tsd_san_extents_until_guard_smallp_get(tsd) =
+ opt_san_guard_small;
+ return true;
+ } else {
+ *tsd_san_extents_until_guard_smallp_get(tsd) = n - 1;
+ assert(tsd_san_extents_until_guard_small_get(tsd) >= 1);
+ return false;
+ }
+}
+
+static inline void
+san_junk_ptr_locations(void *ptr, size_t usize, void **first, void **mid,
+ void **last) {
+ size_t ptr_sz = sizeof(void *);
+
+ *first = ptr;
+
+ *mid = (void *)((uintptr_t)ptr + ((usize >> 1) & ~(ptr_sz - 1)));
+ assert(*first != *mid || usize == ptr_sz);
+ assert((uintptr_t)*first <= (uintptr_t)*mid);
+
+ /*
+ * When usize > 32K, the gap between requested_size and usize might be
+	 * greater than 4K -- this means the last write may access a
+ * likely-untouched page (default settings w/ 4K pages). However by
+ * default the tcache only goes up to the 32K size class, and is usually
+ * tuned lower instead of higher, which makes it less of a concern.
+ */
+ *last = (void *)((uintptr_t)ptr + usize - sizeof(uaf_detect_junk));
+ assert(*first != *last || usize == ptr_sz);
+ assert(*mid != *last || usize <= ptr_sz * 2);
+ assert((uintptr_t)*mid <= (uintptr_t)*last);
+}
+
+static inline bool
+san_junk_ptr_should_slow(void) {
+ /*
+ * The latter condition (pointer size greater than the min size class)
+ * is not expected -- fall back to the slow path for simplicity.
+ */
+ return config_debug || (LG_SIZEOF_PTR > SC_LG_TINY_MIN);
+}
+
+static inline void
+san_junk_ptr(void *ptr, size_t usize) {
+ if (san_junk_ptr_should_slow()) {
+ memset(ptr, (char)uaf_detect_junk, usize);
+ return;
+ }
+
+ void *first, *mid, *last;
+ san_junk_ptr_locations(ptr, usize, &first, &mid, &last);
+ *(uintptr_t *)first = uaf_detect_junk;
+ *(uintptr_t *)mid = uaf_detect_junk;
+ *(uintptr_t *)last = uaf_detect_junk;
+}
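A worked instance of the three-word fast path, assuming 8-byte pointers and usize == 4096 (the buffer is a made-up stand-in):

static void
san_junk_ptr_locations_example(void) {
	static char buf[4096];	/* Stands in for a region with usize == 4096. */
	void *first, *mid, *last;

	san_junk_ptr_locations(buf, sizeof(buf), &first, &mid, &last);
	assert(first == (void *)buf);
	assert(mid == (void *)(buf + 2048));	/* (4096 >> 1) & ~7 */
	assert(last == (void *)(buf + 4088));	/* 4096 - sizeof(uaf_detect_junk) */
}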
+
+static inline bool
+san_uaf_detection_enabled(void) {
+ bool ret = config_uaf_detection && (opt_lg_san_uaf_align != -1);
+ if (config_uaf_detection && ret) {
+ assert(san_cache_bin_nonfast_mask == ((uintptr_t)1 <<
+ opt_lg_san_uaf_align) - 1);
+ }
+
+ return ret;
+}
+
+#endif /* JEMALLOC_INTERNAL_GUARD_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/san_bump.h b/contrib/jemalloc/include/jemalloc/internal/san_bump.h
new file mode 100644
index 000000000000..8ec4a710d6f4
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/san_bump.h
@@ -0,0 +1,52 @@
+#ifndef JEMALLOC_INTERNAL_SAN_BUMP_H
+#define JEMALLOC_INTERNAL_SAN_BUMP_H
+
+#include "jemalloc/internal/edata.h"
+#include "jemalloc/internal/exp_grow.h"
+#include "jemalloc/internal/mutex.h"
+
+#define SBA_RETAINED_ALLOC_SIZE ((size_t)4 << 20)
+
+extern bool opt_retain;
+
+typedef struct ehooks_s ehooks_t;
+typedef struct pac_s pac_t;
+
+typedef struct san_bump_alloc_s san_bump_alloc_t;
+struct san_bump_alloc_s {
+ malloc_mutex_t mtx;
+
+ edata_t *curr_reg;
+};
+
+static inline bool
+san_bump_enabled() {
+ /*
+	 * We enable the san_bump allocator only when it's possible to break up
+	 * a mapping and unmap part of it (maps_coalesce). This is needed so
+	 * that arena destruction can destroy all retained guarded extents one
+	 * by one, and so that a trailing part of a retained guarded region can
+	 * be unmapped when it's too small to fit a pending allocation.
+ * opt_retain is required, because this allocator retains a large
+ * virtual memory mapping and returns smaller parts of it.
+ */
+ return maps_coalesce && opt_retain;
+}
+
+static inline bool
+san_bump_alloc_init(san_bump_alloc_t* sba) {
+ bool err = malloc_mutex_init(&sba->mtx, "sanitizer_bump_allocator",
+ WITNESS_RANK_SAN_BUMP_ALLOC, malloc_mutex_rank_exclusive);
+ if (err) {
+ return true;
+ }
+ sba->curr_reg = NULL;
+
+ return false;
+}
+
+edata_t *
+san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac, ehooks_t *ehooks,
+ size_t size, bool zero);
+
+#endif /* JEMALLOC_INTERNAL_SAN_BUMP_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/sc.h b/contrib/jemalloc/include/jemalloc/internal/sc.h
index 9a099d8b6457..9bab347beeed 100644
--- a/contrib/jemalloc/include/jemalloc/internal/sc.h
+++ b/contrib/jemalloc/include/jemalloc/internal/sc.h
@@ -197,30 +197,34 @@
(SC_LG_BASE_MAX - SC_LG_FIRST_REGULAR_BASE + 1) - 1)
#define SC_NSIZES (SC_NTINY + SC_NPSEUDO + SC_NREGULAR)
-/* The number of size classes that are a multiple of the page size. */
-#define SC_NPSIZES ( \
- /* Start with all the size classes. */ \
- SC_NSIZES \
- /* Subtract out those groups with too small a base. */ \
- - (LG_PAGE - 1 - SC_LG_FIRST_REGULAR_BASE) * SC_NGROUP \
- /* And the pseudo-group. */ \
- - SC_NPSEUDO \
- /* And the tiny group. */ \
- - SC_NTINY \
- /* Sizes where ndelta*delta is not a multiple of the page size. */ \
- - (SC_LG_NGROUP * SC_NGROUP))
/*
- * Note that the last line is computed as the sum of the second column in the
- * following table:
- * lg(base) | count of sizes to exclude
- * ------------------------------|-----------------------------
- * LG_PAGE - 1 | SC_NGROUP - 1
- * LG_PAGE | SC_NGROUP - 1
- * LG_PAGE + 1 | SC_NGROUP - 2
- * LG_PAGE + 2 | SC_NGROUP - 4
- * ... | ...
- * LG_PAGE + (SC_LG_NGROUP - 1) | SC_NGROUP - (SC_NGROUP / 2)
+ * The number of size classes that are a multiple of the page size.
+ *
+ * Here are the first few bases that have a page-sized SC.
+ *
+ * lg(base) | base | highest SC | page-multiple SCs
+ * --------------|----------|------------|------------------
+ * LG_PAGE - 1 | PAGE / 2 | PAGE | 1
+ * LG_PAGE | PAGE | 2 * PAGE | 1
+ * LG_PAGE + 1 | 2 * PAGE | 4 * PAGE | 2
+ * LG_PAGE + 2 | 4 * PAGE | 8 * PAGE | 4
+ *
+ * The number of page-multiple SCs continues to grow in powers of two, up until
+ * lg_delta == lg_page, which corresponds to setting lg_base to lg_page +
+ * SC_LG_NGROUP. So the number of size classes that are multiples of the
+ * page size and whose lg_delta is less than the page size is
+ * 1 + (2**0 + 2**1 + ... + 2**(lg_ngroup - 1)) == 2**lg_ngroup.
+ *
+ * For each base with lg_base in [lg_page + lg_ngroup, lg_base_max), there are
+ * NGROUP page-sized size classes, and when lg_base == lg_base_max, there are
+ * NGROUP - 1.
+ *
+ * This gives us the quantity we seek.
*/
+#define SC_NPSIZES ( \
+ SC_NGROUP \
+ + (SC_LG_BASE_MAX - (LG_PAGE + SC_LG_NGROUP)) * SC_NGROUP \
+ + SC_NGROUP - 1)
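As a sanity check of the formula, plugging in common 64-bit parameters (LG_PAGE == 12, SC_LG_NGROUP == 2, and, illustratively, SC_LG_BASE_MAX == 62 -- the last is configuration-dependent):

SC_NPSIZES = 4 + (62 - (12 + 2)) * 4 + (4 - 1) = 4 + 192 + 3 = 199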
/*
* We declare a size class is binnable if size < page size * group. Or, in other
@@ -242,17 +246,23 @@
# error "Too many small size classes"
#endif
-/* The largest size class in the lookup table. */
-#define SC_LOOKUP_MAXCLASS ((size_t)1 << 12)
+/* The largest size class in the lookup table, and its binary log. */
+#define SC_LG_MAX_LOOKUP 12
+#define SC_LOOKUP_MAXCLASS (1 << SC_LG_MAX_LOOKUP)
/* Internal, only used for the definition of SC_SMALL_MAXCLASS. */
-#define SC_SMALL_MAX_BASE ((size_t)1 << (LG_PAGE + SC_LG_NGROUP - 1))
-#define SC_SMALL_MAX_DELTA ((size_t)1 << (LG_PAGE - 1))
+#define SC_SMALL_MAX_BASE (1 << (LG_PAGE + SC_LG_NGROUP - 1))
+#define SC_SMALL_MAX_DELTA (1 << (LG_PAGE - 1))
/* The largest size class allocated out of a slab. */
#define SC_SMALL_MAXCLASS (SC_SMALL_MAX_BASE \
+ (SC_NGROUP - 1) * SC_SMALL_MAX_DELTA)
+/* The fastpath assumes all lookup-able sizes are small. */
+#if (SC_SMALL_MAXCLASS < SC_LOOKUP_MAXCLASS)
+# error "Lookup table sizes must be small"
+#endif
+
/* The smallest size class not allocated out of a slab. */
#define SC_LARGE_MINCLASS ((size_t)1ULL << (LG_PAGE + SC_LG_NGROUP))
#define SC_LG_LARGE_MINCLASS (LG_PAGE + SC_LG_NGROUP)
@@ -264,6 +274,19 @@
/* The largest size class supported. */
#define SC_LARGE_MAXCLASS (SC_MAX_BASE + (SC_NGROUP - 1) * SC_MAX_DELTA)
+/* Maximum number of regions in one slab. */
+#ifndef CONFIG_LG_SLAB_MAXREGS
+# define SC_LG_SLAB_MAXREGS (LG_PAGE - SC_LG_TINY_MIN)
+#else
+# if CONFIG_LG_SLAB_MAXREGS < (LG_PAGE - SC_LG_TINY_MIN)
+# error "Unsupported SC_LG_SLAB_MAXREGS"
+# else
+# define SC_LG_SLAB_MAXREGS CONFIG_LG_SLAB_MAXREGS
+# endif
+#endif
+
+#define SC_SLAB_MAXREGS (1U << SC_LG_SLAB_MAXREGS)
+
typedef struct sc_s sc_t;
struct sc_s {
/* Size class index, or -1 if not a valid size class. */
@@ -321,10 +344,11 @@ struct sc_data_s {
sc_t sc[SC_NSIZES];
};
+size_t reg_size_compute(int lg_base, int lg_delta, int ndelta);
void sc_data_init(sc_data_t *data);
/*
* Updates slab sizes in [begin, end] to be pgs pages in length, if possible.
- * Otherwise, does its best to accomodate the request.
+ * Otherwise, does its best to accommodate the request.
*/
void sc_data_update_slab_size(sc_data_t *data, size_t begin, size_t end,
int pgs);
diff --git a/contrib/jemalloc/include/jemalloc/internal/sec.h b/contrib/jemalloc/include/jemalloc/internal/sec.h
new file mode 100644
index 000000000000..fa863382db97
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/sec.h
@@ -0,0 +1,120 @@
+#ifndef JEMALLOC_INTERNAL_SEC_H
+#define JEMALLOC_INTERNAL_SEC_H
+
+#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/pai.h"
+
+/*
+ * Small extent cache.
+ *
+ * This includes some utilities to cache small extents. We have a per-pszind
+ * bin with its own list of extents of that size. We don't try to do any
+ * coalescing of extents (since it would in general require cross-shard locks or
+ * knowledge of the underlying PAI implementation).
+ */
+
+/*
+ * For now, this is just one field; eventually, we'll probably want to get more
+ * fine-grained data out (like per-size class statistics).
+ */
+typedef struct sec_stats_s sec_stats_t;
+struct sec_stats_s {
+ /* Sum of bytes_cur across all shards. */
+ size_t bytes;
+};
+
+static inline void
+sec_stats_accum(sec_stats_t *dst, sec_stats_t *src) {
+ dst->bytes += src->bytes;
+}
+
+/* A collection of free extents, all of the same size. */
+typedef struct sec_bin_s sec_bin_t;
+struct sec_bin_s {
+ /*
+ * When we fail to fulfill an allocation, we do a batch-alloc on the
+ * underlying allocator to fill extra items, as well. We drop the SEC
+ * lock while doing so, to allow operations on other bins to succeed.
+ * That introduces the possibility of other threads also trying to
+ * allocate out of this bin, failing, and also going to the backing
+ * allocator. To avoid a thundering herd problem in which lots of
+ * threads do batch allocs and overfill this bin as a result, we only
+ * allow one batch allocation at a time for a bin. This bool tracks
+ * whether or not some thread is already batch allocating.
+ *
+ * Eventually, the right answer may be a smarter sharding policy for the
+ * bins (e.g. a mutex per bin, which would also be more scalable
+ * generally; the batch-allocating thread could hold it while
+ * batch-allocating).
+ */
+ bool being_batch_filled;
+
+ /*
+	 * Number of bytes in this particular bin (as opposed to the
+	 * sec_shard_t's bytes_cur). This isn't user visible or reported in
+ * stats; rather, it allows us to quickly determine the change in the
+ * centralized counter when flushing.
+ */
+ size_t bytes_cur;
+ edata_list_active_t freelist;
+};
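As referenced in the struct comment above, a simplified sketch of the gating around being_batch_filled (the real refill logic lives in sec.c; the helper name is hypothetical):

static bool
sec_bin_try_claim_fill(tsdn_t *tsdn, sec_shard_t *shard, sec_bin_t *bin) {
	bool claimed = false;

	malloc_mutex_lock(tsdn, &shard->mtx);
	if (!bin->being_batch_filled) {
		/* We become the designated filler; concurrent threads fall
		 * through to the fallback PAI instead of piling on. */
		bin->being_batch_filled = true;
		claimed = true;
	}
	malloc_mutex_unlock(tsdn, &shard->mtx);
	/* The claimant batch-allocs without the lock, then re-locks, stashes
	 * the extras on bin->freelist, and clears being_batch_filled. */
	return claimed;
}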
+
+typedef struct sec_shard_s sec_shard_t;
+struct sec_shard_s {
+ /*
+ * We don't keep per-bin mutexes, even though that would allow more
+ * sharding; this allows global cache-eviction, which in turn allows for
+ * better balancing across free lists.
+ */
+ malloc_mutex_t mtx;
+ /*
+ * A SEC may need to be shut down (i.e. flushed of its contents and
+ * prevented from further caching). To avoid tricky synchronization
+ * issues, we just track enabled-status in each shard, guarded by a
+ * mutex. In practice, this is only ever checked during brief races,
+ * since the arena-level atomic boolean tracking HPA enabled-ness means
+ * that we won't go down these pathways very often after custom extent
+ * hooks are installed.
+ */
+ bool enabled;
+ sec_bin_t *bins;
+ /* Number of bytes in all bins in the shard. */
+ size_t bytes_cur;
+ /* The next pszind to flush in the flush-some pathways. */
+ pszind_t to_flush_next;
+};
+
+typedef struct sec_s sec_t;
+struct sec_s {
+ pai_t pai;
+ pai_t *fallback;
+
+ sec_opts_t opts;
+ sec_shard_t *shards;
+ pszind_t npsizes;
+};
+
+bool sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback,
+ const sec_opts_t *opts);
+void sec_flush(tsdn_t *tsdn, sec_t *sec);
+void sec_disable(tsdn_t *tsdn, sec_t *sec);
+
+/*
+ * Morally, these two stats methods probably ought to be a single one (and the
+ * mutex_prof_data ought to live in the sec_stats_t). But splitting them apart
+ * lets them fit easily into the pa_shard stats framework (which also has this
+ * split), which simplifies the stats management.
+ */
+void sec_stats_merge(tsdn_t *tsdn, sec_t *sec, sec_stats_t *stats);
+void sec_mutex_stats_read(tsdn_t *tsdn, sec_t *sec,
+ mutex_prof_data_t *mutex_prof_data);
+
+/*
+ * We use the arena lock ordering; these are acquired in phase 2 of forking, but
+ * should be acquired before the underlying allocator mutexes.
+ */
+void sec_prefork2(tsdn_t *tsdn, sec_t *sec);
+void sec_postfork_parent(tsdn_t *tsdn, sec_t *sec);
+void sec_postfork_child(tsdn_t *tsdn, sec_t *sec);
+
+#endif /* JEMALLOC_INTERNAL_SEC_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/sec_opts.h b/contrib/jemalloc/include/jemalloc/internal/sec_opts.h
new file mode 100644
index 000000000000..a3ad72fbece5
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/sec_opts.h
@@ -0,0 +1,59 @@
+#ifndef JEMALLOC_INTERNAL_SEC_OPTS_H
+#define JEMALLOC_INTERNAL_SEC_OPTS_H
+
+/*
+ * The configuration settings used by an sec_t. Morally, this is part of the
+ * SEC interface, but we put it here for header-ordering reasons.
+ */
+
+typedef struct sec_opts_s sec_opts_t;
+struct sec_opts_s {
+ /*
+ * We don't necessarily always use all the shards; requests are
+ * distributed across shards [0, nshards - 1).
+ */
+ size_t nshards;
+ /*
+ * We'll automatically refuse to cache any objects in this sec if
+ * they're larger than max_alloc bytes, instead forwarding such objects
+ * directly to the fallback.
+ */
+ size_t max_alloc;
+ /*
+ * Exceeding this amount of cached extents in a shard causes us to start
+ * flushing bins in that shard until we fall below bytes_after_flush.
+ */
+ size_t max_bytes;
+ /*
+ * The number of bytes (in all bins) we flush down to when we exceed
+	 * max_bytes. We want this to be less than max_bytes, because
+	 * otherwise we could get into situations where a shard undergoing
+ * net-deallocation keeps bytes_cur very near to max_bytes, so that
+ * most deallocations get immediately forwarded to the underlying PAI
+ * implementation, defeating the point of the SEC.
+ */
+ size_t bytes_after_flush;
+ /*
+ * When we can't satisfy an allocation out of the SEC because there are
+ * no available ones cached, we allocate multiple of that size out of
+ * the fallback allocator. Eventually we might want to do something
+ * cleverer, but for now we just grab a fixed number.
+ */
+ size_t batch_fill_extra;
+};
+
+#define SEC_OPTS_DEFAULT { \
+ /* nshards */ \
+ 4, \
+ /* max_alloc */ \
+ (32 * 1024) < PAGE ? PAGE : (32 * 1024), \
+ /* max_bytes */ \
+ 256 * 1024, \
+ /* bytes_after_flush */ \
+ 128 * 1024, \
+ /* batch_fill_extra */ \
+ 0 \
+}
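With a typical 4 KiB page, these defaults evaluate as follows:

sec_opts_t opts = SEC_OPTS_DEFAULT;
/* nshards == 4, max_alloc == 32 KiB (the PAGE fallback only matters on
 * systems whose page exceeds 32 KiB), max_bytes == 256 KiB,
 * bytes_after_flush == 128 KiB, batch_fill_extra == 0. */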
+
+
+#endif /* JEMALLOC_INTERNAL_SEC_OPTS_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/slab_data.h b/contrib/jemalloc/include/jemalloc/internal/slab_data.h
new file mode 100644
index 000000000000..e821863d8d1c
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/slab_data.h
@@ -0,0 +1,12 @@
+#ifndef JEMALLOC_INTERNAL_SLAB_DATA_H
+#define JEMALLOC_INTERNAL_SLAB_DATA_H
+
+#include "jemalloc/internal/bitmap.h"
+
+typedef struct slab_data_s slab_data_t;
+struct slab_data_s {
+ /* Per region allocated/deallocated bitmap. */
+ bitmap_t bitmap[BITMAP_GROUPS_MAX];
+};
+
+#endif /* JEMALLOC_INTERNAL_SLAB_DATA_H */
diff --git a/contrib/jemalloc/include/jemalloc/internal/smoothstep.sh b/contrib/jemalloc/include/jemalloc/internal/smoothstep.sh
new file mode 100755
index 000000000000..65de97bf46a8
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/smoothstep.sh
@@ -0,0 +1,101 @@
+#!/bin/sh
+#
+# Generate a discrete lookup table for a sigmoid function in the smoothstep
+# family (https://en.wikipedia.org/wiki/Smoothstep), where the lookup table
+# entries correspond to x in [1/nsteps, 2/nsteps, ..., nsteps/nsteps]. Encode
+# the entries using a binary fixed point representation.
+#
+# Usage: smoothstep.sh <variant> <nsteps> <bfp> <xprec> <yprec>
+#
+# <variant> is in {smooth, smoother, smoothest}.
+# <nsteps> must be greater than zero.
+# <bfp> must be in [0..62]; reasonable values are roughly [10..30].
+# <xprec> is x decimal precision.
+# <yprec> is y decimal precision.
+
+#set -x
+
+cmd="sh smoothstep.sh $*"
+variant=$1
+nsteps=$2
+bfp=$3
+xprec=$4
+yprec=$5
+
+case "${variant}" in
+ smooth)
+ ;;
+ smoother)
+ ;;
+ smoothest)
+ ;;
+ *)
+ echo "Unsupported variant"
+ exit 1
+ ;;
+esac
+
+smooth() {
+ step=$1
+ y=`echo ${yprec} k ${step} ${nsteps} / sx _2 lx 3 ^ '*' 3 lx 2 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
+ h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
+}
+
+smoother() {
+ step=$1
+ y=`echo ${yprec} k ${step} ${nsteps} / sx 6 lx 5 ^ '*' _15 lx 4 ^ '*' + 10 lx 3 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
+ h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
+}
+
+smoothest() {
+ step=$1
+ y=`echo ${yprec} k ${step} ${nsteps} / sx _20 lx 7 ^ '*' 70 lx 6 ^ '*' + _84 lx 5 ^ '*' + 35 lx 4 ^ '*' + p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
+ h=`echo ${yprec} k 2 ${bfp} ^ ${y} '*' p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g' | tr '.' ' ' | awk '{print $1}' `
+}
+
+cat <<EOF
+#ifndef JEMALLOC_INTERNAL_SMOOTHSTEP_H
+#define JEMALLOC_INTERNAL_SMOOTHSTEP_H
+
+/*
+ * This file was generated by the following command:
+ * $cmd
+ */
+/******************************************************************************/
+
+/*
+ * This header defines a precomputed table based on the smoothstep family of
+ * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0
+ * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so
+ * that floating point math can be avoided.
+ *
+ * 3 2
+ * smoothstep(x) = -2x + 3x
+ *
+ * 5 4 3
+ * smootherstep(x) = 6x - 15x + 10x
+ *
+ * 7 6 5 4
+ * smootheststep(x) = -20x + 70x - 84x + 35x
+ */
+
+#define SMOOTHSTEP_VARIANT "${variant}"
+#define SMOOTHSTEP_NSTEPS ${nsteps}
+#define SMOOTHSTEP_BFP ${bfp}
+#define SMOOTHSTEP \\
+ /* STEP(step, h, x, y) */ \\
+EOF
+
+s=1
+while [ $s -le $nsteps ] ; do
+ $variant ${s}
+ x=`echo ${xprec} k ${s} ${nsteps} / p | dc | tr -d '\\\\\n' | sed -e 's#^\.#0.#g'`
+ printf ' STEP(%4d, UINT64_C(0x%016x), %s, %s) \\\n' ${s} ${h} ${x} ${y}
+
+ s=$((s+1))
+done
+echo
+
+cat <<EOF
+#endif /* JEMALLOC_INTERNAL_SMOOTHSTEP_H */
+EOF
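For reference, the generated smoothstep.h shipped with stock jemalloc records an invocation along these lines (treat the exact parameters as illustrative):

sh smoothstep.sh smoother 200 24 3 3 > smoothstep.h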
diff --git a/contrib/jemalloc/include/jemalloc/internal/stats.h b/contrib/jemalloc/include/jemalloc/internal/stats.h
index 3b9e0eac12b8..727f7dcbd718 100644
--- a/contrib/jemalloc/include/jemalloc/internal/stats.h
+++ b/contrib/jemalloc/include/jemalloc/internal/stats.h
@@ -11,7 +11,8 @@
OPTION('b', bins, true, false) \
OPTION('l', large, true, false) \
OPTION('x', mutex, true, false) \
- OPTION('e', extents, true, false)
+ OPTION('e', extents, true, false) \
+ OPTION('h', hpa, config_stats, false)
enum {
#define OPTION(o, v, d, s) stats_print_option_num_##v,
@@ -24,8 +25,30 @@ enum {
extern bool opt_stats_print;
extern char opt_stats_print_opts[stats_print_tot_num_options+1];
+/* Utilities for stats_interval. */
+extern int64_t opt_stats_interval;
+extern char opt_stats_interval_opts[stats_print_tot_num_options+1];
+
+#define STATS_INTERVAL_DEFAULT -1
+/*
+ * Batch-increment the counter to reduce synchronization overhead. Each thread
+ * merges after (interval >> LG_BATCH_SIZE) bytes of allocations; also cap the
+ * batch at BATCH_MAX for accuracy when the interval is huge (which is expected).
+ */
+#define STATS_INTERVAL_ACCUM_LG_BATCH_SIZE 6
+#define STATS_INTERVAL_ACCUM_BATCH_MAX (4 << 20)
+
+/* Only accessed by thread event. */
+uint64_t stats_interval_new_event_wait(tsd_t *tsd);
+uint64_t stats_interval_postponed_event_wait(tsd_t *tsd);
+void stats_interval_event_handler(tsd_t *tsd, uint64_t elapsed);
+
/* Implements je_malloc_stats_print. */
-void stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts);
+void stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts);
+
+bool stats_boot(void);
+void stats_prefork(tsdn_t *tsdn);
+void stats_postfork_parent(tsdn_t *tsdn);
+void stats_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_INTERNAL_STATS_H */
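
To make the batching arithmetic above concrete, a small worked example; the 1 GiB interval is an arbitrary assumption, while the two constants come from the header:

    #include <stdint.h>
    #include <stdio.h>

    #define STATS_INTERVAL_ACCUM_LG_BATCH_SIZE 6
    #define STATS_INTERVAL_ACCUM_BATCH_MAX (4 << 20)

    int
    main(void) {
    	int64_t interval = (int64_t)1 << 30;	/* 1 GiB stats interval. */
    	uint64_t batch = (uint64_t)interval >>
    	    STATS_INTERVAL_ACCUM_LG_BATCH_SIZE;	/* 16 MiB. */
    	if (batch > STATS_INTERVAL_ACCUM_BATCH_MAX) {
    		batch = STATS_INTERVAL_ACCUM_BATCH_MAX;	/* Capped at 4 MiB. */
    	}
    	printf("each thread merges every %llu bytes\n",
    	    (unsigned long long)batch);
    	return 0;
    }
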
diff --git a/contrib/jemalloc/include/jemalloc/internal/sz.h b/contrib/jemalloc/include/jemalloc/internal/sz.h
index 68e558abfea3..3c0fc1da33a6 100644
--- a/contrib/jemalloc/include/jemalloc/internal/sz.h
+++ b/contrib/jemalloc/include/jemalloc/internal/sz.h
@@ -22,6 +22,12 @@
* size that would result from such an allocation.
*/
+/* Page size index type. */
+typedef unsigned pszind_t;
+
+/* Size class index type. */
+typedef unsigned szind_t;
+
/*
* sz_pind2sz_tab encodes the same information as could be computed by
* sz_pind2sz_compute().
@@ -39,34 +45,62 @@ extern size_t sz_index2size_tab[SC_NSIZES];
*/
extern uint8_t sz_size2index_tab[];
-static const size_t sz_large_pad =
-#ifdef JEMALLOC_CACHE_OBLIVIOUS
- PAGE
-#else
- 0
-#endif
- ;
+/*
+ * Padding for large allocations: PAGE when opt_cache_oblivious == true (to
+ * enable cache index randomization); 0 otherwise.
+ */
+extern size_t sz_large_pad;
-extern void sz_boot(const sc_data_t *sc_data);
+extern void sz_boot(const sc_data_t *sc_data, bool cache_oblivious);
JEMALLOC_ALWAYS_INLINE pszind_t
sz_psz2ind(size_t psz) {
+ assert(psz > 0);
if (unlikely(psz > SC_LARGE_MAXCLASS)) {
return SC_NPSIZES;
}
- pszind_t x = lg_floor((psz<<1)-1);
- pszind_t shift = (x < SC_LG_NGROUP + LG_PAGE) ?
+ /* x is the lg of the first base >= psz. */
+ pszind_t x = lg_ceil(psz);
+ /*
+	 * sc.h introduces a lot of size classes. These size classes are divided
+	 * into different size class groups. One group is special: every size
+	 * class in or after it is an integer multiple of PAGE.
+	 * We call it first_ps_rg, short for "first page size regular group".
+	 * The range of first_ps_rg is (base, base * 2], where base == PAGE *
+	 * SC_NGROUP. off_to_first_ps_rg starts from 1, not 0; e.g.
+	 * off_to_first_ps_rg is 1 when psz is (PAGE * SC_NGROUP + 1).
+ */
+ pszind_t off_to_first_ps_rg = (x < SC_LG_NGROUP + LG_PAGE) ?
0 : x - (SC_LG_NGROUP + LG_PAGE);
- pszind_t grp = shift << SC_LG_NGROUP;
- pszind_t lg_delta = (x < SC_LG_NGROUP + LG_PAGE + 1) ?
- LG_PAGE : x - SC_LG_NGROUP - 1;
+ /*
+ * Same as sc_s::lg_delta.
+ * Delta for off_to_first_ps_rg == 1 is PAGE,
+ * for each increase in offset, it's multiplied by two.
+ * Therefore, lg_delta = LG_PAGE + (off_to_first_ps_rg - 1).
+ */
+ pszind_t lg_delta = (off_to_first_ps_rg == 0) ?
+ LG_PAGE : LG_PAGE + (off_to_first_ps_rg - 1);
- size_t delta_inverse_mask = ZU(-1) << lg_delta;
- pszind_t mod = ((((psz-1) & delta_inverse_mask) >> lg_delta)) &
- ((ZU(1) << SC_LG_NGROUP) - 1);
+ /*
+	 * Let's write psz in binary, e.g. 0011 for 0x3, 0111 for 0x7.
+	 * The leftmost lg_base bits decide the base of psz.
+	 * The rightmost lg_delta bits decide (psz % PAGE).
+	 * The middle SC_LG_NGROUP bits decide ndelta.
+	 * ndelta is the offset from the first size class in the group, and
+	 * starts from 1.
+ * If you don't know lg_base, ndelta or lg_delta, see sc.h.
+ * |xxxxxxxxxxxxxxxxxxxx|------------------------|yyyyyyyyyyyyyyyyyyyyy|
+ * |<-- len: lg_base -->|<-- len: SC_LG_NGROUP-->|<-- len: lg_delta -->|
+ * |<-- ndelta -->|
+ * rg_inner_off = ndelta - 1
+	 * Why use (psz - 1)?
+	 * To handle the case where psz % (1 << lg_delta) == 0.
+ */
+ pszind_t rg_inner_off = (((psz - 1)) >> lg_delta) & (SC_NGROUP - 1);
- pszind_t ind = grp + mod;
+ pszind_t base_ind = off_to_first_ps_rg << SC_LG_NGROUP;
+ pszind_t ind = base_ind + rg_inner_off;
return ind;
}
@@ -152,10 +186,15 @@ sz_size2index_compute(size_t size) {
}
JEMALLOC_ALWAYS_INLINE szind_t
-sz_size2index_lookup(size_t size) {
+sz_size2index_lookup_impl(size_t size) {
assert(size <= SC_LOOKUP_MAXCLASS);
- szind_t ret = (sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1)
- >> SC_LG_TINY_MIN]);
+ return sz_size2index_tab[(size + (ZU(1) << SC_LG_TINY_MIN) - 1)
+ >> SC_LG_TINY_MIN];
+}
+
+JEMALLOC_ALWAYS_INLINE szind_t
+sz_size2index_lookup(size_t size) {
+ szind_t ret = sz_size2index_lookup_impl(size);
assert(ret == sz_size2index_compute(size));
return ret;
}
@@ -195,8 +234,13 @@ sz_index2size_compute(szind_t index) {
}
JEMALLOC_ALWAYS_INLINE size_t
+sz_index2size_lookup_impl(szind_t index) {
+ return sz_index2size_tab[index];
+}
+
+JEMALLOC_ALWAYS_INLINE size_t
sz_index2size_lookup(szind_t index) {
- size_t ret = (size_t)sz_index2size_tab[index];
+ size_t ret = sz_index2size_lookup_impl(index);
assert(ret == sz_index2size_compute(index));
return ret;
}
@@ -207,6 +251,12 @@ sz_index2size(szind_t index) {
return sz_index2size_lookup(index);
}
+JEMALLOC_ALWAYS_INLINE void
+sz_size2index_usize_fastpath(size_t size, szind_t *ind, size_t *usize) {
+ *ind = sz_size2index_lookup_impl(size);
+ *usize = sz_index2size_lookup_impl(*ind);
+}
+
JEMALLOC_ALWAYS_INLINE size_t
sz_s2u_compute(size_t size) {
if (unlikely(size > SC_LARGE_MAXCLASS)) {
@@ -266,7 +316,7 @@ sz_sa2u(size_t size, size_t alignment) {
assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
/* Try for a small size class. */
- if (size <= SC_SMALL_MAXCLASS && alignment < PAGE) {
+ if (size <= SC_SMALL_MAXCLASS && alignment <= PAGE) {
/*
* Round size up to the nearest multiple of alignment.
*
@@ -315,4 +365,7 @@ sz_sa2u(size_t size, size_t alignment) {
return usize;
}
+size_t sz_psz_quantize_floor(size_t size);
+size_t sz_psz_quantize_ceil(size_t size);
+
#endif /* JEMALLOC_INTERNAL_SIZE_H */
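
The bit manipulation in sz_psz2ind() can be checked against a standalone re-derivation. The sketch below re-implements the arithmetic with assumed constants (4 KiB pages, 4 size classes per group); it is illustrative only, not the library code:

    #include <stddef.h>
    #include <stdio.h>

    /* Assumed configuration: 4 KiB pages, 4 size classes per group. */
    #define LG_PAGE 12
    #define SC_LG_NGROUP 2
    #define SC_NGROUP (1 << SC_LG_NGROUP)

    static unsigned
    lg_ceil(size_t x) {
    	unsigned lg = 0;
    	while (((size_t)1 << lg) < x) {
    		lg++;
    	}
    	return lg;
    }

    /* Same arithmetic as sz_psz2ind(), minus the SC_NPSIZES overflow check. */
    static unsigned
    psz2ind(size_t psz) {
    	unsigned x = lg_ceil(psz);
    	unsigned off = (x < SC_LG_NGROUP + LG_PAGE) ?
    	    0 : x - (SC_LG_NGROUP + LG_PAGE);
    	unsigned lg_delta = (off == 0) ? LG_PAGE : LG_PAGE + (off - 1);
    	unsigned inner = ((psz - 1) >> lg_delta) & (SC_NGROUP - 1);
    	return (off << SC_LG_NGROUP) + inner;
    }

    int
    main(void) {
    	/* 4K..16K -> 0..3; 20K..32K -> 4..7; 40K..64K -> 8..11. */
    	printf("%u %u %u\n", psz2ind(16 << 10), psz2ind(20 << 10),
    	    psz2ind(40 << 10));	/* Prints: 3 4 8. */
    	return 0;
    }
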
diff --git a/contrib/jemalloc/include/jemalloc/internal/tcache_externs.h b/contrib/jemalloc/include/jemalloc/internal/tcache_externs.h
index d63eafde8ce6..a2ab7101b065 100644
--- a/contrib/jemalloc/include/jemalloc/internal/tcache_externs.h
+++ b/contrib/jemalloc/include/jemalloc/internal/tcache_externs.h
@@ -1,10 +1,17 @@
#ifndef JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
#define JEMALLOC_INTERNAL_TCACHE_EXTERNS_H
-extern bool opt_tcache;
-extern ssize_t opt_lg_tcache_max;
-
-extern cache_bin_info_t *tcache_bin_info;
+extern bool opt_tcache;
+extern size_t opt_tcache_max;
+extern ssize_t opt_lg_tcache_nslots_mul;
+extern unsigned opt_tcache_nslots_small_min;
+extern unsigned opt_tcache_nslots_small_max;
+extern unsigned opt_tcache_nslots_large;
+extern ssize_t opt_lg_tcache_shift;
+extern size_t opt_tcache_gc_incr_bytes;
+extern size_t opt_tcache_gc_delay_bytes;
+extern unsigned opt_lg_tcache_flush_small_div;
+extern unsigned opt_lg_tcache_flush_large_div;
/*
* Number of tcache bins. There are SC_NBINS small-object bins, plus 0 or more
@@ -15,6 +22,8 @@ extern unsigned nhbins;
/* Maximum cached size class. */
extern size_t tcache_maxclass;
+extern cache_bin_info_t *tcache_bin_info;
+
/*
* Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and
* usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are
@@ -25,24 +34,27 @@ extern size_t tcache_maxclass;
*/
extern tcaches_t *tcaches;
-size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
-void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
-void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
+size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
+void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
cache_bin_t *tbin, szind_t binind, bool *tcache_success);
-void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
+
+void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
szind_t binind, unsigned rem);
-void tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
- unsigned rem, tcache_t *tcache);
-void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
- arena_t *arena);
+void tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
+ szind_t binind, unsigned rem);
+void tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *bin,
+ szind_t binind, bool is_small);
+void tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
+ tcache_t *tcache, arena_t *arena);
tcache_t *tcache_create_explicit(tsd_t *tsd);
-void tcache_cleanup(tsd_t *tsd);
-void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
-bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
-void tcaches_flush(tsd_t *tsd, unsigned ind);
-void tcaches_destroy(tsd_t *tsd, unsigned ind);
-bool tcache_boot(tsdn_t *tsdn);
-void tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
+void tcache_cleanup(tsd_t *tsd);
+void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
+bool tcaches_create(tsd_t *tsd, base_t *base, unsigned *r_ind);
+void tcaches_flush(tsd_t *tsd, unsigned ind);
+void tcaches_destroy(tsd_t *tsd, unsigned ind);
+bool tcache_boot(tsdn_t *tsdn, base_t *base);
+void tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
+ tcache_t *tcache, arena_t *arena);
void tcache_prefork(tsdn_t *tsdn);
void tcache_postfork_parent(tsdn_t *tsdn);
void tcache_postfork_child(tsdn_t *tsdn);
@@ -50,4 +62,14 @@ void tcache_flush(tsd_t *tsd);
bool tsd_tcache_data_init(tsd_t *tsd);
bool tsd_tcache_enabled_data_init(tsd_t *tsd);
+void tcache_assert_initialized(tcache_t *tcache);
+
+/* Only accessed by thread event. */
+uint64_t tcache_gc_new_event_wait(tsd_t *tsd);
+uint64_t tcache_gc_postponed_event_wait(tsd_t *tsd);
+void tcache_gc_event_handler(tsd_t *tsd, uint64_t elapsed);
+uint64_t tcache_gc_dalloc_new_event_wait(tsd_t *tsd);
+uint64_t tcache_gc_dalloc_postponed_event_wait(tsd_t *tsd);
+void tcache_gc_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed);
+
#endif /* JEMALLOC_INTERNAL_TCACHE_EXTERNS_H */
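
The compile-time TCACHE_NSLOTS_* constants these options replace could only be changed by rebuilding; the new opt_* globals are runtime tunables. A hedged sketch of inspecting one at run time, assuming a corresponding "opt.tcache_max" mallctl is exposed by this jemalloc version (on FreeBSD the declaration lives in malloc_np.h):

    #include <stdio.h>
    #include <malloc_np.h>

    int
    main(void) {
    	size_t tcache_max, sz = sizeof(tcache_max);
    	/* "opt.tcache_max" is assumed to mirror opt_tcache_max above. */
    	if (mallctl("opt.tcache_max", &tcache_max, &sz, NULL, 0) == 0) {
    		printf("tcache_max: %zu bytes\n", tcache_max);
    	}
    	return 0;
    }
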
diff --git a/contrib/jemalloc/include/jemalloc/internal/tcache_inlines.h b/contrib/jemalloc/include/jemalloc/internal/tcache_inlines.h
index 5eca20e893b4..2634f145dc39 100644
--- a/contrib/jemalloc/include/jemalloc/internal/tcache_inlines.h
+++ b/contrib/jemalloc/include/jemalloc/internal/tcache_inlines.h
@@ -3,9 +3,9 @@
#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
+#include "jemalloc/internal/san.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/sz.h"
-#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"
static inline bool
@@ -27,28 +27,29 @@ tcache_enabled_set(tsd_t *tsd, bool enabled) {
tsd_slow_update(tsd);
}
-JEMALLOC_ALWAYS_INLINE void
-tcache_event(tsd_t *tsd, tcache_t *tcache) {
- if (TCACHE_GC_INCR == 0) {
- return;
+JEMALLOC_ALWAYS_INLINE bool
+tcache_small_bin_disabled(szind_t ind, cache_bin_t *bin) {
+ assert(ind < SC_NBINS);
+ bool ret = (cache_bin_info_ncached_max(&tcache_bin_info[ind]) == 0);
+ if (ret && bin != NULL) {
+ /* small size class but cache bin disabled. */
+ assert(ind >= nhbins);
+ assert((uintptr_t)(*bin->stack_head) ==
+ cache_bin_preceding_junk);
}
- if (unlikely(ticker_tick(&tcache->gc_ticker))) {
- tcache_event_hard(tsd, tcache);
- }
+ return ret;
}
JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
size_t size, szind_t binind, bool zero, bool slow_path) {
void *ret;
- cache_bin_t *bin;
bool tcache_success;
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
assert(binind < SC_NBINS);
- bin = tcache_small_bin_get(tcache, binind);
- ret = cache_bin_alloc_easy(bin, &tcache_success);
+ cache_bin_t *bin = &tcache->bins[binind];
+ ret = cache_bin_alloc(bin, &tcache_success);
assert(tcache_success == (ret != NULL));
if (unlikely(!tcache_success)) {
bool tcache_hard_success;
@@ -56,6 +57,13 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
if (unlikely(arena == NULL)) {
return NULL;
}
+ if (unlikely(tcache_small_bin_disabled(binind, bin))) {
+ /* stats and zero are handled directly by the arena. */
+ return arena_malloc_hard(tsd_tsdn(tsd), arena, size,
+ binind, zero);
+ }
+ tcache_bin_flush_stashed(tsd, tcache, bin, binind,
+ /* is_small */ true);
ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
bin, binind, &tcache_hard_success);
@@ -65,38 +73,14 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
}
assert(ret);
- /*
- * Only compute usize if required. The checks in the following if
- * statement are all static.
- */
- if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
- usize = sz_index2size(binind);
+ if (unlikely(zero)) {
+ size_t usize = sz_index2size(binind);
assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
- }
-
- if (likely(!zero)) {
- if (slow_path && config_fill) {
- if (unlikely(opt_junk_alloc)) {
- arena_alloc_junk_small(ret, &bin_infos[binind],
- false);
- } else if (unlikely(opt_zero)) {
- memset(ret, 0, usize);
- }
- }
- } else {
- if (slow_path && config_fill && unlikely(opt_junk_alloc)) {
- arena_alloc_junk_small(ret, &bin_infos[binind], true);
- }
memset(ret, 0, usize);
}
-
if (config_stats) {
bin->tstats.nrequests++;
}
- if (config_prof) {
- tcache->prof_accumbytes += usize;
- }
- tcache_event(tsd, tcache);
return ret;
}
@@ -104,12 +88,11 @@ JEMALLOC_ALWAYS_INLINE void *
tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
szind_t binind, bool zero, bool slow_path) {
void *ret;
- cache_bin_t *bin;
bool tcache_success;
- assert(binind >= SC_NBINS &&binind < nhbins);
- bin = tcache_large_bin_get(tcache, binind);
- ret = cache_bin_alloc_easy(bin, &tcache_success);
+ assert(binind >= SC_NBINS && binind < nhbins);
+ cache_bin_t *bin = &tcache->bins[binind];
+ ret = cache_bin_alloc(bin, &tcache_success);
assert(tcache_success == (ret != NULL));
if (unlikely(!tcache_success)) {
/*
@@ -120,96 +103,79 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
if (unlikely(arena == NULL)) {
return NULL;
}
+ tcache_bin_flush_stashed(tsd, tcache, bin, binind,
+ /* is_small */ false);
ret = large_malloc(tsd_tsdn(tsd), arena, sz_s2u(size), zero);
if (ret == NULL) {
return NULL;
}
} else {
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
-
- /* Only compute usize on demand */
- if (config_prof || (slow_path && config_fill) ||
- unlikely(zero)) {
- usize = sz_index2size(binind);
+ if (unlikely(zero)) {
+ size_t usize = sz_index2size(binind);
assert(usize <= tcache_maxclass);
- }
-
- if (likely(!zero)) {
- if (slow_path && config_fill) {
- if (unlikely(opt_junk_alloc)) {
- memset(ret, JEMALLOC_ALLOC_JUNK,
- usize);
- } else if (unlikely(opt_zero)) {
- memset(ret, 0, usize);
- }
- }
- } else {
memset(ret, 0, usize);
}
if (config_stats) {
bin->tstats.nrequests++;
}
- if (config_prof) {
- tcache->prof_accumbytes += usize;
- }
}
- tcache_event(tsd, tcache);
return ret;
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
bool slow_path) {
- cache_bin_t *bin;
- cache_bin_info_t *bin_info;
+ assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SC_SMALL_MAXCLASS);
- assert(tcache_salloc(tsd_tsdn(tsd), ptr)
- <= SC_SMALL_MAXCLASS);
-
- if (slow_path && config_fill && unlikely(opt_junk_free)) {
- arena_dalloc_junk_small(ptr, &bin_infos[binind]);
+ cache_bin_t *bin = &tcache->bins[binind];
+ /*
+ * Not marking the branch unlikely because this is past free_fastpath()
+	 * (which handles the most common cases), i.e. at this point we are
+	 * often in an uncommon case.
+ */
+ if (cache_bin_nonfast_aligned(ptr)) {
+ /* Junk unconditionally, even if bin is full. */
+ san_junk_ptr(ptr, sz_index2size(binind));
+ if (cache_bin_stash(bin, ptr)) {
+ return;
+ }
+ assert(cache_bin_full(bin));
+ /* Bin full; fall through into the flush branch. */
}
- bin = tcache_small_bin_get(tcache, binind);
- bin_info = &tcache_bin_info[binind];
- if (unlikely(!cache_bin_dalloc_easy(bin, bin_info, ptr))) {
- tcache_bin_flush_small(tsd, tcache, bin, binind,
- (bin_info->ncached_max >> 1));
- bool ret = cache_bin_dalloc_easy(bin, bin_info, ptr);
+ if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
+ if (unlikely(tcache_small_bin_disabled(binind, bin))) {
+ arena_dalloc_small(tsd_tsdn(tsd), ptr);
+ return;
+ }
+ cache_bin_sz_t max = cache_bin_info_ncached_max(
+ &tcache_bin_info[binind]);
+ unsigned remain = max >> opt_lg_tcache_flush_small_div;
+ tcache_bin_flush_small(tsd, tcache, bin, binind, remain);
+ bool ret = cache_bin_dalloc_easy(bin, ptr);
assert(ret);
}
-
- tcache_event(tsd, tcache);
}
JEMALLOC_ALWAYS_INLINE void
tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
bool slow_path) {
- cache_bin_t *bin;
- cache_bin_info_t *bin_info;
assert(tcache_salloc(tsd_tsdn(tsd), ptr)
> SC_SMALL_MAXCLASS);
assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
- if (slow_path && config_fill && unlikely(opt_junk_free)) {
- large_dalloc_junk(ptr, sz_index2size(binind));
- }
-
- bin = tcache_large_bin_get(tcache, binind);
- bin_info = &tcache_bin_info[binind];
- if (unlikely(bin->ncached == bin_info->ncached_max)) {
- tcache_bin_flush_large(tsd, bin, binind,
- (bin_info->ncached_max >> 1), tcache);
+ cache_bin_t *bin = &tcache->bins[binind];
+ if (unlikely(!cache_bin_dalloc_easy(bin, ptr))) {
+ unsigned remain = cache_bin_info_ncached_max(
+ &tcache_bin_info[binind]) >> opt_lg_tcache_flush_large_div;
+ tcache_bin_flush_large(tsd, tcache, bin, binind, remain);
+ bool ret = cache_bin_dalloc_easy(bin, ptr);
+ assert(ret);
}
- assert(bin->ncached < bin_info->ncached_max);
- bin->ncached++;
- *(bin->avail - bin->ncached) = ptr;
-
- tcache_event(tsd, tcache);
}
JEMALLOC_ALWAYS_INLINE tcache_t *
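
The flush-divisor logic above keeps a configurable fraction of the bin after an overflow flush rather than a hard-coded half. A worked example with assumed values (capacity 200, divisor 1, which reproduces the historical halving behavior):

    #include <stdio.h>

    int
    main(void) {
    	unsigned ncached_max = 200;	/* Assumed bin capacity. */
    	unsigned lg_flush_div = 1;	/* Assumed divisor: keep 1/2. */
    	unsigned remain = ncached_max >> lg_flush_div;
    	printf("flush %u items, keep %u\n", ncached_max - remain, remain);
    	return 0;
    }
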
diff --git a/contrib/jemalloc/include/jemalloc/internal/tcache_structs.h b/contrib/jemalloc/include/jemalloc/internal/tcache_structs.h
index 172ef9040c04..176d73de95b7 100644
--- a/contrib/jemalloc/include/jemalloc/internal/tcache_structs.h
+++ b/contrib/jemalloc/include/jemalloc/internal/tcache_structs.h
@@ -7,36 +7,19 @@
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/tsd_types.h"
-/* Various uses of this struct need it to be a named type. */
-typedef ql_elm(tsd_t) tsd_link_t;
+/*
+ * The tcache state is split into the slow and hot path data. Each has a
+ * pointer to the other, and the data always comes in pairs. The layout of each
+ * of them varies in practice; tcache_slow lives in the TSD for the automatic
+ * tcache, and as part of a dynamic allocation for manual allocations. Keeping
+ * a pointer to tcache_slow lets us treat these cases uniformly, rather than
+ * splitting up the tcache [de]allocation code into those paths called with the
+ * TSD tcache and those called with a manual tcache.
+ */
-struct tcache_s {
- /*
- * To minimize our cache-footprint, we put the frequently accessed data
- * together at the start of this struct.
- */
-
- /* Cleared after arena_prof_accum(). */
- uint64_t prof_accumbytes;
- /* Drives incremental GC. */
- ticker_t gc_ticker;
- /*
- * The pointer stacks associated with bins follow as a contiguous array.
- * During tcache initialization, the avail pointer in each element of
- * tbins is initialized to point to the proper offset within this array.
- */
- cache_bin_t bins_small[SC_NBINS];
-
- /*
- * This data is less hot; we can be a little less careful with our
- * footprint here.
- */
+struct tcache_slow_s {
/* Lets us track all the tcaches in an arena. */
- ql_elm(tcache_t) link;
-
- /* Logically scoped to tsd, but put here for cache layout reasons. */
- ql_elm(tsd_t) tsd_link;
- bool in_hook;
+ ql_elm(tcache_slow_t) link;
/*
* The descriptor lets the arena find our cache bins without seeing the
@@ -51,12 +34,27 @@ struct tcache_s {
szind_t next_gc_bin;
/* For small bins, fill (ncached_max >> lg_fill_div). */
uint8_t lg_fill_div[SC_NBINS];
+	/* For small bins, whether the bin has been refilled since the last GC. */
+ bool bin_refilled[SC_NBINS];
+ /*
+ * For small bins, the number of items we can pretend to flush before
+ * actually flushing.
+ */
+ uint8_t bin_flush_delay_items[SC_NBINS];
/*
- * We put the cache bins for large size classes at the end of the
- * struct, since some of them might not get used. This might end up
- * letting us avoid touching an extra page if we don't have to.
+ * The start of the allocation containing the dynamic allocation for
+ * either the cache bins alone, or the cache bin memory as well as this
+ * tcache_slow_t and its associated tcache_t.
*/
- cache_bin_t bins_large[SC_NSIZES-SC_NBINS];
+ void *dyn_alloc;
+
+	/* The associated tcache (which holds the cache bins). */
+ tcache_t *tcache;
+};
+
+struct tcache_s {
+ tcache_slow_t *tcache_slow;
+ cache_bin_t bins[TCACHE_NBINS_MAX];
};
/* Linkage for list of available (previously used) explicit tcache IDs. */
diff --git a/contrib/jemalloc/include/jemalloc/internal/tcache_types.h b/contrib/jemalloc/include/jemalloc/internal/tcache_types.h
index dce69382ebb0..583677ea2d35 100644
--- a/contrib/jemalloc/include/jemalloc/internal/tcache_types.h
+++ b/contrib/jemalloc/include/jemalloc/internal/tcache_types.h
@@ -3,6 +3,7 @@
#include "jemalloc/internal/sc.h"
+typedef struct tcache_slow_s tcache_slow_t;
typedef struct tcache_s tcache_t;
typedef struct tcaches_s tcaches_t;
@@ -16,39 +17,9 @@ typedef struct tcaches_s tcaches_t;
#define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3)
#define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY
-/*
- * Absolute minimum number of cache slots for each small bin.
- */
-#define TCACHE_NSLOTS_SMALL_MIN 20
-
-/*
- * Absolute maximum number of cache slots for each small bin in the thread
- * cache. This is an additional constraint beyond that imposed as: twice the
- * number of regions per slab for this size class.
- *
- * This constant must be an even number.
- */
-#define TCACHE_NSLOTS_SMALL_MAX 200
-
-/* Number of cache slots for large size classes. */
-#define TCACHE_NSLOTS_LARGE 20
-
-/* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */
-#define LG_TCACHE_MAXCLASS_DEFAULT 15
-
-/*
- * TCACHE_GC_SWEEP is the approximate number of allocation events between
- * full GC sweeps. Integer rounding may cause the actual number to be
- * slightly higher, since GC is performed incrementally.
- */
-#define TCACHE_GC_SWEEP 8192
-
-/* Number of tcache allocation/deallocation events between incremental GCs. */
-#define TCACHE_GC_INCR \
- ((TCACHE_GC_SWEEP / SC_NBINS) + ((TCACHE_GC_SWEEP / SC_NBINS == 0) ? 0 : 1))
-
-/* Used in TSD static initializer only. Real init in tcache_data_init(). */
+/* Used in TSD static initializer only. Real init in tsd_tcache_data_init(). */
#define TCACHE_ZERO_INITIALIZER {0}
+#define TCACHE_SLOW_ZERO_INITIALIZER {0}
/* Used in TSD static initializer only. Will be initialized to opt_tcache. */
#define TCACHE_ENABLED_ZERO_INITIALIZER false
@@ -56,4 +27,9 @@ typedef struct tcaches_s tcaches_t;
/* Used for explicit tcache only. Means flushed but not destroyed. */
#define TCACHES_ELM_NEED_REINIT ((tcache_t *)(uintptr_t)1)
+#define TCACHE_LG_MAXCLASS_LIMIT 23 /* tcache_maxclass = 8M */
+#define TCACHE_MAXCLASS_LIMIT ((size_t)1 << TCACHE_LG_MAXCLASS_LIMIT)
+#define TCACHE_NBINS_MAX (SC_NBINS + SC_NGROUP * \
+ (TCACHE_LG_MAXCLASS_LIMIT - SC_LG_LARGE_MINCLASS) + 1)
+
#endif /* JEMALLOC_INTERNAL_TCACHE_TYPES_H */
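
Plugging in typical values makes the TCACHE_NBINS_MAX arithmetic concrete: one group of bins per doubling between the smallest large class and the 8M cap, plus one for the smallest large class itself. The constants below are assumptions for a common 4 KiB-page configuration, not values stated in this patch:

    #include <stdio.h>

    /* Assumed values for a typical 4 KiB-page configuration. */
    #define SC_NBINS 36
    #define SC_NGROUP 4
    #define SC_LG_LARGE_MINCLASS 14		/* 16K: smallest large class. */
    #define TCACHE_LG_MAXCLASS_LIMIT 23	/* 8M cap, as above. */

    int
    main(void) {
    	int nbins_max = SC_NBINS + SC_NGROUP *
    	    (TCACHE_LG_MAXCLASS_LIMIT - SC_LG_LARGE_MINCLASS) + 1;
    	printf("TCACHE_NBINS_MAX = %d\n", nbins_max);	/* 73 */
    	return 0;
    }
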
diff --git a/contrib/jemalloc/include/jemalloc/internal/test_hooks.h b/contrib/jemalloc/include/jemalloc/internal/test_hooks.h
index 0780c52fa270..3d530b5c5756 100644
--- a/contrib/jemalloc/include/jemalloc/internal/test_hooks.h
+++ b/contrib/jemalloc/include/jemalloc/internal/test_hooks.h
@@ -4,9 +4,21 @@
extern JEMALLOC_EXPORT void (*test_hooks_arena_new_hook)();
extern JEMALLOC_EXPORT void (*test_hooks_libc_hook)();
-#define JEMALLOC_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
+#if defined(JEMALLOC_JET) || defined(JEMALLOC_UNIT_TEST)
+# define JEMALLOC_TEST_HOOK(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)
+# define open JEMALLOC_TEST_HOOK(open, test_hooks_libc_hook)
+# define read JEMALLOC_TEST_HOOK(read, test_hooks_libc_hook)
+# define write JEMALLOC_TEST_HOOK(write, test_hooks_libc_hook)
+# define readlink JEMALLOC_TEST_HOOK(readlink, test_hooks_libc_hook)
+# define close JEMALLOC_TEST_HOOK(close, test_hooks_libc_hook)
+# define creat JEMALLOC_TEST_HOOK(creat, test_hooks_libc_hook)
+# define secure_getenv JEMALLOC_TEST_HOOK(secure_getenv, test_hooks_libc_hook)
/* Note that this is undef'd and re-define'd in src/prof.c. */
-#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
+# define _Unwind_Backtrace JEMALLOC_TEST_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
+#else
+# define JEMALLOC_TEST_HOOK(fn, hook) fn
+#endif
+
#endif /* JEMALLOC_INTERNAL_TEST_HOOKS_H */
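
The hook macro relies on the comma operator and short-circuit &&: the hook runs first (when non-NULL), and the expression then yields the real function, so an ordinary-looking call site still compiles. A standalone sketch of the same trick with made-up names:

    #include <stdio.h>

    /* The same comma-operator trick, with illustrative names. */
    #define HOOKED(fn, hook) ((void)(hook != NULL && (hook(), 0)), fn)

    static void (*my_hook)(void);
    #define puts_hooked(s) HOOKED(puts, my_hook)(s)

    static void
    my_hook_impl(void) {
    	fprintf(stderr, "hook fired\n");
    }

    int
    main(void) {
    	puts_hooked("no hook installed");	/* Hook is NULL: skipped. */
    	my_hook = my_hook_impl;
    	puts_hooked("hook installed");	/* Hook runs before puts. */
    	return 0;
    }
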
diff --git a/contrib/jemalloc/include/jemalloc/internal/thread_event.h b/contrib/jemalloc/include/jemalloc/internal/thread_event.h
new file mode 100644
index 000000000000..2f4e1b39c7bc
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/thread_event.h
@@ -0,0 +1,301 @@
+#ifndef JEMALLOC_INTERNAL_THREAD_EVENT_H
+#define JEMALLOC_INTERNAL_THREAD_EVENT_H
+
+#include "jemalloc/internal/tsd.h"
+
+/* "te" is short for "thread_event" */
+
+/*
+ * TE_MIN_START_WAIT should not exceed the minimal allocation usize.
+ */
+#define TE_MIN_START_WAIT ((uint64_t)1U)
+#define TE_MAX_START_WAIT UINT64_MAX
+
+/*
+ * Maximum threshold on thread_(de)allocated_next_event_fast, so that there is
+ * no need to check overflow in malloc fast path. (The allocation size in malloc
+ * fast path never exceeds SC_LOOKUP_MAXCLASS.)
+ */
+#define TE_NEXT_EVENT_FAST_MAX (UINT64_MAX - SC_LOOKUP_MAXCLASS + 1U)
+
+/*
+ * The max interval helps make sure that malloc stays on the fast path in the
+ * common case, i.e. thread_allocated < thread_allocated_next_event_fast. When
+ * thread_allocated is within an event's distance to TE_NEXT_EVENT_FAST_MAX
+ * above, thread_allocated_next_event_fast is wrapped around and we fall back to
+ * the medium-fast path. The max interval makes sure that we're not staying on
+ * the fallback case for too long, even if there's no active event or if all
+ * active events have long wait times.
+ */
+#define TE_MAX_INTERVAL ((uint64_t)(4U << 20))
+
+/*
+ * Invalid elapsed time, for situations where elapsed time is not needed. See
+ * comments in thread_event.c for more info.
+ */
+#define TE_INVALID_ELAPSED UINT64_MAX
+
+typedef struct te_ctx_s {
+ bool is_alloc;
+ uint64_t *current;
+ uint64_t *last_event;
+ uint64_t *next_event;
+ uint64_t *next_event_fast;
+} te_ctx_t;
+
+void te_assert_invariants_debug(tsd_t *tsd);
+void te_event_trigger(tsd_t *tsd, te_ctx_t *ctx);
+void te_recompute_fast_threshold(tsd_t *tsd);
+void tsd_te_init(tsd_t *tsd);
+
+/*
+ * List of all events, in the following format:
+ * E(event, (condition), is_alloc_event)
+ */
+#define ITERATE_OVER_ALL_EVENTS \
+ E(tcache_gc, (opt_tcache_gc_incr_bytes > 0), true) \
+ E(prof_sample, (config_prof && opt_prof), true) \
+ E(stats_interval, (opt_stats_interval >= 0), true) \
+ E(tcache_gc_dalloc, (opt_tcache_gc_incr_bytes > 0), false) \
+ E(peak_alloc, config_stats, true) \
+ E(peak_dalloc, config_stats, false)
+
+#define E(event, condition_unused, is_alloc_event_unused) \
+ C(event##_event_wait)
+
+/* List of all thread event counters. */
+#define ITERATE_OVER_ALL_COUNTERS \
+ C(thread_allocated) \
+ C(thread_allocated_last_event) \
+ ITERATE_OVER_ALL_EVENTS \
+ C(prof_sample_last_event) \
+ C(stats_interval_last_event)
+
+/* Getters directly wrap TSD getters. */
+#define C(counter) \
+JEMALLOC_ALWAYS_INLINE uint64_t \
+counter##_get(tsd_t *tsd) { \
+ return tsd_##counter##_get(tsd); \
+}
+
+ITERATE_OVER_ALL_COUNTERS
+#undef C
+
+/*
+ * Setters call the TSD pointer getters rather than the TSD setters, so that
+ * the counters can be modified even when TSD state is reincarnated or
+ * minimal_initialized: if an event is triggered in such cases, we will
+ * temporarily delay the event and let it be immediately triggered at the next
+ * allocation call.
+ */
+#define C(counter) \
+JEMALLOC_ALWAYS_INLINE void \
+counter##_set(tsd_t *tsd, uint64_t v) { \
+ *tsd_##counter##p_get(tsd) = v; \
+}
+
+ITERATE_OVER_ALL_COUNTERS
+#undef C
+
+/*
+ * For generating _event_wait getter / setter functions for each individual
+ * event.
+ */
+#undef E
+
+/*
+ * The malloc and free fastpath getters -- use the unsafe getters since tsd may
+ * be non-nominal, in which case the fast_threshold will be set to 0. This
+ * allows checking for events and tsd non-nominal in a single branch.
+ *
+ * Note that these can only be used on the fastpath.
+ */
+JEMALLOC_ALWAYS_INLINE void
+te_malloc_fastpath_ctx(tsd_t *tsd, uint64_t *allocated, uint64_t *threshold) {
+ *allocated = *tsd_thread_allocatedp_get_unsafe(tsd);
+ *threshold = *tsd_thread_allocated_next_event_fastp_get_unsafe(tsd);
+ assert(*threshold <= TE_NEXT_EVENT_FAST_MAX);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+te_free_fastpath_ctx(tsd_t *tsd, uint64_t *deallocated, uint64_t *threshold) {
+ /* Unsafe getters since this may happen before tsd_init. */
+ *deallocated = *tsd_thread_deallocatedp_get_unsafe(tsd);
+ *threshold = *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd);
+ assert(*threshold <= TE_NEXT_EVENT_FAST_MAX);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+te_ctx_is_alloc(te_ctx_t *ctx) {
+ return ctx->is_alloc;
+}
+
+JEMALLOC_ALWAYS_INLINE uint64_t
+te_ctx_current_bytes_get(te_ctx_t *ctx) {
+ return *ctx->current;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+te_ctx_current_bytes_set(te_ctx_t *ctx, uint64_t v) {
+ *ctx->current = v;
+}
+
+JEMALLOC_ALWAYS_INLINE uint64_t
+te_ctx_last_event_get(te_ctx_t *ctx) {
+ return *ctx->last_event;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+te_ctx_last_event_set(te_ctx_t *ctx, uint64_t v) {
+ *ctx->last_event = v;
+}
+
+/* Below 3 for next_event_fast. */
+JEMALLOC_ALWAYS_INLINE uint64_t
+te_ctx_next_event_fast_get(te_ctx_t *ctx) {
+ uint64_t v = *ctx->next_event_fast;
+ assert(v <= TE_NEXT_EVENT_FAST_MAX);
+ return v;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+te_ctx_next_event_fast_set(te_ctx_t *ctx, uint64_t v) {
+ assert(v <= TE_NEXT_EVENT_FAST_MAX);
+ *ctx->next_event_fast = v;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+te_next_event_fast_set_non_nominal(tsd_t *tsd) {
+ /*
+ * Set the fast thresholds to zero when tsd is non-nominal. Use the
+ * unsafe getter as this may get called during tsd init and clean up.
+ */
+ *tsd_thread_allocated_next_event_fastp_get_unsafe(tsd) = 0;
+ *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd) = 0;
+}
+
+/* For next_event. Setter also updates the fast threshold. */
+JEMALLOC_ALWAYS_INLINE uint64_t
+te_ctx_next_event_get(te_ctx_t *ctx) {
+ return *ctx->next_event;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+te_ctx_next_event_set(tsd_t *tsd, te_ctx_t *ctx, uint64_t v) {
+ *ctx->next_event = v;
+ te_recompute_fast_threshold(tsd);
+}
+
+/*
+ * The function checks in debug mode whether the thread event counters are in
+ * a consistent state, which forms the invariants before and after each round
+ * of thread event handling that we can rely on and need to promise.
+ * The invariants are only temporarily violated in the middle of
+ * te_event_advance() if an event is triggered (the te_event_trigger() call at
+ * the end will restore the invariants).
+ */
+JEMALLOC_ALWAYS_INLINE void
+te_assert_invariants(tsd_t *tsd) {
+ if (config_debug) {
+ te_assert_invariants_debug(tsd);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+te_ctx_get(tsd_t *tsd, te_ctx_t *ctx, bool is_alloc) {
+ ctx->is_alloc = is_alloc;
+ if (is_alloc) {
+ ctx->current = tsd_thread_allocatedp_get(tsd);
+ ctx->last_event = tsd_thread_allocated_last_eventp_get(tsd);
+ ctx->next_event = tsd_thread_allocated_next_eventp_get(tsd);
+ ctx->next_event_fast =
+ tsd_thread_allocated_next_event_fastp_get(tsd);
+ } else {
+ ctx->current = tsd_thread_deallocatedp_get(tsd);
+ ctx->last_event = tsd_thread_deallocated_last_eventp_get(tsd);
+ ctx->next_event = tsd_thread_deallocated_next_eventp_get(tsd);
+ ctx->next_event_fast =
+ tsd_thread_deallocated_next_event_fastp_get(tsd);
+ }
+}
+
+/*
+ * The lookahead functionality lets events look ahead, i.e. determine,
+ * without touching the event counters, whether an event would be
+ * triggered. The event counters are not advanced until the end of the
+ * allocation / deallocation calls, so the lookahead is useful when
+ * preparation work for some event must be done early in the allocation /
+ * deallocation calls.
+ *
+ * Currently only the profiling sampling event needs the lookahead
+ * functionality, so we don't yet define general purpose lookahead functions.
+ *
+ * "Surplus" refers to the number of bytes beyond what's needed to trigger an
+ * event, which can be a useful quantity to have in general when the lookahead
+ * is being called.
+ */
+
+JEMALLOC_ALWAYS_INLINE bool
+te_prof_sample_event_lookahead_surplus(tsd_t *tsd, size_t usize,
+ size_t *surplus) {
+ if (surplus != NULL) {
+ /*
+ * This is a dead store: the surplus will be overwritten before
+ * any read. The initialization suppresses compiler warnings.
+ * Meanwhile, using SIZE_MAX to initialize is good for
+		 * debugging purposes, because a valid surplus value is strictly
+ * less than usize, which is at most SIZE_MAX.
+ */
+ *surplus = SIZE_MAX;
+ }
+ if (unlikely(!tsd_nominal(tsd) || tsd_reentrancy_level_get(tsd) > 0)) {
+ return false;
+ }
+ /* The subtraction is intentionally susceptible to underflow. */
+ uint64_t accumbytes = tsd_thread_allocated_get(tsd) + usize -
+ tsd_thread_allocated_last_event_get(tsd);
+ uint64_t sample_wait = tsd_prof_sample_event_wait_get(tsd);
+ if (accumbytes < sample_wait) {
+ return false;
+ }
+ assert(accumbytes - sample_wait < (uint64_t)usize);
+ if (surplus != NULL) {
+ *surplus = (size_t)(accumbytes - sample_wait);
+ }
+ return true;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+te_prof_sample_event_lookahead(tsd_t *tsd, size_t usize) {
+ return te_prof_sample_event_lookahead_surplus(tsd, usize, NULL);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+te_event_advance(tsd_t *tsd, size_t usize, bool is_alloc) {
+ te_assert_invariants(tsd);
+
+ te_ctx_t ctx;
+ te_ctx_get(tsd, &ctx, is_alloc);
+
+ uint64_t bytes_before = te_ctx_current_bytes_get(&ctx);
+ te_ctx_current_bytes_set(&ctx, bytes_before + usize);
+
+ /* The subtraction is intentionally susceptible to underflow. */
+ if (likely(usize < te_ctx_next_event_get(&ctx) - bytes_before)) {
+ te_assert_invariants(tsd);
+ } else {
+ te_event_trigger(tsd, &ctx);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+thread_dalloc_event(tsd_t *tsd, size_t usize) {
+ te_event_advance(tsd, usize, false);
+}
+
+JEMALLOC_ALWAYS_INLINE void
+thread_alloc_event(tsd_t *tsd, size_t usize) {
+ te_event_advance(tsd, usize, true);
+}
+
+#endif /* JEMALLOC_INTERNAL_THREAD_EVENT_H */
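
The trigger check in te_event_advance() reads more easily with numbers. In this made-up example the next event is due at 1000 bytes and 900 have accumulated, so an allocation of 50 stays on the fast path while one of 150 triggers the event:

    #include <stdint.h>
    #include <stdio.h>

    int
    main(void) {
    	uint64_t next_event = 1000, bytes_before = 900;
    	uint64_t usizes[] = {50, 150};
    	for (int i = 0; i < 2; i++) {
    		/* Unsigned subtraction, as in te_event_advance(). */
    		if (usizes[i] < next_event - bytes_before) {
    			printf("usize %llu: no event\n",
    			    (unsigned long long)usizes[i]);
    		} else {
    			printf("usize %llu: trigger\n",
    			    (unsigned long long)usizes[i]);
    		}
    	}
    	return 0;
    }
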
diff --git a/contrib/jemalloc/include/jemalloc/internal/ticker.h b/contrib/jemalloc/include/jemalloc/internal/ticker.h
index 52d0db4c89c6..6b51ddec43bf 100644
--- a/contrib/jemalloc/include/jemalloc/internal/ticker.h
+++ b/contrib/jemalloc/include/jemalloc/internal/ticker.h
@@ -1,6 +1,7 @@
#ifndef JEMALLOC_INTERNAL_TICKER_H
#define JEMALLOC_INTERNAL_TICKER_H
+#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/util.h"
/**
@@ -10,11 +11,11 @@
* have occurred with a call to ticker_ticks), which will return true (and reset
* the counter) if the countdown hit zero.
*/
-
-typedef struct {
+typedef struct ticker_s ticker_t;
+struct ticker_s {
int32_t tick;
int32_t nticks;
-} ticker_t;
+};
static inline void
ticker_init(ticker_t *ticker, int32_t nticks) {
@@ -75,7 +76,7 @@ ticker_tick(ticker_t *ticker) {
return ticker_ticks(ticker, 1);
}
-/*
+/*
* Try to tick. If ticker would fire, return true, but rely on
* slowpath to reset ticker.
*/
@@ -88,4 +89,87 @@ ticker_trytick(ticker_t *ticker) {
return false;
}
+/*
+ * The ticker_geom_t is much like the ticker_t, except that instead of ticker
+ * having a constant countdown, it has an approximate one; each tick has
+ * approximately a 1/nticks chance of firing.
+ *
+ * The motivation is in triggering arena decay. With a naive strategy, each
+ * thread would maintain a ticker per arena, and check if decay is necessary
+ * each time that the arena's ticker fires. This has two costs:
+ * - Since under reasonable assumptions both threads and arenas can scale
+ * linearly with the number of CPUs, maintaining per-arena data in each thread
+ * scales quadratically with the number of CPUs.
+ * - These tickers are often a cache miss down tcache flush pathways.
+ *
+ * By giving each tick a 1/nticks chance of firing, we still maintain the same
+ * average number of ticks-until-firing per arena, with only a single ticker's
+ * worth of metadata.
+ */
+
+/* See ticker.c for an explanation of these constants. */
+#define TICKER_GEOM_NBITS 6
+#define TICKER_GEOM_MUL 61
+extern const uint8_t ticker_geom_table[1 << TICKER_GEOM_NBITS];
+
+/* Not actually any different from ticker_t; just for type safety. */
+typedef struct ticker_geom_s ticker_geom_t;
+struct ticker_geom_s {
+ int32_t tick;
+ int32_t nticks;
+};
+
+/*
+ * Just pick the average delay for the first counter. We're more concerned with
+ * the behavior over long periods of time than with the exact timing of the
+ * initial ticks.
+ */
+#define TICKER_GEOM_INIT(nticks) {nticks, nticks}
+
+static inline void
+ticker_geom_init(ticker_geom_t *ticker, int32_t nticks) {
+ /*
+ * Make sure there's no overflow possible. This shouldn't really be a
+ * problem for reasonable nticks choices, which are all static and
+ * relatively small.
+ */
+ assert((uint64_t)nticks * (uint64_t)255 / (uint64_t)TICKER_GEOM_MUL
+ <= (uint64_t)INT32_MAX);
+ ticker->tick = nticks;
+ ticker->nticks = nticks;
+}
+
+static inline int32_t
+ticker_geom_read(const ticker_geom_t *ticker) {
+ return ticker->tick;
+}
+
+/* Same deal as above. */
+#if defined(__GNUC__) && !defined(__clang__) \
+ && (defined(__x86_64__) || defined(__i386__))
+JEMALLOC_NOINLINE
+#endif
+static bool
+ticker_geom_fixup(ticker_geom_t *ticker, uint64_t *prng_state) {
+ uint64_t idx = prng_lg_range_u64(prng_state, TICKER_GEOM_NBITS);
+ ticker->tick = (uint32_t)(
+ (uint64_t)ticker->nticks * (uint64_t)ticker_geom_table[idx]
+ / (uint64_t)TICKER_GEOM_MUL);
+ return true;
+}
+
+static inline bool
+ticker_geom_ticks(ticker_geom_t *ticker, uint64_t *prng_state, int32_t nticks) {
+ ticker->tick -= nticks;
+ if (unlikely(ticker->tick < 0)) {
+ return ticker_geom_fixup(ticker, prng_state);
+ }
+ return false;
+}
+
+static inline bool
+ticker_geom_tick(ticker_geom_t *ticker, uint64_t *prng_state) {
+ return ticker_geom_ticks(ticker, prng_state, 1);
+}
+
#endif /* JEMALLOC_INTERNAL_TICKER_H */
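
A behavioral sketch of the geometric-ticker idea: fire each tick with probability ~1/nticks, so the mean period matches a plain countdown ticker while needing only one ticker's worth of state. This uses a toy xorshift PRNG instead of prng_lg_range_u64() and the ticker_geom_table scheme, purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t
    xorshift64(uint64_t *s) {
    	*s ^= *s << 13; *s ^= *s >> 7; *s ^= *s << 17;
    	return *s;
    }

    int
    main(void) {
    	uint64_t state = 0x9e3779b97f4a7c15ULL;
    	const uint64_t nticks = 100;
    	uint64_t fires = 0, trials = 1000000;
    	for (uint64_t i = 0; i < trials; i++) {
    		if (xorshift64(&state) % nticks == 0) {
    			fires++;
    		}
    	}
    	/* Expect roughly trials / nticks ~= 10000 fires. */
    	printf("fires: %llu\n", (unsigned long long)fires);
    	return 0;
    }
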
diff --git a/contrib/jemalloc/include/jemalloc/internal/tsd.h b/contrib/jemalloc/include/jemalloc/internal/tsd.h
index ecfda5d60df1..6cd52aeee962 100644
--- a/contrib/jemalloc/include/jemalloc/internal/tsd.h
+++ b/contrib/jemalloc/include/jemalloc/internal/tsd.h
@@ -1,10 +1,12 @@
#ifndef JEMALLOC_INTERNAL_TSD_H
#define JEMALLOC_INTERNAL_TSD_H
+#include "jemalloc/internal/activity_callback.h"
#include "jemalloc/internal/arena_types.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/bin_types.h"
#include "jemalloc/internal/jemalloc_internal_externs.h"
+#include "jemalloc/internal/peak.h"
#include "jemalloc/internal/prof_types.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/rtree_tsd.h"
@@ -15,39 +17,30 @@
/*
* Thread-Specific-Data layout
- * --- data accessed on tcache fast path: state, rtree_ctx, stats, prof ---
- * s: state
- * e: tcache_enabled
- * m: thread_allocated (config_stats)
- * f: thread_deallocated (config_stats)
- * p: prof_tdata (config_prof)
- * c: rtree_ctx (rtree cache accessed on deallocation)
- * t: tcache
- * --- data not accessed on tcache fast path: arena-related fields ---
- * d: arenas_tdata_bypass
- * r: reentrancy_level
- * x: narenas_tdata
- * i: iarena
- * a: arena
- * o: arenas_tdata
- * Loading TSD data is on the critical path of basically all malloc operations.
- * In particular, tcache and rtree_ctx rely on hot CPU cache to be effective.
- * Use a compact layout to reduce cache footprint.
- * +--- 64-bit and 64B cacheline; 1B each letter; First byte on the left. ---+
- * |---------------------------- 1st cacheline ----------------------------|
- * | sedrxxxx mmmmmmmm ffffffff pppppppp [c * 32 ........ ........ .......] |
- * |---------------------------- 2nd cacheline ----------------------------|
- * | [c * 64 ........ ........ ........ ........ ........ ........ .......] |
- * |---------------------------- 3nd cacheline ----------------------------|
- * | [c * 32 ........ ........ .......] iiiiiiii aaaaaaaa oooooooo [t...... |
- * +-------------------------------------------------------------------------+
- * Note: the entire tcache is embedded into TSD and spans multiple cachelines.
*
- * The last 3 members (i, a and o) before tcache isn't really needed on tcache
- * fast path. However we have a number of unused tcache bins and witnesses
- * (never touched unless config_debug) at the end of tcache, so we place them
- * there to avoid breaking the cachelines and possibly paging in an extra page.
+ * At least some thread-local data gets touched on the fast-path of almost all
+ * malloc operations. But much of it is only necessary down slow-paths, or
+ * testing. We want to colocate the fast-path data so that it can live on the
+ * same cacheline if possible. So we define three tiers of hotness:
+ * TSD_DATA_FAST: Touched on the alloc/dalloc fast paths.
+ * TSD_DATA_SLOW: Touched down slow paths. "Slow" here is sort of general;
+ * there are "semi-slow" paths like "not a sized deallocation, but can still
+ * live in the tcache". We'll want to keep these closer to the fast-path
+ * data.
+ * TSD_DATA_SLOWER: Only touched in test or debug modes, or not touched at all.
+ *
+ * An additional concern is that the larger tcache bins won't be used (we have a
+ * bin per size class, but by default only cache relatively small objects). So
+ * the earlier bins are in the TSD_DATA_FAST tier, but the later ones are in the
+ * TSD_DATA_SLOWER tier.
+ *
+ * As a result of all this, we put the slow data first, then the fast data, then
+ * the slower data, while keeping the tcache as the last element of the fast
+ * data (so that the fast -> slower transition happens midway through the
+ * tcache). While we don't yet play alignment tricks to guarantee it, this
+ * increases our odds of getting some cache/page locality on fast paths.
*/
+
#ifdef JEMALLOC_JET
typedef void (*test_callback_t)(int *);
# define MALLOC_TSD_TEST_DATA_INIT 0x72b65c10
@@ -60,50 +53,112 @@ typedef void (*test_callback_t)(int *);
# define MALLOC_TEST_TSD_INITIALIZER
#endif
-/* O(name, type, nullable type */
-#define MALLOC_TSD \
+typedef ql_elm(tsd_t) tsd_link_t;
+
+/* O(name, type, nullable type) */
+#define TSD_DATA_SLOW \
O(tcache_enabled, bool, bool) \
- O(arenas_tdata_bypass, bool, bool) \
O(reentrancy_level, int8_t, int8_t) \
- O(narenas_tdata, uint32_t, uint32_t) \
- O(offset_state, uint64_t, uint64_t) \
- O(thread_allocated, uint64_t, uint64_t) \
- O(thread_deallocated, uint64_t, uint64_t) \
- O(bytes_until_sample, int64_t, int64_t) \
+ O(thread_allocated_last_event, uint64_t, uint64_t) \
+ O(thread_allocated_next_event, uint64_t, uint64_t) \
+ O(thread_deallocated_last_event, uint64_t, uint64_t) \
+ O(thread_deallocated_next_event, uint64_t, uint64_t) \
+ O(tcache_gc_event_wait, uint64_t, uint64_t) \
+ O(tcache_gc_dalloc_event_wait, uint64_t, uint64_t) \
+ O(prof_sample_event_wait, uint64_t, uint64_t) \
+ O(prof_sample_last_event, uint64_t, uint64_t) \
+ O(stats_interval_event_wait, uint64_t, uint64_t) \
+ O(stats_interval_last_event, uint64_t, uint64_t) \
+ O(peak_alloc_event_wait, uint64_t, uint64_t) \
+ O(peak_dalloc_event_wait, uint64_t, uint64_t) \
O(prof_tdata, prof_tdata_t *, prof_tdata_t *) \
- O(rtree_ctx, rtree_ctx_t, rtree_ctx_t) \
+ O(prng_state, uint64_t, uint64_t) \
+ O(san_extents_until_guard_small, uint64_t, uint64_t) \
+ O(san_extents_until_guard_large, uint64_t, uint64_t) \
O(iarena, arena_t *, arena_t *) \
O(arena, arena_t *, arena_t *) \
- O(arenas_tdata, arena_tdata_t *, arena_tdata_t *)\
+ O(arena_decay_ticker, ticker_geom_t, ticker_geom_t) \
+ O(sec_shard, uint8_t, uint8_t) \
O(binshards, tsd_binshards_t, tsd_binshards_t)\
- O(tcache, tcache_t, tcache_t) \
+ O(tsd_link, tsd_link_t, tsd_link_t) \
+ O(in_hook, bool, bool) \
+ O(peak, peak_t, peak_t) \
+ O(activity_callback_thunk, activity_callback_thunk_t, \
+ activity_callback_thunk_t) \
+ O(tcache_slow, tcache_slow_t, tcache_slow_t) \
+ O(rtree_ctx, rtree_ctx_t, rtree_ctx_t)
+
+#define TSD_DATA_SLOW_INITIALIZER \
+ /* tcache_enabled */ TCACHE_ENABLED_ZERO_INITIALIZER, \
+ /* reentrancy_level */ 0, \
+ /* thread_allocated_last_event */ 0, \
+ /* thread_allocated_next_event */ 0, \
+ /* thread_deallocated_last_event */ 0, \
+ /* thread_deallocated_next_event */ 0, \
+ /* tcache_gc_event_wait */ 0, \
+ /* tcache_gc_dalloc_event_wait */ 0, \
+ /* prof_sample_event_wait */ 0, \
+ /* prof_sample_last_event */ 0, \
+ /* stats_interval_event_wait */ 0, \
+ /* stats_interval_last_event */ 0, \
+ /* peak_alloc_event_wait */ 0, \
+ /* peak_dalloc_event_wait */ 0, \
+ /* prof_tdata */ NULL, \
+ /* prng_state */ 0, \
+ /* san_extents_until_guard_small */ 0, \
+ /* san_extents_until_guard_large */ 0, \
+ /* iarena */ NULL, \
+ /* arena */ NULL, \
+ /* arena_decay_ticker */ \
+ TICKER_GEOM_INIT(ARENA_DECAY_NTICKS_PER_UPDATE), \
+ /* sec_shard */ (uint8_t)-1, \
+ /* binshards */ TSD_BINSHARDS_ZERO_INITIALIZER, \
+ /* tsd_link */ {NULL}, \
+ /* in_hook */ false, \
+ /* peak */ PEAK_INITIALIZER, \
+ /* activity_callback_thunk */ \
+ ACTIVITY_CALLBACK_THUNK_INITIALIZER, \
+ /* tcache_slow */ TCACHE_SLOW_ZERO_INITIALIZER, \
+ /* rtree_ctx */ RTREE_CTX_INITIALIZER,
+
+/* O(name, type, nullable type) */
+#define TSD_DATA_FAST \
+ O(thread_allocated, uint64_t, uint64_t) \
+ O(thread_allocated_next_event_fast, uint64_t, uint64_t) \
+ O(thread_deallocated, uint64_t, uint64_t) \
+ O(thread_deallocated_next_event_fast, uint64_t, uint64_t) \
+ O(tcache, tcache_t, tcache_t)
+
+#define TSD_DATA_FAST_INITIALIZER \
+ /* thread_allocated */ 0, \
+ /* thread_allocated_next_event_fast */ 0, \
+ /* thread_deallocated */ 0, \
+ /* thread_deallocated_next_event_fast */ 0, \
+ /* tcache */ TCACHE_ZERO_INITIALIZER,
+
+/* O(name, type, nullable type) */
+#define TSD_DATA_SLOWER \
O(witness_tsd, witness_tsd_t, witness_tsdn_t) \
MALLOC_TEST_TSD
+#define TSD_DATA_SLOWER_INITIALIZER \
+ /* witness */ WITNESS_TSD_INITIALIZER \
+ /* test data */ MALLOC_TEST_TSD_INITIALIZER
+
+
#define TSD_INITIALIZER { \
- ATOMIC_INIT(tsd_state_uninitialized), \
- TCACHE_ENABLED_ZERO_INITIALIZER, \
- false, \
- 0, \
- 0, \
- 0, \
- 0, \
- 0, \
- 0, \
- NULL, \
- RTREE_CTX_ZERO_INITIALIZER, \
- NULL, \
- NULL, \
- NULL, \
- TSD_BINSHARDS_ZERO_INITIALIZER, \
- TCACHE_ZERO_INITIALIZER, \
- WITNESS_TSD_INITIALIZER \
- MALLOC_TEST_TSD_INITIALIZER \
+ TSD_DATA_SLOW_INITIALIZER \
+ /* state */ ATOMIC_INIT(tsd_state_uninitialized), \
+ TSD_DATA_FAST_INITIALIZER \
+ TSD_DATA_SLOWER_INITIALIZER \
}
+#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
+void _malloc_tsd_cleanup_register(bool (*f)(void));
+#endif
+
void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
-void malloc_tsd_cleanup_register(bool (*f)(void));
tsd_t *malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
void tsd_cleanup(void *arg);
@@ -189,14 +244,17 @@ struct tsd_s {
* setters below.
*/
+#define O(n, t, nt) \
+ t TSD_MANGLE(n);
+
+ TSD_DATA_SLOW
/*
* We manually limit the state to just a single byte. Unless the 8-bit
* atomics are unavailable (which is rare).
*/
tsd_state_t state;
-#define O(n, t, nt) \
- t TSD_MANGLE(n);
-MALLOC_TSD
+ TSD_DATA_FAST
+ TSD_DATA_SLOWER
#undef O
/* AddressSanitizer requires TLS data to be aligned to at least 8 bytes. */
} JEMALLOC_ALIGNED(16);
@@ -263,7 +321,9 @@ JEMALLOC_ALWAYS_INLINE t * \
tsd_##n##p_get_unsafe(tsd_t *tsd) { \
return &tsd->TSD_MANGLE(n); \
}
-MALLOC_TSD
+TSD_DATA_SLOW
+TSD_DATA_FAST
+TSD_DATA_SLOWER
#undef O
/* tsd_foop_get(tsd) returns a pointer to the thread-local instance of foo. */
@@ -282,7 +342,9 @@ tsd_##n##p_get(tsd_t *tsd) { \
state == tsd_state_minimal_initialized); \
return tsd_##n##p_get_unsafe(tsd); \
}
-MALLOC_TSD
+TSD_DATA_SLOW
+TSD_DATA_FAST
+TSD_DATA_SLOWER
#undef O
/*
@@ -298,7 +360,9 @@ tsdn_##n##p_get(tsdn_t *tsdn) { \
tsd_t *tsd = tsdn_tsd(tsdn); \
return (nt *)tsd_##n##p_get(tsd); \
}
-MALLOC_TSD
+TSD_DATA_SLOW
+TSD_DATA_FAST
+TSD_DATA_SLOWER
#undef O
/* tsd_foo_get(tsd) returns the value of the thread-local instance of foo. */
@@ -307,7 +371,9 @@ JEMALLOC_ALWAYS_INLINE t \
tsd_##n##_get(tsd_t *tsd) { \
return *tsd_##n##p_get(tsd); \
}
-MALLOC_TSD
+TSD_DATA_SLOW
+TSD_DATA_FAST
+TSD_DATA_SLOWER
#undef O
/* tsd_foo_set(tsd, val) updates the thread-local instance of foo to be val. */
@@ -318,7 +384,9 @@ tsd_##n##_set(tsd_t *tsd, t val) { \
tsd_state_get(tsd) != tsd_state_minimal_initialized); \
*tsd_##n##p_get(tsd) = val; \
}
-MALLOC_TSD
+TSD_DATA_SLOW
+TSD_DATA_FAST
+TSD_DATA_SLOWER
#undef O
JEMALLOC_ALWAYS_INLINE void
@@ -383,7 +451,10 @@ tsd_fetch(void) {
static inline bool
tsd_nominal(tsd_t *tsd) {
- return (tsd_state_get(tsd) <= tsd_state_nominal_max);
+ bool nominal = tsd_state_get(tsd) <= tsd_state_nominal_max;
+ assert(nominal || tsd_reentrancy_level_get(tsd) > 0);
+
+ return nominal;
}
JEMALLOC_ALWAYS_INLINE tsdn_t *
@@ -413,4 +484,36 @@ tsdn_rtree_ctx(tsdn_t *tsdn, rtree_ctx_t *fallback) {
return tsd_rtree_ctx(tsdn_tsd(tsdn));
}
+static inline bool
+tsd_state_nocleanup(tsd_t *tsd) {
+ return tsd_state_get(tsd) == tsd_state_reincarnated ||
+ tsd_state_get(tsd) == tsd_state_minimal_initialized;
+}
+
+/*
+ * These "raw" tsd reentrancy functions don't have any debug checking to make
+ * sure that we're not touching arena 0. Better is to call pre_reentrancy and
+ * post_reentrancy if this is possible.
+ */
+static inline void
+tsd_pre_reentrancy_raw(tsd_t *tsd) {
+ bool fast = tsd_fast(tsd);
+ assert(tsd_reentrancy_level_get(tsd) < INT8_MAX);
+ ++*tsd_reentrancy_levelp_get(tsd);
+ if (fast) {
+ /* Prepare slow path for reentrancy. */
+ tsd_slow_update(tsd);
+ assert(tsd_state_get(tsd) == tsd_state_nominal_slow);
+ }
+}
+
+static inline void
+tsd_post_reentrancy_raw(tsd_t *tsd) {
+ int8_t *reentrancy_level = tsd_reentrancy_levelp_get(tsd);
+ assert(*reentrancy_level > 0);
+ if (--*reentrancy_level == 0) {
+ tsd_slow_update(tsd);
+ }
+}
+
#endif /* JEMALLOC_INTERNAL_TSD_H */
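
The raw reentrancy guards follow a common pattern: bump a per-thread level before calling out to code that may re-enter the allocator, drop it afterwards, and stay on slow paths while the level is nonzero. A generic, self-contained sketch of that pattern (names and the plain _Thread_local counter are illustrative; jemalloc keeps the level in TSD):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative per-thread level. */
    static _Thread_local int8_t reentrancy_level;

    static void
    pre_reentrancy(void) {
    	assert(reentrancy_level < INT8_MAX);
    	reentrancy_level++;	/* Force slow paths while nonzero. */
    }

    static void
    post_reentrancy(void) {
    	assert(reentrancy_level > 0);
    	reentrancy_level--;
    }

    static void
    callback_that_may_allocate(void) {
    	printf("take slow path? %s\n", reentrancy_level > 0 ? "yes" : "no");
    }

    int
    main(void) {
    	pre_reentrancy();
    	callback_that_may_allocate();	/* "yes" */
    	post_reentrancy();
    	callback_that_may_allocate();	/* "no" */
    	return 0;
    }
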
diff --git a/contrib/jemalloc/include/jemalloc/internal/tsd_generic.h b/contrib/jemalloc/include/jemalloc/internal/tsd_generic.h
index cf73c0c71553..a718472f32ef 100644
--- a/contrib/jemalloc/include/jemalloc/internal/tsd_generic.h
+++ b/contrib/jemalloc/include/jemalloc/internal/tsd_generic.h
@@ -52,6 +52,9 @@ tsd_cleanup_wrapper(void *arg) {
JEMALLOC_ALWAYS_INLINE void
tsd_wrapper_set(tsd_wrapper_t *wrapper) {
+ if (unlikely(!tsd_booted)) {
+ return;
+ }
if (pthread_setspecific(tsd_tsd, (void *)wrapper) != 0) {
malloc_write("<jemalloc>: Error setting TSD\n");
abort();
@@ -60,7 +63,13 @@ tsd_wrapper_set(tsd_wrapper_t *wrapper) {
JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
tsd_wrapper_get(bool init) {
- tsd_wrapper_t *wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd);
+ tsd_wrapper_t *wrapper;
+
+ if (unlikely(!tsd_booted)) {
+ return &tsd_boot_wrapper;
+ }
+
+ wrapper = (tsd_wrapper_t *)pthread_getspecific(tsd_tsd);
if (init && unlikely(wrapper == NULL)) {
tsd_init_block_t block;
@@ -91,11 +100,21 @@ tsd_wrapper_get(bool init) {
JEMALLOC_ALWAYS_INLINE bool
tsd_boot0(void) {
+ tsd_wrapper_t *wrapper;
+ tsd_init_block_t block;
+
+ wrapper = (tsd_wrapper_t *)
+ tsd_init_check_recursion(&tsd_init_head, &block);
+ if (wrapper) {
+ return false;
+ }
+ block.data = &tsd_boot_wrapper;
if (pthread_key_create(&tsd_tsd, tsd_cleanup_wrapper) != 0) {
return true;
}
- tsd_wrapper_set(&tsd_boot_wrapper);
tsd_booted = true;
+ tsd_wrapper_set(&tsd_boot_wrapper);
+ tsd_init_finish(&tsd_init_head, &block);
return false;
}
diff --git a/contrib/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h b/contrib/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h
index 65852d5c1492..d8f3ef13c00f 100644
--- a/contrib/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h
+++ b/contrib/jemalloc/include/jemalloc/internal/tsd_malloc_thread_cleanup.h
@@ -21,7 +21,7 @@ tsd_cleanup_wrapper(void) {
JEMALLOC_ALWAYS_INLINE bool
tsd_boot0(void) {
- malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
+ _malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
tsd_booted = true;
return false;
}
diff --git a/contrib/jemalloc/include/jemalloc/internal/tsd_types.h b/contrib/jemalloc/include/jemalloc/internal/tsd_types.h
index 6200af61f3dc..a6ae37da5a21 100644
--- a/contrib/jemalloc/include/jemalloc/internal/tsd_types.h
+++ b/contrib/jemalloc/include/jemalloc/internal/tsd_types.h
@@ -1,7 +1,7 @@
#ifndef JEMALLOC_INTERNAL_TSD_TYPES_H
#define JEMALLOC_INTERNAL_TSD_TYPES_H
-#define MALLOC_TSD_CLEANUPS_MAX 2
+#define MALLOC_TSD_CLEANUPS_MAX 4
typedef struct tsd_s tsd_t;
typedef struct tsdn_s tsdn_t;
diff --git a/contrib/jemalloc/include/jemalloc/internal/tsd_win.h b/contrib/jemalloc/include/jemalloc/internal/tsd_win.h
new file mode 100644
index 000000000000..46be2434b5a5
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/tsd_win.h
@@ -0,0 +1,282 @@
+#ifdef JEMALLOC_INTERNAL_TSD_WIN_H
+#error This file should be included only once, by tsd.h.
+#endif
+#define JEMALLOC_INTERNAL_TSD_WIN_H
+
+typedef struct {
+ bool initialized;
+ tsd_t val;
+} tsd_wrapper_t;
+
+extern DWORD tsd_tsd;
+extern tsd_wrapper_t tsd_boot_wrapper;
+extern bool tsd_booted;
+
+/* Initialization/cleanup. */
+JEMALLOC_ALWAYS_INLINE bool
+tsd_cleanup_wrapper(void) {
+ DWORD error = GetLastError();
+ tsd_wrapper_t *wrapper = (tsd_wrapper_t *)TlsGetValue(tsd_tsd);
+ SetLastError(error);
+
+ if (wrapper == NULL) {
+ return false;
+ }
+
+ if (wrapper->initialized) {
+ wrapper->initialized = false;
+ tsd_cleanup(&wrapper->val);
+ if (wrapper->initialized) {
+ /* Trigger another cleanup round. */
+ return true;
+ }
+ }
+ malloc_tsd_dalloc(wrapper);
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tsd_wrapper_set(tsd_wrapper_t *wrapper) {
+ if (!TlsSetValue(tsd_tsd, (void *)wrapper)) {
+ malloc_write("<jemalloc>: Error setting TSD\n");
+ abort();
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE tsd_wrapper_t *
+tsd_wrapper_get(bool init) {
+ DWORD error = GetLastError();
+ tsd_wrapper_t *wrapper = (tsd_wrapper_t *) TlsGetValue(tsd_tsd);
+ SetLastError(error);
+
+ if (init && unlikely(wrapper == NULL)) {
+ wrapper = (tsd_wrapper_t *)
+ malloc_tsd_malloc(sizeof(tsd_wrapper_t));
+ if (wrapper == NULL) {
+ malloc_write("<jemalloc>: Error allocating TSD\n");
+ abort();
+ } else {
+ wrapper->initialized = false;
+ /* MSVC is finicky about aggregate initialization. */
+ tsd_t tsd_initializer = TSD_INITIALIZER;
+ wrapper->val = tsd_initializer;
+ }
+ tsd_wrapper_set(wrapper);
+ }
+ return wrapper;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_boot0(void) {
+ tsd_tsd = TlsAlloc();
+ if (tsd_tsd == TLS_OUT_OF_INDEXES) {
+ return true;
+ }
+ _malloc_tsd_cleanup_register(&tsd_cleanup_wrapper);
+ tsd_wrapper_set(&tsd_boot_wrapper);
+ tsd_booted = true;
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tsd_boot1(void) {
+ tsd_wrapper_t *wrapper;
+ wrapper = (tsd_wrapper_t *)
+ malloc_tsd_malloc(sizeof(tsd_wrapper_t));
+ if (wrapper == NULL) {
+ malloc_write("<jemalloc>: Error allocating TSD\n");
+ abort();
+ }
+ tsd_boot_wrapper.initialized = false;
+ tsd_cleanup(&tsd_boot_wrapper.val);
+ wrapper->initialized = false;
+ tsd_t initializer = TSD_INITIALIZER;
+ wrapper->val = initializer;
+ tsd_wrapper_set(wrapper);
+}
+JEMALLOC_ALWAYS_INLINE bool
+tsd_boot(void) {
+ if (tsd_boot0()) {
+ return true;
+ }
+ tsd_boot1();
+ return false;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_booted_get(void) {
+ return tsd_booted;
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsd_get_allocates(void) {
+ return true;
+}
+
+/* Get/set. */
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsd_get(bool init) {
+ tsd_wrapper_t *wrapper;
+
+ assert(tsd_booted);
+ wrapper = tsd_wrapper_get(init);
+ if (tsd_get_allocates() && !init && wrapper == NULL) {
+ return NULL;
+ }
+ return &wrapper->val;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tsd_set(tsd_t *val) {
+ tsd_wrapper_t *wrapper;
+
+ assert(tsd_booted);
+ wrapper = tsd_wrapper_get(true);
+ if (likely(&wrapper->val != val)) {
+ wrapper->val = *(val);
+ }
+ wrapper->initialized = true;
+}
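The wrapper indirection above is the standard Win32 TLS pattern: TlsGetValue() resets the thread's last-error code even on success, hence the GetLastError()/SetLastError() save and restore around it. A minimal self-contained sketch of the same pattern follows; payload_t, wrapper_t, and g_slot are illustrative names, not jemalloc's API:

    #include <windows.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int depth; } payload_t;   /* stand-in for tsd_t */
    typedef struct {
        int initialized;
        payload_t val;
    } wrapper_t;

    static DWORD g_slot;                       /* plays the role of tsd_tsd */

    static wrapper_t *
    wrapper_get(void) {
        /* TlsGetValue() clobbers the last-error code; preserve it. */
        DWORD error = GetLastError();
        wrapper_t *w = (wrapper_t *)TlsGetValue(g_slot);
        SetLastError(error);
        if (w == NULL) {
            w = (wrapper_t *)calloc(1, sizeof(*w));
            if (w == NULL || !TlsSetValue(g_slot, w)) {
                abort();
            }
        }
        return w;
    }

    int
    main(void) {
        if ((g_slot = TlsAlloc()) == TLS_OUT_OF_INDEXES) {
            return 1;
        }
        wrapper_get()->val.depth = 42;
        printf("%d\n", wrapper_get()->val.depth);  /* same thread: 42 */
        TlsFree(g_slot);
        return 0;
    }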
diff --git a/contrib/jemalloc/include/jemalloc/internal/typed_list.h b/contrib/jemalloc/include/jemalloc/internal/typed_list.h
new file mode 100644
index 000000000000..6535055a1eca
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/internal/typed_list.h
@@ -0,0 +1,55 @@
+#ifndef JEMALLOC_INTERNAL_TYPED_LIST_H
+#define JEMALLOC_INTERNAL_TYPED_LIST_H
+
+/*
+ * This wraps the ql module to implement a list class in a way that's a little
+ * bit easier to use; it handles ql_elm_new calls and provides type safety.
+ */
+
+#define TYPED_LIST(list_type, el_type, linkage) \
+typedef struct { \
+ ql_head(el_type) head; \
+} list_type##_t; \
+static inline void \
+list_type##_init(list_type##_t *list) { \
+ ql_new(&list->head); \
+} \
+static inline el_type * \
+list_type##_first(const list_type##_t *list) { \
+ return ql_first(&list->head); \
+} \
+static inline el_type * \
+list_type##_last(const list_type##_t *list) { \
+ return ql_last(&list->head, linkage); \
+} \
+static inline void \
+list_type##_append(list_type##_t *list, el_type *item) { \
+ ql_elm_new(item, linkage); \
+ ql_tail_insert(&list->head, item, linkage); \
+} \
+static inline void \
+list_type##_prepend(list_type##_t *list, el_type *item) { \
+ ql_elm_new(item, linkage); \
+ ql_head_insert(&list->head, item, linkage); \
+} \
+static inline void \
+list_type##_replace(list_type##_t *list, el_type *to_remove, \
+ el_type *to_insert) { \
+ ql_elm_new(to_insert, linkage); \
+ ql_after_insert(to_remove, to_insert, linkage); \
+ ql_remove(&list->head, to_remove, linkage); \
+} \
+static inline void \
+list_type##_remove(list_type##_t *list, el_type *item) { \
+ ql_remove(&list->head, item, linkage); \
+} \
+static inline bool \
+list_type##_empty(list_type##_t *list) { \
+ return ql_empty(&list->head); \
+} \
+static inline void \
+list_type##_concat(list_type##_t *list_a, list_type##_t *list_b) { \
+ ql_concat(&list_a->head, &list_b->head, linkage); \
+}
+
+#endif /* JEMALLOC_INTERNAL_TYPED_LIST_H */
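For reference, this is how TYPED_LIST would be instantiated. The sketch assumes jemalloc's internal ql.h and typed_list.h are on the include path; node_t and node_list are made-up names:

    #include "jemalloc/internal/ql.h"
    #include "jemalloc/internal/typed_list.h"
    #include <assert.h>

    typedef struct node_s node_t;
    struct node_s {
        int value;
        ql_elm(node_t) link;     /* the "linkage" argument below */
    };

    /* Generates node_list_t plus node_list_init/_append/_first/... */
    TYPED_LIST(node_list, node_t, link)

    int
    main(void) {
        node_list_t list;
        node_t a = {.value = 1}, b = {.value = 2};

        node_list_init(&list);
        node_list_append(&list, &a);     /* ql_elm_new() happens inside */
        node_list_prepend(&list, &b);
        assert(node_list_first(&list) == &b);
        assert(node_list_last(&list) == &a);
        node_list_remove(&list, &a);
        assert(!node_list_empty(&list));
        return 0;
    }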
diff --git a/contrib/jemalloc/include/jemalloc/internal/util.h b/contrib/jemalloc/include/jemalloc/internal/util.h
index 304cb545afcb..dcb1c0a5d62b 100644
--- a/contrib/jemalloc/include/jemalloc/internal/util.h
+++ b/contrib/jemalloc/include/jemalloc/internal/util.h
@@ -62,6 +62,62 @@ get_errno(void) {
#endif
}
+JEMALLOC_ALWAYS_INLINE void
+util_assume(bool b) {
+ if (!b) {
+ unreachable();
+ }
+}
+
+/* ptr should be valid. */
+JEMALLOC_ALWAYS_INLINE void
+util_prefetch_read(void *ptr) {
+ /*
+ * This should arguably be a config check; but any version of GCC so old
+ * that it doesn't support __builtin_prefetch is also too old to build
+ * jemalloc.
+ */
+#ifdef __GNUC__
+ if (config_debug) {
+ /* Enforce the "valid ptr" requirement. */
+ *(volatile char *)ptr;
+ }
+ __builtin_prefetch(ptr, /* read or write */ 0, /* locality hint */ 3);
+#else
+ *(volatile char *)ptr;
+#endif
+}
+
+JEMALLOC_ALWAYS_INLINE void
+util_prefetch_write(void *ptr) {
+#ifdef __GNUC__
+ if (config_debug) {
+ *(volatile char *)ptr;
+ }
+ /*
+ * The only difference from the read variant is that this has a 1 as the
+ * second argument (the write hint).
+ */
+ __builtin_prefetch(ptr, 1, 3);
+#else
+ *(volatile char *)ptr;
+#endif
+}
+
+JEMALLOC_ALWAYS_INLINE void
+util_prefetch_read_range(void *ptr, size_t sz) {
+ for (size_t i = 0; i < sz; i += CACHELINE) {
+ util_prefetch_read((void *)((uintptr_t)ptr + i));
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE void
+util_prefetch_write_range(void *ptr, size_t sz) {
+ for (size_t i = 0; i < sz; i += CACHELINE) {
+ util_prefetch_write((void *)((uintptr_t)ptr + i));
+ }
+}
+
#undef UTIL_INLINE
#endif /* JEMALLOC_INTERNAL_UTIL_H */
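The prefetch helpers above compile down to __builtin_prefetch with a read/write hint and maximal temporal locality; the debug-mode volatile load turns an invalid pointer into an immediate fault rather than a silently ignored hint. A standalone sketch of the same idiom (the CACHELINE value here is an assumption; jemalloc's configure derives the real one):

    #include <stddef.h>
    #include <stdint.h>

    #define CACHELINE 64   /* assumption; configure computes the real value */

    /* Sum a buffer while prefetching one cache line ahead of the reads. */
    static uint64_t
    sum_with_prefetch(const uint8_t *buf, size_t len) {
        uint64_t total = 0;
        for (size_t i = 0; i < len; i++) {
            if (i % CACHELINE == 0 && i + CACHELINE < len) {
    #ifdef __GNUC__
                /* 0 = read hint, 3 = high temporal locality, as in util.h. */
                __builtin_prefetch(buf + i + CACHELINE, 0, 3);
    #endif
            }
            total += buf[i];
        }
        return total;
    }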
diff --git a/contrib/jemalloc/include/jemalloc/internal/witness.h b/contrib/jemalloc/include/jemalloc/internal/witness.h
index fff9e98cb64f..e81b9a0069c0 100644
--- a/contrib/jemalloc/include/jemalloc/internal/witness.h
+++ b/contrib/jemalloc/include/jemalloc/internal/witness.h
@@ -7,60 +7,76 @@
/* LOCK RANKS */
/******************************************************************************/
-/*
- * Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the witness
- * machinery.
- */
-
-#define WITNESS_RANK_OMIT 0U
-
-#define WITNESS_RANK_MIN 1U
-
-#define WITNESS_RANK_INIT 1U
-#define WITNESS_RANK_CTL 1U
-#define WITNESS_RANK_TCACHES 2U
-#define WITNESS_RANK_ARENAS 3U
-
-#define WITNESS_RANK_BACKGROUND_THREAD_GLOBAL 4U
-
-#define WITNESS_RANK_PROF_DUMP 5U
-#define WITNESS_RANK_PROF_BT2GCTX 6U
-#define WITNESS_RANK_PROF_TDATAS 7U
-#define WITNESS_RANK_PROF_TDATA 8U
-#define WITNESS_RANK_PROF_LOG 9U
-#define WITNESS_RANK_PROF_GCTX 10U
-#define WITNESS_RANK_BACKGROUND_THREAD 11U
-
-/*
- * Used as an argument to witness_assert_depth_to_rank() in order to validate
- * depth excluding non-core locks with lower ranks. Since the rank argument to
- * witness_assert_depth_to_rank() is inclusive rather than exclusive, this
- * definition can have the same value as the minimally ranked core lock.
- */
-#define WITNESS_RANK_CORE 12U
-
-#define WITNESS_RANK_DECAY 12U
-#define WITNESS_RANK_TCACHE_QL 13U
-#define WITNESS_RANK_EXTENT_GROW 14U
-#define WITNESS_RANK_EXTENTS 15U
-#define WITNESS_RANK_EXTENT_AVAIL 16U
-
-#define WITNESS_RANK_EXTENT_POOL 17U
-#define WITNESS_RANK_RTREE 18U
-#define WITNESS_RANK_BASE 19U
-#define WITNESS_RANK_ARENA_LARGE 20U
-#define WITNESS_RANK_HOOK 21U
-
-#define WITNESS_RANK_LEAF 0xffffffffU
-#define WITNESS_RANK_BIN WITNESS_RANK_LEAF
-#define WITNESS_RANK_ARENA_STATS WITNESS_RANK_LEAF
-#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_ACCUM WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
-#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
+enum witness_rank_e {
+ /*
+ * Order matters within this enum listing -- higher valued locks can
+ * only be acquired after lower-valued ones. We use the
+ * auto-incrementing-ness of enum values to enforce this.
+ */
+
+ /*
+ * Witnesses with rank WITNESS_RANK_OMIT are completely ignored by the
+ * witness machinery.
+ */
+ WITNESS_RANK_OMIT,
+ WITNESS_RANK_MIN,
+ WITNESS_RANK_INIT = WITNESS_RANK_MIN,
+ WITNESS_RANK_CTL,
+ WITNESS_RANK_TCACHES,
+ WITNESS_RANK_ARENAS,
+ WITNESS_RANK_BACKGROUND_THREAD_GLOBAL,
+ WITNESS_RANK_PROF_DUMP,
+ WITNESS_RANK_PROF_BT2GCTX,
+ WITNESS_RANK_PROF_TDATAS,
+ WITNESS_RANK_PROF_TDATA,
+ WITNESS_RANK_PROF_LOG,
+ WITNESS_RANK_PROF_GCTX,
+ WITNESS_RANK_PROF_RECENT_DUMP,
+ WITNESS_RANK_BACKGROUND_THREAD,
+ /*
+ * Used as an argument to witness_assert_depth_to_rank() in order to
+ * validate depth excluding non-core locks with lower ranks. Since the
+ * rank argument to witness_assert_depth_to_rank() is inclusive rather
+ * than exclusive, this definition can have the same value as the
+ * minimally ranked core lock.
+ */
+ WITNESS_RANK_CORE,
+ WITNESS_RANK_DECAY = WITNESS_RANK_CORE,
+ WITNESS_RANK_TCACHE_QL,
+
+ WITNESS_RANK_SEC_SHARD,
+
+ WITNESS_RANK_EXTENT_GROW,
+ WITNESS_RANK_HPA_SHARD_GROW = WITNESS_RANK_EXTENT_GROW,
+ WITNESS_RANK_SAN_BUMP_ALLOC = WITNESS_RANK_EXTENT_GROW,
+
+ WITNESS_RANK_EXTENTS,
+ WITNESS_RANK_HPA_SHARD = WITNESS_RANK_EXTENTS,
+
+ WITNESS_RANK_HPA_CENTRAL_GROW,
+ WITNESS_RANK_HPA_CENTRAL,
+
+ WITNESS_RANK_EDATA_CACHE,
+
+ WITNESS_RANK_RTREE,
+ WITNESS_RANK_BASE,
+ WITNESS_RANK_ARENA_LARGE,
+ WITNESS_RANK_HOOK,
+
+ WITNESS_RANK_LEAF=0x1000,
+ WITNESS_RANK_BIN = WITNESS_RANK_LEAF,
+ WITNESS_RANK_ARENA_STATS = WITNESS_RANK_LEAF,
+ WITNESS_RANK_COUNTER_ACCUM = WITNESS_RANK_LEAF,
+ WITNESS_RANK_DSS = WITNESS_RANK_LEAF,
+ WITNESS_RANK_PROF_ACTIVE = WITNESS_RANK_LEAF,
+ WITNESS_RANK_PROF_DUMP_FILENAME = WITNESS_RANK_LEAF,
+ WITNESS_RANK_PROF_GDUMP = WITNESS_RANK_LEAF,
+ WITNESS_RANK_PROF_NEXT_THR_UID = WITNESS_RANK_LEAF,
+ WITNESS_RANK_PROF_RECENT_ALLOC = WITNESS_RANK_LEAF,
+ WITNESS_RANK_PROF_STATS = WITNESS_RANK_LEAF,
+ WITNESS_RANK_PROF_THREAD_ACTIVE_INIT = WITNESS_RANK_LEAF,
+};
+typedef enum witness_rank_e witness_rank_t;
/******************************************************************************/
/* PER-WITNESS DATA */
@@ -72,7 +88,6 @@
#endif
typedef struct witness_s witness_t;
-typedef unsigned witness_rank_t;
typedef ql_head(witness_t) witness_list_t;
typedef int witness_comp_t (const witness_t *, void *, const witness_t *,
void *);
@@ -82,8 +97,8 @@ struct witness_s {
const char *name;
/*
- * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses
- * must be acquired in order of increasing rank.
+ * Witness rank, where 0 is lowest and WITNESS_RANK_LEAF is highest.
+ * Witnesses must be acquired in order of increasing rank.
*/
witness_rank_t rank;
@@ -228,26 +243,13 @@ witness_assert_not_owner(witness_tsdn_t *witness_tsdn,
}
}
-static inline void
-witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn,
- witness_rank_t rank_inclusive, unsigned depth) {
- witness_tsd_t *witness_tsd;
- unsigned d;
- witness_list_t *witnesses;
- witness_t *w;
-
- if (!config_debug) {
- return;
- }
+/* Returns depth. Not intended for direct use. */
+static inline unsigned
+witness_depth_to_rank(witness_list_t *witnesses, witness_rank_t rank_inclusive)
+{
+ unsigned d = 0;
+ witness_t *w = ql_last(witnesses, link);
- if (witness_tsdn_null(witness_tsdn)) {
- return;
- }
- witness_tsd = witness_tsdn_tsd(witness_tsdn);
-
- d = 0;
- witnesses = &witness_tsd->witnesses;
- w = ql_last(witnesses, link);
if (w != NULL) {
ql_reverse_foreach(w, witnesses, link) {
if (w->rank < rank_inclusive) {
@@ -256,6 +258,20 @@ witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn,
d++;
}
}
+
+ return d;
+}
+
+static inline void
+witness_assert_depth_to_rank(witness_tsdn_t *witness_tsdn,
+ witness_rank_t rank_inclusive, unsigned depth) {
+ if (!config_debug || witness_tsdn_null(witness_tsdn)) {
+ return;
+ }
+
+ witness_list_t *witnesses = &witness_tsdn_tsd(witness_tsdn)->witnesses;
+ unsigned d = witness_depth_to_rank(witnesses, rank_inclusive);
+
if (d != depth) {
witness_depth_error(witnesses, rank_inclusive, depth);
}
@@ -272,6 +288,21 @@ witness_assert_lockless(witness_tsdn_t *witness_tsdn) {
}
static inline void
+witness_assert_positive_depth_to_rank(witness_tsdn_t *witness_tsdn,
+ witness_rank_t rank_inclusive) {
+ if (!config_debug || witness_tsdn_null(witness_tsdn)) {
+ return;
+ }
+
+ witness_list_t *witnesses = &witness_tsdn_tsd(witness_tsdn)->witnesses;
+ unsigned d = witness_depth_to_rank(witnesses, rank_inclusive);
+
+ if (d == 0) {
+ witness_depth_error(witnesses, rank_inclusive, 1);
+ }
+}
+
+static inline void
witness_lock(witness_tsdn_t *witness_tsdn, witness_t *witness) {
witness_tsd_t *witness_tsd;
witness_list_t *witnesses;
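The enum above replaces the old #define ranks, but the invariant is unchanged: a thread may only acquire locks in order of increasing rank. A simplified sketch of that check (the real witness code additionally permits equal ranks when a per-witness comparator approves the pair; names here are illustrative):

    #include <assert.h>

    typedef unsigned rank_t;
    typedef struct {
        rank_t max_held;   /* highest rank among locks this thread holds */
    } thread_witness_t;

    static void
    witness_lock_checked(thread_witness_t *tw, rank_t rank) {
        /*
         * Deadlock-avoidance invariant: never acquire a lock whose rank
         * is below one already held.
         */
        assert(rank >= tw->max_held);
        if (rank > tw->max_held) {
            tw->max_held = rank;
        }
        /* ...acquire the underlying mutex here... */
    }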
diff --git a/contrib/jemalloc/include/jemalloc/jemalloc.sh b/contrib/jemalloc/include/jemalloc/jemalloc.sh
new file mode 100755
index 000000000000..b19b1548b3d7
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/jemalloc.sh
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+objroot=$1
+
+cat <<EOF
+#ifndef JEMALLOC_H_
+#define JEMALLOC_H_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+EOF
+
+for hdr in jemalloc_defs.h jemalloc_rename.h jemalloc_macros.h \
+ jemalloc_protos.h jemalloc_typedefs.h jemalloc_mangle.h ; do
+ cat "${objroot}include/jemalloc/${hdr}" \
+ | grep -v 'Generated from .* by configure\.' \
+ | sed -e 's/ $//g'
+ echo
+done
+
+cat <<EOF
+#ifdef __cplusplus
+}
+#endif
+#endif /* JEMALLOC_H_ */
+EOF
diff --git a/contrib/jemalloc/include/jemalloc/jemalloc_defs.h.in b/contrib/jemalloc/include/jemalloc/jemalloc_defs.h.in
new file mode 100644
index 000000000000..19c990dcdbdf
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/jemalloc_defs.h.in
@@ -0,0 +1,54 @@
+/* Defined if __attribute__((...)) syntax is supported. */
+#undef JEMALLOC_HAVE_ATTR
+
+/* Defined if alloc_size attribute is supported. */
+#undef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+
+/* Defined if format_arg(...) attribute is supported. */
+#undef JEMALLOC_HAVE_ATTR_FORMAT_ARG
+
+/* Defined if format(gnu_printf, ...) attribute is supported. */
+#undef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+
+/* Defined if format(printf, ...) attribute is supported. */
+#undef JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
+
+/* Defined if fallthrough attribute is supported. */
+#undef JEMALLOC_HAVE_ATTR_FALLTHROUGH
+
+/* Defined if cold attribute is supported. */
+#undef JEMALLOC_HAVE_ATTR_COLD
+
+/*
+ * Define overrides for non-standard allocator-related functions if they are
+ * present on the system.
+ */
+#undef JEMALLOC_OVERRIDE_MEMALIGN
+#undef JEMALLOC_OVERRIDE_VALLOC
+
+/*
+ * At least Linux omits the "const" in:
+ *
+ * size_t malloc_usable_size(const void *ptr);
+ *
+ * Match the operating system's prototype.
+ */
+#undef JEMALLOC_USABLE_SIZE_CONST
+
+/*
+ * If defined, specify throw() for the public function prototypes when compiling
+ * with C++. The only justification for this is to match the prototypes that
+ * glibc defines.
+ */
+#undef JEMALLOC_USE_CXX_THROW
+
+#ifdef _MSC_VER
+# ifdef _WIN64
+# define LG_SIZEOF_PTR_WIN 3
+# else
+# define LG_SIZEOF_PTR_WIN 2
+# endif
+#endif
+
+/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
+#undef LG_SIZEOF_PTR
diff --git a/contrib/jemalloc/include/jemalloc/jemalloc_macros.h.in b/contrib/jemalloc/include/jemalloc/jemalloc_macros.h.in
new file mode 100644
index 000000000000..dc57f521f580
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/jemalloc_macros.h.in
@@ -0,0 +1,149 @@
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <limits.h>
+#include <strings.h>
+
+#define JEMALLOC_VERSION "@jemalloc_version@"
+#define JEMALLOC_VERSION_MAJOR @jemalloc_version_major@
+#define JEMALLOC_VERSION_MINOR @jemalloc_version_minor@
+#define JEMALLOC_VERSION_BUGFIX @jemalloc_version_bugfix@
+#define JEMALLOC_VERSION_NREV @jemalloc_version_nrev@
+#define JEMALLOC_VERSION_GID "@jemalloc_version_gid@"
+#define JEMALLOC_VERSION_GID_IDENT @jemalloc_version_gid@
+
+#define MALLOCX_LG_ALIGN(la) ((int)(la))
+#if LG_SIZEOF_PTR == 2
+# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
+#else
+# define MALLOCX_ALIGN(a) \
+ ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
+ ffs((int)(((size_t)(a))>>32))+31))
+#endif
+#define MALLOCX_ZERO ((int)0x40)
+/*
+ * Bias tcache index bits so that 0 encodes "automatic tcache management", and 1
+ * encodes MALLOCX_TCACHE_NONE.
+ */
+#define MALLOCX_TCACHE(tc) ((int)(((tc)+2) << 8))
+#define MALLOCX_TCACHE_NONE MALLOCX_TCACHE(-1)
+/*
+ * Bias arena index bits so that 0 encodes "use an automatically chosen arena".
+ */
+#define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
+
+/*
+ * Use as arena index in "arena.<i>.{purge,decay,dss}" and
+ * "stats.arenas.<i>.*" mallctl interfaces to select all arenas. This
+ * definition is intentionally specified in raw decimal format to support
+ * cpp-based string concatenation, e.g.
+ *
+ * #define STRINGIFY_HELPER(x) #x
+ * #define STRINGIFY(x) STRINGIFY_HELPER(x)
+ *
+ * mallctl("arena." STRINGIFY(MALLCTL_ARENAS_ALL) ".purge", NULL, NULL, NULL,
+ * 0);
+ */
+#define MALLCTL_ARENAS_ALL 4096
+/*
+ * Use as arena index in "stats.arenas.<i>.*" mallctl interfaces to select
+ * destroyed arenas.
+ */
+#define MALLCTL_ARENAS_DESTROYED 4097
+
+#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
+# define JEMALLOC_CXX_THROW throw()
+#else
+# define JEMALLOC_CXX_THROW
+#endif
+
+#if defined(_MSC_VER)
+# define JEMALLOC_ATTR(s)
+# define JEMALLOC_ALIGNED(s) __declspec(align(s))
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# ifndef JEMALLOC_EXPORT
+# ifdef DLLEXPORT
+# define JEMALLOC_EXPORT __declspec(dllexport)
+# else
+# define JEMALLOC_EXPORT __declspec(dllimport)
+# endif
+# endif
+# define JEMALLOC_FORMAT_ARG(i)
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# define JEMALLOC_FALLTHROUGH
+# define JEMALLOC_NOINLINE __declspec(noinline)
+# ifdef __cplusplus
+# define JEMALLOC_NOTHROW __declspec(nothrow)
+# else
+# define JEMALLOC_NOTHROW
+# endif
+# define JEMALLOC_SECTION(s) __declspec(allocate(s))
+# define JEMALLOC_RESTRICT_RETURN __declspec(restrict)
+# if _MSC_VER >= 1900 && !defined(__EDG__)
+# define JEMALLOC_ALLOCATOR __declspec(allocator)
+# else
+# define JEMALLOC_ALLOCATOR
+# endif
+# define JEMALLOC_COLD
+#elif defined(JEMALLOC_HAVE_ATTR)
+# define JEMALLOC_ATTR(s) __attribute__((s))
+# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
+# ifdef JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+# define JEMALLOC_ALLOC_SIZE(s) JEMALLOC_ATTR(alloc_size(s))
+# define JEMALLOC_ALLOC_SIZE2(s1, s2) JEMALLOC_ATTR(alloc_size(s1, s2))
+# else
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# endif
+# ifndef JEMALLOC_EXPORT
+# define JEMALLOC_EXPORT JEMALLOC_ATTR(visibility("default"))
+# endif
+# ifdef JEMALLOC_HAVE_ATTR_FORMAT_ARG
+# define JEMALLOC_FORMAT_ARG(i) JEMALLOC_ATTR(__format_arg__(3))
+# else
+# define JEMALLOC_FORMAT_ARG(i)
+# endif
+# ifdef JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(gnu_printf, s, i))
+# elif defined(JEMALLOC_HAVE_ATTR_FORMAT_PRINTF)
+# define JEMALLOC_FORMAT_PRINTF(s, i) JEMALLOC_ATTR(format(printf, s, i))
+# else
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# endif
+# ifdef JEMALLOC_HAVE_ATTR_FALLTHROUGH
+# define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough)
+# else
+# define JEMALLOC_FALLTHROUGH
+# endif
+# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
+# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
+# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
+# define JEMALLOC_RESTRICT_RETURN
+# define JEMALLOC_ALLOCATOR
+# ifdef JEMALLOC_HAVE_ATTR_COLD
+# define JEMALLOC_COLD JEMALLOC_ATTR(__cold__)
+# else
+# define JEMALLOC_COLD
+# endif
+#else
+# define JEMALLOC_ATTR(s)
+# define JEMALLOC_ALIGNED(s)
+# define JEMALLOC_ALLOC_SIZE(s)
+# define JEMALLOC_ALLOC_SIZE2(s1, s2)
+# define JEMALLOC_EXPORT
+# define JEMALLOC_FORMAT_PRINTF(s, i)
+# define JEMALLOC_FALLTHROUGH
+# define JEMALLOC_NOINLINE
+# define JEMALLOC_NOTHROW
+# define JEMALLOC_SECTION(s)
+# define JEMALLOC_RESTRICT_RETURN
+# define JEMALLOC_ALLOCATOR
+# define JEMALLOC_COLD
+#endif
+
+#if (defined(__APPLE__) || defined(__FreeBSD__)) && !defined(JEMALLOC_NO_RENAME)
+# define JEMALLOC_SYS_NOTHROW
+#else
+# define JEMALLOC_SYS_NOTHROW JEMALLOC_NOTHROW
+#endif
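The MALLOCX_* macros pack everything into a single int flags word: the low 6 bits hold lg(alignment), bit 0x40 requests zeroing, and the tcache and arena indices are biased so that an encoding of 0 keeps the automatic behavior. A small example of composing them with the mallocx()/dallocx() entry points:

    #include <jemalloc/jemalloc.h>

    int
    main(void) {
        /*
         * MALLOCX_ALIGN(64) stores lg(64) == 6 in the low bits and
         * MALLOCX_ZERO sets bit 0x40.  With no MALLOCX_ARENA() term, an
         * arena is chosen automatically (the 0 encoding).
         */
        void *p = mallocx(1024, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
        if (p == NULL) {
            return 1;
        }
        /* Here (uintptr_t)p % 64 == 0 and all 1024 bytes read as zero. */
        dallocx(p, 0);
        return 0;
    }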
diff --git a/contrib/jemalloc/include/jemalloc/jemalloc_mangle.sh b/contrib/jemalloc/include/jemalloc/jemalloc_mangle.sh
new file mode 100755
index 000000000000..c675bb469f1c
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/jemalloc_mangle.sh
@@ -0,0 +1,45 @@
+#!/bin/sh -eu
+
+public_symbols_txt=$1
+symbol_prefix=$2
+
+cat <<EOF
+/*
+ * By default application code must explicitly refer to mangled symbol names,
+ * so that it is possible to use jemalloc in conjunction with another allocator
+ * in the same application. Define JEMALLOC_MANGLE in order to cause automatic
+ * name mangling that matches the API prefixing that happened as a result of
+ * --with-mangling and/or --with-jemalloc-prefix configuration settings.
+ */
+#ifdef JEMALLOC_MANGLE
+# ifndef JEMALLOC_NO_DEMANGLE
+# define JEMALLOC_NO_DEMANGLE
+# endif
+EOF
+
+for nm in `cat ${public_symbols_txt}` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
+ echo "# define ${n} ${symbol_prefix}${n}"
+done
+
+cat <<EOF
+#endif
+
+/*
+ * The ${symbol_prefix}* macros can be used as stable alternative names for the
+ * public jemalloc API if JEMALLOC_NO_DEMANGLE is defined. This is primarily
+ * meant for use in jemalloc itself, but it can be used by application code to
+ * provide isolation from the name mangling specified via --with-mangling
+ * and/or --with-jemalloc-prefix.
+ */
+#ifndef JEMALLOC_NO_DEMANGLE
+EOF
+
+for nm in `cat ${public_symbols_txt}` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
+ echo "# undef ${symbol_prefix}${n}"
+done
+
+cat <<EOF
+#endif
+EOF
diff --git a/contrib/jemalloc/include/jemalloc/jemalloc_protos.h.in b/contrib/jemalloc/include/jemalloc/jemalloc_protos.h.in
new file mode 100644
index 000000000000..116ef116b4e1
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/jemalloc_protos.h.in
@@ -0,0 +1,71 @@
+/*
+ * The @je_@ prefix on the following public symbol declarations is an artifact
+ * of namespace management, and should be omitted in application code unless
+ * JEMALLOC_NO_DEMANGLE is defined (see jemalloc_mangle@install_suffix@.h).
+ */
+extern JEMALLOC_EXPORT const char *@je_@malloc_conf;
+extern JEMALLOC_EXPORT void (*@je_@malloc_message)(void *cbopaque,
+ const char *s);
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_SYS_NOTHROW *@je_@malloc(size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_SYS_NOTHROW *@je_@calloc(size_t num, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
+JEMALLOC_EXPORT int JEMALLOC_SYS_NOTHROW @je_@posix_memalign(
+ void **memptr, size_t alignment, size_t size) JEMALLOC_CXX_THROW
+ JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_SYS_NOTHROW *@je_@aligned_alloc(size_t alignment,
+ size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
+ JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_SYS_NOTHROW *@je_@realloc(void *ptr, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT void JEMALLOC_SYS_NOTHROW @je_@free(void *ptr)
+ JEMALLOC_CXX_THROW;
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *@je_@mallocx(size_t size, int flags)
+ JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_NOTHROW *@je_@rallocx(void *ptr, size_t size,
+ int flags) JEMALLOC_ALLOC_SIZE(2);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@xallocx(void *ptr, size_t size,
+ size_t extra, int flags);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@sallocx(const void *ptr,
+ int flags) JEMALLOC_ATTR(pure);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@dallocx(void *ptr, int flags);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@sdallocx(void *ptr, size_t size,
+ int flags);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@nallocx(size_t size, int flags)
+ JEMALLOC_ATTR(pure);
+
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@mallctl(const char *name,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@mallctlnametomib(const char *name,
+ size_t *mibp, size_t *miblenp);
+JEMALLOC_EXPORT int JEMALLOC_NOTHROW @je_@mallctlbymib(const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen);
+JEMALLOC_EXPORT void JEMALLOC_NOTHROW @je_@malloc_stats_print(
+ void (*write_cb)(void *, const char *), void *@je_@cbopaque,
+ const char *opts);
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@malloc_usable_size(
+ JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
+#ifdef JEMALLOC_HAVE_MALLOC_SIZE
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW @je_@malloc_size(
+ const void *ptr);
+#endif
+
+#ifdef JEMALLOC_OVERRIDE_MEMALIGN
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_SYS_NOTHROW *@je_@memalign(size_t alignment, size_t size)
+ JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
+#endif
+
+#ifdef JEMALLOC_OVERRIDE_VALLOC
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+ void JEMALLOC_SYS_NOTHROW *@je_@valloc(size_t size) JEMALLOC_CXX_THROW
+ JEMALLOC_ATTR(malloc);
+#endif
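Most of the prototypes above are the familiar malloc(3) family; mallctl() is the introspection and control entry point, following a sysctl-like oldp/newp convention. For example, refreshing the statistics epoch and then reading the allocator-wide footprint (both "epoch" and "stats.allocated" are documented mallctl names):

    #include <jemalloc/jemalloc.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void) {
        /* Writing "epoch" refreshes the snapshot that stats reads see. */
        uint64_t epoch = 1;
        size_t sz = sizeof(epoch);
        mallctl("epoch", &epoch, &sz, &epoch, sizeof(epoch));

        size_t allocated;
        sz = sizeof(allocated);
        if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0) {
            printf("allocated: %zu bytes\n", allocated);
        }
        return 0;
    }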
diff --git a/contrib/jemalloc/include/jemalloc/jemalloc_rename.sh b/contrib/jemalloc/include/jemalloc/jemalloc_rename.sh
new file mode 100755
index 000000000000..f94389120e5e
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/jemalloc_rename.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+
+public_symbols_txt=$1
+
+cat <<EOF
+/*
+ * Name mangling for public symbols is controlled by --with-mangling and
+ * --with-jemalloc-prefix. With default settings the je_ prefix is stripped by
+ * these macro definitions.
+ */
+#ifndef JEMALLOC_NO_RENAME
+EOF
+
+for nm in `cat ${public_symbols_txt}` ; do
+ n=`echo ${nm} |tr ':' ' ' |awk '{print $1}'`
+ m=`echo ${nm} |tr ':' ' ' |awk '{print $2}'`
+ echo "# define je_${n} ${m}"
+done
+
+cat <<EOF
+#endif
+EOF
diff --git a/contrib/jemalloc/include/jemalloc/jemalloc_typedefs.h.in b/contrib/jemalloc/include/jemalloc/jemalloc_typedefs.h.in
new file mode 100644
index 000000000000..1a58874306eb
--- /dev/null
+++ b/contrib/jemalloc/include/jemalloc/jemalloc_typedefs.h.in
@@ -0,0 +1,77 @@
+typedef struct extent_hooks_s extent_hooks_t;
+
+/*
+ * void *
+ * extent_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
+ * size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
+ */
+typedef void *(extent_alloc_t)(extent_hooks_t *, void *, size_t, size_t, bool *,
+ bool *, unsigned);
+
+/*
+ * bool
+ * extent_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * bool committed, unsigned arena_ind);
+ */
+typedef bool (extent_dalloc_t)(extent_hooks_t *, void *, size_t, bool,
+ unsigned);
+
+/*
+ * void
+ * extent_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * bool committed, unsigned arena_ind);
+ */
+typedef void (extent_destroy_t)(extent_hooks_t *, void *, size_t, bool,
+ unsigned);
+
+/*
+ * bool
+ * extent_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t offset, size_t length, unsigned arena_ind);
+ */
+typedef bool (extent_commit_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
+ unsigned);
+
+/*
+ * bool
+ * extent_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t offset, size_t length, unsigned arena_ind);
+ */
+typedef bool (extent_decommit_t)(extent_hooks_t *, void *, size_t, size_t,
+ size_t, unsigned);
+
+/*
+ * bool
+ * extent_purge(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t offset, size_t length, unsigned arena_ind);
+ */
+typedef bool (extent_purge_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
+ unsigned);
+
+/*
+ * bool
+ * extent_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ * size_t size_a, size_t size_b, bool committed, unsigned arena_ind);
+ */
+typedef bool (extent_split_t)(extent_hooks_t *, void *, size_t, size_t, size_t,
+ bool, unsigned);
+
+/*
+ * bool
+ * extent_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
+ * void *addr_b, size_t size_b, bool committed, unsigned arena_ind);
+ */
+typedef bool (extent_merge_t)(extent_hooks_t *, void *, size_t, void *, size_t,
+ bool, unsigned);
+
+struct extent_hooks_s {
+ extent_alloc_t *alloc;
+ extent_dalloc_t *dalloc;
+ extent_destroy_t *destroy;
+ extent_commit_t *commit;
+ extent_decommit_t *decommit;
+ extent_purge_t *purge_lazy;
+ extent_purge_t *purge_forced;
+ extent_split_t *split;
+ extent_merge_t *merge;
+};
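extent_hooks_t lets an application take over how arenas obtain and return address ranges; a hook table is installed per arena through the "arena.<i>.extent_hooks" mallctl. A sketch that wraps only the alloc hook of arena 0 to log extent allocations (error handling trimmed; not a production implementation):

    #include <jemalloc/jemalloc.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static extent_hooks_t *orig_hooks;   /* arena 0's original hook table */
    static extent_hooks_t log_hooks;

    static void *
    log_alloc(extent_hooks_t *eh, void *new_addr, size_t size,
        size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
        void *ret = orig_hooks->alloc(orig_hooks, new_addr, size,
            alignment, zero, commit, arena_ind);
        fprintf(stderr, "extent alloc %zu -> %p\n", size, ret);
        return ret;
    }

    int
    main(void) {
        size_t sz = sizeof(orig_hooks);
        if (mallctl("arena.0.extent_hooks", &orig_hooks, &sz, NULL, 0) != 0) {
            return 1;
        }
        log_hooks = *orig_hooks;          /* copy, then override one slot */
        log_hooks.alloc = log_alloc;
        extent_hooks_t *newp = &log_hooks;
        mallctl("arena.0.extent_hooks", NULL, NULL, &newp, sizeof(newp));
        free(malloc(4 << 20));            /* big enough to need a new extent */
        return 0;
    }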
diff --git a/contrib/jemalloc/include/msvc_compat/C99/stdbool.h b/contrib/jemalloc/include/msvc_compat/C99/stdbool.h
new file mode 100644
index 000000000000..d92160ebc752
--- /dev/null
+++ b/contrib/jemalloc/include/msvc_compat/C99/stdbool.h
@@ -0,0 +1,20 @@
+#ifndef stdbool_h
+#define stdbool_h
+
+#include <wtypes.h>
+
+/* MSVC doesn't define _Bool or bool in C, but does have BOOL */
+/* Note this doesn't pass autoconf's test because (bool) 0.5 != true */
+/* Clang-cl uses MSVC headers, so needs msvc_compat, but has _Bool as
+ * a built-in type. */
+#ifndef __clang__
+typedef BOOL _Bool;
+#endif
+
+#define bool _Bool
+#define true 1
+#define false 0
+
+#define __bool_true_false_are_defined 1
+
+#endif /* stdbool_h */
diff --git a/contrib/jemalloc/include/msvc_compat/C99/stdint.h b/contrib/jemalloc/include/msvc_compat/C99/stdint.h
new file mode 100644
index 000000000000..d02608a59726
--- /dev/null
+++ b/contrib/jemalloc/include/msvc_compat/C99/stdint.h
@@ -0,0 +1,247 @@
+// ISO C9x compliant stdint.h for Microsoft Visual Studio
+// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
+//
+// Copyright (c) 2006-2008 Alexander Chemeris
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. The name of the author may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef _MSC_VER // [
+#error "Use this header only with Microsoft Visual C++ compilers!"
+#endif // _MSC_VER ]
+
+#ifndef _MSC_STDINT_H_ // [
+#define _MSC_STDINT_H_
+
+#if _MSC_VER > 1000
+#pragma once
+#endif
+
+#include <limits.h>
+
+// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
+// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
+// or the compiler gives many errors like this:
+// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
+#ifdef __cplusplus
+extern "C" {
+#endif
+# include <wchar.h>
+#ifdef __cplusplus
+}
+#endif
+
+// Define _W64 macros to mark types changing their size, like intptr_t.
+#ifndef _W64
+# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
+# define _W64 __w64
+# else
+# define _W64
+# endif
+#endif
+
+
+// 7.18.1 Integer types
+
+// 7.18.1.1 Exact-width integer types
+
+// Visual Studio 6 and Embedded Visual C++ 4 don't
+// realize that, e.g., char has the same size as __int8,
+// so we give up on __intX for them.
+#if (_MSC_VER < 1300)
+ typedef signed char int8_t;
+ typedef signed short int16_t;
+ typedef signed int int32_t;
+ typedef unsigned char uint8_t;
+ typedef unsigned short uint16_t;
+ typedef unsigned int uint32_t;
+#else
+ typedef signed __int8 int8_t;
+ typedef signed __int16 int16_t;
+ typedef signed __int32 int32_t;
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int16 uint16_t;
+ typedef unsigned __int32 uint32_t;
+#endif
+typedef signed __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+
+
+// 7.18.1.2 Minimum-width integer types
+typedef int8_t int_least8_t;
+typedef int16_t int_least16_t;
+typedef int32_t int_least32_t;
+typedef int64_t int_least64_t;
+typedef uint8_t uint_least8_t;
+typedef uint16_t uint_least16_t;
+typedef uint32_t uint_least32_t;
+typedef uint64_t uint_least64_t;
+
+// 7.18.1.3 Fastest minimum-width integer types
+typedef int8_t int_fast8_t;
+typedef int16_t int_fast16_t;
+typedef int32_t int_fast32_t;
+typedef int64_t int_fast64_t;
+typedef uint8_t uint_fast8_t;
+typedef uint16_t uint_fast16_t;
+typedef uint32_t uint_fast32_t;
+typedef uint64_t uint_fast64_t;
+
+// 7.18.1.4 Integer types capable of holding object pointers
+#ifdef _WIN64 // [
+ typedef signed __int64 intptr_t;
+ typedef unsigned __int64 uintptr_t;
+#else // _WIN64 ][
+ typedef _W64 signed int intptr_t;
+ typedef _W64 unsigned int uintptr_t;
+#endif // _WIN64 ]
+
+// 7.18.1.5 Greatest-width integer types
+typedef int64_t intmax_t;
+typedef uint64_t uintmax_t;
+
+
+// 7.18.2 Limits of specified-width integer types
+
+#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
+
+// 7.18.2.1 Limits of exact-width integer types
+#define INT8_MIN ((int8_t)_I8_MIN)
+#define INT8_MAX _I8_MAX
+#define INT16_MIN ((int16_t)_I16_MIN)
+#define INT16_MAX _I16_MAX
+#define INT32_MIN ((int32_t)_I32_MIN)
+#define INT32_MAX _I32_MAX
+#define INT64_MIN ((int64_t)_I64_MIN)
+#define INT64_MAX _I64_MAX
+#define UINT8_MAX _UI8_MAX
+#define UINT16_MAX _UI16_MAX
+#define UINT32_MAX _UI32_MAX
+#define UINT64_MAX _UI64_MAX
+
+// 7.18.2.2 Limits of minimum-width integer types
+#define INT_LEAST8_MIN INT8_MIN
+#define INT_LEAST8_MAX INT8_MAX
+#define INT_LEAST16_MIN INT16_MIN
+#define INT_LEAST16_MAX INT16_MAX
+#define INT_LEAST32_MIN INT32_MIN
+#define INT_LEAST32_MAX INT32_MAX
+#define INT_LEAST64_MIN INT64_MIN
+#define INT_LEAST64_MAX INT64_MAX
+#define UINT_LEAST8_MAX UINT8_MAX
+#define UINT_LEAST16_MAX UINT16_MAX
+#define UINT_LEAST32_MAX UINT32_MAX
+#define UINT_LEAST64_MAX UINT64_MAX
+
+// 7.18.2.3 Limits of fastest minimum-width integer types
+#define INT_FAST8_MIN INT8_MIN
+#define INT_FAST8_MAX INT8_MAX
+#define INT_FAST16_MIN INT16_MIN
+#define INT_FAST16_MAX INT16_MAX
+#define INT_FAST32_MIN INT32_MIN
+#define INT_FAST32_MAX INT32_MAX
+#define INT_FAST64_MIN INT64_MIN
+#define INT_FAST64_MAX INT64_MAX
+#define UINT_FAST8_MAX UINT8_MAX
+#define UINT_FAST16_MAX UINT16_MAX
+#define UINT_FAST32_MAX UINT32_MAX
+#define UINT_FAST64_MAX UINT64_MAX
+
+// 7.18.2.4 Limits of integer types capable of holding object pointers
+#ifdef _WIN64 // [
+# define INTPTR_MIN INT64_MIN
+# define INTPTR_MAX INT64_MAX
+# define UINTPTR_MAX UINT64_MAX
+#else // _WIN64 ][
+# define INTPTR_MIN INT32_MIN
+# define INTPTR_MAX INT32_MAX
+# define UINTPTR_MAX UINT32_MAX
+#endif // _WIN64 ]
+
+// 7.18.2.5 Limits of greatest-width integer types
+#define INTMAX_MIN INT64_MIN
+#define INTMAX_MAX INT64_MAX
+#define UINTMAX_MAX UINT64_MAX
+
+// 7.18.3 Limits of other integer types
+
+#ifdef _WIN64 // [
+# define PTRDIFF_MIN _I64_MIN
+# define PTRDIFF_MAX _I64_MAX
+#else // _WIN64 ][
+# define PTRDIFF_MIN _I32_MIN
+# define PTRDIFF_MAX _I32_MAX
+#endif // _WIN64 ]
+
+#define SIG_ATOMIC_MIN INT_MIN
+#define SIG_ATOMIC_MAX INT_MAX
+
+#ifndef SIZE_MAX // [
+# ifdef _WIN64 // [
+# define SIZE_MAX _UI64_MAX
+# else // _WIN64 ][
+# define SIZE_MAX _UI32_MAX
+# endif // _WIN64 ]
+#endif // SIZE_MAX ]
+
+// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
+#ifndef WCHAR_MIN // [
+# define WCHAR_MIN 0
+#endif // WCHAR_MIN ]
+#ifndef WCHAR_MAX // [
+# define WCHAR_MAX _UI16_MAX
+#endif // WCHAR_MAX ]
+
+#define WINT_MIN 0
+#define WINT_MAX _UI16_MAX
+
+#endif // __STDC_LIMIT_MACROS ]
+
+
+// 7.18.4 Limits of other integer types
+
+#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
+
+// 7.18.4.1 Macros for minimum-width integer constants
+
+#define INT8_C(val) val##i8
+#define INT16_C(val) val##i16
+#define INT32_C(val) val##i32
+#define INT64_C(val) val##i64
+
+#define UINT8_C(val) val##ui8
+#define UINT16_C(val) val##ui16
+#define UINT32_C(val) val##ui32
+#define UINT64_C(val) val##ui64
+
+// 7.18.4.2 Macros for greatest-width integer constants
+#define INTMAX_C INT64_C
+#define UINTMAX_C UINT64_C
+
+#endif // __STDC_CONSTANT_MACROS ]
+
+
+#endif // _MSC_STDINT_H_ ]
diff --git a/contrib/jemalloc/include/msvc_compat/strings.h b/contrib/jemalloc/include/msvc_compat/strings.h
new file mode 100644
index 000000000000..996f256ce84e
--- /dev/null
+++ b/contrib/jemalloc/include/msvc_compat/strings.h
@@ -0,0 +1,58 @@
+#ifndef strings_h
+#define strings_h
+
+/* MSVC doesn't define ffs/ffsl. This dummy strings.h header is provided
+ * for both */
+#ifdef _MSC_VER
+# include <intrin.h>
+# pragma intrinsic(_BitScanForward)
+static __forceinline int ffsl(long x) {
+ unsigned long i;
+
+ if (_BitScanForward(&i, x)) {
+ return i + 1;
+ }
+ return 0;
+}
+
+static __forceinline int ffs(int x) {
+ return ffsl(x);
+}
+
+# ifdef _M_X64
+# pragma intrinsic(_BitScanForward64)
+# endif
+
+static __forceinline int ffsll(unsigned __int64 x) {
+ unsigned long i;
+#ifdef _M_X64
+ if (_BitScanForward64(&i, x)) {
+ return i + 1;
+ }
+ return 0;
+#else
+// Fallback for 32-bit build where 64-bit version not available
+// assuming little endian
+ union {
+ unsigned __int64 ll;
+ unsigned long l[2];
+ } s;
+
+ s.ll = x;
+
+ if (_BitScanForward(&i, s.l[0])) {
+ return i + 1;
+ } else if(_BitScanForward(&i, s.l[1])) {
+ return i + 33;
+ }
+ return 0;
+#endif
+}
+
+#else
+# define ffsll(x) __builtin_ffsll(x)
+# define ffsl(x) __builtin_ffsl(x)
+# define ffs(x) __builtin_ffs(x)
+#endif
+
+#endif /* strings_h */
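The shim reproduces the POSIX ffs() contract: the return value is the 1-based index of the least significant set bit, with 0 for a zero argument (hence the +1 on the _BitScanForward index, and +33 when the bit is found in the high word of the 32-bit fallback). A quick self-check of that contract (on glibc, ffsl/ffsll need _GNU_SOURCE; on FreeBSD and with the shim above they do not):

    #define _GNU_SOURCE
    #include <assert.h>
    #include <strings.h>   /* the shim above on MSVC; system header elsewhere */

    int
    main(void) {
        assert(ffs(0) == 0);               /* no set bit -> 0 */
        assert(ffs(1) == 1);               /* bit 0 -> index 1 */
        assert(ffs(0x8) == 4);
        assert(ffsl(0x100L) == 9);
        assert(ffsll(1ULL << 40) == 41);   /* exercises the high-word path */
        return 0;
    }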
diff --git a/contrib/jemalloc/include/msvc_compat/windows_extra.h b/contrib/jemalloc/include/msvc_compat/windows_extra.h
new file mode 100644
index 000000000000..a6ebb9306f24
--- /dev/null
+++ b/contrib/jemalloc/include/msvc_compat/windows_extra.h
@@ -0,0 +1,6 @@
+#ifndef MSVC_COMPAT_WINDOWS_EXTRA_H
+#define MSVC_COMPAT_WINDOWS_EXTRA_H
+
+#include <errno.h>
+
+#endif /* MSVC_COMPAT_WINDOWS_EXTRA_H */
diff --git a/contrib/jemalloc/jemalloc.pc.in b/contrib/jemalloc/jemalloc.pc.in
new file mode 100644
index 000000000000..c428a86dc39a
--- /dev/null
+++ b/contrib/jemalloc/jemalloc.pc.in
@@ -0,0 +1,12 @@
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+install_suffix=@install_suffix@
+
+Name: jemalloc
+Description: A general purpose malloc(3) implementation that emphasizes fragmentation avoidance and scalable concurrency support.
+URL: http://jemalloc.net/
+Version: @jemalloc_version_major@.@jemalloc_version_minor@.@jemalloc_version_bugfix@_@jemalloc_version_nrev@
+Cflags: -I${includedir}
+Libs: -L${libdir} -ljemalloc${install_suffix}
diff --git a/contrib/jemalloc/m4/ax_cxx_compile_stdcxx.m4 b/contrib/jemalloc/m4/ax_cxx_compile_stdcxx.m4
new file mode 100644
index 000000000000..72784472d275
--- /dev/null
+++ b/contrib/jemalloc/m4/ax_cxx_compile_stdcxx.m4
@@ -0,0 +1,1517 @@
+# ===========================================================================
+# https://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional])
+#
+# DESCRIPTION
+#
+# Check for baseline language coverage in the compiler for the specified
+# version of the C++ standard. If necessary, add switches to CXX and
+# CXXCPP to enable support. VERSION may be '11' (for the C++11 standard)
+# or '14' (for the C++14 standard).
+#
+# The second argument, if specified, indicates whether you insist on an
+# extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g.
+# -std=c++11). If neither is specified, you get whatever works, with
+# preference for an extended mode.
+#
+# The third argument, if specified 'mandatory' or if left unspecified,
+# indicates that baseline support for the specified C++ standard is
+# required and that the macro should error out if no mode with that
+# support is found. If specified 'optional', then configuration proceeds
+# regardless, after defining HAVE_CXX${VERSION} if and only if a
+# supporting mode is found.
+#
+# LICENSE
+#
+# Copyright (c) 2008 Benjamin Kosnik <bkoz@redhat.com>
+# Copyright (c) 2012 Zack Weinberg <zackw@panix.com>
+# Copyright (c) 2013 Roy Stogner <roystgnr@ices.utexas.edu>
+# Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov <sokolov@google.com>
+# Copyright (c) 2015 Paul Norman <penorman@mac.com>
+# Copyright (c) 2015 Moritz Klammler <moritz@klammler.eu>
+# Copyright (c) 2016, 2018 Krzesimir Nowak <qdlacz@gmail.com>
+# Copyright (c) 2019 Enji Cooper <yaneurabeya@gmail.com>
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 11
+
+dnl This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro
+dnl (serial version number 13).
+
+AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl
+ m4_if([$1], [11], [ax_cxx_compile_alternatives="11 0x"],
+ [$1], [14], [ax_cxx_compile_alternatives="14 1y"],
+ [$1], [17], [ax_cxx_compile_alternatives="17 1z"],
+ [m4_fatal([invalid first argument `$1' to AX_CXX_COMPILE_STDCXX])])dnl
+ m4_if([$2], [], [],
+ [$2], [ext], [],
+ [$2], [noext], [],
+ [m4_fatal([invalid second argument `$2' to AX_CXX_COMPILE_STDCXX])])dnl
+ m4_if([$3], [], [ax_cxx_compile_cxx$1_required=true],
+ [$3], [mandatory], [ax_cxx_compile_cxx$1_required=true],
+ [$3], [optional], [ax_cxx_compile_cxx$1_required=false],
+ [m4_fatal([invalid third argument `$3' to AX_CXX_COMPILE_STDCXX])])
+ AC_LANG_PUSH([C++])dnl
+ ac_success=no
+
+ m4_if([$2], [noext], [], [dnl
+ if test x$ac_success = xno; then
+ for alternative in ${ax_cxx_compile_alternatives}; do
+ switch="-std=gnu++${alternative}"
+ cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
+ AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
+ $cachevar,
+ [ac_save_CXX="$CXX"
+ CXX="$CXX $switch"
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
+ [eval $cachevar=yes],
+ [eval $cachevar=no])
+ CXX="$ac_save_CXX"])
+ if eval test x\$$cachevar = xyes; then
+ CXX="$CXX $switch"
+ if test -n "$CXXCPP" ; then
+ CXXCPP="$CXXCPP $switch"
+ fi
+ ac_success=yes
+ break
+ fi
+ done
+ fi])
+
+ m4_if([$2], [ext], [], [dnl
+ if test x$ac_success = xno; then
+ dnl HP's aCC needs +std=c++11 according to:
+ dnl http://h21007.www2.hp.com/portal/download/files/unprot/aCxx/PDF_Release_Notes/769149-001.pdf
+ dnl Cray's crayCC needs "-h std=c++11"
+ for alternative in ${ax_cxx_compile_alternatives}; do
+ for switch in -std=c++${alternative} +std=c++${alternative} "-h std=c++${alternative}"; do
+ cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
+ AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
+ $cachevar,
+ [ac_save_CXX="$CXX"
+ CXX="$CXX $switch"
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
+ [eval $cachevar=yes],
+ [eval $cachevar=no])
+ CXX="$ac_save_CXX"])
+ if eval test x\$$cachevar = xyes; then
+ CXX="$CXX $switch"
+ if test -n "$CXXCPP" ; then
+ CXXCPP="$CXXCPP $switch"
+ fi
+ ac_success=yes
+ break
+ fi
+ done
+ if test x$ac_success = xyes; then
+ break
+ fi
+ done
+ fi])
+ AC_LANG_POP([C++])
+ if test x$ax_cxx_compile_cxx$1_required = xtrue; then
+ if test x$ac_success = xno; then
+ AC_MSG_ERROR([*** A compiler with support for C++$1 language features is required.])
+ fi
+ fi
+ if test x$ac_success = xno; then
+ HAVE_CXX$1=0
+ AC_MSG_NOTICE([No compiler with C++$1 support was found])
+ else
+ HAVE_CXX$1=1
+ AC_DEFINE(HAVE_CXX$1,1,
+ [define if the compiler supports basic C++$1 syntax])
+ fi
+ AC_SUBST(HAVE_CXX$1)
+])
+
+
+dnl Test body for checking C++11 support
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_11],
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
+)
+
+
+dnl Test body for checking C++14 support
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_14],
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_14
+)
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_17],
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_14
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_17
+)
+
+dnl Tests for new features in C++11
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_11], [[
+
+// If the compiler admits that it is not ready for C++11, why torture it?
+// Hopefully, this will speed up the test.
+
+#ifndef __cplusplus
+
+#error "This is not a C++ compiler"
+
+#elif __cplusplus < 201103L
+
+#error "This is not a C++11 compiler"
+
+#else
+
+namespace cxx11
+{
+
+ namespace test_static_assert
+ {
+
+ template <typename T>
+ struct check
+ {
+ static_assert(sizeof(int) <= sizeof(T), "not big enough");
+ };
+
+ }
+
+ namespace test_final_override
+ {
+
+ struct Base
+ {
+ virtual ~Base() {}
+ virtual void f() {}
+ };
+
+ struct Derived : public Base
+ {
+ virtual ~Derived() override {}
+ virtual void f() override {}
+ };
+
+ }
+
+ namespace test_double_right_angle_brackets
+ {
+
+ template < typename T >
+ struct check {};
+
+ typedef check<void> single_type;
+ typedef check<check<void>> double_type;
+ typedef check<check<check<void>>> triple_type;
+ typedef check<check<check<check<void>>>> quadruple_type;
+
+ }
+
+ namespace test_decltype
+ {
+
+ int
+ f()
+ {
+ int a = 1;
+ decltype(a) b = 2;
+ return a + b;
+ }
+
+ }
+
+ namespace test_type_deduction
+ {
+
+ template < typename T1, typename T2 >
+ struct is_same
+ {
+ static const bool value = false;
+ };
+
+ template < typename T >
+ struct is_same<T, T>
+ {
+ static const bool value = true;
+ };
+
+ template < typename T1, typename T2 >
+ auto
+ add(T1 a1, T2 a2) -> decltype(a1 + a2)
+ {
+ return a1 + a2;
+ }
+
+ int
+ test(const int c, volatile int v)
+ {
+ static_assert(is_same<int, decltype(0)>::value == true, "");
+ static_assert(is_same<int, decltype(c)>::value == false, "");
+ static_assert(is_same<int, decltype(v)>::value == false, "");
+ auto ac = c;
+ auto av = v;
+ auto sumi = ac + av + 'x';
+ auto sumf = ac + av + 1.0;
+ static_assert(is_same<int, decltype(ac)>::value == true, "");
+ static_assert(is_same<int, decltype(av)>::value == true, "");
+ static_assert(is_same<int, decltype(sumi)>::value == true, "");
+ static_assert(is_same<int, decltype(sumf)>::value == false, "");
+ static_assert(is_same<int, decltype(add(c, v))>::value == true, "");
+ return (sumf > 0.0) ? sumi : add(c, v);
+ }
+
+ }
+
+ namespace test_noexcept
+ {
+
+ int f() { return 0; }
+ int g() noexcept { return 0; }
+
+ static_assert(noexcept(f()) == false, "");
+ static_assert(noexcept(g()) == true, "");
+
+ }
+
+ namespace test_constexpr
+ {
+
+ template < typename CharT >
+ unsigned long constexpr
+ strlen_c_r(const CharT *const s, const unsigned long acc) noexcept
+ {
+ return *s ? strlen_c_r(s + 1, acc + 1) : acc;
+ }
+
+ template < typename CharT >
+ unsigned long constexpr
+ strlen_c(const CharT *const s) noexcept
+ {
+ return strlen_c_r(s, 0UL);
+ }
+
+ static_assert(strlen_c("") == 0UL, "");
+ static_assert(strlen_c("1") == 1UL, "");
+ static_assert(strlen_c("example") == 7UL, "");
+ static_assert(strlen_c("another\0example") == 7UL, "");
+
+ }
+
+ namespace test_rvalue_references
+ {
+
+ template < int N >
+ struct answer
+ {
+ static constexpr int value = N;
+ };
+
+ answer<1> f(int&) { return answer<1>(); }
+ answer<2> f(const int&) { return answer<2>(); }
+ answer<3> f(int&&) { return answer<3>(); }
+
+ void
+ test()
+ {
+ int i = 0;
+ const int c = 0;
+ static_assert(decltype(f(i))::value == 1, "");
+ static_assert(decltype(f(c))::value == 2, "");
+ static_assert(decltype(f(0))::value == 3, "");
+ }
+
+ }
+
+ namespace test_uniform_initialization
+ {
+
+ struct test
+ {
+ static const int zero {};
+ static const int one {1};
+ };
+
+ static_assert(test::zero == 0, "");
+ static_assert(test::one == 1, "");
+
+ }
+
+ namespace test_lambdas
+ {
+
+ void
+ test1()
+ {
+ auto lambda1 = [](){};
+ auto lambda2 = lambda1;
+ lambda1();
+ lambda2();
+ }
+
+ int
+ test2()
+ {
+ auto a = [](int i, int j){ return i + j; }(1, 2);
+ auto b = []() -> int { return '0'; }();
+ auto c = [=](){ return a + b; }();
+ auto d = [&](){ return c; }();
+ auto e = [a, &b](int x) mutable {
+ const auto identity = [](int y){ return y; };
+ for (auto i = 0; i < a; ++i)
+ a += b--;
+ return x + identity(a + b);
+ }(0);
+ return a + b + c + d + e;
+ }
+
+ int
+ test3()
+ {
+ const auto nullary = [](){ return 0; };
+ const auto unary = [](int x){ return x; };
+ using nullary_t = decltype(nullary);
+ using unary_t = decltype(unary);
+ const auto higher1st = [](nullary_t f){ return f(); };
+ const auto higher2nd = [unary](nullary_t f1){
+ return [unary, f1](unary_t f2){ return f2(unary(f1())); };
+ };
+ return higher1st(nullary) + higher2nd(nullary)(unary);
+ }
+
+ }
+
+ namespace test_variadic_templates
+ {
+
+ template <int...>
+ struct sum;
+
+ template <int N0, int... N1toN>
+ struct sum<N0, N1toN...>
+ {
+ static constexpr auto value = N0 + sum<N1toN...>::value;
+ };
+
+ template <>
+ struct sum<>
+ {
+ static constexpr auto value = 0;
+ };
+
+ static_assert(sum<>::value == 0, "");
+ static_assert(sum<1>::value == 1, "");
+ static_assert(sum<23>::value == 23, "");
+ static_assert(sum<1, 2>::value == 3, "");
+ static_assert(sum<5, 5, 11>::value == 21, "");
+ static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, "");
+
+ }
+
+ // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae
+  // Clang 3.1 fails with headers of libstdc++ 4.8.3 when using std::function
+ // because of this.
+ namespace test_template_alias_sfinae
+ {
+
+ struct foo {};
+
+ template<typename T>
+ using member = typename T::member_type;
+
+ template<typename T>
+ void func(...) {}
+
+ template<typename T>
+ void func(member<T>*) {}
+
+ void test();
+
+ void test() { func<foo>(0); }
+
+ }
+
+} // namespace cxx11
+
+#endif // __cplusplus >= 201103L
+
+]])
+
+
+dnl Tests for new features in C++14
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_14], [[
+
+// If the compiler admits that it is not ready for C++14, why torture it?
+// Hopefully, this will speed up the test.
+
+#ifndef __cplusplus
+
+#error "This is not a C++ compiler"
+
+#elif __cplusplus < 201402L
+
+#error "This is not a C++14 compiler"
+
+#else
+
+namespace cxx14
+{
+
+ namespace test_polymorphic_lambdas
+ {
+
+ int
+ test()
+ {
+ const auto lambda = [](auto&&... args){
+ const auto istiny = [](auto x){
+ return (sizeof(x) == 1UL) ? 1 : 0;
+ };
+ const int aretiny[] = { istiny(args)... };
+ return aretiny[0];
+ };
+ return lambda(1, 1L, 1.0f, '1');
+ }
+
+ }
+
+ namespace test_binary_literals
+ {
+
+ constexpr auto ivii = 0b0000000000101010;
+ static_assert(ivii == 42, "wrong value");
+
+ }
+
+ namespace test_generalized_constexpr
+ {
+
+ template < typename CharT >
+ constexpr unsigned long
+ strlen_c(const CharT *const s) noexcept
+ {
+ auto length = 0UL;
+ for (auto p = s; *p; ++p)
+ ++length;
+ return length;
+ }
+
+ static_assert(strlen_c("") == 0UL, "");
+ static_assert(strlen_c("x") == 1UL, "");
+ static_assert(strlen_c("test") == 4UL, "");
+ static_assert(strlen_c("another\0test") == 7UL, "");
+
+ }
+
+ namespace test_lambda_init_capture
+ {
+
+ int
+ test()
+ {
+ auto x = 0;
+ const auto lambda1 = [a = x](int b){ return a + b; };
+ const auto lambda2 = [a = lambda1(x)](){ return a; };
+ return lambda2();
+ }
+
+ }
+
+ namespace test_digit_separators
+ {
+
+    constexpr auto hundred_million = 100'000'000;
+    static_assert(hundred_million == 100000000, "");
+
+ }
+
+ namespace test_return_type_deduction
+ {
+
+ auto f(int& x) { return x; }
+ decltype(auto) g(int& x) { return x; }
+
+ template < typename T1, typename T2 >
+ struct is_same
+ {
+ static constexpr auto value = false;
+ };
+
+ template < typename T >
+ struct is_same<T, T>
+ {
+ static constexpr auto value = true;
+ };
+
+ int
+ test()
+ {
+ auto x = 0;
+ static_assert(is_same<int, decltype(f(x))>::value, "");
+ static_assert(is_same<int&, decltype(g(x))>::value, "");
+ return x;
+ }
+
+ }
+
+} // namespace cxx14
+
+#endif // __cplusplus >= 201402L
+
+]])
+
+
+dnl Tests for new features in C++17
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_17], [[
+
+// If the compiler admits that it is not ready for C++17, why torture it?
+// Hopefully, this will speed up the test.
+
+#ifndef __cplusplus
+
+#error "This is not a C++ compiler"
+
+#elif __cplusplus < 201703L
+
+#error "This is not a C++17 compiler"
+
+#else
+
+#include <initializer_list>
+#include <utility>
+#include <type_traits>
+
+namespace cxx17
+{
+
+ namespace test_constexpr_lambdas
+ {
+
+ constexpr int foo = [](){return 42;}();
+
+ }
+
+ namespace test::nested_namespace::definitions
+ {
+
+ }
+
+ namespace test_fold_expression
+ {
+
+ template<typename... Args>
+ int multiply(Args... args)
+ {
+ return (args * ... * 1);
+ }
+
+ template<typename... Args>
+ bool all(Args... args)
+ {
+ return (args && ...);
+ }
+
+ }
+
+ namespace test_extended_static_assert
+ {
+
+ static_assert (true);
+
+ }
+
+ namespace test_auto_brace_init_list
+ {
+
+ auto foo = {5};
+ auto bar {5};
+
+ static_assert(std::is_same<std::initializer_list<int>, decltype(foo)>::value);
+ static_assert(std::is_same<int, decltype(bar)>::value);
+ }
+
+ namespace test_typename_in_template_template_parameter
+ {
+
+ template<template<typename> typename X> struct D;
+
+ }
+
+ namespace test_fallthrough_nodiscard_maybe_unused_attributes
+ {
+
+ int f1()
+ {
+ return 42;
+ }
+
+ [[nodiscard]] int f2()
+ {
+ [[maybe_unused]] auto unused = f1();
+
+ switch (f1())
+ {
+ case 17:
+ f1();
+ [[fallthrough]];
+ case 42:
+ f1();
+ }
+ return f1();
+ }
+
+ }
+
+ namespace test_extended_aggregate_initialization
+ {
+
+ struct base1
+ {
+ int b1, b2 = 42;
+ };
+
+ struct base2
+ {
+ base2() {
+ b3 = 42;
+ }
+ int b3;
+ };
+
+ struct derived : base1, base2
+ {
+ int d;
+ };
+
+ derived d1 {{1, 2}, {}, 4}; // full initialization
+ derived d2 {{}, {}, 4}; // value-initialized bases
+
+ }
+
+ namespace test_general_range_based_for_loop
+ {
+
+ struct iter
+ {
+ int i;
+
+ int& operator* ()
+ {
+ return i;
+ }
+
+ const int& operator* () const
+ {
+ return i;
+ }
+
+ iter& operator++()
+ {
+ ++i;
+ return *this;
+ }
+ };
+
+ struct sentinel
+ {
+ int i;
+ };
+
+ bool operator== (const iter& i, const sentinel& s)
+ {
+ return i.i == s.i;
+ }
+
+ bool operator!= (const iter& i, const sentinel& s)
+ {
+ return !(i == s);
+ }
+
+ struct range
+ {
+ iter begin() const
+ {
+ return {0};
+ }
+
+ sentinel end() const
+ {
+ return {5};
+ }
+ };
+
+ void f()
+ {
+ range r {};
+
+ for (auto i : r)
+ {
+ [[maybe_unused]] auto v = i;
+ }
+ }
+
+ }
+
+ namespace test_lambda_capture_asterisk_this_by_value
+ {
+
+ struct t
+ {
+ int i;
+ int foo()
+ {
+ return [*this]()
+ {
+ return i;
+ }();
+ }
+ };
+
+ }
+
+ namespace test_enum_class_construction
+ {
+
+ enum class byte : unsigned char
+ {};
+
+ byte foo {42};
+
+ }
+
+ namespace test_constexpr_if
+ {
+
+ template <bool cond>
+ int f ()
+ {
+ if constexpr(cond)
+ {
+ return 13;
+ }
+ else
+ {
+ return 42;
+ }
+ }
+
+ }
+
+ namespace test_selection_statement_with_initializer
+ {
+
+ int f()
+ {
+ return 13;
+ }
+
+ int f2()
+ {
+ if (auto i = f(); i > 0)
+ {
+ return 3;
+ }
+
+ switch (auto i = f(); i + 4)
+ {
+ case 17:
+ return 2;
+
+ default:
+ return 1;
+ }
+ }
+
+ }
+
+ namespace test_template_argument_deduction_for_class_templates
+ {
+
+ template <typename T1, typename T2>
+ struct pair
+ {
+ pair (T1 p1, T2 p2)
+ : m1 {p1},
+ m2 {p2}
+ {}
+
+ T1 m1;
+ T2 m2;
+ };
+
+ void f()
+ {
+ [[maybe_unused]] auto p = pair{13, 42u};
+ }
+
+ }
+
+ namespace test_non_type_auto_template_parameters
+ {
+
+ template <auto n>
+ struct B
+ {};
+
+ B<5> b1;
+ B<'a'> b2;
+
+ }
+
+ namespace test_structured_bindings
+ {
+
+ int arr[2] = { 1, 2 };
+ std::pair<int, int> pr = { 1, 2 };
+
+ auto f1() -> int(&)[2]
+ {
+ return arr;
+ }
+
+ auto f2() -> std::pair<int, int>&
+ {
+ return pr;
+ }
+
+ struct S
+ {
+ int x1 : 2;
+ volatile double y1;
+ };
+
+ S f3()
+ {
+ return {};
+ }
+
+ auto [ x1, y1 ] = f1();
+ auto& [ xr1, yr1 ] = f1();
+ auto [ x2, y2 ] = f2();
+ auto& [ xr2, yr2 ] = f2();
+ const auto [ x3, y3 ] = f3();
+
+ }
+
+ namespace test_exception_spec_type_system
+ {
+
+ struct Good {};
+ struct Bad {};
+
+ void g1() noexcept;
+ void g2();
+
+ template<typename T>
+ Bad
+ f(T*, T*);
+
+ template<typename T1, typename T2>
+ Good
+ f(T1*, T2*);
+
+ static_assert (std::is_same_v<Good, decltype(f(g1, g2))>);
+
+ }
+
+ namespace test_inline_variables
+ {
+
+ template<class T> void f(T)
+ {}
+
+ template<class T> inline T g(T)
+ {
+ return T{};
+ }
+
+ template<> inline void f<>(int)
+ {}
+
+ template<> int g<>(int)
+ {
+ return 5;
+ }
+
+ }
+
+} // namespace cxx17
+
+#endif // __cplusplus >= 201703L
+
+]])
+||||||| dec341af7695
+=======
+# ===========================================================================
+# http://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional])
+#
+# DESCRIPTION
+#
+# Check for baseline language coverage in the compiler for the specified
+# version of the C++ standard. If necessary, add switches to CXX and
+# CXXCPP to enable support. VERSION may be '11' (for the C++11 standard)
+# or '14' (for the C++14 standard).
+#
+# The second argument, if specified, indicates whether you insist on an
+# extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g.
+# -std=c++11). If neither is specified, you get whatever works, with
+# preference for an extended mode.
+#
+# The third argument, if specified 'mandatory' or if left unspecified,
+# indicates that baseline support for the specified C++ standard is
+# required and that the macro should error out if no mode with that
+# support is found. If specified 'optional', then configuration proceeds
+# regardless, after defining HAVE_CXX${VERSION} if and only if a
+# supporting mode is found.
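+#
+# As an illustrative example (not a prescribed usage), a configure.ac
+# that insists on strict C++14 conformance could invoke:
+#
+#   AX_CXX_COMPILE_STDCXX([14], [noext], [mandatory])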
+#
+# LICENSE
+#
+# Copyright (c) 2008 Benjamin Kosnik <bkoz@redhat.com>
+# Copyright (c) 2012 Zack Weinberg <zackw@panix.com>
+# Copyright (c) 2013 Roy Stogner <roystgnr@ices.utexas.edu>
+# Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov <sokolov@google.com>
+# Copyright (c) 2015 Paul Norman <penorman@mac.com>
+# Copyright (c) 2015 Moritz Klammler <moritz@klammler.eu>
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 4
+
+dnl This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro
+dnl (serial version number 13).
+
+AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl
+ m4_if([$1], [11], [],
+ [$1], [14], [],
+ [$1], [17], [m4_fatal([support for C++17 not yet implemented in AX_CXX_COMPILE_STDCXX])],
+ [m4_fatal([invalid first argument `$1' to AX_CXX_COMPILE_STDCXX])])dnl
+ m4_if([$2], [], [],
+ [$2], [ext], [],
+ [$2], [noext], [],
+ [m4_fatal([invalid second argument `$2' to AX_CXX_COMPILE_STDCXX])])dnl
+ m4_if([$3], [], [ax_cxx_compile_cxx$1_required=true],
+ [$3], [mandatory], [ax_cxx_compile_cxx$1_required=true],
+ [$3], [optional], [ax_cxx_compile_cxx$1_required=false],
+ [m4_fatal([invalid third argument `$3' to AX_CXX_COMPILE_STDCXX])])
+ AC_LANG_PUSH([C++])dnl
+ ac_success=no
+ AC_CACHE_CHECK(whether $CXX supports C++$1 features by default,
+ ax_cv_cxx_compile_cxx$1,
+ [AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
+ [ax_cv_cxx_compile_cxx$1=yes],
+ [ax_cv_cxx_compile_cxx$1=no])])
+ if test x$ax_cv_cxx_compile_cxx$1 = xyes; then
+ ac_success=yes
+ fi
+
+ m4_if([$2], [noext], [], [dnl
+ if test x$ac_success = xno; then
+ for switch in -std=gnu++$1 -std=gnu++0x; do
+ cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
+ AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
+ $cachevar,
+ [ac_save_CXX="$CXX"
+ CXX="$CXX $switch"
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
+ [eval $cachevar=yes],
+ [eval $cachevar=no])
+ CXX="$ac_save_CXX"])
+ if eval test x\$$cachevar = xyes; then
+ CXX="$CXX $switch"
+ if test -n "$CXXCPP" ; then
+ CXXCPP="$CXXCPP $switch"
+ fi
+ ac_success=yes
+ break
+ fi
+ done
+ fi])
+
+ m4_if([$2], [ext], [], [dnl
+ if test x$ac_success = xno; then
+ dnl HP's aCC needs +std=c++11 according to:
+ dnl http://h21007.www2.hp.com/portal/download/files/unprot/aCxx/PDF_Release_Notes/769149-001.pdf
+ dnl Cray's crayCC needs "-h std=c++11"
+ for switch in -std=c++$1 -std=c++0x +std=c++$1 "-h std=c++$1"; do
+ cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
+ AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
+ $cachevar,
+ [ac_save_CXX="$CXX"
+ CXX="$CXX $switch"
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
+ [eval $cachevar=yes],
+ [eval $cachevar=no])
+ CXX="$ac_save_CXX"])
+ if eval test x\$$cachevar = xyes; then
+ CXX="$CXX $switch"
+ if test -n "$CXXCPP" ; then
+ CXXCPP="$CXXCPP $switch"
+ fi
+ ac_success=yes
+ break
+ fi
+ done
+ fi])
+ AC_LANG_POP([C++])
+ if test x$ax_cxx_compile_cxx$1_required = xtrue; then
+ if test x$ac_success = xno; then
+ AC_MSG_ERROR([*** A compiler with support for C++$1 language features is required.])
+ fi
+ fi
+ if test x$ac_success = xno; then
+ HAVE_CXX$1=0
+ AC_MSG_NOTICE([No compiler with C++$1 support was found])
+ else
+ HAVE_CXX$1=1
+ AC_DEFINE(HAVE_CXX$1,1,
+ [define if the compiler supports basic C++$1 syntax])
+ fi
+ AC_SUBST(HAVE_CXX$1)
+])
+
+
+dnl Test body for checking C++11 support
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_11],
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
+)
+
+
+dnl Test body for checking C++14 support
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_14],
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_14
+)
+
+
+dnl Tests for new features in C++11
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_11], [[
+
+// If the compiler admits that it is not ready for C++11, why torture it?
+// Hopefully, this will speed up the test.
+
+#ifndef __cplusplus
+
+#error "This is not a C++ compiler"
+
+#elif __cplusplus < 201103L
+
+#error "This is not a C++11 compiler"
+
+#else
+
+namespace cxx11
+{
+
+ namespace test_static_assert
+ {
+
+ template <typename T>
+ struct check
+ {
+ static_assert(sizeof(int) <= sizeof(T), "not big enough");
+ };
+
+ }
+
+ namespace test_final_override
+ {
+
+ struct Base
+ {
+ virtual void f() {}
+ };
+
+ struct Derived : public Base
+ {
+ virtual void f() override {}
+ };
+
+ }
+
+ namespace test_double_right_angle_brackets
+ {
+
+ template < typename T >
+ struct check {};
+
+ typedef check<void> single_type;
+ typedef check<check<void>> double_type;
+ typedef check<check<check<void>>> triple_type;
+ typedef check<check<check<check<void>>>> quadruple_type;
+
+ }
+
+ namespace test_decltype
+ {
+
+ int
+ f()
+ {
+ int a = 1;
+ decltype(a) b = 2;
+ return a + b;
+ }
+
+ }
+
+ namespace test_type_deduction
+ {
+
+ template < typename T1, typename T2 >
+ struct is_same
+ {
+ static const bool value = false;
+ };
+
+ template < typename T >
+ struct is_same<T, T>
+ {
+ static const bool value = true;
+ };
+
+ template < typename T1, typename T2 >
+ auto
+ add(T1 a1, T2 a2) -> decltype(a1 + a2)
+ {
+ return a1 + a2;
+ }
+
+ int
+ test(const int c, volatile int v)
+ {
+ static_assert(is_same<int, decltype(0)>::value == true, "");
+ static_assert(is_same<int, decltype(c)>::value == false, "");
+ static_assert(is_same<int, decltype(v)>::value == false, "");
+ auto ac = c;
+ auto av = v;
+ auto sumi = ac + av + 'x';
+ auto sumf = ac + av + 1.0;
+ static_assert(is_same<int, decltype(ac)>::value == true, "");
+ static_assert(is_same<int, decltype(av)>::value == true, "");
+ static_assert(is_same<int, decltype(sumi)>::value == true, "");
+ static_assert(is_same<int, decltype(sumf)>::value == false, "");
+ static_assert(is_same<int, decltype(add(c, v))>::value == true, "");
+ return (sumf > 0.0) ? sumi : add(c, v);
+ }
+
+ }
+
+ namespace test_noexcept
+ {
+
+ int f() { return 0; }
+ int g() noexcept { return 0; }
+
+ static_assert(noexcept(f()) == false, "");
+ static_assert(noexcept(g()) == true, "");
+
+ }
+
+ namespace test_constexpr
+ {
+
+ template < typename CharT >
+ unsigned long constexpr
+ strlen_c_r(const CharT *const s, const unsigned long acc) noexcept
+ {
+ return *s ? strlen_c_r(s + 1, acc + 1) : acc;
+ }
+
+ template < typename CharT >
+ unsigned long constexpr
+ strlen_c(const CharT *const s) noexcept
+ {
+ return strlen_c_r(s, 0UL);
+ }
+
+ static_assert(strlen_c("") == 0UL, "");
+ static_assert(strlen_c("1") == 1UL, "");
+ static_assert(strlen_c("example") == 7UL, "");
+ static_assert(strlen_c("another\0example") == 7UL, "");
+
+ }
+
+ namespace test_rvalue_references
+ {
+
+ template < int N >
+ struct answer
+ {
+ static constexpr int value = N;
+ };
+
+ answer<1> f(int&) { return answer<1>(); }
+ answer<2> f(const int&) { return answer<2>(); }
+ answer<3> f(int&&) { return answer<3>(); }
+
+ void
+ test()
+ {
+ int i = 0;
+ const int c = 0;
+ static_assert(decltype(f(i))::value == 1, "");
+ static_assert(decltype(f(c))::value == 2, "");
+ static_assert(decltype(f(0))::value == 3, "");
+ }
+
+ }
+
+ namespace test_uniform_initialization
+ {
+
+ struct test
+ {
+ static const int zero {};
+ static const int one {1};
+ };
+
+ static_assert(test::zero == 0, "");
+ static_assert(test::one == 1, "");
+
+ }
+
+ namespace test_lambdas
+ {
+
+ void
+ test1()
+ {
+ auto lambda1 = [](){};
+ auto lambda2 = lambda1;
+ lambda1();
+ lambda2();
+ }
+
+ int
+ test2()
+ {
+ auto a = [](int i, int j){ return i + j; }(1, 2);
+ auto b = []() -> int { return '0'; }();
+ auto c = [=](){ return a + b; }();
+ auto d = [&](){ return c; }();
+ auto e = [a, &b](int x) mutable {
+ const auto identity = [](int y){ return y; };
+ for (auto i = 0; i < a; ++i)
+ a += b--;
+ return x + identity(a + b);
+ }(0);
+ return a + b + c + d + e;
+ }
+
+ int
+ test3()
+ {
+ const auto nullary = [](){ return 0; };
+ const auto unary = [](int x){ return x; };
+ using nullary_t = decltype(nullary);
+ using unary_t = decltype(unary);
+ const auto higher1st = [](nullary_t f){ return f(); };
+ const auto higher2nd = [unary](nullary_t f1){
+ return [unary, f1](unary_t f2){ return f2(unary(f1())); };
+ };
+ return higher1st(nullary) + higher2nd(nullary)(unary);
+ }
+
+ }
+
+ namespace test_variadic_templates
+ {
+
+ template <int...>
+ struct sum;
+
+ template <int N0, int... N1toN>
+ struct sum<N0, N1toN...>
+ {
+ static constexpr auto value = N0 + sum<N1toN...>::value;
+ };
+
+ template <>
+ struct sum<>
+ {
+ static constexpr auto value = 0;
+ };
+
+ static_assert(sum<>::value == 0, "");
+ static_assert(sum<1>::value == 1, "");
+ static_assert(sum<23>::value == 23, "");
+ static_assert(sum<1, 2>::value == 3, "");
+ static_assert(sum<5, 5, 11>::value == 21, "");
+ static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, "");
+
+ }
+
+ // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae
+  // Clang 3.1 fails with headers of libstdc++ 4.8.3 when using std::function
+ // because of this.
+ namespace test_template_alias_sfinae
+ {
+
+ struct foo {};
+
+ template<typename T>
+ using member = typename T::member_type;
+
+ template<typename T>
+ void func(...) {}
+
+ template<typename T>
+ void func(member<T>*) {}
+
+ void test();
+
+ void test() { func<foo>(0); }
+
+ }
+
+} // namespace cxx11
+
+#endif // __cplusplus >= 201103L
+
+]])
+
+
+dnl Tests for new features in C++14
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_14], [[
+
+// If the compiler admits that it is not ready for C++14, why torture it?
+// Hopefully, this will speed up the test.
+
+#ifndef __cplusplus
+
+#error "This is not a C++ compiler"
+
+#elif __cplusplus < 201402L
+
+#error "This is not a C++14 compiler"
+
+#else
+
+namespace cxx14
+{
+
+ namespace test_polymorphic_lambdas
+ {
+
+ int
+ test()
+ {
+ const auto lambda = [](auto&&... args){
+ const auto istiny = [](auto x){
+ return (sizeof(x) == 1UL) ? 1 : 0;
+ };
+ const int aretiny[] = { istiny(args)... };
+ return aretiny[0];
+ };
+ return lambda(1, 1L, 1.0f, '1');
+ }
+
+ }
+
+ namespace test_binary_literals
+ {
+
+ constexpr auto ivii = 0b0000000000101010;
+ static_assert(ivii == 42, "wrong value");
+
+ }
+
+ namespace test_generalized_constexpr
+ {
+
+ template < typename CharT >
+ constexpr unsigned long
+ strlen_c(const CharT *const s) noexcept
+ {
+ auto length = 0UL;
+ for (auto p = s; *p; ++p)
+ ++length;
+ return length;
+ }
+
+ static_assert(strlen_c("") == 0UL, "");
+ static_assert(strlen_c("x") == 1UL, "");
+ static_assert(strlen_c("test") == 4UL, "");
+ static_assert(strlen_c("another\0test") == 7UL, "");
+
+ }
+
+ namespace test_lambda_init_capture
+ {
+
+ int
+ test()
+ {
+ auto x = 0;
+ const auto lambda1 = [a = x](int b){ return a + b; };
+ const auto lambda2 = [a = lambda1(x)](){ return a; };
+ return lambda2();
+ }
+
+ }
+
+  namespace test_digit_separators
+ {
+
+    constexpr auto hundred_million = 100'000'000;
+    static_assert(hundred_million == 100000000, "");
+
+ }
+
+ namespace test_return_type_deduction
+ {
+
+ auto f(int& x) { return x; }
+ decltype(auto) g(int& x) { return x; }
+
+ template < typename T1, typename T2 >
+ struct is_same
+ {
+ static constexpr auto value = false;
+ };
+
+ template < typename T >
+ struct is_same<T, T>
+ {
+ static constexpr auto value = true;
+ };
+
+ int
+ test()
+ {
+ auto x = 0;
+ static_assert(is_same<int, decltype(f(x))>::value, "");
+ static_assert(is_same<int&, decltype(g(x))>::value, "");
+ return x;
+ }
+
+ }
+
+} // namespace cxx14
+
+#endif // __cplusplus >= 201402L
+
+]])
+>>>>>>> main
diff --git a/contrib/jemalloc/run_tests.sh b/contrib/jemalloc/run_tests.sh
new file mode 100755
index 000000000000..b434f15b335e
--- /dev/null
+++ b/contrib/jemalloc/run_tests.sh
@@ -0,0 +1 @@
+$(dirname "$0")/scripts/gen_run_tests.py | bash
diff --git a/contrib/jemalloc/scripts/check-formatting.sh b/contrib/jemalloc/scripts/check-formatting.sh
new file mode 100755
index 000000000000..68cafd8e546e
--- /dev/null
+++ b/contrib/jemalloc/scripts/check-formatting.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+# The files that need to be properly formatted. We'll grow this incrementally
+# until it includes all the jemalloc source files (as we convert things over),
+# and then just replace it with
+# find -name '*.c' -o -name '*.h' -o -name '*.cpp'
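+# Hypothetical example entries, purely illustrative (the list is still
+# empty at this point):
+#   FILES=(src/bin.c include/jemalloc/internal/bin.h)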
+FILES=(
+)
+
+if command -v clang-format &> /dev/null; then
+ CLANG_FORMAT="clang-format"
+elif command -v clang-format-8 &> /dev/null; then
+ CLANG_FORMAT="clang-format-8"
+else
+  echo "Couldn't find clang-format."
+  exit 1
+fi
+
+if ! $CLANG_FORMAT -version | grep "version 8\." &> /dev/null; then
+ echo "clang-format is the wrong version."
+ exit 1
+fi
+
+for file in "${FILES[@]}"; do
+  if ! cmp --silent "$file" <($CLANG_FORMAT "$file") &> /dev/null; then
+ echo "Error: $file is not clang-formatted"
+ exit 1
+ fi
+done
diff --git a/contrib/jemalloc/scripts/freebsd/before_install.sh b/contrib/jemalloc/scripts/freebsd/before_install.sh
new file mode 100644
index 000000000000..f2bee321f73f
--- /dev/null
+++ b/contrib/jemalloc/scripts/freebsd/before_install.sh
@@ -0,0 +1,3 @@
+#!/bin/tcsh
+
+su -m root -c 'pkg install -y git'
diff --git a/contrib/jemalloc/scripts/freebsd/before_script.sh b/contrib/jemalloc/scripts/freebsd/before_script.sh
new file mode 100644
index 000000000000..29406f6fbbdf
--- /dev/null
+++ b/contrib/jemalloc/scripts/freebsd/before_script.sh
@@ -0,0 +1,10 @@
+#!/bin/tcsh
+
+autoconf
+# We don't perfectly track freebsd stdlib.h definitions. This is fine when
+# jemalloc is treated as a system header, but breaks otherwise, such as
+# during these tests.
+./configure --with-jemalloc-prefix=ci_ ${COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" CXX="$CXX $COMPILER_FLAGS"} $CONFIGURE_FLAGS
+JE_NCPUS=`sysctl -n kern.smp.cpus`
+gmake -j${JE_NCPUS}
+gmake -j${JE_NCPUS} tests
diff --git a/contrib/jemalloc/scripts/freebsd/script.sh b/contrib/jemalloc/scripts/freebsd/script.sh
new file mode 100644
index 000000000000..d9c53a201f5e
--- /dev/null
+++ b/contrib/jemalloc/scripts/freebsd/script.sh
@@ -0,0 +1,3 @@
+#!/bin/tcsh
+
+gmake check
diff --git a/contrib/jemalloc/scripts/gen_run_tests.py b/contrib/jemalloc/scripts/gen_run_tests.py
new file mode 100755
index 000000000000..698d3bce4bbf
--- /dev/null
+++ b/contrib/jemalloc/scripts/gen_run_tests.py
@@ -0,0 +1,260 @@
+<<<<<<< HEAD
+#!/usr/bin/env python3
+
+import sys
+from itertools import combinations
+from os import uname
+from multiprocessing import cpu_count
+from subprocess import call
+
+# Later, we want to test extended vaddr support. Apparently, the "real" way of
+# checking this is flaky on OS X.
+bits_64 = sys.maxsize > 2**32
+
+nparallel = cpu_count() * 2
+
+uname = uname()[0]
+
+if call("command -v gmake", shell=True) == 0:
+ make_cmd = 'gmake'
+else:
+ make_cmd = 'make'
+
+def powerset(items):
+ result = []
+ for i in range(len(items) + 1):
+ result += combinations(items, i)
+ return result
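+# Illustrative behavior: powerset(['a', 'b']) returns
+# [(), ('a',), ('b',), ('a', 'b')].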
+
+possible_compilers = []
+for cc, cxx in (['gcc', 'g++'], ['clang', 'clang++']):
+ try:
+ cmd_ret = call([cc, "-v"])
+ if cmd_ret == 0:
+ possible_compilers.append((cc, cxx))
+    except OSError:
+ pass
+possible_compiler_opts = [
+ '-m32',
+]
+possible_config_opts = [
+ '--enable-debug',
+ '--enable-prof',
+ '--disable-stats',
+ '--enable-opt-safety-checks',
+ '--with-lg-page=16',
+]
+if bits_64:
+ possible_config_opts.append('--with-lg-vaddr=56')
+
+possible_malloc_conf_opts = [
+ 'tcache:false',
+ 'dss:primary',
+ 'percpu_arena:percpu',
+ 'background_thread:true',
+]
+
+print('set -e')
+print('if [ -f Makefile ] ; then %(make_cmd)s relclean ; fi' % {'make_cmd':
+ make_cmd})
+print('autoconf')
+print('rm -rf run_tests.out')
+print('mkdir run_tests.out')
+print('cd run_tests.out')
+
+ind = 0
+for cc, cxx in possible_compilers:
+ for compiler_opts in powerset(possible_compiler_opts):
+ for config_opts in powerset(possible_config_opts):
+ for malloc_conf_opts in powerset(possible_malloc_conf_opts):
+ if cc == 'clang' \
+                  and '-m32' in compiler_opts \
+ and '--enable-prof' in config_opts:
+ continue
+ config_line = (
+ 'EXTRA_CFLAGS=-Werror EXTRA_CXXFLAGS=-Werror '
+ + 'CC="{} {}" '.format(cc, " ".join(compiler_opts))
+ + 'CXX="{} {}" '.format(cxx, " ".join(compiler_opts))
+ + '../../configure '
+ + " ".join(config_opts) + (' --with-malloc-conf=' +
+ ",".join(malloc_conf_opts) if len(malloc_conf_opts) > 0
+ else '')
+ )
+
+ # We don't want to test large vaddr spaces in 32-bit mode.
+ if ('-m32' in compiler_opts and '--with-lg-vaddr=56' in
+ config_opts):
+ continue
+
+ # Per CPU arenas are only supported on Linux.
+ linux_supported = ('percpu_arena:percpu' in malloc_conf_opts \
+ or 'background_thread:true' in malloc_conf_opts)
+ # Heap profiling and dss are not supported on OS X.
+ darwin_unsupported = ('--enable-prof' in config_opts or \
+ 'dss:primary' in malloc_conf_opts)
+ if (uname == 'Linux' and linux_supported) \
+ or (not linux_supported and (uname != 'Darwin' or \
+ not darwin_unsupported)):
+ print("""cat <<EOF > run_test_%(ind)d.sh
+#!/bin/sh
+
+set -e
+
+abort() {
+ echo "==> Error" >> run_test.log
+ echo "Error; see run_tests.out/run_test_%(ind)d.out/run_test.log"
+ exit 255 # Special exit code tells xargs to terminate.
+}
+
+# Environment variables are not supported.
+run_cmd() {
+ echo "==> \$@" >> run_test.log
+ \$@ >> run_test.log 2>&1 || abort
+}
+
+echo "=> run_test_%(ind)d: %(config_line)s"
+mkdir run_test_%(ind)d.out
+cd run_test_%(ind)d.out
+
+echo "==> %(config_line)s" >> run_test.log
+%(config_line)s >> run_test.log 2>&1 || abort
+
+run_cmd %(make_cmd)s all tests
+run_cmd %(make_cmd)s check
+run_cmd %(make_cmd)s distclean
+EOF
+chmod 755 run_test_%(ind)d.sh""" % {'ind': ind, 'config_line': config_line,
+ 'make_cmd': make_cmd})
+ ind += 1
+
+print('for i in `seq 0 %(last_ind)d` ; do echo run_test_${i}.sh ; done | xargs'
+ ' -P %(nparallel)d -n 1 sh' % {'last_ind': ind-1, 'nparallel': nparallel})
+||||||| dec341af7695
+=======
+#!/usr/bin/env python
+
+import sys
+from itertools import combinations
+from os import uname
+from multiprocessing import cpu_count
+from subprocess import call
+
+# Later, we want to test extended vaddr support. Apparently, the "real" way of
+# checking this is flaky on OS X.
+bits_64 = sys.maxsize > 2**32
+
+nparallel = cpu_count() * 2
+
+uname = uname()[0]
+
+if "BSD" in uname:
+ make_cmd = 'gmake'
+else:
+ make_cmd = 'make'
+
+def powerset(items):
+ result = []
+ for i in xrange(len(items) + 1):
+ result += combinations(items, i)
+ return result
+
+possible_compilers = []
+for cc, cxx in (['gcc', 'g++'], ['clang', 'clang++']):
+ try:
+ cmd_ret = call([cc, "-v"])
+ if cmd_ret == 0:
+ possible_compilers.append((cc, cxx))
+    except OSError:
+ pass
+possible_compiler_opts = [
+ '-m32',
+]
+possible_config_opts = [
+ '--enable-debug',
+ '--enable-prof',
+ '--disable-stats',
+ '--enable-opt-safety-checks',
+]
+if bits_64:
+ possible_config_opts.append('--with-lg-vaddr=56')
+
+possible_malloc_conf_opts = [
+ 'tcache:false',
+ 'dss:primary',
+ 'percpu_arena:percpu',
+ 'background_thread:true',
+]
+
+print 'set -e'
+print 'if [ -f Makefile ] ; then %(make_cmd)s relclean ; fi' % {'make_cmd': make_cmd}
+print 'autoconf'
+print 'rm -rf run_tests.out'
+print 'mkdir run_tests.out'
+print 'cd run_tests.out'
+
+ind = 0
+for cc, cxx in possible_compilers:
+ for compiler_opts in powerset(possible_compiler_opts):
+ for config_opts in powerset(possible_config_opts):
+ for malloc_conf_opts in powerset(possible_malloc_conf_opts):
+                if cc == 'clang' \
+                  and '-m32' in compiler_opts \
+ and '--enable-prof' in config_opts:
+ continue
+ config_line = (
+ 'EXTRA_CFLAGS=-Werror EXTRA_CXXFLAGS=-Werror '
+ + 'CC="{} {}" '.format(cc, " ".join(compiler_opts))
+ + 'CXX="{} {}" '.format(cxx, " ".join(compiler_opts))
+ + '../../configure '
+ + " ".join(config_opts) + (' --with-malloc-conf=' +
+ ",".join(malloc_conf_opts) if len(malloc_conf_opts) > 0
+ else '')
+ )
+
+ # We don't want to test large vaddr spaces in 32-bit mode.
+ if ('-m32' in compiler_opts and '--with-lg-vaddr=56' in
+ config_opts):
+ continue
+
+ # Per CPU arenas are only supported on Linux.
+ linux_supported = ('percpu_arena:percpu' in malloc_conf_opts \
+ or 'background_thread:true' in malloc_conf_opts)
+ # Heap profiling and dss are not supported on OS X.
+ darwin_unsupported = ('--enable-prof' in config_opts or \
+ 'dss:primary' in malloc_conf_opts)
+ if (uname == 'Linux' and linux_supported) \
+ or (not linux_supported and (uname != 'Darwin' or \
+ not darwin_unsupported)):
+ print """cat <<EOF > run_test_%(ind)d.sh
+#!/bin/sh
+
+set -e
+
+abort() {
+ echo "==> Error" >> run_test.log
+ echo "Error; see run_tests.out/run_test_%(ind)d.out/run_test.log"
+ exit 255 # Special exit code tells xargs to terminate.
+}
+
+# Environment variables are not supported.
+run_cmd() {
+ echo "==> \$@" >> run_test.log
+ \$@ >> run_test.log 2>&1 || abort
+}
+
+echo "=> run_test_%(ind)d: %(config_line)s"
+mkdir run_test_%(ind)d.out
+cd run_test_%(ind)d.out
+
+echo "==> %(config_line)s" >> run_test.log
+%(config_line)s >> run_test.log 2>&1 || abort
+
+run_cmd %(make_cmd)s all tests
+run_cmd %(make_cmd)s check
+run_cmd %(make_cmd)s distclean
+EOF
+chmod 755 run_test_%(ind)d.sh""" % {'ind': ind, 'config_line': config_line, 'make_cmd': make_cmd}
+ ind += 1
+
+print 'for i in `seq 0 %(last_ind)d` ; do echo run_test_${i}.sh ; done | xargs -P %(nparallel)d -n 1 sh' % {'last_ind': ind-1, 'nparallel': nparallel}
+>>>>>>> main
diff --git a/contrib/jemalloc/scripts/gen_travis.py b/contrib/jemalloc/scripts/gen_travis.py
new file mode 100755
index 000000000000..c306744e6a65
--- /dev/null
+++ b/contrib/jemalloc/scripts/gen_travis.py
@@ -0,0 +1,480 @@
+<<<<<<< HEAD
+#!/usr/bin/env python3
+
+from itertools import combinations, chain
+from enum import Enum, auto
+
+
+LINUX = 'linux'
+OSX = 'osx'
+WINDOWS = 'windows'
+FREEBSD = 'freebsd'
+
+
+AMD64 = 'amd64'
+ARM64 = 'arm64'
+PPC64LE = 'ppc64le'
+
+
+TRAVIS_TEMPLATE = """\
+# This config file is generated by ./scripts/gen_travis.py.
+# Do not edit by hand.
+
+# We use 'minimal', because 'generic' makes Windows VMs hang at startup. Also
+# the software provided by 'generic' is simply not needed for our tests.
+# Differences are explained here:
+# https://docs.travis-ci.com/user/languages/minimal-and-generic/
+language: minimal
+dist: focal
+
+jobs:
+ include:
+{jobs}
+
+before_install:
+ - |-
+ if test -f "./scripts/$TRAVIS_OS_NAME/before_install.sh"; then
+ source ./scripts/$TRAVIS_OS_NAME/before_install.sh
+ fi
+
+before_script:
+ - |-
+ if test -f "./scripts/$TRAVIS_OS_NAME/before_script.sh"; then
+ source ./scripts/$TRAVIS_OS_NAME/before_script.sh
+ else
+ scripts/gen_travis.py > travis_script && diff .travis.yml travis_script
+ autoconf
+ # If COMPILER_FLAGS are not empty, add them to CC and CXX
+ ./configure ${{COMPILER_FLAGS:+ CC="$CC $COMPILER_FLAGS" \
+CXX="$CXX $COMPILER_FLAGS"}} $CONFIGURE_FLAGS
+ make -j3
+ make -j3 tests
+ fi
+
+script:
+ - |-
+ if test -f "./scripts/$TRAVIS_OS_NAME/script.sh"; then
+ source ./scripts/$TRAVIS_OS_NAME/script.sh
+ else
+ make check
+ fi
+"""
+
+
+class Option(object):
+ class Type:
+ COMPILER = auto()
+ COMPILER_FLAG = auto()
+ CONFIGURE_FLAG = auto()
+ MALLOC_CONF = auto()
+ FEATURE = auto()
+
+ def __init__(self, type, value):
+ self.type = type
+ self.value = value
+
+ @staticmethod
+ def as_compiler(value):
+ return Option(Option.Type.COMPILER, value)
+
+ @staticmethod
+ def as_compiler_flag(value):
+ return Option(Option.Type.COMPILER_FLAG, value)
+
+ @staticmethod
+ def as_configure_flag(value):
+ return Option(Option.Type.CONFIGURE_FLAG, value)
+
+ @staticmethod
+ def as_malloc_conf(value):
+ return Option(Option.Type.MALLOC_CONF, value)
+
+ @staticmethod
+ def as_feature(value):
+ return Option(Option.Type.FEATURE, value)
+
+ def __eq__(self, obj):
+ return (isinstance(obj, Option) and obj.type == self.type
+ and obj.value == self.value)
+
+
+# The 'default' configuration is gcc, on linux, with no compiler or configure
+# flags. We also test with clang, -m32, --enable-debug, --enable-prof,
+# --disable-stats, and --with-malloc-conf=tcache:false. To avoid abusing
+# travis though, we don't test all 2**7 = 128 possible combinations of these;
+# instead, we only test combinations of up to 2 'unusual' settings, under the
+# hope that bugs involving interactions of such settings are rare.
+MAX_UNUSUAL_OPTIONS = 2
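+# With the 12 'unusual' options defined below, that is at most
+# C(12, 0) + C(12, 1) + C(12, 2) = 79 combinations per (os, arch) pair,
+# before the per-platform exclusions trim the matrix further.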
+
+
+GCC = Option.as_compiler('CC=gcc CXX=g++')
+CLANG = Option.as_compiler('CC=clang CXX=clang++')
+CL = Option.as_compiler('CC=cl.exe CXX=cl.exe')
+
+
+compilers_unusual = [CLANG,]
+
+
+CROSS_COMPILE_32BIT = Option.as_feature('CROSS_COMPILE_32BIT')
+feature_unusuals = [CROSS_COMPILE_32BIT]
+
+
+configure_flag_unusuals = [Option.as_configure_flag(opt) for opt in (
+ '--enable-debug',
+ '--enable-prof',
+ '--disable-stats',
+ '--disable-libdl',
+ '--enable-opt-safety-checks',
+ '--with-lg-page=16',
+)]
+
+
+malloc_conf_unusuals = [Option.as_malloc_conf(opt) for opt in (
+ 'tcache:false',
+ 'dss:primary',
+ 'percpu_arena:percpu',
+ 'background_thread:true',
+)]
+
+
+all_unusuals = (compilers_unusual + feature_unusuals
+ + configure_flag_unusuals + malloc_conf_unusuals)
+
+
+def get_extra_cflags(os, compiler):
+ if os == FREEBSD:
+ return []
+
+ if os == WINDOWS:
+ # For non-CL compilers under Windows (for now it's only MinGW-GCC),
+ # -fcommon needs to be specified to correctly handle multiple
+ # 'malloc_conf' symbols and such, which are declared weak under Linux.
+ # Weak symbols don't work with MinGW-GCC.
+ if compiler != CL.value:
+ return ['-fcommon']
+ else:
+ return []
+
+ # We get some spurious errors when -Warray-bounds is enabled.
+ extra_cflags = ['-Werror', '-Wno-array-bounds']
+ if compiler == CLANG.value or os == OSX:
+ extra_cflags += [
+ '-Wno-unknown-warning-option',
+ '-Wno-ignored-attributes'
+ ]
+ if os == OSX:
+ extra_cflags += [
+ '-Wno-deprecated-declarations',
+ ]
+ return extra_cflags
+
+
+# Formats a job from a combination of flags
+def format_job(os, arch, combination):
+ compilers = [x.value for x in combination if x.type == Option.Type.COMPILER]
+ assert(len(compilers) <= 1)
+ compiler_flags = [x.value for x in combination if x.type == Option.Type.COMPILER_FLAG]
+ configure_flags = [x.value for x in combination if x.type == Option.Type.CONFIGURE_FLAG]
+ malloc_conf = [x.value for x in combination if x.type == Option.Type.MALLOC_CONF]
+ features = [x.value for x in combination if x.type == Option.Type.FEATURE]
+
+ if len(malloc_conf) > 0:
+ configure_flags.append('--with-malloc-conf=' + ','.join(malloc_conf))
+
+ if not compilers:
+ compiler = GCC.value
+ else:
+ compiler = compilers[0]
+
+ extra_environment_vars = ''
+ cross_compile = CROSS_COMPILE_32BIT.value in features
+ if os == LINUX and cross_compile:
+ compiler_flags.append('-m32')
+
+ features_str = ' '.join([' {}=yes'.format(feature) for feature in features])
+
+ stringify = lambda arr, name: ' {}="{}"'.format(name, ' '.join(arr)) if arr else ''
+ env_string = '{}{}{}{}{}{}'.format(
+ compiler,
+ features_str,
+ stringify(compiler_flags, 'COMPILER_FLAGS'),
+ stringify(configure_flags, 'CONFIGURE_FLAGS'),
+ stringify(get_extra_cflags(os, compiler), 'EXTRA_CFLAGS'),
+ extra_environment_vars)
+
+ job = ' - os: {}\n'.format(os)
+ job += ' arch: {}\n'.format(arch)
+ job += ' env: {}'.format(env_string)
+ return job
+
+
+def generate_unusual_combinations(unusuals, max_unusual_opts):
+ """
+ Generates different combinations of non-standard compilers, compiler flags,
+ configure flags and malloc_conf settings.
+
+ @param max_unusual_opts: Limit of unusual options per combination.
+ """
+ return chain.from_iterable(
+ [combinations(unusuals, i) for i in range(max_unusual_opts + 1)])
+
+
+def included(combination, exclude):
+ """
+ Checks if the combination of options should be included in the Travis
+ testing matrix.
+
+ @param exclude: A list of options to be avoided.
+ """
+ return not any(excluded in combination for excluded in exclude)
+
+
+def generate_jobs(os, arch, exclude, max_unusual_opts, unusuals=all_unusuals):
+ jobs = []
+ for combination in generate_unusual_combinations(unusuals, max_unusual_opts):
+ if included(combination, exclude):
+ jobs.append(format_job(os, arch, combination))
+ return '\n'.join(jobs)
+
+
+def generate_linux(arch):
+ os = LINUX
+
+ # Only generate 2 unusual options for AMD64 to reduce matrix size
+ max_unusual_opts = MAX_UNUSUAL_OPTIONS if arch == AMD64 else 1
+
+ exclude = []
+ if arch == PPC64LE:
+ # Avoid 32 bit builds and clang on PowerPC
+ exclude = (CROSS_COMPILE_32BIT, CLANG,)
+
+ return generate_jobs(os, arch, exclude, max_unusual_opts)
+
+
+def generate_macos(arch):
+ os = OSX
+
+ max_unusual_opts = 1
+
+ exclude = ([Option.as_malloc_conf(opt) for opt in (
+ 'dss:primary',
+ 'percpu_arena:percpu',
+ 'background_thread:true')] +
+ [Option.as_configure_flag('--enable-prof')] +
+ [CLANG,])
+
+ return generate_jobs(os, arch, exclude, max_unusual_opts)
+
+
+def generate_windows(arch):
+ os = WINDOWS
+
+ max_unusual_opts = 3
+ unusuals = (
+ Option.as_configure_flag('--enable-debug'),
+ CL,
+ CROSS_COMPILE_32BIT,
+ )
+ return generate_jobs(os, arch, (), max_unusual_opts, unusuals)
+
+
+def generate_freebsd(arch):
+ os = FREEBSD
+
+ max_unusual_opts = 4
+ unusuals = (
+ Option.as_configure_flag('--enable-debug'),
+ Option.as_configure_flag('--enable-prof --enable-prof-libunwind'),
+ Option.as_configure_flag('--with-lg-page=16 --with-malloc-conf=tcache:false'),
+ CROSS_COMPILE_32BIT,
+ )
+ return generate_jobs(os, arch, (), max_unusual_opts, unusuals)
+
+
+
+def get_manual_jobs():
+ return """\
+ # Development build
+ - os: linux
+ env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug \
+--disable-cache-oblivious --enable-stats --enable-log --enable-prof" \
+EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+    # --enable-experimental-smallocx:
+ - os: linux
+ env: CC=gcc CXX=g++ CONFIGURE_FLAGS="--enable-debug \
+--enable-experimental-smallocx --enable-stats --enable-prof" \
+EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+"""
+
+
+def main():
+ jobs = '\n'.join((
+ generate_windows(AMD64),
+
+ generate_freebsd(AMD64),
+
+ generate_linux(AMD64),
+ generate_linux(PPC64LE),
+
+ generate_macos(AMD64),
+
+ get_manual_jobs(),
+ ))
+
+ print(TRAVIS_TEMPLATE.format(jobs=jobs))
+
+
+if __name__ == '__main__':
+ main()
+||||||| dec341af7695
+=======
+#!/usr/bin/env python
+
+from itertools import combinations
+
+travis_template = """\
+language: generic
+dist: precise
+
+matrix:
+ include:
+%s
+
+before_script:
+ - autoconf
+ - scripts/gen_travis.py > travis_script && diff .travis.yml travis_script
+ - ./configure ${COMPILER_FLAGS:+ \
+ CC="$CC $COMPILER_FLAGS" \
+ CXX="$CXX $COMPILER_FLAGS" } \
+ $CONFIGURE_FLAGS
+ - make -j3
+ - make -j3 tests
+
+script:
+ - make check
+"""
+
+# The 'default' configuration is gcc, on linux, with no compiler or configure
+# flags. We also test with clang, -m32, --enable-debug, --enable-prof,
+# --disable-stats, and --with-malloc-conf=tcache:false. To avoid abusing
+# travis though, we don't test all 2**7 = 128 possible combinations of these;
+# instead, we only test combinations of up to 2 'unusual' settings, under the
+# hope that bugs involving interactions of such settings are rare.
+# Testing up to MAX_UNUSUAL_OPTIONS = 2 things at once keeps this to
+# C(7, 0) + C(7, 1) + C(7, 2) = 29 combinations.
+MAX_UNUSUAL_OPTIONS = 2
+
+os_default = 'linux'
+os_unusual = 'osx'
+
+compilers_default = 'CC=gcc CXX=g++'
+compilers_unusual = 'CC=clang CXX=clang++'
+
+compiler_flag_unusuals = ['-m32']
+
+configure_flag_unusuals = [
+ '--enable-debug',
+ '--enable-prof',
+ '--disable-stats',
+ '--disable-libdl',
+ '--enable-opt-safety-checks',
+]
+
+malloc_conf_unusuals = [
+ 'tcache:false',
+ 'dss:primary',
+ 'percpu_arena:percpu',
+ 'background_thread:true',
+]
+
+all_unusuals = (
+ [os_unusual] + [compilers_unusual] + compiler_flag_unusuals
+ + configure_flag_unusuals + malloc_conf_unusuals
+)
+
+unusual_combinations_to_test = []
+for i in xrange(MAX_UNUSUAL_OPTIONS + 1):
+ unusual_combinations_to_test += combinations(all_unusuals, i)
+
+gcc_multilib_set = False
+# Formats a job from a combination of flags
+def format_job(combination):
+ global gcc_multilib_set
+
+ os = os_unusual if os_unusual in combination else os_default
+ compilers = compilers_unusual if compilers_unusual in combination else compilers_default
+
+ compiler_flags = [x for x in combination if x in compiler_flag_unusuals]
+ configure_flags = [x for x in combination if x in configure_flag_unusuals]
+ malloc_conf = [x for x in combination if x in malloc_conf_unusuals]
+
+ # Filter out unsupported configurations on OS X.
+ if os == 'osx' and ('dss:primary' in malloc_conf or \
+ 'percpu_arena:percpu' in malloc_conf or 'background_thread:true' \
+ in malloc_conf):
+ return ""
+ if len(malloc_conf) > 0:
+ configure_flags.append('--with-malloc-conf=' + ",".join(malloc_conf))
+
+ # Filter out an unsupported configuration - heap profiling on OS X.
+ if os == 'osx' and '--enable-prof' in configure_flags:
+ return ""
+
+ # We get some spurious errors when -Warray-bounds is enabled.
+ env_string = ('{} COMPILER_FLAGS="{}" CONFIGURE_FLAGS="{}" '
+ 'EXTRA_CFLAGS="-Werror -Wno-array-bounds"').format(
+ compilers, " ".join(compiler_flags), " ".join(configure_flags))
+
+ job = ""
+ job += ' - os: %s\n' % os
+ job += ' env: %s\n' % env_string
+ if '-m32' in combination and os == 'linux':
+ job += ' addons:'
+ if gcc_multilib_set:
+ job += ' *gcc_multilib\n'
+ else:
+ job += ' &gcc_multilib\n'
+ job += ' apt:\n'
+ job += ' packages:\n'
+ job += ' - gcc-multilib\n'
+ gcc_multilib_set = True
+ return job
+
+include_rows = ""
+for combination in unusual_combinations_to_test:
+ include_rows += format_job(combination)
+
+# Development build
+include_rows += '''\
+ # Development build
+ - os: linux
+ env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --disable-cache-oblivious --enable-stats --enable-log --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+'''
+
+# Enable-experimental-smallocx
+include_rows += '''\
+    # --enable-experimental-smallocx:
+ - os: linux
+ env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="--enable-debug --enable-experimental-smallocx --enable-stats --enable-prof" EXTRA_CFLAGS="-Werror -Wno-array-bounds"
+'''
+
+# Valgrind build bots
+include_rows += '''
+ # Valgrind
+ - os: linux
+ env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" JEMALLOC_TEST_PREFIX="valgrind"
+ addons:
+ apt:
+ packages:
+ - valgrind
+'''
+
+# To enable valgrind on macosx add:
+#
+# - os: osx
+# env: CC=gcc CXX=g++ COMPILER_FLAGS="" CONFIGURE_FLAGS="" EXTRA_CFLAGS="-Werror -Wno-array-bounds" JEMALLOC_TEST_PREFIX="valgrind"
+# install: brew install valgrind
+#
+# It currently fails due to: https://github.com/jemalloc/jemalloc/issues/1274
+
+print travis_template % include_rows
+>>>>>>> main
diff --git a/contrib/jemalloc/scripts/linux/before_install.sh b/contrib/jemalloc/scripts/linux/before_install.sh
new file mode 100644
index 000000000000..674174639ea9
--- /dev/null
+++ b/contrib/jemalloc/scripts/linux/before_install.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -ev
+
+if [[ "$TRAVIS_OS_NAME" != "linux" ]]; then
+ echo "Incorrect \$TRAVIS_OS_NAME: expected linux, got $TRAVIS_OS_NAME"
+ exit 1
+fi
+
+if [[ "$CROSS_COMPILE_32BIT" == "yes" ]]; then
+ sudo apt-get update
+ sudo apt-get -y install gcc-multilib g++-multilib
+fi
diff --git a/contrib/jemalloc/scripts/windows/before_install.sh b/contrib/jemalloc/scripts/windows/before_install.sh
new file mode 100644
index 000000000000..2740c4588d61
--- /dev/null
+++ b/contrib/jemalloc/scripts/windows/before_install.sh
@@ -0,0 +1,83 @@
+#!/bin/bash
+
+set -e
+
+# The purpose of this script is to install build dependencies and set
+# $build_env to a function that sets appropriate environment variables,
+# to enable the (mingw32|mingw64) environment if we want to compile with gcc,
+# or (mingw32|mingw64) + vcvarsall.bat if we want to compile with cl.exe.
+
+if [[ "$TRAVIS_OS_NAME" != "windows" ]]; then
+ echo "Incorrect \$TRAVIS_OS_NAME: expected windows, got $TRAVIS_OS_NAME"
+ exit 1
+fi
+
+[[ ! -f C:/tools/msys64/msys2_shell.cmd ]] && rm -rf C:/tools/msys64
+choco uninstall -y mingw
+choco upgrade --no-progress -y msys2
+
+msys_shell_cmd="cmd //C RefreshEnv.cmd && set MSYS=winsymlinks:nativestrict && C:\\tools\\msys64\\msys2_shell.cmd"
+
+msys2() { $msys_shell_cmd -defterm -no-start -msys2 -c "$*"; }
+mingw32() { $msys_shell_cmd -defterm -no-start -mingw32 -c "$*"; }
+mingw64() { $msys_shell_cmd -defterm -no-start -mingw64 -c "$*"; }
+
+if [[ "$CROSS_COMPILE_32BIT" == "yes" ]]; then
+ mingw=mingw32
+ mingw_gcc_package_arch=i686
+else
+ mingw=mingw64
+ mingw_gcc_package_arch=x86_64
+fi
+
+if [[ "$CC" == *"gcc"* ]]; then
+ $mingw pacman -S --noconfirm --needed \
+ autotools \
+ git \
+ mingw-w64-${mingw_gcc_package_arch}-make \
+ mingw-w64-${mingw_gcc_package_arch}-gcc \
+ mingw-w64-${mingw_gcc_package_arch}-binutils
+ build_env=$mingw
+elif [[ "$CC" == *"cl"* ]]; then
+ $mingw pacman -S --noconfirm --needed \
+ autotools \
+ git \
+ mingw-w64-${mingw_gcc_package_arch}-make \
+ mingw-w64-${mingw_gcc_package_arch}-binutils
+
+    # In order to use the MSVC compiler (cl.exe), we need to correctly set some environment
+ # variables, namely PATH, INCLUDE, LIB and LIBPATH. The correct values of these
+ # variables are set by a batch script "vcvarsall.bat". The code below generates
+ # a batch script that calls "vcvarsall.bat" and prints the environment variables.
+ #
+ # Then, those environment variables are transformed from cmd to bash format and put
+ # into a script $apply_vsenv. If cl.exe needs to be used from bash, one can
+ # 'source $apply_vsenv' and it will apply the environment variables needed for cl.exe
+ # to be located and function correctly.
+ #
+    # Finally, a function "mingw_with_msvc_vars" is generated which forwards user input
+ # into a correct mingw (32 or 64) subshell that automatically performs 'source $apply_vsenv',
+ # making it possible for autotools to discover and use cl.exe.
+ vcvarsall="vcvarsall.tmp.bat"
+ echo "@echo off" > $vcvarsall
+ echo "call \"c:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\\\vcvarsall.bat\" $USE_MSVC" >> $vcvarsall
+ echo "set" >> $vcvarsall
+
+ apply_vsenv="./apply_vsenv.sh"
+ cmd //C $vcvarsall | grep -E "^PATH=" | sed -n -e 's/\(.*\)=\(.*\)/export \1=$PATH:"\2"/g' \
+ -e 's/\([a-zA-Z]\):[\\\/]/\/\1\//g' \
+ -e 's/\\/\//g' \
+ -e 's/;\//:\//gp' > $apply_vsenv
+ cmd //C $vcvarsall | grep -E "^(INCLUDE|LIB|LIBPATH)=" | sed -n -e 's/\(.*\)=\(.*\)/export \1="\2"/gp' >> $apply_vsenv
+
+ cat $apply_vsenv
+ mingw_with_msvc_vars() { $msys_shell_cmd -defterm -no-start -$mingw -c "source $apply_vsenv && ""$*"; }
+ build_env=mingw_with_msvc_vars
+
+ rm -f $vcvarsall
+else
+ echo "Unknown C compiler: $CC"
+ exit 1
+fi
+
+echo "Build environment function: $build_env"
diff --git a/contrib/jemalloc/scripts/windows/before_script.sh b/contrib/jemalloc/scripts/windows/before_script.sh
new file mode 100644
index 000000000000..9d30ababd933
--- /dev/null
+++ b/contrib/jemalloc/scripts/windows/before_script.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+set -e
+
+if [[ "$TRAVIS_OS_NAME" != "windows" ]]; then
+ echo "Incorrect \$TRAVIS_OS_NAME: expected windows, got $TRAVIS_OS_NAME"
+ exit 1
+fi
+
+$build_env autoconf
+$build_env ./configure $CONFIGURE_FLAGS
+# mingw32-make simply means "make", unrelated to mingw32 vs mingw64.
+# Disregard the prefix and treat it as "make".
+$build_env mingw32-make -j3
+# At the moment, it's impossible to build the tests in parallel,
+# seemingly due to concurrent writes to the '.pdb' file. I don't know why
+# that happens, because we explicitly supply '/Fs' to the compiler.
+# Until we figure out how to fix it, we should build tests sequentially
+# on Windows.
+$build_env mingw32-make tests
diff --git a/contrib/jemalloc/scripts/windows/script.sh b/contrib/jemalloc/scripts/windows/script.sh
new file mode 100644
index 000000000000..3a27f70aa517
--- /dev/null
+++ b/contrib/jemalloc/scripts/windows/script.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -e
+
+if [[ "$TRAVIS_OS_NAME" != "windows" ]]; then
+ echo "Incorrect \$TRAVIS_OS_NAME: expected windows, got $TRAVIS_OS_NAME"
+ exit 1
+fi
+
+$build_env mingw32-make -k check
diff --git a/contrib/jemalloc/src/arena.c b/contrib/jemalloc/src/arena.c
index ba50e41033ff..857b27c52fb0 100644
--- a/contrib/jemalloc/src/arena.c
+++ b/contrib/jemalloc/src/arena.c
@@ -1,11 +1,12 @@
-#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/div.h"
+#include "jemalloc/internal/decay.h"
+#include "jemalloc/internal/ehooks.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/san.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/safety_check.h"
@@ -35,34 +36,37 @@ ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;
static atomic_zd_t dirty_decay_ms_default;
static atomic_zd_t muzzy_decay_ms_default;
-const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
-#define STEP(step, h, x, y) \
- h,
- SMOOTHSTEP
-#undef STEP
-};
+emap_t arena_emap_global;
+pa_central_t arena_pa_central_global;
-static div_info_t arena_binind_div_info[SC_NBINS];
+div_info_t arena_binind_div_info[SC_NBINS];
size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
+
+uint32_t arena_bin_offsets[SC_NBINS];
+static unsigned nbins_total;
+
static unsigned huge_arena_ind;
+const arena_config_t arena_config_default = {
+ /* .extent_hooks = */ (extent_hooks_t *)&ehooks_default_extent_hooks,
+ /* .metadata_use_hooks = */ true,
+};
+
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
* definition.
*/
-static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
- arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
- size_t npages_decay_max, bool is_background_thread);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
bool is_background_thread, bool all);
-static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
- bin_t *bin);
-static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
bin_t *bin);
+static void
+arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
+ size_t npages_new);
/******************************************************************************/
@@ -72,19 +76,17 @@ arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
*nthreads += arena_nthreads_get(arena, false);
*dss = dss_prec_names[arena_dss_prec_get(arena)];
- *dirty_decay_ms = arena_dirty_decay_ms_get(arena);
- *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
- *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
- *ndirty += extents_npages_get(&arena->extents_dirty);
- *nmuzzy += extents_npages_get(&arena->extents_muzzy);
+ *dirty_decay_ms = arena_decay_ms_get(arena, extent_state_dirty);
+ *muzzy_decay_ms = arena_decay_ms_get(arena, extent_state_muzzy);
+ pa_shard_basic_stats_merge(&arena->pa_shard, nactive, ndirty, nmuzzy);
}
void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
- bin_stats_t *bstats, arena_stats_large_t *lstats,
- arena_stats_extents_t *estats) {
+ bin_stats_data_t *bstats, arena_stats_large_t *lstats,
+ pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats) {
cassert(config_stats);
arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
@@ -93,122 +95,74 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
size_t base_allocated, base_resident, base_mapped, metadata_thp;
base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
&base_mapped, &metadata_thp);
+ size_t pac_mapped_sz = pac_mapped(&arena->pa_shard.pac);
+ astats->mapped += base_mapped + pac_mapped_sz;
+ astats->resident += base_resident;
- arena_stats_lock(tsdn, &arena->stats);
+ LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
- arena_stats_accum_zu(&astats->mapped, base_mapped
- + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
- arena_stats_accum_zu(&astats->retained,
- extents_npages_get(&arena->extents_retained) << LG_PAGE);
-
- atomic_store_zu(&astats->extent_avail,
- atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED),
- ATOMIC_RELAXED);
-
- arena_stats_accum_u64(&astats->decay_dirty.npurge,
- arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.decay_dirty.npurge));
- arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
- arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.decay_dirty.nmadvise));
- arena_stats_accum_u64(&astats->decay_dirty.purged,
- arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.decay_dirty.purged));
-
- arena_stats_accum_u64(&astats->decay_muzzy.npurge,
- arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.decay_muzzy.npurge));
- arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
- arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.decay_muzzy.nmadvise));
- arena_stats_accum_u64(&astats->decay_muzzy.purged,
- arena_stats_read_u64(tsdn, &arena->stats,
- &arena->stats.decay_muzzy.purged));
-
- arena_stats_accum_zu(&astats->base, base_allocated);
- arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
- arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
- arena_stats_accum_zu(&astats->resident, base_resident +
- (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
- extents_npages_get(&arena->extents_dirty) +
- extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
- arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu(
- &arena->stats.abandoned_vm, ATOMIC_RELAXED));
+ astats->base += base_allocated;
+ atomic_load_add_store_zu(&astats->internal, arena_internal_get(arena));
+ astats->metadata_thp += metadata_thp;
for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
- uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
+ uint64_t nmalloc = locked_read_u64(tsdn,
+ LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[i].nmalloc);
- arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
- arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);
+ locked_inc_u64_unsynchronized(&lstats[i].nmalloc, nmalloc);
+ astats->nmalloc_large += nmalloc;
- uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
+ uint64_t ndalloc = locked_read_u64(tsdn,
+ LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[i].ndalloc);
- arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
- arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);
+ locked_inc_u64_unsynchronized(&lstats[i].ndalloc, ndalloc);
+ astats->ndalloc_large += ndalloc;
- uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
+ uint64_t nrequests = locked_read_u64(tsdn,
+ LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[i].nrequests);
- arena_stats_accum_u64(&lstats[i].nrequests,
- nmalloc + nrequests);
- arena_stats_accum_u64(&astats->nrequests_large,
+ locked_inc_u64_unsynchronized(&lstats[i].nrequests,
nmalloc + nrequests);
+ astats->nrequests_large += nmalloc + nrequests;
/* nfill == nmalloc for large currently. */
- arena_stats_accum_u64(&lstats[i].nfills, nmalloc);
- arena_stats_accum_u64(&astats->nfills_large, nmalloc);
+ locked_inc_u64_unsynchronized(&lstats[i].nfills, nmalloc);
+ astats->nfills_large += nmalloc;
- uint64_t nflush = arena_stats_read_u64(tsdn, &arena->stats,
+ uint64_t nflush = locked_read_u64(tsdn,
+ LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[i].nflushes);
- arena_stats_accum_u64(&lstats[i].nflushes, nflush);
- arena_stats_accum_u64(&astats->nflushes_large, nflush);
+ locked_inc_u64_unsynchronized(&lstats[i].nflushes, nflush);
+ astats->nflushes_large += nflush;
assert(nmalloc >= ndalloc);
assert(nmalloc - ndalloc <= SIZE_T_MAX);
size_t curlextents = (size_t)(nmalloc - ndalloc);
lstats[i].curlextents += curlextents;
- arena_stats_accum_zu(&astats->allocated_large,
- curlextents * sz_index2size(SC_NBINS + i));
- }
-
- for (pszind_t i = 0; i < SC_NPSIZES; i++) {
- size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
- retained_bytes;
- dirty = extents_nextents_get(&arena->extents_dirty, i);
- muzzy = extents_nextents_get(&arena->extents_muzzy, i);
- retained = extents_nextents_get(&arena->extents_retained, i);
- dirty_bytes = extents_nbytes_get(&arena->extents_dirty, i);
- muzzy_bytes = extents_nbytes_get(&arena->extents_muzzy, i);
- retained_bytes =
- extents_nbytes_get(&arena->extents_retained, i);
-
- atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED);
- atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED);
- atomic_store_zu(&estats[i].nretained, retained, ATOMIC_RELAXED);
- atomic_store_zu(&estats[i].dirty_bytes, dirty_bytes,
- ATOMIC_RELAXED);
- atomic_store_zu(&estats[i].muzzy_bytes, muzzy_bytes,
- ATOMIC_RELAXED);
- atomic_store_zu(&estats[i].retained_bytes, retained_bytes,
- ATOMIC_RELAXED);
- }
-
- arena_stats_unlock(tsdn, &arena->stats);
-
- /* tcache_bytes counts currently cached bytes. */
- atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
+ astats->allocated_large +=
+ curlextents * sz_index2size(SC_NBINS + i);
+ }
+
+ pa_shard_stats_merge(tsdn, &arena->pa_shard, &astats->pa_shard_stats,
+ estats, hpastats, secstats, &astats->resident);
+
+ LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
+
+ /* Currently cached bytes and sanitizer-stashed bytes in tcache. */
+ astats->tcache_bytes = 0;
+ astats->tcache_stashed_bytes = 0;
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
cache_bin_array_descriptor_t *descriptor;
ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
- szind_t i = 0;
- for (; i < SC_NBINS; i++) {
- cache_bin_t *tbin = &descriptor->bins_small[i];
- arena_stats_accum_zu(&astats->tcache_bytes,
- tbin->ncached * sz_index2size(i));
- }
- for (; i < nhbins; i++) {
- cache_bin_t *tbin = &descriptor->bins_large[i];
- arena_stats_accum_zu(&astats->tcache_bytes,
- tbin->ncached * sz_index2size(i));
+ for (szind_t i = 0; i < nhbins; i++) {
+ cache_bin_t *cache_bin = &descriptor->bins[i];
+ cache_bin_sz_t ncached, nstashed;
+ cache_bin_nitems_get_remote(cache_bin,
+ &tcache_bin_info[i], &ncached, &nstashed);
+
+ astats->tcache_bytes += ncached * sz_index2size(i);
+ astats->tcache_stashed_bytes += nstashed *
+ sz_index2size(i);
}
}
malloc_mutex_prof_read(tsdn,
@@ -224,21 +178,11 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
/* Gather per arena mutex profiling data. */
READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
- READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
- arena_prof_mutex_extent_avail)
- READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
- arena_prof_mutex_extents_dirty)
- READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
- arena_prof_mutex_extents_muzzy)
- READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
- arena_prof_mutex_extents_retained)
- READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
- arena_prof_mutex_decay_dirty)
- READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
- arena_prof_mutex_decay_muzzy)
READ_ARENA_MUTEX_PROF_DATA(base->mtx,
- arena_prof_mutex_base)
+ arena_prof_mutex_base);
#undef READ_ARENA_MUTEX_PROF_DATA
+ pa_shard_mtx_stats_read(tsdn, &arena->pa_shard,
+ astats->mutex_prof_data);
nstime_copy(&astats->uptime, &arena->create_time);
nstime_update(&astats->uptime);
@@ -247,55 +191,67 @@ arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
for (szind_t i = 0; i < SC_NBINS; i++) {
for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
bin_stats_merge(tsdn, &bstats[i],
- &arena->bins[i].bin_shards[j]);
+ arena_get_bin(arena, i, j));
}
}
}
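
The merge above replaces per-counter atomic accumulation with a single stats mutex held across the whole read, accumulating into a caller-owned snapshot with plain arithmetic. A minimal sketch of that locked-counter pattern, assuming POSIX threads and hypothetical names (not jemalloc's LOCKEDINT API):

    #include <pthread.h>
    #include <stdint.h>

    typedef struct {
        pthread_mutex_t mtx;     /* guards the counters below */
        uint64_t nmalloc;
        uint64_t ndalloc;
    } shard_stats_t;

    typedef struct {
        uint64_t nmalloc;        /* caller-local: no locking needed */
        uint64_t ndalloc;
    } stats_snapshot_t;

    static void
    stats_merge(shard_stats_t *shard, stats_snapshot_t *out) {
        pthread_mutex_lock(&shard->mtx);
        /* "Locked read" of the shared counters... */
        out->nmalloc += shard->nmalloc;
        out->ndalloc += shard->ndalloc;
        /* ...the snapshot is private to the caller, so += suffices. */
        pthread_mutex_unlock(&shard->mtx);
    }
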
-void
-arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent) {
+static void
+arena_background_thread_inactivity_check(tsdn_t *tsdn, arena_t *arena,
+ bool is_background_thread) {
+ if (!background_thread_enabled() || is_background_thread) {
+ return;
+ }
+ background_thread_info_t *info =
+ arena_background_thread_info_get(arena);
+ if (background_thread_indefinite_sleep(info)) {
+ arena_maybe_do_deferred_work(tsdn, arena,
+ &arena->pa_shard.pac.decay_dirty, 0);
+ }
+}
+
+/*
+ * React to deferred work generated by a PAI function.
+ */
+void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
- extent);
- if (arena_dirty_decay_ms_get(arena) == 0) {
+ if (decay_immediately(&arena->pa_shard.pac.decay_dirty)) {
arena_decay_dirty(tsdn, arena, false, true);
- } else {
- arena_background_thread_inactivity_check(tsdn, arena, false);
}
+ arena_background_thread_inactivity_check(tsdn, arena, false);
}
static void *
-arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
+arena_slab_reg_alloc(edata_t *slab, const bin_info_t *bin_info) {
void *ret;
- arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+ slab_data_t *slab_data = edata_slab_data_get(slab);
size_t regind;
- assert(extent_nfree_get(slab) > 0);
+ assert(edata_nfree_get(slab) > 0);
assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
- ret = (void *)((uintptr_t)extent_addr_get(slab) +
+ ret = (void *)((uintptr_t)edata_addr_get(slab) +
(uintptr_t)(bin_info->reg_size * regind));
- extent_nfree_dec(slab);
+ edata_nfree_dec(slab);
return ret;
}
static void
-arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
+arena_slab_reg_alloc_batch(edata_t *slab, const bin_info_t *bin_info,
unsigned cnt, void** ptrs) {
- arena_slab_data_t *slab_data = extent_slab_data_get(slab);
+ slab_data_t *slab_data = edata_slab_data_get(slab);
- assert(extent_nfree_get(slab) >= cnt);
+ assert(edata_nfree_get(slab) >= cnt);
assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
#if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
for (unsigned i = 0; i < cnt; i++) {
size_t regind = bitmap_sfu(slab_data->bitmap,
&bin_info->bitmap_info);
- *(ptrs + i) = (void *)((uintptr_t)extent_addr_get(slab) +
+ *(ptrs + i) = (void *)((uintptr_t)edata_addr_get(slab) +
(uintptr_t)(bin_info->reg_size * regind));
}
#else
@@ -316,7 +272,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
* Load from memory locations only once, outside the
* hot loop below.
*/
- uintptr_t base = (uintptr_t)extent_addr_get(slab);
+ uintptr_t base = (uintptr_t)edata_addr_get(slab);
uintptr_t regsize = (uintptr_t)bin_info->reg_size;
while (pop--) {
size_t bit = cfs_lu(&g);
@@ -328,56 +284,7 @@ arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
slab_data->bitmap[group] = g;
}
#endif
- extent_nfree_sub(slab, cnt);
-}
-
-#ifndef JEMALLOC_JET
-static
-#endif
-size_t
-arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
- size_t diff, regind;
-
- /* Freeing a pointer outside the slab can cause assertion failure. */
- assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
- assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
- /* Freeing an interior pointer can cause assertion failure. */
- assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
- (uintptr_t)bin_infos[binind].reg_size == 0);
-
- diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
-
- /* Avoid doing division with a variable divisor. */
- regind = div_compute(&arena_binind_div_info[binind], diff);
-
- assert(regind < bin_infos[binind].nregs);
-
- return regind;
-}
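
The removed arena_slab_regind above (relocated by this change) avoids a variable-divisor division on the free path via div_compute: the divisor is known when the bin is set up, so a fixed-point reciprocal is precomputed once and each lookup becomes a multiply and a shift. A standalone sketch of the trick, with illustrative names and constants rather than jemalloc's div.h:

    #include <assert.h>
    #include <stdint.h>

    typedef struct {
        uint64_t magic;    /* ceil(2^32 / d), for a divisor d >= 2 */
    } toy_div_t;

    static toy_div_t
    toy_div_init(uint32_t d) {
        toy_div_t dv = { (((uint64_t)1 << 32) / d) + 1 };
        return dv;
    }

    static uint32_t
    toy_div(toy_div_t dv, uint32_t n) {
        /* Exact whenever n is a multiple of d and n < 2^32. */
        return (uint32_t)(((uint64_t)n * dv.magic) >> 32);
    }

    int main(void) {
        uint32_t reg_size = 48;    /* hypothetical bin region size */
        toy_div_t dv = toy_div_init(reg_size);
        for (uint32_t diff = 0; diff < 48U * 1000; diff += reg_size) {
            assert(toy_div(dv, diff) == diff / reg_size);
        }
        return 0;
    }
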
-
-static void
-arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
- szind_t binind = extent_szind_get(slab);
- const bin_info_t *bin_info = &bin_infos[binind];
- size_t regind = arena_slab_regind(slab, binind, ptr);
-
- assert(extent_nfree_get(slab) < bin_info->nregs);
- /* Freeing an unallocated pointer can cause assertion failure. */
- assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));
-
- bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
- extent_nfree_inc(slab);
-}
-
-static void
-arena_nactive_add(arena_t *arena, size_t add_pages) {
- atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
-}
-
-static void
-arena_nactive_sub(arena_t *arena, size_t sub_pages) {
- assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
- atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
+ edata_nfree_sub(slab, cnt);
}
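
The batch path above visits only the set bits of each bitmap group: a popcount bounds the loop, and each iteration extracts the lowest set bit and clears it. A self-contained sketch of the same bit walk, assuming GCC/Clang builtins and made-up slab geometry:

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        uint64_t g = 0xF0F0;          /* one bitmap group; 1 = free region */
        uintptr_t base = 0x100000;    /* hypothetical slab base address */
        uintptr_t regsize = 64;       /* hypothetical region size */
        unsigned want = 3;
        unsigned pop = (unsigned)__builtin_popcountll(g);
        unsigned cnt = want < pop ? want : pop;
        while (cnt--) {
            unsigned bit = (unsigned)__builtin_ctzll(g); /* lowest set bit */
            g &= g - 1;                                  /* clear it */
            printf("reg %#lx\n", (unsigned long)(base + bit * regsize));
        }
        /* A real allocator would store g back and decrement nfree. */
        return 0;
    }
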
static void
@@ -392,7 +299,7 @@ arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
index = sz_size2index(usize);
hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
- arena_stats_add_u64(tsdn, &arena->stats,
+ locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[hindex].nmalloc, 1);
}
@@ -408,551 +315,118 @@ arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
index = sz_size2index(usize);
hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
- arena_stats_add_u64(tsdn, &arena->stats,
+ locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
&arena->stats.lstats[hindex].ndalloc, 1);
}
static void
arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
size_t usize) {
- arena_large_dalloc_stats_update(tsdn, arena, oldusize);
arena_large_malloc_stats_update(tsdn, arena, usize);
+ arena_large_dalloc_stats_update(tsdn, arena, oldusize);
}
-static bool
-arena_may_have_muzzy(arena_t *arena) {
- return (pages_can_purge_lazy && (arena_muzzy_decay_ms_get(arena) != 0));
-}
-
-extent_t *
+edata_t *
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
- size_t alignment, bool *zero) {
- extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
+ size_t alignment, bool zero) {
+ bool deferred_work_generated = false;
+ szind_t szind = sz_size2index(usize);
+ size_t esize = usize + sz_large_pad;
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
+ bool guarded = san_large_extent_decide_guard(tsdn,
+ arena_get_ehooks(arena), esize, alignment);
+ edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
+ /* slab */ false, szind, zero, guarded, &deferred_work_generated);
+ assert(deferred_work_generated == false);
- szind_t szind = sz_size2index(usize);
- size_t mapped_add;
- bool commit = true;
- extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
- &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
- szind, zero, &commit);
- if (extent == NULL && arena_may_have_muzzy(arena)) {
- extent = extents_alloc(tsdn, arena, &extent_hooks,
- &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
- false, szind, zero, &commit);
- }
- size_t size = usize + sz_large_pad;
- if (extent == NULL) {
- extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
- usize, sz_large_pad, alignment, false, szind, zero,
- &commit);
+ if (edata != NULL) {
if (config_stats) {
- /*
- * extent may be NULL on OOM, but in that case
- * mapped_add isn't used below, so there's no need to
- * conditionlly set it to 0 here.
- */
- mapped_add = size;
+ LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
+ arena_large_malloc_stats_update(tsdn, arena, usize);
+ LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
}
- } else if (config_stats) {
- mapped_add = 0;
}
- if (extent != NULL) {
- if (config_stats) {
- arena_stats_lock(tsdn, &arena->stats);
- arena_large_malloc_stats_update(tsdn, arena, usize);
- if (mapped_add != 0) {
- arena_stats_add_zu(tsdn, &arena->stats,
- &arena->stats.mapped, mapped_add);
- }
- arena_stats_unlock(tsdn, &arena->stats);
- }
- arena_nactive_add(arena, size >> LG_PAGE);
+ if (edata != NULL && sz_large_pad != 0) {
+ arena_cache_oblivious_randomize(tsdn, arena, edata, alignment);
}
- return extent;
+ return edata;
}
void
-arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
+arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
if (config_stats) {
- arena_stats_lock(tsdn, &arena->stats);
+ LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
arena_large_dalloc_stats_update(tsdn, arena,
- extent_usize_get(extent));
- arena_stats_unlock(tsdn, &arena->stats);
+ edata_usize_get(edata));
+ LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
}
- arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
}
void
-arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
size_t oldusize) {
- size_t usize = extent_usize_get(extent);
- size_t udiff = oldusize - usize;
+ size_t usize = edata_usize_get(edata);
if (config_stats) {
- arena_stats_lock(tsdn, &arena->stats);
+ LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
- arena_stats_unlock(tsdn, &arena->stats);
+ LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
}
- arena_nactive_sub(arena, udiff >> LG_PAGE);
}
void
-arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
+arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
size_t oldusize) {
- size_t usize = extent_usize_get(extent);
- size_t udiff = usize - oldusize;
+ size_t usize = edata_usize_get(edata);
if (config_stats) {
- arena_stats_lock(tsdn, &arena->stats);
+ LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
- arena_stats_unlock(tsdn, &arena->stats);
- }
- arena_nactive_add(arena, udiff >> LG_PAGE);
-}
-
-static ssize_t
-arena_decay_ms_read(arena_decay_t *decay) {
- return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
-}
-
-static void
-arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
- atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
-}
-
-static void
-arena_decay_deadline_init(arena_decay_t *decay) {
- /*
- * Generate a new deadline that is uniformly random within the next
- * epoch after the current one.
- */
- nstime_copy(&decay->deadline, &decay->epoch);
- nstime_add(&decay->deadline, &decay->interval);
- if (arena_decay_ms_read(decay) > 0) {
- nstime_t jitter;
-
- nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
- nstime_ns(&decay->interval)));
- nstime_add(&decay->deadline, &jitter);
- }
-}
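
The removed deadline logic spreads purging across arenas by adding uniform jitter within one decay interval, so many arenas do not all hit their deadlines in lockstep. A toy version of the same idea, with rand() standing in for jemalloc's prng_range_u64:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
        uint64_t epoch_ns    = 1000000000;  /* hypothetical current epoch */
        uint64_t interval_ns = 10000000;    /* decay interval per step */
        /* Uniform jitter in [0, interval) staggers arena deadlines. */
        uint64_t jitter_ns = (uint64_t)rand() % interval_ns;
        uint64_t deadline_ns = epoch_ns + interval_ns + jitter_ns;
        printf("deadline = %llu ns\n", (unsigned long long)deadline_ns);
        return 0;
    }
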
-
-static bool
-arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
- return (nstime_compare(&decay->deadline, time) <= 0);
-}
-
-static size_t
-arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
- uint64_t sum;
- size_t npages_limit_backlog;
- unsigned i;
-
- /*
- * For each element of decay_backlog, multiply by the corresponding
- * fixed-point smoothstep decay factor. Sum the products, then divide
- * to round down to the nearest whole number of pages.
- */
- sum = 0;
- for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
- sum += decay->backlog[i] * h_steps[i];
- }
- npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
-
- return npages_limit_backlog;
-}
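
The removed limit computation above is a fixed-point dot product: each epoch's page delta is weighted by a smoothstep coefficient scaled by 2^SMOOTHSTEP_BFP, and the sum is shifted back down to whole pages. A toy version with four steps and made-up weights (the real table is much larger, and the weights follow the smoothstep curve):

    #include <stdint.h>
    #include <stdio.h>

    #define NSTEPS 4     /* toy; real SMOOTHSTEP_NSTEPS is much larger */
    #define BFP    24    /* assumed fractional bits, like SMOOTHSTEP_BFP */

    int main(void) {
        /* Weights scaled by 2^BFP (toy values, not the real curve). */
        const uint64_t h[NSTEPS] = {
            14U << 20, 10U << 20, 5U << 20, 1U << 20
        };
        const size_t backlog[NSTEPS] = { 100, 80, 40, 20 }; /* pages/epoch */

        uint64_t sum = 0;
        for (unsigned i = 0; i < NSTEPS; i++) {
            sum += (uint64_t)backlog[i] * h[i];  /* fixed-point products */
        }
        /* The shift rounds down to whole pages: prints 151. */
        printf("npages_limit = %zu\n", (size_t)(sum >> BFP));
        return 0;
    }
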
-
-static void
-arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
- size_t npages_delta = (current_npages > decay->nunpurged) ?
- current_npages - decay->nunpurged : 0;
- decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
-
- if (config_debug) {
- if (current_npages > decay->ceil_npages) {
- decay->ceil_npages = current_npages;
- }
- size_t npages_limit = arena_decay_backlog_npages_limit(decay);
- assert(decay->ceil_npages >= npages_limit);
- if (decay->ceil_npages > npages_limit) {
- decay->ceil_npages = npages_limit;
- }
+ LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
}
}
-static void
-arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
- size_t current_npages) {
- if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
- memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
- sizeof(size_t));
- } else {
- size_t nadvance_z = (size_t)nadvance_u64;
-
- assert((uint64_t)nadvance_z == nadvance_u64);
-
- memmove(decay->backlog, &decay->backlog[nadvance_z],
- (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
- if (nadvance_z > 1) {
- memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
- nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
- }
- }
-
- arena_decay_backlog_update_last(decay, current_npages);
-}
-
-static void
-arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
- extents_t *extents, size_t current_npages, size_t npages_limit,
- bool is_background_thread) {
- if (current_npages > npages_limit) {
- arena_decay_to_limit(tsdn, arena, decay, extents, false,
- npages_limit, current_npages - npages_limit,
- is_background_thread);
- }
-}
-
-static void
-arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
- size_t current_npages) {
- assert(arena_decay_deadline_reached(decay, time));
-
- nstime_t delta;
- nstime_copy(&delta, time);
- nstime_subtract(&delta, &decay->epoch);
-
- uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
- assert(nadvance_u64 > 0);
-
- /* Add nadvance_u64 decay intervals to epoch. */
- nstime_copy(&delta, &decay->interval);
- nstime_imultiply(&delta, nadvance_u64);
- nstime_add(&decay->epoch, &delta);
-
- /* Set a new deadline. */
- arena_decay_deadline_init(decay);
-
- /* Update the backlog. */
- arena_decay_backlog_update(decay, nadvance_u64, current_npages);
-}
-
-static void
-arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
- extents_t *extents, const nstime_t *time, bool is_background_thread) {
- size_t current_npages = extents_npages_get(extents);
- arena_decay_epoch_advance_helper(decay, time, current_npages);
-
- size_t npages_limit = arena_decay_backlog_npages_limit(decay);
- /* We may unlock decay->mtx when try_purge(). Finish logging first. */
- decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
- current_npages;
-
- if (!background_thread_enabled() || is_background_thread) {
- arena_decay_try_purge(tsdn, arena, decay, extents,
- current_npages, npages_limit, is_background_thread);
- }
-}
-
-static void
-arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
- arena_decay_ms_write(decay, decay_ms);
- if (decay_ms > 0) {
- nstime_init(&decay->interval, (uint64_t)decay_ms *
- KQU(1000000));
- nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
- }
-
- nstime_init(&decay->epoch, 0);
- nstime_update(&decay->epoch);
- decay->jitter_state = (uint64_t)(uintptr_t)decay;
- arena_decay_deadline_init(decay);
- decay->nunpurged = 0;
- memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
-}
-
-static bool
-arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
- arena_stats_decay_t *stats) {
- if (config_debug) {
- for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
- assert(((char *)decay)[i] == 0);
- }
- decay->ceil_npages = 0;
- }
- if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
- malloc_mutex_rank_exclusive)) {
- return true;
- }
- decay->purging = false;
- arena_decay_reinit(decay, decay_ms);
- /* Memory is zeroed, so there is no need to clear stats. */
- if (config_stats) {
- decay->stats = stats;
- }
- return false;
-}
-
-static bool
-arena_decay_ms_valid(ssize_t decay_ms) {
- if (decay_ms < -1) {
- return false;
- }
- if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
- KQU(1000)) {
- return true;
- }
- return false;
-}
-
-static bool
-arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
- extents_t *extents, bool is_background_thread) {
- malloc_mutex_assert_owner(tsdn, &decay->mtx);
-
- /* Purge all or nothing if the option is disabled. */
- ssize_t decay_ms = arena_decay_ms_read(decay);
- if (decay_ms <= 0) {
- if (decay_ms == 0) {
- arena_decay_to_limit(tsdn, arena, decay, extents, false,
- 0, extents_npages_get(extents),
- is_background_thread);
- }
- return false;
- }
-
- nstime_t time;
- nstime_init(&time, 0);
- nstime_update(&time);
- if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
- > 0)) {
- /*
- * Time went backwards. Move the epoch back in time and
- * generate a new deadline, with the expectation that time
- * typically flows forward for long enough periods of time that
- * epochs complete. Unfortunately, this strategy is susceptible
- * to clock jitter triggering premature epoch advances, but
- * clock jitter estimation and compensation isn't feasible here
- * because calls into this code are event-driven.
- */
- nstime_copy(&decay->epoch, &time);
- arena_decay_deadline_init(decay);
+/*
+ * In situations where we're not forcing a decay (i.e. the user did not
+ * specifically request it), should we purge ourselves, or wait for the
+ * background thread to get to it?
+ */
+static pac_purge_eagerness_t
+arena_decide_unforced_purge_eagerness(bool is_background_thread) {
+ if (is_background_thread) {
+ return PAC_PURGE_ALWAYS;
+ } else if (!is_background_thread && background_thread_enabled()) {
+ return PAC_PURGE_NEVER;
} else {
- /* Verify that time does not go backwards. */
- assert(nstime_compare(&decay->epoch, &time) <= 0);
+ return PAC_PURGE_ON_EPOCH_ADVANCE;
}
-
- /*
- * If the deadline has been reached, advance to the current epoch and
- * purge to the new limit if necessary. Note that dirty pages created
- * during the current epoch are not subject to purge until a future
- * epoch, so as a result purging only happens during epoch advances, or
- * being triggered by background threads (scheduled event).
- */
- bool advance_epoch = arena_decay_deadline_reached(decay, &time);
- if (advance_epoch) {
- arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
- is_background_thread);
- } else if (is_background_thread) {
- arena_decay_try_purge(tsdn, arena, decay, extents,
- extents_npages_get(extents),
- arena_decay_backlog_npages_limit(decay),
- is_background_thread);
- }
-
- return advance_epoch;
-}
-
-static ssize_t
-arena_decay_ms_get(arena_decay_t *decay) {
- return arena_decay_ms_read(decay);
-}
-
-ssize_t
-arena_dirty_decay_ms_get(arena_t *arena) {
- return arena_decay_ms_get(&arena->decay_dirty);
-}
-
-ssize_t
-arena_muzzy_decay_ms_get(arena_t *arena) {
- return arena_decay_ms_get(&arena->decay_muzzy);
-}
-
-static bool
-arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
- extents_t *extents, ssize_t decay_ms) {
- if (!arena_decay_ms_valid(decay_ms)) {
- return true;
- }
-
- malloc_mutex_lock(tsdn, &decay->mtx);
- /*
- * Restart decay backlog from scratch, which may cause many dirty pages
- * to be immediately purged. It would conceptually be possible to map
- * the old backlog onto the new backlog, but there is no justification
- * for such complexity since decay_ms changes are intended to be
- * infrequent, either between the {-1, 0, >0} states, or a one-time
- * arbitrary change during initial arena configuration.
- */
- arena_decay_reinit(decay, decay_ms);
- arena_maybe_decay(tsdn, arena, decay, extents, false);
- malloc_mutex_unlock(tsdn, &decay->mtx);
-
- return false;
-}
-
-bool
-arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
- ssize_t decay_ms) {
- return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
- &arena->extents_dirty, decay_ms);
}
bool
-arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
+arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
ssize_t decay_ms) {
- return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
- &arena->extents_muzzy, decay_ms);
+ pac_purge_eagerness_t eagerness = arena_decide_unforced_purge_eagerness(
+ /* is_background_thread */ false);
+ return pa_decay_ms_set(tsdn, &arena->pa_shard, state, decay_ms,
+ eagerness);
}
-static size_t
-arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
- size_t npages_decay_max, extent_list_t *decay_extents) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- /* Stash extents according to npages_limit. */
- size_t nstashed = 0;
- extent_t *extent;
- while (nstashed < npages_decay_max &&
- (extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
- npages_limit)) != NULL) {
- extent_list_append(decay_extents, extent);
- nstashed += extent_size_get(extent) >> LG_PAGE;
- }
- return nstashed;
-}
-
-static size_t
-arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
- bool all, extent_list_t *decay_extents, bool is_background_thread) {
- size_t nmadvise, nunmapped;
- size_t npurged;
-
- if (config_stats) {
- nmadvise = 0;
- nunmapped = 0;
- }
- npurged = 0;
-
- ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
- for (extent_t *extent = extent_list_first(decay_extents); extent !=
- NULL; extent = extent_list_first(decay_extents)) {
- if (config_stats) {
- nmadvise++;
- }
- size_t npages = extent_size_get(extent) >> LG_PAGE;
- npurged += npages;
- extent_list_remove(decay_extents, extent);
- switch (extents_state_get(extents)) {
- case extent_state_active:
- not_reached();
- case extent_state_dirty:
- if (!all && muzzy_decay_ms != 0 &&
- !extent_purge_lazy_wrapper(tsdn, arena,
- r_extent_hooks, extent, 0,
- extent_size_get(extent))) {
- extents_dalloc(tsdn, arena, r_extent_hooks,
- &arena->extents_muzzy, extent);
- arena_background_thread_inactivity_check(tsdn,
- arena, is_background_thread);
- break;
- }
- /* Fall through. */
- case extent_state_muzzy:
- extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
- extent);
- if (config_stats) {
- nunmapped += npages;
- }
- break;
- case extent_state_retained:
- default:
- not_reached();
- }
- }
-
- if (config_stats) {
- arena_stats_lock(tsdn, &arena->stats);
- arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
- 1);
- arena_stats_add_u64(tsdn, &arena->stats,
- &decay->stats->nmadvise, nmadvise);
- arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
- npurged);
- arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
- nunmapped << LG_PAGE);
- arena_stats_unlock(tsdn, &arena->stats);
- }
-
- return npurged;
-}
-
-/*
- * npages_limit: Decay at most npages_decay_max pages without violating the
- * invariant: (extents_npages_get(extents) >= npages_limit). We need an upper
- * bound on number of pages in order to prevent unbounded growth (namely in
- * stashed), otherwise unbounded new pages could be added to extents during the
- * current decay run, so that the purging thread never finishes.
- */
-static void
-arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
- extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
- bool is_background_thread) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 1);
- malloc_mutex_assert_owner(tsdn, &decay->mtx);
-
- if (decay->purging) {
- return;
- }
- decay->purging = true;
- malloc_mutex_unlock(tsdn, &decay->mtx);
-
- extent_hooks_t *extent_hooks = extent_hooks_get(arena);
-
- extent_list_t decay_extents;
- extent_list_init(&decay_extents);
-
- size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
- npages_limit, npages_decay_max, &decay_extents);
- if (npurge != 0) {
- size_t npurged = arena_decay_stashed(tsdn, arena,
- &extent_hooks, decay, extents, all, &decay_extents,
- is_background_thread);
- assert(npurged == npurge);
- }
-
- malloc_mutex_lock(tsdn, &decay->mtx);
- decay->purging = false;
+ssize_t
+arena_decay_ms_get(arena_t *arena, extent_state_t state) {
+ return pa_decay_ms_get(&arena->pa_shard, state);
}
static bool
-arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
- extents_t *extents, bool is_background_thread, bool all) {
+arena_decay_impl(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
+ pac_decay_stats_t *decay_stats, ecache_t *ecache,
+ bool is_background_thread, bool all) {
if (all) {
malloc_mutex_lock(tsdn, &decay->mtx);
- arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
- extents_npages_get(extents), is_background_thread);
+ pac_decay_all(tsdn, &arena->pa_shard.pac, decay, decay_stats,
+ ecache, /* fully_decay */ all);
malloc_mutex_unlock(tsdn, &decay->mtx);
-
return false;
}
@@ -960,20 +434,20 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
/* No need to wait if another thread is in progress. */
return true;
}
-
- bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
- is_background_thread);
+ pac_purge_eagerness_t eagerness =
+ arena_decide_unforced_purge_eagerness(is_background_thread);
+ bool epoch_advanced = pac_maybe_decay_purge(tsdn, &arena->pa_shard.pac,
+ decay, decay_stats, ecache, eagerness);
size_t npages_new;
if (epoch_advanced) {
/* Backlog is updated on epoch advance. */
- npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
+ npages_new = decay_epoch_npages_delta(decay);
}
malloc_mutex_unlock(tsdn, &decay->mtx);
if (have_background_thread && background_thread_enabled() &&
epoch_advanced && !is_background_thread) {
- background_thread_interval_check(tsdn, arena, decay,
- npages_new);
+ arena_maybe_do_deferred_work(tsdn, arena, decay, npages_new);
}
return false;
@@ -982,53 +456,143 @@ arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
static bool
arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
bool all) {
- return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
- &arena->extents_dirty, is_background_thread, all);
+ return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_dirty,
+ &arena->pa_shard.pac.stats->decay_dirty,
+ &arena->pa_shard.pac.ecache_dirty, is_background_thread, all);
}
static bool
arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
bool all) {
- return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
- &arena->extents_muzzy, is_background_thread, all);
+ if (pa_shard_dont_decay_muzzy(&arena->pa_shard)) {
+ return false;
+ }
+ return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_muzzy,
+ &arena->pa_shard.pac.stats->decay_muzzy,
+ &arena->pa_shard.pac.ecache_muzzy, is_background_thread, all);
}
void
arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
+ if (all) {
+ /*
+ * We should take a purge of "all" to mean "save as much memory
+ * as possible", including flushing any caches (for situations
+ * like thread death, or manual purge calls).
+ */
+ sec_flush(tsdn, &arena->pa_shard.hpa_sec);
+ }
if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
return;
}
arena_decay_muzzy(tsdn, arena, is_background_thread, all);
}
+static bool
+arena_should_decay_early(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
+ background_thread_info_t *info, nstime_t *remaining_sleep,
+ size_t npages_new) {
+ malloc_mutex_assert_owner(tsdn, &info->mtx);
+
+ if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
+ return false;
+ }
+
+ if (!decay_gradually(decay)) {
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+ return false;
+ }
+
+ nstime_init(remaining_sleep, background_thread_wakeup_time_get(info));
+ if (nstime_compare(remaining_sleep, &decay->epoch) <= 0) {
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+ return false;
+ }
+ nstime_subtract(remaining_sleep, &decay->epoch);
+ if (npages_new > 0) {
+ uint64_t npurge_new = decay_npages_purge_in(decay,
+ remaining_sleep, npages_new);
+ info->npages_to_purge_new += npurge_new;
+ }
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+ return info->npages_to_purge_new >
+ ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD;
+}
+
+/*
+ * Check if deferred work needs to be done sooner than planned.
+ * For decay we might want to wake up earlier because of an influx of dirty
+ * pages. Rather than waiting for the previously estimated time, we
+ * proactively purge those pages.
+ * If the background thread is sleeping indefinitely, always wake it up,
+ * because some deferred work has been generated.
+ */
static void
-arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
- arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);
+arena_maybe_do_deferred_work(tsdn_t *tsdn, arena_t *arena, decay_t *decay,
+ size_t npages_new) {
+ background_thread_info_t *info = arena_background_thread_info_get(
+ arena);
+ if (malloc_mutex_trylock(tsdn, &info->mtx)) {
+ /*
+ * Background thread may hold the mutex for a long period of
+ * time. We'd like to avoid the variance on application
+ * threads. So keep this non-blocking, and leave the work to a
+ * future epoch.
+ */
+ return;
+ }
+ if (!background_thread_is_started(info)) {
+ goto label_done;
+ }
+
+ nstime_t remaining_sleep;
+ if (background_thread_indefinite_sleep(info)) {
+ background_thread_wakeup_early(info, NULL);
+ } else if (arena_should_decay_early(tsdn, arena, decay, info,
+ &remaining_sleep, npages_new)) {
+ info->npages_to_purge_new = 0;
+ background_thread_wakeup_early(info, &remaining_sleep);
+ }
+label_done:
+ malloc_mutex_unlock(tsdn, &info->mtx);
+}
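
Both trylocks above follow the same non-blocking discipline: an application thread inspects background-thread state only if it can do so without waiting, and otherwise defers the work to a later epoch. A minimal pthreads sketch of that shape (names hypothetical):

    #include <pthread.h>

    static pthread_mutex_t info_mtx = PTHREAD_MUTEX_INITIALIZER;

    static void
    maybe_do_deferred_work(void) {
        if (pthread_mutex_trylock(&info_mtx) != 0) {
            /*
             * The holder may keep info_mtx for a long time; bail out
             * instead of stalling this application thread, and let a
             * future epoch retry.
             */
            return;
        }
        /* ... inspect wakeup time, maybe wake the background thread ... */
        pthread_mutex_unlock(&info_mtx);
    }

    int main(void) {
        maybe_do_deferred_work();
        return 0;
    }
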
+
+/* Called from background threads. */
+void
+arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena) {
+ arena_decay(tsdn, arena, true, false);
+ pa_shard_do_deferred_work(tsdn, &arena->pa_shard);
+}
- extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
- arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
+void
+arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab) {
+ bool deferred_work_generated = false;
+ pa_dalloc(tsdn, &arena->pa_shard, slab, &deferred_work_generated);
+ if (deferred_work_generated) {
+ arena_handle_deferred_work(tsdn, arena);
+ }
}
static void
-arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
- assert(extent_nfree_get(slab) > 0);
- extent_heap_insert(&bin->slabs_nonfull, slab);
+arena_bin_slabs_nonfull_insert(bin_t *bin, edata_t *slab) {
+ assert(edata_nfree_get(slab) > 0);
+ edata_heap_insert(&bin->slabs_nonfull, slab);
if (config_stats) {
bin->stats.nonfull_slabs++;
}
}
static void
-arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
- extent_heap_remove(&bin->slabs_nonfull, slab);
+arena_bin_slabs_nonfull_remove(bin_t *bin, edata_t *slab) {
+ edata_heap_remove(&bin->slabs_nonfull, slab);
if (config_stats) {
bin->stats.nonfull_slabs--;
}
}
-static extent_t *
+static edata_t *
arena_bin_slabs_nonfull_tryget(bin_t *bin) {
- extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
+ edata_t *slab = edata_heap_remove_first(&bin->slabs_nonfull);
if (slab == NULL) {
return NULL;
}
@@ -1040,30 +604,30 @@ arena_bin_slabs_nonfull_tryget(bin_t *bin) {
}
static void
-arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
- assert(extent_nfree_get(slab) == 0);
+arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, edata_t *slab) {
+ assert(edata_nfree_get(slab) == 0);
/*
* Tracking extents is required by arena_reset, which is not allowed
- * for auto arenas. Bypass this step to avoid touching the extent
+ * for auto arenas. Bypass this step to avoid touching the edata
* linkage (often results in cache misses) for auto arenas.
*/
if (arena_is_auto(arena)) {
return;
}
- extent_list_append(&bin->slabs_full, slab);
+ edata_list_active_append(&bin->slabs_full, slab);
}
static void
-arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
+arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, edata_t *slab) {
if (arena_is_auto(arena)) {
return;
}
- extent_list_remove(&bin->slabs_full, slab);
+ edata_list_active_remove(&bin->slabs_full, slab);
}
static void
arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
- extent_t *slab;
+ edata_t *slab;
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
if (bin->slabcur != NULL) {
@@ -1073,13 +637,13 @@ arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
}
- while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
+ while ((slab = edata_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
}
- for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
- slab = extent_list_first(&bin->slabs_full)) {
+ for (slab = edata_list_active_first(&bin->slabs_full); slab != NULL;
+ slab = edata_list_active_first(&bin->slabs_full)) {
arena_bin_slabs_full_remove(arena, bin, slab);
malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
@@ -1111,16 +675,15 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
/* Large allocations. */
malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
- for (extent_t *extent = extent_list_first(&arena->large); extent !=
- NULL; extent = extent_list_first(&arena->large)) {
- void *ptr = extent_base_get(extent);
+ for (edata_t *edata = edata_list_active_first(&arena->large);
+ edata != NULL; edata = edata_list_active_first(&arena->large)) {
+ void *ptr = edata_base_get(edata);
size_t usize;
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
- alloc_ctx_t alloc_ctx;
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ emap_alloc_ctx_t alloc_ctx;
+ emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
+ &alloc_ctx);
assert(alloc_ctx.szind != SC_NSIZES);
if (config_stats || (config_prof && opt_prof)) {
@@ -1131,7 +694,7 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
if (config_prof && opt_prof) {
prof_free(tsd, ptr, usize, &alloc_ctx);
}
- large_dalloc(tsd_tsdn(tsd), extent);
+ large_dalloc(tsd_tsdn(tsd), edata);
malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
}
malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
@@ -1139,32 +702,95 @@ arena_reset(tsd_t *tsd, arena_t *arena) {
/* Bins. */
for (unsigned i = 0; i < SC_NBINS; i++) {
for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
- arena_bin_reset(tsd, arena,
- &arena->bins[i].bin_shards[j]);
+ arena_bin_reset(tsd, arena, arena_get_bin(arena, i, j));
}
}
+ pa_shard_reset(tsd_tsdn(tsd), &arena->pa_shard);
+}
+
+static void
+arena_prepare_base_deletion_sync_finish(tsd_t *tsd, malloc_mutex_t **mutexes,
+ unsigned n_mtx) {
+ for (unsigned i = 0; i < n_mtx; i++) {
+ malloc_mutex_lock(tsd_tsdn(tsd), mutexes[i]);
+ malloc_mutex_unlock(tsd_tsdn(tsd), mutexes[i]);
+ }
+}
+
+#define ARENA_DESTROY_MAX_DELAYED_MTX 32
+static void
+arena_prepare_base_deletion_sync(tsd_t *tsd, malloc_mutex_t *mtx,
+ malloc_mutex_t **delayed_mtx, unsigned *n_delayed) {
+ if (!malloc_mutex_trylock(tsd_tsdn(tsd), mtx)) {
+ /* No contention. */
+ malloc_mutex_unlock(tsd_tsdn(tsd), mtx);
+ return;
+ }
+ unsigned n = *n_delayed;
+ assert(n < ARENA_DESTROY_MAX_DELAYED_MTX);
+ /* Add another to the batch. */
+ delayed_mtx[n++] = mtx;
- atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
+ if (n == ARENA_DESTROY_MAX_DELAYED_MTX) {
+ arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n);
+ n = 0;
+ }
+ *n_delayed = n;
}
static void
-arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
+arena_prepare_base_deletion(tsd_t *tsd, base_t *base_to_destroy) {
/*
- * Iterate over the retained extents and destroy them. This gives the
- * extent allocator underlying the extent hooks an opportunity to unmap
- * all retained memory without having to keep its own metadata
- * structures. In practice, virtual memory for dss-allocated extents is
- * leaked here, so best practice is to avoid dss for arenas to be
- * destroyed, or provide custom extent hooks that track retained
- * dss-based extents for later reuse.
+ * In order to coalesce, emap_try_acquire_edata_neighbor will attempt to
+ * check neighbor edata's state to determine eligibility. This means
+ * under certain conditions, the metadata from an arena can be accessed
+ * w/o holding any locks from that arena. In order to guarantee safe
+ * memory access, the metadata and the underlying base allocator need to
+ * be kept alive until all pending accesses are done.
+ *
+ * 1) with opt_retain, the arena boundary implies the is_head state
+ * (tracked in the rtree leaf), and the coalesce flow will stop at the
+ * head state branch. Therefore no cross-arena metadata access is
+ * possible.
+ *
+ * 2) w/o opt_retain, the arena id needs to be read from the edata_t,
+ * meaning read only cross-arena metadata access is possible. The
+ * coalesce attempt will stop at the arena_id mismatch, and is always
+ * under one of the ecache locks. To allow safe passthrough of such
+ * metadata accesses, the loop below will iterate through all manual
+ * arenas' ecache locks. As all the metadata from this base allocator
+ * have been unlinked from the rtree, after going through all the
+ * relevant ecache locks, it's safe to say that a) pending accesses are
+ * all finished, and b) no new access will be generated.
*/
- extent_hooks_t *extent_hooks = extent_hooks_get(arena);
- extent_t *extent;
- while ((extent = extents_evict(tsdn, arena, &extent_hooks,
- &arena->extents_retained, 0)) != NULL) {
- extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
+ if (opt_retain) {
+ return;
}
+ unsigned destroy_ind = base_ind_get(base_to_destroy);
+ assert(destroy_ind >= manual_arena_base);
+
+ tsdn_t *tsdn = tsd_tsdn(tsd);
+ malloc_mutex_t *delayed_mtx[ARENA_DESTROY_MAX_DELAYED_MTX];
+ unsigned n_delayed = 0, total = narenas_total_get();
+ for (unsigned i = 0; i < total; i++) {
+ if (i == destroy_ind) {
+ continue;
+ }
+ arena_t *arena = arena_get(tsdn, i, false);
+ if (arena == NULL) {
+ continue;
+ }
+ pac_t *pac = &arena->pa_shard.pac;
+ arena_prepare_base_deletion_sync(tsd, &pac->ecache_dirty.mtx,
+ delayed_mtx, &n_delayed);
+ arena_prepare_base_deletion_sync(tsd, &pac->ecache_muzzy.mtx,
+ delayed_mtx, &n_delayed);
+ arena_prepare_base_deletion_sync(tsd, &pac->ecache_retained.mtx,
+ delayed_mtx, &n_delayed);
+ }
+ arena_prepare_base_deletion_sync_finish(tsd, delayed_mtx, n_delayed);
}
+#undef ARENA_DESTROY_MAX_DELAYED_MTX
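
The sync helpers above use lock/unlock pairs purely as barriers: once a mutex has been acquired and released, every critical section that entered it beforehand must have exited, so pending cross-arena readers of the doomed base are drained. A compact sketch of the pattern with POSIX mutexes:

    #include <pthread.h>

    /*
     * After each lock/unlock pair completes, any critical section that
     * began before it has finished; nothing is done under the lock.
     */
    static void
    quiesce_locks(pthread_mutex_t **mtxs, unsigned n) {
        for (unsigned i = 0; i < n; i++) {
            pthread_mutex_lock(mtxs[i]);
            pthread_mutex_unlock(mtxs[i]);
        }
    }

    int main(void) {
        pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;
        pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;
        pthread_mutex_t *batch[] = { &a, &b };
        quiesce_locks(batch, 2);
        return 0;
    }
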
void
arena_destroy(tsd_t *tsd, arena_t *arena) {
@@ -1175,13 +801,10 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
/*
* No allocations have occurred since arena_reset() was called.
* Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
- * extents, so only retained extents may remain.
+ * extents, so only retained extents may remain and it's safe to call
+ * pa_shard_destroy_retained.
*/
- assert(extents_npages_get(&arena->extents_dirty) == 0);
- assert(extents_npages_get(&arena->extents_muzzy) == 0);
-
- /* Deallocate retained memory. */
- arena_destroy_retained(tsd_tsdn(tsd), arena);
+ pa_shard_destroy(tsd_tsdn(tsd), &arena->pa_shard);
/*
* Remove the arena pointer from the arenas array. We rely on the fact
@@ -1197,316 +820,370 @@ arena_destroy(tsd_t *tsd, arena_t *arena) {
/*
* Destroy the base allocator, which manages all metadata ever mapped by
- * this arena.
+ * this arena. The prepare function makes sure that there is no longer
+ * any pending access to the metadata in this base.
*/
+ arena_prepare_base_deletion(tsd, arena->base);
base_delete(tsd_tsdn(tsd), arena->base);
}
-static extent_t *
-arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
- szind_t szind) {
- extent_t *slab;
- bool zero, commit;
-
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- zero = false;
- commit = true;
- slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
- bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);
-
- if (config_stats && slab != NULL) {
- arena_stats_mapped_add(tsdn, &arena->stats,
- bin_info->slab_size);
- }
-
- return slab;
-}
-
-static extent_t *
+static edata_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard,
const bin_info_t *bin_info) {
+ bool deferred_work_generated = false;
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
- szind_t szind = sz_size2index(bin_info->reg_size);
- bool zero = false;
- bool commit = true;
- extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
- &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
- binind, &zero, &commit);
- if (slab == NULL && arena_may_have_muzzy(arena)) {
- slab = extents_alloc(tsdn, arena, &extent_hooks,
- &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
- true, binind, &zero, &commit);
+ bool guarded = san_slab_extent_decide_guard(tsdn,
+ arena_get_ehooks(arena));
+ edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size,
+ /* alignment */ PAGE, /* slab */ true, /* szind */ binind,
+ /* zero */ false, guarded, &deferred_work_generated);
+
+ if (deferred_work_generated) {
+ arena_handle_deferred_work(tsdn, arena);
}
+
if (slab == NULL) {
- slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
- bin_info, szind);
- if (slab == NULL) {
- return NULL;
- }
+ return NULL;
}
- assert(extent_slab_get(slab));
+ assert(edata_slab_get(slab));
/* Initialize slab internals. */
- arena_slab_data_t *slab_data = extent_slab_data_get(slab);
- extent_nfree_binshard_set(slab, bin_info->nregs, binshard);
+ slab_data_t *slab_data = edata_slab_data_get(slab);
+ edata_nfree_binshard_set(slab, bin_info->nregs, binshard);
bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
- arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);
-
return slab;
}
-static extent_t *
-arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- szind_t binind, unsigned binshard) {
- extent_t *slab;
- const bin_info_t *bin_info;
-
- /* Look for a usable slab. */
- slab = arena_bin_slabs_nonfull_tryget(bin);
- if (slab != NULL) {
- return slab;
- }
- /* No existing slabs have any space available. */
-
- bin_info = &bin_infos[binind];
-
- /* Allocate a new slab. */
- malloc_mutex_unlock(tsdn, &bin->lock);
- /******************************/
- slab = arena_slab_alloc(tsdn, arena, binind, binshard, bin_info);
- /********************************/
- malloc_mutex_lock(tsdn, &bin->lock);
- if (slab != NULL) {
- if (config_stats) {
- bin->stats.nslabs++;
- bin->stats.curslabs++;
- }
- return slab;
+/*
+ * Before attempting the _with_fresh_slab approaches below, the _no_fresh_slab
+ * variants (i.e. through slabcur and nonfull) must be tried first.
+ */
+static void
+arena_bin_refill_slabcur_with_fresh_slab(tsdn_t *tsdn, arena_t *arena,
+ bin_t *bin, szind_t binind, edata_t *fresh_slab) {
+ malloc_mutex_assert_owner(tsdn, &bin->lock);
+ /* Only called after slabcur and nonfull both failed. */
+ assert(bin->slabcur == NULL);
+ assert(edata_heap_first(&bin->slabs_nonfull) == NULL);
+ assert(fresh_slab != NULL);
+
+ /* A new slab from arena_slab_alloc() */
+ assert(edata_nfree_get(fresh_slab) == bin_infos[binind].nregs);
+ if (config_stats) {
+ bin->stats.nslabs++;
+ bin->stats.curslabs++;
}
+ bin->slabcur = fresh_slab;
+}
- /*
- * arena_slab_alloc() failed, but another thread may have made
- * sufficient memory available while this one dropped bin->lock above,
- * so search one more time.
- */
- slab = arena_bin_slabs_nonfull_tryget(bin);
- if (slab != NULL) {
- return slab;
- }
+/* Refill slabcur and then alloc using the fresh slab */
+static void *
+arena_bin_malloc_with_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+ szind_t binind, edata_t *fresh_slab) {
+ malloc_mutex_assert_owner(tsdn, &bin->lock);
+ arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena, bin, binind,
+ fresh_slab);
- return NULL;
+ return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]);
}
-/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
-static void *
-arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- szind_t binind, unsigned binshard) {
- const bin_info_t *bin_info;
- extent_t *slab;
+static bool
+arena_bin_refill_slabcur_no_fresh_slab(tsdn_t *tsdn, arena_t *arena,
+ bin_t *bin) {
+ malloc_mutex_assert_owner(tsdn, &bin->lock);
+ /* Only called after arena_slab_reg_alloc[_batch] failed. */
+ assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0);
- bin_info = &bin_infos[binind];
- if (!arena_is_auto(arena) && bin->slabcur != NULL) {
- arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
- bin->slabcur = NULL;
- }
- slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind, binshard);
if (bin->slabcur != NULL) {
- /*
- * Another thread updated slabcur while this one ran without the
- * bin lock in arena_bin_nonfull_slab_get().
- */
- if (extent_nfree_get(bin->slabcur) > 0) {
- void *ret = arena_slab_reg_alloc(bin->slabcur,
- bin_info);
- if (slab != NULL) {
- /*
- * arena_slab_alloc() may have allocated slab,
- * or it may have been pulled from
- * slabs_nonfull. Therefore it is unsafe to
- * make any assumptions about how slab has
- * previously been used, and
- * arena_bin_lower_slab() must be called, as if
- * a region were just deallocated from the slab.
- */
- if (extent_nfree_get(slab) == bin_info->nregs) {
- arena_dalloc_bin_slab(tsdn, arena, slab,
- bin);
- } else {
- arena_bin_lower_slab(tsdn, arena, slab,
- bin);
- }
- }
- return ret;
- }
-
arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
- bin->slabcur = NULL;
- }
-
- if (slab == NULL) {
- return NULL;
}
- bin->slabcur = slab;
- assert(extent_nfree_get(bin->slabcur) > 0);
+ /* Look for a usable slab. */
+ bin->slabcur = arena_bin_slabs_nonfull_tryget(bin);
+ assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) > 0);
- return arena_slab_reg_alloc(slab, bin_info);
+ return (bin->slabcur == NULL);
}
-/* Choose a bin shard and return the locked bin. */
bin_t *
-arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind,
- unsigned *binshard) {
- bin_t *bin;
+arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+ unsigned *binshard_p) {
+ unsigned binshard;
if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) {
- *binshard = 0;
+ binshard = 0;
} else {
- *binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
+ binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
}
- assert(*binshard < bin_infos[binind].n_shards);
- bin = &arena->bins[binind].bin_shards[*binshard];
- malloc_mutex_lock(tsdn, &bin->lock);
-
- return bin;
+ assert(binshard < bin_infos[binind].n_shards);
+ if (binshard_p != NULL) {
+ *binshard_p = binshard;
+ }
+ return arena_get_bin(arena, binind, binshard);
}
void
-arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
- cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
- unsigned i, nfill, cnt;
+arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
+ cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
+ const unsigned nfill) {
+ assert(cache_bin_ncached_get_local(cache_bin, cache_bin_info) == 0);
+
+ const bin_info_t *bin_info = &bin_infos[binind];
+
+ CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nfill);
+ cache_bin_init_ptr_array_for_fill(cache_bin, cache_bin_info, &ptrs,
+ nfill);
+ /*
+ * Bin-local resources are used first: 1) bin->slabcur, and 2) nonfull
+ * slabs. After both are exhausted, new slabs will be allocated through
+ * arena_slab_alloc().
+ *
+ * Bin lock is only taken / released right before / after the while(...)
+ * refill loop, with new slab allocation (which has its own locking)
+ * kept outside of the loop. This setup facilitates flat combining, at
+ * the cost of the nested loop (through goto label_refill).
+ *
+ * To optimize for cases with contention and limited resources
+ * (e.g. hugepage-backed or non-overcommit arenas), each fill-iteration
+ * gets one chance of slab_alloc, and a retry of bin local resources
+ * after the slab allocation (regardless of whether slab_alloc failed,
+ * because the bin lock is dropped during the slab allocation).
+ *
+ * In other words, new slab allocation is allowed, as long as there was
+ * progress since the previous slab_alloc. This is tracked with
+ * made_progress below, initialized to true to jump start the first
+ * iteration.
+ *
+ * In other words (again), the loop will only terminate early (i.e. stop
+ * with filled < nfill) after going through the three steps: a) bin
+ * local exhausted, b) unlock and slab_alloc returns null, c) re-lock
+ * and bin local fails again.
+ */
+ bool made_progress = true;
+ edata_t *fresh_slab = NULL;
+ bool alloc_and_retry = false;
+ unsigned filled = 0;
+ unsigned binshard;
+ bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
+
+label_refill:
+ malloc_mutex_lock(tsdn, &bin->lock);
- assert(tbin->ncached == 0);
+ while (filled < nfill) {
+ /* Try batch-fill from slabcur first. */
+ edata_t *slabcur = bin->slabcur;
+ if (slabcur != NULL && edata_nfree_get(slabcur) > 0) {
+ unsigned tofill = nfill - filled;
+ unsigned nfree = edata_nfree_get(slabcur);
+ unsigned cnt = tofill < nfree ? tofill : nfree;
+
+ arena_slab_reg_alloc_batch(slabcur, bin_info, cnt,
+ &ptrs.ptr[filled]);
+ made_progress = true;
+ filled += cnt;
+ continue;
+ }
+ /* Next try refilling slabcur from nonfull slabs. */
+ if (!arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) {
+ assert(bin->slabcur != NULL);
+ continue;
+ }
+
+ /* Then see if a new slab was reserved already. */
+ if (fresh_slab != NULL) {
+ arena_bin_refill_slabcur_with_fresh_slab(tsdn, arena,
+ bin, binind, fresh_slab);
+ assert(bin->slabcur != NULL);
+ fresh_slab = NULL;
+ continue;
+ }
+
+ /* Try slab_alloc if made progress (or never did slab_alloc). */
+ if (made_progress) {
+ assert(bin->slabcur == NULL);
+ assert(fresh_slab == NULL);
+ alloc_and_retry = true;
+ /* Alloc a new slab then come back. */
+ break;
+ }
+
+ /* OOM. */
+
+ assert(fresh_slab == NULL);
+ assert(!alloc_and_retry);
+ break;
+ } /* while (filled < nfill) loop. */
- if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
- prof_idump(tsdn);
+ if (config_stats && !alloc_and_retry) {
+ bin->stats.nmalloc += filled;
+ bin->stats.nrequests += cache_bin->tstats.nrequests;
+ bin->stats.curregs += filled;
+ bin->stats.nfills++;
+ cache_bin->tstats.nrequests = 0;
+ }
+
+ malloc_mutex_unlock(tsdn, &bin->lock);
+
+ if (alloc_and_retry) {
+ assert(fresh_slab == NULL);
+ assert(filled < nfill);
+ assert(made_progress);
+
+ fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard,
+ bin_info);
+		/* fresh_slab NULL case handled in the while loop. */
+
+ alloc_and_retry = false;
+ made_progress = false;
+ goto label_refill;
}
+ assert(filled == nfill || (fresh_slab == NULL && !made_progress));
+ /* Release if allocated but not used. */
+ if (fresh_slab != NULL) {
+ assert(edata_nfree_get(fresh_slab) == bin_info->nregs);
+ arena_slab_dalloc(tsdn, arena, fresh_slab);
+ fresh_slab = NULL;
+ }
+
+ cache_bin_finish_fill(cache_bin, cache_bin_info, &ptrs, filled);
+ arena_decay_tick(tsdn, arena);
+}
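
A standalone model of the termination rule spelled out in the comment above, stripped of the locking and stats; fill_from_bin_local() and try_alloc_fresh_slab() are illustrative stubs, not jemalloc APIs:

#include <stdbool.h>

unsigned fill_from_bin_local(unsigned want);	/* stub: 0 when exhausted */
bool try_alloc_fresh_slab(void);		/* stub: false on OOM */

unsigned
refill_model(unsigned nfill) {
	unsigned filled = 0;
	bool made_progress = true;	/* jump-starts the first slab_alloc */
	while (filled < nfill) {
		unsigned n = fill_from_bin_local(nfill - filled);
		if (n > 0) {
			filled += n;
			made_progress = true;
			continue;
		}
		if (!made_progress) {
			break;		/* a), b), c) all failed: OOM */
		}
		/* The real code drops the bin lock around this call; a
		 * successful alloc makes the next bin-local fill succeed. */
		try_alloc_fresh_slab();
		made_progress = false;
	}
	return filled;
}
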
+
+size_t
+arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
+ void **ptrs, size_t nfill, bool zero) {
+ assert(binind < SC_NBINS);
+ const bin_info_t *bin_info = &bin_infos[binind];
+ const size_t nregs = bin_info->nregs;
+ assert(nregs > 0);
+ const size_t usize = bin_info->reg_size;
+
+ const bool manual_arena = !arena_is_auto(arena);
unsigned binshard;
- bin_t *bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
-
- for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
- tcache->lg_fill_div[binind]); i < nfill; i += cnt) {
- extent_t *slab;
- if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
- 0) {
- unsigned tofill = nfill - i;
- cnt = tofill < extent_nfree_get(slab) ?
- tofill : extent_nfree_get(slab);
- arena_slab_reg_alloc_batch(
- slab, &bin_infos[binind], cnt,
- tbin->avail - nfill + i);
- } else {
- cnt = 1;
- void *ptr = arena_bin_malloc_hard(tsdn, arena, bin,
- binind, binshard);
- /*
- * OOM. tbin->avail isn't yet filled down to its first
- * element, so the successful allocations (if any) must
- * be moved just before tbin->avail before bailing out.
- */
- if (ptr == NULL) {
- if (i > 0) {
- memmove(tbin->avail - i,
- tbin->avail - nfill,
- i * sizeof(void *));
- }
- break;
- }
- /* Insert such that low regions get used first. */
- *(tbin->avail - nfill + i) = ptr;
+ bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
+
+ size_t nslab = 0;
+ size_t filled = 0;
+ edata_t *slab = NULL;
+ edata_list_active_t fulls;
+ edata_list_active_init(&fulls);
+
+ while (filled < nfill && (slab = arena_slab_alloc(tsdn, arena, binind,
+ binshard, bin_info)) != NULL) {
+ assert((size_t)edata_nfree_get(slab) == nregs);
+ ++nslab;
+ size_t batch = nfill - filled;
+ if (batch > nregs) {
+ batch = nregs;
+ }
+ assert(batch > 0);
+ arena_slab_reg_alloc_batch(slab, bin_info, (unsigned)batch,
+ &ptrs[filled]);
+ assert(edata_addr_get(slab) == ptrs[filled]);
+ if (zero) {
+ memset(ptrs[filled], 0, batch * usize);
}
- if (config_fill && unlikely(opt_junk_alloc)) {
- for (unsigned j = 0; j < cnt; j++) {
- void* ptr = *(tbin->avail - nfill + i + j);
- arena_alloc_junk_small(ptr, &bin_infos[binind],
- true);
+ filled += batch;
+ if (batch == nregs) {
+ if (manual_arena) {
+ edata_list_active_append(&fulls, slab);
}
+ slab = NULL;
}
}
+
+ malloc_mutex_lock(tsdn, &bin->lock);
+ /*
+ * Only the last slab can be non-empty, and the last slab is non-empty
+ * iff slab != NULL.
+ */
+ if (slab != NULL) {
+ arena_bin_lower_slab(tsdn, arena, slab, bin);
+ }
+ if (manual_arena) {
+ edata_list_active_concat(&bin->slabs_full, &fulls);
+ }
+ assert(edata_list_active_empty(&fulls));
if (config_stats) {
- bin->stats.nmalloc += i;
- bin->stats.nrequests += tbin->tstats.nrequests;
- bin->stats.curregs += i;
- bin->stats.nfills++;
- tbin->tstats.nrequests = 0;
+ bin->stats.nslabs += nslab;
+ bin->stats.curslabs += nslab;
+ bin->stats.nmalloc += filled;
+ bin->stats.nrequests += filled;
+ bin->stats.curregs += filled;
}
malloc_mutex_unlock(tsdn, &bin->lock);
- tbin->ncached = i;
+
arena_decay_tick(tsdn, arena);
+ return filled;
}
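
arena_fill_small_fresh() looks like the backing for a batched allocation entry point: it fills ptrs[] exclusively from brand-new slabs and reports how many regions it produced. A hypothetical caller (buffer size and error handling are assumptions, not part of this patch):

void *ptrs[256];
size_t got = arena_fill_small_fresh(tsdn, arena, binind, ptrs,
    /* nfill */ 256, /* zero */ true);
/* On OOM got < 256; exactly ptrs[0..got) are valid (and zeroed here). */
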
-void
-arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
- if (!zero) {
- memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
+/*
+ * Without allocating a new slab, try arena_slab_reg_alloc() and re-fill
+ * bin->slabcur if necessary.
+ */
+static void *
+arena_bin_malloc_no_fresh_slab(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
+ szind_t binind) {
+ malloc_mutex_assert_owner(tsdn, &bin->lock);
+ if (bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0) {
+ if (arena_bin_refill_slabcur_no_fresh_slab(tsdn, arena, bin)) {
+ return NULL;
+ }
}
-}
-static void
-arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
- memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
+ assert(bin->slabcur != NULL && edata_nfree_get(bin->slabcur) > 0);
+ return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]);
}
-arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
- arena_dalloc_junk_small_impl;
static void *
arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
- void *ret;
- bin_t *bin;
- size_t usize;
- extent_t *slab;
-
assert(binind < SC_NBINS);
- usize = sz_index2size(binind);
+ const bin_info_t *bin_info = &bin_infos[binind];
+ size_t usize = sz_index2size(binind);
unsigned binshard;
- bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard);
-
- if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
- ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
- } else {
- ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard);
- }
+ bin_t *bin = arena_bin_choose(tsdn, arena, binind, &binshard);
+ malloc_mutex_lock(tsdn, &bin->lock);
+ edata_t *fresh_slab = NULL;
+ void *ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind);
if (ret == NULL) {
malloc_mutex_unlock(tsdn, &bin->lock);
- return NULL;
+ /******************************/
+ fresh_slab = arena_slab_alloc(tsdn, arena, binind, binshard,
+ bin_info);
+ /********************************/
+ malloc_mutex_lock(tsdn, &bin->lock);
+ /* Retry since the lock was dropped. */
+ ret = arena_bin_malloc_no_fresh_slab(tsdn, arena, bin, binind);
+ if (ret == NULL) {
+ if (fresh_slab == NULL) {
+ /* OOM */
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ return NULL;
+ }
+ ret = arena_bin_malloc_with_fresh_slab(tsdn, arena, bin,
+ binind, fresh_slab);
+ fresh_slab = NULL;
+ }
}
-
if (config_stats) {
bin->stats.nmalloc++;
bin->stats.nrequests++;
bin->stats.curregs++;
}
malloc_mutex_unlock(tsdn, &bin->lock);
- if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
- prof_idump(tsdn);
- }
- if (!zero) {
- if (config_fill) {
- if (unlikely(opt_junk_alloc)) {
- arena_alloc_junk_small(ret,
- &bin_infos[binind], false);
- } else if (unlikely(opt_zero)) {
- memset(ret, 0, usize);
- }
- }
- } else {
- if (config_fill && unlikely(opt_junk_alloc)) {
- arena_alloc_junk_small(ret, &bin_infos[binind],
- true);
- }
+ if (fresh_slab != NULL) {
+ arena_slab_dalloc(tsdn, arena, fresh_slab);
+ }
+ if (zero) {
memset(ret, 0, usize);
}
-
arena_decay_tick(tsdn, arena);
+
return ret;
}
@@ -1533,10 +1210,17 @@ arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero, tcache_t *tcache) {
void *ret;
- if (usize <= SC_SMALL_MAXCLASS
- && (alignment < PAGE
- || (alignment == PAGE && (usize & PAGE_MASK) == 0))) {
+ if (usize <= SC_SMALL_MAXCLASS) {
/* Small; alignment doesn't require special slab placement. */
+
+	/* usize should be a result of sz_sa2u(). */
+ assert((usize & (alignment - 1)) == 0);
+
+ /*
+ * Small usize can't come from an alignment larger than a page.
+ */
+ assert(alignment <= PAGE);
+
ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
zero, tcache, true);
} else {
@@ -1560,33 +1244,22 @@ arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) {
safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS);
}
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true);
- arena_t *arena = extent_arena_get(extent);
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
szind_t szind = sz_size2index(usize);
- extent_szind_set(extent, szind);
- rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
- szind, false);
-
- prof_accum_cancel(tsdn, &arena->prof_accum, usize);
+ edata_szind_set(edata, szind);
+ emap_remap(tsdn, &arena_emap_global, edata, szind, /* slab */ false);
assert(isalloc(tsdn, ptr) == usize);
}
static size_t
-arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
+arena_prof_demote(tsdn_t *tsdn, edata_t *edata, const void *ptr) {
cassert(config_prof);
assert(ptr != NULL);
- extent_szind_set(extent, SC_NBINS);
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
- SC_NBINS, false);
+ edata_szind_set(edata, SC_NBINS);
+ emap_remap(tsdn, &arena_emap_global, edata, SC_NBINS, /* slab */ false);
assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS);
@@ -1599,9 +1272,9 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
cassert(config_prof);
assert(opt_prof);
- extent_t *extent = iealloc(tsdn, ptr);
- size_t usize = extent_usize_get(extent);
- size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr);
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
+ size_t usize = edata_usize_get(edata);
+ size_t bumped_usize = arena_prof_demote(tsdn, edata, ptr);
if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) {
/*
* Currently, we only do redzoning for small sampled
@@ -1614,17 +1287,17 @@ arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
sz_size2index(bumped_usize), slow_path);
} else {
- large_dalloc(tsdn, extent);
+ large_dalloc(tsdn, edata);
}
}
static void
-arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
+arena_dissociate_bin_slab(arena_t *arena, edata_t *slab, bin_t *bin) {
/* Dissociate slab from bin. */
if (slab == bin->slabcur) {
bin->slabcur = NULL;
} else {
- szind_t binind = extent_szind_get(slab);
+ szind_t binind = edata_szind_get(slab);
const bin_info_t *bin_info = &bin_infos[binind];
/*
@@ -1641,24 +1314,9 @@ arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
}
static void
-arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
+arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, edata_t *slab,
bin_t *bin) {
- assert(slab != bin->slabcur);
-
- malloc_mutex_unlock(tsdn, &bin->lock);
- /******************************/
- arena_slab_dalloc(tsdn, arena, slab);
- /****************************/
- malloc_mutex_lock(tsdn, &bin->lock);
- if (config_stats) {
- bin->stats.curslabs--;
- }
-}
-
-static void
-arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
- bin_t *bin) {
- assert(extent_nfree_get(slab) > 0);
+ assert(edata_nfree_get(slab) > 0);
/*
* Make sure that if bin->slabcur is non-NULL, it refers to the
@@ -1666,9 +1324,9 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
* than proactively keeping it pointing at the oldest/lowest non-full
* slab.
*/
- if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
+ if (bin->slabcur != NULL && edata_snad_comp(bin->slabcur, slab) > 0) {
/* Switch slabcur. */
- if (extent_nfree_get(bin->slabcur) > 0) {
+ if (edata_nfree_get(bin->slabcur) > 0) {
arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
} else {
arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
@@ -1683,56 +1341,54 @@ arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
}
static void
-arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- szind_t binind, extent_t *slab, void *ptr, bool junked) {
- arena_slab_data_t *slab_data = extent_slab_data_get(slab);
- const bin_info_t *bin_info = &bin_infos[binind];
-
- if (!junked && config_fill && unlikely(opt_junk_free)) {
- arena_dalloc_junk_small(ptr, bin_info);
- }
-
- arena_slab_reg_dalloc(slab, slab_data, ptr);
- unsigned nfree = extent_nfree_get(slab);
- if (nfree == bin_info->nregs) {
- arena_dissociate_bin_slab(arena, slab, bin);
- arena_dalloc_bin_slab(tsdn, arena, slab, bin);
- } else if (nfree == 1 && slab != bin->slabcur) {
- arena_bin_slabs_full_remove(arena, bin, slab);
- arena_bin_lower_slab(tsdn, arena, slab, bin);
- }
+arena_dalloc_bin_slab_prepare(tsdn_t *tsdn, edata_t *slab, bin_t *bin) {
+ malloc_mutex_assert_owner(tsdn, &bin->lock);
+ assert(slab != bin->slabcur);
if (config_stats) {
- bin->stats.ndalloc++;
- bin->stats.curregs--;
+ bin->stats.curslabs--;
}
}
void
-arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
- szind_t binind, extent_t *extent, void *ptr) {
- arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
- true);
+arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena,
+ edata_t *slab, bin_t *bin) {
+ arena_dissociate_bin_slab(arena, slab, bin);
+ arena_dalloc_bin_slab_prepare(tsdn, slab, bin);
+}
+
+void
+arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena,
+ edata_t *slab, bin_t *bin) {
+ arena_bin_slabs_full_remove(arena, bin, slab);
+ arena_bin_lower_slab(tsdn, arena, slab, bin);
}
static void
-arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
- szind_t binind = extent_szind_get(extent);
- unsigned binshard = extent_binshard_get(extent);
- bin_t *bin = &arena->bins[binind].bin_shards[binshard];
+arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, edata_t *edata, void *ptr) {
+ szind_t binind = edata_szind_get(edata);
+ unsigned binshard = edata_binshard_get(edata);
+ bin_t *bin = arena_get_bin(arena, binind, binshard);
malloc_mutex_lock(tsdn, &bin->lock);
- arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr,
- false);
+ arena_dalloc_bin_locked_info_t info;
+ arena_dalloc_bin_locked_begin(&info, binind);
+ bool ret = arena_dalloc_bin_locked_step(tsdn, arena, bin,
+ &info, binind, edata, ptr);
+ arena_dalloc_bin_locked_finish(tsdn, arena, bin, &info);
malloc_mutex_unlock(tsdn, &bin->lock);
+
+ if (ret) {
+ arena_slab_dalloc(tsdn, arena, edata);
+ }
}
void
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
- extent_t *extent = iealloc(tsdn, ptr);
- arena_t *arena = extent_arena_get(extent);
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
+ arena_t *arena = arena_get_from_edata(edata);
- arena_dalloc_bin(tsdn, arena, extent, ptr);
+ arena_dalloc_bin(tsdn, arena, edata, ptr);
arena_decay_tick(tsdn, arena);
}
@@ -1743,7 +1399,7 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
/* Calls with non-zero extra had to clamp extra. */
assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS);
- extent_t *extent = iealloc(tsdn, ptr);
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
if (unlikely(size > SC_LARGE_MAXCLASS)) {
ret = true;
goto done;
@@ -1766,18 +1422,19 @@ arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
goto done;
}
- arena_decay_tick(tsdn, extent_arena_get(extent));
+ arena_t *arena = arena_get_from_edata(edata);
+ arena_decay_tick(tsdn, arena);
ret = false;
} else if (oldsize >= SC_LARGE_MINCLASS
&& usize_max >= SC_LARGE_MINCLASS) {
- ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
+ ret = large_ralloc_no_move(tsdn, edata, usize_min, usize_max,
zero);
} else {
ret = true;
}
done:
- assert(extent == iealloc(tsdn, ptr));
- *newsize = extent_usize_get(extent);
+ assert(edata == emap_edata_lookup(tsdn, &arena_emap_global, ptr));
+ *newsize = edata_usize_get(edata);
return ret;
}
@@ -1800,7 +1457,7 @@ void *
arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args) {
- size_t usize = sz_s2u(size);
+ size_t usize = alignment == 0 ? sz_s2u(size) : sz_sa2u(size, alignment);
if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) {
return NULL;
}
@@ -1850,6 +1507,29 @@ arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
return ret;
}
+ehooks_t *
+arena_get_ehooks(arena_t *arena) {
+ return base_ehooks_get(arena->base);
+}
+
+extent_hooks_t *
+arena_set_extent_hooks(tsd_t *tsd, arena_t *arena,
+ extent_hooks_t *extent_hooks) {
+ background_thread_info_t *info;
+ if (have_background_thread) {
+ info = arena_background_thread_info_get(arena);
+ malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
+ }
+	/* Stop using the HPA now that we have custom hooks. */
+ pa_shard_disable_hpa(tsd_tsdn(tsd), &arena->pa_shard);
+ extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
+ if (have_background_thread) {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
+ }
+
+ return ret;
+}
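
For context on what arena_set_extent_hooks() installs: the public hook table is the long-standing extent_hooks_t, reached via the arena.<i>.extent_hooks mallctl. A minimal sketch of a custom table (only the mandatory alloc member shown; the body is a placeholder, not a working allocator):

static void *
my_alloc(extent_hooks_t *hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	/* Return size bytes at alignment, honoring *zero / *commit;
	 * NULL signals failure. */
	return NULL;
}

static extent_hooks_t my_hooks = {
	.alloc = my_alloc,
	/* Leaving other members NULL opts out of those operations. */
};
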
+
dss_prec_t
arena_dss_prec_get(arena_t *arena) {
return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
@@ -1871,7 +1551,7 @@ arena_dirty_decay_ms_default_get(void) {
bool
arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
- if (!arena_decay_ms_valid(decay_ms)) {
+ if (!decay_ms_valid(decay_ms)) {
return true;
}
atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
@@ -1885,7 +1565,7 @@ arena_muzzy_decay_ms_default_get(void) {
bool
arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
- if (!arena_decay_ms_valid(decay_ms)) {
+ if (!decay_ms_valid(decay_ms)) {
return true;
}
atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
@@ -1896,26 +1576,8 @@ bool
arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
size_t *new_limit) {
assert(opt_retain);
-
- pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
- if (new_limit != NULL) {
- size_t limit = *new_limit;
- /* Grow no more than the new limit. */
- if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
- return true;
- }
- }
-
- malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
- if (old_limit != NULL) {
- *old_limit = sz_pind2sz(arena->retain_grow_limit);
- }
- if (new_limit != NULL) {
- arena->retain_grow_limit = new_ind;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
-
- return false;
+ return pac_retain_grow_limit_get_set(tsd_tsdn(tsd),
+ &arena->pa_shard.pac, old_limit, new_limit);
}
unsigned
@@ -1933,13 +1595,8 @@ arena_nthreads_dec(arena_t *arena, bool internal) {
atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}
-size_t
-arena_extent_sn_next(arena_t *arena) {
- return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
-}
-
arena_t *
-arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
arena_t *arena;
base_t *base;
unsigned i;
@@ -1947,16 +1604,13 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
if (ind == 0) {
base = b0get();
} else {
- base = base_new(tsdn, ind, extent_hooks);
+ base = base_new(tsdn, ind, config->extent_hooks,
+ config->metadata_use_hooks);
if (base == NULL) {
return NULL;
}
}
- unsigned nbins_total = 0;
- for (i = 0; i < SC_NBINS; i++) {
- nbins_total += bin_infos[i].n_shards;
- }
size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total;
arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE);
if (arena == NULL) {
@@ -1980,110 +1634,56 @@ arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
}
}
- if (config_prof) {
- if (prof_accum_init(tsdn, &arena->prof_accum)) {
- goto label_error;
- }
- }
-
- if (config_cache_oblivious) {
- /*
- * A nondeterministic seed based on the address of arena reduces
- * the likelihood of lockstep non-uniform cache index
- * utilization among identical concurrent processes, but at the
- * cost of test repeatability. For debug builds, instead use a
- * deterministic seed.
- */
- atomic_store_zu(&arena->offset_state, config_debug ? ind :
- (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
- }
-
- atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);
-
atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
ATOMIC_RELAXED);
- atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
-
- extent_list_init(&arena->large);
+ edata_list_active_init(&arena->large);
if (malloc_mutex_init(&arena->large_mtx, "arena_large",
WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
goto label_error;
}
- /*
- * Delay coalescing for dirty extents despite the disruptive effect on
- * memory layout for best-fit extent allocation, since cached extents
- * are likely to be reused soon after deallocation, and the cost of
- * merging/splitting extents is non-trivial.
- */
- if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
- true)) {
- goto label_error;
- }
- /*
- * Coalesce muzzy extents immediately, because operations on them are in
- * the critical path much less often than for dirty extents.
- */
- if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
- false)) {
- goto label_error;
- }
- /*
- * Coalesce retained extents immediately, in part because they will
- * never be evicted (and therefore there's no opportunity for delayed
- * coalescing), but also because operations on retained extents are not
- * in the critical path.
- */
- if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
- false)) {
- goto label_error;
- }
-
- if (arena_decay_init(&arena->decay_dirty,
- arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
- goto label_error;
- }
- if (arena_decay_init(&arena->decay_muzzy,
- arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
- goto label_error;
- }
-
- arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
- arena->retain_grow_limit = sz_psz2ind(SC_LARGE_MAXCLASS);
- if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
- WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
- goto label_error;
- }
-
- extent_avail_new(&arena->extent_avail);
- if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
- WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
+ nstime_t cur_time;
+ nstime_init_update(&cur_time);
+ if (pa_shard_init(tsdn, &arena->pa_shard, &arena_pa_central_global,
+ &arena_emap_global, base, ind, &arena->stats.pa_shard_stats,
+ LOCKEDINT_MTX(arena->stats.mtx), &cur_time, oversize_threshold,
+ arena_dirty_decay_ms_default_get(),
+ arena_muzzy_decay_ms_default_get())) {
goto label_error;
}
/* Initialize bins. */
- uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t);
atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE);
- for (i = 0; i < SC_NBINS; i++) {
- unsigned nshards = bin_infos[i].n_shards;
- arena->bins[i].bin_shards = (bin_t *)bin_addr;
- bin_addr += nshards * sizeof(bin_t);
- for (unsigned j = 0; j < nshards; j++) {
- bool err = bin_init(&arena->bins[i].bin_shards[j]);
- if (err) {
- goto label_error;
- }
+ for (i = 0; i < nbins_total; i++) {
+ bool err = bin_init(&arena->bins[i]);
+ if (err) {
+ goto label_error;
}
}
- assert(bin_addr == (uintptr_t)arena + arena_size);
arena->base = base;
/* Set arena before creating background threads. */
arena_set(ind, arena);
+ arena->ind = ind;
- nstime_init(&arena->create_time, 0);
- nstime_update(&arena->create_time);
+ nstime_init_update(&arena->create_time);
+
+ /*
+	 * We turn on the HPA when it is enabled. There are two exceptions:
+ * - Custom extent hooks (we should only return memory allocated from
+ * them in that case).
+ * - Arena 0 initialization. In this case, we're mid-bootstrapping, and
+ * so arena_hpa_global is not yet initialized.
+ */
+ if (opt_hpa && ehooks_are_default(base_ehooks_get(base)) && ind != 0) {
+ hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts;
+ hpa_shard_opts.deferral_allowed = background_thread_enabled();
+ if (pa_shard_enable_hpa(tsdn, &arena->pa_shard,
+ &hpa_shard_opts, &opt_hpa_sec_opts)) {
+ goto label_error;
+ }
+ }
/* We don't support reentrancy for arena 0 bootstrapping. */
if (ind != 0) {
@@ -2129,10 +1729,12 @@ arena_choose_huge(tsd_t *tsd) {
* expected for huge allocations.
*/
if (arena_dirty_decay_ms_default_get() > 0) {
- arena_dirty_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
+ arena_decay_ms_set(tsd_tsdn(tsd), huge_arena,
+ extent_state_dirty, 0);
}
if (arena_muzzy_decay_ms_default_get() > 0) {
- arena_muzzy_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0);
+ arena_decay_ms_set(tsd_tsdn(tsd), huge_arena,
+ extent_state_muzzy, 0);
}
}
@@ -2167,8 +1769,8 @@ arena_is_huge(unsigned arena_ind) {
return (arena_ind == huge_arena_ind);
}
-void
-arena_boot(sc_data_t *sc_data) {
+bool
+arena_boot(sc_data_t *sc_data, base_t *base, bool hpa) {
arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
for (unsigned i = 0; i < SC_NBINS; i++) {
@@ -2176,12 +1778,20 @@ arena_boot(sc_data_t *sc_data) {
div_init(&arena_binind_div_info[i],
(1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
}
+
+ uint32_t cur_offset = (uint32_t)offsetof(arena_t, bins);
+ for (szind_t i = 0; i < SC_NBINS; i++) {
+ arena_bin_offsets[i] = cur_offset;
+ nbins_total += bin_infos[i].n_shards;
+ cur_offset += (uint32_t)(bin_infos[i].n_shards * sizeof(bin_t));
+ }
+ return pa_central_init(&arena_pa_central_global, base, hpa,
+ &hpa_hooks_default);
}
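
With the shard arrays flattened, arena_bin_offsets[] turns bin lookup into one offset plus a shard index. The presumed shape of arena_get_bin() under this layout (a sketch; the actual inline lives in the headers, not in this hunk):

static inline bin_t *
arena_get_bin(arena_t *arena, szind_t binind, unsigned binshard) {
	bin_t *shard0 = (bin_t *)((uintptr_t)arena +
	    arena_bin_offsets[binind]);
	return shard0 + binshard;
}
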
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
- malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
+ pa_shard_prefork0(tsdn, &arena->pa_shard);
}
void
@@ -2193,59 +1803,50 @@ arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
void
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
+ pa_shard_prefork2(tsdn, &arena->pa_shard);
}
void
arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
- extents_prefork(tsdn, &arena->extents_dirty);
- extents_prefork(tsdn, &arena->extents_muzzy);
- extents_prefork(tsdn, &arena->extents_retained);
+ pa_shard_prefork3(tsdn, &arena->pa_shard);
}
void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
+ pa_shard_prefork4(tsdn, &arena->pa_shard);
}
void
arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
- base_prefork(tsdn, arena->base);
+ pa_shard_prefork5(tsdn, &arena->pa_shard);
}
void
arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_prefork(tsdn, &arena->large_mtx);
+ base_prefork(tsdn, arena->base);
}
void
arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
- for (unsigned i = 0; i < SC_NBINS; i++) {
- for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
- bin_prefork(tsdn, &arena->bins[i].bin_shards[j]);
- }
+ malloc_mutex_prefork(tsdn, &arena->large_mtx);
+}
+
+void
+arena_prefork8(tsdn_t *tsdn, arena_t *arena) {
+ for (unsigned i = 0; i < nbins_total; i++) {
+ bin_prefork(tsdn, &arena->bins[i]);
}
}
void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
- unsigned i;
-
- for (i = 0; i < SC_NBINS; i++) {
- for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
- bin_postfork_parent(tsdn,
- &arena->bins[i].bin_shards[j]);
- }
+ for (unsigned i = 0; i < nbins_total; i++) {
+ bin_postfork_parent(tsdn, &arena->bins[i]);
}
+
malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
base_postfork_parent(tsdn, arena->base);
- malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
- extents_postfork_parent(tsdn, &arena->extents_dirty);
- extents_postfork_parent(tsdn, &arena->extents_muzzy);
- extents_postfork_parent(tsdn, &arena->extents_retained);
- malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
- malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
- malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
+ pa_shard_postfork_parent(tsdn, &arena->pa_shard);
if (config_stats) {
malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
}
@@ -2253,8 +1854,6 @@ arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
- unsigned i;
-
atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
@@ -2266,32 +1865,26 @@ arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
if (config_stats) {
ql_new(&arena->tcache_ql);
ql_new(&arena->cache_bin_array_descriptor_ql);
- tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
- if (tcache != NULL && tcache->arena == arena) {
- ql_elm_new(tcache, link);
- ql_tail_insert(&arena->tcache_ql, tcache, link);
+ tcache_slow_t *tcache_slow = tcache_slow_get(tsdn_tsd(tsdn));
+ if (tcache_slow != NULL && tcache_slow->arena == arena) {
+ tcache_t *tcache = tcache_slow->tcache;
+ ql_elm_new(tcache_slow, link);
+ ql_tail_insert(&arena->tcache_ql, tcache_slow, link);
cache_bin_array_descriptor_init(
- &tcache->cache_bin_array_descriptor,
- tcache->bins_small, tcache->bins_large);
+ &tcache_slow->cache_bin_array_descriptor,
+ tcache->bins);
ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
- &tcache->cache_bin_array_descriptor, link);
+ &tcache_slow->cache_bin_array_descriptor, link);
}
}
- for (i = 0; i < SC_NBINS; i++) {
- for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
- bin_postfork_child(tsdn, &arena->bins[i].bin_shards[j]);
- }
+ for (unsigned i = 0; i < nbins_total; i++) {
+ bin_postfork_child(tsdn, &arena->bins[i]);
}
+
malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
base_postfork_child(tsdn, arena->base);
- malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
- extents_postfork_child(tsdn, &arena->extents_dirty);
- extents_postfork_child(tsdn, &arena->extents_muzzy);
- extents_postfork_child(tsdn, &arena->extents_retained);
- malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
- malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
- malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
+ pa_shard_postfork_child(tsdn, &arena->pa_shard);
if (config_stats) {
malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
}
diff --git a/contrib/jemalloc/src/background_thread.c b/contrib/jemalloc/src/background_thread.c
index 57b9b256bb9b..3bb8d26cd3d6 100644
--- a/contrib/jemalloc/src/background_thread.c
+++ b/contrib/jemalloc/src/background_thread.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_BACKGROUND_THREAD_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -54,8 +53,9 @@ pthread_create_wrapper(pthread_t *__restrict thread, const pthread_attr_t *attr,
bool background_thread_create(tsd_t *tsd, unsigned arena_ind) NOT_REACHED
bool background_threads_enable(tsd_t *tsd) NOT_REACHED
bool background_threads_disable(tsd_t *tsd) NOT_REACHED
-void background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
- arena_decay_t *decay, size_t npages_new) NOT_REACHED
+bool background_thread_is_started(background_thread_info_t *info) NOT_REACHED
+void background_thread_wakeup_early(background_thread_info_t *info,
+ nstime_t *remaining_sleep) NOT_REACHED
void background_thread_prefork0(tsdn_t *tsdn) NOT_REACHED
void background_thread_prefork1(tsdn_t *tsdn) NOT_REACHED
void background_thread_postfork_parent(tsdn_t *tsdn) NOT_REACHED
@@ -74,7 +74,7 @@ background_thread_info_init(tsdn_t *tsdn, background_thread_info_t *info) {
info->npages_to_purge_new = 0;
if (config_stats) {
info->tot_n_runs = 0;
- nstime_init(&info->tot_sleep_time, 0);
+ nstime_init_zero(&info->tot_sleep_time);
}
}
@@ -82,136 +82,40 @@ static inline bool
set_current_thread_affinity(int cpu) {
#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
cpu_set_t cpuset;
+#else
+# ifndef __NetBSD__
+ cpuset_t cpuset;
+# else
+ cpuset_t *cpuset;
+# endif
+#endif
+
+#ifndef __NetBSD__
CPU_ZERO(&cpuset);
CPU_SET(cpu, &cpuset);
- int ret = sched_setaffinity(0, sizeof(cpu_set_t), &cpuset);
+#else
+ cpuset = cpuset_create();
+#endif
- return (ret != 0);
+#if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
+ return (sched_setaffinity(0, sizeof(cpu_set_t), &cpuset) != 0);
#else
- return false;
+# ifndef __NetBSD__
+ int ret = pthread_setaffinity_np(pthread_self(), sizeof(cpuset_t),
+ &cpuset);
+# else
+ int ret = pthread_setaffinity_np(pthread_self(), cpuset_size(cpuset),
+ cpuset);
+ cpuset_destroy(cpuset);
+# endif
+ return ret != 0;
#endif
}
-/* Threshold for determining when to wake up the background thread. */
-#define BACKGROUND_THREAD_NPAGES_THRESHOLD UINT64_C(1024)
#define BILLION UINT64_C(1000000000)
/* Minimal sleep interval 100 ms. */
#define BACKGROUND_THREAD_MIN_INTERVAL_NS (BILLION / 10)
-static inline size_t
-decay_npurge_after_interval(arena_decay_t *decay, size_t interval) {
- size_t i;
- uint64_t sum = 0;
- for (i = 0; i < interval; i++) {
- sum += decay->backlog[i] * h_steps[i];
- }
- for (; i < SMOOTHSTEP_NSTEPS; i++) {
- sum += decay->backlog[i] * (h_steps[i] - h_steps[i - interval]);
- }
-
- return (size_t)(sum >> SMOOTHSTEP_BFP);
-}
-
-static uint64_t
-arena_decay_compute_purge_interval_impl(tsdn_t *tsdn, arena_decay_t *decay,
- extents_t *extents) {
- if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
- /* Use minimal interval if decay is contended. */
- return BACKGROUND_THREAD_MIN_INTERVAL_NS;
- }
-
- uint64_t interval;
- ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
- if (decay_time <= 0) {
- /* Purging is eagerly done or disabled currently. */
- interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
- goto label_done;
- }
-
- uint64_t decay_interval_ns = nstime_ns(&decay->interval);
- assert(decay_interval_ns > 0);
- size_t npages = extents_npages_get(extents);
- if (npages == 0) {
- unsigned i;
- for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
- if (decay->backlog[i] > 0) {
- break;
- }
- }
- if (i == SMOOTHSTEP_NSTEPS) {
- /* No dirty pages recorded. Sleep indefinitely. */
- interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
- goto label_done;
- }
- }
- if (npages <= BACKGROUND_THREAD_NPAGES_THRESHOLD) {
- /* Use max interval. */
- interval = decay_interval_ns * SMOOTHSTEP_NSTEPS;
- goto label_done;
- }
-
- size_t lb = BACKGROUND_THREAD_MIN_INTERVAL_NS / decay_interval_ns;
- size_t ub = SMOOTHSTEP_NSTEPS;
- /* Minimal 2 intervals to ensure reaching next epoch deadline. */
- lb = (lb < 2) ? 2 : lb;
- if ((decay_interval_ns * ub <= BACKGROUND_THREAD_MIN_INTERVAL_NS) ||
- (lb + 2 > ub)) {
- interval = BACKGROUND_THREAD_MIN_INTERVAL_NS;
- goto label_done;
- }
-
- assert(lb + 2 <= ub);
- size_t npurge_lb, npurge_ub;
- npurge_lb = decay_npurge_after_interval(decay, lb);
- if (npurge_lb > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
- interval = decay_interval_ns * lb;
- goto label_done;
- }
- npurge_ub = decay_npurge_after_interval(decay, ub);
- if (npurge_ub < BACKGROUND_THREAD_NPAGES_THRESHOLD) {
- interval = decay_interval_ns * ub;
- goto label_done;
- }
-
- unsigned n_search = 0;
- size_t target, npurge;
- while ((npurge_lb + BACKGROUND_THREAD_NPAGES_THRESHOLD < npurge_ub)
- && (lb + 2 < ub)) {
- target = (lb + ub) / 2;
- npurge = decay_npurge_after_interval(decay, target);
- if (npurge > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
- ub = target;
- npurge_ub = npurge;
- } else {
- lb = target;
- npurge_lb = npurge;
- }
- assert(n_search++ < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
- }
- interval = decay_interval_ns * (ub + lb) / 2;
-label_done:
- interval = (interval < BACKGROUND_THREAD_MIN_INTERVAL_NS) ?
- BACKGROUND_THREAD_MIN_INTERVAL_NS : interval;
- malloc_mutex_unlock(tsdn, &decay->mtx);
-
- return interval;
-}
-
-/* Compute purge interval for background threads. */
-static uint64_t
-arena_decay_compute_purge_interval(tsdn_t *tsdn, arena_t *arena) {
- uint64_t i1, i2;
- i1 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_dirty,
- &arena->extents_dirty);
- if (i1 == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
- return i1;
- }
- i2 = arena_decay_compute_purge_interval_impl(tsdn, &arena->decay_muzzy,
- &arena->extents_muzzy);
-
- return i1 < i2 ? i1 : i2;
-}
-
static void
background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
uint64_t interval) {
@@ -228,7 +132,8 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
int ret;
if (interval == BACKGROUND_THREAD_INDEFINITE_SLEEP) {
- assert(background_thread_indefinite_sleep(info));
+ background_thread_wakeup_time_set(tsdn, info,
+ BACKGROUND_THREAD_INDEFINITE_SLEEP);
ret = pthread_cond_wait(&info->cond, &info->mtx.lock);
assert(ret == 0);
} else {
@@ -236,8 +141,7 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
interval <= BACKGROUND_THREAD_INDEFINITE_SLEEP);
/* We need malloc clock (can be different from tv). */
nstime_t next_wakeup;
- nstime_init(&next_wakeup, 0);
- nstime_update(&next_wakeup);
+ nstime_init_update(&next_wakeup);
nstime_iadd(&next_wakeup, interval);
assert(nstime_ns(&next_wakeup) <
BACKGROUND_THREAD_INDEFINITE_SLEEP);
@@ -254,8 +158,6 @@ background_thread_sleep(tsdn_t *tsdn, background_thread_info_t *info,
assert(!background_thread_indefinite_sleep(info));
ret = pthread_cond_timedwait(&info->cond, &info->mtx.lock, &ts);
assert(ret == ETIMEDOUT || ret == 0);
- background_thread_wakeup_time_set(tsdn, info,
- BACKGROUND_THREAD_INDEFINITE_SLEEP);
}
if (config_stats) {
gettimeofday(&tv, NULL);
@@ -283,28 +185,48 @@ background_thread_pause_check(tsdn_t *tsdn, background_thread_info_t *info) {
}
static inline void
-background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info, unsigned ind) {
- uint64_t min_interval = BACKGROUND_THREAD_INDEFINITE_SLEEP;
+background_work_sleep_once(tsdn_t *tsdn, background_thread_info_t *info,
+ unsigned ind) {
+ uint64_t ns_until_deferred = BACKGROUND_THREAD_DEFERRED_MAX;
unsigned narenas = narenas_total_get();
+ bool slept_indefinitely = background_thread_indefinite_sleep(info);
for (unsigned i = ind; i < narenas; i += max_background_threads) {
arena_t *arena = arena_get(tsdn, i, false);
if (!arena) {
continue;
}
- arena_decay(tsdn, arena, true, false);
- if (min_interval == BACKGROUND_THREAD_MIN_INTERVAL_NS) {
+ /*
+		 * If the thread was woken from an indefinite sleep, don't
+		 * do the work immediately; instead, check when the deferred
+		 * work that caused the wakeup is scheduled to run.
+ */
+ if (!slept_indefinitely) {
+ arena_do_deferred_work(tsdn, arena);
+ }
+ if (ns_until_deferred <= BACKGROUND_THREAD_MIN_INTERVAL_NS) {
/* Min interval will be used. */
continue;
}
- uint64_t interval = arena_decay_compute_purge_interval(tsdn,
- arena);
- assert(interval >= BACKGROUND_THREAD_MIN_INTERVAL_NS);
- if (min_interval > interval) {
- min_interval = interval;
+ uint64_t ns_arena_deferred = pa_shard_time_until_deferred_work(
+ tsdn, &arena->pa_shard);
+ if (ns_arena_deferred < ns_until_deferred) {
+ ns_until_deferred = ns_arena_deferred;
}
}
- background_thread_sleep(tsdn, info, min_interval);
+
+ uint64_t sleep_ns;
+ if (ns_until_deferred == BACKGROUND_THREAD_DEFERRED_MAX) {
+ sleep_ns = BACKGROUND_THREAD_INDEFINITE_SLEEP;
+ } else {
+ sleep_ns =
+ (ns_until_deferred < BACKGROUND_THREAD_MIN_INTERVAL_NS)
+ ? BACKGROUND_THREAD_MIN_INTERVAL_NS
+ : ns_until_deferred;
+
+ }
+
+ background_thread_sleep(tsdn, info, sleep_ns);
}
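
The sleep selection above reduces to a clamp; restated as a pure function (equivalent, illustrative):

static uint64_t
sleep_ns_for(uint64_t ns_until_deferred) {
	if (ns_until_deferred == BACKGROUND_THREAD_DEFERRED_MAX) {
		return BACKGROUND_THREAD_INDEFINITE_SLEEP;
	}
	/* Never sleep for less than the 100 ms minimum interval. */
	return ns_until_deferred < BACKGROUND_THREAD_MIN_INTERVAL_NS ?
	    BACKGROUND_THREAD_MIN_INTERVAL_NS : ns_until_deferred;
}
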
static bool
@@ -508,7 +430,7 @@ background_thread_entry(void *ind_arg) {
assert(thread_ind < max_background_threads);
#ifdef JEMALLOC_HAVE_PTHREAD_SETNAME_NP
pthread_setname_np(pthread_self(), "jemalloc_bg_thd");
-#elif defined(__FreeBSD__)
+#elif defined(__FreeBSD__) || defined(__DragonFly__)
pthread_set_name_np(pthread_self(), "jemalloc_bg_thd");
#endif
if (opt_percpu_arena != percpu_arena_disabled) {
@@ -608,16 +530,16 @@ background_threads_enable(tsd_t *tsd) {
malloc_mutex_assert_owner(tsd_tsdn(tsd), &background_thread_lock);
VARIABLE_ARRAY(bool, marked, max_background_threads);
- unsigned i, nmarked;
- for (i = 0; i < max_background_threads; i++) {
+ unsigned nmarked;
+ for (unsigned i = 0; i < max_background_threads; i++) {
marked[i] = false;
}
nmarked = 0;
/* Thread 0 is required and created at the end. */
marked[0] = true;
/* Mark the threads we need to create for thread 0. */
- unsigned n = narenas_total_get();
- for (i = 1; i < n; i++) {
+ unsigned narenas = narenas_total_get();
+ for (unsigned i = 1; i < narenas; i++) {
if (marked[i % max_background_threads] ||
arena_get(tsd_tsdn(tsd), i, false) == NULL) {
continue;
@@ -634,7 +556,18 @@ background_threads_enable(tsd_t *tsd) {
}
}
- return background_thread_create_locked(tsd, 0);
+ bool err = background_thread_create_locked(tsd, 0);
+ if (err) {
+ return true;
+ }
+ for (unsigned i = 0; i < narenas; i++) {
+ arena_t *arena = arena_get(tsd_tsdn(tsd), i, false);
+ if (arena != NULL) {
+ pa_shard_set_deferral_allowed(tsd_tsdn(tsd),
+ &arena->pa_shard, true);
+ }
+ }
+ return false;
}
bool
@@ -648,92 +581,36 @@ background_threads_disable(tsd_t *tsd) {
return true;
}
assert(n_background_threads == 0);
+ unsigned narenas = narenas_total_get();
+ for (unsigned i = 0; i < narenas; i++) {
+ arena_t *arena = arena_get(tsd_tsdn(tsd), i, false);
+ if (arena != NULL) {
+ pa_shard_set_deferral_allowed(tsd_tsdn(tsd),
+ &arena->pa_shard, false);
+ }
+ }
return false;
}
-/* Check if we need to signal the background thread early. */
+bool
+background_thread_is_started(background_thread_info_t *info) {
+ return info->state == background_thread_started;
+}
+
void
-background_thread_interval_check(tsdn_t *tsdn, arena_t *arena,
- arena_decay_t *decay, size_t npages_new) {
- background_thread_info_t *info = arena_background_thread_info_get(
- arena);
- if (malloc_mutex_trylock(tsdn, &info->mtx)) {
- /*
- * Background thread may hold the mutex for a long period of
- * time. We'd like to avoid the variance on application
- * threads. So keep this non-blocking, and leave the work to a
- * future epoch.
- */
+background_thread_wakeup_early(background_thread_info_t *info,
+ nstime_t *remaining_sleep) {
+ /*
+ * This is an optimization to increase batching. At this point
+	 * we know the background thread will wake up soon, so the time the
+	 * just-freed memory stays cached is bounded and low.
+ */
+ if (remaining_sleep != NULL && nstime_ns(remaining_sleep) <
+ BACKGROUND_THREAD_MIN_INTERVAL_NS) {
return;
}
-
- if (info->state != background_thread_started) {
- goto label_done;
- }
- if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
- goto label_done;
- }
-
- ssize_t decay_time = atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
- if (decay_time <= 0) {
- /* Purging is eagerly done or disabled currently. */
- goto label_done_unlock2;
- }
- uint64_t decay_interval_ns = nstime_ns(&decay->interval);
- assert(decay_interval_ns > 0);
-
- nstime_t diff;
- nstime_init(&diff, background_thread_wakeup_time_get(info));
- if (nstime_compare(&diff, &decay->epoch) <= 0) {
- goto label_done_unlock2;
- }
- nstime_subtract(&diff, &decay->epoch);
- if (nstime_ns(&diff) < BACKGROUND_THREAD_MIN_INTERVAL_NS) {
- goto label_done_unlock2;
- }
-
- if (npages_new > 0) {
- size_t n_epoch = (size_t)(nstime_ns(&diff) / decay_interval_ns);
- /*
- * Compute how many new pages we would need to purge by the next
- * wakeup, which is used to determine if we should signal the
- * background thread.
- */
- uint64_t npurge_new;
- if (n_epoch >= SMOOTHSTEP_NSTEPS) {
- npurge_new = npages_new;
- } else {
- uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
- assert(h_steps_max >=
- h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
- npurge_new = npages_new * (h_steps_max -
- h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
- npurge_new >>= SMOOTHSTEP_BFP;
- }
- info->npages_to_purge_new += npurge_new;
- }
-
- bool should_signal;
- if (info->npages_to_purge_new > BACKGROUND_THREAD_NPAGES_THRESHOLD) {
- should_signal = true;
- } else if (unlikely(background_thread_indefinite_sleep(info)) &&
- (extents_npages_get(&arena->extents_dirty) > 0 ||
- extents_npages_get(&arena->extents_muzzy) > 0 ||
- info->npages_to_purge_new > 0)) {
- should_signal = true;
- } else {
- should_signal = false;
- }
-
- if (should_signal) {
- info->npages_to_purge_new = 0;
- pthread_cond_signal(&info->cond);
- }
-label_done_unlock2:
- malloc_mutex_unlock(tsdn, &decay->mtx);
-label_done:
- malloc_mutex_unlock(tsdn, &info->mtx);
+ pthread_cond_signal(&info->cond);
}
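
Worked through against the 100 ms minimum interval (values illustrative):

/* remaining_sleep = 40 ms -> below the minimum interval: return without
 *                            signaling; the thread wakes soon anyway.
 * remaining_sleep = 2 s   -> signal, so freed memory isn't held that long.
 * remaining_sleep = NULL  -> caller skipped the check: always signal.
 */
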
void
@@ -794,9 +671,11 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
return true;
}
- stats->num_threads = n_background_threads;
+ nstime_init_zero(&stats->run_interval);
+ memset(&stats->max_counter_per_bg_thd, 0, sizeof(mutex_prof_data_t));
+
uint64_t num_runs = 0;
- nstime_init(&stats->run_interval, 0);
+ stats->num_threads = n_background_threads;
for (unsigned i = 0; i < max_background_threads; i++) {
background_thread_info_t *info = &background_thread_info[i];
if (malloc_mutex_trylock(tsdn, &info->mtx)) {
@@ -809,6 +688,8 @@ background_thread_stats_read(tsdn_t *tsdn, background_thread_stats_t *stats) {
if (info->state != background_thread_stopped) {
num_runs += info->tot_n_runs;
nstime_add(&stats->run_interval, &info->tot_sleep_time);
+ malloc_mutex_prof_max_update(tsdn,
+ &stats->max_counter_per_bg_thd, &info->mtx);
}
malloc_mutex_unlock(tsdn, &info->mtx);
}
@@ -892,7 +773,7 @@ background_thread_boot0(void) {
}
bool
-background_thread_boot1(tsdn_t *tsdn) {
+background_thread_boot1(tsdn_t *tsdn, base_t *base) {
#ifdef JEMALLOC_BACKGROUND_THREAD
assert(have_background_thread);
assert(narenas_total_get() > 0);
@@ -911,7 +792,7 @@ background_thread_boot1(tsdn_t *tsdn) {
}
background_thread_info = (background_thread_info_t *)base_alloc(tsdn,
- b0get(), opt_max_background_threads *
+ base, opt_max_background_threads *
sizeof(background_thread_info_t), CACHELINE);
if (background_thread_info == NULL) {
return true;
diff --git a/contrib/jemalloc/src/base.c b/contrib/jemalloc/src/base.c
index f3c61661a20a..7f4d67564b74 100644
--- a/contrib/jemalloc/src/base.c
+++ b/contrib/jemalloc/src/base.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -7,6 +6,15 @@
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/sz.h"
+/*
+ * In auto mode, arenas switch to huge pages for the base allocator on the
+ * second base block. a0 switches to thp on the 5th block (after 20 megabytes
+ * of metadata), since more metadata (e.g. rtree nodes) comes from a0's base.
+ */
+
+#define BASE_AUTO_THP_THRESHOLD 2
+#define BASE_AUTO_THP_THRESHOLD_A0 5
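
A sketch of the gate these thresholds presumably feed (field and helper names assumed; the real check lives in base_auto_thp_switch(), shown only partially below):

size_t threshold = (base_ind_get(base) == 0) ?
    BASE_AUTO_THP_THRESHOLD_A0 : BASE_AUTO_THP_THRESHOLD;
if (!base->auto_thp_switched && base->n_blocks >= threshold) {
	/* madvise existing blocks huge; map future blocks huge too */
}
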
+
/******************************************************************************/
/* Data. */
@@ -29,7 +37,7 @@ metadata_thp_madvise(void) {
}
static void *
-base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size) {
+base_map(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, size_t size) {
void *addr;
bool zero = true;
bool commit = true;
@@ -37,22 +45,21 @@ base_map(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, size_t size)
/* Use huge page sizes and alignment regardless of opt_metadata_thp. */
assert(size == HUGEPAGE_CEILING(size));
size_t alignment = HUGEPAGE;
- if (extent_hooks == &extent_hooks_default) {
+ if (ehooks_are_default(ehooks)) {
addr = extent_alloc_mmap(NULL, size, alignment, &zero, &commit);
+ if (have_madvise_huge && addr) {
+ pages_set_thp_state(addr, size);
+ }
} else {
- /* No arena context as we are creating new arenas. */
- tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
- pre_reentrancy(tsd, NULL);
- addr = extent_hooks->alloc(extent_hooks, NULL, size, alignment,
- &zero, &commit, ind);
- post_reentrancy(tsd);
+ addr = ehooks_alloc(tsdn, ehooks, NULL, size, alignment, &zero,
+ &commit);
}
return addr;
}
static void
-base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
+base_unmap(tsdn_t *tsdn, ehooks_t *ehooks, unsigned ind, void *addr,
size_t size) {
/*
* Cascade through dalloc, decommit, purge_forced, and purge_lazy,
@@ -64,7 +71,7 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
* may in fact want the end state of all associated virtual memory to be
* in some consistent-but-allocated state.
*/
- if (extent_hooks == &extent_hooks_default) {
+ if (ehooks_are_default(ehooks)) {
if (!extent_dalloc_mmap(addr, size)) {
goto label_done;
}
@@ -80,31 +87,19 @@ base_unmap(tsdn_t *tsdn, extent_hooks_t *extent_hooks, unsigned ind, void *addr,
/* Nothing worked. This should never happen. */
not_reached();
} else {
- tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
- pre_reentrancy(tsd, NULL);
- if (extent_hooks->dalloc != NULL &&
- !extent_hooks->dalloc(extent_hooks, addr, size, true,
- ind)) {
- goto label_post_reentrancy;
+ if (!ehooks_dalloc(tsdn, ehooks, addr, size, true)) {
+ goto label_done;
}
- if (extent_hooks->decommit != NULL &&
- !extent_hooks->decommit(extent_hooks, addr, size, 0, size,
- ind)) {
- goto label_post_reentrancy;
+ if (!ehooks_decommit(tsdn, ehooks, addr, size, 0, size)) {
+ goto label_done;
}
- if (extent_hooks->purge_forced != NULL &&
- !extent_hooks->purge_forced(extent_hooks, addr, size, 0,
- size, ind)) {
- goto label_post_reentrancy;
+ if (!ehooks_purge_forced(tsdn, ehooks, addr, size, 0, size)) {
+ goto label_done;
}
- if (extent_hooks->purge_lazy != NULL &&
- !extent_hooks->purge_lazy(extent_hooks, addr, size, 0, size,
- ind)) {
- goto label_post_reentrancy;
+ if (!ehooks_purge_lazy(tsdn, ehooks, addr, size, 0, size)) {
+ goto label_done;
}
/* Nothing worked. That's the application's problem. */
- label_post_reentrancy:
- post_reentrancy(tsd);
}
label_done:
if (metadata_thp_madvise()) {
@@ -116,14 +111,14 @@ label_done:
}
static void
-base_extent_init(size_t *extent_sn_next, extent_t *extent, void *addr,
+base_edata_init(size_t *extent_sn_next, edata_t *edata, void *addr,
size_t size) {
size_t sn;
sn = *extent_sn_next;
(*extent_sn_next)++;
- extent_binit(extent, addr, size, sn);
+ edata_binit(edata, addr, size, sn);
}
static size_t
@@ -169,7 +164,7 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
pages_huge(block, block->size);
if (config_stats) {
base->n_thp += HUGEPAGE_CEILING(block->size -
- extent_bsize_get(&block->extent)) >> LG_HUGEPAGE;
+ edata_bsize_get(&block->edata)) >> LG_HUGEPAGE;
}
block = block->next;
assert(block == NULL || (base_ind_get(base) == 0));
@@ -177,34 +172,34 @@ base_auto_thp_switch(tsdn_t *tsdn, base_t *base) {
}
static void *
-base_extent_bump_alloc_helper(extent_t *extent, size_t *gap_size, size_t size,
+base_extent_bump_alloc_helper(edata_t *edata, size_t *gap_size, size_t size,
size_t alignment) {
void *ret;
assert(alignment == ALIGNMENT_CEILING(alignment, QUANTUM));
assert(size == ALIGNMENT_CEILING(size, alignment));
- *gap_size = ALIGNMENT_CEILING((uintptr_t)extent_addr_get(extent),
- alignment) - (uintptr_t)extent_addr_get(extent);
- ret = (void *)((uintptr_t)extent_addr_get(extent) + *gap_size);
- assert(extent_bsize_get(extent) >= *gap_size + size);
- extent_binit(extent, (void *)((uintptr_t)extent_addr_get(extent) +
- *gap_size + size), extent_bsize_get(extent) - *gap_size - size,
- extent_sn_get(extent));
+ *gap_size = ALIGNMENT_CEILING((uintptr_t)edata_addr_get(edata),
+ alignment) - (uintptr_t)edata_addr_get(edata);
+ ret = (void *)((uintptr_t)edata_addr_get(edata) + *gap_size);
+ assert(edata_bsize_get(edata) >= *gap_size + size);
+ edata_binit(edata, (void *)((uintptr_t)edata_addr_get(edata) +
+ *gap_size + size), edata_bsize_get(edata) - *gap_size - size,
+ edata_sn_get(edata));
return ret;
}
static void
-base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
+base_extent_bump_alloc_post(base_t *base, edata_t *edata, size_t gap_size,
void *addr, size_t size) {
- if (extent_bsize_get(extent) > 0) {
+ if (edata_bsize_get(edata) > 0) {
/*
* Compute the index for the largest size class that does not
* exceed extent's size.
*/
szind_t index_floor =
- sz_size2index(extent_bsize_get(extent) + 1) - 1;
- extent_heap_insert(&base->avail[index_floor], extent);
+ sz_size2index(edata_bsize_get(edata) + 1) - 1;
+ edata_heap_insert(&base->avail[index_floor], edata);
}
if (config_stats) {
@@ -229,13 +224,13 @@ base_extent_bump_alloc_post(base_t *base, extent_t *extent, size_t gap_size,
}
static void *
-base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
+base_extent_bump_alloc(base_t *base, edata_t *edata, size_t size,
size_t alignment) {
void *ret;
size_t gap_size;
- ret = base_extent_bump_alloc_helper(extent, &gap_size, size, alignment);
- base_extent_bump_alloc_post(base, extent, gap_size, ret, size);
+ ret = base_extent_bump_alloc_helper(edata, &gap_size, size, alignment);
+ base_extent_bump_alloc_post(base, edata, gap_size, ret, size);
return ret;
}
@@ -245,8 +240,8 @@ base_extent_bump_alloc(base_t *base, extent_t *extent, size_t size,
* On success a pointer to the initialized base_block_t header is returned.
*/
static base_block_t *
-base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
- unsigned ind, pszind_t *pind_last, size_t *extent_sn_next, size_t size,
+base_block_alloc(tsdn_t *tsdn, base_t *base, ehooks_t *ehooks, unsigned ind,
+ pszind_t *pind_last, size_t *extent_sn_next, size_t size,
size_t alignment) {
alignment = ALIGNMENT_CEILING(alignment, QUANTUM);
size_t usize = ALIGNMENT_CEILING(size, alignment);
@@ -267,7 +262,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
size_t next_block_size = HUGEPAGE_CEILING(sz_pind2sz(pind_next));
size_t block_size = (min_block_size > next_block_size) ? min_block_size
: next_block_size;
- base_block_t *block = (base_block_t *)base_map(tsdn, extent_hooks, ind,
+ base_block_t *block = (base_block_t *)base_map(tsdn, ehooks, ind,
block_size);
if (block == NULL) {
return NULL;
@@ -295,7 +290,7 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
block->size = block_size;
block->next = NULL;
assert(block_size >= header_size);
- base_extent_init(extent_sn_next, &block->extent,
+ base_edata_init(extent_sn_next, &block->edata,
(void *)((uintptr_t)block + header_size), block_size - header_size);
return block;
}
@@ -304,17 +299,17 @@ base_block_alloc(tsdn_t *tsdn, base_t *base, extent_hooks_t *extent_hooks,
* Allocate an extent that is at least as large as specified size, with
* specified alignment.
*/
-static extent_t *
+static edata_t *
base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
malloc_mutex_assert_owner(tsdn, &base->mtx);
- extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
+ ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
/*
* Drop mutex during base_block_alloc(), because an extent hook will be
* called.
*/
malloc_mutex_unlock(tsdn, &base->mtx);
- base_block_t *block = base_block_alloc(tsdn, base, extent_hooks,
+ base_block_t *block = base_block_alloc(tsdn, base, ehooks,
base_ind_get(base), &base->pind_last, &base->extent_sn_next, size,
alignment);
malloc_mutex_lock(tsdn, &base->mtx);
@@ -338,7 +333,7 @@ base_extent_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
- return &block->extent;
+ return &block->edata;
}
base_t *
@@ -347,10 +342,22 @@ b0get(void) {
}
base_t *
-base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+base_new(tsdn_t *tsdn, unsigned ind, const extent_hooks_t *extent_hooks,
+ bool metadata_use_hooks) {
pszind_t pind_last = 0;
size_t extent_sn_next = 0;
- base_block_t *block = base_block_alloc(tsdn, NULL, extent_hooks, ind,
+
+ /*
+ * The base will contain the ehooks eventually, but it itself is
+ * allocated using them. So we use some stack ehooks to bootstrap its
+ * memory, and then initialize the ehooks within the base_t.
+ */
+ ehooks_t fake_ehooks;
+ ehooks_init(&fake_ehooks, metadata_use_hooks ?
+ (extent_hooks_t *)extent_hooks :
+ (extent_hooks_t *)&ehooks_default_extent_hooks, ind);
+
+ base_block_t *block = base_block_alloc(tsdn, NULL, &fake_ehooks, ind,
&pind_last, &extent_sn_next, sizeof(base_t), QUANTUM);
if (block == NULL) {
return NULL;
@@ -359,13 +366,15 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
size_t gap_size;
size_t base_alignment = CACHELINE;
size_t base_size = ALIGNMENT_CEILING(sizeof(base_t), base_alignment);
- base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->extent,
+ base_t *base = (base_t *)base_extent_bump_alloc_helper(&block->edata,
&gap_size, base_size, base_alignment);
- base->ind = ind;
- atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELAXED);
+ ehooks_init(&base->ehooks, (extent_hooks_t *)extent_hooks, ind);
+ ehooks_init(&base->ehooks_base, metadata_use_hooks ?
+ (extent_hooks_t *)extent_hooks :
+ (extent_hooks_t *)&ehooks_default_extent_hooks, ind);
if (malloc_mutex_init(&base->mtx, "base", WITNESS_RANK_BASE,
malloc_mutex_rank_exclusive)) {
- base_unmap(tsdn, extent_hooks, ind, block, block->size);
+ base_unmap(tsdn, &fake_ehooks, ind, block, block->size);
return NULL;
}
base->pind_last = pind_last;
@@ -373,7 +382,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
base->blocks = block;
base->auto_thp_switched = false;
for (szind_t i = 0; i < SC_NSIZES; i++) {
- extent_heap_new(&base->avail[i]);
+ edata_heap_new(&base->avail[i]);
}
if (config_stats) {
base->allocated = sizeof(base_block_t);
@@ -386,7 +395,7 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
assert(base->resident <= base->mapped);
assert(base->n_thp << LG_HUGEPAGE <= base->mapped);
}
- base_extent_bump_alloc_post(base, &block->extent, gap_size, base,
+ base_extent_bump_alloc_post(base, &block->edata, gap_size, base,
base_size);
return base;
@@ -394,26 +403,31 @@ base_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
void
base_delete(tsdn_t *tsdn, base_t *base) {
- extent_hooks_t *extent_hooks = base_extent_hooks_get(base);
+ ehooks_t *ehooks = base_ehooks_get_for_metadata(base);
base_block_t *next = base->blocks;
do {
base_block_t *block = next;
next = block->next;
- base_unmap(tsdn, extent_hooks, base_ind_get(base), block,
+ base_unmap(tsdn, ehooks, base_ind_get(base), block,
block->size);
} while (next != NULL);
}
-extent_hooks_t *
-base_extent_hooks_get(base_t *base) {
- return (extent_hooks_t *)atomic_load_p(&base->extent_hooks,
- ATOMIC_ACQUIRE);
+ehooks_t *
+base_ehooks_get(base_t *base) {
+ return &base->ehooks;
+}
+
+ehooks_t *
+base_ehooks_get_for_metadata(base_t *base) {
+ return &base->ehooks_base;
}
extent_hooks_t *
base_extent_hooks_set(base_t *base, extent_hooks_t *extent_hooks) {
- extent_hooks_t *old_extent_hooks = base_extent_hooks_get(base);
- atomic_store_p(&base->extent_hooks, extent_hooks, ATOMIC_RELEASE);
+ extent_hooks_t *old_extent_hooks =
+ ehooks_get_extent_hooks_ptr(&base->ehooks);
+ ehooks_init(&base->ehooks, extent_hooks, ehooks_ind_get(&base->ehooks));
return old_extent_hooks;
}
@@ -424,28 +438,28 @@ base_alloc_impl(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment,
size_t usize = ALIGNMENT_CEILING(size, alignment);
size_t asize = usize + alignment - QUANTUM;
- extent_t *extent = NULL;
+ edata_t *edata = NULL;
malloc_mutex_lock(tsdn, &base->mtx);
for (szind_t i = sz_size2index(asize); i < SC_NSIZES; i++) {
- extent = extent_heap_remove_first(&base->avail[i]);
- if (extent != NULL) {
+ edata = edata_heap_remove_first(&base->avail[i]);
+ if (edata != NULL) {
/* Use existing space. */
break;
}
}
- if (extent == NULL) {
+ if (edata == NULL) {
/* Try to allocate more space. */
- extent = base_extent_alloc(tsdn, base, usize, alignment);
+ edata = base_extent_alloc(tsdn, base, usize, alignment);
}
void *ret;
- if (extent == NULL) {
+ if (edata == NULL) {
ret = NULL;
goto label_return;
}
- ret = base_extent_bump_alloc(base, extent, usize, alignment);
+ ret = base_extent_bump_alloc(base, edata, usize, alignment);
if (esn != NULL) {
- *esn = extent_sn_get(extent);
+ *esn = (size_t)edata_sn_get(edata);
}
label_return:
malloc_mutex_unlock(tsdn, &base->mtx);
@@ -465,16 +479,16 @@ base_alloc(tsdn_t *tsdn, base_t *base, size_t size, size_t alignment) {
return base_alloc_impl(tsdn, base, size, alignment, NULL);
}
-extent_t *
-base_alloc_extent(tsdn_t *tsdn, base_t *base) {
+edata_t *
+base_alloc_edata(tsdn_t *tsdn, base_t *base) {
size_t esn;
- extent_t *extent = base_alloc_impl(tsdn, base, sizeof(extent_t),
- CACHELINE, &esn);
- if (extent == NULL) {
+ edata_t *edata = base_alloc_impl(tsdn, base, sizeof(edata_t),
+ EDATA_ALIGNMENT, &esn);
+ if (edata == NULL) {
return NULL;
}
- extent_esn_set(extent, esn);
- return extent;
+ edata_esn_set(edata, esn);
+ return edata;
}
void
@@ -509,6 +523,7 @@ base_postfork_child(tsdn_t *tsdn, base_t *base) {
bool
base_boot(tsdn_t *tsdn) {
- b0 = base_new(tsdn, 0, (extent_hooks_t *)&extent_hooks_default);
+ b0 = base_new(tsdn, 0, (extent_hooks_t *)&ehooks_default_extent_hooks,
+ /* metadata_use_hooks */ true);
return (b0 == NULL);
}
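
The bootstrap comment in base_new() above is worth dwelling on: the ehooks now live inside the base_t, yet the base_t itself must be mapped through them, so a stack-local copy seeds the first block and the embedded copies are initialized afterwards. A condensed, self-contained sketch of the same pattern (hooks_t and pool are hypothetical stand-ins, not jemalloc types):

    #include <stddef.h>

    typedef struct { void *(*map)(size_t size); } hooks_t;
    struct pool { hooks_t hooks; /* ... */ };

    /* Mirrors the shape of base_new(): bootstrap through a stack copy of
     * the hooks, then move them into the structure they just mapped. */
    static struct pool *
    pool_new(const hooks_t *user_hooks) {
        hooks_t fake = *user_hooks;             /* stack-local bootstrap copy */
        struct pool *p = fake.map(sizeof(*p));  /* map through the stack copy */
        if (p == NULL) {
            return NULL;
        }
        p->hooks = fake;   /* from here on, the embedded hooks are used */
        return p;
    }

base_new() additionally keeps two embedded copies, base->ehooks for user extents and base->ehooks_base for metadata, the latter falling back to ehooks_default_extent_hooks when metadata_use_hooks is false.
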
diff --git a/contrib/jemalloc/src/bin.c b/contrib/jemalloc/src/bin.c
index bca6b12c352b..fa20458705ac 100644
--- a/contrib/jemalloc/src/bin.c
+++ b/contrib/jemalloc/src/bin.c
@@ -6,26 +6,6 @@
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/witness.h"
-bin_info_t bin_infos[SC_NBINS];
-
-static void
-bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
- bin_info_t bin_infos[SC_NBINS]) {
- for (unsigned i = 0; i < SC_NBINS; i++) {
- bin_info_t *bin_info = &bin_infos[i];
- sc_t *sc = &sc_data->sc[i];
- bin_info->reg_size = ((size_t)1U << sc->lg_base)
- + ((size_t)sc->ndelta << sc->lg_delta);
- bin_info->slab_size = (sc->pgs << LG_PAGE);
- bin_info->nregs =
- (uint32_t)(bin_info->slab_size / bin_info->reg_size);
- bin_info->n_shards = bin_shard_sizes[i];
- bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
- bin_info->nregs);
- bin_info->bitmap_info = bitmap_info;
- }
-}
-
bool
bin_update_shard_size(unsigned bin_shard_sizes[SC_NBINS], size_t start_size,
size_t end_size, size_t nshards) {
@@ -58,12 +38,6 @@ bin_shard_sizes_boot(unsigned bin_shard_sizes[SC_NBINS]) {
}
}
-void
-bin_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
- assert(sc_data->initialized);
- bin_infos_init(sc_data, bin_shard_sizes, bin_infos);
-}
-
bool
bin_init(bin_t *bin) {
if (malloc_mutex_init(&bin->lock, "bin", WITNESS_RANK_BIN,
@@ -71,8 +45,8 @@ bin_init(bin_t *bin) {
return true;
}
bin->slabcur = NULL;
- extent_heap_new(&bin->slabs_nonfull);
- extent_list_init(&bin->slabs_full);
+ edata_heap_new(&bin->slabs_nonfull);
+ edata_list_active_init(&bin->slabs_full);
if (config_stats) {
memset(&bin->stats, 0, sizeof(bin_stats_t));
}
diff --git a/contrib/jemalloc/src/bin_info.c b/contrib/jemalloc/src/bin_info.c
new file mode 100644
index 000000000000..8629ef8817df
--- /dev/null
+++ b/contrib/jemalloc/src/bin_info.c
@@ -0,0 +1,30 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/bin_info.h"
+
+bin_info_t bin_infos[SC_NBINS];
+
+static void
+bin_infos_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
+ bin_info_t infos[SC_NBINS]) {
+ for (unsigned i = 0; i < SC_NBINS; i++) {
+ bin_info_t *bin_info = &infos[i];
+ sc_t *sc = &sc_data->sc[i];
+ bin_info->reg_size = ((size_t)1U << sc->lg_base)
+ + ((size_t)sc->ndelta << sc->lg_delta);
+ bin_info->slab_size = (sc->pgs << LG_PAGE);
+ bin_info->nregs =
+ (uint32_t)(bin_info->slab_size / bin_info->reg_size);
+ bin_info->n_shards = bin_shard_sizes[i];
+ bitmap_info_t bitmap_info = BITMAP_INFO_INITIALIZER(
+ bin_info->nregs);
+ bin_info->bitmap_info = bitmap_info;
+ }
+}
+
+void
+bin_info_boot(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
+ assert(sc_data->initialized);
+ bin_infos_init(sc_data, bin_shard_sizes, bin_infos);
+}
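
Everything in bin_infos_init() is derived from the size-class descriptor. As a worked example (assuming 4 KiB pages, i.e. LG_PAGE == 12), a descriptor with lg_base = 6, ndelta = 1, lg_delta = 4 and pgs = 5 yields:

    reg_size  = (1 << 6) + (1 << 4) = 80 bytes
    slab_size = 5 << 12             = 20480 bytes
    nregs     = 20480 / 80          = 256 regions per slab

so the 80-byte bin packs 256 regions into a five-page slab with no remainder, and its bitmap_info is sized for 256 bits.
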
diff --git a/contrib/jemalloc/src/bitmap.c b/contrib/jemalloc/src/bitmap.c
index 468b3178ebfa..0ccedc5db5bc 100644
--- a/contrib/jemalloc/src/bitmap.c
+++ b/contrib/jemalloc/src/bitmap.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_BITMAP_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
diff --git a/contrib/jemalloc/src/buf_writer.c b/contrib/jemalloc/src/buf_writer.c
new file mode 100644
index 000000000000..7c6f79403c00
--- /dev/null
+++ b/contrib/jemalloc/src/buf_writer.c
@@ -0,0 +1,144 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/buf_writer.h"
+#include "jemalloc/internal/malloc_io.h"
+
+static void *
+buf_writer_allocate_internal_buf(tsdn_t *tsdn, size_t buf_len) {
+#ifdef JEMALLOC_JET
+ if (buf_len > SC_LARGE_MAXCLASS) {
+ return NULL;
+ }
+#else
+ assert(buf_len <= SC_LARGE_MAXCLASS);
+#endif
+ return iallocztm(tsdn, buf_len, sz_size2index(buf_len), false, NULL,
+ true, arena_get(tsdn, 0, false), true);
+}
+
+static void
+buf_writer_free_internal_buf(tsdn_t *tsdn, void *buf) {
+ if (buf != NULL) {
+ idalloctm(tsdn, buf, NULL, NULL, true, true);
+ }
+}
+
+static void
+buf_writer_assert(buf_writer_t *buf_writer) {
+ assert(buf_writer != NULL);
+ assert(buf_writer->write_cb != NULL);
+ if (buf_writer->buf != NULL) {
+ assert(buf_writer->buf_size > 0);
+ } else {
+ assert(buf_writer->buf_size == 0);
+ assert(buf_writer->internal_buf);
+ }
+ assert(buf_writer->buf_end <= buf_writer->buf_size);
+}
+
+bool
+buf_writer_init(tsdn_t *tsdn, buf_writer_t *buf_writer, write_cb_t *write_cb,
+ void *cbopaque, char *buf, size_t buf_len) {
+ if (write_cb != NULL) {
+ buf_writer->write_cb = write_cb;
+ } else {
+ buf_writer->write_cb = je_malloc_message != NULL ?
+ je_malloc_message : wrtmessage;
+ }
+ buf_writer->cbopaque = cbopaque;
+ assert(buf_len >= 2);
+ if (buf != NULL) {
+ buf_writer->buf = buf;
+ buf_writer->internal_buf = false;
+ } else {
+ buf_writer->buf = buf_writer_allocate_internal_buf(tsdn,
+ buf_len);
+ buf_writer->internal_buf = true;
+ }
+ if (buf_writer->buf != NULL) {
+ buf_writer->buf_size = buf_len - 1; /* Allowing for '\0'. */
+ } else {
+ buf_writer->buf_size = 0;
+ }
+ buf_writer->buf_end = 0;
+ buf_writer_assert(buf_writer);
+ return buf_writer->buf == NULL;
+}
+
+void
+buf_writer_flush(buf_writer_t *buf_writer) {
+ buf_writer_assert(buf_writer);
+ if (buf_writer->buf == NULL) {
+ return;
+ }
+ buf_writer->buf[buf_writer->buf_end] = '\0';
+ buf_writer->write_cb(buf_writer->cbopaque, buf_writer->buf);
+ buf_writer->buf_end = 0;
+ buf_writer_assert(buf_writer);
+}
+
+void
+buf_writer_cb(void *buf_writer_arg, const char *s) {
+ buf_writer_t *buf_writer = (buf_writer_t *)buf_writer_arg;
+ buf_writer_assert(buf_writer);
+ if (buf_writer->buf == NULL) {
+ buf_writer->write_cb(buf_writer->cbopaque, s);
+ return;
+ }
+ size_t i, slen, n;
+ for (i = 0, slen = strlen(s); i < slen; i += n) {
+ if (buf_writer->buf_end == buf_writer->buf_size) {
+ buf_writer_flush(buf_writer);
+ }
+ size_t s_remain = slen - i;
+ size_t buf_remain = buf_writer->buf_size - buf_writer->buf_end;
+ n = s_remain < buf_remain ? s_remain : buf_remain;
+ memcpy(buf_writer->buf + buf_writer->buf_end, s + i, n);
+ buf_writer->buf_end += n;
+ buf_writer_assert(buf_writer);
+ }
+ assert(i == slen);
+}
+
+void
+buf_writer_terminate(tsdn_t *tsdn, buf_writer_t *buf_writer) {
+ buf_writer_assert(buf_writer);
+ buf_writer_flush(buf_writer);
+ if (buf_writer->internal_buf) {
+ buf_writer_free_internal_buf(tsdn, buf_writer->buf);
+ }
+}
+
+void
+buf_writer_pipe(buf_writer_t *buf_writer, read_cb_t *read_cb,
+ void *read_cbopaque) {
+ /*
+ * A tiny local buffer in case the buffered writer failed to allocate
+ * at init.
+ */
+ static char backup_buf[16];
+ static buf_writer_t backup_buf_writer;
+
+ buf_writer_assert(buf_writer);
+ assert(read_cb != NULL);
+ if (buf_writer->buf == NULL) {
+ buf_writer_init(TSDN_NULL, &backup_buf_writer,
+ buf_writer->write_cb, buf_writer->cbopaque, backup_buf,
+ sizeof(backup_buf));
+ buf_writer = &backup_buf_writer;
+ }
+ assert(buf_writer->buf != NULL);
+ ssize_t nread = 0;
+ do {
+ buf_writer->buf_end += nread;
+ buf_writer_assert(buf_writer);
+ if (buf_writer->buf_end == buf_writer->buf_size) {
+ buf_writer_flush(buf_writer);
+ }
+ nread = read_cb(read_cbopaque,
+ buf_writer->buf + buf_writer->buf_end,
+ buf_writer->buf_size - buf_writer->buf_end);
+ } while (nread > 0);
+ buf_writer_flush(buf_writer);
+}
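
buf_writer exists to coalesce many small write_cb() calls into few large ones (stats printing is a natural consumer). A usage sketch against the signatures above; this is internal API, so the tsdn plumbing is elided and write_to_log is a hypothetical sink:

    #include <stdio.h>

    static void
    write_to_log(void *cbopaque, const char *s) {
        fputs(s, (FILE *)cbopaque);  /* called once per flushed buffer */
    }

    static void
    emit_report(tsdn_t *tsdn, FILE *log) {
        buf_writer_t bw;
        char buf[4096];

        /* With a caller-supplied buffer, init cannot fail; passing
         * buf == NULL instead makes it allocate (and later free) one. */
        buf_writer_init(tsdn, &bw, write_to_log, log, buf, sizeof(buf));
        buf_writer_cb(&bw, "many ");
        buf_writer_cb(&bw, "small ");
        buf_writer_cb(&bw, "strings\n");   /* coalesced into one fputs() */
        buf_writer_terminate(tsdn, &bw);   /* final flush + cleanup */
    }
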
diff --git a/contrib/jemalloc/src/cache_bin.c b/contrib/jemalloc/src/cache_bin.c
new file mode 100644
index 000000000000..9ae072a0ee6e
--- /dev/null
+++ b/contrib/jemalloc/src/cache_bin.c
@@ -0,0 +1,99 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/bit_util.h"
+#include "jemalloc/internal/cache_bin.h"
+#include "jemalloc/internal/safety_check.h"
+
+void
+cache_bin_info_init(cache_bin_info_t *info,
+ cache_bin_sz_t ncached_max) {
+ assert(ncached_max <= CACHE_BIN_NCACHED_MAX);
+ size_t stack_size = (size_t)ncached_max * sizeof(void *);
+ assert(stack_size < ((size_t)1 << (sizeof(cache_bin_sz_t) * 8)));
+ info->ncached_max = (cache_bin_sz_t)ncached_max;
+}
+
+void
+cache_bin_info_compute_alloc(cache_bin_info_t *infos, szind_t ninfos,
+ size_t *size, size_t *alignment) {
+	/*
+	 * For the total bin stack region (per tcache), reserve 2 more slots
+	 * so that
+ * 1) the empty position can be safely read on the fast path before
+ * checking "is_empty"; and
+ * 2) the cur_ptr can go beyond the empty position by 1 step safely on
+ * the fast path (i.e. no overflow).
+ */
+ *size = sizeof(void *) * 2;
+ for (szind_t i = 0; i < ninfos; i++) {
+ assert(infos[i].ncached_max > 0);
+ *size += infos[i].ncached_max * sizeof(void *);
+ }
+
+ /*
+ * Align to at least PAGE, to minimize the # of TLBs needed by the
+ * smaller sizes; also helps if the larger sizes don't get used at all.
+ */
+ *alignment = PAGE;
+}
+
+void
+cache_bin_preincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
+ size_t *cur_offset) {
+ if (config_debug) {
+ size_t computed_size;
+ size_t computed_alignment;
+
+ /* Pointer should be as aligned as we asked for. */
+ cache_bin_info_compute_alloc(infos, ninfos, &computed_size,
+ &computed_alignment);
+ assert(((uintptr_t)alloc & (computed_alignment - 1)) == 0);
+ }
+
+ *(uintptr_t *)((uintptr_t)alloc + *cur_offset) =
+ cache_bin_preceding_junk;
+ *cur_offset += sizeof(void *);
+}
+
+void
+cache_bin_postincrement(cache_bin_info_t *infos, szind_t ninfos, void *alloc,
+ size_t *cur_offset) {
+ *(uintptr_t *)((uintptr_t)alloc + *cur_offset) =
+ cache_bin_trailing_junk;
+ *cur_offset += sizeof(void *);
+}
+
+void
+cache_bin_init(cache_bin_t *bin, cache_bin_info_t *info, void *alloc,
+ size_t *cur_offset) {
+ /*
+ * The full_position points to the lowest available space. Allocations
+ * will access the slots toward higher addresses (for the benefit of
+ * adjacent prefetch).
+ */
+ void *stack_cur = (void *)((uintptr_t)alloc + *cur_offset);
+ void *full_position = stack_cur;
+ uint16_t bin_stack_size = info->ncached_max * sizeof(void *);
+
+ *cur_offset += bin_stack_size;
+ void *empty_position = (void *)((uintptr_t)alloc + *cur_offset);
+
+ /* Init to the empty position. */
+ bin->stack_head = (void **)empty_position;
+ bin->low_bits_low_water = (uint16_t)(uintptr_t)bin->stack_head;
+ bin->low_bits_full = (uint16_t)(uintptr_t)full_position;
+ bin->low_bits_empty = (uint16_t)(uintptr_t)empty_position;
+ cache_bin_sz_t free_spots = cache_bin_diff(bin,
+ bin->low_bits_full, (uint16_t)(uintptr_t)bin->stack_head,
+ /* racy */ false);
+ assert(free_spots == bin_stack_size);
+ assert(cache_bin_ncached_get_local(bin, info) == 0);
+ assert(cache_bin_empty_position_get(bin) == empty_position);
+
+ assert(bin_stack_size > 0 || empty_position == full_position);
+}
+
+bool
+cache_bin_still_zero_initialized(cache_bin_t *bin) {
+ return bin->stack_head == NULL;
+}
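
The trick behind cache_bin_init() is that the fast path never keeps full pointers to the stack boundaries: low_bits_full/low_bits_empty are just the low 16 bits of those addresses, and counts are recovered with wrap-safe 16-bit subtraction (roughly what cache_bin_diff() in the header boils down to; a sketch with hypothetical names):

    #include <stdint.h>

    /* The stack fills from empty_position downward, so the live count is
     * (empty - head) / sizeof(void *). Plain uint16_t subtraction stays
     * correct even when the low 16 bits have wrapped, as long as the true
     * distance fits in 16 bits (the per-tcache stack region is kept small
     * enough for this to hold). */
    static inline uint16_t
    ncached_sketch(uint16_t low_bits_empty, uint16_t low_bits_head) {
        return (uint16_t)((uint16_t)(low_bits_empty - low_bits_head)
            / sizeof(void *));
    }
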
diff --git a/contrib/jemalloc/src/ckh.c b/contrib/jemalloc/src/ckh.c
index 1bf6df5a115e..8db4319c51d6 100644
--- a/contrib/jemalloc/src/ckh.c
+++ b/contrib/jemalloc/src/ckh.c
@@ -34,7 +34,6 @@
* respectively.
*
******************************************************************************/
-#define JEMALLOC_CKH_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/ckh.h"
@@ -357,14 +356,14 @@ ckh_shrink(tsd_t *tsd, ckh_t *ckh) {
}
bool
-ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *ckh_hash,
ckh_keycomp_t *keycomp) {
bool ret;
size_t mincells, usize;
unsigned lg_mincells;
assert(minitems > 0);
- assert(hash != NULL);
+ assert(ckh_hash != NULL);
assert(keycomp != NULL);
#ifdef CKH_COUNT
@@ -393,7 +392,7 @@ ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
}
ckh->lg_minbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
ckh->lg_curbuckets = lg_mincells - LG_CKH_BUCKET_CELLS;
- ckh->hash = hash;
+ ckh->hash = ckh_hash;
ckh->keycomp = keycomp;
usize = sz_sa2u(sizeof(ckhc_t) << lg_mincells, CACHELINE);
diff --git a/contrib/jemalloc/src/counter.c b/contrib/jemalloc/src/counter.c
new file mode 100644
index 000000000000..8f1ae3af45ed
--- /dev/null
+++ b/contrib/jemalloc/src/counter.c
@@ -0,0 +1,30 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/counter.h"
+
+bool
+counter_accum_init(counter_accum_t *counter, uint64_t interval) {
+ if (LOCKEDINT_MTX_INIT(counter->mtx, "counter_accum",
+ WITNESS_RANK_COUNTER_ACCUM, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ locked_init_u64_unsynchronized(&counter->accumbytes, 0);
+ counter->interval = interval;
+ return false;
+}
+
+void
+counter_prefork(tsdn_t *tsdn, counter_accum_t *counter) {
+ LOCKEDINT_MTX_PREFORK(tsdn, counter->mtx);
+}
+
+void
+counter_postfork_parent(tsdn_t *tsdn, counter_accum_t *counter) {
+ LOCKEDINT_MTX_POSTFORK_PARENT(tsdn, counter->mtx);
+}
+
+void
+counter_postfork_child(tsdn_t *tsdn, counter_accum_t *counter) {
+ LOCKEDINT_MTX_POSTFORK_CHILD(tsdn, counter->mtx);
+}
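
counter_accum_init() only sets up the state; the accumulation itself lives in counter.h and is not part of this hunk. The intent, as a standalone sketch under that assumption: accumulate bytes and report when another interval's worth has passed, so the caller can trigger an event such as a stats dump.

    #include <stdbool.h>
    #include <stdint.h>

    typedef struct { uint64_t accumbytes, interval; } counter_sketch_t;

    /* Single-threaded sketch; the real code goes through the locked/
     * atomic helpers seen above. */
    static bool
    counter_accum_sketch(counter_sketch_t *c, uint64_t bytes) {
        c->accumbytes += bytes;
        if (c->accumbytes >= c->interval) {
            c->accumbytes -= c->interval;  /* keep the remainder */
            return true;                   /* interval crossed: fire */
        }
        return false;
    }
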
diff --git a/contrib/jemalloc/src/ctl.c b/contrib/jemalloc/src/ctl.c
index 48afaa61f4ee..135271bafae7 100644
--- a/contrib/jemalloc/src/ctl.c
+++ b/contrib/jemalloc/src/ctl.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -6,8 +5,16 @@
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/inspect.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
+#include "jemalloc/internal/peak_event.h"
+#include "jemalloc/internal/prof_data.h"
+#include "jemalloc/internal/prof_log.h"
+#include "jemalloc/internal/prof_recent.h"
+#include "jemalloc/internal/prof_stats.h"
+#include "jemalloc/internal/prof_sys.h"
+#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/util.h"
@@ -60,6 +67,8 @@ CTL_PROTO(background_thread)
CTL_PROTO(max_background_threads)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
+CTL_PROTO(thread_peak_read)
+CTL_PROTO(thread_peak_reset)
CTL_PROTO(thread_prof_name)
CTL_PROTO(thread_prof_active)
CTL_PROTO(thread_arena)
@@ -67,6 +76,7 @@ CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
+CTL_PROTO(thread_idle)
CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
@@ -81,7 +91,20 @@ CTL_PROTO(config_utrace)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_abort_conf)
+CTL_PROTO(opt_cache_oblivious)
+CTL_PROTO(opt_trust_madvise)
CTL_PROTO(opt_confirm_conf)
+CTL_PROTO(opt_hpa)
+CTL_PROTO(opt_hpa_slab_max_alloc)
+CTL_PROTO(opt_hpa_hugification_threshold)
+CTL_PROTO(opt_hpa_hugify_delay_ms)
+CTL_PROTO(opt_hpa_min_purge_interval_ms)
+CTL_PROTO(opt_hpa_dirty_mult)
+CTL_PROTO(opt_hpa_sec_nshards)
+CTL_PROTO(opt_hpa_sec_max_alloc)
+CTL_PROTO(opt_hpa_sec_max_bytes)
+CTL_PROTO(opt_hpa_sec_bytes_after_flush)
+CTL_PROTO(opt_hpa_sec_batch_fill_extra)
CTL_PROTO(opt_metadata_thp)
CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss)
@@ -89,19 +112,31 @@ CTL_PROTO(opt_narenas)
CTL_PROTO(opt_percpu_arena)
CTL_PROTO(opt_oversize_threshold)
CTL_PROTO(opt_background_thread)
+CTL_PROTO(opt_mutex_max_spin)
CTL_PROTO(opt_max_background_threads)
CTL_PROTO(opt_dirty_decay_ms)
CTL_PROTO(opt_muzzy_decay_ms)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_stats_print_opts)
+CTL_PROTO(opt_stats_interval)
+CTL_PROTO(opt_stats_interval_opts)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_xmalloc)
+CTL_PROTO(opt_experimental_infallible_new)
CTL_PROTO(opt_tcache)
+CTL_PROTO(opt_tcache_max)
+CTL_PROTO(opt_tcache_nslots_small_min)
+CTL_PROTO(opt_tcache_nslots_small_max)
+CTL_PROTO(opt_tcache_nslots_large)
+CTL_PROTO(opt_lg_tcache_nslots_mul)
+CTL_PROTO(opt_tcache_gc_incr_bytes)
+CTL_PROTO(opt_tcache_gc_delay_bytes)
+CTL_PROTO(opt_lg_tcache_flush_small_div)
+CTL_PROTO(opt_lg_tcache_flush_large_div)
CTL_PROTO(opt_thp)
CTL_PROTO(opt_lg_extent_max_active_fit)
-CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
@@ -111,7 +146,14 @@ CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
+CTL_PROTO(opt_prof_leak_error)
CTL_PROTO(opt_prof_accum)
+CTL_PROTO(opt_prof_recent_alloc_max)
+CTL_PROTO(opt_prof_stats)
+CTL_PROTO(opt_prof_sys_thread_name)
+CTL_PROTO(opt_prof_time_res)
+CTL_PROTO(opt_lg_san_uaf_align)
+CTL_PROTO(opt_zero_realloc)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
@@ -121,6 +163,7 @@ CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_reset)
CTL_PROTO(arena_i_destroy)
CTL_PROTO(arena_i_dss)
+CTL_PROTO(arena_i_oversize_threshold)
CTL_PROTO(arena_i_dirty_decay_ms)
CTL_PROTO(arena_i_muzzy_decay_ms)
CTL_PROTO(arena_i_extent_hooks)
@@ -148,11 +191,18 @@ CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_gdump)
+CTL_PROTO(prof_prefix)
CTL_PROTO(prof_reset)
CTL_PROTO(prof_interval)
CTL_PROTO(lg_prof_sample)
CTL_PROTO(prof_log_start)
CTL_PROTO(prof_log_stop)
+CTL_PROTO(prof_stats_bins_i_live)
+CTL_PROTO(prof_stats_bins_i_accum)
+INDEX_PROTO(prof_stats_bins_i)
+CTL_PROTO(prof_stats_lextents_i_live)
+CTL_PROTO(prof_stats_lextents_i_accum)
+INDEX_PROTO(prof_stats_lextents_i)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
@@ -188,6 +238,39 @@ CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes)
CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes)
CTL_PROTO(stats_arenas_i_extents_j_retained_bytes)
INDEX_PROTO(stats_arenas_i_extents_j)
+CTL_PROTO(stats_arenas_i_hpa_shard_npurge_passes)
+CTL_PROTO(stats_arenas_i_hpa_shard_npurges)
+CTL_PROTO(stats_arenas_i_hpa_shard_nhugifies)
+CTL_PROTO(stats_arenas_i_hpa_shard_ndehugifies)
+
+/* We have a set of stats for full slabs. */
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)
+
+/* A parallel set for the empty slabs. */
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)
+
+/*
+ * And one for the slabs that are neither empty nor full, but indexed by how
+ * full they are.
+ */
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)
+CTL_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)
+
+INDEX_PROTO(stats_arenas_i_hpa_shard_nonfull_slabs_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_uptime)
CTL_PROTO(stats_arenas_i_dss)
@@ -209,8 +292,10 @@ CTL_PROTO(stats_arenas_i_base)
CTL_PROTO(stats_arenas_i_internal)
CTL_PROTO(stats_arenas_i_metadata_thp)
CTL_PROTO(stats_arenas_i_tcache_bytes)
+CTL_PROTO(stats_arenas_i_tcache_stashed_bytes)
CTL_PROTO(stats_arenas_i_resident)
CTL_PROTO(stats_arenas_i_abandoned_vm)
+CTL_PROTO(stats_arenas_i_hpa_sec_bytes)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
@@ -222,12 +307,21 @@ CTL_PROTO(stats_metadata_thp)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
CTL_PROTO(stats_retained)
+CTL_PROTO(stats_zero_reallocs)
CTL_PROTO(experimental_hooks_install)
CTL_PROTO(experimental_hooks_remove)
+CTL_PROTO(experimental_hooks_prof_backtrace)
+CTL_PROTO(experimental_hooks_prof_dump)
+CTL_PROTO(experimental_hooks_safety_check_abort)
+CTL_PROTO(experimental_thread_activity_callback)
CTL_PROTO(experimental_utilization_query)
CTL_PROTO(experimental_utilization_batch_query)
CTL_PROTO(experimental_arenas_i_pactivep)
INDEX_PROTO(experimental_arenas_i)
+CTL_PROTO(experimental_prof_recent_alloc_max)
+CTL_PROTO(experimental_prof_recent_alloc_dump)
+CTL_PROTO(experimental_batch_alloc)
+CTL_PROTO(experimental_arenas_create_ext)
#define MUTEX_STATS_CTL_PROTO_GEN(n) \
CTL_PROTO(stats_##n##_num_ops) \
@@ -275,6 +369,11 @@ static const ctl_named_node_t thread_tcache_node[] = {
{NAME("flush"), CTL(thread_tcache_flush)}
};
+static const ctl_named_node_t thread_peak_node[] = {
+ {NAME("read"), CTL(thread_peak_read)},
+ {NAME("reset"), CTL(thread_peak_reset)},
+};
+
static const ctl_named_node_t thread_prof_node[] = {
{NAME("name"), CTL(thread_prof_name)},
{NAME("active"), CTL(thread_prof_active)}
@@ -287,7 +386,9 @@ static const ctl_named_node_t thread_node[] = {
{NAME("deallocated"), CTL(thread_deallocated)},
{NAME("deallocatedp"), CTL(thread_deallocatedp)},
{NAME("tcache"), CHILD(named, thread_tcache)},
- {NAME("prof"), CHILD(named, thread_prof)}
+ {NAME("peak"), CHILD(named, thread_peak)},
+ {NAME("prof"), CHILD(named, thread_prof)},
+ {NAME("idle"), CTL(thread_idle)}
};
static const ctl_named_node_t config_node[] = {
@@ -308,27 +409,60 @@ static const ctl_named_node_t config_node[] = {
static const ctl_named_node_t opt_node[] = {
{NAME("abort"), CTL(opt_abort)},
{NAME("abort_conf"), CTL(opt_abort_conf)},
+ {NAME("cache_oblivious"), CTL(opt_cache_oblivious)},
+ {NAME("trust_madvise"), CTL(opt_trust_madvise)},
{NAME("confirm_conf"), CTL(opt_confirm_conf)},
+ {NAME("hpa"), CTL(opt_hpa)},
+ {NAME("hpa_slab_max_alloc"), CTL(opt_hpa_slab_max_alloc)},
+ {NAME("hpa_hugification_threshold"),
+ CTL(opt_hpa_hugification_threshold)},
+ {NAME("hpa_hugify_delay_ms"), CTL(opt_hpa_hugify_delay_ms)},
+ {NAME("hpa_min_purge_interval_ms"), CTL(opt_hpa_min_purge_interval_ms)},
+ {NAME("hpa_dirty_mult"), CTL(opt_hpa_dirty_mult)},
+ {NAME("hpa_sec_nshards"), CTL(opt_hpa_sec_nshards)},
+ {NAME("hpa_sec_max_alloc"), CTL(opt_hpa_sec_max_alloc)},
+ {NAME("hpa_sec_max_bytes"), CTL(opt_hpa_sec_max_bytes)},
+ {NAME("hpa_sec_bytes_after_flush"),
+ CTL(opt_hpa_sec_bytes_after_flush)},
+ {NAME("hpa_sec_batch_fill_extra"),
+ CTL(opt_hpa_sec_batch_fill_extra)},
{NAME("metadata_thp"), CTL(opt_metadata_thp)},
{NAME("retain"), CTL(opt_retain)},
{NAME("dss"), CTL(opt_dss)},
{NAME("narenas"), CTL(opt_narenas)},
{NAME("percpu_arena"), CTL(opt_percpu_arena)},
{NAME("oversize_threshold"), CTL(opt_oversize_threshold)},
+ {NAME("mutex_max_spin"), CTL(opt_mutex_max_spin)},
{NAME("background_thread"), CTL(opt_background_thread)},
{NAME("max_background_threads"), CTL(opt_max_background_threads)},
{NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
{NAME("stats_print"), CTL(opt_stats_print)},
{NAME("stats_print_opts"), CTL(opt_stats_print_opts)},
+ {NAME("stats_interval"), CTL(opt_stats_interval)},
+ {NAME("stats_interval_opts"), CTL(opt_stats_interval_opts)},
{NAME("junk"), CTL(opt_junk)},
{NAME("zero"), CTL(opt_zero)},
{NAME("utrace"), CTL(opt_utrace)},
{NAME("xmalloc"), CTL(opt_xmalloc)},
+ {NAME("experimental_infallible_new"),
+ CTL(opt_experimental_infallible_new)},
{NAME("tcache"), CTL(opt_tcache)},
+ {NAME("tcache_max"), CTL(opt_tcache_max)},
+ {NAME("tcache_nslots_small_min"),
+ CTL(opt_tcache_nslots_small_min)},
+ {NAME("tcache_nslots_small_max"),
+ CTL(opt_tcache_nslots_small_max)},
+ {NAME("tcache_nslots_large"), CTL(opt_tcache_nslots_large)},
+ {NAME("lg_tcache_nslots_mul"), CTL(opt_lg_tcache_nslots_mul)},
+ {NAME("tcache_gc_incr_bytes"), CTL(opt_tcache_gc_incr_bytes)},
+ {NAME("tcache_gc_delay_bytes"), CTL(opt_tcache_gc_delay_bytes)},
+ {NAME("lg_tcache_flush_small_div"),
+ CTL(opt_lg_tcache_flush_small_div)},
+ {NAME("lg_tcache_flush_large_div"),
+ CTL(opt_lg_tcache_flush_large_div)},
{NAME("thp"), CTL(opt_thp)},
{NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
- {NAME("lg_tcache_max"), CTL(opt_lg_tcache_max)},
{NAME("prof"), CTL(opt_prof)},
{NAME("prof_prefix"), CTL(opt_prof_prefix)},
{NAME("prof_active"), CTL(opt_prof_active)},
@@ -338,7 +472,14 @@ static const ctl_named_node_t opt_node[] = {
{NAME("prof_gdump"), CTL(opt_prof_gdump)},
{NAME("prof_final"), CTL(opt_prof_final)},
{NAME("prof_leak"), CTL(opt_prof_leak)},
- {NAME("prof_accum"), CTL(opt_prof_accum)}
+ {NAME("prof_leak_error"), CTL(opt_prof_leak_error)},
+ {NAME("prof_accum"), CTL(opt_prof_accum)},
+ {NAME("prof_recent_alloc_max"), CTL(opt_prof_recent_alloc_max)},
+ {NAME("prof_stats"), CTL(opt_prof_stats)},
+ {NAME("prof_sys_thread_name"), CTL(opt_prof_sys_thread_name)},
+ {NAME("prof_time_resolution"), CTL(opt_prof_time_res)},
+ {NAME("lg_san_uaf_align"), CTL(opt_lg_san_uaf_align)},
+ {NAME("zero_realloc"), CTL(opt_zero_realloc)}
};
static const ctl_named_node_t tcache_node[] = {
@@ -354,6 +495,11 @@ static const ctl_named_node_t arena_i_node[] = {
{NAME("reset"), CTL(arena_i_reset)},
{NAME("destroy"), CTL(arena_i_destroy)},
{NAME("dss"), CTL(arena_i_dss)},
+ /*
+ * Undocumented for now, since we anticipate an arena API in flux after
+ * we cut the last 5-series release.
+ */
+ {NAME("oversize_threshold"), CTL(arena_i_oversize_threshold)},
{NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
{NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
{NAME("extent_hooks"), CTL(arena_i_extent_hooks)},
@@ -408,17 +554,51 @@ static const ctl_named_node_t arenas_node[] = {
{NAME("lookup"), CTL(arenas_lookup)}
};
+static const ctl_named_node_t prof_stats_bins_i_node[] = {
+ {NAME("live"), CTL(prof_stats_bins_i_live)},
+ {NAME("accum"), CTL(prof_stats_bins_i_accum)}
+};
+
+static const ctl_named_node_t super_prof_stats_bins_i_node[] = {
+ {NAME(""), CHILD(named, prof_stats_bins_i)}
+};
+
+static const ctl_indexed_node_t prof_stats_bins_node[] = {
+ {INDEX(prof_stats_bins_i)}
+};
+
+static const ctl_named_node_t prof_stats_lextents_i_node[] = {
+ {NAME("live"), CTL(prof_stats_lextents_i_live)},
+ {NAME("accum"), CTL(prof_stats_lextents_i_accum)}
+};
+
+static const ctl_named_node_t super_prof_stats_lextents_i_node[] = {
+ {NAME(""), CHILD(named, prof_stats_lextents_i)}
+};
+
+static const ctl_indexed_node_t prof_stats_lextents_node[] = {
+ {INDEX(prof_stats_lextents_i)}
+};
+
+static const ctl_named_node_t prof_stats_node[] = {
+ {NAME("bins"), CHILD(indexed, prof_stats_bins)},
+ {NAME("lextents"), CHILD(indexed, prof_stats_lextents)},
+};
+
static const ctl_named_node_t prof_node[] = {
{NAME("thread_active_init"), CTL(prof_thread_active_init)},
{NAME("active"), CTL(prof_active)},
{NAME("dump"), CTL(prof_dump)},
{NAME("gdump"), CTL(prof_gdump)},
+ {NAME("prefix"), CTL(prof_prefix)},
{NAME("reset"), CTL(prof_reset)},
{NAME("interval"), CTL(prof_interval)},
{NAME("lg_sample"), CTL(lg_prof_sample)},
{NAME("log_start"), CTL(prof_log_start)},
- {NAME("log_stop"), CTL(prof_log_stop)}
+ {NAME("log_stop"), CTL(prof_log_stop)},
+ {NAME("stats"), CHILD(named, prof_stats)}
};
+
static const ctl_named_node_t stats_arenas_i_small_node[] = {
{NAME("allocated"), CTL(stats_arenas_i_small_allocated)},
{NAME("nmalloc"), CTL(stats_arenas_i_small_nmalloc)},
@@ -521,6 +701,75 @@ MUTEX_PROF_ARENA_MUTEXES
#undef OP
};
+static const ctl_named_node_t stats_arenas_i_hpa_shard_full_slabs_node[] = {
+ {NAME("npageslabs_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge)},
+ {NAME("npageslabs_huge"),
+ CTL(stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge)},
+ {NAME("nactive_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge)},
+ {NAME("nactive_huge"),
+ CTL(stats_arenas_i_hpa_shard_full_slabs_nactive_huge)},
+ {NAME("ndirty_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge)},
+ {NAME("ndirty_huge"),
+ CTL(stats_arenas_i_hpa_shard_full_slabs_ndirty_huge)}
+};
+
+static const ctl_named_node_t stats_arenas_i_hpa_shard_empty_slabs_node[] = {
+ {NAME("npageslabs_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge)},
+ {NAME("npageslabs_huge"),
+ CTL(stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge)},
+ {NAME("nactive_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge)},
+ {NAME("nactive_huge"),
+ CTL(stats_arenas_i_hpa_shard_empty_slabs_nactive_huge)},
+ {NAME("ndirty_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge)},
+ {NAME("ndirty_huge"),
+ CTL(stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge)}
+};
+
+static const ctl_named_node_t stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = {
+ {NAME("npageslabs_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge)},
+ {NAME("npageslabs_huge"),
+ CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge)},
+ {NAME("nactive_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge)},
+ {NAME("nactive_huge"),
+ CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge)},
+ {NAME("ndirty_nonhuge"),
+ CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge)},
+ {NAME("ndirty_huge"),
+ CTL(stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge)}
+};
+
+static const ctl_named_node_t super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node[] = {
+ {NAME(""),
+ CHILD(named, stats_arenas_i_hpa_shard_nonfull_slabs_j)}
+};
+
+static const ctl_indexed_node_t stats_arenas_i_hpa_shard_nonfull_slabs_node[] =
+{
+ {INDEX(stats_arenas_i_hpa_shard_nonfull_slabs_j)}
+};
+
+static const ctl_named_node_t stats_arenas_i_hpa_shard_node[] = {
+ {NAME("full_slabs"), CHILD(named,
+ stats_arenas_i_hpa_shard_full_slabs)},
+ {NAME("empty_slabs"), CHILD(named,
+ stats_arenas_i_hpa_shard_empty_slabs)},
+ {NAME("nonfull_slabs"), CHILD(indexed,
+ stats_arenas_i_hpa_shard_nonfull_slabs)},
+
+ {NAME("npurge_passes"), CTL(stats_arenas_i_hpa_shard_npurge_passes)},
+ {NAME("npurges"), CTL(stats_arenas_i_hpa_shard_npurges)},
+ {NAME("nhugifies"), CTL(stats_arenas_i_hpa_shard_nhugifies)},
+ {NAME("ndehugifies"), CTL(stats_arenas_i_hpa_shard_ndehugifies)}
+};
+
static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("nthreads"), CTL(stats_arenas_i_nthreads)},
{NAME("uptime"), CTL(stats_arenas_i_uptime)},
@@ -543,14 +792,18 @@ static const ctl_named_node_t stats_arenas_i_node[] = {
{NAME("internal"), CTL(stats_arenas_i_internal)},
{NAME("metadata_thp"), CTL(stats_arenas_i_metadata_thp)},
{NAME("tcache_bytes"), CTL(stats_arenas_i_tcache_bytes)},
+ {NAME("tcache_stashed_bytes"),
+ CTL(stats_arenas_i_tcache_stashed_bytes)},
{NAME("resident"), CTL(stats_arenas_i_resident)},
{NAME("abandoned_vm"), CTL(stats_arenas_i_abandoned_vm)},
+ {NAME("hpa_sec_bytes"), CTL(stats_arenas_i_hpa_sec_bytes)},
{NAME("small"), CHILD(named, stats_arenas_i_small)},
{NAME("large"), CHILD(named, stats_arenas_i_large)},
{NAME("bins"), CHILD(indexed, stats_arenas_i_bins)},
{NAME("lextents"), CHILD(indexed, stats_arenas_i_lextents)},
{NAME("extents"), CHILD(indexed, stats_arenas_i_extents)},
- {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)}
+ {NAME("mutexes"), CHILD(named, stats_arenas_i_mutexes)},
+ {NAME("hpa_shard"), CHILD(named, stats_arenas_i_hpa_shard)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
{NAME(""), CHILD(named, stats_arenas_i)}
@@ -589,12 +842,21 @@ static const ctl_named_node_t stats_node[] = {
{NAME("background_thread"),
CHILD(named, stats_background_thread)},
{NAME("mutexes"), CHILD(named, stats_mutexes)},
- {NAME("arenas"), CHILD(indexed, stats_arenas)}
+ {NAME("arenas"), CHILD(indexed, stats_arenas)},
+ {NAME("zero_reallocs"), CTL(stats_zero_reallocs)},
};
static const ctl_named_node_t experimental_hooks_node[] = {
{NAME("install"), CTL(experimental_hooks_install)},
- {NAME("remove"), CTL(experimental_hooks_remove)}
+ {NAME("remove"), CTL(experimental_hooks_remove)},
+ {NAME("prof_backtrace"), CTL(experimental_hooks_prof_backtrace)},
+ {NAME("prof_dump"), CTL(experimental_hooks_prof_dump)},
+ {NAME("safety_check_abort"), CTL(experimental_hooks_safety_check_abort)},
+};
+
+static const ctl_named_node_t experimental_thread_node[] = {
+ {NAME("activity_callback"),
+ CTL(experimental_thread_activity_callback)}
};
static const ctl_named_node_t experimental_utilization_node[] = {
@@ -613,10 +875,19 @@ static const ctl_indexed_node_t experimental_arenas_node[] = {
{INDEX(experimental_arenas_i)}
};
+static const ctl_named_node_t experimental_prof_recent_node[] = {
+ {NAME("alloc_max"), CTL(experimental_prof_recent_alloc_max)},
+ {NAME("alloc_dump"), CTL(experimental_prof_recent_alloc_dump)},
+};
+
static const ctl_named_node_t experimental_node[] = {
{NAME("hooks"), CHILD(named, experimental_hooks)},
{NAME("utilization"), CHILD(named, experimental_utilization)},
- {NAME("arenas"), CHILD(indexed, experimental_arenas)}
+ {NAME("arenas"), CHILD(indexed, experimental_arenas)},
+ {NAME("arenas_create_ext"), CTL(experimental_arenas_create_ext)},
+ {NAME("prof_recent"), CHILD(named, experimental_prof_recent)},
+ {NAME("batch_alloc"), CTL(experimental_batch_alloc)},
+ {NAME("thread"), CHILD(named, experimental_thread)}
};
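
All of the new nodes are reachable through the usual mallctl() namespace. For instance, the per-thread peak counters registered in thread_peak_node above can be read and reset like this (error handling elided; the value is read as a uint64_t, matching the read/reset split above):

    #include <jemalloc/jemalloc.h>
    #include <stdio.h>

    static void
    show_thread_peak(void) {
        uint64_t peak;
        size_t sz = sizeof(peak);

        if (mallctl("thread.peak.read", &peak, &sz, NULL, 0) == 0) {
            printf("peak: %llu bytes\n", (unsigned long long)peak);
        }
        /* Write-only node: start a fresh measurement interval. */
        mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
    }

The indexed HPA stats follow the same pattern, e.g. "stats.arenas.0.hpa_shard.npurges".
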
static const ctl_named_node_t root_node[] = {
@@ -650,28 +921,13 @@ static const ctl_named_node_t super_root_node[] = {
* synchronized by the ctl mutex.
*/
static void
-ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
-#ifdef JEMALLOC_ATOMIC_U64
- uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
- uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
- atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED);
-#else
- *dst += *src;
-#endif
-}
-
-/* Likewise: with ctl mutex synchronization, reading is simple. */
-static uint64_t
-ctl_arena_stats_read_u64(arena_stats_u64_t *p) {
-#ifdef JEMALLOC_ATOMIC_U64
- return atomic_load_u64(p, ATOMIC_RELAXED);
-#else
- return *p;
-#endif
+ctl_accum_locked_u64(locked_u64_t *dst, locked_u64_t *src) {
+ locked_inc_u64_unsynchronized(dst,
+ locked_read_u64_unsynchronized(src));
}
static void
-accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
+ctl_accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
@@ -783,11 +1039,15 @@ ctl_arena_clear(ctl_arena_t *ctl_arena) {
ctl_arena->astats->nfills_small = 0;
ctl_arena->astats->nflushes_small = 0;
memset(ctl_arena->astats->bstats, 0, SC_NBINS *
- sizeof(bin_stats_t));
+ sizeof(bin_stats_data_t));
memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) *
sizeof(arena_stats_large_t));
memset(ctl_arena->astats->estats, 0, SC_NPSIZES *
- sizeof(arena_stats_extents_t));
+ sizeof(pac_estats_t));
+ memset(&ctl_arena->astats->hpastats, 0,
+ sizeof(hpa_shard_stats_t));
+ memset(&ctl_arena->astats->secstats, 0,
+ sizeof(sec_stats_t));
}
}
@@ -801,22 +1061,19 @@ ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
&ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
&ctl_arena->pdirty, &ctl_arena->pmuzzy,
&ctl_arena->astats->astats, ctl_arena->astats->bstats,
- ctl_arena->astats->lstats, ctl_arena->astats->estats);
+ ctl_arena->astats->lstats, ctl_arena->astats->estats,
+ &ctl_arena->astats->hpastats, &ctl_arena->astats->secstats);
for (i = 0; i < SC_NBINS; i++) {
- ctl_arena->astats->allocated_small +=
- ctl_arena->astats->bstats[i].curregs *
+ bin_stats_t *bstats =
+ &ctl_arena->astats->bstats[i].stats_data;
+ ctl_arena->astats->allocated_small += bstats->curregs *
sz_index2size(i);
- ctl_arena->astats->nmalloc_small +=
- ctl_arena->astats->bstats[i].nmalloc;
- ctl_arena->astats->ndalloc_small +=
- ctl_arena->astats->bstats[i].ndalloc;
- ctl_arena->astats->nrequests_small +=
- ctl_arena->astats->bstats[i].nrequests;
- ctl_arena->astats->nfills_small +=
- ctl_arena->astats->bstats[i].nfills;
- ctl_arena->astats->nflushes_small +=
- ctl_arena->astats->bstats[i].nflushes;
+ ctl_arena->astats->nmalloc_small += bstats->nmalloc;
+ ctl_arena->astats->ndalloc_small += bstats->ndalloc;
+ ctl_arena->astats->nrequests_small += bstats->nrequests;
+ ctl_arena->astats->nfills_small += bstats->nfills;
+ ctl_arena->astats->nflushes_small += bstats->nflushes;
}
} else {
arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
@@ -848,27 +1105,32 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
ctl_arena_stats_t *astats = ctl_arena->astats;
if (!destroyed) {
- accum_atomic_zu(&sdstats->astats.mapped,
- &astats->astats.mapped);
- accum_atomic_zu(&sdstats->astats.retained,
- &astats->astats.retained);
- accum_atomic_zu(&sdstats->astats.extent_avail,
- &astats->astats.extent_avail);
- }
-
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
- &astats->astats.decay_dirty.npurge);
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
- &astats->astats.decay_dirty.nmadvise);
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
- &astats->astats.decay_dirty.purged);
-
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
- &astats->astats.decay_muzzy.npurge);
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
- &astats->astats.decay_muzzy.nmadvise);
- ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
- &astats->astats.decay_muzzy.purged);
+ sdstats->astats.mapped += astats->astats.mapped;
+ sdstats->astats.pa_shard_stats.pac_stats.retained
+ += astats->astats.pa_shard_stats.pac_stats.retained;
+ sdstats->astats.pa_shard_stats.edata_avail
+ += astats->astats.pa_shard_stats.edata_avail;
+ }
+
+ ctl_accum_locked_u64(
+ &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge,
+ &astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge);
+ ctl_accum_locked_u64(
+ &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise,
+ &astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise);
+ ctl_accum_locked_u64(
+ &sdstats->astats.pa_shard_stats.pac_stats.decay_dirty.purged,
+ &astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged);
+
+ ctl_accum_locked_u64(
+ &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge,
+ &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge);
+ ctl_accum_locked_u64(
+ &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise,
+ &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise);
+ ctl_accum_locked_u64(
+ &sdstats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged,
+ &astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged);
#define OP(mtx) malloc_mutex_prof_merge( \
&(sdstats->astats.mutex_prof_data[ \
@@ -878,14 +1140,11 @@ ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
MUTEX_PROF_ARENA_MUTEXES
#undef OP
if (!destroyed) {
- accum_atomic_zu(&sdstats->astats.base,
- &astats->astats.base);
- accum_atomic_zu(&sdstats->astats.internal,
+ sdstats->astats.base += astats->astats.base;
+ sdstats->astats.resident += astats->astats.resident;
+ sdstats->astats.metadata_thp += astats->astats.metadata_thp;
+ ctl_accum_atomic_zu(&sdstats->astats.internal,
&astats->astats.internal);
- accum_atomic_zu(&sdstats->astats.resident,
- &astats->astats.resident);
- accum_atomic_zu(&sdstats->astats.metadata_thp,
- &astats->astats.metadata_thp);
} else {
assert(atomic_load_zu(
&astats->astats.internal, ATOMIC_RELAXED) == 0);
@@ -903,23 +1162,23 @@ MUTEX_PROF_ARENA_MUTEXES
sdstats->nflushes_small += astats->nflushes_small;
if (!destroyed) {
- accum_atomic_zu(&sdstats->astats.allocated_large,
- &astats->astats.allocated_large);
+ sdstats->astats.allocated_large +=
+ astats->astats.allocated_large;
} else {
- assert(atomic_load_zu(&astats->astats.allocated_large,
- ATOMIC_RELAXED) == 0);
+ assert(astats->astats.allocated_large == 0);
}
- ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
- &astats->astats.nmalloc_large);
- ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
- &astats->astats.ndalloc_large);
- ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large,
- &astats->astats.nrequests_large);
- accum_atomic_zu(&sdstats->astats.abandoned_vm,
- &astats->astats.abandoned_vm);
-
- accum_atomic_zu(&sdstats->astats.tcache_bytes,
- &astats->astats.tcache_bytes);
+ sdstats->astats.nmalloc_large += astats->astats.nmalloc_large;
+ sdstats->astats.ndalloc_large += astats->astats.ndalloc_large;
+ sdstats->astats.nrequests_large
+ += astats->astats.nrequests_large;
+ sdstats->astats.nflushes_large += astats->astats.nflushes_large;
+ ctl_accum_atomic_zu(
+ &sdstats->astats.pa_shard_stats.pac_stats.abandoned_vm,
+ &astats->astats.pa_shard_stats.pac_stats.abandoned_vm);
+
+ sdstats->astats.tcache_bytes += astats->astats.tcache_bytes;
+ sdstats->astats.tcache_stashed_bytes +=
+ astats->astats.tcache_stashed_bytes;
if (ctl_arena->arena_ind == 0) {
sdstats->astats.uptime = astats->astats.uptime;
@@ -927,29 +1186,26 @@ MUTEX_PROF_ARENA_MUTEXES
/* Merge bin stats. */
for (i = 0; i < SC_NBINS; i++) {
- sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
- sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
- sdstats->bstats[i].nrequests +=
- astats->bstats[i].nrequests;
+ bin_stats_t *bstats = &astats->bstats[i].stats_data;
+ bin_stats_t *merged = &sdstats->bstats[i].stats_data;
+ merged->nmalloc += bstats->nmalloc;
+ merged->ndalloc += bstats->ndalloc;
+ merged->nrequests += bstats->nrequests;
if (!destroyed) {
- sdstats->bstats[i].curregs +=
- astats->bstats[i].curregs;
+ merged->curregs += bstats->curregs;
} else {
- assert(astats->bstats[i].curregs == 0);
+ assert(bstats->curregs == 0);
}
- sdstats->bstats[i].nfills += astats->bstats[i].nfills;
- sdstats->bstats[i].nflushes +=
- astats->bstats[i].nflushes;
- sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
- sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
+ merged->nfills += bstats->nfills;
+ merged->nflushes += bstats->nflushes;
+ merged->nslabs += bstats->nslabs;
+ merged->reslabs += bstats->reslabs;
if (!destroyed) {
- sdstats->bstats[i].curslabs +=
- astats->bstats[i].curslabs;
- sdstats->bstats[i].nonfull_slabs +=
- astats->bstats[i].nonfull_slabs;
+ merged->curslabs += bstats->curslabs;
+ merged->nonfull_slabs += bstats->nonfull_slabs;
} else {
- assert(astats->bstats[i].curslabs == 0);
- assert(astats->bstats[i].nonfull_slabs == 0);
+ assert(bstats->curslabs == 0);
+ assert(bstats->nonfull_slabs == 0);
}
malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
&astats->bstats[i].mutex_data);
@@ -957,11 +1213,11 @@ MUTEX_PROF_ARENA_MUTEXES
/* Merge stats for large allocations. */
for (i = 0; i < SC_NSIZES - SC_NBINS; i++) {
- ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
+ ctl_accum_locked_u64(&sdstats->lstats[i].nmalloc,
&astats->lstats[i].nmalloc);
- ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
+ ctl_accum_locked_u64(&sdstats->lstats[i].ndalloc,
&astats->lstats[i].ndalloc);
- ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
+ ctl_accum_locked_u64(&sdstats->lstats[i].nrequests,
&astats->lstats[i].nrequests);
if (!destroyed) {
sdstats->lstats[i].curlextents +=
@@ -973,19 +1229,21 @@ MUTEX_PROF_ARENA_MUTEXES
/* Merge extents stats. */
for (i = 0; i < SC_NPSIZES; i++) {
- accum_atomic_zu(&sdstats->estats[i].ndirty,
- &astats->estats[i].ndirty);
- accum_atomic_zu(&sdstats->estats[i].nmuzzy,
- &astats->estats[i].nmuzzy);
- accum_atomic_zu(&sdstats->estats[i].nretained,
- &astats->estats[i].nretained);
- accum_atomic_zu(&sdstats->estats[i].dirty_bytes,
- &astats->estats[i].dirty_bytes);
- accum_atomic_zu(&sdstats->estats[i].muzzy_bytes,
- &astats->estats[i].muzzy_bytes);
- accum_atomic_zu(&sdstats->estats[i].retained_bytes,
- &astats->estats[i].retained_bytes);
+ sdstats->estats[i].ndirty += astats->estats[i].ndirty;
+ sdstats->estats[i].nmuzzy += astats->estats[i].nmuzzy;
+ sdstats->estats[i].nretained
+ += astats->estats[i].nretained;
+ sdstats->estats[i].dirty_bytes
+ += astats->estats[i].dirty_bytes;
+ sdstats->estats[i].muzzy_bytes
+ += astats->estats[i].muzzy_bytes;
+ sdstats->estats[i].retained_bytes
+ += astats->estats[i].retained_bytes;
}
+
+ /* Merge HPA stats. */
+ hpa_shard_stats_accum(&sdstats->hpastats, &astats->hpastats);
+ sec_stats_accum(&sdstats->secstats, &astats->secstats);
}
}
@@ -1001,7 +1259,7 @@ ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
}
static unsigned
-ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
+ctl_arena_init(tsd_t *tsd, const arena_config_t *config) {
unsigned arena_ind;
ctl_arena_t *ctl_arena;
@@ -1019,7 +1277,7 @@ ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
}
/* Initialize new arena. */
- if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
+ if (arena_init(tsd_tsdn(tsd), arena_ind, config) == NULL) {
return UINT_MAX;
}
@@ -1036,8 +1294,11 @@ ctl_background_thread_stats_read(tsdn_t *tsdn) {
if (!have_background_thread ||
background_thread_stats_read(tsdn, stats)) {
memset(stats, 0, sizeof(background_thread_stats_t));
- nstime_init(&stats->run_interval, 0);
+ nstime_init_zero(&stats->run_interval);
}
+ malloc_mutex_prof_copy(
+ &ctl_stats->mutex_prof_data[global_prof_mutex_max_per_bg_thd],
+ &stats->max_counter_per_bg_thd);
}
static void
@@ -1069,21 +1330,17 @@ ctl_refresh(tsdn_t *tsdn) {
if (config_stats) {
ctl_stats->allocated = ctl_sarena->astats->allocated_small +
- atomic_load_zu(&ctl_sarena->astats->astats.allocated_large,
- ATOMIC_RELAXED);
+ ctl_sarena->astats->astats.allocated_large;
ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
- ctl_stats->metadata = atomic_load_zu(
- &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
+ ctl_stats->metadata = ctl_sarena->astats->astats.base +
atomic_load_zu(&ctl_sarena->astats->astats.internal,
ATOMIC_RELAXED);
- ctl_stats->metadata_thp = atomic_load_zu(
- &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED);
- ctl_stats->resident = atomic_load_zu(
- &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
- ctl_stats->mapped = atomic_load_zu(
- &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED);
- ctl_stats->retained = atomic_load_zu(
- &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED);
+ ctl_stats->resident = ctl_sarena->astats->astats.resident;
+ ctl_stats->metadata_thp =
+ ctl_sarena->astats->astats.metadata_thp;
+ ctl_stats->mapped = ctl_sarena->astats->astats.mapped;
+ ctl_stats->retained = ctl_sarena->astats->astats
+ .pa_shard_stats.pac_stats.retained;
ctl_background_thread_stats_read(tsdn);
@@ -1093,8 +1350,20 @@ ctl_refresh(tsdn_t *tsdn) {
malloc_mutex_unlock(tsdn, &mtx);
if (config_prof && opt_prof) {
- READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof,
- bt2gctx_mtx);
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_prof, bt2gctx_mtx);
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_prof_thds_data, tdatas_mtx);
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_prof_dump, prof_dump_mtx);
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_prof_recent_alloc,
+ prof_recent_alloc_mtx);
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_prof_recent_dump,
+ prof_recent_dump_mtx);
+ READ_GLOBAL_MUTEX_PROF_DATA(
+ global_prof_mutex_prof_stats, prof_stats_mtx);
}
if (have_background_thread) {
READ_GLOBAL_MUTEX_PROF_DATA(
@@ -1191,8 +1460,9 @@ label_return:
}
static int
-ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
- size_t *mibp, size_t *depthp) {
+ctl_lookup(tsdn_t *tsdn, const ctl_named_node_t *starting_node,
+ const char *name, const ctl_named_node_t **ending_nodep, size_t *mibp,
+ size_t *depthp) {
int ret;
const char *elm, *tdot, *dot;
size_t elen, i, j;
@@ -1206,7 +1476,7 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
ret = ENOENT;
goto label_return;
}
- node = super_root_node;
+ node = starting_node;
for (i = 0; i < *depthp; i++) {
assert(node);
assert(node->nchildren > 0);
@@ -1220,10 +1490,6 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
if (strlen(child->name) == elen &&
strncmp(elm, child->name, elen) == 0) {
node = child;
- if (nodesp != NULL) {
- nodesp[i] =
- (const ctl_node_t *)node;
- }
mibp[i] = j;
break;
}
@@ -1250,13 +1516,11 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
goto label_return;
}
- if (nodesp != NULL) {
- nodesp[i] = (const ctl_node_t *)node;
- }
mibp[i] = (size_t)index;
}
- if (node->ctl != NULL) {
+ /* Reached the end? */
+ if (node->ctl != NULL || *dot == '\0') {
/* Terminal node. */
if (*dot != '\0') {
/*
@@ -1272,16 +1536,14 @@ ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
}
/* Update elm. */
- if (*dot == '\0') {
- /* No more elements. */
- ret = ENOENT;
- goto label_return;
- }
elm = &dot[1];
dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
strchr(elm, '\0');
elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
}
+ if (ending_nodep != NULL) {
+ *ending_nodep = node;
+ }
ret = 0;
label_return:
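
Note the behavioral change in this hunk: running out of name at an interior node used to be ENOENT, but is now a successful partial lookup (node->ctl == NULL with *dot == '\0'), which is what lets the new ctl_mibnametomib()/ctl_bymibname() entry points below resolve a bare prefix such as "stats.arenas.0". The dotted-name scan itself is unchanged; reduced to essentials it is:

    #include <string.h>

    /* Walk the elements of a dotted ctl name, as ctl_lookup() does. */
    static void
    scan_elements_sketch(const char *name) {
        const char *elm = name;
        while (*elm != '\0') {
            const char *dot = strchr(elm, '.');
            size_t elen = (dot != NULL) ? (size_t)(dot - elm) : strlen(elm);
            /* ... match elm[0..elen) against the current node's children,
             * or parse it as the index of an indexed node ... */
            elm += elen + (dot != NULL ? 1 : 0);
        }
    }
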
@@ -1293,7 +1555,6 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen) {
int ret;
size_t depth;
- ctl_node_t const *nodes[CTL_MAX_DEPTH];
size_t mib[CTL_MAX_DEPTH];
const ctl_named_node_t *node;
@@ -1303,12 +1564,12 @@ ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
}
depth = CTL_MAX_DEPTH;
- ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
+ ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, &node, mib,
+ &depth);
if (ret != 0) {
goto label_return;
}
- node = ctl_named_node(nodes[depth-1]);
if (node != NULL && node->ctl) {
ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
} else {
@@ -1329,26 +1590,19 @@ ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
goto label_return;
}
- ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp);
+ ret = ctl_lookup(tsd_tsdn(tsd), super_root_node, name, NULL, mibp,
+ miblenp);
label_return:
return(ret);
}
-int
-ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
- size_t *oldlenp, void *newp, size_t newlen) {
+static int
+ctl_lookupbymib(tsdn_t *tsdn, const ctl_named_node_t **ending_nodep,
+ const size_t *mib, size_t miblen) {
int ret;
- const ctl_named_node_t *node;
- size_t i;
- if (!ctl_initialized && ctl_init(tsd)) {
- ret = EAGAIN;
- goto label_return;
- }
-
- /* Iterate down the tree. */
- node = super_root_node;
- for (i = 0; i < miblen; i++) {
+ const ctl_named_node_t *node = super_root_node;
+ for (size_t i = 0; i < miblen; i++) {
assert(node);
assert(node->nchildren > 0);
if (ctl_named_node(node->children) != NULL) {
@@ -1363,13 +1617,36 @@ ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
/* Indexed element. */
inode = ctl_indexed_node(node->children);
- node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
+ node = inode->index(tsdn, mib, miblen, mib[i]);
if (node == NULL) {
ret = ENOENT;
goto label_return;
}
}
}
+ assert(ending_nodep != NULL);
+ *ending_nodep = node;
+ ret = 0;
+
+label_return:
+ return(ret);
+}
+
+int
+ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ const ctl_named_node_t *node;
+
+ if (!ctl_initialized && ctl_init(tsd)) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+
+ ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
+ if (ret != 0) {
+ goto label_return;
+ }
/* Call the ctl function. */
if (node && node->ctl) {
@@ -1383,6 +1660,81 @@ label_return:
return(ret);
}
+int
+ctl_mibnametomib(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
+ size_t *miblenp) {
+ int ret;
+ const ctl_named_node_t *node;
+
+ if (!ctl_initialized && ctl_init(tsd)) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+
+ ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
+ if (ret != 0) {
+ goto label_return;
+ }
+ if (node == NULL || node->ctl != NULL) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ assert(miblenp != NULL);
+ assert(*miblenp >= miblen);
+ *miblenp -= miblen;
+ ret = ctl_lookup(tsd_tsdn(tsd), node, name, NULL, mib + miblen,
+ miblenp);
+ *miblenp += miblen;
+label_return:
+ return(ret);
+}
+
+int
+ctl_bymibname(tsd_t *tsd, size_t *mib, size_t miblen, const char *name,
+ size_t *miblenp, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ const ctl_named_node_t *node;
+
+ if (!ctl_initialized && ctl_init(tsd)) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+
+ ret = ctl_lookupbymib(tsd_tsdn(tsd), &node, mib, miblen);
+ if (ret != 0) {
+ goto label_return;
+ }
+ if (node == NULL || node->ctl != NULL) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ assert(miblenp != NULL);
+ assert(*miblenp >= miblen);
+ *miblenp -= miblen;
+ /*
+ * The same node supplies the starting node and stores the ending node.
+ */
+ ret = ctl_lookup(tsd_tsdn(tsd), node, name, &node, mib + miblen,
+ miblenp);
+ *miblenp += miblen;
+ if (ret != 0) {
+ goto label_return;
+ }
+
+ if (node != NULL && node->ctl) {
+ ret = node->ctl(tsd, mib, *miblenp, oldp, oldlenp, newp,
+ newlen);
+ } else {
+ /* The name refers to a partial path through the ctl tree. */
+ ret = ENOENT;
+ }
+
+label_return:
+ return(ret);
+}
+
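For context, the new ctl_mibnametomib()/ctl_bymibname() entry points let a lookup resume from a node that an earlier MIB translation already resolved, so a hot path can skip re-parsing a common name prefix. A minimal caller-side sketch of the long-standing public MIB workflow they generalize, using only the documented mallctl(3) interfaces (illustrative, not part of this diff):

#include <jemalloc/jemalloc.h>

void
read_pactive_repeatedly(void) {
	size_t mib[8];	/* sized to cover the ctl tree's maximum depth */
	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	size_t pactive, sz = sizeof(pactive);

	/* Translate the name once... */
	if (mallctlnametomib("stats.arenas.0.pactive", mib, &miblen) != 0) {
		return;
	}
	/* ...then reuse the MIB for cheap repeated reads. */
	mallctlbymib(mib, miblen, &pactive, &sz, NULL, 0);
}
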
bool
ctl_boot(void) {
if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
@@ -1410,6 +1762,11 @@ ctl_postfork_child(tsdn_t *tsdn) {
malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}
+void
+ctl_mtx_assert_held(tsdn_t *tsdn) {
+ malloc_mutex_assert_owner(tsdn, &ctl_mtx);
+}
+
/******************************************************************************/
/* *_ctl() functions. */
@@ -1427,6 +1784,7 @@ ctl_postfork_child(tsdn_t *tsdn) {
} \
} while (0)
+/* Can read or write, but not both. */
#define READ_XOR_WRITE() do { \
if ((oldp != NULL && oldlenp != NULL) && (newp != NULL || \
newlen != 0)) { \
@@ -1435,12 +1793,31 @@ ctl_postfork_child(tsdn_t *tsdn) {
} \
} while (0)
+/* Can neither read nor write. */
+#define NEITHER_READ_NOR_WRITE() do { \
+ if (oldp != NULL || oldlenp != NULL || newp != NULL || \
+ newlen != 0) { \
+ ret = EPERM; \
+ goto label_return; \
+ } \
+} while (0)
+
+/* Verify that the caller provided exactly enough space for the value read. */
+#define VERIFY_READ(t) do { \
+ if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(t)) { \
+ *oldlenp = 0; \
+ ret = EINVAL; \
+ goto label_return; \
+ } \
+} while (0)
+
#define READ(v, t) do { \
if (oldp != NULL && oldlenp != NULL) { \
if (*oldlenp != sizeof(t)) { \
size_t copylen = (sizeof(t) <= *oldlenp) \
? sizeof(t) : *oldlenp; \
memcpy(oldp, (void *)&(v), copylen); \
+ *oldlenp = copylen; \
ret = EINVAL; \
goto label_return; \
} \
@@ -1458,6 +1835,14 @@ ctl_postfork_child(tsdn_t *tsdn) {
} \
} while (0)
+#define ASSURED_WRITE(v, t) do { \
+ if (newp == NULL || newlen != sizeof(t)) { \
+ ret = EINVAL; \
+ goto label_return; \
+ } \
+ (v) = *(t *)newp; \
+} while (0)
+
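Taken together, these helper macros spell out each node's read/write contract: READONLY/WRITEONLY reject the disallowed direction, NEITHER_READ_NOR_WRITE marks pure actions, and VERIFY_READ/ASSURED_WRITE additionally demand exactly sized buffers. A caller-side sketch of how the contracts surface through mallctl(3), using existing control names (illustrative only):

	unsigned ind;
	size_t sz = sizeof(ind);

	/* "tcache.create": read-only; VERIFY_READ demands an exact-size buffer. */
	mallctl("tcache.create", &ind, &sz, NULL, 0);
	/* "tcache.destroy": write-only; ASSURED_WRITE demands an exact-size input. */
	mallctl("tcache.destroy", NULL, NULL, &ind, sizeof(ind));
	/* "thread.tcache.flush": NEITHER_READ_NOR_WRITE; a pure action. */
	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
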
#define MIB_UNSIGNED(v, i) do { \
if (mib[i] > UINT_MAX) { \
ret = EFAULT; \
@@ -1497,8 +1882,8 @@ label_return: \
#define CTL_RO_CGEN(c, n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1540,8 +1925,8 @@ label_return: \
*/
#define CTL_RO_NL_CGEN(c, n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1559,8 +1944,8 @@ label_return: \
#define CTL_RO_NL_GEN(n, v, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1573,29 +1958,10 @@ label_return: \
return ret; \
}
-#define CTL_TSD_RO_NL_CGEN(c, n, m, t) \
-static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, \
- size_t *oldlenp, void *newp, size_t newlen) { \
- int ret; \
- t oldval; \
- \
- if (!(c)) { \
- return ENOENT; \
- } \
- READONLY(); \
- oldval = (m(tsd)); \
- READ(oldval, t); \
- \
- ret = 0; \
-label_return: \
- return ret; \
-}
-
#define CTL_RO_CONFIG_GEN(n, t) \
static int \
-n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
- void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
+n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) { \
int ret; \
t oldval; \
\
@@ -1761,7 +2127,34 @@ CTL_RO_CONFIG_GEN(config_xmalloc, bool)
CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
+CTL_RO_NL_GEN(opt_cache_oblivious, opt_cache_oblivious, bool)
+CTL_RO_NL_GEN(opt_trust_madvise, opt_trust_madvise, bool)
CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool)
+
+/* HPA options. */
+CTL_RO_NL_GEN(opt_hpa, opt_hpa, bool)
+CTL_RO_NL_GEN(opt_hpa_hugification_threshold,
+ opt_hpa_opts.hugification_threshold, size_t)
+CTL_RO_NL_GEN(opt_hpa_hugify_delay_ms, opt_hpa_opts.hugify_delay_ms, uint64_t)
+CTL_RO_NL_GEN(opt_hpa_min_purge_interval_ms, opt_hpa_opts.min_purge_interval_ms,
+ uint64_t)
+
+/*
+ * This will have to change before we publicly document this option; fxp_t and
+ * its representation are internal implementation details.
+ */
+CTL_RO_NL_GEN(opt_hpa_dirty_mult, opt_hpa_opts.dirty_mult, fxp_t)
+CTL_RO_NL_GEN(opt_hpa_slab_max_alloc, opt_hpa_opts.slab_max_alloc, size_t)
+
+/* HPA SEC options */
+CTL_RO_NL_GEN(opt_hpa_sec_nshards, opt_hpa_sec_opts.nshards, size_t)
+CTL_RO_NL_GEN(opt_hpa_sec_max_alloc, opt_hpa_sec_opts.max_alloc, size_t)
+CTL_RO_NL_GEN(opt_hpa_sec_max_bytes, opt_hpa_sec_opts.max_bytes, size_t)
+CTL_RO_NL_GEN(opt_hpa_sec_bytes_after_flush, opt_hpa_sec_opts.bytes_after_flush,
+ size_t)
+CTL_RO_NL_GEN(opt_hpa_sec_batch_fill_extra, opt_hpa_sec_opts.batch_fill_extra,
+ size_t)
+
CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
const char *)
CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
@@ -1769,6 +2162,7 @@ CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
const char *)
+CTL_RO_NL_GEN(opt_mutex_max_spin, opt_mutex_max_spin, int64_t)
CTL_RO_NL_GEN(opt_oversize_threshold, opt_oversize_threshold, size_t)
CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
@@ -1776,15 +2170,31 @@ CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
+CTL_RO_NL_GEN(opt_stats_interval, opt_stats_interval, int64_t)
+CTL_RO_NL_GEN(opt_stats_interval_opts, opt_stats_interval_opts, const char *)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
+CTL_RO_NL_CGEN(config_enable_cxx, opt_experimental_infallible_new,
+ opt_experimental_infallible_new, bool)
CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
+CTL_RO_NL_GEN(opt_tcache_max, opt_tcache_max, size_t)
+CTL_RO_NL_GEN(opt_tcache_nslots_small_min, opt_tcache_nslots_small_min,
+ unsigned)
+CTL_RO_NL_GEN(opt_tcache_nslots_small_max, opt_tcache_nslots_small_max,
+ unsigned)
+CTL_RO_NL_GEN(opt_tcache_nslots_large, opt_tcache_nslots_large, unsigned)
+CTL_RO_NL_GEN(opt_lg_tcache_nslots_mul, opt_lg_tcache_nslots_mul, ssize_t)
+CTL_RO_NL_GEN(opt_tcache_gc_incr_bytes, opt_tcache_gc_incr_bytes, size_t)
+CTL_RO_NL_GEN(opt_tcache_gc_delay_bytes, opt_tcache_gc_delay_bytes, size_t)
+CTL_RO_NL_GEN(opt_lg_tcache_flush_small_div, opt_lg_tcache_flush_small_div,
+ unsigned)
+CTL_RO_NL_GEN(opt_lg_tcache_flush_large_div, opt_lg_tcache_flush_large_div,
+ unsigned)
CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *)
CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
size_t)
-CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
@@ -1796,6 +2206,18 @@ CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_leak_error, opt_prof_leak_error, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_recent_alloc_max,
+ opt_prof_recent_alloc_max, ssize_t)
+CTL_RO_NL_CGEN(config_prof, opt_prof_stats, opt_prof_stats, bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_sys_thread_name, opt_prof_sys_thread_name,
+ bool)
+CTL_RO_NL_CGEN(config_prof, opt_prof_time_res,
+ prof_time_res_mode_names[opt_prof_time_res], const char *)
+CTL_RO_NL_CGEN(config_uaf_detection, opt_lg_san_uaf_align,
+ opt_lg_san_uaf_align, ssize_t)
+CTL_RO_NL_GEN(opt_zero_realloc,
+ zero_realloc_mode_names[opt_zero_realloc_action], const char *)
/******************************************************************************/
@@ -1843,10 +2265,11 @@ thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
goto label_return;
}
/* Set new arena/tcache associations. */
- arena_migrate(tsd, oldind, newind);
+ arena_migrate(tsd, oldarena, newarena);
if (tcache_available(tsd)) {
tcache_arena_reassociate(tsd_tsdn(tsd),
- tsd_tcachep_get(tsd), newarena);
+ tsd_tcache_slowp_get(tsd), tsd_tcachep_get(tsd),
+ newarena);
}
}
@@ -1855,14 +2278,10 @@ label_return:
return ret;
}
-CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
- uint64_t)
-CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
- uint64_t *)
-CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
- uint64_t)
-CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
- tsd_thread_deallocatedp_get, uint64_t *)
+CTL_RO_NL_GEN(thread_allocated, tsd_thread_allocated_get(tsd), uint64_t)
+CTL_RO_NL_GEN(thread_allocatedp, tsd_thread_allocatedp_get(tsd), uint64_t *)
+CTL_RO_NL_GEN(thread_deallocated, tsd_thread_deallocated_get(tsd), uint64_t)
+CTL_RO_NL_GEN(thread_deallocatedp, tsd_thread_deallocatedp_get(tsd), uint64_t *)
static int
thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib,
@@ -1897,8 +2316,7 @@ thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib,
goto label_return;
}
- READONLY();
- WRITEONLY();
+ NEITHER_READ_NOR_WRITE();
tcache_flush(tsd);
@@ -1908,12 +2326,44 @@ label_return:
}
static int
+thread_peak_read_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen) {
+ int ret;
+ if (!config_stats) {
+ return ENOENT;
+ }
+ READONLY();
+ peak_event_update(tsd);
+ uint64_t result = peak_event_max(tsd);
+ READ(result, uint64_t);
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+thread_peak_reset_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen) {
+ int ret;
+ if (!config_stats) {
+ return ENOENT;
+ }
+ NEITHER_READ_NOR_WRITE();
+ peak_event_zero(tsd);
+ ret = 0;
+label_return:
+ return ret;
+}
+
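These two handlers back the new "thread.peak.read" and "thread.peak.reset" controls (names follow the n##_ctl convention used throughout this file). A short usage sketch, assuming statistics are compiled in:

	uint64_t peak;
	size_t sz = sizeof(peak);

	/* Read the high-water mark of this thread's net allocation, in bytes. */
	mallctl("thread.peak.read", &peak, &sz, NULL, 0);
	/* Restart peak tracking from the current net allocation. */
	mallctl("thread.peak.reset", NULL, NULL, NULL, 0);
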
+static int
thread_prof_name_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret;
- if (!config_prof) {
+ if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -1950,8 +2400,12 @@ thread_prof_active_ctl(tsd_t *tsd, const size_t *mib,
return ENOENT;
}
- oldval = prof_thread_active_get(tsd);
+ oldval = opt_prof ? prof_thread_active_get(tsd) : false;
if (newp != NULL) {
+ if (!opt_prof) {
+ ret = ENOENT;
+ goto label_return;
+ }
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
@@ -1968,6 +2422,39 @@ label_return:
return ret;
}
+static int
+thread_idle_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp,
+ size_t newlen) {
+ int ret;
+
+ NEITHER_READ_NOR_WRITE();
+
+ if (tcache_available(tsd)) {
+ tcache_flush(tsd);
+ }
+ /*
+ * This heuristic is perhaps not the most well-considered. But it
+ * matches the only idling policy we have experience with in the status
+ * quo. Over time we should investigate more principled approaches.
+ */
+ if (opt_narenas > ncpus * 2) {
+ arena_t *arena = arena_choose(tsd, NULL);
+ if (arena != NULL) {
+ arena_decay(tsd_tsdn(tsd), arena, false, true);
+ }
+ /*
+ * The missing arena case is not actually an error; a thread
+ * might be idle before it associates itself to one. This is
+ * unusual, but not wrong.
+ */
+ }
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
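thread_idle_ctl() gives a thread that expects to block a way to hand its cached memory back: it flushes the tcache and, when arenas heavily outnumber CPUs, forces decay on the thread's arena. Usage is a bare action call (illustrative):

	/* Hint that this thread is going idle; nothing is read or written. */
	mallctl("thread.idle", NULL, NULL, NULL, 0);
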
/******************************************************************************/
static int
@@ -1977,7 +2464,8 @@ tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
unsigned tcache_ind;
READONLY();
- if (tcaches_create(tsd, &tcache_ind)) {
+ VERIFY_READ(unsigned);
+ if (tcaches_create(tsd, b0get(), &tcache_ind)) {
ret = EFAULT;
goto label_return;
}
@@ -1995,12 +2483,7 @@ tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
unsigned tcache_ind;
WRITEONLY();
- tcache_ind = UINT_MAX;
- WRITE(tcache_ind, unsigned);
- if (tcache_ind == UINT_MAX) {
- ret = EFAULT;
- goto label_return;
- }
+ ASSURED_WRITE(tcache_ind, unsigned);
tcaches_flush(tsd, tcache_ind);
ret = 0;
@@ -2015,12 +2498,7 @@ tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
unsigned tcache_ind;
WRITEONLY();
- tcache_ind = UINT_MAX;
- WRITE(tcache_ind, unsigned);
- if (tcache_ind == UINT_MAX) {
- ret = EFAULT;
- goto label_return;
- }
+ ASSURED_WRITE(tcache_ind, unsigned);
tcaches_destroy(tsd, tcache_ind);
ret = 0;
@@ -2105,8 +2583,7 @@ arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
int ret;
unsigned arena_ind;
- READONLY();
- WRITEONLY();
+ NEITHER_READ_NOR_WRITE();
MIB_UNSIGNED(arena_ind, 1);
arena_i_decay(tsd_tsdn(tsd), arena_ind, false);
@@ -2121,8 +2598,7 @@ arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
int ret;
unsigned arena_ind;
- READONLY();
- WRITEONLY();
+ NEITHER_READ_NOR_WRITE();
MIB_UNSIGNED(arena_ind, 1);
arena_i_decay(tsd_tsdn(tsd), arena_ind, true);
@@ -2137,8 +2613,7 @@ arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
arena_t **arena) {
int ret;
- READONLY();
- WRITEONLY();
+ NEITHER_READ_NOR_WRITE();
MIB_UNSIGNED(*arena_ind, 1);
*arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
@@ -2211,6 +2686,8 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
arena_t *arena;
ctl_arena_t *ctl_darena, *ctl_arena;
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+
ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
newp, newlen, &arena_ind, &arena);
if (ret != 0) {
@@ -2241,6 +2718,8 @@ arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
assert(ret == 0);
label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+
return ret;
}
@@ -2306,6 +2785,38 @@ label_return:
}
static int
+arena_i_oversize_threshold_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ unsigned arena_ind;
+ MIB_UNSIGNED(arena_ind, 1);
+
+ arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
+ if (arena == NULL) {
+ ret = EFAULT;
+ goto label_return;
+ }
+
+ if (oldp != NULL && oldlenp != NULL) {
+ size_t oldval = atomic_load_zu(
+ &arena->pa_shard.pac.oversize_threshold, ATOMIC_RELAXED);
+ READ(oldval, size_t);
+ }
+ if (newp != NULL) {
+ if (newlen != sizeof(size_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ atomic_store_zu(&arena->pa_shard.pac.oversize_threshold,
+ *(size_t *)newp, ATOMIC_RELAXED);
+ }
+ ret = 0;
+label_return:
+ return ret;
+}
+
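The handler above makes the per-arena oversize threshold both readable and writable as a size_t, using relaxed atomics since an approximate value is acceptable. A usage sketch, with the control name formed from the arena.<i> namespace (illustrative; the threshold value is an example):

	size_t thresh = 4 << 20;	/* example value: 4 MiB */
	mallctl("arena.0.oversize_threshold", NULL, NULL, &thresh,
	    sizeof(thresh));

	size_t cur, sz = sizeof(cur);
	mallctl("arena.0.oversize_threshold", &cur, &sz, NULL, 0);
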
+static int
arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
int ret;
@@ -2318,10 +2829,10 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = EFAULT;
goto label_return;
}
+ extent_state_t state = dirty ? extent_state_dirty : extent_state_muzzy;
if (oldp != NULL && oldlenp != NULL) {
- size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) :
- arena_muzzy_decay_ms_get(arena);
+ size_t oldval = arena_decay_ms_get(arena, state);
READ(oldval, ssize_t);
}
if (newp != NULL) {
@@ -2340,9 +2851,9 @@ arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
goto label_return;
}
}
- if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena,
- *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd),
- arena, *(ssize_t *)newp)) {
+
+ if (arena_decay_ms_set(tsd_tsdn(tsd), arena, state,
+ *(ssize_t *)newp)) {
ret = EFAULT;
goto label_return;
}
@@ -2385,15 +2896,18 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
goto label_return;
}
old_extent_hooks =
- (extent_hooks_t *)&extent_hooks_default;
+ (extent_hooks_t *)&ehooks_default_extent_hooks;
READ(old_extent_hooks, extent_hooks_t *);
if (newp != NULL) {
/* Initialize a new arena as a side effect. */
extent_hooks_t *new_extent_hooks
JEMALLOC_CC_SILENCE_INIT(NULL);
WRITE(new_extent_hooks, extent_hooks_t *);
+ arena_config_t config = arena_config_default;
+ config.extent_hooks = new_extent_hooks;
+
arena = arena_init(tsd_tsdn(tsd), arena_ind,
- new_extent_hooks);
+ &config);
if (arena == NULL) {
ret = EFAULT;
goto label_return;
@@ -2404,11 +2918,13 @@ arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
extent_hooks_t *new_extent_hooks
JEMALLOC_CC_SILENCE_INIT(NULL);
WRITE(new_extent_hooks, extent_hooks_t *);
- old_extent_hooks = extent_hooks_set(tsd, arena,
- new_extent_hooks);
+ old_extent_hooks = arena_set_extent_hooks(tsd,
+ arena, new_extent_hooks);
READ(old_extent_hooks, extent_hooks_t *);
} else {
- old_extent_hooks = extent_hooks_get(arena);
+ old_extent_hooks =
+ ehooks_get_extent_hooks_ptr(
+ arena_get_ehooks(arena));
READ(old_extent_hooks, extent_hooks_t *);
}
}
@@ -2493,10 +3009,6 @@ arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
READONLY();
- if (*oldlenp != sizeof(unsigned)) {
- ret = EINVAL;
- goto label_return;
- }
narenas = ctl_arenas->narenas;
READ(narenas, unsigned);
@@ -2582,14 +3094,14 @@ static int
arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- extent_hooks_t *extent_hooks;
unsigned arena_ind;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
- extent_hooks = (extent_hooks_t *)&extent_hooks_default;
- WRITE(extent_hooks, extent_hooks_t *);
- if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) {
+ VERIFY_READ(unsigned);
+ arena_config_t config = arena_config_default;
+ WRITE(config.extent_hooks, extent_hooks_t *);
+ if ((arena_ind = ctl_arena_init(tsd, &config)) == UINT_MAX) {
ret = EAGAIN;
goto label_return;
}
@@ -2602,26 +3114,52 @@ label_return:
}
static int
+experimental_arenas_create_ext_ctl(tsd_t *tsd,
+ const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned arena_ind;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+
+ arena_config_t config = arena_config_default;
+ VERIFY_READ(unsigned);
+ WRITE(config, arena_config_t);
+
+ if ((arena_ind = ctl_arena_init(tsd, &config)) == UINT_MAX) {
+ ret = EAGAIN;
+ goto label_return;
+ }
+ READ(arena_ind, unsigned);
+ ret = 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
+}
+
+static int
arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp,
size_t newlen) {
int ret;
unsigned arena_ind;
void *ptr;
- extent_t *extent;
+ edata_t *edata;
arena_t *arena;
ptr = NULL;
ret = EINVAL;
malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
WRITE(ptr, void *);
- extent = iealloc(tsd_tsdn(tsd), ptr);
- if (extent == NULL)
+ edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr);
+ if (edata == NULL) {
goto label_return;
+ }
- arena = extent_arena_get(extent);
- if (arena == NULL)
+ arena = arena_get_from_edata(edata);
+ if (arena == NULL) {
goto label_return;
+ }
arena_ind = arena_ind_get(arena);
READ(arena_ind, unsigned);
@@ -2646,6 +3184,10 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
}
if (newp != NULL) {
+ if (!opt_prof) {
+ ret = ENOENT;
+ goto label_return;
+ }
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
@@ -2653,7 +3195,8 @@ prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
*(bool *)newp);
} else {
- oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
+ oldval = opt_prof ? prof_thread_active_init_get(tsd_tsdn(tsd)) :
+ false;
}
READ(oldval, bool);
@@ -2669,7 +3212,8 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
bool oldval;
if (!config_prof) {
- return ENOENT;
+ ret = ENOENT;
+ goto label_return;
}
if (newp != NULL) {
@@ -2677,9 +3221,20 @@ prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
ret = EINVAL;
goto label_return;
}
- oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
+ bool val = *(bool *)newp;
+ if (!opt_prof) {
+ if (val) {
+ ret = ENOENT;
+ goto label_return;
+ } else {
+ /* No change needed (already off). */
+ oldval = false;
+ }
+ } else {
+ oldval = prof_active_set(tsd_tsdn(tsd), val);
+ }
} else {
- oldval = prof_active_get(tsd_tsdn(tsd));
+ oldval = opt_prof ? prof_active_get(tsd_tsdn(tsd)) : false;
}
READ(oldval, bool);
@@ -2694,7 +3249,7 @@ prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
int ret;
const char *filename = NULL;
- if (!config_prof) {
+ if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -2722,13 +3277,17 @@ prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
}
if (newp != NULL) {
+ if (!opt_prof) {
+ ret = ENOENT;
+ goto label_return;
+ }
if (newlen != sizeof(bool)) {
ret = EINVAL;
goto label_return;
}
oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
} else {
- oldval = prof_gdump_get(tsd_tsdn(tsd));
+ oldval = opt_prof ? prof_gdump_get(tsd_tsdn(tsd)) : false;
}
READ(oldval, bool);
@@ -2738,12 +3297,32 @@ label_return:
}
static int
+prof_prefix_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ const char *prefix = NULL;
+
+ if (!config_prof || !opt_prof) {
+ return ENOENT;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
+ WRITEONLY();
+ WRITE(prefix, const char *);
+
+ ret = prof_prefix_set(tsd_tsdn(tsd), prefix) ? EFAULT : 0;
+label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
+ return ret;
+}
+
+static int
prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
size_t lg_sample = lg_prof_sample;
- if (!config_prof) {
+ if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -2770,7 +3349,7 @@ prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
const char *filename = NULL;
- if (!config_prof) {
+ if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -2790,7 +3369,7 @@ label_return:
static int
prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
size_t *oldlenp, void *newp, size_t newlen) {
- if (!config_prof) {
+ if (!config_prof || !opt_prof) {
return ENOENT;
}
@@ -2801,6 +3380,87 @@ prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
return 0;
}
+static int
+experimental_hooks_prof_backtrace_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ if (oldp == NULL && newp == NULL) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (oldp != NULL) {
+ prof_backtrace_hook_t old_hook =
+ prof_backtrace_hook_get();
+ READ(old_hook, prof_backtrace_hook_t);
+ }
+ if (newp != NULL) {
+ if (!opt_prof) {
+ ret = ENOENT;
+ goto label_return;
+ }
+ prof_backtrace_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL);
+ WRITE(new_hook, prof_backtrace_hook_t);
+ if (new_hook == NULL) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ prof_backtrace_hook_set(new_hook);
+ }
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+experimental_hooks_prof_dump_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ if (oldp == NULL && newp == NULL) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ if (oldp != NULL) {
+ prof_dump_hook_t old_hook =
+ prof_dump_hook_get();
+ READ(old_hook, prof_dump_hook_t);
+ }
+ if (newp != NULL) {
+ if (!opt_prof) {
+ ret = ENOENT;
+ goto label_return;
+ }
+ prof_dump_hook_t new_hook JEMALLOC_CC_SILENCE_INIT(NULL);
+ WRITE(new_hook, prof_dump_hook_t);
+ prof_dump_hook_set(new_hook);
+ }
+ ret = 0;
+label_return:
+ return ret;
+}
+
+/* For integration test purposes only. No plan to move out of experimental. */
+static int
+experimental_hooks_safety_check_abort_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ WRITEONLY();
+ if (newp != NULL) {
+ if (newlen != sizeof(safety_check_abort_hook_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ safety_check_abort_hook_t hook JEMALLOC_CC_SILENCE_INIT(NULL);
+ WRITE(hook, safety_check_abort_hook_t);
+ safety_check_set_abort(hook);
+ }
+ ret = 0;
+label_return:
+ return ret;
+}
+
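As the note above says, this handler exists so integration tests can substitute their own reaction for the default abort. A sketch, assuming safety_check_abort_hook_t is a pointer to a function taking the failure message (per its use with safety_check_set_abort() here):

#include <stdio.h>

static void
record_safety_check(const char *msg) {
	/* A test might log and keep running instead of aborting. */
	fprintf(stderr, "safety check tripped: %s\n", msg);
}

/* ...then, from test setup code: */
safety_check_abort_hook_t hook = &record_safety_check;
mallctl("experimental.hooks.safety_check_abort", NULL, NULL, &hook,
    sizeof(hook));
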
/******************************************************************************/
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
@@ -2818,6 +3478,9 @@ CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)
+CTL_RO_CGEN(config_stats, stats_zero_reallocs,
+ atomic_load_zu(&zero_realloc_count, ATOMIC_RELAXED), size_t)
+
CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
ssize_t)
@@ -2830,55 +3493,61 @@ CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED),
- size_t)
+ arenas_i(mib[2])->astats->astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
- size_t)
+ arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.retained, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.extent_avail,
- ATOMIC_RELAXED),
- size_t)
+ arenas_i(mib[2])->astats->astats.pa_shard_stats.edata_avail, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t)
+ locked_read_u64_unsynchronized(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.npurge),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
+ locked_read_u64_unsynchronized(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.nmadvise),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t)
+ locked_read_u64_unsynchronized(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_dirty.purged),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t)
+ locked_read_u64_unsynchronized(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.npurge),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
+ locked_read_u64_unsynchronized(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.nmadvise),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t)
+ locked_read_u64_unsynchronized(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.decay_muzzy.purged),
+ uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_base,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
+ arenas_i(mib[2])->astats->astats.base,
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp,
- ATOMIC_RELAXED), size_t)
+ arenas_i(mib[2])->astats->astats.metadata_thp, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
- ATOMIC_RELAXED), size_t)
+ arenas_i(mib[2])->astats->astats.tcache_bytes, size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_stashed_bytes,
+ arenas_i(mib[2])->astats->astats.tcache_stashed_bytes, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
+ arenas_i(mib[2])->astats->astats.resident,
size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.abandoned_vm,
+ atomic_load_zu(
+ &arenas_i(mib[2])->astats->astats.pa_shard_stats.pac_stats.abandoned_vm,
ATOMIC_RELAXED), size_t)
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_sec_bytes,
+ arenas_i(mib[2])->astats->secstats.bytes, size_t)
+
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
arenas_i(mib[2])->astats->allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
@@ -2892,27 +3561,21 @@ CTL_RO_CGEN(config_stats, stats_arenas_i_small_nfills,
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nflushes,
arenas_i(mib[2])->astats->nflushes_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
- atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
- ATOMIC_RELAXED), size_t)
+ arenas_i(mib[2])->astats->astats.allocated_large, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
+ arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t)
+ arenas_i(mib[2])->astats->astats.ndalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.nrequests_large), uint64_t)
+ arenas_i(mib[2])->astats->astats.nrequests_large, uint64_t)
/*
* Note: "nmalloc_large" here instead of "nfills" in the read. This is
* intentional (large has no batch fill).
*/
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nfills,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
+ arenas_i(mib[2])->astats->astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nflushes,
- ctl_arena_stats_read_u64(
- &arenas_i(mib[2])->astats->astats.nflushes_large), uint64_t)
+ arenas_i(mib[2])->astats->astats.nflushes_large, uint64_t)
/* Lock profiling related APIs below. */
#define RO_MUTEX_CTL_GEN(n, l) \
@@ -2972,9 +3635,13 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
}
if (config_prof && opt_prof) {
MUTEX_PROF_RESET(bt2gctx_mtx);
+ MUTEX_PROF_RESET(tdatas_mtx);
+ MUTEX_PROF_RESET(prof_dump_mtx);
+ MUTEX_PROF_RESET(prof_recent_alloc_mtx);
+ MUTEX_PROF_RESET(prof_recent_dump_mtx);
+ MUTEX_PROF_RESET(prof_stats_mtx);
}
-
/* Per arena mutexes. */
unsigned n = narenas_total_get();
@@ -2984,18 +3651,18 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
continue;
}
MUTEX_PROF_RESET(arena->large_mtx);
- MUTEX_PROF_RESET(arena->extent_avail_mtx);
- MUTEX_PROF_RESET(arena->extents_dirty.mtx);
- MUTEX_PROF_RESET(arena->extents_muzzy.mtx);
- MUTEX_PROF_RESET(arena->extents_retained.mtx);
- MUTEX_PROF_RESET(arena->decay_dirty.mtx);
- MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
+ MUTEX_PROF_RESET(arena->pa_shard.edata_cache.mtx);
+ MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_dirty.mtx);
+ MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_muzzy.mtx);
+ MUTEX_PROF_RESET(arena->pa_shard.pac.ecache_retained.mtx);
+ MUTEX_PROF_RESET(arena->pa_shard.pac.decay_dirty.mtx);
+ MUTEX_PROF_RESET(arena->pa_shard.pac.decay_muzzy.mtx);
MUTEX_PROF_RESET(arena->tcache_ql_mtx);
MUTEX_PROF_RESET(arena->base->mtx);
- for (szind_t i = 0; i < SC_NBINS; i++) {
- for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
- bin_t *bin = &arena->bins[i].bin_shards[j];
+ for (szind_t j = 0; j < SC_NBINS; j++) {
+ for (unsigned k = 0; k < bin_infos[j].n_shards; k++) {
+ bin_t *bin = arena_get_bin(arena, j, k);
MUTEX_PROF_RESET(bin->lock);
}
}
@@ -3005,25 +3672,25 @@ stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
}
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
- arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
- arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
- arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
- arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.curregs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
- arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nfills, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
- arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
- arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
- arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.reslabs, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
- arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.curslabs, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs,
- arenas_i(mib[2])->astats->bstats[mib[4]].nonfull_slabs, size_t)
+ arenas_i(mib[2])->astats->bstats[mib[4]].stats_data.nonfull_slabs, size_t)
static const ctl_named_node_t *
stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
@@ -3035,13 +3702,13 @@ stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
}
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
- ctl_arena_stats_read_u64(
+ locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
- ctl_arena_stats_read_u64(
+ locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
- ctl_arena_stats_read_u64(
+ locked_read_u64_unsynchronized(
&arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
@@ -3056,29 +3723,17 @@ stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib,
}
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].ndirty,
- ATOMIC_RELAXED), size_t);
+ arenas_i(mib[2])->astats->estats[mib[4]].ndirty, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nmuzzy,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy,
- ATOMIC_RELAXED), size_t);
+ arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nretained,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].nretained,
- ATOMIC_RELAXED), size_t);
+ arenas_i(mib[2])->astats->estats[mib[4]].nretained, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_dirty_bytes,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes,
- ATOMIC_RELAXED), size_t);
+ arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_muzzy_bytes,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes,
- ATOMIC_RELAXED), size_t);
+ arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes, size_t);
CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes,
- atomic_load_zu(
- &arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes,
- ATOMIC_RELAXED), size_t);
+ arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes, size_t);
static const ctl_named_node_t *
stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
@@ -3089,6 +3744,82 @@ stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
return super_stats_arenas_i_extents_j_node;
}
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurge_passes,
+ arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurge_passes, uint64_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_npurges,
+ arenas_i(mib[2])->astats->hpastats.nonderived_stats.npurges, uint64_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nhugifies,
+ arenas_i(mib[2])->astats->hpastats.nonderived_stats.nhugifies, uint64_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_ndehugifies,
+ arenas_i(mib[2])->astats->hpastats.nonderived_stats.ndehugifies, uint64_t);
+
+/* Full, nonhuge */
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].npageslabs,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].nactive, size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[0].ndirty, size_t);
+
+/* Full, huge */
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_npageslabs_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].npageslabs,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_nactive_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].nactive, size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_full_slabs_ndirty_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.full_slabs[1].ndirty, size_t);
+
+/* Empty, nonhuge */
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].npageslabs,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].nactive, size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[0].ndirty, size_t);
+
+/* Empty, huge */
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_npageslabs_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].npageslabs,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_nactive_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].nactive, size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_empty_slabs_ndirty_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.empty_slabs[1].ndirty, size_t);
+
+/* Nonfull, nonhuge */
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].npageslabs,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].nactive,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_nonhuge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][0].ndirty,
+ size_t);
+
+/* Nonfull, huge */
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_npageslabs_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].npageslabs,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_nactive_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].nactive,
+ size_t);
+CTL_RO_CGEN(config_stats, stats_arenas_i_hpa_shard_nonfull_slabs_j_ndirty_huge,
+ arenas_i(mib[2])->astats->hpastats.psset_stats.nonfull_slabs[mib[5]][1].ndirty,
+ size_t);
+
+static const ctl_named_node_t *
+stats_arenas_i_hpa_shard_nonfull_slabs_j_index(tsdn_t *tsdn, const size_t *mib,
+ size_t miblen, size_t j) {
+ if (j >= PSSET_NPSIZES) {
+ return NULL;
+ }
+ return super_stats_arenas_i_hpa_shard_nonfull_slabs_j_node;
+}
+
static bool
ctl_arenas_i_verify(size_t i) {
size_t a = arenas_i2a_impl(i, true, true);
@@ -3161,6 +3892,32 @@ label_return:
return ret;
}
+static int
+experimental_thread_activity_callback_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ if (!config_stats) {
+ return ENOENT;
+ }
+
+ activity_callback_thunk_t t_old = tsd_activity_callback_thunk_get(tsd);
+ READ(t_old, activity_callback_thunk_t);
+
+ if (newp != NULL) {
+ /*
+ * This initialization is unnecessary. If it's omitted, though,
+ * clang gets confused and warns on the subsequent use of t_new.
+ */
+ activity_callback_thunk_t t_new = {NULL, NULL};
+ WRITE(t_new, activity_callback_thunk_t);
+ tsd_activity_callback_thunk_set(tsd, t_new);
+ }
+ ret = 0;
+label_return:
+ return ret;
+}
+
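The handler reads back or installs a per-thread activity-callback thunk. A sketch under the assumption (mirroring activity_callback.h) that the thunk pairs a callback invoked with (uctx, allocated, deallocated) and a user context pointer; the names below are assumptions for illustration:

static void
on_activity(void *uctx, uint64_t allocated, uint64_t deallocated) {
	/* Called as the thread's cumulative allocation counters advance. */
	(void)uctx; (void)allocated; (void)deallocated;
}

/* From the thread being instrumented: */
activity_callback_thunk_t thunk = {&on_activity, NULL};
mallctl("experimental.thread.activity_callback", NULL, NULL, &thunk,
    sizeof(thunk));
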
/*
* Output six memory utilization entries for an input pointer, the first one of
* type (void *) and the remaining five of type size_t, describing the following
@@ -3178,7 +3935,8 @@ label_return:
* otherwise their values are undefined.
*
* This API is mainly intended for small class allocations, where extents are
- * used as slab.
+ * used as slabs. Note that if the bin the extent belongs to is completely
+ * full, "(a)" will be NULL.
*
* In case of large class allocations, "(a)" will be NULL, and "(e)" and "(f)"
* will be zero (if stats are enabled; otherwise undefined). The other three
@@ -3232,11 +3990,11 @@ experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- assert(sizeof(extent_util_stats_verbose_t)
+ assert(sizeof(inspect_extent_util_stats_verbose_t)
== sizeof(void *) + sizeof(size_t) * 5);
if (oldp == NULL || oldlenp == NULL
- || *oldlenp != sizeof(extent_util_stats_verbose_t)
+ || *oldlenp != sizeof(inspect_extent_util_stats_verbose_t)
|| newp == NULL) {
ret = EINVAL;
goto label_return;
@@ -3244,9 +4002,9 @@ experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib,
void *ptr = NULL;
WRITE(ptr, void *);
- extent_util_stats_verbose_t *util_stats
- = (extent_util_stats_verbose_t *)oldp;
- extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr,
+ inspect_extent_util_stats_verbose_t *util_stats
+ = (inspect_extent_util_stats_verbose_t *)oldp;
+ inspect_extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr,
&util_stats->nfree, &util_stats->nregs, &util_stats->size,
&util_stats->bin_nfree, &util_stats->bin_nregs,
&util_stats->slabcur_addr);
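A caller passes a single pointer in and receives the six entries in one struct. Sketch with a layout mirroring the internal inspect_extent_util_stats_verbose_t; only the shape (one void * followed by five size_t) is asserted by the handler above, so the field names here are assumptions:

typedef struct {
	void *slabcur_addr;	/* "(a)"; NULL for large allocations */
	size_t nfree;
	size_t nregs;
	size_t size;
	size_t bin_nfree;	/* "(e)"; zero for large allocations */
	size_t bin_nregs;	/* "(f)"; zero for large allocations */
} util_verbose_t;

void *p = malloc(42);
util_verbose_t u;
size_t sz = sizeof(u);
mallctl("experimental.utilization.query", &u, &sz, &p, sizeof(p));
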
@@ -3357,21 +4115,22 @@ experimental_utilization_batch_query_ctl(tsd_t *tsd, const size_t *mib,
size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
int ret;
- assert(sizeof(extent_util_stats_t) == sizeof(size_t) * 3);
+ assert(sizeof(inspect_extent_util_stats_t) == sizeof(size_t) * 3);
const size_t len = newlen / sizeof(const void *);
if (oldp == NULL || oldlenp == NULL || newp == NULL || newlen == 0
|| newlen != len * sizeof(const void *)
- || *oldlenp != len * sizeof(extent_util_stats_t)) {
+ || *oldlenp != len * sizeof(inspect_extent_util_stats_t)) {
ret = EINVAL;
goto label_return;
}
void **ptrs = (void **)newp;
- extent_util_stats_t *util_stats = (extent_util_stats_t *)oldp;
+ inspect_extent_util_stats_t *util_stats =
+ (inspect_extent_util_stats_t *)oldp;
size_t i;
for (i = 0; i < len; ++i) {
- extent_util_stats_get(tsd_tsdn(tsd), ptrs[i],
+ inspect_extent_util_stats_get(tsd_tsdn(tsd), ptrs[i],
&util_stats[i].nfree, &util_stats[i].nregs,
&util_stats[i].size);
}
@@ -3420,7 +4179,7 @@ experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib,
#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) || \
defined(JEMALLOC_GCC_SYNC_ATOMICS) || defined(_MSC_VER)
/* Expose the underlying counter for fast read. */
- pactivep = (size_t *)&(arena->nactive.repr);
+ pactivep = (size_t *)&(arena->pa_shard.nactive.repr);
READ(pactivep, size_t *);
ret = 0;
#else
@@ -3433,3 +4192,223 @@ label_return:
malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
return ret;
}
+
+static int
+experimental_prof_recent_alloc_max_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ if (!(config_prof && opt_prof)) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ ssize_t old_max;
+ if (newp != NULL) {
+ if (newlen != sizeof(ssize_t)) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ ssize_t max = *(ssize_t *)newp;
+ if (max < -1) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ old_max = prof_recent_alloc_max_ctl_write(tsd, max);
+ } else {
+ old_max = prof_recent_alloc_max_ctl_read();
+ }
+ READ(old_max, ssize_t);
+
+ ret = 0;
+
+label_return:
+ return ret;
+}
+
+typedef struct write_cb_packet_s write_cb_packet_t;
+struct write_cb_packet_s {
+ write_cb_t *write_cb;
+ void *cbopaque;
+};
+
+static int
+experimental_prof_recent_alloc_dump_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ if (!(config_prof && opt_prof)) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ assert(sizeof(write_cb_packet_t) == sizeof(void *) * 2);
+
+ WRITEONLY();
+ write_cb_packet_t write_cb_packet;
+ ASSURED_WRITE(write_cb_packet, write_cb_packet_t);
+
+ prof_recent_alloc_dump(tsd, write_cb_packet.write_cb,
+ write_cb_packet.cbopaque);
+
+ ret = 0;
+
+label_return:
+ return ret;
+}
+
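The dump control consumes the two-pointer packet defined above. A sketch, assuming write_cb_t follows jemalloc's usual writer signature of void (*)(void *cbopaque, const char *s):

#include <stdio.h>

static void
dump_writer(void *cbopaque, const char *s) {
	fputs(s, (FILE *)cbopaque);
}

/* ...then, from the dumping thread: */
write_cb_packet_t pkt = {&dump_writer, stderr};
mallctl("experimental.prof_recent.alloc_dump", NULL, NULL, &pkt,
    sizeof(pkt));
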
+typedef struct batch_alloc_packet_s batch_alloc_packet_t;
+struct batch_alloc_packet_s {
+ void **ptrs;
+ size_t num;
+ size_t size;
+ int flags;
+};
+
+static int
+experimental_batch_alloc_ctl(tsd_t *tsd, const size_t *mib,
+ size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+
+ VERIFY_READ(size_t);
+
+ batch_alloc_packet_t batch_alloc_packet;
+ ASSURED_WRITE(batch_alloc_packet, batch_alloc_packet_t);
+ size_t filled = batch_alloc(batch_alloc_packet.ptrs,
+ batch_alloc_packet.num, batch_alloc_packet.size,
+ batch_alloc_packet.flags);
+ READ(filled, size_t);
+
+ ret = 0;
+
+label_return:
+ return ret;
+}
+
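batch_alloc() services many small allocations in one mallctl round trip; the packet mirrors batch_alloc_packet_t above, and the read-back reports how many slots were filled. Illustrative usage:

	void *ptrs[8];
	batch_alloc_packet_t pkt = {ptrs, 8, 64, 0};	/* eight 64-byte requests */
	size_t filled, sz = sizeof(filled);

	mallctl("experimental.batch_alloc", &filled, &sz, &pkt, sizeof(pkt));
	/* ptrs[0..filled-1] now hold valid allocations; filled may be < 8. */
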
+static int
+prof_stats_bins_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned binind;
+ prof_stats_t stats;
+
+ if (!(config_prof && opt_prof && opt_prof_stats)) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ READONLY();
+ MIB_UNSIGNED(binind, 3);
+ if (binind >= SC_NBINS) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ prof_stats_get_live(tsd, (szind_t)binind, &stats);
+ READ(stats, prof_stats_t);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+prof_stats_bins_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned binind;
+ prof_stats_t stats;
+
+ if (!(config_prof && opt_prof && opt_prof_stats)) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ READONLY();
+ MIB_UNSIGNED(binind, 3);
+ if (binind >= SC_NBINS) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ prof_stats_get_accum(tsd, (szind_t)binind, &stats);
+ READ(stats, prof_stats_t);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static const ctl_named_node_t *
+prof_stats_bins_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t i) {
+ if (!(config_prof && opt_prof && opt_prof_stats)) {
+ return NULL;
+ }
+ if (i >= SC_NBINS) {
+ return NULL;
+ }
+ return super_prof_stats_bins_i_node;
+}
+
+static int
+prof_stats_lextents_i_live_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned lextent_ind;
+ prof_stats_t stats;
+
+ if (!(config_prof && opt_prof && opt_prof_stats)) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ READONLY();
+ MIB_UNSIGNED(lextent_ind, 3);
+ if (lextent_ind >= SC_NSIZES - SC_NBINS) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ prof_stats_get_live(tsd, (szind_t)(lextent_ind + SC_NBINS), &stats);
+ READ(stats, prof_stats_t);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static int
+prof_stats_lextents_i_accum_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
+ void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
+ int ret;
+ unsigned lextent_ind;
+ prof_stats_t stats;
+
+ if (!(config_prof && opt_prof && opt_prof_stats)) {
+ ret = ENOENT;
+ goto label_return;
+ }
+
+ READONLY();
+ MIB_UNSIGNED(lextent_ind, 3);
+ if (lextent_ind >= SC_NSIZES - SC_NBINS) {
+ ret = EINVAL;
+ goto label_return;
+ }
+ prof_stats_get_accum(tsd, (szind_t)(lextent_ind + SC_NBINS), &stats);
+ READ(stats, prof_stats_t);
+
+ ret = 0;
+label_return:
+ return ret;
+}
+
+static const ctl_named_node_t *
+prof_stats_lextents_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
+ size_t i) {
+ if (!(config_prof && opt_prof && opt_prof_stats)) {
+ return NULL;
+ }
+ if (i >= SC_NSIZES - SC_NBINS) {
+ return NULL;
+ }
+ return super_prof_stats_lextents_i_node;
+}
diff --git a/contrib/jemalloc/src/decay.c b/contrib/jemalloc/src/decay.c
new file mode 100644
index 000000000000..d801b2bc08ea
--- /dev/null
+++ b/contrib/jemalloc/src/decay.c
@@ -0,0 +1,295 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/decay.h"
+
+static const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
+#define STEP(step, h, x, y) \
+ h,
+ SMOOTHSTEP
+#undef STEP
+};
+
+/*
+ * Generate a new deadline that is uniformly random within the next epoch after
+ * the current one.
+ */
+void
+decay_deadline_init(decay_t *decay) {
+ nstime_copy(&decay->deadline, &decay->epoch);
+ nstime_add(&decay->deadline, &decay->interval);
+ if (decay_ms_read(decay) > 0) {
+ nstime_t jitter;
+
+ nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
+ nstime_ns(&decay->interval)));
+ nstime_add(&decay->deadline, &jitter);
+ }
+}
+
+void
+decay_reinit(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms) {
+ atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
+ if (decay_ms > 0) {
+ nstime_init(&decay->interval, (uint64_t)decay_ms *
+ KQU(1000000));
+ nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
+ }
+
+ nstime_copy(&decay->epoch, cur_time);
+ decay->jitter_state = (uint64_t)(uintptr_t)decay;
+ decay_deadline_init(decay);
+ decay->nunpurged = 0;
+ memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
+}
+
+bool
+decay_init(decay_t *decay, nstime_t *cur_time, ssize_t decay_ms) {
+ if (config_debug) {
+ for (size_t i = 0; i < sizeof(decay_t); i++) {
+ assert(((char *)decay)[i] == 0);
+ }
+ decay->ceil_npages = 0;
+ }
+ if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ decay->purging = false;
+ decay_reinit(decay, cur_time, decay_ms);
+ return false;
+}
+
+bool
+decay_ms_valid(ssize_t decay_ms) {
+ if (decay_ms < -1) {
+ return false;
+ }
+ if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
+ KQU(1000)) {
+ return true;
+ }
+ return false;
+}
+
+static void
+decay_maybe_update_time(decay_t *decay, nstime_t *new_time) {
+ if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch,
+ new_time) > 0)) {
+ /*
+ * Time went backwards. Move the epoch back in time and
+ * generate a new deadline, with the expectation that time
+ * typically flows forward for long enough periods of time that
+ * epochs complete. Unfortunately, this strategy is susceptible
+ * to clock jitter triggering premature epoch advances, but
+ * clock jitter estimation and compensation isn't feasible here
+ * because calls into this code are event-driven.
+ */
+ nstime_copy(&decay->epoch, new_time);
+ decay_deadline_init(decay);
+ } else {
+ /* Verify that time does not go backwards. */
+ assert(nstime_compare(&decay->epoch, new_time) <= 0);
+ }
+}
+
+static size_t
+decay_backlog_npages_limit(const decay_t *decay) {
+ /*
+ * For each element of decay_backlog, multiply by the corresponding
+ * fixed-point smoothstep decay factor. Sum the products, then divide
+ * to round down to the nearest whole number of pages.
+ */
+ uint64_t sum = 0;
+ for (unsigned i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
+ sum += decay->backlog[i] * h_steps[i];
+ }
+ size_t npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
+
+ return npages_limit_backlog;
+}
+
+/*
+ * Update the backlog, assuming that 'nadvance_u64' time intervals have
+ * passed: the oldest 'nadvance_u64' records are erased and 'current_npages'
+ * is placed as the newest record.
+ */
+static void
+decay_backlog_update(decay_t *decay, uint64_t nadvance_u64,
+ size_t current_npages) {
+ if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
+ memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
+ sizeof(size_t));
+ } else {
+ size_t nadvance_z = (size_t)nadvance_u64;
+
+ assert((uint64_t)nadvance_z == nadvance_u64);
+
+ memmove(decay->backlog, &decay->backlog[nadvance_z],
+ (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
+ if (nadvance_z > 1) {
+ memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
+ nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
+ }
+ }
+
+ size_t npages_delta = (current_npages > decay->nunpurged) ?
+ current_npages - decay->nunpurged : 0;
+ decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;
+
+ if (config_debug) {
+ if (current_npages > decay->ceil_npages) {
+ decay->ceil_npages = current_npages;
+ }
+ size_t npages_limit = decay_backlog_npages_limit(decay);
+ assert(decay->ceil_npages >= npages_limit);
+ if (decay->ceil_npages > npages_limit) {
+ decay->ceil_npages = npages_limit;
+ }
+ }
+}
+
+static inline bool
+decay_deadline_reached(const decay_t *decay, const nstime_t *time) {
+ return (nstime_compare(&decay->deadline, time) <= 0);
+}
+
+uint64_t
+decay_npages_purge_in(decay_t *decay, nstime_t *time, size_t npages_new) {
+ uint64_t decay_interval_ns = decay_epoch_duration_ns(decay);
+ size_t n_epoch = (size_t)(nstime_ns(time) / decay_interval_ns);
+
+ uint64_t npages_purge;
+ if (n_epoch >= SMOOTHSTEP_NSTEPS) {
+ npages_purge = npages_new;
+ } else {
+ uint64_t h_steps_max = h_steps[SMOOTHSTEP_NSTEPS - 1];
+ assert(h_steps_max >=
+ h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
+ npages_purge = npages_new * (h_steps_max -
+ h_steps[SMOOTHSTEP_NSTEPS - 1 - n_epoch]);
+ npages_purge >>= SMOOTHSTEP_BFP;
+ }
+ return npages_purge;
+}
+
+bool
+decay_maybe_advance_epoch(decay_t *decay, nstime_t *new_time,
+ size_t npages_current) {
+ /* Handle possible non-monotonicity of time. */
+ decay_maybe_update_time(decay, new_time);
+
+ if (!decay_deadline_reached(decay, new_time)) {
+ return false;
+ }
+ nstime_t delta;
+ nstime_copy(&delta, new_time);
+ nstime_subtract(&delta, &decay->epoch);
+
+ uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
+ assert(nadvance_u64 > 0);
+
+ /* Add nadvance_u64 decay intervals to epoch. */
+ nstime_copy(&delta, &decay->interval);
+ nstime_imultiply(&delta, nadvance_u64);
+ nstime_add(&decay->epoch, &delta);
+
+ /* Set a new deadline. */
+ decay_deadline_init(decay);
+
+ /* Update the backlog. */
+ decay_backlog_update(decay, nadvance_u64, npages_current);
+
+ decay->npages_limit = decay_backlog_npages_limit(decay);
+ decay->nunpurged = (decay->npages_limit > npages_current) ?
+ decay->npages_limit : npages_current;
+
+ return true;
+}
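+
+/*
+ * Rough usage sketch (the surrounding driver is illustrative, not upstream
+ * code): a purging path samples the clock and then does something like
+ *
+ *	nstime_t now;
+ *	nstime_init_update(&now);
+ *	if (decay_maybe_advance_epoch(decay, &now, npages_current)) {
+ *		// Purge down toward decay->npages_limit.
+ *	}
+ */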
+
+/*
+ * Calculate how many pages should be purged after 'interval'.
+ *
+ * First, calculate how many pages should remain at the moment, then subtract
+ * the number of pages that should remain after 'interval'. The difference is
+ * how many pages should be purged until then.
+ *
+ * The number of pages that should remain at a specific moment is calculated
+ * like this: pages(now) = sum(backlog[i] * h_steps[i]). After 'interval'
+ * passes, backlog would shift 'interval' positions to the left and sigmoid
+ * curve would be applied starting with backlog[interval].
+ *
+ * The implementation doesn't directly map to the description, but it's
+ * essentially the same calculation, optimized to avoid iterating over
+ * [interval..SMOOTHSTEP_NSTEPS) twice.
+ */
+static inline size_t
+decay_npurge_after_interval(decay_t *decay, size_t interval) {
+ size_t i;
+ uint64_t sum = 0;
+ for (i = 0; i < interval; i++) {
+ sum += decay->backlog[i] * h_steps[i];
+ }
+ for (; i < SMOOTHSTEP_NSTEPS; i++) {
+ sum += decay->backlog[i] *
+ (h_steps[i] - h_steps[i - interval]);
+ }
+
+ return (size_t)(sum >> SMOOTHSTEP_BFP);
+}
+
+uint64_t
+decay_ns_until_purge(decay_t *decay, size_t npages_current,
+ uint64_t npages_threshold) {
+ if (!decay_gradually(decay)) {
+ return DECAY_UNBOUNDED_TIME_TO_PURGE;
+ }
+ uint64_t decay_interval_ns = decay_epoch_duration_ns(decay);
+ assert(decay_interval_ns > 0);
+ if (npages_current == 0) {
+ unsigned i;
+ for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
+ if (decay->backlog[i] > 0) {
+ break;
+ }
+ }
+ if (i == SMOOTHSTEP_NSTEPS) {
+ /* No dirty pages recorded. Sleep indefinitely. */
+ return DECAY_UNBOUNDED_TIME_TO_PURGE;
+ }
+ }
+ if (npages_current <= npages_threshold) {
+ /* Use max interval. */
+ return decay_interval_ns * SMOOTHSTEP_NSTEPS;
+ }
+
+	/* A minimum of 2 intervals ensures we reach the next epoch deadline. */
+ size_t lb = 2;
+ size_t ub = SMOOTHSTEP_NSTEPS;
+
+ size_t npurge_lb, npurge_ub;
+ npurge_lb = decay_npurge_after_interval(decay, lb);
+ if (npurge_lb > npages_threshold) {
+ return decay_interval_ns * lb;
+ }
+ npurge_ub = decay_npurge_after_interval(decay, ub);
+ if (npurge_ub < npages_threshold) {
+ return decay_interval_ns * ub;
+ }
+
+ unsigned n_search = 0;
+ size_t target, npurge;
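+	/*
+	 * Bisect over [lb, ub], maintaining the invariant that npurge_lb <=
+	 * npages_threshold <= npurge_ub.  Each iteration halves the interval,
+	 * so the loop runs at most about lg(SMOOTHSTEP_NSTEPS) times, as the
+	 * assert below verifies.
+	 */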
+ while ((npurge_lb + npages_threshold < npurge_ub) && (lb + 2 < ub)) {
+ target = (lb + ub) / 2;
+ npurge = decay_npurge_after_interval(decay, target);
+ if (npurge > npages_threshold) {
+ ub = target;
+ npurge_ub = npurge;
+ } else {
+ lb = target;
+ npurge_lb = npurge;
+ }
+ assert(n_search < lg_floor(SMOOTHSTEP_NSTEPS) + 1);
+ ++n_search;
+ }
+ return decay_interval_ns * (ub + lb) / 2;
+}
diff --git a/contrib/jemalloc/src/ecache.c b/contrib/jemalloc/src/ecache.c
new file mode 100644
index 000000000000..a242227d32d6
--- /dev/null
+++ b/contrib/jemalloc/src/ecache.c
@@ -0,0 +1,35 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/san.h"
+
+bool
+ecache_init(tsdn_t *tsdn, ecache_t *ecache, extent_state_t state, unsigned ind,
+ bool delay_coalesce) {
+ if (malloc_mutex_init(&ecache->mtx, "extents", WITNESS_RANK_EXTENTS,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ ecache->state = state;
+ ecache->ind = ind;
+ ecache->delay_coalesce = delay_coalesce;
+ eset_init(&ecache->eset, state);
+ eset_init(&ecache->guarded_eset, state);
+
+ return false;
+}
+
+void
+ecache_prefork(tsdn_t *tsdn, ecache_t *ecache) {
+ malloc_mutex_prefork(tsdn, &ecache->mtx);
+}
+
+void
+ecache_postfork_parent(tsdn_t *tsdn, ecache_t *ecache) {
+ malloc_mutex_postfork_parent(tsdn, &ecache->mtx);
+}
+
+void
+ecache_postfork_child(tsdn_t *tsdn, ecache_t *ecache) {
+ malloc_mutex_postfork_child(tsdn, &ecache->mtx);
+}
diff --git a/contrib/jemalloc/src/edata.c b/contrib/jemalloc/src/edata.c
new file mode 100644
index 000000000000..82b6f5654b51
--- /dev/null
+++ b/contrib/jemalloc/src/edata.c
@@ -0,0 +1,6 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+ph_gen(, edata_avail, edata_t, avail_link,
+ edata_esnead_comp)
+ph_gen(, edata_heap, edata_t, heap_link, edata_snad_comp)
diff --git a/contrib/jemalloc/src/edata_cache.c b/contrib/jemalloc/src/edata_cache.c
new file mode 100644
index 000000000000..6bc1848cbcb8
--- /dev/null
+++ b/contrib/jemalloc/src/edata_cache.c
@@ -0,0 +1,154 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+bool
+edata_cache_init(edata_cache_t *edata_cache, base_t *base) {
+ edata_avail_new(&edata_cache->avail);
+ /*
+ * This is not strictly necessary, since the edata_cache_t is only
+ * created inside an arena, which is zeroed on creation. But this is
+ * handy as a safety measure.
+ */
+ atomic_store_zu(&edata_cache->count, 0, ATOMIC_RELAXED);
+ if (malloc_mutex_init(&edata_cache->mtx, "edata_cache",
+ WITNESS_RANK_EDATA_CACHE, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ edata_cache->base = base;
+ return false;
+}
+
+edata_t *
+edata_cache_get(tsdn_t *tsdn, edata_cache_t *edata_cache) {
+ malloc_mutex_lock(tsdn, &edata_cache->mtx);
+ edata_t *edata = edata_avail_first(&edata_cache->avail);
+ if (edata == NULL) {
+ malloc_mutex_unlock(tsdn, &edata_cache->mtx);
+ return base_alloc_edata(tsdn, edata_cache->base);
+ }
+ edata_avail_remove(&edata_cache->avail, edata);
+ atomic_load_sub_store_zu(&edata_cache->count, 1);
+ malloc_mutex_unlock(tsdn, &edata_cache->mtx);
+ return edata;
+}
+
+void
+edata_cache_put(tsdn_t *tsdn, edata_cache_t *edata_cache, edata_t *edata) {
+ malloc_mutex_lock(tsdn, &edata_cache->mtx);
+ edata_avail_insert(&edata_cache->avail, edata);
+ atomic_load_add_store_zu(&edata_cache->count, 1);
+ malloc_mutex_unlock(tsdn, &edata_cache->mtx);
+}
+
+void
+edata_cache_prefork(tsdn_t *tsdn, edata_cache_t *edata_cache) {
+ malloc_mutex_prefork(tsdn, &edata_cache->mtx);
+}
+
+void
+edata_cache_postfork_parent(tsdn_t *tsdn, edata_cache_t *edata_cache) {
+ malloc_mutex_postfork_parent(tsdn, &edata_cache->mtx);
+}
+
+void
+edata_cache_postfork_child(tsdn_t *tsdn, edata_cache_t *edata_cache) {
+ malloc_mutex_postfork_child(tsdn, &edata_cache->mtx);
+}
+
+void
+edata_cache_fast_init(edata_cache_fast_t *ecs, edata_cache_t *fallback) {
+ edata_list_inactive_init(&ecs->list);
+ ecs->fallback = fallback;
+ ecs->disabled = false;
+}
+
+static void
+edata_cache_fast_try_fill_from_fallback(tsdn_t *tsdn,
+ edata_cache_fast_t *ecs) {
+ edata_t *edata;
+ malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
+ for (int i = 0; i < EDATA_CACHE_FAST_FILL; i++) {
+ edata = edata_avail_remove_first(&ecs->fallback->avail);
+ if (edata == NULL) {
+ break;
+ }
+ edata_list_inactive_append(&ecs->list, edata);
+ atomic_load_sub_store_zu(&ecs->fallback->count, 1);
+ }
+ malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);
+}
+
+edata_t *
+edata_cache_fast_get(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_EDATA_CACHE, 0);
+
+ if (ecs->disabled) {
+ assert(edata_list_inactive_first(&ecs->list) == NULL);
+ return edata_cache_get(tsdn, ecs->fallback);
+ }
+
+ edata_t *edata = edata_list_inactive_first(&ecs->list);
+ if (edata != NULL) {
+ edata_list_inactive_remove(&ecs->list, edata);
+ return edata;
+ }
+ /* Slow path; requires synchronization. */
+ edata_cache_fast_try_fill_from_fallback(tsdn, ecs);
+ edata = edata_list_inactive_first(&ecs->list);
+ if (edata != NULL) {
+ edata_list_inactive_remove(&ecs->list, edata);
+ } else {
+ /*
+ * Slowest path (fallback was also empty); allocate something
+ * new.
+ */
+ edata = base_alloc_edata(tsdn, ecs->fallback->base);
+ }
+ return edata;
+}
+
+static void
+edata_cache_fast_flush_all(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
+ /*
+ * You could imagine smarter cache management policies (like
+ * only flushing down to some threshold in anticipation of
+ * future get requests). But just flushing everything provides
+ * a good opportunity to defrag too, and lets us share code between the
+ * flush and disable pathways.
+ */
+ edata_t *edata;
+ size_t nflushed = 0;
+ malloc_mutex_lock(tsdn, &ecs->fallback->mtx);
+ while ((edata = edata_list_inactive_first(&ecs->list)) != NULL) {
+ edata_list_inactive_remove(&ecs->list, edata);
+ edata_avail_insert(&ecs->fallback->avail, edata);
+ nflushed++;
+ }
+ atomic_load_add_store_zu(&ecs->fallback->count, nflushed);
+ malloc_mutex_unlock(tsdn, &ecs->fallback->mtx);
+}
+
+void
+edata_cache_fast_put(tsdn_t *tsdn, edata_cache_fast_t *ecs, edata_t *edata) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_EDATA_CACHE, 0);
+
+ if (ecs->disabled) {
+ assert(edata_list_inactive_first(&ecs->list) == NULL);
+ edata_cache_put(tsdn, ecs->fallback, edata);
+ return;
+ }
+
+ /*
+ * Prepend rather than append, to do LIFO ordering in the hopes of some
+ * cache locality.
+ */
+ edata_list_inactive_prepend(&ecs->list, edata);
+}
+
+void
+edata_cache_fast_disable(tsdn_t *tsdn, edata_cache_fast_t *ecs) {
+ edata_cache_fast_flush_all(tsdn, ecs);
+ ecs->disabled = true;
+}
diff --git a/contrib/jemalloc/src/ehooks.c b/contrib/jemalloc/src/ehooks.c
new file mode 100644
index 000000000000..383e9de6a6b9
--- /dev/null
+++ b/contrib/jemalloc/src/ehooks.c
@@ -0,0 +1,275 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/ehooks.h"
+#include "jemalloc/internal/extent_mmap.h"
+
+void
+ehooks_init(ehooks_t *ehooks, extent_hooks_t *extent_hooks, unsigned ind) {
+ /* All other hooks are optional; this one is not. */
+ assert(extent_hooks->alloc != NULL);
+ ehooks->ind = ind;
+ ehooks_set_extent_hooks_ptr(ehooks, extent_hooks);
+}
+
+/*
+ * If the caller specifies (!*zero), it is still possible to receive zeroed
+ * memory, in which case *zero is toggled to true. arena_extent_alloc() takes
+ * advantage of this to avoid demanding zeroed extents, while still using them
+ * if they happen to be returned.
+ */
+static void *
+extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
+ void *ret;
+
+ assert(size != 0);
+ assert(alignment != 0);
+
+ /* "primary" dss. */
+ if (have_dss && dss_prec == dss_prec_primary && (ret =
+ extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+ commit)) != NULL) {
+ return ret;
+ }
+ /* mmap. */
+ if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
+ != NULL) {
+ return ret;
+ }
+ /* "secondary" dss. */
+ if (have_dss && dss_prec == dss_prec_secondary && (ret =
+ extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
+ commit)) != NULL) {
+ return ret;
+ }
+
+ /* All strategies for allocation failed. */
+ return NULL;
+}
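+
+/*
+ * The net effect of the three branches above, by dss precedence:
+ *	dss_prec_primary:	try dss first, then mmap.
+ *	dss_prec_secondary:	try mmap first, then dss.
+ *	dss_prec_disabled:	mmap only.
+ */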
+
+void *
+ehooks_default_alloc_impl(tsdn_t *tsdn, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
+ arena_t *arena = arena_get(tsdn, arena_ind, false);
+ /* NULL arena indicates arena_create. */
+ assert(arena != NULL || alignment == HUGEPAGE);
+ dss_prec_t dss = (arena == NULL) ? dss_prec_disabled :
+ (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_RELAXED);
+ void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment,
+ zero, commit, dss);
+ if (have_madvise_huge && ret) {
+ pages_set_thp_state(ret, size);
+ }
+ return ret;
+}
+
+static void *
+ehooks_default_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
+ size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
+ return ehooks_default_alloc_impl(tsdn_fetch(), new_addr, size,
+ ALIGNMENT_CEILING(alignment, PAGE), zero, commit, arena_ind);
+}
+
+bool
+ehooks_default_dalloc_impl(void *addr, size_t size) {
+ if (!have_dss || !extent_in_dss(addr)) {
+ return extent_dalloc_mmap(addr, size);
+ }
+ return true;
+}
+
+static bool
+ehooks_default_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ bool committed, unsigned arena_ind) {
+ return ehooks_default_dalloc_impl(addr, size);
+}
+
+void
+ehooks_default_destroy_impl(void *addr, size_t size) {
+ if (!have_dss || !extent_in_dss(addr)) {
+ pages_unmap(addr, size);
+ }
+}
+
+static void
+ehooks_default_destroy(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ bool committed, unsigned arena_ind) {
+ ehooks_default_destroy_impl(addr, size);
+}
+
+bool
+ehooks_default_commit_impl(void *addr, size_t offset, size_t length) {
+ return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
+ length);
+}
+
+static bool
+ehooks_default_commit(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ return ehooks_default_commit_impl(addr, offset, length);
+}
+
+bool
+ehooks_default_decommit_impl(void *addr, size_t offset, size_t length) {
+ return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
+ length);
+}
+
+static bool
+ehooks_default_decommit(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ return ehooks_default_decommit_impl(addr, offset, length);
+}
+
+#ifdef PAGES_CAN_PURGE_LAZY
+bool
+ehooks_default_purge_lazy_impl(void *addr, size_t offset, size_t length) {
+ return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
+ length);
+}
+
+static bool
+ehooks_default_purge_lazy(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t offset, size_t length, unsigned arena_ind) {
+ assert(addr != NULL);
+ assert((offset & PAGE_MASK) == 0);
+ assert(length != 0);
+ assert((length & PAGE_MASK) == 0);
+ return ehooks_default_purge_lazy_impl(addr, offset, length);
+}
+#endif
+
+#ifdef PAGES_CAN_PURGE_FORCED
+bool
+ehooks_default_purge_forced_impl(void *addr, size_t offset, size_t length) {
+ return pages_purge_forced((void *)((uintptr_t)addr +
+ (uintptr_t)offset), length);
+}
+
+static bool
+ehooks_default_purge_forced(extent_hooks_t *extent_hooks, void *addr,
+ size_t size, size_t offset, size_t length, unsigned arena_ind) {
+ assert(addr != NULL);
+ assert((offset & PAGE_MASK) == 0);
+ assert(length != 0);
+ assert((length & PAGE_MASK) == 0);
+ return ehooks_default_purge_forced_impl(addr, offset, length);
+}
+#endif
+
+bool
+ehooks_default_split_impl() {
+ if (!maps_coalesce) {
+ /*
+ * Without retain, only whole regions can be purged (required by
+ * MEM_RELEASE on Windows) -- therefore disallow splitting. See
+ * comments in extent_head_no_merge().
+ */
+ return !opt_retain;
+ }
+
+ return false;
+}
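+
+/*
+ * Summarizing the policy above:
+ *	maps_coalesce:			splitting is always allowed.
+ *	!maps_coalesce && opt_retain:	allowed (purging uses MEM_DECOMMIT).
+ *	!maps_coalesce && !opt_retain:	disallowed (only whole-region
+ *					MEM_RELEASE is possible).
+ */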
+
+static bool
+ehooks_default_split(extent_hooks_t *extent_hooks, void *addr, size_t size,
+ size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
+ return ehooks_default_split_impl();
+}
+
+bool
+ehooks_default_merge_impl(tsdn_t *tsdn, void *addr_a, void *addr_b) {
+ assert(addr_a < addr_b);
+ /*
+ * For non-DSS cases --
+ * a) W/o maps_coalesce, merge is not always allowed (Windows):
+ * 1) w/o retain, never merge (first branch below).
+ * 2) with retain, only merge extents from the same VirtualAlloc
+ * region (in which case MEM_DECOMMIT is utilized for purging).
+ *
+ * b) With maps_coalesce, it's always possible to merge.
+ * 1) w/o retain, always allow merge (only about dirty / muzzy).
+ * 2) with retain, to preserve the SN / first-fit, merge is still
+ * disallowed if b is a head extent, i.e. no merging across
+ * different mmap regions.
+ *
+ * a2) and b2) are implemented in emap_try_acquire_edata_neighbor, and
+ * sanity checked in the second branch below.
+ */
+ if (!maps_coalesce && !opt_retain) {
+ return true;
+ }
+ if (config_debug) {
+ edata_t *a = emap_edata_lookup(tsdn, &arena_emap_global,
+ addr_a);
+ bool head_a = edata_is_head_get(a);
+ edata_t *b = emap_edata_lookup(tsdn, &arena_emap_global,
+ addr_b);
+ bool head_b = edata_is_head_get(b);
+ emap_assert_mapped(tsdn, &arena_emap_global, a);
+ emap_assert_mapped(tsdn, &arena_emap_global, b);
+ assert(extent_neighbor_head_state_mergeable(head_a, head_b,
+ /* forward */ true));
+ }
+ if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
+ return true;
+ }
+
+ return false;
+}
+
+bool
+ehooks_default_merge(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
+ void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
+ tsdn_t *tsdn = tsdn_fetch();
+
+ return ehooks_default_merge_impl(tsdn, addr_a, addr_b);
+}
+
+void
+ehooks_default_zero_impl(void *addr, size_t size) {
+ /*
+ * By default, we try to zero out memory using OS-provided demand-zeroed
+ * pages. If the user has specifically requested hugepages, though, we
+ * don't want to purge in the middle of a hugepage (which would break it
+ * up), so we act conservatively and use memset.
+ */
+ bool needs_memset = true;
+ if (opt_thp != thp_mode_always) {
+ needs_memset = pages_purge_forced(addr, size);
+ }
+ if (needs_memset) {
+ memset(addr, 0, size);
+ }
+}
+
+void
+ehooks_default_guard_impl(void *guard1, void *guard2) {
+ pages_mark_guards(guard1, guard2);
+}
+
+void
+ehooks_default_unguard_impl(void *guard1, void *guard2) {
+ pages_unmark_guards(guard1, guard2);
+}
+
+const extent_hooks_t ehooks_default_extent_hooks = {
+ ehooks_default_alloc,
+ ehooks_default_dalloc,
+ ehooks_default_destroy,
+ ehooks_default_commit,
+ ehooks_default_decommit,
+#ifdef PAGES_CAN_PURGE_LAZY
+ ehooks_default_purge_lazy,
+#else
+ NULL,
+#endif
+#ifdef PAGES_CAN_PURGE_FORCED
+ ehooks_default_purge_forced,
+#else
+ NULL,
+#endif
+ ehooks_default_split,
+ ehooks_default_merge
+};
diff --git a/contrib/jemalloc/src/emap.c b/contrib/jemalloc/src/emap.c
new file mode 100644
index 000000000000..9cc95a724a9b
--- /dev/null
+++ b/contrib/jemalloc/src/emap.c
@@ -0,0 +1,386 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/emap.h"
+
+enum emap_lock_result_e {
+ emap_lock_result_success,
+ emap_lock_result_failure,
+ emap_lock_result_no_extent
+};
+typedef enum emap_lock_result_e emap_lock_result_t;
+
+bool
+emap_init(emap_t *emap, base_t *base, bool zeroed) {
+ return rtree_new(&emap->rtree, base, zeroed);
+}
+
+void
+emap_update_edata_state(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+ extent_state_t state) {
+ witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE);
+
+ edata_state_set(edata, state);
+
+ EMAP_DECLARE_RTREE_CTX;
+ rtree_leaf_elm_t *elm1 = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
+ rtree_ctx, (uintptr_t)edata_base_get(edata), /* dependent */ true,
+ /* init_missing */ false);
+ assert(elm1 != NULL);
+ rtree_leaf_elm_t *elm2 = edata_size_get(edata) == PAGE ? NULL :
+ rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
+ (uintptr_t)edata_last_get(edata), /* dependent */ true,
+ /* init_missing */ false);
+
+ rtree_leaf_elm_state_update(tsdn, &emap->rtree, elm1, elm2, state);
+
+ emap_assert_mapped(tsdn, emap, edata);
+}
+
+static inline edata_t *
+emap_try_acquire_edata_neighbor_impl(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+ extent_pai_t pai, extent_state_t expected_state, bool forward,
+ bool expanding) {
+ witness_assert_positive_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE);
+ assert(!edata_guarded_get(edata));
+ assert(!expanding || forward);
+ assert(!edata_state_in_transition(expected_state));
+ assert(expected_state == extent_state_dirty ||
+ expected_state == extent_state_muzzy ||
+ expected_state == extent_state_retained);
+
+ void *neighbor_addr = forward ? edata_past_get(edata) :
+ edata_before_get(edata);
+ /*
+ * This is subtle; the rtree code asserts that its input pointer is
+ * non-NULL, and this is a useful thing to check. But it's possible
+ * that edata corresponds to an address of (void *)PAGE (in practice,
+ * this has only been observed on FreeBSD when address-space
+ * randomization is on, but it could in principle happen anywhere). In
+ * this case, edata_before_get(edata) is NULL, triggering the assert.
+ */
+ if (neighbor_addr == NULL) {
+ return NULL;
+ }
+
+ EMAP_DECLARE_RTREE_CTX;
+ rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &emap->rtree,
+	    rtree_ctx, (uintptr_t)neighbor_addr, /* dependent */ false,
+ /* init_missing */ false);
+ if (elm == NULL) {
+ return NULL;
+ }
+
+ rtree_contents_t neighbor_contents = rtree_leaf_elm_read(tsdn,
+ &emap->rtree, elm, /* dependent */ true);
+ if (!extent_can_acquire_neighbor(edata, neighbor_contents, pai,
+ expected_state, forward, expanding)) {
+ return NULL;
+ }
+
+ /* From this point, the neighbor edata can be safely acquired. */
+ edata_t *neighbor = neighbor_contents.edata;
+ assert(edata_state_get(neighbor) == expected_state);
+ emap_update_edata_state(tsdn, emap, neighbor, extent_state_merging);
+ if (expanding) {
+ extent_assert_can_expand(edata, neighbor);
+ } else {
+ extent_assert_can_coalesce(edata, neighbor);
+ }
+
+ return neighbor;
+}
+
+edata_t *
+emap_try_acquire_edata_neighbor(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+ extent_pai_t pai, extent_state_t expected_state, bool forward) {
+ return emap_try_acquire_edata_neighbor_impl(tsdn, emap, edata, pai,
+ expected_state, forward, /* expand */ false);
+}
+
+edata_t *
+emap_try_acquire_edata_neighbor_expand(tsdn_t *tsdn, emap_t *emap,
+ edata_t *edata, extent_pai_t pai, extent_state_t expected_state) {
+ /* Try expanding forward. */
+ return emap_try_acquire_edata_neighbor_impl(tsdn, emap, edata, pai,
+ expected_state, /* forward */ true, /* expand */ true);
+}
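+
+/*
+ * Illustrative call (the argument values are caller-dependent examples):
+ * acquiring the next dirty extent for forward coalescing looks like
+ *
+ *	edata_t *next = emap_try_acquire_edata_neighbor(tsdn, emap, edata,
+ *	    EXTENT_PAI_PAC, extent_state_dirty, true);
+ *
+ * where the final argument selects the forward direction.  On success, 'next'
+ * has been moved to extent_state_merging and is exclusively owned.
+ */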
+
+void
+emap_release_edata(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+ extent_state_t new_state) {
+ assert(emap_edata_in_transition(tsdn, emap, edata));
+ assert(emap_edata_is_acquired(tsdn, emap, edata));
+
+ emap_update_edata_state(tsdn, emap, edata, new_state);
+}
+
+static bool
+emap_rtree_leaf_elms_lookup(tsdn_t *tsdn, emap_t *emap, rtree_ctx_t *rtree_ctx,
+ const edata_t *edata, bool dependent, bool init_missing,
+ rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
+ *r_elm_a = rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
+ (uintptr_t)edata_base_get(edata), dependent, init_missing);
+ if (!dependent && *r_elm_a == NULL) {
+ return true;
+ }
+ assert(*r_elm_a != NULL);
+
+ *r_elm_b = rtree_leaf_elm_lookup(tsdn, &emap->rtree, rtree_ctx,
+ (uintptr_t)edata_last_get(edata), dependent, init_missing);
+ if (!dependent && *r_elm_b == NULL) {
+ return true;
+ }
+ assert(*r_elm_b != NULL);
+
+ return false;
+}
+
+static void
+emap_rtree_write_acquired(tsdn_t *tsdn, emap_t *emap, rtree_leaf_elm_t *elm_a,
+ rtree_leaf_elm_t *elm_b, edata_t *edata, szind_t szind, bool slab) {
+ rtree_contents_t contents;
+ contents.edata = edata;
+ contents.metadata.szind = szind;
+ contents.metadata.slab = slab;
+ contents.metadata.is_head = (edata == NULL) ? false :
+ edata_is_head_get(edata);
+ contents.metadata.state = (edata == NULL) ? 0 : edata_state_get(edata);
+ rtree_leaf_elm_write(tsdn, &emap->rtree, elm_a, contents);
+ if (elm_b != NULL) {
+ rtree_leaf_elm_write(tsdn, &emap->rtree, elm_b, contents);
+ }
+}
+
+bool
+emap_register_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+ szind_t szind, bool slab) {
+ assert(edata_state_get(edata) == extent_state_active);
+ EMAP_DECLARE_RTREE_CTX;
+
+ rtree_leaf_elm_t *elm_a, *elm_b;
+ bool err = emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata,
+ false, true, &elm_a, &elm_b);
+ if (err) {
+ return true;
+ }
+ assert(rtree_leaf_elm_read(tsdn, &emap->rtree, elm_a,
+ /* dependent */ false).edata == NULL);
+ assert(rtree_leaf_elm_read(tsdn, &emap->rtree, elm_b,
+ /* dependent */ false).edata == NULL);
+ emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, edata, szind, slab);
+ return false;
+}
+
+/* Invoked *after* emap_register_boundary. */
+void
+emap_register_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata,
+ szind_t szind) {
+ EMAP_DECLARE_RTREE_CTX;
+
+ assert(edata_slab_get(edata));
+ assert(edata_state_get(edata) == extent_state_active);
+
+ if (config_debug) {
+ /* Making sure the boundary is registered already. */
+ rtree_leaf_elm_t *elm_a, *elm_b;
+ bool err = emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx,
+ edata, /* dependent */ true, /* init_missing */ false,
+ &elm_a, &elm_b);
+ assert(!err);
+ rtree_contents_t contents_a, contents_b;
+ contents_a = rtree_leaf_elm_read(tsdn, &emap->rtree, elm_a,
+ /* dependent */ true);
+ contents_b = rtree_leaf_elm_read(tsdn, &emap->rtree, elm_b,
+ /* dependent */ true);
+ assert(contents_a.edata == edata && contents_b.edata == edata);
+ assert(contents_a.metadata.slab && contents_b.metadata.slab);
+ }
+
+ rtree_contents_t contents;
+ contents.edata = edata;
+ contents.metadata.szind = szind;
+ contents.metadata.slab = true;
+ contents.metadata.state = extent_state_active;
+ contents.metadata.is_head = false; /* Not allowed to access. */
+
+ assert(edata_size_get(edata) > (2 << LG_PAGE));
+ rtree_write_range(tsdn, &emap->rtree, rtree_ctx,
+ (uintptr_t)edata_base_get(edata) + PAGE,
+ (uintptr_t)edata_last_get(edata) - PAGE, contents);
+}
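+
+/*
+ * Together with emap_register_boundary, the resulting rtree layout for a slab
+ * spanning pages P0 .. Pn (n >= 2, per the size assert above) is:
+ *	P0, Pn:		written by emap_register_boundary.
+ *	P1 .. Pn-1:	written here, with is_head deliberately unset.
+ */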
+
+void
+emap_deregister_boundary(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
+ /*
+ * The edata must be either in an acquired state, or protected by state
+ * based locks.
+ */
+ if (!emap_edata_is_acquired(tsdn, emap, edata)) {
+ witness_assert_positive_depth_to_rank(
+ tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
+ }
+
+ EMAP_DECLARE_RTREE_CTX;
+ rtree_leaf_elm_t *elm_a, *elm_b;
+
+ emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, edata,
+ true, false, &elm_a, &elm_b);
+ emap_rtree_write_acquired(tsdn, emap, elm_a, elm_b, NULL, SC_NSIZES,
+ false);
+}
+
+void
+emap_deregister_interior(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
+ EMAP_DECLARE_RTREE_CTX;
+
+ assert(edata_slab_get(edata));
+ if (edata_size_get(edata) > (2 << LG_PAGE)) {
+ rtree_clear_range(tsdn, &emap->rtree, rtree_ctx,
+ (uintptr_t)edata_base_get(edata) + PAGE,
+ (uintptr_t)edata_last_get(edata) - PAGE);
+ }
+}
+
+void
+emap_remap(tsdn_t *tsdn, emap_t *emap, edata_t *edata, szind_t szind,
+ bool slab) {
+ EMAP_DECLARE_RTREE_CTX;
+
+ if (szind != SC_NSIZES) {
+ rtree_contents_t contents;
+ contents.edata = edata;
+ contents.metadata.szind = szind;
+ contents.metadata.slab = slab;
+ contents.metadata.is_head = edata_is_head_get(edata);
+ contents.metadata.state = edata_state_get(edata);
+
+ rtree_write(tsdn, &emap->rtree, rtree_ctx,
+ (uintptr_t)edata_addr_get(edata), contents);
+ /*
+ * Recall that this is called only for active->inactive and
+ * inactive->active transitions (since only active extents have
+ * meaningful values for szind and slab). Active, non-slab
+ * extents only need to handle lookups at their head (on
+ * deallocation), so we don't bother filling in the end
+ * boundary.
+ *
+ * For slab extents, we do the end-mapping change. This still
+ * leaves the interior unmodified; an emap_register_interior
+ * call is coming in those cases, though.
+ */
+ if (slab && edata_size_get(edata) > PAGE) {
+ uintptr_t key = (uintptr_t)edata_past_get(edata)
+ - (uintptr_t)PAGE;
+ rtree_write(tsdn, &emap->rtree, rtree_ctx, key,
+ contents);
+ }
+ }
+}
+
+bool
+emap_split_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
+ edata_t *edata, size_t size_a, edata_t *trail, size_t size_b) {
+ EMAP_DECLARE_RTREE_CTX;
+
+ /*
+	 * We use placeholder constants for the arena ind and for the zero,
+	 * ranged, commit, and head-status fields.  This is a fake edata_t,
+	 * used only to facilitate a lookup.
+ */
+ edata_t lead = {0};
+ edata_init(&lead, 0U, edata_addr_get(edata), size_a, false, 0, 0,
+ extent_state_active, false, false, EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
+
+ emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, &lead, false, true,
+ &prepare->lead_elm_a, &prepare->lead_elm_b);
+ emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, trail, false, true,
+ &prepare->trail_elm_a, &prepare->trail_elm_b);
+
+ if (prepare->lead_elm_a == NULL || prepare->lead_elm_b == NULL
+ || prepare->trail_elm_a == NULL || prepare->trail_elm_b == NULL) {
+ return true;
+ }
+ return false;
+}
+
+void
+emap_split_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
+ edata_t *lead, size_t size_a, edata_t *trail, size_t size_b) {
+ /*
+ * We should think about not writing to the lead leaf element. We can
+ * get into situations where a racing realloc-like call can disagree
+ * with a size lookup request. I think it's fine to declare that these
+ * situations are race bugs, but there's an argument to be made that for
+ * things like xallocx, a size lookup call should return either the old
+ * size or the new size, but not anything else.
+ */
+ emap_rtree_write_acquired(tsdn, emap, prepare->lead_elm_a,
+ prepare->lead_elm_b, lead, SC_NSIZES, /* slab */ false);
+ emap_rtree_write_acquired(tsdn, emap, prepare->trail_elm_a,
+ prepare->trail_elm_b, trail, SC_NSIZES, /* slab */ false);
+}
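+
+/*
+ * Rough shape of a split, as driven by the extent code (error handling and
+ * the edata-level bookkeeping are elided for illustration):
+ *
+ *	emap_prepare_t prepare;
+ *	if (emap_split_prepare(tsdn, emap, &prepare, lead, size_a, trail,
+ *	    size_b)) {
+ *		// Rtree lookup/initialization failed; abandon the split.
+ *	}
+ *	// ... shrink 'lead' to size_a and initialize 'trail' with size_b ...
+ *	emap_split_commit(tsdn, emap, &prepare, lead, size_a, trail, size_b);
+ */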
+
+void
+emap_merge_prepare(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
+ edata_t *lead, edata_t *trail) {
+ EMAP_DECLARE_RTREE_CTX;
+ emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, lead, true, false,
+ &prepare->lead_elm_a, &prepare->lead_elm_b);
+ emap_rtree_leaf_elms_lookup(tsdn, emap, rtree_ctx, trail, true, false,
+ &prepare->trail_elm_a, &prepare->trail_elm_b);
+}
+
+void
+emap_merge_commit(tsdn_t *tsdn, emap_t *emap, emap_prepare_t *prepare,
+ edata_t *lead, edata_t *trail) {
+ rtree_contents_t clear_contents;
+ clear_contents.edata = NULL;
+ clear_contents.metadata.szind = SC_NSIZES;
+ clear_contents.metadata.slab = false;
+ clear_contents.metadata.is_head = false;
+ clear_contents.metadata.state = (extent_state_t)0;
+
+ if (prepare->lead_elm_b != NULL) {
+ rtree_leaf_elm_write(tsdn, &emap->rtree,
+ prepare->lead_elm_b, clear_contents);
+ }
+
+ rtree_leaf_elm_t *merged_b;
+ if (prepare->trail_elm_b != NULL) {
+ rtree_leaf_elm_write(tsdn, &emap->rtree,
+ prepare->trail_elm_a, clear_contents);
+ merged_b = prepare->trail_elm_b;
+ } else {
+ merged_b = prepare->trail_elm_a;
+ }
+
+ emap_rtree_write_acquired(tsdn, emap, prepare->lead_elm_a, merged_b,
+ lead, SC_NSIZES, false);
+}
+
+void
+emap_do_assert_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
+ EMAP_DECLARE_RTREE_CTX;
+
+ rtree_contents_t contents = rtree_read(tsdn, &emap->rtree, rtree_ctx,
+ (uintptr_t)edata_base_get(edata));
+ assert(contents.edata == edata);
+ assert(contents.metadata.is_head == edata_is_head_get(edata));
+ assert(contents.metadata.state == edata_state_get(edata));
+}
+
+void
+emap_do_assert_not_mapped(tsdn_t *tsdn, emap_t *emap, edata_t *edata) {
+ emap_full_alloc_ctx_t context1 = {0};
+ emap_full_alloc_ctx_try_lookup(tsdn, emap, edata_base_get(edata),
+ &context1);
+ assert(context1.edata == NULL);
+
+ emap_full_alloc_ctx_t context2 = {0};
+ emap_full_alloc_ctx_try_lookup(tsdn, emap, edata_last_get(edata),
+ &context2);
+ assert(context2.edata == NULL);
+}
diff --git a/contrib/jemalloc/src/eset.c b/contrib/jemalloc/src/eset.c
new file mode 100644
index 000000000000..6f8f335e198b
--- /dev/null
+++ b/contrib/jemalloc/src/eset.c
@@ -0,0 +1,282 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/eset.h"
+
+#define ESET_NPSIZES (SC_NPSIZES + 1)
+
+static void
+eset_bin_init(eset_bin_t *bin) {
+ edata_heap_new(&bin->heap);
+ /*
+	 * heap_min doesn't need initialization; it gets filled in when the bin
+	 * goes from empty to non-empty.
+ */
+}
+
+static void
+eset_bin_stats_init(eset_bin_stats_t *bin_stats) {
+ atomic_store_zu(&bin_stats->nextents, 0, ATOMIC_RELAXED);
+ atomic_store_zu(&bin_stats->nbytes, 0, ATOMIC_RELAXED);
+}
+
+void
+eset_init(eset_t *eset, extent_state_t state) {
+ for (unsigned i = 0; i < ESET_NPSIZES; i++) {
+ eset_bin_init(&eset->bins[i]);
+ eset_bin_stats_init(&eset->bin_stats[i]);
+ }
+ fb_init(eset->bitmap, ESET_NPSIZES);
+ edata_list_inactive_init(&eset->lru);
+ eset->state = state;
+}
+
+size_t
+eset_npages_get(eset_t *eset) {
+ return atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
+}
+
+size_t
+eset_nextents_get(eset_t *eset, pszind_t pind) {
+ return atomic_load_zu(&eset->bin_stats[pind].nextents, ATOMIC_RELAXED);
+}
+
+size_t
+eset_nbytes_get(eset_t *eset, pszind_t pind) {
+ return atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
+}
+
+static void
+eset_stats_add(eset_t *eset, pszind_t pind, size_t sz) {
+ size_t cur = atomic_load_zu(&eset->bin_stats[pind].nextents,
+ ATOMIC_RELAXED);
+ atomic_store_zu(&eset->bin_stats[pind].nextents, cur + 1,
+ ATOMIC_RELAXED);
+ cur = atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
+ atomic_store_zu(&eset->bin_stats[pind].nbytes, cur + sz,
+ ATOMIC_RELAXED);
+}
+
+static void
+eset_stats_sub(eset_t *eset, pszind_t pind, size_t sz) {
+ size_t cur = atomic_load_zu(&eset->bin_stats[pind].nextents,
+ ATOMIC_RELAXED);
+ atomic_store_zu(&eset->bin_stats[pind].nextents, cur - 1,
+ ATOMIC_RELAXED);
+ cur = atomic_load_zu(&eset->bin_stats[pind].nbytes, ATOMIC_RELAXED);
+ atomic_store_zu(&eset->bin_stats[pind].nbytes, cur - sz,
+ ATOMIC_RELAXED);
+}
+
+void
+eset_insert(eset_t *eset, edata_t *edata) {
+ assert(edata_state_get(edata) == eset->state);
+
+ size_t size = edata_size_get(edata);
+ size_t psz = sz_psz_quantize_floor(size);
+ pszind_t pind = sz_psz2ind(psz);
+
+ edata_cmp_summary_t edata_cmp_summary = edata_cmp_summary_get(edata);
+ if (edata_heap_empty(&eset->bins[pind].heap)) {
+ fb_set(eset->bitmap, ESET_NPSIZES, (size_t)pind);
+ /* Only element is automatically the min element. */
+ eset->bins[pind].heap_min = edata_cmp_summary;
+ } else {
+ /*
+ * There's already a min element; update the summary if we're
+ * about to insert a lower one.
+ */
+ if (edata_cmp_summary_comp(edata_cmp_summary,
+ eset->bins[pind].heap_min) < 0) {
+ eset->bins[pind].heap_min = edata_cmp_summary;
+ }
+ }
+ edata_heap_insert(&eset->bins[pind].heap, edata);
+
+ if (config_stats) {
+ eset_stats_add(eset, pind, size);
+ }
+
+ edata_list_inactive_append(&eset->lru, edata);
+ size_t npages = size >> LG_PAGE;
+ /*
+	 * All modifications to npages are made with the owning ecache's mutex
+	 * held, so we don't need an atomic fetch-add; we can get by with a
+	 * load followed by a store.
+ */
+ size_t cur_eset_npages =
+ atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
+ atomic_store_zu(&eset->npages, cur_eset_npages + npages,
+ ATOMIC_RELAXED);
+}
+
+void
+eset_remove(eset_t *eset, edata_t *edata) {
+ assert(edata_state_get(edata) == eset->state ||
+ edata_state_in_transition(edata_state_get(edata)));
+
+ size_t size = edata_size_get(edata);
+ size_t psz = sz_psz_quantize_floor(size);
+ pszind_t pind = sz_psz2ind(psz);
+ if (config_stats) {
+ eset_stats_sub(eset, pind, size);
+ }
+
+ edata_cmp_summary_t edata_cmp_summary = edata_cmp_summary_get(edata);
+ edata_heap_remove(&eset->bins[pind].heap, edata);
+ if (edata_heap_empty(&eset->bins[pind].heap)) {
+ fb_unset(eset->bitmap, ESET_NPSIZES, (size_t)pind);
+ } else {
+ /*
+		 * This is a little weird; we check whether the summaries are
+		 * equal, rather than whether the edata we removed was the heap
+		 * minimum.  The reason is that getting the heap minimum
+ * can cause a pairing heap merge operation. We can avoid this
+ * if we only update the min if it's changed, in which case the
+ * summaries of the removed element and the min element should
+ * compare equal.
+ */
+ if (edata_cmp_summary_comp(edata_cmp_summary,
+ eset->bins[pind].heap_min) == 0) {
+ eset->bins[pind].heap_min = edata_cmp_summary_get(
+ edata_heap_first(&eset->bins[pind].heap));
+ }
+ }
+ edata_list_inactive_remove(&eset->lru, edata);
+ size_t npages = size >> LG_PAGE;
+ /*
+	 * As in eset_insert, the owning ecache's mutex is held, so we don't
+	 * need atomic operations for updating eset->npages.
+ */
+ size_t cur_extents_npages =
+ atomic_load_zu(&eset->npages, ATOMIC_RELAXED);
+ assert(cur_extents_npages >= npages);
+ atomic_store_zu(&eset->npages,
+ cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
+}
+
+/*
+ * Find an extent with size [min_size, max_size) to satisfy the alignment
+ * requirement. For each size, try only the first extent in the heap.
+ */
+static edata_t *
+eset_fit_alignment(eset_t *eset, size_t min_size, size_t max_size,
+ size_t alignment) {
+ pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(min_size));
+ pszind_t pind_max = sz_psz2ind(sz_psz_quantize_ceil(max_size));
+
+ for (pszind_t i =
+ (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind);
+ i < pind_max;
+ i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) {
+ assert(i < SC_NPSIZES);
+ assert(!edata_heap_empty(&eset->bins[i].heap));
+ edata_t *edata = edata_heap_first(&eset->bins[i].heap);
+ uintptr_t base = (uintptr_t)edata_base_get(edata);
+ size_t candidate_size = edata_size_get(edata);
+ assert(candidate_size >= min_size);
+
+ uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
+ PAGE_CEILING(alignment));
+ if (base > next_align || base + candidate_size <= next_align) {
+ /* Overflow or not crossing the next alignment. */
+ continue;
+ }
+
+ size_t leadsize = next_align - base;
+ if (candidate_size - leadsize >= min_size) {
+ return edata;
+ }
+ }
+
+ return NULL;
+}
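+
+/*
+ * Worked example for the math above (illustrative numbers): with base ==
+ * 0x5000, candidate_size == 0x4000, and alignment == 0x4000, next_align ==
+ * 0x8000 and leadsize == 0x3000, so this candidate satisfies the request only
+ * if candidate_size - leadsize == 0x1000 >= min_size.
+ */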
+
+/*
+ * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
+ * large enough.
+ *
+ * lg_max_fit is the (log of the) maximum ratio between the requested size and
+ * the returned size that we'll allow. This can reduce fragmentation by
+ * avoiding reusing and splitting large extents for smaller sizes. In practice,
+ * it's set to opt_lg_extent_max_active_fit for the dirty eset and SC_PTR_BITS
+ * for others.
+ */
+static edata_t *
+eset_first_fit(eset_t *eset, size_t size, bool exact_only,
+ unsigned lg_max_fit) {
+ edata_t *ret = NULL;
+ edata_cmp_summary_t ret_summ JEMALLOC_CC_SILENCE_INIT({0});
+
+ pszind_t pind = sz_psz2ind(sz_psz_quantize_ceil(size));
+
+ if (exact_only) {
+ return edata_heap_empty(&eset->bins[pind].heap) ? NULL :
+ edata_heap_first(&eset->bins[pind].heap);
+ }
+
+ for (pszind_t i =
+ (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)pind);
+ i < ESET_NPSIZES;
+ i = (pszind_t)fb_ffs(eset->bitmap, ESET_NPSIZES, (size_t)i + 1)) {
+ assert(!edata_heap_empty(&eset->bins[i].heap));
+ if (lg_max_fit == SC_PTR_BITS) {
+ /*
+ * We'll shift by this below, and shifting out all the
+ * bits is undefined. Decreasing is safe, since the
+ * page size is larger than 1 byte.
+ */
+ lg_max_fit = SC_PTR_BITS - 1;
+ }
+ if ((sz_pind2sz(i) >> lg_max_fit) > size) {
+ break;
+ }
+ if (ret == NULL || edata_cmp_summary_comp(
+ eset->bins[i].heap_min, ret_summ) < 0) {
+ /*
+ * We grab the edata as early as possible, even though
+ * we might change it later. Practically, a large
+ * portion of eset_fit calls succeed at the first valid
+ * index, so this doesn't cost much, and we get the
+ * effect of prefetching the edata as early as possible.
+ */
+ edata_t *edata = edata_heap_first(&eset->bins[i].heap);
+ assert(edata_size_get(edata) >= size);
+ assert(ret == NULL || edata_snad_comp(edata, ret) < 0);
+ assert(ret == NULL || edata_cmp_summary_comp(
+ eset->bins[i].heap_min,
+ edata_cmp_summary_get(edata)) == 0);
+ ret = edata;
+ ret_summ = eset->bins[i].heap_min;
+ }
+ if (i == SC_NPSIZES) {
+ break;
+ }
+ assert(i < SC_NPSIZES);
+ }
+
+ return ret;
+}
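+
+/*
+ * E.g. (illustrative numbers): a request with size == 8 KiB and lg_max_fit ==
+ * 6 stops scanning at the first size class larger than 8 KiB << 6 == 512 KiB,
+ * rather than splitting an even larger extent for it.
+ */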
+
+edata_t *
+eset_fit(eset_t *eset, size_t esize, size_t alignment, bool exact_only,
+ unsigned lg_max_fit) {
+ size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
+ /* Beware size_t wrap-around. */
+ if (max_size < esize) {
+ return NULL;
+ }
+
+ edata_t *edata = eset_first_fit(eset, max_size, exact_only, lg_max_fit);
+
+ if (alignment > PAGE && edata == NULL) {
+ /*
+ * max_size guarantees the alignment requirement but is rather
+ * pessimistic. Next we try to satisfy the aligned allocation
+ * with sizes in [esize, max_size).
+ */
+ edata = eset_fit_alignment(eset, esize, max_size, alignment);
+ }
+
+ return edata;
+}
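+
+/*
+ * Why max_size above guarantees alignment: the worst-case unusable lead
+ * within a page-aligned extent is PAGE_CEILING(alignment) - PAGE bytes.  With
+ * illustrative numbers, for esize == 16 KiB, alignment == 64 KiB, and 4 KiB
+ * pages, any extent of max_size == 16 KiB + 64 KiB - 4 KiB == 76 KiB
+ * necessarily contains a 64 KiB-aligned run of 16 KiB.
+ */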
diff --git a/contrib/jemalloc/src/exp_grow.c b/contrib/jemalloc/src/exp_grow.c
new file mode 100644
index 000000000000..386471f49fde
--- /dev/null
+++ b/contrib/jemalloc/src/exp_grow.c
@@ -0,0 +1,8 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
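+/*
+ * exp_grow seeds the exponentially growing retained-allocation request size:
+ * the first grow targets one hugepage, and growth is capped at the largest
+ * large size class.
+ */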
+void
+exp_grow_init(exp_grow_t *exp_grow) {
+ exp_grow->next = sz_psz2ind(HUGEPAGE);
+ exp_grow->limit = sz_psz2ind(SC_LARGE_MAXCLASS);
+}
diff --git a/contrib/jemalloc/src/extent.c b/contrib/jemalloc/src/extent.c
index b4ef382676be..cf3d1f3112df 100644
--- a/contrib/jemalloc/src/extent.c
+++ b/contrib/jemalloc/src/extent.c
@@ -1,93 +1,28 @@
-#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/ph.h"
-#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/mutex_pool.h"
/******************************************************************************/
/* Data. */
-rtree_t extents_rtree;
-/* Keyed by the address of the extent_t being protected. */
-mutex_pool_t extent_mutex_pool;
-
size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
-static const bitmap_info_t extents_bitmap_info =
- BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);
-
-static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit,
- unsigned arena_ind);
-static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, bool committed, unsigned arena_ind);
-static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, bool committed, unsigned arena_ind);
-static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, size_t offset, size_t length, unsigned arena_ind);
-static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length, bool growing_retained);
-static bool extent_decommit_default(extent_hooks_t *extent_hooks,
- void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
-#ifdef PAGES_CAN_PURGE_LAZY
-static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, size_t offset, size_t length, unsigned arena_ind);
-#endif
-static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length, bool growing_retained);
-#ifdef PAGES_CAN_PURGE_FORCED
-static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
- void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
-#endif
-static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length, bool growing_retained);
-static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, size_t size_a, size_t size_b, bool committed,
- unsigned arena_ind);
-static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
- szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
- bool growing_retained);
-static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
- size_t size_a, void *addr_b, size_t size_b, bool committed,
- unsigned arena_ind);
-static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
- bool growing_retained);
-
-const extent_hooks_t extent_hooks_default = {
- extent_alloc_default,
- extent_dalloc_default,
- extent_destroy_default,
- extent_commit_default,
- extent_decommit_default
-#ifdef PAGES_CAN_PURGE_LAZY
- ,
- extent_purge_lazy_default
-#else
- ,
- NULL
-#endif
-#ifdef PAGES_CAN_PURGE_FORCED
- ,
- extent_purge_forced_default
-#else
- ,
- NULL
-#endif
- ,
- extent_split_default,
- extent_merge_default
-};
+static bool extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length, bool growing_retained);
+static bool extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks,
+ edata_t *edata, size_t offset, size_t length, bool growing_retained);
+static bool extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks,
+ edata_t *edata, size_t offset, size_t length, bool growing_retained);
+static edata_t *extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks);
+static bool extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *a, edata_t *b, bool holding_core_locks);
/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
@@ -99,503 +34,158 @@ static atomic_zu_t highpages;
* definition.
*/
-static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
-static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
- size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
- bool *zero, bool *commit, bool growing_retained);
-static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- extent_t *extent, bool *coalesced, bool growing_retained);
-static void extent_record(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
- bool growing_retained);
+static void extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata);
+static edata_t *extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *expand_edata, size_t usize, size_t alignment,
+ bool zero, bool *commit, bool growing_retained, bool guarded);
+static edata_t *extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *edata, bool *coalesced);
+static edata_t *extent_alloc_retained(tsdn_t *tsdn, pac_t *pac,
+ ehooks_t *ehooks, edata_t *expand_edata, size_t size, size_t alignment,
+ bool zero, bool *commit, bool guarded);
/******************************************************************************/
-#define ATTR_NONE /* does nothing */
-
-ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link,
- extent_esnead_comp)
-
-#undef ATTR_NONE
-
-typedef enum {
- lock_result_success,
- lock_result_failure,
- lock_result_no_extent
-} lock_result_t;
-
-static lock_result_t
-extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
- extent_t **result, bool inactive_only) {
- extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
- elm, true);
-
- /* Slab implies active extents and should be skipped. */
- if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
- &extents_rtree, elm, true))) {
- return lock_result_no_extent;
- }
-
- /*
- * It's possible that the extent changed out from under us, and with it
- * the leaf->extent mapping. We have to recheck while holding the lock.
- */
- extent_lock(tsdn, extent1);
- extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
- &extents_rtree, elm, true);
-
- if (extent1 == extent2) {
- *result = extent1;
- return lock_result_success;
- } else {
- extent_unlock(tsdn, extent1);
- return lock_result_failure;
- }
-}
-
-/*
- * Returns a pool-locked extent_t * if there's one associated with the given
- * address, and NULL otherwise.
- */
-static extent_t *
-extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
- bool inactive_only) {
- extent_t *ret = NULL;
- rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
- rtree_ctx, (uintptr_t)addr, false, false);
- if (elm == NULL) {
- return NULL;
- }
- lock_result_t lock_result;
- do {
- lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret,
- inactive_only);
- } while (lock_result == lock_result_failure);
- return ret;
-}
-
-extent_t *
-extent_alloc(tsdn_t *tsdn, arena_t *arena) {
- malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
- extent_t *extent = extent_avail_first(&arena->extent_avail);
- if (extent == NULL) {
- malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
- return base_alloc_extent(tsdn, arena->base);
- }
- extent_avail_remove(&arena->extent_avail, extent);
- atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
- malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
- return extent;
-}
-
-void
-extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
- malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
- extent_avail_insert(&arena->extent_avail, extent);
- atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
- malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
-}
-
-extent_hooks_t *
-extent_hooks_get(arena_t *arena) {
- return base_extent_hooks_get(arena->base);
-}
-
-extent_hooks_t *
-extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
- background_thread_info_t *info;
- if (have_background_thread) {
- info = arena_background_thread_info_get(arena);
- malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
- }
- extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
- if (have_background_thread) {
- malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
- }
-
- return ret;
-}
-
-static void
-extent_hooks_assure_initialized(arena_t *arena,
- extent_hooks_t **r_extent_hooks) {
- if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
- *r_extent_hooks = extent_hooks_get(arena);
- }
-}
-
-#ifndef JEMALLOC_JET
-static
-#endif
size_t
-extent_size_quantize_floor(size_t size) {
- size_t ret;
- pszind_t pind;
-
- assert(size > 0);
- assert((size & PAGE_MASK) == 0);
-
- pind = sz_psz2ind(size - sz_large_pad + 1);
- if (pind == 0) {
- /*
- * Avoid underflow. This short-circuit would also do the right
- * thing for all sizes in the range for which there are
- * PAGE-spaced size classes, but it's simplest to just handle
- * the one case that would cause erroneous results.
- */
- return size;
- }
- ret = sz_pind2sz(pind - 1) + sz_large_pad;
- assert(ret <= size);
- return ret;
+extent_sn_next(pac_t *pac) {
+ return atomic_fetch_add_zu(&pac->extent_sn_next, 1, ATOMIC_RELAXED);
}
-#ifndef JEMALLOC_JET
-static
-#endif
-size_t
-extent_size_quantize_ceil(size_t size) {
- size_t ret;
-
- assert(size > 0);
- assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
- assert((size & PAGE_MASK) == 0);
-
- ret = extent_size_quantize_floor(size);
- if (ret < size) {
- /*
- * Skip a quantization that may have an adequately large extent,
- * because under-sized extents may be mixed in. This only
- * happens when an unusual size is requested, i.e. for aligned
- * allocation, and is just one of several places where linear
- * search would potentially find sufficiently aligned available
- * memory somewhere lower.
- */
- ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
- sz_large_pad;
- }
- return ret;
+static inline bool
+extent_may_force_decay(pac_t *pac) {
+ return !(pac_decay_ms_get(pac, extent_state_dirty) == -1
+ || pac_decay_ms_get(pac, extent_state_muzzy) == -1);
}
-/* Generate pairing heap functions. */
-ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
+static bool
+extent_try_delayed_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *edata) {
+ emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
-bool
-extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
- bool delay_coalesce) {
- if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
- malloc_mutex_rank_exclusive)) {
+ bool coalesced;
+ edata = extent_try_coalesce(tsdn, pac, ehooks, ecache,
+ edata, &coalesced);
+ emap_update_edata_state(tsdn, pac->emap, edata, ecache->state);
+
+ if (!coalesced) {
return true;
}
- for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
- extent_heap_new(&extents->heaps[i]);
- }
- bitmap_init(extents->bitmap, &extents_bitmap_info, true);
- extent_list_init(&extents->lru);
- atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
- extents->state = state;
- extents->delay_coalesce = delay_coalesce;
+ eset_insert(&ecache->eset, edata);
return false;
}
-extent_state_t
-extents_state_get(const extents_t *extents) {
- return extents->state;
-}
-
-size_t
-extents_npages_get(extents_t *extents) {
- return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
-}
-
-size_t
-extents_nextents_get(extents_t *extents, pszind_t pind) {
- return atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
-}
-
-size_t
-extents_nbytes_get(extents_t *extents, pszind_t pind) {
- return atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
-}
-
-static void
-extents_stats_add(extents_t *extent, pszind_t pind, size_t sz) {
- size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
- atomic_store_zu(&extent->nextents[pind], cur + 1, ATOMIC_RELAXED);
- cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
- atomic_store_zu(&extent->nbytes[pind], cur + sz, ATOMIC_RELAXED);
-}
-
-static void
-extents_stats_sub(extents_t *extent, pszind_t pind, size_t sz) {
- size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
- atomic_store_zu(&extent->nextents[pind], cur - 1, ATOMIC_RELAXED);
- cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
- atomic_store_zu(&extent->nbytes[pind], cur - sz, ATOMIC_RELAXED);
-}
-
-static void
-extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
- malloc_mutex_assert_owner(tsdn, &extents->mtx);
- assert(extent_state_get(extent) == extents->state);
-
- size_t size = extent_size_get(extent);
- size_t psz = extent_size_quantize_floor(size);
- pszind_t pind = sz_psz2ind(psz);
- if (extent_heap_empty(&extents->heaps[pind])) {
- bitmap_unset(extents->bitmap, &extents_bitmap_info,
- (size_t)pind);
- }
- extent_heap_insert(&extents->heaps[pind], extent);
-
- if (config_stats) {
- extents_stats_add(extents, pind, size);
- }
-
- extent_list_append(&extents->lru, extent);
- size_t npages = size >> LG_PAGE;
- /*
- * All modifications to npages hold the mutex (as asserted above), so we
- * don't need an atomic fetch-add; we can get by with a load followed by
- * a store.
- */
- size_t cur_extents_npages =
- atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
- atomic_store_zu(&extents->npages, cur_extents_npages + npages,
- ATOMIC_RELAXED);
-}
-
-static void
-extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
- malloc_mutex_assert_owner(tsdn, &extents->mtx);
- assert(extent_state_get(extent) == extents->state);
-
- size_t size = extent_size_get(extent);
- size_t psz = extent_size_quantize_floor(size);
- pszind_t pind = sz_psz2ind(psz);
- extent_heap_remove(&extents->heaps[pind], extent);
-
- if (config_stats) {
- extents_stats_sub(extents, pind, size);
- }
-
- if (extent_heap_empty(&extents->heaps[pind])) {
- bitmap_set(extents->bitmap, &extents_bitmap_info,
- (size_t)pind);
- }
- extent_list_remove(&extents->lru, extent);
- size_t npages = size >> LG_PAGE;
- /*
- * As in extents_insert_locked, we hold extents->mtx and so don't need
- * atomic operations for updating extents->npages.
- */
- size_t cur_extents_npages =
- atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
- assert(cur_extents_npages >= npages);
- atomic_store_zu(&extents->npages,
- cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
-}
-
-/*
- * Find an extent with size [min_size, max_size) to satisfy the alignment
- * requirement. For each size, try only the first extent in the heap.
- */
-static extent_t *
-extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
- size_t alignment) {
- pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
- pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
-
- for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
- &extents_bitmap_info, (size_t)pind); i < pind_max; i =
- (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
- (size_t)i+1)) {
- assert(i < SC_NPSIZES);
- assert(!extent_heap_empty(&extents->heaps[i]));
- extent_t *extent = extent_heap_first(&extents->heaps[i]);
- uintptr_t base = (uintptr_t)extent_base_get(extent);
- size_t candidate_size = extent_size_get(extent);
- assert(candidate_size >= min_size);
-
- uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
- PAGE_CEILING(alignment));
- if (base > next_align || base + candidate_size <= next_align) {
- /* Overflow or not crossing the next alignment. */
- continue;
- }
-
- size_t leadsize = next_align - base;
- if (candidate_size - leadsize >= min_size) {
- return extent;
- }
- }
+edata_t *
+ecache_alloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
+ edata_t *expand_edata, size_t size, size_t alignment, bool zero,
+ bool guarded) {
+ assert(size != 0);
+ assert(alignment != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
- return NULL;
+ bool commit = true;
+ edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache, expand_edata,
+ size, alignment, zero, &commit, false, guarded);
+ assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
+ assert(edata == NULL || edata_guarded_get(edata) == guarded);
+ return edata;
}
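
/*
 * Illustrative sketch, not part of the patch, of the candidate check in
 * the removed extents_fit_alignment() above: an extent fits iff the
 * first aligned address inside it still leaves min_size usable bytes.
 * For example, base 0x5000 with alignment 0x4000 gives next_align
 * 0x8000 and leadsize 0x3000, so a 0x10000-byte candidate fits a
 * 0x4000-byte request. Assumes power-of-two alignment; names are
 * hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define ALIGN_CEIL(x, a)	(((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

static bool
fits_aligned(uintptr_t base, size_t cand_size, size_t min_size,
    size_t alignment) {
	uintptr_t next_align = ALIGN_CEIL(base, alignment);
	if (base > next_align || base + cand_size <= next_align) {
		return false;	/* overflow, or aligned point past the end */
	}
	return cand_size - (size_t)(next_align - base) >= min_size;
}
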
-/*
- * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
- * large enough.
- */
-static extent_t *
-extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
- size_t size) {
- extent_t *ret = NULL;
-
- pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
-
- if (!maps_coalesce && !opt_retain) {
- /*
- * No split / merge allowed (Windows w/o retain). Try exact fit
- * only.
- */
- return extent_heap_empty(&extents->heaps[pind]) ? NULL :
- extent_heap_first(&extents->heaps[pind]);
- }
+edata_t *
+ecache_alloc_grow(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
+ edata_t *expand_edata, size_t size, size_t alignment, bool zero,
+ bool guarded) {
+ assert(size != 0);
+ assert(alignment != 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
- for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
- &extents_bitmap_info, (size_t)pind);
- i < SC_NPSIZES + 1;
- i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
- (size_t)i+1)) {
- assert(!extent_heap_empty(&extents->heaps[i]));
- extent_t *extent = extent_heap_first(&extents->heaps[i]);
- assert(extent_size_get(extent) >= size);
- /*
- * In order to reduce fragmentation, avoid reusing and splitting
- * large extents for much smaller sizes.
- *
-		 * Only do this check for dirty extents (delay_coalesce).
- */
- if (extents->delay_coalesce &&
- (sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
- break;
- }
- if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
- ret = extent;
+ bool commit = true;
+ edata_t *edata = extent_alloc_retained(tsdn, pac, ehooks, expand_edata,
+ size, alignment, zero, &commit, guarded);
+ if (edata == NULL) {
+ if (opt_retain && expand_edata != NULL) {
+ /*
+			 * When retain is enabled and we are trying to expand,
+			 * we do not attempt extent_alloc_wrapper, whose mmap
+			 * is very unlikely to succeed (unless the new range
+			 * happens to sit right at the end of the mapping).
+ */
+ return NULL;
}
- if (i == SC_NPSIZES) {
- break;
+ if (guarded) {
+ /*
+			 * No cached guarded extents are available (and no
+			 * grow_retained was attempted). The pac_alloc flow
+			 * will allocate regular extents to make new guarded
+			 * ones.
+ */
+ return NULL;
}
- assert(i < SC_NPSIZES);
- }
-
- return ret;
-}
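
/*
 * Worked example, not part of the patch, for the delay_coalesce cap
 * above: with the default opt_lg_extent_max_active_fit of 6 (an
 * assumption of this note), the check (sz_pind2sz(i) >> 6) > size stops
 * an 8 MiB cached extent from being split for any request under
 * 8 MiB / 64 = 128 KiB, bounding the fragmentation a single first-fit
 * hit can cause.
 */
#include <stdbool.h>
#include <stddef.h>

static bool
too_big_to_split(size_t cand_size, size_t size, unsigned lg_max_fit) {
	return (cand_size >> lg_max_fit) > size;
}
/* too_big_to_split(8 << 20, 100 << 10, 6) == true: skip the 8 MiB run. */
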
-
-/*
- * Do first-fit extent selection, where the selection policy is based on
- * extents->delay_coalesce.
- */
-static extent_t *
-extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
- size_t esize, size_t alignment) {
- malloc_mutex_assert_owner(tsdn, &extents->mtx);
-
- size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
- /* Beware size_t wrap-around. */
- if (max_size < esize) {
- return NULL;
+ void *new_addr = (expand_edata == NULL) ? NULL :
+ edata_past_get(expand_edata);
+ edata = extent_alloc_wrapper(tsdn, pac, ehooks, new_addr,
+ size, alignment, zero, &commit,
+ /* growing_retained */ false);
}
- extent_t *extent =
- extents_first_fit_locked(tsdn, arena, extents, max_size);
-
- if (alignment > PAGE && extent == NULL) {
- /*
- * max_size guarantees the alignment requirement but is rather
- * pessimistic. Next we try to satisfy the aligned allocation
- * with sizes in [esize, max_size).
- */
- extent = extents_fit_alignment(extents, esize, max_size,
- alignment);
- }
-
- return extent;
-}
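
/*
 * Sketch, not part of the patch, of why max_size = esize +
 * PAGE_CEILING(alignment) - PAGE guarantees the alignment: a page-aligned
 * extent of that size wastes at most PAGE_CEILING(alignment) - PAGE
 * leading bytes reaching the next aligned address, leaving esize bytes.
 * The wrap-around test mirrors the one above; names are hypothetical.
 */
#include <stdbool.h>
#include <stddef.h>

static bool
worst_case_size(size_t esize, size_t page_ceil_align, size_t page,
    size_t *out) {
	size_t max_size = esize + page_ceil_align - page;
	if (max_size < esize) {
		return false;	/* size_t wrapped around; request too big */
	}
	*out = max_size;
	return true;
}
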
-
-static bool
-extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- extent_t *extent) {
- extent_state_set(extent, extent_state_active);
- bool coalesced;
- extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
- extents, extent, &coalesced, false);
- extent_state_set(extent, extents_state_get(extents));
-
- if (!coalesced) {
- return true;
- }
- extents_insert_locked(tsdn, extents, extent);
- return false;
-}
-
-extent_t *
-extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, void *new_addr, size_t size, size_t pad,
- size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
- assert(size + pad != 0);
- assert(alignment != 0);
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
- new_addr, size, pad, alignment, slab, szind, zero, commit, false);
- assert(extent == NULL || extent_dumpable_get(extent));
- return extent;
+ assert(edata == NULL || edata_pai_get(edata) == EXTENT_PAI_PAC);
+ return edata;
}
void
-extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, extent_t *extent) {
- assert(extent_base_get(extent) != NULL);
- assert(extent_size_get(extent) != 0);
- assert(extent_dumpable_get(extent));
+ecache_dalloc(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
+ edata_t *edata) {
+ assert(edata_base_get(edata) != NULL);
+ assert(edata_size_get(edata) != 0);
+ assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- extent_addr_set(extent, extent_base_get(extent));
- extent_zeroed_set(extent, false);
+ edata_addr_set(edata, edata_base_get(edata));
+ edata_zeroed_set(edata, false);
- extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
+ extent_record(tsdn, pac, ehooks, ecache, edata);
}
-extent_t *
-extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, size_t npages_min) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- malloc_mutex_lock(tsdn, &extents->mtx);
+edata_t *
+ecache_evict(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, size_t npages_min) {
+ malloc_mutex_lock(tsdn, &ecache->mtx);
/*
* Get the LRU coalesced extent, if any. If coalescing was delayed,
* the loop will iterate until the LRU extent is fully coalesced.
*/
- extent_t *extent;
+ edata_t *edata;
while (true) {
/* Get the LRU extent, if any. */
- extent = extent_list_first(&extents->lru);
- if (extent == NULL) {
- goto label_return;
+ eset_t *eset = &ecache->eset;
+ edata = edata_list_inactive_first(&eset->lru);
+ if (edata == NULL) {
+ /*
+			 * Next, check whether there are guarded extents. They
+			 * are more expensive to purge (since they are not
+			 * mergeable), so we favor caching them longer.
+ */
+ eset = &ecache->guarded_eset;
+ edata = edata_list_inactive_first(&eset->lru);
+ if (edata == NULL) {
+ goto label_return;
+ }
}
/* Check the eviction limit. */
- size_t extents_npages = atomic_load_zu(&extents->npages,
- ATOMIC_RELAXED);
+ size_t extents_npages = ecache_npages_get(ecache);
if (extents_npages <= npages_min) {
- extent = NULL;
+ edata = NULL;
goto label_return;
}
- extents_remove_locked(tsdn, extents, extent);
- if (!extents->delay_coalesce) {
+ eset_remove(eset, edata);
+ if (!ecache->delay_coalesce || edata_guarded_get(edata)) {
break;
}
/* Try to coalesce. */
- if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
- rtree_ctx, extents, extent)) {
+ if (extent_try_delayed_coalesce(tsdn, pac, ehooks, ecache,
+ edata)) {
break;
}
/*
@@ -608,23 +198,24 @@ extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
* Either mark the extent active or deregister it to protect against
* concurrent operations.
*/
- switch (extents_state_get(extents)) {
+ switch (ecache->state) {
case extent_state_active:
not_reached();
case extent_state_dirty:
case extent_state_muzzy:
- extent_state_set(extent, extent_state_active);
+ emap_update_edata_state(tsdn, pac->emap, edata,
+ extent_state_active);
break;
case extent_state_retained:
- extent_deregister(tsdn, extent);
+ extent_deregister(tsdn, pac, edata);
break;
default:
not_reached();
}
label_return:
- malloc_mutex_unlock(tsdn, &extents->mtx);
- return extent;
+ malloc_mutex_unlock(tsdn, &ecache->mtx);
+ return edata;
}
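
/*
 * Hypothetical caller sketch, not part of the patch: decay-style
 * purging drains an ecache down to a floor of npages_min pages by
 * repeatedly evicting the fully coalesced LRU extent and unmapping it.
 */
static void
purge_to_floor_sketch(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    ecache_t *ecache, size_t npages_min) {
	edata_t *edata;
	while ((edata = ecache_evict(tsdn, pac, ehooks, ecache,
	    npages_min)) != NULL) {
		extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
	}
}
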
/*
@@ -632,123 +223,73 @@ label_return:
* indicates OOM), e.g. when trying to split an existing extent.
*/
static void
-extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, extent_t *extent, bool growing_retained) {
- size_t sz = extent_size_get(extent);
+extents_abandon_vm(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
+ edata_t *edata, bool growing_retained) {
+ size_t sz = edata_size_get(edata);
if (config_stats) {
- arena_stats_accum_zu(&arena->stats.abandoned_vm, sz);
+ atomic_fetch_add_zu(&pac->stats->abandoned_vm, sz,
+ ATOMIC_RELAXED);
}
/*
	 * Leak the extent after making sure its pages have already been purged, so
* that this is only a virtual memory leak.
*/
- if (extents_state_get(extents) == extent_state_dirty) {
- if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
- extent, 0, sz, growing_retained)) {
- extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
- extent, 0, extent_size_get(extent),
- growing_retained);
+ if (ecache->state == extent_state_dirty) {
+ if (extent_purge_lazy_impl(tsdn, ehooks, edata, 0, sz,
+ growing_retained)) {
+ extent_purge_forced_impl(tsdn, ehooks, edata, 0,
+ edata_size_get(edata), growing_retained);
}
}
- extent_dalloc(tsdn, arena, extent);
-}
-
-void
-extents_prefork(tsdn_t *tsdn, extents_t *extents) {
- malloc_mutex_prefork(tsdn, &extents->mtx);
-}
-
-void
-extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
- malloc_mutex_postfork_parent(tsdn, &extents->mtx);
-}
-
-void
-extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
- malloc_mutex_postfork_child(tsdn, &extents->mtx);
+ edata_cache_put(tsdn, pac->edata_cache, edata);
}
static void
-extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
- extent_t *extent) {
- assert(extent_arena_get(extent) == arena);
- assert(extent_state_get(extent) == extent_state_active);
+extent_deactivate_locked_impl(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
+ edata_t *edata) {
+ malloc_mutex_assert_owner(tsdn, &ecache->mtx);
+ assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
- extent_state_set(extent, extents_state_get(extents));
- extents_insert_locked(tsdn, extents, extent);
+ emap_update_edata_state(tsdn, pac->emap, edata, ecache->state);
+ eset_t *eset = edata_guarded_get(edata) ? &ecache->guarded_eset :
+ &ecache->eset;
+ eset_insert(eset, edata);
}
static void
-extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
- extent_t *extent) {
- malloc_mutex_lock(tsdn, &extents->mtx);
- extent_deactivate_locked(tsdn, arena, extents, extent);
- malloc_mutex_unlock(tsdn, &extents->mtx);
+extent_deactivate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
+ edata_t *edata) {
+ assert(edata_state_get(edata) == extent_state_active);
+ extent_deactivate_locked_impl(tsdn, pac, ecache, edata);
}
static void
-extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
- extent_t *extent) {
- assert(extent_arena_get(extent) == arena);
- assert(extent_state_get(extent) == extents_state_get(extents));
-
- extents_remove_locked(tsdn, extents, extent);
- extent_state_set(extent, extent_state_active);
-}
-
-static bool
-extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
- const extent_t *extent, bool dependent, bool init_missing,
- rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
- *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_base_get(extent), dependent, init_missing);
- if (!dependent && *r_elm_a == NULL) {
- return true;
- }
- assert(*r_elm_a != NULL);
-
- *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_last_get(extent), dependent, init_missing);
- if (!dependent && *r_elm_b == NULL) {
- return true;
- }
- assert(*r_elm_b != NULL);
-
- return false;
+extent_deactivate_check_state_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
+ edata_t *edata, extent_state_t expected_state) {
+ assert(edata_state_get(edata) == expected_state);
+ extent_deactivate_locked_impl(tsdn, pac, ecache, edata);
}
static void
-extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
- rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
- rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
- if (elm_b != NULL) {
- rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
- slab);
- }
-}
+extent_activate_locked(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache, eset_t *eset,
+ edata_t *edata) {
+ assert(edata_arena_ind_get(edata) == ecache_ind_get(ecache));
+ assert(edata_state_get(edata) == ecache->state ||
+ edata_state_get(edata) == extent_state_merging);
-static void
-extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
- szind_t szind) {
- assert(extent_slab_get(extent));
-
- /* Register interior. */
- for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
- rtree_write(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
- LG_PAGE), extent, szind, true);
- }
+ eset_remove(eset, edata);
+ emap_update_edata_state(tsdn, pac->emap, edata, extent_state_active);
}
-static void
-extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
+void
+extent_gdump_add(tsdn_t *tsdn, const edata_t *edata) {
cassert(config_prof);
/* prof_gdump() requirement. */
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- if (opt_prof && extent_state_get(extent) == extent_state_active) {
- size_t nadd = extent_size_get(extent) >> LG_PAGE;
+ if (opt_prof && edata_state_get(edata) == extent_state_active) {
+ size_t nadd = edata_size_get(edata) >> LG_PAGE;
size_t cur = atomic_fetch_add_zu(&curpages, nadd,
ATOMIC_RELAXED) + nadd;
size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
@@ -767,232 +308,184 @@ extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
}
static void
-extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
+extent_gdump_sub(tsdn_t *tsdn, const edata_t *edata) {
cassert(config_prof);
- if (opt_prof && extent_state_get(extent) == extent_state_active) {
- size_t nsub = extent_size_get(extent) >> LG_PAGE;
+ if (opt_prof && edata_state_get(edata) == extent_state_active) {
+ size_t nsub = edata_size_get(edata) >> LG_PAGE;
assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
}
}
static bool
-extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- rtree_leaf_elm_t *elm_a, *elm_b;
-
+extent_register_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata, bool gdump_add) {
+ assert(edata_state_get(edata) == extent_state_active);
/*
- * We need to hold the lock to protect against a concurrent coalesce
- * operation that sees us in a partial state.
+ * No locking needed, as the edata must be in active state, which
+ * prevents other threads from accessing the edata.
*/
- extent_lock(tsdn, extent);
-
- if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
- &elm_a, &elm_b)) {
- extent_unlock(tsdn, extent);
+ if (emap_register_boundary(tsdn, pac->emap, edata, SC_NSIZES,
+ /* slab */ false)) {
return true;
}
- szind_t szind = extent_szind_get_maybe_invalid(extent);
- bool slab = extent_slab_get(extent);
- extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
- if (slab) {
- extent_interior_register(tsdn, rtree_ctx, extent, szind);
- }
-
- extent_unlock(tsdn, extent);
-
if (config_prof && gdump_add) {
- extent_gdump_add(tsdn, extent);
+ extent_gdump_add(tsdn, edata);
}
return false;
}
static bool
-extent_register(tsdn_t *tsdn, extent_t *extent) {
- return extent_register_impl(tsdn, extent, true);
+extent_register(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
+ return extent_register_impl(tsdn, pac, edata, true);
}
static bool
-extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
- return extent_register_impl(tsdn, extent, false);
+extent_register_no_gdump_add(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
+ return extent_register_impl(tsdn, pac, edata, false);
}
static void
-extent_reregister(tsdn_t *tsdn, extent_t *extent) {
- bool err = extent_register(tsdn, extent);
+extent_reregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
+ bool err = extent_register(tsdn, pac, edata);
assert(!err);
}
/*
- * Removes all pointers to the given extent from the global rtree indices for
- * its interior. This is relevant for slab extents, for which we need to do
- * metadata lookups at places other than the head of the extent. We deregister
- * on the interior, then, when an extent moves from being an active slab to an
- * inactive state.
- */
-static void
-extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
- extent_t *extent) {
- size_t i;
-
- assert(extent_slab_get(extent));
-
- for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
- rtree_clear(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
- LG_PAGE));
- }
-}
-
-/*
* Removes all pointers to the given extent from the global rtree.
*/
static void
-extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- rtree_leaf_elm_t *elm_a, *elm_b;
- extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
- &elm_a, &elm_b);
-
- extent_lock(tsdn, extent);
-
- extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false);
- if (extent_slab_get(extent)) {
- extent_interior_deregister(tsdn, rtree_ctx, extent);
- extent_slab_set(extent, false);
- }
-
- extent_unlock(tsdn, extent);
+extent_deregister_impl(tsdn_t *tsdn, pac_t *pac, edata_t *edata,
+ bool gdump) {
+ emap_deregister_boundary(tsdn, pac->emap, edata);
if (config_prof && gdump) {
- extent_gdump_sub(tsdn, extent);
+ extent_gdump_sub(tsdn, edata);
}
}
static void
-extent_deregister(tsdn_t *tsdn, extent_t *extent) {
- extent_deregister_impl(tsdn, extent, true);
+extent_deregister(tsdn_t *tsdn, pac_t *pac, edata_t *edata) {
+ extent_deregister_impl(tsdn, pac, edata, true);
}
static void
-extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
- extent_deregister_impl(tsdn, extent, false);
+extent_deregister_no_gdump_sub(tsdn_t *tsdn, pac_t *pac,
+ edata_t *edata) {
+ extent_deregister_impl(tsdn, pac, edata, false);
}
/*
- * Tries to find and remove an extent from extents that can be used for the
+ * Tries to find and remove an extent from ecache that can be used for the
* given allocation request.
*/
-static extent_t *
-extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
- bool growing_retained) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+static edata_t *
+extent_recycle_extract(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
+ bool guarded) {
+ malloc_mutex_assert_owner(tsdn, &ecache->mtx);
assert(alignment > 0);
- if (config_debug && new_addr != NULL) {
+ if (config_debug && expand_edata != NULL) {
/*
- * Non-NULL new_addr has two use cases:
- *
- * 1) Recycle a known-extant extent, e.g. during purging.
- * 2) Perform in-place expanding reallocation.
- *
- * Regardless of use case, new_addr must either refer to a
- * non-existing extent, or to the base of an extant extent,
- * since only active slabs support interior lookups (which of
- * course cannot be recycled).
+ * Non-NULL expand_edata indicates in-place expanding realloc.
+ * new_addr must either refer to a non-existing extent, or to
+ * the base of an extant extent, since only active slabs support
+ * interior lookups (which of course cannot be recycled).
*/
+ void *new_addr = edata_past_get(expand_edata);
assert(PAGE_ADDR2BASE(new_addr) == new_addr);
- assert(pad == 0);
assert(alignment <= PAGE);
}
- size_t esize = size + pad;
- malloc_mutex_lock(tsdn, &extents->mtx);
- extent_hooks_assure_initialized(arena, r_extent_hooks);
- extent_t *extent;
- if (new_addr != NULL) {
- extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr,
- false);
- if (extent != NULL) {
- /*
- * We might null-out extent to report an error, but we
- * still need to unlock the associated mutex after.
- */
- extent_t *unlock_extent = extent;
- assert(extent_base_get(extent) == new_addr);
- if (extent_arena_get(extent) != arena ||
- extent_size_get(extent) < esize ||
- extent_state_get(extent) !=
- extents_state_get(extents)) {
- extent = NULL;
+ edata_t *edata;
+ eset_t *eset = guarded ? &ecache->guarded_eset : &ecache->eset;
+ if (expand_edata != NULL) {
+ edata = emap_try_acquire_edata_neighbor_expand(tsdn, pac->emap,
+ expand_edata, EXTENT_PAI_PAC, ecache->state);
+ if (edata != NULL) {
+ extent_assert_can_expand(expand_edata, edata);
+ if (edata_size_get(edata) < size) {
+ emap_release_edata(tsdn, pac->emap, edata,
+ ecache->state);
+ edata = NULL;
}
- extent_unlock(tsdn, unlock_extent);
}
} else {
- extent = extents_fit_locked(tsdn, arena, extents, esize,
- alignment);
+ /*
+ * A large extent might be broken up from its original size to
+ * some small size to satisfy a small request. When that small
+ * request is freed, though, it won't merge back with the larger
+ * extent if delayed coalescing is on. The large extent can
+	 * then no longer satisfy a request for its original size. To
+ * limit this effect, when delayed coalescing is enabled, we
+ * put a cap on how big an extent we can split for a request.
+ */
+ unsigned lg_max_fit = ecache->delay_coalesce
+ ? (unsigned)opt_lg_extent_max_active_fit : SC_PTR_BITS;
+
+ /*
+ * If split and merge are not allowed (Windows w/o retain), try
+ * exact fit only.
+ *
+		 * For simplicity, splitting guarded extents is not
+		 * supported; hence we use exact fit only for guarded
+ * allocations.
+ */
+ bool exact_only = (!maps_coalesce && !opt_retain) || guarded;
+ edata = eset_fit(eset, size, alignment, exact_only,
+ lg_max_fit);
}
- if (extent == NULL) {
- malloc_mutex_unlock(tsdn, &extents->mtx);
+ if (edata == NULL) {
return NULL;
}
+ assert(!guarded || edata_guarded_get(edata));
+ extent_activate_locked(tsdn, pac, ecache, eset, edata);
- extent_activate_locked(tsdn, arena, extents, extent);
- malloc_mutex_unlock(tsdn, &extents->mtx);
-
- return extent;
+ return edata;
}
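
/*
 * The fit policy above, restated as a stand-alone predicate (sketch,
 * not part of the patch): guarded extents and non-splittable platforms
 * (Windows without retain) must take an exact-size extent or nothing;
 * all other cases may split a larger extent, subject to the
 * delay_coalesce cap.
 */
#include <stdbool.h>

static bool
must_fit_exactly(bool maps_coalesce, bool opt_retain, bool guarded) {
	return (!maps_coalesce && !opt_retain) || guarded;
}
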
/*
* Given an allocation request and an extent guaranteed to be able to satisfy
- * it, this splits off lead and trail extents, leaving extent pointing to an
+ * it, this splits off lead and trail extents, leaving edata pointing to an
* extent satisfying the allocation.
- * This function doesn't put lead or trail into any extents_t; it's the caller's
+ * This function doesn't put lead or trail into any ecache; it's the caller's
* job to ensure that they can be reused.
*/
typedef enum {
/*
- * Split successfully. lead, extent, and trail, are modified to extents
+ * Split successfully. lead, edata, and trail, are modified to extents
* describing the ranges before, in, and after the given allocation.
*/
extent_split_interior_ok,
/*
* The extent can't satisfy the given allocation request. None of the
- * input extent_t *s are touched.
+ * input edata_t *s are touched.
*/
extent_split_interior_cant_alloc,
/*
* In a potentially invalid state. Must leak (if *to_leak is non-NULL),
* and salvage what's still salvageable (if *to_salvage is non-NULL).
- * None of lead, extent, or trail are valid.
+ * None of lead, edata, or trail are valid.
*/
extent_split_interior_error
} extent_split_interior_result_t;
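
/*
 * Sketch, not part of the patch, of the lead/trail arithmetic that
 * extent_split_interior() performs:
 *
 *   |<- leadsize ->|<----- size ----->|<- trailsize ->|
 *   base        aligned base                       end
 *
 * Assumes power-of-two alignment; names are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

typedef struct {
	size_t leadsize;	/* split off before the allocation */
	size_t trailsize;	/* split off after it */
} split_plan_t;

/* Returns 0 on success, -1 if the extent cannot hold the request. */
static int
plan_split(uintptr_t base, size_t extent_size, size_t size,
    size_t alignment, split_plan_t *plan) {
	uintptr_t aligned = (base + (alignment - 1)) &
	    ~((uintptr_t)alignment - 1);
	size_t leadsize = (size_t)(aligned - base);
	if (extent_size < leadsize + size) {
		return -1;
	}
	plan->leadsize = leadsize;
	plan->trailsize = extent_size - leadsize - size;
	return 0;
}
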
static extent_split_interior_result_t
-extent_split_interior(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
+extent_split_interior(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
/* The result of splitting, in case of success. */
- extent_t **extent, extent_t **lead, extent_t **trail,
+ edata_t **edata, edata_t **lead, edata_t **trail,
/* The mess to clean up, in case of error. */
- extent_t **to_leak, extent_t **to_salvage,
- void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
- szind_t szind, bool growing_retained) {
- size_t esize = size + pad;
- size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
- PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
- assert(new_addr == NULL || leadsize == 0);
- if (extent_size_get(*extent) < leadsize + esize) {
+ edata_t **to_leak, edata_t **to_salvage,
+ edata_t *expand_edata, size_t size, size_t alignment) {
+ size_t leadsize = ALIGNMENT_CEILING((uintptr_t)edata_base_get(*edata),
+ PAGE_CEILING(alignment)) - (uintptr_t)edata_base_get(*edata);
+ assert(expand_edata == NULL || leadsize == 0);
+ if (edata_size_get(*edata) < leadsize + size) {
return extent_split_interior_cant_alloc;
}
- size_t trailsize = extent_size_get(*extent) - leadsize - esize;
+ size_t trailsize = edata_size_get(*edata) - leadsize - size;
*lead = NULL;
*trail = NULL;
@@ -1001,11 +494,11 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
/* Split the lead. */
if (leadsize != 0) {
- *lead = *extent;
- *extent = extent_split_impl(tsdn, arena, r_extent_hooks,
- *lead, leadsize, SC_NSIZES, false, esize + trailsize, szind,
- slab, growing_retained);
- if (*extent == NULL) {
+ assert(!edata_guarded_get(*edata));
+ *lead = *edata;
+ *edata = extent_split_impl(tsdn, pac, ehooks, *lead, leadsize,
+	    size + trailsize, /* holding_core_locks */ true);
+ if (*edata == NULL) {
*to_leak = *lead;
*lead = NULL;
return extent_split_interior_error;
@@ -1014,36 +507,18 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
/* Split the trail. */
if (trailsize != 0) {
- *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
- esize, szind, slab, trailsize, SC_NSIZES, false,
- growing_retained);
+ assert(!edata_guarded_get(*edata));
+ *trail = extent_split_impl(tsdn, pac, ehooks, *edata, size,
+ trailsize, /* holding_core_locks */ true);
if (*trail == NULL) {
- *to_leak = *extent;
+ *to_leak = *edata;
*to_salvage = *lead;
*lead = NULL;
- *extent = NULL;
+ *edata = NULL;
return extent_split_interior_error;
}
}
- if (leadsize == 0 && trailsize == 0) {
- /*
- * Splitting causes szind to be set as a side effect, but no
- * splitting occurred.
- */
- extent_szind_set(*extent, szind);
- if (szind != SC_NSIZES) {
- rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_addr_get(*extent), szind, slab);
- if (slab && extent_size_get(*extent) > PAGE) {
- rtree_szind_slab_update(tsdn, &extents_rtree,
- rtree_ctx,
- (uintptr_t)extent_past_get(*extent) -
- (uintptr_t)PAGE, szind, slab);
- }
- }
- }
-
return extent_split_interior_ok;
}
@@ -1051,42 +526,43 @@ extent_split_interior(tsdn_t *tsdn, arena_t *arena,
* This fulfills the indicated allocation request out of the given extent (which
* the caller should have ensured was big enough). If there's any unused space
* before or after the resulting allocation, that space is given its own extent
- * and put back into extents.
+ * and put back into ecache.
*/
-static extent_t *
-extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
- szind_t szind, extent_t *extent, bool growing_retained) {
- extent_t *lead;
- extent_t *trail;
- extent_t *to_leak;
- extent_t *to_salvage;
+static edata_t *
+extent_recycle_split(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *expand_edata, size_t size, size_t alignment,
+ edata_t *edata, bool growing_retained) {
+ assert(!edata_guarded_get(edata) || size == edata_size_get(edata));
+ malloc_mutex_assert_owner(tsdn, &ecache->mtx);
+
+ edata_t *lead;
+ edata_t *trail;
+ edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL);
+ edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL);
extent_split_interior_result_t result = extent_split_interior(
- tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
- &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
- growing_retained);
+ tsdn, pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage,
+ expand_edata, size, alignment);
if (!maps_coalesce && result != extent_split_interior_ok
&& !opt_retain) {
/*
* Split isn't supported (implies Windows w/o retain). Avoid
- * leaking the extents.
+ * leaking the extent.
*/
assert(to_leak != NULL && lead == NULL && trail == NULL);
- extent_deactivate(tsdn, arena, extents, to_leak);
+ extent_deactivate_locked(tsdn, pac, ecache, to_leak);
return NULL;
}
if (result == extent_split_interior_ok) {
if (lead != NULL) {
- extent_deactivate(tsdn, arena, extents, lead);
+ extent_deactivate_locked(tsdn, pac, ecache, lead);
}
if (trail != NULL) {
- extent_deactivate(tsdn, arena, extents, trail);
+ extent_deactivate_locked(tsdn, pac, ecache, trail);
}
- return extent;
+ return edata;
} else {
/*
* We should have picked an extent that was large enough to
@@ -1094,294 +570,144 @@ extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
*/
assert(result == extent_split_interior_error);
if (to_salvage != NULL) {
- extent_deregister(tsdn, to_salvage);
+ extent_deregister(tsdn, pac, to_salvage);
}
if (to_leak != NULL) {
- void *leak = extent_base_get(to_leak);
- extent_deregister_no_gdump_sub(tsdn, to_leak);
- extents_abandon_vm(tsdn, arena, r_extent_hooks, extents,
- to_leak, growing_retained);
- assert(extent_lock_from_addr(tsdn, rtree_ctx, leak,
- false) == NULL);
+ extent_deregister_no_gdump_sub(tsdn, pac, to_leak);
+ /*
+			 * May go down the purge path (which assumes no ecache
+			 * locks). Only happens with OOM-caused split failures.
+ */
+ malloc_mutex_unlock(tsdn, &ecache->mtx);
+ extents_abandon_vm(tsdn, pac, ehooks, ecache, to_leak,
+ growing_retained);
+ malloc_mutex_lock(tsdn, &ecache->mtx);
}
return NULL;
}
unreachable();
}
-static bool
-extent_need_manual_zero(arena_t *arena) {
- /*
-	 * Need to manually zero the extent on repopulating if either: 1) non-
-	 * default extent hooks installed (in which case the purge semantics may
- * change); or 2) transparent huge pages enabled.
- */
- return (!arena_has_default_hooks(arena) ||
- (opt_thp == thp_mode_always));
-}
-
/*
* Tries to satisfy the given allocation request by reusing one of the extents
- * in the given extents_t.
+ * in the given ecache_t.
*/
-static extent_t *
-extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, void *new_addr, size_t size, size_t pad,
- size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
- bool growing_retained) {
+static edata_t *
+extent_recycle(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
+ edata_t *expand_edata, size_t size, size_t alignment, bool zero,
+ bool *commit, bool growing_retained, bool guarded) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
- assert(new_addr == NULL || !slab);
- assert(pad == 0 || !slab);
- assert(!*zero || !slab);
+ assert(!guarded || expand_edata == NULL);
+ assert(!guarded || alignment <= PAGE);
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ malloc_mutex_lock(tsdn, &ecache->mtx);
- extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
- rtree_ctx, extents, new_addr, size, pad, alignment, slab,
- growing_retained);
- if (extent == NULL) {
+ edata_t *edata = extent_recycle_extract(tsdn, pac, ehooks, ecache,
+ expand_edata, size, alignment, guarded);
+ if (edata == NULL) {
+ malloc_mutex_unlock(tsdn, &ecache->mtx);
return NULL;
}
- extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
- extents, new_addr, size, pad, alignment, slab, szind, extent,
- growing_retained);
- if (extent == NULL) {
+ edata = extent_recycle_split(tsdn, pac, ehooks, ecache, expand_edata,
+ size, alignment, edata, growing_retained);
+ malloc_mutex_unlock(tsdn, &ecache->mtx);
+ if (edata == NULL) {
return NULL;
}
- if (*commit && !extent_committed_get(extent)) {
- if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
- 0, extent_size_get(extent), growing_retained)) {
- extent_record(tsdn, arena, r_extent_hooks, extents,
- extent, growing_retained);
- return NULL;
- }
- if (!extent_need_manual_zero(arena)) {
- extent_zeroed_set(extent, true);
- }
- }
-
- if (extent_committed_get(extent)) {
- *commit = true;
- }
- if (extent_zeroed_get(extent)) {
- *zero = true;
- }
-
- if (pad != 0) {
- extent_addr_randomize(tsdn, extent, alignment);
- }
- assert(extent_state_get(extent) == extent_state_active);
- if (slab) {
- extent_slab_set(extent, slab);
- extent_interior_register(tsdn, rtree_ctx, extent, szind);
- }
-
- if (*zero) {
- void *addr = extent_base_get(extent);
- if (!extent_zeroed_get(extent)) {
- size_t size = extent_size_get(extent);
- if (extent_need_manual_zero(arena) ||
- pages_purge_forced(addr, size)) {
- memset(addr, 0, size);
- }
- } else if (config_debug) {
- size_t *p = (size_t *)(uintptr_t)addr;
- /* Check the first page only. */
- for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
- assert(p[i] == 0);
- }
- }
- }
- return extent;
-}
-
-/*
- * If the caller specifies (!*zero), it is still possible to receive zeroed
- * memory, in which case *zero is toggled to true. arena_extent_alloc() takes
- * advantage of this to avoid demanding zeroed extents, but taking advantage of
- * them if they are returned.
- */
-static void *
-extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
- void *ret;
-
- assert(size != 0);
- assert(alignment != 0);
-
- /* "primary" dss. */
- if (have_dss && dss_prec == dss_prec_primary && (ret =
- extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
- commit)) != NULL) {
- return ret;
- }
- /* mmap. */
- if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
- != NULL) {
- return ret;
- }
- /* "secondary" dss. */
- if (have_dss && dss_prec == dss_prec_secondary && (ret =
- extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
- commit)) != NULL) {
- return ret;
- }
-
- /* All strategies for allocation failed. */
- return NULL;
-}
-
-static void *
-extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
- size_t size, size_t alignment, bool *zero, bool *commit) {
- void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
- commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
- ATOMIC_RELAXED));
- if (have_madvise_huge && ret) {
- pages_set_thp_state(ret, size);
+ assert(edata_state_get(edata) == extent_state_active);
+ if (extent_commit_zero(tsdn, ehooks, edata, *commit, zero,
+ growing_retained)) {
+ extent_record(tsdn, pac, ehooks, ecache, edata);
+ return NULL;
}
- return ret;
-}
-
-static void *
-extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
- tsdn_t *tsdn;
- arena_t *arena;
-
- tsdn = tsdn_fetch();
- arena = arena_get(tsdn, arena_ind, false);
- /*
- * The arena we're allocating on behalf of must have been initialized
- * already.
- */
- assert(arena != NULL);
-
- return extent_alloc_default_impl(tsdn, arena, new_addr, size,
- ALIGNMENT_CEILING(alignment, PAGE), zero, commit);
-}
-
-static void
-extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
- tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
- if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
+ if (edata_committed_get(edata)) {
/*
- * The only legitimate case of customized extent hooks for a0 is
- * hooks with no allocation activities. One such example is to
- * place metadata on pre-allocated resources such as huge pages.
- * In that case, rely on reentrancy_level checks to catch
- * infinite recursions.
+ * This reverses the purpose of this variable - previously it
+		 * This reverses the purpose of the variable: previously it
+		 * was treated as an input parameter; now it becomes an
+		 * output parameter, reporting whether the edata has actually
+		 * been committed.
- pre_reentrancy(tsd, NULL);
- } else {
- pre_reentrancy(tsd, arena);
+ *commit = true;
}
-}
-
-static void
-extent_hook_post_reentrancy(tsdn_t *tsdn) {
- tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
- post_reentrancy(tsd);
+ return edata;
}
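
/*
 * Hypothetical caller sketch, not part of the patch, for the in/out
 * *commit contract described above: pass in whether committed memory is
 * wanted, read back whether it was actually obtained.
 */
static edata_t *
recycle_committed_sketch(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
    ecache_t *ecache, size_t size) {
	bool commit = true;	/* request committed pages */
	edata_t *edata = extent_recycle(tsdn, pac, ehooks, ecache,
	    /* expand_edata */ NULL, size, PAGE, /* zero */ false, &commit,
	    /* growing_retained */ false, /* guarded */ false);
	/* On return, commit reports whether edata is actually committed. */
	return edata;
}
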
/*
* If virtual memory is retained, create increasingly larger extents from which
* to split requested extents in order to limit the total number of disjoint
- * virtual memory ranges retained by each arena.
+ * virtual memory ranges retained by each shard.
*/
-static extent_t *
-extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
- bool slab, szind_t szind, bool *zero, bool *commit) {
- malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
- assert(pad == 0 || !slab);
- assert(!*zero || !slab);
-
- size_t esize = size + pad;
- size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
+static edata_t *
+extent_grow_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ size_t size, size_t alignment, bool zero, bool *commit) {
+ malloc_mutex_assert_owner(tsdn, &pac->grow_mtx);
+
+ size_t alloc_size_min = size + PAGE_CEILING(alignment) - PAGE;
/* Beware size_t wrap-around. */
- if (alloc_size_min < esize) {
+ if (alloc_size_min < size) {
goto label_err;
}
/*
* Find the next extent size in the series that would be large enough to
* satisfy this request.
*/
- pszind_t egn_skip = 0;
- size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
- while (alloc_size < alloc_size_min) {
- egn_skip++;
- if (arena->extent_grow_next + egn_skip >=
- sz_psz2ind(SC_LARGE_MAXCLASS)) {
- /* Outside legal range. */
- goto label_err;
- }
- alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
+ size_t alloc_size;
+ pszind_t exp_grow_skip;
+ bool err = exp_grow_size_prepare(&pac->exp_grow, alloc_size_min,
+ &alloc_size, &exp_grow_skip);
+ if (err) {
+ goto label_err;
}
- extent_t *extent = extent_alloc(tsdn, arena);
- if (extent == NULL) {
+ edata_t *edata = edata_cache_get(tsdn, pac->edata_cache);
+ if (edata == NULL) {
goto label_err;
}
bool zeroed = false;
bool committed = false;
- void *ptr;
- if (*r_extent_hooks == &extent_hooks_default) {
- ptr = extent_alloc_default_impl(tsdn, arena, NULL,
- alloc_size, PAGE, &zeroed, &committed);
- } else {
- extent_hook_pre_reentrancy(tsdn, arena);
- ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
- alloc_size, PAGE, &zeroed, &committed,
- arena_ind_get(arena));
- extent_hook_post_reentrancy(tsdn);
- }
+ void *ptr = ehooks_alloc(tsdn, ehooks, NULL, alloc_size, PAGE, &zeroed,
+ &committed);
- extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES,
- arena_extent_sn_next(arena), extent_state_active, zeroed,
- committed, true, EXTENT_IS_HEAD);
if (ptr == NULL) {
- extent_dalloc(tsdn, arena, extent);
+ edata_cache_put(tsdn, pac->edata_cache, edata);
goto label_err;
}
- if (extent_register_no_gdump_add(tsdn, extent)) {
- extent_dalloc(tsdn, arena, extent);
+ edata_init(edata, ecache_ind_get(&pac->ecache_retained), ptr,
+ alloc_size, false, SC_NSIZES, extent_sn_next(pac),
+ extent_state_active, zeroed, committed, EXTENT_PAI_PAC,
+ EXTENT_IS_HEAD);
+
+ if (extent_register_no_gdump_add(tsdn, pac, edata)) {
+ edata_cache_put(tsdn, pac->edata_cache, edata);
goto label_err;
}
- if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
- *zero = true;
- }
- if (extent_committed_get(extent)) {
+ if (edata_committed_get(edata)) {
*commit = true;
}
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+ edata_t *lead;
+ edata_t *trail;
+ edata_t *to_leak JEMALLOC_CC_SILENCE_INIT(NULL);
+ edata_t *to_salvage JEMALLOC_CC_SILENCE_INIT(NULL);
- extent_t *lead;
- extent_t *trail;
- extent_t *to_leak;
- extent_t *to_salvage;
- extent_split_interior_result_t result = extent_split_interior(
- tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
- &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
- true);
+ extent_split_interior_result_t result = extent_split_interior(tsdn,
+ pac, ehooks, &edata, &lead, &trail, &to_leak, &to_salvage, NULL,
+ size, alignment);
if (result == extent_split_interior_ok) {
if (lead != NULL) {
- extent_record(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, lead, true);
+ extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
+ lead);
}
if (trail != NULL) {
- extent_record(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, trail, true);
+ extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
+ trail);
}
} else {
/*
@@ -1393,26 +719,32 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
if (config_prof) {
extent_gdump_add(tsdn, to_salvage);
}
- extent_record(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, to_salvage, true);
+ extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
+ to_salvage);
}
if (to_leak != NULL) {
- extent_deregister_no_gdump_sub(tsdn, to_leak);
- extents_abandon_vm(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, to_leak, true);
+ extent_deregister_no_gdump_sub(tsdn, pac, to_leak);
+ extents_abandon_vm(tsdn, pac, ehooks,
+ &pac->ecache_retained, to_leak, true);
}
goto label_err;
}
- if (*commit && !extent_committed_get(extent)) {
- if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
- extent_size_get(extent), true)) {
- extent_record(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, extent, true);
+ if (*commit && !edata_committed_get(edata)) {
+ if (extent_commit_impl(tsdn, ehooks, edata, 0,
+ edata_size_get(edata), true)) {
+ extent_record(tsdn, pac, ehooks,
+ &pac->ecache_retained, edata);
goto label_err;
}
- if (!extent_need_manual_zero(arena)) {
- extent_zeroed_set(extent, true);
+ /* A successful commit should return zeroed memory. */
+ if (config_debug) {
+ void *addr = edata_addr_get(edata);
+ size_t *p = (size_t *)(uintptr_t)addr;
+ /* Check the first page only. */
+ for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
+ assert(p[i] == 0);
+ }
}
}
@@ -1420,187 +752,74 @@ extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
* Increment extent_grow_next if doing so wouldn't exceed the allowed
* range.
*/
- if (arena->extent_grow_next + egn_skip + 1 <=
- arena->retain_grow_limit) {
- arena->extent_grow_next += egn_skip + 1;
- } else {
- arena->extent_grow_next = arena->retain_grow_limit;
- }
/* All opportunities for failure are past. */
- malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+ exp_grow_size_commit(&pac->exp_grow, exp_grow_skip);
+ malloc_mutex_unlock(tsdn, &pac->grow_mtx);
if (config_prof) {
/* Adjust gdump stats now that extent is final size. */
- extent_gdump_add(tsdn, extent);
- }
- if (pad != 0) {
- extent_addr_randomize(tsdn, extent, alignment);
+ extent_gdump_add(tsdn, edata);
}
- if (slab) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
- &rtree_ctx_fallback);
-
- extent_slab_set(extent, true);
- extent_interior_register(tsdn, rtree_ctx, extent, szind);
- }
- if (*zero && !extent_zeroed_get(extent)) {
- void *addr = extent_base_get(extent);
- size_t size = extent_size_get(extent);
- if (extent_need_manual_zero(arena) ||
- pages_purge_forced(addr, size)) {
- memset(addr, 0, size);
- }
+ if (zero && !edata_zeroed_get(edata)) {
+ ehooks_zero(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata));
}
-
- return extent;
+ return edata;
label_err:
- malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+ malloc_mutex_unlock(tsdn, &pac->grow_mtx);
return NULL;
}
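
/*
 * Minimal sketch, not part of the patch, of the geometric growth
 * policy: each retained grow picks the next size class at or above the
 * request and then advances the cursor, so successive mappings roughly
 * double instead of tracking individual request sizes. The real code
 * walks psz size classes via exp_grow_size_prepare()/_commit(); this
 * doubling stand-in is a simplification.
 */
#include <stddef.h>

static size_t
next_grow_size(size_t *cursor, size_t min) {
	size_t sz = *cursor;
	while (sz < min) {
		sz *= 2;
	}
	*cursor = sz * 2;	/* commit: the next attempt starts larger */
	return sz;
}
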
-static extent_t *
-extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
- size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
+static edata_t *
+extent_alloc_retained(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *expand_edata, size_t size, size_t alignment, bool zero,
+ bool *commit, bool guarded) {
assert(size != 0);
assert(alignment != 0);
- malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
+ malloc_mutex_lock(tsdn, &pac->grow_mtx);
- extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
- &arena->extents_retained, new_addr, size, pad, alignment, slab,
- szind, zero, commit, true);
- if (extent != NULL) {
- malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
+ edata_t *edata = extent_recycle(tsdn, pac, ehooks,
+ &pac->ecache_retained, expand_edata, size, alignment, zero, commit,
+ /* growing_retained */ true, guarded);
+ if (edata != NULL) {
+ malloc_mutex_unlock(tsdn, &pac->grow_mtx);
if (config_prof) {
- extent_gdump_add(tsdn, extent);
+ extent_gdump_add(tsdn, edata);
}
- } else if (opt_retain && new_addr == NULL) {
- extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
- pad, alignment, slab, szind, zero, commit);
- /* extent_grow_retained() always releases extent_grow_mtx. */
+ } else if (opt_retain && expand_edata == NULL && !guarded) {
+ edata = extent_grow_retained(tsdn, pac, ehooks, size,
+ alignment, zero, commit);
+ /* extent_grow_retained() always releases pac->grow_mtx. */
} else {
- malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
- }
- malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
-
- return extent;
-}
-
-static extent_t *
-extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
- size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
- size_t esize = size + pad;
- extent_t *extent = extent_alloc(tsdn, arena);
- if (extent == NULL) {
- return NULL;
- }
- void *addr;
- size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
- if (*r_extent_hooks == &extent_hooks_default) {
- /* Call directly to propagate tsdn. */
- addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
- palignment, zero, commit);
- } else {
- extent_hook_pre_reentrancy(tsdn, arena);
- addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
- esize, palignment, zero, commit, arena_ind_get(arena));
- extent_hook_post_reentrancy(tsdn);
- }
- if (addr == NULL) {
- extent_dalloc(tsdn, arena, extent);
- return NULL;
- }
- extent_init(extent, arena, addr, esize, slab, szind,
- arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
- true, EXTENT_NOT_HEAD);
- if (pad != 0) {
- extent_addr_randomize(tsdn, extent, alignment);
- }
- if (extent_register(tsdn, extent)) {
- extent_dalloc(tsdn, arena, extent);
- return NULL;
- }
-
- return extent;
-}
-
-extent_t *
-extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
- size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, 0);
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
-
- extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
- new_addr, size, pad, alignment, slab, szind, zero, commit);
- if (extent == NULL) {
- if (opt_retain && new_addr != NULL) {
- /*
- * When retain is enabled and new_addr is set, we do not
- * attempt extent_alloc_wrapper_hard which does mmap
- * that is very unlikely to succeed (unless it happens
- * to be at the end).
- */
- return NULL;
- }
- extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
- new_addr, size, pad, alignment, slab, szind, zero, commit);
- }
-
- assert(extent == NULL || extent_dumpable_get(extent));
- return extent;
-}
-
-static bool
-extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
- const extent_t *outer) {
- assert(extent_arena_get(inner) == arena);
- if (extent_arena_get(outer) != arena) {
- return false;
- }
-
- assert(extent_state_get(inner) == extent_state_active);
- if (extent_state_get(outer) != extents->state) {
- return false;
- }
-
- if (extent_committed_get(inner) != extent_committed_get(outer)) {
- return false;
+ malloc_mutex_unlock(tsdn, &pac->grow_mtx);
}
+ malloc_mutex_assert_not_owner(tsdn, &pac->grow_mtx);
- return true;
+ return edata;
}
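
/*
 * Sketch, not part of the patch, of the fallback order around
 * extent_alloc_retained(), mirroring ecache_alloc_grow() above but
 * omitting the retain/guarded early returns:
 */
static edata_t *
alloc_order_sketch(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
    size_t alignment, bool zero, bool *commit) {
	/* 1. + 2. Recycle retained VM; grow geometrically if allowed. */
	edata_t *edata = extent_alloc_retained(tsdn, pac, ehooks,
	    /* expand_edata */ NULL, size, alignment, zero, commit,
	    /* guarded */ false);
	if (edata == NULL) {
		/* 3. Fall back to a fresh mapping from the OS. */
		edata = extent_alloc_wrapper(tsdn, pac, ehooks,
		    /* new_addr */ NULL, size, alignment, zero, commit,
		    /* growing_retained */ false);
	}
	return edata;
}
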
static bool
-extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
- bool growing_retained) {
- assert(extent_can_coalesce(arena, extents, inner, outer));
-
- extent_activate_locked(tsdn, arena, extents, outer);
-
- malloc_mutex_unlock(tsdn, &extents->mtx);
- bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
- forward ? inner : outer, forward ? outer : inner, growing_retained);
- malloc_mutex_lock(tsdn, &extents->mtx);
-
+extent_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
+ edata_t *inner, edata_t *outer, bool forward) {
+ extent_assert_can_coalesce(inner, outer);
+ eset_remove(&ecache->eset, outer);
+
+ bool err = extent_merge_impl(tsdn, pac, ehooks,
+ forward ? inner : outer, forward ? outer : inner,
+ /* holding_core_locks */ true);
if (err) {
- extent_deactivate_locked(tsdn, arena, extents, outer);
+ extent_deactivate_check_state_locked(tsdn, pac, ecache, outer,
+ extent_state_merging);
}
return err;
}
-static extent_t *
-extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- extent_t *extent, bool *coalesced, bool growing_retained,
- bool inactive_only) {
+static edata_t *
+extent_try_coalesce_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *edata, bool *coalesced) {
+ assert(!edata_guarded_get(edata));
/*
* We avoid checking / locking inactive neighbors for large size
* classes, since they are eagerly coalesced on deallocation which can
@@ -1615,470 +834,333 @@ extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena,
again = false;
/* Try to coalesce forward. */
- extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
- extent_past_get(extent), inactive_only);
+ edata_t *next = emap_try_acquire_edata_neighbor(tsdn, pac->emap,
+ edata, EXTENT_PAI_PAC, ecache->state, /* forward */ true);
if (next != NULL) {
- /*
- * extents->mtx only protects against races for
- * like-state extents, so call extent_can_coalesce()
- * before releasing next's pool lock.
- */
- bool can_coalesce = extent_can_coalesce(arena, extents,
- extent, next);
-
- extent_unlock(tsdn, next);
-
- if (can_coalesce && !extent_coalesce(tsdn, arena,
- r_extent_hooks, extents, extent, next, true,
- growing_retained)) {
- if (extents->delay_coalesce) {
+ if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata,
+ next, true)) {
+ if (ecache->delay_coalesce) {
/* Do minimal coalescing. */
*coalesced = true;
- return extent;
+ return edata;
}
again = true;
}
}
/* Try to coalesce backward. */
- extent_t *prev = NULL;
- if (extent_before_get(extent) != NULL) {
- prev = extent_lock_from_addr(tsdn, rtree_ctx,
- extent_before_get(extent), inactive_only);
- }
+ edata_t *prev = emap_try_acquire_edata_neighbor(tsdn, pac->emap,
+ edata, EXTENT_PAI_PAC, ecache->state, /* forward */ false);
if (prev != NULL) {
- bool can_coalesce = extent_can_coalesce(arena, extents,
- extent, prev);
- extent_unlock(tsdn, prev);
-
- if (can_coalesce && !extent_coalesce(tsdn, arena,
- r_extent_hooks, extents, extent, prev, false,
- growing_retained)) {
- extent = prev;
- if (extents->delay_coalesce) {
+ if (!extent_coalesce(tsdn, pac, ehooks, ecache, edata,
+ prev, false)) {
+ edata = prev;
+ if (ecache->delay_coalesce) {
/* Do minimal coalescing. */
*coalesced = true;
- return extent;
+ return edata;
}
again = true;
}
}
} while (again);
- if (extents->delay_coalesce) {
+ if (ecache->delay_coalesce) {
*coalesced = false;
}
- return extent;
+ return edata;
+}
+
+static edata_t *
+extent_try_coalesce(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *edata, bool *coalesced) {
+ return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata,
+ coalesced);
}
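
/*
 * Merge-direction sketch, not part of the patch, for extent_coalesce():
 * extent_merge_impl() takes its arguments in address order, so
 *
 *   forward:  [ edata ][ next ]  ->  merge(edata, next)
 *   backward: [ prev ][ edata ]  ->  merge(prev, edata)
 *
 * which is exactly the `forward ? inner : outer` selection above.
 */
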
-static extent_t *
-extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- extent_t *extent, bool *coalesced, bool growing_retained) {
- return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
- extents, extent, coalesced, growing_retained, false);
+static edata_t *
+extent_try_coalesce_large(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ ecache_t *ecache, edata_t *edata, bool *coalesced) {
+ return extent_try_coalesce_impl(tsdn, pac, ehooks, ecache, edata,
+ coalesced);
}
-static extent_t *
-extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
- extent_t *extent, bool *coalesced, bool growing_retained) {
- return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
- extents, extent, coalesced, growing_retained, true);
+/* Purge a single extent to retained / unmapped directly. */
+static void
+extent_maximally_purge(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *edata) {
+ size_t extent_size = edata_size_get(edata);
+ extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
+ if (config_stats) {
+ /* Update stats accordingly. */
+ LOCKEDINT_MTX_LOCK(tsdn, *pac->stats_mtx);
+ locked_inc_u64(tsdn,
+ LOCKEDINT_MTX(*pac->stats_mtx),
+ &pac->stats->decay_dirty.nmadvise, 1);
+ locked_inc_u64(tsdn,
+ LOCKEDINT_MTX(*pac->stats_mtx),
+ &pac->stats->decay_dirty.purged,
+ extent_size >> LG_PAGE);
+ LOCKEDINT_MTX_UNLOCK(tsdn, *pac->stats_mtx);
+ atomic_fetch_sub_zu(&pac->stats->pac_mapped, extent_size,
+ ATOMIC_RELAXED);
+ }
}
/*
 * Handles the metadata-management portion of putting an unused extent into the
- * given extents_t (coalesces, deregisters slab interiors, the heap operations).
+ * given ecache_t (coalesces and inserts into the eset).
*/
-static void
-extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
- extents_t *extents, extent_t *extent, bool growing_retained) {
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
-
- assert((extents_state_get(extents) != extent_state_dirty &&
- extents_state_get(extents) != extent_state_muzzy) ||
- !extent_zeroed_get(extent));
-
- malloc_mutex_lock(tsdn, &extents->mtx);
- extent_hooks_assure_initialized(arena, r_extent_hooks);
-
- extent_szind_set(extent, SC_NSIZES);
- if (extent_slab_get(extent)) {
- extent_interior_deregister(tsdn, rtree_ctx, extent);
- extent_slab_set(extent, false);
- }
+void
+extent_record(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, ecache_t *ecache,
+ edata_t *edata) {
+ assert((ecache->state != extent_state_dirty &&
+ ecache->state != extent_state_muzzy) ||
+ !edata_zeroed_get(edata));
+
+ malloc_mutex_lock(tsdn, &ecache->mtx);
- assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_base_get(extent), true) == extent);
+ emap_assert_mapped(tsdn, pac->emap, edata);
- if (!extents->delay_coalesce) {
- extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
- rtree_ctx, extents, extent, NULL, growing_retained);
- } else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) {
- assert(extents == &arena->extents_dirty);
+ if (edata_guarded_get(edata)) {
+ goto label_skip_coalesce;
+ }
+ if (!ecache->delay_coalesce) {
+ edata = extent_try_coalesce(tsdn, pac, ehooks, ecache, edata,
+ NULL);
+ } else if (edata_size_get(edata) >= SC_LARGE_MINCLASS) {
+ assert(ecache == &pac->ecache_dirty);
/* Always coalesce large extents eagerly. */
bool coalesced;
do {
- assert(extent_state_get(extent) == extent_state_active);
- extent = extent_try_coalesce_large(tsdn, arena,
- r_extent_hooks, rtree_ctx, extents, extent,
- &coalesced, growing_retained);
+ assert(edata_state_get(edata) == extent_state_active);
+ edata = extent_try_coalesce_large(tsdn, pac, ehooks,
+ ecache, edata, &coalesced);
} while (coalesced);
- if (extent_size_get(extent) >= oversize_threshold) {
+ if (edata_size_get(edata) >=
+ atomic_load_zu(&pac->oversize_threshold, ATOMIC_RELAXED)
+ && extent_may_force_decay(pac)) {
/* Shortcut to purge the oversize extent eagerly. */
- malloc_mutex_unlock(tsdn, &extents->mtx);
- arena_decay_extent(tsdn, arena, r_extent_hooks, extent);
+ malloc_mutex_unlock(tsdn, &ecache->mtx);
+ extent_maximally_purge(tsdn, pac, ehooks, edata);
return;
}
}
- extent_deactivate_locked(tsdn, arena, extents, extent);
+label_skip_coalesce:
+ extent_deactivate_locked(tsdn, pac, ecache, edata);
- malloc_mutex_unlock(tsdn, &extents->mtx);
+ malloc_mutex_unlock(tsdn, &ecache->mtx);
}
void
-extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
- extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
-
+extent_dalloc_gap(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *edata) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- if (extent_register(tsdn, extent)) {
- extent_dalloc(tsdn, arena, extent);
+ if (extent_register(tsdn, pac, edata)) {
+ edata_cache_put(tsdn, pac->edata_cache, edata);
return;
}
- extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
+ extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
}
static bool
-extent_may_dalloc(void) {
- /* With retain enabled, the default dalloc always fails. */
- return !opt_retain;
-}
-
-static bool
-extent_dalloc_default_impl(void *addr, size_t size) {
- if (!have_dss || !extent_in_dss(addr)) {
- return extent_dalloc_mmap(addr, size);
- }
- return true;
-}
-
-static bool
-extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- bool committed, unsigned arena_ind) {
- return extent_dalloc_default_impl(addr, size);
-}
-
-static bool
-extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent) {
+extent_dalloc_wrapper_try(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *edata) {
bool err;
- assert(extent_base_get(extent) != NULL);
- assert(extent_size_get(extent) != 0);
+ assert(edata_base_get(edata) != NULL);
+ assert(edata_size_get(edata) != 0);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- extent_addr_set(extent, extent_base_get(extent));
+ edata_addr_set(edata, edata_base_get(edata));
- extent_hooks_assure_initialized(arena, r_extent_hooks);
/* Try to deallocate. */
- if (*r_extent_hooks == &extent_hooks_default) {
- /* Call directly to propagate tsdn. */
- err = extent_dalloc_default_impl(extent_base_get(extent),
- extent_size_get(extent));
- } else {
- extent_hook_pre_reentrancy(tsdn, arena);
- err = ((*r_extent_hooks)->dalloc == NULL ||
- (*r_extent_hooks)->dalloc(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent),
- extent_committed_get(extent), arena_ind_get(arena)));
- extent_hook_post_reentrancy(tsdn);
- }
+ err = ehooks_dalloc(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), edata_committed_get(edata));
if (!err) {
- extent_dalloc(tsdn, arena, extent);
+ edata_cache_put(tsdn, pac->edata_cache, edata);
}
return err;
}
+edata_t *
+extent_alloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ void *new_addr, size_t size, size_t alignment, bool zero, bool *commit,
+ bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
+
+ edata_t *edata = edata_cache_get(tsdn, pac->edata_cache);
+ if (edata == NULL) {
+ return NULL;
+ }
+ size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
+ void *addr = ehooks_alloc(tsdn, ehooks, new_addr, size, palignment,
+ &zero, commit);
+ if (addr == NULL) {
+ edata_cache_put(tsdn, pac->edata_cache, edata);
+ return NULL;
+ }
+ edata_init(edata, ecache_ind_get(&pac->ecache_dirty), addr,
+ size, /* slab */ false, SC_NSIZES, extent_sn_next(pac),
+ extent_state_active, zero, *commit, EXTENT_PAI_PAC,
+ opt_retain ? EXTENT_IS_HEAD : EXTENT_NOT_HEAD);
+ /*
+	 * Retained memory is not counted towards gdump. gdump should be
+	 * updated only when an extent is allocated as a separate mapping,
+	 * i.e. when growing_retained is false.
+ */
+ bool gdump_add = !growing_retained;
+ if (extent_register_impl(tsdn, pac, edata, gdump_add)) {
+ edata_cache_put(tsdn, pac->edata_cache, edata);
+ return NULL;
+ }
+
+ return edata;
+}
+
void
-extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent) {
- assert(extent_dumpable_get(extent));
+extent_dalloc_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *edata) {
+ assert(edata_pai_get(edata) == EXTENT_PAI_PAC);
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
	/* Avoid calling the default extent_dalloc unless we have to. */
- if (*r_extent_hooks != &extent_hooks_default || extent_may_dalloc()) {
+ if (!ehooks_dalloc_will_fail(ehooks)) {
+ /* Remove guard pages for dalloc / unmap. */
+ if (edata_guarded_get(edata)) {
+ assert(ehooks_are_default(ehooks));
+ san_unguard_pages_two_sided(tsdn, ehooks, edata,
+ pac->emap);
+ }
/*
* Deregister first to avoid a race with other allocating
* threads, and reregister if deallocation fails.
*/
- extent_deregister(tsdn, extent);
- if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks,
- extent)) {
+ extent_deregister(tsdn, pac, edata);
+ if (!extent_dalloc_wrapper_try(tsdn, pac, ehooks, edata)) {
return;
}
- extent_reregister(tsdn, extent);
+ extent_reregister(tsdn, pac, edata);
}
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_pre_reentrancy(tsdn, arena);
- }
/* Try to decommit; purge if that fails. */
bool zeroed;
- if (!extent_committed_get(extent)) {
+ if (!edata_committed_get(edata)) {
zeroed = true;
- } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
- 0, extent_size_get(extent))) {
+ } else if (!extent_decommit_wrapper(tsdn, ehooks, edata, 0,
+ edata_size_get(edata))) {
zeroed = true;
- } else if ((*r_extent_hooks)->purge_forced != NULL &&
- !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent), 0,
- extent_size_get(extent), arena_ind_get(arena))) {
+ } else if (!ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), 0, edata_size_get(edata))) {
zeroed = true;
- } else if (extent_state_get(extent) == extent_state_muzzy ||
- ((*r_extent_hooks)->purge_lazy != NULL &&
- !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent), 0,
- extent_size_get(extent), arena_ind_get(arena)))) {
+ } else if (edata_state_get(edata) == extent_state_muzzy ||
+ !ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), 0, edata_size_get(edata))) {
zeroed = false;
} else {
zeroed = false;
}
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_post_reentrancy(tsdn);
- }
- extent_zeroed_set(extent, zeroed);
+ edata_zeroed_set(edata, zeroed);
if (config_prof) {
- extent_gdump_sub(tsdn, extent);
+ extent_gdump_sub(tsdn, edata);
}
- extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
- extent, false);
-}
-
-static void
-extent_destroy_default_impl(void *addr, size_t size) {
- if (!have_dss || !extent_in_dss(addr)) {
- pages_unmap(addr, size);
- }
-}
-
-static void
-extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- bool committed, unsigned arena_ind) {
- extent_destroy_default_impl(addr, size);
+ extent_record(tsdn, pac, ehooks, &pac->ecache_retained, edata);
}
void
-extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent) {
- assert(extent_base_get(extent) != NULL);
- assert(extent_size_get(extent) != 0);
+extent_destroy_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *edata) {
+ assert(edata_base_get(edata) != NULL);
+ assert(edata_size_get(edata) != 0);
+ extent_state_t state = edata_state_get(edata);
+ assert(state == extent_state_retained || state == extent_state_active);
+ assert(emap_edata_is_acquired(tsdn, pac->emap, edata));
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
- /* Deregister first to avoid a race with other allocating threads. */
- extent_deregister(tsdn, extent);
-
- extent_addr_set(extent, extent_base_get(extent));
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
- /* Try to destroy; silently fail otherwise. */
- if (*r_extent_hooks == &extent_hooks_default) {
- /* Call directly to propagate tsdn. */
- extent_destroy_default_impl(extent_base_get(extent),
- extent_size_get(extent));
- } else if ((*r_extent_hooks)->destroy != NULL) {
- extent_hook_pre_reentrancy(tsdn, arena);
- (*r_extent_hooks)->destroy(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent),
- extent_committed_get(extent), arena_ind_get(arena));
- extent_hook_post_reentrancy(tsdn);
+ if (edata_guarded_get(edata)) {
+ assert(opt_retain);
+ san_unguard_pages_pre_destroy(tsdn, ehooks, edata, pac->emap);
}
+ edata_addr_set(edata, edata_base_get(edata));
- extent_dalloc(tsdn, arena, extent);
-}
+ /* Try to destroy; silently fail otherwise. */
+ ehooks_destroy(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), edata_committed_get(edata));
-static bool
-extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t offset, size_t length, unsigned arena_ind) {
- return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
- length);
+ edata_cache_put(tsdn, pac->edata_cache, edata);
}
static bool
-extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length, bool growing_retained) {
+extent_commit_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length, bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_pre_reentrancy(tsdn, arena);
- }
- bool err = ((*r_extent_hooks)->commit == NULL ||
- (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
- extent_size_get(extent), offset, length, arena_ind_get(arena)));
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_post_reentrancy(tsdn);
- }
- extent_committed_set(extent, extent_committed_get(extent) || !err);
+ bool err = ehooks_commit(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), offset, length);
+ edata_committed_set(edata, edata_committed_get(edata) || !err);
return err;
}
bool
-extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length) {
- return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
- length, false);
-}
-
-static bool
-extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t offset, size_t length, unsigned arena_ind) {
- return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
- length);
+extent_commit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length) {
+ return extent_commit_impl(tsdn, ehooks, edata, offset, length,
+ /* growing_retained */ false);
}
bool
-extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length) {
+extent_decommit_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, 0);
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
-
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_pre_reentrancy(tsdn, arena);
- }
- bool err = ((*r_extent_hooks)->decommit == NULL ||
- (*r_extent_hooks)->decommit(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent), offset, length,
- arena_ind_get(arena)));
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_post_reentrancy(tsdn);
- }
- extent_committed_set(extent, extent_committed_get(extent) && err);
+ bool err = ehooks_decommit(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), offset, length);
+ edata_committed_set(edata, edata_committed_get(edata) && err);
return err;
}
-#ifdef PAGES_CAN_PURGE_LAZY
-static bool
-extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t offset, size_t length, unsigned arena_ind) {
- assert(addr != NULL);
- assert((offset & PAGE_MASK) == 0);
- assert(length != 0);
- assert((length & PAGE_MASK) == 0);
-
- return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
- length);
-}
-#endif
-
static bool
-extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length, bool growing_retained) {
+extent_purge_lazy_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length, bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
-
- if ((*r_extent_hooks)->purge_lazy == NULL) {
- return true;
- }
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_pre_reentrancy(tsdn, arena);
- }
- bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent), offset, length,
- arena_ind_get(arena));
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_post_reentrancy(tsdn);
- }
-
+ bool err = ehooks_purge_lazy(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), offset, length);
return err;
}
bool
-extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length) {
- return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
- offset, length, false);
-}
-
-#ifdef PAGES_CAN_PURGE_FORCED
-static bool
-extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
- size_t size, size_t offset, size_t length, unsigned arena_ind) {
- assert(addr != NULL);
- assert((offset & PAGE_MASK) == 0);
- assert(length != 0);
- assert((length & PAGE_MASK) == 0);
-
- return pages_purge_forced((void *)((uintptr_t)addr +
- (uintptr_t)offset), length);
+extent_purge_lazy_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length) {
+ return extent_purge_lazy_impl(tsdn, ehooks, edata, offset,
+ length, false);
}
-#endif
static bool
-extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length, bool growing_retained) {
+extent_purge_forced_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length, bool growing_retained) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
WITNESS_RANK_CORE, growing_retained ? 1 : 0);
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
-
- if ((*r_extent_hooks)->purge_forced == NULL) {
- return true;
- }
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_pre_reentrancy(tsdn, arena);
- }
- bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
- extent_base_get(extent), extent_size_get(extent), offset, length,
- arena_ind_get(arena));
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_post_reentrancy(tsdn);
- }
+ bool err = ehooks_purge_forced(tsdn, ehooks, edata_base_get(edata),
+ edata_size_get(edata), offset, length);
return err;
}
bool
-extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
- size_t length) {
- return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
- offset, length, false);
-}
-
-static bool
-extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
- size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
- if (!maps_coalesce) {
- /*
- * Without retain, only whole regions can be purged (required by
- * MEM_RELEASE on Windows) -- therefore disallow splitting. See
- * comments in extent_head_no_merge().
- */
- return !opt_retain;
- }
-
- return false;
+extent_purge_forced_wrapper(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ size_t offset, size_t length) {
+ return extent_purge_forced_impl(tsdn, ehooks, edata, offset, length,
+ false);
}
/*
@@ -2088,183 +1170,95 @@ extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
* with the trail (the higher addressed portion). This makes 'extent' the lead,
* and returns the trail (except in case of error).
*/
-static extent_t *
-extent_split_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
- szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
- bool growing_retained) {
- assert(extent_size_get(extent) == size_a + size_b);
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, growing_retained ? 1 : 0);
-
- extent_hooks_assure_initialized(arena, r_extent_hooks);
+static edata_t *
+extent_split_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *edata, size_t size_a, size_t size_b, bool holding_core_locks) {
+ assert(edata_size_get(edata) == size_a + size_b);
+ /* Only the shrink path may split w/o holding core locks. */
+ if (holding_core_locks) {
+ witness_assert_positive_depth_to_rank(
+ tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
+ } else {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+ }
- if ((*r_extent_hooks)->split == NULL) {
+ if (ehooks_split_will_fail(ehooks)) {
return NULL;
}
- extent_t *trail = extent_alloc(tsdn, arena);
+ edata_t *trail = edata_cache_get(tsdn, pac->edata_cache);
if (trail == NULL) {
goto label_error_a;
}
- extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
- size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
- extent_state_get(extent), extent_zeroed_get(extent),
- extent_committed_get(extent), extent_dumpable_get(extent),
- EXTENT_NOT_HEAD);
-
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
- {
- extent_t lead;
-
- extent_init(&lead, arena, extent_addr_get(extent), size_a,
- slab_a, szind_a, extent_sn_get(extent),
- extent_state_get(extent), extent_zeroed_get(extent),
- extent_committed_get(extent), extent_dumpable_get(extent),
- EXTENT_NOT_HEAD);
-
- extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
- true, &lead_elm_a, &lead_elm_b);
- }
- rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
- extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
- &trail_elm_a, &trail_elm_b);
-
- if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
- || trail_elm_b == NULL) {
+ edata_init(trail, edata_arena_ind_get(edata),
+ (void *)((uintptr_t)edata_base_get(edata) + size_a), size_b,
+ /* slab */ false, SC_NSIZES, edata_sn_get(edata),
+ edata_state_get(edata), edata_zeroed_get(edata),
+ edata_committed_get(edata), EXTENT_PAI_PAC, EXTENT_NOT_HEAD);
+ emap_prepare_t prepare;
+ bool err = emap_split_prepare(tsdn, pac->emap, &prepare, edata,
+ size_a, trail, size_b);
+ if (err) {
goto label_error_b;
}
- extent_lock2(tsdn, extent, trail);
+ /*
+ * No need to acquire trail or edata, because: 1) trail was new (just
+ * allocated); and 2) edata is either an active allocation (the shrink
+ * path), or in an acquired state (extracted from the ecache on the
+ * extent_recycle_split path).
+ */
+ assert(emap_edata_is_acquired(tsdn, pac->emap, edata));
+ assert(emap_edata_is_acquired(tsdn, pac->emap, trail));
+
+ err = ehooks_split(tsdn, ehooks, edata_base_get(edata), size_a + size_b,
+ size_a, size_b, edata_committed_get(edata));
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_pre_reentrancy(tsdn, arena);
- }
- bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
- size_a + size_b, size_a, size_b, extent_committed_get(extent),
- arena_ind_get(arena));
- if (*r_extent_hooks != &extent_hooks_default) {
- extent_hook_post_reentrancy(tsdn);
- }
if (err) {
- goto label_error_c;
+ goto label_error_b;
}
- extent_size_set(extent, size_a);
- extent_szind_set(extent, szind_a);
-
- extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
- szind_a, slab_a);
- extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
- szind_b, slab_b);
-
- extent_unlock2(tsdn, extent, trail);
+ edata_size_set(edata, size_a);
+ emap_split_commit(tsdn, pac->emap, &prepare, edata, size_a, trail,
+ size_b);
return trail;
-label_error_c:
- extent_unlock2(tsdn, extent, trail);
label_error_b:
- extent_dalloc(tsdn, arena, trail);
+ edata_cache_put(tsdn, pac->edata_cache, trail);
label_error_a:
return NULL;
}
-extent_t *
-extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
- szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
- return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
- szind_a, slab_a, size_b, szind_b, slab_b, false);
-}
-
-static bool
-extent_merge_default_impl(void *addr_a, void *addr_b) {
- if (!maps_coalesce && !opt_retain) {
- return true;
- }
- if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
- return true;
- }
-
- return false;
-}
-
-/*
- * Returns true if the given extents can't be merged because of their head bit
- * settings. Assumes the second extent has the higher address.
- */
-static bool
-extent_head_no_merge(extent_t *a, extent_t *b) {
- assert(extent_base_get(a) < extent_base_get(b));
- /*
- * When coalesce is not always allowed (Windows), only merge extents
- * from the same VirtualAlloc region under opt.retain (in which case
- * MEM_DECOMMIT is utilized for purging).
- */
- if (maps_coalesce) {
- return false;
- }
- if (!opt_retain) {
- return true;
- }
- /* If b is a head extent, disallow the cross-region merge. */
- if (extent_is_head_get(b)) {
- /*
- * Additionally, sn should not overflow with retain; sanity
- * check that different regions have unique sn.
- */
- assert(extent_sn_comp(a, b) != 0);
- return true;
- }
- assert(extent_sn_comp(a, b) == 0);
-
- return false;
+edata_t *
+extent_split_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *edata,
+ size_t size_a, size_t size_b, bool holding_core_locks) {
+ return extent_split_impl(tsdn, pac, ehooks, edata, size_a, size_b,
+ holding_core_locks);
}
static bool
-extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
- void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
- if (!maps_coalesce) {
- tsdn_t *tsdn = tsdn_fetch();
- extent_t *a = iealloc(tsdn, addr_a);
- extent_t *b = iealloc(tsdn, addr_b);
- if (extent_head_no_merge(a, b)) {
- return true;
- }
+extent_merge_impl(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, edata_t *a,
+ edata_t *b, bool holding_core_locks) {
+ /* Only the expanding path may merge w/o holding ecache locks. */
+ if (holding_core_locks) {
+ witness_assert_positive_depth_to_rank(
+ tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_CORE);
+ } else {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
}
- return extent_merge_default_impl(addr_a, addr_b);
-}
-static bool
-extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
- bool growing_retained) {
- witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
- WITNESS_RANK_CORE, growing_retained ? 1 : 0);
- assert(extent_base_get(a) < extent_base_get(b));
+ assert(edata_base_get(a) < edata_base_get(b));
+ assert(edata_arena_ind_get(a) == edata_arena_ind_get(b));
+ assert(edata_arena_ind_get(a) == ehooks_ind_get(ehooks));
+ emap_assert_mapped(tsdn, pac->emap, a);
+ emap_assert_mapped(tsdn, pac->emap, b);
- extent_hooks_assure_initialized(arena, r_extent_hooks);
-
- if ((*r_extent_hooks)->merge == NULL || extent_head_no_merge(a, b)) {
- return true;
- }
-
- bool err;
- if (*r_extent_hooks == &extent_hooks_default) {
- /* Call directly to propagate tsdn. */
- err = extent_merge_default_impl(extent_base_get(a),
- extent_base_get(b));
- } else {
- extent_hook_pre_reentrancy(tsdn, arena);
- err = (*r_extent_hooks)->merge(*r_extent_hooks,
- extent_base_get(a), extent_size_get(a), extent_base_get(b),
- extent_size_get(b), extent_committed_get(a),
- arena_ind_get(arena));
- extent_hook_post_reentrancy(tsdn);
- }
+ bool err = ehooks_merge(tsdn, ehooks, edata_base_get(a),
+ edata_size_get(a), edata_base_get(b), edata_size_get(b),
+ edata_committed_get(a));
if (err) {
return true;
@@ -2275,132 +1269,58 @@ extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
* owned, so the following code uses decomposed helper functions rather
* than extent_{,de}register() to do things in the right order.
*/
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
- extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
- &a_elm_b);
- extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
- &b_elm_b);
-
- extent_lock2(tsdn, a, b);
-
- if (a_elm_b != NULL) {
- rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
- SC_NSIZES, false);
- }
- if (b_elm_b != NULL) {
- rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
- SC_NSIZES, false);
- } else {
- b_elm_b = b_elm_a;
- }
-
- extent_size_set(a, extent_size_get(a) + extent_size_get(b));
- extent_szind_set(a, SC_NSIZES);
- extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
- extent_sn_get(a) : extent_sn_get(b));
- extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
+ emap_prepare_t prepare;
+ emap_merge_prepare(tsdn, pac->emap, &prepare, a, b);
- extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES,
- false);
+ assert(edata_state_get(a) == extent_state_active ||
+ edata_state_get(a) == extent_state_merging);
+ edata_state_set(a, extent_state_active);
+ edata_size_set(a, edata_size_get(a) + edata_size_get(b));
+ edata_sn_set(a, (edata_sn_get(a) < edata_sn_get(b)) ?
+ edata_sn_get(a) : edata_sn_get(b));
+ edata_zeroed_set(a, edata_zeroed_get(a) && edata_zeroed_get(b));
- extent_unlock2(tsdn, a, b);
+ emap_merge_commit(tsdn, pac->emap, &prepare, a, b);
- extent_dalloc(tsdn, extent_arena_get(b), b);
+ edata_cache_put(tsdn, pac->edata_cache, b);
return false;
}
bool
-extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
- extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
- return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
+extent_merge_wrapper(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks,
+ edata_t *a, edata_t *b) {
+ return extent_merge_impl(tsdn, pac, ehooks, a, b,
+ /* holding_core_locks */ false);
}
bool
-extent_boot(void) {
- if (rtree_new(&extents_rtree, true)) {
- return true;
- }
+extent_commit_zero(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ bool commit, bool zero, bool growing_retained) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, growing_retained ? 1 : 0);
- if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
- WITNESS_RANK_EXTENT_POOL)) {
- return true;
+ if (commit && !edata_committed_get(edata)) {
+ if (extent_commit_impl(tsdn, ehooks, edata, 0,
+ edata_size_get(edata), growing_retained)) {
+ return true;
+ }
}
-
- if (have_dss) {
- extent_dss_boot();
+ if (zero && !edata_zeroed_get(edata)) {
+ void *addr = edata_base_get(edata);
+ size_t size = edata_size_get(edata);
+ ehooks_zero(tsdn, ehooks, addr, size);
}
-
return false;
}
-void
-extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
- size_t *nfree, size_t *nregs, size_t *size) {
- assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
-
- const extent_t *extent = iealloc(tsdn, ptr);
- if (unlikely(extent == NULL)) {
- *nfree = *nregs = *size = 0;
- return;
- }
-
- *size = extent_size_get(extent);
- if (!extent_slab_get(extent)) {
- *nfree = 0;
- *nregs = 1;
- } else {
- *nfree = extent_nfree_get(extent);
- *nregs = bin_infos[extent_szind_get(extent)].nregs;
- assert(*nfree <= *nregs);
- assert(*nfree * extent_usize_get(extent) <= *size);
- }
-}
-
-void
-extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
- size_t *nfree, size_t *nregs, size_t *size,
- size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr) {
- assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
- && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);
-
- const extent_t *extent = iealloc(tsdn, ptr);
- if (unlikely(extent == NULL)) {
- *nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
- *slabcur_addr = NULL;
- return;
- }
+bool
+extent_boot(void) {
+ assert(sizeof(slab_data_t) >= sizeof(e_prof_info_t));
- *size = extent_size_get(extent);
- if (!extent_slab_get(extent)) {
- *nfree = *bin_nfree = *bin_nregs = 0;
- *nregs = 1;
- *slabcur_addr = NULL;
- return;
+ if (have_dss) {
+ extent_dss_boot();
}
- *nfree = extent_nfree_get(extent);
- const szind_t szind = extent_szind_get(extent);
- *nregs = bin_infos[szind].nregs;
- assert(*nfree <= *nregs);
- assert(*nfree * extent_usize_get(extent) <= *size);
-
- const arena_t *arena = extent_arena_get(extent);
- assert(arena != NULL);
- const unsigned binshard = extent_binshard_get(extent);
- bin_t *bin = &arena->bins[szind].bin_shards[binshard];
-
- malloc_mutex_lock(tsdn, &bin->lock);
- if (config_stats) {
- *bin_nregs = *nregs * bin->stats.curslabs;
- assert(*bin_nregs >= bin->stats.curregs);
- *bin_nfree = *bin_nregs - bin->stats.curregs;
- } else {
- *bin_nfree = *bin_nregs = 0;
- }
- *slabcur_addr = extent_addr_get(bin->slabcur);
- assert(*slabcur_addr != NULL);
- malloc_mutex_unlock(tsdn, &bin->lock);
+ return false;
}
diff --git a/contrib/jemalloc/src/extent_dss.c b/contrib/jemalloc/src/extent_dss.c
index 858178911051..9a35bacfb4f8 100644
--- a/contrib/jemalloc/src/extent_dss.c
+++ b/contrib/jemalloc/src/extent_dss.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_EXTENT_DSS_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -109,7 +108,7 @@ extent_dss_max_update(void *new_addr) {
void *
extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t alignment, bool *zero, bool *commit) {
- extent_t *gap;
+ edata_t *gap;
cassert(have_dss);
assert(size > 0);
@@ -123,7 +122,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
return NULL;
}
- gap = extent_alloc(tsdn, arena);
+ gap = edata_cache_get(tsdn, &arena->pa_shard.edata_cache);
if (gap == NULL) {
return NULL;
}
@@ -141,6 +140,8 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
goto label_oom;
}
+			extent_head_state_t head_state = opt_retain ?
+			    EXTENT_IS_HEAD : EXTENT_NOT_HEAD;
/*
* Compute how much page-aligned gap space (if any) is
* necessary to satisfy alignment. This space can be
@@ -153,11 +154,12 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
size_t gap_size_page = (uintptr_t)ret -
(uintptr_t)gap_addr_page;
if (gap_size_page != 0) {
- extent_init(gap, arena, gap_addr_page,
- gap_size_page, false, SC_NSIZES,
- arena_extent_sn_next(arena),
- extent_state_active, false, true, true,
- EXTENT_NOT_HEAD);
+ edata_init(gap, arena_ind_get(arena),
+ gap_addr_page, gap_size_page, false,
+ SC_NSIZES, extent_sn_next(
+ &arena->pa_shard.pac),
+ extent_state_active, false, true,
+ EXTENT_PAI_PAC, head_state);
}
/*
* Compute the address just past the end of the desired
@@ -186,25 +188,29 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
extent_dss_extending_finish();
if (gap_size_page != 0) {
- extent_dalloc_gap(tsdn, arena, gap);
+ ehooks_t *ehooks = arena_get_ehooks(
+ arena);
+ extent_dalloc_gap(tsdn,
+ &arena->pa_shard.pac, ehooks, gap);
} else {
- extent_dalloc(tsdn, arena, gap);
+ edata_cache_put(tsdn,
+ &arena->pa_shard.edata_cache, gap);
}
if (!*commit) {
*commit = pages_decommit(ret, size);
}
if (*zero && *commit) {
- extent_hooks_t *extent_hooks =
- EXTENT_HOOKS_INITIALIZER;
- extent_t extent;
+ edata_t edata = {0};
+ ehooks_t *ehooks = arena_get_ehooks(
+ arena);
- extent_init(&extent, arena, ret, size,
+ edata_init(&edata,
+ arena_ind_get(arena), ret, size,
size, false, SC_NSIZES,
extent_state_active, false, true,
- true, EXTENT_NOT_HEAD);
+ EXTENT_PAI_PAC, head_state);
if (extent_purge_forced_wrapper(tsdn,
- arena, &extent_hooks, &extent, 0,
- size)) {
+ ehooks, &edata, 0, size)) {
memset(ret, 0, size);
}
}
@@ -224,7 +230,7 @@ extent_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
}
label_oom:
extent_dss_extending_finish();
- extent_dalloc(tsdn, arena, gap);
+ edata_cache_put(tsdn, &arena->pa_shard.edata_cache, gap);
return NULL;
}
diff --git a/contrib/jemalloc/src/extent_mmap.c b/contrib/jemalloc/src/extent_mmap.c
index 17fd1c8f9577..5f0ee2d24b1b 100644
--- a/contrib/jemalloc/src/extent_mmap.c
+++ b/contrib/jemalloc/src/extent_mmap.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_EXTENT_MMAP_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
diff --git a/contrib/jemalloc/src/fxp.c b/contrib/jemalloc/src/fxp.c
new file mode 100644
index 000000000000..96585f0a65ac
--- /dev/null
+++ b/contrib/jemalloc/src/fxp.c
@@ -0,0 +1,124 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/fxp.h"
+
+static bool
+fxp_isdigit(char c) {
+ return '0' <= c && c <= '9';
+}
+
+bool
+fxp_parse(fxp_t *result, const char *str, char **end) {
+ /*
+ * Using malloc_strtoumax in this method isn't as handy as you might
+ * expect (I tried). In the fractional part, significant leading zeros
+ * mean that you still need to do your own parsing, now with trickier
+ * math. In the integer part, the casting (uintmax_t to uint32_t)
+ * forces more reasoning about bounds than just checking for overflow as
+ * we parse.
+ */
+ uint32_t integer_part = 0;
+
+ const char *cur = str;
+
+ /* The string must start with a digit or a decimal point. */
+ if (*cur != '.' && !fxp_isdigit(*cur)) {
+ return true;
+ }
+
+ while ('0' <= *cur && *cur <= '9') {
+ integer_part *= 10;
+ integer_part += *cur - '0';
+ if (integer_part >= (1U << 16)) {
+ return true;
+ }
+ cur++;
+ }
+
+ /*
+ * We've parsed all digits at the beginning of the string, without
+ * overflow. Either we're done, or there's a fractional part.
+ */
+ if (*cur != '.') {
+ *result = (integer_part << 16);
+ if (end != NULL) {
+ *end = (char *)cur;
+ }
+ return false;
+ }
+
+ /* There's a fractional part. */
+ cur++;
+ if (!fxp_isdigit(*cur)) {
+ /* Shouldn't end on the decimal point. */
+ return true;
+ }
+
+ /*
+ * We use a lot of precision for the fractional part, even though we'll
+ * discard most of it; this lets us get exact values for the important
+ * special case where the denominator is a small power of 2 (for
+ * instance, 1/512 == 0.001953125 is exactly representable even with
+ * only 16 bits of fractional precision). We need to left-shift by 16
+ * before dividing so we pick the number of digits to be
+ * floor(log(2**48)) = 14.
+ */
+ uint64_t fractional_part = 0;
+ uint64_t frac_div = 1;
+ for (int i = 0; i < FXP_FRACTIONAL_PART_DIGITS; i++) {
+ fractional_part *= 10;
+ frac_div *= 10;
+ if (fxp_isdigit(*cur)) {
+ fractional_part += *cur - '0';
+ cur++;
+ }
+ }
+ /*
+	 * We only parse the first FXP_FRACTIONAL_PART_DIGITS digits; any
+	 * remaining digits are consumed and ignored.
+ */
+ while (fxp_isdigit(*cur)) {
+ cur++;
+ }
+
+ assert(fractional_part < frac_div);
+ uint32_t fractional_repr = (uint32_t)(
+ (fractional_part << 16) / frac_div);
+
+ /* Success! */
+ *result = (integer_part << 16) + fractional_repr;
+ if (end != NULL) {
+ *end = (char *)cur;
+ }
+ return false;
+}
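The exactness claim in the comment above is easy to verify by hand: 1/512 = 0.001953125, and 0.001953125 * 2^16 = 128 exactly. A minimal sketch exercising that (illustrative only, not part of this change; it assumes the fxp_t, fxp_parse, fxp_print, and FXP_BUF_SIZE declarations from fxp.h seen here):

#include <assert.h>
#include <stdbool.h>
#include "jemalloc/internal/fxp.h"

static void
fxp_parse_example(void) {
	fxp_t val;
	char *end;
	/* "0.001953125" == 1/512; exactly representable in 16.16. */
	bool err = fxp_parse(&val, "0.001953125", &end);
	assert(!err && *end == '\0');
	assert(val == 128);		/* 0.001953125 * 2^16 == 128 */

	char buf[FXP_BUF_SIZE];
	fxp_print(val, buf);		/* round-trips to "0.001953125" */
}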
+
+void
+fxp_print(fxp_t a, char buf[FXP_BUF_SIZE]) {
+ uint32_t integer_part = fxp_round_down(a);
+ uint32_t fractional_part = (a & ((1U << 16) - 1));
+
+ int leading_fraction_zeros = 0;
+ uint64_t fraction_digits = fractional_part;
+ for (int i = 0; i < FXP_FRACTIONAL_PART_DIGITS; i++) {
+ if (fraction_digits < (1U << 16)
+ && fraction_digits * 10 >= (1U << 16)) {
+ leading_fraction_zeros = i;
+ }
+ fraction_digits *= 10;
+ }
+ fraction_digits >>= 16;
+ while (fraction_digits > 0 && fraction_digits % 10 == 0) {
+ fraction_digits /= 10;
+ }
+
+ size_t printed = malloc_snprintf(buf, FXP_BUF_SIZE, "%"FMTu32".",
+ integer_part);
+ for (int i = 0; i < leading_fraction_zeros; i++) {
+ buf[printed] = '0';
+ printed++;
+ }
+ malloc_snprintf(&buf[printed], FXP_BUF_SIZE - printed, "%"FMTu64,
+ fraction_digits);
+}
diff --git a/contrib/jemalloc/src/hash.c b/contrib/jemalloc/src/hash.c
deleted file mode 100644
index 7b2bdc2bd6f4..000000000000
--- a/contrib/jemalloc/src/hash.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#define JEMALLOC_HASH_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
diff --git a/contrib/jemalloc/src/hook.c b/contrib/jemalloc/src/hook.c
index 9ac703cf9f51..493edbbe512d 100644
--- a/contrib/jemalloc/src/hook.c
+++ b/contrib/jemalloc/src/hook.c
@@ -130,9 +130,9 @@ hook_reentrantp() {
*/
static bool in_hook_global = true;
tsdn_t *tsdn = tsdn_fetch();
- tcache_t *tcache = tsdn_tcachep_get(tsdn);
- if (tcache != NULL) {
- return &tcache->in_hook;
+ bool *in_hook = tsdn_in_hookp_get(tsdn);
+	if (in_hook != NULL) {
+ return in_hook;
}
return &in_hook_global;
}
diff --git a/contrib/jemalloc/src/hpa.c b/contrib/jemalloc/src/hpa.c
new file mode 100644
index 000000000000..7e2aeba0c0ff
--- /dev/null
+++ b/contrib/jemalloc/src/hpa.c
@@ -0,0 +1,1044 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/hpa.h"
+
+#include "jemalloc/internal/fb.h"
+#include "jemalloc/internal/witness.h"
+
+#define HPA_EDEN_SIZE (128 * HUGEPAGE)
+
+static edata_t *hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
+ size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+ bool *deferred_work_generated);
+static size_t hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size,
+ size_t nallocs, edata_list_active_t *results, bool *deferred_work_generated);
+static bool hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
+static bool hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool *deferred_work_generated);
+static void hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ bool *deferred_work_generated);
+static void hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self,
+ edata_list_active_t *list, bool *deferred_work_generated);
+static uint64_t hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self);
+
+bool
+hpa_supported() {
+#ifdef _WIN32
+ /*
+ * At least until the API and implementation is somewhat settled, we
+ * don't want to try to debug the VM subsystem on the hardest-to-test
+ * platform.
+ */
+ return false;
+#endif
+ if (!pages_can_hugify) {
+ return false;
+ }
+ /*
+	 * We fundamentally rely on an address-space-hungry growth strategy for
+ * hugepages.
+ */
+ if (LG_SIZEOF_PTR != 3) {
+ return false;
+ }
+ /*
+ * If we couldn't detect the value of HUGEPAGE, HUGEPAGE_PAGES becomes
+ * this sentinel value -- see the comment in pages.h.
+ */
+ if (HUGEPAGE_PAGES == 1) {
+ return false;
+ }
+ return true;
+}
+
+static void
+hpa_do_consistency_checks(hpa_shard_t *shard) {
+ assert(shard->base != NULL);
+}
+
+bool
+hpa_central_init(hpa_central_t *central, base_t *base, const hpa_hooks_t *hooks) {
+ /* malloc_conf processing should have filtered out these cases. */
+ assert(hpa_supported());
+ bool err;
+ err = malloc_mutex_init(&central->grow_mtx, "hpa_central_grow",
+ WITNESS_RANK_HPA_CENTRAL_GROW, malloc_mutex_rank_exclusive);
+ if (err) {
+ return true;
+ }
+ err = malloc_mutex_init(&central->mtx, "hpa_central",
+ WITNESS_RANK_HPA_CENTRAL, malloc_mutex_rank_exclusive);
+ if (err) {
+ return true;
+ }
+ central->base = base;
+ central->eden = NULL;
+ central->eden_len = 0;
+ central->age_counter = 0;
+ central->hooks = *hooks;
+ return false;
+}
+
+static hpdata_t *
+hpa_alloc_ps(tsdn_t *tsdn, hpa_central_t *central) {
+ return (hpdata_t *)base_alloc(tsdn, central->base, sizeof(hpdata_t),
+ CACHELINE);
+}
+
+hpdata_t *
+hpa_central_extract(tsdn_t *tsdn, hpa_central_t *central, size_t size,
+ bool *oom) {
+ /* Don't yet support big allocations; these should get filtered out. */
+ assert(size <= HUGEPAGE);
+ /*
+ * Should only try to extract from the central allocator if the local
+ * shard is exhausted. We should hold the grow_mtx on that shard.
+ */
+ witness_assert_positive_depth_to_rank(
+ tsdn_witness_tsdp_get(tsdn), WITNESS_RANK_HPA_SHARD_GROW);
+
+ malloc_mutex_lock(tsdn, &central->grow_mtx);
+ *oom = false;
+
+ hpdata_t *ps = NULL;
+
+ /* Is eden a perfect fit? */
+ if (central->eden != NULL && central->eden_len == HUGEPAGE) {
+ ps = hpa_alloc_ps(tsdn, central);
+ if (ps == NULL) {
+ *oom = true;
+ malloc_mutex_unlock(tsdn, &central->grow_mtx);
+ return NULL;
+ }
+ hpdata_init(ps, central->eden, central->age_counter++);
+ central->eden = NULL;
+ central->eden_len = 0;
+ malloc_mutex_unlock(tsdn, &central->grow_mtx);
+ return ps;
+ }
+
+ /*
+ * We're about to try to allocate from eden by splitting. If eden is
+	 * NULL, we have to allocate it too. Otherwise, we just have to
+	 * allocate an hpdata_t for the new pageslab.
+ */
+ if (central->eden == NULL) {
+ /*
+ * During development, we're primarily concerned with systems
+ * with overcommit. Eventually, we should be more careful here.
+ */
+ bool commit = true;
+ /* Allocate address space, bailing if we fail. */
+ void *new_eden = pages_map(NULL, HPA_EDEN_SIZE, HUGEPAGE,
+ &commit);
+ if (new_eden == NULL) {
+ *oom = true;
+ malloc_mutex_unlock(tsdn, &central->grow_mtx);
+ return NULL;
+ }
+ ps = hpa_alloc_ps(tsdn, central);
+ if (ps == NULL) {
+ pages_unmap(new_eden, HPA_EDEN_SIZE);
+ *oom = true;
+ malloc_mutex_unlock(tsdn, &central->grow_mtx);
+ return NULL;
+ }
+ central->eden = new_eden;
+ central->eden_len = HPA_EDEN_SIZE;
+ } else {
+		/* Eden is already nonempty; only need an hpdata_t for ps. */
+ ps = hpa_alloc_ps(tsdn, central);
+ if (ps == NULL) {
+ *oom = true;
+ malloc_mutex_unlock(tsdn, &central->grow_mtx);
+ return NULL;
+ }
+ }
+ assert(ps != NULL);
+ assert(central->eden != NULL);
+ assert(central->eden_len > HUGEPAGE);
+ assert(central->eden_len % HUGEPAGE == 0);
+ assert(HUGEPAGE_ADDR2BASE(central->eden) == central->eden);
+
+ hpdata_init(ps, central->eden, central->age_counter++);
+
+ char *eden_char = (char *)central->eden;
+ eden_char += HUGEPAGE;
+ central->eden = (void *)eden_char;
+ central->eden_len -= HUGEPAGE;
+
+ malloc_mutex_unlock(tsdn, &central->grow_mtx);
+
+ return ps;
+}
+
+bool
+hpa_shard_init(hpa_shard_t *shard, hpa_central_t *central, emap_t *emap,
+ base_t *base, edata_cache_t *edata_cache, unsigned ind,
+ const hpa_shard_opts_t *opts) {
+ /* malloc_conf processing should have filtered out these cases. */
+ assert(hpa_supported());
+ bool err;
+ err = malloc_mutex_init(&shard->grow_mtx, "hpa_shard_grow",
+ WITNESS_RANK_HPA_SHARD_GROW, malloc_mutex_rank_exclusive);
+ if (err) {
+ return true;
+ }
+ err = malloc_mutex_init(&shard->mtx, "hpa_shard",
+ WITNESS_RANK_HPA_SHARD, malloc_mutex_rank_exclusive);
+ if (err) {
+ return true;
+ }
+
+ assert(edata_cache != NULL);
+ shard->central = central;
+ shard->base = base;
+ edata_cache_fast_init(&shard->ecf, edata_cache);
+ psset_init(&shard->psset);
+ shard->age_counter = 0;
+ shard->ind = ind;
+ shard->emap = emap;
+
+ shard->opts = *opts;
+
+ shard->npending_purge = 0;
+ nstime_init_zero(&shard->last_purge);
+
+ shard->stats.npurge_passes = 0;
+ shard->stats.npurges = 0;
+ shard->stats.nhugifies = 0;
+ shard->stats.ndehugifies = 0;
+
+ /*
+ * Fill these in last, so that if an hpa_shard gets used despite
+ * initialization failing, we'll at least crash instead of just
+ * operating on corrupted data.
+ */
+ shard->pai.alloc = &hpa_alloc;
+ shard->pai.alloc_batch = &hpa_alloc_batch;
+ shard->pai.expand = &hpa_expand;
+ shard->pai.shrink = &hpa_shrink;
+ shard->pai.dalloc = &hpa_dalloc;
+ shard->pai.dalloc_batch = &hpa_dalloc_batch;
+ shard->pai.time_until_deferred_work = &hpa_time_until_deferred_work;
+
+ hpa_do_consistency_checks(shard);
+
+ return false;
+}
+
+/*
+ * Note that the stats functions here follow the usual stats naming conventions;
+ * "merge" obtains the stats from some live object of instance, while "accum"
+ * only combines the stats from one stats objet to another. Hence the lack of
+ * locking here.
+ */
+static void
+hpa_shard_nonderived_stats_accum(hpa_shard_nonderived_stats_t *dst,
+ hpa_shard_nonderived_stats_t *src) {
+ dst->npurge_passes += src->npurge_passes;
+ dst->npurges += src->npurges;
+ dst->nhugifies += src->nhugifies;
+ dst->ndehugifies += src->ndehugifies;
+}
+
+void
+hpa_shard_stats_accum(hpa_shard_stats_t *dst, hpa_shard_stats_t *src) {
+ psset_stats_accum(&dst->psset_stats, &src->psset_stats);
+ hpa_shard_nonderived_stats_accum(&dst->nonderived_stats,
+ &src->nonderived_stats);
+}
+
+void
+hpa_shard_stats_merge(tsdn_t *tsdn, hpa_shard_t *shard,
+ hpa_shard_stats_t *dst) {
+ hpa_do_consistency_checks(shard);
+
+ malloc_mutex_lock(tsdn, &shard->grow_mtx);
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ psset_stats_accum(&dst->psset_stats, &shard->psset.stats);
+ hpa_shard_nonderived_stats_accum(&dst->nonderived_stats, &shard->stats);
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ malloc_mutex_unlock(tsdn, &shard->grow_mtx);
+}
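A usage sketch of the merge/accum split described above (illustrative only; the aggregation loop and its names are hypothetical): merge reads each live shard under its locks, and accum then folds already-extracted snapshots together without locking.

static void
example_collect_stats(tsdn_t *tsdn, hpa_shard_t **shards, size_t nshards,
    hpa_shard_stats_t *out) {
	for (size_t i = 0; i < nshards; i++) {
		hpa_shard_stats_t snap = {0};
		/* "merge": pulls from a live shard; takes shard locks. */
		hpa_shard_stats_merge(tsdn, shards[i], &snap);
		/* "accum": combines two stats objects; no locking. */
		hpa_shard_stats_accum(out, &snap);
	}
}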
+
+static bool
+hpa_good_hugification_candidate(hpa_shard_t *shard, hpdata_t *ps) {
+ /*
+	 * Note that this needs to be >= rather than just >, because of the
+	 * important special case in which the hugification threshold is
+	 * exactly HUGEPAGE: e.g. with 4 KiB pages and 2 MiB hugepages, a
+	 * fully active slab (512 pages) must qualify, which a strict >
+	 * would never allow.
+ */
+ return hpdata_nactive_get(ps) * PAGE
+ >= shard->opts.hugification_threshold;
+}
+
+static size_t
+hpa_adjusted_ndirty(tsdn_t *tsdn, hpa_shard_t *shard) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ return psset_ndirty(&shard->psset) - shard->npending_purge;
+}
+
+static size_t
+hpa_ndirty_max(tsdn_t *tsdn, hpa_shard_t *shard) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ if (shard->opts.dirty_mult == (fxp_t)-1) {
+ return (size_t)-1;
+ }
+ return fxp_mul_frac(psset_nactive(&shard->psset),
+ shard->opts.dirty_mult);
+}
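To make the bound concrete, a sketch under the assumption (consistent with its use above) that fxp_mul_frac(a, b) computes (a * b) >> 16: with dirty_mult = 0.25, a shard with 10000 active pages tolerates at most 2500 dirty pages, and the (fxp_t)-1 sentinel disables the bound entirely. Illustrative only:

static size_t
example_dirty_bound(void) {
	fxp_t dirty_mult;
	char *end;
	bool err = fxp_parse(&dirty_mult, "0.25", &end);
	assert(!err);
	/* 10000 active pages * 0.25 == 2500 dirty pages allowed. */
	return fxp_mul_frac(10000, dirty_mult);
}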
+
+static bool
+hpa_hugify_blocked_by_ndirty(tsdn_t *tsdn, hpa_shard_t *shard) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ hpdata_t *to_hugify = psset_pick_hugify(&shard->psset);
+ if (to_hugify == NULL) {
+ return false;
+ }
+ return hpa_adjusted_ndirty(tsdn, shard)
+ + hpdata_nretained_get(to_hugify) > hpa_ndirty_max(tsdn, shard);
+}
+
+static bool
+hpa_should_purge(tsdn_t *tsdn, hpa_shard_t *shard) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ if (hpa_adjusted_ndirty(tsdn, shard) > hpa_ndirty_max(tsdn, shard)) {
+ return true;
+ }
+ if (hpa_hugify_blocked_by_ndirty(tsdn, shard)) {
+ return true;
+ }
+ return false;
+}
+
+static void
+hpa_update_purge_hugify_eligibility(tsdn_t *tsdn, hpa_shard_t *shard,
+ hpdata_t *ps) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ if (hpdata_changing_state_get(ps)) {
+ hpdata_purge_allowed_set(ps, false);
+ hpdata_disallow_hugify(ps);
+ return;
+ }
+ /*
+ * Hugepages are distinctly costly to purge, so try to avoid it unless
+ * they're *particularly* full of dirty pages. Eventually, we should
+ * use a smarter / more dynamic heuristic for situations where we have
+ * to manually hugify.
+ *
+ * In situations where we don't manually hugify, this problem is
+ * reduced. The "bad" situation we're trying to avoid is one's that's
+ * common in some Linux configurations (where both enabled and defrag
+ * are set to madvise) that can lead to long latency spikes on the first
+ * access after a hugification. The ideal policy in such configurations
+ * is probably time-based for both purging and hugifying; only hugify a
+ * hugepage if it's met the criteria for some extended period of time,
+ * and only dehugify it if it's failed to meet the criteria for an
+ * extended period of time. When background threads are on, we should
+ * try to take this hit on one of them, as well.
+ *
+ * I think the ideal setting is THP always enabled, and defrag set to
+ * deferred; in that case we don't need any explicit calls on the
+ * allocator's end at all; we just try to pack allocations in a
+ * hugepage-friendly manner and let the OS hugify in the background.
+ */
+ hpdata_purge_allowed_set(ps, hpdata_ndirty_get(ps) > 0);
+ if (hpa_good_hugification_candidate(shard, ps)
+ && !hpdata_huge_get(ps)) {
+ nstime_t now;
+ shard->central->hooks.curtime(&now, /* first_reading */ true);
+ hpdata_allow_hugify(ps, now);
+ }
+ /*
+ * Once a hugepage has become eligible for hugification, we don't mark
+ * it as ineligible just because it stops meeting the criteria (this
+ * could lead to situations where a hugepage that spends most of its
+	 * time meeting the criteria never quite gets hugified if there are
+ * intervening deallocations). The idea is that the hugification delay
+	 * will allow them to get purged, resetting their "hugify-allowed" bit.
+ * If they don't get purged, then the hugification isn't hurting and
+ * might help. As an exception, we don't hugify hugepages that are now
+ * empty; it definitely doesn't help there until the hugepage gets
+ * reused, which is likely not for a while.
+ */
+ if (hpdata_nactive_get(ps) == 0) {
+ hpdata_disallow_hugify(ps);
+ }
+}
+
+static bool
+hpa_shard_has_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ hpdata_t *to_hugify = psset_pick_hugify(&shard->psset);
+ return to_hugify != NULL || hpa_should_purge(tsdn, shard);
+}
+
+/* Returns whether or not we purged anything. */
+static bool
+hpa_try_purge(tsdn_t *tsdn, hpa_shard_t *shard) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+
+ hpdata_t *to_purge = psset_pick_purge(&shard->psset);
+ if (to_purge == NULL) {
+ return false;
+ }
+ assert(hpdata_purge_allowed_get(to_purge));
+ assert(!hpdata_changing_state_get(to_purge));
+
+ /*
+ * Don't let anyone else purge or hugify this page while
+ * we're purging it (allocations and deallocations are
+ * OK).
+ */
+ psset_update_begin(&shard->psset, to_purge);
+ assert(hpdata_alloc_allowed_get(to_purge));
+ hpdata_mid_purge_set(to_purge, true);
+ hpdata_purge_allowed_set(to_purge, false);
+ hpdata_disallow_hugify(to_purge);
+ /*
+ * Unlike with hugification (where concurrent
+ * allocations are allowed), concurrent allocation out
+ * of a hugepage being purged is unsafe; we might hand
+ * out an extent for an allocation and then purge it
+ * (clearing out user data).
+ */
+ hpdata_alloc_allowed_set(to_purge, false);
+ psset_update_end(&shard->psset, to_purge);
+
+ /* Gather all the metadata we'll need during the purge. */
+ bool dehugify = hpdata_huge_get(to_purge);
+ hpdata_purge_state_t purge_state;
+ size_t num_to_purge = hpdata_purge_begin(to_purge, &purge_state);
+
+ shard->npending_purge += num_to_purge;
+
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+
+ /* Actually do the purging, now that the lock is dropped. */
+ if (dehugify) {
+ shard->central->hooks.dehugify(hpdata_addr_get(to_purge),
+ HUGEPAGE);
+ }
+ size_t total_purged = 0;
+ uint64_t purges_this_pass = 0;
+ void *purge_addr;
+ size_t purge_size;
+ while (hpdata_purge_next(to_purge, &purge_state, &purge_addr,
+ &purge_size)) {
+ total_purged += purge_size;
+ assert(total_purged <= HUGEPAGE);
+ purges_this_pass++;
+ shard->central->hooks.purge(purge_addr, purge_size);
+ }
+
+ malloc_mutex_lock(tsdn, &shard->mtx);
+	/* The shard updates. */
+ shard->npending_purge -= num_to_purge;
+ shard->stats.npurge_passes++;
+ shard->stats.npurges += purges_this_pass;
+ shard->central->hooks.curtime(&shard->last_purge,
+ /* first_reading */ false);
+ if (dehugify) {
+ shard->stats.ndehugifies++;
+ }
+
+ /* The hpdata updates. */
+ psset_update_begin(&shard->psset, to_purge);
+ if (dehugify) {
+ hpdata_dehugify(to_purge);
+ }
+ hpdata_purge_end(to_purge, &purge_state);
+ hpdata_mid_purge_set(to_purge, false);
+
+ hpdata_alloc_allowed_set(to_purge, true);
+ hpa_update_purge_hugify_eligibility(tsdn, shard, to_purge);
+
+ psset_update_end(&shard->psset, to_purge);
+
+ return true;
+}
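The locking protocol above is the subtle part: purge state is snapshotted and the hpdata flagged mid-purge under shard->mtx, the actual purge calls run with the mutex dropped, and the results are committed back under the lock. Condensed to its shape (names as in this change; the psset bookkeeping and flag updates are elided):

malloc_mutex_lock(tsdn, &shard->mtx);
hpdata_purge_state_t st;
size_t n = hpdata_purge_begin(ps, &st);		/* snapshot under lock */
malloc_mutex_unlock(tsdn, &shard->mtx);

void *addr;
size_t len;
while (hpdata_purge_next(ps, &st, &addr, &len)) {
	shard->central->hooks.purge(addr, len);	/* e.g. madvise() */
}

malloc_mutex_lock(tsdn, &shard->mtx);
hpdata_purge_end(ps, &st);			/* publish under lock */
malloc_mutex_unlock(tsdn, &shard->mtx);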
+
+/* Returns whether or not we hugified anything. */
+static bool
+hpa_try_hugify(tsdn_t *tsdn, hpa_shard_t *shard) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+
+ if (hpa_hugify_blocked_by_ndirty(tsdn, shard)) {
+ return false;
+ }
+
+ hpdata_t *to_hugify = psset_pick_hugify(&shard->psset);
+ if (to_hugify == NULL) {
+ return false;
+ }
+ assert(hpdata_hugify_allowed_get(to_hugify));
+ assert(!hpdata_changing_state_get(to_hugify));
+
+ /* Make sure that it's been hugifiable for long enough. */
+ nstime_t time_hugify_allowed = hpdata_time_hugify_allowed(to_hugify);
+ uint64_t millis = shard->central->hooks.ms_since(&time_hugify_allowed);
+ if (millis < shard->opts.hugify_delay_ms) {
+ return false;
+ }
+
+ /*
+ * Don't let anyone else purge or hugify this page while
+ * we're hugifying it (allocations and deallocations are
+ * OK).
+ */
+ psset_update_begin(&shard->psset, to_hugify);
+ hpdata_mid_hugify_set(to_hugify, true);
+ hpdata_purge_allowed_set(to_hugify, false);
+ hpdata_disallow_hugify(to_hugify);
+ assert(hpdata_alloc_allowed_get(to_hugify));
+ psset_update_end(&shard->psset, to_hugify);
+
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+
+ shard->central->hooks.hugify(hpdata_addr_get(to_hugify), HUGEPAGE);
+
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ shard->stats.nhugifies++;
+
+ psset_update_begin(&shard->psset, to_hugify);
+ hpdata_hugify(to_hugify);
+ hpdata_mid_hugify_set(to_hugify, false);
+ hpa_update_purge_hugify_eligibility(tsdn, shard, to_hugify);
+ psset_update_end(&shard->psset, to_hugify);
+
+ return true;
+}
+
+/*
+ * Execution of deferred work is forced if it's triggered by an explicit
+ * hpa_shard_do_deferred_work() call.
+ */
+static void
+hpa_shard_maybe_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard,
+ bool forced) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ if (!forced && shard->opts.deferral_allowed) {
+ return;
+ }
+ /*
+ * If we're on a background thread, do work so long as there's work to
+ * be done. Otherwise, bound latency to not be *too* bad by doing at
+ * most a small fixed number of operations.
+ */
+ bool hugified = false;
+ bool purged = false;
+ size_t max_ops = (forced ? (size_t)-1 : 16);
+ size_t nops = 0;
+ do {
+ /*
+ * Always purge before hugifying, to make sure we get some
+ * ability to hit our quiescence targets.
+ */
+ purged = false;
+ while (hpa_should_purge(tsdn, shard) && nops < max_ops) {
+ purged = hpa_try_purge(tsdn, shard);
+ if (purged) {
+ nops++;
+ }
+ }
+ hugified = hpa_try_hugify(tsdn, shard);
+ if (hugified) {
+ nops++;
+ }
+		malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ } while ((hugified || purged) && nops < max_ops);
+}
+
+static edata_t *
+hpa_try_alloc_one_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
+ bool *oom) {
+ bool err;
+ edata_t *edata = edata_cache_fast_get(tsdn, &shard->ecf);
+ if (edata == NULL) {
+ *oom = true;
+ return NULL;
+ }
+
+ hpdata_t *ps = psset_pick_alloc(&shard->psset, size);
+ if (ps == NULL) {
+ edata_cache_fast_put(tsdn, &shard->ecf, edata);
+ return NULL;
+ }
+
+ psset_update_begin(&shard->psset, ps);
+
+ if (hpdata_empty(ps)) {
+ /*
+ * If the pageslab used to be empty, treat it as though it's
+ * brand new for fragmentation-avoidance purposes; what we're
+ * trying to approximate is the age of the allocations *in* that
+ * pageslab, and the allocations in the new pageslab are
+ * definitionally the youngest in this hpa shard.
+ */
+ hpdata_age_set(ps, shard->age_counter++);
+ }
+
+ void *addr = hpdata_reserve_alloc(ps, size);
+ edata_init(edata, shard->ind, addr, size, /* slab */ false,
+ SC_NSIZES, /* sn */ hpdata_age_get(ps), extent_state_active,
+ /* zeroed */ false, /* committed */ true, EXTENT_PAI_HPA,
+ EXTENT_NOT_HEAD);
+ edata_ps_set(edata, ps);
+
+ /*
+ * This could theoretically be moved outside of the critical section,
+ * but that introduces the potential for a race. Without the lock, the
+ * (initially nonempty, since this is the reuse pathway) pageslab we
+ * allocated out of could become otherwise empty while the lock is
+ * dropped. This would force us to deal with a pageslab eviction down
+ * the error pathway, which is a pain.
+ */
+ err = emap_register_boundary(tsdn, shard->emap, edata,
+ SC_NSIZES, /* slab */ false);
+ if (err) {
+ hpdata_unreserve(ps, edata_addr_get(edata),
+ edata_size_get(edata));
+ /*
+ * We should arguably reset dirty state here, but this would
+ * require some sort of prepare + commit functionality that's a
+ * little much to deal with for now.
+ *
+ * We don't have a do_deferred_work down this pathway, on the
+ * principle that we didn't *really* affect shard state (we
+ * tweaked the stats, but our tweaks weren't really accurate).
+ */
+ psset_update_end(&shard->psset, ps);
+ edata_cache_fast_put(tsdn, &shard->ecf, edata);
+ *oom = true;
+ return NULL;
+ }
+
+ hpa_update_purge_hugify_eligibility(tsdn, shard, ps);
+ psset_update_end(&shard->psset, ps);
+ return edata;
+}
+
+static size_t
+hpa_try_alloc_batch_no_grow(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
+ bool *oom, size_t nallocs, edata_list_active_t *results,
+ bool *deferred_work_generated) {
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ size_t nsuccess = 0;
+ for (; nsuccess < nallocs; nsuccess++) {
+ edata_t *edata = hpa_try_alloc_one_no_grow(tsdn, shard, size,
+ oom);
+ if (edata == NULL) {
+ break;
+ }
+ edata_list_active_append(results, edata);
+ }
+
+ hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ false);
+ *deferred_work_generated = hpa_shard_has_deferred_work(tsdn, shard);
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ return nsuccess;
+}
+
+static size_t
+hpa_alloc_batch_psset(tsdn_t *tsdn, hpa_shard_t *shard, size_t size,
+ size_t nallocs, edata_list_active_t *results,
+ bool *deferred_work_generated) {
+ assert(size <= shard->opts.slab_max_alloc);
+ bool oom = false;
+
+ size_t nsuccess = hpa_try_alloc_batch_no_grow(tsdn, shard, size, &oom,
+ nallocs, results, deferred_work_generated);
+
+ if (nsuccess == nallocs || oom) {
+ return nsuccess;
+ }
+
+ /*
+ * We didn't OOM, but weren't able to fill everything requested of us;
+ * try to grow.
+ */
+ malloc_mutex_lock(tsdn, &shard->grow_mtx);
+ /*
+ * Check for grow races; maybe some earlier thread expanded the psset
+ * in between when we dropped the main mutex and grabbed the grow mutex.
+ */
+ nsuccess += hpa_try_alloc_batch_no_grow(tsdn, shard, size, &oom,
+ nallocs - nsuccess, results, deferred_work_generated);
+ if (nsuccess == nallocs || oom) {
+ malloc_mutex_unlock(tsdn, &shard->grow_mtx);
+ return nsuccess;
+ }
+
+ /*
+ * Note that we don't hold shard->mtx here (while growing);
+ * deallocations (and allocations of smaller sizes) may still succeed
+ * while we're doing this potentially expensive system call.
+ */
+ hpdata_t *ps = hpa_central_extract(tsdn, shard->central, size, &oom);
+ if (ps == NULL) {
+ malloc_mutex_unlock(tsdn, &shard->grow_mtx);
+ return nsuccess;
+ }
+
+ /*
+ * We got the pageslab; allocate from it. This does an unlock followed
+ * by a lock on the same mutex, and holds the grow mutex while doing
+ * deferred work, but this is an uncommon path; the simplicity is worth
+ * it.
+ */
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ psset_insert(&shard->psset, ps);
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+
+ nsuccess += hpa_try_alloc_batch_no_grow(tsdn, shard, size, &oom,
+ nallocs - nsuccess, results, deferred_work_generated);
+ /*
+ * Drop grow_mtx before doing deferred work; other threads blocked on it
+ * should be allowed to proceed while we're working.
+ */
+ malloc_mutex_unlock(tsdn, &shard->grow_mtx);
+
+ return nsuccess;
+}
+
+static hpa_shard_t *
+hpa_from_pai(pai_t *self) {
+ assert(self->alloc == &hpa_alloc);
+ assert(self->expand == &hpa_expand);
+ assert(self->shrink == &hpa_shrink);
+ assert(self->dalloc == &hpa_dalloc);
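+ /*
+ * The pai_t must be embedded as the first member of hpa_shard_t for this
+ * downcast to be valid; the cast is then just a pointer reinterpretation.
+ */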
+ return (hpa_shard_t *)self;
+}
+
+static size_t
+hpa_alloc_batch(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
+ edata_list_active_t *results, bool *deferred_work_generated) {
+ assert(nallocs > 0);
+ assert((size & PAGE_MASK) == 0);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+ hpa_shard_t *shard = hpa_from_pai(self);
+
+ if (size > shard->opts.slab_max_alloc) {
+ return 0;
+ }
+
+ size_t nsuccess = hpa_alloc_batch_psset(tsdn, shard, size, nallocs,
+ results, deferred_work_generated);
+
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ /*
+ * Guard the sanity checks with config_debug because the loop cannot be
+ * proven non-circular by the compiler, even if everything within the
+ * loop is optimized away.
+ */
+ if (config_debug) {
+ edata_t *edata;
+ ql_foreach(edata, &results->head, ql_link_active) {
+ emap_assert_mapped(tsdn, shard->emap, edata);
+ assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
+ assert(edata_state_get(edata) == extent_state_active);
+ assert(edata_arena_ind_get(edata) == shard->ind);
+ assert(edata_szind_get_maybe_invalid(edata) ==
+ SC_NSIZES);
+ assert(!edata_slab_get(edata));
+ assert(edata_committed_get(edata));
+ assert(edata_base_get(edata) == edata_addr_get(edata));
+ assert(edata_base_get(edata) != NULL);
+ }
+ }
+ return nsuccess;
+}
+
+static edata_t *
+hpa_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
+ bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
+ assert((size & PAGE_MASK) == 0);
+ assert(!guarded);
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+
+ /* We don't handle alignment or zeroing for now. */
+ if (alignment > PAGE || zero) {
+ return NULL;
+ }
+ /*
+ * An alloc with alignment == PAGE and zero == false is equivalent to a
+ * batch alloc of 1. Just do that, so we can share code.
+ */
+ edata_list_active_t results;
+ edata_list_active_init(&results);
+ size_t nallocs = hpa_alloc_batch(tsdn, self, size, /* nallocs */ 1,
+ &results, deferred_work_generated);
+ assert(nallocs == 0 || nallocs == 1);
+ edata_t *edata = edata_list_active_first(&results);
+ return edata;
+}
+
+static bool
+hpa_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
+ size_t new_size, bool zero, bool *deferred_work_generated) {
+ /* Expand not yet supported. */
+ return true;
+}
+
+static bool
+hpa_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool *deferred_work_generated) {
+ /* Shrink not yet supported. */
+ return true;
+}
+
+static void
+hpa_dalloc_prepare_unlocked(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata) {
+ malloc_mutex_assert_not_owner(tsdn, &shard->mtx);
+
+ assert(edata_pai_get(edata) == EXTENT_PAI_HPA);
+ assert(edata_state_get(edata) == extent_state_active);
+ assert(edata_arena_ind_get(edata) == shard->ind);
+ assert(edata_szind_get_maybe_invalid(edata) == SC_NSIZES);
+ assert(edata_committed_get(edata));
+ assert(edata_base_get(edata) != NULL);
+
+ /*
+ * Another thread shouldn't be trying to touch the metadata of an
+ * allocation being freed. The one exception is a merge attempt from a
+ * lower-addressed PAC extent; in this case we have a nominal race on
+ * the edata metadata bits, but in practice the fact that the PAI bits
+ * are different will prevent any further access. The race is bad, but
+ * benign in practice, and the long term plan is to track enough state
+ * in the rtree to prevent these merge attempts in the first place.
+ */
+ edata_addr_set(edata, edata_base_get(edata));
+ edata_zeroed_set(edata, false);
+ emap_deregister_boundary(tsdn, shard->emap, edata);
+}
+
+static void
+hpa_dalloc_locked(tsdn_t *tsdn, hpa_shard_t *shard, edata_t *edata) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+
+ /*
+ * Release the metadata early, to avoid having to remember to do it
+ * while we're also doing tricky purging logic. First, we need to grab
+ * a few bits of metadata from it.
+ *
+ * Note that the shard mutex protects ps's metadata too; it wouldn't be
+ * correct to try to read most information out of it without the lock.
+ */
+ hpdata_t *ps = edata_ps_get(edata);
+ /* Currently, all edatas come from pageslabs. */
+ assert(ps != NULL);
+ void *unreserve_addr = edata_addr_get(edata);
+ size_t unreserve_size = edata_size_get(edata);
+ edata_cache_fast_put(tsdn, &shard->ecf, edata);
+
+ psset_update_begin(&shard->psset, ps);
+ hpdata_unreserve(ps, unreserve_addr, unreserve_size);
+ hpa_update_purge_hugify_eligibility(tsdn, shard, ps);
+ psset_update_end(&shard->psset, ps);
+}
+
+static void
+hpa_dalloc_batch(tsdn_t *tsdn, pai_t *self, edata_list_active_t *list,
+ bool *deferred_work_generated) {
+ hpa_shard_t *shard = hpa_from_pai(self);
+
+ edata_t *edata;
+ ql_foreach(edata, &list->head, ql_link_active) {
+ hpa_dalloc_prepare_unlocked(tsdn, shard, edata);
+ }
+
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ /* Now, remove from the list. */
+ while ((edata = edata_list_active_first(list)) != NULL) {
+ edata_list_active_remove(list, edata);
+ hpa_dalloc_locked(tsdn, shard, edata);
+ }
+ hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ false);
+ *deferred_work_generated =
+ hpa_shard_has_deferred_work(tsdn, shard);
+
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+}
+
+static void
+hpa_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ bool *deferred_work_generated) {
+ assert(!edata_guarded_get(edata));
+ /* Just a dalloc_batch of size 1; this lets us share logic. */
+ edata_list_active_t dalloc_list;
+ edata_list_active_init(&dalloc_list);
+ edata_list_active_append(&dalloc_list, edata);
+ hpa_dalloc_batch(tsdn, self, &dalloc_list, deferred_work_generated);
+}
+
+/*
+ * Calculate time until either purging or hugification ought to happen.
+ * Called by background threads.
+ */
+static uint64_t
+hpa_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
+ hpa_shard_t *shard = hpa_from_pai(self);
+ uint64_t time_ns = BACKGROUND_THREAD_DEFERRED_MAX;
+
+ malloc_mutex_lock(tsdn, &shard->mtx);
+
+ hpdata_t *to_hugify = psset_pick_hugify(&shard->psset);
+ if (to_hugify != NULL) {
+ nstime_t time_hugify_allowed =
+ hpdata_time_hugify_allowed(to_hugify);
+ uint64_t since_hugify_allowed_ms =
+ shard->central->hooks.ms_since(&time_hugify_allowed);
+ /*
+ * If not enough time has passed since hugification was allowed,
+ * sleep for the rest.
+ */
+ if (since_hugify_allowed_ms < shard->opts.hugify_delay_ms) {
+ time_ns = shard->opts.hugify_delay_ms -
+ since_hugify_allowed_ms;
+ time_ns *= 1000 * 1000;
+ } else {
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ return BACKGROUND_THREAD_DEFERRED_MIN;
+ }
+ }
+
+ if (hpa_should_purge(tsdn, shard)) {
+ /*
+ * If we haven't purged before, no need to check interval
+ * between purges. Simply purge as soon as possible.
+ */
+ if (shard->stats.npurge_passes == 0) {
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ return BACKGROUND_THREAD_DEFERRED_MIN;
+ }
+ uint64_t since_last_purge_ms = shard->central->hooks.ms_since(
+ &shard->last_purge);
+
+ if (since_last_purge_ms < shard->opts.min_purge_interval_ms) {
+ uint64_t until_purge_ns;
+ until_purge_ns = shard->opts.min_purge_interval_ms -
+ since_last_purge_ms;
+ until_purge_ns *= 1000 * 1000;
+
+ if (until_purge_ns < time_ns) {
+ time_ns = until_purge_ns;
+ }
+ } else {
+ time_ns = BACKGROUND_THREAD_DEFERRED_MIN;
+ }
+ }
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ return time_ns;
+}
+
+void
+hpa_shard_disable(tsdn_t *tsdn, hpa_shard_t *shard) {
+ hpa_do_consistency_checks(shard);
+
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ edata_cache_fast_disable(tsdn, &shard->ecf);
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+}
+
+static void
+hpa_shard_assert_stats_empty(psset_bin_stats_t *bin_stats) {
+ assert(bin_stats->npageslabs == 0);
+ assert(bin_stats->nactive == 0);
+}
+
+static void
+hpa_assert_empty(tsdn_t *tsdn, hpa_shard_t *shard, psset_t *psset) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ for (int huge = 0; huge <= 1; huge++) {
+ hpa_shard_assert_stats_empty(&psset->stats.full_slabs[huge]);
+ for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
+ hpa_shard_assert_stats_empty(
+ &psset->stats.nonfull_slabs[i][huge]);
+ }
+ }
+}
+
+void
+hpa_shard_destroy(tsdn_t *tsdn, hpa_shard_t *shard) {
+ hpa_do_consistency_checks(shard);
+ /*
+ * By the time we're here, the arena code should have dalloc'd all the
+ * active extents, which means we should have eventually evicted
+ * everything from the psset, so it shouldn't be able to serve even a
+ * 1-page allocation.
+ */
+ if (config_debug) {
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ hpa_assert_empty(tsdn, shard, &shard->psset);
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ }
+ hpdata_t *ps;
+ while ((ps = psset_pick_alloc(&shard->psset, PAGE)) != NULL) {
+ /* There should be no allocations anywhere. */
+ assert(hpdata_empty(ps));
+ psset_remove(&shard->psset, ps);
+ shard->central->hooks.unmap(hpdata_addr_get(ps), HUGEPAGE);
+ }
+}
+
+void
+hpa_shard_set_deferral_allowed(tsdn_t *tsdn, hpa_shard_t *shard,
+ bool deferral_allowed) {
+ hpa_do_consistency_checks(shard);
+
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ bool deferral_previously_allowed = shard->opts.deferral_allowed;
+ shard->opts.deferral_allowed = deferral_allowed;
+ if (deferral_previously_allowed && !deferral_allowed) {
+ hpa_shard_maybe_do_deferred_work(tsdn, shard,
+ /* forced */ true);
+ }
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+}
+
+void
+hpa_shard_do_deferred_work(tsdn_t *tsdn, hpa_shard_t *shard) {
+ hpa_do_consistency_checks(shard);
+
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ hpa_shard_maybe_do_deferred_work(tsdn, shard, /* forced */ true);
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+}
+
+void
+hpa_shard_prefork3(tsdn_t *tsdn, hpa_shard_t *shard) {
+ hpa_do_consistency_checks(shard);
+
+ malloc_mutex_prefork(tsdn, &shard->grow_mtx);
+}
+
+void
+hpa_shard_prefork4(tsdn_t *tsdn, hpa_shard_t *shard) {
+ hpa_do_consistency_checks(shard);
+
+ malloc_mutex_prefork(tsdn, &shard->mtx);
+}
+
+void
+hpa_shard_postfork_parent(tsdn_t *tsdn, hpa_shard_t *shard) {
+ hpa_do_consistency_checks(shard);
+
+ malloc_mutex_postfork_parent(tsdn, &shard->grow_mtx);
+ malloc_mutex_postfork_parent(tsdn, &shard->mtx);
+}
+
+void
+hpa_shard_postfork_child(tsdn_t *tsdn, hpa_shard_t *shard) {
+ hpa_do_consistency_checks(shard);
+
+ malloc_mutex_postfork_child(tsdn, &shard->grow_mtx);
+ malloc_mutex_postfork_child(tsdn, &shard->mtx);
+}
diff --git a/contrib/jemalloc/src/hpa_hooks.c b/contrib/jemalloc/src/hpa_hooks.c
new file mode 100644
index 000000000000..ade581e8dc03
--- /dev/null
+++ b/contrib/jemalloc/src/hpa_hooks.c
@@ -0,0 +1,63 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/hpa_hooks.h"
+
+static void *hpa_hooks_map(size_t size);
+static void hpa_hooks_unmap(void *ptr, size_t size);
+static void hpa_hooks_purge(void *ptr, size_t size);
+static void hpa_hooks_hugify(void *ptr, size_t size);
+static void hpa_hooks_dehugify(void *ptr, size_t size);
+static void hpa_hooks_curtime(nstime_t *r_nstime, bool first_reading);
+static uint64_t hpa_hooks_ms_since(nstime_t *past_nstime);
+
+hpa_hooks_t hpa_hooks_default = {
+ &hpa_hooks_map,
+ &hpa_hooks_unmap,
+ &hpa_hooks_purge,
+ &hpa_hooks_hugify,
+ &hpa_hooks_dehugify,
+ &hpa_hooks_curtime,
+ &hpa_hooks_ms_since
+};
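+
+/*
+ * A hypothetical alternate hook table (e.g. for tests that want to exercise
+ * the HPA paths without issuing real madvise() calls) could reuse the
+ * defaults and stub out only hugification:
+ *
+ *     static void noop_hugify(void *ptr, size_t size) {}
+ *     static void noop_dehugify(void *ptr, size_t size) {}
+ *     static hpa_hooks_t test_hooks = {
+ *         &hpa_hooks_map, &hpa_hooks_unmap, &hpa_hooks_purge,
+ *         &noop_hugify, &noop_dehugify,
+ *         &hpa_hooks_curtime, &hpa_hooks_ms_since,
+ *     };
+ */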
+
+static void *
+hpa_hooks_map(size_t size) {
+ bool commit = true;
+ return pages_map(NULL, size, HUGEPAGE, &commit);
+}
+
+static void
+hpa_hooks_unmap(void *ptr, size_t size) {
+ pages_unmap(ptr, size);
+}
+
+static void
+hpa_hooks_purge(void *ptr, size_t size) {
+ pages_purge_forced(ptr, size);
+}
+
+static void
+hpa_hooks_hugify(void *ptr, size_t size) {
+ bool err = pages_huge(ptr, size);
+ (void)err;
+}
+
+static void
+hpa_hooks_dehugify(void *ptr, size_t size) {
+ bool err = pages_nohuge(ptr, size);
+ (void)err;
+}
+
+static void
+hpa_hooks_curtime(nstime_t *r_nstime, bool first_reading) {
+ if (first_reading) {
+ nstime_init_zero(r_nstime);
+ }
+ nstime_update(r_nstime);
+}
+
+static uint64_t
+hpa_hooks_ms_since(nstime_t *past_nstime) {
+ return nstime_ns_since(past_nstime) / 1000 / 1000;
+}
diff --git a/contrib/jemalloc/src/hpdata.c b/contrib/jemalloc/src/hpdata.c
new file mode 100644
index 000000000000..e7d7294c7822
--- /dev/null
+++ b/contrib/jemalloc/src/hpdata.c
@@ -0,0 +1,325 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/hpdata.h"
+
+static int
+hpdata_age_comp(const hpdata_t *a, const hpdata_t *b) {
+ uint64_t a_age = hpdata_age_get(a);
+ uint64_t b_age = hpdata_age_get(b);
+ /*
+ * hpdata ages are operation counts in the psset; no two should be the
+ * same.
+ */
+ assert(a_age != b_age);
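+ /* Branchless three-way compare; a plain subtraction could overflow. */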
+ return (a_age > b_age) - (a_age < b_age);
+}
+
+ph_gen(, hpdata_age_heap, hpdata_t, age_link, hpdata_age_comp)
+
+void
+hpdata_init(hpdata_t *hpdata, void *addr, uint64_t age) {
+ hpdata_addr_set(hpdata, addr);
+ hpdata_age_set(hpdata, age);
+ hpdata->h_huge = false;
+ hpdata->h_alloc_allowed = true;
+ hpdata->h_in_psset_alloc_container = false;
+ hpdata->h_purge_allowed = false;
+ hpdata->h_hugify_allowed = false;
+ hpdata->h_in_psset_hugify_container = false;
+ hpdata->h_mid_purge = false;
+ hpdata->h_mid_hugify = false;
+ hpdata->h_updating = false;
+ hpdata->h_in_psset = false;
+ hpdata_longest_free_range_set(hpdata, HUGEPAGE_PAGES);
+ hpdata->h_nactive = 0;
+ fb_init(hpdata->active_pages, HUGEPAGE_PAGES);
+ hpdata->h_ntouched = 0;
+ fb_init(hpdata->touched_pages, HUGEPAGE_PAGES);
+
+ hpdata_assert_consistent(hpdata);
+}
+
+void *
+hpdata_reserve_alloc(hpdata_t *hpdata, size_t sz) {
+ hpdata_assert_consistent(hpdata);
+ /*
+ * This is a metadata change; the hpdata should therefore either not be
+ * in the psset, or should have explicitly marked itself as being
+ * mid-update.
+ */
+ assert(!hpdata->h_in_psset || hpdata->h_updating);
+ assert(hpdata->h_alloc_allowed);
+ assert((sz & PAGE_MASK) == 0);
+ size_t npages = sz >> LG_PAGE;
+ assert(npages <= hpdata_longest_free_range_get(hpdata));
+
+ size_t result;
+
+ size_t start = 0;
+ /*
+ * These initializations are dead stores, but without them the compiler
+ * would warn of potentially uninitialized use, since it can't tell
+ * statically that found is always true below.
+ */
+ size_t begin = 0;
+ size_t len = 0;
+
+ size_t largest_unchosen_range = 0;
+ while (true) {
+ bool found = fb_urange_iter(hpdata->active_pages,
+ HUGEPAGE_PAGES, start, &begin, &len);
+ /*
+ * A precondition to this function is that hpdata must be able
+ * to serve the allocation.
+ */
+ assert(found);
+ assert(len <= hpdata_longest_free_range_get(hpdata));
+ if (len >= npages) {
+ /*
+ * We use first-fit within the page slabs; this gives
+ * bounded worst-case fragmentation within a slab. It's
+ * not necessarily right; we could experiment with
+ * various other options.
+ */
+ break;
+ }
+ if (len > largest_unchosen_range) {
+ largest_unchosen_range = len;
+ }
+ start = begin + len;
+ }
+ /* We found a range; remember it. */
+ result = begin;
+ fb_set_range(hpdata->active_pages, HUGEPAGE_PAGES, begin, npages);
+ hpdata->h_nactive += npages;
+
+ /*
+ * We might be about to dirty some memory for the first time; update our
+ * count if so.
+ */
+ size_t new_dirty = fb_ucount(hpdata->touched_pages, HUGEPAGE_PAGES,
+ result, npages);
+ fb_set_range(hpdata->touched_pages, HUGEPAGE_PAGES, result, npages);
+ hpdata->h_ntouched += new_dirty;
+
+ /*
+ * If we allocated out of a range that was the longest in the hpdata, it
+ * might be the only one of that size and we'll have to adjust the
+ * metadata.
+ */
+ if (len == hpdata_longest_free_range_get(hpdata)) {
+ start = begin + npages;
+ while (start < HUGEPAGE_PAGES) {
+ bool found = fb_urange_iter(hpdata->active_pages,
+ HUGEPAGE_PAGES, start, &begin, &len);
+ if (!found) {
+ break;
+ }
+ assert(len <= hpdata_longest_free_range_get(hpdata));
+ if (len == hpdata_longest_free_range_get(hpdata)) {
+ largest_unchosen_range = len;
+ break;
+ }
+ if (len > largest_unchosen_range) {
+ largest_unchosen_range = len;
+ }
+ start = begin + len;
+ }
+ hpdata_longest_free_range_set(hpdata, largest_unchosen_range);
+ }
+
+ hpdata_assert_consistent(hpdata);
+ return (void *)(
+ (uintptr_t)hpdata_addr_get(hpdata) + (result << LG_PAGE));
+}
+
+void
+hpdata_unreserve(hpdata_t *hpdata, void *addr, size_t sz) {
+ hpdata_assert_consistent(hpdata);
+ /* See the comment in reserve. */
+ assert(!hpdata->h_in_psset || hpdata->h_updating);
+ assert(((uintptr_t)addr & PAGE_MASK) == 0);
+ assert((sz & PAGE_MASK) == 0);
+ size_t begin = ((uintptr_t)addr - (uintptr_t)hpdata_addr_get(hpdata))
+ >> LG_PAGE;
+ assert(begin < HUGEPAGE_PAGES);
+ size_t npages = sz >> LG_PAGE;
+ size_t old_longest_range = hpdata_longest_free_range_get(hpdata);
+
+ fb_unset_range(hpdata->active_pages, HUGEPAGE_PAGES, begin, npages);
+ /* We might have just created a new, larger range. */
+ size_t new_begin = (fb_fls(hpdata->active_pages, HUGEPAGE_PAGES,
+ begin) + 1);
+ size_t new_end = fb_ffs(hpdata->active_pages, HUGEPAGE_PAGES,
+ begin + npages - 1);
+ size_t new_range_len = new_end - new_begin;
+
+ if (new_range_len > old_longest_range) {
+ hpdata_longest_free_range_set(hpdata, new_range_len);
+ }
+
+ hpdata->h_nactive -= npages;
+
+ hpdata_assert_consistent(hpdata);
+}
+
+size_t
+hpdata_purge_begin(hpdata_t *hpdata, hpdata_purge_state_t *purge_state) {
+ hpdata_assert_consistent(hpdata);
+ /*
+ * See the comment below; we might purge any inactive extent, so it's
+ * unsafe for any other thread to turn any inactive extent active while
+ * we're operating on it.
+ */
+ assert(!hpdata_alloc_allowed_get(hpdata));
+
+ purge_state->npurged = 0;
+ purge_state->next_purge_search_begin = 0;
+
+ /*
+ * Initialize to_purge.
+ *
+ * It's possible to end up in situations where two dirty extents are
+ * separated by a retained extent:
+ * - 1 page allocated.
+ * - 1 page allocated.
+ * - 1 page allocated.
+ *
+ * If the middle page is freed and purged, and then the first and third
+ * pages are freed, and then another purge pass happens, the hpdata
+ * looks like this:
+ * - 1 page dirty.
+ * - 1 page retained.
+ * - 1 page dirty.
+ *
+ * But it's safe to do a single 3-page purge.
+ *
+ * We do this by first computing the dirty pages, and then filling in
+ * any gaps by extending each range in the dirty bitmap until
+ * the next active page. This purges more pages, but the expensive part
+ * of purging is the TLB shootdowns, rather than the kernel state
+ * tracking; doing a little bit more of the latter is fine if it saves
+ * us from doing some of the former.
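+ *
+ * A hypothetical 8-page illustration: active = 10000100 (pages 0 and 5)
+ * and touched = 11010100 give dirty = touched & ~active = 01010000
+ * (pages 1 and 3). Extending each dirty run to the next active page
+ * yields to_purge = 01110000: the retained page 2 is purged as well,
+ * coalescing the two dirty ranges into a single purge call.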
+ */
+
+ /*
+ * The dirty pages are those that are touched but not active. Note that
+ * in a normal-ish case, HUGEPAGE_PAGES is something like 512 and the
+ * fb_group_t is 64 bits, so this is 64 bytes, spread across 8
+ * fb_group_ts.
+ */
+ fb_group_t dirty_pages[FB_NGROUPS(HUGEPAGE_PAGES)];
+ fb_init(dirty_pages, HUGEPAGE_PAGES);
+ fb_bit_not(dirty_pages, hpdata->active_pages, HUGEPAGE_PAGES);
+ fb_bit_and(dirty_pages, dirty_pages, hpdata->touched_pages,
+ HUGEPAGE_PAGES);
+
+ fb_init(purge_state->to_purge, HUGEPAGE_PAGES);
+ size_t next_bit = 0;
+ while (next_bit < HUGEPAGE_PAGES) {
+ size_t next_dirty = fb_ffs(dirty_pages, HUGEPAGE_PAGES,
+ next_bit);
+ /* Recall that fb_ffs returns nbits if no set bit is found. */
+ if (next_dirty == HUGEPAGE_PAGES) {
+ break;
+ }
+ size_t next_active = fb_ffs(hpdata->active_pages,
+ HUGEPAGE_PAGES, next_dirty);
+ /*
+ * Don't purge past the end of the dirty extent, into retained
+ * pages. This helps the kernel a tiny bit, but honestly it's
+ * mostly helpful for testing (where we tend to write test cases
+ * that think in terms of the dirty ranges).
+ */
+ ssize_t last_dirty = fb_fls(dirty_pages, HUGEPAGE_PAGES,
+ next_active - 1);
+ assert(last_dirty >= 0);
+ assert((size_t)last_dirty >= next_dirty);
+ assert((size_t)last_dirty - next_dirty + 1 <= HUGEPAGE_PAGES);
+
+ fb_set_range(purge_state->to_purge, HUGEPAGE_PAGES, next_dirty,
+ last_dirty - next_dirty + 1);
+ next_bit = next_active + 1;
+ }
+
+ /* We should purge, at least, everything dirty. */
+ size_t ndirty = hpdata->h_ntouched - hpdata->h_nactive;
+ purge_state->ndirty_to_purge = ndirty;
+ assert(ndirty <= fb_scount(
+ purge_state->to_purge, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES));
+ assert(ndirty == fb_scount(dirty_pages, HUGEPAGE_PAGES, 0,
+ HUGEPAGE_PAGES));
+
+ hpdata_assert_consistent(hpdata);
+
+ return ndirty;
+}
+
+bool
+hpdata_purge_next(hpdata_t *hpdata, hpdata_purge_state_t *purge_state,
+ void **r_purge_addr, size_t *r_purge_size) {
+ /*
+ * Note that we don't have a consistency check here; we're accessing
+ * hpdata without synchronization, and therefore have no right to expect
+ * a consistent state.
+ */
+ assert(!hpdata_alloc_allowed_get(hpdata));
+
+ if (purge_state->next_purge_search_begin == HUGEPAGE_PAGES) {
+ return false;
+ }
+ size_t purge_begin;
+ size_t purge_len;
+ bool found_range = fb_srange_iter(purge_state->to_purge, HUGEPAGE_PAGES,
+ purge_state->next_purge_search_begin, &purge_begin, &purge_len);
+ if (!found_range) {
+ return false;
+ }
+
+ *r_purge_addr = (void *)(
+ (uintptr_t)hpdata_addr_get(hpdata) + purge_begin * PAGE);
+ *r_purge_size = purge_len * PAGE;
+
+ purge_state->next_purge_search_begin = purge_begin + purge_len;
+ purge_state->npurged += purge_len;
+ assert(purge_state->npurged <= HUGEPAGE_PAGES);
+
+ return true;
+}
+
+void
+hpdata_purge_end(hpdata_t *hpdata, hpdata_purge_state_t *purge_state) {
+ assert(!hpdata_alloc_allowed_get(hpdata));
+ hpdata_assert_consistent(hpdata);
+ /* See the comment in reserve. */
+ assert(!hpdata->h_in_psset || hpdata->h_updating);
+
+ assert(purge_state->npurged == fb_scount(purge_state->to_purge,
+ HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES));
+ assert(purge_state->npurged >= purge_state->ndirty_to_purge);
+
+ fb_bit_not(purge_state->to_purge, purge_state->to_purge,
+ HUGEPAGE_PAGES);
+ fb_bit_and(hpdata->touched_pages, hpdata->touched_pages,
+ purge_state->to_purge, HUGEPAGE_PAGES);
+ assert(hpdata->h_ntouched >= purge_state->ndirty_to_purge);
+ hpdata->h_ntouched -= purge_state->ndirty_to_purge;
+
+ hpdata_assert_consistent(hpdata);
+}
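+
+/*
+ * A minimal, hypothetical driver for the begin/next/end purge protocol
+ * above (the caller must already have disallowed allocation on the hpdata
+ * and must follow the shard locking rules; see the consumers in hpa.c):
+ *
+ *     hpdata_purge_state_t purge_state;
+ *     hpdata_purge_begin(hpdata, &purge_state);
+ *     void *addr;
+ *     size_t size;
+ *     while (hpdata_purge_next(hpdata, &purge_state, &addr, &size)) {
+ *         pages_purge_forced(addr, size);
+ *     }
+ *     hpdata_purge_end(hpdata, &purge_state);
+ */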
+
+void
+hpdata_hugify(hpdata_t *hpdata) {
+ hpdata_assert_consistent(hpdata);
+ hpdata->h_huge = true;
+ fb_set_range(hpdata->touched_pages, HUGEPAGE_PAGES, 0, HUGEPAGE_PAGES);
+ hpdata->h_ntouched = HUGEPAGE_PAGES;
+ hpdata_assert_consistent(hpdata);
+}
+
+void
+hpdata_dehugify(hpdata_t *hpdata) {
+ hpdata_assert_consistent(hpdata);
+ hpdata->h_huge = false;
+ hpdata_assert_consistent(hpdata);
+}
diff --git a/contrib/jemalloc/src/inspect.c b/contrib/jemalloc/src/inspect.c
new file mode 100644
index 000000000000..911b5d524ac3
--- /dev/null
+++ b/contrib/jemalloc/src/inspect.c
@@ -0,0 +1,77 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+void
+inspect_extent_util_stats_get(tsdn_t *tsdn, const void *ptr, size_t *nfree,
+ size_t *nregs, size_t *size) {
+ assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
+
+ const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
+ if (unlikely(edata == NULL)) {
+ *nfree = *nregs = *size = 0;
+ return;
+ }
+
+ *size = edata_size_get(edata);
+ if (!edata_slab_get(edata)) {
+ *nfree = 0;
+ *nregs = 1;
+ } else {
+ *nfree = edata_nfree_get(edata);
+ *nregs = bin_infos[edata_szind_get(edata)].nregs;
+ assert(*nfree <= *nregs);
+ assert(*nfree * edata_usize_get(edata) <= *size);
+ }
+}
+
+void
+inspect_extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
+ size_t *nfree, size_t *nregs, size_t *size, size_t *bin_nfree,
+ size_t *bin_nregs, void **slabcur_addr) {
+ assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
+ && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);
+
+ const edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
+ if (unlikely(edata == NULL)) {
+ *nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
+ *slabcur_addr = NULL;
+ return;
+ }
+
+ *size = edata_size_get(edata);
+ if (!edata_slab_get(edata)) {
+ *nfree = *bin_nfree = *bin_nregs = 0;
+ *nregs = 1;
+ *slabcur_addr = NULL;
+ return;
+ }
+
+ *nfree = edata_nfree_get(edata);
+ const szind_t szind = edata_szind_get(edata);
+ *nregs = bin_infos[szind].nregs;
+ assert(*nfree <= *nregs);
+ assert(*nfree * edata_usize_get(edata) <= *size);
+
+ arena_t *arena = (arena_t *)atomic_load_p(
+ &arenas[edata_arena_ind_get(edata)], ATOMIC_RELAXED);
+ assert(arena != NULL);
+ const unsigned binshard = edata_binshard_get(edata);
+ bin_t *bin = arena_get_bin(arena, szind, binshard);
+
+ malloc_mutex_lock(tsdn, &bin->lock);
+ if (config_stats) {
+ *bin_nregs = *nregs * bin->stats.curslabs;
+ assert(*bin_nregs >= bin->stats.curregs);
+ *bin_nfree = *bin_nregs - bin->stats.curregs;
+ } else {
+ *bin_nfree = *bin_nregs = 0;
+ }
+ edata_t *slab;
+ if (bin->slabcur != NULL) {
+ slab = bin->slabcur;
+ } else {
+ slab = edata_heap_first(&bin->slabs_nonfull);
+ }
+ *slabcur_addr = slab != NULL ? edata_addr_get(slab) : NULL;
+ malloc_mutex_unlock(tsdn, &bin->lock);
+}
diff --git a/contrib/jemalloc/src/jemalloc.c b/contrib/jemalloc/src/jemalloc.c
index fefb719ac5c4..e4b183d1a24d 100644
--- a/contrib/jemalloc/src/jemalloc.c
+++ b/contrib/jemalloc/src/jemalloc.c
@@ -4,20 +4,26 @@
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/atomic.h"
+#include "jemalloc/internal/buf_writer.h"
#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
+#include "jemalloc/internal/fxp.h"
+#include "jemalloc/internal/san.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/log.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
+#include "jemalloc/internal/thread_event.h"
#include "jemalloc/internal/util.h"
/******************************************************************************/
@@ -33,6 +39,29 @@ const char *je_malloc_conf
JEMALLOC_ATTR(weak)
#endif
;
+/*
+ * The usual rule is that the closer to runtime you are, the higher priority
+ * your configuration settings are (so the jemalloc config options get lower
+ * priority than the per-binary setting, which gets lower priority than the /etc
+ * setting, which gets lower priority than the environment settings).
+ *
+ * But it's a fairly common use case in some testing environments for a user to
+ * be able to control the binary, but nothing else (e.g. a performance canary
+ * uses the production OS and environment variables, but can run any binary in
+ * those circumstances). For these use cases, it's handy to have an in-binary
+ * mechanism for overriding environment variable settings, with the idea that if
+ * the results are positive they get promoted to the official settings, and
+ * moved from the binary to the environment variable.
+ *
+ * We don't actually want this to be widespread, so we'll give it a silly name
+ * and not mention it in headers or documentation.
+ */
+const char *je_malloc_conf_2_conf_harder
+#ifndef _WIN32
+ JEMALLOC_ATTR(weak)
+#endif
+ ;
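+
+/*
+ * Hypothetical usage from a binary under test (this source is consulted
+ * last, so it overrides even the MALLOC_CONF environment variable):
+ *
+ *     const char *je_malloc_conf_2_conf_harder = "narenas:1,tcache:false";
+ */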
+
bool opt_abort =
#ifdef JEMALLOC_DEBUG
true
@@ -70,16 +99,73 @@ bool opt_junk_free =
false
#endif
;
+bool opt_trust_madvise =
+#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
+ false
+#else
+ true
+#endif
+ ;
+
+bool opt_cache_oblivious =
+#ifdef JEMALLOC_CACHE_OBLIVIOUS
+ true
+#else
+ false
+#endif
+ ;
+
+zero_realloc_action_t opt_zero_realloc_action =
+#ifdef JEMALLOC_ZERO_REALLOC_DEFAULT_FREE
+ zero_realloc_action_free
+#else
+ zero_realloc_action_alloc
+#endif
+ ;
+
+atomic_zu_t zero_realloc_count = ATOMIC_INIT(0);
+
+const char *zero_realloc_mode_names[] = {
+ "alloc",
+ "free",
+ "abort",
+};
+
+/*
+ * These are the documented values for junk fill debugging facilities -- see the
+ * man page.
+ */
+static const uint8_t junk_alloc_byte = 0xa5;
+static const uint8_t junk_free_byte = 0x5a;
+
+static void default_junk_alloc(void *ptr, size_t usize) {
+ memset(ptr, junk_alloc_byte, usize);
+}
+
+static void default_junk_free(void *ptr, size_t usize) {
+ memset(ptr, junk_free_byte, usize);
+}
+
+void (*junk_alloc_callback)(void *ptr, size_t size) = &default_junk_alloc;
+void (*junk_free_callback)(void *ptr, size_t size) = &default_junk_free;
bool opt_utrace = false;
bool opt_xmalloc = false;
+bool opt_experimental_infallible_new = false;
bool opt_zero = false;
unsigned opt_narenas = 0;
+fxp_t opt_narenas_ratio = FXP_INIT_INT(4);
unsigned ncpus;
/* Protects arenas initialization. */
malloc_mutex_t arenas_lock;
+
+/* The global hpa, and whether it's on. */
+bool opt_hpa = false;
+hpa_shard_opts_t opt_hpa_opts = HPA_SHARD_OPTS_DEFAULT;
+sec_opts_t opt_hpa_sec_opts = SEC_OPTS_DEFAULT;
+
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
@@ -98,13 +184,7 @@ static arena_t *a0; /* arenas[0]. */
unsigned narenas_auto;
unsigned manual_arena_base;
-typedef enum {
- malloc_init_uninitialized = 3,
- malloc_init_a0_initialized = 2,
- malloc_init_recursible = 1,
- malloc_init_initialized = 0 /* Common case --> jnz. */
-} malloc_init_t;
-static malloc_init_t malloc_init_state = malloc_init_uninitialized;
+malloc_init_t malloc_init_state = malloc_init_uninitialized;
/* False should be the common case. Set to true to trigger initialization. */
bool malloc_slow = true;
@@ -184,7 +264,7 @@ typedef struct {
ut.p = (a); \
ut.s = (b); \
ut.r = (c); \
- utrace(&ut, sizeof(ut)); \
+ UTRACE_CALL(&ut, sizeof(ut)); \
errno = utrace_serrno; \
} \
} while (0)
@@ -209,11 +289,6 @@ static bool malloc_init_hard(void);
* Begin miscellaneous support functions.
*/
-bool
-malloc_initialized(void) {
- return (malloc_init_state == malloc_init_initialized);
-}
-
JEMALLOC_ALWAYS_INLINE bool
malloc_init_a0(void) {
if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
@@ -261,7 +336,7 @@ a0dalloc(void *ptr) {
}
/*
- * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive
+ * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
* situations that cannot tolerate TLS variable access (TLS allocation and very
* early internal data structure initialization).
*/
@@ -319,7 +394,7 @@ narenas_total_get(void) {
/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
-arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+arena_init_locked(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
arena_t *arena;
assert(ind <= narenas_total_get());
@@ -341,7 +416,7 @@ arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
}
/* Actually initialize the arena. */
- arena = arena_new(tsdn, ind, extent_hooks);
+ arena = arena_new(tsdn, ind, config);
return arena;
}
@@ -365,11 +440,11 @@ arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
}
arena_t *
-arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
+arena_init(tsdn_t *tsdn, unsigned ind, const arena_config_t *config) {
arena_t *arena;
malloc_mutex_lock(tsdn, &arenas_lock);
- arena = arena_init_locked(tsdn, ind, extent_hooks);
+ arena = arena_init_locked(tsdn, ind, config);
malloc_mutex_unlock(tsdn, &arenas_lock);
arena_new_create_background_thread(tsdn, ind);
@@ -398,14 +473,19 @@ arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
}
void
-arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
- arena_t *oldarena, *newarena;
+arena_migrate(tsd_t *tsd, arena_t *oldarena, arena_t *newarena) {
+ assert(oldarena != NULL);
+ assert(newarena != NULL);
- oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
- newarena = arena_get(tsd_tsdn(tsd), newind, false);
arena_nthreads_dec(oldarena, false);
arena_nthreads_inc(newarena, false);
tsd_arena_set(tsd, newarena);
+
+ if (arena_nthreads_get(oldarena, false) == 0) {
+ /* Purge if the old arena has no associated threads anymore. */
+ arena_decay(tsd_tsdn(tsd), oldarena,
+ /* is_background_thread */ false, /* all */ true);
+ }
}
static void
@@ -422,82 +502,6 @@ arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
}
}
-arena_tdata_t *
-arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
- arena_tdata_t *tdata, *arenas_tdata_old;
- arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
- unsigned narenas_tdata_old, i;
- unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
- unsigned narenas_actual = narenas_total_get();
-
- /*
- * Dissociate old tdata array (and set up for deallocation upon return)
- * if it's too small.
- */
- if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
- arenas_tdata_old = arenas_tdata;
- narenas_tdata_old = narenas_tdata;
- arenas_tdata = NULL;
- narenas_tdata = 0;
- tsd_arenas_tdata_set(tsd, arenas_tdata);
- tsd_narenas_tdata_set(tsd, narenas_tdata);
- } else {
- arenas_tdata_old = NULL;
- narenas_tdata_old = 0;
- }
-
- /* Allocate tdata array if it's missing. */
- if (arenas_tdata == NULL) {
- bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
- narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
-
- if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
- *arenas_tdata_bypassp = true;
- arenas_tdata = (arena_tdata_t *)a0malloc(
- sizeof(arena_tdata_t) * narenas_tdata);
- *arenas_tdata_bypassp = false;
- }
- if (arenas_tdata == NULL) {
- tdata = NULL;
- goto label_return;
- }
- assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
- tsd_arenas_tdata_set(tsd, arenas_tdata);
- tsd_narenas_tdata_set(tsd, narenas_tdata);
- }
-
- /*
- * Copy to tdata array. It's possible that the actual number of arenas
- * has increased since narenas_total_get() was called above, but that
- * causes no correctness issues unless two threads concurrently execute
- * the arenas.create mallctl, which we trust mallctl synchronization to
- * prevent.
- */
-
- /* Copy/initialize tickers. */
- for (i = 0; i < narenas_actual; i++) {
- if (i < narenas_tdata_old) {
- ticker_copy(&arenas_tdata[i].decay_ticker,
- &arenas_tdata_old[i].decay_ticker);
- } else {
- ticker_init(&arenas_tdata[i].decay_ticker,
- DECAY_NTICKS_PER_UPDATE);
- }
- }
- if (narenas_tdata > narenas_actual) {
- memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
- * (narenas_tdata - narenas_actual));
- }
-
- /* Read the refreshed tdata array. */
- tdata = &arenas_tdata[ind];
-label_return:
- if (arenas_tdata_old != NULL) {
- a0dalloc(arenas_tdata_old);
- }
- return tdata;
-}
-
/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd, bool internal) {
@@ -580,8 +584,7 @@ arena_choose_hard(tsd_t *tsd, bool internal) {
/* Initialize a new arena. */
choose[j] = first_null;
arena = arena_init_locked(tsd_tsdn(tsd),
- choose[j],
- (extent_hooks_t *)&extent_hooks_default);
+ choose[j], &arena_config_default);
if (arena == NULL) {
malloc_mutex_unlock(tsd_tsdn(tsd),
&arenas_lock);
@@ -633,20 +636,6 @@ arena_cleanup(tsd_t *tsd) {
}
}
-void
-arenas_tdata_cleanup(tsd_t *tsd) {
- arena_tdata_t *arenas_tdata;
-
- /* Prevent tsd->arenas_tdata from being (re)created. */
- *tsd_arenas_tdata_bypassp_get(tsd) = true;
-
- arenas_tdata = tsd_arenas_tdata_get(tsd);
- if (arenas_tdata != NULL) {
- tsd_arenas_tdata_set(tsd, NULL);
- a0dalloc(arenas_tdata);
- }
-}
-
static void
stats_print_atexit(void) {
if (config_stats) {
@@ -665,11 +654,13 @@ stats_print_atexit(void) {
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena = arena_get(tsdn, i, false);
if (arena != NULL) {
- tcache_t *tcache;
+ tcache_slow_t *tcache_slow;
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
- ql_foreach(tcache, &arena->tcache_ql, link) {
- tcache_stats_merge(tsdn, tcache, arena);
+ ql_foreach(tcache_slow, &arena->tcache_ql,
+ link) {
+ tcache_stats_merge(tsdn,
+ tcache_slow->tcache, arena);
}
malloc_mutex_unlock(tsdn,
&arena->tcache_ql_mtx);
@@ -734,18 +725,28 @@ malloc_ncpus(void) {
SYSTEM_INFO si;
GetSystemInfo(&si);
result = si.dwNumberOfProcessors;
-#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
+#elif defined(CPU_COUNT)
/*
* glibc >= 2.6 has the CPU_COUNT macro.
*
* glibc's sysconf() uses isspace(). glibc allocates for the first time
* *before* setting up the isspace tables. Therefore we need a
* different method to get the number of CPUs.
+ *
+ * The getaffinity approach is also preferred when only a subset of CPUs
+ * is available, to avoid using more arenas than necessary.
*/
{
+# if defined(__FreeBSD__) || defined(__DragonFly__)
+ cpuset_t set;
+# else
cpu_set_t set;
-
+# endif
+# if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
+ sched_getaffinity(0, sizeof(set), &set);
+# else
pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
+# endif
result = CPU_COUNT(&set);
}
#else
@@ -754,9 +755,47 @@ malloc_ncpus(void) {
return ((result == -1) ? 1 : (unsigned)result);
}
+/*
+ * Ensure that the number of CPUs is deterministic, i.e. that it is the same
+ * based on:
+ * - sched_getaffinity()
+ * - _SC_NPROCESSORS_ONLN
+ * - _SC_NPROCESSORS_CONF
+ * Otherwise tricky things are possible with percpu arenas in use.
+ */
+static bool
+malloc_cpu_count_is_deterministic()
+{
+#ifdef _WIN32
+ return true;
+#else
+ long cpu_onln = sysconf(_SC_NPROCESSORS_ONLN);
+ long cpu_conf = sysconf(_SC_NPROCESSORS_CONF);
+ if (cpu_onln != cpu_conf) {
+ return false;
+ }
+# if defined(CPU_COUNT)
+# if defined(__FreeBSD__) || defined(__DragonFly__)
+ cpuset_t set;
+# else
+ cpu_set_t set;
+# endif /* __FreeBSD__ */
+# if defined(JEMALLOC_HAVE_SCHED_SETAFFINITY)
+ sched_getaffinity(0, sizeof(set), &set);
+# else /* !JEMALLOC_HAVE_SCHED_SETAFFINITY */
+ pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
+# endif /* JEMALLOC_HAVE_SCHED_SETAFFINITY */
+ long cpu_affinity = CPU_COUNT(&set);
+ if (cpu_affinity != cpu_conf) {
+ return false;
+ }
+# endif /* CPU_COUNT */
+ return true;
+#endif
+}
+
static void
-init_opt_stats_print_opts(const char *v, size_t vlen) {
- size_t opts_len = strlen(opt_stats_print_opts);
+init_opt_stats_opts(const char *v, size_t vlen, char *dest) {
+ size_t opts_len = strlen(dest);
assert(opts_len <= stats_print_tot_num_options);
for (size_t i = 0; i < vlen; i++) {
@@ -767,16 +806,16 @@ init_opt_stats_print_opts(const char *v, size_t vlen) {
default: continue;
}
- if (strchr(opt_stats_print_opts, v[i]) != NULL) {
+ if (strchr(dest, v[i]) != NULL) {
/* Ignore repeated. */
continue;
}
- opt_stats_print_opts[opts_len++] = v[i];
- opt_stats_print_opts[opts_len] = '\0';
+ dest[opts_len++] = v[i];
+ dest[opts_len] = '\0';
assert(opts_len <= stats_print_tot_num_options);
}
- assert(opts_len == strlen(opt_stats_print_opts));
+ assert(opts_len == strlen(dest));
}
/* Reads the next size pair in a multi-sized option. */
@@ -858,10 +897,12 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
if (opts != *opts_p) {
malloc_write("<jemalloc>: Conf string ends "
"with key\n");
+ had_conf_error = true;
}
return true;
default:
malloc_write("<jemalloc>: Malformed conf string\n");
+ had_conf_error = true;
return true;
}
}
@@ -880,6 +921,7 @@ malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
if (*opts == '\0') {
malloc_write("<jemalloc>: Conf string ends "
"with comma\n");
+ had_conf_error = true;
}
*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
accept = true;
@@ -936,7 +978,7 @@ malloc_slow_flag_init(void) {
}
/* Number of sources for initializing malloc_conf */
-#define MALLOC_CONF_NSOURCES 4
+#define MALLOC_CONF_NSOURCES 5
static const char *
obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) {
@@ -1014,6 +1056,9 @@ obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) {
ret = NULL;
}
break;
+ } case 4: {
+ ret = je_malloc_conf_2_conf_harder;
+ break;
} default:
not_reached();
ret = NULL;
@@ -1030,7 +1075,9 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
"string pointed to by the global variable malloc_conf",
"\"name\" of the file referenced by the symbolic link named "
"/etc/malloc.conf",
- "value of the environment variable MALLOC_CONF"
+ "value of the environment variable MALLOC_CONF",
+ "string pointed to by the global variable "
+ "malloc_conf_2_conf_harder",
};
unsigned i;
const char *opts, *k, *v;
@@ -1098,39 +1145,50 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
#define CONF_CHECK_MIN(um, min) ((um) < (min))
#define CONF_DONT_CHECK_MAX(um, max) false
#define CONF_CHECK_MAX(um, max) ((um) > (max))
-#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
+
+#define CONF_VALUE_READ(max_t, result) \
+ char *end; \
+ set_errno(0); \
+ result = (max_t)malloc_strtoumax(v, &end, 0);
+#define CONF_VALUE_READ_FAIL() \
+ (get_errno() != 0 || (uintptr_t)end - (uintptr_t)v != vlen)
+
+#define CONF_HANDLE_T(t, max_t, o, n, min, max, check_min, check_max, clip) \
if (CONF_MATCH(n)) { \
- uintmax_t um; \
- char *end; \
- \
- set_errno(0); \
- um = malloc_strtoumax(v, &end, 0); \
- if (get_errno() != 0 || (uintptr_t)end -\
- (uintptr_t)v != vlen) { \
+ max_t mv; \
+ CONF_VALUE_READ(max_t, mv) \
+ if (CONF_VALUE_READ_FAIL()) { \
CONF_ERROR("Invalid conf value",\
k, klen, v, vlen); \
} else if (clip) { \
- if (check_min(um, (t)(min))) { \
+ if (check_min(mv, (t)(min))) { \
o = (t)(min); \
} else if ( \
- check_max(um, (t)(max))) { \
+ check_max(mv, (t)(max))) { \
o = (t)(max); \
} else { \
- o = (t)um; \
+ o = (t)mv; \
} \
} else { \
- if (check_min(um, (t)(min)) || \
- check_max(um, (t)(max))) { \
+ if (check_min(mv, (t)(min)) || \
+ check_max(mv, (t)(max))) { \
CONF_ERROR( \
"Out-of-range " \
"conf value", \
k, klen, v, vlen); \
} else { \
- o = (t)um; \
+ o = (t)mv; \
} \
} \
CONF_CONTINUE; \
}
+#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
+ CONF_HANDLE_T(t, uintmax_t, o, n, min, max, check_min, \
+ check_max, clip)
+#define CONF_HANDLE_T_SIGNED(t, o, n, min, max, check_min, check_max, clip)\
+ CONF_HANDLE_T(t, intmax_t, o, n, min, max, check_min, \
+ check_max, clip)
+
#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
clip) \
CONF_HANDLE_T_U(unsigned, o, n, min, max, \
@@ -1138,27 +1196,15 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
CONF_HANDLE_T_U(size_t, o, n, min, max, \
check_min, check_max, clip)
+#define CONF_HANDLE_INT64_T(o, n, min, max, check_min, check_max, clip) \
+ CONF_HANDLE_T_SIGNED(int64_t, o, n, min, max, \
+ check_min, check_max, clip)
+#define CONF_HANDLE_UINT64_T(o, n, min, max, check_min, check_max, clip)\
+ CONF_HANDLE_T_U(uint64_t, o, n, min, max, \
+ check_min, check_max, clip)
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
- if (CONF_MATCH(n)) { \
- long l; \
- char *end; \
- \
- set_errno(0); \
- l = strtol(v, &end, 0); \
- if (get_errno() != 0 || (uintptr_t)end -\
- (uintptr_t)v != vlen) { \
- CONF_ERROR("Invalid conf value",\
- k, klen, v, vlen); \
- } else if (l < (ssize_t)(min) || l > \
- (ssize_t)(max)) { \
- CONF_ERROR( \
- "Out-of-range conf value", \
- k, klen, v, vlen); \
- } else { \
- o = l; \
- } \
- CONF_CONTINUE; \
- }
+ CONF_HANDLE_T_SIGNED(ssize_t, o, n, min, max, \
+ CONF_CHECK_MIN, CONF_CHECK_MAX, false)
#define CONF_HANDLE_CHAR_P(o, n, d) \
if (CONF_MATCH(n)) { \
size_t cpylen = (vlen <= \
@@ -1178,13 +1224,14 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
CONF_HANDLE_BOOL(opt_abort, "abort")
CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
+ CONF_HANDLE_BOOL(opt_trust_madvise, "trust_madvise")
if (strncmp("metadata_thp", k, klen) == 0) {
- int i;
+ int m;
bool match = false;
- for (i = 0; i < metadata_thp_mode_limit; i++) {
- if (strncmp(metadata_thp_mode_names[i],
+ for (m = 0; m < metadata_thp_mode_limit; m++) {
+ if (strncmp(metadata_thp_mode_names[m],
v, vlen) == 0) {
- opt_metadata_thp = i;
+ opt_metadata_thp = m;
match = true;
break;
}
@@ -1197,18 +1244,18 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
}
CONF_HANDLE_BOOL(opt_retain, "retain")
if (strncmp("dss", k, klen) == 0) {
- int i;
+ int m;
bool match = false;
- for (i = 0; i < dss_prec_limit; i++) {
- if (strncmp(dss_prec_names[i], v, vlen)
+ for (m = 0; m < dss_prec_limit; m++) {
+ if (strncmp(dss_prec_names[m], v, vlen)
== 0) {
- if (extent_dss_prec_set(i)) {
+ if (extent_dss_prec_set(m)) {
CONF_ERROR(
"Error setting dss",
k, klen, v, vlen);
} else {
opt_dss =
- dss_prec_names[i];
+ dss_prec_names[m];
match = true;
break;
}
@@ -1220,9 +1267,27 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
}
CONF_CONTINUE;
}
- CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
- UINT_MAX, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX,
- false)
+ if (CONF_MATCH("narenas")) {
+ if (CONF_MATCH_VALUE("default")) {
+ opt_narenas = 0;
+ CONF_CONTINUE;
+ } else {
+ CONF_HANDLE_UNSIGNED(opt_narenas,
+ "narenas", 1, UINT_MAX,
+ CONF_CHECK_MIN, CONF_DONT_CHECK_MAX,
+ /* clip */ false)
+ }
+ }
+ if (CONF_MATCH("narenas_ratio")) {
+ char *end;
+ bool err = fxp_parse(&opt_narenas_ratio, v,
+ &end);
+ if (err || (size_t)(end - v) != vlen) {
+ CONF_ERROR("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ CONF_CONTINUE;
+ }
if (CONF_MATCH("bin_shards")) {
const char *bin_shards_segment_cur = v;
size_t vlen_left = vlen;
@@ -1245,6 +1310,9 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
} while (vlen_left > 0);
CONF_CONTINUE;
}
+ CONF_HANDLE_INT64_T(opt_mutex_max_spin,
+ "mutex_max_spin", -1, INT64_MAX, CONF_CHECK_MIN,
+ CONF_DONT_CHECK_MAX, false);
CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
"dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
@@ -1255,7 +1323,16 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
SSIZE_MAX);
CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
if (CONF_MATCH("stats_print_opts")) {
- init_opt_stats_print_opts(v, vlen);
+ init_opt_stats_opts(v, vlen,
+ opt_stats_print_opts);
+ CONF_CONTINUE;
+ }
+ CONF_HANDLE_INT64_T(opt_stats_interval,
+ "stats_interval", -1, INT64_MAX,
+ CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
+ if (CONF_MATCH("stats_interval_opts")) {
+ init_opt_stats_opts(v, vlen,
+ opt_stats_interval_opts);
CONF_CONTINUE;
}
if (config_fill) {
@@ -1291,9 +1368,61 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
if (config_xmalloc) {
CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
}
+ if (config_enable_cxx) {
+ CONF_HANDLE_BOOL(
+ opt_experimental_infallible_new,
+ "experimental_infallible_new")
+ }
+
CONF_HANDLE_BOOL(opt_tcache, "tcache")
- CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
- -1, (sizeof(size_t) << 3) - 1)
+ CONF_HANDLE_SIZE_T(opt_tcache_max, "tcache_max",
+ 0, TCACHE_MAXCLASS_LIMIT, CONF_DONT_CHECK_MIN,
+ CONF_CHECK_MAX, /* clip */ true)
+ if (CONF_MATCH("lg_tcache_max")) {
+ size_t m;
+ CONF_VALUE_READ(size_t, m)
+ if (CONF_VALUE_READ_FAIL()) {
+ CONF_ERROR("Invalid conf value",
+ k, klen, v, vlen);
+ } else {
+ /* clip if necessary */
+ if (m > TCACHE_LG_MAXCLASS_LIMIT) {
+ m = TCACHE_LG_MAXCLASS_LIMIT;
+ }
+ opt_tcache_max = (size_t)1 << m;
+ }
+ CONF_CONTINUE;
+ }
+ /*
+ * Anyone trying to set a value outside -16 to 16 is
+ * deeply confused.
+ */
+ CONF_HANDLE_SSIZE_T(opt_lg_tcache_nslots_mul,
+ "lg_tcache_nslots_mul", -16, 16)
+ /* Ditto with values past 2048. */
+ CONF_HANDLE_UNSIGNED(opt_tcache_nslots_small_min,
+ "tcache_nslots_small_min", 1, 2048,
+ CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
+ CONF_HANDLE_UNSIGNED(opt_tcache_nslots_small_max,
+ "tcache_nslots_small_max", 1, 2048,
+ CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
+ CONF_HANDLE_UNSIGNED(opt_tcache_nslots_large,
+ "tcache_nslots_large", 1, 2048,
+ CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
+ CONF_HANDLE_SIZE_T(opt_tcache_gc_incr_bytes,
+ "tcache_gc_incr_bytes", 1024, SIZE_T_MAX,
+ CONF_CHECK_MIN, CONF_DONT_CHECK_MAX,
+ /* clip */ true)
+ CONF_HANDLE_SIZE_T(opt_tcache_gc_delay_bytes,
+ "tcache_gc_delay_bytes", 0, SIZE_T_MAX,
+ CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX,
+ /* clip */ false)
+ CONF_HANDLE_UNSIGNED(opt_lg_tcache_flush_small_div,
+ "lg_tcache_flush_small_div", 1, 16,
+ CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
+ CONF_HANDLE_UNSIGNED(opt_lg_tcache_flush_large_div,
+ "lg_tcache_flush_large_div", 1, 16,
+ CONF_CHECK_MIN, CONF_CHECK_MAX, /* clip */ true)
/*
* The runtime option of oversize_threshold remains
@@ -1313,16 +1442,16 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
if (strncmp("percpu_arena", k, klen) == 0) {
bool match = false;
- for (int i = percpu_arena_mode_names_base; i <
- percpu_arena_mode_names_limit; i++) {
- if (strncmp(percpu_arena_mode_names[i],
+ for (int m = percpu_arena_mode_names_base; m <
+ percpu_arena_mode_names_limit; m++) {
+ if (strncmp(percpu_arena_mode_names[m],
v, vlen) == 0) {
if (!have_percpu_arena) {
CONF_ERROR(
"No getcpu support",
k, klen, v, vlen);
}
- opt_percpu_arena = i;
+ opt_percpu_arena = m;
match = true;
break;
}
@@ -1340,7 +1469,83 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
opt_max_background_threads,
CONF_CHECK_MIN, CONF_CHECK_MAX,
true);
+ CONF_HANDLE_BOOL(opt_hpa, "hpa")
+ CONF_HANDLE_SIZE_T(opt_hpa_opts.slab_max_alloc,
+ "hpa_slab_max_alloc", PAGE, HUGEPAGE,
+ CONF_CHECK_MIN, CONF_CHECK_MAX, true);
+
+ /*
+ * Accept either a ratio-based or an exact hugification
+ * threshold.
+ */
+ CONF_HANDLE_SIZE_T(opt_hpa_opts.hugification_threshold,
+ "hpa_hugification_threshold", PAGE, HUGEPAGE,
+ CONF_CHECK_MIN, CONF_CHECK_MAX, true);
+ if (CONF_MATCH("hpa_hugification_threshold_ratio")) {
+ fxp_t ratio;
+ char *end;
+ bool err = fxp_parse(&ratio, v,
+ &end);
+ if (err || (size_t)(end - v) != vlen
+ || ratio > FXP_INIT_INT(1)) {
+ CONF_ERROR("Invalid conf value",
+ k, klen, v, vlen);
+ } else {
+ opt_hpa_opts.hugification_threshold =
+ fxp_mul_frac(HUGEPAGE, ratio);
+ }
+ CONF_CONTINUE;
+ }
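+ /*
+ * E.g. (hypothetical):
+ * MALLOC_CONF="hpa:true,hpa_hugification_threshold_ratio:0.95".
+ */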
+
+ CONF_HANDLE_UINT64_T(
+ opt_hpa_opts.hugify_delay_ms, "hpa_hugify_delay_ms",
+ 0, 0, CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX,
+ false);
+
+ CONF_HANDLE_UINT64_T(
+ opt_hpa_opts.min_purge_interval_ms,
+ "hpa_min_purge_interval_ms", 0, 0,
+ CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false);
+
+ if (CONF_MATCH("hpa_dirty_mult")) {
+ if (CONF_MATCH_VALUE("-1")) {
+ opt_hpa_opts.dirty_mult = (fxp_t)-1;
+ CONF_CONTINUE;
+ }
+ fxp_t ratio;
+ char *end;
+ bool err = fxp_parse(&ratio, v,
+ &end);
+ if (err || (size_t)(end - v) != vlen) {
+ CONF_ERROR("Invalid conf value",
+ k, klen, v, vlen);
+ } else {
+ opt_hpa_opts.dirty_mult = ratio;
+ }
+ CONF_CONTINUE;
+ }
+
+ CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.nshards,
+ "hpa_sec_nshards", 0, 0, CONF_CHECK_MIN,
+ CONF_DONT_CHECK_MAX, true);
+ CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.max_alloc,
+ "hpa_sec_max_alloc", PAGE, 0, CONF_CHECK_MIN,
+ CONF_DONT_CHECK_MAX, true);
+ CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.max_bytes,
+ "hpa_sec_max_bytes", PAGE, 0, CONF_CHECK_MIN,
+ CONF_DONT_CHECK_MAX, true);
+ CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.bytes_after_flush,
+ "hpa_sec_bytes_after_flush", PAGE, 0,
+ CONF_CHECK_MIN, CONF_DONT_CHECK_MAX, true);
+ CONF_HANDLE_SIZE_T(opt_hpa_sec_opts.batch_fill_extra,
+ "hpa_sec_batch_fill_extra", 0, HUGEPAGE_PAGES,
+ CONF_CHECK_MIN, CONF_CHECK_MAX, true);
+
if (CONF_MATCH("slab_sizes")) {
+ if (CONF_MATCH_VALUE("default")) {
+ sc_data_init(sc_data);
+ CONF_CONTINUE;
+ }
bool err;
const char *slab_size_segment_cur = v;
size_t vlen_left = vlen;
@@ -1382,7 +1587,44 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
+ CONF_HANDLE_BOOL(opt_prof_leak_error,
+ "prof_leak_error")
CONF_HANDLE_BOOL(opt_prof_log, "prof_log")
+ CONF_HANDLE_SSIZE_T(opt_prof_recent_alloc_max,
+ "prof_recent_alloc_max", -1, SSIZE_MAX)
+ CONF_HANDLE_BOOL(opt_prof_stats, "prof_stats")
+ CONF_HANDLE_BOOL(opt_prof_sys_thread_name,
+ "prof_sys_thread_name")
+ if (CONF_MATCH("prof_time_resolution")) {
+ if (CONF_MATCH_VALUE("default")) {
+ opt_prof_time_res =
+ prof_time_res_default;
+ } else if (CONF_MATCH_VALUE("high")) {
+ if (!config_high_res_timer) {
+ CONF_ERROR(
+ "No high resolution"
+ " timer support",
+ k, klen, v, vlen);
+ } else {
+ opt_prof_time_res =
+ prof_time_res_high;
+ }
+ } else {
+ CONF_ERROR("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ CONF_CONTINUE;
+ }
+ /*
+ * Undocumented. When set to false, don't
+ * correct for an unbiasing bug in jeprof
+ * attribution. This can be handy if you want
+ * to get consistent numbers from your binary
+ * across different jemalloc versions, even if
+ * those numbers are incorrect. The default is
+ * true.
+ */
+ CONF_HANDLE_BOOL(opt_prof_unbias, "prof_unbias")
}
if (config_log) {
if (CONF_MATCH("log")) {
@@ -1396,15 +1638,15 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
}
if (CONF_MATCH("thp")) {
bool match = false;
- for (int i = 0; i < thp_mode_names_limit; i++) {
- if (strncmp(thp_mode_names[i],v, vlen)
+ for (int m = 0; m < thp_mode_names_limit; m++) {
+ if (strncmp(thp_mode_names[m],v, vlen)
== 0) {
- if (!have_madvise_huge) {
+ if (!have_madvise_huge && !have_memcntl) {
CONF_ERROR(
"No THP support",
k, klen, v, vlen);
}
- opt_thp = i;
+ opt_thp = m;
match = true;
break;
}
@@ -1415,6 +1657,55 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
}
CONF_CONTINUE;
}
+ if (CONF_MATCH("zero_realloc")) {
+ if (CONF_MATCH_VALUE("alloc")) {
+ opt_zero_realloc_action
+ = zero_realloc_action_alloc;
+ } else if (CONF_MATCH_VALUE("free")) {
+ opt_zero_realloc_action
+ = zero_realloc_action_free;
+ } else if (CONF_MATCH_VALUE("abort")) {
+ opt_zero_realloc_action
+ = zero_realloc_action_abort;
+ } else {
+ CONF_ERROR("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ CONF_CONTINUE;
+ }
+ if (config_uaf_detection &&
+ CONF_MATCH("lg_san_uaf_align")) {
+ ssize_t a;
+ CONF_VALUE_READ(ssize_t, a)
+ if (CONF_VALUE_READ_FAIL() || a < -1) {
+ CONF_ERROR("Invalid conf value",
+ k, klen, v, vlen);
+ }
+ if (a == -1) {
+ opt_lg_san_uaf_align = -1;
+ CONF_CONTINUE;
+ }
+
+ /* clip if necessary */
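+				/*
+				 * E.g. a value below LG_PAGE is raised to
+				 * LG_PAGE, and one at or above the word width
+				 * is lowered to word width - 1.
+				 */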
+ ssize_t max_allowed = (sizeof(size_t) << 3) - 1;
+ ssize_t min_allowed = LG_PAGE;
+ if (a > max_allowed) {
+ a = max_allowed;
+ } else if (a < min_allowed) {
+ a = min_allowed;
+ }
+
+ opt_lg_san_uaf_align = a;
+ CONF_CONTINUE;
+ }
+
+ CONF_HANDLE_SIZE_T(opt_san_guard_small,
+ "san_guard_small", 0, SIZE_T_MAX,
+ CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
+ CONF_HANDLE_SIZE_T(opt_san_guard_large,
+ "san_guard_large", 0, SIZE_T_MAX,
+ CONF_DONT_CHECK_MIN, CONF_DONT_CHECK_MAX, false)
+
CONF_ERROR("Invalid conf pair", k, klen, v, vlen);
#undef CONF_ERROR
#undef CONF_CONTINUE
@@ -1425,7 +1716,9 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
#undef CONF_CHECK_MIN
#undef CONF_DONT_CHECK_MAX
#undef CONF_CHECK_MAX
+#undef CONF_HANDLE_T
#undef CONF_HANDLE_T_U
+#undef CONF_HANDLE_T_SIGNED
#undef CONF_HANDLE_UNSIGNED
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
@@ -1440,15 +1733,33 @@ malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
}
+static bool
+malloc_conf_init_check_deps(void) {
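+	/*
+	 * prof_leak_error reporting happens via the final profile dump, so it
+	 * is meaningful only when prof_final is also enabled.
+	 */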
+ if (opt_prof_leak_error && !opt_prof_final) {
+ malloc_printf("<jemalloc>: prof_leak_error is set w/o "
+ "prof_final.\n");
+ return true;
+ }
+
+ return false;
+}
+
static void
malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
- const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL};
+ const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL,
+ NULL};
char buf[PATH_MAX + 1];
/* The first call only set the confirm_conf option and opts_cache */
malloc_conf_init_helper(NULL, NULL, true, opts_cache, buf);
malloc_conf_init_helper(sc_data, bin_shard_sizes, false, opts_cache,
NULL);
+ if (malloc_conf_init_check_deps()) {
+		/* check_deps only emits a warning; abort below if needed. */
+ if (opt_abort_conf) {
+ malloc_abort_invalid_conf();
+ }
+ }
}
#undef MALLOC_CONF_NSOURCES
@@ -1492,8 +1803,8 @@ malloc_init_hard_a0_locked() {
* Ordering here is somewhat tricky; we need sc_boot() first, since that
* determines what the size classes will be, and then
* malloc_conf_init(), since any slab size tweaking will need to be done
- * before sz_boot and bin_boot, which assume that the values they read
- * out of sc_data_global are final.
+ * before sz_boot and bin_info_boot, which assume that the values they
+ * read out of sc_data_global are final.
*/
sc_boot(&sc_data);
unsigned bin_shard_sizes[SC_NBINS];
@@ -1507,8 +1818,9 @@ malloc_init_hard_a0_locked() {
prof_boot0();
}
malloc_conf_init(&sc_data, bin_shard_sizes);
- sz_boot(&sc_data);
- bin_boot(&sc_data, bin_shard_sizes);
+ san_init(opt_lg_san_uaf_align);
+ sz_boot(&sc_data, opt_cache_oblivious);
+ bin_info_boot(&sc_data, bin_shard_sizes);
if (opt_stats_print) {
/* Print statistics at exit. */
@@ -1519,12 +1831,20 @@ malloc_init_hard_a0_locked() {
}
}
}
+
+ if (stats_boot()) {
+ return true;
+ }
if (pages_boot()) {
return true;
}
if (base_boot(TSDN_NULL)) {
return true;
}
+ /* emap_global is static, hence zeroed. */
+ if (emap_init(&arena_emap_global, b0get(), /* zeroed */ true)) {
+ return true;
+ }
if (extent_boot()) {
return true;
}
@@ -1534,8 +1854,20 @@ malloc_init_hard_a0_locked() {
if (config_prof) {
prof_boot1();
}
- arena_boot(&sc_data);
- if (tcache_boot(TSDN_NULL)) {
+ if (opt_hpa && !hpa_supported()) {
+ malloc_printf("<jemalloc>: HPA not supported in the current "
+ "configuration; %s.",
+ opt_abort_conf ? "aborting" : "disabling");
+ if (opt_abort_conf) {
+ malloc_abort_invalid_conf();
+ } else {
+ opt_hpa = false;
+ }
+ }
+ if (arena_boot(&sc_data, b0get(), opt_hpa)) {
+ return true;
+ }
+ if (tcache_boot(TSDN_NULL, b0get())) {
return true;
}
if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
@@ -1554,11 +1886,29 @@ malloc_init_hard_a0_locked() {
* Initialize one arena here. The rest are lazily created in
* arena_choose_hard().
*/
- if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
- == NULL) {
+ if (arena_init(TSDN_NULL, 0, &arena_config_default) == NULL) {
return true;
}
a0 = arena_get(TSDN_NULL, 0, false);
+
+ if (opt_hpa && !hpa_supported()) {
+ malloc_printf("<jemalloc>: HPA not supported in the current "
+ "configuration; %s.",
+ opt_abort_conf ? "aborting" : "disabling");
+ if (opt_abort_conf) {
+ malloc_abort_invalid_conf();
+ } else {
+ opt_hpa = false;
+ }
+ } else if (opt_hpa) {
+ hpa_shard_opts_t hpa_shard_opts = opt_hpa_opts;
+ hpa_shard_opts.deferral_allowed = background_thread_enabled();
+ if (pa_shard_enable_hpa(TSDN_NULL, &a0->pa_shard,
+ &hpa_shard_opts, &opt_hpa_sec_opts)) {
+ return true;
+ }
+ }
+
malloc_init_state = malloc_init_a0_initialized;
return false;
@@ -1580,6 +1930,29 @@ malloc_init_hard_recursible(void) {
malloc_init_state = malloc_init_recursible;
ncpus = malloc_ncpus();
+ if (opt_percpu_arena != percpu_arena_disabled) {
+ bool cpu_count_is_deterministic =
+ malloc_cpu_count_is_deterministic();
+ if (!cpu_count_is_deterministic) {
+ /*
+			 * If the number of CPUs is not deterministic and
+			 * narenas was not specified, disable the per-CPU
+			 * arena, since CPU IDs may not be detected properly.
+ */
+ if (opt_narenas == 0) {
+ opt_percpu_arena = percpu_arena_disabled;
+ malloc_write("<jemalloc>: Number of CPUs "
+ "detected is not deterministic. Per-CPU "
+ "arena disabled.\n");
+ if (opt_abort_conf) {
+ malloc_abort_invalid_conf();
+ }
+ if (opt_abort) {
+ abort();
+ }
+ }
+ }
+ }
#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
&& !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
@@ -1610,7 +1983,13 @@ malloc_narenas_default(void) {
* default.
*/
if (ncpus > 1) {
- return ncpus << 2;
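+		/*
+		 * Replaces the old hard-coded ncpus << 2; assuming the default
+		 * narenas_ratio of 4, e.g. ncpus == 8 still yields 32 arenas.
+		 */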
+ fxp_t fxp_ncpus = FXP_INIT_INT(ncpus);
+ fxp_t goal = fxp_mul(fxp_ncpus, opt_narenas_ratio);
+ uint32_t int_goal = fxp_round_nearest(goal);
+ if (int_goal == 0) {
+ return 1;
+ }
+ return int_goal;
} else {
return 1;
}
@@ -1769,10 +2148,11 @@ malloc_init_hard(void) {
/* Set reentrancy level to 1 during init. */
pre_reentrancy(tsd, NULL);
/* Initialize narenas before prof_boot2 (for allocation). */
- if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
+ if (malloc_init_narenas()
+ || background_thread_boot1(tsd_tsdn(tsd), b0get())) {
UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
}
- if (config_prof && prof_boot2(tsd)) {
+ if (config_prof && prof_boot2(tsd, b0get())) {
UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
}
@@ -1911,38 +2291,107 @@ dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
}
-/* ind is ignored if dopts->alignment > 0. */
-JEMALLOC_ALWAYS_INLINE void *
-imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
- size_t size, size_t usize, szind_t ind) {
- tcache_t *tcache;
- arena_t *arena;
+/*
+ * The ind parameter is optional, and is only checked and filled if
+ * alignment == 0; return true if the result is out of range.
+ */
+JEMALLOC_ALWAYS_INLINE bool
+aligned_usize_get(size_t size, size_t alignment, size_t *usize, szind_t *ind,
+ bool bump_empty_aligned_alloc) {
+ assert(usize != NULL);
+ if (alignment == 0) {
+ if (ind != NULL) {
+ *ind = sz_size2index(size);
+ if (unlikely(*ind >= SC_NSIZES)) {
+ return true;
+ }
+ *usize = sz_index2size(*ind);
+ assert(*usize > 0 && *usize <= SC_LARGE_MAXCLASS);
+ return false;
+ }
+ *usize = sz_s2u(size);
+ } else {
+ if (bump_empty_aligned_alloc && unlikely(size == 0)) {
+ size = 1;
+ }
+ *usize = sz_sa2u(size, alignment);
+ }
+ if (unlikely(*usize == 0 || *usize > SC_LARGE_MAXCLASS)) {
+ return true;
+ }
+ return false;
+}
- /* Fill in the tcache. */
- if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
- if (likely(!sopts->slow)) {
+JEMALLOC_ALWAYS_INLINE bool
+zero_get(bool guarantee, bool slow) {
+ if (config_fill && slow && unlikely(opt_zero)) {
+ return true;
+ } else {
+ return guarantee;
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE tcache_t *
+tcache_get_from_ind(tsd_t *tsd, unsigned tcache_ind, bool slow, bool is_alloc) {
+ tcache_t *tcache;
+ if (tcache_ind == TCACHE_IND_AUTOMATIC) {
+ if (likely(!slow)) {
/* Getting tcache ptr unconditionally. */
tcache = tsd_tcachep_get(tsd);
assert(tcache == tcache_get(tsd));
- } else {
+ } else if (is_alloc ||
+ likely(tsd_reentrancy_level_get(tsd) == 0)) {
tcache = tcache_get(tsd);
+ } else {
+ tcache = NULL;
}
- } else if (dopts->tcache_ind == TCACHE_IND_NONE) {
- tcache = NULL;
} else {
- tcache = tcaches_get(tsd, dopts->tcache_ind);
+ /*
+ * Should not specify tcache on deallocation path when being
+ * reentrant.
+ */
+ assert(is_alloc || tsd_reentrancy_level_get(tsd) == 0 ||
+ tsd_state_nocleanup(tsd));
+ if (tcache_ind == TCACHE_IND_NONE) {
+ tcache = NULL;
+ } else {
+ tcache = tcaches_get(tsd, tcache_ind);
+ }
}
+ return tcache;
+}
- /* Fill in the arena. */
- if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
+/* Return true if a manual arena is specified and arena_get() OOMs. */
+JEMALLOC_ALWAYS_INLINE bool
+arena_get_from_ind(tsd_t *tsd, unsigned arena_ind, arena_t **arena_p) {
+ if (arena_ind == ARENA_IND_AUTOMATIC) {
/*
* In case of automatic arena management, we defer arena
* computation until as late as we can, hoping to fill the
* allocation out of the tcache.
*/
- arena = NULL;
+ *arena_p = NULL;
} else {
- arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
+ *arena_p = arena_get(tsd_tsdn(tsd), arena_ind, true);
+ if (unlikely(*arena_p == NULL) && arena_ind >= narenas_auto) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/* ind is ignored if dopts->alignment > 0. */
+JEMALLOC_ALWAYS_INLINE void *
+imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
+ size_t size, size_t usize, szind_t ind) {
+ /* Fill in the tcache. */
+ tcache_t *tcache = tcache_get_from_ind(tsd, dopts->tcache_ind,
+ sopts->slow, /* is_alloc */ true);
+
+ /* Fill in the arena. */
+ arena_t *arena;
+ if (arena_get_from_ind(tsd, dopts->arena_ind, &arena)) {
+ return NULL;
}
if (unlikely(dopts->alignment != 0)) {
@@ -1966,6 +2415,7 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
szind_t ind_large;
size_t bumped_usize = usize;
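+	/*
+	 * Sampled allocations are forced to (at least) page alignment, so
+	 * that deallocation paths can recognize them by address alone (see
+	 * prof_sample_aligned()).
+	 */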
+ dopts->alignment = prof_sample_align(dopts->alignment);
if (usize <= SC_SMALL_MAXCLASS) {
assert(((dopts->alignment == 0) ?
sz_s2u(SC_LARGE_MINCLASS) :
@@ -1982,6 +2432,7 @@ imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
} else {
ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
}
+ assert(prof_sample_aligned(ret));
return ret;
}
@@ -2035,16 +2486,14 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
/* Filled in by compute_size_with_overflow below. */
size_t size = 0;
/*
- * For unaligned allocations, we need only ind. For aligned
- * allocations, or in case of stats or profiling we need usize.
- *
- * These are actually dead stores, in that their values are reset before
- * any branch on their value is taken. Sometimes though, it's
- * convenient to pass them as arguments before this point. To avoid
- * undefined behavior then, we initialize them with dummy stores.
+	 * The zero initialization of ind is actually a dead store, in that its
+	 * value is reset before any branch on it is taken. Sometimes though,
+	 * it's convenient to pass it as an argument before this point. To
+	 * avoid undefined behavior then, we initialize it with a dummy store.
*/
szind_t ind = 0;
- size_t usize = 0;
+ /* usize will always be properly initialized. */
+ size_t usize;
/* Reentrancy is only checked on slow path. */
int8_t reentrancy_level;
@@ -2061,31 +2510,12 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
}
/* This is the beginning of the "core" algorithm. */
-
- if (dopts->alignment == 0) {
- ind = sz_size2index(size);
- if (unlikely(ind >= SC_NSIZES)) {
- goto label_oom;
- }
- if (config_stats || (config_prof && opt_prof) || sopts->usize) {
- usize = sz_index2size(ind);
- dopts->usize = usize;
- assert(usize > 0 && usize
- <= SC_LARGE_MAXCLASS);
- }
- } else {
- if (sopts->bump_empty_aligned_alloc) {
- if (unlikely(size == 0)) {
- size = 1;
- }
- }
- usize = sz_sa2u(size, dopts->alignment);
- dopts->usize = usize;
- if (unlikely(usize == 0
- || usize > SC_LARGE_MAXCLASS)) {
- goto label_oom;
- }
+ dopts->zero = zero_get(dopts->zero, sopts->slow);
+ if (aligned_usize_get(size, dopts->alignment, &usize, &ind,
+ sopts->bump_empty_aligned_alloc)) {
+ goto label_oom;
}
+ dopts->usize = usize;
/* Validate the user input. */
if (sopts->assert_nonempty_alloc) {
assert (size != 0);
@@ -2111,26 +2541,25 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
dopts->arena_ind = 0;
}
+ /*
+ * If dopts->alignment > 0, then ind is still 0, but usize was computed
+ * in the previous if statement. Down the positive alignment path,
+ * imalloc_no_sample and imalloc_sample will ignore ind.
+ */
+
/* If profiling is on, get our profiling context. */
if (config_prof && opt_prof) {
- /*
- * Note that if we're going down this path, usize must have been
- * initialized in the previous if statement.
- */
- prof_tctx_t *tctx = prof_alloc_prep(
- tsd, usize, prof_active_get_unlocked(), true);
+ bool prof_active = prof_active_get_unlocked();
+ bool sample_event = te_prof_sample_event_lookahead(tsd, usize);
+ prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active,
+ sample_event);
- alloc_ctx_t alloc_ctx;
+ emap_alloc_ctx_t alloc_ctx;
if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
- alloc_ctx.slab = (usize
- <= SC_SMALL_MAXCLASS);
+ alloc_ctx.slab = (usize <= SC_SMALL_MAXCLASS);
allocation = imalloc_no_sample(
sopts, dopts, tsd, usize, usize, ind);
} else if ((uintptr_t)tctx > (uintptr_t)1U) {
- /*
- * Note that ind might still be 0 here. This is fine;
- * imalloc_sample ignores ind if dopts->alignment > 0.
- */
allocation = imalloc_sample(
sopts, dopts, tsd, usize, ind);
alloc_ctx.slab = false;
@@ -2139,17 +2568,12 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
}
if (unlikely(allocation == NULL)) {
- prof_alloc_rollback(tsd, tctx, true);
+ prof_alloc_rollback(tsd, tctx);
goto label_oom;
}
- prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
+ prof_malloc(tsd, allocation, size, usize, &alloc_ctx, tctx);
} else {
- /*
- * If dopts->alignment > 0, then ind is still 0, but usize was
- * computed in the previous if statement. Down the positive
- * alignment path, imalloc_no_sample ignores ind and size
- * (relying only on usize).
- */
+ assert(!opt_prof);
allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
ind);
if (unlikely(allocation == NULL)) {
@@ -2161,12 +2585,17 @@ imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
* Allocation has been done at this point. We still have some
* post-allocation work to do though.
*/
+
+ thread_alloc_event(tsd, usize);
+
assert(dopts->alignment == 0
|| ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
- if (config_stats) {
- assert(usize == isalloc(tsd_tsdn(tsd), allocation));
- *tsd_thread_allocatedp_get(tsd) += usize;
+ assert(usize == isalloc(tsd_tsdn(tsd), allocation));
+
+ if (config_fill && sopts->slow && !dopts->zero
+ && unlikely(opt_junk_alloc)) {
+ junk_alloc_callback(allocation, usize);
}
if (sopts->slow) {
@@ -2277,7 +2706,11 @@ malloc_default(size_t size) {
static_opts_t sopts;
dynamic_opts_t dopts;
- LOG("core.malloc.entry", "size: %zu", size);
+ /*
+	 * This variant has a logging hook on exit but not on entry. It's
+	 * called only by je_malloc, below, which emits the entry one for us
+	 * (and, if it calls us, does so only via tail call).
+ */
static_opts_init(&sopts);
dynamic_opts_init(&dopts);
@@ -2310,86 +2743,11 @@ malloc_default(size_t size) {
* Begin malloc(3)-compatible functions.
*/
-/*
- * malloc() fastpath.
- *
- * Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit
- * tcache. If either of these is false, we tail-call to the slowpath,
- * malloc_default(). Tail-calling is used to avoid any caller-saved
- * registers.
- *
- * fastpath supports ticker and profiling, both of which will also
- * tail-call to the slowpath if they fire.
- */
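+/*
+ * The malloc() fastpath logic documented above now lives in
+ * imalloc_fastpath() (an internal inline helper); it still tail-calls
+ * malloc_default() whenever a slowpath condition fires.
+ */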
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size) {
- LOG("core.malloc.entry", "size: %zu", size);
-
- if (tsd_get_allocates() && unlikely(!malloc_initialized())) {
- return malloc_default(size);
- }
-
- tsd_t *tsd = tsd_get(false);
- if (unlikely(!tsd || !tsd_fast(tsd) || (size > SC_LOOKUP_MAXCLASS))) {
- return malloc_default(size);
- }
-
- tcache_t *tcache = tsd_tcachep_get(tsd);
-
- if (unlikely(ticker_trytick(&tcache->gc_ticker))) {
- return malloc_default(size);
- }
-
- szind_t ind = sz_size2index_lookup(size);
- size_t usize;
- if (config_stats || config_prof) {
- usize = sz_index2size(ind);
- }
- /* Fast path relies on size being a bin. I.e. SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS */
- assert(ind < SC_NBINS);
- assert(size <= SC_SMALL_MAXCLASS);
-
- if (config_prof) {
- int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd);
- bytes_until_sample -= usize;
- tsd_bytes_until_sample_set(tsd, bytes_until_sample);
-
- if (unlikely(bytes_until_sample < 0)) {
- /*
- * Avoid a prof_active check on the fastpath.
- * If prof_active is false, set bytes_until_sample to
- * a large value. If prof_active is set to true,
- * bytes_until_sample will be reset.
- */
- if (!prof_active) {
- tsd_bytes_until_sample_set(tsd, SSIZE_MAX);
- }
- return malloc_default(size);
- }
- }
-
- cache_bin_t *bin = tcache_small_bin_get(tcache, ind);
- bool tcache_success;
- void* ret = cache_bin_alloc_easy(bin, &tcache_success);
-
- if (tcache_success) {
- if (config_stats) {
- *tsd_thread_allocatedp_get(tsd) += usize;
- bin->tstats.nrequests++;
- }
- if (config_prof) {
- tcache->prof_accumbytes += usize;
- }
-
- LOG("core.malloc.exit", "result: %p", ret);
-
- /* Fastpath success */
- return ret;
- }
-
- return malloc_default(size);
+ return imalloc_fastpath(size, &malloc_default);
}
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
@@ -2506,56 +2864,6 @@ je_calloc(size_t num, size_t size) {
return ret;
}
-static void *
-irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
- prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) {
- void *p;
-
- if (tctx == NULL) {
- return NULL;
- }
- if (usize <= SC_SMALL_MAXCLASS) {
- p = iralloc(tsd, old_ptr, old_usize,
- SC_LARGE_MINCLASS, 0, false, hook_args);
- if (p == NULL) {
- return NULL;
- }
- arena_prof_promote(tsd_tsdn(tsd), p, usize);
- } else {
- p = iralloc(tsd, old_ptr, old_usize, usize, 0, false,
- hook_args);
- }
-
- return p;
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
- alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) {
- void *p;
- bool prof_active;
- prof_tctx_t *old_tctx, *tctx;
-
- prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
- tctx = prof_alloc_prep(tsd, usize, prof_active, true);
- if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
- p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx,
- hook_args);
- } else {
- p = iralloc(tsd, old_ptr, old_usize, usize, 0, false,
- hook_args);
- }
- if (unlikely(p == NULL)) {
- prof_alloc_rollback(tsd, tctx, true);
- return NULL;
- }
- prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
- old_tctx);
-
- return p;
-}
-
JEMALLOC_ALWAYS_INLINE void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
if (!slow_path) {
@@ -2569,30 +2877,50 @@ ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
- alloc_ctx_t alloc_ctx;
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ emap_alloc_ctx_t alloc_ctx;
+ emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
+ &alloc_ctx);
assert(alloc_ctx.szind != SC_NSIZES);
- size_t usize;
+ size_t usize = sz_index2size(alloc_ctx.szind);
if (config_prof && opt_prof) {
- usize = sz_index2size(alloc_ctx.szind);
prof_free(tsd, ptr, usize, &alloc_ctx);
- } else if (config_stats) {
- usize = sz_index2size(alloc_ctx.szind);
- }
- if (config_stats) {
- *tsd_thread_deallocatedp_get(tsd) += usize;
}
if (likely(!slow_path)) {
idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
false);
} else {
+ if (config_fill && slow_path && opt_junk_free) {
+ junk_free_callback(ptr, usize);
+ }
idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
true);
}
+ thread_dalloc_event(tsd, usize);
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+maybe_check_alloc_ctx(tsd_t *tsd, void *ptr, emap_alloc_ctx_t *alloc_ctx) {
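+	/*
+	 * With opt size checks compiled in, cross-check the caller-supplied
+	 * ctx against an authoritative emap lookup; a mismatch indicates a
+	 * sized-deallocation bug or heap corruption.
+	 */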
+ if (config_opt_size_checks) {
+ emap_alloc_ctx_t dbg_ctx;
+ emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
+ &dbg_ctx);
+ if (alloc_ctx->szind != dbg_ctx.szind) {
+ safety_check_fail_sized_dealloc(
+ /* current_dealloc */ true, ptr,
+			    /* true_size */ sz_index2size(dbg_ctx.szind),
+			    /* input_size */ sz_index2size(alloc_ctx->szind));
+ return true;
+ }
+ if (alloc_ctx->slab != dbg_ctx.slab) {
+ safety_check_fail(
+ "Internal heap corruption detected: "
+ "mismatch in slab bit");
+ return true;
+ }
+ }
+ return false;
}
JEMALLOC_ALWAYS_INLINE void
@@ -2608,147 +2936,63 @@ isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
- alloc_ctx_t alloc_ctx, *ctx;
- if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) {
- /*
- * When cache_oblivious is disabled and ptr is not page aligned,
- * the allocation was not sampled -- usize can be used to
- * determine szind directly.
- */
+ emap_alloc_ctx_t alloc_ctx;
+ if (!config_prof) {
alloc_ctx.szind = sz_size2index(usize);
- alloc_ctx.slab = true;
- ctx = &alloc_ctx;
- if (config_debug) {
- alloc_ctx_t dbg_ctx;
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
- rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind,
- &dbg_ctx.slab);
- assert(dbg_ctx.szind == alloc_ctx.szind);
- assert(dbg_ctx.slab == alloc_ctx.slab);
- }
- } else if (config_prof && opt_prof) {
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
- assert(alloc_ctx.szind == sz_size2index(usize));
- ctx = &alloc_ctx;
+ alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
} else {
- ctx = NULL;
+ if (likely(!prof_sample_aligned(ptr))) {
+ /*
+ * When the ptr is not page aligned, it was not sampled.
+ * usize can be trusted to determine szind and slab.
+ */
+ alloc_ctx.szind = sz_size2index(usize);
+ alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
+ } else if (opt_prof) {
+ emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global,
+ ptr, &alloc_ctx);
+
+ if (config_opt_safety_checks) {
+ /* Small alloc may have !slab (sampled). */
+ if (unlikely(alloc_ctx.szind !=
+ sz_size2index(usize))) {
+ safety_check_fail_sized_dealloc(
+ /* current_dealloc */ true, ptr,
+ /* true_size */ sz_index2size(
+ alloc_ctx.szind),
+ /* input_size */ usize);
+ }
+ }
+ } else {
+ alloc_ctx.szind = sz_size2index(usize);
+ alloc_ctx.slab = (alloc_ctx.szind < SC_NBINS);
+ }
+ }
+ bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx);
+ if (fail) {
+ /*
+ * This is a heap corruption bug. In real life we'll crash; for
+		 * the unit tests we just want to avoid breaking things so
+		 * badly that we can't get a test result out. Let's leak
+		 * instead of trying to free.
+ */
+ return;
}
if (config_prof && opt_prof) {
- prof_free(tsd, ptr, usize, ctx);
- }
- if (config_stats) {
- *tsd_thread_deallocatedp_get(tsd) += usize;
+ prof_free(tsd, ptr, usize, &alloc_ctx);
}
-
if (likely(!slow_path)) {
- isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
- } else {
- isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
- }
-}
-
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ALLOC_SIZE(2)
-je_realloc(void *ptr, size_t arg_size) {
- void *ret;
- tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
- size_t usize JEMALLOC_CC_SILENCE_INIT(0);
- size_t old_usize = 0;
- size_t size = arg_size;
-
- LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
-
- if (unlikely(size == 0)) {
- size = 1;
- }
-
- if (likely(ptr != NULL)) {
- assert(malloc_initialized() || IS_INITIALIZER);
- tsd_t *tsd = tsd_fetch();
-
- check_entry_exit_locking(tsd_tsdn(tsd));
-
-
- hook_ralloc_args_t hook_args = {true, {(uintptr_t)ptr,
- (uintptr_t)arg_size, 0, 0}};
-
- alloc_ctx_t alloc_ctx;
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
- assert(alloc_ctx.szind != SC_NSIZES);
- old_usize = sz_index2size(alloc_ctx.szind);
- assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
- if (config_prof && opt_prof) {
- usize = sz_s2u(size);
- if (unlikely(usize == 0
- || usize > SC_LARGE_MAXCLASS)) {
- ret = NULL;
- } else {
- ret = irealloc_prof(tsd, ptr, old_usize, usize,
- &alloc_ctx, &hook_args);
- }
- } else {
- if (config_stats) {
- usize = sz_s2u(size);
- }
- ret = iralloc(tsd, ptr, old_usize, size, 0, false,
- &hook_args);
- }
- tsdn = tsd_tsdn(tsd);
+ isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx,
+ false);
} else {
- /* realloc(NULL, size) is equivalent to malloc(size). */
- static_opts_t sopts;
- dynamic_opts_t dopts;
-
- static_opts_init(&sopts);
- dynamic_opts_init(&dopts);
-
- sopts.null_out_result_on_error = true;
- sopts.set_errno_on_error = true;
- sopts.oom_string =
- "<jemalloc>: Error in realloc(): out of memory\n";
-
- dopts.result = &ret;
- dopts.num_items = 1;
- dopts.item_size = size;
-
- imalloc(&sopts, &dopts);
- if (sopts.slow) {
- uintptr_t args[3] = {(uintptr_t)ptr, arg_size};
- hook_invoke_alloc(hook_alloc_realloc, ret,
- (uintptr_t)ret, args);
- }
-
- return ret;
- }
-
- if (unlikely(ret == NULL)) {
- if (config_xmalloc && unlikely(opt_xmalloc)) {
- malloc_write("<jemalloc>: Error in realloc(): "
- "out of memory\n");
- abort();
+ if (config_fill && slow_path && opt_junk_free) {
+ junk_free_callback(ptr, usize);
}
- set_errno(ENOMEM);
- }
- if (config_stats && likely(ret != NULL)) {
- tsd_t *tsd;
-
- assert(usize == isalloc(tsdn, ret));
- tsd = tsdn_tsd(tsdn);
- *tsd_thread_allocatedp_get(tsd) += usize;
- *tsd_thread_deallocatedp_get(tsd) += old_usize;
+ isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, &alloc_ctx,
+ true);
}
- UTRACE(ptr, size, ret);
- check_entry_exit_locking(tsdn);
-
- LOG("core.realloc.exit", "result: %p", ret);
- return ret;
+ thread_dalloc_event(tsd, usize);
}
JEMALLOC_NOINLINE
@@ -2767,79 +3011,149 @@ free_default(void *ptr) {
tsd_t *tsd = tsd_fetch_min();
check_entry_exit_locking(tsd_tsdn(tsd));
- tcache_t *tcache;
if (likely(tsd_fast(tsd))) {
- tsd_assert_fast(tsd);
- /* Unconditionally get tcache ptr on fast path. */
- tcache = tsd_tcachep_get(tsd);
- ifree(tsd, ptr, tcache, false);
+ tcache_t *tcache = tcache_get_from_ind(tsd,
+ TCACHE_IND_AUTOMATIC, /* slow */ false,
+ /* is_alloc */ false);
+ ifree(tsd, ptr, tcache, /* slow */ false);
} else {
- if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
- tcache = tcache_get(tsd);
- } else {
- tcache = NULL;
- }
+ tcache_t *tcache = tcache_get_from_ind(tsd,
+ TCACHE_IND_AUTOMATIC, /* slow */ true,
+ /* is_alloc */ false);
uintptr_t args_raw[3] = {(uintptr_t)ptr};
hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw);
- ifree(tsd, ptr, tcache, true);
+ ifree(tsd, ptr, tcache, /* slow */ true);
}
+
check_entry_exit_locking(tsd_tsdn(tsd));
}
}
+JEMALLOC_ALWAYS_INLINE bool
+free_fastpath_nonfast_aligned(void *ptr, bool check_prof) {
+ /*
+	 * free_fastpath does not handle two uncommon cases: 1) sampled
+	 * profiled objects and 2) sampled junk & stash for use-after-free
+	 * detection. Both have special alignments which are used to escape
+	 * the fastpath.
+	 *
+	 * prof_sample is page-aligned, which covers the UAF check when both
+	 * are enabled (the assertion below). We avoid redundant checks since
+	 * this is on the fastpath -- at most one runtime branch from this.
+ */
+ if (config_debug && cache_bin_nonfast_aligned(ptr)) {
+ assert(prof_sample_aligned(ptr));
+ }
+
+ if (config_prof && check_prof) {
+ /* When prof is enabled, the prof_sample alignment is enough. */
+ if (prof_sample_aligned(ptr)) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ if (config_uaf_detection) {
+ if (cache_bin_nonfast_aligned(ptr)) {
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ return false;
+}
+
+/* Returns whether or not the free attempt was successful. */
JEMALLOC_ALWAYS_INLINE
bool free_fastpath(void *ptr, size_t size, bool size_hint) {
tsd_t *tsd = tsd_get(false);
- if (unlikely(!tsd || !tsd_fast(tsd))) {
+ /* The branch gets optimized away unless tsd_get_allocates(). */
+ if (unlikely(tsd == NULL)) {
return false;
}
-
- tcache_t *tcache = tsd_tcachep_get(tsd);
-
- alloc_ctx_t alloc_ctx;
/*
- * If !config_cache_oblivious, we can check PAGE alignment to
- * detect sampled objects. Otherwise addresses are
- * randomized, and we have to look it up in the rtree anyway.
- * See also isfree().
+ * The tsd_fast() / initialized checks are folded into the branch
+ * testing (deallocated_after >= threshold) later in this function.
+ * The threshold will be set to 0 when !tsd_fast.
*/
- if (!size_hint || config_cache_oblivious) {
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- bool res = rtree_szind_slab_read_fast(tsd_tsdn(tsd), &extents_rtree,
- rtree_ctx, (uintptr_t)ptr,
- &alloc_ctx.szind, &alloc_ctx.slab);
+ assert(tsd_fast(tsd) ||
+ *tsd_thread_deallocated_next_event_fastp_get_unsafe(tsd) == 0);
+
+ emap_alloc_ctx_t alloc_ctx;
+ if (!size_hint) {
+ bool err = emap_alloc_ctx_try_lookup_fast(tsd,
+ &arena_emap_global, ptr, &alloc_ctx);
/* Note: profiled objects will have alloc_ctx.slab set */
- if (!res || !alloc_ctx.slab) {
+ if (unlikely(err || !alloc_ctx.slab ||
+ free_fastpath_nonfast_aligned(ptr,
+ /* check_prof */ false))) {
return false;
}
assert(alloc_ctx.szind != SC_NSIZES);
} else {
/*
- * Check for both sizes that are too large, and for sampled objects.
- * Sampled objects are always page-aligned. The sampled object check
- * will also check for null ptr.
+ * Check for both sizes that are too large, and for sampled /
+ * special aligned objects. The alignment check will also check
+ * for null ptr.
*/
- if (size > SC_LOOKUP_MAXCLASS || (((uintptr_t)ptr & PAGE_MASK) == 0)) {
+ if (unlikely(size > SC_LOOKUP_MAXCLASS ||
+ free_fastpath_nonfast_aligned(ptr,
+ /* check_prof */ true))) {
return false;
}
alloc_ctx.szind = sz_size2index_lookup(size);
+ /* Max lookup class must be small. */
+ assert(alloc_ctx.szind < SC_NBINS);
+ /* This is a dead store, except when opt size checking is on. */
+ alloc_ctx.slab = true;
}
+ /*
+ * Currently the fastpath only handles small sizes. The branch on
+ * SC_LOOKUP_MAXCLASS makes sure of it. This lets us avoid checking
+ * tcache szind upper limit (i.e. tcache_maxclass) as well.
+ */
+ assert(alloc_ctx.slab);
+
+ uint64_t deallocated, threshold;
+ te_free_fastpath_ctx(tsd, &deallocated, &threshold);
- if (unlikely(ticker_trytick(&tcache->gc_ticker))) {
+ size_t usize = sz_index2size(alloc_ctx.szind);
+ uint64_t deallocated_after = deallocated + usize;
+ /*
+ * Check for events and tsd non-nominal (fast_threshold will be set to
+ * 0) in a single branch. Note that this handles the uninitialized case
+	 * as well (TSD init will be triggered on the non-fastpath). Therefore
+	 * anything that depends on a functional TSD (e.g. the alloc_ctx sanity
+	 * check below) needs to come after this branch.
+ */
+ if (unlikely(deallocated_after >= threshold)) {
return false;
}
+ assert(tsd_fast(tsd));
+ bool fail = maybe_check_alloc_ctx(tsd, ptr, &alloc_ctx);
+ if (fail) {
+ /* See the comment in isfree. */
+ return true;
+ }
+
+ tcache_t *tcache = tcache_get_from_ind(tsd, TCACHE_IND_AUTOMATIC,
+ /* slow */ false, /* is_alloc */ false);
+ cache_bin_t *bin = &tcache->bins[alloc_ctx.szind];
+
+ /*
+ * If junking were enabled, this is where we would do it. It's not
+ * though, since we ensured above that we're on the fast path. Assert
+ * that to double-check.
+ */
+ assert(!opt_junk_free);
- cache_bin_t *bin = tcache_small_bin_get(tcache, alloc_ctx.szind);
- cache_bin_info_t *bin_info = &tcache_bin_info[alloc_ctx.szind];
- if (!cache_bin_dalloc_easy(bin, bin_info, ptr)) {
+ if (!cache_bin_dalloc_easy(bin, ptr)) {
return false;
}
- if (config_stats) {
- size_t usize = sz_index2size(alloc_ctx.szind);
- *tsd_thread_deallocatedp_get(tsd) += usize;
- }
+ *tsd_thread_deallocatedp_get(tsd) = deallocated_after;
return true;
}
@@ -2950,6 +3264,8 @@ je_valloc(size_t size) {
* passed an extra argument for the caller return address, which will be
* ignored.
*/
+/* <features.h> defines __GLIBC__ if we are compiling against glibc. */
+#include <features.h>
+
JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
@@ -2958,7 +3274,7 @@ JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
je_memalign;
# endif
-# ifdef CPU_COUNT
+# ifdef __GLIBC__
/*
* To enable static linking with glibc, the libc specific malloc interface must
* be implemented also, so none of glibc's malloc.o functions are added to the
@@ -3001,6 +3317,26 @@ int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
* Begin non-standard functions.
*/
+JEMALLOC_ALWAYS_INLINE unsigned
+mallocx_tcache_get(int flags) {
+ if (likely((flags & MALLOCX_TCACHE_MASK) == 0)) {
+ return TCACHE_IND_AUTOMATIC;
+ } else if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
+ return TCACHE_IND_NONE;
+ } else {
+ return MALLOCX_TCACHE_GET(flags);
+ }
+}
+
+JEMALLOC_ALWAYS_INLINE unsigned
+mallocx_arena_get(int flags) {
+ if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
+ return MALLOCX_ARENA_GET(flags);
+ } else {
+ return ARENA_IND_AUTOMATIC;
+ }
+}
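+
+/*
+ * Illustrative usage (not part of this change): the two helpers above decode
+ * flags composed by callers, e.g.
+ * mallocx(size, MALLOCX_ALIGN(64) | MALLOCX_TCACHE_NONE | MALLOCX_ARENA(ind)).
+ */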
+
#ifdef JEMALLOC_EXPERIMENTAL_SMALLOCX_API
#define JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) x ## y
@@ -3045,25 +3381,10 @@ JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT)
dopts.num_items = 1;
dopts.item_size = size;
if (unlikely(flags != 0)) {
- if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
- dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
- }
-
+ dopts.alignment = MALLOCX_ALIGN_GET(flags);
dopts.zero = MALLOCX_ZERO_GET(flags);
-
- if ((flags & MALLOCX_TCACHE_MASK) != 0) {
- if ((flags & MALLOCX_TCACHE_MASK)
- == MALLOCX_TCACHE_NONE) {
- dopts.tcache_ind = TCACHE_IND_NONE;
- } else {
- dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
- }
- } else {
- dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
- }
-
- if ((flags & MALLOCX_ARENA_MASK) != 0)
- dopts.arena_ind = MALLOCX_ARENA_GET(flags);
+ dopts.tcache_ind = mallocx_tcache_get(flags);
+ dopts.arena_ind = mallocx_arena_get(flags);
}
imalloc(&sopts, &dopts);
@@ -3098,25 +3419,10 @@ je_mallocx(size_t size, int flags) {
dopts.num_items = 1;
dopts.item_size = size;
if (unlikely(flags != 0)) {
- if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
- dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
- }
-
+ dopts.alignment = MALLOCX_ALIGN_GET(flags);
dopts.zero = MALLOCX_ZERO_GET(flags);
-
- if ((flags & MALLOCX_TCACHE_MASK) != 0) {
- if ((flags & MALLOCX_TCACHE_MASK)
- == MALLOCX_TCACHE_NONE) {
- dopts.tcache_ind = TCACHE_IND_NONE;
- } else {
- dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
- }
- } else {
- dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
- }
-
- if ((flags & MALLOCX_ARENA_MASK) != 0)
- dopts.arena_ind = MALLOCX_ARENA_GET(flags);
+ dopts.tcache_ind = mallocx_tcache_get(flags);
+ dopts.arena_ind = mallocx_arena_get(flags);
}
imalloc(&sopts, &dopts);
@@ -3139,6 +3445,8 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
if (tctx == NULL) {
return NULL;
}
+
+ alignment = prof_sample_align(alignment);
if (usize <= SC_SMALL_MAXCLASS) {
p = iralloct(tsdn, old_ptr, old_usize,
SC_LARGE_MINCLASS, alignment, zero, tcache,
@@ -3151,66 +3459,48 @@ irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
tcache, arena, hook_args);
}
+ assert(prof_sample_aligned(p));
return p;
}
JEMALLOC_ALWAYS_INLINE void *
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
- size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
- arena_t *arena, alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) {
+ size_t alignment, size_t usize, bool zero, tcache_t *tcache,
+ arena_t *arena, emap_alloc_ctx_t *alloc_ctx,
+ hook_ralloc_args_t *hook_args) {
+ prof_info_t old_prof_info;
+ prof_info_get_and_reset_recent(tsd, old_ptr, alloc_ctx, &old_prof_info);
+ bool prof_active = prof_active_get_unlocked();
+ bool sample_event = te_prof_sample_event_lookahead(tsd, usize);
+ prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active, sample_event);
void *p;
- bool prof_active;
- prof_tctx_t *old_tctx, *tctx;
-
- prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
- tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
- *usize, alignment, zero, tcache, arena, tctx, hook_args);
+ usize, alignment, zero, tcache, arena, tctx, hook_args);
} else {
p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
zero, tcache, arena, hook_args);
}
if (unlikely(p == NULL)) {
- prof_alloc_rollback(tsd, tctx, false);
+ prof_alloc_rollback(tsd, tctx);
return NULL;
}
-
- if (p == old_ptr && alignment != 0) {
- /*
- * The allocation did not move, so it is possible that the size
- * class is smaller than would guarantee the requested
- * alignment, and that the alignment constraint was
- * serendipitously satisfied. Additionally, old_usize may not
- * be the same as the current usize because of in-place large
- * reallocation. Therefore, query the actual value of usize.
- */
- *usize = isalloc(tsd_tsdn(tsd), p);
- }
- prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
- old_usize, old_tctx);
+ assert(usize == isalloc(tsd_tsdn(tsd), p));
+ prof_realloc(tsd, p, size, usize, tctx, prof_active, old_ptr,
+ old_usize, &old_prof_info, sample_event);
return p;
}
-JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
-void JEMALLOC_NOTHROW *
-JEMALLOC_ALLOC_SIZE(2)
-je_rallocx(void *ptr, size_t size, int flags) {
+static void *
+do_rallocx(void *ptr, size_t size, int flags, bool is_realloc) {
void *p;
tsd_t *tsd;
size_t usize;
size_t old_usize;
size_t alignment = MALLOCX_ALIGN_GET(flags);
- bool zero = flags & MALLOCX_ZERO;
arena_t *arena;
- tcache_t *tcache;
-
- LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
- size, flags);
-
assert(ptr != NULL);
assert(size != 0);
@@ -3218,44 +3508,31 @@ je_rallocx(void *ptr, size_t size, int flags) {
tsd = tsd_fetch();
check_entry_exit_locking(tsd_tsdn(tsd));
- if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
- unsigned arena_ind = MALLOCX_ARENA_GET(flags);
- arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
- if (unlikely(arena == NULL)) {
- goto label_oom;
- }
- } else {
- arena = NULL;
- }
+ bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true);
- if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
- if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
- tcache = NULL;
- } else {
- tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
- }
- } else {
- tcache = tcache_get(tsd);
+ unsigned arena_ind = mallocx_arena_get(flags);
+ if (arena_get_from_ind(tsd, arena_ind, &arena)) {
+ goto label_oom;
}
- alloc_ctx_t alloc_ctx;
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ unsigned tcache_ind = mallocx_tcache_get(flags);
+ tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind,
+ /* slow */ true, /* is_alloc */ true);
+
+ emap_alloc_ctx_t alloc_ctx;
+ emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
+ &alloc_ctx);
assert(alloc_ctx.szind != SC_NSIZES);
old_usize = sz_index2size(alloc_ctx.szind);
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
+ if (aligned_usize_get(size, alignment, &usize, NULL, false)) {
+ goto label_oom;
+ }
- hook_ralloc_args_t hook_args = {false, {(uintptr_t)ptr, size, flags,
- 0}};
+ hook_ralloc_args_t hook_args = {is_realloc, {(uintptr_t)ptr, size,
+ flags, 0}};
if (config_prof && opt_prof) {
- usize = (alignment == 0) ?
- sz_s2u(size) : sz_sa2u(size, alignment);
- if (unlikely(usize == 0
- || usize > SC_LARGE_MAXCLASS)) {
- goto label_oom;
- }
- p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
+ p = irallocx_prof(tsd, ptr, old_usize, size, alignment, usize,
zero, tcache, arena, &alloc_ctx, &hook_args);
if (unlikely(p == NULL)) {
goto label_oom;
@@ -3266,20 +3543,22 @@ je_rallocx(void *ptr, size_t size, int flags) {
if (unlikely(p == NULL)) {
goto label_oom;
}
- if (config_stats) {
- usize = isalloc(tsd_tsdn(tsd), p);
- }
+ assert(usize == isalloc(tsd_tsdn(tsd), p));
}
assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
+ thread_alloc_event(tsd, usize);
+ thread_dalloc_event(tsd, old_usize);
- if (config_stats) {
- *tsd_thread_allocatedp_get(tsd) += usize;
- *tsd_thread_deallocatedp_get(tsd) += old_usize;
- }
UTRACE(ptr, size, p);
check_entry_exit_locking(tsd_tsdn(tsd));
- LOG("core.rallocx.exit", "result: %p", p);
+ if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize
+ && !zero) {
+ size_t excess_len = usize - old_usize;
+ void *excess_start = (void *)((uintptr_t)p + old_usize);
+ junk_alloc_callback(excess_start, excess_len);
+ }
+
return p;
label_oom:
if (config_xmalloc && unlikely(opt_xmalloc)) {
@@ -3289,10 +3568,103 @@ label_oom:
UTRACE(ptr, size, 0);
check_entry_exit_locking(tsd_tsdn(tsd));
- LOG("core.rallocx.exit", "result: %p", NULL);
return NULL;
}
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ALLOC_SIZE(2)
+je_rallocx(void *ptr, size_t size, int flags) {
+ LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
+ size, flags);
+ void *ret = do_rallocx(ptr, size, flags, false);
+ LOG("core.rallocx.exit", "result: %p", ret);
+ return ret;
+}
+
+static void *
+do_realloc_nonnull_zero(void *ptr) {
+ if (config_stats) {
+ atomic_fetch_add_zu(&zero_realloc_count, 1, ATOMIC_RELAXED);
+ }
+ if (opt_zero_realloc_action == zero_realloc_action_alloc) {
+ /*
+		 * The user might have gotten an alloc setting while expecting
+		 * a free setting. If that's the case, we at least try to
+		 * reduce the harm by turning off the tcache while allocating,
+		 * so that we'll get a true first fit.
+ */
+ return do_rallocx(ptr, 1, MALLOCX_TCACHE_NONE, true);
+ } else if (opt_zero_realloc_action == zero_realloc_action_free) {
+ UTRACE(ptr, 0, 0);
+ tsd_t *tsd = tsd_fetch();
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ tcache_t *tcache = tcache_get_from_ind(tsd,
+ TCACHE_IND_AUTOMATIC, /* slow */ true,
+ /* is_alloc */ false);
+ uintptr_t args[3] = {(uintptr_t)ptr, 0};
+ hook_invoke_dalloc(hook_dalloc_realloc, ptr, args);
+ ifree(tsd, ptr, tcache, true);
+
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ return NULL;
+ } else {
+ safety_check_fail("Called realloc(non-null-ptr, 0) with "
+ "zero_realloc:abort set\n");
+		/*
+		 * In real code, this will never run; the safety check failure
+		 * will call abort. In the unit test, we just want to bail out
+		 * without corrupting internal state that the test needs to
+		 * finish.
+ */
+ return NULL;
+ }
+}
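+
+/*
+ * The action taken above follows the zero_realloc option ("alloc", "free", or
+ * "abort") parsed in malloc_conf_init_helper() above.
+ */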
+
+JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
+void JEMALLOC_NOTHROW *
+JEMALLOC_ALLOC_SIZE(2)
+je_realloc(void *ptr, size_t size) {
+ LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
+
+ if (likely(ptr != NULL && size != 0)) {
+ void *ret = do_rallocx(ptr, size, 0, true);
+ LOG("core.realloc.exit", "result: %p", ret);
+ return ret;
+ } else if (ptr != NULL && size == 0) {
+ void *ret = do_realloc_nonnull_zero(ptr);
+ LOG("core.realloc.exit", "result: %p", ret);
+ return ret;
+ } else {
+ /* realloc(NULL, size) is equivalent to malloc(size). */
+ void *ret;
+
+ static_opts_t sopts;
+ dynamic_opts_t dopts;
+
+ static_opts_init(&sopts);
+ dynamic_opts_init(&dopts);
+
+ sopts.null_out_result_on_error = true;
+ sopts.set_errno_on_error = true;
+ sopts.oom_string =
+ "<jemalloc>: Error in realloc(): out of memory\n";
+
+ dopts.result = &ret;
+ dopts.num_items = 1;
+ dopts.item_size = size;
+
+ imalloc(&sopts, &dopts);
+ if (sopts.slow) {
+ uintptr_t args[3] = {(uintptr_t)ptr, size};
+ hook_invoke_alloc(hook_alloc_realloc, ret,
+ (uintptr_t)ret, args);
+ }
+ LOG("core.realloc.exit", "result: %p", ret);
+ return ret;
+ }
+}
+
JEMALLOC_ALWAYS_INLINE size_t
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
size_t extra, size_t alignment, bool zero) {
@@ -3309,51 +3681,46 @@ ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
static size_t
ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
- size_t usize;
-
- if (tctx == NULL) {
+ /* Sampled allocation needs to be page aligned. */
+ if (tctx == NULL || !prof_sample_aligned(ptr)) {
return old_usize;
}
- usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
- zero);
- return usize;
+ return ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
+ zero);
}
JEMALLOC_ALWAYS_INLINE size_t
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
- size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
- size_t usize_max, usize;
- bool prof_active;
- prof_tctx_t *old_tctx, *tctx;
+ size_t extra, size_t alignment, bool zero, emap_alloc_ctx_t *alloc_ctx) {
+ /*
+ * old_prof_info is only used for asserting that the profiling info
+ * isn't changed by the ixalloc() call.
+ */
+ prof_info_t old_prof_info;
+ prof_info_get(tsd, ptr, alloc_ctx, &old_prof_info);
- prof_active = prof_active_get_unlocked();
- old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
/*
* usize isn't knowable before ixalloc() returns when extra is non-zero.
* Therefore, compute its maximum possible value and use that in
* prof_alloc_prep() to decide whether to capture a backtrace.
* prof_realloc() will use the actual usize to decide whether to sample.
*/
- if (alignment == 0) {
- usize_max = sz_s2u(size+extra);
- assert(usize_max > 0
- && usize_max <= SC_LARGE_MAXCLASS);
- } else {
- usize_max = sz_sa2u(size+extra, alignment);
- if (unlikely(usize_max == 0
- || usize_max > SC_LARGE_MAXCLASS)) {
- /*
- * usize_max is out of range, and chances are that
- * allocation will fail, but use the maximum possible
- * value and carry on with prof_alloc_prep(), just in
- * case allocation succeeds.
- */
- usize_max = SC_LARGE_MAXCLASS;
- }
+ size_t usize_max;
+ if (aligned_usize_get(size + extra, alignment, &usize_max, NULL,
+ false)) {
+ /*
+ * usize_max is out of range, and chances are that allocation
+ * will fail, but use the maximum possible value and carry on
+ * with prof_alloc_prep(), just in case allocation succeeds.
+ */
+ usize_max = SC_LARGE_MAXCLASS;
}
- tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
+ bool prof_active = prof_active_get_unlocked();
+ bool sample_event = te_prof_sample_event_lookahead(tsd, usize_max);
+ prof_tctx_t *tctx = prof_alloc_prep(tsd, prof_active, sample_event);
+ size_t usize;
if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
size, extra, alignment, zero, tctx);
@@ -3361,13 +3728,28 @@ ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
extra, alignment, zero);
}
+
+ /*
+ * At this point we can still safely get the original profiling
+ * information associated with the ptr, because (a) the edata_t object
+ * associated with the ptr still lives and (b) the profiling info
+ * fields are not touched. "(a)" is asserted in the outer je_xallocx()
+ * function, and "(b)" is indirectly verified below by checking that
+ * the alloc_tctx field is unchanged.
+ */
+ prof_info_t prof_info;
if (usize == old_usize) {
- prof_alloc_rollback(tsd, tctx, false);
- return usize;
+ prof_info_get(tsd, ptr, alloc_ctx, &prof_info);
+ prof_alloc_rollback(tsd, tctx);
+ } else {
+ prof_info_get_and_reset_recent(tsd, ptr, alloc_ctx, &prof_info);
+ assert(usize <= usize_max);
+ sample_event = te_prof_sample_event_lookahead(tsd, usize);
+ prof_realloc(tsd, ptr, size, usize, tctx, prof_active, ptr,
+ old_usize, &prof_info, sample_event);
}
- prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
- old_tctx);
+ assert(old_prof_info.alloc_tctx == prof_info.alloc_tctx);
return usize;
}
@@ -3376,7 +3758,7 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
tsd_t *tsd;
size_t usize, old_usize;
size_t alignment = MALLOCX_ALIGN_GET(flags);
- bool zero = flags & MALLOCX_ZERO;
+ bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true);
LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
"flags: %d", ptr, size, extra, flags);
@@ -3388,10 +3770,17 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
tsd = tsd_fetch();
check_entry_exit_locking(tsd_tsdn(tsd));
- alloc_ctx_t alloc_ctx;
- rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
- rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
- (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
+ /*
+ * old_edata is only for verifying that xallocx() keeps the edata_t
+ * object associated with the ptr (though the content of the edata_t
+ * object can be changed).
+ */
+ edata_t *old_edata = emap_edata_lookup(tsd_tsdn(tsd),
+ &arena_emap_global, ptr);
+
+ emap_alloc_ctx_t alloc_ctx;
+ emap_alloc_ctx_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr,
+ &alloc_ctx);
assert(alloc_ctx.szind != SC_NSIZES);
old_usize = sz_index2size(alloc_ctx.szind);
assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
@@ -3419,13 +3808,25 @@ je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
extra, alignment, zero);
}
+
+ /*
+ * xallocx() should keep using the same edata_t object (though its
+ * content can be changed).
+ */
+ assert(emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global, ptr)
+ == old_edata);
+
if (unlikely(usize == old_usize)) {
goto label_not_resized;
}
+ thread_alloc_event(tsd, usize);
+ thread_dalloc_event(tsd, old_usize);
- if (config_stats) {
- *tsd_thread_allocatedp_get(tsd) += usize;
- *tsd_thread_deallocatedp_get(tsd) += old_usize;
+ if (config_fill && unlikely(opt_junk_alloc) && usize > old_usize &&
+ !zero) {
+ size_t excess_len = usize - old_usize;
+ void *excess_start = (void *)((uintptr_t)ptr + old_usize);
+ junk_alloc_callback(excess_start, excess_len);
}
label_not_resized:
if (unlikely(!tsd_fast(tsd))) {
@@ -3475,31 +3876,13 @@ je_dallocx(void *ptr, int flags) {
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
- tsd_t *tsd = tsd_fetch();
+ tsd_t *tsd = tsd_fetch_min();
bool fast = tsd_fast(tsd);
check_entry_exit_locking(tsd_tsdn(tsd));
- tcache_t *tcache;
- if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
- /* Not allowed to be reentrant and specify a custom tcache. */
- assert(tsd_reentrancy_level_get(tsd) == 0);
- if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
- tcache = NULL;
- } else {
- tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
- }
- } else {
- if (likely(fast)) {
- tcache = tsd_tcachep_get(tsd);
- assert(tcache == tcache_get(tsd));
- } else {
- if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
- tcache = tcache_get(tsd);
- } else {
- tcache = NULL;
- }
- }
- }
+ unsigned tcache_ind = mallocx_tcache_get(flags);
+ tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, !fast,
+ /* is_alloc */ false);
UTRACE(ptr, 0, 0);
if (likely(fast)) {
@@ -3518,13 +3901,9 @@ je_dallocx(void *ptr, int flags) {
JEMALLOC_ALWAYS_INLINE size_t
inallocx(tsdn_t *tsdn, size_t size, int flags) {
check_entry_exit_locking(tsdn);
-
size_t usize;
- if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
- usize = sz_s2u(size);
- } else {
- usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
- }
+	/* If the size is out of range, let the user see it rather than fail. */
+ aligned_usize_get(size, MALLOCX_ALIGN_GET(flags), &usize, NULL, false);
check_entry_exit_locking(tsdn);
return usize;
}
@@ -3534,33 +3913,14 @@ sdallocx_default(void *ptr, size_t size, int flags) {
assert(ptr != NULL);
assert(malloc_initialized() || IS_INITIALIZER);
- tsd_t *tsd = tsd_fetch();
+ tsd_t *tsd = tsd_fetch_min();
bool fast = tsd_fast(tsd);
size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
- assert(usize == isalloc(tsd_tsdn(tsd), ptr));
check_entry_exit_locking(tsd_tsdn(tsd));
- tcache_t *tcache;
- if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
- /* Not allowed to be reentrant and specify a custom tcache. */
- assert(tsd_reentrancy_level_get(tsd) == 0);
- if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
- tcache = NULL;
- } else {
- tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
- }
- } else {
- if (likely(fast)) {
- tcache = tsd_tcachep_get(tsd);
- assert(tcache == tcache_get(tsd));
- } else {
- if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
- tcache = tcache_get(tsd);
- } else {
- tcache = NULL;
- }
- }
- }
+ unsigned tcache_ind = mallocx_tcache_get(flags);
+ tcache_t *tcache = tcache_get_from_ind(tsd, tcache_ind, !fast,
+ /* is_alloc */ false);
UTRACE(ptr, 0, 0);
if (likely(fast)) {
@@ -3572,7 +3932,6 @@ sdallocx_default(void *ptr, size_t size, int flags) {
isfree(tsd, ptr, usize, tcache, true);
}
check_entry_exit_locking(tsd_tsdn(tsd));
-
}
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
@@ -3580,7 +3939,7 @@ je_sdallocx(void *ptr, size_t size, int flags) {
LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
size, flags);
- if (flags !=0 || !free_fastpath(ptr, size, true)) {
+ if (flags != 0 || !free_fastpath(ptr, size, true)) {
sdallocx_default(ptr, size, flags);
}
@@ -3689,6 +4048,7 @@ je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
return ret;
}
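+/*
+ * Buffer stats output (STATS_PRINT_BUFSIZE below) to batch write_cb calls;
+ * debug builds bypass the buffer, presumably to keep the printing path
+ * simpler and allocation-free while debugging.
+ */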
+#define STATS_PRINT_BUFSIZE 65536
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
const char *opts) {
@@ -3698,23 +4058,30 @@ je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
tsdn = tsdn_fetch();
check_entry_exit_locking(tsdn);
- stats_print(write_cb, cbopaque, opts);
+
+ if (config_debug) {
+ stats_print(write_cb, cbopaque, opts);
+ } else {
+ buf_writer_t buf_writer;
+ buf_writer_init(tsdn, &buf_writer, write_cb, cbopaque, NULL,
+ STATS_PRINT_BUFSIZE);
+ stats_print(buf_writer_cb, &buf_writer, opts);
+ buf_writer_terminate(tsdn, &buf_writer);
+ }
+
check_entry_exit_locking(tsdn);
LOG("core.malloc_stats_print.exit", "");
}
+#undef STATS_PRINT_BUFSIZE
-JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
-je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
- size_t ret;
- tsdn_t *tsdn;
-
- LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
-
+JEMALLOC_ALWAYS_INLINE size_t
+je_malloc_usable_size_impl(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
assert(malloc_initialized() || IS_INITIALIZER);
- tsdn = tsdn_fetch();
+ tsdn_t *tsdn = tsdn_fetch();
check_entry_exit_locking(tsdn);
+ size_t ret;
if (unlikely(ptr == NULL)) {
ret = 0;
} else {
@@ -3725,12 +4092,211 @@ je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
ret = isalloc(tsdn, ptr);
}
}
-
check_entry_exit_locking(tsdn);
+
+ return ret;
+}
+
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
+ LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
+
+ size_t ret = je_malloc_usable_size_impl(ptr);
+
LOG("core.malloc_usable_size.exit", "result: %zu", ret);
return ret;
}
+#ifdef JEMALLOC_HAVE_MALLOC_SIZE
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
+je_malloc_size(const void *ptr) {
+ LOG("core.malloc_size.entry", "ptr: %p", ptr);
+
+ size_t ret = je_malloc_usable_size_impl(ptr);
+
+ LOG("core.malloc_size.exit", "result: %zu", ret);
+ return ret;
+}
+#endif
+
+static void
+batch_alloc_prof_sample_assert(tsd_t *tsd, size_t batch, size_t usize) {
+ assert(config_prof && opt_prof);
+ bool prof_sample_event = te_prof_sample_event_lookahead(tsd,
+ batch * usize);
+ assert(!prof_sample_event);
+ size_t surplus;
+ prof_sample_event = te_prof_sample_event_lookahead_surplus(tsd,
+ (batch + 1) * usize, &surplus);
+ assert(prof_sample_event);
+ assert(surplus < usize);
+}
+
+size_t
+batch_alloc(void **ptrs, size_t num, size_t size, int flags) {
+ LOG("core.batch_alloc.entry",
+ "ptrs: %p, num: %zu, size: %zu, flags: %d", ptrs, num, size, flags);
+
+ tsd_t *tsd = tsd_fetch();
+ check_entry_exit_locking(tsd_tsdn(tsd));
+
+ size_t filled = 0;
+
+ if (unlikely(tsd == NULL || tsd_reentrancy_level_get(tsd) > 0)) {
+ goto label_done;
+ }
+
+ size_t alignment = MALLOCX_ALIGN_GET(flags);
+ size_t usize;
+ if (aligned_usize_get(size, alignment, &usize, NULL, false)) {
+ goto label_done;
+ }
+ szind_t ind = sz_size2index(usize);
+ bool zero = zero_get(MALLOCX_ZERO_GET(flags), /* slow */ true);
+
+ /*
+ * The cache bin and arena will be lazily initialized; it's hard to
+ * know in advance whether each of them needs to be initialized.
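+	 * (The bin is resolved on the first cache-bin harvest below; the
+	 * arena on the first fresh-slab fill.)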
+ */
+ cache_bin_t *bin = NULL;
+ arena_t *arena = NULL;
+
+ size_t nregs = 0;
+ if (likely(ind < SC_NBINS)) {
+ nregs = bin_infos[ind].nregs;
+ assert(nregs > 0);
+ }
+
+ while (filled < num) {
+ size_t batch = num - filled;
+ size_t surplus = SIZE_MAX; /* Dead store. */
+ bool prof_sample_event = config_prof && opt_prof
+ && prof_active_get_unlocked()
+ && te_prof_sample_event_lookahead_surplus(tsd,
+ batch * usize, &surplus);
+
+ if (prof_sample_event) {
+ /*
+ * Adjust so that the batch does not trigger prof
+ * sampling.
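+			 * Illustrative numbers (not from the source): with
+			 * usize == 100, batch == 10 and surplus == 250, the
+			 * batch is trimmed by 250 / 100 + 1 == 3 down to 7,
+			 * so 700 bytes stay below the sampling threshold of
+			 * 750, while an 8th allocation would cross it
+			 * (exactly what batch_alloc_prof_sample_assert()
+			 * checks).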
+ */
+ batch -= surplus / usize + 1;
+ batch_alloc_prof_sample_assert(tsd, batch, usize);
+ }
+
+ size_t progress = 0;
+
+ if (likely(ind < SC_NBINS) && batch >= nregs) {
+ if (arena == NULL) {
+ unsigned arena_ind = mallocx_arena_get(flags);
+ if (arena_get_from_ind(tsd, arena_ind,
+ &arena)) {
+ goto label_done;
+ }
+ if (arena == NULL) {
+ arena = arena_choose(tsd, NULL);
+ }
+ if (unlikely(arena == NULL)) {
+ goto label_done;
+ }
+ }
+ size_t arena_batch = batch - batch % nregs;
+ size_t n = arena_fill_small_fresh(tsd_tsdn(tsd), arena,
+ ind, ptrs + filled, arena_batch, zero);
+ progress += n;
+ filled += n;
+ }
+
+ if (likely(ind < nhbins) && progress < batch) {
+ if (bin == NULL) {
+ unsigned tcache_ind = mallocx_tcache_get(flags);
+ tcache_t *tcache = tcache_get_from_ind(tsd,
+ tcache_ind, /* slow */ true,
+ /* is_alloc */ true);
+ if (tcache != NULL) {
+ bin = &tcache->bins[ind];
+ }
+ }
+ /*
+ * If we don't have a tcache bin, we don't want to
+ * immediately give up, because there's the possibility
+ * that the user explicitly requested to bypass the
+ * tcache, or that the user explicitly turned off the
+ * tcache; in such cases, we go through the slow path,
+ * i.e. the mallocx() call at the end of the while loop.
+ */
+ if (bin != NULL) {
+ size_t bin_batch = batch - progress;
+ /*
+ * n can be less than bin_batch, meaning that
+ * the cache bin does not have enough memory.
+ * In such cases, we rely on the slow path,
+ * i.e. the mallocx() call at the end of the
+ * while loop, to fill in the cache, and in the
+ * next iteration of the while loop, the tcache
+ * will contain a lot of memory, and we can
+ * harvest them here. Compared to the
+ * alternative approach where we directly go to
+ * the arena bins here, the overhead of our
+ * current approach should usually be minimal,
+ * since we never try to fetch more memory than
+ * what a slab contains via the tcache. An
+ * additional benefit is that the tcache will
+ * not be empty for the next allocation request.
+ */
+ size_t n = cache_bin_alloc_batch(bin, bin_batch,
+ ptrs + filled);
+ if (config_stats) {
+ bin->tstats.nrequests += n;
+ }
+ if (zero) {
+ for (size_t i = 0; i < n; ++i) {
+ memset(ptrs[filled + i], 0,
+ usize);
+ }
+ }
+ if (config_prof && opt_prof
+ && unlikely(ind >= SC_NBINS)) {
+ for (size_t i = 0; i < n; ++i) {
+ prof_tctx_reset_sampled(tsd,
+ ptrs[filled + i]);
+ }
+ }
+ progress += n;
+ filled += n;
+ }
+ }
+
+ /*
+ * For thread events other than prof sampling, trigger them as
+		 * if there's a single allocation of size (progress * usize).  This is
+ * fine because:
+ * (a) these events do not alter the allocation itself, and
+ * (b) it's possible that some event would have been triggered
+ * multiple times, instead of only once, if the allocations
+ * were handled individually, but it would do no harm (or
+ * even be beneficial) to coalesce the triggerings.
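+		 * For example (illustrative numbers), with progress == 3 and
+		 * usize == 4096, one 12288-byte event is posted in place of
+		 * three 4096-byte events.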
+ */
+ thread_alloc_event(tsd, progress * usize);
+
+ if (progress < batch || prof_sample_event) {
+ void *p = je_mallocx(size, flags);
+ if (p == NULL) { /* OOM */
+ break;
+ }
+ if (progress == batch) {
+ assert(prof_sampled(tsd, p));
+ }
+ ptrs[filled++] = p;
+ }
+ }
+
+label_done:
+ check_entry_exit_locking(tsd_tsdn(tsd));
+ LOG("core.batch_alloc.exit", "result: %zu", filled);
+ return filled;
+}
+
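+/*
+ * Illustrative (hypothetical) caller -- a sketch assuming direct access to
+ * this internal symbol, not part of the change itself:
+ *
+ *	void *ptrs[16];
+ *	size_t filled = batch_alloc(ptrs, 16, 48, MALLOCX_ZERO);
+ *
+ * Only ptrs[0 .. filled) are valid afterwards; filled < 16 signals OOM (or
+ * reentrancy), and each pointer is later released individually, e.g. via
+ * free().
+ */
+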
/*
* End non-standard functions.
*/
@@ -3894,7 +4460,7 @@ _malloc_prefork(void)
background_thread_prefork1(tsd_tsdn(tsd));
}
/* Break arena prefork into stages to preserve lock order. */
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 9; i++) {
for (j = 0; j < narenas; j++) {
if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
NULL) {
@@ -3923,12 +4489,17 @@ _malloc_prefork(void)
case 7:
arena_prefork7(tsd_tsdn(tsd), arena);
break;
+ case 8:
+ arena_prefork8(tsd_tsdn(tsd), arena);
+ break;
default: not_reached();
}
}
}
+
}
prof_prefork1(tsd_tsdn(tsd));
+ stats_prefork(tsd_tsdn(tsd));
tsd_prefork(tsd);
}
@@ -3956,6 +4527,7 @@ _malloc_postfork(void)
witness_postfork_parent(tsd_witness_tsdp_get(tsd));
/* Release all mutexes, now that fork() has completed. */
+ stats_postfork_parent(tsd_tsdn(tsd));
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
@@ -3985,6 +4557,7 @@ jemalloc_postfork_child(void) {
witness_postfork_child(tsd_witness_tsdp_get(tsd));
/* Release all mutexes, now that fork() has completed. */
+ stats_postfork_child(tsd_tsdn(tsd));
for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
arena_t *arena;
diff --git a/contrib/jemalloc/src/jemalloc_cpp.cpp b/contrib/jemalloc/src/jemalloc_cpp.cpp
new file mode 100644
index 000000000000..d606686e0c97
--- /dev/null
+++ b/contrib/jemalloc/src/jemalloc_cpp.cpp
@@ -0,0 +1,258 @@
+#include <mutex>
+#include <new>
+
+#define JEMALLOC_CPP_CPP_
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#ifdef __cplusplus
+}
+#endif
+
+// All operators in this file are exported.
+
+// Possibly alias hidden versions of malloc and sdallocx to avoid an extra plt
+// thunk?
+//
+// extern __typeof (sdallocx) sdallocx_int
+// __attribute ((alias ("sdallocx"),
+// visibility ("hidden")));
+//
+// ... but it needs to work with jemalloc namespaces.
+
+void *operator new(std::size_t size);
+void *operator new[](std::size_t size);
+void *operator new(std::size_t size, const std::nothrow_t &) noexcept;
+void *operator new[](std::size_t size, const std::nothrow_t &) noexcept;
+void operator delete(void *ptr) noexcept;
+void operator delete[](void *ptr) noexcept;
+void operator delete(void *ptr, const std::nothrow_t &) noexcept;
+void operator delete[](void *ptr, const std::nothrow_t &) noexcept;
+
+#if __cpp_sized_deallocation >= 201309
+/* C++14's sized-delete operators. */
+void operator delete(void *ptr, std::size_t size) noexcept;
+void operator delete[](void *ptr, std::size_t size) noexcept;
+#endif
+
+#if __cpp_aligned_new >= 201606
+/* C++17's over-aligned operators. */
+void *operator new(std::size_t size, std::align_val_t);
+void *operator new(std::size_t size, std::align_val_t, const std::nothrow_t &) noexcept;
+void *operator new[](std::size_t size, std::align_val_t);
+void *operator new[](std::size_t size, std::align_val_t, const std::nothrow_t &) noexcept;
+void operator delete(void* ptr, std::align_val_t) noexcept;
+void operator delete(void* ptr, std::align_val_t, const std::nothrow_t &) noexcept;
+void operator delete(void* ptr, std::size_t size, std::align_val_t al) noexcept;
+void operator delete[](void* ptr, std::align_val_t) noexcept;
+void operator delete[](void* ptr, std::align_val_t, const std::nothrow_t &) noexcept;
+void operator delete[](void* ptr, std::size_t size, std::align_val_t al) noexcept;
+#endif
+
+JEMALLOC_NOINLINE
+static void *
+handleOOM(std::size_t size, bool nothrow) {
+ if (opt_experimental_infallible_new) {
+ safety_check_fail("<jemalloc>: Allocation failed and "
+ "opt.experimental_infallible_new is true. Aborting.\n");
+ return nullptr;
+ }
+
+ void *ptr = nullptr;
+
+ while (ptr == nullptr) {
+ std::new_handler handler;
+ // GCC-4.8 and clang 4.0 do not have std::get_new_handler.
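+		// The swap-and-restore below is the lookup: set_new_handler
+		// returns the previous handler, which is immediately
+		// reinstalled; the mutex keeps the read-and-restore pair
+		// atomic with respect to concurrent lookups.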
+ {
+ static std::mutex mtx;
+ std::lock_guard<std::mutex> lock(mtx);
+
+ handler = std::set_new_handler(nullptr);
+ std::set_new_handler(handler);
+ }
+ if (handler == nullptr)
+ break;
+
+ try {
+ handler();
+ } catch (const std::bad_alloc &) {
+ break;
+ }
+
+ ptr = je_malloc(size);
+ }
+
+ if (ptr == nullptr && !nothrow)
+ std::__throw_bad_alloc();
+ return ptr;
+}
+
+template <bool IsNoExcept>
+JEMALLOC_NOINLINE
+static void *
+fallback_impl(std::size_t size) noexcept(IsNoExcept) {
+ void *ptr = malloc_default(size);
+ if (likely(ptr != nullptr)) {
+ return ptr;
+ }
+ return handleOOM(size, IsNoExcept);
+}
+
+template <bool IsNoExcept>
+JEMALLOC_ALWAYS_INLINE
+void *
+newImpl(std::size_t size) noexcept(IsNoExcept) {
+ return imalloc_fastpath(size, &fallback_impl<IsNoExcept>);
+}
+
+void *
+operator new(std::size_t size) {
+ return newImpl<false>(size);
+}
+
+void *
+operator new[](std::size_t size) {
+ return newImpl<false>(size);
+}
+
+void *
+operator new(std::size_t size, const std::nothrow_t &) noexcept {
+ return newImpl<true>(size);
+}
+
+void *
+operator new[](std::size_t size, const std::nothrow_t &) noexcept {
+ return newImpl<true>(size);
+}
+
+#if __cpp_aligned_new >= 201606
+
+template <bool IsNoExcept>
+JEMALLOC_ALWAYS_INLINE
+void *
+alignedNewImpl(std::size_t size, std::align_val_t alignment) noexcept(IsNoExcept) {
+ void *ptr = je_aligned_alloc(static_cast<std::size_t>(alignment), size);
+ if (likely(ptr != nullptr)) {
+ return ptr;
+ }
+
+ return handleOOM(size, IsNoExcept);
+}
+
+void *
+operator new(std::size_t size, std::align_val_t alignment) {
+ return alignedNewImpl<false>(size, alignment);
+}
+
+void *
+operator new[](std::size_t size, std::align_val_t alignment) {
+ return alignedNewImpl<false>(size, alignment);
+}
+
+void *
+operator new(std::size_t size, std::align_val_t alignment, const std::nothrow_t &) noexcept {
+ return alignedNewImpl<true>(size, alignment);
+}
+
+void *
+operator new[](std::size_t size, std::align_val_t alignment, const std::nothrow_t &) noexcept {
+ return alignedNewImpl<true>(size, alignment);
+}
+
+#endif // __cpp_aligned_new
+
+void
+operator delete(void *ptr) noexcept {
+ je_free(ptr);
+}
+
+void
+operator delete[](void *ptr) noexcept {
+ je_free(ptr);
+}
+
+void
+operator delete(void *ptr, const std::nothrow_t &) noexcept {
+ je_free(ptr);
+}
+
+void operator delete[](void *ptr, const std::nothrow_t &) noexcept {
+ je_free(ptr);
+}
+
+#if __cpp_sized_deallocation >= 201309
+
+JEMALLOC_ALWAYS_INLINE
+void
+sizedDeleteImpl(void* ptr, std::size_t size) noexcept {
+ if (unlikely(ptr == nullptr)) {
+ return;
+ }
+ je_sdallocx_noflags(ptr, size);
+}
+
+void
+operator delete(void *ptr, std::size_t size) noexcept {
+ sizedDeleteImpl(ptr, size);
+}
+
+void
+operator delete[](void *ptr, std::size_t size) noexcept {
+ sizedDeleteImpl(ptr, size);
+}
+
+#endif // __cpp_sized_deallocation
+
+#if __cpp_aligned_new >= 201606
+
+JEMALLOC_ALWAYS_INLINE
+void
+alignedSizedDeleteImpl(void* ptr, std::size_t size, std::align_val_t alignment) noexcept {
+ if (config_debug) {
+ assert(((size_t)alignment & ((size_t)alignment - 1)) == 0);
+ }
+ if (unlikely(ptr == nullptr)) {
+ return;
+ }
+ je_sdallocx(ptr, size, MALLOCX_ALIGN(alignment));
+}
+
+void
+operator delete(void* ptr, std::align_val_t) noexcept {
+ je_free(ptr);
+}
+
+void
+operator delete[](void* ptr, std::align_val_t) noexcept {
+ je_free(ptr);
+}
+
+void
+operator delete(void* ptr, std::align_val_t, const std::nothrow_t&) noexcept {
+ je_free(ptr);
+}
+
+void
+operator delete[](void* ptr, std::align_val_t, const std::nothrow_t&) noexcept {
+ je_free(ptr);
+}
+
+void
+operator delete(void* ptr, std::size_t size, std::align_val_t alignment) noexcept {
+ alignedSizedDeleteImpl(ptr, size, alignment);
+}
+
+void
+operator delete[](void* ptr, std::size_t size, std::align_val_t alignment) noexcept {
+ alignedSizedDeleteImpl(ptr, size, alignment);
+}
+
+#endif // __cpp_aligned_new
diff --git a/contrib/jemalloc/src/large.c b/contrib/jemalloc/src/large.c
index 8e7a781d3300..5fc4bf584c45 100644
--- a/contrib/jemalloc/src/large.c
+++ b/contrib/jemalloc/src/large.c
@@ -1,11 +1,11 @@
-#define JEMALLOC_LARGE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/emap.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/rtree.h"
+#include "jemalloc/internal/prof_recent.h"
#include "jemalloc/internal/util.h"
/******************************************************************************/
@@ -21,8 +21,7 @@ void *
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
bool zero) {
size_t ausize;
- extent_t *extent;
- bool is_zeroed;
+ edata_t *edata;
UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
assert(!tsdn_null(tsdn) || arena != NULL);
@@ -32,163 +31,80 @@ large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
return NULL;
}
- if (config_fill && unlikely(opt_zero)) {
- zero = true;
- }
- /*
- * Copy zero into is_zeroed and pass the copy when allocating the
- * extent, so that it is possible to make correct junk/zero fill
- * decisions below, even if is_zeroed ends up true when zero is false.
- */
- is_zeroed = zero;
if (likely(!tsdn_null(tsdn))) {
arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
}
- if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
- arena, usize, alignment, &is_zeroed)) == NULL) {
+ if (unlikely(arena == NULL) || (edata = arena_extent_alloc_large(tsdn,
+ arena, usize, alignment, zero)) == NULL) {
return NULL;
}
/* See comments in arena_bin_slabs_full_insert(). */
if (!arena_is_auto(arena)) {
- /* Insert extent into large. */
+ /* Insert edata into large. */
malloc_mutex_lock(tsdn, &arena->large_mtx);
- extent_list_append(&arena->large, extent);
+ edata_list_active_append(&arena->large, edata);
malloc_mutex_unlock(tsdn, &arena->large_mtx);
}
- if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
- prof_idump(tsdn);
- }
-
- if (zero) {
- assert(is_zeroed);
- } else if (config_fill && unlikely(opt_junk_alloc)) {
- memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
- extent_usize_get(extent));
- }
arena_decay_tick(tsdn, arena);
- return extent_addr_get(extent);
+ return edata_addr_get(edata);
}
-static void
-large_dalloc_junk_impl(void *ptr, size_t size) {
- memset(ptr, JEMALLOC_FREE_JUNK, size);
-}
-large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl;
-
-static void
-large_dalloc_maybe_junk_impl(void *ptr, size_t size) {
- if (config_fill && have_dss && unlikely(opt_junk_free)) {
- /*
- * Only bother junk filling if the extent isn't about to be
- * unmapped.
- */
- if (opt_retain || (have_dss && extent_in_dss(ptr))) {
- large_dalloc_junk(ptr, size);
- }
- }
-}
-large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
- large_dalloc_maybe_junk_impl;
-
static bool
-large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
- arena_t *arena = extent_arena_get(extent);
- size_t oldusize = extent_usize_get(extent);
- extent_hooks_t *extent_hooks = extent_hooks_get(arena);
- size_t diff = extent_size_get(extent) - (usize + sz_large_pad);
+large_ralloc_no_move_shrink(tsdn_t *tsdn, edata_t *edata, size_t usize) {
+ arena_t *arena = arena_get_from_edata(edata);
+ ehooks_t *ehooks = arena_get_ehooks(arena);
+ size_t old_size = edata_size_get(edata);
+ size_t old_usize = edata_usize_get(edata);
- assert(oldusize > usize);
+ assert(old_usize > usize);
- if (extent_hooks->split == NULL) {
+ if (ehooks_split_will_fail(ehooks)) {
return true;
}
- /* Split excess pages. */
- if (diff != 0) {
- extent_t *trail = extent_split_wrapper(tsdn, arena,
- &extent_hooks, extent, usize + sz_large_pad,
- sz_size2index(usize), false, diff, SC_NSIZES, false);
- if (trail == NULL) {
- return true;
- }
-
- if (config_fill && unlikely(opt_junk_free)) {
- large_dalloc_maybe_junk(extent_addr_get(trail),
- extent_size_get(trail));
- }
-
- arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail);
+ bool deferred_work_generated = false;
+ bool err = pa_shrink(tsdn, &arena->pa_shard, edata, old_size,
+ usize + sz_large_pad, sz_size2index(usize),
+ &deferred_work_generated);
+ if (err) {
+ return true;
}
-
- arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);
+ if (deferred_work_generated) {
+ arena_handle_deferred_work(tsdn, arena);
+ }
+ arena_extent_ralloc_large_shrink(tsdn, arena, edata, old_usize);
return false;
}
static bool
-large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
+large_ralloc_no_move_expand(tsdn_t *tsdn, edata_t *edata, size_t usize,
bool zero) {
- arena_t *arena = extent_arena_get(extent);
- size_t oldusize = extent_usize_get(extent);
- extent_hooks_t *extent_hooks = extent_hooks_get(arena);
- size_t trailsize = usize - oldusize;
+ arena_t *arena = arena_get_from_edata(edata);
- if (extent_hooks->merge == NULL) {
- return true;
- }
+ size_t old_size = edata_size_get(edata);
+ size_t old_usize = edata_usize_get(edata);
+ size_t new_size = usize + sz_large_pad;
- if (config_fill && unlikely(opt_zero)) {
- zero = true;
- }
- /*
- * Copy zero into is_zeroed_trail and pass the copy when allocating the
- * extent, so that it is possible to make correct junk/zero fill
- * decisions below, even if is_zeroed_trail ends up true when zero is
- * false.
- */
- bool is_zeroed_trail = zero;
- bool commit = true;
- extent_t *trail;
- bool new_mapping;
- if ((trail = extents_alloc(tsdn, arena, &extent_hooks,
- &arena->extents_dirty, extent_past_get(extent), trailsize, 0,
- CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL
- || (trail = extents_alloc(tsdn, arena, &extent_hooks,
- &arena->extents_muzzy, extent_past_get(extent), trailsize, 0,
- CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL) {
- if (config_stats) {
- new_mapping = false;
- }
- } else {
- if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
- extent_past_get(extent), trailsize, 0, CACHELINE, false,
- SC_NSIZES, &is_zeroed_trail, &commit)) == NULL) {
- return true;
- }
- if (config_stats) {
- new_mapping = true;
- }
- }
+ szind_t szind = sz_size2index(usize);
- if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
- extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
- return true;
+ bool deferred_work_generated = false;
+ bool err = pa_expand(tsdn, &arena->pa_shard, edata, old_size, new_size,
+ szind, zero, &deferred_work_generated);
+
+ if (deferred_work_generated) {
+ arena_handle_deferred_work(tsdn, arena);
}
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
- szind_t szind = sz_size2index(usize);
- extent_szind_set(extent, szind);
- rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
- (uintptr_t)extent_addr_get(extent), szind, false);
- if (config_stats && new_mapping) {
- arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
+ if (err) {
+ return true;
}
if (zero) {
- if (config_cache_oblivious) {
+ if (opt_cache_oblivious) {
+ assert(sz_large_pad == PAGE);
/*
* Zero the trailing bytes of the original allocation's
* last page, since they are in an indeterminate state.
@@ -197,28 +113,23 @@ large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
* of CACHELINE in [0 .. PAGE).
*/
void *zbase = (void *)
- ((uintptr_t)extent_addr_get(extent) + oldusize);
+ ((uintptr_t)edata_addr_get(edata) + old_usize);
void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
PAGE));
size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
assert(nzero > 0);
memset(zbase, 0, nzero);
}
- assert(is_zeroed_trail);
- } else if (config_fill && unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
- JEMALLOC_ALLOC_JUNK, usize - oldusize);
}
-
- arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);
+ arena_extent_ralloc_large_expand(tsdn, arena, edata, old_usize);
return false;
}
bool
-large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
+large_ralloc_no_move(tsdn_t *tsdn, edata_t *edata, size_t usize_min,
size_t usize_max, bool zero) {
- size_t oldusize = extent_usize_get(extent);
+ size_t oldusize = edata_usize_get(edata);
/* The following should have been caught by callers. */
assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS);
@@ -228,16 +139,15 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
if (usize_max > oldusize) {
/* Attempt to expand the allocation in-place. */
- if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
+ if (!large_ralloc_no_move_expand(tsdn, edata, usize_max,
zero)) {
- arena_decay_tick(tsdn, extent_arena_get(extent));
+ arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
/* Try again, this time with usize_min. */
if (usize_min < usize_max && usize_min > oldusize &&
- large_ralloc_no_move_expand(tsdn, extent, usize_min,
- zero)) {
- arena_decay_tick(tsdn, extent_arena_get(extent));
+ large_ralloc_no_move_expand(tsdn, edata, usize_min, zero)) {
+ arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
}
@@ -247,14 +157,14 @@ large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
* the new size.
*/
if (oldusize >= usize_min && oldusize <= usize_max) {
- arena_decay_tick(tsdn, extent_arena_get(extent));
+ arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
/* Attempt to shrink the allocation in-place. */
if (oldusize > usize_max) {
- if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
- arena_decay_tick(tsdn, extent_arena_get(extent));
+ if (!large_ralloc_no_move_shrink(tsdn, edata, usize_max)) {
+ arena_decay_tick(tsdn, arena_get_from_edata(edata));
return false;
}
}
@@ -274,9 +184,9 @@ void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
size_t alignment, bool zero, tcache_t *tcache,
hook_ralloc_args_t *hook_args) {
- extent_t *extent = iealloc(tsdn, ptr);
+ edata_t *edata = emap_edata_lookup(tsdn, &arena_emap_global, ptr);
- size_t oldusize = extent_usize_get(extent);
+ size_t oldusize = edata_usize_get(edata);
/* The following should have been caught by callers. */
assert(usize > 0 && usize <= SC_LARGE_MAXCLASS);
/* Both allocation sizes must be large to avoid a move. */
@@ -284,11 +194,11 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
&& usize >= SC_LARGE_MINCLASS);
/* Try to avoid moving the allocation. */
- if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
+ if (!large_ralloc_no_move(tsdn, edata, usize, usize, zero)) {
hook_invoke_expand(hook_args->is_realloc
? hook_expand_realloc : hook_expand_rallocx, ptr, oldusize,
usize, (uintptr_t)ptr, hook_args->args);
- return extent_addr_get(extent);
+ return edata_addr_get(edata);
}
/*
@@ -309,87 +219,104 @@ large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
size_t copysize = (usize < oldusize) ? usize : oldusize;
- memcpy(ret, extent_addr_get(extent), copysize);
- isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true);
+ memcpy(ret, edata_addr_get(edata), copysize);
+ isdalloct(tsdn, edata_addr_get(edata), oldusize, tcache, NULL, true);
return ret;
}
/*
- * junked_locked indicates whether the extent's data have been junk-filled, and
- * whether the arena's large_mtx is currently held.
+ * locked indicates whether the arena's large_mtx is currently held.
*/
static void
-large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
- bool junked_locked) {
- if (!junked_locked) {
+large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata,
+ bool locked) {
+ if (!locked) {
/* See comments in arena_bin_slabs_full_insert(). */
if (!arena_is_auto(arena)) {
malloc_mutex_lock(tsdn, &arena->large_mtx);
- extent_list_remove(&arena->large, extent);
+ edata_list_active_remove(&arena->large, edata);
malloc_mutex_unlock(tsdn, &arena->large_mtx);
}
- large_dalloc_maybe_junk(extent_addr_get(extent),
- extent_usize_get(extent));
} else {
/* Only hold the large_mtx if necessary. */
if (!arena_is_auto(arena)) {
malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
- extent_list_remove(&arena->large, extent);
+ edata_list_active_remove(&arena->large, edata);
}
}
- arena_extent_dalloc_large_prep(tsdn, arena, extent);
+ arena_extent_dalloc_large_prep(tsdn, arena, edata);
}
static void
-large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
- extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
- arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent);
+large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, edata_t *edata) {
+ bool deferred_work_generated = false;
+ pa_dalloc(tsdn, &arena->pa_shard, edata, &deferred_work_generated);
+ if (deferred_work_generated) {
+ arena_handle_deferred_work(tsdn, arena);
+ }
}
void
-large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
- large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true);
+large_dalloc_prep_locked(tsdn_t *tsdn, edata_t *edata) {
+ large_dalloc_prep_impl(tsdn, arena_get_from_edata(edata), edata, true);
}
void
-large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
- large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent);
+large_dalloc_finish(tsdn_t *tsdn, edata_t *edata) {
+ large_dalloc_finish_impl(tsdn, arena_get_from_edata(edata), edata);
}
void
-large_dalloc(tsdn_t *tsdn, extent_t *extent) {
- arena_t *arena = extent_arena_get(extent);
- large_dalloc_prep_impl(tsdn, arena, extent, false);
- large_dalloc_finish_impl(tsdn, arena, extent);
+large_dalloc(tsdn_t *tsdn, edata_t *edata) {
+ arena_t *arena = arena_get_from_edata(edata);
+ large_dalloc_prep_impl(tsdn, arena, edata, false);
+ large_dalloc_finish_impl(tsdn, arena, edata);
arena_decay_tick(tsdn, arena);
}
size_t
-large_salloc(tsdn_t *tsdn, const extent_t *extent) {
- return extent_usize_get(extent);
-}
-
-prof_tctx_t *
-large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) {
- return extent_prof_tctx_get(extent);
+large_salloc(tsdn_t *tsdn, const edata_t *edata) {
+ return edata_usize_get(edata);
}
void
-large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) {
- extent_prof_tctx_set(extent, tctx);
+large_prof_info_get(tsd_t *tsd, edata_t *edata, prof_info_t *prof_info,
+ bool reset_recent) {
+ assert(prof_info != NULL);
+
+ prof_tctx_t *alloc_tctx = edata_prof_tctx_get(edata);
+ prof_info->alloc_tctx = alloc_tctx;
+
+ if ((uintptr_t)alloc_tctx > (uintptr_t)1U) {
+ nstime_copy(&prof_info->alloc_time,
+ edata_prof_alloc_time_get(edata));
+ prof_info->alloc_size = edata_prof_alloc_size_get(edata);
+ if (reset_recent) {
+ /*
+ * Reset the pointer on the recent allocation record,
+ * so that this allocation is recorded as released.
+ */
+ prof_recent_alloc_reset(tsd, edata);
+ }
+ }
}
-void
-large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) {
- large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
+static void
+large_prof_tctx_set(edata_t *edata, prof_tctx_t *tctx) {
+ edata_prof_tctx_set(edata, tctx);
}
-nstime_t
-large_prof_alloc_time_get(const extent_t *extent) {
- return extent_prof_alloc_time_get(extent);
+void
+large_prof_tctx_reset(edata_t *edata) {
+ large_prof_tctx_set(edata, (prof_tctx_t *)(uintptr_t)1U);
}
void
-large_prof_alloc_time_set(extent_t *extent, nstime_t t) {
- extent_prof_alloc_time_set(extent, t);
+large_prof_info_set(edata_t *edata, prof_tctx_t *tctx, size_t size) {
+ nstime_t t;
+ nstime_prof_init_update(&t);
+ edata_prof_alloc_time_set(edata, &t);
+ edata_prof_alloc_size_set(edata, size);
+ edata_prof_recent_alloc_init(edata);
+ large_prof_tctx_set(edata, tctx);
}
diff --git a/contrib/jemalloc/src/malloc_io.c b/contrib/jemalloc/src/malloc_io.c
index cda589c467c7..93a30497fc95 100644
--- a/contrib/jemalloc/src/malloc_io.c
+++ b/contrib/jemalloc/src/malloc_io.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_MALLOC_IO_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -53,7 +52,6 @@
/******************************************************************************/
/* Function prototypes for non-inline static functions. */
-static void wrtmessage(void *cbopaque, const char *s);
#define U2S_BUFSIZE ((1U << (LG_SIZEOF_INTMAX_T + 3)) + 1)
static char *u2s(uintmax_t x, unsigned base, bool uppercase, char *s,
size_t *slen_p);
@@ -68,7 +66,7 @@ static char *x2s(uintmax_t x, bool alt_form, bool uppercase, char *s,
/******************************************************************************/
/* malloc_message() setup. */
-static void
+void
wrtmessage(void *cbopaque, const char *s) {
malloc_write_fd(STDERR_FILENO, s, strlen(s));
}
@@ -149,10 +147,10 @@ malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base) {
break;
case '-':
neg = true;
- /* Fall through. */
+ JEMALLOC_FALLTHROUGH;
case '+':
p++;
- /* Fall through. */
+ JEMALLOC_FALLTHROUGH;
default:
goto label_prefix;
}
@@ -303,7 +301,7 @@ d2s(intmax_t x, char sign, char *s, size_t *slen_p) {
if (!neg) {
break;
}
- /* Fall through. */
+ JEMALLOC_FALLTHROUGH;
case ' ':
case '+':
s--;
@@ -337,6 +335,7 @@ x2s(uintmax_t x, bool alt_form, bool uppercase, char *s, size_t *slen_p) {
return s;
}
+JEMALLOC_COLD
size_t
malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
size_t i;
@@ -362,7 +361,11 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
if (!left_justify && pad_len != 0) { \
size_t j; \
for (j = 0; j < pad_len; j++) { \
- APPEND_C(' '); \
+ if (pad_zero) { \
+ APPEND_C('0'); \
+ } else { \
+ APPEND_C(' '); \
+ } \
} \
} \
/* Value. */ \
@@ -434,6 +437,8 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
unsigned char len = '?';
char *s;
size_t slen;
+ bool first_width_digit = true;
+ bool pad_zero = false;
f++;
/* Flags. */
@@ -470,7 +475,12 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
width = -width;
}
break;
- case '0': case '1': case '2': case '3': case '4':
+ case '0':
+ if (first_width_digit) {
+ pad_zero = true;
+ }
+ JEMALLOC_FALLTHROUGH;
+ case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
uintmax_t uwidth;
set_errno(0);
@@ -478,6 +488,7 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
assert(uwidth != UINTMAX_MAX || get_errno() !=
ERANGE);
width = (int)uwidth;
+ first_width_digit = false;
break;
} default:
break;
@@ -535,6 +546,18 @@ malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
intmax_t val JEMALLOC_CC_SILENCE_INIT(0);
char buf[D2S_BUFSIZE];
+ /*
+ * Outputting negative, zero-padded numbers
+ * would require a nontrivial rework of the
+ * interaction between the width and padding
+ * (since 0 padding goes between the '-' and the
+ * number, while ' ' padding goes either before
+			 * the '-' or after the number).  Since we
+ * currently don't ever need 0-padded negative
+ * numbers, just don't bother supporting it.
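+			 * (Zero-padded unsigned output is the supported
+			 * case; e.g. "%08zu" is expected to render 42 as
+			 * "00000042".)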
+ */
+ assert(!pad_zero);
+
GET_ARG_NUMERIC(val, len);
s = d2s(val, (plus_plus ? '+' : (plus_space ?
' ' : '-')), buf, &slen);
@@ -634,8 +657,8 @@ malloc_snprintf(char *str, size_t size, const char *format, ...) {
}
void
-malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, va_list ap) {
+malloc_vcprintf(write_cb_t *write_cb, void *cbopaque, const char *format,
+ va_list ap) {
char buf[MALLOC_PRINTF_BUFSIZE];
if (write_cb == NULL) {
@@ -658,8 +681,7 @@ malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
*/
JEMALLOC_FORMAT_PRINTF(3, 4)
void
-malloc_cprintf(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *format, ...) {
+malloc_cprintf(write_cb_t *write_cb, void *cbopaque, const char *format, ...) {
va_list ap;
va_start(ap, format);
diff --git a/contrib/jemalloc/src/mutex.c b/contrib/jemalloc/src/mutex.c
index 88a7730c64c1..ceb1d031d7df 100644
--- a/contrib/jemalloc/src/mutex.c
+++ b/contrib/jemalloc/src/mutex.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_MUTEX_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -10,6 +9,12 @@
#define _CRT_SPINCOUNT 4000
#endif
+/*
+ * Based on benchmark results, a fixed spin with this amount of retries works
+ * well for our critical sections.
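+ * A value of -1 makes the spin loop retry indefinitely (see the bound check
+ * in malloc_mutex_lock_slow below).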
+ */
+int64_t opt_mutex_max_spin = 600;
+
/******************************************************************************/
/* Data. */
@@ -42,6 +47,7 @@ pthread_create(pthread_t *__restrict thread,
JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
void *(calloc_cb)(size_t, size_t));
+#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
#pragma weak _pthread_mutex_init_calloc_cb
int
_pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
@@ -53,17 +59,18 @@ _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
calloc_cb));
}
#endif
+#endif
void
malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
mutex_prof_data_t *data = &mutex->prof_data;
- nstime_t before = NSTIME_ZERO_INITIALIZER;
+ nstime_t before;
if (ncpus == 1) {
goto label_spin_done;
}
- int cnt = 0, max_cnt = MALLOC_MUTEX_MAX_SPIN;
+ int cnt = 0;
do {
spin_cpu_spinwait();
if (!atomic_load_b(&mutex->locked, ATOMIC_RELAXED)
@@ -71,7 +78,7 @@ malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
data->n_spin_acquired++;
return;
}
- } while (cnt++ < max_cnt);
+ } while (cnt++ < opt_mutex_max_spin || opt_mutex_max_spin == -1);
if (!config_stats) {
/* Only spin is useful when stats is off. */
@@ -79,7 +86,7 @@ malloc_mutex_lock_slow(malloc_mutex_t *mutex) {
return;
}
label_spin_done:
- nstime_update(&before);
+ nstime_init_update(&before);
/* Copy before to after to avoid clock skews. */
nstime_t after;
nstime_copy(&after, &before);
@@ -115,8 +122,8 @@ label_spin_done:
static void
mutex_prof_data_init(mutex_prof_data_t *data) {
memset(data, 0, sizeof(mutex_prof_data_t));
- nstime_init(&data->max_wait_time, 0);
- nstime_init(&data->tot_wait_time, 0);
+ nstime_init_zero(&data->max_wait_time);
+ nstime_init_zero(&data->tot_wait_time);
data->prev_owner = NULL;
}
diff --git a/contrib/jemalloc/src/mutex_pool.c b/contrib/jemalloc/src/mutex_pool.c
deleted file mode 100644
index f24d10e44a80..000000000000
--- a/contrib/jemalloc/src/mutex_pool.c
+++ /dev/null
@@ -1,18 +0,0 @@
-#define JEMALLOC_MUTEX_POOL_C_
-
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
-
-#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/mutex_pool.h"
-
-bool
-mutex_pool_init(mutex_pool_t *pool, const char *name, witness_rank_t rank) {
- for (int i = 0; i < MUTEX_POOL_SIZE; ++i) {
- if (malloc_mutex_init(&pool->mutexes[i], name, rank,
- malloc_mutex_address_ordered)) {
- return true;
- }
- }
- return false;
-}
diff --git a/contrib/jemalloc/src/nstime.c b/contrib/jemalloc/src/nstime.c
index 71db353965ff..a1a53777febb 100644
--- a/contrib/jemalloc/src/nstime.c
+++ b/contrib/jemalloc/src/nstime.c
@@ -8,96 +8,169 @@
#define BILLION UINT64_C(1000000000)
#define MILLION UINT64_C(1000000)
+static void
+nstime_set_initialized(nstime_t *time) {
+#ifdef JEMALLOC_DEBUG
+ time->magic = NSTIME_MAGIC;
+#endif
+}
+
+static void
+nstime_assert_initialized(const nstime_t *time) {
+#ifdef JEMALLOC_DEBUG
+ /*
+ * Some parts (e.g. stats) rely on memset to zero initialize. Treat
+ * these as valid initialization.
+ */
+ assert(time->magic == NSTIME_MAGIC ||
+ (time->magic == 0 && time->ns == 0));
+#endif
+}
+
+static void
+nstime_pair_assert_initialized(const nstime_t *t1, const nstime_t *t2) {
+ nstime_assert_initialized(t1);
+ nstime_assert_initialized(t2);
+}
+
+static void
+nstime_initialize_operand(nstime_t *time) {
+ /*
+ * Operations like nstime_add may have the initial operand being zero
+	 * initialized (covered by the assert below).  A full initialization is
+	 * needed before it changes to a non-zero value.
+ */
+ nstime_assert_initialized(time);
+ nstime_set_initialized(time);
+}
+
void
nstime_init(nstime_t *time, uint64_t ns) {
+ nstime_set_initialized(time);
time->ns = ns;
}
void
nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec) {
+ nstime_set_initialized(time);
time->ns = sec * BILLION + nsec;
}
uint64_t
nstime_ns(const nstime_t *time) {
+ nstime_assert_initialized(time);
return time->ns;
}
uint64_t
nstime_msec(const nstime_t *time) {
+ nstime_assert_initialized(time);
return time->ns / MILLION;
}
uint64_t
nstime_sec(const nstime_t *time) {
+ nstime_assert_initialized(time);
return time->ns / BILLION;
}
uint64_t
nstime_nsec(const nstime_t *time) {
+ nstime_assert_initialized(time);
return time->ns % BILLION;
}
void
nstime_copy(nstime_t *time, const nstime_t *source) {
+ /* Source is required to be initialized. */
+ nstime_assert_initialized(source);
*time = *source;
+ nstime_assert_initialized(time);
}
int
nstime_compare(const nstime_t *a, const nstime_t *b) {
+ nstime_pair_assert_initialized(a, b);
return (a->ns > b->ns) - (a->ns < b->ns);
}
void
nstime_add(nstime_t *time, const nstime_t *addend) {
+ nstime_pair_assert_initialized(time, addend);
assert(UINT64_MAX - time->ns >= addend->ns);
+ nstime_initialize_operand(time);
time->ns += addend->ns;
}
void
nstime_iadd(nstime_t *time, uint64_t addend) {
+ nstime_assert_initialized(time);
assert(UINT64_MAX - time->ns >= addend);
+ nstime_initialize_operand(time);
time->ns += addend;
}
void
nstime_subtract(nstime_t *time, const nstime_t *subtrahend) {
+ nstime_pair_assert_initialized(time, subtrahend);
assert(nstime_compare(time, subtrahend) >= 0);
+ /* No initialize operand -- subtraction must be initialized. */
time->ns -= subtrahend->ns;
}
void
nstime_isubtract(nstime_t *time, uint64_t subtrahend) {
+ nstime_assert_initialized(time);
assert(time->ns >= subtrahend);
+ /* No initialize operand -- subtraction must be initialized. */
time->ns -= subtrahend;
}
void
nstime_imultiply(nstime_t *time, uint64_t multiplier) {
+ nstime_assert_initialized(time);
assert((((time->ns | multiplier) & (UINT64_MAX << (sizeof(uint64_t) <<
2))) == 0) || ((time->ns * multiplier) / multiplier == time->ns));
+ nstime_initialize_operand(time);
time->ns *= multiplier;
}
void
nstime_idivide(nstime_t *time, uint64_t divisor) {
+ nstime_assert_initialized(time);
assert(divisor != 0);
+ nstime_initialize_operand(time);
time->ns /= divisor;
}
uint64_t
nstime_divide(const nstime_t *time, const nstime_t *divisor) {
+ nstime_pair_assert_initialized(time, divisor);
assert(divisor->ns != 0);
+ /* No initialize operand -- *time itself remains unchanged. */
return time->ns / divisor->ns;
}
+/* Returns time since *past, w/o updating *past. */
+uint64_t
+nstime_ns_since(const nstime_t *past) {
+ nstime_assert_initialized(past);
+
+ nstime_t now;
+ nstime_copy(&now, past);
+ nstime_update(&now);
+
+ assert(nstime_compare(&now, past) >= 0);
+ return now.ns - past->ns;
+}
+
#ifdef _WIN32
# define NSTIME_MONOTONIC true
static void
@@ -152,7 +225,42 @@ nstime_monotonic_impl(void) {
}
nstime_monotonic_t *JET_MUTABLE nstime_monotonic = nstime_monotonic_impl;
-static bool
+prof_time_res_t opt_prof_time_res =
+ prof_time_res_default;
+
+const char *prof_time_res_mode_names[] = {
+ "default",
+ "high",
+};
+
+
+static void
+nstime_get_realtime(nstime_t *time) {
+#if defined(JEMALLOC_HAVE_CLOCK_REALTIME) && !defined(_WIN32)
+ struct timespec ts;
+
+ clock_gettime(CLOCK_REALTIME, &ts);
+ nstime_init2(time, ts.tv_sec, ts.tv_nsec);
+#else
+ unreachable();
+#endif
+}
+
+static void
+nstime_prof_update_impl(nstime_t *time) {
+ nstime_t old_time;
+
+ nstime_copy(&old_time, time);
+
+ if (opt_prof_time_res == prof_time_res_high) {
+ nstime_get_realtime(time);
+ } else {
+ nstime_get(time);
+ }
+}
+nstime_prof_update_t *JET_MUTABLE nstime_prof_update = nstime_prof_update_impl;
+
+static void
nstime_update_impl(nstime_t *time) {
nstime_t old_time;
@@ -162,9 +270,20 @@ nstime_update_impl(nstime_t *time) {
/* Handle non-monotonic clocks. */
if (unlikely(nstime_compare(&old_time, time) > 0)) {
nstime_copy(time, &old_time);
- return true;
}
-
- return false;
}
nstime_update_t *JET_MUTABLE nstime_update = nstime_update_impl;
+
+void
+nstime_init_update(nstime_t *time) {
+ nstime_init_zero(time);
+ nstime_update(time);
+}
+
+void
+nstime_prof_init_update(nstime_t *time) {
+ nstime_init_zero(time);
+ nstime_prof_update(time);
+}
+
+
diff --git a/contrib/jemalloc/src/pa.c b/contrib/jemalloc/src/pa.c
new file mode 100644
index 000000000000..eb7e4620ea53
--- /dev/null
+++ b/contrib/jemalloc/src/pa.c
@@ -0,0 +1,277 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/san.h"
+#include "jemalloc/internal/hpa.h"
+
+static void
+pa_nactive_add(pa_shard_t *shard, size_t add_pages) {
+ atomic_fetch_add_zu(&shard->nactive, add_pages, ATOMIC_RELAXED);
+}
+
+static void
+pa_nactive_sub(pa_shard_t *shard, size_t sub_pages) {
+ assert(atomic_load_zu(&shard->nactive, ATOMIC_RELAXED) >= sub_pages);
+ atomic_fetch_sub_zu(&shard->nactive, sub_pages, ATOMIC_RELAXED);
+}
+
+bool
+pa_central_init(pa_central_t *central, base_t *base, bool hpa,
+ hpa_hooks_t *hpa_hooks) {
+ bool err;
+ if (hpa) {
+ err = hpa_central_init(&central->hpa, base, hpa_hooks);
+ if (err) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool
+pa_shard_init(tsdn_t *tsdn, pa_shard_t *shard, pa_central_t *central,
+ emap_t *emap, base_t *base, unsigned ind, pa_shard_stats_t *stats,
+ malloc_mutex_t *stats_mtx, nstime_t *cur_time,
+ size_t pac_oversize_threshold, ssize_t dirty_decay_ms,
+ ssize_t muzzy_decay_ms) {
+ /* This will change eventually, but for now it should hold. */
+ assert(base_ind_get(base) == ind);
+ if (edata_cache_init(&shard->edata_cache, base)) {
+ return true;
+ }
+
+ if (pac_init(tsdn, &shard->pac, base, emap, &shard->edata_cache,
+ cur_time, pac_oversize_threshold, dirty_decay_ms, muzzy_decay_ms,
+ &stats->pac_stats, stats_mtx)) {
+ return true;
+ }
+
+ shard->ind = ind;
+
+ shard->ever_used_hpa = false;
+ atomic_store_b(&shard->use_hpa, false, ATOMIC_RELAXED);
+
+ atomic_store_zu(&shard->nactive, 0, ATOMIC_RELAXED);
+
+ shard->stats_mtx = stats_mtx;
+ shard->stats = stats;
+ memset(shard->stats, 0, sizeof(*shard->stats));
+
+ shard->central = central;
+ shard->emap = emap;
+ shard->base = base;
+
+ return false;
+}
+
+bool
+pa_shard_enable_hpa(tsdn_t *tsdn, pa_shard_t *shard,
+ const hpa_shard_opts_t *hpa_opts, const sec_opts_t *hpa_sec_opts) {
+ if (hpa_shard_init(&shard->hpa_shard, &shard->central->hpa, shard->emap,
+ shard->base, &shard->edata_cache, shard->ind, hpa_opts)) {
+ return true;
+ }
+ if (sec_init(tsdn, &shard->hpa_sec, shard->base, &shard->hpa_shard.pai,
+ hpa_sec_opts)) {
+ return true;
+ }
+ shard->ever_used_hpa = true;
+ atomic_store_b(&shard->use_hpa, true, ATOMIC_RELAXED);
+
+ return false;
+}
+
+void
+pa_shard_disable_hpa(tsdn_t *tsdn, pa_shard_t *shard) {
+ atomic_store_b(&shard->use_hpa, false, ATOMIC_RELAXED);
+ if (shard->ever_used_hpa) {
+ sec_disable(tsdn, &shard->hpa_sec);
+ hpa_shard_disable(tsdn, &shard->hpa_shard);
+ }
+}
+
+void
+pa_shard_reset(tsdn_t *tsdn, pa_shard_t *shard) {
+ atomic_store_zu(&shard->nactive, 0, ATOMIC_RELAXED);
+ if (shard->ever_used_hpa) {
+ sec_flush(tsdn, &shard->hpa_sec);
+ }
+}
+
+static bool
+pa_shard_uses_hpa(pa_shard_t *shard) {
+ return atomic_load_b(&shard->use_hpa, ATOMIC_RELAXED);
+}
+
+void
+pa_shard_destroy(tsdn_t *tsdn, pa_shard_t *shard) {
+ pac_destroy(tsdn, &shard->pac);
+ if (shard->ever_used_hpa) {
+ sec_flush(tsdn, &shard->hpa_sec);
+ hpa_shard_disable(tsdn, &shard->hpa_shard);
+ }
+}
+
+static pai_t *
+pa_get_pai(pa_shard_t *shard, edata_t *edata) {
+ return (edata_pai_get(edata) == EXTENT_PAI_PAC
+ ? &shard->pac.pai : &shard->hpa_sec.pai);
+}
+
+edata_t *
+pa_alloc(tsdn_t *tsdn, pa_shard_t *shard, size_t size, size_t alignment,
+ bool slab, szind_t szind, bool zero, bool guarded,
+ bool *deferred_work_generated) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+ assert(!guarded || alignment <= PAGE);
+
+ edata_t *edata = NULL;
+ if (!guarded && pa_shard_uses_hpa(shard)) {
+ edata = pai_alloc(tsdn, &shard->hpa_sec.pai, size, alignment,
+ zero, /* guarded */ false, slab, deferred_work_generated);
+ }
+ /*
+ * Fall back to the PAC if the HPA is off or couldn't serve the given
+ * allocation request.
+ */
+ if (edata == NULL) {
+ edata = pai_alloc(tsdn, &shard->pac.pai, size, alignment, zero,
+ guarded, slab, deferred_work_generated);
+ }
+ if (edata != NULL) {
+ assert(edata_size_get(edata) == size);
+ pa_nactive_add(shard, size >> LG_PAGE);
+ emap_remap(tsdn, shard->emap, edata, szind, slab);
+ edata_szind_set(edata, szind);
+ edata_slab_set(edata, slab);
+ if (slab && (size > 2 * PAGE)) {
+ emap_register_interior(tsdn, shard->emap, edata, szind);
+ }
+ assert(edata_arena_ind_get(edata) == shard->ind);
+ }
+ return edata;
+}
+
+bool
+pa_expand(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
+ size_t new_size, szind_t szind, bool zero, bool *deferred_work_generated) {
+ assert(new_size > old_size);
+ assert(edata_size_get(edata) == old_size);
+ assert((new_size & PAGE_MASK) == 0);
+ if (edata_guarded_get(edata)) {
+ return true;
+ }
+ size_t expand_amount = new_size - old_size;
+
+ pai_t *pai = pa_get_pai(shard, edata);
+
+ bool error = pai_expand(tsdn, pai, edata, old_size, new_size, zero,
+ deferred_work_generated);
+ if (error) {
+ return true;
+ }
+
+ pa_nactive_add(shard, expand_amount >> LG_PAGE);
+ edata_szind_set(edata, szind);
+ emap_remap(tsdn, shard->emap, edata, szind, /* slab */ false);
+ return false;
+}
+
+bool
+pa_shrink(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata, size_t old_size,
+ size_t new_size, szind_t szind, bool *deferred_work_generated) {
+ assert(new_size < old_size);
+ assert(edata_size_get(edata) == old_size);
+ assert((new_size & PAGE_MASK) == 0);
+ if (edata_guarded_get(edata)) {
+ return true;
+ }
+ size_t shrink_amount = old_size - new_size;
+
+ pai_t *pai = pa_get_pai(shard, edata);
+ bool error = pai_shrink(tsdn, pai, edata, old_size, new_size,
+ deferred_work_generated);
+ if (error) {
+ return true;
+ }
+ pa_nactive_sub(shard, shrink_amount >> LG_PAGE);
+
+ edata_szind_set(edata, szind);
+ emap_remap(tsdn, shard->emap, edata, szind, /* slab */ false);
+ return false;
+}
+
+void
+pa_dalloc(tsdn_t *tsdn, pa_shard_t *shard, edata_t *edata,
+ bool *deferred_work_generated) {
+ emap_remap(tsdn, shard->emap, edata, SC_NSIZES, /* slab */ false);
+ if (edata_slab_get(edata)) {
+ emap_deregister_interior(tsdn, shard->emap, edata);
+ /*
+ * The slab state of the extent isn't cleared. It may be used
+ * by the pai implementation, e.g. to make caching decisions.
+ */
+ }
+ edata_addr_set(edata, edata_base_get(edata));
+ edata_szind_set(edata, SC_NSIZES);
+ pa_nactive_sub(shard, edata_size_get(edata) >> LG_PAGE);
+ pai_t *pai = pa_get_pai(shard, edata);
+ pai_dalloc(tsdn, pai, edata, deferred_work_generated);
+}
+
+bool
+pa_shard_retain_grow_limit_get_set(tsdn_t *tsdn, pa_shard_t *shard,
+ size_t *old_limit, size_t *new_limit) {
+ return pac_retain_grow_limit_get_set(tsdn, &shard->pac, old_limit,
+ new_limit);
+}
+
+bool
+pa_decay_ms_set(tsdn_t *tsdn, pa_shard_t *shard, extent_state_t state,
+ ssize_t decay_ms, pac_purge_eagerness_t eagerness) {
+ return pac_decay_ms_set(tsdn, &shard->pac, state, decay_ms, eagerness);
+}
+
+ssize_t
+pa_decay_ms_get(pa_shard_t *shard, extent_state_t state) {
+ return pac_decay_ms_get(&shard->pac, state);
+}
+
+void
+pa_shard_set_deferral_allowed(tsdn_t *tsdn, pa_shard_t *shard,
+ bool deferral_allowed) {
+ if (pa_shard_uses_hpa(shard)) {
+ hpa_shard_set_deferral_allowed(tsdn, &shard->hpa_shard,
+ deferral_allowed);
+ }
+}
+
+void
+pa_shard_do_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) {
+ if (pa_shard_uses_hpa(shard)) {
+ hpa_shard_do_deferred_work(tsdn, &shard->hpa_shard);
+ }
+}
+
+/*
+ * Get time until next deferred work ought to happen. If there are multiple
+ * things that have been deferred, this function calculates the time until
+ * the soonest of those things.
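+ * E.g., if the PAC's next purge is due in 5 ms worth of nanoseconds and the
+ * HPA's in 2 ms, the 2 ms figure is returned.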
+ */
+uint64_t
+pa_shard_time_until_deferred_work(tsdn_t *tsdn, pa_shard_t *shard) {
+ uint64_t time = pai_time_until_deferred_work(tsdn, &shard->pac.pai);
+ if (time == BACKGROUND_THREAD_DEFERRED_MIN) {
+ return time;
+ }
+
+ if (pa_shard_uses_hpa(shard)) {
+ uint64_t hpa =
+ pai_time_until_deferred_work(tsdn, &shard->hpa_shard.pai);
+ if (hpa < time) {
+ time = hpa;
+ }
+ }
+ return time;
+}
diff --git a/contrib/jemalloc/src/pa_extra.c b/contrib/jemalloc/src/pa_extra.c
new file mode 100644
index 000000000000..0f488be69c5d
--- /dev/null
+++ b/contrib/jemalloc/src/pa_extra.c
@@ -0,0 +1,191 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+/*
+ * This file is logically part of the PA module. While pa.c contains the core
+ * allocator functionality, this file contains boring integration functionality;
+ * things like the pre- and post- fork handlers, and stats merging for CTL
+ * refreshes.
+ */
+
+void
+pa_shard_prefork0(tsdn_t *tsdn, pa_shard_t *shard) {
+ malloc_mutex_prefork(tsdn, &shard->pac.decay_dirty.mtx);
+ malloc_mutex_prefork(tsdn, &shard->pac.decay_muzzy.mtx);
+}
+
+void
+pa_shard_prefork2(tsdn_t *tsdn, pa_shard_t *shard) {
+ if (shard->ever_used_hpa) {
+ sec_prefork2(tsdn, &shard->hpa_sec);
+ }
+}
+
+void
+pa_shard_prefork3(tsdn_t *tsdn, pa_shard_t *shard) {
+ malloc_mutex_prefork(tsdn, &shard->pac.grow_mtx);
+ if (shard->ever_used_hpa) {
+ hpa_shard_prefork3(tsdn, &shard->hpa_shard);
+ }
+}
+
+void
+pa_shard_prefork4(tsdn_t *tsdn, pa_shard_t *shard) {
+ ecache_prefork(tsdn, &shard->pac.ecache_dirty);
+ ecache_prefork(tsdn, &shard->pac.ecache_muzzy);
+ ecache_prefork(tsdn, &shard->pac.ecache_retained);
+ if (shard->ever_used_hpa) {
+ hpa_shard_prefork4(tsdn, &shard->hpa_shard);
+ }
+}
+
+void
+pa_shard_prefork5(tsdn_t *tsdn, pa_shard_t *shard) {
+ edata_cache_prefork(tsdn, &shard->edata_cache);
+}
+
+void
+pa_shard_postfork_parent(tsdn_t *tsdn, pa_shard_t *shard) {
+ edata_cache_postfork_parent(tsdn, &shard->edata_cache);
+ ecache_postfork_parent(tsdn, &shard->pac.ecache_dirty);
+ ecache_postfork_parent(tsdn, &shard->pac.ecache_muzzy);
+ ecache_postfork_parent(tsdn, &shard->pac.ecache_retained);
+ malloc_mutex_postfork_parent(tsdn, &shard->pac.grow_mtx);
+ malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_dirty.mtx);
+ malloc_mutex_postfork_parent(tsdn, &shard->pac.decay_muzzy.mtx);
+ if (shard->ever_used_hpa) {
+ sec_postfork_parent(tsdn, &shard->hpa_sec);
+ hpa_shard_postfork_parent(tsdn, &shard->hpa_shard);
+ }
+}
+
+void
+pa_shard_postfork_child(tsdn_t *tsdn, pa_shard_t *shard) {
+ edata_cache_postfork_child(tsdn, &shard->edata_cache);
+ ecache_postfork_child(tsdn, &shard->pac.ecache_dirty);
+ ecache_postfork_child(tsdn, &shard->pac.ecache_muzzy);
+ ecache_postfork_child(tsdn, &shard->pac.ecache_retained);
+ malloc_mutex_postfork_child(tsdn, &shard->pac.grow_mtx);
+ malloc_mutex_postfork_child(tsdn, &shard->pac.decay_dirty.mtx);
+ malloc_mutex_postfork_child(tsdn, &shard->pac.decay_muzzy.mtx);
+ if (shard->ever_used_hpa) {
+ sec_postfork_child(tsdn, &shard->hpa_sec);
+ hpa_shard_postfork_child(tsdn, &shard->hpa_shard);
+ }
+}
+
+void
+pa_shard_basic_stats_merge(pa_shard_t *shard, size_t *nactive, size_t *ndirty,
+ size_t *nmuzzy) {
+ *nactive += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED);
+ *ndirty += ecache_npages_get(&shard->pac.ecache_dirty);
+ *nmuzzy += ecache_npages_get(&shard->pac.ecache_muzzy);
+}
+
+void
+pa_shard_stats_merge(tsdn_t *tsdn, pa_shard_t *shard,
+ pa_shard_stats_t *pa_shard_stats_out, pac_estats_t *estats_out,
+ hpa_shard_stats_t *hpa_stats_out, sec_stats_t *sec_stats_out,
+ size_t *resident) {
+ cassert(config_stats);
+
+ pa_shard_stats_out->pac_stats.retained +=
+ ecache_npages_get(&shard->pac.ecache_retained) << LG_PAGE;
+ pa_shard_stats_out->edata_avail += atomic_load_zu(
+ &shard->edata_cache.count, ATOMIC_RELAXED);
+
+ size_t resident_pgs = 0;
+ resident_pgs += atomic_load_zu(&shard->nactive, ATOMIC_RELAXED);
+ resident_pgs += ecache_npages_get(&shard->pac.ecache_dirty);
+ *resident += (resident_pgs << LG_PAGE);
+
+ /* Dirty decay stats */
+ locked_inc_u64_unsynchronized(
+ &pa_shard_stats_out->pac_stats.decay_dirty.npurge,
+ locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
+ &shard->pac.stats->decay_dirty.npurge));
+ locked_inc_u64_unsynchronized(
+ &pa_shard_stats_out->pac_stats.decay_dirty.nmadvise,
+ locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
+ &shard->pac.stats->decay_dirty.nmadvise));
+ locked_inc_u64_unsynchronized(
+ &pa_shard_stats_out->pac_stats.decay_dirty.purged,
+ locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
+ &shard->pac.stats->decay_dirty.purged));
+
+ /* Muzzy decay stats */
+ locked_inc_u64_unsynchronized(
+ &pa_shard_stats_out->pac_stats.decay_muzzy.npurge,
+ locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
+ &shard->pac.stats->decay_muzzy.npurge));
+ locked_inc_u64_unsynchronized(
+ &pa_shard_stats_out->pac_stats.decay_muzzy.nmadvise,
+ locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
+ &shard->pac.stats->decay_muzzy.nmadvise));
+ locked_inc_u64_unsynchronized(
+ &pa_shard_stats_out->pac_stats.decay_muzzy.purged,
+ locked_read_u64(tsdn, LOCKEDINT_MTX(*shard->stats_mtx),
+ &shard->pac.stats->decay_muzzy.purged));
+
+ atomic_load_add_store_zu(&pa_shard_stats_out->pac_stats.abandoned_vm,
+ atomic_load_zu(&shard->pac.stats->abandoned_vm, ATOMIC_RELAXED));
+
+ for (pszind_t i = 0; i < SC_NPSIZES; i++) {
+ size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
+ retained_bytes;
+ dirty = ecache_nextents_get(&shard->pac.ecache_dirty, i);
+ muzzy = ecache_nextents_get(&shard->pac.ecache_muzzy, i);
+ retained = ecache_nextents_get(&shard->pac.ecache_retained, i);
+ dirty_bytes = ecache_nbytes_get(&shard->pac.ecache_dirty, i);
+ muzzy_bytes = ecache_nbytes_get(&shard->pac.ecache_muzzy, i);
+ retained_bytes = ecache_nbytes_get(&shard->pac.ecache_retained,
+ i);
+
+ estats_out[i].ndirty = dirty;
+ estats_out[i].nmuzzy = muzzy;
+ estats_out[i].nretained = retained;
+ estats_out[i].dirty_bytes = dirty_bytes;
+ estats_out[i].muzzy_bytes = muzzy_bytes;
+ estats_out[i].retained_bytes = retained_bytes;
+ }
+
+ if (shard->ever_used_hpa) {
+ hpa_shard_stats_merge(tsdn, &shard->hpa_shard, hpa_stats_out);
+ sec_stats_merge(tsdn, &shard->hpa_sec, sec_stats_out);
+ }
+}
+
+static void
+pa_shard_mtx_stats_read_single(tsdn_t *tsdn, mutex_prof_data_t *mutex_prof_data,
+ malloc_mutex_t *mtx, int ind) {
+ malloc_mutex_lock(tsdn, mtx);
+ malloc_mutex_prof_read(tsdn, &mutex_prof_data[ind], mtx);
+ malloc_mutex_unlock(tsdn, mtx);
+}
+
+void
+pa_shard_mtx_stats_read(tsdn_t *tsdn, pa_shard_t *shard,
+ mutex_prof_data_t mutex_prof_data[mutex_prof_num_arena_mutexes]) {
+ pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
+ &shard->edata_cache.mtx, arena_prof_mutex_extent_avail);
+ pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
+ &shard->pac.ecache_dirty.mtx, arena_prof_mutex_extents_dirty);
+ pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
+ &shard->pac.ecache_muzzy.mtx, arena_prof_mutex_extents_muzzy);
+ pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
+ &shard->pac.ecache_retained.mtx, arena_prof_mutex_extents_retained);
+ pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
+ &shard->pac.decay_dirty.mtx, arena_prof_mutex_decay_dirty);
+ pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
+ &shard->pac.decay_muzzy.mtx, arena_prof_mutex_decay_muzzy);
+
+ if (shard->ever_used_hpa) {
+ pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
+ &shard->hpa_shard.mtx, arena_prof_mutex_hpa_shard);
+ pa_shard_mtx_stats_read_single(tsdn, mutex_prof_data,
+ &shard->hpa_shard.grow_mtx,
+ arena_prof_mutex_hpa_shard_grow);
+ sec_mutex_stats_read(tsdn, &shard->hpa_sec,
+ &mutex_prof_data[arena_prof_mutex_hpa_sec]);
+ }
+}
diff --git a/contrib/jemalloc/src/pac.c b/contrib/jemalloc/src/pac.c
new file mode 100644
index 000000000000..53e3d823758e
--- /dev/null
+++ b/contrib/jemalloc/src/pac.c
@@ -0,0 +1,587 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/pac.h"
+#include "jemalloc/internal/san.h"
+
+static edata_t *pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size,
+ size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+ bool *deferred_work_generated);
+static bool pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
+static bool pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool *deferred_work_generated);
+static void pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ bool *deferred_work_generated);
+static uint64_t pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self);
+
+static inline void
+pac_decay_data_get(pac_t *pac, extent_state_t state,
+ decay_t **r_decay, pac_decay_stats_t **r_decay_stats, ecache_t **r_ecache) {
+ switch(state) {
+ case extent_state_dirty:
+ *r_decay = &pac->decay_dirty;
+ *r_decay_stats = &pac->stats->decay_dirty;
+ *r_ecache = &pac->ecache_dirty;
+ return;
+ case extent_state_muzzy:
+ *r_decay = &pac->decay_muzzy;
+ *r_decay_stats = &pac->stats->decay_muzzy;
+ *r_ecache = &pac->ecache_muzzy;
+ return;
+ default:
+ unreachable();
+ }
+}
+
+bool
+pac_init(tsdn_t *tsdn, pac_t *pac, base_t *base, emap_t *emap,
+ edata_cache_t *edata_cache, nstime_t *cur_time,
+ size_t pac_oversize_threshold, ssize_t dirty_decay_ms,
+ ssize_t muzzy_decay_ms, pac_stats_t *pac_stats, malloc_mutex_t *stats_mtx) {
+ unsigned ind = base_ind_get(base);
+ /*
+ * Delay coalescing for dirty extents despite the disruptive effect on
+ * memory layout for best-fit extent allocation, since cached extents
+ * are likely to be reused soon after deallocation, and the cost of
+ * merging/splitting extents is non-trivial.
+ */
+ if (ecache_init(tsdn, &pac->ecache_dirty, extent_state_dirty, ind,
+ /* delay_coalesce */ true)) {
+ return true;
+ }
+ /*
+ * Coalesce muzzy extents immediately, because operations on them are in
+ * the critical path much less often than for dirty extents.
+ */
+ if (ecache_init(tsdn, &pac->ecache_muzzy, extent_state_muzzy, ind,
+ /* delay_coalesce */ false)) {
+ return true;
+ }
+ /*
+ * Coalesce retained extents immediately, in part because they will
+ * never be evicted (and therefore there's no opportunity for delayed
+ * coalescing), but also because operations on retained extents are not
+ * in the critical path.
+ */
+ if (ecache_init(tsdn, &pac->ecache_retained, extent_state_retained,
+ ind, /* delay_coalesce */ false)) {
+ return true;
+ }
+ exp_grow_init(&pac->exp_grow);
+ if (malloc_mutex_init(&pac->grow_mtx, "extent_grow",
+ WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ atomic_store_zu(&pac->oversize_threshold, pac_oversize_threshold,
+ ATOMIC_RELAXED);
+ if (decay_init(&pac->decay_dirty, cur_time, dirty_decay_ms)) {
+ return true;
+ }
+ if (decay_init(&pac->decay_muzzy, cur_time, muzzy_decay_ms)) {
+ return true;
+ }
+ if (san_bump_alloc_init(&pac->sba)) {
+ return true;
+ }
+
+ pac->base = base;
+ pac->emap = emap;
+ pac->edata_cache = edata_cache;
+ pac->stats = pac_stats;
+ pac->stats_mtx = stats_mtx;
+ atomic_store_zu(&pac->extent_sn_next, 0, ATOMIC_RELAXED);
+
+ pac->pai.alloc = &pac_alloc_impl;
+ pac->pai.alloc_batch = &pai_alloc_batch_default;
+ pac->pai.expand = &pac_expand_impl;
+ pac->pai.shrink = &pac_shrink_impl;
+ pac->pai.dalloc = &pac_dalloc_impl;
+ pac->pai.dalloc_batch = &pai_dalloc_batch_default;
+ pac->pai.time_until_deferred_work = &pac_time_until_deferred_work;
+
+ return false;
+}
+
+static inline bool
+pac_may_have_muzzy(pac_t *pac) {
+ return pac_decay_ms_get(pac, extent_state_muzzy) != 0;
+}
+
+static edata_t *
+pac_alloc_real(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
+ size_t alignment, bool zero, bool guarded) {
+ assert(!guarded || alignment <= PAGE);
+
+ edata_t *edata = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_dirty,
+ NULL, size, alignment, zero, guarded);
+
+ if (edata == NULL && pac_may_have_muzzy(pac)) {
+ edata = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_muzzy,
+ NULL, size, alignment, zero, guarded);
+ }
+ if (edata == NULL) {
+ edata = ecache_alloc_grow(tsdn, pac, ehooks,
+ &pac->ecache_retained, NULL, size, alignment, zero,
+ guarded);
+ if (config_stats && edata != NULL) {
+ atomic_fetch_add_zu(&pac->stats->pac_mapped, size,
+ ATOMIC_RELAXED);
+ }
+ }
+
+ return edata;
+}
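
pac_alloc_real is a first-hit-wins cascade over progressively colder tiers: dirty extents (recently freed, contents still mapped), then muzzy extents (lazily purged) when muzzy decay is in use, and finally growing from retained or newly mapped memory; the mapped-bytes statistic is updated only on the last tier, since it is the only one that adds mapped memory. A hedged, self-contained sketch of the same cascade shape, with stub tiers standing in for the ecaches:

#include <stddef.h>
#include <stdlib.h>

/* Hypothetical tiers standing in for ecache_dirty / ecache_muzzy /
 * ecache_retained; each returns NULL when it cannot satisfy the request. */
static void *dirty_cache_alloc(size_t size) { (void)size; return NULL; }
static void *muzzy_cache_alloc(size_t size) { (void)size; return NULL; }
static void *grow_retained(size_t size) { return malloc(size); }

/* First-hit-wins cascade, cheapest tier first. */
static void *
tiered_alloc(size_t size) {
    void *p = dirty_cache_alloc(size);
    if (p == NULL) {
        p = muzzy_cache_alloc(size);
    }
    if (p == NULL) {
        p = grow_retained(size);
    }
    return p;
}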
+
+static edata_t *
+pac_alloc_new_guarded(tsdn_t *tsdn, pac_t *pac, ehooks_t *ehooks, size_t size,
+ size_t alignment, bool zero, bool frequent_reuse) {
+ assert(alignment <= PAGE);
+
+ edata_t *edata;
+ if (san_bump_enabled() && frequent_reuse) {
+ edata = san_bump_alloc(tsdn, &pac->sba, pac, ehooks, size,
+ zero);
+ } else {
+ size_t size_with_guards = san_two_side_guarded_sz(size);
+ /* Allocate a non-guarded extent first. */
+ edata = pac_alloc_real(tsdn, pac, ehooks, size_with_guards,
+ /* alignment */ PAGE, zero, /* guarded */ false);
+ if (edata != NULL) {
+ /* Add guards around it. */
+ assert(edata_size_get(edata) == size_with_guards);
+ san_guard_pages_two_sided(tsdn, ehooks, edata,
+ pac->emap, true);
+ }
+ }
+ assert(edata == NULL || (edata_guarded_get(edata) &&
+ edata_size_get(edata) == size));
+
+ return edata;
+}
+
+static edata_t *
+pac_alloc_impl(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment,
+ bool zero, bool guarded, bool frequent_reuse,
+ bool *deferred_work_generated) {
+ pac_t *pac = (pac_t *)self;
+ ehooks_t *ehooks = pac_ehooks_get(pac);
+
+ edata_t *edata = NULL;
+ /*
+ * The condition is an optimization: guarded allocations that are not
+ * frequently reused are never put in the ecache, and pac_alloc_real
+ * does not grow retained for guarded allocations, so it would always
+ * return NULL for such allocations.
+ */
+ if (!guarded || frequent_reuse) {
+ edata = pac_alloc_real(tsdn, pac, ehooks, size, alignment,
+ zero, guarded);
+ }
+ if (edata == NULL && guarded) {
+ /* No cached guarded extents; creating a new one. */
+ edata = pac_alloc_new_guarded(tsdn, pac, ehooks, size,
+ alignment, zero, frequent_reuse);
+ }
+
+ return edata;
+}
+
+static bool
+pac_expand_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
+ size_t new_size, bool zero, bool *deferred_work_generated) {
+ pac_t *pac = (pac_t *)self;
+ ehooks_t *ehooks = pac_ehooks_get(pac);
+
+ size_t mapped_add = 0;
+ size_t expand_amount = new_size - old_size;
+
+ if (ehooks_merge_will_fail(ehooks)) {
+ return true;
+ }
+ edata_t *trail = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_dirty,
+ edata, expand_amount, PAGE, zero, /* guarded */ false);
+ if (trail == NULL) {
+ trail = ecache_alloc(tsdn, pac, ehooks, &pac->ecache_muzzy,
+ edata, expand_amount, PAGE, zero, /* guarded */ false);
+ }
+ if (trail == NULL) {
+ trail = ecache_alloc_grow(tsdn, pac, ehooks,
+ &pac->ecache_retained, edata, expand_amount, PAGE, zero,
+ /* guarded */ false);
+ mapped_add = expand_amount;
+ }
+ if (trail == NULL) {
+ return true;
+ }
+ if (extent_merge_wrapper(tsdn, pac, ehooks, edata, trail)) {
+ extent_dalloc_wrapper(tsdn, pac, ehooks, trail);
+ return true;
+ }
+ if (config_stats && mapped_add > 0) {
+ atomic_fetch_add_zu(&pac->stats->pac_mapped, mapped_add,
+ ATOMIC_RELAXED);
+ }
+ return false;
+}
+
+static bool
+pac_shrink_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
+ size_t new_size, bool *deferred_work_generated) {
+ pac_t *pac = (pac_t *)self;
+ ehooks_t *ehooks = pac_ehooks_get(pac);
+
+ size_t shrink_amount = old_size - new_size;
+
+ if (ehooks_split_will_fail(ehooks)) {
+ return true;
+ }
+
+ edata_t *trail = extent_split_wrapper(tsdn, pac, ehooks, edata,
+ new_size, shrink_amount, /* holding_core_locks */ false);
+ if (trail == NULL) {
+ return true;
+ }
+ ecache_dalloc(tsdn, pac, ehooks, &pac->ecache_dirty, trail);
+ *deferred_work_generated = true;
+ return false;
+}
+
+static void
+pac_dalloc_impl(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ bool *deferred_work_generated) {
+ pac_t *pac = (pac_t *)self;
+ ehooks_t *ehooks = pac_ehooks_get(pac);
+
+ if (edata_guarded_get(edata)) {
+ /*
+ * Because cached guarded extents do exact fit only, large
+ * guarded extents are restored on dalloc eagerly (otherwise
+ * they will not be reused efficiently). Slabs come in a
+ * limited number of size classes and tend to cycle faster.
+ *
+ * Where coalescing is restricted (VirtualFree on Windows),
+ * guarded extents are also not cached -- otherwise, during
+ * arena destroy / reset, the retained extents would not be
+ * whole regions (i.e. they would be split between regular and
+ * guarded).
+ */
+ if (!edata_slab_get(edata) || !maps_coalesce) {
+ assert(edata_size_get(edata) >= SC_LARGE_MINCLASS ||
+ !maps_coalesce);
+ san_unguard_pages_two_sided(tsdn, ehooks, edata,
+ pac->emap);
+ }
+ }
+
+ ecache_dalloc(tsdn, pac, ehooks, &pac->ecache_dirty, edata);
+ /* Purging of deallocated pages is deferred */
+ *deferred_work_generated = true;
+}
+
+static inline uint64_t
+pac_ns_until_purge(tsdn_t *tsdn, decay_t *decay, size_t npages) {
+ if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
+ /* Use minimal interval if decay is contended. */
+ return BACKGROUND_THREAD_DEFERRED_MIN;
+ }
+ uint64_t result = decay_ns_until_purge(decay, npages,
+ ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD);
+
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+ return result;
+}
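
pac_ns_until_purge illustrates a small scheduling trick: when asked how long until deferred purging is due, it refuses to block on a contended decay mutex and instead answers with the minimum wait, which is always safe because waking up early merely re-checks. A sketch of that pattern over a plain pthread mutex (names and the 1 ms floor are hypothetical):

#include <pthread.h>
#include <stdint.h>

#define DEMO_MIN_WAIT_NS 1000000 /* hypothetical 1 ms floor */

/* Never block just to compute a wakeup time: a pessimistic (short)
 * answer is always safe, since an early wakeup simply re-checks. */
static uint64_t
demo_ns_until_work(pthread_mutex_t *mtx, uint64_t (*compute)(void)) {
    if (pthread_mutex_trylock(mtx) != 0) {
        return DEMO_MIN_WAIT_NS;
    }
    uint64_t ns = compute();
    pthread_mutex_unlock(mtx);
    return ns;
}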
+
+static uint64_t
+pac_time_until_deferred_work(tsdn_t *tsdn, pai_t *self) {
+ uint64_t time;
+ pac_t *pac = (pac_t *)self;
+
+ time = pac_ns_until_purge(tsdn,
+ &pac->decay_dirty,
+ ecache_npages_get(&pac->ecache_dirty));
+ if (time == BACKGROUND_THREAD_DEFERRED_MIN) {
+ return time;
+ }
+
+ uint64_t muzzy = pac_ns_until_purge(tsdn,
+ &pac->decay_muzzy,
+ ecache_npages_get(&pac->ecache_muzzy));
+ if (muzzy < time) {
+ time = muzzy;
+ }
+ return time;
+}
+
+bool
+pac_retain_grow_limit_get_set(tsdn_t *tsdn, pac_t *pac, size_t *old_limit,
+ size_t *new_limit) {
+ pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
+ if (new_limit != NULL) {
+ size_t limit = *new_limit;
+ /* Grow no more than the new limit. */
+ if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) {
+ return true;
+ }
+ }
+
+ malloc_mutex_lock(tsdn, &pac->grow_mtx);
+ if (old_limit != NULL) {
+ *old_limit = sz_pind2sz(pac->exp_grow.limit);
+ }
+ if (new_limit != NULL) {
+ pac->exp_grow.limit = new_ind;
+ }
+ malloc_mutex_unlock(tsdn, &pac->grow_mtx);
+
+ return false;
+}
+
+static size_t
+pac_stash_decayed(tsdn_t *tsdn, pac_t *pac, ecache_t *ecache,
+ size_t npages_limit, size_t npages_decay_max,
+ edata_list_inactive_t *result) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 0);
+ ehooks_t *ehooks = pac_ehooks_get(pac);
+
+ /* Stash extents according to npages_limit. */
+ size_t nstashed = 0;
+ while (nstashed < npages_decay_max) {
+ edata_t *edata = ecache_evict(tsdn, pac, ehooks, ecache,
+ npages_limit);
+ if (edata == NULL) {
+ break;
+ }
+ edata_list_inactive_append(result, edata);
+ nstashed += edata_size_get(edata) >> LG_PAGE;
+ }
+ return nstashed;
+}
+
+static size_t
+pac_decay_stashed(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
+ pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
+ edata_list_inactive_t *decay_extents) {
+ bool err;
+
+ size_t nmadvise = 0;
+ size_t nunmapped = 0;
+ size_t npurged = 0;
+
+ ehooks_t *ehooks = pac_ehooks_get(pac);
+
+ bool try_muzzy = !fully_decay
+ && pac_decay_ms_get(pac, extent_state_muzzy) != 0;
+
+ for (edata_t *edata = edata_list_inactive_first(decay_extents); edata !=
+ NULL; edata = edata_list_inactive_first(decay_extents)) {
+ edata_list_inactive_remove(decay_extents, edata);
+
+ size_t size = edata_size_get(edata);
+ size_t npages = size >> LG_PAGE;
+
+ nmadvise++;
+ npurged += npages;
+
+ switch (ecache->state) {
+ case extent_state_active:
+ not_reached();
+ case extent_state_dirty:
+ if (try_muzzy) {
+ err = extent_purge_lazy_wrapper(tsdn, ehooks,
+ edata, /* offset */ 0, size);
+ if (!err) {
+ ecache_dalloc(tsdn, pac, ehooks,
+ &pac->ecache_muzzy, edata);
+ break;
+ }
+ }
+ JEMALLOC_FALLTHROUGH;
+ case extent_state_muzzy:
+ extent_dalloc_wrapper(tsdn, pac, ehooks, edata);
+ nunmapped += npages;
+ break;
+ case extent_state_retained:
+ default:
+ not_reached();
+ }
+ }
+
+ if (config_stats) {
+ LOCKEDINT_MTX_LOCK(tsdn, *pac->stats_mtx);
+ locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx),
+ &decay_stats->npurge, 1);
+ locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx),
+ &decay_stats->nmadvise, nmadvise);
+ locked_inc_u64(tsdn, LOCKEDINT_MTX(*pac->stats_mtx),
+ &decay_stats->purged, npurged);
+ LOCKEDINT_MTX_UNLOCK(tsdn, *pac->stats_mtx);
+ atomic_fetch_sub_zu(&pac->stats->pac_mapped,
+ nunmapped << LG_PAGE, ATOMIC_RELAXED);
+ }
+
+ return npurged;
+}
+
+/*
+ * Decay at most npages_decay_max pages without violating the invariant
+ * (ecache_npages_get(ecache) >= npages_limit).  An upper bound on the number
+ * of pages to decay is needed to prevent unbounded work: otherwise new pages
+ * could keep being added to the ecache during the current decay run, and the
+ * purging thread would never finish.
+ */
+static void
+pac_decay_to_limit(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
+ pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay,
+ size_t npages_limit, size_t npages_decay_max) {
+ witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
+ WITNESS_RANK_CORE, 1);
+
+ if (decay->purging || npages_decay_max == 0) {
+ return;
+ }
+ decay->purging = true;
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+
+ edata_list_inactive_t decay_extents;
+ edata_list_inactive_init(&decay_extents);
+ size_t npurge = pac_stash_decayed(tsdn, pac, ecache, npages_limit,
+ npages_decay_max, &decay_extents);
+ if (npurge != 0) {
+ size_t npurged = pac_decay_stashed(tsdn, pac, decay,
+ decay_stats, ecache, fully_decay, &decay_extents);
+ assert(npurged == npurge);
+ }
+
+ malloc_mutex_lock(tsdn, &decay->mtx);
+ decay->purging = false;
+}
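
pac_decay_to_limit drops decay->mtx for the expensive stash/purge work and uses decay->purging as a re-entrancy guard, so a second caller that acquires the mutex in the meantime backs off instead of purging concurrently. A self-contained sketch of that guard (hypothetical types; error handling elided):

#include <pthread.h>
#include <stdbool.h>

typedef struct {
    pthread_mutex_t mtx;
    bool purging;   /* guards the unlocked section below */
} demo_decay_t;

static void demo_do_purge(demo_decay_t *d) { (void)d; /* expensive work */ }

/* Caller holds d->mtx on entry and owns it again on return. */
static void
demo_purge_to_limit(demo_decay_t *d) {
    if (d->purging) {
        return;         /* someone else is already purging */
    }
    d->purging = true;
    pthread_mutex_unlock(&d->mtx); /* do the slow part unlocked */
    demo_do_purge(d);
    pthread_mutex_lock(&d->mtx);
    d->purging = false;
}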
+
+void
+pac_decay_all(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
+ pac_decay_stats_t *decay_stats, ecache_t *ecache, bool fully_decay) {
+ malloc_mutex_assert_owner(tsdn, &decay->mtx);
+ pac_decay_to_limit(tsdn, pac, decay, decay_stats, ecache, fully_decay,
+ /* npages_limit */ 0, ecache_npages_get(ecache));
+}
+
+static void
+pac_decay_try_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
+ pac_decay_stats_t *decay_stats, ecache_t *ecache,
+ size_t current_npages, size_t npages_limit) {
+ if (current_npages > npages_limit) {
+ pac_decay_to_limit(tsdn, pac, decay, decay_stats, ecache,
+ /* fully_decay */ false, npages_limit,
+ current_npages - npages_limit);
+ }
+}
+
+bool
+pac_maybe_decay_purge(tsdn_t *tsdn, pac_t *pac, decay_t *decay,
+ pac_decay_stats_t *decay_stats, ecache_t *ecache,
+ pac_purge_eagerness_t eagerness) {
+ malloc_mutex_assert_owner(tsdn, &decay->mtx);
+
+ /* Purge all or nothing if the option is disabled. */
+ ssize_t decay_ms = decay_ms_read(decay);
+ if (decay_ms <= 0) {
+ if (decay_ms == 0) {
+ pac_decay_to_limit(tsdn, pac, decay, decay_stats,
+ ecache, /* fully_decay */ false,
+ /* npages_limit */ 0, ecache_npages_get(ecache));
+ }
+ return false;
+ }
+
+ /*
+ * If the deadline has been reached, advance to the current epoch and
+ * purge to the new limit if necessary. Note that dirty pages created
+ * during the current epoch are not subject to purge until a future
+ * epoch; as a result, purging only happens on epoch advances or when
+ * triggered by background threads (as a scheduled event).
+ */
+ nstime_t time;
+ nstime_init_update(&time);
+ size_t npages_current = ecache_npages_get(ecache);
+ bool epoch_advanced = decay_maybe_advance_epoch(decay, &time,
+ npages_current);
+ if (eagerness == PAC_PURGE_ALWAYS
+ || (epoch_advanced && eagerness == PAC_PURGE_ON_EPOCH_ADVANCE)) {
+ size_t npages_limit = decay_npages_limit_get(decay);
+ pac_decay_try_purge(tsdn, pac, decay, decay_stats, ecache,
+ npages_current, npages_limit);
+ }
+
+ return epoch_advanced;
+}
+
+bool
+pac_decay_ms_set(tsdn_t *tsdn, pac_t *pac, extent_state_t state,
+ ssize_t decay_ms, pac_purge_eagerness_t eagerness) {
+ decay_t *decay;
+ pac_decay_stats_t *decay_stats;
+ ecache_t *ecache;
+ pac_decay_data_get(pac, state, &decay, &decay_stats, &ecache);
+
+ if (!decay_ms_valid(decay_ms)) {
+ return true;
+ }
+
+ malloc_mutex_lock(tsdn, &decay->mtx);
+ /*
+ * Restart decay backlog from scratch, which may cause many dirty pages
+ * to be immediately purged. It would conceptually be possible to map
+ * the old backlog onto the new backlog, but there is no justification
+ * for such complexity since decay_ms changes are intended to be
+ * infrequent, either between the {-1, 0, >0} states, or a one-time
+ * arbitrary change during initial arena configuration.
+ */
+ nstime_t cur_time;
+ nstime_init_update(&cur_time);
+ decay_reinit(decay, &cur_time, decay_ms);
+ pac_maybe_decay_purge(tsdn, pac, decay, decay_stats, ecache, eagerness);
+ malloc_mutex_unlock(tsdn, &decay->mtx);
+
+ return false;
+}
+
+ssize_t
+pac_decay_ms_get(pac_t *pac, extent_state_t state) {
+ decay_t *decay;
+ pac_decay_stats_t *decay_stats;
+ ecache_t *ecache;
+ pac_decay_data_get(pac, state, &decay, &decay_stats, &ecache);
+ return decay_ms_read(decay);
+}
+
+void
+pac_reset(tsdn_t *tsdn, pac_t *pac) {
+ /*
+ * No-op for now; purging is still done at the arena-level. It should
+ * get moved in here, though.
+ */
+ (void)tsdn;
+ (void)pac;
+}
+
+void
+pac_destroy(tsdn_t *tsdn, pac_t *pac) {
+ assert(ecache_npages_get(&pac->ecache_dirty) == 0);
+ assert(ecache_npages_get(&pac->ecache_muzzy) == 0);
+ /*
+ * Iterate over the retained extents and destroy them. This gives the
+ * extent allocator underlying the extent hooks an opportunity to unmap
+ * all retained memory without having to keep its own metadata
+ * structures. In practice, virtual memory for dss-allocated extents is
+ * leaked here, so best practice is to avoid dss for arenas to be
+ * destroyed, or provide custom extent hooks that track retained
+ * dss-based extents for later reuse.
+ */
+ ehooks_t *ehooks = pac_ehooks_get(pac);
+ edata_t *edata;
+ while ((edata = ecache_evict(tsdn, pac, ehooks,
+ &pac->ecache_retained, 0)) != NULL) {
+ extent_destroy_wrapper(tsdn, pac, ehooks, edata);
+ }
+}
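
pac_destroy drains the retained ecache with the usual evict-until-empty idiom: repeatedly pop one cached element and destroy it, letting the cache's own bookkeeping drive the teardown rather than a second traversal structure. The same shape in miniature, over a plain list (illustrative only):

#include <stdlib.h>

typedef struct demo_node_s demo_node_t;
struct demo_node_s {
    demo_node_t *next;
};

/* Pop one element, or NULL when the cache is empty. */
static demo_node_t *
demo_evict(demo_node_t **head) {
    demo_node_t *n = *head;
    if (n != NULL) {
        *head = n->next;
    }
    return n;
}

static void
demo_drain_and_destroy(demo_node_t **head) {
    demo_node_t *n;
    while ((n = demo_evict(head)) != NULL) {
        free(n); /* stands in for extent_destroy_wrapper() */
    }
}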
diff --git a/contrib/jemalloc/src/pages.c b/contrib/jemalloc/src/pages.c
index 1050c3925153..4b46687d70fc 100644
--- a/contrib/jemalloc/src/pages.c
+++ b/contrib/jemalloc/src/pages.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_PAGES_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/pages.h"
@@ -16,6 +15,14 @@
#include <vm/vm.h>
#endif
#endif
+#ifdef __NetBSD__
+#include <sys/bitops.h> /* ilog2 */
+#endif
+#ifdef JEMALLOC_HAVE_VM_MAKE_TAG
+#define PAGES_FD_TAG VM_MAKE_TAG(101U)
+#else
+#define PAGES_FD_TAG -1
+#endif
/******************************************************************************/
/* Data. */
@@ -42,6 +49,57 @@ thp_mode_t init_system_thp_mode;
/* Runtime support for lazy purge. Irrelevant when !pages_can_purge_lazy. */
static bool pages_can_purge_lazy_runtime = true;
+#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
+static int madvise_dont_need_zeros_is_faulty = -1;
+/**
+ * Check that MADV_DONTNEED will actually zero pages on subsequent access.
+ *
+ * qemu does not support this yet [1], and you can hit a very tricky
+ * assertion failure if you run a program that uses jemalloc under qemu:
+ *
+ * <jemalloc>: ../contrib/jemalloc/src/extent.c:1195: Failed assertion: "p[i] == 0"
+ *
+ * [1]: https://patchwork.kernel.org/patch/10576637/
+ */
+static int
+madvise_MADV_DONTNEED_zeroes_pages(void) {
+ int works = -1;
+ size_t size = PAGE;
+
+ void *addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
+
+ if (addr == MAP_FAILED) {
+ malloc_write("<jemalloc>: Cannot allocate memory for "
+ "MADV_DONTNEED check\n");
+ if (opt_abort) {
+ abort();
+ }
+ }
+
+ memset(addr, 'A', size);
+ if (madvise(addr, size, MADV_DONTNEED) == 0) {
+ works = memchr(addr, 'A', size) == NULL;
+ } else {
+ /*
+ * If madvise() does not support MADV_DONTNEED, then we can
+ * call it anyway and use its return code.
+ */
+ works = 1;
+ }
+
+ if (munmap(addr, size) != 0) {
+ malloc_write("<jemalloc>: Cannot deallocate memory for "
+ "MADV_DONTNEED check\n");
+ if (opt_abort) {
+ abort();
+ }
+ }
+
+ return works;
+}
+#endif
+
/******************************************************************************/
/*
* Function prototypes for static functions that are referenced prior to
@@ -76,9 +134,21 @@ os_pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
* of existing mappings, and we only want to create new mappings.
*/
{
+#ifdef __NetBSD__
+ /*
+ * On NetBSD, PAGE is defined as the maximum page size
+ * across all machine architectures for the platform, so
+ * that the same binaries can be used on all of them.
+ */
+ if (alignment > os_page || PAGE > os_page) {
+ unsigned int a = ilog2(MAX(alignment, PAGE));
+ mmap_flags |= MAP_ALIGNED(a);
+ }
+#endif
int prot = *commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
- ret = mmap(addr, size, prot, mmap_flags, -1, 0);
+ ret = mmap(addr, size, prot, mmap_flags, PAGES_FD_TAG, 0);
}
assert(ret != NULL);
@@ -199,8 +269,8 @@ pages_map(void *addr, size_t size, size_t alignment, bool *commit) {
flags |= MAP_FIXED | MAP_EXCL;
} else {
unsigned alignment_bits = ffs_zu(alignment);
- assert(alignment_bits > 1);
- flags |= MAP_ALIGNED(alignment_bits - 1);
+ assert(alignment_bits > 0);
+ flags |= MAP_ALIGNED(alignment_bits);
}
void *ret = mmap(addr, size, prot, flags, -1, 0);
@@ -248,14 +318,10 @@ pages_unmap(void *addr, size_t size) {
}
static bool
-pages_commit_impl(void *addr, size_t size, bool commit) {
+os_pages_commit(void *addr, size_t size, bool commit) {
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
- if (os_overcommits) {
- return true;
- }
-
#ifdef _WIN32
return (commit ? (addr != VirtualAlloc(addr, size, MEM_COMMIT,
PAGE_READWRITE)) : (!VirtualFree(addr, size, MEM_DECOMMIT)));
@@ -263,7 +329,7 @@ pages_commit_impl(void *addr, size_t size, bool commit) {
{
int prot = commit ? PAGES_PROT_COMMIT : PAGES_PROT_DECOMMIT;
void *result = mmap(addr, size, prot, mmap_flags | MAP_FIXED,
- -1, 0);
+ PAGES_FD_TAG, 0);
if (result == MAP_FAILED) {
return true;
}
@@ -280,6 +346,15 @@ pages_commit_impl(void *addr, size_t size, bool commit) {
#endif
}
+static bool
+pages_commit_impl(void *addr, size_t size, bool commit) {
+ if (os_overcommits) {
+ return true;
+ }
+
+ return os_pages_commit(addr, size, commit);
+}
+
bool
pages_commit(void *addr, size_t size) {
return pages_commit_impl(addr, size, true);
@@ -290,6 +365,66 @@ pages_decommit(void *addr, size_t size) {
return pages_commit_impl(addr, size, false);
}
+void
+pages_mark_guards(void *head, void *tail) {
+ assert(head != NULL || tail != NULL);
+ assert(head == NULL || tail == NULL ||
+ (uintptr_t)head < (uintptr_t)tail);
+#ifdef JEMALLOC_HAVE_MPROTECT
+ if (head != NULL) {
+ mprotect(head, PAGE, PROT_NONE);
+ }
+ if (tail != NULL) {
+ mprotect(tail, PAGE, PROT_NONE);
+ }
+#else
+ /* Decommit sets to PROT_NONE / MEM_DECOMMIT. */
+ if (head != NULL) {
+ os_pages_commit(head, PAGE, false);
+ }
+ if (tail != NULL) {
+ os_pages_commit(tail, PAGE, false);
+ }
+#endif
+}
+
+void
+pages_unmark_guards(void *head, void *tail) {
+ assert(head != NULL || tail != NULL);
+ assert(head == NULL || tail == NULL ||
+ (uintptr_t)head < (uintptr_t)tail);
+#ifdef JEMALLOC_HAVE_MPROTECT
+ bool head_and_tail = (head != NULL) && (tail != NULL);
+ size_t range = head_and_tail ?
+ (uintptr_t)tail - (uintptr_t)head + PAGE :
+ SIZE_T_MAX;
+ /*
+ * The amount of work that the kernel does in mprotect depends on the
+ * range argument. SC_LARGE_MINCLASS is an arbitrary threshold chosen
+ * to keep the kernel from doing so much work that it would outweigh
+ * the savings of performing one less system call.
+ */
+ bool ranged_mprotect = head_and_tail && range <= SC_LARGE_MINCLASS;
+ if (ranged_mprotect) {
+ mprotect(head, range, PROT_READ | PROT_WRITE);
+ } else {
+ if (head != NULL) {
+ mprotect(head, PAGE, PROT_READ | PROT_WRITE);
+ }
+ if (tail != NULL) {
+ mprotect(tail, PAGE, PROT_READ | PROT_WRITE);
+ }
+ }
+#else
+ if (head != NULL) {
+ os_pages_commit(head, PAGE, true);
+ }
+ if (tail != NULL) {
+ os_pages_commit(tail, PAGE, true);
+ }
+#endif
+}
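
pages_mark_guards/pages_unmark_guards put PROT_NONE fences around an extent; unmarking may cover head, interior, and tail with one ranged mprotect when the region is small enough that the extra kernel-side work does not outweigh saving a system call. A minimal stand-alone illustration of the guard-page idea (not jemalloc code; no error handling beyond the initial mmap):

#include <stddef.h>
#include <string.h>
#include <sys/mman.h>

/* Map three pages and turn the first and last into PROT_NONE guards;
 * any overflow or underflow out of the middle page faults immediately. */
static void *
demo_guarded_page(size_t page) {
    char *base = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == MAP_FAILED) {
        return NULL;
    }
    mprotect(base, page, PROT_NONE);            /* head guard */
    mprotect(base + 2 * page, page, PROT_NONE); /* tail guard */
    memset(base + page, 0, page);               /* usable interior */
    return base + page;
}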
+
bool
pages_purge_lazy(void *addr, size_t size) {
assert(ALIGNMENT_ADDR2BASE(addr, os_page) == addr);
@@ -320,6 +455,9 @@ pages_purge_lazy(void *addr, size_t size) {
#elif defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
!defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
return (madvise(addr, size, MADV_DONTNEED) != 0);
+#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) && \
+ !defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS)
+ return (posix_madvise(addr, size, POSIX_MADV_DONTNEED) != 0);
#else
not_reached();
#endif
@@ -336,7 +474,12 @@ pages_purge_forced(void *addr, size_t size) {
#if defined(JEMALLOC_PURGE_MADVISE_DONTNEED) && \
defined(JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS)
- return (madvise(addr, size, MADV_DONTNEED) != 0);
+ return (unlikely(madvise_dont_need_zeros_is_faulty) ||
+ madvise(addr, size, MADV_DONTNEED) != 0);
+#elif defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED) && \
+ defined(JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS)
+ return (unlikely(madvise_dont_need_zeros_is_faulty) ||
+ posix_madvise(addr, size, POSIX_MADV_DONTNEED) != 0);
#elif defined(JEMALLOC_MAPS_COALESCE)
/* Try to overlay a new demand-zeroed mapping. */
return pages_commit(addr, size);
@@ -351,8 +494,13 @@ pages_huge_impl(void *addr, size_t size, bool aligned) {
assert(HUGEPAGE_ADDR2BASE(addr) == addr);
assert(HUGEPAGE_CEILING(size) == size);
}
-#ifdef JEMALLOC_HAVE_MADVISE_HUGE
+#if defined(JEMALLOC_HAVE_MADVISE_HUGE)
return (madvise(addr, size, MADV_HUGEPAGE) != 0);
+#elif defined(JEMALLOC_HAVE_MEMCNTL)
+ struct memcntl_mha m = {0};
+ m.mha_cmd = MHA_MAPSIZE_VA;
+ m.mha_pagesize = HUGEPAGE;
+ return (memcntl(addr, size, MC_HAT_ADVISE, (caddr_t)&m, 0, 0) == 0);
#else
return true;
#endif
@@ -396,8 +544,10 @@ bool
pages_dontdump(void *addr, size_t size) {
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
-#ifdef JEMALLOC_MADVISE_DONTDUMP
+#if defined(JEMALLOC_MADVISE_DONTDUMP)
return madvise(addr, size, MADV_DONTDUMP) != 0;
+#elif defined(JEMALLOC_MADVISE_NOCORE)
+ return madvise(addr, size, MADV_NOCORE) != 0;
#else
return false;
#endif
@@ -407,8 +557,10 @@ bool
pages_dodump(void *addr, size_t size) {
assert(PAGE_ADDR2BASE(addr) == addr);
assert(PAGE_CEILING(size) == size);
-#ifdef JEMALLOC_MADVISE_DONTDUMP
+#if defined(JEMALLOC_MADVISE_DONTDUMP)
return madvise(addr, size, MADV_DODUMP) != 0;
+#elif defined(JEMALLOC_MADVISE_NOCORE)
+ return madvise(addr, size, MADV_CORE) != 0;
#else
return false;
#endif
@@ -554,14 +706,14 @@ pages_set_thp_state (void *ptr, size_t size) {
static void
init_thp_state(void) {
- if (!have_madvise_huge) {
+ if (!have_madvise_huge && !have_memcntl) {
if (metadata_thp_enabled() && opt_abort) {
malloc_write("<jemalloc>: no MADV_HUGEPAGE support\n");
abort();
}
goto label_error;
}
-
+#if defined(JEMALLOC_HAVE_MADVISE_HUGE)
static const char sys_state_madvise[] = "always [madvise] never\n";
static const char sys_state_always[] = "[always] madvise never\n";
static const char sys_state_never[] = "always madvise [never]\n";
@@ -570,6 +722,9 @@ init_thp_state(void) {
#if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
int fd = (int)syscall(SYS_open,
"/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
+#elif defined(JEMALLOC_USE_SYSCALL) && defined(SYS_openat)
+ int fd = (int)syscall(SYS_openat,
+ AT_FDCWD, "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#else
int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
#endif
@@ -585,7 +740,7 @@ init_thp_state(void) {
#endif
if (nread < 0) {
- goto label_error;
+ goto label_error;
}
if (strncmp(buf, sys_state_madvise, (size_t)nread) == 0) {
@@ -598,6 +753,10 @@ init_thp_state(void) {
goto label_error;
}
return;
+#elif defined(JEMALLOC_HAVE_MEMCNTL)
+ init_system_thp_mode = thp_mode_default;
+ return;
+#endif
label_error:
opt_thp = init_system_thp_mode = thp_mode_not_supported;
}
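
On Linux, init_thp_state classifies the system-wide setting by parsing the bracketed token in the transparent_hugepage sysfs knob read above. A hedged stand-alone version of that classification, using plain open/read rather than the syscall variants:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

/* Returns "always", "madvise", "never", or "unknown", depending on which
 * token is bracketed in the sysfs file, e.g. "always [madvise] never". */
static const char *
demo_thp_sysfs_mode(void) {
    char buf[64];
    int fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
    if (fd < 0) {
        return "unknown";
    }
    ssize_t n = read(fd, buf, sizeof(buf) - 1);
    close(fd);
    if (n <= 0) {
        return "unknown";
    }
    buf[n] = '\0';
    if (strstr(buf, "[always]") != NULL) return "always";
    if (strstr(buf, "[madvise]") != NULL) return "madvise";
    if (strstr(buf, "[never]") != NULL) return "never";
    return "unknown";
}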
@@ -613,6 +772,20 @@ pages_boot(void) {
return true;
}
+#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED_ZEROS
+ if (!opt_trust_madvise) {
+ madvise_dont_need_zeros_is_faulty = !madvise_MADV_DONTNEED_zeroes_pages();
+ if (madvise_dont_need_zeros_is_faulty) {
+ malloc_write("<jemalloc>: MADV_DONTNEED does not work (memset will be used instead)\n");
+ malloc_write("<jemalloc>: (This is the expected behaviour if you are running under QEMU)\n");
+ }
+ } else {
+ /*
+ * opt_trust_madvise is enabled; trust MADV_DONTNEED and skip
+ * the runtime check.
+ */
+ madvise_dont_need_zeros_is_faulty = 0;
+ }
+#endif
+
#ifndef _WIN32
mmap_flags = MAP_PRIVATE | MAP_ANON;
#endif
@@ -626,6 +799,8 @@ pages_boot(void) {
mmap_flags |= MAP_NORESERVE;
}
# endif
+#elif defined(__NetBSD__)
+ os_overcommits = true;
#else
os_overcommits = false;
#endif
diff --git a/contrib/jemalloc/src/pai.c b/contrib/jemalloc/src/pai.c
new file mode 100644
index 000000000000..45c87729278d
--- /dev/null
+++ b/contrib/jemalloc/src/pai.c
@@ -0,0 +1,31 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+size_t
+pai_alloc_batch_default(tsdn_t *tsdn, pai_t *self, size_t size, size_t nallocs,
+ edata_list_active_t *results, bool *deferred_work_generated) {
+ for (size_t i = 0; i < nallocs; i++) {
+ bool deferred_by_alloc = false;
+ edata_t *edata = pai_alloc(tsdn, self, size, PAGE,
+ /* zero */ false, /* guarded */ false,
+ /* frequent_reuse */ false, &deferred_by_alloc);
+ *deferred_work_generated |= deferred_by_alloc;
+ if (edata == NULL) {
+ return i;
+ }
+ edata_list_active_append(results, edata);
+ }
+ return nallocs;
+}
+
+void
+pai_dalloc_batch_default(tsdn_t *tsdn, pai_t *self,
+ edata_list_active_t *list, bool *deferred_work_generated) {
+ edata_t *edata;
+ while ((edata = edata_list_active_first(list)) != NULL) {
+ bool deferred_by_dalloc = false;
+ edata_list_active_remove(list, edata);
+ pai_dalloc(tsdn, self, edata, &deferred_by_dalloc);
+ *deferred_work_generated |= deferred_by_dalloc;
+ }
+}
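
Both defaults reduce a batch operation to a loop of single-object calls through the pai_t vtable, ORing each call's deferred-work flag into the caller's; the alloc variant stops at the first failure and reports how many objects it did produce. The shape of that vtable-with-generic-default pattern, in a reduced hypothetical form:

#include <stddef.h>

/* Hypothetical page-allocator-interface vtable, mirroring the pai_t idea:
 * implementations fill in alloc and may reuse the generic batch below. */
typedef struct pai_demo_s pai_demo_t;
struct pai_demo_s {
    void *(*alloc)(pai_demo_t *self, size_t size);
    size_t (*alloc_batch)(pai_demo_t *self, size_t size, size_t n,
        void **out);
};

/* Generic batch: call the single-object hook n times, stop on failure. */
static size_t
pai_demo_alloc_batch_default(pai_demo_t *self, size_t size, size_t n,
    void **out) {
    for (size_t i = 0; i < n; i++) {
        out[i] = self->alloc(self, size);
        if (out[i] == NULL) {
            return i; /* partial batch: i objects were produced */
        }
    }
    return n;
}

An implementation would set .alloc_batch = pai_demo_alloc_batch_default unless it has a faster specialized path, which is exactly how pac_init wires pai_alloc_batch_default above.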
diff --git a/contrib/jemalloc/src/peak_event.c b/contrib/jemalloc/src/peak_event.c
new file mode 100644
index 000000000000..4093fbcc691e
--- /dev/null
+++ b/contrib/jemalloc/src/peak_event.c
@@ -0,0 +1,82 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/peak_event.h"
+
+#include "jemalloc/internal/activity_callback.h"
+#include "jemalloc/internal/peak.h"
+
+/*
+ * Update every 64K by default. We're not exposing this as a configuration
+ * option for now; we don't want to bind ourselves too tightly to any particular
+ * performance requirements for small values, or guarantee that we'll even be
+ * able to provide fine-grained accuracy.
+ */
+#define PEAK_EVENT_WAIT (64 * 1024)
+
+/* Update the peak with current tsd state. */
+void
+peak_event_update(tsd_t *tsd) {
+ uint64_t alloc = tsd_thread_allocated_get(tsd);
+ uint64_t dalloc = tsd_thread_deallocated_get(tsd);
+ peak_t *peak = tsd_peakp_get(tsd);
+ peak_update(peak, alloc, dalloc);
+}
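
peak_event_update feeds the thread's two monotone counters (bytes ever allocated and ever deallocated) into the peak tracker, which in essence keeps the maximum of their difference; because updates fire only every PEAK_EVENT_WAIT bytes of activity, the recorded peak is approximate to that granularity. A hedged sketch of the tracker's core (a simplification of ours; the real peak_t lives in peak.h):

#include <stdint.h>

typedef struct {
    int64_t max; /* highest net usage seen at an update point */
} demo_peak_t;

static void
demo_peak_update(demo_peak_t *p, uint64_t alloc, uint64_t dalloc) {
    /* Net live bytes; signed, since a thread may free more than it
     * allocated (e.g. memory allocated by another thread). */
    int64_t cur = (int64_t)(alloc - dalloc);
    if (cur > p->max) {
        p->max = cur;
    }
}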
+
+static void
+peak_event_activity_callback(tsd_t *tsd) {
+ activity_callback_thunk_t *thunk = tsd_activity_callback_thunkp_get(
+ tsd);
+ uint64_t alloc = tsd_thread_allocated_get(tsd);
+ uint64_t dalloc = tsd_thread_deallocated_get(tsd);
+ if (thunk->callback != NULL) {
+ thunk->callback(thunk->uctx, alloc, dalloc);
+ }
+}
+
+/* Set current state to zero. */
+void
+peak_event_zero(tsd_t *tsd) {
+ uint64_t alloc = tsd_thread_allocated_get(tsd);
+ uint64_t dalloc = tsd_thread_deallocated_get(tsd);
+ peak_t *peak = tsd_peakp_get(tsd);
+ peak_set_zero(peak, alloc, dalloc);
+}
+
+uint64_t
+peak_event_max(tsd_t *tsd) {
+ peak_t *peak = tsd_peakp_get(tsd);
+ return peak_max(peak);
+}
+
+uint64_t
+peak_alloc_new_event_wait(tsd_t *tsd) {
+ return PEAK_EVENT_WAIT;
+}
+
+uint64_t
+peak_alloc_postponed_event_wait(tsd_t *tsd) {
+ return TE_MIN_START_WAIT;
+}
+
+void
+peak_alloc_event_handler(tsd_t *tsd, uint64_t elapsed) {
+ peak_event_update(tsd);
+ peak_event_activity_callback(tsd);
+}
+
+uint64_t
+peak_dalloc_new_event_wait(tsd_t *tsd) {
+ return PEAK_EVENT_WAIT;
+}
+
+uint64_t
+peak_dalloc_postponed_event_wait(tsd_t *tsd) {
+ return TE_MIN_START_WAIT;
+}
+
+void
+peak_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed) {
+ peak_event_update(tsd);
+ peak_event_activity_callback(tsd);
+}
diff --git a/contrib/jemalloc/src/prng.c b/contrib/jemalloc/src/prng.c
deleted file mode 100644
index 83c04bf9b5dd..000000000000
--- a/contrib/jemalloc/src/prng.c
+++ /dev/null
@@ -1,3 +0,0 @@
-#define JEMALLOC_PRNG_C_
-#include "jemalloc/internal/jemalloc_preamble.h"
-#include "jemalloc/internal/jemalloc_internal_includes.h"
diff --git a/contrib/jemalloc/src/prof.c b/contrib/jemalloc/src/prof.c
index 13334cb4c0ba..7a6d5d569a48 100644
--- a/contrib/jemalloc/src/prof.c
+++ b/contrib/jemalloc/src/prof.c
@@ -1,1126 +1,199 @@
-#define JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
+#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/assert.h"
-#include "jemalloc/internal/ckh.h"
-#include "jemalloc/internal/hash.h"
-#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
-#include "jemalloc/internal/emitter.h"
+#include "jemalloc/internal/counter.h"
+#include "jemalloc/internal/prof_data.h"
+#include "jemalloc/internal/prof_log.h"
+#include "jemalloc/internal/prof_recent.h"
+#include "jemalloc/internal/prof_stats.h"
+#include "jemalloc/internal/prof_sys.h"
+#include "jemalloc/internal/prof_hook.h"
+#include "jemalloc/internal/thread_event.h"
-/******************************************************************************/
-
-#ifdef JEMALLOC_PROF_LIBUNWIND
-#define UNW_LOCAL_ONLY
-#include <libunwind.h>
-#endif
-
-#ifdef JEMALLOC_PROF_LIBGCC
/*
- * We have a circular dependency -- jemalloc_internal.h tells us if we should
- * use libgcc's unwinding functionality, but after we've included that, we've
- * already hooked _Unwind_Backtrace. We'll temporarily disable hooking.
+ * This file implements the profiling "APIs" needed by other parts of jemalloc,
+ * and also manages the relevant "operational" data, mainly options and mutexes;
+ * the core profiling data structures are encapsulated in prof_data.c.
*/
-#undef _Unwind_Backtrace
-#include <unwind.h>
-#define _Unwind_Backtrace JEMALLOC_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
-#endif
/******************************************************************************/
+
/* Data. */
-bool opt_prof = false;
-bool opt_prof_active = true;
-bool opt_prof_thread_active_init = true;
-size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
-ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
-bool opt_prof_gdump = false;
-bool opt_prof_final = false;
-bool opt_prof_leak = false;
-bool opt_prof_accum = false;
-bool opt_prof_log = false;
-char opt_prof_prefix[
- /* Minimize memory bloat for non-prof builds. */
-#ifdef JEMALLOC_PROF
- PATH_MAX +
-#endif
- 1];
+bool opt_prof = false;
+bool opt_prof_active = true;
+bool opt_prof_thread_active_init = true;
+size_t opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
+ssize_t opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
+bool opt_prof_gdump = false;
+bool opt_prof_final = false;
+bool opt_prof_leak = false;
+bool opt_prof_leak_error = false;
+bool opt_prof_accum = false;
+char opt_prof_prefix[PROF_DUMP_FILENAME_LEN];
+bool opt_prof_sys_thread_name = false;
+bool opt_prof_unbias = true;
+
+/* Accessed via prof_sample_event_handler(). */
+static counter_accum_t prof_idump_accumulated;
/*
* Initialized as opt_prof_active, and accessed via
* prof_active_[gs]et{_unlocked,}().
*/
-bool prof_active;
-static malloc_mutex_t prof_active_mtx;
+bool prof_active_state;
+static malloc_mutex_t prof_active_mtx;
/*
* Initialized as opt_prof_thread_active_init, and accessed via
* prof_thread_active_init_[gs]et().
*/
-static bool prof_thread_active_init;
-static malloc_mutex_t prof_thread_active_init_mtx;
+static bool prof_thread_active_init;
+static malloc_mutex_t prof_thread_active_init_mtx;
/*
* Initialized as opt_prof_gdump, and accessed via
* prof_gdump_[gs]et{_unlocked,}().
*/
-bool prof_gdump_val;
-static malloc_mutex_t prof_gdump_mtx;
-
-uint64_t prof_interval = 0;
-
-size_t lg_prof_sample;
-
-typedef enum prof_logging_state_e prof_logging_state_t;
-enum prof_logging_state_e {
- prof_logging_state_stopped,
- prof_logging_state_started,
- prof_logging_state_dumping
-};
-
-/*
- * - stopped: log_start never called, or previous log_stop has completed.
- * - started: log_start called, log_stop not called yet. Allocations are logged.
- * - dumping: log_stop called but not finished; samples are not logged anymore.
- */
-prof_logging_state_t prof_logging_state = prof_logging_state_stopped;
-
-#ifdef JEMALLOC_JET
-static bool prof_log_dummy = false;
-#endif
-
-/* Incremented for every log file that is output. */
-static uint64_t log_seq = 0;
-static char log_filename[
- /* Minimize memory bloat for non-prof builds. */
-#ifdef JEMALLOC_PROF
- PATH_MAX +
-#endif
- 1];
-
-/* Timestamp for most recent call to log_start(). */
-static nstime_t log_start_timestamp = NSTIME_ZERO_INITIALIZER;
-
-/* Increment these when adding to the log_bt and log_thr linked lists. */
-static size_t log_bt_index = 0;
-static size_t log_thr_index = 0;
-
-/* Linked list node definitions. These are only used in prof.c. */
-typedef struct prof_bt_node_s prof_bt_node_t;
-
-struct prof_bt_node_s {
- prof_bt_node_t *next;
- size_t index;
- prof_bt_t bt;
- /* Variable size backtrace vector pointed to by bt. */
- void *vec[1];
-};
-
-typedef struct prof_thr_node_s prof_thr_node_t;
-
-struct prof_thr_node_s {
- prof_thr_node_t *next;
- size_t index;
- uint64_t thr_uid;
- /* Variable size based on thr_name_sz. */
- char name[1];
-};
-
-typedef struct prof_alloc_node_s prof_alloc_node_t;
-
-/* This is output when logging sampled allocations. */
-struct prof_alloc_node_s {
- prof_alloc_node_t *next;
- /* Indices into an array of thread data. */
- size_t alloc_thr_ind;
- size_t free_thr_ind;
+bool prof_gdump_val;
+static malloc_mutex_t prof_gdump_mtx;
- /* Indices into an array of backtraces. */
- size_t alloc_bt_ind;
- size_t free_bt_ind;
+uint64_t prof_interval = 0;
- uint64_t alloc_time_ns;
- uint64_t free_time_ns;
+size_t lg_prof_sample;
- size_t usize;
-};
-
-/*
- * Created on the first call to prof_log_start and deleted on prof_log_stop.
- * These are the backtraces and threads that have already been logged by an
- * allocation.
- */
-static bool log_tables_initialized = false;
-static ckh_t log_bt_node_set;
-static ckh_t log_thr_node_set;
-
-/* Store linked lists for logged data. */
-static prof_bt_node_t *log_bt_first = NULL;
-static prof_bt_node_t *log_bt_last = NULL;
-static prof_thr_node_t *log_thr_first = NULL;
-static prof_thr_node_t *log_thr_last = NULL;
-static prof_alloc_node_t *log_alloc_first = NULL;
-static prof_alloc_node_t *log_alloc_last = NULL;
-
-/* Protects the prof_logging_state and any log_{...} variable. */
-static malloc_mutex_t log_mtx;
-
-/*
- * Table of mutexes that are shared among gctx's. These are leaf locks, so
- * there is no problem with using them for more than one gctx at the same time.
- * The primary motivation for this sharing though is that gctx's are ephemeral,
- * and destroying mutexes causes complications for systems that allocate when
- * creating/destroying mutexes.
- */
-static malloc_mutex_t *gctx_locks;
-static atomic_u_t cum_gctxs; /* Atomic counter. */
-
-/*
- * Table of mutexes that are shared among tdata's. No operations require
- * holding multiple tdata locks, so there is no problem with using them for more
- * than one tdata at the same time, even though a gctx lock may be acquired
- * while holding a tdata lock.
- */
-static malloc_mutex_t *tdata_locks;
-
-/*
- * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data
- * structure that knows about all backtraces currently captured.
- */
-static ckh_t bt2gctx;
-/* Non static to enable profiling. */
-malloc_mutex_t bt2gctx_mtx;
-
-/*
- * Tree of all extant prof_tdata_t structures, regardless of state,
- * {attached,detached,expired}.
- */
-static prof_tdata_tree_t tdatas;
-static malloc_mutex_t tdatas_mtx;
-
-static uint64_t next_thr_uid;
-static malloc_mutex_t next_thr_uid_mtx;
-
-static malloc_mutex_t prof_dump_seq_mtx;
-static uint64_t prof_dump_seq;
-static uint64_t prof_dump_iseq;
-static uint64_t prof_dump_mseq;
-static uint64_t prof_dump_useq;
-
-/*
- * This buffer is rather large for stack allocation, so use a single buffer for
- * all profile dumps.
- */
-static malloc_mutex_t prof_dump_mtx;
-static char prof_dump_buf[
- /* Minimize memory bloat for non-prof builds. */
-#ifdef JEMALLOC_PROF
- PROF_DUMP_BUFSIZE
-#else
- 1
-#endif
-];
-static size_t prof_dump_buf_end;
-static int prof_dump_fd;
+static uint64_t next_thr_uid;
+static malloc_mutex_t next_thr_uid_mtx;
/* Do not dump any profiles until bootstrapping is complete. */
-static bool prof_booted = false;
+bool prof_booted = false;
-/******************************************************************************/
-/*
- * Function prototypes for static functions that are referenced prior to
- * definition.
- */
+/* Logically a prof_backtrace_hook_t. */
+atomic_p_t prof_backtrace_hook;
-static bool prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx);
-static void prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
-static bool prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
- bool even_if_attached);
-static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
- bool even_if_attached);
-static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
-
-/* Hashtable functions for log_bt_node_set and log_thr_node_set. */
-static void prof_thr_node_hash(const void *key, size_t r_hash[2]);
-static bool prof_thr_node_keycomp(const void *k1, const void *k2);
-static void prof_bt_node_hash(const void *key, size_t r_hash[2]);
-static bool prof_bt_node_keycomp(const void *k1, const void *k2);
-
-/******************************************************************************/
-/* Red-black trees. */
-
-static int
-prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
- uint64_t a_thr_uid = a->thr_uid;
- uint64_t b_thr_uid = b->thr_uid;
- int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
- if (ret == 0) {
- uint64_t a_thr_discrim = a->thr_discrim;
- uint64_t b_thr_discrim = b->thr_discrim;
- ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
- b_thr_discrim);
- if (ret == 0) {
- uint64_t a_tctx_uid = a->tctx_uid;
- uint64_t b_tctx_uid = b->tctx_uid;
- ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
- b_tctx_uid);
- }
- }
- return ret;
-}
-
-rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
- tctx_link, prof_tctx_comp)
-
-static int
-prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
- unsigned a_len = a->bt.len;
- unsigned b_len = b->bt.len;
- unsigned comp_len = (a_len < b_len) ? a_len : b_len;
- int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
- if (ret == 0) {
- ret = (a_len > b_len) - (a_len < b_len);
- }
- return ret;
-}
-
-rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
- prof_gctx_comp)
-
-static int
-prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
- int ret;
- uint64_t a_uid = a->thr_uid;
- uint64_t b_uid = b->thr_uid;
-
- ret = ((a_uid > b_uid) - (a_uid < b_uid));
- if (ret == 0) {
- uint64_t a_discrim = a->thr_discrim;
- uint64_t b_discrim = b->thr_discrim;
-
- ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
- }
- return ret;
-}
-
-rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
- prof_tdata_comp)
+/* Logically a prof_dump_hook_t. */
+atomic_p_t prof_dump_hook;
/******************************************************************************/
void
-prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
- prof_tdata_t *tdata;
-
+prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx) {
cassert(config_prof);
- if (updated) {
- /*
- * Compute a new sample threshold. This isn't very important in
- * practice, because this function is rarely executed, so the
- * potential for sample bias is minimal except in contrived
- * programs.
- */
- tdata = prof_tdata_get(tsd, true);
- if (tdata != NULL) {
- prof_sample_threshold_update(tdata);
- }
+ if (tsd_reentrancy_level_get(tsd) > 0) {
+ assert((uintptr_t)tctx == (uintptr_t)1U);
+ return;
}
if ((uintptr_t)tctx > (uintptr_t)1U) {
malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
tctx->prepared = false;
- if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
- prof_tctx_destroy(tsd, tctx);
- } else {
- malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
- }
+ prof_tctx_try_destroy(tsd, tctx);
}
}
void
-prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
- prof_tctx_t *tctx) {
- prof_tctx_set(tsdn, ptr, usize, NULL, tctx);
+prof_malloc_sample_object(tsd_t *tsd, const void *ptr, size_t size,
+ size_t usize, prof_tctx_t *tctx) {
+ cassert(config_prof);
+
+ if (opt_prof_sys_thread_name) {
+ prof_sys_thread_name_fetch(tsd);
+ }
+
+ edata_t *edata = emap_edata_lookup(tsd_tsdn(tsd), &arena_emap_global,
+ ptr);
+ prof_info_set(tsd, edata, tctx, size);
- /* Get the current time and set this in the extent_t. We'll read this
- * when free() is called. */
- nstime_t t = NSTIME_ZERO_INITIALIZER;
- nstime_update(&t);
- prof_alloc_time_set(tsdn, ptr, NULL, t);
+ szind_t szind = sz_size2index(usize);
- malloc_mutex_lock(tsdn, tctx->tdata->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
+ /*
+ * We need to do these map lookups while holding the lock, to avoid the
+ * possibility of races with prof_reset calls, which update the map and
+ * then acquire the lock. This actually still leaves a data race on the
+ * contents of the unbias map, but we have not yet gone through and
+ * atomic-ified the prof module, and compilers are not yet causing us
+ * issues. The key thing is to make sure that, if we read garbage data,
+ * the prof_reset call is about to mark our tctx as expired before any
+ * dumping of our corrupted output is attempted.
+ */
+ size_t shifted_unbiased_cnt = prof_shifted_unbiased_cnt[szind];
+ size_t unbiased_bytes = prof_unbiased_sz[szind];
tctx->cnts.curobjs++;
+ tctx->cnts.curobjs_shifted_unbiased += shifted_unbiased_cnt;
tctx->cnts.curbytes += usize;
+ tctx->cnts.curbytes_unbiased += unbiased_bytes;
if (opt_prof_accum) {
tctx->cnts.accumobjs++;
+ tctx->cnts.accumobjs_shifted_unbiased += shifted_unbiased_cnt;
tctx->cnts.accumbytes += usize;
+ tctx->cnts.accumbytes_unbiased += unbiased_bytes;
}
+ bool record_recent = prof_recent_alloc_prepare(tsd, tctx);
tctx->prepared = false;
- malloc_mutex_unlock(tsdn, tctx->tdata->lock);
-}
-
-static size_t
-prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) {
- assert(prof_logging_state == prof_logging_state_started);
- malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
-
- prof_bt_node_t dummy_node;
- dummy_node.bt = *bt;
- prof_bt_node_t *node;
-
- /* See if this backtrace is already cached in the table. */
- if (ckh_search(&log_bt_node_set, (void *)(&dummy_node),
- (void **)(&node), NULL)) {
- size_t sz = offsetof(prof_bt_node_t, vec) +
- (bt->len * sizeof(void *));
- prof_bt_node_t *new_node = (prof_bt_node_t *)
- iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
- true, arena_get(TSDN_NULL, 0, true), true);
- if (log_bt_first == NULL) {
- log_bt_first = new_node;
- log_bt_last = new_node;
- } else {
- log_bt_last->next = new_node;
- log_bt_last = new_node;
- }
-
- new_node->next = NULL;
- new_node->index = log_bt_index;
- /*
- * Copy the backtrace: bt is inside a tdata or gctx, which
- * might die before prof_log_stop is called.
- */
- new_node->bt.len = bt->len;
- memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *));
- new_node->bt.vec = new_node->vec;
-
- log_bt_index++;
- ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL);
- return new_node->index;
- } else {
- return node->index;
+ malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+ if (record_recent) {
+ assert(tctx == edata_prof_tctx_get(edata));
+ prof_recent_alloc(tsd, edata, size, usize);
}
-}
-static size_t
-prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) {
- assert(prof_logging_state == prof_logging_state_started);
- malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
-
- prof_thr_node_t dummy_node;
- dummy_node.thr_uid = thr_uid;
- prof_thr_node_t *node;
-
- /* See if this thread is already cached in the table. */
- if (ckh_search(&log_thr_node_set, (void *)(&dummy_node),
- (void **)(&node), NULL)) {
- size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1;
- prof_thr_node_t *new_node = (prof_thr_node_t *)
- iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
- true, arena_get(TSDN_NULL, 0, true), true);
- if (log_thr_first == NULL) {
- log_thr_first = new_node;
- log_thr_last = new_node;
- } else {
- log_thr_last->next = new_node;
- log_thr_last = new_node;
- }
-
- new_node->next = NULL;
- new_node->index = log_thr_index;
- new_node->thr_uid = thr_uid;
- strcpy(new_node->name, name);
- log_thr_index++;
- ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL);
- return new_node->index;
- } else {
- return node->index;
+ if (opt_prof_stats) {
+ prof_stats_inc(tsd, szind, size);
}
}
-static void
-prof_try_log(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx) {
- malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
-
- prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false);
- if (cons_tdata == NULL) {
- /*
- * We decide not to log these allocations. cons_tdata will be
- * NULL only when the current thread is in a weird state (e.g.
- * it's being destroyed).
- */
- return;
- }
-
- malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx);
-
- if (prof_logging_state != prof_logging_state_started) {
- goto label_done;
- }
-
- if (!log_tables_initialized) {
- bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS,
- prof_bt_node_hash, prof_bt_node_keycomp);
- bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS,
- prof_thr_node_hash, prof_thr_node_keycomp);
- if (err1 || err2) {
- goto label_done;
- }
- log_tables_initialized = true;
- }
-
- nstime_t alloc_time = prof_alloc_time_get(tsd_tsdn(tsd), ptr,
- (alloc_ctx_t *)NULL);
- nstime_t free_time = NSTIME_ZERO_INITIALIZER;
- nstime_update(&free_time);
-
- size_t sz = sizeof(prof_alloc_node_t);
- prof_alloc_node_t *new_node = (prof_alloc_node_t *)
- iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true,
- arena_get(TSDN_NULL, 0, true), true);
-
- const char *prod_thr_name = (tctx->tdata->thread_name == NULL)?
- "" : tctx->tdata->thread_name;
- const char *cons_thr_name = prof_thread_name_get(tsd);
-
- prof_bt_t bt;
- /* Initialize the backtrace, using the buffer in tdata to store it. */
- bt_init(&bt, cons_tdata->vec);
- prof_backtrace(&bt);
- prof_bt_t *cons_bt = &bt;
-
- /* We haven't destroyed tctx yet, so gctx should be good to read. */
- prof_bt_t *prod_bt = &tctx->gctx->bt;
-
- new_node->next = NULL;
- new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid,
- prod_thr_name);
- new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid,
- cons_thr_name);
- new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt);
- new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt);
- new_node->alloc_time_ns = nstime_ns(&alloc_time);
- new_node->free_time_ns = nstime_ns(&free_time);
- new_node->usize = usize;
-
- if (log_alloc_first == NULL) {
- log_alloc_first = new_node;
- log_alloc_last = new_node;
- } else {
- log_alloc_last->next = new_node;
- log_alloc_last = new_node;
- }
+void
+prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_info_t *prof_info) {
+ cassert(config_prof);
-label_done:
- malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx);
-}
+ assert(prof_info != NULL);
+ prof_tctx_t *tctx = prof_info->alloc_tctx;
+ assert((uintptr_t)tctx > (uintptr_t)1U);
-void
-prof_free_sampled_object(tsd_t *tsd, const void *ptr, size_t usize,
- prof_tctx_t *tctx) {
+ szind_t szind = sz_size2index(usize);
malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
assert(tctx->cnts.curobjs > 0);
assert(tctx->cnts.curbytes >= usize);
+ /*
+ * It's not correct to do equivalent asserts for unbiased bytes, because
+ * of the potential for races with prof.reset calls. The map contents
+ * should really be atomic, but we have not atomic-ified the prof module
+ * yet.
+ */
tctx->cnts.curobjs--;
+ tctx->cnts.curobjs_shifted_unbiased -= prof_shifted_unbiased_cnt[szind];
tctx->cnts.curbytes -= usize;
+ tctx->cnts.curbytes_unbiased -= prof_unbiased_sz[szind];
- prof_try_log(tsd, ptr, usize, tctx);
-
- if (prof_tctx_should_destroy(tsd_tsdn(tsd), tctx)) {
- prof_tctx_destroy(tsd, tctx);
- } else {
- malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
- }
-}
-
-void
-bt_init(prof_bt_t *bt, void **vec) {
- cassert(config_prof);
-
- bt->vec = vec;
- bt->len = 0;
-}
-
-static void
-prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
- cassert(config_prof);
- assert(tdata == prof_tdata_get(tsd, false));
-
- if (tdata != NULL) {
- assert(!tdata->enq);
- tdata->enq = true;
- }
-
- malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
-}
-
-static void
-prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
- cassert(config_prof);
- assert(tdata == prof_tdata_get(tsd, false));
-
- malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
-
- if (tdata != NULL) {
- bool idump, gdump;
-
- assert(tdata->enq);
- tdata->enq = false;
- idump = tdata->enq_idump;
- tdata->enq_idump = false;
- gdump = tdata->enq_gdump;
- tdata->enq_gdump = false;
-
- if (idump) {
- prof_idump(tsd_tsdn(tsd));
- }
- if (gdump) {
- prof_gdump(tsd_tsdn(tsd));
- }
- }
-}
-
-#ifdef JEMALLOC_PROF_LIBUNWIND
-void
-prof_backtrace(prof_bt_t *bt) {
- int nframes;
-
- cassert(config_prof);
- assert(bt->len == 0);
- assert(bt->vec != NULL);
-
- nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
- if (nframes <= 0) {
- return;
- }
- bt->len = nframes;
-}
-#elif (defined(JEMALLOC_PROF_LIBGCC))
-static _Unwind_Reason_Code
-prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
- cassert(config_prof);
-
- return _URC_NO_REASON;
-}
-
-static _Unwind_Reason_Code
-prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
- prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
- void *ip;
-
- cassert(config_prof);
-
- ip = (void *)_Unwind_GetIP(context);
- if (ip == NULL) {
- return _URC_END_OF_STACK;
- }
- data->bt->vec[data->bt->len] = ip;
- data->bt->len++;
- if (data->bt->len == data->max) {
- return _URC_END_OF_STACK;
- }
-
- return _URC_NO_REASON;
-}
+ prof_try_log(tsd, usize, prof_info);
-void
-prof_backtrace(prof_bt_t *bt) {
- prof_unwind_data_t data = {bt, PROF_BT_MAX};
-
- cassert(config_prof);
+ prof_tctx_try_destroy(tsd, tctx);
- _Unwind_Backtrace(prof_unwind_callback, &data);
-}
-#elif (defined(JEMALLOC_PROF_GCC))
-void
-prof_backtrace(prof_bt_t *bt) {
-#define BT_FRAME(i) \
- if ((i) < PROF_BT_MAX) { \
- void *p; \
- if (__builtin_frame_address(i) == 0) { \
- return; \
- } \
- p = __builtin_return_address(i); \
- if (p == NULL) { \
- return; \
- } \
- bt->vec[(i)] = p; \
- bt->len = (i) + 1; \
- } else { \
- return; \
+ if (opt_prof_stats) {
+ prof_stats_dec(tsd, szind, prof_info->alloc_size);
}
-
- cassert(config_prof);
-
- BT_FRAME(0)
- BT_FRAME(1)
- BT_FRAME(2)
- BT_FRAME(3)
- BT_FRAME(4)
- BT_FRAME(5)
- BT_FRAME(6)
- BT_FRAME(7)
- BT_FRAME(8)
- BT_FRAME(9)
-
- BT_FRAME(10)
- BT_FRAME(11)
- BT_FRAME(12)
- BT_FRAME(13)
- BT_FRAME(14)
- BT_FRAME(15)
- BT_FRAME(16)
- BT_FRAME(17)
- BT_FRAME(18)
- BT_FRAME(19)
-
- BT_FRAME(20)
- BT_FRAME(21)
- BT_FRAME(22)
- BT_FRAME(23)
- BT_FRAME(24)
- BT_FRAME(25)
- BT_FRAME(26)
- BT_FRAME(27)
- BT_FRAME(28)
- BT_FRAME(29)
-
- BT_FRAME(30)
- BT_FRAME(31)
- BT_FRAME(32)
- BT_FRAME(33)
- BT_FRAME(34)
- BT_FRAME(35)
- BT_FRAME(36)
- BT_FRAME(37)
- BT_FRAME(38)
- BT_FRAME(39)
-
- BT_FRAME(40)
- BT_FRAME(41)
- BT_FRAME(42)
- BT_FRAME(43)
- BT_FRAME(44)
- BT_FRAME(45)
- BT_FRAME(46)
- BT_FRAME(47)
- BT_FRAME(48)
- BT_FRAME(49)
-
- BT_FRAME(50)
- BT_FRAME(51)
- BT_FRAME(52)
- BT_FRAME(53)
- BT_FRAME(54)
- BT_FRAME(55)
- BT_FRAME(56)
- BT_FRAME(57)
- BT_FRAME(58)
- BT_FRAME(59)
-
- BT_FRAME(60)
- BT_FRAME(61)
- BT_FRAME(62)
- BT_FRAME(63)
- BT_FRAME(64)
- BT_FRAME(65)
- BT_FRAME(66)
- BT_FRAME(67)
- BT_FRAME(68)
- BT_FRAME(69)
-
- BT_FRAME(70)
- BT_FRAME(71)
- BT_FRAME(72)
- BT_FRAME(73)
- BT_FRAME(74)
- BT_FRAME(75)
- BT_FRAME(76)
- BT_FRAME(77)
- BT_FRAME(78)
- BT_FRAME(79)
-
- BT_FRAME(80)
- BT_FRAME(81)
- BT_FRAME(82)
- BT_FRAME(83)
- BT_FRAME(84)
- BT_FRAME(85)
- BT_FRAME(86)
- BT_FRAME(87)
- BT_FRAME(88)
- BT_FRAME(89)
-
- BT_FRAME(90)
- BT_FRAME(91)
- BT_FRAME(92)
- BT_FRAME(93)
- BT_FRAME(94)
- BT_FRAME(95)
- BT_FRAME(96)
- BT_FRAME(97)
- BT_FRAME(98)
- BT_FRAME(99)
-
- BT_FRAME(100)
- BT_FRAME(101)
- BT_FRAME(102)
- BT_FRAME(103)
- BT_FRAME(104)
- BT_FRAME(105)
- BT_FRAME(106)
- BT_FRAME(107)
- BT_FRAME(108)
- BT_FRAME(109)
-
- BT_FRAME(110)
- BT_FRAME(111)
- BT_FRAME(112)
- BT_FRAME(113)
- BT_FRAME(114)
- BT_FRAME(115)
- BT_FRAME(116)
- BT_FRAME(117)
- BT_FRAME(118)
- BT_FRAME(119)
-
- BT_FRAME(120)
- BT_FRAME(121)
- BT_FRAME(122)
- BT_FRAME(123)
- BT_FRAME(124)
- BT_FRAME(125)
- BT_FRAME(126)
- BT_FRAME(127)
-#undef BT_FRAME
-}
-#else
-void
-prof_backtrace(prof_bt_t *bt) {
- cassert(config_prof);
- not_reached();
-}
-#endif
-
-static malloc_mutex_t *
-prof_gctx_mutex_choose(void) {
- unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED);
-
- return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS];
}
-static malloc_mutex_t *
-prof_tdata_mutex_choose(uint64_t thr_uid) {
- return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS];
-}
-
-static prof_gctx_t *
-prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
- /*
- * Create a single allocation that has space for vec of length bt->len.
- */
- size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
- prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
- sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
- true);
- if (gctx == NULL) {
+prof_tctx_t *
+prof_tctx_create(tsd_t *tsd) {
+ if (!tsd_nominal(tsd) || tsd_reentrancy_level_get(tsd) > 0) {
return NULL;
}
- gctx->lock = prof_gctx_mutex_choose();
- /*
- * Set nlimbo to 1, in order to avoid a race condition with
- * prof_tctx_destroy()/prof_gctx_try_destroy().
- */
- gctx->nlimbo = 1;
- tctx_tree_new(&gctx->tctxs);
- /* Duplicate bt. */
- memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
- gctx->bt.vec = gctx->vec;
- gctx->bt.len = bt->len;
- return gctx;
-}
-
-static void
-prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
- prof_tdata_t *tdata) {
- cassert(config_prof);
-
- /*
- * Check that gctx is still unused by any thread cache before destroying
- * it. prof_lookup() increments gctx->nlimbo in order to avoid a race
- * condition with this function, as does prof_tctx_destroy() in order to
- * avoid a race between the main body of prof_tctx_destroy() and entry
- * into this function.
- */
- prof_enter(tsd, tdata_self);
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
- assert(gctx->nlimbo != 0);
- if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
- /* Remove gctx from bt2gctx. */
- if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) {
- not_reached();
- }
- prof_leave(tsd, tdata_self);
- /* Destroy gctx. */
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true);
- } else {
- /*
- * Compensate for increment in prof_tctx_destroy() or
- * prof_lookup().
- */
- gctx->nlimbo--;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- prof_leave(tsd, tdata_self);
- }
-}
-
-static bool
-prof_tctx_should_destroy(tsdn_t *tsdn, prof_tctx_t *tctx) {
- malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
-
- if (opt_prof_accum) {
- return false;
- }
- if (tctx->cnts.curobjs != 0) {
- return false;
- }
- if (tctx->prepared) {
- return false;
- }
- return true;
-}
-static bool
-prof_gctx_should_destroy(prof_gctx_t *gctx) {
- if (opt_prof_accum) {
- return false;
- }
- if (!tctx_tree_empty(&gctx->tctxs)) {
- return false;
- }
- if (gctx->nlimbo != 0) {
- return false;
- }
- return true;
-}
-
-static void
-prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
- prof_tdata_t *tdata = tctx->tdata;
- prof_gctx_t *gctx = tctx->gctx;
- bool destroy_tdata, destroy_tctx, destroy_gctx;
-
- malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
-
- assert(tctx->cnts.curobjs == 0);
- assert(tctx->cnts.curbytes == 0);
- assert(!opt_prof_accum);
- assert(tctx->cnts.accumobjs == 0);
- assert(tctx->cnts.accumbytes == 0);
-
- ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
- destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata, false);
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
-
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
- switch (tctx->state) {
- case prof_tctx_state_nominal:
- tctx_tree_remove(&gctx->tctxs, tctx);
- destroy_tctx = true;
- if (prof_gctx_should_destroy(gctx)) {
- /*
- * Increment gctx->nlimbo in order to keep another
- * thread from winning the race to destroy gctx while
- * this one has gctx->lock dropped. Without this, it
- * would be possible for another thread to:
- *
- * 1) Sample an allocation associated with gctx.
- * 2) Deallocate the sampled object.
- * 3) Successfully prof_gctx_try_destroy(gctx).
- *
- * The result would be that gctx no longer exists by the
- * time this thread accesses it in
- * prof_gctx_try_destroy().
- */
- gctx->nlimbo++;
- destroy_gctx = true;
- } else {
- destroy_gctx = false;
- }
- break;
- case prof_tctx_state_dumping:
- /*
- * A dumping thread needs tctx to remain valid until dumping
- * has finished. Change state such that the dumping thread will
- * complete destruction during a late dump iteration phase.
- */
- tctx->state = prof_tctx_state_purgatory;
- destroy_tctx = false;
- destroy_gctx = false;
- break;
- default:
- not_reached();
- destroy_tctx = false;
- destroy_gctx = false;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- if (destroy_gctx) {
- prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
- tdata);
- }
-
- malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
-
- if (destroy_tdata) {
- prof_tdata_destroy(tsd, tdata, false);
- }
-
- if (destroy_tctx) {
- idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true);
- }
-}
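
[Editor's note — illustrative sketch, not part of this commit.] The nlimbo
manipulation in the removed prof_tctx_destroy()/prof_gctx_try_destroy() pair
implements a pin count: a thread bumps gctx->nlimbo before dropping
gctx->lock, so the structure cannot be freed during the window in which no
lock is held, and prof_gctx_try_destroy() later both drops the pin and frees
the node if ours was the last reference. A compilable miniature of the same
protocol, with hypothetical node_t/node_unpin names and pthreads standing in
for malloc_mutex:

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

typedef struct node_s {
	pthread_mutex_t lock;
	unsigned nlimbo;	/* Pins held across lock drops. */
	bool has_data;		/* Stand-in for !tctx_tree_empty(). */
} node_t;

/* Drop one pin; free the node only if it is otherwise unused and ours
 * was the last pin.  (The real code also removes the node from the
 * global bt2gctx map, under prof_enter(), before freeing it, so no new
 * lookup can pin it again.) */
static void
node_unpin(node_t *node) {
	pthread_mutex_lock(&node->lock);
	assert(node->nlimbo != 0);
	if (!node->has_data && node->nlimbo == 1) {
		pthread_mutex_unlock(&node->lock);
		free(node);
		return;
	}
	node->nlimbo--;
	pthread_mutex_unlock(&node->lock);
}
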
-
-static bool
-prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
- void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) {
- union {
- prof_gctx_t *p;
- void *v;
- } gctx, tgctx;
- union {
- prof_bt_t *p;
- void *v;
- } btkey;
- bool new_gctx;
-
- prof_enter(tsd, tdata);
- if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
- /* bt has never been seen before. Insert it. */
- prof_leave(tsd, tdata);
- tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
- if (tgctx.v == NULL) {
- return true;
- }
- prof_enter(tsd, tdata);
- if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
- gctx.p = tgctx.p;
- btkey.p = &gctx.p->bt;
- if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
- /* OOM. */
- prof_leave(tsd, tdata);
- idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL,
- true, true);
- return true;
- }
- new_gctx = true;
- } else {
- new_gctx = false;
- }
- } else {
- tgctx.v = NULL;
- new_gctx = false;
- }
-
- if (!new_gctx) {
- /*
- * Increment nlimbo, in order to avoid a race condition with
- * prof_tctx_destroy()/prof_gctx_try_destroy().
- */
- malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
- gctx.p->nlimbo++;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
- new_gctx = false;
-
- if (tgctx.v != NULL) {
- /* Lost race to insert. */
- idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true,
- true);
- }
- }
- prof_leave(tsd, tdata);
-
- *p_btkey = btkey.v;
- *p_gctx = gctx.p;
- *p_new_gctx = new_gctx;
- return false;
-}
-
-prof_tctx_t *
-prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
- union {
- prof_tctx_t *p;
- void *v;
- } ret;
- prof_tdata_t *tdata;
- bool not_found;
-
- cassert(config_prof);
-
- tdata = prof_tdata_get(tsd, false);
+ prof_tdata_t *tdata = prof_tdata_get(tsd, true);
if (tdata == NULL) {
return NULL;
}
- malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
- not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
- if (!not_found) { /* Note double negative! */
- ret.p->prepared = true;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
- if (not_found) {
- void *btkey;
- prof_gctx_t *gctx;
- bool new_gctx, error;
-
- /*
- * This thread's cache lacks bt. Look for it in the global
- * cache.
- */
- if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
- &new_gctx)) {
- return NULL;
- }
-
- /* Link a prof_tctx_t into gctx for this thread. */
- ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
- sz_size2index(sizeof(prof_tctx_t)), false, NULL, true,
- arena_ichoose(tsd, NULL), true);
- if (ret.p == NULL) {
- if (new_gctx) {
- prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
- }
- return NULL;
- }
- ret.p->tdata = tdata;
- ret.p->thr_uid = tdata->thr_uid;
- ret.p->thr_discrim = tdata->thr_discrim;
- memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
- ret.p->gctx = gctx;
- ret.p->tctx_uid = tdata->tctx_uid_next++;
- ret.p->prepared = true;
- ret.p->state = prof_tctx_state_initializing;
- malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
- error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
- if (error) {
- if (new_gctx) {
- prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
- }
- idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true);
- return NULL;
- }
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
- ret.p->state = prof_tctx_state_nominal;
- tctx_tree_insert(&gctx->tctxs, ret.p);
- gctx->nlimbo--;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- }
-
- return ret.p;
+ prof_bt_t bt;
+ bt_init(&bt, tdata->vec);
+ prof_backtrace(tsd, &bt);
+ return prof_lookup(tsd, &bt);
}
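
[Editor's note — illustrative sketch, not part of this commit.] The new
prof_tctx_create() above funnels into prof_lookup(), which consults a
per-thread bt2tctx cache and, on a miss, the global bt2gctx map (see the
removed prof_lookup_global()). Because the new node must be allocated with
the global lock dropped, the code re-searches after reacquiring the lock and
discards its copy if another thread inserted first. A minimal, compilable
rendering of that drop-allocate-recheck pattern, with a single global slot
standing in for the hash table:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static void *cached;	/* Stand-in for the bt2gctx hash. */

static void *
get_or_create(void) {
	pthread_mutex_lock(&map_lock);
	void *v = cached;
	pthread_mutex_unlock(&map_lock);
	if (v != NULL) {
		return v;
	}
	/* Allocate outside the lock, as prof_lookup_global() does
	 * (allocation may itself recurse into the allocator). */
	void *fresh = malloc(1);
	if (fresh == NULL) {
		return NULL;
	}
	pthread_mutex_lock(&map_lock);
	if (cached == NULL) {
		cached = fresh;		/* We won the insert race. */
		fresh = NULL;
	}
	v = cached;
	pthread_mutex_unlock(&map_lock);
	free(fresh);	/* Lost the race: discard our copy (no-op if won). */
	return v;
}
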
/*
@@ -1136,27 +209,22 @@ prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
* (e.g.
* -mno-sse) in order for the workaround to be complete.
*/
-void
-prof_sample_threshold_update(prof_tdata_t *tdata) {
+uint64_t
+prof_sample_new_event_wait(tsd_t *tsd) {
#ifdef JEMALLOC_PROF
- if (!config_prof) {
- return;
- }
-
if (lg_prof_sample == 0) {
- tsd_bytes_until_sample_set(tsd_fetch(), 0);
- return;
+ return TE_MIN_START_WAIT;
}
/*
* Compute sample interval as a geometrically distributed random
* variable with mean (2^lg_prof_sample).
*
- * __ __
- * | log(u) | 1
- * tdata->bytes_until_sample = | -------- |, where p = ---------------
- * | log(1-p) | lg_prof_sample
- * 2
+ * __ __
+ * | log(u) | 1
+ * bytes_until_sample = | -------- |, where p = ---------------
+ * | log(1-p) | lg_prof_sample
+ * 2
*
* For more information on the math, see:
*
@@ -1165,857 +233,56 @@ prof_sample_threshold_update(prof_tdata_t *tdata) {
* Springer-Verlag, New York, 1986
* pp 500
* (http://luc.devroye.org/rnbookindex.html)
+ *
+ * In the actual computation, there's a non-zero probability that our
+ * pseudo random number generator generates an exact 0, and to avoid
+ * log(0), we set u to 1.0 in case r is 0. Therefore u effectively is
+ * uniformly distributed in (0, 1] instead of [0, 1). Further, rather
+ * than taking the ceiling, we take the floor and then add 1, since
+ * otherwise bytes_until_sample would be 0 if u is exactly 1.0.
*/
- uint64_t r = prng_lg_range_u64(&tdata->prng_state, 53);
- double u = (double)r * (1.0/9007199254740992.0L);
- uint64_t bytes_until_sample = (uint64_t)(log(u) /
+ uint64_t r = prng_lg_range_u64(tsd_prng_statep_get(tsd), 53);
+ double u = (r == 0U) ? 1.0 : (double)r * (1.0/9007199254740992.0L);
+ return (uint64_t)(log(u) /
log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
+ (uint64_t)1U;
- if (bytes_until_sample > SSIZE_MAX) {
- bytes_until_sample = SSIZE_MAX;
- }
- tsd_bytes_until_sample_set(tsd_fetch(), bytes_until_sample);
-
-#endif
-}
-
-#ifdef JEMALLOC_JET
-static prof_tdata_t *
-prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
- void *arg) {
- size_t *tdata_count = (size_t *)arg;
-
- (*tdata_count)++;
-
- return NULL;
-}
-
-size_t
-prof_tdata_count(void) {
- size_t tdata_count = 0;
- tsdn_t *tsdn;
-
- tsdn = tsdn_fetch();
- malloc_mutex_lock(tsdn, &tdatas_mtx);
- tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
- (void *)&tdata_count);
- malloc_mutex_unlock(tsdn, &tdatas_mtx);
-
- return tdata_count;
-}
-
-size_t
-prof_bt_count(void) {
- size_t bt_count;
- tsd_t *tsd;
- prof_tdata_t *tdata;
-
- tsd = tsd_fetch();
- tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL) {
- return 0;
- }
-
- malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
- bt_count = ckh_count(&bt2gctx);
- malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
-
- return bt_count;
-}
-#endif
-
-static int
-prof_dump_open_impl(bool propagate_err, const char *filename) {
- int fd;
-
- fd = creat(filename, 0644);
- if (fd == -1 && !propagate_err) {
-		malloc_printf("<jemalloc>: creat(\"%s\", 0644) failed\n",
- filename);
- if (opt_abort) {
- abort();
- }
- }
-
- return fd;
-}
-prof_dump_open_t *JET_MUTABLE prof_dump_open = prof_dump_open_impl;
-
-static bool
-prof_dump_flush(bool propagate_err) {
- bool ret = false;
- ssize_t err;
-
- cassert(config_prof);
-
- err = malloc_write_fd(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
- if (err == -1) {
- if (!propagate_err) {
- malloc_write("<jemalloc>: write() failed during heap "
- "profile flush\n");
- if (opt_abort) {
- abort();
- }
- }
- ret = true;
- }
- prof_dump_buf_end = 0;
-
- return ret;
-}
-
-static bool
-prof_dump_close(bool propagate_err) {
- bool ret;
-
- assert(prof_dump_fd != -1);
- ret = prof_dump_flush(propagate_err);
- close(prof_dump_fd);
- prof_dump_fd = -1;
-
- return ret;
-}
-
-static bool
-prof_dump_write(bool propagate_err, const char *s) {
- size_t i, slen, n;
-
- cassert(config_prof);
-
- i = 0;
- slen = strlen(s);
- while (i < slen) {
- /* Flush the buffer if it is full. */
- if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
- if (prof_dump_flush(propagate_err) && propagate_err) {
- return true;
- }
- }
-
- if (prof_dump_buf_end + slen - i <= PROF_DUMP_BUFSIZE) {
- /* Finish writing. */
- n = slen - i;
- } else {
- /* Write as much of s as will fit. */
- n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
- }
- memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
- prof_dump_buf_end += n;
- i += n;
- }
- assert(i == slen);
-
- return false;
-}
-
-JEMALLOC_FORMAT_PRINTF(2, 3)
-static bool
-prof_dump_printf(bool propagate_err, const char *format, ...) {
- bool ret;
- va_list ap;
- char buf[PROF_PRINTF_BUFSIZE];
-
- va_start(ap, format);
- malloc_vsnprintf(buf, sizeof(buf), format, ap);
- va_end(ap);
- ret = prof_dump_write(propagate_err, buf);
-
- return ret;
-}
-
-static void
-prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) {
- malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
-
- malloc_mutex_lock(tsdn, tctx->gctx->lock);
-
- switch (tctx->state) {
- case prof_tctx_state_initializing:
- malloc_mutex_unlock(tsdn, tctx->gctx->lock);
- return;
- case prof_tctx_state_nominal:
- tctx->state = prof_tctx_state_dumping;
- malloc_mutex_unlock(tsdn, tctx->gctx->lock);
-
- memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
-
- tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
- tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
- if (opt_prof_accum) {
- tdata->cnt_summed.accumobjs +=
- tctx->dump_cnts.accumobjs;
- tdata->cnt_summed.accumbytes +=
- tctx->dump_cnts.accumbytes;
- }
- break;
- case prof_tctx_state_dumping:
- case prof_tctx_state_purgatory:
- not_reached();
- }
-}
-
-static void
-prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) {
- malloc_mutex_assert_owner(tsdn, gctx->lock);
-
- gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
- gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
- if (opt_prof_accum) {
- gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
- gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
- }
-}
-
-static prof_tctx_t *
-prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
- tsdn_t *tsdn = (tsdn_t *)arg;
-
- malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
-
- switch (tctx->state) {
- case prof_tctx_state_nominal:
- /* New since dumping started; ignore. */
- break;
- case prof_tctx_state_dumping:
- case prof_tctx_state_purgatory:
- prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
- break;
- default:
- not_reached();
- }
-
- return NULL;
-}
-
-struct prof_tctx_dump_iter_arg_s {
- tsdn_t *tsdn;
- bool propagate_err;
-};
-
-static prof_tctx_t *
-prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
- struct prof_tctx_dump_iter_arg_s *arg =
- (struct prof_tctx_dump_iter_arg_s *)opaque;
-
- malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
-
- switch (tctx->state) {
- case prof_tctx_state_initializing:
- case prof_tctx_state_nominal:
- /* Not captured by this dump. */
- break;
- case prof_tctx_state_dumping:
- case prof_tctx_state_purgatory:
- if (prof_dump_printf(arg->propagate_err,
- " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": "
- "%"FMTu64"]\n", tctx->thr_uid, tctx->dump_cnts.curobjs,
- tctx->dump_cnts.curbytes, tctx->dump_cnts.accumobjs,
- tctx->dump_cnts.accumbytes)) {
- return tctx;
- }
- break;
- default:
- not_reached();
- }
- return NULL;
-}
-
-static prof_tctx_t *
-prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
- tsdn_t *tsdn = (tsdn_t *)arg;
- prof_tctx_t *ret;
-
- malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
-
- switch (tctx->state) {
- case prof_tctx_state_nominal:
- /* New since dumping started; ignore. */
- break;
- case prof_tctx_state_dumping:
- tctx->state = prof_tctx_state_nominal;
- break;
- case prof_tctx_state_purgatory:
- ret = tctx;
- goto label_return;
- default:
- not_reached();
- }
-
- ret = NULL;
-label_return:
- return ret;
-}
-
-static void
-prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) {
- cassert(config_prof);
-
- malloc_mutex_lock(tsdn, gctx->lock);
-
- /*
- * Increment nlimbo so that gctx won't go away before dump.
- * Additionally, link gctx into the dump list so that it is included in
- * prof_dump()'s second pass.
- */
- gctx->nlimbo++;
- gctx_tree_insert(gctxs, gctx);
-
- memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
-
- malloc_mutex_unlock(tsdn, gctx->lock);
-}
-
-struct prof_gctx_merge_iter_arg_s {
- tsdn_t *tsdn;
- size_t leak_ngctx;
-};
-
-static prof_gctx_t *
-prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
- struct prof_gctx_merge_iter_arg_s *arg =
- (struct prof_gctx_merge_iter_arg_s *)opaque;
-
- malloc_mutex_lock(arg->tsdn, gctx->lock);
- tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
- (void *)arg->tsdn);
- if (gctx->cnt_summed.curobjs != 0) {
- arg->leak_ngctx++;
- }
- malloc_mutex_unlock(arg->tsdn, gctx->lock);
-
- return NULL;
-}
-
-static void
-prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
- prof_tdata_t *tdata = prof_tdata_get(tsd, false);
- prof_gctx_t *gctx;
-
- /*
- * Standard tree iteration won't work here, because as soon as we
- * decrement gctx->nlimbo and unlock gctx, another thread can
- * concurrently destroy it, which will corrupt the tree. Therefore,
- * tear down the tree one node at a time during iteration.
- */
- while ((gctx = gctx_tree_first(gctxs)) != NULL) {
- gctx_tree_remove(gctxs, gctx);
- malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
- {
- prof_tctx_t *next;
-
- next = NULL;
- do {
- prof_tctx_t *to_destroy =
- tctx_tree_iter(&gctx->tctxs, next,
- prof_tctx_finish_iter,
- (void *)tsd_tsdn(tsd));
- if (to_destroy != NULL) {
- next = tctx_tree_next(&gctx->tctxs,
- to_destroy);
- tctx_tree_remove(&gctx->tctxs,
- to_destroy);
- idalloctm(tsd_tsdn(tsd), to_destroy,
- NULL, NULL, true, true);
- } else {
- next = NULL;
- }
- } while (next != NULL);
- }
- gctx->nlimbo--;
- if (prof_gctx_should_destroy(gctx)) {
- gctx->nlimbo++;
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
- } else {
- malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
- }
- }
-}
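
[Editor's note — illustrative sketch, not part of this commit.] The comment
in the removed prof_gctx_finish() explains why it pops nodes rather than
iterating in place: once nlimbo is dropped, another thread may destroy a node
and corrupt a live traversal. The safe shape, in miniature (hypothetical
item_t list; single-threaded sketch of the detach-first pattern only):

#include <stdlib.h>

typedef struct item_s {
	struct item_s *next;
} item_t;

/* Detach each node from the container before touching it; a live
 * iterator would be invalidated if another thread destroyed a node
 * mid-walk. */
static void
drain(item_t **head) {
	item_t *it;
	while ((it = *head) != NULL) {
		*head = it->next;
		free(it);
	}
}
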
-
-struct prof_tdata_merge_iter_arg_s {
- tsdn_t *tsdn;
- prof_cnt_t cnt_all;
-};
-
-static prof_tdata_t *
-prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
- void *opaque) {
- struct prof_tdata_merge_iter_arg_s *arg =
- (struct prof_tdata_merge_iter_arg_s *)opaque;
-
- malloc_mutex_lock(arg->tsdn, tdata->lock);
- if (!tdata->expired) {
- size_t tabind;
- union {
- prof_tctx_t *p;
- void *v;
- } tctx;
-
- tdata->dumping = true;
- memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
- for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
- &tctx.v);) {
- prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
- }
-
- arg->cnt_all.curobjs += tdata->cnt_summed.curobjs;
- arg->cnt_all.curbytes += tdata->cnt_summed.curbytes;
- if (opt_prof_accum) {
- arg->cnt_all.accumobjs += tdata->cnt_summed.accumobjs;
- arg->cnt_all.accumbytes += tdata->cnt_summed.accumbytes;
- }
- } else {
- tdata->dumping = false;
- }
- malloc_mutex_unlock(arg->tsdn, tdata->lock);
-
- return NULL;
-}
-
-static prof_tdata_t *
-prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
- void *arg) {
- bool propagate_err = *(bool *)arg;
-
- if (!tdata->dumping) {
- return NULL;
- }
-
- if (prof_dump_printf(propagate_err,
- " t%"FMTu64": %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]%s%s\n",
- tdata->thr_uid, tdata->cnt_summed.curobjs,
- tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
- tdata->cnt_summed.accumbytes,
- (tdata->thread_name != NULL) ? " " : "",
- (tdata->thread_name != NULL) ? tdata->thread_name : "")) {
- return tdata;
- }
- return NULL;
-}
-
-static bool
-prof_dump_header_impl(tsdn_t *tsdn, bool propagate_err,
- const prof_cnt_t *cnt_all) {
- bool ret;
-
- if (prof_dump_printf(propagate_err,
- "heap_v2/%"FMTu64"\n"
- " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
- ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
- cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes)) {
- return true;
- }
-
- malloc_mutex_lock(tsdn, &tdatas_mtx);
- ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
- (void *)&propagate_err) != NULL);
- malloc_mutex_unlock(tsdn, &tdatas_mtx);
- return ret;
-}
-prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl;
-
-static bool
-prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
- const prof_bt_t *bt, prof_gctx_tree_t *gctxs) {
- bool ret;
- unsigned i;
- struct prof_tctx_dump_iter_arg_s prof_tctx_dump_iter_arg;
-
- cassert(config_prof);
- malloc_mutex_assert_owner(tsdn, gctx->lock);
-
-	/* Avoid dumping gctx's that have no useful data. */
- if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
- (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
- assert(gctx->cnt_summed.curobjs == 0);
- assert(gctx->cnt_summed.curbytes == 0);
- assert(gctx->cnt_summed.accumobjs == 0);
- assert(gctx->cnt_summed.accumbytes == 0);
- ret = false;
- goto label_return;
- }
-
- if (prof_dump_printf(propagate_err, "@")) {
- ret = true;
- goto label_return;
- }
- for (i = 0; i < bt->len; i++) {
- if (prof_dump_printf(propagate_err, " %#"FMTxPTR,
- (uintptr_t)bt->vec[i])) {
- ret = true;
- goto label_return;
- }
- }
-
- if (prof_dump_printf(propagate_err,
- "\n"
- " t*: %"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]\n",
- gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
- gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
- ret = true;
- goto label_return;
- }
-
- prof_tctx_dump_iter_arg.tsdn = tsdn;
- prof_tctx_dump_iter_arg.propagate_err = propagate_err;
- if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
- (void *)&prof_tctx_dump_iter_arg) != NULL) {
- ret = true;
- goto label_return;
- }
-
- ret = false;
-label_return:
- return ret;
-}
-
-#ifndef _WIN32
-JEMALLOC_FORMAT_PRINTF(1, 2)
-static int
-prof_open_maps(const char *format, ...) {
- int mfd;
- va_list ap;
- char filename[PATH_MAX + 1];
-
- va_start(ap, format);
- malloc_vsnprintf(filename, sizeof(filename), format, ap);
- va_end(ap);
-
-#if defined(O_CLOEXEC)
- mfd = open(filename, O_RDONLY | O_CLOEXEC);
-#else
- mfd = open(filename, O_RDONLY);
- if (mfd != -1) {
- fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC);
- }
-#endif
-
- return mfd;
-}
-#endif
-
-static int
-prof_getpid(void) {
-#ifdef _WIN32
- return GetCurrentProcessId();
#else
- return getpid();
-#endif
-}
-
-static bool
-prof_dump_maps(bool propagate_err) {
- bool ret;
- int mfd;
-
- cassert(config_prof);
-#ifdef __FreeBSD__
- mfd = prof_open_maps("/proc/curproc/map");
-#elif defined(_WIN32)
-	mfd = -1;	/* Not implemented. */
-#else
- {
- int pid = prof_getpid();
-
- mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
- if (mfd == -1) {
- mfd = prof_open_maps("/proc/%d/maps", pid);
- }
- }
-#endif
- if (mfd != -1) {
- ssize_t nread;
-
- if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
- propagate_err) {
- ret = true;
- goto label_return;
- }
- nread = 0;
- do {
- prof_dump_buf_end += nread;
- if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
- /* Make space in prof_dump_buf before read(). */
- if (prof_dump_flush(propagate_err) &&
- propagate_err) {
- ret = true;
- goto label_return;
- }
- }
- nread = malloc_read_fd(mfd,
- &prof_dump_buf[prof_dump_buf_end], PROF_DUMP_BUFSIZE
- - prof_dump_buf_end);
- } while (nread > 0);
- } else {
- ret = true;
- goto label_return;
- }
-
- ret = false;
-label_return:
- if (mfd != -1) {
- close(mfd);
- }
- return ret;
-}
-
-/*
- * See prof_sample_threshold_update() comment for why the body of this function
- * is conditionally compiled.
- */
-static void
-prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
- const char *filename) {
-#ifdef JEMALLOC_PROF
- /*
-	 * Scaling is equivalent to AdjustSamples() in jeprof, but the result may
- * differ slightly from what jeprof reports, because here we scale the
- * summary values, whereas jeprof scales each context individually and
- * reports the sums of the scaled values.
- */
- if (cnt_all->curbytes != 0) {
- double sample_period = (double)((uint64_t)1 << lg_prof_sample);
- double ratio = (((double)cnt_all->curbytes) /
- (double)cnt_all->curobjs) / sample_period;
- double scale_factor = 1.0 / (1.0 - exp(-ratio));
- uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
- * scale_factor);
- uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
- scale_factor);
-
- malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
- " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
- curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
- 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
- malloc_printf(
- "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
- filename);
- }
+ not_reached();
+ return TE_MAX_START_WAIT;
#endif
}
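
[Editor's note — illustrative sketch, not part of this commit.] The new
prof_sample_new_event_wait() draws the byte distance to the next sample as a
geometric variate with mean 2^lg_prof_sample, per the formula in the comment
above. A standalone version of the same arithmetic, with POSIX drand48()
standing in for jemalloc's internal PRNG:

#include <math.h>
#include <stdint.h>
#include <stdlib.h>

/* Byte distance to the next sample.  The u == 0 adjustment mirrors the
 * r == 0 special case above; the real code separately special-cases
 * lg_prof_sample == 0. */
static uint64_t
sample_wait(unsigned lg_sample) {
	double u = drand48();		/* Uniform in [0, 1). */
	if (u == 0.0) {
		u = 1.0;		/* Keep log(u) finite. */
	}
	double p = 1.0 / (double)((uint64_t)1 << lg_sample);
	return (uint64_t)(log(u) / log(1.0 - p)) + 1;
}

Inverting this sampling is what the removed prof_leakcheck() did: with
ratio = (mean object size) / (sample period), an object of that size is
sampled with probability about 1 - exp(-ratio), so each sampled object is
scaled up by 1 / (1 - exp(-ratio)) in the leak summary.
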
-struct prof_gctx_dump_iter_arg_s {
- tsdn_t *tsdn;
- bool propagate_err;
-};
-
-static prof_gctx_t *
-prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
- prof_gctx_t *ret;
- struct prof_gctx_dump_iter_arg_s *arg =
- (struct prof_gctx_dump_iter_arg_s *)opaque;
-
- malloc_mutex_lock(arg->tsdn, gctx->lock);
-
- if (prof_dump_gctx(arg->tsdn, arg->propagate_err, gctx, &gctx->bt,
- gctxs)) {
- ret = gctx;
- goto label_return;
- }
-
- ret = NULL;
-label_return:
- malloc_mutex_unlock(arg->tsdn, gctx->lock);
- return ret;
-}
-
-static void
-prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata,
- struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
- struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
- prof_gctx_tree_t *gctxs) {
- size_t tabind;
- union {
- prof_gctx_t *p;
- void *v;
- } gctx;
-
- prof_enter(tsd, tdata);
-
+uint64_t
+prof_sample_postponed_event_wait(tsd_t *tsd) {
/*
- * Put gctx's in limbo and clear their counters in preparation for
- * summing.
+	 * The postponed wait time for the prof sample event is computed as
+	 * if we wanted a fresh wait time (i.e. as if the event had
+	 * triggered).  If we instead postponed to the immediately next
+	 * allocation, as we do for the other events, we could introduce
+	 * sampling bias: e.g. the allocation immediately following a
+	 * reentrancy might always come from the same stack trace.
*/
- gctx_tree_new(gctxs);
- for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) {
- prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs);
- }
-
- /*
- * Iterate over tdatas, and for the non-expired ones snapshot their tctx
- * stats and merge them into the associated gctx's.
- */
- prof_tdata_merge_iter_arg->tsdn = tsd_tsdn(tsd);
- memset(&prof_tdata_merge_iter_arg->cnt_all, 0, sizeof(prof_cnt_t));
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
- tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
- (void *)prof_tdata_merge_iter_arg);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
-
- /* Merge tctx stats into gctx's. */
- prof_gctx_merge_iter_arg->tsdn = tsd_tsdn(tsd);
- prof_gctx_merge_iter_arg->leak_ngctx = 0;
- gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter,
- (void *)prof_gctx_merge_iter_arg);
-
- prof_leave(tsd, tdata);
-}
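
[Editor's note — illustrative sketch, not part of this commit.] The removed
prof_dump_prep() runs a two-phase merge: each tctx's live counters are first
snapshotted into dump_cnts under the owning thread's lock
(prof_tctx_merge_tdata()), and the frozen snapshots are then summed into each
call site's gctx (prof_tctx_merge_gctx()). Decoupling the snapshot from the
summation lets the dump report a consistent total while threads keep mutating
their live counters. In miniature (hypothetical ctr_t type):

#include <stddef.h>

typedef struct {
	long curobjs, curbytes;		/* Live (mutated by owner). */
	long dump_objs, dump_bytes;	/* Snapshot taken at dump time. */
} ctr_t;

/* Phase 1: snapshot one counter, under the owner's lock. */
static void
snapshot(ctr_t *c) {
	c->dump_objs = c->curobjs;
	c->dump_bytes = c->curbytes;
}

/* Phase 2: sum the frozen snapshots; live counters may keep moving. */
static void
sum(const ctr_t *cs, size_t n, long *objs, long *bytes) {
	*objs = *bytes = 0;
	for (size_t i = 0; i < n; i++) {
		*objs += cs[i].dump_objs;
		*bytes += cs[i].dump_bytes;
	}
}
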
-
-static bool
-prof_dump_file(tsd_t *tsd, bool propagate_err, const char *filename,
- bool leakcheck, prof_tdata_t *tdata,
- struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
- struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
- struct prof_gctx_dump_iter_arg_s *prof_gctx_dump_iter_arg,
- prof_gctx_tree_t *gctxs) {
- /* Create dump file. */
- if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1) {
- return true;
- }
-
- /* Dump profile header. */
- if (prof_dump_header(tsd_tsdn(tsd), propagate_err,
- &prof_tdata_merge_iter_arg->cnt_all)) {
- goto label_write_error;
- }
-
- /* Dump per gctx profile stats. */
- prof_gctx_dump_iter_arg->tsdn = tsd_tsdn(tsd);
- prof_gctx_dump_iter_arg->propagate_err = propagate_err;
- if (gctx_tree_iter(gctxs, NULL, prof_gctx_dump_iter,
- (void *)prof_gctx_dump_iter_arg) != NULL) {
- goto label_write_error;
- }
-
- /* Dump /proc/<pid>/maps if possible. */
- if (prof_dump_maps(propagate_err)) {
- goto label_write_error;
- }
-
- if (prof_dump_close(propagate_err)) {
- return true;
- }
-
- return false;
-label_write_error:
- prof_dump_close(propagate_err);
- return true;
+ return prof_sample_new_event_wait(tsd);
}
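
[Editor's note.] Concretely: with lg_prof_sample = 19 the mean wait is
2^19 = 524288 bytes, so a thread that allocates heavily right after leaving a
reentrant region still has each byte weighted equally; if the postponed event
instead fired on the very next allocation, whichever call site reliably
follows reentrancy would be sampled with probability approaching 1.
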
-static bool
-prof_dump(tsd_t *tsd, bool propagate_err, const char *filename,
- bool leakcheck) {
- cassert(config_prof);
- assert(tsd_reentrancy_level_get(tsd) == 0);
-
- prof_tdata_t * tdata = prof_tdata_get(tsd, true);
- if (tdata == NULL) {
- return true;
- }
-
- pre_reentrancy(tsd, NULL);
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
-
- prof_gctx_tree_t gctxs;
- struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
- struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
- struct prof_gctx_dump_iter_arg_s prof_gctx_dump_iter_arg;
- prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
- &prof_gctx_merge_iter_arg, &gctxs);
- bool err = prof_dump_file(tsd, propagate_err, filename, leakcheck, tdata,
- &prof_tdata_merge_iter_arg, &prof_gctx_merge_iter_arg,
- &prof_gctx_dump_iter_arg, &gctxs);
- prof_gctx_finish(tsd, &gctxs);
-
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
- post_reentrancy(tsd);
-
- if (err) {
- return true;
- }
-
- if (leakcheck) {
- prof_leakcheck(&prof_tdata_merge_iter_arg.cnt_all,
- prof_gctx_merge_iter_arg.leak_ngctx, filename);
- }
- return false;
-}
-
-#ifdef JEMALLOC_JET
void
-prof_cnt_all(uint64_t *curobjs, uint64_t *curbytes, uint64_t *accumobjs,
- uint64_t *accumbytes) {
- tsd_t *tsd;
- prof_tdata_t *tdata;
- struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg;
- struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg;
- prof_gctx_tree_t gctxs;
-
- tsd = tsd_fetch();
- tdata = prof_tdata_get(tsd, false);
- if (tdata == NULL) {
- if (curobjs != NULL) {
- *curobjs = 0;
- }
- if (curbytes != NULL) {
- *curbytes = 0;
- }
- if (accumobjs != NULL) {
- *accumobjs = 0;
- }
- if (accumbytes != NULL) {
- *accumbytes = 0;
- }
+prof_sample_event_handler(tsd_t *tsd, uint64_t elapsed) {
+ cassert(config_prof);
+ assert(elapsed > 0 && elapsed != TE_INVALID_ELAPSED);
+ if (prof_interval == 0 || !prof_active_get_unlocked()) {
return;
}
-
- prof_dump_prep(tsd, tdata, &prof_tdata_merge_iter_arg,
- &prof_gctx_merge_iter_arg, &gctxs);
- prof_gctx_finish(tsd, &gctxs);
-
- if (curobjs != NULL) {
- *curobjs = prof_tdata_merge_iter_arg.cnt_all.curobjs;
- }
- if (curbytes != NULL) {
- *curbytes = prof_tdata_merge_iter_arg.cnt_all.curbytes;
- }
- if (accumobjs != NULL) {
- *accumobjs = prof_tdata_merge_iter_arg.cnt_all.accumobjs;
- }
- if (accumbytes != NULL) {
- *accumbytes = prof_tdata_merge_iter_arg.cnt_all.accumbytes;
- }
-}
-#endif
-
-#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
-#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
-static void
-prof_dump_filename(char *filename, char v, uint64_t vseq) {
- cassert(config_prof);
-
- if (vseq != VSEQ_INVALID) {
- /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
- malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
- "%s.%d.%"FMTu64".%c%"FMTu64".heap",
- opt_prof_prefix, prof_getpid(), prof_dump_seq, v, vseq);
- } else {
- /* "<prefix>.<pid>.<seq>.<v>.heap" */
- malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
- "%s.%d.%"FMTu64".%c.heap",
- opt_prof_prefix, prof_getpid(), prof_dump_seq, v);
+ if (counter_accum(tsd_tsdn(tsd), &prof_idump_accumulated, elapsed)) {
+ prof_idump(tsd_tsdn(tsd));
}
- prof_dump_seq++;
}
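
[Editor's note — illustrative sketch, not part of this commit.] The new
prof_sample_event_handler() feeds the elapsed bytes into
prof_idump_accumulated (initialized with prof_interval below) and fires
prof_idump() when the interval is crossed. counter_accum()'s exact semantics
live in counter.h; the sketch assumes that crossing the threshold consumes
one interval's worth and keeps the remainder:

#include <stdbool.h>
#include <stdint.h>

typedef struct {
	uint64_t accum;		/* Bytes seen since the last trigger. */
	uint64_t interval;	/* Trigger threshold (prof_interval). */
} accum_t;

/* Add the elapsed bytes; report whether the interval was crossed.  The
 * real counter_accum() is shared between threads and updated
 * atomically; this single-threaded sketch shows only the arithmetic. */
static bool
accum_add(accum_t *c, uint64_t elapsed) {
	c->accum += elapsed;
	if (c->accum < c->interval) {
		return false;
	}
	c->accum %= c->interval;	/* Keep the remainder. */
	return true;
}
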
static void
prof_fdump(void) {
tsd_t *tsd;
- char filename[DUMP_FILENAME_BUFSIZE];
cassert(config_prof);
assert(opt_prof_final);
- assert(opt_prof_prefix[0] != '\0');
if (!prof_booted) {
return;
@@ -2023,26 +290,14 @@ prof_fdump(void) {
tsd = tsd_fetch();
assert(tsd_reentrancy_level_get(tsd) == 0);
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- prof_dump_filename(filename, 'f', VSEQ_INVALID);
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- prof_dump(tsd, false, filename, opt_prof_leak);
+ prof_fdump_impl(tsd);
}
-bool
-prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) {
+static bool
+prof_idump_accum_init(void) {
cassert(config_prof);
-#ifndef JEMALLOC_ATOMIC_U64
- if (malloc_mutex_init(&prof_accum->mtx, "prof_accum",
- WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) {
- return true;
- }
- prof_accum->accumbytes = 0;
-#else
- atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED);
-#endif
- return false;
+ return counter_accum_init(&prof_idump_accumulated, prof_interval);
}
void
@@ -2060,7 +315,7 @@ prof_idump(tsdn_t *tsdn) {
return;
}
- tdata = prof_tdata_get(tsd, false);
+ tdata = prof_tdata_get(tsd, true);
if (tdata == NULL) {
return;
}
@@ -2069,14 +324,7 @@ prof_idump(tsdn_t *tsdn) {
return;
}
- if (opt_prof_prefix[0] != '\0') {
- char filename[PATH_MAX + 1];
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- prof_dump_filename(filename, 'i', prof_dump_iseq);
- prof_dump_iseq++;
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- prof_dump(tsd, false, filename, false);
- }
+ prof_idump_impl(tsd);
}
bool
@@ -2087,19 +335,8 @@ prof_mdump(tsd_t *tsd, const char *filename) {
if (!opt_prof || !prof_booted) {
return true;
}
- char filename_buf[DUMP_FILENAME_BUFSIZE];
- if (filename == NULL) {
- /* No filename specified, so automatically generate one. */
- if (opt_prof_prefix[0] == '\0') {
- return true;
- }
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
- prof_dump_mseq++;
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
- filename = filename_buf;
- }
- return prof_dump(tsd, true, filename, false);
+
+ return prof_mdump_impl(tsd, filename);
}
void
@@ -2126,63 +363,7 @@ prof_gdump(tsdn_t *tsdn) {
return;
}
- if (opt_prof_prefix[0] != '\0') {
- char filename[DUMP_FILENAME_BUFSIZE];
- malloc_mutex_lock(tsdn, &prof_dump_seq_mtx);
- prof_dump_filename(filename, 'u', prof_dump_useq);
- prof_dump_useq++;
- malloc_mutex_unlock(tsdn, &prof_dump_seq_mtx);
- prof_dump(tsd, false, filename, false);
- }
-}
-
-static void
-prof_bt_hash(const void *key, size_t r_hash[2]) {
- prof_bt_t *bt = (prof_bt_t *)key;
-
- cassert(config_prof);
-
- hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
-}
-
-static bool
-prof_bt_keycomp(const void *k1, const void *k2) {
- const prof_bt_t *bt1 = (prof_bt_t *)k1;
- const prof_bt_t *bt2 = (prof_bt_t *)k2;
-
- cassert(config_prof);
-
- if (bt1->len != bt2->len) {
- return false;
- }
- return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
-}
-
-static void
-prof_bt_node_hash(const void *key, size_t r_hash[2]) {
- const prof_bt_node_t *bt_node = (prof_bt_node_t *)key;
- prof_bt_hash((void *)(&bt_node->bt), r_hash);
-}
-
-static bool
-prof_bt_node_keycomp(const void *k1, const void *k2) {
- const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1;
- const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2;
- return prof_bt_keycomp((void *)(&bt_node1->bt),
- (void *)(&bt_node2->bt));
-}
-
-static void
-prof_thr_node_hash(const void *key, size_t r_hash[2]) {
- const prof_thr_node_t *thr_node = (prof_thr_node_t *)key;
- hash(&thr_node->thr_uid, sizeof(uint64_t), 0x94122f35U, r_hash);
-}
-
-static bool
-prof_thr_node_keycomp(const void *k1, const void *k2) {
- const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1;
- const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2;
- return thr_node1->thr_uid == thr_node2->thr_uid;
+ prof_gdump_impl(tsd);
}
static uint64_t
@@ -2197,132 +378,18 @@ prof_thr_uid_alloc(tsdn_t *tsdn) {
return thr_uid;
}
-static prof_tdata_t *
-prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
- char *thread_name, bool active) {
- prof_tdata_t *tdata;
-
- cassert(config_prof);
-
- /* Initialize an empty cache for this thread. */
- tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
- sz_size2index(sizeof(prof_tdata_t)), false, NULL, true,
- arena_get(TSDN_NULL, 0, true), true);
- if (tdata == NULL) {
- return NULL;
- }
-
- tdata->lock = prof_tdata_mutex_choose(thr_uid);
- tdata->thr_uid = thr_uid;
- tdata->thr_discrim = thr_discrim;
- tdata->thread_name = thread_name;
- tdata->attached = true;
- tdata->expired = false;
- tdata->tctx_uid_next = 0;
-
- if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
- prof_bt_keycomp)) {
- idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
- return NULL;
- }
-
- tdata->prng_state = (uint64_t)(uintptr_t)tdata;
- prof_sample_threshold_update(tdata);
-
- tdata->enq = false;
- tdata->enq_idump = false;
- tdata->enq_gdump = false;
-
- tdata->dumping = false;
- tdata->active = active;
-
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
- tdata_tree_insert(&tdatas, tdata);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
-
- return tdata;
-}
-
prof_tdata_t *
prof_tdata_init(tsd_t *tsd) {
return prof_tdata_init_impl(tsd, prof_thr_uid_alloc(tsd_tsdn(tsd)), 0,
NULL, prof_thread_active_init_get(tsd_tsdn(tsd)));
}
-static bool
-prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) {
- if (tdata->attached && !even_if_attached) {
- return false;
- }
- if (ckh_count(&tdata->bt2tctx) != 0) {
- return false;
- }
- return true;
-}
-
-static bool
-prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
- bool even_if_attached) {
- malloc_mutex_assert_owner(tsdn, tdata->lock);
-
- return prof_tdata_should_destroy_unlocked(tdata, even_if_attached);
-}
-
-static void
-prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
- bool even_if_attached) {
- malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
-
- tdata_tree_remove(&tdatas, tdata);
-
- assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
-
- if (tdata->thread_name != NULL) {
- idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
- true);
- }
- ckh_delete(tsd, &tdata->bt2tctx);
- idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
-}
-
-static void
-prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) {
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
- prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
-}
-
-static void
-prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) {
- bool destroy_tdata;
-
- malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
- if (tdata->attached) {
- destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
- true);
- /*
- * Only detach if !destroy_tdata, because detaching would allow
- * another thread to win the race to destroy tdata.
- */
- if (!destroy_tdata) {
- tdata->attached = false;
- }
- tsd_prof_tdata_set(tsd, NULL);
- } else {
- destroy_tdata = false;
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
- if (destroy_tdata) {
- prof_tdata_destroy(tsd, tdata, true);
- }
-}
-
prof_tdata_t *
prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
uint64_t thr_uid = tdata->thr_uid;
uint64_t thr_discrim = tdata->thr_discrim + 1;
char *thread_name = (tdata->thread_name != NULL) ?
- prof_thread_name_alloc(tsd_tsdn(tsd), tdata->thread_name) : NULL;
+ prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
bool active = tdata->active;
prof_tdata_detach(tsd, tdata);
@@ -2330,58 +397,6 @@ prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata) {
active);
}
-static bool
-prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
- bool destroy_tdata;
-
- malloc_mutex_lock(tsdn, tdata->lock);
- if (!tdata->expired) {
- tdata->expired = true;
- destroy_tdata = tdata->attached ? false :
- prof_tdata_should_destroy(tsdn, tdata, false);
- } else {
- destroy_tdata = false;
- }
- malloc_mutex_unlock(tsdn, tdata->lock);
-
- return destroy_tdata;
-}
-
-static prof_tdata_t *
-prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata,
- void *arg) {
- tsdn_t *tsdn = (tsdn_t *)arg;
-
- return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
-}
-
-void
-prof_reset(tsd_t *tsd, size_t lg_sample) {
- prof_tdata_t *next;
-
- assert(lg_sample < (sizeof(uint64_t) << 3));
-
- malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
- malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
-
- lg_prof_sample = lg_sample;
-
- next = NULL;
- do {
- prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
- prof_tdata_reset_iter, (void *)tsd);
- if (to_destroy != NULL) {
- next = tdata_tree_next(&tdatas, to_destroy);
- prof_tdata_destroy_locked(tsd, to_destroy, false);
- } else {
- next = NULL;
- }
- } while (next != NULL);
-
- malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
- malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
-}
-
void
prof_tdata_cleanup(tsd_t *tsd) {
prof_tdata_t *tdata;
@@ -2400,8 +415,9 @@ bool
prof_active_get(tsdn_t *tsdn) {
bool prof_active_current;
+ prof_active_assert();
malloc_mutex_lock(tsdn, &prof_active_mtx);
- prof_active_current = prof_active;
+ prof_active_current = prof_active_state;
malloc_mutex_unlock(tsdn, &prof_active_mtx);
return prof_active_current;
}
@@ -2410,377 +426,19 @@ bool
prof_active_set(tsdn_t *tsdn, bool active) {
bool prof_active_old;
+ prof_active_assert();
malloc_mutex_lock(tsdn, &prof_active_mtx);
- prof_active_old = prof_active;
- prof_active = active;
+ prof_active_old = prof_active_state;
+ prof_active_state = active;
malloc_mutex_unlock(tsdn, &prof_active_mtx);
+ prof_active_assert();
return prof_active_old;
}
-#ifdef JEMALLOC_JET
-size_t
-prof_log_bt_count(void) {
- size_t cnt = 0;
- prof_bt_node_t *node = log_bt_first;
- while (node != NULL) {
- cnt++;
- node = node->next;
- }
- return cnt;
-}
-
-size_t
-prof_log_alloc_count(void) {
- size_t cnt = 0;
- prof_alloc_node_t *node = log_alloc_first;
- while (node != NULL) {
- cnt++;
- node = node->next;
- }
- return cnt;
-}
-
-size_t
-prof_log_thr_count(void) {
- size_t cnt = 0;
- prof_thr_node_t *node = log_thr_first;
- while (node != NULL) {
- cnt++;
- node = node->next;
- }
- return cnt;
-}
-
-bool
-prof_log_is_logging(void) {
- return prof_logging_state == prof_logging_state_started;
-}
-
-bool
-prof_log_rep_check(void) {
- if (prof_logging_state == prof_logging_state_stopped
- && log_tables_initialized) {
- return true;
- }
-
- if (log_bt_last != NULL && log_bt_last->next != NULL) {
- return true;
- }
- if (log_thr_last != NULL && log_thr_last->next != NULL) {
- return true;
- }
- if (log_alloc_last != NULL && log_alloc_last->next != NULL) {
- return true;
- }
-
- size_t bt_count = prof_log_bt_count();
- size_t thr_count = prof_log_thr_count();
- size_t alloc_count = prof_log_alloc_count();
-
- if (prof_logging_state == prof_logging_state_stopped) {
-		if (bt_count != 0 || thr_count != 0 || alloc_count != 0) {
- return true;
- }
- }
-
- prof_alloc_node_t *node = log_alloc_first;
- while (node != NULL) {
- if (node->alloc_bt_ind >= bt_count) {
- return true;
- }
- if (node->free_bt_ind >= bt_count) {
- return true;
- }
- if (node->alloc_thr_ind >= thr_count) {
- return true;
- }
- if (node->free_thr_ind >= thr_count) {
- return true;
- }
- if (node->alloc_time_ns > node->free_time_ns) {
- return true;
- }
- node = node->next;
- }
-
- return false;
-}
-
-void
-prof_log_dummy_set(bool new_value) {
- prof_log_dummy = new_value;
-}
-#endif
-
-bool
-prof_log_start(tsdn_t *tsdn, const char *filename) {
- if (!opt_prof || !prof_booted) {
- return true;
- }
-
- bool ret = false;
- size_t buf_size = PATH_MAX + 1;
-
- malloc_mutex_lock(tsdn, &log_mtx);
-
- if (prof_logging_state != prof_logging_state_stopped) {
- ret = true;
- } else if (filename == NULL) {
- /* Make default name. */
- malloc_snprintf(log_filename, buf_size, "%s.%d.%"FMTu64".json",
- opt_prof_prefix, prof_getpid(), log_seq);
- log_seq++;
- prof_logging_state = prof_logging_state_started;
- } else if (strlen(filename) >= buf_size) {
- ret = true;
- } else {
- strcpy(log_filename, filename);
- prof_logging_state = prof_logging_state_started;
- }
-
- if (!ret) {
- nstime_update(&log_start_timestamp);
- }
-
- malloc_mutex_unlock(tsdn, &log_mtx);
-
- return ret;
-}
-
-/* Used as an atexit function to stop logging on exit. */
-static void
-prof_log_stop_final(void) {
- tsd_t *tsd = tsd_fetch();
- prof_log_stop(tsd_tsdn(tsd));
-}
-
-struct prof_emitter_cb_arg_s {
- int fd;
- ssize_t ret;
-};
-
-static void
-prof_emitter_write_cb(void *opaque, const char *to_write) {
- struct prof_emitter_cb_arg_s *arg =
- (struct prof_emitter_cb_arg_s *)opaque;
- size_t bytes = strlen(to_write);
-#ifdef JEMALLOC_JET
- if (prof_log_dummy) {
- return;
- }
-#endif
- arg->ret = write(arg->fd, (void *)to_write, bytes);
-}
-
-/*
- * prof_log_emit_{...} goes through the appropriate linked list, emitting each
- * node to the json and deallocating it.
- */
-static void
-prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) {
- emitter_json_array_kv_begin(emitter, "threads");
- prof_thr_node_t *thr_node = log_thr_first;
- prof_thr_node_t *thr_old_node;
- while (thr_node != NULL) {
- emitter_json_object_begin(emitter);
-
- emitter_json_kv(emitter, "thr_uid", emitter_type_uint64,
- &thr_node->thr_uid);
-
- char *thr_name = thr_node->name;
-
- emitter_json_kv(emitter, "thr_name", emitter_type_string,
- &thr_name);
-
- emitter_json_object_end(emitter);
- thr_old_node = thr_node;
- thr_node = thr_node->next;
- idalloc(tsd, thr_old_node);
- }
- emitter_json_array_end(emitter);
-}
-
-static void
-prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) {
- emitter_json_array_kv_begin(emitter, "stack_traces");
- prof_bt_node_t *bt_node = log_bt_first;
- prof_bt_node_t *bt_old_node;
- /*
-	 * Calculate how many characters we need: two hex digits per byte,
-	 * two for the "0x" prefix, and one more for the terminating '\0'.
- */
- char buf[2 * sizeof(intptr_t) + 3];
- size_t buf_sz = sizeof(buf);
- while (bt_node != NULL) {
- emitter_json_array_begin(emitter);
- size_t i;
- for (i = 0; i < bt_node->bt.len; i++) {
- malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]);
- char *trace_str = buf;
- emitter_json_value(emitter, emitter_type_string,
- &trace_str);
- }
- emitter_json_array_end(emitter);
-
- bt_old_node = bt_node;
- bt_node = bt_node->next;
- idalloc(tsd, bt_old_node);
- }
- emitter_json_array_end(emitter);
-}
-
-static void
-prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) {
- emitter_json_array_kv_begin(emitter, "allocations");
- prof_alloc_node_t *alloc_node = log_alloc_first;
- prof_alloc_node_t *alloc_old_node;
- while (alloc_node != NULL) {
- emitter_json_object_begin(emitter);
-
- emitter_json_kv(emitter, "alloc_thread", emitter_type_size,
- &alloc_node->alloc_thr_ind);
-
- emitter_json_kv(emitter, "free_thread", emitter_type_size,
- &alloc_node->free_thr_ind);
-
- emitter_json_kv(emitter, "alloc_trace", emitter_type_size,
- &alloc_node->alloc_bt_ind);
-
- emitter_json_kv(emitter, "free_trace", emitter_type_size,
- &alloc_node->free_bt_ind);
-
- emitter_json_kv(emitter, "alloc_timestamp",
- emitter_type_uint64, &alloc_node->alloc_time_ns);
-
- emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64,
- &alloc_node->free_time_ns);
-
- emitter_json_kv(emitter, "usize", emitter_type_uint64,
- &alloc_node->usize);
-
- emitter_json_object_end(emitter);
-
- alloc_old_node = alloc_node;
- alloc_node = alloc_node->next;
- idalloc(tsd, alloc_old_node);
- }
- emitter_json_array_end(emitter);
-}
-
-static void
-prof_log_emit_metadata(emitter_t *emitter) {
- emitter_json_object_kv_begin(emitter, "info");
-
- nstime_t now = NSTIME_ZERO_INITIALIZER;
-
- nstime_update(&now);
- uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp);
- emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns);
-
- char *vers = JEMALLOC_VERSION;
- emitter_json_kv(emitter, "version",
- emitter_type_string, &vers);
-
- emitter_json_kv(emitter, "lg_sample_rate",
- emitter_type_int, &lg_prof_sample);
-
- int pid = prof_getpid();
- emitter_json_kv(emitter, "pid", emitter_type_int, &pid);
-
- emitter_json_object_end(emitter);
-}
-
-bool
-prof_log_stop(tsdn_t *tsdn) {
- if (!opt_prof || !prof_booted) {
- return true;
- }
-
- tsd_t *tsd = tsdn_tsd(tsdn);
- malloc_mutex_lock(tsdn, &log_mtx);
-
- if (prof_logging_state != prof_logging_state_started) {
- malloc_mutex_unlock(tsdn, &log_mtx);
- return true;
- }
-
- /*
- * Set the state to dumping. We'll set it to stopped when we're done.
- * Since other threads won't be able to start/stop/log when the state is
- * dumping, we don't have to hold the lock during the whole method.
- */
- prof_logging_state = prof_logging_state_dumping;
- malloc_mutex_unlock(tsdn, &log_mtx);
-
- emitter_t emitter;
-
- /* Create a file. */
-
- int fd;
-#ifdef JEMALLOC_JET
- if (prof_log_dummy) {
- fd = 0;
- } else {
- fd = creat(log_filename, 0644);
- }
-#else
- fd = creat(log_filename, 0644);
-#endif
-
- if (fd == -1) {
-		malloc_printf("<jemalloc>: creat() for log file \"%s\" "
-		    "failed with %d\n", log_filename, errno);
- if (opt_abort) {
- abort();
- }
- return true;
- }
-
- /* Emit to json. */
- struct prof_emitter_cb_arg_s arg;
- arg.fd = fd;
- emitter_init(&emitter, emitter_output_json, &prof_emitter_write_cb,
- (void *)(&arg));
-
- emitter_begin(&emitter);
- prof_log_emit_metadata(&emitter);
- prof_log_emit_threads(tsd, &emitter);
- prof_log_emit_traces(tsd, &emitter);
- prof_log_emit_allocs(tsd, &emitter);
- emitter_end(&emitter);
-
- /* Reset global state. */
- if (log_tables_initialized) {
- ckh_delete(tsd, &log_bt_node_set);
- ckh_delete(tsd, &log_thr_node_set);
- }
- log_tables_initialized = false;
- log_bt_index = 0;
- log_thr_index = 0;
- log_bt_first = NULL;
- log_bt_last = NULL;
- log_thr_first = NULL;
- log_thr_last = NULL;
- log_alloc_first = NULL;
- log_alloc_last = NULL;
-
- malloc_mutex_lock(tsdn, &log_mtx);
- prof_logging_state = prof_logging_state_stopped;
- malloc_mutex_unlock(tsdn, &log_mtx);
-
-#ifdef JEMALLOC_JET
- if (prof_log_dummy) {
- return false;
- }
-#endif
- return close(fd);
-}
-
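
[Editor's note — reconstructed for illustration, not part of this commit.]
Piecing together the emitter calls in the removed prof_log_emit_* functions,
the retired logger wrote a single JSON document shaped roughly as follows
(all values invented; the alloc/free trace and thread fields are indices
into the sibling "stack_traces" and "threads" arrays):

{
	"info": {"duration": 1000000, "version": "5.x.x",
	    "lg_sample_rate": 19, "pid": 1234},
	"threads": [{"thr_uid": 0, "thr_name": "worker"}],
	"stack_traces": [["0x401234", "0x401567"]],
	"allocations": [{"alloc_thread": 0, "free_thread": 0,
	    "alloc_trace": 0, "free_trace": 0,
	    "alloc_timestamp": 100, "free_timestamp": 200, "usize": 4096}]
}
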
const char *
prof_thread_name_get(tsd_t *tsd) {
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+
prof_tdata_t *tdata;
tdata = prof_tdata_get(tsd, true);
@@ -2790,69 +448,19 @@ prof_thread_name_get(tsd_t *tsd) {
return (tdata->thread_name != NULL ? tdata->thread_name : "");
}
-static char *
-prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name) {
- char *ret;
- size_t size;
-
- if (thread_name == NULL) {
- return NULL;
- }
-
- size = strlen(thread_name) + 1;
- if (size == 1) {
- return "";
- }
-
- ret = iallocztm(tsdn, size, sz_size2index(size), false, NULL, true,
- arena_get(TSDN_NULL, 0, true), true);
- if (ret == NULL) {
- return NULL;
- }
- memcpy(ret, thread_name, size);
- return ret;
-}
-
int
prof_thread_name_set(tsd_t *tsd, const char *thread_name) {
- prof_tdata_t *tdata;
- unsigned i;
- char *s;
-
- tdata = prof_tdata_get(tsd, true);
- if (tdata == NULL) {
- return EAGAIN;
- }
-
- /* Validate input. */
- if (thread_name == NULL) {
- return EFAULT;
- }
- for (i = 0; thread_name[i] != '\0'; i++) {
- char c = thread_name[i];
- if (!isgraph(c) && !isblank(c)) {
- return EFAULT;
- }
- }
-
- s = prof_thread_name_alloc(tsd_tsdn(tsd), thread_name);
- if (s == NULL) {
- return EAGAIN;
- }
-
- if (tdata->thread_name != NULL) {
- idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
- true);
- tdata->thread_name = NULL;
- }
- if (strlen(s) > 0) {
- tdata->thread_name = s;
+ if (opt_prof_sys_thread_name) {
+ return ENOENT;
+ } else {
+ return prof_thread_name_set_impl(tsd, thread_name);
}
- return 0;
}
bool
prof_thread_active_get(tsd_t *tsd) {
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+
prof_tdata_t *tdata;
tdata = prof_tdata_get(tsd, true);
@@ -2864,6 +472,8 @@ prof_thread_active_get(tsd_t *tsd) {
bool
prof_thread_active_set(tsd_t *tsd, bool active) {
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+
prof_tdata_t *tdata;
tdata = prof_tdata_get(tsd, true);
@@ -2917,6 +527,28 @@ prof_gdump_set(tsdn_t *tsdn, bool gdump) {
}
void
+prof_backtrace_hook_set(prof_backtrace_hook_t hook) {
+ atomic_store_p(&prof_backtrace_hook, hook, ATOMIC_RELEASE);
+}
+
+prof_backtrace_hook_t
+prof_backtrace_hook_get() {
+ return (prof_backtrace_hook_t)atomic_load_p(&prof_backtrace_hook,
+ ATOMIC_ACQUIRE);
+}
+
+void
+prof_dump_hook_set(prof_dump_hook_t hook) {
+ atomic_store_p(&prof_dump_hook, hook, ATOMIC_RELEASE);
+}
+
+prof_dump_hook_t
+prof_dump_hook_get() {
+ return (prof_dump_hook_t)atomic_load_p(&prof_dump_hook,
+ ATOMIC_ACQUIRE);
+}
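+
+/*
+ * The hook pointers are stored with release and loaded with acquire
+ * semantics, so a thread that observes a hook also observes whatever state
+ * its installer set up beforehand.  A (hypothetical) embedder could thus
+ * call prof_backtrace_hook_set(my_backtrace_hook) at startup and later
+ * restore the value previously returned by prof_backtrace_hook_get().
+ */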
+
+void
prof_boot0(void) {
cassert(config_prof);
@@ -2932,6 +564,9 @@ prof_boot1(void) {
* opt_prof must be in its final state before any arenas are
* initialized, so this function must be executed early.
*/
+ if (opt_prof_leak_error && !opt_prof_leak) {
+ opt_prof_leak = true;
+ }
if (opt_prof_leak && !opt_prof) {
/*
@@ -2949,61 +584,65 @@ prof_boot1(void) {
}
bool
-prof_boot2(tsd_t *tsd) {
+prof_boot2(tsd_t *tsd, base_t *base) {
cassert(config_prof);
- if (opt_prof) {
- unsigned i;
+ /*
+ * Initialize the global mutexes unconditionally to maintain correct
+ * stats when opt_prof is false.
+ */
+ if (malloc_mutex_init(&prof_active_mtx, "prof_active",
+ WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
+ WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (malloc_mutex_init(&prof_thread_active_init_mtx,
+ "prof_thread_active_init", WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
+ WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
+ WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
+ WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (malloc_mutex_init(&prof_stats_mtx, "prof_stats",
+ WITNESS_RANK_PROF_STATS, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (malloc_mutex_init(&prof_dump_filename_mtx,
+ "prof_dump_filename", WITNESS_RANK_PROF_DUMP_FILENAME,
+ malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
+ WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+ if (opt_prof) {
lg_prof_sample = opt_lg_prof_sample;
-
- prof_active = opt_prof_active;
- if (malloc_mutex_init(&prof_active_mtx, "prof_active",
- WITNESS_RANK_PROF_ACTIVE, malloc_mutex_rank_exclusive)) {
- return true;
- }
-
+ prof_unbias_map_init();
+ prof_active_state = opt_prof_active;
prof_gdump_val = opt_prof_gdump;
- if (malloc_mutex_init(&prof_gdump_mtx, "prof_gdump",
- WITNESS_RANK_PROF_GDUMP, malloc_mutex_rank_exclusive)) {
- return true;
- }
-
prof_thread_active_init = opt_prof_thread_active_init;
- if (malloc_mutex_init(&prof_thread_active_init_mtx,
- "prof_thread_active_init",
- WITNESS_RANK_PROF_THREAD_ACTIVE_INIT,
- malloc_mutex_rank_exclusive)) {
- return true;
- }
-
- if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
- prof_bt_keycomp)) {
- return true;
- }
- if (malloc_mutex_init(&bt2gctx_mtx, "prof_bt2gctx",
- WITNESS_RANK_PROF_BT2GCTX, malloc_mutex_rank_exclusive)) {
- return true;
- }
- tdata_tree_new(&tdatas);
- if (malloc_mutex_init(&tdatas_mtx, "prof_tdatas",
- WITNESS_RANK_PROF_TDATAS, malloc_mutex_rank_exclusive)) {
+ if (prof_data_init(tsd)) {
return true;
}
next_thr_uid = 0;
- if (malloc_mutex_init(&next_thr_uid_mtx, "prof_next_thr_uid",
- WITNESS_RANK_PROF_NEXT_THR_UID, malloc_mutex_rank_exclusive)) {
- return true;
- }
-
- if (malloc_mutex_init(&prof_dump_seq_mtx, "prof_dump_seq",
- WITNESS_RANK_PROF_DUMP_SEQ, malloc_mutex_rank_exclusive)) {
- return true;
- }
- if (malloc_mutex_init(&prof_dump_mtx, "prof_dump",
- WITNESS_RANK_PROF_DUMP, malloc_mutex_rank_exclusive)) {
+ if (prof_idump_accum_init()) {
return true;
}
@@ -3015,42 +654,22 @@ prof_boot2(tsd_t *tsd) {
}
}
- if (opt_prof_log) {
- prof_log_start(tsd_tsdn(tsd), NULL);
- }
-
- if (atexit(prof_log_stop_final) != 0) {
- malloc_write("<jemalloc>: Error in atexit() "
- "for logging\n");
- if (opt_abort) {
- abort();
- }
- }
-
- if (malloc_mutex_init(&log_mtx, "prof_log",
- WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) {
- return true;
- }
-
- if (ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS,
- prof_bt_node_hash, prof_bt_node_keycomp)) {
+ if (prof_log_init(tsd)) {
return true;
}
- if (ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS,
- prof_thr_node_hash, prof_thr_node_keycomp)) {
+ if (prof_recent_init()) {
return true;
}
- log_tables_initialized = true;
+ prof_base = base;
- gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
- b0get(), PROF_NCTX_LOCKS * sizeof(malloc_mutex_t),
- CACHELINE);
+ gctx_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), base,
+ PROF_NCTX_LOCKS * sizeof(malloc_mutex_t), CACHELINE);
if (gctx_locks == NULL) {
return true;
}
- for (i = 0; i < PROF_NCTX_LOCKS; i++) {
+ for (unsigned i = 0; i < PROF_NCTX_LOCKS; i++) {
if (malloc_mutex_init(&gctx_locks[i], "prof_gctx",
WITNESS_RANK_PROF_GCTX,
malloc_mutex_rank_exclusive)) {
@@ -3058,26 +677,21 @@ prof_boot2(tsd_t *tsd) {
}
}
- tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd),
- b0get(), PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t),
- CACHELINE);
+ tdata_locks = (malloc_mutex_t *)base_alloc(tsd_tsdn(tsd), base,
+ PROF_NTDATA_LOCKS * sizeof(malloc_mutex_t), CACHELINE);
if (tdata_locks == NULL) {
return true;
}
- for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
+ for (unsigned i = 0; i < PROF_NTDATA_LOCKS; i++) {
if (malloc_mutex_init(&tdata_locks[i], "prof_tdata",
WITNESS_RANK_PROF_TDATA,
malloc_mutex_rank_exclusive)) {
return true;
}
}
-#ifdef JEMALLOC_PROF_LIBGCC
- /*
- * Cause the backtracing machinery to allocate its internal
- * state before enabling profiling.
- */
- _Unwind_Backtrace(prof_unwind_init_callback, NULL);
-#endif
+
+ prof_unwind_init();
+ prof_hooks_init();
}
prof_booted = true;
@@ -3095,18 +709,23 @@ prof_prefork0(tsdn_t *tsdn) {
for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
malloc_mutex_prefork(tsdn, &tdata_locks[i]);
}
+ malloc_mutex_prefork(tsdn, &log_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
malloc_mutex_prefork(tsdn, &gctx_locks[i]);
}
+ malloc_mutex_prefork(tsdn, &prof_recent_dump_mtx);
}
}
void
prof_prefork1(tsdn_t *tsdn) {
if (config_prof && opt_prof) {
+ counter_prefork(tsdn, &prof_idump_accumulated);
malloc_mutex_prefork(tsdn, &prof_active_mtx);
- malloc_mutex_prefork(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_prefork(tsdn, &prof_dump_filename_mtx);
malloc_mutex_prefork(tsdn, &prof_gdump_mtx);
+ malloc_mutex_prefork(tsdn, &prof_recent_alloc_mtx);
+ malloc_mutex_prefork(tsdn, &prof_stats_mtx);
malloc_mutex_prefork(tsdn, &next_thr_uid_mtx);
malloc_mutex_prefork(tsdn, &prof_thread_active_init_mtx);
}
@@ -3120,12 +739,17 @@ prof_postfork_parent(tsdn_t *tsdn) {
malloc_mutex_postfork_parent(tsdn,
&prof_thread_active_init_mtx);
malloc_mutex_postfork_parent(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_stats_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_recent_alloc_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_gdump_mtx);
- malloc_mutex_postfork_parent(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_postfork_parent(tsdn, &prof_dump_filename_mtx);
malloc_mutex_postfork_parent(tsdn, &prof_active_mtx);
+ counter_postfork_parent(tsdn, &prof_idump_accumulated);
+ malloc_mutex_postfork_parent(tsdn, &prof_recent_dump_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
malloc_mutex_postfork_parent(tsdn, &gctx_locks[i]);
}
+ malloc_mutex_postfork_parent(tsdn, &log_mtx);
for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
malloc_mutex_postfork_parent(tsdn, &tdata_locks[i]);
}
@@ -3142,12 +766,17 @@ prof_postfork_child(tsdn_t *tsdn) {
malloc_mutex_postfork_child(tsdn, &prof_thread_active_init_mtx);
malloc_mutex_postfork_child(tsdn, &next_thr_uid_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_stats_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_recent_alloc_mtx);
malloc_mutex_postfork_child(tsdn, &prof_gdump_mtx);
- malloc_mutex_postfork_child(tsdn, &prof_dump_seq_mtx);
+ malloc_mutex_postfork_child(tsdn, &prof_dump_filename_mtx);
malloc_mutex_postfork_child(tsdn, &prof_active_mtx);
+ counter_postfork_child(tsdn, &prof_idump_accumulated);
+ malloc_mutex_postfork_child(tsdn, &prof_recent_dump_mtx);
for (i = 0; i < PROF_NCTX_LOCKS; i++) {
malloc_mutex_postfork_child(tsdn, &gctx_locks[i]);
}
+ malloc_mutex_postfork_child(tsdn, &log_mtx);
for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
malloc_mutex_postfork_child(tsdn, &tdata_locks[i]);
}
diff --git a/contrib/jemalloc/src/prof_data.c b/contrib/jemalloc/src/prof_data.c
new file mode 100644
index 000000000000..bfa55be1ca55
--- /dev/null
+++ b/contrib/jemalloc/src/prof_data.c
@@ -0,0 +1,1447 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/prof_data.h"
+
+/*
+ * This file defines and manages the core profiling data structures.
+ *
+ * Conceptually, profiling data can be imagined as a table with three columns:
+ * thread, stack trace, and current allocation size. (When prof_accum is on,
+ * there's one additional column which is the cumulative allocation size.)
+ *
+ * Implementation-wise, each thread maintains a hash mapping stack traces to
+ * allocation sizes; these correspondences are basically the individual rows
+ * in the table. In addition, two global "indices" are built to make data
+ * aggregation efficient (for dumping): bt2gctx and tdatas, which are basically
+ * the "grouped by stack trace" and "grouped by thread" views of the same table,
+ * respectively. Note that the allocation size is only aggregated to the two
+ * indices at dumping time, so as to optimize for performance.
+ */
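+
+/*
+ * As a concrete (purely illustrative) picture: after two threads sample
+ * allocations from two call sites, the conceptual table might read
+ *
+ *   thread  stack trace  current bytes
+ *   t0      bt_A         4096
+ *   t0      bt_B         8192
+ *   t1      bt_A         2048
+ *
+ * bt2gctx groups the rows by stack trace (bt_A -> 6144, bt_B -> 8192) and
+ * tdatas groups them by thread (t0 -> 12288, t1 -> 2048), with the sums, per
+ * above, only materialized at dump time.
+ */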
+
+/******************************************************************************/
+
+malloc_mutex_t bt2gctx_mtx;
+malloc_mutex_t tdatas_mtx;
+malloc_mutex_t prof_dump_mtx;
+
+/*
+ * Table of mutexes that are shared among gctx's. These are leaf locks, so
+ * there is no problem with using them for more than one gctx at the same time.
+ * The primary motivation for this sharing, though, is that gctx's are ephemeral,
+ * and destroying mutexes causes complications for systems that allocate when
+ * creating/destroying mutexes.
+ */
+malloc_mutex_t *gctx_locks;
+static atomic_u_t cum_gctxs; /* Atomic counter. */
+
+/*
+ * Table of mutexes that are shared among tdata's. No operations require
+ * holding multiple tdata locks, so there is no problem with using them for more
+ * than one tdata at the same time, even though a gctx lock may be acquired
+ * while holding a tdata lock.
+ */
+malloc_mutex_t *tdata_locks;
+
+/*
+ * Global hash of (prof_bt_t *)-->(prof_gctx_t *). This is the master data
+ * structure that knows about all backtraces currently captured.
+ */
+static ckh_t bt2gctx;
+
+/*
+ * Tree of all extant prof_tdata_t structures, regardless of state,
+ * {attached,detached,expired}.
+ */
+static prof_tdata_tree_t tdatas;
+
+size_t prof_unbiased_sz[PROF_SC_NSIZES];
+size_t prof_shifted_unbiased_cnt[PROF_SC_NSIZES];
+
+/******************************************************************************/
+/* Red-black trees. */
+
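+/*
+ * The comparators below all use the branchless (a > b) - (a < b) idiom,
+ * which yields -1, 0, or 1 without the wraparound hazard of returning a - b
+ * on 64-bit operands; e.g. a == 5, b == 3 evaluates to 1 - 0 == 1.
+ */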
+static int
+prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b) {
+ uint64_t a_thr_uid = a->thr_uid;
+ uint64_t b_thr_uid = b->thr_uid;
+ int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
+ if (ret == 0) {
+ uint64_t a_thr_discrim = a->thr_discrim;
+ uint64_t b_thr_discrim = b->thr_discrim;
+ ret = (a_thr_discrim > b_thr_discrim) - (a_thr_discrim <
+ b_thr_discrim);
+ if (ret == 0) {
+ uint64_t a_tctx_uid = a->tctx_uid;
+ uint64_t b_tctx_uid = b->tctx_uid;
+ ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid <
+ b_tctx_uid);
+ }
+ }
+ return ret;
+}
+
+rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
+ tctx_link, prof_tctx_comp)
+
+static int
+prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b) {
+ unsigned a_len = a->bt.len;
+ unsigned b_len = b->bt.len;
+ unsigned comp_len = (a_len < b_len) ? a_len : b_len;
+ int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
+ if (ret == 0) {
+ ret = (a_len > b_len) - (a_len < b_len);
+ }
+ return ret;
+}
+
+rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
+ prof_gctx_comp)
+
+static int
+prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b) {
+ int ret;
+ uint64_t a_uid = a->thr_uid;
+ uint64_t b_uid = b->thr_uid;
+
+ ret = ((a_uid > b_uid) - (a_uid < b_uid));
+ if (ret == 0) {
+ uint64_t a_discrim = a->thr_discrim;
+ uint64_t b_discrim = b->thr_discrim;
+
+ ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
+ }
+ return ret;
+}
+
+rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
+ prof_tdata_comp)
+
+/******************************************************************************/
+
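+/*
+ * Lock striping: instead of one mutex per gctx/tdata, a fixed-size table of
+ * locks is shared among them.  With, say, PROF_NTDATA_LOCKS == 256 (the
+ * value is configuration-dependent), thr_uid 513 maps to tdata_locks[1];
+ * collisions merely serialize operations on unrelated objects, which the
+ * comments above argue is safe for both tables.
+ */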
+static malloc_mutex_t *
+prof_gctx_mutex_choose(void) {
+ unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED);
+
+ return &gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS];
+}
+
+static malloc_mutex_t *
+prof_tdata_mutex_choose(uint64_t thr_uid) {
+ return &tdata_locks[thr_uid % PROF_NTDATA_LOCKS];
+}
+
+bool
+prof_data_init(tsd_t *tsd) {
+ tdata_tree_new(&tdatas);
+ return ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS,
+ prof_bt_hash, prof_bt_keycomp);
+}
+
+static void
+prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
+ cassert(config_prof);
+ assert(tdata == prof_tdata_get(tsd, false));
+
+ if (tdata != NULL) {
+ assert(!tdata->enq);
+ tdata->enq = true;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
+}
+
+static void
+prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
+ cassert(config_prof);
+ assert(tdata == prof_tdata_get(tsd, false));
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
+
+ if (tdata != NULL) {
+ bool idump, gdump;
+
+ assert(tdata->enq);
+ tdata->enq = false;
+ idump = tdata->enq_idump;
+ tdata->enq_idump = false;
+ gdump = tdata->enq_gdump;
+ tdata->enq_gdump = false;
+
+ if (idump) {
+ prof_idump(tsd_tsdn(tsd));
+ }
+ if (gdump) {
+ prof_gdump(tsd_tsdn(tsd));
+ }
+ }
+}
+
+static prof_gctx_t *
+prof_gctx_create(tsdn_t *tsdn, prof_bt_t *bt) {
+ /*
+ * Create a single allocation that has space for vec of length bt->len.
+ */
+ size_t size = offsetof(prof_gctx_t, vec) + (bt->len * sizeof(void *));
+ prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsdn, size,
+ sz_size2index(size), false, NULL, true, arena_get(TSDN_NULL, 0, true),
+ true);
+ if (gctx == NULL) {
+ return NULL;
+ }
+ gctx->lock = prof_gctx_mutex_choose();
+ /*
+ * Set nlimbo to 1, in order to avoid a race condition with
+ * prof_tctx_destroy()/prof_gctx_try_destroy().
+ */
+ gctx->nlimbo = 1;
+ tctx_tree_new(&gctx->tctxs);
+ /* Duplicate bt. */
+ memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
+ gctx->bt.vec = gctx->vec;
+ gctx->bt.len = bt->len;
+ return gctx;
+}
+
+static void
+prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self,
+ prof_gctx_t *gctx) {
+ cassert(config_prof);
+
+ /*
+ * Check that gctx is still unused by any thread cache before destroying
+ * it. prof_lookup() increments gctx->nlimbo in order to avoid a race
+ * condition with this function, as does prof_tctx_destroy() in order to
+ * avoid a race between the main body of prof_tctx_destroy() and entry
+ * into this function.
+ */
+ prof_enter(tsd, tdata_self);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ assert(gctx->nlimbo != 0);
+ if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
+ /* Remove gctx from bt2gctx. */
+ if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL)) {
+ not_reached();
+ }
+ prof_leave(tsd, tdata_self);
+ /* Destroy gctx. */
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ idalloctm(tsd_tsdn(tsd), gctx, NULL, NULL, true, true);
+ } else {
+ /*
+ * Compensate for increment in prof_tctx_destroy() or
+ * prof_lookup().
+ */
+ gctx->nlimbo--;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ prof_leave(tsd, tdata_self);
+ }
+}
+
+static bool
+prof_gctx_should_destroy(prof_gctx_t *gctx) {
+ if (opt_prof_accum) {
+ return false;
+ }
+ if (!tctx_tree_empty(&gctx->tctxs)) {
+ return false;
+ }
+ if (gctx->nlimbo != 0) {
+ return false;
+ }
+ return true;
+}
+
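+/*
+ * prof_lookup_global() follows an optimistic check/create/recheck pattern:
+ * search bt2gctx under the global lock; on a miss, drop the lock to allocate
+ * a candidate gctx (allocation can itself enter the allocator), retake the
+ * lock, and search again.  Whichever thread loses the insertion race frees
+ * its speculative allocation.
+ */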
+static bool
+prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
+ void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) {
+ union {
+ prof_gctx_t *p;
+ void *v;
+ } gctx, tgctx;
+ union {
+ prof_bt_t *p;
+ void *v;
+ } btkey;
+ bool new_gctx;
+
+ prof_enter(tsd, tdata);
+ if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
+ /* bt has never been seen before. Insert it. */
+ prof_leave(tsd, tdata);
+ tgctx.p = prof_gctx_create(tsd_tsdn(tsd), bt);
+ if (tgctx.v == NULL) {
+ return true;
+ }
+ prof_enter(tsd, tdata);
+ if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
+ gctx.p = tgctx.p;
+ btkey.p = &gctx.p->bt;
+ if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
+ /* OOM. */
+ prof_leave(tsd, tdata);
+ idalloctm(tsd_tsdn(tsd), gctx.v, NULL, NULL,
+ true, true);
+ return true;
+ }
+ new_gctx = true;
+ } else {
+ new_gctx = false;
+ }
+ } else {
+ tgctx.v = NULL;
+ new_gctx = false;
+ }
+
+ if (!new_gctx) {
+ /*
+ * Increment nlimbo, in order to avoid a race condition with
+ * prof_tctx_destroy()/prof_gctx_try_destroy().
+ */
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx.p->lock);
+ gctx.p->nlimbo++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx.p->lock);
+ new_gctx = false;
+
+ if (tgctx.v != NULL) {
+ /* Lost race to insert. */
+ idalloctm(tsd_tsdn(tsd), tgctx.v, NULL, NULL, true,
+ true);
+ }
+ }
+ prof_leave(tsd, tdata);
+
+ *p_btkey = btkey.v;
+ *p_gctx = gctx.p;
+ *p_new_gctx = new_gctx;
+ return false;
+}
+
+prof_tctx_t *
+prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
+ union {
+ prof_tctx_t *p;
+ void *v;
+ } ret;
+ prof_tdata_t *tdata;
+ bool not_found;
+
+ cassert(config_prof);
+
+ tdata = prof_tdata_get(tsd, false);
+ assert(tdata != NULL);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
+ if (!not_found) { /* Note double negative! */
+ ret.p->prepared = true;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (not_found) {
+ void *btkey;
+ prof_gctx_t *gctx;
+ bool new_gctx, error;
+
+ /*
+ * This thread's cache lacks bt. Look for it in the global
+ * cache.
+ */
+ if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
+ &new_gctx)) {
+ return NULL;
+ }
+
+ /* Link a prof_tctx_t into gctx for this thread. */
+ ret.v = iallocztm(tsd_tsdn(tsd), sizeof(prof_tctx_t),
+ sz_size2index(sizeof(prof_tctx_t)), false, NULL, true,
+ arena_ichoose(tsd, NULL), true);
+ if (ret.p == NULL) {
+ if (new_gctx) {
+ prof_gctx_try_destroy(tsd, tdata, gctx);
+ }
+ return NULL;
+ }
+ ret.p->tdata = tdata;
+ ret.p->thr_uid = tdata->thr_uid;
+ ret.p->thr_discrim = tdata->thr_discrim;
+ ret.p->recent_count = 0;
+ memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
+ ret.p->gctx = gctx;
+ ret.p->tctx_uid = tdata->tctx_uid_next++;
+ ret.p->prepared = true;
+ ret.p->state = prof_tctx_state_initializing;
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (error) {
+ if (new_gctx) {
+ prof_gctx_try_destroy(tsd, tdata, gctx);
+ }
+ idalloctm(tsd_tsdn(tsd), ret.v, NULL, NULL, true, true);
+ return NULL;
+ }
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ ret.p->state = prof_tctx_state_nominal;
+ tctx_tree_insert(&gctx->tctxs, ret.p);
+ gctx->nlimbo--;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ }
+
+ return ret.p;
+}
+
+/* Used in unit tests. */
+static prof_tdata_t *
+prof_tdata_count_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
+ void *arg) {
+ size_t *tdata_count = (size_t *)arg;
+
+ (*tdata_count)++;
+
+ return NULL;
+}
+
+/* Used in unit tests. */
+size_t
+prof_tdata_count(void) {
+ size_t tdata_count = 0;
+ tsdn_t *tsdn;
+
+ tsdn = tsdn_fetch();
+ malloc_mutex_lock(tsdn, &tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
+ (void *)&tdata_count);
+ malloc_mutex_unlock(tsdn, &tdatas_mtx);
+
+ return tdata_count;
+}
+
+/* Used in unit tests. */
+size_t
+prof_bt_count(void) {
+ size_t bt_count;
+ tsd_t *tsd;
+ prof_tdata_t *tdata;
+
+ tsd = tsd_fetch();
+ tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL) {
+ return 0;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
+ bt_count = ckh_count(&bt2gctx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &bt2gctx_mtx);
+
+ return bt_count;
+}
+
+char *
+prof_thread_name_alloc(tsd_t *tsd, const char *thread_name) {
+ char *ret;
+ size_t size;
+
+ if (thread_name == NULL) {
+ return NULL;
+ }
+
+ size = strlen(thread_name) + 1;
+ if (size == 1) {
+ return "";
+ }
+
+ ret = iallocztm(tsd_tsdn(tsd), size, sz_size2index(size), false, NULL,
+ true, arena_get(TSDN_NULL, 0, true), true);
+ if (ret == NULL) {
+ return NULL;
+ }
+ memcpy(ret, thread_name, size);
+ return ret;
+}
+
+int
+prof_thread_name_set_impl(tsd_t *tsd, const char *thread_name) {
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+
+ prof_tdata_t *tdata;
+ unsigned i;
+ char *s;
+
+ tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL) {
+ return EAGAIN;
+ }
+
+ /* Validate input. */
+ if (thread_name == NULL) {
+ return EFAULT;
+ }
+ for (i = 0; thread_name[i] != '\0'; i++) {
+ char c = thread_name[i];
+ if (!isgraph(c) && !isblank(c)) {
+ return EFAULT;
+ }
+ }
+
+ s = prof_thread_name_alloc(tsd, thread_name);
+ if (s == NULL) {
+ return EAGAIN;
+ }
+
+ if (tdata->thread_name != NULL) {
+ idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
+ true);
+ tdata->thread_name = NULL;
+ }
+ if (strlen(s) > 0) {
+ tdata->thread_name = s;
+ }
+ return 0;
+}
+
+JEMALLOC_FORMAT_PRINTF(3, 4)
+static void
+prof_dump_printf(write_cb_t *prof_dump_write, void *cbopaque,
+ const char *format, ...) {
+ va_list ap;
+ char buf[PROF_PRINTF_BUFSIZE];
+
+ va_start(ap, format);
+ malloc_vsnprintf(buf, sizeof(buf), format, ap);
+ va_end(ap);
+ prof_dump_write(cbopaque, buf);
+}
+
+/*
+ * Casting a double to a uint64_t may not necessarily be in range; this can be
+ * UB. I don't think this is practically possible with the cur counters, but
+ * plausibly could be with the accum counters.
+ */
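+/*
+ * For instance, prof_double_uint64_cast(NAN) clamps to UINT64_MAX:
+ * round(NAN) is NaN, NaN < (double)UINT64_MAX is false, so the negated
+ * comparison takes the clamping branch.
+ */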
+#ifdef JEMALLOC_PROF
+static uint64_t
+prof_double_uint64_cast(double d) {
+ /*
+ * Note: UINT64_MAX + 1 is exactly representable as a double on all
+ * reasonable platforms (certainly those we'll support). Writing this
+ * as !(a < b) instead of (a >= b) means that we're NaN-safe.
+ */
+ double rounded = round(d);
+ if (!(rounded < (double)UINT64_MAX)) {
+ return UINT64_MAX;
+ }
+ return (uint64_t)rounded;
+}
+#endif
+
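+/*
+ * For a feel of the magnitudes (illustrative only): when sz == rate,
+ * div_val == 1 - e^-1 ~= 0.632 and unbiased_sz ~= 1.58 * sz; when
+ * sz << rate, div_val ~= sz/rate and unbiased_sz ~= rate, i.e. each tiny
+ * sampled object stands in for roughly one full sampling period of bytes.
+ */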
+void
+prof_unbias_map_init() {
+ /* See the comment in prof_sample_new_event_wait(). */
+#ifdef JEMALLOC_PROF
+ for (szind_t i = 0; i < SC_NSIZES; i++) {
+ double sz = (double)sz_index2size(i);
+ double rate = (double)(ZU(1) << lg_prof_sample);
+ double div_val = 1.0 - exp(-sz / rate);
+ double unbiased_sz = sz / div_val;
+ /*
+ * The "true" right value for the unbiased count is
+ * 1.0/(1 - exp(-sz/rate)). The problem is, we keep the counts
+ * as integers (for a variety of reasons -- rounding errors
+ * could trigger asserts, and not all libcs can properly handle
+ * floating point arithmetic during malloc calls inside libc).
+ * Rounding to an integer, though, can lead to rounding errors
+ * of over 30% for sizes close to the sampling rate. So
+ * instead, we multiply by a constant, dividing the maximum
+ * possible roundoff error by that constant. To avoid overflow
+ * in summing up size_t values, the largest safe constant we can
+ * pick is the size of the smallest allocation.
+ */
+ double cnt_shift = (double)(ZU(1) << SC_LG_TINY_MIN);
+ double shifted_unbiased_cnt = cnt_shift / div_val;
+ prof_unbiased_sz[i] = (size_t)round(unbiased_sz);
+ prof_shifted_unbiased_cnt[i] = (size_t)round(
+ shifted_unbiased_cnt);
+ }
+#else
+ unreachable();
+#endif
+}
+
+/*
+ * The unbiasing story is long. The jeprof unbiasing logic was copied from
+ * pprof. Both shared an issue: they unbiased using the average size of the
+ * allocations at a particular stack trace. This can work out OK if allocations
+ * are mostly of the same size given some stack, but not otherwise. We now
+ * internally track what the unbiased results ought to be. We can't just report
+ * them as they are though; they'll still go through the jeprof unbiasing
+ * process. Instead, we figure out what values we can feed *into* jeprof's
+ * unbiasing mechanism that will lead to getting the right values out.
+ *
+ * It'll unbias count and aggregate size as:
+ *
+ * c_out = c_in * 1/(1-exp(-s_in/c_in/R))
+ * s_out = s_in * 1/(1-exp(-s_in/c_in/R))
+ *
+ * We want to solve for the values of c_in and s_in that will
+ * give the c_out and s_out that we've computed internally.
+ *
+ * Let's do a change of variables (both to make the math easier and to make it
+ * easier to write):
+ * x = s_in / c_in
+ * y = s_in
+ * k = 1/R.
+ *
+ * Then
+ * c_out = y/x * 1/(1-exp(-k*x))
+ * s_out = y * 1/(1-exp(-k*x))
+ *
+ * The first equation gives:
+ * y = x * c_out * (1-exp(-k*x))
+ * The second gives:
+ * y = s_out * (1-exp(-k*x))
+ * So we have
+ * x = s_out / c_out.
+ * And all the other values fall out from that.
+ *
+ * This is all a fair bit of work. The thing we get out of it is that we don't
+ * break backwards compatibility with jeprof (and the various tools that have
+ * copied its unbiasing logic). Eventually, we anticipate a v3 heap profile
+ * dump format based on JSON, at which point I think much of this logic can get
+ * cleaned up (since we'll be taking a compatibility break there anyway).
+ */
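+/*
+ * As a quick check of the inversion: c_in = y/x and s_in = y give
+ * s_in/c_in = x, so jeprof's factor is 1/(1-exp(-k*x)); then
+ * c_in * factor = (y/x)/(1-exp(-k*x)) = s_out/x = c_out, and
+ * s_in * factor = s_out, as intended.
+ */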
+static void
+prof_do_unbias(uint64_t c_out_shifted_i, uint64_t s_out_i, uint64_t *r_c_in,
+ uint64_t *r_s_in) {
+#ifdef JEMALLOC_PROF
+ if (c_out_shifted_i == 0 || s_out_i == 0) {
+ *r_c_in = 0;
+ *r_s_in = 0;
+ return;
+ }
+ /*
+ * See the note in prof_unbias_map_init() to see why we take c_out in a
+ * shifted form.
+ */
+ double c_out = (double)c_out_shifted_i
+ / (double)(ZU(1) << SC_LG_TINY_MIN);
+ double s_out = (double)s_out_i;
+ double R = (double)(ZU(1) << lg_prof_sample);
+
+ double x = s_out / c_out;
+ double y = s_out * (1.0 - exp(-x / R));
+
+ double c_in = y / x;
+ double s_in = y;
+
+ *r_c_in = prof_double_uint64_cast(c_in);
+ *r_s_in = prof_double_uint64_cast(s_in);
+#else
+ unreachable();
+#endif
+}
+
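+/*
+ * Emits counts in the profile's "curobjs: curbytes [accumobjs: accumbytes]"
+ * shape; e.g. a context with 3 live sampled objects totaling 12288 bytes and
+ * accumulation disabled prints as "3: 12288 [0: 0]".
+ */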
+static void
+prof_dump_print_cnts(write_cb_t *prof_dump_write, void *cbopaque,
+ const prof_cnt_t *cnts) {
+ uint64_t curobjs;
+ uint64_t curbytes;
+ uint64_t accumobjs;
+ uint64_t accumbytes;
+ if (opt_prof_unbias) {
+ prof_do_unbias(cnts->curobjs_shifted_unbiased,
+ cnts->curbytes_unbiased, &curobjs, &curbytes);
+ prof_do_unbias(cnts->accumobjs_shifted_unbiased,
+ cnts->accumbytes_unbiased, &accumobjs, &accumbytes);
+ } else {
+ curobjs = cnts->curobjs;
+ curbytes = cnts->curbytes;
+ accumobjs = cnts->accumobjs;
+ accumbytes = cnts->accumbytes;
+ }
+ prof_dump_printf(prof_dump_write, cbopaque,
+ "%"FMTu64": %"FMTu64" [%"FMTu64": %"FMTu64"]",
+ curobjs, curbytes, accumobjs, accumbytes);
+}
+
+static void
+prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) {
+ malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
+
+ malloc_mutex_lock(tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_initializing:
+ malloc_mutex_unlock(tsdn, tctx->gctx->lock);
+ return;
+ case prof_tctx_state_nominal:
+ tctx->state = prof_tctx_state_dumping;
+ malloc_mutex_unlock(tsdn, tctx->gctx->lock);
+
+ memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));
+
+ tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
+ tdata->cnt_summed.curobjs_shifted_unbiased
+ += tctx->dump_cnts.curobjs_shifted_unbiased;
+ tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
+ tdata->cnt_summed.curbytes_unbiased
+ += tctx->dump_cnts.curbytes_unbiased;
+ if (opt_prof_accum) {
+ tdata->cnt_summed.accumobjs +=
+ tctx->dump_cnts.accumobjs;
+ tdata->cnt_summed.accumobjs_shifted_unbiased +=
+ tctx->dump_cnts.accumobjs_shifted_unbiased;
+ tdata->cnt_summed.accumbytes +=
+ tctx->dump_cnts.accumbytes;
+ tdata->cnt_summed.accumbytes_unbiased +=
+ tctx->dump_cnts.accumbytes_unbiased;
+ }
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ not_reached();
+ }
+}
+
+static void
+prof_tctx_merge_gctx(tsdn_t *tsdn, prof_tctx_t *tctx, prof_gctx_t *gctx) {
+ malloc_mutex_assert_owner(tsdn, gctx->lock);
+
+ gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
+ gctx->cnt_summed.curobjs_shifted_unbiased
+ += tctx->dump_cnts.curobjs_shifted_unbiased;
+ gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
+ gctx->cnt_summed.curbytes_unbiased += tctx->dump_cnts.curbytes_unbiased;
+ if (opt_prof_accum) {
+ gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
+ gctx->cnt_summed.accumobjs_shifted_unbiased
+ += tctx->dump_cnts.accumobjs_shifted_unbiased;
+ gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
+ gctx->cnt_summed.accumbytes_unbiased
+ += tctx->dump_cnts.accumbytes_unbiased;
+ }
+}
+
+static prof_tctx_t *
+prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
+ tsdn_t *tsdn = (tsdn_t *)arg;
+
+ malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ /* New since dumping started; ignore. */
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ prof_tctx_merge_gctx(tsdn, tctx, tctx->gctx);
+ break;
+ default:
+ not_reached();
+ }
+
+ return NULL;
+}
+
+typedef struct prof_dump_iter_arg_s prof_dump_iter_arg_t;
+struct prof_dump_iter_arg_s {
+ tsdn_t *tsdn;
+ write_cb_t *prof_dump_write;
+ void *cbopaque;
+};
+
+static prof_tctx_t *
+prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *opaque) {
+ prof_dump_iter_arg_t *arg = (prof_dump_iter_arg_t *)opaque;
+ malloc_mutex_assert_owner(arg->tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_initializing:
+ case prof_tctx_state_nominal:
+ /* Not captured by this dump. */
+ break;
+ case prof_tctx_state_dumping:
+ case prof_tctx_state_purgatory:
+ prof_dump_printf(arg->prof_dump_write, arg->cbopaque,
+ " t%"FMTu64": ", tctx->thr_uid);
+ prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque,
+ &tctx->dump_cnts);
+ arg->prof_dump_write(arg->cbopaque, "\n");
+ break;
+ default:
+ not_reached();
+ }
+ return NULL;
+}
+
+static prof_tctx_t *
+prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg) {
+ tsdn_t *tsdn = (tsdn_t *)arg;
+ prof_tctx_t *ret;
+
+ malloc_mutex_assert_owner(tsdn, tctx->gctx->lock);
+
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ /* New since dumping started; ignore. */
+ break;
+ case prof_tctx_state_dumping:
+ tctx->state = prof_tctx_state_nominal;
+ break;
+ case prof_tctx_state_purgatory:
+ ret = tctx;
+ goto label_return;
+ default:
+ not_reached();
+ }
+
+ ret = NULL;
+label_return:
+ return ret;
+}
+
+static void
+prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) {
+ cassert(config_prof);
+
+ malloc_mutex_lock(tsdn, gctx->lock);
+
+ /*
+ * Increment nlimbo so that gctx won't go away before dump.
+ * Additionally, link gctx into the dump list so that it is included in
+ * prof_dump()'s second pass.
+ */
+ gctx->nlimbo++;
+ gctx_tree_insert(gctxs, gctx);
+
+ memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));
+
+ malloc_mutex_unlock(tsdn, gctx->lock);
+}
+
+typedef struct prof_gctx_merge_iter_arg_s prof_gctx_merge_iter_arg_t;
+struct prof_gctx_merge_iter_arg_s {
+ tsdn_t *tsdn;
+ size_t *leak_ngctx;
+};
+
+static prof_gctx_t *
+prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
+ prof_gctx_merge_iter_arg_t *arg = (prof_gctx_merge_iter_arg_t *)opaque;
+
+ malloc_mutex_lock(arg->tsdn, gctx->lock);
+ tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter,
+ (void *)arg->tsdn);
+ if (gctx->cnt_summed.curobjs != 0) {
+ (*arg->leak_ngctx)++;
+ }
+ malloc_mutex_unlock(arg->tsdn, gctx->lock);
+
+ return NULL;
+}
+
+static void
+prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs) {
+ prof_tdata_t *tdata = prof_tdata_get(tsd, false);
+ prof_gctx_t *gctx;
+
+ /*
+ * Standard tree iteration won't work here, because as soon as we
+ * decrement gctx->nlimbo and unlock gctx, another thread can
+ * concurrently destroy it, which will corrupt the tree. Therefore,
+ * tear down the tree one node at a time during iteration.
+ */
+ while ((gctx = gctx_tree_first(gctxs)) != NULL) {
+ gctx_tree_remove(gctxs, gctx);
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ {
+ prof_tctx_t *next;
+
+ next = NULL;
+ do {
+ prof_tctx_t *to_destroy =
+ tctx_tree_iter(&gctx->tctxs, next,
+ prof_tctx_finish_iter,
+ (void *)tsd_tsdn(tsd));
+ if (to_destroy != NULL) {
+ next = tctx_tree_next(&gctx->tctxs,
+ to_destroy);
+ tctx_tree_remove(&gctx->tctxs,
+ to_destroy);
+ idalloctm(tsd_tsdn(tsd), to_destroy,
+ NULL, NULL, true, true);
+ } else {
+ next = NULL;
+ }
+ } while (next != NULL);
+ }
+ gctx->nlimbo--;
+ if (prof_gctx_should_destroy(gctx)) {
+ gctx->nlimbo++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ prof_gctx_try_destroy(tsd, tdata, gctx);
+ } else {
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ }
+ }
+}
+
+typedef struct prof_tdata_merge_iter_arg_s prof_tdata_merge_iter_arg_t;
+struct prof_tdata_merge_iter_arg_s {
+ tsdn_t *tsdn;
+ prof_cnt_t *cnt_all;
+};
+
+static prof_tdata_t *
+prof_tdata_merge_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
+ void *opaque) {
+ prof_tdata_merge_iter_arg_t *arg =
+ (prof_tdata_merge_iter_arg_t *)opaque;
+
+ malloc_mutex_lock(arg->tsdn, tdata->lock);
+ if (!tdata->expired) {
+ size_t tabind;
+ union {
+ prof_tctx_t *p;
+ void *v;
+ } tctx;
+
+ tdata->dumping = true;
+ memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
+ for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
+ &tctx.v);) {
+ prof_tctx_merge_tdata(arg->tsdn, tctx.p, tdata);
+ }
+
+ arg->cnt_all->curobjs += tdata->cnt_summed.curobjs;
+ arg->cnt_all->curobjs_shifted_unbiased
+ += tdata->cnt_summed.curobjs_shifted_unbiased;
+ arg->cnt_all->curbytes += tdata->cnt_summed.curbytes;
+ arg->cnt_all->curbytes_unbiased
+ += tdata->cnt_summed.curbytes_unbiased;
+ if (opt_prof_accum) {
+ arg->cnt_all->accumobjs += tdata->cnt_summed.accumobjs;
+ arg->cnt_all->accumobjs_shifted_unbiased
+ += tdata->cnt_summed.accumobjs_shifted_unbiased;
+ arg->cnt_all->accumbytes +=
+ tdata->cnt_summed.accumbytes;
+ arg->cnt_all->accumbytes_unbiased +=
+ tdata->cnt_summed.accumbytes_unbiased;
+ }
+ } else {
+ tdata->dumping = false;
+ }
+ malloc_mutex_unlock(arg->tsdn, tdata->lock);
+
+ return NULL;
+}
+
+static prof_tdata_t *
+prof_tdata_dump_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
+ void *opaque) {
+ if (!tdata->dumping) {
+ return NULL;
+ }
+
+ prof_dump_iter_arg_t *arg = (prof_dump_iter_arg_t *)opaque;
+ prof_dump_printf(arg->prof_dump_write, arg->cbopaque, " t%"FMTu64": ",
+ tdata->thr_uid);
+ prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque,
+ &tdata->cnt_summed);
+ if (tdata->thread_name != NULL) {
+ arg->prof_dump_write(arg->cbopaque, " ");
+ arg->prof_dump_write(arg->cbopaque, tdata->thread_name);
+ }
+ arg->prof_dump_write(arg->cbopaque, "\n");
+ return NULL;
+}
+
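+/*
+ * Together, the writers below emit the textual "heap_v2" profile that jeprof
+ * consumes.  A minimal dump might look roughly like this (made-up values and
+ * addresses; exact indentation follows the format strings):
+ *
+ *   heap_v2/524288
+ *    t*: 94: 385024 [0: 0]
+ *    t0: 77: 315392 [0: 0] worker
+ *   @ 0x401234 0x401567
+ *    t*: 3: 12288 [0: 0]
+ *     t0: 3: 12288 [0: 0]
+ */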
+static void
+prof_dump_header(prof_dump_iter_arg_t *arg, const prof_cnt_t *cnt_all) {
+ prof_dump_printf(arg->prof_dump_write, arg->cbopaque,
+ "heap_v2/%"FMTu64"\n t*: ", ((uint64_t)1U << lg_prof_sample));
+ prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque, cnt_all);
+ arg->prof_dump_write(arg->cbopaque, "\n");
+
+ malloc_mutex_lock(arg->tsdn, &tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter, arg);
+ malloc_mutex_unlock(arg->tsdn, &tdatas_mtx);
+}
+
+static void
+prof_dump_gctx(prof_dump_iter_arg_t *arg, prof_gctx_t *gctx,
+ const prof_bt_t *bt, prof_gctx_tree_t *gctxs) {
+ cassert(config_prof);
+ malloc_mutex_assert_owner(arg->tsdn, gctx->lock);
+
+ /* Avoid dumping gctx's that have no useful data. */
+ if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
+ (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
+ assert(gctx->cnt_summed.curobjs == 0);
+ assert(gctx->cnt_summed.curbytes == 0);
+ /*
+ * These asserts would not be correct -- see the comment on races
+ * in prof.c
+ * assert(gctx->cnt_summed.curobjs_unbiased == 0);
+ * assert(gctx->cnt_summed.curbytes_unbiased == 0);
+ */
+ assert(gctx->cnt_summed.accumobjs == 0);
+ assert(gctx->cnt_summed.accumobjs_shifted_unbiased == 0);
+ assert(gctx->cnt_summed.accumbytes == 0);
+ assert(gctx->cnt_summed.accumbytes_unbiased == 0);
+ return;
+ }
+
+ arg->prof_dump_write(arg->cbopaque, "@");
+ for (unsigned i = 0; i < bt->len; i++) {
+ prof_dump_printf(arg->prof_dump_write, arg->cbopaque,
+ " %#"FMTxPTR, (uintptr_t)bt->vec[i]);
+ }
+
+ arg->prof_dump_write(arg->cbopaque, "\n t*: ");
+ prof_dump_print_cnts(arg->prof_dump_write, arg->cbopaque,
+ &gctx->cnt_summed);
+ arg->prof_dump_write(arg->cbopaque, "\n");
+
+ tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter, arg);
+}
+
+/*
+ * See prof_sample_new_event_wait() comment for why the body of this function
+ * is conditionally compiled.
+ */
+static void
+prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx) {
+#ifdef JEMALLOC_PROF
+ /*
+ * Scaling is equivalent to AdjustSamples() in jeprof, but the result may
+ * differ slightly from what jeprof reports, because here we scale the
+ * summary values, whereas jeprof scales each context individually and
+ * reports the sums of the scaled values.
+ */
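+ /*
+ * Worked example (made-up numbers): with lg_prof_sample == 19, i.e.
+ * sample_period == 524288, curbytes == 1048576 and curobjs == 4 give
+ * ratio == (1048576/4)/524288 == 0.5 and scale_factor == 1/(1-e^-0.5)
+ * ~= 2.54, so the summary reports ~2664960 bytes in ~10 objects.
+ */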
+ if (cnt_all->curbytes != 0) {
+ double sample_period = (double)((uint64_t)1 << lg_prof_sample);
+ double ratio = (((double)cnt_all->curbytes) /
+ (double)cnt_all->curobjs) / sample_period;
+ double scale_factor = 1.0 / (1.0 - exp(-ratio));
+ uint64_t curbytes = (uint64_t)round(((double)cnt_all->curbytes)
+ * scale_factor);
+ uint64_t curobjs = (uint64_t)round(((double)cnt_all->curobjs) *
+ scale_factor);
+
+ malloc_printf("<jemalloc>: Leak approximation summary: ~%"FMTu64
+ " byte%s, ~%"FMTu64" object%s, >= %zu context%s\n",
+ curbytes, (curbytes != 1) ? "s" : "", curobjs, (curobjs !=
+ 1) ? "s" : "", leak_ngctx, (leak_ngctx != 1) ? "s" : "");
+ malloc_printf(
+ "<jemalloc>: Run jeprof on dump output for leak detail\n");
+ if (opt_prof_leak_error) {
+ malloc_printf(
+ "<jemalloc>: Exiting with error code because memory"
+ " leaks were detected\n");
+ /*
+ * Use _exit() with underscore to avoid calling atexit()
+ * and entering an endless cycle.
+ */
+ _exit(1);
+ }
+ }
+#endif
+}
+
+static prof_gctx_t *
+prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *opaque) {
+ prof_dump_iter_arg_t *arg = (prof_dump_iter_arg_t *)opaque;
+ malloc_mutex_lock(arg->tsdn, gctx->lock);
+ prof_dump_gctx(arg, gctx, &gctx->bt, gctxs);
+ malloc_mutex_unlock(arg->tsdn, gctx->lock);
+ return NULL;
+}
+
+static void
+prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata, prof_cnt_t *cnt_all,
+ size_t *leak_ngctx, prof_gctx_tree_t *gctxs) {
+ size_t tabind;
+ union {
+ prof_gctx_t *p;
+ void *v;
+ } gctx;
+
+ prof_enter(tsd, tdata);
+
+ /*
+ * Put gctx's in limbo and clear their counters in preparation for
+ * summing.
+ */
+ gctx_tree_new(gctxs);
+ for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);) {
+ prof_dump_gctx_prep(tsd_tsdn(tsd), gctx.p, gctxs);
+ }
+
+ /*
+ * Iterate over tdatas, and for the non-expired ones snapshot their tctx
+ * stats and merge them into the associated gctx's.
+ */
+ memset(cnt_all, 0, sizeof(prof_cnt_t));
+ prof_tdata_merge_iter_arg_t prof_tdata_merge_iter_arg = {tsd_tsdn(tsd),
+ cnt_all};
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter,
+ &prof_tdata_merge_iter_arg);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+
+ /* Merge tctx stats into gctx's. */
+ *leak_ngctx = 0;
+ prof_gctx_merge_iter_arg_t prof_gctx_merge_iter_arg = {tsd_tsdn(tsd),
+ leak_ngctx};
+ gctx_tree_iter(gctxs, NULL, prof_gctx_merge_iter,
+ &prof_gctx_merge_iter_arg);
+
+ prof_leave(tsd, tdata);
+}
+
+void
+prof_dump_impl(tsd_t *tsd, write_cb_t *prof_dump_write, void *cbopaque,
+ prof_tdata_t *tdata, bool leakcheck) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_dump_mtx);
+ prof_cnt_t cnt_all;
+ size_t leak_ngctx;
+ prof_gctx_tree_t gctxs;
+ prof_dump_prep(tsd, tdata, &cnt_all, &leak_ngctx, &gctxs);
+ prof_dump_iter_arg_t prof_dump_iter_arg = {tsd_tsdn(tsd),
+ prof_dump_write, cbopaque};
+ prof_dump_header(&prof_dump_iter_arg, &cnt_all);
+ gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter, &prof_dump_iter_arg);
+ prof_gctx_finish(tsd, &gctxs);
+ if (leakcheck) {
+ prof_leakcheck(&cnt_all, leak_ngctx);
+ }
+}
+
+/* Used in unit tests. */
+void
+prof_cnt_all(prof_cnt_t *cnt_all) {
+ tsd_t *tsd = tsd_fetch();
+ prof_tdata_t *tdata = prof_tdata_get(tsd, false);
+ if (tdata == NULL) {
+ memset(cnt_all, 0, sizeof(prof_cnt_t));
+ } else {
+ size_t leak_ngctx;
+ prof_gctx_tree_t gctxs;
+ prof_dump_prep(tsd, tdata, cnt_all, &leak_ngctx, &gctxs);
+ prof_gctx_finish(tsd, &gctxs);
+ }
+}
+
+void
+prof_bt_hash(const void *key, size_t r_hash[2]) {
+ prof_bt_t *bt = (prof_bt_t *)key;
+
+ cassert(config_prof);
+
+ hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
+}
+
+bool
+prof_bt_keycomp(const void *k1, const void *k2) {
+ const prof_bt_t *bt1 = (prof_bt_t *)k1;
+ const prof_bt_t *bt2 = (prof_bt_t *)k2;
+
+ cassert(config_prof);
+
+ if (bt1->len != bt2->len) {
+ return false;
+ }
+ return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
+}
+
+prof_tdata_t *
+prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
+ char *thread_name, bool active) {
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+
+ prof_tdata_t *tdata;
+
+ cassert(config_prof);
+
+ /* Initialize an empty cache for this thread. */
+ tdata = (prof_tdata_t *)iallocztm(tsd_tsdn(tsd), sizeof(prof_tdata_t),
+ sz_size2index(sizeof(prof_tdata_t)), false, NULL, true,
+ arena_get(TSDN_NULL, 0, true), true);
+ if (tdata == NULL) {
+ return NULL;
+ }
+
+ tdata->lock = prof_tdata_mutex_choose(thr_uid);
+ tdata->thr_uid = thr_uid;
+ tdata->thr_discrim = thr_discrim;
+ tdata->thread_name = thread_name;
+ tdata->attached = true;
+ tdata->expired = false;
+ tdata->tctx_uid_next = 0;
+
+ if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS, prof_bt_hash,
+ prof_bt_keycomp)) {
+ idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
+ return NULL;
+ }
+
+ tdata->enq = false;
+ tdata->enq_idump = false;
+ tdata->enq_gdump = false;
+
+ tdata->dumping = false;
+ tdata->active = active;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ tdata_tree_insert(&tdatas, tdata);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+
+ return tdata;
+}
+
+static bool
+prof_tdata_should_destroy_unlocked(prof_tdata_t *tdata, bool even_if_attached) {
+ if (tdata->attached && !even_if_attached) {
+ return false;
+ }
+ if (ckh_count(&tdata->bt2tctx) != 0) {
+ return false;
+ }
+ return true;
+}
+
+static bool
+prof_tdata_should_destroy(tsdn_t *tsdn, prof_tdata_t *tdata,
+ bool even_if_attached) {
+ malloc_mutex_assert_owner(tsdn, tdata->lock);
+
+ return prof_tdata_should_destroy_unlocked(tdata, even_if_attached);
+}
+
+static void
+prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
+ bool even_if_attached) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &tdatas_mtx);
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tdata->lock);
+
+ tdata_tree_remove(&tdatas, tdata);
+
+ assert(prof_tdata_should_destroy_unlocked(tdata, even_if_attached));
+
+ if (tdata->thread_name != NULL) {
+ idalloctm(tsd_tsdn(tsd), tdata->thread_name, NULL, NULL, true,
+ true);
+ }
+ ckh_delete(tsd, &tdata->bt2tctx);
+ idalloctm(tsd_tsdn(tsd), tdata, NULL, NULL, true, true);
+}
+
+static void
+prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+ prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+}
+
+void
+prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata) {
+ bool destroy_tdata;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), tdata->lock);
+ if (tdata->attached) {
+ destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd), tdata,
+ true);
+ /*
+ * Only detach if !destroy_tdata, because detaching would allow
+ * another thread to win the race to destroy tdata.
+ */
+ if (!destroy_tdata) {
+ tdata->attached = false;
+ }
+ tsd_prof_tdata_set(tsd, NULL);
+ } else {
+ destroy_tdata = false;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (destroy_tdata) {
+ prof_tdata_destroy(tsd, tdata, true);
+ }
+}
+
+static bool
+prof_tdata_expire(tsdn_t *tsdn, prof_tdata_t *tdata) {
+ bool destroy_tdata;
+
+ malloc_mutex_lock(tsdn, tdata->lock);
+ if (!tdata->expired) {
+ tdata->expired = true;
+ destroy_tdata = prof_tdata_should_destroy(tsdn, tdata, false);
+ } else {
+ destroy_tdata = false;
+ }
+ malloc_mutex_unlock(tsdn, tdata->lock);
+
+ return destroy_tdata;
+}
+
+static prof_tdata_t *
+prof_tdata_reset_iter(prof_tdata_tree_t *tdatas_ptr, prof_tdata_t *tdata,
+ void *arg) {
+ tsdn_t *tsdn = (tsdn_t *)arg;
+
+ return (prof_tdata_expire(tsdn, tdata) ? tdata : NULL);
+}
+
+void
+prof_reset(tsd_t *tsd, size_t lg_sample) {
+ prof_tdata_t *next;
+
+ assert(lg_sample < (sizeof(uint64_t) << 3));
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
+ malloc_mutex_lock(tsd_tsdn(tsd), &tdatas_mtx);
+
+ lg_prof_sample = lg_sample;
+ prof_unbias_map_init();
+
+ next = NULL;
+ do {
+ prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
+ prof_tdata_reset_iter, (void *)tsd);
+ if (to_destroy != NULL) {
+ next = tdata_tree_next(&tdatas, to_destroy);
+ prof_tdata_destroy_locked(tsd, to_destroy, false);
+ } else {
+ next = NULL;
+ }
+ } while (next != NULL);
+
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tdatas_mtx);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+}
+
+static bool
+prof_tctx_should_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+
+ if (opt_prof_accum) {
+ return false;
+ }
+ if (tctx->cnts.curobjs != 0) {
+ return false;
+ }
+ if (tctx->prepared) {
+ return false;
+ }
+ if (tctx->recent_count != 0) {
+ return false;
+ }
+ return true;
+}
+
+static void
+prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+
+ assert(tctx->cnts.curobjs == 0);
+ assert(tctx->cnts.curbytes == 0);
+ /*
+ * These asserts are not correct -- see the comment about races in
+ * prof.c
+ *
+ * assert(tctx->cnts.curobjs_shifted_unbiased == 0);
+ * assert(tctx->cnts.curbytes_unbiased == 0);
+ */
+ assert(!opt_prof_accum);
+ assert(tctx->cnts.accumobjs == 0);
+ assert(tctx->cnts.accumbytes == 0);
+ /*
+ * These ones are correct, since accum counts never go down. Either
+ * prof_accum is off (in which case these should never have changed from
+ * their initial value of zero), or it's on (in which case we shouldn't
+ * be destroying this tctx).
+ */
+ assert(tctx->cnts.accumobjs_shifted_unbiased == 0);
+ assert(tctx->cnts.accumbytes_unbiased == 0);
+
+ prof_gctx_t *gctx = tctx->gctx;
+
+ {
+ prof_tdata_t *tdata = tctx->tdata;
+ tctx->tdata = NULL;
+ ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
+ bool destroy_tdata = prof_tdata_should_destroy(tsd_tsdn(tsd),
+ tdata, false);
+ malloc_mutex_unlock(tsd_tsdn(tsd), tdata->lock);
+ if (destroy_tdata) {
+ prof_tdata_destroy(tsd, tdata, false);
+ }
+ }
+
+ bool destroy_tctx, destroy_gctx;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), gctx->lock);
+ switch (tctx->state) {
+ case prof_tctx_state_nominal:
+ tctx_tree_remove(&gctx->tctxs, tctx);
+ destroy_tctx = true;
+ if (prof_gctx_should_destroy(gctx)) {
+ /*
+ * Increment gctx->nlimbo in order to keep another
+ * thread from winning the race to destroy gctx while
+ * this one has gctx->lock dropped. Without this, it
+ * would be possible for another thread to:
+ *
+ * 1) Sample an allocation associated with gctx.
+ * 2) Deallocate the sampled object.
+ * 3) Successfully prof_gctx_try_destroy(gctx).
+ *
+ * The result would be that gctx no longer exists by the
+ * time this thread accesses it in
+ * prof_gctx_try_destroy().
+ */
+ gctx->nlimbo++;
+ destroy_gctx = true;
+ } else {
+ destroy_gctx = false;
+ }
+ break;
+ case prof_tctx_state_dumping:
+ /*
+ * A dumping thread needs tctx to remain valid until dumping
+ * has finished. Change state such that the dumping thread will
+ * complete destruction during a late dump iteration phase.
+ */
+ tctx->state = prof_tctx_state_purgatory;
+ destroy_tctx = false;
+ destroy_gctx = false;
+ break;
+ default:
+ not_reached();
+ destroy_tctx = false;
+ destroy_gctx = false;
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), gctx->lock);
+ if (destroy_gctx) {
+ prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx);
+ }
+ if (destroy_tctx) {
+ idalloctm(tsd_tsdn(tsd), tctx, NULL, NULL, true, true);
+ }
+}
+
+void
+prof_tctx_try_destroy(tsd_t *tsd, prof_tctx_t *tctx) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+ if (prof_tctx_should_destroy(tsd, tctx)) {
+ /* tctx->tdata->lock will be released in prof_tctx_destroy(). */
+ prof_tctx_destroy(tsd, tctx);
+ } else {
+ malloc_mutex_unlock(tsd_tsdn(tsd), tctx->tdata->lock);
+ }
+}
+
+/******************************************************************************/
diff --git a/contrib/jemalloc/src/prof_log.c b/contrib/jemalloc/src/prof_log.c
new file mode 100644
index 000000000000..0632c3b37e55
--- /dev/null
+++ b/contrib/jemalloc/src/prof_log.c
@@ -0,0 +1,717 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/buf_writer.h"
+#include "jemalloc/internal/ckh.h"
+#include "jemalloc/internal/emitter.h"
+#include "jemalloc/internal/hash.h"
+#include "jemalloc/internal/malloc_io.h"
+#include "jemalloc/internal/mutex.h"
+#include "jemalloc/internal/prof_data.h"
+#include "jemalloc/internal/prof_log.h"
+#include "jemalloc/internal/prof_sys.h"
+
+bool opt_prof_log = false;
+typedef enum prof_logging_state_e prof_logging_state_t;
+enum prof_logging_state_e {
+ prof_logging_state_stopped,
+ prof_logging_state_started,
+ prof_logging_state_dumping
+};
+
+/*
+ * - stopped: log_start never called, or previous log_stop has completed.
+ * - started: log_start called, log_stop not called yet. Allocations are logged.
+ * - dumping: log_stop called but not finished; samples are not logged anymore.
+ */
+prof_logging_state_t prof_logging_state = prof_logging_state_stopped;
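+
+/*
+ * The only transitions, all made while holding log_mtx, are thus
+ * stopped -> started (log_start), started -> dumping (log_stop begins),
+ * and dumping -> stopped (log_stop finishes emitting).
+ */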
+
+/* Used in unit tests. */
+static bool prof_log_dummy = false;
+
+/* Incremented for every log file that is output. */
+static uint64_t log_seq = 0;
+static char log_filename[
+ /* Minimize memory bloat for non-prof builds. */
+#ifdef JEMALLOC_PROF
+ PATH_MAX +
+#endif
+ 1];
+
+/* Timestamp for most recent call to log_start(). */
+static nstime_t log_start_timestamp;
+
+/* Increment these when adding to the log_bt and log_thr linked lists. */
+static size_t log_bt_index = 0;
+static size_t log_thr_index = 0;
+
+/* Linked list node definitions. These are only used in this file. */
+typedef struct prof_bt_node_s prof_bt_node_t;
+
+struct prof_bt_node_s {
+ prof_bt_node_t *next;
+ size_t index;
+ prof_bt_t bt;
+ /* Variable size backtrace vector pointed to by bt. */
+ void *vec[1];
+};
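+
+/*
+ * vec[1] is the pre-C99 "struct hack" for a trailing variable-length array:
+ * prof_log_bt_index() allocates each node with
+ * offsetof(prof_bt_node_t, vec) + len * sizeof(void *) bytes, so one
+ * allocation holds both the header and the backtrace frames.
+ */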
+
+typedef struct prof_thr_node_s prof_thr_node_t;
+
+struct prof_thr_node_s {
+ prof_thr_node_t *next;
+ size_t index;
+ uint64_t thr_uid;
+ /* Variable size based on thr_name_sz. */
+ char name[1];
+};
+
+typedef struct prof_alloc_node_s prof_alloc_node_t;
+
+/* This is output when logging sampled allocations. */
+struct prof_alloc_node_s {
+ prof_alloc_node_t *next;
+ /* Indices into an array of thread data. */
+ size_t alloc_thr_ind;
+ size_t free_thr_ind;
+
+ /* Indices into an array of backtraces. */
+ size_t alloc_bt_ind;
+ size_t free_bt_ind;
+
+ uint64_t alloc_time_ns;
+ uint64_t free_time_ns;
+
+ size_t usize;
+};
+
+/*
+ * Created on the first call to prof_try_log and deleted on prof_log_stop.
+ * These are the backtraces and threads that have already been logged by an
+ * allocation.
+ */
+static bool log_tables_initialized = false;
+static ckh_t log_bt_node_set;
+static ckh_t log_thr_node_set;
+
+/* Store linked lists for logged data. */
+static prof_bt_node_t *log_bt_first = NULL;
+static prof_bt_node_t *log_bt_last = NULL;
+static prof_thr_node_t *log_thr_first = NULL;
+static prof_thr_node_t *log_thr_last = NULL;
+static prof_alloc_node_t *log_alloc_first = NULL;
+static prof_alloc_node_t *log_alloc_last = NULL;
+
+/* Protects the prof_logging_state and any log_{...} variable. */
+malloc_mutex_t log_mtx;
+
+/******************************************************************************/
+/*
+ * Function prototypes for static functions that are referenced prior to
+ * definition.
+ */
+
+/* Hashtable functions for log_bt_node_set and log_thr_node_set. */
+static void prof_thr_node_hash(const void *key, size_t r_hash[2]);
+static bool prof_thr_node_keycomp(const void *k1, const void *k2);
+static void prof_bt_node_hash(const void *key, size_t r_hash[2]);
+static bool prof_bt_node_keycomp(const void *k1, const void *k2);
+
+/******************************************************************************/
+
+static size_t
+prof_log_bt_index(tsd_t *tsd, prof_bt_t *bt) {
+ assert(prof_logging_state == prof_logging_state_started);
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
+
+ prof_bt_node_t dummy_node;
+ dummy_node.bt = *bt;
+ prof_bt_node_t *node;
+
+ /* See if this backtrace is already cached in the table. */
+ if (ckh_search(&log_bt_node_set, (void *)(&dummy_node),
+ (void **)(&node), NULL)) {
+ size_t sz = offsetof(prof_bt_node_t, vec) +
+ (bt->len * sizeof(void *));
+ prof_bt_node_t *new_node = (prof_bt_node_t *)
+ iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
+ true, arena_get(TSDN_NULL, 0, true), true);
+ if (log_bt_first == NULL) {
+ log_bt_first = new_node;
+ log_bt_last = new_node;
+ } else {
+ log_bt_last->next = new_node;
+ log_bt_last = new_node;
+ }
+
+ new_node->next = NULL;
+ new_node->index = log_bt_index;
+ /*
+ * Copy the backtrace: bt is inside a tdata or gctx, which
+ * might die before prof_log_stop is called.
+ */
+ new_node->bt.len = bt->len;
+ memcpy(new_node->vec, bt->vec, bt->len * sizeof(void *));
+ new_node->bt.vec = new_node->vec;
+
+ log_bt_index++;
+ ckh_insert(tsd, &log_bt_node_set, (void *)new_node, NULL);
+ return new_node->index;
+ } else {
+ return node->index;
+ }
+}
+
+static size_t
+prof_log_thr_index(tsd_t *tsd, uint64_t thr_uid, const char *name) {
+ assert(prof_logging_state == prof_logging_state_started);
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &log_mtx);
+
+ prof_thr_node_t dummy_node;
+ dummy_node.thr_uid = thr_uid;
+ prof_thr_node_t *node;
+
+ /* See if this thread is already cached in the table. */
+ if (ckh_search(&log_thr_node_set, (void *)(&dummy_node),
+ (void **)(&node), NULL)) {
+ size_t sz = offsetof(prof_thr_node_t, name) + strlen(name) + 1;
+ prof_thr_node_t *new_node = (prof_thr_node_t *)
+ iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL,
+ true, arena_get(TSDN_NULL, 0, true), true);
+ if (log_thr_first == NULL) {
+ log_thr_first = new_node;
+ log_thr_last = new_node;
+ } else {
+ log_thr_last->next = new_node;
+ log_thr_last = new_node;
+ }
+
+ new_node->next = NULL;
+ new_node->index = log_thr_index;
+ new_node->thr_uid = thr_uid;
+ strcpy(new_node->name, name);
+
+ log_thr_index++;
+ ckh_insert(tsd, &log_thr_node_set, (void *)new_node, NULL);
+ return new_node->index;
+ } else {
+ return node->index;
+ }
+}
+
+JEMALLOC_COLD
+void
+prof_try_log(tsd_t *tsd, size_t usize, prof_info_t *prof_info) {
+ cassert(config_prof);
+ prof_tctx_t *tctx = prof_info->alloc_tctx;
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+
+ prof_tdata_t *cons_tdata = prof_tdata_get(tsd, false);
+ if (cons_tdata == NULL) {
+ /*
+ * We decide not to log these allocations. cons_tdata will be
+ * NULL only when the current thread is in a weird state (e.g.
+ * it's being destroyed).
+ */
+ return;
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &log_mtx);
+
+ if (prof_logging_state != prof_logging_state_started) {
+ goto label_done;
+ }
+
+ if (!log_tables_initialized) {
+ bool err1 = ckh_new(tsd, &log_bt_node_set, PROF_CKH_MINITEMS,
+ prof_bt_node_hash, prof_bt_node_keycomp);
+ bool err2 = ckh_new(tsd, &log_thr_node_set, PROF_CKH_MINITEMS,
+ prof_thr_node_hash, prof_thr_node_keycomp);
+ if (err1 || err2) {
+ goto label_done;
+ }
+ log_tables_initialized = true;
+ }
+
+ nstime_t alloc_time = prof_info->alloc_time;
+ nstime_t free_time;
+ nstime_prof_init_update(&free_time);
+
+ size_t sz = sizeof(prof_alloc_node_t);
+ prof_alloc_node_t *new_node = (prof_alloc_node_t *)
+ iallocztm(tsd_tsdn(tsd), sz, sz_size2index(sz), false, NULL, true,
+ arena_get(TSDN_NULL, 0, true), true);
+
+ const char *prod_thr_name = (tctx->tdata->thread_name == NULL) ?
+ "" : tctx->tdata->thread_name;
+ const char *cons_thr_name = prof_thread_name_get(tsd);
+
+ prof_bt_t bt;
+ /* Initialize the backtrace, using the buffer in tdata to store it. */
+ bt_init(&bt, cons_tdata->vec);
+ prof_backtrace(tsd, &bt);
+ prof_bt_t *cons_bt = &bt;
+
+ /* We haven't destroyed tctx yet, so gctx should be good to read. */
+ prof_bt_t *prod_bt = &tctx->gctx->bt;
+
+ new_node->next = NULL;
+ new_node->alloc_thr_ind = prof_log_thr_index(tsd, tctx->tdata->thr_uid,
+ prod_thr_name);
+ new_node->free_thr_ind = prof_log_thr_index(tsd, cons_tdata->thr_uid,
+ cons_thr_name);
+ new_node->alloc_bt_ind = prof_log_bt_index(tsd, prod_bt);
+ new_node->free_bt_ind = prof_log_bt_index(tsd, cons_bt);
+ new_node->alloc_time_ns = nstime_ns(&alloc_time);
+ new_node->free_time_ns = nstime_ns(&free_time);
+ new_node->usize = usize;
+
+ if (log_alloc_first == NULL) {
+ log_alloc_first = new_node;
+ log_alloc_last = new_node;
+ } else {
+ log_alloc_last->next = new_node;
+ log_alloc_last = new_node;
+ }
+
+label_done:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &log_mtx);
+}
+
+static void
+prof_bt_node_hash(const void *key, size_t r_hash[2]) {
+ const prof_bt_node_t *bt_node = (prof_bt_node_t *)key;
+ prof_bt_hash((void *)(&bt_node->bt), r_hash);
+}
+
+static bool
+prof_bt_node_keycomp(const void *k1, const void *k2) {
+ const prof_bt_node_t *bt_node1 = (prof_bt_node_t *)k1;
+ const prof_bt_node_t *bt_node2 = (prof_bt_node_t *)k2;
+ return prof_bt_keycomp((void *)(&bt_node1->bt),
+ (void *)(&bt_node2->bt));
+}
+
+static void
+prof_thr_node_hash(const void *key, size_t r_hash[2]) {
+ const prof_thr_node_t *thr_node = (prof_thr_node_t *)key;
+ hash(&thr_node->thr_uid, sizeof(uint64_t), 0x94122f35U, r_hash);
+}
+
+static bool
+prof_thr_node_keycomp(const void *k1, const void *k2) {
+ const prof_thr_node_t *thr_node1 = (prof_thr_node_t *)k1;
+ const prof_thr_node_t *thr_node2 = (prof_thr_node_t *)k2;
+ return thr_node1->thr_uid == thr_node2->thr_uid;
+}
+
+/* Used in unit tests. */
+size_t
+prof_log_bt_count(void) {
+ cassert(config_prof);
+ size_t cnt = 0;
+ prof_bt_node_t *node = log_bt_first;
+ while (node != NULL) {
+ cnt++;
+ node = node->next;
+ }
+ return cnt;
+}
+
+/* Used in unit tests. */
+size_t
+prof_log_alloc_count(void) {
+ cassert(config_prof);
+ size_t cnt = 0;
+ prof_alloc_node_t *node = log_alloc_first;
+ while (node != NULL) {
+ cnt++;
+ node = node->next;
+ }
+ return cnt;
+}
+
+/* Used in unit tests. */
+size_t
+prof_log_thr_count(void) {
+ cassert(config_prof);
+ size_t cnt = 0;
+ prof_thr_node_t *node = log_thr_first;
+ while (node != NULL) {
+ cnt++;
+ node = node->next;
+ }
+ return cnt;
+}
+
+/* Used in unit tests. */
+bool
+prof_log_is_logging(void) {
+ cassert(config_prof);
+ return prof_logging_state == prof_logging_state_started;
+}
+
+/* Used in unit tests. */
+bool
+prof_log_rep_check(void) {
+ cassert(config_prof);
+ if (prof_logging_state == prof_logging_state_stopped
+ && log_tables_initialized) {
+ return true;
+ }
+
+ if (log_bt_last != NULL && log_bt_last->next != NULL) {
+ return true;
+ }
+ if (log_thr_last != NULL && log_thr_last->next != NULL) {
+ return true;
+ }
+ if (log_alloc_last != NULL && log_alloc_last->next != NULL) {
+ return true;
+ }
+
+ size_t bt_count = prof_log_bt_count();
+ size_t thr_count = prof_log_thr_count();
+ size_t alloc_count = prof_log_alloc_count();
+
+ if (prof_logging_state == prof_logging_state_stopped) {
+	if (bt_count != 0 || thr_count != 0 || alloc_count != 0) {
+ return true;
+ }
+ }
+
+ prof_alloc_node_t *node = log_alloc_first;
+ while (node != NULL) {
+ if (node->alloc_bt_ind >= bt_count) {
+ return true;
+ }
+ if (node->free_bt_ind >= bt_count) {
+ return true;
+ }
+ if (node->alloc_thr_ind >= thr_count) {
+ return true;
+ }
+ if (node->free_thr_ind >= thr_count) {
+ return true;
+ }
+ if (node->alloc_time_ns > node->free_time_ns) {
+ return true;
+ }
+ node = node->next;
+ }
+
+ return false;
+}
+
+/* Used in unit tests. */
+void
+prof_log_dummy_set(bool new_value) {
+ cassert(config_prof);
+ prof_log_dummy = new_value;
+}
+
+/* Used as an atexit function to stop logging on exit. */
+static void
+prof_log_stop_final(void) {
+ tsd_t *tsd = tsd_fetch();
+ prof_log_stop(tsd_tsdn(tsd));
+}
+
+JEMALLOC_COLD
+bool
+prof_log_start(tsdn_t *tsdn, const char *filename) {
+ cassert(config_prof);
+
+ if (!opt_prof) {
+ return true;
+ }
+
+ bool ret = false;
+
+ malloc_mutex_lock(tsdn, &log_mtx);
+
+ static bool prof_log_atexit_called = false;
+ if (!prof_log_atexit_called) {
+ prof_log_atexit_called = true;
+ if (atexit(prof_log_stop_final) != 0) {
+ malloc_write("<jemalloc>: Error in atexit() "
+ "for logging\n");
+ if (opt_abort) {
+ abort();
+ }
+ ret = true;
+ goto label_done;
+ }
+ }
+
+ if (prof_logging_state != prof_logging_state_stopped) {
+ ret = true;
+ } else if (filename == NULL) {
+ /* Make default name. */
+ prof_get_default_filename(tsdn, log_filename, log_seq);
+ log_seq++;
+ prof_logging_state = prof_logging_state_started;
+ } else if (strlen(filename) >= PROF_DUMP_FILENAME_LEN) {
+ ret = true;
+ } else {
+ strcpy(log_filename, filename);
+ prof_logging_state = prof_logging_state_started;
+ }
+
+ if (!ret) {
+ nstime_prof_init_update(&log_start_timestamp);
+ }
+label_done:
+ malloc_mutex_unlock(tsdn, &log_mtx);
+
+ return ret;
+}
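+
+/*
+ * Usage sketch: prof_log_start()/prof_log_stop() are assumed to back the
+ * "prof.log_start" / "prof.log_stop" mallctls, so a caller could drive a
+ * logging session roughly as follows (log path illustrative):
+ *
+ *	const char *log_name = "/tmp/app.prof.log";
+ *	mallctl("prof.log_start", NULL, NULL, (void *)&log_name,
+ *	    sizeof(log_name));
+ *	... allocations and deallocations to be logged ...
+ *	mallctl("prof.log_stop", NULL, NULL, NULL, 0);
+ */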
+
+struct prof_emitter_cb_arg_s {
+ int fd;
+ ssize_t ret;
+};
+
+static void
+prof_emitter_write_cb(void *opaque, const char *to_write) {
+ struct prof_emitter_cb_arg_s *arg =
+ (struct prof_emitter_cb_arg_s *)opaque;
+ size_t bytes = strlen(to_write);
+ if (prof_log_dummy) {
+ return;
+ }
+ arg->ret = malloc_write_fd(arg->fd, to_write, bytes);
+}
+
+/*
+ * Each prof_log_emit_{...} function walks the appropriate linked list,
+ * emitting each node to the JSON output and deallocating it.
+ */
+static void
+prof_log_emit_threads(tsd_t *tsd, emitter_t *emitter) {
+ emitter_json_array_kv_begin(emitter, "threads");
+ prof_thr_node_t *thr_node = log_thr_first;
+ prof_thr_node_t *thr_old_node;
+ while (thr_node != NULL) {
+ emitter_json_object_begin(emitter);
+
+ emitter_json_kv(emitter, "thr_uid", emitter_type_uint64,
+ &thr_node->thr_uid);
+
+ char *thr_name = thr_node->name;
+
+ emitter_json_kv(emitter, "thr_name", emitter_type_string,
+ &thr_name);
+
+ emitter_json_object_end(emitter);
+ thr_old_node = thr_node;
+ thr_node = thr_node->next;
+ idalloctm(tsd_tsdn(tsd), thr_old_node, NULL, NULL, true, true);
+ }
+ emitter_json_array_end(emitter);
+}
+
+static void
+prof_log_emit_traces(tsd_t *tsd, emitter_t *emitter) {
+ emitter_json_array_kv_begin(emitter, "stack_traces");
+ prof_bt_node_t *bt_node = log_bt_first;
+ prof_bt_node_t *bt_old_node;
+	/*
+	 * Calculate the buffer size we need: two hex digits per byte, two
+	 * more for the "0x" prefix, and one for the terminating '\0'.
+	 */
+ char buf[2 * sizeof(intptr_t) + 3];
+ size_t buf_sz = sizeof(buf);
+ while (bt_node != NULL) {
+ emitter_json_array_begin(emitter);
+ size_t i;
+ for (i = 0; i < bt_node->bt.len; i++) {
+ malloc_snprintf(buf, buf_sz, "%p", bt_node->bt.vec[i]);
+ char *trace_str = buf;
+ emitter_json_value(emitter, emitter_type_string,
+ &trace_str);
+ }
+ emitter_json_array_end(emitter);
+
+ bt_old_node = bt_node;
+ bt_node = bt_node->next;
+ idalloctm(tsd_tsdn(tsd), bt_old_node, NULL, NULL, true, true);
+ }
+ emitter_json_array_end(emitter);
+}
+
+static void
+prof_log_emit_allocs(tsd_t *tsd, emitter_t *emitter) {
+ emitter_json_array_kv_begin(emitter, "allocations");
+ prof_alloc_node_t *alloc_node = log_alloc_first;
+ prof_alloc_node_t *alloc_old_node;
+ while (alloc_node != NULL) {
+ emitter_json_object_begin(emitter);
+
+ emitter_json_kv(emitter, "alloc_thread", emitter_type_size,
+ &alloc_node->alloc_thr_ind);
+
+ emitter_json_kv(emitter, "free_thread", emitter_type_size,
+ &alloc_node->free_thr_ind);
+
+ emitter_json_kv(emitter, "alloc_trace", emitter_type_size,
+ &alloc_node->alloc_bt_ind);
+
+ emitter_json_kv(emitter, "free_trace", emitter_type_size,
+ &alloc_node->free_bt_ind);
+
+ emitter_json_kv(emitter, "alloc_timestamp",
+ emitter_type_uint64, &alloc_node->alloc_time_ns);
+
+ emitter_json_kv(emitter, "free_timestamp", emitter_type_uint64,
+ &alloc_node->free_time_ns);
+
+ emitter_json_kv(emitter, "usize", emitter_type_uint64,
+ &alloc_node->usize);
+
+ emitter_json_object_end(emitter);
+
+ alloc_old_node = alloc_node;
+ alloc_node = alloc_node->next;
+ idalloctm(tsd_tsdn(tsd), alloc_old_node, NULL, NULL, true,
+ true);
+ }
+ emitter_json_array_end(emitter);
+}
+
+static void
+prof_log_emit_metadata(emitter_t *emitter) {
+ emitter_json_object_kv_begin(emitter, "info");
+
+ nstime_t now;
+
+ nstime_prof_init_update(&now);
+ uint64_t ns = nstime_ns(&now) - nstime_ns(&log_start_timestamp);
+ emitter_json_kv(emitter, "duration", emitter_type_uint64, &ns);
+
+ char *vers = JEMALLOC_VERSION;
+ emitter_json_kv(emitter, "version",
+ emitter_type_string, &vers);
+
+ emitter_json_kv(emitter, "lg_sample_rate",
+ emitter_type_int, &lg_prof_sample);
+
+ const char *res_type = prof_time_res_mode_names[opt_prof_time_res];
+ emitter_json_kv(emitter, "prof_time_resolution", emitter_type_string,
+ &res_type);
+
+ int pid = prof_getpid();
+ emitter_json_kv(emitter, "pid", emitter_type_int, &pid);
+
+ emitter_json_object_end(emitter);
+}
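+
+/*
+ * Taken together, the emitters above produce a JSON document of roughly
+ * this shape (keys from the code above; values illustrative):
+ *
+ * {"info": {"duration": ..., "version": "...", "lg_sample_rate": ...,
+ *   "prof_time_resolution": "...", "pid": ...},
+ *  "threads": [{"thr_uid": ..., "thr_name": "..."}, ...],
+ *  "stack_traces": [["0x...", "0x..."], ...],
+ *  "allocations": [{"alloc_thread": ..., "free_thread": ...,
+ *   "alloc_trace": ..., "free_trace": ..., "alloc_timestamp": ...,
+ *   "free_timestamp": ..., "usize": ...}, ...]}
+ */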
+
+#define PROF_LOG_STOP_BUFSIZE PROF_DUMP_BUFSIZE
+JEMALLOC_COLD
+bool
+prof_log_stop(tsdn_t *tsdn) {
+ cassert(config_prof);
+ if (!opt_prof || !prof_booted) {
+ return true;
+ }
+
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ malloc_mutex_lock(tsdn, &log_mtx);
+
+ if (prof_logging_state != prof_logging_state_started) {
+ malloc_mutex_unlock(tsdn, &log_mtx);
+ return true;
+ }
+
+ /*
+ * Set the state to dumping. We'll set it to stopped when we're done.
+ * Since other threads won't be able to start/stop/log when the state is
+	 * dumping, we don't have to hold the lock during the whole function.
+ */
+ prof_logging_state = prof_logging_state_dumping;
+ malloc_mutex_unlock(tsdn, &log_mtx);
+
+ emitter_t emitter;
+
+ /* Create a file. */
+
+ int fd;
+ if (prof_log_dummy) {
+ fd = 0;
+ } else {
+ fd = creat(log_filename, 0644);
+ }
+
+ if (fd == -1) {
+		malloc_printf("<jemalloc>: creat() for log file \"%s\" "
+		    "failed with %d\n", log_filename, errno);
+ if (opt_abort) {
+ abort();
+ }
+ return true;
+ }
+
+ struct prof_emitter_cb_arg_s arg;
+ arg.fd = fd;
+
+ buf_writer_t buf_writer;
+ buf_writer_init(tsdn, &buf_writer, prof_emitter_write_cb, &arg, NULL,
+ PROF_LOG_STOP_BUFSIZE);
+ emitter_init(&emitter, emitter_output_json_compact, buf_writer_cb,
+ &buf_writer);
+
+ emitter_begin(&emitter);
+ prof_log_emit_metadata(&emitter);
+ prof_log_emit_threads(tsd, &emitter);
+ prof_log_emit_traces(tsd, &emitter);
+ prof_log_emit_allocs(tsd, &emitter);
+ emitter_end(&emitter);
+
+ buf_writer_terminate(tsdn, &buf_writer);
+
+ /* Reset global state. */
+ if (log_tables_initialized) {
+ ckh_delete(tsd, &log_bt_node_set);
+ ckh_delete(tsd, &log_thr_node_set);
+ }
+ log_tables_initialized = false;
+ log_bt_index = 0;
+ log_thr_index = 0;
+ log_bt_first = NULL;
+ log_bt_last = NULL;
+ log_thr_first = NULL;
+ log_thr_last = NULL;
+ log_alloc_first = NULL;
+ log_alloc_last = NULL;
+
+ malloc_mutex_lock(tsdn, &log_mtx);
+ prof_logging_state = prof_logging_state_stopped;
+ malloc_mutex_unlock(tsdn, &log_mtx);
+
+ if (prof_log_dummy) {
+ return false;
+ }
+ return close(fd) || arg.ret == -1;
+}
+#undef PROF_LOG_STOP_BUFSIZE
+
+JEMALLOC_COLD
+bool
+prof_log_init(tsd_t *tsd) {
+ cassert(config_prof);
+ if (malloc_mutex_init(&log_mtx, "prof_log",
+ WITNESS_RANK_PROF_LOG, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ if (opt_prof_log) {
+ prof_log_start(tsd_tsdn(tsd), NULL);
+ }
+
+ return false;
+}
+
+/******************************************************************************/
diff --git a/contrib/jemalloc/src/prof_recent.c b/contrib/jemalloc/src/prof_recent.c
new file mode 100644
index 000000000000..834a9446c16f
--- /dev/null
+++ b/contrib/jemalloc/src/prof_recent.c
@@ -0,0 +1,600 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/buf_writer.h"
+#include "jemalloc/internal/emitter.h"
+#include "jemalloc/internal/prof_data.h"
+#include "jemalloc/internal/prof_recent.h"
+
+ssize_t opt_prof_recent_alloc_max = PROF_RECENT_ALLOC_MAX_DEFAULT;
+malloc_mutex_t prof_recent_alloc_mtx; /* Protects the fields below */
+static atomic_zd_t prof_recent_alloc_max;
+static ssize_t prof_recent_alloc_count = 0;
+prof_recent_list_t prof_recent_alloc_list;
+
+malloc_mutex_t prof_recent_dump_mtx; /* Protects dumping. */
+
+static void
+prof_recent_alloc_max_init() {
+ atomic_store_zd(&prof_recent_alloc_max, opt_prof_recent_alloc_max,
+ ATOMIC_RELAXED);
+}
+
+static inline ssize_t
+prof_recent_alloc_max_get_no_lock() {
+ return atomic_load_zd(&prof_recent_alloc_max, ATOMIC_RELAXED);
+}
+
+static inline ssize_t
+prof_recent_alloc_max_get(tsd_t *tsd) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ return prof_recent_alloc_max_get_no_lock();
+}
+
+static inline ssize_t
+prof_recent_alloc_max_update(tsd_t *tsd, ssize_t max) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ ssize_t old_max = prof_recent_alloc_max_get(tsd);
+ atomic_store_zd(&prof_recent_alloc_max, max, ATOMIC_RELAXED);
+ return old_max;
+}
+
+static prof_recent_t *
+prof_recent_allocate_node(tsdn_t *tsdn) {
+ return (prof_recent_t *)iallocztm(tsdn, sizeof(prof_recent_t),
+ sz_size2index(sizeof(prof_recent_t)), false, NULL, true,
+ arena_get(tsdn, 0, false), true);
+}
+
+static void
+prof_recent_free_node(tsdn_t *tsdn, prof_recent_t *node) {
+ assert(node != NULL);
+ assert(isalloc(tsdn, node) == sz_s2u(sizeof(prof_recent_t)));
+ idalloctm(tsdn, node, NULL, NULL, true, true);
+}
+
+static inline void
+increment_recent_count(tsd_t *tsd, prof_tctx_t *tctx) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+ ++tctx->recent_count;
+ assert(tctx->recent_count > 0);
+}
+
+bool
+prof_recent_alloc_prepare(tsd_t *tsd, prof_tctx_t *tctx) {
+ cassert(config_prof);
+ assert(opt_prof && prof_booted);
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+
+ /*
+ * Check whether last-N mode is turned on without trying to acquire the
+ * lock, so as to optimize for the following two scenarios:
+ * (1) Last-N mode is switched off;
+ * (2) Dumping, during which last-N mode is temporarily turned off so
+ * as not to block sampled allocations.
+ */
+ if (prof_recent_alloc_max_get_no_lock() == 0) {
+ return false;
+ }
+
+ /*
+ * Increment recent_count to hold the tctx so that it won't be gone
+ * even after tctx->tdata->lock is released. This acts as a
+ * "placeholder"; the real recording of the allocation requires a lock
+ * on prof_recent_alloc_mtx and is done in prof_recent_alloc (when
+ * tctx->tdata->lock has been released).
+ */
+ increment_recent_count(tsd, tctx);
+ return true;
+}
+
+static void
+decrement_recent_count(tsd_t *tsd, prof_tctx_t *tctx) {
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ assert(tctx != NULL);
+ malloc_mutex_lock(tsd_tsdn(tsd), tctx->tdata->lock);
+ assert(tctx->recent_count > 0);
+ --tctx->recent_count;
+ prof_tctx_try_destroy(tsd, tctx);
+}
+
+static inline edata_t *
+prof_recent_alloc_edata_get_no_lock(const prof_recent_t *n) {
+ return (edata_t *)atomic_load_p(&n->alloc_edata, ATOMIC_ACQUIRE);
+}
+
+edata_t *
+prof_recent_alloc_edata_get_no_lock_test(const prof_recent_t *n) {
+ cassert(config_prof);
+ return prof_recent_alloc_edata_get_no_lock(n);
+}
+
+static inline edata_t *
+prof_recent_alloc_edata_get(tsd_t *tsd, const prof_recent_t *n) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ return prof_recent_alloc_edata_get_no_lock(n);
+}
+
+static void
+prof_recent_alloc_edata_set(tsd_t *tsd, prof_recent_t *n, edata_t *edata) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ atomic_store_p(&n->alloc_edata, edata, ATOMIC_RELEASE);
+}
+
+void
+edata_prof_recent_alloc_init(edata_t *edata) {
+ cassert(config_prof);
+ edata_prof_recent_alloc_set_dont_call_directly(edata, NULL);
+}
+
+static inline prof_recent_t *
+edata_prof_recent_alloc_get_no_lock(const edata_t *edata) {
+ cassert(config_prof);
+ return edata_prof_recent_alloc_get_dont_call_directly(edata);
+}
+
+prof_recent_t *
+edata_prof_recent_alloc_get_no_lock_test(const edata_t *edata) {
+ cassert(config_prof);
+ return edata_prof_recent_alloc_get_no_lock(edata);
+}
+
+static inline prof_recent_t *
+edata_prof_recent_alloc_get(tsd_t *tsd, const edata_t *edata) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ prof_recent_t *recent_alloc =
+ edata_prof_recent_alloc_get_no_lock(edata);
+ assert(recent_alloc == NULL ||
+ prof_recent_alloc_edata_get(tsd, recent_alloc) == edata);
+ return recent_alloc;
+}
+
+static prof_recent_t *
+edata_prof_recent_alloc_update_internal(tsd_t *tsd, edata_t *edata,
+ prof_recent_t *recent_alloc) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ prof_recent_t *old_recent_alloc =
+ edata_prof_recent_alloc_get(tsd, edata);
+ edata_prof_recent_alloc_set_dont_call_directly(edata, recent_alloc);
+ return old_recent_alloc;
+}
+
+static void
+edata_prof_recent_alloc_set(tsd_t *tsd, edata_t *edata,
+ prof_recent_t *recent_alloc) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ assert(recent_alloc != NULL);
+ prof_recent_t *old_recent_alloc =
+ edata_prof_recent_alloc_update_internal(tsd, edata, recent_alloc);
+ assert(old_recent_alloc == NULL);
+ prof_recent_alloc_edata_set(tsd, recent_alloc, edata);
+}
+
+static void
+edata_prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata,
+ prof_recent_t *recent_alloc) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ assert(recent_alloc != NULL);
+ prof_recent_t *old_recent_alloc =
+ edata_prof_recent_alloc_update_internal(tsd, edata, NULL);
+ assert(old_recent_alloc == recent_alloc);
+ assert(edata == prof_recent_alloc_edata_get(tsd, recent_alloc));
+ prof_recent_alloc_edata_set(tsd, recent_alloc, NULL);
+}
+
+/*
+ * This function should be called right before an allocation is released, so
+ * that the associated recent allocation record can contain the following
+ * information:
+ * (1) The fact that the allocation has been released;
+ * (2) The time of the deallocation; and
+ * (3) The prof_tctx associated with the deallocation.
+ */
+void
+prof_recent_alloc_reset(tsd_t *tsd, edata_t *edata) {
+ cassert(config_prof);
+ /*
+ * Check whether the recent allocation record still exists without
+ * trying to acquire the lock.
+ */
+ if (edata_prof_recent_alloc_get_no_lock(edata) == NULL) {
+ return;
+ }
+
+ prof_tctx_t *dalloc_tctx = prof_tctx_create(tsd);
+ /*
+ * In case dalloc_tctx is NULL, e.g. due to OOM, we will not record the
+ * deallocation time / tctx, which is handled later, after we check
+ * again when holding the lock.
+ */
+
+ if (dalloc_tctx != NULL) {
+ malloc_mutex_lock(tsd_tsdn(tsd), dalloc_tctx->tdata->lock);
+ increment_recent_count(tsd, dalloc_tctx);
+ dalloc_tctx->prepared = false;
+ malloc_mutex_unlock(tsd_tsdn(tsd), dalloc_tctx->tdata->lock);
+ }
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ /* Check again after acquiring the lock. */
+ prof_recent_t *recent = edata_prof_recent_alloc_get(tsd, edata);
+ if (recent != NULL) {
+ assert(nstime_equals_zero(&recent->dalloc_time));
+ assert(recent->dalloc_tctx == NULL);
+ if (dalloc_tctx != NULL) {
+ nstime_prof_update(&recent->dalloc_time);
+ recent->dalloc_tctx = dalloc_tctx;
+ dalloc_tctx = NULL;
+ }
+ edata_prof_recent_alloc_reset(tsd, edata, recent);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+
+ if (dalloc_tctx != NULL) {
+		/* We lost the race - the allocation record was just gone. */
+ decrement_recent_count(tsd, dalloc_tctx);
+ }
+}
+
+static void
+prof_recent_alloc_evict_edata(tsd_t *tsd, prof_recent_t *recent_alloc) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ edata_t *edata = prof_recent_alloc_edata_get(tsd, recent_alloc);
+ if (edata != NULL) {
+ edata_prof_recent_alloc_reset(tsd, edata, recent_alloc);
+ }
+}
+
+static bool
+prof_recent_alloc_is_empty(tsd_t *tsd) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ if (ql_empty(&prof_recent_alloc_list)) {
+ assert(prof_recent_alloc_count == 0);
+ return true;
+ } else {
+ assert(prof_recent_alloc_count > 0);
+ return false;
+ }
+}
+
+static void
+prof_recent_alloc_assert_count(tsd_t *tsd) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ if (!config_debug) {
+ return;
+ }
+ ssize_t count = 0;
+ prof_recent_t *n;
+ ql_foreach(n, &prof_recent_alloc_list, link) {
+ ++count;
+ }
+ assert(count == prof_recent_alloc_count);
+ assert(prof_recent_alloc_max_get(tsd) == -1 ||
+ count <= prof_recent_alloc_max_get(tsd));
+}
+
+void
+prof_recent_alloc(tsd_t *tsd, edata_t *edata, size_t size, size_t usize) {
+ cassert(config_prof);
+ assert(edata != NULL);
+ prof_tctx_t *tctx = edata_prof_tctx_get(edata);
+
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd), tctx->tdata->lock);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ prof_recent_alloc_assert_count(tsd);
+
+ /*
+ * Reserve a new prof_recent_t node if needed. If needed, we release
+ * the prof_recent_alloc_mtx lock and allocate. Then, rather than
+ * immediately checking for OOM, we regain the lock and try to make use
+ * of the reserve node if needed. There are six scenarios:
+ *
+	 *          \ now | no need | need but OOMed | need and allocated
+	 *  later    \    |         |                |
+	 * ------------------------------------------------------------
+	 *  no need      |   (1)   |      (2)       |        (3)
+	 * ------------------------------------------------------------
+	 *  need         |   (4)   |      (5)       |        (6)
+ *
+ * First, "(4)" never happens, because we don't release the lock in the
+ * middle if there's no need for a new node; in such cases "(1)" always
+ * takes place, which is trivial.
+ *
+ * Out of the remaining four scenarios, "(6)" is the common case and is
+ * trivial. "(5)" is also trivial, in which case we'll rollback the
+ * effect of prof_recent_alloc_prepare() as expected.
+ *
+ * "(2)" / "(3)" occurs when the need for a new node is gone after we
+ * regain the lock. If the new node is successfully allocated, i.e. in
+ * the case of "(3)", we'll release it in the end; otherwise, i.e. in
+ * the case of "(2)", we do nothing - we're lucky that the OOM ends up
+ * doing no harm at all.
+ *
+ * Therefore, the only performance cost of the "release lock" ->
+ * "allocate" -> "regain lock" design is the "(3)" case, but it happens
+ * very rarely, so the cost is relatively small compared to the gain of
+	 * not having to order prof_recent_alloc_mtx above all the allocation
+	 * locks.
+ */
+ prof_recent_t *reserve = NULL;
+ if (prof_recent_alloc_max_get(tsd) == -1 ||
+ prof_recent_alloc_count < prof_recent_alloc_max_get(tsd)) {
+ assert(prof_recent_alloc_max_get(tsd) != 0);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ reserve = prof_recent_allocate_node(tsd_tsdn(tsd));
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ prof_recent_alloc_assert_count(tsd);
+ }
+
+ if (prof_recent_alloc_max_get(tsd) == 0) {
+ assert(prof_recent_alloc_is_empty(tsd));
+ goto label_rollback;
+ }
+
+ prof_tctx_t *old_alloc_tctx, *old_dalloc_tctx;
+ if (prof_recent_alloc_count == prof_recent_alloc_max_get(tsd)) {
+ /* If upper limit is reached, rotate the head. */
+ assert(prof_recent_alloc_max_get(tsd) != -1);
+ assert(!prof_recent_alloc_is_empty(tsd));
+ prof_recent_t *head = ql_first(&prof_recent_alloc_list);
+ old_alloc_tctx = head->alloc_tctx;
+ assert(old_alloc_tctx != NULL);
+ old_dalloc_tctx = head->dalloc_tctx;
+ prof_recent_alloc_evict_edata(tsd, head);
+ ql_rotate(&prof_recent_alloc_list, link);
+ } else {
+ /* Otherwise make use of the new node. */
+ assert(prof_recent_alloc_max_get(tsd) == -1 ||
+ prof_recent_alloc_count < prof_recent_alloc_max_get(tsd));
+ if (reserve == NULL) {
+ goto label_rollback;
+ }
+ ql_elm_new(reserve, link);
+ ql_tail_insert(&prof_recent_alloc_list, reserve, link);
+ reserve = NULL;
+ old_alloc_tctx = NULL;
+ old_dalloc_tctx = NULL;
+ ++prof_recent_alloc_count;
+ }
+
+ /* Fill content into the tail node. */
+ prof_recent_t *tail = ql_last(&prof_recent_alloc_list, link);
+ assert(tail != NULL);
+ tail->size = size;
+ tail->usize = usize;
+ nstime_copy(&tail->alloc_time, edata_prof_alloc_time_get(edata));
+ tail->alloc_tctx = tctx;
+ nstime_init_zero(&tail->dalloc_time);
+ tail->dalloc_tctx = NULL;
+ edata_prof_recent_alloc_set(tsd, edata, tail);
+
+ assert(!prof_recent_alloc_is_empty(tsd));
+ prof_recent_alloc_assert_count(tsd);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+
+ if (reserve != NULL) {
+ prof_recent_free_node(tsd_tsdn(tsd), reserve);
+ }
+
+ /*
+	 * Asynchronously handle the tctx of the old node, so that
+	 * prof_recent_alloc_mtx and tdata->lock are never held simultaneously.
+ * In the worst case this may delay the tctx release but it's better
+ * than holding prof_recent_alloc_mtx for longer.
+ */
+ if (old_alloc_tctx != NULL) {
+ decrement_recent_count(tsd, old_alloc_tctx);
+ }
+ if (old_dalloc_tctx != NULL) {
+ decrement_recent_count(tsd, old_dalloc_tctx);
+ }
+ return;
+
+label_rollback:
+ assert(edata_prof_recent_alloc_get(tsd, edata) == NULL);
+ prof_recent_alloc_assert_count(tsd);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ if (reserve != NULL) {
+ prof_recent_free_node(tsd_tsdn(tsd), reserve);
+ }
+ decrement_recent_count(tsd, tctx);
+}
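+
+/*
+ * Lifecycle sketch, per the comments above (call sites live elsewhere in
+ * the allocator): a sampled allocation is recorded in two phases, and its
+ * release in a third:
+ *
+ * 1) prof_recent_alloc_prepare(tsd, tctx), under tctx->tdata->lock;
+ * 2) prof_recent_alloc(tsd, edata, size, usize), after that lock is
+ *    released and the allocation has been performed;
+ * 3) prof_recent_alloc_reset(tsd, edata), right before the allocation
+ *    is released.
+ */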
+
+ssize_t
+prof_recent_alloc_max_ctl_read() {
+ cassert(config_prof);
+ /* Don't bother to acquire the lock. */
+ return prof_recent_alloc_max_get_no_lock();
+}
+
+static void
+prof_recent_alloc_restore_locked(tsd_t *tsd, prof_recent_list_t *to_delete) {
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ ssize_t max = prof_recent_alloc_max_get(tsd);
+ if (max == -1 || prof_recent_alloc_count <= max) {
+ /* Easy case - no need to alter the list. */
+ ql_new(to_delete);
+ prof_recent_alloc_assert_count(tsd);
+ return;
+ }
+
+ prof_recent_t *node;
+ ql_foreach(node, &prof_recent_alloc_list, link) {
+ if (prof_recent_alloc_count == max) {
+ break;
+ }
+ prof_recent_alloc_evict_edata(tsd, node);
+ --prof_recent_alloc_count;
+ }
+ assert(prof_recent_alloc_count == max);
+
+ ql_move(to_delete, &prof_recent_alloc_list);
+ if (max == 0) {
+ assert(node == NULL);
+ } else {
+ assert(node != NULL);
+ ql_split(to_delete, node, &prof_recent_alloc_list, link);
+ }
+ assert(!ql_empty(to_delete));
+ prof_recent_alloc_assert_count(tsd);
+}
+
+static void
+prof_recent_alloc_async_cleanup(tsd_t *tsd, prof_recent_list_t *to_delete) {
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_dump_mtx);
+ malloc_mutex_assert_not_owner(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ while (!ql_empty(to_delete)) {
+ prof_recent_t *node = ql_first(to_delete);
+ ql_remove(to_delete, node, link);
+ decrement_recent_count(tsd, node->alloc_tctx);
+ if (node->dalloc_tctx != NULL) {
+ decrement_recent_count(tsd, node->dalloc_tctx);
+ }
+ prof_recent_free_node(tsd_tsdn(tsd), node);
+ }
+}
+
+ssize_t
+prof_recent_alloc_max_ctl_write(tsd_t *tsd, ssize_t max) {
+ cassert(config_prof);
+ assert(max >= -1);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ prof_recent_alloc_assert_count(tsd);
+ const ssize_t old_max = prof_recent_alloc_max_update(tsd, max);
+ prof_recent_list_t to_delete;
+ prof_recent_alloc_restore_locked(tsd, &to_delete);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ prof_recent_alloc_async_cleanup(tsd, &to_delete);
+ return old_max;
+}
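+
+/*
+ * Usage sketch: the read/write pair above is assumed to back the
+ * "experimental.prof_recent.alloc_max" mallctl, so the limit could be
+ * retuned at runtime roughly as follows (value illustrative):
+ *
+ *	ssize_t max = 256;
+ *	mallctl("experimental.prof_recent.alloc_max", NULL, NULL,
+ *	    (void *)&max, sizeof(max));
+ */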
+
+static void
+prof_recent_alloc_dump_bt(emitter_t *emitter, prof_tctx_t *tctx) {
+ char bt_buf[2 * sizeof(intptr_t) + 3];
+ char *s = bt_buf;
+ assert(tctx != NULL);
+ prof_bt_t *bt = &tctx->gctx->bt;
+ for (size_t i = 0; i < bt->len; ++i) {
+ malloc_snprintf(bt_buf, sizeof(bt_buf), "%p", bt->vec[i]);
+ emitter_json_value(emitter, emitter_type_string, &s);
+ }
+}
+
+static void
+prof_recent_alloc_dump_node(emitter_t *emitter, prof_recent_t *node) {
+ emitter_json_object_begin(emitter);
+
+ emitter_json_kv(emitter, "size", emitter_type_size, &node->size);
+ emitter_json_kv(emitter, "usize", emitter_type_size, &node->usize);
+ bool released = prof_recent_alloc_edata_get_no_lock(node) == NULL;
+ emitter_json_kv(emitter, "released", emitter_type_bool, &released);
+
+ emitter_json_kv(emitter, "alloc_thread_uid", emitter_type_uint64,
+ &node->alloc_tctx->thr_uid);
+ prof_tdata_t *alloc_tdata = node->alloc_tctx->tdata;
+ assert(alloc_tdata != NULL);
+ if (alloc_tdata->thread_name != NULL) {
+ emitter_json_kv(emitter, "alloc_thread_name",
+ emitter_type_string, &alloc_tdata->thread_name);
+ }
+ uint64_t alloc_time_ns = nstime_ns(&node->alloc_time);
+ emitter_json_kv(emitter, "alloc_time", emitter_type_uint64,
+ &alloc_time_ns);
+ emitter_json_array_kv_begin(emitter, "alloc_trace");
+ prof_recent_alloc_dump_bt(emitter, node->alloc_tctx);
+ emitter_json_array_end(emitter);
+
+ if (released && node->dalloc_tctx != NULL) {
+ emitter_json_kv(emitter, "dalloc_thread_uid",
+ emitter_type_uint64, &node->dalloc_tctx->thr_uid);
+ prof_tdata_t *dalloc_tdata = node->dalloc_tctx->tdata;
+ assert(dalloc_tdata != NULL);
+ if (dalloc_tdata->thread_name != NULL) {
+ emitter_json_kv(emitter, "dalloc_thread_name",
+ emitter_type_string, &dalloc_tdata->thread_name);
+ }
+ assert(!nstime_equals_zero(&node->dalloc_time));
+ uint64_t dalloc_time_ns = nstime_ns(&node->dalloc_time);
+ emitter_json_kv(emitter, "dalloc_time", emitter_type_uint64,
+ &dalloc_time_ns);
+ emitter_json_array_kv_begin(emitter, "dalloc_trace");
+ prof_recent_alloc_dump_bt(emitter, node->dalloc_tctx);
+ emitter_json_array_end(emitter);
+ }
+
+ emitter_json_object_end(emitter);
+}
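+
+/*
+ * Each node lands in the "recent_alloc" array as an object of roughly this
+ * shape (keys from the code above; values illustrative; the *_thread_name
+ * keys appear only when a thread name is set, and the dalloc_* keys only
+ * for released allocations with a recorded deallocation tctx):
+ *
+ * {"size": 4096, "usize": 4096, "released": true,
+ *  "alloc_thread_uid": ..., "alloc_time": ..., "alloc_trace": ["0x..."],
+ *  "dalloc_thread_uid": ..., "dalloc_time": ..., "dalloc_trace": ["0x..."]}
+ */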
+
+#define PROF_RECENT_PRINT_BUFSIZE 65536
+JEMALLOC_COLD
+void
+prof_recent_alloc_dump(tsd_t *tsd, write_cb_t *write_cb, void *cbopaque) {
+ cassert(config_prof);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_dump_mtx);
+ buf_writer_t buf_writer;
+ buf_writer_init(tsd_tsdn(tsd), &buf_writer, write_cb, cbopaque, NULL,
+ PROF_RECENT_PRINT_BUFSIZE);
+ emitter_t emitter;
+ emitter_init(&emitter, emitter_output_json_compact, buf_writer_cb,
+ &buf_writer);
+ prof_recent_list_t temp_list;
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ prof_recent_alloc_assert_count(tsd);
+ ssize_t dump_max = prof_recent_alloc_max_get(tsd);
+ ql_move(&temp_list, &prof_recent_alloc_list);
+ ssize_t dump_count = prof_recent_alloc_count;
+ prof_recent_alloc_count = 0;
+ prof_recent_alloc_assert_count(tsd);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+
+ emitter_begin(&emitter);
+ uint64_t sample_interval = (uint64_t)1U << lg_prof_sample;
+ emitter_json_kv(&emitter, "sample_interval", emitter_type_uint64,
+ &sample_interval);
+ emitter_json_kv(&emitter, "recent_alloc_max", emitter_type_ssize,
+ &dump_max);
+ emitter_json_array_kv_begin(&emitter, "recent_alloc");
+ prof_recent_t *node;
+ ql_foreach(node, &temp_list, link) {
+ prof_recent_alloc_dump_node(&emitter, node);
+ }
+ emitter_json_array_end(&emitter);
+ emitter_end(&emitter);
+
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+ prof_recent_alloc_assert_count(tsd);
+ ql_concat(&temp_list, &prof_recent_alloc_list, link);
+ ql_move(&prof_recent_alloc_list, &temp_list);
+ prof_recent_alloc_count += dump_count;
+ prof_recent_alloc_restore_locked(tsd, &temp_list);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_alloc_mtx);
+
+ buf_writer_terminate(tsd_tsdn(tsd), &buf_writer);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_recent_dump_mtx);
+
+ prof_recent_alloc_async_cleanup(tsd, &temp_list);
+}
+#undef PROF_RECENT_PRINT_BUFSIZE
+
+bool
+prof_recent_init() {
+ cassert(config_prof);
+ prof_recent_alloc_max_init();
+
+ if (malloc_mutex_init(&prof_recent_alloc_mtx, "prof_recent_alloc",
+ WITNESS_RANK_PROF_RECENT_ALLOC, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ if (malloc_mutex_init(&prof_recent_dump_mtx, "prof_recent_dump",
+ WITNESS_RANK_PROF_RECENT_DUMP, malloc_mutex_rank_exclusive)) {
+ return true;
+ }
+
+ ql_new(&prof_recent_alloc_list);
+
+ return false;
+}
diff --git a/contrib/jemalloc/src/prof_stats.c b/contrib/jemalloc/src/prof_stats.c
new file mode 100644
index 000000000000..5d1a506bb72d
--- /dev/null
+++ b/contrib/jemalloc/src/prof_stats.c
@@ -0,0 +1,57 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/prof_stats.h"
+
+bool opt_prof_stats = false;
+malloc_mutex_t prof_stats_mtx;
+static prof_stats_t prof_stats_live[PROF_SC_NSIZES];
+static prof_stats_t prof_stats_accum[PROF_SC_NSIZES];
+
+static void
+prof_stats_enter(tsd_t *tsd, szind_t ind) {
+ assert(opt_prof && opt_prof_stats);
+ assert(ind < SC_NSIZES);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_stats_mtx);
+}
+
+static void
+prof_stats_leave(tsd_t *tsd) {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_stats_mtx);
+}
+
+void
+prof_stats_inc(tsd_t *tsd, szind_t ind, size_t size) {
+ cassert(config_prof);
+ prof_stats_enter(tsd, ind);
+ prof_stats_live[ind].req_sum += size;
+ prof_stats_live[ind].count++;
+ prof_stats_accum[ind].req_sum += size;
+ prof_stats_accum[ind].count++;
+ prof_stats_leave(tsd);
+}
+
+void
+prof_stats_dec(tsd_t *tsd, szind_t ind, size_t size) {
+ cassert(config_prof);
+ prof_stats_enter(tsd, ind);
+ prof_stats_live[ind].req_sum -= size;
+ prof_stats_live[ind].count--;
+ prof_stats_leave(tsd);
+}
+
+void
+prof_stats_get_live(tsd_t *tsd, szind_t ind, prof_stats_t *stats) {
+ cassert(config_prof);
+ prof_stats_enter(tsd, ind);
+ memcpy(stats, &prof_stats_live[ind], sizeof(prof_stats_t));
+ prof_stats_leave(tsd);
+}
+
+void
+prof_stats_get_accum(tsd_t *tsd, szind_t ind, prof_stats_t *stats) {
+ cassert(config_prof);
+ prof_stats_enter(tsd, ind);
+ memcpy(stats, &prof_stats_accum[ind], sizeof(prof_stats_t));
+ prof_stats_leave(tsd);
+}
diff --git a/contrib/jemalloc/src/prof_sys.c b/contrib/jemalloc/src/prof_sys.c
new file mode 100644
index 000000000000..b5f1f5b225e1
--- /dev/null
+++ b/contrib/jemalloc/src/prof_sys.c
@@ -0,0 +1,669 @@
+#define JEMALLOC_PROF_SYS_C_
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/buf_writer.h"
+#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/prof_data.h"
+#include "jemalloc/internal/prof_sys.h"
+
+#ifdef JEMALLOC_PROF_LIBUNWIND
+#define UNW_LOCAL_ONLY
+#include <libunwind.h>
+#endif
+
+#ifdef JEMALLOC_PROF_LIBGCC
+/*
+ * We have a circular dependency -- jemalloc_internal.h tells us if we should
+ * use libgcc's unwinding functionality, but after we've included that, we've
+ * already hooked _Unwind_Backtrace. We'll temporarily disable hooking.
+ */
+#undef _Unwind_Backtrace
+#include <unwind.h>
+#define _Unwind_Backtrace JEMALLOC_TEST_HOOK(_Unwind_Backtrace, test_hooks_libc_hook)
+#endif
+
+/******************************************************************************/
+
+malloc_mutex_t prof_dump_filename_mtx;
+
+bool prof_do_mock = false;
+
+static uint64_t prof_dump_seq;
+static uint64_t prof_dump_iseq;
+static uint64_t prof_dump_mseq;
+static uint64_t prof_dump_useq;
+
+static char *prof_prefix = NULL;
+
+/* The fallback allocator that the profiling functionality will use. */
+base_t *prof_base;
+
+void
+bt_init(prof_bt_t *bt, void **vec) {
+ cassert(config_prof);
+
+ bt->vec = vec;
+ bt->len = 0;
+}
+
+#ifdef JEMALLOC_PROF_LIBUNWIND
+static void
+prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
+ int nframes;
+
+ cassert(config_prof);
+ assert(*len == 0);
+ assert(vec != NULL);
+ assert(max_len == PROF_BT_MAX);
+
+ nframes = unw_backtrace(vec, PROF_BT_MAX);
+ if (nframes <= 0) {
+ return;
+ }
+ *len = nframes;
+}
+#elif (defined(JEMALLOC_PROF_LIBGCC))
+static _Unwind_Reason_Code
+prof_unwind_init_callback(struct _Unwind_Context *context, void *arg) {
+ cassert(config_prof);
+
+ return _URC_NO_REASON;
+}
+
+static _Unwind_Reason_Code
+prof_unwind_callback(struct _Unwind_Context *context, void *arg) {
+ prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
+ void *ip;
+
+ cassert(config_prof);
+
+ ip = (void *)_Unwind_GetIP(context);
+ if (ip == NULL) {
+ return _URC_END_OF_STACK;
+ }
+ data->vec[*data->len] = ip;
+ (*data->len)++;
+ if (*data->len == data->max) {
+ return _URC_END_OF_STACK;
+ }
+
+ return _URC_NO_REASON;
+}
+
+static void
+prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
+ prof_unwind_data_t data = {vec, len, max_len};
+
+ cassert(config_prof);
+ assert(vec != NULL);
+ assert(max_len == PROF_BT_MAX);
+
+ _Unwind_Backtrace(prof_unwind_callback, &data);
+}
+#elif (defined(JEMALLOC_PROF_GCC))
+static void
+prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
+#define BT_FRAME(i) \
+ if ((i) < max_len) { \
+ void *p; \
+ if (__builtin_frame_address(i) == 0) { \
+ return; \
+ } \
+ p = __builtin_return_address(i); \
+ if (p == NULL) { \
+ return; \
+ } \
+ vec[(i)] = p; \
+ *len = (i) + 1; \
+ } else { \
+ return; \
+ }
+
+ cassert(config_prof);
+ assert(vec != NULL);
+ assert(max_len == PROF_BT_MAX);
+
+ BT_FRAME(0)
+ BT_FRAME(1)
+ BT_FRAME(2)
+ BT_FRAME(3)
+ BT_FRAME(4)
+ BT_FRAME(5)
+ BT_FRAME(6)
+ BT_FRAME(7)
+ BT_FRAME(8)
+ BT_FRAME(9)
+
+ BT_FRAME(10)
+ BT_FRAME(11)
+ BT_FRAME(12)
+ BT_FRAME(13)
+ BT_FRAME(14)
+ BT_FRAME(15)
+ BT_FRAME(16)
+ BT_FRAME(17)
+ BT_FRAME(18)
+ BT_FRAME(19)
+
+ BT_FRAME(20)
+ BT_FRAME(21)
+ BT_FRAME(22)
+ BT_FRAME(23)
+ BT_FRAME(24)
+ BT_FRAME(25)
+ BT_FRAME(26)
+ BT_FRAME(27)
+ BT_FRAME(28)
+ BT_FRAME(29)
+
+ BT_FRAME(30)
+ BT_FRAME(31)
+ BT_FRAME(32)
+ BT_FRAME(33)
+ BT_FRAME(34)
+ BT_FRAME(35)
+ BT_FRAME(36)
+ BT_FRAME(37)
+ BT_FRAME(38)
+ BT_FRAME(39)
+
+ BT_FRAME(40)
+ BT_FRAME(41)
+ BT_FRAME(42)
+ BT_FRAME(43)
+ BT_FRAME(44)
+ BT_FRAME(45)
+ BT_FRAME(46)
+ BT_FRAME(47)
+ BT_FRAME(48)
+ BT_FRAME(49)
+
+ BT_FRAME(50)
+ BT_FRAME(51)
+ BT_FRAME(52)
+ BT_FRAME(53)
+ BT_FRAME(54)
+ BT_FRAME(55)
+ BT_FRAME(56)
+ BT_FRAME(57)
+ BT_FRAME(58)
+ BT_FRAME(59)
+
+ BT_FRAME(60)
+ BT_FRAME(61)
+ BT_FRAME(62)
+ BT_FRAME(63)
+ BT_FRAME(64)
+ BT_FRAME(65)
+ BT_FRAME(66)
+ BT_FRAME(67)
+ BT_FRAME(68)
+ BT_FRAME(69)
+
+ BT_FRAME(70)
+ BT_FRAME(71)
+ BT_FRAME(72)
+ BT_FRAME(73)
+ BT_FRAME(74)
+ BT_FRAME(75)
+ BT_FRAME(76)
+ BT_FRAME(77)
+ BT_FRAME(78)
+ BT_FRAME(79)
+
+ BT_FRAME(80)
+ BT_FRAME(81)
+ BT_FRAME(82)
+ BT_FRAME(83)
+ BT_FRAME(84)
+ BT_FRAME(85)
+ BT_FRAME(86)
+ BT_FRAME(87)
+ BT_FRAME(88)
+ BT_FRAME(89)
+
+ BT_FRAME(90)
+ BT_FRAME(91)
+ BT_FRAME(92)
+ BT_FRAME(93)
+ BT_FRAME(94)
+ BT_FRAME(95)
+ BT_FRAME(96)
+ BT_FRAME(97)
+ BT_FRAME(98)
+ BT_FRAME(99)
+
+ BT_FRAME(100)
+ BT_FRAME(101)
+ BT_FRAME(102)
+ BT_FRAME(103)
+ BT_FRAME(104)
+ BT_FRAME(105)
+ BT_FRAME(106)
+ BT_FRAME(107)
+ BT_FRAME(108)
+ BT_FRAME(109)
+
+ BT_FRAME(110)
+ BT_FRAME(111)
+ BT_FRAME(112)
+ BT_FRAME(113)
+ BT_FRAME(114)
+ BT_FRAME(115)
+ BT_FRAME(116)
+ BT_FRAME(117)
+ BT_FRAME(118)
+ BT_FRAME(119)
+
+ BT_FRAME(120)
+ BT_FRAME(121)
+ BT_FRAME(122)
+ BT_FRAME(123)
+ BT_FRAME(124)
+ BT_FRAME(125)
+ BT_FRAME(126)
+ BT_FRAME(127)
+#undef BT_FRAME
+}
+#else
+static void
+prof_backtrace_impl(void **vec, unsigned *len, unsigned max_len) {
+ cassert(config_prof);
+ not_reached();
+}
+#endif
+
+void
+prof_backtrace(tsd_t *tsd, prof_bt_t *bt) {
+ cassert(config_prof);
+ prof_backtrace_hook_t prof_backtrace_hook = prof_backtrace_hook_get();
+ assert(prof_backtrace_hook != NULL);
+
+ pre_reentrancy(tsd, NULL);
+ prof_backtrace_hook(bt->vec, &bt->len, PROF_BT_MAX);
+ post_reentrancy(tsd);
+}
+
+void
+prof_hooks_init() {
+ prof_backtrace_hook_set(&prof_backtrace_impl);
+ prof_dump_hook_set(NULL);
+}
+
+void
+prof_unwind_init() {
+#ifdef JEMALLOC_PROF_LIBGCC
+ /*
+ * Cause the backtracing machinery to allocate its internal
+ * state before enabling profiling.
+ */
+ _Unwind_Backtrace(prof_unwind_init_callback, NULL);
+#endif
+}
+
+static int
+prof_sys_thread_name_read_impl(char *buf, size_t limit) {
+#if defined(JEMALLOC_HAVE_PTHREAD_GETNAME_NP)
+ return pthread_getname_np(pthread_self(), buf, limit);
+#elif defined(JEMALLOC_HAVE_PTHREAD_GET_NAME_NP)
+ pthread_get_name_np(pthread_self(), buf, limit);
+ return 0;
+#else
+ return ENOSYS;
+#endif
+}
+prof_sys_thread_name_read_t *JET_MUTABLE prof_sys_thread_name_read =
+ prof_sys_thread_name_read_impl;
+
+void
+prof_sys_thread_name_fetch(tsd_t *tsd) {
+#define THREAD_NAME_MAX_LEN 16
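+	/* 16 matches the Linux thread name limit: 15 bytes plus the NUL. */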
+ char buf[THREAD_NAME_MAX_LEN];
+ if (!prof_sys_thread_name_read(buf, THREAD_NAME_MAX_LEN)) {
+ prof_thread_name_set_impl(tsd, buf);
+ }
+#undef THREAD_NAME_MAX_LEN
+}
+
+int
+prof_getpid(void) {
+#ifdef _WIN32
+ return GetCurrentProcessId();
+#else
+ return getpid();
+#endif
+}
+
+/*
+ * This buffer is rather large for stack allocation, so use a single buffer for
+ * all profile dumps; protected by prof_dump_mtx.
+ */
+static char prof_dump_buf[PROF_DUMP_BUFSIZE];
+
+typedef struct prof_dump_arg_s prof_dump_arg_t;
+struct prof_dump_arg_s {
+ /*
+	 * Whether errors should be handled locally: if true, then we print an
+	 * error message as well as abort (if opt_abort is true) when an error
+	 * occurs, and we also report the error back to the caller in the end;
+ * if false, then we only report the error back to the caller in the
+ * end.
+ */
+ const bool handle_error_locally;
+ /*
+ * Whether there has been an error in the dumping process, which could
+ * have happened either in file opening or in file writing. When an
+ * error has already occurred, we will stop further writing to the file.
+ */
+ bool error;
+ /* File descriptor of the dump file. */
+ int prof_dump_fd;
+};
+
+static void
+prof_dump_check_possible_error(prof_dump_arg_t *arg, bool err_cond,
+ const char *format, ...) {
+ assert(!arg->error);
+ if (!err_cond) {
+ return;
+ }
+
+ arg->error = true;
+ if (!arg->handle_error_locally) {
+ return;
+ }
+
+ va_list ap;
+ char buf[PROF_PRINTF_BUFSIZE];
+ va_start(ap, format);
+ malloc_vsnprintf(buf, sizeof(buf), format, ap);
+ va_end(ap);
+ malloc_write(buf);
+
+ if (opt_abort) {
+ abort();
+ }
+}
+
+static int
+prof_dump_open_file_impl(const char *filename, int mode) {
+ return creat(filename, mode);
+}
+prof_dump_open_file_t *JET_MUTABLE prof_dump_open_file =
+ prof_dump_open_file_impl;
+
+static void
+prof_dump_open(prof_dump_arg_t *arg, const char *filename) {
+ arg->prof_dump_fd = prof_dump_open_file(filename, 0644);
+ prof_dump_check_possible_error(arg, arg->prof_dump_fd == -1,
+ "<jemalloc>: failed to open \"%s\"\n", filename);
+}
+
+prof_dump_write_file_t *JET_MUTABLE prof_dump_write_file = malloc_write_fd;
+
+static void
+prof_dump_flush(void *opaque, const char *s) {
+ cassert(config_prof);
+ prof_dump_arg_t *arg = (prof_dump_arg_t *)opaque;
+ if (!arg->error) {
+ ssize_t err = prof_dump_write_file(arg->prof_dump_fd, s,
+ strlen(s));
+ prof_dump_check_possible_error(arg, err == -1,
+ "<jemalloc>: failed to write during heap profile flush\n");
+ }
+}
+
+static void
+prof_dump_close(prof_dump_arg_t *arg) {
+ if (arg->prof_dump_fd != -1) {
+ close(arg->prof_dump_fd);
+ }
+}
+
+#ifndef _WIN32
+JEMALLOC_FORMAT_PRINTF(1, 2)
+static int
+prof_open_maps_internal(const char *format, ...) {
+ int mfd;
+ va_list ap;
+ char filename[PATH_MAX + 1];
+
+ va_start(ap, format);
+ malloc_vsnprintf(filename, sizeof(filename), format, ap);
+ va_end(ap);
+
+#if defined(O_CLOEXEC)
+ mfd = open(filename, O_RDONLY | O_CLOEXEC);
+#else
+ mfd = open(filename, O_RDONLY);
+ if (mfd != -1) {
+ fcntl(mfd, F_SETFD, fcntl(mfd, F_GETFD) | FD_CLOEXEC);
+ }
+#endif
+
+ return mfd;
+}
+#endif
+
+static int
+prof_dump_open_maps_impl() {
+ int mfd;
+
+ cassert(config_prof);
+#if defined(__FreeBSD__) || defined(__DragonFly__)
+ mfd = prof_open_maps_internal("/proc/curproc/map");
+#elif defined(_WIN32)
+	mfd = -1; /* Not implemented. */
+#else
+ int pid = prof_getpid();
+
+ mfd = prof_open_maps_internal("/proc/%d/task/%d/maps", pid, pid);
+ if (mfd == -1) {
+ mfd = prof_open_maps_internal("/proc/%d/maps", pid);
+ }
+#endif
+ return mfd;
+}
+prof_dump_open_maps_t *JET_MUTABLE prof_dump_open_maps =
+ prof_dump_open_maps_impl;
+
+static ssize_t
+prof_dump_read_maps_cb(void *read_cbopaque, void *buf, size_t limit) {
+ int mfd = *(int *)read_cbopaque;
+ assert(mfd != -1);
+ return malloc_read_fd(mfd, buf, limit);
+}
+
+static void
+prof_dump_maps(buf_writer_t *buf_writer) {
+ int mfd = prof_dump_open_maps();
+ if (mfd == -1) {
+ return;
+ }
+
+ buf_writer_cb(buf_writer, "\nMAPPED_LIBRARIES:\n");
+ buf_writer_pipe(buf_writer, prof_dump_read_maps_cb, &mfd);
+ close(mfd);
+}
+
+static bool
+prof_dump(tsd_t *tsd, bool propagate_err, const char *filename,
+ bool leakcheck) {
+ cassert(config_prof);
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+
+	prof_tdata_t *tdata = prof_tdata_get(tsd, true);
+ if (tdata == NULL) {
+ return true;
+ }
+
+ prof_dump_arg_t arg = {/* handle_error_locally */ !propagate_err,
+ /* error */ false, /* prof_dump_fd */ -1};
+
+ pre_reentrancy(tsd, NULL);
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_mtx);
+
+ prof_dump_open(&arg, filename);
+ buf_writer_t buf_writer;
+ bool err = buf_writer_init(tsd_tsdn(tsd), &buf_writer, prof_dump_flush,
+ &arg, prof_dump_buf, PROF_DUMP_BUFSIZE);
+ assert(!err);
+ prof_dump_impl(tsd, buf_writer_cb, &buf_writer, tdata, leakcheck);
+ prof_dump_maps(&buf_writer);
+ buf_writer_terminate(tsd_tsdn(tsd), &buf_writer);
+ prof_dump_close(&arg);
+
+ prof_dump_hook_t dump_hook = prof_dump_hook_get();
+ if (dump_hook != NULL) {
+ dump_hook(filename);
+ }
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_mtx);
+ post_reentrancy(tsd);
+
+ return arg.error;
+}
+
+/*
+ * If profiling is off, then PROF_DUMP_FILENAME_LEN is 1, so we'll end up
+ * calling strncpy with a size of 0, which triggers a -Wstringop-truncation
+ * warning (strncpy can never actually be called in this case, since we bail out
+ * much earlier when config_prof is false). This function works around the
+ * warning to let us leave the warning on.
+ */
+static inline void
+prof_strncpy(char *UNUSED dest, const char *UNUSED src, size_t UNUSED size) {
+ cassert(config_prof);
+#ifdef JEMALLOC_PROF
+ strncpy(dest, src, size);
+#endif
+}
+
+static const char *
+prof_prefix_get(tsdn_t *tsdn) {
+ malloc_mutex_assert_owner(tsdn, &prof_dump_filename_mtx);
+
+ return prof_prefix == NULL ? opt_prof_prefix : prof_prefix;
+}
+
+static bool
+prof_prefix_is_empty(tsdn_t *tsdn) {
+ malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
+ bool ret = (prof_prefix_get(tsdn)[0] == '\0');
+ malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
+ return ret;
+}
+
+#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
+#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
+static void
+prof_dump_filename(tsd_t *tsd, char *filename, char v, uint64_t vseq) {
+ cassert(config_prof);
+
+ assert(tsd_reentrancy_level_get(tsd) == 0);
+ const char *prefix = prof_prefix_get(tsd_tsdn(tsd));
+
+ if (vseq != VSEQ_INVALID) {
+ /* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
+ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
+ "%s.%d.%"FMTu64".%c%"FMTu64".heap", prefix, prof_getpid(),
+ prof_dump_seq, v, vseq);
+ } else {
+ /* "<prefix>.<pid>.<seq>.<v>.heap" */
+ malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
+ "%s.%d.%"FMTu64".%c.heap", prefix, prof_getpid(),
+ prof_dump_seq, v);
+ }
+ prof_dump_seq++;
+}
+
+void
+prof_get_default_filename(tsdn_t *tsdn, char *filename, uint64_t ind) {
+ malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
+ malloc_snprintf(filename, PROF_DUMP_FILENAME_LEN,
+ "%s.%d.%"FMTu64".json", prof_prefix_get(tsdn), prof_getpid(), ind);
+ malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
+}
+
+void
+prof_fdump_impl(tsd_t *tsd) {
+ char filename[DUMP_FILENAME_BUFSIZE];
+
+ assert(!prof_prefix_is_empty(tsd_tsdn(tsd)));
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
+ prof_dump_filename(tsd, filename, 'f', VSEQ_INVALID);
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
+ prof_dump(tsd, false, filename, opt_prof_leak);
+}
+
+bool
+prof_prefix_set(tsdn_t *tsdn, const char *prefix) {
+ cassert(config_prof);
+ ctl_mtx_assert_held(tsdn);
+ malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
+ if (prof_prefix == NULL) {
+ malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
+ /* Everything is still guarded by ctl_mtx. */
+ char *buffer = base_alloc(tsdn, prof_base,
+ PROF_DUMP_FILENAME_LEN, QUANTUM);
+ if (buffer == NULL) {
+ return true;
+ }
+ malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
+ prof_prefix = buffer;
+ }
+ assert(prof_prefix != NULL);
+
+ prof_strncpy(prof_prefix, prefix, PROF_DUMP_FILENAME_LEN - 1);
+ prof_prefix[PROF_DUMP_FILENAME_LEN - 1] = '\0';
+ malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
+
+ return false;
+}
+
+void
+prof_idump_impl(tsd_t *tsd) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
+ if (prof_prefix_get(tsd_tsdn(tsd))[0] == '\0') {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
+ return;
+ }
+	char filename[DUMP_FILENAME_BUFSIZE];
+ prof_dump_filename(tsd, filename, 'i', prof_dump_iseq);
+ prof_dump_iseq++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
+ prof_dump(tsd, false, filename, false);
+}
+
+bool
+prof_mdump_impl(tsd_t *tsd, const char *filename) {
+ char filename_buf[DUMP_FILENAME_BUFSIZE];
+ if (filename == NULL) {
+ /* No filename specified, so automatically generate one. */
+ malloc_mutex_lock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
+ if (prof_prefix_get(tsd_tsdn(tsd))[0] == '\0') {
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
+ return true;
+ }
+ prof_dump_filename(tsd, filename_buf, 'm', prof_dump_mseq);
+ prof_dump_mseq++;
+ malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_filename_mtx);
+ filename = filename_buf;
+ }
+ return prof_dump(tsd, true, filename, false);
+}
+
+void
+prof_gdump_impl(tsd_t *tsd) {
+ tsdn_t *tsdn = tsd_tsdn(tsd);
+ malloc_mutex_lock(tsdn, &prof_dump_filename_mtx);
+ if (prof_prefix_get(tsdn)[0] == '\0') {
+ malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
+ return;
+ }
+ char filename[DUMP_FILENAME_BUFSIZE];
+ prof_dump_filename(tsd, filename, 'u', prof_dump_useq);
+ prof_dump_useq++;
+ malloc_mutex_unlock(tsdn, &prof_dump_filename_mtx);
+ prof_dump(tsd, false, filename, false);
+}
diff --git a/contrib/jemalloc/src/psset.c b/contrib/jemalloc/src/psset.c
new file mode 100644
index 000000000000..9a8f054f111c
--- /dev/null
+++ b/contrib/jemalloc/src/psset.c
@@ -0,0 +1,385 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/psset.h"
+
+#include "jemalloc/internal/fb.h"
+
+void
+psset_init(psset_t *psset) {
+ for (unsigned i = 0; i < PSSET_NPSIZES; i++) {
+ hpdata_age_heap_new(&psset->pageslabs[i]);
+ }
+ fb_init(psset->pageslab_bitmap, PSSET_NPSIZES);
+ memset(&psset->merged_stats, 0, sizeof(psset->merged_stats));
+ memset(&psset->stats, 0, sizeof(psset->stats));
+ hpdata_empty_list_init(&psset->empty);
+ for (int i = 0; i < PSSET_NPURGE_LISTS; i++) {
+ hpdata_purge_list_init(&psset->to_purge[i]);
+ }
+ fb_init(psset->purge_bitmap, PSSET_NPURGE_LISTS);
+ hpdata_hugify_list_init(&psset->to_hugify);
+}
+
+static void
+psset_bin_stats_accum(psset_bin_stats_t *dst, psset_bin_stats_t *src) {
+ dst->npageslabs += src->npageslabs;
+ dst->nactive += src->nactive;
+ dst->ndirty += src->ndirty;
+}
+
+void
+psset_stats_accum(psset_stats_t *dst, psset_stats_t *src) {
+ psset_bin_stats_accum(&dst->full_slabs[0], &src->full_slabs[0]);
+ psset_bin_stats_accum(&dst->full_slabs[1], &src->full_slabs[1]);
+ psset_bin_stats_accum(&dst->empty_slabs[0], &src->empty_slabs[0]);
+ psset_bin_stats_accum(&dst->empty_slabs[1], &src->empty_slabs[1]);
+ for (pszind_t i = 0; i < PSSET_NPSIZES; i++) {
+ psset_bin_stats_accum(&dst->nonfull_slabs[i][0],
+ &src->nonfull_slabs[i][0]);
+ psset_bin_stats_accum(&dst->nonfull_slabs[i][1],
+ &src->nonfull_slabs[i][1]);
+ }
+}
+
+/*
+ * The stats maintenance strategy is to remove a pageslab's contribution to the
+ * stats when we call psset_update_begin, and re-add it (to a potentially new
+ * bin) when we call psset_update_end.
+ */
+JEMALLOC_ALWAYS_INLINE void
+psset_bin_stats_insert_remove(psset_t *psset, psset_bin_stats_t *binstats,
+ hpdata_t *ps, bool insert) {
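+	/*
+	 * In unsigned size_t arithmetic, multiplying by (size_t)-1 negates:
+	 * x + ((size_t)-1) * y == x - y modulo 2^(width of size_t), so a
+	 * single code path handles both insertion and removal.
+	 */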
+ size_t mul = insert ? (size_t)1 : (size_t)-1;
+ size_t huge_idx = (size_t)hpdata_huge_get(ps);
+
+ binstats[huge_idx].npageslabs += mul * 1;
+ binstats[huge_idx].nactive += mul * hpdata_nactive_get(ps);
+ binstats[huge_idx].ndirty += mul * hpdata_ndirty_get(ps);
+
+ psset->merged_stats.npageslabs += mul * 1;
+ psset->merged_stats.nactive += mul * hpdata_nactive_get(ps);
+ psset->merged_stats.ndirty += mul * hpdata_ndirty_get(ps);
+
+ if (config_debug) {
+ psset_bin_stats_t check_stats = {0};
+ for (size_t huge = 0; huge <= 1; huge++) {
+ psset_bin_stats_accum(&check_stats,
+ &psset->stats.full_slabs[huge]);
+ psset_bin_stats_accum(&check_stats,
+ &psset->stats.empty_slabs[huge]);
+ for (pszind_t pind = 0; pind < PSSET_NPSIZES; pind++) {
+ psset_bin_stats_accum(&check_stats,
+ &psset->stats.nonfull_slabs[pind][huge]);
+ }
+ }
+ assert(psset->merged_stats.npageslabs
+ == check_stats.npageslabs);
+ assert(psset->merged_stats.nactive == check_stats.nactive);
+ assert(psset->merged_stats.ndirty == check_stats.ndirty);
+ }
+}
+
+static void
+psset_bin_stats_insert(psset_t *psset, psset_bin_stats_t *binstats,
+ hpdata_t *ps) {
+ psset_bin_stats_insert_remove(psset, binstats, ps, true);
+}
+
+static void
+psset_bin_stats_remove(psset_t *psset, psset_bin_stats_t *binstats,
+ hpdata_t *ps) {
+ psset_bin_stats_insert_remove(psset, binstats, ps, false);
+}
+
+static void
+psset_hpdata_heap_remove(psset_t *psset, pszind_t pind, hpdata_t *ps) {
+ hpdata_age_heap_remove(&psset->pageslabs[pind], ps);
+ if (hpdata_age_heap_empty(&psset->pageslabs[pind])) {
+ fb_unset(psset->pageslab_bitmap, PSSET_NPSIZES, (size_t)pind);
+ }
+}
+
+static void
+psset_hpdata_heap_insert(psset_t *psset, pszind_t pind, hpdata_t *ps) {
+ if (hpdata_age_heap_empty(&psset->pageslabs[pind])) {
+ fb_set(psset->pageslab_bitmap, PSSET_NPSIZES, (size_t)pind);
+ }
+ hpdata_age_heap_insert(&psset->pageslabs[pind], ps);
+}
+
+static void
+psset_stats_insert(psset_t *psset, hpdata_t *ps) {
+ if (hpdata_empty(ps)) {
+ psset_bin_stats_insert(psset, psset->stats.empty_slabs, ps);
+ } else if (hpdata_full(ps)) {
+ psset_bin_stats_insert(psset, psset->stats.full_slabs, ps);
+ } else {
+ size_t longest_free_range = hpdata_longest_free_range_get(ps);
+
+ pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
+ longest_free_range << LG_PAGE));
+ assert(pind < PSSET_NPSIZES);
+
+ psset_bin_stats_insert(psset, psset->stats.nonfull_slabs[pind],
+ ps);
+ }
+}
+
+static void
+psset_stats_remove(psset_t *psset, hpdata_t *ps) {
+ if (hpdata_empty(ps)) {
+ psset_bin_stats_remove(psset, psset->stats.empty_slabs, ps);
+ } else if (hpdata_full(ps)) {
+ psset_bin_stats_remove(psset, psset->stats.full_slabs, ps);
+ } else {
+ size_t longest_free_range = hpdata_longest_free_range_get(ps);
+
+ pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
+ longest_free_range << LG_PAGE));
+ assert(pind < PSSET_NPSIZES);
+
+ psset_bin_stats_remove(psset, psset->stats.nonfull_slabs[pind],
+ ps);
+ }
+}
+
+/*
+ * Put ps into some container so that it can be found during future allocation
+ * requests.
+ */
+static void
+psset_alloc_container_insert(psset_t *psset, hpdata_t *ps) {
+ assert(!hpdata_in_psset_alloc_container_get(ps));
+ hpdata_in_psset_alloc_container_set(ps, true);
+ if (hpdata_empty(ps)) {
+ /*
+ * This prepend, paired with popping the head in psset_fit,
+ * means we implement LIFO ordering for the empty slabs set,
+ * which seems reasonable.
+ */
+ hpdata_empty_list_prepend(&psset->empty, ps);
+ } else if (hpdata_full(ps)) {
+ /*
+ * We don't need to keep track of the full slabs; we're never
+ * going to return them from a psset_pick_alloc call.
+ */
+ } else {
+ size_t longest_free_range = hpdata_longest_free_range_get(ps);
+
+ pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
+ longest_free_range << LG_PAGE));
+ assert(pind < PSSET_NPSIZES);
+
+ psset_hpdata_heap_insert(psset, pind, ps);
+ }
+}
+
+/* Remove ps from those collections. */
+static void
+psset_alloc_container_remove(psset_t *psset, hpdata_t *ps) {
+ assert(hpdata_in_psset_alloc_container_get(ps));
+ hpdata_in_psset_alloc_container_set(ps, false);
+
+ if (hpdata_empty(ps)) {
+ hpdata_empty_list_remove(&psset->empty, ps);
+ } else if (hpdata_full(ps)) {
+ /* Same as above -- do nothing in this case. */
+ } else {
+ size_t longest_free_range = hpdata_longest_free_range_get(ps);
+
+ pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(
+ longest_free_range << LG_PAGE));
+ assert(pind < PSSET_NPSIZES);
+
+ psset_hpdata_heap_remove(psset, pind, ps);
+ }
+}
+
+static size_t
+psset_purge_list_ind(hpdata_t *ps) {
+ size_t ndirty = hpdata_ndirty_get(ps);
+ /* A slab with no dirty pages should never be purgeable. */
+ assert(ndirty > 0);
+ /*
+ * Higher indices correspond to lists we'd like to purge earlier; make
+ * the two highest indices correspond to empty lists, which we attempt
+ * to purge before purging any non-empty list. This has two advantages:
+ * - Empty page slabs are the least likely to get reused (we'll only
+ * pick them for an allocation if we have no other choice).
+ * - Empty page slabs can purge every dirty page they contain in a
+ * single call, which non-empty slabs usually cannot.
+ *
+ * We purge hugeified empty slabs before nonhugeified ones, on the basis
+ * that they are fully dirty, while nonhugeified slabs might not be, so
+ * we free up more pages more easily.
+ */
+ if (hpdata_nactive_get(ps) == 0) {
+ if (hpdata_huge_get(ps)) {
+ return PSSET_NPURGE_LISTS - 1;
+ } else {
+ return PSSET_NPURGE_LISTS - 2;
+ }
+ }
+
+ pszind_t pind = sz_psz2ind(sz_psz_quantize_floor(ndirty << LG_PAGE));
+ /*
+ * Non-empty slabs may get reused, so among slabs of similar
+ * dirtiness we prefer purging non-hugeified ones before hugeified
+ * ones; that way we still get some benefit from the hugification.
+ */
+ return (size_t)pind * 2 + (hpdata_huge_get(ps) ? 0 : 1);
+}
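The interleaving above is easiest to see with concrete numbers. Below is a minimal standalone sketch (illustrative only, not part of the patch; TOY_NPSIZES and the toy list sizing are assumptions, and dirty_ind stands in for the quantized pszind):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_NPSIZES 4
/* Sized so the two empty lists sit above every nonfull list. */
#define TOY_NPURGE_LISTS (2 * TOY_NPSIZES + 2)

/* Mirrors psset_purge_list_ind(); higher index == purged earlier. */
static size_t
toy_purge_list_ind(size_t dirty_ind, bool empty, bool huge) {
	if (empty) {
		return huge ? TOY_NPURGE_LISTS - 1 : TOY_NPURGE_LISTS - 2;
	}
	return dirty_ind * 2 + (huge ? 0 : 1);
}

int
main(void) {
	printf("%zu\n", toy_purge_list_ind(0, true, true));   /* 9: empty, huge */
	printf("%zu\n", toy_purge_list_ind(0, true, false));  /* 8: empty, nonhuge */
	printf("%zu\n", toy_purge_list_ind(3, false, false)); /* 7: dirtiest nonhuge */
	printf("%zu\n", toy_purge_list_ind(3, false, true));  /* 6: dirtiest huge */
	return 0;
}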
+
+static void
+psset_maybe_remove_purge_list(psset_t *psset, hpdata_t *ps) {
+ /*
+ * Remove the hpdata from its purge list (if it's in one). Even if it's
+ * going to stay in the same one, by appending it during
+ * psset_update_end, we move it to the end of its queue, so that we
+ * purge LRU within a given dirtiness bucket.
+ */
+ if (hpdata_purge_allowed_get(ps)) {
+ size_t ind = psset_purge_list_ind(ps);
+ hpdata_purge_list_t *purge_list = &psset->to_purge[ind];
+ hpdata_purge_list_remove(purge_list, ps);
+ if (hpdata_purge_list_empty(purge_list)) {
+ fb_unset(psset->purge_bitmap, PSSET_NPURGE_LISTS, ind);
+ }
+ }
+}
+
+static void
+psset_maybe_insert_purge_list(psset_t *psset, hpdata_t *ps) {
+ if (hpdata_purge_allowed_get(ps)) {
+ size_t ind = psset_purge_list_ind(ps);
+ hpdata_purge_list_t *purge_list = &psset->to_purge[ind];
+ if (hpdata_purge_list_empty(purge_list)) {
+ fb_set(psset->purge_bitmap, PSSET_NPURGE_LISTS, ind);
+ }
+ hpdata_purge_list_append(purge_list, ps);
+ }
+}
+
+void
+psset_update_begin(psset_t *psset, hpdata_t *ps) {
+ hpdata_assert_consistent(ps);
+ assert(hpdata_in_psset_get(ps));
+ hpdata_updating_set(ps, true);
+ psset_stats_remove(psset, ps);
+ if (hpdata_in_psset_alloc_container_get(ps)) {
+ /*
+ * Some metadata updates can break alloc container invariants
+ * (e.g. the longest free range determines the hpdata_heap_t the
+ * pageslab lives in).
+ */
+ assert(hpdata_alloc_allowed_get(ps));
+ psset_alloc_container_remove(psset, ps);
+ }
+ psset_maybe_remove_purge_list(psset, ps);
+ /*
+ * We don't update presence in the hugify list; we try to keep it FIFO,
+ * even in the presence of other metadata updates. We'll update
+ * presence at the end of the metadata update if necessary.
+ */
+}
+
+void
+psset_update_end(psset_t *psset, hpdata_t *ps) {
+ assert(hpdata_in_psset_get(ps));
+ hpdata_updating_set(ps, false);
+ psset_stats_insert(psset, ps);
+
+ /*
+ * The update begin should have removed ps from whatever alloc container
+ * it was in.
+ */
+ assert(!hpdata_in_psset_alloc_container_get(ps));
+ if (hpdata_alloc_allowed_get(ps)) {
+ psset_alloc_container_insert(psset, ps);
+ }
+ psset_maybe_insert_purge_list(psset, ps);
+
+ if (hpdata_hugify_allowed_get(ps)
+ && !hpdata_in_psset_hugify_container_get(ps)) {
+ hpdata_in_psset_hugify_container_set(ps, true);
+ hpdata_hugify_list_append(&psset->to_hugify, ps);
+ } else if (!hpdata_hugify_allowed_get(ps)
+ && hpdata_in_psset_hugify_container_get(ps)) {
+ hpdata_in_psset_hugify_container_set(ps, false);
+ hpdata_hugify_list_remove(&psset->to_hugify, ps);
+ }
+ hpdata_assert_consistent(ps);
+}
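The begin/end pair is meant to bracket every metadata mutation on an hpdata that is already in the psset. A sketch of the intended calling convention follows (illustrative only; hpdata_mutate_metadata() is a hypothetical stand-in for whatever hpdata setters a real caller invokes):

static void
caller_mutate_pageslab(psset_t *psset, hpdata_t *ps) {
	psset_update_begin(psset, ps);  /* ps leaves stats/alloc/purge containers */
	hpdata_mutate_metadata(ps);     /* container invariants may break here */
	psset_update_end(psset, ps);    /* ps is re-filed under its new state */
}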
+
+hpdata_t *
+psset_pick_alloc(psset_t *psset, size_t size) {
+ assert((size & PAGE_MASK) == 0);
+ assert(size <= HUGEPAGE);
+
+ pszind_t min_pind = sz_psz2ind(sz_psz_quantize_ceil(size));
+ pszind_t pind = (pszind_t)fb_ffs(psset->pageslab_bitmap, PSSET_NPSIZES,
+ (size_t)min_pind);
+ if (pind == PSSET_NPSIZES) {
+ return hpdata_empty_list_first(&psset->empty);
+ }
+ hpdata_t *ps = hpdata_age_heap_first(&psset->pageslabs[pind]);
+ if (ps == NULL) {
+ return NULL;
+ }
+
+ hpdata_assert_consistent(ps);
+
+ return ps;
+}
+
+hpdata_t *
+psset_pick_purge(psset_t *psset) {
+ ssize_t ind_ssz = fb_fls(psset->purge_bitmap, PSSET_NPURGE_LISTS,
+ PSSET_NPURGE_LISTS - 1);
+ if (ind_ssz < 0) {
+ return NULL;
+ }
+ pszind_t ind = (pszind_t)ind_ssz;
+ assert(ind < PSSET_NPURGE_LISTS);
+ hpdata_t *ps = hpdata_purge_list_first(&psset->to_purge[ind]);
+ assert(ps != NULL);
+ return ps;
+}
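Read together with psset_purge_list_ind, the fb_fls scan means purging proceeds top-down: hugeified empty slabs first, then nonhugeified empty slabs, then the nonfull lists in decreasing order of dirtiness, with nonhugeified slabs ahead of hugeified ones at each dirtiness level.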
+
+hpdata_t *
+psset_pick_hugify(psset_t *psset) {
+ return hpdata_hugify_list_first(&psset->to_hugify);
+}
+
+void
+psset_insert(psset_t *psset, hpdata_t *ps) {
+ hpdata_in_psset_set(ps, true);
+
+ psset_stats_insert(psset, ps);
+ if (hpdata_alloc_allowed_get(ps)) {
+ psset_alloc_container_insert(psset, ps);
+ }
+ psset_maybe_insert_purge_list(psset, ps);
+
+ if (hpdata_hugify_allowed_get(ps)) {
+ hpdata_in_psset_hugify_container_set(ps, true);
+ hpdata_hugify_list_append(&psset->to_hugify, ps);
+ }
+}
+
+void
+psset_remove(psset_t *psset, hpdata_t *ps) {
+ hpdata_in_psset_set(ps, false);
+
+ psset_stats_remove(psset, ps);
+ if (hpdata_in_psset_alloc_container_get(ps)) {
+ psset_alloc_container_remove(psset, ps);
+ }
+ psset_maybe_remove_purge_list(psset, ps);
+ if (hpdata_in_psset_hugify_container_get(ps)) {
+ hpdata_in_psset_hugify_container_set(ps, false);
+ hpdata_hugify_list_remove(&psset->to_hugify, ps);
+ }
+}
diff --git a/contrib/jemalloc/src/rtree.c b/contrib/jemalloc/src/rtree.c
index 4ae41fe2fec9..6496b5afdc4a 100644
--- a/contrib/jemalloc/src/rtree.c
+++ b/contrib/jemalloc/src/rtree.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -10,7 +9,7 @@
* used.
*/
bool
-rtree_new(rtree_t *rtree, bool zeroed) {
+rtree_new(rtree_t *rtree, base_t *base, bool zeroed) {
#ifdef JEMALLOC_JET
if (!zeroed) {
memset(rtree, 0, sizeof(rtree_t)); /* Clear root. */
@@ -18,6 +17,7 @@ rtree_new(rtree_t *rtree, bool zeroed) {
#else
assert(zeroed);
#endif
+ rtree->base = base;
if (malloc_mutex_init(&rtree->init_lock, "rtree", WITNESS_RANK_RTREE,
malloc_mutex_rank_exclusive)) {
@@ -28,75 +28,16 @@ rtree_new(rtree_t *rtree, bool zeroed) {
}
static rtree_node_elm_t *
-rtree_node_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
- return (rtree_node_elm_t *)base_alloc(tsdn, b0get(), nelms *
- sizeof(rtree_node_elm_t), CACHELINE);
+rtree_node_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
+ return (rtree_node_elm_t *)base_alloc(tsdn, rtree->base,
+ nelms * sizeof(rtree_node_elm_t), CACHELINE);
}
-rtree_node_alloc_t *JET_MUTABLE rtree_node_alloc = rtree_node_alloc_impl;
-
-static void
-rtree_node_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *node) {
- /* Nodes are never deleted during normal operation. */
- not_reached();
-}
-rtree_node_dalloc_t *JET_MUTABLE rtree_node_dalloc =
- rtree_node_dalloc_impl;
static rtree_leaf_elm_t *
-rtree_leaf_alloc_impl(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
- return (rtree_leaf_elm_t *)base_alloc(tsdn, b0get(), nelms *
- sizeof(rtree_leaf_elm_t), CACHELINE);
-}
-rtree_leaf_alloc_t *JET_MUTABLE rtree_leaf_alloc = rtree_leaf_alloc_impl;
-
-static void
-rtree_leaf_dalloc_impl(tsdn_t *tsdn, rtree_t *rtree, rtree_leaf_elm_t *leaf) {
- /* Leaves are never deleted during normal operation. */
- not_reached();
+rtree_leaf_alloc(tsdn_t *tsdn, rtree_t *rtree, size_t nelms) {
+ return (rtree_leaf_elm_t *)base_alloc(tsdn, rtree->base,
+ nelms * sizeof(rtree_leaf_elm_t), CACHELINE);
}
-rtree_leaf_dalloc_t *JET_MUTABLE rtree_leaf_dalloc =
- rtree_leaf_dalloc_impl;
-
-#ifdef JEMALLOC_JET
-# if RTREE_HEIGHT > 1
-static void
-rtree_delete_subtree(tsdn_t *tsdn, rtree_t *rtree, rtree_node_elm_t *subtree,
- unsigned level) {
- size_t nchildren = ZU(1) << rtree_levels[level].bits;
- if (level + 2 < RTREE_HEIGHT) {
- for (size_t i = 0; i < nchildren; i++) {
- rtree_node_elm_t *node =
- (rtree_node_elm_t *)atomic_load_p(&subtree[i].child,
- ATOMIC_RELAXED);
- if (node != NULL) {
- rtree_delete_subtree(tsdn, rtree, node, level +
- 1);
- }
- }
- } else {
- for (size_t i = 0; i < nchildren; i++) {
- rtree_leaf_elm_t *leaf =
- (rtree_leaf_elm_t *)atomic_load_p(&subtree[i].child,
- ATOMIC_RELAXED);
- if (leaf != NULL) {
- rtree_leaf_dalloc(tsdn, rtree, leaf);
- }
- }
- }
-
- if (subtree != rtree->root) {
- rtree_node_dalloc(tsdn, rtree, subtree);
- }
-}
-# endif
-
-void
-rtree_delete(tsdn_t *tsdn, rtree_t *rtree) {
-# if RTREE_HEIGHT > 1
- rtree_delete_subtree(tsdn, rtree, rtree->root, 0);
-# endif
-}
-#endif
static rtree_node_elm_t *
rtree_node_init(tsdn_t *tsdn, rtree_t *rtree, unsigned level,
diff --git a/contrib/jemalloc/src/safety_check.c b/contrib/jemalloc/src/safety_check.c
index 804155dcfc6a..209fdda92b5c 100644
--- a/contrib/jemalloc/src/safety_check.c
+++ b/contrib/jemalloc/src/safety_check.c
@@ -1,9 +1,21 @@
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
-static void (*safety_check_abort)(const char *message);
+static safety_check_abort_hook_t safety_check_abort;
-void safety_check_set_abort(void (*abort_fn)(const char *)) {
+void safety_check_fail_sized_dealloc(bool current_dealloc, const void *ptr,
+ size_t true_size, size_t input_size) {
+ char *src = current_dealloc ? "the current pointer being freed" :
+ "in thread cache, possibly from previous deallocations";
+
+ safety_check_fail("<jemalloc>: size mismatch detected (true size %zu "
+ "vs input size %zu), likely caused by application sized "
+ "deallocation bugs (source address: %p, %s). Suggest building with "
+ "--enable-debug or address sanitizer for debugging. Abort.\n",
+ true_size, input_size, ptr, src);
+}
+
+void safety_check_set_abort(safety_check_abort_hook_t abort_fn) {
safety_check_abort = abort_fn;
}
diff --git a/contrib/jemalloc/src/san.c b/contrib/jemalloc/src/san.c
new file mode 100644
index 000000000000..6e51291135c7
--- /dev/null
+++ b/contrib/jemalloc/src/san.c
@@ -0,0 +1,208 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/ehooks.h"
+#include "jemalloc/internal/san.h"
+#include "jemalloc/internal/tsd.h"
+
+/* The sanitizer options. */
+size_t opt_san_guard_large = SAN_GUARD_LARGE_EVERY_N_EXTENTS_DEFAULT;
+size_t opt_san_guard_small = SAN_GUARD_SMALL_EVERY_N_EXTENTS_DEFAULT;
+
+/* Aligned (-1 is off) ptrs will be junked & stashed on dealloc. */
+ssize_t opt_lg_san_uaf_align = SAN_LG_UAF_ALIGN_DEFAULT;
+
+/*
+ * Initialized in san_init(). When disabled, the mask is set to (uintptr_t)-1
+ * to always fail the nonfast_align check.
+ */
+uintptr_t san_cache_bin_nonfast_mask = SAN_CACHE_BIN_NONFAST_MASK_DEFAULT;
+
+static inline void
+san_find_guarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2,
+ uintptr_t *addr, size_t size, bool left, bool right) {
+ assert(!edata_guarded_get(edata));
+ assert(size % PAGE == 0);
+ *addr = (uintptr_t)edata_base_get(edata);
+ if (left) {
+ *guard1 = *addr;
+ *addr += SAN_PAGE_GUARD;
+ } else {
+ *guard1 = 0;
+ }
+
+ if (right) {
+ *guard2 = *addr + size;
+ } else {
+ *guard2 = 0;
+ }
+}
+
+static inline void
+san_find_unguarded_addr(edata_t *edata, uintptr_t *guard1, uintptr_t *guard2,
+ uintptr_t *addr, size_t size, bool left, bool right) {
+ assert(edata_guarded_get(edata));
+ assert(size % PAGE == 0);
+ *addr = (uintptr_t)edata_base_get(edata);
+ if (right) {
+ *guard2 = *addr + size;
+ } else {
+ *guard2 = 0;
+ }
+
+ if (left) {
+ *guard1 = *addr - SAN_PAGE_GUARD;
+ assert(*guard1 != 0);
+ *addr = *guard1;
+ } else {
+ *guard1 = 0;
+ }
+}
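The two helpers are inverses of each other. Here is a standalone sketch of the both-sides case (illustrative only; the 4 KiB page and one-page guard are assumed stand-ins for PAGE and SAN_PAGE_GUARD):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE 4096
#define TOY_PAGE_GUARD TOY_PAGE

int
main(void) {
	uintptr_t base = 0x100000;    /* edata_base_get() stand-in */
	size_t usize = 4 * TOY_PAGE;  /* usable size once guarded */

	/* Mirrors san_find_guarded_addr() with left == right == true. */
	uintptr_t guard1 = base;
	uintptr_t addr = base + TOY_PAGE_GUARD;
	uintptr_t guard2 = addr + usize;
	/* The whole extent spans the usable size plus both guards. */
	assert(guard2 + TOY_PAGE_GUARD - guard1 == usize + 2 * TOY_PAGE_GUARD);

	/* Mirrors san_find_unguarded_addr(): recover the original base. */
	assert(addr - TOY_PAGE_GUARD == base);
	printf("guard1=%#lx addr=%#lx guard2=%#lx\n", (unsigned long)guard1,
	    (unsigned long)addr, (unsigned long)guard2);
	return 0;
}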
+
+void
+san_guard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata, emap_t *emap,
+ bool left, bool right, bool remap) {
+ assert(left || right);
+ if (remap) {
+ emap_deregister_boundary(tsdn, emap, edata);
+ }
+
+ size_t size_with_guards = edata_size_get(edata);
+ size_t usize = (left && right)
+ ? san_two_side_unguarded_sz(size_with_guards)
+ : san_one_side_unguarded_sz(size_with_guards);
+
+ uintptr_t guard1, guard2, addr;
+ san_find_guarded_addr(edata, &guard1, &guard2, &addr, usize, left,
+ right);
+
+ assert(edata_state_get(edata) == extent_state_active);
+ ehooks_guard(tsdn, ehooks, (void *)guard1, (void *)guard2);
+
+ /* Update the guarded addr and usable size of the edata. */
+ edata_size_set(edata, usize);
+ edata_addr_set(edata, (void *)addr);
+ edata_guarded_set(edata, true);
+
+ if (remap) {
+ emap_register_boundary(tsdn, emap, edata, SC_NSIZES,
+ /* slab */ false);
+ }
+}
+
+static void
+san_unguard_pages_impl(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ emap_t *emap, bool left, bool right, bool remap) {
+ assert(left || right);
+ /* Remove the inner boundary which no longer exists. */
+ if (remap) {
+ assert(edata_state_get(edata) == extent_state_active);
+ emap_deregister_boundary(tsdn, emap, edata);
+ } else {
+ assert(edata_state_get(edata) == extent_state_retained);
+ }
+
+ size_t size = edata_size_get(edata);
+ size_t size_with_guards = (left && right)
+ ? san_two_side_guarded_sz(size)
+ : san_one_side_guarded_sz(size);
+
+ uintptr_t guard1, guard2, addr;
+ san_find_unguarded_addr(edata, &guard1, &guard2, &addr, size, left,
+ right);
+
+ ehooks_unguard(tsdn, ehooks, (void *)guard1, (void *)guard2);
+
+ /* Update the true addr and usable size of the edata. */
+ edata_size_set(edata, size_with_guards);
+ edata_addr_set(edata, (void *)addr);
+ edata_guarded_set(edata, false);
+
+ /*
+ * Then re-register the outer boundary including the guards, if
+ * requested.
+ */
+ if (remap) {
+ emap_register_boundary(tsdn, emap, edata, SC_NSIZES,
+ /* slab */ false);
+ }
+}
+
+void
+san_unguard_pages(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ emap_t *emap, bool left, bool right) {
+ san_unguard_pages_impl(tsdn, ehooks, edata, emap, left, right,
+ /* remap */ true);
+}
+
+void
+san_unguard_pages_pre_destroy(tsdn_t *tsdn, ehooks_t *ehooks, edata_t *edata,
+ emap_t *emap) {
+ emap_assert_not_mapped(tsdn, emap, edata);
+ /*
+ * We don't want to touch the emap of about-to-be-destroyed extents, as
+ * they have been unmapped upon eviction from the retained ecache. Also,
+ * we unguard the extents to the right, because retained extents only
+ * own their right guard page per san_bump_alloc's logic.
+ */
+ san_unguard_pages_impl(tsdn, ehooks, edata, emap, /* left */ false,
+ /* right */ true, /* remap */ false);
+}
+
+static bool
+san_stashed_corrupted(void *ptr, size_t size) {
+ if (san_junk_ptr_should_slow()) {
+ for (size_t i = 0; i < size; i++) {
+ if (((char *)ptr)[i] != (char)uaf_detect_junk) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ void *first, *mid, *last;
+ san_junk_ptr_locations(ptr, size, &first, &mid, &last);
+ if (*(uintptr_t *)first != uaf_detect_junk ||
+ *(uintptr_t *)mid != uaf_detect_junk ||
+ *(uintptr_t *)last != uaf_detect_junk) {
+ return true;
+ }
+
+ return false;
+}
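The fast path above probes just three word-sized locations instead of scanning every byte. A standalone sketch with assumed probe locations (first, middle, and last word, standing in for san_junk_ptr_locations()) and an assumed junk byte of 0x5a:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TOY_JUNK_BYTE 0x5a

static bool
toy_stashed_corrupted(void *ptr, size_t size) {
	uintptr_t junk;
	memset(&junk, TOY_JUNK_BYTE, sizeof(junk));
	uintptr_t *first = (uintptr_t *)ptr;
	uintptr_t *mid = (uintptr_t *)((char *)ptr
	    + (size / 2 / sizeof(uintptr_t)) * sizeof(uintptr_t));
	uintptr_t *last = (uintptr_t *)((char *)ptr + size
	    - sizeof(uintptr_t));
	return *first != junk || *mid != junk || *last != junk;
}

int
main(void) {
	size_t size = 64;
	void *p = malloc(size);
	if (p == NULL) {
		return 1;
	}
	memset(p, TOY_JUNK_BYTE, size);                 /* "freed and junked" */
	printf("%d\n", toy_stashed_corrupted(p, size)); /* 0: still all junk */
	((char *)p)[0] = 0;                             /* simulated write-after-free */
	printf("%d\n", toy_stashed_corrupted(p, size)); /* 1: corruption caught */
	free(p);
	return 0;
}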
+
+void
+san_check_stashed_ptrs(void **ptrs, size_t nstashed, size_t usize) {
+ /*
+ * Verify that the junk-filled & stashed pointers remain unchanged, to
+ * detect write-after-free.
+ */
+ for (size_t n = 0; n < nstashed; n++) {
+ void *stashed = ptrs[n];
+ assert(stashed != NULL);
+ assert(cache_bin_nonfast_aligned(stashed));
+ if (unlikely(san_stashed_corrupted(stashed, usize))) {
+ safety_check_fail("<jemalloc>: Write-after-free "
+ "detected on deallocated pointer %p (size %zu).\n",
+ stashed, usize);
+ }
+ }
+}
+
+void
+tsd_san_init(tsd_t *tsd) {
+ *tsd_san_extents_until_guard_smallp_get(tsd) = opt_san_guard_small;
+ *tsd_san_extents_until_guard_largep_get(tsd) = opt_san_guard_large;
+}
+
+void
+san_init(ssize_t lg_san_uaf_align) {
+ assert(lg_san_uaf_align == -1 || lg_san_uaf_align >= LG_PAGE);
+ if (lg_san_uaf_align == -1) {
+ san_cache_bin_nonfast_mask = (uintptr_t)-1;
+ return;
+ }
+
+ san_cache_bin_nonfast_mask = ((uintptr_t)1 << lg_san_uaf_align) - 1;
+}
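How the mask classifies pointers, sketched with an assumed lg_san_uaf_align of 18 (i.e. sampling deallocations at 256 KiB alignment; the value is an example, not the default):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors cache_bin_nonfast_aligned(): masked low bits all zero. */
static bool
toy_nonfast_aligned(uintptr_t ptr, uintptr_t mask) {
	return (ptr & mask) == 0;
}

int
main(void) {
	uintptr_t mask = ((uintptr_t)1 << 18) - 1;
	printf("%d\n", toy_nonfast_aligned(0x40000, mask)); /* 1: slow (checked) path */
	printf("%d\n", toy_nonfast_aligned(0x40010, mask)); /* 0: fast path */
	/* Disabled: mask (uintptr_t)-1 matches no non-NULL pointer. */
	printf("%d\n", toy_nonfast_aligned(0x40000, (uintptr_t)-1)); /* 0 */
	return 0;
}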
diff --git a/contrib/jemalloc/src/san_bump.c b/contrib/jemalloc/src/san_bump.c
new file mode 100644
index 000000000000..888974555f28
--- /dev/null
+++ b/contrib/jemalloc/src/san_bump.c
@@ -0,0 +1,104 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/san_bump.h"
+#include "jemalloc/internal/pac.h"
+#include "jemalloc/internal/san.h"
+#include "jemalloc/internal/ehooks.h"
+#include "jemalloc/internal/edata_cache.h"
+
+static bool
+san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
+ ehooks_t *ehooks, size_t size);
+
+edata_t *
+san_bump_alloc(tsdn_t *tsdn, san_bump_alloc_t* sba, pac_t *pac,
+ ehooks_t *ehooks, size_t size, bool zero) {
+ assert(san_bump_enabled());
+
+ edata_t* to_destroy;
+ size_t guarded_size = san_one_side_guarded_sz(size);
+
+ malloc_mutex_lock(tsdn, &sba->mtx);
+
+ if (sba->curr_reg == NULL ||
+ edata_size_get(sba->curr_reg) < guarded_size) {
+ /*
+ * If the current region can't accommodate the allocation,
+ * try replacing it with a larger one, destroying the current
+ * region if the replacement succeeds.
+ */
+ to_destroy = sba->curr_reg;
+ bool err = san_bump_grow_locked(tsdn, sba, pac, ehooks,
+ guarded_size);
+ if (err) {
+ goto label_err;
+ }
+ } else {
+ to_destroy = NULL;
+ }
+ assert(guarded_size <= edata_size_get(sba->curr_reg));
+ size_t trail_size = edata_size_get(sba->curr_reg) - guarded_size;
+
+ edata_t* edata;
+ if (trail_size != 0) {
+ edata_t* curr_reg_trail = extent_split_wrapper(tsdn, pac,
+ ehooks, sba->curr_reg, guarded_size, trail_size,
+ /* holding_core_locks */ true);
+ if (curr_reg_trail == NULL) {
+ goto label_err;
+ }
+ edata = sba->curr_reg;
+ sba->curr_reg = curr_reg_trail;
+ } else {
+ edata = sba->curr_reg;
+ sba->curr_reg = NULL;
+ }
+
+ malloc_mutex_unlock(tsdn, &sba->mtx);
+
+ assert(!edata_guarded_get(edata));
+ assert(sba->curr_reg == NULL || !edata_guarded_get(sba->curr_reg));
+ assert(to_destroy == NULL || !edata_guarded_get(to_destroy));
+
+ if (to_destroy != NULL) {
+ extent_destroy_wrapper(tsdn, pac, ehooks, to_destroy);
+ }
+
+ san_guard_pages(tsdn, ehooks, edata, pac->emap, /* left */ false,
+ /* right */ true, /* remap */ true);
+
+ if (extent_commit_zero(tsdn, ehooks, edata, /* commit */ true, zero,
+ /* growing_retained */ false)) {
+ extent_record(tsdn, pac, ehooks, &pac->ecache_retained,
+ edata);
+ return NULL;
+ }
+
+ if (config_prof) {
+ extent_gdump_add(tsdn, edata);
+ }
+
+ return edata;
+label_err:
+ malloc_mutex_unlock(tsdn, &sba->mtx);
+ return NULL;
+}
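As a worked example (assuming a 4 KiB page and a 4 MiB SBA_RETAINED_ALLOC_SIZE): a 16 KiB request needs guarded_size = san_one_side_guarded_sz(16 KiB) = 20 KiB. If curr_reg is empty, san_bump_grow_locked allocates a 4 MiB region; the split then hands the leading 20 KiB to the caller (to be guarded on its right side) and keeps the remaining 4 MiB - 20 KiB as curr_reg for subsequent requests.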
+
+static bool
+san_bump_grow_locked(tsdn_t *tsdn, san_bump_alloc_t *sba, pac_t *pac,
+ ehooks_t *ehooks, size_t size) {
+ malloc_mutex_assert_owner(tsdn, &sba->mtx);
+
+ bool committed = false, zeroed = false;
+ size_t alloc_size = size > SBA_RETAINED_ALLOC_SIZE ? size :
+ SBA_RETAINED_ALLOC_SIZE;
+ assert((alloc_size & PAGE_MASK) == 0);
+ sba->curr_reg = extent_alloc_wrapper(tsdn, pac, ehooks, NULL,
+ alloc_size, PAGE, zeroed, &committed,
+ /* growing_retained */ true);
+ if (sba->curr_reg == NULL) {
+ return true;
+ }
+ return false;
+}
diff --git a/contrib/jemalloc/src/sc.c b/contrib/jemalloc/src/sc.c
index 89ddb6ba6a91..e4a94d89f245 100644
--- a/contrib/jemalloc/src/sc.c
+++ b/contrib/jemalloc/src/sc.c
@@ -13,9 +13,7 @@
* at least the damage is compartmentalized to this file.
*/
-sc_data_t sc_data_global;
-
-static size_t
+size_t
reg_size_compute(int lg_base, int lg_delta, int ndelta) {
return (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta);
}
@@ -64,9 +62,8 @@ size_class(
sc->lg_base = lg_base;
sc->lg_delta = lg_delta;
sc->ndelta = ndelta;
- sc->psz = (reg_size_compute(lg_base, lg_delta, ndelta)
- % (ZU(1) << lg_page) == 0);
- size_t size = (ZU(1) << lg_base) + (ZU(ndelta) << lg_delta);
+ size_t size = reg_size_compute(lg_base, lg_delta, ndelta);
+ sc->psz = (size % (ZU(1) << lg_page) == 0);
if (index == 0) {
assert(!sc->psz);
}
@@ -245,7 +242,7 @@ size_classes(
assert(sc_data->lg_large_minclass == SC_LG_LARGE_MINCLASS);
assert(sc_data->large_maxclass == SC_LARGE_MAXCLASS);
- /*
+ /*
* In the allocation fastpath, we want to assume that we can
* unconditionally subtract the requested allocation size from
* a ssize_t, and detect passing through 0 correctly. This
@@ -257,12 +254,8 @@ size_classes(
void
sc_data_init(sc_data_t *sc_data) {
- assert(!sc_data->initialized);
-
- int lg_max_lookup = 12;
-
size_classes(sc_data, LG_SIZEOF_PTR, LG_QUANTUM, SC_LG_TINY_MIN,
- lg_max_lookup, LG_PAGE, 2);
+ SC_LG_MAX_LOOKUP, LG_PAGE, SC_LG_NGROUP);
sc_data->initialized = true;
}
diff --git a/contrib/jemalloc/src/sec.c b/contrib/jemalloc/src/sec.c
new file mode 100644
index 000000000000..df6755904951
--- /dev/null
+++ b/contrib/jemalloc/src/sec.c
@@ -0,0 +1,422 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/sec.h"
+
+static edata_t *sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size,
+ size_t alignment, bool zero, bool guarded, bool frequent_reuse,
+ bool *deferred_work_generated);
+static bool sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool zero, bool *deferred_work_generated);
+static bool sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ size_t old_size, size_t new_size, bool *deferred_work_generated);
+static void sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ bool *deferred_work_generated);
+
+static void
+sec_bin_init(sec_bin_t *bin) {
+ bin->being_batch_filled = false;
+ bin->bytes_cur = 0;
+ edata_list_active_init(&bin->freelist);
+}
+
+bool
+sec_init(tsdn_t *tsdn, sec_t *sec, base_t *base, pai_t *fallback,
+ const sec_opts_t *opts) {
+ assert(opts->max_alloc >= PAGE);
+
+ size_t max_alloc = PAGE_FLOOR(opts->max_alloc);
+ pszind_t npsizes = sz_psz2ind(max_alloc) + 1;
+
+ size_t sz_shards = opts->nshards * sizeof(sec_shard_t);
+ size_t sz_bins = opts->nshards * (size_t)npsizes * sizeof(sec_bin_t);
+ size_t sz_alloc = sz_shards + sz_bins;
+ void *dynalloc = base_alloc(tsdn, base, sz_alloc, CACHELINE);
+ if (dynalloc == NULL) {
+ return true;
+ }
+ sec_shard_t *shard_cur = (sec_shard_t *)dynalloc;
+ sec->shards = shard_cur;
+ sec_bin_t *bin_cur = (sec_bin_t *)&shard_cur[opts->nshards];
+ /* Just for asserts, below. */
+ sec_bin_t *bin_start = bin_cur;
+
+ for (size_t i = 0; i < opts->nshards; i++) {
+ sec_shard_t *shard = shard_cur;
+ shard_cur++;
+ bool err = malloc_mutex_init(&shard->mtx, "sec_shard",
+ WITNESS_RANK_SEC_SHARD, malloc_mutex_rank_exclusive);
+ if (err) {
+ return true;
+ }
+ shard->enabled = true;
+ shard->bins = bin_cur;
+ for (pszind_t j = 0; j < npsizes; j++) {
+ sec_bin_init(&shard->bins[j]);
+ bin_cur++;
+ }
+ shard->bytes_cur = 0;
+ shard->to_flush_next = 0;
+ }
+ /*
+ * bin_start should exactly match the first unused byte after the
+ * shards.
+ */
+ assert((void *)shard_cur == (void *)bin_start);
+ /* And the last bin to use up the last bytes of the allocation. */
+ assert((char *)bin_cur == ((char *)dynalloc + sz_alloc));
+ sec->fallback = fallback;
+
+ sec->opts = *opts;
+ sec->npsizes = npsizes;
+
+ /*
+ * Initialize these last so that an improper use of an SEC whose
+ * initialization failed will segfault in an easy-to-spot way.
+ */
+ sec->pai.alloc = &sec_alloc;
+ sec->pai.alloc_batch = &pai_alloc_batch_default;
+ sec->pai.expand = &sec_expand;
+ sec->pai.shrink = &sec_shrink;
+ sec->pai.dalloc = &sec_dalloc;
+ sec->pai.dalloc_batch = &pai_dalloc_batch_default;
+
+ return false;
+}
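The shard headers and bins come from one base allocation, with the bins carved out immediately after the headers. A standalone sketch of the same carving (toy types are stand-ins for sec_shard_t and sec_bin_t):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { void *bins; } toy_shard_t;
typedef struct { size_t bytes_cur; } toy_bin_t;

int
main(void) {
	size_t nshards = 3, npsizes = 8;
	size_t sz_shards = nshards * sizeof(toy_shard_t);
	size_t sz_bins = nshards * npsizes * sizeof(toy_bin_t);
	char *dynalloc = malloc(sz_shards + sz_bins);
	if (dynalloc == NULL) {
		return 1;
	}

	toy_shard_t *shard = (toy_shard_t *)dynalloc;
	toy_bin_t *bin = (toy_bin_t *)&shard[nshards];
	for (size_t i = 0; i < nshards; i++) {
		shard[i].bins = &bin[i * npsizes];  /* npsizes bins per shard */
	}
	/* Bins start exactly where the shard headers end... */
	assert((char *)&shard[nshards] == (char *)bin);
	/* ...and the last bin ends exactly at the end of the allocation. */
	assert((char *)&bin[nshards * npsizes] == dynalloc + sz_shards + sz_bins);
	printf("layout ok\n");
	free(dynalloc);
	return 0;
}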
+
+static sec_shard_t *
+sec_shard_pick(tsdn_t *tsdn, sec_t *sec) {
+ /*
+ * Eventually, we should implement affinity, tracking source shard using
+ * the edata_t's newly freed up fields. For now, just randomly
+ * distribute across all shards.
+ */
+ if (tsdn_null(tsdn)) {
+ return &sec->shards[0];
+ }
+ tsd_t *tsd = tsdn_tsd(tsdn);
+ uint8_t *idxp = tsd_sec_shardp_get(tsd);
+ if (*idxp == (uint8_t)-1) {
+ /*
+ * First use; initialize using the trick from Daniel Lemire's
+ * "A fast alternative to the modulo reduction. Use a 64 bit
+ * number to store 32 bits, since we'll deliberately overflow
+ * when we multiply by the number of shards.
+ */
+ uint64_t rand32 = prng_lg_range_u64(tsd_prng_statep_get(tsd), 32);
+ uint32_t idx =
+ (uint32_t)((rand32 * (uint64_t)sec->opts.nshards) >> 32);
+ assert(idx < (uint32_t)sec->opts.nshards);
+ *idxp = (uint8_t)idx;
+ }
+ return &sec->shards[*idxp];
+}
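The multiply-shift trick referenced above maps a 32-bit random value into [0, nshards) without a modulo, approximately uniformly. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

static uint32_t
toy_shard_of(uint32_t rand32, uint32_t nshards) {
	/* The 64-bit product deliberately "overflows" past 32 bits. */
	return (uint32_t)(((uint64_t)rand32 * nshards) >> 32);
}

int
main(void) {
	uint32_t nshards = 4;
	printf("%u %u %u\n",
	    toy_shard_of(0, nshards),              /* 0 */
	    toy_shard_of(UINT32_MAX / 2, nshards), /* 1 */
	    toy_shard_of(UINT32_MAX, nshards));    /* 3 */
	return 0;
}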
+
+/*
+ * Perhaps surprisingly, this can be called on the alloc pathways; if we hit an
+ * empty cache, we'll try to fill it, which can push the shard over its limit.
+ */
+static void
+sec_flush_some_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ edata_list_active_t to_flush;
+ edata_list_active_init(&to_flush);
+ while (shard->bytes_cur > sec->opts.bytes_after_flush) {
+ /* Pick a victim. */
+ sec_bin_t *bin = &shard->bins[shard->to_flush_next];
+
+ /* Update our victim-picking state. */
+ shard->to_flush_next++;
+ if (shard->to_flush_next == sec->npsizes) {
+ shard->to_flush_next = 0;
+ }
+
+ assert(shard->bytes_cur >= bin->bytes_cur);
+ if (bin->bytes_cur != 0) {
+ shard->bytes_cur -= bin->bytes_cur;
+ bin->bytes_cur = 0;
+ edata_list_active_concat(&to_flush, &bin->freelist);
+ }
+ /*
+ * Either bin->bytes_cur was 0, in which case we didn't touch
+ * the bin list but it should be empty anyways (or else we
+ * missed a bytes_cur update on a list modification), or it
+ * *wasn't* 0 and we emptied it ourselves. Either way, it should
+ * be empty now.
+ */
+ assert(edata_list_active_empty(&bin->freelist));
+ }
+
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ bool deferred_work_generated = false;
+ pai_dalloc_batch(tsdn, sec->fallback, &to_flush,
+ &deferred_work_generated);
+}
+
+static edata_t *
+sec_shard_alloc_locked(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
+ sec_bin_t *bin) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ if (!shard->enabled) {
+ return NULL;
+ }
+ edata_t *edata = edata_list_active_first(&bin->freelist);
+ if (edata != NULL) {
+ edata_list_active_remove(&bin->freelist, edata);
+ assert(edata_size_get(edata) <= bin->bytes_cur);
+ bin->bytes_cur -= edata_size_get(edata);
+ assert(edata_size_get(edata) <= shard->bytes_cur);
+ shard->bytes_cur -= edata_size_get(edata);
+ }
+ return edata;
+}
+
+static edata_t *
+sec_batch_fill_and_alloc(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
+ sec_bin_t *bin, size_t size) {
+ malloc_mutex_assert_not_owner(tsdn, &shard->mtx);
+
+ edata_list_active_t result;
+ edata_list_active_init(&result);
+ bool deferred_work_generated = false;
+ size_t nalloc = pai_alloc_batch(tsdn, sec->fallback, size,
+ 1 + sec->opts.batch_fill_extra, &result, &deferred_work_generated);
+
+ edata_t *ret = edata_list_active_first(&result);
+ if (ret != NULL) {
+ edata_list_active_remove(&result, ret);
+ }
+
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ bin->being_batch_filled = false;
+ /*
+ * Handle the easy case first: nothing to cache. Note that this can
+ * only happen in case of OOM, since sec_alloc checks the expected
+ * number of allocs, and doesn't bother going down the batch_fill
+ * pathway if there won't be anything left to cache. So to be in this
+ * code path, we must have asked for > 1 alloc, but only gotten 1 back.
+ */
+ if (nalloc <= 1) {
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ return ret;
+ }
+
+ size_t new_cached_bytes = (nalloc - 1) * size;
+
+ edata_list_active_concat(&bin->freelist, &result);
+ bin->bytes_cur += new_cached_bytes;
+ shard->bytes_cur += new_cached_bytes;
+
+ if (shard->bytes_cur > sec->opts.max_bytes) {
+ sec_flush_some_and_unlock(tsdn, sec, shard);
+ } else {
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ }
+
+ return ret;
+}
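For a concrete sense of the accounting (with a hypothetical batch_fill_extra of 7 and a 16 KiB bin): a fully successful fill returns nalloc = 8 extents, one of which goes to the caller immediately; the other seven, 112 KiB in total, are added to bin->freelist and counted against both bin->bytes_cur and shard->bytes_cur.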
+
+static edata_t *
+sec_alloc(tsdn_t *tsdn, pai_t *self, size_t size, size_t alignment, bool zero,
+ bool guarded, bool frequent_reuse, bool *deferred_work_generated) {
+ assert((size & PAGE_MASK) == 0);
+ assert(!guarded);
+
+ sec_t *sec = (sec_t *)self;
+
+ if (zero || alignment > PAGE || sec->opts.nshards == 0
+ || size > sec->opts.max_alloc) {
+ return pai_alloc(tsdn, sec->fallback, size, alignment, zero,
+ /* guarded */ false, frequent_reuse,
+ deferred_work_generated);
+ }
+ pszind_t pszind = sz_psz2ind(size);
+ assert(pszind < sec->npsizes);
+
+ sec_shard_t *shard = sec_shard_pick(tsdn, sec);
+ sec_bin_t *bin = &shard->bins[pszind];
+ bool do_batch_fill = false;
+
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ edata_t *edata = sec_shard_alloc_locked(tsdn, sec, shard, bin);
+ if (edata == NULL) {
+ if (!bin->being_batch_filled
+ && sec->opts.batch_fill_extra > 0) {
+ bin->being_batch_filled = true;
+ do_batch_fill = true;
+ }
+ }
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ if (edata == NULL) {
+ if (do_batch_fill) {
+ edata = sec_batch_fill_and_alloc(tsdn, sec, shard, bin,
+ size);
+ } else {
+ edata = pai_alloc(tsdn, sec->fallback, size, alignment,
+ zero, /* guarded */ false, frequent_reuse,
+ deferred_work_generated);
+ }
+ }
+ return edata;
+}
+
+static bool
+sec_expand(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
+ size_t new_size, bool zero, bool *deferred_work_generated) {
+ sec_t *sec = (sec_t *)self;
+ return pai_expand(tsdn, sec->fallback, edata, old_size, new_size, zero,
+ deferred_work_generated);
+}
+
+static bool
+sec_shrink(tsdn_t *tsdn, pai_t *self, edata_t *edata, size_t old_size,
+ size_t new_size, bool *deferred_work_generated) {
+ sec_t *sec = (sec_t *)self;
+ return pai_shrink(tsdn, sec->fallback, edata, old_size, new_size,
+ deferred_work_generated);
+}
+
+static void
+sec_flush_all_locked(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ shard->bytes_cur = 0;
+ edata_list_active_t to_flush;
+ edata_list_active_init(&to_flush);
+ for (pszind_t i = 0; i < sec->npsizes; i++) {
+ sec_bin_t *bin = &shard->bins[i];
+ bin->bytes_cur = 0;
+ edata_list_active_concat(&to_flush, &bin->freelist);
+ }
+
+ /*
+ * Ordinarily we would try to avoid doing the batch deallocation while
+ * holding the shard mutex, but the flush_all pathways only happen when
+ * we're disabling the HPA or resetting the arena, both of which are
+ * rare pathways.
+ */
+ bool deferred_work_generated = false;
+ pai_dalloc_batch(tsdn, sec->fallback, &to_flush,
+ &deferred_work_generated);
+}
+
+static void
+sec_shard_dalloc_and_unlock(tsdn_t *tsdn, sec_t *sec, sec_shard_t *shard,
+ edata_t *edata) {
+ malloc_mutex_assert_owner(tsdn, &shard->mtx);
+ assert(shard->bytes_cur <= sec->opts.max_bytes);
+ size_t size = edata_size_get(edata);
+ pszind_t pszind = sz_psz2ind(size);
+ assert(pszind < sec->npsizes);
+ /*
+ * Prepending here results in LIFO allocation per bin, which seems
+ * reasonable.
+ */
+ sec_bin_t *bin = &shard->bins[pszind];
+ edata_list_active_prepend(&bin->freelist, edata);
+ bin->bytes_cur += size;
+ shard->bytes_cur += size;
+ if (shard->bytes_cur > sec->opts.max_bytes) {
+ /*
+ * We've exceeded the shard limit. As a nod to fragmentation
+ * avoidance, we flush bins round-robin across the whole shard
+ * rather than just the bin that overflowed (in case one of the
+ * extents we flush is highly preferred from a
+ * fragmentation-avoidance perspective in the backing allocator).
+ * This has the extra advantage of not requiring advanced cache
+ * balancing strategies.
+ */
+ sec_flush_some_and_unlock(tsdn, sec, shard);
+ malloc_mutex_assert_not_owner(tsdn, &shard->mtx);
+ } else {
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ }
+}
+
+static void
+sec_dalloc(tsdn_t *tsdn, pai_t *self, edata_t *edata,
+ bool *deferred_work_generated) {
+ sec_t *sec = (sec_t *)self;
+ if (sec->opts.nshards == 0
+ || edata_size_get(edata) > sec->opts.max_alloc) {
+ pai_dalloc(tsdn, sec->fallback, edata,
+ deferred_work_generated);
+ return;
+ }
+ sec_shard_t *shard = sec_shard_pick(tsdn, sec);
+ malloc_mutex_lock(tsdn, &shard->mtx);
+ if (shard->enabled) {
+ sec_shard_dalloc_and_unlock(tsdn, sec, shard, edata);
+ } else {
+ malloc_mutex_unlock(tsdn, &shard->mtx);
+ pai_dalloc(tsdn, sec->fallback, edata,
+ deferred_work_generated);
+ }
+}
+
+void
+sec_flush(tsdn_t *tsdn, sec_t *sec) {
+ for (size_t i = 0; i < sec->opts.nshards; i++) {
+ malloc_mutex_lock(tsdn, &sec->shards[i].mtx);
+ sec_flush_all_locked(tsdn, sec, &sec->shards[i]);
+ malloc_mutex_unlock(tsdn, &sec->shards[i].mtx);
+ }
+}
+
+void
+sec_disable(tsdn_t *tsdn, sec_t *sec) {
+ for (size_t i = 0; i < sec->opts.nshards; i++) {
+ malloc_mutex_lock(tsdn, &sec->shards[i].mtx);
+ sec->shards[i].enabled = false;
+ sec_flush_all_locked(tsdn, sec, &sec->shards[i]);
+ malloc_mutex_unlock(tsdn, &sec->shards[i].mtx);
+ }
+}
+
+void
+sec_stats_merge(tsdn_t *tsdn, sec_t *sec, sec_stats_t *stats) {
+ size_t sum = 0;
+ for (size_t i = 0; i < sec->opts.nshards; i++) {
+ /*
+ * We could save these lock acquisitions by making bytes_cur
+ * atomic, but stats collection is rare anyways and we expect
+ * the number and type of stats to get more interesting.
+ */
+ malloc_mutex_lock(tsdn, &sec->shards[i].mtx);
+ sum += sec->shards[i].bytes_cur;
+ malloc_mutex_unlock(tsdn, &sec->shards[i].mtx);
+ }
+ stats->bytes += sum;
+}
+
+void
+sec_mutex_stats_read(tsdn_t *tsdn, sec_t *sec,
+ mutex_prof_data_t *mutex_prof_data) {
+ for (size_t i = 0; i < sec->opts.nshards; i++) {
+ malloc_mutex_lock(tsdn, &sec->shards[i].mtx);
+ malloc_mutex_prof_accum(tsdn, mutex_prof_data,
+ &sec->shards[i].mtx);
+ malloc_mutex_unlock(tsdn, &sec->shards[i].mtx);
+ }
+}
+
+void
+sec_prefork2(tsdn_t *tsdn, sec_t *sec) {
+ for (size_t i = 0; i < sec->opts.nshards; i++) {
+ malloc_mutex_prefork(tsdn, &sec->shards[i].mtx);
+ }
+}
+
+void
+sec_postfork_parent(tsdn_t *tsdn, sec_t *sec) {
+ for (size_t i = 0; i < sec->opts.nshards; i++) {
+ malloc_mutex_postfork_parent(tsdn, &sec->shards[i].mtx);
+ }
+}
+
+void
+sec_postfork_child(tsdn_t *tsdn, sec_t *sec) {
+ for (size_t i = 0; i < sec->opts.nshards; i++) {
+ malloc_mutex_postfork_child(tsdn, &sec->shards[i].mtx);
+ }
+}
diff --git a/contrib/jemalloc/src/stats.c b/contrib/jemalloc/src/stats.c
index 118e05d2911a..efc70fd3c8b2 100644
--- a/contrib/jemalloc/src/stats.c
+++ b/contrib/jemalloc/src/stats.c
@@ -1,12 +1,13 @@
-#define JEMALLOC_STATS_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/emitter.h"
+#include "jemalloc/internal/fxp.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_prof.h"
+#include "jemalloc/internal/prof_stats.h"
const char *global_mutex_names[mutex_prof_num_global_mutexes] = {
#define OP(mtx) #mtx,
@@ -25,22 +26,28 @@ const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = {
xmallctl(n, (void *)v, &sz, NULL, 0); \
} while (0)
-#define CTL_M2_GET(n, i, v, t) do { \
- size_t mib[CTL_MAX_DEPTH]; \
- size_t miblen = sizeof(mib) / sizeof(size_t); \
+#define CTL_LEAF_PREPARE(mib, miblen, name) do { \
+ assert(miblen < CTL_MAX_DEPTH); \
+ size_t miblen_new = CTL_MAX_DEPTH; \
+ xmallctlmibnametomib(mib, miblen, name, &miblen_new); \
+ assert(miblen_new > miblen); \
+} while (0)
+
+#define CTL_LEAF(mib, miblen, leaf, v, t) do { \
+ assert(miblen < CTL_MAX_DEPTH); \
+ size_t miblen_new = CTL_MAX_DEPTH; \
size_t sz = sizeof(t); \
- xmallctlnametomib(n, mib, &miblen); \
- mib[2] = (i); \
- xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
+ xmallctlbymibname(mib, miblen, leaf, &miblen_new, (void *)v, \
+ &sz, NULL, 0); \
+ assert(miblen_new == miblen + 1); \
} while (0)
-#define CTL_M2_M4_GET(n, i, j, v, t) do { \
+#define CTL_M2_GET(n, i, v, t) do { \
size_t mib[CTL_MAX_DEPTH]; \
size_t miblen = sizeof(mib) / sizeof(size_t); \
size_t sz = sizeof(t); \
xmallctlnametomib(n, mib, &miblen); \
mib[2] = (i); \
- mib[4] = (j); \
xmallctlbymib(mib, miblen, (void *)v, &sz, NULL, 0); \
} while (0)
@@ -50,6 +57,13 @@ const char *arena_mutex_names[mutex_prof_num_arena_mutexes] = {
bool opt_stats_print = false;
char opt_stats_print_opts[stats_print_tot_num_options+1] = "";
+int64_t opt_stats_interval = STATS_INTERVAL_DEFAULT;
+char opt_stats_interval_opts[stats_print_tot_num_options+1] = "";
+
+static counter_accum_t stats_interval_accumulated;
+/* Per thread batch accum size for stats_interval. */
+static uint64_t stats_interval_accum_batch;
+
/******************************************************************************/
static uint64_t
@@ -91,13 +105,6 @@ get_rate_str(uint64_t dividend, uint64_t divisor, char str[6]) {
return false;
}
-#define MUTEX_CTL_STR_MAX_LENGTH 128
-static void
-gen_mutex_ctl_str(char *str, size_t buf_len, const char *prefix,
- const char *mutex, const char *counter) {
- malloc_snprintf(str, buf_len, "stats.%s.%s.%s", prefix, mutex, counter);
-}
-
static void
mutex_stats_init_cols(emitter_row_t *row, const char *table_name,
emitter_col_t *name,
@@ -118,7 +125,7 @@ mutex_stats_init_cols(emitter_row_t *row, const char *table_name,
#define WIDTH_uint32_t 12
#define WIDTH_uint64_t 16
-#define OP(counter, counter_type, human, derived, base_counter) \
+#define OP(counter, counter_type, human, derived, base_counter) \
col = &col_##counter_type[k_##counter_type]; \
++k_##counter_type; \
emitter_col_init(col, row); \
@@ -134,27 +141,31 @@ mutex_stats_init_cols(emitter_row_t *row, const char *table_name,
}
static void
-mutex_stats_read_global(const char *name, emitter_col_t *col_name,
+mutex_stats_read_global(size_t mib[], size_t miblen, const char *name,
+ emitter_col_t *col_name,
emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
uint64_t uptime) {
- char cmd[MUTEX_CTL_STR_MAX_LENGTH];
+ CTL_LEAF_PREPARE(mib, miblen, name);
+ size_t miblen_name = miblen + 1;
col_name->str_val = name;
emitter_col_t *dst;
#define EMITTER_TYPE_uint32_t emitter_type_uint32
#define EMITTER_TYPE_uint64_t emitter_type_uint64
-#define OP(counter, counter_type, human, derived, base_counter) \
+#define OP(counter, counter_type, human, derived, base_counter) \
dst = &col_##counter_type[mutex_counter_##counter]; \
dst->type = EMITTER_TYPE_##counter_type; \
if (!derived) { \
- gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
- "mutexes", name, #counter); \
- CTL_GET(cmd, (counter_type *)&dst->bool_val, counter_type); \
- } else { \
- emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \
- dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \
+ CTL_LEAF(mib, miblen_name, #counter, \
+ (counter_type *)&dst->bool_val, counter_type); \
+ } else { \
+ emitter_col_t *base = \
+ &col_##counter_type[mutex_counter_##base_counter]; \
+ dst->counter_type##_val = \
+ (counter_type)rate_per_second( \
+ base->counter_type##_val, uptime); \
}
MUTEX_PROF_COUNTERS
#undef OP
@@ -163,28 +174,31 @@ mutex_stats_read_global(const char *name, emitter_col_t *col_name,
}
static void
-mutex_stats_read_arena(unsigned arena_ind, mutex_prof_arena_ind_t mutex_ind,
- const char *name, emitter_col_t *col_name,
+mutex_stats_read_arena(size_t mib[], size_t miblen, const char *name,
+ emitter_col_t *col_name,
emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
uint64_t uptime) {
- char cmd[MUTEX_CTL_STR_MAX_LENGTH];
+ CTL_LEAF_PREPARE(mib, miblen, name);
+ size_t miblen_name = miblen + 1;
col_name->str_val = name;
emitter_col_t *dst;
#define EMITTER_TYPE_uint32_t emitter_type_uint32
#define EMITTER_TYPE_uint64_t emitter_type_uint64
-#define OP(counter, counter_type, human, derived, base_counter) \
+#define OP(counter, counter_type, human, derived, base_counter) \
dst = &col_##counter_type[mutex_counter_##counter]; \
dst->type = EMITTER_TYPE_##counter_type; \
- if (!derived) { \
- gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
- "arenas.0.mutexes", arena_mutex_names[mutex_ind], #counter);\
- CTL_M2_GET(cmd, arena_ind, (counter_type *)&dst->bool_val, counter_type); \
- } else { \
- emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \
- dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \
+ if (!derived) { \
+ CTL_LEAF(mib, miblen_name, #counter, \
+ (counter_type *)&dst->bool_val, counter_type); \
+ } else { \
+ emitter_col_t *base = \
+ &col_##counter_type[mutex_counter_##base_counter]; \
+ dst->counter_type##_val = \
+ (counter_type)rate_per_second( \
+ base->counter_type##_val, uptime); \
}
MUTEX_PROF_COUNTERS
#undef OP
@@ -193,26 +207,29 @@ mutex_stats_read_arena(unsigned arena_ind, mutex_prof_arena_ind_t mutex_ind,
}
static void
-mutex_stats_read_arena_bin(unsigned arena_ind, unsigned bin_ind,
+mutex_stats_read_arena_bin(size_t mib[], size_t miblen,
emitter_col_t col_uint64_t[mutex_prof_num_uint64_t_counters],
emitter_col_t col_uint32_t[mutex_prof_num_uint32_t_counters],
uint64_t uptime) {
- char cmd[MUTEX_CTL_STR_MAX_LENGTH];
+ CTL_LEAF_PREPARE(mib, miblen, "mutex");
+ size_t miblen_mutex = miblen + 1;
+
emitter_col_t *dst;
#define EMITTER_TYPE_uint32_t emitter_type_uint32
#define EMITTER_TYPE_uint64_t emitter_type_uint64
-#define OP(counter, counter_type, human, derived, base_counter) \
+#define OP(counter, counter_type, human, derived, base_counter) \
dst = &col_##counter_type[mutex_counter_##counter]; \
dst->type = EMITTER_TYPE_##counter_type; \
- if (!derived) { \
- gen_mutex_ctl_str(cmd, MUTEX_CTL_STR_MAX_LENGTH, \
- "arenas.0.bins.0","mutex", #counter); \
- CTL_M2_M4_GET(cmd, arena_ind, bin_ind, \
- (counter_type *)&dst->bool_val, counter_type); \
- } else { \
- emitter_col_t *base = &col_##counter_type[mutex_counter_##base_counter]; \
- dst->counter_type##_val = rate_per_second(base->counter_type##_val, uptime); \
+ if (!derived) { \
+ CTL_LEAF(mib, miblen_mutex, #counter, \
+ (counter_type *)&dst->bool_val, counter_type); \
+ } else { \
+ emitter_col_t *base = \
+ &col_##counter_type[mutex_counter_##base_counter]; \
+ dst->counter_type##_val = \
+ (counter_type)rate_per_second( \
+ base->counter_type##_val, uptime); \
}
MUTEX_PROF_COUNTERS
#undef OP
@@ -249,25 +266,42 @@ mutex_stats_emit(emitter_t *emitter, emitter_row_t *row,
#undef EMITTER_TYPE_uint64_t
}
-#define COL(row_name, column_name, left_or_right, col_width, etype) \
- emitter_col_t col_##column_name; \
- emitter_col_init(&col_##column_name, &row_name); \
- col_##column_name.justify = emitter_justify_##left_or_right; \
- col_##column_name.width = col_width; \
+#define COL_DECLARE(column_name) \
+ emitter_col_t col_##column_name;
+
+#define COL_INIT(row_name, column_name, left_or_right, col_width, etype)\
+ emitter_col_init(&col_##column_name, &row_name); \
+ col_##column_name.justify = emitter_justify_##left_or_right; \
+ col_##column_name.width = col_width; \
col_##column_name.type = emitter_type_##etype;
-#define COL_HDR(row_name, column_name, human, left_or_right, col_width, etype) \
- COL(row_name, column_name, left_or_right, col_width, etype) \
- emitter_col_t header_##column_name; \
- emitter_col_init(&header_##column_name, &header_##row_name); \
- header_##column_name.justify = emitter_justify_##left_or_right; \
- header_##column_name.width = col_width; \
- header_##column_name.type = emitter_type_title; \
+#define COL(row_name, column_name, left_or_right, col_width, etype) \
+ COL_DECLARE(column_name); \
+ COL_INIT(row_name, column_name, left_or_right, col_width, etype)
+
+#define COL_HDR_DECLARE(column_name) \
+ COL_DECLARE(column_name); \
+ emitter_col_t header_##column_name;
+
+#define COL_HDR_INIT(row_name, column_name, human, left_or_right, \
+ col_width, etype) \
+ COL_INIT(row_name, column_name, left_or_right, col_width, etype)\
+ emitter_col_init(&header_##column_name, &header_##row_name); \
+ header_##column_name.justify = emitter_justify_##left_or_right; \
+ header_##column_name.width = col_width; \
+ header_##column_name.type = emitter_type_title; \
header_##column_name.str_val = human ? human : #column_name;
+#define COL_HDR(row_name, column_name, human, left_or_right, col_width, \
+ etype) \
+ COL_HDR_DECLARE(column_name) \
+ COL_HDR_INIT(row_name, column_name, human, left_or_right, \
+ col_width, etype)
+JEMALLOC_COLD
static void
-stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t uptime) {
+stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i,
+ uint64_t uptime) {
size_t page;
bool in_gap, in_gap_prev;
unsigned nbins, j;
@@ -282,6 +316,9 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
emitter_row_t row;
emitter_row_init(&row);
+ bool prof_stats_on = config_prof && opt_prof && opt_prof_stats
+ && i == MALLCTL_ARENAS_ALL;
+
COL_HDR(row, size, NULL, right, 20, size)
COL_HDR(row, ind, NULL, right, 4, unsigned)
COL_HDR(row, allocated, NULL, right, 13, uint64)
@@ -291,6 +328,16 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64)
COL_HDR(row, nrequests, NULL, right, 13, uint64)
COL_HDR(row, nrequests_ps, "(#/sec)", right, 10, uint64)
+ COL_HDR_DECLARE(prof_live_requested);
+ COL_HDR_DECLARE(prof_live_count);
+ COL_HDR_DECLARE(prof_accum_requested);
+ COL_HDR_DECLARE(prof_accum_count);
+ if (prof_stats_on) {
+ COL_HDR_INIT(row, prof_live_requested, NULL, right, 21, uint64)
+ COL_HDR_INIT(row, prof_live_count, NULL, right, 17, uint64)
+ COL_HDR_INIT(row, prof_accum_requested, NULL, right, 21, uint64)
+ COL_HDR_INIT(row, prof_accum_count, NULL, right, 17, uint64)
+ }
COL_HDR(row, nshards, NULL, right, 9, unsigned)
COL_HDR(row, curregs, NULL, right, 13, size)
COL_HDR(row, curslabs, NULL, right, 13, size)
@@ -334,6 +381,19 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
emitter_table_row(emitter, &header_row);
emitter_json_array_kv_begin(emitter, "bins");
+ size_t stats_arenas_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
+ stats_arenas_mib[2] = i;
+ CTL_LEAF_PREPARE(stats_arenas_mib, 3, "bins");
+
+ size_t arenas_bin_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(arenas_bin_mib, 0, "arenas.bin");
+
+ size_t prof_stats_mib[CTL_MAX_DEPTH];
+ if (prof_stats_on) {
+ CTL_LEAF_PREPARE(prof_stats_mib, 0, "prof.stats.bins");
+ }
+
for (j = 0, in_gap = false; j < nbins; j++) {
uint64_t nslabs;
size_t reg_size, slab_size, curregs;
@@ -342,44 +402,57 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
uint32_t nregs, nshards;
uint64_t nmalloc, ndalloc, nrequests, nfills, nflushes;
uint64_t nreslabs;
+ prof_stats_t prof_live;
+ prof_stats_t prof_accum;
+
+ stats_arenas_mib[4] = j;
+ arenas_bin_mib[2] = j;
+
+ CTL_LEAF(stats_arenas_mib, 5, "nslabs", &nslabs, uint64_t);
+
+ if (prof_stats_on) {
+ prof_stats_mib[3] = j;
+ CTL_LEAF(prof_stats_mib, 4, "live", &prof_live,
+ prof_stats_t);
+ CTL_LEAF(prof_stats_mib, 4, "accum", &prof_accum,
+ prof_stats_t);
+ }
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nslabs", i, j, &nslabs,
- uint64_t);
in_gap_prev = in_gap;
- in_gap = (nslabs == 0);
+ if (prof_stats_on) {
+ in_gap = (nslabs == 0 && prof_accum.count == 0);
+ } else {
+ in_gap = (nslabs == 0);
+ }
if (in_gap_prev && !in_gap) {
emitter_table_printf(emitter,
" ---\n");
}
- CTL_M2_GET("arenas.bin.0.size", j, &reg_size, size_t);
- CTL_M2_GET("arenas.bin.0.nregs", j, &nregs, uint32_t);
- CTL_M2_GET("arenas.bin.0.slab_size", j, &slab_size, size_t);
- CTL_M2_GET("arenas.bin.0.nshards", j, &nshards, uint32_t);
+ if (in_gap && !emitter_outputs_json(emitter)) {
+ continue;
+ }
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nmalloc", i, j, &nmalloc,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.ndalloc", i, j, &ndalloc,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.curregs", i, j, &curregs,
- size_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nrequests", i, j,
- &nrequests, uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nfills", i, j, &nfills,
+ CTL_LEAF(arenas_bin_mib, 3, "size", &reg_size, size_t);
+ CTL_LEAF(arenas_bin_mib, 3, "nregs", &nregs, uint32_t);
+ CTL_LEAF(arenas_bin_mib, 3, "slab_size", &slab_size, size_t);
+ CTL_LEAF(arenas_bin_mib, 3, "nshards", &nshards, uint32_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nmalloc", &nmalloc, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "ndalloc", &ndalloc, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "curregs", &curregs, size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nrequests", &nrequests,
uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nflushes", i, j, &nflushes,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nreslabs", i, j, &nreslabs,
- uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.curslabs", i, j, &curslabs,
- size_t);
- CTL_M2_M4_GET("stats.arenas.0.bins.0.nonfull_slabs", i, j, &nonfull_slabs,
+ CTL_LEAF(stats_arenas_mib, 5, "nfills", &nfills, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nflushes", &nflushes, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nreslabs", &nreslabs, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "curslabs", &curslabs, size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nonfull_slabs", &nonfull_slabs,
size_t);
if (mutex) {
- mutex_stats_read_arena_bin(i, j, col_mutex64,
- col_mutex32, uptime);
+ mutex_stats_read_arena_bin(stats_arenas_mib, 5,
+ col_mutex64, col_mutex32, uptime);
}
emitter_json_object_begin(emitter);
@@ -391,6 +464,16 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
&curregs);
emitter_json_kv(emitter, "nrequests", emitter_type_uint64,
&nrequests);
+ if (prof_stats_on) {
+ emitter_json_kv(emitter, "prof_live_requested",
+ emitter_type_uint64, &prof_live.req_sum);
+ emitter_json_kv(emitter, "prof_live_count",
+ emitter_type_uint64, &prof_live.count);
+ emitter_json_kv(emitter, "prof_accum_requested",
+ emitter_type_uint64, &prof_accum.req_sum);
+ emitter_json_kv(emitter, "prof_accum_count",
+ emitter_type_uint64, &prof_accum.count);
+ }
emitter_json_kv(emitter, "nfills", emitter_type_uint64,
&nfills);
emitter_json_kv(emitter, "nflushes", emitter_type_uint64,
@@ -437,6 +520,13 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime);
col_nrequests.uint64_val = nrequests;
col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime);
+ if (prof_stats_on) {
+ col_prof_live_requested.uint64_val = prof_live.req_sum;
+ col_prof_live_count.uint64_val = prof_live.count;
+ col_prof_accum_requested.uint64_val =
+ prof_accum.req_sum;
+ col_prof_accum_count.uint64_val = prof_accum.count;
+ }
col_nshards.unsigned_val = nshards;
col_curregs.size_val = curregs;
col_curslabs.size_val = curslabs;
@@ -466,6 +556,7 @@ stats_arena_bins_print(emitter_t *emitter, bool mutex, unsigned i, uint64_t upti
}
}
+JEMALLOC_COLD
static void
stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
unsigned nbins, nlextents, j;
@@ -479,6 +570,9 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
emitter_row_t row;
emitter_row_init(&row);
+ bool prof_stats_on = config_prof && opt_prof && opt_prof_stats
+ && i == MALLCTL_ARENAS_ALL;
+
COL_HDR(row, size, NULL, right, 20, size)
COL_HDR(row, ind, NULL, right, 4, unsigned)
COL_HDR(row, allocated, NULL, right, 13, size)
@@ -488,6 +582,16 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
COL_HDR(row, ndalloc_ps, "(#/sec)", right, 8, uint64)
COL_HDR(row, nrequests, NULL, right, 13, uint64)
COL_HDR(row, nrequests_ps, "(#/sec)", right, 8, uint64)
+ COL_HDR_DECLARE(prof_live_requested)
+ COL_HDR_DECLARE(prof_live_count)
+ COL_HDR_DECLARE(prof_accum_requested)
+ COL_HDR_DECLARE(prof_accum_count)
+ if (prof_stats_on) {
+ COL_HDR_INIT(row, prof_live_requested, NULL, right, 21, uint64)
+ COL_HDR_INIT(row, prof_live_count, NULL, right, 17, uint64)
+ COL_HDR_INIT(row, prof_accum_requested, NULL, right, 21, uint64)
+ COL_HDR_INIT(row, prof_accum_count, NULL, right, 17, uint64)
+ }
COL_HDR(row, curlextents, NULL, right, 13, size)
/* As with bins, we label the large extents table. */
@@ -496,16 +600,33 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
emitter_table_row(emitter, &header_row);
emitter_json_array_kv_begin(emitter, "lextents");
+ size_t stats_arenas_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
+ stats_arenas_mib[2] = i;
+ CTL_LEAF_PREPARE(stats_arenas_mib, 3, "lextents");
+
+ size_t arenas_lextent_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(arenas_lextent_mib, 0, "arenas.lextent");
+
+ size_t prof_stats_mib[CTL_MAX_DEPTH];
+ if (prof_stats_on) {
+ CTL_LEAF_PREPARE(prof_stats_mib, 0, "prof.stats.lextents");
+ }
+
for (j = 0, in_gap = false; j < nlextents; j++) {
uint64_t nmalloc, ndalloc, nrequests;
size_t lextent_size, curlextents;
+ prof_stats_t prof_live;
+ prof_stats_t prof_accum;
+
+ stats_arenas_mib[4] = j;
+ arenas_lextent_mib[2] = j;
+
+ CTL_LEAF(stats_arenas_mib, 5, "nmalloc", &nmalloc, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "ndalloc", &ndalloc, uint64_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nrequests", &nrequests,
+ uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.lextents.0.nmalloc", i, j,
- &nmalloc, uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.lextents.0.ndalloc", i, j,
- &ndalloc, uint64_t);
- CTL_M2_M4_GET("stats.arenas.0.lextents.0.nrequests", i, j,
- &nrequests, uint64_t);
in_gap_prev = in_gap;
in_gap = (nrequests == 0);
@@ -514,11 +635,29 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
" ---\n");
}
- CTL_M2_GET("arenas.lextent.0.size", j, &lextent_size, size_t);
- CTL_M2_M4_GET("stats.arenas.0.lextents.0.curlextents", i, j,
- &curlextents, size_t);
+ CTL_LEAF(arenas_lextent_mib, 3, "size", &lextent_size, size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "curlextents", &curlextents,
+ size_t);
+
+ if (prof_stats_on) {
+ prof_stats_mib[3] = j;
+ CTL_LEAF(prof_stats_mib, 4, "live", &prof_live,
+ prof_stats_t);
+ CTL_LEAF(prof_stats_mib, 4, "accum", &prof_accum,
+ prof_stats_t);
+ }
emitter_json_object_begin(emitter);
+ if (prof_stats_on) {
+ emitter_json_kv(emitter, "prof_live_requested",
+ emitter_type_uint64, &prof_live.req_sum);
+ emitter_json_kv(emitter, "prof_live_count",
+ emitter_type_uint64, &prof_live.count);
+ emitter_json_kv(emitter, "prof_accum_requested",
+ emitter_type_uint64, &prof_accum.req_sum);
+ emitter_json_kv(emitter, "prof_accum_count",
+ emitter_type_uint64, &prof_accum.count);
+ }
emitter_json_kv(emitter, "curlextents", emitter_type_size,
&curlextents);
emitter_json_object_end(emitter);
@@ -532,6 +671,13 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
col_ndalloc_ps.uint64_val = rate_per_second(ndalloc, uptime);
col_nrequests.uint64_val = nrequests;
col_nrequests_ps.uint64_val = rate_per_second(nrequests, uptime);
+ if (prof_stats_on) {
+ col_prof_live_requested.uint64_val = prof_live.req_sum;
+ col_prof_live_count.uint64_val = prof_live.count;
+ col_prof_accum_requested.uint64_val =
+ prof_accum.req_sum;
+ col_prof_accum_count.uint64_val = prof_accum.count;
+ }
col_curlextents.size_val = curlextents;
if (!in_gap) {
@@ -544,6 +690,7 @@ stats_arena_lextents_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
}
}
+JEMALLOC_COLD
static void
stats_arena_extents_print(emitter_t *emitter, unsigned i) {
unsigned j;
@@ -570,22 +717,27 @@ stats_arena_extents_print(emitter_t *emitter, unsigned i) {
emitter_table_row(emitter, &header_row);
emitter_json_array_kv_begin(emitter, "extents");
+ size_t stats_arenas_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
+ stats_arenas_mib[2] = i;
+ CTL_LEAF_PREPARE(stats_arenas_mib, 3, "extents");
+
in_gap = false;
for (j = 0; j < SC_NPSIZES; j++) {
size_t ndirty, nmuzzy, nretained, total, dirty_bytes,
muzzy_bytes, retained_bytes, total_bytes;
- CTL_M2_M4_GET("stats.arenas.0.extents.0.ndirty", i, j,
- &ndirty, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.nmuzzy", i, j,
- &nmuzzy, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.nretained", i, j,
- &nretained, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.dirty_bytes", i, j,
- &dirty_bytes, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.muzzy_bytes", i, j,
- &muzzy_bytes, size_t);
- CTL_M2_M4_GET("stats.arenas.0.extents.0.retained_bytes", i, j,
+ stats_arenas_mib[4] = j;
+
+ CTL_LEAF(stats_arenas_mib, 5, "ndirty", &ndirty, size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nmuzzy", &nmuzzy, size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "nretained", &nretained, size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "dirty_bytes", &dirty_bytes,
+ size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "muzzy_bytes", &muzzy_bytes,
+ size_t);
+ CTL_LEAF(stats_arenas_mib, 5, "retained_bytes",
&retained_bytes, size_t);
+
total = ndirty + nmuzzy + nretained;
total_bytes = dirty_bytes + muzzy_bytes + retained_bytes;
@@ -633,6 +785,230 @@ stats_arena_extents_print(emitter_t *emitter, unsigned i) {
}
static void
+stats_arena_hpa_shard_print(emitter_t *emitter, unsigned i, uint64_t uptime) {
+ emitter_row_t header_row;
+ emitter_row_init(&header_row);
+ emitter_row_t row;
+ emitter_row_init(&row);
+
+ uint64_t npurge_passes;
+ uint64_t npurges;
+ uint64_t nhugifies;
+ uint64_t ndehugifies;
+
+ CTL_M2_GET("stats.arenas.0.hpa_shard.npurge_passes",
+ i, &npurge_passes, uint64_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.npurges",
+ i, &npurges, uint64_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.nhugifies",
+ i, &nhugifies, uint64_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.ndehugifies",
+ i, &ndehugifies, uint64_t);
+
+ size_t npageslabs_huge;
+ size_t nactive_huge;
+ size_t ndirty_huge;
+
+ size_t npageslabs_nonhuge;
+ size_t nactive_nonhuge;
+ size_t ndirty_nonhuge;
+ size_t nretained_nonhuge;
+
+ size_t sec_bytes;
+ CTL_M2_GET("stats.arenas.0.hpa_sec_bytes", i, &sec_bytes, size_t);
+ emitter_kv(emitter, "sec_bytes", "Bytes in small extent cache",
+ emitter_type_size, &sec_bytes);
+
+ /* First, global stats. */
+ emitter_table_printf(emitter,
+ "HPA shard stats:\n"
+ " Purge passes: %" FMTu64 " (%" FMTu64 " / sec)\n"
+ " Purges: %" FMTu64 " (%" FMTu64 " / sec)\n"
+	    "  Hugifies: %" FMTu64 " (%" FMTu64 " / sec)\n"
+ " Dehugifies: %" FMTu64 " (%" FMTu64 " / sec)\n"
+ "\n",
+ npurge_passes, rate_per_second(npurge_passes, uptime),
+ npurges, rate_per_second(npurges, uptime),
+ nhugifies, rate_per_second(nhugifies, uptime),
+ ndehugifies, rate_per_second(ndehugifies, uptime));
+
+ emitter_json_object_kv_begin(emitter, "hpa_shard");
+ emitter_json_kv(emitter, "npurge_passes", emitter_type_uint64,
+ &npurge_passes);
+ emitter_json_kv(emitter, "npurges", emitter_type_uint64,
+ &npurges);
+ emitter_json_kv(emitter, "nhugifies", emitter_type_uint64,
+ &nhugifies);
+ emitter_json_kv(emitter, "ndehugifies", emitter_type_uint64,
+ &ndehugifies);
+
+ /* Next, full slab stats. */
+ CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.npageslabs_huge",
+ i, &npageslabs_huge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.nactive_huge",
+ i, &nactive_huge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.ndirty_huge",
+ i, &ndirty_huge, size_t);
+
+ CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.npageslabs_nonhuge",
+ i, &npageslabs_nonhuge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.nactive_nonhuge",
+ i, &nactive_nonhuge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.full_slabs.ndirty_nonhuge",
+ i, &ndirty_nonhuge, size_t);
+ nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES
+ - nactive_nonhuge - ndirty_nonhuge;
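+	/*
+	 * Illustrative arithmetic (assuming 4 KiB pages and 2 MiB hugepages,
+	 * so HUGEPAGE_PAGES == 512): 3 nonhuge pageslabs holding 1000 active
+	 * and 400 dirty pages leave 3 * 512 - 1000 - 400 == 136 retained
+	 * pages.
+	 */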
+
+ emitter_table_printf(emitter,
+ " In full slabs:\n"
+ " npageslabs: %zu huge, %zu nonhuge\n"
+	    "  nactive: %zu huge, %zu nonhuge\n"
+	    "  ndirty: %zu huge, %zu nonhuge\n"
+	    "  nretained: 0 huge, %zu nonhuge\n",
+ npageslabs_huge, npageslabs_nonhuge,
+ nactive_huge, nactive_nonhuge,
+ ndirty_huge, ndirty_nonhuge,
+ nretained_nonhuge);
+
+ emitter_json_object_kv_begin(emitter, "full_slabs");
+ emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size,
+ &npageslabs_huge);
+ emitter_json_kv(emitter, "nactive_huge", emitter_type_size,
+ &nactive_huge);
+	emitter_json_kv(emitter, "ndirty_huge", emitter_type_size,
+	    &ndirty_huge);
+ emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size,
+ &npageslabs_nonhuge);
+ emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size,
+ &nactive_nonhuge);
+ emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size,
+ &ndirty_nonhuge);
+ emitter_json_object_end(emitter); /* End "full_slabs" */
+
+ /* Next, empty slab stats. */
+ CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.npageslabs_huge",
+ i, &npageslabs_huge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.nactive_huge",
+ i, &nactive_huge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.ndirty_huge",
+ i, &ndirty_huge, size_t);
+
+ CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.npageslabs_nonhuge",
+ i, &npageslabs_nonhuge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.nactive_nonhuge",
+ i, &nactive_nonhuge, size_t);
+ CTL_M2_GET("stats.arenas.0.hpa_shard.empty_slabs.ndirty_nonhuge",
+ i, &ndirty_nonhuge, size_t);
+ nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES
+ - nactive_nonhuge - ndirty_nonhuge;
+
+ emitter_table_printf(emitter,
+ " In empty slabs:\n"
+ " npageslabs: %zu huge, %zu nonhuge\n"
+	    "  nactive: %zu huge, %zu nonhuge\n"
+	    "  ndirty: %zu huge, %zu nonhuge\n"
+	    "  nretained: 0 huge, %zu nonhuge\n"
+ "\n",
+ npageslabs_huge, npageslabs_nonhuge,
+ nactive_huge, nactive_nonhuge,
+ ndirty_huge, ndirty_nonhuge,
+ nretained_nonhuge);
+
+ emitter_json_object_kv_begin(emitter, "empty_slabs");
+ emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size,
+ &npageslabs_huge);
+ emitter_json_kv(emitter, "nactive_huge", emitter_type_size,
+ &nactive_huge);
+	emitter_json_kv(emitter, "ndirty_huge", emitter_type_size,
+	    &ndirty_huge);
+ emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size,
+ &npageslabs_nonhuge);
+ emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size,
+ &nactive_nonhuge);
+ emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size,
+ &ndirty_nonhuge);
+ emitter_json_object_end(emitter); /* End "empty_slabs" */
+
+ COL_HDR(row, size, NULL, right, 20, size)
+ COL_HDR(row, ind, NULL, right, 4, unsigned)
+ COL_HDR(row, npageslabs_huge, NULL, right, 16, size)
+ COL_HDR(row, nactive_huge, NULL, right, 16, size)
+ COL_HDR(row, ndirty_huge, NULL, right, 16, size)
+ COL_HDR(row, npageslabs_nonhuge, NULL, right, 20, size)
+ COL_HDR(row, nactive_nonhuge, NULL, right, 20, size)
+ COL_HDR(row, ndirty_nonhuge, NULL, right, 20, size)
+ COL_HDR(row, nretained_nonhuge, NULL, right, 20, size)
+
+ size_t stats_arenas_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
+ stats_arenas_mib[2] = i;
+ CTL_LEAF_PREPARE(stats_arenas_mib, 3, "hpa_shard.nonfull_slabs");
+
+ emitter_table_row(emitter, &header_row);
+ emitter_json_array_kv_begin(emitter, "nonfull_slabs");
+ bool in_gap = false;
+ for (pszind_t j = 0; j < PSSET_NPSIZES && j < SC_NPSIZES; j++) {
+ stats_arenas_mib[5] = j;
+
+ CTL_LEAF(stats_arenas_mib, 6, "npageslabs_huge",
+ &npageslabs_huge, size_t);
+ CTL_LEAF(stats_arenas_mib, 6, "nactive_huge",
+ &nactive_huge, size_t);
+ CTL_LEAF(stats_arenas_mib, 6, "ndirty_huge",
+ &ndirty_huge, size_t);
+
+ CTL_LEAF(stats_arenas_mib, 6, "npageslabs_nonhuge",
+ &npageslabs_nonhuge, size_t);
+ CTL_LEAF(stats_arenas_mib, 6, "nactive_nonhuge",
+ &nactive_nonhuge, size_t);
+ CTL_LEAF(stats_arenas_mib, 6, "ndirty_nonhuge",
+ &ndirty_nonhuge, size_t);
+ nretained_nonhuge = npageslabs_nonhuge * HUGEPAGE_PAGES
+ - nactive_nonhuge - ndirty_nonhuge;
+
+ bool in_gap_prev = in_gap;
+ in_gap = (npageslabs_huge == 0 && npageslabs_nonhuge == 0);
+ if (in_gap_prev && !in_gap) {
+ emitter_table_printf(emitter,
+ " ---\n");
+ }
+
+ col_size.size_val = sz_pind2sz(j);
+ col_ind.size_val = j;
+ col_npageslabs_huge.size_val = npageslabs_huge;
+ col_nactive_huge.size_val = nactive_huge;
+ col_ndirty_huge.size_val = ndirty_huge;
+ col_npageslabs_nonhuge.size_val = npageslabs_nonhuge;
+ col_nactive_nonhuge.size_val = nactive_nonhuge;
+ col_ndirty_nonhuge.size_val = ndirty_nonhuge;
+ col_nretained_nonhuge.size_val = nretained_nonhuge;
+ if (!in_gap) {
+ emitter_table_row(emitter, &row);
+ }
+
+ emitter_json_object_begin(emitter);
+ emitter_json_kv(emitter, "npageslabs_huge", emitter_type_size,
+ &npageslabs_huge);
+ emitter_json_kv(emitter, "nactive_huge", emitter_type_size,
+ &nactive_huge);
+ emitter_json_kv(emitter, "ndirty_huge", emitter_type_size,
+ &ndirty_huge);
+ emitter_json_kv(emitter, "npageslabs_nonhuge", emitter_type_size,
+ &npageslabs_nonhuge);
+ emitter_json_kv(emitter, "nactive_nonhuge", emitter_type_size,
+ &nactive_nonhuge);
+ emitter_json_kv(emitter, "ndirty_nonhuge", emitter_type_size,
+ &ndirty_nonhuge);
+ emitter_json_object_end(emitter);
+ }
+ emitter_json_array_end(emitter); /* End "nonfull_slabs" */
+ emitter_json_object_end(emitter); /* End "hpa_shard" */
+ if (in_gap) {
+ emitter_table_printf(emitter, " ---\n");
+ }
+}
+
+static void
stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind, uint64_t uptime) {
emitter_row_t row;
emitter_col_t col_name;
@@ -645,21 +1021,27 @@ stats_arena_mutexes_print(emitter_t *emitter, unsigned arena_ind, uint64_t uptim
emitter_json_object_kv_begin(emitter, "mutexes");
emitter_table_row(emitter, &row);
+ size_t stats_arenas_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(stats_arenas_mib, 0, "stats.arenas");
+ stats_arenas_mib[2] = arena_ind;
+ CTL_LEAF_PREPARE(stats_arenas_mib, 3, "mutexes");
+
for (mutex_prof_arena_ind_t i = 0; i < mutex_prof_num_arena_mutexes;
i++) {
const char *name = arena_mutex_names[i];
emitter_json_object_kv_begin(emitter, name);
- mutex_stats_read_arena(arena_ind, i, name, &col_name, col64,
- col32, uptime);
+ mutex_stats_read_arena(stats_arenas_mib, 4, name, &col_name,
+ col64, col32, uptime);
mutex_stats_emit(emitter, &row, col64, col32);
emitter_json_object_end(emitter); /* Close the mutex dict. */
}
emitter_json_object_end(emitter); /* End "mutexes". */
}
+JEMALLOC_COLD
static void
stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
- bool mutex, bool extents) {
+ bool mutex, bool extents, bool hpa) {
unsigned nthreads;
const char *dss;
ssize_t dirty_decay_ms, muzzy_decay_ms;
@@ -673,7 +1055,7 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
size_t large_allocated;
uint64_t large_nmalloc, large_ndalloc, large_nrequests, large_nfills,
large_nflushes;
- size_t tcache_bytes, abandoned_vm;
+ size_t tcache_bytes, tcache_stashed_bytes, abandoned_vm;
uint64_t uptime;
CTL_GET("arenas.page", &page, size_t);
@@ -817,12 +1199,12 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
COL(alloc_count_row, count_nmalloc, right, 16, title);
col_count_nmalloc.str_val = "nmalloc";
- COL(alloc_count_row, count_nmalloc_ps, right, 8, title);
+ COL(alloc_count_row, count_nmalloc_ps, right, 10, title);
col_count_nmalloc_ps.str_val = "(#/sec)";
COL(alloc_count_row, count_ndalloc, right, 16, title);
col_count_ndalloc.str_val = "ndalloc";
- COL(alloc_count_row, count_ndalloc_ps, right, 8, title);
+ COL(alloc_count_row, count_ndalloc_ps, right, 10, title);
col_count_ndalloc_ps.str_val = "(#/sec)";
COL(alloc_count_row, count_nrequests, right, 16, title);
@@ -962,6 +1344,7 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
GET_AND_EMIT_MEM_STAT(internal)
GET_AND_EMIT_MEM_STAT(metadata_thp)
GET_AND_EMIT_MEM_STAT(tcache_bytes)
+ GET_AND_EMIT_MEM_STAT(tcache_stashed_bytes)
GET_AND_EMIT_MEM_STAT(resident)
GET_AND_EMIT_MEM_STAT(abandoned_vm)
GET_AND_EMIT_MEM_STAT(extent_avail)
@@ -979,8 +1362,12 @@ stats_arena_print(emitter_t *emitter, unsigned i, bool bins, bool large,
if (extents) {
stats_arena_extents_print(emitter, i);
}
+ if (hpa) {
+ stats_arena_hpa_shard_print(emitter, i, uptime);
+ }
}
+JEMALLOC_COLD
static void
stats_general_print(emitter_t *emitter) {
const char *cpv;
@@ -988,14 +1375,18 @@ stats_general_print(emitter_t *emitter) {
unsigned uv;
uint32_t u32v;
uint64_t u64v;
+ int64_t i64v;
ssize_t ssv, ssv2;
- size_t sv, bsz, usz, ssz, sssz, cpsz;
+ size_t sv, bsz, usz, u32sz, u64sz, i64sz, ssz, sssz, cpsz;
bsz = sizeof(bool);
usz = sizeof(unsigned);
ssz = sizeof(size_t);
sssz = sizeof(ssize_t);
cpsz = sizeof(const char *);
+ u32sz = sizeof(uint32_t);
+ i64sz = sizeof(int64_t);
+ u64sz = sizeof(uint64_t);
CTL_GET("version", &cpv, const char *);
emitter_kv(emitter, "version", "Version", emitter_type_string, &cpv);
@@ -1051,6 +1442,11 @@ stats_general_print(emitter_t *emitter) {
#define OPT_WRITE_UNSIGNED(name) \
OPT_WRITE(name, uv, usz, emitter_type_unsigned)
+#define OPT_WRITE_INT64(name) \
+ OPT_WRITE(name, i64v, i64sz, emitter_type_int64)
+#define OPT_WRITE_UINT64(name) \
+ OPT_WRITE(name, u64v, u64sz, emitter_type_uint64)
+
#define OPT_WRITE_SIZE_T(name) \
OPT_WRITE(name, sv, ssz, emitter_type_size)
#define OPT_WRITE_SSIZE_T(name) \
@@ -1066,13 +1462,43 @@ stats_general_print(emitter_t *emitter) {
OPT_WRITE_BOOL("abort")
OPT_WRITE_BOOL("abort_conf")
+ OPT_WRITE_BOOL("cache_oblivious")
OPT_WRITE_BOOL("confirm_conf")
OPT_WRITE_BOOL("retain")
OPT_WRITE_CHAR_P("dss")
OPT_WRITE_UNSIGNED("narenas")
OPT_WRITE_CHAR_P("percpu_arena")
OPT_WRITE_SIZE_T("oversize_threshold")
+ OPT_WRITE_BOOL("hpa")
+ OPT_WRITE_SIZE_T("hpa_slab_max_alloc")
+ OPT_WRITE_SIZE_T("hpa_hugification_threshold")
+ OPT_WRITE_UINT64("hpa_hugify_delay_ms")
+ OPT_WRITE_UINT64("hpa_min_purge_interval_ms")
+ if (je_mallctl("opt.hpa_dirty_mult", (void *)&u32v, &u32sz, NULL, 0)
+ == 0) {
+ /*
+ * We cheat a little and "know" the secret meaning of this
+ * representation.
+ */
+ if (u32v == (uint32_t)-1) {
+ const char *neg1 = "-1";
+ emitter_kv(emitter, "hpa_dirty_mult",
+ "opt.hpa_dirty_mult", emitter_type_string, &neg1);
+ } else {
+ char buf[FXP_BUF_SIZE];
+ fxp_print(u32v, buf);
+ const char *bufp = buf;
+ emitter_kv(emitter, "hpa_dirty_mult",
+ "opt.hpa_dirty_mult", emitter_type_string, &bufp);
+ }
+ }
+ OPT_WRITE_SIZE_T("hpa_sec_nshards")
+ OPT_WRITE_SIZE_T("hpa_sec_max_alloc")
+ OPT_WRITE_SIZE_T("hpa_sec_max_bytes")
+ OPT_WRITE_SIZE_T("hpa_sec_bytes_after_flush")
+ OPT_WRITE_SIZE_T("hpa_sec_batch_fill_extra")
OPT_WRITE_CHAR_P("metadata_thp")
+ OPT_WRITE_INT64("mutex_max_spin")
OPT_WRITE_BOOL_MUTABLE("background_thread", "background_thread")
OPT_WRITE_SSIZE_T_MUTABLE("dirty_decay_ms", "arenas.dirty_decay_ms")
OPT_WRITE_SSIZE_T_MUTABLE("muzzy_decay_ms", "arenas.muzzy_decay_ms")
@@ -1081,8 +1507,17 @@ stats_general_print(emitter_t *emitter) {
OPT_WRITE_BOOL("zero")
OPT_WRITE_BOOL("utrace")
OPT_WRITE_BOOL("xmalloc")
+ OPT_WRITE_BOOL("experimental_infallible_new")
OPT_WRITE_BOOL("tcache")
- OPT_WRITE_SSIZE_T("lg_tcache_max")
+ OPT_WRITE_SIZE_T("tcache_max")
+ OPT_WRITE_UNSIGNED("tcache_nslots_small_min")
+ OPT_WRITE_UNSIGNED("tcache_nslots_small_max")
+ OPT_WRITE_UNSIGNED("tcache_nslots_large")
+ OPT_WRITE_SSIZE_T("lg_tcache_nslots_mul")
+ OPT_WRITE_SIZE_T("tcache_gc_incr_bytes")
+ OPT_WRITE_SIZE_T("tcache_gc_delay_bytes")
+ OPT_WRITE_UNSIGNED("lg_tcache_flush_small_div")
+ OPT_WRITE_UNSIGNED("lg_tcache_flush_large_div")
OPT_WRITE_CHAR_P("thp")
OPT_WRITE_BOOL("prof")
OPT_WRITE_CHAR_P("prof_prefix")
@@ -1095,8 +1530,14 @@ stats_general_print(emitter_t *emitter) {
OPT_WRITE_BOOL("prof_gdump")
OPT_WRITE_BOOL("prof_final")
OPT_WRITE_BOOL("prof_leak")
+ OPT_WRITE_BOOL("prof_leak_error")
OPT_WRITE_BOOL("stats_print")
OPT_WRITE_CHAR_P("stats_print_opts")
+ OPT_WRITE_INT64("stats_interval")
+ OPT_WRITE_CHAR_P("stats_interval_opts")
+ OPT_WRITE_CHAR_P("zero_realloc")
emitter_dict_end(emitter);
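The opt.hpa_dirty_mult block above leans on jemalloc's internal fxp_t fixed-point type. A standalone sketch of the decoding it performs, under the assumption (not established by this diff) that fxp_t is a 16.16 fixed-point value carried in a uint32_t; the (uint32_t)-1 sentinel for "-1" is taken from the code above, and the helper name is illustrative:

#include <stdint.h>
#include <stdio.h>

/* Print an assumed 16.16 fixed-point ratio in decimal. */
static void
fxp_demo_print(uint32_t fxp) {
	if (fxp == (uint32_t)-1) {
		puts("-1");	/* Sentinel: ratio-based purging disabled. */
		return;
	}
	uint32_t whole = fxp >> 16;
	/* Scale the 16 fractional bits to hundredths for display. */
	uint32_t hundredths = ((fxp & 0xffffU) * 100U) >> 16;
	printf("%u.%02u\n", whole, hundredths);
}

For example, fxp_demo_print(0x4000) prints "0.25" and fxp_demo_print(0x28000) prints "2.50".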
@@ -1167,38 +1608,41 @@ stats_general_print(emitter_t *emitter) {
"Maximum thread-cached size class", emitter_type_size, &sv);
}
- unsigned nbins;
- CTL_GET("arenas.nbins", &nbins, unsigned);
+ unsigned arenas_nbins;
+ CTL_GET("arenas.nbins", &arenas_nbins, unsigned);
emitter_kv(emitter, "nbins", "Number of bin size classes",
- emitter_type_unsigned, &nbins);
+ emitter_type_unsigned, &arenas_nbins);
- unsigned nhbins;
- CTL_GET("arenas.nhbins", &nhbins, unsigned);
+ unsigned arenas_nhbins;
+ CTL_GET("arenas.nhbins", &arenas_nhbins, unsigned);
emitter_kv(emitter, "nhbins", "Number of thread-cache bin size classes",
- emitter_type_unsigned, &nhbins);
+ emitter_type_unsigned, &arenas_nhbins);
/*
* We do enough mallctls in a loop that we actually want to omit them
* (not just omit the printing).
*/
- if (emitter->output == emitter_output_json) {
+ if (emitter_outputs_json(emitter)) {
emitter_json_array_kv_begin(emitter, "bin");
- for (unsigned i = 0; i < nbins; i++) {
+ size_t arenas_bin_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(arenas_bin_mib, 0, "arenas.bin");
+ for (unsigned i = 0; i < arenas_nbins; i++) {
+ arenas_bin_mib[2] = i;
emitter_json_object_begin(emitter);
- CTL_M2_GET("arenas.bin.0.size", i, &sv, size_t);
+ CTL_LEAF(arenas_bin_mib, 3, "size", &sv, size_t);
emitter_json_kv(emitter, "size", emitter_type_size,
&sv);
- CTL_M2_GET("arenas.bin.0.nregs", i, &u32v, uint32_t);
+ CTL_LEAF(arenas_bin_mib, 3, "nregs", &u32v, uint32_t);
emitter_json_kv(emitter, "nregs", emitter_type_uint32,
&u32v);
- CTL_M2_GET("arenas.bin.0.slab_size", i, &sv, size_t);
+ CTL_LEAF(arenas_bin_mib, 3, "slab_size", &sv, size_t);
emitter_json_kv(emitter, "slab_size", emitter_type_size,
&sv);
- CTL_M2_GET("arenas.bin.0.nshards", i, &u32v, uint32_t);
+ CTL_LEAF(arenas_bin_mib, 3, "nshards", &u32v, uint32_t);
emitter_json_kv(emitter, "nshards", emitter_type_uint32,
&u32v);
@@ -1212,12 +1656,15 @@ stats_general_print(emitter_t *emitter) {
emitter_kv(emitter, "nlextents", "Number of large size classes",
emitter_type_unsigned, &nlextents);
- if (emitter->output == emitter_output_json) {
+ if (emitter_outputs_json(emitter)) {
emitter_json_array_kv_begin(emitter, "lextent");
+ size_t arenas_lextent_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(arenas_lextent_mib, 0, "arenas.lextent");
for (unsigned i = 0; i < nlextents; i++) {
+ arenas_lextent_mib[2] = i;
emitter_json_object_begin(emitter);
- CTL_M2_GET("arenas.lextent.0.size", i, &sv, size_t);
+ CTL_LEAF(arenas_lextent_mib, 3, "size", &sv, size_t);
emitter_json_kv(emitter, "size", emitter_type_size,
&sv);
@@ -1229,9 +1676,10 @@ stats_general_print(emitter_t *emitter) {
emitter_json_object_end(emitter); /* Close "arenas" */
}
+JEMALLOC_COLD
static void
stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
- bool unmerged, bool bins, bool large, bool mutex, bool extents) {
+ bool unmerged, bool bins, bool large, bool mutex, bool extents, bool hpa) {
/*
* These should be deleted. We keep them around for a while, to aid in
* the transition to the emitter code.
@@ -1239,6 +1687,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
size_t allocated, active, metadata, metadata_thp, resident, mapped,
retained;
size_t num_background_threads;
+ size_t zero_reallocs;
uint64_t background_thread_num_runs, background_thread_run_interval;
CTL_GET("stats.allocated", &allocated, size_t);
@@ -1249,6 +1698,8 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
CTL_GET("stats.mapped", &mapped, size_t);
CTL_GET("stats.retained", &retained, size_t);
+ CTL_GET("stats.zero_reallocs", &zero_reallocs, size_t);
+
if (have_background_thread) {
CTL_GET("stats.background_thread.num_threads",
&num_background_threads, size_t);
@@ -1272,12 +1723,18 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
emitter_json_kv(emitter, "resident", emitter_type_size, &resident);
emitter_json_kv(emitter, "mapped", emitter_type_size, &mapped);
emitter_json_kv(emitter, "retained", emitter_type_size, &retained);
+ emitter_json_kv(emitter, "zero_reallocs", emitter_type_size,
+ &zero_reallocs);
emitter_table_printf(emitter, "Allocated: %zu, active: %zu, "
"metadata: %zu (n_thp %zu), resident: %zu, mapped: %zu, "
"retained: %zu\n", allocated, active, metadata, metadata_thp,
resident, mapped, retained);
+ /* Strange behaviors */
+ emitter_table_printf(emitter,
+ "Count of realloc(non-null-ptr, 0) calls: %zu\n", zero_reallocs);
+
/* Background thread stats. */
emitter_json_object_kv_begin(emitter, "background_thread");
emitter_json_kv(emitter, "num_threads", emitter_type_size,
@@ -1308,9 +1765,11 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
CTL_M2_GET("stats.arenas.0.uptime", 0, &uptime, uint64_t);
+ size_t stats_mutexes_mib[CTL_MAX_DEPTH];
+ CTL_LEAF_PREPARE(stats_mutexes_mib, 0, "stats.mutexes");
for (int i = 0; i < mutex_prof_num_global_mutexes; i++) {
- mutex_stats_read_global(global_mutex_names[i], &name,
- col64, col32, uptime);
+ mutex_stats_read_global(stats_mutexes_mib, 2,
+ global_mutex_names[i], &name, col64, col32, uptime);
emitter_json_object_kv_begin(emitter, global_mutex_names[i]);
mutex_stats_emit(emitter, &row, col64, col32);
emitter_json_object_end(emitter);
@@ -1355,7 +1814,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
emitter_table_printf(emitter, "Merged arenas stats:\n");
emitter_json_object_kv_begin(emitter, "merged");
stats_arena_print(emitter, MALLCTL_ARENAS_ALL, bins,
- large, mutex, extents);
+ large, mutex, extents, hpa);
emitter_json_object_end(emitter); /* Close "merged". */
}
@@ -1366,7 +1825,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
"Destroyed arenas stats:\n");
emitter_json_object_kv_begin(emitter, "destroyed");
stats_arena_print(emitter, MALLCTL_ARENAS_DESTROYED,
- bins, large, mutex, extents);
+ bins, large, mutex, extents, hpa);
emitter_json_object_end(emitter); /* Close "destroyed". */
}
@@ -1382,7 +1841,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
emitter_table_printf(emitter,
"arenas[%s]:\n", arena_ind_str);
stats_arena_print(emitter, i, bins,
- large, mutex, extents);
+ large, mutex, extents, hpa);
/* Close "<arena-ind>". */
emitter_json_object_end(emitter);
}
@@ -1393,8 +1852,7 @@ stats_print_helper(emitter_t *emitter, bool merged, bool destroyed,
}
void
-stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
- const char *opts) {
+stats_print(write_cb_t *write_cb, void *cbopaque, const char *opts) {
int err;
uint64_t epoch;
size_t u64sz;
@@ -1437,8 +1895,8 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
emitter_t emitter;
emitter_init(&emitter,
- json ? emitter_output_json : emitter_output_table, write_cb,
- cbopaque);
+ json ? emitter_output_json_compact : emitter_output_table,
+ write_cb, cbopaque);
emitter_begin(&emitter);
emitter_table_printf(&emitter, "___ Begin jemalloc statistics ___\n");
emitter_json_object_kv_begin(&emitter, "jemalloc");
@@ -1448,10 +1906,68 @@ stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
}
if (config_stats) {
stats_print_helper(&emitter, merged, destroyed, unmerged,
- bins, large, mutex, extents);
+ bins, large, mutex, extents, hpa);
}
emitter_json_object_end(&emitter); /* Closes the "jemalloc" dict. */
emitter_table_printf(&emitter, "--- End jemalloc statistics ---\n");
emitter_end(&emitter);
}
+
+uint64_t
+stats_interval_new_event_wait(tsd_t *tsd) {
+ return stats_interval_accum_batch;
+}
+
+uint64_t
+stats_interval_postponed_event_wait(tsd_t *tsd) {
+ return TE_MIN_START_WAIT;
+}
+
+void
+stats_interval_event_handler(tsd_t *tsd, uint64_t elapsed) {
+ assert(elapsed > 0 && elapsed != TE_INVALID_ELAPSED);
+ if (counter_accum(tsd_tsdn(tsd), &stats_interval_accumulated,
+ elapsed)) {
+ je_malloc_stats_print(NULL, NULL, opt_stats_interval_opts);
+ }
+}
+
+bool
+stats_boot(void) {
+ uint64_t stats_interval;
+ if (opt_stats_interval < 0) {
+ assert(opt_stats_interval == -1);
+ stats_interval = 0;
+ stats_interval_accum_batch = 0;
+	} else {
+ /* See comments in stats.h */
+ stats_interval = (opt_stats_interval > 0) ?
+ opt_stats_interval : 1;
+ uint64_t batch = stats_interval >>
+ STATS_INTERVAL_ACCUM_LG_BATCH_SIZE;
+ if (batch > STATS_INTERVAL_ACCUM_BATCH_MAX) {
+ batch = STATS_INTERVAL_ACCUM_BATCH_MAX;
+ } else if (batch == 0) {
+ batch = 1;
+ }
+ stats_interval_accum_batch = batch;
+ }
+
+ return counter_accum_init(&stats_interval_accumulated, stats_interval);
+}
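To make the batching in stats_boot() concrete, here is the clamping logic as a standalone sketch. LG_BATCH and BATCH_MAX are stand-ins for STATS_INTERVAL_ACCUM_LG_BATCH_SIZE and STATS_INTERVAL_ACCUM_BATCH_MAX, whose actual values live in stats.h; the numbers below are assumptions for illustration:

#include <stdint.h>

#define LG_BATCH	6	/* assumed value, for illustration only */
#define BATCH_MAX	4096	/* assumed value, for illustration only */

/*
 * Bytes a thread may accumulate locally before pushing into the shared
 * interval counter; 0 disables interval stats entirely.
 */
static uint64_t
batch_for_interval(int64_t opt_interval) {
	if (opt_interval < 0) {
		return 0;	/* opt.stats_interval == -1: disabled. */
	}
	uint64_t interval = opt_interval > 0 ? (uint64_t)opt_interval : 1;
	uint64_t batch = interval >> LG_BATCH;
	if (batch > BATCH_MAX) {
		return BATCH_MAX;
	}
	return batch == 0 ? 1 : batch;
}

With a 1 GiB interval and the assumed shift of 6, each thread batches 16 MiB of allocation activity locally before touching the shared counter, keeping contention low while staying within 1/64 of the configured interval.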
+
+void
+stats_prefork(tsdn_t *tsdn) {
+ counter_prefork(tsdn, &stats_interval_accumulated);
+}
+
+void
+stats_postfork_parent(tsdn_t *tsdn) {
+ counter_postfork_parent(tsdn, &stats_interval_accumulated);
+}
+
+void
+stats_postfork_child(tsdn_t *tsdn) {
+ counter_postfork_child(tsdn, &stats_interval_accumulated);
+}
diff --git a/contrib/jemalloc/src/sz.c b/contrib/jemalloc/src/sz.c
index 8633fb05005e..d3115dda7c96 100644
--- a/contrib/jemalloc/src/sz.c
+++ b/contrib/jemalloc/src/sz.c
@@ -1,8 +1,57 @@
#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/sz.h"
JEMALLOC_ALIGNED(CACHELINE)
size_t sz_pind2sz_tab[SC_NPSIZES+1];
+size_t sz_large_pad;
+
+size_t
+sz_psz_quantize_floor(size_t size) {
+ size_t ret;
+ pszind_t pind;
+
+ assert(size > 0);
+ assert((size & PAGE_MASK) == 0);
+
+ pind = sz_psz2ind(size - sz_large_pad + 1);
+ if (pind == 0) {
+ /*
+ * Avoid underflow. This short-circuit would also do the right
+ * thing for all sizes in the range for which there are
+ * PAGE-spaced size classes, but it's simplest to just handle
+ * the one case that would cause erroneous results.
+ */
+ return size;
+ }
+ ret = sz_pind2sz(pind - 1) + sz_large_pad;
+ assert(ret <= size);
+ return ret;
+}
+
+size_t
+sz_psz_quantize_ceil(size_t size) {
+ size_t ret;
+
+ assert(size > 0);
+ assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
+ assert((size & PAGE_MASK) == 0);
+
+ ret = sz_psz_quantize_floor(size);
+ if (ret < size) {
+ /*
+ * Skip a quantization that may have an adequately large extent,
+ * because under-sized extents may be mixed in. This only
+ * happens when an unusual size is requested, i.e. for aligned
+ * allocation, and is just one of several places where linear
+ * search would potentially find sufficiently aligned available
+ * memory somewhere lower.
+ */
+ ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
+ sz_large_pad;
+ }
+ return ret;
+}
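A worked example for the two quantization routines, assuming 4 KiB pages, cache_oblivious disabled (so sz_large_pad == 0), and the usual page-size classes (4K, 8K, ..., 28K, 32K, 40K, 48K, ...): sz_psz_quantize_floor(36K) returns 32K, the largest class not exceeding the request, while sz_psz_quantize_ceil(36K) returns 40K, the next class strictly above it. The ceil path deliberately skips 36K itself: the bucket that would cover 36K (the 32K class) mixes in extents as small as 32K, so only the next full class, 40K, guarantees a fit.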
static void
sz_boot_pind2sz_tab(const sc_data_t *sc_data) {
@@ -57,7 +106,8 @@ sz_boot_size2index_tab(const sc_data_t *sc_data) {
}
void
-sz_boot(const sc_data_t *sc_data) {
+sz_boot(const sc_data_t *sc_data, bool cache_oblivious) {
+ sz_large_pad = cache_oblivious ? PAGE : 0;
sz_boot_pind2sz_tab(sc_data);
sz_boot_index2size_tab(sc_data);
sz_boot_size2index_tab(sc_data);
diff --git a/contrib/jemalloc/src/tcache.c b/contrib/jemalloc/src/tcache.c
index 50099a9f2cdc..fa16732e4abf 100644
--- a/contrib/jemalloc/src/tcache.c
+++ b/contrib/jemalloc/src/tcache.c
@@ -1,22 +1,71 @@
-#define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/safety_check.h"
+#include "jemalloc/internal/san.h"
#include "jemalloc/internal/sc.h"
/******************************************************************************/
/* Data. */
-bool opt_tcache = true;
-ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;
+bool opt_tcache = true;
+
+/* tcache_maxclass is set to 32KB by default. */
+size_t opt_tcache_max = ((size_t)1) << 15;
+
+/* Reasonable defaults for min and max values. */
+unsigned opt_tcache_nslots_small_min = 20;
+unsigned opt_tcache_nslots_small_max = 200;
+unsigned opt_tcache_nslots_large = 20;
+
+/*
+ * We attempt to make the number of slots in a tcache bin for a given size class
+ * equal to the number of objects in a slab times some multiplier. By default,
+ * the multiplier is 2 (i.e. we set the maximum number of objects in the tcache
+ * to twice the number of objects in a slab).
+ * This is bounded by some other constraints as well, like the fact that it
+ * must be even, must be less than opt_tcache_nslots_small_max, etc.
+ */
+ssize_t opt_lg_tcache_nslots_mul = 1;
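+/*
+ * Illustrative arithmetic: a size class whose slab holds 64 regions gets
+ * 64 << 1 == 128 tcache slots by default, which is then bounded by the
+ * constraints above -- kept even and limited to the
+ * [opt_tcache_nslots_small_min, opt_tcache_nslots_small_max] range, i.e.
+ * [20, 200] with the defaults.
+ */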
+
+/*
+ * Number of allocation bytes between tcache incremental GCs. Again, this
+ * default just seems to work well; more tuning is possible.
+ */
+size_t opt_tcache_gc_incr_bytes = 65536;
+
+/*
+ * With default settings, we may end up flushing small bins frequently with
+ * small flush amounts. To limit this tendency, we can set a number of bytes to
+ * "delay" by. If we try to flush N M-byte items, we decrease that size-class's
+ * delay by N * M. So, if delay is 1024 and we're looking at the 64-byte size
+ * class, we won't do any flushing until we've been asked to flush 1024/64 == 16
+ * items. This can happen in any configuration (i.e. being asked to flush 16
+ * items once, or 4 items 4 times).
+ *
+ * Practically, this is stored as a count of items in a uint8_t, so the
+ * effective maximum value for a size class is 255 * sz.
+ */
+size_t opt_tcache_gc_delay_bytes = 0;
+
+/*
+ * When a cache bin is flushed because it's full, how much of it do we flush?
+ * By default, we flush half the maximum number of items.
+ */
+unsigned opt_lg_tcache_flush_small_div = 1;
+unsigned opt_lg_tcache_flush_large_div = 1;
cache_bin_info_t *tcache_bin_info;
-static unsigned stack_nelms; /* Total stack elms per tcache. */
+/* Total stack size required (per tcache). Includes the padding above. */
+static size_t tcache_bin_alloc_size;
+static size_t tcache_bin_alloc_alignment;
+
+/* Number of cache bins enabled, including both large and small. */
unsigned nhbins;
+/* Max size class to be cached (can be small or large). */
size_t tcache_maxclass;
tcaches_t *tcaches;
@@ -37,358 +86,551 @@ tcache_salloc(tsdn_t *tsdn, const void *ptr) {
return arena_salloc(tsdn, ptr);
}
-void
-tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
- szind_t binind = tcache->next_gc_bin;
+uint64_t
+tcache_gc_new_event_wait(tsd_t *tsd) {
+ return opt_tcache_gc_incr_bytes;
+}
+
+uint64_t
+tcache_gc_postponed_event_wait(tsd_t *tsd) {
+ return TE_MIN_START_WAIT;
+}
+
+uint64_t
+tcache_gc_dalloc_new_event_wait(tsd_t *tsd) {
+ return opt_tcache_gc_incr_bytes;
+}
- cache_bin_t *tbin;
- if (binind < SC_NBINS) {
- tbin = tcache_small_bin_get(tcache, binind);
+uint64_t
+tcache_gc_dalloc_postponed_event_wait(tsd_t *tsd) {
+ return TE_MIN_START_WAIT;
+}
+
+static uint8_t
+tcache_gc_item_delay_compute(szind_t szind) {
+ assert(szind < SC_NBINS);
+ size_t sz = sz_index2size(szind);
+ size_t item_delay = opt_tcache_gc_delay_bytes / sz;
+ size_t delay_max = ZU(1)
+ << (sizeof(((tcache_slow_t *)NULL)->bin_flush_delay_items[0]) * 8);
+ if (item_delay >= delay_max) {
+ item_delay = delay_max - 1;
+ }
+ return (uint8_t)item_delay;
+}
+
+static void
+tcache_gc_small(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
+ szind_t szind) {
+ /* Aim to flush 3/4 of items below low-water. */
+ assert(szind < SC_NBINS);
+
+ cache_bin_t *cache_bin = &tcache->bins[szind];
+ cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
+ &tcache_bin_info[szind]);
+ cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
+ &tcache_bin_info[szind]);
+ assert(!tcache_slow->bin_refilled[szind]);
+
+ size_t nflush = low_water - (low_water >> 2);
+ if (nflush < tcache_slow->bin_flush_delay_items[szind]) {
+ /* Workaround for a conversion warning. */
+ uint8_t nflush_uint8 = (uint8_t)nflush;
+ assert(sizeof(tcache_slow->bin_flush_delay_items[0]) ==
+ sizeof(nflush_uint8));
+ tcache_slow->bin_flush_delay_items[szind] -= nflush_uint8;
+ return;
} else {
- tbin = tcache_large_bin_get(tcache, binind);
+ tcache_slow->bin_flush_delay_items[szind]
+ = tcache_gc_item_delay_compute(szind);
}
- if (tbin->low_water > 0) {
- /*
- * Flush (ceiling) 3/4 of the objects below the low water mark.
- */
- if (binind < SC_NBINS) {
- tcache_bin_flush_small(tsd, tcache, tbin, binind,
- tbin->ncached - tbin->low_water + (tbin->low_water
- >> 2));
- /*
- * Reduce fill count by 2X. Limit lg_fill_div such that
- * the fill count is always at least 1.
- */
- cache_bin_info_t *tbin_info = &tcache_bin_info[binind];
- if ((tbin_info->ncached_max >>
- (tcache->lg_fill_div[binind] + 1)) >= 1) {
- tcache->lg_fill_div[binind]++;
- }
+
+ tcache_bin_flush_small(tsd, tcache, cache_bin, szind,
+ (unsigned)(ncached - nflush));
+
+ /*
+ * Reduce fill count by 2X. Limit lg_fill_div such that
+ * the fill count is always at least 1.
+ */
+ if ((cache_bin_info_ncached_max(&tcache_bin_info[szind])
+ >> (tcache_slow->lg_fill_div[szind] + 1)) >= 1) {
+ tcache_slow->lg_fill_div[szind]++;
+ }
+}
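A quick check of the flush arithmetic in tcache_gc_small(): with low_water == 8, nflush == 8 - (8 >> 2) == 6, i.e. three quarters of the items that sat untouched below the low-water mark are flushed (unless the remaining per-class delay from opt.tcache_gc_delay_bytes absorbs them), and lg_fill_div is bumped so subsequent refills shrink by half.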
+
+static void
+tcache_gc_large(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
+ szind_t szind) {
+ /* Like the small GC; flush 3/4 of untouched items. */
+ assert(szind >= SC_NBINS);
+ cache_bin_t *cache_bin = &tcache->bins[szind];
+ cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
+ &tcache_bin_info[szind]);
+ cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
+ &tcache_bin_info[szind]);
+ tcache_bin_flush_large(tsd, tcache, cache_bin, szind,
+ (unsigned)(ncached - low_water + (low_water >> 2)));
+}
+
+static void
+tcache_event(tsd_t *tsd) {
+ tcache_t *tcache = tcache_get(tsd);
+ if (tcache == NULL) {
+ return;
+ }
+
+ tcache_slow_t *tcache_slow = tsd_tcache_slowp_get(tsd);
+ szind_t szind = tcache_slow->next_gc_bin;
+ bool is_small = (szind < SC_NBINS);
+ cache_bin_t *cache_bin = &tcache->bins[szind];
+
+ tcache_bin_flush_stashed(tsd, tcache, cache_bin, szind, is_small);
+
+ cache_bin_sz_t low_water = cache_bin_low_water_get(cache_bin,
+ &tcache_bin_info[szind]);
+ if (low_water > 0) {
+ if (is_small) {
+ tcache_gc_small(tsd, tcache_slow, tcache, szind);
} else {
- tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
- - tbin->low_water + (tbin->low_water >> 2), tcache);
+ tcache_gc_large(tsd, tcache_slow, tcache, szind);
}
- } else if (tbin->low_water < 0) {
+ } else if (is_small && tcache_slow->bin_refilled[szind]) {
+ assert(low_water == 0);
/*
* Increase fill count by 2X for small bins. Make sure
* lg_fill_div stays greater than 0.
*/
- if (binind < SC_NBINS && tcache->lg_fill_div[binind] > 1) {
- tcache->lg_fill_div[binind]--;
+ if (tcache_slow->lg_fill_div[szind] > 1) {
+ tcache_slow->lg_fill_div[szind]--;
}
+ tcache_slow->bin_refilled[szind] = false;
}
- tbin->low_water = tbin->ncached;
+ cache_bin_low_water_set(cache_bin);
- tcache->next_gc_bin++;
- if (tcache->next_gc_bin == nhbins) {
- tcache->next_gc_bin = 0;
+ tcache_slow->next_gc_bin++;
+ if (tcache_slow->next_gc_bin == nhbins) {
+ tcache_slow->next_gc_bin = 0;
}
}
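Since next_gc_bin advances by one per event and wraps at nhbins, a full GC sweep visits every enabled bin once per nhbins events; with the default opt.tcache_gc_incr_bytes of 64 KiB and, say, 40-odd enabled bins, a complete sweep corresponds to roughly 2.5 MiB of allocation activity.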
+void
+tcache_gc_event_handler(tsd_t *tsd, uint64_t elapsed) {
+ assert(elapsed == TE_INVALID_ELAPSED);
+ tcache_event(tsd);
+}
+
+void
+tcache_gc_dalloc_event_handler(tsd_t *tsd, uint64_t elapsed) {
+ assert(elapsed == TE_INVALID_ELAPSED);
+ tcache_event(tsd);
+}
+
void *
-tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
- cache_bin_t *tbin, szind_t binind, bool *tcache_success) {
+tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena,
+ tcache_t *tcache, cache_bin_t *cache_bin, szind_t binind,
+ bool *tcache_success) {
+ tcache_slow_t *tcache_slow = tcache->tcache_slow;
void *ret;
- assert(tcache->arena != NULL);
- arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind,
- config_prof ? tcache->prof_accumbytes : 0);
- if (config_prof) {
- tcache->prof_accumbytes = 0;
- }
- ret = cache_bin_alloc_easy(tbin, tcache_success);
+ assert(tcache_slow->arena != NULL);
+ unsigned nfill = cache_bin_info_ncached_max(&tcache_bin_info[binind])
+ >> tcache_slow->lg_fill_div[binind];
+ arena_cache_bin_fill_small(tsdn, arena, cache_bin,
+ &tcache_bin_info[binind], binind, nfill);
+ tcache_slow->bin_refilled[binind] = true;
+ ret = cache_bin_alloc(cache_bin, tcache_success);
return ret;
}
-/* Enabled with --enable-extra-size-check. */
+static const void *
+tcache_bin_flush_ptr_getter(void *arr_ctx, size_t ind) {
+ cache_bin_ptr_array_t *arr = (cache_bin_ptr_array_t *)arr_ctx;
+ return arr->ptr[ind];
+}
+
static void
-tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
- size_t nflush, extent_t **extents){
- rtree_ctx_t rtree_ctx_fallback;
- rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
+tcache_bin_flush_metadata_visitor(void *szind_sum_ctx,
+ emap_full_alloc_ctx_t *alloc_ctx) {
+ size_t *szind_sum = (size_t *)szind_sum_ctx;
+ *szind_sum -= alloc_ctx->szind;
+ util_prefetch_write_range(alloc_ctx->edata, sizeof(edata_t));
+}
- /*
- * Verify that the items in the tcache all have the correct size; this
- * is useful for catching sized deallocation bugs, also to fail early
- * instead of corrupting metadata. Since this can be turned on for opt
- * builds, avoid the branch in the loop.
- */
- szind_t szind;
- size_t sz_sum = binind * nflush;
- for (unsigned i = 0 ; i < nflush; i++) {
- rtree_extent_szind_read(tsdn, &extents_rtree,
- rtree_ctx, (uintptr_t)*(tbin->avail - 1 - i), true,
- &extents[i], &szind);
- sz_sum -= szind;
- }
- if (sz_sum != 0) {
- safety_check_fail("<jemalloc>: size mismatch in thread cache "
- "detected, likely caused by sized deallocation bugs by "
- "application. Abort.\n");
- abort();
+JEMALLOC_NOINLINE static void
+tcache_bin_flush_size_check_fail(cache_bin_ptr_array_t *arr, szind_t szind,
+ size_t nptrs, emap_batch_lookup_result_t *edatas) {
+ bool found_mismatch = false;
+ for (size_t i = 0; i < nptrs; i++) {
+ szind_t true_szind = edata_szind_get(edatas[i].edata);
+ if (true_szind != szind) {
+ found_mismatch = true;
+ safety_check_fail_sized_dealloc(
+ /* current_dealloc */ false,
+ /* ptr */ tcache_bin_flush_ptr_getter(arr, i),
+ /* true_size */ sz_index2size(true_szind),
+ /* input_size */ sz_index2size(szind));
+ }
}
+ assert(found_mismatch);
}
-void
-tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
- szind_t binind, unsigned rem) {
- bool merged_stats = false;
-
- assert(binind < SC_NBINS);
- assert((cache_bin_sz_t)rem <= tbin->ncached);
+static void
+tcache_bin_flush_edatas_lookup(tsd_t *tsd, cache_bin_ptr_array_t *arr,
+ szind_t binind, size_t nflush, emap_batch_lookup_result_t *edatas) {
- arena_t *arena = tcache->arena;
- assert(arena != NULL);
- unsigned nflush = tbin->ncached - rem;
- VARIABLE_ARRAY(extent_t *, item_extent, nflush);
+ /*
+ * This gets compiled away when config_opt_safety_checks is false.
+ * Checks for sized deallocation bugs, failing early rather than
+ * corrupting metadata.
+ */
+ size_t szind_sum = binind * nflush;
+ emap_edata_lookup_batch(tsd, &arena_emap_global, nflush,
+ &tcache_bin_flush_ptr_getter, (void *)arr,
+ &tcache_bin_flush_metadata_visitor, (void *)&szind_sum,
+ edatas);
+ if (config_opt_safety_checks && unlikely(szind_sum != 0)) {
+ tcache_bin_flush_size_check_fail(arr, binind, nflush, edatas);
+ }
+}
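The szind_sum trick above is worth spelling out: the sum starts at binind * nflush and the visitor subtracts each item's true size-class index, so it ends at zero when every item matches (e.g. binind 5, nflush 3: 15 - 5 - 5 - 5 == 0), while a stray szind-7 item leaves 15 - 5 - 5 - 7, a nonzero value that wraps harmlessly in the unsigned size_t. Offsetting mismatches could in principle cancel, so this is a cheap filter rather than an exhaustive check; only on a nonzero sum does the cold path rescan and report each mismatching pointer.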
- /* Look up extent once per item. */
- if (config_opt_safety_checks) {
- tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind,
- nflush, item_extent);
+JEMALLOC_ALWAYS_INLINE bool
+tcache_bin_flush_match(edata_t *edata, unsigned cur_arena_ind,
+ unsigned cur_binshard, bool small) {
+ if (small) {
+ return edata_arena_ind_get(edata) == cur_arena_ind
+ && edata_binshard_get(edata) == cur_binshard;
} else {
- for (unsigned i = 0 ; i < nflush; i++) {
- item_extent[i] = iealloc(tsd_tsdn(tsd),
- *(tbin->avail - 1 - i));
- }
+ return edata_arena_ind_get(edata) == cur_arena_ind;
}
- while (nflush > 0) {
- /* Lock the arena bin associated with the first object. */
- extent_t *extent = item_extent[0];
- unsigned bin_arena_ind = extent_arena_ind_get(extent);
- arena_t *bin_arena = arena_get(tsd_tsdn(tsd), bin_arena_ind,
- false);
- unsigned binshard = extent_binshard_get(extent);
- assert(binshard < bin_infos[binind].n_shards);
- bin_t *bin = &bin_arena->bins[binind].bin_shards[binshard];
-
- if (config_prof && bin_arena == arena) {
- if (arena_prof_accum(tsd_tsdn(tsd), arena,
- tcache->prof_accumbytes)) {
- prof_idump(tsd_tsdn(tsd));
- }
- tcache->prof_accumbytes = 0;
- }
+}
- malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
- if (config_stats && bin_arena == arena && !merged_stats) {
- merged_stats = true;
- bin->stats.nflushes++;
- bin->stats.nrequests += tbin->tstats.nrequests;
- tbin->tstats.nrequests = 0;
- }
- unsigned ndeferred = 0;
- for (unsigned i = 0; i < nflush; i++) {
- void *ptr = *(tbin->avail - 1 - i);
- extent = item_extent[i];
- assert(ptr != NULL && extent != NULL);
-
- if (extent_arena_ind_get(extent) == bin_arena_ind
- && extent_binshard_get(extent) == binshard) {
- arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
- bin_arena, bin, binind, extent, ptr);
- } else {
- /*
- * This object was allocated via a different
- * arena bin than the one that is currently
- * locked. Stash the object, so that it can be
- * handled in a future pass.
- */
- *(tbin->avail - 1 - ndeferred) = ptr;
- item_extent[ndeferred] = extent;
- ndeferred++;
- }
- }
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
- arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
- nflush = ndeferred;
- }
- if (config_stats && !merged_stats) {
- /*
- * The flush loop didn't happen to flush to this thread's
- * arena, so the stats didn't get merged. Manually do so now.
- */
- unsigned binshard;
- bin_t *bin = arena_bin_choose_lock(tsd_tsdn(tsd), arena, binind,
- &binshard);
- bin->stats.nflushes++;
- bin->stats.nrequests += tbin->tstats.nrequests;
- tbin->tstats.nrequests = 0;
- malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
- }
+JEMALLOC_ALWAYS_INLINE void
+tcache_bin_flush_impl(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
+ szind_t binind, cache_bin_ptr_array_t *ptrs, unsigned nflush, bool small) {
+ tcache_slow_t *tcache_slow = tcache->tcache_slow;
+ /*
+ * A couple lookup calls take tsdn; declare it once for convenience
+ * instead of calling tsd_tsdn(tsd) all the time.
+ */
+ tsdn_t *tsdn = tsd_tsdn(tsd);
- memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
- sizeof(void *));
- tbin->ncached = rem;
- if (tbin->ncached < tbin->low_water) {
- tbin->low_water = tbin->ncached;
+ if (small) {
+ assert(binind < SC_NBINS);
+ } else {
+ assert(binind < nhbins);
}
-}
+ arena_t *tcache_arena = tcache_slow->arena;
+ assert(tcache_arena != NULL);
-void
-tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
- unsigned rem, tcache_t *tcache) {
- bool merged_stats = false;
+ /*
+ * Variable length array must have > 0 length; the last element is never
+ * touched (it's just included to satisfy the no-zero-length rule).
+ */
+ VARIABLE_ARRAY(emap_batch_lookup_result_t, item_edata, nflush + 1);
+ tcache_bin_flush_edatas_lookup(tsd, ptrs, binind, nflush, item_edata);
- assert(binind < nhbins);
- assert((cache_bin_sz_t)rem <= tbin->ncached);
+ /*
+ * The slabs where we freed the last remaining object in the slab (and
+ * so need to free the slab itself).
+ * Used only if small == true.
+ */
+ unsigned dalloc_count = 0;
+ VARIABLE_ARRAY(edata_t *, dalloc_slabs, nflush + 1);
- arena_t *tcache_arena = tcache->arena;
- assert(tcache_arena != NULL);
- unsigned nflush = tbin->ncached - rem;
- VARIABLE_ARRAY(extent_t *, item_extent, nflush);
-
-#ifndef JEMALLOC_EXTRA_SIZE_CHECK
- /* Look up extent once per item. */
- for (unsigned i = 0 ; i < nflush; i++) {
- item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
- }
-#else
- tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, nflush,
- item_extent);
-#endif
+ /*
+ * We're about to grab a bunch of locks. If one of them happens to be
+ * the one guarding the arena-level stats counters we flush our
+ * thread-local ones to, we do so under one critical section.
+ */
+ bool merged_stats = false;
while (nflush > 0) {
- /* Lock the arena associated with the first object. */
- extent_t *extent = item_extent[0];
- unsigned locked_arena_ind = extent_arena_ind_get(extent);
- arena_t *locked_arena = arena_get(tsd_tsdn(tsd),
- locked_arena_ind, false);
- bool idump;
-
- if (config_prof) {
- idump = false;
+ /* Lock the arena, or bin, associated with the first object. */
+ edata_t *edata = item_edata[0].edata;
+ unsigned cur_arena_ind = edata_arena_ind_get(edata);
+ arena_t *cur_arena = arena_get(tsdn, cur_arena_ind, false);
+
+ /*
+ * These assignments are always overwritten when small is true,
+ * and their values are always ignored when small is false, but
+ * to avoid the technical UB when we pass them as parameters, we
+		 * need to initialize them.
+ */
+ unsigned cur_binshard = 0;
+ bin_t *cur_bin = NULL;
+ if (small) {
+ cur_binshard = edata_binshard_get(edata);
+ cur_bin = arena_get_bin(cur_arena, binind,
+ cur_binshard);
+ assert(cur_binshard < bin_infos[binind].n_shards);
+ /*
+ * If you're looking at profiles, you might think this
+ * is a good place to prefetch the bin stats, which are
+ * often a cache miss. This turns out not to be
+ * helpful on the workloads we've looked at, with moving
+ * the bin stats next to the lock seeming to do better.
+ */
}
- bool lock_large = !arena_is_auto(locked_arena);
- if (lock_large) {
- malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+ if (small) {
+ malloc_mutex_lock(tsdn, &cur_bin->lock);
}
- for (unsigned i = 0; i < nflush; i++) {
- void *ptr = *(tbin->avail - 1 - i);
- assert(ptr != NULL);
- extent = item_extent[i];
- if (extent_arena_ind_get(extent) == locked_arena_ind) {
- large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
- extent);
- }
+ if (!small && !arena_is_auto(cur_arena)) {
+ malloc_mutex_lock(tsdn, &cur_arena->large_mtx);
}
- if ((config_prof || config_stats) &&
- (locked_arena == tcache_arena)) {
- if (config_prof) {
- idump = arena_prof_accum(tsd_tsdn(tsd),
- tcache_arena, tcache->prof_accumbytes);
- tcache->prof_accumbytes = 0;
+
+ /*
+ * If we acquired the right lock and have some stats to flush,
+ * flush them.
+ */
+ if (config_stats && tcache_arena == cur_arena
+ && !merged_stats) {
+ merged_stats = true;
+ if (small) {
+ cur_bin->stats.nflushes++;
+ cur_bin->stats.nrequests +=
+ cache_bin->tstats.nrequests;
+ cache_bin->tstats.nrequests = 0;
+ } else {
+ arena_stats_large_flush_nrequests_add(tsdn,
+ &tcache_arena->stats, binind,
+ cache_bin->tstats.nrequests);
+ cache_bin->tstats.nrequests = 0;
}
- if (config_stats) {
- merged_stats = true;
- arena_stats_large_flush_nrequests_add(
- tsd_tsdn(tsd), &tcache_arena->stats, binind,
- tbin->tstats.nrequests);
- tbin->tstats.nrequests = 0;
+ }
+
+ /*
+ * Large allocations need special prep done. Afterwards, we can
+ * drop the large lock.
+ */
+ if (!small) {
+ for (unsigned i = 0; i < nflush; i++) {
+ void *ptr = ptrs->ptr[i];
+ edata = item_edata[i].edata;
+ assert(ptr != NULL && edata != NULL);
+
+ if (tcache_bin_flush_match(edata, cur_arena_ind,
+ cur_binshard, small)) {
+ large_dalloc_prep_locked(tsdn,
+ edata);
+ }
}
}
- if (lock_large) {
- malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);
+ if (!small && !arena_is_auto(cur_arena)) {
+ malloc_mutex_unlock(tsdn, &cur_arena->large_mtx);
}
+ /* Deallocate whatever we can. */
unsigned ndeferred = 0;
+ /* Init only to avoid used-uninitialized warning. */
+ arena_dalloc_bin_locked_info_t dalloc_bin_info = {0};
+ if (small) {
+ arena_dalloc_bin_locked_begin(&dalloc_bin_info, binind);
+ }
for (unsigned i = 0; i < nflush; i++) {
- void *ptr = *(tbin->avail - 1 - i);
- extent = item_extent[i];
- assert(ptr != NULL && extent != NULL);
-
- if (extent_arena_ind_get(extent) == locked_arena_ind) {
- large_dalloc_finish(tsd_tsdn(tsd), extent);
- } else {
+ void *ptr = ptrs->ptr[i];
+ edata = item_edata[i].edata;
+ assert(ptr != NULL && edata != NULL);
+ if (!tcache_bin_flush_match(edata, cur_arena_ind,
+ cur_binshard, small)) {
/*
- * This object was allocated via a different
- * arena than the one that is currently locked.
- * Stash the object, so that it can be handled
- * in a future pass.
+ * The object was allocated either via a
+ * different arena, or a different bin in this
+ * arena. Either way, stash the object so that
+ * it can be handled in a future pass.
*/
- *(tbin->avail - 1 - ndeferred) = ptr;
- item_extent[ndeferred] = extent;
+ ptrs->ptr[ndeferred] = ptr;
+ item_edata[ndeferred].edata = edata;
ndeferred++;
+ continue;
+ }
+ if (small) {
+ if (arena_dalloc_bin_locked_step(tsdn,
+ cur_arena, cur_bin, &dalloc_bin_info,
+ binind, edata, ptr)) {
+ dalloc_slabs[dalloc_count] = edata;
+ dalloc_count++;
+ }
+ } else {
+ if (large_dalloc_safety_checks(edata, ptr,
+ binind)) {
+ /* See the comment in isfree. */
+ continue;
+ }
+ large_dalloc_finish(tsdn, edata);
}
}
- if (config_prof && idump) {
- prof_idump(tsd_tsdn(tsd));
+
+ if (small) {
+ arena_dalloc_bin_locked_finish(tsdn, cur_arena, cur_bin,
+ &dalloc_bin_info);
+ malloc_mutex_unlock(tsdn, &cur_bin->lock);
}
- arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
- ndeferred);
+ arena_decay_ticks(tsdn, cur_arena, nflush - ndeferred);
nflush = ndeferred;
}
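To spell out the loop structure: each pass of the while loop locks the owner of the first still-pending pointer (the bin shard for small flushes, the arena for large ones), frees every pointer with the same owner, and compacts the mismatches to the front of the array. Since the first item always matches, nflush strictly decreases, so the loop terminates after at most one pass per distinct owner present in the flushed range.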
+
+ /* Handle all deferred slab dalloc. */
+ assert(small || dalloc_count == 0);
+ for (unsigned i = 0; i < dalloc_count; i++) {
+ edata_t *slab = dalloc_slabs[i];
+ arena_slab_dalloc(tsdn, arena_get_from_edata(slab), slab);
+	}
+
if (config_stats && !merged_stats) {
- /*
- * The flush loop didn't happen to flush to this thread's
- * arena, so the stats didn't get merged. Manually do so now.
- */
- arena_stats_large_flush_nrequests_add(tsd_tsdn(tsd),
- &tcache_arena->stats, binind, tbin->tstats.nrequests);
- tbin->tstats.nrequests = 0;
+ if (small) {
+ /*
+ * The flush loop didn't happen to flush to this
+ * thread's arena, so the stats didn't get merged.
+ * Manually do so now.
+ */
+ bin_t *bin = arena_bin_choose(tsdn, tcache_arena,
+ binind, NULL);
+ malloc_mutex_lock(tsdn, &bin->lock);
+ bin->stats.nflushes++;
+ bin->stats.nrequests += cache_bin->tstats.nrequests;
+ cache_bin->tstats.nrequests = 0;
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ } else {
+ arena_stats_large_flush_nrequests_add(tsdn,
+ &tcache_arena->stats, binind,
+ cache_bin->tstats.nrequests);
+ cache_bin->tstats.nrequests = 0;
+ }
}
- memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
- sizeof(void *));
- tbin->ncached = rem;
- if (tbin->ncached < tbin->low_water) {
- tbin->low_water = tbin->ncached;
+}
+
+JEMALLOC_ALWAYS_INLINE void
+tcache_bin_flush_bottom(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
+ szind_t binind, unsigned rem, bool small) {
+ tcache_bin_flush_stashed(tsd, tcache, cache_bin, binind, small);
+
+ cache_bin_sz_t ncached = cache_bin_ncached_get_local(cache_bin,
+ &tcache_bin_info[binind]);
+ assert((cache_bin_sz_t)rem <= ncached);
+ unsigned nflush = ncached - rem;
+
+ CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nflush);
+ cache_bin_init_ptr_array_for_flush(cache_bin, &tcache_bin_info[binind],
+ &ptrs, nflush);
+
+ tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nflush,
+ small);
+
+ cache_bin_finish_flush(cache_bin, &tcache_bin_info[binind], &ptrs,
+ ncached - rem);
+}
+
+void
+tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
+ szind_t binind, unsigned rem) {
+ tcache_bin_flush_bottom(tsd, tcache, cache_bin, binind, rem, true);
+}
+
+void
+tcache_bin_flush_large(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
+ szind_t binind, unsigned rem) {
+ tcache_bin_flush_bottom(tsd, tcache, cache_bin, binind, rem, false);
+}
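A note on the structure here: tcache_bin_flush_impl() is JEMALLOC_ALWAYS_INLINE and both thin wrappers pass a compile-time-constant value for small, so the compiler can emit two specialized copies with every small/!small branch folded away -- effectively recovering the two hand-written flush functions this change deletes, without the code duplication.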
+
+/*
+ * Flushing of stashed items happens on 1) tcache fill, 2) tcache flush, or 3)
+ * a tcache GC event. This makes sure that the stashed items do not hold memory
+ * for too long, and that new buffers can only be allocated when nothing is
+ * stashed.
+ *
+ * The downside is that the time between stash and flush may be relatively
+ * short, especially when the request rate is high. That lowers the chance of
+ * detecting write-after-free -- however, such detection is delayed anyway, and
+ * is less of a focus than the memory overhead.
+ */
+void
+tcache_bin_flush_stashed(tsd_t *tsd, tcache_t *tcache, cache_bin_t *cache_bin,
+ szind_t binind, bool is_small) {
+ cache_bin_info_t *info = &tcache_bin_info[binind];
+ /*
+	 * The two values below are for assertions only. The contents of the
+	 * originally cached items remain unchanged -- the stashed items reside
+	 * on the other end of the stack. We check the stack head and ncached
+	 * to verify this.
+ */
+ void *head_content = *cache_bin->stack_head;
+ cache_bin_sz_t orig_cached = cache_bin_ncached_get_local(cache_bin,
+ info);
+
+ cache_bin_sz_t nstashed = cache_bin_nstashed_get_local(cache_bin, info);
+ assert(orig_cached + nstashed <= cache_bin_info_ncached_max(info));
+ if (nstashed == 0) {
+ return;
}
+
+ CACHE_BIN_PTR_ARRAY_DECLARE(ptrs, nstashed);
+ cache_bin_init_ptr_array_for_stashed(cache_bin, binind, info, &ptrs,
+ nstashed);
+ san_check_stashed_ptrs(ptrs.ptr, nstashed, sz_index2size(binind));
+ tcache_bin_flush_impl(tsd, tcache, cache_bin, binind, &ptrs, nstashed,
+ is_small);
+ cache_bin_finish_flush_stashed(cache_bin, info);
+
+ assert(cache_bin_nstashed_get_local(cache_bin, info) == 0);
+ assert(cache_bin_ncached_get_local(cache_bin, info) == orig_cached);
+ assert(head_content == *cache_bin->stack_head);
}
void
-tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
- assert(tcache->arena == NULL);
- tcache->arena = arena;
+tcache_arena_associate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
+ tcache_t *tcache, arena_t *arena) {
+ assert(tcache_slow->arena == NULL);
+ tcache_slow->arena = arena;
if (config_stats) {
/* Link into list of extant tcaches. */
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
- ql_elm_new(tcache, link);
- ql_tail_insert(&arena->tcache_ql, tcache, link);
+ ql_elm_new(tcache_slow, link);
+ ql_tail_insert(&arena->tcache_ql, tcache_slow, link);
cache_bin_array_descriptor_init(
- &tcache->cache_bin_array_descriptor, tcache->bins_small,
- tcache->bins_large);
+ &tcache_slow->cache_bin_array_descriptor, tcache->bins);
ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
- &tcache->cache_bin_array_descriptor, link);
+ &tcache_slow->cache_bin_array_descriptor, link);
malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
}
}
static void
-tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
- arena_t *arena = tcache->arena;
+tcache_arena_dissociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
+ tcache_t *tcache) {
+ arena_t *arena = tcache_slow->arena;
assert(arena != NULL);
if (config_stats) {
/* Unlink from list of extant tcaches. */
malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
if (config_debug) {
bool in_ql = false;
- tcache_t *iter;
+ tcache_slow_t *iter;
ql_foreach(iter, &arena->tcache_ql, link) {
- if (iter == tcache) {
+ if (iter == tcache_slow) {
in_ql = true;
break;
}
}
assert(in_ql);
}
- ql_remove(&arena->tcache_ql, tcache, link);
+ ql_remove(&arena->tcache_ql, tcache_slow, link);
ql_remove(&arena->cache_bin_array_descriptor_ql,
- &tcache->cache_bin_array_descriptor, link);
- tcache_stats_merge(tsdn, tcache, arena);
+ &tcache_slow->cache_bin_array_descriptor, link);
+ tcache_stats_merge(tsdn, tcache_slow->tcache, arena);
malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
}
- tcache->arena = NULL;
+ tcache_slow->arena = NULL;
}
void
-tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
- tcache_arena_dissociate(tsdn, tcache);
- tcache_arena_associate(tsdn, tcache, arena);
+tcache_arena_reassociate(tsdn_t *tsdn, tcache_slow_t *tcache_slow,
+ tcache_t *tcache, arena_t *arena) {
+ tcache_arena_dissociate(tsdn, tcache_slow, tcache);
+ tcache_arena_associate(tsdn, tcache_slow, tcache, arena);
}
bool
@@ -405,56 +647,80 @@ tsd_tcache_enabled_data_init(tsd_t *tsd) {
return false;
}
-/* Initialize auto tcache (embedded in TSD). */
static void
-tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {
- memset(&tcache->link, 0, sizeof(ql_elm(tcache_t)));
- tcache->prof_accumbytes = 0;
- tcache->next_gc_bin = 0;
- tcache->arena = NULL;
-
- ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);
-
- size_t stack_offset = 0;
- assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
- memset(tcache->bins_small, 0, sizeof(cache_bin_t) * SC_NBINS);
- memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins - SC_NBINS));
- unsigned i = 0;
- for (; i < SC_NBINS; i++) {
- tcache->lg_fill_div[i] = 1;
- stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
- /*
- * avail points past the available space. Allocations will
- * access the slots toward higher addresses (for the benefit of
- * prefetch).
- */
- tcache_small_bin_get(tcache, i)->avail =
- (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
+tcache_init(tsd_t *tsd, tcache_slow_t *tcache_slow, tcache_t *tcache,
+ void *mem) {
+ tcache->tcache_slow = tcache_slow;
+ tcache_slow->tcache = tcache;
+
+ memset(&tcache_slow->link, 0, sizeof(ql_elm(tcache_t)));
+ tcache_slow->next_gc_bin = 0;
+ tcache_slow->arena = NULL;
+ tcache_slow->dyn_alloc = mem;
+
+ /*
+ * We reserve cache bins for all small size classes, even if some may
+ * not get used (i.e. bins higher than nhbins). This allows the fast
+ * and common paths to access cache bin metadata safely w/o worrying
+	 * and common paths to access cache bin metadata safely without worrying
+ */
+ unsigned n_reserved_bins = nhbins < SC_NBINS ? SC_NBINS : nhbins;
+ memset(tcache->bins, 0, sizeof(cache_bin_t) * n_reserved_bins);
+
+ size_t cur_offset = 0;
+ cache_bin_preincrement(tcache_bin_info, nhbins, mem,
+ &cur_offset);
+ for (unsigned i = 0; i < nhbins; i++) {
+ if (i < SC_NBINS) {
+ tcache_slow->lg_fill_div[i] = 1;
+ tcache_slow->bin_refilled[i] = false;
+ tcache_slow->bin_flush_delay_items[i]
+ = tcache_gc_item_delay_compute(i);
+ }
+ cache_bin_t *cache_bin = &tcache->bins[i];
+ cache_bin_init(cache_bin, &tcache_bin_info[i], mem,
+ &cur_offset);
}
- for (; i < nhbins; i++) {
- stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
- tcache_large_bin_get(tcache, i)->avail =
- (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
+ /*
+ * For small size classes beyond tcache_maxclass (i.e. when nhbins <
+ * SC_NBINS), the cache bins are initialized to a state that safely and
+ * efficiently fails all fastpath alloc / free operations, so that no
+ * additional nhbins check is needed on the fastpath.
+ */
+ for (unsigned i = nhbins; i < SC_NBINS; i++) {
+ /* Disabled small bins. */
+ cache_bin_t *cache_bin = &tcache->bins[i];
+ void *fake_stack = mem;
+ size_t fake_offset = 0;
+
+ cache_bin_init(cache_bin, &tcache_bin_info[i], fake_stack,
+ &fake_offset);
+ assert(tcache_small_bin_disabled(i, cache_bin));
}
- assert(stack_offset == stack_nelms * sizeof(void *));
+
+ cache_bin_postincrement(tcache_bin_info, nhbins, mem,
+ &cur_offset);
+ /* Sanity check that the whole stack is used. */
+ assert(cur_offset == tcache_bin_alloc_size);
}
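
The disabled-bin initialization above generalizes to a simple trick: give a bin that must never serve the fast path a zero-capacity stack, and the ordinary "empty" check rejects it with no extra branch. A toy model of that idea (this is not jemalloc's real cache_bin layout, just the shape of the trick):

#include <cstdio>

/* Toy stand-in for a cache bin: a stack of cached pointers. */
struct toy_bin {
	void **cur;   /* next slot to pop */
	void **limit; /* one past the last cached slot */
};

static void *
toy_alloc_fastpath(toy_bin *bin) {
	if (bin->cur == bin->limit) {
		return nullptr; /* empty -- fall back to the slow path */
	}
	return *bin->cur++;
}

int
main() {
	void *cached = (void *)0x1000;
	void *slots[1] = { cached };
	toy_bin enabled = { slots, slots + 1 };
	/*
	 * A disabled bin is just a zero-capacity stack: the same emptiness
	 * check fails every fast-path attempt, so no "is this bin enabled?"
	 * branch is needed where it would hurt.
	 */
	toy_bin disabled = { nullptr, nullptr };
	std::printf("enabled: %p, disabled: %p\n",
	    toy_alloc_fastpath(&enabled), toy_alloc_fastpath(&disabled));
	return 0;
}
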
/* Initialize auto tcache (embedded in TSD). */
bool
tsd_tcache_data_init(tsd_t *tsd) {
+ tcache_slow_t *tcache_slow = tsd_tcache_slowp_get_unsafe(tsd);
tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
- assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
- size_t size = stack_nelms * sizeof(void *);
- /* Avoid false cacheline sharing. */
- size = sz_sa2u(size, CACHELINE);
-
- void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true,
- NULL, true, arena_get(TSDN_NULL, 0, true));
- if (avail_array == NULL) {
+
+ assert(cache_bin_still_zero_initialized(&tcache->bins[0]));
+ size_t alignment = tcache_bin_alloc_alignment;
+ size_t size = sz_sa2u(tcache_bin_alloc_size, alignment);
+
+ void *mem = ipallocztm(tsd_tsdn(tsd), size, alignment, true, NULL,
+ true, arena_get(TSDN_NULL, 0, true));
+ if (mem == NULL) {
return true;
}
- tcache_init(tsd, tcache, avail_array);
+ tcache_init(tsd, tcache_slow, tcache, mem);
/*
* Initialization is a bit tricky here. After malloc init is done, all
* threads can rely on arena_choose and associate tcache accordingly.
@@ -463,20 +729,22 @@ tsd_tcache_data_init(tsd_t *tsd) {
* associate its tcache to a0 temporarily, and later on
* arena_choose_hard() will re-associate properly.
*/
- tcache->arena = NULL;
+ tcache_slow->arena = NULL;
arena_t *arena;
if (!malloc_initialized()) {
/* If in initialization, assign to a0. */
arena = arena_get(tsd_tsdn(tsd), 0, false);
- tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
+ tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, tcache,
+ arena);
} else {
arena = arena_choose(tsd, NULL);
/* This may happen if thread.tcache.enabled is used. */
- if (tcache->arena == NULL) {
- tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
+ if (tcache_slow->arena == NULL) {
+ tcache_arena_associate(tsd_tsdn(tsd), tcache_slow,
+ tcache, arena);
}
}
- assert(arena == tcache->arena);
+ assert(arena == tcache_slow->arena);
return false;
}
@@ -484,56 +752,49 @@ tsd_tcache_data_init(tsd_t *tsd) {
/* Created manual tcache for tcache.create mallctl. */
tcache_t *
tcache_create_explicit(tsd_t *tsd) {
- tcache_t *tcache;
- size_t size, stack_offset;
-
- size = sizeof(tcache_t);
+ /*
+ * We place the cache bin stacks, then the tcache_t, then a pointer to
+ * the beginning of the whole allocation (for freeing). This makes sure
+ * the cache bins have the requested alignment.
+ */
+ size_t size = tcache_bin_alloc_size + sizeof(tcache_t)
+ + sizeof(tcache_slow_t);
/* Naturally align the pointer stacks. */
size = PTR_CEILING(size);
- stack_offset = size;
- size += stack_nelms * sizeof(void *);
- /* Avoid false cacheline sharing. */
- size = sz_sa2u(size, CACHELINE);
+ size = sz_sa2u(size, tcache_bin_alloc_alignment);
- tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true,
- arena_get(TSDN_NULL, 0, true));
- if (tcache == NULL) {
+ void *mem = ipallocztm(tsd_tsdn(tsd), size, tcache_bin_alloc_alignment,
+ true, NULL, true, arena_get(TSDN_NULL, 0, true));
+ if (mem == NULL) {
return NULL;
}
+ tcache_t *tcache = (void *)((uintptr_t)mem + tcache_bin_alloc_size);
+ tcache_slow_t *tcache_slow =
+ (void *)((uintptr_t)mem + tcache_bin_alloc_size + sizeof(tcache_t));
+ tcache_init(tsd, tcache_slow, tcache, mem);
- tcache_init(tsd, tcache,
- (void *)((uintptr_t)tcache + (uintptr_t)stack_offset));
- tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL));
+ tcache_arena_associate(tsd_tsdn(tsd), tcache_slow, tcache,
+ arena_ichoose(tsd, NULL));
return tcache;
}
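
The pointer arithmetic above is easier to check against a picture: one allocation laid out as [bin stacks][tcache_t][tcache_slow_t], with the stacks first so they inherit the allocation's alignment. A standalone sketch of the same placement (the struct sizes and the 1 KiB stack area are illustrative stand-ins, not jemalloc's boot-time computed values):

#include <cstdint>
#include <cstdio>
#include <cstdlib>

/* Illustrative stand-ins; jemalloc computes these at boot. */
struct tcache_t { unsigned char opaque[64]; };
struct tcache_slow_t { void *dyn_alloc; unsigned char opaque[56]; };
static const size_t bin_alloc_size = 1024;    /* cache bin stacks */
static const size_t bin_alloc_alignment = 64; /* cacheline */

int
main() {
	size_t size = bin_alloc_size + sizeof(tcache_t) +
	    sizeof(tcache_slow_t);
	/* Round up so aligned_alloc's size precondition holds. */
	size = (size + bin_alloc_alignment - 1) & ~(bin_alloc_alignment - 1);
	void *mem = std::aligned_alloc(bin_alloc_alignment, size);
	if (mem == nullptr) {
		return 1;
	}
	/* Stacks come first => they get the requested alignment for free. */
	tcache_t *tcache = (tcache_t *)((uintptr_t)mem + bin_alloc_size);
	tcache_slow_t *tcache_slow =
	    (tcache_slow_t *)((uintptr_t)tcache + sizeof(tcache_t));
	/* Keep the base so destroy can free the whole block at once. */
	tcache_slow->dyn_alloc = mem;
	std::printf("stacks=%p tcache=%p slow=%p\n", mem, (void *)tcache,
	    (void *)tcache_slow);
	std::free(mem);
	return 0;
}
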
static void
tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
- assert(tcache->arena != NULL);
-
- for (unsigned i = 0; i < SC_NBINS; i++) {
- cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
- tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
+ tcache_slow_t *tcache_slow = tcache->tcache_slow;
+ assert(tcache_slow->arena != NULL);
- if (config_stats) {
- assert(tbin->tstats.nrequests == 0);
+ for (unsigned i = 0; i < nhbins; i++) {
+ cache_bin_t *cache_bin = &tcache->bins[i];
+ if (i < SC_NBINS) {
+ tcache_bin_flush_small(tsd, tcache, cache_bin, i, 0);
+ } else {
+ tcache_bin_flush_large(tsd, tcache, cache_bin, i, 0);
}
- }
- for (unsigned i = SC_NBINS; i < nhbins; i++) {
- cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
- tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
-
if (config_stats) {
- assert(tbin->tstats.nrequests == 0);
+ assert(cache_bin->tstats.nrequests == 0);
}
}
-
- if (config_prof && tcache->prof_accumbytes > 0 &&
- arena_prof_accum(tsd_tsdn(tsd), tcache->arena,
- tcache->prof_accumbytes)) {
- prof_idump(tsd_tsdn(tsd));
- }
}
void
@@ -544,20 +805,17 @@ tcache_flush(tsd_t *tsd) {
static void
tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
+ tcache_slow_t *tcache_slow = tcache->tcache_slow;
tcache_flush_cache(tsd, tcache);
- arena_t *arena = tcache->arena;
- tcache_arena_dissociate(tsd_tsdn(tsd), tcache);
+ arena_t *arena = tcache_slow->arena;
+ tcache_arena_dissociate(tsd_tsdn(tsd), tcache_slow, tcache);
if (tsd_tcache) {
- /* Release the avail array for the TSD embedded auto tcache. */
- void *avail_array =
- (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail -
- (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *));
- idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true);
- } else {
- /* Release both the tcache struct and avail array. */
- idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true);
+ cache_bin_t *cache_bin = &tcache->bins[0];
+ cache_bin_assert_empty(cache_bin, &tcache_bin_info[0]);
}
+ idalloctm(tsd_tsdn(tsd), tcache_slow->dyn_alloc, NULL, NULL, true,
+ true);
/*
* The deallocation and tcache flush above may not trigger decay since
@@ -571,9 +829,11 @@ tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
if (arena_nthreads_get(arena, false) == 0 &&
!background_thread_enabled()) {
/* Force purging when no threads assigned to the arena anymore. */
- arena_decay(tsd_tsdn(tsd), arena, false, true);
+ arena_decay(tsd_tsdn(tsd), arena,
+ /* is_background_thread */ false, /* all */ true);
} else {
- arena_decay(tsd_tsdn(tsd), arena, false, false);
+ arena_decay(tsd_tsdn(tsd), arena,
+ /* is_background_thread */ false, /* all */ false);
}
}
@@ -583,53 +843,51 @@ tcache_cleanup(tsd_t *tsd) {
tcache_t *tcache = tsd_tcachep_get(tsd);
if (!tcache_available(tsd)) {
assert(tsd_tcache_enabled_get(tsd) == false);
- if (config_debug) {
- assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
- }
+ assert(cache_bin_still_zero_initialized(&tcache->bins[0]));
return;
}
assert(tsd_tcache_enabled_get(tsd));
- assert(tcache_small_bin_get(tcache, 0)->avail != NULL);
+ assert(!cache_bin_still_zero_initialized(&tcache->bins[0]));
tcache_destroy(tsd, tcache, true);
if (config_debug) {
- tcache_small_bin_get(tcache, 0)->avail = NULL;
+ /*
+ * For debug testing only, we want to pretend we're still in the
+ * zero-initialized state.
+ */
+ memset(tcache->bins, 0, sizeof(cache_bin_t) * nhbins);
}
}
void
tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
- unsigned i;
-
cassert(config_stats);
/* Merge and reset tcache stats. */
- for (i = 0; i < SC_NBINS; i++) {
- cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
- unsigned binshard;
- bin_t *bin = arena_bin_choose_lock(tsdn, arena, i, &binshard);
- bin->stats.nrequests += tbin->tstats.nrequests;
- malloc_mutex_unlock(tsdn, &bin->lock);
- tbin->tstats.nrequests = 0;
- }
-
- for (; i < nhbins; i++) {
- cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
- arena_stats_large_flush_nrequests_add(tsdn, &arena->stats, i,
- tbin->tstats.nrequests);
- tbin->tstats.nrequests = 0;
+ for (unsigned i = 0; i < nhbins; i++) {
+ cache_bin_t *cache_bin = &tcache->bins[i];
+ if (i < SC_NBINS) {
+ bin_t *bin = arena_bin_choose(tsdn, arena, i, NULL);
+ malloc_mutex_lock(tsdn, &bin->lock);
+ bin->stats.nrequests += cache_bin->tstats.nrequests;
+ malloc_mutex_unlock(tsdn, &bin->lock);
+ } else {
+ arena_stats_large_flush_nrequests_add(tsdn,
+ &arena->stats, i, cache_bin->tstats.nrequests);
+ }
+ cache_bin->tstats.nrequests = 0;
}
}
static bool
-tcaches_create_prep(tsd_t *tsd) {
+tcaches_create_prep(tsd_t *tsd, base_t *base) {
bool err;
- malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
+ malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx);
if (tcaches == NULL) {
- tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *)
- * (MALLOCX_TCACHE_MAX+1), CACHELINE);
+ tcaches = base_alloc(tsd_tsdn(tsd), base,
+ sizeof(tcache_t *) * (MALLOCX_TCACHE_MAX+1), CACHELINE);
if (tcaches == NULL) {
err = true;
goto label_return;
@@ -643,17 +901,18 @@ tcaches_create_prep(tsd_t *tsd) {
err = false;
label_return:
- malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
return err;
}
bool
-tcaches_create(tsd_t *tsd, unsigned *r_ind) {
+tcaches_create(tsd_t *tsd, base_t *base, unsigned *r_ind) {
witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
bool err;
- if (tcaches_create_prep(tsd)) {
+ malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
+
+ if (tcaches_create_prep(tsd, base)) {
err = true;
goto label_return;
}
@@ -665,7 +924,6 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind) {
}
tcaches_t *elm;
- malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
if (tcaches_avail != NULL) {
elm = tcaches_avail;
tcaches_avail = tcaches_avail->next;
@@ -677,10 +935,10 @@ tcaches_create(tsd_t *tsd, unsigned *r_ind) {
*r_ind = tcaches_past;
tcaches_past++;
}
- malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
err = false;
label_return:
+ malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
return err;
}
@@ -729,70 +987,115 @@ tcaches_destroy(tsd_t *tsd, unsigned ind) {
}
}
-bool
-tcache_boot(tsdn_t *tsdn) {
- /* If necessary, clamp opt_lg_tcache_max. */
- if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
- SC_SMALL_MAXCLASS) {
- tcache_maxclass = SC_SMALL_MAXCLASS;
+static unsigned
+tcache_ncached_max_compute(szind_t szind) {
+ if (szind >= SC_NBINS) {
+ assert(szind < nhbins);
+ return opt_tcache_nslots_large;
+ }
+ unsigned slab_nregs = bin_infos[szind].nregs;
+
+ /* We may modify these values; start with the opt versions. */
+ unsigned nslots_small_min = opt_tcache_nslots_small_min;
+ unsigned nslots_small_max = opt_tcache_nslots_small_max;
+
+ /*
+ * Clamp values to meet our constraints -- even, nonzero, min < max, and
+ * suitable for a cache bin size.
+ */
+ if (opt_tcache_nslots_small_max > CACHE_BIN_NCACHED_MAX) {
+ nslots_small_max = CACHE_BIN_NCACHED_MAX;
+ }
+ if (nslots_small_min % 2 != 0) {
+ nslots_small_min++;
+ }
+ if (nslots_small_max % 2 != 0) {
+ nslots_small_max--;
+ }
+ if (nslots_small_min < 2) {
+ nslots_small_min = 2;
+ }
+ if (nslots_small_max < 2) {
+ nslots_small_max = 2;
+ }
+ if (nslots_small_min > nslots_small_max) {
+ nslots_small_min = nslots_small_max;
+ }
+
+ unsigned candidate;
+ if (opt_lg_tcache_nslots_mul < 0) {
+ candidate = slab_nregs >> (-opt_lg_tcache_nslots_mul);
} else {
- tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
+ candidate = slab_nregs << opt_lg_tcache_nslots_mul;
+ }
+ if (candidate % 2 != 0) {
+ /*
+ * We need the candidate size to be even -- we assume that we
+ * can divide by two and get a positive number (e.g. when
+ * flushing).
+ */
+ ++candidate;
}
+ if (candidate <= nslots_small_min) {
+ return nslots_small_min;
+ } else if (candidate <= nslots_small_max) {
+ return candidate;
+ } else {
+ return nslots_small_max;
+ }
+}
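
A quick way to sanity-check the clamping above is to run it standalone with made-up tunables (the bounds and multiplier below are illustrative, not jemalloc's defaults):

#include <cstdio>

/* Same constraints as above: even, nonzero, min <= max, capped. */
static unsigned
clamp_ncached_max(unsigned slab_nregs, int lg_mul, unsigned nmin,
    unsigned nmax, unsigned hard_cap) {
	if (nmax > hard_cap) nmax = hard_cap;
	if (nmin % 2 != 0) nmin++;
	if (nmax % 2 != 0) nmax--;
	if (nmin < 2) nmin = 2;
	if (nmax < 2) nmax = 2;
	if (nmin > nmax) nmin = nmax;
	unsigned candidate = (lg_mul < 0) ?
	    slab_nregs >> -lg_mul : slab_nregs << lg_mul;
	if (candidate % 2 != 0) candidate++; /* flushing halves it */
	if (candidate <= nmin) return nmin;
	if (candidate <= nmax) return candidate;
	return nmax;
}

int
main() {
	/* 512 regions, multiplier 2^-1, bounds [20, 200] -> hits the max. */
	std::printf("%u\n", clamp_ncached_max(512, -1, 20, 200, 65534));
	/* 8 regions -> candidate 4 is below the min, clamped up to 20. */
	std::printf("%u\n", clamp_ncached_max(8, -1, 20, 200, 65534));
	return 0;
}
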
+
+bool
+tcache_boot(tsdn_t *tsdn, base_t *base) {
+ tcache_maxclass = sz_s2u(opt_tcache_max);
+ assert(tcache_maxclass <= TCACHE_MAXCLASS_LIMIT);
+ nhbins = sz_size2index(tcache_maxclass) + 1;
if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
malloc_mutex_rank_exclusive)) {
return true;
}
- nhbins = sz_size2index(tcache_maxclass) + 1;
-
- /* Initialize tcache_bin_info. */
- tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
- * sizeof(cache_bin_info_t), CACHELINE);
+ /* Initialize tcache_bin_info. See comments in tcache_init(). */
+ unsigned n_reserved_bins = nhbins < SC_NBINS ? SC_NBINS : nhbins;
+ size_t size = n_reserved_bins * sizeof(cache_bin_info_t);
+ tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, base, size,
+ CACHELINE);
if (tcache_bin_info == NULL) {
return true;
}
- stack_nelms = 0;
- unsigned i;
- for (i = 0; i < SC_NBINS; i++) {
- if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
- tcache_bin_info[i].ncached_max =
- TCACHE_NSLOTS_SMALL_MIN;
- } else if ((bin_infos[i].nregs << 1) <=
- TCACHE_NSLOTS_SMALL_MAX) {
- tcache_bin_info[i].ncached_max =
- (bin_infos[i].nregs << 1);
- } else {
- tcache_bin_info[i].ncached_max =
- TCACHE_NSLOTS_SMALL_MAX;
- }
- stack_nelms += tcache_bin_info[i].ncached_max;
+
+ for (szind_t i = 0; i < nhbins; i++) {
+ unsigned ncached_max = tcache_ncached_max_compute(i);
+ cache_bin_info_init(&tcache_bin_info[i], ncached_max);
}
- for (; i < nhbins; i++) {
- tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
- stack_nelms += tcache_bin_info[i].ncached_max;
+ for (szind_t i = nhbins; i < SC_NBINS; i++) {
+ /* Disabled small bins. */
+ cache_bin_info_init(&tcache_bin_info[i], 0);
+ assert(tcache_small_bin_disabled(i, NULL));
}
+ cache_bin_info_compute_alloc(tcache_bin_info, nhbins,
+ &tcache_bin_alloc_size, &tcache_bin_alloc_alignment);
+
return false;
}
void
tcache_prefork(tsdn_t *tsdn) {
- if (!config_prof && opt_tcache) {
- malloc_mutex_prefork(tsdn, &tcaches_mtx);
- }
+ malloc_mutex_prefork(tsdn, &tcaches_mtx);
}
void
tcache_postfork_parent(tsdn_t *tsdn) {
- if (!config_prof && opt_tcache) {
- malloc_mutex_postfork_parent(tsdn, &tcaches_mtx);
- }
+ malloc_mutex_postfork_parent(tsdn, &tcaches_mtx);
}
void
tcache_postfork_child(tsdn_t *tsdn) {
- if (!config_prof && opt_tcache) {
- malloc_mutex_postfork_child(tsdn, &tcaches_mtx);
- }
+ malloc_mutex_postfork_child(tsdn, &tcaches_mtx);
+}
+
+void tcache_assert_initialized(tcache_t *tcache) {
+ assert(!cache_bin_still_zero_initialized(&tcache->bins[0]));
}
diff --git a/contrib/jemalloc/src/thread_event.c b/contrib/jemalloc/src/thread_event.c
new file mode 100644
index 000000000000..37eb5827d3c9
--- /dev/null
+++ b/contrib/jemalloc/src/thread_event.c
@@ -0,0 +1,343 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/thread_event.h"
+
+/*
+ * Signatures for event specific functions. These functions should be defined
+ * by the modules owning each event. The signatures here verify that the
+ * definitions follow the right format.
+ *
+ * The first two are functions computing new / postponed event wait time. New
+ * event wait time is the time till the next event if an event is currently
+ * being triggered; postponed event wait time is the time till the next event
+ * if an event should be triggered but needs to be postponed, e.g. when the TSD
+ * is not nominal or during reentrancy.
+ *
+ * The third is the event handler function, which is called whenever an event
+ * is triggered. The parameter is the elapsed time since the last time an
+ * event of the same type was triggered.
+ */
+#define E(event, condition_unused, is_alloc_event_unused) \
+uint64_t event##_new_event_wait(tsd_t *tsd); \
+uint64_t event##_postponed_event_wait(tsd_t *tsd); \
+void event##_event_handler(tsd_t *tsd, uint64_t elapsed);
+
+ITERATE_OVER_ALL_EVENTS
+#undef E
+
+/* Signatures for internal functions fetching elapsed time. */
+#define E(event, condition_unused, is_alloc_event_unused) \
+static uint64_t event##_fetch_elapsed(tsd_t *tsd);
+
+ITERATE_OVER_ALL_EVENTS
+#undef E
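
The E()/ITERATE_OVER_ALL_EVENTS pairing is the classic X-macro pattern: one central event list, expanded repeatedly with a different definition of E each time, so declarations, handlers, and dispatch can never drift out of sync. A self-contained illustration (the event names here are invented):

#include <cstdio>

/* The single source of truth: every expansion site reuses this list. */
#define ITERATE_OVER_ALL_EVENTS \
	E(tick) \
	E(sample)

/* Expansion 1: generate one handler definition per event. */
#define E(event) \
static void \
event##_event_handler(void) { \
	std::printf("handled: " #event "\n"); \
}
ITERATE_OVER_ALL_EVENTS
#undef E

int
main() {
	/* Expansion 2: dispatch to every handler, driven by the same list. */
#define E(event) event##_event_handler();
	ITERATE_OVER_ALL_EVENTS
#undef E
	return 0;
}
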
+
+static uint64_t
+tcache_gc_fetch_elapsed(tsd_t *tsd) {
+ return TE_INVALID_ELAPSED;
+}
+
+static uint64_t
+tcache_gc_dalloc_fetch_elapsed(tsd_t *tsd) {
+ return TE_INVALID_ELAPSED;
+}
+
+static uint64_t
+prof_sample_fetch_elapsed(tsd_t *tsd) {
+ uint64_t last_event = thread_allocated_last_event_get(tsd);
+ uint64_t last_sample_event = prof_sample_last_event_get(tsd);
+ prof_sample_last_event_set(tsd, last_event);
+ return last_event - last_sample_event;
+}
+
+static uint64_t
+stats_interval_fetch_elapsed(tsd_t *tsd) {
+ uint64_t last_event = thread_allocated_last_event_get(tsd);
+ uint64_t last_stats_event = stats_interval_last_event_get(tsd);
+ stats_interval_last_event_set(tsd, last_event);
+ return last_event - last_stats_event;
+}
+
+static uint64_t
+peak_alloc_fetch_elapsed(tsd_t *tsd) {
+ return TE_INVALID_ELAPSED;
+}
+
+static uint64_t
+peak_dalloc_fetch_elapsed(tsd_t *tsd) {
+ return TE_INVALID_ELAPSED;
+}
+
+/* Per event facilities done. */
+
+static bool
+te_ctx_has_active_events(te_ctx_t *ctx) {
+ assert(config_debug);
+#define E(event, condition, alloc_event) \
+ if (condition && alloc_event == ctx->is_alloc) { \
+ return true; \
+ }
+ ITERATE_OVER_ALL_EVENTS
+#undef E
+ return false;
+}
+
+static uint64_t
+te_next_event_compute(tsd_t *tsd, bool is_alloc) {
+ uint64_t wait = TE_MAX_START_WAIT;
+#define E(event, condition, alloc_event) \
+ if (is_alloc == alloc_event && condition) { \
+ uint64_t event_wait = \
+ event##_event_wait_get(tsd); \
+ assert(event_wait <= TE_MAX_START_WAIT); \
+ if (event_wait > 0U && event_wait < wait) { \
+ wait = event_wait; \
+ } \
+ }
+
+ ITERATE_OVER_ALL_EVENTS
+#undef E
+ assert(wait <= TE_MAX_START_WAIT);
+ return wait;
+}
+
+static void
+te_assert_invariants_impl(tsd_t *tsd, te_ctx_t *ctx) {
+ uint64_t current_bytes = te_ctx_current_bytes_get(ctx);
+ uint64_t last_event = te_ctx_last_event_get(ctx);
+ uint64_t next_event = te_ctx_next_event_get(ctx);
+ uint64_t next_event_fast = te_ctx_next_event_fast_get(ctx);
+
+ assert(last_event != next_event);
+ if (next_event > TE_NEXT_EVENT_FAST_MAX || !tsd_fast(tsd)) {
+ assert(next_event_fast == 0U);
+ } else {
+ assert(next_event_fast == next_event);
+ }
+
+ /* The subtraction is intentionally susceptible to underflow. */
+ uint64_t interval = next_event - last_event;
+
+ /* The subtraction is intentionally susceptible to underflow. */
+ assert(current_bytes - last_event < interval);
+ uint64_t min_wait = te_next_event_compute(tsd, te_ctx_is_alloc(ctx));
+ /*
+ * next_event should only have been pushed up, except when no event is
+ * active and the TSD was just initialized. The last_event == 0U guard
+ * below is stronger than needed, but having an exactly accurate guard
+ * is more complicated to implement.
+ */
+ assert((!te_ctx_has_active_events(ctx) && last_event == 0U) ||
+ interval == min_wait ||
+ (interval < min_wait && interval == TE_MAX_INTERVAL));
+}
+
+void
+te_assert_invariants_debug(tsd_t *tsd) {
+ te_ctx_t ctx;
+ te_ctx_get(tsd, &ctx, true);
+ te_assert_invariants_impl(tsd, &ctx);
+
+ te_ctx_get(tsd, &ctx, false);
+ te_assert_invariants_impl(tsd, &ctx);
+}
+
+/*
+ * Synchronization around the fast threshold in tsd --
+ * There are two threads to consider in the synchronization here:
+ * - The owner of the tsd being updated by a slow path change
+ * - The remote thread, doing that slow path change.
+ *
+ * As a design constraint, we want to ensure that a slow-path transition cannot
+ * be ignored for arbitrarily long, and that if the remote thread causes a
+ * slow-path transition and then communicates with the owner thread that it has
+ * occurred, then the owner will go down the slow path on the next allocator
+ * operation (so that we don't want to just wait until the owner hits its slow
+ * path reset condition on its own).
+ *
+ * Here's our strategy to do that:
+ *
+ * The remote thread will update the slow-path stores to TSD variables, issue a
+ * SEQ_CST fence, and then update the TSD next_event_fast counter. The owner
+ * thread will update next_event_fast, issue an SEQ_CST fence, and then check
+ * its TSD to see if it's on the slow path.
+ *
+ * This is fairly straightforward when 64-bit atomics are supported. Assume that
+ * the remote fence is sandwiched between two owner fences in the reset pathway.
+ * The case where there is no preceding or trailing owner fence (i.e. because
+ * the owner thread is near the beginning or end of its life) can be analyzed
+ * similarly. The owner store to next_event_fast preceding the earlier owner
+ * fence will be earlier in coherence order than the remote store to it, so that
+ * the owner thread will go down the slow path once the store becomes visible to
+ * it, which is no later than the time of the second fence.
+ *
+ * The case where we don't support 64-bit atomics is trickier, since word
+ * tearing is possible. We'll repeat the same analysis, and look at the two
+ * owner fences sandwiching the remote fence. The next_event_fast stores done
+ * alongside the earlier owner fence cannot overwrite any of the remote stores
+ * (since they precede the earlier owner fence in sb, which precedes the remote
+ * fence in sc, which precedes the remote stores in sb). After the second owner
+ * fence there will be a re-check of the slow-path variables anyways, so the
+ * "owner will notice that it's on the slow path eventually" guarantee is
+ * satisfied. To make sure that the out-of-band-messaging constraint is as well,
+ * note that either the message passing is sequenced before the second owner
+ * fence (in which case the remote stores happen before the second set of owner
+ * stores, so malloc sees a value of zero for next_event_fast and goes down the
+ * slow path), or it is not (in which case the owner sees the tsd slow-path
+ * writes on its previous update). This leaves open the possibility that the
+ * remote thread will (at some arbitrary point in the future) zero out one half
+ * of the owner thread's next_event_fast, but that's always safe (it just sends
+ * it down the slow path earlier).
+ */
+static void
+te_ctx_next_event_fast_update(te_ctx_t *ctx) {
+ uint64_t next_event = te_ctx_next_event_get(ctx);
+ uint64_t next_event_fast = (next_event <= TE_NEXT_EVENT_FAST_MAX) ?
+ next_event : 0U;
+ te_ctx_next_event_fast_set(ctx, next_event_fast);
+}
+
+void
+te_recompute_fast_threshold(tsd_t *tsd) {
+ if (tsd_state_get(tsd) != tsd_state_nominal) {
+ /* Check first because this is also called on purgatory. */
+ te_next_event_fast_set_non_nominal(tsd);
+ return;
+ }
+
+ te_ctx_t ctx;
+ te_ctx_get(tsd, &ctx, true);
+ te_ctx_next_event_fast_update(&ctx);
+ te_ctx_get(tsd, &ctx, false);
+ te_ctx_next_event_fast_update(&ctx);
+
+ atomic_fence(ATOMIC_SEQ_CST);
+ if (tsd_state_get(tsd) != tsd_state_nominal) {
+ te_next_event_fast_set_non_nominal(tsd);
+ }
+}
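
The comment block above describes a store/fence/store vs. store/fence/load handshake. Reduced to two variables, the 64-bit-atomics case looks like the sketch below (a toy model of the ordering argument only; the real code keeps this state in TSD and uses jemalloc's own atomics layer):

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <thread>

/* "slow" is the slow-path state a remote thread publishes;
 * next_event_fast == 0 forces the owner onto its slow path. */
static std::atomic<bool> slow{false};
static std::atomic<uint64_t> next_event_fast{1000};

static void
remote(void) {
	slow.store(true, std::memory_order_relaxed);
	std::atomic_thread_fence(std::memory_order_seq_cst);
	next_event_fast.store(0, std::memory_order_relaxed);
}

static void
owner_reset(void) {
	next_event_fast.store(1000, std::memory_order_relaxed);
	std::atomic_thread_fence(std::memory_order_seq_cst);
	/*
	 * With SEQ_CST fences on both sides, either this load sees
	 * slow == true, or the remote zero survives in next_event_fast;
	 * either way the owner ends up taking the slow path.
	 */
	if (slow.load(std::memory_order_relaxed)) {
		next_event_fast.store(0, std::memory_order_relaxed);
	}
}

int
main() {
	std::thread r(remote), o(owner_reset);
	r.join();
	o.join();
	std::printf("next_event_fast=%llu slow=%d\n",
	    (unsigned long long)next_event_fast.load(), (int)slow.load());
	return 0;
}
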
+
+static void
+te_adjust_thresholds_helper(tsd_t *tsd, te_ctx_t *ctx,
+ uint64_t wait) {
+ /*
+ * The next threshold based on future events can only be adjusted after
+ * progressing the last_event counter (which is set to current).
+ */
+ assert(te_ctx_current_bytes_get(ctx) == te_ctx_last_event_get(ctx));
+ assert(wait <= TE_MAX_START_WAIT);
+
+ uint64_t next_event = te_ctx_last_event_get(ctx) + (wait <=
+ TE_MAX_INTERVAL ? wait : TE_MAX_INTERVAL);
+ te_ctx_next_event_set(tsd, ctx, next_event);
+}
+
+static uint64_t
+te_clip_event_wait(uint64_t event_wait) {
+ assert(event_wait > 0U);
+ if (TE_MIN_START_WAIT > 1U &&
+ unlikely(event_wait < TE_MIN_START_WAIT)) {
+ event_wait = TE_MIN_START_WAIT;
+ }
+ if (TE_MAX_START_WAIT < UINT64_MAX &&
+ unlikely(event_wait > TE_MAX_START_WAIT)) {
+ event_wait = TE_MAX_START_WAIT;
+ }
+ return event_wait;
+}
+
+void
+te_event_trigger(tsd_t *tsd, te_ctx_t *ctx) {
+ /* usize has already been added to thread_allocated. */
+ uint64_t bytes_after = te_ctx_current_bytes_get(ctx);
+ /* The subtraction is intentionally susceptible to underflow. */
+ uint64_t accumbytes = bytes_after - te_ctx_last_event_get(ctx);
+
+ te_ctx_last_event_set(ctx, bytes_after);
+
+ bool allow_event_trigger = tsd_nominal(tsd) &&
+ tsd_reentrancy_level_get(tsd) == 0;
+ bool is_alloc = ctx->is_alloc;
+ uint64_t wait = TE_MAX_START_WAIT;
+
+#define E(event, condition, alloc_event) \
+ bool is_##event##_triggered = false; \
+ if (is_alloc == alloc_event && condition) { \
+ uint64_t event_wait = event##_event_wait_get(tsd); \
+ assert(event_wait <= TE_MAX_START_WAIT); \
+ if (event_wait > accumbytes) { \
+ event_wait -= accumbytes; \
+ } else if (!allow_event_trigger) { \
+ event_wait = event##_postponed_event_wait(tsd); \
+ } else { \
+ is_##event##_triggered = true; \
+ event_wait = event##_new_event_wait(tsd); \
+ } \
+ event_wait = te_clip_event_wait(event_wait); \
+ event##_event_wait_set(tsd, event_wait); \
+ if (event_wait < wait) { \
+ wait = event_wait; \
+ } \
+ }
+
+ ITERATE_OVER_ALL_EVENTS
+#undef E
+
+ assert(wait <= TE_MAX_START_WAIT);
+ te_adjust_thresholds_helper(tsd, ctx, wait);
+ te_assert_invariants(tsd);
+
+#define E(event, condition, alloc_event) \
+ if (is_alloc == alloc_event && condition && \
+ is_##event##_triggered) { \
+ assert(allow_event_trigger); \
+ uint64_t elapsed = event##_fetch_elapsed(tsd); \
+ event##_event_handler(tsd, elapsed); \
+ }
+
+ ITERATE_OVER_ALL_EVENTS
+#undef E
+
+ te_assert_invariants(tsd);
+}
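
Stripped of the macro expansion and TSD plumbing, the trigger logic is a byte-counting threshold scheme. A toy model with a single event and a fixed wait (jemalloc derives per-event waits, e.g. from a PRNG; the numbers here are invented):

#include <cstdint>
#include <cstdio>

static uint64_t last_event = 0;
static uint64_t next_event = 4096;
static const uint64_t event_wait = 4096; /* fixed, for illustration */

static void
on_alloc(uint64_t *thread_allocated, uint64_t usize) {
	*thread_allocated += usize;
	/* Fast path: threshold not reached yet (wraparound-tolerant form). */
	if (*thread_allocated - last_event < next_event - last_event) {
		return;
	}
	/* Slow path (cf. te_event_trigger): account elapsed bytes, re-arm. */
	uint64_t accumbytes = *thread_allocated - last_event;
	last_event = *thread_allocated;
	next_event = last_event + event_wait;
	std::printf("event fired after %llu bytes\n",
	    (unsigned long long)accumbytes);
}

int
main() {
	uint64_t thread_allocated = 0;
	for (int i = 0; i < 100; i++) {
		on_alloc(&thread_allocated, 100); /* fires every ~41 allocs */
	}
	return 0;
}
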
+
+static void
+te_init(tsd_t *tsd, bool is_alloc) {
+ te_ctx_t ctx;
+ te_ctx_get(tsd, &ctx, is_alloc);
+ /*
+ * Reset the last event to current, which starts the events from a
+ * clean state. This is necessary when re-initializing the tsd event
+ * counters.
+ *
+ * The event counters maintain a relationship with the current bytes:
+ * last_event <= current < next_event. When a reinit happens (e.g.
+ * reincarnated tsd), the last event needs progressing because all
+ * events start fresh from the current bytes.
+ */
+ te_ctx_last_event_set(&ctx, te_ctx_current_bytes_get(&ctx));
+
+ uint64_t wait = TE_MAX_START_WAIT;
+#define E(event, condition, alloc_event) \
+ if (is_alloc == alloc_event && condition) { \
+ uint64_t event_wait = event##_new_event_wait(tsd); \
+ event_wait = te_clip_event_wait(event_wait); \
+ event##_event_wait_set(tsd, event_wait); \
+ if (event_wait < wait) { \
+ wait = event_wait; \
+ } \
+ }
+
+ ITERATE_OVER_ALL_EVENTS
+#undef E
+ te_adjust_thresholds_helper(tsd, &ctx, wait);
+}
+
+void
+tsd_te_init(tsd_t *tsd) {
+ /* Make sure the bytes accumulated on event_trigger cannot overflow. */
+ assert(TE_MAX_INTERVAL <= UINT64_MAX - SC_LARGE_MAXCLASS + 1);
+ te_init(tsd, true);
+ te_init(tsd, false);
+ te_assert_invariants(tsd);
+}
diff --git a/contrib/jemalloc/src/ticker.c b/contrib/jemalloc/src/ticker.c
index d7b8cd26c068..790b5c20079c 100644
--- a/contrib/jemalloc/src/ticker.c
+++ b/contrib/jemalloc/src/ticker.c
@@ -1,3 +1,32 @@
-#define JEMALLOC_TICKER_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+/*
+ * To avoid using floating point math down core paths (still necessary because
+ * versions of the glibc dynamic loader that did not preserve xmm registers are
+ * still somewhat common, requiring us to be compilable with -mno-sse), and also
+ * to avoid generally expensive library calls, we use a precomputed table of
+ * values. We want to sample U uniformly on [0, 1], and then compute
+ * ceil(log(u)/log(1-1/nticks)). We're mostly interested in the case where
+ * nticks is reasonably big, so 1/log(1-1/nticks) is well-approximated by
+ * -nticks.
+ *
+ * To compute log(u), we sample an integer in [1, 64] and divide, then just look
+ * up results in a table. As a space-compression mechanism, we store these as
+ * uint8_t by dividing the range (255) by the highest-magnitude value the log
+ * can take on, and using that as a multiplier. We then have to divide by that
+ * multiplier at the end of the computation.
+ *
+ * The values here are computed in src/ticker.py
+ */
+
+const uint8_t ticker_geom_table[1 << TICKER_GEOM_NBITS] = {
+ 254, 211, 187, 169, 156, 144, 135, 127,
+ 120, 113, 107, 102, 97, 93, 89, 85,
+ 81, 77, 74, 71, 68, 65, 62, 60,
+ 57, 55, 53, 50, 48, 46, 44, 42,
+ 40, 39, 37, 35, 33, 32, 30, 29,
+ 27, 26, 24, 23, 21, 20, 19, 18,
+ 16, 15, 14, 13, 12, 10, 9, 8,
+ 7, 6, 5, 4, 3, 2, 1, 0
+};
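
For reference, the multiplier that goes with this table is 61 (the `mul` that src/ticker.py prints: floor(255 / log 64)). One plausible way to consume the table, shown as a standalone sketch rather than jemalloc's actual fast path: pick a uniform 6-bit index, look up the scaled -log(u), then multiply by nticks and divide the multiplier back out.

#include <cstdint>
#include <cstdio>
#include <random>

/* Copy of the table above; entry i holds round(-61 * log((i+1)/64)). */
static const uint8_t geom_table[64] = {
	254, 211, 187, 169, 156, 144, 135, 127,
	120, 113, 107, 102, 97, 93, 89, 85,
	81, 77, 74, 71, 68, 65, 62, 60,
	57, 55, 53, 50, 48, 46, 44, 42,
	40, 39, 37, 35, 33, 32, 30, 29,
	27, 26, 24, 23, 21, 20, 19, 18,
	16, 15, 14, 13, 12, 10, 9, 8,
	7, 6, 5, 4, 3, 2, 1, 0
};

/* ceil(log(u)/log(1-1/nticks)) ~= -nticks*log(u) = table * nticks / 61. */
static uint32_t
geom_sample(uint64_t bits, uint32_t nticks) {
	return (uint32_t)(((uint64_t)geom_table[bits & 63] * nticks) / 61);
}

int
main() {
	std::mt19937_64 rng(12345);
	uint64_t sum = 0;
	const int n = 1000000;
	for (int i = 0; i < n; i++) {
		sum += geom_sample(rng(), 100);
	}
	/* The mean should land near nticks = 100. */
	std::printf("mean = %.2f\n", (double)sum / n);
	return 0;
}
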
diff --git a/contrib/jemalloc/src/ticker.py b/contrib/jemalloc/src/ticker.py
new file mode 100755
index 000000000000..3807740c30f2
--- /dev/null
+++ b/contrib/jemalloc/src/ticker.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python3
+
+import math
+
+# Must match TICKER_GEOM_NBITS
+lg_table_size = 6
+table_size = 2**lg_table_size
+byte_max = 255
+mul = math.floor(-byte_max/math.log(1 / table_size))
+values = [round(-mul * math.log(i / table_size))
+ for i in range(1, table_size+1)]
+print("mul =", mul)
+print("values:")
+for i in range(table_size // 8):
+ print(", ".join((str(x) for x in values[i*8 : i*8 + 8])))
diff --git a/contrib/jemalloc/src/tsd.c b/contrib/jemalloc/src/tsd.c
index a31f6b9698e5..e8e4f3a33959 100644
--- a/contrib/jemalloc/src/tsd.c
+++ b/contrib/jemalloc/src/tsd.c
@@ -1,17 +1,14 @@
-#define JEMALLOC_TSD_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
#include "jemalloc/internal/assert.h"
+#include "jemalloc/internal/san.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
/******************************************************************************/
/* Data. */
-static unsigned ncleanups;
-static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
-
/* TSD_INITIALIZER triggers "-Wmissing-field-initializer" */
JEMALLOC_DIAGNOSTIC_PUSH
JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
@@ -74,7 +71,7 @@ tsd_in_nominal_list(tsd_t *tsd) {
* out of it here.
*/
malloc_mutex_lock(TSDN_NULL, &tsd_nominal_tsds_lock);
- ql_foreach(tsd_list, &tsd_nominal_tsds, TSD_MANGLE(tcache).tsd_link) {
+ ql_foreach(tsd_list, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) {
if (tsd == tsd_list) {
found = true;
break;
@@ -88,9 +85,9 @@ static void
tsd_add_nominal(tsd_t *tsd) {
assert(!tsd_in_nominal_list(tsd));
assert(tsd_state_get(tsd) <= tsd_state_nominal_max);
- ql_elm_new(tsd, TSD_MANGLE(tcache).tsd_link);
+ ql_elm_new(tsd, TSD_MANGLE(tsd_link));
malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
- ql_tail_insert(&tsd_nominal_tsds, tsd, TSD_MANGLE(tcache).tsd_link);
+ ql_tail_insert(&tsd_nominal_tsds, tsd, TSD_MANGLE(tsd_link));
malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
}
@@ -99,7 +96,7 @@ tsd_remove_nominal(tsd_t *tsd) {
assert(tsd_in_nominal_list(tsd));
assert(tsd_state_get(tsd) <= tsd_state_nominal_max);
malloc_mutex_lock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
- ql_remove(&tsd_nominal_tsds, tsd, TSD_MANGLE(tcache).tsd_link);
+ ql_remove(&tsd_nominal_tsds, tsd, TSD_MANGLE(tsd_link));
malloc_mutex_unlock(tsd_tsdn(tsd), &tsd_nominal_tsds_lock);
}
@@ -112,11 +109,14 @@ tsd_force_recompute(tsdn_t *tsdn) {
atomic_fence(ATOMIC_RELEASE);
malloc_mutex_lock(tsdn, &tsd_nominal_tsds_lock);
tsd_t *remote_tsd;
- ql_foreach(remote_tsd, &tsd_nominal_tsds, TSD_MANGLE(tcache).tsd_link) {
+ ql_foreach(remote_tsd, &tsd_nominal_tsds, TSD_MANGLE(tsd_link)) {
assert(tsd_atomic_load(&remote_tsd->state, ATOMIC_RELAXED)
<= tsd_state_nominal_max);
- tsd_atomic_store(&remote_tsd->state, tsd_state_nominal_recompute,
- ATOMIC_RELAXED);
+ tsd_atomic_store(&remote_tsd->state,
+ tsd_state_nominal_recompute, ATOMIC_RELAXED);
+ /* See comments in te_recompute_fast_threshold(). */
+ atomic_fence(ATOMIC_SEQ_CST);
+ te_next_event_fast_set_non_nominal(remote_tsd);
}
malloc_mutex_unlock(tsdn, &tsd_nominal_tsds_lock);
}
@@ -175,6 +175,8 @@ tsd_slow_update(tsd_t *tsd) {
old_state = tsd_atomic_exchange(&tsd->state, new_state,
ATOMIC_ACQUIRE);
} while (old_state == tsd_state_nominal_recompute);
+
+ te_recompute_fast_threshold(tsd);
}
void
@@ -207,22 +209,17 @@ tsd_state_set(tsd_t *tsd, uint8_t new_state) {
/*
* This is the tricky case. We're transitioning from
* one nominal state to another. The caller can't know
- * about any races that are occuring at the same time,
+ * about any races that are occurring at the same time,
* so we always have to recompute no matter what.
*/
tsd_slow_update(tsd);
}
}
+ te_recompute_fast_threshold(tsd);
}
-static bool
-tsd_data_init(tsd_t *tsd) {
- /*
- * We initialize the rtree context first (before the tcache), since the
- * tcache initialization depends on it.
- */
- rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
-
+static void
+tsd_prng_state_init(tsd_t *tsd) {
/*
* A nondeterministic seed based on the address of tsd reduces
* the likelihood of lockstep non-uniform cache index
@@ -230,9 +227,20 @@ tsd_data_init(tsd_t *tsd) {
* cost of test repeatability. For debug builds, instead use a
* deterministic seed.
*/
- *tsd_offset_statep_get(tsd) = config_debug ? 0 :
+ *tsd_prng_statep_get(tsd) = config_debug ? 0 :
(uint64_t)(uintptr_t)tsd;
+}
+static bool
+tsd_data_init(tsd_t *tsd) {
+ /*
+ * We initialize the rtree context first (before the tcache), since the
+ * tcache initialization depends on it.
+ */
+ rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
+ tsd_prng_state_init(tsd);
+ tsd_te_init(tsd); /* event_init may use the prng state above. */
+ tsd_san_init(tsd);
return tsd_tcache_enabled_data_init(tsd);
}
@@ -242,8 +250,6 @@ assert_tsd_data_cleanup_done(tsd_t *tsd) {
assert(!tsd_in_nominal_list(tsd));
assert(*tsd_arenap_get_unsafe(tsd) == NULL);
assert(*tsd_iarenap_get_unsafe(tsd) == NULL);
- assert(*tsd_arenas_tdata_bypassp_get_unsafe(tsd) == true);
- assert(*tsd_arenas_tdatap_get_unsafe(tsd) == NULL);
assert(*tsd_tcache_enabledp_get_unsafe(tsd) == false);
assert(*tsd_prof_tdatap_get_unsafe(tsd) == NULL);
}
@@ -258,9 +264,11 @@ tsd_data_init_nocleanup(tsd_t *tsd) {
* We set up tsd in a way that no cleanup is needed.
*/
rtree_ctx_data_init(tsd_rtree_ctxp_get_unsafe(tsd));
- *tsd_arenas_tdata_bypassp_get(tsd) = true;
*tsd_tcache_enabledp_get_unsafe(tsd) = false;
*tsd_reentrancy_levelp_get(tsd) = 1;
+ tsd_prng_state_init(tsd);
+ tsd_te_init(tsd); /* event_init may use the prng state above. */
+ tsd_san_init(tsd);
assert_tsd_data_cleanup_done(tsd);
return false;
@@ -326,6 +334,9 @@ malloc_tsd_dalloc(void *wrapper) {
}
#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
+static unsigned ncleanups;
+static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];
+
#ifndef _WIN32
JEMALLOC_EXPORT
#endif
@@ -350,23 +361,27 @@ _malloc_thread_cleanup(void) {
}
} while (again);
}
-#endif
+#ifndef _WIN32
+JEMALLOC_EXPORT
+#endif
void
-malloc_tsd_cleanup_register(bool (*f)(void)) {
+_malloc_tsd_cleanup_register(bool (*f)(void)) {
assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
cleanups[ncleanups] = f;
ncleanups++;
}
+#endif
+
static void
tsd_do_data_cleanup(tsd_t *tsd) {
prof_tdata_cleanup(tsd);
iarena_cleanup(tsd);
arena_cleanup(tsd);
- arenas_tdata_cleanup(tsd);
tcache_cleanup(tsd);
witnesses_cleanup(tsd_witness_tsdp_get_unsafe(tsd));
+ *tsd_reentrancy_levelp_get(tsd) = 1;
}
void
@@ -387,7 +402,7 @@ tsd_cleanup(void *arg) {
* is still called for testing and completeness.
*/
assert_tsd_data_cleanup_done(tsd);
- /* Fall through. */
+ JEMALLOC_FALLTHROUGH;
case tsd_state_nominal:
case tsd_state_nominal_slow:
tsd_do_data_cleanup(tsd);
@@ -418,7 +433,9 @@ tsd_t *
malloc_tsd_boot0(void) {
tsd_t *tsd;
+#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
ncleanups = 0;
+#endif
if (malloc_mutex_init(&tsd_nominal_tsds_lock, "tsd_nominal_tsds_lock",
WITNESS_RANK_OMIT, malloc_mutex_rank_exclusive)) {
return NULL;
@@ -427,7 +444,6 @@ malloc_tsd_boot0(void) {
return NULL;
}
tsd = tsd_fetch();
- *tsd_arenas_tdata_bypassp_get(tsd) = true;
return tsd;
}
@@ -437,7 +453,6 @@ malloc_tsd_boot1(void) {
tsd_t *tsd = tsd_fetch();
/* malloc_slow has been set properly. Update tsd_slow. */
tsd_slow_update(tsd);
- *tsd_arenas_tdata_bypassp_get(tsd) = false;
}
#ifdef _WIN32
diff --git a/contrib/jemalloc/src/witness.c b/contrib/jemalloc/src/witness.c
index f42b72ad1a2c..4474af04c8dc 100644
--- a/contrib/jemalloc/src/witness.c
+++ b/contrib/jemalloc/src/witness.c
@@ -1,4 +1,3 @@
-#define JEMALLOC_WITNESS_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"
@@ -15,14 +14,41 @@ witness_init(witness_t *witness, const char *name, witness_rank_t rank,
}
static void
-witness_lock_error_impl(const witness_list_t *witnesses,
- const witness_t *witness) {
- witness_t *w;
+witness_print_witness(witness_t *w, unsigned n) {
+ assert(n > 0);
+ if (n == 1) {
+ malloc_printf(" %s(%u)", w->name, w->rank);
+ } else {
+ malloc_printf(" %s(%u)X%u", w->name, w->rank, n);
+ }
+}
- malloc_printf("<jemalloc>: Lock rank order reversal:");
+static void
+witness_print_witnesses(const witness_list_t *witnesses) {
+ witness_t *w, *last = NULL;
+ unsigned n = 0;
ql_foreach(w, witnesses, link) {
- malloc_printf(" %s(%u)", w->name, w->rank);
+ if (last != NULL && w->rank > last->rank) {
+ assert(w->name != last->name);
+ witness_print_witness(last, n);
+ n = 0;
+ } else if (last != NULL) {
+ assert(w->rank == last->rank);
+ assert(w->name == last->name);
+ }
+ last = w;
+ ++n;
}
+ if (last != NULL) {
+ witness_print_witness(last, n);
+ }
+}
+
+static void
+witness_lock_error_impl(const witness_list_t *witnesses,
+ const witness_t *witness) {
+ malloc_printf("<jemalloc>: Lock rank order reversal:");
+ witness_print_witnesses(witnesses);
malloc_printf(" %s(%u)\n", witness->name, witness->rank);
abort();
}
@@ -49,13 +75,9 @@ witness_not_owner_error_t *JET_MUTABLE witness_not_owner_error =
static void
witness_depth_error_impl(const witness_list_t *witnesses,
witness_rank_t rank_inclusive, unsigned depth) {
- witness_t *w;
-
malloc_printf("<jemalloc>: Should own %u lock%s of rank >= %u:", depth,
(depth != 1) ? "s" : "", rank_inclusive);
- ql_foreach(w, witnesses, link) {
- malloc_printf(" %s(%u)", w->name, w->rank);
- }
+ witness_print_witnesses(witnesses);
malloc_printf("\n");
abort();
}
diff --git a/contrib/jemalloc/src/zone.c b/contrib/jemalloc/src/zone.c
new file mode 100644
index 000000000000..23dfdd04a91d
--- /dev/null
+++ b/contrib/jemalloc/src/zone.c
@@ -0,0 +1,469 @@
+#include "jemalloc/internal/jemalloc_preamble.h"
+#include "jemalloc/internal/jemalloc_internal_includes.h"
+
+#include "jemalloc/internal/assert.h"
+
+#ifndef JEMALLOC_ZONE
+# error "This source file is for zones on Darwin (OS X)."
+#endif
+
+/*
+ * Definitions of the following structs in malloc/malloc.h might be too old
+ * for the built binary to run on newer versions of OSX. So use the newest
+ * possible version of those structs.
+ */
+typedef struct _malloc_zone_t {
+ void *reserved1;
+ void *reserved2;
+ size_t (*size)(struct _malloc_zone_t *, const void *);
+ void *(*malloc)(struct _malloc_zone_t *, size_t);
+ void *(*calloc)(struct _malloc_zone_t *, size_t, size_t);
+ void *(*valloc)(struct _malloc_zone_t *, size_t);
+ void (*free)(struct _malloc_zone_t *, void *);
+ void *(*realloc)(struct _malloc_zone_t *, void *, size_t);
+ void (*destroy)(struct _malloc_zone_t *);
+ const char *zone_name;
+ unsigned (*batch_malloc)(struct _malloc_zone_t *, size_t, void **, unsigned);
+ void (*batch_free)(struct _malloc_zone_t *, void **, unsigned);
+ struct malloc_introspection_t *introspect;
+ unsigned version;
+ void *(*memalign)(struct _malloc_zone_t *, size_t, size_t);
+ void (*free_definite_size)(struct _malloc_zone_t *, void *, size_t);
+ size_t (*pressure_relief)(struct _malloc_zone_t *, size_t);
+} malloc_zone_t;
+
+typedef struct {
+ vm_address_t address;
+ vm_size_t size;
+} vm_range_t;
+
+typedef struct malloc_statistics_t {
+ unsigned blocks_in_use;
+ size_t size_in_use;
+ size_t max_size_in_use;
+ size_t size_allocated;
+} malloc_statistics_t;
+
+typedef kern_return_t memory_reader_t(task_t, vm_address_t, vm_size_t, void **);
+
+typedef void vm_range_recorder_t(task_t, void *, unsigned type, vm_range_t *, unsigned);
+
+typedef struct malloc_introspection_t {
+ kern_return_t (*enumerator)(task_t, void *, unsigned, vm_address_t, memory_reader_t, vm_range_recorder_t);
+ size_t (*good_size)(malloc_zone_t *, size_t);
+ boolean_t (*check)(malloc_zone_t *);
+ void (*print)(malloc_zone_t *, boolean_t);
+ void (*log)(malloc_zone_t *, void *);
+ void (*force_lock)(malloc_zone_t *);
+ void (*force_unlock)(malloc_zone_t *);
+ void (*statistics)(malloc_zone_t *, malloc_statistics_t *);
+ boolean_t (*zone_locked)(malloc_zone_t *);
+ boolean_t (*enable_discharge_checking)(malloc_zone_t *);
+ boolean_t (*disable_discharge_checking)(malloc_zone_t *);
+ void (*discharge)(malloc_zone_t *, void *);
+#ifdef __BLOCKS__
+ void (*enumerate_discharged_pointers)(malloc_zone_t *, void (^)(void *, void *));
+#else
+ void *enumerate_unavailable_without_blocks;
+#endif
+ void (*reinit_lock)(malloc_zone_t *);
+} malloc_introspection_t;
+
+extern kern_return_t malloc_get_all_zones(task_t, memory_reader_t, vm_address_t **, unsigned *);
+
+extern malloc_zone_t *malloc_default_zone(void);
+
+extern void malloc_zone_register(malloc_zone_t *zone);
+
+extern void malloc_zone_unregister(malloc_zone_t *zone);
+
+/*
+ * The malloc_default_purgeable_zone() function is only available on >= 10.6.
+ * We need to check whether it is present at runtime, thus the weak_import.
+ */
+extern malloc_zone_t *malloc_default_purgeable_zone(void)
+JEMALLOC_ATTR(weak_import);
+
+/******************************************************************************/
+/* Data. */
+
+static malloc_zone_t *default_zone, *purgeable_zone;
+static malloc_zone_t jemalloc_zone;
+static struct malloc_introspection_t jemalloc_zone_introspect;
+static pid_t zone_force_lock_pid = -1;
+
+/******************************************************************************/
+/* Function prototypes for non-inline static functions. */
+
+static size_t zone_size(malloc_zone_t *zone, const void *ptr);
+static void *zone_malloc(malloc_zone_t *zone, size_t size);
+static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
+static void *zone_valloc(malloc_zone_t *zone, size_t size);
+static void zone_free(malloc_zone_t *zone, void *ptr);
+static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
+static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
+ size_t size);
+static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
+ size_t size);
+static void zone_destroy(malloc_zone_t *zone);
+static unsigned zone_batch_malloc(struct _malloc_zone_t *zone, size_t size,
+ void **results, unsigned num_requested);
+static void zone_batch_free(struct _malloc_zone_t *zone,
+ void **to_be_freed, unsigned num_to_be_freed);
+static size_t zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal);
+static size_t zone_good_size(malloc_zone_t *zone, size_t size);
+static kern_return_t zone_enumerator(task_t task, void *data, unsigned type_mask,
+ vm_address_t zone_address, memory_reader_t reader,
+ vm_range_recorder_t recorder);
+static boolean_t zone_check(malloc_zone_t *zone);
+static void zone_print(malloc_zone_t *zone, boolean_t verbose);
+static void zone_log(malloc_zone_t *zone, void *address);
+static void zone_force_lock(malloc_zone_t *zone);
+static void zone_force_unlock(malloc_zone_t *zone);
+static void zone_statistics(malloc_zone_t *zone,
+ malloc_statistics_t *stats);
+static boolean_t zone_locked(malloc_zone_t *zone);
+static void zone_reinit_lock(malloc_zone_t *zone);
+
+/******************************************************************************/
+/*
+ * Functions.
+ */
+
+static size_t
+zone_size(malloc_zone_t *zone, const void *ptr) {
+ /*
+ * There appear to be places within Darwin (such as setenv(3)) that
+ * cause calls to this function with pointers that *no* zone owns. If
+ * we knew that all pointers were owned by *some* zone, we could split
+ * our zone into two parts, and use one as the default allocator and
+ * the other as the default deallocator/reallocator. Since that will
+ * not work in practice, we must check all pointers to ensure that they
+ * reside within a mapped extent before determining size.
+ */
+ return ivsalloc(tsdn_fetch(), ptr);
+}
+
+static void *
+zone_malloc(malloc_zone_t *zone, size_t size) {
+ return je_malloc(size);
+}
+
+static void *
+zone_calloc(malloc_zone_t *zone, size_t num, size_t size) {
+ return je_calloc(num, size);
+}
+
+static void *
+zone_valloc(malloc_zone_t *zone, size_t size) {
+ void *ret = NULL; /* Assignment avoids useless compiler warning. */
+
+ je_posix_memalign(&ret, PAGE, size);
+
+ return ret;
+}
+
+static void
+zone_free(malloc_zone_t *zone, void *ptr) {
+ if (ivsalloc(tsdn_fetch(), ptr) != 0) {
+ je_free(ptr);
+ return;
+ }
+
+ free(ptr);
+}
+
+static void *
+zone_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
+ if (ivsalloc(tsdn_fetch(), ptr) != 0) {
+ return je_realloc(ptr, size);
+ }
+
+ return realloc(ptr, size);
+}
+
+static void *
+zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size) {
+ void *ret = NULL; /* Assignment avoids useless compiler warning. */
+
+ je_posix_memalign(&ret, alignment, size);
+
+ return ret;
+}
+
+static void
+zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size) {
+ size_t alloc_size;
+
+ alloc_size = ivsalloc(tsdn_fetch(), ptr);
+ if (alloc_size != 0) {
+ assert(alloc_size == size);
+ je_free(ptr);
+ return;
+ }
+
+ free(ptr);
+}
+
+static void
+zone_destroy(malloc_zone_t *zone) {
+ /* This function should never be called. */
+ not_reached();
+}
+
+static unsigned
+zone_batch_malloc(struct _malloc_zone_t *zone, size_t size, void **results,
+ unsigned num_requested) {
+ unsigned i;
+
+ for (i = 0; i < num_requested; i++) {
+ results[i] = je_malloc(size);
+ if (!results[i])
+ break;
+ }
+
+ return i;
+}
+
+static void
+zone_batch_free(struct _malloc_zone_t *zone, void **to_be_freed,
+ unsigned num_to_be_freed) {
+ unsigned i;
+
+ for (i = 0; i < num_to_be_freed; i++) {
+ zone_free(zone, to_be_freed[i]);
+ to_be_freed[i] = NULL;
+ }
+}
+
+static size_t
+zone_pressure_relief(struct _malloc_zone_t *zone, size_t goal) {
+ return 0;
+}
+
+static size_t
+zone_good_size(malloc_zone_t *zone, size_t size) {
+ if (size == 0) {
+ size = 1;
+ }
+ return sz_s2u(size);
+}
+
+static kern_return_t
+zone_enumerator(task_t task, void *data, unsigned type_mask,
+ vm_address_t zone_address, memory_reader_t reader,
+ vm_range_recorder_t recorder) {
+ return KERN_SUCCESS;
+}
+
+static boolean_t
+zone_check(malloc_zone_t *zone) {
+ return true;
+}
+
+static void
+zone_print(malloc_zone_t *zone, boolean_t verbose) {
+}
+
+static void
+zone_log(malloc_zone_t *zone, void *address) {
+}
+
+static void
+zone_force_lock(malloc_zone_t *zone) {
+ if (isthreaded) {
+ /*
+ * See the note in zone_force_unlock, below, to see why we need
+ * this.
+ */
+ assert(zone_force_lock_pid == -1);
+ zone_force_lock_pid = getpid();
+ jemalloc_prefork();
+ }
+}
+
+static void
+zone_force_unlock(malloc_zone_t *zone) {
+ /*
+ * zone_force_lock and zone_force_unlock are the entry points to the
+ * forking machinery on OS X. The tricky thing is, the child is not
+ * allowed to unlock mutexes locked in the parent, even if owned by the
+ * forking thread (and the mutex type we use in OS X will fail an assert
+ * if we try). In the child, we can get away with reinitializing all
+ * the mutexes, which has the effect of unlocking them. In the parent,
+ * doing this would mean we wouldn't wake any waiters blocked on the
+ * mutexes we unlock. So, we record the pid of the current thread in
+ * zone_force_lock, and use that to detect if we're in the parent or
+ * child here, to decide which unlock logic we need.
+ */
+ if (isthreaded) {
+ assert(zone_force_lock_pid != -1);
+ if (getpid() == zone_force_lock_pid) {
+ jemalloc_postfork_parent();
+ } else {
+ jemalloc_postfork_child();
+ }
+ zone_force_lock_pid = -1;
+ }
+}
+
+static void
+zone_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
+ /* We make no effort to actually fill the values */
+ stats->blocks_in_use = 0;
+ stats->size_in_use = 0;
+ stats->max_size_in_use = 0;
+ stats->size_allocated = 0;
+}
+
+static boolean_t
+zone_locked(malloc_zone_t *zone) {
+ /* Pretend no lock is being held */
+ return false;
+}
+
+static void
+zone_reinit_lock(malloc_zone_t *zone) {
+ /*
+ * As of OSX 10.12, this function is only used when force_unlock would
+ * be used if the zone version were < 9. So just use force_unlock.
+ */
+ zone_force_unlock(zone);
+}
+
+static void
+zone_init(void) {
+ jemalloc_zone.size = zone_size;
+ jemalloc_zone.malloc = zone_malloc;
+ jemalloc_zone.calloc = zone_calloc;
+ jemalloc_zone.valloc = zone_valloc;
+ jemalloc_zone.free = zone_free;
+ jemalloc_zone.realloc = zone_realloc;
+ jemalloc_zone.destroy = zone_destroy;
+ jemalloc_zone.zone_name = "jemalloc_zone";
+ jemalloc_zone.batch_malloc = zone_batch_malloc;
+ jemalloc_zone.batch_free = zone_batch_free;
+ jemalloc_zone.introspect = &jemalloc_zone_introspect;
+ jemalloc_zone.version = 9;
+ jemalloc_zone.memalign = zone_memalign;
+ jemalloc_zone.free_definite_size = zone_free_definite_size;
+ jemalloc_zone.pressure_relief = zone_pressure_relief;
+
+ jemalloc_zone_introspect.enumerator = zone_enumerator;
+ jemalloc_zone_introspect.good_size = zone_good_size;
+ jemalloc_zone_introspect.check = zone_check;
+ jemalloc_zone_introspect.print = zone_print;
+ jemalloc_zone_introspect.log = zone_log;
+ jemalloc_zone_introspect.force_lock = zone_force_lock;
+ jemalloc_zone_introspect.force_unlock = zone_force_unlock;
+ jemalloc_zone_introspect.statistics = zone_statistics;
+ jemalloc_zone_introspect.zone_locked = zone_locked;
+ jemalloc_zone_introspect.enable_discharge_checking = NULL;
+ jemalloc_zone_introspect.disable_discharge_checking = NULL;
+ jemalloc_zone_introspect.discharge = NULL;
+#ifdef __BLOCKS__
+ jemalloc_zone_introspect.enumerate_discharged_pointers = NULL;
+#else
+ jemalloc_zone_introspect.enumerate_unavailable_without_blocks = NULL;
+#endif
+ jemalloc_zone_introspect.reinit_lock = zone_reinit_lock;
+}
+
+static malloc_zone_t *
+zone_default_get(void) {
+ malloc_zone_t **zones = NULL;
+ unsigned int num_zones = 0;
+
+ /*
+ * On OSX 10.12, malloc_default_zone returns a special zone that is not
+ * present in the list of registered zones. That zone uses a "lite zone"
+ * if one is present (apparently enabled when malloc stack logging is
+ * enabled), or the first registered zone otherwise. In practice this
+ * means unless malloc stack logging is enabled, the first registered
+ * zone is the default. So get the list of zones to get the first one,
+ * instead of relying on malloc_default_zone.
+ */
+ if (KERN_SUCCESS != malloc_get_all_zones(0, NULL,
+ (vm_address_t**)&zones, &num_zones)) {
+ /*
+ * Reset the value in case the failure happened after it was
+ * set.
+ */
+ num_zones = 0;
+ }
+
+ if (num_zones) {
+ return zones[0];
+ }
+
+ return malloc_default_zone();
+}
+
+/* As written, this function can only promote jemalloc_zone. */
+static void
+zone_promote(void) {
+ malloc_zone_t *zone;
+
+ do {
+ /*
+ * Unregister and reregister the default zone. On OSX >= 10.6,
+ * unregistering takes the last registered zone and places it
+ * at the location of the specified zone. Unregistering the
+ * default zone thus makes the last registered one the default.
+ * On OSX < 10.6, unregistering shifts all registered zones.
+ * The first registered zone then becomes the default.
+ */
+ malloc_zone_unregister(default_zone);
+ malloc_zone_register(default_zone);
+
+ /*
+ * On OSX 10.6, having the default purgeable zone appear before
+ * the default zone makes some things crash because it thinks it
+ * owns pointers allocated by the default zone. We thus
+ * unregister/re-register it in order to ensure it's always
+ * after the default zone. On OSX < 10.6, there is no purgeable
+ * zone, so this does nothing. On OSX >= 10.6, unregistering
+ * replaces the purgeable zone with the last registered zone
+ * above, i.e. the default zone. Registering it again then puts
+ * it at the end, obviously after the default zone.
+ */
+ if (purgeable_zone != NULL) {
+ malloc_zone_unregister(purgeable_zone);
+ malloc_zone_register(purgeable_zone);
+ }
+
+ zone = zone_default_get();
+ } while (zone != &jemalloc_zone);
+}
+
+JEMALLOC_ATTR(constructor)
+void
+zone_register(void) {
+ /*
+ * If something else replaced the system default zone allocator, don't
+ * register jemalloc's.
+ */
+ default_zone = zone_default_get();
+ if (!default_zone->zone_name || strcmp(default_zone->zone_name,
+ "DefaultMallocZone") != 0) {
+ return;
+ }
+
+ /*
+ * The default purgeable zone is created lazily by OSX's libc. It uses
+ * the default zone when it is created for "small" allocations
+ * (< 15 KiB), but assumes the default zone is a scalable_zone. This
+ * obviously fails when the default zone is the jemalloc zone, so
+ * malloc_default_purgeable_zone() is called beforehand so that the
+ * default purgeable zone is created when the default zone is still
+ * a scalable_zone. As purgeable zones only exist on >= 10.6, we need
+ * to check for the existence of malloc_default_purgeable_zone() at
+ * run time.
+ */
+ purgeable_zone = (malloc_default_purgeable_zone == NULL) ? NULL :
+ malloc_default_purgeable_zone();
+
+ /* Register the custom zone. At this point it won't be the default. */
+ zone_init();
+ malloc_zone_register(&jemalloc_zone);
+
+ /* Promote the custom zone to be default. */
+ zone_promote();
+}
diff --git a/contrib/kyua/doc/kyuafile.5.in b/contrib/kyua/doc/kyuafile.5.in
index ae1e4fe40e32..43f00816d407 100644
--- a/contrib/kyua/doc/kyuafile.5.in
+++ b/contrib/kyua/doc/kyuafile.5.in
@@ -290,6 +290,16 @@ it can run.
.Pp
ATF:
.Va require.files
+.It Va required_kmods
+Whitespace-separated list of kernel module names that the test requires to
+be loaded before it can run.
+This requirement checking is platform-dependent.
+It is ignored on unsupported platforms.
+Supported platforms:
+.Fx .
+.Pp
+ATF:
+.Va require.kmods
.It Va required_memory
Amount of physical memory that the test needs to run successfully.
.Pp
diff --git a/contrib/kyua/drivers/report_junit_test.cpp b/contrib/kyua/drivers/report_junit_test.cpp
index 0f009c6befd3..1c0929c0fef2 100644
--- a/contrib/kyua/drivers/report_junit_test.cpp
+++ b/contrib/kyua/drivers/report_junit_test.cpp
@@ -70,6 +70,7 @@ static const char* const default_metadata =
"required_configs is empty\n"
"required_disk_space = 0\n"
"required_files is empty\n"
+ "required_kmods is empty\n"
"required_memory = 0\n"
"required_programs is empty\n"
"required_user is empty\n"
@@ -89,6 +90,7 @@ static const char* const overriden_metadata =
"required_configs is empty\n"
"required_disk_space = 0\n"
"required_files is empty\n"
+ "required_kmods is empty\n"
"required_memory = 0\n"
"required_programs is empty\n"
"required_user is empty\n"
@@ -228,6 +230,7 @@ ATF_TEST_CASE_BODY(junit_metadata__overrides)
+ "required_configs = config1\n"
+ "required_disk_space = 456\n"
+ "required_files = file1\n"
+ + "required_kmods is empty\n"
+ "required_memory = 123\n"
+ "required_programs = prog1\n"
+ "required_user = root\n"
diff --git a/contrib/kyua/engine/atf_list.cpp b/contrib/kyua/engine/atf_list.cpp
index e0c4170605d1..5c74a80be913 100644
--- a/contrib/kyua/engine/atf_list.cpp
+++ b/contrib/kyua/engine/atf_list.cpp
@@ -133,10 +133,8 @@ engine::parse_atf_metadata(const model::properties_map& props)
mdbuilder.set_string("required_disk_space", value);
} else if (name == "require.files") {
mdbuilder.set_string("required_files", value);
-#ifdef __FreeBSD__
} else if (name == "require.kmods") {
mdbuilder.set_string("required_kmods", value);
-#endif
} else if (name == "require.machine") {
mdbuilder.set_string("allowed_platforms", value);
} else if (name == "require.memory") {
diff --git a/contrib/kyua/engine/requirements.cpp b/contrib/kyua/engine/requirements.cpp
index dff43e531a57..d5838b83f33a 100644
--- a/contrib/kyua/engine/requirements.cpp
+++ b/contrib/kyua/engine/requirements.cpp
@@ -41,10 +41,6 @@
#include "utils/sanity.hpp"
#include "utils/units.hpp"
-#ifdef __FreeBSD__
-#include <libutil.h>
-#endif
-
namespace config = utils::config;
namespace fs = utils::fs;
namespace passwd = utils::passwd;
@@ -224,26 +220,6 @@ check_required_programs(const model::paths_set& required_programs)
}
-#ifdef __FreeBSD__
-/// Checks if all required kmods are loaded.
-///
-/// \param required_programs Set of kmods.
-///
-/// \return Empty if the required kmods are all loaded or an error
-/// message otherwise.
-static std::string
-check_required_kmods(const model::strings_set& required_kmods)
-{
- for (model::strings_set::const_iterator iter = required_kmods.begin();
- iter != required_kmods.end(); iter++) {
- if (!kld_isloaded((*iter).c_str()))
- return F("Required kmod '%s' not loaded") % *iter;
- }
- return "";
-}
-#endif
-
-
/// Checks if the current system has the specified amount of memory.
///
/// \param required_memory Amount of required physical memory, or zero if not
@@ -289,9 +265,29 @@ check_required_disk_space(const units::bytes& required_disk_space,
}
+/// List of registered extra requirement checkers.
+///
+/// Use register_reqs_checker() to add an entry to this global list.
+static std::vector< std::shared_ptr< engine::reqs_checker > > _reqs_checkers;
+
+
} // anonymous namespace
+const std::vector< std::shared_ptr< engine::reqs_checker > >
+engine::reqs_checkers()
+{
+ return _reqs_checkers;
+}
+
+void
+engine::register_reqs_checker(
+ const std::shared_ptr< engine::reqs_checker > checker)
+{
+ _reqs_checkers.push_back(checker);
+}
+
+
/// Checks if all the requirements specified by the test case are met.
///
/// \param md The test metadata.
@@ -336,12 +332,6 @@ engine::check_reqs(const model::metadata& md, const config::tree& cfg,
if (!reason.empty())
return reason;
-#ifdef __FreeBSD__
- reason = check_required_kmods(md.required_kmods());
- if (!reason.empty())
- return reason;
-#endif
-
reason = check_required_memory(md.required_memory());
if (!reason.empty())
return reason;
@@ -351,6 +341,13 @@ engine::check_reqs(const model::metadata& md, const config::tree& cfg,
if (!reason.empty())
return reason;
+ // Run any extra registered requirement checkers.
+ for (auto& checker : engine::reqs_checkers()) {
+ reason = checker->exec(md, cfg, test_suite, work_directory);
+ if (!reason.empty())
+ return reason;
+ }
+
INV(reason.empty());
return reason;
}
diff --git a/contrib/kyua/engine/requirements.hpp b/contrib/kyua/engine/requirements.hpp
index a36a938b3034..92e80c5122aa 100644
--- a/contrib/kyua/engine/requirements.hpp
+++ b/contrib/kyua/engine/requirements.hpp
@@ -44,6 +44,32 @@ namespace engine {
std::string check_reqs(const model::metadata&, const utils::config::tree&,
const std::string&, const utils::fs::path&);
+/// Abstract interface of a requirement checker.
+class reqs_checker {
+public:
+ /// Constructor.
+ reqs_checker() {}
+
+ /// Destructor.
+ virtual ~reqs_checker() {}
+
+ /// Run the checker.
+ virtual std::string exec(const model::metadata&,
+ const utils::config::tree&,
+ const std::string&,
+ const utils::fs::path&) const = 0;
+};
+
+/// Register an extra requirement checker.
+///
+/// \param checker A requirement checker.
+void register_reqs_checker(const std::shared_ptr< reqs_checker > checker);
+
+/// Returns the list of registered extra requirement checkers.
+///
+/// \return A vector of pointers to extra requirement checkers.
+const std::vector< std::shared_ptr< reqs_checker > > reqs_checkers();
+
} // namespace engine
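
This hook makes the requirement pipeline extensible beyond the built-in checks. A minimal sketch of a hypothetical out-of-tree checker built on the interface above; the class name and its trivial pass-through behavior are illustrative, not part of the commit:

    // Hypothetical checker: always reports the requirement as satisfied.
    #include <memory>
    #include "engine/requirements.hpp"

    class always_ok_checker : public engine::reqs_checker {
    public:
        std::string exec(const model::metadata&,
                         const utils::config::tree&,
                         const std::string&,
                         const utils::fs::path&) const override
        {
            return "";  // empty string means "requirement met"
        }
    };

    static void
    install(void)
    {
        engine::register_reqs_checker(
            std::make_shared< always_ok_checker >());
    }

check_reqs() then calls exec() on every registered checker and fails the test with the first non-empty reason returned.
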
diff --git a/contrib/kyua/integration/cmd_report_junit_test.sh b/contrib/kyua/integration/cmd_report_junit_test.sh
index d86228acf7e5..49b8c5790167 100644
--- a/contrib/kyua/integration/cmd_report_junit_test.sh
+++ b/contrib/kyua/integration/cmd_report_junit_test.sh
@@ -104,6 +104,7 @@ is_exclusive = false
required_configs is empty
required_disk_space = 0
required_files is empty
+required_kmods is empty
required_memory = 0
required_programs is empty
required_user is empty
@@ -144,6 +145,7 @@ is_exclusive = false
required_configs is empty
required_disk_space = 0
required_files is empty
+required_kmods is empty
required_memory = 0
required_programs is empty
required_user is empty
@@ -222,6 +224,7 @@ is_exclusive = false
required_configs is empty
required_disk_space = 0
required_files is empty
+required_kmods is empty
required_memory = 0
required_programs is empty
required_user is empty
@@ -262,6 +265,7 @@ is_exclusive = false
required_configs is empty
required_disk_space = 0
required_files is empty
+required_kmods is empty
required_memory = 0
required_programs is empty
required_user is empty
diff --git a/contrib/kyua/integration/cmd_report_test.sh b/contrib/kyua/integration/cmd_report_test.sh
index 8b2b97f9cb4a..1fc1932d3c47 100644
--- a/contrib/kyua/integration/cmd_report_test.sh
+++ b/contrib/kyua/integration/cmd_report_test.sh
@@ -258,6 +258,7 @@ Metadata:
required_configs is empty
required_disk_space = 0
required_files is empty
+ required_kmods is empty
required_memory = 0
required_programs is empty
required_user is empty
diff --git a/contrib/kyua/model/metadata.cpp b/contrib/kyua/model/metadata.cpp
index a5a9a1315964..afb31435a238 100644
--- a/contrib/kyua/model/metadata.cpp
+++ b/contrib/kyua/model/metadata.cpp
@@ -256,9 +256,7 @@ init_tree(config::tree& tree)
tree.define< bytes_node >("required_disk_space");
tree.define< paths_set_node >("required_files");
tree.define< bytes_node >("required_memory");
-#ifdef __FreeBSD__
tree.define< config::strings_set_node >("required_kmods");
-#endif
tree.define< paths_set_node >("required_programs");
tree.define< user_node >("required_user");
tree.define< delta_node >("timeout");
@@ -285,9 +283,7 @@ set_defaults(config::tree& tree)
tree.set< bytes_node >("required_disk_space", units::bytes(0));
tree.set< paths_set_node >("required_files", model::paths_set());
tree.set< bytes_node >("required_memory", units::bytes(0));
-#ifdef __FreeBSD__
tree.set< config::strings_set_node >("required_kmods", model::strings_set());
-#endif
tree.set< paths_set_node >("required_programs", model::paths_set());
tree.set< user_node >("required_user", "");
// TODO(jmmv): We shouldn't be setting a default timeout like this. See
@@ -603,20 +599,20 @@ model::metadata::required_memory(void) const
}
-#ifdef __FreeBSD__
-/// Returns the list of kmods needed by the test.
+/// Returns the list of kernel modules needed by the test.
///
-/// \return Set of strings.
+/// \return Set of kernel module names.
const model::strings_set&
model::metadata::required_kmods(void) const
{
if (_pimpl->props.is_set("required_kmods")) {
- return _pimpl->props.lookup< config::strings_set_node >("required_kmods");
+ return _pimpl->props.lookup< config::strings_set_node >(
+ "required_kmods");
} else {
- return get_defaults().lookup< config::strings_set_node >("required_kmods");
+ return get_defaults().lookup< config::strings_set_node >(
+ "required_kmods");
}
}
-#endif
/// Returns the list of programs needed by the test.
diff --git a/contrib/kyua/model/metadata.hpp b/contrib/kyua/model/metadata.hpp
index 8af6c7c161af..eee7eaf0f7c4 100644
--- a/contrib/kyua/model/metadata.hpp
+++ b/contrib/kyua/model/metadata.hpp
@@ -76,9 +76,7 @@ public:
const utils::units::bytes& required_disk_space(void) const;
const paths_set& required_files(void) const;
const utils::units::bytes& required_memory(void) const;
-#ifdef __FreeBSD__
const strings_set& required_kmods(void) const;
-#endif
const paths_set& required_programs(void) const;
const std::string& required_user(void) const;
const utils::datetime::delta& timeout(void) const;
@@ -124,9 +122,7 @@ public:
metadata_builder& set_required_disk_space(const utils::units::bytes&);
metadata_builder& set_required_files(const paths_set&);
metadata_builder& set_required_memory(const utils::units::bytes&);
-#ifdef __FreeBSD__
metadata_builder& set_required_kmods(const strings_set&);
-#endif
metadata_builder& set_required_programs(const paths_set&);
metadata_builder& set_required_user(const std::string&);
metadata_builder& set_string(const std::string&, const std::string&);
diff --git a/contrib/kyua/model/metadata_test.cpp b/contrib/kyua/model/metadata_test.cpp
index b4c3dff5b029..bdb1d3655c33 100644
--- a/contrib/kyua/model/metadata_test.cpp
+++ b/contrib/kyua/model/metadata_test.cpp
@@ -57,6 +57,7 @@ ATF_TEST_CASE_BODY(defaults)
ATF_REQUIRE(md.required_configs().empty());
ATF_REQUIRE_EQ(units::bytes(0), md.required_disk_space());
ATF_REQUIRE(md.required_files().empty());
+ ATF_REQUIRE(md.required_kmods().empty());
ATF_REQUIRE_EQ(units::bytes(0), md.required_memory());
ATF_REQUIRE(md.required_programs().empty());
ATF_REQUIRE(md.required_user().empty());
@@ -322,6 +323,7 @@ ATF_TEST_CASE_BODY(to_properties)
props["required_configs"] = "";
props["required_disk_space"] = "0";
props["required_files"] = "bar foo";
+ props["required_kmods"] = "";
props["required_memory"] = "1.00K";
props["required_programs"] = "";
props["required_user"] = "";
@@ -412,7 +414,7 @@ ATF_TEST_CASE_BODY(output__defaults)
"has_cleanup='false', is_exclusive='false', "
"required_configs='', "
"required_disk_space='0', required_files='', "
- "required_memory='0', "
+ "required_kmods='', required_memory='0', "
"required_programs='', required_user='', timeout='300'}",
str.str());
}
@@ -435,7 +437,7 @@ ATF_TEST_CASE_BODY(output__some_values)
"has_cleanup='false', is_exclusive='true', "
"required_configs='', "
"required_disk_space='0', required_files='bar foo', "
- "required_memory='1.00K', "
+ "required_kmods='', required_memory='1.00K', "
"required_programs='', required_user='', timeout='300'}",
str.str());
}
diff --git a/contrib/kyua/model/test_case_test.cpp b/contrib/kyua/model/test_case_test.cpp
index 1e2597d1501e..29df7ee35863 100644
--- a/contrib/kyua/model/test_case_test.cpp
+++ b/contrib/kyua/model/test_case_test.cpp
@@ -204,7 +204,7 @@ ATF_TEST_CASE_BODY(test_case__output)
"has_cleanup='false', "
"is_exclusive='false', "
"required_configs='', required_disk_space='0', required_files='', "
- "required_memory='0', "
+ "required_kmods='', required_memory='0', "
"required_programs='', required_user='', timeout='300'}}",
str.str());
}
diff --git a/contrib/kyua/model/test_program_test.cpp b/contrib/kyua/model/test_program_test.cpp
index ddfbc430387c..f7a84d770fc0 100644
--- a/contrib/kyua/model/test_program_test.cpp
+++ b/contrib/kyua/model/test_program_test.cpp
@@ -547,7 +547,7 @@ check_output__no_test_cases(void)
"description='', execenv='', execenv_jail_params='', "
"has_cleanup='false', is_exclusive='false', "
"required_configs='', required_disk_space='0', required_files='', "
- "required_memory='0', "
+ "required_kmods='', required_memory='0', "
"required_programs='', required_user='', timeout='300'}, "
"test_cases=map()}",
str.str());
@@ -597,7 +597,7 @@ check_output__some_test_cases(void)
"description='', execenv='', execenv_jail_params='', "
"has_cleanup='false', is_exclusive='false', "
"required_configs='', required_disk_space='0', required_files='', "
- "required_memory='0', "
+ "required_kmods='', required_memory='0', "
"required_programs='', required_user='', timeout='300'}, "
"test_cases=map("
"another-name=test_case{name='another-name', "
@@ -605,14 +605,14 @@ check_output__some_test_cases(void)
"description='', execenv='', execenv_jail_params='', "
"has_cleanup='false', is_exclusive='false', "
"required_configs='', required_disk_space='0', required_files='', "
- "required_memory='0', "
+ "required_kmods='', required_memory='0', "
"required_programs='', required_user='', timeout='300'}}, "
"the-name=test_case{name='the-name', "
"metadata=metadata{allowed_architectures='a', allowed_platforms='foo', "
"custom.bar='baz', description='', execenv='', execenv_jail_params='', "
"has_cleanup='false', is_exclusive='false', "
"required_configs='', required_disk_space='0', required_files='', "
- "required_memory='0', "
+ "required_kmods='', required_memory='0', "
"required_programs='', required_user='', timeout='300'}})}",
str.str());
}
diff --git a/contrib/kyua/os/freebsd/main.cpp b/contrib/kyua/os/freebsd/main.cpp
index 13e5dcf0e023..700284b64b78 100644
--- a/contrib/kyua/os/freebsd/main.cpp
+++ b/contrib/kyua/os/freebsd/main.cpp
@@ -31,6 +31,9 @@
#include "engine/execenv/execenv.hpp"
#include "os/freebsd/execenv_jail_manager.hpp"
+#include "engine/requirements.hpp"
+#include "os/freebsd/reqs_checker_kmods.hpp"
+
namespace execenv = engine::execenv;
/// FreeBSD related features initialization.
@@ -50,5 +53,13 @@ freebsd::main(const int, const char* const* const)
std::shared_ptr< execenv::manager >(new freebsd::execenv_jail_manager())
);
+#ifdef __FreeBSD__
+ engine::register_reqs_checker(
+ std::shared_ptr< engine::reqs_checker >(
+ new freebsd::reqs_checker_kmods()
+ )
+ );
+#endif
+
return 0;
}
diff --git a/contrib/kyua/os/freebsd/reqs_checker_kmods.cpp b/contrib/kyua/os/freebsd/reqs_checker_kmods.cpp
new file mode 100644
index 000000000000..3ae3446a7815
--- /dev/null
+++ b/contrib/kyua/os/freebsd/reqs_checker_kmods.cpp
@@ -0,0 +1,50 @@
+// Copyright 2025 The Kyua Authors.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors
+// may be used to endorse or promote products derived from this software
+// without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "os/freebsd/reqs_checker_kmods.hpp"
+
+#include "model/metadata.hpp"
+
+extern "C" {
+#include "libutil.h"
+}
+
+std::string
+freebsd::reqs_checker_kmods::exec(const model::metadata& md,
+ const utils::config::tree&,
+ const std::string&,
+ const utils::fs::path&) const
+{
+ std::string reason;
+ for (const auto& kmod : md.required_kmods())
+ if (!::kld_isloaded(kmod.c_str()))
+ reason += " " + kmod;
+ if (!reason.empty())
+ reason = "Required kmods are not loaded:" + reason + ".";
+ return reason;
+}
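
kld_isloaded(3) from FreeBSD's libutil is what does the actual probing here. A minimal standalone sketch of the same call, FreeBSD-only and with an illustrative module name:

    // Standalone probe mirroring the checker's use of kld_isloaded(3).
    #include <cstdio>

    extern "C" {
    #include <libutil.h>
    }

    int main(void)
    {
        const char* kmod = "zfs";  // illustrative module name
        std::printf("%s is %sloaded\n", kmod,
                    ::kld_isloaded(kmod) ? "" : "not ");
        return 0;
    }
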
diff --git a/contrib/kyua/os/freebsd/reqs_checker_kmods.hpp b/contrib/kyua/os/freebsd/reqs_checker_kmods.hpp
new file mode 100644
index 000000000000..8c7c69e35d07
--- /dev/null
+++ b/contrib/kyua/os/freebsd/reqs_checker_kmods.hpp
@@ -0,0 +1,54 @@
+// Copyright 2025 The Kyua Authors.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors
+// may be used to endorse or promote products derived from this software
+// without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/// \file os/freebsd/reqs_checker_kmods.hpp
+/// FreeBSD kernel module requirement checker.
+
+#if !defined(FREEBSD_REQS_CHECKER_KMODS)
+#define FREEBSD_REQS_CHECKER_KMODS
+
+#include "engine/requirements.hpp"
+#include "model/metadata_fwd.hpp"
+#include "utils/config/tree_fwd.hpp"
+#include "utils/fs/path_fwd.hpp"
+
+namespace freebsd {
+
+
+class reqs_checker_kmods : public engine::reqs_checker {
+public:
+ std::string exec(const model::metadata&,
+ const utils::config::tree&,
+ const std::string&,
+ const utils::fs::path&) const;
+};
+
+
+} // namespace freebsd
+
+#endif // !defined(FREEBSD_REQS_CHECKER_KMODS)
diff --git a/contrib/lua/Makefile b/contrib/lua/Makefile
index 8efa2eb3fdd6..6e21588476df 100644
--- a/contrib/lua/Makefile
+++ b/contrib/lua/Makefile
@@ -46,7 +46,7 @@ TO_MAN= lua.1 luac.1
# Lua version and release.
V= 5.4
-R= $V.6
+R= $V.8
# Targets start here.
all: $(PLAT)
diff --git a/contrib/lua/README b/contrib/lua/README
index 1ae97165babe..b251d296f687 100644
--- a/contrib/lua/README
+++ b/contrib/lua/README
@@ -1,5 +1,5 @@
-This is Lua 5.4.6, released on 02 May 2023.
+This is Lua 5.4.8, released on 21 May 2025.
For installation instructions, license details, and
further information about Lua, see doc/readme.html.
diff --git a/contrib/lua/doc/contents.html b/contrib/lua/doc/contents.html
index 1231e6d2481d..18b677dbac8f 100644
--- a/contrib/lua/doc/contents.html
+++ b/contrib/lua/doc/contents.html
@@ -10,7 +10,7 @@
<BODY>
<H1>
-<A HREF="http://www.lua.org/"><IMG SRC="logo.gif" ALT="Lua"></A>
+<A HREF="https://www.lua.org/"><IMG SRC="logo.gif" ALT="Lua"></A>
Lua 5.4 Reference Manual
</H1>
@@ -18,7 +18,7 @@ Lua 5.4 Reference Manual
The reference manual is the official definition of the Lua language.
<BR>
For a complete introduction to Lua programming, see the book
-<A HREF="http://www.lua.org/pil/">Programming in Lua</A>.
+<A HREF="https://www.lua.org/pil/">Programming in Lua</A>.
<DIV CLASS="menubar">
<A HREF="manual.html">start</A>
@@ -27,14 +27,14 @@ For a complete introduction to Lua programming, see the book
&middot;
<A HREF="#index">index</A>
&middot;
-<A HREF="http://www.lua.org/manual/">other versions</A>
+<A HREF="https://www.lua.org/manual/">other versions</A>
</DIV>
<P>
<SMALL>
-Copyright &copy; 2020&ndash;2023 Lua.org, PUC-Rio.
+Copyright &copy; 2020&ndash;2025 Lua.org, PUC-Rio.
Freely available under the terms of the
-<A HREF="http://www.lua.org/license.html">Lua license</A>.
+<A HREF="https://www.lua.org/license.html">Lua license</A>.
</SMALL>
<H2><A NAME="contents">Contents</A></H2>
@@ -668,10 +668,10 @@ Freely available under the terms of the
<P CLASS="footer">
Last update:
-Sat Apr 1 17:57:05 UTC 2023
+Wed May 21 21:11:33 UTC 2025
</P>
<!--
-Last change: revised for Lua 5.4.5
+Last change: revised for Lua 5.4.8
-->
</BODY>
diff --git a/contrib/lua/doc/lua.1 b/contrib/lua/doc/lua.1
index 3f472fd81f62..3c9e000234e3 100644
--- a/contrib/lua/doc/lua.1
+++ b/contrib/lua/doc/lua.1
@@ -1,5 +1,5 @@
-.\" $Id: lua.man,v 1.14 2022/09/23 09:06:36 lhf Exp $
-.TH LUA 1 "$Date: 2022/09/23 09:06:36 $"
+.\" $Id: lua.man,v 1.14 2024/05/08 18:48:27 lhf Exp $
+.TH LUA 1 "$Date: 2024/05/08 18:48:27 $"
.SH NAME
lua \- Lua interpreter
.SH SYNOPSIS
@@ -123,7 +123,7 @@ and the version-neutral variants are ignored.
Code to be executed before command line options and scripts.
.TP
.B LUA_PATH, LUA_PATH_5_4
-Initial value of package.cpath,
+Initial value of package.path,
the path used by require to search for Lua loaders.
.TP
.B LUA_CPATH, LUA_CPATH_5_4
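
The manual page previously pointed LUA_PATH at package.cpath; it in fact seeds package.path. A minimal sketch of observing this from a host program, assuming a standard Lua 5.4 build:

    // Print the package.path that LUA_PATH (or the luaconf.h default) produced.
    #include <cstdio>

    extern "C" {
    #include <lua.h>
    #include <lauxlib.h>
    #include <lualib.h>
    }

    int main(void)
    {
        lua_State *L = luaL_newstate();
        luaL_openlibs(L);
        lua_getglobal(L, "package");
        lua_getfield(L, -1, "path");
        std::printf("package.path = %s\n", lua_tostring(L, -1));
        lua_close(L);
        return 0;
    }
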
diff --git a/contrib/lua/doc/lua.css b/contrib/lua/doc/lua.css
index cbd0799d1525..9013b445c654 100644
--- a/contrib/lua/doc/lua.css
+++ b/contrib/lua/doc/lua.css
@@ -143,6 +143,7 @@ table.book td.cover {
table.book img {
border: solid #000080 1px ;
+ border-radius: 2px ;
}
table.book span {
diff --git a/contrib/lua/doc/manual.html b/contrib/lua/doc/manual.html
index 0af688b343c7..8239bc2a964f 100644
--- a/contrib/lua/doc/manual.html
+++ b/contrib/lua/doc/manual.html
@@ -10,7 +10,7 @@
<BODY>
<H1>
-<A HREF="http://www.lua.org/"><IMG SRC="logo.gif" ALT="Lua"></A>
+<A HREF="https://www.lua.org/"><IMG SRC="logo.gif" ALT="Lua"></A>
Lua 5.4 Reference Manual
</H1>
@@ -19,9 +19,9 @@ by Roberto Ierusalimschy, Luiz Henrique de Figueiredo, Waldemar Celes
<P>
<SMALL>
-Copyright &copy; 2020&ndash;2023 Lua.org, PUC-Rio.
+Copyright &copy; 2020&ndash;2025 Lua.org, PUC-Rio.
Freely available under the terms of the
-<a href="http://www.lua.org/license.html">Lua license</a>.
+<a href="https://www.lua.org/license.html">Lua license</a>.
</SMALL>
<DIV CLASS="menubar">
@@ -29,7 +29,7 @@ Freely available under the terms of the
&middot;
<A HREF="contents.html#index">index</A>
&middot;
-<A HREF="http://www.lua.org/manual/">other versions</A>
+<A HREF="https://www.lua.org/manual/">other versions</A>
</DIV>
<!-- ====================================================================== -->
@@ -391,7 +391,7 @@ Whenever there is an error,
an <em>error object</em>
is propagated with information about the error.
Lua itself only generates errors whose error object is a string,
-but programs may generate errors with
+but programs can generate errors with
any value as the error object.
It is up to the Lua program or its host to handle such error objects.
For historical reasons,
@@ -401,7 +401,7 @@ even though it does not have to be a string.
<p>
When you use <a href="#pdf-xpcall"><code>xpcall</code></a> (or <a href="#lua_pcall"><code>lua_pcall</code></a>, in C)
-you may give a <em>message handler</em>
+you can give a <em>message handler</em>
to be called in case of errors.
This function is called with the original error object
and returns a new error object.
@@ -453,7 +453,7 @@ which is then called a <em>metamethod</em>.
In the previous example, the key is the string "<code>__add</code>"
and the metamethod is the function that performs the addition.
Unless stated otherwise,
-a metamethod may in fact be any callable value,
+a metamethod can in fact be any callable value,
which is either a function or a value with a <code>__call</code> metamethod.
@@ -1725,7 +1725,7 @@ labels in Lua are considered statements too:
<p>
A label is visible in the entire block where it is defined,
except inside nested functions.
-A goto may jump to any visible label as long as it does not
+A goto can jump to any visible label as long as it does not
enter into the scope of a local variable.
A label should not be declared
where a label with the same name is visible,
@@ -5571,7 +5571,7 @@ otherwise, returns <code>NULL</code>.
<hr><h3><a name="lua_toclose"><code>lua_toclose</code></a></h3><p>
-<span class="apii">[-0, +0, <em>m</em>]</span>
+<span class="apii">[-0, +0, <em>v</em>]</span>
<pre>void lua_toclose (lua_State *L, int index);</pre>
<p>
@@ -5592,6 +5592,11 @@ unless previously deactivated by <a href="#lua_closeslot"><code>lua_closeslot</c
<p>
+This function raises an error if the value at the given slot
+neither has a <code>__close</code> metamethod nor is a false value.
+
+
+<p>
This function should not be called for an index
that is equal to or below an active to-be-closed slot.
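
The clarified wording matches the API's contract: the value marked to-be-closed must carry a __close metamethod or be a false value, or lua_toclose raises an error. A minimal sketch of preparing a valid slot; error handling is elided:

    // Mark a stack slot as to-be-closed; without a __close metamethod
    // (or a false value) lua_toclose would raise an error.
    extern "C" {
    #include <lua.h>
    }

    static void push_closeable(lua_State *L)
    {
        lua_newtable(L);              /* the value to be closed */
        lua_createtable(L, 0, 1);     /* its metatable */
        lua_pushcfunction(L, [](lua_State *L) -> int {
            /* __close(obj, errobj): release resources here */
            return 0;
        });
        lua_setfield(L, -2, "__close");
        lua_setmetatable(L, -2);
        lua_toclose(L, -1);           /* closed when the slot goes out of scope */
    }
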
@@ -5664,6 +5669,12 @@ after its last character (as in&nbsp;C),
but can contain other zeros in its body.
+<p>
+This function can raise memory errors only
+when converting a number to a string
+(as then it may create a new string).
+
+
@@ -11276,13 +11287,13 @@ The returned table can contain all the fields returned by <a href="#lua_getinfo"
with the string <code>what</code> describing which fields to fill in.
The default for <code>what</code> is to get all information available,
except the table of valid lines.
-If present,
-the option '<code>f</code>'
+The option '<code>f</code>'
adds a field named <code>func</code> with the function itself.
-If present,
-the option '<code>L</code>'
-adds a field named <code>activelines</code> with the table of
-valid lines.
+The option '<code>L</code>' adds a field named <code>activelines</code>
+with the table of valid lines,
+provided the function is a Lua function.
+If the function has no debug information,
+the table is empty.
<p>
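
The same distinction exists in the C API: lua_getinfo with 'L' pushes the activelines table, and for a function without debug information that table is empty (nil is pushed for C functions). A minimal sketch, assuming the function of interest sits on top of the stack:

    // Fetch the activelines table of the function on top of the stack.
    extern "C" {
    #include <lua.h>
    }

    static void push_activelines(lua_State *L)
    {
        lua_Debug ar;
        lua_pushvalue(L, -1);       /* '>' in the what string pops a copy */
        lua_getinfo(L, ">L", &ar);  /* pushes the table of valid lines */
    }
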
@@ -11619,6 +11630,10 @@ Lua does not consult any environment variables.
In particular,
the values of <a href="#pdf-package.path"><code>package.path</code></a> and <a href="#pdf-package.cpath"><code>package.cpath</code></a>
are set with the default paths defined in <code>luaconf.h</code>.
+To signal to the libraries that this option is on,
+the stand-alone interpreter sets the field
+<code>"LUA_NOENV"</code> in the registry to a true value.
+Other libraries may consult this field for the same purpose.
<p>
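
A library can consult that registry field exactly as described; a minimal sketch:

    // Returns 1 if the stand-alone interpreter was started with -E.
    extern "C" {
    #include <lua.h>
    }

    static int ignoring_env(lua_State *L)
    {
        lua_getfield(L, LUA_REGISTRYINDEX, "LUA_NOENV");
        int noenv = lua_toboolean(L, -1);
        lua_pop(L, 1);
        return noenv;
    }
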
@@ -12033,13 +12048,12 @@ and LiteralString, see <a href="#3.1">&sect;3.1</a>.)
-
<P CLASS="footer">
Last update:
-Tue May 2 20:09:38 UTC 2023
+Wed May 21 21:09:59 UTC 2025
</P>
<!--
-Last change: revised for Lua 5.4.6
+Last change: revised for Lua 5.4.8
-->
</body></html>
diff --git a/contrib/lua/doc/readme.html b/contrib/lua/doc/readme.html
index 918ec8ed9378..a4eb59dd38c6 100644
--- a/contrib/lua/doc/readme.html
+++ b/contrib/lua/doc/readme.html
@@ -29,7 +29,7 @@ tt, kbd, code {
<BODY>
<H1>
-<A HREF="http://www.lua.org/"><IMG SRC="logo.gif" ALT="Lua"></A>
+<A HREF="https://www.lua.org/"><IMG SRC="logo.gif" ALT="Lua"></A>
Welcome to Lua 5.4
</H1>
@@ -49,29 +49,31 @@ Welcome to Lua 5.4
<P>
Lua is a powerful, efficient, lightweight, embeddable scripting language
developed by a
-<A HREF="http://www.lua.org/authors.html">team</A>
+<A HREF="https://www.lua.org/authors.html">team</A>
at
-<A HREF="http://www.puc-rio.br/">PUC-Rio</A>,
+<A HREF="https://www.puc-rio.br/">PUC-Rio</A>,
the Pontifical Catholic University of Rio de Janeiro in Brazil.
Lua is
<A HREF="#license">free software</A>
used in
-<A HREF="http://www.lua.org/uses.html">many products and projects</A>
+<A HREF="https://www.lua.org/uses.html">many products and projects</A>
around the world.
<P>
Lua's
-<A HREF="http://www.lua.org/">official web site</A>
+<A HREF="https://www.lua.org/">official website</A>
provides complete information
about Lua,
including
an
-<A HREF="http://www.lua.org/about.html">executive summary</A>
+<A HREF="https://www.lua.org/about.html">executive summary</A>,
+tips on
+<A HREF="https://www.lua.org/start.html">getting started</A>,
and
updated
-<A HREF="http://www.lua.org/docs.html">documentation</A>,
+<A HREF="https://www.lua.org/docs.html">documentation</A>,
especially the
-<A HREF="http://www.lua.org/manual/5.4/">reference manual</A>,
+<A HREF="https://www.lua.org/manual/5.4/">reference manual</A>,
which may differ slightly from the
<A HREF="contents.html">local copy</A>
distributed in this package.
@@ -79,7 +81,7 @@ distributed in this package.
<H2><A NAME="install">Installing Lua</A></H2>
<P>
Lua is distributed in
-<A HREF="http://www.lua.org/ftp/">source</A>
+<A HREF="https://www.lua.org/ftp/">source</A>
form.
You need to build it before using it.
Building Lua should be straightforward
@@ -88,7 +90,7 @@ Lua is implemented in pure ANSI C and compiles unmodified in all known
platforms that have an ANSI C compiler.
Lua also compiles unmodified as C++.
The instructions given below for building Lua are for Unix-like platforms,
-such as Linux and Mac OS X.
+such as Linux and macOS.
See also
<A HREF="#other">instructions for other systems</A>
and
@@ -97,7 +99,7 @@ and
<P>
If you don't have the time or the inclination to compile Lua yourself,
get a binary from
-<A HREF="http://lua-users.org/wiki/LuaBinaries">LuaBinaries</A>.
+<A HREF="https://luabinaries.sourceforge.net">LuaBinaries</A>.
<H3>Building Lua</H3>
<P>
@@ -107,7 +109,7 @@ Here are the details.
<OL>
<LI>
Open a terminal window and move to
-the top-level directory, which is named <TT>lua-5.4.6</TT>.
+the top-level directory, which is named <TT>lua-5.4.8</TT>.
The <TT>Makefile</TT> there controls both the build process and the installation process.
<P>
<LI>
@@ -211,8 +213,8 @@ then try "<KBD>make linux-readline MYLIBS=-ltermcap</KBD>".
record the changes you've made.
<P>
- On the other hand, if you need to customize some Lua features, you'll need
- to edit <TT>src/luaconf.h</TT> before building and installing Lua.
+ On the other hand, if you need to customize some Lua features,
+ edit <TT>src/luaconf.h</TT> before building and installing Lua.
The edited file will be the one installed, and
it will be used by any Lua clients that you build, to ensure consistency.
Further customization is available to experts by editing the Lua sources.
@@ -241,7 +243,7 @@ compiler:
</DL>
<P>
- To use Lua as a library in your own programs, you'll need to know how to
+ To use Lua as a library in your own programs, you need to know how to
create and use libraries with your compiler. Moreover, to dynamically load
C libraries for Lua, you'll need to know how to create dynamic libraries
and you'll need to make sure that the Lua API functions are accessible to
@@ -284,11 +286,11 @@ lists the
<H2><A NAME="license">License</A></H2>
<P>
-<A HREF="http://www.opensource.org/docs/definition.php">
-<IMG SRC="osi-certified-72x60.png" ALIGN="right" ALT="[osi certified]" STYLE="padding-left: 30px ;">
+<A HREF="https://opensource.org/osd">
+<IMG SRC="OSIApproved_100X125.png" ALIGN="right" ALT="[Open Source Initiative Approved License]" STYLE="padding-left: 1em" WIDTH=50>
</A>
Lua is free software distributed under the terms of the
-<A HREF="http://www.opensource.org/licenses/mit-license.html">MIT license</A>
+<A HREF="https://opensource.org/license/mit">MIT license</A>
reproduced below;
it may be used for any purpose, including commercial purposes,
at absolutely no cost without having to ask us.
@@ -296,11 +298,11 @@ at absolutely no cost without having to ask us.
The only requirement is that if you do use Lua,
then you should give us credit by including the appropriate copyright notice somewhere in your product or its documentation.
-For details, see
-<A HREF="http://www.lua.org/license.html">this</A>.
+For details, see the
+<A HREF="https://www.lua.org/license.html">license page</A>.
<BLOCKQUOTE STYLE="padding-bottom: 0em">
-Copyright &copy; 1994&ndash;2023 Lua.org, PUC-Rio.
+Copyright &copy; 1994&ndash;2025 Lua.org, PUC-Rio.
<P>
Permission is hereby granted, free of charge, to any person obtaining a copy
@@ -327,10 +329,10 @@ THE SOFTWARE.
<P CLASS="footer">
Last update:
-Tue May 2 20:08:55 UTC 2023
+Wed May 21 21:12:01 UTC 2025
</P>
<!--
-Last change: revised for Lua 5.4.6
+Last change: revised for Lua 5.4.8
-->
</BODY>
diff --git a/contrib/lua/src/lapi.c b/contrib/lua/src/lapi.c
index 34e64af1428c..04e09cff7e0d 100644
--- a/contrib/lua/src/lapi.c
+++ b/contrib/lua/src/lapi.c
@@ -417,9 +417,9 @@ LUA_API const char *lua_tolstring (lua_State *L, int idx, size_t *len) {
o = index2value(L, idx); /* previous call may reallocate the stack */
}
if (len != NULL)
- *len = vslen(o);
+ *len = tsslen(tsvalue(o));
lua_unlock(L);
- return svalue(o);
+ return getstr(tsvalue(o));
}
@@ -1343,7 +1343,7 @@ void lua_warning (lua_State *L, const char *msg, int tocont) {
LUA_API void *lua_newuserdatauv (lua_State *L, size_t size, int nuvalue) {
Udata *u;
lua_lock(L);
- api_check(L, 0 <= nuvalue && nuvalue < USHRT_MAX, "invalid value");
+ api_check(L, 0 <= nuvalue && nuvalue < SHRT_MAX, "invalid value");
u = luaS_newudata(L, size, nuvalue);
setuvalue(L, s2v(L->top.p), u);
api_incr_top(L);
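
The tightened api_check reflects that the user-value count is held in a signed short, so the valid range is now 0 <= nuvalue < SHRT_MAX. A sketch of the call the limit applies to, with an illustrative payload type:

    // Allocate a userdatum with two user values (well under SHRT_MAX).
    extern "C" {
    #include <lua.h>
    }

    struct handle { int fd; };

    static struct handle *new_handle(lua_State *L)
    {
        return (struct handle *)lua_newuserdatauv(L, sizeof(struct handle), 2);
    }
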
diff --git a/contrib/lua/src/lauxlib.c b/contrib/lua/src/lauxlib.c
index 4ca6c6548899..923105ed3176 100644
--- a/contrib/lua/src/lauxlib.c
+++ b/contrib/lua/src/lauxlib.c
@@ -80,6 +80,7 @@ static int pushglobalfuncname (lua_State *L, lua_Debug *ar) {
int top = lua_gettop(L);
lua_getinfo(L, "f", ar); /* push function */
lua_getfield(L, LUA_REGISTRYINDEX, LUA_LOADED_TABLE);
+ luaL_checkstack(L, 6, "not enough stack"); /* slots for 'findfield' */
if (findfield(L, top + 1, 2)) {
const char *name = lua_tostring(L, -1);
if (strncmp(name, LUA_GNAME ".", 3) == 0) { /* name start with '_G.'? */
@@ -249,11 +250,13 @@ LUALIB_API int luaL_fileresult (lua_State *L, int stat, const char *fname) {
return 1;
}
else {
+ const char *msg;
luaL_pushfail(L);
+ msg = (en != 0) ? strerror(en) : "(no extra info)";
if (fname)
- lua_pushfstring(L, "%s: %s", fname, strerror(en));
+ lua_pushfstring(L, "%s: %s", fname, msg);
else
- lua_pushstring(L, strerror(en));
+ lua_pushstring(L, msg);
lua_pushinteger(L, en);
return 3;
}
@@ -732,9 +735,12 @@ static const char *getF (lua_State *L, void *ud, size_t *size) {
static int errfile (lua_State *L, const char *what, int fnameindex) {
- const char *serr = strerror(errno);
+ int err = errno;
const char *filename = lua_tostring(L, fnameindex) + 1;
- lua_pushfstring(L, "cannot %s %s: %s", what, filename, serr);
+ if (err != 0)
+ lua_pushfstring(L, "cannot %s %s: %s", what, filename, strerror(err));
+ else
+ lua_pushfstring(L, "cannot %s %s", what, filename);
lua_remove(L, fnameindex);
return LUA_ERRFILE;
}
@@ -787,6 +793,7 @@ LUALIB_API int luaL_loadfilex (lua_State *L, const char *filename,
}
else {
lua_pushfstring(L, "@%s", filename);
+ errno = 0;
lf.f = fopen(filename, "r");
if (lf.f == NULL) return errfile(L, "open", fnameindex);
}
@@ -796,6 +803,7 @@ LUALIB_API int luaL_loadfilex (lua_State *L, const char *filename,
if (c == LUA_SIGNATURE[0]) { /* binary file? */
lf.n = 0; /* remove possible newline */
if (filename) { /* "real" file? */
+ errno = 0;
lf.f = freopen(filename, "rb", lf.f); /* reopen in binary mode */
if (lf.f == NULL) return errfile(L, "reopen", fnameindex);
skipcomment(lf.f, &c); /* re-read initial portion */
@@ -803,6 +811,7 @@ LUALIB_API int luaL_loadfilex (lua_State *L, const char *filename,
}
if (c != EOF)
lf.buff[lf.n++] = c; /* 'c' is the first character of the stream */
+ errno = 0;
status = lua_load(L, getF, &lf, lua_tostring(L, -1), mode);
readstatus = ferror(lf.f);
if (filename) fclose(lf.f); /* close file (even in case of errors) */
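
These hunks apply one pattern throughout: clear errno before each stdio call so a later error report never recycles a stale value, and say "(no extra info)" when errno is zero. A minimal sketch of the same pattern in isolation:

    // errno-reset pattern: forget earlier failures before the call.
    #include <cerrno>
    #include <cstdio>
    #include <cstring>

    static const char *open_or_reason(const char *path, std::FILE **out)
    {
        errno = 0;                    /* discard any earlier failure */
        *out = std::fopen(path, "r");
        if (*out != NULL)
            return NULL;              /* success: no reason to report */
        return (errno != 0) ? std::strerror(errno) : "(no extra info)";
    }
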
@@ -933,7 +942,7 @@ LUALIB_API const char *luaL_tolstring (lua_State *L, int idx, size_t *len) {
LUALIB_API void luaL_setfuncs (lua_State *L, const luaL_Reg *l, int nup) {
luaL_checkstack(L, nup, "too many upvalues");
for (; l->name != NULL; l++) { /* fill the table with given functions */
- if (l->func == NULL) /* place holder? */
+ if (l->func == NULL) /* placeholder? */
lua_pushboolean(L, 0);
else {
int i;
@@ -1025,9 +1034,14 @@ static void *l_alloc (void *ud, void *ptr, size_t osize, size_t nsize) {
}
+/*
+** Standard panic function just prints an error message. The test
+** with 'lua_type' avoids possible memory errors in 'lua_tostring'.
+*/
static int panic (lua_State *L) {
- const char *msg = lua_tostring(L, -1);
- if (msg == NULL) msg = "error object is not a string";
+ const char *msg = (lua_type(L, -1) == LUA_TSTRING)
+ ? lua_tostring(L, -1)
+ : "error object is not a string";
lua_writestringerror("PANIC: unprotected error in call to Lua API (%s)\n",
msg);
return 0; /* return to Lua to abort */
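
This default handler is installed by luaL_newstate; an embedder can substitute its own via lua_atpanic, applying the same lua_type guard. A minimal sketch:

    // Install a custom panic handler that aborts with a diagnostic.
    #include <cstdio>
    #include <cstdlib>

    extern "C" {
    #include <lua.h>
    }

    static int my_panic(lua_State *L)
    {
        const char *msg = (lua_type(L, -1) == LUA_TSTRING)
                        ? lua_tostring(L, -1)
                        : "error object is not a string";
        std::fprintf(stderr, "fatal: %s\n", msg);
        std::abort();     /* never return control to Lua */
        return 0;         /* not reached */
    }

    /* usage: lua_atpanic(L, my_panic); */
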
diff --git a/contrib/lua/src/lcode.c b/contrib/lua/src/lcode.c
index 8d6ce8c08bd2..85466a82ee1d 100644
--- a/contrib/lua/src/lcode.c
+++ b/contrib/lua/src/lcode.c
@@ -35,6 +35,7 @@
#define MAXREGS 255
+/* (note that VJMP expressions also have jumps) */
#define hasjumps(e) ((e)->t != (e)->f)
@@ -415,7 +416,7 @@ int luaK_codeABx (FuncState *fs, OpCode o, int a, unsigned int bc) {
/*
** Format and emit an 'iAsBx' instruction.
*/
-int luaK_codeAsBx (FuncState *fs, OpCode o, int a, int bc) {
+static int codeAsBx (FuncState *fs, OpCode o, int a, int bc) {
unsigned int b = bc + OFFSET_sBx;
lua_assert(getOpMode(o) == iAsBx);
lua_assert(a <= MAXARG_A && b <= MAXARG_Bx);
@@ -678,7 +679,7 @@ static int fitsBx (lua_Integer i) {
void luaK_int (FuncState *fs, int reg, lua_Integer i) {
if (fitsBx(i))
- luaK_codeAsBx(fs, OP_LOADI, reg, cast_int(i));
+ codeAsBx(fs, OP_LOADI, reg, cast_int(i));
else
luaK_codek(fs, reg, luaK_intK(fs, i));
}
@@ -687,7 +688,7 @@ void luaK_int (FuncState *fs, int reg, lua_Integer i) {
static void luaK_float (FuncState *fs, int reg, lua_Number f) {
lua_Integer fi;
if (luaV_flttointeger(f, &fi, F2Ieq) && fitsBx(fi))
- luaK_codeAsBx(fs, OP_LOADF, reg, cast_int(fi));
+ codeAsBx(fs, OP_LOADF, reg, cast_int(fi));
else
luaK_codek(fs, reg, luaK_numberK(fs, f));
}
@@ -783,7 +784,8 @@ void luaK_dischargevars (FuncState *fs, expdesc *e) {
break;
}
case VLOCAL: { /* already in a register */
- e->u.info = e->u.var.ridx;
+ int temp = e->u.var.ridx;
+ e->u.info = temp; /* (can't do a direct assignment; values overlap) */
e->k = VNONRELOC; /* becomes a non-relocatable value */
break;
}
@@ -991,7 +993,7 @@ void luaK_exp2anyregup (FuncState *fs, expdesc *e) {
** or it is a constant.
*/
void luaK_exp2val (FuncState *fs, expdesc *e) {
- if (hasjumps(e))
+ if (e->k == VJMP || hasjumps(e))
luaK_exp2anyreg(fs, e);
else
luaK_dischargevars(fs, e);
@@ -1032,7 +1034,7 @@ static int luaK_exp2K (FuncState *fs, expdesc *e) {
** in the range of R/K indices).
** Returns 1 iff expression is K.
*/
-int luaK_exp2RK (FuncState *fs, expdesc *e) {
+static int exp2RK (FuncState *fs, expdesc *e) {
if (luaK_exp2K(fs, e))
return 1;
else { /* not a constant in the right range: put it in a register */
@@ -1044,7 +1046,7 @@ int luaK_exp2RK (FuncState *fs, expdesc *e) {
static void codeABRK (FuncState *fs, OpCode o, int a, int b,
expdesc *ec) {
- int k = luaK_exp2RK(fs, ec);
+ int k = exp2RK(fs, ec);
luaK_codeABCk(fs, o, a, b, ec->u.info, k);
}
@@ -1222,7 +1224,7 @@ static void codenot (FuncState *fs, expdesc *e) {
/*
-** Check whether expression 'e' is a small literal string
+** Check whether expression 'e' is a short literal string
*/
static int isKstr (FuncState *fs, expdesc *e) {
return (e->k == VK && !hasjumps(e) && e->u.info <= MAXARG_B &&
@@ -1232,7 +1234,7 @@ static int isKstr (FuncState *fs, expdesc *e) {
/*
** Check whether expression 'e' is a literal integer.
*/
-int luaK_isKint (expdesc *e) {
+static int isKint (expdesc *e) {
return (e->k == VKINT && !hasjumps(e));
}
@@ -1242,7 +1244,7 @@ int luaK_isKint (expdesc *e) {
** proper range to fit in register C
*/
static int isCint (expdesc *e) {
- return luaK_isKint(e) && (l_castS2U(e->u.ival) <= l_castS2U(MAXARG_C));
+ return isKint(e) && (l_castS2U(e->u.ival) <= l_castS2U(MAXARG_C));
}
@@ -1251,7 +1253,7 @@ static int isCint (expdesc *e) {
** proper range to fit in register sC
*/
static int isSCint (expdesc *e) {
- return luaK_isKint(e) && fitsC(e->u.ival);
+ return isKint(e) && fitsC(e->u.ival);
}
@@ -1290,15 +1292,17 @@ void luaK_indexed (FuncState *fs, expdesc *t, expdesc *k) {
if (t->k == VUPVAL && !isKstr(fs, k)) /* upvalue indexed by non 'Kstr'? */
luaK_exp2anyreg(fs, t); /* put it in a register */
if (t->k == VUPVAL) {
- t->u.ind.t = t->u.info; /* upvalue index */
- t->u.ind.idx = k->u.info; /* literal string */
+ int temp = t->u.info; /* upvalue index */
+ lua_assert(isKstr(fs, k));
+ t->u.ind.t = temp; /* (can't do a direct assignment; values overlap) */
+ t->u.ind.idx = k->u.info; /* literal short string */
t->k = VINDEXUP;
}
else {
/* register index of the table */
t->u.ind.t = (t->k == VLOCAL) ? t->u.var.ridx: t->u.info;
if (isKstr(fs, k)) {
- t->u.ind.idx = k->u.info; /* literal string */
+ t->u.ind.idx = k->u.info; /* literal short string */
t->k = VINDEXSTR;
}
else if (isCint(k)) {
@@ -1466,7 +1470,7 @@ static void codebinK (FuncState *fs, BinOpr opr,
*/
static int finishbinexpneg (FuncState *fs, expdesc *e1, expdesc *e2,
OpCode op, int line, TMS event) {
- if (!luaK_isKint(e2))
+ if (!isKint(e2))
return 0; /* not an integer constant */
else {
lua_Integer i2 = e2->u.ival;
@@ -1599,7 +1603,7 @@ static void codeeq (FuncState *fs, BinOpr opr, expdesc *e1, expdesc *e2) {
op = OP_EQI;
r2 = im; /* immediate operand */
}
- else if (luaK_exp2RK(fs, e2)) { /* 2nd expression is constant? */
+ else if (exp2RK(fs, e2)) { /* 2nd expression is constant? */
op = OP_EQK;
r2 = e2->u.info; /* constant index */
}
@@ -1665,7 +1669,7 @@ void luaK_infix (FuncState *fs, BinOpr op, expdesc *v) {
}
case OPR_EQ: case OPR_NE: {
if (!tonumeral(v, NULL))
- luaK_exp2RK(fs, v);
+ exp2RK(fs, v);
/* else keep numeral, which may be an immediate operand */
break;
}
diff --git a/contrib/lua/src/lcode.h b/contrib/lua/src/lcode.h
index 326582445263..0b971fc4359b 100644
--- a/contrib/lua/src/lcode.h
+++ b/contrib/lua/src/lcode.h
@@ -61,10 +61,8 @@ typedef enum UnOpr { OPR_MINUS, OPR_BNOT, OPR_NOT, OPR_LEN, OPR_NOUNOPR } UnOpr;
LUAI_FUNC int luaK_code (FuncState *fs, Instruction i);
LUAI_FUNC int luaK_codeABx (FuncState *fs, OpCode o, int A, unsigned int Bx);
-LUAI_FUNC int luaK_codeAsBx (FuncState *fs, OpCode o, int A, int Bx);
LUAI_FUNC int luaK_codeABCk (FuncState *fs, OpCode o, int A,
int B, int C, int k);
-LUAI_FUNC int luaK_isKint (expdesc *e);
LUAI_FUNC int luaK_exp2const (FuncState *fs, const expdesc *e, TValue *v);
LUAI_FUNC void luaK_fixline (FuncState *fs, int line);
LUAI_FUNC void luaK_nil (FuncState *fs, int from, int n);
@@ -76,7 +74,6 @@ LUAI_FUNC int luaK_exp2anyreg (FuncState *fs, expdesc *e);
LUAI_FUNC void luaK_exp2anyregup (FuncState *fs, expdesc *e);
LUAI_FUNC void luaK_exp2nextreg (FuncState *fs, expdesc *e);
LUAI_FUNC void luaK_exp2val (FuncState *fs, expdesc *e);
-LUAI_FUNC int luaK_exp2RK (FuncState *fs, expdesc *e);
LUAI_FUNC void luaK_self (FuncState *fs, expdesc *e, expdesc *key);
LUAI_FUNC void luaK_indexed (FuncState *fs, expdesc *t, expdesc *k);
LUAI_FUNC void luaK_goiftrue (FuncState *fs, expdesc *e);
diff --git a/contrib/lua/src/ldebug.c b/contrib/lua/src/ldebug.c
index 28b1caabf77e..7264fce8a55c 100644
--- a/contrib/lua/src/ldebug.c
+++ b/contrib/lua/src/ldebug.c
@@ -31,12 +31,15 @@
-#define noLuaClosure(f) ((f) == NULL || (f)->c.tt == LUA_VCCL)
+#define LuaClosure(f) ((f) != NULL && (f)->c.tt == LUA_VLCL)
static const char *funcnamefromcall (lua_State *L, CallInfo *ci,
const char **name);
+static const char strlocal[] = "local";
+static const char strupval[] = "upvalue";
+
static int currentpc (CallInfo *ci) {
lua_assert(isLua(ci));
@@ -254,7 +257,7 @@ LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n) {
static void funcinfo (lua_Debug *ar, Closure *cl) {
- if (noLuaClosure(cl)) {
+ if (!LuaClosure(cl)) {
ar->source = "=[C]";
ar->srclen = LL("=[C]");
ar->linedefined = -1;
@@ -288,29 +291,31 @@ static int nextline (const Proto *p, int currentline, int pc) {
static void collectvalidlines (lua_State *L, Closure *f) {
- if (noLuaClosure(f)) {
+ if (!LuaClosure(f)) {
setnilvalue(s2v(L->top.p));
api_incr_top(L);
}
else {
- int i;
- TValue v;
const Proto *p = f->l.p;
int currentline = p->linedefined;
Table *t = luaH_new(L); /* new table to store active lines */
sethvalue2s(L, L->top.p, t); /* push it on stack */
api_incr_top(L);
- setbtvalue(&v); /* boolean 'true' to be the value of all indices */
- if (!p->is_vararg) /* regular function? */
- i = 0; /* consider all instructions */
- else { /* vararg function */
- lua_assert(GET_OPCODE(p->code[0]) == OP_VARARGPREP);
- currentline = nextline(p, currentline, 0);
- i = 1; /* skip first instruction (OP_VARARGPREP) */
- }
- for (; i < p->sizelineinfo; i++) { /* for each instruction */
- currentline = nextline(p, currentline, i); /* get its line */
- luaH_setint(L, t, currentline, &v); /* table[line] = true */
+ if (p->lineinfo != NULL) { /* proto with debug information? */
+ int i;
+ TValue v;
+ setbtvalue(&v); /* boolean 'true' to be the value of all indices */
+ if (!p->is_vararg) /* regular function? */
+ i = 0; /* consider all instructions */
+ else { /* vararg function */
+ lua_assert(GET_OPCODE(p->code[0]) == OP_VARARGPREP);
+ currentline = nextline(p, currentline, 0);
+ i = 1; /* skip first instruction (OP_VARARGPREP) */
+ }
+ for (; i < p->sizelineinfo; i++) { /* for each instruction */
+ currentline = nextline(p, currentline, i); /* get its line */
+ luaH_setint(L, t, currentline, &v); /* table[line] = true */
+ }
}
}
}
@@ -339,7 +344,7 @@ static int auxgetinfo (lua_State *L, const char *what, lua_Debug *ar,
}
case 'u': {
ar->nups = (f == NULL) ? 0 : f->c.nupvalues;
- if (noLuaClosure(f)) {
+ if (!LuaClosure(f)) {
ar->isvararg = 1;
ar->nparams = 0;
}
@@ -417,40 +422,6 @@ LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar) {
** =======================================================
*/
-static const char *getobjname (const Proto *p, int lastpc, int reg,
- const char **name);
-
-
-/*
-** Find a "name" for the constant 'c'.
-*/
-static void kname (const Proto *p, int c, const char **name) {
- TValue *kvalue = &p->k[c];
- *name = (ttisstring(kvalue)) ? svalue(kvalue) : "?";
-}
-
-
-/*
-** Find a "name" for the register 'c'.
-*/
-static void rname (const Proto *p, int pc, int c, const char **name) {
- const char *what = getobjname(p, pc, c, name); /* search for 'c' */
- if (!(what && *what == 'c')) /* did not find a constant name? */
- *name = "?";
-}
-
-
-/*
-** Find a "name" for a 'C' value in an RK instruction.
-*/
-static void rkname (const Proto *p, int pc, Instruction i, const char **name) {
- int c = GETARG_C(i); /* key index */
- if (GETARG_k(i)) /* is 'c' a constant? */
- kname(p, c, name);
- else /* 'c' is a register */
- rname(p, pc, c, name);
-}
-
static int filterpc (int pc, int jmptarget) {
if (pc < jmptarget) /* is code conditional (inside a jump)? */
@@ -509,28 +480,29 @@ static int findsetreg (const Proto *p, int lastpc, int reg) {
/*
-** Check whether table being indexed by instruction 'i' is the
-** environment '_ENV'
+** Find a "name" for the constant 'c'.
*/
-static const char *gxf (const Proto *p, int pc, Instruction i, int isup) {
- int t = GETARG_B(i); /* table index */
- const char *name; /* name of indexed variable */
- if (isup) /* is an upvalue? */
- name = upvalname(p, t);
- else
- getobjname(p, pc, t, &name);
- return (name && strcmp(name, LUA_ENV) == 0) ? "global" : "field";
+static const char *kname (const Proto *p, int index, const char **name) {
+ TValue *kvalue = &p->k[index];
+ if (ttisstring(kvalue)) {
+ *name = getstr(tsvalue(kvalue));
+ return "constant";
+ }
+ else {
+ *name = "?";
+ return NULL;
+ }
}
-static const char *getobjname (const Proto *p, int lastpc, int reg,
- const char **name) {
- int pc;
- *name = luaF_getlocalname(p, reg + 1, lastpc);
+static const char *basicgetobjname (const Proto *p, int *ppc, int reg,
+ const char **name) {
+ int pc = *ppc;
+ *name = luaF_getlocalname(p, reg + 1, pc);
if (*name) /* is a local? */
- return "local";
+ return strlocal;
/* else try symbolic execution */
- pc = findsetreg(p, lastpc, reg);
+ *ppc = pc = findsetreg(p, pc, reg);
if (pc != -1) { /* could find instruction? */
Instruction i = p->code[pc];
OpCode op = GET_OPCODE(i);
@@ -538,18 +510,86 @@ static const char *getobjname (const Proto *p, int lastpc, int reg,
case OP_MOVE: {
int b = GETARG_B(i); /* move from 'b' to 'a' */
if (b < GETARG_A(i))
- return getobjname(p, pc, b, name); /* get name for 'b' */
+ return basicgetobjname(p, ppc, b, name); /* get name for 'b' */
break;
}
+ case OP_GETUPVAL: {
+ *name = upvalname(p, GETARG_B(i));
+ return strupval;
+ }
+ case OP_LOADK: return kname(p, GETARG_Bx(i), name);
+ case OP_LOADKX: return kname(p, GETARG_Ax(p->code[pc + 1]), name);
+ default: break;
+ }
+ }
+ return NULL; /* could not find reasonable name */
+}
+
+
+/*
+** Find a "name" for the register 'c'.
+*/
+static void rname (const Proto *p, int pc, int c, const char **name) {
+ const char *what = basicgetobjname(p, &pc, c, name); /* search for 'c' */
+ if (!(what && *what == 'c')) /* did not find a constant name? */
+ *name = "?";
+}
+
+
+/*
+** Find a "name" for a 'C' value in an RK instruction.
+*/
+static void rkname (const Proto *p, int pc, Instruction i, const char **name) {
+ int c = GETARG_C(i); /* key index */
+ if (GETARG_k(i)) /* is 'c' a constant? */
+ kname(p, c, name);
+ else /* 'c' is a register */
+ rname(p, pc, c, name);
+}
+
+
+/*
+** Check whether table being indexed by instruction 'i' is the
+** environment '_ENV'. If the table is an upvalue, get its name;
+** otherwise, find some "name" for the table and check whether
+** that name is the name of a local variable (and not, for instance,
+** a string). Then check that, if there is a name, it is '_ENV'.
+*/
+static const char *isEnv (const Proto *p, int pc, Instruction i, int isup) {
+ int t = GETARG_B(i); /* table index */
+ const char *name; /* name of indexed variable */
+ if (isup) /* is 't' an upvalue? */
+ name = upvalname(p, t);
+ else { /* 't' is a register */
+ const char *what = basicgetobjname(p, &pc, t, &name);
+ if (what != strlocal && what != strupval)
+ name = NULL; /* cannot be the variable _ENV */
+ }
+ return (name && strcmp(name, LUA_ENV) == 0) ? "global" : "field";
+}
+
+
+/*
+** Extend 'basicgetobjname' to handle table accesses
+*/
+static const char *getobjname (const Proto *p, int lastpc, int reg,
+ const char **name) {
+ const char *kind = basicgetobjname(p, &lastpc, reg, name);
+ if (kind != NULL)
+ return kind;
+ else if (lastpc != -1) { /* could find instruction? */
+ Instruction i = p->code[lastpc];
+ OpCode op = GET_OPCODE(i);
+ switch (op) {
case OP_GETTABUP: {
int k = GETARG_C(i); /* key index */
kname(p, k, name);
- return gxf(p, pc, i, 1);
+ return isEnv(p, lastpc, i, 1);
}
case OP_GETTABLE: {
int k = GETARG_C(i); /* key index */
- rname(p, pc, k, name);
- return gxf(p, pc, i, 0);
+ rname(p, lastpc, k, name);
+ return isEnv(p, lastpc, i, 0);
}
case OP_GETI: {
*name = "integer index";
@@ -558,24 +598,10 @@ static const char *getobjname (const Proto *p, int lastpc, int reg,
case OP_GETFIELD: {
int k = GETARG_C(i); /* key index */
kname(p, k, name);
- return gxf(p, pc, i, 0);
- }
- case OP_GETUPVAL: {
- *name = upvalname(p, GETARG_B(i));
- return "upvalue";
- }
- case OP_LOADK:
- case OP_LOADKX: {
- int b = (op == OP_LOADK) ? GETARG_Bx(i)
- : GETARG_Ax(p->code[pc + 1]);
- if (ttisstring(&p->k[b])) {
- *name = svalue(&p->k[b]);
- return "constant";
- }
- break;
+ return isEnv(p, lastpc, i, 0);
}
case OP_SELF: {
- rkname(p, pc, i, name);
+ rkname(p, lastpc, i, name);
return "method";
}
default: break; /* go through to return NULL */
@@ -627,7 +653,7 @@ static const char *funcnamefromcode (lua_State *L, const Proto *p,
default:
return NULL; /* cannot find a reasonable name */
}
- *name = getstr(G(L)->tmname[tm]) + 2;
+ *name = getshrstr(G(L)->tmname[tm]) + 2;
return "metamethod";
}
@@ -684,7 +710,7 @@ static const char *getupvalname (CallInfo *ci, const TValue *o,
for (i = 0; i < c->nupvalues; i++) {
if (c->upvals[i]->v.p == o) {
*name = upvalname(c->p, i);
- return "upvalue";
+ return strupval;
}
}
return NULL;
@@ -866,6 +892,28 @@ static int changedline (const Proto *p, int oldpc, int newpc) {
/*
+** Traces Lua calls. If code is running the first instruction of a function,
+** and the function is not vararg and is not coming from a yield, calls
+** 'luaD_hookcall'. (Vararg functions call 'luaD_hookcall' only after
+** adjusting their variable arguments; otherwise, they could call a
+** line/count hook before the call hook. Functions coming from a yield
+** already called 'luaD_hookcall' before yielding.)
+*/
+int luaG_tracecall (lua_State *L) {
+ CallInfo *ci = L->ci;
+ Proto *p = ci_func(ci)->p;
+ ci->u.l.trap = 1; /* ensure hooks will be checked */
+ if (ci->u.l.savedpc == p->code) { /* first instruction (not resuming)? */
+ if (p->is_vararg)
+ return 0; /* hooks will start at VARARGPREP instruction */
+ else if (!(ci->callstatus & CIST_HOOKYIELD)) /* not yielded? */
+ luaD_hookcall(L, ci); /* check 'call' hook */
+ }
+ return 1; /* keep 'trap' on */
+}
+
+
+/*
** Traces the execution of a Lua function. Called before the execution
** of each opcode, when debug is on. 'L->oldpc' stores the last
** instruction traced, to detect line changes. When entering a new
@@ -888,12 +936,12 @@ int luaG_traceexec (lua_State *L, const Instruction *pc) {
}
pc++; /* reference is always next instruction */
ci->u.l.savedpc = pc; /* save 'pc' */
- counthook = (--L->hookcount == 0 && (mask & LUA_MASKCOUNT));
+ counthook = (mask & LUA_MASKCOUNT) && (--L->hookcount == 0);
if (counthook)
resethookcount(L); /* reset count */
else if (!(mask & LUA_MASKLINE))
return 1; /* no line hook and count != 0; nothing to be done now */
- if (ci->callstatus & CIST_HOOKYIELD) { /* called hook last time? */
+ if (ci->callstatus & CIST_HOOKYIELD) { /* hook yielded last time? */
ci->callstatus &= ~CIST_HOOKYIELD; /* erase mark */
return 1; /* do not call hook again (VM yielded, so it did not move) */
}
@@ -915,7 +963,6 @@ int luaG_traceexec (lua_State *L, const Instruction *pc) {
if (L->status == LUA_YIELD) { /* did hook yield? */
if (counthook)
L->hookcount = 1; /* undo decrement to zero */
- ci->u.l.savedpc--; /* undo increment (resume will increment it again) */
ci->callstatus |= CIST_HOOKYIELD; /* mark that it yielded */
luaD_throw(L, LUA_YIELD);
}
diff --git a/contrib/lua/src/ldebug.h b/contrib/lua/src/ldebug.h
index 2c3074c61b6f..2bfce3cb5e77 100644
--- a/contrib/lua/src/ldebug.h
+++ b/contrib/lua/src/ldebug.h
@@ -58,6 +58,7 @@ LUAI_FUNC const char *luaG_addinfo (lua_State *L, const char *msg,
TString *src, int line);
LUAI_FUNC l_noret luaG_errormsg (lua_State *L);
LUAI_FUNC int luaG_traceexec (lua_State *L, const Instruction *pc);
+LUAI_FUNC int luaG_tracecall (lua_State *L);
#endif
diff --git a/contrib/lua/src/ldo.c b/contrib/lua/src/ldo.c
index 2a0017ca62a3..c92573d6e699 100644
--- a/contrib/lua/src/ldo.c
+++ b/contrib/lua/src/ldo.c
@@ -94,10 +94,6 @@ void luaD_seterrorobj (lua_State *L, int errcode, StkId oldtop) {
setsvalue2s(L, oldtop, G(L)->memerrmsg); /* reuse preregistered msg. */
break;
}
- case LUA_ERRERR: {
- setsvalue2s(L, oldtop, luaS_newliteral(L, "error in error handling"));
- break;
- }
case LUA_OK: { /* special case only for closing upvalues */
setnilvalue(s2v(oldtop)); /* no error message */
break;
@@ -120,6 +116,7 @@ l_noret luaD_throw (lua_State *L, int errcode) {
else { /* thread has no error handler */
global_State *g = G(L);
errcode = luaE_resetthread(L, errcode); /* close all upvalues */
+ L->status = errcode;
if (g->mainthread->errorJmp) { /* main thread has a handler? */
setobjs2s(L, g->mainthread->top.p++, L->top.p - 1); /* copy error obj. */
luaD_throw(g->mainthread, errcode); /* re-throw in main thread */
@@ -198,6 +195,16 @@ static void correctstack (lua_State *L) {
/* some space for error handling */
#define ERRORSTACKSIZE (LUAI_MAXSTACK + 200)
+
+/* raise an error while running the message handler */
+l_noret luaD_errerr (lua_State *L) {
+ TString *msg = luaS_newliteral(L, "error in error handling");
+ setsvalue2s(L, L->top.p, msg);
+ L->top.p++; /* assume EXTRA_STACK */
+ luaD_throw(L, LUA_ERRERR);
+}
+
+
/*
** Reallocate the stack to a new size, correcting all pointers into it.
** In ISO C, any pointer use after the pointer has been deallocated is
@@ -247,7 +254,7 @@ int luaD_growstack (lua_State *L, int n, int raiseerror) {
a stack error; cannot grow further than that. */
lua_assert(stacksize(L) == ERRORSTACKSIZE);
if (raiseerror)
- luaD_throw(L, LUA_ERRERR); /* error inside message handler */
+ luaD_errerr(L); /* error inside message handler */
return 0; /* if not 'raiseerror', just signal it */
}
else if (n < LUAI_MAXSTACK) { /* avoids arithmetic overflows */
@@ -409,7 +416,7 @@ static void rethook (lua_State *L, CallInfo *ci, int nres) {
** stack, below original 'func', so that 'luaD_precall' can call it. Raise
** an error if there is no '__call' metafield.
*/
-StkId luaD_tryfuncTM (lua_State *L, StkId func) {
+static StkId tryfuncTM (lua_State *L, StkId func) {
const TValue *tm;
StkId p;
checkstackGCp(L, 1, func); /* space for metamethod */
@@ -568,7 +575,7 @@ int luaD_pretailcall (lua_State *L, CallInfo *ci, StkId func,
return -1;
}
default: { /* not a function */
- func = luaD_tryfuncTM(L, func); /* try to get '__call' metamethod */
+ func = tryfuncTM(L, func); /* try to get '__call' metamethod */
/* return luaD_pretailcall(L, ci, func, narg1 + 1, delta); */
narg1++;
goto retry; /* try again */
@@ -609,7 +616,7 @@ CallInfo *luaD_precall (lua_State *L, StkId func, int nresults) {
return ci;
}
default: { /* not a function */
- func = luaD_tryfuncTM(L, func); /* try to get '__call' metamethod */
+ func = tryfuncTM(L, func); /* try to get '__call' metamethod */
/* return luaD_precall(L, func, nresults); */
goto retry; /* try again with metamethod */
}
@@ -792,6 +799,10 @@ static void resume (lua_State *L, void *ud) {
lua_assert(L->status == LUA_YIELD);
L->status = LUA_OK; /* mark that it is running (again) */
if (isLua(ci)) { /* yielded inside a hook? */
+ /* undo increment made by 'luaG_traceexec': instruction was not
+ executed yet */
+ lua_assert(ci->callstatus & CIST_HOOKYIELD);
+ ci->u.l.savedpc--;
L->top.p = firstArg; /* discard arguments */
luaV_execute(L, ci); /* just continue running Lua code */
}
diff --git a/contrib/lua/src/ldo.h b/contrib/lua/src/ldo.h
index 1aa446ad09e9..4de9540ec807 100644
--- a/contrib/lua/src/ldo.h
+++ b/contrib/lua/src/ldo.h
@@ -60,6 +60,7 @@
/* type of protected functions, to be run by 'runprotected' */
typedef void (*Pfunc) (lua_State *L, void *ud);
+LUAI_FUNC l_noret luaD_errerr (lua_State *L);
LUAI_FUNC void luaD_seterrorobj (lua_State *L, int errcode, StkId oldtop);
LUAI_FUNC int luaD_protectedparser (lua_State *L, ZIO *z, const char *name,
const char *mode);
@@ -71,7 +72,6 @@ LUAI_FUNC int luaD_pretailcall (lua_State *L, CallInfo *ci, StkId func,
LUAI_FUNC CallInfo *luaD_precall (lua_State *L, StkId func, int nResults);
LUAI_FUNC void luaD_call (lua_State *L, StkId func, int nResults);
LUAI_FUNC void luaD_callnoyield (lua_State *L, StkId func, int nResults);
-LUAI_FUNC StkId luaD_tryfuncTM (lua_State *L, StkId func);
LUAI_FUNC int luaD_closeprotected (lua_State *L, ptrdiff_t level, int status);
LUAI_FUNC int luaD_pcall (lua_State *L, Pfunc func, void *u,
ptrdiff_t oldtop, ptrdiff_t ef);
diff --git a/contrib/lua/src/lgc.c b/contrib/lua/src/lgc.c
index a3094ff57126..5817f9eec35a 100644
--- a/contrib/lua/src/lgc.c
+++ b/contrib/lua/src/lgc.c
@@ -542,10 +542,12 @@ static void traversestrongtable (global_State *g, Table *h) {
static lu_mem traversetable (global_State *g, Table *h) {
const char *weakkey, *weakvalue;
const TValue *mode = gfasttm(g, h->metatable, TM_MODE);
+ TString *smode;
markobjectN(g, h->metatable);
- if (mode && ttisstring(mode) && /* is there a weak mode? */
- (cast_void(weakkey = strchr(svalue(mode), 'k')),
- cast_void(weakvalue = strchr(svalue(mode), 'v')),
+ if (mode && ttisshrstring(mode) && /* is there a weak mode? */
+ (cast_void(smode = tsvalue(mode)),
+ cast_void(weakkey = strchr(getshrstr(smode), 'k')),
+ cast_void(weakvalue = strchr(getshrstr(smode), 'v')),
(weakkey || weakvalue))) { /* is really weak? */
if (!weakkey) /* strong keys? */
traverseweakvalue(g, h);
@@ -638,7 +640,9 @@ static int traversethread (global_State *g, lua_State *th) {
for (uv = th->openupval; uv != NULL; uv = uv->u.open.next)
markobject(g, uv); /* open upvalues cannot be collected */
if (g->gcstate == GCSatomic) { /* final traversal? */
- for (; o < th->stack_last.p + EXTRA_STACK; o++)
+ if (!g->gcemergency)
+ luaD_shrinkstack(th); /* do not change stack in emergency cycle */
+ for (o = th->top.p; o < th->stack_last.p + EXTRA_STACK; o++)
setnilvalue(s2v(o)); /* clear dead stack slice */
/* 'remarkupvals' may have removed thread from 'twups' list */
if (!isintwups(th) && th->openupval != NULL) {
@@ -646,8 +650,6 @@ static int traversethread (global_State *g, lua_State *th) {
g->twups = th;
}
}
- else if (!g->gcemergency)
- luaD_shrinkstack(th); /* do not change stack in emergency cycle */
return 1 + stacksize(th);
}
@@ -1409,7 +1411,7 @@ static void stepgenfull (lua_State *L, global_State *g) {
setminordebt(g);
}
else { /* another bad collection; stay in incremental mode */
- g->GCestimate = gettotalbytes(g); /* first estimate */;
+ g->GCestimate = gettotalbytes(g); /* first estimate */
entersweep(L);
luaC_runtilstate(L, bitmask(GCSpause)); /* finish collection */
setpause(g);
@@ -1604,7 +1606,7 @@ static lu_mem singlestep (lua_State *L) {
case GCSenteratomic: {
work = atomic(L); /* work is what was traversed by 'atomic' */
entersweep(L);
- g->GCestimate = gettotalbytes(g); /* first estimate */;
+ g->GCestimate = gettotalbytes(g); /* first estimate */
break;
}
case GCSswpallgc: { /* sweep "regular" objects */
@@ -1710,6 +1712,8 @@ static void fullinc (lua_State *L, global_State *g) {
entersweep(L); /* sweep everything to turn them back to white */
/* finish any pending sweep phase to start a new cycle */
luaC_runtilstate(L, bitmask(GCSpause));
+ luaC_runtilstate(L, bitmask(GCSpropagate)); /* start new cycle */
+ g->gcstate = GCSenteratomic; /* go straight to atomic phase */
luaC_runtilstate(L, bitmask(GCScallfin)); /* run up to finalizers */
/* estimate must be correct after a full GC cycle */
lua_assert(g->GCestimate == gettotalbytes(g));
diff --git a/contrib/lua/src/liolib.c b/contrib/lua/src/liolib.c
index b08397da45da..c5075f3e78a9 100644
--- a/contrib/lua/src/liolib.c
+++ b/contrib/lua/src/liolib.c
@@ -245,8 +245,8 @@ static int f_gc (lua_State *L) {
*/
static int io_fclose (lua_State *L) {
LStream *p = tolstream(L);
- int res = fclose(p->f);
- return luaL_fileresult(L, (res == 0), NULL);
+ errno = 0;
+ return luaL_fileresult(L, (fclose(p->f) == 0), NULL);
}
@@ -272,6 +272,7 @@ static int io_open (lua_State *L) {
LStream *p = newfile(L);
const char *md = mode; /* to traverse/check mode */
luaL_argcheck(L, l_checkmode(md), 2, "invalid mode");
+ errno = 0;
p->f = fopen(filename, mode);
return (p->f == NULL) ? luaL_fileresult(L, 0, filename) : 1;
}
@@ -292,6 +293,7 @@ static int io_popen (lua_State *L) {
const char *mode = luaL_optstring(L, 2, "r");
LStream *p = newprefile(L);
luaL_argcheck(L, l_checkmodep(mode), 2, "invalid mode");
+ errno = 0;
p->f = l_popen(L, filename, mode);
p->closef = &io_pclose;
return (p->f == NULL) ? luaL_fileresult(L, 0, filename) : 1;
@@ -300,6 +302,7 @@ static int io_popen (lua_State *L) {
static int io_tmpfile (lua_State *L) {
LStream *p = newfile(L);
+ errno = 0;
p->f = tmpfile();
return (p->f == NULL) ? luaL_fileresult(L, 0, NULL) : 1;
}
@@ -567,6 +570,7 @@ static int g_read (lua_State *L, FILE *f, int first) {
int nargs = lua_gettop(L) - 1;
int n, success;
clearerr(f);
+ errno = 0;
if (nargs == 0) { /* no arguments? */
success = read_line(L, f, 1);
n = first + 1; /* to return 1 result */
@@ -660,6 +664,7 @@ static int io_readline (lua_State *L) {
static int g_write (lua_State *L, FILE *f, int arg) {
int nargs = lua_gettop(L) - arg;
int status = 1;
+ errno = 0;
for (; nargs--; arg++) {
if (lua_type(L, arg) == LUA_TNUMBER) {
/* optimization: could be done exactly as for strings */
@@ -678,7 +683,8 @@ static int g_write (lua_State *L, FILE *f, int arg) {
}
if (l_likely(status))
return 1; /* file handle already on stack top */
- else return luaL_fileresult(L, status, NULL);
+ else
+ return luaL_fileresult(L, status, NULL);
}
@@ -703,6 +709,7 @@ static int f_seek (lua_State *L) {
l_seeknum offset = (l_seeknum)p3;
luaL_argcheck(L, (lua_Integer)offset == p3, 3,
"not an integer in proper range");
+ errno = 0;
op = l_fseek(f, offset, mode[op]);
if (l_unlikely(op))
return luaL_fileresult(L, 0, NULL); /* error */
@@ -719,19 +726,25 @@ static int f_setvbuf (lua_State *L) {
FILE *f = tofile(L);
int op = luaL_checkoption(L, 2, NULL, modenames);
lua_Integer sz = luaL_optinteger(L, 3, LUAL_BUFFERSIZE);
- int res = setvbuf(f, NULL, mode[op], (size_t)sz);
+ int res;
+ errno = 0;
+ res = setvbuf(f, NULL, mode[op], (size_t)sz);
return luaL_fileresult(L, res == 0, NULL);
}
static int io_flush (lua_State *L) {
- return luaL_fileresult(L, fflush(getiofile(L, IO_OUTPUT)) == 0, NULL);
+ FILE *f = getiofile(L, IO_OUTPUT);
+ errno = 0;
+ return luaL_fileresult(L, fflush(f) == 0, NULL);
}
static int f_flush (lua_State *L) {
- return luaL_fileresult(L, fflush(tofile(L)) == 0, NULL);
+ FILE *f = tofile(L);
+ errno = 0;
+ return luaL_fileresult(L, fflush(f) == 0, NULL);
}
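
The errno resets added throughout this file exist because luaL_fileresult consults errno when the underlying call fails; without clearing it first, a stale value left over from an unrelated earlier call could be reported as the reason. A minimal standalone sketch of the pattern (plain C, not the Lua sources; 'report_result' is a hypothetical stand-in for luaL_fileresult):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for luaL_fileresult: on failure it inspects errno, so
    ** errno must reflect the call just made, not a stale value. */
    static void report_result(int ok, const char *what) {
        if (ok)
            printf("%s: ok\n", what);
        else if (errno != 0)
            printf("%s: %s\n", what, strerror(errno));
        else
            printf("%s: failed (no errno)\n", what);
    }

    int main(void) {
        errno = 0;   /* clear leftovers before the call, as the patch does */
        FILE *f = fopen("no-such-file", "r");
        report_result(f != NULL, "fopen");
        if (f != NULL)
            fclose(f);
        return 0;
    }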
@@ -773,7 +786,7 @@ static const luaL_Reg meth[] = {
** metamethods for file handles
*/
static const luaL_Reg metameth[] = {
- {"__index", NULL}, /* place holder */
+ {"__index", NULL}, /* placeholder */
{"__gc", f_gc},
{"__close", f_gc},
{"__tostring", f_tostring},
diff --git a/contrib/lua/src/lmathlib.c b/contrib/lua/src/lmathlib.c
index d0b1e1e5d6f5..438106348084 100644
--- a/contrib/lua/src/lmathlib.c
+++ b/contrib/lua/src/lmathlib.c
@@ -249,6 +249,15 @@ static int math_type (lua_State *L) {
** ===================================================================
*/
+/*
+** This code uses lots of shifts. ANSI C does not allow shifts greater
+** than or equal to the width of the type being shifted, so some shifts
+** are written in convoluted ways to match that restriction. For
+** preprocessor tests, it assumes a width of 32 bits, so the maximum
+** shift there is 31 bits.
+*/
+
+
/* number of binary digits in the mantissa of a float */
#define FIGS l_floatatt(MANT_DIG)
@@ -271,16 +280,19 @@ static int math_type (lua_State *L) {
/* 'long' has at least 64 bits */
#define Rand64 unsigned long
+#define SRand64 long
#elif !defined(LUA_USE_C89) && defined(LLONG_MAX)
/* there is a 'long long' type (which must have at least 64 bits) */
#define Rand64 unsigned long long
+#define SRand64 long long
#elif ((LUA_MAXUNSIGNED >> 31) >> 31) >= 3
/* 'lua_Unsigned' has at least 64 bits */
#define Rand64 lua_Unsigned
+#define SRand64 lua_Integer
#endif
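
The comment added above refers to idioms like the '((LUA_MAXUNSIGNED >> 31) >> 31) >= 3' test just above: a direct 62-bit shift would be undefined when the shifted type is only 32 bits wide, so the shift is split into two 31-bit halves. A small self-contained illustration, using ULONG_MAX from <limits.h> (a plain constant, so it is valid in preprocessor arithmetic):

    #include <limits.h>
    #include <stdio.h>

    /* If 'unsigned long' has at least 64 value bits, shifting its
    ** maximum right by 62 leaves at least the top two bits, i.e. 3.
    ** Two 31-bit shifts avoid an undefined 62-bit shift on 32-bit
    ** types. */
    #if ((ULONG_MAX >> 31) >> 31) >= 3
    #define LONG_HAS_64_BITS 1
    #else
    #define LONG_HAS_64_BITS 0
    #endif

    int main(void) {
        printf("unsigned long provides %s 64 bits\n",
               LONG_HAS_64_BITS ? "at least" : "fewer than");
        return 0;
    }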
@@ -319,23 +331,30 @@ static Rand64 nextrand (Rand64 *state) {
}
-/* must take care to not shift stuff by more than 63 slots */
-
-
/*
** Convert bits from a random integer into a float in the
** interval [0,1), getting the higher FIG bits from the
** random unsigned integer and converting that to a float.
+** Some old Microsoft compilers cannot cast an unsigned long
+** to a floating-point number, so we use a signed long as an
+** intermediary. When lua_Number is float or double, the shift ensures
+** that 'sx' is non-negative; in that case, a good compiler will remove
+** the correction.
*/
/* must throw out the extra (64 - FIGS) bits */
#define shift64_FIG (64 - FIGS)
-/* to scale to [0, 1), multiply by scaleFIG = 2^(-FIGS) */
+/* 2^(-FIGS) == 2^-1 / 2^(FIGS-1) */
#define scaleFIG (l_mathop(0.5) / ((Rand64)1 << (FIGS - 1)))
static lua_Number I2d (Rand64 x) {
- return (lua_Number)(trim64(x) >> shift64_FIG) * scaleFIG;
+ SRand64 sx = (SRand64)(trim64(x) >> shift64_FIG);
+ lua_Number res = (lua_Number)(sx) * scaleFIG;
+ if (sx < 0)
+ res += l_mathop(1.0); /* correct the two's complement if negative */
+ lua_assert(0 <= res && res < 1);
+ return res;
}
/* convert a 'Rand64' to a 'lua_Unsigned' */
@@ -471,8 +490,6 @@ static lua_Number I2d (Rand64 x) {
#else /* 32 < FIGS <= 64 */
-/* must take care to not shift stuff by more than 31 slots */
-
/* 2^(-FIGS) = 1.0 / 2^30 / 2^3 / 2^(FIGS-33) */
#define scaleFIG \
(l_mathop(1.0) / (UONE << 30) / l_mathop(8.0) / (UONE << (FIGS - 33)))
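
The patched I2d converts the top FIGS random bits to a float in [0,1) through a signed intermediary because, per the new comment, some old Microsoft compilers cannot convert unsigned 64-bit integers to floating point. A standalone sketch, assuming FIGS == 53 (the mantissa width of a double); when FIGS == 64 the shift is zero, 'sx' can wrap negative, and the '+ 1.0' correction undoes the wrap, since (sx + 2^64) * 2^(-64) == sx * 2^(-64) + 1.0:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FIGS  53                                  /* DBL_MANT_DIG */
    #define SCALE (0.5 / ((uint64_t)1 << (FIGS - 1))) /* == 2^(-FIGS) */

    /* Sketch of the patched conversion (not the Lua sources verbatim). */
    static double I2d(uint64_t x) {
        int64_t sx = (int64_t)(x >> (64 - FIGS));  /* keep top FIGS bits */
        double res = (double)sx * SCALE;
        if (sx < 0)
            res += 1.0;   /* undo the two's-complement wrap (FIGS == 64) */
        assert(0.0 <= res && res < 1.0);
        return res;
    }

    int main(void) {
        printf("%.17g\n", I2d(0x9E3779B97F4A7C15u));  /* arbitrary bits */
        return 0;
    }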
diff --git a/contrib/lua/src/loadlib.c b/contrib/lua/src/loadlib.c
index d792dffaa03b..6d289fcebb8c 100644
--- a/contrib/lua/src/loadlib.c
+++ b/contrib/lua/src/loadlib.c
@@ -25,15 +25,6 @@
/*
-** LUA_IGMARK is a mark to ignore all before it when building the
-** luaopen_ function name.
-*/
-#if !defined (LUA_IGMARK)
-#define LUA_IGMARK "-"
-#endif
-
-
-/*
** LUA_CSUBSEP is the character that replaces dots in submodule names
** when searching for a C loader.
** LUA_LSUBSEP is the character that replaces dots in submodule names
diff --git a/contrib/lua/src/lobject.c b/contrib/lua/src/lobject.c
index f73ffc6d92bd..9cfa5227eb46 100644
--- a/contrib/lua/src/lobject.c
+++ b/contrib/lua/src/lobject.c
@@ -542,7 +542,7 @@ const char *luaO_pushvfstring (lua_State *L, const char *fmt, va_list argp) {
addstr2buff(&buff, fmt, strlen(fmt)); /* rest of 'fmt' */
clearbuff(&buff); /* empty buffer into the stack */
lua_assert(buff.pushed == 1);
- return svalue(s2v(L->top.p - 1));
+ return getstr(tsvalue(s2v(L->top.p - 1)));
}
diff --git a/contrib/lua/src/lobject.h b/contrib/lua/src/lobject.h
index 556608e4aa21..980e42f8c27a 100644
--- a/contrib/lua/src/lobject.h
+++ b/contrib/lua/src/lobject.h
@@ -386,7 +386,7 @@ typedef struct GCObject {
typedef struct TString {
CommonHeader;
lu_byte extra; /* reserved words for short strings; "has hash" for longs */
- lu_byte shrlen; /* length for short strings */
+ lu_byte shrlen; /* length for short strings, 0xFF for long strings */
unsigned int hash;
union {
size_t lnglen; /* length for long strings */
@@ -398,19 +398,17 @@ typedef struct TString {
/*
-** Get the actual string (array of bytes) from a 'TString'.
+** Get the actual string (array of bytes) from a 'TString'. (Generic
+** version and specialized versions for long and short strings.)
*/
-#define getstr(ts) ((ts)->contents)
+#define getstr(ts) ((ts)->contents)
+#define getlngstr(ts) check_exp((ts)->shrlen == 0xFF, (ts)->contents)
+#define getshrstr(ts) check_exp((ts)->shrlen != 0xFF, (ts)->contents)
-/* get the actual string (array of bytes) from a Lua value */
-#define svalue(o) getstr(tsvalue(o))
-
/* get string length from 'TString *s' */
-#define tsslen(s) ((s)->tt == LUA_VSHRSTR ? (s)->shrlen : (s)->u.lnglen)
-
-/* get string length from 'TValue *o' */
-#define vslen(o) tsslen(tsvalue(o))
+#define tsslen(s) \
+ ((s)->shrlen != 0xFF ? (s)->shrlen : (s)->u.lnglen)
/* }================================================================== */
diff --git a/contrib/lua/src/lopcodes.h b/contrib/lua/src/lopcodes.h
index 4c55145399ff..46911cac14e0 100644
--- a/contrib/lua/src/lopcodes.h
+++ b/contrib/lua/src/lopcodes.h
@@ -210,15 +210,15 @@ OP_LOADNIL,/* A B R[A], R[A+1], ..., R[A+B] := nil */
OP_GETUPVAL,/* A B R[A] := UpValue[B] */
OP_SETUPVAL,/* A B UpValue[B] := R[A] */
-OP_GETTABUP,/* A B C R[A] := UpValue[B][K[C]:string] */
+OP_GETTABUP,/* A B C R[A] := UpValue[B][K[C]:shortstring] */
OP_GETTABLE,/* A B C R[A] := R[B][R[C]] */
OP_GETI,/* A B C R[A] := R[B][C] */
-OP_GETFIELD,/* A B C R[A] := R[B][K[C]:string] */
+OP_GETFIELD,/* A B C R[A] := R[B][K[C]:shortstring] */
-OP_SETTABUP,/* A B C UpValue[A][K[B]:string] := RK(C) */
+OP_SETTABUP,/* A B C UpValue[A][K[B]:shortstring] := RK(C) */
OP_SETTABLE,/* A B C R[A][R[B]] := RK(C) */
OP_SETI,/* A B C R[A][B] := RK(C) */
-OP_SETFIELD,/* A B C R[A][K[B]:string] := RK(C) */
+OP_SETFIELD,/* A B C R[A][K[B]:shortstring] := RK(C) */
OP_NEWTABLE,/* A B C k R[A] := {} */
diff --git a/contrib/lua/src/loslib.c b/contrib/lua/src/loslib.c
index ad5a92768852..ba80d72c4575 100644
--- a/contrib/lua/src/loslib.c
+++ b/contrib/lua/src/loslib.c
@@ -155,6 +155,7 @@ static int os_execute (lua_State *L) {
static int os_remove (lua_State *L) {
const char *filename = luaL_checkstring(L, 1);
+ errno = 0;
return luaL_fileresult(L, remove(filename) == 0, filename);
}
@@ -162,6 +163,7 @@ static int os_remove (lua_State *L) {
static int os_rename (lua_State *L) {
const char *fromname = luaL_checkstring(L, 1);
const char *toname = luaL_checkstring(L, 2);
+ errno = 0;
return luaL_fileresult(L, rename(fromname, toname) == 0, NULL);
}
diff --git a/contrib/lua/src/lparser.c b/contrib/lua/src/lparser.c
index b745f236f068..1ac82990e0c3 100644
--- a/contrib/lua/src/lparser.c
+++ b/contrib/lua/src/lparser.c
@@ -198,7 +198,7 @@ static int new_localvar (LexState *ls, TString *name) {
checklimit(fs, dyd->actvar.n + 1 - fs->firstlocal,
MAXVARS, "local variables");
luaM_growvector(L, dyd->actvar.arr, dyd->actvar.n + 1,
- dyd->actvar.size, Vardesc, USHRT_MAX, "local variables");
+ dyd->actvar.size, Vardesc, SHRT_MAX, "local variables");
var = &dyd->actvar.arr[dyd->actvar.n++];
var->vd.kind = VDKREG; /* default */
var->vd.name = name;
@@ -849,12 +849,11 @@ static void recfield (LexState *ls, ConsControl *cc) {
FuncState *fs = ls->fs;
int reg = ls->fs->freereg;
expdesc tab, key, val;
- if (ls->t.token == TK_NAME) {
- checklimit(fs, cc->nh, MAX_INT, "items in a constructor");
+ if (ls->t.token == TK_NAME)
codename(ls, &key);
- }
else /* ls->t.token == '[' */
yindex(ls, &key);
+ checklimit(fs, cc->nh, MAX_INT, "items in a constructor");
cc->nh++;
checknext(ls, '=');
tab = *cc->t;
@@ -1022,10 +1021,11 @@ static int explist (LexState *ls, expdesc *v) {
}
-static void funcargs (LexState *ls, expdesc *f, int line) {
+static void funcargs (LexState *ls, expdesc *f) {
FuncState *fs = ls->fs;
expdesc args;
int base, nparams;
+ int line = ls->linenumber;
switch (ls->t.token) {
case '(': { /* funcargs -> '(' [ explist ] ')' */
luaX_next(ls);
@@ -1063,8 +1063,8 @@ static void funcargs (LexState *ls, expdesc *f, int line) {
}
init_exp(f, VCALL, luaK_codeABC(fs, OP_CALL, base, nparams+1, 2));
luaK_fixline(fs, line);
- fs->freereg = base+1; /* call remove function and arguments and leaves
- (unless changed) one result */
+ fs->freereg = base+1; /* call removes function and arguments and leaves
+ one result (unless changed later) */
}
@@ -1103,7 +1103,6 @@ static void suffixedexp (LexState *ls, expdesc *v) {
/* suffixedexp ->
primaryexp { '.' NAME | '[' exp ']' | ':' NAME funcargs | funcargs } */
FuncState *fs = ls->fs;
- int line = ls->linenumber;
primaryexp(ls, v);
for (;;) {
switch (ls->t.token) {
@@ -1123,12 +1122,12 @@ static void suffixedexp (LexState *ls, expdesc *v) {
luaX_next(ls);
codename(ls, &key);
luaK_self(fs, v, &key);
- funcargs(ls, v, line);
+ funcargs(ls, v);
break;
}
case '(': case TK_STRING: case '{': { /* funcargs */
luaK_exp2nextreg(fs, v);
- funcargs(ls, v, line);
+ funcargs(ls, v);
break;
}
default: return;
diff --git a/contrib/lua/src/lstate.c b/contrib/lua/src/lstate.c
index 1e925e5ad4cb..f3f2ccfdd5fb 100644
--- a/contrib/lua/src/lstate.c
+++ b/contrib/lua/src/lstate.c
@@ -119,7 +119,7 @@ CallInfo *luaE_extendCI (lua_State *L) {
/*
** free all CallInfo structures not in use by a thread
*/
-void luaE_freeCI (lua_State *L) {
+static void freeCI (lua_State *L) {
CallInfo *ci = L->ci;
CallInfo *next = ci->next;
ci->next = NULL;
@@ -166,7 +166,7 @@ void luaE_checkcstack (lua_State *L) {
if (getCcalls(L) == LUAI_MAXCCALLS)
luaG_runerror(L, "C stack overflow");
else if (getCcalls(L) >= (LUAI_MAXCCALLS / 10 * 11))
- luaD_throw(L, LUA_ERRERR); /* error while handling stack error */
+ luaD_errerr(L); /* error while handling stack error */
}
@@ -204,7 +204,7 @@ static void freestack (lua_State *L) {
if (L->stack.p == NULL)
return; /* stack not completely built yet */
L->ci = &L->base_ci; /* free the entire 'ci' list */
- luaE_freeCI(L);
+ freeCI(L);
lua_assert(L->nci == 0);
luaM_freearray(L, L->stack.p, stacksize(L) + EXTRA_STACK); /* free stack */
}
@@ -272,7 +272,9 @@ static void close_state (lua_State *L) {
luaC_freeallobjects(L); /* just collect its objects */
else { /* closing a fully built state */
L->ci = &L->base_ci; /* unwind CallInfo list */
+ L->errfunc = 0; /* stack unwind can "throw away" the error function */
luaD_closeprotected(L, 1, LUA_OK); /* close all upvalues */
+ L->top.p = L->stack.p + 1; /* empty the stack to run finalizers */
luaC_freeallobjects(L); /* collect all objects */
luai_userstateclose(L);
}
@@ -328,6 +330,7 @@ int luaE_resetthread (lua_State *L, int status) {
if (status == LUA_YIELD)
status = LUA_OK;
L->status = LUA_OK; /* so it can run __close metamethods */
+ L->errfunc = 0; /* stack unwind can "throw away" the error function */
status = luaD_closeprotected(L, 1, status);
if (status != LUA_OK) /* errors? */
luaD_seterrorobj(L, status, L->stack.p + 1);
@@ -433,7 +436,7 @@ void luaE_warning (lua_State *L, const char *msg, int tocont) {
void luaE_warnerror (lua_State *L, const char *where) {
TValue *errobj = s2v(L->top.p - 1); /* error object */
const char *msg = (ttisstring(errobj))
- ? svalue(errobj)
+ ? getstr(tsvalue(errobj))
: "error object is not a string";
/* produce warning "error in %s (%s)" (where, msg) */
luaE_warning(L, "error in ", 1);
diff --git a/contrib/lua/src/lstate.h b/contrib/lua/src/lstate.h
index 8bf6600e3441..007704c826be 100644
--- a/contrib/lua/src/lstate.h
+++ b/contrib/lua/src/lstate.h
@@ -181,7 +181,7 @@ struct CallInfo {
union {
struct { /* only for Lua functions */
const Instruction *savedpc;
- volatile l_signalT trap;
+ volatile l_signalT trap; /* function is tracing lines/counts */
int nextraargs; /* # of extra arguments in vararg functions */
} l;
struct { /* only for C functions */
@@ -396,7 +396,6 @@ union GCUnion {
LUAI_FUNC void luaE_setdebt (global_State *g, l_mem debt);
LUAI_FUNC void luaE_freethread (lua_State *L, lua_State *L1);
LUAI_FUNC CallInfo *luaE_extendCI (lua_State *L);
-LUAI_FUNC void luaE_freeCI (lua_State *L);
LUAI_FUNC void luaE_shrinkCI (lua_State *L);
LUAI_FUNC void luaE_checkcstack (lua_State *L);
LUAI_FUNC void luaE_incCstack (lua_State *L);
diff --git a/contrib/lua/src/lstring.c b/contrib/lua/src/lstring.c
index 13dcaf4259bc..97757355c0b6 100644
--- a/contrib/lua/src/lstring.c
+++ b/contrib/lua/src/lstring.c
@@ -36,7 +36,7 @@ int luaS_eqlngstr (TString *a, TString *b) {
lua_assert(a->tt == LUA_VLNGSTR && b->tt == LUA_VLNGSTR);
return (a == b) || /* same instance or... */
((len == b->u.lnglen) && /* equal length and ... */
- (memcmp(getstr(a), getstr(b), len) == 0)); /* equal contents */
+ (memcmp(getlngstr(a), getlngstr(b), len) == 0)); /* equal contents */
}
@@ -52,7 +52,7 @@ unsigned int luaS_hashlongstr (TString *ts) {
lua_assert(ts->tt == LUA_VLNGSTR);
if (ts->extra == 0) { /* no hash? */
size_t len = ts->u.lnglen;
- ts->hash = luaS_hash(getstr(ts), len, ts->hash);
+ ts->hash = luaS_hash(getlngstr(ts), len, ts->hash);
ts->extra = 1; /* now it has its hash */
}
return ts->hash;
@@ -157,6 +157,7 @@ static TString *createstrobj (lua_State *L, size_t l, int tag, unsigned int h) {
TString *luaS_createlngstrobj (lua_State *L, size_t l) {
TString *ts = createstrobj(L, l, LUA_VLNGSTR, G(L)->seed);
ts->u.lnglen = l;
+ ts->shrlen = 0xFF; /* signals that it is a long string */
return ts;
}
@@ -193,7 +194,7 @@ static TString *internshrstr (lua_State *L, const char *str, size_t l) {
TString **list = &tb->hash[lmod(h, tb->size)];
lua_assert(str != NULL); /* otherwise 'memcmp'/'memcpy' are undefined */
for (ts = *list; ts != NULL; ts = ts->u.hnext) {
- if (l == ts->shrlen && (memcmp(str, getstr(ts), l * sizeof(char)) == 0)) {
+ if (l == ts->shrlen && (memcmp(str, getshrstr(ts), l * sizeof(char)) == 0)) {
/* found! */
if (isdead(g, ts)) /* dead (but not collected yet)? */
changewhite(ts); /* resurrect it */
@@ -206,8 +207,8 @@ static TString *internshrstr (lua_State *L, const char *str, size_t l) {
list = &tb->hash[lmod(h, tb->size)]; /* rehash with new size */
}
ts = createstrobj(L, l, LUA_VSHRSTR, h);
- memcpy(getstr(ts), str, l * sizeof(char));
ts->shrlen = cast_byte(l);
+ memcpy(getshrstr(ts), str, l * sizeof(char));
ts->u.hnext = *list;
*list = ts;
tb->nuse++;
@@ -223,10 +224,10 @@ TString *luaS_newlstr (lua_State *L, const char *str, size_t l) {
return internshrstr(L, str, l);
else {
TString *ts;
- if (l_unlikely(l >= (MAX_SIZE - sizeof(TString))/sizeof(char)))
+ if (l_unlikely(l * sizeof(char) >= (MAX_SIZE - sizeof(TString))))
luaM_toobig(L);
ts = luaS_createlngstrobj(L, l);
- memcpy(getstr(ts), str, l * sizeof(char));
+ memcpy(getlngstr(ts), str, l * sizeof(char));
return ts;
}
}
diff --git a/contrib/lua/src/ltable.c b/contrib/lua/src/ltable.c
index 3c690c5f1751..3353c047939a 100644
--- a/contrib/lua/src/ltable.c
+++ b/contrib/lua/src/ltable.c
@@ -252,7 +252,7 @@ LUAI_FUNC unsigned int luaH_realasize (const Table *t) {
return t->alimit; /* this is the size */
else {
unsigned int size = t->alimit;
- /* compute the smallest power of 2 not smaller than 'n' */
+ /* compute the smallest power of 2 not smaller than 'size' */
size |= (size >> 1);
size |= (size >> 2);
size |= (size >> 4);
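
The shifts in this hunk are the classic bit-smearing idiom: OR-ing a value with ever larger right shifts of itself copies the highest set bit into every lower position, after which adding one yields a power of two. A generic sketch of the idiom (illustrative only; the Lua function also handles an 'unsigned int' wider than 32 bits):

    #include <stdio.h>

    /* Smallest power of 2 not smaller than 'size' (size >= 1).
    ** Decrementing first makes exact powers of two map to themselves. */
    static unsigned int ceil_pow2(unsigned int size) {
        size--;
        size |= size >> 1;
        size |= size >> 2;
        size |= size >> 4;
        size |= size >> 8;
        size |= size >> 16;   /* enough for a 32-bit unsigned int */
        return size + 1;
    }

    int main(void) {
        unsigned int tests[] = { 1, 5, 8, 100, 1000 };
        for (int i = 0; i < 5; i++)
            printf("ceil_pow2(%u) = %u\n", tests[i], ceil_pow2(tests[i]));
        return 0;   /* prints 1, 8, 8, 128, 1024 */
    }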
@@ -662,7 +662,8 @@ static Node *getfreepos (Table *t) {
** put new key in its main position; otherwise (colliding node is in its main
** position), new key goes to an empty position.
*/
-void luaH_newkey (lua_State *L, Table *t, const TValue *key, TValue *value) {
+static void luaH_newkey (lua_State *L, Table *t, const TValue *key,
+ TValue *value) {
Node *mp;
TValue aux;
if (l_unlikely(ttisnil(key)))
@@ -721,22 +722,36 @@ void luaH_newkey (lua_State *L, Table *t, const TValue *key, TValue *value) {
/*
** Search function for integers. If integer is inside 'alimit', get it
-** directly from the array part. Otherwise, if 'alimit' is not equal to
-** the real size of the array, key still can be in the array part. In
-** this case, try to avoid a call to 'luaH_realasize' when key is just
-** one more than the limit (so that it can be incremented without
-** changing the real size of the array).
+** directly from the array part. Otherwise, if 'alimit' is not
+** the real size of the array, the key still can be in the array part.
+** In this case, do the "Xmilia trick" to check whether 'key-1' is
+** smaller than the real size.
+** The trick works as follows: let 'p' be an integer such that
+** '2^(p+1) >= alimit > 2^p', or '2^(p+1) > alimit-1 >= 2^p'.
+** That is, 2^(p+1) is the real size of the array, and 'p' is the highest
+** bit on in 'alimit-1'. What we have to check becomes 'key-1 < 2^(p+1)'.
+** We compute '(key-1) & ~(alimit-1)', which we call 'res'; it will
+** have the 'p' bit cleared. If the key is outside the array, that is,
+** 'key-1 >= 2^(p+1)', then 'res' will have some bit on higher than 'p',
+** therefore it will be larger than or equal to 'alimit', and the check
+** will fail. If 'key-1 < 2^(p+1)', then 'res' has no bit on higher than
+** 'p', and as the bit 'p' itself was cleared, 'res' will be smaller
+** than 2^p, therefore smaller than 'alimit', and the check succeeds.
+** As special cases, when 'alimit' is 0 the condition is trivially false,
+** and when 'alimit' is 1 the condition simplifies to 'key-1 < alimit'.
+** If key is 0 or negative, 'res' will have its higher bit on, so that
+** it cannot be smaller than 'alimit'.
*/
const TValue *luaH_getint (Table *t, lua_Integer key) {
- if (l_castS2U(key) - 1u < t->alimit) /* 'key' in [1, t->alimit]? */
+ lua_Unsigned alimit = t->alimit;
+ if (l_castS2U(key) - 1u < alimit) /* 'key' in [1, t->alimit]? */
return &t->array[key - 1];
- else if (!limitequalsasize(t) && /* key still may be in the array part? */
- (l_castS2U(key) == t->alimit + 1 ||
- l_castS2U(key) - 1u < luaH_realasize(t))) {
+ else if (!isrealasize(t) && /* key still may be in the array part? */
+ (((l_castS2U(key) - 1u) & ~(alimit - 1u)) < alimit)) {
t->alimit = cast_uint(key); /* probably '#t' is here now */
return &t->array[key - 1];
}
- else {
+ else { /* key is not in the array part; check the hash */
Node *n = hashint(t, key);
for (;;) { /* check whether 'key' is somewhere in the chain */
if (keyisinteger(n) && keyival(n) == key)
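
The "Xmilia trick" comment above can be checked mechanically: the masked test must agree with the direct comparison 'key-1 < 2^(p+1)' for every key. A brute-force standalone verification (illustrative code, not part of the patch):

    #include <assert.h>
    #include <stdio.h>

    /* 2^(p+1): the real array size for a given 'alimit' (alimit >= 2),
    ** i.e. the smallest power of 2 not smaller than 'alimit'. */
    static unsigned int realsize(unsigned int alimit) {
        unsigned int s = 1;
        while (s < alimit)
            s <<= 1;
        return s;
    }

    int main(void) {
        for (unsigned int alimit = 2; alimit <= 64; alimit++) {
            unsigned int size = realsize(alimit);
            for (unsigned int key = 1; key <= 4 * size; key++) {
                int trick  = ((key - 1) & ~(alimit - 1)) < alimit;
                int direct = (key - 1) < size;
                assert(trick == direct);
            }
        }
        printf("trick matches direct comparison\n");
        return 0;
    }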
diff --git a/contrib/lua/src/ltable.h b/contrib/lua/src/ltable.h
index 75dd9e26e015..8e6890342348 100644
--- a/contrib/lua/src/ltable.h
+++ b/contrib/lua/src/ltable.h
@@ -41,8 +41,6 @@ LUAI_FUNC void luaH_setint (lua_State *L, Table *t, lua_Integer key,
LUAI_FUNC const TValue *luaH_getshortstr (Table *t, TString *key);
LUAI_FUNC const TValue *luaH_getstr (Table *t, TString *key);
LUAI_FUNC const TValue *luaH_get (Table *t, const TValue *key);
-LUAI_FUNC void luaH_newkey (lua_State *L, Table *t, const TValue *key,
- TValue *value);
LUAI_FUNC void luaH_set (lua_State *L, Table *t, const TValue *key,
TValue *value);
LUAI_FUNC void luaH_finishset (lua_State *L, Table *t, const TValue *key,
diff --git a/contrib/lua/src/ltm.h b/contrib/lua/src/ltm.h
index c309e2ae10e3..73b833c605da 100644
--- a/contrib/lua/src/ltm.h
+++ b/contrib/lua/src/ltm.h
@@ -9,7 +9,6 @@
#include "lobject.h"
-#include "lstate.h"
/*
@@ -96,8 +95,8 @@ LUAI_FUNC int luaT_callorderiTM (lua_State *L, const TValue *p1, int v2,
int inv, int isfloat, TMS event);
LUAI_FUNC void luaT_adjustvarargs (lua_State *L, int nfixparams,
- CallInfo *ci, const Proto *p);
-LUAI_FUNC void luaT_getvarargs (lua_State *L, CallInfo *ci,
+ struct CallInfo *ci, const Proto *p);
+LUAI_FUNC void luaT_getvarargs (lua_State *L, struct CallInfo *ci,
StkId where, int wanted);
diff --git a/contrib/lua/src/lua.c b/contrib/lua/src/lua.c
index 0ff884545304..4a90e55dd94b 100644
--- a/contrib/lua/src/lua.c
+++ b/contrib/lua/src/lua.c
@@ -115,12 +115,13 @@ static void l_message (const char *pname, const char *msg) {
/*
** Check whether 'status' is not OK and, if so, prints the error
-** message on the top of the stack. It assumes that the error object
-** is a string, as it was either generated by Lua or by 'msghandler'.
+** message on the top of the stack.
*/
static int report (lua_State *L, int status) {
if (status != LUA_OK) {
const char *msg = lua_tostring(L, -1);
+ if (msg == NULL)
+ msg = "(error message not a string)";
l_message(progname, msg);
lua_pop(L, 1); /* remove message */
}
@@ -210,12 +211,17 @@ static int dostring (lua_State *L, const char *s, const char *name) {
/*
** Receives 'globname[=modname]' and runs 'globname = require(modname)'.
+** If there is no explicit modname and globname contains a '-', cut
+** the suffix after '-' (the "version") to make the global name.
*/
static int dolibrary (lua_State *L, char *globname) {
int status;
+ char *suffix = NULL;
char *modname = strchr(globname, '=');
- if (modname == NULL) /* no explicit name? */
+ if (modname == NULL) { /* no explicit name? */
modname = globname; /* module name is equal to global name */
+ suffix = strchr(modname, *LUA_IGMARK); /* look for a suffix mark */
+ }
else {
*modname = '\0'; /* global name ends here */
modname++; /* module name starts after the '=' */
@@ -223,8 +229,11 @@ static int dolibrary (lua_State *L, char *globname) {
lua_getglobal(L, "require");
lua_pushstring(L, modname);
status = docall(L, 1, 1); /* call 'require(modname)' */
- if (status == LUA_OK)
+ if (status == LUA_OK) {
+ if (suffix != NULL) /* is there a suffix mark? */
+ *suffix = '\0'; /* remove suffix from global name */
lua_setglobal(L, globname); /* globname = require(modname) */
+ }
return report(L, status);
}
@@ -481,10 +490,8 @@ static int incomplete (lua_State *L, int status) {
if (status == LUA_ERRSYNTAX) {
size_t lmsg;
const char *msg = lua_tolstring(L, -1, &lmsg);
- if (lmsg >= marklen && strcmp(msg + lmsg - marklen, EOFMARK) == 0) {
- lua_pop(L, 1);
+ if (lmsg >= marklen && strcmp(msg + lmsg - marklen, EOFMARK) == 0)
return 1;
- }
}
return 0; /* else... */
}
@@ -499,9 +506,9 @@ static int pushline (lua_State *L, int firstline) {
size_t l;
const char *prmt = get_prompt(L, firstline);
int readstatus = lua_readline(L, b, prmt);
- if (readstatus == 0)
- return 0; /* no input (prompt will be popped by caller) */
lua_pop(L, 1); /* remove prompt */
+ if (readstatus == 0)
+ return 0; /* no input */
l = strlen(b);
if (l > 0 && b[l-1] == '\n') /* line ends with newline? */
b[--l] = '\0'; /* remove it */
@@ -543,8 +550,9 @@ static int multiline (lua_State *L) {
int status = luaL_loadbuffer(L, line, len, "=stdin"); /* try it */
if (!incomplete(L, status) || !pushline(L, 0)) {
lua_saveline(L, line); /* keep history */
- return status; /* cannot or should not try to add continuation line */
+ return status; /* should not or cannot try to add continuation line */
}
+ lua_remove(L, -2); /* remove error message (from incomplete line) */
lua_pushliteral(L, "\n"); /* add newline... */
lua_insert(L, -2); /* ...between the two lines */
lua_concat(L, 3); /* join them */
diff --git a/contrib/lua/src/lua.h b/contrib/lua/src/lua.h
index fd16cf8050b8..f3ea590d9cd6 100644
--- a/contrib/lua/src/lua.h
+++ b/contrib/lua/src/lua.h
@@ -18,14 +18,14 @@
#define LUA_VERSION_MAJOR "5"
#define LUA_VERSION_MINOR "4"
-#define LUA_VERSION_RELEASE "6"
+#define LUA_VERSION_RELEASE "8"
#define LUA_VERSION_NUM 504
-#define LUA_VERSION_RELEASE_NUM (LUA_VERSION_NUM * 100 + 6)
+#define LUA_VERSION_RELEASE_NUM (LUA_VERSION_NUM * 100 + 8)
#define LUA_VERSION "Lua " LUA_VERSION_MAJOR "." LUA_VERSION_MINOR
#define LUA_RELEASE LUA_VERSION "." LUA_VERSION_RELEASE
-#define LUA_COPYRIGHT LUA_RELEASE " Copyright (C) 1994-2023 Lua.org, PUC-Rio"
+#define LUA_COPYRIGHT LUA_RELEASE " Copyright (C) 1994-2025 Lua.org, PUC-Rio"
#define LUA_AUTHORS "R. Ierusalimschy, L. H. de Figueiredo, W. Celes"
@@ -497,7 +497,7 @@ struct lua_Debug {
/******************************************************************************
-* Copyright (C) 1994-2023 Lua.org, PUC-Rio.
+* Copyright (C) 1994-2025 Lua.org, PUC-Rio.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
diff --git a/contrib/lua/src/luaconf.h b/contrib/lua/src/luaconf.h
index e517fb4d2e75..c7ca969e321f 100644
--- a/contrib/lua/src/luaconf.h
+++ b/contrib/lua/src/luaconf.h
@@ -257,6 +257,15 @@
#endif
+
+/*
+** LUA_IGMARK is a mark to ignore all after it when building the
+** module name (e.g., used to build the luaopen_ function name).
+** Typically, the suffix after the mark is the module version,
+** as in "mod-v1.2.so".
+*/
+#define LUA_IGMARK "-"
+
/* }================================================================== */
diff --git a/contrib/lua/src/lundump.c b/contrib/lua/src/lundump.c
index 02aed64fb622..e8d92a8534ff 100644
--- a/contrib/lua/src/lundump.c
+++ b/contrib/lua/src/lundump.c
@@ -81,7 +81,7 @@ static size_t loadUnsigned (LoadState *S, size_t limit) {
static size_t loadSize (LoadState *S) {
- return loadUnsigned(S, ~(size_t)0);
+ return loadUnsigned(S, MAX_SIZET);
}
@@ -122,7 +122,7 @@ static TString *loadStringN (LoadState *S, Proto *p) {
ts = luaS_createlngstrobj(L, size); /* create string */
setsvalue2s(L, L->top.p, ts); /* anchor it ('loadVector' can GC) */
luaD_inctop(L);
- loadVector(S, getstr(ts), size); /* load directly in final place */
+ loadVector(S, getlngstr(ts), size); /* load directly in final place */
L->top.p--; /* pop string */
}
luaC_objbarrier(L, p, ts);
diff --git a/contrib/lua/src/lundump.h b/contrib/lua/src/lundump.h
index f3748a998075..a97676ca1852 100644
--- a/contrib/lua/src/lundump.h
+++ b/contrib/lua/src/lundump.h
@@ -21,8 +21,7 @@
/*
** Encode major-minor version in one byte, one nibble for each
*/
-#define MYINT(s) (s[0]-'0') /* assume one-digit numerals */
-#define LUAC_VERSION (MYINT(LUA_VERSION_MAJOR)*16+MYINT(LUA_VERSION_MINOR))
+#define LUAC_VERSION (((LUA_VERSION_NUM / 100) * 16) + LUA_VERSION_NUM % 100)
#define LUAC_FORMAT 0 /* this is the official format */
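
The new LUAC_VERSION drops the old one-digit-numeral assumption by deriving both nibbles arithmetically from LUA_VERSION_NUM: for Lua 5.4, (504 / 100) * 16 + 504 % 100 == 5 * 16 + 4 == 84 == 0x54, the same byte MYINT produced. A quick standalone check:

    #include <stdio.h>

    #define LUA_VERSION_NUM 504   /* as in lua.h for Lua 5.4 */
    #define LUAC_VERSION (((LUA_VERSION_NUM / 100) * 16) + LUA_VERSION_NUM % 100)

    int main(void) {
        printf("LUAC_VERSION = %d (0x%02X)\n", LUAC_VERSION, LUAC_VERSION);
        return 0;   /* prints: LUAC_VERSION = 84 (0x54) */
    }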
diff --git a/contrib/lua/src/lvm.c b/contrib/lua/src/lvm.c
index 9d1bdfb0bd6e..45b47e7c8793 100644
--- a/contrib/lua/src/lvm.c
+++ b/contrib/lua/src/lvm.c
@@ -95,8 +95,10 @@ static int l_strton (const TValue *obj, TValue *result) {
lua_assert(obj != result);
if (!cvt2num(obj)) /* is object not a string? */
return 0;
- else
- return (luaO_str2num(svalue(obj), result) == vslen(obj) + 1);
+ else {
+ TString *st = tsvalue(obj);
+ return (luaO_str2num(getstr(st), result) == tsslen(st) + 1);
+ }
}
@@ -341,7 +343,10 @@ void luaV_finishset (lua_State *L, const TValue *t, TValue *key,
lua_assert(isempty(slot)); /* slot must be empty */
tm = fasttm(L, h->metatable, TM_NEWINDEX); /* get metamethod */
if (tm == NULL) { /* no metamethod? */
+ sethvalue2s(L, L->top.p, h); /* anchor 't' */
+ L->top.p++; /* assume EXTRA_STACK */
luaH_finishset(L, h, key, slot, val); /* set new value */
+ L->top.p--;
invalidateTMcache(h);
luaC_barrierback(L, obj2gco(h), val);
return;
@@ -370,30 +375,32 @@ void luaV_finishset (lua_State *L, const TValue *t, TValue *key,
/*
-** Compare two strings 'ls' x 'rs', returning an integer less-equal-
-** -greater than zero if 'ls' is less-equal-greater than 'rs'.
+** Compare two strings 'ts1' x 'ts2', returning an integer less-equal-
+** -greater than zero if 'ts1' is less-equal-greater than 'ts2'.
** The code is a little tricky because it allows '\0' in the strings
-** and it uses 'strcoll' (to respect locales) for each segments
-** of the strings.
+** and it uses 'strcoll' (to respect locales) for each segment
+** of the strings. Note that segments can compare equal but still
+** have different lengths.
*/
-static int l_strcmp (const TString *ls, const TString *rs) {
- const char *l = getstr(ls);
- size_t ll = tsslen(ls);
- const char *r = getstr(rs);
- size_t lr = tsslen(rs);
+static int l_strcmp (const TString *ts1, const TString *ts2) {
+ const char *s1 = getstr(ts1);
+ size_t rl1 = tsslen(ts1); /* real length */
+ const char *s2 = getstr(ts2);
+ size_t rl2 = tsslen(ts2);
for (;;) { /* for each segment */
- int temp = strcoll(l, r);
+ int temp = strcoll(s1, s2);
if (temp != 0) /* not equal? */
return temp; /* done */
else { /* strings are equal up to a '\0' */
- size_t len = strlen(l); /* index of first '\0' in both strings */
- if (len == lr) /* 'rs' is finished? */
- return (len == ll) ? 0 : 1; /* check 'ls' */
- else if (len == ll) /* 'ls' is finished? */
- return -1; /* 'ls' is less than 'rs' ('rs' is not finished) */
- /* both strings longer than 'len'; go on comparing after the '\0' */
- len++;
- l += len; ll -= len; r += len; lr -= len;
+ size_t zl1 = strlen(s1); /* index of first '\0' in 's1' */
+ size_t zl2 = strlen(s2); /* index of first '\0' in 's2' */
+ if (zl2 == rl2) /* 's2' is finished? */
+ return (zl1 == rl1) ? 0 : 1; /* check 's1' */
+ else if (zl1 == rl1) /* 's1' is finished? */
+ return -1; /* 's1' is less than 's2' ('s2' is not finished) */
+ /* both strings continue past their first '\0'; go on comparing after it */
+ zl1++; zl2++;
+ s1 += zl1; rl1 -= zl1; s2 += zl2; rl2 -= zl2;
}
}
}
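
The rewritten l_strcmp keeps a separate '\0' index per string because strcoll compares only up to a '\0', and under a locale two segments can collate as equal while having different lengths. A standalone sketch of the segment-wise scheme ('buf_strcmp' is an illustrative name, not a Lua function):

    #include <stdio.h>
    #include <string.h>

    /* Compare buffers that may contain embedded '\0', rl1/rl2 being the
    ** real lengths; after an equal segment, skip past each side's own
    ** '\0' and continue with the next segment. */
    static int buf_strcmp(const char *s1, size_t rl1,
                          const char *s2, size_t rl2) {
        for (;;) {
            int temp = strcoll(s1, s2);
            if (temp != 0)
                return temp;
            else {                        /* equal up to a '\0' */
                size_t zl1 = strlen(s1);  /* first '\0' in 's1' */
                size_t zl2 = strlen(s2);  /* first '\0' in 's2' */
                if (zl2 == rl2)           /* 's2' finished? */
                    return (zl1 == rl1) ? 0 : 1;
                else if (zl1 == rl1)      /* 's1' finished? */
                    return -1;
                zl1++; zl2++;             /* step over the '\0's */
                s1 += zl1; rl1 -= zl1;
                s2 += zl2; rl2 -= zl2;
            }
        }
    }

    int main(void) {
        const char a[] = "abc\0x";   /* 5 bytes, '\0' embedded */
        const char b[] = "abc\0y";
        printf("%d\n", buf_strcmp(a, 5, b, 5) < 0);   /* prints 1 */
        return 0;
    }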
@@ -628,8 +635,9 @@ int luaV_equalobj (lua_State *L, const TValue *t1, const TValue *t2) {
static void copy2buff (StkId top, int n, char *buff) {
size_t tl = 0; /* size already copied */
do {
- size_t l = vslen(s2v(top - n)); /* length of string being copied */
- memcpy(buff + tl, svalue(s2v(top - n)), l * sizeof(char));
+ TString *st = tsvalue(s2v(top - n));
+ size_t l = tsslen(st); /* length of string being copied */
+ memcpy(buff + tl, getstr(st), l * sizeof(char));
tl += l;
} while (--n > 0);
}
@@ -655,12 +663,12 @@ void luaV_concat (lua_State *L, int total) {
}
else {
/* at least two non-empty string values; get as many as possible */
- size_t tl = vslen(s2v(top - 1));
+ size_t tl = tsslen(tsvalue(s2v(top - 1)));
TString *ts;
/* collect total length and number of strings */
for (n = 1; n < total && tostring(L, s2v(top - n - 1)); n++) {
- size_t l = vslen(s2v(top - n - 1));
- if (l_unlikely(l >= (MAX_SIZE/sizeof(char)) - tl)) {
+ size_t l = tsslen(tsvalue(s2v(top - n - 1)));
+ if (l_unlikely(l >= MAX_SIZE - sizeof(TString) - tl)) {
L->top.p = top - total; /* pop strings to avoid wasting stack */
luaG_runerror(L, "string length overflow");
}
@@ -673,7 +681,7 @@ void luaV_concat (lua_State *L, int total) {
}
else { /* long string; copy strings directly to final result */
ts = luaS_createlngstrobj(L, tl);
- copy2buff(top, n, getstr(ts));
+ copy2buff(top, n, getlngstr(ts));
}
setsvalue2s(L, top - n, ts); /* create result */
}
@@ -1159,18 +1167,11 @@ void luaV_execute (lua_State *L, CallInfo *ci) {
startfunc:
trap = L->hookmask;
returning: /* trap already set */
- cl = clLvalue(s2v(ci->func.p));
+ cl = ci_func(ci);
k = cl->p->k;
pc = ci->u.l.savedpc;
- if (l_unlikely(trap)) {
- if (pc == cl->p->code) { /* first instruction (not resuming)? */
- if (cl->p->is_vararg)
- trap = 0; /* hooks will start after VARARGPREP instruction */
- else /* check 'call' hook */
- luaD_hookcall(L, ci);
- }
- ci->u.l.trap = 1; /* assume trap is on, for now */
- }
+ if (l_unlikely(trap))
+ trap = luaG_tracecall(L);
base = ci->func.p + 1;
/* main loop of interpreter */
for (;;) {
@@ -1257,7 +1258,7 @@ void luaV_execute (lua_State *L, CallInfo *ci) {
const TValue *slot;
TValue *upval = cl->upvals[GETARG_B(i)]->v.p;
TValue *rc = KC(i);
- TString *key = tsvalue(rc); /* key must be a string */
+ TString *key = tsvalue(rc); /* key must be a short string */
if (luaV_fastget(L, upval, key, slot, luaH_getshortstr)) {
setobj2s(L, ra, slot);
}
@@ -1300,7 +1301,7 @@ void luaV_execute (lua_State *L, CallInfo *ci) {
const TValue *slot;
TValue *rb = vRB(i);
TValue *rc = KC(i);
- TString *key = tsvalue(rc); /* key must be a string */
+ TString *key = tsvalue(rc); /* key must be a short string */
if (luaV_fastget(L, rb, key, slot, luaH_getshortstr)) {
setobj2s(L, ra, slot);
}
@@ -1313,7 +1314,7 @@ void luaV_execute (lua_State *L, CallInfo *ci) {
TValue *upval = cl->upvals[GETARG_A(i)]->v.p;
TValue *rb = KB(i);
TValue *rc = RKC(i);
- TString *key = tsvalue(rb); /* key must be a string */
+ TString *key = tsvalue(rb); /* key must be a short string */
if (luaV_fastget(L, upval, key, slot, luaH_getshortstr)) {
luaV_finishfastset(L, upval, slot, rc);
}
@@ -1356,7 +1357,7 @@ void luaV_execute (lua_State *L, CallInfo *ci) {
const TValue *slot;
TValue *rb = KB(i);
TValue *rc = RKC(i);
- TString *key = tsvalue(rb); /* key must be a string */
+ TString *key = tsvalue(rb); /* key must be a short string */
if (luaV_fastget(L, s2v(ra), key, slot, luaH_getshortstr)) {
luaV_finishfastset(L, s2v(ra), slot, rc);
}
diff --git a/contrib/mandoc/Makefile b/contrib/mandoc/Makefile
index 7ec34a560504..0830c9f289a3 100644
--- a/contrib/mandoc/Makefile
+++ b/contrib/mandoc/Makefile
@@ -15,7 +15,7 @@
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-VERSION = 1.14.6s20250613
+VERSION = 1.14.6s20250727
# === LIST OF FILES ====================================================
diff --git a/contrib/mandoc/TODO b/contrib/mandoc/TODO
index 3f5a449af68f..5d582b85b154 100644
--- a/contrib/mandoc/TODO
+++ b/contrib/mandoc/TODO
@@ -1,6 +1,6 @@
************************************************************************
* Official mandoc TODO.
-* $Id: TODO,v 1.337 2025/04/08 21:53:14 schwarze Exp $
+* $Id: TODO,v 1.338 2025/07/22 13:36:54 schwarze Exp $
************************************************************************
Many issues are annotated for difficulty as follows:
@@ -505,6 +505,15 @@ are mere guesses, and some may be wrong.
re-reported by tb@ Mon, 16 Mar 2015 16:47:21 +0100
loc ** exist ** algo ** size * imp **
+- Check for bad line breaks caused by PostScript and PDF using variable-
+ width fonts, for example in .Bl -width "string". The difficulty
+ estimate below assumes a naive solution that simply scales up widths
+ internally or adds default spacing (as in terminal output). If fixes
+ are needed in the width measurements themselves, it might be a bit
+ harder, but likely not unreasonably so.
+ reported by Jan Stary 20 May 2024 10:19:01 +0200
+ loc * exist * algo ** size * imp **
+
--- HTML issues --------------------------------------------------------
- support the idiom .TP .IP .TP for multi-paragraph list item bodies
diff --git a/contrib/mandoc/catman.8 b/contrib/mandoc/catman.8
index 903fa1fa82c9..c0f14872afc6 100644
--- a/contrib/mandoc/catman.8
+++ b/contrib/mandoc/catman.8
@@ -1,6 +1,6 @@
-.\" $Id: catman.8,v 1.8 2017/03/18 19:56:01 schwarze Exp $
+.\" $Id: catman.8,v 1.15 2025/07/13 14:15:26 schwarze Exp $
.\"
-.\" Copyright (c) 2017 Ingo Schwarze <schwarze@openbsd.org>
+.\" Copyright (c) 2017, 2025 Ingo Schwarze <schwarze@openbsd.org>
.\"
.\" Permission to use, copy, modify, and distribute this software for any
.\" purpose with or without fee is hereby granted, provided that the above
@@ -14,7 +14,7 @@
.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
.\"
-.Dd $Mdocdate: March 18 2017 $
+.Dd $Mdocdate: July 13 2025 $
.Dt CATMAN 8
.Os
.Sh NAME
@@ -37,9 +37,9 @@ and
format and formats all of them, storing the formatted versions in
the same relative paths below
.Ar dstdir .
-Subdirectories of
+Unless they already exist,
.Ar dstdir
-are created as needed.
+itself and any required subdirectories are created.
Existing files are not explicitly deleted, but possibly overwritten.
.Pp
The options are as follows:
@@ -71,6 +71,18 @@ output mode, the
.Cm fragment
output option is implied.
Other output options are not supported.
+.It Fl v
+Verbose mode, printing additional information to standard error output.
+Specifying this once prints a summary about the number of files
+and directories processed at the end of the iteration.
+Specifying it twice additionally prints debugging information
+about the backchannel from
+.Xr mandocd 8
+to
+.Nm
+that is used to limit the number of files in flight at any given time.
+For details, see
+.Sx DIAGNOSTICS .
.El
.Sh IMPLEMENTATION NOTES
Since this version avoids
@@ -87,33 +99,209 @@ implementations.
.Sh EXIT STATUS
.Ex -std
.Pp
-Possible errors include:
-.Bl -bullet
-.It
-missing, invalid, or excessive command line arguments
-.It
-failure to change the current working directory to
+Failures while trying to open individual manual pages for reading,
+to save individual formatted files to the file system,
+or even to read or create subdirectories do not cause
+.Nm
+to return an error exit status.
+In such cases,
+.Nm
+simply continues with the next file or subdirectory.
+.Sh DIAGNOSTICS
+Some fatal errors cause
+.Nm
+to exit before the iteration over input files is even started:
+.Bl -tag -width Ds -offset indent
+.It unknown option \-\- Ar option
+An invalid option was passed on the command line.
+.It missing arguments: srcdir and dstdir
+No argument was provided.
+Both
.Ar srcdir
-.It
-failure to open
+and
.Ar dstdir
-.It
-communication failure with
+are mandatory.
+.It missing argument: dstdir
+Only one argument was provided.
+The second argument,
+.Ar dstdir ,
+is mandatory, too.
+.It too many arguments: Ar third argument
+Three or more arguments were provided, but only two are supported.
+.It Sy socketpair : Ar reason
+The sockets needed for communication with
+.Xr mandocd 8
+could not be created, for example due to file descriptor or memory exhaustion.
+.It Sy fork : Ar reason
+The new process needed to run
.Xr mandocd 8
-.It
-resource exhaustion, for example file descriptor, process table,
-or memory exhaustion
+could not be created, for example due to process table exhaustion
+or system resource limits.
+.It Sy exec Ns Po Sy mandocd Pc : Ar reason
+The
+.Xr mandocd 8
+child program could not be started, for example because it is not in the
+.Ev PATH
+or has no execute permission.
+.It Sy mkdir No destination Ar dstdir : reason
+The
+.Ar dstdir
+does not exist and could not be created, for example because
+the parent directory does not exist or permission is denied.
+.It Sy open No destination Ar dstdir : reason
+The
+.Ar dstdir
+could not be opened for reading, for example because
+it is not a directory or permission is denied.
+.It Sy chdir No to source Ar srcdir : reason
+The current working directory could not be changed to
+.Ar srcdir ,
+for example because it does not exist, it is not a directory,
+or permission is denied.
+.It Sy fts_open : Ar reason
+Starting the iteration was attempted but failed,
+for example due to memory exhaustion.
.El
.Pp
-Except for memory exhaustion and similar system-level failures,
-failures while trying to open, read, parse, or format individual
-manual pages, to save individual formatted files to the file system,
-or even to create directories do not cause
+Some fatal errors cause the iteration over input files to be aborted
+prematurely:
+.Bl -tag -width Ds -offset indent
+.It FATAL: Sy fts_read : Ar reason
+A call to
+.Xr fts_read 3
+returned
+.Dv NULL ,
+meaning that the iteration failed before being complete.
+.It FATAL: mandocd child died: got Ar SIGNAME
+This message appears if
.Nm
-to return an error exit status.
-In such cases,
+gets the
+.Dv SIGCHLD
+or
+.Dv SIGPIPE
+signal, most likely due to a fatal bug in
+.Xr mandocd 8 .
+.It FATAL: Sy sendmsg : Ar reason
+The file descriptors needed to process one of the manual pages
+could not be sent to
+.Xr mandocd 8 ,
+for example because
+.Xr mandocd 8
+could not be started or died unexpectedly.
+.It FATAL: Sy recv : Ar reason
+Trying to read a reply message from
+.Xr mandocd 8
+failed, most likely because
+.Xr mandocd 8
+unexpectedly died or closed the socket.
+.It FATAL: signal Ar SIGNAME
+This message appears if
+.Nm
+gets a
+.Dv SIGHUP ,
+.Dv SIGINT ,
+or
+.Dv SIGTERM
+signal, for example because the user deliberately killed it.
+.El
+.Pp
+Some non-fatal errors cause a single subdirectory to be skipped.
+The iteration is not aborted but continues with the next subdirectory,
+and the exit status is unaffected:
+.Bl -tag -width Ds -offset indent
+.It directory Ar subdirectory No unreadable : Ar reason
+A directory below
+.Ar srcdir
+could not be read and is skipped.
+.It directory Ar subdirectory No causes cycle
+A directory below
+.Ar srcdir
+is skipped because it would cause cyclic processing.
+.It Sy mkdirat Ar subdirectory : reason
+A required directory below
+.Ar dstdir
+does not exist and could not be created.
+The corresponding subdirectory below
+.Ar srcdir
+is skipped.
+.El
+.Pp
+Some non-fatal errors cause a single source file to be skipped.
+The iteration is not aborted but continues with the next file,
+and the exit status is unaffected:
+.Bl -tag -width Ds -offset indent
+.It file Ar filename : reason
+The function
+.Xr fts_read 3
+reported a non-fatal error with respect to
+.Ar filename .
+.It file Ar filename : No not a regular file
+For example, it might be a symbolic link or a device file.
+.It Sy open Ar filename No for reading : Ar reason
+A file below
+.Ar srcdir
+could not be read, for example due to permission problems.
+.It Sy openat Ar filename No for writing : Ar reason
+A file below
+.Ar dstdir
+could not be created or truncated, for example due to permission problems.
+.El
+.Pp
+If errors occur, the applicable summary messages appear
+after the end of the iteration:
+.Pp
+.Bl -tag -width Ds -offset indent -compact
+.It skipped Ar number No directories due to errors
+.It skipped Ar number No files due to errors
+.It processing aborted due to fatal error
+.El
+.Pp
+If the
+.Fl v
+flag is specified, the following summary message also appears:
+.Bl -tag -width Ds -offset indent
+.It processed Ar nfiles No files in Ar ndirs No directories
+A file is counted if it could be opened for reading and the
+corresponding output file could be opened for writing;
+this does not necessarily mean that it is a useful manual page.
+A directory is counted if it could be opened for reading and the
+corresponding output directory existed or could be created;
+this does not necessarily mean that any files could be
+processed inside.
+.El
+.Pp
+If the
+.Fl v
+flag is specified twice, the following messages also appear:
+.Bl -tag -width Ds -offset indent
+.It allowing up to Ar number No files in flight
+This is printed at the beginning of the iteration,
+showing the maximum number of files that
+.Nm
+allows to be in flight at any given time.
+.It files in flight: Ar old No \- Ar decrement No = Ar new
+This message is printed when
+.Nm
+learns about
+.Xr mandocd 8
+accepting more than one file at the same time.
+The three numbers printed are the old number of files in flight,
+the amount this number is being reduced, and the resulting
+new number of files in flight.
+.It waiting for Ar number No files in flight
+This message is printed at the end of the iteration, after
+.Nm
+has submitted all files to
+.Xr mandocd 8
+that it intends to.
+The message reports the number of files still in flight
+at this point.
+The
.Nm
-will simply continue with the next file or subdirectory.
+program then waits until
+.Xr mandocd 8
+has accepted them all or until an error occurs.
+.El
.Sh SEE ALSO
.Xr mandoc 1 ,
.Xr mandocd 8
diff --git a/contrib/mandoc/catman.c b/contrib/mandoc/catman.c
index e46613eb0e8c..c9eda18bf71c 100644
--- a/contrib/mandoc/catman.c
+++ b/contrib/mandoc/catman.c
@@ -1,7 +1,7 @@
-/* $Id: catman.c,v 1.23 2021/10/15 15:04:02 schwarze Exp $ */
+/* $Id: catman.c,v 1.30 2025/07/13 14:15:26 schwarze Exp $ */
/*
+ * Copyright (c) 2017, 2025 Ingo Schwarze <schwarze@openbsd.org>
* Copyright (c) 2017 Michael Stapelberg <stapelberg@debian.org>
- * Copyright (c) 2017 Ingo Schwarze <schwarze@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -25,6 +25,7 @@
#include <sys/socket.h>
#include <sys/stat.h>
+#include <assert.h>
#if HAVE_ERR
#include <err.h>
#endif
@@ -35,26 +36,44 @@
#else
#include "compat_fts.h"
#endif
+#include <signal.h>
+#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
+int verbose_flag = 0;
+sig_atomic_t got_signal = 0;
+
int process_manpage(int, int, const char *);
int process_tree(int, int);
void run_mandocd(int, const char *, const char *)
__attribute__((__noreturn__));
+void signal_handler(int);
ssize_t sock_fd_write(int, int, int, int);
void usage(void) __attribute__((__noreturn__));
void
+signal_handler(int signum)
+{
+ got_signal = signum;
+}
+
+void
run_mandocd(int sockfd, const char *outtype, const char* defos)
{
char sockfdstr[10];
+ int len;
- if (snprintf(sockfdstr, sizeof(sockfdstr), "%d", sockfd) == -1)
+ len = snprintf(sockfdstr, sizeof(sockfdstr), "%d", sockfd);
+ if (len >= (int)sizeof(sockfdstr)) {
+ errno = EOVERFLOW;
+ len = -1;
+ }
+ if (len < 0)
err(1, "snprintf");
if (defos == NULL)
execlp("mandocd", "mandocd", "-T", outtype,
@@ -109,10 +128,11 @@ sock_fd_write(int fd, int fd0, int fd1, int fd2)
* to neither cause more than a handful of retries
* in normal operation nor unnecessary delays.
*/
- for (;;) {
- if ((sz = sendmsg(fd, &msg, 0)) != -1 ||
- errno != EAGAIN)
+ while ((sz = sendmsg(fd, &msg, 0)) == -1) {
+ if (errno != EAGAIN) {
+ warn("FATAL: sendmsg");
break;
+ }
nanosleep(&timeout, NULL);
}
return sz;
@@ -125,14 +145,16 @@ process_manpage(int srv_fd, int dstdir_fd, const char *path)
int irc;
if ((in_fd = open(path, O_RDONLY)) == -1) {
- warn("open(%s)", path);
+ warn("open %s for reading", path);
+ fflush(stderr);
return 0;
}
if ((out_fd = openat(dstdir_fd, path,
O_WRONLY | O_NOFOLLOW | O_CREAT | O_TRUNC,
S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH)) == -1) {
- warn("openat(%s)", path);
+ warn("openat %s for writing", path);
+ fflush(stderr);
close(in_fd);
return 0;
}
@@ -142,20 +164,22 @@ process_manpage(int srv_fd, int dstdir_fd, const char *path)
close(in_fd);
close(out_fd);
- if (irc < 0) {
- warn("sendmsg");
- return -1;
- }
- return 0;
+ return irc;
}
int
process_tree(int srv_fd, int dstdir_fd)
{
+ const struct timespec timeout = { 0, 10000000 }; /* 0.01 s */
+ const int max_inflight = 16;
+
FTS *ftsp;
FTSENT *entry;
const char *argv[2];
const char *path;
+ int inflight, irc, decr, fatal;
+ int gooddirs, baddirs, goodfiles, badfiles;
+ char dummy[1];
argv[0] = ".";
argv[1] = (char *)NULL;
@@ -166,13 +190,59 @@ process_tree(int srv_fd, int dstdir_fd)
return -1;
}
- while ((entry = fts_read(ftsp)) != NULL) {
+ if (verbose_flag >= 2) {
+ warnx("allowing up to %d files in flight", max_inflight);
+ fflush(stderr);
+ }
+ inflight = fatal = gooddirs = baddirs = goodfiles = badfiles = 0;
+ while (fatal == 0 && got_signal == 0 &&
+ (entry = fts_read(ftsp)) != NULL) {
+ if (inflight >= max_inflight) {
+ while (recv(srv_fd, dummy, sizeof(dummy), 0) == -1) {
+ if (errno != EAGAIN) {
+ warn("FATAL: recv");
+ fatal = errno;
+ break;
+ }
+ nanosleep(&timeout, NULL);
+ }
+ if (fatal != 0)
+ break;
+ decr = 1;
+ while ((irc = recv(srv_fd, dummy, sizeof(dummy),
+ MSG_DONTWAIT)) > 0)
+ decr++;
+ assert(inflight >= decr);
+ if (verbose_flag >= 2 && decr > 1) {
+ warnx("files in flight: %d - %d = %d",
+ inflight, decr, inflight - decr);
+ fflush(stderr);
+ }
+ inflight -= decr;
+ if (irc == 0) {
+ errno = ECONNRESET;
+ inflight = -1;
+ }
+ if (errno != EAGAIN) {
+ warn("FATAL: recv");
+ fatal = errno;
+ break;
+ }
+ }
path = entry->fts_path + 2;
switch (entry->fts_info) {
case FTS_F:
- if (process_manpage(srv_fd, dstdir_fd, path) == -1) {
- fts_close(ftsp);
- return -1;
+ switch (process_manpage(srv_fd, dstdir_fd, path)) {
+ case -1:
+ fatal = errno;
+ break;
+ case 0:
+ badfiles++;
+ break;
+ default:
+ goodfiles++;
+ inflight++;
+ break;
}
break;
case FTS_D:
@@ -180,25 +250,96 @@ process_tree(int srv_fd, int dstdir_fd)
mkdirat(dstdir_fd, path, S_IRWXU | S_IRGRP |
S_IXGRP | S_IROTH | S_IXOTH) == -1 &&
errno != EEXIST) {
- warn("mkdirat(%s)", path);
+ warn("mkdirat %s", path);
+ fflush(stderr);
(void)fts_set(ftsp, entry, FTS_SKIP);
- }
+ baddirs++;
+ } else
+ gooddirs++;
break;
case FTS_DP:
break;
+ case FTS_DNR:
+ warnx("directory %s unreadable: %s",
+ path, strerror(entry->fts_errno));
+ fflush(stderr);
+ baddirs++;
+ break;
+ case FTS_DC:
+ warnx("directory %s causes cycle", path);
+ fflush(stderr);
+ baddirs++;
+ break;
+ case FTS_ERR:
+ case FTS_NS:
+ warnx("file %s: %s",
+ path, strerror(entry->fts_errno));
+ fflush(stderr);
+ badfiles++;
+ break;
default:
- warnx("%s: not a regular file", path);
+ warnx("file %s: not a regular file", path);
+ fflush(stderr);
+ badfiles++;
break;
}
}
+ if (got_signal != 0) {
+ switch (got_signal) {
+ case SIGCHLD:
+ warnx("FATAL: mandocd child died: got SIGCHLD");
+ break;
+ case SIGPIPE:
+ warnx("FATAL: mandocd child died: got SIGPIPE");
+ break;
+ default:
+ warnx("FATAL: signal SIG%s", sys_signame[got_signal]);
+ break;
+ }
+ inflight = -1;
+ fatal = 1;
+ } else if (fatal == 0 && (fatal = errno) != 0)
+ warn("FATAL: fts_read");
fts_close(ftsp);
- return 0;
+ if (verbose_flag >= 2 && inflight > 0) {
+ warnx("waiting for %d files in flight", inflight);
+ fflush(stderr);
+ }
+ while (inflight > 0) {
+ irc = recv(srv_fd, dummy, sizeof(dummy), 0);
+ if (irc > 0)
+ inflight--;
+ else if (irc == -1 && errno == EAGAIN)
+ nanosleep(&timeout, NULL);
+ else {
+ if (irc == 0)
+ errno = ECONNRESET;
+ warn("recv");
+ inflight = -1;
+ }
+ }
+ if (verbose_flag)
+ warnx("processed %d files in %d directories",
+ goodfiles, gooddirs);
+ if (baddirs > 0)
+ warnx("skipped %d %s due to errors", baddirs,
+ baddirs == 1 ? "directory" : "directories");
+ if (badfiles > 0)
+ warnx("skipped %d %s due to errors", badfiles,
+ badfiles == 1 ? "file" : "files");
+ if (fatal != 0) {
+ warnx("processing aborted due to fatal error, "
+ "results are probably incomplete");
+ inflight = -1;
+ }
+ return inflight;
}
int
main(int argc, char **argv)
{
+ struct sigaction sa;
const char *defos, *outtype;
int srv_fds[2];
int dstdir_fd;
@@ -207,7 +348,7 @@ main(int argc, char **argv)
defos = NULL;
outtype = "ascii";
- while ((opt = getopt(argc, argv, "I:T:")) != -1) {
+ while ((opt = getopt(argc, argv, "I:T:v")) != -1) {
switch (opt) {
case 'I':
defos = optarg;
@@ -215,6 +356,9 @@ main(int argc, char **argv)
case 'T':
outtype = optarg;
break;
+ case 'v':
+ verbose_flag += 1;
+ break;
default:
usage();
}
@@ -224,8 +368,36 @@ main(int argc, char **argv)
argc -= optind;
argv += optind;
}
- if (argc != 2)
+ if (argc != 2) {
+ switch (argc) {
+ case 0:
+ warnx("missing arguments: srcdir and dstdir");
+ break;
+ case 1:
+ warnx("missing argument: dstdir");
+ break;
+ default:
+ warnx("too many arguments: %s", argv[2]);
+ break;
+ }
usage();
+ }
+
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = &signal_handler;
+ sa.sa_flags = SA_NOCLDWAIT;
+ if (sigfillset(&sa.sa_mask) == -1)
+ err(1, "sigfillset");
+ if (sigaction(SIGHUP, &sa, NULL) == -1)
+ err(1, "sigaction(SIGHUP)");
+ if (sigaction(SIGINT, &sa, NULL) == -1)
+ err(1, "sigaction(SIGINT)");
+ if (sigaction(SIGPIPE, &sa, NULL) == -1)
+ err(1, "sigaction(SIGPIPE)");
+ if (sigaction(SIGTERM, &sa, NULL) == -1)
+ err(1, "sigaction(SIGTERM)");
+ if (sigaction(SIGCHLD, &sa, NULL) == -1)
+ err(1, "sigaction(SIGCHLD)");
if (socketpair(AF_LOCAL, SOCK_STREAM, AF_UNSPEC, srv_fds) == -1)
err(1, "socketpair");
@@ -242,11 +414,18 @@ main(int argc, char **argv)
}
close(srv_fds[1]);
- if ((dstdir_fd = open(argv[1], O_RDONLY | O_DIRECTORY)) == -1)
- err(1, "open(%s)", argv[1]);
+ if ((dstdir_fd = open(argv[1], O_RDONLY | O_DIRECTORY)) == -1) {
+ if (errno != ENOENT)
+ err(1, "open destination %s", argv[1]);
+ if (mkdir(argv[1], S_IRWXU |
+ S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH) == -1)
+ err(1, "mkdir destination %s", argv[1]);
+ if ((dstdir_fd = open(argv[1], O_RDONLY | O_DIRECTORY)) == -1)
+ err(1, "open destination %s", argv[1]);
+ }
if (chdir(argv[0]) == -1)
- err(1, "chdir(%s)", argv[0]);
+ err(1, "chdir to source %s", argv[0]);
return process_tree(srv_fds[0], dstdir_fd) == -1 ? 1 : 0;
}
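
The flow control added to process_tree() above amounts to a simple credit scheme: each file descriptor sent to mandocd costs one credit, and each one-byte acknowledgement read back returns one. The following minimal, self-contained sketch distills that scheme; it is an illustration only, with a hypothetical send_job() callback standing in for process_manpage(), not the exact catman.c code.

    #include <sys/socket.h>
    #include <errno.h>
    #include <time.h>

    /*
     * Sketch of the credit scheme used by process_tree() above:
     * at most max_inflight jobs may be unacknowledged at any time.
     * The send_job() callback is a hypothetical stand-in for
     * process_manpage(); this is not the actual catman.c code.
     */
    static int
    submit_all(int srv_fd, int njobs, int max_inflight,
        int (*send_job)(int))
    {
    	const struct timespec ts = { 0, 10000000 };	/* 0.01 s */
    	char dummy[1];
    	ssize_t irc;
    	int inflight = 0;

    	while (njobs > 0 || inflight > 0) {
    		if (njobs > 0 && inflight < max_inflight) {
    			if (send_job(srv_fd) == -1)
    				return -1;	/* sendmsg failed */
    			njobs--;
    			inflight++;
    			continue;
    		}
    		/* Wait for mandocd to acknowledge one job. */
    		irc = recv(srv_fd, dummy, sizeof(dummy), 0);
    		if (irc > 0)
    			inflight--;
    		else if (irc == -1 && errno == EAGAIN)
    			nanosleep(&ts, NULL);
    		else
    			return -1;	/* peer closed or error */
    	}
    	return 0;
    }
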
diff --git a/contrib/mandoc/gmdiff b/contrib/mandoc/gmdiff
index 69431f703aaf..54025e4cd450 100644
--- a/contrib/mandoc/gmdiff
+++ b/contrib/mandoc/gmdiff
@@ -45,8 +45,8 @@ while [ -n "$1" ]; do
file=$1
shift
echo " ========== $file ========== "
- $ROFF -mandoc $file | $COLPIPE 2> /tmp/roff.err > /tmp/roff.out
- ${MANDOC:=mandoc} $MOPT $file | $COLPIPE \
+ ($ROFF -mandoc $file | $COLPIPE) 2> /tmp/roff.err > /tmp/roff.out
+ (${MANDOC:=mandoc} $MOPT $file | $COLPIPE) \
2> /tmp/mandoc.err > /tmp/mandoc.out
for i in roff mandoc; do
[ -s /tmp/$i.err ] && echo "$i errors:" && cat /tmp/$i.err
diff --git a/contrib/mandoc/man.7 b/contrib/mandoc/man.7
index 4d27c76ba110..91eafbb35f70 100644
--- a/contrib/mandoc/man.7
+++ b/contrib/mandoc/man.7
@@ -1,7 +1,8 @@
-.\" $Id: man.7,v 1.150 2023/10/23 22:57:54 schwarze Exp $
+.\" $Id: man.7,v 1.154 2025/08/05 21:16:20 schwarze Exp $
.\"
+.\" Copyright (c) 2011-2015, 2017-2020, 2023, 2025
+.\" Ingo Schwarze <schwarze@openbsd.org>
.\" Copyright (c) 2009, 2010, 2011, 2012 Kristaps Dzonsons <kristaps@bsd.lv>
-.\" Copyright (c) 2011-2015,2017-2020,2023 Ingo Schwarze <schwarze@openbsd.org>
.\" Copyright (c) 2017 Anthony Bentley <bentley@openbsd.org>
.\" Copyright (c) 2010 Joerg Sonnenberger <joerg@netbsd.org>
.\"
@@ -17,7 +18,7 @@
.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
.\"
-.Dd $Mdocdate: October 23 2023 $
+.Dd $Mdocdate: August 5 2025 $
.Dt MAN 7
.Os
.Sh NAME
@@ -89,7 +90,7 @@ but can be found in the alphabetical reference below.
.Ss Page header and footer meta-data
.Bl -column "RS, RE" description
.It Ic TH Ta set the title: Ar name section date Op Ar source Op Ar volume
-.It Ic AT Ta display AT&T UNIX version in the page footer (<= 1 argument)
+.It Ic AT Ta display AT&T UNIX version in the page footer (<= 2 arguments)
.It Ic UC Ta display BSD version in the page footer (<= 1 argument)
.El
.Ss Sections and paragraphs
@@ -99,6 +100,7 @@ but can be found in the alphabetical reference below.
.It Ic PP Ta start an undecorated paragraph (no arguments)
.It Ic IP Ta indented paragraph: Op Ar head Op Ar width
.It Ic TP Ta tagged paragraph: Op Ar width
+.It Ic HP Ta hanged paragraph: Op Ar width
.It Ic PD Ta set vertical paragraph distance: Op Ar height
.It Ic EX , EE Ta display an example (no arguments)
.It Ic RS , RE Ta reset the left margin: Op Ar width
@@ -198,11 +200,6 @@ argument is a
scaling width.
If specified, it's saved for later paragraph left margins;
if unspecified, the saved or default width is used.
-.Pp
-This macro is portable, but deprecated
-because it has no good representation in HTML output,
-usually ending up indistinguishable from
-.Ic PP .
.It Ic I
Text is rendered in italics.
.It Ic IB
@@ -239,6 +236,17 @@ A synonym for
End a mailto block started with
.Ic MT .
This is a GNU extension.
+.It Ic MR
+Reference another manual page.
+This is a Plan 9 extension also supported by GNU.
+It has the following syntax:
+.Pp
+.D1 Pf . Ic MR Ar name section Op Ar suffix
+.Pp
+The optional, single
+.Ar suffix
+argument is appended without preceding whitespace
+and typically used for trailing punctuation.
.It Ic MT
Begin a mailto block.
This is a GNU extension.
@@ -250,8 +258,12 @@ link description to be shown
.Ed
.It Ic OP
Optional command-line argument.
-This is a rarely used DWB extension.
-It has the following syntax:
+This is a deprecated GNU extension.
+The name and purpose of the macro match an earlier DWB extension,
+but both the syntax and semantics are incompatible.
+In GNU and
+.Xr mandoc 1 ,
+it has the following syntax:
.Pp
.D1 Pf . Ic OP Ar key Op Ar value
.Pp
@@ -503,43 +515,56 @@ raised.
.Pp
The syntax is as follows:
.Bd -literal -offset indent
-\&.YO \(lBbody...\(rB
-\(lBbody...\(rB
+\&.\e" current-line syntax
+\&.YO \(lBbody ...\(rB
+
+\&.\e" next-line syntax
+\&.YO
+body ...
.Ed
-.Bl -column "MacroX" "ArgumentsX" "ScopeXXXXX" "CompatX" -offset indent
-.It Em Macro Ta Em Arguments Ta Em Scope Ta Em Notes
-.It Ic AT Ta <=1 Ta current Ta \&
-.It Ic B Ta n Ta next-line Ta \&
-.It Ic BI Ta n Ta current Ta \&
-.It Ic BR Ta n Ta current Ta \&
-.It Ic DT Ta 0 Ta current Ta \&
-.It Ic EE Ta 0 Ta current Ta Version 9 At
-.It Ic EX Ta 0 Ta current Ta Version 9 At
-.It Ic I Ta n Ta next-line Ta \&
-.It Ic IB Ta n Ta current Ta \&
-.It Ic IR Ta n Ta current Ta \&
-.It Ic OP Ta >=1 Ta current Ta DWB
-.It Ic PD Ta 1 Ta current Ta \&
-.It Ic RB Ta n Ta current Ta \&
-.It Ic RI Ta n Ta current Ta \&
-.It Ic SB Ta n Ta next-line Ta \&
-.It Ic SM Ta n Ta next-line Ta \&
-.It Ic TH Ta >1, <6 Ta current Ta \&
-.It Ic UC Ta <=1 Ta current Ta \&
-.It Ic in Ta 1 Ta current Ta Xr roff 7
+.Bl -column -offset indent\
+ "Macro" "Arguments" "curr and next" "Version 9 AT&T UNIX"
+.It Em Macro Ta Em Arguments Ta Em Line Scope Ta Em Notes
+.It Ic AT Ta 0 to 2 Ta current Ta \&
+.It Ic B Ta 1 or more Ta curr or next Ta \&
+.It Ic BI Ta 2 or more Ta current Ta \&
+.It Ic BR Ta 2 or more Ta current Ta \&
+.It Ic DT Ta 0 Ta none Ta \&
+.It Ic EE Ta 0 Ta none Ta Version 9 At
+.It Ic EX Ta 0 Ta none Ta Version 9 At
+.It Ic I Ta 1 or more Ta curr or next Ta \&
+.It Ic IB Ta 2 or more Ta current Ta \&
+.It Ic IR Ta 2 or more Ta current Ta \&
+.It Ic MR Ta 2 or 3 Ta current Ta Plan 9
+.It Ic OP Ta 1 or 2 Ta current Ta GNU
+.It Ic PD Ta 0 or 1 Ta current Ta \&
+.It Ic RB Ta 2 or more Ta current Ta \&
+.It Ic RI Ta 2 or more Ta current Ta \&
+.It Ic SB Ta 1 or more Ta curr or next Ta \&
+.It Ic SM Ta 1 or more Ta curr or next Ta \&
+.It Ic TH Ta 3 to 5 Ta current Ta \&
+.It Ic UC Ta 0 or 1 Ta current Ta \&
+.It Ic in Ta 0 or 1 Ta current Ta Xr roff 7
.El
.Ss Block Macros
Block macros comprise a head and body.
-As with in-line macros, the head is scoped to the current line and, in
-one circumstance, the next line (the next-line stipulations as in
+As with in-line macros, the head is scoped to the current line or,
+for some macros, to the next line (the next-line stipulations as in
.Sx Line Macros
apply here as well).
.Pp
The syntax is as follows:
.Bd -literal -offset indent
-\&.YO \(lBhead...\(rB
-\(lBhead...\(rB
-\(lBbody...\(rB
+\&.\e" current-line syntax
+\&.YO \(lBhead ...\(rB
+body ...
+\&...
+
+\&.\e" next-line syntax
+\&.YO \(lBhead\(rB
+head ...
+body ...
+\&...
.Ed
.Pp
The closure of body scope may be to the section, where a macro is closed
@@ -547,40 +572,42 @@ by
.Ic SH ;
sub-section, closed by a section or
.Ic SS ;
-or paragraph, closed by a section, sub-section,
+paragraph, closed by a section, sub-section,
.Ic HP ,
.Ic IP ,
.Ic LP ,
.Ic P ,
.Ic PP ,
-.Ic RE ,
+.Ic RS ,
.Ic SY ,
+.Ic TP ,
or
-.Ic TP .
-No closure refers to an explicit block closing macro.
+.Ic TQ ;
+or to an explicit block closing macro.
.Pp
As a rule, block macros may not be nested; thus, calling a block macro
while another block macro scope is open, and the open scope is not
implicitly closed, is syntactically incorrect.
-.Bl -column "MacroX" "ArgumentsX" "Head ScopeX" "sub-sectionX" "compatX" -offset indent
-.It Em Macro Ta Em Arguments Ta Em Head Scope Ta Em Body Scope Ta Em Notes
-.It Ic HP Ta <2 Ta current Ta paragraph Ta \&
-.It Ic IP Ta <3 Ta current Ta paragraph Ta \&
-.It Ic LP Ta 0 Ta current Ta paragraph Ta \&
-.It Ic ME Ta 0 Ta none Ta none Ta GNU
-.It Ic MT Ta 1 Ta current Ta to \&ME Ta GNU
-.It Ic P Ta 0 Ta current Ta paragraph Ta \&
-.It Ic PP Ta 0 Ta current Ta paragraph Ta \&
-.It Ic RE Ta <=1 Ta current Ta none Ta \&
-.It Ic RS Ta 1 Ta current Ta to \&RE Ta \&
-.It Ic SH Ta >0 Ta next-line Ta section Ta \&
-.It Ic SS Ta >0 Ta next-line Ta sub-section Ta \&
-.It Ic SY Ta 1 Ta current Ta to \&YS Ta GNU
-.It Ic TP Ta n Ta next-line Ta paragraph Ta \&
-.It Ic TQ Ta n Ta next-line Ta paragraph Ta GNU
-.It Ic UE Ta 0 Ta current Ta none Ta GNU
-.It Ic UR Ta 1 Ta current Ta part Ta GNU
-.It Ic YS Ta 0 Ta none Ta none Ta GNU
+.Bl -column -offset indent\
+ "Macro" "Arguments" "curr and next" "sub-section" "Notes"
+.It Em Macro Ta Em Arguments Ta Em Head Scope Ta Em Body Scope Ta Em Notes
+.It Ic HP Ta 0 or 1 Ta current Ta paragraph Ta \&
+.It Ic IP Ta 0 to 2 Ta current Ta paragraph Ta \&
+.It Ic LP Ta 0 Ta none Ta paragraph Ta \&
+.It Ic ME Ta 0 or 1 Ta current Ta none Ta GNU
+.It Ic MT Ta 1 Ta current Ta to \&ME Ta GNU
+.It Ic P Ta 0 Ta none Ta paragraph Ta \&
+.It Ic PP Ta 0 Ta none Ta paragraph Ta \&
+.It Ic RE Ta 0 or 1 Ta current Ta none Ta \&
+.It Ic RS Ta 0 or 1 Ta current Ta to \&RE Ta \&
+.It Ic SH Ta 1 or more Ta curr or next Ta section Ta \&
+.It Ic SS Ta 1 or more Ta curr or next Ta sub-section Ta \&
+.It Ic SY Ta 1 Ta current Ta to \&YS Ta GNU
+.It Ic TP Ta 0 or 1 Ta curr and next Ta paragraph Ta \&
+.It Ic TQ Ta 0 or 1 Ta curr and next Ta paragraph Ta GNU
+.It Ic UE Ta 0 or 1 Ta current Ta none Ta GNU
+.It Ic UR Ta 1 Ta current Ta to \&UE Ta GNU
+.It Ic YS Ta 0 Ta none Ta none Ta GNU
.El
.Pp
If a block macro is next-line scoped, it may only be followed by in-line
diff --git a/contrib/mandoc/man.options.1 b/contrib/mandoc/man.options.1
index d8c790f4fa04..be65ad98fddc 100644
--- a/contrib/mandoc/man.options.1
+++ b/contrib/mandoc/man.options.1
@@ -1,6 +1,6 @@
-.\" $Id: man.options.1,v 1.7 2017/07/04 23:40:01 schwarze Exp $
+.\" $Id: man.options.1,v 1.8 2025/06/30 00:11:06 schwarze Exp $
.\"
-.\" Copyright (c) 2017 Ingo Schwarze <schwarze@openbsd.org>
+.\" Copyright (c) 2017, 2025 Ingo Schwarze <schwarze@openbsd.org>
.\"
.\" Permission to use, copy, modify, and distribute this software for any
.\" purpose with or without fee is hereby granted, provided that the above
@@ -14,7 +14,7 @@
.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
.\"
-.Dd $Mdocdate: July 4 2017 $
+.Dd $Mdocdate: June 30 2025 $
.Dt MAN.OPTIONS 1
.Os
.Sh NAME
@@ -1113,7 +1113,8 @@ print version number
verbose mode
.br
.Nm catman :
-.Fx Pq March 15, 1995
+.Fx Pq March 15, 1995 ,
+.No mandoc Pq June 30, 2025
.br
.Nm makewhatis :
.man15g
diff --git a/contrib/mandoc/man_html.c b/contrib/mandoc/man_html.c
index 6784171af1e6..fc593be1112c 100644
--- a/contrib/mandoc/man_html.c
+++ b/contrib/mandoc/man_html.c
@@ -1,6 +1,6 @@
-/* $Id: man_html.c,v 1.187 2023/10/24 20:53:12 schwarze Exp $ */
+/* $Id: man_html.c,v 1.188 2025/06/26 17:06:34 schwarze Exp $ */
/*
- * Copyright (c) 2013-15,2017-20,2022-23 Ingo Schwarze <schwarze@openbsd.org>
+ * Copyright (c) 2013-2020,2022-2023,2025 Ingo Schwarze <schwarze@openbsd.org>
* Copyright (c) 2008-2012, 2014 Kristaps Dzonsons <kristaps@bsd.lv>
*
* Permission to use, copy, modify, and distribute this software for any
@@ -293,21 +293,30 @@ static void
man_root_post(const struct roff_meta *man, struct html *h)
{
struct tag *t;
+ char *title;
+
+ assert(man->title != NULL);
+ if (man->msec == NULL)
+ title = mandoc_strdup(man->title);
+ else
+ mandoc_asprintf(&title, "%s(%s)", man->title, man->msec);
t = print_otag(h, TAG_DIV, "cr?", "foot", "doc-pagefooter",
"aria-label", "Manual footer line");
print_otag(h, TAG_SPAN, "c", "foot-left");
+ if (man->os != NULL)
+ print_text(h, man->os);
print_stagq(h, t);
print_otag(h, TAG_SPAN, "c", "foot-date");
print_text(h, man->date);
print_stagq(h, t);
- print_otag(h, TAG_SPAN, "c", "foot-os");
- if (man->os != NULL)
- print_text(h, man->os);
+ print_otag(h, TAG_SPAN, "c", "foot-right");
+ print_text(h, title);
print_tagq(h, t);
+ free(title);
}
static int
diff --git a/contrib/mandoc/man_term.c b/contrib/mandoc/man_term.c
index 706fab8cd4d1..ac75c2c5ef40 100644
--- a/contrib/mandoc/man_term.c
+++ b/contrib/mandoc/man_term.c
@@ -1,6 +1,6 @@
-/* $Id: man_term.c,v 1.244 2023/11/13 19:13:01 schwarze Exp $ */
+/* $Id: man_term.c,v 1.248 2025/07/27 15:27:28 schwarze Exp $ */
/*
- * Copyright (c) 2010-15,2017-20,2022-23 Ingo Schwarze <schwarze@openbsd.org>
+ * Copyright (c) 2010-2020,2022-23,2025 Ingo Schwarze <schwarze@openbsd.org>
* Copyright (c) 2008-2012 Kristaps Dzonsons <kristaps@bsd.lv>
*
* Permission to use, copy, modify, and distribute this software for any
@@ -38,14 +38,14 @@
#include "term_tag.h"
#include "main.h"
-#define MAXMARGINS 64 /* maximum number of indented scopes */
+#define MAXMARGINS 64 /* Maximum number of indented scopes. */
struct mtermp {
- int lmargin[MAXMARGINS]; /* margins (incl. vis. page) */
- int lmargincur; /* index of current margin */
- int lmarginsz; /* actual number of nested margins */
- size_t offset; /* default offset to visible page */
- int pardist; /* vert. space before par., unit: [v] */
+ int lmargin[MAXMARGINS]; /* Margins in basic units. */
+ int lmargincur; /* Index of current margin. */
+ int lmarginsz; /* Actual number of nested margins. */
+ size_t offset; /* Default offset in basic units. */
+ int pardist; /* Vert. space before par., unit: [v]. */
};
#define DECL_ARGS struct termp *p, \
@@ -194,12 +194,10 @@ terminal_man(void *arg, const struct roff_meta *man)
}
/*
- * Printing leading vertical space before a block.
- * This is used for the paragraph macros.
- * The rules are pretty simple, since there's very little nesting going
- * on here. Basically, if we're the first within another block (SS/SH),
- * then don't emit vertical space. If we are (RS), then do. If not the
- * first, print it.
+ * Print leading vertical space before a paragraph, unless
+ * it is the first paragraph in a section or subsection.
+ * If it is the first paragraph in an .RS block, consider
+ * that .RS block instead of the paragraph, recursively.
*/
static void
print_bvspace(struct termp *p, struct roff_node *n, int pardist)
@@ -214,9 +212,13 @@ print_bvspace(struct termp *p, struct roff_node *n, int pardist)
nch->type == ROFFT_TBL)
return;
- if (n->parent->tok != MAN_RS && roff_node_prev(n) == NULL)
- return;
-
+ while (roff_node_prev(n) == NULL) {
+ n = n->parent;
+ if (n->tok != MAN_RS)
+ return;
+ if (n->type == ROFFT_BODY)
+ n = n->parent;
+ }
for (i = 0; i < pardist; i++)
term_vspace(p);
}
@@ -372,8 +374,8 @@ static int
pre_in(DECL_ARGS)
{
struct roffsu su;
- const char *cp;
- size_t v;
+ const char *cp; /* Request argument. */
+ size_t v; /* Indentation in basic units. */
int less;
term_newln(p);
@@ -386,17 +388,18 @@ pre_in(DECL_ARGS)
cp = n->child->string;
less = 0;
- if (*cp == '-')
+ if (*cp == '-') {
less = -1;
- else if (*cp == '+')
+ cp++;
+ } else if (*cp == '+') {
less = 1;
- else
- cp--;
+ cp++;
+ }
- if (a2roffsu(++cp, &su, SCALE_EN) == NULL)
+ if (a2roffsu(cp, &su, SCALE_EN) == NULL)
return 0;
- v = term_hen(p, &su);
+ v = term_hspan(p, &su);
if (less < 0)
p->tcol->offset -= p->tcol->offset > v ? v : p->tcol->offset;
@@ -424,7 +427,7 @@ pre_HP(DECL_ARGS)
{
struct roffsu su;
const struct roff_node *nn;
- int len;
+ int len; /* Indentation in basic units. */
switch (n->type) {
case ROFFT_BLOCK:
@@ -450,7 +453,7 @@ pre_HP(DECL_ARGS)
if ((nn = n->parent->head->child) != NULL &&
a2roffsu(nn->string, &su, SCALE_EN) != NULL) {
- len = term_hen(p, &su);
+ len = term_hspan(p, &su);
if (len < 0 && (size_t)(-len) > mt->offset)
len = -mt->offset;
else if (len > SHRT_MAX)
@@ -518,7 +521,7 @@ pre_IP(DECL_ARGS)
{
struct roffsu su;
const struct roff_node *nn;
- int len;
+ int len; /* Indentation in basic units. */
switch (n->type) {
case ROFFT_BLOCK:
@@ -539,7 +542,7 @@ pre_IP(DECL_ARGS)
if ((nn = n->parent->head->child) != NULL &&
(nn = nn->next) != NULL &&
a2roffsu(nn->string, &su, SCALE_EN) != NULL) {
- len = term_hen(p, &su);
+ len = term_hspan(p, &su);
if (len < 0 && (size_t)(-len) > mt->offset)
len = -mt->offset;
else if (len > SHRT_MAX)
@@ -591,7 +594,7 @@ pre_TP(DECL_ARGS)
{
struct roffsu su;
struct roff_node *nn;
- int len;
+ int len; /* Indentation in basic units. */
switch (n->type) {
case ROFFT_BLOCK:
@@ -614,7 +617,7 @@ pre_TP(DECL_ARGS)
if ((nn = n->parent->head->child) != NULL &&
nn->string != NULL && ! (NODE_LINE & nn->flags) &&
a2roffsu(nn->string, &su, SCALE_EN) != NULL) {
- len = term_hen(p, &su);
+ len = term_hspan(p, &su);
if (len < 0 && (size_t)(-len) > mt->offset)
len = -mt->offset;
else if (len > SHRT_MAX)
@@ -691,10 +694,11 @@ pre_SS(DECL_ARGS)
term_vspace(p);
break;
case ROFFT_HEAD:
+ p->fontibi = 1;
term_fontrepl(p, TERMFONT_BOLD);
- p->tcol->offset = term_len(p, 3);
+ p->tcol->offset = term_len(p, p->defindent) / 2 + 1;
p->tcol->rmargin = mt->offset;
- p->trailspace = mt->offset;
+ p->trailspace = mt->offset / term_len(p, 1);
p->flags |= TERMP_NOBREAK | TERMP_BRIND;
break;
case ROFFT_BODY:
@@ -732,10 +736,11 @@ pre_SH(DECL_ARGS)
term_vspace(p);
break;
case ROFFT_HEAD:
+ p->fontibi = 1;
term_fontrepl(p, TERMFONT_BOLD);
p->tcol->offset = 0;
p->tcol->rmargin = mt->offset;
- p->trailspace = mt->offset;
+ p->trailspace = mt->offset / term_len(p, 1);
p->flags |= TERMP_NOBREAK | TERMP_BRIND;
break;
case ROFFT_BODY:
@@ -757,6 +762,8 @@ post_SH(DECL_ARGS)
case ROFFT_BLOCK:
break;
case ROFFT_HEAD:
+ p->fontibi = 0;
+ /* FALLTHROUGH */
case ROFFT_BODY:
term_newln(p);
break;
@@ -787,7 +794,7 @@ pre_RS(DECL_ARGS)
if (n->child == NULL)
n->aux = mt->lmargin[mt->lmargincur];
else if (a2roffsu(n->child->string, &su, SCALE_EN) != NULL)
- n->aux = term_hen(p, &su);
+ n->aux = term_hspan(p, &su);
if (n->aux < 0 && (size_t)(-n->aux) > mt->offset)
n->aux = -mt->offset;
else if (n->aux > SHRT_MAX)
@@ -827,7 +834,7 @@ static int
pre_SY(DECL_ARGS)
{
const struct roff_node *nn;
- int len;
+ int len; /* Indentation in basic units. */
switch (n->type) {
case ROFFT_BLOCK:
@@ -842,7 +849,9 @@ pre_SY(DECL_ARGS)
}
nn = n->parent->head->child;
- len = nn == NULL ? 1 : term_strlen(p, nn->string) + 1;
+ len = term_len(p, 1);
+ if (nn != NULL)
+ len += term_strlen(p, nn->string);
switch (n->type) {
case ROFFT_HEAD:
@@ -1015,40 +1024,26 @@ static void
print_man_foot(struct termp *p, const struct roff_meta *meta)
{
char *title;
- size_t datelen, titlen;
+ size_t datelen, titlen; /* In basic units. */
- assert(meta->title);
- assert(meta->msec);
- assert(meta->date);
+ assert(meta->title != NULL);
+ assert(meta->msec != NULL);
term_fontrepl(p, TERMFONT_NONE);
-
if (meta->hasbody)
term_vspace(p);
- /*
- * Temporary, undocumented option to imitate mdoc(7) output.
- * In the bottom right corner, use the operating system
- * instead of the title.
- */
-
- if ( ! p->mdocstyle) {
- mandoc_asprintf(&title, "%s(%s)",
- meta->title, meta->msec);
- } else if (meta->os != NULL) {
- title = mandoc_strdup(meta->os);
- } else {
- title = mandoc_strdup("");
- }
datelen = term_strlen(p, meta->date);
+ mandoc_asprintf(&title, "%s(%s)", meta->title, meta->msec);
+ titlen = term_strlen(p, title);
/* Bottom left corner: operating system. */
- p->flags |= TERMP_NOSPACE | TERMP_NOBREAK;
- p->trailspace = 1;
p->tcol->offset = 0;
p->tcol->rmargin = p->maxrmargin > datelen ?
(p->maxrmargin + term_len(p, 1) - datelen) / 2 : 0;
+ p->trailspace = 1;
+ p->flags |= TERMP_NOSPACE | TERMP_NOBREAK;
if (meta->os)
term_word(p, meta->os);
@@ -1057,7 +1052,6 @@ print_man_foot(struct termp *p, const struct roff_meta *meta)
/* At the bottom in the middle: manual date. */
p->tcol->offset = p->tcol->rmargin;
- titlen = term_strlen(p, title);
p->tcol->rmargin = p->maxrmargin > titlen ?
p->maxrmargin - titlen : 0;
p->flags |= TERMP_NOSPACE;
@@ -1067,11 +1061,11 @@ print_man_foot(struct termp *p, const struct roff_meta *meta)
/* Bottom right corner: manual title and section. */
- p->flags &= ~TERMP_NOBREAK;
- p->flags |= TERMP_NOSPACE;
- p->trailspace = 0;
p->tcol->offset = p->tcol->rmargin;
p->tcol->rmargin = p->maxrmargin;
+ p->trailspace = 0;
+ p->flags &= ~TERMP_NOBREAK;
+ p->flags |= TERMP_NOSPACE;
term_word(p, title);
term_flushln(p);
@@ -1086,7 +1080,6 @@ print_man_foot(struct termp *p, const struct roff_meta *meta)
p->tcol->offset = 0;
p->flags = 0;
-
free(title);
}
@@ -1095,7 +1088,7 @@ print_man_head(struct termp *p, const struct roff_meta *meta)
{
const char *volume;
char *title;
- size_t vollen, titlen;
+ size_t vollen, titlen; /* In basic units. */
assert(meta->title);
assert(meta->msec);
@@ -1111,7 +1104,8 @@ print_man_head(struct termp *p, const struct roff_meta *meta)
p->flags |= TERMP_NOBREAK | TERMP_NOSPACE;
p->trailspace = 1;
p->tcol->offset = 0;
- p->tcol->rmargin = 2 * (titlen+1) + vollen < p->maxrmargin ?
+ p->tcol->rmargin =
+ titlen * 2 + term_len(p, 2) + vollen < p->maxrmargin ?
(p->maxrmargin - vollen + term_len(p, 1)) / 2 :
vollen < p->maxrmargin ? p->maxrmargin - vollen : 0;
@@ -1123,7 +1117,7 @@ print_man_head(struct termp *p, const struct roff_meta *meta)
p->flags |= TERMP_NOSPACE;
p->tcol->offset = p->tcol->rmargin;
p->tcol->rmargin = p->tcol->offset + vollen + titlen <
- p->maxrmargin ? p->maxrmargin - titlen : p->maxrmargin;
+ p->maxrmargin ? p->maxrmargin - titlen : p->maxrmargin;
term_word(p, volume);
term_flushln(p);
@@ -1143,13 +1137,6 @@ print_man_head(struct termp *p, const struct roff_meta *meta)
p->flags &= ~TERMP_NOSPACE;
p->tcol->offset = 0;
p->tcol->rmargin = p->maxrmargin;
-
- /*
- * Groff prints three blank lines before the content.
- * Do the same, except in the temporary, undocumented
- * mode imitating mdoc(7) output.
- */
-
term_vspace(p);
free(title);
}
diff --git a/contrib/mandoc/man_validate.c b/contrib/mandoc/man_validate.c
index 857adba2798f..57ac9327afd4 100644
--- a/contrib/mandoc/man_validate.c
+++ b/contrib/mandoc/man_validate.c
@@ -1,6 +1,6 @@
-/* $Id: man_validate.c,v 1.159 2023/10/24 20:53:12 schwarze Exp $ */
+/* $Id: man_validate.c,v 1.161 2025/07/09 12:51:06 schwarze Exp $ */
/*
- * Copyright (c) 2010, 2012-2020, 2023 Ingo Schwarze <schwarze@openbsd.org>
+ * Copyright (c) 2010-2020, 2023, 2025 Ingo Schwarze <schwarze@openbsd.org>
* Copyright (c) 2008, 2009, 2010, 2011 Kristaps Dzonsons <kristaps@bsd.lv>
*
* Permission to use, copy, modify, and distribute this software for any
@@ -299,6 +299,14 @@ post_SH(CHKARGS)
nc = n->child;
switch (n->type) {
+ case ROFFT_BLOCK:
+ if ((nc = n->prev) != NULL && nc->tok == ROFF_br) {
+ mandoc_msg(MANDOCERR_PAR_SKIP, nc->line, nc->pos,
+ "%s before first %s", roff_name[nc->tok],
+ roff_name[n->tok]);
+ roff_node_delete(man, nc);
+ }
+ return;
case ROFFT_HEAD:
tag = NULL;
deroff(&tag, n);
@@ -473,7 +481,7 @@ post_TH(CHKARGS)
/* ->TITLE<- MSEC DATE OS VOL */
n = n->child;
- if (n != NULL && n->string != NULL) {
+ if (n != NULL && n->string != NULL && *n->string != '\0') {
for (p = n->string; *p != '\0'; p++) {
/* Only warn about this once... */
if (isalpha((unsigned char)*p) &&
@@ -486,8 +494,8 @@ post_TH(CHKARGS)
}
man->meta.title = mandoc_strdup(n->string);
} else {
- man->meta.title = mandoc_strdup("");
- mandoc_msg(MANDOCERR_TH_NOTITLE, nb->line, nb->pos, "TH");
+ man->meta.title = mandoc_strdup("UNTITLED");
+ mandoc_msg(MANDOCERR_DT_NOTITLE, nb->line, nb->pos, "TH");
}
/* TITLE ->MSEC<- DATE OS VOL */
diff --git a/contrib/mandoc/mandoc.1 b/contrib/mandoc/mandoc.1
index 32a3e2811513..8b6fe7d19b1e 100644
--- a/contrib/mandoc/mandoc.1
+++ b/contrib/mandoc/mandoc.1
@@ -1,4 +1,4 @@
-.\" $Id: mandoc.1,v 1.270 2025/03/03 14:07:51 schwarze Exp $
+.\" $Id: mandoc.1,v 1.272 2025/07/09 13:46:05 schwarze Exp $
.\"
.\" Copyright (c) 2012, 2014-2023, 2025 Ingo Schwarze <schwarze@openbsd.org>
.\" Copyright (c) 2009, 2010, 2011 Kristaps Dzonsons <kristaps@bsd.lv>
@@ -15,7 +15,7 @@
.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
.\"
-.Dd $Mdocdate: March 3 2025 $
+.Dd $Mdocdate: July 9 2025 $
.Dt MANDOC 1
.Os
.Sh NAME
@@ -292,19 +292,6 @@ Increasing this is not recommended; it may result in degraded formatting,
for example overfull lines or ugly line breaks.
When output is to a pager on a terminal that is less than 66 columns
wide, the default is reduced to three columns.
-.It Cm mdoc
-Format
-.Xr man 7
-input files in
-.Xr mdoc 7
-output style.
-This prints the operating system name rather than the page title
-on the right side of the footer line.
-One useful application is for checking that
-.Fl T Cm man
-output formats in the same way as the
-.Xr mdoc 7
-source it was generated from.
.It Cm tag Ns Op = Ns Ar term
If the formatted manual page is opened in a pager,
go to the definition of the
@@ -1121,17 +1108,21 @@ but leaving out the backslash might not be portable.
.Ss Warnings related to the document prologue
.Bl -ohang
.It Sy "missing manual title, using UNTITLED"
-.Pq mdoc
+.Pq mdoc , man
A
.Ic \&Dt
-macro has no arguments, or there is no
+or
+.Ic \&TH
+macro has no arguments, its first argument is an empty string, or there is no
.Ic \&Dt
-macro before the first non-prologue macro.
+macro before the first non-prologue
+.Xr mdoc 7
+macro.
.It Sy "missing manual title, using \(dq\(dq"
.Pq man
-There is no
+An input document does not contain any
.Ic \&TH
-macro, or it has no arguments.
+macro.
.It Sy "missing manual section, using \(dq\(dq"
.Pq mdoc , man
A
diff --git a/contrib/mandoc/mandoc.css b/contrib/mandoc/mandoc.css
index 88432b9322b7..46e03a386ae0 100644
--- a/contrib/mandoc/mandoc.css
+++ b/contrib/mandoc/mandoc.css
@@ -1,4 +1,4 @@
-/* $Id: mandoc.css,v 1.54 2025/01/25 03:18:55 schwarze Exp $ */
+/* $Id: mandoc.css,v 1.55 2025/06/26 17:06:34 schwarze Exp $ */
/*
* Standard style sheet for mandoc(1) -Thtml and man.cgi(8).
*
@@ -73,7 +73,7 @@ div[role=doc-pagefooter] {
.foot-left { flex: 1; }
.foot-date { flex: 0 1 auto;
text-align: center; }
-.foot-os { flex: 1;
+.foot-right { flex: 1;
text-align: right; }
/* Sections and paragraphs. */
diff --git a/contrib/mandoc/mandocd.8 b/contrib/mandoc/mandocd.8
index d679deb1b9e4..aaf4e3dede70 100644
--- a/contrib/mandoc/mandocd.8
+++ b/contrib/mandoc/mandocd.8
@@ -1,6 +1,6 @@
-.\" $Id: mandocd.8,v 1.3 2021/09/28 15:41:41 schwarze Exp $
+.\" $Id: mandocd.8,v 1.5 2025/06/30 15:07:38 schwarze Exp $
.\"
-.\" Copyright (c) 2017 Ingo Schwarze <schwarze@openbsd.org>
+.\" Copyright (c) 2017, 2025 Ingo Schwarze <schwarze@openbsd.org>
.\"
.\" Permission to use, copy, modify, and distribute this software for any
.\" purpose with or without fee is hereby granted, provided that the above
@@ -14,7 +14,7 @@
.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
.\"
-.Dd $Mdocdate: September 28 2021 $
+.Dd $Mdocdate: June 30 2025 $
.Dt MANDOCD 8
.Os
.Sh NAME
@@ -78,6 +78,19 @@ or
input, the second one for formatted output, and the third one
for error output.
.Pp
+After accepting each message,
+.Nm
+replies with a one-byte message of its own,
+allowing the parent process to keep track of how many messages
+.Nm
+has already accepted and how many file descriptors
+are consequently still in flight.
+The parent process can thus limit the number of file descriptors
+in flight at any given time in order to prevent
+.Er EMFILE
+failure of
+.Xr sendmsg 2 .
+.Pp
The options are as follows:
.Bl -tag -width Ds
.It Fl I Cm os Ns = Ns Ar name
@@ -112,7 +125,7 @@ Other output options are not supported.
After exhausting one input file descriptor, all three file descriptors
are closed before reading the next dummy byte and control message.
.Pp
-When a zero-byte message is read, when the
+When a zero-byte message or a misformatted message is read, when the
.Ar socket_fd
is closed by the parent process,
or when an error occurs,
@@ -131,9 +144,10 @@ missing, invalid, or excessive
.Xr exec 3
arguments
.It
+communication failure with the parent, for example failure in
.Xr recvmsg 2
-failure, for example due to
-.Er EMSGSIZE
+or
+.Xr send 2
.It
missing or unexpected control data, in particular a
.Fa cmsg_level
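
For reference, the sendmsg(2) side of the protocol documented above looks roughly as follows. This is a hedged sketch of standard SCM_RIGHTS descriptor passing matching the description, not a verbatim excerpt from catman.c; send_fds() and its arguments are illustrative names.

    #include <sys/socket.h>
    #include <sys/uio.h>
    #include <string.h>

    /*
     * Sketch of passing the three file descriptors (input, output,
     * error) to mandocd together with one dummy byte, as described
     * above.  Illustrative only; not the exact catman.c code.
     */
    static int
    send_fds(int srv_fd, const int fds[3])
    {
    	struct msghdr msg;
    	struct iovec iov[1];
    	struct cmsghdr *cmsg;
    	unsigned char dummy[1] = { '\0' };
    	union {			/* ensures cmsg alignment */
    		struct cmsghdr hdr;
    		unsigned char buf[CMSG_SPACE(3 * sizeof(int))];
    	} cmsgu;

    	iov[0].iov_base = dummy;
    	iov[0].iov_len = sizeof(dummy);
    	memset(&msg, 0, sizeof(msg));
    	msg.msg_iov = iov;
    	msg.msg_iovlen = 1;
    	msg.msg_control = cmsgu.buf;
    	msg.msg_controllen = sizeof(cmsgu.buf);

    	cmsg = CMSG_FIRSTHDR(&msg);
    	cmsg->cmsg_level = SOL_SOCKET;	/* checked by mandocd */
    	cmsg->cmsg_type = SCM_RIGHTS;	/* pass open descriptors */
    	cmsg->cmsg_len = CMSG_LEN(3 * sizeof(int));
    	memcpy(CMSG_DATA(cmsg), fds, 3 * sizeof(int));

    	/* May fail with EMFILE when too many fds are in flight. */
    	return sendmsg(srv_fd, &msg, 0) == -1 ? -1 : 0;
    }
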
diff --git a/contrib/mandoc/mandocd.c b/contrib/mandoc/mandocd.c
index ccc846bd0310..52ba0cc613fa 100644
--- a/contrib/mandoc/mandocd.c
+++ b/contrib/mandoc/mandocd.c
@@ -1,7 +1,7 @@
-/* $Id: mandocd.c,v 1.13 2022/04/14 16:43:44 schwarze Exp $ */
+/* $Id: mandocd.c,v 1.15 2025/06/30 15:04:57 schwarze Exp $ */
/*
+ * Copyright (c) 2017-2019, 2022, 2025 Ingo Schwarze <schwarze@openbsd.org>
* Copyright (c) 2017 Michael Stapelberg <stapelberg@debian.org>
- * Copyright (c) 2017, 2019, 2021 Ingo Schwarze <schwarze@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -27,11 +27,14 @@
#if HAVE_ERR
#include <err.h>
#endif
+#include <errno.h>
#include <limits.h>
+#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <time.h>
#include <unistd.h>
#include "mandoc.h"
@@ -61,6 +64,7 @@ static void usage(void) __attribute__((__noreturn__));
static int
read_fds(int clientfd, int *fds)
{
+ const struct timespec timeout = { 0, 10000000 }; /* 0.01 s */
struct msghdr msg;
struct iovec iov[1];
unsigned char dummy[1];
@@ -98,6 +102,15 @@ read_fds(int clientfd, int *fds)
break;
}
+ *dummy = '\0';
+ while (send(clientfd, dummy, sizeof(dummy), 0) == -1) {
+ if (errno != EAGAIN) {
+ warn("send");
+ return -1;
+ }
+ nanosleep(&timeout, NULL);
+ }
+
if ((cmsg = CMSG_FIRSTHDR(&msg)) == NULL) {
warnx("CMSG_FIRSTHDR: missing control message");
return -1;
@@ -120,6 +133,7 @@ read_fds(int clientfd, int *fds)
int
main(int argc, char *argv[])
{
+ struct sigaction sa;
struct manoutput options;
struct mparse *parser;
void *formatter;
@@ -170,13 +184,25 @@ main(int argc, char *argv[])
argc -= optind;
argv += optind;
}
- if (argc != 1)
+ if (argc != 1) {
+ if (argc == 0)
+ warnx("missing argument: socket_fd");
+ else
+ warnx("too many arguments: %s", argv[1]);
usage();
+ }
errstr = NULL;
clientfd = strtonum(argv[0], 3, INT_MAX, &errstr);
if (errstr)
- errx(1, "file descriptor %s %s", argv[1], errstr);
+ errx(1, "file descriptor %s is %s", argv[0], errstr);
+
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = SIG_IGN;
+ if (sigfillset(&sa.sa_mask) == -1)
+ err(1, "sigfillset");
+ if (sigaction(SIGPIPE, &sa, NULL) == -1)
+ err(1, "sigaction(SIGPIPE)");
mchars_alloc();
parser = mparse_alloc(MPARSE_SO | MPARSE_UTF8 | MPARSE_LATIN1 |
diff --git a/contrib/mandoc/manpath.c b/contrib/mandoc/manpath.c
index 3760e2293c3a..f744368b5a38 100644
--- a/contrib/mandoc/manpath.c
+++ b/contrib/mandoc/manpath.c
@@ -1,6 +1,6 @@
-/* $Id: manpath.c,v 1.44 2021/11/05 18:03:08 schwarze Exp $ */
+/* $Id: manpath.c,v 1.45 2025/06/26 17:26:23 schwarze Exp $ */
/*
- * Copyright (c) 2011,2014,2015,2017-2019 Ingo Schwarze <schwarze@openbsd.org>
+ * Copyright (c) 2011,2014,2015,2017-2021 Ingo Schwarze <schwarze@openbsd.org>
* Copyright (c) 2011 Kristaps Dzonsons <kristaps@bsd.lv>
*
* Permission to use, copy, modify, and distribute this software for any
@@ -218,7 +218,7 @@ manconf_output(struct manoutput *conf, const char *cp, int fromfile)
/* Token taking an optional argument. */
"tag",
/* Tokens not taking arguments. */
- "fragment", "mdoc", "noval", "toc"
+ "fragment", "noval", "toc"
};
const size_t ntoks = sizeof(toks) / sizeof(toks[0]);
@@ -328,12 +328,9 @@ manconf_output(struct manoutput *conf, const char *cp, int fromfile)
conf->fragment = 1;
return 0;
case 10:
- conf->mdoc = 1;
- return 0;
- case 11:
conf->noval = 1;
return 0;
- case 12:
+ case 11:
conf->toc = 1;
return 0;
default:
diff --git a/contrib/mandoc/mdoc_html.c b/contrib/mandoc/mdoc_html.c
index b67eac4be233..8ac3884c7225 100644
--- a/contrib/mandoc/mdoc_html.c
+++ b/contrib/mandoc/mdoc_html.c
@@ -1,4 +1,4 @@
-/* $Id: mdoc_html.c,v 1.353 2025/01/25 00:22:28 schwarze Exp $ */
+/* $Id: mdoc_html.c,v 1.354 2025/06/26 17:06:34 schwarze Exp $ */
/*
* Copyright (c) 2014-2022, 2025 Ingo Schwarze <schwarze@openbsd.org>
* Copyright (c) 2008-2011, 2014 Kristaps Dzonsons <kristaps@bsd.lv>
@@ -454,20 +454,29 @@ static void
mdoc_root_post(const struct roff_meta *meta, struct html *h)
{
struct tag *t;
+ char *title;
+
+ assert(meta->title != NULL);
+ if (meta->msec == NULL)
+ title = mandoc_strdup(meta->title);
+ else
+ mandoc_asprintf(&title, "%s(%s)", meta->title, meta->msec);
t = print_otag(h, TAG_DIV, "cr?", "foot", "doc-pagefooter",
"aria-label", "Manual footer line");
print_otag(h, TAG_SPAN, "c", "foot-left");
+ print_text(h, meta->os);
print_stagq(h, t);
print_otag(h, TAG_SPAN, "c", "foot-date");
print_text(h, meta->date);
print_stagq(h, t);
- print_otag(h, TAG_SPAN, "c", "foot-os");
- print_text(h, meta->os);
+ print_otag(h, TAG_SPAN, "c", "foot-right");
+ print_text(h, title);
print_tagq(h, t);
+ free(title);
}
static int
diff --git a/contrib/mandoc/mdoc_man.c b/contrib/mandoc/mdoc_man.c
index 5438b2ba5941..99693b5d81dd 100644
--- a/contrib/mandoc/mdoc_man.c
+++ b/contrib/mandoc/mdoc_man.c
@@ -1,4 +1,4 @@
-/* $Id: mdoc_man.c,v 1.139 2025/01/24 22:37:24 schwarze Exp $ */
+/* $Id: mdoc_man.c,v 1.141 2025/07/02 19:57:48 schwarze Exp $ */
/*
* Copyright (c) 2011-2021, 2025 Ingo Schwarze <schwarze@openbsd.org>
*
@@ -494,6 +494,7 @@ print_offs(const char *v, int keywords)
const char *end;
int sz;
+ outflags &= ~MMAN_PP;
print_line(".RS", MMAN_Bk_susp);
/* Convert v into a number (of characters). */
@@ -1616,9 +1617,7 @@ pre_lk(DECL_ARGS)
}
/* Link target. */
- font_push('B');
print_word(link->string);
- font_pop();
/* Trailing punctuation. */
while (punct != NULL) {
diff --git a/contrib/mandoc/mdoc_markdown.c b/contrib/mandoc/mdoc_markdown.c
index 06ca839a58b8..eaa22626c99c 100644
--- a/contrib/mandoc/mdoc_markdown.c
+++ b/contrib/mandoc/mdoc_markdown.c
@@ -1,4 +1,4 @@
-/* $Id: mdoc_markdown.c,v 1.39 2025/01/20 07:01:17 schwarze Exp $ */
+/* $Id: mdoc_markdown.c,v 1.40 2025/06/26 17:06:34 schwarze Exp $ */
/*
* Copyright (c) 2017, 2018, 2020, 2025 Ingo Schwarze <schwarze@openbsd.org>
*
@@ -292,6 +292,14 @@ markdown_mdoc(void *arg, const struct roff_meta *mdoc)
md_word(mdoc->os);
md_word("-");
md_word(mdoc->date);
+ md_word("-");
+ md_word(mdoc->title);
+ if (mdoc->msec != NULL) {
+ outflags &= ~MD_spc;
+ md_word("(");
+ md_word(mdoc->msec);
+ md_word(")");
+ }
putchar('\n');
}
diff --git a/contrib/mandoc/mdoc_term.c b/contrib/mandoc/mdoc_term.c
index 931bc384a002..b0544de0304e 100644
--- a/contrib/mandoc/mdoc_term.c
+++ b/contrib/mandoc/mdoc_term.c
@@ -1,6 +1,6 @@
-/* $Id: mdoc_term.c,v 1.383 2023/11/13 19:13:01 schwarze Exp $ */
+/* $Id: mdoc_term.c,v 1.387 2025/07/27 15:27:28 schwarze Exp $ */
/*
- * Copyright (c) 2010, 2012-2020, 2022 Ingo Schwarze <schwarze@openbsd.org>
+ * Copyright (c) 2010,2012-2020,2022,2025 Ingo Schwarze <schwarze@openbsd.org>
* Copyright (c) 2008, 2009, 2010, 2011 Kristaps Dzonsons <kristaps@bsd.lv>
* Copyright (c) 2013 Franco Fichtner <franco@lastsummer.de>
*
@@ -302,7 +302,7 @@ print_mdoc_node(DECL_ARGS)
{
const struct mdoc_term_act *act;
struct termpair npair;
- size_t offset, rmargin;
+ size_t offset, rmargin; /* In basic units. */
int chld;
/*
@@ -441,70 +441,62 @@ print_mdoc_node(DECL_ARGS)
static void
print_mdoc_foot(struct termp *p, const struct roff_meta *meta)
{
- size_t sz;
+ char *title;
+ size_t datelen, titlen; /* In basic units. */
- term_fontrepl(p, TERMFONT_NONE);
-
- /*
- * Output the footer in new-groff style, that is, three columns
- * with the middle being the manual date and flanking columns
- * being the operating system:
- *
- * SYSTEM DATE SYSTEM
- */
+ assert(meta->title != NULL);
+ datelen = term_strlen(p, meta->date);
+ if (meta->msec == NULL)
+ title = mandoc_strdup(meta->title);
+ else
+ mandoc_asprintf(&title, "%s(%s)", meta->title, meta->msec);
+ titlen = term_strlen(p, title);
+ term_fontrepl(p, TERMFONT_NONE);
term_vspace(p);
+ /* Bottom left corner: operating system. */
+
p->tcol->offset = 0;
- sz = term_strlen(p, meta->date);
- p->tcol->rmargin = p->maxrmargin > sz ?
- (p->maxrmargin + term_len(p, 1) - sz) / 2 : 0;
+ p->tcol->rmargin = p->maxrmargin > datelen ?
+ (p->maxrmargin + term_len(p, 1) - datelen) / 2 : 0;
p->trailspace = 1;
p->flags |= TERMP_NOSPACE | TERMP_NOBREAK;
term_word(p, meta->os);
term_flushln(p);
+ /* At the bottom in the middle: manual date. */
+
p->tcol->offset = p->tcol->rmargin;
- sz = term_strlen(p, meta->os);
- p->tcol->rmargin = p->maxrmargin > sz ? p->maxrmargin - sz : 0;
+ p->tcol->rmargin = p->maxrmargin > titlen ?
+ p->maxrmargin - titlen : 0;
p->flags |= TERMP_NOSPACE;
term_word(p, meta->date);
term_flushln(p);
+ /* Bottom right corner: manual title and section. */
+
p->tcol->offset = p->tcol->rmargin;
p->tcol->rmargin = p->maxrmargin;
p->trailspace = 0;
p->flags &= ~TERMP_NOBREAK;
p->flags |= TERMP_NOSPACE;
- term_word(p, meta->os);
+ term_word(p, title);
term_flushln(p);
p->tcol->offset = 0;
- p->tcol->rmargin = p->maxrmargin;
p->flags = 0;
+ free(title);
}
static void
print_mdoc_head(struct termp *p, const struct roff_meta *meta)
{
char *volume, *title;
- size_t vollen, titlen;
-
- /*
- * The header is strange. It has three components, which are
- * really two with the first duplicated. It goes like this:
- *
- * IDENTIFIER TITLE IDENTIFIER
- *
- * The IDENTIFIER is NAME(SECTION), which is the command-name
- * (if given, or "unknown" if not) followed by the manual page
- * section. These are given in `Dt'. The TITLE is a free-form
- * string depending on the manual volume. If not specified, it
- * switches on the manual section.
- */
+ size_t vollen, titlen; /* In basic units. */
assert(meta->vol);
if (NULL == meta->arch)
@@ -514,6 +506,8 @@ print_mdoc_head(struct termp *p, const struct roff_meta *meta)
meta->vol, meta->arch);
vollen = term_strlen(p, volume);
+ /* Top left corner: manual title and section. */
+
if (NULL == meta->msec)
title = mandoc_strdup(meta->title);
else
@@ -524,13 +518,16 @@ print_mdoc_head(struct termp *p, const struct roff_meta *meta)
p->flags |= TERMP_NOBREAK | TERMP_NOSPACE;
p->trailspace = 1;
p->tcol->offset = 0;
- p->tcol->rmargin = 2 * (titlen+1) + vollen < p->maxrmargin ?
+ p->tcol->rmargin =
+ titlen * 2 + term_len(p, 2) + vollen < p->maxrmargin ?
(p->maxrmargin - vollen + term_len(p, 1)) / 2 :
- vollen < p->maxrmargin ? p->maxrmargin - vollen : 0;
+ vollen < p->maxrmargin ? p->maxrmargin - vollen : 0;
term_word(p, title);
term_flushln(p);
+ /* At the top in the middle: manual volume. */
+
p->flags |= TERMP_NOSPACE;
p->tcol->offset = p->tcol->rmargin;
p->tcol->rmargin = p->tcol->offset + vollen + titlen <
@@ -539,6 +536,8 @@ print_mdoc_head(struct termp *p, const struct roff_meta *meta)
term_word(p, volume);
term_flushln(p);
+ /* Top right corner: title and section, again. */
+
p->flags &= ~TERMP_NOBREAK;
p->trailspace = 0;
if (p->tcol->rmargin + titlen <= p->maxrmargin) {
@@ -556,6 +555,11 @@ print_mdoc_head(struct termp *p, const struct roff_meta *meta)
free(volume);
}
+/*
+ * Interpret the string v as a scaled width or, if the syntax is invalid,
+ * measure how much width it takes up when printed. In both cases,
+ * return the width in basic units.
+ */
static int
a2width(const struct termp *p, const char *v)
{
@@ -564,10 +568,10 @@ a2width(const struct termp *p, const char *v)
end = a2roffsu(v, &su, SCALE_MAX);
if (end == NULL || *end != '\0') {
- su.unit = SCALE_EN;
- su.scale = term_strlen(p, v) / term_strlen(p, "0");
+ su.unit = SCALE_BU;
+ su.scale = term_strlen(p, v);
}
- return term_hen(p, &su);
+ return term_hspan(p, &su);
}
/*
@@ -623,8 +627,11 @@ termp_it_pre(DECL_ARGS)
struct roffsu su;
char buf[24];
const struct roff_node *bl, *nn;
- size_t ncols, dcol;
- int i, offset, width;
+ size_t ncols; /* Number of columns in .Bl -column. */
+ size_t dcol; /* Column spacing in basic units. */
+ int i; /* Zero-based column index. */
+ int offset; /* Start of column in basic units. */
+ int width; /* Column width in basic units. */
enum mdoc_list type;
if (n->type == ROFFT_BLOCK) {
@@ -701,10 +708,9 @@ termp_it_pre(DECL_ARGS)
for (i = 0, nn = n->prev;
nn->prev && i < (int)ncols;
nn = nn->prev, i++) {
- su.unit = SCALE_EN;
- su.scale = term_strlen(p, bl->norm->Bl.cols[i]) /
- term_strlen(p, "0");
- offset += term_hen(p, &su) + dcol;
+ su.unit = SCALE_BU;
+ su.scale = term_strlen(p, bl->norm->Bl.cols[i]);
+ offset += term_hspan(p, &su) + dcol;
}
/*
@@ -720,10 +726,9 @@ termp_it_pre(DECL_ARGS)
* Use the declared column widths, extended as explained
* in the preceding paragraph.
*/
- su.unit = SCALE_EN;
- su.scale = term_strlen(p, bl->norm->Bl.cols[i]) /
- term_strlen(p, "0");
- width = term_hen(p, &su) + dcol;
+ su.unit = SCALE_BU;
+ su.scale = term_strlen(p, bl->norm->Bl.cols[i]);
+ width = term_hspan(p, &su) + dcol;
break;
default:
if (NULL == bl->norm->Bl.width)
@@ -1274,6 +1279,7 @@ termp_sh_pre(DECL_ARGS)
term_vspace(p);
break;
case ROFFT_HEAD:
+ p->fontibi = 1;
return termp_bold_pre(p, pair, meta, n);
case ROFFT_BODY:
p->tcol->offset = term_len(p, p->defindent);
@@ -1294,6 +1300,7 @@ termp_sh_post(DECL_ARGS)
{
switch (n->type) {
case ROFFT_HEAD:
+ p->fontibi = 0;
term_newln(p);
break;
case ROFFT_BODY:
@@ -1421,7 +1428,7 @@ termp_fa_pre(DECL_ARGS)
static int
termp_bd_pre(DECL_ARGS)
{
- int offset;
+ int offset; /* In basic units. */
if (n->type == ROFFT_BLOCK) {
print_bvspace(p, n, n);
@@ -1509,7 +1516,8 @@ termp_ss_pre(DECL_ARGS)
term_vspace(p);
break;
case ROFFT_HEAD:
- p->tcol->offset = term_len(p, (p->defindent+1)/2);
+ p->tcol->offset = term_len(p, p->defindent) / 2 + 1;
+ p->fontibi = 1;
return termp_bold_pre(p, pair, meta, n);
case ROFFT_BODY:
p->tcol->offset = term_len(p, p->defindent);
@@ -1526,8 +1534,16 @@ termp_ss_pre(DECL_ARGS)
static void
termp_ss_post(DECL_ARGS)
{
- if (n->type == ROFFT_HEAD || n->type == ROFFT_BODY)
+ switch (n->type) {
+ case ROFFT_HEAD:
+ p->fontibi = 0;
+ /* FALLTHROUGH */
+ case ROFFT_BODY:
term_newln(p);
+ break;
+ default:
+ break;
+ }
}
static int
@@ -1888,9 +1904,7 @@ termp_lk_pre(DECL_ARGS)
}
/* Link target. */
- term_fontpush(p, TERMFONT_BOLD);
term_word(p, link->string);
- term_fontpop(p);
/* Trailing punctuation. */
while (punct != NULL) {
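
The new footer layout in print_mdoc_foot() above places the operating system flush left, the date centered, and TITLE(SECTION) flush right. The margin arithmetic distills to the following sketch, with all widths in basic units; foot_layout() is an illustrative helper under those assumptions, not part of the actual term.c API.

    #include <stddef.h>

    struct footcols {
    	size_t mid_offset;	/* Start of the centered date. */
    	size_t right_offset;	/* Start of TITLE(SECTION). */
    };

    /*
     * Distilled margin arithmetic of print_mdoc_foot() above.
     * maxrmargin is the line width, en the width of one character
     * position, datelen and titlen the rendered string widths,
     * all in basic units.  A sketch, not the actual term.c code.
     */
    static struct footcols
    foot_layout(size_t maxrmargin, size_t en,
        size_t datelen, size_t titlen)
    {
    	struct footcols fc;

    	/* Center the date: its right margin splits the line. */
    	fc.mid_offset = maxrmargin > datelen ?
    	    (maxrmargin + en - datelen) / 2 : 0;
    	/* Right-align the title so it ends at maxrmargin. */
    	fc.right_offset = maxrmargin > titlen ?
    	    maxrmargin - titlen : 0;
    	return fc;
    }
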
diff --git a/contrib/mandoc/mdoc_validate.c b/contrib/mandoc/mdoc_validate.c
index 4ca1253e4b70..ac265b88f484 100644
--- a/contrib/mandoc/mdoc_validate.c
+++ b/contrib/mandoc/mdoc_validate.c
@@ -1,4 +1,4 @@
-/* $Id: mdoc_validate.c,v 1.393 2025/06/05 12:38:26 schwarze Exp $ */
+/* $Id: mdoc_validate.c,v 1.396 2025/07/26 12:23:16 schwarze Exp $ */
/*
* Copyright (c) 2010-2022, 2025 Ingo Schwarze <schwarze@openbsd.org>
* Copyright (c) 2008-2012 Kristaps Dzonsons <kristaps@bsd.lv>
@@ -1714,7 +1714,7 @@ post_xx(POST_ARGS)
os = "OpenBSD";
break;
case MDOC_Ux:
- os = "UNIX";
+ os = "Unix";
break;
default:
abort();
@@ -2777,7 +2777,7 @@ post_dd(POST_ARGS)
mandoc_msg(MANDOCERR_PROLOG_ORDER,
n->line, n->pos, "Dd after Os");
- if (mdoc->quick && n != NULL)
+ if (mdoc->quick)
mdoc->meta.date = mandoc_strdup("");
else
mdoc->meta.date = mandoc_normdate(n->child, n);
@@ -2842,8 +2842,7 @@ post_dt(POST_ARGS)
if (nn == NULL) {
mandoc_msg(MANDOCERR_MSEC_MISSING, n->line, n->pos,
"Dt %s", mdoc->meta.title);
- mdoc->meta.vol = mandoc_strdup("LOCAL");
- return; /* msec and arch remain NULL. */
+ return; /* msec, vol, and arch remain NULL. */
}
mdoc->meta.msec = mandoc_strdup(nn->string);
@@ -2854,7 +2853,6 @@ post_dt(POST_ARGS)
if (cp == NULL) {
mandoc_msg(MANDOCERR_MSEC_BAD,
nn->line, nn->pos, "Dt ... %s", nn->string);
- mdoc->meta.vol = mandoc_strdup(nn->string);
} else {
mdoc->meta.vol = mandoc_strdup(cp);
if (mdoc->filesec != '\0' &&
diff --git a/contrib/mandoc/out.c b/contrib/mandoc/out.c
index f6f5859a1629..21c282b2141b 100644
--- a/contrib/mandoc/out.c
+++ b/contrib/mandoc/out.c
@@ -1,8 +1,8 @@
-/* $Id: out.c,v 1.86 2025/01/05 18:14:39 schwarze Exp $ */
+/* $Id: out.c,v 1.87 2025/07/16 14:33:08 schwarze Exp $ */
/*
- * Copyright (c) 2009, 2010, 2011 Kristaps Dzonsons <kristaps@bsd.lv>
- * Copyright (c) 2011, 2014, 2015, 2017, 2018, 2019, 2021
+ * Copyright (c) 2011, 2014, 2015, 2017, 2018, 2019, 2021, 2025
* Ingo Schwarze <schwarze@openbsd.org>
+ * Copyright (c) 2009, 2010, 2011 Kristaps Dzonsons <kristaps@bsd.lv>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -122,9 +122,23 @@ tblcalc(struct rofftbl *tbl, const struct tbl_span *sp_first,
const struct tbl_dat *dp;
struct roffcol *col;
struct tbl_colgroup *first_group, **gp, *g;
- size_t *colwidth;
- size_t ewidth, min1, min2, wanted, width, xwidth;
- int done, icol, maxcol, necol, nxcol, quirkcol;
+
+ /* Widths in basic units. */
+ size_t *colwidth; /* Widths of all columns. */
+ size_t min1; /* Width of the narrowest column. */
+ size_t min2; /* Width of the second narrowest column. */
+ size_t wanted; /* For any of the narrowest columns. */
+ size_t xwidth; /* Total width of columns not to expand. */
+ size_t ewidth; /* Width of widest column to equalize. */
+ size_t width; /* Width of the data in basic units. */
+ size_t enw; /* Width of one EN unit. */
+
+ int icol; /* Column number, starting at zero. */
+ int maxcol; /* Number of last column. */
+ int necol; /* Number of columns to equalize. */
+ int nxcol; /* Number of columns to expand. */
+ int done; /* Boolean: this group is wide enough. */
+ int quirkcol;
/*
* Allocate the master column specifiers. These will hold the
@@ -139,6 +153,7 @@ tblcalc(struct rofftbl *tbl, const struct tbl_span *sp_first,
maxcol = -1;
first_group = NULL;
+ enw = (*tbl->len)(1, tbl->arg);
for (sp = sp_first; sp != NULL; sp = sp->next) {
if (sp->pos != TBL_SPAN_DATA)
continue;
@@ -175,8 +190,8 @@ tblcalc(struct rofftbl *tbl, const struct tbl_span *sp_first,
opts, dp,
dp->block == 0 ? 0 :
dp->layout->width ? dp->layout->width :
- rmargin ? (rmargin + sp->opts->cols / 2)
- / (sp->opts->cols + 1) : 0);
+ rmargin ? (rmargin / enw + sp->opts->cols / 2) /
+ (sp->opts->cols + 1) * enw : 0);
if (dp->hspans == 0)
continue;
@@ -211,8 +226,8 @@ tblcalc(struct rofftbl *tbl, const struct tbl_span *sp_first,
maxcol = sp_first->opts->cols - 1;
for (icol = 0; icol <= maxcol; icol++) {
col = tbl->cols + icol;
- if (col->width < 1)
- col->width = 1;
+ if (col->width < enw)
+ col->width = enw;
/*
* Column spacings are needed for span width
@@ -234,7 +249,8 @@ tblcalc(struct rofftbl *tbl, const struct tbl_span *sp_first,
for (icol = g->startcol; icol <= g->endcol; icol++) {
width = tbl->cols[icol].width;
if (icol < g->endcol)
- width += tbl->cols[icol].spacing;
+ width += (*tbl->len)(tbl->cols[icol].spacing,
+ tbl->arg);
if (g->wanted <= width) {
done = 1;
break;
@@ -372,9 +388,9 @@ tblcalc(struct rofftbl *tbl, const struct tbl_span *sp_first,
*/
if (nxcol && rmargin) {
- xwidth += 3*maxcol +
+ xwidth += (*tbl->len)(3 * maxcol +
(opts->opts & (TBL_OPT_BOX | TBL_OPT_DBOX) ?
- 2 : !!opts->lvert + !!opts->rvert);
+ 2 : !!opts->lvert + !!opts->rvert), tbl->arg);
if (rmargin <= offset + xwidth)
return;
xwidth = rmargin - offset - xwidth;
@@ -387,7 +403,7 @@ tblcalc(struct rofftbl *tbl, const struct tbl_span *sp_first,
*/
if (nxcol == 5) {
- quirkcol = xwidth % nxcol + 2;
+ quirkcol = xwidth / enw % nxcol + 2;
if (quirkcol != 3 && quirkcol != 4)
quirkcol = -1;
} else
@@ -402,7 +418,7 @@ tblcalc(struct rofftbl *tbl, const struct tbl_span *sp_first,
col->width = (double)xwidth * ++necol / nxcol
- ewidth + 0.4995;
if (necol == quirkcol)
- col->width--;
+ col->width -= enw;
ewidth += col->width;
}
}
@@ -444,9 +460,12 @@ tblcalc_literal(struct rofftbl *tbl, struct roffcol *col,
const char *str; /* Beginning of the first line. */
const char *beg; /* Beginning of the current line. */
char *end; /* End of the current line. */
- size_t lsz; /* Length of the current line. */
- size_t wsz; /* Length of the current word. */
- size_t msz; /* Length of the longest line. */
+
+ /* Widths in basic units. */
+ size_t lsz; /* Of the current line. */
+ size_t wsz; /* Of the current word. */
+ size_t msz; /* Of the longest line. */
+ size_t enw; /* Of one EN unit. */
if (dp->string == NULL || *dp->string == '\0')
return 0;
@@ -460,8 +479,9 @@ tblcalc_literal(struct rofftbl *tbl, struct roffcol *col,
end++;
}
wsz = (*tbl->slen)(beg, tbl->arg);
- if (mw && lsz && lsz + 1 + wsz <= mw)
- lsz += 1 + wsz;
+ enw = (*tbl->len)(1, tbl->arg);
+ if (mw && lsz && lsz + enw + wsz <= mw)
+ lsz += enw + wsz;
else
lsz = wsz;
if (msz < lsz)
@@ -479,7 +499,8 @@ tblcalc_number(struct rofftbl *tbl, struct roffcol *col,
const struct tbl_opts *opts, const struct tbl_dat *dp)
{
const char *cp, *lastdigit, *lastpoint;
- size_t intsz, totsz;
+ size_t totsz; /* Total width of the number in basic units. */
+ size_t intsz; /* Width of the integer part in basic units. */
char buf[2];
if (dp->string == NULL || *dp->string == '\0')
diff --git a/contrib/mandoc/out.h b/contrib/mandoc/out.h
index f746e4486958..a3b49b70460d 100644
--- a/contrib/mandoc/out.h
+++ b/contrib/mandoc/out.h
@@ -1,7 +1,7 @@
-/* $Id: out.h,v 1.35 2022/09/11 09:13:48 schwarze Exp $ */
+/* $Id: out.h,v 1.36 2025/07/16 14:33:08 schwarze Exp $ */
/*
+ * Copyright (c) 2011,2014,2017,2018,2025 Ingo Schwarze <schwarze@openbsd.org>
* Copyright (c) 2009, 2010, 2011 Kristaps Dzonsons <kristaps@bsd.lv>
- * Copyright (c) 2014, 2017, 2018 Ingo Schwarze <schwarze@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -33,11 +33,11 @@ enum roffscale {
};
struct roffcol {
- size_t width; /* width of cell */
- size_t nwidth; /* max. width of number in cell */
- size_t decimal; /* decimal position in cell */
- size_t spacing; /* spacing after the column */
- int flags; /* layout flags, see tbl_cell */
+ size_t width; /* Width of cell [BU]. */
+ size_t nwidth; /* Maximum width of number [BU]. */
+ size_t decimal; /* Decimal position [BU]. */
+ size_t spacing; /* Spacing after the column [EN]. */
+ int flags; /* Layout flags, see tbl_cell. */
};
struct roffsu {
@@ -45,16 +45,14 @@ struct roffsu {
double scale;
};
-typedef size_t (*tbl_sulen)(const struct roffsu *, void *);
typedef size_t (*tbl_strlen)(const char *, void *);
typedef size_t (*tbl_len)(size_t, void *);
struct rofftbl {
- tbl_sulen sulen; /* calculate scaling unit length */
- tbl_strlen slen; /* calculate string length */
- tbl_len len; /* produce width of empty space */
- struct roffcol *cols; /* master column specifiers */
- void *arg; /* passed to sulen, slen, and len */
+ tbl_strlen slen; /* Calculate string length [BU]. */
+ tbl_len len; /* Produce width of empty space [BU]. */
+ struct roffcol *cols; /* Master column specifiers. */
+ void *arg; /* Passed to slen() and len(). */
};
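
The unit annotations added to struct roffcol make the conversion rule explicit: cell widths are stored in device-dependent basic units [BU], while the spacing stays in EN units and must pass through the device's len() callback before it can be compared with or added to a width, as tblcalc() in out.c now does. A minimal sketch follows, assuming a hypothetical device where one EN is 24 basic units.

    #include <stddef.h>

    typedef size_t (*tbl_len)(size_t, void *);

    /* Hypothetical len() callback: one EN unit is 24 basic units. */
    static size_t
    dev_len(size_t en, void *arg)
    {
    	(void)arg;
    	return en * 24;
    }

    /*
     * Sketch of the conversions tblcalc() now performs: widths are
     * kept in basic units, so EN quantities such as the minimum
     * column width and the per-column spacing must pass through
     * the device's len() callback first.
     */
    static size_t
    min_total_width(tbl_len len, void *arg,
        size_t width_bu, size_t spacing_en)
    {
    	size_t enw = len(1, arg);	/* one EN unit [BU] */

    	if (width_bu < enw)		/* at least one EN wide */
    		width_bu = enw;
    	return width_bu + len(spacing_en, arg);
    }

A usage note: with dev_len() as the callback, min_total_width(dev_len, NULL, 0, 1) yields 48 basic units, one EN of minimum width plus one EN of spacing.
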
diff --git a/contrib/mandoc/roff.7 b/contrib/mandoc/roff.7
index 27f83853e75b..adb5852e069b 100644
--- a/contrib/mandoc/roff.7
+++ b/contrib/mandoc/roff.7
@@ -1,6 +1,6 @@
-.\" $Id: roff.7,v 1.121 2023/10/23 20:25:02 schwarze Exp $
+.\" $Id: roff.7,v 1.123 2025/08/04 23:12:08 schwarze Exp $
.\"
-.\" Copyright (c) 2010-2019, 2022-2023 Ingo Schwarze <schwarze@openbsd.org>
+.\" Copyright (c) 2010-2019,2022-2023,2025 Ingo Schwarze <schwarze@openbsd.org>
.\" Copyright (c) 2010, 2011, 2012 Kristaps Dzonsons <kristaps@bsd.lv>
.\"
.\" Permission to use, copy, modify, and distribute this software for any
@@ -15,7 +15,7 @@
.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
.\"
-.Dd $Mdocdate: October 23 2023 $
+.Dd $Mdocdate: August 4 2025 $
.Dt ROFF 7
.Os
.Sh NAME
@@ -79,8 +79,10 @@ They provide free-form text to be printed; the formatting of the text
depends on the respective processing context.
.Sh LANGUAGE SYNTAX
.Nm
-documents may contain only graphable 7-bit ASCII characters, the space
-character, and, in certain circumstances, the tab character.
+documents are text files containing only printable
+.Xr ascii 7
+characters, the space character,
+and, in certain circumstances, the tab character.
The backslash character
.Sq \e
indicates the start of an escape sequence, used for example for
@@ -180,7 +182,7 @@ Italic font.
Return to the previous font.
If a macro caused a font change since the last
.Ic \ef
-eascape sequence or
+escape sequence or
.Ic \&ft
request, this returns to the font before the last font change in
the macro rather than to the font before the last manual font change.
@@ -267,7 +269,7 @@ width of rendered
.Pq en
character
.It u
-default horizontal span for the terminal
+device-dependent basic units
.It M
mini-em (1/100 em)
.El
@@ -289,7 +291,7 @@ for vertical spaces and
for horizontal ones.
.Pp
Examples:
-.Bl -tag -width ".Bl -tag -width 2i" -offset indent -compact
+.Bl -tag -width "xBl -tag -width 2i" -offset indent -compact
.It Li \&.Bl -tag -width 2i
two-inch tagged list indentation in
.Xr mdoc 7
@@ -1319,10 +1321,16 @@ among others because it overrides the
.Xr mandoc 1
.Fl O Cm width
command line option.
-.It Ic \&lnr Ar register Oo Cm + Ns | Ns Cm - Oc Ns Ar value Op Ar increment
+.It Ic \&lnr Ar registername Xo
+.Oo Cm + Ns | Ns Cm \- Oc Ns Ar value
+.Op Ar increment
+.Xc
Set local number register.
This is a Heirloom extension and currently unsupported.
-.It Ic \&lnrf Ar register Oo Cm + Ns | Ns Cm - Oc Ns Ar value Op Ar increment
+.It Ic \&lnrf Ar registername Xo
+.Oo Cm + Ns | Ns Cm \- Oc Ns Ar value
+.Op Ar increment
+.Xc
Set local floating-point register.
This is a Heirloom extension and currently unsupported.
.It Ic \&lpfx Ar string
@@ -1398,10 +1406,13 @@ skipping the
.Ic \&nop
request and any space characters immediately following it.
This is mostly used to indent text lines inside macro definitions.
-.It Ic \&nr Ar register Oo Cm + Ns | Ns Cm - Oc Ns Ar expression Op Ar stepsize
-Define or change a register.
-A register is an arbitrary string value that defines some sort of state,
-which influences parsing and/or formatting.
+.It Ic \&nr Ar registername Xo
+.Oo Cm + Ns | Ns Cm \- Oc Ns Ar expression
+.Op Ar stepsize
+.Xc
+Define or change the number register with the given
+.Ar registername .
+A register can store an integer number.
For the syntax of
.Ar expression ,
see
@@ -1410,6 +1421,9 @@ below.
If it is prefixed by a sign, the register will be
incremented or decremented instead of assigned to.
.Pp
+Once set, the value of a number register can be interpolated using the
+.Ic \en
+escape sequence.
The
.Ar stepsize
is used by the
@@ -1418,29 +1432,13 @@ auto-increment feature.
It remains unchanged when omitted while changing an existing register,
and it defaults to 0 when defining a new register.
.Pp
-The following
-.Ar register
-is handled specially:
-.Bl -tag -width Ds
-.It Cm nS
-If set to a positive integer value, certain
-.Xr mdoc 7
-macros will behave in the same way as in the
-.Em SYNOPSIS
-section.
-If set to 0, these macros will behave in the same way as outside the
-.Em SYNOPSIS
-section, even when called within the
-.Em SYNOPSIS
-section itself.
-Note that starting a new
-.Xr mdoc 7
-section with the
-.Ic \&Sh
-macro will reset this register.
-.El
+Some number registers can be read to inspect parser state,
+and some can be changed to influence formatting.
+For details about individual registers, see the
+.Sx NUMBER REGISTER REFERENCE
+below.
.It Xo
-.Ic \&nrf Ar register Oo Cm + Ns | Ns Cm - Oc Ns Ar expression
+.Ic \&nrf Ar registername Oo Cm + Ns | Ns Cm \- Oc Ns Ar expression
.Op Ar increment
.Xc
Define or change a floating-point register.
@@ -1569,7 +1567,7 @@ requests is not supported, and diversions are not implemented at all.
Rename a number register.
Currently unsupported.
.It Ic \&rr Ar register
-Remove a register.
+Remove a number register.
.It Ic \&rs
End no-space mode.
Currently ignored.
@@ -2152,6 +2150,8 @@ on the current font.
.It Ic \en Ns Oo +|- Oc Ns Ic \&[ Ns Ar name Ns Ic \&]
Interpolate the number register
.Ar name .
+If the register is not yet defined,
+it is automatically initialised to zero before interpolation.
For short names, there are variants
.Ic \en Ns Ar c
and
@@ -2162,6 +2162,9 @@ the register is first incremented or decremented by the
that was specified in the relevant
.Ic \&nr
request, and the changed value is interpolated.
+For the names of predefined registers, see the
+.Sx NUMBER REGISTER REFERENCE
+below.
.It Ic \eO Ns Ar digit , Ic \eO[5 Ns arguments Ns Ic \&]
Suppress output.
This is a groff extension and currently unsupported.
@@ -2250,6 +2253,83 @@ with zero width and height; ignored by
.It Ic \ez
Output the next character without advancing the cursor position.
.El
+.Sh NUMBER REGISTER REFERENCE
+In
+.Xr mdoc 7
+and
+.Xr man 7
+documents, using registers is discouraged.
+For compatibility with legacy documents, the
+.Xr mandoc 1
+.Nm
+parser recognises the following names of read-only registers:
+.Bl -tag -width Ds
+.It Cm .$
+The number of arguments of the innermost user-defined macro
+currently being called, or 0 by default.
+The
+.Ic shift
+request decrements the value of this register.
+.It Cm .A
+Whether ASCII approximation mode is on;
+.Xr mandoc 1
+always returns 0, meaning off.
+.It Cm .g
+Whether the formatter claims groff compatibility;
+.Xr mandoc 1
+always returns 1, meaning yes.
+.It Cm .H
+The minimum horizontal movement in basic units;
+.Xr mandoc 1
+always returns 24, corresponding to one character position.
+.It Cm .j
+The current line adjustment mode;
+.Xr mandoc 1
+always returns 0, meaning flush left.
+.It Cm .l
+The line length in basic units;
+.Xr mandoc 1
+always returns 78 * 24, corresponding to 78 characters per line.
+.It Cm \&.T
+Whether an output device has been selected;
+.Xr mandoc 1
+always returns 1, meaning yes.
+.It Cm .V
+The minimum vertical movement in basic units;
+.Xr mandoc 1
+always returns 40, corresponding to one line height.
+.El
+.Pp
+The
+.Cm nS
+register is handled specially.
+If set to a positive integer value, certain
+.Xr mdoc 7
+macros behave in the same way as in the
+.Em SYNOPSIS
+section.
+If set to 0, these macros behave in the same way as outside the
+.Em SYNOPSIS
+section, even when called within the
+.Em SYNOPSIS
+section itself.
+Starting a new
+.Xr mdoc 7
+section with the
+.Ic \&Sh
+macro resets this register.
+.Pp
+Full
+.Nm
+implementations support large numbers of additional predefined registers.
+While the
+.Ic \&nr
+request supports setting and the
+.Ic \en
+escape sequence supports inspecting arbitrary registers,
+.Xr mandoc 1
+only defines the few registers listed above by default.
+All other registers are undefined by default and yield 0 when interpolated.
.Sh COMPATIBILITY
The
.Xr mandoc 1
@@ -2266,17 +2346,29 @@ never reads or writes external files except via
.Ic \&so
requests with safe relative paths.
.It
-There is no automatic hyphenation, no adjustment to the right margin,
-and very limited support for centering; the output is always set flush-left.
-.It
-Support for setting tabulator and leader characters is missing,
-and support for manually changing indentation is limited.
+There is no automatic hyphenation and no support for the
+.Ic \&ad
+line adjustment request.
+Except when the
+.Ic \&ce
+or
+.Ic \&rj
+requests or the
+.Xr tbl 7
+cell specifications
+.Cm c ,
+.Cm n ,
+or
+.Cm r
+or the table option
+.Cm center
+are used, output is always set flush-left.
.It
-The
-.Sq u
-scaling unit is the default terminal unit.
-In traditional troff systems, this unit changes depending on the
-output media.
+Support for setting tabulator and leader characters is missing, and the
+.Ic \&in
+indentation request is not supported in
+.Xr mdoc 7
+input files.
.It
Width measurements are implemented in a crude way
and often yield wrong results.
@@ -2336,6 +2428,15 @@ implementations.
.%D September 17, 2007
.%U http://heirloom.sourceforge.net/doctools/troff.pdf
.Re
+.Rs
+.%A James Clark
+.%A Werner Lemberg
+.%A G. Branden Robinson
+.%I Free Software Foundation, Inc.
+.%T The GNU Troff Manual
+.%D 1999\(en2023
+.%U https://www.gnu.org/software/groff/manual/
+.Re
.Sh HISTORY
The RUNOFF typesetting system, whose input forms the basis for
.Nm ,
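
The register values documented above can be cross-checked against the terminal output devices: mandoc uses 24 basic units per character cell there, so the quoted .H and .l values are consistent. A minimal sketch in C, assuming exactly those terminal constants (they are not a public API):

    #include <assert.h>

    int
    main(void)
    {
    	int h = 24;         /* \n(.H: one character position */
    	int l = 78 * 24;    /* \n(.l: 78 characters per line */

    	assert(l / h == 78);
    	return 0;
    }
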
diff --git a/contrib/mandoc/roff_term.c b/contrib/mandoc/roff_term.c
index f696898ebd5a..8f95aa920790 100644
--- a/contrib/mandoc/roff_term.c
+++ b/contrib/mandoc/roff_term.c
@@ -1,6 +1,7 @@
-/* $Id: roff_term.c,v 1.25 2023/04/28 19:11:04 schwarze Exp $ */
+/* $Id: roff_term.c,v 1.26 2025/07/16 14:33:08 schwarze Exp $ */
/*
- * Copyright (c) 2010,2014,2015,2017-2020 Ingo Schwarze <schwarze@openbsd.org>
+ * Copyright (c) 2010, 2014, 2015, 2017-2021, 2025
+ * Ingo Schwarze <schwarze@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -159,8 +160,13 @@ static void
roff_term_pre_po(ROFF_TERM_ARGS)
{
struct roffsu su;
- static int po, pouse, polast;
- int ponew;
+
+ /* Page offsets in basic units. */
+ static int polast; /* Previously requested. */
+ static int po; /* Currently requested. */
+ static int pouse; /* Currently used. */
+ int pomax; /* Maximum to be used. */
+ int ponew; /* Newly requested. */
/* Revert the currently active page offset. */
p->tcol->offset -= pouse;
@@ -168,7 +174,7 @@ roff_term_pre_po(ROFF_TERM_ARGS)
/* Determine the requested page offset. */
if (n->child != NULL &&
a2roffsu(n->child->string, &su, SCALE_EM) != NULL) {
- ponew = term_hen(p, &su);
+ ponew = term_hspan(p, &su);
if (*n->child->string == '+' ||
*n->child->string == '-')
ponew += po;
@@ -180,8 +186,9 @@ roff_term_pre_po(ROFF_TERM_ARGS)
po = ponew;
/* Truncate to the range [-offset, 60], remember, and apply it. */
- pouse = po >= 60 ? 60 :
- po < -(int)p->tcol->offset ? -(int)p->tcol->offset : po;
+ pomax = term_len(p, 60);
+ pouse = po >= pomax ? pomax :
+ po < -(int)p->tcol->offset ? -p->tcol->offset : po;
p->tcol->offset += pouse;
}
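
The pouse computation above clamps the requested page offset, in basic units, to the range from -offset to 60 character widths. A standalone sketch of that clamping; the helper name is hypothetical:

    /* Mirrors the pouse assignment in the hunk above. */
    static int
    clamp_po(int po, int offset, int pomax)
    {
    	if (po >= pomax)
    		return pomax;     /* at most 60 characters */
    	if (po < -offset)
    		return -offset;   /* not left of column 0 */
    	return po;
    }
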
@@ -219,9 +226,10 @@ static void
roff_term_pre_ti(ROFF_TERM_ARGS)
{
struct roffsu su;
- const char *cp;
- const size_t maxoff = 72;
- int len, sign;
+ const char *cp; /* Request argument. */
+ size_t maxoff; /* Maximum indentation in basic units. */
+ int len; /* Request argument in basic units. */
+ int sign;
roff_term_pre_br(p, n);
@@ -239,7 +247,8 @@ roff_term_pre_ti(ROFF_TERM_ARGS)
if (a2roffsu(cp, &su, SCALE_EM) == NULL)
return;
- len = term_hen(p, &su);
+ len = term_hspan(p, &su);
+ maxoff = term_len(p, 72);
switch (sign) {
case 1:
diff --git a/contrib/mandoc/tbl.h b/contrib/mandoc/tbl.h
index 5e98735d6f97..c7566c110f42 100644
--- a/contrib/mandoc/tbl.h
+++ b/contrib/mandoc/tbl.h
@@ -1,7 +1,7 @@
-/* $Id: tbl.h,v 1.3 2025/01/05 18:14:39 schwarze Exp $ */
+/* $Id: tbl.h,v 1.4 2025/07/16 14:33:08 schwarze Exp $ */
/*
+ * Copyright (c) 2014-2018, 2021, 2025 Ingo Schwarze <schwarze@openbsd.org>
* Copyright (c) 2010, 2011 Kristaps Dzonsons <kristaps@bsd.lv>
- * Copyright (c) 2014,2015,2017,2018,2021 Ingo Schwarze <schwarze@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -27,8 +27,8 @@ struct tbl_opts {
#define TBL_OPT_NOSPACE (1 << 6) /* Option "nospaces". */
#define TBL_OPT_NOWARN (1 << 7) /* Option "nowarn". */
int cols; /* Number of columns. */
- int lvert; /* Width of left vertical line. */
- int rvert; /* Width of right vertical line. */
+ int lvert; /* Width of left vertical line in EN. */
+ int rvert; /* Width of right vertical line in EN. */
char tab; /* Option "tab": cell separator. */
char decimal; /* Option "decimalpoint". */
};
@@ -51,9 +51,9 @@ enum tbl_cellt {
*/
struct tbl_cell {
struct tbl_cell *next; /* Layout cell to the right. */
- size_t width; /* Minimum column width. */
- size_t spacing; /* To the right of the column. */
- int vert; /* Width of subsequent vertical line. */
+ size_t width; /* Minimum column width in basic units. */
+ size_t spacing; /* To the right of the column in EN. */
+ int vert; /* Width of subseq. vertical line in EN. */
int col; /* Column number, starting from 0. */
int flags;
#define TBL_CELL_TALIGN (1 << 2) /* t, T */
@@ -73,7 +73,7 @@ struct tbl_row {
struct tbl_row *next; /* Layout row below. */
struct tbl_cell *first; /* Leftmost layout cell. */
struct tbl_cell *last; /* Rightmost layout cell. */
- int vert; /* Width of left vertical line. */
+ int vert; /* Width of left vertical line in EN. */
};
enum tbl_datt {
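
The [EN] and [BU] annotations above distinguish the two width units used throughout this commit: an EN is the width of one character cell, while a basic unit is the smaller device-dependent unit that the horizontal-span functions now return. A minimal conversion sketch, assuming the terminal ratio of 24 basic units per EN:

    #include <stddef.h>

    #define BU_PER_EN 24    /* assumed terminal ratio; other devices differ */

    static size_t
    en_to_bu(size_t en)
    {
    	return en * BU_PER_EN;
    }
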
diff --git a/contrib/mandoc/tbl_html.c b/contrib/mandoc/tbl_html.c
index 57d90c4c2d67..56ea3c08eef4 100644
--- a/contrib/mandoc/tbl_html.c
+++ b/contrib/mandoc/tbl_html.c
@@ -1,4 +1,4 @@
-/* $Id: tbl_html.c,v 1.41 2022/04/23 14:02:17 schwarze Exp $ */
+/* $Id: tbl_html.c,v 1.42 2025/07/16 14:33:08 schwarze Exp $ */
/*
* Copyright (c) 2014, 2015, 2017, 2018, 2021, 2022
* Ingo Schwarze <schwarze@openbsd.org>
@@ -37,7 +37,6 @@
static void html_tblopen(struct html *, const struct tbl_span *);
static size_t html_tbl_len(size_t, void *);
static size_t html_tbl_strlen(const char *, void *);
-static size_t html_tbl_sulen(const struct roffsu *, void *);
static size_t
@@ -52,36 +51,6 @@ html_tbl_strlen(const char *p, void *arg)
return strlen(p);
}
-static size_t
-html_tbl_sulen(const struct roffsu *su, void *arg)
-{
- if (su->scale < 0.0)
- return 0;
-
- switch (su->unit) {
- case SCALE_FS: /* 2^16 basic units */
- return su->scale * 65536.0 / 24.0;
- case SCALE_IN: /* 10 characters per inch */
- return su->scale * 10.0;
- case SCALE_CM: /* 2.54 cm per inch */
- return su->scale * 10.0 / 2.54;
- case SCALE_PC: /* 6 pica per inch */
- case SCALE_VS:
- return su->scale * 10.0 / 6.0;
- case SCALE_EN:
- case SCALE_EM:
- return su->scale;
- case SCALE_PT: /* 12 points per pica */
- return su->scale * 10.0 / 6.0 / 12.0;
- case SCALE_BU: /* 24 basic units per character */
- return su->scale / 24.0;
- case SCALE_MM: /* 1/1000 inch */
- return su->scale / 100.0;
- default:
- abort();
- }
-}
-
static void
html_tblopen(struct html *h, const struct tbl_span *sp)
{
@@ -89,7 +58,6 @@ html_tblopen(struct html *h, const struct tbl_span *sp)
if (h->tbl.cols == NULL) {
h->tbl.len = html_tbl_len;
h->tbl.slen = html_tbl_strlen;
- h->tbl.sulen = html_tbl_sulen;
tblcalc(&h->tbl, sp, 0, 0);
}
assert(NULL == h->tblt);
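
The deleted html_tbl_sulen() mapped scaling units to character cells for HTML output; now that tblcalc() receives widths in basic units from the roff layer, the backend-specific table is redundant. A rough cross-check of one row, assuming 10 cells per inch (as in the deleted SCALE_IN case) and 24 basic units per cell:

    #include <assert.h>

    int
    main(void)
    {
    	int scale = 2;                      /* e.g. a "2i" width */
    	int cells_old = scale * 10;         /* deleted SCALE_IN row */
    	int cells_new = scale * 240 / 24;   /* via basic units */

    	assert(cells_old == cells_new);
    	return 0;
    }
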
diff --git a/contrib/mandoc/tbl_layout.c b/contrib/mandoc/tbl_layout.c
index 3b7e64580fd5..aa054dee9411 100644
--- a/contrib/mandoc/tbl_layout.c
+++ b/contrib/mandoc/tbl_layout.c
@@ -1,4 +1,4 @@
-/* $Id: tbl_layout.c,v 1.51 2025/01/05 18:14:39 schwarze Exp $ */
+/* $Id: tbl_layout.c,v 1.52 2025/07/16 14:33:08 schwarze Exp $ */
/*
* Copyright (c) 2012, 2014, 2015, 2017, 2020, 2021, 2025
* Ingo Schwarze <schwarze@openbsd.org>
@@ -66,8 +66,8 @@ mods(struct tbl_node *tbl, struct tbl_cell *cp,
int ln, const char *p, int *pos)
{
char *endptr;
- unsigned long spacing;
- int isz;
+ unsigned long spacing; /* Column spacing in EN units. */
+ int isz; /* Width in basic units. */
enum mandoc_esc fontesc;
mod:
@@ -145,8 +145,7 @@ mod:
mandoc_msg(MANDOCERR_TBLLAYOUT_WIDTH,
ln, *pos, "%s", p + *pos);
else {
- /* Convert from BU to EN and round. */
- cp->width = (isz + 11) /24;
+ cp->width = isz;
(*pos)++;
}
} else {
@@ -155,6 +154,7 @@ mod:
cp->width *= 10;
cp->width += p[(*pos)++] - '0';
}
+ cp->width *= 24;
if (cp->width == 0)
mandoc_msg(MANDOCERR_TBLLAYOUT_WIDTH,
ln, *pos, "%s", p + *pos);
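
Both branches above now leave cp->width in basic units: a scaled width already arrives in basic units, and a bare digit count is multiplied by 24. The conversion that was removed went the other way, rounding basic units to the nearest EN; restated as a sketch, assuming 24 basic units per EN:

    /* The old, now removed, rounding from basic units to EN. */
    static int
    bu_to_en(int isz)
    {
    	return (isz + 11) / 24;
    }
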
diff --git a/contrib/mandoc/tbl_term.c b/contrib/mandoc/tbl_term.c
index e92349514d9f..a7f057084266 100644
--- a/contrib/mandoc/tbl_term.c
+++ b/contrib/mandoc/tbl_term.c
@@ -1,6 +1,6 @@
-/* $Id: tbl_term.c,v 1.79 2022/08/28 10:58:31 schwarze Exp $ */
+/* $Id: tbl_term.c,v 1.81 2025/07/24 17:54:48 schwarze Exp $ */
/*
- * Copyright (c) 2011-2022 Ingo Schwarze <schwarze@openbsd.org>
+ * Copyright (c) 2011-2022, 2025 Ingo Schwarze <schwarze@openbsd.org>
* Copyright (c) 2009, 2011 Kristaps Dzonsons <kristaps@bsd.lv>
*
* Permission to use, copy, modify, and distribute this software for any
@@ -39,23 +39,23 @@
static size_t term_tbl_len(size_t, void *);
static size_t term_tbl_strlen(const char *, void *);
-static size_t term_tbl_sulen(const struct roffsu *, void *);
static void tbl_data(struct termp *, const struct tbl_opts *,
const struct tbl_cell *,
const struct tbl_dat *,
- const struct roffcol *);
+ const struct roffcol *, size_t *);
static void tbl_direct_border(struct termp *, int, size_t);
-static void tbl_fill_border(struct termp *, int, size_t);
-static void tbl_fill_char(struct termp *, char, size_t);
-static void tbl_fill_string(struct termp *, const char *, size_t);
+static void tbl_fill_border(struct termp *, int, size_t *, size_t);
+static void tbl_fill_char(struct termp *, char, size_t *, size_t);
+static void tbl_fill_string(struct termp *, const char *,
+ size_t *, size_t);
static void tbl_hrule(struct termp *, const struct tbl_span *,
const struct tbl_span *, const struct tbl_span *,
int);
static void tbl_literal(struct termp *, const struct tbl_dat *,
- const struct roffcol *);
+ const struct roffcol *, size_t *);
static void tbl_number(struct termp *, const struct tbl_opts *,
const struct tbl_dat *,
- const struct roffcol *);
+ const struct roffcol *, size_t *);
static void tbl_word(struct termp *, const struct tbl_dat *);
@@ -140,15 +140,6 @@ static const int *borders_locale;
static size_t
-term_tbl_sulen(const struct roffsu *su, void *arg)
-{
- int i;
-
- i = term_hen((const struct termp *)arg, su);
- return i > 0 ? i : 0;
-}
-
-static size_t
term_tbl_strlen(const char *p, void *arg)
{
return term_strlen((const struct termp *)arg, p);
@@ -166,16 +157,29 @@ term_tbl(struct termp *tp, const struct tbl_span *sp)
{
const struct tbl_cell *cp, *cpn, *cpp, *cps;
const struct tbl_dat *dp;
- static size_t offset;
- size_t save_offset;
- size_t coloff, tsz;
- int hspans, ic, more;
- int dvert, fc, horiz, lhori, rhori, uvert;
+
+ /* Positions and widths in basic units. */
+ static size_t offset; /* Of the table as a whole. */
+ size_t save_offset; /* Of the surrounding text. */
+ size_t coloff; /* Of this cell. */
+ size_t tsz; /* Total width of the table. */
+ size_t enw; /* Width of one EN unit. */
+
+ int ic; /* Column number. */
+ int hspans; /* Number of spans following this cell. */
+ int horiz; /* Boolean: this row only contains a line. */
+ int lhori; /* Number of horizontal lines pointing left. */
+ int rhori; /* Number of horizontal lines pointing right. */
+ int dvert; /* Number of vertical lines pointing down. */
+ int uvert; /* Number of vertical lines pointing up. */
+ int fc; /* Frame character index in borders_locale[]. */
+ int more; /* Boolean: there are more columns to print. */
/* Inhibit printing of spaces: we do padding ourselves. */
tp->flags |= TERMP_NOSPACE | TERMP_NONOSPACE;
save_offset = tp->tcol->offset;
+ enw = term_len(tp, 1);
/*
* The first time we're invoked for a given table block,
@@ -188,7 +192,6 @@ term_tbl(struct termp *tp, const struct tbl_span *sp)
tp->tbl.len = term_tbl_len;
tp->tbl.slen = term_tbl_strlen;
- tp->tbl.sulen = term_tbl_sulen;
tp->tbl.arg = tp;
tblcalc(&tp->tbl, sp, tp->tcol->offset, tp->tcol->rmargin);
@@ -197,17 +200,36 @@ term_tbl(struct termp *tp, const struct tbl_span *sp)
offset = tp->tcol->offset;
if (sp->opts->opts & TBL_OPT_CENTRE) {
- tsz = sp->opts->opts & (TBL_OPT_BOX | TBL_OPT_DBOX)
- ? 2 : !!sp->opts->lvert + !!sp->opts->rvert;
+
+ /*
+ * Vertical lines on the edges of the table make the
+ * table wider; take that into account for centering.
+ * The following assignment essentially says that a
+ * line on the right side occupies two columns (which
+ * matches reality) and a line on the left side three
+ * columns (which does not match reality; in fact,
+ * it only occupies two columns). But this is how
+ * groff does centering, so for compatibility, use
+ * the same numbers as groff.
+ */
+
+ tsz = term_len(tp,
+ sp->opts->opts & (TBL_OPT_BOX | TBL_OPT_DBOX) ?
+ 5 : 3 * !!sp->opts->lvert + 2 * !!sp->opts->rvert);
+
+ /* Column widths and column spacing. */
+
for (ic = 0; ic + 1 < sp->opts->cols; ic++)
tsz += tp->tbl.cols[ic].width +
- tp->tbl.cols[ic].spacing;
+ term_len(tp, tp->tbl.cols[ic].spacing);
if (sp->opts->cols)
tsz += tp->tbl.cols[sp->opts->cols - 1].width;
+
if (offset + tsz > tp->tcol->rmargin)
- tsz -= 1;
+ tsz -= enw;
offset = offset + tp->tcol->rmargin > tsz ?
- (offset + tp->tcol->rmargin - tsz) / 2 : 0;
+ ((offset + tp->tcol->rmargin - tsz) / enw / 2) *
+ enw : 0;
tp->tcol->offset = offset;
}
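
The final assignment above rounds the centering offset down to a whole number of EN so that the table starts on a character-cell boundary. A worked example with hypothetical numbers:

    #include <assert.h>
    #include <stddef.h>

    int
    main(void)
    {
    	size_t enw = 24;              /* assumed terminal EN width */
    	size_t offset = 0;
    	size_t rmargin = 78 * enw;    /* 78-cell line */
    	size_t tsz = 41 * enw;        /* 41-cell table */
    	size_t newoff = ((offset + rmargin - tsz) / enw / 2) * enw;

    	/* (78 - 41) / 2 = 18.5, rounded down to 18 whole cells. */
    	assert(newoff == 18 * enw);
    	return 0;
    }
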
@@ -239,7 +261,7 @@ term_tbl(struct termp *tp, const struct tbl_span *sp)
if (sp->opts->opts & (TBL_OPT_BOX | TBL_OPT_DBOX) ||
sp->opts->lvert)
- coloff++;
+ coloff += enw * 2;
tp->tcol->rmargin = coloff;
/* Set up the data columns. */
@@ -254,7 +276,8 @@ term_tbl(struct termp *tp, const struct tbl_span *sp)
coloff += tp->tbl.cols[ic].width;
tp->tcol->rmargin = coloff;
if (ic + 1 < sp->opts->cols)
- coloff += tp->tbl.cols[ic].spacing;
+ coloff += term_len(tp,
+ tp->tbl.cols[ic].spacing);
if (hspans) {
hspans--;
continue;
@@ -269,7 +292,7 @@ term_tbl(struct termp *tp, const struct tbl_span *sp)
/* Set up a column for a right vertical frame. */
tp->tcol++;
- tp->tcol->offset = coloff + 1;
+ tp->tcol->offset = coloff + enw;
tp->tcol->rmargin = tp->maxrmargin;
/* Spans may have reduced the number of columns. */
@@ -279,6 +302,7 @@ term_tbl(struct termp *tp, const struct tbl_span *sp)
/* Fill the buffers for all data columns. */
tp->tcol = tp->tcols;
+ coloff = tp->tcols[1].offset;
cp = cpn = sp->layout->first;
dp = sp->first;
hspans = 0;
@@ -294,7 +318,9 @@ term_tbl(struct termp *tp, const struct tbl_span *sp)
tp->tcol++;
tp->col = 0;
tp->flags &= ~(TERMP_BACKAFTER | TERMP_BACKBEFORE);
- tbl_data(tp, sp->opts, cp, dp, tp->tbl.cols + ic);
+ tbl_data(tp, sp->opts, cp, dp, tp->tbl.cols + ic,
+ &coloff);
+ coloff += term_len(tp, tp->tbl.cols[ic].spacing);
if (dp != NULL &&
(ic || sp->layout->first->pos != TBL_CELL_SPAN)) {
hspans = dp->hspans;
@@ -328,8 +354,8 @@ term_tbl(struct termp *tp, const struct tbl_span *sp)
fc = BUP * uvert + BDOWN * dvert + BRIGHT * rhori;
if (uvert > 0 || dvert > 0 || (horiz && sp->opts->lvert)) {
(*tp->advance)(tp, tp->tcols->offset);
- tp->viscol = tp->tcol->offset;
- tbl_direct_border(tp, fc, 1);
+ tbl_direct_border(tp, fc, enw);
+ tbl_direct_border(tp, BHORIZ * rhori, enw);
}
/* Print the data cells. */
@@ -348,6 +374,7 @@ term_tbl(struct termp *tp, const struct tbl_span *sp)
for (ic = 0; ic < sp->opts->cols; ic++) {
/*
+ * Handle horizontal alignment.
* Figure out whether to print a
* vertical line after this cell
* and advance to next layout cell.
@@ -362,6 +389,16 @@ term_tbl(struct termp *tp, const struct tbl_span *sp)
if (sp->pos == TBL_SPAN_DATA)
uvert = dvert = cps->vert;
switch (cp->pos) {
+ case TBL_CELL_CENTRE:
+ tp->flags |= TERMP_CENTER;
+ break;
+ case TBL_CELL_RIGHT:
+ tp->flags |= TERMP_RIGHT;
+ break;
+ case TBL_CELL_LONG:
+ if (hspans == 0)
+ tp->tcol->offset += enw;
+ break;
case TBL_CELL_HORIZ:
fc = BHORIZ;
break;
@@ -432,6 +469,9 @@ term_tbl(struct termp *tp, const struct tbl_span *sp)
tp->tcol++;
if (tp->tcol->col < tp->tcol->lastcol)
term_flushln(tp);
+ tp->flags &= ~(TERMP_CENTER | TERMP_RIGHT);
+ if (cp != NULL && cp->pos == TBL_CELL_LONG)
+ tp->tcol->offset -= enw;
if (tp->tcol->col < tp->tcol->lastcol)
more = 1;
@@ -451,15 +491,13 @@ term_tbl(struct termp *tp, const struct tbl_span *sp)
continue;
}
- if (tp->viscol < tp->tcol->rmargin) {
- (*tp->advance)(tp, tp->tcol->rmargin
- - tp->viscol);
- tp->viscol = tp->tcol->rmargin;
- }
+ if (tp->viscol < tp->tcol->rmargin)
+ (*tp->advance)(tp,
+ tp->tcol->rmargin - tp->viscol);
while (tp->viscol < tp->tcol->rmargin +
- tp->tbl.cols[ic].spacing / 2)
+ term_len(tp, tp->tbl.cols[ic].spacing / 2))
tbl_direct_border(tp,
- BHORIZ * lhori, 1);
+ BHORIZ * lhori, enw);
if (tp->tcol + 1 == tp->tcols + tp->lasttcol)
continue;
@@ -479,7 +517,7 @@ term_tbl(struct termp *tp, const struct tbl_span *sp)
if (tp->tbl.cols[ic].spacing)
tbl_direct_border(tp,
BLEFT * lhori + BRIGHT * rhori +
- BUP * uvert + BDOWN * dvert, 1);
+ BUP * uvert + BDOWN * dvert, enw);
if (tp->enc == TERMENC_UTF8)
uvert = dvert = 0;
@@ -489,7 +527,7 @@ term_tbl(struct termp *tp, const struct tbl_span *sp)
tbl_direct_border(tp,
BHORIZ * rhori +
BUP * (uvert > 1) +
- BDOWN * (dvert > 1), 1);
+ BDOWN * (dvert > 1), enw);
}
}
@@ -528,15 +566,14 @@ term_tbl(struct termp *tp, const struct tbl_span *sp)
if (horiz == 0 && (IS_HORIZ(sp->layout->last) == 0 ||
sp->layout->last->col + 1 < sp->opts->cols)) {
tp->tcol++;
- do {
+ if (tp->tcol->offset > tp->viscol)
tbl_direct_border(tp,
- BHORIZ * lhori, 1);
- } while (tp->viscol < tp->tcol->offset);
+ BHORIZ * lhori,
+ tp->tcol->offset - tp->viscol);
}
- tbl_direct_border(tp, fc, 1);
+ tbl_direct_border(tp, fc, enw);
}
(*tp->endline)(tp);
- tp->viscol = 0;
} while (more);
/*
@@ -575,6 +612,7 @@ tbl_hrule(struct termp *tp, const struct tbl_span *spp,
const struct tbl_cell *cpn; /* Layout cell below this line. */
const struct tbl_dat *dpn; /* Data cell below this line. */
const struct roffcol *col; /* Contains width and spacing. */
+ size_t enw; /* Width of one EN unit. */
int opts; /* For the table as a whole. */
int bw; /* Box line width. */
int hw; /* Horizontal line width. */
@@ -599,16 +637,19 @@ tbl_hrule(struct termp *tp, const struct tbl_span *spp,
/* Print the left end of the line. */
- if (tp->viscol == 0) {
+ enw = term_len(tp, 1);
+ if (tp->viscol == 0)
(*tp->advance)(tp, tp->tcols->offset);
- tp->viscol = tp->tcols->offset;
- }
- if (flags != 0)
+ if (flags != 0) {
tbl_direct_border(tp,
(spp == NULL ? 0 : BUP * bw) +
(spn == NULL ? 0 : BDOWN * bw) +
(spp == NULL || cpn == NULL ||
- cpn->pos != TBL_CELL_DOWN ? BRIGHT * hw : 0), 1);
+ cpn->pos != TBL_CELL_DOWN ? BRIGHT * hw : 0), enw);
+ tbl_direct_border(tp,
+ (spp == NULL || cpn == NULL ||
+ cpn->pos != TBL_CELL_DOWN ? BHORIZ * hw : 0), enw);
+ }
col = tp->tbl.cols;
for (;;) {
@@ -625,7 +666,7 @@ tbl_hrule(struct termp *tp, const struct tbl_span *spp,
strcmp(dpn->string, "\\^") != 0))
? hw : 0;
tbl_direct_border(tp, BHORIZ * lw,
- col->width + col->spacing / 2);
+ col->width + term_len(tp, col->spacing / 2));
/*
* Figure out whether a vertical line is crossing
@@ -678,7 +719,7 @@ tbl_hrule(struct termp *tp, const struct tbl_span *spp,
if (col->spacing)
tbl_direct_border(tp, BLEFT * lw +
- BRIGHT * rw + BUP * uw + BDOWN * dw, 1);
+ BRIGHT * rw + BUP * uw + BDOWN * dw, enw);
/*
* In ASCII output, a crossing may print two characters.
@@ -688,13 +729,14 @@ tbl_hrule(struct termp *tp, const struct tbl_span *spp,
uw = dw = 0;
if (col->spacing > 2)
tbl_direct_border(tp,
- BHORIZ * rw + BUP * uw + BDOWN * dw, 1);
+ BHORIZ * rw + BUP * uw + BDOWN * dw, enw);
/* Padding before the start of the next column. */
if (col->spacing > 4)
tbl_direct_border(tp,
- BHORIZ * rw, (col->spacing - 3) / 2);
+ BHORIZ * rw,
+ term_len(tp, (col->spacing - 3) / 2));
}
/* Print the right end of the line. */
@@ -705,23 +747,22 @@ tbl_hrule(struct termp *tp, const struct tbl_span *spp,
(spn == NULL ? 0 : BDOWN * bw) +
(spp == NULL || spn == NULL ||
spn->layout->last->pos != TBL_CELL_DOWN ?
- BLEFT * hw : 0), 1);
+ BLEFT * hw : 0), enw);
(*tp->endline)(tp);
- tp->viscol = 0;
}
}
static void
tbl_data(struct termp *tp, const struct tbl_opts *opts,
const struct tbl_cell *cp, const struct tbl_dat *dp,
- const struct roffcol *col)
+ const struct roffcol *col, size_t *coloff)
{
switch (cp->pos) {
case TBL_CELL_HORIZ:
- tbl_fill_border(tp, BHORIZ, col->width);
+ tbl_fill_border(tp, BHORIZ, coloff, col->width);
return;
case TBL_CELL_DHORIZ:
- tbl_fill_border(tp, BHORIZ * 2, col->width);
+ tbl_fill_border(tp, BHORIZ * 2, coloff, col->width);
return;
default:
break;
@@ -735,11 +776,11 @@ tbl_data(struct termp *tp, const struct tbl_opts *opts,
return;
case TBL_DATA_HORIZ:
case TBL_DATA_NHORIZ:
- tbl_fill_border(tp, BHORIZ, col->width);
+ tbl_fill_border(tp, BHORIZ, coloff, col->width);
return;
case TBL_DATA_NDHORIZ:
case TBL_DATA_DHORIZ:
- tbl_fill_border(tp, BHORIZ * 2, col->width);
+ tbl_fill_border(tp, BHORIZ * 2, coloff, col->width);
return;
default:
break;
@@ -750,10 +791,10 @@ tbl_data(struct termp *tp, const struct tbl_opts *opts,
case TBL_CELL_CENTRE:
case TBL_CELL_LEFT:
case TBL_CELL_RIGHT:
- tbl_literal(tp, dp, col);
+ tbl_literal(tp, dp, col, coloff);
break;
case TBL_CELL_NUMBER:
- tbl_number(tp, opts, dp, col);
+ tbl_number(tp, opts, dp, col, coloff);
break;
case TBL_CELL_DOWN:
case TBL_CELL_SPAN:
@@ -763,46 +804,72 @@ tbl_data(struct termp *tp, const struct tbl_opts *opts,
}
}
+/*
+ * Print multiple copies of the string cp to advance to
+ * len basic units from the left edge of the current column.
+ */
static void
-tbl_fill_string(struct termp *tp, const char *cp, size_t len)
+tbl_fill_string(struct termp *tp, const char *cp, size_t *coloff, size_t len)
{
- size_t i, sz;
+ size_t sz; /* Width of the string cp in basic units. */
+ size_t target; /* Distance from the left margin in basic units. */
+ if (len == 0)
+ return;
sz = term_strlen(tp, cp);
- for (i = 0; i < len; i += sz)
+ target = tp->tcol->offset + len;
+ while (*coloff < target) {
term_word(tp, cp);
+ *coloff += sz;
+ }
}
+/*
+ * Print multiple copies of the ASCII character c to advance to
+ * len basic units from the left edge of the current column.
+ */
static void
-tbl_fill_char(struct termp *tp, char c, size_t len)
+tbl_fill_char(struct termp *tp, char c, size_t *coloff, size_t len)
{
char cp[2];
cp[0] = c;
cp[1] = '\0';
- tbl_fill_string(tp, cp, len);
+ tbl_fill_string(tp, cp, coloff, len);
}
+/*
+ * Print multiple copies of the border c to fill len basic units.
+ * Used for horizontal lines inside table cells.
+ */
static void
-tbl_fill_border(struct termp *tp, int c, size_t len)
+tbl_fill_border(struct termp *tp, int c, size_t *coloff, size_t len)
{
char buf[12];
if ((c = borders_locale[c]) > 127) {
(void)snprintf(buf, sizeof(buf), "\\[u%04x]", c);
- tbl_fill_string(tp, buf, len);
+ tbl_fill_string(tp, buf, coloff, len);
} else
- tbl_fill_char(tp, c, len);
+ tbl_fill_char(tp, c, coloff, len);
}
+/*
+ * The same, but bypassing term_flushln().
+ * Used for horizontal and vertical lines at the edges of table cells.
+ */
static void
tbl_direct_border(struct termp *tp, int c, size_t len)
{
- size_t i, sz;
+ size_t sz; /* Width of the character in basic units. */
+ size_t enw2; /* Width of half an EN in basic units. */
+ size_t target; /* Distance from the left margin in basic units. */
c = borders_locale[c];
- sz = (*tp->width)(tp, c);
- for (i = 0; i < len; i += sz) {
+ sz = (*tp->getwidth)(tp, c);
+ enw2 = (*tp->getwidth)(tp, ' ') / 2;
+ target = tp->viscol + len;
+ while (tp->viscol + enw2 < target) {
(*tp->letter)(tp, c);
tp->viscol += sz;
}
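
The loop above keeps printing while the visual column is still more than half an EN short of the target, so the number of border characters is rounded to the nearest character cell instead of always rounding down. A behavioural sketch with hypothetical names; sz must be positive:

    #include <stddef.h>

    /* How many border characters the loop above would print. */
    static size_t
    border_chars(size_t viscol, size_t len, size_t sz, size_t enw2)
    {
    	size_t n = 0;
    	size_t target = viscol + len;

    	while (viscol + enw2 < target) {
    		n++;
    		viscol += sz;
    	}
    	return n;
    }
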
@@ -810,56 +877,36 @@ tbl_direct_border(struct termp *tp, int c, size_t len)
static void
tbl_literal(struct termp *tp, const struct tbl_dat *dp,
- const struct roffcol *col)
+ const struct roffcol *col, size_t *coloff)
{
- size_t len, padl, padr, width;
- int ic, hspans;
+ size_t width; /* Of the cell including following spans [BU]. */
+ int ic; /* Column number of the cell. */
+ int hspans; /* Number of horizontal spans that follow. */
- assert(dp->string);
- len = term_strlen(tp, dp->string);
width = col->width;
ic = dp->layout->col;
hspans = dp->hspans;
while (hspans--) {
- width += tp->tbl.cols[ic].spacing;
+ width += term_len(tp, tp->tbl.cols[ic].spacing);
ic++;
width += tp->tbl.cols[ic].width;
}
-
- padr = width > len ? width - len : 0;
- padl = 0;
-
- switch (dp->layout->pos) {
- case TBL_CELL_LONG:
- padl = term_len(tp, 1);
- padr = padr > padl ? padr - padl : 0;
- break;
- case TBL_CELL_CENTRE:
- if (2 > padr)
- break;
- padl = padr / 2;
- padr -= padl;
- break;
- case TBL_CELL_RIGHT:
- padl = padr;
- padr = 0;
- break;
- default:
- break;
- }
-
- tbl_fill_char(tp, ASCII_NBRSP, padl);
tbl_word(tp, dp);
- tbl_fill_char(tp, ASCII_NBRSP, padr);
+ *coloff += width;
}
static void
tbl_number(struct termp *tp, const struct tbl_opts *opts,
const struct tbl_dat *dp,
- const struct roffcol *col)
+ const struct roffcol *col, size_t *coloff)
{
const char *cp, *lastdigit, *lastpoint;
- size_t intsz, padl, totsz;
+
+ /* Widths in basic units. */
+ size_t pad; /* Padding before the number. */
+ size_t totsz; /* Of the number to be printed. */
+ size_t intsz; /* Of the integer part. */
+
char buf[2];
/*
@@ -883,7 +930,7 @@ tbl_number(struct termp *tp, const struct tbl_opts *opts,
/* Then measure both widths. */
- padl = 0;
+ pad = 0;
totsz = term_strlen(tp, dp->string);
if (lastdigit != NULL) {
if (lastpoint == NULL)
@@ -901,23 +948,19 @@ tbl_number(struct termp *tp, const struct tbl_opts *opts,
*/
if (col->decimal > intsz && col->width > totsz) {
- padl = col->decimal - intsz;
- if (padl + totsz > col->width)
- padl = col->width - totsz;
+ pad = col->decimal - intsz;
+ if (pad + totsz > col->width)
+ pad = col->width - totsz;
}
/* If it is not a number, simply center the string. */
} else if (col->width > totsz)
- padl = (col->width - totsz) / 2;
+ pad = (col->width - totsz) / 2;
- tbl_fill_char(tp, ASCII_NBRSP, padl);
+ tbl_fill_char(tp, ASCII_NBRSP, coloff, pad);
tbl_word(tp, dp);
-
- /* Pad right to fill the column. */
-
- if (col->width > padl + totsz)
- tbl_fill_char(tp, ASCII_NBRSP, col->width - padl - totsz);
+ *coloff += col->width;
}
static void
diff --git a/contrib/mandoc/term.c b/contrib/mandoc/term.c
index 58d9d9bf9240..4dde60d1e45c 100644
--- a/contrib/mandoc/term.c
+++ b/contrib/mandoc/term.c
@@ -1,6 +1,6 @@
-/* $Id: term.c,v 1.291 2023/04/28 19:11:04 schwarze Exp $ */
+/* $Id: term.c,v 1.294 2025/08/01 14:59:39 schwarze Exp $ */
/*
- * Copyright (c) 2010-2022 Ingo Schwarze <schwarze@openbsd.org>
+ * Copyright (c) 2010-2022, 2025 Ingo Schwarze <schwarze@openbsd.org>
* Copyright (c) 2008, 2009, 2010, 2011 Kristaps Dzonsons <kristaps@bsd.lv>
*
* Permission to use, copy, modify, and distribute this software for any
@@ -94,12 +94,15 @@ term_end(struct termp *p)
void
term_flushln(struct termp *p)
{
- size_t vbl; /* Number of blanks to prepend to the output. */
+ /* Widths in basic units. */
+ size_t vbl; /* Whitespace to prepend to the output. */
size_t vbr; /* Actual visual position of the end of field. */
size_t vfield; /* Desired visual field width. */
size_t vtarget; /* Desired visual position of the right margin. */
- size_t ic; /* Character position in the input buffer. */
- size_t nbr; /* Number of characters to print in this field. */
+
+ /* Bytes. */
+ size_t ic; /* Byte index in the input buffer. */
+ size_t nbr; /* Number of bytes to print in this field. */
/*
* Normally, start writing at the left margin, but with the
@@ -108,8 +111,8 @@ term_flushln(struct termp *p)
vbl = (p->flags & TERMP_NOPAD) || p->tcol->offset < p->viscol ?
0 : p->tcol->offset - p->viscol;
- if (p->minbl && vbl < p->minbl)
- vbl = p->minbl;
+ if (p->minbl > 0 && vbl < term_len(p, p->minbl))
+ vbl = term_len(p, p->minbl);
if ((p->flags & TERMP_MULTICOL) == 0)
p->tcol->col = 0;
@@ -137,7 +140,7 @@ term_flushln(struct termp *p)
*/
term_fill(p, &nbr, &vbr,
- p->flags & TERMP_BRNEVER ? SIZE_MAX : vtarget);
+ p->flags & TERMP_BRNEVER ? SIZE_MAX / 2 : vtarget);
if (nbr == 0)
break;
@@ -161,7 +164,7 @@ term_flushln(struct termp *p)
p->tcol->taboff += vbr;
else
p->tcol->taboff += vtarget;
- p->tcol->taboff += (*p->width)(p, ' ');
+ p->tcol->taboff += term_len(p, 1);
/*
* If there is no text left in the field, exit the loop.
@@ -178,7 +181,7 @@ term_flushln(struct termp *p)
continue;
case ' ':
if (p->flags & TERMP_BRTRSP)
- vbr += (*p->width)(p, ' ');
+ vbr += term_len(p, 1);
continue;
case '\n':
case ASCII_NBRZW:
@@ -245,13 +248,13 @@ term_flushln(struct termp *p)
if ((p->flags & TERMP_HANG) == 0 &&
((p->flags & TERMP_NOBREAK) == 0 ||
- vbr + term_len(p, p->trailspace) > vfield))
+ vbr + term_len(p, p->trailspace) > vfield + term_len(p, 1) / 2))
endline(p);
}
/*
- * Store the number of input characters to print in this field in *nbr
- * and their total visual width to print in *vbr.
+ * Store the number of input bytes to print in this field in *nbr
+ * and their total visual width in basic units in *vbr.
* If there is only whitespace in the field, both remain zero.
* The desired visual width of the field is provided by vtarget.
* If the first word is longer, the field will be overrun.
@@ -259,28 +262,33 @@ term_flushln(struct termp *p)
static void
term_fill(struct termp *p, size_t *nbr, size_t *vbr, size_t vtarget)
{
- size_t ic; /* Character position in the input buffer. */
+ /* Widths in basic units. */
size_t vis; /* Visual position of the current character. */
size_t vn; /* Visual position of the next character. */
+ size_t enw; /* Width of an EN unit. */
+ int taboff; /* Temporary offset for literal tabs. */
+
+ size_t ic; /* Byte index in the input buffer. */
int breakline; /* Break at the end of this word. */
int graph; /* Last character was non-blank. */
- int taboff; /* Temporary offset for literal tabs. */
*nbr = *vbr = vis = 0;
breakline = graph = 0;
taboff = p->tcol->taboff;
+ enw = (*p->getwidth)(p, ' ');
+ vtarget += enw / 2;
for (ic = p->tcol->col; ic < p->tcol->lastcol; ic++) {
switch (p->tcol->buf[ic]) {
case '\b': /* Escape \o (overstrike) or backspace markup. */
assert(ic > 0);
- vis -= (*p->width)(p, p->tcol->buf[ic - 1]);
+ vis -= (*p->getwidth)(p, p->tcol->buf[ic - 1]);
continue;
case ' ':
case ASCII_BREAK: /* Escape \: (breakpoint). */
vn = vis;
if (p->tcol->buf[ic] == ' ')
- vn += (*p->width)(p, ' ');
+ vn += enw;
/* Can break at the end of a word. */
if (breakline || vn > vtarget)
break;
@@ -305,7 +313,7 @@ term_fill(struct termp *p, size_t *nbr, size_t *vbr, size_t vtarget)
* hyphen such that we get the correct width.
*/
p->tcol->buf[ic] = '-';
- vis += (*p->width)(p, '-');
+ vis += (*p->getwidth)(p, '-');
if (vis > vtarget) {
ic++;
break;
@@ -315,7 +323,7 @@ term_fill(struct termp *p, size_t *nbr, size_t *vbr, size_t vtarget)
continue;
case ASCII_TABREF:
- taboff = -vis - (*p->width)(p, ' ');
+ taboff = -vis - enw;
continue;
default:
@@ -334,7 +342,7 @@ term_fill(struct termp *p, size_t *nbr, size_t *vbr, size_t vtarget)
p->tcol->buf[ic] = ' ';
/* FALLTHROUGH */
default: /* Printable character. */
- vis += (*p->width)(p, p->tcol->buf[ic]);
+ vis += (*p->getwidth)(p, p->tcol->buf[ic]);
break;
}
graph = 1;
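
Adding half an EN to vtarget above gives the line filler a tolerance: a word that would overshoot the right margin by less than half a character still fits. A condensed, hypothetical form of that test:

    #include <stddef.h>

    static int
    word_fits(size_t vn, size_t vtarget, size_t enw)
    {
    	return vn <= vtarget + enw / 2;
    }
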
@@ -359,18 +367,20 @@ term_fill(struct termp *p, size_t *nbr, size_t *vbr, size_t vtarget)
/*
* Print the contents of one field
- * with an indentation of vbl visual columns,
- * and an input string length of nbr characters.
+ * with an indentation of vbl basic units
+ * and an input string length of nbr bytes.
*/
static void
term_field(struct termp *p, size_t vbl, size_t nbr)
{
- size_t ic; /* Character position in the input buffer. */
+ /* Widths in basic units. */
size_t vis; /* Visual position of the current character. */
size_t vt; /* Visual position including tab offset. */
size_t dv; /* Visual width of the current character. */
int taboff; /* Temporary offset for literal tabs. */
+ size_t ic; /* Byte position in the input buffer. */
+
vis = 0;
taboff = p->tcol->taboff;
for (ic = p->tcol->col; ic < nbr; ic++) {
@@ -386,7 +396,7 @@ term_field(struct termp *p, size_t vbl, size_t nbr)
case ASCII_NBRZW:
continue;
case ASCII_TABREF:
- taboff = -vis - (*p->width)(p, ' ');
+ taboff = -vis - (*p->getwidth)(p, ' ');
continue;
case '\t':
case ' ':
@@ -398,7 +408,7 @@ term_field(struct termp *p, size_t vbl, size_t nbr)
vt = vis + taboff;
dv = term_tab_next(vt) - vt;
} else
- dv = (*p->width)(p, ' ');
+ dv = (*p->getwidth)(p, ' ');
vbl += dv;
vis += dv;
continue;
@@ -413,7 +423,6 @@ term_field(struct termp *p, size_t vbl, size_t nbr)
if (vbl > 0) {
(*p->advance)(p, vbl);
- p->viscol += vbl;
vbl = 0;
}
@@ -421,11 +430,11 @@ term_field(struct termp *p, size_t vbl, size_t nbr)
(*p->letter)(p, p->tcol->buf[ic]);
if (p->tcol->buf[ic] == '\b') {
- dv = (*p->width)(p, p->tcol->buf[ic - 1]);
+ dv = (*p->getwidth)(p, p->tcol->buf[ic - 1]);
p->viscol -= dv;
vis -= dv;
} else {
- dv = (*p->width)(p, p->tcol->buf[ic]);
+ dv = (*p->getwidth)(p, p->tcol->buf[ic]);
p->viscol += dv;
vis += dv;
}
@@ -433,6 +442,10 @@ term_field(struct termp *p, size_t vbl, size_t nbr)
p->tcol->col = nbr;
}
+/*
+ * Print the margin character, if one is configured,
+ * and end the output line.
+ */
static void
endline(struct termp *p)
{
@@ -441,14 +454,13 @@ endline(struct termp *p)
p->flags &= ~TERMP_ENDMC;
}
if (p->mc != NULL) {
- if (p->viscol && p->maxrmargin >= p->viscol)
- (*p->advance)(p, p->maxrmargin - p->viscol + 1);
+ if (p->viscol > 0 && p->viscol <= p->maxrmargin)
+ (*p->advance)(p,
+ p->maxrmargin - p->viscol + term_len(p, 1));
p->flags |= TERMP_NOBUF | TERMP_NOSPACE;
term_word(p, p->mc);
p->flags &= ~(TERMP_NOBUF | TERMP_NEWMC);
}
- p->viscol = 0;
- p->minbl = 0;
(*p->endline)(p);
}
@@ -477,8 +489,6 @@ term_vspace(struct termp *p)
{
term_newln(p);
- p->viscol = 0;
- p->minbl = 0;
if (0 < p->skipvsp)
p->skipvsp--;
else
@@ -496,34 +506,36 @@ term_fontlast(struct termp *p)
p->fontq[p->fonti] = f;
}
-/* Set font, save current, discard previous; for \f, .ft, .B etc. */
+/* Set font, save current, discard previous; for \f, .ft, and man(7). */
void
term_fontrepl(struct termp *p, enum termfont f)
{
-
p->fontl = p->fontq[p->fonti];
+ if (p->fontibi && f == TERMFONT_UNDER)
+ f = TERMFONT_BI;
p->fontq[p->fonti] = f;
}
-/* Set font, save previous. */
+/* Set font, save previous; for mdoc(7), eqn(7), and tbl(7). */
void
term_fontpush(struct termp *p, enum termfont f)
{
+ enum termfont fl;
- p->fontl = p->fontq[p->fonti];
+ fl = p->fontq[p->fonti];
if (++p->fonti == p->fontsz) {
p->fontsz += 8;
p->fontq = mandoc_reallocarray(p->fontq,
p->fontsz, sizeof(*p->fontq));
}
- p->fontq[p->fonti] = f;
+ p->fontq[p->fonti] = fl;
+ term_fontrepl(p, f);
}
/* Flush to make the saved pointer current again. */
void
term_fontpopq(struct termp *p, int i)
{
-
assert(i >= 0);
if (p->fonti > i)
p->fonti = i;
@@ -533,8 +545,7 @@ term_fontpopq(struct termp *p, int i)
void
term_fontpop(struct termp *p)
{
-
- assert(p->fonti);
+ assert(p->fonti > 0);
p->fonti--;
}
@@ -548,9 +559,14 @@ term_word(struct termp *p, const char *word)
{
struct roffsu su;
const char nbrsp[2] = { ASCII_NBRSP, 0 };
- const char *seq, *cp;
- int sz, uc;
- size_t csz, lsz, ssz;
+ const char *seq; /* Escape sequence argument. */
+ const char *cp; /* String to be printed. */
+ size_t csz; /* String length in basic units. */
+ size_t lsz; /* Line width in basic units. */
+ size_t ssz; /* Substring length in bytes. */
+ int sz; /* Argument length in bytes. */
+ int uc; /* Unicode codepoint number. */
+ int bu; /* Width in basic units. */
enum mandoc_esc esc;
if ((p->flags & TERMP_NOBUF) == 0) {
@@ -663,15 +679,15 @@ term_word(struct termp *p, const char *word)
}
if (*seq == '|') {
seq++;
- uc = -p->col;
+ bu = -term_len(p, p->col);
} else
- uc = 0;
+ bu = 0;
if (a2roffsu(seq, &su, SCALE_EM) == NULL)
continue;
- uc += term_hen(p, &su);
- if (uc >= 0) {
- while (uc > 0) {
- uc -= term_len(p, 1);
+ bu += term_hspan(p, &su);
+ if (bu >= 0) {
+ while (bu > 0) {
+ bu -= term_len(p, 1);
if (p->flags & TERMP_BACKBEFORE)
p->flags &= ~TERMP_BACKBEFORE;
else
@@ -681,17 +697,17 @@ term_word(struct termp *p, const char *word)
}
if (p->flags & TERMP_BACKBEFORE) {
p->flags &= ~TERMP_BACKBEFORE;
- assert(p->col > 0);
+ assert(p->col > 1);
p->col--;
}
- if (p->col >= (size_t)(-uc)) {
- p->col += uc;
+ if (term_len(p, p->col) >= (size_t)(-bu)) {
+ p->col -= -bu / term_len(p, 1);
} else {
- uc += p->col;
+ bu += term_len(p, p->col);
p->col = 0;
- if (p->tcol->offset > (size_t)(-uc)) {
- p->ti += uc;
- p->tcol->offset += uc;
+ if (p->tcol->offset > (size_t)(-bu)) {
+ p->ti += bu;
+ p->tcol->offset += bu;
} else {
p->ti -= p->tcol->offset;
p->tcol->offset = 0;
@@ -701,13 +717,13 @@ term_word(struct termp *p, const char *word)
case ESCAPE_HLINE:
if ((cp = a2roffsu(seq, &su, SCALE_EM)) == NULL)
continue;
- uc = term_hen(p, &su);
- if (uc <= 0) {
+ bu = term_hspan(p, &su);
+ if (bu <= 0) {
if (p->tcol->rmargin <= p->tcol->offset)
continue;
lsz = p->tcol->rmargin - p->tcol->offset;
} else
- lsz = uc;
+ lsz = bu;
if (*cp == seq[-1])
uc = -1;
else if (*cp == '\\') {
@@ -739,13 +755,16 @@ term_word(struct termp *p, const char *word)
csz = term_strlen(p, cp);
ssz = strlen(cp);
} else
- csz = (*p->width)(p, uc);
- while (lsz >= csz) {
+ csz = (*p->getwidth)(p, uc);
+ while (lsz > 0) {
if (p->enc == TERMENC_ASCII)
encode(p, cp, ssz);
else
encode1(p, uc);
- lsz -= csz;
+ if (lsz > csz)
+ lsz -= csz;
+ else
+ lsz = 0;
}
continue;
case ESCAPE_SKIPCHAR:
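
The rewritten loop above saturates the remaining length at zero instead of stopping as soon as less than one full character width is left, so a trailing partial width still yields one more copy of the line-drawing character. A sketch of the resulting count; csz must be positive:

    #include <stddef.h>

    static size_t
    hline_copies(size_t lsz, size_t csz)
    {
    	size_t n = 0;

    	while (lsz > 0) {
    		n++;
    		lsz = lsz > csz ? lsz - csz : 0;
    	}
    	return n;
    }
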
@@ -954,28 +973,39 @@ term_setwidth(struct termp *p, const char *wstr)
size_t
term_len(const struct termp *p, size_t sz)
{
-
- return (*p->width)(p, ' ') * sz;
+ return (*p->getwidth)(p, ' ') * sz;
}
static size_t
cond_width(const struct termp *p, int c, int *skip)
{
-
if (*skip) {
(*skip) = 0;
return 0;
} else
- return (*p->width)(p, c);
+ return (*p->getwidth)(p, c);
}
size_t
term_strlen(const struct termp *p, const char *cp)
{
- size_t sz, rsz, i;
- int ssz, skip, uc;
- const char *seq, *rhs;
+ const char *seq; /* Escape sequence argument. */
+ const char *rhs; /* String to be printed. */
+
+ /* Widths in basic units. */
+ size_t sz; /* Return value. */
+ size_t this_sz; /* Individual char for overstrike. */
+ size_t max_sz; /* Result of overstrike. */
+
+ /* Numbers of bytes. */
+ size_t rsz; /* Substring length in bytes. */
+ size_t i; /* Byte index in substring. */
+ int ssz; /* Argument length in bytes. */
+ int skip; /* Number of bytes to skip. */
+
+ int uc; /* Unicode codepoint number. */
enum mandoc_esc esc;
+
static const char rej[] = { '\\', ASCII_NBRSP, ASCII_NBRZW,
ASCII_BREAK, ASCII_HYPH, ASCII_TABREF, '\0' };
@@ -1039,18 +1069,18 @@ term_strlen(const struct termp *p, const char *cp)
skip = 1;
continue;
case ESCAPE_OVERSTRIKE:
- rsz = 0;
+ max_sz = 0;
rhs = seq + ssz;
while (seq < rhs) {
if (*seq == '\\') {
mandoc_escape(&seq, NULL, NULL);
continue;
}
- i = (*p->width)(p, *seq++);
- if (rsz < i)
- rsz = i;
+ this_sz = (*p->getwidth)(p, *seq++);
+ if (max_sz < this_sz)
+ max_sz = this_sz;
}
- sz += rsz;
+ sz += max_sz;
continue;
default:
continue;
@@ -1085,7 +1115,7 @@ term_strlen(const struct termp *p, const char *cp)
*/
for (i = 0; i < rsz; i++)
- sz += (*p->width)(p, *rhs++);
+ sz += (*p->getwidth)(p, *rhs++);
break;
case ASCII_NBRSP:
sz += cond_width(p, ' ', &skip);
@@ -1146,25 +1176,10 @@ term_vspan(const struct termp *p, const struct roffsu *su)
}
/*
- * Convert a scaling width to basic units, rounding towards 0.
+ * Convert a scaling width to basic units.
*/
int
term_hspan(const struct termp *p, const struct roffsu *su)
{
-
return (*p->hspan)(p, su);
}
-
-/*
- * Convert a scaling width to basic units, rounding to closest.
- */
-int
-term_hen(const struct termp *p, const struct roffsu *su)
-{
- int bu;
-
- if ((bu = (*p->hspan)(p, su)) >= 0)
- return (bu + 11) / 24;
- else
- return -((-bu + 11) / 24);
-}
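
For reference, the removed term_hen() rounded a basic-unit span to the closest EN, symmetrically for both signs; with term_hspan() returning basic units everywhere, callers no longer need that rounding. The removed arithmetic, checked with 24 basic units per EN:

    #include <assert.h>

    int
    main(void)
    {
    	int bu = -37;

    	assert((35 + 11) / 24 == 1);          /* 35 BU -> 1 EN */
    	assert((37 + 11) / 24 == 2);          /* 37 BU -> 2 EN */
    	assert(-((-bu + 11) / 24) == -2);     /* symmetric for < 0 */
    	return 0;
    }
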
diff --git a/contrib/mandoc/term.h b/contrib/mandoc/term.h
index 3b3a79527eeb..1e4659734fc5 100644
--- a/contrib/mandoc/term.h
+++ b/contrib/mandoc/term.h
@@ -1,6 +1,7 @@
-/* $Id: term.h,v 1.134 2022/08/16 17:45:55 schwarze Exp $ */
+/* $Id: term.h,v 1.138 2025/07/27 15:27:28 schwarze Exp $ */
/*
- * Copyright (c) 2011-2015,2017,2019,2022 Ingo Schwarze <schwarze@openbsd.org>
+ * Copyright (c) 2011-2015, 2017, 2019, 2021, 2022, 2025
+ * Ingo Schwarze <schwarze@openbsd.org>
* Copyright (c) 2008, 2009, 2010, 2011 Kristaps Dzonsons <kristaps@bsd.lv>
*
* Permission to use, copy, modify, and distribute this software for any
@@ -44,19 +45,14 @@ struct termp;
typedef void (*term_margin)(struct termp *, const struct roff_meta *);
-struct termp_tbl {
- int width; /* width in fixed chars */
- int decimal; /* decimal point position */
-};
-
struct termp_col {
int *buf; /* Output buffer. */
size_t maxcols; /* Allocated bytes in buf. */
size_t lastcol; /* Last byte in buf. */
size_t col; /* Byte in buf to be written. */
- size_t rmargin; /* Current right margin. */
- size_t offset; /* Current left margin. */
- size_t taboff; /* Offset for literal tabs. */
+ size_t rmargin; /* Current right margin [BU]. */
+ size_t offset; /* Current left margin [BU]. */
+ size_t taboff; /* Offset for literal tabs [BU]. */
};
struct termp {
@@ -66,17 +62,16 @@ struct termp {
size_t maxtcol; /* Allocated table columns. */
size_t lasttcol; /* Last column currently used. */
size_t line; /* Current output line number. */
- size_t defindent; /* Default indent for text. */
- size_t defrmargin; /* Right margin of the device. */
- size_t lastrmargin; /* Right margin before the last ll. */
- size_t maxrmargin; /* Max right margin. */
+ size_t defindent; /* Default indent for text [EN]. */
+ size_t defrmargin; /* Right margin of the device [BU]. */
+ size_t lastrmargin; /* Right margin before last ll [BU]. */
+ size_t maxrmargin; /* Maximum right margin [BU]. */
size_t col; /* Byte position in buf. */
- size_t viscol; /* Chars on current line. */
- size_t trailspace; /* See term_flushln(). */
- size_t minbl; /* Minimum blanks before next field. */
+ size_t viscol; /* Width of the current line [BU]. */
+ size_t trailspace; /* Whitespace after field [EN]. */
+ size_t minbl; /* Whitespace before field [EN]. */
int synopsisonly; /* Print the synopsis only. */
- int mdocstyle; /* Imitate mdoc(7) output. */
- int ti; /* Temporary indent for one line. */
+ int ti; /* Temporary indent for line [BU]. */
int skipvsp; /* Vertical space to skip. */
int flags;
#define TERMP_SENTENCE (1 << 0) /* Space before a sentence. */
@@ -108,6 +103,7 @@ struct termp {
enum termfont *fontq; /* Symmetric fonts. */
int fontsz; /* Allocated size of font stack */
int fonti; /* Index of font stack. */
+ int fontibi; /* Map font I to BI. */
term_margin headf; /* invoked to print head */
term_margin footf; /* invoked to print foot */
void (*letter)(struct termp *, int);
@@ -116,7 +112,7 @@ struct termp {
void (*endline)(struct termp *);
void (*advance)(struct termp *, size_t);
void (*setwidth)(struct termp *, int, int);
- size_t (*width)(const struct termp *, int);
+ size_t (*getwidth)(const struct termp *, int);
int (*hspan)(const struct termp *,
const struct roffsu *);
const void *argf; /* arg for headf/footf */
@@ -143,13 +139,11 @@ void term_end(struct termp *);
void term_setwidth(struct termp *, const char *);
int term_hspan(const struct termp *, const struct roffsu *);
-int term_hen(const struct termp *, const struct roffsu *);
int term_vspan(const struct termp *, const struct roffsu *);
size_t term_strlen(const struct termp *, const char *);
size_t term_len(const struct termp *, size_t);
void term_tab_set(const struct termp *, const char *);
-void term_tab_iset(size_t);
void term_tab_ref(struct termp *);
size_t term_tab_next(size_t);
void term_tab_free(void);
diff --git a/contrib/mandoc/term_ascii.c b/contrib/mandoc/term_ascii.c
index 3942dc757953..990833c8a021 100644
--- a/contrib/mandoc/term_ascii.c
+++ b/contrib/mandoc/term_ascii.c
@@ -1,7 +1,7 @@
-/* $Id: term_ascii.c,v 1.69 2023/11/13 19:13:01 schwarze Exp $ */
+/* $Id: term_ascii.c,v 1.71 2025/07/16 14:33:08 schwarze Exp $ */
/*
+ * Copyright (c) 2014,2015,2017-2020,2025 Ingo Schwarze <schwarze@openbsd.org>
* Copyright (c) 2010, 2011 Kristaps Dzonsons <kristaps@bsd.lv>
- * Copyright (c) 2014,2015,2017,2018,2020 Ingo Schwarze <schwarze@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -43,7 +43,7 @@
static struct termp *ascii_init(enum termenc, const struct manoutput *);
static int ascii_hspan(const struct termp *,
const struct roffsu *);
-static size_t ascii_width(const struct termp *, int);
+static size_t ascii_getwidth(const struct termp *, int);
static void ascii_advance(struct termp *, size_t);
static void ascii_begin(struct termp *);
static void ascii_end(struct termp *);
@@ -55,7 +55,7 @@ static void ascii_setwidth(struct termp *, int, int);
static void locale_advance(struct termp *, size_t);
static void locale_endline(struct termp *);
static void locale_letter(struct termp *, int);
-static size_t locale_width(const struct termp *, int);
+static size_t locale_getwidth(const struct termp *, int);
#endif
@@ -73,7 +73,6 @@ ascii_init(enum termenc enc, const struct manoutput *outopts)
p->line = 1;
p->defindent = 5;
- p->defrmargin = p->lastrmargin = 78;
p->fontq = mandoc_reallocarray(NULL,
(p->fontsz = 8), sizeof(*p->fontq));
p->fontq[0] = p->fontl = TERMFONT_NONE;
@@ -82,13 +81,12 @@ ascii_init(enum termenc enc, const struct manoutput *outopts)
p->end = ascii_end;
p->hspan = ascii_hspan;
p->type = TERMTYPE_CHAR;
-
p->enc = TERMENC_ASCII;
p->advance = ascii_advance;
p->endline = ascii_endline;
p->letter = ascii_letter;
p->setwidth = ascii_setwidth;
- p->width = ascii_width;
+ p->getwidth = ascii_getwidth;
#if HAVE_WCHAR
if (enc != TERMENC_ASCII) {
@@ -118,17 +116,15 @@ ascii_init(enum termenc enc, const struct manoutput *outopts)
p->advance = locale_advance;
p->endline = locale_endline;
p->letter = locale_letter;
- p->width = locale_width;
+ p->getwidth = locale_getwidth;
}
}
#endif
+ p->defrmargin = term_len(p, outopts->width ? outopts->width : 78);
+ p->lastrmargin = p->defrmargin;
- if (outopts->mdoc)
- p->mdocstyle = 1;
if (outopts->indent)
p->defindent = outopts->indent;
- if (outopts->width)
- p->defrmargin = outopts->width;
if (outopts->synopsisonly)
p->synopsisonly = 1;
@@ -140,29 +136,24 @@ ascii_init(enum termenc enc, const struct manoutput *outopts)
void *
ascii_alloc(const struct manoutput *outopts)
{
-
return ascii_init(TERMENC_ASCII, outopts);
}
void *
utf8_alloc(const struct manoutput *outopts)
{
-
return ascii_init(TERMENC_UTF8, outopts);
}
void *
locale_alloc(const struct manoutput *outopts)
{
-
return ascii_init(TERMENC_LOCALE, outopts);
}
static void
ascii_setwidth(struct termp *p, int iop, int width)
{
-
- width /= 24;
p->tcol->rmargin = p->defrmargin;
if (iop > 0)
p->defrmargin += width;
@@ -172,8 +163,8 @@ ascii_setwidth(struct termp *p, int iop, int width)
p->defrmargin -= width;
else
p->defrmargin = 0;
- if (p->defrmargin > 1000)
- p->defrmargin = 1000;
+ if (p->defrmargin > term_len(p, 1000))
+ p->defrmargin = term_len(p, 1000);
p->lastrmargin = p->tcol->rmargin;
p->tcol->rmargin = p->maxrmargin = p->defrmargin;
}
@@ -182,67 +173,76 @@ void
terminal_sepline(void *arg)
{
struct termp *p;
- size_t i;
+ size_t i; /* Printed width in basic units. */
+ size_t sz; /* Width of a dash in basic units. */
p = (struct termp *)arg;
(*p->endline)(p);
- for (i = 0; i < p->defrmargin; i++)
+ sz = (*p->getwidth)(p, '-');
+ for (i = 0; i < p->defrmargin; i += sz)
(*p->letter)(p, '-');
(*p->endline)(p);
(*p->endline)(p);
}
static size_t
-ascii_width(const struct termp *p, int c)
+ascii_getwidth(const struct termp *p, int c)
{
- return c != ASCII_BREAK && c != ASCII_NBRZW && c != ASCII_TABREF;
+ switch (c) {
+ case ASCII_BREAK:
+ case ASCII_NBRZW:
+ case ASCII_TABREF:
+ return 0;
+ default:
+ return 24;
+ }
}
void
ascii_free(void *arg)
{
-
term_free((struct termp *)arg);
}
static void
ascii_letter(struct termp *p, int c)
{
-
putchar(c);
}
static void
ascii_begin(struct termp *p)
{
-
(*p->headf)(p, p->argf);
}
static void
ascii_end(struct termp *p)
{
-
(*p->footf)(p, p->argf);
}
static void
ascii_endline(struct termp *p)
{
-
p->line++;
if ((int)p->tcol->offset > p->ti)
p->tcol->offset -= p->ti;
else
p->tcol->offset = 0;
p->ti = 0;
+ p->minbl = 0;
+ p->viscol = 0;
putchar('\n');
}
static void
ascii_advance(struct termp *p, size_t len)
{
- size_t i;
+ size_t dst; /* Destination column in basic units. */
+ size_t sz; /* Width of a space in basic units. */
+
+ sz = (*p->getwidth)(p, ' ');
/*
* XXX We used to have "assert(len < UINT16_MAX)" here.
@@ -250,10 +250,14 @@ ascii_advance(struct termp *p, size_t len)
* can trigger that by merely providing large input.
* For now, simply truncate.
*/
- if (len > 256)
- len = 256;
- for (i = 0; i < len; i++)
+ if (len > 256 * sz)
+ len = 256 * sz;
+
+ dst = p->viscol + len;
+ while (p->viscol + sz / 2 < dst) {
putchar(' ');
+ p->viscol += sz;
+ }
}
static int
@@ -372,7 +376,7 @@ ascii_uc2str(int uc)
#if HAVE_WCHAR
static size_t
-locale_width(const struct termp *p, int c)
+locale_getwidth(const struct termp *p, int c)
{
int rc;
@@ -381,13 +385,16 @@ locale_width(const struct termp *p, int c)
rc = wcwidth(c);
if (rc < 0)
rc = 0;
- return rc;
+ return rc * 24;
}
static void
locale_advance(struct termp *p, size_t len)
{
- size_t i;
+ size_t dst; /* Destination column in basic units. */
+ size_t sz; /* Width of a space in basic units. */
+
+ sz = (*p->getwidth)(p, ' ');
/*
* XXX We used to have "assert(len < UINT16_MAX)" here.
@@ -395,29 +402,33 @@ locale_advance(struct termp *p, size_t len)
* can trigger that by merely providing large input.
* For now, simply truncate.
*/
- if (len > 256)
- len = 256;
- for (i = 0; i < len; i++)
+ if (len > 256 * sz)
+ len = 256 * sz;
+
+ dst = p->viscol + len;
+ while (p->viscol + sz / 2 < dst) {
putwchar(L' ');
+ p->viscol += sz;
+ }
}
static void
locale_endline(struct termp *p)
{
-
p->line++;
if ((int)p->tcol->offset > p->ti)
p->tcol->offset -= p->ti;
else
p->tcol->offset = 0;
p->ti = 0;
+ p->minbl = 0;
+ p->viscol = 0;
putwchar(L'\n');
}
static void
locale_letter(struct termp *p, int c)
{
-
putwchar(c);
}
#endif
diff --git a/contrib/mandoc/term_ps.c b/contrib/mandoc/term_ps.c
index 374d3d9a6abd..4c6368ca1d1f 100644
--- a/contrib/mandoc/term_ps.c
+++ b/contrib/mandoc/term_ps.c
@@ -1,7 +1,7 @@
-/* $Id: term_ps.c,v 1.92 2020/09/06 14:45:22 schwarze Exp $ */
+/* $Id: term_ps.c,v 1.94 2025/07/18 15:47:18 schwarze Exp $ */
/*
+ * Copyright (c) 2014-2017, 2020, 2025 Ingo Schwarze <schwarze@openbsd.org>
* Copyright (c) 2010, 2011 Kristaps Dzonsons <kristaps@bsd.lv>
- * Copyright (c) 2014,2015,2016,2017,2020 Ingo Schwarze <schwarze@openbsd.org>
* Copyright (c) 2017 Marc Espie <espie@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
@@ -95,7 +95,7 @@ struct termp_ps {
static int ps_hspan(const struct termp *,
const struct roffsu *);
-static size_t ps_width(const struct termp *, int);
+static size_t ps_getwidth(const struct termp *, int);
static void ps_advance(struct termp *, size_t);
static void ps_begin(struct termp *);
static void ps_closepage(struct termp *);
@@ -549,7 +549,7 @@ pspdf_alloc(const struct manoutput *outopts, enum termtype type)
p->hspan = ps_hspan;
p->letter = ps_letter;
p->setwidth = ps_setwidth;
- p->width = ps_width;
+ p->getwidth = ps_getwidth;
/* Default to US letter (millimetres). */
@@ -1211,6 +1211,7 @@ ps_advance(struct termp *p, size_t len)
ps_plast(p);
ps_pclose(p);
p->ps->pscol += len;
+ p->viscol += len;
}
static void
@@ -1234,6 +1235,8 @@ ps_endline(struct termp *p)
/* Left-justify. */
p->ps->pscol = p->ps->left;
+ p->viscol = 0;
+ p->minbl = 0;
/* If we haven't printed anything, return. */
@@ -1282,7 +1285,7 @@ ps_setfont(struct termp *p, enum termfont f)
}
static size_t
-ps_width(const struct termp *p, int c)
+ps_getwidth(const struct termp *p, int c)
{
if (c <= 32 || c - 32 >= MAXCHAR)
@@ -1311,7 +1314,7 @@ ps_hspan(const struct termp *p, const struct roffsu *su)
* scaling unit so that output is the same regardless
* the media.
*/
- r = PNT2AFM(p, su->scale * 72.0 / 240.0);
+ r = PNT2AFM(p, su->scale * 72.0 / 10.0);
break;
case SCALE_CM:
r = PNT2AFM(p, su->scale * 72.0 / 2.54);
@@ -1344,8 +1347,7 @@ ps_hspan(const struct termp *p, const struct roffsu *su)
r = su->scale;
break;
}
-
- return r * 24.0;
+ return r;
}
static void
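
The change to ps_hspan() above folds the former trailing multiplication by 24.0 into the per-unit factor: 72.0 / 240.0 * 24.0 equals 72.0 / 10.0, so the value of the unit handled in that hunk is preserved while the function now returns basic units directly. A quick check, ignoring the PNT2AFM() scaling that is common to both versions:

    #include <assert.h>
    #include <math.h>

    int
    main(void)
    {
    	double scale = 1.5;   /* arbitrary example value */
    	double before = scale * 72.0 / 240.0 * 24.0;
    	double after = scale * 72.0 / 10.0;

    	assert(fabs(before - after) < 1e-9);
    	return 0;
    }
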
diff --git a/contrib/mandoc/term_tab.c b/contrib/mandoc/term_tab.c
index a2d1074159b9..dd1b6bcdc696 100644
--- a/contrib/mandoc/term_tab.c
+++ b/contrib/mandoc/term_tab.c
@@ -1,6 +1,6 @@
-/* $Id: term_tab.c,v 1.7 2021/10/04 18:56:31 schwarze Exp $ */
+/* $Id: term_tab.c,v 1.9 2025/07/16 14:33:08 schwarze Exp $ */
/*
- * Copyright (c) 2017, 2021 Ingo Schwarze <schwarze@openbsd.org>
+ * Copyright (c) 2017, 2021, 2025 Ingo Schwarze <schwarze@openbsd.org>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -27,7 +27,7 @@
#include "term.h"
struct tablist {
- size_t *t; /* Allocated array of tab positions. */
+ size_t *t; /* Allocated array of tab positions [BU]. */
size_t s; /* Allocated number of positions. */
size_t n; /* Currently used number of positions. */
};
@@ -36,7 +36,7 @@ static struct {
struct tablist a; /* All tab positions for lookup. */
struct tablist p; /* Periodic tab positions to add. */
struct tablist *r; /* Tablist currently being recorded. */
- size_t d; /* Default tab width in units of n. */
+ size_t d; /* Default tab width in basic units. */
} tabs;
@@ -55,7 +55,7 @@ term_tab_set(const struct termp *p, const char *arg)
tabs.r = &tabs.a;
if (tabs.d == 0) {
a2roffsu(".8i", &su, SCALE_IN);
- tabs.d = term_hen(p, &su);
+ tabs.d = term_hspan(p, &su);
}
return;
}
@@ -84,28 +84,13 @@ term_tab_set(const struct termp *p, const char *arg)
/* Append the new position. */
- pos = term_hen(p, &su);
+ pos = term_hspan(p, &su);
tl->t[tl->n] = pos;
if (add && tl->n)
tl->t[tl->n] += tl->t[tl->n - 1];
tl->n++;
}
-/*
- * Simplified version without a parser,
- * never incremental, never periodic, for use by tbl(7).
- */
-void
-term_tab_iset(size_t inc)
-{
- if (tabs.a.n >= tabs.a.s) {
- tabs.a.s += 8;
- tabs.a.t = mandoc_reallocarray(tabs.a.t, tabs.a.s,
- sizeof(*tabs.a.t));
- }
- tabs.a.t[tabs.a.n++] = inc;
-}
-
size_t
term_tab_next(size_t prev)
{
diff --git a/contrib/one-true-awk/FIXES b/contrib/one-true-awk/FIXES
index b3bf38f0aa1c..b876b9ec5ec9 100644
--- a/contrib/one-true-awk/FIXES
+++ b/contrib/one-true-awk/FIXES
@@ -25,6 +25,14 @@ THIS SOFTWARE.
This file lists all bug fixes, changes, etc., made since the
second edition of the AWK book was published in September 2023.
+Aug 04, 2025
+ Fix incorrect divisor in rand() - it was returning
+ even random numbers only. Thanks to Ozan Yigit.
+
+ Fix a syntax issue with /= that caused constants to
+ turn into variables [eg. 42 /= 7]. Thanks to Arnold
+ Robbins.
+
Jan 14, 2025
Fix incorrect error line number issues. unput has
no business managing lineno. Thanks to Ozan Yigit.
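
The /= entry describes the classic shared-constant hazard in an interpreter: numeric literals live in cells, and an assignment operator whose left side resolves to a literal's cell would overwrite it, so after "42 /= 7" a later mention of 42 could evaluate to 6. A toy illustration of the guard (the Cell/CON names echo awk's run.c, shown further below, but everything else here is invented for the example):

	#include <stdio.h>

	#define CON 01			/* cell holds a constant */

	struct Cell {
		int	tval;		/* type flags */
		double	fval;		/* numeric value */
	};

	static struct Cell forty_two = { CON, 42.0 };	/* shared literal */

	static int
	diveq(struct Cell *x, double y)
	{
		if (x->tval & CON)
			return -1;	/* refuse: would corrupt the literal */
		x->fval /= y;
		return 0;
	}

	int
	main(void)
	{
		if (diveq(&forty_two, 7.0) != 0)
			fprintf(stderr, "non-constant required for left side of /=\n");
		return 0;
	}
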
diff --git a/contrib/one-true-awk/main.c b/contrib/one-true-awk/main.c
index 361c23e70861..b8053af34b05 100644
--- a/contrib/one-true-awk/main.c
+++ b/contrib/one-true-awk/main.c
@@ -22,7 +22,7 @@ ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.
****************************************************************/
-const char *version = "version 20250116";
+const char *version = "version 20250804";
#define DEBUG
#include <stdio.h>
diff --git a/contrib/one-true-awk/run.c b/contrib/one-true-awk/run.c
index eaddfdecbdd3..9bc07a517372 100644
--- a/contrib/one-true-awk/run.c
+++ b/contrib/one-true-awk/run.c
@@ -1567,6 +1567,8 @@ Cell *assign(Node **a, int n) /* a[0] = a[1], a[0] += a[1], etc. */
xf *= yf;
break;
case DIVEQ:
+ if ((x->tval & CON) != 0)
+ FATAL("non-constant required for left side of /=");
if (yf == 0)
FATAL("division by zero in /=");
xf /= yf;
@@ -2188,7 +2190,7 @@ Cell *bltin(Node **a, int n) /* builtin functions. a[0] is type, a[1] is arg lis
/* random() returns numbers in [0..2^31-1]
* in order to get a number in [0, 1), divide it by 2^31
*/
- u = (Awkfloat) random() / RAND_MAX;
+ u = (Awkfloat) random() / (0x7fffffffL + 0x1UL);
break;
case FSRAND:
if (isrec(x)) /* no argument provided */
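
The divisor matters at the top of the range: random(3) always returns values in [0, 2^31-1] no matter what RAND_MAX is (RAND_MAX describes rand(3) and may be as small as 32767), so dividing by RAND_MAX can yield exactly 1.0, or even values above 1, violating the documented [0, 1) contract, while dividing by 2^31 cannot. A standalone boundary check, not awk code:

	#include <stdio.h>

	int
	main(void)
	{
		long max = 0x7fffffffL;	/* largest value random(3) returns */

		/* 1.0 exactly: escapes [0, 1) */
		printf("%.17g\n", (double)max / 0x7fffffffL);
		/* 0.999...: stays inside [0, 1) */
		printf("%.17g\n", (double)max / (0x7fffffffL + 0x1UL));
		return 0;
	}
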
diff --git a/contrib/tcpdump/print-pfsync.c b/contrib/tcpdump/print-pfsync.c
index 6bf9abaf3903..e4f11930816c 100644
--- a/contrib/tcpdump/print-pfsync.c
+++ b/contrib/tcpdump/print-pfsync.c
@@ -53,8 +53,8 @@
static void pfsync_print(netdissect_options *, struct pfsync_header *,
const u_char *, u_int);
static void print_src_dst(netdissect_options *,
- const struct pfsync_state_peer *,
- const struct pfsync_state_peer *, uint8_t);
+ const struct pf_state_peer_export *,
+ const struct pf_state_peer_export *, uint8_t);
static void print_state(netdissect_options *, union pfsync_state_union *, int);
void
@@ -330,7 +330,7 @@ print_host(netdissect_options *ndo, struct pf_addr *addr, uint16_t port,
}
static void
-print_seq(netdissect_options *ndo, const struct pfsync_state_peer *p)
+print_seq(netdissect_options *ndo, const struct pf_state_peer_export *p)
{
if (p->seqdiff)
ND_PRINT("[%u + %u](+%u)", ntohl(p->seqlo),
@@ -341,8 +341,8 @@ print_seq(netdissect_options *ndo, const struct pfsync_state_peer *p)
}
static void
-print_src_dst(netdissect_options *ndo, const struct pfsync_state_peer *src,
- const struct pfsync_state_peer *dst, uint8_t proto)
+print_src_dst(netdissect_options *ndo, const struct pf_state_peer_export *src,
+ const struct pf_state_peer_export *dst, uint8_t proto)
{
if (proto == IPPROTO_TCP) {
@@ -390,7 +390,7 @@ print_src_dst(netdissect_options *ndo, const struct pfsync_state_peer *src,
static void
print_state(netdissect_options *ndo, union pfsync_state_union *s, int version)
{
- struct pfsync_state_peer *src, *dst;
+ struct pf_state_peer_export *src, *dst;
struct pfsync_state_key *sk, *nk;
int min, sec;
diff --git a/contrib/tzcode/localtime.c b/contrib/tzcode/localtime.c
index a6ec3d8e4e21..0fe7f1ed3f64 100644
--- a/contrib/tzcode/localtime.c
+++ b/contrib/tzcode/localtime.c
@@ -13,37 +13,36 @@
/*LINTLIBRARY*/
#define LOCALTIME_IMPLEMENTATION
+#ifdef __FreeBSD__
#include "namespace.h"
+#include <pthread.h>
+#endif /* __FreeBSD__ */
#ifdef DETECT_TZ_CHANGES
-#ifndef DETECT_TZ_CHANGES_INTERVAL
-#define DETECT_TZ_CHANGES_INTERVAL 61
-#endif
+# ifndef DETECT_TZ_CHANGES_INTERVAL
+# define DETECT_TZ_CHANGES_INTERVAL 61
+# endif
int __tz_change_interval = DETECT_TZ_CHANGES_INTERVAL;
-#include <sys/stat.h>
-#endif
-#include <fcntl.h>
-#if THREAD_SAFE
-#include <pthread.h>
-#endif
+# include <sys/stat.h>
+#endif /* DETECT_TZ_CHANGES */
#include "private.h"
-#include "un-namespace.h"
#include "tzdir.h"
#include "tzfile.h"
-
+#include <fcntl.h>
+#ifdef __FreeBSD__
#include "libc_private.h"
+#include "un-namespace.h"
+#endif /* __FreeBSD__ */
#if defined THREAD_SAFE && THREAD_SAFE
+# include <pthread.h>
+#ifdef __FreeBSD__
+# define pthread_mutex_lock(l) (__isthreaded ? _pthread_mutex_lock(l) : 0)
+# define pthread_mutex_unlock(l) (__isthreaded ? _pthread_mutex_unlock(l) : 0)
+#endif /* __FreeBSD__ */
static pthread_mutex_t locallock = PTHREAD_MUTEX_INITIALIZER;
-static int lock(void) {
- if (__isthreaded)
- return _pthread_mutex_lock(&locallock);
- return 0;
-}
-static void unlock(void) {
- if (__isthreaded)
- _pthread_mutex_unlock(&locallock);
-}
+static int lock(void) { return pthread_mutex_lock(&locallock); }
+static void unlock(void) { pthread_mutex_unlock(&locallock); }
#else
static int lock(void) { return 0; }
static void unlock(void) { }
@@ -166,6 +165,9 @@ struct rule {
int_fast32_t r_time; /* transition time of rule */
};
+#ifdef __FreeBSD__
+static void tzset_unlocked_name(char const *);
+#endif /* __FreeBSD__ */
static struct tm *gmtsub(struct state const *, time_t const *, int_fast32_t,
struct tm *);
static bool increment_overflow(int *, int);
@@ -194,7 +196,7 @@ static struct state *const gmtptr = &gmtmem;
static char lcl_TZname[TZ_STRLEN_MAX + 1];
static int lcl_is_set;
-
+#ifdef __FreeBSD__
static pthread_once_t gmt_once = PTHREAD_ONCE_INIT;
static pthread_once_t gmtime_once = PTHREAD_ONCE_INIT;
static pthread_key_t gmtime_key;
@@ -205,6 +207,7 @@ static int offtime_key_error;
static pthread_once_t localtime_once = PTHREAD_ONCE_INIT;
static pthread_key_t localtime_key;
static int localtime_key_error;
+#endif /* __FreeBSD__ */
/*
** Section 4.12.3 of X3.159-1989 requires that
@@ -398,19 +401,20 @@ scrub_abbrs(struct state *sp)
#ifdef DETECT_TZ_CHANGES
/*
- * Determine if there's a change in the timezone since the last time we checked.
+ * Check whether either the time zone name or the file it refers to has
+ * changed since the last time we checked.
* Returns: -1 on error
- * 0 if the timezone has not changed
- * 1 if the timezone has changed
+ * 0 if the time zone has not changed
+ * 1 if the time zone has changed
*/
static int
-change_in_tz(const char *name)
+tzfile_changed(const char *name, int fd)
{
static char old_name[PATH_MAX];
static struct stat old_sb;
struct stat sb;
- if (stat(name, &sb) != 0)
+ if (_fstat(fd, &sb) != 0)
return -1;
if (strcmp(name, old_name) != 0) {
@@ -429,9 +433,7 @@ change_in_tz(const char *name)
return 0;
}
-#else /* !DETECT_TZ_CHANGES */
-#define change_in_tz(X) 1
-#endif /* !DETECT_TZ_CHANGES */
+#endif /* DETECT_TZ_CHANGES */
/* Input buffer for data read from a compiled tz file. */
union input_buffer {
@@ -444,8 +446,10 @@ union input_buffer {
+ 4 * TZ_MAX_TIMES];
};
+#ifndef __FreeBSD__
/* TZDIR with a trailing '/' rather than a trailing '\0'. */
static char const tzdirslash[sizeof TZDIR] = TZDIR "/";
+#endif /* !__FreeBSD__ */
/* Local storage needed for 'tzloadbody'. */
union local_storage {
@@ -458,6 +462,7 @@ union local_storage {
struct state st;
} u;
+#ifndef __FreeBSD__
/* The name of the file to be opened. Ideally this would have no
size limits, to support arbitrarily long Zone names.
Limiting Zone names to 1024 bytes should suffice for practical use.
@@ -465,6 +470,7 @@ union local_storage {
file_analysis as that struct is allocated anyway, as the other
union member. */
char fullname[max(sizeof(struct file_analysis), sizeof tzdirslash + 1024)];
+#endif /* !__FreeBSD__ */
};
/* Load tz data from the file named NAME into *SP. Read extended
@@ -478,6 +484,11 @@ tzloadbody(char const *name, struct state *sp, bool doextend,
register int fid;
register int stored;
register ssize_t nread;
+#ifdef __FreeBSD__
+ int serrno;
+#else /* !__FreeBSD__ */
+ register bool doaccess;
+#endif /* !__FreeBSD__ */
register union input_buffer *up = &lsp->u.u;
register int tzheadsize = sizeof(struct tzhead);
@@ -491,7 +502,16 @@ tzloadbody(char const *name, struct state *sp, bool doextend,
if (name[0] == ':')
++name;
- if (name[0] != '/') {
+#ifndef __FreeBSD__
+#ifdef SUPPRESS_TZDIR
+ /* Do not prepend TZDIR. This is intended for specialized
+ applications only, due to its security implications. */
+ doaccess = true;
+#else
+ doaccess = name[0] == '/';
+#endif
+ if (!doaccess) {
+ char const *dot;
if (sizeof lsp->fullname - sizeof tzdirslash <= strlen(name))
return ENAMETOOLONG;
@@ -501,27 +521,59 @@ tzloadbody(char const *name, struct state *sp, bool doextend,
memcpy(lsp->fullname, tzdirslash, sizeof tzdirslash);
strcpy(lsp->fullname + sizeof tzdirslash, name);
+ /* Set doaccess if NAME contains a ".." file name
+ component, as such a name could read a file outside
+ the TZDIR virtual subtree. */
+ for (dot = name; (dot = strchr(dot, '.')); dot++)
+ if ((dot == name || dot[-1] == '/') && dot[1] == '.'
+ && (dot[2] == '/' || !dot[2])) {
+ doaccess = true;
+ break;
+ }
+
name = lsp->fullname;
}
- if (doextend) {
- /*
- * Detect if the timezone file has changed. Check
- * 'doextend' to ignore TZDEFRULES; the change_in_tz()
- * function can only keep state for a single file.
- */
- switch (change_in_tz(name)) {
- case -1:
- return errno;
- case 0:
- return 0;
- case 1:
- break;
- }
- }
+ if (doaccess && access(name, R_OK) != 0)
+ return errno;
fid = _open(name, O_RDONLY | O_BINARY);
+#else /* __FreeBSD__ */
+ {
+ const char *relname = name;
+ if (strncmp(relname, TZDIR "/", strlen(TZDIR) + 1) == 0)
+ relname += strlen(TZDIR) + 1;
+ int dd = _open(TZDIR, O_DIRECTORY | O_RDONLY);
+ if (dd < 0)
+ return errno;
+ fid = _openat(dd, relname, O_RDONLY | O_BINARY |
+ (issetugid() ? O_RESOLVE_BENEATH : 0));
+ serrno = errno;
+ _close(dd);
+ errno = serrno;
+ }
+#endif /* __FreeBSD__ */
if (fid < 0)
return errno;
+#ifdef DETECT_TZ_CHANGES
+ if (doextend) {
+ /*
+ * Detect if the timezone file has changed. Check 'doextend' to
+ * ignore TZDEFRULES; the tzfile_changed() function can only
+ * keep state for a single file.
+ */
+ switch (tzfile_changed(name, fid)) {
+ case -1:
+ serrno = errno;
+ _close(fid);
+ return serrno;
+ case 0:
+ _close(fid);
+ return 0;
+ case 1:
+ break;
+ }
+ }
+#endif /* DETECT_TZ_CHANGES */
nread = _read(fid, up->buf, sizeof up->buf);
if (nread < tzheadsize) {
int err = nread < 0 ? errno : EINVAL;
@@ -1370,8 +1422,11 @@ gmtload(struct state *const sp)
}
#ifdef DETECT_TZ_CHANGES
+/*
+ * Check if the time zone data we have is still fresh.
+ */
static int
-recheck_tzdata()
+tzdata_is_fresh(void)
{
static time_t last_checked;
struct timespec now;
@@ -1387,9 +1442,7 @@ recheck_tzdata()
return 0;
}
-#else /* !DETECT_TZ_CHANGES */
-#define recheck_tzdata() 0
-#endif /* !DETECT_TZ_CHANGES */
+#endif /* DETECT_TZ_CHANGES */
/* Initialize *SP to a value appropriate for the TZ setting NAME.
Return 0 on success, an errno value on failure. */
@@ -1419,15 +1472,26 @@ zoneinit(struct state *sp, char const *name)
}
static void
+tzset_unlocked(void)
+{
+#ifdef __FreeBSD__
+ tzset_unlocked_name(getenv("TZ"));
+}
+static void
tzset_unlocked_name(char const *name)
{
+#else
+ char const *name = getenv("TZ");
+#endif
struct state *sp = lclptr;
int lcl = name ? strlen(name) < sizeof lcl_TZname : -1;
if (lcl < 0
? lcl_is_set < 0
: 0 < lcl_is_set && strcmp(lcl_TZname, name) == 0)
- if (recheck_tzdata() == 0)
- return;
+#ifdef DETECT_TZ_CHANGES
+ if (tzdata_is_fresh() == 0)
+#endif /* DETECT_TZ_CHANGES */
+ return;
#ifdef ALL_STATE
if (! sp)
lclptr = sp = malloc(sizeof *lclptr);
@@ -1442,12 +1506,6 @@ tzset_unlocked_name(char const *name)
lcl_is_set = lcl;
}
-static void
-tzset_unlocked(void)
-{
- tzset_unlocked_name(getenv("TZ"));
-}
-
void
tzset(void)
{
@@ -1457,6 +1515,7 @@ tzset(void)
unlock();
}
+#ifdef __FreeBSD__
void
freebsd13_tzsetwall(void)
{
@@ -1468,7 +1527,7 @@ freebsd13_tzsetwall(void)
__sym_compat(tzsetwall, freebsd13_tzsetwall, FBSD_1.0);
__warn_references(tzsetwall,
"warning: tzsetwall() is deprecated, use tzset() instead.");
-
+#endif /* __FreeBSD__ */
static void
gmtcheck(void)
{
@@ -1485,6 +1544,9 @@ gmtcheck(void)
}
unlock();
}
+#ifdef __FreeBSD__
+#define gmtcheck() _once(&gmt_once, gmtcheck)
+#endif
#if NETBSD_INSPIRED
@@ -1652,45 +1714,47 @@ localtime_tzset(time_t const *timep, struct tm *tmp, bool setname)
}
#ifndef DETECT_TZ_CHANGES
if (setname || !lcl_is_set)
-#endif
+#endif /* DETECT_TZ_CHANGES */
tzset_unlocked();
tmp = localsub(lclptr, timep, setname, tmp);
unlock();
return tmp;
}
+#ifdef __FreeBSD__
static void
localtime_key_init(void)
{
-
- localtime_key_error = _pthread_key_create(&localtime_key, free);
+ localtime_key_error = _pthread_key_create(&localtime_key, free);
}
-
+#endif /* __FreeBSD__ */
struct tm *
localtime(const time_t *timep)
{
#if !SUPPORT_C89
- static struct tm tm;
+ static struct tm tm;
#endif
- struct tm *p_tm = &tm;
-
- if (__isthreaded != 0) {
- _pthread_once(&localtime_once, localtime_key_init);
- if (localtime_key_error != 0) {
- errno = localtime_key_error;
- return (NULL);
- }
- if ((p_tm = _pthread_getspecific(localtime_key)) == NULL) {
- if ((p_tm = malloc(sizeof(*p_tm))) == NULL) {
- return (NULL);
- }
- if (_pthread_setspecific(localtime_key, p_tm) != 0) {
- free(p_tm);
- return (NULL);
- }
- }
- }
- return localtime_tzset(timep, p_tm, true);
+#ifdef __FreeBSD__
+ struct tm *p_tm = &tm;
+
+ if (__isthreaded != 0) {
+ _pthread_once(&localtime_once, localtime_key_init);
+ if (localtime_key_error != 0) {
+ errno = localtime_key_error;
+ return (NULL);
+ }
+ if ((p_tm = _pthread_getspecific(localtime_key)) == NULL) {
+ if ((p_tm = malloc(sizeof(*p_tm))) == NULL) {
+ return (NULL);
+ }
+ if (_pthread_setspecific(localtime_key, p_tm) != 0) {
+ free(p_tm);
+ return (NULL);
+ }
+ }
+ }
+#endif /* __FreeBSD__ */
+ return localtime_tzset(timep, p_tm, true);
}
struct tm *
@@ -1729,42 +1793,44 @@ gmtsub(ATTRIBUTE_MAYBE_UNUSED struct state const *sp, time_t const *timep,
struct tm *
gmtime_r(time_t const *restrict timep, struct tm *restrict tmp)
{
- _once(&gmt_once, gmtcheck);
- return gmtsub(gmtptr, timep, 0, tmp);
+ gmtcheck();
+ return gmtsub(gmtptr, timep, 0, tmp);
}
+#ifdef __FreeBSD__
static void
gmtime_key_init(void)
{
-
- gmtime_key_error = _pthread_key_create(&gmtime_key, free);
+ gmtime_key_error = _pthread_key_create(&gmtime_key, free);
}
-
+#endif /* __FreeBSD__ */
struct tm *
gmtime(const time_t *timep)
{
#if !SUPPORT_C89
- static struct tm tm;
+ static struct tm tm;
#endif
- struct tm *p_tm = &tm;
-
- if (__isthreaded != 0) {
- _pthread_once(&gmtime_once, gmtime_key_init);
- if (gmtime_key_error != 0) {
- errno = gmtime_key_error;
- return (NULL);
- }
- if ((p_tm = _pthread_getspecific(gmtime_key)) == NULL) {
- if ((p_tm = malloc(sizeof(*p_tm))) == NULL) {
- return (NULL);
- }
- if (_pthread_setspecific(gmtime_key, p_tm) != 0) {
- free(p_tm);
- return (NULL);
- }
- }
- }
- return gmtime_r(timep, p_tm);
+#ifdef __FreeBSD__
+ struct tm *p_tm = &tm;
+
+ if (__isthreaded != 0) {
+ _pthread_once(&gmtime_once, gmtime_key_init);
+ if (gmtime_key_error != 0) {
+ errno = gmtime_key_error;
+ return (NULL);
+ }
+ if ((p_tm = _pthread_getspecific(gmtime_key)) == NULL) {
+ if ((p_tm = malloc(sizeof(*p_tm))) == NULL) {
+ return (NULL);
+ }
+ if (_pthread_setspecific(gmtime_key, p_tm) != 0) {
+ free(p_tm);
+ return (NULL);
+ }
+ }
+ }
+#endif /* __FreeBSD__ */
+ return gmtime_r(timep, p_tm);
}
#if STD_INSPIRED
@@ -1775,42 +1841,44 @@ gmtime(const time_t *timep)
struct tm *
offtime_r(time_t const *restrict timep, long offset, struct tm *restrict tmp)
{
- _once(&gmt_once, gmtcheck);
- return gmtsub(gmtptr, timep, offset, tmp);
+ gmtcheck();
+ return gmtsub(gmtptr, timep, offset, tmp);
}
+#ifdef __FreeBSD__
static void
offtime_key_init(void)
{
-
- offtime_key_error = _pthread_key_create(&offtime_key, free);
+ offtime_key_error = _pthread_key_create(&offtime_key, free);
}
-
+#endif /* __FreeBSD__ */
struct tm *
offtime(const time_t *timep, long offset)
{
#if !SUPPORT_C89
- static struct tm tm;
+ static struct tm tm;
#endif
- struct tm *p_tm = &tm;
-
- if (__isthreaded != 0) {
- _pthread_once(&offtime_once, offtime_key_init);
- if (offtime_key_error != 0) {
- errno = offtime_key_error;
- return (NULL);
- }
- if ((p_tm = _pthread_getspecific(offtime_key)) == NULL) {
- if ((p_tm = malloc(sizeof(*p_tm))) == NULL) {
- return (NULL);
- }
- if (_pthread_setspecific(offtime_key, p_tm) != 0) {
- free(p_tm);
- return (NULL);
- }
- }
- }
- return offtime_r(timep, offset, p_tm);
+#ifdef __FreeBSD__
+ struct tm *p_tm = &tm;
+
+ if (__isthreaded != 0) {
+ _pthread_once(&offtime_once, offtime_key_init);
+ if (offtime_key_error != 0) {
+ errno = offtime_key_error;
+ return (NULL);
+ }
+ if ((p_tm = _pthread_getspecific(offtime_key)) == NULL) {
+ if ((p_tm = malloc(sizeof(*p_tm))) == NULL) {
+ return (NULL);
+ }
+ if (_pthread_setspecific(offtime_key, p_tm) != 0) {
+ free(p_tm);
+ return (NULL);
+ }
+ }
+ }
+#endif
+ return offtime_r(timep, offset, p_tm);
}
#endif
@@ -2323,7 +2391,6 @@ time1(struct tm *const tmp,
errno = EINVAL;
return WRONG;
}
-
if (tmp->tm_isdst > 1)
tmp->tm_isdst = 1;
t = time2(tmp, funcp, sp, offset, &okay);
@@ -2382,7 +2449,7 @@ mktime_tzname(struct state *sp, struct tm *tmp, bool setname)
if (sp)
return time1(tmp, localsub, sp, setname);
else {
- _once(&gmt_once, gmtcheck);
+ gmtcheck();
return time1(tmp, gmtsub, gmtptr, 0);
}
}
@@ -2438,7 +2505,7 @@ timeoff(struct tm *tmp, long offset)
{
if (tmp)
tmp->tm_isdst = 0;
- _once(&gmt_once, gmtcheck);
+ gmtcheck();
return time1(tmp, gmtsub, gmtptr, offset);
}
@@ -2508,7 +2575,7 @@ time2posix(time_t t)
}
#ifndef DETECT_TZ_CHANGES
if (!lcl_is_set)
-#endif
+#endif /* DETECT_TZ_CHANGES */
tzset_unlocked();
if (lclptr)
t = time2posix_z(lclptr, t);
@@ -2555,7 +2622,7 @@ posix2time(time_t t)
}
#ifndef DETECT_TZ_CHANGES
if (!lcl_is_set)
-#endif
+#endif /* DETECT_TZ_CHANGES */
tzset_unlocked();
if (lclptr)
t = posix2time_z(lclptr, t);
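
On the FreeBSD path above, the zone file is opened relative to a TZDIR directory descriptor, and set-user-ID or set-group-ID processes add O_RESOLVE_BENEATH so the lookup cannot escape TZDIR through ".." components or absolute symlinks; this takes the place of the textual ".." scan the stock tzcode path performs before calling access(). A sketch of the containment pattern using the public open/openat names (the real code uses libc-internal _open/_openat, and O_RESOLVE_BENEATH is a FreeBSD extension):

	#include <errno.h>
	#include <fcntl.h>
	#include <unistd.h>

	/* Open relname strictly beneath dir; returns an fd or -1. */
	int
	open_beneath(const char *dir, const char *relname)
	{
		int dd, fd, serrno;

		if ((dd = open(dir, O_DIRECTORY | O_RDONLY)) < 0)
			return -1;
		/* Lookups that would escape dd, via "..", absolute
		 * paths, or symlinks pointing outside it, fail. */
		fd = openat(dd, relname, O_RDONLY | O_RESOLVE_BENEATH);
		serrno = errno;
		(void)close(dd);
		errno = serrno;
		return fd;
	}
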
diff --git a/crypto/krb5/src/lib/gssapi/krb5/util_crypt.c b/crypto/krb5/src/lib/gssapi/krb5/util_crypt.c
index 28411429bf6e..386842e8a6e3 100644
--- a/crypto/krb5/src/lib/gssapi/krb5/util_crypt.c
+++ b/crypto/krb5/src/lib/gssapi/krb5/util_crypt.c
@@ -322,12 +322,16 @@ kg_verify_checksum_v3(krb5_context context, krb5_key key, krb5_keyusage usage,
uint8_t ckhdr[16];
krb5_boolean valid;
- /* Compose an RFC 4121 token header with EC and RRC set to 0. */
+ /*
+ * Compose an RFC 4121 token header for the checksum. For a wrap token,
+ * the EC and RRC fields have the value 0 for the checksum operation,
+ * regardless of their values in the actual token (RFC 4121 section 4.2.4).
+ * For a MIC token, the corresponding four bytes have the value 0xFF.
+ */
store_16_be(toktype, ckhdr);
ckhdr[2] = flags;
ckhdr[3] = 0xFF;
- store_16_be(0, ckhdr + 4);
- store_16_be(0, ckhdr + 6);
+ store_32_be((toktype == KG2_TOK_MIC_MSG) ? 0xFFFFFFFF : 0, ckhdr + 4);
store_64_be(seqnum, ckhdr + 8);
/* Verify the checksum over the data and composed header. */
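
For reference, the checksum in this hunk and in the verify_mic.c hunk below is computed over the message followed by a 16-byte pseudo-header; the layout restates what the code above encodes (store_16_be/store_32_be/store_64_be are krb5's big-endian store helpers):

	bytes 0-1	token ID (wrap or MIC)
	byte  2		flags
	byte  3		0xFF filler
	bytes 4-7	wrap token: EC and RRC, both zero for checksumming
			MIC token: four 0xFF filler bytes
	bytes 8-15	sequence number
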
diff --git a/crypto/krb5/src/lib/gssapi/krb5/verify_mic.c b/crypto/krb5/src/lib/gssapi/krb5/verify_mic.c
index 9852f49912a9..1c11d2016fca 100644
--- a/crypto/krb5/src/lib/gssapi/krb5/verify_mic.c
+++ b/crypto/krb5/src/lib/gssapi/krb5/verify_mic.c
@@ -90,7 +90,6 @@ verify_mic_v3(krb5_context context, OM_uint32 *minor_status,
krb5_gss_ctx_id_rec *ctx, struct k5input *in,
gss_buffer_t message)
{
- OM_uint32 status;
krb5_keyusage usage;
krb5_key key;
krb5_cksumtype cksumtype;
@@ -124,12 +123,10 @@ verify_mic_v3(krb5_context context, OM_uint32 *minor_status,
}
assert(key != NULL);
- status = kg_verify_checksum_v3(context, key, usage, cksumtype,
- KG2_TOK_MIC_MSG, flags, seqnum,
- message->value, message->length,
- in->ptr, in->len);
- if (status != GSS_S_COMPLETE)
- return status;
+ if (!kg_verify_checksum_v3(context, key, usage, cksumtype, KG2_TOK_MIC_MSG,
+ flags, seqnum, message->value, message->length,
+ in->ptr, in->len))
+ return GSS_S_BAD_SIG;
return g_seqstate_check(ctx->seqstate, seqnum);
}
diff --git a/crypto/openssl/BSDmakefile b/crypto/openssl/BSDmakefile
new file mode 100644
index 000000000000..bd2bfe0ea033
--- /dev/null
+++ b/crypto/openssl/BSDmakefile
@@ -0,0 +1,99 @@
+# This BSD makefile helps provide a deterministic means of doing a "clean"
+# vendor import of OpenSSL.
+#
+# Recommended use:
+#
+# % make clean
+# % make all
+
+NO_OBJ=
+
+LCRYPTO_SRC= ${SRCTOP}/crypto/openssl
+LCRYPTO_DOC= ${LCRYPTO_SRC}/doc
+
+CAT?= /bin/cat
+MV?= /bin/mv
+PERL?= perl
+
+BN_CONF_H= include/crypto/bn_conf.h
+BN_CONF_H_ORIG= ${BN_CONF_H}.orig
+CONFIGURATION_H= include/openssl/configuration.h
+CONFIGURATION_H_ORIG= ${CONFIGURATION_H}.orig
+
+.PHONY: configure patch all
+.ORDER: configure patch all
+
+configure:
+ @cd ${.CURDIR} && \
+ ${PERL} ./Configure \
+ disable-aria \
+ disable-egd \
+ disable-idea \
+ disable-mdc2 \
+ disable-sm2 \
+ disable-sm3 \
+ disable-sm4 \
+ enable-ec_nistp_64_gcc_128 \
+ enable-ktls \
+ enable-sctp \
+ --openssldir=etc \
+ --prefix=/usr
+ @cd ${.CURDIR} && gmake configdata.pm
+ @cd ${LCRYPTO_SRC} && ${PERL} \
+ ${LCRYPTO_SRC}/freebsd/dump_version_from_configdata.pl > \
+ ${SRCTOP}/secure/lib/libcrypto/Makefile.version
+
+all: patch
+ # Passing `-j ${.MAKE.JOBS}` doesn't work here for some reason.
+ @cd ${.CURDIR} && gmake build_all_generated
+
+ # Clean the pkgconfig files:
+ # 1. Fix --prefix (not sure why configure --prefix isn't honored properly).
+ # 2. Remove duplicate path in CFLAGS.
+ # 3. Remove duplicate path in includedir(s).
+ @find . -name \*.pc -print -exec sed -i '' -E \
+ -e 's,^prefix=.+,prefix=/usr,' \
+ -e 's,[[:space:]]+(\-I)?\$\{prefix\}/\./include[[:space:]]*,,g' \
+ {} +
+
+ @cd ${SRCTOP}/secure/lib/libcrypto && \
+ ${MAKE} cleanasm && \
+ ${MAKE} buildasm
+
+ @rsync -a --delete \
+ --exclude 'Makefile*' --exclude '*.1' \
+ ${LCRYPTO_DOC}/man/ \
+ ${SRCTOP}/secure/lib/libcrypto/man
+
+ @rsync -a --delete \
+ --exclude 'Makefile*' --exclude '*.[357]' \
+ ${LCRYPTO_DOC}/man/man1/ \
+ ${SRCTOP}/secure/usr.bin/openssl/man
+
+
+# This doesn't use standard patching since the generated files can vary
+# depending on the host architecture.
+patch: configure
+ # Spam arch-specific overrides to config files.
+
+ @cd ${.CURDIR} && gmake ${BN_CONF_H} && \
+ ${MV} ${BN_CONF_H} ${BN_CONF_H_ORIG} && \
+ ${CAT} ${BN_CONF_H_ORIG} \
+ ${LCRYPTO_SRC}/freebsd/${BN_CONF_H} >> \
+ ${BN_CONF_H}
+
+ @cd ${.CURDIR} && \
+ ${MV} ${CONFIGURATION_H} ${CONFIGURATION_H_ORIG} && \
+ ${CAT} ${CONFIGURATION_H_ORIG} \
+ ${LCRYPTO_SRC}/freebsd/${CONFIGURATION_H} >> \
+ ${CONFIGURATION_H}
+
+
+clean: .PHONY
+ @cd ${.CURDIR} && rm -f ${BN_CONF_H_ORIG} ${CONFIGURATION_H_ORIG}
+
+ @cd ${SRCTOP}/secure/lib/libcrypto && ${MAKE} cleanasm
+
+ -@cd ${.CURDIR} && gmake ${.TARGET}
+
+.include <sys.mk>
diff --git a/crypto/openssl/apps/CA.pl b/crypto/openssl/apps/CA.pl
index 70ad231fff04..0861fd7a4da7 100755
--- a/crypto/openssl/apps/CA.pl
+++ b/crypto/openssl/apps/CA.pl
@@ -1,4 +1,4 @@
-#!/usr/local/bin/perl
+#!/usr/bin/env perl
# Copyright 2000-2025 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
diff --git a/crypto/openssl/apps/progs.c b/crypto/openssl/apps/progs.c
index 2646a1a35bf3..acc204a3e6e7 100644
--- a/crypto/openssl/apps/progs.c
+++ b/crypto/openssl/apps/progs.c
@@ -2,7 +2,7 @@
* WARNING: do not edit!
* Generated by apps/progs.pl
*
- * Copyright 1995-2023 The OpenSSL Project Authors. All Rights Reserved.
+ * Copyright 1995-2025 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
@@ -89,6 +89,7 @@ FUNCTION functions[] = {
{FT_general, "s_time", s_time_main, s_time_options, NULL, NULL},
#endif
{FT_general, "sess_id", sess_id_main, sess_id_options, NULL, NULL},
+ {FT_general, "skeyutl", skeyutl_main, skeyutl_options, NULL, NULL},
{FT_general, "smime", smime_main, smime_options, NULL, NULL},
{FT_general, "speed", speed_main, speed_options, NULL, NULL},
{FT_general, "spkac", spkac_main, spkac_options, NULL, NULL},
@@ -225,9 +226,15 @@ FUNCTION functions[] = {
{FT_cipher, "camellia-256-ecb", enc_main, enc_options, NULL},
#endif
{FT_cipher, "base64", enc_main, enc_options, NULL},
-#ifdef ZLIB
+#ifndef OPENSSL_NO_ZLIB
{FT_cipher, "zlib", enc_main, enc_options, NULL},
#endif
+#ifndef OPENSSL_NO_BROTLI
+ {FT_cipher, "brotli", enc_main, enc_options, NULL},
+#endif
+#ifndef OPENSSL_NO_ZSTD
+ {FT_cipher, "zstd", enc_main, enc_options, NULL},
+#endif
#ifndef OPENSSL_NO_DES
{FT_cipher, "des", enc_main, enc_options, NULL},
#endif
diff --git a/crypto/openssl/apps/progs.h b/crypto/openssl/apps/progs.h
index 83c829a721bf..1b62ec37dec1 100644
--- a/crypto/openssl/apps/progs.h
+++ b/crypto/openssl/apps/progs.h
@@ -2,7 +2,7 @@
* WARNING: do not edit!
* Generated by apps/progs.pl
*
- * Copyright 1995-2023 The OpenSSL Project Authors. All Rights Reserved.
+ * Copyright 1995-2025 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
@@ -56,6 +56,7 @@ extern int s_client_main(int argc, char *argv[]);
extern int s_server_main(int argc, char *argv[]);
extern int s_time_main(int argc, char *argv[]);
extern int sess_id_main(int argc, char *argv[]);
+extern int skeyutl_main(int argc, char *argv[]);
extern int smime_main(int argc, char *argv[]);
extern int speed_main(int argc, char *argv[]);
extern int spkac_main(int argc, char *argv[]);
@@ -110,6 +111,7 @@ extern const OPTIONS s_client_options[];
extern const OPTIONS s_server_options[];
extern const OPTIONS s_time_options[];
extern const OPTIONS sess_id_options[];
+extern const OPTIONS skeyutl_options[];
extern const OPTIONS smime_options[];
extern const OPTIONS speed_options[];
extern const OPTIONS spkac_options[];
diff --git a/crypto/openssl/configdata.pm b/crypto/openssl/configdata.pm
index b2ea8dcd87ca..ec70eaba9f07 100755
--- a/crypto/openssl/configdata.pm
+++ b/crypto/openssl/configdata.pm
@@ -1,4 +1,4 @@
-#! /usr/local/bin/perl
+#! /usr/bin/env perl
# -*- mode: perl -*-
package configdata;
@@ -21,24 +21,16 @@ our %config = (
"ASFLAGS" => [],
"CC" => "cc",
"CFLAGS" => [
- "-O2 -pipe -fstack-protector-strong -fno-strict-aliasing "
+ "-Wall -O3"
],
- "CPP" => "cpp",
"CPPDEFINES" => [],
- "CPPFLAGS" => [
- ""
- ],
+ "CPPFLAGS" => [],
"CPPINCLUDES" => [],
- "CXX" => "c++",
- "CXXFLAGS" => [
- "-O2 -pipe -fstack-protector-strong -fno-strict-aliasing "
- ],
+ "CXXFLAGS" => [],
"FIPSKEY" => "f4556650ac31d35461610bac4ed81b1a181b2d8a43ea2854cbae22ca74560813",
- "FIPS_VENDOR" => "OpenSSL FIPS Provider",
- "HASHBANGPERL" => "/usr/local/bin/perl",
- "LDFLAGS" => [
- " "
- ],
+ "FIPS_VENDOR" => "OpenSSL non-compliant FIPS Provider",
+ "HASHBANGPERL" => "/usr/bin/env perl",
+ "LDFLAGS" => [],
"LDLIBS" => [],
"OBJCOPY" => "objcopy",
"PERL" => "/usr/local/bin/perl",
@@ -139,7 +131,6 @@ our %config = (
"apps/lib/build.info",
"providers/common/build.info",
"providers/implementations/build.info",
- "providers/fips/build.info",
"doc/man1/build.info",
"ssl/record/methods/build.info",
"providers/common/der/build.info",
@@ -178,6 +169,7 @@ our %config = (
"ex_libs" => [],
"full_version" => "3.5.1",
"includes" => [],
+ "ktls" => "",
"lflags" => [],
"lib_defines" => [
"OPENSSL_PIC"
@@ -192,6 +184,7 @@ our %config = (
"openssl_feature_defines" => [
"OPENSSL_RAND_SEED_OS",
"OPENSSL_THREADS",
+ "OPENSSL_NO_ACVP_TESTS",
"OPENSSL_NO_AFALGENG",
"OPENSSL_NO_ARIA",
"OPENSSL_NO_ASAN",
@@ -203,13 +196,14 @@ our %config = (
"OPENSSL_NO_EGD",
"OPENSSL_NO_EXTERNAL_TESTS",
"OPENSSL_NO_FIPS_JITTER",
+ "OPENSSL_NO_FIPS_POST",
+ "OPENSSL_NO_FIPS_SECURITYCHECKS",
"OPENSSL_NO_FUZZ_AFL",
"OPENSSL_NO_FUZZ_LIBFUZZER",
"OPENSSL_NO_H3DEMO",
"OPENSSL_NO_HQINTEROP",
"OPENSSL_NO_IDEA",
"OPENSSL_NO_JITTER",
- "OPENSSL_NO_KTLS",
"OPENSSL_NO_MD2",
"OPENSSL_NO_MDC2",
"OPENSSL_NO_MSAN",
@@ -222,7 +216,6 @@ our %config = (
"OPENSSL_NO_SSL3_METHOD",
"OPENSSL_NO_SSLKEYLOG",
"OPENSSL_NO_TFO",
- "OPENSSL_NO_TLS_DEPRECATED_EC",
"OPENSSL_NO_TRACE",
"OPENSSL_NO_UBSAN",
"OPENSSL_NO_UNIT_TEST",
@@ -235,30 +228,26 @@ our %config = (
"OPENSSL_NO_ZSTD_DYNAMIC",
"OPENSSL_NO_STATIC_ENGINE"
],
- "openssl_other_defines" => [
- "OPENSSL_NO_KTLS"
- ],
"openssl_sys_defines" => [],
- "openssldir" => "/usr/local/openssl",
- "options" => "--openssldir=/usr/local/openssl --prefix=/usr/local enable-ec_nistp_64_gcc_128 enable-fips enable-sctp no-afalgeng no-aria no-asan no-brotli no-brotli-dynamic no-buildtest-c++ no-crypto-mdebug no-crypto-mdebug-backtrace no-demos no-egd no-external-tests no-fips-jitter no-fuzz-afl no-fuzz-libfuzzer no-h3demo no-hqinterop no-idea no-jitter no-ktls no-legacy no-md2 no-mdc2 no-msan no-pie no-rc5 no-sm2 no-sm3 no-sm4 no-ssl3 no-ssl3-method no-sslkeylog no-tfo no-tls-deprecated-ec no-trace no-ubsan no-unit-test no-uplink no-weak-ssl-ciphers no-winstore no-zlib no-zlib-dynamic no-zstd no-zstd-dynamic",
+ "openssldir" => "etc",
+ "options" => "enable-ec_nistp_64_gcc_128 enable-ktls enable-sctp --openssldir=etc --prefix=/usr no-acvp-tests no-afalgeng no-aria no-asan no-brotli no-brotli-dynamic no-buildtest-c++ no-crypto-mdebug no-crypto-mdebug-backtrace no-demos no-egd no-external-tests no-fips no-fips-jitter no-fips-post no-fips-securitychecks no-fuzz-afl no-fuzz-libfuzzer no-h3demo no-hqinterop no-idea no-jitter no-md2 no-mdc2 no-msan no-pie no-rc5 no-sm2 no-sm3 no-sm4 no-ssl3 no-ssl3-method no-sslkeylog no-tfo no-trace no-ubsan no-unit-test no-uplink no-weak-ssl-ciphers no-winstore no-zlib no-zlib-dynamic no-zstd no-zstd-dynamic",
"patch" => "1",
"perl_archname" => "amd64-freebsd-thread-multi",
"perl_cmd" => "/usr/local/bin/perl",
"perl_version" => "5.40.2",
"perlargv" => [
- "--openssldir=/usr/local/openssl",
- "--prefix=/usr/local",
- "no-aria",
+ "disable-aria",
+ "disable-egd",
+ "disable-idea",
+ "disable-mdc2",
+ "disable-sm2",
+ "disable-sm3",
+ "disable-sm4",
"enable-ec_nistp_64_gcc_128",
- "enable-fips",
- "no-idea",
- "no-legacy",
- "no-mdc2",
+ "enable-ktls",
"enable-sctp",
- "no-sm2",
- "no-sm3",
- "no-sm4",
- "no-tls-deprecated-ec"
+ "--openssldir=etc",
+ "--prefix=/usr"
],
"perlenv" => {
"AR" => undef,
@@ -266,24 +255,24 @@ our %config = (
"AS" => undef,
"ASFLAGS" => undef,
"BUILDFILE" => undef,
- "CC" => "cc",
- "CFLAGS" => "-O2 -pipe -fstack-protector-strong -fno-strict-aliasing ",
- "CPP" => "cpp",
+ "CC" => undef,
+ "CFLAGS" => undef,
+ "CPP" => undef,
"CPPDEFINES" => undef,
- "CPPFLAGS" => "",
+ "CPPFLAGS" => undef,
"CPPINCLUDES" => undef,
"CROSS_COMPILE" => undef,
- "CXX" => "c++",
- "CXXFLAGS" => "-O2 -pipe -fstack-protector-strong -fno-strict-aliasing ",
+ "CXX" => undef,
+ "CXXFLAGS" => undef,
"HASHBANGPERL" => undef,
"LD" => undef,
- "LDFLAGS" => " ",
+ "LDFLAGS" => undef,
"LDLIBS" => undef,
"MT" => undef,
"MTFLAGS" => undef,
"OBJCOPY" => undef,
"OPENSSL_LOCAL_CONFIG_DIR" => undef,
- "PERL" => "/usr/local/bin/perl",
+ "PERL" => undef,
"RANLIB" => undef,
"RC" => undef,
"RCFLAGS" => undef,
@@ -297,12 +286,12 @@ our %config = (
"__CNF_LDFLAGS" => undef,
"__CNF_LDLIBS" => undef
},
- "prefix" => "/usr/local",
+ "prefix" => "/usr",
"prerelease" => "",
"processor" => "",
"rc4_int" => "unsigned int",
"release_date" => "1 Jul 2025",
- "shlib_version" => "17",
+ "shlib_version" => "3",
"sourcedir" => ".",
"target" => "BSD-x86_64",
"version" => "3.5.1"
@@ -521,6 +510,7 @@ our @disablables_int = (
"crmf"
);
our %disabled = (
+ "acvp-tests" => "cascade",
"afalgeng" => "not-linux",
"aria" => "option",
"asan" => "default",
@@ -530,17 +520,18 @@ our %disabled = (
"crypto-mdebug" => "default",
"crypto-mdebug-backtrace" => "default",
"demos" => "default",
- "egd" => "default",
+ "egd" => "option",
"external-tests" => "default",
+ "fips" => "default",
"fips-jitter" => "default",
+ "fips-post" => "cascade",
+ "fips-securitychecks" => "cascade",
"fuzz-afl" => "default",
"fuzz-libfuzzer" => "default",
"h3demo" => "default",
"hqinterop" => "default",
"idea" => "option",
"jitter" => "default",
- "ktls" => "default",
- "legacy" => "option",
"md2" => "default",
"mdc2" => "option",
"msan" => "default",
@@ -553,7 +544,6 @@ our %disabled = (
"ssl3-method" => "default",
"sslkeylog" => "default",
"tfo" => "default",
- "tls-deprecated-ec" => "option",
"trace" => "default",
"ubsan" => "default",
"unit-test" => "default",
@@ -880,7 +870,7 @@ our %unified_info = (
"providers/libdefault.a" => {
"noinst" => "1"
},
- "providers/libfips.a" => {
+ "providers/liblegacy.a" => {
"noinst" => "1"
},
"providers/libtemplate.a" => {
@@ -912,9 +902,6 @@ our %unified_info = (
"engines/padlock" => {
"engine" => "1"
},
- "providers/fips" => {
- "fips" => "1"
- },
"test/p_minimal" => {
"noinst" => "1"
},
@@ -1016,9 +1003,6 @@ our %unified_info = (
"test/aborttest" => {
"noinst" => "1"
},
- "test/acvp_test" => {
- "noinst" => "1"
- },
"test/aesgcmtest" => {
"noinst" => "1"
},
@@ -1142,6 +1126,9 @@ our %unified_info = (
"test/buildtest_c_conf_api" => {
"noinst" => "1"
},
+ "test/buildtest_c_configuration" => {
+ "noinst" => "1"
+ },
"test/buildtest_c_conftypes" => {
"noinst" => "1"
},
@@ -1996,9 +1983,6 @@ our %unified_info = (
"libssl" => [
"AES_ASM"
],
- "providers/fips" => [
- "FIPS_MODULE"
- ],
"providers/legacy" => [
"OPENSSL_CPUID_OBJ"
],
@@ -2053,8 +2037,11 @@ our %unified_info = (
"VPAES_ASM",
"X25519_ASM"
],
- "test/evp_test" => [
- "NO_LEGACY_MODULE"
+ "test/endecode_test" => [
+ "STATIC_LEGACY"
+ ],
+ "test/evp_extra_test" => [
+ "STATIC_LEGACY"
],
"test/provider_internal_test" => [
"PROVIDER_INIT_FUNCTION_NAME=p_test_init"
@@ -2296,9 +2283,6 @@ our %unified_info = (
"apps/progs.h" => [
"apps/progs.c"
],
- "build_modules_nodep" => [
- "providers/fipsmodule.cnf"
- ],
"crypto/aes/aes-586.S" => [
"crypto/perlasm/x86asm.pl"
],
@@ -8214,10 +8198,6 @@ our %unified_info = (
"providers/common/include/prov/der_digests.h",
"providers/common/include/prov/der_rsa.h"
],
- "providers/common/der/libfips-lib-der_rsa_sig.o" => [
- "providers/common/include/prov/der_digests.h",
- "providers/common/include/prov/der_rsa.h"
- ],
"providers/common/include/prov/der_digests.h" => [
"providers/common/der/DIGESTS.asn1",
"providers/common/der/NIST.asn1",
@@ -8252,21 +8232,12 @@ our %unified_info = (
"providers/common/der/oids_to_c.pm",
"providers/common/der/wrap.asn1"
],
- "providers/fips" => [
- "providers/libfips.a"
- ],
- "providers/fipsmodule.cnf" => [
- "providers/fips"
- ],
"providers/implementations/encode_decode/libdefault-lib-encode_key2any.o" => [
"providers/common/include/prov/der_rsa.h"
],
"providers/implementations/kdfs/libdefault-lib-x942kdf.o" => [
"providers/common/include/prov/der_wrap.h"
],
- "providers/implementations/kdfs/libfips-lib-x942kdf.o" => [
- "providers/common/include/prov/der_wrap.h"
- ],
"providers/implementations/signature/libdefault-lib-dsa_sig.o" => [
"providers/common/include/prov/der_dsa.h"
],
@@ -8285,27 +8256,13 @@ our %unified_info = (
"providers/implementations/signature/libdefault-lib-slh_dsa_sig.o" => [
"providers/common/include/prov/der_slh_dsa.h"
],
- "providers/implementations/signature/libfips-lib-dsa_sig.o" => [
- "providers/common/include/prov/der_dsa.h"
- ],
- "providers/implementations/signature/libfips-lib-ecdsa_sig.o" => [
- "providers/common/include/prov/der_ec.h"
- ],
- "providers/implementations/signature/libfips-lib-eddsa_sig.o" => [
- "providers/common/include/prov/der_ecx.h"
- ],
- "providers/implementations/signature/libfips-lib-ml_dsa_sig.o" => [
- "providers/common/include/prov/der_ml_dsa.h"
- ],
- "providers/implementations/signature/libfips-lib-rsa_sig.o" => [
- "providers/common/include/prov/der_rsa.h"
- ],
- "providers/implementations/signature/libfips-lib-slh_dsa_sig.o" => [
- "providers/common/include/prov/der_slh_dsa.h"
- ],
"providers/implementations/signature/sm2_sig.o" => [
"providers/common/include/prov/der_sm2.h"
],
+ "providers/legacy" => [
+ "libcrypto",
+ "providers/liblegacy.a"
+ ],
"providers/libcommon.a" => [
"libcrypto"
],
@@ -8318,10 +8275,6 @@ our %unified_info = (
"test/aborttest" => [
"libcrypto"
],
- "test/acvp_test" => [
- "libcrypto.a",
- "test/libtestutil.a"
- ],
"test/aesgcmtest" => [
"libcrypto",
"test/libtestutil.a"
@@ -8487,6 +8440,10 @@ our %unified_info = (
"libcrypto",
"libssl"
],
+ "test/buildtest_c_configuration" => [
+ "libcrypto",
+ "libssl"
+ ],
"test/buildtest_c_conftypes" => [
"libcrypto",
"libssl"
@@ -8917,6 +8874,8 @@ our %unified_info = (
],
"test/endecode_test" => [
"libcrypto.a",
+ "providers/libcommon.a",
+ "providers/liblegacy.a",
"test/libtestutil.a"
],
"test/endecoder_legacy_test" => [
@@ -8937,6 +8896,8 @@ our %unified_info = (
],
"test/evp_extra_test" => [
"libcrypto.a",
+ "providers/libcommon.a",
+ "providers/liblegacy.a",
"test/libtestutil.a"
],
"test/evp_extra_test2" => [
@@ -9676,6 +9637,9 @@ our %unified_info = (
"crypto/packettest-bin-quic_vlint.o",
"crypto/tls13secretstest-bin-packet.o",
"crypto/tls13secretstest-bin-quic_vlint.o",
+ "crypto/legacy-dso-cpuid.o",
+ "crypto/legacy-dso-ctype.o",
+ "crypto/legacy-dso-x86_64cpuid.o",
"crypto/libcrypto-lib-asn1_dsa.o",
"crypto/libcrypto-lib-bsearch.o",
"crypto/libcrypto-lib-comp_methods.o",
@@ -9792,37 +9756,7 @@ our %unified_info = (
"crypto/libssl-shlib-getenv.o",
"crypto/libssl-shlib-packet.o",
"crypto/libssl-shlib-quic_vlint.o",
- "crypto/libssl-shlib-time.o",
- "crypto/libfips-lib-asn1_dsa.o",
- "crypto/libfips-lib-bsearch.o",
- "crypto/libfips-lib-context.o",
- "crypto/libfips-lib-core_algorithm.o",
- "crypto/libfips-lib-core_fetch.o",
- "crypto/libfips-lib-core_namemap.o",
- "crypto/libfips-lib-cpuid.o",
- "crypto/libfips-lib-cryptlib.o",
- "crypto/libfips-lib-ctype.o",
- "crypto/libfips-lib-der_writer.o",
- "crypto/libfips-lib-ex_data.o",
- "crypto/libfips-lib-initthread.o",
- "crypto/libfips-lib-o_str.o",
- "crypto/libfips-lib-packet.o",
- "crypto/libfips-lib-param_build.o",
- "crypto/libfips-lib-param_build_set.o",
- "crypto/libfips-lib-params.o",
- "crypto/libfips-lib-params_dup.o",
- "crypto/libfips-lib-params_from_text.o",
- "crypto/libfips-lib-params_idx.o",
- "crypto/libfips-lib-provider_core.o",
- "crypto/libfips-lib-provider_predefined.o",
- "crypto/libfips-lib-self_test_core.o",
- "crypto/libfips-lib-sparse_array.o",
- "crypto/libfips-lib-threads_lib.o",
- "crypto/libfips-lib-threads_none.o",
- "crypto/libfips-lib-threads_pthread.o",
- "crypto/libfips-lib-threads_win.o",
- "crypto/libfips-lib-time.o",
- "crypto/libfips-lib-x86_64cpuid.o"
+ "crypto/libssl-shlib-time.o"
],
"products" => {
"bin" => [
@@ -9831,10 +9765,12 @@ our %unified_info = (
"test/packettest",
"test/tls13secretstest"
],
+ "dso" => [
+ "providers/legacy"
+ ],
"lib" => [
"libcrypto",
- "libssl",
- "providers/libfips.a"
+ "libssl"
]
}
},
@@ -9867,22 +9803,11 @@ our %unified_info = (
"crypto/aes/libcrypto-shlib-aesni-x86_64.o",
"crypto/aes/libcrypto-shlib-aesni-xts-avx512.o",
"crypto/aes/libcrypto-shlib-bsaes-x86_64.o",
- "crypto/aes/libcrypto-shlib-vpaes-x86_64.o",
- "crypto/aes/libfips-lib-aes-x86_64.o",
- "crypto/aes/libfips-lib-aes_ecb.o",
- "crypto/aes/libfips-lib-aes_misc.o",
- "crypto/aes/libfips-lib-aesni-mb-x86_64.o",
- "crypto/aes/libfips-lib-aesni-sha1-x86_64.o",
- "crypto/aes/libfips-lib-aesni-sha256-x86_64.o",
- "crypto/aes/libfips-lib-aesni-x86_64.o",
- "crypto/aes/libfips-lib-aesni-xts-avx512.o",
- "crypto/aes/libfips-lib-bsaes-x86_64.o",
- "crypto/aes/libfips-lib-vpaes-x86_64.o"
+ "crypto/aes/libcrypto-shlib-vpaes-x86_64.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -10234,65 +10159,22 @@ our %unified_info = (
"crypto/bn/libcrypto-shlib-rsaz_exp_x2.o",
"crypto/bn/libcrypto-shlib-x86_64-gf2m.o",
"crypto/bn/libcrypto-shlib-x86_64-mont.o",
- "crypto/bn/libcrypto-shlib-x86_64-mont5.o",
- "crypto/bn/libfips-lib-bn_add.o",
- "crypto/bn/libfips-lib-bn_blind.o",
- "crypto/bn/libfips-lib-bn_const.o",
- "crypto/bn/libfips-lib-bn_conv.o",
- "crypto/bn/libfips-lib-bn_ctx.o",
- "crypto/bn/libfips-lib-bn_dh.o",
- "crypto/bn/libfips-lib-bn_div.o",
- "crypto/bn/libfips-lib-bn_exp.o",
- "crypto/bn/libfips-lib-bn_exp2.o",
- "crypto/bn/libfips-lib-bn_gcd.o",
- "crypto/bn/libfips-lib-bn_gf2m.o",
- "crypto/bn/libfips-lib-bn_intern.o",
- "crypto/bn/libfips-lib-bn_kron.o",
- "crypto/bn/libfips-lib-bn_lib.o",
- "crypto/bn/libfips-lib-bn_mod.o",
- "crypto/bn/libfips-lib-bn_mont.o",
- "crypto/bn/libfips-lib-bn_mpi.o",
- "crypto/bn/libfips-lib-bn_mul.o",
- "crypto/bn/libfips-lib-bn_nist.o",
- "crypto/bn/libfips-lib-bn_prime.o",
- "crypto/bn/libfips-lib-bn_rand.o",
- "crypto/bn/libfips-lib-bn_recp.o",
- "crypto/bn/libfips-lib-bn_rsa_fips186_4.o",
- "crypto/bn/libfips-lib-bn_shift.o",
- "crypto/bn/libfips-lib-bn_sqr.o",
- "crypto/bn/libfips-lib-bn_sqrt.o",
- "crypto/bn/libfips-lib-bn_word.o",
- "crypto/bn/libfips-lib-rsaz-2k-avx512.o",
- "crypto/bn/libfips-lib-rsaz-2k-avxifma.o",
- "crypto/bn/libfips-lib-rsaz-3k-avx512.o",
- "crypto/bn/libfips-lib-rsaz-3k-avxifma.o",
- "crypto/bn/libfips-lib-rsaz-4k-avx512.o",
- "crypto/bn/libfips-lib-rsaz-4k-avxifma.o",
- "crypto/bn/libfips-lib-rsaz-avx2.o",
- "crypto/bn/libfips-lib-rsaz-x86_64.o",
- "crypto/bn/libfips-lib-rsaz_exp.o",
- "crypto/bn/libfips-lib-rsaz_exp_x2.o",
- "crypto/bn/libfips-lib-x86_64-gf2m.o",
- "crypto/bn/libfips-lib-x86_64-mont.o",
- "crypto/bn/libfips-lib-x86_64-mont5.o"
+ "crypto/bn/libcrypto-shlib-x86_64-mont5.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
"crypto/bn/asm" => {
"deps" => [
"crypto/bn/asm/libcrypto-lib-x86_64-gcc.o",
- "crypto/bn/asm/libcrypto-shlib-x86_64-gcc.o",
- "crypto/bn/asm/libfips-lib-x86_64-gcc.o"
+ "crypto/bn/asm/libcrypto-shlib-x86_64-gcc.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -10301,13 +10183,11 @@ our %unified_info = (
"crypto/buffer/libcrypto-lib-buf_err.o",
"crypto/buffer/libcrypto-lib-buffer.o",
"crypto/buffer/libcrypto-shlib-buf_err.o",
- "crypto/buffer/libcrypto-shlib-buffer.o",
- "crypto/buffer/libfips-lib-buffer.o"
+ "crypto/buffer/libcrypto-shlib-buffer.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -10365,13 +10245,11 @@ our %unified_info = (
"crypto/cmac" => {
"deps" => [
"crypto/cmac/libcrypto-lib-cmac.o",
- "crypto/cmac/libcrypto-shlib-cmac.o",
- "crypto/cmac/libfips-lib-cmac.o"
+ "crypto/cmac/libcrypto-shlib-cmac.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -10583,15 +10461,13 @@ our %unified_info = (
"crypto/des/libcrypto-shlib-set_key.o",
"crypto/des/libcrypto-shlib-str2key.o",
"crypto/des/libcrypto-shlib-xcbc_enc.o",
- "crypto/des/libfips-lib-des_enc.o",
- "crypto/des/libfips-lib-ecb3_enc.o",
- "crypto/des/libfips-lib-fcrypt_b.o",
- "crypto/des/libfips-lib-set_key.o"
+ "crypto/des/liblegacy-lib-des_enc.o",
+ "crypto/des/liblegacy-lib-fcrypt_b.o"
],
"products" => {
"lib" => [
"libcrypto",
- "providers/libfips.a"
+ "providers/liblegacy.a"
]
}
},
@@ -10626,19 +10502,11 @@ our %unified_info = (
"crypto/dh/libcrypto-shlib-dh_meth.o",
"crypto/dh/libcrypto-shlib-dh_pmeth.o",
"crypto/dh/libcrypto-shlib-dh_prn.o",
- "crypto/dh/libcrypto-shlib-dh_rfc5114.o",
- "crypto/dh/libfips-lib-dh_backend.o",
- "crypto/dh/libfips-lib-dh_check.o",
- "crypto/dh/libfips-lib-dh_gen.o",
- "crypto/dh/libfips-lib-dh_group_params.o",
- "crypto/dh/libfips-lib-dh_kdf.o",
- "crypto/dh/libfips-lib-dh_key.o",
- "crypto/dh/libfips-lib-dh_lib.o"
+ "crypto/dh/libcrypto-shlib-dh_rfc5114.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -10673,20 +10541,11 @@ our %unified_info = (
"crypto/dsa/libcrypto-shlib-dsa_pmeth.o",
"crypto/dsa/libcrypto-shlib-dsa_prn.o",
"crypto/dsa/libcrypto-shlib-dsa_sign.o",
- "crypto/dsa/libcrypto-shlib-dsa_vrf.o",
- "crypto/dsa/libfips-lib-dsa_backend.o",
- "crypto/dsa/libfips-lib-dsa_check.o",
- "crypto/dsa/libfips-lib-dsa_gen.o",
- "crypto/dsa/libfips-lib-dsa_key.o",
- "crypto/dsa/libfips-lib-dsa_lib.o",
- "crypto/dsa/libfips-lib-dsa_ossl.o",
- "crypto/dsa/libfips-lib-dsa_sign.o",
- "crypto/dsa/libfips-lib-dsa_vrf.o"
+ "crypto/dsa/libcrypto-shlib-dsa_vrf.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -10792,44 +10651,11 @@ our %unified_info = (
"crypto/ec/libcrypto-shlib-ecx_backend.o",
"crypto/ec/libcrypto-shlib-ecx_key.o",
"crypto/ec/libcrypto-shlib-ecx_meth.o",
- "crypto/ec/libcrypto-shlib-x25519-x86_64.o",
- "crypto/ec/libfips-lib-curve25519.o",
- "crypto/ec/libfips-lib-ec2_oct.o",
- "crypto/ec/libfips-lib-ec2_smpl.o",
- "crypto/ec/libfips-lib-ec_asn1.o",
- "crypto/ec/libfips-lib-ec_backend.o",
- "crypto/ec/libfips-lib-ec_check.o",
- "crypto/ec/libfips-lib-ec_curve.o",
- "crypto/ec/libfips-lib-ec_cvt.o",
- "crypto/ec/libfips-lib-ec_key.o",
- "crypto/ec/libfips-lib-ec_kmeth.o",
- "crypto/ec/libfips-lib-ec_lib.o",
- "crypto/ec/libfips-lib-ec_mult.o",
- "crypto/ec/libfips-lib-ec_oct.o",
- "crypto/ec/libfips-lib-ecdh_kdf.o",
- "crypto/ec/libfips-lib-ecdh_ossl.o",
- "crypto/ec/libfips-lib-ecdsa_ossl.o",
- "crypto/ec/libfips-lib-ecdsa_sign.o",
- "crypto/ec/libfips-lib-ecdsa_vrf.o",
- "crypto/ec/libfips-lib-ecp_mont.o",
- "crypto/ec/libfips-lib-ecp_nist.o",
- "crypto/ec/libfips-lib-ecp_nistp224.o",
- "crypto/ec/libfips-lib-ecp_nistp256.o",
- "crypto/ec/libfips-lib-ecp_nistp384.o",
- "crypto/ec/libfips-lib-ecp_nistp521.o",
- "crypto/ec/libfips-lib-ecp_nistputil.o",
- "crypto/ec/libfips-lib-ecp_nistz256-x86_64.o",
- "crypto/ec/libfips-lib-ecp_nistz256.o",
- "crypto/ec/libfips-lib-ecp_oct.o",
- "crypto/ec/libfips-lib-ecp_smpl.o",
- "crypto/ec/libfips-lib-ecx_backend.o",
- "crypto/ec/libfips-lib-ecx_key.o",
- "crypto/ec/libfips-lib-x25519-x86_64.o"
+ "crypto/ec/libcrypto-shlib-x25519-x86_64.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -10844,43 +10670,33 @@ our %unified_info = (
"crypto/ec/curve448/libcrypto-shlib-curve448_tables.o",
"crypto/ec/curve448/libcrypto-shlib-eddsa.o",
"crypto/ec/curve448/libcrypto-shlib-f_generic.o",
- "crypto/ec/curve448/libcrypto-shlib-scalar.o",
- "crypto/ec/curve448/libfips-lib-curve448.o",
- "crypto/ec/curve448/libfips-lib-curve448_tables.o",
- "crypto/ec/curve448/libfips-lib-eddsa.o",
- "crypto/ec/curve448/libfips-lib-f_generic.o",
- "crypto/ec/curve448/libfips-lib-scalar.o"
+ "crypto/ec/curve448/libcrypto-shlib-scalar.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
"crypto/ec/curve448/arch_32" => {
"deps" => [
"crypto/ec/curve448/arch_32/libcrypto-lib-f_impl32.o",
- "crypto/ec/curve448/arch_32/libcrypto-shlib-f_impl32.o",
- "crypto/ec/curve448/arch_32/libfips-lib-f_impl32.o"
+ "crypto/ec/curve448/arch_32/libcrypto-shlib-f_impl32.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
"crypto/ec/curve448/arch_64" => {
"deps" => [
"crypto/ec/curve448/arch_64/libcrypto-lib-f_impl64.o",
- "crypto/ec/curve448/arch_64/libcrypto-shlib-f_impl64.o",
- "crypto/ec/curve448/arch_64/libfips-lib-f_impl64.o"
+ "crypto/ec/curve448/arch_64/libcrypto-shlib-f_impl64.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -11163,36 +10979,11 @@ our %unified_info = (
"crypto/evp/libcrypto-shlib-pmeth_lib.o",
"crypto/evp/libcrypto-shlib-s_lib.o",
"crypto/evp/libcrypto-shlib-signature.o",
- "crypto/evp/libcrypto-shlib-skeymgmt_meth.o",
- "crypto/evp/libfips-lib-asymcipher.o",
- "crypto/evp/libfips-lib-dh_support.o",
- "crypto/evp/libfips-lib-digest.o",
- "crypto/evp/libfips-lib-ec_support.o",
- "crypto/evp/libfips-lib-evp_enc.o",
- "crypto/evp/libfips-lib-evp_fetch.o",
- "crypto/evp/libfips-lib-evp_lib.o",
- "crypto/evp/libfips-lib-evp_rand.o",
- "crypto/evp/libfips-lib-evp_utils.o",
- "crypto/evp/libfips-lib-exchange.o",
- "crypto/evp/libfips-lib-kdf_lib.o",
- "crypto/evp/libfips-lib-kdf_meth.o",
- "crypto/evp/libfips-lib-kem.o",
- "crypto/evp/libfips-lib-keymgmt_lib.o",
- "crypto/evp/libfips-lib-keymgmt_meth.o",
- "crypto/evp/libfips-lib-mac_lib.o",
- "crypto/evp/libfips-lib-mac_meth.o",
- "crypto/evp/libfips-lib-p_lib.o",
- "crypto/evp/libfips-lib-pmeth_check.o",
- "crypto/evp/libfips-lib-pmeth_gn.o",
- "crypto/evp/libfips-lib-pmeth_lib.o",
- "crypto/evp/libfips-lib-s_lib.o",
- "crypto/evp/libfips-lib-signature.o",
- "crypto/evp/libfips-lib-skeymgmt_meth.o"
+ "crypto/evp/libcrypto-shlib-skeymgmt_meth.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -11211,19 +11002,11 @@ our %unified_info = (
"crypto/ffc/libcrypto-shlib-ffc_key_validate.o",
"crypto/ffc/libcrypto-shlib-ffc_params.o",
"crypto/ffc/libcrypto-shlib-ffc_params_generate.o",
- "crypto/ffc/libcrypto-shlib-ffc_params_validate.o",
- "crypto/ffc/libfips-lib-ffc_backend.o",
- "crypto/ffc/libfips-lib-ffc_dh.o",
- "crypto/ffc/libfips-lib-ffc_key_generate.o",
- "crypto/ffc/libfips-lib-ffc_key_validate.o",
- "crypto/ffc/libfips-lib-ffc_params.o",
- "crypto/ffc/libfips-lib-ffc_params_generate.o",
- "crypto/ffc/libfips-lib-ffc_params_validate.o"
+ "crypto/ffc/libcrypto-shlib-ffc_params_validate.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -11233,28 +11016,23 @@ our %unified_info = (
"crypto/hashtable/libcrypto-lib-hashtable.o",
"crypto/hashtable/libcrypto-shlib-hashfunc.o",
"crypto/hashtable/libcrypto-shlib-hashtable.o",
- "crypto/hashtable/libssl-shlib-hashfunc.o",
- "crypto/hashtable/libfips-lib-hashfunc.o",
- "crypto/hashtable/libfips-lib-hashtable.o"
+ "crypto/hashtable/libssl-shlib-hashfunc.o"
],
"products" => {
"lib" => [
"libcrypto",
- "libssl",
- "providers/libfips.a"
+ "libssl"
]
}
},
"crypto/hmac" => {
"deps" => [
"crypto/hmac/libcrypto-lib-hmac.o",
- "crypto/hmac/libcrypto-shlib-hmac.o",
- "crypto/hmac/libfips-lib-hmac.o"
+ "crypto/hmac/libcrypto-shlib-hmac.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -11302,13 +11080,11 @@ our %unified_info = (
"crypto/lhash/libcrypto-lib-lh_stats.o",
"crypto/lhash/libcrypto-lib-lhash.o",
"crypto/lhash/libcrypto-shlib-lh_stats.o",
- "crypto/lhash/libcrypto-shlib-lhash.o",
- "crypto/lhash/libfips-lib-lhash.o"
+ "crypto/lhash/libcrypto-shlib-lhash.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -11334,11 +11110,16 @@ our %unified_info = (
"crypto/md5/libcrypto-shlib-md5-x86_64.o",
"crypto/md5/libcrypto-shlib-md5_dgst.o",
"crypto/md5/libcrypto-shlib-md5_one.o",
- "crypto/md5/libcrypto-shlib-md5_sha1.o"
+ "crypto/md5/libcrypto-shlib-md5_sha1.o",
+ "crypto/md5/liblegacy-lib-md5-x86_64.o",
+ "crypto/md5/liblegacy-lib-md5_dgst.o",
+ "crypto/md5/liblegacy-lib-md5_one.o",
+ "crypto/md5/liblegacy-lib-md5_sha1.o"
],
"products" => {
"lib" => [
- "libcrypto"
+ "libcrypto",
+ "providers/liblegacy.a"
]
}
},
@@ -11359,33 +11140,22 @@ our %unified_info = (
"crypto/ml_dsa/libcrypto-shlib-ml_dsa_ntt.o",
"crypto/ml_dsa/libcrypto-shlib-ml_dsa_params.o",
"crypto/ml_dsa/libcrypto-shlib-ml_dsa_sample.o",
- "crypto/ml_dsa/libcrypto-shlib-ml_dsa_sign.o",
- "crypto/ml_dsa/libfips-lib-ml_dsa_encoders.o",
- "crypto/ml_dsa/libfips-lib-ml_dsa_key.o",
- "crypto/ml_dsa/libfips-lib-ml_dsa_key_compress.o",
- "crypto/ml_dsa/libfips-lib-ml_dsa_matrix.o",
- "crypto/ml_dsa/libfips-lib-ml_dsa_ntt.o",
- "crypto/ml_dsa/libfips-lib-ml_dsa_params.o",
- "crypto/ml_dsa/libfips-lib-ml_dsa_sample.o",
- "crypto/ml_dsa/libfips-lib-ml_dsa_sign.o"
+ "crypto/ml_dsa/libcrypto-shlib-ml_dsa_sign.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
"crypto/ml_kem" => {
"deps" => [
"crypto/ml_kem/libcrypto-lib-ml_kem.o",
- "crypto/ml_kem/libcrypto-shlib-ml_kem.o",
- "crypto/ml_kem/libfips-lib-ml_kem.o"
+ "crypto/ml_kem/libcrypto-shlib-ml_kem.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -11420,24 +11190,11 @@ our %unified_info = (
"crypto/modes/libcrypto-shlib-siv128.o",
"crypto/modes/libcrypto-shlib-wrap128.o",
"crypto/modes/libcrypto-shlib-xts128.o",
- "crypto/modes/libcrypto-shlib-xts128gb.o",
- "crypto/modes/libfips-lib-aes-gcm-avx512.o",
- "crypto/modes/libfips-lib-aesni-gcm-x86_64.o",
- "crypto/modes/libfips-lib-cbc128.o",
- "crypto/modes/libfips-lib-ccm128.o",
- "crypto/modes/libfips-lib-cfb128.o",
- "crypto/modes/libfips-lib-ctr128.o",
- "crypto/modes/libfips-lib-gcm128.o",
- "crypto/modes/libfips-lib-ghash-x86_64.o",
- "crypto/modes/libfips-lib-ofb128.o",
- "crypto/modes/libfips-lib-wrap128.o",
- "crypto/modes/libfips-lib-xts128.o",
- "crypto/modes/libfips-lib-xts128gb.o"
+ "crypto/modes/libcrypto-shlib-xts128gb.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -11616,17 +11373,11 @@ our %unified_info = (
"crypto/property/libcrypto-shlib-property_err.o",
"crypto/property/libcrypto-shlib-property_parse.o",
"crypto/property/libcrypto-shlib-property_query.o",
- "crypto/property/libcrypto-shlib-property_string.o",
- "crypto/property/libfips-lib-defn_cache.o",
- "crypto/property/libfips-lib-property.o",
- "crypto/property/libfips-lib-property_parse.o",
- "crypto/property/libfips-lib-property_query.o",
- "crypto/property/libfips-lib-property_string.o"
+ "crypto/property/libcrypto-shlib-property_string.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -11647,13 +11398,11 @@ our %unified_info = (
"crypto/rand/libcrypto-shlib-rand_meth.o",
"crypto/rand/libcrypto-shlib-rand_pool.o",
"crypto/rand/libcrypto-shlib-rand_uniform.o",
- "crypto/rand/libcrypto-shlib-randfile.o",
- "crypto/rand/libfips-lib-rand_lib.o"
+ "crypto/rand/libcrypto-shlib-randfile.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -11681,11 +11430,14 @@ our %unified_info = (
"crypto/rc4/libcrypto-lib-rc4-md5-x86_64.o",
"crypto/rc4/libcrypto-lib-rc4-x86_64.o",
"crypto/rc4/libcrypto-shlib-rc4-md5-x86_64.o",
- "crypto/rc4/libcrypto-shlib-rc4-x86_64.o"
+ "crypto/rc4/libcrypto-shlib-rc4-x86_64.o",
+ "crypto/rc4/liblegacy-lib-rc4-md5-x86_64.o",
+ "crypto/rc4/liblegacy-lib-rc4-x86_64.o"
],
"products" => {
"lib" => [
- "libcrypto"
+ "libcrypto",
+ "providers/liblegacy.a"
]
}
},
@@ -11755,29 +11507,11 @@ our %unified_info = (
"crypto/rsa/libcrypto-shlib-rsa_sp800_56b_check.o",
"crypto/rsa/libcrypto-shlib-rsa_sp800_56b_gen.o",
"crypto/rsa/libcrypto-shlib-rsa_x931.o",
- "crypto/rsa/libcrypto-shlib-rsa_x931g.o",
- "crypto/rsa/libfips-lib-rsa_acvp_test_params.o",
- "crypto/rsa/libfips-lib-rsa_backend.o",
- "crypto/rsa/libfips-lib-rsa_chk.o",
- "crypto/rsa/libfips-lib-rsa_crpt.o",
- "crypto/rsa/libfips-lib-rsa_gen.o",
- "crypto/rsa/libfips-lib-rsa_lib.o",
- "crypto/rsa/libfips-lib-rsa_mp_names.o",
- "crypto/rsa/libfips-lib-rsa_none.o",
- "crypto/rsa/libfips-lib-rsa_oaep.o",
- "crypto/rsa/libfips-lib-rsa_ossl.o",
- "crypto/rsa/libfips-lib-rsa_pk1.o",
- "crypto/rsa/libfips-lib-rsa_pss.o",
- "crypto/rsa/libfips-lib-rsa_schemes.o",
- "crypto/rsa/libfips-lib-rsa_sign.o",
- "crypto/rsa/libfips-lib-rsa_sp800_56b_check.o",
- "crypto/rsa/libfips-lib-rsa_sp800_56b_gen.o",
- "crypto/rsa/libfips-lib-rsa_x931.o"
+ "crypto/rsa/libcrypto-shlib-rsa_x931g.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -11823,22 +11557,11 @@ our %unified_info = (
"crypto/sha/libcrypto-shlib-sha256.o",
"crypto/sha/libcrypto-shlib-sha3.o",
"crypto/sha/libcrypto-shlib-sha512-x86_64.o",
- "crypto/sha/libcrypto-shlib-sha512.o",
- "crypto/sha/libfips-lib-keccak1600-x86_64.o",
- "crypto/sha/libfips-lib-sha1-mb-x86_64.o",
- "crypto/sha/libfips-lib-sha1-x86_64.o",
- "crypto/sha/libfips-lib-sha1dgst.o",
- "crypto/sha/libfips-lib-sha256-mb-x86_64.o",
- "crypto/sha/libfips-lib-sha256-x86_64.o",
- "crypto/sha/libfips-lib-sha256.o",
- "crypto/sha/libfips-lib-sha3.o",
- "crypto/sha/libfips-lib-sha512-x86_64.o",
- "crypto/sha/libfips-lib-sha512.o"
+ "crypto/sha/libcrypto-shlib-sha512.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -11876,22 +11599,11 @@ our %unified_info = (
"crypto/slh_dsa/libcrypto-shlib-slh_hypertree.o",
"crypto/slh_dsa/libcrypto-shlib-slh_params.o",
"crypto/slh_dsa/libcrypto-shlib-slh_wots.o",
- "crypto/slh_dsa/libcrypto-shlib-slh_xmss.o",
- "crypto/slh_dsa/libfips-lib-slh_adrs.o",
- "crypto/slh_dsa/libfips-lib-slh_dsa.o",
- "crypto/slh_dsa/libfips-lib-slh_dsa_hash_ctx.o",
- "crypto/slh_dsa/libfips-lib-slh_dsa_key.o",
- "crypto/slh_dsa/libfips-lib-slh_fors.o",
- "crypto/slh_dsa/libfips-lib-slh_hash.o",
- "crypto/slh_dsa/libfips-lib-slh_hypertree.o",
- "crypto/slh_dsa/libfips-lib-slh_params.o",
- "crypto/slh_dsa/libfips-lib-slh_wots.o",
- "crypto/slh_dsa/libfips-lib-slh_xmss.o"
+ "crypto/slh_dsa/libcrypto-shlib-slh_xmss.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -11911,13 +11623,11 @@ our %unified_info = (
"crypto/stack" => {
"deps" => [
"crypto/stack/libcrypto-lib-stack.o",
- "crypto/stack/libcrypto-shlib-stack.o",
- "crypto/stack/libfips-lib-stack.o"
+ "crypto/stack/libcrypto-shlib-stack.o"
],
"products" => {
"lib" => [
- "libcrypto",
- "providers/libfips.a"
+ "libcrypto"
]
}
},
@@ -11952,16 +11662,12 @@ our %unified_info = (
"crypto/thread/libcrypto-shlib-api.o",
"crypto/thread/libcrypto-shlib-arch.o",
"crypto/thread/libcrypto-shlib-internal.o",
- "crypto/thread/libssl-shlib-arch.o",
- "crypto/thread/libfips-lib-api.o",
- "crypto/thread/libfips-lib-arch.o",
- "crypto/thread/libfips-lib-internal.o"
+ "crypto/thread/libssl-shlib-arch.o"
],
"products" => {
"lib" => [
"libcrypto",
- "libssl",
- "providers/libfips.a"
+ "libssl"
]
}
},
@@ -11975,16 +11681,12 @@ our %unified_info = (
"crypto/thread/arch/libcrypto-shlib-thread_win.o",
"crypto/thread/arch/libssl-shlib-thread_none.o",
"crypto/thread/arch/libssl-shlib-thread_posix.o",
- "crypto/thread/arch/libssl-shlib-thread_win.o",
- "crypto/thread/arch/libfips-lib-thread_none.o",
- "crypto/thread/arch/libfips-lib-thread_posix.o",
- "crypto/thread/arch/libfips-lib-thread_win.o"
+ "crypto/thread/arch/libssl-shlib-thread_win.o"
],
"products" => {
"lib" => [
"libcrypto",
- "libssl",
- "providers/libfips.a"
+ "libssl"
]
}
},
@@ -12317,6 +12019,8 @@ our %unified_info = (
},
"providers" => {
"deps" => [
+ "providers/endecode_test-bin-legacyprov.o",
+ "providers/evp_extra_test-bin-legacyprov.o",
"providers/libcrypto-lib-baseprov.o",
"providers/libcrypto-lib-defltprov.o",
"providers/libcrypto-lib-nullprov.o",
@@ -12329,12 +12033,16 @@ our %unified_info = (
"providers/libdefault.a"
],
"products" => {
+ "bin" => [
+ "test/endecode_test",
+ "test/evp_extra_test"
+ ],
"dso" => [
- "providers/fips"
+ "providers/legacy"
],
"lib" => [
"libcrypto",
- "providers/libfips.a"
+ "providers/liblegacy.a"
]
}
},
@@ -12349,19 +12057,13 @@ our %unified_info = (
"providers/common/libdefault-lib-provider_util.o",
"providers/common/libdefault-lib-securitycheck.o",
"providers/common/libdefault-lib-securitycheck_default.o",
- "providers/common/libfips-lib-bio_prov.o",
- "providers/common/libfips-lib-capabilities.o",
- "providers/common/libfips-lib-digest_to_nid.o",
- "providers/common/libfips-lib-provider_seeding.o",
- "providers/common/libfips-lib-provider_util.o",
- "providers/common/libfips-lib-securitycheck.o",
- "providers/common/libfips-lib-securitycheck_fips.o"
+ "providers/common/liblegacy-lib-provider_util.o"
],
"products" => {
"lib" => [
"providers/libcommon.a",
"providers/libdefault.a",
- "providers/libfips.a"
+ "providers/liblegacy.a"
]
}
},
@@ -12383,43 +12085,22 @@ our %unified_info = (
"providers/common/der/libcommon-lib-der_slh_dsa_gen.o",
"providers/common/der/libcommon-lib-der_slh_dsa_key.o",
"providers/common/der/libcommon-lib-der_wrap_gen.o",
- "providers/common/der/libdefault-lib-der_rsa_sig.o",
- "providers/common/der/libfips-lib-der_rsa_sig.o"
+ "providers/common/der/libdefault-lib-der_rsa_sig.o"
],
"products" => {
"lib" => [
"providers/libcommon.a",
- "providers/libdefault.a",
- "providers/libfips.a"
- ]
- }
- },
- "providers/fips" => {
- "deps" => [
- "providers/fips/fips-dso-fips_entry.o",
- "providers/fips/libfips-lib-fipsindicator.o",
- "providers/fips/libfips-lib-fipsprov.o",
- "providers/fips/libfips-lib-self_test.o",
- "providers/fips/libfips-lib-self_test_kats.o"
- ],
- "products" => {
- "dso" => [
- "providers/fips"
- ],
- "lib" => [
- "providers/libfips.a"
+ "providers/libdefault.a"
]
}
},
"providers/implementations/asymciphers" => {
"deps" => [
- "providers/implementations/asymciphers/libdefault-lib-rsa_enc.o",
- "providers/implementations/asymciphers/libfips-lib-rsa_enc.o"
+ "providers/implementations/asymciphers/libdefault-lib-rsa_enc.o"
],
"products" => {
"lib" => [
- "providers/libdefault.a",
- "providers/libfips.a"
+ "providers/libdefault.a"
]
}
},
@@ -12467,31 +12148,29 @@ our %unified_info = (
"providers/implementations/ciphers/libdefault-lib-cipher_tdes_hw.o",
"providers/implementations/ciphers/libdefault-lib-cipher_tdes_wrap.o",
"providers/implementations/ciphers/libdefault-lib-cipher_tdes_wrap_hw.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_cbc_hmac_sha.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_cbc_hmac_sha1_hw.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_cbc_hmac_sha256_hw.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_ccm.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_ccm_hw.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_gcm.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_gcm_hw.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_hw.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_ocb.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_ocb_hw.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_wrp.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_xts.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_xts_fips.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_xts_hw.o",
- "providers/implementations/ciphers/libfips-lib-cipher_cts.o",
- "providers/implementations/ciphers/libfips-lib-cipher_tdes.o",
- "providers/implementations/ciphers/libfips-lib-cipher_tdes_common.o",
- "providers/implementations/ciphers/libfips-lib-cipher_tdes_hw.o"
+ "providers/implementations/ciphers/liblegacy-lib-cipher_blowfish.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_blowfish_hw.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_cast5.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_cast5_hw.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_des.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_des_hw.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_desx.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_desx_hw.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_rc2.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_rc2_hw.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_rc4.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_rc4_hmac_md5.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_rc4_hmac_md5_hw.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_rc4_hw.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_seed.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_seed_hw.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_tdes_common.o"
],
"products" => {
"lib" => [
"providers/libcommon.a",
"providers/libdefault.a",
- "providers/libfips.a"
+ "providers/liblegacy.a"
]
}
},
@@ -12507,14 +12186,15 @@ our %unified_info = (
"providers/implementations/digests/libdefault-lib-ripemd_prov.o",
"providers/implementations/digests/libdefault-lib-sha2_prov.o",
"providers/implementations/digests/libdefault-lib-sha3_prov.o",
- "providers/implementations/digests/libfips-lib-sha2_prov.o",
- "providers/implementations/digests/libfips-lib-sha3_prov.o"
+ "providers/implementations/digests/liblegacy-lib-md4_prov.o",
+ "providers/implementations/digests/liblegacy-lib-ripemd_prov.o",
+ "providers/implementations/digests/liblegacy-lib-wp_prov.o"
],
"products" => {
"lib" => [
"providers/libcommon.a",
"providers/libdefault.a",
- "providers/libfips.a"
+ "providers/liblegacy.a"
]
}
},
@@ -12546,16 +12226,11 @@ our %unified_info = (
"providers/implementations/exchange/libdefault-lib-dh_exch.o",
"providers/implementations/exchange/libdefault-lib-ecdh_exch.o",
"providers/implementations/exchange/libdefault-lib-ecx_exch.o",
- "providers/implementations/exchange/libdefault-lib-kdf_exch.o",
- "providers/implementations/exchange/libfips-lib-dh_exch.o",
- "providers/implementations/exchange/libfips-lib-ecdh_exch.o",
- "providers/implementations/exchange/libfips-lib-ecx_exch.o",
- "providers/implementations/exchange/libfips-lib-kdf_exch.o"
+ "providers/implementations/exchange/libdefault-lib-kdf_exch.o"
],
"products" => {
"lib" => [
- "providers/libdefault.a",
- "providers/libfips.a"
+ "providers/libdefault.a"
]
}
},
@@ -12574,19 +12249,13 @@ our %unified_info = (
"providers/implementations/kdfs/libdefault-lib-sskdf.o",
"providers/implementations/kdfs/libdefault-lib-tls1_prf.o",
"providers/implementations/kdfs/libdefault-lib-x942kdf.o",
- "providers/implementations/kdfs/libfips-lib-hkdf.o",
- "providers/implementations/kdfs/libfips-lib-kbkdf.o",
- "providers/implementations/kdfs/libfips-lib-pbkdf2.o",
- "providers/implementations/kdfs/libfips-lib-pbkdf2_fips.o",
- "providers/implementations/kdfs/libfips-lib-sshkdf.o",
- "providers/implementations/kdfs/libfips-lib-sskdf.o",
- "providers/implementations/kdfs/libfips-lib-tls1_prf.o",
- "providers/implementations/kdfs/libfips-lib-x942kdf.o"
+ "providers/implementations/kdfs/liblegacy-lib-pbkdf1.o",
+ "providers/implementations/kdfs/liblegacy-lib-pvkkdf.o"
],
"products" => {
"lib" => [
"providers/libdefault.a",
- "providers/libfips.a"
+ "providers/liblegacy.a"
]
}
},
@@ -12598,15 +12267,11 @@ our %unified_info = (
"providers/implementations/kem/libdefault-lib-ml_kem_kem.o",
"providers/implementations/kem/libdefault-lib-mlx_kem.o",
"providers/implementations/kem/libdefault-lib-rsa_kem.o",
- "providers/implementations/kem/libfips-lib-ml_kem_kem.o",
- "providers/implementations/kem/libfips-lib-mlx_kem.o",
- "providers/implementations/kem/libfips-lib-rsa_kem.o",
"providers/implementations/kem/libtemplate-lib-template_kem.o"
],
"products" => {
"lib" => [
"providers/libdefault.a",
- "providers/libfips.a",
"providers/libtemplate.a"
]
}
@@ -12624,23 +12289,11 @@ our %unified_info = (
"providers/implementations/keymgmt/libdefault-lib-mlx_kmgmt.o",
"providers/implementations/keymgmt/libdefault-lib-rsa_kmgmt.o",
"providers/implementations/keymgmt/libdefault-lib-slh_dsa_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-dh_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-dsa_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-ec_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-ecx_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-kdf_legacy_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-mac_legacy_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-ml_dsa_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-ml_kem_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-mlx_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-rsa_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-slh_dsa_kmgmt.o",
"providers/implementations/keymgmt/libtemplate-lib-template_kmgmt.o"
],
"products" => {
"lib" => [
"providers/libdefault.a",
- "providers/libfips.a",
"providers/libtemplate.a"
]
}
@@ -12654,16 +12307,11 @@ our %unified_info = (
"providers/implementations/macs/libdefault-lib-hmac_prov.o",
"providers/implementations/macs/libdefault-lib-kmac_prov.o",
"providers/implementations/macs/libdefault-lib-poly1305_prov.o",
- "providers/implementations/macs/libdefault-lib-siphash_prov.o",
- "providers/implementations/macs/libfips-lib-cmac_prov.o",
- "providers/implementations/macs/libfips-lib-gmac_prov.o",
- "providers/implementations/macs/libfips-lib-hmac_prov.o",
- "providers/implementations/macs/libfips-lib-kmac_prov.o"
+ "providers/implementations/macs/libdefault-lib-siphash_prov.o"
],
"products" => {
"lib" => [
- "providers/libdefault.a",
- "providers/libfips.a"
+ "providers/libdefault.a"
]
}
},
@@ -12675,18 +12323,11 @@ our %unified_info = (
"providers/implementations/rands/libdefault-lib-drbg_hmac.o",
"providers/implementations/rands/libdefault-lib-seed_src.o",
"providers/implementations/rands/libdefault-lib-seed_src_jitter.o",
- "providers/implementations/rands/libdefault-lib-test_rng.o",
- "providers/implementations/rands/libfips-lib-drbg.o",
- "providers/implementations/rands/libfips-lib-drbg_ctr.o",
- "providers/implementations/rands/libfips-lib-drbg_hash.o",
- "providers/implementations/rands/libfips-lib-drbg_hmac.o",
- "providers/implementations/rands/libfips-lib-fips_crng_test.o",
- "providers/implementations/rands/libfips-lib-test_rng.o"
+ "providers/implementations/rands/libdefault-lib-test_rng.o"
],
"products" => {
"lib" => [
- "providers/libdefault.a",
- "providers/libfips.a"
+ "providers/libdefault.a"
]
}
},
@@ -12711,33 +12352,22 @@ our %unified_info = (
"providers/implementations/signature/libdefault-lib-mac_legacy_sig.o",
"providers/implementations/signature/libdefault-lib-ml_dsa_sig.o",
"providers/implementations/signature/libdefault-lib-rsa_sig.o",
- "providers/implementations/signature/libdefault-lib-slh_dsa_sig.o",
- "providers/implementations/signature/libfips-lib-dsa_sig.o",
- "providers/implementations/signature/libfips-lib-ecdsa_sig.o",
- "providers/implementations/signature/libfips-lib-eddsa_sig.o",
- "providers/implementations/signature/libfips-lib-mac_legacy_sig.o",
- "providers/implementations/signature/libfips-lib-ml_dsa_sig.o",
- "providers/implementations/signature/libfips-lib-rsa_sig.o",
- "providers/implementations/signature/libfips-lib-slh_dsa_sig.o"
+ "providers/implementations/signature/libdefault-lib-slh_dsa_sig.o"
],
"products" => {
"lib" => [
- "providers/libdefault.a",
- "providers/libfips.a"
+ "providers/libdefault.a"
]
}
},
"providers/implementations/skeymgmt" => {
"deps" => [
"providers/implementations/skeymgmt/libdefault-lib-aes_skmgmt.o",
- "providers/implementations/skeymgmt/libdefault-lib-generic.o",
- "providers/implementations/skeymgmt/libfips-lib-aes_skmgmt.o",
- "providers/implementations/skeymgmt/libfips-lib-generic.o"
+ "providers/implementations/skeymgmt/libdefault-lib-generic.o"
],
"products" => {
"lib" => [
- "providers/libdefault.a",
- "providers/libfips.a"
+ "providers/libdefault.a"
]
}
},
@@ -12936,6 +12566,7 @@ our %unified_info = (
"ssl/record/methods" => {
"deps" => [
"ssl/record/methods/libssl-lib-dtls_meth.o",
+ "ssl/record/methods/libssl-lib-ktls_meth.o",
"ssl/record/methods/libssl-lib-ssl3_meth.o",
"ssl/record/methods/libssl-lib-tls13_meth.o",
"ssl/record/methods/libssl-lib-tls1_meth.o",
@@ -12943,6 +12574,7 @@ our %unified_info = (
"ssl/record/methods/libssl-lib-tls_multib.o",
"ssl/record/methods/libssl-lib-tlsany_meth.o",
"ssl/record/methods/libssl-shlib-dtls_meth.o",
+ "ssl/record/methods/libssl-shlib-ktls_meth.o",
"ssl/record/methods/libssl-shlib-ssl3_cbc.o",
"ssl/record/methods/libssl-shlib-ssl3_meth.o",
"ssl/record/methods/libssl-shlib-tls13_meth.o",
@@ -12952,15 +12584,13 @@ our %unified_info = (
"ssl/record/methods/libssl-shlib-tls_pad.o",
"ssl/record/methods/libssl-shlib-tlsany_meth.o",
"ssl/record/methods/libcommon-lib-tls_pad.o",
- "ssl/record/methods/libdefault-lib-ssl3_cbc.o",
- "ssl/record/methods/libfips-lib-ssl3_cbc.o"
+ "ssl/record/methods/libdefault-lib-ssl3_cbc.o"
],
"products" => {
"lib" => [
"libssl",
"providers/libcommon.a",
- "providers/libdefault.a",
- "providers/libfips.a"
+ "providers/libdefault.a"
]
}
},
@@ -19584,18 +19214,9 @@ our %unified_info = (
"providers/common/include/prov/der_wrap.h" => [
"providers/common/include/prov/der_wrap.h.in"
],
- "providers/fips.ld" => [
+ "providers/legacy.ld" => [
"util/providers.num"
],
- "providers/fipsmodule.cnf" => [
- "util/mk-fipsmodule-cnf.pl",
- "-module",
- "\$(FIPSMODULE)",
- "-section_name",
- "fips_sect",
- "-key",
- "\$(FIPSKEY)"
- ],
"test/buildtest_aes.c" => [
"test/generate_buildtest.pl",
"aes"
@@ -19640,6 +19261,10 @@ our %unified_info = (
"test/generate_buildtest.pl",
"conf_api"
],
+ "test/buildtest_configuration.c" => [
+ "test/generate_buildtest.pl",
+ "configuration"
+ ],
"test/buildtest_conftypes.c" => [
"test/generate_buildtest.pl",
"conftypes"
@@ -21222,9 +20847,6 @@ our %unified_info = (
"crypto/bn/libcrypto-shlib-bn_exp.o" => [
"crypto"
],
- "crypto/bn/libfips-lib-bn_exp.o" => [
- "crypto"
- ],
"crypto/bn/mips-mont.o" => [
"crypto"
],
@@ -21321,15 +20943,6 @@ our %unified_info = (
"crypto/ec/libcrypto-shlib-ecx_meth.o" => [
"crypto"
],
- "crypto/ec/libfips-lib-ecp_nistp384.o" => [
- "crypto"
- ],
- "crypto/ec/libfips-lib-ecp_nistp521.o" => [
- "crypto"
- ],
- "crypto/ec/libfips-lib-ecx_key.o" => [
- "crypto"
- ],
"crypto/evp/e_aes.o" => [
"crypto",
"crypto/modes"
@@ -21417,6 +21030,9 @@ our %unified_info = (
"crypto/info.o" => [
"crypto"
],
+ "crypto/legacy-dso-cpuid.o" => [
+ "."
+ ],
"crypto/libcrypto-lib-cpuid.o" => [
"."
],
@@ -21435,9 +21051,6 @@ our %unified_info = (
"crypto/libcrypto-shlib-info.o" => [
"crypto"
],
- "crypto/libfips-lib-cpuid.o" => [
- "."
- ],
"crypto/md5/md5-aarch64.o" => [
"crypto"
],
@@ -21474,9 +21087,6 @@ our %unified_info = (
"crypto/modes/libcrypto-shlib-gcm128.o" => [
"crypto"
],
- "crypto/modes/libfips-lib-gcm128.o" => [
- "crypto"
- ],
"crypto/params_idx.c" => [
"util/perl"
],
@@ -21993,9 +21603,6 @@ our %unified_info = (
"providers/common/der/libdefault-lib-der_rsa_sig.o" => [
"providers/common/include/prov"
],
- "providers/common/der/libfips-lib-der_rsa_sig.o" => [
- "providers/common/include/prov"
- ],
"providers/common/include/prov/der_digests.h" => [
"providers/common/der"
],
@@ -22020,9 +21627,6 @@ our %unified_info = (
"providers/common/include/prov/der_wrap.h" => [
"providers/common/der"
],
- "providers/fips" => [
- "include"
- ],
"providers/implementations/encode_decode/encode_key2any.o" => [
"providers/common/include/prov"
],
@@ -22032,9 +21636,6 @@ our %unified_info = (
"providers/implementations/kdfs/libdefault-lib-x942kdf.o" => [
"providers/common/include/prov"
],
- "providers/implementations/kdfs/libfips-lib-x942kdf.o" => [
- "providers/common/include/prov"
- ],
"providers/implementations/kdfs/x942kdf.o" => [
"providers/common/include/prov"
],
@@ -22065,24 +21666,6 @@ our %unified_info = (
"providers/implementations/signature/libdefault-lib-slh_dsa_sig.o" => [
"providers/common/include/prov"
],
- "providers/implementations/signature/libfips-lib-dsa_sig.o" => [
- "providers/common/include/prov"
- ],
- "providers/implementations/signature/libfips-lib-ecdsa_sig.o" => [
- "providers/common/include/prov"
- ],
- "providers/implementations/signature/libfips-lib-eddsa_sig.o" => [
- "providers/common/include/prov"
- ],
- "providers/implementations/signature/libfips-lib-ml_dsa_sig.o" => [
- "providers/common/include/prov"
- ],
- "providers/implementations/signature/libfips-lib-rsa_sig.o" => [
- "providers/common/include/prov"
- ],
- "providers/implementations/signature/libfips-lib-slh_dsa_sig.o" => [
- "providers/common/include/prov"
- ],
"providers/implementations/signature/ml_dsa_sig.o" => [
"providers/common/include/prov"
],
@@ -22095,6 +21678,11 @@ our %unified_info = (
"providers/implementations/signature/sm2_sig.o" => [
"providers/common/include/prov"
],
+ "providers/legacy" => [
+ "include",
+ "providers/implementations/include",
+ "providers/common/include"
+ ],
"providers/libcommon.a" => [
"crypto",
"include",
@@ -22137,10 +21725,6 @@ our %unified_info = (
"include",
"apps/include"
],
- "test/acvp_test" => [
- "include",
- "apps/include"
- ],
"test/aesgcmtest" => [
"include",
"apps/include",
@@ -22301,6 +21885,9 @@ our %unified_info = (
"test/buildtest_c_conf_api" => [
"include"
],
+ "test/buildtest_c_configuration" => [
+ "include"
+ ],
"test/buildtest_c_conftypes" => [
"include"
],
@@ -22686,7 +22273,9 @@ our %unified_info = (
"test/endecode_test" => [
".",
"include",
- "apps/include"
+ "apps/include",
+ "providers/common/include",
+ "providers/implementations/include"
],
"test/endecoder_legacy_test" => [
".",
@@ -23564,7 +23153,7 @@ our %unified_info = (
"libssl",
"providers/libcommon.a",
"providers/libdefault.a",
- "providers/libfips.a",
+ "providers/liblegacy.a",
"providers/libtemplate.a",
"test/libtestutil.a"
],
@@ -24478,7 +24067,7 @@ our %unified_info = (
"engines/loader_attic",
"engines/ossltest",
"engines/padlock",
- "providers/fips",
+ "providers/legacy",
"test/p_minimal",
"test/p_test"
],
@@ -24515,7 +24104,6 @@ our %unified_info = (
"fuzz/v3name-test",
"fuzz/x509-test",
"test/aborttest",
- "test/acvp_test",
"test/aesgcmtest",
"test/afalgtest",
"test/algorithmid_test",
@@ -24557,6 +24145,7 @@ our %unified_info = (
"test/buildtest_c_cmac",
"test/buildtest_c_cmp_util",
"test/buildtest_c_conf_api",
+ "test/buildtest_c_configuration",
"test/buildtest_c_conftypes",
"test/buildtest_c_core",
"test/buildtest_c_core_dispatch",
@@ -25750,6 +25339,7 @@ our %unified_info = (
"ssl/record/libssl-shlib-rec_layer_d1.o",
"ssl/record/libssl-shlib-rec_layer_s3.o",
"ssl/record/methods/libssl-shlib-dtls_meth.o",
+ "ssl/record/methods/libssl-shlib-ktls_meth.o",
"ssl/record/methods/libssl-shlib-ssl3_cbc.o",
"ssl/record/methods/libssl-shlib-ssl3_meth.o",
"ssl/record/methods/libssl-shlib-tls13_meth.o",
@@ -26203,36 +25793,6 @@ our %unified_info = (
"crypto/aes/libcrypto-shlib-vpaes-x86_64.o" => [
"crypto/aes/vpaes-x86_64.s"
],
- "crypto/aes/libfips-lib-aes-x86_64.o" => [
- "crypto/aes/aes-x86_64.s"
- ],
- "crypto/aes/libfips-lib-aes_ecb.o" => [
- "crypto/aes/aes_ecb.c"
- ],
- "crypto/aes/libfips-lib-aes_misc.o" => [
- "crypto/aes/aes_misc.c"
- ],
- "crypto/aes/libfips-lib-aesni-mb-x86_64.o" => [
- "crypto/aes/aesni-mb-x86_64.s"
- ],
- "crypto/aes/libfips-lib-aesni-sha1-x86_64.o" => [
- "crypto/aes/aesni-sha1-x86_64.s"
- ],
- "crypto/aes/libfips-lib-aesni-sha256-x86_64.o" => [
- "crypto/aes/aesni-sha256-x86_64.s"
- ],
- "crypto/aes/libfips-lib-aesni-x86_64.o" => [
- "crypto/aes/aesni-x86_64.s"
- ],
- "crypto/aes/libfips-lib-aesni-xts-avx512.o" => [
- "crypto/aes/aesni-xts-avx512.s"
- ],
- "crypto/aes/libfips-lib-bsaes-x86_64.o" => [
- "crypto/aes/bsaes-x86_64.s"
- ],
- "crypto/aes/libfips-lib-vpaes-x86_64.o" => [
- "crypto/aes/vpaes-x86_64.s"
- ],
"crypto/asn1/asn1_time_test-bin-a_time.o" => [
"crypto/asn1/a_time.c"
],
@@ -26866,9 +26426,6 @@ our %unified_info = (
"crypto/bn/asm/libcrypto-shlib-x86_64-gcc.o" => [
"crypto/bn/asm/x86_64-gcc.c"
],
- "crypto/bn/asm/libfips-lib-x86_64-gcc.o" => [
- "crypto/bn/asm/x86_64-gcc.c"
- ],
"crypto/bn/libcrypto-lib-bn_add.o" => [
"crypto/bn/bn_add.c"
],
@@ -27139,126 +26696,6 @@ our %unified_info = (
"crypto/bn/libcrypto-shlib-x86_64-mont5.o" => [
"crypto/bn/x86_64-mont5.s"
],
- "crypto/bn/libfips-lib-bn_add.o" => [
- "crypto/bn/bn_add.c"
- ],
- "crypto/bn/libfips-lib-bn_blind.o" => [
- "crypto/bn/bn_blind.c"
- ],
- "crypto/bn/libfips-lib-bn_const.o" => [
- "crypto/bn/bn_const.c"
- ],
- "crypto/bn/libfips-lib-bn_conv.o" => [
- "crypto/bn/bn_conv.c"
- ],
- "crypto/bn/libfips-lib-bn_ctx.o" => [
- "crypto/bn/bn_ctx.c"
- ],
- "crypto/bn/libfips-lib-bn_dh.o" => [
- "crypto/bn/bn_dh.c"
- ],
- "crypto/bn/libfips-lib-bn_div.o" => [
- "crypto/bn/bn_div.c"
- ],
- "crypto/bn/libfips-lib-bn_exp.o" => [
- "crypto/bn/bn_exp.c"
- ],
- "crypto/bn/libfips-lib-bn_exp2.o" => [
- "crypto/bn/bn_exp2.c"
- ],
- "crypto/bn/libfips-lib-bn_gcd.o" => [
- "crypto/bn/bn_gcd.c"
- ],
- "crypto/bn/libfips-lib-bn_gf2m.o" => [
- "crypto/bn/bn_gf2m.c"
- ],
- "crypto/bn/libfips-lib-bn_intern.o" => [
- "crypto/bn/bn_intern.c"
- ],
- "crypto/bn/libfips-lib-bn_kron.o" => [
- "crypto/bn/bn_kron.c"
- ],
- "crypto/bn/libfips-lib-bn_lib.o" => [
- "crypto/bn/bn_lib.c"
- ],
- "crypto/bn/libfips-lib-bn_mod.o" => [
- "crypto/bn/bn_mod.c"
- ],
- "crypto/bn/libfips-lib-bn_mont.o" => [
- "crypto/bn/bn_mont.c"
- ],
- "crypto/bn/libfips-lib-bn_mpi.o" => [
- "crypto/bn/bn_mpi.c"
- ],
- "crypto/bn/libfips-lib-bn_mul.o" => [
- "crypto/bn/bn_mul.c"
- ],
- "crypto/bn/libfips-lib-bn_nist.o" => [
- "crypto/bn/bn_nist.c"
- ],
- "crypto/bn/libfips-lib-bn_prime.o" => [
- "crypto/bn/bn_prime.c"
- ],
- "crypto/bn/libfips-lib-bn_rand.o" => [
- "crypto/bn/bn_rand.c"
- ],
- "crypto/bn/libfips-lib-bn_recp.o" => [
- "crypto/bn/bn_recp.c"
- ],
- "crypto/bn/libfips-lib-bn_rsa_fips186_4.o" => [
- "crypto/bn/bn_rsa_fips186_4.c"
- ],
- "crypto/bn/libfips-lib-bn_shift.o" => [
- "crypto/bn/bn_shift.c"
- ],
- "crypto/bn/libfips-lib-bn_sqr.o" => [
- "crypto/bn/bn_sqr.c"
- ],
- "crypto/bn/libfips-lib-bn_sqrt.o" => [
- "crypto/bn/bn_sqrt.c"
- ],
- "crypto/bn/libfips-lib-bn_word.o" => [
- "crypto/bn/bn_word.c"
- ],
- "crypto/bn/libfips-lib-rsaz-2k-avx512.o" => [
- "crypto/bn/rsaz-2k-avx512.s"
- ],
- "crypto/bn/libfips-lib-rsaz-2k-avxifma.o" => [
- "crypto/bn/rsaz-2k-avxifma.s"
- ],
- "crypto/bn/libfips-lib-rsaz-3k-avx512.o" => [
- "crypto/bn/rsaz-3k-avx512.s"
- ],
- "crypto/bn/libfips-lib-rsaz-3k-avxifma.o" => [
- "crypto/bn/rsaz-3k-avxifma.s"
- ],
- "crypto/bn/libfips-lib-rsaz-4k-avx512.o" => [
- "crypto/bn/rsaz-4k-avx512.s"
- ],
- "crypto/bn/libfips-lib-rsaz-4k-avxifma.o" => [
- "crypto/bn/rsaz-4k-avxifma.s"
- ],
- "crypto/bn/libfips-lib-rsaz-avx2.o" => [
- "crypto/bn/rsaz-avx2.s"
- ],
- "crypto/bn/libfips-lib-rsaz-x86_64.o" => [
- "crypto/bn/rsaz-x86_64.s"
- ],
- "crypto/bn/libfips-lib-rsaz_exp.o" => [
- "crypto/bn/rsaz_exp.c"
- ],
- "crypto/bn/libfips-lib-rsaz_exp_x2.o" => [
- "crypto/bn/rsaz_exp_x2.c"
- ],
- "crypto/bn/libfips-lib-x86_64-gf2m.o" => [
- "crypto/bn/x86_64-gf2m.s"
- ],
- "crypto/bn/libfips-lib-x86_64-mont.o" => [
- "crypto/bn/x86_64-mont.s"
- ],
- "crypto/bn/libfips-lib-x86_64-mont5.o" => [
- "crypto/bn/x86_64-mont5.s"
- ],
"crypto/buffer/libcrypto-lib-buf_err.o" => [
"crypto/buffer/buf_err.c"
],
@@ -27271,9 +26708,6 @@ our %unified_info = (
"crypto/buffer/libcrypto-shlib-buffer.o" => [
"crypto/buffer/buffer.c"
],
- "crypto/buffer/libfips-lib-buffer.o" => [
- "crypto/buffer/buffer.c"
- ],
"crypto/ca_internals_test-bin-ctype.o" => [
"crypto/ctype.c"
],
@@ -27355,9 +26789,6 @@ our %unified_info = (
"crypto/cmac/libcrypto-shlib-cmac.o" => [
"crypto/cmac/cmac.c"
],
- "crypto/cmac/libfips-lib-cmac.o" => [
- "crypto/cmac/cmac.c"
- ],
"crypto/cmp/libcrypto-lib-cmp_asn.o" => [
"crypto/cmp/cmp_asn.c"
],
@@ -27814,18 +27245,12 @@ our %unified_info = (
"crypto/des/libcrypto-shlib-xcbc_enc.o" => [
"crypto/des/xcbc_enc.c"
],
- "crypto/des/libfips-lib-des_enc.o" => [
+ "crypto/des/liblegacy-lib-des_enc.o" => [
"crypto/des/des_enc.c"
],
- "crypto/des/libfips-lib-ecb3_enc.o" => [
- "crypto/des/ecb3_enc.c"
- ],
- "crypto/des/libfips-lib-fcrypt_b.o" => [
+ "crypto/des/liblegacy-lib-fcrypt_b.o" => [
"crypto/des/fcrypt_b.c"
],
- "crypto/des/libfips-lib-set_key.o" => [
- "crypto/des/set_key.c"
- ],
"crypto/dh/libcrypto-lib-dh_ameth.o" => [
"crypto/dh/dh_ameth.c"
],
@@ -27916,27 +27341,6 @@ our %unified_info = (
"crypto/dh/libcrypto-shlib-dh_rfc5114.o" => [
"crypto/dh/dh_rfc5114.c"
],
- "crypto/dh/libfips-lib-dh_backend.o" => [
- "crypto/dh/dh_backend.c"
- ],
- "crypto/dh/libfips-lib-dh_check.o" => [
- "crypto/dh/dh_check.c"
- ],
- "crypto/dh/libfips-lib-dh_gen.o" => [
- "crypto/dh/dh_gen.c"
- ],
- "crypto/dh/libfips-lib-dh_group_params.o" => [
- "crypto/dh/dh_group_params.c"
- ],
- "crypto/dh/libfips-lib-dh_kdf.o" => [
- "crypto/dh/dh_kdf.c"
- ],
- "crypto/dh/libfips-lib-dh_key.o" => [
- "crypto/dh/dh_key.c"
- ],
- "crypto/dh/libfips-lib-dh_lib.o" => [
- "crypto/dh/dh_lib.c"
- ],
"crypto/dsa/libcrypto-lib-dsa_ameth.o" => [
"crypto/dsa/dsa_ameth.c"
],
@@ -28027,30 +27431,6 @@ our %unified_info = (
"crypto/dsa/libcrypto-shlib-dsa_vrf.o" => [
"crypto/dsa/dsa_vrf.c"
],
- "crypto/dsa/libfips-lib-dsa_backend.o" => [
- "crypto/dsa/dsa_backend.c"
- ],
- "crypto/dsa/libfips-lib-dsa_check.o" => [
- "crypto/dsa/dsa_check.c"
- ],
- "crypto/dsa/libfips-lib-dsa_gen.o" => [
- "crypto/dsa/dsa_gen.c"
- ],
- "crypto/dsa/libfips-lib-dsa_key.o" => [
- "crypto/dsa/dsa_key.c"
- ],
- "crypto/dsa/libfips-lib-dsa_lib.o" => [
- "crypto/dsa/dsa_lib.c"
- ],
- "crypto/dsa/libfips-lib-dsa_ossl.o" => [
- "crypto/dsa/dsa_ossl.c"
- ],
- "crypto/dsa/libfips-lib-dsa_sign.o" => [
- "crypto/dsa/dsa_sign.c"
- ],
- "crypto/dsa/libfips-lib-dsa_vrf.o" => [
- "crypto/dsa/dsa_vrf.c"
- ],
"crypto/dso/libcrypto-lib-dso_dl.o" => [
"crypto/dso/dso_dl.c"
],
@@ -28099,18 +27479,12 @@ our %unified_info = (
"crypto/ec/curve448/arch_32/libcrypto-shlib-f_impl32.o" => [
"crypto/ec/curve448/arch_32/f_impl32.c"
],
- "crypto/ec/curve448/arch_32/libfips-lib-f_impl32.o" => [
- "crypto/ec/curve448/arch_32/f_impl32.c"
- ],
"crypto/ec/curve448/arch_64/libcrypto-lib-f_impl64.o" => [
"crypto/ec/curve448/arch_64/f_impl64.c"
],
"crypto/ec/curve448/arch_64/libcrypto-shlib-f_impl64.o" => [
"crypto/ec/curve448/arch_64/f_impl64.c"
],
- "crypto/ec/curve448/arch_64/libfips-lib-f_impl64.o" => [
- "crypto/ec/curve448/arch_64/f_impl64.c"
- ],
"crypto/ec/curve448/libcrypto-lib-curve448.o" => [
"crypto/ec/curve448/curve448.c"
],
@@ -28141,21 +27515,6 @@ our %unified_info = (
"crypto/ec/curve448/libcrypto-shlib-scalar.o" => [
"crypto/ec/curve448/scalar.c"
],
- "crypto/ec/curve448/libfips-lib-curve448.o" => [
- "crypto/ec/curve448/curve448.c"
- ],
- "crypto/ec/curve448/libfips-lib-curve448_tables.o" => [
- "crypto/ec/curve448/curve448_tables.c"
- ],
- "crypto/ec/curve448/libfips-lib-eddsa.o" => [
- "crypto/ec/curve448/eddsa.c"
- ],
- "crypto/ec/curve448/libfips-lib-f_generic.o" => [
- "crypto/ec/curve448/f_generic.c"
- ],
- "crypto/ec/curve448/libfips-lib-scalar.o" => [
- "crypto/ec/curve448/scalar.c"
- ],
"crypto/ec/libcrypto-lib-curve25519.o" => [
"crypto/ec/curve25519.c"
],
@@ -28390,102 +27749,6 @@ our %unified_info = (
"crypto/ec/libcrypto-shlib-x25519-x86_64.o" => [
"crypto/ec/x25519-x86_64.s"
],
- "crypto/ec/libfips-lib-curve25519.o" => [
- "crypto/ec/curve25519.c"
- ],
- "crypto/ec/libfips-lib-ec2_oct.o" => [
- "crypto/ec/ec2_oct.c"
- ],
- "crypto/ec/libfips-lib-ec2_smpl.o" => [
- "crypto/ec/ec2_smpl.c"
- ],
- "crypto/ec/libfips-lib-ec_asn1.o" => [
- "crypto/ec/ec_asn1.c"
- ],
- "crypto/ec/libfips-lib-ec_backend.o" => [
- "crypto/ec/ec_backend.c"
- ],
- "crypto/ec/libfips-lib-ec_check.o" => [
- "crypto/ec/ec_check.c"
- ],
- "crypto/ec/libfips-lib-ec_curve.o" => [
- "crypto/ec/ec_curve.c"
- ],
- "crypto/ec/libfips-lib-ec_cvt.o" => [
- "crypto/ec/ec_cvt.c"
- ],
- "crypto/ec/libfips-lib-ec_key.o" => [
- "crypto/ec/ec_key.c"
- ],
- "crypto/ec/libfips-lib-ec_kmeth.o" => [
- "crypto/ec/ec_kmeth.c"
- ],
- "crypto/ec/libfips-lib-ec_lib.o" => [
- "crypto/ec/ec_lib.c"
- ],
- "crypto/ec/libfips-lib-ec_mult.o" => [
- "crypto/ec/ec_mult.c"
- ],
- "crypto/ec/libfips-lib-ec_oct.o" => [
- "crypto/ec/ec_oct.c"
- ],
- "crypto/ec/libfips-lib-ecdh_kdf.o" => [
- "crypto/ec/ecdh_kdf.c"
- ],
- "crypto/ec/libfips-lib-ecdh_ossl.o" => [
- "crypto/ec/ecdh_ossl.c"
- ],
- "crypto/ec/libfips-lib-ecdsa_ossl.o" => [
- "crypto/ec/ecdsa_ossl.c"
- ],
- "crypto/ec/libfips-lib-ecdsa_sign.o" => [
- "crypto/ec/ecdsa_sign.c"
- ],
- "crypto/ec/libfips-lib-ecdsa_vrf.o" => [
- "crypto/ec/ecdsa_vrf.c"
- ],
- "crypto/ec/libfips-lib-ecp_mont.o" => [
- "crypto/ec/ecp_mont.c"
- ],
- "crypto/ec/libfips-lib-ecp_nist.o" => [
- "crypto/ec/ecp_nist.c"
- ],
- "crypto/ec/libfips-lib-ecp_nistp224.o" => [
- "crypto/ec/ecp_nistp224.c"
- ],
- "crypto/ec/libfips-lib-ecp_nistp256.o" => [
- "crypto/ec/ecp_nistp256.c"
- ],
- "crypto/ec/libfips-lib-ecp_nistp384.o" => [
- "crypto/ec/ecp_nistp384.c"
- ],
- "crypto/ec/libfips-lib-ecp_nistp521.o" => [
- "crypto/ec/ecp_nistp521.c"
- ],
- "crypto/ec/libfips-lib-ecp_nistputil.o" => [
- "crypto/ec/ecp_nistputil.c"
- ],
- "crypto/ec/libfips-lib-ecp_nistz256-x86_64.o" => [
- "crypto/ec/ecp_nistz256-x86_64.s"
- ],
- "crypto/ec/libfips-lib-ecp_nistz256.o" => [
- "crypto/ec/ecp_nistz256.c"
- ],
- "crypto/ec/libfips-lib-ecp_oct.o" => [
- "crypto/ec/ecp_oct.c"
- ],
- "crypto/ec/libfips-lib-ecp_smpl.o" => [
- "crypto/ec/ecp_smpl.c"
- ],
- "crypto/ec/libfips-lib-ecx_backend.o" => [
- "crypto/ec/ecx_backend.c"
- ],
- "crypto/ec/libfips-lib-ecx_key.o" => [
- "crypto/ec/ecx_key.c"
- ],
- "crypto/ec/libfips-lib-x25519-x86_64.o" => [
- "crypto/ec/x25519-x86_64.s"
- ],
"crypto/encode_decode/libcrypto-lib-decoder_err.o" => [
"crypto/encode_decode/decoder_err.c"
],
@@ -29212,78 +28475,6 @@ our %unified_info = (
"crypto/evp/libcrypto-shlib-skeymgmt_meth.o" => [
"crypto/evp/skeymgmt_meth.c"
],
- "crypto/evp/libfips-lib-asymcipher.o" => [
- "crypto/evp/asymcipher.c"
- ],
- "crypto/evp/libfips-lib-dh_support.o" => [
- "crypto/evp/dh_support.c"
- ],
- "crypto/evp/libfips-lib-digest.o" => [
- "crypto/evp/digest.c"
- ],
- "crypto/evp/libfips-lib-ec_support.o" => [
- "crypto/evp/ec_support.c"
- ],
- "crypto/evp/libfips-lib-evp_enc.o" => [
- "crypto/evp/evp_enc.c"
- ],
- "crypto/evp/libfips-lib-evp_fetch.o" => [
- "crypto/evp/evp_fetch.c"
- ],
- "crypto/evp/libfips-lib-evp_lib.o" => [
- "crypto/evp/evp_lib.c"
- ],
- "crypto/evp/libfips-lib-evp_rand.o" => [
- "crypto/evp/evp_rand.c"
- ],
- "crypto/evp/libfips-lib-evp_utils.o" => [
- "crypto/evp/evp_utils.c"
- ],
- "crypto/evp/libfips-lib-exchange.o" => [
- "crypto/evp/exchange.c"
- ],
- "crypto/evp/libfips-lib-kdf_lib.o" => [
- "crypto/evp/kdf_lib.c"
- ],
- "crypto/evp/libfips-lib-kdf_meth.o" => [
- "crypto/evp/kdf_meth.c"
- ],
- "crypto/evp/libfips-lib-kem.o" => [
- "crypto/evp/kem.c"
- ],
- "crypto/evp/libfips-lib-keymgmt_lib.o" => [
- "crypto/evp/keymgmt_lib.c"
- ],
- "crypto/evp/libfips-lib-keymgmt_meth.o" => [
- "crypto/evp/keymgmt_meth.c"
- ],
- "crypto/evp/libfips-lib-mac_lib.o" => [
- "crypto/evp/mac_lib.c"
- ],
- "crypto/evp/libfips-lib-mac_meth.o" => [
- "crypto/evp/mac_meth.c"
- ],
- "crypto/evp/libfips-lib-p_lib.o" => [
- "crypto/evp/p_lib.c"
- ],
- "crypto/evp/libfips-lib-pmeth_check.o" => [
- "crypto/evp/pmeth_check.c"
- ],
- "crypto/evp/libfips-lib-pmeth_gn.o" => [
- "crypto/evp/pmeth_gn.c"
- ],
- "crypto/evp/libfips-lib-pmeth_lib.o" => [
- "crypto/evp/pmeth_lib.c"
- ],
- "crypto/evp/libfips-lib-s_lib.o" => [
- "crypto/evp/s_lib.c"
- ],
- "crypto/evp/libfips-lib-signature.o" => [
- "crypto/evp/signature.c"
- ],
- "crypto/evp/libfips-lib-skeymgmt_meth.o" => [
- "crypto/evp/skeymgmt_meth.c"
- ],
"crypto/ffc/libcrypto-lib-ffc_backend.o" => [
"crypto/ffc/ffc_backend.c"
],
@@ -29326,27 +28517,6 @@ our %unified_info = (
"crypto/ffc/libcrypto-shlib-ffc_params_validate.o" => [
"crypto/ffc/ffc_params_validate.c"
],
- "crypto/ffc/libfips-lib-ffc_backend.o" => [
- "crypto/ffc/ffc_backend.c"
- ],
- "crypto/ffc/libfips-lib-ffc_dh.o" => [
- "crypto/ffc/ffc_dh.c"
- ],
- "crypto/ffc/libfips-lib-ffc_key_generate.o" => [
- "crypto/ffc/ffc_key_generate.c"
- ],
- "crypto/ffc/libfips-lib-ffc_key_validate.o" => [
- "crypto/ffc/ffc_key_validate.c"
- ],
- "crypto/ffc/libfips-lib-ffc_params.o" => [
- "crypto/ffc/ffc_params.c"
- ],
- "crypto/ffc/libfips-lib-ffc_params_generate.o" => [
- "crypto/ffc/ffc_params_generate.c"
- ],
- "crypto/ffc/libfips-lib-ffc_params_validate.o" => [
- "crypto/ffc/ffc_params_validate.c"
- ],
"crypto/hashtable/libcrypto-lib-hashfunc.o" => [
"crypto/hashtable/hashfunc.c"
],
@@ -29359,12 +28529,6 @@ our %unified_info = (
"crypto/hashtable/libcrypto-shlib-hashtable.o" => [
"crypto/hashtable/hashtable.c"
],
- "crypto/hashtable/libfips-lib-hashfunc.o" => [
- "crypto/hashtable/hashfunc.c"
- ],
- "crypto/hashtable/libfips-lib-hashtable.o" => [
- "crypto/hashtable/hashtable.c"
- ],
"crypto/hashtable/libssl-shlib-hashfunc.o" => [
"crypto/hashtable/hashfunc.c"
],
@@ -29374,9 +28538,6 @@ our %unified_info = (
"crypto/hmac/libcrypto-shlib-hmac.o" => [
"crypto/hmac/hmac.c"
],
- "crypto/hmac/libfips-lib-hmac.o" => [
- "crypto/hmac/hmac.c"
- ],
"crypto/hpke/libcrypto-lib-hpke.o" => [
"crypto/hpke/hpke.c"
],
@@ -29413,6 +28574,15 @@ our %unified_info = (
"crypto/kdf/libcrypto-shlib-kdf_err.o" => [
"crypto/kdf/kdf_err.c"
],
+ "crypto/legacy-dso-cpuid.o" => [
+ "crypto/cpuid.c"
+ ],
+ "crypto/legacy-dso-ctype.o" => [
+ "crypto/ctype.c"
+ ],
+ "crypto/legacy-dso-x86_64cpuid.o" => [
+ "crypto/x86_64cpuid.s"
+ ],
"crypto/lhash/libcrypto-lib-lh_stats.o" => [
"crypto/lhash/lh_stats.c"
],
@@ -29425,9 +28595,6 @@ our %unified_info = (
"crypto/lhash/libcrypto-shlib-lhash.o" => [
"crypto/lhash/lhash.c"
],
- "crypto/lhash/libfips-lib-lhash.o" => [
- "crypto/lhash/lhash.c"
- ],
"crypto/libcrypto-lib-asn1_dsa.o" => [
"crypto/asn1_dsa.c"
],
@@ -29764,96 +28931,6 @@ our %unified_info = (
"crypto/libcrypto-shlib-x86_64cpuid.o" => [
"crypto/x86_64cpuid.s"
],
- "crypto/libfips-lib-asn1_dsa.o" => [
- "crypto/asn1_dsa.c"
- ],
- "crypto/libfips-lib-bsearch.o" => [
- "crypto/bsearch.c"
- ],
- "crypto/libfips-lib-context.o" => [
- "crypto/context.c"
- ],
- "crypto/libfips-lib-core_algorithm.o" => [
- "crypto/core_algorithm.c"
- ],
- "crypto/libfips-lib-core_fetch.o" => [
- "crypto/core_fetch.c"
- ],
- "crypto/libfips-lib-core_namemap.o" => [
- "crypto/core_namemap.c"
- ],
- "crypto/libfips-lib-cpuid.o" => [
- "crypto/cpuid.c"
- ],
- "crypto/libfips-lib-cryptlib.o" => [
- "crypto/cryptlib.c"
- ],
- "crypto/libfips-lib-ctype.o" => [
- "crypto/ctype.c"
- ],
- "crypto/libfips-lib-der_writer.o" => [
- "crypto/der_writer.c"
- ],
- "crypto/libfips-lib-ex_data.o" => [
- "crypto/ex_data.c"
- ],
- "crypto/libfips-lib-initthread.o" => [
- "crypto/initthread.c"
- ],
- "crypto/libfips-lib-o_str.o" => [
- "crypto/o_str.c"
- ],
- "crypto/libfips-lib-packet.o" => [
- "crypto/packet.c"
- ],
- "crypto/libfips-lib-param_build.o" => [
- "crypto/param_build.c"
- ],
- "crypto/libfips-lib-param_build_set.o" => [
- "crypto/param_build_set.c"
- ],
- "crypto/libfips-lib-params.o" => [
- "crypto/params.c"
- ],
- "crypto/libfips-lib-params_dup.o" => [
- "crypto/params_dup.c"
- ],
- "crypto/libfips-lib-params_from_text.o" => [
- "crypto/params_from_text.c"
- ],
- "crypto/libfips-lib-params_idx.o" => [
- "crypto/params_idx.c"
- ],
- "crypto/libfips-lib-provider_core.o" => [
- "crypto/provider_core.c"
- ],
- "crypto/libfips-lib-provider_predefined.o" => [
- "crypto/provider_predefined.c"
- ],
- "crypto/libfips-lib-self_test_core.o" => [
- "crypto/self_test_core.c"
- ],
- "crypto/libfips-lib-sparse_array.o" => [
- "crypto/sparse_array.c"
- ],
- "crypto/libfips-lib-threads_lib.o" => [
- "crypto/threads_lib.c"
- ],
- "crypto/libfips-lib-threads_none.o" => [
- "crypto/threads_none.c"
- ],
- "crypto/libfips-lib-threads_pthread.o" => [
- "crypto/threads_pthread.c"
- ],
- "crypto/libfips-lib-threads_win.o" => [
- "crypto/threads_win.c"
- ],
- "crypto/libfips-lib-time.o" => [
- "crypto/time.c"
- ],
- "crypto/libfips-lib-x86_64cpuid.o" => [
- "crypto/x86_64cpuid.s"
- ],
"crypto/libssl-shlib-ctype.o" => [
"crypto/ctype.c"
],
@@ -29905,6 +28982,18 @@ our %unified_info = (
"crypto/md5/libcrypto-shlib-md5_sha1.o" => [
"crypto/md5/md5_sha1.c"
],
+ "crypto/md5/liblegacy-lib-md5-x86_64.o" => [
+ "crypto/md5/md5-x86_64.s"
+ ],
+ "crypto/md5/liblegacy-lib-md5_dgst.o" => [
+ "crypto/md5/md5_dgst.c"
+ ],
+ "crypto/md5/liblegacy-lib-md5_one.o" => [
+ "crypto/md5/md5_one.c"
+ ],
+ "crypto/md5/liblegacy-lib-md5_sha1.o" => [
+ "crypto/md5/md5_sha1.c"
+ ],
"crypto/ml_dsa/libcrypto-lib-ml_dsa_encoders.o" => [
"crypto/ml_dsa/ml_dsa_encoders.c"
],
@@ -29953,39 +29042,12 @@ our %unified_info = (
"crypto/ml_dsa/libcrypto-shlib-ml_dsa_sign.o" => [
"crypto/ml_dsa/ml_dsa_sign.c"
],
- "crypto/ml_dsa/libfips-lib-ml_dsa_encoders.o" => [
- "crypto/ml_dsa/ml_dsa_encoders.c"
- ],
- "crypto/ml_dsa/libfips-lib-ml_dsa_key.o" => [
- "crypto/ml_dsa/ml_dsa_key.c"
- ],
- "crypto/ml_dsa/libfips-lib-ml_dsa_key_compress.o" => [
- "crypto/ml_dsa/ml_dsa_key_compress.c"
- ],
- "crypto/ml_dsa/libfips-lib-ml_dsa_matrix.o" => [
- "crypto/ml_dsa/ml_dsa_matrix.c"
- ],
- "crypto/ml_dsa/libfips-lib-ml_dsa_ntt.o" => [
- "crypto/ml_dsa/ml_dsa_ntt.c"
- ],
- "crypto/ml_dsa/libfips-lib-ml_dsa_params.o" => [
- "crypto/ml_dsa/ml_dsa_params.c"
- ],
- "crypto/ml_dsa/libfips-lib-ml_dsa_sample.o" => [
- "crypto/ml_dsa/ml_dsa_sample.c"
- ],
- "crypto/ml_dsa/libfips-lib-ml_dsa_sign.o" => [
- "crypto/ml_dsa/ml_dsa_sign.c"
- ],
"crypto/ml_kem/libcrypto-lib-ml_kem.o" => [
"crypto/ml_kem/ml_kem.c"
],
"crypto/ml_kem/libcrypto-shlib-ml_kem.o" => [
"crypto/ml_kem/ml_kem.c"
],
- "crypto/ml_kem/libfips-lib-ml_kem.o" => [
- "crypto/ml_kem/ml_kem.c"
- ],
"crypto/modes/libcrypto-lib-aes-gcm-avx512.o" => [
"crypto/modes/aes-gcm-avx512.s"
],
@@ -30076,42 +29138,6 @@ our %unified_info = (
"crypto/modes/libcrypto-shlib-xts128gb.o" => [
"crypto/modes/xts128gb.c"
],
- "crypto/modes/libfips-lib-aes-gcm-avx512.o" => [
- "crypto/modes/aes-gcm-avx512.s"
- ],
- "crypto/modes/libfips-lib-aesni-gcm-x86_64.o" => [
- "crypto/modes/aesni-gcm-x86_64.s"
- ],
- "crypto/modes/libfips-lib-cbc128.o" => [
- "crypto/modes/cbc128.c"
- ],
- "crypto/modes/libfips-lib-ccm128.o" => [
- "crypto/modes/ccm128.c"
- ],
- "crypto/modes/libfips-lib-cfb128.o" => [
- "crypto/modes/cfb128.c"
- ],
- "crypto/modes/libfips-lib-ctr128.o" => [
- "crypto/modes/ctr128.c"
- ],
- "crypto/modes/libfips-lib-gcm128.o" => [
- "crypto/modes/gcm128.c"
- ],
- "crypto/modes/libfips-lib-ghash-x86_64.o" => [
- "crypto/modes/ghash-x86_64.s"
- ],
- "crypto/modes/libfips-lib-ofb128.o" => [
- "crypto/modes/ofb128.c"
- ],
- "crypto/modes/libfips-lib-wrap128.o" => [
- "crypto/modes/wrap128.c"
- ],
- "crypto/modes/libfips-lib-xts128.o" => [
- "crypto/modes/xts128.c"
- ],
- "crypto/modes/libfips-lib-xts128gb.o" => [
- "crypto/modes/xts128gb.c"
- ],
"crypto/objects/libcrypto-lib-o_names.o" => [
"crypto/objects/o_names.c"
],
@@ -30466,21 +29492,6 @@ our %unified_info = (
"crypto/property/libcrypto-shlib-property_string.o" => [
"crypto/property/property_string.c"
],
- "crypto/property/libfips-lib-defn_cache.o" => [
- "crypto/property/defn_cache.c"
- ],
- "crypto/property/libfips-lib-property.o" => [
- "crypto/property/property.c"
- ],
- "crypto/property/libfips-lib-property_parse.o" => [
- "crypto/property/property_parse.c"
- ],
- "crypto/property/libfips-lib-property_query.o" => [
- "crypto/property/property_query.c"
- ],
- "crypto/property/libfips-lib-property_string.o" => [
- "crypto/property/property_string.c"
- ],
"crypto/rand/libcrypto-lib-prov_seed.o" => [
"crypto/rand/prov_seed.c"
],
@@ -30529,9 +29540,6 @@ our %unified_info = (
"crypto/rand/libcrypto-shlib-randfile.o" => [
"crypto/rand/randfile.c"
],
- "crypto/rand/libfips-lib-rand_lib.o" => [
- "crypto/rand/rand_lib.c"
- ],
"crypto/rc2/libcrypto-lib-rc2_cbc.o" => [
"crypto/rc2/rc2_cbc.c"
],
@@ -30574,6 +29582,12 @@ our %unified_info = (
"crypto/rc4/libcrypto-shlib-rc4-x86_64.o" => [
"crypto/rc4/rc4-x86_64.s"
],
+ "crypto/rc4/liblegacy-lib-rc4-md5-x86_64.o" => [
+ "crypto/rc4/rc4-md5-x86_64.s"
+ ],
+ "crypto/rc4/liblegacy-lib-rc4-x86_64.o" => [
+ "crypto/rc4/rc4-x86_64.s"
+ ],
"crypto/ripemd/libcrypto-lib-rmd_dgst.o" => [
"crypto/ripemd/rmd_dgst.c"
],
@@ -30742,57 +29756,6 @@ our %unified_info = (
"crypto/rsa/libcrypto-shlib-rsa_x931g.o" => [
"crypto/rsa/rsa_x931g.c"
],
- "crypto/rsa/libfips-lib-rsa_acvp_test_params.o" => [
- "crypto/rsa/rsa_acvp_test_params.c"
- ],
- "crypto/rsa/libfips-lib-rsa_backend.o" => [
- "crypto/rsa/rsa_backend.c"
- ],
- "crypto/rsa/libfips-lib-rsa_chk.o" => [
- "crypto/rsa/rsa_chk.c"
- ],
- "crypto/rsa/libfips-lib-rsa_crpt.o" => [
- "crypto/rsa/rsa_crpt.c"
- ],
- "crypto/rsa/libfips-lib-rsa_gen.o" => [
- "crypto/rsa/rsa_gen.c"
- ],
- "crypto/rsa/libfips-lib-rsa_lib.o" => [
- "crypto/rsa/rsa_lib.c"
- ],
- "crypto/rsa/libfips-lib-rsa_mp_names.o" => [
- "crypto/rsa/rsa_mp_names.c"
- ],
- "crypto/rsa/libfips-lib-rsa_none.o" => [
- "crypto/rsa/rsa_none.c"
- ],
- "crypto/rsa/libfips-lib-rsa_oaep.o" => [
- "crypto/rsa/rsa_oaep.c"
- ],
- "crypto/rsa/libfips-lib-rsa_ossl.o" => [
- "crypto/rsa/rsa_ossl.c"
- ],
- "crypto/rsa/libfips-lib-rsa_pk1.o" => [
- "crypto/rsa/rsa_pk1.c"
- ],
- "crypto/rsa/libfips-lib-rsa_pss.o" => [
- "crypto/rsa/rsa_pss.c"
- ],
- "crypto/rsa/libfips-lib-rsa_schemes.o" => [
- "crypto/rsa/rsa_schemes.c"
- ],
- "crypto/rsa/libfips-lib-rsa_sign.o" => [
- "crypto/rsa/rsa_sign.c"
- ],
- "crypto/rsa/libfips-lib-rsa_sp800_56b_check.o" => [
- "crypto/rsa/rsa_sp800_56b_check.c"
- ],
- "crypto/rsa/libfips-lib-rsa_sp800_56b_gen.o" => [
- "crypto/rsa/rsa_sp800_56b_gen.c"
- ],
- "crypto/rsa/libfips-lib-rsa_x931.o" => [
- "crypto/rsa/rsa_x931.c"
- ],
"crypto/seed/libcrypto-lib-seed.o" => [
"crypto/seed/seed.c"
],
@@ -30889,36 +29852,6 @@ our %unified_info = (
"crypto/sha/libcrypto-shlib-sha512.o" => [
"crypto/sha/sha512.c"
],
- "crypto/sha/libfips-lib-keccak1600-x86_64.o" => [
- "crypto/sha/keccak1600-x86_64.s"
- ],
- "crypto/sha/libfips-lib-sha1-mb-x86_64.o" => [
- "crypto/sha/sha1-mb-x86_64.s"
- ],
- "crypto/sha/libfips-lib-sha1-x86_64.o" => [
- "crypto/sha/sha1-x86_64.s"
- ],
- "crypto/sha/libfips-lib-sha1dgst.o" => [
- "crypto/sha/sha1dgst.c"
- ],
- "crypto/sha/libfips-lib-sha256-mb-x86_64.o" => [
- "crypto/sha/sha256-mb-x86_64.s"
- ],
- "crypto/sha/libfips-lib-sha256-x86_64.o" => [
- "crypto/sha/sha256-x86_64.s"
- ],
- "crypto/sha/libfips-lib-sha256.o" => [
- "crypto/sha/sha256.c"
- ],
- "crypto/sha/libfips-lib-sha3.o" => [
- "crypto/sha/sha3.c"
- ],
- "crypto/sha/libfips-lib-sha512-x86_64.o" => [
- "crypto/sha/sha512-x86_64.s"
- ],
- "crypto/sha/libfips-lib-sha512.o" => [
- "crypto/sha/sha512.c"
- ],
"crypto/siphash/libcrypto-lib-siphash.o" => [
"crypto/siphash/siphash.c"
],
@@ -30988,36 +29921,6 @@ our %unified_info = (
"crypto/slh_dsa/libcrypto-shlib-slh_xmss.o" => [
"crypto/slh_dsa/slh_xmss.c"
],
- "crypto/slh_dsa/libfips-lib-slh_adrs.o" => [
- "crypto/slh_dsa/slh_adrs.c"
- ],
- "crypto/slh_dsa/libfips-lib-slh_dsa.o" => [
- "crypto/slh_dsa/slh_dsa.c"
- ],
- "crypto/slh_dsa/libfips-lib-slh_dsa_hash_ctx.o" => [
- "crypto/slh_dsa/slh_dsa_hash_ctx.c"
- ],
- "crypto/slh_dsa/libfips-lib-slh_dsa_key.o" => [
- "crypto/slh_dsa/slh_dsa_key.c"
- ],
- "crypto/slh_dsa/libfips-lib-slh_fors.o" => [
- "crypto/slh_dsa/slh_fors.c"
- ],
- "crypto/slh_dsa/libfips-lib-slh_hash.o" => [
- "crypto/slh_dsa/slh_hash.c"
- ],
- "crypto/slh_dsa/libfips-lib-slh_hypertree.o" => [
- "crypto/slh_dsa/slh_hypertree.c"
- ],
- "crypto/slh_dsa/libfips-lib-slh_params.o" => [
- "crypto/slh_dsa/slh_params.c"
- ],
- "crypto/slh_dsa/libfips-lib-slh_wots.o" => [
- "crypto/slh_dsa/slh_wots.c"
- ],
- "crypto/slh_dsa/libfips-lib-slh_xmss.o" => [
- "crypto/slh_dsa/slh_xmss.c"
- ],
"crypto/srp/libcrypto-lib-srp_lib.o" => [
"crypto/srp/srp_lib.c"
],
@@ -31036,9 +29939,6 @@ our %unified_info = (
"crypto/stack/libcrypto-shlib-stack.o" => [
"crypto/stack/stack.c"
],
- "crypto/stack/libfips-lib-stack.o" => [
- "crypto/stack/stack.c"
- ],
"crypto/store/libcrypto-lib-store_err.o" => [
"crypto/store/store_err.c"
],
@@ -31099,15 +29999,6 @@ our %unified_info = (
"crypto/thread/arch/libcrypto-shlib-thread_win.o" => [
"crypto/thread/arch/thread_win.c"
],
- "crypto/thread/arch/libfips-lib-thread_none.o" => [
- "crypto/thread/arch/thread_none.c"
- ],
- "crypto/thread/arch/libfips-lib-thread_posix.o" => [
- "crypto/thread/arch/thread_posix.c"
- ],
- "crypto/thread/arch/libfips-lib-thread_win.o" => [
- "crypto/thread/arch/thread_win.c"
- ],
"crypto/thread/arch/libssl-shlib-thread_none.o" => [
"crypto/thread/arch/thread_none.c"
],
@@ -31135,15 +30026,6 @@ our %unified_info = (
"crypto/thread/libcrypto-shlib-internal.o" => [
"crypto/thread/internal.c"
],
- "crypto/thread/libfips-lib-api.o" => [
- "crypto/thread/api.c"
- ],
- "crypto/thread/libfips-lib-arch.o" => [
- "crypto/thread/arch.c"
- ],
- "crypto/thread/libfips-lib-internal.o" => [
- "crypto/thread/internal.c"
- ],
"crypto/thread/libssl-shlib-arch.o" => [
"crypto/thread/arch.c"
],
@@ -33177,6 +32059,7 @@ our %unified_info = (
"ssl/record/libssl-lib-rec_layer_d1.o",
"ssl/record/libssl-lib-rec_layer_s3.o",
"ssl/record/methods/libssl-lib-dtls_meth.o",
+ "ssl/record/methods/libssl-lib-ktls_meth.o",
"ssl/record/methods/libssl-lib-ssl3_meth.o",
"ssl/record/methods/libssl-lib-tls13_meth.o",
"ssl/record/methods/libssl-lib-tls1_meth.o",
@@ -33247,9 +32130,6 @@ our %unified_info = (
"providers/common/der/libdefault-lib-der_rsa_sig.o" => [
"providers/common/der/der_rsa_sig.c"
],
- "providers/common/der/libfips-lib-der_rsa_sig.o" => [
- "providers/common/der/der_rsa_sig.c"
- ],
"providers/common/libcommon-lib-provider_ctx.o" => [
"providers/common/provider_ctx.c"
],
@@ -33277,52 +32157,18 @@ our %unified_info = (
"providers/common/libdefault-lib-securitycheck_default.o" => [
"providers/common/securitycheck_default.c"
],
- "providers/common/libfips-lib-bio_prov.o" => [
- "providers/common/bio_prov.c"
- ],
- "providers/common/libfips-lib-capabilities.o" => [
- "providers/common/capabilities.c"
- ],
- "providers/common/libfips-lib-digest_to_nid.o" => [
- "providers/common/digest_to_nid.c"
- ],
- "providers/common/libfips-lib-provider_seeding.o" => [
- "providers/common/provider_seeding.c"
- ],
- "providers/common/libfips-lib-provider_util.o" => [
+ "providers/common/liblegacy-lib-provider_util.o" => [
"providers/common/provider_util.c"
],
- "providers/common/libfips-lib-securitycheck.o" => [
- "providers/common/securitycheck.c"
- ],
- "providers/common/libfips-lib-securitycheck_fips.o" => [
- "providers/common/securitycheck_fips.c"
- ],
- "providers/fips" => [
- "providers/fips.ld",
- "providers/fips/fips-dso-fips_entry.o"
- ],
- "providers/fips/fips-dso-fips_entry.o" => [
- "providers/fips/fips_entry.c"
+ "providers/endecode_test-bin-legacyprov.o" => [
+ "providers/legacyprov.c"
],
- "providers/fips/libfips-lib-fipsindicator.o" => [
- "providers/fips/fipsindicator.c"
- ],
- "providers/fips/libfips-lib-fipsprov.o" => [
- "providers/fips/fipsprov.c"
- ],
- "providers/fips/libfips-lib-self_test.o" => [
- "providers/fips/self_test.c"
- ],
- "providers/fips/libfips-lib-self_test_kats.o" => [
- "providers/fips/self_test_kats.c"
+ "providers/evp_extra_test-bin-legacyprov.o" => [
+ "providers/legacyprov.c"
],
"providers/implementations/asymciphers/libdefault-lib-rsa_enc.o" => [
"providers/implementations/asymciphers/rsa_enc.c"
],
- "providers/implementations/asymciphers/libfips-lib-rsa_enc.o" => [
- "providers/implementations/asymciphers/rsa_enc.c"
- ],
"providers/implementations/ciphers/libcommon-lib-ciphercommon.o" => [
"providers/implementations/ciphers/ciphercommon.c"
],
@@ -33449,63 +32295,57 @@ our %unified_info = (
"providers/implementations/ciphers/libdefault-lib-cipher_tdes_wrap_hw.o" => [
"providers/implementations/ciphers/cipher_tdes_wrap_hw.c"
],
- "providers/implementations/ciphers/libfips-lib-cipher_aes.o" => [
- "providers/implementations/ciphers/cipher_aes.c"
- ],
- "providers/implementations/ciphers/libfips-lib-cipher_aes_cbc_hmac_sha.o" => [
- "providers/implementations/ciphers/cipher_aes_cbc_hmac_sha.c"
+ "providers/implementations/ciphers/liblegacy-lib-cipher_blowfish.o" => [
+ "providers/implementations/ciphers/cipher_blowfish.c"
],
- "providers/implementations/ciphers/libfips-lib-cipher_aes_cbc_hmac_sha1_hw.o" => [
- "providers/implementations/ciphers/cipher_aes_cbc_hmac_sha1_hw.c"
+ "providers/implementations/ciphers/liblegacy-lib-cipher_blowfish_hw.o" => [
+ "providers/implementations/ciphers/cipher_blowfish_hw.c"
],
- "providers/implementations/ciphers/libfips-lib-cipher_aes_cbc_hmac_sha256_hw.o" => [
- "providers/implementations/ciphers/cipher_aes_cbc_hmac_sha256_hw.c"
+ "providers/implementations/ciphers/liblegacy-lib-cipher_cast5.o" => [
+ "providers/implementations/ciphers/cipher_cast5.c"
],
- "providers/implementations/ciphers/libfips-lib-cipher_aes_ccm.o" => [
- "providers/implementations/ciphers/cipher_aes_ccm.c"
+ "providers/implementations/ciphers/liblegacy-lib-cipher_cast5_hw.o" => [
+ "providers/implementations/ciphers/cipher_cast5_hw.c"
],
- "providers/implementations/ciphers/libfips-lib-cipher_aes_ccm_hw.o" => [
- "providers/implementations/ciphers/cipher_aes_ccm_hw.c"
+ "providers/implementations/ciphers/liblegacy-lib-cipher_des.o" => [
+ "providers/implementations/ciphers/cipher_des.c"
],
- "providers/implementations/ciphers/libfips-lib-cipher_aes_gcm.o" => [
- "providers/implementations/ciphers/cipher_aes_gcm.c"
+ "providers/implementations/ciphers/liblegacy-lib-cipher_des_hw.o" => [
+ "providers/implementations/ciphers/cipher_des_hw.c"
],
- "providers/implementations/ciphers/libfips-lib-cipher_aes_gcm_hw.o" => [
- "providers/implementations/ciphers/cipher_aes_gcm_hw.c"
+ "providers/implementations/ciphers/liblegacy-lib-cipher_desx.o" => [
+ "providers/implementations/ciphers/cipher_desx.c"
],
- "providers/implementations/ciphers/libfips-lib-cipher_aes_hw.o" => [
- "providers/implementations/ciphers/cipher_aes_hw.c"
+ "providers/implementations/ciphers/liblegacy-lib-cipher_desx_hw.o" => [
+ "providers/implementations/ciphers/cipher_desx_hw.c"
],
- "providers/implementations/ciphers/libfips-lib-cipher_aes_ocb.o" => [
- "providers/implementations/ciphers/cipher_aes_ocb.c"
+ "providers/implementations/ciphers/liblegacy-lib-cipher_rc2.o" => [
+ "providers/implementations/ciphers/cipher_rc2.c"
],
- "providers/implementations/ciphers/libfips-lib-cipher_aes_ocb_hw.o" => [
- "providers/implementations/ciphers/cipher_aes_ocb_hw.c"
+ "providers/implementations/ciphers/liblegacy-lib-cipher_rc2_hw.o" => [
+ "providers/implementations/ciphers/cipher_rc2_hw.c"
],
- "providers/implementations/ciphers/libfips-lib-cipher_aes_wrp.o" => [
- "providers/implementations/ciphers/cipher_aes_wrp.c"
+ "providers/implementations/ciphers/liblegacy-lib-cipher_rc4.o" => [
+ "providers/implementations/ciphers/cipher_rc4.c"
],
- "providers/implementations/ciphers/libfips-lib-cipher_aes_xts.o" => [
- "providers/implementations/ciphers/cipher_aes_xts.c"
+ "providers/implementations/ciphers/liblegacy-lib-cipher_rc4_hmac_md5.o" => [
+ "providers/implementations/ciphers/cipher_rc4_hmac_md5.c"
],
- "providers/implementations/ciphers/libfips-lib-cipher_aes_xts_fips.o" => [
- "providers/implementations/ciphers/cipher_aes_xts_fips.c"
+ "providers/implementations/ciphers/liblegacy-lib-cipher_rc4_hmac_md5_hw.o" => [
+ "providers/implementations/ciphers/cipher_rc4_hmac_md5_hw.c"
],
- "providers/implementations/ciphers/libfips-lib-cipher_aes_xts_hw.o" => [
- "providers/implementations/ciphers/cipher_aes_xts_hw.c"
+ "providers/implementations/ciphers/liblegacy-lib-cipher_rc4_hw.o" => [
+ "providers/implementations/ciphers/cipher_rc4_hw.c"
],
- "providers/implementations/ciphers/libfips-lib-cipher_cts.o" => [
- "providers/implementations/ciphers/cipher_cts.c"
+ "providers/implementations/ciphers/liblegacy-lib-cipher_seed.o" => [
+ "providers/implementations/ciphers/cipher_seed.c"
],
- "providers/implementations/ciphers/libfips-lib-cipher_tdes.o" => [
- "providers/implementations/ciphers/cipher_tdes.c"
+ "providers/implementations/ciphers/liblegacy-lib-cipher_seed_hw.o" => [
+ "providers/implementations/ciphers/cipher_seed_hw.c"
],
- "providers/implementations/ciphers/libfips-lib-cipher_tdes_common.o" => [
+ "providers/implementations/ciphers/liblegacy-lib-cipher_tdes_common.o" => [
"providers/implementations/ciphers/cipher_tdes_common.c"
],
- "providers/implementations/ciphers/libfips-lib-cipher_tdes_hw.o" => [
- "providers/implementations/ciphers/cipher_tdes_hw.c"
- ],
"providers/implementations/digests/libcommon-lib-digestcommon.o" => [
"providers/implementations/digests/digestcommon.c"
],
@@ -33536,11 +32376,14 @@ our %unified_info = (
"providers/implementations/digests/libdefault-lib-sha3_prov.o" => [
"providers/implementations/digests/sha3_prov.c"
],
- "providers/implementations/digests/libfips-lib-sha2_prov.o" => [
- "providers/implementations/digests/sha2_prov.c"
+ "providers/implementations/digests/liblegacy-lib-md4_prov.o" => [
+ "providers/implementations/digests/md4_prov.c"
],
- "providers/implementations/digests/libfips-lib-sha3_prov.o" => [
- "providers/implementations/digests/sha3_prov.c"
+ "providers/implementations/digests/liblegacy-lib-ripemd_prov.o" => [
+ "providers/implementations/digests/ripemd_prov.c"
+ ],
+ "providers/implementations/digests/liblegacy-lib-wp_prov.o" => [
+ "providers/implementations/digests/wp_prov.c"
],
"providers/implementations/encode_decode/libdefault-lib-decode_der2key.o" => [
"providers/implementations/encode_decode/decode_der2key.c"
@@ -33596,18 +32439,6 @@ our %unified_info = (
"providers/implementations/exchange/libdefault-lib-kdf_exch.o" => [
"providers/implementations/exchange/kdf_exch.c"
],
- "providers/implementations/exchange/libfips-lib-dh_exch.o" => [
- "providers/implementations/exchange/dh_exch.c"
- ],
- "providers/implementations/exchange/libfips-lib-ecdh_exch.o" => [
- "providers/implementations/exchange/ecdh_exch.c"
- ],
- "providers/implementations/exchange/libfips-lib-ecx_exch.o" => [
- "providers/implementations/exchange/ecx_exch.c"
- ],
- "providers/implementations/exchange/libfips-lib-kdf_exch.o" => [
- "providers/implementations/exchange/kdf_exch.c"
- ],
"providers/implementations/kdfs/libdefault-lib-argon2.o" => [
"providers/implementations/kdfs/argon2.c"
],
@@ -33647,29 +32478,11 @@ our %unified_info = (
"providers/implementations/kdfs/libdefault-lib-x942kdf.o" => [
"providers/implementations/kdfs/x942kdf.c"
],
- "providers/implementations/kdfs/libfips-lib-hkdf.o" => [
- "providers/implementations/kdfs/hkdf.c"
- ],
- "providers/implementations/kdfs/libfips-lib-kbkdf.o" => [
- "providers/implementations/kdfs/kbkdf.c"
- ],
- "providers/implementations/kdfs/libfips-lib-pbkdf2.o" => [
- "providers/implementations/kdfs/pbkdf2.c"
+ "providers/implementations/kdfs/liblegacy-lib-pbkdf1.o" => [
+ "providers/implementations/kdfs/pbkdf1.c"
],
- "providers/implementations/kdfs/libfips-lib-pbkdf2_fips.o" => [
- "providers/implementations/kdfs/pbkdf2_fips.c"
- ],
- "providers/implementations/kdfs/libfips-lib-sshkdf.o" => [
- "providers/implementations/kdfs/sshkdf.c"
- ],
- "providers/implementations/kdfs/libfips-lib-sskdf.o" => [
- "providers/implementations/kdfs/sskdf.c"
- ],
- "providers/implementations/kdfs/libfips-lib-tls1_prf.o" => [
- "providers/implementations/kdfs/tls1_prf.c"
- ],
- "providers/implementations/kdfs/libfips-lib-x942kdf.o" => [
- "providers/implementations/kdfs/x942kdf.c"
+ "providers/implementations/kdfs/liblegacy-lib-pvkkdf.o" => [
+ "providers/implementations/kdfs/pvkkdf.c"
],
"providers/implementations/kem/libdefault-lib-ec_kem.o" => [
"providers/implementations/kem/ec_kem.c"
@@ -33689,15 +32502,6 @@ our %unified_info = (
"providers/implementations/kem/libdefault-lib-rsa_kem.o" => [
"providers/implementations/kem/rsa_kem.c"
],
- "providers/implementations/kem/libfips-lib-ml_kem_kem.o" => [
- "providers/implementations/kem/ml_kem_kem.c"
- ],
- "providers/implementations/kem/libfips-lib-mlx_kem.o" => [
- "providers/implementations/kem/mlx_kem.c"
- ],
- "providers/implementations/kem/libfips-lib-rsa_kem.o" => [
- "providers/implementations/kem/rsa_kem.c"
- ],
"providers/implementations/kem/libtemplate-lib-template_kem.o" => [
"providers/implementations/kem/template_kem.c"
],
@@ -33734,39 +32538,6 @@ our %unified_info = (
"providers/implementations/keymgmt/libdefault-lib-slh_dsa_kmgmt.o" => [
"providers/implementations/keymgmt/slh_dsa_kmgmt.c"
],
- "providers/implementations/keymgmt/libfips-lib-dh_kmgmt.o" => [
- "providers/implementations/keymgmt/dh_kmgmt.c"
- ],
- "providers/implementations/keymgmt/libfips-lib-dsa_kmgmt.o" => [
- "providers/implementations/keymgmt/dsa_kmgmt.c"
- ],
- "providers/implementations/keymgmt/libfips-lib-ec_kmgmt.o" => [
- "providers/implementations/keymgmt/ec_kmgmt.c"
- ],
- "providers/implementations/keymgmt/libfips-lib-ecx_kmgmt.o" => [
- "providers/implementations/keymgmt/ecx_kmgmt.c"
- ],
- "providers/implementations/keymgmt/libfips-lib-kdf_legacy_kmgmt.o" => [
- "providers/implementations/keymgmt/kdf_legacy_kmgmt.c"
- ],
- "providers/implementations/keymgmt/libfips-lib-mac_legacy_kmgmt.o" => [
- "providers/implementations/keymgmt/mac_legacy_kmgmt.c"
- ],
- "providers/implementations/keymgmt/libfips-lib-ml_dsa_kmgmt.o" => [
- "providers/implementations/keymgmt/ml_dsa_kmgmt.c"
- ],
- "providers/implementations/keymgmt/libfips-lib-ml_kem_kmgmt.o" => [
- "providers/implementations/keymgmt/ml_kem_kmgmt.c"
- ],
- "providers/implementations/keymgmt/libfips-lib-mlx_kmgmt.o" => [
- "providers/implementations/keymgmt/mlx_kmgmt.c"
- ],
- "providers/implementations/keymgmt/libfips-lib-rsa_kmgmt.o" => [
- "providers/implementations/keymgmt/rsa_kmgmt.c"
- ],
- "providers/implementations/keymgmt/libfips-lib-slh_dsa_kmgmt.o" => [
- "providers/implementations/keymgmt/slh_dsa_kmgmt.c"
- ],
"providers/implementations/keymgmt/libtemplate-lib-template_kmgmt.o" => [
"providers/implementations/keymgmt/template_kmgmt.c"
],
@@ -33794,18 +32565,6 @@ our %unified_info = (
"providers/implementations/macs/libdefault-lib-siphash_prov.o" => [
"providers/implementations/macs/siphash_prov.c"
],
- "providers/implementations/macs/libfips-lib-cmac_prov.o" => [
- "providers/implementations/macs/cmac_prov.c"
- ],
- "providers/implementations/macs/libfips-lib-gmac_prov.o" => [
- "providers/implementations/macs/gmac_prov.c"
- ],
- "providers/implementations/macs/libfips-lib-hmac_prov.o" => [
- "providers/implementations/macs/hmac_prov.c"
- ],
- "providers/implementations/macs/libfips-lib-kmac_prov.o" => [
- "providers/implementations/macs/kmac_prov.c"
- ],
"providers/implementations/rands/libdefault-lib-drbg.o" => [
"providers/implementations/rands/drbg.c"
],
@@ -33827,24 +32586,6 @@ our %unified_info = (
"providers/implementations/rands/libdefault-lib-test_rng.o" => [
"providers/implementations/rands/test_rng.c"
],
- "providers/implementations/rands/libfips-lib-drbg.o" => [
- "providers/implementations/rands/drbg.c"
- ],
- "providers/implementations/rands/libfips-lib-drbg_ctr.o" => [
- "providers/implementations/rands/drbg_ctr.c"
- ],
- "providers/implementations/rands/libfips-lib-drbg_hash.o" => [
- "providers/implementations/rands/drbg_hash.c"
- ],
- "providers/implementations/rands/libfips-lib-drbg_hmac.o" => [
- "providers/implementations/rands/drbg_hmac.c"
- ],
- "providers/implementations/rands/libfips-lib-fips_crng_test.o" => [
- "providers/implementations/rands/fips_crng_test.c"
- ],
- "providers/implementations/rands/libfips-lib-test_rng.o" => [
- "providers/implementations/rands/test_rng.c"
- ],
"providers/implementations/rands/seeding/libdefault-lib-rand_cpu_x86.o" => [
"providers/implementations/rands/seeding/rand_cpu_x86.c"
],
@@ -33878,45 +32619,28 @@ our %unified_info = (
"providers/implementations/signature/libdefault-lib-slh_dsa_sig.o" => [
"providers/implementations/signature/slh_dsa_sig.c"
],
- "providers/implementations/signature/libfips-lib-dsa_sig.o" => [
- "providers/implementations/signature/dsa_sig.c"
- ],
- "providers/implementations/signature/libfips-lib-ecdsa_sig.o" => [
- "providers/implementations/signature/ecdsa_sig.c"
- ],
- "providers/implementations/signature/libfips-lib-eddsa_sig.o" => [
- "providers/implementations/signature/eddsa_sig.c"
- ],
- "providers/implementations/signature/libfips-lib-mac_legacy_sig.o" => [
- "providers/implementations/signature/mac_legacy_sig.c"
- ],
- "providers/implementations/signature/libfips-lib-ml_dsa_sig.o" => [
- "providers/implementations/signature/ml_dsa_sig.c"
- ],
- "providers/implementations/signature/libfips-lib-rsa_sig.o" => [
- "providers/implementations/signature/rsa_sig.c"
- ],
- "providers/implementations/signature/libfips-lib-slh_dsa_sig.o" => [
- "providers/implementations/signature/slh_dsa_sig.c"
- ],
"providers/implementations/skeymgmt/libdefault-lib-aes_skmgmt.o" => [
"providers/implementations/skeymgmt/aes_skmgmt.c"
],
"providers/implementations/skeymgmt/libdefault-lib-generic.o" => [
"providers/implementations/skeymgmt/generic.c"
],
- "providers/implementations/skeymgmt/libfips-lib-aes_skmgmt.o" => [
- "providers/implementations/skeymgmt/aes_skmgmt.c"
- ],
- "providers/implementations/skeymgmt/libfips-lib-generic.o" => [
- "providers/implementations/skeymgmt/generic.c"
- ],
"providers/implementations/storemgmt/libdefault-lib-file_store.o" => [
"providers/implementations/storemgmt/file_store.c"
],
"providers/implementations/storemgmt/libdefault-lib-file_store_any2obj.o" => [
"providers/implementations/storemgmt/file_store_any2obj.c"
],
+ "providers/legacy" => [
+ "crypto/legacy-dso-cpuid.o",
+ "crypto/legacy-dso-ctype.o",
+ "crypto/legacy-dso-x86_64cpuid.o",
+ "providers/legacy-dso-legacyprov.o",
+ "providers/legacy.ld"
+ ],
+ "providers/legacy-dso-legacyprov.o" => [
+ "providers/legacyprov.c"
+ ],
"providers/libcommon.a" => [
"providers/common/der/libcommon-lib-der_digests_gen.o",
"providers/common/der/libcommon-lib-der_dsa_gen.o",
@@ -34104,335 +32828,42 @@ our %unified_info = (
"providers/implementations/storemgmt/libdefault-lib-file_store_any2obj.o",
"ssl/record/methods/libdefault-lib-ssl3_cbc.o"
],
- "providers/libfips.a" => [
- "crypto/aes/libfips-lib-aes-x86_64.o",
- "crypto/aes/libfips-lib-aes_ecb.o",
- "crypto/aes/libfips-lib-aes_misc.o",
- "crypto/aes/libfips-lib-aesni-mb-x86_64.o",
- "crypto/aes/libfips-lib-aesni-sha1-x86_64.o",
- "crypto/aes/libfips-lib-aesni-sha256-x86_64.o",
- "crypto/aes/libfips-lib-aesni-x86_64.o",
- "crypto/aes/libfips-lib-aesni-xts-avx512.o",
- "crypto/aes/libfips-lib-bsaes-x86_64.o",
- "crypto/aes/libfips-lib-vpaes-x86_64.o",
- "crypto/bn/asm/libfips-lib-x86_64-gcc.o",
- "crypto/bn/libfips-lib-bn_add.o",
- "crypto/bn/libfips-lib-bn_blind.o",
- "crypto/bn/libfips-lib-bn_const.o",
- "crypto/bn/libfips-lib-bn_conv.o",
- "crypto/bn/libfips-lib-bn_ctx.o",
- "crypto/bn/libfips-lib-bn_dh.o",
- "crypto/bn/libfips-lib-bn_div.o",
- "crypto/bn/libfips-lib-bn_exp.o",
- "crypto/bn/libfips-lib-bn_exp2.o",
- "crypto/bn/libfips-lib-bn_gcd.o",
- "crypto/bn/libfips-lib-bn_gf2m.o",
- "crypto/bn/libfips-lib-bn_intern.o",
- "crypto/bn/libfips-lib-bn_kron.o",
- "crypto/bn/libfips-lib-bn_lib.o",
- "crypto/bn/libfips-lib-bn_mod.o",
- "crypto/bn/libfips-lib-bn_mont.o",
- "crypto/bn/libfips-lib-bn_mpi.o",
- "crypto/bn/libfips-lib-bn_mul.o",
- "crypto/bn/libfips-lib-bn_nist.o",
- "crypto/bn/libfips-lib-bn_prime.o",
- "crypto/bn/libfips-lib-bn_rand.o",
- "crypto/bn/libfips-lib-bn_recp.o",
- "crypto/bn/libfips-lib-bn_rsa_fips186_4.o",
- "crypto/bn/libfips-lib-bn_shift.o",
- "crypto/bn/libfips-lib-bn_sqr.o",
- "crypto/bn/libfips-lib-bn_sqrt.o",
- "crypto/bn/libfips-lib-bn_word.o",
- "crypto/bn/libfips-lib-rsaz-2k-avx512.o",
- "crypto/bn/libfips-lib-rsaz-2k-avxifma.o",
- "crypto/bn/libfips-lib-rsaz-3k-avx512.o",
- "crypto/bn/libfips-lib-rsaz-3k-avxifma.o",
- "crypto/bn/libfips-lib-rsaz-4k-avx512.o",
- "crypto/bn/libfips-lib-rsaz-4k-avxifma.o",
- "crypto/bn/libfips-lib-rsaz-avx2.o",
- "crypto/bn/libfips-lib-rsaz-x86_64.o",
- "crypto/bn/libfips-lib-rsaz_exp.o",
- "crypto/bn/libfips-lib-rsaz_exp_x2.o",
- "crypto/bn/libfips-lib-x86_64-gf2m.o",
- "crypto/bn/libfips-lib-x86_64-mont.o",
- "crypto/bn/libfips-lib-x86_64-mont5.o",
- "crypto/buffer/libfips-lib-buffer.o",
- "crypto/cmac/libfips-lib-cmac.o",
- "crypto/des/libfips-lib-des_enc.o",
- "crypto/des/libfips-lib-ecb3_enc.o",
- "crypto/des/libfips-lib-fcrypt_b.o",
- "crypto/des/libfips-lib-set_key.o",
- "crypto/dh/libfips-lib-dh_backend.o",
- "crypto/dh/libfips-lib-dh_check.o",
- "crypto/dh/libfips-lib-dh_gen.o",
- "crypto/dh/libfips-lib-dh_group_params.o",
- "crypto/dh/libfips-lib-dh_kdf.o",
- "crypto/dh/libfips-lib-dh_key.o",
- "crypto/dh/libfips-lib-dh_lib.o",
- "crypto/dsa/libfips-lib-dsa_backend.o",
- "crypto/dsa/libfips-lib-dsa_check.o",
- "crypto/dsa/libfips-lib-dsa_gen.o",
- "crypto/dsa/libfips-lib-dsa_key.o",
- "crypto/dsa/libfips-lib-dsa_lib.o",
- "crypto/dsa/libfips-lib-dsa_ossl.o",
- "crypto/dsa/libfips-lib-dsa_sign.o",
- "crypto/dsa/libfips-lib-dsa_vrf.o",
- "crypto/ec/curve448/arch_32/libfips-lib-f_impl32.o",
- "crypto/ec/curve448/arch_64/libfips-lib-f_impl64.o",
- "crypto/ec/curve448/libfips-lib-curve448.o",
- "crypto/ec/curve448/libfips-lib-curve448_tables.o",
- "crypto/ec/curve448/libfips-lib-eddsa.o",
- "crypto/ec/curve448/libfips-lib-f_generic.o",
- "crypto/ec/curve448/libfips-lib-scalar.o",
- "crypto/ec/libfips-lib-curve25519.o",
- "crypto/ec/libfips-lib-ec2_oct.o",
- "crypto/ec/libfips-lib-ec2_smpl.o",
- "crypto/ec/libfips-lib-ec_asn1.o",
- "crypto/ec/libfips-lib-ec_backend.o",
- "crypto/ec/libfips-lib-ec_check.o",
- "crypto/ec/libfips-lib-ec_curve.o",
- "crypto/ec/libfips-lib-ec_cvt.o",
- "crypto/ec/libfips-lib-ec_key.o",
- "crypto/ec/libfips-lib-ec_kmeth.o",
- "crypto/ec/libfips-lib-ec_lib.o",
- "crypto/ec/libfips-lib-ec_mult.o",
- "crypto/ec/libfips-lib-ec_oct.o",
- "crypto/ec/libfips-lib-ecdh_kdf.o",
- "crypto/ec/libfips-lib-ecdh_ossl.o",
- "crypto/ec/libfips-lib-ecdsa_ossl.o",
- "crypto/ec/libfips-lib-ecdsa_sign.o",
- "crypto/ec/libfips-lib-ecdsa_vrf.o",
- "crypto/ec/libfips-lib-ecp_mont.o",
- "crypto/ec/libfips-lib-ecp_nist.o",
- "crypto/ec/libfips-lib-ecp_nistp224.o",
- "crypto/ec/libfips-lib-ecp_nistp256.o",
- "crypto/ec/libfips-lib-ecp_nistp384.o",
- "crypto/ec/libfips-lib-ecp_nistp521.o",
- "crypto/ec/libfips-lib-ecp_nistputil.o",
- "crypto/ec/libfips-lib-ecp_nistz256-x86_64.o",
- "crypto/ec/libfips-lib-ecp_nistz256.o",
- "crypto/ec/libfips-lib-ecp_oct.o",
- "crypto/ec/libfips-lib-ecp_smpl.o",
- "crypto/ec/libfips-lib-ecx_backend.o",
- "crypto/ec/libfips-lib-ecx_key.o",
- "crypto/ec/libfips-lib-x25519-x86_64.o",
- "crypto/evp/libfips-lib-asymcipher.o",
- "crypto/evp/libfips-lib-dh_support.o",
- "crypto/evp/libfips-lib-digest.o",
- "crypto/evp/libfips-lib-ec_support.o",
- "crypto/evp/libfips-lib-evp_enc.o",
- "crypto/evp/libfips-lib-evp_fetch.o",
- "crypto/evp/libfips-lib-evp_lib.o",
- "crypto/evp/libfips-lib-evp_rand.o",
- "crypto/evp/libfips-lib-evp_utils.o",
- "crypto/evp/libfips-lib-exchange.o",
- "crypto/evp/libfips-lib-kdf_lib.o",
- "crypto/evp/libfips-lib-kdf_meth.o",
- "crypto/evp/libfips-lib-kem.o",
- "crypto/evp/libfips-lib-keymgmt_lib.o",
- "crypto/evp/libfips-lib-keymgmt_meth.o",
- "crypto/evp/libfips-lib-mac_lib.o",
- "crypto/evp/libfips-lib-mac_meth.o",
- "crypto/evp/libfips-lib-p_lib.o",
- "crypto/evp/libfips-lib-pmeth_check.o",
- "crypto/evp/libfips-lib-pmeth_gn.o",
- "crypto/evp/libfips-lib-pmeth_lib.o",
- "crypto/evp/libfips-lib-s_lib.o",
- "crypto/evp/libfips-lib-signature.o",
- "crypto/evp/libfips-lib-skeymgmt_meth.o",
- "crypto/ffc/libfips-lib-ffc_backend.o",
- "crypto/ffc/libfips-lib-ffc_dh.o",
- "crypto/ffc/libfips-lib-ffc_key_generate.o",
- "crypto/ffc/libfips-lib-ffc_key_validate.o",
- "crypto/ffc/libfips-lib-ffc_params.o",
- "crypto/ffc/libfips-lib-ffc_params_generate.o",
- "crypto/ffc/libfips-lib-ffc_params_validate.o",
- "crypto/hashtable/libfips-lib-hashfunc.o",
- "crypto/hashtable/libfips-lib-hashtable.o",
- "crypto/hmac/libfips-lib-hmac.o",
- "crypto/lhash/libfips-lib-lhash.o",
- "crypto/libfips-lib-asn1_dsa.o",
- "crypto/libfips-lib-bsearch.o",
- "crypto/libfips-lib-context.o",
- "crypto/libfips-lib-core_algorithm.o",
- "crypto/libfips-lib-core_fetch.o",
- "crypto/libfips-lib-core_namemap.o",
- "crypto/libfips-lib-cpuid.o",
- "crypto/libfips-lib-cryptlib.o",
- "crypto/libfips-lib-ctype.o",
- "crypto/libfips-lib-der_writer.o",
- "crypto/libfips-lib-ex_data.o",
- "crypto/libfips-lib-initthread.o",
- "crypto/libfips-lib-o_str.o",
- "crypto/libfips-lib-packet.o",
- "crypto/libfips-lib-param_build.o",
- "crypto/libfips-lib-param_build_set.o",
- "crypto/libfips-lib-params.o",
- "crypto/libfips-lib-params_dup.o",
- "crypto/libfips-lib-params_from_text.o",
- "crypto/libfips-lib-params_idx.o",
- "crypto/libfips-lib-provider_core.o",
- "crypto/libfips-lib-provider_predefined.o",
- "crypto/libfips-lib-self_test_core.o",
- "crypto/libfips-lib-sparse_array.o",
- "crypto/libfips-lib-threads_lib.o",
- "crypto/libfips-lib-threads_none.o",
- "crypto/libfips-lib-threads_pthread.o",
- "crypto/libfips-lib-threads_win.o",
- "crypto/libfips-lib-time.o",
- "crypto/libfips-lib-x86_64cpuid.o",
- "crypto/ml_dsa/libfips-lib-ml_dsa_encoders.o",
- "crypto/ml_dsa/libfips-lib-ml_dsa_key.o",
- "crypto/ml_dsa/libfips-lib-ml_dsa_key_compress.o",
- "crypto/ml_dsa/libfips-lib-ml_dsa_matrix.o",
- "crypto/ml_dsa/libfips-lib-ml_dsa_ntt.o",
- "crypto/ml_dsa/libfips-lib-ml_dsa_params.o",
- "crypto/ml_dsa/libfips-lib-ml_dsa_sample.o",
- "crypto/ml_dsa/libfips-lib-ml_dsa_sign.o",
- "crypto/ml_kem/libfips-lib-ml_kem.o",
- "crypto/modes/libfips-lib-aes-gcm-avx512.o",
- "crypto/modes/libfips-lib-aesni-gcm-x86_64.o",
- "crypto/modes/libfips-lib-cbc128.o",
- "crypto/modes/libfips-lib-ccm128.o",
- "crypto/modes/libfips-lib-cfb128.o",
- "crypto/modes/libfips-lib-ctr128.o",
- "crypto/modes/libfips-lib-gcm128.o",
- "crypto/modes/libfips-lib-ghash-x86_64.o",
- "crypto/modes/libfips-lib-ofb128.o",
- "crypto/modes/libfips-lib-wrap128.o",
- "crypto/modes/libfips-lib-xts128.o",
- "crypto/modes/libfips-lib-xts128gb.o",
- "crypto/property/libfips-lib-defn_cache.o",
- "crypto/property/libfips-lib-property.o",
- "crypto/property/libfips-lib-property_parse.o",
- "crypto/property/libfips-lib-property_query.o",
- "crypto/property/libfips-lib-property_string.o",
- "crypto/rand/libfips-lib-rand_lib.o",
- "crypto/rsa/libfips-lib-rsa_acvp_test_params.o",
- "crypto/rsa/libfips-lib-rsa_backend.o",
- "crypto/rsa/libfips-lib-rsa_chk.o",
- "crypto/rsa/libfips-lib-rsa_crpt.o",
- "crypto/rsa/libfips-lib-rsa_gen.o",
- "crypto/rsa/libfips-lib-rsa_lib.o",
- "crypto/rsa/libfips-lib-rsa_mp_names.o",
- "crypto/rsa/libfips-lib-rsa_none.o",
- "crypto/rsa/libfips-lib-rsa_oaep.o",
- "crypto/rsa/libfips-lib-rsa_ossl.o",
- "crypto/rsa/libfips-lib-rsa_pk1.o",
- "crypto/rsa/libfips-lib-rsa_pss.o",
- "crypto/rsa/libfips-lib-rsa_schemes.o",
- "crypto/rsa/libfips-lib-rsa_sign.o",
- "crypto/rsa/libfips-lib-rsa_sp800_56b_check.o",
- "crypto/rsa/libfips-lib-rsa_sp800_56b_gen.o",
- "crypto/rsa/libfips-lib-rsa_x931.o",
- "crypto/sha/libfips-lib-keccak1600-x86_64.o",
- "crypto/sha/libfips-lib-sha1-mb-x86_64.o",
- "crypto/sha/libfips-lib-sha1-x86_64.o",
- "crypto/sha/libfips-lib-sha1dgst.o",
- "crypto/sha/libfips-lib-sha256-mb-x86_64.o",
- "crypto/sha/libfips-lib-sha256-x86_64.o",
- "crypto/sha/libfips-lib-sha256.o",
- "crypto/sha/libfips-lib-sha3.o",
- "crypto/sha/libfips-lib-sha512-x86_64.o",
- "crypto/sha/libfips-lib-sha512.o",
- "crypto/slh_dsa/libfips-lib-slh_adrs.o",
- "crypto/slh_dsa/libfips-lib-slh_dsa.o",
- "crypto/slh_dsa/libfips-lib-slh_dsa_hash_ctx.o",
- "crypto/slh_dsa/libfips-lib-slh_dsa_key.o",
- "crypto/slh_dsa/libfips-lib-slh_fors.o",
- "crypto/slh_dsa/libfips-lib-slh_hash.o",
- "crypto/slh_dsa/libfips-lib-slh_hypertree.o",
- "crypto/slh_dsa/libfips-lib-slh_params.o",
- "crypto/slh_dsa/libfips-lib-slh_wots.o",
- "crypto/slh_dsa/libfips-lib-slh_xmss.o",
- "crypto/stack/libfips-lib-stack.o",
- "crypto/thread/arch/libfips-lib-thread_none.o",
- "crypto/thread/arch/libfips-lib-thread_posix.o",
- "crypto/thread/arch/libfips-lib-thread_win.o",
- "crypto/thread/libfips-lib-api.o",
- "crypto/thread/libfips-lib-arch.o",
- "crypto/thread/libfips-lib-internal.o",
- "providers/common/der/libfips-lib-der_rsa_sig.o",
- "providers/common/libfips-lib-bio_prov.o",
- "providers/common/libfips-lib-capabilities.o",
- "providers/common/libfips-lib-digest_to_nid.o",
- "providers/common/libfips-lib-provider_seeding.o",
- "providers/common/libfips-lib-provider_util.o",
- "providers/common/libfips-lib-securitycheck.o",
- "providers/common/libfips-lib-securitycheck_fips.o",
- "providers/fips/libfips-lib-fipsindicator.o",
- "providers/fips/libfips-lib-fipsprov.o",
- "providers/fips/libfips-lib-self_test.o",
- "providers/fips/libfips-lib-self_test_kats.o",
- "providers/implementations/asymciphers/libfips-lib-rsa_enc.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_cbc_hmac_sha.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_cbc_hmac_sha1_hw.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_cbc_hmac_sha256_hw.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_ccm.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_ccm_hw.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_gcm.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_gcm_hw.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_hw.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_ocb.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_ocb_hw.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_wrp.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_xts.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_xts_fips.o",
- "providers/implementations/ciphers/libfips-lib-cipher_aes_xts_hw.o",
- "providers/implementations/ciphers/libfips-lib-cipher_cts.o",
- "providers/implementations/ciphers/libfips-lib-cipher_tdes.o",
- "providers/implementations/ciphers/libfips-lib-cipher_tdes_common.o",
- "providers/implementations/ciphers/libfips-lib-cipher_tdes_hw.o",
- "providers/implementations/digests/libfips-lib-sha2_prov.o",
- "providers/implementations/digests/libfips-lib-sha3_prov.o",
- "providers/implementations/exchange/libfips-lib-dh_exch.o",
- "providers/implementations/exchange/libfips-lib-ecdh_exch.o",
- "providers/implementations/exchange/libfips-lib-ecx_exch.o",
- "providers/implementations/exchange/libfips-lib-kdf_exch.o",
- "providers/implementations/kdfs/libfips-lib-hkdf.o",
- "providers/implementations/kdfs/libfips-lib-kbkdf.o",
- "providers/implementations/kdfs/libfips-lib-pbkdf2.o",
- "providers/implementations/kdfs/libfips-lib-pbkdf2_fips.o",
- "providers/implementations/kdfs/libfips-lib-sshkdf.o",
- "providers/implementations/kdfs/libfips-lib-sskdf.o",
- "providers/implementations/kdfs/libfips-lib-tls1_prf.o",
- "providers/implementations/kdfs/libfips-lib-x942kdf.o",
- "providers/implementations/kem/libfips-lib-ml_kem_kem.o",
- "providers/implementations/kem/libfips-lib-mlx_kem.o",
- "providers/implementations/kem/libfips-lib-rsa_kem.o",
- "providers/implementations/keymgmt/libfips-lib-dh_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-dsa_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-ec_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-ecx_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-kdf_legacy_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-mac_legacy_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-ml_dsa_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-ml_kem_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-mlx_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-rsa_kmgmt.o",
- "providers/implementations/keymgmt/libfips-lib-slh_dsa_kmgmt.o",
- "providers/implementations/macs/libfips-lib-cmac_prov.o",
- "providers/implementations/macs/libfips-lib-gmac_prov.o",
- "providers/implementations/macs/libfips-lib-hmac_prov.o",
- "providers/implementations/macs/libfips-lib-kmac_prov.o",
- "providers/implementations/rands/libfips-lib-drbg.o",
- "providers/implementations/rands/libfips-lib-drbg_ctr.o",
- "providers/implementations/rands/libfips-lib-drbg_hash.o",
- "providers/implementations/rands/libfips-lib-drbg_hmac.o",
- "providers/implementations/rands/libfips-lib-fips_crng_test.o",
- "providers/implementations/rands/libfips-lib-test_rng.o",
- "providers/implementations/signature/libfips-lib-dsa_sig.o",
- "providers/implementations/signature/libfips-lib-ecdsa_sig.o",
- "providers/implementations/signature/libfips-lib-eddsa_sig.o",
- "providers/implementations/signature/libfips-lib-mac_legacy_sig.o",
- "providers/implementations/signature/libfips-lib-ml_dsa_sig.o",
- "providers/implementations/signature/libfips-lib-rsa_sig.o",
- "providers/implementations/signature/libfips-lib-slh_dsa_sig.o",
- "providers/implementations/skeymgmt/libfips-lib-aes_skmgmt.o",
- "providers/implementations/skeymgmt/libfips-lib-generic.o",
- "providers/libcommon.a",
- "ssl/record/methods/libfips-lib-ssl3_cbc.o"
+ "providers/liblegacy-lib-prov_running.o" => [
+ "providers/prov_running.c"
+ ],
+ "providers/liblegacy.a" => [
+ "crypto/des/liblegacy-lib-des_enc.o",
+ "crypto/des/liblegacy-lib-fcrypt_b.o",
+ "crypto/md5/liblegacy-lib-md5-x86_64.o",
+ "crypto/md5/liblegacy-lib-md5_dgst.o",
+ "crypto/md5/liblegacy-lib-md5_one.o",
+ "crypto/md5/liblegacy-lib-md5_sha1.o",
+ "crypto/rc4/liblegacy-lib-rc4-md5-x86_64.o",
+ "crypto/rc4/liblegacy-lib-rc4-x86_64.o",
+ "providers/common/liblegacy-lib-provider_util.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_blowfish.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_blowfish_hw.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_cast5.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_cast5_hw.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_des.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_des_hw.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_desx.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_desx_hw.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_rc2.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_rc2_hw.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_rc4.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_rc4_hmac_md5.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_rc4_hmac_md5_hw.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_rc4_hw.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_seed.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_seed_hw.o",
+ "providers/implementations/ciphers/liblegacy-lib-cipher_tdes_common.o",
+ "providers/implementations/digests/liblegacy-lib-md4_prov.o",
+ "providers/implementations/digests/liblegacy-lib-ripemd_prov.o",
+ "providers/implementations/digests/liblegacy-lib-wp_prov.o",
+ "providers/implementations/kdfs/liblegacy-lib-pbkdf1.o",
+ "providers/implementations/kdfs/liblegacy-lib-pvkkdf.o",
+ "providers/liblegacy-lib-prov_running.o"
],
"providers/libtemplate.a" => [
"providers/implementations/kem/libtemplate-lib-template_kem.o",
@@ -34894,12 +33325,12 @@ our %unified_info = (
"ssl/record/methods/libdefault-lib-ssl3_cbc.o" => [
"ssl/record/methods/ssl3_cbc.c"
],
- "ssl/record/methods/libfips-lib-ssl3_cbc.o" => [
- "ssl/record/methods/ssl3_cbc.c"
- ],
"ssl/record/methods/libssl-lib-dtls_meth.o" => [
"ssl/record/methods/dtls_meth.c"
],
+ "ssl/record/methods/libssl-lib-ktls_meth.o" => [
+ "ssl/record/methods/ktls_meth.c"
+ ],
"ssl/record/methods/libssl-lib-ssl3_meth.o" => [
"ssl/record/methods/ssl3_meth.c"
],
@@ -34921,6 +33352,9 @@ our %unified_info = (
"ssl/record/methods/libssl-shlib-dtls_meth.o" => [
"ssl/record/methods/dtls_meth.c"
],
+ "ssl/record/methods/libssl-shlib-ktls_meth.o" => [
+ "ssl/record/methods/ktls_meth.c"
+ ],
"ssl/record/methods/libssl-shlib-ssl3_cbc.o" => [
"ssl/record/methods/ssl3_cbc.c"
],
@@ -35026,12 +33460,6 @@ our %unified_info = (
"test/aborttest-bin-aborttest.o" => [
"test/aborttest.c"
],
- "test/acvp_test" => [
- "test/acvp_test-bin-acvp_test.o"
- ],
- "test/acvp_test-bin-acvp_test.o" => [
- "test/acvp_test.c"
- ],
"test/aesgcmtest" => [
"test/aesgcmtest-bin-aesgcmtest.o"
],
@@ -35281,6 +33709,12 @@ our %unified_info = (
"test/buildtest_c_conf_api-bin-buildtest_conf_api.o" => [
"test/buildtest_conf_api.c"
],
+ "test/buildtest_c_configuration" => [
+ "test/buildtest_c_configuration-bin-buildtest_configuration.o"
+ ],
+ "test/buildtest_c_configuration-bin-buildtest_configuration.o" => [
+ "test/buildtest_configuration.c"
+ ],
"test/buildtest_c_conftypes" => [
"test/buildtest_c_conftypes-bin-buildtest_conftypes.o"
],
@@ -35929,6 +34363,7 @@ our %unified_info = (
"test/ectest.c"
],
"test/endecode_test" => [
+ "providers/endecode_test-bin-legacyprov.o",
"test/endecode_test-bin-endecode_test.o",
"test/helpers/endecode_test-bin-predefined_dhparams.o"
],
@@ -35960,6 +34395,7 @@ our %unified_info = (
"test/evp_byname_test.c"
],
"test/evp_extra_test" => [
+ "providers/evp_extra_test-bin-legacyprov.o",
"test/evp_extra_test-bin-evp_extra_test.o",
"test/evp_extra_test-bin-fake_pipelineprov.o",
"test/evp_extra_test-bin-fake_rsaprov.o"
@@ -37268,9 +35704,7 @@ our %unified_info = (
"util/wrap.pl.in"
]
},
- "targets" => [
- "build_modules_nodep"
- ]
+ "targets" => []
);
# Unexported, only used by OpenSSL::Test::Utils::available_protocols()
@@ -37317,6 +35751,9 @@ my @makevars = (
"RM"
);
my %disabled_info = (
+ "acvp-tests" => {
+ "macro" => "OPENSSL_NO_ACVP_TESTS"
+ },
"afalgeng" => {
"macro" => "OPENSSL_NO_AFALGENG"
},
@@ -37353,6 +35790,12 @@ my %disabled_info = (
"fips-jitter" => {
"macro" => "OPENSSL_NO_FIPS_JITTER"
},
+ "fips-post" => {
+ "macro" => "OPENSSL_NO_FIPS_POST"
+ },
+ "fips-securitychecks" => {
+ "macro" => "OPENSSL_NO_FIPS_SECURITYCHECKS"
+ },
"fuzz-afl" => {
"macro" => "OPENSSL_NO_FUZZ_AFL"
},
@@ -37374,9 +35817,6 @@ my %disabled_info = (
"jitter" => {
"macro" => "OPENSSL_NO_JITTER"
},
- "ktls" => {
- "macro" => "OPENSSL_NO_KTLS"
- },
"md2" => {
"macro" => "OPENSSL_NO_MD2",
"skipped" => [
@@ -37431,9 +35871,6 @@ my %disabled_info = (
"tfo" => {
"macro" => "OPENSSL_NO_TFO"
},
- "tls-deprecated-ec" => {
- "macro" => "OPENSSL_NO_TLS_DEPRECATED_EC"
- },
"trace" => {
"macro" => "OPENSSL_NO_TRACE"
},
@@ -37476,8 +35913,8 @@ unless (caller) {
use File::Copy;
use Pod::Usage;
- use lib '/home/khorben/Projects/FreeBSD/ports/security/openssl35/work/openssl-3.5.1/util/perl';
- use OpenSSL::fallback '/home/khorben/Projects/FreeBSD/ports/security/openssl35/work/openssl-3.5.1/external/perl/MODULES.txt';
+ use lib '/usr/home/ngie/git/freebsd-src/crypto/openssl/util/perl';
+ use OpenSSL::fallback '/usr/home/ngie/git/freebsd-src/crypto/openssl/external/perl/MODULES.txt';
my $here = dirname($0);
@@ -37504,7 +35941,7 @@ unless (caller) {
);
use lib '.';
- use lib '/home/khorben/Projects/FreeBSD/ports/security/openssl35/work/openssl-3.5.1/Configurations';
+ use lib '/usr/home/ngie/git/freebsd-src/crypto/openssl/Configurations';
use gentemplate;
open my $buildfile_template_fh, ">$buildfile_template"
@@ -37521,8 +35958,8 @@ unless (caller) {
my $prepend = <<'_____';
use File::Spec::Functions;
-use lib '/home/khorben/Projects/FreeBSD/ports/security/openssl35/work/openssl-3.5.1/util/perl';
-use lib '/home/khorben/Projects/FreeBSD/ports/security/openssl35/work/openssl-3.5.1/Configurations';
+use lib '/usr/home/ngie/git/freebsd-src/crypto/openssl/util/perl';
+use lib '/usr/home/ngie/git/freebsd-src/crypto/openssl/Configurations';
use lib '.';
use platform;
_____
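
The configdata.pm hunks above drop the disabled FIPS provider objects from %unified_info and wire in the legacy provider (providers/liblegacy.a and providers/legacy.ld) in their place. A quick way to sanity-check the regenerated table is to dump one of its keys from the OpenSSL source directory; a minimal sketch, assuming %unified_info is exportable from configdata.pm the same way %config is (dump_version_from_configdata.pl below imports %config like that):

    $ cd crypto/openssl
    $ perl -e 'use lib "."; use configdata qw(%unified_info);
               print "$_\n" for @{ $unified_info{"providers/liblegacy.a"} };'
    crypto/des/liblegacy-lib-des_enc.o
    crypto/des/liblegacy-lib-fcrypt_b.o
    ...
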
diff --git a/crypto/openssl/exporters/libcrypto.pc b/crypto/openssl/exporters/libcrypto.pc
new file mode 100644
index 000000000000..e7f3953b7eae
--- /dev/null
+++ b/crypto/openssl/exporters/libcrypto.pc
@@ -0,0 +1,13 @@
+prefix=/usr
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+includedir=${prefix}/include
+enginesdir=${libdir}/engines-3
+modulesdir=${libdir}/ossl-modules
+
+Name: OpenSSL-libcrypto
+Description: OpenSSL cryptography library
+Version: 3.5.1
+Libs: -L${libdir} -lcrypto
+Libs.private: -pthread
+Cflags: -I${includedir}
diff --git a/crypto/openssl/exporters/libssl.pc b/crypto/openssl/exporters/libssl.pc
new file mode 100644
index 000000000000..ed6fd275ca26
--- /dev/null
+++ b/crypto/openssl/exporters/libssl.pc
@@ -0,0 +1,11 @@
+prefix=/usr
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+includedir=${prefix}/include
+
+Name: OpenSSL-libssl
+Description: Secure Sockets Layer and cryptography libraries
+Version: 3.5.1
+Requires.private: libcrypto
+Libs: -L${libdir} -lssl
+Cflags: -I${includedir}
diff --git a/crypto/openssl/exporters/openssl.pc b/crypto/openssl/exporters/openssl.pc
new file mode 100644
index 000000000000..892ef113ca0f
--- /dev/null
+++ b/crypto/openssl/exporters/openssl.pc
@@ -0,0 +1,9 @@
+prefix=/usr
+exec_prefix=${prefix}
+libdir=${exec_prefix}/lib
+includedir=${prefix}/include
+
+Name: OpenSSL
+Description: Secure Sockets Layer and cryptography libraries and tools
+Version: 3.5.1
+Requires: libssl libcrypto
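
The three exporters/*.pc files provide pkg-config metadata for OpenSSL as installed in the FreeBSD base system. Because /usr/include and /usr/lib are already on the compiler's default search paths, pkg-config normally suppresses the -I/-L flags and only the linker inputs remain; for example (pkgconf on FreeBSD behaves the same way):

    $ pkg-config --cflags --libs libcrypto
    -lcrypto
    $ pkg-config --static --libs libssl
    -lssl -lcrypto -pthread

Note that libcrypto's Libs.private (-pthread) and libssl's Requires.private (libcrypto) only surface with --static.
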
diff --git a/crypto/openssl/freebsd/dump_version_from_configdata.pl b/crypto/openssl/freebsd/dump_version_from_configdata.pl
new file mode 100644
index 000000000000..b6137718ba54
--- /dev/null
+++ b/crypto/openssl/freebsd/dump_version_from_configdata.pl
@@ -0,0 +1,21 @@
+#!/usr/bin/env perl
+#
+# This dumps out the values needed to generate manpages and other artifacts
+# which include the release version/date.
+#
+# See also: `secure/lib/libcrypto/Makefile.version`.
+
+use Cwd qw(realpath);
+use File::Basename qw(dirname);
+use Time::Piece;
+
+use lib dirname(dirname(realpath($0)));
+
+use configdata qw(%config);
+
+$OPENSSL_DATE = Time::Piece->strptime($config{"release_date"}, "%d %b %Y")->strftime("%Y-%m-%d");
+
+$OPENSSL_VER = "$config{'major'}.$config{'minor'}.$config{'patch'}";
+
+print("OPENSSL_VER=\t${OPENSSL_VER}\n");
+print("OPENSSL_DATE=\t${OPENSSL_DATE}\n");
diff --git a/crypto/openssl/freebsd/include/crypto/bn_conf.h b/crypto/openssl/freebsd/include/crypto/bn_conf.h
new file mode 100644
index 000000000000..442931b63339
--- /dev/null
+++ b/crypto/openssl/freebsd/include/crypto/bn_conf.h
@@ -0,0 +1,27 @@
+
+/**
+ * OpenSSL's Configure script generates these values automatically for the host
+ * architecture, but FreeBSD provides values which are universal for all
+ * supported target architectures.
+ */
+
+#ifndef __FREEBSD_BN_CONF_H__
+#define __FREEBSD_BN_CONF_H__
+
+# undef SIXTY_FOUR_BIT_LONG
+# undef SIXTY_FOUR_BIT
+# undef THIRTY_TWO_BIT
+
+# if __SIZEOF_LONG__ == 8
+# define SIXTY_FOUR_BIT_LONG
+# undef SIXTY_FOUR_BIT
+# undef THIRTY_TWO_BIT
+# elif __SIZEOF_LONG__ == 4
+# undef SIXTY_FOUR_BIT_LONG
+# undef SIXTY_FOUR_BIT
+# define THIRTY_TWO_BIT
+# else
+# error Unsupported size of long
+# endif
+
+#endif /* __FREEBSD_BN_CONF_H__ */
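
The replacement header keys on __SIZEOF_LONG__, a macro both clang and gcc predefine, which is why a single header can serve every supported target architecture. Dumping the compiler's predefined macros shows which branch will be taken; on amd64, for example:

    $ cc -dM -E -xc /dev/null | grep __SIZEOF_LONG__
    #define __SIZEOF_LONG__ 8

so SIXTY_FOUR_BIT_LONG is selected; a 32-bit target reports 4 and gets THIRTY_TWO_BIT instead.
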
diff --git a/crypto/openssl/freebsd/include/openssl/configuration.h b/crypto/openssl/freebsd/include/openssl/configuration.h
new file mode 100644
index 000000000000..faea78cb32c8
--- /dev/null
+++ b/crypto/openssl/freebsd/include/openssl/configuration.h
@@ -0,0 +1,38 @@
+
+/**
+ * OpenSSL's Configure script generates these values automatically for the host
+ * architecture, but FreeBSD provides values which are universal for all
+ * supported target architectures.
+ */
+
+#ifndef __FREEBSD_CONFIGURATION_H__
+#define __FREEBSD_CONFIGURATION_H__
+
+# undef OPENSSL_NO_EC_NISTP_64_GCC_128
+# if __SIZEOF_LONG__ == 4 || __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
+# ifndef OPENSSL_NO_EC_NISTP_64_GCC_128
+# define OPENSSL_NO_EC_NISTP_64_GCC_128
+# endif
+# endif
+
+# undef BN_LLONG
+# undef SIXTY_FOUR_BIT_LONG
+# undef SIXTY_FOUR_BIT
+# undef THIRTY_TWO_BIT
+# if !defined(OPENSSL_SYS_UEFI)
+# if __SIZEOF_LONG__ == 8
+# undef BN_LLONG
+# define SIXTY_FOUR_BIT_LONG
+# undef SIXTY_FOUR_BIT
+# undef THIRTY_TWO_BIT
+# elif __SIZEOF_LONG__ == 4
+# define BN_LLONG
+# undef SIXTY_FOUR_BIT_LONG
+# undef SIXTY_FOUR_BIT
+# define THIRTY_TWO_BIT
+# else
+# error Unsupported size of long
+# endif
+# endif
+
+#endif /* __FREEBSD_CONFIGURATION_H__ */
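
The extra guard here keeps OPENSSL_NO_EC_NISTP_64_GCC_128 defined except on 64-bit little-endian targets, the only ones where the 128-bit-integer NIST-P implementations are usable; in practice those are the targets where the compiler provides __int128. A quick probe (the macro is simply absent on 32-bit targets):

    $ cc -dM -E -xc /dev/null | grep __SIZEOF_INT128__
    #define __SIZEOF_INT128__ 16
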
diff --git a/crypto/openssl/include/crypto/bn_conf.h b/crypto/openssl/include/crypto/bn_conf.h
index 0347a6ddc067..408242f0f8d0 100644
--- a/crypto/openssl/include/crypto/bn_conf.h
+++ b/crypto/openssl/include/crypto/bn_conf.h
@@ -27,3 +27,30 @@
#undef THIRTY_TWO_BIT
#endif
+
+/**
+ * OpenSSL's Configure script generates these values automatically for the host
+ * architecture, but FreeBSD provides values which are universal for all
+ * supported target architectures.
+ */
+
+#ifndef __FREEBSD_BN_CONF_H__
+#define __FREEBSD_BN_CONF_H__
+
+# undef SIXTY_FOUR_BIT_LONG
+# undef SIXTY_FOUR_BIT
+# undef THIRTY_TWO_BIT
+
+# if __SIZEOF_LONG__ == 8
+# define SIXTY_FOUR_BIT_LONG
+# undef SIXTY_FOUR_BIT
+# undef THIRTY_TWO_BIT
+# elif __SIZEOF_LONG__ == 4
+# undef SIXTY_FOUR_BIT_LONG
+# undef SIXTY_FOUR_BIT
+# define THIRTY_TWO_BIT
+# else
+# error Unsupported size of long
+# endif
+
+#endif /* __FREEBSD_BN_CONF_H__ */
diff --git a/crypto/openssl/include/openssl/configuration.h b/crypto/openssl/include/openssl/configuration.h
index 9fd68f770a1a..b4d8283a8b98 100644
--- a/crypto/openssl/include/openssl/configuration.h
+++ b/crypto/openssl/include/openssl/configuration.h
@@ -34,6 +34,9 @@ extern "C" {
# ifndef OPENSSL_THREADS
# define OPENSSL_THREADS
# endif
+# ifndef OPENSSL_NO_ACVP_TESTS
+# define OPENSSL_NO_ACVP_TESTS
+# endif
# ifndef OPENSSL_NO_AFALGENG
# define OPENSSL_NO_AFALGENG
# endif
@@ -67,6 +70,12 @@ extern "C" {
# ifndef OPENSSL_NO_FIPS_JITTER
# define OPENSSL_NO_FIPS_JITTER
# endif
+# ifndef OPENSSL_NO_FIPS_POST
+# define OPENSSL_NO_FIPS_POST
+# endif
+# ifndef OPENSSL_NO_FIPS_SECURITYCHECKS
+# define OPENSSL_NO_FIPS_SECURITYCHECKS
+# endif
# ifndef OPENSSL_NO_FUZZ_AFL
# define OPENSSL_NO_FUZZ_AFL
# endif
@@ -85,9 +94,6 @@ extern "C" {
# ifndef OPENSSL_NO_JITTER
# define OPENSSL_NO_JITTER
# endif
-# ifndef OPENSSL_NO_KTLS
-# define OPENSSL_NO_KTLS
-# endif
# ifndef OPENSSL_NO_MD2
# define OPENSSL_NO_MD2
# endif
@@ -124,9 +130,6 @@ extern "C" {
# ifndef OPENSSL_NO_TFO
# define OPENSSL_NO_TFO
# endif
-# ifndef OPENSSL_NO_TLS_DEPRECATED_EC
-# define OPENSSL_NO_TLS_DEPRECATED_EC
-# endif
# ifndef OPENSSL_NO_TRACE
# define OPENSSL_NO_TRACE
# endif
@@ -189,3 +192,41 @@ extern "C" {
# endif
#endif /* OPENSSL_CONFIGURATION_H */
+
+/**
+ * OpenSSL's Configure script generates these values automatically for the host
+ * architecture, but FreeBSD provides values which are universal for all
+ * supported target architectures.
+ */
+
+#ifndef __FREEBSD_CONFIGURATION_H__
+#define __FREEBSD_CONFIGURATION_H__
+
+# undef OPENSSL_NO_EC_NISTP_64_GCC_128
+# if __SIZEOF_LONG__ == 4 || __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
+# ifndef OPENSSL_NO_EC_NISTP_64_GCC_128
+# define OPENSSL_NO_EC_NISTP_64_GCC_128
+# endif
+# endif
+
+# undef BN_LLONG
+# undef SIXTY_FOUR_BIT_LONG
+# undef SIXTY_FOUR_BIT
+# undef THIRTY_TWO_BIT
+# if !defined(OPENSSL_SYS_UEFI)
+# if __SIZEOF_LONG__ == 8
+# undef BN_LLONG
+# define SIXTY_FOUR_BIT_LONG
+# undef SIXTY_FOUR_BIT
+# undef THIRTY_TWO_BIT
+# elif __SIZEOF_LONG__ == 4
+# define BN_LLONG
+# undef SIXTY_FOUR_BIT_LONG
+# undef SIXTY_FOUR_BIT
+# define THIRTY_TWO_BIT
+# else
+# error Unsupported size of long
+# endif
+# endif
+
+#endif /* __FREEBSD_CONFIGURATION_H__ */
diff --git a/crypto/openssl/include/openssl/fipskey.h b/crypto/openssl/include/openssl/fipskey.h
index 929db18c6783..620812bf0a5f 100644
--- a/crypto/openssl/include/openssl/fipskey.h
+++ b/crypto/openssl/include/openssl/fipskey.h
@@ -32,7 +32,7 @@ extern "C" {
/*
* The FIPS provider vendor name, as a string.
*/
-#define FIPS_VENDOR "OpenSSL FIPS Provider"
+#define FIPS_VENDOR "OpenSSL non-compliant FIPS Provider"
# ifdef __cplusplus
}
diff --git a/crypto/openssl/include/openssl/opensslv.h b/crypto/openssl/include/openssl/opensslv.h
index 4660b937298f..dd50d89cb998 100644
--- a/crypto/openssl/include/openssl/opensslv.h
+++ b/crypto/openssl/include/openssl/opensslv.h
@@ -57,7 +57,7 @@ extern "C" {
* be related to the API version expressed with the macros above.
* This is defined in free form.
*/
-# define OPENSSL_SHLIB_VERSION 17
+# define OPENSSL_SHLIB_VERSION 3
/*
* SECTION 2: USEFUL MACROS
diff --git a/crypto/openssl/libcrypto.pc b/crypto/openssl/libcrypto.pc
new file mode 100644
index 000000000000..97725059adfb
--- /dev/null
+++ b/crypto/openssl/libcrypto.pc
@@ -0,0 +1,13 @@
+prefix=/usr
+exec_prefix=${prefix}
+libdir=${exec_prefix}
+includedir=${prefix}/include ${prefix}/./include
+enginesdir=${libdir}/engines
+modulesdir=${libdir}/providers
+
+Name: OpenSSL-libcrypto
+Description: OpenSSL cryptography library
+Version: 3.5.1
+Libs: -L${libdir} -lcrypto
+Libs.private: -pthread
+Cflags: -I${prefix}/include -I${prefix}/./include
diff --git a/crypto/openssl/libssl.pc b/crypto/openssl/libssl.pc
new file mode 100644
index 000000000000..e2662ee9e5fa
--- /dev/null
+++ b/crypto/openssl/libssl.pc
@@ -0,0 +1,11 @@
+prefix=/usr
+exec_prefix=${prefix}
+libdir=${exec_prefix}
+includedir=${prefix}/include ${prefix}/./include
+
+Name: OpenSSL-libssl
+Description: Secure Sockets Layer and cryptography libraries
+Version: 3.5.1
+Requires.private: libcrypto
+Libs: -L${libdir} -lssl
+Cflags: -I${prefix}/include -I${prefix}/./include
diff --git a/crypto/openssl/openssl.pc b/crypto/openssl/openssl.pc
new file mode 100644
index 000000000000..ff0a0c2f4e09
--- /dev/null
+++ b/crypto/openssl/openssl.pc
@@ -0,0 +1,9 @@
+prefix=/usr
+exec_prefix=${prefix}
+libdir=${exec_prefix}
+includedir=${prefix}/include ${prefix}/./include
+
+Name: OpenSSL
+Description: Secure Sockets Layer and cryptography libraries and tools
+Version: 3.5.1
+Requires: libssl libcrypto
diff --git a/crypto/openssl/tools/c_rehash b/crypto/openssl/tools/c_rehash
index 2377b88ceda9..f3fbdae831d9 100755
--- a/crypto/openssl/tools/c_rehash
+++ b/crypto/openssl/tools/c_rehash
@@ -1,4 +1,4 @@
-#!/usr/local/bin/perl
+#!/usr/bin/env perl
# WARNING: do not edit!
# Generated by Makefile from tools/c_rehash.in
@@ -12,8 +12,8 @@
# Perl c_rehash script, scan all files in a directory
# and add symbolic links to their hash values.
-my $dir = "/usr/local/openssl";
-my $prefix = "/usr/local";
+my $dir = "etc";
+my $prefix = "/usr";
my $errorcount = 0;
my $openssl = $ENV{OPENSSL} || "openssl";
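
c_rehash scans a certificate directory and creates <subject-hash>.<n> symlinks so OpenSSL can locate CA certificates by hash. The openssl tool can do the same work directly, which is handy for checking what the script should produce (hash value illustrative):

    $ openssl x509 -hash -noout -in ca.pem
    5ed36f99
    $ ln -s ca.pem 5ed36f99.0            # what c_rehash would create
    $ openssl rehash /etc/ssl/certs      # built-in equivalent
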
diff --git a/crypto/openssl/util/shlib_wrap.sh b/crypto/openssl/util/shlib_wrap.sh
index 8b70f5048835..6754c25b9808 100755
--- a/crypto/openssl/util/shlib_wrap.sh
+++ b/crypto/openssl/util/shlib_wrap.sh
@@ -25,8 +25,8 @@ fi
THERE="`echo $0 | sed -e 's|[^/]*$||' 2>/dev/null`.."
[ -d "${THERE}" ] || exec "$@" # should never happen...
-LIBCRYPTOSO="${THERE}/libcrypto.so.17"
-LIBSSLSO="${THERE}/libssl.so.17"
+LIBCRYPTOSO="${THERE}/libcrypto.so.3"
+LIBSSLSO="${THERE}/libssl.so.3"
SYSNAME=`(uname -s) 2>/dev/null`;
case "$SYSNAME" in
diff --git a/crypto/openssl/util/wrap.pl b/crypto/openssl/util/wrap.pl
index 5d6af0a688a8..a2ebf25f3d93 100755
--- a/crypto/openssl/util/wrap.pl
+++ b/crypto/openssl/util/wrap.pl
@@ -1,4 +1,4 @@
-#! /usr/local/bin/perl
+#! /usr/bin/env perl
use strict;
use warnings;
@@ -9,7 +9,7 @@ use File::Spec::Functions;
BEGIN {
# This method corresponds exactly to 'use OpenSSL::Util',
# but allows us to use a platform specific file spec.
- require '/home/khorben/Projects/FreeBSD/ports/security/openssl35/work/openssl-3.5.1/util/perl/OpenSSL/Util.pm';
+ require '/usr/home/ngie/git/freebsd-src/crypto/openssl/util/perl/OpenSSL/Util.pm';
OpenSSL::Util->import();
}
@@ -53,14 +53,14 @@ my $unix_shlib_wrap = catfile($there, 'util/shlib_wrap.sh');
my $std_openssl_conf_include;
if ($ARGV[0] eq '-fips') {
- $std_openssl_conf = '/home/khorben/Projects/FreeBSD/ports/security/openssl35/work/openssl-3.5.1/test/fips-and-base.cnf';
+ $std_openssl_conf = '/usr/home/ngie/git/freebsd-src/crypto/openssl/test/fips-and-base.cnf';
shift;
$std_openssl_conf_include = catdir($there, 'providers');
}
if ($ARGV[0] eq '-jitter') {
- $std_openssl_conf = '/home/khorben/Projects/FreeBSD/ports/security/openssl35/work/openssl-3.5.1/test/default-and-jitter.cnf';
+ $std_openssl_conf = '/usr/home/ngie/git/freebsd-src/crypto/openssl/test/default-and-jitter.cnf';
shift;
$std_openssl_conf_include = catdir($there, 'providers');
diff --git a/etc/mtree/BSD.root.dist b/etc/mtree/BSD.root.dist
index 1312251cdd9e..5ea99ccbdb89 100644
--- a/etc/mtree/BSD.root.dist
+++ b/etc/mtree/BSD.root.dist
@@ -98,16 +98,16 @@
ssh
..
ssl
- certs
+ certs tags=package=caroot
..
- untrusted
+ untrusted tags=package=caroot
..
..
sysctl.kld.d
..
syslog.d
..
- zfs
+ zfs tags=package=zfs
compatibility.d
..
..
diff --git a/etc/mtree/BSD.usr.dist b/etc/mtree/BSD.usr.dist
index 68977a6b8282..ffdd82ae9911 100644
--- a/etc/mtree/BSD.usr.dist
+++ b/etc/mtree/BSD.usr.dist
@@ -267,6 +267,8 @@
..
dtrace
..
+ et
+ ..
examples
BSD_daemon
..
diff --git a/include/Makefile b/include/Makefile
index 2792d594a888..07890362d7a6 100644
--- a/include/Makefile
+++ b/include/Makefile
@@ -293,7 +293,7 @@ LSUBSUBDIRS+= netgraph/bluetooth/include
LSUBDIRS+= fs/cuse
.endif
-.if ${MK_GSSAPI} != "no" && ${MK_MITKRB5} == "no"
+.if ${MK_KERBEROS} != "no" && ${MK_MITKRB5} == "no"
SUBDIR+= gssapi
INCS+= gssapi.h
.endif
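
With this fix the gssapi headers follow the MK_KERBEROS knob (WITHOUT_KERBEROS in src.conf(5)) rather than MK_GSSAPI, matching the removal of the MK_GSSAPI conditionals from the kerberos5 Makefiles below. The effective knob settings for a tree can be listed with the build system itself; values here are illustrative:

    $ cd /usr/src
    $ make showconfig | grep -E 'KERBEROS|MITKRB5'
    MK_KERBEROS = yes
    MK_MITKRB5 = no
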
diff --git a/include/rpc/auth_des.h b/include/rpc/auth_des.h
index 1b4943a74b8b..0ff43c13139b 100644
--- a/include/rpc/auth_des.h
+++ b/include/rpc/auth_des.h
@@ -33,14 +33,91 @@
* Copyright (c) 1986 - 1991 by Sun Microsystems, Inc.
*/
-/* Note, RPC DES authentication was removed in FreeBSD 15.0. */
+/*
+ * auth_des.h, Protocol for DES style authentication for RPC
+ */
#ifndef _AUTH_DES_
#define _AUTH_DES_
+/*
+ * There are two kinds of "names": fullnames and nicknames
+ */
+enum authdes_namekind {
+ ADN_FULLNAME,
+ ADN_NICKNAME
+};
+
+/*
+ * A fullname contains the network name of the client,
+ * a conversation key and the window
+ */
+struct authdes_fullname {
+ char *name; /* network name of client, up to MAXNETNAMELEN */
+ des_block key; /* conversation key */
+ u_long window; /* associated window */
+};
+
+
+/*
+ * A credential
+ */
+struct authdes_cred {
+ enum authdes_namekind adc_namekind;
+ struct authdes_fullname adc_fullname;
+ u_long adc_nickname;
+};
+
+
+
+/*
+ * A des authentication verifier
+ */
+struct authdes_verf {
+ union {
+ struct timeval adv_ctime; /* clear time */
+ des_block adv_xtime; /* crypt time */
+ } adv_time_u;
+ u_long adv_int_u;
+};
+
+/*
+ * des authentication verifier: client variety
+ *
+ * adv_timestamp is the current time.
+ * adv_winverf is the credential window + 1.
+ * Both are encrypted using the conversation key.
+ */
+#define adv_timestamp adv_time_u.adv_ctime
+#define adv_xtimestamp adv_time_u.adv_xtime
+#define adv_winverf adv_int_u
+
+/*
+ * des authentication verifier: server variety
+ *
+ * adv_timeverf is the client's timestamp + client's window
+ * adv_nickname is the server's nickname for the client.
+ * adv_timeverf is encrypted using the conversation key.
+ */
+#define adv_timeverf adv_time_u.adv_ctime
+#define adv_xtimeverf adv_time_u.adv_xtime
+#define adv_nickname adv_int_u
+
+/*
+ * Map a des credential into a unix cred.
+ *
+ */
+__BEGIN_DECLS
+extern int authdes_getucred( struct authdes_cred *, uid_t *, gid_t *, int *, gid_t * );
+__END_DECLS
+
__BEGIN_DECLS
+extern bool_t xdr_authdes_cred(XDR *, struct authdes_cred *);
+extern bool_t xdr_authdes_verf(XDR *, struct authdes_verf *);
extern int rtime(dev_t, struct netbuf *, int, struct timeval *,
struct timeval *);
+extern void kgetnetname(char *);
+extern enum auth_stat _svcauth_des(struct svc_req *, struct rpc_msg *);
__END_DECLS
#endif /* ndef _AUTH_DES_ */
diff --git a/kerberos5/Makefile.inc b/kerberos5/Makefile.inc
index 2525f3888806..e02e110b5786 100644
--- a/kerberos5/Makefile.inc
+++ b/kerberos5/Makefile.inc
@@ -29,9 +29,7 @@ ETSRCS= \
${KRB5DIR}/lib/wind/wind_err.et \
${KRB5DIR}/lib/ntlm/ntlm_err.et
-.if ${MK_GSSAPI} != "no"
ETSRCS+= ${KRB5DIR}/lib/gssapi/krb5/gkrb5_err.et
-.endif
.for ET in ${ETSRCS}
.for _ET in ${ET:T:R}
diff --git a/kerberos5/lib/Makefile b/kerberos5/lib/Makefile
index 7b63da16e360..1f631b48ce83 100644
--- a/kerberos5/lib/Makefile
+++ b/kerberos5/lib/Makefile
@@ -7,10 +7,8 @@ SUBDIR= libasn1 libhdb \
SUBDIR+= libkafs5 # requires krb_err.h from libkrb5
SUBDIR_DEPEND_libkafs5= libkrb5
-.if ${MK_GSSAPI} != "no"
SUBDIR+= libgssapi_krb5
SUBDIR+= libgssapi_ntlm
SUBDIR+= libgssapi_spnego
-.endif
.include <bsd.subdir.mk>
diff --git a/kerberos5/libexec/Makefile b/kerberos5/libexec/Makefile
index 543331d5029b..2ac9c2d45f28 100644
--- a/kerberos5/libexec/Makefile
+++ b/kerberos5/libexec/Makefile
@@ -4,8 +4,6 @@ SUBDIR= digest-service ipropd-master ipropd-slave hprop hpropd kdc \
kdigest kfd kimpersonate kpasswdd kcm
SUBDIR_PARALLEL=
-.if ${MK_GSSAPI} != "no"
SUBDIR+= kadmind
-.endif
.include <bsd.subdir.mk>
diff --git a/krb5/Makefile b/krb5/Makefile
index e9bbcae106c1..c49601990c4a 100644
--- a/krb5/Makefile
+++ b/krb5/Makefile
@@ -1,7 +1,7 @@
SUBDIR= util .WAIT \
include .WAIT \
lib .WAIT\
- plugins libdata libexec usr.bin usr.sbin
+ plugins libexec usr.bin usr.sbin
# SUBDIR_PARALLEL=
.include <bsd.subdir.mk>
diff --git a/krb5/Makefile.et b/krb5/Makefile.et
index b89e17857675..e80712c6ee2a 100644
--- a/krb5/Makefile.et
+++ b/krb5/Makefile.et
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/Makefile.inc b/krb5/Makefile.inc
index 5d302962208c..512d153126e8 100644
--- a/krb5/Makefile.inc
+++ b/krb5/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -9,12 +9,13 @@
.include <src.opts.mk>
+PACKAGE?= kerberos
KRB5_VERSION= 1.22-final
# MIT KRB5 uses KRB5_DIR. Heimdal uses KRB5DIR.
KRB5_SRCTOP= ${SRCTOP}/krb5
KRB5_DIR= ${SRCTOP}/crypto/krb5/src
-KRB5_ETDIR?= ${DESTDIR}/usr/share/et
+KRB5_ETDIR?= /usr/share/et
KRB5_SRCLIBDIR= ${KRB5_DIR}/lib
KRB5_OBJTOP= ${OBJTOP}/krb5
diff --git a/krb5/libdata/Makefile b/krb5/Makefile.pc
index c9a2e8e9259f..693a4636d749 100644
--- a/krb5/libdata/Makefile
+++ b/krb5/Makefile.pc
@@ -2,26 +2,15 @@
#
# SPDX-License-Identifier: BSD-2-Clause OR ISC
-.include "../Makefile.inc"
-
-.PATH: ${KRB5_DIR}/build-tools
-
-PACKAGE=kerberos-lib
-
-PCFILES=gssrpc.pc \
- kadm-client.pc \
- kadm-server.pc \
- kdb.pc \
- krb5.pc \
- krb5-gssapi.pc \
- mit-krb5.pc \
- mit-krb5-gssapi.pc
-
-CLEANFILES+= ${PCFILES}
.SUFFIXES: .pc .pc.in
.pc.in.pc:
+ @if ! grep -q "^PACKAGE_VERSION='${KRB5_VERSION}'$$" ${KRB5_DIR}/configure; then \
+ echo "KRB5_VERSION ${KRB5_VERSION} does not match the source:"; \
+ grep "^PACKAGE_VERSION=" ${KRB5_DIR}/configure; \
+ exit 1; \
+ fi >&2
sed -e 's,@prefix@,/usr,g ; \
s,@exec_prefix@,$${prefix},g ; \
s,@libdir@,${LIBDIR},g ; \
@@ -33,12 +22,3 @@ CLEANFILES+= ${PCFILES}
s,@DEFCKTNAME@,FILE:/var/krb5/user/%{euid}/client.keytab,g ; \
s,@COM_ERR_LIB@,-lcom_err,g ;' \
${.IMPSRC} > ${.TARGET}
-
-all: ${PCFILES}
- @if ! grep -q "^PACKAGE_VERSION='${KRB5_VERSION}'$$" ${KRB5_DIR}/configure; then \
- echo "KRB5_VERSION ${KRB5_VERSION} does not match the source:"; \
- grep "^PACKAGE_VERSION=" ${KRB5_DIR}/configure; \
- exit 1; \
- fi >&2
-
-.include <bsd.lib.mk>
diff --git a/krb5/include/Makefile b/krb5/include/Makefile
index 64c5d39b867b..8ce743e95d95 100644
--- a/krb5/include/Makefile
+++ b/krb5/include/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -7,8 +7,6 @@
# under sponsorship from the FreeBSD Foundation.
#
-PACKAGE= kerberos-lib
-
.include <src.opts.mk>
.include "../Makefile.inc"
diff --git a/krb5/include/Makefile.inc b/krb5/include/Makefile.inc
index 1f0dddcf31e1..8005a7b99e37 100644
--- a/krb5/include/Makefile.inc
+++ b/krb5/include/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -7,6 +7,4 @@
# under sponsorship from the FreeBSD Foundation.
#
-PACKAGE= kerberos-lib
-
.include "../Makefile.inc"
diff --git a/krb5/include/autoconf.h b/krb5/include/autoconf.h
index fe281d136954..19979b060f10 100644
--- a/krb5/include/autoconf.h
+++ b/krb5/include/autoconf.h
@@ -641,7 +641,7 @@
#define PACKAGE_NAME "Kerberos 5"
/* Define to the full name and version of this package. */
-#define PACKAGE_STRING "Kerberos 5 1.21.1"
+#define PACKAGE_STRING "Kerberos 5 1.22.0"
/* Define to the one symbol short name of this package. */
#define PACKAGE_TARNAME "krb5"
@@ -650,7 +650,7 @@
#define PACKAGE_URL ""
/* Define to the version of this package. */
-#define PACKAGE_VERSION "1.21.1"
+#define PACKAGE_VERSION "1.22.0"
/* Default PKCS11 module name */
#define PKCS11_MODNAME "opensc-pkcs11.so"
diff --git a/krb5/include/gssapi/Makefile b/krb5/include/gssapi/Makefile
index 997cbc06c389..0eabcd130200 100644
--- a/krb5/include/gssapi/Makefile
+++ b/krb5/include/gssapi/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/include/gssrpc/Makefile b/krb5/include/gssrpc/Makefile
index 929c9eaacdc9..1eacd5c91c77 100644
--- a/krb5/include/gssrpc/Makefile
+++ b/krb5/include/gssrpc/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/include/krb5/Makefile b/krb5/include/krb5/Makefile
index 2bbc558394c0..8df5c96559a6 100644
--- a/krb5/include/krb5/Makefile
+++ b/krb5/include/krb5/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/Makefile b/krb5/lib/Makefile
index 9df98e2cc056..3b89effaa29c 100644
--- a/krb5/lib/Makefile
+++ b/krb5/lib/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/Makefile.inc b/krb5/lib/Makefile.inc
index b6e5f6275039..7fc36229a864 100644
--- a/krb5/lib/Makefile.inc
+++ b/krb5/lib/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -9,7 +9,7 @@
.include "../Makefile.inc"
-PACKAGE?= kerberos-lib
+LIB_PACKAGE=
KRB5_KRB5LIBDIR= ${KRB5_SRCLIBDIR}/krb5
KRB5_K5CRYPTODIR= ${KRB5_SRCLIBDIR}/crypto
diff --git a/krb5/lib/apputils/Makefile b/krb5/lib/apputils/Makefile
index cf430eb3cd27..1a79b3800863 100644
--- a/krb5/lib/apputils/Makefile
+++ b/krb5/lib/apputils/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/crypto/Makefile b/krb5/lib/crypto/Makefile
index 4dda0d85da54..9521b48f020c 100644
--- a/krb5/lib/crypto/Makefile
+++ b/krb5/lib/crypto/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/crypto/builtin/Makefile.inc b/krb5/lib/crypto/builtin/Makefile.inc
index afb1afa27083..59b2e6674b6a 100644
--- a/krb5/lib/crypto/builtin/Makefile.inc
+++ b/krb5/lib/crypto/builtin/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/crypto/builtin/aes/Makefile.inc b/krb5/lib/crypto/builtin/aes/Makefile.inc
index f74b486b5ba5..f46a55921f51 100644
--- a/krb5/lib/crypto/builtin/aes/Makefile.inc
+++ b/krb5/lib/crypto/builtin/aes/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/crypto/builtin/camellia/Makefile.inc b/krb5/lib/crypto/builtin/camellia/Makefile.inc
index eef5973f3a7d..5effb8d132be 100644
--- a/krb5/lib/crypto/builtin/camellia/Makefile.inc
+++ b/krb5/lib/crypto/builtin/camellia/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/crypto/builtin/des/Makefile.inc b/krb5/lib/crypto/builtin/des/Makefile.inc
index 8fd5b615e913..02f8d4a1b53d 100644
--- a/krb5/lib/crypto/builtin/des/Makefile.inc
+++ b/krb5/lib/crypto/builtin/des/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/crypto/builtin/enc_provider/Makefile.inc b/krb5/lib/crypto/builtin/enc_provider/Makefile.inc
index 09df8cbc6300..ef2f6603e60c 100644
--- a/krb5/lib/crypto/builtin/enc_provider/Makefile.inc
+++ b/krb5/lib/crypto/builtin/enc_provider/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/crypto/builtin/hash_provider/Makefile.inc b/krb5/lib/crypto/builtin/hash_provider/Makefile.inc
index 216d7f2d4e9d..4f89791b9f79 100644
--- a/krb5/lib/crypto/builtin/hash_provider/Makefile.inc
+++ b/krb5/lib/crypto/builtin/hash_provider/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/crypto/builtin/md4/Makefile.inc b/krb5/lib/crypto/builtin/md4/Makefile.inc
index baf45bb094d2..e47dea4115aa 100644
--- a/krb5/lib/crypto/builtin/md4/Makefile.inc
+++ b/krb5/lib/crypto/builtin/md4/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/crypto/builtin/md5/Makefile.inc b/krb5/lib/crypto/builtin/md5/Makefile.inc
index ffdccd41fe3f..a01188fa6979 100644
--- a/krb5/lib/crypto/builtin/md5/Makefile.inc
+++ b/krb5/lib/crypto/builtin/md5/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/crypto/builtin/sha1/Makefile.inc b/krb5/lib/crypto/builtin/sha1/Makefile.inc
index 9e5f8a5b9d2c..f8ca967b45fd 100644
--- a/krb5/lib/crypto/builtin/sha1/Makefile.inc
+++ b/krb5/lib/crypto/builtin/sha1/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/crypto/builtin/sha2/Makefile.inc b/krb5/lib/crypto/builtin/sha2/Makefile.inc
index f6ce222f730f..a9463f691c53 100644
--- a/krb5/lib/crypto/builtin/sha2/Makefile.inc
+++ b/krb5/lib/crypto/builtin/sha2/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/crypto/krb/Makefile.inc b/krb5/lib/crypto/krb/Makefile.inc
index a38178fd3a2e..3745ad66e6b9 100644
--- a/krb5/lib/crypto/krb/Makefile.inc
+++ b/krb5/lib/crypto/krb/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/crypto/openssl/Makefile.inc b/krb5/lib/crypto/openssl/Makefile.inc
index 0b81260a059e..6b7bd89338f7 100644
--- a/krb5/lib/crypto/openssl/Makefile.inc
+++ b/krb5/lib/crypto/openssl/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/crypto/openssl/des/Makefile.inc b/krb5/lib/crypto/openssl/des/Makefile.inc
index ef05fe0b0785..9cc99846c6b3 100644
--- a/krb5/lib/crypto/openssl/des/Makefile.inc
+++ b/krb5/lib/crypto/openssl/des/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/crypto/openssl/enc_provider/Makefile.inc b/krb5/lib/crypto/openssl/enc_provider/Makefile.inc
index 8cf42dcc6e31..d1c21f334b10 100644
--- a/krb5/lib/crypto/openssl/enc_provider/Makefile.inc
+++ b/krb5/lib/crypto/openssl/enc_provider/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/crypto/openssl/hash_provider/Makefile.inc b/krb5/lib/crypto/openssl/hash_provider/Makefile.inc
index 83f2f1ba9129..f8d25ff8a8cc 100644
--- a/krb5/lib/crypto/openssl/hash_provider/Makefile.inc
+++ b/krb5/lib/crypto/openssl/hash_provider/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/gssapi/Makefile b/krb5/lib/gssapi/Makefile
index 63e4d7df4bed..a434b29a2dfb 100644
--- a/krb5/lib/gssapi/Makefile
+++ b/krb5/lib/gssapi/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -10,12 +10,15 @@
.include <src.opts.mk>
.include "../Makefile.inc"
+.include "${KRB5_SRCTOP}/Makefile.pc"
LIB= gssapi_krb5
-# SHLIB_MAJOR= 2
-LDFLAGS=-Wl,--no-undefined
-LIBADD= krb5 k5crypto com_err krb5profile krb5support
+LDFLAGS= -Wl,--no-undefined
+LIBADD= krb5 k5crypto com_err krb5profile krb5support
VERSION_MAP= ${.CURDIR}/version.map
+PCFILES= krb5-gssapi.pc \
+ mit-krb5-gssapi.pc
+CLEANFILES+= ${PCFILES}
# This is a concatenation of:
# crypto/krb5/src/lib/gssapi/libgssapi_krb5.exports
@@ -44,4 +47,8 @@ CFLAGS+=${DEFINES} \
.include <bsd.lib.mk>
+all: ${PCFILES}
+
.SUFFIXES: .h .c .et
+
+.PATH: ${KRB5_DIR}/build-tools
diff --git a/krb5/lib/gssapi/generic/Makefile.et b/krb5/lib/gssapi/generic/Makefile.et
index 787a1c8aa1e3..c245967e3d40 100644
--- a/krb5/lib/gssapi/generic/Makefile.et
+++ b/krb5/lib/gssapi/generic/Makefile.et
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/gssapi/generic/Makefile.inc b/krb5/lib/gssapi/generic/Makefile.inc
index 9dcc3476775e..f449ab15c379 100644
--- a/krb5/lib/gssapi/generic/Makefile.inc
+++ b/krb5/lib/gssapi/generic/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/gssapi/krb5/Makefile.et b/krb5/lib/gssapi/krb5/Makefile.et
index 347f6a72f3b6..393635d5a8da 100644
--- a/krb5/lib/gssapi/krb5/Makefile.et
+++ b/krb5/lib/gssapi/krb5/Makefile.et
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/gssapi/krb5/Makefile.inc b/krb5/lib/gssapi/krb5/Makefile.inc
index 7e001aa0af27..61010408cd5a 100644
--- a/krb5/lib/gssapi/krb5/Makefile.inc
+++ b/krb5/lib/gssapi/krb5/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/gssapi/mechglue/Makefile.inc b/krb5/lib/gssapi/mechglue/Makefile.inc
index c54cdf37b55b..13855ae4ebad 100644
--- a/krb5/lib/gssapi/mechglue/Makefile.inc
+++ b/krb5/lib/gssapi/mechglue/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/gssapi/spnego/Makefile.inc b/krb5/lib/gssapi/spnego/Makefile.inc
index 4ebac318ef5a..1184ecc3e6a2 100644
--- a/krb5/lib/gssapi/spnego/Makefile.inc
+++ b/krb5/lib/gssapi/spnego/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/kadm5clnt/Makefile b/krb5/lib/kadm5clnt/Makefile
index 898276e77d04..52a7187cf9bb 100644
--- a/krb5/lib/kadm5clnt/Makefile
+++ b/krb5/lib/kadm5clnt/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -10,12 +10,14 @@
.include <src.opts.mk>
.include "../Makefile.inc"
+.include "${KRB5_SRCTOP}/Makefile.pc"
LIB= kadm5clnt_mit
-# SHLIB_MAJOR= 12
-LDFLAGS=-Wl,--no-undefined
-LIBADD= krb5profile gssrpc gssapi_krb5 krb5 k5crypto krb5support com_err
+LDFLAGS= -Wl,--no-undefined
+LIBADD= krb5profile gssrpc gssapi_krb5 krb5 k5crypto krb5support com_err
VERSION_MAP= ${.CURDIR}/version.map
+PCFILES= kadm-client.pc
+CLEANFILES+= ${PCFILES}
SRCS= alt_prof.c \
chpass_util.c \
@@ -88,6 +90,9 @@ afterinstall:
.include <bsd.lib.mk>
+all: ${PCFILES}
+
.SUFFIXES: .h .c
-.PATH: ${KRB5_DIR}/lib/kadm5
+.PATH: ${KRB5_DIR}/build-tools \
+ ${KRB5_DIR}/lib/kadm5
diff --git a/krb5/lib/kadm5clnt/clnt/Makefile.inc b/krb5/lib/kadm5clnt/clnt/Makefile.inc
index 2c66879d0eec..c42c87918793 100644
--- a/krb5/lib/kadm5clnt/clnt/Makefile.inc
+++ b/krb5/lib/kadm5clnt/clnt/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/kadm5srv/Makefile b/krb5/lib/kadm5srv/Makefile
index aa4fad49ea02..9eecd20ca822 100644
--- a/krb5/lib/kadm5srv/Makefile
+++ b/krb5/lib/kadm5srv/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -10,12 +10,14 @@
.include <src.opts.mk>
.include "../Makefile.inc"
+.include "${KRB5_SRCTOP}/Makefile.pc"
LIB= kadm5srv_mit
-# SHLIB_MAJOR= 12
-LDFLAGS=-Wl,--no-undefined
-LIBADD= krb5profile gssrpc gssapi_krb5 kdb5 krb5 k5crypto krb5support com_err
+LDFLAGS= -Wl,--no-undefined
+LIBADD= krb5profile gssrpc gssapi_krb5 kdb5 krb5 k5crypto krb5support com_err
VERSION_MAP= ${.CURDIR}/version.map
+PCFILES= kadm-server.pc
+CLEANFILES+= ${PCFILES}
SRCS= alt_prof.c \
chpass_util.c \
@@ -88,6 +90,9 @@ ${GEN_CHPASS_UTIL_STRINGS_C}: ${GEN_CHPASS_UTIL_STRINGS}
.include <bsd.lib.mk>
+all: ${PCFILES}
+
.SUFFIXES: .h .c .et
-.PATH: ${KRB5_DIR}/lib/kadm5
+.PATH: ${KRB5_DIR}/build-tools \
+ ${KRB5_DIR}/lib/kadm5
diff --git a/krb5/lib/kadm5srv/srv/Makefile.inc b/krb5/lib/kadm5srv/srv/Makefile.inc
index cd46e1af7333..2ea4d22932fc 100644
--- a/krb5/lib/kadm5srv/srv/Makefile.inc
+++ b/krb5/lib/kadm5srv/srv/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/kadmin_common/Makefile b/krb5/lib/kadmin_common/Makefile
index d324acbbe31c..9eed31bc94f3 100644
--- a/krb5/lib/kadmin_common/Makefile
+++ b/krb5/lib/kadmin_common/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/kdb/Makefile b/krb5/lib/kdb/Makefile
index ff17900fb7ec..80039ad83502 100644
--- a/krb5/lib/kdb/Makefile
+++ b/krb5/lib/kdb/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -10,12 +10,14 @@
.include <src.opts.mk>
.include "../Makefile.inc"
+.include "${KRB5_SRCTOP}/Makefile.pc"
LIB= kdb5
-# SHLIB_MAJOR= 10
-LDFLAGS=-Wl,--no-undefined
-LIBADD= krb5profile gssrpc krb5 k5crypto com_err krb5support gssapi_krb5
+LDFLAGS= -Wl,--no-undefined
+LIBADD= krb5profile gssrpc krb5 k5crypto com_err krb5support gssapi_krb5
VERSION_MAP= ${.CURDIR}/version.map
+PCFILES= kdb.pc
+CLEANFILES+= ${PCFILES}
SRCS= decrypt_key.c \
encrypt_key.c \
@@ -60,6 +62,9 @@ ${ADB_ERR_C}: ${ADB_ERR}
.include <bsd.lib.mk>
+all: ${PCFILES}
+
.SUFFIXES: .h .c
-.PATH: ${KRB5_DIR}/lib/kdb
+.PATH: ${KRB5_DIR}/build-tools \
+ ${KRB5_DIR}/lib/kdb
diff --git a/krb5/lib/kprop_util/Makefile b/krb5/lib/kprop_util/Makefile
index 31c35601d1a8..aaaeb969058d 100644
--- a/krb5/lib/kprop_util/Makefile
+++ b/krb5/lib/kprop_util/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/krad/Makefile b/krb5/lib/krad/Makefile
index a990354c1877..36074dff1296 100644
--- a/krb5/lib/krad/Makefile
+++ b/krb5/lib/krad/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/krb5/Makefile b/krb5/lib/krb5/Makefile
index dc6c53ec6ce2..b3587cf58c2b 100644
--- a/krb5/lib/krb5/Makefile
+++ b/krb5/lib/krb5/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -10,12 +10,15 @@
.include <src.opts.mk>
.include "../Makefile.inc"
+.include "${KRB5_SRCTOP}/Makefile.pc"
LIB= krb5
-LDFLAGS=-Wl,--no-undefined
-LIBADD= krb5profile k5crypto com_err krb5support
-# SHLIB_MAJOR= 3
+LDFLAGS= -Wl,--no-undefined
+LIBADD= krb5profile k5crypto com_err krb5support
VERSION_MAP= ${.CURDIR}/version.map
+PCFILES= krb5.pc \
+ mit-krb5.pc
+CLEANFILES+= ${PCFILES}
SRCS= krb5_libinit.c
@@ -67,6 +70,8 @@ KDCPACKAGE= kerberos-kdc
.include <bsd.lib.mk>
+all: ${PCFILES}
+
.SUFFIXES: .et .man
.man.5:
@@ -75,5 +80,6 @@ KDCPACKAGE= kerberos-kdc
.man.7:
@cp ${.ALLSRC} ${.TARGET}
-.PATH: ${KRB5_DIR}/lib/krb5 \
+.PATH: ${KRB5_DIR}/build-tools \
+ ${KRB5_DIR}/lib/krb5 \
${KRB5_DIR}/man
diff --git a/krb5/lib/krb5/asn.1/Makefile.inc b/krb5/lib/krb5/asn.1/Makefile.inc
index 1c00b4a9389a..a878986e8a24 100644
--- a/krb5/lib/krb5/asn.1/Makefile.inc
+++ b/krb5/lib/krb5/asn.1/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/krb5/ccache/Makefile.inc b/krb5/lib/krb5/ccache/Makefile.inc
index 08514b2819d9..746a24f11e8b 100644
--- a/krb5/lib/krb5/ccache/Makefile.inc
+++ b/krb5/lib/krb5/ccache/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/krb5/docs/Makefile.inc b/krb5/lib/krb5/docs/Makefile.inc
index efc4dc2f6b4b..7c1e55325b4c 100644
--- a/krb5/lib/krb5/docs/Makefile.inc
+++ b/krb5/lib/krb5/docs/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/krb5/error_tables/Makefile.inc b/krb5/lib/krb5/error_tables/Makefile.inc
index 5d2ec3b9e920..e024238219da 100644
--- a/krb5/lib/krb5/error_tables/Makefile.inc
+++ b/krb5/lib/krb5/error_tables/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/krb5/keytab/Makefile.inc b/krb5/lib/krb5/keytab/Makefile.inc
index 8fca2e640f30..800bddb0d952 100644
--- a/krb5/lib/krb5/keytab/Makefile.inc
+++ b/krb5/lib/krb5/keytab/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/krb5/krb/Makefile.inc b/krb5/lib/krb5/krb/Makefile.inc
index aedc0438a596..4d8438246dac 100644
--- a/krb5/lib/krb5/krb/Makefile.inc
+++ b/krb5/lib/krb5/krb/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/krb5/os/Makefile.inc b/krb5/lib/krb5/os/Makefile.inc
index a59f6fc56c59..f81ab7866ecb 100644
--- a/krb5/lib/krb5/os/Makefile.inc
+++ b/krb5/lib/krb5/os/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/krb5/rcache/Makefile.inc b/krb5/lib/krb5/rcache/Makefile.inc
index 4f68dc702dd8..6b5d61e8b75c 100644
--- a/krb5/lib/krb5/rcache/Makefile.inc
+++ b/krb5/lib/krb5/rcache/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/krb5/unicode/Makefile.inc b/krb5/lib/krb5/unicode/Makefile.inc
index 19cea34f2193..ff146961ccea 100644
--- a/krb5/lib/krb5/unicode/Makefile.inc
+++ b/krb5/lib/krb5/unicode/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/lib/rpc/Makefile b/krb5/lib/rpc/Makefile
index a539803cc57c..7e37a5479132 100644
--- a/krb5/lib/rpc/Makefile
+++ b/krb5/lib/rpc/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -10,12 +10,14 @@
.include <src.opts.mk>
.include "../Makefile.inc"
+.include "${KRB5_SRCTOP}/Makefile.pc"
LIB= gssrpc
-# SHLIB_MAJOR= 4
-LDFLAGS=-Wl,--no-undefined
-LIBADD= gssapi_krb5 krb5 k5crypto com_err krb5support
+LDFLAGS= -Wl,--no-undefined
+LIBADD= gssapi_krb5 krb5 k5crypto com_err krb5support
VERSION_MAP= ${.CURDIR}/version.map
+PCFILES= gssrpc.pc
+CLEANFILES+= ${PCFILES}
SRCS= auth_gss.c \
auth_gssapi.c \
@@ -75,4 +77,7 @@ CFLAGS+=-I${KRB5_DIR}/lib/rpc \
.include <bsd.lib.mk>
-.PATH: ${KRB5_DIR}/lib/rpc
+all: ${PCFILES}
+
+.PATH: ${KRB5_DIR}/build-tools \
+ ${KRB5_DIR}/lib/rpc
diff --git a/krb5/libexec/Makefile b/krb5/libexec/Makefile
index 1f8ce8728d19..38199ed09aa9 100644
--- a/krb5/libexec/Makefile
+++ b/krb5/libexec/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -7,8 +7,6 @@
# under sponsorship from the FreeBSD Foundation.
#
-#DIR $FreeBSD$
-
SUBDIR= kdc kadmind kpropd kprop kproplog
SUBDIR_PARALLEL=
diff --git a/krb5/libexec/Makefile.inc b/krb5/libexec/Makefile.inc
index 6ce709d08304..b40d1999f23f 100644
--- a/krb5/libexec/Makefile.inc
+++ b/krb5/libexec/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -9,5 +9,5 @@
.include "../Makefile.inc"
-PACKAGE?= kerberos-kdc
+PACKAGE= kerberos-kdc
BINDIR?= /usr/libexec
diff --git a/krb5/libexec/kadmind/Makefile b/krb5/libexec/kadmind/Makefile
index 62046214af7b..434e4adce211 100644
--- a/krb5/libexec/kadmind/Makefile
+++ b/krb5/libexec/kadmind/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/libexec/kdc/Makefile b/krb5/libexec/kdc/Makefile
index 1a0d5b9c208b..9317ebbbc9a8 100644
--- a/krb5/libexec/kdc/Makefile
+++ b/krb5/libexec/kdc/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/libexec/kprop/Makefile b/krb5/libexec/kprop/Makefile
index 1294d9014ee4..0889562cfa43 100644
--- a/krb5/libexec/kprop/Makefile
+++ b/krb5/libexec/kprop/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/libexec/kpropd/Makefile b/krb5/libexec/kpropd/Makefile
index e7ffe5a26016..667947dcc84c 100644
--- a/krb5/libexec/kpropd/Makefile
+++ b/krb5/libexec/kpropd/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/libexec/kproplog/Makefile b/krb5/libexec/kproplog/Makefile
index 81405260a06e..3a69164ddabf 100644
--- a/krb5/libexec/kproplog/Makefile
+++ b/krb5/libexec/kproplog/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/Makefile b/krb5/plugins/Makefile
index a72dd4a521bc..d8ba01585f2a 100644
--- a/krb5/plugins/Makefile
+++ b/krb5/plugins/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/Makefile.inc b/krb5/plugins/Makefile.inc
index d98ed1d3887b..f1ae34663769 100644
--- a/krb5/plugins/Makefile.inc
+++ b/krb5/plugins/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/audit/Makefile b/krb5/plugins/audit/Makefile
index f85e5d1a81a5..124c6e2b7a5a 100644
--- a/krb5/plugins/audit/Makefile
+++ b/krb5/plugins/audit/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/k5tls/Makefile b/krb5/plugins/k5tls/Makefile
index 30738a70e71d..928d64d28ab5 100644
--- a/krb5/plugins/k5tls/Makefile
+++ b/krb5/plugins/k5tls/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/kdb/Makefile b/krb5/plugins/kdb/Makefile
index f9e2a4d1afb1..1d5533bf8d22 100644
--- a/krb5/plugins/kdb/Makefile
+++ b/krb5/plugins/kdb/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/kdb/Makefile.inc b/krb5/plugins/kdb/Makefile.inc
index 56c36b7539f7..8005a7b99e37 100644
--- a/krb5/plugins/kdb/Makefile.inc
+++ b/krb5/plugins/kdb/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/kdb/db2/Makefile b/krb5/plugins/kdb/db2/Makefile
index 3230a77171f4..737ae7e15e27 100644
--- a/krb5/plugins/kdb/db2/Makefile
+++ b/krb5/plugins/kdb/db2/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/kdb/db2/libdb2/Makefile.inc b/krb5/plugins/kdb/db2/libdb2/Makefile.inc
index d31beb5dd5f5..ae035ad49f00 100644
--- a/krb5/plugins/kdb/db2/libdb2/Makefile.inc
+++ b/krb5/plugins/kdb/db2/libdb2/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/kdb/db2/libdb2/btree/Makefile.inc b/krb5/plugins/kdb/db2/libdb2/btree/Makefile.inc
index 1c1b2b6ea26c..27fc2627c0ef 100644
--- a/krb5/plugins/kdb/db2/libdb2/btree/Makefile.inc
+++ b/krb5/plugins/kdb/db2/libdb2/btree/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/kdb/db2/libdb2/db/Makefile.inc b/krb5/plugins/kdb/db2/libdb2/db/Makefile.inc
index cdcdac287b5d..66613c4dd69d 100644
--- a/krb5/plugins/kdb/db2/libdb2/db/Makefile.inc
+++ b/krb5/plugins/kdb/db2/libdb2/db/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/kdb/db2/libdb2/hash/Makefile.inc b/krb5/plugins/kdb/db2/libdb2/hash/Makefile.inc
index 7943281b7797..d6f1fae0414a 100644
--- a/krb5/plugins/kdb/db2/libdb2/hash/Makefile.inc
+++ b/krb5/plugins/kdb/db2/libdb2/hash/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/kdb/db2/libdb2/include/Makefile.inc b/krb5/plugins/kdb/db2/libdb2/include/Makefile.inc
index 63be42f21082..a73998cdc8d0 100644
--- a/krb5/plugins/kdb/db2/libdb2/include/Makefile.inc
+++ b/krb5/plugins/kdb/db2/libdb2/include/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/kdb/db2/libdb2/mpool/Makefile.inc b/krb5/plugins/kdb/db2/libdb2/mpool/Makefile.inc
index 801f36a085c7..5b05e9b866e4 100644
--- a/krb5/plugins/kdb/db2/libdb2/mpool/Makefile.inc
+++ b/krb5/plugins/kdb/db2/libdb2/mpool/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/kdb/db2/libdb2/recno/Makefile.inc b/krb5/plugins/kdb/db2/libdb2/recno/Makefile.inc
index 01557c312a53..10af0f174b56 100644
--- a/krb5/plugins/kdb/db2/libdb2/recno/Makefile.inc
+++ b/krb5/plugins/kdb/db2/libdb2/recno/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/preauth/Makefile b/krb5/plugins/preauth/Makefile
index 23861859bd2c..b037aa2058ab 100644
--- a/krb5/plugins/preauth/Makefile
+++ b/krb5/plugins/preauth/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/preauth/Makefile.inc b/krb5/plugins/preauth/Makefile.inc
index 8a713e4d0856..49065f8a3147 100644
--- a/krb5/plugins/preauth/Makefile.inc
+++ b/krb5/plugins/preauth/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/preauth/otp/Makefile b/krb5/plugins/preauth/otp/Makefile
index 48fc35db0727..fa047b44e176 100644
--- a/krb5/plugins/preauth/otp/Makefile
+++ b/krb5/plugins/preauth/otp/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/preauth/pkinit/Makefile b/krb5/plugins/preauth/pkinit/Makefile
index 7d227aca5420..47e02b315cd6 100644
--- a/krb5/plugins/preauth/pkinit/Makefile
+++ b/krb5/plugins/preauth/pkinit/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/preauth/spake/Makefile b/krb5/plugins/preauth/spake/Makefile
index 3aa375cb5100..602d563d291f 100644
--- a/krb5/plugins/preauth/spake/Makefile
+++ b/krb5/plugins/preauth/spake/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/plugins/preauth/test/Makefile b/krb5/plugins/preauth/test/Makefile
index 99f632c0cb17..70292dec9a14 100644
--- a/krb5/plugins/preauth/test/Makefile
+++ b/krb5/plugins/preauth/test/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/usr.bin/Makefile b/krb5/usr.bin/Makefile
index b8f8d471cb9b..625aa2245334 100644
--- a/krb5/usr.bin/Makefile
+++ b/krb5/usr.bin/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/usr.bin/Makefile.inc b/krb5/usr.bin/Makefile.inc
index f27bd78cd54c..55e21f80936c 100644
--- a/krb5/usr.bin/Makefile.inc
+++ b/krb5/usr.bin/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -9,5 +9,4 @@
.include "../Makefile.inc"
-PACKAGE?= kerberos
BINDIR?= /usr/bin
diff --git a/krb5/usr.bin/gss-client/Makefile b/krb5/usr.bin/gss-client/Makefile
index acd67f9c17c6..e989e9bf58a8 100644
--- a/krb5/usr.bin/gss-client/Makefile
+++ b/krb5/usr.bin/gss-client/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/usr.bin/kadmin/Makefile b/krb5/usr.bin/kadmin/Makefile
index 182cabb8f9f6..854640e2f002 100644
--- a/krb5/usr.bin/kadmin/Makefile
+++ b/krb5/usr.bin/kadmin/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/usr.bin/kdestroy/Makefile b/krb5/usr.bin/kdestroy/Makefile
index e3f493ffdc60..d99b2c07d140 100644
--- a/krb5/usr.bin/kdestroy/Makefile
+++ b/krb5/usr.bin/kdestroy/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/usr.bin/kinit/Makefile b/krb5/usr.bin/kinit/Makefile
index 3d1285137f85..43e5525ffe30 100644
--- a/krb5/usr.bin/kinit/Makefile
+++ b/krb5/usr.bin/kinit/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/usr.bin/klist/Makefile b/krb5/usr.bin/klist/Makefile
index 3094a3b69d4b..539337bdd10b 100644
--- a/krb5/usr.bin/klist/Makefile
+++ b/krb5/usr.bin/klist/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/usr.bin/kpasswd/Makefile b/krb5/usr.bin/kpasswd/Makefile
index e4d7d1e8ab84..262fdf3fe2e0 100644
--- a/krb5/usr.bin/kpasswd/Makefile
+++ b/krb5/usr.bin/kpasswd/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/usr.bin/ksu/Makefile b/krb5/usr.bin/ksu/Makefile
index 7296ea61b73d..aaec461ce0b0 100644
--- a/krb5/usr.bin/ksu/Makefile
+++ b/krb5/usr.bin/ksu/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/usr.bin/kswitch/Makefile b/krb5/usr.bin/kswitch/Makefile
index 10298f34a9d4..bae947ab6e8c 100644
--- a/krb5/usr.bin/kswitch/Makefile
+++ b/krb5/usr.bin/kswitch/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/usr.bin/ktutil/Makefile b/krb5/usr.bin/ktutil/Makefile
index 15991cb49bce..597de6568eaf 100644
--- a/krb5/usr.bin/ktutil/Makefile
+++ b/krb5/usr.bin/ktutil/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/usr.bin/kvno/Makefile b/krb5/usr.bin/kvno/Makefile
index 0a43765e53f4..166f3d4b7086 100644
--- a/krb5/usr.bin/kvno/Makefile
+++ b/krb5/usr.bin/kvno/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/usr.bin/sclient/Makefile b/krb5/usr.bin/sclient/Makefile
index 8a3cc38fe3b6..e98352e98d67 100644
--- a/krb5/usr.bin/sclient/Makefile
+++ b/krb5/usr.bin/sclient/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/usr.bin/sim_client/Makefile b/krb5/usr.bin/sim_client/Makefile
index 3e5ea72407bc..8ab198e42c44 100644
--- a/krb5/usr.bin/sim_client/Makefile
+++ b/krb5/usr.bin/sim_client/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/usr.sbin/Makefile b/krb5/usr.sbin/Makefile
index 083132e2de05..84e4a4dac99a 100644
--- a/krb5/usr.sbin/Makefile
+++ b/krb5/usr.sbin/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -7,8 +7,6 @@
# under sponsorship from the FreeBSD Foundation.
#
-# $FreeBSD$
-
SUBDIR= kadmin.local kdb5_util sim_server gss-server
SUBDIR_PARALLEL=
diff --git a/krb5/usr.sbin/Makefile.inc b/krb5/usr.sbin/Makefile.inc
index fb0b2dda8c97..689ee64515ae 100644
--- a/krb5/usr.sbin/Makefile.inc
+++ b/krb5/usr.sbin/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/usr.sbin/gss-server/Makefile b/krb5/usr.sbin/gss-server/Makefile
index c42740045ac4..6b56e11f0ac4 100644
--- a/krb5/usr.sbin/gss-server/Makefile
+++ b/krb5/usr.sbin/gss-server/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -7,8 +7,6 @@
# under sponsorship from the FreeBSD Foundation.
#
-PACKAGE= kerberos
-
PROG= gss-server
LIBADD= gssapi_krb5 krb5 k5crypto com_err krb5profile krb5support sys
diff --git a/krb5/usr.sbin/kadmin.local/Makefile b/krb5/usr.sbin/kadmin.local/Makefile
index 3930c0fc4694..a24d7ecf9046 100644
--- a/krb5/usr.sbin/kadmin.local/Makefile
+++ b/krb5/usr.sbin/kadmin.local/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/usr.sbin/kdb5_util/Makefile b/krb5/usr.sbin/kdb5_util/Makefile
index 0c1ad4d56bad..df45a0248289 100644
--- a/krb5/usr.sbin/kdb5_util/Makefile
+++ b/krb5/usr.sbin/kdb5_util/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/usr.sbin/sim_server/Makefile b/krb5/usr.sbin/sim_server/Makefile
index adaf8d1f087f..ddeff682466f 100644
--- a/krb5/usr.sbin/sim_server/Makefile
+++ b/krb5/usr.sbin/sim_server/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -7,8 +7,6 @@
# under sponsorship from the FreeBSD Foundation.
#
-PACKAGE= kerberos
-
PROG= sim_server
LIBADD= krb5 k5crypto com_err krb5profile krb5support sys
diff --git a/krb5/usr.sbin/sserver/Makefile b/krb5/usr.sbin/sserver/Makefile
index e4e85b28157e..ae5e120848ff 100644
--- a/krb5/usr.sbin/sserver/Makefile
+++ b/krb5/usr.sbin/sserver/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -7,8 +7,6 @@
# under sponsorship from the FreeBSD Foundation.
#
-PACKAGE= kerberos
-
PROG= sserver
LIBADD= krb5 k5crypto com_err krb5profile krb5support sys
diff --git a/krb5/util/Makefile b/krb5/util/Makefile
index 17733db5b2a5..8d376e034708 100644
--- a/krb5/util/Makefile
+++ b/krb5/util/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
diff --git a/krb5/util/Makefile.inc b/krb5/util/Makefile.inc
index 95b93a793d77..f1e61f498a4c 100644
--- a/krb5/util/Makefile.inc
+++ b/krb5/util/Makefile.inc
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -9,5 +9,7 @@
.include "../Makefile.inc"
+LIB_PACKAGE=
+
SHLIBDIR?= /usr/lib
SHLIB_MAJOR?= 122
diff --git a/krb5/util/build-tools/Makefile b/krb5/util/build-tools/Makefile
index 1a6a373f0e7f..8bafbb777c07 100644
--- a/krb5/util/build-tools/Makefile
+++ b/krb5/util/build-tools/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -7,12 +7,11 @@
# under sponsorship from the FreeBSD Foundation.
#
-PACKAGE= kerberos-lib
-
.include "../Makefile.inc"
SCRIPTS= krb5-config.sh
BINDIR?= /usr/bin
+TAGS= dev
MAN= krb5-config.1
diff --git a/krb5/util/build-tools/krb5-config.sh b/krb5/util/build-tools/krb5-config.sh
index c0481f3417e1..b23fe0141345 100755
--- a/krb5/util/build-tools/krb5-config.sh
+++ b/krb5/util/build-tools/krb5-config.sh
@@ -26,7 +26,7 @@
# Configurable parameters set by autoconf
# Disregard the above. Edit this by hand in the bespoke FreeBSD build.
-version_string="Kerberos 5 release 1.21.3"
+version_string="Kerberos 5 release 1.22.0"
prefix=/usr
exec_prefix=${prefix}
diff --git a/krb5/util/compile_et/Makefile b/krb5/util/compile_et/Makefile
index 3d48b39cab27..fb87de3dd9c6 100644
--- a/krb5/util/compile_et/Makefile
+++ b/krb5/util/compile_et/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -7,8 +7,6 @@
# under sponsorship from the FreeBSD Foundation.
#
-PACKAGE= kerberos
-
.include "../Makefile.inc"
SRC_ETDIR= ${KRB5_DIR}/util/et
@@ -18,6 +16,7 @@ SCRIPTS= compile_et
BINDIR?= /usr/bin
MAN= compile_et.1
CLEANFILES= compile_et
+TAGS+= dev
compile_et: compile_et.sh
sh ${SRC_ETDIR}/config_script ${SRC_ETDIR}/compile_et.sh \
diff --git a/krb5/util/et/Makefile b/krb5/util/et/Makefile
index 5d0c2a3e3ca5..35ac601629d7 100644
--- a/krb5/util/et/Makefile
+++ b/krb5/util/et/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -7,8 +7,6 @@
# under sponsorship from the FreeBSD Foundation.
#
-PACKAGE= kerberos-lib
-
.include <src.opts.mk>
.include "../Makefile.inc"
@@ -20,7 +18,6 @@ INCSDIR=${INCLUDEDIR}
LIBADD= krb5support
VERSION_MAP= ${.CURDIR}/version.map
-
SRCS= com_err.c \
error_message.c \
et_name.c \
@@ -38,6 +35,15 @@ CFLAGS+=-I${KRB5_DIR}/util/et \
-I${.CURDIR:H:H}/include \
-I${.OBJDIR}
+FILESGROUPS= ETSCRIPTS
+
+ETSCRIPTS= et_c.awk \
+ et_h.awk
+
+ETSCRIPTSDIR= ${KRB5_ETDIR}
+ETSCRIPTSMODE= 0444
+ETSCRIPTSTAGS= dev
+
.include <bsd.lib.mk>
.SUFFIXES: .h .c .et .ct .man .3
diff --git a/krb5/util/profile/Makefile b/krb5/util/profile/Makefile
index 7d1b806bfb5d..e62d66636161 100644
--- a/krb5/util/profile/Makefile
+++ b/krb5/util/profile/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -7,8 +7,6 @@
# under sponsorship from the FreeBSD Foundation.
#
-PACKAGE= kerberos-lib
-
.include <src.opts.mk>
.include "../Makefile.inc"
diff --git a/krb5/util/ss/Makefile b/krb5/util/ss/Makefile
index 2c43f2b5934f..2c48ccf56573 100644
--- a/krb5/util/ss/Makefile
+++ b/krb5/util/ss/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -7,8 +7,6 @@
# under sponsorship from the FreeBSD Foundation.
#
-PACKAGE= kerberos-lib
-
.include <src.opts.mk>
.include "../Makefile.inc"
diff --git a/krb5/util/support/Makefile b/krb5/util/support/Makefile
index bba65bcd89c1..e2e677fee89d 100644
--- a/krb5/util/support/Makefile
+++ b/krb5/util/support/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -7,8 +7,6 @@
# under sponsorship from the FreeBSD Foundation.
#
-PACKAGE= kerberos-lib
-
.include <src.opts.mk>
.include "../Makefile.inc"
diff --git a/krb5/util/verto/Makefile b/krb5/util/verto/Makefile
index 8f02d5f897f4..3ae9da9df20b 100644
--- a/krb5/util/verto/Makefile
+++ b/krb5/util/verto/Makefile
@@ -1,5 +1,5 @@
#
-# SPDX-License-Idendifier: BSD-2-Clause
+# SPDX-License-Identifier: BSD-2-Clause
#
# Copyright (c) 2025 FreeBSD Foundation
#
@@ -7,8 +7,6 @@
# under sponsorship from the FreeBSD Foundation.
#
-PACKAGE= kerberos-lib
-
.include <src.opts.mk>
.include "../Makefile.inc"
diff --git a/lib/Makefile b/lib/Makefile
index 9447cc4551c0..2b7cf2fdcb7d 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -179,12 +179,12 @@ SUBDIR.${MK_FDT}+= libfdt
SUBDIR.${MK_FILE}+= libmagic
SUBDIR.${MK_GPIO}+= libgpio
.if ${MK_MITKRB5} == "no"
-SUBDIR.${MK_GSSAPI}+= libgssapi
+SUBDIR.${MK_KERBEROS}+= libgssapi
.endif
-SUBDIR.${MK_GSSAPI}+= librpcsec_gss
+SUBDIR.${MK_KERBEROS}+= librpcsec_gss
SUBDIR.${MK_ICONV}+= libiconv_modules
.if ${MK_MITKRB5} == "no"
-SUBDIR.${MK_KERBEROS_SUPPORT}+= libcom_err
+SUBDIR.${MK_KERBEROS}+= libcom_err
.endif
SUBDIR.${MK_LDNS}+= libldns
SUBDIR.${MK_STATS}+= libstats
diff --git a/lib/libc/i386/gen/makecontext.c b/lib/libc/i386/gen/makecontext.c
index 7b4845ac6bee..9fedd8b99c47 100644
--- a/lib/libc/i386/gen/makecontext.c
+++ b/lib/libc/i386/gen/makecontext.c
@@ -124,7 +124,7 @@ __makecontext(ucontext_t *ucp, void (*start)(void), int argc, ...)
/*
* Setup the top of the stack with the user start routine
- * followed by all of its aguments and the pointer to the
+ * followed by all of its arguments and the pointer to the
* ucontext. We need to leave a spare spot at the top of
* the stack because setcontext will move eip to the top
* of the stack before returning.
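
The comment corrected above describes the i386 __makecontext() contract: the start routine's arguments and a pointer to the ucontext are laid out on the new stack. From the caller's side this is the standard makecontext(3)/swapcontext(3) pairing; a minimal, self-contained sketch follows, in which the 64 KiB stack size and the hello() routine are illustrative and not part of the change:

#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

static ucontext_t main_ctx, co_ctx;

static void
hello(int a, int b)
{
        printf("coroutine: %d %d\n", a, b);
        /* Returning resumes uc_link, i.e. main_ctx. */
}

int
main(void)
{
        char *stack = malloc(64 * 1024);

        if (stack == NULL)
                exit(1);
        getcontext(&co_ctx);
        co_ctx.uc_stack.ss_sp = stack;
        co_ctx.uc_stack.ss_size = 64 * 1024;
        co_ctx.uc_link = &main_ctx;     /* resumed when hello() returns */
        /* The arguments land on the new stack, as the comment describes. */
        makecontext(&co_ctx, (void (*)(void))hello, 2, 1, 2);
        swapcontext(&main_ctx, &co_ctx);
        printf("back in main\n");
        free(stack);
        return (0);
}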
diff --git a/lib/libc/net/gai_strerror.c b/lib/libc/net/gai_strerror.c
index 0d4303e76a73..23f58b763573 100644
--- a/lib/libc/net/gai_strerror.c
+++ b/lib/libc/net/gai_strerror.c
@@ -45,22 +45,22 @@
* Entries EAI_ADDRFAMILY (1) and EAI_NODATA (7) were omitted from RFC 3493,
* but are or may be used as extensions or in old code.
*/
-static const char *ai_errlist[] = {
- "Success", /* 0 */
- "Address family for hostname not supported", /* EAI_ADDRFAMILY */
- "Name could not be resolved at this time", /* EAI_AGAIN */
- "Flags parameter had an invalid value", /* EAI_BADFLAGS */
- "Non-recoverable failure in name resolution", /* EAI_FAIL */
- "Address family not recognized", /* EAI_FAMILY */
- "Memory allocation failure", /* EAI_MEMORY */
- "No address associated with hostname", /* EAI_NODATA*/
- "Name does not resolve", /* EAI_NONAME */
- "Service was not recognized for socket type", /* EAI_SERVICE */
- "Intended socket type was not recognized", /* EAI_SOCKTYPE */
- "System error returned in errno", /* EAI_SYSTEM */
- "Invalid value for hints", /* EAI_BADHINTS */
- "Resolved protocol is unknown", /* EAI_PROTOCOL */
- "Argument buffer overflow" /* EAI_OVERFLOW */
+static const char *const ai_errlist[] = {
+ [0] = "Success",
+ [EAI_ADDRFAMILY] = "Address family for hostname not supported",
+ [EAI_AGAIN] = "Name could not be resolved at this time",
+ [EAI_BADFLAGS] = "Flags parameter had an invalid value",
+ [EAI_FAIL] = "Non-recoverable failure in name resolution",
+ [EAI_FAMILY] = "Address family not recognized",
+ [EAI_MEMORY] = "Memory allocation failure",
+ [EAI_NODATA] = "No address associated with hostname",
+ [EAI_NONAME] = "Name does not resolve",
+ [EAI_SERVICE] = "Service was not recognized for socket type",
+ [EAI_SOCKTYPE] = "Intended socket type was not recognized",
+ [EAI_SYSTEM] = "System error returned in errno",
+ [EAI_BADHINTS] = "Invalid value for hints",
+ [EAI_PROTOCOL] = "Resolved protocol is unknown",
+ [EAI_OVERFLOW] = "Argument buffer overflow",
};
#if defined(NLS)
@@ -72,7 +72,7 @@ static int gai_keycreated = 0;
static void
gai_keycreate(void)
{
- gai_keycreated = (thr_keycreate(&gai_key, free) == 0);
+ gai_keycreated = thr_keycreate(&gai_key, free) == 0;
}
#endif
@@ -82,7 +82,9 @@ gai_strerror(int ecode)
#if defined(NLS)
nl_catd catd;
char *buf;
+ int saved_errno;
+ saved_errno = errno;
if (thr_main() != 0)
buf = gai_buf;
else {
@@ -110,11 +112,13 @@ gai_strerror(int ecode)
strlcpy(buf, catgets(catd, 3, NL_MSGMAX, "Unknown error"),
sizeof(gai_buf));
catclose(catd);
- return buf;
+ errno = saved_errno;
+ return (buf);
thr_err:
+ errno = saved_errno;
#endif
if (ecode >= 0 && ecode < EAI_MAX)
- return ai_errlist[ecode];
- return "Unknown error";
+ return (ai_errlist[ecode]);
+ return ("Unknown error");
}
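
The gai_strerror() change above replaces positional table entries with C99 designated initializers, so each message stays bound to its EAI_* constant by name rather than by position, and it now saves and restores errno around the NLS catalog lookup. A small sketch of the same table idiom, using hypothetical ST_* constants rather than the real EAI_* set:

#include <stdio.h>

/* Hypothetical status codes standing in for the EAI_* constants. */
enum { ST_OK = 0, ST_AGAIN = 2, ST_FAIL = 4, ST_MAX = 5 };

/*
 * Designated initializers bind each string to its constant by name,
 * so renumbering the enum cannot misalign the table; unnamed slots
 * are implicitly NULL, which the lookup below tests for.
 */
static const char *const st_errlist[] = {
        [ST_OK]    = "Success",
        [ST_AGAIN] = "Temporary failure, try again",
        [ST_FAIL]  = "Non-recoverable failure",
};

static const char *
st_strerror(int code)
{
        if (code >= 0 && code < ST_MAX && st_errlist[code] != NULL)
                return (st_errlist[code]);
        return ("Unknown error");
}

int
main(void)
{
        printf("%s\n", st_strerror(ST_AGAIN));
        printf("%s\n", st_strerror(3));         /* gap in the table */
        return (0);
}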
diff --git a/lib/libc/rpc/Symbol.map b/lib/libc/rpc/Symbol.map
index 61e8e084b1e0..105d6fb6b54e 100644
--- a/lib/libc/rpc/Symbol.map
+++ b/lib/libc/rpc/Symbol.map
@@ -8,9 +8,13 @@ FBSD_1.0 {
xdr_desargs;
xdr_desresp;
+ authdes_seccreate;
+ authdes_pk_seccreate;
authnone_create;
authunix_create;
authunix_create_default;
+ xdr_authdes_cred;
+ xdr_authdes_verf;
xdr_authunix_parms;
bindresvport;
bindresvport_sa;
@@ -54,6 +58,15 @@ FBSD_1.0 {
endrpcent;
getrpcent;
getrpcport;
+ key_setsecret;
+ key_secretkey_is_set;
+ key_encryptsession_pk;
+ key_decryptsession_pk;
+ key_encryptsession;
+ key_decryptsession;
+ key_gendes;
+ key_setnet;
+ key_get_conv;
xdr_keystatus;
xdr_keybuf;
xdr_netnamestr;
@@ -117,6 +130,7 @@ FBSD_1.0 {
callrpc;
registerrpc;
clnt_broadcast;
+ authdes_create;
clntunix_create;
svcunix_create;
svcunixfd_create;
@@ -166,6 +180,8 @@ FBSD_1.0 {
_authenticate;
_svcauth_null;
svc_auth_reg;
+ _svcauth_des;
+ authdes_getucred;
_svcauth_unix;
_svcauth_short;
svc_dg_create;
@@ -189,6 +205,9 @@ FBSD_1.8 {
FBSDprivate_1.0 {
__des_crypt_LOCAL;
+ __key_encryptsession_pk_LOCAL;
+ __key_decryptsession_pk_LOCAL;
+ __key_gendes_LOCAL;
__svc_clean_idle;
__rpc_gss_unwrap;
__rpc_gss_unwrap_stub;
diff --git a/lib/libc/rpc/auth_des.c b/lib/libc/rpc/auth_des.c
index 754d55cbed3e..c9b20de25cda 100644
--- a/lib/libc/rpc/auth_des.c
+++ b/lib/libc/rpc/auth_des.c
@@ -30,34 +30,463 @@
/*
* Copyright (c) 1988 by Sun Microsystems, Inc.
*/
-
/*
- * Secure RPC DES authentication was removed in FreeBSD 15.0.
- * These symbols are provided for backward compatibility, but provide no
- * functionality and will always return an error.
+ * auth_des.c, client-side implementation of DES authentication
*/
#include "namespace.h"
#include "reentrant.h"
+#include <err.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <rpc/des_crypt.h>
+#include <syslog.h>
#include <rpc/types.h>
#include <rpc/auth.h>
#include <rpc/auth_des.h>
+#include <rpc/clnt.h>
+#include <rpc/xdr.h>
+#include <sys/socket.h>
+#undef NIS
#include <rpcsvc/nis.h>
#include "un-namespace.h"
+#include "mt_misc.h"
+
+#define USEC_PER_SEC 1000000
+#define RTIME_TIMEOUT 5 /* seconds to wait for sync */
+
+#define AUTH_PRIVATE(auth) (struct ad_private *) auth->ah_private
+#define ALLOC(object_type) (object_type *) mem_alloc(sizeof(object_type))
+#define FREE(ptr, size) mem_free((char *)(ptr), (int) size)
+#define ATTEMPT(xdr_op) if (!(xdr_op)) return (FALSE)
+
+extern bool_t xdr_authdes_cred( XDR *, struct authdes_cred *);
+extern bool_t xdr_authdes_verf( XDR *, struct authdes_verf *);
+extern int key_encryptsession_pk(char *, netobj *, des_block *);
+
+extern bool_t __rpc_get_time_offset(struct timeval *, nis_server *, char *,
+ char **, char **);
-static AUTH *
-__authdes_seccreate(const char *servername, const u_int win,
+/*
+ * DES authenticator operations vector
+ */
+static void authdes_nextverf(AUTH *);
+static bool_t authdes_marshal(AUTH *, XDR *);
+static bool_t authdes_validate(AUTH *, struct opaque_auth *);
+static bool_t authdes_refresh(AUTH *, void *);
+static void authdes_destroy(AUTH *);
+
+static struct auth_ops *authdes_ops(void);
+
+/*
+ * This struct is pointed to by the ah_private field of an "AUTH *"
+ */
+struct ad_private {
+ char *ad_fullname; /* client's full name */
+ u_int ad_fullnamelen; /* length of name, rounded up */
+ char *ad_servername; /* server's full name */
+ u_int ad_servernamelen; /* length of name, rounded up */
+ u_int ad_window; /* client specified window */
+ bool_t ad_dosync; /* synchronize? */
+ struct netbuf ad_syncaddr; /* remote host to synch with */
+ char *ad_timehost; /* remote host to synch with */
+ struct timeval ad_timediff; /* server's time - client's time */
+ u_int ad_nickname; /* server's nickname for client */
+ struct authdes_cred ad_cred; /* storage for credential */
+ struct authdes_verf ad_verf; /* storage for verifier */
+ struct timeval ad_timestamp; /* timestamp sent */
+ des_block ad_xkey; /* encrypted conversation key */
+ u_char ad_pkey[1024]; /* Server's actual public key */
+ char *ad_netid; /* Timehost netid */
+ char *ad_uaddr; /* Timehost uaddr */
+ nis_server *ad_nis_srvr; /* NIS+ server struct */
+};
+
+AUTH *authdes_pk_seccreate(const char *, netobj *, u_int, const char *,
+ const des_block *, nis_server *);
+
+/*
+ * documented version of authdes_seccreate
+ */
+/*
+ servername: network name of server
+ win: time to live
+ timehost: optional hostname to sync with
+ ckey: optional conversation key to use
+*/
+
+AUTH *
+authdes_seccreate(const char *servername, const u_int win,
const char *timehost, const des_block *ckey)
{
- return (NULL);
+ u_char pkey_data[1024];
+ netobj pkey;
+ AUTH *dummy;
+
+ if (! getpublickey(servername, (char *) pkey_data)) {
+ syslog(LOG_ERR,
+ "authdes_seccreate: no public key found for %s",
+ servername);
+ return (NULL);
+ }
+
+ pkey.n_bytes = (char *) pkey_data;
+ pkey.n_len = (u_int)strlen((char *)pkey_data) + 1;
+ dummy = authdes_pk_seccreate(servername, &pkey, win, timehost,
+ ckey, NULL);
+ return (dummy);
}
-__sym_compat(authdes_seccreate, __authdes_seccreate, FBSD_1.0);
-static AUTH *
-__authdes_pk_seccreate(const char *servername __unused, netobj *pkey __unused,
- u_int window __unused, const char *timehost __unused,
- const des_block *ckey __unused, nis_server *srvr __unused)
+/*
+ * Slightly modified version of authdessec_create which takes the public key
+ * of the server principal as an argument. This spares us a call to
+ * getpublickey() which in the nameserver context can cause a deadlock.
+ */
+AUTH *
+authdes_pk_seccreate(const char *servername, netobj *pkey, u_int window,
+ const char *timehost, const des_block *ckey, nis_server *srvr)
{
+ AUTH *auth;
+ struct ad_private *ad;
+ char namebuf[MAXNETNAMELEN+1];
+
+ /*
+ * Allocate everything now
+ */
+ auth = ALLOC(AUTH);
+ if (auth == NULL) {
+ syslog(LOG_ERR, "authdes_pk_seccreate: out of memory");
+ return (NULL);
+ }
+ ad = ALLOC(struct ad_private);
+ if (ad == NULL) {
+ syslog(LOG_ERR, "authdes_pk_seccreate: out of memory");
+ goto failed;
+ }
+ ad->ad_fullname = ad->ad_servername = NULL; /* Sanity reasons */
+ ad->ad_timehost = NULL;
+ ad->ad_netid = NULL;
+ ad->ad_uaddr = NULL;
+ ad->ad_nis_srvr = NULL;
+ ad->ad_timediff.tv_sec = 0;
+ ad->ad_timediff.tv_usec = 0;
+ memcpy(ad->ad_pkey, pkey->n_bytes, pkey->n_len);
+ if (!getnetname(namebuf))
+ goto failed;
+ ad->ad_fullnamelen = RNDUP((u_int) strlen(namebuf));
+ ad->ad_fullname = (char *)mem_alloc(ad->ad_fullnamelen + 1);
+ ad->ad_servernamelen = strlen(servername);
+ ad->ad_servername = (char *)mem_alloc(ad->ad_servernamelen + 1);
+
+ if (ad->ad_fullname == NULL || ad->ad_servername == NULL) {
+ syslog(LOG_ERR, "authdes_seccreate: out of memory");
+ goto failed;
+ }
+ if (timehost != NULL) {
+ ad->ad_timehost = (char *)mem_alloc(strlen(timehost) + 1);
+ if (ad->ad_timehost == NULL) {
+ syslog(LOG_ERR, "authdes_seccreate: out of memory");
+ goto failed;
+ }
+ memcpy(ad->ad_timehost, timehost, strlen(timehost) + 1);
+ ad->ad_dosync = TRUE;
+ } else if (srvr != NULL) {
+ ad->ad_nis_srvr = srvr; /* transient */
+ ad->ad_dosync = TRUE;
+ } else {
+ ad->ad_dosync = FALSE;
+ }
+ memcpy(ad->ad_fullname, namebuf, ad->ad_fullnamelen + 1);
+ memcpy(ad->ad_servername, servername, ad->ad_servernamelen + 1);
+ ad->ad_window = window;
+ if (ckey == NULL) {
+ if (key_gendes(&auth->ah_key) < 0) {
+ syslog(LOG_ERR,
+ "authdes_seccreate: keyserv(1m) is unable to generate session key");
+ goto failed;
+ }
+ } else {
+ auth->ah_key = *ckey;
+ }
+
+ /*
+ * Set up auth handle
+ */
+ auth->ah_cred.oa_flavor = AUTH_DES;
+ auth->ah_verf.oa_flavor = AUTH_DES;
+ auth->ah_ops = authdes_ops();
+ auth->ah_private = (caddr_t)ad;
+
+ if (!authdes_refresh(auth, NULL)) {
+ goto failed;
+ }
+ ad->ad_nis_srvr = NULL; /* not needed any longer */
+ return (auth);
+
+failed:
+ if (auth)
+ FREE(auth, sizeof (AUTH));
+ if (ad) {
+ if (ad->ad_fullname)
+ FREE(ad->ad_fullname, ad->ad_fullnamelen + 1);
+ if (ad->ad_servername)
+ FREE(ad->ad_servername, ad->ad_servernamelen + 1);
+ if (ad->ad_timehost)
+ FREE(ad->ad_timehost, strlen(ad->ad_timehost) + 1);
+ if (ad->ad_netid)
+ FREE(ad->ad_netid, strlen(ad->ad_netid) + 1);
+ if (ad->ad_uaddr)
+ FREE(ad->ad_uaddr, strlen(ad->ad_uaddr) + 1);
+ FREE(ad, sizeof (struct ad_private));
+ }
return (NULL);
}
-__sym_compat(authdes_pk_seccreate, __authdes_pk_seccreate, FBSD_1.0);
+
+/*
+ * Implement the five authentication operations
+ */
+
+
+/*
+ * 1. Next Verifier
+ */
+/*ARGSUSED*/
+static void
+authdes_nextverf(AUTH *auth __unused)
+{
+ /* what the heck am I supposed to do??? */
+}
+
+
+/*
+ * 2. Marshal
+ */
+static bool_t
+authdes_marshal(AUTH *auth, XDR *xdrs)
+{
+/* LINTED pointer alignment */
+ struct ad_private *ad = AUTH_PRIVATE(auth);
+ struct authdes_cred *cred = &ad->ad_cred;
+ struct authdes_verf *verf = &ad->ad_verf;
+ des_block cryptbuf[2];
+ des_block ivec;
+ int status;
+ int len;
+ rpc_inline_t *ixdr;
+
+ /*
+ * Figure out the "time", accounting for any time difference
+ * with the server if necessary.
+ */
+ (void)gettimeofday(&ad->ad_timestamp, NULL);
+ ad->ad_timestamp.tv_sec += ad->ad_timediff.tv_sec;
+ ad->ad_timestamp.tv_usec += ad->ad_timediff.tv_usec;
+ while (ad->ad_timestamp.tv_usec >= USEC_PER_SEC) {
+ ad->ad_timestamp.tv_usec -= USEC_PER_SEC;
+ ad->ad_timestamp.tv_sec++;
+ }
+
+ /*
+ * XDR the timestamp and possibly some other things, then
+ * encrypt them.
+ */
+ ixdr = (rpc_inline_t *)cryptbuf;
+ IXDR_PUT_INT32(ixdr, ad->ad_timestamp.tv_sec);
+ IXDR_PUT_INT32(ixdr, ad->ad_timestamp.tv_usec);
+ if (ad->ad_cred.adc_namekind == ADN_FULLNAME) {
+ IXDR_PUT_U_INT32(ixdr, ad->ad_window);
+ IXDR_PUT_U_INT32(ixdr, ad->ad_window - 1);
+ ivec.key.high = ivec.key.low = 0;
+ status = cbc_crypt((char *)&auth->ah_key, (char *)cryptbuf,
+ (u_int) 2 * sizeof (des_block),
+ DES_ENCRYPT | DES_HW, (char *)&ivec);
+ } else {
+ status = ecb_crypt((char *)&auth->ah_key, (char *)cryptbuf,
+ (u_int) sizeof (des_block),
+ DES_ENCRYPT | DES_HW);
+ }
+ if (DES_FAILED(status)) {
+ syslog(LOG_ERR, "authdes_marshal: DES encryption failure");
+ return (FALSE);
+ }
+ ad->ad_verf.adv_xtimestamp = cryptbuf[0];
+ if (ad->ad_cred.adc_namekind == ADN_FULLNAME) {
+ ad->ad_cred.adc_fullname.window = cryptbuf[1].key.high;
+ ad->ad_verf.adv_winverf = cryptbuf[1].key.low;
+ } else {
+ ad->ad_cred.adc_nickname = ad->ad_nickname;
+ ad->ad_verf.adv_winverf = 0;
+ }
+
+ /*
+ * Serialize the credential and verifier into opaque
+ * authentication data.
+ */
+ if (ad->ad_cred.adc_namekind == ADN_FULLNAME) {
+ len = ((1 + 1 + 2 + 1)*BYTES_PER_XDR_UNIT + ad->ad_fullnamelen);
+ } else {
+ len = (1 + 1)*BYTES_PER_XDR_UNIT;
+ }
+
+ if ((ixdr = xdr_inline(xdrs, 2*BYTES_PER_XDR_UNIT))) {
+ IXDR_PUT_INT32(ixdr, AUTH_DES);
+ IXDR_PUT_INT32(ixdr, len);
+ } else {
+ ATTEMPT(xdr_putint32(xdrs, (int *)&auth->ah_cred.oa_flavor));
+ ATTEMPT(xdr_putint32(xdrs, &len));
+ }
+ ATTEMPT(xdr_authdes_cred(xdrs, cred));
+
+ len = (2 + 1)*BYTES_PER_XDR_UNIT;
+ if ((ixdr = xdr_inline(xdrs, 2*BYTES_PER_XDR_UNIT))) {
+ IXDR_PUT_INT32(ixdr, AUTH_DES);
+ IXDR_PUT_INT32(ixdr, len);
+ } else {
+ ATTEMPT(xdr_putint32(xdrs, (int *)&auth->ah_verf.oa_flavor));
+ ATTEMPT(xdr_putint32(xdrs, &len));
+ }
+ ATTEMPT(xdr_authdes_verf(xdrs, verf));
+ return (TRUE);
+}
+
+
+/*
+ * 3. Validate
+ */
+static bool_t
+authdes_validate(AUTH *auth, struct opaque_auth *rverf)
+{
+/* LINTED pointer alignment */
+ struct ad_private *ad = AUTH_PRIVATE(auth);
+ struct authdes_verf verf;
+ int status;
+ uint32_t *ixdr;
+ des_block buf;
+
+ if (rverf->oa_length != (2 + 1) * BYTES_PER_XDR_UNIT) {
+ return (FALSE);
+ }
+/* LINTED pointer alignment */
+ ixdr = (uint32_t *)rverf->oa_base;
+ buf.key.high = (uint32_t)*ixdr++;
+ buf.key.low = (uint32_t)*ixdr++;
+ verf.adv_int_u = (uint32_t)*ixdr++;
+
+ /*
+ * Decrypt the timestamp
+ */
+ status = ecb_crypt((char *)&auth->ah_key, (char *)&buf,
+ (u_int)sizeof (des_block), DES_DECRYPT | DES_HW);
+
+ if (DES_FAILED(status)) {
+ syslog(LOG_ERR, "authdes_validate: DES decryption failure");
+ return (FALSE);
+ }
+
+ /*
+ * xdr the decrypted timestamp
+ */
+/* LINTED pointer alignment */
+ ixdr = (uint32_t *)buf.c;
+ verf.adv_timestamp.tv_sec = IXDR_GET_INT32(ixdr) + 1;
+ verf.adv_timestamp.tv_usec = IXDR_GET_INT32(ixdr);
+
+ /*
+ * validate
+ */
+ if (bcmp((char *)&ad->ad_timestamp, (char *)&verf.adv_timestamp,
+ sizeof(struct timeval)) != 0) {
+ syslog(LOG_DEBUG, "authdes_validate: verifier mismatch");
+ return (FALSE);
+ }
+
+ /*
+ * We have a nickname now, let's use it
+ */
+ ad->ad_nickname = verf.adv_nickname;
+ ad->ad_cred.adc_namekind = ADN_NICKNAME;
+ return (TRUE);
+}
+
+/*
+ * 4. Refresh
+ */
+/*ARGSUSED*/
+static bool_t
+authdes_refresh(AUTH *auth, void *dummy __unused)
+{
+/* LINTED pointer alignment */
+ struct ad_private *ad = AUTH_PRIVATE(auth);
+ struct authdes_cred *cred = &ad->ad_cred;
+ int ok;
+ netobj pkey;
+
+ if (ad->ad_dosync) {
+ ok = __rpc_get_time_offset(&ad->ad_timediff, ad->ad_nis_srvr,
+ ad->ad_timehost, &(ad->ad_uaddr),
+ &(ad->ad_netid));
+ if (! ok) {
+ /*
+ * Hope the clocks are synced!
+ */
+ ad->ad_dosync = 0;
+ syslog(LOG_DEBUG,
+ "authdes_refresh: unable to synchronize clock");
+ }
+ }
+ ad->ad_xkey = auth->ah_key;
+ pkey.n_bytes = (char *)(ad->ad_pkey);
+ pkey.n_len = (u_int)strlen((char *)ad->ad_pkey) + 1;
+ if (key_encryptsession_pk(ad->ad_servername, &pkey, &ad->ad_xkey) < 0) {
+ syslog(LOG_INFO,
+ "authdes_refresh: keyserv(1m) is unable to encrypt session key");
+ return (FALSE);
+ }
+ cred->adc_fullname.key = ad->ad_xkey;
+ cred->adc_namekind = ADN_FULLNAME;
+ cred->adc_fullname.name = ad->ad_fullname;
+ return (TRUE);
+}
+
+
+/*
+ * 5. Destroy
+ */
+static void
+authdes_destroy(AUTH *auth)
+{
+/* LINTED pointer alignment */
+ struct ad_private *ad = AUTH_PRIVATE(auth);
+
+ FREE(ad->ad_fullname, ad->ad_fullnamelen + 1);
+ FREE(ad->ad_servername, ad->ad_servernamelen + 1);
+ if (ad->ad_timehost)
+ FREE(ad->ad_timehost, strlen(ad->ad_timehost) + 1);
+ if (ad->ad_netid)
+ FREE(ad->ad_netid, strlen(ad->ad_netid) + 1);
+ if (ad->ad_uaddr)
+ FREE(ad->ad_uaddr, strlen(ad->ad_uaddr) + 1);
+ FREE(ad, sizeof (struct ad_private));
+ FREE(auth, sizeof(AUTH));
+}
+
+static struct auth_ops *
+authdes_ops(void)
+{
+ static struct auth_ops ops;
+
+ /* VARIABLES PROTECTED BY ops_lock: ops */
+
+ mutex_lock(&authdes_ops_lock);
+ if (ops.ah_nextverf == NULL) {
+ ops.ah_nextverf = authdes_nextverf;
+ ops.ah_marshal = authdes_marshal;
+ ops.ah_validate = authdes_validate;
+ ops.ah_refresh = authdes_refresh;
+ ops.ah_destroy = authdes_destroy;
+ }
+ mutex_unlock(&authdes_ops_lock);
+ return (&ops);
+}
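
The restored auth_des.c implements the client half of Secure RPC: authdes_seccreate() builds an AUTH handle whose marshal/validate/refresh operations encrypt and verify timestamps with the conversation key. For orientation, the classic calling pattern is sketched below; the program number, hostname, and 60-second window are hypothetical, and a running keyserv is assumed:

#include <rpc/rpc.h>
#include <rpc/auth_des.h>
#include <stdio.h>
#include <stdlib.h>

#define EXAMPLE_PROG    0x20000099      /* hypothetical program number */
#define EXAMPLE_VERS    1

int
main(void)
{
        char servername[MAXNETNAMELEN + 1];
        CLIENT *clnt;

        /* Translate the server's hostname into its Secure RPC netname. */
        if (!host2netname(servername, "server.example.org", NULL)) {
                fprintf(stderr, "cannot build netname\n");
                exit(1);
        }
        clnt = clnt_create("server.example.org", EXAMPLE_PROG,
            EXAMPLE_VERS, "netpath");
        if (clnt == NULL) {
                clnt_pcreateerror("server.example.org");
                exit(1);
        }
        /*
         * A 60-second window, synchronizing clocks against the server
         * host; a NULL ckey lets keyserv generate the conversation key.
         */
        auth_destroy(clnt->cl_auth);
        clnt->cl_auth = authdes_seccreate(servername, 60,
            "server.example.org", NULL);
        if (clnt->cl_auth == NULL) {
                fprintf(stderr, "authdes_seccreate failed\n");
                exit(1);
        }
        /* ... clnt_call(clnt, ...) now carries AUTH_DES credentials ... */
        auth_destroy(clnt->cl_auth);
        clnt_destroy(clnt);
        return (0);
}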
diff --git a/lib/libc/rpc/authdes_prot.c b/lib/libc/rpc/authdes_prot.c
index 56b44daafe41..79a0e5baa084 100644
--- a/lib/libc/rpc/authdes_prot.c
+++ b/lib/libc/rpc/authdes_prot.c
@@ -42,16 +42,44 @@
#include <rpc/auth_des.h>
#include "un-namespace.h"
-static bool_t
-__xdr_authdes_cred(XDR *xdrs, void *cred)
+#define ATTEMPT(xdr_op) if (!(xdr_op)) return (FALSE)
+
+bool_t
+xdr_authdes_cred(XDR *xdrs, struct authdes_cred *cred)
{
- return (FALSE);
+ enum authdes_namekind *padc_namekind = &cred->adc_namekind;
+ /*
+ * Unrolled xdr
+ */
+ ATTEMPT(xdr_enum(xdrs, (enum_t *) padc_namekind));
+ switch (cred->adc_namekind) {
+ case ADN_FULLNAME:
+ ATTEMPT(xdr_string(xdrs, &cred->adc_fullname.name,
+ MAXNETNAMELEN));
+ ATTEMPT(xdr_opaque(xdrs, (caddr_t)&cred->adc_fullname.key,
+ sizeof(des_block)));
+ ATTEMPT(xdr_opaque(xdrs, (caddr_t)&cred->adc_fullname.window,
+ sizeof(cred->adc_fullname.window)));
+ return (TRUE);
+ case ADN_NICKNAME:
+ ATTEMPT(xdr_opaque(xdrs, (caddr_t)&cred->adc_nickname,
+ sizeof(cred->adc_nickname)));
+ return (TRUE);
+ default:
+ return (FALSE);
+ }
}
-__sym_compat(xdr_authdes_cred, __xdr_authdes_cred, FBSD_1.0);
-static bool_t
-__xdr_authdes_verf(XDR *xdrs, void *verf)
+
+bool_t
+xdr_authdes_verf(XDR *xdrs, struct authdes_verf *verf)
{
- return (FALSE);
+ /*
+ * Unrolled xdr
+ */
+ ATTEMPT(xdr_opaque(xdrs, (caddr_t)&verf->adv_xtimestamp,
+ sizeof(des_block)));
+ ATTEMPT(xdr_opaque(xdrs, (caddr_t)&verf->adv_int_u,
+ sizeof(verf->adv_int_u)));
+ return (TRUE);
}
-__sym_compat(xdr_authdes_verf, __xdr_authdes_verf, FBSD_1.0);
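
The ATTEMPT() macro above unrolls what would otherwise be a chain of nested ifs: each XDR primitive short-circuits the whole routine on failure. A hedged sketch of round-tripping a credential through a memory stream with the now-exported xdr_authdes_cred(); xdrmem_create(), xdr_destroy() and xdr_free() are standard Sun RPC, while the buffer size is an arbitrary illustrative choice:

#include <rpc/rpc.h>
#include <rpc/auth_des.h>
#include <string.h>

static bool_t
roundtrip_cred(struct authdes_cred *in, struct authdes_cred *out)
{
	char buf[512];
	XDR xenc, xdec;
	bool_t ok;

	/* Serialize into the buffer. */
	xdrmem_create(&xenc, buf, sizeof(buf), XDR_ENCODE);
	ok = xdr_authdes_cred(&xenc, in);
	xdr_destroy(&xenc);
	if (!ok)
		return (FALSE);

	/*
	 * Decode into a zeroed struct; for ADN_FULLNAME, xdr_string()
	 * allocates out->adc_fullname.name, which the caller should
	 * release later with xdr_free((xdrproc_t)xdr_authdes_cred, out).
	 */
	memset(out, 0, sizeof(*out));
	xdrmem_create(&xdec, buf, sizeof(buf), XDR_DECODE);
	ok = xdr_authdes_cred(&xdec, out);
	xdr_destroy(&xdec);
	return (ok);
}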
diff --git a/lib/libc/rpc/key_call.c b/lib/libc/rpc/key_call.c
index eb274fcfff36..5c87881c815c 100644
--- a/lib/libc/rpc/key_call.c
+++ b/lib/libc/rpc/key_call.c
@@ -32,78 +32,426 @@
*/
/*
- * Secure RPC keyserver support was removed in FreeBSD 15.0.
- * These symbols are provided for backward compatibility, but provide no
- * functionality and will always return an error.
+ * key_call.c, Interface to keyserver
+ *
+ * setsecretkey(key) - set your secret key
+ * encryptsessionkey(agent, deskey) - encrypt a session key to talk to agent
+ * decryptsessionkey(agent, deskey) - decrypt ditto
+ * gendeskey(deskey) - generate a secure des key
*/
#include "namespace.h"
#include "reentrant.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
#include <rpc/rpc.h>
-#include <rpc/key_prot.h>
#include <rpc/auth.h>
+#include <rpc/auth_unix.h>
+#include <rpc/key_prot.h>
+#include <string.h>
+#include <netconfig.h>
+#include <sys/utsname.h>
+#include <signal.h>
+#include <sys/wait.h>
+#include <sys/fcntl.h>
#include "un-namespace.h"
#include "mt_misc.h"
-static int
-__key_setsecret(const char *secretkey)
+
+#define KEY_TIMEOUT 5 /* per-try timeout in seconds */
+#define KEY_NRETRY 12 /* number of retries */
+
+#ifdef DEBUG
+#define debug(msg) (void) fprintf(stderr, "%s\n", msg);
+#else
+#define debug(msg)
+#endif /* DEBUG */
+
+/*
+ * Hack to allow the keyserver to use AUTH_DES (for authenticated
+ * NIS+ calls, for example). The only functions that get called
+ * are key_encryptsession_pk, key_decryptsession_pk, and key_gendes.
+ *
+ * The approach is to have the keyserver fill in pointers to local
+ * implementations of these functions, and to call those in key_call().
+ */
+
+cryptkeyres *(*__key_encryptsession_pk_LOCAL)(uid_t, void *arg) = 0;
+cryptkeyres *(*__key_decryptsession_pk_LOCAL)(uid_t, void *arg) = 0;
+des_block *(*__key_gendes_LOCAL)(uid_t, void *) = 0;
+
+static int key_call(u_long, xdrproc_t, void *, xdrproc_t, void *);
+
+int
+key_setsecret(const char *secretkey)
{
- return (-1);
+ keystatus status;
+
+ if (!key_call((u_long) KEY_SET, (xdrproc_t)xdr_keybuf,
+ (void *)secretkey,
+ (xdrproc_t)xdr_keystatus, &status)) {
+ return (-1);
+ }
+ if (status != KEY_SUCCESS) {
+ debug("set status is nonzero");
+ return (-1);
+ }
+ return (0);
}
-__sym_compat(key_setsecret, __key_setsecret, FBSD_1.0);
-static int
-__key_secretkey_is_set(void)
+
+/* key_secretkey_is_set() returns 1 if the keyserver has a secret key
+ * stored for the caller's effective uid; it returns 0 otherwise
+ *
+ * N.B.: The KEY_NET_GET key call is undocumented. Applications shouldn't
+ * be using it, because it allows them to get the user's secret key.
+ */
+
+int
+key_secretkey_is_set(void)
{
+ struct key_netstres kres;
+
+ memset((void*)&kres, 0, sizeof (kres));
+ if (key_call((u_long) KEY_NET_GET, (xdrproc_t)xdr_void, NULL,
+ (xdrproc_t)xdr_key_netstres, &kres) &&
+ (kres.status == KEY_SUCCESS) &&
+ (kres.key_netstres_u.knet.st_priv_key[0] != 0)) {
+ /* avoid leaving secret key in memory */
+ memset(kres.key_netstres_u.knet.st_priv_key, 0, HEXKEYBYTES);
+ return (1);
+ }
return (0);
}
-__sym_compat(key_secretkey_is_set, __key_secretkey_is_set, FBSD_1.0);
-static int
-__key_encryptsession_pk(char *remotename, netobj *remotekey, des_block *deskey)
+int
+key_encryptsession_pk(char *remotename, netobj *remotekey, des_block *deskey)
{
- return (-1);
+ cryptkeyarg2 arg;
+ cryptkeyres res;
+
+ arg.remotename = remotename;
+ arg.remotekey = *remotekey;
+ arg.deskey = *deskey;
+ if (!key_call((u_long)KEY_ENCRYPT_PK, (xdrproc_t)xdr_cryptkeyarg2, &arg,
+ (xdrproc_t)xdr_cryptkeyres, &res)) {
+ return (-1);
+ }
+ if (res.status != KEY_SUCCESS) {
+ debug("encrypt status is nonzero");
+ return (-1);
+ }
+ *deskey = res.cryptkeyres_u.deskey;
+ return (0);
}
-__sym_compat(key_encryptsession_pk, __key_encryptsession_pk, FBSD_1.0);
-static int
-__key_decryptsession_pk(char *remotename, netobj *remotekey, des_block *deskey)
+int
+key_decryptsession_pk(char *remotename, netobj *remotekey, des_block *deskey)
{
- return (-1);
+ cryptkeyarg2 arg;
+ cryptkeyres res;
+
+ arg.remotename = remotename;
+ arg.remotekey = *remotekey;
+ arg.deskey = *deskey;
+ if (!key_call((u_long)KEY_DECRYPT_PK, (xdrproc_t)xdr_cryptkeyarg2, &arg,
+ (xdrproc_t)xdr_cryptkeyres, &res)) {
+ return (-1);
+ }
+ if (res.status != KEY_SUCCESS) {
+ debug("decrypt status is nonzero");
+ return (-1);
+ }
+ *deskey = res.cryptkeyres_u.deskey;
+ return (0);
}
-__sym_compat(key_decryptsession_pk, __key_decryptsession_pk, FBSD_1.0);
-static int
-__key_encryptsession(const char *remotename, des_block *deskey)
+int
+key_encryptsession(const char *remotename, des_block *deskey)
{
- return (-1);
+ cryptkeyarg arg;
+ cryptkeyres res;
+
+ arg.remotename = (char *) remotename;
+ arg.deskey = *deskey;
+ if (!key_call((u_long)KEY_ENCRYPT, (xdrproc_t)xdr_cryptkeyarg, &arg,
+ (xdrproc_t)xdr_cryptkeyres, &res)) {
+ return (-1);
+ }
+ if (res.status != KEY_SUCCESS) {
+ debug("encrypt status is nonzero");
+ return (-1);
+ }
+ *deskey = res.cryptkeyres_u.deskey;
+ return (0);
}
-__sym_compat(key_encryptsession, __key_encryptsession, FBSD_1.0);
-static int
-__key_decryptsession(const char *remotename, des_block *deskey)
+int
+key_decryptsession(const char *remotename, des_block *deskey)
{
- return (-1);
+ cryptkeyarg arg;
+ cryptkeyres res;
+
+ arg.remotename = (char *) remotename;
+ arg.deskey = *deskey;
+ if (!key_call((u_long)KEY_DECRYPT, (xdrproc_t)xdr_cryptkeyarg, &arg,
+ (xdrproc_t)xdr_cryptkeyres, &res)) {
+ return (-1);
+ }
+ if (res.status != KEY_SUCCESS) {
+ debug("decrypt status is nonzero");
+ return (-1);
+ }
+ *deskey = res.cryptkeyres_u.deskey;
+ return (0);
}
-__sym_compat(key_decryptsession, __key_decryptsession, FBSD_1.0);
-static int
-__key_gendes(des_block *key)
+int
+key_gendes(des_block *key)
{
- return (-1);
+ if (!key_call((u_long)KEY_GEN, (xdrproc_t)xdr_void, NULL,
+ (xdrproc_t)xdr_des_block, key)) {
+ return (-1);
+ }
+ return (0);
}
-__sym_compat(key_gendes, __key_gendes, FBSD_1.0);
-static int
-__key_setnet(struct key_netstarg *arg)
+int
+key_setnet(struct key_netstarg *arg)
{
- return (-1);
+ keystatus status;
+
+ if (!key_call((u_long) KEY_NET_PUT, (xdrproc_t)xdr_key_netstarg, arg,
+ (xdrproc_t)xdr_keystatus, &status)){
+ return (-1);
+ }
+
+ if (status != KEY_SUCCESS) {
+ debug("key_setnet status is nonzero");
+ return (-1);
+ }
+ return (1);
+}
+
+
+int
+key_get_conv(char *pkey, des_block *deskey)
+{
+ cryptkeyres res;
+
+ if (!key_call((u_long) KEY_GET_CONV, (xdrproc_t)xdr_keybuf, pkey,
+ (xdrproc_t)xdr_cryptkeyres, &res)) {
+ return (-1);
+ }
+ if (res.status != KEY_SUCCESS) {
+ debug("get_conv status is nonzero");
+ return (-1);
+ }
+ *deskey = res.cryptkeyres_u.deskey;
+ return (0);
+}
+
+struct key_call_private {
+ CLIENT *client; /* Client handle */
+ pid_t pid; /* process-id at moment of creation */
+ uid_t uid; /* user-id at last authorization */
+};
+static struct key_call_private *key_call_private_main = NULL;
+static thread_key_t key_call_key;
+static once_t key_call_once = ONCE_INITIALIZER;
+static int key_call_key_error;
+
+static void
+key_call_destroy(void *vp)
+{
+ struct key_call_private *kcp = (struct key_call_private *)vp;
+
+ if (kcp) {
+ if (kcp->client)
+ clnt_destroy(kcp->client);
+ free(kcp);
+ }
+}
+
+static void
+key_call_init(void)
+{
+
+ key_call_key_error = thr_keycreate(&key_call_key, key_call_destroy);
}
-__sym_compat(key_setnet, __key_setnet, FBSD_1.0);
+
+/*
+ * Keep the handle cached. This call may be made quite often.
+ */
+static CLIENT *
+getkeyserv_handle(int vers)
+{
+ void *localhandle;
+ struct netconfig *nconf;
+ struct netconfig *tpconf;
+ struct key_call_private *kcp;
+ struct timeval wait_time;
+ struct utsname u;
+ int main_thread;
+ int fd;
+
+#define TOTAL_TIMEOUT 30 /* total timeout talking to keyserver */
+#define TOTAL_TRIES 5 /* Number of tries */
+
+ if ((main_thread = thr_main())) {
+ kcp = key_call_private_main;
+ } else {
+ if (thr_once(&key_call_once, key_call_init) != 0 ||
+ key_call_key_error != 0)
+ return ((CLIENT *) NULL);
+ kcp = (struct key_call_private *)thr_getspecific(key_call_key);
+ }
+ if (kcp == (struct key_call_private *)NULL) {
+ kcp = (struct key_call_private *)malloc(sizeof (*kcp));
+ if (kcp == (struct key_call_private *)NULL) {
+ return ((CLIENT *) NULL);
+ }
+ if (main_thread)
+ key_call_private_main = kcp;
+ else
+ thr_setspecific(key_call_key, (void *) kcp);
+ kcp->client = NULL;
+ }
+
+ /* if pid has changed, destroy client and rebuild */
+ if (kcp->client != NULL && kcp->pid != getpid()) {
+ clnt_destroy(kcp->client);
+ kcp->client = NULL;
+ }
+
+ if (kcp->client != NULL) {
+ /* if uid has changed, build client handle again */
+ if (kcp->uid != geteuid()) {
+ kcp->uid = geteuid();
+ auth_destroy(kcp->client->cl_auth);
+ kcp->client->cl_auth =
+ authsys_create("", kcp->uid, 0, 0, NULL);
+ if (kcp->client->cl_auth == NULL) {
+ clnt_destroy(kcp->client);
+ kcp->client = NULL;
+ return ((CLIENT *) NULL);
+ }
+ }
+ /* Change the version number to the new one */
+ clnt_control(kcp->client, CLSET_VERS, (void *)&vers);
+ return (kcp->client);
+ }
+ if (!(localhandle = setnetconfig())) {
+ return ((CLIENT *) NULL);
+ }
+ tpconf = NULL;
+#if defined(__FreeBSD__)
+ if (uname(&u) == -1)
+#else
+#if defined(i386)
+ if (_nuname(&u) == -1)
+#elif defined(sparc)
+ if (_uname(&u) == -1)
+#else
+#error Unknown architecture!
+#endif
+#endif
+ {
+ endnetconfig(localhandle);
+ return ((CLIENT *) NULL);
+ }
+ while ((nconf = getnetconfig(localhandle)) != NULL) {
+ if (strcmp(nconf->nc_protofmly, NC_LOOPBACK) == 0) {
+ /*
+ * We use COTS_ORD here so that the caller can
+ * find out immediately if the server is dead.
+ */
+ if (nconf->nc_semantics == NC_TPI_COTS_ORD) {
+ kcp->client = clnt_tp_create(u.nodename,
+ KEY_PROG, vers, nconf);
+ if (kcp->client)
+ break;
+ } else {
+ tpconf = nconf;
+ }
+ }
+ }
+ if ((kcp->client == (CLIENT *) NULL) && (tpconf))
+ /* Now, try the CLTS or COTS loopback transport */
+ kcp->client = clnt_tp_create(u.nodename,
+ KEY_PROG, vers, tpconf);
+ endnetconfig(localhandle);
+
+ if (kcp->client == (CLIENT *) NULL) {
+ return ((CLIENT *) NULL);
+ }
+ kcp->uid = geteuid();
+ kcp->pid = getpid();
+ kcp->client->cl_auth = authsys_create("", kcp->uid, 0, 0, NULL);
+ if (kcp->client->cl_auth == NULL) {
+ clnt_destroy(kcp->client);
+ kcp->client = NULL;
+ return ((CLIENT *) NULL);
+ }
+
+ wait_time.tv_sec = TOTAL_TIMEOUT/TOTAL_TRIES;
+ wait_time.tv_usec = 0;
+ (void) clnt_control(kcp->client, CLSET_RETRY_TIMEOUT,
+ (char *)&wait_time);
+ if (clnt_control(kcp->client, CLGET_FD, (char *)&fd))
+ _fcntl(fd, F_SETFD, 1); /* make it "close on exec" */
+
+ return (kcp->client);
+}
+
+/* returns 0 on failure, 1 on success */
static int
-__key_get_conv(char *pkey, des_block *deskey)
+key_call(u_long proc, xdrproc_t xdr_arg, void *arg, xdrproc_t xdr_rslt,
+ void *rslt)
{
- return (-1);
+ CLIENT *clnt;
+ struct timeval wait_time;
+
+ if (proc == KEY_ENCRYPT_PK && __key_encryptsession_pk_LOCAL) {
+ cryptkeyres *res;
+ res = (*__key_encryptsession_pk_LOCAL)(geteuid(), arg);
+ *(cryptkeyres*)rslt = *res;
+ return (1);
+ } else if (proc == KEY_DECRYPT_PK && __key_decryptsession_pk_LOCAL) {
+ cryptkeyres *res;
+ res = (*__key_decryptsession_pk_LOCAL)(geteuid(), arg);
+ *(cryptkeyres*)rslt = *res;
+ return (1);
+ } else if (proc == KEY_GEN && __key_gendes_LOCAL) {
+ des_block *res;
+ res = (*__key_gendes_LOCAL)(geteuid(), 0);
+ *(des_block*)rslt = *res;
+ return (1);
+ }
+
+ if ((proc == KEY_ENCRYPT_PK) || (proc == KEY_DECRYPT_PK) ||
+ (proc == KEY_NET_GET) || (proc == KEY_NET_PUT) ||
+ (proc == KEY_GET_CONV))
+ clnt = getkeyserv_handle(2); /* talk to version 2 */
+ else
+ clnt = getkeyserv_handle(1); /* talk to version 1 */
+
+ if (clnt == NULL) {
+ return (0);
+ }
+
+ wait_time.tv_sec = TOTAL_TIMEOUT;
+ wait_time.tv_usec = 0;
+
+ if (clnt_call(clnt, proc, xdr_arg, arg, xdr_rslt, rslt,
+ wait_time) == RPC_SUCCESS) {
+ return (1);
+ } else {
+ return (0);
+ }
}
-__sym_compat(key_get_conv, __key_get_conv, FBSD_1.0);
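
key_call() is the funnel for every routine in this file: it consults the _LOCAL override pointers first, picks keyserver protocol version 1 or 2 by procedure number, and only then performs the RPC with a 30-second total timeout. A hedged usage sketch of the public wrappers, generating a conversation key and encrypting it for a peer (the netname is whatever the caller derived; keyserv(8) must be running with a secret key set for the caller's effective uid):

#include <rpc/rpc.h>
#include <rpc/key_prot.h>
#include <stdio.h>

int
session_key_for(const char *peer_netname, des_block *key)
{
	/* Ask the keyserver for a fresh random DES key. */
	if (key_gendes(key) < 0) {
		fprintf(stderr, "key_gendes failed (is keyserv running?)\n");
		return (-1);
	}
	/*
	 * Encrypt *key in place, using our secret key and the peer's
	 * public key, per the common-key scheme rpc_secure.3 describes.
	 */
	if (key_encryptsession(peer_netname, key) < 0) {
		fprintf(stderr, "key_encryptsession failed\n");
		return (-1);
	}
	return (0);
}

The peer recovers the same conversation key with the inverse call, key_decryptsession().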
diff --git a/lib/libc/rpc/publickey.5 b/lib/libc/rpc/publickey.5
new file mode 100644
index 000000000000..db95c4617b50
--- /dev/null
+++ b/lib/libc/rpc/publickey.5
@@ -0,0 +1,40 @@
+.Dd October 19, 1987
+.Dt PUBLICKEY 5
+.Os
+.Sh NAME
+.Nm publickey
+.Nd "public key database"
+.Sh SYNOPSIS
+.Pa /etc/publickey
+.Sh DESCRIPTION
+.Pa /etc/publickey
+is the public key database used for secure
+RPC (Remote Procedure Calls).
+Each entry in
+the database consists of a network user
+name (which may either refer to
+a user or a hostname), followed by the user's
+public key (in hex
+notation), a colon, and then the user's
+secret key encrypted with
+its login password (also in hex notation).
+.Pp
+This file is altered either by the user through the
+.Xr chkey 1
+command or by the system administrator through the
+.Xr newkey 8
+command.
+The file
+.Pa /etc/publickey
+should only contain data on the
+.Tn NIS
+master machine, where it
+is converted into the
+.Tn NIS
+database
+.Pa publickey.byname .
+.Sh SEE ALSO
+.Xr chkey 1 ,
+.Xr publickey 3 ,
+.Xr newkey 8 ,
+.Xr ypupdated 8
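
For illustration, an entry in /etc/publickey pairs a netname with the two hex strings the page describes; the values below are truncated placeholders, not real keys:

unix.1234@example.com 5b8a02...d41e:9f37c8...06ab

Everything before the colon is the public key; everything after it is the secret key, encrypted with the user's login password.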
diff --git a/lib/libc/rpc/rpc_secure.3 b/lib/libc/rpc/rpc_secure.3
index ca99b06b556d..ce59bba7115f 100644
--- a/lib/libc/rpc/rpc_secure.3
+++ b/lib/libc/rpc/rpc_secure.3
@@ -1,17 +1,34 @@
.\"
-.Dd August 10, 2025
-.Dt RPC_SECURE 3
+.Dd February 16, 1988
+.Dt RPC 3
.Os
.Sh NAME
.Nm rpc_secure
.Nd library routines for secure remote procedure calls
.Sh SYNOPSIS
.In rpc/rpc.h
+.Ft AUTH *
+.Fo authdes_create
+.Fa "char *name"
+.Fa "unsigned window"
+.Fa "struct sockaddr *addr"
+.Fa "des_block *ckey"
+.Fc
+.Ft int
+.Fn authdes_getucred "struct authdes_cred *adc" "uid_t *uid" "gid_t *gid" "int *grouplen" "gid_t *groups"
.Ft int
.Fn getnetname "char *name"
.Ft int
.Fn host2netname "char *name" "const char *host" "const char *domain"
.Ft int
+.Fn key_decryptsession "const char *remotename" "des_block *deskey"
+.Ft int
+.Fn key_encryptsession "const char *remotename" "des_block *deskey"
+.Ft int
+.Fn key_gendes "des_block *deskey"
+.Ft int
+.Fn key_setsecret "const char *key"
+.Ft int
.Fn netname2host "char *name" "char *host" "int hostlen"
.Ft int
.Fn netname2user "char *name" "uid_t *uidp" "gid_t *gidp" "int *gidlenp" "gid_t *gidlist"
@@ -21,11 +38,101 @@
These routines are part of the
.Tn RPC
library.
+They implement
+.Tn DES
+Authentication.
See
.Xr rpc 3
for further details about
.Tn RPC .
.Pp
+The
+.Fn authdes_create
+function
+is the first of two routines that interface to the
+.Tn RPC
+secure authentication system, known as
+.Tn DES
+authentication.
+The second is
+.Fn authdes_getucred ,
+below.
+.Pp
+Note: the keyserver daemon
+.Xr keyserv 8
+must be running for the
+.Tn DES
+authentication system to work.
+.Pp
+The
+.Fn authdes_create
+function,
+used on the client side, returns an authentication handle that
+will enable the use of the secure authentication system.
+The first argument
+.Fa name
+is the network name, or
+.Fa netname ,
+of the owner of the server process.
+This field usually
+represents a
+.Fa hostname
+derived from the utility routine
+.Fn host2netname ,
+but could also represent a user name using
+.Fn user2netname .
+The second field is the window on the validity of
+the client credential, given in seconds.
+A small
+window is more secure than a large one, but choosing
+too small a window increases the frequency of
+resynchronizations because of clock drift.
+The third
+argument
+.Fa addr
+is optional.
+If it is
+.Dv NULL ,
+then the authentication system will assume
+that the local clock is always in sync with the server's
+clock, and will not attempt resynchronizations.
+If an address
+is supplied, however, then the system will use the address
+for consulting the remote time service whenever
+resynchronization
+is required.
+This argument is usually the
+address of the
+.Tn RPC
+server itself.
+The final argument
+.Fa ckey
+is also optional.
+If it is
+.Dv NULL ,
+then the authentication system will
+generate a random
+.Tn DES
+key to be used for the encryption of credentials.
+If it is supplied, however, then it will be used instead.
+.Pp
+The
+.Fn authdes_getucred
+function,
+the second of the two
+.Tn DES
+authentication routines,
+is used on the server side for converting a
+.Tn DES
+credential, which is
+operating system independent, into a
+.Ux
+credential.
+This routine differs from the utility routine
+.Fn netname2user
+in that
+.Fn authdes_getucred
+pulls its information from a cache, and does not have to do a
+Yellow Pages lookup every time it is called to get its information.
.Pp
The
.Fn getnetname
@@ -54,6 +161,72 @@ Inverse of
.Fn netname2host .
.Pp
The
+.Fn key_decryptsession
+function
+is an interface to the keyserver daemon, which is associated
+with
+.Tn RPC Ns 's
+secure authentication system
+.Tn ( DES
+authentication).
+User programs rarely need to call it, or its associated routines
+.Fn key_encryptsession ,
+.Fn key_gendes
+and
+.Fn key_setsecret .
+System commands such as
+.Xr login 1
+and the
+.Tn RPC
+library are the main clients of these four routines.
+.Pp
+The
+.Fn key_decryptsession
+function
+takes a server netname and a
+.Tn DES
+key, and decrypts the key by
+using the public key of the server and the secret key
+associated with the effective uid of the calling process.
+It
+is the inverse of
+.Fn key_encryptsession .
+.Pp
+The
+.Fn key_encryptsession
+function
+is a keyserver interface routine.
+It
+takes a server netname and a
+.Tn DES
+key, and encrypts
+it using the public key of the server and the secret key
+associated with the effective uid of the calling process.
+It
+is the inverse of
+.Fn key_decryptsession .
+.Pp
+The
+.Fn key_gendes
+function
+is a keyserver interface routine.
+It
+is used to ask the keyserver for a secure conversation key.
+Choosing one
+.Qq random
+is usually not good enough,
+because
+the common ways of choosing random numbers, such as using the
+current time, are very easy to guess.
+.Pp
+The
+.Fn key_setsecret
+function
+is a keyserver interface routine.
+It is used to set the key for
+the effective
+.Fa uid
+of the calling process.
+.Pp
+The
.Fn netname2host
function
converts from an operating-system independent netname to a
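
A hedged client-side sketch tying authdes_create() to the synopsis above: host2netname() derives the server's netname, and the 60-second window with NULL sync address and conversation key are arbitrary illustrative defaults, not recommendations:

#include <rpc/rpc.h>

int
secure_client(CLIENT *clnt, const char *server_host)
{
	char servername[MAXNETNAMELEN + 1];

	if (!host2netname(servername, server_host, NULL))
		return (-1);
	if (clnt->cl_auth != NULL)
		auth_destroy(clnt->cl_auth);
	/*
	 * 60 s window; NULL addr: trust the local clock; NULL ckey:
	 * let the library pick a random conversation key.
	 */
	clnt->cl_auth = authdes_create(servername, 60, NULL, NULL);
	return (clnt->cl_auth != NULL ? 0 : -1);
}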
diff --git a/lib/libc/rpc/rpc_soc.3 b/lib/libc/rpc/rpc_soc.3
index e6fd8a0da6e4..4abd4b14c475 100644
--- a/lib/libc/rpc/rpc_soc.3
+++ b/lib/libc/rpc/rpc_soc.3
@@ -1,6 +1,6 @@
.\" $NetBSD: rpc_soc.3,v 1.2 2000/06/07 13:39:43 simonb Exp $
.\"
-.Dd August 10, 2025
+.Dd February 16, 1988
.Dt RPC_SOC 3
.Os
.Sh NAME
@@ -100,6 +100,16 @@ to perform the requested service, and then sends back a
reply.
Finally, the procedure call returns to the client.
.Pp
+Routines that are used for Secure
+.Tn RPC ( DES
+authentication) are described in
+.Xr rpc_secure 3 .
+Secure
+.Tn RPC
+can be used only if
+.Tn DES
+encryption is available.
+.Pp
.Bl -tag -width indent -compact
.It Xo
.Ft void
@@ -1691,6 +1701,7 @@ This routine modifies the global variable
Service implementors usually do not need this routine.
.El
.Sh SEE ALSO
+.Xr rpc_secure 3 ,
.Xr xdr 3
.Rs
.%T "Remote Procedure Calls: Protocol Specification"
diff --git a/lib/libc/rpc/rpc_soc.c b/lib/libc/rpc/rpc_soc.c
index e293a2ccf22f..c63b89594ce6 100644
--- a/lib/libc/rpc/rpc_soc.c
+++ b/lib/libc/rpc/rpc_soc.c
@@ -379,13 +379,36 @@ clnt_broadcast(u_long prog, u_long vers, u_long proc, xdrproc_t xargs,
* Create the client des authentication object. Obsoleted by
* authdes_seccreate().
*/
-static AUTH *
-__authdes_create(char *servername, u_int window, struct sockaddr *syncaddr,
+AUTH *
+authdes_create(char *servername, u_int window, struct sockaddr *syncaddr,
des_block *ckey)
+/*
+ * char *servername; // network name of server
+ * u_int window; // time to live
+ * struct sockaddr *syncaddr; // optional hostaddr to sync with
+ * des_block *ckey; // optional conversation key to use
+ */
{
- return (NULL);
+ AUTH *dummy;
+ AUTH *nauth;
+ char hostname[NI_MAXHOST];
+
+ if (syncaddr) {
+ /*
+ * Change addr to hostname, because that is the way
+ * new interface takes it.
+ */
+ if (getnameinfo(syncaddr, syncaddr->sa_len, hostname,
+ sizeof hostname, NULL, 0, 0) != 0)
+ goto fallback;
+
+ nauth = authdes_seccreate(servername, window, hostname, ckey);
+ return (nauth);
+ }
+fallback:
+ dummy = authdes_seccreate(servername, window, NULL, ckey);
+ return (dummy);
}
-__sym_compat(authdes_create, __authdes_create, FBSD_1.0);
/*
* Create a client handle for a unix connection. Obsoleted by clnt_vc_create()
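
The rewritten authdes_create() above only adapts the old sockaddr-based interface to the hostname-based authdes_seccreate(). A minimal sketch of that adaptation step on its own, assuming FreeBSD's sa_len field and standard getnameinfo(3):

#include <sys/socket.h>
#include <netdb.h>

int
addr_to_host(const struct sockaddr *sa, char *host, socklen_t hostlen)
{
	/* Reverse-resolve the address; flags 0 permits a DNS lookup. */
	if (getnameinfo(sa, sa->sa_len, host, hostlen, NULL, 0, 0) == 0)
		return (0);
	/*
	 * Fall back to the numeric form instead of failing outright
	 * (the libc code above instead falls back to a NULL timehost).
	 */
	return (getnameinfo(sa, sa->sa_len, host, hostlen, NULL, 0,
	    NI_NUMERICHOST));
}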
diff --git a/lib/libc/rpc/svc_auth.c b/lib/libc/rpc/svc_auth.c
index b8a9a8f33ebb..eb61171733d6 100644
--- a/lib/libc/rpc/svc_auth.c
+++ b/lib/libc/rpc/svc_auth.c
@@ -114,6 +114,11 @@ _authenticate(struct svc_req *rqst, struct rpc_msg *msg)
case AUTH_SHORT:
dummy = _svcauth_short(rqst, msg);
return (dummy);
+#ifdef DES_BUILTIN
+ case AUTH_DES:
+ dummy = _svcauth_des(rqst, msg);
+ return (dummy);
+#endif
default:
break;
}
@@ -181,6 +186,9 @@ svc_auth_reg(int cred_flavor,
case AUTH_NULL:
case AUTH_SYS:
case AUTH_SHORT:
+#ifdef DES_BUILTIN
+ case AUTH_DES:
+#endif
/* already registered */
return (1);
diff --git a/lib/libc/rpc/svc_auth_des.c b/lib/libc/rpc/svc_auth_des.c
index 8fde5512e53f..d4736cc851e8 100644
--- a/lib/libc/rpc/svc_auth_des.c
+++ b/lib/libc/rpc/svc_auth_des.c
@@ -34,8 +34,17 @@
*/
/*
- * svcauth_des.c, server-side des authentication.
- * This functionality was removed in FreeBSD 15.0.
+ * svcauth_des.c, server-side des authentication
+ *
+ * We ensure for the service the following:
+ * (1) The timestamp microseconds do not exceed 1 million.
+ * (2) The timestamp plus the window is greater than the current time.
+ * (3) The timestamp is not less than the one previously
+ * seen in the current session.
+ *
+ * It is up to the server to determine if the window size is
+ * too small.
+ *
*/
#include "namespace.h"
@@ -56,27 +65,458 @@
#include <rpc/svc_auth.h>
#include "libc_private.h"
+extern int key_decryptsession_pk(const char *, netobj *, des_block *);
+
+#define debug(msg) printf("svcauth_des: %s\n", msg)
+
+#define USEC_PER_SEC ((u_long) 1000000L)
+#define BEFORE(t1, t2) timercmp(t1, t2, <)
+
+/*
+ * LRU cache of conversation keys and some other useful items.
+ */
+#define AUTHDES_CACHESZ 64
+struct cache_entry {
+ des_block key; /* conversation key */
+ char *rname; /* client's name */
+ u_int window; /* credential lifetime window */
+ struct timeval laststamp; /* detect replays of creds */
+ char *localcred; /* generic local credential */
+};
+static struct cache_entry *authdes_cache/* [AUTHDES_CACHESZ] */;
+static short *authdes_lru/* [AUTHDES_CACHESZ] */;
+
+static void cache_init(void); /* initialize the cache */
+static short cache_spot(des_block *, char *, struct timeval *); /* find an entry in the cache */
+static void cache_ref(short sid); /* note that sid was ref'd */
+
+static void invalidate(char *); /* invalidate entry in cache */
+
+/*
+ * cache statistics
+ */
+static struct {
+ u_long ncachehits; /* times cache hit, and is not replay */
+ u_long ncachereplays; /* times cache hit, and is replay */
+ u_long ncachemisses; /* times cache missed */
+} svcauthdes_stats;
+
/*
* Service side authenticator for AUTH_DES
*/
-static enum auth_stat
-__svcauth_des(struct svc_req *rqst, struct rpc_msg *msg)
+enum auth_stat
+_svcauth_des(struct svc_req *rqst, struct rpc_msg *msg)
{
- return (AUTH_FAILED);
+ long *ixdr;
+ des_block cryptbuf[2];
+ struct authdes_cred *cred;
+ struct authdes_verf verf;
+ int status;
+ struct cache_entry *entry;
+ short sid = 0;
+ des_block *sessionkey;
+ des_block ivec;
+ u_int window;
+ struct timeval timestamp;
+ u_long namelen;
+ struct area {
+ struct authdes_cred area_cred;
+ char area_netname[MAXNETNAMELEN+1];
+ } *area;
+
+ if (authdes_cache == NULL) {
+ cache_init();
+ }
+
+ area = (struct area *)rqst->rq_clntcred;
+ cred = (struct authdes_cred *)&area->area_cred;
+
+ /*
+ * Get the credential
+ */
+ ixdr = (long *)msg->rm_call.cb_cred.oa_base;
+ cred->adc_namekind = IXDR_GET_ENUM(ixdr, enum authdes_namekind);
+ switch (cred->adc_namekind) {
+ case ADN_FULLNAME:
+ namelen = IXDR_GET_U_LONG(ixdr);
+ if (namelen > MAXNETNAMELEN) {
+ return (AUTH_BADCRED);
+ }
+ cred->adc_fullname.name = area->area_netname;
+ bcopy((char *)ixdr, cred->adc_fullname.name,
+ (u_int)namelen);
+ cred->adc_fullname.name[namelen] = 0;
+ ixdr += (RNDUP(namelen) / BYTES_PER_XDR_UNIT);
+ cred->adc_fullname.key.key.high = (u_long)*ixdr++;
+ cred->adc_fullname.key.key.low = (u_long)*ixdr++;
+ cred->adc_fullname.window = (u_long)*ixdr++;
+ break;
+ case ADN_NICKNAME:
+ cred->adc_nickname = (u_long)*ixdr++;
+ break;
+ default:
+ return (AUTH_BADCRED);
+ }
+
+ /*
+ * Get the verifier
+ */
+ ixdr = (long *)msg->rm_call.cb_verf.oa_base;
+ verf.adv_xtimestamp.key.high = (u_long)*ixdr++;
+ verf.adv_xtimestamp.key.low = (u_long)*ixdr++;
+ verf.adv_int_u = (u_long)*ixdr++;
+
+ /*
+ * Get the conversation key
+ */
+ if (cred->adc_namekind == ADN_FULLNAME) {
+ netobj pkey;
+ char pkey_data[1024];
+
+ sessionkey = &cred->adc_fullname.key;
+ if (! getpublickey(cred->adc_fullname.name, pkey_data)) {
+ debug("getpublickey");
+ return(AUTH_BADCRED);
+ }
+ pkey.n_bytes = pkey_data;
+ pkey.n_len = strlen(pkey_data) + 1;
+ if (key_decryptsession_pk(cred->adc_fullname.name, &pkey,
+ sessionkey) < 0) {
+ debug("decryptsessionkey");
+ return (AUTH_BADCRED); /* key not found */
+ }
+ } else { /* ADN_NICKNAME */
+ sid = (short)cred->adc_nickname;
+ if (sid < 0 || sid >= AUTHDES_CACHESZ) {
+ debug("bad nickname");
+ return (AUTH_BADCRED); /* garbled credential */
+ }
+ sessionkey = &authdes_cache[sid].key;
+ }
+
+ /*
+ * Decrypt the timestamp
+ */
+ cryptbuf[0] = verf.adv_xtimestamp;
+ if (cred->adc_namekind == ADN_FULLNAME) {
+ cryptbuf[1].key.high = cred->adc_fullname.window;
+ cryptbuf[1].key.low = verf.adv_winverf;
+ ivec.key.high = ivec.key.low = 0;
+ status = cbc_crypt((char *)sessionkey, (char *)cryptbuf,
+ 2*sizeof(des_block), DES_DECRYPT | DES_HW,
+ (char *)&ivec);
+ } else {
+ status = ecb_crypt((char *)sessionkey, (char *)cryptbuf,
+ sizeof(des_block), DES_DECRYPT | DES_HW);
+ }
+ if (DES_FAILED(status)) {
+ debug("decryption failure");
+ return (AUTH_FAILED); /* system error */
+ }
+
+ /*
+ * XDR the decrypted timestamp
+ */
+ ixdr = (long *)cryptbuf;
+ timestamp.tv_sec = IXDR_GET_LONG(ixdr);
+ timestamp.tv_usec = IXDR_GET_LONG(ixdr);
+
+ /*
+ * Check for valid credentials and verifiers.
+ * They could be invalid because the key was flushed
+ * out of the cache, and so a new session should begin.
+ * Be sure and send AUTH_REJECTED{CRED, VERF} if this is the case.
+ */
+ {
+ struct timeval current;
+ int nick;
+ int winverf;
+
+ if (cred->adc_namekind == ADN_FULLNAME) {
+ window = IXDR_GET_U_LONG(ixdr);
+ winverf = IXDR_GET_U_LONG(ixdr);
+ if (winverf != window - 1) {
+ debug("window verifier mismatch");
+ return (AUTH_BADCRED); /* garbled credential */
+ }
+ sid = cache_spot(sessionkey, cred->adc_fullname.name,
+ &timestamp);
+ if (sid < 0) {
+ debug("replayed credential");
+ return (AUTH_REJECTEDCRED); /* replay */
+ }
+ nick = 0;
+ } else { /* ADN_NICKNAME */
+ window = authdes_cache[sid].window;
+ nick = 1;
+ }
+
+ if ((u_long)timestamp.tv_usec >= USEC_PER_SEC) {
+ debug("invalid usecs");
+ /* cached out (bad key), or garbled verifier */
+ return (nick ? AUTH_REJECTEDVERF : AUTH_BADVERF);
+ }
+ if (nick && BEFORE(&timestamp,
+ &authdes_cache[sid].laststamp)) {
+ debug("timestamp before last seen");
+ return (AUTH_REJECTEDVERF); /* replay */
+ }
+ (void)gettimeofday(&current, NULL);
+ current.tv_sec -= window; /* allow for expiration */
+ if (!BEFORE(&current, &timestamp)) {
+ debug("timestamp expired");
+ /* replay, or garbled credential */
+ return (nick ? AUTH_REJECTEDVERF : AUTH_BADCRED);
+ }
+ }
+
+ /*
+ * Set up the reply verifier
+ */
+ verf.adv_nickname = (u_long)sid;
+
+ /*
+ * xdr the timestamp before encrypting
+ */
+ ixdr = (long *)cryptbuf;
+ IXDR_PUT_LONG(ixdr, timestamp.tv_sec - 1);
+ IXDR_PUT_LONG(ixdr, timestamp.tv_usec);
+
+ /*
+ * encrypt the timestamp
+ */
+ status = ecb_crypt((char *)sessionkey, (char *)cryptbuf,
+ sizeof(des_block), DES_ENCRYPT | DES_HW);
+ if (DES_FAILED(status)) {
+ debug("encryption failure");
+ return (AUTH_FAILED); /* system error */
+ }
+ verf.adv_xtimestamp = cryptbuf[0];
+
+ /*
+ * Serialize the reply verifier, and update rqst
+ */
+ ixdr = (long *)msg->rm_call.cb_verf.oa_base;
+ *ixdr++ = (long)verf.adv_xtimestamp.key.high;
+ *ixdr++ = (long)verf.adv_xtimestamp.key.low;
+ *ixdr++ = (long)verf.adv_int_u;
+
+ rqst->rq_xprt->xp_verf.oa_flavor = AUTH_DES;
+ rqst->rq_xprt->xp_verf.oa_base = msg->rm_call.cb_verf.oa_base;
+ rqst->rq_xprt->xp_verf.oa_length =
+ (char *)ixdr - msg->rm_call.cb_verf.oa_base;
+
+ /*
+ * We succeeded, commit the data to the cache now and
+ * finish cooking the credential.
+ */
+ entry = &authdes_cache[sid];
+ entry->laststamp = timestamp;
+ cache_ref(sid);
+ if (cred->adc_namekind == ADN_FULLNAME) {
+ cred->adc_fullname.window = window;
+ cred->adc_nickname = (u_long)sid; /* save nickname */
+ if (entry->rname != NULL) {
+ mem_free(entry->rname, strlen(entry->rname) + 1);
+ }
+ entry->rname = (char *)mem_alloc((u_int)strlen(cred->adc_fullname.name)
+ + 1);
+ if (entry->rname != NULL) {
+ (void) strcpy(entry->rname, cred->adc_fullname.name);
+ } else {
+ debug("out of memory");
+ }
+ entry->key = *sessionkey;
+ entry->window = window;
+ invalidate(entry->localcred); /* mark any cached cred invalid */
+ } else { /* ADN_NICKNAME */
+ /*
+ * nicknames are cooked into fullnames
+ */
+ cred->adc_namekind = ADN_FULLNAME;
+ cred->adc_fullname.name = entry->rname;
+ cred->adc_fullname.key = entry->key;
+ cred->adc_fullname.window = entry->window;
+ }
+ return (AUTH_OK); /* we made it! */
}
-__sym_compat(_svcauth_des, __svcauth_des, FBSD_1.0);
/*
+ * Initialize the cache
+ */
+static void
+cache_init(void)
+{
+ int i;
+
+ authdes_cache = (struct cache_entry *)
+ mem_alloc(sizeof(struct cache_entry) * AUTHDES_CACHESZ);
+ bzero((char *)authdes_cache,
+ sizeof(struct cache_entry) * AUTHDES_CACHESZ);
+
+ authdes_lru = (short *)mem_alloc(sizeof(short) * AUTHDES_CACHESZ);
+ /*
+ * Initialize the lru list
+ */
+ for (i = 0; i < AUTHDES_CACHESZ; i++) {
+ authdes_lru[i] = i;
+ }
+}
+
+
+/*
+ * Find the lru victim
+ */
+static short
+cache_victim(void)
+{
+ return (authdes_lru[AUTHDES_CACHESZ-1]);
+}
+
+/*
+ * Note that sid was referenced
+ */
+static void
+cache_ref(short sid)
+{
+ int i;
+ short curr;
+ short prev;
+
+ prev = authdes_lru[0];
+ authdes_lru[0] = sid;
+ for (i = 1; prev != sid; i++) {
+ curr = authdes_lru[i];
+ authdes_lru[i] = prev;
+ prev = curr;
+ }
+}
+
+
+/*
+ * Find a spot in the cache for a credential containing
+ * the items given. Return -1 if a replay is detected, otherwise
+ * return the spot in the cache.
+ */
+static short
+cache_spot(des_block *key, char *name, struct timeval *timestamp)
+{
+ struct cache_entry *cp;
+ int i;
+ u_long hi;
+
+ hi = key->key.high;
+ for (cp = authdes_cache, i = 0; i < AUTHDES_CACHESZ; i++, cp++) {
+ if (cp->key.key.high == hi &&
+ cp->key.key.low == key->key.low &&
+ cp->rname != NULL &&
+ bcmp(cp->rname, name, strlen(name) + 1) == 0) {
+ if (BEFORE(timestamp, &cp->laststamp)) {
+ svcauthdes_stats.ncachereplays++;
+ return (-1); /* replay */
+ }
+ svcauthdes_stats.ncachehits++;
+ return (i); /* refresh */
+ }
+ }
+ svcauthdes_stats.ncachemisses++;
+ return (cache_victim()); /* new credential */
+}
+
+
+#if (defined(sun) || defined(vax) || defined(__FreeBSD__))
+/*
+ * Local credential handling stuff.
+ * NOTE: bsd unix dependent.
+ * Other operating systems should put something else here.
+ */
+#define UNKNOWN -2 /* grouplen, if cached cred is unknown user */
+#define INVALID -1 /* grouplen, if cache entry is invalid */
+
+struct bsdcred {
+ uid_t uid; /* cached uid */
+ gid_t gid; /* cached gid */
+ int grouplen; /* length of cached groups */
+ gid_t groups[NGRPS]; /* cached groups */
+};
+
+/*
* Map a des credential into a unix cred.
* We cache the credential here so the application does
* not have to make an rpc call every time to interpret
* the credential.
*/
-static int
-__authdes_getucred(void *adc, uid_t *uid, gid_t *gid,
+int
+authdes_getucred(struct authdes_cred *adc, uid_t *uid, gid_t *gid,
int *grouplen, gid_t *groups)
{
- return (0);
+ unsigned sid;
+ int i;
+ uid_t i_uid;
+ gid_t i_gid;
+ int i_grouplen;
+ struct bsdcred *cred;
+
+ sid = adc->adc_nickname;
+ if (sid >= AUTHDES_CACHESZ) {
+ debug("invalid nickname");
+ return (0);
+ }
+ cred = (struct bsdcred *)authdes_cache[sid].localcred;
+ if (cred == NULL) {
+ cred = (struct bsdcred *)mem_alloc(sizeof(struct bsdcred));
+ authdes_cache[sid].localcred = (char *)cred;
+ cred->grouplen = INVALID;
+ }
+ if (cred->grouplen == INVALID) {
+ /*
+ * not in cache: lookup
+ */
+ if (!netname2user(adc->adc_fullname.name, &i_uid, &i_gid,
+ &i_grouplen, groups)) {
+ debug("unknown netname");
+ cred->grouplen = UNKNOWN; /* mark as looked up, but not found */
+ return (0);
+ }
+ debug("missed ucred cache");
+ *uid = cred->uid = i_uid;
+ *gid = cred->gid = i_gid;
+ *grouplen = cred->grouplen = i_grouplen;
+ for (i = i_grouplen - 1; i >= 0; i--) {
+ cred->groups[i] = groups[i]; /* copy into the cache */
+ }
+ return (1);
+ } else if (cred->grouplen == UNKNOWN) {
+ /*
+ * Already looked up, but no match found
+ */
+ return (0);
+ }
+
+ /*
+ * cached credentials
+ */
+ *uid = cred->uid;
+ *gid = cred->gid;
+ *grouplen = cred->grouplen;
+ for (i = cred->grouplen - 1; i >= 0; i--) {
+ groups[i] = cred->groups[i]; /* copy out of the cache */
+ }
+ return (1);
+}
+
+static void
+invalidate(char *cred)
+{
+ if (cred == NULL) {
+ return;
+ }
+ ((struct bsdcred *)cred)->grouplen = INVALID;
}
-__sym_compat(authdes_getucred, __authdes_getucred, FBSD_1.0);
+#endif
+
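
cache_ref() above implements the move-to-front step of the LRU scheme: authdes_lru[] holds slot ids ordered most- to least-recently used, and cache_victim() simply takes the tail. A self-contained sketch of that rotation:

#include <stdio.h>

#define CACHESZ	8
static short lru[CACHESZ] = { 0, 1, 2, 3, 4, 5, 6, 7 };

static void
ref(short sid)
{
	short prev, curr;
	int i;

	/*
	 * Put sid at the front and shift everything that was ahead of
	 * it down one place, stopping once sid's old slot is reached.
	 */
	prev = lru[0];
	lru[0] = sid;
	for (i = 1; prev != sid; i++) {
		curr = lru[i];
		lru[i] = prev;
		prev = curr;
	}
}

int
main(void)
{
	int i;

	ref(5);
	for (i = 0; i < CACHESZ; i++)
		printf("%d ", lru[i]);
	printf("\n");		/* prints: 5 0 1 2 3 4 6 7 */
	return (0);
}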
diff --git a/lib/libc/stdlib/malloc/jemalloc/Makefile.inc b/lib/libc/stdlib/malloc/jemalloc/Makefile.inc
index c10d79dbce6c..7722a7b755f3 100644
--- a/lib/libc/stdlib/malloc/jemalloc/Makefile.inc
+++ b/lib/libc/stdlib/malloc/jemalloc/Makefile.inc
@@ -1,15 +1,21 @@
.PATH: ${LIBC_SRCTOP}/stdlib/malloc/jemalloc
JEMALLOCSRCS:= jemalloc.c arena.c background_thread.c base.c bin.c bitmap.c \
- ckh.c ctl.c div.c extent.c extent_dss.c extent_mmap.c hash.c hook.c \
- large.c log.c malloc_io.c mutex.c mutex_pool.c nstime.c pages.c \
- prng.c prof.c rtree.c safety_check.c sc.c stats.c sz.c tcache.c \
- test_hooks.c ticker.c tsd.c witness.c
+ ckh.c ctl.c div.c extent.c extent_dss.c extent_mmap.c hook.c \
+ large.c log.c malloc_io.c mutex.c nstime.c pages.c \
+ prof.c rtree.c safety_check.c sc.c stats.c sz.c tcache.c \
+ test_hooks.c ticker.c tsd.c witness.c \
+ bin_info.c san.c san_bump.c counter.c prof_data.c prof_log.c prof_recent.c prof_stats.c prof_sys.c \
+ emap.c edata.c edata_cache.c pa.c pa_extra.c pac.c decay.c hpa.c hpa_hooks.c fxp.c hpdata.c pai.c \
+ ecache.c ehooks.c eset.c sec.c cache_bin.c peak_event.c psset.c inspect.c exp_grow.c thread_event.c \
+ buf_writer.c
CFLAGS+=-I${SRCTOP}/contrib/jemalloc/include -I${LIBC_SRCTOP}/stdlib/malloc/jemalloc/include
.if ${MK_JEMALLOC_LG_VADDR_WIDE} != no
CFLAGS+=-D_USE_LG_VADDR_WIDE
.endif
+CFLAGS.gcc+=-Wno-error=missing-braces
+
.for src in ${JEMALLOCSRCS}
MISRCS+=jemalloc_${src}
diff --git a/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h b/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
index 1aedb916976b..900ae867f321 100644
--- a/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
+++ b/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/jemalloc_internal_defs.h
@@ -45,17 +45,17 @@
#define LG_VADDR 48
/* Defined if C11 atomics are available. */
-#define JEMALLOC_C11_ATOMICS 1
+#define JEMALLOC_C11_ATOMICS
/* Defined if GCC __atomic atomics are available. */
-#define JEMALLOC_GCC_ATOMIC_ATOMICS 1
+#define JEMALLOC_GCC_ATOMIC_ATOMICS
/* and the 8-bit variant support. */
-#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS 1
+#define JEMALLOC_GCC_U8_ATOMIC_ATOMICS
/* Defined if GCC __sync atomics are available. */
-#define JEMALLOC_GCC_SYNC_ATOMICS 1
+#define JEMALLOC_GCC_SYNC_ATOMICS
/* and the 8-bit variant support. */
-#define JEMALLOC_GCC_U8_SYNC_ATOMICS 1
+#define JEMALLOC_GCC_U8_SYNC_ATOMICS
/*
* Defined if __builtin_clz() and __builtin_clzl() are available.
@@ -73,7 +73,7 @@
/*
* Defined if secure_getenv(3) is available.
*/
-/* #undef JEMALLOC_HAVE_SECURE_GETENV */
+#define JEMALLOC_HAVE_SECURE_GETENV
/*
* Defined if issetugid(2) is available.
@@ -84,17 +84,23 @@
#define JEMALLOC_HAVE_PTHREAD_ATFORK
/* Defined if pthread_setname_np(3) is available. */
-/* #undef JEMALLOC_HAVE_PTHREAD_SETNAME_NP */
+#define JEMALLOC_HAVE_PTHREAD_SETNAME_NP
+
+/* Defined if pthread_getname_np(3) is available. */
+#define JEMALLOC_HAVE_PTHREAD_GETNAME_NP
+
+/* Defined if pthread_get_name_np(3) is available. */
+#define JEMALLOC_HAVE_PTHREAD_GET_NAME_NP
/*
* Defined if clock_gettime(CLOCK_MONOTONIC_COARSE, ...) is available.
*/
-/* #undef JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE */
+#define JEMALLOC_HAVE_CLOCK_MONOTONIC_COARSE
/*
* Defined if clock_gettime(CLOCK_MONOTONIC, ...) is available.
*/
-#define JEMALLOC_HAVE_CLOCK_MONOTONIC 1
+#define JEMALLOC_HAVE_CLOCK_MONOTONIC
/*
* Defined if mach_absolute_time() is available.
@@ -102,6 +108,11 @@
/* #undef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME */
/*
+ * Defined if clock_gettime(CLOCK_REALTIME, ...) is available.
+ */
+#define JEMALLOC_HAVE_CLOCK_REALTIME
+
+/*
* Defined if _malloc_thread_cleanup() exists. At least in the case of
* FreeBSD, pthread_key_create() allocates, which if used during malloc
* bootstrapping will cause recursion into the pthreads library. Therefore, if
@@ -122,7 +133,7 @@
* _pthread_mutex_init_calloc_cb(), in which case the function is used in order
* to avoid recursive allocation during mutex initialization.
*/
-#define JEMALLOC_MUTEX_INIT_CB 1
+#define JEMALLOC_MUTEX_INIT_CB
/* Non-empty if the tls_model attribute is supported. */
#define JEMALLOC_TLS_MODEL __attribute__((tls_model("initial-exec")))
@@ -163,6 +174,9 @@
/* Support utrace(2)-based tracing. */
#define JEMALLOC_UTRACE
+/* Support utrace(2)-based tracing (label based signature). */
+/* #undef JEMALLOC_UTRACE_LABEL */
+
/* Support optional abort() on OOM. */
#define JEMALLOC_XMALLOC
@@ -178,6 +192,9 @@
/* One page is 2^LG_PAGE bytes. */
#define LG_PAGE 12
+/* Maximum number of regions in a slab. */
+/* #undef CONFIG_LG_SLAB_MAXREGS */
+
/*
* One huge page is 2^LG_HUGEPAGE bytes. Note that this is defined even if the
* system does not explicitly support huge pages; system calls that require
@@ -292,16 +309,45 @@
/* #undef JEMALLOC_MADVISE_DONTDUMP */
/*
+ * Defined if MADV_[NO]CORE is supported as an argument to madvise.
+ */
+#define JEMALLOC_MADVISE_NOCORE
+
+/* Defined if mprotect(2) is available. */
+#define JEMALLOC_HAVE_MPROTECT
+
+/*
* Defined if transparent huge pages (THPs) are supported via the
* MADV_[NO]HUGEPAGE arguments to madvise(2), and THP support is enabled.
*/
/* #undef JEMALLOC_THP */
+/* Defined if posix_madvise is available. */
+/* #undef JEMALLOC_HAVE_POSIX_MADVISE */
+
+/*
+ * Method for purging unused pages using posix_madvise.
+ *
+ * posix_madvise(..., POSIX_MADV_DONTNEED)
+ */
+/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED */
+/* #undef JEMALLOC_PURGE_POSIX_MADVISE_DONTNEED_ZEROS */
+
+/*
+ * Defined if memcntl page admin call is supported
+ */
+/* #undef JEMALLOC_HAVE_MEMCNTL */
+
+/*
+ * Defined if malloc_size is supported
+ */
+/* #undef JEMALLOC_HAVE_MALLOC_SIZE */
+
/* Define if operating system has alloca.h header. */
/* #undef JEMALLOC_HAS_ALLOCA_H */
/* C99 restrict keyword supported. */
-#define JEMALLOC_HAS_RESTRICT 1
+#define JEMALLOC_HAS_RESTRICT
/* For use by hash code. */
/* #undef JEMALLOC_BIG_ENDIAN */
@@ -334,15 +380,15 @@
#define JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP
/* GNU specific sched_getcpu support */
-/* #undef JEMALLOC_HAVE_SCHED_GETCPU */
+#define JEMALLOC_HAVE_SCHED_GETCPU
/* GNU specific sched_setaffinity support */
-/* #undef JEMALLOC_HAVE_SCHED_SETAFFINITY */
+#define JEMALLOC_HAVE_SCHED_SETAFFINITY
/*
* If defined, all the features necessary for background threads are present.
*/
-#define JEMALLOC_BACKGROUND_THREAD 1
+#define JEMALLOC_BACKGROUND_THREAD
/*
* If defined, jemalloc symbols are not exported (doesn't work when
@@ -354,7 +400,7 @@
#define JEMALLOC_CONFIG_MALLOC_CONF "abort_conf:false"
/* If defined, jemalloc takes the malloc/free/etc. symbol names. */
-#define JEMALLOC_IS_MALLOC 1
+#define JEMALLOC_IS_MALLOC
/*
* Defined if strerror_r returns char * if _GNU_SOURCE is defined.
@@ -364,4 +410,19 @@
/* Performs additional safety checks when defined. */
/* #undef JEMALLOC_OPT_SAFETY_CHECKS */
+/* Is C++ support being built? */
+#define JEMALLOC_ENABLE_CXX
+
+/* Performs additional size checks when defined. */
+/* #undef JEMALLOC_OPT_SIZE_CHECKS */
+
+/* Allows sampled junk and stash for checking use-after-free when defined. */
+/* #undef JEMALLOC_UAF_DETECTION */
+
+/* Darwin VM_MAKE_TAG support */
+/* #undef JEMALLOC_HAVE_VM_MAKE_TAG */
+
+/* If defined, realloc(ptr, 0) defaults to "free" instead of "alloc". */
+/* #undef JEMALLOC_ZERO_REALLOC_DEFAULT_FREE */
+
#endif /* JEMALLOC_INTERNAL_DEFS_H_ */
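
Most of the churn in this header is dropping the trailing 1 from the feature macros. That is cosmetic: #ifdef and defined() test whether a macro exists, not its value, so an empty definition guards exactly like #define FOO 1, provided the macro is only ever tested with #ifdef or #if defined(), never with a bare #if. A short demonstration:

#include <stdio.h>

#define FEATURE_A		/* empty expansion, still "defined" */
#define FEATURE_B 1

int
main(void)
{
#ifdef FEATURE_A
	puts("A is defined");
#endif
#if defined(FEATURE_B)
	puts("B is defined");
#endif
	return (0);
}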
diff --git a/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/jemalloc_preamble.h b/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/jemalloc_preamble.h
index d1dcc50d5a3b..b6cf5864109d 100644
--- a/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/jemalloc_preamble.h
+++ b/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/jemalloc_preamble.h
@@ -4,12 +4,20 @@
#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"
-#ifdef JEMALLOC_UTRACE
+#if defined(JEMALLOC_UTRACE) || defined(JEMALLOC_UTRACE_LABEL)
#include <sys/ktrace.h>
+# if defined(JEMALLOC_UTRACE)
+# define UTRACE_CALL(p, l) utrace(p, l)
+# else
+# define UTRACE_CALL(p, l) utrace("jemalloc_process", p, l)
+# define JEMALLOC_UTRACE
+# endif
#endif
+#ifndef JEMALLOC_PRIVATE_NAMESPACE
#include "un-namespace.h"
#include "libc_private.h"
+#endif
#define JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
@@ -177,6 +185,35 @@ static const bool config_opt_safety_checks =
#endif
;
+/*
+ * Extra debugging of sized deallocations too onerous to be included in the
+ * general safety checks.
+ */
+static const bool config_opt_size_checks =
+#if defined(JEMALLOC_OPT_SIZE_CHECKS) || defined(JEMALLOC_DEBUG)
+ true
+#else
+ false
+#endif
+ ;
+
+static const bool config_uaf_detection =
+#if defined(JEMALLOC_UAF_DETECTION) || defined(JEMALLOC_DEBUG)
+ true
+#else
+ false
+#endif
+ ;
+
+/* Whether or not the C++ extensions are enabled. */
+static const bool config_enable_cxx =
+#ifdef JEMALLOC_ENABLE_CXX
+ true
+#else
+ false
+#endif
+;
+
#if defined(_WIN32) || defined(JEMALLOC_HAVE_SCHED_GETCPU)
/* Currently percpu_arena depends on sched_getcpu. */
#define JEMALLOC_PERCPU_ARENA
@@ -206,5 +243,20 @@ static const bool have_background_thread =
false
#endif
;
+static const bool config_high_res_timer =
+#ifdef JEMALLOC_HAVE_CLOCK_REALTIME
+ true
+#else
+ false
+#endif
+ ;
+
+static const bool have_memcntl =
+#ifdef JEMALLOC_HAVE_MEMCNTL
+ true
+#else
+ false
+#endif
+ ;
#endif /* JEMALLOC_PREAMBLE_H */
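
The config_* constants added above follow jemalloc's usual flag idiom, sketched below: the preprocessor choice collapses to a compile-time constant, so if (config_foo) branches are type-checked in every build but folded away by the optimizer when the feature is off (names here are illustrative):

#include <stdbool.h>
#include <stdio.h>

static const bool config_example =
#ifdef EXAMPLE_FEATURE
    true
#else
    false
#endif
    ;

int
main(void)
{
	/*
	 * The dead branch is eliminated at compile time, but both arms
	 * must still compile, unlike code hidden behind #ifdef.
	 */
	if (config_example)
		puts("feature on");
	else
		puts("feature off");
	return (0);
}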
diff --git a/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/private_namespace.h b/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/private_namespace.h
index a448ad63fa54..08ce9219cbf8 100644
--- a/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/private_namespace.h
+++ b/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/private_namespace.h
@@ -5,10 +5,9 @@
#define arena_init JEMALLOC_N(arena_init)
#define arena_migrate JEMALLOC_N(arena_migrate)
#define arena_set JEMALLOC_N(arena_set)
-#define arena_tdata_get_hard JEMALLOC_N(arena_tdata_get_hard)
#define arenas JEMALLOC_N(arenas)
#define arenas_lock JEMALLOC_N(arenas_lock)
-#define arenas_tdata_cleanup JEMALLOC_N(arenas_tdata_cleanup)
+#define batch_alloc JEMALLOC_N(batch_alloc)
#define bootstrap_calloc JEMALLOC_N(bootstrap_calloc)
#define bootstrap_free JEMALLOC_N(bootstrap_free)
#define bootstrap_malloc JEMALLOC_N(bootstrap_malloc)
@@ -16,8 +15,10 @@
#define iarena_cleanup JEMALLOC_N(iarena_cleanup)
#define je_sdallocx_noflags JEMALLOC_N(je_sdallocx_noflags)
#define jemalloc_postfork_child JEMALLOC_N(jemalloc_postfork_child)
+#define junk_alloc_callback JEMALLOC_N(junk_alloc_callback)
+#define junk_free_callback JEMALLOC_N(junk_free_callback)
#define malloc_default JEMALLOC_N(malloc_default)
-#define malloc_initialized JEMALLOC_N(malloc_initialized)
+#define malloc_init_state JEMALLOC_N(malloc_init_state)
#define malloc_slow JEMALLOC_N(malloc_slow)
#define manual_arena_base JEMALLOC_N(manual_arena_base)
#define narenas_auto JEMALLOC_N(narenas_auto)
@@ -25,49 +26,64 @@
#define ncpus JEMALLOC_N(ncpus)
#define opt_abort JEMALLOC_N(opt_abort)
#define opt_abort_conf JEMALLOC_N(opt_abort_conf)
+#define opt_cache_oblivious JEMALLOC_N(opt_cache_oblivious)
#define opt_confirm_conf JEMALLOC_N(opt_confirm_conf)
+#define opt_experimental_infallible_new JEMALLOC_N(opt_experimental_infallible_new)
+#define opt_hpa JEMALLOC_N(opt_hpa)
+#define opt_hpa_opts JEMALLOC_N(opt_hpa_opts)
+#define opt_hpa_sec_opts JEMALLOC_N(opt_hpa_sec_opts)
#define opt_junk JEMALLOC_N(opt_junk)
#define opt_junk_alloc JEMALLOC_N(opt_junk_alloc)
#define opt_junk_free JEMALLOC_N(opt_junk_free)
#define opt_narenas JEMALLOC_N(opt_narenas)
+#define opt_narenas_ratio JEMALLOC_N(opt_narenas_ratio)
+#define opt_trust_madvise JEMALLOC_N(opt_trust_madvise)
#define opt_utrace JEMALLOC_N(opt_utrace)
#define opt_xmalloc JEMALLOC_N(opt_xmalloc)
#define opt_zero JEMALLOC_N(opt_zero)
+#define opt_zero_realloc_action JEMALLOC_N(opt_zero_realloc_action)
#define sdallocx_default JEMALLOC_N(sdallocx_default)
-#define arena_alloc_junk_small JEMALLOC_N(arena_alloc_junk_small)
+#define zero_realloc_count JEMALLOC_N(zero_realloc_count)
+#define zero_realloc_mode_names JEMALLOC_N(zero_realloc_mode_names)
#define arena_basic_stats_merge JEMALLOC_N(arena_basic_stats_merge)
-#define arena_bin_choose_lock JEMALLOC_N(arena_bin_choose_lock)
+#define arena_bin_choose JEMALLOC_N(arena_bin_choose)
+#define arena_bin_offsets JEMALLOC_N(arena_bin_offsets)
+#define arena_binind_div_info JEMALLOC_N(arena_binind_div_info)
#define arena_boot JEMALLOC_N(arena_boot)
+#define arena_cache_bin_fill_small JEMALLOC_N(arena_cache_bin_fill_small)
#define arena_choose_huge JEMALLOC_N(arena_choose_huge)
-#define arena_dalloc_bin_junked_locked JEMALLOC_N(arena_dalloc_bin_junked_locked)
-#define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
+#define arena_config_default JEMALLOC_N(arena_config_default)
+#define arena_dalloc_bin_locked_handle_newly_empty JEMALLOC_N(arena_dalloc_bin_locked_handle_newly_empty)
+#define arena_dalloc_bin_locked_handle_newly_nonempty JEMALLOC_N(arena_dalloc_bin_locked_handle_newly_nonempty)
#define arena_dalloc_promoted JEMALLOC_N(arena_dalloc_promoted)
#define arena_dalloc_small JEMALLOC_N(arena_dalloc_small)
#define arena_decay JEMALLOC_N(arena_decay)
+#define arena_decay_ms_get JEMALLOC_N(arena_decay_ms_get)
+#define arena_decay_ms_set JEMALLOC_N(arena_decay_ms_set)
#define arena_destroy JEMALLOC_N(arena_destroy)
#define arena_dirty_decay_ms_default_get JEMALLOC_N(arena_dirty_decay_ms_default_get)
#define arena_dirty_decay_ms_default_set JEMALLOC_N(arena_dirty_decay_ms_default_set)
-#define arena_dirty_decay_ms_get JEMALLOC_N(arena_dirty_decay_ms_get)
-#define arena_dirty_decay_ms_set JEMALLOC_N(arena_dirty_decay_ms_set)
+#define arena_do_deferred_work JEMALLOC_N(arena_do_deferred_work)
#define arena_dss_prec_get JEMALLOC_N(arena_dss_prec_get)
#define arena_dss_prec_set JEMALLOC_N(arena_dss_prec_set)
+#define arena_emap_global JEMALLOC_N(arena_emap_global)
#define arena_extent_alloc_large JEMALLOC_N(arena_extent_alloc_large)
#define arena_extent_dalloc_large_prep JEMALLOC_N(arena_extent_dalloc_large_prep)
#define arena_extent_ralloc_large_expand JEMALLOC_N(arena_extent_ralloc_large_expand)
#define arena_extent_ralloc_large_shrink JEMALLOC_N(arena_extent_ralloc_large_shrink)
-#define arena_extent_sn_next JEMALLOC_N(arena_extent_sn_next)
-#define arena_extents_dirty_dalloc JEMALLOC_N(arena_extents_dirty_dalloc)
+#define arena_fill_small_fresh JEMALLOC_N(arena_fill_small_fresh)
+#define arena_get_ehooks JEMALLOC_N(arena_get_ehooks)
+#define arena_handle_deferred_work JEMALLOC_N(arena_handle_deferred_work)
#define arena_init_huge JEMALLOC_N(arena_init_huge)
#define arena_is_huge JEMALLOC_N(arena_is_huge)
#define arena_malloc_hard JEMALLOC_N(arena_malloc_hard)
#define arena_muzzy_decay_ms_default_get JEMALLOC_N(arena_muzzy_decay_ms_default_get)
#define arena_muzzy_decay_ms_default_set JEMALLOC_N(arena_muzzy_decay_ms_default_set)
-#define arena_muzzy_decay_ms_get JEMALLOC_N(arena_muzzy_decay_ms_get)
-#define arena_muzzy_decay_ms_set JEMALLOC_N(arena_muzzy_decay_ms_set)
#define arena_new JEMALLOC_N(arena_new)
#define arena_nthreads_dec JEMALLOC_N(arena_nthreads_dec)
#define arena_nthreads_get JEMALLOC_N(arena_nthreads_get)
#define arena_nthreads_inc JEMALLOC_N(arena_nthreads_inc)
+#define arena_pa_central_global JEMALLOC_N(arena_pa_central_global)
#define arena_palloc JEMALLOC_N(arena_palloc)
#define arena_postfork_child JEMALLOC_N(arena_postfork_child)
#define arena_postfork_parent JEMALLOC_N(arena_postfork_parent)
@@ -79,14 +95,15 @@
#define arena_prefork5 JEMALLOC_N(arena_prefork5)
#define arena_prefork6 JEMALLOC_N(arena_prefork6)
#define arena_prefork7 JEMALLOC_N(arena_prefork7)
+#define arena_prefork8 JEMALLOC_N(arena_prefork8)
#define arena_prof_promote JEMALLOC_N(arena_prof_promote)
#define arena_ralloc JEMALLOC_N(arena_ralloc)
#define arena_ralloc_no_move JEMALLOC_N(arena_ralloc_no_move)
#define arena_reset JEMALLOC_N(arena_reset)
#define arena_retain_grow_limit_get_set JEMALLOC_N(arena_retain_grow_limit_get_set)
+#define arena_set_extent_hooks JEMALLOC_N(arena_set_extent_hooks)
+#define arena_slab_dalloc JEMALLOC_N(arena_slab_dalloc)
#define arena_stats_merge JEMALLOC_N(arena_stats_merge)
-#define arena_tcache_fill_small JEMALLOC_N(arena_tcache_fill_small)
-#define h_steps JEMALLOC_N(h_steps)
#define opt_dirty_decay_ms JEMALLOC_N(opt_dirty_decay_ms)
#define opt_muzzy_decay_ms JEMALLOC_N(opt_muzzy_decay_ms)
#define opt_oversize_threshold JEMALLOC_N(opt_oversize_threshold)
@@ -99,13 +116,14 @@
#define background_thread_ctl_init JEMALLOC_N(background_thread_ctl_init)
#define background_thread_enabled_state JEMALLOC_N(background_thread_enabled_state)
#define background_thread_info JEMALLOC_N(background_thread_info)
-#define background_thread_interval_check JEMALLOC_N(background_thread_interval_check)
+#define background_thread_is_started JEMALLOC_N(background_thread_is_started)
#define background_thread_lock JEMALLOC_N(background_thread_lock)
#define background_thread_postfork_child JEMALLOC_N(background_thread_postfork_child)
#define background_thread_postfork_parent JEMALLOC_N(background_thread_postfork_parent)
#define background_thread_prefork0 JEMALLOC_N(background_thread_prefork0)
#define background_thread_prefork1 JEMALLOC_N(background_thread_prefork1)
#define background_thread_stats_read JEMALLOC_N(background_thread_stats_read)
+#define background_thread_wakeup_early JEMALLOC_N(background_thread_wakeup_early)
#define background_threads_disable JEMALLOC_N(background_threads_disable)
#define background_threads_enable JEMALLOC_N(background_threads_enable)
#define max_background_threads JEMALLOC_N(max_background_threads)
@@ -115,10 +133,11 @@
#define pthread_create_wrapper JEMALLOC_N(pthread_create_wrapper)
#define b0get JEMALLOC_N(b0get)
#define base_alloc JEMALLOC_N(base_alloc)
-#define base_alloc_extent JEMALLOC_N(base_alloc_extent)
+#define base_alloc_edata JEMALLOC_N(base_alloc_edata)
#define base_boot JEMALLOC_N(base_boot)
#define base_delete JEMALLOC_N(base_delete)
-#define base_extent_hooks_get JEMALLOC_N(base_extent_hooks_get)
+#define base_ehooks_get JEMALLOC_N(base_ehooks_get)
+#define base_ehooks_get_for_metadata JEMALLOC_N(base_ehooks_get_for_metadata)
#define base_extent_hooks_set JEMALLOC_N(base_extent_hooks_set)
#define base_new JEMALLOC_N(base_new)
#define base_postfork_child JEMALLOC_N(base_postfork_child)
@@ -127,17 +146,28 @@
#define base_stats_get JEMALLOC_N(base_stats_get)
#define metadata_thp_mode_names JEMALLOC_N(metadata_thp_mode_names)
#define opt_metadata_thp JEMALLOC_N(opt_metadata_thp)
-#define bin_boot JEMALLOC_N(bin_boot)
-#define bin_infos JEMALLOC_N(bin_infos)
#define bin_init JEMALLOC_N(bin_init)
#define bin_postfork_child JEMALLOC_N(bin_postfork_child)
#define bin_postfork_parent JEMALLOC_N(bin_postfork_parent)
#define bin_prefork JEMALLOC_N(bin_prefork)
#define bin_shard_sizes_boot JEMALLOC_N(bin_shard_sizes_boot)
#define bin_update_shard_size JEMALLOC_N(bin_update_shard_size)
+#define bin_info_boot JEMALLOC_N(bin_info_boot)
+#define bin_infos JEMALLOC_N(bin_infos)
#define bitmap_info_init JEMALLOC_N(bitmap_info_init)
#define bitmap_init JEMALLOC_N(bitmap_init)
#define bitmap_size JEMALLOC_N(bitmap_size)
+#define buf_writer_cb JEMALLOC_N(buf_writer_cb)
+#define buf_writer_flush JEMALLOC_N(buf_writer_flush)
+#define buf_writer_init JEMALLOC_N(buf_writer_init)
+#define buf_writer_pipe JEMALLOC_N(buf_writer_pipe)
+#define buf_writer_terminate JEMALLOC_N(buf_writer_terminate)
+#define cache_bin_info_compute_alloc JEMALLOC_N(cache_bin_info_compute_alloc)
+#define cache_bin_info_init JEMALLOC_N(cache_bin_info_init)
+#define cache_bin_init JEMALLOC_N(cache_bin_init)
+#define cache_bin_postincrement JEMALLOC_N(cache_bin_postincrement)
+#define cache_bin_preincrement JEMALLOC_N(cache_bin_preincrement)
+#define cache_bin_still_zero_initialized JEMALLOC_N(cache_bin_still_zero_initialized)
#define ckh_count JEMALLOC_N(ckh_count)
#define ckh_delete JEMALLOC_N(ckh_delete)
#define ckh_insert JEMALLOC_N(ckh_insert)
@@ -149,61 +179,116 @@
#define ckh_search JEMALLOC_N(ckh_search)
#define ckh_string_hash JEMALLOC_N(ckh_string_hash)
#define ckh_string_keycomp JEMALLOC_N(ckh_string_keycomp)
+#define counter_accum_init JEMALLOC_N(counter_accum_init)
+#define counter_postfork_child JEMALLOC_N(counter_postfork_child)
+#define counter_postfork_parent JEMALLOC_N(counter_postfork_parent)
+#define counter_prefork JEMALLOC_N(counter_prefork)
#define ctl_boot JEMALLOC_N(ctl_boot)
#define ctl_bymib JEMALLOC_N(ctl_bymib)
+#define ctl_bymibname JEMALLOC_N(ctl_bymibname)
#define ctl_byname JEMALLOC_N(ctl_byname)
+#define ctl_mibnametomib JEMALLOC_N(ctl_mibnametomib)
+#define ctl_mtx_assert_held JEMALLOC_N(ctl_mtx_assert_held)
#define ctl_nametomib JEMALLOC_N(ctl_nametomib)
#define ctl_postfork_child JEMALLOC_N(ctl_postfork_child)
#define ctl_postfork_parent JEMALLOC_N(ctl_postfork_parent)
#define ctl_prefork JEMALLOC_N(ctl_prefork)
+#define decay_deadline_init JEMALLOC_N(decay_deadline_init)
+#define decay_init JEMALLOC_N(decay_init)
+#define decay_maybe_advance_epoch JEMALLOC_N(decay_maybe_advance_epoch)
+#define decay_ms_valid JEMALLOC_N(decay_ms_valid)
+#define decay_npages_purge_in JEMALLOC_N(decay_npages_purge_in)
+#define decay_ns_until_purge JEMALLOC_N(decay_ns_until_purge)
+#define decay_reinit JEMALLOC_N(decay_reinit)
#define div_init JEMALLOC_N(div_init)
-#define extent_alloc JEMALLOC_N(extent_alloc)
+#define ecache_init JEMALLOC_N(ecache_init)
+#define ecache_postfork_child JEMALLOC_N(ecache_postfork_child)
+#define ecache_postfork_parent JEMALLOC_N(ecache_postfork_parent)
+#define ecache_prefork JEMALLOC_N(ecache_prefork)
+#define edata_avail_any JEMALLOC_N(edata_avail_any)
+#define edata_avail_empty JEMALLOC_N(edata_avail_empty)
+#define edata_avail_first JEMALLOC_N(edata_avail_first)
+#define edata_avail_insert JEMALLOC_N(edata_avail_insert)
+#define edata_avail_new JEMALLOC_N(edata_avail_new)
+#define edata_avail_remove JEMALLOC_N(edata_avail_remove)
+#define edata_avail_remove_any JEMALLOC_N(edata_avail_remove_any)
+#define edata_avail_remove_first JEMALLOC_N(edata_avail_remove_first)
+#define edata_heap_any JEMALLOC_N(edata_heap_any)
+#define edata_heap_empty JEMALLOC_N(edata_heap_empty)
+#define edata_heap_first JEMALLOC_N(edata_heap_first)
+#define edata_heap_insert JEMALLOC_N(edata_heap_insert)
+#define edata_heap_new JEMALLOC_N(edata_heap_new)
+#define edata_heap_remove JEMALLOC_N(edata_heap_remove)
+#define edata_heap_remove_any JEMALLOC_N(edata_heap_remove_any)
+#define edata_heap_remove_first JEMALLOC_N(edata_heap_remove_first)
+#define edata_cache_fast_disable JEMALLOC_N(edata_cache_fast_disable)
+#define edata_cache_fast_get JEMALLOC_N(edata_cache_fast_get)
+#define edata_cache_fast_init JEMALLOC_N(edata_cache_fast_init)
+#define edata_cache_fast_put JEMALLOC_N(edata_cache_fast_put)
+#define edata_cache_get JEMALLOC_N(edata_cache_get)
+#define edata_cache_init JEMALLOC_N(edata_cache_init)
+#define edata_cache_postfork_child JEMALLOC_N(edata_cache_postfork_child)
+#define edata_cache_postfork_parent JEMALLOC_N(edata_cache_postfork_parent)
+#define edata_cache_prefork JEMALLOC_N(edata_cache_prefork)
+#define edata_cache_put JEMALLOC_N(edata_cache_put)
+#define ehooks_default_alloc_impl JEMALLOC_N(ehooks_default_alloc_impl)
+#define ehooks_default_commit_impl JEMALLOC_N(ehooks_default_commit_impl)
+#define ehooks_default_dalloc_impl JEMALLOC_N(ehooks_default_dalloc_impl)
+#define ehooks_default_decommit_impl JEMALLOC_N(ehooks_default_decommit_impl)
+#define ehooks_default_destroy_impl JEMALLOC_N(ehooks_default_destroy_impl)
+#define ehooks_default_extent_hooks JEMALLOC_N(ehooks_default_extent_hooks)
+#define ehooks_default_guard_impl JEMALLOC_N(ehooks_default_guard_impl)
+#define ehooks_default_merge JEMALLOC_N(ehooks_default_merge)
+#define ehooks_default_merge_impl JEMALLOC_N(ehooks_default_merge_impl)
+#define ehooks_default_purge_forced_impl JEMALLOC_N(ehooks_default_purge_forced_impl)
+#define ehooks_default_purge_lazy_impl JEMALLOC_N(ehooks_default_purge_lazy_impl)
+#define ehooks_default_split_impl JEMALLOC_N(ehooks_default_split_impl)
+#define ehooks_default_unguard_impl JEMALLOC_N(ehooks_default_unguard_impl)
+#define ehooks_default_zero_impl JEMALLOC_N(ehooks_default_zero_impl)
+#define ehooks_init JEMALLOC_N(ehooks_init)
+#define emap_deregister_boundary JEMALLOC_N(emap_deregister_boundary)
+#define emap_deregister_interior JEMALLOC_N(emap_deregister_interior)
+#define emap_do_assert_mapped JEMALLOC_N(emap_do_assert_mapped)
+#define emap_do_assert_not_mapped JEMALLOC_N(emap_do_assert_not_mapped)
+#define emap_init JEMALLOC_N(emap_init)
+#define emap_merge_commit JEMALLOC_N(emap_merge_commit)
+#define emap_merge_prepare JEMALLOC_N(emap_merge_prepare)
+#define emap_register_boundary JEMALLOC_N(emap_register_boundary)
+#define emap_register_interior JEMALLOC_N(emap_register_interior)
+#define emap_release_edata JEMALLOC_N(emap_release_edata)
+#define emap_remap JEMALLOC_N(emap_remap)
+#define emap_split_commit JEMALLOC_N(emap_split_commit)
+#define emap_split_prepare JEMALLOC_N(emap_split_prepare)
+#define emap_try_acquire_edata_neighbor JEMALLOC_N(emap_try_acquire_edata_neighbor)
+#define emap_try_acquire_edata_neighbor_expand JEMALLOC_N(emap_try_acquire_edata_neighbor_expand)
+#define emap_update_edata_state JEMALLOC_N(emap_update_edata_state)
+#define eset_fit JEMALLOC_N(eset_fit)
+#define eset_init JEMALLOC_N(eset_init)
+#define eset_insert JEMALLOC_N(eset_insert)
+#define eset_nbytes_get JEMALLOC_N(eset_nbytes_get)
+#define eset_nextents_get JEMALLOC_N(eset_nextents_get)
+#define eset_npages_get JEMALLOC_N(eset_npages_get)
+#define eset_remove JEMALLOC_N(eset_remove)
+#define exp_grow_init JEMALLOC_N(exp_grow_init)
+#define ecache_alloc JEMALLOC_N(ecache_alloc)
+#define ecache_alloc_grow JEMALLOC_N(ecache_alloc_grow)
+#define ecache_dalloc JEMALLOC_N(ecache_dalloc)
+#define ecache_evict JEMALLOC_N(ecache_evict)
#define extent_alloc_wrapper JEMALLOC_N(extent_alloc_wrapper)
-#define extent_avail_any JEMALLOC_N(extent_avail_any)
-#define extent_avail_empty JEMALLOC_N(extent_avail_empty)
-#define extent_avail_first JEMALLOC_N(extent_avail_first)
-#define extent_avail_insert JEMALLOC_N(extent_avail_insert)
-#define extent_avail_new JEMALLOC_N(extent_avail_new)
-#define extent_avail_remove JEMALLOC_N(extent_avail_remove)
-#define extent_avail_remove_any JEMALLOC_N(extent_avail_remove_any)
-#define extent_avail_remove_first JEMALLOC_N(extent_avail_remove_first)
#define extent_boot JEMALLOC_N(extent_boot)
#define extent_commit_wrapper JEMALLOC_N(extent_commit_wrapper)
-#define extent_dalloc JEMALLOC_N(extent_dalloc)
+#define extent_commit_zero JEMALLOC_N(extent_commit_zero)
#define extent_dalloc_gap JEMALLOC_N(extent_dalloc_gap)
#define extent_dalloc_wrapper JEMALLOC_N(extent_dalloc_wrapper)
#define extent_decommit_wrapper JEMALLOC_N(extent_decommit_wrapper)
#define extent_destroy_wrapper JEMALLOC_N(extent_destroy_wrapper)
-#define extent_heap_any JEMALLOC_N(extent_heap_any)
-#define extent_heap_empty JEMALLOC_N(extent_heap_empty)
-#define extent_heap_first JEMALLOC_N(extent_heap_first)
-#define extent_heap_insert JEMALLOC_N(extent_heap_insert)
-#define extent_heap_new JEMALLOC_N(extent_heap_new)
-#define extent_heap_remove JEMALLOC_N(extent_heap_remove)
-#define extent_heap_remove_any JEMALLOC_N(extent_heap_remove_any)
-#define extent_heap_remove_first JEMALLOC_N(extent_heap_remove_first)
-#define extent_hooks_default JEMALLOC_N(extent_hooks_default)
-#define extent_hooks_get JEMALLOC_N(extent_hooks_get)
-#define extent_hooks_set JEMALLOC_N(extent_hooks_set)
+#define extent_gdump_add JEMALLOC_N(extent_gdump_add)
#define extent_merge_wrapper JEMALLOC_N(extent_merge_wrapper)
-#define extent_mutex_pool JEMALLOC_N(extent_mutex_pool)
#define extent_purge_forced_wrapper JEMALLOC_N(extent_purge_forced_wrapper)
#define extent_purge_lazy_wrapper JEMALLOC_N(extent_purge_lazy_wrapper)
+#define extent_record JEMALLOC_N(extent_record)
+#define extent_sn_next JEMALLOC_N(extent_sn_next)
#define extent_split_wrapper JEMALLOC_N(extent_split_wrapper)
-#define extent_util_stats_get JEMALLOC_N(extent_util_stats_get)
-#define extent_util_stats_verbose_get JEMALLOC_N(extent_util_stats_verbose_get)
-#define extents_alloc JEMALLOC_N(extents_alloc)
-#define extents_dalloc JEMALLOC_N(extents_dalloc)
-#define extents_evict JEMALLOC_N(extents_evict)
-#define extents_init JEMALLOC_N(extents_init)
-#define extents_nbytes_get JEMALLOC_N(extents_nbytes_get)
-#define extents_nextents_get JEMALLOC_N(extents_nextents_get)
-#define extents_npages_get JEMALLOC_N(extents_npages_get)
-#define extents_postfork_child JEMALLOC_N(extents_postfork_child)
-#define extents_postfork_parent JEMALLOC_N(extents_postfork_parent)
-#define extents_prefork JEMALLOC_N(extents_prefork)
-#define extents_rtree JEMALLOC_N(extents_rtree)
-#define extents_state_get JEMALLOC_N(extents_state_get)
#define opt_lg_extent_max_active_fit JEMALLOC_N(opt_lg_extent_max_active_fit)
#define dss_prec_names JEMALLOC_N(dss_prec_names)
#define extent_alloc_dss JEMALLOC_N(extent_alloc_dss)
@@ -216,24 +301,66 @@
#define extent_alloc_mmap JEMALLOC_N(extent_alloc_mmap)
#define extent_dalloc_mmap JEMALLOC_N(extent_dalloc_mmap)
#define opt_retain JEMALLOC_N(opt_retain)
+#define fxp_parse JEMALLOC_N(fxp_parse)
+#define fxp_print JEMALLOC_N(fxp_print)
+#define opt_lg_san_uaf_align JEMALLOC_N(opt_lg_san_uaf_align)
+#define opt_san_guard_large JEMALLOC_N(opt_san_guard_large)
+#define opt_san_guard_small JEMALLOC_N(opt_san_guard_small)
+#define san_cache_bin_nonfast_mask JEMALLOC_N(san_cache_bin_nonfast_mask)
+#define san_check_stashed_ptrs JEMALLOC_N(san_check_stashed_ptrs)
+#define san_guard_pages JEMALLOC_N(san_guard_pages)
+#define san_init JEMALLOC_N(san_init)
+#define san_unguard_pages JEMALLOC_N(san_unguard_pages)
+#define san_unguard_pages_pre_destroy JEMALLOC_N(san_unguard_pages_pre_destroy)
+#define tsd_san_init JEMALLOC_N(tsd_san_init)
+#define san_bump_alloc JEMALLOC_N(san_bump_alloc)
#define hook_boot JEMALLOC_N(hook_boot)
#define hook_install JEMALLOC_N(hook_install)
#define hook_invoke_alloc JEMALLOC_N(hook_invoke_alloc)
#define hook_invoke_dalloc JEMALLOC_N(hook_invoke_dalloc)
#define hook_invoke_expand JEMALLOC_N(hook_invoke_expand)
#define hook_remove JEMALLOC_N(hook_remove)
+#define hpa_central_extract JEMALLOC_N(hpa_central_extract)
+#define hpa_central_init JEMALLOC_N(hpa_central_init)
+#define hpa_shard_destroy JEMALLOC_N(hpa_shard_destroy)
+#define hpa_shard_disable JEMALLOC_N(hpa_shard_disable)
+#define hpa_shard_do_deferred_work JEMALLOC_N(hpa_shard_do_deferred_work)
+#define hpa_shard_init JEMALLOC_N(hpa_shard_init)
+#define hpa_shard_postfork_child JEMALLOC_N(hpa_shard_postfork_child)
+#define hpa_shard_postfork_parent JEMALLOC_N(hpa_shard_postfork_parent)
+#define hpa_shard_prefork3 JEMALLOC_N(hpa_shard_prefork3)
+#define hpa_shard_prefork4 JEMALLOC_N(hpa_shard_prefork4)
+#define hpa_shard_set_deferral_allowed JEMALLOC_N(hpa_shard_set_deferral_allowed)
+#define hpa_shard_stats_accum JEMALLOC_N(hpa_shard_stats_accum)
+#define hpa_shard_stats_merge JEMALLOC_N(hpa_shard_stats_merge)
+#define hpa_supported JEMALLOC_N(hpa_supported)
+#define hpa_hooks_default JEMALLOC_N(hpa_hooks_default)
+#define hpdata_age_heap_any JEMALLOC_N(hpdata_age_heap_any)
+#define hpdata_age_heap_empty JEMALLOC_N(hpdata_age_heap_empty)
+#define hpdata_age_heap_first JEMALLOC_N(hpdata_age_heap_first)
+#define hpdata_age_heap_insert JEMALLOC_N(hpdata_age_heap_insert)
+#define hpdata_age_heap_new JEMALLOC_N(hpdata_age_heap_new)
+#define hpdata_age_heap_remove JEMALLOC_N(hpdata_age_heap_remove)
+#define hpdata_age_heap_remove_any JEMALLOC_N(hpdata_age_heap_remove_any)
+#define hpdata_age_heap_remove_first JEMALLOC_N(hpdata_age_heap_remove_first)
+#define hpdata_dehugify JEMALLOC_N(hpdata_dehugify)
+#define hpdata_hugify JEMALLOC_N(hpdata_hugify)
+#define hpdata_init JEMALLOC_N(hpdata_init)
+#define hpdata_purge_begin JEMALLOC_N(hpdata_purge_begin)
+#define hpdata_purge_end JEMALLOC_N(hpdata_purge_end)
+#define hpdata_purge_next JEMALLOC_N(hpdata_purge_next)
+#define hpdata_reserve_alloc JEMALLOC_N(hpdata_reserve_alloc)
+#define hpdata_unreserve JEMALLOC_N(hpdata_unreserve)
+#define inspect_extent_util_stats_get JEMALLOC_N(inspect_extent_util_stats_get)
+#define inspect_extent_util_stats_verbose_get JEMALLOC_N(inspect_extent_util_stats_verbose_get)
#define large_dalloc JEMALLOC_N(large_dalloc)
#define large_dalloc_finish JEMALLOC_N(large_dalloc_finish)
-#define large_dalloc_junk JEMALLOC_N(large_dalloc_junk)
-#define large_dalloc_maybe_junk JEMALLOC_N(large_dalloc_maybe_junk)
-#define large_dalloc_prep_junked_locked JEMALLOC_N(large_dalloc_prep_junked_locked)
+#define large_dalloc_prep_locked JEMALLOC_N(large_dalloc_prep_locked)
#define large_malloc JEMALLOC_N(large_malloc)
#define large_palloc JEMALLOC_N(large_palloc)
-#define large_prof_alloc_time_get JEMALLOC_N(large_prof_alloc_time_get)
-#define large_prof_alloc_time_set JEMALLOC_N(large_prof_alloc_time_set)
-#define large_prof_tctx_get JEMALLOC_N(large_prof_tctx_get)
+#define large_prof_info_get JEMALLOC_N(large_prof_info_get)
+#define large_prof_info_set JEMALLOC_N(large_prof_info_set)
#define large_prof_tctx_reset JEMALLOC_N(large_prof_tctx_reset)
-#define large_prof_tctx_set JEMALLOC_N(large_prof_tctx_set)
#define large_ralloc JEMALLOC_N(large_ralloc)
#define large_ralloc_no_move JEMALLOC_N(large_ralloc_no_move)
#define large_salloc JEMALLOC_N(large_salloc)
@@ -248,14 +375,17 @@
#define malloc_vcprintf JEMALLOC_N(malloc_vcprintf)
#define malloc_vsnprintf JEMALLOC_N(malloc_vsnprintf)
#define malloc_write JEMALLOC_N(malloc_write)
+#define wrtmessage JEMALLOC_N(wrtmessage)
+#define wrtmessage_1_0 JEMALLOC_N(wrtmessage_1_0)
#define malloc_mutex_boot JEMALLOC_N(malloc_mutex_boot)
+#define malloc_mutex_first_thread JEMALLOC_N(malloc_mutex_first_thread)
#define malloc_mutex_init JEMALLOC_N(malloc_mutex_init)
#define malloc_mutex_lock_slow JEMALLOC_N(malloc_mutex_lock_slow)
#define malloc_mutex_postfork_child JEMALLOC_N(malloc_mutex_postfork_child)
#define malloc_mutex_postfork_parent JEMALLOC_N(malloc_mutex_postfork_parent)
#define malloc_mutex_prefork JEMALLOC_N(malloc_mutex_prefork)
#define malloc_mutex_prof_data_reset JEMALLOC_N(malloc_mutex_prof_data_reset)
-#define mutex_pool_init JEMALLOC_N(mutex_pool_init)
+#define opt_mutex_max_spin JEMALLOC_N(opt_mutex_max_spin)
#define nstime_add JEMALLOC_N(nstime_add)
#define nstime_compare JEMALLOC_N(nstime_compare)
#define nstime_copy JEMALLOC_N(nstime_copy)
@@ -265,14 +395,56 @@
#define nstime_imultiply JEMALLOC_N(nstime_imultiply)
#define nstime_init JEMALLOC_N(nstime_init)
#define nstime_init2 JEMALLOC_N(nstime_init2)
+#define nstime_init_update JEMALLOC_N(nstime_init_update)
#define nstime_isubtract JEMALLOC_N(nstime_isubtract)
#define nstime_monotonic JEMALLOC_N(nstime_monotonic)
#define nstime_msec JEMALLOC_N(nstime_msec)
#define nstime_ns JEMALLOC_N(nstime_ns)
+#define nstime_ns_since JEMALLOC_N(nstime_ns_since)
#define nstime_nsec JEMALLOC_N(nstime_nsec)
+#define nstime_prof_init_update JEMALLOC_N(nstime_prof_init_update)
+#define nstime_prof_update JEMALLOC_N(nstime_prof_update)
#define nstime_sec JEMALLOC_N(nstime_sec)
#define nstime_subtract JEMALLOC_N(nstime_subtract)
#define nstime_update JEMALLOC_N(nstime_update)
+#define opt_prof_time_res JEMALLOC_N(opt_prof_time_res)
+#define prof_time_res_mode_names JEMALLOC_N(prof_time_res_mode_names)
+#define pa_alloc JEMALLOC_N(pa_alloc)
+#define pa_central_init JEMALLOC_N(pa_central_init)
+#define pa_dalloc JEMALLOC_N(pa_dalloc)
+#define pa_decay_ms_get JEMALLOC_N(pa_decay_ms_get)
+#define pa_decay_ms_set JEMALLOC_N(pa_decay_ms_set)
+#define pa_expand JEMALLOC_N(pa_expand)
+#define pa_shard_destroy JEMALLOC_N(pa_shard_destroy)
+#define pa_shard_disable_hpa JEMALLOC_N(pa_shard_disable_hpa)
+#define pa_shard_do_deferred_work JEMALLOC_N(pa_shard_do_deferred_work)
+#define pa_shard_enable_hpa JEMALLOC_N(pa_shard_enable_hpa)
+#define pa_shard_init JEMALLOC_N(pa_shard_init)
+#define pa_shard_reset JEMALLOC_N(pa_shard_reset)
+#define pa_shard_retain_grow_limit_get_set JEMALLOC_N(pa_shard_retain_grow_limit_get_set)
+#define pa_shard_set_deferral_allowed JEMALLOC_N(pa_shard_set_deferral_allowed)
+#define pa_shard_time_until_deferred_work JEMALLOC_N(pa_shard_time_until_deferred_work)
+#define pa_shrink JEMALLOC_N(pa_shrink)
+#define pa_shard_basic_stats_merge JEMALLOC_N(pa_shard_basic_stats_merge)
+#define pa_shard_mtx_stats_read JEMALLOC_N(pa_shard_mtx_stats_read)
+#define pa_shard_postfork_child JEMALLOC_N(pa_shard_postfork_child)
+#define pa_shard_postfork_parent JEMALLOC_N(pa_shard_postfork_parent)
+#define pa_shard_prefork0 JEMALLOC_N(pa_shard_prefork0)
+#define pa_shard_prefork2 JEMALLOC_N(pa_shard_prefork2)
+#define pa_shard_prefork3 JEMALLOC_N(pa_shard_prefork3)
+#define pa_shard_prefork4 JEMALLOC_N(pa_shard_prefork4)
+#define pa_shard_prefork5 JEMALLOC_N(pa_shard_prefork5)
+#define pa_shard_stats_merge JEMALLOC_N(pa_shard_stats_merge)
+#define pai_alloc_batch_default JEMALLOC_N(pai_alloc_batch_default)
+#define pai_dalloc_batch_default JEMALLOC_N(pai_dalloc_batch_default)
+#define pac_decay_all JEMALLOC_N(pac_decay_all)
+#define pac_decay_ms_get JEMALLOC_N(pac_decay_ms_get)
+#define pac_decay_ms_set JEMALLOC_N(pac_decay_ms_set)
+#define pac_destroy JEMALLOC_N(pac_destroy)
+#define pac_init JEMALLOC_N(pac_init)
+#define pac_maybe_decay_purge JEMALLOC_N(pac_maybe_decay_purge)
+#define pac_reset JEMALLOC_N(pac_reset)
+#define pac_retain_grow_limit_get_set JEMALLOC_N(pac_retain_grow_limit_get_set)
#define init_system_thp_mode JEMALLOC_N(init_system_thp_mode)
#define opt_thp JEMALLOC_N(opt_thp)
#define pages_boot JEMALLOC_N(pages_boot)
@@ -282,14 +454,23 @@
#define pages_dontdump JEMALLOC_N(pages_dontdump)
#define pages_huge JEMALLOC_N(pages_huge)
#define pages_map JEMALLOC_N(pages_map)
+#define pages_mark_guards JEMALLOC_N(pages_mark_guards)
#define pages_nohuge JEMALLOC_N(pages_nohuge)
#define pages_purge_forced JEMALLOC_N(pages_purge_forced)
#define pages_purge_lazy JEMALLOC_N(pages_purge_lazy)
#define pages_set_thp_state JEMALLOC_N(pages_set_thp_state)
#define pages_unmap JEMALLOC_N(pages_unmap)
+#define pages_unmark_guards JEMALLOC_N(pages_unmark_guards)
#define thp_mode_names JEMALLOC_N(thp_mode_names)
-#define bt2gctx_mtx JEMALLOC_N(bt2gctx_mtx)
-#define bt_init JEMALLOC_N(bt_init)
+#define peak_alloc_event_handler JEMALLOC_N(peak_alloc_event_handler)
+#define peak_alloc_new_event_wait JEMALLOC_N(peak_alloc_new_event_wait)
+#define peak_alloc_postponed_event_wait JEMALLOC_N(peak_alloc_postponed_event_wait)
+#define peak_dalloc_event_handler JEMALLOC_N(peak_dalloc_event_handler)
+#define peak_dalloc_new_event_wait JEMALLOC_N(peak_dalloc_new_event_wait)
+#define peak_dalloc_postponed_event_wait JEMALLOC_N(peak_dalloc_postponed_event_wait)
+#define peak_event_max JEMALLOC_N(peak_event_max)
+#define peak_event_update JEMALLOC_N(peak_event_update)
+#define peak_event_zero JEMALLOC_N(peak_event_zero)
#define lg_prof_sample JEMALLOC_N(lg_prof_sample)
#define opt_lg_prof_interval JEMALLOC_N(opt_lg_prof_interval)
#define opt_lg_prof_sample JEMALLOC_N(opt_lg_prof_sample)
@@ -299,20 +480,25 @@
#define opt_prof_final JEMALLOC_N(opt_prof_final)
#define opt_prof_gdump JEMALLOC_N(opt_prof_gdump)
#define opt_prof_leak JEMALLOC_N(opt_prof_leak)
-#define opt_prof_log JEMALLOC_N(opt_prof_log)
+#define opt_prof_leak_error JEMALLOC_N(opt_prof_leak_error)
#define opt_prof_prefix JEMALLOC_N(opt_prof_prefix)
+#define opt_prof_sys_thread_name JEMALLOC_N(opt_prof_sys_thread_name)
#define opt_prof_thread_active_init JEMALLOC_N(opt_prof_thread_active_init)
-#define prof_accum_init JEMALLOC_N(prof_accum_init)
-#define prof_active JEMALLOC_N(prof_active)
+#define opt_prof_unbias JEMALLOC_N(opt_prof_unbias)
#define prof_active_get JEMALLOC_N(prof_active_get)
#define prof_active_set JEMALLOC_N(prof_active_set)
+#define prof_active_state JEMALLOC_N(prof_active_state)
#define prof_alloc_rollback JEMALLOC_N(prof_alloc_rollback)
-#define prof_backtrace JEMALLOC_N(prof_backtrace)
+#define prof_backtrace_hook JEMALLOC_N(prof_backtrace_hook)
+#define prof_backtrace_hook_get JEMALLOC_N(prof_backtrace_hook_get)
+#define prof_backtrace_hook_set JEMALLOC_N(prof_backtrace_hook_set)
#define prof_boot0 JEMALLOC_N(prof_boot0)
#define prof_boot1 JEMALLOC_N(prof_boot1)
#define prof_boot2 JEMALLOC_N(prof_boot2)
-#define prof_dump_header JEMALLOC_N(prof_dump_header)
-#define prof_dump_open JEMALLOC_N(prof_dump_open)
+#define prof_booted JEMALLOC_N(prof_booted)
+#define prof_dump_hook JEMALLOC_N(prof_dump_hook)
+#define prof_dump_hook_get JEMALLOC_N(prof_dump_hook_get)
+#define prof_dump_hook_set JEMALLOC_N(prof_dump_hook_set)
#define prof_free_sampled_object JEMALLOC_N(prof_free_sampled_object)
#define prof_gdump JEMALLOC_N(prof_gdump)
#define prof_gdump_get JEMALLOC_N(prof_gdump_get)
@@ -320,18 +506,16 @@
#define prof_gdump_val JEMALLOC_N(prof_gdump_val)
#define prof_idump JEMALLOC_N(prof_idump)
#define prof_interval JEMALLOC_N(prof_interval)
-#define prof_log_start JEMALLOC_N(prof_log_start)
-#define prof_log_stop JEMALLOC_N(prof_log_stop)
-#define prof_logging_state JEMALLOC_N(prof_logging_state)
-#define prof_lookup JEMALLOC_N(prof_lookup)
#define prof_malloc_sample_object JEMALLOC_N(prof_malloc_sample_object)
#define prof_mdump JEMALLOC_N(prof_mdump)
#define prof_postfork_child JEMALLOC_N(prof_postfork_child)
#define prof_postfork_parent JEMALLOC_N(prof_postfork_parent)
#define prof_prefork0 JEMALLOC_N(prof_prefork0)
#define prof_prefork1 JEMALLOC_N(prof_prefork1)
-#define prof_reset JEMALLOC_N(prof_reset)
-#define prof_sample_threshold_update JEMALLOC_N(prof_sample_threshold_update)
+#define prof_sample_event_handler JEMALLOC_N(prof_sample_event_handler)
+#define prof_sample_new_event_wait JEMALLOC_N(prof_sample_new_event_wait)
+#define prof_sample_postponed_event_wait JEMALLOC_N(prof_sample_postponed_event_wait)
+#define prof_tctx_create JEMALLOC_N(prof_tctx_create)
#define prof_tdata_cleanup JEMALLOC_N(prof_tdata_cleanup)
#define prof_tdata_init JEMALLOC_N(prof_tdata_init)
#define prof_tdata_reinit JEMALLOC_N(prof_tdata_reinit)
@@ -341,42 +525,157 @@
#define prof_thread_active_set JEMALLOC_N(prof_thread_active_set)
#define prof_thread_name_get JEMALLOC_N(prof_thread_name_get)
#define prof_thread_name_set JEMALLOC_N(prof_thread_name_set)
+#define bt2gctx_mtx JEMALLOC_N(bt2gctx_mtx)
+#define gctx_locks JEMALLOC_N(gctx_locks)
+#define prof_bt_count JEMALLOC_N(prof_bt_count)
+#define prof_bt_hash JEMALLOC_N(prof_bt_hash)
+#define prof_bt_keycomp JEMALLOC_N(prof_bt_keycomp)
+#define prof_cnt_all JEMALLOC_N(prof_cnt_all)
+#define prof_data_init JEMALLOC_N(prof_data_init)
+#define prof_dump_impl JEMALLOC_N(prof_dump_impl)
+#define prof_dump_mtx JEMALLOC_N(prof_dump_mtx)
+#define prof_lookup JEMALLOC_N(prof_lookup)
+#define prof_reset JEMALLOC_N(prof_reset)
+#define prof_shifted_unbiased_cnt JEMALLOC_N(prof_shifted_unbiased_cnt)
+#define prof_tctx_try_destroy JEMALLOC_N(prof_tctx_try_destroy)
+#define prof_tdata_count JEMALLOC_N(prof_tdata_count)
+#define prof_tdata_detach JEMALLOC_N(prof_tdata_detach)
+#define prof_tdata_init_impl JEMALLOC_N(prof_tdata_init_impl)
+#define prof_thread_name_alloc JEMALLOC_N(prof_thread_name_alloc)
+#define prof_thread_name_set_impl JEMALLOC_N(prof_thread_name_set_impl)
+#define prof_unbias_map_init JEMALLOC_N(prof_unbias_map_init)
+#define prof_unbiased_sz JEMALLOC_N(prof_unbiased_sz)
+#define tdata_locks JEMALLOC_N(tdata_locks)
+#define tdatas_mtx JEMALLOC_N(tdatas_mtx)
+#define log_mtx JEMALLOC_N(log_mtx)
+#define opt_prof_log JEMALLOC_N(opt_prof_log)
+#define prof_log_alloc_count JEMALLOC_N(prof_log_alloc_count)
+#define prof_log_bt_count JEMALLOC_N(prof_log_bt_count)
+#define prof_log_dummy_set JEMALLOC_N(prof_log_dummy_set)
+#define prof_log_init JEMALLOC_N(prof_log_init)
+#define prof_log_is_logging JEMALLOC_N(prof_log_is_logging)
+#define prof_log_rep_check JEMALLOC_N(prof_log_rep_check)
+#define prof_log_start JEMALLOC_N(prof_log_start)
+#define prof_log_stop JEMALLOC_N(prof_log_stop)
+#define prof_log_thr_count JEMALLOC_N(prof_log_thr_count)
+#define prof_logging_state JEMALLOC_N(prof_logging_state)
+#define prof_try_log JEMALLOC_N(prof_try_log)
+#define edata_prof_recent_alloc_get_no_lock_test JEMALLOC_N(edata_prof_recent_alloc_get_no_lock_test)
+#define edata_prof_recent_alloc_init JEMALLOC_N(edata_prof_recent_alloc_init)
+#define opt_prof_recent_alloc_max JEMALLOC_N(opt_prof_recent_alloc_max)
+#define prof_recent_alloc JEMALLOC_N(prof_recent_alloc)
+#define prof_recent_alloc_dump JEMALLOC_N(prof_recent_alloc_dump)
+#define prof_recent_alloc_edata_get_no_lock_test JEMALLOC_N(prof_recent_alloc_edata_get_no_lock_test)
+#define prof_recent_alloc_list JEMALLOC_N(prof_recent_alloc_list)
+#define prof_recent_alloc_max_ctl_read JEMALLOC_N(prof_recent_alloc_max_ctl_read)
+#define prof_recent_alloc_max_ctl_write JEMALLOC_N(prof_recent_alloc_max_ctl_write)
+#define prof_recent_alloc_mtx JEMALLOC_N(prof_recent_alloc_mtx)
+#define prof_recent_alloc_prepare JEMALLOC_N(prof_recent_alloc_prepare)
+#define prof_recent_alloc_reset JEMALLOC_N(prof_recent_alloc_reset)
+#define prof_recent_dump_mtx JEMALLOC_N(prof_recent_dump_mtx)
+#define prof_recent_init JEMALLOC_N(prof_recent_init)
+#define opt_prof_stats JEMALLOC_N(opt_prof_stats)
+#define prof_stats_dec JEMALLOC_N(prof_stats_dec)
+#define prof_stats_get_accum JEMALLOC_N(prof_stats_get_accum)
+#define prof_stats_get_live JEMALLOC_N(prof_stats_get_live)
+#define prof_stats_inc JEMALLOC_N(prof_stats_inc)
+#define prof_stats_mtx JEMALLOC_N(prof_stats_mtx)
+#define bt_init JEMALLOC_N(bt_init)
+#define prof_backtrace JEMALLOC_N(prof_backtrace)
+#define prof_base JEMALLOC_N(prof_base)
+#define prof_do_mock JEMALLOC_N(prof_do_mock)
+#define prof_dump_filename_mtx JEMALLOC_N(prof_dump_filename_mtx)
+#define prof_dump_open_file JEMALLOC_N(prof_dump_open_file)
+#define prof_dump_open_maps JEMALLOC_N(prof_dump_open_maps)
+#define prof_dump_write_file JEMALLOC_N(prof_dump_write_file)
+#define prof_fdump_impl JEMALLOC_N(prof_fdump_impl)
+#define prof_gdump_impl JEMALLOC_N(prof_gdump_impl)
+#define prof_get_default_filename JEMALLOC_N(prof_get_default_filename)
+#define prof_getpid JEMALLOC_N(prof_getpid)
+#define prof_hooks_init JEMALLOC_N(prof_hooks_init)
+#define prof_idump_impl JEMALLOC_N(prof_idump_impl)
+#define prof_mdump_impl JEMALLOC_N(prof_mdump_impl)
+#define prof_prefix_set JEMALLOC_N(prof_prefix_set)
+#define prof_sys_thread_name_fetch JEMALLOC_N(prof_sys_thread_name_fetch)
+#define prof_sys_thread_name_read JEMALLOC_N(prof_sys_thread_name_read)
+#define prof_unwind_init JEMALLOC_N(prof_unwind_init)
+#define psset_init JEMALLOC_N(psset_init)
+#define psset_insert JEMALLOC_N(psset_insert)
+#define psset_pick_alloc JEMALLOC_N(psset_pick_alloc)
+#define psset_pick_hugify JEMALLOC_N(psset_pick_hugify)
+#define psset_pick_purge JEMALLOC_N(psset_pick_purge)
+#define psset_remove JEMALLOC_N(psset_remove)
+#define psset_stats_accum JEMALLOC_N(psset_stats_accum)
+#define psset_update_begin JEMALLOC_N(psset_update_begin)
+#define psset_update_end JEMALLOC_N(psset_update_end)
#define rtree_ctx_data_init JEMALLOC_N(rtree_ctx_data_init)
-#define rtree_leaf_alloc JEMALLOC_N(rtree_leaf_alloc)
-#define rtree_leaf_dalloc JEMALLOC_N(rtree_leaf_dalloc)
#define rtree_leaf_elm_lookup_hard JEMALLOC_N(rtree_leaf_elm_lookup_hard)
#define rtree_new JEMALLOC_N(rtree_new)
-#define rtree_node_alloc JEMALLOC_N(rtree_node_alloc)
-#define rtree_node_dalloc JEMALLOC_N(rtree_node_dalloc)
#define safety_check_fail JEMALLOC_N(safety_check_fail)
+#define safety_check_fail_sized_dealloc JEMALLOC_N(safety_check_fail_sized_dealloc)
#define safety_check_set_abort JEMALLOC_N(safety_check_set_abort)
+#define reg_size_compute JEMALLOC_N(reg_size_compute)
+#define sc_boot JEMALLOC_N(sc_boot)
+#define sc_data_init JEMALLOC_N(sc_data_init)
+#define sc_data_update_slab_size JEMALLOC_N(sc_data_update_slab_size)
+#define sec_disable JEMALLOC_N(sec_disable)
+#define sec_flush JEMALLOC_N(sec_flush)
+#define sec_init JEMALLOC_N(sec_init)
+#define sec_mutex_stats_read JEMALLOC_N(sec_mutex_stats_read)
+#define sec_postfork_child JEMALLOC_N(sec_postfork_child)
+#define sec_postfork_parent JEMALLOC_N(sec_postfork_parent)
+#define sec_prefork2 JEMALLOC_N(sec_prefork2)
+#define sec_stats_merge JEMALLOC_N(sec_stats_merge)
#define arena_mutex_names JEMALLOC_N(arena_mutex_names)
#define global_mutex_names JEMALLOC_N(global_mutex_names)
+#define opt_stats_interval JEMALLOC_N(opt_stats_interval)
+#define opt_stats_interval_opts JEMALLOC_N(opt_stats_interval_opts)
#define opt_stats_print JEMALLOC_N(opt_stats_print)
#define opt_stats_print_opts JEMALLOC_N(opt_stats_print_opts)
+#define stats_boot JEMALLOC_N(stats_boot)
+#define stats_interval_event_handler JEMALLOC_N(stats_interval_event_handler)
+#define stats_interval_new_event_wait JEMALLOC_N(stats_interval_new_event_wait)
+#define stats_interval_postponed_event_wait JEMALLOC_N(stats_interval_postponed_event_wait)
+#define stats_postfork_child JEMALLOC_N(stats_postfork_child)
+#define stats_postfork_parent JEMALLOC_N(stats_postfork_parent)
+#define stats_prefork JEMALLOC_N(stats_prefork)
#define stats_print JEMALLOC_N(stats_print)
-#define sc_boot JEMALLOC_N(sc_boot)
-#define sc_data_global JEMALLOC_N(sc_data_global)
-#define sc_data_init JEMALLOC_N(sc_data_init)
-#define sc_data_update_slab_size JEMALLOC_N(sc_data_update_slab_size)
#define sz_boot JEMALLOC_N(sz_boot)
#define sz_index2size_tab JEMALLOC_N(sz_index2size_tab)
+#define sz_large_pad JEMALLOC_N(sz_large_pad)
#define sz_pind2sz_tab JEMALLOC_N(sz_pind2sz_tab)
+#define sz_psz_quantize_ceil JEMALLOC_N(sz_psz_quantize_ceil)
+#define sz_psz_quantize_floor JEMALLOC_N(sz_psz_quantize_floor)
#define sz_size2index_tab JEMALLOC_N(sz_size2index_tab)
#define nhbins JEMALLOC_N(nhbins)
-#define opt_lg_tcache_max JEMALLOC_N(opt_lg_tcache_max)
+#define opt_lg_tcache_flush_large_div JEMALLOC_N(opt_lg_tcache_flush_large_div)
+#define opt_lg_tcache_flush_small_div JEMALLOC_N(opt_lg_tcache_flush_small_div)
+#define opt_lg_tcache_nslots_mul JEMALLOC_N(opt_lg_tcache_nslots_mul)
#define opt_tcache JEMALLOC_N(opt_tcache)
+#define opt_tcache_gc_delay_bytes JEMALLOC_N(opt_tcache_gc_delay_bytes)
+#define opt_tcache_gc_incr_bytes JEMALLOC_N(opt_tcache_gc_incr_bytes)
+#define opt_tcache_max JEMALLOC_N(opt_tcache_max)
+#define opt_tcache_nslots_large JEMALLOC_N(opt_tcache_nslots_large)
+#define opt_tcache_nslots_small_max JEMALLOC_N(opt_tcache_nslots_small_max)
+#define opt_tcache_nslots_small_min JEMALLOC_N(opt_tcache_nslots_small_min)
#define tcache_alloc_small_hard JEMALLOC_N(tcache_alloc_small_hard)
#define tcache_arena_associate JEMALLOC_N(tcache_arena_associate)
#define tcache_arena_reassociate JEMALLOC_N(tcache_arena_reassociate)
+#define tcache_assert_initialized JEMALLOC_N(tcache_assert_initialized)
#define tcache_bin_flush_large JEMALLOC_N(tcache_bin_flush_large)
#define tcache_bin_flush_small JEMALLOC_N(tcache_bin_flush_small)
+#define tcache_bin_flush_stashed JEMALLOC_N(tcache_bin_flush_stashed)
#define tcache_bin_info JEMALLOC_N(tcache_bin_info)
#define tcache_boot JEMALLOC_N(tcache_boot)
#define tcache_cleanup JEMALLOC_N(tcache_cleanup)
#define tcache_create_explicit JEMALLOC_N(tcache_create_explicit)
-#define tcache_event_hard JEMALLOC_N(tcache_event_hard)
#define tcache_flush JEMALLOC_N(tcache_flush)
+#define tcache_gc_dalloc_event_handler JEMALLOC_N(tcache_gc_dalloc_event_handler)
+#define tcache_gc_dalloc_new_event_wait JEMALLOC_N(tcache_gc_dalloc_new_event_wait)
+#define tcache_gc_dalloc_postponed_event_wait JEMALLOC_N(tcache_gc_dalloc_postponed_event_wait)
+#define tcache_gc_event_handler JEMALLOC_N(tcache_gc_event_handler)
+#define tcache_gc_new_event_wait JEMALLOC_N(tcache_gc_new_event_wait)
+#define tcache_gc_postponed_event_wait JEMALLOC_N(tcache_gc_postponed_event_wait)
#define tcache_maxclass JEMALLOC_N(tcache_maxclass)
#define tcache_postfork_child JEMALLOC_N(tcache_postfork_child)
#define tcache_postfork_parent JEMALLOC_N(tcache_postfork_parent)
@@ -391,9 +690,13 @@
#define tsd_tcache_enabled_data_init JEMALLOC_N(tsd_tcache_enabled_data_init)
#define test_hooks_arena_new_hook JEMALLOC_N(test_hooks_arena_new_hook)
#define test_hooks_libc_hook JEMALLOC_N(test_hooks_libc_hook)
+#define te_assert_invariants_debug JEMALLOC_N(te_assert_invariants_debug)
+#define te_event_trigger JEMALLOC_N(te_event_trigger)
+#define te_recompute_fast_threshold JEMALLOC_N(te_recompute_fast_threshold)
+#define tsd_te_init JEMALLOC_N(tsd_te_init)
+#define ticker_geom_table JEMALLOC_N(ticker_geom_table)
#define malloc_tsd_boot0 JEMALLOC_N(malloc_tsd_boot0)
#define malloc_tsd_boot1 JEMALLOC_N(malloc_tsd_boot1)
-#define malloc_tsd_cleanup_register JEMALLOC_N(malloc_tsd_cleanup_register)
#define malloc_tsd_dalloc JEMALLOC_N(malloc_tsd_dalloc)
#define malloc_tsd_malloc JEMALLOC_N(malloc_tsd_malloc)
#define tsd_booted JEMALLOC_N(tsd_booted)
diff --git a/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/public_namespace.h b/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/public_namespace.h
index 78d5c66377c3..6363f085f34b 100644
--- a/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/public_namespace.h
+++ b/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/internal/public_namespace.h
@@ -7,11 +7,12 @@
#define je_mallctlnametomib JEMALLOC_N(mallctlnametomib)
#define je_malloc JEMALLOC_N(malloc)
#define je_malloc_conf JEMALLOC_N(malloc_conf)
+#define je_malloc_conf_2_conf_harder JEMALLOC_N(malloc_conf_2_conf_harder)
#define je_malloc_message JEMALLOC_N(malloc_message)
#define je_malloc_stats_print JEMALLOC_N(malloc_stats_print)
#define je_malloc_usable_size JEMALLOC_N(malloc_usable_size)
#define je_mallocx JEMALLOC_N(mallocx)
-#define je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 JEMALLOC_N(smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756)
+#define je_smallocx_54eaed1d8b56b1aa528be3bdd1877e59c56fa90c JEMALLOC_N(smallocx_54eaed1d8b56b1aa528be3bdd1877e59c56fa90c)
#define je_nallocx JEMALLOC_N(nallocx)
#define je_posix_memalign JEMALLOC_N(posix_memalign)
#define je_rallocx JEMALLOC_N(rallocx)
@@ -19,4 +20,5 @@
#define je_sallocx JEMALLOC_N(sallocx)
#define je_sdallocx JEMALLOC_N(sdallocx)
#define je_xallocx JEMALLOC_N(xallocx)
+#define je_memalign JEMALLOC_N(memalign)
#define je_valloc JEMALLOC_N(valloc)
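
For reference, the mechanism behind this wall of defines is a single token-pasting macro: JEMALLOC_N() prepends a private prefix to every internal symbol so that jemalloc's internals cannot collide with application or libc symbols. A minimal sketch, assuming an illustrative __priv_ prefix (the real prefix is chosen at build time and is not shown in this diff):

#include <stdio.h>

/* Sketch of private-namespace mangling; "__priv_" is a made-up prefix. */
#define JEMALLOC_N(n) __priv_##n
#define bin_init JEMALLOC_N(bin_init)	/* every mention of bin_init ... */

static void bin_init(void) {		/* ... compiles as __priv_bin_init */
	printf("bin_init is really __priv_bin_init here\n");
}

int main(void) {
	bin_init();
	return (0);
}
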
diff --git a/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/jemalloc.h b/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/jemalloc.h
index c3fa08e83f22..f9ed38656a87 100644
--- a/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/jemalloc.h
+++ b/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/jemalloc.h
@@ -19,6 +19,12 @@ extern "C" {
/* Defined if format(printf, ...) attribute is supported. */
#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
+/* Defined if fallthrough attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_FALLTHROUGH
+
+/* Defined if cold attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_COLD
+
/*
* Define overrides for non-standard allocator-related functions if they are
* present on the system.
@@ -68,11 +74,12 @@ extern "C" {
# define je_mallctlnametomib mallctlnametomib
# define je_malloc malloc
# define je_malloc_conf malloc_conf
+# define je_malloc_conf_2_conf_harder malloc_conf_2_conf_harder
# define je_malloc_message malloc_message
# define je_malloc_stats_print malloc_stats_print
# define je_malloc_usable_size malloc_usable_size
# define je_mallocx mallocx
-# define je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
+# define je_smallocx_54eaed1d8b56b1aa528be3bdd1877e59c56fa90c smallocx_54eaed1d8b56b1aa528be3bdd1877e59c56fa90c
# define je_nallocx nallocx
# define je_posix_memalign posix_memalign
# define je_rallocx rallocx
@@ -80,6 +87,7 @@ extern "C" {
# define je_sallocx sallocx
# define je_sdallocx sdallocx
# define je_xallocx xallocx
+# define je_memalign memalign
# define je_valloc valloc
#endif
@@ -91,13 +99,13 @@ extern "C" {
#include <limits.h>
#include <strings.h>
-#define JEMALLOC_VERSION "5.2.1-0-gea6b3e973b477b8061e0076bb257dbd7f3faa756"
+#define JEMALLOC_VERSION "5.3.0-0-g54eaed1d8b56b1aa528be3bdd1877e59c56fa90c"
#define JEMALLOC_VERSION_MAJOR 5
-#define JEMALLOC_VERSION_MINOR 2
-#define JEMALLOC_VERSION_BUGFIX 1
+#define JEMALLOC_VERSION_MINOR 3
+#define JEMALLOC_VERSION_BUGFIX 0
#define JEMALLOC_VERSION_NREV 0
-#define JEMALLOC_VERSION_GID "ea6b3e973b477b8061e0076bb257dbd7f3faa756"
-#define JEMALLOC_VERSION_GID_IDENT ea6b3e973b477b8061e0076bb257dbd7f3faa756
+#define JEMALLOC_VERSION_GID "54eaed1d8b56b1aa528be3bdd1877e59c56fa90c"
+#define JEMALLOC_VERSION_GID_IDENT 54eaed1d8b56b1aa528be3bdd1877e59c56fa90c
#define MALLOCX_LG_ALIGN(la) ((int)(la))
#if LG_SIZEOF_PTR == 2
@@ -158,6 +166,7 @@ extern "C" {
# endif
# define JEMALLOC_FORMAT_ARG(i)
# define JEMALLOC_FORMAT_PRINTF(s, i)
+# define JEMALLOC_FALLTHROUGH
# define JEMALLOC_NOINLINE __declspec(noinline)
# ifdef __cplusplus
# define JEMALLOC_NOTHROW __declspec(nothrow)
@@ -171,6 +180,7 @@ extern "C" {
# else
# define JEMALLOC_ALLOCATOR
# endif
+# define JEMALLOC_COLD
#elif defined(JEMALLOC_HAVE_ATTR)
# define JEMALLOC_ATTR(s) __attribute__((s))
# define JEMALLOC_ALIGNED(s) JEMALLOC_ATTR(aligned(s))
@@ -196,11 +206,21 @@ extern "C" {
# else
# define JEMALLOC_FORMAT_PRINTF(s, i)
# endif
+# ifdef JEMALLOC_HAVE_ATTR_FALLTHROUGH
+# define JEMALLOC_FALLTHROUGH JEMALLOC_ATTR(fallthrough)
+# else
+# define JEMALLOC_FALLTHROUGH
+# endif
# define JEMALLOC_NOINLINE JEMALLOC_ATTR(noinline)
# define JEMALLOC_NOTHROW JEMALLOC_ATTR(nothrow)
# define JEMALLOC_SECTION(s) JEMALLOC_ATTR(section(s))
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
+# ifdef JEMALLOC_HAVE_ATTR_COLD
+# define JEMALLOC_COLD JEMALLOC_ATTR(__cold__)
+# else
+# define JEMALLOC_COLD
+# endif
#else
# define JEMALLOC_ATTR(s)
# define JEMALLOC_ALIGNED(s)
@@ -208,11 +228,19 @@ extern "C" {
# define JEMALLOC_ALLOC_SIZE2(s1, s2)
# define JEMALLOC_EXPORT
# define JEMALLOC_FORMAT_PRINTF(s, i)
+# define JEMALLOC_FALLTHROUGH
# define JEMALLOC_NOINLINE
# define JEMALLOC_NOTHROW
# define JEMALLOC_SECTION(s)
# define JEMALLOC_RESTRICT_RETURN
# define JEMALLOC_ALLOCATOR
+# define JEMALLOC_COLD
+#endif
+
+#if (defined(__APPLE__) || defined(__FreeBSD__)) && !defined(JEMALLOC_NO_RENAME)
+# define JEMALLOC_SYS_NOTHROW
+#else
+# define JEMALLOC_SYS_NOTHROW JEMALLOC_NOTHROW
#endif
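
JEMALLOC_SYS_NOTHROW is new in 5.3.0: on FreeBSD and Apple builds with symbol renaming active it expands to nothing, presumably so the renamed entry points that serve as the system allocator match the platform's own undecorated prototypes; everywhere else it falls back to JEMALLOC_NOTHROW. A minimal sketch of the expansion, using __attribute__((nothrow)) as JEMALLOC_NOTHROW's usual definition when the attribute is available:

#include <stddef.h>

#if (defined(__APPLE__) || defined(__FreeBSD__)) && !defined(JEMALLOC_NO_RENAME)
# define JEMALLOC_SYS_NOTHROW			/* bare system prototype */
#else
# define JEMALLOC_SYS_NOTHROW __attribute__((nothrow))
#endif

/* On FreeBSD this declares plain "void *je_malloc(size_t)". */
void JEMALLOC_SYS_NOTHROW *je_malloc(size_t size);
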
/*
@@ -225,21 +253,22 @@ extern JEMALLOC_EXPORT void (*je_malloc_message)(void *cbopaque,
const char *s);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_malloc(size_t size)
+ void JEMALLOC_SYS_NOTHROW *je_malloc(size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_calloc(size_t num, size_t size)
+ void JEMALLOC_SYS_NOTHROW *je_calloc(size_t num, size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2);
-JEMALLOC_EXPORT int JEMALLOC_NOTHROW je_posix_memalign(void **memptr,
- size_t alignment, size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(nonnull(1));
+JEMALLOC_EXPORT int JEMALLOC_SYS_NOTHROW je_posix_memalign(
+ void **memptr, size_t alignment, size_t size) JEMALLOC_CXX_THROW
+ JEMALLOC_ATTR(nonnull(1));
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_aligned_alloc(size_t alignment,
+ void JEMALLOC_SYS_NOTHROW *je_aligned_alloc(size_t alignment,
size_t size) JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc)
JEMALLOC_ALLOC_SIZE(2);
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_realloc(void *ptr, size_t size)
+ void JEMALLOC_SYS_NOTHROW *je_realloc(void *ptr, size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ALLOC_SIZE(2);
-JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_free(void *ptr)
+JEMALLOC_EXPORT void JEMALLOC_SYS_NOTHROW je_free(void *ptr)
JEMALLOC_CXX_THROW;
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
@@ -269,16 +298,20 @@ JEMALLOC_EXPORT void JEMALLOC_NOTHROW je_malloc_stats_print(
const char *opts);
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_usable_size(
JEMALLOC_USABLE_SIZE_CONST void *ptr) JEMALLOC_CXX_THROW;
+#ifdef JEMALLOC_HAVE_MALLOC_SIZE
+JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW je_malloc_size(
+ const void *ptr);
+#endif
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_memalign(size_t alignment, size_t size)
+ void JEMALLOC_SYS_NOTHROW *je_memalign(size_t alignment, size_t size)
JEMALLOC_CXX_THROW JEMALLOC_ATTR(malloc);
#endif
#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
- void JEMALLOC_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
+ void JEMALLOC_SYS_NOTHROW *je_valloc(size_t size) JEMALLOC_CXX_THROW
JEMALLOC_ATTR(malloc);
#endif
@@ -380,11 +413,12 @@ struct extent_hooks_s {
# define mallctlnametomib je_mallctlnametomib
# define malloc je_malloc
# define malloc_conf je_malloc_conf
+# define malloc_conf_2_conf_harder je_malloc_conf_2_conf_harder
# define malloc_message je_malloc_message
# define malloc_stats_print je_malloc_stats_print
# define malloc_usable_size je_malloc_usable_size
# define mallocx je_mallocx
-# define smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756 je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
+# define smallocx_54eaed1d8b56b1aa528be3bdd1877e59c56fa90c je_smallocx_54eaed1d8b56b1aa528be3bdd1877e59c56fa90c
# define nallocx je_nallocx
# define posix_memalign je_posix_memalign
# define rallocx je_rallocx
@@ -412,11 +446,12 @@ struct extent_hooks_s {
# undef je_mallctlnametomib
# undef je_malloc
# undef je_malloc_conf
+# undef je_malloc_conf_2_conf_harder
# undef je_malloc_message
# undef je_malloc_stats_print
# undef je_malloc_usable_size
# undef je_mallocx
-# undef je_smallocx_ea6b3e973b477b8061e0076bb257dbd7f3faa756
+# undef je_smallocx_54eaed1d8b56b1aa528be3bdd1877e59c56fa90c
# undef je_nallocx
# undef je_posix_memalign
# undef je_rallocx
diff --git a/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/jemalloc_FreeBSD.h b/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/jemalloc_FreeBSD.h
index e733906bc146..34c86271ab2e 100644
--- a/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/jemalloc_FreeBSD.h
+++ b/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/jemalloc_FreeBSD.h
@@ -2,6 +2,8 @@
* Override settings that were generated in jemalloc_defs.h as necessary.
*/
+#ifndef JEMALLOC_NO_PRIVATE_NAMESPACE
+
#undef JEMALLOC_OVERRIDE_VALLOC
#ifndef MALLOC_PRODUCTION
@@ -96,8 +98,12 @@
#include <machine/cpufunc.h>
#define CPU_SPINWAIT cpu_spinwait()
-/* Disable lazy-lock machinery, mangle isthreaded, and adjust its type. */
+/*
+ * Disable lazy-lock machinery, redirect isthreaded to libc's __isthreaded
+ * flag, undo jemalloc's namespace mangling for it, and adjust its type.
+ */
#undef JEMALLOC_LAZY_LOCK
+#undef isthreaded
extern int __isthreaded;
#define isthreaded ((bool)__isthreaded)
@@ -197,3 +203,5 @@ __sym_compat(sallocm, weak_sallocm, FBSD_1.3);
__sym_compat(dallocm, weak_dallocm, FBSD_1.3);
__sym_compat(nallocm, weak_nallocm, FBSD_1.3);
#endif
+
+#endif
diff --git a/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/jemalloc_defs.h b/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/jemalloc_defs.h
new file mode 100644
index 000000000000..fb104aaeec98
--- /dev/null
+++ b/lib/libc/stdlib/malloc/jemalloc/include/jemalloc/jemalloc_defs.h
@@ -0,0 +1,55 @@
+/* include/jemalloc/jemalloc_defs.h. Generated from jemalloc_defs.h.in by configure. */
+/* Defined if __attribute__((...)) syntax is supported. */
+#define JEMALLOC_HAVE_ATTR
+
+/* Defined if alloc_size attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_ALLOC_SIZE
+
+/* Defined if format_arg(...) attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_FORMAT_ARG
+
+/* Defined if format(gnu_printf, ...) attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_FORMAT_GNU_PRINTF
+
+/* Defined if format(printf, ...) attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_FORMAT_PRINTF
+
+/* Defined if fallthrough attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_FALLTHROUGH
+
+/* Defined if cold attribute is supported. */
+#define JEMALLOC_HAVE_ATTR_COLD
+
+/*
+ * Define overrides for non-standard allocator-related functions if they are
+ * present on the system.
+ */
+#define JEMALLOC_OVERRIDE_MEMALIGN
+#define JEMALLOC_OVERRIDE_VALLOC
+
+/*
+ * At least Linux omits the "const" in:
+ *
+ * size_t malloc_usable_size(const void *ptr);
+ *
+ * Match the operating system's prototype.
+ */
+#define JEMALLOC_USABLE_SIZE_CONST const
+
+/*
+ * If defined, specify throw() for the public function prototypes when compiling
+ * with C++. The only justification for this is to match the prototypes that
+ * glibc defines.
+ */
+/* #undef JEMALLOC_USE_CXX_THROW */
+
+#ifdef _MSC_VER
+# ifdef _WIN64
+# define LG_SIZEOF_PTR_WIN 3
+# else
+# define LG_SIZEOF_PTR_WIN 2
+# endif
+#endif
+
+/* sizeof(void *) == 2^LG_SIZEOF_PTR. */
+#define LG_SIZEOF_PTR 3
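
The generated LG_SIZEOF_PTR must agree with the target ABI; a one-line compile-time check makes the "sizeof(void *) == 2^LG_SIZEOF_PTR" invariant concrete (a sketch, not part of the commit):

#include <stddef.h>

#define LG_SIZEOF_PTR 3		/* LP64: pointers are 2^3 == 8 bytes */

_Static_assert(sizeof(void *) == ((size_t)1 << LG_SIZEOF_PTR),
    "LG_SIZEOF_PTR disagrees with the target pointer size");
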
diff --git a/lib/libc/tests/stdtime/detect_tz_changes_test.c b/lib/libc/tests/stdtime/detect_tz_changes_test.c
index 9722546747fd..75f55bdede04 100644
--- a/lib/libc/tests/stdtime/detect_tz_changes_test.c
+++ b/lib/libc/tests/stdtime/detect_tz_changes_test.c
@@ -20,6 +20,26 @@
#include <atf-c.h>
+static const struct tzcase {
+ const char *tzfn;
+ const char *expect;
+} tzcases[] = {
+ /*
+ * A handful of time zones and the expected result of
+ * strftime("%z (%Z)", tm) when that time zone is active
+ * and tm represents a date in the summer of 2025.
+ */
+ { "America/Vancouver", "-0700 (PDT)" },
+ { "America/New_York", "-0400 (EDT)" },
+ { "Europe/London", "+0100 (BST)" },
+ { "Europe/Paris", "+0200 (CEST)" },
+ { "Asia/Kolkata", "+0530 (IST)" },
+ { "Asia/Tokyo", "+0900 (JST)" },
+ { "Australia/Canberra", "+1000 (AEST)" },
+ { "UTC", "+0000 (UTC)" },
+ { 0 },
+};
+
static const time_t then = 1751328000; /* 2025-07-01 00:00:00 UTC */
static const char *tz_change_interval_sym = "__tz_change_interval";
static int *tz_change_interval_p;
@@ -91,25 +111,6 @@ ATF_TC_HEAD(detect_tz_changes, tc)
}
ATF_TC_BODY(detect_tz_changes, tc)
{
- static const struct tzcase {
- const char *tzfn;
- const char *expect;
- } tzcases[] = {
- /*
- * A handful of time zones and the expected result of
- * strftime("%z (%Z)", tm) when that time zone is active
- * and tm represents a date in the summer of 2025.
- */
- { "America/Vancouver", "-0700 (PDT)" },
- { "America/New_York", "-0400 (EDT)" },
- { "Europe/London", "+0100 (BST)" },
- { "Europe/Paris", "+0200 (CEST)" },
- { "Asia/Kolkata", "+0530 (IST)" },
- { "Asia/Tokyo", "+0900 (JST)" },
- { "Australia/Canberra", "+1000 (AEST)" },
- { "UTC", "+0000 (UTC)" },
- { 0 },
- };
char obuf[1024] = "";
char ebuf[1024] = "";
struct pollfd fds[3];
@@ -272,10 +273,32 @@ ATF_TC_BODY(detect_tz_changes, tc)
ATF_REQUIRE_EQ(0, WEXITSTATUS(status));
}
+ATF_TC(tz_env);
+ATF_TC_HEAD(tz_env, tc)
+{
+ atf_tc_set_md_var(tc, "descr", "Test TZ environment variable");
+}
+ATF_TC_BODY(tz_env, tc)
+{
+ char buf[128];
+	const struct tzcase *tzcase;
+ struct tm *tm;
+ size_t len;
+
+ for (tzcase = tzcases; tzcase->tzfn != NULL; tzcase++) {
+ setenv("TZ", tzcase->tzfn, 1);
+ ATF_REQUIRE((tm = localtime(&then)) != NULL);
+ len = strftime(buf, sizeof(buf), "%z (%Z)", tm);
+ ATF_REQUIRE(len > 0);
+ ATF_REQUIRE_STREQ(tzcase->expect, buf);
+ }
+}
+
ATF_TP_ADD_TCS(tp)
{
debugging = !getenv("__RUNNING_INSIDE_ATF_RUN") &&
isatty(STDERR_FILENO);
ATF_TP_ADD_TC(tp, detect_tz_changes);
+ ATF_TP_ADD_TC(tp, tz_env);
return (atf_no_error());
}
diff --git a/lib/libcrypt/Makefile b/lib/libcrypt/Makefile
index e939bae1bc25..2580c398155e 100644
--- a/lib/libcrypt/Makefile
+++ b/lib/libcrypt/Makefile
@@ -45,7 +45,7 @@ libcrypt.ald: ${.CURDIR}/${STATIC_LDSCRIPT}
all: ${STATIC_LDSCRIPT} libcrypt.ald
install-libcrypt.a: libcrypt.ald
- ${INSTALL} ${TAG_ARGS:D${TAG_ARGS},dev} -S -C -o ${LIBOWN} -g ${LIBGRP} -m ${LIBMODE} \
+ ${INSTALL} ${DEV_TAG_ARGS} -S -C -o ${LIBOWN} -g ${LIBGRP} -m ${LIBMODE} \
${_INSTALLFLAGS} libcrypt.ald ${DESTDIR}${_LIBDIR}/lib${LIB}.a
realinstall: install-libcrypt.a
diff --git a/lib/libelftc/Makefile b/lib/libelftc/Makefile
index a932b0c4b2a4..a6079b817d87 100644
--- a/lib/libelftc/Makefile
+++ b/lib/libelftc/Makefile
@@ -1,5 +1,6 @@
.include <bsd.own.mk>
+PACKAGE= toolchain
INTERNALLIB=
ELFTCDIR= ${SRCTOP}/contrib/elftoolchain
diff --git a/lib/libifconfig/Makefile b/lib/libifconfig/Makefile
index e8e0651dc0d6..fb7c659e068c 100644
--- a/lib/libifconfig/Makefile
+++ b/lib/libifconfig/Makefile
@@ -1,4 +1,3 @@
-PACKAGE= lib${LIB}
LIB= ifconfig
INTERNALLIB= true
diff --git a/lib/libifconfig/Symbol.map b/lib/libifconfig/Symbol.map
index 2d80fb31652a..2e11ff963909 100644
--- a/lib/libifconfig/Symbol.map
+++ b/lib/libifconfig/Symbol.map
@@ -40,7 +40,6 @@ FBSD_1.6 {
ifconfig_open;
ifconfig_set_capability;
ifconfig_set_description;
- ifconfig_set_fib;
ifconfig_set_metric;
ifconfig_set_mtu;
ifconfig_set_name;
@@ -81,7 +80,6 @@ FBSD_1.6 {
ifconfig_sfp_fc_speed_symbol;
ifconfig_sfp_id_description;
ifconfig_sfp_id_display;
- ifconfig_sfp_id_is_qsfp;
ifconfig_sfp_id_symbol;
ifconfig_sfp_rev_description;
ifconfig_sfp_rev_symbol;
diff --git a/lib/libifconfig/libifconfig.h b/lib/libifconfig/libifconfig.h
index b2f0cf9744ea..a5ce7b375830 100644
--- a/lib/libifconfig/libifconfig.h
+++ b/lib/libifconfig/libifconfig.h
@@ -171,7 +171,6 @@ int ifconfig_set_name(ifconfig_handle_t *h, const char *name,
const char *newname);
int ifconfig_get_orig_name(ifconfig_handle_t *h, const char *ifname,
char **orig_name);
-int ifconfig_set_fib(ifconfig_handle_t *h, const char *name, int fib);
int ifconfig_get_fib(ifconfig_handle_t *h, const char *name, int *fib);
int ifconfig_set_mtu(ifconfig_handle_t *h, const char *name, const int mtu);
int ifconfig_get_mtu(ifconfig_handle_t *h, const char *name, int *mtu);
diff --git a/lib/libiscsiutil/Makefile b/lib/libiscsiutil/Makefile
index c99fb6366536..d9762302fd40 100644
--- a/lib/libiscsiutil/Makefile
+++ b/lib/libiscsiutil/Makefile
@@ -1,6 +1,5 @@
LIB= iscsiutil
INTERNALLIB=
-PACKAGE= iscsi
INCS= libiscsiutil.h
diff --git a/lib/libmilter/Makefile b/lib/libmilter/Makefile
index dd6a481d1a15..ff16b4d00297 100644
--- a/lib/libmilter/Makefile
+++ b/lib/libmilter/Makefile
@@ -1,6 +1,6 @@
.include <src.opts.mk>
-PACKAGE=sendmail
+PACKAGE=lib${LIB}
SENDMAIL_DIR=${SRCTOP}/contrib/sendmail
.PATH: ${SENDMAIL_DIR}/libmilter ${SENDMAIL_DIR}/libsm
diff --git a/lib/libnetbsd/Makefile b/lib/libnetbsd/Makefile
index 1639caf20d6b..4088c2b57176 100644
--- a/lib/libnetbsd/Makefile
+++ b/lib/libnetbsd/Makefile
@@ -1,6 +1,5 @@
.include <bsd.own.mk>
-PACKAGE=lib${LIB}
LIB= netbsd
CFLAGS+= -I${.CURDIR}
diff --git a/lib/libnvmf/Makefile b/lib/libnvmf/Makefile
index b01f5ab82cac..945c31a2986c 100644
--- a/lib/libnvmf/Makefile
+++ b/lib/libnvmf/Makefile
@@ -3,7 +3,6 @@
LIB= nvmf
INTERNALLIB=
-PACKAGE= nvmf
INCS= libnvmf.h
diff --git a/lib/libopenbsd/Makefile b/lib/libopenbsd/Makefile
index dca1c08b0aed..53bd0200934f 100644
--- a/lib/libopenbsd/Makefile
+++ b/lib/libopenbsd/Makefile
@@ -1,4 +1,3 @@
-PACKAGE=lib${LIB}
LIB= openbsd
SRCS= imsg-buffer.c \
imsg.c \
diff --git a/lib/libpfctl/Makefile b/lib/libpfctl/Makefile
index d301d7850b44..4b16d81181a3 100644
--- a/lib/libpfctl/Makefile
+++ b/lib/libpfctl/Makefile
@@ -1,4 +1,3 @@
-PACKAGE= lib${LIB}
LIB= pfctl
INTERNALLIB= true
diff --git a/lib/libpfctl/libpfctl.c b/lib/libpfctl/libpfctl.c
index cbd9d4677146..0037f31df04b 100644
--- a/lib/libpfctl/libpfctl.c
+++ b/lib/libpfctl/libpfctl.c
@@ -2453,7 +2453,7 @@ _pfctl_table_add_addrs_h(struct pfctl_handle *h, struct pfr_table *tbl, struct p
snl_add_msg_attr_table(&nw, PF_TA_TABLE, tbl);
snl_add_msg_attr_u32(&nw, PF_TA_FLAGS, flags);
- for (int i = 0; i < size && i < 256; i++)
+ for (int i = 0; i < size; i++)
snl_add_msg_attr_pfr_addr(&nw, PF_TA_ADDR, &addrs[i]);
if ((hdr = snl_finalize_msg(&nw)) == NULL)
@@ -2481,19 +2481,18 @@ pfctl_table_add_addrs_h(struct pfctl_handle *h, struct pfr_table *tbl, struct pf
int ret;
int off = 0;
int partial_added;
+ int chunk_size;
do {
- ret = _pfctl_table_add_addrs_h(h, tbl, &addr[off], size - off, &partial_added, flags);
+ chunk_size = MIN(size - off, 256);
+ ret = _pfctl_table_add_addrs_h(h, tbl, &addr[off], chunk_size, &partial_added, flags);
if (ret != 0)
break;
if (nadd)
*nadd += partial_added;
- off += partial_added;
+ off += chunk_size;
} while (off < size);
- if (nadd)
- *nadd = off;
-
return (ret);
}
@@ -2521,7 +2520,7 @@ _pfctl_table_del_addrs_h(struct pfctl_handle *h, struct pfr_table *tbl, struct p
snl_add_msg_attr_table(&nw, PF_TA_TABLE, tbl);
snl_add_msg_attr_u32(&nw, PF_TA_FLAGS, flags);
- for (int i = 0; i < size && i < 256; i++)
+ for (int i = 0; i < size; i++)
snl_add_msg_attr_pfr_addr(&nw, PF_TA_ADDR, &addrs[i]);
if ((hdr = snl_finalize_msg(&nw)) == NULL)
@@ -2572,20 +2571,19 @@ pfctl_table_del_addrs_h(struct pfctl_handle *h, struct pfr_table *tbl, struct pf
int ret;
int off = 0;
int partial_deleted;
+ int chunk_size;
do {
- ret = _pfctl_table_del_addrs_h(h, tbl, &addr[off], size - off,
+ chunk_size = MIN(size - off, 256);
+ ret = _pfctl_table_del_addrs_h(h, tbl, &addr[off], chunk_size,
&partial_deleted, flags);
if (ret != 0)
break;
if (ndel)
*ndel += partial_deleted;
- off += partial_deleted;
+ off += chunk_size;
} while (off < size);
- if (ndel)
- *ndel = off;
-
return (ret);
}
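
Both table hunks fix the same bug: the helpers silently capped each netlink message at 256 addresses, while the callers advanced the offset by the kernel-reported add/delete count, which is smaller than the chunk whenever some addresses were already present or absent, so the loop could stall or resend entries; the final *nadd/*ndel assignment then clobbered the accumulated total with the offset. The corrected pattern, as a self-contained sketch in which a hypothetical submit() stands in for _pfctl_table_{add,del}_addrs_h():

#include <sys/param.h>	/* MIN() */

static int
process_in_chunks(const int *items, int size,
    int (*submit)(const int *, int, int *), int *ndone)
{
	int ret = 0, off = 0, partial, chunk_size;

	do {
		/* Never put more than 256 entries in one message. */
		chunk_size = MIN(size - off, 256);
		ret = submit(&items[off], chunk_size, &partial);
		if (ret != 0)
			break;
		if (ndone != NULL)
			*ndone += partial;	/* kernel-reported count */
		off += chunk_size;		/* always advances */
	} while (off < size);

	return (ret);
}
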
diff --git a/lib/libpmcstat/Makefile b/lib/libpmcstat/Makefile
index d1d23cdd57c7..7ca93329e20f 100644
--- a/lib/libpmcstat/Makefile
+++ b/lib/libpmcstat/Makefile
@@ -1,4 +1,3 @@
-PACKAGE=lib${LIB}
LIB= pmcstat
INTERNALLIB=
diff --git a/lib/libprocstat/libprocstat.c b/lib/libprocstat/libprocstat.c
index eb8137f6c76f..1651cbc6820f 100644
--- a/lib/libprocstat/libprocstat.c
+++ b/lib/libprocstat/libprocstat.c
@@ -1330,8 +1330,7 @@ procstat_get_vnode_info_kvm(kvm_t *kd, struct filestat *fst,
return (1);
}
vn->vn_mntdir = getmnton(kd, vnode.v_mount);
- if ((vnode.v_type == VBLK || vnode.v_type == VCHR) &&
- vnode.v_rdev != NULL){
+ if (VTYPE_ISDEV(vnode.v_type) && vnode.v_rdev != NULL) {
vn->vn_dev = dev2udev(kd, vnode.v_rdev);
(void)kdevtoname(kd, vnode.v_rdev, vn->vn_devname);
} else {
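
The removed open-coded check shows exactly what the new VTYPE_ISDEV() macro tests; a standalone sketch of the predicate (the enum values are illustrative stand-ins for the kernel's enum vtype, and the macro definition here is an assumption about the kernel header):

#include <stdbool.h>

enum vtype { VNON, VREG, VDIR, VBLK, VCHR, VLNK, VSOCK, VFIFO };

#define VTYPE_ISDEV(vt) ((vt) == VBLK || (vt) == VCHR)

static bool
is_device_vnode(enum vtype t)
{
	return (VTYPE_ISDEV(t));
}
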
diff --git a/lib/librpcsvc/Makefile b/lib/librpcsvc/Makefile
index 04e57990a29d..bdd01c91adab 100644
--- a/lib/librpcsvc/Makefile
+++ b/lib/librpcsvc/Makefile
@@ -14,7 +14,7 @@ OTHERSRCS= rnusers.c rstat.c rwall.c
SECRPCSRCS= secretkey.c xcrypt.c
.if ${MK_NIS} != "no"
-OTHERSRCS+= yp_passwd.c
+OTHERSRCS+= yp_passwd.c yp_update.c
.endif
RPCCOM= RPCGEN_CPP=${CPP:Q} rpcgen -C
diff --git a/lib/librpcsvc/yp_update.c b/lib/librpcsvc/yp_update.c
new file mode 100644
index 000000000000..8a31b2c22d9b
--- /dev/null
+++ b/lib/librpcsvc/yp_update.c
@@ -0,0 +1,199 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 1995, 1996
+ * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Bill Paul.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * ypupdate client-side library function.
+ *
+ * Written by Bill Paul <wpaul@ctr.columbia.edu>
+ * Center for Telecommunications Research
+ * Columbia University, New York City
+ */
+
+#include <sys/cdefs.h>
+#include <stdlib.h>
+#include <rpc/rpc.h>
+#include <rpcsvc/yp_prot.h>
+#include <rpcsvc/ypclnt.h>
+#include <rpcsvc/ypupdate_prot.h>
+#include <rpc/key_prot.h>
+
+#ifndef WINDOW
+#define WINDOW (60*60)
+#endif
+
+#ifndef TIMEOUT
+#define TIMEOUT 300
+#endif
+
+int
+yp_update(char *domain, char *map, unsigned int ypop, char *key, int keylen,
+ char *data, int datalen)
+{
+ char *master;
+ int rval;
+ unsigned int res;
+ struct ypupdate_args upargs;
+ struct ypdelete_args delargs;
+ CLIENT *clnt;
+ char netname[MAXNETNAMELEN+1];
+ des_block des_key;
+ struct timeval timeout;
+
+	/* Get the master server name for 'domain'. */
+ if ((rval = yp_master(domain, map, &master)))
+ return(rval);
+
+ /* Check that ypupdated is running there. */
+	if (getrpcport(master, YPU_PROG, YPU_VERS, ypop) == 0)
+ return(YPERR_DOMAIN);
+
+ /* Get a handle. */
+ if ((clnt = clnt_create(master, YPU_PROG, YPU_VERS, "tcp")) == NULL)
+ return(YPERR_RPC);
+
+ /*
+ * Assemble netname of server.
+ * NOTE: It's difficult to discern from the documentation, but
+ * when you make a Secure RPC call, the netname you pass should
+ * be the netname of the peer on the other side, not your own
+ * netname. This is how the client side knows what public key
+ * to use for the initial exchange. Passing your own netname
+ * only works if the server on the other side is running under
+ * your UID.
+ */
+ if (!host2netname(netname, master, domain)) {
+ clnt_destroy(clnt);
+ return(YPERR_BADARGS);
+ }
+
+ /* Make up a DES session key. */
+ key_gendes(&des_key);
+
+ /* Set up DES authentication. */
+ if ((clnt->cl_auth = (AUTH *)authdes_create(netname, WINDOW, NULL,
+ &des_key)) == NULL) {
+ clnt_destroy(clnt);
+ return(YPERR_RESRC);
+ }
+
+ /* Set a timeout for clnt_call(). */
+ timeout.tv_usec = 0;
+ timeout.tv_sec = TIMEOUT;
+
+ /*
+ * Make the call. Note that we use clnt_call() here rather than
+ * the rpcgen-generated client stubs. We could use those stubs, but
+ * then we'd have to do some gymnastics to get at the error
+ * information to figure out what error code to send back to the
+ * caller. With clnt_call(), we get the error status returned to
+ * us right away, and we only have to exert a small amount of
+ * extra effort.
+ */
+ switch (ypop) {
+ case YPOP_CHANGE:
+ upargs.mapname = map;
+ upargs.key.yp_buf_len = keylen;
+ upargs.key.yp_buf_val = key;
+ upargs.datum.yp_buf_len = datalen;
+ upargs.datum.yp_buf_val = data;
+
+ if ((rval = clnt_call(clnt, YPU_CHANGE,
+ (xdrproc_t)xdr_ypupdate_args, &upargs,
+ (xdrproc_t)xdr_u_int, &res, timeout)) != RPC_SUCCESS) {
+ if (rval == RPC_AUTHERROR)
+ res = YPERR_ACCESS;
+ else
+ res = YPERR_RPC;
+ }
+
+ break;
+ case YPOP_INSERT:
+ upargs.mapname = map;
+ upargs.key.yp_buf_len = keylen;
+ upargs.key.yp_buf_val = key;
+ upargs.datum.yp_buf_len = datalen;
+ upargs.datum.yp_buf_val = data;
+
+ if ((rval = clnt_call(clnt, YPU_INSERT,
+ (xdrproc_t)xdr_ypupdate_args, &upargs,
+ (xdrproc_t)xdr_u_int, &res, timeout)) != RPC_SUCCESS) {
+ if (rval == RPC_AUTHERROR)
+ res = YPERR_ACCESS;
+ else
+ res = YPERR_RPC;
+ }
+
+ break;
+ case YPOP_DELETE:
+ delargs.mapname = map;
+ delargs.key.yp_buf_len = keylen;
+ delargs.key.yp_buf_val = key;
+
+ if ((rval = clnt_call(clnt, YPU_DELETE,
+ (xdrproc_t)xdr_ypdelete_args, &delargs,
+ (xdrproc_t)xdr_u_int, &res, timeout)) != RPC_SUCCESS) {
+ if (rval == RPC_AUTHERROR)
+ res = YPERR_ACCESS;
+ else
+ res = YPERR_RPC;
+ }
+
+ break;
+ case YPOP_STORE:
+ upargs.mapname = map;
+ upargs.key.yp_buf_len = keylen;
+ upargs.key.yp_buf_val = key;
+ upargs.datum.yp_buf_len = datalen;
+ upargs.datum.yp_buf_val = data;
+
+ if ((rval = clnt_call(clnt, YPU_STORE,
+ (xdrproc_t)xdr_ypupdate_args, &upargs,
+ (xdrproc_t)xdr_u_int, &res, timeout)) != RPC_SUCCESS) {
+ if (rval == RPC_AUTHERROR)
+ res = YPERR_ACCESS;
+ else
+ res = YPERR_RPC;
+ }
+
+ break;
+ default:
+ res = YPERR_BADARGS;
+ break;
+ }
+
+ /* All done: tear down the connection. */
+ auth_destroy(clnt->cl_auth);
+ clnt_destroy(clnt);
+ free(master);
+
+ return(res);
+}
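
For illustration, a minimal caller of yp_update() might look like the sketch
below. The domain, map, key, and datum are hypothetical, the call only
succeeds when Secure RPC keys are set up for the caller, and it is assumed
that yp_update() is declared via <rpcsvc/ypclnt.h>:

    #include <stdio.h>
    #include <string.h>
    #include <rpcsvc/yp_prot.h>
    #include <rpcsvc/ypclnt.h>
    #include <rpcsvc/ypupdate_prot.h>   /* YPOP_CHANGE and friends */

    int
    main(void)
    {
        char *key = "alice";        /* hypothetical map key */
        char *data = "new-value";   /* hypothetical datum */
        int rval;

        rval = yp_update("example.nis", "publickey.byname", YPOP_CHANGE,
            key, strlen(key), data, strlen(data));
        if (rval != 0) {
            fprintf(stderr, "yp_update: %s\n", yperr_string(rval));
            return (1);
        }
        return (0);
    }
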
diff --git a/lib/libsm/Makefile b/lib/libsm/Makefile
index 448abad59192..538407754f6a 100644
--- a/lib/libsm/Makefile
+++ b/lib/libsm/Makefile
@@ -1,6 +1,5 @@
.include <src.opts.mk>
-PACKAGE=sendmail
SENDMAIL_DIR=${SRCTOP}/contrib/sendmail
.PATH: ${SENDMAIL_DIR}/libsm
diff --git a/lib/libsmdb/Makefile b/lib/libsmdb/Makefile
index 898af78d6101..e82deda57615 100644
--- a/lib/libsmdb/Makefile
+++ b/lib/libsmdb/Makefile
@@ -1,4 +1,3 @@
-PACKAGE=lib${LIB}
SENDMAIL_DIR=${SRCTOP}/contrib/sendmail
.PATH: ${SENDMAIL_DIR}/libsmdb
diff --git a/lib/libsmutil/Makefile b/lib/libsmutil/Makefile
index ef526ff8656d..37ad71e84f58 100644
--- a/lib/libsmutil/Makefile
+++ b/lib/libsmutil/Makefile
@@ -1,6 +1,5 @@
.include <src.opts.mk>
-PACKAGE=lib${LIB}
SENDMAIL_DIR=${SRCTOP}/contrib/sendmail
.PATH: ${SENDMAIL_DIR}/libsmutil
diff --git a/lib/libsys/copy_file_range.2 b/lib/libsys/copy_file_range.2
index bcd9170842d5..829a5a5d3c13 100644
--- a/lib/libsys/copy_file_range.2
+++ b/lib/libsys/copy_file_range.2
@@ -23,7 +23,7 @@
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.Dd December 28, 2023
+.Dd August 16, 2025
.Dt COPY_FILE_RANGE 2
.Os
.Sh NAME
@@ -74,6 +74,7 @@ argument must be opened for reading and the
.Fa outfd
argument must be opened for writing, but not
.Dv O_APPEND .
+.Pp
If
.Fa inoffp
or
@@ -101,9 +102,29 @@ respectively will be used/updated and the file offset for
or
.Fa outfd
respectively will not be affected.
-The
+.Pp
+The only
+.Fa flags
+argument currently defined is
+.Dv COPY_FILE_RANGE_CLONE .
+When this flag is set,
+.Fn copy_file_range
+will return
+.Er EOPNOTSUPP
+if the copy cannot be done via
+block cloning.
+When
.Fa flags
-argument must be 0.
+is 0, a file system may do the copy via block cloning
+or by data copying.
+Block cloning is only possible when the offsets (plus
+.Fa len ,
+unless the copy extends to EOF on the input file) are block
+aligned.
+The required block alignment can normally be obtained via the
+.Dv _PC_CLONE_BLKSIZE
+query of
+.Xr pathconf 2 .
.Pp
This system call attempts to maintain holes in the output file for
the byte range being copied.
@@ -203,9 +224,15 @@ refers to a directory.
File system that stores
.Fa outfd
is full.
+.It Bq Er EOPNOTSUPP
+The
+.Dv COPY_FILE_RANGE_CLONE
+flag was specified in
+.Fa flags
+and the copy could not be done via block cloning.
.El
.Sh SEE ALSO
-.Xr lseek 2
+.Xr lseek 2 ,
+.Xr pathconf 2
.Sh STANDARDS
The
.Fn copy_file_range
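
To make the new flag concrete, here is a small sketch under the page's
assumptions: _PC_CLONE_BLKSIZE reports the alignment cloning needs, and a
clone-only attempt with COPY_FILE_RANGE_CLONE fails with EOPNOTSUPP when the
file system cannot clone, at which point a plain copy still works. The header
placement of COPY_FILE_RANGE_CLONE is assumed; a complete copy would loop
until the call returns 0, and error handling is trimmed.

    #include <limits.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    copy_or_clone(int infd, int outfd)
    {
        long blksize;
        ssize_t n;

        /* Offsets must be aligned to this for cloning; 0 qualifies. */
        blksize = fpathconf(infd, _PC_CLONE_BLKSIZE);
        if (blksize > 0)
            printf("clone block size: %ld\n", blksize);

        /* Clone-only attempt; EOPNOTSUPP means cloning cannot be done. */
        n = copy_file_range(infd, NULL, outfd, NULL, SSIZE_MAX,
            COPY_FILE_RANGE_CLONE);
        if (n < 0)
            n = copy_file_range(infd, NULL, outfd, NULL, SSIZE_MAX, 0);
        return (n < 0 ? -1 : 0);
    }
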
diff --git a/lib/libsys/stat.2 b/lib/libsys/stat.2
index bd9005710147..8107740bd901 100644
--- a/lib/libsys/stat.2
+++ b/lib/libsys/stat.2
@@ -25,7 +25,7 @@
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.Dd March 30, 2021
+.Dd August 17, 2025
.Dt STAT 2
.Os
.Sh NAME
@@ -169,6 +169,9 @@ Flags enabled for the file.
See
.Xr chflags 2
for the list of flags and their description.
+.It Va st_rdev
+Numeric ID of the device referenced by the file, if the file is a
+character or block special file; otherwise its value is unspecified.
.El
.Pp
The
diff --git a/lib/libtelnet/Makefile b/lib/libtelnet/Makefile
index 6bc2f5a51de0..2eeac2d44f98 100644
--- a/lib/libtelnet/Makefile
+++ b/lib/libtelnet/Makefile
@@ -1,6 +1,5 @@
.include <src.opts.mk>
-PACKAGE=lib${LIB}
TELNETDIR= ${SRCTOP}/contrib/telnet
.PATH: ${TELNETDIR}/libtelnet
diff --git a/lib/libutil/mntopts.c b/lib/libutil/mntopts.c
index 1d9347e3108a..07d3dd6d98a3 100644
--- a/lib/libutil/mntopts.c
+++ b/lib/libutil/mntopts.c
@@ -185,6 +185,7 @@ getmntpoint(const char *name)
strncpy(statfsp->f_mntfromname, device, len);
}
if (stat(ddevname, &mntdevstat) == 0 &&
+ S_ISCHR(mntdevstat.st_mode) &&
mntdevstat.st_rdev == devstat.st_rdev)
return (statfsp);
}
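
The guard matters because, as the stat.2 text above now spells out, st_rdev
is only meaningful for character or block special files. A minimal sketch of
the same pattern:

    #include <sys/stat.h>

    /* Nonzero if both paths name the same character device. */
    static int
    same_chardev(const char *a, const char *b)
    {
        struct stat sa, sb;

        if (stat(a, &sa) != 0 || stat(b, &sb) != 0)
            return (0);
        /* Check the file type before trusting st_rdev. */
        if (!S_ISCHR(sa.st_mode) || !S_ISCHR(sb.st_mode))
            return (0);
        return (sa.st_rdev == sb.st_rdev);
    }
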
diff --git a/libexec/nuageinit/nuage.lua b/libexec/nuageinit/nuage.lua
index 493ae11d6ca7..48f54b120615 100644
--- a/libexec/nuageinit/nuage.lua
+++ b/libexec/nuageinit/nuage.lua
@@ -451,6 +451,23 @@ local function chpasswd(obj)
end
end
+local function settimezone(timezone)
+ if timezone == nil then
+ return
+ end
+ local root = os.getenv("NUAGE_FAKE_ROOTDIR")
+ if not root then
+ root = "/"
+ end
+
+ local f, _, rc = os.execute("tzsetup -s -C " .. root .. " " .. timezone)
+
+ if not f then
+ warnmsg("Failed to configure the time zone (rc = " .. rc .. ")")
+ return
+ end
+end
+
local function pkg_bootstrap()
if os.getenv("NUAGE_RUN_TESTS") then
return true
@@ -480,7 +497,7 @@ local function install_package(package)
end
local function run_pkg_cmd(subcmd)
- local cmd = "pkg " .. subcmd .. " -y"
+ local cmd = "env ASSUME_ALWAYS_YES=yes pkg " .. subcmd
if os.getenv("NUAGE_RUN_TESTS") then
print(cmd)
return true
@@ -556,6 +573,7 @@ local n = {
dirname = dirname,
mkdir_p = mkdir_p,
sethostname = sethostname,
+ settimezone = settimezone,
adduser = adduser,
addgroup = addgroup,
addsshkey = addsshkey,
diff --git a/libexec/nuageinit/nuageinit b/libexec/nuageinit/nuageinit
index 0fcdc7274db3..70b27cb33d87 100755
--- a/libexec/nuageinit/nuageinit
+++ b/libexec/nuageinit/nuageinit
@@ -46,7 +46,15 @@ local function open_config(name)
return openat("/etc/rc.conf.d", name)
end
-local function get_ifaces()
+local function open_resolv_conf()
+ return openat("/etc", "resolv.conf")
+end
+
+local function open_resolvconf_conf()
+ return openat("/etc", "resolvconf.conf")
+end
+
+local function get_ifaces_by_mac()
local parser = ucl.parser()
-- grab ifaces
local ns = io.popen("netstat -i --libxo json")
@@ -77,6 +85,10 @@ local function sethostname(obj)
end
end
+local function settimezone(obj)
+ nuage.settimezone(obj.timezone)
+end
+
local function groups(obj)
if obj.groups == nil then return end
@@ -171,6 +183,59 @@ local function ssh_authorized_keys(obj)
end
end
+local function nameservers(interface, obj)
+ local resolvconf_conf_handler = open_resolvconf_conf()
+
+ if obj.search then
+ local with_space = false
+
+ resolvconf_conf_handler:write('search_domains="')
+
+ for _, d in ipairs(obj.search) do
+ if with_space then
+ resolvconf_conf_handler:write(" " .. d)
+ else
+ resolvconf_conf_handler:write(d)
+ with_space = true
+ end
+ end
+
+ resolvconf_conf_handler:write('"\n')
+ end
+
+ if obj.addresses then
+ local with_space = false
+
+ resolvconf_conf_handler:write('name_servers="')
+
+ for _, a in ipairs(obj.addresses) do
+ if with_space then
+ resolvconf_conf_handler:write(" " .. a)
+ else
+ resolvconf_conf_handler:write(a)
+ with_space = true
+ end
+ end
+
+ resolvconf_conf_handler:write('"\n')
+ end
+
+ resolvconf_conf_handler:close()
+
+ local resolv_conf = root .. "/etc/resolv.conf"
+
+ local resolv_conf_attr = lfs.attributes(resolv_conf)
+
+ if resolv_conf_attr == nil then
+ local resolv_conf_handler = open_resolv_conf()
+ resolv_conf_handler:close()
+ end
+
+ if not os.execute("resolvconf -a " .. interface .. " < " .. resolv_conf) then
+ nuage.warn("Failed to execute resolvconf(8)")
+ end
+end
+
local function install_packages(packages)
if not nuage.pkg_bootstrap() then
nuage.warn("Failed to bootstrap pkg, skip installing packages")
@@ -187,6 +252,85 @@ local function install_packages(packages)
end
end
+local function list_ifaces()
+ local proc = io.popen("ifconfig -l")
+ local raw_ifaces = proc:read("*a")
+ proc:close()
+ local ifaces = {}
+ for i in raw_ifaces:gmatch("[^%s]+") do
+ table.insert(ifaces, i)
+ end
+ return ifaces
+end
+
+local function get_ifaces_by_driver()
+ local proc = io.popen("ifconfig -D")
+ local drivers = {}
+ local last_interface = nil
+ for line in proc:lines() do
+ local interface = line:match("^([%S]+): ")
+
+ if interface then
+ last_interface = interface
+ end
+
+ local driver = line:match("^[%s]+drivername: ([%S]+)$")
+
+ if driver then
+ drivers[driver] = last_interface
+ end
+ end
+ proc:close()
+
+ return drivers
+end
+
+local function match_rules(rules)
+ -- To comply with the cloud-init specification, all rules must match and a table
+ -- with the matching interfaces must be returned. This changes the way we initially
+ -- thought about our implementation, since at first we only needed one interface,
+ -- but cloud-init performs actions on a group of matching interfaces.
+ local interfaces = {}
+ if rules.macaddress then
+ local ifaces = get_ifaces_by_mac()
+ local interface = ifaces[rules.macaddress]
+ if not interface then
+ nuage.warn("not interface matching by MAC address: " .. rules.macaddress)
+ return
+ end
+ interfaces[interface] = 1
+ end
+ if rules.name then
+ local match = false
+ for _, i in pairs(list_ifaces()) do
+ if i:match(rules.name) then
+ match = true
+ interfaces[i] = 1
+ end
+ end
+ if not match then
+ nuage.warn("not interface matching by name: " .. rules.name)
+ return
+ end
+ end
+ if rules.driver then
+ local match = false
+ local drivers = get_ifaces_by_driver()
+ for d in pairs(drivers) do
+ if d:match(rules.driver) then
+ match = true
+ local interface = drivers[d]
+ interfaces[interface] = 1
+ end
+ end
+ if not match then
+ nuage.warn("not interface matching by driver: " .. rules.driver)
+ return
+ end
+ end
+ return interfaces
+end
+
local function write_files(files, defer)
if not files then
return
@@ -210,41 +354,76 @@ end
local function network_config(obj)
if obj.network == nil then return end
- local ifaces = get_ifaces()
local network = open_config("network")
local routing = open_config("routing")
local ipv6 = {}
- for _, v in pairs(obj.network.ethernets) do
- if not v.match then
- goto next
+ local set_defaultrouter = true
+ local set_defaultrouter6 = true
+ local set_nameservers = true
+ for i, v in pairs(obj.network.ethernets) do
+ local interfaces = {}
+ if v.match then
+ interfaces = match_rules(v.match)
+
+ if interfaces == nil or next(interfaces) == nil then
+ goto next
+ end
+ else
+ interfaces[i] = 1
end
- if not v.match.macaddress then
- goto next
+ local extra_opts = ""
+ if v.wakeonlan then
+ extra_opts = extra_opts .. " wol"
end
- if not ifaces[v.match.macaddress] then
- nuage.warn("not interface matching: " .. v.match.macaddress)
- goto next
+ if v.mtu then
+ local mtu
+ if type(v.mtu) == "number" then
+ mtu = tostring(v.mtu)
+ else
+ mtu = v.mtu
+ end
+ if mtu:match("^%d+$") then
+ extra_opts = extra_opts .. " mtu " .. mtu
+ else
+ nuage.warn("MTU is not set because the specified value is invalid: " .. mtu)
+ end
end
- local interface = ifaces[v.match.macaddress]
- if v.dhcp4 then
- network:write("ifconfig_" .. interface .. '="DHCP"\n')
- elseif v.addresses then
- for _, a in pairs(v.addresses) do
- if a:match("^(%d+)%.(%d+)%.(%d+)%.(%d+)") then
- network:write("ifconfig_" .. interface .. '="inet ' .. a .. '"\n')
- else
- network:write("ifconfig_" .. interface .. '_ipv6="inet6 ' .. a .. '"\n')
- ipv6[#ipv6 + 1] = interface
+ for interface in pairs(interfaces) do
+ if v.match and v.match.macaddress and v["set-name"] then
+ local ifaces = get_ifaces_by_mac()
+ local matched = ifaces[v.match.macaddress]
+ if matched and matched == interface then
+ network:write("ifconfig_" .. interface .. '_name=' .. v["set-name"] .. '\n')
+ interface = v["set-name"]
+ end
+ end
+ if v.dhcp4 then
+ network:write("ifconfig_" .. interface .. '="DHCP"' .. extra_opts .. '\n')
+ elseif v.addresses then
+ for _, a in pairs(v.addresses) do
+ if a:match("^(%d+)%.(%d+)%.(%d+)%.(%d+)") then
+ network:write("ifconfig_" .. interface .. '="inet ' .. a .. extra_opts .. '"\n')
+ else
+ network:write("ifconfig_" .. interface .. '_ipv6="inet6 ' .. a .. extra_opts .. '"\n')
+ ipv6[#ipv6 + 1] = interface
+ end
+ end
+ if set_nameservers and v.nameservers then
+ set_nameservers = false
+ nameservers(interface, v.nameservers)
+ end
+ if set_defaultrouter and v.gateway4 then
+ set_defaultrouter = false
+ routing:write('defaultrouter="' .. v.gateway4 .. '"\n')
+ end
+ if v.gateway6 then
+ if set_defaultrouter6 then
+ set_defaultrouter6 = false
+ routing:write('ipv6_defaultrouter="' .. v.gateway6 .. '"\n')
+ end
+ routing:write("ipv6_route_" .. interface .. '="' .. v.gateway6)
+ routing:write(" -prefixlen 128 -interface " .. interface .. '"\n')
end
end
- end
- if v.gateway4 then
- routing:write('defaultrouter="' .. v.gateway4 .. '"\n')
- end
- if v.gateway6 then
- routing:write('ipv6_defaultrouter="' .. v.gateway6 .. '"\n')
- routing:write("ipv6_route_" .. interface .. '="' .. v.gateway6)
- routing:write(" -prefixlen 128 -interface " .. interface .. '"\n')
end
::next::
end
@@ -316,7 +495,7 @@ local function config2_network(p)
end
local obj = parser:get_object()
- local ifaces = get_ifaces()
+ local ifaces = get_ifaces_by_mac()
if not ifaces then
nuage.warn("no network interfaces found")
return
@@ -468,6 +647,7 @@ f:close()
if line == "#cloud-config" then
local pre_network_calls = {
sethostname,
+ settimezone,
groups,
create_default_user,
ssh_keys,
diff --git a/libexec/nuageinit/nuageinit.7 b/libexec/nuageinit/nuageinit.7
index 327ce160e151..f27b8bc06042 100644
--- a/libexec/nuageinit/nuageinit.7
+++ b/libexec/nuageinit/nuageinit.7
@@ -143,6 +143,11 @@ Specify a fully qualified domain name for the instance.
Specify the hostname of the instance if
.Qq Ic fqdn
is not set.
+.It Ic timezone
+Set the system time zone to the value provided.
+.Pp
+See also
+.Xr tzfile 3 Ns .
.It Ic groups
An array of strings or objects to be created:
.Bl -bullet
@@ -176,6 +181,89 @@ boolean which determines the value of the
configuration in
.Pa /etc/ssh/sshd_config
.It Ic network
+Network configuration parameters.
+.Bl -tag -width "ethernets"
+.It Ic ethernets
+Mapping representing a generic configuration for existing network interfaces.
+.Pp
+Each key is an interface name that is only used when no
+.Sy match
+rule is specified.
+If
+.Sy match
+rules are specified, an arbitrary name can be used
+.Po e.g.: id0 Pc Ns .
+.Bl -tag -width "nameservers"
+.It Ic match
+This selects a subset of available physical devices by various hardware properties.
+The following configuration will then apply to all matching devices, as soon as
+they appear.
+All specified properties must match.
+The following properties for
+creating matches are supported:
+.Bl -tag -width "macaddress"
+.It Ic macaddress
+.No Device's MAC address in the form Sy xx:xx:xx:xx:xx:xx Ns .
+Letters should be lowercase.
+.It Ic name
+Current interface name.
+Lua pattern-matching expressions are supported.
+.It Ic driver
+Interface driver name and unit number of the interface.
+Lua pattern-matching expressions
+are supported.
+.El
+.It Ic set-name
+When matching on unique properties such as the MAC address, match rules can be
+written so that they match only one device.
+Then this property can be used to give that device a more meaningful name than
+the default.
+.Pp
+While multiple properties can be used in a match,
+.Sy macaddress
+is required for nuageinit to perform the rename.
+.It Ic mtu
+The MTU key represents a device's Maximum Transmission Unit, the largest packet
+or frame size that it can transmit.
+.It Ic wakeonlan
+Enable wake on LAN.
+Off by default.
+.It Ic dhcp4
+Configure the interface to use DHCP.
+.Pp
+This takes precedence over
+.Sy addresses
+when both are specified.
+.It Ic addresses
+List of strings representing IPv4 or IPv6 addresses.
+.It Ic gateway4
+Set default gateway for IPv4, for manual address configuration.
+This requires setting
+.Sy addresses
+too.
+.Pp
+Since only one default router can be configured at a time, this parameter is applied
+when processing the first entry, and any others are silently ignored.
+.It Ic gateway6
+Set default gateway for IPv6, for manual address configuration.
+This requires setting
+.Sy addresses
+too.
+.Pp
+Since only one default router can be configured at a time, this parameter is applied
+when processing the first entry, and any others are silently ignored.
+.It Ic nameservers
+Set DNS servers and search domains, for manual address configuration.
+.Pp
+There are two supported fields:
+.Bl -tag -width "addresses"
+.It Ic search
+Search list for host-name lookup.
+.It Ic addresses
+List of IPv4 or IPv6 name server addresses that the resolver should query.
+.El
+.El
+.El
.It Ic runcmd
An array of commands to be run at the end of the boot process
.It Ic packages
@@ -186,7 +274,7 @@ Update the remote package metadata.
Upgrade the packages installed to their latest version.
.It Ic users
Specify a list of users to be created:
-.Bl -tag -width "plain_text_passwd"
+.Bl -tag -width "ssh_authorized_keys"
.It Ic name
Name of the user.
.It Ic gecos
@@ -201,6 +289,8 @@ The list of other groups the user should belong to.
A boolean which determines if the home directory should be created or not.
.It Ic shell
The shell that should be used for the user.
+.It Ic ssh_authorized_keys
+List of SSH keys for the user.
.It Ic passwd
The encrypted password for the user.
.It Ic plain_text_passwd
@@ -211,7 +301,7 @@ The list of other groups the user should belong to.
.It Ic locked
Boolean to determine if the user account should be locked.
.It Ic sudo
-A string or an array of strings which which should be appended to
+A string or an array of strings which should be appended to
.Pa /usr/local/etc/sudoers.d/90-nuageinit-users
.El
.Pp
@@ -251,7 +341,7 @@ It accepts the following keys for each objects:
The content to be written to the file.
If this key is not existing then an empty file will be created.
.It Ic encoding
-Specifiy the encoding used for content.
+Specify the encoding used for content.
If not specified, then plain text is considered.
Only
.Ar b64
@@ -287,7 +377,7 @@ users:
- name: user
gecos: Foo B. Bar
sudo: ALL=(ALL) NOPASSWD:ALL
- ssh-authorized-keys:
+ ssh_authorized_keys:
- ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAr...
packages:
- neovim
@@ -303,6 +393,12 @@ ssh_keys:
...
-----END OPENSSH PRIVATE KEY-----
ed25519_public: ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIK+MH4E8KO32N5CXRvXVqvyZVl0+6ue4DobdhU0FqFd+
+network:
+ ethernets:
+ vtnet0:
+ addresses:
+ - 192.168.8.2/24
+ gateway4: 192.168.8.1
.Ed
.Sh SEE ALSO
.Xr kenv 2 ,
diff --git a/libexec/nuageinit/tests/Makefile b/libexec/nuageinit/tests/Makefile
index c69bc28a4c86..dc8997717b59 100644
--- a/libexec/nuageinit/tests/Makefile
+++ b/libexec/nuageinit/tests/Makefile
@@ -15,6 +15,7 @@ ${PACKAGE}FILES+= adduser_passwd.lua
${PACKAGE}FILES+= dirname.lua
${PACKAGE}FILES+= err.lua
${PACKAGE}FILES+= sethostname.lua
+${PACKAGE}FILES+= settimezone.lua
${PACKAGE}FILES+= warn.lua
${PACKAGE}FILES+= addfile.lua
diff --git a/libexec/nuageinit/tests/nuage.sh b/libexec/nuageinit/tests/nuage.sh
index 56651c8c5bb7..b709d25532ff 100644
--- a/libexec/nuageinit/tests/nuage.sh
+++ b/libexec/nuageinit/tests/nuage.sh
@@ -7,12 +7,21 @@
export NUAGE_FAKE_ROOTDIR="$PWD"
atf_test_case sethostname
+atf_test_case settimezone
atf_test_case addsshkey
atf_test_case adduser
atf_test_case adduser_passwd
atf_test_case addgroup
atf_test_case addfile
+settimezone_body()
+{
+ atf_check /usr/libexec/flua $(atf_get_srcdir)/settimezone.lua
+ if [ ! -f etc/localtime ]; then
+ atf_fail "localtime not written"
+ fi
+}
+
sethostname_body()
{
atf_check /usr/libexec/flua $(atf_get_srcdir)/sethostname.lua
diff --git a/libexec/nuageinit/tests/nuageinit.sh b/libexec/nuageinit/tests/nuageinit.sh
index 849f1c258b62..98593f7d75b0 100644
--- a/libexec/nuageinit/tests/nuageinit.sh
+++ b/libexec/nuageinit/tests/nuageinit.sh
@@ -815,7 +815,7 @@ config2_userdata_update_packages_body()
package_update: true
EOF
chmod 755 "${PWD}"/media/nuageinit/user_data
- atf_check -o inline:"pkg update -y\n" /usr/libexec/nuageinit "${PWD}"/media/nuageinit postnet
+ atf_check -o inline:"env ASSUME_ALWAYS_YES=yes pkg update\n" /usr/libexec/nuageinit "${PWD}"/media/nuageinit postnet
}
config2_userdata_upgrade_packages_body()
@@ -829,7 +829,7 @@ config2_userdata_upgrade_packages_body()
package_upgrade: true
EOF
chmod 755 "${PWD}"/media/nuageinit/user_data
- atf_check -o inline:"pkg upgrade -y\n" /usr/libexec/nuageinit "${PWD}"/media/nuageinit postnet
+ atf_check -o inline:"env ASSUME_ALWAYS_YES=yes pkg upgrade\n" /usr/libexec/nuageinit "${PWD}"/media/nuageinit postnet
}
config2_userdata_shebang_body()
diff --git a/libexec/nuageinit/tests/settimezone.lua b/libexec/nuageinit/tests/settimezone.lua
new file mode 100644
index 000000000000..a8cacf09f4e7
--- /dev/null
+++ b/libexec/nuageinit/tests/settimezone.lua
@@ -0,0 +1,5 @@
+#!/usr/libexec/flua
+
+local n = require("nuage")
+
+n.settimezone("UTC")
diff --git a/libexec/rc/rc.conf b/libexec/rc/rc.conf
index 07cb9803882c..bfa46bd343a6 100644
--- a/libexec/rc/rc.conf
+++ b/libexec/rc/rc.conf
@@ -396,6 +396,7 @@ rpc_statd_flags="" # Flags to rpc.statd (if enabled).
rpcbind_enable="NO" # Run the portmapper service (YES/NO).
rpcbind_program="/usr/sbin/rpcbind" # path to rpcbind, if you want a different one.
rpcbind_flags="" # Flags to rpcbind (if enabled).
+rpc_ypupdated_enable="NO" # Run rpc.ypupdated if NIS master with Secure RPC (YES/NO).
nfsv4_server_enable="NO" # Enable support for NFSv4
nfsv4_server_only="NO" # Set NFS server to NFSv4 only
nfscbd_enable="NO" # NFSv4 client side callback daemon
diff --git a/libexec/rc/rc.d/Makefile b/libexec/rc/rc.d/Makefile
index 680b140865df..7c1f50b027a9 100644
--- a/libexec/rc/rc.d/Makefile
+++ b/libexec/rc/rc.d/Makefile
@@ -215,7 +215,7 @@ FTPD= ftpd
FTPDPACKAGE= ftpd
.endif
-.if ${MK_GSSAPI} != "no" && ${MK_KERBEROS_SUPPORT} != "no"
+.if ${MK_KERBEROS_SUPPORT} != "no"
CONFGROUPS+= GSSD
GSSD= gssd
GSSDPACKAGE= gssd
@@ -318,6 +318,7 @@ YP= ypbind \
yppasswdd \
ypserv \
ypset \
+ ypupdated \
ypxfrd \
nisdomain
YPPACKAGE= yp
diff --git a/libexec/rc/rc.d/ypupdated b/libexec/rc/rc.d/ypupdated
new file mode 100755
index 000000000000..1a4c595c745a
--- /dev/null
+++ b/libexec/rc/rc.d/ypupdated
@@ -0,0 +1,35 @@
+#!/bin/sh
+#
+#
+
+# PROVIDE: ypupdated
+# REQUIRE: rpcbind ypserv
+# KEYWORD: shutdown
+
+. /etc/rc.subr
+
+name="ypupdated"
+rcvar="rpc_ypupdated_enable"
+
+: ${ypupdated_svcj_options:="net_basic"}
+
+load_rc_config $name
+
+command="/usr/sbin/rpc.${name}"
+start_precmd="rpc_ypupdated_precmd"
+
+rpc_ypupdated_precmd()
+{
+ local _domain
+
+ force_depend rpcbind || return 1
+ force_depend ypserv nis_server || return 1
+
+ _domain=`domainname`
+ if [ -z "$_domain" ]; then
+ warn "NIS domainname(1) is not set."
+ return 1
+ fi
+}
+
+run_rc_command "$1"
diff --git a/libexec/rtld-elf/rtld.c b/libexec/rtld-elf/rtld.c
index 17196f55c271..d27af520c21d 100644
--- a/libexec/rtld-elf/rtld.c
+++ b/libexec/rtld-elf/rtld.c
@@ -859,6 +859,10 @@ _rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp)
linkmap_add(obj_main);
linkmap_add(&obj_rtld);
+ LD_UTRACE(UTRACE_LOAD_OBJECT, obj_main, obj_main->mapbase,
+ obj_main->mapsize, 0, obj_main->path);
+ LD_UTRACE(UTRACE_LOAD_OBJECT, &obj_rtld, obj_rtld.mapbase,
+ obj_rtld.mapsize, 0, obj_rtld.path);
/* Link the main program into the list of objects. */
TAILQ_INSERT_HEAD(&obj_list, obj_main, next);
@@ -2437,11 +2441,21 @@ parse_rtld_phdr(Obj_Entry *obj)
{
const Elf_Phdr *ph;
Elf_Addr note_start, note_end;
+ bool first_seg;
+ first_seg = true;
obj->stack_flags = PF_X | PF_R | PF_W;
for (ph = obj->phdr;
(const char *)ph < (const char *)obj->phdr + obj->phsize; ph++) {
switch (ph->p_type) {
+ case PT_LOAD:
+ if (first_seg) {
+ obj->vaddrbase = rtld_trunc_page(ph->p_vaddr);
+ first_seg = false;
+ }
+ obj->mapsize = rtld_round_page(ph->p_vaddr +
+ ph->p_memsz) - obj->vaddrbase;
+ break;
case PT_GNU_STACK:
obj->stack_flags = ph->p_flags;
break;
@@ -3031,7 +3045,7 @@ load_kpreload(const void *addr)
}
obj->mapbase = __DECONST(caddr_t, addr);
- obj->mapsize = segn->p_vaddr + segn->p_memsz - (Elf_Addr)addr;
+ obj->mapsize = segn->p_vaddr + segn->p_memsz;
obj->vaddrbase = 0;
obj->relocbase = obj->mapbase;
@@ -3060,7 +3074,8 @@ load_kpreload(const void *addr)
linkmap_add(obj); /* for GDB & dlinfo() */
max_stack_flags |= obj->stack_flags;
- LD_UTRACE(UTRACE_LOAD_OBJECT, obj, obj->mapbase, 0, 0, obj->path);
+ LD_UTRACE(UTRACE_LOAD_OBJECT, obj, obj->mapbase, obj->mapsize, 0,
+ obj->path);
return (0);
}
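
A standalone sketch of the computation parse_rtld_phdr() now performs: the
base comes from the first PT_LOAD segment and the size from the end of the
last one (PT_LOAD segments are sorted by address), both rounded to page
boundaries. PAGE is a stand-in for the real page size behind
rtld_trunc_page()/rtld_round_page(), and the ELF types are assumed to come
from <link.h>.

    #include <link.h>
    #include <stddef.h>
    #include <stdint.h>

    #define PAGE        4096
    #define TRUNC_PG(x) ((x) & ~((uintptr_t)PAGE - 1))
    #define ROUND_PG(x) (((x) + PAGE - 1) & ~((uintptr_t)PAGE - 1))

    static size_t
    map_size(const Elf_Phdr *phdr, size_t phnum, uintptr_t *basep)
    {
        uintptr_t base = 0, end = 0;
        int first = 1;

        for (size_t i = 0; i < phnum; i++) {
            if (phdr[i].p_type != PT_LOAD)
                continue;
            if (first) {
                base = TRUNC_PG(phdr[i].p_vaddr);
                first = 0;
            }
            /* Each PT_LOAD ends no lower than the previous one. */
            end = ROUND_PG(phdr[i].p_vaddr + phdr[i].p_memsz);
        }
        *basep = base;
        return (end - base);
    }
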
diff --git a/release/packages/generate-ucl.lua b/release/packages/generate-ucl.lua
index a243c6ea7ad0..5637bbd3ad99 100755
--- a/release/packages/generate-ucl.lua
+++ b/release/packages/generate-ucl.lua
@@ -39,6 +39,10 @@ pkg_suffixes = {
"applications on a 64-bit host.",
},
{
+ "%-lib$", "(libraries)",
+ "This package contains runtime shared libraries.",
+ },
+ {
"%-dev$", "(development files)",
"This package contains development files for "..
"compiling applications."
@@ -98,6 +102,9 @@ function add_gen_dep(pkgname, pkggenname)
if no_gen_deps[pkgname] ~= nil then
return false
end
+ if pkgname:match("%-lib$") ~= nil then
+ return false
+ end
if pkggenname == "kernel" then
return false
end
diff --git a/release/packages/generate-ucl.sh b/release/packages/generate-ucl.sh
index 3078185a3c4e..a08840d5b321 100755
--- a/release/packages/generate-ucl.sh
+++ b/release/packages/generate-ucl.sh
@@ -49,6 +49,9 @@ main() {
*-lib32)
outname="${outname%%-lib32}"
;;
+ *-lib)
+ outname="${outname%%-lib}"
+ ;;
*-man)
outname="${outname%%-man}"
;;
diff --git a/release/packages/ucl/dma-all.ucl b/release/packages/ucl/dma-all.ucl
index e8824acf7a36..63d6c86b0be4 100644
--- a/release/packages/ucl/dma-all.ucl
+++ b/release/packages/ucl/dma-all.ucl
@@ -1,4 +1,12 @@
-comment = "DMA Mail Agent Utilities"
+comment = "DragonFly Mail Agent"
desc = <<EOD
-DMA Mail Agent Utilities
+The DragonFly Mail Agent (dma) is a lightweight mail transport agent intended
+for home and office use. dma can accept mail from local users and deliver it
+either locally or remotely (via SMTP). dma can be configured to route remote
+mail via an SMTP smart host, optionally using STARTTLS and/or authentication.
+
+dma does not implement an SMTP server and cannot receive mail from remote
+systems, nor act as a mail exchanger for other hosts. If an SMTP server
+is required, or when more advanced mail routing is needed, consider using
+the $PKG_NAME_PREFIX-sendmail package instead.
EOD
diff --git a/release/packages/ucl/kerberos-lib-all.ucl b/release/packages/ucl/kerberos-lib-all.ucl
deleted file mode 100644
index b524563a976d..000000000000
--- a/release/packages/ucl/kerberos-lib-all.ucl
+++ /dev/null
@@ -1,4 +0,0 @@
-comment = "Kerberos libraries"
-desc = <<EOD
-Libraries requires to run programs that use Kerberos.
-EOD
diff --git a/release/packages/ucl/libmilter-all.ucl b/release/packages/ucl/libmilter-all.ucl
new file mode 100644
index 000000000000..5c0e4925a9c2
--- /dev/null
+++ b/release/packages/ucl/libmilter-all.ucl
@@ -0,0 +1,7 @@
+comment = "sendmail Mail Filter API library"
+desc = <<EOD
+The sendmail Mail Filter API (Milter) is designed to allow third-party
+programs access to mail messages as they are being processed in order
+to filter meta-information and content. libmilter provides support for
+applications implementing the milter interface.
+EOD
diff --git a/release/packages/ucl/openssl-lib-all.ucl b/release/packages/ucl/openssl-lib-all.ucl
deleted file mode 100644
index c81dd44855cd..000000000000
--- a/release/packages/ucl/openssl-lib-all.ucl
+++ /dev/null
@@ -1,4 +0,0 @@
-comment = "OpenSSL Libraries"
-desc = <<EOD
-OpenSSL Libraries
-EOD
diff --git a/release/packages/ucl/sendmail-all.ucl b/release/packages/ucl/sendmail-all.ucl
index 2711e33a31a8..38f697da24fc 100644
--- a/release/packages/ucl/sendmail-all.ucl
+++ b/release/packages/ucl/sendmail-all.ucl
@@ -1,4 +1,15 @@
-comment = "Sendmail Utilities"
+comment = "sendmail mail transport agent"
desc = <<EOD
-Sendmail Utilities
+The sendmail mail transport agent allows the system to send and receive mail
+for both local and remote users. sendmail can also act as a mail exchanger
+for other hosts, and has a flexible configuration format that can be used to
+rewrite, filter or otherwise process mail before delivering it.
+
+sendmail supports multiple mail transport protocols, including Internet SMTP,
+LMTP, UUCP and DECnet. Support for protocols other than SMTP and LMTP needs
+third-party software not shipped with the base system.
+
+When the full functionality of sendmail is not required, consider using the
+$PKG_NAME_PREFIX-dma package instead, a lightweight MTA which can send (but
+not receive) mail over SMTP.
EOD
diff --git a/sbin/pfctl/pfctl.8 b/sbin/pfctl/pfctl.8
index f582c6301124..5a74a8fd3444 100644
--- a/sbin/pfctl/pfctl.8
+++ b/sbin/pfctl/pfctl.8
@@ -24,7 +24,7 @@
.\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
-.Dd July 7, 2025
+.Dd August 5, 2025
.Dt PFCTL 8
.Os
.Sh NAME
@@ -410,6 +410,7 @@ This is the default behaviour.
.It Fl o Cm profile
Enable basic ruleset optimizations with profiling.
.El
+.Pp
For further information on the ruleset optimizer, see
.Xr pf.conf 5 .
.It Fl P
@@ -431,7 +432,7 @@ Perform reverse DNS lookups on states and tables when displaying them.
and
.Fl r
are mutually exclusive.
-.It Fl s Ar modifier
+.It Fl s Ar modifier Op Fl R Ar id
Show the filter parameters specified by
.Ar modifier
(may be abbreviated):
@@ -563,19 +564,16 @@ no free ports in translation port range
.It Fl S
Do not perform domain name resolution.
If a name cannot be resolved without DNS, an error will be reported.
-.It Fl T Ar command Op Ar address ...
+.It Fl t Ar table Fl T Ar command Op Ar address ...
Specify the
.Ar command
-(may be abbreviated) to apply to the table.
+(may be abbreviated) to apply to
+.Ar table .
Commands include:
.Pp
-.Bl -tag -width xxxxxxxxxxxx -compact
-.It Fl T Cm kill
-Kill a table.
-.It Fl T Cm flush
-Flush all addresses of a table.
+.Bl -tag -width "-T expire number" -compact
.It Fl T Cm add
-Add one or more addresses in a table.
+Add one or more addresses to a table.
Automatically create a persistent table if it does not exist.
.It Fl T Cm delete
Delete one or more addresses from a table.
@@ -586,6 +584,10 @@ seconds ago.
For entries which have never had their statistics cleared,
.Ar number
refers to the time they were added to the table.
+.It Fl T Cm flush
+Flush all addresses in a table.
+.It Fl T Cm kill
+Kill a table.
.It Fl T Cm replace
Replace the addresses of the table.
Automatically create a persistent table if it does not exist.
@@ -765,8 +767,6 @@ tables of the same name from anchors attached below it.
.It C
This flag is set when per-address counters are enabled on the table.
.El
-.It Fl t Ar table
-Specify the name of the table.
.It Fl v
Produce more verbose output.
A second use of
diff --git a/sbin/pfctl/pfctl_radix.c b/sbin/pfctl/pfctl_radix.c
index 0fe9ca8813bb..398c5e998330 100644
--- a/sbin/pfctl/pfctl_radix.c
+++ b/sbin/pfctl/pfctl_radix.c
@@ -122,6 +122,9 @@ pfr_add_addrs(struct pfr_table *tbl, struct pfr_addr *addr, int size,
{
int ret;
+ if (nadd != NULL)
+ *nadd = 0;
+
ret = pfctl_table_add_addrs_h(pfh, tbl, addr, size, nadd, flags);
if (ret) {
errno = ret;
diff --git a/secure/lib/libcrypto/Makefile b/secure/lib/libcrypto/Makefile
index e7e491124241..f8f8d291ee9e 100644
--- a/secure/lib/libcrypto/Makefile
+++ b/secure/lib/libcrypto/Makefile
@@ -1,12 +1,14 @@
SHLIBDIR?= /lib
-.if !defined(LIBCRYPTO_WITHOUT_SUBDIRS)
+.if !defined(LIBCRYPTO_WITHOUT_SUBDIRS) && !defined(BOOTSTRAPPING)
SUBDIR= engines modules
.endif
+.ifdef BOOTSTRAPPING
+CFLAGS+= -DOPENSSL_NO_SCTP
+.endif
.include <bsd.own.mk>
.include <src.opts.mk>
-PACKAGE= openssl-lib
LIB= crypto
SHLIB_MAJOR= 35
VERSION_MAP= ${.CURDIR}/Version.map
diff --git a/secure/lib/libcrypto/Makefile.common b/secure/lib/libcrypto/Makefile.common
index afcc2a48660c..ad48e2b434cf 100644
--- a/secure/lib/libcrypto/Makefile.common
+++ b/secure/lib/libcrypto/Makefile.common
@@ -6,7 +6,7 @@ CFLAGS+= -DL_ENDIAN
CFLAGS+= -DB_ENDIAN
.endif
-.ifndef WITHOUT_AUTO_ASM
+.if !defined(WITHOUT_AUTO_ASM) && !defined(BOOTSTRAPPING)
.if ${MACHINE_CPUARCH} == "aarch64" || ${MACHINE_CPUARCH} == "amd64" || \
${MACHINE_CPUARCH} == "arm" || ${MACHINE_CPUARCH} == "i386"
ASM_${MACHINE_CPUARCH}=
diff --git a/secure/lib/libcrypto/Makefile.inc b/secure/lib/libcrypto/Makefile.inc
index 55e1687b58cf..4d5e9c68a1fe 100644
--- a/secure/lib/libcrypto/Makefile.inc
+++ b/secure/lib/libcrypto/Makefile.inc
@@ -1,8 +1,10 @@
.include <bsd.own.mk>
+PACKAGE= openssl
+LIB_PACKAGE=
+
# OpenSSL version used for manual page generation
-OPENSSL_VER= 3.5.0
-OPENSSL_DATE= 2025-07-01
+.include "Makefile.version"
LCRYPTO_SRC= ${SRCTOP}/crypto/openssl
LCRYPTO_DOC= ${LCRYPTO_SRC}/doc
diff --git a/secure/lib/libcrypto/Makefile.version b/secure/lib/libcrypto/Makefile.version
new file mode 100644
index 000000000000..e802a1f11618
--- /dev/null
+++ b/secure/lib/libcrypto/Makefile.version
@@ -0,0 +1,2 @@
+OPENSSL_VER= 3.5.1
+OPENSSL_DATE= 2025-07-01
diff --git a/secure/lib/libcrypto/engines/Makefile.inc b/secure/lib/libcrypto/engines/Makefile.inc
index 777193ba3f9a..146c2f794eb6 100644
--- a/secure/lib/libcrypto/engines/Makefile.inc
+++ b/secure/lib/libcrypto/engines/Makefile.inc
@@ -1,4 +1,5 @@
-PACKAGE?= openssl-lib
+PACKAGE?= openssl
+LIB_PACKAGE=
SHLIBDIR= ${LIBDIR}/engines-3
diff --git a/secure/lib/libcrypto/modules/Makefile.inc b/secure/lib/libcrypto/modules/Makefile.inc
index 363b7f4bc766..4b3d9fc512ce 100644
--- a/secure/lib/libcrypto/modules/Makefile.inc
+++ b/secure/lib/libcrypto/modules/Makefile.inc
@@ -1,4 +1,5 @@
-PACKAGE?= openssl-lib
+PACKAGE?= openssl
+LIB_PACKAGE=
SHLIBDIR= ${LIBDIR}/ossl-modules
LCRYPTO_SRC= ${SRCTOP}/crypto/openssl
diff --git a/secure/lib/libssl/Makefile b/secure/lib/libssl/Makefile
index 45f23c9e9ca2..acb63549c1c8 100644
--- a/secure/lib/libssl/Makefile
+++ b/secure/lib/libssl/Makefile
@@ -3,7 +3,6 @@
LIB= ssl
SHLIB_MAJOR= 35
VERSION_MAP= ${.CURDIR}/Version.map
-PACKAGE= openssl-lib
NO_LINT=
@@ -73,6 +72,8 @@ SRCS+= rio_notifier.c poll_builder.c
.if ${MK_OPENSSL_KTLS} == "no"
CFLAGS+=-DOPENSSL_NO_KTLS
+.else
+SRCS+= ktls_meth.c
.endif
LIBADD= crypto
diff --git a/secure/libexec/sshd-session/Makefile b/secure/libexec/sshd-session/Makefile
index 37e099794bd5..5ed459fe492a 100644
--- a/secure/libexec/sshd-session/Makefile
+++ b/secure/libexec/sshd-session/Makefile
@@ -38,7 +38,7 @@ LIBADD+= blacklist
LDFLAGS+=-L${LIBBLACKLISTDIR}
.endif
-.if ${MK_GSSAPI} != "no" && ${MK_KERBEROS_SUPPORT} != "no"
+.if ${MK_KERBEROS_SUPPORT} != "no"
.if ${MK_MITKRB5} != "no"
LIBADD+= gssapi_krb5 krb5
.include "../../krb5/Makefile.inc"
diff --git a/secure/ssh.mk b/secure/ssh.mk
index bb6dd9b748e4..97dd089d98e8 100644
--- a/secure/ssh.mk
+++ b/secure/ssh.mk
@@ -9,12 +9,11 @@ SKSRCS= ssh-sk-client.c
CFLAGS+= -I${SSHDIR} -include ssh_namespace.h
-.if ${MK_GSSAPI} != "no" && ${MK_KERBEROS_SUPPORT} != "no"
+.if ${MK_KERBEROS_SUPPORT} != "no"
CFLAGS+= -include krb5_config.h
.if ${MK_MITKRB5} == "no"
CFLAGS+= -DHEIMDAL=1
.endif
-
.endif
CFLAGS+= -DXAUTH_PATH=\"${LOCALBASE:U/usr/local}/bin/xauth\"
diff --git a/secure/usr.bin/ssh/Makefile b/secure/usr.bin/ssh/Makefile
index a4f36d0fe2df..2b11b783c007 100644
--- a/secure/usr.bin/ssh/Makefile
+++ b/secure/usr.bin/ssh/Makefile
@@ -17,7 +17,7 @@ SRCS+= gss-genr.c
LIBADD= ssh
-.if ${MK_GSSAPI} != "no" && ${MK_KERBEROS_SUPPORT} != "no"
+.if ${MK_KERBEROS_SUPPORT} != "no"
.if ${MK_MITKRB5} == "no"
LIBADD+= gssapi
.else
diff --git a/secure/usr.sbin/sshd/Makefile b/secure/usr.sbin/sshd/Makefile
index f37dfe1c1b3a..433a87984e42 100644
--- a/secure/usr.sbin/sshd/Makefile
+++ b/secure/usr.sbin/sshd/Makefile
@@ -18,7 +18,7 @@ moduli: .MADE
LIBADD= ssh util
-.if ${MK_GSSAPI} != "no" && ${MK_KERBEROS_SUPPORT} != "no"
+.if ${MK_KERBEROS_SUPPORT} != "no"
.if ${MK_MITKRB5} == "no"
LIBADD+= gssapi_krb5 gssapi krb5
.else
diff --git a/share/man/man4/Makefile b/share/man/man4/Makefile
index 1ba1fe46523e..519b113b0a2e 100644
--- a/share/man/man4/Makefile
+++ b/share/man/man4/Makefile
@@ -594,6 +594,7 @@ MAN= aac.4 \
tty.4 \
tun.4 \
tws.4 \
+ u2f.4 \
udp.4 \
udplite.4 \
${_ufshci.4} \
@@ -846,7 +847,7 @@ _cpuctl.4= cpuctl.4
_dpms.4= dpms.4
_ftgpio.4= ftgpio.4
_ftwd.4= ftwd.4
-_hn.4= _hn.4
+_hn.4= hn.4
_hpt27xx.4= hpt27xx.4
_hptiop.4= hptiop.4
_hptmv.4= hptmv.4
diff --git a/share/man/man4/ata.4 b/share/man/man4/ata.4
index feea1dd3cc85..29b6bbef6838 100644
--- a/share/man/man4/ata.4
+++ b/share/man/man4/ata.4
@@ -155,7 +155,9 @@ The
.Va hw.ata.ata_dma_check_80pin
tunable can be set to 0 to disable this check.
.Sh HARDWARE
-The currently supported ATA/SATA controller chips are:
+The
+.Nm
+driver supports the IDE interface on the following ATA/SATA controllers:
.Pp
.Bl -tag -width "Silicon Image:" -compact
.It Acard:
diff --git a/share/man/man4/iflib.4 b/share/man/man4/iflib.4
index 0114263e6ca2..2040698f0087 100644
--- a/share/man/man4/iflib.4
+++ b/share/man/man4/iflib.4
@@ -1,4 +1,4 @@
-.Dd September 27, 2018
+.Dd August 20, 2025
.Dt IFLIB 4
.Os
.Sh NAME
@@ -64,6 +64,18 @@ If this is zero or not set, an RX and TX queue pair will be assigned to each
core.
When set to a non-zero value, TX queues are assigned to cores following the
last RX queue.
+.It Va simple_tx
+When set to one, iflib uses a simple transmit routine with no queuing at all.
+By default, iflib uses a highly optimized, lockless, transmit queue called
+mp_ring.
+This performs well when there are more CPU cores than NIC
+queues and prevents lock contention for transmit resources.
+Unfortunately, mp_ring incurs unneeded overhead on workloads where
+resource contention is not a problem (well-behaved applications on
+systems with as many NIC queues as CPU cores).
+Note that when this is enabled, the tx_abdicate sysctl is no longer
+applicable and is ignored.
+Defaults to zero.
.El
.Pp
These
diff --git a/share/man/man4/ioat.4 b/share/man/man4/ioat.4
index deef466c0ae0..1c0e1dd49fd1 100644
--- a/share/man/man4/ioat.4
+++ b/share/man/man4/ioat.4
@@ -23,7 +23,7 @@
.\" SUCH DAMAGE.
.\"
.Dd May 3, 2016
-.Dt IOAT 4
+.Dt IOAT 4 amd64
.Os
.Sh NAME
.Nm I/OAT
diff --git a/share/man/man4/iwlwifi.4 b/share/man/man4/iwlwifi.4
index 4a251f239a55..660f6a9bf57c 100644
--- a/share/man/man4/iwlwifi.4
+++ b/share/man/man4/iwlwifi.4
@@ -27,7 +27,7 @@
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.Dd June 13, 2025
+.Dd August 19, 2025
.Dt IWLWIFI 4
.Os
.Sh NAME
@@ -331,7 +331,7 @@ driver first appeared in
802.11n and 802.11ac support for the 22000 and later chipsets first appeared in
.Fx 14.3 .
.Sh BUGS
-Certainly.
+.Lk https://bugs.freebsd.org/bugzilla/showdependencytree.cgi?id=iwlwifi "iwlwifi known bugs"
.Pp
While
.Nm
diff --git a/share/man/man4/iwx.4 b/share/man/man4/iwx.4
index 7cd54d61b920..295a5f318afa 100644
--- a/share/man/man4/iwx.4
+++ b/share/man/man4/iwx.4
@@ -18,7 +18,7 @@
.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
.\"
.Dd May 2, 2025
-.Dt IWX 4
+.Dt IWX 4 amd64
.Os
.Sh NAME
.Nm iwx
diff --git a/share/man/man4/man4.aarch64/armv8crypto.4 b/share/man/man4/man4.aarch64/armv8crypto.4
index 7b8704395daf..0f763adc5766 100644
--- a/share/man/man4/man4.aarch64/armv8crypto.4
+++ b/share/man/man4/man4.aarch64/armv8crypto.4
@@ -25,7 +25,7 @@
.\" SUCH DAMAGE.
.\"
.Dd July 29, 2020
-.Dt ARMV8CRYPTO 4
+.Dt ARMV8CRYPTO 4 aarch64
.Os
.Sh NAME
.Nm armv8crypto
diff --git a/share/man/man4/man4.aarch64/enetc.4 b/share/man/man4/man4.aarch64/enetc.4
index 33f796347f96..e7cfcb7ebe0e 100644
--- a/share/man/man4/man4.aarch64/enetc.4
+++ b/share/man/man4/man4.aarch64/enetc.4
@@ -25,7 +25,7 @@
.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd June 11, 2021
-.Dt ENETC 4
+.Dt ENETC 4 aarch64
.Os
.Sh NAME
.Nm enetc
diff --git a/share/man/man4/man4.aarch64/felix.4 b/share/man/man4/man4.aarch64/felix.4
index 15caef6d274f..b97f3c2168e8 100644
--- a/share/man/man4/man4.aarch64/felix.4
+++ b/share/man/man4/man4.aarch64/felix.4
@@ -26,7 +26,7 @@
.\" SUCH DAMAGE.
.\"
.Dd June 21, 2021
-.Dt FELIX 4
+.Dt FELIX 4 aarch64
.Os
.Sh NAME
.Nm felix
diff --git a/share/man/man4/man4.aarch64/rk_gpio.4 b/share/man/man4/man4.aarch64/rk_gpio.4
index b5648662cf5e..b2767dd66dce 100644
--- a/share/man/man4/man4.aarch64/rk_gpio.4
+++ b/share/man/man4/man4.aarch64/rk_gpio.4
@@ -23,7 +23,7 @@
.\" SUCH DAMAGE.
.\"
.Dd Apr 26, 2018
-.Dt RK_GPIO 4
+.Dt RK_GPIO 4 aarch64
.Os
.Sh NAME
.Nm rk_gpio
diff --git a/share/man/man4/man4.aarch64/rk_grf.4 b/share/man/man4/man4.aarch64/rk_grf.4
index 64ed468c1983..b01a93091ecb 100644
--- a/share/man/man4/man4.aarch64/rk_grf.4
+++ b/share/man/man4/man4.aarch64/rk_grf.4
@@ -23,7 +23,7 @@
.\" SUCH DAMAGE.
.\"
.Dd Apr 26, 2018
-.Dt RK_GRF 4
+.Dt RK_GRF 4 aarch64
.Os
.Sh NAME
.Nm rk_grf
diff --git a/share/man/man4/man4.aarch64/rk_grf_gpio.4 b/share/man/man4/man4.aarch64/rk_grf_gpio.4
index 6a5ebbe19e3b..2bfbebce1b76 100644
--- a/share/man/man4/man4.aarch64/rk_grf_gpio.4
+++ b/share/man/man4/man4.aarch64/rk_grf_gpio.4
@@ -4,7 +4,7 @@
.\" SPDX-License-Identifier: BSD-2-Clause
.\"
.Dd March 18, 2025
-.Dt RK_GRF_GPIO 4
+.Dt RK_GRF_GPIO 4 aarch64
.Os
.Sh NAME
.Nm rk_grf_gpio
diff --git a/share/man/man4/man4.aarch64/rk_i2c.4 b/share/man/man4/man4.aarch64/rk_i2c.4
index be1a0fab943e..363cdeac7f72 100644
--- a/share/man/man4/man4.aarch64/rk_i2c.4
+++ b/share/man/man4/man4.aarch64/rk_i2c.4
@@ -25,7 +25,7 @@
.\" SUCH DAMAGE.
.\"
.Dd June 14, 2018
-.Dt RK_I2C 4
+.Dt RK_I2C 4 aarch64
.Os
.Sh NAME
.Nm rk_i2c
diff --git a/share/man/man4/man4.aarch64/rk_pinctrl.4 b/share/man/man4/man4.aarch64/rk_pinctrl.4
index 519b3e793cd1..2be5f363498d 100644
--- a/share/man/man4/man4.aarch64/rk_pinctrl.4
+++ b/share/man/man4/man4.aarch64/rk_pinctrl.4
@@ -23,7 +23,7 @@
.\" SUCH DAMAGE.
.\"
.Dd Apr 26, 2018
-.Dt RK_PINCTRL 4
+.Dt RK_PINCTRL 4 aarch64
.Os
.Sh NAME
.Nm rk_pinctrl
diff --git a/share/man/man4/man4.arm/am335x_dmtpps.4 b/share/man/man4/man4.arm/am335x_dmtpps.4
index d565c65e2cf1..bec5ff7726a0 100644
--- a/share/man/man4/man4.arm/am335x_dmtpps.4
+++ b/share/man/man4/man4.arm/am335x_dmtpps.4
@@ -24,7 +24,7 @@
.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd August 12, 2015
-.Dt AM335X_DMTPPS 4
+.Dt AM335X_DMTPPS 4 arm
.Os
.Sh NAME
.Nm am335x_dmtpps
diff --git a/share/man/man4/man4.arm/aw_gpio.4 b/share/man/man4/man4.arm/aw_gpio.4
index 5cbc7562d9bd..ef9fc1fe2733 100644
--- a/share/man/man4/man4.arm/aw_gpio.4
+++ b/share/man/man4/man4.arm/aw_gpio.4
@@ -23,7 +23,7 @@
.\" SUCH DAMAGE.
.\"
.Dd October 8, 2024
-.Dt AW_GPIO 4
+.Dt AW_GPIO 4 arm
.Os
.Sh NAME
.Nm aw_gpio
diff --git a/share/man/man4/man4.arm/aw_mmc.4 b/share/man/man4/man4.arm/aw_mmc.4
index eb7fc9ce020a..e3f961fa5067 100644
--- a/share/man/man4/man4.arm/aw_mmc.4
+++ b/share/man/man4/man4.arm/aw_mmc.4
@@ -23,7 +23,7 @@
.\" SUCH DAMAGE.
.\"
.Dd Dec 25, 2017
-.Dt AW_MMC 4
+.Dt AW_MMC 4 arm
.Os
.Sh NAME
.Nm aw_mmc
diff --git a/share/man/man4/man4.arm/aw_rtc.4 b/share/man/man4/man4.arm/aw_rtc.4
index 1296cd41da68..87212d85116c 100644
--- a/share/man/man4/man4.arm/aw_rtc.4
+++ b/share/man/man4/man4.arm/aw_rtc.4
@@ -23,7 +23,7 @@
.\" SUCH DAMAGE.
.\"
.Dd December 10, 2024
-.Dt AW_RTC 4
+.Dt AW_RTC 4 arm
.Os
.Sh NAME
.Nm aw_rtc
diff --git a/share/man/man4/man4.arm/aw_sid.4 b/share/man/man4/man4.arm/aw_sid.4
index 5cd2f3d5e072..8b3691259f22 100644
--- a/share/man/man4/man4.arm/aw_sid.4
+++ b/share/man/man4/man4.arm/aw_sid.4
@@ -25,7 +25,7 @@
.\" SUCH DAMAGE.
.\"
.Dd October 8, 2024
-.Dt AW_SID 4
+.Dt AW_SID 4 arm
.Os
.Sh NAME
.Nm aw_sid
diff --git a/share/man/man4/man4.arm/aw_spi.4 b/share/man/man4/man4.arm/aw_spi.4
index f8985e1c16bb..d0566a45b54b 100644
--- a/share/man/man4/man4.arm/aw_spi.4
+++ b/share/man/man4/man4.arm/aw_spi.4
@@ -23,7 +23,7 @@
.\" SUCH DAMAGE.
.\"
.Dd May 17, 2018
-.Dt AW_SPI 4
+.Dt AW_SPI 4 arm
.Os
.Sh NAME
.Nm aw_spi
diff --git a/share/man/man4/man4.arm/aw_syscon.4 b/share/man/man4/man4.arm/aw_syscon.4
index e32f329e489a..97f01196a8a6 100644
--- a/share/man/man4/man4.arm/aw_syscon.4
+++ b/share/man/man4/man4.arm/aw_syscon.4
@@ -25,7 +25,7 @@
.\" SUCH DAMAGE.
.\"
.Dd November 11, 2024
-.Dt AW_SYSCON 4
+.Dt AW_SYSCON 4 arm
.Os
.Sh NAME
.Nm aw_syscon
diff --git a/share/man/man4/man4.arm/bcm283x_pwm.4 b/share/man/man4/man4.arm/bcm283x_pwm.4
index 1fb5a830ace7..71d7f0cc3cca 100644
--- a/share/man/man4/man4.arm/bcm283x_pwm.4
+++ b/share/man/man4/man4.arm/bcm283x_pwm.4
@@ -25,7 +25,7 @@
.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd September 10, 2018
-.Dt BCM283X_PWM 4
+.Dt BCM283X_PWM 4 arm
.Os
.Sh NAME
.Nm bcm283x_pwm
diff --git a/share/man/man4/man4.arm/devcfg.4 b/share/man/man4/man4.arm/devcfg.4
index ddf368a85f24..cbc205814c69 100644
--- a/share/man/man4/man4.arm/devcfg.4
+++ b/share/man/man4/man4.arm/devcfg.4
@@ -23,7 +23,7 @@
.\" SUCH DAMAGE.
.\"
.Dd February 28, 2013
-.Dt DEVCFG 4
+.Dt DEVCFG 4 arm
.Os
.Sh NAME
.Nm devcfg
diff --git a/share/man/man4/man4.arm/imx6_ahci.4 b/share/man/man4/man4.arm/imx6_ahci.4
index 9979cef50d79..50689e323db8 100644
--- a/share/man/man4/man4.arm/imx6_ahci.4
+++ b/share/man/man4/man4.arm/imx6_ahci.4
@@ -24,7 +24,7 @@
.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd July 7, 2018
-.Dt IMX6_AHCI 4
+.Dt IMX6_AHCI 4 arm
.Os
.Sh NAME
.Nm imx6_ahci
diff --git a/share/man/man4/man4.arm/imx6_snvs.4 b/share/man/man4/man4.arm/imx6_snvs.4
index b36c3ddd91c1..2c1db97b231c 100644
--- a/share/man/man4/man4.arm/imx6_snvs.4
+++ b/share/man/man4/man4.arm/imx6_snvs.4
@@ -24,7 +24,7 @@
.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd July 8, 2018
-.Dt IMX6_SNVS 4
+.Dt IMX6_SNVS 4 arm
.Os
.Sh NAME
.Nm imx6_snvs
diff --git a/share/man/man4/man4.arm/imx_spi.4 b/share/man/man4/man4.arm/imx_spi.4
index e7555ed20d94..54a5339e3276 100644
--- a/share/man/man4/man4.arm/imx_spi.4
+++ b/share/man/man4/man4.arm/imx_spi.4
@@ -24,7 +24,7 @@
.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd July 9, 2018
-.Dt IMX_SPI 4
+.Dt IMX_SPI 4 arm
.Os
.Sh NAME
.Nm imx_spi
diff --git a/share/man/man4/man4.arm/imx_wdog.4 b/share/man/man4/man4.arm/imx_wdog.4
index 4b993e1d066b..cb4d0e13865b 100644
--- a/share/man/man4/man4.arm/imx_wdog.4
+++ b/share/man/man4/man4.arm/imx_wdog.4
@@ -24,7 +24,7 @@
.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd July 7, 2018
-.Dt IMX_WDOG 4
+.Dt IMX_WDOG 4 arm
.Os
.Sh NAME
.Nm imx_wdog
diff --git a/share/man/man4/man4.arm/mge.4 b/share/man/man4/man4.arm/mge.4
index e949b36f4307..cba9327eadcf 100644
--- a/share/man/man4/man4.arm/mge.4
+++ b/share/man/man4/man4.arm/mge.4
@@ -24,7 +24,7 @@
.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd November 27, 2008
-.Dt MGE 4
+.Dt MGE 4 arm
.Os
.Sh NAME
.Nm mge
diff --git a/share/man/man4/man4.arm/ti_adc.4 b/share/man/man4/man4.arm/ti_adc.4
index d71547231e4c..fb59e1d3e57c 100644
--- a/share/man/man4/man4.arm/ti_adc.4
+++ b/share/man/man4/man4.arm/ti_adc.4
@@ -23,7 +23,7 @@
.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd June 1, 2014
-.Dt TI_ADC 4
+.Dt TI_ADC 4 arm
.Os
.Sh NAME
.Nm ti_adc
diff --git a/share/man/man4/man4.powerpc/abtn.4 b/share/man/man4/man4.powerpc/abtn.4
index 92d643d5cf32..7421d0a0b5a6 100644
--- a/share/man/man4/man4.powerpc/abtn.4
+++ b/share/man/man4/man4.powerpc/abtn.4
@@ -25,7 +25,7 @@
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd October 16, 2011
-.Dt ABTN 4
+.Dt ABTN 4 powerpc
.Os
.Sh NAME
.Nm abtn
diff --git a/share/man/man4/man4.powerpc/adb.4 b/share/man/man4/man4.powerpc/adb.4
index a781787995ab..6041484b5e33 100644
--- a/share/man/man4/man4.powerpc/adb.4
+++ b/share/man/man4/man4.powerpc/adb.4
@@ -24,7 +24,7 @@
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd December 3, 2009
-.Dt ADB 4
+.Dt ADB 4 powerpc
.Os
.Sh NAME
.Nm adb
diff --git a/share/man/man4/man4.powerpc/akbd.4 b/share/man/man4/man4.powerpc/akbd.4
index 44af08961122..3406f5a1aa76 100644
--- a/share/man/man4/man4.powerpc/akbd.4
+++ b/share/man/man4/man4.powerpc/akbd.4
@@ -24,7 +24,7 @@
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd December 3, 2009
-.Dt AKBD 4
+.Dt AKBD 4 powerpc
.Os
.Sh NAME
.Nm akbd
diff --git a/share/man/man4/man4.powerpc/ams.4 b/share/man/man4/man4.powerpc/ams.4
index 21be3c098920..d7fa922e7307 100644
--- a/share/man/man4/man4.powerpc/ams.4
+++ b/share/man/man4/man4.powerpc/ams.4
@@ -24,7 +24,7 @@
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd December 3, 2009
-.Dt AMS 4
+.Dt AMS 4 powerpc
.Os
.Sh NAME
.Nm ams
diff --git a/share/man/man4/man4.powerpc/cuda.4 b/share/man/man4/man4.powerpc/cuda.4
index 7171ebb42373..a52b9a447c9d 100644
--- a/share/man/man4/man4.powerpc/cuda.4
+++ b/share/man/man4/man4.powerpc/cuda.4
@@ -24,7 +24,7 @@
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd December 3, 2009
-.Dt CUDA 4
+.Dt CUDA 4 powerpc
.Os
.Sh NAME
.Nm cuda
diff --git a/share/man/man4/man4.powerpc/dtsec.4 b/share/man/man4/man4.powerpc/dtsec.4
index 4a60dd0b8824..f18de90c4757 100644
--- a/share/man/man4/man4.powerpc/dtsec.4
+++ b/share/man/man4/man4.powerpc/dtsec.4
@@ -24,7 +24,7 @@
.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd October 31, 2017
-.Dt DTSEC 4
+.Dt DTSEC 4 powerpc
.Os
.Sh NAME
.Nm dtsec
diff --git a/share/man/man4/man4.powerpc/llan.4 b/share/man/man4/man4.powerpc/llan.4
index c32ddbca6a00..b78109cac626 100644
--- a/share/man/man4/man4.powerpc/llan.4
+++ b/share/man/man4/man4.powerpc/llan.4
@@ -24,7 +24,7 @@
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd February 19, 2015
-.Dt LLAN 4
+.Dt LLAN 4 powerpc
.Os
.Sh NAME
.Nm llan
diff --git a/share/man/man4/man4.powerpc/pmu.4 b/share/man/man4/man4.powerpc/pmu.4
index 6eac20cfa6b7..4dfb31f175bd 100644
--- a/share/man/man4/man4.powerpc/pmu.4
+++ b/share/man/man4/man4.powerpc/pmu.4
@@ -24,7 +24,7 @@
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd December 6, 2008
-.Dt PMU 4
+.Dt PMU 4 powerpc
.Os
.Sh NAME
.Nm pmu
diff --git a/share/man/man4/man4.powerpc/smu.4 b/share/man/man4/man4.powerpc/smu.4
index ef2654746e62..852a08abaa9e 100644
--- a/share/man/man4/man4.powerpc/smu.4
+++ b/share/man/man4/man4.powerpc/smu.4
@@ -24,7 +24,7 @@
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd February 22, 2010
-.Dt SMU 4
+.Dt SMU 4 powerpc
.Os
.Sh NAME
.Nm smu
diff --git a/share/man/man4/man4.powerpc/snd_ai2s.4 b/share/man/man4/man4.powerpc/snd_ai2s.4
index 3880751e65c9..7a3cd9cb94af 100644
--- a/share/man/man4/man4.powerpc/snd_ai2s.4
+++ b/share/man/man4/man4.powerpc/snd_ai2s.4
@@ -24,7 +24,7 @@
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd January 20, 2009
-.Dt SND_AI2S 4
+.Dt SND_AI2S 4 powerpc
.Os
.Sh NAME
.Nm snd_ai2s
diff --git a/share/man/man4/man4.powerpc/snd_davbus.4 b/share/man/man4/man4.powerpc/snd_davbus.4
index 6958ebd4b4b5..028225accf52 100644
--- a/share/man/man4/man4.powerpc/snd_davbus.4
+++ b/share/man/man4/man4.powerpc/snd_davbus.4
@@ -24,7 +24,7 @@
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd January 20, 2009
-.Dt SND_DAVBUS 4
+.Dt SND_DAVBUS 4 powerpc
.Os
.Sh NAME
.Nm snd_davbus
diff --git a/share/man/man4/man4.powerpc/tsec.4 b/share/man/man4/man4.powerpc/tsec.4
index b3ccae648ab8..09510e329ff0 100644
--- a/share/man/man4/man4.powerpc/tsec.4
+++ b/share/man/man4/man4.powerpc/tsec.4
@@ -24,7 +24,7 @@
.\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd February 20, 2015
-.Dt TSEC 4
+.Dt TSEC 4 powerpc
.Os
.Sh NAME
.Nm tsec
diff --git a/share/man/man4/nvdimm.4 b/share/man/man4/nvdimm.4
index 5b7dbe435c46..2bec51b42d72 100644
--- a/share/man/man4/nvdimm.4
+++ b/share/man/man4/nvdimm.4
@@ -26,7 +26,7 @@
.\" SUCH DAMAGE.
.\"
.Dd September 5, 2019
-.Dt NVDIMM 4
+.Dt NVDIMM 4 amd64
.Os
.Sh NAME
.Nm nvdimm
diff --git a/share/man/man4/qlnxe.4 b/share/man/man4/qlnxe.4
index f545235ec1ff..70bad789add1 100644
--- a/share/man/man4/qlnxe.4
+++ b/share/man/man4/qlnxe.4
@@ -24,7 +24,7 @@
.\" SUCH DAMAGE.
.\"
.Dd May 9, 2017
-.Dt QLNXE 4
+.Dt QLNXE 4 amd64
.Os
.Sh NAME
.Nm qlnxe
diff --git a/share/man/man4/qlxgb.4 b/share/man/man4/qlxgb.4
index 4bf8000d15da..cc97cd060a3f 100644
--- a/share/man/man4/qlxgb.4
+++ b/share/man/man4/qlxgb.4
@@ -24,7 +24,7 @@
.\" SUCH DAMAGE.
.\"
.Dd November 3, 2011
-.Dt QLXGB 4
+.Dt QLXGB 4 amd64
.Os
.Sh NAME
.Nm qlxgb
diff --git a/share/man/man4/qlxgbe.4 b/share/man/man4/qlxgbe.4
index 486a5ec0f682..465e4fc018ad 100644
--- a/share/man/man4/qlxgbe.4
+++ b/share/man/man4/qlxgbe.4
@@ -24,7 +24,7 @@
.\" SUCH DAMAGE.
.\"
.Dd April 1, 2013
-.Dt QLXGBE 4
+.Dt QLXGBE 4 amd64
.Os
.Sh NAME
.Nm qlxgbe
diff --git a/share/man/man4/qlxge.4 b/share/man/man4/qlxge.4
index 4723c56ff68b..14a1e1284fab 100644
--- a/share/man/man4/qlxge.4
+++ b/share/man/man4/qlxge.4
@@ -24,7 +24,7 @@
.\" SUCH DAMAGE.
.\"
.Dd June 21, 2013
-.Dt QLXGE 4
+.Dt QLXGE 4 amd64
.Os
.Sh NAME
.Nm qlxge
diff --git a/share/man/man4/sfxge.4 b/share/man/man4/sfxge.4
index a9724074581e..ea35cf3e573c 100644
--- a/share/man/man4/sfxge.4
+++ b/share/man/man4/sfxge.4
@@ -27,7 +27,7 @@
.\" policies, either expressed or implied, of the FreeBSD Project.
.\"
.Dd February 22, 2015
-.Dt SFXGE 4
+.Dt SFXGE 4 amd64
.Os
.Sh NAME
.Nm sfxge
diff --git a/share/man/man4/smartpqi.4 b/share/man/man4/smartpqi.4
index 5b7ea923e13e..f5fab85d13bd 100644
--- a/share/man/man4/smartpqi.4
+++ b/share/man/man4/smartpqi.4
@@ -25,7 +25,7 @@
.\" SUCH DAMAGE.
.\"
.Dd August 24, 2023
-.Dt SMARTPQI 4
+.Dt SMARTPQI 4 amd64
.Os
.Sh NAME
.Nm smartpqi
diff --git a/share/man/man4/sume.4 b/share/man/man4/sume.4
index 219328a4f4c4..b36f924875e6 100644
--- a/share/man/man4/sume.4
+++ b/share/man/man4/sume.4
@@ -25,7 +25,7 @@
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
.Dd August 30, 2020
-.Dt SUME 4
+.Dt SUME 4 amd64
.Os
.Sh NAME
.Nm sume
diff --git a/share/man/man4/tap.4 b/share/man/man4/tap.4
index 95a681a923d2..a4fe98cdfecf 100644
--- a/share/man/man4/tap.4
+++ b/share/man/man4/tap.4
@@ -203,6 +203,21 @@ The argument should be a pointer to a
The interface name will be returned in the
.Va ifr_name
field.
+.It Dv TAPSTRANSIENT
+The argument should be a pointer to an
+.Va int ;
+this sets the transient flag on
+the
+.Nm
+device.
+A transient
+.Nm
+will be destroyed upon last close.
+.It Dv TAPGTRANSIENT
+The argument should be a pointer to an
+.Va int ;
+this stores the current state (enabled or disabled) of the transient flag into
+it.
.It Dv FIONBIO
Turn non-blocking I/O for reads off or on, according as the argument
.Va int Ns 's
diff --git a/share/man/man4/tun.4 b/share/man/man4/tun.4
index 58f67cb20acb..1c5bd35f0ab8 100644
--- a/share/man/man4/tun.4
+++ b/share/man/man4/tun.4
@@ -282,6 +282,21 @@ The argument should be a pointer to an
the ioctl sets the value to one if the device is in
.Dq multi-af
mode, and zero otherwise.
+.It Dv TUNSTRANSIENT
+The argument should be a pointer to an
+.Va int ;
+this sets the transient flag on
+the
+.Nm
+device.
+A transient
+.Nm
+will be destroyed upon last close.
+.It Dv TUNGTRANSIENT
+The argument should be a pointer to an
+.Va int ;
+this stores the current state (enabled or disabled) of the transient flag into
+it.
.It Dv FIONBIO
Turn non-blocking I/O for reads off or on, according as the argument
.Vt int Ns 's
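
A short sketch of the new ioctls in use, assuming they are exposed via
<net/if_tun.h> as the page describes; the tap(4) equivalents are
TAPSTRANSIENT/TAPGTRANSIENT:

    #include <sys/ioctl.h>
    #include <net/if_tun.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        int fd, on = 1, state;

        /* Opening /dev/tun clones a fresh tun(4) device. */
        fd = open("/dev/tun", O_RDWR);
        if (fd < 0)
            return (1);

        /* Mark it transient: destroyed automatically on last close. */
        if (ioctl(fd, TUNSTRANSIENT, &on) == -1)
            return (1);
        if (ioctl(fd, TUNGTRANSIENT, &state) == 0)
            printf("transient: %d\n", state);

        close(fd);  /* last close: the interface goes away */
        return (0);
    }
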
diff --git a/share/man/man4/u2f.4 b/share/man/man4/u2f.4
new file mode 100644
index 000000000000..4c51e140242e
--- /dev/null
+++ b/share/man/man4/u2f.4
@@ -0,0 +1,96 @@
+.\"
+.\" SPDX-License-Identifier: ISC
+.\"
+.\" $OpenBSD: fido.4,v 1.4 2020/08/21 19:02:46 mglocker Exp $
+.\"
+.\" Copyright (c) 2019 Reyk Floeter <reyk@openbsd.org>
+.\" Copyright (c) 2023 Vladimir Kondratyev <wulf@FreeBSD.org>
+.\"
+.\" Permission to use, copy, modify, and distribute this software for any
+.\" purpose with or without fee is hereby granted, provided that the above
+.\" copyright notice and this permission notice appear in all copies.
+.\"
+.\" THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+.\" WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+.\" MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+.\" ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+.\" WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+.\"
+.Dd August 21, 2023
+.Dt U2F 4
+.Os
+.Sh NAME
+.Nm u2f
+.Nd FIDO/U2F USB security keys
+.Sh SYNOPSIS
+.Cd "device u2f"
+.Pp
+In
+.Xr loader.conf 5 :
+.Cd u2f_load="YES"
+.Pp
+In
+.Xr sysctl.conf 5 :
+.Cd hw.hid.u2f.debug
+.Sh DESCRIPTION
+The
+.Nm
+driver provides support for FIDO/U2F-compatible USB security keys.
+They are Human Interface Devices (HID), which can be accessed via the
+.Pa /dev/u2f/N
+interface.
+.Pp
+The driver is compatible with the
+.Xr read 2 ,
+.Xr write 2 ,
+and
+.Xr ioctl 2
+operations of the generic
+.Xr uhid 4
+device but only accepts the optional HID
+.Xr ioctl 2
+calls from members of the u2f group.
+.Sh HARDWARE
+The
+.Nm
+driver supports FIDO/U2F-compatible USB security keys.
+.Sh SYSCTL VARIABLES
+The following variables are available as both
+.Xr sysctl 8
+variables and
+.Xr loader 8
+tunables:
+.Bl -tag -width indent
+.It Va hw.hid.u2f.debug
+Debug output level, where 0 is debugging disabled and larger values increase
+debug message verbosity.
+Default is 0.
+.El
+.Sh FILES
+.Bl -tag -width /dev/u2f/* -compact
+.It Pa /dev/u2f/*
+.El
+.Sh SEE ALSO
+.Xr uhid 4 ,
+.Xr usb 4 ,
+.Xr usbhid 4
+.Sh HISTORY
+The
+.Nm
+driver first appeared in
+.Fx 15.0 .
+.Sh AUTHORS
+.An -nosplit
+The
+.Nm
+driver was written by
+.An Vladimir Kondratyev Aq Mt wulf@FreeBSD.org .
+.Pp
+This manual page was written by
+.An Vladimir Kondratyev Aq Mt wulf@FreeBSD.org
+based on the
+.Ox
+.Xr fido 4
+manual page.
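
A sketch of the uhid-style access the page describes (not part of the
patch; the 64-byte report size is an assumption about typical U2F HID
keys, and a real client would write a U2FHID request before reading):

    #include <err.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
            unsigned char report[64];       /* assumed report size */
            ssize_t n;
            int fd;

            fd = open("/dev/u2f/0", O_RDWR);
            if (fd == -1)
                    err(1, "open(/dev/u2f/0)");
            /* Blocks until the key produces a report. */
            n = read(fd, report, sizeof(report));
            if (n == -1)
                    err(1, "read");
            printf("got %zd bytes\n", n);
            return (0);
    }
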
diff --git a/share/man/man4/umb.4 b/share/man/man4/umb.4
index 7ecc9a39c1ca..311a50faf8e7 100644
--- a/share/man/man4/umb.4
+++ b/share/man/man4/umb.4
@@ -17,34 +17,33 @@
.\"
.\" $NetBSD: umb.4,v 1.4 2019/08/30 09:22:17 wiz Exp $
.\"
-.Dd May 11, 2025
+.Dd August 4, 2025
.Dt UMB 4
.Os
.Sh NAME
.Nm umb
.Nd USB Mobile Broadband Interface Model (MBIM) cellular modem driver
.Sh SYNOPSIS
-To compile this driver into the kernel,
-place the following lines in your
-kernel configuration file:
-.Bd -ragged -offset indent
+.Cd "device netmap"
.Cd "device usb"
.Cd "device umb"
-.Ed
.Pp
-Alternatively, to load the driver as a
-module at boot time, place the following line in
+In
.Xr loader.conf 5 :
-.Bd -literal -offset indent
-umb_load="YES"
-.Ed
-.Pp
-If neither of the above is done, the driver will automatically be loaded
-by devd(8) when the device is connected.
+.Cd umb_load="YES"
.Sh DESCRIPTION
The
.Nm
driver provides support for USB MBIM devices.
+If the appropriate hardware is detected,
+the driver will be loaded automatically by
+.Xr devmatch 8 .
+To load the driver manually,
+.Cm load
+it in
+.Xr loader.conf 5
+or at the
+.Xr loader 8
+prompt.
.Pp
MBIM devices establish connections via cellular networks such as
GPRS, UMTS, and LTE.
diff --git a/share/man/man4/vtnet.4 b/share/man/man4/vtnet.4
index 8b99cd9f17b9..b6f10ddd87cb 100644
--- a/share/man/man4/vtnet.4
+++ b/share/man/man4/vtnet.4
@@ -22,7 +22,7 @@
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.Dd July 29, 2025
+.Dd August 21, 2025
.Dt VTNET 4
.Os
.Sh NAME
@@ -68,20 +68,29 @@ prompt before booting the kernel or stored in
.Bl -tag -width "xxxxxx"
.It Va hw.vtnet.csum_disable
.It Va hw.vtnet. Ns Ar X Ns Va .csum_disable
-This tunable disables receive and send checksum offload.
+This tunable disables receive and transmit checksum offloading for TCP and
+UDP.
+This also implies that TCP segmentation offloading and large receive offload
+are disabled.
The default value is 0.
.It Va hw.vtnet.fixup_needs_csum
.It Va hw.vtnet. Ns Ar X Ns Va .fixup_needs_csum
-This tunable enforces the calculation of a valid checksum for NEEDS_CSUM
-packets.
+This tunable enforces the calculation of a valid TCP or UDP checksum for
+packets received with
+.Dv VIRTIO_NET_HDR_F_NEEDS_CSUM
+being set in the
+.Va flags
+field of the structure
+.Vt struct virtio_net_hdr .
+It also marks the checksum as being correct in the mbuf packet header.
The default value is 0.
.It Va hw.vtnet.tso_disable
.It Va hw.vtnet. Ns Ar X Ns Va .tso_disable
-This tunable disables TSO.
+This tunable disables TCP segmentation offloading.
The default value is 0.
.It Va hw.vtnet.lro_disable
.It Va hw.vtnet. Ns Ar X Ns Va .lro_disable
-This tunable disables LRO.
+This tunable disables large receive offload.
The default value is 0.
.It Va hw.vtnet.mq_disable
.It Va hw.vtnet. Ns Ar X Ns Va .mq_disable
@@ -117,6 +126,126 @@ This tunable disables ALTQ support, allowing the use of multiqueue instead.
This option applies to all interfaces.
The default value is 0.
.El
+.Sh TRANSMIT QUEUE STATISTICS
+For each transmit queue of each interface the following read-only statistics
+are provided:
+.Bl -tag -width "xxxxxx"
+.It Va dev.vtnet. Ns Ar X Ns Va .txq Ns Ar Y Ns Va .rescheduled
+The number of times the transmit interrupt handler was rescheduled.
+.It Va dev.vtnet. Ns Ar X Ns Va .txq Ns Ar Y Ns Va .tso
+The number of times TCP segmentation offloading was performed.
+.It Va dev.vtnet. Ns Ar X Ns Va .txq Ns Ar Y Ns Va .csum
+The number of times transmit checksum offloading for UDP or TCP was
+performed.
+.It Va dev.vtnet. Ns Ar X Ns Va .txq Ns Ar Y Ns Va .omcasts
+The number of multicast packets that were transmitted.
+.It Va dev.vtnet. Ns Ar X Ns Va .txq Ns Ar Y Ns Va .obytes
+The number of bytes that were transmitted (based on Ethernet frames).
+.It Va dev.vtnet. Ns Ar X Ns Va .txq Ns Ar Y Ns Va .opackets
+The number of packets that were transmitted (Ethernet frames).
+.El
+.Sh RECEIVE QUEUE STATISTICS
+For each receive queue of each interface the following read-only statistics
+are provided:
+.Bl -tag -width "xxxxxx"
+.It Va dev.vtnet. Ns Ar X Ns Va .rxq Ns Ar Y Ns Va .rescheduled
+The number of times the receive interrupt handler was rescheduled.
+.It Va dev.vtnet. Ns Ar X Ns Va .rxq Ns Ar Y Ns Va .host_lro
+The number of times TCP large receive offload was performed.
+.It Va dev.vtnet. Ns Ar X Ns Va .rxq Ns Ar Y Ns Va .csum_failed
+Currently unused.
+.It Va dev.vtnet. Ns Ar X Ns Va .rxq Ns Ar Y Ns Va .csum
+The number of times receive checksum offloading for UDP or TCP was performed.
+.It Va dev.vtnet. Ns Ar X Ns Va .rxq Ns Ar Y Ns Va .ierrors
+The number of times an error occurred during input processing.
+.It Va dev.vtnet. Ns Ar X Ns Va .rxq Ns Ar Y Ns Va .iqdrops
+The number of times a packet was dropped during input processing.
+.It Va dev.vtnet. Ns Ar X Ns Va .rxq Ns Ar Y Ns Va .ibytes
+The number of bytes that were received (based on Ethernet frames).
+.It Va dev.vtnet. Ns Ar X Ns Va .rxq Ns Ar Y Ns Va .ipackets
+The number of packets that were received (Ethernet frames).
+.El
+.Sh INTERFACE TRANSMIT STATISTICS
+For each interface the following read-only transmit statistics are provided:
+.Bl -tag -width "xxxxxx"
+.It Va dev.vtnet. Ns Ar X Ns Va .tx_task_rescheduled
+The sum of
+.Va dev.vtnet. Ns Ar X Ns Va .txq Ns Ar Y Ns Va .rescheduled
+over all transmit queues of the interface.
+.It Va dev.vtnet. Ns Ar X Ns Va .tx_tso_offloaded
+The sum of
+.Va dev.vtnet. Ns Ar X Ns Va .txq Ns Ar Y Ns Va .tso
+over all transmit queues of the interface.
+.It Va dev.vtnet. Ns Ar X Ns Va .tx_csum_offloaded
+The sum of
+.Va dev.vtnet. Ns Ar X Ns Va .txq Ns Ar Y Ns Va .csum
+over all transmit queues of the interface.
+.It Va dev.vtnet. Ns Ar X Ns Va .tx_defrag_failed
+The number of times an attempt to defragment an mbuf chain failed during a
+transmit operation.
+.It Va dev.vtnet. Ns Ar X Ns Va .tx_defragged
+The number of times an mbuf chain was defragmented during a transmit operation.
+.It Va dev.vtnet. Ns Ar X Ns Va .tx_tso_without_csum
+The number of times TCP segmentation offloading was attempted without
+transmit checksum offloading.
+.It Va dev.vtnet. Ns Ar X Ns Va .tx_tso_not_tcp
+The number of times TCP segmentation offloading was attempted for a non-TCP packet.
+.It Va dev.vtnet. Ns Ar X Ns Va .tx_csum_proto_mismatch
+The number of times the IP protocol version of the transmit checksum
+offloading request did not match the IP protocol version of the packet.
+.It Va dev.vtnet. Ns Ar X Ns Va .tx_csum_unknown_ethtype
+The number of times a transmit offload operation was requested for an
+Ethernet frame whose EtherType was neither IPv4 nor IPv6
+(allowing for a simple VLAN tag).
+.El
+.Sh INTERFACE RECEIVE STATISTICS
+For each interface the following read-only receive statistics are provided:
+.Bl -tag -width "xxxxxx"
+.It Va dev.vtnet. Ns Ar X Ns Va .rx_task_rescheduled
+The sum of
+.Va dev.vtnet. Ns Ar X Ns Va .rxq Ns Ar Y Ns Va .rescheduled
+over all receive queues of the interface.
+.It Va dev.vtnet. Ns Ar X Ns Va .rx_csum_offloaded
+The sum of
+.Va dev.vtnet. Ns Ar X Ns Va .rxq Ns Ar Y Ns Va .csum
+over all receive queues of the interface.
+.It Va dev.vtnet. Ns Ar X Ns Va .rx_csum_failed
+The sum of
+.Va dev.vtnet. Ns Ar X Ns Va .rxq Ns Ar Y Ns Va .csum_failed
+over all receive queues of the interface.
+.It Va dev.vtnet. Ns Ar X Ns Va .rx_csum_bad_proto
+Currently unused.
+.It Va dev.vtnet. Ns Ar X Ns Va .rx_csum_bad_offset
+Currently unused.
+.It Va dev.vtnet. Ns Ar X Ns Va .rx_csum_bad_ipproto
+Currently unused.
+.It Va dev.vtnet. Ns Ar X Ns Va .rx_csum_bad_ethtype
+The number of times fixing the checksum required by
+.Va hw.vtnet.fixup_needs_csum
+or
+.Va hw.vtnet. Ns Ar X Ns Va .fixup_needs_csum
+was attempted for a packet with an EtherType other than IPv4 or IPv6.
+.It Va dev.vtnet. Ns Ar X Ns Va .rx_mergeable_failed
+The number of times receiving a mergeable buffer failed.
+.It Va dev.vtnet. Ns Ar X Ns Va .rx_enq_replacement_failed
+The number of times enqueuing the replacement receive mbuf chain failed.
+.It Va dev.vtnet. Ns Ar X Ns Va .rx_frame_too_large
+The number of times the frame was longer than the mbuf chain during large
+receive offload without mergeable buffers.
+.It Va dev.vtnet. Ns Ar X Ns Va .mbuf_alloc_failed
+The number of times an mbuf cluster allocation for the receive buffer failed.
+.El
+.Sh INTERFACE CONFIGURATION PARAMETERS
+For each interface the following read-only configuration parameters are
+provided:
+.Bl -tag -width "xxxxxx"
+.It Va dev.vtnet. Ns Ar X Ns Va .act_vq_pairs
+The number of active virtqueue pairs.
+.It Va dev.vtnet. Ns Ar X Ns Va .req_vq_pairs
+The number of requested virtqueue pairs.
+.It Va dev.vtnet. Ns Ar X Ns Va .max_vq_pairs
+The maximum number of supported virtqueue pairs.
+.El
.Sh SEE ALSO
.Xr arp 4 ,
.Xr netintro 4 ,
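
A sketch of reading one of the new per-queue counters programmatically
(not part of the patch; the uint64_t width of the counter is an
assumption, as is the presence of a vtnet0 interface):

    #include <sys/types.h>
    #include <sys/sysctl.h>

    #include <err.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            uint64_t tso;
            size_t len = sizeof(tso);

            /* dev.vtnet.X.txqY.tso, with X = 0 and Y = 0 here. */
            if (sysctlbyname("dev.vtnet.0.txq0.tso", &tso, &len,
                NULL, 0) == -1)
                    err(1, "sysctlbyname");
            printf("txq0 TSO operations: %ju\n", (uintmax_t)tso);
            return (0);
    }
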
diff --git a/share/man/man5/rc.conf.5 b/share/man/man5/rc.conf.5
index 5084e43f97c4..de2181d638d1 100644
--- a/share/man/man5/rc.conf.5
+++ b/share/man/man5/rc.conf.5
@@ -22,7 +22,7 @@
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.Dd August 10, 2025
+.Dd July 15, 2025
.Dt RC.CONF 5
.Os
.Sh NAME
@@ -2932,6 +2932,13 @@ is set to
these are the flags to pass to the
.Xr rpc.yppasswdd 8
daemon.
+.It Va rpc_ypupdated_enable
+.Pq Vt bool
+If set to
+.Dq Li YES ,
+run the
+.Nm rpc.ypupdated
+daemon at system boot time.
.It Va bsnmpd_enable
.Pq Vt bool
If set to
diff --git a/share/man/man5/src.conf.5 b/share/man/man5/src.conf.5
index 2e694bfe3293..2895c0cf4746 100644
--- a/share/man/man5/src.conf.5
+++ b/share/man/man5/src.conf.5
@@ -1,5 +1,5 @@
.\" DO NOT EDIT-- this file is @generated by tools/build/options/makeman.
-.Dd August 8, 2025
+.Dd August 20, 2025
.Dt SRC.CONF 5
.Os
.Sh NAME
@@ -411,8 +411,11 @@ Build clang-format.
.It Va WITHOUT_CLANG_FULL
Avoid building the ARCMigrate, Rewriter and StaticAnalyzer components of
the Clang C/C++ compiler.
-.It Va WITHOUT_CLEAN
-Do not clean before building world and/or kernel.
+.It Va WITH_CLEAN
+Clean before building world and/or kernel.
+Note that recording a new epoch in
+.Pa .clean_build_epoch
+in the root of the source tree will also force a clean world build.
.It Va WITHOUT_CPP
Do not build
.Xr cpp 1 .
@@ -449,8 +452,6 @@ When set, it enforces these options:
.It
.Va WITHOUT_KERBEROS
.It
-.Va WITHOUT_KERBEROS_SUPPORT
-.It
.Va WITHOUT_LDNS
.It
.Va WITHOUT_LDNS_UTILS
@@ -477,9 +478,9 @@ When set, it enforces these options:
When set, these options are also in effect:
.Pp
.Bl -inset -compact
-.It Va WITHOUT_GSSAPI
+.It Va WITHOUT_KERBEROS_SUPPORT
(unless
-.Va WITH_GSSAPI
+.Va WITH_KERBEROS_SUPPORT
is set explicitly)
.El
.It Va WITH_CTF
@@ -736,8 +737,6 @@ and dependent tests.
Do not build
.Xr gpioctl 8
as part of the base system.
-.It Va WITHOUT_GSSAPI
-Do not build libgssapi.
.It Va WITHOUT_HAST
Do not build
.Xr hastd 8
@@ -837,14 +836,10 @@ Do not build
and
.Xr truss 1 .
.It Va WITHOUT_KERBEROS
-Set this to not build Kerberos 5 (KTH Heimdal).
+Set this to not build Kerberos.
When set, these options are also in effect:
.Pp
.Bl -inset -compact
-.It Va WITHOUT_GSSAPI
-(unless
-.Va WITH_GSSAPI
-is set explicitly)
.It Va WITHOUT_KERBEROS_SUPPORT
(unless
.Va WITH_KERBEROS_SUPPORT
@@ -1430,8 +1425,6 @@ When set, it enforces these options:
.It
.Va WITHOUT_KERBEROS
.It
-.Va WITHOUT_KERBEROS_SUPPORT
-.It
.Va WITHOUT_LDNS
.It
.Va WITHOUT_LDNS_UTILS
@@ -1456,9 +1449,9 @@ When set, it enforces these options:
When set, these options are also in effect:
.Pp
.Bl -inset -compact
-.It Va WITHOUT_GSSAPI
+.It Va WITHOUT_KERBEROS_SUPPORT
(unless
-.Va WITH_GSSAPI
+.Va WITH_KERBEROS_SUPPORT
is set explicitly)
.El
.It Va WITHOUT_OPENSSL_KTLS
diff --git a/share/man/man7/hier.7 b/share/man/man7/hier.7
index 1c69b911f53b..814f5b769be8 100644
--- a/share/man/man7/hier.7
+++ b/share/man/man7/hier.7
@@ -28,7 +28,7 @@
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.Dd October 10, 2024
+.Dd August 18, 2025
.Dt HIER 7
.Os
.Sh NAME
@@ -308,6 +308,21 @@ OpenSSH configuration files; see
.Xr ssh 1
.It Pa ssl/
OpenSSL configuration files
+.Pp
+.Bl -tag -width "untrusted/" -compact
+.It Pa cert.pem
+System trust store in bundle form; see
+.Xr certctl 8 .
+.It Pa certs/
+System trust store in OpenSSL hashed-directory form; see
+.Xr certctl 8 .
+.It Pa openssl.cnf
+OpenSSL configuration file; see
+.Xr openssl.cnf 5 .
+.It Pa untrusted/
+Explicitly distrusted certificates; see
+.Xr certctl 8 .
+.El
.It Pa sysctl.conf
kernel state defaults; see
.Xr sysctl.conf 5
diff --git a/share/mk/Makefile b/share/mk/Makefile
index 4ab5c8cc314b..0e786b381fe2 100644
--- a/share/mk/Makefile
+++ b/share/mk/Makefile
@@ -22,6 +22,7 @@ FILES= \
bsd.confs.mk \
bsd.cpu.mk \
bsd.crunchgen.mk \
+ bsd.debug.mk \
bsd.dep.mk \
bsd.dirs.mk \
bsd.doc.mk \
diff --git a/share/mk/bsd.README b/share/mk/bsd.README
index 4820bf12c72d..89ee8527895e 100644
--- a/share/mk/bsd.README
+++ b/share/mk/bsd.README
@@ -20,6 +20,7 @@ bsd.compiler.mk - defined based on current compiler
bsd.confs.mk - install of configuration files
bsd.cpu.mk - sets CPU/arch-related variables (included from sys.mk)
bsd.crunchgen.mk - building crunched binaries using crunchgen(1)
+bsd.debug.mk - handling debug options for bsd.{prog,lib}.mk
bsd.dep.mk - handle Makefile dependencies
bsd.dirs.mk - handle directory creation
bsd.doc.mk - building troff system documents
diff --git a/share/mk/bsd.compat.mk b/share/mk/bsd.compat.mk
index 6fa732fd730b..bad68d1ebd8e 100644
--- a/share/mk/bsd.compat.mk
+++ b/share/mk/bsd.compat.mk
@@ -74,6 +74,7 @@ LIB32WMAKEFLAGS= \
LIB32WMAKEFLAGS+= NM="${XNM}"
LIB32WMAKEFLAGS+= OBJCOPY="${XOBJCOPY}"
+LIB32WMAKEFLAGS+= STRIPBIN="${XSTRIPBIN}"
LIB32DTRACE= ${DTRACE} -32
LIB32_MACHINE_ABI= ${MACHINE_ABI:N*64} long32 ptr32
diff --git a/share/mk/bsd.debug.mk b/share/mk/bsd.debug.mk
new file mode 100644
index 000000000000..cf2fb4356aef
--- /dev/null
+++ b/share/mk/bsd.debug.mk
@@ -0,0 +1,68 @@
+#
+# This file configures debug options for compiled targets. It is meant
+# to consolidate common logic in bsd.prog.mk and bsd.lib.mk. It should
+# not be included directly by Makefiles.
+#
+
+.include <bsd.opts.mk>
+
+.if ${MK_ASSERT_DEBUG} == "no"
+CFLAGS+= -DNDEBUG
+# XXX: shouldn't we ensure that !asserts marks potentially unused variables as
+# __unused instead of disabling -Werror globally?
+MK_WERROR= no
+.endif
+
+# If reproducible build mode is enabled, map the root of the source
+# directory to /usr/src and the root of the object directory to
+# /usr/obj.
+.if ${MK_REPRODUCIBLE_BUILD} != "no" && !defined(DEBUG_PREFIX)
+.if defined(SRCTOP)
+DEBUG_PREFIX+= ${SRCTOP:S,/$,,}=/usr/src
+.endif
+.if defined(OBJROOT)
+# Strip off compat subdirectories, e.g., /usr/obj/usr/src/amd64.amd64/obj-lib32
+# becomes /usr/obj/usr/src/amd64.amd64, since object files compiled there might
+# refer to something outside the root.
+DEBUG_PREFIX+= ${OBJROOT:S,/$,,:C,/obj-[^/]*$,,}=/usr/obj
+.endif
+.endif
+
+.if defined(DEBUG_PREFIX)
+.for map in ${DEBUG_PREFIX}
+CFLAGS+= -ffile-prefix-map=${map}
+CXXFLAGS+= -ffile-prefix-map=${map}
+.endfor
+.endif
+
+.if defined(DEBUG_FLAGS)
+CFLAGS+=${DEBUG_FLAGS}
+CXXFLAGS+=${DEBUG_FLAGS}
+
+.if ${MK_CTF} != "no" && ${DEBUG_FLAGS:M-g} != ""
+CTFFLAGS+= -g
+.endif
+.else
+STRIP?= -s
+.endif
+
+.if ${MK_DEBUG_FILES} != "no" && empty(DEBUG_FLAGS:M-g) && \
+ empty(DEBUG_FLAGS:M-gdwarf*)
+.if !${COMPILER_FEATURES:Mcompressed-debug}
+CFLAGS+= ${DEBUG_FILES_CFLAGS:N-gz*}
+CXXFLAGS+= ${DEBUG_FILES_CFLAGS:N-gz*}
+.else
+CFLAGS+= ${DEBUG_FILES_CFLAGS}
+CXXFLAGS+= ${DEBUG_FILES_CFLAGS}
+.endif
+CTFFLAGS+= -g
+.endif
+
+_debuginstall:
+.if ${MK_DEBUG_FILES} != "no" && defined(DEBUGFILE)
+.if defined(DEBUGMKDIR)
+ ${INSTALL} ${TAG_ARGS:D${TAG_ARGS},dbg} -d ${DESTDIR}${DEBUGFILEDIR}/
+.endif
+ ${INSTALL} ${TAG_ARGS:D${TAG_ARGS},dbg} -o ${DEBUGOWN} -g ${DEBUGGRP} -m ${DEBUGMODE} \
+ ${DEBUGFILE} ${DESTDIR}${DEBUGFILEDIR}/${DEBUGFILE}
+.endif
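
The effect of the DEBUG_PREFIX mapping can be observed outside the
build system; a sketch (the flag spelling matches the file above, the
invocation is illustrative):

    /*
     * prefixmap.c -- compile with an absolute path so the prefix
     * can match, e.g.:
     *   cc -g -ffile-prefix-map=$(pwd)=/usr/src $(pwd)/prefixmap.c
     * __FILE__ and the DWARF paths then carry the mapped /usr/src
     * prefix rather than the real checkout location, which is what
     * makes the objects reproducible across build roots.
     */
    #include <stdio.h>

    int
    main(void)
    {
            printf("%s\n", __FILE__);
            return (0);
    }
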
diff --git a/share/mk/bsd.endian.mk b/share/mk/bsd.endian.mk
index ba662ffc7439..24da57954b5a 100644
--- a/share/mk/bsd.endian.mk
+++ b/share/mk/bsd.endian.mk
@@ -20,10 +20,17 @@ LOCALEDEF_ENDIAN= -b
#
# During bootstrapping on !FreeBSD OSes, we need to define some value. Short of
# having an exhaustive list for all variants of Linux and MacOS we simply do not
-# set TARGET_ENDIANNESS and poison the other variables. They should be unused
-# during the bootstrap phases (apart from one place that's adequately protected
-# in bsd.compiler.mk) where we're building the bootstrap tools.
+# set TARGET_ENDIANNESS (on Linux) and poison the other variables. They should
+# be unused during the bootstrap phases, where we're building the bootstrap
+# tools (apart from one place that's adequately protected in bsd.compiler.mk).
#
+.if ${.MAKE.OS} == "Darwin"
+# We do assume the endianness on macOS because Apple's modern hardware is all
+# little-endian. This might need to be revisited in the far future, but for
+# the time being Apple Silicon's reign of terror continues. We only set this
+# one up because libcrypto is now built during bootstrap.
+TARGET_ENDIANNESS= 1234
+.endif
CAP_MKDB_ENDIAN= -B # Poisoned value, invalid flags for both cap_mkdb
LOCALEDEF_ENDIAN= -B # and localedef.
.endif
diff --git a/share/mk/bsd.lib.mk b/share/mk/bsd.lib.mk
index 3013f32c2b36..6caad8956c14 100644
--- a/share/mk/bsd.lib.mk
+++ b/share/mk/bsd.lib.mk
@@ -1,4 +1,3 @@
-
.include <bsd.init.mk>
.include <bsd.compiler.mk>
.include <bsd.linker.mk>
@@ -45,23 +44,6 @@ SONAME?= ${SHLIB_NAME}
CFLAGS+= ${CRUNCH_CFLAGS}
.endif
-.if ${MK_ASSERT_DEBUG} == "no"
-CFLAGS+= -DNDEBUG
-# XXX: shouldn't we ensure that !asserts marks potentially unused variables as
-# __unused instead of disabling -Werror globally?
-MK_WERROR= no
-.endif
-
-.if defined(DEBUG_FLAGS)
-CFLAGS+= ${DEBUG_FLAGS}
-
-.if ${MK_CTF} != "no" && ${DEBUG_FLAGS:M-g} != ""
-CTFFLAGS+= -g
-.endif
-.else
-STRIP?= -s
-.endif
-
.for _libcompat in ${_ALL_libcompats}
.if ${SHLIBDIR:M*/lib${_libcompat}} || ${SHLIBDIR:M*/lib${_libcompat}/*}
TAGS+= lib${_libcompat}
@@ -70,11 +52,31 @@ TAGS+= lib${_libcompat}
.if defined(NO_ROOT)
.if !defined(TAGS) || ! ${TAGS:Mpackage=*}
-TAGS+= package=${PACKAGE:Uutilities}
+TAGS+= package=${PACKAGE:Uutilities}
.endif
-TAG_ARGS= -T ${TAGS:ts,:[*]}
+
+# By default, if PACKAGE=foo, then the native runtime libraries will go into
+# the FreeBSD-foo package, and subpackages will be created for -dev, -lib32,
+# and so on. If LIB_PACKAGE is set, then we also create a subpackage for
+# runtime libraries with a -lib suffix. This is used when a package has
+# libraries and some other content (e.g., executables) to allow consumers to
+# depend on the libraries.
+.if defined(LIB_PACKAGE) && ! ${TAGS:Mlib*}
+.if !defined(PACKAGE)
+.error LIB_PACKAGE cannot be used without PACKAGE
+.endif
+
+LIB_TAG_ARGS= ${TAG_ARGS},lib
+.else
+LIB_TAG_ARGS= ${TAG_ARGS}
.endif
+TAG_ARGS= -T ${TAGS:ts,:[*]}
+
+DBG_TAG_ARGS= ${TAG_ARGS},dbg
+DEV_TAG_ARGS= ${TAG_ARGS},dev
+.endif # defined(NO_ROOT)
+
# ELF hardening knobs
.if ${MK_BIND_NOW} != "no"
LDFLAGS+= -Wl,-znow
@@ -130,18 +132,6 @@ CXXFLAGS+= -fzero-call-used-regs=${ZEROREG_TYPE}
# bsd.sanitizer.mk is not installed, so don't require it (e.g. for ports).
.sinclude "bsd.sanitizer.mk"
-.if ${MK_DEBUG_FILES} != "no" && empty(DEBUG_FLAGS:M-g) && \
- empty(DEBUG_FLAGS:M-gdwarf*)
-.if !${COMPILER_FEATURES:Mcompressed-debug}
-CFLAGS+= ${DEBUG_FILES_CFLAGS:N-gz*}
-CXXFLAGS+= ${DEBUG_FILES_CFLAGS:N-gz*}
-.else
-CFLAGS+= ${DEBUG_FILES_CFLAGS}
-CXXFLAGS+= ${DEBUG_FILES_CFLAGS}
-.endif
-CTFFLAGS+= -g
-.endif
-
.if ${MACHINE_CPUARCH} == "riscv" && ${LINKER_FEATURES:Mriscv-relaxations} == ""
CFLAGS += -mno-relax
.endif
@@ -156,6 +146,7 @@ _SHLIBDIR:=${SHLIBDIR}
.if defined(SHLIB_NAME)
.if ${MK_DEBUG_FILES} != "no"
SHLIB_NAME_FULL=${SHLIB_NAME}.full
+DEBUGFILE= ${SHLIB_NAME}.debug
# Use ${DEBUGDIR} for base system debug files, else .debug subdirectory
.if ${_SHLIBDIR} == "/boot" ||\
${SHLIBDIR:C%/lib(/.*)?$%/lib%} == "/lib" ||\
@@ -272,16 +263,16 @@ ${SHLIB_NAME_FULL}: ${SOBJS}
.endif
.if ${MK_DEBUG_FILES} != "no"
-CLEANFILES+= ${SHLIB_NAME_FULL} ${SHLIB_NAME}.debug
-${SHLIB_NAME}: ${SHLIB_NAME_FULL} ${SHLIB_NAME}.debug
- ${OBJCOPY} --strip-debug --add-gnu-debuglink=${SHLIB_NAME}.debug \
+CLEANFILES+= ${SHLIB_NAME_FULL} ${DEBUGFILE}
+${SHLIB_NAME}: ${SHLIB_NAME_FULL} ${DEBUGFILE}
+ ${OBJCOPY} --strip-debug --add-gnu-debuglink=${DEBUGFILE} \
${SHLIB_NAME_FULL} ${.TARGET}
.if defined(SHLIB_LINK) && !commands(${SHLIB_LINK:R}.ld)
# Note: This uses ln instead of ${INSTALL_LIBSYMLINK} since we are in OBJDIR
@${LN:Uln} -fs ${SHLIB_NAME} ${SHLIB_LINK}
.endif
-${SHLIB_NAME}.debug: ${SHLIB_NAME_FULL}
+${DEBUGFILE}: ${SHLIB_NAME_FULL}
${OBJCOPY} --only-keep-debug ${SHLIB_NAME_FULL} ${.TARGET}
.endif
.endif #defined(SHLIB_NAME)
@@ -384,7 +375,7 @@ _SHLINSTALLFLAGS:= ${_SHLINSTALLFLAGS${ie}}
installpcfiles: installpcfiles-${pcfile}
installpcfiles-${pcfile}: ${pcfile}
- ${INSTALL} ${TAG_ARGS:D${TAG_ARGS},dev} -o ${LIBOWN} -g ${LIBGRP} -m ${LIBMODE} \
+ ${INSTALL} ${DEV_TAG_ARGS} -o ${LIBOWN} -g ${LIBGRP} -m ${LIBMODE} \
${_INSTALLFLAGS} \
${.ALLSRC} ${DESTDIR}${LIBDATADIR}/pkgconfig/
.endfor
@@ -392,49 +383,42 @@ installpcfiles-${pcfile}: ${pcfile}
installpcfiles: .PHONY
.if !defined(INTERNALLIB)
-realinstall: _libinstall installpcfiles
-.ORDER: beforeinstall _libinstall
+realinstall: _libinstall installpcfiles _debuginstall
+.ORDER: beforeinstall _libinstall _debuginstall
_libinstall:
.if defined(LIB) && !empty(LIB) && ${MK_INSTALLLIB} != "no"
- ${INSTALL} ${TAG_ARGS:D${TAG_ARGS},dev} -o ${LIBOWN} -g ${LIBGRP} -m ${LIBMODE} \
+ ${INSTALL} ${DEV_TAG_ARGS} -o ${LIBOWN} -g ${LIBGRP} -m ${LIBMODE} \
${_INSTALLFLAGS} lib${LIB_PRIVATE}${LIB}${_STATICLIB_SUFFIX}.a ${DESTDIR}${_LIBDIR}/
.endif
.if defined(SHLIB_NAME)
- ${INSTALL} ${TAG_ARGS} ${STRIP} -o ${LIBOWN} -g ${LIBGRP} -m ${LIBMODE} \
+ ${INSTALL} ${LIB_TAG_ARGS} ${STRIP} -o ${LIBOWN} -g ${LIBGRP} -m ${LIBMODE} \
${_INSTALLFLAGS} ${_SHLINSTALLFLAGS} \
${SHLIB_NAME} ${DESTDIR}${_SHLIBDIR}/
-.if ${MK_DEBUG_FILES} != "no"
-.if defined(DEBUGMKDIR)
- ${INSTALL} ${TAG_ARGS:D${TAG_ARGS},dbg} -d ${DESTDIR}${DEBUGFILEDIR}/
-.endif
- ${INSTALL} ${TAG_ARGS:D${TAG_ARGS},dbg} -o ${LIBOWN} -g ${LIBGRP} -m ${DEBUGMODE} \
- ${_INSTALLFLAGS} \
- ${SHLIB_NAME}.debug ${DESTDIR}${DEBUGFILEDIR}/
-.endif
.if defined(SHLIB_LINK)
.if commands(${SHLIB_LINK:R}.ld)
- ${INSTALL} ${TAG_ARGS:D${TAG_ARGS},dev} -S -o ${LIBOWN} -g ${LIBGRP} -m ${LIBMODE} \
+ ${INSTALL} ${DEV_TAG_ARGS} -S -o ${LIBOWN} -g ${LIBGRP} -m ${LIBMODE} \
${_INSTALLFLAGS} ${SHLIB_LINK:R}.ld \
${DESTDIR}${_LIBDIR}/${SHLIB_LINK}
.for _SHLIB_LINK_LINK in ${SHLIB_LDSCRIPT_LINKS}
- ${INSTALL_LIBSYMLINK} ${_SHLINSTALLSYMLINKFLAGS} ${TAG_ARGS} ${SHLIB_LINK} \
- ${DESTDIR}${_LIBDIR}/${_SHLIB_LINK_LINK}
+ ${INSTALL_LIBSYMLINK} ${_SHLINSTALLSYMLINKFLAGS} ${LIB_TAG_ARGS} \
+ ${SHLIB_LINK} ${DESTDIR}${_LIBDIR}/${_SHLIB_LINK_LINK}
.endfor
.else
.if ${_SHLIBDIR} == ${_LIBDIR}
.if ${SHLIB_LINK:Mlib*}
- ${INSTALL_RSYMLINK} ${_SHLINSTALLSYMLINKFLAGS} ${TAG_ARGS:D${TAG_ARGS},dev} \
+ ${INSTALL_RSYMLINK} ${_SHLINSTALLSYMLINKFLAGS} ${DEV_TAG_ARGS} \
${SHLIB_NAME} ${DESTDIR}${_LIBDIR}/${SHLIB_LINK}
.else
- ${INSTALL_RSYMLINK} ${_SHLINSTALLSYMLINKFLAGS} ${TAG_ARGS} ${DESTDIR}${_SHLIBDIR}/${SHLIB_NAME} \
+ ${INSTALL_RSYMLINK} ${_SHLINSTALLSYMLINKFLAGS} ${LIB_TAG_ARGS} \
+ ${DESTDIR}${_SHLIBDIR}/${SHLIB_NAME} \
${DESTDIR}${_LIBDIR}/${SHLIB_LINK}
.endif
.else
.if ${SHLIB_LINK:Mlib*}
- ${INSTALL_RSYMLINK} ${_SHLINSTALLSYMLINKFLAGS} ${TAG_ARGS:D${TAG_ARGS},dev} \
+ ${INSTALL_RSYMLINK} ${_SHLINSTALLSYMLINKFLAGS} ${DEV_TAG_ARGS} \
${DESTDIR}${_SHLIBDIR}/${SHLIB_NAME} ${DESTDIR}${_LIBDIR}/${SHLIB_LINK}
.else
- ${INSTALL_RSYMLINK} ${_SHLINSTALLSYMLINKFLAGS} ${TAG_ARGS} \
+ ${INSTALL_RSYMLINK} ${_SHLINSTALLSYMLINKFLAGS} ${LIB_TAG_ARGS} \
${DESTDIR}${_SHLIBDIR}/${SHLIB_NAME} ${DESTDIR}${_LIBDIR}/${SHLIB_LINK}
.endif
.if exists(${DESTDIR}${_LIBDIR}/${SHLIB_NAME})
@@ -446,7 +430,7 @@ _libinstall:
.endif # SHLIB_LINK
.endif # SHIB_NAME
.if defined(INSTALL_PIC_ARCHIVE) && defined(LIB) && !empty(LIB)
- ${INSTALL} ${TAG_ARGS:D${TAG_ARGS},dev} -o ${LIBOWN} -g ${LIBGRP} -m ${LIBMODE} \
+ ${INSTALL} ${DEV_TAG_ARGS} -o ${LIBOWN} -g ${LIBGRP} -m ${LIBMODE} \
${_INSTALLFLAGS} lib${LIB}_pic.a ${DESTDIR}${_LIBDIR}/
.endif
.endif # !defined(INTERNALLIB)
@@ -466,7 +450,7 @@ LINKGRP?= ${LIBGRP}
LINKMODE?= ${LIBMODE}
SYMLINKOWN?= ${LIBOWN}
SYMLINKGRP?= ${LIBGRP}
-LINKTAGS= dev
+LINKTAGS= dev${_COMPAT_TAG}
.include <bsd.links.mk>
.if ${MK_MAN} != "no" && !defined(LIBRARIES_ONLY)
@@ -501,6 +485,7 @@ SUBDIR_TARGETS+= check
TESTS_LD_LIBRARY_PATH+= ${.OBJDIR}
.endif
+.include <bsd.debug.mk>
.include <bsd.dep.mk>
.include <bsd.clang-analyze.mk>
.include <bsd.obj.mk>
diff --git a/share/mk/bsd.opts.mk b/share/mk/bsd.opts.mk
index 85247d733a14..439924d0d596 100644
--- a/share/mk/bsd.opts.mk
+++ b/share/mk/bsd.opts.mk
@@ -78,6 +78,7 @@ __DEFAULT_NO_OPTIONS = \
CCACHE_BUILD \
CTF \
INSTALL_AS_USER \
+ REPRODUCIBLE_BUILD \
RETPOLINE \
RUN_TESTS \
STALE_STAGED \
diff --git a/share/mk/bsd.own.mk b/share/mk/bsd.own.mk
index 00a048fedc1d..4dffe9723a9e 100644
--- a/share/mk/bsd.own.mk
+++ b/share/mk/bsd.own.mk
@@ -44,6 +44,10 @@
#
# DEBUGMODE Mode for debug files. [${NOBINMODE}]
#
+# DEBUGOWN Owner for debug info files. [root]
+#
+# DEBUGGRP Group for debug info files. [wheel]
+#
#
# KMODDIR Base path for loadable kernel modules
# (see kld(4)). [/boot/modules]
@@ -197,7 +201,8 @@ LIBMODE?= ${NOBINMODE}
DEBUGDIR?= /usr/lib/debug
DEBUGMODE?= ${NOBINMODE}
-
+DEBUGOWN?= ${BINOWN}
+DEBUGGRP?= ${BINGRP}
# Share files
SHAREDIR?= /usr/share
diff --git a/share/mk/bsd.prog.mk b/share/mk/bsd.prog.mk
index 9350d4786cec..10e1c177e2b2 100644
--- a/share/mk/bsd.prog.mk
+++ b/share/mk/bsd.prog.mk
@@ -12,22 +12,6 @@
CFLAGS+=${COPTS}
.endif
-.if ${MK_ASSERT_DEBUG} == "no"
-CFLAGS+= -DNDEBUG
-# XXX: shouldn't we ensure that !asserts marks potentially unused variables as
-# __unused instead of disabling -Werror globally?
-MK_WERROR= no
-.endif
-
-.if defined(DEBUG_FLAGS)
-CFLAGS+=${DEBUG_FLAGS}
-CXXFLAGS+=${DEBUG_FLAGS}
-
-.if ${MK_CTF} != "no" && ${DEBUG_FLAGS:M-g} != ""
-CTFFLAGS+= -g
-.endif
-.endif
-
.if defined(PROG_CXX)
PROG= ${PROG_CXX}
.endif
@@ -109,20 +93,6 @@ CFLAGS += -mno-relax
.if defined(CRUNCH_CFLAGS)
CFLAGS+=${CRUNCH_CFLAGS}
-.else
-.if ${MK_DEBUG_FILES} != "no" && empty(DEBUG_FLAGS:M-g) && \
- empty(DEBUG_FLAGS:M-gdwarf-*)
-.if !${COMPILER_FEATURES:Mcompressed-debug}
-CFLAGS+= ${DEBUG_FILES_CFLAGS:N-gz*}
-.else
-CFLAGS+= ${DEBUG_FILES_CFLAGS}
-.endif
-CTFFLAGS+= -g
-.endif
-.endif
-
-.if !defined(DEBUG_FLAGS)
-STRIP?= -s
.endif
.if defined(NO_ROOT)
@@ -159,6 +129,9 @@ PROG_FULL= ${PROG}
.if defined(PROG)
PROGNAME?= ${PROG}
+.if ${MK_DEBUG_FILES} != "no"
+DEBUGFILE= ${PROGNAME}.debug
+.endif
.if defined(SRCS)
@@ -223,11 +196,12 @@ ${PROG_FULL}: ${OBJS}
.endif # !defined(SRCS)
.if ${MK_DEBUG_FILES} != "no"
-${PROG}: ${PROG_FULL} ${PROGNAME}.debug
- ${OBJCOPY} --strip-debug --add-gnu-debuglink=${PROGNAME}.debug \
+CLEANFILES+= ${PROG_FULL} ${DEBUGFILE}
+${PROG}: ${PROG_FULL} ${DEBUGFILE}
+ ${OBJCOPY} --strip-debug --add-gnu-debuglink=${DEBUGFILE} \
${PROG_FULL} ${.TARGET}
-${PROGNAME}.debug: ${PROG_FULL}
+${DEBUGFILE}: ${PROG_FULL}
${OBJCOPY} --only-keep-debug ${PROG_FULL} ${.TARGET}
.endif
@@ -266,9 +240,6 @@ all: all-man
.if defined(PROG)
CLEANFILES+= ${PROG} ${PROG}.bc ${PROG}.ll
-.if ${MK_DEBUG_FILES} != "no"
-CLEANFILES+= ${PROG_FULL} ${PROGNAME}.debug
-.endif
.endif
.if defined(OBJS)
@@ -308,19 +279,12 @@ _INSTALLFLAGS:= ${_INSTALLFLAGS${ie}}
.endfor
.if !target(realinstall) && !defined(INTERNALPROG)
-realinstall: _proginstall
-.ORDER: beforeinstall _proginstall
+realinstall: _proginstall _debuginstall
+.ORDER: beforeinstall _proginstall _debuginstall
_proginstall:
.if defined(PROG)
${INSTALL} ${TAG_ARGS} ${STRIP} -o ${BINOWN} -g ${BINGRP} -m ${BINMODE} \
${_INSTALLFLAGS} ${PROG} ${DESTDIR}${BINDIR}/${PROGNAME}
-.if ${MK_DEBUG_FILES} != "no"
-.if defined(DEBUGMKDIR)
- ${INSTALL} ${TAG_ARGS:D${TAG_ARGS},dbg} -d ${DESTDIR}${DEBUGFILEDIR}/
-.endif
- ${INSTALL} ${TAG_ARGS:D${TAG_ARGS},dbg} -o ${BINOWN} -g ${BINGRP} -m ${DEBUGMODE} \
- ${PROGNAME}.debug ${DESTDIR}${DEBUGFILEDIR}/${PROGNAME}.debug
-.endif
.endif
.endif # !target(realinstall)
@@ -391,6 +355,7 @@ TESTS_PATH+= ${.OBJDIR}
OBJS_DEPEND_GUESS+= ${SRCS:M*.h}
.endif
+.include <bsd.debug.mk>
.include <bsd.dep.mk>
.include <bsd.clang-analyze.mk>
.include <bsd.obj.mk>
diff --git a/share/mk/local.dirdeps.mk b/share/mk/local.dirdeps.mk
index a92539689a31..bdc7242d4bfd 100644
--- a/share/mk/local.dirdeps.mk
+++ b/share/mk/local.dirdeps.mk
@@ -185,7 +185,7 @@ C_DIRDEPS= \
# libgcc is needed as well but is added later.
-.if ${MK_GSSAPI} != "no"
+.if ${MK_KERBEROS} != "no" && ${MK_MITKRB5} == "no"
C_DIRDEPS+= include/gssapi
.endif
diff --git a/share/mk/src.opts.mk b/share/mk/src.opts.mk
index f146a4b24424..85a003eb4eaf 100644
--- a/share/mk/src.opts.mk
+++ b/share/mk/src.opts.mk
@@ -80,7 +80,6 @@ __DEFAULT_YES_OPTIONS = \
CDDL \
CLANG \
CLANG_BOOTSTRAP \
- CLEAN \
CPP \
CROSS_COMPILER \
CRYPT \
@@ -201,6 +200,7 @@ __DEFAULT_NO_OPTIONS = \
BHYVE_SNAPSHOT \
CLANG_EXTRAS \
CLANG_FORMAT \
+ CLEAN \
DIALOG \
DETECT_TZ_CHANGES \
DISK_IMAGE_TOOLS_BOOTSTRAP \
@@ -508,7 +508,7 @@ MK_LOADER_VERIEXEC_PASS_MANIFEST := no
# MK_* options whose default value depends on another option.
#
.for vv in \
- GSSAPI/KERBEROS \
+ KERBEROS_SUPPORT/KERBEROS \
MAN_UTILS/MAN
.if defined(WITH_${vv:H})
MK_${vv:H}:= yes
diff --git a/share/mk/src.sys.mk b/share/mk/src.sys.mk
index d5c2af0c559d..2b9fc255a26d 100644
--- a/share/mk/src.sys.mk
+++ b/share/mk/src.sys.mk
@@ -42,7 +42,7 @@ CFLAGS+= ${CFCOMMONFLAG}
CFLAGS+= -fmacro-prefix-map=${SRCTOP}=/usr/src -fdebug-prefix-map=${SRCTOP}=/usr/src
.endif
-DEFAULTWARNS= 6
+DEFAULTWARNS?= 6
# tempting, but bsd.compiler.mk causes problems this early
# probably need to remove dependence on bsd.own.mk
diff --git a/stand/common/dev_net.c b/stand/common/dev_net.c
index 964fa514cac5..d1c48d40691a 100644
--- a/stand/common/dev_net.c
+++ b/stand/common/dev_net.c
@@ -66,6 +66,10 @@
#include "dev_net.h"
#include "bootstrap.h"
+#ifndef NETPROTO_DEFAULT
+# define NETPROTO_DEFAULT NET_NFS
+#endif
+
static char *netdev_name;
static int netdev_sock = -1;
static int netdev_opens;
@@ -182,6 +186,7 @@ net_open(struct open_file *f, ...)
setenv("boot.netif.mtu", mtu, 1);
}
+ DEBUG_PRINTF(1,("%s: netproto=%d\n", __func__, netproto));
}
netdev_opens++;
dev->d_opendata = &netdev_sock;
@@ -193,7 +198,7 @@ net_close(struct open_file *f)
{
struct devdesc *dev;
- DEBUG_PRINTF(1,("%s: opens=%d\n", __func__, netdev_opens));
+ DEBUG_PRINTF(2,("%s: opens=%d\n", __func__, netdev_opens));
dev = f->f_devdata;
dev->d_opendata = NULL;
@@ -303,7 +308,7 @@ net_getparams(int sock)
return (EIO);
}
exit:
- if ((rootaddr = net_parse_rootpath()) != INADDR_NONE)
+ if ((rootaddr = net_parse_rootpath()) != htonl(INADDR_NONE))
rootip.s_addr = rootaddr;
DEBUG_PRINTF(1,("%s: proto: %d\n", __func__, netproto));
@@ -344,11 +349,17 @@ net_print(int verbose)
return (ret);
}
+bool
+is_tftp(void)
+{
+ return (netproto == NET_TFTP);
+}
+
/*
* Parses the rootpath if present
*
* The rootpath format can be in the form
- * <scheme>://ip/path
+ * <scheme>://ip[:port]/path
* <scheme>:/path
*
* For compatibility with previous behaviour it also accepts as an NFS scheme
@@ -363,10 +374,10 @@ net_print(int verbose)
uint32_t
net_parse_rootpath(void)
{
- n_long addr = htonl(INADDR_NONE);
+ n_long addr = 0;
size_t i;
char ip[FNAME_SIZE];
- char *ptr, *val;
+ char *ptr, *portp, *val;
netproto = NET_NONE;
@@ -381,7 +392,7 @@ net_parse_rootpath(void)
ptr = rootpath;
/* Fallback for compatibility mode */
if (netproto == NET_NONE) {
- netproto = NET_NFS;
+ netproto = NETPROTO_DEFAULT;
(void)strsep(&ptr, ":");
if (ptr != NULL) {
addr = inet_addr(rootpath);
@@ -394,16 +405,21 @@ net_parse_rootpath(void)
if (*ptr == '/') {
/* we are in the form <scheme>://, we do expect an ip */
ptr++;
- /*
- * XXX when http will be there we will need to check for
- * a port, but right now we do not need it yet
- */
+ portp = val = strchr(ptr, ':');
+ if (val != NULL) {
+ val++;
+ rootport = strtol(val, NULL, 10);
+ }
val = strchr(ptr, '/');
if (val != NULL) {
+ if (portp == NULL)
+ portp = val;
snprintf(ip, sizeof(ip), "%.*s",
- (int)((uintptr_t)val - (uintptr_t)ptr),
+ (int)(portp - ptr),
ptr);
addr = inet_addr(ip);
+ DEBUG_PRINTF(1,("ip=%s addr=%#x\n",
+ ip, addr));
bcopy(val, rootpath, strlen(val) + 1);
}
} else {
@@ -411,6 +427,7 @@ net_parse_rootpath(void)
bcopy(ptr, rootpath, strlen(ptr) + 1);
}
}
-
+ if (addr == 0)
+ addr = htonl(INADDR_NONE);
return (addr);
}
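
As a worked example of the extended syntax (hypothetical addresses):
with rootpath set to tftp://192.0.2.1:8069/ro/root, net_parse_rootpath()
leaves netproto = NET_TFTP, stores 8069 in the new rootport global,
rewrites rootpath to /ro/root, and returns inet_addr("192.0.2.1");
without a :port part, rootport keeps its default of 0 and the address
parsing is unchanged.
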
diff --git a/stand/common/gfx_fb.c b/stand/common/gfx_fb.c
index af72ab1a4c99..659bf8540422 100644
--- a/stand/common/gfx_fb.c
+++ b/stand/common/gfx_fb.c
@@ -232,6 +232,69 @@ gfx_parse_mode_str(char *str, int *x, int *y, int *depth)
return (true);
}
+/*
+ * Returns true if we set the color from a pre-existing environment variable,
+ * false if we just used the existing defaults.
+ */
+static bool
+gfx_fb_evalcolor(const char *envname, teken_color_t *cattr,
+ ev_sethook_t sethook, ev_unsethook_t unsethook)
+{
+ const char *ptr;
+ char env[10];
+ int eflags = EV_VOLATILE | EV_NOKENV;
+ bool from_env = false;
+
+ ptr = getenv(envname);
+ if (ptr != NULL) {
+ *cattr = strtol(ptr, NULL, 10);
+
+ /*
+ * If we can't unset the value, then it's probably hooked
+ * properly and we can just carry on. Otherwise, we want to
+ * reinitialize it so that we can hook it for the console that
+ * we're resetting defaults for.
+ */
+ if (unsetenv(envname) != 0)
+ return (true);
+ from_env = true;
+
+ /*
+ * If we're carrying over an existing value, we *do* want that
+ * to propagate to the kenv.
+ */
+ eflags &= ~EV_NOKENV;
+ }
+
+ snprintf(env, sizeof(env), "%d", *cattr);
+ env_setenv(envname, eflags, env, sethook, unsethook);
+
+ return (from_env);
+}
+
+void
+gfx_fb_setcolors(teken_attr_t *attr, ev_sethook_t sethook,
+ ev_unsethook_t unsethook)
+{
+ const char *ptr;
+ bool need_setattr = false;
+
+ /*
+ * On first run, we setup an environment hook to process any color
+ * changes. If the env is already set, we pick up fg and bg color
+ * values from the environment.
+ */
+ if (gfx_fb_evalcolor("teken.fg_color", &attr->ta_fgcolor,
+ sethook, unsethook))
+ need_setattr = true;
+ if (gfx_fb_evalcolor("teken.bg_color", &attr->ta_bgcolor,
+ sethook, unsethook))
+ need_setattr = true;
+
+ if (need_setattr)
+ teken_set_defattr(&gfx_state.tg_teken, attr);
+}
+
static uint32_t
rgb_color_map(uint8_t index, uint32_t rmax, int roffset,
uint32_t gmax, int goffset, uint32_t bmax, int boffset)
diff --git a/stand/common/gfx_fb.h b/stand/common/gfx_fb.h
index 17e419d8ffd3..d12bcd76b7fa 100644
--- a/stand/common/gfx_fb.h
+++ b/stand/common/gfx_fb.h
@@ -277,6 +277,7 @@ void gfx_fb_bezier(uint32_t, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t,
int gfx_fb_putimage(png_t *, uint32_t, uint32_t, uint32_t, uint32_t, uint32_t);
bool gfx_parse_mode_str(char *, int *, int *, int *);
+void gfx_fb_setcolors(teken_attr_t *, ev_sethook_t, ev_unsethook_t);
void term_image_display(teken_gfx_t *, const teken_rect_t *);
void reset_font_flags(void);
diff --git a/stand/common/misc.c b/stand/common/misc.c
index 402213100951..a7c46ad2e74c 100644
--- a/stand/common/misc.c
+++ b/stand/common/misc.c
@@ -220,3 +220,16 @@ set_currdev(const char *devname)
env_setenv("loaddev", EV_VOLATILE | EV_NOHOOK, devname, env_noset,
env_nounset);
}
+
+#ifndef LOADER_NET_SUPPORT
+/*
+ * This API is normally provided by dev_net.c.
+ * This stub keeps libsa happy when LOADER_NET_SUPPORT
+ * is not enabled.
+ */
+bool
+is_tftp(void)
+{
+	return (false);
+}
+#endif
diff --git a/stand/common/modinfo.c b/stand/common/modinfo.c
index 313469d32f35..1e39bd858cc2 100644
--- a/stand/common/modinfo.c
+++ b/stand/common/modinfo.c
@@ -172,6 +172,8 @@ md_copyenv(vm_offset_t start)
/* Traverse the environment. */
for (ep = environ; ep != NULL; ep = ep->ev_next) {
+ if ((ep->ev_flags & EV_NOKENV) != 0)
+ continue;
len = strlen(ep->ev_name);
if ((size_t)archsw.arch_copyin(ep->ev_name, addr, len) != len)
break;
diff --git a/stand/defs.mk b/stand/defs.mk
index 8ef84267b198..eb4133b604eb 100644
--- a/stand/defs.mk
+++ b/stand/defs.mk
@@ -207,6 +207,8 @@ LOADER_INTERP?=${LOADER_DEFAULT_INTERP}
# Make sure we use the machine link we're about to create
CFLAGS+=-I.
+.include "${BOOTSRC}/veriexec.mk"
+
all: ${PROG}
CLEANFILES+= teken_state.h
diff --git a/stand/efi/include/amd64/pe.h b/stand/efi/include/amd64/pe.h
index b858ba916c7d..faf8eafde1e9 100644
--- a/stand/efi/include/amd64/pe.h
+++ b/stand/efi/include/amd64/pe.h
@@ -83,7 +83,7 @@ typedef struct _IMAGE_FILE_HEADER {
#define IMAGE_SIZEOF_FILE_HEADER 20
#define IMAGE_FILE_RELOCS_STRIPPED 0x0001 // Relocation info stripped from file.
-#define IMAGE_FILE_EXECUTABLE_IMAGE 0x0002 // File is executable (i.e. no unresolved externel references).
+#define IMAGE_FILE_EXECUTABLE_IMAGE 0x0002 // File is executable (i.e. no unresolved external references).
#define IMAGE_FILE_LINE_NUMS_STRIPPED 0x0004 // Line nunbers stripped from file.
#define IMAGE_FILE_LOCAL_SYMS_STRIPPED 0x0008 // Local symbols stripped from file.
#define IMAGE_FILE_BYTES_REVERSED_LO 0x0080 // Bytes of machine word are reversed.
diff --git a/stand/efi/include/i386/pe.h b/stand/efi/include/i386/pe.h
index c756080fe2d7..226c6c7564a7 100644
--- a/stand/efi/include/i386/pe.h
+++ b/stand/efi/include/i386/pe.h
@@ -83,7 +83,7 @@ typedef struct _IMAGE_FILE_HEADER {
#define IMAGE_SIZEOF_FILE_HEADER 20
#define IMAGE_FILE_RELOCS_STRIPPED 0x0001 // Relocation info stripped from file.
-#define IMAGE_FILE_EXECUTABLE_IMAGE 0x0002 // File is executable (i.e. no unresolved externel references).
+#define IMAGE_FILE_EXECUTABLE_IMAGE 0x0002 // File is executable (i.e. no unresolved external references).
#define IMAGE_FILE_LINE_NUMS_STRIPPED 0x0004 // Line nunbers stripped from file.
#define IMAGE_FILE_LOCAL_SYMS_STRIPPED 0x0008 // Local symbols stripped from file.
#define IMAGE_FILE_BYTES_REVERSED_LO 0x0080 // Bytes of machine word are reversed.
diff --git a/stand/efi/libefi/efi_console.c b/stand/efi/libefi/efi_console.c
index cbb4dd01d1fb..46a3c957f151 100644
--- a/stand/efi/libefi/efi_console.c
+++ b/stand/efi/libefi/efi_console.c
@@ -1041,28 +1041,7 @@ cons_update_mode(bool use_gfx_mode)
a = teken_get_defattr(&gfx_state.tg_teken);
attr = *a;
- /*
- * On first run, we set up the efi_set_colors()
- * callback. If the env is already set, we
- * pick up fg and bg color values from the environment.
- */
- ptr = getenv("teken.fg_color");
- if (ptr != NULL) {
- attr.ta_fgcolor = strtol(ptr, NULL, 10);
- ptr = getenv("teken.bg_color");
- attr.ta_bgcolor = strtol(ptr, NULL, 10);
-
- teken_set_defattr(&gfx_state.tg_teken, &attr);
- } else {
- snprintf(env, sizeof(env), "%d",
- attr.ta_fgcolor);
- env_setenv("teken.fg_color", EV_VOLATILE, env,
- efi_set_colors, env_nounset);
- snprintf(env, sizeof(env), "%d",
- attr.ta_bgcolor);
- env_setenv("teken.bg_color", EV_VOLATILE, env,
- efi_set_colors, env_nounset);
- }
+ gfx_fb_setcolors(&attr, efi_set_colors, env_nounset);
}
}
diff --git a/stand/i386/libi386/vidconsole.c b/stand/i386/libi386/vidconsole.c
index 414803e9af3d..3938bd7822ea 100644
--- a/stand/i386/libi386/vidconsole.c
+++ b/stand/i386/libi386/vidconsole.c
@@ -956,26 +956,7 @@ cons_update_mode(bool use_gfx_mode)
a = teken_get_defattr(&gfx_state.tg_teken);
attr = *a;
- /*
- * On first run, we set up the vidc_set_colors()
- * callback. If the env is already set, we
- * pick up fg and bg color values from the environment.
- */
- ptr = getenv("teken.fg_color");
- if (ptr != NULL) {
- attr.ta_fgcolor = strtol(ptr, NULL, 10);
- ptr = getenv("teken.bg_color");
- attr.ta_bgcolor = strtol(ptr, NULL, 10);
-
- teken_set_defattr(&gfx_state.tg_teken, &attr);
- } else {
- snprintf(env, sizeof(env), "%d", attr.ta_fgcolor);
- env_setenv("teken.fg_color", EV_VOLATILE, env,
- vidc_set_colors, env_nounset);
- snprintf(env, sizeof(env), "%d", attr.ta_bgcolor);
- env_setenv("teken.bg_color", EV_VOLATILE, env,
- vidc_set_colors, env_nounset);
- }
+ gfx_fb_setcolors(&attr, vidc_set_colors, env_nounset);
/* Improve visibility */
if (attr.ta_bgcolor == TC_WHITE)
diff --git a/stand/kboot/kboot/main.c b/stand/kboot/kboot/main.c
index a8c725a514be..4a136b42a4a1 100644
--- a/stand/kboot/kboot/main.c
+++ b/stand/kboot/kboot/main.c
@@ -229,6 +229,7 @@ static struct mapping
uintptr_t pa;
caddr_t va;
} map[MAX_MAP];
+static bool smbios_mmap_file;
static int smbios_fd;
static int nmap;
@@ -238,12 +239,17 @@ caddr_t ptov(uintptr_t pa)
uintptr_t pa2;
struct mapping *m = map;
- pa2 = rounddown(pa, PAGE);
+ if (smbios_mmap_file)
+ pa2 = rounddown(pa, PAGE);
+ else
+ pa2 = pa;
for (int i = 0; i < nmap; i++, m++) {
if (m->pa == pa2) {
return (m->va + pa - m->pa);
}
}
+ if (!smbios_mmap_file)
+ panic("Out of bounds smbios access");
if (nmap == MAX_MAP)
panic("Too many maps for smbios");
@@ -298,6 +304,7 @@ static void
find_smbios(void)
{
char buf[40];
+ void *dmi_data;
uintptr_t pa;
caddr_t va;
@@ -306,17 +313,47 @@ find_smbios(void)
if (pa == 0)
return;
+ dmi_data = NULL;
+ smbios_fd = host_open("/sys/firmware/dmi/tables/DMI", O_RDONLY, 0);
+ if (smbios_fd >= 0) {
+ struct host_kstat sb;
+ struct mapping *m;
+
+ if (host_fstat(smbios_fd, &sb) < 0) {
+ host_close(smbios_fd);
+ goto try_dev_mem;
+ }
+
+ dmi_data = malloc(sb.st_size);
+ if (dmi_data == NULL) {
+ host_close(smbios_fd);
+ goto try_dev_mem;
+ }
+
+ host_read(smbios_fd, dmi_data, sb.st_size);
+
+ m = &map[nmap++];
+ m->pa = pa;
+ m->va = dmi_data;
+ smbios_mmap_file = false;
+ } else {
+try_dev_mem:
+ smbios_fd = host_open("/dev/mem", O_RDONLY, 0);
+ if (smbios_fd < 0) {
+ printf("Can't open /sys/firmware/dmi/tables/DMI or "
+ "/dev/mem to read smbios\n");
+ return;
+ }
+ smbios_mmap_file = true;
+ }
snprintf(buf, sizeof(buf), "%#jx", (uintmax_t)pa);
setenv("hint.smbios.0.mem", buf, 1);
- smbios_fd = host_open("/dev/mem", O_RDONLY, 0);
- if (smbios_fd < 0) {
- printf("Can't open /dev/mem to read smbios\n");
- return;
- }
+
va = ptov(pa);
printf("Start of smbios at pa %p va %p\n", (void *)pa, va);
smbios_detect(va);
smbios_cleanup();
+ free(dmi_data);
host_close(smbios_fd);
}
diff --git a/stand/libsa/bzipfs.c b/stand/libsa/bzipfs.c
index f4002796f0ae..ff7ec16e7dc6 100644
--- a/stand/libsa/bzipfs.c
+++ b/stand/libsa/bzipfs.c
@@ -68,6 +68,7 @@ static int bzf_stat(struct open_file *f, struct stat *sb);
#ifndef REGRESSION
struct fs_ops bzipfs_fsops = {
.fs_name = "bzip",
+ .fs_flags = 0,
.fo_open = bzf_open,
.fo_close = bzf_close,
.fo_read = bzf_read,
diff --git a/stand/libsa/cd9660.c b/stand/libsa/cd9660.c
index 973a7dddcda9..d1da39aa479a 100644
--- a/stand/libsa/cd9660.c
+++ b/stand/libsa/cd9660.c
@@ -81,6 +81,7 @@ static ISO_SUSP_HEADER *susp_lookup_record(struct open_file *f,
struct fs_ops cd9660_fsops = {
.fs_name = "cd9660",
+ .fs_flags = 0,
.fo_open = cd9660_open,
.fo_close = cd9660_close,
.fo_read = cd9660_read,
diff --git a/stand/libsa/dosfs.c b/stand/libsa/dosfs.c
index aca198cdf6fa..38610d917007 100644
--- a/stand/libsa/dosfs.c
+++ b/stand/libsa/dosfs.c
@@ -61,6 +61,7 @@ static int dos_unmount(const char *dev, void *data);
struct fs_ops dosfs_fsops = {
.fs_name = "dosfs",
+ .fs_flags = 0,
.fo_open = dos_open,
.fo_close = dos_close,
.fo_read = dos_read,
diff --git a/stand/libsa/environment.c b/stand/libsa/environment.c
index 95ee1718f8d4..d139249a8e84 100644
--- a/stand/libsa/environment.c
+++ b/stand/libsa/environment.c
@@ -66,6 +66,17 @@ env_setenv(const char *name, int flags, const void *value,
if ((ev = env_getenv(name)) != NULL) {
/*
+ * If the new value doesn't have NOKENV set, we'll drop the flag
+ * if it's set on the entry so that the override propagates
+ * correctly. We do this *before* sending it to the hook in
+ * case the hook declines to operate on it (e.g., because the
+ * value matches what was already set) -- we would still want
+ * the explicitly set value to propagate.
+ */
+ if (!(flags & EV_NOKENV))
+ ev->ev_flags &= ~EV_NOKENV;
+
+ /*
* If there's a set hook, let it do the work
* (unless we are working for one already).
*/
@@ -77,7 +88,6 @@ env_setenv(const char *name, int flags, const void *value,
free(ev->ev_value);
ev->ev_value = NULL;
ev->ev_flags &= ~EV_DYNAMIC;
-
} else {
/*
@@ -123,12 +133,13 @@ env_setenv(const char *name, int flags, const void *value,
/* If we have a new value, use it */
if (flags & EV_VOLATILE) {
ev->ev_value = strdup(value);
- ev->ev_flags |= EV_DYNAMIC;
+ flags |= EV_DYNAMIC;
} else {
ev->ev_value = (char *)value;
- ev->ev_flags |= flags & EV_DYNAMIC;
}
+ ev->ev_flags |= flags & (EV_DYNAMIC | EV_NOKENV);
+
return (0);
}
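
A sketch of the intended EV_NOKENV pattern (a loader-context fragment,
hooks passed as NULL for brevity; the variable name mirrors the
gfx_fb.c change above):

    /* Loader-internal default: EV_NOKENV keeps md_copyenv() from
     * copying the entry into the kernel environment. */
    env_setenv("teken.fg_color", EV_VOLATILE | EV_NOKENV, "7",
        NULL, NULL);

    /* A later explicit set without EV_NOKENV clears the flag on the
     * existing entry, so the override does reach the kenv. */
    env_setenv("teken.fg_color", EV_VOLATILE, "15", NULL, NULL);
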
diff --git a/stand/libsa/ext2fs.c b/stand/libsa/ext2fs.c
index 47812f4543a1..f7096282f156 100644
--- a/stand/libsa/ext2fs.c
+++ b/stand/libsa/ext2fs.c
@@ -106,6 +106,7 @@ static int dtmap[] = { DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR,
struct fs_ops ext2fs_fsops = {
.fs_name = "ext2fs",
+ .fs_flags = 0,
.fo_open = ext2fs_open,
.fo_close = ext2fs_close,
.fo_read = ext2fs_read,
diff --git a/stand/libsa/globals.c b/stand/libsa/globals.c
index 2797045d4faf..6bd3a4243d73 100644
--- a/stand/libsa/globals.c
+++ b/stand/libsa/globals.c
@@ -17,6 +17,7 @@
u_char bcea[6] = BA; /* broadcast ethernet address */
char rootpath[FNAME_SIZE] = "/"; /* root mount path */
+int rootport; /* port for rootpath server */
char bootfile[FNAME_SIZE]; /* bootp says to boot this */
char hostname[FNAME_SIZE]; /* our hostname */
int hostnamelen;
diff --git a/stand/libsa/gzipfs.c b/stand/libsa/gzipfs.c
index 6c2b8cac9e34..6b22f750f3ef 100644
--- a/stand/libsa/gzipfs.c
+++ b/stand/libsa/gzipfs.c
@@ -50,6 +50,7 @@ static int zf_stat(struct open_file *f, struct stat *sb);
struct fs_ops gzipfs_fsops = {
.fs_name = "zip",
+ .fs_flags = 0,
.fo_open = zf_open,
.fo_close = zf_close,
.fo_read = zf_read,
diff --git a/stand/libsa/libsa.3 b/stand/libsa/libsa.3
index 3e3f70610516..0947f97a0a1f 100644
--- a/stand/libsa/libsa.3
+++ b/stand/libsa/libsa.3
@@ -781,6 +781,10 @@ The same as
but for
.Xr bzip2 1 Ns -compressed
files.
+.It Va pkgfs_fsops
+File access from a tar file, typically streamed via TFTP.
+The order of files in the tar file must match the order in which they
+are to be consumed, as rewinding is not practical.
.El
.Pp
The array of
diff --git a/stand/libsa/mount.c b/stand/libsa/mount.c
index 73bf6ab8118c..c866dc9c7055 100644
--- a/stand/libsa/mount.c
+++ b/stand/libsa/mount.c
@@ -107,7 +107,10 @@ mount(const char *dev, const char *path, int flags __unused, void *data)
fs = file_system[i];
if (fs->fo_mount == NULL)
continue;
-
+ DEBUG_PRINTF(1,("%s: fs=%s path=%s\n",
+ __func__, fs->fs_name, path));
+ if (is_tftp())
+ break;
if (fs->fo_mount(dev, path, &data) != 0)
continue;
diff --git a/stand/libsa/net.h b/stand/libsa/net.h
index d4823d88f58b..945b6b9ea45f 100644
--- a/stand/libsa/net.h
+++ b/stand/libsa/net.h
@@ -75,6 +75,7 @@ enum net_proto {
extern u_char bcea[6];
extern char rootpath[FNAME_SIZE];
+extern int rootport;
extern char bootfile[FNAME_SIZE];
extern char hostname[FNAME_SIZE];
extern int hostnamelen;
diff --git a/stand/libsa/nfs.c b/stand/libsa/nfs.c
index ee6af8a726c7..f3e9060c9881 100644
--- a/stand/libsa/nfs.c
+++ b/stand/libsa/nfs.c
@@ -131,6 +131,7 @@ struct nfs_iodesc nfs_root_node;
struct fs_ops nfs_fsops = {
.fs_name = "nfs",
+ .fs_flags = 0,
.fo_open = nfs_open,
.fo_close = nfs_close,
.fo_read = nfs_read,
diff --git a/stand/libsa/open.c b/stand/libsa/open.c
index ccee4aa5c07b..91848aca7dbe 100644
--- a/stand/libsa/open.c
+++ b/stand/libsa/open.c
@@ -138,6 +138,8 @@ open(const char *fname, int mode)
struct fs_ops *fs;
struct open_file *f;
int fd, i, error, besterror;
+ bool is_dir;
+ size_t n;
const char *file;
TSENTER();
@@ -154,31 +156,42 @@ open(const char *fname, int mode)
f->f_devdata = NULL;
file = NULL;
+ if (exclusive_file_system == NULL ||
+ (exclusive_file_system->fs_flags & FS_OPS_NO_DEVOPEN) == 0) {
+ error = devopen(f, fname, &file);
+ if (error ||
+ (((f->f_flags & F_NODEV) == 0) && f->f_dev == NULL))
+ goto err;
+
+ /* see if we opened a raw device; otherwise, 'file' is the file name. */
+ if (file == NULL || *file == '\0') {
+ f->f_flags |= F_RAW;
+ f->f_rabuf = NULL;
+ TSEXIT();
+ return (fd);
+ }
+ } else
+ file = fname;
+
if (exclusive_file_system != NULL) {
+ /* loader is forcing the filesystem to be used */
fs = exclusive_file_system;
- error = (fs->fo_open)(fname, f);
+ error = (fs->fo_open)(file, f);
if (error == 0)
goto ok;
goto err;
}
- error = devopen(f, fname, &file);
- if (error ||
- (((f->f_flags & F_NODEV) == 0) && f->f_dev == NULL))
- goto err;
-
- /* see if we opened a raw device; otherwise, 'file' is the file name. */
- if (file == NULL || *file == '\0') {
- f->f_flags |= F_RAW;
- f->f_rabuf = NULL;
- TSEXIT();
- return (fd);
- }
-
/* pass file name to the different filesystem open routines */
besterror = ENOENT;
+ n = strlen(file);
+ is_dir = (n > 0 && file[n - 1] == '/');
for (i = 0; file_system[i] != NULL; i++) {
fs = file_system[i];
+ if (is_dir && is_tftp()) {
+ error = EOPNOTSUPP;
+ goto err;
+ }
error = (fs->fo_open)(file, f);
if (error == 0)
goto ok;
diff --git a/stand/libsa/pkgfs.c b/stand/libsa/pkgfs.c
index 32d488de5cfb..6eb3badf7068 100644
--- a/stand/libsa/pkgfs.c
+++ b/stand/libsa/pkgfs.c
@@ -41,6 +41,7 @@ static off_t pkg_atol(const char *, unsigned);
struct fs_ops pkgfs_fsops = {
.fs_name = "pkg",
+ .fs_flags = FS_OPS_NO_DEVOPEN,
.fo_open = pkg_open,
.fo_close = pkg_close,
.fo_read = pkg_read,
diff --git a/stand/libsa/splitfs.c b/stand/libsa/splitfs.c
index 69912522000e..eb4b3a1feb11 100644
--- a/stand/libsa/splitfs.c
+++ b/stand/libsa/splitfs.c
@@ -50,6 +50,7 @@ static int splitfs_stat(struct open_file *f, struct stat *sb);
struct fs_ops splitfs_fsops = {
.fs_name = "split",
+ .fs_flags = 0,
.fo_open = splitfs_open,
.fo_close = splitfs_close,
.fo_read = splitfs_read,
diff --git a/stand/libsa/stand.h b/stand/libsa/stand.h
index 0e99d8778fa6..aaba0aa7fb39 100644
--- a/stand/libsa/stand.h
+++ b/stand/libsa/stand.h
@@ -94,6 +94,8 @@ __BEGIN_DECLS
struct open_file;
+#define FS_OPS_NO_DEVOPEN 1
+
/*
* This structure is used to define file system operations in a file system
* independent way.
@@ -104,6 +106,7 @@ struct open_file;
*/
struct fs_ops {
const char *fs_name;
+ int fs_flags;
int (*fo_open)(const char *path, struct open_file *f);
int (*fo_close)(struct open_file *f);
int (*fo_read)(struct open_file *f, void *buf,
@@ -349,6 +352,7 @@ extern int pager_file(const char *fname);
#define EV_DYNAMIC (1<<0) /* value was dynamically allocated, free if changed/unset */
#define EV_VOLATILE (1<<1) /* value is volatile, make a copy of it */
#define EV_NOHOOK (1<<2) /* don't call hook when setting */
+#define EV_NOKENV (1<<3) /* don't add to kenv (loader-only) */
struct env_var;
typedef char *(ev_format_t)(struct env_var *ev);
@@ -504,6 +508,9 @@ extern void *reallocf(void *, size_t);
*/
caddr_t ptov(uintptr_t);
+/* dev_net.c */
+bool is_tftp(void);
+
/* features.c */
typedef void (feature_iter_fn)(void *, const char *, const char *, bool);
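
A hypothetical skeleton showing where the new field sits: when the
loader forces an exclusive filesystem whose fs_flags includes
FS_OPS_NO_DEVOPEN (as pkgfs now does), open() skips devopen() and
hands fo_open the caller's name verbatim. Members not listed default
to NULL in this sketch.

    #include "stand.h"

    static int
    myfs_open(const char *path, struct open_file *f)
    {
            /* 'path' arrives verbatim: no devopen() parsing. */
            return (ENOENT);
    }

    struct fs_ops myfs_fsops = {
            .fs_name = "myfs",
            .fs_flags = FS_OPS_NO_DEVOPEN,
            .fo_open = myfs_open,
    };
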
diff --git a/stand/libsa/tftp.c b/stand/libsa/tftp.c
index c6cc8f11a765..656c402683bb 100644
--- a/stand/libsa/tftp.c
+++ b/stand/libsa/tftp.c
@@ -50,6 +50,10 @@
#include <netinet/in_systm.h>
#include <arpa/tftp.h>
+#ifdef LOADER_VERIEXEC
+#include <verify_file.h>
+#endif
+
#include <string.h>
#include "stand.h"
@@ -73,6 +77,7 @@ static int tftp_preload(struct open_file *);
struct fs_ops tftp_fsops = {
.fs_name = "tftp",
+ .fs_flags = 0,
.fo_open = tftp_open,
.fo_close = tftp_close,
.fo_read = tftp_read,
@@ -84,7 +89,6 @@ struct fs_ops tftp_fsops = {
};
static int tftpport = 2000;
-static int is_open = 0;
/*
* The legacy TFTP_BLKSIZE value was SEGSIZE(512).
@@ -98,10 +102,14 @@ static int is_open = 0;
* Jumbo frames in the future.
*/
#define TFTP_MAX_BLKSIZE 9008
-#define TFTP_TRIES 2
+#define TFTP_TRIES 3
struct tftp_handle {
struct iodesc *iodesc;
+ struct iodesc io;
+ int id;
+ ino_t ino;
+ int port;
int currblock; /* contents of lastdata */
unsigned int islastblock:1; /* flag */
unsigned int tries:4; /* number of read attempts */
@@ -177,6 +185,9 @@ tftp_sendack(struct tftp_handle *h, u_short block)
wbuf.t.th_block = htons(block);
wtail += 2;
+ DEBUG_PRINTF(5,("%s: myport=%hu xid=%lu, block=%hu\n",
+ __func__, h->iodesc->myport, h->iodesc->xid, block));
+
sendudp(h->iodesc, &wbuf.t, wtail - (char *)&wbuf.t);
}
@@ -190,6 +201,7 @@ recvtftp(struct iodesc *d, void **pkt, void **payload, time_t tleft,
void *ptr = NULL;
ssize_t len;
int tftp_error;
+ unsigned short block;
errno = 0;
extra = recv_extra;
@@ -203,19 +215,22 @@ recvtftp(struct iodesc *d, void **pkt, void **payload, time_t tleft,
}
extra->rtype = ntohs(t->th_opcode);
- switch (ntohs(t->th_opcode)) {
+ block = ntohs(t->th_block);
+ DEBUG_PRINTF(6,("%s: myport=%hu xid=%lu, block=%hu, opcode=%hu\n",
+ __func__, d->myport, d->xid, block, extra->rtype));
+ switch (extra->rtype) {
case DATA: {
int got;
- if (htons(t->th_block) < (u_short)d->xid) {
+ if (block < (u_short)d->xid) {
/*
* Apparently our ACK was missed, re-send.
*/
- tftp_sendack(h, htons(t->th_block));
+ tftp_sendack(h, block);
free(ptr);
return (-1);
}
- if (htons(t->th_block) != (u_short)d->xid) {
+ if (block != (u_short)d->xid) {
/*
* Packet from the future, drop this.
*/
@@ -241,9 +256,7 @@ recvtftp(struct iodesc *d, void **pkt, void **payload, time_t tleft,
printf("illegal tftp error %d\n", tftp_error);
errno = EIO;
} else {
-#ifdef TFTP_DEBUG
- printf("tftp-error %d\n", tftp_error);
-#endif
+ DEBUG_PRINTF(0, ("tftp-error %d\n", tftp_error));
errno = tftperrors[tftp_error];
}
free(ptr);
@@ -284,9 +297,7 @@ recvtftp(struct iodesc *d, void **pkt, void **payload, time_t tleft,
return (0);
}
default:
-#ifdef TFTP_DEBUG
- printf("tftp type %d not handled\n", ntohs(t->th_opcode));
-#endif
+ DEBUG_PRINTF(0, ("tftp type %hu not handled\n", extra->rtype));
free(ptr);
return (-1);
}
@@ -343,7 +354,7 @@ tftp_makereq(struct tftp_handle *h)
bcopy("0", wtail, 2);
wtail += 2;
- h->iodesc->myport = htons(tftpport + (getsecs() & 0x3ff));
+ h->iodesc->myport = htons(h->port + (getsecs() & 0x3ff));
h->iodesc->destport = htons(IPPORT_TFTP);
h->iodesc->xid = 1; /* expected block */
@@ -351,11 +362,15 @@ tftp_makereq(struct tftp_handle *h)
h->islastblock = 0;
h->validsize = 0;
+ DEBUG_PRINTF(5,("%s: %s: id=%d port=%d myport=%hu xid=1\n",
+ __func__, h->path, h->id, h->port, ntohs(h->iodesc->myport)));
pkt = NULL;
recv_extra.tftp_handle = h;
res = sendrecv(h->iodesc, &sendudp, &wbuf.t, wtail - (char *)&wbuf.t,
&recvtftp, &pkt, (void **)&t, &recv_extra);
if (res == -1) {
+ DEBUG_PRINTF(3,("%s: %s: id=%d errno=%d\n",
+ __func__, h->path, h->id, errno));
free(pkt);
return (errno);
}
@@ -410,12 +425,18 @@ tftp_getnextblock(struct tftp_handle *h)
h->iodesc->xid = h->currblock + 1; /* expected block */
+ DEBUG_PRINTF(5,("%s: %s: id=%d port=%d myport=%hu xid=%lu\n",
+ __func__, h->path, h->id, h->port,
+ ntohs(h->iodesc->myport), h->iodesc->xid));
+
pkt = NULL;
recv_extra.tftp_handle = h;
res = sendrecv(h->iodesc, &sendudp, &wbuf.t, wtail - (char *)&wbuf.t,
&recvtftp, &pkt, (void **)&t, &recv_extra);
if (res == -1) { /* 0 is OK! */
+	DEBUG_PRINTF(3, ("%s: %s: id=%d errno=%d\n",
+ __func__, h->path, h->id, errno));
free(pkt);
return (errno);
}
@@ -428,21 +449,32 @@ tftp_getnextblock(struct tftp_handle *h)
if (res < h->tftp_blksize)
h->islastblock = 1; /* EOF */
- if (h->islastblock == 1) {
+	DEBUG_PRINTF(5, ("%s: %s: id=%d res=%d blksz=%d last=%d\n",
+ __func__, h->path, h->id, res, h->tftp_blksize, h->islastblock));
+
+ if (h->islastblock) {
/* Send an ACK for the last block */
- wbuf.t.th_block = htons((u_short)h->currblock);
- sendudp(h->iodesc, &wbuf.t, wtail - (char *)&wbuf.t);
+ tftp_sendack(h, h->currblock);
}
return (0);
}
+/*
+ * If doing verification, we need to handle multiple
+ * files at the same time.
+ */
+#define TOPEN_MAX 8
+static struct tftp_handle *handles[TOPEN_MAX];
+
static int
tftp_open(const char *path, struct open_file *f)
{
struct devdesc *dev;
struct tftp_handle *tftpfile;
struct iodesc *io;
+ static int lx = 0;
+ int i, x;
int res;
size_t pathsize;
const char *extraslash;
@@ -450,24 +482,39 @@ tftp_open(const char *path, struct open_file *f)
if (netproto != NET_TFTP)
return (EINVAL);
- if (f->f_dev->dv_type != DEVT_NET)
+ if (f->f_dev == NULL || f->f_dev->dv_type != DEVT_NET)
return (EINVAL);
- if (is_open)
+ tftpfile = NULL;
+ for (x = lx + 1, i = 0; i < TOPEN_MAX; i++, x++) {
+ x %= TOPEN_MAX;
+ if (handles[x] == NULL) {
+ handles[x] = tftpfile = calloc(1, sizeof(*tftpfile));
+ if (tftpfile == NULL)
+ return (ENOMEM);
+ /* id allows us to clear the slot on close */
+ tftpfile->id = lx = x;
+ /* port ensures a different session with server */
+ tftpfile->port = (tftpport + (x * tftpport)) & 0xffff;
+ DEBUG_PRINTF(1, ("%s(%s) id=%d port=%d\n",
+ __func__, path, tftpfile->id, tftpfile->port));
+ break;
+ }
+ }
+ if (tftpfile == NULL) {
+ DEBUG_PRINTF(1, ("%s: EBUSY\n", __func__));
return (EBUSY);
-
- tftpfile = calloc(1, sizeof(*tftpfile));
- if (!tftpfile)
- return (ENOMEM);
-
+ }
tftpfile->tftp_blksize = TFTP_REQUESTED_BLKSIZE;
dev = f->f_devdata;
- tftpfile->iodesc = io = socktodesc(*(int *)(dev->d_opendata));
+ io = socktodesc(*(int *)(dev->d_opendata));
if (io == NULL) {
free(tftpfile);
return (EINVAL);
}
+ memcpy(&tftpfile->io, io, sizeof(tftpfile->io));
+ io = tftpfile->iodesc = &tftpfile->io;
io->destip = rootip;
tftpfile->off = 0;
pathsize = (strlen(rootpath) + 1 + strlen(path) + 1) * sizeof(char);
@@ -480,8 +527,11 @@ tftp_open(const char *path, struct open_file *f)
extraslash = "";
else
extraslash = "/";
- res = snprintf(tftpfile->path, pathsize, "%s%s%s",
- rootpath, extraslash, path);
+ if (rootpath[0] == '/' && rootpath[1] == '\0' && path[0] == '/')
+ res = strlcpy(tftpfile->path, path, pathsize);
+ else
+ res = snprintf(tftpfile->path, pathsize, "%s%s%s",
+ rootpath, extraslash, path);
if (res < 0 || res > pathsize) {
free(tftpfile->path);
free(tftpfile);
@@ -491,13 +541,13 @@ tftp_open(const char *path, struct open_file *f)
res = tftp_makereq(tftpfile);
if (res) {
+ handles[tftpfile->id] = NULL;
free(tftpfile->path);
free(tftpfile->pkt);
free(tftpfile);
return (res);
}
f->f_fsdata = tftpfile;
- is_open = 1;
return (0);
}
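
The open path above retires the global is_open flag in favor of a small table of handles, so a verifier can hold one file open while another transfer runs. A minimal sketch of the same round-robin slot search, with hypothetical names (MAX_SLOTS, slot_alloc) rather than the loader's own:

/*
 * Round-robin slot allocation, as in tftp_open() above; names are
 * illustrative. Starting the scan at the slot after the last one
 * handed out spreads sessions across slots; the caller derives a
 * distinct UDP source port from the slot index so the server sees
 * each transfer as a separate session.
 */
#include <stdlib.h>

#define MAX_SLOTS	8
static void *slots[MAX_SLOTS];
static int last;

static int
slot_alloc(size_t size, int *idx)
{
	for (int i = 0, x = last + 1; i < MAX_SLOTS; i++, x++) {
		x %= MAX_SLOTS;
		if (slots[x] == NULL) {
			if ((slots[x] = calloc(1, size)) == NULL)
				return (-1);	/* out of memory */
			*idx = last = x;
			return (0);
		}
	}
	return (-1);		/* all slots busy */
}
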
@@ -547,9 +597,7 @@ tftp_read(struct open_file *f, void *addr, size_t size,
rc = tftp_getnextblock(tftpfile);
if (rc) { /* no answer */
-#ifdef TFTP_DEBUG
- printf("tftp: read error\n");
-#endif
+ DEBUG_PRINTF(0, ("tftp: read error\n"));
if (tftpfile->tries > TFTP_TRIES) {
return (rc);
} else {
@@ -568,10 +616,8 @@ tftp_read(struct open_file *f, void *addr, size_t size,
inbuffer = tftpfile->validsize - offinblock;
if (inbuffer < 0) {
-#ifdef TFTP_DEBUG
- printf("tftp: invalid offset %d\n",
- tftpfile->off);
-#endif
+ DEBUG_PRINTF(0, ("tftp: invalid offset %d\n",
+ tftpfile->off));
return (EINVAL);
}
count = (size < inbuffer ? size : inbuffer);
@@ -586,15 +632,15 @@ tftp_read(struct open_file *f, void *addr, size_t size,
if ((tftpfile->islastblock) && (count == inbuffer))
break; /* EOF */
} else {
-#ifdef TFTP_DEBUG
- printf("tftp: block %d not found\n", needblock);
-#endif
+ DEBUG_PRINTF(0, ("tftp: block %d not found\n", needblock));
return (EINVAL);
}
}
out:
+ DEBUG_PRINTF(4, ("%s(%s) res=%ld\n", __func__, tftpfile->path,
+ (tftpfile->tftp_tsize - tftpfile->off)));
if (resid != NULL)
*resid = res;
return (rc);
@@ -610,15 +656,18 @@ tftp_close(struct open_file *f)
tftp_senderr(tftpfile, 0, "No error: file closed");
if (tftpfile) {
+ DEBUG_PRINTF(1, ("%s(%d): %s\n", __func__,
+ tftpfile->id, tftpfile->path));
+ handles[tftpfile->id] = NULL;
free(tftpfile->path);
free(tftpfile->pkt);
free(tftpfile->tftp_cache);
free(tftpfile);
}
- is_open = 0;
return (0);
}
+
static int
tftp_stat(struct open_file *f, struct stat *sb)
{
@@ -630,6 +679,29 @@ tftp_stat(struct open_file *f, struct stat *sb)
sb->st_uid = 0;
sb->st_gid = 0;
sb->st_size = tftpfile->tftp_tsize;
+ sb->st_mtime = 0;
+#ifdef LOADER_VERIEXEC
+	/*
+	 * libsecureboot needs st_dev and st_ino at minimum;
+	 * we need to fake something that will be close enough to
+	 * unique.
+	 */
+ sb->st_dev = (dev_t)tftpfile->iodesc->destip.s_addr;
+ /* we don't want to compute this more than once */
+ if (tftpfile->ino == 0) {
+ union {
+ unsigned char digest[SHA_DIGEST_LENGTH];
+ ino_t ino;
+ } u;
+
+ hash_string(tftpfile->path, 0, u.digest, sizeof(u.digest));
+
+ tftpfile->ino = u.ino & 0x7fffffff;
+	DEBUG_PRINTF(2, ("%s(%s) dev=%lu ino=%lu\n", __func__,
+ tftpfile->path, (unsigned long)sb->st_dev,
+ (unsigned long)tftpfile->ino));
+ }
+ sb->st_ino = tftpfile->ino;
+#endif
return (0);
}
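
Under LOADER_VERIEXEC, tftp_stat() above fabricates st_dev from the server address and st_ino from a hash of the pathname, computed once and cached. A sketch of the same idea with a stand-in hash (hash_string() and SHA_DIGEST_LENGTH belong to libsecureboot; FNV-1a is used here purely for illustration):

/*
 * Derive a stable pseudo-inode from a pathname. Masking with
 * 0x7fffffff keeps the value positive however ino_t is defined.
 */
#include <stdint.h>

static uint64_t
pseudo_ino(const char *path)
{
	uint64_t h = 0xcbf29ce484222325ULL;	/* FNV-1a offset basis */

	for (; *path != '\0'; path++) {
		h ^= (unsigned char)*path;
		h *= 0x100000001b3ULL;		/* FNV-1a prime */
	}
	return (h & 0x7fffffff);
}
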
@@ -827,9 +899,7 @@ tftp_parse_oack(struct tftp_handle *h, char *buf, size_t len)
return (-1);
}
-#ifdef TFTP_DEBUG
- printf("tftp_blksize: %u\n", h->tftp_blksize);
- printf("tftp_tsize: %lu\n", h->tftp_tsize);
-#endif
+ DEBUG_PRINTF(2, ("tftp_blksize: %u\n", h->tftp_blksize));
+ DEBUG_PRINTF(2, ("tftp_tsize: %lu\n", h->tftp_tsize));
return (0);
}
diff --git a/stand/libsa/ufs.c b/stand/libsa/ufs.c
index e1d540ed2321..868e8d47dbbd 100644
--- a/stand/libsa/ufs.c
+++ b/stand/libsa/ufs.c
@@ -93,6 +93,7 @@ static int ufs_unmount(const char *dev, void *data);
struct fs_ops ufs_fsops = {
.fs_name = "ufs",
+ .fs_flags = 0,
.fo_open = ufs_open,
.fo_close = ufs_close,
.fo_read = ufs_read,
@@ -890,6 +891,12 @@ ufs_readdir(struct open_file *f, struct dirent *d)
if (error)
return (error);
dp = (struct direct *)buf;
+ /*
+ * Check for corrupt directory entry and bail out rather
+ * than spin forever hoping that the user has other options.
+ */
+ if (dp->d_reclen == 0)
+ return (0);
fp->f_seekp += dp->d_reclen;
} while (dp->d_ino == (ino_t)0);
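
This change and the ufsread.c change below apply the same defense: a directory entry whose d_reclen is zero never advances the seek pointer, so the walk must bail out instead of spinning forever. A minimal sketch of the guard, with struct dirent_min standing in for struct direct:

/*
 * Walk variable-length directory records; a zero record length means
 * the entry is corrupt and advancing would loop forever, so stop.
 */
struct dirent_min {
	unsigned short d_reclen;	/* length of this record */
};

static const char *
next_entry(const char *p, const char *end)
{
	const struct dirent_min *d = (const void *)p;

	if (d->d_reclen == 0)		/* corrupt entry: bail out */
		return (NULL);
	p += d->d_reclen;
	return (p < end ? p : NULL);
}
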
diff --git a/stand/libsa/ufsread.c b/stand/libsa/ufsread.c
index 0f9b9bb4e2fb..86ac8fbbbab7 100644
--- a/stand/libsa/ufsread.c
+++ b/stand/libsa/ufsread.c
@@ -108,6 +108,13 @@ fsfind(const char *name, ufs_ino_t * ino)
*ino = d.d_ino;
return d.d_type;
}
+ /*
+ * Check for corrupt directory entry and bail out
+ * rather than spin forever hoping that the user
+ * has other options.
+ */
+ if (d.d_reclen == 0)
+ return 0;
s += d.d_reclen;
}
if (n != -1 && ls)
diff --git a/stand/libsa/zfs/zfsimpl.c b/stand/libsa/zfs/zfsimpl.c
index 971d71d098d3..f15d9b016068 100644
--- a/stand/libsa/zfs/zfsimpl.c
+++ b/stand/libsa/zfs/zfsimpl.c
@@ -107,11 +107,6 @@ typedef struct indirect_vsd {
} indirect_vsd_t;
/*
- * List of all vdevs, chained through v_alllink.
- */
-static vdev_list_t zfs_vdevs;
-
-/*
* List of supported read-incompatible ZFS features. Do not add here features
* marked as ZFEATURE_FLAG_READONLY_COMPAT, they are irrelevant for read-only!
*/
@@ -167,7 +162,6 @@ vdev_indirect_mapping_entry_phys_t *
static void
zfs_init(void)
{
- STAILQ_INIT(&zfs_vdevs);
STAILQ_INIT(&zfs_pools);
dnode_cache_buf = malloc(SPA_MAXBLOCKSIZE);
@@ -839,16 +833,27 @@ vdev_replacing_read(vdev_t *vdev, const blkptr_t *bp, void *buf,
return (kid->v_read(kid, bp, buf, offset, bytes));
}
+/*
+ * List of vdevs that were fully initialized from their own label, but later a
+ * newer label was found that obsoleted the stale label, freeing its
+ * configuration tree. We keep those vdevs around, since a new configuration
+ * may include them.
+ */
+static vdev_list_t orphans = STAILQ_HEAD_INITIALIZER(orphans);
+
static vdev_t *
-vdev_find(uint64_t guid)
+vdev_find(vdev_list_t *list, uint64_t guid)
{
- vdev_t *vdev;
+ vdev_t *vdev, *safe;
- STAILQ_FOREACH(vdev, &zfs_vdevs, v_alllink)
+ STAILQ_FOREACH_SAFE(vdev, list, v_childlink, safe) {
if (vdev->v_guid == guid)
return (vdev);
+ if ((vdev = vdev_find(&vdev->v_children, guid)) != NULL)
+ return (vdev);
+ }
- return (0);
+ return (NULL);
}
static vdev_t *
@@ -857,6 +862,11 @@ vdev_create(uint64_t guid, vdev_read_t *_read)
vdev_t *vdev;
vdev_indirect_config_t *vic;
+ if ((vdev = vdev_find(&orphans, guid))) {
+ STAILQ_REMOVE(&orphans, vdev, vdev, v_childlink);
+ return (vdev);
+ }
+
vdev = calloc(1, sizeof(vdev_t));
if (vdev != NULL) {
STAILQ_INIT(&vdev->v_children);
@@ -871,7 +881,6 @@ vdev_create(uint64_t guid, vdev_read_t *_read)
if (_read != NULL) {
vic = &vdev->vdev_indirect_config;
vic->vic_prev_indirect_vdev = UINT64_MAX;
- STAILQ_INSERT_TAIL(&zfs_vdevs, vdev, v_alllink);
}
}
@@ -1035,22 +1044,19 @@ vdev_init(uint64_t guid, const nvlist_t *nvlist, vdev_t **vdevp)
* STAILQ_INSERT_AFTER.
*/
static vdev_t *
-vdev_find_previous(vdev_t *top_vdev, vdev_t *vdev)
+vdev_find_previous(vdev_t *top_vdev, uint64_t id)
{
vdev_t *v, *previous;
- if (STAILQ_EMPTY(&top_vdev->v_children))
- return (NULL);
-
previous = NULL;
STAILQ_FOREACH(v, &top_vdev->v_children, v_childlink) {
- if (v->v_id > vdev->v_id)
+ if (v->v_id > id)
return (previous);
- if (v->v_id == vdev->v_id)
+ if (v->v_id == id)
return (v);
- if (v->v_id < vdev->v_id)
+ if (v->v_id < id)
previous = v;
}
return (previous);
@@ -1072,7 +1078,7 @@ vdev_child_count(vdev_t *vdev)
/*
* Insert vdev into top_vdev children list. List is ordered by v_id.
*/
-static void
+static vdev_t *
vdev_insert(vdev_t *top_vdev, vdev_t *vdev)
{
vdev_t *previous;
@@ -1085,7 +1091,7 @@ vdev_insert(vdev_t *top_vdev, vdev_t *vdev)
* so we can use either STAILQ_INSERT_HEAD or STAILQ_INSERT_AFTER
* as STAILQ does not have insert before.
*/
- previous = vdev_find_previous(top_vdev, vdev);
+ previous = vdev_find_previous(top_vdev, vdev->v_id);
if (previous == NULL) {
STAILQ_INSERT_HEAD(&top_vdev->v_children, vdev, v_childlink);
@@ -1094,7 +1100,8 @@ vdev_insert(vdev_t *top_vdev, vdev_t *vdev)
* This vdev was configured from label config,
* do not insert duplicate.
*/
- return;
+ free(vdev);
+ return (previous);
} else {
STAILQ_INSERT_AFTER(&top_vdev->v_children, previous, vdev,
v_childlink);
@@ -1103,26 +1110,28 @@ vdev_insert(vdev_t *top_vdev, vdev_t *vdev)
count = vdev_child_count(top_vdev);
if (top_vdev->v_nchildren < count)
top_vdev->v_nchildren = count;
+ return (vdev);
}
static int
-vdev_from_nvlist(spa_t *spa, uint64_t top_guid, uint64_t txg,
- const nvlist_t *nvlist)
+vdev_from_nvlist(spa_t *spa, uint64_t top_guid, uint64_t label_guid,
+ uint64_t txg, const nvlist_t *nvlist)
{
vdev_t *top_vdev, *vdev;
nvlist_t **kids = NULL;
int rc, nkids;
/* Get top vdev. */
- top_vdev = vdev_find(top_guid);
+ top_vdev = vdev_find(&spa->spa_root_vdev->v_children, top_guid);
if (top_vdev == NULL) {
rc = vdev_init(top_guid, nvlist, &top_vdev);
if (rc != 0)
return (rc);
top_vdev->v_spa = spa;
top_vdev->v_top = top_vdev;
+ top_vdev->v_label = label_guid;
top_vdev->v_txg = txg;
- vdev_insert(spa->spa_root_vdev, top_vdev);
+	(void)vdev_insert(spa->spa_root_vdev, top_vdev);
}
/* Add children if there are any. */
@@ -1143,7 +1152,7 @@ vdev_from_nvlist(spa_t *spa, uint64_t top_guid, uint64_t txg,
vdev->v_spa = spa;
vdev->v_top = top_vdev;
- vdev_insert(top_vdev, vdev);
+ vdev = vdev_insert(top_vdev, vdev);
}
} else {
/*
@@ -1162,30 +1171,6 @@ done:
return (rc);
}
-static int
-vdev_init_from_label(spa_t *spa, const nvlist_t *nvlist)
-{
- uint64_t pool_guid, top_guid, txg;
- nvlist_t *vdevs;
- int rc;
-
- if (nvlist_find(nvlist, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64,
- NULL, &pool_guid, NULL) ||
- nvlist_find(nvlist, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64,
- NULL, &top_guid, NULL) ||
- nvlist_find(nvlist, ZPOOL_CONFIG_POOL_TXG, DATA_TYPE_UINT64,
- NULL, &txg, NULL) != 0 ||
- nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST,
- NULL, &vdevs, NULL)) {
- printf("ZFS: can't find vdev details\n");
- return (ENOENT);
- }
-
- rc = vdev_from_nvlist(spa, top_guid, txg, vdevs);
- nvlist_destroy(vdevs);
- return (rc);
-}
-
static void
vdev_set_state(vdev_t *vdev)
{
@@ -1232,14 +1217,14 @@ vdev_set_state(vdev_t *vdev)
}
static int
-vdev_update_from_nvlist(uint64_t top_guid, const nvlist_t *nvlist)
+vdev_update_from_nvlist(vdev_t *root, uint64_t top_guid, const nvlist_t *nvlist)
{
vdev_t *vdev;
nvlist_t **kids = NULL;
int rc, nkids;
/* Update top vdev. */
- vdev = vdev_find(top_guid);
+ vdev = vdev_find(&root->v_children, top_guid);
if (vdev != NULL)
vdev_set_initial_state(vdev, nvlist);
@@ -1255,7 +1240,7 @@ vdev_update_from_nvlist(uint64_t top_guid, const nvlist_t *nvlist)
if (rc != 0)
break;
- vdev = vdev_find(guid);
+ vdev = vdev_find(&root->v_children, guid);
if (vdev != NULL)
vdev_set_initial_state(vdev, kids[i]);
}
@@ -1271,10 +1256,6 @@ vdev_update_from_nvlist(uint64_t top_guid, const nvlist_t *nvlist)
return (rc);
}
-/*
- * Shall not be called on root vdev, that is not linked into zfs_vdevs.
- * See comment in vdev_create().
- */
static void
vdev_free(struct vdev *vdev)
{
@@ -1282,8 +1263,10 @@ vdev_free(struct vdev *vdev)
STAILQ_FOREACH_SAFE(kid, &vdev->v_children, v_childlink, safe)
vdev_free(kid);
- STAILQ_REMOVE(&zfs_vdevs, vdev, vdev, v_alllink);
- free(vdev);
+ if (vdev->v_phys_read != NULL)
+ STAILQ_INSERT_HEAD(&orphans, vdev, v_childlink);
+ else
+ free(vdev);
}
static int
@@ -1329,15 +1312,16 @@ vdev_init_from_nvlist(spa_t *spa, const nvlist_t *nvlist)
NULL, &guid, NULL);
if (rc != 0)
break;
- vdev = vdev_find(guid);
+ vdev = vdev_find(&spa->spa_root_vdev->v_children, guid);
/*
* Top level vdev is missing, create it.
* XXXGL: how can this happen?
*/
if (vdev == NULL)
- rc = vdev_from_nvlist(spa, guid, 0, kids[i]);
+ rc = vdev_from_nvlist(spa, guid, 0, 0, kids[i]);
else
- rc = vdev_update_from_nvlist(guid, kids[i]);
+ rc = vdev_update_from_nvlist(spa->spa_root_vdev, guid,
+ kids[i]);
if (rc != 0)
break;
}
@@ -1355,6 +1339,53 @@ vdev_init_from_nvlist(spa_t *spa, const nvlist_t *nvlist)
return (rc);
}
+static bool
+nvlist_find_child_guid(const nvlist_t *nvlist, uint64_t guid)
+{
+ nvlist_t **kids = NULL;
+ int nkids, i;
+ bool rv = false;
+
+ if (nvlist_find(nvlist, ZPOOL_CONFIG_CHILDREN, DATA_TYPE_NVLIST_ARRAY,
+ &nkids, &kids, NULL) != 0)
+ nkids = 0;
+
+ for (i = 0; i < nkids; i++) {
+ uint64_t kid_guid;
+
+ if (nvlist_find(kids[i], ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64,
+ NULL, &kid_guid, NULL) != 0)
+ break;
+ if (kid_guid == guid)
+ rv = true;
+ else
+ rv = nvlist_find_child_guid(kids[i], guid);
+ if (rv)
+ break;
+ }
+
+ for (i = 0; i < nkids; i++)
+ nvlist_destroy(kids[i]);
+ free(kids);
+
+ return (rv);
+}
+
+static bool
+nvlist_find_vdev_guid(const nvlist_t *nvlist, uint64_t guid)
+{
+ nvlist_t *vdevs;
+ bool rv;
+
+ if (nvlist_find(nvlist, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST, NULL,
+ &vdevs, NULL) != 0)
+ return (false);
+ rv = nvlist_find_child_guid(vdevs, guid);
+ nvlist_destroy(vdevs);
+
+ return (rv);
+}
+
static spa_t *
spa_find_by_guid(uint64_t guid)
{
@@ -2023,8 +2054,8 @@ vdev_probe(vdev_phys_read_t *_read, vdev_phys_write_t *_write, void *priv,
{
vdev_t vtmp;
spa_t *spa;
- vdev_t *vdev;
- nvlist_t *nvl;
+ vdev_t *vdev, *top;
+ nvlist_t *nvl, *vdevs;
uint64_t val;
uint64_t guid, pool_guid, top_guid, txg;
const char *pool_name;
@@ -2083,6 +2114,7 @@ vdev_probe(vdev_phys_read_t *_read, vdev_phys_write_t *_write, void *priv,
if (nvlist_find(nvl, ZPOOL_CONFIG_POOL_TXG, DATA_TYPE_UINT64,
NULL, &txg, NULL) != 0 ||
+ txg == 0 ||
nvlist_find(nvl, ZPOOL_CONFIG_TOP_GUID, DATA_TYPE_UINT64,
NULL, &top_guid, NULL) != 0 ||
nvlist_find(nvl, ZPOOL_CONFIG_POOL_GUID, DATA_TYPE_UINT64,
@@ -2092,7 +2124,7 @@ vdev_probe(vdev_phys_read_t *_read, vdev_phys_write_t *_write, void *priv,
nvlist_find(nvl, ZPOOL_CONFIG_GUID, DATA_TYPE_UINT64,
NULL, &guid, NULL) != 0) {
/*
- * Cache and spare devices end up here - just ignore
+ * Cache, spare and replaced devices end up here - just ignore
* them.
*/
nvlist_destroy(nvl);
@@ -2119,22 +2151,47 @@ vdev_probe(vdev_phys_read_t *_read, vdev_phys_write_t *_write, void *priv,
nvlist_destroy(nvl);
return (ENOMEM);
}
- } else {
- struct vdev *kid;
-
- STAILQ_FOREACH(kid, &spa->spa_root_vdev->v_children,
- v_childlink)
- if (kid->v_guid == top_guid && kid->v_txg < txg) {
- printf("ZFS: pool %s vdev %s ignoring stale "
- "label from txg 0x%jx, using 0x%jx@0x%jx\n",
- spa->spa_name, kid->v_name,
- kid->v_txg, guid, txg);
+ }
+
+ /*
+ * Check if configuration is already known. If configuration is known
+ * and txg numbers don't match, we got 2x2 scenarios here. First, is
+ * the label being read right now _newer_ than the one read before.
+ * Second, is the vdev that provided the stale label _present_ in the
+ * newer configuration. If neither is true, we completely ignore the
+ * label.
+ */
+ STAILQ_FOREACH(top, &spa->spa_root_vdev->v_children, v_childlink)
+ if (top->v_guid == top_guid) {
+ bool newer, present;
+
+ if (top->v_txg == txg)
+ break;
+ newer = (top->v_txg < txg);
+ present = newer ?
+ nvlist_find_vdev_guid(nvl, top->v_label) :
+ (vdev_find(&top->v_children, guid) != NULL);
+ printf("ZFS: pool %s vdev %s %s stale label from "
+ "0x%jx@0x%jx, %s 0x%jx@0x%jx\n",
+ spa->spa_name, top->v_name,
+ present ? "using" : "ignoring",
+ newer ? top->v_label : guid,
+ newer ? top->v_txg : txg,
+ present ? "referred by" : "using",
+ newer ? guid : top->v_label,
+ newer ? txg : top->v_txg);
+ if (newer) {
STAILQ_REMOVE(&spa->spa_root_vdev->v_children,
- kid, vdev, v_childlink);
- vdev_free(kid);
+ top, vdev, v_childlink);
+ vdev_free(top);
+ break;
+ } else if (present) {
break;
+ } else {
+ nvlist_destroy(nvl);
+ return (EIO);
}
- }
+ }
/*
* Get the vdev tree and create our in-core copy of it.
@@ -2142,14 +2199,22 @@ vdev_probe(vdev_phys_read_t *_read, vdev_phys_write_t *_write, void *priv,
* be some kind of alias (overlapping slices, dangerously dedicated
* disks etc).
*/
- vdev = vdev_find(guid);
+ vdev = vdev_find(&spa->spa_root_vdev->v_children, guid);
/* Has this vdev already been inited? */
if (vdev && vdev->v_phys_read) {
nvlist_destroy(nvl);
return (EIO);
}
- rc = vdev_init_from_label(spa, nvl);
+ if (nvlist_find(nvl, ZPOOL_CONFIG_VDEV_TREE, DATA_TYPE_NVLIST, NULL,
+ &vdevs, NULL)) {
+ printf("ZFS: can't find vdev details\n");
+ nvlist_destroy(nvl);
+ return (ENOENT);
+ }
+
+ rc = vdev_from_nvlist(spa, top_guid, guid, txg, vdevs);
+ nvlist_destroy(vdevs);
nvlist_destroy(nvl);
if (rc != 0)
return (rc);
@@ -2158,7 +2223,7 @@ vdev_probe(vdev_phys_read_t *_read, vdev_phys_write_t *_write, void *priv,
* We should already have created an incomplete vdev for this
* vdev. Find it and initialise it with our read proc.
*/
- vdev = vdev_find(guid);
+ vdev = vdev_find(&spa->spa_root_vdev->v_children, guid);
if (vdev != NULL) {
vdev->v_phys_read = _read;
vdev->v_phys_write = _write;
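
The "2x2 scenarios" comment in vdev_probe() above reduces to a small decision table over (newer, present). A hedged sketch of just that decision, detached from the real vdev and nvlist types (the enum names are made up for illustration):

/*
 * newer: the label being read carries a higher txg than the
 * configuration already held. present: the losing side is still
 * referenced by the winning configuration (it only affects whether
 * a stale-but-referenced label is kept or the label is rejected).
 */
enum label_action {
	LABEL_KEEP_OLD,		/* old config stands */
	LABEL_REPLACE,		/* new label wins; free old top vdev */
	LABEL_REJECT		/* stale label, not in new config: EIO */
};

static enum label_action
label_decide(int newer, int present)
{
	if (newer)
		return (LABEL_REPLACE);
	if (present)
		return (LABEL_KEEP_OLD);
	return (LABEL_REJECT);
}
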
diff --git a/stand/loader.mk b/stand/loader.mk
index 0f2ff31a5343..4073e523e552 100644
--- a/stand/loader.mk
+++ b/stand/loader.mk
@@ -101,8 +101,6 @@ SRCS+= interp_simple.c
.error Unknown interpreter ${LOADER_INTERP}
.endif
-.include "${BOOTSRC}/veriexec.mk"
-
.if defined(BOOT_PROMPT_123)
CFLAGS+= -DBOOT_PROMPT_123
.endif
diff --git a/sys/amd64/conf/GENERIC b/sys/amd64/conf/GENERIC
index 385eb9667652..81427b5b18b6 100644
--- a/sys/amd64/conf/GENERIC
+++ b/sys/amd64/conf/GENERIC
@@ -184,6 +184,9 @@ device mrsas # LSI/Avago MegaRAID SAS/SATA, 6Gb/s and 12Gb/s
device nvme # base NVMe driver
device nvd # expose NVMe namespaces as disks, depends on nvme
+# Universal Flash Storage Host Controller Interface support
+device ufshci # UFS host controller
+
# Intel Volume Management Device (VMD) support
device vmd
@@ -383,6 +386,7 @@ options HID_DEBUG # enable debug msgs
device hid # Generic HID support
device hidbus # Generic HID Bus
options IICHID_SAMPLING # Workaround missing GPIO INTR support
+options U2F_MAKE_UHID_ALIAS # install /dev/uhid alias for /dev/u2f/
# EFI devices
device efidev # EFI pseudo-device
diff --git a/sys/arm/arm/pmu_fdt.c b/sys/arm/arm/pmu_fdt.c
index 3e733f3e1b18..dd6087652e38 100644
--- a/sys/arm/arm/pmu_fdt.c
+++ b/sys/arm/arm/pmu_fdt.c
@@ -152,7 +152,7 @@ pmu_parse_intr(device_t dev, struct pmu_softc *sc)
if (intr_is_per_cpu(sc->irq[0].res)) {
if (has_affinity) {
device_printf(dev,
- "Per CPU interupt have declared affinity\n");
+ "Per CPU interrupt have declared affinity\n");
err = ENXIO;
goto done;
}
@@ -179,7 +179,7 @@ pmu_parse_intr(device_t dev, struct pmu_softc *sc)
if (intr_is_per_cpu(sc->irq[i].res))
{
- device_printf(dev, "Unexpected per CPU interupt\n");
+ device_printf(dev, "Unexpected per CPU interrupt\n");
err = ENXIO;
goto done;
}
diff --git a/sys/arm/conf/GENERIC b/sys/arm/conf/GENERIC
index 7394f3842d43..26b0c7bf0294 100644
--- a/sys/arm/conf/GENERIC
+++ b/sys/arm/conf/GENERIC
@@ -261,6 +261,7 @@ device aw_thermal # Allwinner Thermal Sensor Controller
# HID support
device hid # Generic HID support
device hidbus # Generic HID Bus
+options U2F_MAKE_UHID_ALIAS # install /dev/uhid alias for /dev/u2f/
# Flattened Device Tree
options FDT # Configure using FDT/DTB data
diff --git a/sys/arm64/arm64/elf32_machdep.c b/sys/arm64/arm64/elf32_machdep.c
index 7cd5327b9f1b..5c81c6cdce3d 100644
--- a/sys/arm64/arm64/elf32_machdep.c
+++ b/sys/arm64/arm64/elf32_machdep.c
@@ -195,7 +195,7 @@ freebsd32_fetch_syscall_args(struct thread *td)
register_t *ap;
struct syscall_args *sa;
int error, i, nap, narg;
- unsigned int args[4];
+ unsigned int args[6];
nap = 4;
p = td->td_proc;
diff --git a/sys/arm64/conf/std.dev b/sys/arm64/conf/std.dev
index c5c364ffda04..719f272426dd 100644
--- a/sys/arm64/conf/std.dev
+++ b/sys/arm64/conf/std.dev
@@ -115,6 +115,7 @@ device mmcsd # mmc/sd flash cards
options HID_DEBUG # enable debug msgs
device hid # Generic HID support
device hidbus # Generic HID Bus
+options U2F_MAKE_UHID_ALIAS # install /dev/uhid alias for /dev/u2f/
# Firmware
device mmio_sram # Generic on-chip SRAM
diff --git a/sys/cddl/boot/zfs/zfsimpl.h b/sys/cddl/boot/zfs/zfsimpl.h
index 915aeeda3c9e..c9de1fe4c391 100644
--- a/sys/cddl/boot/zfs/zfsimpl.h
+++ b/sys/cddl/boot/zfs/zfsimpl.h
@@ -2021,11 +2021,11 @@ typedef struct vdev_indirect_config {
typedef struct vdev {
STAILQ_ENTRY(vdev) v_childlink; /* link in parent's child list */
- STAILQ_ENTRY(vdev) v_alllink; /* link in global vdev list */
vdev_list_t v_children; /* children of this vdev */
const char *v_name; /* vdev name */
uint64_t v_guid; /* vdev guid */
- uint64_t v_txg; /* most recent transaction */
+ uint64_t v_label; /* label instantiated from (top vdev) */
+ uint64_t v_txg; /* most recent transaction (top vdev) */
uint64_t v_id; /* index in parent */
uint64_t v_psize; /* physical device capacity */
int v_ashift; /* offset to block shift */
diff --git a/sys/compat/linux/linux_netlink.c b/sys/compat/linux/linux_netlink.c
index f51838ee00d7..927f3689e2b6 100644
--- a/sys/compat/linux/linux_netlink.c
+++ b/sys/compat/linux/linux_netlink.c
@@ -242,24 +242,9 @@ nlmsg_copy_nla(const struct nlattr *nla_orig, struct nl_writer *nw)
}
/*
- * Translate a FreeBSD interface name to a Linux interface name.
- */
-static bool
-nlmsg_translate_ifname_nla(struct nlattr *nla, struct nl_writer *nw)
-{
- char ifname[LINUX_IFNAMSIZ];
-
- if (ifname_bsd_to_linux_name((char *)(nla + 1), ifname,
- sizeof(ifname)) <= 0)
- return (false);
- return (nlattr_add_string(nw, IFLA_IFNAME, ifname));
-}
-
-#define LINUX_NLA_UNHANDLED -1
-/*
* Translate a FreeBSD attribute to a Linux attribute.
- * Returns LINUX_NLA_UNHANDLED when the attribute is not processed
- * and the caller must take care of it, otherwise the result is returned.
+ * Returns false when the attribute is not processed and the caller must take
+ * care of it.
*/
static int
nlmsg_translate_all_nla(struct nlmsghdr *hdr, struct nlattr *nla,
@@ -271,22 +256,27 @@ nlmsg_translate_all_nla(struct nlmsghdr *hdr, struct nlattr *nla,
case NL_RTM_DELLINK:
case NL_RTM_GETLINK:
switch (nla->nla_type) {
- case IFLA_IFNAME:
- return (nlmsg_translate_ifname_nla(nla, nw));
+ case IFLA_IFNAME: {
+ char ifname[LINUX_IFNAMSIZ];
+
+ if (ifname_bsd_to_linux_name((char *)(nla + 1), ifname,
+ sizeof(ifname)) > 0)
+ return (true);
+ break;
+ }
default:
break;
}
default:
break;
}
- return (LINUX_NLA_UNHANDLED);
+ return (false);
}
static bool
nlmsg_copy_all_nla(struct nlmsghdr *hdr, int raw_hdrlen, struct nl_writer *nw)
{
struct nlattr *nla;
- int ret;
int hdrlen = NETLINK_ALIGN(raw_hdrlen);
int attrs_len = hdr->nlmsg_len - sizeof(struct nlmsghdr) - hdrlen;
@@ -297,15 +287,12 @@ nlmsg_copy_all_nla(struct nlmsghdr *hdr, int raw_hdrlen, struct nl_writer *nw)
if (nla->nla_len < sizeof(struct nlattr)) {
return (false);
}
- ret = nlmsg_translate_all_nla(hdr, nla, nw);
- if (ret == LINUX_NLA_UNHANDLED)
- ret = nlmsg_copy_nla(nla, nw);
- if (!ret)
+ if (!nlmsg_translate_all_nla(hdr, nla, nw) &&
+ !nlmsg_copy_nla(nla, nw))
return (false);
}
return (true);
}
-#undef LINUX_NLA_UNHANDLED
static unsigned int
rtnl_if_flags_to_linux(unsigned int if_flags)
@@ -563,22 +550,15 @@ nlmsg_to_linux(struct nlmsghdr *hdr, struct nlpcb *nlp, struct nl_writer *nw)
}
}
-static bool
-nlmsgs_to_linux(struct nl_writer *nw, struct nlpcb *nlp)
+static struct nl_buf *
+nlmsgs_to_linux(struct nl_buf *orig, struct nlpcb *nlp)
{
- struct nl_buf *nb, *orig;
- u_int offset, msglen, orig_messages;
+ struct nl_writer nw;
+ u_int offset, msglen;
- RT_LOG(LOG_DEBUG3, "%p: in %u bytes %u messages", __func__,
- nw->buf->datalen, nw->num_messages);
-
- orig = nw->buf;
- nb = nl_buf_alloc(orig->datalen + SCRATCH_BUFFER_SIZE, M_NOWAIT);
- if (__predict_false(nb == NULL))
- return (false);
- nw->buf = nb;
- orig_messages = nw->num_messages;
- nw->num_messages = 0;
+ if (__predict_false(!nl_writer_unicast(&nw,
+ orig->datalen + SCRATCH_BUFFER_SIZE, nlp, false)))
+ return (NULL);
/* Assume correct headers. Buffer IS mutable */
for (offset = 0;
@@ -587,22 +567,18 @@ nlmsgs_to_linux(struct nl_writer *nw, struct nlpcb *nlp)
struct nlmsghdr *hdr = (struct nlmsghdr *)&orig->data[offset];
msglen = NLMSG_ALIGN(hdr->nlmsg_len);
- if (!nlmsg_to_linux(hdr, nlp, nw)) {
+ if (!nlmsg_to_linux(hdr, nlp, &nw)) {
RT_LOG(LOG_DEBUG, "failed to process msg type %d",
hdr->nlmsg_type);
- nl_buf_free(nb);
- nw->buf = orig;
- nw->num_messages = orig_messages;
- return (false);
+ nl_buf_free(nw.buf);
+ return (NULL);
}
}
- MPASS(nw->num_messages == orig_messages);
- MPASS(nw->buf == nb);
- nl_buf_free(orig);
- RT_LOG(LOG_DEBUG3, "%p: out %u bytes", __func__, offset);
+ RT_LOG(LOG_DEBUG3, "%p: in %u bytes %u messages", __func__,
+ nw.buf->datalen, nw.num_messages);
- return (true);
+ return (nw.buf);
}
static struct linux_netlink_provider linux_netlink_v1 = {
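
After this refactor each FreeBSD attribute is first offered to the translator and, when the translator reports it unhandled, copied through verbatim; only a failed copy aborts the message. A sketch of that translate-or-copy loop with hypothetical callbacks:

/*
 * translate() returning false means "not mine, pass it through";
 * copy() returning false is a hard error for the whole message.
 */
#include <stdbool.h>
#include <stddef.h>

struct attr;				/* opaque attribute */

static bool
forward_attrs(struct attr **attrs, size_t n,
    bool (*translate)(struct attr *), bool (*copy)(struct attr *))
{
	for (size_t i = 0; i < n; i++) {
		if (!translate(attrs[i]) && !copy(attrs[i]))
			return (false);
	}
	return (true);
}
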
diff --git a/sys/compat/linuxkpi/common/include/linux/ieee80211.h b/sys/compat/linuxkpi/common/include/linux/ieee80211.h
index 3644ef80861b..5851ac08f083 100644
--- a/sys/compat/linuxkpi/common/include/linux/ieee80211.h
+++ b/sys/compat/linuxkpi/common/include/linux/ieee80211.h
@@ -35,6 +35,7 @@
#include <asm/unaligned.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
+#include <linux/bitfield.h>
#include <linux/if_ether.h>
/* linux_80211.c */
@@ -121,7 +122,20 @@ enum ieee80211_rate_control_changed_flags {
/* 802.11-2016, 9.4.2.158.3 Supported VHT-MCS and NSS Set field. */
#define IEEE80211_VHT_EXT_NSS_BW_CAPABLE (1 << 13) /* part of tx_highest */
-#define IEEE80211_VHT_MAX_AMPDU_1024K 7 /* 9.4.2.56.3 A-MPDU Parameters field, Table 9-163 */
+/*
+ * 802.11-2020, 9.4.2.157.2 VHT Capabilities Information field,
+ * Table 9-271-Subfields of the VHT Capabilities Information field (continued).
+ */
+enum ieee80211_vht_max_ampdu_len_exp {
+ IEEE80211_VHT_MAX_AMPDU_8K = 0,
+ IEEE80211_VHT_MAX_AMPDU_16K = 1,
+ IEEE80211_VHT_MAX_AMPDU_32K = 2,
+ IEEE80211_VHT_MAX_AMPDU_64K = 3,
+ IEEE80211_VHT_MAX_AMPDU_128K = 4,
+ IEEE80211_VHT_MAX_AMPDU_256K = 5,
+ IEEE80211_VHT_MAX_AMPDU_512K = 6,
+ IEEE80211_VHT_MAX_AMPDU_1024K = 7,
+};
#define IEEE80211_WEP_IV_LEN 3 /* net80211: IEEE80211_WEP_IVLEN */
#define IEEE80211_WEP_ICV_LEN 4
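
The enum added above encodes the 3-bit VHT "Maximum A-MPDU Length Exponent" field; 802.11 defines the resulting limit as 2^(13 + exponent) - 1 octets, so value 0 means 8191 bytes ("8K") and value 7 means 1048575 ("1024K"). A one-line helper, assuming the exponent is already masked to 3 bits:

static inline unsigned int
vht_max_ampdu_len(unsigned int exp)
{
	/* max A-MPDU length: 2^(13 + exp) - 1 octets */
	return ((1U << (13 + exp)) - 1);
}
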
diff --git a/sys/compat/linuxkpi/common/include/net/cfg80211.h b/sys/compat/linuxkpi/common/include/net/cfg80211.h
index 18b34f0e90ec..044f348ef08b 100644
--- a/sys/compat/linuxkpi/common/include/net/cfg80211.h
+++ b/sys/compat/linuxkpi/common/include/net/cfg80211.h
@@ -36,6 +36,7 @@
#include <linux/mutex.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/random.h>
@@ -722,8 +723,10 @@ struct linuxkpi_ieee80211_regdomain {
#define IEEE80211_EML_CAP_TRANSITION_TIMEOUT_128TU 0x04
#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY 0x08
#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_32US 0x10
+#define IEEE80211_EML_CAP_EMLSR_PADDING_DELAY_256US 0x10
#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY 0x20
#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_64US 0x40
+#define IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY_256US 0x40
#define VENDOR_CMD_RAW_DATA (void *)(uintptr_t)(-ENOENT)
@@ -2065,6 +2068,18 @@ nl80211_chan_width_to_mhz(enum nl80211_chan_width width)
}
static __inline ssize_t
+wiphy_locked_debugfs_read(struct wiphy *wiphy, struct file *file,
+ char *buf, size_t bufsize, const char __user *userbuf, size_t count,
+ loff_t *ppos,
+ ssize_t (*handler)(struct wiphy *, struct file *, char *, size_t, void *),
+ void *data)
+{
+ TODO();
+ return (-ENXIO);
+}
+
+static __inline ssize_t
wiphy_locked_debugfs_write(struct wiphy *wiphy, struct file *file,
char *buf, size_t bufsize, const char __user *userbuf, size_t count,
ssize_t (*handler)(struct wiphy *, struct file *, char *, size_t, void *),
diff --git a/sys/compat/linuxkpi/common/include/net/mac80211.h b/sys/compat/linuxkpi/common/include/net/mac80211.h
index af3199c38939..2ed595095f9e 100644
--- a/sys/compat/linuxkpi/common/include/net/mac80211.h
+++ b/sys/compat/linuxkpi/common/include/net/mac80211.h
@@ -1184,7 +1184,7 @@ struct wireless_dev *linuxkpi_ieee80211_vif_to_wdev(struct ieee80211_vif *);
void linuxkpi_ieee80211_connection_loss(struct ieee80211_vif *);
void linuxkpi_ieee80211_beacon_loss(struct ieee80211_vif *);
struct sk_buff *linuxkpi_ieee80211_probereq_get(struct ieee80211_hw *,
- uint8_t *, uint8_t *, size_t, size_t);
+ const uint8_t *, const uint8_t *, size_t, size_t);
void linuxkpi_ieee80211_tx_status(struct ieee80211_hw *, struct sk_buff *);
void linuxkpi_ieee80211_tx_status_ext(struct ieee80211_hw *,
struct ieee80211_tx_status *);
@@ -2161,8 +2161,8 @@ ieee80211_nullfunc_get(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
}
static __inline struct sk_buff *
-ieee80211_probereq_get(struct ieee80211_hw *hw, uint8_t *addr,
- uint8_t *ssid, size_t ssid_len, size_t tailroom)
+ieee80211_probereq_get(struct ieee80211_hw *hw, const uint8_t *addr,
+ const uint8_t *ssid, size_t ssid_len, size_t tailroom)
{
return (linuxkpi_ieee80211_probereq_get(hw, addr, ssid, ssid_len,
diff --git a/sys/compat/linuxkpi/common/src/linux_80211.c b/sys/compat/linuxkpi/common/src/linux_80211.c
index 1d00e8da8f9a..a7d6003843ba 100644
--- a/sys/compat/linuxkpi/common/src/linux_80211.c
+++ b/sys/compat/linuxkpi/common/src/linux_80211.c
@@ -7668,8 +7668,8 @@ linuxkpi_ieee80211_queue_work(struct ieee80211_hw *hw,
}
struct sk_buff *
-linuxkpi_ieee80211_probereq_get(struct ieee80211_hw *hw, uint8_t *addr,
- uint8_t *ssid, size_t ssid_len, size_t tailroom)
+linuxkpi_ieee80211_probereq_get(struct ieee80211_hw *hw, const uint8_t *addr,
+ const uint8_t *ssid, size_t ssid_len, size_t tailroom)
{
struct sk_buff *skb;
struct ieee80211_frame *wh;
diff --git a/sys/conf/NOTES b/sys/conf/NOTES
index 2458756ae350..92e98aa57ebf 100644
--- a/sys/conf/NOTES
+++ b/sys/conf/NOTES
@@ -2436,7 +2436,7 @@ options HID_DEBUG # enable debug msgs
device hidbus # HID bus
device hidmap # HID to evdev mapping
device hidraw # Raw access driver
-options HIDRAW_MAKE_UHID_ALIAS # install /dev/uhid alias
+options HIDRAW_MAKE_UHID_ALIAS # install /dev/uhid alias for /dev/hidraw
device hconf # Multitouch configuration TLC
device hcons # Consumer controls
device hgame # Generic game controllers
@@ -2446,6 +2446,8 @@ device hmt # HID multitouch (MS-compatible)
device hpen # Generic pen driver
device hsctrl # System controls
device ps4dshock # Sony PS4 DualShock 4 gamepad driver
+device u2f # FIDO/U2F authenticator
+options U2F_MAKE_UHID_ALIAS # install /dev/uhid alias for /dev/u2f/
device xb360gp # XBox 360 gamepad driver
#####################################################################
diff --git a/sys/conf/files b/sys/conf/files
index be65ed20d6aa..d89813c70355 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -1750,6 +1750,7 @@ dev/hid/hpen.c optional hpen
dev/hid/hsctrl.c optional hsctrl
dev/hid/ietp.c optional ietp
dev/hid/ps4dshock.c optional ps4dshock
+dev/hid/u2f.c optional u2f
dev/hid/xb360gp.c optional xb360gp
dev/hifn/hifn7751.c optional hifn
dev/hptiop/hptiop.c optional hptiop scbus
@@ -2280,6 +2281,8 @@ dev/ixgbe/ixgbe_x540.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_x550.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
+dev/ixgbe/ixgbe_e610.c optional ix inet | ixv inet \
+ compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_dcb.c optional ix inet | ixv inet \
compile-with "${NORMAL_C} -I$S/dev/ixgbe"
dev/ixgbe/ixgbe_dcb_82598.c optional ix inet | ixv inet \
@@ -3448,7 +3451,6 @@ dev/virtio/mmio/virtio_mmio.c optional virtio_mmio
dev/virtio/mmio/virtio_mmio_acpi.c optional virtio_mmio acpi
dev/virtio/mmio/virtio_mmio_cmdline.c optional virtio_mmio
dev/virtio/mmio/virtio_mmio_fdt.c optional virtio_mmio fdt
-dev/virtio/mmio/virtio_mmio_if.m optional virtio_mmio
dev/virtio/network/if_vtnet.c optional vtnet
dev/virtio/balloon/virtio_balloon.c optional virtio_balloon
dev/virtio/block/virtio_blk.c optional virtio_blk
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index 80548320c3fc..c12ab9db030a 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -13,14 +13,14 @@ include "conf/files.x86"
#
elf-vdso.so.o standard \
dependency "$S/amd64/amd64/sigtramp.S assym.inc $S/conf/vdso_amd64.ldscript $S/tools/amd64_vdso.sh" \
- compile-with "env AWK='${AWK}' NM='${NM}' LD='${LD}' CC='${CC}' DEBUG='${DEBUG}' OBJCOPY='${OBJCOPY}' ELFDUMP='${ELFDUMP}' S='${S}' sh $S/tools/amd64_vdso.sh" \
+ compile-with "env AWK='${AWK}' NM='${NM}' LD='${LD}' CC='${CC}' OBJCOPY='${OBJCOPY}' ELFDUMP='${ELFDUMP}' S='${S}' sh $S/tools/amd64_vdso.sh" \
no-ctfconvert \
no-implicit-rule before-depend \
clean "elf-vdso.so.o elf-vdso.so.1 vdso_offsets.h sigtramp.pico"
#
elf-vdso32.so.o optional compat_freebsd32 \
dependency "$S/amd64/ia32/ia32_sigtramp.S ia32_assym.h $S/conf/vdso_amd64_ia32.ldscript $S/tools/amd64_ia32_vdso.sh" \
- compile-with "env AWK='${AWK}' NM='${NM}' LD='${LD}' CC='${CC}' DEBUG='${DEBUG}' OBJCOPY='${OBJCOPY}' ELFDUMP='${ELFDUMP}' S='${S}' sh $S/tools/amd64_ia32_vdso.sh" \
+ compile-with "env AWK='${AWK}' NM='${NM}' LD='${LD}' CC='${CC}' OBJCOPY='${OBJCOPY}' ELFDUMP='${ELFDUMP}' S='${S}' sh $S/tools/amd64_ia32_vdso.sh" \
no-ctfconvert \
no-implicit-rule before-depend \
clean "elf-vdso32.so.o elf-vdso32.so.1 vdso_ia32_offsets.h ia32_sigtramp.pico"
@@ -419,6 +419,9 @@ contrib/openzfs/module/icp/asm-x86_64/blake3/blake3_avx512.S optional zfs com
contrib/openzfs/module/icp/asm-x86_64/blake3/blake3_sse2.S optional zfs compile-with "${ZFS_S}"
contrib/openzfs/module/icp/asm-x86_64/blake3/blake3_sse41.S optional zfs compile-with "${ZFS_S}"
+# zfs AVX2 implementation of aes-gcm from BoringSSL
+contrib/openzfs/module/icp/asm-x86_64/modes/aesni-gcm-avx2-vaes.S optional zfs compile-with "${ZFS_S}"
+
# zfs sha2 hash support
zfs-sha256-x86_64.o optional zfs \
dependency "$S/contrib/openzfs/module/icp/asm-x86_64/sha2/sha256-x86_64.S" \
diff --git a/sys/conf/files.arm64 b/sys/conf/files.arm64
index 641001efab5e..45966fea8041 100644
--- a/sys/conf/files.arm64
+++ b/sys/conf/files.arm64
@@ -178,6 +178,8 @@ crypto/des/des_enc.c optional netsmb
crypto/openssl/ossl_aarch64.c optional ossl
crypto/openssl/aarch64/chacha-armv8.S optional ossl \
compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} -I$S/crypto/openssl ${WERROR} ${.IMPSRC}"
+crypto/openssl/aarch64/chacha-armv8-sve.S optional ossl \
+ compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} -I$S/crypto/openssl ${WERROR} ${.IMPSRC}"
crypto/openssl/aarch64/poly1305-armv8.S optional ossl \
compile-with "${CC} -c ${CFLAGS:N-mgeneral-regs-only} -I$S/crypto/openssl ${WERROR} ${.IMPSRC}"
crypto/openssl/aarch64/sha1-armv8.S optional ossl \
diff --git a/sys/conf/kern.post.mk b/sys/conf/kern.post.mk
index 0e4ffd92724e..bb3c7af82a4d 100644
--- a/sys/conf/kern.post.mk
+++ b/sys/conf/kern.post.mk
@@ -372,6 +372,19 @@ _ILINKS+= x86
_ILINKS+= i386
.endif
+.if ${MK_REPRODUCIBLE_BUILD} != "no"
+PREFIX_SYSDIR=/usr/src/sys
+PREFIX_OBJDIR=/usr/obj/usr/src/${MACHINE}.${MACHINE_CPUARCH}/sys/${KERN_IDENT}
+CFLAGS+= -ffile-prefix-map=${SYSDIR}=${PREFIX_SYSDIR}
+CFLAGS+= -ffile-prefix-map=${.OBJDIR}=${PREFIX_OBJDIR}
+.if defined(SYSROOT)
+CFLAGS+= -ffile-prefix-map=${SYSROOT}=/sysroot
+.endif
+.else
+PREFIX_SYSDIR=${SYSDIR}
+PREFIX_OBJDIR=${.OBJDIR}
+.endif
+
# Ensure that the link exists without depending on it when it exists.
# Ensure that debug info references the path in the source tree.
.for _link in ${_ILINKS}
@@ -379,9 +392,9 @@ _ILINKS+= i386
${SRCS} ${DEPENDOBJS}: ${_link}
.endif
.if ${_link} == "machine"
-CFLAGS+= -fdebug-prefix-map=./machine=${SYSDIR}/${MACHINE}/include
+CFLAGS+= -fdebug-prefix-map=./machine=${PREFIX_SYSDIR}/${MACHINE}/include
.else
-CFLAGS+= -fdebug-prefix-map=./${_link}=${SYSDIR}/${_link}/include
+CFLAGS+= -fdebug-prefix-map=./${_link}=${PREFIX_SYSDIR}/${_link}/include
.endif
.endfor
@@ -454,7 +467,7 @@ config.o env.o hints.o vers.o vnode_if.o:
NEWVERS_ENV+= MAKE="${MAKE}"
.if ${MK_REPRODUCIBLE_BUILD} != "no"
-NEWVERS_ARGS+= -R
+NEWVERS_ARGS+= -R -d ${PREFIX_OBJDIR}
.endif
vers.c: .NOMETA_CMP $S/conf/newvers.sh $S/sys/param.h ${SYSTEM_DEP:Nvers.*}
${NEWVERS_ENV} sh $S/conf/newvers.sh ${NEWVERS_ARGS} ${KERN_IDENT}
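
The -ffile-prefix-map additions above (and the matching block in kmod.mk below) rewrite the source and object paths the compiler embeds, so objects built from differently located trees compare identical; the new -d flag then feeds the same canonical object directory to newvers.sh. A small C illustration, assuming a compiler that applies the option to __FILE__ as GCC and Clang do:

/*
 * Built as:  cc -ffile-prefix-map=$PWD=/usr/src/sys $PWD/where.c
 * this prints /usr/src/sys/where.c no matter where the tree lives.
 */
#include <stdio.h>

int
main(void)
{
	printf("%s\n", __FILE__);
	return (0);
}
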
diff --git a/sys/conf/kern.pre.mk b/sys/conf/kern.pre.mk
index 78178065e15b..1fcfd6467e7f 100644
--- a/sys/conf/kern.pre.mk
+++ b/sys/conf/kern.pre.mk
@@ -214,7 +214,8 @@ ZFS_CFLAGS+= -I$S/contrib/openzfs/module/icp/include \
.if ${MACHINE_ARCH} == "amd64"
ZFS_CFLAGS+= -D__x86_64 -DHAVE_SSE2 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 \
- -DHAVE_AVX -DHAVE_AVX2 -DHAVE_AVX512F -DHAVE_AVX512VL -DHAVE_AVX512BW
+ -DHAVE_AVX -DHAVE_AVX2 -DHAVE_AVX512F -DHAVE_AVX512VL -DHAVE_AVX512BW \
+ -DHAVE_VAES -DHAVE_VPCLMULQDQ
.endif
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \
diff --git a/sys/conf/kmod.mk b/sys/conf/kmod.mk
index 645c04cdd135..0fd2d4050cf1 100644
--- a/sys/conf/kmod.mk
+++ b/sys/conf/kmod.mk
@@ -303,6 +303,25 @@ all: ${PROG}
beforedepend: ${_ILINKS}
beforebuild: ${_ILINKS}
+.if ${MK_REPRODUCIBLE_BUILD} != "no"
+PREFIX_SYSDIR=/usr/src/sys
+CFLAGS+= -ffile-prefix-map=${SYSDIR}=${PREFIX_SYSDIR}
+.if defined(KERNBUILDDIR)
+PREFIX_KERNBUILDDIR=/usr/obj/usr/src/${MACHINE}.${MACHINE_CPUARCH}/sys/${KERNBUILDDIR:T}
+PREFIX_OBJDIR=${PREFIX_KERNBUILDDIR}/modules/usr/src/sys/modules/${.OBJDIR:T}
+CFLAGS+= -ffile-prefix-map=${KERNBUILDDIR}=${PREFIX_KERNBUILDDIR}
+.else
+PREFIX_OBJDIR=/usr/obj/usr/src/${MACHINE}.${MACHINE_CPUARCH}/sys/modules/${.OBJDIR:T}
+.endif
+CFLAGS+= -ffile-prefix-map=${.OBJDIR}=${PREFIX_OBJDIR}
+.if defined(SYSROOT)
+CFLAGS+= -ffile-prefix-map=${SYSROOT}=/sysroot
+.endif
+.else
+PREFIX_SYSDIR=${SYSDIR}
+PREFIX_OBJDIR=${.OBJDIR}
+.endif
+
# Ensure that the links exist without depending on it when it exists which
# causes all the modules to be rebuilt when the directory pointed to changes.
# Ensure that debug info references the path in the source tree.
@@ -311,9 +330,9 @@ beforebuild: ${_ILINKS}
OBJS_DEPEND_GUESS+= ${_link}
.endif
.if ${_link} == "machine"
-CFLAGS+= -fdebug-prefix-map=./machine=${SYSDIR}/${MACHINE}/include
+CFLAGS+= -fdebug-prefix-map=./machine=${PREFIX_SYSDIR}/${MACHINE}/include
.else
-CFLAGS+= -fdebug-prefix-map=./${_link}=${SYSDIR}/${_link}/include
+CFLAGS+= -fdebug-prefix-map=./${_link}=${PREFIX_SYSDIR}/${_link}/include
.endif
.endfor
diff --git a/sys/conf/newvers.sh b/sys/conf/newvers.sh
index 66926805052c..8b60da95741e 100644
--- a/sys/conf/newvers.sh
+++ b/sys/conf/newvers.sh
@@ -110,14 +110,18 @@ COPYRIGHT="$COPYRIGHT
# We expand include_metadata later since we may set it to the
# future value of modified.
+builddir=$(pwd)
include_metadata=yes
modified=no
-while getopts crRvV: opt; do
+while getopts cd:rRvV: opt; do
case "$opt" in
c)
echo "$COPYRIGHT"
exit 0
;;
+ d)
+ builddir=$OPTARG
+ ;;
r)
include_metadata=no
;;
@@ -187,7 +191,7 @@ fi
touch version
v=$(cat version)
u=${USER:-root}
-d=$(pwd)
+d=$builddir
h=${HOSTNAME:-$(hostname)}
if [ -n "$SOURCE_DATE_EPOCH" ]; then
if ! t=$(date -ur $SOURCE_DATE_EPOCH 2>/dev/null); then
diff --git a/sys/conf/options b/sys/conf/options
index a637b0b74a77..4009ba2b4843 100644
--- a/sys/conf/options
+++ b/sys/conf/options
@@ -235,11 +235,6 @@ TCP_REQUEST_TRK opt_global.h
TCP_ACCOUNTING opt_global.h
TCP_BBR opt_inet.h
TCP_RACK opt_inet.h
-#
-# TCP SaD Detection is an experimental Sack attack Detection (SaD)
-# algorithm that uses "normal" behaviour with SACK's to detect
-# a possible attack. It is strictly experimental at this point.
-#
TURNSTILE_PROFILING
UMTX_PROFILING
UMTX_CHAINS opt_global.h
@@ -1009,6 +1004,7 @@ IICHID_DEBUG opt_hid.h
IICHID_SAMPLING opt_hid.h
HKBD_DFLT_KEYMAP opt_hkbd.h
HIDRAW_MAKE_UHID_ALIAS opt_hid.h
+U2F_MAKE_UHID_ALIAS opt_hid.h
# kenv options
# The early kernel environment (loader environment, config(8)-provided static)
diff --git a/sys/contrib/dev/acpica/changes.txt b/sys/contrib/dev/acpica/changes.txt
index 435540b254f1..4e3cf4f2f41c 100644
--- a/sys/contrib/dev/acpica/changes.txt
+++ b/sys/contrib/dev/acpica/changes.txt
@@ -1,11 +1,29 @@
----------------------------------------
+7 August 2025. Summary of changes for version 20250807:
+
+Major changes:
+
+Added option to skip the global lock for SMM - Huacai Chen
+
+Fixed non-NUL terminated string implementations - Ahmed Salem
+
+Fixed CCEL and CDAT templates - Ahmed Salem
+
+Fixed a major Linux kernel bug (UAF) that was triggered by unequal number of method parameters (definition) vs arguments (invocation) in different places - Peter Williams, Hans de Goede, Rafael Wysocki
+
+Define distinct D3 states (D3Hot and D3Cold) that help clarify the device behavior support - Aymeric Wibo
+
+A few cleanups, improvements to existing table supports, small fixes, spelling corrections etc.
+
+
+----------------------------------------
4 April 2025. Summary of changes for version 20250404:
Major changes:
Update all the copyright continuation year to 2025 in the license header of all files
+Add complete support for 3 new ACPI tables - MRRM, ERDT and RIMT (Tony Luck & V L Sunil)
+Add complete support for 3 new ACPI tables - MRRM,ERDT and RIMT (Tony Luck & V L Sunil)
Add a license file to the project which is a great improvement (Dionna Glaze)
@@ -21,11 +39,11 @@ Major changes:
Fix 2 critical CVE addressing memory leaks - Seunghun Han
-EINJ V2 updates ? Zaid Alali (Ampere Computing)
+EINJ V2 updates - Zaid Alali (Ampere Computing)
-CDAT updates ? Ira Weiny (Intel Corporation)
+CDAT updates - Ira Weiny (Intel Corporation)
-Fix mutex handling, don?t release ones that were never acquired ? Daniil Tatianin
+Fix mutex handling, do not release ones that were never acquired - Daniil Tatianin
Experiment with new tag name format Ryyyy_mm_dd to solve chronological sorting problems
@@ -39,7 +57,7 @@ Fix the acpixf.h file which caused issues for the last release (before this) 202
Fix the pointer offset for the SLIC table
-Verify the local environment and GitHub commits are all in sync which was a problem with the second from last release (before this)20240322 (aka 20240323 – date issue)
+Verify the local environment and GitHub commits are all in sync, which was a problem with the second from last release (before this) 20240322 (aka 20240323 - date issue)
diff --git a/sys/contrib/dev/acpica/common/adisasm.c b/sys/contrib/dev/acpica/common/adisasm.c
index 96cd6c7f5d3c..83125098cbd1 100644
--- a/sys/contrib/dev/acpica/common/adisasm.c
+++ b/sys/contrib/dev/acpica/common/adisasm.c
@@ -481,12 +481,12 @@ AdDisassembleOneTable (
"FieldName : FieldValue (in hex)\n */\n\n");
AcpiDmDumpDataTable (Table);
- fprintf (stderr, "Acpi Data Table [%4.4s] decoded\n",
+ fprintf (stdout, "Acpi Data Table [%4.4s] decoded\n",
AcpiGbl_CDAT ? (char *) AcpiGbl_CDAT : Table->Signature);
if (File)
{
- fprintf (stderr, "Formatted output: %s - %u bytes\n",
+ fprintf (stdout, "Formatted output: %s - %u bytes\n",
DisasmFilename, CmGetFileSize (File));
}
@@ -584,16 +584,16 @@ AdDisassembleOneTable (
AcpiDmDumpDataTable (Table);
- fprintf (stderr, "Disassembly completed\n");
+ fprintf (stdout, "Disassembly completed\n");
if (File)
{
- fprintf (stderr, "ASL Output: %s - %u bytes\n",
+ fprintf (stdout, "ASL Output: %s - %u bytes\n",
DisasmFilename, CmGetFileSize (File));
}
if (AslGbl_MapfileFlag)
{
- fprintf (stderr, "%14s %s - %u bytes\n",
+ fprintf (stdout, "%14s %s - %u bytes\n",
AslGbl_FileDescs[ASL_FILE_MAP_OUTPUT].ShortDescription,
AslGbl_Files[ASL_FILE_MAP_OUTPUT].Filename,
FlGetFileSize (ASL_FILE_MAP_OUTPUT));
@@ -630,7 +630,7 @@ AdReparseOneTable (
ACPI_COMMENT_ADDR_NODE *AddrListHead;
- fprintf (stderr,
+ fprintf (stdout,
"\nFound %u external control methods, "
"reparsing with new information\n",
AcpiDmGetUnresolvedExternalMethodCount ());
diff --git a/sys/contrib/dev/acpica/common/ahtable.c b/sys/contrib/dev/acpica/common/ahtable.c
index 898b2d09f609..587bf61016f0 100644
--- a/sys/contrib/dev/acpica/common/ahtable.c
+++ b/sys/contrib/dev/acpica/common/ahtable.c
@@ -265,6 +265,7 @@ const AH_TABLE AcpiGbl_SupportedTables[] =
{ACPI_SIG_SSDT, "Secondary System Description Table (AML table)"},
{ACPI_SIG_STAO, "Status Override Table"},
{ACPI_SIG_SVKL, "Storage Volume Key Location Table"},
+ {ACPI_SIG_SWFT, "SoundWire File Table"},
{ACPI_SIG_TCPA, "Trusted Computing Platform Alliance Table"},
{ACPI_SIG_TDEL, "TD-Event Log Table"},
{ACPI_SIG_TPM2, "Trusted Platform Module hardware interface Table"},
diff --git a/sys/contrib/dev/acpica/common/dmtable.c b/sys/contrib/dev/acpica/common/dmtable.c
index fcff97a304ae..702f4f7965e4 100644
--- a/sys/contrib/dev/acpica/common/dmtable.c
+++ b/sys/contrib/dev/acpica/common/dmtable.c
@@ -721,6 +721,7 @@ const ACPI_DMTABLE_DATA AcpiDmTableData[] =
{ACPI_SIG_SRAT, NULL, AcpiDmDumpSrat, DtCompileSrat, TemplateSrat},
{ACPI_SIG_STAO, NULL, AcpiDmDumpStao, DtCompileStao, TemplateStao},
{ACPI_SIG_SVKL, AcpiDmTableInfoSvkl, AcpiDmDumpSvkl, DtCompileSvkl, TemplateSvkl},
+ {ACPI_SIG_SWFT, NULL, NULL, NULL, NULL},
{ACPI_SIG_TCPA, NULL, AcpiDmDumpTcpa, DtCompileTcpa, TemplateTcpa},
{ACPI_SIG_TDEL, AcpiDmTableInfoTdel, NULL, NULL, TemplateTdel},
{ACPI_SIG_TPM2, AcpiDmTableInfoTpm2, AcpiDmDumpTpm2, DtCompileTpm2, TemplateTpm2},
diff --git a/sys/contrib/dev/acpica/common/dmtbdump2.c b/sys/contrib/dev/acpica/common/dmtbdump2.c
index 822920d2ea94..d29a60be0f67 100644
--- a/sys/contrib/dev/acpica/common/dmtbdump2.c
+++ b/sys/contrib/dev/acpica/common/dmtbdump2.c
@@ -2637,7 +2637,7 @@ AcpiDmDumpRhct (
RhctIsaString, RhctIsaString->IsaLength, AcpiDmTableInfoRhctIsa1);
if (Subtable->Length > IsaPadOffset)
{
- Status = AcpiDmDumpTable (Table->Length, Offset + SubtableOffset,
+ Status = AcpiDmDumpTable (Table->Length, Offset + IsaPadOffset,
ACPI_ADD_PTR (UINT8, Subtable, IsaPadOffset),
(Subtable->Length - IsaPadOffset), AcpiDmTableInfoRhctIsaPad);
}
diff --git a/sys/contrib/dev/acpica/common/dmtbinfo2.c b/sys/contrib/dev/acpica/common/dmtbinfo2.c
index 9ecf877fcfb0..b7c6d3b8d536 100644
--- a/sys/contrib/dev/acpica/common/dmtbinfo2.c
+++ b/sys/contrib/dev/acpica/common/dmtbinfo2.c
@@ -2180,7 +2180,7 @@ ACPI_DMTABLE_INFO AcpiDmTableInfoRhct[] =
ACPI_DMTABLE_INFO AcpiDmTableInfoRhctNodeHdr[] =
{
{ACPI_DMT_RHCT, ACPI_RHCTH_OFFSET (Type), "Subtable Type", 0},
- {ACPI_DMT_UINT16, ACPI_RHCTH_OFFSET (Length), "Length", 0},
+ {ACPI_DMT_UINT16, ACPI_RHCTH_OFFSET (Length), "Length", DT_LENGTH},
{ACPI_DMT_UINT16, ACPI_RHCTH_OFFSET (Revision), "Revision", 0},
ACPI_DMT_TERMINATOR
};
diff --git a/sys/contrib/dev/acpica/common/dmtbinfo3.c b/sys/contrib/dev/acpica/common/dmtbinfo3.c
index 75b580e0d890..0935fc86aff9 100644
--- a/sys/contrib/dev/acpica/common/dmtbinfo3.c
+++ b/sys/contrib/dev/acpica/common/dmtbinfo3.c
@@ -200,7 +200,7 @@ ACPI_DMTABLE_INFO AcpiDmTableInfoCcel[] =
{
{ACPI_DMT_UINT8, ACPI_CCEL_OFFSET (CCType), "CC Type", 0},
{ACPI_DMT_UINT8, ACPI_CCEL_OFFSET (CCSubType), "CC Sub Type", 0},
- {ACPI_DMT_UINT32, ACPI_CCEL_OFFSET (Reserved), "Reserved", 0},
+ {ACPI_DMT_UINT16, ACPI_CCEL_OFFSET (Reserved), "Reserved", 0},
{ACPI_DMT_UINT64, ACPI_CCEL_OFFSET (LogAreaMinimumLength), "Log Area Minimum Length", 0},
{ACPI_DMT_UINT64, ACPI_CCEL_OFFSET (LogAreaStartAddress), "Log Area Start Address", 0},
ACPI_DMT_TERMINATOR
diff --git a/sys/contrib/dev/acpica/compiler/aslanalyze.c b/sys/contrib/dev/acpica/compiler/aslanalyze.c
index 17e2674817a9..625611a630de 100644
--- a/sys/contrib/dev/acpica/compiler/aslanalyze.c
+++ b/sys/contrib/dev/acpica/compiler/aslanalyze.c
@@ -572,10 +572,22 @@ ApCheckForGpeNameConflict (
ACPI_PARSE_OBJECT *NextOp;
UINT32 GpeNumber;
char Name[ACPI_NAMESEG_SIZE + 1];
- char Target[ACPI_NAMESEG_SIZE];
+ char Target[ACPI_NAMESEG_SIZE] ACPI_NONSTRING;
- /* Need a null-terminated string version of NameSeg */
+    /*
+ * Need a null-terminated string version of NameSeg
+ *
+ * NOTE: during a review on Name[ACPI_NAMESEG_SIZE + 1] having an extra
+ * byte[1], compiler testing exhibited a difference in behavior between
+ * GCC and Clang[2] (at least; MSVC may also exhibit the same) in
+ * how optimization is done. The extra byte is needed to ensure
+ * the signature does not get mangled, subsequently avoiding
+ * GpeNumber being a completely different return value from strtoul.
+ *
+ * [1] https://github.com/acpica/acpica/pull/1019#discussion_r2058687704
+ * [2] https://github.com/acpica/acpica/pull/1019#discussion_r2061953039
+ */
ACPI_MOVE_32_TO_32 (Name, Op->Asl.NameSeg);
Name[ACPI_NAMESEG_SIZE] = 0;
diff --git a/sys/contrib/dev/acpica/compiler/aslrestype2s.c b/sys/contrib/dev/acpica/compiler/aslrestype2s.c
index 096862290384..f47402d4e025 100644
--- a/sys/contrib/dev/acpica/compiler/aslrestype2s.c
+++ b/sys/contrib/dev/acpica/compiler/aslrestype2s.c
@@ -1469,7 +1469,7 @@ RsDoCsi2SerialBusDescriptor (
case 2: /* Local Port Instance [Integer] (_PRT) */
- RsSetFlagBits16 ((UINT16 *) &Descriptor->Csi2SerialBus.TypeSpecificFlags, InitializerOp, 0, 0);
+ RsSetFlagBits16 ((UINT16 *) &Descriptor->Csi2SerialBus.TypeSpecificFlags, InitializerOp, 2, 0);
RsCreateMultiBitField (InitializerOp, ACPI_RESTAG_LOCALPORT,
CurrentByteOffset + ASL_RESDESC_OFFSET (Csi2SerialBus.TypeSpecificFlags), 2, 6);
break;
diff --git a/sys/contrib/dev/acpica/compiler/dttable2.c b/sys/contrib/dev/acpica/compiler/dttable2.c
index 6203a382ad62..754880346299 100644
--- a/sys/contrib/dev/acpica/compiler/dttable2.c
+++ b/sys/contrib/dev/acpica/compiler/dttable2.c
@@ -1929,24 +1929,30 @@ DtCompileRhct (
{
ACPI_STATUS Status;
ACPI_RHCT_NODE_HEADER *RhctHeader;
- ACPI_RHCT_HART_INFO *RhctHartInfo = NULL;
+ ACPI_RHCT_HART_INFO *RhctHartInfo;
DT_SUBTABLE *Subtable;
DT_SUBTABLE *ParentTable;
ACPI_DMTABLE_INFO *InfoTable;
DT_FIELD **PFieldList = (DT_FIELD **) List;
DT_FIELD *SubtableStart;
+ ACPI_TABLE_RHCT *Table;
+ BOOLEAN FirstNode = TRUE;
/* Compile the main table */
+ ParentTable = DtPeekSubtable ();
Status = DtCompileTable (PFieldList, AcpiDmTableInfoRhct,
&Subtable);
if (ACPI_FAILURE (Status))
{
return (Status);
}
+ DtInsertSubtable (ParentTable, Subtable);
+ Table = ACPI_CAST_PTR (ACPI_TABLE_RHCT, ParentTable->Buffer);
+ Table->NodeCount = 0;
+ Table->NodeOffset = sizeof (ACPI_TABLE_RHCT);
- ParentTable = DtPeekSubtable ();
while (*PFieldList)
{
SubtableStart = *PFieldList;
@@ -1961,7 +1967,10 @@ DtCompileRhct (
}
DtInsertSubtable (ParentTable, Subtable);
RhctHeader = ACPI_CAST_PTR (ACPI_RHCT_NODE_HEADER, Subtable->Buffer);
- RhctHeader->Length = (UINT16)(Subtable->Length);
+
+ DtPushSubtable (Subtable);
+ ParentTable = DtPeekSubtable ();
+ Table->NodeCount++;
switch (RhctHeader->Type)
{
@@ -1999,37 +2008,54 @@ DtCompileRhct (
return (Status);
}
DtInsertSubtable (ParentTable, Subtable);
- RhctHeader->Length += (UINT16)(Subtable->Length);
+ if (FirstNode)
+ {
+            Table->NodeOffset = ACPI_PTR_DIFF (ParentTable->Buffer, Table);
+ FirstNode = FALSE;
+ }
/* Compile RHCT subtable additionals */
switch (RhctHeader->Type)
{
- case ACPI_RHCT_NODE_TYPE_HART_INFO:
+ case ACPI_RHCT_NODE_TYPE_ISA_STRING:
- RhctHartInfo = ACPI_SUB_PTR (ACPI_RHCT_HART_INFO,
- Subtable->Buffer, sizeof (ACPI_RHCT_NODE_HEADER));
- if (RhctHartInfo)
+ /*
+ * Padding - Variable-length data
+ * Optionally allows the padding of the ISA string to be used
+ * for filling this field.
+ */
+ Status = DtCompileTable (PFieldList, AcpiDmTableInfoRhctIsaPad,
+ &Subtable);
+ if (ACPI_FAILURE (Status))
+ {
+ return (Status);
+ }
+ if (Subtable)
{
+ DtInsertSubtable (ParentTable, Subtable);
+ }
+ break;
- RhctHartInfo->NumOffsets = 0;
- while (*PFieldList)
- {
- Status = DtCompileTable (PFieldList,
- AcpiDmTableInfoRhctHartInfo2, &Subtable);
- if (ACPI_FAILURE (Status))
- {
- return (Status);
- }
- if (!Subtable)
- {
- break;
- }
+ case ACPI_RHCT_NODE_TYPE_HART_INFO:
- DtInsertSubtable (ParentTable, Subtable);
- RhctHeader->Length += (UINT16)(Subtable->Length);
- RhctHartInfo->NumOffsets++;
+ RhctHartInfo = ACPI_CAST_PTR (ACPI_RHCT_HART_INFO,
+ Subtable->Buffer);
+ RhctHartInfo->NumOffsets = 0;
+ while (*PFieldList)
+ {
+ Status = DtCompileTable (PFieldList,
+ AcpiDmTableInfoRhctHartInfo2, &Subtable);
+ if (ACPI_FAILURE (Status))
+ {
+ return (Status);
}
+ if (!Subtable)
+ {
+ break;
+ }
+ DtInsertSubtable (ParentTable, Subtable);
+ RhctHartInfo->NumOffsets++;
}
break;
@@ -2037,6 +2063,9 @@ DtCompileRhct (
break;
}
+
+ DtPopSubtable ();
+ ParentTable = DtPeekSubtable ();
}
return (AE_OK);
diff --git a/sys/contrib/dev/acpica/compiler/dttemplate.c b/sys/contrib/dev/acpica/compiler/dttemplate.c
index 67b13bb82d1b..d7140712d4e6 100644
--- a/sys/contrib/dev/acpica/compiler/dttemplate.c
+++ b/sys/contrib/dev/acpica/compiler/dttemplate.c
@@ -255,7 +255,7 @@ DtCreateTemplates (
if (AcpiGbl_Optind < 3)
{
- fprintf (stderr, "Creating default template: [DSDT]\n");
+ fprintf (stdout, "Creating default template: [DSDT]\n");
Status = DtCreateOneTemplateFile (ACPI_SIG_DSDT, 0);
goto Exit;
}
@@ -411,7 +411,7 @@ DtCreateAllTemplates (
ACPI_STATUS Status;
- fprintf (stderr, "Creating all supported Template files\n");
+ fprintf (stdout, "Creating all supported Template files\n");
/* Walk entire ACPI table data structure */
@@ -421,8 +421,13 @@ DtCreateAllTemplates (
if (TableData->Template)
{
- Status = DtCreateOneTemplate (TableData->Signature,
- 0, TableData);
+ if (ACPI_COMPARE_NAMESEG (TableData->Signature, ACPI_SIG_CDAT))
+ /* Special handling of CDAT */
+ Status = DtCreateOneTemplate (TableData->Signature,
+ 0, NULL);
+ else
+ Status = DtCreateOneTemplate (TableData->Signature,
+ 0, TableData);
if (ACPI_FAILURE (Status))
{
return (Status);
@@ -563,7 +568,7 @@ DtCreateOneTemplate (
}
else
{
- /* Special ACPI tables - DSDT, SSDT, OSDT, FACS, RSDP */
+ /* Special ACPI tables - DSDT, SSDT, OSDT, FACS, RSDP, CDAT */
AcpiOsPrintf (" (AML byte code table)\n");
AcpiOsPrintf (" */\n");
@@ -621,6 +626,11 @@ DtCreateOneTemplate (
AcpiDmDumpDataTable (ACPI_CAST_PTR (ACPI_TABLE_HEADER,
TemplateRsdp));
}
+ else if (ACPI_COMPARE_NAMESEG (Signature, ACPI_SIG_CDAT))
+ {
+ AcpiDmDumpCdat (ACPI_CAST_PTR (ACPI_TABLE_HEADER,
+ TemplateCdat));
+ }
else
{
fprintf (stderr,
@@ -632,14 +642,14 @@ DtCreateOneTemplate (
if (TableCount == 0)
{
- fprintf (stderr,
+ fprintf (stdout,
"Created ACPI table template for [%4.4s], "
"written to \"%s\"\n",
Signature, DisasmFilename);
}
else
{
- fprintf (stderr,
+ fprintf (stdout,
"Created ACPI table templates for [%4.4s] "
"and %u [SSDT] in same file, written to \"%s\"\n",
Signature, TableCount, DisasmFilename);
diff --git a/sys/contrib/dev/acpica/compiler/dttemplate.h b/sys/contrib/dev/acpica/compiler/dttemplate.h
index 0fdd90f73a23..51a34be5c36b 100644
--- a/sys/contrib/dev/acpica/compiler/dttemplate.h
+++ b/sys/contrib/dev/acpica/compiler/dttemplate.h
@@ -389,7 +389,7 @@ const unsigned char TemplateBoot[] =
const unsigned char TemplateCcel[] =
{
0x43,0x43,0x45,0x4C,0x38,0x00,0x00,0x00, /* 00000000 "CCEL8..." */
- 0x04,0x1C,0x49,0x4E,0x54,0x45,0x4C,0x20, /* 00000008 "..INTEL " */
+ 0x04,0x2E,0x49,0x4E,0x54,0x45,0x4C,0x20, /* 00000008 "..INTEL " */
0x54,0x65,0x6D,0x70,0x6C,0x61,0x74,0x65, /* 00000010 "Template" */
0x00,0x00,0x00,0x00,0x49,0x4E,0x54,0x4C, /* 00000018 "....INTL" */
0x30,0x09,0x21,0x20,0x00,0x00,0x00,0x00, /* 00000020 "0.! ...." */
@@ -1951,25 +1951,25 @@ const unsigned char TemplateRgrt[] =
const unsigned char TemplateRhct[] =
{
- 0x52,0x48,0x43,0x54,0x96,0x00,0x00,0x00, /* 00000000 "RHCT|..." */
- 0x01,0x24,0x4F,0x45,0x4D,0x43,0x41,0x00, /* 00000008 "..OEMCA." */
+ 0x52,0x48,0x43,0x54,0x96,0x00,0x00,0x00, /* 00000000 "RHCT...." */
+ 0x01,0x6D,0x4F,0x45,0x4D,0x43,0x41,0x00, /* 00000008 ".mOEMCA." */
0x54,0x45,0x4D,0x50,0x4C,0x41,0x54,0x45, /* 00000010 "TEMPLATE" */
0x01,0x00,0x00,0x00,0x49,0x4E,0x54,0x4C, /* 00000018 "....INTL" */
- 0x28,0x09,0x22,0x20,0x00,0x00,0x00,0x00, /* 00000020 "... ...." */
+ 0x04,0x04,0x25,0x20,0x00,0x00,0x00,0x00, /* 00000020 "..% ...." */
0x80,0x96,0x98,0x00,0x00,0x00,0x00,0x00, /* 00000028 "........" */
- 0x02,0x00,0x00,0x00,0x38,0x00,0x00,0x00, /* 00000030 "....8..." */
- 0x00,0x00,0x34,0x00,0x01,0x00,0x2B,0x00, /* 00000038 "..4...*." */
+ 0x04,0x00,0x00,0x00,0x38,0x00,0x00,0x00, /* 00000030 "....8..." */
+ 0x00,0x00,0x34,0x00,0x01,0x00,0x2B,0x00, /* 00000038 "..4...+." */
0x72,0x76,0x36,0x34,0x69,0x6D,0x61,0x66, /* 00000040 "rv64imaf" */
0x64,0x63,0x68,0x5F,0x7A,0x69,0x63,0x73, /* 00000048 "dch_zics" */
0x72,0x5F,0x7A,0x69,0x66,0x65,0x6E,0x63, /* 00000050 "r_zifenc" */
0x65,0x69,0x5F,0x7A,0x62,0x61,0x5F,0x7A, /* 00000058 "ei_zba_z" */
0x62,0x62,0x5F,0x7A,0x62,0x63,0x5F,0x7A, /* 00000060 "bb_zbc_z" */
- 0x62,0x73,0x00,0x00,0xFF,0xFF,0x18,0x00, /* 00000068 "bs......" */
- 0x01,0x00,0x03,0x00,0x00,0x00,0x00,0x00, /* 00000070 "........" */
- 0x38,0x00,0x00,0x00,0x7c,0x00,0x00,0x00, /* 00000078 "........" */
- 0x8E,0x00,0x00,0x00,0x01,0x00,0x0A,0x00, /* 00000080 "........" */
- 0x01,0x00,0x00,0x06,0x06,0x06,0x02,0x00, /* 00000088 "........" */
- 0x08,0x00,0x01,0x00,0x00,0x02 /* 00000090 "........" */
+ 0x62,0x73,0x00,0x00,0x01,0x00,0x0A,0x00, /* 00000068 "bs......" */
+ 0x01,0x00,0x00,0x06,0x06,0x06,0x02,0x00, /* 00000070 "........" */
+ 0x08,0x00,0x01,0x00,0x00,0x02,0xFF,0xFF, /* 00000078 "........" */
+ 0x18,0x00,0x01,0x00,0x03,0x00,0x00,0x00, /* 00000080 "........" */
+ 0x00,0x00,0x3B,0x00,0x00,0x00,0x6C,0x00, /* 00000088 "..;...l." */
+ 0x00,0x00,0x76,0x00,0x00,0x00 /* 00000090 "..v..." */
};
const unsigned char TemplateRimt[] =
diff --git a/sys/contrib/dev/acpica/compiler/dtutils.c b/sys/contrib/dev/acpica/compiler/dtutils.c
index f2463f74b8fc..18ea18cefdd6 100644
--- a/sys/contrib/dev/acpica/compiler/dtutils.c
+++ b/sys/contrib/dev/acpica/compiler/dtutils.c
@@ -623,6 +623,7 @@ DtGetFieldLength (
case ACPI_DMT_NFIT:
case ACPI_DMT_PCI_PATH:
case ACPI_DMT_PHAT:
+ case ACPI_DMT_RHCT:
ByteLength = 2;
break;
diff --git a/sys/contrib/dev/acpica/components/disassembler/dmresrcl2.c b/sys/contrib/dev/acpica/components/disassembler/dmresrcl2.c
index dd8cf4889885..551cf8178d94 100644
--- a/sys/contrib/dev/acpica/components/disassembler/dmresrcl2.c
+++ b/sys/contrib/dev/acpica/components/disassembler/dmresrcl2.c
@@ -778,7 +778,7 @@ AcpiDmCsi2SerialBusDescriptor (
AcpiOsPrintf (" 0x%2.2X, 0x%2.2X,\n",
Resource->Csi2SerialBus.TypeSpecificFlags & 0x03,
- Resource->Csi2SerialBus.TypeSpecificFlags & 0xFC);
+ (Resource->Csi2SerialBus.TypeSpecificFlags & 0xFC) >> 2);
/* ResourceSource is a required field */
diff --git a/sys/contrib/dev/acpica/components/dispatcher/dsmethod.c b/sys/contrib/dev/acpica/components/dispatcher/dsmethod.c
index 8b6efc070b1b..becdb95f8b83 100644
--- a/sys/contrib/dev/acpica/components/dispatcher/dsmethod.c
+++ b/sys/contrib/dev/acpica/components/dispatcher/dsmethod.c
@@ -646,8 +646,6 @@ AcpiDsCallControlMethod (
ACPI_WALK_STATE *NextWalkState = NULL;
ACPI_OPERAND_OBJECT *ObjDesc;
ACPI_EVALUATE_INFO *Info;
- UINT32 i;
-
ACPI_FUNCTION_TRACE_PTR (DsCallControlMethod, ThisWalkState);
@@ -670,6 +668,23 @@ AcpiDsCallControlMethod (
return_ACPI_STATUS (AE_NULL_OBJECT);
}
+ if (ThisWalkState->NumOperands < ObjDesc->Method.ParamCount)
+ {
+ ACPI_ERROR ((AE_INFO, "Missing argument(s) for method [%4.4s]",
+ AcpiUtGetNodeName (MethodNode)));
+
+ return_ACPI_STATUS (AE_AML_TOO_FEW_ARGUMENTS);
+ }
+
+ else if (ThisWalkState->NumOperands > ObjDesc->Method.ParamCount)
+ {
+ ACPI_ERROR ((AE_INFO, "Too many arguments for method [%4.4s]",
+ AcpiUtGetNodeName (MethodNode)));
+
+ return_ACPI_STATUS (AE_AML_TOO_MANY_ARGUMENTS);
+ }
+
+
/* Init for new method, possibly wait on method mutex */
Status = AcpiDsBeginMethodExecution (
@@ -726,15 +741,7 @@ AcpiDsCallControlMethod (
* Delete the operands on the previous walkstate operand stack
* (they were copied to new objects)
*/
- for (i = 0; i < ObjDesc->Method.ParamCount; i++)
- {
- AcpiUtRemoveReference (ThisWalkState->Operands [i]);
- ThisWalkState->Operands [i] = NULL;
- }
-
- /* Clear the operand stack */
-
- ThisWalkState->NumOperands = 0;
+ AcpiDsClearOperands (ThisWalkState);
ACPI_DEBUG_PRINT ((ACPI_DB_DISPATCH,
"**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
diff --git a/sys/contrib/dev/acpica/components/dispatcher/dsmthdat.c b/sys/contrib/dev/acpica/components/dispatcher/dsmthdat.c
index 42e1aa505d02..2c45e8c91f57 100644
--- a/sys/contrib/dev/acpica/components/dispatcher/dsmthdat.c
+++ b/sys/contrib/dev/acpica/components/dispatcher/dsmthdat.c
@@ -357,6 +357,7 @@ AcpiDsMethodDataInitArgs (
Index++;
}
+ AcpiExTraceArgs(Params, Index);
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC, "%u args passed to method\n", Index));
return_ACPI_STATUS (AE_OK);
diff --git a/sys/contrib/dev/acpica/components/events/evglock.c b/sys/contrib/dev/acpica/components/events/evglock.c
index 872e7b499a8f..395ca14fb315 100644
--- a/sys/contrib/dev/acpica/components/events/evglock.c
+++ b/sys/contrib/dev/acpica/components/events/evglock.c
@@ -195,6 +195,11 @@ AcpiEvInitGlobalLockHandler (
return_ACPI_STATUS (AE_OK);
}
+ if (!AcpiGbl_UseGlobalLock)
+ {
+ return_ACPI_STATUS (AE_OK);
+ }
+
/* Attempt installation of the global lock handler */
Status = AcpiInstallFixedEventHandler (ACPI_EVENT_GLOBAL,
diff --git a/sys/contrib/dev/acpica/components/executer/extrace.c b/sys/contrib/dev/acpica/components/executer/extrace.c
index 0eceb0ffccb1..d54d4908ca65 100644
--- a/sys/contrib/dev/acpica/components/executer/extrace.c
+++ b/sys/contrib/dev/acpica/components/executer/extrace.c
@@ -269,6 +269,68 @@ AcpiExGetTraceEventName (
#endif
+/*******************************************************************************
+ *
+ * FUNCTION: AcpiExTraceArgs
+ *
+ * PARAMETERS: Params - AML method arguments
+ * Count - number of method arguments
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Trace the arguments passed to a control method
+ *
+ ******************************************************************************/
+
+void
+AcpiExTraceArgs(ACPI_OPERAND_OBJECT **Params, UINT32 Count)
+{
+ UINT32 i;
+
+ ACPI_FUNCTION_NAME(ExTraceArgs);
+
+ for (i = 0; i < Count; i++)
+ {
+ ACPI_OPERAND_OBJECT *obj_desc = Params[i];
+
+ if (!i)
+ {
+ ACPI_DEBUG_PRINT((ACPI_DB_TRACE_POINT, " "));
+ }
+
+ switch (obj_desc->Common.Type)
+ {
+ case ACPI_TYPE_INTEGER:
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "%lx", obj_desc->Integer.Value));
+ break;
+
+ case ACPI_TYPE_STRING:
+ if (!obj_desc->String.Length)
+ {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "NULL"));
+ break;
+ }
+ if (ACPI_IS_DEBUG_ENABLED(ACPI_LV_TRACE_POINT, _COMPONENT))
+ {
+ AcpiUtPrintString(obj_desc->String.Pointer, ACPI_UINT8_MAX);
+ }
+ break;
+
+ default:
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "Unknown"));
+ break;
+ }
+
+ if ((i + 1) == Count)
+ {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, "\n"));
+ }
+ else
+ {
+ ACPI_DEBUG_PRINT_RAW((ACPI_DB_TRACE_POINT, ", "));
+ }
+ }
+}
/*******************************************************************************
*
@@ -299,9 +361,9 @@ AcpiExTracePoint (
if (Pathname)
{
ACPI_DEBUG_PRINT ((ACPI_DB_TRACE_POINT,
- "%s %s [0x%p:%s] execution.\n",
+ "%s %s [%s] execution.\n",
AcpiExGetTraceEventName (Type), Begin ? "Begin" : "End",
- Aml, Pathname));
+ Pathname));
}
else
{
diff --git a/sys/contrib/dev/acpica/components/parser/psopinfo.c b/sys/contrib/dev/acpica/components/parser/psopinfo.c
index 21c2b831ef24..1db32f4e8246 100644
--- a/sys/contrib/dev/acpica/components/parser/psopinfo.c
+++ b/sys/contrib/dev/acpica/components/parser/psopinfo.c
@@ -180,8 +180,8 @@ const ACPI_OPCODE_INFO *
AcpiPsGetOpcodeInfo (
UINT16 Opcode)
{
-#ifdef ACPI_DEBUG_OUTPUT
- const char *OpcodeName = "Unknown AML opcode";
+#if defined ACPI_ASL_COMPILER && defined ACPI_DEBUG_OUTPUT
+ const char *OpcodeName = "Unknown AML opcode";
#endif
ACPI_FUNCTION_NAME (PsGetOpcodeInfo);
@@ -207,7 +207,7 @@ AcpiPsGetOpcodeInfo (
#if defined ACPI_ASL_COMPILER && defined ACPI_DEBUG_OUTPUT
#include <contrib/dev/acpica/compiler/asldefine.h>
-
+
switch (Opcode)
{
case AML_RAW_DATA_BYTE:
@@ -249,12 +249,12 @@ AcpiPsGetOpcodeInfo (
default:
break;
}
-#endif
/* Unknown AML opcode */
ACPI_DEBUG_PRINT ((ACPI_DB_EXEC,
"%s [%4.4X]\n", OpcodeName, Opcode));
+#endif
return (&AcpiGbl_AmlOpInfo [_UNK]);
}
diff --git a/sys/contrib/dev/acpica/components/tables/tbprint.c b/sys/contrib/dev/acpica/components/tables/tbprint.c
index 7211673c42a2..8b812a890a07 100644
--- a/sys/contrib/dev/acpica/components/tables/tbprint.c
+++ b/sys/contrib/dev/acpica/components/tables/tbprint.c
@@ -279,6 +279,14 @@ AcpiTbPrintTableHeader (
ACPI_CAST_PTR (ACPI_TABLE_RSDP, Header)->Revision,
LocalHeader.OemId));
}
+ else if (AcpiGbl_CDAT && !AcpiUtValidNameseg (Header->Signature))
+ {
+ /* CDAT does not use the common ACPI table header */
+
+ ACPI_INFO (("%-4.4s 0x%8.8X%8.8X %06X",
+ ACPI_SIG_CDAT, ACPI_FORMAT_UINT64 (Address),
+ ACPI_CAST_PTR (ACPI_TABLE_CDAT, Header)->Length));
+ }
else
{
/* Standard ACPI table with full common header */
diff --git a/sys/contrib/dev/acpica/components/utilities/utnonansi.c b/sys/contrib/dev/acpica/components/utilities/utnonansi.c
index bfbe1194ceae..f8b3a29e3283 100644
--- a/sys/contrib/dev/acpica/components/utilities/utnonansi.c
+++ b/sys/contrib/dev/acpica/components/utilities/utnonansi.c
@@ -353,7 +353,7 @@ AcpiUtSafeStrncpy (
{
/* Always terminate destination string */
- memcpy (Dest, Source, DestSize);
+ strncpy (Dest, Source, DestSize);
Dest[DestSize - 1] = 0;
}
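
The one-line change above matters because memcpy unconditionally reads
DestSize bytes from Source and so overruns a Source buffer that is shorter
than the destination, while strncpy stops copying at the source NUL byte.
A small standalone sketch of the fixed pattern (illustrative names):

#include <stdio.h>
#include <string.h>

static void
safe_strncpy (char *Dest, const char *Source, size_t DestSize)
{
    strncpy (Dest, Source, DestSize);   /* never reads past Source's NUL */
    Dest[DestSize - 1] = 0;             /* always terminate destination */
}

int
main (void)
{
    char Buffer[8];

    safe_strncpy (Buffer, "hi", sizeof (Buffer));            /* no overread */
    safe_strncpy (Buffer, "a long string", sizeof (Buffer)); /* truncated */
    printf ("%s\n", Buffer);            /* prints "a long " */
    return (0);
}
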
diff --git a/sys/contrib/dev/acpica/include/acdebug.h b/sys/contrib/dev/acpica/include/acdebug.h
index e335752148b9..63f39051a8ac 100644
--- a/sys/contrib/dev/acpica/include/acdebug.h
+++ b/sys/contrib/dev/acpica/include/acdebug.h
@@ -187,7 +187,7 @@ typedef struct acpi_db_execute_walk
{
UINT32 Count;
UINT32 MaxCount;
- char NameSeg[ACPI_NAMESEG_SIZE + 1] ACPI_NONSTRING;
+ char NameSeg[ACPI_NAMESEG_SIZE + 1];
} ACPI_DB_EXECUTE_WALK;
diff --git a/sys/contrib/dev/acpica/include/acexcep.h b/sys/contrib/dev/acpica/include/acexcep.h
index 57f98ab4540f..7216e0d49148 100644
--- a/sys/contrib/dev/acpica/include/acexcep.h
+++ b/sys/contrib/dev/acpica/include/acexcep.h
@@ -322,8 +322,11 @@ typedef struct acpi_exception_info
#define AE_AML_TARGET_TYPE EXCEP_AML (0x0023)
#define AE_AML_PROTOCOL EXCEP_AML (0x0024)
#define AE_AML_BUFFER_LENGTH EXCEP_AML (0x0025)
+#define AE_AML_TOO_FEW_ARGUMENTS EXCEP_AML (0x0026)
+#define AE_AML_TOO_MANY_ARGUMENTS EXCEP_AML (0x0027)
-#define AE_CODE_AML_MAX 0x0025
+
+#define AE_CODE_AML_MAX 0x0027
/*
@@ -456,7 +459,9 @@ static const ACPI_EXCEPTION_INFO AcpiGbl_ExceptionNames_Aml[] =
EXCEP_TXT ("AE_AML_UNINITIALIZED_NODE", "A namespace node is uninitialized or unresolved"),
EXCEP_TXT ("AE_AML_TARGET_TYPE", "A target operand of an incorrect type was encountered"),
EXCEP_TXT ("AE_AML_PROTOCOL", "Violation of a fixed ACPI protocol"),
- EXCEP_TXT ("AE_AML_BUFFER_LENGTH", "The length of the buffer is invalid/incorrect")
+ EXCEP_TXT ("AE_AML_BUFFER_LENGTH", "The length of the buffer is invalid/incorrect"),
+ EXCEP_TXT ("AE_AML_TOO_FEW_ARGUMENTS", "There are fewer than expected method arguments"),
+ EXCEP_TXT ("AE_AML_TOO_MANY_ARGUMENTS", "There are too many arguments for this method")
};
static const ACPI_EXCEPTION_INFO AcpiGbl_ExceptionNames_Ctrl[] =
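
A hedged sketch of how a host might print the new exception texts, assuming
the ACPICA public headers and library are available; AcpiFormatException is
ACPICA's public status-to-string interface:

#include "acpi.h"                       /* ACPICA public headers */
#include <stdio.h>

int
main (void)
{
    printf ("%s\n", AcpiFormatException (AE_AML_TOO_FEW_ARGUMENTS));
    printf ("%s\n", AcpiFormatException (AE_AML_TOO_MANY_ARGUMENTS));
    return (0);
}
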
diff --git a/sys/contrib/dev/acpica/include/acinterp.h b/sys/contrib/dev/acpica/include/acinterp.h
index 74166384f172..b7f9e8f615e4 100644
--- a/sys/contrib/dev/acpica/include/acinterp.h
+++ b/sys/contrib/dev/acpica/include/acinterp.h
@@ -280,6 +280,10 @@ AcpiExTracePoint (
UINT8 *Aml,
char *Pathname);
+void
+AcpiExTraceArgs(
+ ACPI_OPERAND_OBJECT **Params,
+ UINT32 Count);
/*
* exfield - ACPI AML (p-code) execution - field manipulation
diff --git a/sys/contrib/dev/acpica/include/acpixf.h b/sys/contrib/dev/acpica/include/acpixf.h
index 193b0e6a70dc..b5961e21bb9b 100644
--- a/sys/contrib/dev/acpica/include/acpixf.h
+++ b/sys/contrib/dev/acpica/include/acpixf.h
@@ -154,7 +154,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20250404
+#define ACPI_CA_VERSION 0x20250807
#include <contrib/dev/acpica/include/acconfig.h>
#include <contrib/dev/acpica/include/actypes.h>
@@ -358,6 +358,12 @@ ACPI_INIT_GLOBAL (UINT8, AcpiGbl_OsiData, 0);
ACPI_INIT_GLOBAL (BOOLEAN, AcpiGbl_ReducedHardware, FALSE);
/*
+ * The ACPI Global Lock is mainly used on systems with SMM, so systems
+ * without SMM (such as LoongArch) may not have or use the Global Lock.
+ */
+ACPI_INIT_GLOBAL (BOOLEAN, AcpiGbl_UseGlobalLock, TRUE);
+
+/*
* Maximum timeout for While() loop iterations before forced method abort.
* This mechanism is intended to prevent infinite loops during interpreter
* execution within a host kernel.
diff --git a/sys/contrib/dev/acpica/include/actbl.h b/sys/contrib/dev/acpica/include/actbl.h
index eafd5d8a0f8b..ae52bd452c90 100644
--- a/sys/contrib/dev/acpica/include/actbl.h
+++ b/sys/contrib/dev/acpica/include/actbl.h
@@ -220,7 +220,7 @@ typedef struct acpi_table_header
char OemId[ACPI_OEM_ID_SIZE] ACPI_NONSTRING; /* ASCII OEM identification */
char OemTableId[ACPI_OEM_TABLE_ID_SIZE] ACPI_NONSTRING; /* ASCII OEM table identification */
UINT32 OemRevision; /* OEM revision number */
- char AslCompilerId[ACPI_NAMESEG_SIZE]; /* ASCII ASL compiler vendor ID */
+ char AslCompilerId[ACPI_NAMESEG_SIZE] ACPI_NONSTRING; /* ASCII ASL compiler vendor ID */
UINT32 AslCompilerRevision; /* ASL compiler version */
} ACPI_TABLE_HEADER;
diff --git a/sys/contrib/dev/acpica/include/actbl1.h b/sys/contrib/dev/acpica/include/actbl1.h
index 876b721068c6..ec04f0a0ab9f 100644
--- a/sys/contrib/dev/acpica/include/actbl1.h
+++ b/sys/contrib/dev/acpica/include/actbl1.h
@@ -262,7 +262,7 @@ typedef struct acpi_whea_header
/* Larger subtable header (when Length can exceed 255) */
-typedef struct acpi_subtable_header_16
+typedef struct acpi_subtbl_hdr_16
{
UINT16 Type;
UINT16 Length;
diff --git a/sys/contrib/dev/acpica/include/actbl2.h b/sys/contrib/dev/acpica/include/actbl2.h
index 4899929b2d45..a74b6d555a3a 100644
--- a/sys/contrib/dev/acpica/include/actbl2.h
+++ b/sys/contrib/dev/acpica/include/actbl2.h
@@ -201,6 +201,7 @@
#define ACPI_SIG_SDEI "SDEI" /* Software Delegated Exception Interface Table */
#define ACPI_SIG_SDEV "SDEV" /* Secure Devices table */
#define ACPI_SIG_SVKL "SVKL" /* Storage Volume Key Location Table */
+#define ACPI_SIG_SWFT "SWFT" /* SoundWire File Table */
#define ACPI_SIG_TDEL "TDEL" /* TD Event Log Table */
@@ -4094,6 +4095,30 @@ enum acpi_svkl_format
ACPI_SVKL_FORMAT_RESERVED = 1 /* 1 and greater are reserved */
};
+/*******************************************************************************
+ *
+ * SWFT - SoundWire File Table
+ * as described in Discovery and Configuration (DisCo) Specification
+ * for SoundWire®
+ * Version 1
+ *
+ ******************************************************************************/
+
+typedef struct acpi_table_swft
+{
+ ACPI_TABLE_HEADER Header; /* Common ACPI table header */
+
+} ACPI_TABLE_SWFT;
+
+typedef struct acpi_swft_file
+{
+ UINT16 VendorID;
+ UINT32 FileID;
+ UINT16 FileVersion;
+ UINT16 FileLength;
+ UINT8 FileData[];
+
+} ACPI_SWFT_FILE;
/*******************************************************************************
*
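
Because ACPI_SWFT_FILE ends in a flexible array member, an SWFT payload is a
sequence of variable-length records packed back to back after the table
header. A standalone sketch of walking them; the stand-in struct, the
assumption that FileLength counts only the FileData bytes, and the unpacked
(natural-alignment) layout are all illustrative:

#include <stdint.h>
#include <stdio.h>

struct swft_file {
    uint16_t VendorID;
    uint32_t FileID;
    uint16_t FileVersion;
    uint16_t FileLength;                /* assumed: byte count of FileData */
    uint8_t  FileData[];
};

/* Visit each record; advance by header size plus payload length. */
static void
walk_swft (const uint8_t *Body, size_t BodyLength)
{
    size_t Offset = 0;

    while (Offset + sizeof (struct swft_file) <= BodyLength)
    {
        const struct swft_file *File =
            (const struct swft_file *) (Body + Offset);

        printf ("vendor %04x file %08x length %u\n",
            File->VendorID, (unsigned) File->FileID, File->FileLength);
        Offset += sizeof (struct swft_file) + File->FileLength;
    }
}

int
main (void)
{
    /* One fabricated record with four data bytes, purely for illustration;
     * a static buffer is used so the struct access is suitably aligned. */
    static uint8_t Buffer[sizeof (struct swft_file) + 4];
    struct swft_file *File = (struct swft_file *) Buffer;

    File->VendorID = 0x1234;
    File->FileID = 0xABCD0001;
    File->FileLength = 4;
    walk_swft (Buffer, sizeof (Buffer));
    return (0);
}
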
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh
index 885a64037f89..70a2364f1fc6 100755
--- a/sys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-2-start.sh
@@ -109,7 +109,7 @@ case "$OS" in
KSRC="$FREEBSD_SNAP/../amd64/$FreeBSD/src.txz"
;;
freebsd15-0c)
- FreeBSD="15.0-CURRENT"
+ FreeBSD="15.0-PRERELEASE"
OSNAME="FreeBSD $FreeBSD"
OSv="freebsd14.0"
URLxz="$FREEBSD_SNAP/$FreeBSD/amd64/Latest/FreeBSD-$FreeBSD-amd64-BASIC-CI-ufs.raw.xz"
diff --git a/sys/contrib/openzfs/.github/workflows/scripts/qemu-4-build-vm.sh b/sys/contrib/openzfs/.github/workflows/scripts/qemu-4-build-vm.sh
index 17e976ebcc39..2807d9e77127 100755
--- a/sys/contrib/openzfs/.github/workflows/scripts/qemu-4-build-vm.sh
+++ b/sys/contrib/openzfs/.github/workflows/scripts/qemu-4-build-vm.sh
@@ -5,12 +5,13 @@
#
# Usage:
#
-# qemu-4-build-vm.sh OS [--enable-debug][--dkms][--poweroff]
-# [--release][--repo][--tarball]
+# qemu-4-build-vm.sh OS [--enable-debug][--dkms][--patch-level NUM]
+# [--poweroff][--release][--repo][--tarball]
#
# OS: OS name like 'fedora41'
# --enable-debug: Build RPMs with '--enable-debug' (for testing)
# --dkms: Build DKMS RPMs as well
+# --patch-level NUM: Use a custom patch level number for packages.
# --poweroff: Power-off the VM after building
# --release Build zfs-release*.rpm as well
# --repo After building everything, copy RPMs into /tmp/repo
@@ -21,6 +22,7 @@
ENABLE_DEBUG=""
DKMS=""
+PATCH_LEVEL=""
POWEROFF=""
RELEASE=""
REPO=""
@@ -35,6 +37,11 @@ while [[ $# -gt 0 ]]; do
DKMS=1
shift
;;
+ --patch-level)
+ PATCH_LEVEL=$2
+ shift
+ shift
+ ;;
--poweroff)
POWEROFF=1
shift
@@ -215,6 +222,10 @@ function rpm_build_and_install() {
run ./autogen.sh
echo "##[endgroup]"
+ if [ -n "$PATCH_LEVEL" ] ; then
+ sed -i -E 's/(Release:\s+)1/\1'$PATCH_LEVEL'/g' META
+ fi
+
echo "##[group]Configure"
run ./configure --enable-debuginfo $extra
echo "##[endgroup]"
@@ -328,7 +339,13 @@ fi
# almalinux9.5
# fedora42
source /etc/os-release
-sudo hostname "$ID$VERSION_ID"
+if which hostnamectl &> /dev/null ; then
+    # Fedora 42 and later use hostnamectl
+    sudo hostnamectl set-hostname "$ID$VERSION_ID"
+    sudo hostnamectl set-hostname --pretty "$ID$VERSION_ID"
+else
+    sudo hostname "$ID$VERSION_ID"
+fi
# save some sysinfo
uname -a > /var/tmp/uname.txt
diff --git a/sys/contrib/openzfs/.github/workflows/zfs-qemu-packages.yml b/sys/contrib/openzfs/.github/workflows/zfs-qemu-packages.yml
index 5b5afe746859..d8a95954fe1a 100644
--- a/sys/contrib/openzfs/.github/workflows/zfs-qemu-packages.yml
+++ b/sys/contrib/openzfs/.github/workflows/zfs-qemu-packages.yml
@@ -32,6 +32,11 @@ on:
options:
- "Build RPMs"
- "Test repo"
+ patch_level:
+ type: string
+ required: false
+ default: ""
+ description: "(optional) patch level number"
repo_url:
type: string
required: false
@@ -78,7 +83,13 @@ jobs:
mkdir -p /tmp/repo
ssh zfs@vm0 '$HOME/zfs/.github/workflows/scripts/qemu-test-repo-vm.sh' ${{ github.event.inputs.repo_url }}
else
- .github/workflows/scripts/qemu-4-build.sh --repo --release --dkms --tarball ${{ matrix.os }}
+ EXTRA=""
+ if [ -n "${{ github.event.inputs.patch_level }}" ] ; then
+ EXTRA="--patch-level ${{ github.event.inputs.patch_level }}"
+ fi
+
+ .github/workflows/scripts/qemu-4-build.sh $EXTRA \
+ --repo --release --dkms --tarball ${{ matrix.os }}
fi
- name: Prepare artifacts
diff --git a/sys/contrib/openzfs/.mailmap b/sys/contrib/openzfs/.mailmap
index b6d942c000b8..e6f09c6c9d43 100644
--- a/sys/contrib/openzfs/.mailmap
+++ b/sys/contrib/openzfs/.mailmap
@@ -23,6 +23,7 @@
# These maps are making names consistent where they have varied but the email
# address has never changed. In most cases, the full name is in the
# Signed-off-by of a commit with a matching author.
+Achill Gilgenast <achill@achill.org>
Ahelenia Ziemiańska <nabijaczleweli@gmail.com>
Ahelenia Ziemiańska <nabijaczleweli@nabijaczleweli.xyz>
Alex John <alex@stty.io>
@@ -37,6 +38,7 @@ Crag Wang <crag0715@gmail.com>
Damian Szuberski <szuberskidamian@gmail.com>
Daniel Kolesa <daniel@octaforge.org>
Debabrata Banerjee <dbavatar@gmail.com>
+Diwakar Kristappagari <diwakar-k@hpe.com>
Finix Yan <yanchongwen@hotmail.com>
Gaurav Kumar <gauravk.18@gmail.com>
Gionatan Danti <g.danti@assyoma.it>
@@ -145,6 +147,7 @@ Gaurav Kumar <gauravk.18@gmail.com> <gaurkuma@users.noreply.github.com>
George Gaydarov <git@gg7.io> <gg7@users.noreply.github.com>
Georgy Yakovlev <gyakovlev@gentoo.org> <168902+gyakovlev@users.noreply.github.com>
Gerardwx <gerardw@alum.mit.edu> <Gerardwx@users.noreply.github.com>
+Germano Massullo <germano.massullo@gmail.com> <Germano0@users.noreply.github.com>
Gian-Carlo DeFazio <defazio1@llnl.gov> <defaziogiancarlo@users.noreply.github.com>
Giuseppe Di Natale <dinatale2@llnl.gov> <dinatale2@users.noreply.github.com>
Hajo Möller <dasjoe@gmail.com> <dasjoe@users.noreply.github.com>
@@ -164,6 +167,7 @@ John Ramsden <johnramsden@riseup.net> <johnramsden@users.noreply.github.com>
Jonathon Fernyhough <jonathon@m2x.dev> <559369+jonathonf@users.noreply.github.com>
Jose Luis Duran <jlduran@gmail.com> <jlduran@users.noreply.github.com>
Justin Hibbits <chmeeedalf@gmail.com> <chmeeedalf@users.noreply.github.com>
+Kaitlin Hoang <kthoang@amazon.com> <khoang98@users.noreply.github.com>
Kevin Greene <kevin.greene@delphix.com> <104801862+kxgreene@users.noreply.github.com>
Kevin Jin <lostking2008@hotmail.com> <33590050+jxdking@users.noreply.github.com>
Kevin P. Fleming <kevin@km6g.us> <kpfleming@users.noreply.github.com>
diff --git a/sys/contrib/openzfs/AUTHORS b/sys/contrib/openzfs/AUTHORS
index a9d249a66f1e..6c34c07f39ef 100644
--- a/sys/contrib/openzfs/AUTHORS
+++ b/sys/contrib/openzfs/AUTHORS
@@ -10,6 +10,7 @@ PAST MAINTAINERS:
CONTRIBUTORS:
Aaron Fineman <abyxcos@gmail.com>
+ Achill Gilgenast <achill@achill.org>
Adam D. Moss <c@yotes.com>
Adam Leventhal <ahl@delphix.com>
Adam Stevko <adam.stevko@gmail.com>
@@ -59,6 +60,7 @@ CONTRIBUTORS:
Andreas Buschmann <andreas.buschmann@tech.net.de>
Andreas Dilger <adilger@intel.com>
Andreas Vögele <andreas@andreasvoegele.com>
+ Andres <a-d-j-i@users.noreply.github.com>
Andrew Barnes <barnes333@gmail.com>
Andrew Hamilton <ahamilto@tjhsst.edu>
Andrew Innes <andrew.c12@gmail.com>
@@ -72,6 +74,7 @@ CONTRIBUTORS:
Andrey Prokopenko <job@terem.fr>
Andrey Vesnovaty <andrey.vesnovaty@gmail.com>
Andriy Gapon <avg@freebsd.org>
+ Andriy Tkachuk <andriy.tkachuk@seagate.com>
Andy Bakun <github@thwartedefforts.org>
Andy Fiddaman <omnios@citrus-it.co.uk>
Aniruddha Shankar <k@191a.net>
@@ -120,6 +123,7 @@ CONTRIBUTORS:
Caleb James DeLisle <calebdelisle@lavabit.com>
Cameron Harr <harr1@llnl.gov>
Cao Xuewen <cao.xuewen@zte.com.cn>
+ Carl George <carlwgeorge@gmail.com>
Carlo Landmeter <clandmeter@gmail.com>
Carlos Alberto Lopez Perez <clopez@igalia.com>
Cedric Maunoury <cedric.maunoury@gmail.com>
@@ -200,6 +204,7 @@ CONTRIBUTORS:
Dimitri John Ledkov <xnox@ubuntu.com>
Dimitry Andric <dimitry@andric.com>
Dirkjan Bussink <d.bussink@gmail.com>
+ Diwakar Kristappagari <diwakar-k@hpe.com>
Dmitry Khasanov <pik4ez@gmail.com>
Dominic Pearson <dsp@technoanimal.net>
Dominik Hassler <hadfl@omniosce.org>
@@ -250,6 +255,7 @@ CONTRIBUTORS:
George Wilson <gwilson@delphix.com>
Georgy Yakovlev <ya@sysdump.net>
Gerardwx <gerardw@alum.mit.edu>
+ Germano Massullo <germano.massullo@gmail.com>
Gian-Carlo DeFazio <defazio1@llnl.gov>
Gionatan Danti <g.danti@assyoma.it>
Giuseppe Di Natale <guss80@gmail.com>
@@ -287,6 +293,7 @@ CONTRIBUTORS:
Igor K <igor@dilos.org>
Igor Kozhukhov <ikozhukhov@gmail.com>
Igor Lvovsky <ilvovsky@gmail.com>
+ Igor Ostapenko <pm@igoro.pro>
ilbsmart <wgqimut@gmail.com>
Ilkka Sovanto <github@ilkka.kapsi.fi>
illiliti <illiliti@protonmail.com>
@@ -326,6 +333,7 @@ CONTRIBUTORS:
Jinshan Xiong <jinshan.xiong@intel.com>
Jitendra Patidar <jitendra.patidar@nutanix.com>
JK Dingwall <james@dingwall.me.uk>
+ Joel Low <joel@joelsplace.sg>
Joe Stein <joe.stein@delphix.com>
John-Mark Gurney <jmg@funkthat.com>
John Albietz <inthecloud247@gmail.com>
@@ -374,6 +382,7 @@ CONTRIBUTORS:
Kevin Jin <lostking2008@hotmail.com>
Kevin P. Fleming <kevin@km6g.us>
Kevin Tanguy <kevin.tanguy@ovh.net>
+ khoang98 <khoang98@users.noreply.github.com>
KireinaHoro <i@jsteward.moe>
Kjeld Schouten-Lebbing <kjeld@schouten-lebbing.nl>
Kleber Tarcísio <klebertarcisio@yahoo.com.br>
@@ -447,6 +456,7 @@ CONTRIBUTORS:
Max Zettlmeißl <max@zettlmeissl.de>
Md Islam <mdnahian@outlook.com>
megari <megari@iki.fi>
+ Meriel Luna Mittelbach <lunarlambda@gmail.com>
Michael D Labriola <michael.d.labriola@gmail.com>
Michael Franzl <michael@franzl.name>
Michael Gebetsroither <michael@mgeb.org>
@@ -494,6 +504,7 @@ CONTRIBUTORS:
Orivej Desh <orivej@gmx.fr>
Pablo Correa Gómez <ablocorrea@hotmail.com>
Palash Gandhi <pbg4930@rit.edu>
+ Patrick Fasano <patrick@patrickfasano.com>
Patrick Mooney <pmooney@pfmooney.com>
Patrik Greco <sikevux@sikevux.se>
Paul B. Henson <henson@acm.org>
@@ -535,6 +546,7 @@ CONTRIBUTORS:
Remy Blank <remy.blank@pobox.com>
renelson <bnelson@nelsonbe.com>
Reno Reckling <e-github@wthack.de>
+ René Wirnata <rene.wirnata@pandascience.net>
Ricardo M. Correia <ricardo.correia@oracle.com>
Riccardo Schirone <rschirone91@gmail.com>
Richard Allen <belperite@gmail.com>
@@ -640,6 +652,7 @@ CONTRIBUTORS:
tleydxdy <shironeko.github@tesaguri.club>
Tobin Harding <me@tobin.cc>
Todd Seidelmann <seidelma@users.noreply.github.com>
+ Todd Zullinger <tmz@pobox.com>
Tom Caputi <tcaputi@datto.com>
Tom Matthews <tom@axiom-partners.com>
Tomohiro Kusumi <kusumi.tomohiro@gmail.com>
diff --git a/sys/contrib/openzfs/META b/sys/contrib/openzfs/META
index 47f0795bfa11..1a9c671feac6 100644
--- a/sys/contrib/openzfs/META
+++ b/sys/contrib/openzfs/META
@@ -6,5 +6,5 @@ Release: 1
Release-Tags: relext
License: CDDL
Author: OpenZFS
-Linux-Maximum: 6.15
+Linux-Maximum: 6.16
Linux-Minimum: 4.18
diff --git a/sys/contrib/openzfs/cmd/zdb/zdb.c b/sys/contrib/openzfs/cmd/zdb/zdb.c
index 66d5fbd6adbe..a5f23be2aaaf 100644
--- a/sys/contrib/openzfs/cmd/zdb/zdb.c
+++ b/sys/contrib/openzfs/cmd/zdb/zdb.c
@@ -127,6 +127,7 @@ static zfs_range_tree_t *mos_refd_objs;
static spa_t *spa;
static objset_t *os;
static boolean_t kernel_init_done;
+static boolean_t corruption_found = B_FALSE;
static void snprintf_blkptr_compact(char *, size_t, const blkptr_t *,
boolean_t);
@@ -250,6 +251,7 @@ sublivelist_verify_func(void *args, dsl_deadlist_entry_t *dle)
&e->svbr_blk, B_TRUE);
(void) printf("\tERROR: %d unmatched FREE(s): %s\n",
e->svbr_refcnt, blkbuf);
+ corruption_found = B_TRUE;
}
zfs_btree_destroy(&sv->sv_pair);
@@ -405,6 +407,7 @@ verify_livelist_allocs(metaslab_verify_t *mv, uint64_t txg,
(u_longlong_t)DVA_GET_ASIZE(&found->svb_dva),
(u_longlong_t)found->svb_allocated_txg,
(u_longlong_t)txg);
+ corruption_found = B_TRUE;
}
}
}
@@ -426,6 +429,7 @@ metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
(u_longlong_t)txg, (u_longlong_t)offset,
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
+ corruption_found = B_TRUE;
} else {
zfs_range_tree_add(mv->mv_allocated,
offset, size);
@@ -439,6 +443,7 @@ metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
(u_longlong_t)txg, (u_longlong_t)offset,
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
+ corruption_found = B_TRUE;
} else {
zfs_range_tree_remove(mv->mv_allocated,
offset, size);
@@ -526,6 +531,7 @@ mv_populate_livelist_allocs(metaslab_verify_t *mv, sublivelist_verify_t *sv)
(u_longlong_t)DVA_GET_VDEV(&svb->svb_dva),
(u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva));
+ corruption_found = B_TRUE;
continue;
}
@@ -542,6 +548,7 @@ mv_populate_livelist_allocs(metaslab_verify_t *mv, sublivelist_verify_t *sv)
(u_longlong_t)DVA_GET_VDEV(&svb->svb_dva),
(u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva));
+ corruption_found = B_TRUE;
continue;
}
@@ -655,6 +662,7 @@ livelist_metaslab_validate(spa_t *spa)
}
(void) printf("ERROR: Found livelist blocks marked as allocated "
"for indirect vdevs:\n");
+ corruption_found = B_TRUE;
zfs_btree_index_t *where = NULL;
sublivelist_verify_block_t *svb;
@@ -827,7 +835,7 @@ usage(void)
(void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
"to make only that option verbose\n");
(void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
- zdb_exit(1);
+ zdb_exit(2);
}
static void
@@ -2583,19 +2591,17 @@ snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp,
}
}
-static void
+static u_longlong_t
print_indirect(spa_t *spa, blkptr_t *bp, const zbookmark_phys_t *zb,
const dnode_phys_t *dnp)
{
char blkbuf[BP_SPRINTF_LEN];
+ u_longlong_t offset;
int l;
- if (!BP_IS_EMBEDDED(bp)) {
- ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type);
- ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level);
- }
+ offset = (u_longlong_t)blkid2offset(dnp, bp, zb);
- (void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb));
+ (void) printf("%16llx ", offset);
ASSERT(zb->zb_level >= 0);
@@ -2610,19 +2616,38 @@ print_indirect(spa_t *spa, blkptr_t *bp, const zbookmark_phys_t *zb,
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp, B_FALSE);
if (dump_opt['Z'] && BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD)
snprintf_zstd_header(spa, blkbuf, sizeof (blkbuf), bp);
- (void) printf("%s\n", blkbuf);
+ (void) printf("%s", blkbuf);
+
+ if (!BP_IS_EMBEDDED(bp)) {
+ if (BP_GET_TYPE(bp) != dnp->dn_type) {
+ (void) printf(" (ERROR: Block pointer type "
+ "(%llu) does not match dnode type (%hhu))",
+ BP_GET_TYPE(bp), dnp->dn_type);
+ corruption_found = B_TRUE;
+ }
+ if (BP_GET_LEVEL(bp) != zb->zb_level) {
+ (void) printf(" (ERROR: Block pointer level "
+ "(%llu) does not match bookmark level (%lld))",
+ BP_GET_LEVEL(bp), (u_longlong_t)zb->zb_level);
+ corruption_found = B_TRUE;
+ }
+ }
+ (void) printf("\n");
+
+ return (offset);
}
static int
visit_indirect(spa_t *spa, const dnode_phys_t *dnp,
blkptr_t *bp, const zbookmark_phys_t *zb)
{
+ u_longlong_t offset;
int err = 0;
if (BP_GET_BIRTH(bp) == 0)
return (0);
- print_indirect(spa, bp, zb, dnp);
+ offset = print_indirect(spa, bp, zb, dnp);
if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) {
arc_flags_t flags = ARC_FLAG_WAIT;
@@ -2652,8 +2677,15 @@ visit_indirect(spa_t *spa, const dnode_phys_t *dnp,
break;
fill += BP_GET_FILL(cbp);
}
- if (!err)
- ASSERT3U(fill, ==, BP_GET_FILL(bp));
+ if (!err) {
+ if (fill != BP_GET_FILL(bp)) {
+ (void) printf("%16llx: Block pointer "
+ "fill (%llu) does not match calculated "
+ "value (%llu)\n", offset, BP_GET_FILL(bp),
+ (u_longlong_t)fill);
+ corruption_found = B_TRUE;
+ }
+ }
arc_buf_destroy(buf, &buf);
}
@@ -2909,6 +2941,7 @@ dump_full_bpobj(bpobj_t *bpo, const char *name, int indent)
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
+ corruption_found = B_TRUE;
continue;
}
dump_full_bpobj(&subbpo, "subobj", indent + 1);
@@ -3088,6 +3121,7 @@ bpobj_count_refd(bpobj_t *bpo)
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
+ corruption_found = B_TRUE;
continue;
}
bpobj_count_refd(&subbpo);
@@ -9634,7 +9668,7 @@ main(int argc, char **argv)
} else if (objset_str && !zdb_numeric(objset_str + 1) &&
dump_opt['N']) {
printf("Supply a numeric objset ID with -N\n");
- error = 1;
+ error = 2;
goto fini;
}
} else {
@@ -9936,5 +9970,8 @@ fini:
if (kernel_init_done)
kernel_fini();
+ if (corruption_found && error == 0)
+ error = 3;
+
return (error);
}
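
Taken together, the zdb hunks above define a small exit-status convention:
usage and argument errors now exit 2, and a run that found on-disk
corruption but hit no other error exits 3, leaving other non-zero errors to
propagate as-is. Condensed into one helper (naming illustrative, values
taken from the hunks above):

#include <stdio.h>

static int
zdb_exit_status (int error, int corruption_found)
{
    if (corruption_found && error == 0)
        return (3);                 /* corruption found, otherwise clean */
    return (error);                 /* 2 for usage errors, 0 for success */
}

int
main (void)
{
    printf ("%d\n", zdb_exit_status (0, 1));    /* 3 */
    printf ("%d\n", zdb_exit_status (2, 1));    /* 2: earlier error wins */
    return (0);
}
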
diff --git a/sys/contrib/openzfs/config/kernel-mkdir.m4 b/sys/contrib/openzfs/config/kernel-mkdir.m4
index c1aebc387abe..78b32447c593 100644
--- a/sys/contrib/openzfs/config/kernel-mkdir.m4
+++ b/sys/contrib/openzfs/config/kernel-mkdir.m4
@@ -84,6 +84,8 @@ AC_DEFUN([ZFS_AC_KERNEL_MKDIR], [
AC_DEFINE(HAVE_IOPS_MKDIR_DENTRY, 1,
[iops->mkdir() returns struct dentry*])
],[
+ AC_MSG_RESULT(no)
+
dnl #
dnl # 6.3 API change
dnl # mkdir() takes struct mnt_idmap * as the first arg
diff --git a/sys/contrib/openzfs/config/toolchain-simd.m4 b/sys/contrib/openzfs/config/toolchain-simd.m4
index 344807fc830c..f18c91007cde 100644
--- a/sys/contrib/openzfs/config/toolchain-simd.m4
+++ b/sys/contrib/openzfs/config/toolchain-simd.m4
@@ -24,6 +24,8 @@ AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_TOOLCHAIN_SIMD], [
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_AES
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_PCLMULQDQ
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_MOVBE
+ ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_VAES
+ ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_VPCLMULQDQ
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVE
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVEOPT
ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVES
@@ -447,6 +449,48 @@ AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_MOVBE], [
])
dnl #
+dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_VAES
+dnl #
+AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_VAES], [
+ AC_MSG_CHECKING([whether host toolchain supports VAES])
+
+ AC_LINK_IFELSE([AC_LANG_SOURCE([
+ [
+ int main()
+ {
+ __asm__ __volatile__("vaesenc %ymm0, %ymm1, %ymm0");
+ return (0);
+ }
+ ]])], [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE([HAVE_VAES], 1, [Define if host toolchain supports VAES])
+ ], [
+ AC_MSG_RESULT([no])
+ ])
+])
+
+dnl #
+dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_VPCLMULQDQ
+dnl #
+AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_VPCLMULQDQ], [
+ AC_MSG_CHECKING([whether host toolchain supports VPCLMULQDQ])
+
+ AC_LINK_IFELSE([AC_LANG_SOURCE([
+ [
+ int main()
+ {
+ __asm__ __volatile__("vpclmulqdq %0, %%ymm4, %%ymm3, %%ymm5" :: "i"(0));
+ return (0);
+ }
+ ]])], [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE([HAVE_VPCLMULQDQ], 1, [Define if host toolchain supports VPCLMULQDQ])
+ ], [
+ AC_MSG_RESULT([no])
+ ])
+])
+
+dnl #
dnl # ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVE
dnl #
AC_DEFUN([ZFS_AC_CONFIG_TOOLCHAIN_CAN_BUILD_XSAVE], [
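
Each new probe above simply tries to compile and link a one-instruction
program. The standalone C equivalent of both checks combined (assuming an
x86-64 toolchain) is:

/* If this file compiles and links, the assembler supports both VAES and
 * VPCLMULQDQ; this mirrors the AC_LINK_IFELSE bodies above. */
int
main (void)
{
    __asm__ __volatile__ ("vaesenc %ymm0, %ymm1, %ymm0");
    __asm__ __volatile__ ("vpclmulqdq %0, %%ymm4, %%ymm3, %%ymm5" :: "i"(0));
    return (0);
}
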
diff --git a/sys/contrib/openzfs/contrib/debian/control b/sys/contrib/openzfs/contrib/debian/control
index 96a2bdd88665..c5358dedc0fd 100644
--- a/sys/contrib/openzfs/contrib/debian/control
+++ b/sys/contrib/openzfs/contrib/debian/control
@@ -100,8 +100,8 @@ Depends: ${misc:Depends}, ${shlibs:Depends}
# The libcurl4 is loaded through dlopen("libcurl.so.4").
# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=988521
Recommends: libcurl4
-Breaks: libzfs2, libzfs4, libzfs4linux, libzfs6linux
-Replaces: libzfs2, libzfs4, libzfs4linux, libzfs6linux
+Breaks: libzfs2, libzfs4, libzfs4linux, libzfs6linux, openzfs-libzfs4
+Replaces: libzfs2, libzfs4, libzfs4linux, libzfs6linux, openzfs-libzfs4
Conflicts: libzfs6linux
Description: OpenZFS filesystem library for Linux - general support
OpenZFS is a storage platform that encompasses the functionality of
@@ -128,8 +128,8 @@ Package: openzfs-libzpool6
Section: contrib/libs
Architecture: linux-any
Depends: ${misc:Depends}, ${shlibs:Depends}
-Breaks: libzpool2, libzpool5, libzpool5linux, libzpool6linux
-Replaces: libzpool2, libzpool5, libzpool5linux, libzpool6linux
+Breaks: libzpool2, libzpool5, libzpool6linux
+Replaces: libzpool2, libzpool5, libzpool6linux
Conflicts: libzpool6linux
Description: OpenZFS pool library for Linux
OpenZFS is a storage platform that encompasses the functionality of
diff --git a/sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/LICENSE b/sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/LICENSE
new file mode 100644
index 000000000000..04c03a37e0cb
--- /dev/null
+++ b/sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/LICENSE
@@ -0,0 +1,253 @@
+BoringSSL is a fork of OpenSSL. As such, large parts of it fall under OpenSSL
+licensing. Files that are completely new have a Google copyright and an ISC
+license. This license is reproduced at the bottom of this file.
+
+Contributors to BoringSSL are required to follow the CLA rules for Chromium:
+https://cla.developers.google.com/clas
+
+Files in third_party/ have their own licenses, as described therein. The MIT
+license, for third_party/fiat, which, unlike other third_party directories, is
+compiled into non-test libraries, is included below.
+
+The OpenSSL toolkit stays under a dual license, i.e. both the conditions of the
+OpenSSL License and the original SSLeay license apply to the toolkit. See below
+for the actual license texts. Actually both licenses are BSD-style Open Source
+licenses. In case of any license issues related to OpenSSL please contact
+openssl-core@openssl.org.
+
+The following are Google-internal bug numbers where explicit permission from
+some authors is recorded for use of their work. (This is purely for our own
+record keeping.)
+ 27287199
+ 27287880
+ 27287883
+ 263291445
+
+
+ OpenSSL License
+ ---------------
+
+/* ====================================================================
+ * Copyright (c) 1998-2011 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+ Original SSLeay License
+ -----------------------
+
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+
+ISC license used for completely new code in BoringSSL:
+
+/* Copyright 2015 The BoringSSL Authors
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
+
+
+The code in third_party/fiat carries the MIT license:
+
+Copyright (c) 2015-2016 the fiat-crypto authors (see
+https://github.com/mit-plv/fiat-crypto/blob/master/AUTHORS).
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Licenses for support code
+-------------------------
+
+Parts of the TLS test suite are under the Go license. This code is not included
+in BoringSSL (i.e. libcrypto and libssl) when compiled, however, so
+distributing code linked against BoringSSL does not trigger this license:
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+BoringSSL uses the Chromium test infrastructure to run a continuous build,
+trybots etc. The scripts which manage this, and the script for generating build
+metadata, are under the Chromium license. Distributing code linked against
+BoringSSL does not trigger this license.
+
+Copyright 2015 The Chromium Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/README b/sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/README
new file mode 100644
index 000000000000..aa6fb6d477fa
--- /dev/null
+++ b/sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/README
@@ -0,0 +1,11 @@
+This directory contains the original BoringSSL [1] GCM x86-64 assembly
+files [2].
+
+The assembly files were then further modified to fit the ICP conventions.
+
+The main purpose of including these files (and the originals) here is to
+serve as a reference in case upstream changes need to be applied to the
+copies that were modified for the ICP.
+
+[1] https://github.com/google/boringssl
+[2] https://github.com/google/boringssl/blob/d5440dd2c2c500ac2d3bba4afec47a054b4d99ae/gen/bcm/aes-gcm-avx2-x86_64-linux.S
diff --git a/sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/aes-gcm-avx2-x86_64-linux.S b/sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/aes-gcm-avx2-x86_64-linux.S
new file mode 100644
index 000000000000..e7327c9de872
--- /dev/null
+++ b/sys/contrib/openzfs/contrib/icp/gcm-simd/boringssl/aes-gcm-avx2-x86_64-linux.S
@@ -0,0 +1,1328 @@
+// SPDX-License-Identifier: Apache-2.0
+// This file is generated from a similarly-named Perl script in the BoringSSL
+// source tree. Do not edit by hand.
+
+#include <openssl/asm_base.h>
+
+#if !defined(OPENSSL_NO_ASM) && defined(OPENSSL_X86_64) && defined(__ELF__)
+.section .rodata
+.align 16
+
+
+.Lbswap_mask:
+.quad 0x08090a0b0c0d0e0f, 0x0001020304050607
+
+
+
+
+
+
+
+
+.Lgfpoly:
+.quad 1, 0xc200000000000000
+
+
+.Lgfpoly_and_internal_carrybit:
+.quad 1, 0xc200000000000001
+
+.align 32
+
+.Lctr_pattern:
+.quad 0, 0
+.quad 1, 0
+.Linc_2blocks:
+.quad 2, 0
+.quad 2, 0
+
+.text
+.globl gcm_init_vpclmulqdq_avx2
+.hidden gcm_init_vpclmulqdq_avx2
+.type gcm_init_vpclmulqdq_avx2,@function
+.align 32
+gcm_init_vpclmulqdq_avx2:
+.cfi_startproc
+
+_CET_ENDBR
+
+
+
+
+
+ vpshufd $0x4e,(%rsi),%xmm3
+
+
+
+
+
+ vpshufd $0xd3,%xmm3,%xmm0
+ vpsrad $31,%xmm0,%xmm0
+ vpaddq %xmm3,%xmm3,%xmm3
+ vpand .Lgfpoly_and_internal_carrybit(%rip),%xmm0,%xmm0
+ vpxor %xmm0,%xmm3,%xmm3
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm6
+
+
+ vpclmulqdq $0x00,%xmm3,%xmm3,%xmm0
+ vpclmulqdq $0x01,%xmm3,%xmm3,%xmm1
+ vpclmulqdq $0x10,%xmm3,%xmm3,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+ vpclmulqdq $0x01,%xmm0,%xmm6,%xmm2
+ vpshufd $0x4e,%xmm0,%xmm0
+ vpxor %xmm0,%xmm1,%xmm1
+ vpxor %xmm2,%xmm1,%xmm1
+ vpclmulqdq $0x11,%xmm3,%xmm3,%xmm5
+ vpclmulqdq $0x01,%xmm1,%xmm6,%xmm0
+ vpshufd $0x4e,%xmm1,%xmm1
+ vpxor %xmm1,%xmm5,%xmm5
+ vpxor %xmm0,%xmm5,%xmm5
+
+
+
+ vinserti128 $1,%xmm3,%ymm5,%ymm3
+ vinserti128 $1,%xmm5,%ymm5,%ymm5
+
+
+ vpclmulqdq $0x00,%ymm5,%ymm3,%ymm0
+ vpclmulqdq $0x01,%ymm5,%ymm3,%ymm1
+ vpclmulqdq $0x10,%ymm5,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2
+ vpshufd $0x4e,%ymm0,%ymm0
+ vpxor %ymm0,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x11,%ymm5,%ymm3,%ymm4
+ vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm4,%ymm4
+ vpxor %ymm0,%ymm4,%ymm4
+
+
+
+ vmovdqu %ymm3,96(%rdi)
+ vmovdqu %ymm4,64(%rdi)
+
+
+
+ vpunpcklqdq %ymm3,%ymm4,%ymm0
+ vpunpckhqdq %ymm3,%ymm4,%ymm1
+ vpxor %ymm1,%ymm0,%ymm0
+ vmovdqu %ymm0,128+32(%rdi)
+
+
+ vpclmulqdq $0x00,%ymm5,%ymm4,%ymm0
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm1
+ vpclmulqdq $0x10,%ymm5,%ymm4,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2
+ vpshufd $0x4e,%ymm0,%ymm0
+ vpxor %ymm0,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x11,%ymm5,%ymm4,%ymm3
+ vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm3,%ymm3
+ vpxor %ymm0,%ymm3,%ymm3
+
+ vpclmulqdq $0x00,%ymm5,%ymm3,%ymm0
+ vpclmulqdq $0x01,%ymm5,%ymm3,%ymm1
+ vpclmulqdq $0x10,%ymm5,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2
+ vpshufd $0x4e,%ymm0,%ymm0
+ vpxor %ymm0,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x11,%ymm5,%ymm3,%ymm4
+ vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm4,%ymm4
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vmovdqu %ymm3,32(%rdi)
+ vmovdqu %ymm4,0(%rdi)
+
+
+
+ vpunpcklqdq %ymm3,%ymm4,%ymm0
+ vpunpckhqdq %ymm3,%ymm4,%ymm1
+ vpxor %ymm1,%ymm0,%ymm0
+ vmovdqu %ymm0,128(%rdi)
+
+ vzeroupper
+ ret
+
+.cfi_endproc
+.size gcm_init_vpclmulqdq_avx2, . - gcm_init_vpclmulqdq_avx2
+.globl gcm_gmult_vpclmulqdq_avx2
+.hidden gcm_gmult_vpclmulqdq_avx2
+.type gcm_gmult_vpclmulqdq_avx2,@function
+.align 32
+gcm_gmult_vpclmulqdq_avx2:
+.cfi_startproc
+
+_CET_ENDBR
+
+
+
+ vmovdqu (%rdi),%xmm0
+ vmovdqu .Lbswap_mask(%rip),%xmm1
+ vmovdqu 128-16(%rsi),%xmm2
+ vmovdqu .Lgfpoly(%rip),%xmm3
+ vpshufb %xmm1,%xmm0,%xmm0
+
+ vpclmulqdq $0x00,%xmm2,%xmm0,%xmm4
+ vpclmulqdq $0x01,%xmm2,%xmm0,%xmm5
+ vpclmulqdq $0x10,%xmm2,%xmm0,%xmm6
+ vpxor %xmm6,%xmm5,%xmm5
+ vpclmulqdq $0x01,%xmm4,%xmm3,%xmm6
+ vpshufd $0x4e,%xmm4,%xmm4
+ vpxor %xmm4,%xmm5,%xmm5
+ vpxor %xmm6,%xmm5,%xmm5
+ vpclmulqdq $0x11,%xmm2,%xmm0,%xmm0
+ vpclmulqdq $0x01,%xmm5,%xmm3,%xmm4
+ vpshufd $0x4e,%xmm5,%xmm5
+ vpxor %xmm5,%xmm0,%xmm0
+ vpxor %xmm4,%xmm0,%xmm0
+
+
+ vpshufb %xmm1,%xmm0,%xmm0
+ vmovdqu %xmm0,(%rdi)
+ ret
+
+.cfi_endproc
+.size gcm_gmult_vpclmulqdq_avx2, . - gcm_gmult_vpclmulqdq_avx2
+.globl gcm_ghash_vpclmulqdq_avx2
+.hidden gcm_ghash_vpclmulqdq_avx2
+.type gcm_ghash_vpclmulqdq_avx2,@function
+.align 32
+gcm_ghash_vpclmulqdq_avx2:
+.cfi_startproc
+
+_CET_ENDBR
+
+
+
+
+
+
+ vmovdqu .Lbswap_mask(%rip),%xmm6
+ vmovdqu .Lgfpoly(%rip),%xmm7
+
+
+ vmovdqu (%rdi),%xmm5
+ vpshufb %xmm6,%xmm5,%xmm5
+
+
+ cmpq $32,%rcx
+ jb .Lghash_lastblock
+
+
+
+ vinserti128 $1,%xmm6,%ymm6,%ymm6
+ vinserti128 $1,%xmm7,%ymm7,%ymm7
+
+ cmpq $127,%rcx
+ jbe .Lghash_loop_1x
+
+
+ vmovdqu 128(%rsi),%ymm8
+ vmovdqu 128+32(%rsi),%ymm9
+.Lghash_loop_4x:
+
+ vmovdqu 0(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 0(%rsi),%ymm2
+ vpxor %ymm5,%ymm1,%ymm1
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x00,%ymm8,%ymm0,%ymm4
+
+ vmovdqu 32(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 32(%rsi),%ymm2
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x10,%ymm8,%ymm0,%ymm0
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vmovdqu 64(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 64(%rsi),%ymm2
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x00,%ymm9,%ymm0,%ymm0
+ vpxor %ymm0,%ymm4,%ymm4
+
+
+ vmovdqu 96(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 96(%rsi),%ymm2
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x10,%ymm9,%ymm0,%ymm0
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vpxor %ymm3,%ymm4,%ymm4
+ vpxor %ymm5,%ymm4,%ymm4
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm2
+ vpclmulqdq $0x01,%ymm3,%ymm2,%ymm0
+ vpshufd $0x4e,%ymm3,%ymm3
+ vpxor %ymm3,%ymm4,%ymm4
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vpclmulqdq $0x01,%ymm4,%ymm2,%ymm0
+ vpshufd $0x4e,%ymm4,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpxor %ymm0,%ymm5,%ymm5
+ vextracti128 $1,%ymm5,%xmm0
+ vpxor %xmm0,%xmm5,%xmm5
+
+ subq $-128,%rdx
+ addq $-128,%rcx
+ cmpq $127,%rcx
+ ja .Lghash_loop_4x
+
+
+ cmpq $32,%rcx
+ jb .Lghash_loop_1x_done
+.Lghash_loop_1x:
+ vmovdqu (%rdx),%ymm0
+ vpshufb %ymm6,%ymm0,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vmovdqu 128-32(%rsi),%ymm0
+ vpclmulqdq $0x00,%ymm0,%ymm5,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm5,%ymm2
+ vpclmulqdq $0x10,%ymm0,%ymm5,%ymm3
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x01,%ymm1,%ymm7,%ymm3
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm2,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x11,%ymm0,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm7,%ymm1
+ vpshufd $0x4e,%ymm2,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpxor %ymm1,%ymm5,%ymm5
+
+ vextracti128 $1,%ymm5,%xmm0
+ vpxor %xmm0,%xmm5,%xmm5
+ addq $32,%rdx
+ subq $32,%rcx
+ cmpq $32,%rcx
+ jae .Lghash_loop_1x
+.Lghash_loop_1x_done:
+
+
+.Lghash_lastblock:
+ testq %rcx,%rcx
+ jz .Lghash_done
+ vmovdqu (%rdx),%xmm0
+ vpshufb %xmm6,%xmm0,%xmm0
+ vpxor %xmm0,%xmm5,%xmm5
+ vmovdqu 128-16(%rsi),%xmm0
+ vpclmulqdq $0x00,%xmm0,%xmm5,%xmm1
+ vpclmulqdq $0x01,%xmm0,%xmm5,%xmm2
+ vpclmulqdq $0x10,%xmm0,%xmm5,%xmm3
+ vpxor %xmm3,%xmm2,%xmm2
+ vpclmulqdq $0x01,%xmm1,%xmm7,%xmm3
+ vpshufd $0x4e,%xmm1,%xmm1
+ vpxor %xmm1,%xmm2,%xmm2
+ vpxor %xmm3,%xmm2,%xmm2
+ vpclmulqdq $0x11,%xmm0,%xmm5,%xmm5
+ vpclmulqdq $0x01,%xmm2,%xmm7,%xmm1
+ vpshufd $0x4e,%xmm2,%xmm2
+ vpxor %xmm2,%xmm5,%xmm5
+ vpxor %xmm1,%xmm5,%xmm5
+
+
+.Lghash_done:
+
+ vpshufb %xmm6,%xmm5,%xmm5
+ vmovdqu %xmm5,(%rdi)
+
+ vzeroupper
+ ret
+
+.cfi_endproc
+.size gcm_ghash_vpclmulqdq_avx2, . - gcm_ghash_vpclmulqdq_avx2
+.globl aes_gcm_enc_update_vaes_avx2
+.hidden aes_gcm_enc_update_vaes_avx2
+.type aes_gcm_enc_update_vaes_avx2,@function
+.align 32
+aes_gcm_enc_update_vaes_avx2:
+.cfi_startproc
+
+_CET_ENDBR
+ pushq %r12
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r12,-16
+
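+/*
+ * Argument layout per the extern declaration in gcm.c:
+ *   aes_gcm_enc_update_vaes_avx2(in, out, len, key, ivec, Htable, Xi)
+ * i.e. %rdi = in, %rsi = out, %rdx = len, %rcx = AES key schedule,
+ * %r8 = ivec, %r9 = Htable; the seventh argument (Xi) is on the stack
+ * and is loaded into %r12 below. 240(%rcx) is the round count, per the
+ * usual AES_KEY layout.
+ */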
+ movq 16(%rsp),%r12
+#ifdef BORINGSSL_DISPATCH_TEST
+.extern BORINGSSL_function_hit
+.hidden BORINGSSL_function_hit
+ movb $1,BORINGSSL_function_hit+8(%rip)
+#endif
+ vbroadcasti128 .Lbswap_mask(%rip),%ymm0
+
+
+
+ vmovdqu (%r12),%xmm1
+ vpshufb %xmm0,%xmm1,%xmm1
+ vbroadcasti128 (%r8),%ymm11
+ vpshufb %ymm0,%ymm11,%ymm11
+
+
+
+ movl 240(%rcx),%r10d
+ leal -20(,%r10,4),%r10d
+
+
+
+
+ leaq 96(%rcx,%r10,4),%r11
+ vbroadcasti128 (%rcx),%ymm9
+ vbroadcasti128 (%r11),%ymm10
+
+
+ vpaddd .Lctr_pattern(%rip),%ymm11,%ymm11
+
+
+
+ cmpq $127,%rdx
+ jbe .Lcrypt_loop_4x_done__func1
+
+ vmovdqu 128(%r9),%ymm7
+ vmovdqu 128+32(%r9),%ymm8
+
+
+
+ vmovdqu .Linc_2blocks(%rip),%ymm2
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm14
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm15
+ vpaddd %ymm2,%ymm11,%ymm11
+
+
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ vpxor %ymm9,%ymm14,%ymm14
+ vpxor %ymm9,%ymm15,%ymm15
+
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_first_4_vecs__func1:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_first_4_vecs__func1
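+
+/*
+ * Fuse the final AddRoundKey with the CTR XOR: the vaesenclast operand
+ * is (last round key ^ plaintext), so the stores below write ciphertext
+ * directly.
+ */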
+ vpxor 0(%rdi),%ymm10,%ymm2
+ vpxor 32(%rdi),%ymm10,%ymm3
+ vpxor 64(%rdi),%ymm10,%ymm5
+ vpxor 96(%rdi),%ymm10,%ymm6
+ vaesenclast %ymm2,%ymm12,%ymm12
+ vaesenclast %ymm3,%ymm13,%ymm13
+ vaesenclast %ymm5,%ymm14,%ymm14
+ vaesenclast %ymm6,%ymm15,%ymm15
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+ vmovdqu %ymm14,64(%rsi)
+ vmovdqu %ymm15,96(%rsi)
+
+ subq $-128,%rdi
+ addq $-128,%rdx
+ cmpq $127,%rdx
+ jbe .Lghash_last_ciphertext_4x__func1
+.align 16
+.Lcrypt_loop_4x__func1:
+
+
+
+
+ vmovdqu .Linc_2blocks(%rip),%ymm2
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm14
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm15
+ vpaddd %ymm2,%ymm11,%ymm11
+
+
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ vpxor %ymm9,%ymm14,%ymm14
+ vpxor %ymm9,%ymm15,%ymm15
+
+ cmpl $24,%r10d
+ jl .Laes128__func1
+ je .Laes192__func1
+
+ vbroadcasti128 -208(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -192(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes192__func1:
+ vbroadcasti128 -176(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -160(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes128__func1:
+ prefetcht0 512(%rdi)
+ prefetcht0 512+64(%rdi)
+
+ vmovdqu 0(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 0(%r9),%ymm4
+ vpxor %ymm1,%ymm3,%ymm3
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6
+
+ vbroadcasti128 -144(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vbroadcasti128 -128(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 32(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 32(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -112(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 64(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 64(%r9),%ymm4
+
+ vbroadcasti128 -96(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -80(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+
+ vmovdqu 96(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+
+ vbroadcasti128 -64(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vmovdqu 96(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -48(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm1,%ymm6,%ymm6
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm4
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -32(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -16(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vextracti128 $1,%ymm1,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+
+
+ subq $-128,%rsi
+ vpxor 0(%rdi),%ymm10,%ymm2
+ vpxor 32(%rdi),%ymm10,%ymm3
+ vpxor 64(%rdi),%ymm10,%ymm5
+ vpxor 96(%rdi),%ymm10,%ymm6
+ vaesenclast %ymm2,%ymm12,%ymm12
+ vaesenclast %ymm3,%ymm13,%ymm13
+ vaesenclast %ymm5,%ymm14,%ymm14
+ vaesenclast %ymm6,%ymm15,%ymm15
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+ vmovdqu %ymm14,64(%rsi)
+ vmovdqu %ymm15,96(%rsi)
+
+ subq $-128,%rdi
+
+ addq $-128,%rdx
+ cmpq $127,%rdx
+ ja .Lcrypt_loop_4x__func1
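+
+/*
+ * The loop above GHASHes ciphertext one iteration behind the AES
+ * pipeline; hash the final 128 bytes of ciphertext produced by the
+ * last iteration.
+ */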
+.Lghash_last_ciphertext_4x__func1:
+
+ vmovdqu 0(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 0(%r9),%ymm4
+ vpxor %ymm1,%ymm3,%ymm3
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6
+
+ vmovdqu 32(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 32(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vmovdqu 64(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 64(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+
+ vmovdqu 96(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 96(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm1,%ymm6,%ymm6
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm4
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vextracti128 $1,%ymm1,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+
+ subq $-128,%rsi
+.Lcrypt_loop_4x_done__func1:
+
+ testq %rdx,%rdx
+ jz .Ldone__func1
+
+
+
+
+
+ leaq 128(%r9),%r8
+ subq %rdx,%r8
+
+
+ vpxor %xmm5,%xmm5,%xmm5
+ vpxor %xmm6,%xmm6,%xmm6
+ vpxor %xmm7,%xmm7,%xmm7
+
+ cmpq $64,%rdx
+ jb .Llessthan64bytes__func1
+
+
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_1__func1:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_1__func1
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%ymm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %ymm3,%ymm13,%ymm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+
+
+ vpshufb %ymm0,%ymm12,%ymm12
+ vpshufb %ymm0,%ymm13,%ymm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%ymm3
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm7
+ vpclmulqdq $0x00,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+ addq $64,%r8
+ addq $64,%rdi
+ addq $64,%rsi
+ subq $64,%rdx
+ jz .Lreduce__func1
+
+ vpxor %xmm1,%xmm1,%xmm1
+
+
+.Llessthan64bytes__func1:
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_2__func1:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_2__func1
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+
+
+ cmpq $32,%rdx
+ jb .Lxor_one_block__func1
+ je .Lxor_two_blocks__func1
+
+.Lxor_three_blocks__func1:
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%xmm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %xmm3,%xmm13,%xmm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %xmm13,32(%rsi)
+
+ vpshufb %ymm0,%ymm12,%ymm12
+ vpshufb %xmm0,%xmm13,%xmm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%xmm3
+ vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm7,%ymm7
+ jmp .Lghash_mul_one_vec_unreduced__func1
+
+.Lxor_two_blocks__func1:
+ vmovdqu (%rdi),%ymm2
+ vpxor %ymm2,%ymm12,%ymm12
+ vmovdqu %ymm12,(%rsi)
+ vpshufb %ymm0,%ymm12,%ymm12
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ jmp .Lghash_mul_one_vec_unreduced__func1
+
+.Lxor_one_block__func1:
+ vmovdqu (%rdi),%xmm2
+ vpxor %xmm2,%xmm12,%xmm12
+ vmovdqu %xmm12,(%rsi)
+ vpshufb %xmm0,%xmm12,%xmm12
+ vpxor %xmm1,%xmm12,%xmm12
+ vmovdqu (%r8),%xmm2
+
+.Lghash_mul_one_vec_unreduced__func1:
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
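+/*
+ * Reduce the unreduced low/middle/high accumulators (%ymm5-%ymm7)
+ * modulo the GHASH polynomial, leaving the result in %xmm1.
+ */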
+.Lreduce__func1:
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm2
+ vpclmulqdq $0x01,%ymm5,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm3,%ymm6,%ymm6
+ vpclmulqdq $0x01,%ymm6,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm7,%ymm7
+ vpxor %ymm3,%ymm7,%ymm7
+ vextracti128 $1,%ymm7,%xmm1
+ vpxor %xmm7,%xmm1,%xmm1
+
+.Ldone__func1:
+
+ vpshufb %xmm0,%xmm1,%xmm1
+ vmovdqu %xmm1,(%r12)
+
+ vzeroupper
+ popq %r12
+.cfi_adjust_cfa_offset -8
+.cfi_restore %r12
+ ret
+
+.cfi_endproc
+.size aes_gcm_enc_update_vaes_avx2, . - aes_gcm_enc_update_vaes_avx2
+.globl aes_gcm_dec_update_vaes_avx2
+.hidden aes_gcm_dec_update_vaes_avx2
+.type aes_gcm_dec_update_vaes_avx2,@function
+.align 32
+aes_gcm_dec_update_vaes_avx2:
+.cfi_startproc
+
+_CET_ENDBR
+ pushq %r12
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r12,-16
+
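+/*
+ * Same argument layout as aes_gcm_enc_update_vaes_avx2 above, with
+ * %rdi = ciphertext in and %rsi = plaintext out.
+ */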
+ movq 16(%rsp),%r12
+ vbroadcasti128 .Lbswap_mask(%rip),%ymm0
+
+
+
+ vmovdqu (%r12),%xmm1
+ vpshufb %xmm0,%xmm1,%xmm1
+ vbroadcasti128 (%r8),%ymm11
+ vpshufb %ymm0,%ymm11,%ymm11
+
+
+
+ movl 240(%rcx),%r10d
+ leal -20(,%r10,4),%r10d
+
+
+
+
+ leaq 96(%rcx,%r10,4),%r11
+ vbroadcasti128 (%rcx),%ymm9
+ vbroadcasti128 (%r11),%ymm10
+
+
+ vpaddd .Lctr_pattern(%rip),%ymm11,%ymm11
+
+
+
+ cmpq $127,%rdx
+ jbe .Lcrypt_loop_4x_done__func2
+
+ vmovdqu 128(%r9),%ymm7
+ vmovdqu 128+32(%r9),%ymm8
+.align 16
+.Lcrypt_loop_4x__func2:
+
+
+
+
+ vmovdqu .Linc_2blocks(%rip),%ymm2
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm14
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm15
+ vpaddd %ymm2,%ymm11,%ymm11
+
+
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ vpxor %ymm9,%ymm14,%ymm14
+ vpxor %ymm9,%ymm15,%ymm15
+
+ cmpl $24,%r10d
+ jl .Laes128__func2
+ je .Laes192__func2
+
+ vbroadcasti128 -208(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -192(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes192__func2:
+ vbroadcasti128 -176(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -160(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes128__func2:
+ prefetcht0 512(%rdi)
+ prefetcht0 512+64(%rdi)
+
+ vmovdqu 0(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 0(%r9),%ymm4
+ vpxor %ymm1,%ymm3,%ymm3
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6
+
+ vbroadcasti128 -144(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vbroadcasti128 -128(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 32(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 32(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -112(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 64(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 64(%r9),%ymm4
+
+ vbroadcasti128 -96(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -80(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+
+ vmovdqu 96(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+
+ vbroadcasti128 -64(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vmovdqu 96(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -48(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm1,%ymm6,%ymm6
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm4
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -32(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -16(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vextracti128 $1,%ymm1,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+
+
+
+ vpxor 0(%rdi),%ymm10,%ymm2
+ vpxor 32(%rdi),%ymm10,%ymm3
+ vpxor 64(%rdi),%ymm10,%ymm5
+ vpxor 96(%rdi),%ymm10,%ymm6
+ vaesenclast %ymm2,%ymm12,%ymm12
+ vaesenclast %ymm3,%ymm13,%ymm13
+ vaesenclast %ymm5,%ymm14,%ymm14
+ vaesenclast %ymm6,%ymm15,%ymm15
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+ vmovdqu %ymm14,64(%rsi)
+ vmovdqu %ymm15,96(%rsi)
+
+ subq $-128,%rdi
+ subq $-128,%rsi
+ addq $-128,%rdx
+ cmpq $127,%rdx
+ ja .Lcrypt_loop_4x__func2
+.Lcrypt_loop_4x_done__func2:
+
+ testq %rdx,%rdx
+ jz .Ldone__func2
+
+
+
+
+
+ leaq 128(%r9),%r8
+ subq %rdx,%r8
+
+
+ vpxor %xmm5,%xmm5,%xmm5
+ vpxor %xmm6,%xmm6,%xmm6
+ vpxor %xmm7,%xmm7,%xmm7
+
+ cmpq $64,%rdx
+ jb .Llessthan64bytes__func2
+
+
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_1__func2:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_1__func2
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%ymm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %ymm3,%ymm13,%ymm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+
+
+ vpshufb %ymm0,%ymm2,%ymm12
+ vpshufb %ymm0,%ymm3,%ymm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%ymm3
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm7
+ vpclmulqdq $0x00,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+ addq $64,%r8
+ addq $64,%rdi
+ addq $64,%rsi
+ subq $64,%rdx
+ jz .Lreduce__func2
+
+ vpxor %xmm1,%xmm1,%xmm1
+
+
+.Llessthan64bytes__func2:
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_2__func2:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_2__func2
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+
+
+ cmpq $32,%rdx
+ jb .Lxor_one_block__func2
+ je .Lxor_two_blocks__func2
+
+.Lxor_three_blocks__func2:
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%xmm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %xmm3,%xmm13,%xmm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %xmm13,32(%rsi)
+
+ vpshufb %ymm0,%ymm2,%ymm12
+ vpshufb %xmm0,%xmm3,%xmm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%xmm3
+ vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm7,%ymm7
+ jmp .Lghash_mul_one_vec_unreduced__func2
+
+.Lxor_two_blocks__func2:
+ vmovdqu (%rdi),%ymm2
+ vpxor %ymm2,%ymm12,%ymm12
+ vmovdqu %ymm12,(%rsi)
+ vpshufb %ymm0,%ymm2,%ymm12
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ jmp .Lghash_mul_one_vec_unreduced__func2
+
+.Lxor_one_block__func2:
+ vmovdqu (%rdi),%xmm2
+ vpxor %xmm2,%xmm12,%xmm12
+ vmovdqu %xmm12,(%rsi)
+ vpshufb %xmm0,%xmm2,%xmm12
+ vpxor %xmm1,%xmm12,%xmm12
+ vmovdqu (%r8),%xmm2
+
+.Lghash_mul_one_vec_unreduced__func2:
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+.Lreduce__func2:
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm2
+ vpclmulqdq $0x01,%ymm5,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm3,%ymm6,%ymm6
+ vpclmulqdq $0x01,%ymm6,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm7,%ymm7
+ vpxor %ymm3,%ymm7,%ymm7
+ vextracti128 $1,%ymm7,%xmm1
+ vpxor %xmm7,%xmm1,%xmm1
+
+.Ldone__func2:
+
+ vpshufb %xmm0,%xmm1,%xmm1
+ vmovdqu %xmm1,(%r12)
+
+ vzeroupper
+ popq %r12
+.cfi_adjust_cfa_offset -8
+.cfi_restore %r12
+ ret
+
+.cfi_endproc
+.size aes_gcm_dec_update_vaes_avx2, . - aes_gcm_dec_update_vaes_avx2
+#endif
diff --git a/sys/contrib/openzfs/contrib/initramfs/scripts/zfs b/sys/contrib/openzfs/contrib/initramfs/scripts/zfs
index c569b2528368..67707e9d80f4 100644
--- a/sys/contrib/openzfs/contrib/initramfs/scripts/zfs
+++ b/sys/contrib/openzfs/contrib/initramfs/scripts/zfs
@@ -979,7 +979,8 @@ mountroot()
touch /run/zfs_unlock_complete
if [ -e /run/zfs_unlock_complete_notify ]; then
- read -r < /run/zfs_unlock_complete_notify
+ # shellcheck disable=SC2034
+ read -r zfs_unlock_complete_notify < /run/zfs_unlock_complete_notify
fi
# ------------
diff --git a/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_x86.h b/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_x86.h
index e8004e18c4a4..326f471d7c9b 100644
--- a/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_x86.h
+++ b/sys/contrib/openzfs/include/os/linux/kernel/linux/simd_x86.h
@@ -598,6 +598,32 @@ zfs_movbe_available(void)
}
/*
+ * Check if VAES instruction set is available
+ */
+static inline boolean_t
+zfs_vaes_available(void)
+{
+#if defined(X86_FEATURE_VAES)
+ return (!!boot_cpu_has(X86_FEATURE_VAES));
+#else
+ return (B_FALSE);
+#endif
+}
+
+/*
+ * Check if VPCLMULQDQ instruction set is available
+ */
+static inline boolean_t
+zfs_vpclmulqdq_available(void)
+{
+#if defined(X86_FEATURE_VPCLMULQDQ)
+ return (!!boot_cpu_has(X86_FEATURE_VPCLMULQDQ));
+#else
+ return (B_FALSE);
+#endif
+}
+
+/*
* Check if SHA_NI instruction set is available
*/
static inline boolean_t
diff --git a/sys/contrib/openzfs/include/os/linux/zfs/sys/trace_zil.h b/sys/contrib/openzfs/include/os/linux/zfs/sys/trace_zil.h
index 955462c85d10..e34ea46b3fe8 100644
--- a/sys/contrib/openzfs/include/os/linux/zfs/sys/trace_zil.h
+++ b/sys/contrib/openzfs/include/os/linux/zfs/sys/trace_zil.h
@@ -139,18 +139,18 @@
#define ZCW_TP_STRUCT_ENTRY \
__field(lwb_t *, zcw_lwb) \
__field(boolean_t, zcw_done) \
- __field(int, zcw_zio_error) \
+ __field(int, zcw_error) \
#define ZCW_TP_FAST_ASSIGN \
__entry->zcw_lwb = zcw->zcw_lwb; \
__entry->zcw_done = zcw->zcw_done; \
- __entry->zcw_zio_error = zcw->zcw_zio_error;
+ __entry->zcw_error = zcw->zcw_error;
#define ZCW_TP_PRINTK_FMT \
"zcw { lwb %p done %u error %u }"
#define ZCW_TP_PRINTK_ARGS \
- __entry->zcw_lwb, __entry->zcw_done, __entry->zcw_zio_error
+ __entry->zcw_lwb, __entry->zcw_done, __entry->zcw_error
/*
* Generic support for two argument tracepoints of the form:
diff --git a/sys/contrib/openzfs/include/sys/spa.h b/sys/contrib/openzfs/include/sys/spa.h
index db6de332ae67..66db16b33c51 100644
--- a/sys/contrib/openzfs/include/sys/spa.h
+++ b/sys/contrib/openzfs/include/sys/spa.h
@@ -880,7 +880,6 @@ extern kcondvar_t spa_namespace_cv;
#define SPA_CONFIG_UPDATE_VDEVS 1
extern void spa_write_cachefile(spa_t *, boolean_t, boolean_t, boolean_t);
-extern void spa_config_load(void);
extern int spa_all_configs(uint64_t *generation, nvlist_t **pools);
extern void spa_config_set(spa_t *spa, nvlist_t *config);
extern nvlist_t *spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg,
@@ -1244,7 +1243,6 @@ extern void vdev_mirror_stat_fini(void);
/* Initialization and termination */
extern void spa_init(spa_mode_t mode);
extern void spa_fini(void);
-extern void spa_boot_init(void *);
/* properties */
extern int spa_prop_set(spa_t *spa, nvlist_t *nvp);
diff --git a/sys/contrib/openzfs/include/sys/zil_impl.h b/sys/contrib/openzfs/include/sys/zil_impl.h
index 44b776e16b52..ea1364a7e35a 100644
--- a/sys/contrib/openzfs/include/sys/zil_impl.h
+++ b/sys/contrib/openzfs/include/sys/zil_impl.h
@@ -41,8 +41,8 @@ extern "C" {
*
* An lwb will start out in the "new" state, and transition to the "opened"
* state via a call to zil_lwb_write_open() on first itx assignment. When
- * transitioning from "new" to "opened" the zilog's "zl_issuer_lock" must be
- * held.
+ * transitioning from "new" to "opened" the zilog's "zl_issuer_lock" and
+ * LWB's "lwb_lock" must be held.
*
* After the lwb is "opened", it can be assigned a number of itxs and transition
* into the "closed" state via zil_lwb_write_close() when full or on timeout.
@@ -100,16 +100,22 @@ typedef enum {
* holding the "zl_issuer_lock". After the lwb is issued, the zilog's
* "zl_lock" is used to protect the lwb against concurrent access.
*/
+typedef enum {
+ LWB_FLAG_SLIM = (1<<0), /* log block has slim format */
+ LWB_FLAG_SLOG = (1<<1), /* lwb_blk is on SLOG device */
+ LWB_FLAG_CRASHED = (1<<2), /* lwb is on the crash list */
+} lwb_flag_t;
+
typedef struct lwb {
zilog_t *lwb_zilog; /* back pointer to log struct */
blkptr_t lwb_blk; /* on disk address of this log blk */
- boolean_t lwb_slim; /* log block has slim format */
- boolean_t lwb_slog; /* lwb_blk is on SLOG device */
+ lwb_flag_t lwb_flags; /* extra info about this lwb */
int lwb_error; /* log block allocation error */
int lwb_nmax; /* max bytes in the buffer */
int lwb_nused; /* # used bytes in buffer */
int lwb_nfilled; /* # filled bytes in buffer */
int lwb_sz; /* size of block and buffer */
+ int lwb_min_sz; /* min size for range allocation */
lwb_state_t lwb_state; /* the state of this lwb */
char *lwb_buf; /* log write buffer */
zio_t *lwb_child_zio; /* parent zio for children */
@@ -124,7 +130,7 @@ typedef struct lwb {
list_t lwb_itxs; /* list of itx's */
list_t lwb_waiters; /* list of zil_commit_waiter's */
avl_tree_t lwb_vdev_tree; /* vdevs to flush after lwb write */
- kmutex_t lwb_vdev_lock; /* protects lwb_vdev_tree */
+ kmutex_t lwb_lock; /* protects lwb_vdev_tree and size */
} lwb_t;
/*
@@ -149,7 +155,7 @@ typedef struct zil_commit_waiter {
list_node_t zcw_node; /* linkage in lwb_t:lwb_waiter list */
lwb_t *zcw_lwb; /* back pointer to lwb when linked */
boolean_t zcw_done; /* B_TRUE when "done", else B_FALSE */
- int zcw_zio_error; /* contains the zio io_error value */
+ int zcw_error; /* result to return from zil_commit() */
} zil_commit_waiter_t;
/*
diff --git a/sys/contrib/openzfs/include/sys/zio.h b/sys/contrib/openzfs/include/sys/zio.h
index 4f46eab3db89..353805fcb969 100644
--- a/sys/contrib/openzfs/include/sys/zio.h
+++ b/sys/contrib/openzfs/include/sys/zio.h
@@ -360,26 +360,26 @@ struct zbookmark_err_phys {
(zb)->zb_blkid == ZB_ROOT_BLKID)
typedef struct zio_prop {
- enum zio_checksum zp_checksum;
- enum zio_compress zp_compress;
+ enum zio_checksum zp_checksum:8;
+ enum zio_compress zp_compress:8;
uint8_t zp_complevel;
uint8_t zp_level;
uint8_t zp_copies;
uint8_t zp_gang_copies;
- dmu_object_type_t zp_type;
- boolean_t zp_dedup;
- boolean_t zp_dedup_verify;
- boolean_t zp_nopwrite;
- boolean_t zp_brtwrite;
- boolean_t zp_encrypt;
- boolean_t zp_byteorder;
- boolean_t zp_direct_write;
- boolean_t zp_rewrite;
+ dmu_object_type_t zp_type:8;
+ dmu_object_type_t zp_storage_type:8;
+ boolean_t zp_dedup:1;
+ boolean_t zp_dedup_verify:1;
+ boolean_t zp_nopwrite:1;
+ boolean_t zp_brtwrite:1;
+ boolean_t zp_encrypt:1;
+ boolean_t zp_byteorder:1;
+ boolean_t zp_direct_write:1;
+ boolean_t zp_rewrite:1;
+ uint32_t zp_zpl_smallblk;
uint8_t zp_salt[ZIO_DATA_SALT_LEN];
uint8_t zp_iv[ZIO_DATA_IV_LEN];
uint8_t zp_mac[ZIO_DATA_MAC_LEN];
- uint32_t zp_zpl_smallblk;
- dmu_object_type_t zp_storage_type;
} zio_prop_t;
typedef struct zio_cksum_report zio_cksum_report_t;
@@ -622,7 +622,8 @@ extern zio_t *zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg,
const blkptr_t *bp, zio_flag_t flags);
extern int zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg,
- blkptr_t *new_bp, uint64_t size, boolean_t *slog);
+ blkptr_t *new_bp, uint64_t min_size, uint64_t max_size, boolean_t *slog,
+ boolean_t allow_larger);
extern void zio_flush(zio_t *zio, vdev_t *vd);
extern void zio_shrink(zio_t *zio, uint64_t size);
diff --git a/sys/contrib/openzfs/include/sys/zvol_impl.h b/sys/contrib/openzfs/include/sys/zvol_impl.h
index f3dd9f26f23c..5422e66832c0 100644
--- a/sys/contrib/openzfs/include/sys/zvol_impl.h
+++ b/sys/contrib/openzfs/include/sys/zvol_impl.h
@@ -20,7 +20,7 @@
* CDDL HEADER END
*/
/*
- * Copyright (c) 2024, Klara, Inc.
+ * Copyright (c) 2024, 2025, Klara, Inc.
*/
#ifndef _SYS_ZVOL_IMPL_H
@@ -56,6 +56,7 @@ typedef struct zvol_state {
atomic_t zv_suspend_ref; /* refcount for suspend */
krwlock_t zv_suspend_lock; /* suspend lock */
kcondvar_t zv_removing_cv; /* ready to remove minor */
+ list_node_t zv_remove_node; /* node on removal list */
struct zvol_state_os *zv_zso; /* private platform state */
boolean_t zv_threading; /* volthreading property */
} zvol_state_t;
@@ -135,7 +136,7 @@ int zvol_os_rename_minor(zvol_state_t *zv, const char *newname);
int zvol_os_create_minor(const char *name);
int zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize);
boolean_t zvol_os_is_zvol(const char *path);
-void zvol_os_clear_private(zvol_state_t *zv);
+void zvol_os_remove_minor(zvol_state_t *zv);
void zvol_os_set_disk_ro(zvol_state_t *zv, int flags);
void zvol_os_set_capacity(zvol_state_t *zv, uint64_t capacity);
diff --git a/sys/contrib/openzfs/lib/libicp/Makefile.am b/sys/contrib/openzfs/lib/libicp/Makefile.am
index ce24d13a760f..23adba10bc44 100644
--- a/sys/contrib/openzfs/lib/libicp/Makefile.am
+++ b/sys/contrib/openzfs/lib/libicp/Makefile.am
@@ -69,6 +69,7 @@ nodist_libicp_la_SOURCES += \
module/icp/asm-x86_64/aes/aes_aesni.S \
module/icp/asm-x86_64/modes/gcm_pclmulqdq.S \
module/icp/asm-x86_64/modes/aesni-gcm-x86_64.S \
+ module/icp/asm-x86_64/modes/aesni-gcm-avx2-vaes.S \
module/icp/asm-x86_64/modes/ghash-x86_64.S \
module/icp/asm-x86_64/sha2/sha256-x86_64.S \
module/icp/asm-x86_64/sha2/sha512-x86_64.S \
diff --git a/sys/contrib/openzfs/lib/libspl/include/sys/simd.h b/sys/contrib/openzfs/lib/libspl/include/sys/simd.h
index 1ef24f5a7d39..4772a5416b2e 100644
--- a/sys/contrib/openzfs/lib/libspl/include/sys/simd.h
+++ b/sys/contrib/openzfs/lib/libspl/include/sys/simd.h
@@ -102,7 +102,9 @@ typedef enum cpuid_inst_sets {
AES,
PCLMULQDQ,
MOVBE,
- SHA_NI
+ SHA_NI,
+ VAES,
+ VPCLMULQDQ
} cpuid_inst_sets_t;
/*
@@ -127,6 +129,8 @@ typedef struct cpuid_feature_desc {
#define _AES_BIT (1U << 25)
#define _PCLMULQDQ_BIT (1U << 1)
#define _MOVBE_BIT (1U << 22)
+#define _VAES_BIT (1U << 9)
+#define _VPCLMULQDQ_BIT (1U << 10)
#define _SHA_NI_BIT (1U << 29)
/*
@@ -157,6 +161,8 @@ static const cpuid_feature_desc_t cpuid_features[] = {
[PCLMULQDQ] = {1U, 0U, _PCLMULQDQ_BIT, ECX },
[MOVBE] = {1U, 0U, _MOVBE_BIT, ECX },
[SHA_NI] = {7U, 0U, _SHA_NI_BIT, EBX },
+ [VAES] = {7U, 0U, _VAES_BIT, ECX },
+ [VPCLMULQDQ] = {7U, 0U, _VPCLMULQDQ_BIT, ECX },
};
/*
@@ -231,6 +237,8 @@ CPUID_FEATURE_CHECK(aes, AES);
CPUID_FEATURE_CHECK(pclmulqdq, PCLMULQDQ);
CPUID_FEATURE_CHECK(movbe, MOVBE);
CPUID_FEATURE_CHECK(shani, SHA_NI);
+CPUID_FEATURE_CHECK(vaes, VAES);
+CPUID_FEATURE_CHECK(vpclmulqdq, VPCLMULQDQ);
/*
* Detect register set support
@@ -382,6 +390,24 @@ zfs_shani_available(void)
}
/*
+ * Check if VAES instruction is available
+ */
+static inline boolean_t
+zfs_vaes_available(void)
+{
+ return (__cpuid_has_vaes());
+}
+
+/*
+ * Check if VPCLMULQDQ instruction is available
+ */
+static inline boolean_t
+zfs_vpclmulqdq_available(void)
+{
+ return (__cpuid_has_vpclmulqdq());
+}
+
+/*
* AVX-512 family of instruction sets:
*
* AVX512F Foundation
diff --git a/sys/contrib/openzfs/lib/libzpool/kernel.c b/sys/contrib/openzfs/lib/libzpool/kernel.c
index e63153a03370..fea2f81458f9 100644
--- a/sys/contrib/openzfs/lib/libzpool/kernel.c
+++ b/sys/contrib/openzfs/lib/libzpool/kernel.c
@@ -38,6 +38,7 @@
#include <sys/processor.h>
#include <sys/rrwlock.h>
#include <sys/spa.h>
+#include <sys/spa_impl.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/time.h>
@@ -811,6 +812,79 @@ umem_out_of_memory(void)
return (0);
}
+static void
+spa_config_load(void)
+{
+ void *buf = NULL;
+ nvlist_t *nvlist, *child;
+ nvpair_t *nvpair;
+ char *pathname;
+ zfs_file_t *fp;
+ zfs_file_attr_t zfa;
+ uint64_t fsize;
+ int err;
+
+ /*
+ * Open the configuration file.
+ */
+ pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
+
+ (void) snprintf(pathname, MAXPATHLEN, "%s", spa_config_path);
+
+ err = zfs_file_open(pathname, O_RDONLY, 0, &fp);
+ if (err)
+ err = zfs_file_open(ZPOOL_CACHE_BOOT, O_RDONLY, 0, &fp);
+
+ kmem_free(pathname, MAXPATHLEN);
+
+ if (err)
+ return;
+
+ if (zfs_file_getattr(fp, &zfa))
+ goto out;
+
+ fsize = zfa.zfa_size;
+ buf = kmem_alloc(fsize, KM_SLEEP);
+
+ /*
+ * Read the nvlist from the file.
+ */
+ if (zfs_file_read(fp, buf, fsize, NULL) < 0)
+ goto out;
+
+ /*
+ * Unpack the nvlist.
+ */
+ if (nvlist_unpack(buf, fsize, &nvlist, KM_SLEEP) != 0)
+ goto out;
+
+ /*
+ * Iterate over all elements in the nvlist, creating a new spa_t for
+ * each one with the specified configuration.
+ */
+ mutex_enter(&spa_namespace_lock);
+ nvpair = NULL;
+ while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {
+ if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
+ continue;
+
+ child = fnvpair_value_nvlist(nvpair);
+
+ if (spa_lookup(nvpair_name(nvpair)) != NULL)
+ continue;
+ (void) spa_add(nvpair_name(nvpair), child, NULL);
+ }
+ mutex_exit(&spa_namespace_lock);
+
+ nvlist_free(nvlist);
+
+out:
+ if (buf != NULL)
+ kmem_free(buf, fsize);
+
+ zfs_file_close(fp);
+}
+
void
kernel_init(int mode)
{
@@ -835,6 +909,7 @@ kernel_init(int mode)
zstd_init();
spa_init((spa_mode_t)mode);
+ spa_config_load();
fletcher_4_init();
diff --git a/sys/contrib/openzfs/lib/libzutil/zutil_import.c b/sys/contrib/openzfs/lib/libzutil/zutil_import.c
index a4a6e76a1d09..08367f4c064d 100644
--- a/sys/contrib/openzfs/lib/libzutil/zutil_import.c
+++ b/sys/contrib/openzfs/lib/libzutil/zutil_import.c
@@ -1903,30 +1903,43 @@ zpool_find_config(libpc_handle_t *hdl, const char *target, nvlist_t **configp,
*sepp = '\0';
pools = zpool_search_import(hdl, args);
+ if (pools == NULL) {
+ zutil_error_aux(hdl, dgettext(TEXT_DOMAIN, "no pools found"));
+ (void) zutil_error_fmt(hdl, LPC_UNKNOWN, dgettext(TEXT_DOMAIN,
+ "failed to find config for pool '%s'"), targetdup);
+ free(targetdup);
+ return (ENOENT);
+ }
- if (pools != NULL) {
- nvpair_t *elem = NULL;
- while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
- VERIFY0(nvpair_value_nvlist(elem, &config));
- if (pool_match(config, targetdup)) {
- count++;
- if (match != NULL) {
- /* multiple matches found */
- continue;
- } else {
- match = fnvlist_dup(config);
- }
+ nvpair_t *elem = NULL;
+ while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
+ VERIFY0(nvpair_value_nvlist(elem, &config));
+ if (pool_match(config, targetdup)) {
+ count++;
+ if (match != NULL) {
+ /* multiple matches found */
+ continue;
+ } else {
+ match = fnvlist_dup(config);
}
}
- fnvlist_free(pools);
}
+ fnvlist_free(pools);
if (count == 0) {
+ zutil_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "no matching pools"));
+ (void) zutil_error_fmt(hdl, LPC_UNKNOWN, dgettext(TEXT_DOMAIN,
+ "failed to find config for pool '%s'"), targetdup);
free(targetdup);
return (ENOENT);
}
if (count > 1) {
+ zutil_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "more than one matching pool"));
+ (void) zutil_error_fmt(hdl, LPC_UNKNOWN, dgettext(TEXT_DOMAIN,
+ "failed to find config for pool '%s'"), targetdup);
free(targetdup);
fnvlist_free(match);
return (EINVAL);
diff --git a/sys/contrib/openzfs/man/man4/zfs.4 b/sys/contrib/openzfs/man/man4/zfs.4
index 4a5f9fd93f4f..5c7958667f92 100644
--- a/sys/contrib/openzfs/man/man4/zfs.4
+++ b/sys/contrib/openzfs/man/man4/zfs.4
@@ -941,10 +941,6 @@ The target number of bytes the ARC should leave as free memory on the system.
If zero, equivalent to the bigger of
.Sy 512 KiB No and Sy all_system_memory/64 .
.
-.It Sy zfs_autoimport_disable Ns = Ns Sy 1 Ns | Ns 0 Pq int
-Disable pool import at module load by ignoring the cache file
-.Pq Sy spa_config_path .
-.
.It Sy zfs_checksum_events_per_second Ns = Ns Sy 20 Ns /s Pq uint
Rate limit checksum events to this many per second.
Note that this should not be set below the ZED thresholds
diff --git a/sys/contrib/openzfs/man/man8/zdb.8 b/sys/contrib/openzfs/man/man8/zdb.8
index 3984aaac5866..0a5b6af73fdb 100644
--- a/sys/contrib/openzfs/man/man8/zdb.8
+++ b/sys/contrib/openzfs/man/man8/zdb.8
@@ -15,7 +15,7 @@
.\" Copyright (c) 2017 Lawrence Livermore National Security, LLC.
.\" Copyright (c) 2017 Intel Corporation.
.\"
-.Dd October 27, 2024
+.Dd April 23, 2025
.Dt ZDB 8
.Os
.
@@ -531,6 +531,18 @@ option, with more occurrences enabling more verbosity.
If no options are specified, all information about the named pool will be
displayed at default verbosity.
.
+.Sh EXIT STATUS
+The
+.Nm
+utility exits
+.Sy 0
+on success,
+.Sy 1
+if a fatal error occurs,
+.Sy 2
+if invalid command line options were specified, or
+.Sy 3
+if on-disk corruption was detected, but was not fatal.
.Sh EXAMPLES
.Ss Example 1 : No Display the configuration of imported pool Ar rpool
.Bd -literal
diff --git a/sys/contrib/openzfs/man/man8/zfs-send.8 b/sys/contrib/openzfs/man/man8/zfs-send.8
index c920a5a48798..f7c6b840303c 100644
--- a/sys/contrib/openzfs/man/man8/zfs-send.8
+++ b/sys/contrib/openzfs/man/man8/zfs-send.8
@@ -173,8 +173,10 @@ The receiving system must have the
feature enabled.
If the
.Sy lz4_compress
-feature is active on the sending system, then the receiving system must have
-that feature enabled as well.
+or
+.Sy zstd_compress
+features are active on the sending system, then the receiving system must have
+the corresponding features enabled as well.
Datasets that are sent with this flag may not be
received as an encrypted dataset, since encrypted datasets cannot use the
.Sy embedded_data
@@ -201,8 +203,10 @@ property for details
.Pc .
If the
.Sy lz4_compress
-feature is active on the sending system, then the receiving system must have
-that feature enabled as well.
+or
+.Sy zstd_compress
+features are active on the sending system, then the receiving system must have
+the corresponding features enabled as well.
If the
.Sy large_blocks
feature is enabled on the sending system but the
@@ -357,8 +361,10 @@ property for details
.Pc .
If the
.Sy lz4_compress
-feature is active on the sending system, then the receiving system must have
-that feature enabled as well.
+or
+.Sy zstd_compress
+features are active on the sending system, then the receiving system must have
+the corresponding features enabled as well.
If the
.Sy large_blocks
feature is enabled on the sending system but the
@@ -400,8 +406,10 @@ The receiving system must have the
feature enabled.
If the
.Sy lz4_compress
-feature is active on the sending system, then the receiving system must have
-that feature enabled as well.
+or
+.Sy zstd_compress
+features are active on the sending system, then the receiving system must have
+the corresponding features enabled as well.
Datasets that are sent with this flag may not be received as an encrypted
dataset,
since encrypted datasets cannot use the
diff --git a/sys/contrib/openzfs/module/Kbuild.in b/sys/contrib/openzfs/module/Kbuild.in
index 3d6f288fa5da..362d2295e091 100644
--- a/sys/contrib/openzfs/module/Kbuild.in
+++ b/sys/contrib/openzfs/module/Kbuild.in
@@ -135,6 +135,7 @@ ICP_OBJS_X86_64 := \
asm-x86_64/sha2/sha256-x86_64.o \
asm-x86_64/sha2/sha512-x86_64.o \
asm-x86_64/modes/aesni-gcm-x86_64.o \
+ asm-x86_64/modes/aesni-gcm-avx2-vaes.o \
asm-x86_64/modes/gcm_pclmulqdq.o \
asm-x86_64/modes/ghash-x86_64.o
diff --git a/sys/contrib/openzfs/module/icp/algs/modes/gcm.c b/sys/contrib/openzfs/module/icp/algs/modes/gcm.c
index c2a982b5a376..3cfa5b8165ce 100644
--- a/sys/contrib/openzfs/module/icp/algs/modes/gcm.c
+++ b/sys/contrib/openzfs/module/icp/algs/modes/gcm.c
@@ -46,6 +46,9 @@
#define IMPL_CYCLE (UINT32_MAX-1)
#ifdef CAN_USE_GCM_ASM
#define IMPL_AVX (UINT32_MAX-2)
+#if CAN_USE_GCM_ASM >= 2
+#define IMPL_AVX2 (UINT32_MAX-3)
+#endif
#endif
#define GCM_IMPL_READ(i) (*(volatile uint32_t *) &(i))
static uint32_t icp_gcm_impl = IMPL_FASTEST;
@@ -56,17 +59,16 @@ static uint32_t user_sel_impl = IMPL_FASTEST;
boolean_t gcm_avx_can_use_movbe = B_FALSE;
/*
* Whether to use the optimized openssl gcm and ghash implementations.
- * Set to true if module parameter icp_gcm_impl == "avx".
*/
-static boolean_t gcm_use_avx = B_FALSE;
-#define GCM_IMPL_USE_AVX (*(volatile boolean_t *)&gcm_use_avx)
+static gcm_impl gcm_impl_used = GCM_IMPL_GENERIC;
+#define GCM_IMPL_USED (*(volatile gcm_impl *)&gcm_impl_used)
extern boolean_t ASMABI atomic_toggle_boolean_nv(volatile boolean_t *);
static inline boolean_t gcm_avx_will_work(void);
-static inline void gcm_set_avx(boolean_t);
-static inline boolean_t gcm_toggle_avx(void);
-static inline size_t gcm_simd_get_htab_size(boolean_t);
+static inline boolean_t gcm_avx2_will_work(void);
+static inline void gcm_use_impl(gcm_impl impl);
+static inline gcm_impl gcm_toggle_impl(void);
static int gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *, char *, size_t,
crypto_data_t *, size_t);
@@ -89,7 +91,7 @@ gcm_mode_encrypt_contiguous_blocks(gcm_ctx_t *ctx, char *data, size_t length,
void (*xor_block)(uint8_t *, uint8_t *))
{
#ifdef CAN_USE_GCM_ASM
- if (ctx->gcm_use_avx == B_TRUE)
+ if (ctx->impl != GCM_IMPL_GENERIC)
return (gcm_mode_encrypt_contiguous_blocks_avx(
ctx, data, length, out, block_size));
#endif
@@ -208,7 +210,7 @@ gcm_encrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
{
(void) copy_block;
#ifdef CAN_USE_GCM_ASM
- if (ctx->gcm_use_avx == B_TRUE)
+ if (ctx->impl != GCM_IMPL_GENERIC)
return (gcm_encrypt_final_avx(ctx, out, block_size));
#endif
@@ -374,7 +376,7 @@ gcm_decrypt_final(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size,
void (*xor_block)(uint8_t *, uint8_t *))
{
#ifdef CAN_USE_GCM_ASM
- if (ctx->gcm_use_avx == B_TRUE)
+ if (ctx->impl != GCM_IMPL_GENERIC)
return (gcm_decrypt_final_avx(ctx, out, block_size));
#endif
@@ -631,23 +633,23 @@ gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param,
((aes_key_t *)gcm_ctx->gcm_keysched)->ops->needs_byteswap;
if (GCM_IMPL_READ(icp_gcm_impl) != IMPL_CYCLE) {
- gcm_ctx->gcm_use_avx = GCM_IMPL_USE_AVX;
+ gcm_ctx->impl = GCM_IMPL_USED;
} else {
/*
- * Handle the "cycle" implementation by creating avx and
- * non-avx contexts alternately.
+ * Handle the "cycle" implementation by creating different
+ * contexts, one per implementation.
*/
- gcm_ctx->gcm_use_avx = gcm_toggle_avx();
+ gcm_ctx->impl = gcm_toggle_impl();
- /* The avx impl. doesn't handle byte swapped key schedules. */
- if (gcm_ctx->gcm_use_avx == B_TRUE && needs_bswap == B_TRUE) {
- gcm_ctx->gcm_use_avx = B_FALSE;
+ /* The AVX impl. doesn't handle byte swapped key schedules. */
+ if (needs_bswap == B_TRUE) {
+ gcm_ctx->impl = GCM_IMPL_GENERIC;
}
/*
- * If this is a GCM context, use the MOVBE and the BSWAP
+ * If this is an AVX context, use the MOVBE and the BSWAP
* variants alternately.
*/
- if (gcm_ctx->gcm_use_avx == B_TRUE &&
+ if (gcm_ctx->impl == GCM_IMPL_AVX &&
zfs_movbe_available() == B_TRUE) {
(void) atomic_toggle_boolean_nv(
(volatile boolean_t *)&gcm_avx_can_use_movbe);
@@ -658,12 +660,13 @@ gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param,
* still they could be created by the aes generic implementation.
* Make sure not to use them since we'll corrupt data if we do.
*/
- if (gcm_ctx->gcm_use_avx == B_TRUE && needs_bswap == B_TRUE) {
- gcm_ctx->gcm_use_avx = B_FALSE;
+ if (gcm_ctx->impl != GCM_IMPL_GENERIC && needs_bswap == B_TRUE) {
+ gcm_ctx->impl = GCM_IMPL_GENERIC;
cmn_err_once(CE_WARN,
"ICP: Can't use the aes generic or cycle implementations "
- "in combination with the gcm avx implementation!");
+ "in combination with the gcm avx or avx2-vaes "
+ "implementation!");
cmn_err_once(CE_WARN,
"ICP: Falling back to a compatible implementation, "
"aes-gcm performance will likely be degraded.");
@@ -672,36 +675,20 @@ gcm_init_ctx(gcm_ctx_t *gcm_ctx, char *param,
"restore performance.");
}
- /* Allocate Htab memory as needed. */
- if (gcm_ctx->gcm_use_avx == B_TRUE) {
- size_t htab_len = gcm_simd_get_htab_size(gcm_ctx->gcm_use_avx);
-
- if (htab_len == 0) {
- return (CRYPTO_MECHANISM_PARAM_INVALID);
- }
- gcm_ctx->gcm_htab_len = htab_len;
- gcm_ctx->gcm_Htable =
- kmem_alloc(htab_len, KM_SLEEP);
-
- if (gcm_ctx->gcm_Htable == NULL) {
- return (CRYPTO_HOST_MEMORY);
- }
+ /*
+ * The AVX implementations use an Htable whose size depends on
+ * the selected implementation.
+ */
+ if (gcm_ctx->impl != GCM_IMPL_GENERIC) {
+ rv = gcm_init_avx(gcm_ctx, iv, iv_len, aad, aad_len,
+ block_size);
}
- /* Avx and non avx context initialization differs from here on. */
- if (gcm_ctx->gcm_use_avx == B_FALSE) {
+ else
#endif /* ifdef CAN_USE_GCM_ASM */
- if (gcm_init(gcm_ctx, iv, iv_len, aad, aad_len, block_size,
- encrypt_block, copy_block, xor_block) != CRYPTO_SUCCESS) {
- rv = CRYPTO_MECHANISM_PARAM_INVALID;
- }
-#ifdef CAN_USE_GCM_ASM
- } else {
- if (gcm_init_avx(gcm_ctx, iv, iv_len, aad, aad_len,
- block_size) != CRYPTO_SUCCESS) {
- rv = CRYPTO_MECHANISM_PARAM_INVALID;
- }
+ if (gcm_init(gcm_ctx, iv, iv_len, aad, aad_len, block_size,
+ encrypt_block, copy_block, xor_block) != CRYPTO_SUCCESS) {
+ rv = CRYPTO_MECHANISM_PARAM_INVALID;
}
-#endif /* ifdef CAN_USE_GCM_ASM */
return (rv);
}
@@ -767,6 +754,9 @@ gcm_impl_get_ops(void)
break;
#ifdef CAN_USE_GCM_ASM
case IMPL_AVX:
+#if CAN_USE_GCM_ASM >= 2
+ case IMPL_AVX2:
+#endif
/*
* Make sure that we return a valid implementation while
* switching to the avx implementation since there still
@@ -828,6 +818,13 @@ gcm_impl_init(void)
* Use the avx implementation if it's available and the implementation
* hasn't changed from its default value of fastest on module load.
*/
+#if CAN_USE_GCM_ASM >= 2
+ if (gcm_avx2_will_work()) {
+ if (GCM_IMPL_READ(user_sel_impl) == IMPL_FASTEST) {
+ gcm_use_impl(GCM_IMPL_AVX2);
+ }
+ } else
+#endif
if (gcm_avx_will_work()) {
#ifdef HAVE_MOVBE
if (zfs_movbe_available() == B_TRUE) {
@@ -835,7 +832,7 @@ gcm_impl_init(void)
}
#endif
if (GCM_IMPL_READ(user_sel_impl) == IMPL_FASTEST) {
- gcm_set_avx(B_TRUE);
+ gcm_use_impl(GCM_IMPL_AVX);
}
}
#endif
@@ -852,6 +849,7 @@ static const struct {
{ "fastest", IMPL_FASTEST },
#ifdef CAN_USE_GCM_ASM
{ "avx", IMPL_AVX },
+ { "avx2-vaes", IMPL_AVX2 },
#endif
};
@@ -887,7 +885,13 @@ gcm_impl_set(const char *val)
/* Check mandatory options */
for (i = 0; i < ARRAY_SIZE(gcm_impl_opts); i++) {
#ifdef CAN_USE_GCM_ASM
+#if CAN_USE_GCM_ASM >= 2
/* Ignore avx implementation if it won't work. */
+ if (gcm_impl_opts[i].sel == IMPL_AVX2 &&
+ !gcm_avx2_will_work()) {
+ continue;
+ }
+#endif
if (gcm_impl_opts[i].sel == IMPL_AVX && !gcm_avx_will_work()) {
continue;
}
@@ -915,11 +919,17 @@ gcm_impl_set(const char *val)
* Use the avx implementation if available and the requested one is
* avx or fastest.
*/
+#if CAN_USE_GCM_ASM >= 2
+ if (gcm_avx2_will_work() == B_TRUE &&
+ (impl == IMPL_AVX2 || impl == IMPL_FASTEST)) {
+ gcm_use_impl(GCM_IMPL_AVX2);
+ } else
+#endif
if (gcm_avx_will_work() == B_TRUE &&
(impl == IMPL_AVX || impl == IMPL_FASTEST)) {
- gcm_set_avx(B_TRUE);
+ gcm_use_impl(GCM_IMPL_AVX);
} else {
- gcm_set_avx(B_FALSE);
+ gcm_use_impl(GCM_IMPL_GENERIC);
}
#endif
@@ -952,6 +962,12 @@ icp_gcm_impl_get(char *buffer, zfs_kernel_param_t *kp)
for (i = 0; i < ARRAY_SIZE(gcm_impl_opts); i++) {
#ifdef CAN_USE_GCM_ASM
/* Ignore avx implementation if it won't work. */
+#if CAN_USE_GCM_ASM >= 2
+ if (gcm_impl_opts[i].sel == IMPL_AVX2 &&
+ !gcm_avx2_will_work()) {
+ continue;
+ }
+#endif
if (gcm_impl_opts[i].sel == IMPL_AVX && !gcm_avx_will_work()) {
continue;
}
@@ -993,9 +1009,6 @@ MODULE_PARM_DESC(icp_gcm_impl, "Select gcm implementation.");
/* Clear the FPU registers since they hold sensitive internal state. */
#define clear_fpu_regs() clear_fpu_regs_avx()
-#define GHASH_AVX(ctx, in, len) \
- gcm_ghash_avx((ctx)->gcm_ghash, (const uint64_t *)(ctx)->gcm_Htable, \
- in, len)
#define gcm_incr_counter_block(ctx) gcm_incr_counter_block_by(ctx, 1)
@@ -1010,20 +1023,77 @@ MODULE_PARM_DESC(icp_gcm_impl, "Select gcm implementation.");
static uint32_t gcm_avx_chunk_size =
((32 * 1024) / GCM_AVX_MIN_DECRYPT_BYTES) * GCM_AVX_MIN_DECRYPT_BYTES;
+/*
+ * GCM definitions: uint128_t is copied from include/crypto/modes.h
+ * Avoiding u128 because it is already defined in kernel sources.
+ */
+typedef struct {
+ uint64_t hi, lo;
+} uint128_t;
+
extern void ASMABI clear_fpu_regs_avx(void);
extern void ASMABI gcm_xor_avx(const uint8_t *src, uint8_t *dst);
extern void ASMABI aes_encrypt_intel(const uint32_t rk[], int nr,
const uint32_t pt[4], uint32_t ct[4]);
extern void ASMABI gcm_init_htab_avx(uint64_t *Htable, const uint64_t H[2]);
+#if CAN_USE_GCM_ASM >= 2
+extern void ASMABI gcm_init_vpclmulqdq_avx2(uint128_t Htable[16],
+ const uint64_t H[2]);
+#endif
extern void ASMABI gcm_ghash_avx(uint64_t ghash[2], const uint64_t *Htable,
const uint8_t *in, size_t len);
+#if CAN_USE_GCM_ASM >= 2
+extern void ASMABI gcm_ghash_vpclmulqdq_avx2(uint64_t ghash[2],
+ const uint64_t *Htable, const uint8_t *in, size_t len);
+#endif
+
+static inline void
+GHASH_AVX(gcm_ctx_t *ctx, const uint8_t *in, size_t len)
+{
+ switch (ctx->impl) {
+#if CAN_USE_GCM_ASM >= 2
+ case GCM_IMPL_AVX2:
+ gcm_ghash_vpclmulqdq_avx2(ctx->gcm_ghash,
+ (const uint64_t *)ctx->gcm_Htable, in, len);
+ break;
+#endif
+
+ case GCM_IMPL_AVX:
+ gcm_ghash_avx(ctx->gcm_ghash,
+ (const uint64_t *)ctx->gcm_Htable, in, len);
+ break;
+
+ default:
+ VERIFY(B_FALSE);
+ }
+}
+typedef size_t ASMABI aesni_gcm_encrypt_impl(const uint8_t *, uint8_t *,
+ size_t, const void *, uint64_t *, const uint64_t *Htable, uint64_t *);
extern size_t ASMABI aesni_gcm_encrypt(const uint8_t *, uint8_t *, size_t,
const void *, uint64_t *, uint64_t *);
+#if CAN_USE_GCM_ASM >= 2
+extern void ASMABI aes_gcm_enc_update_vaes_avx2(const uint8_t *in,
+ uint8_t *out, size_t len, const void *key, const uint8_t ivec[16],
+ const uint128_t Htable[16], uint8_t Xi[16]);
+#endif
+typedef size_t ASMABI aesni_gcm_decrypt_impl(const uint8_t *, uint8_t *,
+ size_t, const void *, uint64_t *, const uint64_t *Htable, uint64_t *);
extern size_t ASMABI aesni_gcm_decrypt(const uint8_t *, uint8_t *, size_t,
const void *, uint64_t *, uint64_t *);
+#if CAN_USE_GCM_ASM >= 2
+extern void ASMABI aes_gcm_dec_update_vaes_avx2(const uint8_t *in,
+ uint8_t *out, size_t len, const void *key, const uint8_t ivec[16],
+ const uint128_t Htable[16], uint8_t Xi[16]);
+#endif
+
+static inline boolean_t
+gcm_avx2_will_work(void)
+{
+ return (kfpu_allowed() &&
+ zfs_avx2_available() && zfs_vaes_available() &&
+ zfs_vpclmulqdq_available());
+}
static inline boolean_t
gcm_avx_will_work(void)
@@ -1035,33 +1105,67 @@ gcm_avx_will_work(void)
}
static inline void
-gcm_set_avx(boolean_t val)
+gcm_use_impl(gcm_impl impl)
{
- if (gcm_avx_will_work() == B_TRUE) {
- atomic_swap_32(&gcm_use_avx, val);
+ switch (impl) {
+#if CAN_USE_GCM_ASM >= 2
+ case GCM_IMPL_AVX2:
+ if (gcm_avx2_will_work() == B_TRUE) {
+ atomic_swap_32(&gcm_impl_used, impl);
+ return;
+ }
+
+ zfs_fallthrough;
+#endif
+
+ case GCM_IMPL_AVX:
+ if (gcm_avx_will_work() == B_TRUE) {
+ atomic_swap_32(&gcm_impl_used, impl);
+ return;
+ }
+
+ zfs_fallthrough;
+
+ default:
+ atomic_swap_32(&gcm_impl_used, GCM_IMPL_GENERIC);
}
}
static inline boolean_t
-gcm_toggle_avx(void)
+gcm_impl_will_work(gcm_impl impl)
{
- if (gcm_avx_will_work() == B_TRUE) {
- return (atomic_toggle_boolean_nv(&GCM_IMPL_USE_AVX));
- } else {
- return (B_FALSE);
+ switch (impl) {
+#if CAN_USE_GCM_ASM >= 2
+ case GCM_IMPL_AVX2:
+ return (gcm_avx2_will_work());
+#endif
+
+ case GCM_IMPL_AVX:
+ return (gcm_avx_will_work());
+
+ default:
+ return (B_TRUE);
}
}
-static inline size_t
-gcm_simd_get_htab_size(boolean_t simd_mode)
+static inline gcm_impl
+gcm_toggle_impl(void)
{
- switch (simd_mode) {
- case B_TRUE:
- return (2 * 6 * 2 * sizeof (uint64_t));
+ gcm_impl current_impl, new_impl;
+ do { /* handle races */
+ current_impl = atomic_load_32(&gcm_impl_used);
+ new_impl = current_impl;
+ while (B_TRUE) { /* handle incompatible implementations */
+ new_impl = (new_impl + 1) % GCM_IMPL_MAX;
+ if (gcm_impl_will_work(new_impl)) {
+ break;
+ }
+ }
- default:
- return (0);
- }
+ } while (atomic_cas_32(&gcm_impl_used, current_impl, new_impl) !=
+ current_impl);
+
+ return (new_impl);
}
@@ -1077,6 +1181,50 @@ gcm_incr_counter_block_by(gcm_ctx_t *ctx, int n)
ctx->gcm_cb[1] = (ctx->gcm_cb[1] & ~counter_mask) | counter;
}
+static size_t aesni_gcm_encrypt_avx(const uint8_t *in, uint8_t *out,
+ size_t len, const void *key, uint64_t *iv, const uint64_t *Htable,
+ uint64_t *Xip)
+{
+ (void) Htable;
+ return (aesni_gcm_encrypt(in, out, len, key, iv, Xip));
+}
+
+#if CAN_USE_GCM_ASM >= 2
+// kSizeTWithoutLower4Bits is a mask that can be used to zero the lower four
+// bits of a |size_t|.
+// This is from boringssl/crypto/fipsmodule/aes/gcm.cc.inc
+static const size_t kSizeTWithoutLower4Bits = (size_t)-16;
+
+/* The following CRYPTO methods are from boringssl/crypto/internal.h */
+static inline uint32_t CRYPTO_bswap4(uint32_t x) {
+ return (__builtin_bswap32(x));
+}
+
+static inline uint32_t CRYPTO_load_u32_be(const void *in) {
+ uint32_t v;
+ memcpy(&v, in, sizeof (v));
+ return (CRYPTO_bswap4(v));
+}
+
+static inline void CRYPTO_store_u32_be(void *out, uint32_t v) {
+ v = CRYPTO_bswap4(v);
+ memcpy(out, &v, sizeof (v));
+}
+
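+/*
+ * The BoringSSL AVX2 routine reads the counter block but never writes
+ * it back (the assembly stores only Xi), so this wrapper masks len
+ * down to whole 16-byte blocks and advances the 32-bit big-endian
+ * counter word in ivec[12..15] by the number of blocks processed.
+ */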
+static size_t aesni_gcm_encrypt_avx2(const uint8_t *in, uint8_t *out,
+ size_t len, const void *key, uint64_t *iv, const uint64_t *Htable,
+ uint64_t *Xip)
+{
+ uint8_t *ivec = (uint8_t *)iv;
+ len &= kSizeTWithoutLower4Bits;
+ aes_gcm_enc_update_vaes_avx2(in, out, len, key, ivec,
+ (const uint128_t *)Htable, (uint8_t *)Xip);
+ CRYPTO_store_u32_be(&ivec[12],
+ CRYPTO_load_u32_be(&ivec[12]) + len / 16);
+ return (len);
+}
+#endif /* if CAN_USE_GCM_ASM >= 2 */
+
/*
* Encrypt multiple blocks of data in GCM mode.
* This is done in gcm_avx_chunk_size chunks, utilizing AVX assembler routines
@@ -1091,8 +1239,15 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
size_t done = 0;
uint8_t *datap = (uint8_t *)data;
size_t chunk_size = (size_t)GCM_CHUNK_SIZE_READ;
+ aesni_gcm_encrypt_impl *encrypt_blocks =
+#if CAN_USE_GCM_ASM >= 2
+ ctx->impl == GCM_IMPL_AVX2 ?
+ aesni_gcm_encrypt_avx2 :
+#endif
+ aesni_gcm_encrypt_avx;
const aes_key_t *key = ((aes_key_t *)ctx->gcm_keysched);
uint64_t *ghash = ctx->gcm_ghash;
+ uint64_t *htable = ctx->gcm_Htable;
uint64_t *cb = ctx->gcm_cb;
uint8_t *ct_buf = NULL;
uint8_t *tmp = (uint8_t *)ctx->gcm_tmp;
@@ -1156,8 +1311,8 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
/* Do the bulk encryption in chunk_size blocks. */
for (; bleft >= chunk_size; bleft -= chunk_size) {
kfpu_begin();
- done = aesni_gcm_encrypt(
- datap, ct_buf, chunk_size, key, cb, ghash);
+ done = encrypt_blocks(
+ datap, ct_buf, chunk_size, key, cb, htable, ghash);
clear_fpu_regs();
kfpu_end();
@@ -1180,7 +1335,8 @@ gcm_mode_encrypt_contiguous_blocks_avx(gcm_ctx_t *ctx, char *data,
/* Bulk encrypt the remaining data. */
kfpu_begin();
if (bleft >= GCM_AVX_MIN_ENCRYPT_BYTES) {
- done = aesni_gcm_encrypt(datap, ct_buf, bleft, key, cb, ghash);
+ done = encrypt_blocks(datap, ct_buf, bleft, key, cb, htable,
+ ghash);
if (done == 0) {
rv = CRYPTO_FAILED;
goto out;
@@ -1293,6 +1449,29 @@ gcm_encrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
return (CRYPTO_SUCCESS);
}
+static size_t aesni_gcm_decrypt_avx(const uint8_t *in, uint8_t *out,
+ size_t len, const void *key, uint64_t *iv, const uint64_t *Htable,
+ uint64_t *Xip)
+{
+ (void) Htable;
+ return (aesni_gcm_decrypt(in, out, len, key, iv, Xip));
+}
+
+#if CAN_USE_GCM_ASM >= 2
+static size_t aesni_gcm_decrypt_avx2(const uint8_t *in, uint8_t *out,
+ size_t len, const void *key, uint64_t *iv, const uint64_t *Htable,
+ uint64_t *Xip)
+{
+ uint8_t *ivec = (uint8_t *)iv;
+ len &= kSizeTWithoutLower4Bits;
+ aes_gcm_dec_update_vaes_avx2(in, out, len, key, ivec,
+ (const uint128_t *)Htable, (uint8_t *)Xip);
+ CRYPTO_store_u32_be(&ivec[12],
+ CRYPTO_load_u32_be(&ivec[12]) + len / 16);
+ return (len);
+}
+#endif /* if CAN_USE_GCM_ASM >= 2 */
+
/*
* Finalize decryption: We just have accumulated crypto text, so now we
* decrypt it here inplace.
@@ -1306,10 +1485,17 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
B_FALSE);
size_t chunk_size = (size_t)GCM_CHUNK_SIZE_READ;
+ aesni_gcm_decrypt_impl *decrypt_blocks =
+#if CAN_USE_GCM_ASM >= 2
+ ctx->impl == GCM_IMPL_AVX2 ?
+ aesni_gcm_decrypt_avx2 :
+#endif
+ aesni_gcm_decrypt_avx;
size_t pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
uint8_t *datap = ctx->gcm_pt_buf;
const aes_key_t *key = ((aes_key_t *)ctx->gcm_keysched);
uint32_t *cb = (uint32_t *)ctx->gcm_cb;
+ uint64_t *htable = ctx->gcm_Htable;
uint64_t *ghash = ctx->gcm_ghash;
uint32_t *tmp = (uint32_t *)ctx->gcm_tmp;
int rv = CRYPTO_SUCCESS;
@@ -1322,8 +1508,8 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
*/
for (bleft = pt_len; bleft >= chunk_size; bleft -= chunk_size) {
kfpu_begin();
- done = aesni_gcm_decrypt(datap, datap, chunk_size,
- (const void *)key, ctx->gcm_cb, ghash);
+ done = decrypt_blocks(datap, datap, chunk_size,
+ (const void *)key, ctx->gcm_cb, htable, ghash);
clear_fpu_regs();
kfpu_end();
if (done != chunk_size) {
@@ -1334,8 +1520,8 @@ gcm_decrypt_final_avx(gcm_ctx_t *ctx, crypto_data_t *out, size_t block_size)
/* Decrypt remainder, which is less than chunk size, in one go. */
kfpu_begin();
if (bleft >= GCM_AVX_MIN_DECRYPT_BYTES) {
- done = aesni_gcm_decrypt(datap, datap, bleft,
- (const void *)key, ctx->gcm_cb, ghash);
+ done = decrypt_blocks(datap, datap, bleft,
+ (const void *)key, ctx->gcm_cb, htable, ghash);
if (done == 0) {
clear_fpu_regs();
kfpu_end();
@@ -1424,13 +1610,42 @@ gcm_init_avx(gcm_ctx_t *ctx, const uint8_t *iv, size_t iv_len,
ASSERT3S(((aes_key_t *)ctx->gcm_keysched)->ops->needs_byteswap, ==,
B_FALSE);
+ size_t htab_len = 0;
+#if CAN_USE_GCM_ASM >= 2
+ if (ctx->impl == GCM_IMPL_AVX2) {
+ /*
+ * BoringSSL's API specifies uint128_t[16] for htab, but only
+ * uint128_t[12] are used.
+ * See https://github.com/google/boringssl/blob/
+ * 813840dd094f9e9c1b00a7368aa25e656554221f1/crypto/fipsmodule/
+ * modes/asm/aes-gcm-avx2-x86_64.pl#L198-L200
+ */
+ htab_len = (2 * 8 * sizeof (uint128_t));
+ } else
+#endif /* CAN_USE_GCM_ASM >= 2 */
+ {
+ htab_len = (2 * 6 * sizeof (uint128_t));
+ }
+
+ ctx->gcm_Htable = kmem_alloc(htab_len, KM_SLEEP);
+ if (ctx->gcm_Htable == NULL) {
+ return (CRYPTO_HOST_MEMORY);
+ }
+
/* Init H (encrypt zero block) and create the initial counter block. */
memset(H, 0, sizeof (ctx->gcm_H));
kfpu_begin();
aes_encrypt_intel(keysched, aes_rounds,
(const uint32_t *)H, (uint32_t *)H);
- gcm_init_htab_avx(ctx->gcm_Htable, H);
+#if CAN_USE_GCM_ASM >= 2
+ if (ctx->impl == GCM_IMPL_AVX2) {
+ gcm_init_vpclmulqdq_avx2((uint128_t *)ctx->gcm_Htable, H);
+ } else
+#endif /* if CAN_USE_GCM_ASM >= 2 */
+ {
+ gcm_init_htab_avx(ctx->gcm_Htable, H);
+ }
if (iv_len == 12) {
memcpy(cb, iv, 12);
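
For reference, the two Htable sizes selected above come to 192 bytes for the original AVX path (2 * 6 entries of 128 bits) and 256 bytes for the AVX2 path (BoringSSL's 16-slot layout, of which only 12 are written). A compile-time sanity check of that arithmetic, with a stand-in for the 128-bit type:

#include <assert.h>
#include <stdint.h>

typedef struct { uint64_t lo, hi; } u128_stub;	/* stand-in for uint128_t */

static_assert(2 * 6 * sizeof (u128_stub) == 192, "AVX Htable size");
static_assert(2 * 8 * sizeof (u128_stub) == 256, "AVX2 Htable size");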
diff --git a/sys/contrib/openzfs/module/icp/algs/modes/modes.c b/sys/contrib/openzfs/module/icp/algs/modes/modes.c
index 343591cd9691..ef3c1806e4b6 100644
--- a/sys/contrib/openzfs/module/icp/algs/modes/modes.c
+++ b/sys/contrib/openzfs/module/icp/algs/modes/modes.c
@@ -171,7 +171,7 @@ gcm_clear_ctx(gcm_ctx_t *ctx)
explicit_memset(ctx->gcm_remainder, 0, sizeof (ctx->gcm_remainder));
explicit_memset(ctx->gcm_H, 0, sizeof (ctx->gcm_H));
#if defined(CAN_USE_GCM_ASM)
- if (ctx->gcm_use_avx == B_TRUE) {
+ if (ctx->impl != GCM_IMPL_GENERIC) {
ASSERT3P(ctx->gcm_Htable, !=, NULL);
explicit_memset(ctx->gcm_Htable, 0, ctx->gcm_htab_len);
kmem_free(ctx->gcm_Htable, ctx->gcm_htab_len);
diff --git a/sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl b/sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl
new file mode 100644
index 000000000000..04c03a37e0cb
--- /dev/null
+++ b/sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl
@@ -0,0 +1,253 @@
+BoringSSL is a fork of OpenSSL. As such, large parts of it fall under OpenSSL
+licensing. Files that are completely new have a Google copyright and an ISC
+license. This license is reproduced at the bottom of this file.
+
+Contributors to BoringSSL are required to follow the CLA rules for Chromium:
+https://cla.developers.google.com/clas
+
+Files in third_party/ have their own licenses, as described therein. The MIT
+license, for third_party/fiat, which, unlike other third_party directories, is
+compiled into non-test libraries, is included below.
+
+The OpenSSL toolkit stays under a dual license, i.e. both the conditions of the
+OpenSSL License and the original SSLeay license apply to the toolkit. See below
+for the actual license texts. Actually both licenses are BSD-style Open Source
+licenses. In case of any license issues related to OpenSSL please contact
+openssl-core@openssl.org.
+
+The following are Google-internal bug numbers where explicit permission from
+some authors is recorded for use of their work. (This is purely for our own
+record keeping.)
+ 27287199
+ 27287880
+ 27287883
+ 263291445
+
+
+ OpenSSL License
+ ---------------
+
+/* ====================================================================
+ * Copyright (c) 1998-2011 The OpenSSL Project. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * 3. All advertising materials mentioning features or use of this
+ * software must display the following acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
+ *
+ * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
+ * endorse or promote products derived from this software without
+ * prior written permission. For written permission, please contact
+ * openssl-core@openssl.org.
+ *
+ * 5. Products derived from this software may not be called "OpenSSL"
+ * nor may "OpenSSL" appear in their names without prior written
+ * permission of the OpenSSL Project.
+ *
+ * 6. Redistributions of any form whatsoever must retain the following
+ * acknowledgment:
+ * "This product includes software developed by the OpenSSL Project
+ * for use in the OpenSSL Toolkit (http://www.openssl.org/)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
+ * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
+ * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ * OF THE POSSIBILITY OF SUCH DAMAGE.
+ * ====================================================================
+ *
+ * This product includes cryptographic software written by Eric Young
+ * (eay@cryptsoft.com). This product includes software written by Tim
+ * Hudson (tjh@cryptsoft.com).
+ *
+ */
+
+ Original SSLeay License
+ -----------------------
+
+/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
+ * All rights reserved.
+ *
+ * This package is an SSL implementation written
+ * by Eric Young (eay@cryptsoft.com).
+ * The implementation was written so as to conform with Netscapes SSL.
+ *
+ * This library is free for commercial and non-commercial use as long as
+ * the following conditions are aheared to. The following conditions
+ * apply to all code found in this distribution, be it the RC4, RSA,
+ * lhash, DES, etc., code; not just the SSL code. The SSL documentation
+ * included with this distribution is covered by the same copyright terms
+ * except that the holder is Tim Hudson (tjh@cryptsoft.com).
+ *
+ * Copyright remains Eric Young's, and as such any Copyright notices in
+ * the code are not to be removed.
+ * If this package is used in a product, Eric Young should be given attribution
+ * as the author of the parts of the library used.
+ * This can be in the form of a textual message at program startup or
+ * in documentation (online or textual) provided with the package.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * "This product includes cryptographic software written by
+ * Eric Young (eay@cryptsoft.com)"
+ * The word 'cryptographic' can be left out if the rouines from the library
+ * being used are not cryptographic related :-).
+ * 4. If you include any Windows specific code (or a derivative thereof) from
+ * the apps directory (application code) you must include an acknowledgement:
+ * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
+ *
+ * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * The licence and distribution terms for any publically available version or
+ * derivative of this code cannot be changed. i.e. this code cannot simply be
+ * copied and put under another distribution licence
+ * [including the GNU Public Licence.]
+ */
+
+
+ISC license used for completely new code in BoringSSL:
+
+/* Copyright 2015 The BoringSSL Authors
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */
+
+
+The code in third_party/fiat carries the MIT license:
+
+Copyright (c) 2015-2016 the fiat-crypto authors (see
+https://github.com/mit-plv/fiat-crypto/blob/master/AUTHORS).
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Licenses for support code
+-------------------------
+
+Parts of the TLS test suite are under the Go license. This code is not included
+in BoringSSL (i.e. libcrypto and libssl) when compiled, however, so
+distributing code linked against BoringSSL does not trigger this license:
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+BoringSSL uses the Chromium test infrastructure to run a continuous build,
+trybots etc. The scripts which manage this, and the script for generating build
+metadata, are under the Chromium license. Distributing code linked against
+BoringSSL does not trigger this license.
+
+Copyright 2015 The Chromium Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl.descrip b/sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl.descrip
new file mode 100644
index 000000000000..f63a67a4d2ae
--- /dev/null
+++ b/sys/contrib/openzfs/module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.boringssl.descrip
@@ -0,0 +1 @@
+PORTIONS OF AES GCM and GHASH FUNCTIONALITY
diff --git a/sys/contrib/openzfs/module/icp/asm-x86_64/modes/aesni-gcm-avx2-vaes.S b/sys/contrib/openzfs/module/icp/asm-x86_64/modes/aesni-gcm-avx2-vaes.S
new file mode 100644
index 000000000000..3d1b045127e2
--- /dev/null
+++ b/sys/contrib/openzfs/module/icp/asm-x86_64/modes/aesni-gcm-avx2-vaes.S
@@ -0,0 +1,1323 @@
+// SPDX-License-Identifier: Apache-2.0
+// This file is generated from a similarly-named Perl script in the BoringSSL
+// source tree. Do not edit by hand.
+
+#if defined(__x86_64__) && defined(HAVE_AVX) && \
+ defined(HAVE_VAES) && defined(HAVE_VPCLMULQDQ)
+
+#define _ASM
+#include <sys/asm_linkage.h>
+
+/* Windows userland links with OpenSSL */
+#if !defined (_WIN32) || defined (_KERNEL)
+
+.section .rodata
+.balign 16
+
+
+.Lbswap_mask:
+.quad 0x08090a0b0c0d0e0f, 0x0001020304050607
+
+
+
+
+
+
+
+
+.Lgfpoly:
+.quad 1, 0xc200000000000000
+
+
+.Lgfpoly_and_internal_carrybit:
+.quad 1, 0xc200000000000001
+
+.balign 32
+
+.Lctr_pattern:
+.quad 0, 0
+.quad 1, 0
+.Linc_2blocks:
+.quad 2, 0
+.quad 2, 0
+
+ENTRY_ALIGN(gcm_init_vpclmulqdq_avx2, 32)
+.cfi_startproc
+
+ENDBR
+
+
+
+
+
+ vmovdqu (%rsi),%xmm3
+ // KCF/ICP stores H in network byte order with the hi qword first
+ // so we need to swap all bytes, not the 2 qwords.
+ vmovdqu .Lbswap_mask(%rip),%xmm4
+ vpshufb %xmm4,%xmm3,%xmm3
+
+
+
+
+
+ vpshufd $0xd3,%xmm3,%xmm0
+ vpsrad $31,%xmm0,%xmm0
+ vpaddq %xmm3,%xmm3,%xmm3
+ vpand .Lgfpoly_and_internal_carrybit(%rip),%xmm0,%xmm0
+ vpxor %xmm0,%xmm3,%xmm3
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm6
+
+
+ vpclmulqdq $0x00,%xmm3,%xmm3,%xmm0
+ vpclmulqdq $0x11,%xmm3,%xmm3,%xmm5
+ vpclmulqdq $0x01,%xmm0,%xmm6,%xmm1
+ vpshufd $0x4e,%xmm0,%xmm0
+ vpxor %xmm0,%xmm1,%xmm1
+ vpclmulqdq $0x01,%xmm1,%xmm6,%xmm0
+ vpshufd $0x4e,%xmm1,%xmm1
+ vpxor %xmm1,%xmm5,%xmm5
+ vpxor %xmm0,%xmm5,%xmm5
+
+
+
+ vinserti128 $1,%xmm3,%ymm5,%ymm3
+ vinserti128 $1,%xmm5,%ymm5,%ymm5
+
+
+ vpclmulqdq $0x00,%ymm5,%ymm3,%ymm0
+ vpclmulqdq $0x01,%ymm5,%ymm3,%ymm1
+ vpclmulqdq $0x10,%ymm5,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2
+ vpshufd $0x4e,%ymm0,%ymm0
+ vpxor %ymm0,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x11,%ymm5,%ymm3,%ymm4
+ vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm4,%ymm4
+ vpxor %ymm0,%ymm4,%ymm4
+
+
+
+ vmovdqu %ymm3,96(%rdi)
+ vmovdqu %ymm4,64(%rdi)
+
+
+
+ vpunpcklqdq %ymm3,%ymm4,%ymm0
+ vpunpckhqdq %ymm3,%ymm4,%ymm1
+ vpxor %ymm1,%ymm0,%ymm0
+ vmovdqu %ymm0,128+32(%rdi)
+
+
+ vpclmulqdq $0x00,%ymm5,%ymm4,%ymm0
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm1
+ vpclmulqdq $0x10,%ymm5,%ymm4,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2
+ vpshufd $0x4e,%ymm0,%ymm0
+ vpxor %ymm0,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x11,%ymm5,%ymm4,%ymm3
+ vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm3,%ymm3
+ vpxor %ymm0,%ymm3,%ymm3
+
+ vpclmulqdq $0x00,%ymm5,%ymm3,%ymm0
+ vpclmulqdq $0x01,%ymm5,%ymm3,%ymm1
+ vpclmulqdq $0x10,%ymm5,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm6,%ymm2
+ vpshufd $0x4e,%ymm0,%ymm0
+ vpxor %ymm0,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vpclmulqdq $0x11,%ymm5,%ymm3,%ymm4
+ vpclmulqdq $0x01,%ymm1,%ymm6,%ymm0
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm4,%ymm4
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vmovdqu %ymm3,32(%rdi)
+ vmovdqu %ymm4,0(%rdi)
+
+
+
+ vpunpcklqdq %ymm3,%ymm4,%ymm0
+ vpunpckhqdq %ymm3,%ymm4,%ymm1
+ vpxor %ymm1,%ymm0,%ymm0
+ vmovdqu %ymm0,128(%rdi)
+
+ vzeroupper
+ RET
+
+.cfi_endproc
+SET_SIZE(gcm_init_vpclmulqdq_avx2)
+ENTRY_ALIGN(gcm_gmult_vpclmulqdq_avx2, 32)
+.cfi_startproc
+
+ENDBR
+
+
+
+ vmovdqu (%rdi),%xmm0
+ vmovdqu .Lbswap_mask(%rip),%xmm1
+ vmovdqu 128-16(%rsi),%xmm2
+ vmovdqu .Lgfpoly(%rip),%xmm3
+ vpshufb %xmm1,%xmm0,%xmm0
+
+ vpclmulqdq $0x00,%xmm2,%xmm0,%xmm4
+ vpclmulqdq $0x01,%xmm2,%xmm0,%xmm5
+ vpclmulqdq $0x10,%xmm2,%xmm0,%xmm6
+ vpxor %xmm6,%xmm5,%xmm5
+ vpclmulqdq $0x01,%xmm4,%xmm3,%xmm6
+ vpshufd $0x4e,%xmm4,%xmm4
+ vpxor %xmm4,%xmm5,%xmm5
+ vpxor %xmm6,%xmm5,%xmm5
+ vpclmulqdq $0x11,%xmm2,%xmm0,%xmm0
+ vpclmulqdq $0x01,%xmm5,%xmm3,%xmm4
+ vpshufd $0x4e,%xmm5,%xmm5
+ vpxor %xmm5,%xmm0,%xmm0
+ vpxor %xmm4,%xmm0,%xmm0
+
+
+ vpshufb %xmm1,%xmm0,%xmm0
+ vmovdqu %xmm0,(%rdi)
+
+
+ RET
+
+.cfi_endproc
+SET_SIZE(gcm_gmult_vpclmulqdq_avx2)
+ENTRY_ALIGN(gcm_ghash_vpclmulqdq_avx2, 32)
+.cfi_startproc
+
+ENDBR
+
+
+
+
+
+
+ vmovdqu .Lbswap_mask(%rip),%xmm6
+ vmovdqu .Lgfpoly(%rip),%xmm7
+
+
+ vmovdqu (%rdi),%xmm5
+ vpshufb %xmm6,%xmm5,%xmm5
+
+
+ cmpq $32,%rcx
+ jb .Lghash_lastblock
+
+
+
+ vinserti128 $1,%xmm6,%ymm6,%ymm6
+ vinserti128 $1,%xmm7,%ymm7,%ymm7
+
+ cmpq $127,%rcx
+ jbe .Lghash_loop_1x
+
+
+ vmovdqu 128(%rsi),%ymm8
+ vmovdqu 128+32(%rsi),%ymm9
+.Lghash_loop_4x:
+
+ vmovdqu 0(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 0(%rsi),%ymm2
+ vpxor %ymm5,%ymm1,%ymm1
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x00,%ymm8,%ymm0,%ymm4
+
+ vmovdqu 32(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 32(%rsi),%ymm2
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x10,%ymm8,%ymm0,%ymm0
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vmovdqu 64(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 64(%rsi),%ymm2
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x00,%ymm9,%ymm0,%ymm0
+ vpxor %ymm0,%ymm4,%ymm4
+
+
+ vmovdqu 96(%rdx),%ymm1
+ vpshufb %ymm6,%ymm1,%ymm1
+ vmovdqu 96(%rsi),%ymm2
+ vpclmulqdq $0x00,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm3,%ymm3
+ vpclmulqdq $0x11,%ymm2,%ymm1,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vpunpckhqdq %ymm1,%ymm1,%ymm0
+ vpxor %ymm1,%ymm0,%ymm0
+ vpclmulqdq $0x10,%ymm9,%ymm0,%ymm0
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vpxor %ymm3,%ymm4,%ymm4
+ vpxor %ymm5,%ymm4,%ymm4
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm2
+ vpclmulqdq $0x01,%ymm3,%ymm2,%ymm0
+ vpshufd $0x4e,%ymm3,%ymm3
+ vpxor %ymm3,%ymm4,%ymm4
+ vpxor %ymm0,%ymm4,%ymm4
+
+ vpclmulqdq $0x01,%ymm4,%ymm2,%ymm0
+ vpshufd $0x4e,%ymm4,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpxor %ymm0,%ymm5,%ymm5
+ vextracti128 $1,%ymm5,%xmm0
+ vpxor %xmm0,%xmm5,%xmm5
+
+ subq $-128,%rdx
+ addq $-128,%rcx
+ cmpq $127,%rcx
+ ja .Lghash_loop_4x
+
+
+ cmpq $32,%rcx
+ jb .Lghash_loop_1x_done
+.Lghash_loop_1x:
+ vmovdqu (%rdx),%ymm0
+ vpshufb %ymm6,%ymm0,%ymm0
+ vpxor %ymm0,%ymm5,%ymm5
+ vmovdqu 128-32(%rsi),%ymm0
+ vpclmulqdq $0x00,%ymm0,%ymm5,%ymm1
+ vpclmulqdq $0x01,%ymm0,%ymm5,%ymm2
+ vpclmulqdq $0x10,%ymm0,%ymm5,%ymm3
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x01,%ymm1,%ymm7,%ymm3
+ vpshufd $0x4e,%ymm1,%ymm1
+ vpxor %ymm1,%ymm2,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x11,%ymm0,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm7,%ymm1
+ vpshufd $0x4e,%ymm2,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpxor %ymm1,%ymm5,%ymm5
+
+ vextracti128 $1,%ymm5,%xmm0
+ vpxor %xmm0,%xmm5,%xmm5
+ addq $32,%rdx
+ subq $32,%rcx
+ cmpq $32,%rcx
+ jae .Lghash_loop_1x
+.Lghash_loop_1x_done:
+
+
+.Lghash_lastblock:
+ testq %rcx,%rcx
+ jz .Lghash_done
+ vmovdqu (%rdx),%xmm0
+ vpshufb %xmm6,%xmm0,%xmm0
+ vpxor %xmm0,%xmm5,%xmm5
+ vmovdqu 128-16(%rsi),%xmm0
+ vpclmulqdq $0x00,%xmm0,%xmm5,%xmm1
+ vpclmulqdq $0x01,%xmm0,%xmm5,%xmm2
+ vpclmulqdq $0x10,%xmm0,%xmm5,%xmm3
+ vpxor %xmm3,%xmm2,%xmm2
+ vpclmulqdq $0x01,%xmm1,%xmm7,%xmm3
+ vpshufd $0x4e,%xmm1,%xmm1
+ vpxor %xmm1,%xmm2,%xmm2
+ vpxor %xmm3,%xmm2,%xmm2
+ vpclmulqdq $0x11,%xmm0,%xmm5,%xmm5
+ vpclmulqdq $0x01,%xmm2,%xmm7,%xmm1
+ vpshufd $0x4e,%xmm2,%xmm2
+ vpxor %xmm2,%xmm5,%xmm5
+ vpxor %xmm1,%xmm5,%xmm5
+
+
+.Lghash_done:
+
+ vpshufb %xmm6,%xmm5,%xmm5
+ vmovdqu %xmm5,(%rdi)
+
+ vzeroupper
+ RET
+
+.cfi_endproc
+SET_SIZE(gcm_ghash_vpclmulqdq_avx2)
+ENTRY_ALIGN(aes_gcm_enc_update_vaes_avx2, 32)
+.cfi_startproc
+
+ENDBR
+ pushq %r12
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r12,-16
+
+ movq 16(%rsp),%r12
+#ifdef BORINGSSL_DISPATCH_TEST
+.extern BORINGSSL_function_hit
+.hidden BORINGSSL_function_hit
+ movb $1,BORINGSSL_function_hit+6(%rip)
+#endif
+ vbroadcasti128 .Lbswap_mask(%rip),%ymm0
+
+
+
+ vmovdqu (%r12),%xmm1
+ vpshufb %xmm0,%xmm1,%xmm1
+ vbroadcasti128 (%r8),%ymm11
+ vpshufb %ymm0,%ymm11,%ymm11
+
+
+
+ movl 504(%rcx),%r10d // ICP has a larger offset for rounds.
+ leal -24(,%r10,4),%r10d // ICP uses 10,12,14 not 9,11,13 for rounds.
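+	// After these two instructions %r10d holds 4*rounds - 24, i.e. 16,
+	// 24 or 32 for AES-128/192/256, so the "cmpl $24" tests below select
+	// the key-length path, and 96(%rcx,%r10,4) below equals
+	// %rcx + 16*rounds, the address of the final round key.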
+
+
+
+
+ leaq 96(%rcx,%r10,4),%r11
+ vbroadcasti128 (%rcx),%ymm9
+ vbroadcasti128 (%r11),%ymm10
+
+
+ vpaddd .Lctr_pattern(%rip),%ymm11,%ymm11
+
+
+
+ cmpq $127,%rdx
+ jbe .Lcrypt_loop_4x_done__func1
+
+ vmovdqu 128(%r9),%ymm7
+ vmovdqu 128+32(%r9),%ymm8
+
+
+
+ vmovdqu .Linc_2blocks(%rip),%ymm2
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm14
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm15
+ vpaddd %ymm2,%ymm11,%ymm11
+
+
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ vpxor %ymm9,%ymm14,%ymm14
+ vpxor %ymm9,%ymm15,%ymm15
+
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_first_4_vecs__func1:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_first_4_vecs__func1
+ vpxor 0(%rdi),%ymm10,%ymm2
+ vpxor 32(%rdi),%ymm10,%ymm3
+ vpxor 64(%rdi),%ymm10,%ymm5
+ vpxor 96(%rdi),%ymm10,%ymm6
+ vaesenclast %ymm2,%ymm12,%ymm12
+ vaesenclast %ymm3,%ymm13,%ymm13
+ vaesenclast %ymm5,%ymm14,%ymm14
+ vaesenclast %ymm6,%ymm15,%ymm15
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+ vmovdqu %ymm14,64(%rsi)
+ vmovdqu %ymm15,96(%rsi)
+
+ subq $-128,%rdi
+ addq $-128,%rdx
+ cmpq $127,%rdx
+ jbe .Lghash_last_ciphertext_4x__func1
+.balign 16
+.Lcrypt_loop_4x__func1:
+
+
+
+
+ vmovdqu .Linc_2blocks(%rip),%ymm2
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm14
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm15
+ vpaddd %ymm2,%ymm11,%ymm11
+
+
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ vpxor %ymm9,%ymm14,%ymm14
+ vpxor %ymm9,%ymm15,%ymm15
+
+ cmpl $24,%r10d
+ jl .Laes128__func1
+ je .Laes192__func1
+
+ vbroadcasti128 -208(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -192(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes192__func1:
+ vbroadcasti128 -176(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -160(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes128__func1:
+ prefetcht0 512(%rdi)
+ prefetcht0 512+64(%rdi)
+
+ vmovdqu 0(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 0(%r9),%ymm4
+ vpxor %ymm1,%ymm3,%ymm3
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6
+
+ vbroadcasti128 -144(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vbroadcasti128 -128(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 32(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 32(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -112(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 64(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 64(%r9),%ymm4
+
+ vbroadcasti128 -96(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -80(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+
+ vmovdqu 96(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+
+ vbroadcasti128 -64(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vmovdqu 96(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -48(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm1,%ymm6,%ymm6
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm4
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -32(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -16(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vextracti128 $1,%ymm1,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+
+
+ subq $-128,%rsi
+ vpxor 0(%rdi),%ymm10,%ymm2
+ vpxor 32(%rdi),%ymm10,%ymm3
+ vpxor 64(%rdi),%ymm10,%ymm5
+ vpxor 96(%rdi),%ymm10,%ymm6
+ vaesenclast %ymm2,%ymm12,%ymm12
+ vaesenclast %ymm3,%ymm13,%ymm13
+ vaesenclast %ymm5,%ymm14,%ymm14
+ vaesenclast %ymm6,%ymm15,%ymm15
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+ vmovdqu %ymm14,64(%rsi)
+ vmovdqu %ymm15,96(%rsi)
+
+ subq $-128,%rdi
+
+ addq $-128,%rdx
+ cmpq $127,%rdx
+ ja .Lcrypt_loop_4x__func1
+.Lghash_last_ciphertext_4x__func1:
+
+ vmovdqu 0(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 0(%r9),%ymm4
+ vpxor %ymm1,%ymm3,%ymm3
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6
+
+ vmovdqu 32(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 32(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vmovdqu 64(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 64(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+
+ vmovdqu 96(%rsi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 96(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm1,%ymm6,%ymm6
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm4
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+ vextracti128 $1,%ymm1,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+
+ subq $-128,%rsi
+.Lcrypt_loop_4x_done__func1:
+
+ testq %rdx,%rdx
+ jz .Ldone__func1
+
+
+
+
+
+ leaq 128(%r9),%r8
+ subq %rdx,%r8
+
+
+ vpxor %xmm5,%xmm5,%xmm5
+ vpxor %xmm6,%xmm6,%xmm6
+ vpxor %xmm7,%xmm7,%xmm7
+
+ cmpq $64,%rdx
+ jb .Llessthan64bytes__func1
+
+
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_1__func1:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_1__func1
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%ymm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %ymm3,%ymm13,%ymm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+
+
+ vpshufb %ymm0,%ymm12,%ymm12
+ vpshufb %ymm0,%ymm13,%ymm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%ymm3
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm7
+ vpclmulqdq $0x00,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+ addq $64,%r8
+ addq $64,%rdi
+ addq $64,%rsi
+ subq $64,%rdx
+ jz .Lreduce__func1
+
+ vpxor %xmm1,%xmm1,%xmm1
+
+
+.Llessthan64bytes__func1:
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_2__func1:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_2__func1
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+
+
+ cmpq $32,%rdx
+ jb .Lxor_one_block__func1
+ je .Lxor_two_blocks__func1
+
+.Lxor_three_blocks__func1:
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%xmm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %xmm3,%xmm13,%xmm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %xmm13,32(%rsi)
+
+ vpshufb %ymm0,%ymm12,%ymm12
+ vpshufb %xmm0,%xmm13,%xmm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%xmm3
+ vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm7,%ymm7
+ jmp .Lghash_mul_one_vec_unreduced__func1
+
+.Lxor_two_blocks__func1:
+ vmovdqu (%rdi),%ymm2
+ vpxor %ymm2,%ymm12,%ymm12
+ vmovdqu %ymm12,(%rsi)
+ vpshufb %ymm0,%ymm12,%ymm12
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ jmp .Lghash_mul_one_vec_unreduced__func1
+
+.Lxor_one_block__func1:
+ vmovdqu (%rdi),%xmm2
+ vpxor %xmm2,%xmm12,%xmm12
+ vmovdqu %xmm12,(%rsi)
+ vpshufb %xmm0,%xmm12,%xmm12
+ vpxor %xmm1,%xmm12,%xmm12
+ vmovdqu (%r8),%xmm2
+
+.Lghash_mul_one_vec_unreduced__func1:
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+.Lreduce__func1:
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm2
+ vpclmulqdq $0x01,%ymm5,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm3,%ymm6,%ymm6
+ vpclmulqdq $0x01,%ymm6,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm7,%ymm7
+ vpxor %ymm3,%ymm7,%ymm7
+ vextracti128 $1,%ymm7,%xmm1
+ vpxor %xmm7,%xmm1,%xmm1
+
+.Ldone__func1:
+
+ vpshufb %xmm0,%xmm1,%xmm1
+ vmovdqu %xmm1,(%r12)
+
+ vzeroupper
+ popq %r12
+.cfi_adjust_cfa_offset -8
+.cfi_restore %r12
+ RET
+
+.cfi_endproc
+SET_SIZE(aes_gcm_enc_update_vaes_avx2)
+ENTRY_ALIGN(aes_gcm_dec_update_vaes_avx2, 32)
+.cfi_startproc
+
+ENDBR
+ pushq %r12
+.cfi_adjust_cfa_offset 8
+.cfi_offset %r12,-16
+
+ movq 16(%rsp),%r12
+ vbroadcasti128 .Lbswap_mask(%rip),%ymm0
+
+
+
+ vmovdqu (%r12),%xmm1
+ vpshufb %xmm0,%xmm1,%xmm1
+ vbroadcasti128 (%r8),%ymm11
+ vpshufb %ymm0,%ymm11,%ymm11
+
+
+
+ movl 504(%rcx),%r10d // ICP has a larger offset for rounds.
+ leal -24(,%r10,4),%r10d // ICP uses 10,12,14 not 9,11,13 for rounds.
+
+
+
+
+ leaq 96(%rcx,%r10,4),%r11
+ vbroadcasti128 (%rcx),%ymm9
+ vbroadcasti128 (%r11),%ymm10
+
+
+ vpaddd .Lctr_pattern(%rip),%ymm11,%ymm11
+
+
+
+ cmpq $127,%rdx
+ jbe .Lcrypt_loop_4x_done__func2
+
+ vmovdqu 128(%r9),%ymm7
+ vmovdqu 128+32(%r9),%ymm8
+.balign 16
+.Lcrypt_loop_4x__func2:
+
+
+
+
+ vmovdqu .Linc_2blocks(%rip),%ymm2
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm14
+ vpaddd %ymm2,%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm15
+ vpaddd %ymm2,%ymm11,%ymm11
+
+
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ vpxor %ymm9,%ymm14,%ymm14
+ vpxor %ymm9,%ymm15,%ymm15
+
+ cmpl $24,%r10d
+ jl .Laes128__func2
+ je .Laes192__func2
+
+ vbroadcasti128 -208(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -192(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes192__func2:
+ vbroadcasti128 -176(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vbroadcasti128 -160(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+.Laes128__func2:
+ prefetcht0 512(%rdi)
+ prefetcht0 512+64(%rdi)
+
+ vmovdqu 0(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 0(%r9),%ymm4
+ vpxor %ymm1,%ymm3,%ymm3
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm7,%ymm2,%ymm6
+
+ vbroadcasti128 -144(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vbroadcasti128 -128(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 32(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 32(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm7,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -112(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vmovdqu 64(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+ vmovdqu 64(%r9),%ymm4
+
+ vbroadcasti128 -96(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -80(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x00,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+
+ vmovdqu 96(%rdi),%ymm3
+ vpshufb %ymm0,%ymm3,%ymm3
+
+ vbroadcasti128 -64(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vmovdqu 96(%r9),%ymm4
+ vpclmulqdq $0x00,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm5,%ymm5
+ vpclmulqdq $0x11,%ymm4,%ymm3,%ymm2
+ vpxor %ymm2,%ymm1,%ymm1
+ vpunpckhqdq %ymm3,%ymm3,%ymm2
+ vpxor %ymm3,%ymm2,%ymm2
+ vpclmulqdq $0x10,%ymm8,%ymm2,%ymm2
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -48(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm1,%ymm6,%ymm6
+
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm4
+ vpclmulqdq $0x01,%ymm5,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm2,%ymm6,%ymm6
+
+ vbroadcasti128 -32(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+
+ vpclmulqdq $0x01,%ymm6,%ymm4,%ymm2
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm1,%ymm1
+ vpxor %ymm2,%ymm1,%ymm1
+
+ vbroadcasti128 -16(%r11),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ vaesenc %ymm2,%ymm14,%ymm14
+ vaesenc %ymm2,%ymm15,%ymm15
+
+ vextracti128 $1,%ymm1,%xmm2
+ vpxor %xmm2,%xmm1,%xmm1
+
+
+
+ vpxor 0(%rdi),%ymm10,%ymm2
+ vpxor 32(%rdi),%ymm10,%ymm3
+ vpxor 64(%rdi),%ymm10,%ymm5
+ vpxor 96(%rdi),%ymm10,%ymm6
+ vaesenclast %ymm2,%ymm12,%ymm12
+ vaesenclast %ymm3,%ymm13,%ymm13
+ vaesenclast %ymm5,%ymm14,%ymm14
+ vaesenclast %ymm6,%ymm15,%ymm15
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+ vmovdqu %ymm14,64(%rsi)
+ vmovdqu %ymm15,96(%rsi)
+
+ subq $-128,%rdi
+ subq $-128,%rsi
+ addq $-128,%rdx
+ cmpq $127,%rdx
+ ja .Lcrypt_loop_4x__func2
+.Lcrypt_loop_4x_done__func2:
+
+ testq %rdx,%rdx
+ jz .Ldone__func2
+
+
+
+
+
+ leaq 128(%r9),%r8
+ subq %rdx,%r8
+
+
+ vpxor %xmm5,%xmm5,%xmm5
+ vpxor %xmm6,%xmm6,%xmm6
+ vpxor %xmm7,%xmm7,%xmm7
+
+ cmpq $64,%rdx
+ jb .Llessthan64bytes__func2
+
+
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_1__func2:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_1__func2
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%ymm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %ymm3,%ymm13,%ymm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %ymm13,32(%rsi)
+
+
+ vpshufb %ymm0,%ymm2,%ymm12
+ vpshufb %ymm0,%ymm3,%ymm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%ymm3
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm7
+ vpclmulqdq $0x00,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm3,%ymm13,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+ addq $64,%r8
+ addq $64,%rdi
+ addq $64,%rsi
+ subq $64,%rdx
+ jz .Lreduce__func2
+
+ vpxor %xmm1,%xmm1,%xmm1
+
+
+.Llessthan64bytes__func2:
+ vpshufb %ymm0,%ymm11,%ymm12
+ vpaddd .Linc_2blocks(%rip),%ymm11,%ymm11
+ vpshufb %ymm0,%ymm11,%ymm13
+ vpxor %ymm9,%ymm12,%ymm12
+ vpxor %ymm9,%ymm13,%ymm13
+ leaq 16(%rcx),%rax
+.Lvaesenc_loop_tail_2__func2:
+ vbroadcasti128 (%rax),%ymm2
+ vaesenc %ymm2,%ymm12,%ymm12
+ vaesenc %ymm2,%ymm13,%ymm13
+ addq $16,%rax
+ cmpq %rax,%r11
+ jne .Lvaesenc_loop_tail_2__func2
+ vaesenclast %ymm10,%ymm12,%ymm12
+ vaesenclast %ymm10,%ymm13,%ymm13
+
+
+
+
+ cmpq $32,%rdx
+ jb .Lxor_one_block__func2
+ je .Lxor_two_blocks__func2
+
+.Lxor_three_blocks__func2:
+ vmovdqu 0(%rdi),%ymm2
+ vmovdqu 32(%rdi),%xmm3
+ vpxor %ymm2,%ymm12,%ymm12
+ vpxor %xmm3,%xmm13,%xmm13
+ vmovdqu %ymm12,0(%rsi)
+ vmovdqu %xmm13,32(%rsi)
+
+ vpshufb %ymm0,%ymm2,%ymm12
+ vpshufb %xmm0,%xmm3,%xmm13
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ vmovdqu 32(%r8),%xmm3
+ vpclmulqdq $0x00,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%xmm3,%xmm13,%xmm4
+ vpxor %ymm4,%ymm7,%ymm7
+ jmp .Lghash_mul_one_vec_unreduced__func2
+
+.Lxor_two_blocks__func2:
+ vmovdqu (%rdi),%ymm2
+ vpxor %ymm2,%ymm12,%ymm12
+ vmovdqu %ymm12,(%rsi)
+ vpshufb %ymm0,%ymm2,%ymm12
+ vpxor %ymm1,%ymm12,%ymm12
+ vmovdqu (%r8),%ymm2
+ jmp .Lghash_mul_one_vec_unreduced__func2
+
+.Lxor_one_block__func2:
+ vmovdqu (%rdi),%xmm2
+ vpxor %xmm2,%xmm12,%xmm12
+ vmovdqu %xmm12,(%rsi)
+ vpshufb %xmm0,%xmm2,%xmm12
+ vpxor %xmm1,%xmm12,%xmm12
+ vmovdqu (%r8),%xmm2
+
+.Lghash_mul_one_vec_unreduced__func2:
+ vpclmulqdq $0x00,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm5,%ymm5
+ vpclmulqdq $0x01,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x10,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm6,%ymm6
+ vpclmulqdq $0x11,%ymm2,%ymm12,%ymm4
+ vpxor %ymm4,%ymm7,%ymm7
+
+.Lreduce__func2:
+
+ vbroadcasti128 .Lgfpoly(%rip),%ymm2
+ vpclmulqdq $0x01,%ymm5,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm5,%ymm5
+ vpxor %ymm5,%ymm6,%ymm6
+ vpxor %ymm3,%ymm6,%ymm6
+ vpclmulqdq $0x01,%ymm6,%ymm2,%ymm3
+ vpshufd $0x4e,%ymm6,%ymm6
+ vpxor %ymm6,%ymm7,%ymm7
+ vpxor %ymm3,%ymm7,%ymm7
+ vextracti128 $1,%ymm7,%xmm1
+ vpxor %xmm7,%xmm1,%xmm1
+
+.Ldone__func2:
+
+ vpshufb %xmm0,%xmm1,%xmm1
+ vmovdqu %xmm1,(%r12)
+
+ vzeroupper
+ popq %r12
+.cfi_adjust_cfa_offset -8
+.cfi_restore %r12
+ RET
+
+.cfi_endproc
+SET_SIZE(aes_gcm_dec_update_vaes_avx2)
+
+#endif /* !_WIN32 || _KERNEL */
+
+/* Mark the stack non-executable. */
+#if defined(__linux__) && defined(__ELF__)
+.section .note.GNU-stack,"",%progbits
+#endif
+
+#endif /* defined(__x86_64__) && defined(HAVE_AVX) && defined(HAVE_VAES) ... */
diff --git a/sys/contrib/openzfs/module/icp/include/modes/modes.h b/sys/contrib/openzfs/module/icp/include/modes/modes.h
index ca734cf4f045..de11d9eafafb 100644
--- a/sys/contrib/openzfs/module/icp/include/modes/modes.h
+++ b/sys/contrib/openzfs/module/icp/include/modes/modes.h
@@ -42,7 +42,7 @@ extern "C" {
*/
#if defined(__x86_64__) && defined(HAVE_AVX) && \
defined(HAVE_AES) && defined(HAVE_PCLMULQDQ)
-#define CAN_USE_GCM_ASM
+#define CAN_USE_GCM_ASM (HAVE_VAES && HAVE_VPCLMULQDQ ? 2 : 1)
extern boolean_t gcm_avx_can_use_movbe;
#endif
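
Defining CAN_USE_GCM_ASM as an expression rather than a bare flag keeps every existing "#ifdef CAN_USE_GCM_ASM" consumer working, while new code can gate on its value with "#if CAN_USE_GCM_ASM >= 2". A small illustration of how the two kinds of consumers compose; the HAVE_* values here are assumptions standing in for what the build system defines:

#define HAVE_VAES		1	/* assumed: set by the build */
#define HAVE_VPCLMULQDQ		1	/* assumed: set by the build */
#define CAN_USE_GCM_ASM		(HAVE_VAES && HAVE_VPCLMULQDQ ? 2 : 1)

#ifdef CAN_USE_GCM_ASM			/* legacy check: any GCM asm at all */
static const int have_gcm_asm = 1;
#endif

#if CAN_USE_GCM_ASM >= 2		/* new check: VAES/VPCLMULQDQ paths */
static const int have_gcm_avx2 = 1;
#endif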
@@ -129,6 +129,15 @@ typedef struct ccm_ctx {
#define ccm_copy_to ccm_common.cc_copy_to
#define ccm_flags ccm_common.cc_flags
+#ifdef CAN_USE_GCM_ASM
+typedef enum gcm_impl {
+ GCM_IMPL_GENERIC = 0,
+ GCM_IMPL_AVX,
+ GCM_IMPL_AVX2,
+ GCM_IMPL_MAX,
+} gcm_impl;
+#endif
+
/*
* gcm_tag_len: Length of authentication tag.
*
@@ -174,7 +183,7 @@ typedef struct gcm_ctx {
uint64_t gcm_len_a_len_c[2];
uint8_t *gcm_pt_buf;
#ifdef CAN_USE_GCM_ASM
- boolean_t gcm_use_avx;
+ enum gcm_impl impl;
#endif
} gcm_ctx_t;
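
Widening the boolean gcm_use_avx into the gcm_impl enum is what lets gcm_toggle_impl() cycle through implementations and lets the bulk routines pick an entry point per context. A hedged, self-contained sketch of that dispatch; the typedef shape mirrors aesni_gcm_encrypt_impl in gcm.c, but the routines here are stubs, not the real assembler entry points:

#include <stddef.h>
#include <stdint.h>

#define CAN_USE_GCM_ASM 2	/* assumed for this illustration */

typedef enum { GCM_IMPL_GENERIC, GCM_IMPL_AVX, GCM_IMPL_AVX2,
    GCM_IMPL_MAX } gcm_impl;

typedef size_t gcm_encrypt_fn(const uint8_t *, uint8_t *, size_t,
    const void *, uint64_t *, const uint64_t *, uint64_t *);

/* Stand-ins for aesni_gcm_encrypt_avx()/_avx2(); they just echo len. */
static size_t
enc_avx(const uint8_t *in, uint8_t *out, size_t len, const void *key,
    uint64_t *iv, const uint64_t *htable, uint64_t *xip)
{
	(void) in; (void) out; (void) key; (void) iv;
	(void) htable; (void) xip;
	return (len);
}

static size_t
enc_avx2(const uint8_t *in, uint8_t *out, size_t len, const void *key,
    uint64_t *iv, const uint64_t *htable, uint64_t *xip)
{
	(void) in; (void) out; (void) key; (void) iv;
	(void) htable; (void) xip;
	return (len & (size_t)-16);	/* whole blocks, like the real one */
}

static gcm_encrypt_fn *
select_encrypt(gcm_impl impl)
{
#if CAN_USE_GCM_ASM >= 2
	if (impl == GCM_IMPL_AVX2)
		return (enc_avx2);
#endif
	return (enc_avx);
}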
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c b/sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c
index c114db14a916..b218c0da8125 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/kmod_core.c
@@ -112,7 +112,6 @@ static int zfs__fini(void);
static void zfs_shutdown(void *, int);
static eventhandler_tag zfs_shutdown_event_tag;
-static eventhandler_tag zfs_mountroot_event_tag;
#define ZFS_MIN_KSTACK_PAGES 4
@@ -311,9 +310,6 @@ zfs_modevent(module_t mod, int type, void *unused __unused)
zfs_shutdown_event_tag = EVENTHANDLER_REGISTER(
shutdown_post_sync, zfs_shutdown, NULL,
SHUTDOWN_PRI_FIRST);
- zfs_mountroot_event_tag = EVENTHANDLER_REGISTER(
- mountroot, spa_boot_init, NULL,
- SI_ORDER_ANY);
}
return (err);
case MOD_UNLOAD:
@@ -322,9 +318,6 @@ zfs_modevent(module_t mod, int type, void *unused __unused)
if (zfs_shutdown_event_tag != NULL)
EVENTHANDLER_DEREGISTER(shutdown_post_sync,
zfs_shutdown_event_tag);
- if (zfs_mountroot_event_tag != NULL)
- EVENTHANDLER_DEREGISTER(mountroot,
- zfs_mountroot_event_tag);
}
return (err);
case MOD_SHUTDOWN:
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
index 61d0bb26d1e5..a222c5de4a2a 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_ctldir.c
@@ -494,7 +494,7 @@ zfsctl_common_getattr(vnode_t *vp, vattr_t *vap)
vap->va_uid = 0;
vap->va_gid = 0;
- vap->va_rdev = 0;
+ vap->va_rdev = NODEV;
/*
* We are a purely virtual object, so we have no
* blocksize or allocated blocks.
@@ -674,6 +674,7 @@ zfsctl_root_readdir(struct vop_readdir_args *ap)
zfs_uio_t uio;
int *eofp = ap->a_eofflag;
off_t dots_offset;
+ ssize_t orig_resid;
int error;
zfs_uio_init(&uio, ap->a_uio);
@@ -688,16 +689,16 @@ zfsctl_root_readdir(struct vop_readdir_args *ap)
* count to return is 0.
*/
if (zfs_uio_offset(&uio) == 3 * sizeof (entry)) {
+ if (eofp != NULL)
+ *eofp = 1;
return (0);
}
+ orig_resid = zfs_uio_resid(&uio);
error = sfs_readdir_common(zfsvfs->z_root, ZFSCTL_INO_ROOT, ap, &uio,
&dots_offset);
- if (error != 0) {
- if (error == ENAMETOOLONG) /* ran out of destination space */
- error = 0;
- return (error);
- }
+ if (error != 0)
+ goto err;
if (zfs_uio_offset(&uio) != dots_offset)
return (SET_ERROR(EINVAL));
@@ -710,8 +711,11 @@ zfsctl_root_readdir(struct vop_readdir_args *ap)
entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio));
if (error != 0) {
- if (error == ENAMETOOLONG)
- error = 0;
+err:
+ if (error == ENAMETOOLONG) {
+ error = orig_resid == zfs_uio_resid(&uio) ?
+ EINVAL : 0;
+ }
return (SET_ERROR(error));
}
if (eofp != NULL)
@@ -1056,17 +1060,21 @@ zfsctl_snapdir_readdir(struct vop_readdir_args *ap)
zfs_uio_t uio;
int *eofp = ap->a_eofflag;
off_t dots_offset;
+ ssize_t orig_resid;
int error;
zfs_uio_init(&uio, ap->a_uio);
+ orig_resid = zfs_uio_resid(&uio);
ASSERT3S(vp->v_type, ==, VDIR);
error = sfs_readdir_common(ZFSCTL_INO_ROOT, ZFSCTL_INO_SNAPDIR, ap,
&uio, &dots_offset);
if (error != 0) {
- if (error == ENAMETOOLONG) /* ran out of destination space */
- error = 0;
+ if (error == ENAMETOOLONG) { /* ran out of destination space */
+ error = orig_resid == zfs_uio_resid(&uio) ?
+ EINVAL : 0;
+ }
return (error);
}
@@ -1084,9 +1092,13 @@ zfsctl_snapdir_readdir(struct vop_readdir_args *ap)
dsl_pool_config_exit(dmu_objset_pool(zfsvfs->z_os), FTAG);
if (error != 0) {
if (error == ENOENT) {
- if (eofp != NULL)
- *eofp = 1;
- error = 0;
+ if (orig_resid == zfs_uio_resid(&uio)) {
+ error = EINVAL;
+ } else {
+ error = 0;
+ if (eofp != NULL)
+ *eofp = 1;
+ }
}
zfs_exit(zfsvfs, FTAG);
return (error);
@@ -1099,8 +1111,10 @@ zfsctl_snapdir_readdir(struct vop_readdir_args *ap)
entry.d_reclen = sizeof (entry);
error = vfs_read_dirent(ap, &entry, zfs_uio_offset(&uio));
if (error != 0) {
- if (error == ENAMETOOLONG)
- error = 0;
+ if (error == ENAMETOOLONG) {
+ error = orig_resid == zfs_uio_resid(&uio) ?
+ EINVAL : 0;
+ }
zfs_exit(zfsvfs, FTAG);
return (SET_ERROR(error));
}
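
These readdir changes (and the zfs_readdir() change below) all apply the same rule: snapshot the caller's residual byte count up front, and when entry emission stops early, return EINVAL only if nothing was copied at all. A buffer too small to hold even one entry is a caller error; partial output is just a short, successful read. A distilled version of that progress check, with illustrative names rather than the ZFS API:

#include <errno.h>
#include <sys/types.h>

/*
 * Map "ran out of destination space" (ENAMETOOLONG from the dirent
 * copy routine) to success iff at least one entry was emitted.
 */
static int
readdir_epilogue(int error, ssize_t orig_resid, ssize_t resid)
{
	if (error == ENAMETOOLONG)
		error = (resid == orig_resid) ? EINVAL : 0;
	return (error);
}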
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
index 1813c411b013..174141a5deab 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
@@ -1695,6 +1695,7 @@ zfs_readdir(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, int *eofp,
objset_t *os;
caddr_t outbuf;
size_t bufsize;
+ ssize_t orig_resid;
zap_cursor_t zc;
zap_attribute_t *zap;
uint_t bytes_wanted;
@@ -1735,7 +1736,7 @@ zfs_readdir(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, int *eofp,
/*
* Quit if directory has been removed (posix)
*/
- if ((*eofp = zp->z_unlinked) != 0) {
+ if ((*eofp = (zp->z_unlinked != 0)) != 0) {
zfs_exit(zfsvfs, FTAG);
return (0);
}
@@ -1743,6 +1744,7 @@ zfs_readdir(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, int *eofp,
error = 0;
os = zfsvfs->z_os;
offset = zfs_uio_offset(uio);
+ orig_resid = zfs_uio_resid(uio);
prefetch = zp->z_zn_prefetch;
zap = zap_attribute_long_alloc();
@@ -1922,7 +1924,7 @@ update:
kmem_free(outbuf, bufsize);
if (error == ENOENT)
- error = 0;
+ error = orig_resid == zfs_uio_resid(uio) ? EINVAL : 0;
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
@@ -2013,7 +2015,7 @@ zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr)
if (vp->v_type == VBLK || vp->v_type == VCHR)
vap->va_rdev = zfs_cmpldev(rdev);
else
- vap->va_rdev = 0;
+ vap->va_rdev = NODEV;
vap->va_gen = zp->z_gen;
vap->va_flags = 0; /* FreeBSD: Reset chflags(2) flags. */
vap->va_filerev = zp->z_seq;
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
index 265dfd55fc4d..0dd2ecd7fd8d 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zvol_os.c
@@ -31,7 +31,7 @@
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
- * Copyright (c) 2024, Klara, Inc.
+ * Copyright (c) 2024, 2025, Klara, Inc.
*/
/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */
@@ -196,7 +196,6 @@ DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);
static int zvol_geom_open(struct g_provider *pp, int flag, int count);
static int zvol_geom_close(struct g_provider *pp, int flag, int count);
-static void zvol_geom_destroy(zvol_state_t *zv);
static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
static void zvol_geom_bio_start(struct bio *bp);
static int zvol_geom_bio_getattr(struct bio *bp);
@@ -226,25 +225,14 @@ zvol_geom_open(struct g_provider *pp, int flag, int count)
}
retry:
- rw_enter(&zvol_state_lock, ZVOL_RW_READER);
- /*
- * Obtain a copy of private under zvol_state_lock to make sure either
- * the result of zvol free code setting private to NULL is observed,
- * or the zv is protected from being freed because of the positive
- * zv_open_count.
- */
- zv = pp->private;
- if (zv == NULL) {
- rw_exit(&zvol_state_lock);
- err = SET_ERROR(ENXIO);
- goto out_locked;
- }
+ zv = atomic_load_ptr(&pp->private);
+ if (zv == NULL)
+ return (SET_ERROR(ENXIO));
mutex_enter(&zv->zv_state_lock);
if (zv->zv_zso->zso_dying || zv->zv_flags & ZVOL_REMOVING) {
- rw_exit(&zvol_state_lock);
err = SET_ERROR(ENXIO);
- goto out_zv_locked;
+ goto out_locked;
}
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
@@ -257,8 +245,24 @@ retry:
drop_suspend = B_TRUE;
if (!rw_tryenter(&zv->zv_suspend_lock, ZVOL_RW_READER)) {
mutex_exit(&zv->zv_state_lock);
+
+ /*
+ * Removal may happen while the locks are down, so
+ * we can't trust zv any longer; we have to start over.
+ */
+ zv = atomic_load_ptr(&pp->private);
+ if (zv == NULL)
+ return (SET_ERROR(ENXIO));
+
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
+
+ if (zv->zv_zso->zso_dying ||
+ zv->zv_flags & ZVOL_REMOVING) {
+ err = SET_ERROR(ENXIO);
+ goto out_locked;
+ }
+
/* Check to see if zv_suspend_lock is needed. */
if (zv->zv_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
@@ -266,7 +270,6 @@ retry:
}
}
}
- rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -294,7 +297,7 @@ retry:
if (drop_namespace)
mutex_exit(&spa_namespace_lock);
if (err)
- goto out_zv_locked;
+ goto out_locked;
pp->mediasize = zv->zv_volsize;
pp->stripeoffset = 0;
pp->stripesize = zv->zv_volblocksize;
@@ -329,9 +332,8 @@ out_opened:
zvol_last_close(zv);
wakeup(zv);
}
-out_zv_locked:
- mutex_exit(&zv->zv_state_lock);
out_locked:
+ mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
return (err);
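Both open paths in this file (and the Linux ones further down) now share the same lockless-lookup-and-revalidate shape instead of serializing on the global zvol_state_lock. A condensed sketch of the pattern, with names mirroring the patched code (pp->private stands in for whichever per-device pointer applies):

    zvol_state_t *zv = atomic_load_ptr(&pp->private);
    if (zv == NULL)                     /* minor already removed */
        return (SET_ERROR(ENXIO));

    mutex_enter(&zv->zv_state_lock);
    if (zv->zv_zso->zso_dying || zv->zv_flags & ZVOL_REMOVING) {
        err = SET_ERROR(ENXIO);         /* removal won the race */
        goto out_locked;
    }
    /*
     * Whenever zv_state_lock is dropped and retaken (e.g. to sort out
     * zv_suspend_lock ordering), reload the pointer and retest
     * ZVOL_REMOVING: the zvol may have been torn down in between.
     */

The atomic_store_ptr() in zvol_os_remove_minor() below is the other half of this handshake.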
@@ -345,12 +347,9 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
boolean_t drop_suspend = B_TRUE;
int new_open_count;
- rw_enter(&zvol_state_lock, ZVOL_RW_READER);
- zv = pp->private;
- if (zv == NULL) {
- rw_exit(&zvol_state_lock);
+ zv = atomic_load_ptr(&pp->private);
+ if (zv == NULL)
return (SET_ERROR(ENXIO));
- }
mutex_enter(&zv->zv_state_lock);
if (zv->zv_flags & ZVOL_EXCL) {
@@ -377,6 +376,15 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
+
+ /*
+ * Unlike in zvol_geom_open(), we don't check if
+ * removal started here, because we might be one of the
+ * openers that needs to be thrown out! If we're the
+ * last, we need to call zvol_last_close() below to
+ * finish cleanup. So, no special treatment for us.
+ */
+
/* Check to see if zv_suspend_lock is needed. */
new_open_count = zv->zv_open_count - count;
if (new_open_count != 0) {
@@ -387,7 +395,6 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
} else {
drop_suspend = B_FALSE;
}
- rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -408,20 +415,6 @@ zvol_geom_close(struct g_provider *pp, int flag, int count)
return (0);
}
-static void
-zvol_geom_destroy(zvol_state_t *zv)
-{
- struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
- struct g_provider *pp = zsg->zsg_provider;
-
- ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_GEOM);
-
- g_topology_assert();
-
- zsg->zsg_provider = NULL;
- g_wither_geom(pp->geom, ENXIO);
-}
-
void
zvol_wait_close(zvol_state_t *zv)
{
@@ -454,7 +447,7 @@ zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
pp->name, acr, acw, ace));
- if (pp->private == NULL) {
+ if (atomic_load_ptr(&pp->private) == NULL) {
if (acr <= 0 && acw <= 0 && ace <= 0)
return (0);
return (pp->error);
@@ -921,25 +914,14 @@ zvol_cdev_open(struct cdev *dev, int flags, int fmt, struct thread *td)
boolean_t drop_suspend = B_FALSE;
retry:
- rw_enter(&zvol_state_lock, ZVOL_RW_READER);
- /*
- * Obtain a copy of si_drv2 under zvol_state_lock to make sure either
- * the result of zvol free code setting si_drv2 to NULL is observed,
- * or the zv is protected from being freed because of the positive
- * zv_open_count.
- */
- zv = dev->si_drv2;
- if (zv == NULL) {
- rw_exit(&zvol_state_lock);
- err = SET_ERROR(ENXIO);
- goto out_locked;
- }
+ zv = atomic_load_ptr(&dev->si_drv2);
+ if (zv == NULL)
+ return (SET_ERROR(ENXIO));
mutex_enter(&zv->zv_state_lock);
- if (zv->zv_zso->zso_dying) {
- rw_exit(&zvol_state_lock);
+ if (zv->zv_zso->zso_dying || zv->zv_flags & ZVOL_REMOVING) {
err = SET_ERROR(ENXIO);
- goto out_zv_locked;
+ goto out_locked;
}
ASSERT3S(zv->zv_volmode, ==, ZFS_VOLMODE_DEV);
@@ -954,6 +936,13 @@ retry:
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
+
+ if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
+ /* Removal started while locks were down. */
+ err = SET_ERROR(ENXIO);
+ goto out_locked;
+ }
+
/* Check to see if zv_suspend_lock is needed. */
if (zv->zv_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
@@ -961,7 +950,6 @@ retry:
}
}
}
- rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -989,7 +977,7 @@ retry:
if (drop_namespace)
mutex_exit(&spa_namespace_lock);
if (err)
- goto out_zv_locked;
+ goto out_locked;
}
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -1016,9 +1004,8 @@ out_opened:
zvol_last_close(zv);
wakeup(zv);
}
-out_zv_locked:
- mutex_exit(&zv->zv_state_lock);
out_locked:
+ mutex_exit(&zv->zv_state_lock);
if (drop_suspend)
rw_exit(&zv->zv_suspend_lock);
return (err);
@@ -1030,12 +1017,9 @@ zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
zvol_state_t *zv;
boolean_t drop_suspend = B_TRUE;
- rw_enter(&zvol_state_lock, ZVOL_RW_READER);
- zv = dev->si_drv2;
- if (zv == NULL) {
- rw_exit(&zvol_state_lock);
+ zv = atomic_load_ptr(&dev->si_drv2);
+ if (zv == NULL)
return (SET_ERROR(ENXIO));
- }
mutex_enter(&zv->zv_state_lock);
if (zv->zv_flags & ZVOL_EXCL) {
@@ -1060,6 +1044,15 @@ zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
mutex_enter(&zv->zv_state_lock);
+
+ /*
+ * Unlike in zvol_cdev_open(), we don't check if
+ * removal started here, because we might be one of the
+ * openers that needs to be thrown out! If we're the
+ * last, we need to call zvol_last_close() below to
+ * finish cleanup. So, no special treatment for us.
+ */
+
/* Check to see if zv_suspend_lock is needed. */
if (zv->zv_open_count != 1) {
rw_exit(&zv->zv_suspend_lock);
@@ -1069,7 +1062,6 @@ zvol_cdev_close(struct cdev *dev, int flags, int fmt, struct thread *td)
} else {
drop_suspend = B_FALSE;
}
- rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -1101,7 +1093,8 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
int error;
boolean_t sync;
- zv = dev->si_drv2;
+ zv = atomic_load_ptr(&dev->si_drv2);
+ ASSERT3P(zv, !=, NULL);
error = 0;
KASSERT(zv->zv_open_count > 0,
@@ -1162,6 +1155,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
*(off_t *)data = 0;
break;
case DIOCGATTR: {
+ rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
spa_t *spa = dmu_objset_spa(zv->zv_objset);
struct diocgattr_arg *arg = (struct diocgattr_arg *)data;
uint64_t refd, avail, usedobjs, availobjs;
@@ -1186,6 +1180,7 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
arg->value.off = refd / DEV_BSIZE;
} else
error = SET_ERROR(ENOIOCTL);
+ rw_exit(&zv->zv_suspend_lock);
break;
}
case FIOSEEKHOLE:
@@ -1196,10 +1191,12 @@ zvol_cdev_ioctl(struct cdev *dev, ulong_t cmd, caddr_t data,
hole = (cmd == FIOSEEKHOLE);
noff = *off;
+ rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
lr = zfs_rangelock_enter(&zv->zv_rangelock, 0, UINT64_MAX,
RL_READER);
error = dmu_offset_next(zv->zv_objset, ZVOL_OBJ, hole, &noff);
zfs_rangelock_exit(lr);
+ rw_exit(&zv->zv_suspend_lock);
*off = noff;
break;
}
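DIOCGATTR and FIOSEEKHOLE/FIOSEEKDATA dereference zv_objset, and the new rw_enter()/rw_exit() pairs hold the read half of zv_suspend_lock across that work, presumably so a concurrent suspend/resume cannot swap the objset out mid-ioctl. Reduced to its essentials:

    rw_enter(&zv->zv_suspend_lock, ZVOL_RW_READER);
    /* zv_objset is stable while the reader half is held */
    spa_t *spa = dmu_objset_spa(zv->zv_objset);
    /* ... per-ioctl work ... */
    rw_exit(&zv->zv_suspend_lock);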
@@ -1400,42 +1397,65 @@ zvol_alloc(const char *name, uint64_t volsize, uint64_t volblocksize,
* Remove minor node for the specified volume.
*/
void
-zvol_os_free(zvol_state_t *zv)
+zvol_os_remove_minor(zvol_state_t *zv)
{
- ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
- ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
+ ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT0(zv->zv_open_count);
+ ASSERT0(atomic_read(&zv->zv_suspend_ref));
+ ASSERT(zv->zv_flags & ZVOL_REMOVING);
- ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
-
- rw_destroy(&zv->zv_suspend_lock);
- zfs_rangelock_fini(&zv->zv_rangelock);
+ struct zvol_state_os *zso = zv->zv_zso;
+ zv->zv_zso = NULL;
if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
- struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
- struct g_provider *pp __maybe_unused = zsg->zsg_provider;
-
- ASSERT0P(pp->private);
+ struct zvol_state_geom *zsg = &zso->zso_geom;
+ struct g_provider *pp = zsg->zsg_provider;
+ atomic_store_ptr(&pp->private, NULL);
+ mutex_exit(&zv->zv_state_lock);
g_topology_lock();
- zvol_geom_destroy(zv);
+ g_wither_geom(pp->geom, ENXIO);
g_topology_unlock();
} else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
- struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
+ struct zvol_state_dev *zsd = &zso->zso_dev;
struct cdev *dev = zsd->zsd_cdev;
+ if (dev != NULL)
+ atomic_store_ptr(&dev->si_drv2, NULL);
+ mutex_exit(&zv->zv_state_lock);
+
if (dev != NULL) {
- ASSERT0P(dev->si_drv2);
destroy_dev(dev);
knlist_clear(&zsd->zsd_selinfo.si_note, 0);
knlist_destroy(&zsd->zsd_selinfo.si_note);
}
}
+ kmem_free(zso, sizeof (struct zvol_state_os));
+
+ mutex_enter(&zv->zv_state_lock);
+}
+
+void
+zvol_os_free(zvol_state_t *zv)
+{
+ ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
+ ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
+ ASSERT0(zv->zv_open_count);
+ ASSERT0P(zv->zv_zso);
+
+ ASSERT0P(zv->zv_objset);
+ ASSERT0P(zv->zv_zilog);
+ ASSERT0P(zv->zv_dn);
+
+ ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);
+
+ rw_destroy(&zv->zv_suspend_lock);
+ zfs_rangelock_fini(&zv->zv_rangelock);
+
mutex_destroy(&zv->zv_state_lock);
cv_destroy(&zv->zv_removing_cv);
dataset_kstats_destroy(&zv->zv_kstat);
- kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
kmem_free(zv, sizeof (zvol_state_t));
zvol_minors--;
}
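Teardown is now split: zvol_os_remove_minor() detaches the OS-side minor (entered and exited with zv_state_lock held, dropping it only around the blocking GEOM/devfs calls), while zvol_os_free() later destroys the bare zvol_state_t. A sketch of the sequence the assertions imply; the actual call sites are in the reworked zvol_remove_minors_impl(), which this diff only shows in part:

    mutex_enter(&zv->zv_state_lock);
    /*
     * Preconditions per the ASSERTs: ZVOL_REMOVING set, no opens,
     * no suspend references.
     */
    zvol_os_remove_minor(zv);   /* frees zv_zso, zeroes pp->private */
    mutex_exit(&zv->zv_state_lock);

    /* later, once the zvol is off the global lists: */
    zvol_os_free(zv);           /* asserts zv_zso == NULL */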
@@ -1538,28 +1558,6 @@ out_doi:
return (error);
}
-void
-zvol_os_clear_private(zvol_state_t *zv)
-{
- ASSERT(RW_LOCK_HELD(&zvol_state_lock));
- if (zv->zv_volmode == ZFS_VOLMODE_GEOM) {
- struct zvol_state_geom *zsg = &zv->zv_zso->zso_geom;
- struct g_provider *pp = zsg->zsg_provider;
-
- if (pp->private == NULL) /* already cleared */
- return;
-
- pp->private = NULL;
- ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
- } else if (zv->zv_volmode == ZFS_VOLMODE_DEV) {
- struct zvol_state_dev *zsd = &zv->zv_zso->zso_dev;
- struct cdev *dev = zsd->zsd_cdev;
-
- if (dev != NULL)
- dev->si_drv2 = NULL;
- }
-}
-
int
zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
index a73acdad34ae..bac166fcd89e 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zvol_os.c
@@ -22,7 +22,7 @@
/*
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2024, Rob Norris <robn@despairlabs.com>
- * Copyright (c) 2024, Klara, Inc.
+ * Copyright (c) 2024, 2025, Klara, Inc.
*/
#include <sys/dataset_kstats.h>
@@ -679,28 +679,19 @@ zvol_open(struct block_device *bdev, fmode_t flag)
retry:
#endif
- rw_enter(&zvol_state_lock, RW_READER);
- /*
- * Obtain a copy of private_data under the zvol_state_lock to make
- * sure that either the result of zvol free code path setting
- * disk->private_data to NULL is observed, or zvol_os_free()
- * is not called on this zv because of the positive zv_open_count.
- */
+
#ifdef HAVE_BLK_MODE_T
- zv = disk->private_data;
+ zv = atomic_load_ptr(&disk->private_data);
#else
- zv = bdev->bd_disk->private_data;
+ zv = atomic_load_ptr(&bdev->bd_disk->private_data);
#endif
if (zv == NULL) {
- rw_exit(&zvol_state_lock);
return (-SET_ERROR(ENXIO));
}
mutex_enter(&zv->zv_state_lock);
-
if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
mutex_exit(&zv->zv_state_lock);
- rw_exit(&zvol_state_lock);
return (-SET_ERROR(ENXIO));
}
@@ -712,8 +703,28 @@ retry:
if (zv->zv_open_count == 0) {
if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
mutex_exit(&zv->zv_state_lock);
+
+ /*
+ * Removal may happen while the locks are down, so
+ * we can't trust zv any longer; we have to start over.
+ */
+#ifdef HAVE_BLK_MODE_T
+ zv = atomic_load_ptr(&disk->private_data);
+#else
+ zv = atomic_load_ptr(&bdev->bd_disk->private_data);
+#endif
+ if (zv == NULL)
+ return (-SET_ERROR(ENXIO));
+
rw_enter(&zv->zv_suspend_lock, RW_READER);
mutex_enter(&zv->zv_state_lock);
+
+ if (unlikely(zv->zv_flags & ZVOL_REMOVING)) {
+ mutex_exit(&zv->zv_state_lock);
+ rw_exit(&zv->zv_suspend_lock);
+ return (-SET_ERROR(ENXIO));
+ }
+
/* check to see if zv_suspend_lock is needed */
if (zv->zv_open_count != 0) {
rw_exit(&zv->zv_suspend_lock);
@@ -724,7 +735,6 @@ retry:
drop_suspend = B_TRUE;
}
}
- rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -821,11 +831,11 @@ zvol_release(struct gendisk *disk, fmode_t unused)
#if !defined(HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG)
(void) unused;
#endif
- zvol_state_t *zv;
boolean_t drop_suspend = B_TRUE;
- rw_enter(&zvol_state_lock, RW_READER);
- zv = disk->private_data;
+ zvol_state_t *zv = atomic_load_ptr(&disk->private_data);
+ if (zv == NULL)
+ return;
mutex_enter(&zv->zv_state_lock);
ASSERT3U(zv->zv_open_count, >, 0);
@@ -839,6 +849,15 @@ zvol_release(struct gendisk *disk, fmode_t unused)
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, RW_READER);
mutex_enter(&zv->zv_state_lock);
+
+ /*
+ * Unlike in zvol_open(), we don't check if removal
+ * started here, because we might be one of the openers
+ * that needs to be thrown out! If we're the last, we
+ * need to call zvol_last_close() below to finish
+ * cleanup. So, no special treatment for us.
+ */
+
/* check to see if zv_suspend_lock is needed */
if (zv->zv_open_count != 1) {
rw_exit(&zv->zv_suspend_lock);
@@ -848,7 +867,6 @@ zvol_release(struct gendisk *disk, fmode_t unused)
} else {
drop_suspend = B_FALSE;
}
- rw_exit(&zvol_state_lock);
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
@@ -868,9 +886,10 @@ static int
zvol_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- zvol_state_t *zv = bdev->bd_disk->private_data;
int error = 0;
+ zvol_state_t *zv = atomic_load_ptr(&bdev->bd_disk->private_data);
+ ASSERT3P(zv, !=, NULL);
ASSERT3U(zv->zv_open_count, >, 0);
switch (cmd) {
@@ -923,9 +942,8 @@ zvol_check_events(struct gendisk *disk, unsigned int clearing)
{
unsigned int mask = 0;
- rw_enter(&zvol_state_lock, RW_READER);
- zvol_state_t *zv = disk->private_data;
+ zvol_state_t *zv = atomic_load_ptr(&disk->private_data);
if (zv != NULL) {
mutex_enter(&zv->zv_state_lock);
mask = zv->zv_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
@@ -933,17 +951,14 @@ zvol_check_events(struct gendisk *disk, unsigned int clearing)
mutex_exit(&zv->zv_state_lock);
}
- rw_exit(&zvol_state_lock);
-
return (mask);
}
static int
zvol_revalidate_disk(struct gendisk *disk)
{
- rw_enter(&zvol_state_lock, RW_READER);
- zvol_state_t *zv = disk->private_data;
+ zvol_state_t *zv = atomic_load_ptr(&disk->private_data);
if (zv != NULL) {
mutex_enter(&zv->zv_state_lock);
set_capacity(zv->zv_zso->zvo_disk,
@@ -951,8 +966,6 @@ zvol_revalidate_disk(struct gendisk *disk)
mutex_exit(&zv->zv_state_lock);
}
- rw_exit(&zvol_state_lock);
-
return (0);
}
@@ -971,16 +984,6 @@ zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize)
return (0);
}
-void
-zvol_os_clear_private(zvol_state_t *zv)
-{
- /*
- * Cleared while holding zvol_state_lock as a writer
- * which will prevent zvol_open() from opening it.
- */
- zv->zv_zso->zvo_disk->private_data = NULL;
-}
-
/*
* Provide a simple virtual geometry for legacy compatibility. For devices
* smaller than 1 MiB a small head and sector count is used to allow very
@@ -990,9 +993,10 @@ zvol_os_clear_private(zvol_state_t *zv)
static int
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
- zvol_state_t *zv = bdev->bd_disk->private_data;
sector_t sectors;
+ zvol_state_t *zv = atomic_load_ptr(&bdev->bd_disk->private_data);
+ ASSERT3P(zv, !=, NULL);
ASSERT3U(zv->zv_open_count, >, 0);
sectors = get_capacity(zv->zv_zso->zvo_disk);
@@ -1417,53 +1421,70 @@ out_kmem:
return (ret);
}
-/*
- * Cleanup then free a zvol_state_t which was created by zvol_alloc().
- * At this time, the structure is not opened by anyone, is taken off
- * the zvol_state_list, and has its private data set to NULL.
- * The zvol_state_lock is dropped.
- *
- * This function may take many milliseconds to complete (e.g. we've seen
- * it take over 256ms), due to the calls to "blk_cleanup_queue" and
- * "del_gendisk". Thus, consumers need to be careful to account for this
- * latency when calling this function.
- */
void
-zvol_os_free(zvol_state_t *zv)
+zvol_os_remove_minor(zvol_state_t *zv)
{
-
- ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
- ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
+ ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT0(zv->zv_open_count);
- ASSERT0P(zv->zv_zso->zvo_disk->private_data);
+ ASSERT0(atomic_read(&zv->zv_suspend_ref));
+ ASSERT(zv->zv_flags & ZVOL_REMOVING);
- rw_destroy(&zv->zv_suspend_lock);
- zfs_rangelock_fini(&zv->zv_rangelock);
+ struct zvol_state_os *zso = zv->zv_zso;
+ zv->zv_zso = NULL;
+
+ /* Clearing private_data will make new callers return immediately. */
+ atomic_store_ptr(&zso->zvo_disk->private_data, NULL);
+
+ /*
+ * Drop the state lock before calling del_gendisk(). There may be
+ * callers waiting to acquire it, but del_gendisk() will block until
+ * they exit, which would deadlock.
+ */
+ mutex_exit(&zv->zv_state_lock);
- del_gendisk(zv->zv_zso->zvo_disk);
+ del_gendisk(zso->zvo_disk);
#if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
(defined(HAVE_BLK_ALLOC_DISK) || defined(HAVE_BLK_ALLOC_DISK_2ARG))
#if defined(HAVE_BLK_CLEANUP_DISK)
- blk_cleanup_disk(zv->zv_zso->zvo_disk);
+ blk_cleanup_disk(zso->zvo_disk);
#else
- put_disk(zv->zv_zso->zvo_disk);
+ put_disk(zso->zvo_disk);
#endif
#else
- blk_cleanup_queue(zv->zv_zso->zvo_queue);
- put_disk(zv->zv_zso->zvo_disk);
+ blk_cleanup_queue(zso->zvo_queue);
+ put_disk(zso->zvo_disk);
#endif
- if (zv->zv_zso->use_blk_mq)
- blk_mq_free_tag_set(&zv->zv_zso->tag_set);
+ if (zso->use_blk_mq)
+ blk_mq_free_tag_set(&zso->tag_set);
+
+ ida_simple_remove(&zvol_ida, MINOR(zso->zvo_dev) >> ZVOL_MINOR_BITS);
- ida_simple_remove(&zvol_ida,
- MINOR(zv->zv_zso->zvo_dev) >> ZVOL_MINOR_BITS);
+ kmem_free(zso, sizeof (struct zvol_state_os));
+
+ mutex_enter(&zv->zv_state_lock);
+}
+
+void
+zvol_os_free(zvol_state_t *zv)
+{
+
+ ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
+ ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
+ ASSERT0(zv->zv_open_count);
+ ASSERT0P(zv->zv_zso);
+
+ ASSERT0P(zv->zv_objset);
+ ASSERT0P(zv->zv_zilog);
+ ASSERT0P(zv->zv_dn);
+
+ rw_destroy(&zv->zv_suspend_lock);
+ zfs_rangelock_fini(&zv->zv_rangelock);
cv_destroy(&zv->zv_removing_cv);
mutex_destroy(&zv->zv_state_lock);
dataset_kstats_destroy(&zv->zv_kstat);
- kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
kmem_free(zv, sizeof (zvol_state_t));
}
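The Linux half makes the same remove/free split, with the extra wrinkle the comment above spells out: del_gendisk() blocks until in-flight openers drain, and those openers may be sleeping on zv_state_lock, so the lock has to go down first. The ordering that avoids the deadlock, condensed from the patched function:

    /* under zv_state_lock: detach so new opens fail fast */
    atomic_store_ptr(&zso->zvo_disk->private_data, NULL);

    mutex_exit(&zv->zv_state_lock); /* let blocked openers run and exit */
    del_gendisk(zso->zvo_disk);     /* waits for them to finish */
    mutex_enter(&zv->zv_state_lock);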
diff --git a/sys/contrib/openzfs/module/zcommon/simd_stat.c b/sys/contrib/openzfs/module/zcommon/simd_stat.c
index 11e2080ff9f2..007ae9e4fbbc 100644
--- a/sys/contrib/openzfs/module/zcommon/simd_stat.c
+++ b/sys/contrib/openzfs/module/zcommon/simd_stat.c
@@ -118,6 +118,10 @@ simd_stat_kstat_data(char *buf, size_t size, void *data)
"pclmulqdq", zfs_pclmulqdq_available());
off += SIMD_STAT_PRINT(simd_stat_kstat_payload,
"movbe", zfs_movbe_available());
+ off += SIMD_STAT_PRINT(simd_stat_kstat_payload,
+ "vaes", zfs_vaes_available());
+ off += SIMD_STAT_PRINT(simd_stat_kstat_payload,
+ "vpclmulqdq", zfs_vpclmulqdq_available());
off += SIMD_STAT_PRINT(simd_stat_kstat_payload,
"osxsave", boot_cpu_has(X86_FEATURE_OSXSAVE));
diff --git a/sys/contrib/openzfs/module/zfs/dbuf.c b/sys/contrib/openzfs/module/zfs/dbuf.c
index 3d0f88b36336..7403f10d91b7 100644
--- a/sys/contrib/openzfs/module/zfs/dbuf.c
+++ b/sys/contrib/openzfs/module/zfs/dbuf.c
@@ -2557,12 +2557,13 @@ dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
/*
* Due to our use of dn_nlevels below, this can only be called
- * in open context, unless we are operating on the MOS.
- * From syncing context, dn_nlevels may be different from the
- * dn_nlevels used when dbuf was dirtied.
+ * in open context, unless we are operating on the MOS or it's
+ * a special object. From syncing context, dn_nlevels may be
+ * different from the dn_nlevels used when dbuf was dirtied.
*/
ASSERT(db->db_objset ==
dmu_objset_pool(db->db_objset)->dp_meta_objset ||
+ DMU_OBJECT_IS_SPECIAL(db->db.db_object) ||
txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT0(db->db_level);
diff --git a/sys/contrib/openzfs/module/zfs/multilist.c b/sys/contrib/openzfs/module/zfs/multilist.c
index 7b85d19e19ee..46fb79269310 100644
--- a/sys/contrib/openzfs/module/zfs/multilist.c
+++ b/sys/contrib/openzfs/module/zfs/multilist.c
@@ -81,7 +81,7 @@ multilist_create_impl(multilist_t *ml, size_t size, size_t offset,
ml->ml_num_sublists = num;
ml->ml_index_func = index_func;
- ml->ml_sublists = kmem_zalloc(sizeof (multilist_sublist_t) *
+ ml->ml_sublists = vmem_zalloc(sizeof (multilist_sublist_t) *
ml->ml_num_sublists, KM_SLEEP);
ASSERT3P(ml->ml_sublists, !=, NULL);
@@ -134,7 +134,7 @@ multilist_destroy(multilist_t *ml)
}
ASSERT3P(ml->ml_sublists, !=, NULL);
- kmem_free(ml->ml_sublists,
+ vmem_free(ml->ml_sublists,
sizeof (multilist_sublist_t) * ml->ml_num_sublists);
ml->ml_num_sublists = 0;
diff --git a/sys/contrib/openzfs/module/zfs/spa_config.c b/sys/contrib/openzfs/module/zfs/spa_config.c
index 7d4d06659146..cf28955b0c50 100644
--- a/sys/contrib/openzfs/module/zfs/spa_config.c
+++ b/sys/contrib/openzfs/module/zfs/spa_config.c
@@ -48,18 +48,17 @@
/*
* Pool configuration repository.
*
- * Pool configuration is stored as a packed nvlist on the filesystem. By
- * default, all pools are stored in /etc/zfs/zpool.cache and loaded on boot
- * (when the ZFS module is loaded). Pools can also have the 'cachefile'
- * property set that allows them to be stored in an alternate location until
- * the control of external software.
+ * Pool configuration is stored as a packed nvlist on the filesystem. When
+ * pools are imported they are added to the /etc/zfs/zpool.cache file and
+ * removed from it when exported. For each cache file, we have a single nvlist
+ * which holds all the configuration information. Pools can also have the
+ * 'cachefile' property set which allows this config to be stored in an
+ * alternate location under the control of external software.
*
- * For each cache file, we have a single nvlist which holds all the
- * configuration information. When the module loads, we read this information
- * from /etc/zfs/zpool.cache and populate the SPA namespace. This namespace is
- * maintained independently in spa.c. Whenever the namespace is modified, or
- * the configuration of a pool is changed, we call spa_write_cachefile(), which
- * walks through all the active pools and writes the configuration to disk.
+ * The kernel independently maintains an AVL tree of imported pools. See the
+ * "SPA locking" comment in spa.c. Whenever a pool configuration is modified
+ * we call spa_write_cachefile() which walks through all the active pools and
+ * writes the updated configuration to the /etc/zfs/zpool.cache file.
*/
static uint64_t spa_config_generation = 1;
@@ -69,94 +68,6 @@ static uint64_t spa_config_generation = 1;
* userland pools when doing testing.
*/
char *spa_config_path = (char *)ZPOOL_CACHE;
-#ifdef _KERNEL
-static int zfs_autoimport_disable = B_TRUE;
-#endif
-
-/*
- * Called when the module is first loaded, this routine loads the configuration
- * file into the SPA namespace. It does not actually open or load the pools; it
- * only populates the namespace.
- */
-void
-spa_config_load(void)
-{
- void *buf = NULL;
- nvlist_t *nvlist, *child;
- nvpair_t *nvpair;
- char *pathname;
- zfs_file_t *fp;
- zfs_file_attr_t zfa;
- uint64_t fsize;
- int err;
-
-#ifdef _KERNEL
- if (zfs_autoimport_disable)
- return;
-#endif
-
- /*
- * Open the configuration file.
- */
- pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
-
- (void) snprintf(pathname, MAXPATHLEN, "%s", spa_config_path);
-
- err = zfs_file_open(pathname, O_RDONLY, 0, &fp);
-
-#ifdef __FreeBSD__
- if (err)
- err = zfs_file_open(ZPOOL_CACHE_BOOT, O_RDONLY, 0, &fp);
-#endif
- kmem_free(pathname, MAXPATHLEN);
-
- if (err)
- return;
-
- if (zfs_file_getattr(fp, &zfa))
- goto out;
-
- fsize = zfa.zfa_size;
- buf = kmem_alloc(fsize, KM_SLEEP);
-
- /*
- * Read the nvlist from the file.
- */
- if (zfs_file_read(fp, buf, fsize, NULL) < 0)
- goto out;
-
- /*
- * Unpack the nvlist.
- */
- if (nvlist_unpack(buf, fsize, &nvlist, KM_SLEEP) != 0)
- goto out;
-
- /*
- * Iterate over all elements in the nvlist, creating a new spa_t for
- * each one with the specified configuration.
- */
- mutex_enter(&spa_namespace_lock);
- nvpair = NULL;
- while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {
- if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
- continue;
-
- child = fnvpair_value_nvlist(nvpair);
-
- if (spa_lookup(nvpair_name(nvpair)) != NULL)
- continue;
- (void) spa_add(nvpair_name(nvpair), child, NULL);
- }
- mutex_exit(&spa_namespace_lock);
-
- nvlist_free(nvlist);
-
-out:
- if (buf != NULL)
- kmem_free(buf, fsize);
-
- zfs_file_close(fp);
-}
static int
spa_config_remove(spa_config_dirent_t *dp)
@@ -623,7 +534,6 @@ spa_config_update(spa_t *spa, int what)
spa_config_update(spa, SPA_CONFIG_UPDATE_VDEVS);
}
-EXPORT_SYMBOL(spa_config_load);
EXPORT_SYMBOL(spa_all_configs);
EXPORT_SYMBOL(spa_config_set);
EXPORT_SYMBOL(spa_config_generate);
@@ -634,8 +544,3 @@ EXPORT_SYMBOL(spa_config_update);
ZFS_MODULE_PARAM(zfs_spa, spa_, config_path, STRING, ZMOD_RD,
"SPA config file (/etc/zfs/zpool.cache)");
#endif
-
-#ifdef _KERNEL
-ZFS_MODULE_PARAM(zfs, zfs_, autoimport_disable, INT, ZMOD_RW,
- "Disable pool import at module load");
-#endif
diff --git a/sys/contrib/openzfs/module/zfs/spa_misc.c b/sys/contrib/openzfs/module/zfs/spa_misc.c
index cce772eae598..dceafbc27556 100644
--- a/sys/contrib/openzfs/module/zfs/spa_misc.c
+++ b/sys/contrib/openzfs/module/zfs/spa_misc.c
@@ -2548,13 +2548,6 @@ spa_name_compare(const void *a1, const void *a2)
}
void
-spa_boot_init(void *unused)
-{
- (void) unused;
- spa_config_load();
-}
-
-void
spa_init(spa_mode_t mode)
{
mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
@@ -2607,7 +2600,6 @@ spa_init(spa_mode_t mode)
chksum_init();
zpool_prop_init();
zpool_feature_init();
- spa_config_load();
vdev_prop_init();
l2arc_start();
scan_init();
diff --git a/sys/contrib/openzfs/module/zfs/zil.c b/sys/contrib/openzfs/module/zfs/zil.c
index 31b59c55f17b..0307df55aa21 100644
--- a/sys/contrib/openzfs/module/zfs/zil.c
+++ b/sys/contrib/openzfs/module/zfs/zil.c
@@ -819,34 +819,37 @@ zil_lwb_vdev_compare(const void *x1, const void *x2)
* we choose them here and later make the block allocation match.
*/
static lwb_t *
-zil_alloc_lwb(zilog_t *zilog, int sz, blkptr_t *bp, boolean_t slog,
- uint64_t txg, lwb_state_t state)
+zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, int min_sz, int sz,
+ boolean_t slog, uint64_t txg)
{
lwb_t *lwb;
lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
+ lwb->lwb_flags = 0;
lwb->lwb_zilog = zilog;
if (bp) {
lwb->lwb_blk = *bp;
- lwb->lwb_slim = (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2);
+ if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2)
+ lwb->lwb_flags |= LWB_FLAG_SLIM;
sz = BP_GET_LSIZE(bp);
+ lwb->lwb_min_sz = sz;
} else {
BP_ZERO(&lwb->lwb_blk);
- lwb->lwb_slim = (spa_version(zilog->zl_spa) >=
- SPA_VERSION_SLIM_ZIL);
+ if (spa_version(zilog->zl_spa) >= SPA_VERSION_SLIM_ZIL)
+ lwb->lwb_flags |= LWB_FLAG_SLIM;
+ lwb->lwb_min_sz = min_sz;
}
- lwb->lwb_slog = slog;
+ if (slog)
+ lwb->lwb_flags |= LWB_FLAG_SLOG;
lwb->lwb_error = 0;
- if (lwb->lwb_slim) {
- lwb->lwb_nmax = sz;
- lwb->lwb_nused = lwb->lwb_nfilled = sizeof (zil_chain_t);
- } else {
- lwb->lwb_nmax = sz - sizeof (zil_chain_t);
- lwb->lwb_nused = lwb->lwb_nfilled = 0;
- }
+ /*
+ * Buffer allocation and capacity setup will be done in
+ * zil_lwb_write_open() when the LWB is opened for ITX assignment.
+ */
+ lwb->lwb_nmax = lwb->lwb_nused = lwb->lwb_nfilled = 0;
lwb->lwb_sz = sz;
- lwb->lwb_state = state;
- lwb->lwb_buf = zio_buf_alloc(sz);
+ lwb->lwb_buf = NULL;
+ lwb->lwb_state = LWB_STATE_NEW;
lwb->lwb_child_zio = NULL;
lwb->lwb_write_zio = NULL;
lwb->lwb_root_zio = NULL;
@@ -857,8 +860,6 @@ zil_alloc_lwb(zilog_t *zilog, int sz, blkptr_t *bp, boolean_t slog,
mutex_enter(&zilog->zl_lock);
list_insert_tail(&zilog->zl_lwb_list, lwb);
- if (state != LWB_STATE_NEW)
- zilog->zl_last_lwb_opened = lwb;
mutex_exit(&zilog->zl_lock);
return (lwb);
@@ -878,7 +879,7 @@ zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
VERIFY(list_is_empty(&lwb->lwb_itxs));
VERIFY(list_is_empty(&lwb->lwb_waiters));
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
- ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
+ ASSERT(!MUTEX_HELD(&lwb->lwb_lock));
/*
* Clear the zilog's field to indicate this lwb is no longer
@@ -1019,7 +1020,7 @@ zil_create(zilog_t *zilog)
}
error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
- ZIL_MIN_BLKSZ, &slog);
+ ZIL_MIN_BLKSZ, ZIL_MIN_BLKSZ, &slog, B_TRUE);
if (error == 0)
zil_init_log_chain(zilog, &blk);
}
@@ -1028,7 +1029,7 @@ zil_create(zilog_t *zilog)
* Allocate a log write block (lwb) for the first log block.
*/
if (error == 0)
- lwb = zil_alloc_lwb(zilog, 0, &blk, slog, txg, LWB_STATE_NEW);
+ lwb = zil_alloc_lwb(zilog, &blk, 0, 0, slog, txg);
/*
* If we just allocated the first log block, commit our transaction
@@ -1324,10 +1325,12 @@ zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
* zil_commit() is racing with spa_sync().
*/
static void
-zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
+zil_commit_waiter_done(zil_commit_waiter_t *zcw, int err)
{
mutex_enter(&zcw->zcw_lock);
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
+ zcw->zcw_lwb = NULL;
+ zcw->zcw_error = err;
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
@@ -1389,7 +1392,7 @@ zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
if (zil_nocacheflush)
return;
- mutex_enter(&lwb->lwb_vdev_lock);
+ mutex_enter(&lwb->lwb_lock);
for (i = 0; i < ndvas; i++) {
zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
if (avl_find(t, &zvsearch, &where) == NULL) {
@@ -1398,7 +1401,7 @@ zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
avl_insert(t, zv, where);
}
}
- mutex_exit(&lwb->lwb_vdev_lock);
+ mutex_exit(&lwb->lwb_lock);
}
static void
@@ -1415,12 +1418,12 @@ zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb)
/*
* While 'lwb' is at a point in its lifetime where lwb_vdev_tree does
- * not need the protection of lwb_vdev_lock (it will only be modified
+ * not need the protection of lwb_lock (it will only be modified
* while holding zilog->zl_lock) as its writes and those of its
* children have all completed. The younger 'nlwb' may be waiting on
* future writes to additional vdevs.
*/
- mutex_enter(&nlwb->lwb_vdev_lock);
+ mutex_enter(&nlwb->lwb_lock);
/*
* Tear down the 'lwb' vdev tree, ensuring that entries which do not
* exist in 'nlwb' are moved to it, freeing any would-be duplicates.
@@ -1434,7 +1437,7 @@ zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb)
kmem_free(zv, sizeof (*zv));
}
}
- mutex_exit(&nlwb->lwb_vdev_lock);
+ mutex_exit(&nlwb->lwb_lock);
}
void
@@ -1491,10 +1494,6 @@ zil_lwb_flush_vdevs_done(zio_t *zio)
zil_itx_destroy(itx, 0);
while ((zcw = list_remove_head(&lwb->lwb_waiters)) != NULL) {
- mutex_enter(&zcw->zcw_lock);
-
- ASSERT3P(zcw->zcw_lwb, ==, lwb);
- zcw->zcw_lwb = NULL;
/*
* We expect any ZIO errors from child ZIOs to have been
* propagated "up" to this specific LWB's root ZIO, in
@@ -1509,14 +1508,7 @@ zil_lwb_flush_vdevs_done(zio_t *zio)
* errors not being handled correctly here. See the
* comment above the call to "zio_flush" for details.
*/
-
- zcw->zcw_zio_error = zio->io_error;
-
- ASSERT3B(zcw->zcw_done, ==, B_FALSE);
- zcw->zcw_done = B_TRUE;
- cv_broadcast(&zcw->zcw_cv);
-
- mutex_exit(&zcw->zcw_lock);
+ zil_commit_waiter_done(zcw, zio->io_error);
}
uint64_t txg = lwb->lwb_issued_txg;
@@ -1588,7 +1580,7 @@ zil_lwb_write_done(zio_t *zio)
avl_tree_t *t = &lwb->lwb_vdev_tree;
void *cookie = NULL;
zil_vdev_node_t *zv;
- lwb_t *nlwb;
+ lwb_t *nlwb = NULL;
ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);
@@ -1608,9 +1600,11 @@ zil_lwb_write_done(zio_t *zio)
* its write ZIO a parent this ZIO. In such case we can not defer
* our flushes or below may be a race between the done callbacks.
*/
- nlwb = list_next(&zilog->zl_lwb_list, lwb);
- if (nlwb && nlwb->lwb_state != LWB_STATE_ISSUED)
- nlwb = NULL;
+ if (!(lwb->lwb_flags & LWB_FLAG_CRASHED)) {
+ nlwb = list_next(&zilog->zl_lwb_list, lwb);
+ if (nlwb && nlwb->lwb_state != LWB_STATE_ISSUED)
+ nlwb = NULL;
+ }
mutex_exit(&zilog->zl_lock);
if (avl_numnodes(t) == 0)
@@ -1624,12 +1618,17 @@ zil_lwb_write_done(zio_t *zio)
* written out.
*
* Additionally, we don't perform any further error handling at
- * this point (e.g. setting "zcw_zio_error" appropriately), as
- * we expect that to occur in "zil_lwb_flush_vdevs_done" (thus,
- * we expect any error seen here, to have been propagated to
- * that function).
+ * this point (e.g. setting "zcw_error" appropriately), as we
+ * expect that to occur in "zil_lwb_flush_vdevs_done" (thus, we
+ * expect any error seen here, to have been propagated to that
+ * function).
+ *
+ * Note that we treat a "crashed" LWB as though it was in error,
+ * even if it did appear to succeed, because we've already
+ * signaled error and cleaned up waiters and committers in
+ * zil_crash(); we just want to clean up and get out of here.
*/
- if (zio->io_error != 0) {
+ if (zio->io_error != 0 || (lwb->lwb_flags & LWB_FLAG_CRASHED)) {
while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(zv, sizeof (*zv));
return;
@@ -1742,10 +1741,26 @@ zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
return;
}
+ mutex_enter(&lwb->lwb_lock);
mutex_enter(&zilog->zl_lock);
lwb->lwb_state = LWB_STATE_OPENED;
zilog->zl_last_lwb_opened = lwb;
mutex_exit(&zilog->zl_lock);
+ mutex_exit(&lwb->lwb_lock);
+
+ /*
+ * Allocate buffer and set up LWB capacities.
+ */
+ ASSERT0P(lwb->lwb_buf);
+ ASSERT3U(lwb->lwb_sz, >, 0);
+ lwb->lwb_buf = zio_buf_alloc(lwb->lwb_sz);
+ if (lwb->lwb_flags & LWB_FLAG_SLIM) {
+ lwb->lwb_nmax = lwb->lwb_sz;
+ lwb->lwb_nused = lwb->lwb_nfilled = sizeof (zil_chain_t);
+ } else {
+ lwb->lwb_nmax = lwb->lwb_sz - sizeof (zil_chain_t);
+ lwb->lwb_nused = lwb->lwb_nfilled = 0;
+ }
}
/*
@@ -1762,6 +1777,8 @@ static uint_t
zil_lwb_plan(zilog_t *zilog, uint64_t size, uint_t *minsize)
{
uint_t md = zilog->zl_max_block_size - sizeof (zil_chain_t);
+ uint_t waste = zil_max_waste_space(zilog);
+ waste = MAX(waste, zilog->zl_cur_max);
if (size <= md) {
/*
@@ -1772,9 +1789,10 @@ zil_lwb_plan(zilog_t *zilog, uint64_t size, uint_t *minsize)
} else if (size > 8 * md) {
/*
* Big bursts use maximum blocks. The first block size
- * is hard to predict, but it does not really matter.
+ * is hard to predict, but we need at least enough space
+ * to make reasonable progress.
*/
- *minsize = 0;
+ *minsize = waste;
return (md);
}
@@ -1787,57 +1805,52 @@ zil_lwb_plan(zilog_t *zilog, uint64_t size, uint_t *minsize)
uint_t s = size;
uint_t n = DIV_ROUND_UP(s, md - sizeof (lr_write_t));
uint_t chunk = DIV_ROUND_UP(s, n);
- uint_t waste = zil_max_waste_space(zilog);
- waste = MAX(waste, zilog->zl_cur_max);
if (chunk <= md - waste) {
*minsize = MAX(s - (md - waste) * (n - 1), waste);
return (chunk);
} else {
- *minsize = 0;
+ *minsize = waste;
return (md);
}
}
/*
* Try to predict next block size based on previous history. Make prediction
- * sufficient for 7 of 8 previous bursts. Don't try to save if the saving is
- * less then 50%, extra writes may cost more, but we don't want single spike
- * to badly affect our predictions.
+ * sufficient for 7 of 8 previous bursts, but don't try to save if the saving
+ * is less than 50%. Extra writes may cost more, but we don't want a single
+ * spike to badly affect our predictions.
*/
-static uint_t
-zil_lwb_predict(zilog_t *zilog)
+static void
+zil_lwb_predict(zilog_t *zilog, uint64_t *min_predict, uint64_t *max_predict)
{
- uint_t m, o;
+ uint_t m1 = 0, m2 = 0, o;
- /* If we are in the middle of a burst, take it into account also. */
- if (zilog->zl_cur_size > 0) {
- o = zil_lwb_plan(zilog, zilog->zl_cur_size, &m);
- } else {
+ /* If we are in the middle of a burst, take it as another data point. */
+ if (zilog->zl_cur_size > 0)
+ o = zil_lwb_plan(zilog, zilog->zl_cur_size, &m1);
+ else
o = UINT_MAX;
- m = 0;
- }
- /* Find minimum optimal size. We don't need to go below that. */
- for (int i = 0; i < ZIL_BURSTS; i++)
- o = MIN(o, zilog->zl_prev_opt[i]);
-
- /* Find two biggest minimal first block sizes above the optimal. */
- uint_t m1 = MAX(m, o), m2 = o;
+ /* Find two largest minimal first block sizes. */
for (int i = 0; i < ZIL_BURSTS; i++) {
- m = zilog->zl_prev_min[i];
- if (m >= m1) {
+ uint_t cur = zilog->zl_prev_min[i];
+ if (cur >= m1) {
m2 = m1;
- m1 = m;
- } else if (m > m2) {
- m2 = m;
+ m1 = cur;
+ } else if (cur > m2) {
+ m2 = cur;
}
}
- /*
- * If second minimum size gives 50% saving -- use it. It may cost us
- * one additional write later, but the space saving is just too big.
- */
- return ((m1 < m2 * 2) ? m1 : m2);
+ /* Minimum should guarantee progress in most cases. */
+ *min_predict = (m1 < m2 * 2) ? m1 : m2;
+
+ /* Maximum doesn't need to go below the minimum optimal size. */
+ for (int i = 0; i < ZIL_BURSTS; i++)
+ o = MIN(o, zilog->zl_prev_opt[i]);
+ m1 = MAX(m1, o);
+ m2 = MAX(m2, o);
+ *max_predict = (m1 < m2 * 2) ? m1 : m2;
}
/*
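A worked example of the selection rule, with made-up history:

    /*
     * Hypothetical zl_prev_min[] contents (ZIL_BURSTS == 8 assumed):
     *   { 8K, 16K, 4K, 64K, 8K, 8K, 32K, 8K }
     * The two largest minima are m1 = 64K and m2 = 32K.
     */
    uint_t m1 = 65536, m2 = 32768;
    uint_t min_predict = (m1 < m2 * 2) ? m1 : m2;
    /*
     * 65536 >= 2 * 32768, so min_predict = 32768: covering 7 of 8
     * bursts saves at least half the space, at the cost of a possible
     * extra write for the one outlier.  Had the pair been 48K/32K, the
     * saving would be under 50% and m1 = 48K would be kept instead.
     */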
@@ -1845,12 +1858,13 @@ zil_lwb_predict(zilog_t *zilog)
* Has to be called under zl_issuer_lock to chain more lwbs.
*/
static lwb_t *
-zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb, lwb_state_t state)
+zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb)
{
- uint64_t blksz, plan, plan2;
+ uint64_t minbs, maxbs;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
+ membar_producer();
lwb->lwb_state = LWB_STATE_CLOSED;
/*
@@ -1875,27 +1889,34 @@ zil_lwb_write_close(zilog_t *zilog, lwb_t *lwb, lwb_state_t state)
* Try to predict what can it be and plan for the worst case.
*/
uint_t m;
- plan = zil_lwb_plan(zilog, zilog->zl_cur_left, &m);
+ maxbs = zil_lwb_plan(zilog, zilog->zl_cur_left, &m);
+ minbs = m;
if (zilog->zl_parallel) {
- plan2 = zil_lwb_plan(zilog, zilog->zl_cur_left +
- zil_lwb_predict(zilog), &m);
- if (plan < plan2)
- plan = plan2;
+ uint64_t minp, maxp;
+ zil_lwb_predict(zilog, &minp, &maxp);
+ maxp = zil_lwb_plan(zilog, zilog->zl_cur_left + maxp,
+ &m);
+ if (maxbs < maxp)
+ maxbs = maxp;
}
} else {
/*
* The previous burst is done and we can only predict what
* will come next.
*/
- plan = zil_lwb_predict(zilog);
+ zil_lwb_predict(zilog, &minbs, &maxbs);
}
- blksz = plan + sizeof (zil_chain_t);
- blksz = P2ROUNDUP_TYPED(blksz, ZIL_MIN_BLKSZ, uint64_t);
- blksz = MIN(blksz, zilog->zl_max_block_size);
- DTRACE_PROBE3(zil__block__size, zilog_t *, zilog, uint64_t, blksz,
- uint64_t, plan);
- return (zil_alloc_lwb(zilog, blksz, NULL, 0, 0, state));
+ minbs += sizeof (zil_chain_t);
+ maxbs += sizeof (zil_chain_t);
+ minbs = P2ROUNDUP_TYPED(minbs, ZIL_MIN_BLKSZ, uint64_t);
+ maxbs = P2ROUNDUP_TYPED(maxbs, ZIL_MIN_BLKSZ, uint64_t);
+ maxbs = MIN(maxbs, zilog->zl_max_block_size);
+ minbs = MIN(minbs, maxbs);
+ DTRACE_PROBE3(zil__block__size, zilog_t *, zilog, uint64_t, minbs,
+ uint64_t, maxbs);
+
+ return (zil_alloc_lwb(zilog, NULL, minbs, maxbs, 0, 0));
}
/*
@@ -1944,14 +1965,16 @@ zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
mutex_exit(&zilog->zl_lock);
next_lwb:
- if (lwb->lwb_slim)
+ if (lwb->lwb_flags & LWB_FLAG_SLIM)
zilc = (zil_chain_t *)lwb->lwb_buf;
else
zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_nmax);
- int wsz = lwb->lwb_sz;
+ uint64_t alloc_size = BP_GET_LSIZE(&lwb->lwb_blk);
+ int wsz = alloc_size;
if (lwb->lwb_error == 0) {
abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf, lwb->lwb_sz);
- if (!lwb->lwb_slog || zilog->zl_cur_size <= zil_slog_bulk)
+ if (!(lwb->lwb_flags & LWB_FLAG_SLOG) ||
+ zilog->zl_cur_size <= zil_slog_bulk)
prio = ZIO_PRIORITY_SYNC_WRITE;
else
prio = ZIO_PRIORITY_ASYNC_WRITE;
@@ -1959,16 +1982,17 @@ next_lwb:
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio, spa, 0,
- &lwb->lwb_blk, lwb_abd, lwb->lwb_sz, zil_lwb_write_done,
+ &lwb->lwb_blk, lwb_abd, alloc_size, zil_lwb_write_done,
lwb, prio, ZIO_FLAG_CANFAIL, &zb);
zil_lwb_add_block(lwb, &lwb->lwb_blk);
- if (lwb->lwb_slim) {
+ if (lwb->lwb_flags & LWB_FLAG_SLIM) {
/* For Slim ZIL only write what is used. */
wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ,
int);
- ASSERT3S(wsz, <=, lwb->lwb_sz);
- zio_shrink(lwb->lwb_write_zio, wsz);
+ ASSERT3S(wsz, <=, alloc_size);
+ if (wsz < alloc_size)
+ zio_shrink(lwb->lwb_write_zio, wsz);
wsz = lwb->lwb_write_zio->io_size;
}
memset(lwb->lwb_buf + lwb->lwb_nused, 0, wsz - lwb->lwb_nused);
@@ -2004,13 +2028,53 @@ next_lwb:
BP_ZERO(bp);
error = lwb->lwb_error;
if (error == 0) {
- error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, nlwb->lwb_sz,
- &slog);
+ /*
+ * Allocation flexibility depends on LWB state:
+ * if NEW: allow range allocation and larger sizes;
+ * if OPENED: use fixed predetermined allocation size;
+ * if CLOSED + Slim: allocate precisely for actual usage.
+ */
+ boolean_t flexible = (nlwb->lwb_state == LWB_STATE_NEW);
+ if (flexible) {
+ /* We need to prevent opening till we update lwb_sz. */
+ mutex_enter(&nlwb->lwb_lock);
+ flexible = (nlwb->lwb_state == LWB_STATE_NEW);
+ if (!flexible)
+ mutex_exit(&nlwb->lwb_lock); /* We lost. */
+ }
+ boolean_t closed_slim = (nlwb->lwb_state == LWB_STATE_CLOSED &&
+ (lwb->lwb_flags & LWB_FLAG_SLIM));
+
+ uint64_t min_size, max_size;
+ if (closed_slim) {
+ /* This transition is racy, but only one way. */
+ membar_consumer();
+ min_size = max_size = P2ROUNDUP_TYPED(nlwb->lwb_nused,
+ ZIL_MIN_BLKSZ, uint64_t);
+ } else if (flexible) {
+ min_size = nlwb->lwb_min_sz;
+ max_size = nlwb->lwb_sz;
+ } else {
+ min_size = max_size = nlwb->lwb_sz;
+ }
+
+ error = zio_alloc_zil(spa, zilog->zl_os, txg, bp,
+ min_size, max_size, &slog, flexible);
+ if (error == 0) {
+ if (closed_slim)
+ ASSERT3U(BP_GET_LSIZE(bp), ==, max_size);
+ else if (flexible)
+ nlwb->lwb_sz = BP_GET_LSIZE(bp);
+ else
+ ASSERT3U(BP_GET_LSIZE(bp), ==, nlwb->lwb_sz);
+ }
+ if (flexible)
+ mutex_exit(&nlwb->lwb_lock);
}
if (error == 0) {
ASSERT3U(BP_GET_BIRTH(bp), ==, txg);
- BP_SET_CHECKSUM(bp, nlwb->lwb_slim ? ZIO_CHECKSUM_ZILOG2 :
- ZIO_CHECKSUM_ZILOG);
+ BP_SET_CHECKSUM(bp, (nlwb->lwb_flags & LWB_FLAG_SLIM) ?
+ ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
bp->blk_cksum = lwb->lwb_blk.blk_cksum;
bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
}
@@ -2039,14 +2103,15 @@ next_lwb:
if (nlwb) {
nlwb->lwb_blk = *bp;
nlwb->lwb_error = error;
- nlwb->lwb_slog = slog;
+ if (slog)
+ nlwb->lwb_flags |= LWB_FLAG_SLOG;
nlwb->lwb_alloc_txg = txg;
if (nlwb->lwb_state != LWB_STATE_READY)
nlwb = NULL;
}
mutex_exit(&zilog->zl_lock);
- if (lwb->lwb_slog) {
+ if (lwb->lwb_flags & LWB_FLAG_SLOG) {
ZIL_STAT_BUMP(zilog, zil_itx_metaslab_slog_count);
ZIL_STAT_INCR(zilog, zil_itx_metaslab_slog_bytes,
lwb->lwb_nused);
@@ -2220,7 +2285,6 @@ zil_lwb_assign(zilog_t *zilog, lwb_t *lwb, itx_t *itx, list_t *ilwbs)
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb, !=, NULL);
- ASSERT3P(lwb->lwb_buf, !=, NULL);
zil_lwb_write_open(zilog, lwb);
@@ -2262,9 +2326,10 @@ cont:
(dlen % max_log_data == 0 ||
lwb_sp < reclen + dlen % max_log_data))) {
list_insert_tail(ilwbs, lwb);
- lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_OPENED);
+ lwb = zil_lwb_write_close(zilog, lwb);
if (lwb == NULL)
return (NULL);
+ zil_lwb_write_open(zilog, lwb);
lwb_sp = lwb->lwb_nmax - lwb->lwb_nused;
}
@@ -2554,7 +2619,7 @@ zil_itxg_clean(void *arg)
* called) we will hit this case.
*/
if (itx->itx_lr.lrc_txtype == TX_COMMIT)
- zil_commit_waiter_skip(itx->itx_private);
+ zil_commit_waiter_done(itx->itx_private, 0);
zil_itx_destroy(itx, 0);
}
@@ -2742,6 +2807,7 @@ zil_crash_clean(zilog_t *zilog, uint64_t synced_txg)
}
/* This LWB is from the past, so we can clean it up now. */
+ ASSERT(lwb->lwb_flags & LWB_FLAG_CRASHED);
list_remove(&zilog->zl_lwb_crash_list, lwb);
if (lwb->lwb_buf != NULL)
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
@@ -2981,7 +3047,7 @@ zil_prune_commit_list(zilog_t *zilog)
* never any itx's for it to wait on), so it's
* safe to skip this waiter and mark it done.
*/
- zil_commit_waiter_skip(itx->itx_private);
+ zil_commit_waiter_done(itx->itx_private, 0);
} else {
zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
}
@@ -3212,15 +3278,21 @@ zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
* "next" lwb on-disk. When this happens, we must stall
* the ZIL write pipeline; see the comment within
* zil_commit_writer_stall() for more details.
+ *
+ * ESHUTDOWN has to be handled carefully here. If we get it,
+ * then the pool has suspended and zil_crash() was called, so we
+ * need to stop trying and just get an error back to the
+ * callers.
*/
int err = 0;
while ((lwb = list_remove_head(ilwbs)) != NULL) {
- err = zil_lwb_write_issue(zilog, lwb);
- if (err != 0)
- break;
+ if (err == 0)
+ err = zil_lwb_write_issue(zilog, lwb);
}
- if (err == 0)
+ if (err != ESHUTDOWN)
err = zil_commit_writer_stall(zilog);
+ if (err == ESHUTDOWN)
+ err = SET_ERROR(EIO);
/*
* Additionally, we have to signal and mark the "nolwb"
@@ -3230,7 +3302,7 @@ zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
*/
zil_commit_waiter_t *zcw;
while ((zcw = list_remove_head(&nolwb_waiters)) != NULL)
- zil_commit_waiter_skip(zcw);
+ zil_commit_waiter_done(zcw, err);
/*
* And finally, we have to destroy the itx's that
@@ -3238,7 +3310,7 @@ zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
* the itx's callback if one exists for the itx.
*/
while ((itx = list_remove_head(&nolwb_itxs)) != NULL)
- zil_itx_destroy(itx, 0);
+ zil_itx_destroy(itx, err);
} else {
ASSERT(list_is_empty(&nolwb_waiters));
ASSERT3P(lwb, !=, NULL);
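The issue loop above changes shape deliberately: it drains ilwbs unconditionally so no lwb is leaked after a failure, but only attempts the write while err is still zero, and ESHUTDOWN both skips the stall and is translated to EIO before it can reach callers. Condensed:

    int err = 0;
    while ((lwb = list_remove_head(ilwbs)) != NULL) {
        if (err == 0)           /* stop issuing, keep draining */
            err = zil_lwb_write_issue(zilog, lwb);
    }
    if (err != ESHUTDOWN)
        err = zil_commit_writer_stall(zilog);
    if (err == ESHUTDOWN)       /* zil_crash() already signaled waiters */
        err = SET_ERROR(EIO);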
@@ -3292,17 +3364,17 @@ zil_process_commit_list(zilog_t *zilog, zil_commit_waiter_t *zcw, list_t *ilwbs)
(!zilog->zl_parallel || zilog->zl_suspend > 0)) {
zil_burst_done(zilog);
list_insert_tail(ilwbs, lwb);
- lwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW);
+ lwb = zil_lwb_write_close(zilog, lwb);
if (lwb == NULL) {
int err = 0;
while ((lwb =
list_remove_head(ilwbs)) != NULL) {
- err = zil_lwb_write_issue(zilog, lwb);
- if (err != 0)
- break;
+ if (err == 0)
+ err = zil_lwb_write_issue(
+ zilog, lwb);
}
- if (err == 0)
- zil_commit_writer_stall(zilog);
+ if (err != ESHUTDOWN)
+ (void) zil_commit_writer_stall(zilog);
}
}
}
@@ -3470,7 +3542,7 @@ zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
* hasn't been issued.
*/
zil_burst_done(zilog);
- lwb_t *nlwb = zil_lwb_write_close(zilog, lwb, LWB_STATE_NEW);
+ lwb_t *nlwb = zil_lwb_write_close(zilog, lwb);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_CLOSED);
@@ -3546,7 +3618,7 @@ zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
* commit itxs. When this occurs, the commit waiters linked
* off of these commit itxs will not be committed to an
* lwb. Additionally, these commit waiters will not be
- * marked done until zil_commit_waiter_skip() is called via
+ * marked done until zil_commit_waiter_done() is called via
* zil_itxg_clean().
*
* Thus, it's possible for this commit waiter (i.e. the
@@ -3624,7 +3696,7 @@ zil_alloc_commit_waiter(void)
list_link_init(&zcw->zcw_node);
zcw->zcw_lwb = NULL;
zcw->zcw_done = B_FALSE;
- zcw->zcw_zio_error = 0;
+ zcw->zcw_error = 0;
return (zcw);
}
@@ -3728,6 +3800,9 @@ zil_crash(zilog_t *zilog)
*/
for (lwb_t *lwb = list_head(&zilog->zl_lwb_crash_list); lwb != NULL;
lwb = list_next(&zilog->zl_lwb_crash_list, lwb)) {
+ ASSERT(!(lwb->lwb_flags & LWB_FLAG_CRASHED));
+ lwb->lwb_flags |= LWB_FLAG_CRASHED;
+
itx_t *itx;
while ((itx = list_remove_head(&lwb->lwb_itxs)) != NULL)
zil_itx_destroy(itx, EIO);
@@ -3736,7 +3811,7 @@ zil_crash(zilog_t *zilog)
while ((zcw = list_remove_head(&lwb->lwb_waiters)) != NULL) {
mutex_enter(&zcw->zcw_lock);
zcw->zcw_lwb = NULL;
- zcw->zcw_zio_error = EIO;
+ zcw->zcw_error = EIO;
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
@@ -4014,7 +4089,7 @@ zil_commit_impl(zilog_t *zilog, uint64_t foid)
zil_commit_waiter(zilog, zcw);
int err = 0;
- if (zcw->zcw_zio_error != 0) {
+ if (zcw->zcw_error != 0) {
/*
* If there was an error writing out the ZIL blocks that
* this thread is waiting on, then we fallback to
@@ -4149,7 +4224,7 @@ zil_lwb_cons(void *vbuf, void *unused, int kmflag)
offsetof(zil_commit_waiter_t, zcw_node));
avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare,
sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
- mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&lwb->lwb_lock, NULL, MUTEX_DEFAULT, NULL);
return (0);
}
@@ -4158,7 +4233,7 @@ zil_lwb_dest(void *vbuf, void *unused)
{
(void) unused;
lwb_t *lwb = vbuf;
- mutex_destroy(&lwb->lwb_vdev_lock);
+ mutex_destroy(&lwb->lwb_lock);
avl_destroy(&lwb->lwb_vdev_tree);
list_destroy(&lwb->lwb_waiters);
list_destroy(&lwb->lwb_itxs);
@@ -4381,7 +4456,7 @@ zil_close(zilog_t *zilog)
if (lwb != NULL) {
ASSERT(list_is_empty(&zilog->zl_lwb_list));
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_NEW);
- zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
+ ASSERT0P(lwb->lwb_buf);
zil_free_lwb(zilog, lwb);
}
mutex_exit(&zilog->zl_lock);
@@ -4472,16 +4547,16 @@ zil_suspend(const char *osname, void **cookiep)
cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
mutex_exit(&zilog->zl_lock);
- if (cookiep == NULL)
+ if (zilog->zl_restart_txg > 0) {
+ /* ZIL crashed while we were waiting. */
+ zil_resume(os);
+ error = SET_ERROR(EBUSY);
+ } else if (cookiep == NULL)
zil_resume(os);
else
*cookiep = os;
- if (zilog->zl_restart_txg > 0)
- /* ZIL crashed while we were waiting. */
- return (SET_ERROR(EBUSY));
-
- return (0);
+ return (error);
}
/*
diff --git a/sys/contrib/openzfs/module/zfs/zio.c b/sys/contrib/openzfs/module/zfs/zio.c
index 3f0ddb63249d..4cf8912d4269 100644
--- a/sys/contrib/openzfs/module/zfs/zio.c
+++ b/sys/contrib/openzfs/module/zfs/zio.c
@@ -4434,12 +4434,15 @@ zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
*/
int
zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
- uint64_t size, boolean_t *slog)
+ uint64_t min_size, uint64_t max_size, boolean_t *slog,
+ boolean_t allow_larger)
{
int error;
zio_alloc_list_t io_alloc_list;
+ uint64_t alloc_size = 0;
ASSERT(txg > spa_syncing_txg(spa));
+ ASSERT3U(min_size, <=, max_size);
metaslab_trace_init(&io_alloc_list);
@@ -4448,7 +4451,7 @@ zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
* Fill in the obvious ones before calling into metaslab_alloc().
*/
BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
- BP_SET_PSIZE(new_bp, size);
+ BP_SET_PSIZE(new_bp, max_size);
BP_SET_LEVEL(new_bp, 0);
/*
@@ -4463,43 +4466,51 @@ zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
ZIOSTAT_BUMP(ziostat_total_allocations);
/* Try log class (dedicated slog devices) first */
- error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
- txg, NULL, flags, &io_alloc_list, allocator, NULL);
+ error = metaslab_alloc_range(spa, spa_log_class(spa), min_size,
+ max_size, new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
+ NULL, &alloc_size);
*slog = (error == 0);
/* Try special_embedded_log class (reserved on special vdevs) */
if (error != 0) {
- error = metaslab_alloc(spa, spa_special_embedded_log_class(spa),
- size, new_bp, 1, txg, NULL, flags, &io_alloc_list,
- allocator, NULL);
+ error = metaslab_alloc_range(spa,
+ spa_special_embedded_log_class(spa), min_size, max_size,
+ new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
+ NULL, &alloc_size);
}
/* Try special class (general special vdev allocation) */
if (error != 0) {
- error = metaslab_alloc(spa, spa_special_class(spa), size,
- new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
- NULL);
+ error = metaslab_alloc_range(spa, spa_special_class(spa),
+ min_size, max_size, new_bp, 1, txg, NULL, flags,
+ &io_alloc_list, allocator, NULL, &alloc_size);
}
/* Try embedded_log class (reserved on normal vdevs) */
if (error != 0) {
- error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
- new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
- NULL);
+ error = metaslab_alloc_range(spa, spa_embedded_log_class(spa),
+ min_size, max_size, new_bp, 1, txg, NULL, flags,
+ &io_alloc_list, allocator, NULL, &alloc_size);
}
/* Finally fall back to normal class */
if (error != 0) {
ZIOSTAT_BUMP(ziostat_alloc_class_fallbacks);
- error = metaslab_alloc(spa, spa_normal_class(spa), size,
- new_bp, 1, txg, NULL, flags, &io_alloc_list, allocator,
- NULL);
+ error = metaslab_alloc_range(spa, spa_normal_class(spa),
+ min_size, max_size, new_bp, 1, txg, NULL, flags,
+ &io_alloc_list, allocator, NULL, &alloc_size);
}
metaslab_trace_fini(&io_alloc_list);
if (error == 0) {
- BP_SET_LSIZE(new_bp, size);
- BP_SET_PSIZE(new_bp, size);
+ if (!allow_larger)
+ alloc_size = MIN(alloc_size, max_size);
+ else if (max_size <= SPA_OLD_MAXBLOCKSIZE)
+ alloc_size = MIN(alloc_size, SPA_OLD_MAXBLOCKSIZE);
+ alloc_size = P2ALIGN_TYPED(alloc_size, ZIL_MIN_BLKSZ, uint64_t);
+
+ BP_SET_LSIZE(new_bp, alloc_size);
+ BP_SET_PSIZE(new_bp, alloc_size);
BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(new_bp,
spa_version(spa) >= SPA_VERSION_SLIM_ZIL
@@ -4527,8 +4538,8 @@ zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
}
} else {
zfs_dbgmsg("%s: zil block allocation failure: "
- "size %llu, error %d", spa_name(spa), (u_longlong_t)size,
- error);
+ "min_size %llu, max_size %llu, error %d", spa_name(spa),
+ (u_longlong_t)min_size, (u_longlong_t)max_size, error);
}
return (error);
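With the widened signature, callers hand zio_alloc_zil() an acceptable size range plus an allow_larger hint rather than one fixed size, and read the granted size back out of the block pointer. A hedged sketch of a call; the range bounds are illustrative, not taken from this patch:

    blkptr_t new_bp;
    boolean_t slog;
    /* accept anything from one minimal block up to the zilog maximum */
    int error = zio_alloc_zil(spa, zilog->zl_os, txg, &new_bp,
        ZIL_MIN_BLKSZ, zilog->zl_max_block_size, &slog,
        B_TRUE /* allow_larger */);
    if (error == 0) {
        /*
         * BP_GET_LSIZE(&new_bp) is what was actually granted,
         * P2-aligned down to a ZIL_MIN_BLKSZ multiple.
         */
        uint64_t granted = BP_GET_LSIZE(&new_bp);
        ASSERT3U(granted, >=, ZIL_MIN_BLKSZ);
    }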
diff --git a/sys/contrib/openzfs/module/zfs/zvol.c b/sys/contrib/openzfs/module/zfs/zvol.c
index 29f51e230a37..2fd3e1c37045 100644
--- a/sys/contrib/openzfs/module/zfs/zvol.c
+++ b/sys/contrib/openzfs/module/zfs/zvol.c
@@ -38,25 +38,36 @@
* Copyright 2014 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2012, 2019 by Delphix. All rights reserved.
- * Copyright (c) 2024, Klara, Inc.
+ * Copyright (c) 2024, 2025, Klara, Inc.
*/
/*
* Note on locking of zvol state structures.
*
- * These structures are used to maintain internal state used to emulate block
- * devices on top of zvols. In particular, management of device minor number
- * operations - create, remove, rename, and set_snapdev - involves access to
- * these structures. The zvol_state_lock is primarily used to protect the
- * zvol_state_list. The zv->zv_state_lock is used to protect the contents
- * of the zvol_state_t structures, as well as to make sure that when the
- * time comes to remove the structure from the list, it is not in use, and
- * therefore, it can be taken off zvol_state_list and freed.
+ * zvol_state_t represents the connection between a single dataset
+ * (DMU_OST_ZVOL) and the device "minor" (some OS-specific representation of a
+ * "disk" or "device" or "volume", eg, a /dev/zdXX node, a GEOM object, etc).
*
- * The zv_suspend_lock was introduced to allow for suspending I/O to a zvol,
- * e.g. for the duration of receive and rollback operations. This lock can be
- * held for significant periods of time. Given that it is undesirable to hold
- * mutexes for long periods of time, the following lock ordering applies:
+ * The global zvol_state_lock is used to protect access to zvol_state_list and
+ * zvol_htable, which are the primary way to obtain a zvol_state_t from a name.
+ * It should not be used for anything not name-related, and you should avoid
+ * sleeping or waiting while it's held. See zvol_find_by_name(), zvol_insert(),
+ * zvol_remove().
+ *
+ * The zv_state_lock is used to protect the contents of the associated
+ * zvol_state_t. Most of the zvol_state_t is dedicated to control and
+ * configuration; almost none of it is needed for data operations (that is,
+ * read, write, flush) so this lock is rarely taken during general IO. It
+ * should be released quickly; you should avoid sleeping or waiting while
+ * it's held.
+ *
+ * zv_suspend_lock is used to suspend IO/data operations to a zvol. The read
+ * half should be held for the duration of an IO operation. The write half
+ * should be taken when something needs to wait for IO to complete and then
+ * block further IO, eg for the duration of receive and rollback operations.
+ * This lock can be held for long periods of time.
+ *
+ * Thus, the following lock ordering applies.
* - take zvol_state_lock if necessary, to protect zvol_state_list
* - take zv_suspend_lock if necessary, by the code path in question
* - take zv_state_lock to protect zvol_state_t
@@ -67,9 +78,8 @@
* these operations are serialized per pool. Consequently, we can be certain
* that for a given zvol, there is only one operation at a time in progress.
* That is why one can be sure that first, zvol_state_t for a given zvol is
- * allocated and placed on zvol_state_list, and then other minor operations
- * for this zvol are going to proceed in the order of issue.
- *
+ * allocated and placed on zvol_state_list, and then other minor operations for
+ * this zvol are going to proceed in the order of issue.
*/
#include <sys/dataset_kstats.h>
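The ordering the rewritten comment prescribes, spelled out for a path that needs all three locks (most paths take only a subset):

    rw_enter(&zvol_state_lock, RW_READER);      /* 1: list/name lookup */
    rw_enter(&zv->zv_suspend_lock, RW_READER);  /* 2: gate on data ops */
    mutex_enter(&zv->zv_state_lock);            /* 3: per-zvol state */
    /* ... */
    mutex_exit(&zv->zv_state_lock);
    rw_exit(&zv->zv_suspend_lock);
    rw_exit(&zvol_state_lock);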
@@ -1570,184 +1580,156 @@ zvol_create_minors_impl(zvol_task_t *task)
}
/*
- * Remove minors for specified dataset including children and snapshots.
- */
-
-/*
- * Remove the minor for a given zvol. This will do it all:
- * - flag the zvol for removal, so new requests are rejected
- * - wait until outstanding requests are completed
- * - remove it from lists
- * - free it
- * It's also usable as a taskq task, and smells nice too.
+ * Remove minors for specified dataset and, optionally, its children and
+ * snapshots.
*/
static void
-zvol_remove_minor_task(void *arg)
-{
- zvol_state_t *zv = (zvol_state_t *)arg;
-
- ASSERT(!RW_LOCK_HELD(&zvol_state_lock));
- ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
-
- mutex_enter(&zv->zv_state_lock);
- while (zv->zv_open_count > 0 || atomic_read(&zv->zv_suspend_ref)) {
- zv->zv_flags |= ZVOL_REMOVING;
- cv_wait(&zv->zv_removing_cv, &zv->zv_state_lock);
- }
- mutex_exit(&zv->zv_state_lock);
-
- rw_enter(&zvol_state_lock, RW_WRITER);
- mutex_enter(&zv->zv_state_lock);
-
- zvol_remove(zv);
- zvol_os_clear_private(zv);
-
- mutex_exit(&zv->zv_state_lock);
- rw_exit(&zvol_state_lock);
-
- zvol_os_free(zv);
-}
-
-static void
-zvol_free_task(void *arg)
-{
- zvol_os_free(arg);
-}
-
-static void
zvol_remove_minors_impl(zvol_task_t *task)
{
zvol_state_t *zv, *zv_next;
const char *name = task ? task->zt_name1 : NULL;
int namelen = ((name) ? strlen(name) : 0);
- taskqid_t t;
- list_t delay_list, free_list;
+ boolean_t children = task ? !!task->zt_value : B_TRUE;
if (zvol_inhibit_dev)
return;
- list_create(&delay_list, sizeof (zvol_state_t),
- offsetof(zvol_state_t, zv_next));
- list_create(&free_list, sizeof (zvol_state_t),
- offsetof(zvol_state_t, zv_next));
+ /*
+ * We collect up zvols that we want to remove on a separate list, so
+ * that we don't have to hold zvol_state_lock for the whole time.
+ *
+ * We can't remove them from the global lists until we're completely
+	 * done with them, because that would make ZFS-side ops believe that
+	 * they don't exist, and the name might then be reused, which can't
+	 * be good.
+ */
+ list_t remove_list;
+ list_create(&remove_list, sizeof (zvol_state_t),
+ offsetof(zvol_state_t, zv_remove_node));
- rw_enter(&zvol_state_lock, RW_WRITER);
+ rw_enter(&zvol_state_lock, RW_READER);
for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
zv_next = list_next(&zvol_state_list, zv);
mutex_enter(&zv->zv_state_lock);
+ if (zv->zv_flags & ZVOL_REMOVING) {
+ /* Another thread is handling shutdown, skip it. */
+ mutex_exit(&zv->zv_state_lock);
+ continue;
+ }
+
+ /*
+ * This zvol should be removed if:
+ * - no name was offered (ie removing all at shutdown); or
+ * - name matches exactly; or
+ * - we were asked to remove children, and
+ * - the start of the name matches, and
+ * - there is a '/' immediately after the matched name; or
+ * - there is a '@' immediately after the matched name
+ */
if (name == NULL || strcmp(zv->zv_name, name) == 0 ||
- (strncmp(zv->zv_name, name, namelen) == 0 &&
+ (children && strncmp(zv->zv_name, name, namelen) == 0 &&
(zv->zv_name[namelen] == '/' ||
zv->zv_name[namelen] == '@'))) {
- /*
- * By holding zv_state_lock here, we guarantee that no
- * one is currently using this zv
- */
/*
- * If in use, try to throw everyone off and try again
- * later.
+			 * Matched, so mark it for removal. We want to take
+			 * the write half of the suspend lock to make sure
+			 * that the zvol is not suspended, and to give any
+			 * data ops a chance to finish.
*/
- if (zv->zv_open_count > 0 ||
- atomic_read(&zv->zv_suspend_ref)) {
- zv->zv_flags |= ZVOL_REMOVING;
- t = taskq_dispatch(
- zv->zv_objset->os_spa->spa_zvol_taskq,
- zvol_remove_minor_task, zv, TQ_SLEEP);
- if (t == TASKQID_INVALID) {
- /*
- * Couldn't create the task, so we'll
- * do it in place once the loop is
- * finished.
- */
- list_insert_head(&delay_list, zv);
- }
+ mutex_exit(&zv->zv_state_lock);
+ rw_enter(&zv->zv_suspend_lock, RW_WRITER);
+ mutex_enter(&zv->zv_state_lock);
+
+ if (zv->zv_flags & ZVOL_REMOVING) {
+ /* Another thread has taken it, let them. */
mutex_exit(&zv->zv_state_lock);
+ rw_exit(&zv->zv_suspend_lock);
continue;
}
- zvol_remove(zv);
-
/*
- * Cleared while holding zvol_state_lock as a writer
- * which will prevent zvol_open() from opening it.
+ * Mark it and unlock. New entries will see the flag
+ * and return ENXIO.
*/
- zvol_os_clear_private(zv);
-
- /* Drop zv_state_lock before zvol_free() */
+ zv->zv_flags |= ZVOL_REMOVING;
mutex_exit(&zv->zv_state_lock);
+ rw_exit(&zv->zv_suspend_lock);
- /* Try parallel zv_free, if failed do it in place */
- t = taskq_dispatch(system_taskq, zvol_free_task, zv,
- TQ_SLEEP);
- if (t == TASKQID_INVALID)
- list_insert_head(&free_list, zv);
- } else {
+ /* Put it on the list for the next stage. */
+ list_insert_head(&remove_list, zv);
+ } else
mutex_exit(&zv->zv_state_lock);
- }
}
- rw_exit(&zvol_state_lock);
- /* Wait for zvols that we couldn't create a remove task for */
- while ((zv = list_remove_head(&delay_list)) != NULL)
- zvol_remove_minor_task(zv);
-
- /* Free any that we couldn't free in parallel earlier */
- while ((zv = list_remove_head(&free_list)) != NULL)
- zvol_os_free(zv);
-}
-
-/* Remove minor for this specific volume only */
-static int
-zvol_remove_minor_impl(const char *name)
-{
- zvol_state_t *zv = NULL, *zv_next;
-
- if (zvol_inhibit_dev)
- return (0);
+ rw_exit(&zvol_state_lock);
- rw_enter(&zvol_state_lock, RW_WRITER);
+ /* Didn't match any, nothing to do! */
+ if (list_is_empty(&remove_list)) {
+ if (task)
+ task->zt_error = SET_ERROR(ENOENT);
+ return;
+ }
- for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
- zv_next = list_next(&zvol_state_list, zv);
+ /* Actually shut them all down. */
+ for (zv = list_head(&remove_list); zv != NULL; zv = zv_next) {
+ zv_next = list_next(&remove_list, zv);
mutex_enter(&zv->zv_state_lock);
- if (strcmp(zv->zv_name, name) == 0)
- /* Found, leave the the loop with zv_lock held */
- break;
- mutex_exit(&zv->zv_state_lock);
- }
-
- if (zv == NULL) {
- rw_exit(&zvol_state_lock);
- return (SET_ERROR(ENOENT));
- }
- ASSERT(MUTEX_HELD(&zv->zv_state_lock));
+ /*
+ * Still open or suspended, just wait. This can happen if, for
+ * example, we managed to acquire zv_state_lock in the moments
+ * where zvol_open() or zvol_release() are trading locks to
+ * call zvol_first_open() or zvol_last_close().
+ */
+ while (zv->zv_open_count > 0 ||
+ atomic_read(&zv->zv_suspend_ref))
+ cv_wait(&zv->zv_removing_cv, &zv->zv_state_lock);
- if (zv->zv_open_count > 0 || atomic_read(&zv->zv_suspend_ref)) {
/*
- * In use, so try to throw everyone off, then wait
- * until finished.
+ * No users, shut down the OS side. This may not remove the
+ * minor from view immediately, depending on the kernel
+ * specifics, but it will ensure that it is unusable and that
+ * this zvol_state_t can never again be reached from an OS-side
+ * operation.
*/
- zv->zv_flags |= ZVOL_REMOVING;
+ zvol_os_remove_minor(zv);
mutex_exit(&zv->zv_state_lock);
+
+ /* Remove it from the name lookup lists */
+ rw_enter(&zvol_state_lock, RW_WRITER);
+ zvol_remove(zv);
rw_exit(&zvol_state_lock);
- zvol_remove_minor_task(zv);
- return (0);
}
- zvol_remove(zv);
- zvol_os_clear_private(zv);
+ /*
+	 * Our own references on the remove_list are now the last ones;
+	 * free them and we're done.
+ */
+ while ((zv = list_remove_head(&remove_list)) != NULL)
+ zvol_os_free(zv);
- mutex_exit(&zv->zv_state_lock);
- rw_exit(&zvol_state_lock);
+ list_destroy(&remove_list);
+}
- zvol_os_free(zv);
+/* Remove minor for this specific volume only */
+static int
+zvol_remove_minor_impl(const char *name)
+{
+ if (zvol_inhibit_dev)
+ return (0);
- return (0);
+ zvol_task_t task;
+ memset(&task, 0, sizeof (zvol_task_t));
+ strlcpy(task.zt_name1, name, sizeof (task.zt_name1));
+ task.zt_value = B_FALSE;
+
+ zvol_remove_minors_impl(&task);
+
+ return (task.zt_error);
}
/*
@@ -2067,6 +2049,7 @@ zvol_remove_minors(spa_t *spa, const char *name, boolean_t async)
task = kmem_zalloc(sizeof (zvol_task_t), KM_SLEEP);
task->zt_op = ZVOL_ASYNC_REMOVE_MINORS;
strlcpy(task->zt_name1, name, sizeof (task->zt_name1));
+ task->zt_value = B_TRUE;
id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
if ((async == B_FALSE) && (id != TASKQID_INVALID))
taskq_wait_id(spa->spa_zvol_taskq, id);
@@ -2188,14 +2171,6 @@ zvol_fini_impl(void)
zvol_remove_minors_impl(NULL);
- /*
- * The call to "zvol_remove_minors_impl" may dispatch entries to
- * the system_taskq, but it doesn't wait for those entries to
- * complete before it returns. Thus, we must wait for all of the
- * removals to finish, before we can continue.
- */
- taskq_wait_outstanding(system_taskq, 0);
-
kmem_free(zvol_htable, ZVOL_HT_SIZE * sizeof (struct hlist_head));
list_destroy(&zvol_state_list);
rw_destroy(&zvol_state_lock);
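Taken together, the new zvol_remove_minors_impl() reduces to a
mark/drain/unlink/free pipeline. A minimal sketch of that shape, assuming
POSIX threads; the demo_* names are hypothetical and only model
zv_state_lock, zv_removing_cv and zv_open_count:

#include <pthread.h>
#include <stdbool.h>

struct demo_zv {
	pthread_mutex_t lock;		/* zv_state_lock analog */
	pthread_cond_t removing_cv;	/* zv_removing_cv analog */
	int open_count;			/* zv_open_count analog */
	bool removing;			/* ZVOL_REMOVING analog */
};

static void
demo_shutdown(struct demo_zv *zv)
{
	pthread_mutex_lock(&zv->lock);
	zv->removing = true;		/* phase 1: new opens now fail */
	while (zv->open_count > 0)	/* phase 2: drain existing users */
		pthread_cond_wait(&zv->removing_cv, &zv->lock);
	pthread_mutex_unlock(&zv->lock);
	/* phases 3 and 4, unlinking from global lists and freeing, follow */
}

demo_shutdown() relies on whoever decrements open_count to signal
removing_cv, exactly as zvol_last_close() does for the real zv_removing_cv.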
diff --git a/sys/contrib/openzfs/module/zstd/zfs_zstd.c b/sys/contrib/openzfs/module/zstd/zfs_zstd.c
index 391216d6e263..3db196953f74 100644
--- a/sys/contrib/openzfs/module/zstd/zfs_zstd.c
+++ b/sys/contrib/openzfs/module/zstd/zfs_zstd.c
@@ -876,9 +876,9 @@ static void __init
zstd_mempool_init(void)
{
zstd_mempool_cctx =
- kmem_zalloc(ZSTD_POOL_MAX * sizeof (struct zstd_pool), KM_SLEEP);
+ vmem_zalloc(ZSTD_POOL_MAX * sizeof (struct zstd_pool), KM_SLEEP);
zstd_mempool_dctx =
- kmem_zalloc(ZSTD_POOL_MAX * sizeof (struct zstd_pool), KM_SLEEP);
+ vmem_zalloc(ZSTD_POOL_MAX * sizeof (struct zstd_pool), KM_SLEEP);
for (int i = 0; i < ZSTD_POOL_MAX; i++) {
mutex_init(&zstd_mempool_cctx[i].barrier, NULL,
@@ -924,8 +924,8 @@ zstd_mempool_deinit(void)
release_pool(&zstd_mempool_dctx[i]);
}
- kmem_free(zstd_mempool_dctx, ZSTD_POOL_MAX * sizeof (struct zstd_pool));
- kmem_free(zstd_mempool_cctx, ZSTD_POOL_MAX * sizeof (struct zstd_pool));
+ vmem_free(zstd_mempool_dctx, ZSTD_POOL_MAX * sizeof (struct zstd_pool));
+ vmem_free(zstd_mempool_cctx, ZSTD_POOL_MAX * sizeof (struct zstd_pool));
zstd_mempool_dctx = NULL;
zstd_mempool_cctx = NULL;
}
diff --git a/sys/contrib/openzfs/scripts/spdxcheck.pl b/sys/contrib/openzfs/scripts/spdxcheck.pl
index 88f5a235d70c..cdab5368f19c 100755
--- a/sys/contrib/openzfs/scripts/spdxcheck.pl
+++ b/sys/contrib/openzfs/scripts/spdxcheck.pl
@@ -190,6 +190,7 @@ my @path_license_tags = (
['BSD-2-Clause OR GPL-2.0-only', 'CDDL-1.0'],
'module/icp' => ['Apache-2.0', 'CDDL-1.0'],
+ 'contrib/icp' => ['Apache-2.0', 'CDDL-1.0'],
# Python bindings are always Apache-2.0
'contrib/pyzfs' => ['Apache-2.0'],
diff --git a/sys/contrib/openzfs/tests/runfiles/common.run b/sys/contrib/openzfs/tests/runfiles/common.run
index 131845f5ed40..2da46458289a 100644
--- a/sys/contrib/openzfs/tests/runfiles/common.run
+++ b/sys/contrib/openzfs/tests/runfiles/common.run
@@ -1093,7 +1093,7 @@ tests = ['zvol_misc_002_pos', 'zvol_misc_hierarchy', 'zvol_misc_rename_inuse',
tags = ['functional', 'zvol', 'zvol_misc']
[tests/functional/zvol/zvol_stress]
-tests = ['zvol_stress']
+tests = ['zvol_stress', 'zvol_stress_destroy']
tags = ['functional', 'zvol', 'zvol_stress']
[tests/functional/zvol/zvol_swap]
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/crypto_test.c b/sys/contrib/openzfs/tests/zfs-tests/cmd/crypto_test.c
index e08003f80464..cbebd33e0bf6 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/crypto_test.c
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/crypto_test.c
@@ -529,6 +529,8 @@ static const char *aes_gcm_impl[][2] = {
{ "aesni", "pclmulqdq" },
{ "x86_64", "avx" },
{ "aesni", "avx" },
+ { "x86_64", "avx2" },
+ { "aesni", "avx2" },
};
/* signature of function to call after setting implementation params */
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am b/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
index b8b8bbe45a42..41e7b45ef4ec 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/Makefile.am
@@ -2244,6 +2244,7 @@ nobase_dist_datadir_zfs_tests_tests_SCRIPTS += \
functional/zvol/zvol_stress/cleanup.ksh \
functional/zvol/zvol_stress/setup.ksh \
functional/zvol/zvol_stress/zvol_stress.ksh \
+ functional/zvol/zvol_stress/zvol_stress_destroy.ksh \
functional/zvol/zvol_swap/cleanup.ksh \
functional/zvol/zvol_swap/setup.ksh \
functional/zvol/zvol_swap/zvol_swap_001_pos.ksh \
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_stress/zvol_stress_destroy.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_stress/zvol_stress_destroy.ksh
new file mode 100755
index 000000000000..669b59fac01f
--- /dev/null
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_stress/zvol_stress_destroy.ksh
@@ -0,0 +1,66 @@
+#!/bin/ksh -p
+# SPDX-License-Identifier: CDDL-1.0
+#
+# CDDL HEADER START
+#
+# The contents of this file are subject to the terms of the
+# Common Development and Distribution License (the "License").
+# You may not use this file except in compliance with the License.
+#
+# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+# or https://opensource.org/licenses/CDDL-1.0.
+# See the License for the specific language governing permissions
+# and limitations under the License.
+#
+# When distributing Covered Code, include this CDDL HEADER in each
+# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+# If applicable, add the following below this CDDL HEADER, with the
+# fields enclosed by brackets "[]" replaced with your own identifying
+# information: Portions Copyright [yyyy] [name of copyright owner]
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2025, Klara, Inc.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+verify_runnable "global"
+
+typeset -i nzvols=1000
+typeset -i parallel=$(( $(get_num_cpus) * 2 ))
+
+function cleanup {
+ for zvol in $(zfs list -Ho name -t vol) ; do
+ log_must_busy zfs destroy $zvol
+ done
+}
+
+log_onexit cleanup
+
+log_assert "stress test concurrent zvol create/destroy"
+
+function destroy_zvols_until {
+ typeset cond=$1
+ while true ; do
+ IFS='' zfs list -Ho name -t vol | read -r -d '' zvols
+ if [[ -n $zvols ]] ; then
+ echo $zvols | xargs -n 1 -P $parallel zfs destroy
+ fi
+ if ! $cond ; then
+ break
+ fi
+ done
+}
+
+( seq $nzvols | \
+ xargs -P $parallel -I % zfs create -s -V 1G $TESTPOOL/testvol% ) &
+cpid=$!
+sleep 1
+
+destroy_zvols_until "kill -0 $cpid"
+destroy_zvols_until "false"
+
+log_pass "stress test done"
diff --git a/sys/crypto/ccp/ccp.c b/sys/crypto/ccp/ccp.c
index 7db9a27ab059..c3d40f6e99ac 100644
--- a/sys/crypto/ccp/ccp.c
+++ b/sys/crypto/ccp/ccp.c
@@ -79,7 +79,7 @@ static struct pciid {
{ 0x15df1022, "AMD CCP-5a" },
};
-static struct random_source random_ccp = {
+static const struct random_source random_ccp = {
.rs_ident = "AMD CCP TRNG",
.rs_source = RANDOM_PURE_CCP,
.rs_read = random_ccp_read,
diff --git a/sys/dev/acpica/acpi_powerres.c b/sys/dev/acpica/acpi_powerres.c
index 29d1690f1bdd..0a8b67a5fa84 100644
--- a/sys/dev/acpica/acpi_powerres.c
+++ b/sys/dev/acpica/acpi_powerres.c
@@ -76,6 +76,13 @@ struct acpi_powerconsumer {
/* Device which is powered */
ACPI_HANDLE ac_consumer;
int ac_state;
+
+ struct {
+ bool prx_has;
+ size_t prx_count;
+ ACPI_HANDLE *prx_deps;
+ } ac_prx[ACPI_D_STATE_COUNT];
+
TAILQ_ENTRY(acpi_powerconsumer) ac_link;
TAILQ_HEAD(,acpi_powerreference) ac_references;
};
@@ -96,9 +103,7 @@ static TAILQ_HEAD(acpi_powerconsumer_list, acpi_powerconsumer)
ACPI_SERIAL_DECL(powerres, "ACPI power resources");
static ACPI_STATUS acpi_pwr_register_consumer(ACPI_HANDLE consumer);
-#ifdef notyet
static ACPI_STATUS acpi_pwr_deregister_consumer(ACPI_HANDLE consumer);
-#endif /* notyet */
static ACPI_STATUS acpi_pwr_register_resource(ACPI_HANDLE res);
#ifdef notyet
static ACPI_STATUS acpi_pwr_deregister_resource(ACPI_HANDLE res);
@@ -112,6 +117,8 @@ static struct acpi_powerresource
*acpi_pwr_find_resource(ACPI_HANDLE res);
static struct acpi_powerconsumer
*acpi_pwr_find_consumer(ACPI_HANDLE consumer);
+static ACPI_STATUS acpi_pwr_infer_state(struct acpi_powerconsumer *pc);
+static ACPI_STATUS acpi_pwr_get_state_locked(ACPI_HANDLE consumer, int *state);
/*
* Register a power resource.
@@ -222,6 +229,84 @@ acpi_pwr_deregister_resource(ACPI_HANDLE res)
#endif /* notyet */
/*
+ * Evaluate the _PRx (power resources each D-state depends on). This also
+ * populates the acpi_powerresources queue with the power resources discovered
+ * during this step.
+ *
+ * ACPI 7.3.8 - 7.3.11 guarantee that _PRx will return the same data each
+ * time they are evaluated.
+ *
+ * If this function fails, acpi_pwr_deregister_consumer() must be called on the
+ * power consumer to free already allocated memory.
+ */
+static ACPI_STATUS
+acpi_pwr_get_power_resources(ACPI_HANDLE consumer, struct acpi_powerconsumer *pc)
+{
+	ACPI_STATUS status;
+ ACPI_STRING reslist_name;
+ ACPI_HANDLE reslist_handle;
+ ACPI_STRING reslist_names[] = {"_PR0", "_PR1", "_PR2", "_PR3"};
+ ACPI_BUFFER reslist;
+ ACPI_OBJECT *reslist_object;
+ ACPI_OBJECT *dep;
+ ACPI_HANDLE *res;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ ACPI_SERIAL_ASSERT(powerres);
+
+ MPASS(consumer != NULL);
+
+ for (int state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++) {
+ pc->ac_prx[state].prx_has = false;
+ pc->ac_prx[state].prx_count = 0;
+ pc->ac_prx[state].prx_deps = NULL;
+
+ reslist_name = reslist_names[state - ACPI_STATE_D0];
+ if (ACPI_FAILURE(AcpiGetHandle(consumer, reslist_name, &reslist_handle)))
+ continue;
+
+ reslist.Pointer = NULL;
+ reslist.Length = ACPI_ALLOCATE_BUFFER;
+ status = AcpiEvaluateObjectTyped(reslist_handle, NULL, NULL, &reslist,
+ ACPI_TYPE_PACKAGE);
+ if (ACPI_FAILURE(status) || reslist.Pointer == NULL)
+ /*
+			 * With ACPI_ALLOCATE_BUFFER, everything is freed on
+			 * error by AcpiEvaluateObjectTyped.
+ */
+ continue;
+
+ reslist_object = (ACPI_OBJECT *)reslist.Pointer;
+ pc->ac_prx[state].prx_has = true;
+ pc->ac_prx[state].prx_count = reslist_object->Package.Count;
+
+ if (reslist_object->Package.Count == 0) {
+ AcpiOsFree(reslist_object);
+ continue;
+ }
+
+ pc->ac_prx[state].prx_deps = mallocarray(pc->ac_prx[state].prx_count,
+ sizeof(*pc->ac_prx[state].prx_deps), M_ACPIPWR, M_NOWAIT);
+ if (pc->ac_prx[state].prx_deps == NULL) {
+ AcpiOsFree(reslist_object);
+ return_ACPI_STATUS (AE_NO_MEMORY);
+ }
+
+ for (size_t i = 0; i < reslist_object->Package.Count; i++) {
+ dep = &reslist_object->Package.Elements[i];
+ res = dep->Reference.Handle;
+ pc->ac_prx[state].prx_deps[i] = res;
+
+ /* It's fine to attempt to register the same resource twice. */
+ acpi_pwr_register_resource(res);
+ }
+ AcpiOsFree(reslist_object);
+ }
+
+ return_ACPI_STATUS (AE_OK);
+}
+
+/*
* Register a power consumer.
*
* It's OK to call this if we already know about the consumer.
@@ -229,6 +314,7 @@ acpi_pwr_deregister_resource(ACPI_HANDLE res)
static ACPI_STATUS
acpi_pwr_register_consumer(ACPI_HANDLE consumer)
{
+	ACPI_STATUS status;
struct acpi_powerconsumer *pc;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@@ -239,14 +325,30 @@ acpi_pwr_register_consumer(ACPI_HANDLE consumer)
return_ACPI_STATUS (AE_OK);
/* Allocate a new power consumer */
- if ((pc = malloc(sizeof(*pc), M_ACPIPWR, M_NOWAIT)) == NULL)
+ if ((pc = malloc(sizeof(*pc), M_ACPIPWR, M_NOWAIT | M_ZERO)) == NULL)
return_ACPI_STATUS (AE_NO_MEMORY);
TAILQ_INSERT_HEAD(&acpi_powerconsumers, pc, ac_link);
TAILQ_INIT(&pc->ac_references);
pc->ac_consumer = consumer;
- /* XXX we should try to find its current state */
- pc->ac_state = ACPI_STATE_UNKNOWN;
+ /*
+ * Get all its power resource dependencies, if it has _PRx. We do this now
+ * as an opportunity to populate the acpi_powerresources queue.
+ *
+ * If this fails, immediately deregister it.
+ */
+ status = acpi_pwr_get_power_resources(consumer, pc);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS,
+ "failed to get power resources for %s\n",
+ acpi_name(consumer)));
+ acpi_pwr_deregister_consumer(consumer);
+ return_ACPI_STATUS (status);
+ }
+
+ /* Find its initial state. */
+ if (ACPI_FAILURE(acpi_pwr_get_state_locked(consumer, &pc->ac_state)))
+ pc->ac_state = ACPI_STATE_UNKNOWN;
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "registered power consumer %s\n",
acpi_name(consumer)));
@@ -254,7 +356,6 @@ acpi_pwr_register_consumer(ACPI_HANDLE consumer)
return_ACPI_STATUS (AE_OK);
}
-#ifdef notyet
/*
* Deregister a power consumer.
*
@@ -279,6 +380,9 @@ acpi_pwr_deregister_consumer(ACPI_HANDLE consumer)
/* Pull the consumer off the list and free it */
TAILQ_REMOVE(&acpi_powerconsumers, pc, ac_link);
+ for (size_t i = 0; i < sizeof(pc->ac_prx) / sizeof(*pc->ac_prx); i++)
+ if (pc->ac_prx[i].prx_deps != NULL)
+ free(pc->ac_prx[i].prx_deps, M_ACPIPWR);
free(pc, M_ACPIPWR);
ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "deregistered power consumer %s\n",
@@ -286,10 +390,139 @@ acpi_pwr_deregister_consumer(ACPI_HANDLE consumer)
return_ACPI_STATUS (AE_OK);
}
-#endif /* notyet */
/*
- * Set a power consumer to a particular power state.
+ * The _PSC control method isn't required if it's possible to infer the D-state
+ * from the _PRx control methods. (See 7.3.6.)
+ * We can infer that a given D-state has been achieved when all the dependencies
+ * are in the ON state.
+ */
+static ACPI_STATUS
+acpi_pwr_infer_state(struct acpi_powerconsumer *pc)
+{
+ ACPI_HANDLE *res;
+ uint32_t on;
+ bool all_on = false;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ ACPI_SERIAL_ASSERT(powerres);
+
+ /* It is important we go from the hottest to the coldest state. */
+ for (
+ pc->ac_state = ACPI_STATE_D0;
+ pc->ac_state <= ACPI_STATE_D3_HOT && !all_on;
+ pc->ac_state++
+ ) {
+ MPASS(pc->ac_state <= sizeof(pc->ac_prx) / sizeof(*pc->ac_prx));
+
+ if (!pc->ac_prx[pc->ac_state].prx_has)
+ continue;
+
+ all_on = true;
+
+ for (size_t i = 0; i < pc->ac_prx[pc->ac_state].prx_count; i++) {
+ res = pc->ac_prx[pc->ac_state].prx_deps[i];
+			/* On failure, assume the D-state is hotter rather than colder. */
+ if (ACPI_FAILURE(acpi_GetInteger(res, "_STA", &on)))
+ continue;
+ if (on == 0) {
+ all_on = false;
+ break;
+ }
+ }
+ }
+
+ MPASS(pc->ac_state != ACPI_STATE_D0);
+
+ /*
+ * If none of the power resources required for the shallower D-states are
+ * on, then we can assume it is unpowered (i.e. D3cold). A device is not
+ * required to support D3cold however; in that case, _PR3 is not explicitly
+ * provided. Those devices should default to D3hot instead.
+ *
+ * See comments of first row of table 7.1 in ACPI spec.
+ */
+ if (!all_on)
+ pc->ac_state = pc->ac_prx[ACPI_STATE_D3_HOT].prx_has ?
+ ACPI_STATE_D3_COLD : ACPI_STATE_D3_HOT;
+ else
+ pc->ac_state--;
+
+ return_ACPI_STATUS (AE_OK);
+}
+
+static ACPI_STATUS
+acpi_pwr_get_state_locked(ACPI_HANDLE consumer, int *state)
+{
+ struct acpi_powerconsumer *pc;
+ ACPI_HANDLE method_handle;
+ ACPI_STATUS status;
+ ACPI_BUFFER result;
+ ACPI_OBJECT *object = NULL;
+
+ ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
+ ACPI_SERIAL_ASSERT(powerres);
+
+ if (consumer == NULL)
+ return_ACPI_STATUS (AE_NOT_FOUND);
+
+ if ((pc = acpi_pwr_find_consumer(consumer)) == NULL) {
+ if (ACPI_FAILURE(status = acpi_pwr_register_consumer(consumer)))
+ goto out;
+ if ((pc = acpi_pwr_find_consumer(consumer)) == NULL)
+ panic("acpi added power consumer but can't find it");
+ }
+
+ status = AcpiGetHandle(consumer, "_PSC", &method_handle);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "no _PSC object - %s\n",
+ AcpiFormatException(status)));
+ status = acpi_pwr_infer_state(pc);
+ if (ACPI_FAILURE(status)) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "couldn't infer D-state - %s\n",
+ AcpiFormatException(status)));
+ pc->ac_state = ACPI_STATE_UNKNOWN;
+ }
+ goto out;
+ }
+
+ result.Pointer = NULL;
+ result.Length = ACPI_ALLOCATE_BUFFER;
+ status = AcpiEvaluateObjectTyped(method_handle, NULL, NULL, &result, ACPI_TYPE_INTEGER);
+ if (ACPI_FAILURE(status) || result.Pointer == NULL) {
+ ACPI_DEBUG_PRINT((ACPI_DB_OBJECTS, "failed to get state with _PSC - %s\n",
+ AcpiFormatException(status)));
+ pc->ac_state = ACPI_STATE_UNKNOWN;
+ goto out;
+ }
+
+ object = (ACPI_OBJECT *)result.Pointer;
+ pc->ac_state = ACPI_STATE_D0 + object->Integer.Value;
+ status = AE_OK;
+
+out:
+ if (object != NULL)
+ AcpiOsFree(object);
+ *state = pc->ac_state;
+ return_ACPI_STATUS (status);
+}
+
+/*
+ * Get a power consumer's D-state.
+ */
+ACPI_STATUS
+acpi_pwr_get_state(ACPI_HANDLE consumer, int *state)
+{
+ ACPI_STATUS res;
+
+ ACPI_SERIAL_BEGIN(powerres);
+ res = acpi_pwr_get_state_locked(consumer, state);
+ ACPI_SERIAL_END(powerres);
+ return (res);
+}
+
+/*
+ * Set a power consumer to a particular D-state.
*/
ACPI_STATUS
acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
@@ -300,6 +533,7 @@ acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
ACPI_OBJECT *reslist_object;
ACPI_STATUS status;
char *method_name, *reslist_name = NULL;
+ int new_state;
ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);
@@ -501,8 +735,28 @@ acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state)
}
}
- /* Transition was successful */
- pc->ac_state = state;
+ /*
+	 * Make sure the transition succeeded. If getting the new state failed,
+ * just assume the new state is what we wanted. This was the behaviour
+ * before we were checking D-states.
+ */
+ if (ACPI_FAILURE(acpi_pwr_get_state_locked(consumer, &new_state))) {
+ printf("%s: failed to get new D-state\n", __func__);
+ pc->ac_state = state;
+ } else {
+ if (new_state != state)
+			printf("%s: new power state %s is not the requested %s\n",
+ __func__, acpi_d_state_to_str(new_state),
+ acpi_d_state_to_str(state));
+ pc->ac_state = new_state;
+ }
+
+ /*
+ * We consider the transition successful even if the state we got doesn't
+ * reflect what we set it to. This is because we weren't previously
+	 * checking the new state at all, so there might exist buggy platforms
+	 * on which suspend would start failing if we returned an error here.
+ */
status = AE_OK;
out:
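The inference rule implemented by acpi_pwr_infer_state() can be restated
compactly: scan D0 through D3hot in order and report the first state whose
_PRx resources are all on; if none qualifies, report D3cold when _PR3 exists
and D3hot otherwise. A self-contained sketch under those rules; every type
and name here is an illustrative stand-in, not ACPICA:

#include <stdbool.h>
#include <stddef.h>

enum { DEMO_D0, DEMO_D1, DEMO_D2, DEMO_D3_HOT, DEMO_D3_COLD };

struct demo_prx {
	bool has;		/* the _PRx object exists */
	size_t count;		/* number of listed resources */
	const bool *on;		/* _STA result for each resource */
};

static int
demo_infer_dstate(const struct demo_prx prx[4])
{
	for (int s = DEMO_D0; s <= DEMO_D3_HOT; s++) {
		bool all_on = true;

		if (!prx[s].has)
			continue;
		for (size_t i = 0; i < prx[s].count; i++) {
			if (!prx[s].on[i]) {
				all_on = false;
				break;
			}
		}
		if (all_on)
			return (s);	/* shallowest fully-powered state */
	}
	/* Nothing fully powered: D3cold only if _PR3 was provided. */
	return (prx[DEMO_D3_HOT].has ? DEMO_D3_COLD : DEMO_D3_HOT);
}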
diff --git a/sys/dev/acpica/acpivar.h b/sys/dev/acpica/acpivar.h
index 6887f080311d..7495a010432b 100644
--- a/sys/dev/acpica/acpivar.h
+++ b/sys/dev/acpica/acpivar.h
@@ -490,6 +490,7 @@ EVENTHANDLER_DECLARE(acpi_video_event, acpi_event_handler_t);
/* Device power control. */
ACPI_STATUS acpi_pwr_wake_enable(ACPI_HANDLE consumer, int enable);
+ACPI_STATUS acpi_pwr_get_state(ACPI_HANDLE consumer, int *state);
ACPI_STATUS acpi_pwr_switch_consumer(ACPI_HANDLE consumer, int state);
acpi_pwr_for_sleep_t acpi_device_pwr_for_sleep;
int acpi_set_powerstate(device_t child, int state);
diff --git a/sys/dev/bnxt/bnxt_re/qplib_res.c b/sys/dev/bnxt/bnxt_re/qplib_res.c
index 69661c67708c..f527af031176 100644
--- a/sys/dev/bnxt/bnxt_re/qplib_res.c
+++ b/sys/dev/bnxt/bnxt_re/qplib_res.c
@@ -875,7 +875,7 @@ int bnxt_qplib_alloc_dpi(struct bnxt_qplib_res *res,
dpi->umdbr = umaddr;
switch (type) {
case BNXT_QPLIB_DPI_TYPE_KERNEL:
- /* priviledged dbr was already mapped just initialize it. */
+ /* privileged dbr was already mapped just initialize it. */
dpi->umdbr = dpit->ucreg.bar_base +
dpit->ucreg.offset + bit_num * PAGE_SIZE;
dpi->dbr = dpit->priv_db;
@@ -1150,7 +1150,7 @@ int bnxt_qplib_map_db_bar(struct bnxt_qplib_res *res)
}
ucreg->bar_reg = ioremap(ucreg->bar_base, ucreg->len);
if (!ucreg->bar_reg) {
- dev_err(&res->pdev->dev, "priviledged dpi map failed!\n");
+ dev_err(&res->pdev->dev, "privileged dpi map failed!\n");
return -ENOMEM;
}
diff --git a/sys/dev/e1000/e1000_phy.c b/sys/dev/e1000/e1000_phy.c
index c34897e3b31a..634f48171c3e 100644
--- a/sys/dev/e1000/e1000_phy.c
+++ b/sys/dev/e1000/e1000_phy.c
@@ -1707,10 +1707,9 @@ s32 e1000_setup_copper_link_generic(struct e1000_hw *hw)
* autonegotiation.
*/
ret_val = e1000_copper_link_autoneg(hw);
- if (ret_val && !hw->mac.forced_speed_duplex)
+ if (ret_val)
return ret_val;
- }
- if (!hw->mac.autoneg || (ret_val && hw->mac.forced_speed_duplex)) {
+ } else {
/* PHY will be set to 10H, 10F, 100H or 100F
* depending on user settings.
*/
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index f0ef6051fab1..9c5ae2806f75 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -2000,18 +2000,7 @@ em_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
(sc->hw.phy.media_type == e1000_media_type_internal_serdes)) {
if (sc->hw.mac.type == e1000_82545)
fiber_type = IFM_1000_LX;
- switch (sc->link_speed) {
- case 10:
- ifmr->ifm_active |= IFM_10_FL;
- break;
- case 100:
- ifmr->ifm_active |= IFM_100_FX;
- break;
- case 1000:
- default:
- ifmr->ifm_active |= fiber_type | IFM_FDX;
- break;
- }
+ ifmr->ifm_active |= fiber_type | IFM_FDX;
} else {
switch (sc->link_speed) {
case 10:
@@ -2024,12 +2013,11 @@ em_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr)
ifmr->ifm_active |= IFM_1000_T;
break;
}
+ if (sc->link_duplex == FULL_DUPLEX)
+ ifmr->ifm_active |= IFM_FDX;
+ else
+ ifmr->ifm_active |= IFM_HDX;
}
-
- if (sc->link_duplex == FULL_DUPLEX)
- ifmr->ifm_active |= IFM_FDX;
- else
- ifmr->ifm_active |= IFM_HDX;
}
/*********************************************************************
@@ -2063,26 +2051,6 @@ em_if_media_change(if_ctx_t ctx)
sc->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
break;
case IFM_100_TX:
- sc->hw.mac.autoneg = DO_AUTO_NEG;
- if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
- sc->hw.phy.autoneg_advertised = ADVERTISE_100_FULL;
- sc->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL;
- } else {
- sc->hw.phy.autoneg_advertised = ADVERTISE_100_HALF;
- sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
- }
- break;
- case IFM_10_T:
- sc->hw.mac.autoneg = DO_AUTO_NEG;
- if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) {
- sc->hw.phy.autoneg_advertised = ADVERTISE_10_FULL;
- sc->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL;
- } else {
- sc->hw.phy.autoneg_advertised = ADVERTISE_10_HALF;
- sc->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF;
- }
- break;
- case IFM_100_FX:
sc->hw.mac.autoneg = false;
sc->hw.phy.autoneg_advertised = 0;
if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
@@ -2090,7 +2058,7 @@ em_if_media_change(if_ctx_t ctx)
else
sc->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF;
break;
- case IFM_10_FL:
+ case IFM_10_T:
sc->hw.mac.autoneg = false;
sc->hw.phy.autoneg_advertised = 0;
if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
diff --git a/sys/dev/fdt/fdt_common.c b/sys/dev/fdt/fdt_common.c
index 1fea4c6f1392..f43551c6310e 100644
--- a/sys/dev/fdt/fdt_common.c
+++ b/sys/dev/fdt/fdt_common.c
@@ -62,8 +62,6 @@
SYSCTL_NODE(_hw, OID_AUTO, fdt, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"Flattened Device Tree");
-struct fdt_ic_list fdt_ic_list_head = SLIST_HEAD_INITIALIZER(fdt_ic_list_head);
-
static int
fdt_get_range_by_busaddr(phandle_t node, u_long addr, u_long *base,
u_long *size)
diff --git a/sys/dev/fdt/fdt_common.h b/sys/dev/fdt/fdt_common.h
index ece54290a6ad..f597233f9771 100644
--- a/sys/dev/fdt/fdt_common.h
+++ b/sys/dev/fdt/fdt_common.h
@@ -59,13 +59,6 @@ struct fdt_fixup_entry {
extern struct fdt_fixup_entry fdt_fixup_table[];
#endif
-extern SLIST_HEAD(fdt_ic_list, fdt_ic) fdt_ic_list_head;
-struct fdt_ic {
- SLIST_ENTRY(fdt_ic) fdt_ics;
- ihandle_t iph;
- device_t dev;
-};
-
#if defined(FDT_DTB_STATIC)
extern u_char fdt_static_dtb;
#endif
diff --git a/sys/dev/gpio/acpi_gpiobus.c b/sys/dev/gpio/acpi_gpiobus.c
index 170f23615416..0d2455cab399 100644
--- a/sys/dev/gpio/acpi_gpiobus.c
+++ b/sys/dev/gpio/acpi_gpiobus.c
@@ -37,6 +37,7 @@
#include <dev/gpio/gpiobusvar.h>
#include <dev/gpio/acpi_gpiobusvar.h>
#include <dev/gpio/gpiobus_internal.h>
+#include <sys/sbuf.h>
#include "gpiobus_if.h"
@@ -52,12 +53,11 @@ struct acpi_gpiobus_ctx {
struct acpi_gpiobus_ivar
{
- struct gpiobus_ivar gpiobus; /* Must come first */
- ACPI_HANDLE dev_handle; /* ACPI handle for bus */
- uint32_t flags;
+ struct gpiobus_ivar gpiobus;
+ ACPI_HANDLE handle;
};
-static uint32_t
+uint32_t
acpi_gpiobus_convflags(ACPI_RESOURCE_GPIO *gpio_res)
{
uint32_t flags = 0;
@@ -150,70 +150,24 @@ acpi_gpiobus_enumerate_res(ACPI_RESOURCE *res, void *context)
return (AE_OK);
}
-static struct acpi_gpiobus_ivar *
-acpi_gpiobus_setup_devinfo(device_t bus, device_t child,
- ACPI_RESOURCE_GPIO *gpio_res)
-{
- struct acpi_gpiobus_ivar *devi;
-
- devi = malloc(sizeof(*devi), M_DEVBUF, M_NOWAIT | M_ZERO);
- if (devi == NULL)
- return (NULL);
- resource_list_init(&devi->gpiobus.rl);
-
- devi->flags = acpi_gpiobus_convflags(gpio_res);
- if (acpi_quirks & ACPI_Q_AEI_NOPULL)
- devi->flags &= ~GPIO_PIN_PULLUP;
-
- devi->gpiobus.npins = 1;
- if (gpiobus_alloc_ivars(&devi->gpiobus) != 0) {
- free(devi, M_DEVBUF);
- return (NULL);
- }
-
- for (int i = 0; i < devi->gpiobus.npins; i++)
- devi->gpiobus.pins[i] = gpio_res->PinTable[i];
-
- return (devi);
-}
-
static ACPI_STATUS
acpi_gpiobus_enumerate_aei(ACPI_RESOURCE *res, void *context)
{
ACPI_RESOURCE_GPIO *gpio_res = &res->Data.Gpio;
- struct acpi_gpiobus_ctx *ctx = context;
- device_t bus = ctx->sc->sc_busdev;
- device_t child;
- struct acpi_gpiobus_ivar *devi;
+ uint32_t *npins = context, *pins = npins + 1;
- /* Check that we have a GpioInt object. */
+ /*
+ * Check that we have a GpioInt object.
+ * Note that according to the spec this
+ * should always be the case.
+ */
if (res->Type != ACPI_RESOURCE_TYPE_GPIO)
return (AE_OK);
if (gpio_res->ConnectionType != ACPI_RESOURCE_GPIO_TYPE_INT)
return (AE_OK);
- /* Add a child. */
- child = device_add_child_ordered(bus, 0, "gpio_aei", DEVICE_UNIT_ANY);
- if (child == NULL)
- return (AE_OK);
- devi = acpi_gpiobus_setup_devinfo(bus, child, gpio_res);
- if (devi == NULL) {
- device_delete_child(bus, child);
- return (AE_OK);
- }
- device_set_ivars(child, devi);
-
- for (int i = 0; i < devi->gpiobus.npins; i++) {
- if (GPIOBUS_PIN_SETFLAGS(bus, child, 0, devi->flags &
- ~GPIO_INTR_MASK)) {
- device_delete_child(bus, child);
- return (AE_OK);
- }
- }
-
- /* Pass ACPI information to children. */
- devi->dev_handle = ctx->dev_handle;
-
+ for (int i = 0; i < gpio_res->PinTableLength; i++)
+ pins[(*npins)++] = gpio_res->PinTable[i];
return (AE_OK);
}
@@ -296,6 +250,63 @@ err:
return (AE_BAD_PARAMETER);
}
+static void
+acpi_gpiobus_attach_aei(struct acpi_gpiobus_softc *sc, ACPI_HANDLE handle)
+{
+ struct acpi_gpiobus_ivar *devi;
+ ACPI_HANDLE aei_handle;
+ device_t child;
+ uint32_t *pins;
+ ACPI_STATUS status;
+ int err;
+
+ status = AcpiGetHandle(handle, "_AEI", &aei_handle);
+ if (ACPI_FAILURE(status))
+ return;
+
+ /* pins[0] specifies the length of the array. */
+ pins = mallocarray(sc->super_sc.sc_npins + 1,
+ sizeof(uint32_t), M_DEVBUF, M_WAITOK);
+ pins[0] = 0;
+
+ status = AcpiWalkResources(handle, "_AEI",
+ acpi_gpiobus_enumerate_aei, pins);
+ if (ACPI_FAILURE(status)) {
+ device_printf(sc->super_sc.sc_busdev,
+ "Failed to enumerate AEI resources\n");
+ free(pins, M_DEVBUF);
+ return;
+ }
+
+ child = BUS_ADD_CHILD(sc->super_sc.sc_busdev, 0, "gpio_aei",
+ DEVICE_UNIT_ANY);
+ if (child == NULL) {
+ device_printf(sc->super_sc.sc_busdev,
+ "Failed to add gpio_aei child\n");
+ free(pins, M_DEVBUF);
+ return;
+ }
+
+ devi = device_get_ivars(child);
+ devi->gpiobus.npins = pins[0];
+ devi->handle = aei_handle;
+
+ err = gpiobus_alloc_ivars(&devi->gpiobus);
+ if (err != 0) {
+ device_printf(sc->super_sc.sc_busdev,
+ "Failed to allocate gpio_aei ivars\n");
+ device_delete_child(sc->super_sc.sc_busdev, child);
+ free(pins, M_DEVBUF);
+ return;
+ }
+
+ for (int i = 0; i < pins[0]; i++)
+ devi->gpiobus.pins[i] = pins[i + 1];
+ free(pins, M_DEVBUF);
+
+ bus_attach_children(sc->super_sc.sc_busdev);
+}
+
static int
acpi_gpiobus_probe(device_t dev)
{
@@ -353,13 +364,8 @@ acpi_gpiobus_attach(device_t dev)
if (ACPI_FAILURE(status))
device_printf(dev, "Failed to enumerate GPIO resources\n");
- /* Look for AEI children */
- status = AcpiWalkResources(handle, "_AEI", acpi_gpiobus_enumerate_aei,
- &ctx);
-
- if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
- device_printf(dev, "Failed to enumerate AEI resources\n");
-
+ /* Look for AEI child */
+ acpi_gpiobus_attach_aei(sc, handle);
return (0);
}
@@ -390,10 +396,7 @@ acpi_gpiobus_read_ivar(device_t dev, device_t child, int which,
switch (which) {
case ACPI_GPIOBUS_IVAR_HANDLE:
- *result = (uintptr_t)devi->dev_handle;
- break;
- case ACPI_GPIOBUS_IVAR_FLAGS:
- *result = (uintptr_t)devi->flags;
+ *result = (uintptr_t)devi->handle;
break;
default:
return (gpiobus_read_ivar(dev, child, which, result));
@@ -402,6 +405,28 @@ acpi_gpiobus_read_ivar(device_t dev, device_t child, int which,
return (0);
}
+static device_t
+acpi_gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
+{
+ return (gpiobus_add_child_common(dev, order, name, unit,
+ sizeof(struct acpi_gpiobus_ivar)));
+}
+
+static int
+acpi_gpiobus_child_location(device_t bus, device_t child, struct sbuf *sb)
+{
+ struct acpi_gpiobus_ivar *devi;
+ int err;
+
+ err = gpiobus_child_location(bus, child, sb);
+ if (err != 0)
+ return (err);
+
+ devi = device_get_ivars(child);
+ sbuf_printf(sb, " handle=%s", acpi_name(devi->handle));
+ return (0);
+}
+
static device_method_t acpi_gpiobus_methods[] = {
/* Device interface */
DEVMETHOD(device_probe, acpi_gpiobus_probe),
@@ -410,6 +435,8 @@ static device_method_t acpi_gpiobus_methods[] = {
/* Bus interface */
DEVMETHOD(bus_read_ivar, acpi_gpiobus_read_ivar),
+ DEVMETHOD(bus_add_child, acpi_gpiobus_add_child),
+ DEVMETHOD(bus_child_location, acpi_gpiobus_child_location),
DEVMETHOD_END
};
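acpi_gpiobus_attach_aei() passes its pin list through a single allocation in
which element 0 carries the count and the pins follow, matching what
acpi_gpiobus_enumerate_aei() expects. A standalone illustration of that
length-prefixed idiom (plain C, hypothetical pin values):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	const uint32_t maxpins = 8;
	uint32_t *buf, *npins, *pins;

	buf = calloc(maxpins + 1, sizeof(uint32_t));
	if (buf == NULL)
		return (1);
	npins = buf;		/* slot 0 holds the count */
	pins = buf + 1;		/* slots 1.. hold the pin numbers */
	pins[(*npins)++] = 17;	/* append pin 17 */
	pins[(*npins)++] = 23;	/* append pin 23 */
	for (uint32_t i = 0; i < *npins; i++)
		printf("pin %u\n", pins[i]);
	free(buf);
	return (0);
}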
diff --git a/sys/dev/gpio/acpi_gpiobusvar.h b/sys/dev/gpio/acpi_gpiobusvar.h
index f8d502eab9d1..288e8bd0f2af 100644
--- a/sys/dev/gpio/acpi_gpiobusvar.h
+++ b/sys/dev/gpio/acpi_gpiobusvar.h
@@ -33,16 +33,16 @@
#include <contrib/dev/acpica/include/acpi.h>
enum acpi_gpiobus_ivars {
- ACPI_GPIOBUS_IVAR_HANDLE = 10600,
- ACPI_GPIOBUS_IVAR_FLAGS,
+ ACPI_GPIOBUS_IVAR_HANDLE = 10600
};
#define ACPI_GPIOBUS_ACCESSOR(var, ivar, type) \
__BUS_ACCESSOR(acpi_gpiobus, var, ACPI_GPIOBUS, ivar, type)
ACPI_GPIOBUS_ACCESSOR(handle, HANDLE, ACPI_HANDLE)
-ACPI_GPIOBUS_ACCESSOR(flags, FLAGS, uint32_t)
#undef ACPI_GPIOBUS_ACCESSOR
+uint32_t acpi_gpiobus_convflags(ACPI_RESOURCE_GPIO *);
+
#endif /* __ACPI_GPIOBUS_H__ */
diff --git a/sys/dev/gpio/gpioaei.c b/sys/dev/gpio/gpioaei.c
index ecae8ccaf2fa..7b97277aaf61 100644
--- a/sys/dev/gpio/gpioaei.c
+++ b/sys/dev/gpio/gpioaei.c
@@ -45,13 +45,21 @@ enum gpio_aei_type {
ACPI_AEI_TYPE_EVT
};
-struct gpio_aei_softc {
- ACPI_HANDLE handle;
- enum gpio_aei_type type;
- int pin;
+struct gpio_aei_ctx {
+ SLIST_ENTRY(gpio_aei_ctx) next;
struct resource * intr_res;
- int intr_rid;
void * intr_cookie;
+ ACPI_HANDLE handle;
+ gpio_pin_t gpio;
+ uint32_t pin;
+ int intr_rid;
+ enum gpio_aei_type type;
+};
+
+struct gpio_aei_softc {
+ SLIST_HEAD(, gpio_aei_ctx) aei_ctx;
+ ACPI_HANDLE dev_handle;
+ device_t dev;
};
static int
@@ -65,69 +73,157 @@ gpio_aei_probe(device_t dev)
static void
gpio_aei_intr(void * arg)
{
- struct gpio_aei_softc * sc = arg;
+ struct gpio_aei_ctx * ctx = arg;
/* Ask ACPI to run the appropriate _EVT, _Exx or _Lxx method. */
- if (sc->type == ACPI_AEI_TYPE_EVT)
- acpi_SetInteger(sc->handle, NULL, sc->pin);
+ if (ctx->type == ACPI_AEI_TYPE_EVT)
+ acpi_SetInteger(ctx->handle, NULL, ctx->pin);
else
- AcpiEvaluateObject(sc->handle, NULL, NULL, NULL);
+ AcpiEvaluateObject(ctx->handle, NULL, NULL, NULL);
+}
+
+static ACPI_STATUS
+gpio_aei_enumerate(ACPI_RESOURCE * res, void * context)
+{
+ ACPI_RESOURCE_GPIO * gpio_res = &res->Data.Gpio;
+ struct gpio_aei_softc * sc = context;
+ uint32_t flags, maxpin;
+ device_t busdev;
+ int err;
+
+ /*
+ * Check that we have a GpioInt object.
+ * Note that according to the spec this
+ * should always be the case.
+ */
+ if (res->Type != ACPI_RESOURCE_TYPE_GPIO)
+ return (AE_OK);
+ if (gpio_res->ConnectionType != ACPI_RESOURCE_GPIO_TYPE_INT)
+ return (AE_OK);
+
+ flags = acpi_gpiobus_convflags(gpio_res);
+ if (acpi_quirks & ACPI_Q_AEI_NOPULL)
+ flags &= ~GPIO_PIN_PULLUP;
+
+ err = GPIO_PIN_MAX(acpi_get_device(sc->dev_handle), &maxpin);
+ if (err != 0)
+ return (AE_ERROR);
+
+ busdev = GPIO_GET_BUS(acpi_get_device(sc->dev_handle));
+ for (int i = 0; i < gpio_res->PinTableLength; i++) {
+ struct gpio_aei_ctx * ctx;
+ uint32_t pin = gpio_res->PinTable[i];
+
+ if (__predict_false(pin > maxpin)) {
+ device_printf(sc->dev,
+ "Invalid pin 0x%x, max: 0x%x (bad ACPI tables?)\n",
+ pin, maxpin);
+ continue;
+ }
+
+ ctx = malloc(sizeof(struct gpio_aei_ctx), M_DEVBUF, M_WAITOK);
+ ctx->type = ACPI_AEI_TYPE_UNKNOWN;
+ if (pin <= 255) {
+ char objname[5]; /* "_EXX" or "_LXX" */
+ sprintf(objname, "_%c%02X",
+ (flags & GPIO_INTR_EDGE_MASK) ? 'E' : 'L', pin);
+ if (ACPI_SUCCESS(AcpiGetHandle(sc->dev_handle, objname,
+ &ctx->handle)))
+ ctx->type = ACPI_AEI_TYPE_ELX;
+ }
+
+ if (ctx->type == ACPI_AEI_TYPE_UNKNOWN) {
+ if (ACPI_SUCCESS(AcpiGetHandle(sc->dev_handle, "_EVT",
+ &ctx->handle)))
+ ctx->type = ACPI_AEI_TYPE_EVT;
+ else {
+ device_printf(sc->dev,
+ "AEI Device type is unknown for pin 0x%x\n",
+ pin);
+
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+ }
+
+ err = gpio_pin_get_by_bus_pinnum(busdev, pin, &ctx->gpio);
+ if (err != 0) {
+ device_printf(sc->dev, "Cannot acquire pin 0x%x\n",
+ pin);
+
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+
+ err = gpio_pin_setflags(ctx->gpio, flags & ~GPIO_INTR_MASK);
+ if (err != 0) {
+ device_printf(sc->dev,
+ "Cannot set pin flags for pin 0x%x\n", pin);
+
+ gpio_pin_release(ctx->gpio);
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+
+ ctx->intr_rid = 0;
+ ctx->intr_res = gpio_alloc_intr_resource(sc->dev,
+ &ctx->intr_rid, RF_ACTIVE, ctx->gpio,
+ flags & GPIO_INTR_MASK);
+ if (ctx->intr_res == NULL) {
+ device_printf(sc->dev,
+ "Cannot allocate an IRQ for pin 0x%x\n", pin);
+
+ gpio_pin_release(ctx->gpio);
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+
+ err = bus_setup_intr(sc->dev, ctx->intr_res, INTR_TYPE_MISC |
+ INTR_MPSAFE | INTR_EXCL | INTR_SLEEPABLE, NULL,
+ gpio_aei_intr, ctx, &ctx->intr_cookie);
+ if (err != 0) {
+ device_printf(sc->dev,
+ "Cannot set up an IRQ for pin 0x%x\n", pin);
+
+ bus_release_resource(sc->dev, ctx->intr_res);
+ gpio_pin_release(ctx->gpio);
+ free(ctx, M_DEVBUF);
+ continue;
+ }
+
+ ctx->pin = pin;
+ SLIST_INSERT_HEAD(&sc->aei_ctx, ctx, next);
+ }
+
+ return (AE_OK);
}
static int
gpio_aei_attach(device_t dev)
{
struct gpio_aei_softc * sc = device_get_softc(dev);
- gpio_pin_t pin;
- uint32_t flags;
ACPI_HANDLE handle;
- int err;
+ ACPI_STATUS status;
/* This is us. */
device_set_desc(dev, "ACPI Event Information Device");
- /* Store parameters needed by gpio_aei_intr. */
handle = acpi_gpiobus_get_handle(dev);
- if (gpio_pin_get_by_child_index(dev, 0, &pin) != 0) {
- device_printf(dev, "Unable to get the input pin\n");
+ status = AcpiGetParent(handle, &sc->dev_handle);
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev, "Cannot get parent of %s\n",
+ acpi_name(handle));
return (ENXIO);
}
- sc->type = ACPI_AEI_TYPE_UNKNOWN;
- sc->pin = pin->pin;
-
- flags = acpi_gpiobus_get_flags(dev);
- if (pin->pin <= 255) {
- char objname[5]; /* "_EXX" or "_LXX" */
- sprintf(objname, "_%c%02X",
- (flags & GPIO_INTR_EDGE_MASK) ? 'E' : 'L', pin->pin);
- if (ACPI_SUCCESS(AcpiGetHandle(handle, objname, &sc->handle)))
- sc->type = ACPI_AEI_TYPE_ELX;
- }
- if (sc->type == ACPI_AEI_TYPE_UNKNOWN) {
- if (ACPI_SUCCESS(AcpiGetHandle(handle, "_EVT", &sc->handle)))
- sc->type = ACPI_AEI_TYPE_EVT;
- }
-
- if (sc->type == ACPI_AEI_TYPE_UNKNOWN) {
- device_printf(dev, "ACPI Event Information Device type is unknown");
- return (ENOTSUP);
- }
+ SLIST_INIT(&sc->aei_ctx);
+ sc->dev = dev;
- /* Set up the interrupt. */
- if ((sc->intr_res = gpio_alloc_intr_resource(dev, &sc->intr_rid,
- RF_ACTIVE, pin, flags & GPIO_INTR_MASK)) == NULL) {
- device_printf(dev, "Cannot allocate an IRQ\n");
- return (ENOTSUP);
- }
- err = bus_setup_intr(dev, sc->intr_res, INTR_TYPE_MISC | INTR_MPSAFE |
- INTR_EXCL | INTR_SLEEPABLE, NULL, gpio_aei_intr, sc,
- &sc->intr_cookie);
- if (err != 0) {
- device_printf(dev, "Cannot set up IRQ\n");
- bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid,
- sc->intr_res);
- return (err);
+ status = AcpiWalkResources(sc->dev_handle, "_AEI",
+ gpio_aei_enumerate, sc);
+ if (ACPI_FAILURE(status)) {
+ device_printf(dev, "Failed to enumerate AEI resources\n");
+ return (ENXIO);
}
return (0);
@@ -137,9 +233,15 @@ static int
gpio_aei_detach(device_t dev)
{
struct gpio_aei_softc * sc = device_get_softc(dev);
+ struct gpio_aei_ctx * ctx, * tctx;
+
+ SLIST_FOREACH_SAFE(ctx, &sc->aei_ctx, next, tctx) {
+ bus_teardown_intr(dev, ctx->intr_res, ctx->intr_cookie);
+ bus_release_resource(dev, ctx->intr_res);
+ gpio_pin_release(ctx->gpio);
+ free(ctx, M_DEVBUF);
+ }
- bus_teardown_intr(dev, sc->intr_res, sc->intr_cookie);
- bus_release_resource(dev, SYS_RES_IRQ, sc->intr_rid, sc->intr_res);
return (0);
}
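gpio_aei_detach() above is the usual teardown of a singly-linked list of
per-pin contexts. Sketched standalone, assuming FreeBSD's <sys/queue.h>
(SLIST_FOREACH_SAFE is not in every libc's copy of the header); the demo_*
names are hypothetical:

#include <sys/queue.h>
#include <stdlib.h>

struct demo_ctx {
	SLIST_ENTRY(demo_ctx) next;
	int pin;			/* per-pin resources live here */
};
SLIST_HEAD(demo_ctx_list, demo_ctx);

static void
demo_ctx_list_free(struct demo_ctx_list *head)
{
	struct demo_ctx *ctx, *tmp;

	/* The _SAFE variant permits freeing the current element. */
	SLIST_FOREACH_SAFE(ctx, head, next, tmp)
		free(ctx);
	SLIST_INIT(head);
}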
diff --git a/sys/dev/gpio/gpiobus.c b/sys/dev/gpio/gpiobus.c
index c25c41f43042..5f1f6532a79b 100644
--- a/sys/dev/gpio/gpiobus.c
+++ b/sys/dev/gpio/gpiobus.c
@@ -57,7 +57,6 @@ static int gpiobus_suspend(device_t);
static int gpiobus_resume(device_t);
static void gpiobus_probe_nomatch(device_t, device_t);
static int gpiobus_print_child(device_t, device_t);
-static int gpiobus_child_location(device_t, device_t, struct sbuf *);
static device_t gpiobus_add_child(device_t, u_int, const char *, int);
static void gpiobus_hinted_child(device_t, const char *, int);
@@ -662,7 +661,7 @@ gpiobus_print_child(device_t dev, device_t child)
return (retval);
}
-static int
+int
gpiobus_child_location(device_t bus, device_t child, struct sbuf *sb)
{
struct gpiobus_ivar *devi;
@@ -674,16 +673,19 @@ gpiobus_child_location(device_t bus, device_t child, struct sbuf *sb)
return (0);
}
-static device_t
-gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
+device_t
+gpiobus_add_child_common(device_t dev, u_int order, const char *name, int unit,
+ size_t ivars_size)
{
device_t child;
struct gpiobus_ivar *devi;
+ KASSERT(ivars_size >= sizeof(struct gpiobus_ivar),
+ ("child ivars must include gpiobus_ivar as their first member"));
child = device_add_child_ordered(dev, order, name, unit);
if (child == NULL)
return (child);
- devi = malloc(sizeof(struct gpiobus_ivar), M_DEVBUF, M_NOWAIT | M_ZERO);
+ devi = malloc(ivars_size, M_DEVBUF, M_NOWAIT | M_ZERO);
if (devi == NULL) {
device_delete_child(dev, child);
return (NULL);
@@ -694,6 +696,13 @@ gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
return (child);
}
+static device_t
+gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
+{
+ return (gpiobus_add_child_common(dev, order, name, unit,
+ sizeof(struct gpiobus_ivar)));
+}
+
static void
gpiobus_child_deleted(device_t dev, device_t child)
{
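The KASSERT added to gpiobus_add_child_common() encodes a layout contract: a
subclassed ivar structure must embed struct gpiobus_ivar as its first member
so a pointer to the subclass is also a valid pointer to the base. A minimal
illustration of the contract, with hypothetical names:

#include <assert.h>
#include <stddef.h>

struct demo_base_ivar {
	int npins;
};

struct demo_sub_ivar {
	struct demo_base_ivar base;	/* must come first */
	int extra;
};

/* Casting struct demo_sub_ivar * to the base type is only valid
 * because the base sits at offset zero. */
static_assert(offsetof(struct demo_sub_ivar, base) == 0,
    "base ivar must be the first member");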
diff --git a/sys/dev/gpio/gpiobus_internal.h b/sys/dev/gpio/gpiobus_internal.h
index de3f57663132..c198e5f79989 100644
--- a/sys/dev/gpio/gpiobus_internal.h
+++ b/sys/dev/gpio/gpiobus_internal.h
@@ -42,6 +42,8 @@ void gpiobus_free_ivars(struct gpiobus_ivar *);
int gpiobus_read_ivar(device_t, device_t, int, uintptr_t *);
int gpiobus_acquire_pin(device_t, uint32_t);
void gpiobus_release_pin(device_t, uint32_t);
+int gpiobus_child_location(device_t, device_t, struct sbuf *);
+device_t gpiobus_add_child_common(device_t, u_int, const char *, int, size_t);
extern driver_t gpiobus_driver;
#endif
diff --git a/sys/dev/gpio/ofw_gpiobus.c b/sys/dev/gpio/ofw_gpiobus.c
index fc5fb03d6824..b12b78fac18c 100644
--- a/sys/dev/gpio/ofw_gpiobus.c
+++ b/sys/dev/gpio/ofw_gpiobus.c
@@ -451,28 +451,22 @@ ofw_gpiobus_add_child(device_t dev, u_int order, const char *name, int unit)
device_t child;
struct ofw_gpiobus_devinfo *devi;
- child = device_add_child_ordered(dev, order, name, unit);
+ child = gpiobus_add_child_common(dev, order, name, unit,
+ sizeof(struct ofw_gpiobus_devinfo));
if (child == NULL)
- return (child);
- devi = malloc(sizeof(struct ofw_gpiobus_devinfo), M_DEVBUF,
- M_NOWAIT | M_ZERO);
- if (devi == NULL) {
- device_delete_child(dev, child);
- return (0);
- }
+ return (NULL);
/*
* NULL all the OFW-related parts of the ivars for non-OFW
* children.
*/
+ devi = device_get_ivars(child);
devi->opd_obdinfo.obd_node = -1;
devi->opd_obdinfo.obd_name = NULL;
devi->opd_obdinfo.obd_compat = NULL;
devi->opd_obdinfo.obd_type = NULL;
devi->opd_obdinfo.obd_model = NULL;
- device_set_ivars(child, devi);
-
return (child);
}
diff --git a/sys/dev/hid/hidbus.c b/sys/dev/hid/hidbus.c
index 96d36c8d191d..683449fca49c 100644
--- a/sys/dev/hid/hidbus.c
+++ b/sys/dev/hid/hidbus.c
@@ -65,7 +65,7 @@ struct hidbus_ivars {
struct mtx *mtx; /* child intr mtx */
hid_intr_t *intr_handler; /* executed under mtx*/
void *intr_ctx;
- unsigned int refcnt; /* protected by mtx */
+ bool active; /* protected by mtx */
struct epoch_context epoch_ctx;
CK_STAILQ_ENTRY(hidbus_ivars) link;
};
@@ -398,7 +398,7 @@ hidbus_child_detached(device_t bus, device_t child)
struct hidbus_softc *sc = device_get_softc(bus);
struct hidbus_ivars *tlc = device_get_ivars(child);
- KASSERT(tlc->refcnt == 0, ("Child device is running"));
+ KASSERT(!tlc->active, ("Child device is running"));
tlc->mtx = &sc->mtx;
tlc->intr_handler = NULL;
tlc->flags &= ~HIDBUS_FLAG_CAN_POLL;
@@ -423,7 +423,7 @@ hidbus_child_deleted(device_t bus, device_t child)
struct hidbus_ivars *tlc = device_get_ivars(child);
sx_xlock(&sc->sx);
- KASSERT(tlc->refcnt == 0, ("Child device is running"));
+ KASSERT(!tlc->active, ("Child device is running"));
CK_STAILQ_REMOVE(&sc->tlcs, tlc, hidbus_ivars, link);
sx_unlock(&sc->sx);
epoch_call(INPUT_EPOCH, hidbus_ivar_dtor, &tlc->epoch_ctx);
@@ -572,7 +572,7 @@ hidbus_intr(void *context, void *buf, hid_size_t len)
if (!HID_IN_POLLING_MODE())
epoch_enter_preempt(INPUT_EPOCH, &et);
CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) {
- if (tlc->refcnt == 0 || tlc->intr_handler == NULL)
+ if (!tlc->active || tlc->intr_handler == NULL)
continue;
if (HID_IN_POLLING_MODE()) {
if ((tlc->flags & HIDBUS_FLAG_CAN_POLL) != 0)
@@ -602,21 +602,14 @@ hidbus_intr_start(device_t bus, device_t child)
MPASS(bus == device_get_parent(child));
struct hidbus_softc *sc = device_get_softc(bus);
struct hidbus_ivars *ivar = device_get_ivars(child);
- struct hidbus_ivars *tlc;
- bool refcnted = false;
int error;
if (sx_xlock_sig(&sc->sx) != 0)
return (EINTR);
- CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) {
- refcnted |= (tlc->refcnt != 0);
- if (tlc == ivar) {
- mtx_lock(tlc->mtx);
- ++tlc->refcnt;
- mtx_unlock(tlc->mtx);
- }
- }
- error = refcnted ? 0 : hid_intr_start(bus);
+ mtx_lock(ivar->mtx);
+ ivar->active = true;
+ mtx_unlock(ivar->mtx);
+ error = hid_intr_start(bus);
sx_unlock(&sc->sx);
return (error);
@@ -629,21 +622,17 @@ hidbus_intr_stop(device_t bus, device_t child)
struct hidbus_softc *sc = device_get_softc(bus);
struct hidbus_ivars *ivar = device_get_ivars(child);
struct hidbus_ivars *tlc;
- bool refcnted = false;
+ bool active = false;
int error;
if (sx_xlock_sig(&sc->sx) != 0)
return (EINTR);
- CK_STAILQ_FOREACH(tlc, &sc->tlcs, link) {
- if (tlc == ivar) {
- mtx_lock(tlc->mtx);
- MPASS(tlc->refcnt != 0);
- --tlc->refcnt;
- mtx_unlock(tlc->mtx);
- }
- refcnted |= (tlc->refcnt != 0);
- }
- error = refcnted ? 0 : hid_intr_stop(bus);
+ mtx_lock(ivar->mtx);
+ ivar->active = false;
+ mtx_unlock(ivar->mtx);
+ CK_STAILQ_FOREACH(tlc, &sc->tlcs, link)
+ active |= tlc->active;
+ error = active ? 0 : hid_intr_stop(bus);
sx_unlock(&sc->sx);
return (error);
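The hidbus change above replaces per-child reference counting with a simple
active flag: hidbus_intr_start() now starts transfers unconditionally, and
hidbus_intr_stop() stops them only once no child remains active. The stop
condition, sketched standalone with hypothetical names:

#include <stdbool.h>
#include <stddef.h>

struct demo_child {
	bool active;		/* hidbus_ivars.active analog */
};

/* Bus interrupts may stop only when every child is inactive. */
static bool
demo_may_stop(const struct demo_child *children, size_t n)
{
	bool active = false;

	for (size_t i = 0; i < n; i++)
		active |= children[i].active;
	return (!active);
}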
diff --git a/sys/dev/hid/hidquirk.h b/sys/dev/hid/hidquirk.h
index 4f8b8acbe201..f6fa9f88c6c9 100644
--- a/sys/dev/hid/hidquirk.h
+++ b/sys/dev/hid/hidquirk.h
@@ -50,6 +50,7 @@
HQ(IS_XBOX360GP), /* device is XBox 360 GamePad */ \
HQ(NOWRITE), /* device does not support writes */ \
HQ(IICHID_SAMPLING), /* IIC backend runs in sampling mode */ \
+ HQ(NO_READAHEAD), /* Disable interrupt after one report */\
\
/* Various quirks */ \
HQ(HID_IGNORE), /* device should be ignored by hid class */ \
diff --git a/sys/dev/hid/hidraw.c b/sys/dev/hid/hidraw.c
index 06f70070f61b..4855843cd265 100644
--- a/sys/dev/hid/hidraw.c
+++ b/sys/dev/hid/hidraw.c
@@ -85,6 +85,12 @@ SYSCTL_INT(_hw_hid_hidraw, OID_AUTO, debug, CTLFLAG_RWTUN,
free((buf), M_DEVBUF); \
}
+#ifdef HIDRAW_MAKE_UHID_ALIAS
+#define HIDRAW_NAME "uhid"
+#else
+#define HIDRAW_NAME "hidraw"
+#endif
+
struct hidraw_softc {
device_t sc_dev; /* base device */
@@ -183,8 +189,8 @@ hidraw_identify(driver_t *driver, device_t parent)
{
device_t child;
- if (device_find_child(parent, "hidraw", DEVICE_UNIT_ANY) == NULL) {
- child = BUS_ADD_CHILD(parent, 0, "hidraw",
+ if (device_find_child(parent, HIDRAW_NAME, DEVICE_UNIT_ANY) == NULL) {
+ child = BUS_ADD_CHILD(parent, 0, HIDRAW_NAME,
device_get_unit(parent));
if (child != NULL)
hidbus_set_index(child, HIDRAW_INDEX);
@@ -1050,7 +1056,7 @@ static device_method_t hidraw_methods[] = {
};
static driver_t hidraw_driver = {
- "hidraw",
+ HIDRAW_NAME,
hidraw_methods,
sizeof(struct hidraw_softc)
};
diff --git a/sys/dev/hid/hkbd.c b/sys/dev/hid/hkbd.c
index 5eff7557bc42..6255c42d3b62 100644
--- a/sys/dev/hid/hkbd.c
+++ b/sys/dev/hid/hkbd.c
@@ -95,14 +95,16 @@
#ifdef HID_DEBUG
static int hkbd_debug = 0;
+#endif
static int hkbd_no_leds = 0;
static SYSCTL_NODE(_hw_hid, OID_AUTO, hkbd, CTLFLAG_RW, 0, "USB keyboard");
+#ifdef HID_DEBUG
SYSCTL_INT(_hw_hid_hkbd, OID_AUTO, debug, CTLFLAG_RWTUN,
&hkbd_debug, 0, "Debug level");
+#endif
SYSCTL_INT(_hw_hid_hkbd, OID_AUTO, no_leds, CTLFLAG_RWTUN,
&hkbd_no_leds, 0, "Disables setting of keyboard leds");
-#endif
#define INPUT_EPOCH global_epoch_preempt
@@ -1596,8 +1598,16 @@ hkbd_ioctl_locked(keyboard_t *kbd, u_long cmd, caddr_t arg)
sc->sc_state &= ~LOCK_MASK;
sc->sc_state |= *(int *)arg;
- /* set LEDs and quit */
- return (hkbd_ioctl_locked(kbd, KDSETLED, arg));
+ /*
+ * Attempt to set the keyboard LEDs; ignore the return value
+ * intentionally. Note: Some hypervisors/emulators (e.g., QEMU,
+ * Parallels—at least as of the time of writing) may fail when
+ * setting LEDs. This can prevent kbdmux from attaching the
+ * keyboard, which in turn may block the console from accessing
+ * it.
+ */
+ (void)hkbd_ioctl_locked(kbd, KDSETLED, arg);
+ return (0);
case KDSETREPEAT: /* set keyboard repeat rate (new
* interface) */
@@ -1766,10 +1776,8 @@ hkbd_set_leds(struct hkbd_softc *sc, uint8_t leds)
SYSCONS_LOCK_ASSERT();
DPRINTF("leds=0x%02x\n", leds);
-#ifdef HID_DEBUG
if (hkbd_no_leds)
return (0);
-#endif
memset(sc->sc_buffer, 0, HKBD_BUFFER_SIZE);
@@ -1820,6 +1828,7 @@ hkbd_set_leds(struct hkbd_softc *sc, uint8_t leds)
SYSCONS_UNLOCK();
error = hid_write(sc->sc_dev, buf, len);
SYSCONS_LOCK();
+	DPRINTF("error %d\n", error);
return (error);
}
diff --git a/sys/dev/hid/ietp.c b/sys/dev/hid/ietp.c
index 217585a7948b..73a5cb7414d4 100644
--- a/sys/dev/hid/ietp.c
+++ b/sys/dev/hid/ietp.c
@@ -102,6 +102,7 @@ struct ietp_softc {
device_t dev;
struct evdev_dev *evdev;
+ bool open;
uint8_t report_id;
hid_size_t report_len;
@@ -217,13 +218,25 @@ static const struct evdev_methods ietp_evdev_methods = {
static int
ietp_ev_open(struct evdev_dev *evdev)
{
- return (hid_intr_start(evdev_get_softc(evdev)));
+ struct ietp_softc *sc = evdev_get_softc(evdev);
+ int error;
+
+ error = hid_intr_start(sc->dev);
+ if (error == 0)
+ sc->open = true;
+ return (error);
}
static int
ietp_ev_close(struct evdev_dev *evdev)
{
- return (hid_intr_stop(evdev_get_softc(evdev)));
+ struct ietp_softc *sc = evdev_get_softc(evdev);
+ int error;
+
+ error = hid_intr_stop(sc->dev);
+ if (error == 0)
+ sc->open = false;
+ return (error);
}
static int
@@ -275,7 +288,7 @@ ietp_attach(struct ietp_softc *sc)
evdev_set_id(sc->evdev, hw->idBus, hw->idVendor, hw->idProduct,
hw->idVersion);
evdev_set_serial(sc->evdev, hw->serial);
- evdev_set_methods(sc->evdev, sc->dev, &ietp_evdev_methods);
+ evdev_set_methods(sc->evdev, sc, &ietp_evdev_methods);
evdev_set_flag(sc->evdev, EVDEV_FLAG_MT_STCOMPAT);
evdev_set_flag(sc->evdev, EVDEV_FLAG_EXT_EPOCH); /* hidbus child */
@@ -584,11 +597,13 @@ ietp_iic_set_absolute_mode(device_t dev, bool enable)
* Some ASUS touchpads need to be powered on to enter absolute mode.
*/
require_wakeup = false;
- for (i = 0; i < nitems(special_fw); i++) {
- if (sc->ic_type == special_fw[i].ic_type &&
- sc->product_id == special_fw[i].product_id) {
- require_wakeup = true;
- break;
+ if (!sc->open) {
+ for (i = 0; i < nitems(special_fw); i++) {
+ if (sc->ic_type == special_fw[i].ic_type &&
+ sc->product_id == special_fw[i].product_id) {
+ require_wakeup = true;
+ break;
+ }
}
}
diff --git a/sys/dev/hid/u2f.c b/sys/dev/hid/u2f.c
new file mode 100644
index 000000000000..ac2eba7a499d
--- /dev/null
+++ b/sys/dev/hid/u2f.c
@@ -0,0 +1,590 @@
+/*-
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (c) 2022-2023 Vladimir Kondratyev <wulf@FreeBSD.org>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include "opt_hid.h"
+
+#include <sys/param.h>
+#ifdef COMPAT_FREEBSD32
+#include <sys/abi_compat.h>
+#endif
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/fcntl.h>
+#include <sys/filio.h>
+#include <sys/ioccom.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <sys/mutex.h>
+#include <sys/poll.h>
+#include <sys/priv.h>
+#include <sys/proc.h>
+#include <sys/selinfo.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+
+#include <dev/evdev/input.h>
+
+#define HID_DEBUG_VAR u2f_debug
+#include <dev/hid/hid.h>
+#include <dev/hid/hidbus.h>
+#include <dev/hid/hidquirk.h>
+
+#include <dev/usb/usb_ioctl.h>
+
+#ifdef HID_DEBUG
+static int u2f_debug = 0;
+static SYSCTL_NODE(_hw_hid, OID_AUTO, u2f, CTLFLAG_RW, 0,
+ "FIDO/U2F authenticator");
+SYSCTL_INT(_hw_hid_u2f, OID_AUTO, debug, CTLFLAG_RWTUN,
+ &u2f_debug, 0, "Debug level");
+#endif
+
+#define U2F_MAX_REPORT_SIZE 64
+
+/* A match on these entries will load u2f */
+static const struct hid_device_id u2f_devs[] = {
+ { HID_BUS(BUS_USB), HID_TLC(HUP_FIDO, HUF_U2FHID) },
+};
+
+struct u2f_softc {
+ device_t sc_dev; /* base device */
+ struct cdev *dev;
+
+ struct mtx sc_mtx; /* hidbus private mutex */
+ void *sc_rdesc;
+ hid_size_t sc_rdesc_size;
+ hid_size_t sc_isize;
+ hid_size_t sc_osize;
+ struct selinfo sc_rsel;
+ struct { /* driver state */
+ bool open:1; /* device is open */
+ bool aslp:1; /* waiting for device data in read() */
+ bool sel:1; /* waiting for device data in poll() */
+ bool data:1; /* input report is stored in sc_buf */
+ int reserved:28;
+ } sc_state;
+ int sc_fflags; /* access mode for open lifetime */
+
+ uint8_t sc_buf[U2F_MAX_REPORT_SIZE];
+};
+
+static d_open_t u2f_open;
+static d_read_t u2f_read;
+static d_write_t u2f_write;
+static d_ioctl_t u2f_ioctl;
+static d_poll_t u2f_poll;
+static d_kqfilter_t u2f_kqfilter;
+
+static d_priv_dtor_t u2f_dtor;
+
+static struct cdevsw u2f_cdevsw = {
+ .d_version = D_VERSION,
+ .d_open = u2f_open,
+ .d_read = u2f_read,
+ .d_write = u2f_write,
+ .d_ioctl = u2f_ioctl,
+ .d_poll = u2f_poll,
+ .d_kqfilter = u2f_kqfilter,
+ .d_name = "u2f",
+};
+
+static hid_intr_t u2f_intr;
+
+static device_probe_t u2f_probe;
+static device_attach_t u2f_attach;
+static device_detach_t u2f_detach;
+
+static int u2f_kqread(struct knote *, long);
+static void u2f_kqdetach(struct knote *);
+static void u2f_notify(struct u2f_softc *);
+
+static struct filterops u2f_filterops_read = {
+ .f_isfd = 1,
+ .f_detach = u2f_kqdetach,
+ .f_event = u2f_kqread,
+};
+
+static int
+u2f_probe(device_t dev)
+{
+ int error;
+
+ error = HIDBUS_LOOKUP_DRIVER_INFO(dev, u2f_devs);
+ if (error != 0)
+ return (error);
+
+ hidbus_set_desc(dev, "Authenticator");
+
+ return (BUS_PROBE_GENERIC);
+}
+
+static int
+u2f_attach(device_t dev)
+{
+ struct u2f_softc *sc = device_get_softc(dev);
+ struct hid_device_info *hw = __DECONST(struct hid_device_info *,
+ hid_get_device_info(dev));
+ struct make_dev_args mda;
+ int error;
+
+ sc->sc_dev = dev;
+
+ error = hid_get_report_descr(dev, &sc->sc_rdesc, &sc->sc_rdesc_size);
+ if (error != 0)
+ return (ENXIO);
+ sc->sc_isize = hid_report_size_max(sc->sc_rdesc, sc->sc_rdesc_size,
+ hid_input, NULL);
+ if (sc->sc_isize > U2F_MAX_REPORT_SIZE) {
+ device_printf(dev, "Input report size too large. Truncate.\n");
+ sc->sc_isize = U2F_MAX_REPORT_SIZE;
+ }
+ sc->sc_osize = hid_report_size_max(sc->sc_rdesc, sc->sc_rdesc_size,
+ hid_output, NULL);
+ if (sc->sc_osize > U2F_MAX_REPORT_SIZE) {
+ device_printf(dev, "Output report size too large. Truncate.\n");
+ sc->sc_osize = U2F_MAX_REPORT_SIZE;
+ }
+
+ mtx_init(&sc->sc_mtx, "u2f lock", NULL, MTX_DEF);
+ knlist_init_mtx(&sc->sc_rsel.si_note, &sc->sc_mtx);
+
+ make_dev_args_init(&mda);
+ mda.mda_flags = MAKEDEV_WAITOK;
+ mda.mda_devsw = &u2f_cdevsw;
+ mda.mda_uid = UID_ROOT;
+ mda.mda_gid = GID_U2F;
+ mda.mda_mode = 0660;
+ mda.mda_si_drv1 = sc;
+
+ error = make_dev_s(&mda, &sc->dev, "u2f/%d", device_get_unit(dev));
+ if (error) {
+ device_printf(dev, "Can not create character device\n");
+ u2f_detach(dev);
+ return (error);
+ }
+#ifdef U2F_MAKE_UHID_ALIAS
+ (void)make_dev_alias(sc->dev, "uhid%d", device_get_unit(dev));
+#endif
+
+ hid_add_dynamic_quirk(hw, HQ_NO_READAHEAD);
+
+ hidbus_set_lock(dev, &sc->sc_mtx);
+ hidbus_set_intr(dev, u2f_intr, sc);
+
+ return (0);
+}
+
+static int
+u2f_detach(device_t dev)
+{
+ struct u2f_softc *sc = device_get_softc(dev);
+
+ DPRINTF("sc=%p\n", sc);
+
+ if (sc->dev != NULL) {
+ mtx_lock(&sc->sc_mtx);
+ sc->dev->si_drv1 = NULL;
+ /* Wake everyone */
+ u2f_notify(sc);
+ mtx_unlock(&sc->sc_mtx);
+ destroy_dev(sc->dev);
+ }
+
+ hid_intr_stop(sc->sc_dev);
+
+ knlist_clear(&sc->sc_rsel.si_note, 0);
+ knlist_destroy(&sc->sc_rsel.si_note);
+ seldrain(&sc->sc_rsel);
+ mtx_destroy(&sc->sc_mtx);
+
+ return (0);
+}
+
+static void
+u2f_intr(void *context, void *buf, hid_size_t len)
+{
+ struct u2f_softc *sc = context;
+
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ DPRINTFN(5, "len=%d\n", len);
+ DPRINTFN(5, "data = %*D\n", len, buf, " ");
+
+ if (sc->sc_state.data)
+ return;
+
+ if (len > sc->sc_isize)
+ len = sc->sc_isize;
+
+ bcopy(buf, sc->sc_buf, len);
+
+ /* Make sure we don't process old data */
+ if (len < sc->sc_isize)
+ bzero(sc->sc_buf + len, sc->sc_isize - len);
+
+ sc->sc_state.data = true;
+
+ u2f_notify(sc);
+}
+
+static int
+u2f_open(struct cdev *dev, int flag, int mode, struct thread *td)
+{
+ struct u2f_softc *sc = dev->si_drv1;
+ int error;
+
+ if (sc == NULL)
+ return (ENXIO);
+
+ DPRINTF("sc=%p\n", sc);
+
+ mtx_lock(&sc->sc_mtx);
+ if (sc->sc_state.open) {
+ mtx_unlock(&sc->sc_mtx);
+ return (EBUSY);
+ }
+ sc->sc_state.open = true;
+ mtx_unlock(&sc->sc_mtx);
+
+ error = devfs_set_cdevpriv(sc, u2f_dtor);
+ if (error != 0) {
+ mtx_lock(&sc->sc_mtx);
+ sc->sc_state.open = false;
+ mtx_unlock(&sc->sc_mtx);
+ return (error);
+ }
+
+ /* Set up interrupt pipe. */
+ sc->sc_state.data = false;
+ sc->sc_fflags = flag;
+
+ return (0);
+}
+
+
+static void
+u2f_dtor(void *data)
+{
+ struct u2f_softc *sc = data;
+
+#ifdef NOT_YET
+ /* Disable interrupts. */
+ hid_intr_stop(sc->sc_dev);
+#endif
+
+ mtx_lock(&sc->sc_mtx);
+ sc->sc_state.open = false;
+ mtx_unlock(&sc->sc_mtx);
+}
+
+static int
+u2f_read(struct cdev *dev, struct uio *uio, int flag)
+{
+ uint8_t buf[U2F_MAX_REPORT_SIZE];
+ struct u2f_softc *sc = dev->si_drv1;
+ size_t length = 0;
+ int error = 0;
+
+ DPRINTFN(1, "\n");
+
+ if (sc == NULL)
+ return (EIO);
+
+ if (!sc->sc_state.data)
+ hid_intr_start(sc->sc_dev);
+
+ mtx_lock(&sc->sc_mtx);
+ if (dev->si_drv1 == NULL) {
+ error = EIO;
+ goto exit;
+ }
+
+ while (!sc->sc_state.data) {
+ if (flag & O_NONBLOCK) {
+ error = EWOULDBLOCK;
+ goto exit;
+ }
+ sc->sc_state.aslp = true;
+ DPRINTFN(5, "sleep on %p\n", &sc->sc_buf);
+ error = mtx_sleep(&sc->sc_buf, &sc->sc_mtx, PZERO | PCATCH,
+ "u2frd", 0);
+ DPRINTFN(5, "woke, error=%d\n", error);
+ if (dev->si_drv1 == NULL)
+ error = EIO;
+ if (error) {
+ sc->sc_state.aslp = false;
+ goto exit;
+ }
+ }
+
+ if (sc->sc_state.data && uio->uio_resid > 0) {
+ length = min(uio->uio_resid, sc->sc_isize);
+ memcpy(buf, sc->sc_buf, length);
+ sc->sc_state.data = false;
+ }
+exit:
+ mtx_unlock(&sc->sc_mtx);
+ if (length != 0) {
+ /* Copy the data to the user process. */
+ DPRINTFN(5, "got %lu chars\n", (u_long)length);
+ error = uiomove(buf, length, uio);
+ }
+
+ return (error);
+}
+
+static int
+u2f_write(struct cdev *dev, struct uio *uio, int flag)
+{
+ uint8_t buf[U2F_MAX_REPORT_SIZE];
+ struct u2f_softc *sc = dev->si_drv1;
+ int error;
+
+ DPRINTFN(1, "\n");
+
+ if (sc == NULL)
+ return (EIO);
+
+ if (uio->uio_resid != sc->sc_osize)
+ return (EINVAL);
+ error = uiomove(buf, uio->uio_resid, uio);
+ if (error == 0)
+ error = hid_write(sc->sc_dev, buf, sc->sc_osize);
+
+ return (error);
+}
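
A minimal userland sketch of a raw exchange through the cdev pair above: u2f_write() requires every write(2) to be exactly one output report (uio->uio_resid == sc_osize, at most 64 bytes here), and u2f_read() hands back at most one buffered input report per call. The /dev/u2f/0 path follows the make_dev_s() call in u2f_attach(); the frame contents are a placeholder, not a real U2FHID transaction.

    #include <err.h>
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    #define REPORT_SIZE 64    /* assumed equal to U2F_MAX_REPORT_SIZE above */

    int
    main(void)
    {
            unsigned char req[REPORT_SIZE], resp[REPORT_SIZE];
            int fd;

            if ((fd = open("/dev/u2f/0", O_RDWR)) < 0)  /* path assumed */
                    err(1, "open");
            memset(req, 0, sizeof(req));                /* placeholder frame */
            /* One write must be exactly one output report, else EINVAL. */
            if (write(fd, req, sizeof(req)) != sizeof(req))
                    err(1, "write");
            /* Blocks until u2f_intr() buffers an input report. */
            if (read(fd, resp, sizeof(resp)) < 0)
                    err(1, "read");
            close(fd);
            return (0);
    }
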
+
+#ifdef COMPAT_FREEBSD32
+static void
+update_ugd32(const struct usb_gen_descriptor *ugd,
+ struct usb_gen_descriptor32 *ugd32)
+{
+ /* Don't update hgd_data pointer */
+ CP(*ugd, *ugd32, ugd_lang_id);
+ CP(*ugd, *ugd32, ugd_maxlen);
+ CP(*ugd, *ugd32, ugd_actlen);
+ CP(*ugd, *ugd32, ugd_offset);
+ CP(*ugd, *ugd32, ugd_config_index);
+ CP(*ugd, *ugd32, ugd_string_index);
+ CP(*ugd, *ugd32, ugd_iface_index);
+ CP(*ugd, *ugd32, ugd_altif_index);
+ CP(*ugd, *ugd32, ugd_endpt_index);
+ CP(*ugd, *ugd32, ugd_report_type);
+ /* Don't update reserved */
+}
+#endif
+
+static int
+u2f_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
+ struct thread *td)
+{
+#ifdef COMPAT_FREEBSD32
+ struct usb_gen_descriptor local_ugd;
+ struct usb_gen_descriptor32 *ugd32 = NULL;
+#endif
+ struct u2f_softc *sc = dev->si_drv1;
+ struct usb_gen_descriptor *ugd = (struct usb_gen_descriptor *)addr;
+ uint32_t size;
+
+ DPRINTFN(2, "cmd=%lx\n", cmd);
+
+ if (sc == NULL)
+ return (EIO);
+
+#ifdef COMPAT_FREEBSD32
+ switch (cmd) {
+ case USB_GET_REPORT_DESC32:
+ cmd = _IOC_NEWTYPE(cmd, struct usb_gen_descriptor);
+ ugd32 = (struct usb_gen_descriptor32 *)addr;
+ ugd = &local_ugd;
+ PTRIN_CP(*ugd32, *ugd, ugd_data);
+ CP(*ugd32, *ugd, ugd_lang_id);
+ CP(*ugd32, *ugd, ugd_maxlen);
+ CP(*ugd32, *ugd, ugd_actlen);
+ CP(*ugd32, *ugd, ugd_offset);
+ CP(*ugd32, *ugd, ugd_config_index);
+ CP(*ugd32, *ugd, ugd_string_index);
+ CP(*ugd32, *ugd, ugd_iface_index);
+ CP(*ugd32, *ugd, ugd_altif_index);
+ CP(*ugd32, *ugd, ugd_endpt_index);
+ CP(*ugd32, *ugd, ugd_report_type);
+ /* Don't copy reserved */
+ break;
+ }
+#endif
+
+ /* fixed-length ioctls handling */
+ switch (cmd) {
+ case FIONBIO:
+ /* All handled in the upper FS layer. */
+ return (0);
+
+ case USB_GET_REPORT_DESC:
+ size = MIN(sc->sc_rdesc_size, ugd->ugd_maxlen);
+ ugd->ugd_actlen = size;
+#ifdef COMPAT_FREEBSD32
+ if (ugd32 != NULL)
+ update_ugd32(ugd, ugd32);
+#endif
+ if (ugd->ugd_data == NULL)
+ return (0); /* descriptor length only */
+
+ return (copyout(sc->sc_rdesc, ugd->ugd_data, size));
+
+ case USB_GET_DEVICEINFO:
+ return (hid_ioctl(
+ sc->sc_dev, USB_GET_DEVICEINFO, (uintptr_t)addr));
+ }
+
+ return (EINVAL);
+}
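
For illustration, USB_GET_REPORT_DESC supports the same two-step pattern uhid(4) consumers use: a first call with ugd_data == NULL only reports the descriptor length in ugd_actlen, and a second call copies the descriptor out. A hedged userland sketch, assuming a descriptor already opened on the /dev/u2f/0 node created in u2f_attach():

    #include <sys/ioctl.h>
    #include <dev/usb/usb_ioctl.h>
    #include <err.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    static void *
    get_report_desc(int fd, uint16_t *lenp)
    {
            struct usb_gen_descriptor ugd;
            void *buf;

            memset(&ugd, 0, sizeof(ugd));
            ugd.ugd_maxlen = 65535;
            /* Pass 1: ugd_data == NULL yields the length only. */
            if (ioctl(fd, USB_GET_REPORT_DESC, &ugd) < 0)
                    err(1, "USB_GET_REPORT_DESC (size)");
            if ((buf = malloc(ugd.ugd_actlen)) == NULL)
                    err(1, "malloc");
            /* Pass 2: same ioctl with a buffer copies the descriptor out. */
            ugd.ugd_data = buf;
            ugd.ugd_maxlen = ugd.ugd_actlen;
            if (ioctl(fd, USB_GET_REPORT_DESC, &ugd) < 0)
                    err(1, "USB_GET_REPORT_DESC (data)");
            *lenp = ugd.ugd_actlen;
            return (buf);
    }
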
+
+static int
+u2f_poll(struct cdev *dev, int events, struct thread *td)
+{
+ struct u2f_softc *sc = dev->si_drv1;
+ int revents = 0;
+ bool start_intr = false;
+
+ if (sc == NULL)
+ return (POLLHUP);
+
+ if (events & (POLLOUT | POLLWRNORM) && (sc->sc_fflags & FWRITE))
+ revents |= events & (POLLOUT | POLLWRNORM);
+ if (events & (POLLIN | POLLRDNORM) && (sc->sc_fflags & FREAD)) {
+ mtx_lock(&sc->sc_mtx);
+ if (sc->sc_state.data)
+ revents |= events & (POLLIN | POLLRDNORM);
+ else {
+ sc->sc_state.sel = true;
+ start_intr = true;
+ selrecord(td, &sc->sc_rsel);
+ }
+ mtx_unlock(&sc->sc_mtx);
+ if (start_intr)
+ hid_intr_start(sc->sc_dev);
+ }
+
+ return (revents);
+}
+
+static int
+u2f_kqfilter(struct cdev *dev, struct knote *kn)
+{
+ struct u2f_softc *sc = dev->si_drv1;
+
+ if (sc == NULL)
+ return (ENXIO);
+
+ switch (kn->kn_filter) {
+ case EVFILT_READ:
+ if (sc->sc_fflags & FREAD) {
+ kn->kn_fop = &u2f_filterops_read;
+ break;
+ }
+ /* FALLTHROUGH */
+ default:
+ return (EINVAL);
+ }
+ kn->kn_hook = sc;
+
+ knlist_add(&sc->sc_rsel.si_note, kn, 0);
+ return (0);
+}
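
A small sketch of the consumer side of this filter: only EVFILT_READ is accepted, and only on descriptors opened with read access, so a kqueue wait looks like the usual pattern (illustrative, not taken from any existing tool):

    #include <sys/event.h>
    #include <err.h>
    #include <stddef.h>
    #include <unistd.h>

    static void
    wait_for_report(int fd)
    {
            struct kevent kev;
            int kq;

            if ((kq = kqueue()) < 0)
                    err(1, "kqueue");
            EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
            /* Registers the knote and sleeps until u2f_notify() fires. */
            if (kevent(kq, &kev, 1, &kev, 1, NULL) < 0)
                    err(1, "kevent");
            close(kq);
    }
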
+
+static int
+u2f_kqread(struct knote *kn, long hint)
+{
+ struct u2f_softc *sc = kn->kn_hook;
+ int ret;
+
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ if (sc->dev->si_drv1 == NULL) {
+ kn->kn_flags |= EV_EOF;
+ ret = 1;
+ } else {
+ ret = sc->sc_state.data ? 1 : 0;
+ if (!sc->sc_state.data)
+ hid_intr_start(sc->sc_dev);
+ }
+
+ return (ret);
+}
+
+static void
+u2f_kqdetach(struct knote *kn)
+{
+ struct u2f_softc *sc = kn->kn_hook;
+
+ knlist_remove(&sc->sc_rsel.si_note, kn, 0);
+}
+
+static void
+u2f_notify(struct u2f_softc *sc)
+{
+ mtx_assert(&sc->sc_mtx, MA_OWNED);
+
+ if (sc->sc_state.aslp) {
+ sc->sc_state.aslp = false;
+ DPRINTFN(5, "waking %p\n", &sc->sc_buf);
+ wakeup(&sc->sc_buf);
+ }
+ if (sc->sc_state.sel) {
+ sc->sc_state.sel = false;
+ selwakeuppri(&sc->sc_rsel, PZERO);
+ }
+ KNOTE_LOCKED(&sc->sc_rsel.si_note, 0);
+}
+
+static device_method_t u2f_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, u2f_probe),
+ DEVMETHOD(device_attach, u2f_attach),
+ DEVMETHOD(device_detach, u2f_detach),
+
+ DEVMETHOD_END
+};
+
+static driver_t u2f_driver = {
+#ifdef U2F_MAKE_UHID_ALIAS
+ "uhid",
+#else
+ "u2f",
+#endif
+ u2f_methods,
+ sizeof(struct u2f_softc)
+};
+
+DRIVER_MODULE(u2f, hidbus, u2f_driver, NULL, NULL);
+MODULE_DEPEND(u2f, hidbus, 1, 1, 1);
+MODULE_DEPEND(u2f, hid, 1, 1, 1);
+MODULE_VERSION(u2f, 1);
+HID_PNP_INFO(u2f_devs);
diff --git a/sys/dev/ice/ice_bitops.h b/sys/dev/ice/ice_bitops.h
index c480900596f4..a623f810c101 100644
--- a/sys/dev/ice/ice_bitops.h
+++ b/sys/dev/ice/ice_bitops.h
@@ -198,7 +198,7 @@ static inline void ice_zero_bitmap(ice_bitmap_t *bmp, u16 size)
* ice_and_bitmap - bitwise AND 2 bitmaps and store result in dst bitmap
* @dst: Destination bitmap that receive the result of the operation
* @bmp1: The first bitmap to intersect
- * @bmp2: The second bitmap to intersect wit the first
+ * @bmp2: The second bitmap to intersect with the first
* @size: Size of the bitmaps in bits
*
* This function performs a bitwise AND on two "source" bitmaps of the same size
@@ -237,7 +237,7 @@ ice_and_bitmap(ice_bitmap_t *dst, const ice_bitmap_t *bmp1,
* ice_or_bitmap - bitwise OR 2 bitmaps and store result in dst bitmap
* @dst: Destination bitmap that receive the result of the operation
* @bmp1: The first bitmap to intersect
- * @bmp2: The second bitmap to intersect wit the first
+ * @bmp2: The second bitmap to intersect with the first
* @size: Size of the bitmaps in bits
*
* This function performs a bitwise OR on two "source" bitmaps of the same size
diff --git a/sys/dev/ice/ice_lan_tx_rx.h b/sys/dev/ice/ice_lan_tx_rx.h
index 693e0ca5efc6..eedacdab0216 100644
--- a/sys/dev/ice/ice_lan_tx_rx.h
+++ b/sys/dev/ice/ice_lan_tx_rx.h
@@ -630,7 +630,7 @@ enum ice_rxdid {
ICE_RXDID_LAST = 63,
};
-/* Recceive Flex descriptor Dword Index */
+/* Receive Flex descriptor Dword Index */
enum ice_flex_word {
ICE_RX_FLEX_DWORD_0 = 0,
ICE_RX_FLEX_DWORD_1,
diff --git a/sys/dev/ice/ice_lib.h b/sys/dev/ice/ice_lib.h
index 308b2bda2790..640bdf8fed7b 100644
--- a/sys/dev/ice/ice_lib.h
+++ b/sys/dev/ice/ice_lib.h
@@ -313,7 +313,7 @@ enum ice_dyn_idx_t {
ICE_ITR_NONE = 3 /* ITR_NONE must not be used as an index */
};
-/* By convenction ITR0 is used for RX, and ITR1 is used for TX */
+/* By convention ITR0 is used for RX, and ITR1 is used for TX */
#define ICE_RX_ITR ICE_IDX_ITR0
#define ICE_TX_ITR ICE_IDX_ITR1
diff --git a/sys/dev/ice/ice_protocol_type.h b/sys/dev/ice/ice_protocol_type.h
index 300d61bfb5d9..b90c25e6c427 100644
--- a/sys/dev/ice/ice_protocol_type.h
+++ b/sys/dev/ice/ice_protocol_type.h
@@ -143,7 +143,7 @@ enum ice_prot_id {
ICE_PROT_LLDP_OF = 117,
ICE_PROT_ARP_OF = 118,
ICE_PROT_EAPOL_OF = 120,
- ICE_PROT_META_ID = 255, /* when offset == metaddata */
+ ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
};
diff --git a/sys/dev/iicbus/iichid.c b/sys/dev/iicbus/iichid.c
index 3f1d7a0cefba..fdb4816b8bd9 100644
--- a/sys/dev/iicbus/iichid.c
+++ b/sys/dev/iicbus/iichid.c
@@ -861,7 +861,8 @@ iichid_intr_start(device_t dev, device_t child __unused)
sc = device_get_softc(dev);
DPRINTF(sc, "iichid device open\n");
- iichid_set_power_state(sc, IICHID_PS_ON, IICHID_PS_NULL);
+ if (!sc->open)
+ iichid_set_power_state(sc, IICHID_PS_ON, IICHID_PS_NULL);
return (0);
}
diff --git a/sys/dev/isci/scil/intel_sata.h b/sys/dev/isci/scil/intel_sata.h
index 4cf4adf03e07..fdad5be9b083 100644
--- a/sys/dev/isci/scil/intel_sata.h
+++ b/sys/dev/isci/scil/intel_sata.h
@@ -61,7 +61,7 @@
*
* @brief This file defines all of the SATA releated constants, enumerations,
* and types. Please note that this file does not necessarily contain
- * an exhaustive list of all contants and commands.
+ * an exhaustive list of all constants and commands.
*/
/**
diff --git a/sys/dev/ixgbe/if_ix.c b/sys/dev/ixgbe/if_ix.c
index 959afa79e7da..73c0fd1ab16f 100644
--- a/sys/dev/ixgbe/if_ix.c
+++ b/sys/dev/ixgbe/if_ix.c
@@ -45,7 +45,7 @@
/************************************************************************
* Driver version
************************************************************************/
-static const char ixgbe_driver_version[] = "4.0.1-k";
+static const char ixgbe_driver_version[] = "5.0.1-k";
/************************************************************************
* PCI Device ID Table
@@ -144,6 +144,16 @@ static const pci_vendor_info_t ixgbe_vendor_info_array[] =
"Intel(R) X540-T2 (Bypass)"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS,
"Intel(R) X520 82599 (Bypass)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_BACKPLANE,
+ "Intel(R) E610 (Backplane)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SFP,
+ "Intel(R) E610 (SFP)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_2_5G_T,
+ "Intel(R) E610 (2.5 GbE)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_10G_T,
+ "Intel(R) E610 (10 GbE)"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_SGMII,
+ "Intel(R) E610 (SGMII)"),
/* required last entry */
PVID_END
};
@@ -253,6 +263,10 @@ static int ixgbe_sysctl_tso_tcp_flags_mask(SYSCTL_HANDLER_ARGS);
static void ixgbe_handle_msf(void *);
static void ixgbe_handle_mod(void *);
static void ixgbe_handle_phy(void *);
+static void ixgbe_handle_fw_event(void *);
+
+static s32 ixgbe_enable_lse(struct ixgbe_softc *sc);
+static s32 ixgbe_disable_lse(struct ixgbe_softc *sc);
/************************************************************************
* FreeBSD Device Interface Entry Points
@@ -621,6 +635,7 @@ ixgbe_initialize_rss_mapping(struct ixgbe_softc *sc)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
table_size = 512;
break;
default:
@@ -902,6 +917,32 @@ ixgbe_initialize_transmit_units(if_ctx_t ctx)
} /* ixgbe_initialize_transmit_units */
+static int
+ixgbe_check_fw_api_version(struct ixgbe_softc *sc)
+{
+ struct ixgbe_hw *hw = &sc->hw;
+ if (hw->api_maj_ver > IXGBE_FW_API_VER_MAJOR) {
+ device_printf(sc->dev,
+ "The driver for the device stopped because the NVM "
+ "image is newer than expected. You must install the "
+ "most recent version of the network driver.\n");
+ return (EOPNOTSUPP);
+ } else if (hw->api_maj_ver == IXGBE_FW_API_VER_MAJOR &&
+ hw->api_min_ver > (IXGBE_FW_API_VER_MINOR + 2)) {
+ device_printf(sc->dev,
+ "The driver for the device detected a newer version of "
+ "the NVM image than expected. Please install the most "
+ "recent version of the network driver.\n");
+ } else if (hw->api_maj_ver < IXGBE_FW_API_VER_MAJOR ||
+ hw->api_min_ver < IXGBE_FW_API_VER_MINOR - 2) {
+ device_printf(sc->dev,
+ "The driver for the device detected an older version "
+ "of the NVM image than expected. "
+ "Please update the NVM image.\n");
+ }
+ return (0);
+}
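
To make the version policy above concrete, suppose, purely as an illustration, that IXGBE_FW_API_VER_MAJOR is 1 and IXGBE_FW_API_VER_MINOR is 7 (the real constants live in ixgbe_e610.h and are not part of this hunk):

    FW API 2.0  -> attach fails (FW major version newer than the driver)
    FW API 1.10 -> attach continues, "NVM image newer than expected" warning
    FW API 1.7  -> silent success
    FW API 1.4  -> attach continues, "NVM image older than expected" warning
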
+
/************************************************************************
* ixgbe_register
************************************************************************/
@@ -970,6 +1011,9 @@ ixgbe_if_attach_pre(if_ctx_t ctx)
goto err_pci;
}
+ if (hw->mac.type == ixgbe_mac_E610)
+ ixgbe_init_aci(hw);
+
if (hw->mac.ops.fw_recovery_mode &&
hw->mac.ops.fw_recovery_mode(hw)) {
device_printf(dev,
@@ -1058,6 +1102,12 @@ ixgbe_if_attach_pre(if_ctx_t ctx)
break;
}
+ /* Check the FW API version */
+ if (hw->mac.type == ixgbe_mac_E610 && ixgbe_check_fw_api_version(sc)) {
+ error = EIO;
+ goto err_pci;
+ }
+
/* Most of the iflib initialization... */
iflib_set_mac(ctx, hw->mac.addr);
@@ -1111,6 +1161,9 @@ err_pci:
IXGBE_WRITE_REG(&sc->hw, IXGBE_CTRL_EXT, ctrl_ext);
ixgbe_free_pci_resources(ctx);
+ if (hw->mac.type == ixgbe_mac_E610)
+ ixgbe_shutdown_aci(hw);
+
return (error);
} /* ixgbe_if_attach_pre */
@@ -1358,6 +1411,10 @@ ixgbe_add_media_types(if_ctx_t ctx)
/* Media types with matching FreeBSD media defines */
if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T)
ifmedia_add(sc->media, IFM_ETHER | IFM_10G_T, 0, NULL);
+ if (layer & IXGBE_PHYSICAL_LAYER_5000BASE_T)
+ ifmedia_add(sc->media, IFM_ETHER | IFM_5000_T, 0, NULL);
+ if (layer & IXGBE_PHYSICAL_LAYER_2500BASE_T)
+ ifmedia_add(sc->media, IFM_ETHER | IFM_2500_T, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_1000BASE_T)
ifmedia_add(sc->media, IFM_ETHER | IFM_1000_T, 0, NULL);
if (layer & IXGBE_PHYSICAL_LAYER_100BASE_TX)
@@ -1459,6 +1516,7 @@ ixgbe_is_sfp(struct ixgbe_hw *hw)
}
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber)
return (true);
return (false);
@@ -1525,6 +1583,15 @@ ixgbe_config_link(if_ctx_t ctx)
IXGBE_LINK_SPEED_5GB_FULL);
}
+ if (hw->mac.type == ixgbe_mac_E610) {
+ hw->phy.ops.init(hw);
+ err = ixgbe_enable_lse(sc);
+ if (err)
+ device_printf(sc->dev,
+ "Failed to enable Link Status Event, "
+ "error: %d", err);
+ }
+
if (hw->mac.ops.setup_link)
err = hw->mac.ops.setup_link(hw, autoneg,
sc->link_up);
@@ -2158,14 +2225,15 @@ get_parent_info:
ixgbe_set_pci_config_data_generic(hw, link);
display:
- device_printf(dev, "PCI Express Bus: Speed %s %s\n",
- ((hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
+ device_printf(dev, "PCI Express Bus: Speed %s Width %s\n",
+ ((hw->bus.speed == ixgbe_bus_speed_16000) ? "16.0GT/s" :
+ (hw->bus.speed == ixgbe_bus_speed_8000) ? "8.0GT/s" :
(hw->bus.speed == ixgbe_bus_speed_5000) ? "5.0GT/s" :
(hw->bus.speed == ixgbe_bus_speed_2500) ? "2.5GT/s" :
"Unknown"),
- ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "Width x8" :
- (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "Width x4" :
- (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "Width x1" :
+ ((hw->bus.width == ixgbe_bus_width_pcie_x8) ? "x8" :
+ (hw->bus.width == ixgbe_bus_width_pcie_x4) ? "x4" :
+ (hw->bus.width == ixgbe_bus_width_pcie_x1) ? "x1" :
"Unknown"));
if (bus_info_valid) {
@@ -2372,14 +2440,17 @@ ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
ifmr->ifm_status |= IFM_ACTIVE;
layer = sc->phy_layer;
- if (layer & IXGBE_PHYSICAL_LAYER_10GBASE_T ||
- layer & IXGBE_PHYSICAL_LAYER_1000BASE_T ||
- layer & IXGBE_PHYSICAL_LAYER_100BASE_TX ||
- layer & IXGBE_PHYSICAL_LAYER_10BASE_T)
+ if (layer & IXGBE_PHYSICAL_LAYERS_BASE_T_ALL)
switch (sc->link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL:
ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
break;
+ case IXGBE_LINK_SPEED_5GB_FULL:
+ ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
+ break;
+ case IXGBE_LINK_SPEED_2_5GB_FULL:
+ ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
+ break;
case IXGBE_LINK_SPEED_1GB_FULL:
ifmr->ifm_active |= IFM_1000_T | IFM_FDX;
break;
@@ -2390,15 +2461,6 @@ ixgbe_if_media_status(if_ctx_t ctx, struct ifmediareq * ifmr)
ifmr->ifm_active |= IFM_10_T | IFM_FDX;
break;
}
- if (hw->mac.type == ixgbe_mac_X550)
- switch (sc->link_speed) {
- case IXGBE_LINK_SPEED_5GB_FULL:
- ifmr->ifm_active |= IFM_5000_T | IFM_FDX;
- break;
- case IXGBE_LINK_SPEED_2_5GB_FULL:
- ifmr->ifm_active |= IFM_2500_T | IFM_FDX;
- break;
- }
if (layer & IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU ||
layer & IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA)
switch (sc->link_speed) {
@@ -2676,6 +2738,11 @@ ixgbe_msix_link(void *arg)
sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
}
+ if (eicr & IXGBE_EICR_FW_EVENT) {
+ IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EICR_FW_EVENT);
+ sc->task_requests |= IXGBE_REQUEST_TASK_FWEVENT;
+ }
+
if (sc->hw.mac.type != ixgbe_mac_82598EB) {
if ((sc->feat_en & IXGBE_FEATURE_FDIR) &&
(eicr & IXGBE_EICR_FLOW_DIR)) {
@@ -2734,11 +2801,16 @@ ixgbe_msix_link(void *arg)
/* Check for VF message */
if ((sc->feat_en & IXGBE_FEATURE_SRIOV) &&
- (eicr & IXGBE_EICR_MAILBOX))
+ (eicr & IXGBE_EICR_MAILBOX)) {
sc->task_requests |= IXGBE_REQUEST_TASK_MBX;
+ }
}
- if (ixgbe_is_sfp(hw)) {
+ /*
+ * On E610, the firmware handles PHY configuration, so
+ * there is no need to perform any SFP-specific tasks.
+ */
+ if (hw->mac.type != ixgbe_mac_E610 && ixgbe_is_sfp(hw)) {
/* Pluggable optics-related interrupt */
if (hw->mac.type >= ixgbe_mac_X540)
eicr_mask = IXGBE_EICR_GPI_SDP0_X540;
@@ -2985,7 +3057,13 @@ ixgbe_if_detach(if_ctx_t ctx)
callout_drain(&sc->fw_mode_timer);
+ if (sc->hw.mac.type == ixgbe_mac_E610) {
+ ixgbe_disable_lse(sc);
+ ixgbe_shutdown_aci(&sc->hw);
+ }
+
ixgbe_free_pci_resources(ctx);
+
free(sc->mta, M_IXGBE);
return (0);
@@ -3404,6 +3482,7 @@ ixgbe_set_ivar(struct ixgbe_softc *sc, u8 entry, u8 vector, s8 type)
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
if (type == -1) { /* MISC IVAR */
index = (entry & 1) * 8;
ivar = IXGBE_READ_REG(hw, IXGBE_IVAR_MISC);
@@ -3826,6 +3905,96 @@ ixgbe_handle_phy(void *context)
} /* ixgbe_handle_phy */
/************************************************************************
+ * ixgbe_enable_lse - enable link status events
+ *
+ * Sets mask and enables link status events
+ ************************************************************************/
+static s32
+ixgbe_enable_lse(struct ixgbe_softc *sc)
+{
+ s32 error;
+ u16 mask = ~((u16)(IXGBE_ACI_LINK_EVENT_UPDOWN |
+ IXGBE_ACI_LINK_EVENT_MEDIA_NA |
+ IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL |
+ IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL));
+
+ error = ixgbe_configure_lse(&sc->hw, TRUE, mask);
+ if (error)
+ return (error);
+
+ sc->lse_mask = mask;
+ return (IXGBE_SUCCESS);
+} /* ixgbe_enable_lse */
+
+/************************************************************************
+ * ixgbe_disable_lse - disable link status events
+ ************************************************************************/
+static s32
+ixgbe_disable_lse(struct ixgbe_softc *sc)
+{
+ s32 error;
+
+ error = ixgbe_configure_lse(&sc->hw, false, sc->lse_mask);
+ if (error)
+ return (error);
+
+ sc->lse_mask = 0;
+ return (IXGBE_SUCCESS);
+} /* ixgbe_disable_lse */
+
+/************************************************************************
+ * ixgbe_handle_fw_event - Tasklet for MSI-X Link Status Event interrupts
+ ************************************************************************/
+static void
+ixgbe_handle_fw_event(void *context)
+{
+ if_ctx_t ctx = context;
+ struct ixgbe_softc *sc = iflib_get_softc(ctx);
+ struct ixgbe_hw *hw = &sc->hw;
+ struct ixgbe_aci_event event;
+ bool pending = false;
+ s32 error;
+
+ event.buf_len = IXGBE_ACI_MAX_BUFFER_SIZE;
+ event.msg_buf = malloc(event.buf_len, M_IXGBE, M_ZERO | M_NOWAIT);
+ if (!event.msg_buf) {
+ device_printf(sc->dev, "Can not allocate buffer for "
+ "event message\n");
+ return;
+ }
+
+ do {
+ error = ixgbe_aci_get_event(hw, &event, &pending);
+ if (error) {
+ device_printf(sc->dev, "Error getting event from "
+ "FW:%d\n", error);
+ break;
+ }
+
+ switch (le16toh(event.desc.opcode)) {
+ case ixgbe_aci_opc_get_link_status:
+ sc->task_requests |= IXGBE_REQUEST_TASK_LSC;
+ break;
+
+ case ixgbe_aci_opc_temp_tca_event:
+ if (hw->adapter_stopped == FALSE)
+ ixgbe_if_stop(ctx);
+ device_printf(sc->dev,
+ "CRITICAL: OVER TEMP!! PHY IS SHUT DOWN!!\n");
+ device_printf(sc->dev, "System shutdown required!\n");
+ break;
+
+ default:
+ device_printf(sc->dev,
+ "Unknown FW event captured, opcode=0x%04X\n",
+ le16toh(event.desc.opcode));
+ break;
+ }
+ } while (pending);
+
+ free(event.msg_buf, M_IXGBE);
+} /* ixgbe_handle_fw_event */
+
+/************************************************************************
* ixgbe_if_stop - Stop the hardware
*
* Disables all traffic on the adapter by issuing a
@@ -3899,6 +4068,8 @@ ixgbe_if_update_admin_status(if_ctx_t ctx)
}
/* Handle task requests from msix_link() */
+ if (sc->task_requests & IXGBE_REQUEST_TASK_FWEVENT)
+ ixgbe_handle_fw_event(ctx);
if (sc->task_requests & IXGBE_REQUEST_TASK_MOD)
ixgbe_handle_mod(ctx);
if (sc->task_requests & IXGBE_REQUEST_TASK_MSF)
@@ -3986,6 +4157,9 @@ ixgbe_if_enable_intr(if_ctx_t ctx)
mask |= IXGBE_EICR_GPI_SDP0_X540;
mask |= IXGBE_EIMS_ECC;
break;
+ case ixgbe_mac_E610:
+ mask |= IXGBE_EIMS_FW_EVENT;
+ break;
default:
break;
}
@@ -4008,6 +4182,7 @@ ixgbe_if_enable_intr(if_ctx_t ctx)
/* Don't autoclear Link */
mask &= ~IXGBE_EIMS_OTHER;
mask &= ~IXGBE_EIMS_LSC;
+ mask &= ~IXGBE_EIMS_FW_EVENT;
if (sc->feat_cap & IXGBE_FEATURE_SRIOV)
mask &= ~IXGBE_EIMS_MAILBOX;
IXGBE_WRITE_REG(hw, IXGBE_EIAC, mask);
@@ -4026,7 +4201,7 @@ ixgbe_if_enable_intr(if_ctx_t ctx)
} /* ixgbe_if_enable_intr */
/************************************************************************
- * ixgbe_disable_intr
+ * ixgbe_if_disable_intr
************************************************************************/
static void
ixgbe_if_disable_intr(if_ctx_t ctx)
@@ -4176,8 +4351,9 @@ ixgbe_intr(void *arg)
/* External PHY interrupt */
if ((hw->phy.type == ixgbe_phy_x550em_ext_t) &&
- (eicr & IXGBE_EICR_GPI_SDP0_X540))
+ (eicr & IXGBE_EICR_GPI_SDP0_X540)) {
sc->task_requests |= IXGBE_REQUEST_TASK_PHY;
+ }
return (FILTER_SCHEDULE_THREAD);
} /* ixgbe_intr */
@@ -4219,7 +4395,7 @@ ixgbe_sysctl_flowcntl(SYSCTL_HANDLER_ARGS)
int error, fc;
sc = (struct ixgbe_softc *)arg1;
- fc = sc->hw.fc.current_mode;
+ fc = sc->hw.fc.requested_mode;
error = sysctl_handle_int(oidp, &fc, 0, req);
if ((error) || (req->newptr == NULL))
@@ -4248,12 +4424,10 @@ ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
case ixgbe_fc_rx_pause:
case ixgbe_fc_tx_pause:
case ixgbe_fc_full:
- sc->hw.fc.requested_mode = fc;
if (sc->num_rx_queues > 1)
ixgbe_disable_rx_drop(sc);
break;
case ixgbe_fc_none:
- sc->hw.fc.requested_mode = ixgbe_fc_none;
if (sc->num_rx_queues > 1)
ixgbe_enable_rx_drop(sc);
break;
@@ -4261,6 +4435,8 @@ ixgbe_set_flowcntl(struct ixgbe_softc *sc, int fc)
return (EINVAL);
}
+ sc->hw.fc.requested_mode = fc;
+
/* Don't autoneg if forcing a value */
sc->hw.fc.disable_fc_autoneg = true;
ixgbe_fc_enable(&sc->hw);
@@ -4978,6 +5154,9 @@ ixgbe_init_device_features(struct ixgbe_softc *sc)
if (sc->hw.device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP)
sc->feat_cap &= ~IXGBE_FEATURE_LEGACY_IRQ;
break;
+ case ixgbe_mac_E610:
+ sc->feat_cap |= IXGBE_FEATURE_RECOVERY_MODE;
+ break;
default:
break;
}
diff --git a/sys/dev/ixgbe/if_ixv.c b/sys/dev/ixgbe/if_ixv.c
index 54b2c8c1dd68..8a1c1aae041d 100644
--- a/sys/dev/ixgbe/if_ixv.c
+++ b/sys/dev/ixgbe/if_ixv.c
@@ -68,6 +68,8 @@ static const pci_vendor_info_t ixv_vendor_info_array[] =
"Intel(R) X552 Virtual Function"),
PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF,
"Intel(R) X553 Virtual Function"),
+ PVID(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_E610_VF,
+ "Intel(R) E610 Virtual Function"),
/* required last entry */
PVID_END
};
@@ -1020,6 +1022,9 @@ ixv_identify_hardware(if_ctx_t ctx)
case IXGBE_DEV_ID_X550EM_A_VF:
hw->mac.type = ixgbe_mac_X550EM_a_vf;
break;
+ case IXGBE_DEV_ID_E610_VF:
+ hw->mac.type = ixgbe_mac_E610_vf;
+ break;
default:
device_printf(dev, "unknown mac type\n");
hw->mac.type = ixgbe_mac_unknown;
@@ -1955,6 +1960,7 @@ ixv_init_device_features(struct ixgbe_softc *sc)
case ixgbe_mac_X550_vf:
case ixgbe_mac_X550EM_x_vf:
case ixgbe_mac_X550EM_a_vf:
+ case ixgbe_mac_E610_vf:
sc->feat_cap |= IXGBE_FEATURE_NEEDS_CTXD;
sc->feat_cap |= IXGBE_FEATURE_RSS;
break;
diff --git a/sys/dev/ixgbe/ixgbe.h b/sys/dev/ixgbe/ixgbe.h
index 341d4ebfcebc..844064bf8543 100644
--- a/sys/dev/ixgbe/ixgbe.h
+++ b/sys/dev/ixgbe/ixgbe.h
@@ -86,6 +86,7 @@
#include "ixgbe_phy.h"
#include "ixgbe_vf.h"
#include "ixgbe_features.h"
+#include "ixgbe_e610.h"
/* Tunables */
@@ -195,6 +196,15 @@
CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \
CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP)
+/* All BASE-T Physical layers */
+#define IXGBE_PHYSICAL_LAYERS_BASE_T_ALL \
+ (IXGBE_PHYSICAL_LAYER_10GBASE_T |\
+ IXGBE_PHYSICAL_LAYER_5000BASE_T |\
+ IXGBE_PHYSICAL_LAYER_2500BASE_T |\
+ IXGBE_PHYSICAL_LAYER_1000BASE_T |\
+ IXGBE_PHYSICAL_LAYER_100BASE_TX |\
+ IXGBE_PHYSICAL_LAYER_10BASE_T)
+
#define IXGBE_CAPS (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_TSO | \
IFCAP_LRO | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | \
IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU | \
@@ -464,6 +474,7 @@ struct ixgbe_softc {
/* Feature capable/enabled flags. See ixgbe_features.h */
u32 feat_cap;
u32 feat_en;
+ u16 lse_mask;
};
/* Precision Time Sync (IEEE 1588) defines */
diff --git a/sys/dev/ixgbe/ixgbe_api.c b/sys/dev/ixgbe/ixgbe_api.c
index 4c50f10ed92e..f11f52a646e4 100644
--- a/sys/dev/ixgbe/ixgbe_api.c
+++ b/sys/dev/ixgbe/ixgbe_api.c
@@ -112,11 +112,15 @@ s32 ixgbe_init_shared_code(struct ixgbe_hw *hw)
case ixgbe_mac_X550EM_a:
status = ixgbe_init_ops_X550EM_a(hw);
break;
+ case ixgbe_mac_E610:
+ status = ixgbe_init_ops_E610(hw);
+ break;
case ixgbe_mac_82599_vf:
case ixgbe_mac_X540_vf:
case ixgbe_mac_X550_vf:
case ixgbe_mac_X550EM_x_vf:
case ixgbe_mac_X550EM_a_vf:
+ case ixgbe_mac_E610_vf:
status = ixgbe_init_ops_vf(hw);
break;
default:
@@ -240,6 +244,18 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw)
hw->mac.type = ixgbe_mac_X550EM_a_vf;
hw->mvals = ixgbe_mvals_X550EM_a;
break;
+ case IXGBE_DEV_ID_E610_BACKPLANE:
+ case IXGBE_DEV_ID_E610_SFP:
+ case IXGBE_DEV_ID_E610_10G_T:
+ case IXGBE_DEV_ID_E610_2_5G_T:
+ case IXGBE_DEV_ID_E610_SGMII:
+ hw->mac.type = ixgbe_mac_E610;
+ hw->mvals = ixgbe_mvals_X550EM_a;
+ break;
+ case IXGBE_DEV_ID_E610_VF:
+ hw->mac.type = ixgbe_mac_E610_vf;
+ hw->mvals = ixgbe_mvals_X550EM_a;
+ break;
default:
ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED;
ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED,
diff --git a/sys/dev/ixgbe/ixgbe_api.h b/sys/dev/ixgbe/ixgbe_api.h
index b81510dacb95..2b4cec8d110e 100644
--- a/sys/dev/ixgbe/ixgbe_api.h
+++ b/sys/dev/ixgbe/ixgbe_api.h
@@ -48,6 +48,7 @@ extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw);
+extern s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw);
extern s32 ixgbe_init_ops_vf(struct ixgbe_hw *hw);
s32 ixgbe_set_mac_type(struct ixgbe_hw *hw);
diff --git a/sys/dev/ixgbe/ixgbe_common.c b/sys/dev/ixgbe/ixgbe_common.c
index df7ab90e72ab..bff022585a03 100644
--- a/sys/dev/ixgbe/ixgbe_common.c
+++ b/sys/dev/ixgbe/ixgbe_common.c
@@ -178,6 +178,7 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_SFP_N:
case IXGBE_DEV_ID_X550EM_A_QSFP:
case IXGBE_DEV_ID_X550EM_A_QSFP_N:
+ case IXGBE_DEV_ID_E610_SFP:
supported = false;
break;
default:
@@ -210,6 +211,8 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_X550EM_A_10G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T:
case IXGBE_DEV_ID_X550EM_A_1G_T_L:
+ case IXGBE_DEV_ID_E610_10G_T:
+ case IXGBE_DEV_ID_E610_2_5G_T:
supported = true;
break;
default:
@@ -616,7 +619,8 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
}
}
- if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) {
+ if (hw->mac.type == ixgbe_mac_X540 ||
+ hw->mac.type == ixgbe_mac_X550) {
if (hw->phy.id == 0)
ixgbe_identify_phy(hw);
hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL,
@@ -1037,6 +1041,9 @@ void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status)
case IXGBE_PCI_LINK_SPEED_8000:
hw->bus.speed = ixgbe_bus_speed_8000;
break;
+ case IXGBE_PCI_LINK_SPEED_16000:
+ hw->bus.speed = ixgbe_bus_speed_16000;
+ break;
default:
hw->bus.speed = ixgbe_bus_speed_unknown;
break;
@@ -1059,7 +1066,9 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_bus_info_generic");
/* Get the negotiated link width and speed from PCI config space */
- link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS);
+ link_status = IXGBE_READ_PCIE_WORD(hw, hw->mac.type == ixgbe_mac_E610 ?
+ IXGBE_PCI_LINK_STATUS_E610 :
+ IXGBE_PCI_LINK_STATUS);
ixgbe_set_pci_config_data_generic(hw, link_status);
@@ -1878,7 +1887,6 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_get_eeprom_semaphore");
-
/* Get SMBI software semaphore between device drivers first */
for (i = 0; i < timeout; i++) {
/*
@@ -3363,7 +3371,6 @@ s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw)
DEBUGFUNC("ixgbe_disable_sec_rx_path_generic");
-
secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
@@ -3692,6 +3699,10 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS;
max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
break;
+ case ixgbe_mac_E610:
+ pcie_offset = IXGBE_PCIE_MSIX_E610_CAPS;
+ max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
+ break;
default:
return msix_count;
}
@@ -4139,7 +4150,6 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw)
return IXGBE_SUCCESS;
}
-
/**
* ixgbe_toggle_txdctl_generic - Toggle VF's queues
* @hw: pointer to hardware structure
@@ -4323,7 +4333,8 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
- if (hw->mac.type == ixgbe_mac_X550) {
+ if (hw->mac.type == ixgbe_mac_X550 ||
+ hw->mac.type == ixgbe_mac_E610) {
if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
*speed = IXGBE_LINK_SPEED_5GB_FULL;
}
@@ -5494,6 +5505,7 @@ void ixgbe_get_nvm_version(struct ixgbe_hw *hw,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
/* version of eeprom section */
if (ixgbe_read_eeprom(hw, NVM_EEP_OFFSET_X540, &word))
word = NVM_VER_INVALID;
@@ -5512,6 +5524,7 @@ void ixgbe_get_nvm_version(struct ixgbe_hw *hw,
case ixgbe_mac_X550:
case ixgbe_mac_X550EM_x:
case ixgbe_mac_X550EM_a:
+ case ixgbe_mac_E610:
/* intel phy firmware version */
if (ixgbe_read_eeprom(hw, NVM_EEP_PHY_OFF_X540, &word))
word = NVM_VER_INVALID;
diff --git a/sys/dev/ixgbe/ixgbe_e610.c b/sys/dev/ixgbe/ixgbe_e610.c
new file mode 100644
index 000000000000..95c6dca416c6
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_e610.c
@@ -0,0 +1,5567 @@
+/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
+
+ Copyright (c) 2025, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#include "ixgbe_type.h"
+#include "ixgbe_e610.h"
+#include "ixgbe_x550.h"
+#include "ixgbe_common.h"
+#include "ixgbe_phy.h"
+#include "ixgbe_api.h"
+
+/**
+ * ixgbe_init_aci - initialization routine for Admin Command Interface
+ * @hw: pointer to the hardware structure
+ *
+ * Initialize the ACI lock.
+ */
+void ixgbe_init_aci(struct ixgbe_hw *hw)
+{
+ ixgbe_init_lock(&hw->aci.lock);
+}
+
+/**
+ * ixgbe_shutdown_aci - shutdown routine for Admin Command Interface
+ * @hw: pointer to the hardware structure
+ *
+ * Destroy the ACI lock.
+ */
+void ixgbe_shutdown_aci(struct ixgbe_hw *hw)
+{
+ ixgbe_destroy_lock(&hw->aci.lock);
+}
+
+/**
+ * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
+ * be resent
+ * @opcode: ACI opcode
+ *
+ * Check if ACI command should be sent again depending on the provided opcode.
+ *
+ * Return: true if the sending command routine should be repeated,
+ * otherwise false.
+ */
+static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
+{
+ switch (opcode) {
+ case ixgbe_aci_opc_disable_rxen:
+ case ixgbe_aci_opc_get_phy_caps:
+ case ixgbe_aci_opc_get_link_status:
+ case ixgbe_aci_opc_get_link_topo:
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
+ * Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Admin Command is sent using CSR by setting descriptor and buffer in specific
+ * registers.
+ *
+ * Return: the exit code of the operation.
+ * * - IXGBE_SUCCESS - success.
+ * * - IXGBE_ERR_ACI_DISABLED - CSR mechanism is not enabled.
+ * * - IXGBE_ERR_ACI_BUSY - CSR mechanism is busy.
+ * * - IXGBE_ERR_PARAM - buf_size exceeds the maximum or buf and
+ * buf_size are inconsistent with each other.
+ * * - IXGBE_ERR_ACI_TIMEOUT - the Admin Command timed out.
+ * * - IXGBE_ERR_ACI_ERROR - invalid state of the HICR register, a bad
+ * opcode was returned, or the Admin Command completed with an error.
+ */
+static s32
+ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size)
+{
+ u32 hicr = 0, tmp_buf_size = 0, i = 0;
+ u32 *raw_desc = (u32 *)desc;
+ s32 status = IXGBE_SUCCESS;
+ bool valid_buf = false;
+ u32 *tmp_buf = NULL;
+ u16 opcode = 0;
+
+ do {
+ hw->aci.last_status = IXGBE_ACI_RC_OK;
+
+ /* It's necessary to check if the CSR mechanism is enabled */
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ if (!(hicr & PF_HICR_EN)) {
+ status = IXGBE_ERR_ACI_DISABLED;
+ break;
+ }
+ if (hicr & PF_HICR_C) {
+ hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
+ status = IXGBE_ERR_ACI_BUSY;
+ break;
+ }
+ opcode = desc->opcode;
+
+ if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE) {
+ status = IXGBE_ERR_PARAM;
+ break;
+ }
+
+ if (buf)
+ desc->flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF);
+
+ /* Check if buf and buf_size are proper params */
+ if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF)) {
+ if ((buf && buf_size == 0) ||
+ (buf == NULL && buf_size)) {
+ status = IXGBE_ERR_PARAM;
+ break;
+ }
+ if (buf && buf_size)
+ valid_buf = true;
+ }
+
+ if (valid_buf) {
+ if (buf_size % 4 == 0)
+ tmp_buf_size = buf_size;
+ else
+ tmp_buf_size = (buf_size & (u16)(~0x03)) + 4;
+
+ tmp_buf = (u32*)ixgbe_malloc(hw, tmp_buf_size);
+ if (!tmp_buf)
+ return IXGBE_ERR_OUT_OF_MEM;
+
+ /* tmp_buf is first filled with 0xFF and then overwritten
+ * with the contents of buf. This keeps the exact buf_size
+ * while ensuring we never read past the end of buf when
+ * buf_size is not a multiple of 4.
+ */
+ memset(tmp_buf, 0xFF, tmp_buf_size);
+ memcpy(tmp_buf, buf, buf_size);
+
+ if (tmp_buf_size > IXGBE_ACI_LG_BUF)
+ desc->flags |=
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_LB);
+
+ desc->datalen = IXGBE_CPU_TO_LE16(buf_size);
+
+ if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD)) {
+ for (i = 0; i < tmp_buf_size / 4; i++) {
+ IXGBE_WRITE_REG(hw, PF_HIBA(i),
+ IXGBE_LE32_TO_CPU(tmp_buf[i]));
+ }
+ }
+ }
+
+ /* Descriptor is written to specific registers */
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
+ IXGBE_WRITE_REG(hw, PF_HIDA(i),
+ IXGBE_LE32_TO_CPU(raw_desc[i]));
+
+ /* SW has to set the PF_HICR.C bit and clear the PF_HICR.SV and
+ * PF_HICR.EV bits
+ */
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ hicr = (hicr | PF_HICR_C) & ~(PF_HICR_SV | PF_HICR_EV);
+ IXGBE_WRITE_REG(hw, PF_HICR, hicr);
+
+ /* Wait for sync Admin Command response */
+ for (i = 0; i < IXGBE_ACI_SYNC_RESPONSE_TIMEOUT; i += 1) {
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ if ((hicr & PF_HICR_SV) || !(hicr & PF_HICR_C))
+ break;
+
+ msec_delay(1);
+ }
+
+ /* Wait for async Admin Command response */
+ if ((hicr & PF_HICR_SV) && (hicr & PF_HICR_C)) {
+ for (i = 0; i < IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT;
+ i += 1) {
+ hicr = IXGBE_READ_REG(hw, PF_HICR);
+ if ((hicr & PF_HICR_EV) || !(hicr & PF_HICR_C))
+ break;
+
+ msec_delay(1);
+ }
+ }
+
+ /* Read sync Admin Command response */
+ if ((hicr & PF_HICR_SV)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
+ raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA(i));
+ raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
+ }
+ }
+
+ /* Read async Admin Command response */
+ if ((hicr & PF_HICR_EV) && !(hicr & PF_HICR_C)) {
+ for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
+ raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA_2(i));
+ raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
+ }
+ }
+
+ /* Handle timeout and invalid state of HICR register */
+ if (hicr & PF_HICR_C) {
+ status = IXGBE_ERR_ACI_TIMEOUT;
+ break;
+ } else if (!(hicr & PF_HICR_SV) && !(hicr & PF_HICR_EV)) {
+ status = IXGBE_ERR_ACI_ERROR;
+ break;
+ }
+
+ /* For every command other than 0x0014, treat an opcode
+ * mismatch as an error. The response to the 0x0014 command
+ * read from HIDA_2 is an event descriptor, which is expected
+ * to carry a different opcode than the command itself.
+ */
+ if (desc->opcode != opcode &&
+ opcode != IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
+ status = IXGBE_ERR_ACI_ERROR;
+ break;
+ }
+
+ if (desc->retval != IXGBE_ACI_RC_OK) {
+ hw->aci.last_status = (enum ixgbe_aci_err)desc->retval;
+ status = IXGBE_ERR_ACI_ERROR;
+ break;
+ }
+
+ /* Write the response data back into buf */
+ if (valid_buf && (desc->flags &
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF))) {
+ for (i = 0; i < tmp_buf_size / 4; i++) {
+ tmp_buf[i] = IXGBE_READ_REG(hw, PF_HIBA(i));
+ tmp_buf[i] = IXGBE_CPU_TO_LE32(tmp_buf[i]);
+ }
+ memcpy(buf, tmp_buf, buf_size);
+ }
+ } while (0);
+
+ if (tmp_buf)
+ ixgbe_free(hw, tmp_buf);
+
+ return status;
+}
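
As a quick sanity check of the dword rounding in ixgbe_aci_send_cmd_execute() above, where unaligned sizes become tmp_buf_size = (buf_size & ~0x03) + 4:

    buf_size = 10 -> tmp_buf_size = (10 & ~0x03) + 4 = 8 + 4 = 12
    buf_size = 64 -> tmp_buf_size = 64 (already a multiple of 4)

The 0xFF pre-fill then guarantees the up-to-three padding bytes written to PF_HIBA are defined without reading past the end of buf.
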
+
+/**
+ * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
+ * @hw: pointer to the HW struct
+ * @desc: descriptor describing the command
+ * @buf: buffer to use for indirect commands (NULL for direct commands)
+ * @buf_size: size of buffer for indirect commands (0 for direct commands)
+ *
+ * Helper function to send FW Admin Commands to the FW Admin Command Interface.
+ *
+ * Retry sending the FW Admin Command multiple times to the FW ACI
+ * if the EBUSY Admin Command error is returned.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size)
+{
+ struct ixgbe_aci_desc desc_cpy;
+ enum ixgbe_aci_err last_status;
+ bool is_cmd_for_retry;
+ u8 *buf_cpy = NULL;
+ s32 status;
+ u16 opcode;
+ u8 idx = 0;
+
+ opcode = IXGBE_LE16_TO_CPU(desc->opcode);
+ is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
+ memset(&desc_cpy, 0, sizeof(desc_cpy));
+
+ if (is_cmd_for_retry) {
+ if (buf) {
+ buf_cpy = (u8 *)ixgbe_malloc(hw, buf_size);
+ if (!buf_cpy)
+ return IXGBE_ERR_OUT_OF_MEM;
+ }
+ memcpy(&desc_cpy, desc, sizeof(desc_cpy));
+ }
+
+ do {
+ ixgbe_acquire_lock(&hw->aci.lock);
+ status = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
+ last_status = hw->aci.last_status;
+ ixgbe_release_lock(&hw->aci.lock);
+
+ if (!is_cmd_for_retry || status == IXGBE_SUCCESS ||
+ (last_status != IXGBE_ACI_RC_EBUSY && status != IXGBE_ERR_ACI_ERROR))
+ break;
+
+ if (buf)
+ memcpy(buf, buf_cpy, buf_size);
+ memcpy(desc, &desc_cpy, sizeof(desc_cpy));
+
+ msec_delay(IXGBE_ACI_SEND_DELAY_TIME_MS);
+ } while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE);
+
+ if (buf_cpy)
+ ixgbe_free(hw, buf_cpy);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_check_event_pending - check if there are any pending events
+ * @hw: pointer to the HW struct
+ *
+ * Determine if there are any pending events.
+ *
+ * Return: true if there are any currently pending events
+ * otherwise false.
+ */
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
+{
+ u32 ep_bit_mask;
+ u32 fwsts;
+
+ ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
+
+ /* Check state of Event Pending (EP) bit */
+ fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
+ return (fwsts & ep_bit_mask) ? true : false;
+}
+
+/**
+ * ixgbe_aci_get_event - get an event from ACI
+ * @hw: pointer to the HW struct
+ * @e: event information structure
+ * @pending: optional flag signaling that there are more pending events
+ *
+ * Obtain an event from ACI and return its content
+ * through 'e' using ACI command (0x0014).
+ * Provide information if there are more events
+ * to retrieve through 'pending'.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending)
+{
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!e || (!e->msg_buf && e->buf_len) || (e->msg_buf && !e->buf_len))
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_acquire_lock(&hw->aci.lock);
+
+ /* Check if there are any events pending */
+ if (!ixgbe_aci_check_event_pending(hw)) {
+ status = IXGBE_ERR_ACI_NO_EVENTS;
+ goto aci_get_event_exit;
+ }
+
+ /* Obtain pending event */
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
+ status = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
+ if (status)
+ goto aci_get_event_exit;
+
+ /* Returned 0x0014 opcode indicates that no event was obtained */
+ if (desc.opcode == IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
+ status = IXGBE_ERR_ACI_NO_EVENTS;
+ goto aci_get_event_exit;
+ }
+
+ /* Determine size of event data */
+ e->msg_len = MIN_T(u16, IXGBE_LE16_TO_CPU(desc.datalen), e->buf_len);
+ /* Write event descriptor to event info structure */
+ memcpy(&e->desc, &desc, sizeof(e->desc));
+
+ /* Check if there are any further events pending */
+ if (pending) {
+ *pending = ixgbe_aci_check_event_pending(hw);
+ }
+
+aci_get_event_exit:
+ ixgbe_release_lock(&hw->aci.lock);
+
+ return status;
+}
+
+/**
+ * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
+ * @desc: pointer to the temp descriptor (non DMA mem)
+ * @opcode: the opcode can be used to decide which flags to turn off or on
+ *
+ * Helper function to fill the descriptor desc with default values
+ * and the provided opcode.
+ */
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
+{
+ /* zero out the desc */
+ memset(desc, 0, sizeof(*desc));
+ desc->opcode = IXGBE_CPU_TO_LE16(opcode);
+ desc->flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_SI);
+}
+
+/**
+ * ixgbe_aci_get_fw_ver - get the firmware version
+ * @hw: pointer to the HW struct
+ *
+ * Get the firmware version using ACI command (0x0001).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_ver *resp;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ resp = &desc.params.get_ver;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ if (!status) {
+ hw->fw_branch = resp->fw_branch;
+ hw->fw_maj_ver = resp->fw_major;
+ hw->fw_min_ver = resp->fw_minor;
+ hw->fw_patch = resp->fw_patch;
+ hw->fw_build = IXGBE_LE32_TO_CPU(resp->fw_build);
+ hw->api_branch = resp->api_branch;
+ hw->api_maj_ver = resp->api_major;
+ hw->api_min_ver = resp->api_minor;
+ hw->api_patch = resp->api_patch;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_send_driver_ver - send the driver version to firmware
+ * @hw: pointer to the HW struct
+ * @dv: driver's major, minor version
+ *
+ * Send the driver version to the firmware
+ * using the ACI command (0x0002).
+ *
+ * Return: the exit code of the operation.
+ * Returns IXGBE_ERR_PARAM, if dv is NULL.
+ */
+s32 ixgbe_aci_send_driver_ver(struct ixgbe_hw *hw, struct ixgbe_driver_ver *dv)
+{
+ struct ixgbe_aci_cmd_driver_ver *cmd;
+ struct ixgbe_aci_desc desc;
+ u16 len;
+
+ cmd = &desc.params.driver_ver;
+
+ if (!dv)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_driver_ver);
+
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+ cmd->major_ver = dv->major_ver;
+ cmd->minor_ver = dv->minor_ver;
+ cmd->build_ver = dv->build_ver;
+ cmd->subbuild_ver = dv->subbuild_ver;
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
+ len++;
+
+ return ixgbe_aci_send_cmd(hw, &desc, dv->driver_string, len);
+}
+
+/**
+ * ixgbe_aci_req_res - request a common resource
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @access: access type
+ * @sdp_number: resource number
+ * @timeout: the maximum time in ms that the driver may hold the resource
+ *
+ * Requests a common resource using the ACI command (0x0008).
+ * Specifies the maximum time the driver may hold the resource.
+ * If the requested resource is currently occupied by some other driver,
+ * a busy return value is returned and the timeout field value indicates the
+ * maximum time the current owner has to free it.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32
+ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u8 sdp_number,
+ u32 *timeout)
+{
+ struct ixgbe_aci_cmd_req_res *cmd_resp;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd_resp = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);
+
+ cmd_resp->res_id = IXGBE_CPU_TO_LE16(res);
+ cmd_resp->access_type = IXGBE_CPU_TO_LE16(access);
+ cmd_resp->res_number = IXGBE_CPU_TO_LE32(sdp_number);
+ cmd_resp->timeout = IXGBE_CPU_TO_LE32(*timeout);
+ *timeout = 0;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ /* The completion specifies the maximum time in ms that the driver
+ * may hold the resource in the Timeout field.
+ * If the resource is held by some other driver, the command completes
+ * with a busy return value and the timeout field indicates the maximum
+ * time the current owner of the resource has to free it.
+ */
+ if (!status || hw->aci.last_status == IXGBE_ACI_RC_EBUSY)
+ *timeout = IXGBE_LE32_TO_CPU(cmd_resp->timeout);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_release_res - release a common resource using ACI
+ * @hw: pointer to the HW struct
+ * @res: resource ID
+ * @sdp_number: resource number
+ *
+ * Release a common resource using ACI command (0x0009).
+ *
+ * Return: the exit code of the operation.
+ */
+static s32
+ixgbe_aci_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ u8 sdp_number)
+{
+ struct ixgbe_aci_cmd_req_res *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.res_owner;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
+
+ cmd->res_id = IXGBE_CPU_TO_LE16(res);
+ cmd->res_number = IXGBE_CPU_TO_LE32(sdp_number);
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_acquire_res - acquire the ownership of a resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ * @access: access type (read or write)
+ * @timeout: timeout in milliseconds
+ *
+ * Attempt to acquire ownership of a resource through the ACI
+ * using ixgbe_aci_req_res.
+ * If some other driver has previously acquired the resource and already
+ * performed any necessary updates, IXGBE_ERR_ACI_NO_WORK is returned,
+ * and the caller does not obtain the resource and has no further work to do.
+ * If needed, the function polls until the current lock owner times out.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u32 timeout)
+{
+#define IXGBE_RES_POLLING_DELAY_MS 10
+ u32 delay = IXGBE_RES_POLLING_DELAY_MS;
+ u32 res_timeout = timeout;
+ u32 retry_timeout = 0;
+ s32 status;
+
+ status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ /* A return code of IXGBE_ERR_ACI_NO_WORK means that another driver has
+ * previously acquired the resource and performed any necessary updates;
+ * in this case the caller does not obtain the resource and has no
+ * further work to do.
+ */
+ if (status == IXGBE_ERR_ACI_NO_WORK)
+ goto ixgbe_acquire_res_exit;
+
+ /* If necessary, poll until the current lock owner times out.
+ * Set retry_timeout to the timeout value reported by the FW in the
+ * response to the "Request Resource Ownership" (0x0008) Admin Command
+ * as it indicates the maximum time the current owner of the resource
+ * is allowed to hold it.
+ */
+ retry_timeout = res_timeout;
+ while (status && retry_timeout && res_timeout) {
+ msec_delay(delay);
+ retry_timeout = (retry_timeout > delay) ?
+ retry_timeout - delay : 0;
+ status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
+
+ if (status == IXGBE_ERR_ACI_NO_WORK)
+ /* lock free, but no work to do */
+ break;
+
+ if (!status)
+ /* lock acquired */
+ break;
+ }
+
+ixgbe_acquire_res_exit:
+ return status;
+}
+
+/**
+ * ixgbe_release_res - release a common resource
+ * @hw: pointer to the HW structure
+ * @res: resource ID
+ *
+ * Release a common resource using ixgbe_aci_release_res.
+ */
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
+{
+ u32 total_delay = 0;
+ s32 status;
+
+ status = ixgbe_aci_release_res(hw, res, 0);
+
+ /* There are some rare cases when trying to release the resource
+ * results in an admin command timeout, so handle them correctly.
+ */
+ while ((status == IXGBE_ERR_ACI_TIMEOUT) &&
+ (total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT)) {
+ msec_delay(1);
+ status = ixgbe_aci_release_res(hw, res, 0);
+ total_delay++;
+ }
+}
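+
+/* A minimal usage sketch (illustrative, not part of the driver flow):
+ * acquire a shared resource for reading, use it, then release it.
+ * IXGBE_NVM_RES_ID and IXGBE_NVM_TIMEOUT are the NVM resource ID and
+ * timeout used by ixgbe_acquire_nvm() later in this file.
+ *
+ * status = ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, IXGBE_RES_READ,
+ * IXGBE_NVM_TIMEOUT);
+ * if (status)
+ * return status;
+ * ...access the resource...
+ * ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
+ */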
+
+/**
+ * ixgbe_parse_common_caps - Parse common device/function capabilities
+ * @hw: pointer to the HW struct
+ * @caps: pointer to common capabilities structure
+ * @elem: the capability element to parse
+ * @prefix: message prefix for tracing capabilities
+ *
+ * Given a capability element, extract relevant details into the common
+ * capability structure.
+ *
+ * Return: true if the capability matches one of the common capability ids,
+ * false otherwise.
+ */
+static bool
+ixgbe_parse_common_caps(struct ixgbe_hw *hw, struct ixgbe_hw_common_caps *caps,
+ struct ixgbe_aci_cmd_list_caps_elem *elem,
+ const char *prefix)
+{
+ u32 logical_id = IXGBE_LE32_TO_CPU(elem->logical_id);
+ u32 phys_id = IXGBE_LE32_TO_CPU(elem->phys_id);
+ u32 number = IXGBE_LE32_TO_CPU(elem->number);
+ u16 cap = IXGBE_LE16_TO_CPU(elem->cap);
+ bool found = true;
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ caps->valid_functions = number;
+ break;
+ case IXGBE_ACI_CAPS_SRIOV:
+ caps->sr_iov_1_1 = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_VMDQ:
+ caps->vmdq = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_DCB:
+ caps->dcb = (number == 1);
+ caps->active_tc_bitmap = logical_id;
+ caps->maxtc = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_RSS:
+ caps->rss_table_size = number;
+ caps->rss_table_entry_width = logical_id;
+ break;
+ case IXGBE_ACI_CAPS_RXQS:
+ caps->num_rxq = number;
+ caps->rxq_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_TXQS:
+ caps->num_txq = number;
+ caps->txq_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_MSIX:
+ caps->num_msix_vectors = number;
+ caps->msix_vector_first_id = phys_id;
+ break;
+ case IXGBE_ACI_CAPS_NVM_VER:
+ break;
+ case IXGBE_ACI_CAPS_NVM_MGMT:
+ caps->sec_rev_disabled =
+ (number & IXGBE_NVM_MGMT_SEC_REV_DISABLED) ?
+ true : false;
+ caps->update_disabled =
+ (number & IXGBE_NVM_MGMT_UPDATE_DISABLED) ?
+ true : false;
+ caps->nvm_unified_update =
+ (number & IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
+ true : false;
+ caps->netlist_auth =
+ (number & IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT) ?
+ true : false;
+ break;
+ case IXGBE_ACI_CAPS_MAX_MTU:
+ caps->max_mtu = number;
+ break;
+ case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
+ caps->pcie_reset_avoidance = (number > 0);
+ break;
+ case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
+ caps->reset_restrict_support = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
+ case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
+ {
+ u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;
+
+ caps->ext_topo_dev_img_ver_high[index] = number;
+ caps->ext_topo_dev_img_ver_low[index] = logical_id;
+ caps->ext_topo_dev_img_part_num[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
+ IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S;
+ caps->ext_topo_dev_img_load_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
+ caps->ext_topo_dev_img_prog_en[index] =
+ (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
+ break;
+ }
+ case IXGBE_ACI_CAPS_OROM_RECOVERY_UPDATE:
+ caps->orom_recovery_update = (number == 1);
+ break;
+ case IXGBE_ACI_CAPS_NEXT_CLUSTER_ID:
+ caps->next_cluster_id_support = (number == 1);
+ DEBUGOUT2("%s: next_cluster_id_support = %d\n",
+ prefix, caps->next_cluster_id_support);
+ break;
+ default:
+ /* Not one of the recognized common capabilities */
+ found = false;
+ }
+
+ return found;
+}
+
+/**
+ * ixgbe_hweight8 - count set bits among the 8 lowest bits
+ * @w: variable storing set bits to count
+ *
+ * Return: the number of set bits among the 8 lowest bits in the provided value.
+ */
+static u8 ixgbe_hweight8(u32 w)
+{
+ u8 hweight = 0, i;
+
+ for (i = 0; i < 8; i++)
+ if (w & (1 << i))
+ hweight++;
+
+ return hweight;
+}
+
+/**
+ * ixgbe_hweight32 - count set bits among the 32 lowest bits
+ * @w: variable storing set bits to count
+ *
+ * Return: the number of set bits among the 32 lowest bits in the
+ * provided value.
+ */
+static u8 ixgbe_hweight32(u32 w)
+{
+ u32 bit_mask = 0x1, i;
+ u8 bit_cnt = 0;
+
+ for (i = 0; i < 32; i++) {
+ if (w & bit_mask)
+ bit_cnt++;
+
+ bit_mask = bit_mask << 0x1;
+ }
+
+ return bit_cnt;
+}
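+
+/* Both helpers are open-coded population counts, e.g. ixgbe_hweight8(0xB3)
+ * returns 5 (0xB3 = 0b10110011). They are equivalent to a compiler builtin
+ * such as __builtin_popcount(), kept local to avoid depending on one.
+ */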
+
+/**
+ * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
+ */
+static void
+ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_funcs = ixgbe_hweight32(number);
+}
+
+/**
+ * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VF for device capabilities.
+ */
+static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_vfs_exposed = number;
+}
+
+/**
+ * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
+ */
+static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_vsi_allocd_to_host = number;
+}
+
+/**
+ * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @cap: capability element to parse
+ *
+ * Parse IXGBE_ACI_CAPS_FD for device capabilities.
+ */
+static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ dev_p->num_flow_director_fltr = number;
+}
+
+/**
+ * ixgbe_parse_dev_caps - Parse device capabilities
+ * @hw: pointer to the HW struct
+ * @dev_p: pointer to device capabilities structure
+ * @buf: buffer containing the device capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse the device (0x000B) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_common_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the device capabilities structure.
+ */
+static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_p,
+ void *buf, u32 cap_count)
+{
+ struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+ memset(dev_p, 0, sizeof(*dev_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
+ bool found;
+
+ found = ixgbe_parse_common_caps(hw, &dev_p->common_cap,
+ &cap_resp[i], "dev caps");
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
+ ixgbe_parse_valid_functions_cap(hw, dev_p,
+ &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VF:
+ ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VSI:
+ ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_FD:
+ ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ if (!found)
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VF.
+ */
+static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ u32 logical_id = IXGBE_LE32_TO_CPU(cap->logical_id);
+ u32 number = IXGBE_LE32_TO_CPU(cap->number);
+
+ UNREFERENCED_1PARAMETER(hw);
+
+ func_p->num_allocd_vfs = number;
+ func_p->vf_base_id = logical_id;
+}
+
+/**
+ * ixgbe_get_num_per_func - determine number of resources per PF
+ * @hw: pointer to the HW structure
+ * @max: value to be evenly split between each PF
+ *
+ * Determine the number of valid functions by going through the bitmap returned
+ * from parsing capabilities and use this to calculate the number of resources
+ * per PF based on the max value passed in.
+ *
+ * Return: the number of resources per PF or 0 if no PFs are available.
+ */
+static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
+{
+ u8 funcs;
+
+#define IXGBE_CAPS_VALID_FUNCS_M 0xFF
+ funcs = ixgbe_hweight8(hw->dev_caps.common_cap.valid_functions &
+ IXGBE_CAPS_VALID_FUNCS_M);
+
+ if (!funcs)
+ return 0;
+
+ return max / funcs;
+}
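+
+/* Worked example (illustrative numbers): if the firmware-reported
+ * valid-functions bitmap is 0x3, two PFs are active, so a device-wide
+ * maximum of 768 resources yields ixgbe_get_num_per_func(hw, 768) == 384.
+ */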
+
+/**
+ * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @cap: pointer to the capability element to parse
+ *
+ * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
+ */
+static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ struct ixgbe_aci_cmd_list_caps_elem *cap)
+{
+ func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
+}
+
+/**
+ * ixgbe_parse_func_caps - Parse function capabilities
+ * @hw: pointer to the HW struct
+ * @func_p: pointer to function capabilities structure
+ * @buf: buffer containing the function capability records
+ * @cap_count: the number of capabilities
+ *
+ * Helper function to parse function (0x000A) capabilities list. For
+ * capabilities shared between device and function, this relies on
+ * ixgbe_parse_common_caps.
+ *
+ * Loop through the list of provided capabilities and extract the relevant
+ * data into the function capabilities structure.
+ */
+static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_p,
+ void *buf, u32 cap_count)
+{
+ struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
+ u32 i;
+
+ cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
+
+ memset(func_p, 0, sizeof(*func_p));
+
+ for (i = 0; i < cap_count; i++) {
+ u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
+
+ ixgbe_parse_common_caps(hw, &func_p->common_cap,
+ &cap_resp[i], "func caps");
+
+ switch (cap) {
+ case IXGBE_ACI_CAPS_VF:
+ ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ case IXGBE_ACI_CAPS_VSI:
+ ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
+ break;
+ default:
+ /* Don't list common capabilities as unknown */
+ break;
+ }
+ }
+}
+
+/**
+ * ixgbe_aci_list_caps - query function/device capabilities
+ * @hw: pointer to the HW struct
+ * @buf: a buffer to hold the capabilities
+ * @buf_size: size of the buffer
+ * @cap_count: if not NULL, set to the number of capabilities reported
+ * @opc: capabilities type to discover, device or function
+ *
+ * Get the function (0x000A) or device (0x000B) capabilities description from
+ * firmware and store it in the buffer.
+ *
+ * If the cap_count pointer is not NULL, then it is set to the number of
+ * capabilities firmware will report. Note that if the buffer size is too
+ * small, it is possible the command will return IXGBE_ERR_OUT_OF_MEM. The
+ * cap_count will still be updated in this case. It is recommended that the
+ * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
+ * buffer that firmware could return) to avoid this.
+ *
+ * Return: the exit code of the operation.
+ * Exit code of IXGBE_ERR_OUT_OF_MEM means the buffer size is too small.
+ */
+s32 ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc)
+{
+ struct ixgbe_aci_cmd_list_caps *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.get_cap;
+
+ if (opc != ixgbe_aci_opc_list_func_caps &&
+ opc != ixgbe_aci_opc_list_dev_caps)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
+ status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+ if (cap_count)
+ *cap_count = IXGBE_LE32_TO_CPU(cmd->count);
+
+ return status;
+}
+
+/**
+ * ixgbe_discover_dev_caps - Read and extract device capabilities
+ * @hw: pointer to the hardware structure
+ * @dev_caps: pointer to device capabilities structure
+ *
+ * Read the device capabilities and extract them into the dev_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps)
+{
+ u32 cap_count = 0;
+ u8 *cbuf = NULL;
+ s32 status;
+
+ cbuf = (u8 *)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
+ if (!cbuf)
+ return IXGBE_ERR_OUT_OF_MEM;
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+ status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_dev_caps);
+ if (!status)
+ ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
+
+ if (cbuf)
+ ixgbe_free(hw, cbuf);
+
+ return status;
+}
+
+/**
+ * ixgbe_discover_func_caps - Read and extract function capabilities
+ * @hw: pointer to the hardware structure
+ * @func_caps: pointer to function capabilities structure
+ *
+ * Read the function capabilities and extract them into the func_caps structure
+ * for later use.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_discover_func_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_func_caps *func_caps)
+{
+ u32 cap_count = 0;
+ u8 *cbuf = NULL;
+ s32 status;
+
+ cbuf = (u8 *)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
+ if (!cbuf)
+ return IXGBE_ERR_OUT_OF_MEM;
+ /* Although the driver doesn't know the number of capabilities the
+ * device will return, we can simply send a 4KB buffer, the maximum
+ * possible size that firmware can return.
+ */
+ cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
+ sizeof(struct ixgbe_aci_cmd_list_caps_elem);
+
+ status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
+ &cap_count,
+ ixgbe_aci_opc_list_func_caps);
+ if (!status)
+ ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
+
+ if (cbuf)
+ ixgbe_free(hw, cbuf);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_caps - get info about the HW
+ * @hw: pointer to the hardware structure
+ *
+ * Retrieve both device and function capabilities.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_caps(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ status = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
+ if (status)
+ return status;
+
+ return ixgbe_discover_func_caps(hw, &hw->func_caps);
+}
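+
+/* After a successful ixgbe_get_caps() call the parsed capabilities are
+ * available in hw->dev_caps and hw->func_caps, e.g. (illustrative):
+ *
+ * if (!ixgbe_get_caps(hw))
+ * max_mtu = hw->dev_caps.common_cap.max_mtu;
+ */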
+
+/**
+ * ixgbe_aci_disable_rxen - disable RX
+ * @hw: pointer to the HW struct
+ *
+ * Request a safe disable of Receive Enable using ACI command (0x000C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_disable_rxen *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.disable_rxen;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
+
+ cmd->lport_num = (u8)hw->bus.func;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_get_phy_caps - returns PHY capabilities
+ * @hw: pointer to the HW struct
+ * @qual_mods: report qualified modules
+ * @report_mode: report mode capabilities
+ * @pcaps: structure for PHY capabilities to be filled
+ *
+ * Returns the various PHY capabilities supported on the Port
+ * using ACI command (0x0600).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
+{
+ struct ixgbe_aci_cmd_get_phy_caps *cmd;
+ u16 pcaps_size = sizeof(*pcaps);
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.get_phy;
+
+ if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);
+
+ if (qual_mods)
+ cmd->param0 |= IXGBE_CPU_TO_LE16(IXGBE_ACI_GET_PHY_RQM);
+
+ cmd->param0 |= IXGBE_CPU_TO_LE16(report_mode);
+ status = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);
+
+ if (status == IXGBE_SUCCESS &&
+ report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
+ hw->phy.phy_type_low = IXGBE_LE64_TO_CPU(pcaps->phy_type_low);
+ hw->phy.phy_type_high = IXGBE_LE64_TO_CPU(pcaps->phy_type_high);
+ memcpy(hw->link.link_info.module_type, &pcaps->module_type,
+ sizeof(hw->link.link_info.module_type));
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_phy_caps_equals_cfg - check if capabilities match the PHY config
+ * @phy_caps: PHY capabilities
+ * @phy_cfg: PHY configuration
+ *
+ * Helper function to determine if PHY capabilities match PHY
+ * configuration
+ *
+ * Return: true if PHY capabilities match PHY configuration.
+ */
+bool
+ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *phy_caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *phy_cfg)
+{
+ u8 caps_mask, cfg_mask;
+
+ if (!phy_caps || !phy_cfg)
+ return false;
+
+ /* These bits are not common between capabilities and configuration.
+ * Do not use them to determine equality.
+ */
+ caps_mask = IXGBE_ACI_PHY_CAPS_MASK & ~(IXGBE_ACI_PHY_AN_MODE |
+ IXGBE_ACI_PHY_EN_MOD_QUAL);
+ cfg_mask = IXGBE_ACI_PHY_ENA_VALID_MASK &
+ ~IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
+ phy_caps->phy_type_high != phy_cfg->phy_type_high ||
+ ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
+ phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
+ phy_caps->eee_cap != phy_cfg->eee_cap ||
+ phy_caps->eeer_value != phy_cfg->eeer_value ||
+ phy_caps->link_fec_options != phy_cfg->link_fec_opt)
+ return false;
+
+ return true;
+}
+
+/**
+ * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
+ * @caps: PHY ability structure to copy data from
+ * @cfg: PHY configuration structure to copy data to
+ *
+ * Helper function to copy data from PHY capabilities data structure
+ * to PHY configuration data structure
+ */
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ if (!caps || !cfg)
+ return;
+
+ memset(cfg, 0, sizeof(*cfg));
+ cfg->phy_type_low = caps->phy_type_low;
+ cfg->phy_type_high = caps->phy_type_high;
+ cfg->caps = caps->caps;
+ cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
+ cfg->eee_cap = caps->eee_cap;
+ cfg->eeer_value = caps->eeer_value;
+ cfg->link_fec_opt = caps->link_fec_options;
+ cfg->module_compliance_enforcement =
+ caps->module_compliance_enforcement;
+}
+
+/**
+ * ixgbe_aci_set_phy_cfg - set PHY configuration
+ * @hw: pointer to the HW struct
+ * @cfg: structure with PHY configuration data to be set
+ *
+ * Set the various PHY configuration parameters supported on the Port
+ * using ACI command (0x0601).
+ * One or more of the Set PHY config parameters may be ignored in an MFP
+ * mode as the PF may not have the privilege to set some of the PHY Config
+ * parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
+{
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!cfg)
+ return IXGBE_ERR_PARAM;
+
+ /* Ensure that only valid bits of cfg->caps can be turned on. */
+ if (cfg->caps & ~IXGBE_ACI_PHY_ENA_VALID_MASK)
+ cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
+
+ if (!status)
+ hw->phy.curr_user_phy_cfg = *cfg;
+
+ return status;
+}
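+
+/* A typical reconfiguration flow (sketch, error handling elided): read the
+ * current abilities, convert them into a configuration, adjust the desired
+ * IXGBE_ACI_PHY_ENA_* bits (placeholder below) and apply:
+ *
+ * ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ * &pcaps);
+ * ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+ * cfg.caps |= <desired IXGBE_ACI_PHY_ENA_* bits>;
+ * ixgbe_aci_set_phy_cfg(hw, &cfg);
+ */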
+
+/**
+ * ixgbe_aci_set_link_restart_an - set up link and restart AN
+ * @hw: pointer to the HW struct
+ * @ena_link: if true: enable link, if false: disable link
+ *
+ * Function sets up the link and restarts the Auto-Negotiation over the link.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
+{
+ struct ixgbe_aci_cmd_restart_an *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.restart_an;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
+
+ cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
+ if (ena_link)
+ cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+ else
+ cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_is_media_cage_present - check if media cage is present
+ * @hw: pointer to the HW struct
+ *
+ * Identify presence of media cage using the ACI command (0x06E0).
+ *
+ * Return: true if media cage is present, else false. If no cage, then
+ * media type is backplane or BASE-T.
+ */
+static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_link_topo *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.get_link_topo;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+
+ cmd->addr.topo_params.node_type_ctx =
+ (IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT <<
+ IXGBE_ACI_LINK_TOPO_NODE_CTX_S);
+
+ /* set node type */
+ cmd->addr.topo_params.node_type_ctx |=
+ (IXGBE_ACI_LINK_TOPO_NODE_TYPE_M &
+ IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE);
+
+ /* Node type cage can be used to determine if the cage is present. If
+ * the command returns an error (ENOENT), then no cage is present, and
+ * the connection type is backplane or BASE-T.
+ */
+ return ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL) ==
+ IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
+ * @hw: pointer to the HW struct
+ *
+ * Try to identify the media type based on the phy type.
+ * phy_type_low is checked first, then phy_type_high.
+ * If more than one media type is selected, or none can be identified,
+ * ixgbe_media_type_unknown is returned.
+ *
+ * Return: the media type based on the PHY type, as an enum value.
+ */
+static enum ixgbe_media_type
+ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
+{
+ struct ixgbe_link_status *hw_link_info;
+
+ if (!hw)
+ return ixgbe_media_type_unknown;
+
+ hw_link_info = &hw->link.link_info;
+ if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
+ /* If more than one media type is selected, report unknown */
+ return ixgbe_media_type_unknown;
+
+ if (hw_link_info->phy_type_low) {
+ /* 1G SGMII is a special case where some DA cable PHYs
+ * may show this as an option when it really shouldn't
+ * be since SGMII is meant to be between a MAC and a PHY
+ * in a backplane. Try to detect this case and handle it
+ */
+ if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
+ (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
+ hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
+ IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
+ return ixgbe_media_type_da;
+
+ switch (hw_link_info->phy_type_low) {
+ case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
+ return ixgbe_media_type_fiber;
+ case IXGBE_PHY_TYPE_LOW_100BASE_TX:
+ case IXGBE_PHY_TYPE_LOW_1000BASE_T:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_T:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_T:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_T:
+ return ixgbe_media_type_copper;
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
+ return ixgbe_media_type_da;
+ case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
+ case IXGBE_PHY_TYPE_LOW_2500BASE_X:
+ case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
+ case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
+ case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
+ return ixgbe_media_type_backplane;
+ }
+ } else {
+ switch (hw_link_info->phy_type_high) {
+ case IXGBE_PHY_TYPE_HIGH_10BASE_T:
+ return ixgbe_media_type_copper;
+ }
+ }
+ return ixgbe_media_type_unknown;
+}
+
+/**
+ * ixgbe_update_link_info - update status of the HW network link
+ * @hw: pointer to the HW struct
+ *
+ * Update the status of the HW network link.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_update_link_info(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
+ struct ixgbe_link_status *li;
+ s32 status;
+
+ if (!hw)
+ return IXGBE_ERR_PARAM;
+
+ li = &hw->link.link_info;
+
+ status = ixgbe_aci_get_link_info(hw, true, NULL);
+ if (status)
+ return status;
+
+ if (li->link_info & IXGBE_ACI_MEDIA_AVAILABLE) {
+ pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
+ ixgbe_malloc(hw, sizeof(*pcaps));
+ if (!pcaps)
+ return IXGBE_ERR_OUT_OF_MEM;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ pcaps);
+
+ if (status == IXGBE_SUCCESS)
+ memcpy(li->module_type, &pcaps->module_type,
+ sizeof(li->module_type));
+
+ ixgbe_free(hw, pcaps);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_link_status - get status of the HW network link
+ * @hw: pointer to the HW struct
+ * @link_up: pointer to bool (true/false = linkup/linkdown)
+ *
+ * The variable link_up is true if the link is up, false if it is down.
+ * link_up is invalid if the returned status is non-zero. As a result of
+ * this call, link status reporting becomes enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
+{
+ s32 status = IXGBE_SUCCESS;
+
+ if (!hw || !link_up)
+ return IXGBE_ERR_PARAM;
+
+ if (hw->link.get_link_info) {
+ status = ixgbe_update_link_info(hw);
+ if (status)
+ return status;
+ }
+
+ *link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
+
+ return status;
+}
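+
+/* Minimal polling sketch (illustrative): force a refresh of the cached
+ * link info, then test the link state.
+ *
+ * bool link_up;
+ *
+ * hw->link.get_link_info = true;
+ * if (!ixgbe_get_link_status(hw, &link_up) && link_up)
+ * ...link is up...
+ */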
+
+/**
+ * ixgbe_aci_get_link_info - get the link status
+ * @hw: pointer to the HW struct
+ * @ena_lse: enable/disable LinkStatusEvent reporting
+ * @link: pointer to link status structure - optional
+ *
+ * Get the current Link Status using ACI command (0x607).
+ * The current link can be optionally provided to update
+ * the status.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link)
+{
+ struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
+ struct ixgbe_aci_cmd_get_link_status *resp;
+ struct ixgbe_link_status *li_old, *li;
+ struct ixgbe_fc_info *hw_fc_info;
+ struct ixgbe_aci_desc desc;
+ bool tx_pause, rx_pause;
+ u8 cmd_flags;
+ s32 status;
+
+ if (!hw)
+ return IXGBE_ERR_PARAM;
+
+ li_old = &hw->link.link_info_old;
+ li = &hw->link.link_info;
+ hw_fc_info = &hw->fc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+ cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
+ resp = &desc.params.get_link_status;
+ resp->cmd_flags = cmd_flags;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ /* save off old link status information */
+ *li_old = *li;
+
+ /* update current link status information */
+ li->link_speed = IXGBE_LE16_TO_CPU(link_data.link_speed);
+ li->phy_type_low = IXGBE_LE64_TO_CPU(link_data.phy_type_low);
+ li->phy_type_high = IXGBE_LE64_TO_CPU(link_data.phy_type_high);
+ li->link_info = link_data.link_info;
+ li->link_cfg_err = link_data.link_cfg_err;
+ li->an_info = link_data.an_info;
+ li->ext_info = link_data.ext_info;
+ li->max_frame_size = IXGBE_LE16_TO_CPU(link_data.max_frame_size);
+ li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
+ li->topo_media_conflict = link_data.topo_media_conflict;
+ li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
+ IXGBE_ACI_CFG_PACING_TYPE_M);
+
+ /* update fc info */
+ tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
+ rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
+ if (tx_pause && rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_full;
+ else if (tx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_tx_pause;
+ else if (rx_pause)
+ hw_fc_info->current_mode = ixgbe_fc_rx_pause;
+ else
+ hw_fc_info->current_mode = ixgbe_fc_none;
+
+ li->lse_ena = !!(resp->cmd_flags & IXGBE_ACI_LSE_IS_ENABLED);
+
+ /* save link status information */
+ if (link)
+ *link = *li;
+
+ /* flag cleared so calling functions don't call AQ again */
+ hw->link.get_link_info = false;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_aci_set_event_mask - set event mask
+ * @hw: pointer to the HW struct
+ * @port_num: port number of the physical function
+ * @mask: event mask to be set
+ *
+ * Set the event mask using ACI command (0x0613).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
+{
+ struct ixgbe_aci_cmd_set_event_mask *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_event_mask;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
+
+ cmd->event_mask = IXGBE_CPU_TO_LE16(mask);
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_configure_lse - enable/disable link status events
+ * @hw: pointer to the HW struct
+ * @activate: bool value deciding if lse should be enabled or disabled
+ * @mask: event mask to be set; a set bit means deactivation of the
+ * corresponding event
+ *
+ * Set the event mask and then enable or disable link status events
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
+{
+ s32 rc;
+
+ rc = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
+ if (rc)
+ return rc;
+
+ /* Enable or disable link status event generation by FW */
+ rc = ixgbe_aci_get_link_info(hw, activate, NULL);
+ if (rc)
+ return rc;
+
+ return IXGBE_SUCCESS;
+}
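+
+/* Illustrative: enable link status events without masking any of them
+ * (a set bit in the mask deactivates the corresponding event, so a zero
+ * mask leaves all events active):
+ *
+ * status = ixgbe_configure_lse(hw, true, 0);
+ */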
+
+/**
+ * ixgbe_aci_get_netlist_node - get a node handle
+ * @hw: pointer to the hw struct
+ * @cmd: get_link_topo AQ structure
+ * @node_part_number: output node part number if node found
+ * @node_handle: output node handle parameter if node found
+ *
+ * Get the netlist node and assign it to
+ * the provided handle using ACI command (0x06E0).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle)
+{
+ struct ixgbe_aci_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
+ desc.params.get_link_topo = *cmd;
+
+ if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
+ return IXGBE_ERR_NOT_SUPPORTED;
+
+ if (node_handle)
+ *node_handle =
+ IXGBE_LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
+ if (node_part_number)
+ *node_part_number = desc.params.get_link_topo.node_part_num;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_find_netlist_node - find a node handle
+ * @hw: pointer to the hw struct
+ * @node_type_ctx: type of netlist node to look for
+ * @node_part_number: node part number to look for
+ * @node_handle: output parameter if node found - optional
+ *
+ * Find and return the node handle for a given node type and part number in the
+ * netlist. When found, IXGBE_SUCCESS is returned; otherwise
+ * IXGBE_ERR_NOT_SUPPORTED. If @node_handle is provided, it is set to the
+ * found node handle.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_find_netlist_node(struct ixgbe_hw *hw, u8 node_type_ctx,
+ u8 node_part_number, u16 *node_handle)
+{
+ struct ixgbe_aci_cmd_get_link_topo cmd;
+ u8 rec_node_part_number;
+ u16 rec_node_handle;
+ s32 status;
+ u8 idx;
+
+ for (idx = 0; idx < IXGBE_MAX_NETLIST_SIZE; idx++) {
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.addr.topo_params.node_type_ctx =
+ (node_type_ctx << IXGBE_ACI_LINK_TOPO_NODE_TYPE_S);
+ cmd.addr.topo_params.index = idx;
+
+ status = ixgbe_aci_get_netlist_node(hw, &cmd,
+ &rec_node_part_number,
+ &rec_node_handle);
+ if (status)
+ return status;
+
+ if (rec_node_part_number == node_part_number) {
+ if (node_handle)
+ *node_handle = rec_node_handle;
+ return IXGBE_SUCCESS;
+ }
+ }
+
+ return IXGBE_ERR_NOT_SUPPORTED;
+}
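+
+/* Illustrative lookup, assuming hypothetical NODE_TYPE and PART_NUM values
+ * (real values come from the IXGBE_ACI_LINK_TOPO_* definitions):
+ *
+ * u16 handle;
+ *
+ * if (ixgbe_find_netlist_node(hw, NODE_TYPE, PART_NUM, &handle) ==
+ * IXGBE_SUCCESS)
+ * ...a matching node exists and handle is valid...
+ */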
+
+/**
+ * ixgbe_aci_read_i2c - read I2C register value
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [7] - Repeated start,
+ * bits [6:5] data offset size,
+ * bit [4] - I2C address type, bits [3:0] - data size
+ * to read (0-16 bytes)
+ * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
+ *
+ * Read the value of the I2C pin register using ACI command (0x06E2).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data)
+{
+ struct ixgbe_aci_desc desc = { 0 };
+ struct ixgbe_aci_cmd_i2c *cmd;
+ u8 data_size;
+ s32 status;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_i2c);
+ cmd = &desc.params.read_write_i2c;
+
+ if (!data)
+ return IXGBE_ERR_PARAM;
+
+ data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
+ IXGBE_ACI_I2C_DATA_SIZE_S;
+
+ cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
+ cmd->topo_addr = topo_addr;
+ cmd->i2c_params = params;
+ cmd->i2c_addr = addr;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!status) {
+ struct ixgbe_aci_cmd_read_i2c_resp *resp;
+ u8 i;
+
+ resp = &desc.params.read_i2c_resp;
+ for (i = 0; i < data_size; i++) {
+ *data = resp->i2c_data[i];
+ data++;
+ }
+ }
+
+ return status;
+}
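+
+/* Illustrative params encoding: with no repeated start and default offset
+ * and address settings, reading a single byte only needs the data size
+ * field in bits [3:0], i.e. params = 0x01:
+ *
+ * u8 byte;
+ *
+ * status = ixgbe_aci_read_i2c(hw, topo_addr, bus_addr, addr, 0x01,
+ * &byte);
+ */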
+
+/**
+ * ixgbe_aci_write_i2c - write a value to I2C register
+ * @hw: pointer to the hw struct
+ * @topo_addr: topology address for a device to communicate with
+ * @bus_addr: 7-bit I2C bus address
+ * @addr: I2C memory address (I2C offset) with up to 16 bits
+ * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size
+ * to write (0-7 bytes)
+ * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
+ *
+ * Write a value to the I2C pin register using ACI command (0x06E3).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_write_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data)
+{
+ struct ixgbe_aci_desc desc = { 0 };
+ struct ixgbe_aci_cmd_i2c *cmd;
+ u8 i, data_size;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_i2c);
+ cmd = &desc.params.read_write_i2c;
+
+ data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
+ IXGBE_ACI_I2C_DATA_SIZE_S;
+
+ /* data_size limited to 4 */
+ if (data_size > 4)
+ return IXGBE_ERR_PARAM;
+
+ cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
+ cmd->topo_addr = topo_addr;
+ cmd->i2c_params = params;
+ cmd->i2c_addr = addr;
+
+ for (i = 0; i < data_size; i++) {
+ cmd->i2c_data[i] = *data;
+ data++;
+ }
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_set_port_id_led - set LED value for the given port
+ * @hw: pointer to the HW struct
+ * @orig_mode: set LED original mode
+ *
+ * Set LED value for the given port (0x06E9)
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode)
+{
+ struct ixgbe_aci_cmd_set_port_id_led *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_port_id_led;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led);
+
+ cmd->lport_num = (u8)hw->bus.func;
+ cmd->lport_num_valid = IXGBE_ACI_PORT_ID_PORT_NUM_VALID;
+
+ if (orig_mode)
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_ORIG;
+ else
+ cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_BLINK;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_set_gpio - set GPIO pin state
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO Number of the GPIO that needs to be set
+ * @value: SW-provided IO value to set in the LSB
+ *
+ * Set the GPIO pin state that is a part of the topology
+ * using ACI command (0x06EC).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool value)
+{
+ struct ixgbe_aci_cmd_gpio *cmd;
+ struct ixgbe_aci_desc desc;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_gpio);
+ cmd = &desc.params.read_write_gpio;
+ cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
+ cmd->gpio_num = pin_idx;
+ cmd->gpio_val = value ? 1 : 0;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_get_gpio - get GPIO pin state
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO Number of the GPIO that needs to be read
+ * @value: IO value read
+ *
+ * Get the value of a GPIO signal which is part of the topology
+ * using ACI command (0x06ED).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool *value)
+{
+ struct ixgbe_aci_cmd_gpio *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_gpio);
+ cmd = &desc.params.read_write_gpio;
+ cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
+ cmd->gpio_num = pin_idx;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (status)
+ return status;
+
+ *value = !!cmd->gpio_val;
+ return IXGBE_SUCCESS;
+}
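+
+/* Illustrative GPIO round trip, assuming gpio_handle was previously found
+ * via ixgbe_find_netlist_node() and pin 3 is valid on that controller:
+ *
+ * bool val;
+ *
+ * status = ixgbe_aci_set_gpio(hw, gpio_handle, 3, true);
+ * if (!status)
+ * status = ixgbe_aci_get_gpio(hw, gpio_handle, 3, &val);
+ */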
+
+/**
+ * ixgbe_aci_sff_eeprom - read/write SFF EEPROM
+ * @hw: pointer to the HW struct
+ * @lport: bits [7:0] = logical port, bit [8] = logical port valid
+ * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
+ * @mem_addr: I2C offset. Lower 8 bits for address, upper 8 bits zero padding.
+ * @page: QSFP page
+ * @page_bank_ctrl: configuration of SFF/CMIS paging and banking control
+ * @data: pointer to data buffer to be read/written to the I2C device.
+ * @length: 1-16 for read, 1 for write.
+ * @write: false for read, true for write.
+ *
+ * Read/write SFF EEPROM using ACI command (0x06EE).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_sff_eeprom(struct ixgbe_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 page_bank_ctrl, u8 *data,
+ u8 length, bool write)
+{
+ struct ixgbe_aci_cmd_sff_eeprom *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!data || (mem_addr & 0xff00))
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_sff_eeprom);
+ cmd = &desc.params.read_write_sff_param;
+ desc.flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+ cmd->lport_num = (u8)(lport & 0xff);
+ cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
+ cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(((bus_addr >> 1) &
+ IXGBE_ACI_SFF_I2CBUS_7BIT_M) |
+ ((page_bank_ctrl <<
+ IXGBE_ACI_SFF_PAGE_BANK_CTRL_S) &
+ IXGBE_ACI_SFF_PAGE_BANK_CTRL_M));
+ cmd->i2c_offset = IXGBE_CPU_TO_LE16(mem_addr & 0xff);
+ cmd->module_page = page;
+ if (write)
+ cmd->i2c_bus_addr |= IXGBE_CPU_TO_LE16(IXGBE_ACI_SFF_IS_WRITE);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, data, length);
+ return status;
+}
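+
+/* Illustrative: read the first 16 identification bytes of an SFP module
+ * EEPROM at the conventional 0xA0 bus address, page 0, default paging
+ * control, on the default logical port:
+ *
+ * u8 id[16];
+ *
+ * status = ixgbe_aci_sff_eeprom(hw, 0, 0xA0, 0, 0, 0, id, 16, false);
+ */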
+
+/**
+ * ixgbe_aci_prog_topo_dev_nvm - program Topology Device NVM
+ * @hw: pointer to the hardware structure
+ * @topo_params: pointer to structure storing topology parameters for a device
+ *
+ * Program Topology Device NVM using ACI command (0x06F2).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params)
+{
+ struct ixgbe_aci_cmd_prog_topo_dev_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.prog_topo_dev_nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_prog_topo_dev_nvm);
+
+ memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_read_topo_dev_nvm - read Topology Device NVM
+ * @hw: pointer to the hardware structure
+ * @topo_params: pointer to structure storing topology parameters for a device
+ * @start_address: byte offset in the topology device NVM
+ * @data: pointer to data buffer
+ * @data_size: number of bytes to be read from the topology device NVM
+ *
+ * Read the Topology Device NVM using ACI command (0x06F3).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params,
+ u32 start_address, u8 *data, u8 data_size)
+{
+ struct ixgbe_aci_cmd_read_topo_dev_nvm *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ if (!data || data_size == 0 ||
+ data_size > IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
+ return IXGBE_ERR_PARAM;
+
+ cmd = &desc.params.read_topo_dev_nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_topo_dev_nvm);
+
+ desc.datalen = IXGBE_CPU_TO_LE16(data_size);
+ memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
+ cmd->start_address = IXGBE_CPU_TO_LE32(start_address);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (status)
+ return status;
+
+ memcpy(data, cmd->data_read, data_size);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
+ * @hw: pointer to the HW structure
+ * @access: NVM access type (read or write)
+ *
+ * Request NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_access_type access)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+ if ((fla & GLNVM_FLA_LOCKED_M) == 0)
+ return IXGBE_SUCCESS;
+
+ return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
+ IXGBE_NVM_TIMEOUT);
+}
+
+/**
+ * ixgbe_release_nvm - Generic request for releasing the NVM ownership
+ * @hw: pointer to the HW structure
+ *
+ * Release NVM ownership.
+ */
+void ixgbe_release_nvm(struct ixgbe_hw *hw)
+{
+ u32 fla;
+
+ /* Skip if we are in blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+ if ((fla & GLNVM_FLA_LOCKED_M) == 0)
+ return;
+
+ ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
+}
+
+/**
+ * ixgbe_aci_read_nvm - read NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be read (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @read_shadow_ram: tell if this is a shadow RAM read
+ *
+ * Read the NVM using ACI command (0x0701).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+
+ cmd = &desc.params.nvm;
+
+ if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
+
+ if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
+ cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = IXGBE_CPU_TO_LE16(length);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
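+
+/* Illustrative Shadow RAM read of one word at byte offset 0, bracketed by
+ * NVM ownership (see ixgbe_acquire_nvm()/ixgbe_release_nvm() above):
+ *
+ * __le16 word;
+ *
+ * status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ * if (!status) {
+ * status = ixgbe_aci_read_nvm(hw, 0, 0, sizeof(word), &word,
+ * true, true);
+ * ixgbe_release_nvm(hw);
+ * }
+ */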
+
+/**
+ * ixgbe_aci_erase_nvm - erase NVM sector
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ *
+ * Erase the NVM sector using the ACI command (0x0702).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+ s32 status;
+ __le16 len;
+
+ /* Read the module length from the Shadow RAM (module_typeid 0). The
+ * length word sits one word past the module pointer, so the word offset
+ * (module_typeid + 1) is converted to a byte offset. Both last_command
+ * and read_shadow_ram are set to true.
+ */
+ status = ixgbe_aci_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true,
+ true);
+ if (status)
+ return status;
+
+ cmd = &desc.params.nvm;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_erase);
+
+ cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
+ cmd->length = len;
+ cmd->offset_low = 0;
+ cmd->offset_high = 0;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_aci_update_nvm - update NVM
+ * @hw: pointer to the HW struct
+ * @module_typeid: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @command_flags: command parameters
+ *
+ * Update the NVM using the ACI command (0x0703).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+
+ cmd = &desc.params.nvm;
+
+ /* The highest byte of the offset must be zero. */
+ if (offset & 0xFF000000)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_write);
+
+ cmd->cmd_flags |= command_flags;
+
+ /* If this is the last command in a series, set the proper flag. */
+ if (last_command)
+ cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
+ cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
+ cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
+ cmd->offset_high = (offset >> 16) & 0xFF;
+ cmd->length = IXGBE_CPU_TO_LE16(length);
+
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, length);
+}
+
+/**
+ * ixgbe_aci_read_nvm_cfg - read an NVM config block
+ * @hw: pointer to the HW struct
+ * @cmd_flags: NVM access admin command bits
+ * @field_id: field or feature ID
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @elem_count: pointer to count of elements read by FW
+ *
+ * Reads a single or multiple feature/field ID and data using ACI command
+ * (0x0704).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_read_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ u16 field_id, void *data, u16 buf_size,
+ u16 *elem_count)
+{
+ struct ixgbe_aci_cmd_nvm_cfg *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.nvm_cfg;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_cfg_read);
+
+ cmd->cmd_flags = cmd_flags;
+ cmd->id = IXGBE_CPU_TO_LE16(field_id);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, data, buf_size);
+ if (!status && elem_count)
+ *elem_count = IXGBE_LE16_TO_CPU(cmd->count);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_write_nvm_cfg - write an NVM config block
+ * @hw: pointer to the HW struct
+ * @cmd_flags: NVM access admin command bits
+ * @data: buffer for result
+ * @buf_size: buffer size
+ * @elem_count: count of elements to be written
+ *
+ * Writes a single or multiple feature/field ID and data using ACI command
+ * (0x0705).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_write_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ void *data, u16 buf_size, u16 elem_count)
+{
+ struct ixgbe_aci_cmd_nvm_cfg *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.nvm_cfg;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_cfg_write);
+ desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
+
+ cmd->count = IXGBE_CPU_TO_LE16(elem_count);
+ cmd->cmd_flags = cmd_flags;
+
+ return ixgbe_aci_send_cmd(hw, &desc, data, buf_size);
+}
+
+/**
+ * ixgbe_nvm_validate_checksum - validate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Verify NVM PFA checksum validity using ACI command (0x0706).
+ * If the checksum verification failed, IXGBE_ERR_NVM_CHECKSUM is returned.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_nvm_checksum *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ cmd = &desc.params.nvm_checksum;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+ cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ ixgbe_release_nvm(hw);
+
+ if (!status &&
+ IXGBE_LE16_TO_CPU(cmd->checksum) !=
+ IXGBE_ACI_NVM_CHECKSUM_CORRECT) {
+ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
+ "Invalid Shadow Ram checksum");
+ status = IXGBE_ERR_NVM_CHECKSUM;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_nvm_recalculate_checksum - recalculate checksum
+ * @hw: pointer to the HW struct
+ *
+ * Recalculate NVM PFA checksum using ACI command (0x0706).
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_recalculate_checksum(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_nvm_checksum *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ cmd = &desc.params.nvm_checksum;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
+ cmd->flags = IXGBE_ACI_NVM_CHECKSUM_RECALC;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_nvm_write_activate - NVM activate write
+ * @hw: pointer to the HW struct
+ * @cmd_flags: flags for write activate command
+ * @response_flags: response indicators from firmware
+ *
+ * Update the control word with the required banks' validity bits
+ * and dumps the Shadow RAM to flash using ACI command (0x0707).
+ *
+ * cmd_flags controls which banks to activate, the preservation level to use
+ * when activating the NVM bank, and whether an EMP reset is required for
+ * activation.
+ *
+ * Note that the 16-bit cmd_flags value is split between two separate 1-byte
+ * flag values in the descriptor.
+ *
+ * On successful return of the firmware command, the response_flags variable
+ * is updated with the flags reported by firmware indicating certain status,
+ * such as whether EMP reset is enabled.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm *cmd;
+ s32 status;
+
+ cmd = &desc.params.nvm;
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_nvm_write_activate);
+
+ cmd->cmd_flags = LO_BYTE(cmd_flags);
+ cmd->offset_high = HI_BYTE(cmd_flags);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!status && response_flags)
+ *response_flags = cmd->cmd_flags;
+
+ return status;
+}
+
+/**
+ * ixgbe_get_flash_bank_offset - Get offset into requested flash bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive flash bank
+ * @module: the module to read from
+ *
+ * Based on the module, look up the module offset from the beginning of the
+ * flash.
+ *
+ * Return: the flash offset. Note that a value of zero is invalid and must be
+ * treated as an error.
+ */
+static u32 ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ enum ixgbe_flash_bank active_bank;
+ bool second_bank_active;
+ u32 offset, size;
+
+ switch (module) {
+ case E610_SR_1ST_NVM_BANK_PTR:
+ offset = banks->nvm_ptr;
+ size = banks->nvm_size;
+ active_bank = banks->nvm_bank;
+ break;
+ case E610_SR_1ST_OROM_BANK_PTR:
+ offset = banks->orom_ptr;
+ size = banks->orom_size;
+ active_bank = banks->orom_bank;
+ break;
+ case E610_SR_NETLIST_BANK_PTR:
+ offset = banks->netlist_ptr;
+ size = banks->netlist_size;
+ active_bank = banks->netlist_bank;
+ break;
+ default:
+ return 0;
+ }
+
+ switch (active_bank) {
+ case IXGBE_1ST_FLASH_BANK:
+ second_bank_active = false;
+ break;
+ case IXGBE_2ND_FLASH_BANK:
+ second_bank_active = true;
+ break;
+ default:
+ return 0;
+ }
+
+ /* The second flash bank is stored immediately following the first
+ * bank. Based on whether the 1st or 2nd bank is active, and whether
+ * we want the active or inactive bank, calculate the desired offset.
+ */
+ switch (bank) {
+ case IXGBE_ACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? size : 0);
+ case IXGBE_INACTIVE_FLASH_BANK:
+ return offset + (second_bank_active ? 0 : size);
+ }
+
+ return 0;
+}
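+
+/* Worked example (illustrative numbers): if the NVM module pointer is
+ * 0x10000 and the bank size is 0x8000, then with the first bank active the
+ * active bank starts at 0x10000 and the inactive bank at 0x18000; with the
+ * second bank active the two offsets swap.
+ */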
+
+/**
+ * ixgbe_read_flash_module - Read a word from one of the main NVM modules
+ * @hw: pointer to the HW structure
+ * @bank: which bank of the module to read
+ * @module: the module to read
+ * @offset: the offset into the module in bytes
+ * @data: storage for the word read from the flash
+ * @length: bytes of data to read
+ *
+ * Read data from the specified flash module. The bank parameter indicates
+ * whether to read from the active or the inactive bank of that module.
+ *
+ * The data is read using flat NVM access, and relies on the
+ * hw->flash.banks data being set up by ixgbe_determine_active_flash_banks()
+ * during initialization.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_flash_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u16 module, u32 offset, u8 *data, u32 length)
+{
+ s32 status;
+ u32 start;
+
+ start = ixgbe_get_flash_bank_offset(hw, bank, module);
+ if (!start)
+ return IXGBE_ERR_PARAM;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);
+
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_netlist_module - Read data from the netlist module area
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive module
+ * @offset: offset into the netlist to read from
+ * @data: storage for returned word value
+ *
+ * Read a word from the specified netlist bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_netlist_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ s32 status;
+
+ status = ixgbe_read_flash_module(hw, bank, E610_SR_NETLIST_BANK_PTR,
+ offset * sizeof(u16),
+ (u8 *)&data_local,
+ sizeof(u16));
+ if (!status)
+ *data = IXGBE_LE16_TO_CPU(data_local);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_nvm_module - Read from the active main NVM module
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from active or inactive NVM module
+ * @offset: offset into the NVM module to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the active NVM module. This includes the CSS
+ * header at the start of the NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_nvm_module(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ __le16 data_local;
+ s32 status;
+
+ status = ixgbe_read_flash_module(hw, bank, E610_SR_1ST_NVM_BANK_PTR,
+ offset * sizeof(u16),
+ (u8 *)&data_local,
+ sizeof(u16));
+ if (!status)
+ *data = IXGBE_LE16_TO_CPU(data_local);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_nvm_css_hdr_len - Read the CSS header length from the
+ * NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @hdr_len: storage for header length in words
+ *
+ * Read the CSS header length from the NVM CSS header, convert it to words,
+ * and then add the Authentication header size (also in words).
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 *hdr_len)
+{
+ u16 hdr_len_l, hdr_len_h;
+ u32 hdr_len_dword;
+ s32 status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L,
+ &hdr_len_l);
+ if (status)
+ return status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H,
+ &hdr_len_h);
+ if (status)
+ return status;
+
+	/* The CSS header length is in DWORDs, so convert it to words before
+	 * adding the authentication header size (which is in words).
+	 */
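+	/* For example (illustrative values): hdr_len_dword = 0x2C (44 DWORDs)
+	 * converts to 88 words, so *hdr_len becomes
+	 * 88 + IXGBE_NVM_AUTH_HEADER_LEN.
+	 */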
+ hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
+ *hdr_len = (hdr_len_dword * 2) + IXGBE_NVM_AUTH_HEADER_LEN;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank
+ * @hw: pointer to the HW structure
+ * @bank: whether to read from the active or inactive NVM module
+ * @offset: offset into the Shadow RAM copy to read, in words
+ * @data: storage for returned word value
+ *
+ * Read the specified word from the copy of the Shadow RAM found in the
+ * specified NVM module.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ u32 offset, u16 *data)
+{
+ u32 hdr_len;
+ s32 status;
+
+ status = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
+ if (status)
+ return status;
+
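+	/* The Shadow RAM copy is assumed to begin at the first 32-word
+	 * aligned offset after the CSS header, hence the rounding below.
+	 */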
+ hdr_len = ROUND_UP(hdr_len, 32);
+
+ return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data);
+}
+
+/**
+ * ixgbe_get_nvm_minsrevs - Get the minsrevs values from flash
+ * @hw: pointer to the HW struct
+ * @minsrevs: structure to store NVM and OROM minsrev values
+ *
+ * Read the Minimum Security Revision TLV and extract
+ * the revision values from the flash image
+ * into a readable structure for processing.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_nvm_minsrevs(struct ixgbe_hw *hw,
+ struct ixgbe_minsrev_info *minsrevs)
+{
+ struct ixgbe_aci_cmd_nvm_minsrev data;
+ s32 status;
+ u16 valid;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID,
+ 0, sizeof(data), &data,
+ true, false);
+
+ ixgbe_release_nvm(hw);
+
+ if (status)
+ return status;
+
+ valid = IXGBE_LE16_TO_CPU(data.validity);
+
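+	/* Each 32-bit minsrev value is stored as two little-endian 16-bit
+	 * halves; e.g. (illustrative) minsrev_h = 0x0001 and
+	 * minsrev_l = 0x0002 combine to 0x00010002.
+	 */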
+ /* Extract NVM minimum security revision */
+ if (valid & IXGBE_ACI_NVM_MINSREV_NVM_VALID) {
+ u16 minsrev_l = IXGBE_LE16_TO_CPU(data.nvm_minsrev_l);
+ u16 minsrev_h = IXGBE_LE16_TO_CPU(data.nvm_minsrev_h);
+
+ minsrevs->nvm = minsrev_h << 16 | minsrev_l;
+ minsrevs->nvm_valid = true;
+ }
+
+ /* Extract the OROM minimum security revision */
+ if (valid & IXGBE_ACI_NVM_MINSREV_OROM_VALID) {
+ u16 minsrev_l = IXGBE_LE16_TO_CPU(data.orom_minsrev_l);
+ u16 minsrev_h = IXGBE_LE16_TO_CPU(data.orom_minsrev_h);
+
+ minsrevs->orom = minsrev_h << 16 | minsrev_l;
+ minsrevs->orom_valid = true;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_update_nvm_minsrevs - Update minsrevs TLV data in flash
+ * @hw: pointer to the HW struct
+ * @minsrevs: minimum security revision information
+ *
+ * Update the NVM or Option ROM minimum security revision fields in the PFA
+ * area of the flash. Reads the minsrevs->nvm_valid and minsrevs->orom_valid
+ * fields to determine what update is being requested. If the valid bit is not
+ * set for that module, then the associated minsrev will be left as is.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_update_nvm_minsrevs(struct ixgbe_hw *hw,
+ struct ixgbe_minsrev_info *minsrevs)
+{
+ struct ixgbe_aci_cmd_nvm_minsrev data;
+ s32 status;
+
+	if (!minsrevs->nvm_valid && !minsrevs->orom_valid)
+		return IXGBE_ERR_PARAM;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ /* Get current data */
+ status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID, 0,
+ sizeof(data), &data, true, false);
+ if (status)
+ goto exit_release_res;
+
+ if (minsrevs->nvm_valid) {
+ data.nvm_minsrev_l = IXGBE_CPU_TO_LE16(minsrevs->nvm & 0xFFFF);
+ data.nvm_minsrev_h = IXGBE_CPU_TO_LE16(minsrevs->nvm >> 16);
+ data.validity |=
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_NVM_VALID);
+ }
+
+ if (minsrevs->orom_valid) {
+ data.orom_minsrev_l = IXGBE_CPU_TO_LE16(minsrevs->orom & 0xFFFF);
+ data.orom_minsrev_h = IXGBE_CPU_TO_LE16(minsrevs->orom >> 16);
+ data.validity |=
+ IXGBE_CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_OROM_VALID);
+ }
+
+ /* Update flash data */
+ status = ixgbe_aci_update_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID, 0,
+ sizeof(data), &data, false,
+ IXGBE_ACI_NVM_SPECIAL_UPDATE);
+ if (status)
+ goto exit_release_res;
+
+ /* Dump the Shadow RAM to the flash */
+ status = ixgbe_nvm_write_activate(hw, 0, NULL);
+
+exit_release_res:
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @srev: storage for security revision
+ *
+ * Read the security revision out of the CSS header of the selected NVM
+ * module bank.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank, u32 *srev)
+{
+ u16 srev_l, srev_h;
+ s32 status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l);
+ if (status)
+ return status;
+
+ status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h);
+ if (status)
+ return status;
+
+ *srev = srev_h << 16 | srev_l;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_nvm_ver_info - Read NVM version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @nvm: pointer to NVM info structure
+ *
+ * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
+ * in the nvm info structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_nvm_info *nvm)
+{
+ u16 eetrack_lo, eetrack_hi, ver;
+ s32 status;
+
+ status = ixgbe_read_nvm_sr_copy(hw, bank,
+ E610_SR_NVM_DEV_STARTER_VER, &ver);
+	if (status)
+		return status;
+
+ nvm->major = (ver & E610_NVM_VER_HI_MASK) >> E610_NVM_VER_HI_SHIFT;
+ nvm->minor = (ver & E610_NVM_VER_LO_MASK) >> E610_NVM_VER_LO_SHIFT;
+
+ status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_LO,
+ &eetrack_lo);
+	if (status)
+		return status;
+ status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_HI,
+ &eetrack_hi);
+	if (status)
+		return status;
+
+ nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
+
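+	/* A failure to read the security revision is treated as non-fatal:
+	 * IXGBE_SUCCESS is still returned and nvm->srev is simply left
+	 * unset.
+	 */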
+ status = ixgbe_get_nvm_srev(hw, bank, &nvm->srev);
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_inactive_nvm_ver - Read NVM version from the inactive bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Reads the NVM EETRACK ID, Map version, and security revision of the
+ * inactive NVM bank. Used to access version data for a pending update that
+ * has not yet been activated.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_active_nvm_ver - Read NVM version from the active bank
+ * @hw: pointer to the HW structure
+ * @nvm: storage for NVM version information
+ *
+ * Reads the NVM EETRACK ID, Map version, and security revision of the
+ * active NVM bank.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
+{
+ return ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, nvm);
+}
+
+/**
+ * ixgbe_get_netlist_info - Read the netlist version information
+ * @hw: pointer to the HW struct
+ * @bank: whether to read from the active or inactive flash bank
+ * @netlist: pointer to netlist version info structure
+ *
+ * Get the netlist version information from the requested bank. Reads the Link
+ * Topology section to find the Netlist ID block and extract the relevant
+ * information into the netlist version structure.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_netlist_info(struct ixgbe_hw *hw,
+ enum ixgbe_bank_select bank,
+ struct ixgbe_netlist_info *netlist)
+{
+ u16 module_id, length, node_count, i;
+ u16 *id_blk;
+ s32 status;
+
+ status = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET,
+ &module_id);
+ if (status)
+ return status;
+
+	if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID)
+		return IXGBE_ERR_NVM;
+
+ status = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN,
+ &length);
+ if (status)
+ return status;
+
+ /* sanity check that we have at least enough words to store the
+ * netlist ID block
+ */
+	if (length < IXGBE_NETLIST_ID_BLK_SIZE)
+		return IXGBE_ERR_NVM;
+
+ status = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT,
+ &node_count);
+ if (status)
+ return status;
+ node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M;
+
+ id_blk = (u16 *)ixgbe_calloc(hw, IXGBE_NETLIST_ID_BLK_SIZE,
+ sizeof(*id_blk));
+ if (!id_blk)
+ return IXGBE_ERR_NO_SPACE;
+
+ /* Read out the entire Netlist ID Block at once. */
+ status = ixgbe_read_flash_module(hw, bank, E610_SR_NETLIST_BANK_PTR,
+ IXGBE_NETLIST_ID_BLK_OFFSET(node_count) * sizeof(u16),
+ (u8 *)id_blk,
+ IXGBE_NETLIST_ID_BLK_SIZE * sizeof(u16));
+ if (status)
+ goto exit_error;
+
+ for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++)
+ id_blk[i] = IXGBE_LE16_TO_CPU(((__le16 *)id_blk)[i]);
+
+ netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW];
+ netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW];
+ netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW];
+ netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW];
+ netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER];
+	/* Read the leftmost 4 bytes of the SHA hash */
+ netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
+ id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];
+
+exit_error:
+ ixgbe_free(hw, id_blk);
+
+ return status;
+}
+
+/**
+ * ixgbe_get_inactive_netlist_ver - Read netlist version from the inactive bank
+ * @hw: pointer to the HW struct
+ * @netlist: pointer to netlist version info structure
+ *
+ * Read the netlist version data from the inactive netlist bank. Used to
+ * extract the version data of a pending flash update so that it can be
+ * displayed.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
+ struct ixgbe_netlist_info *netlist)
+{
+ return ixgbe_get_netlist_info(hw, IXGBE_INACTIVE_FLASH_BANK, netlist);
+}
+
+/**
+ * ixgbe_read_sr_pointer - Read the value of a Shadow RAM pointer word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM word to read
+ * @pointer: pointer value read from Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to a pointer value specified
+ * in bytes. This function assumes the specified offset is a valid pointer
+ * word.
+ *
+ * Each pointer word specifies whether it is stored in word size or 4KB
+ * sector size by using the highest bit. The reported pointer value will be in
+ * bytes, intended for flat NVM reads.
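+ *
+ * For example (illustrative): a pointer word of 0x8002 is in 4KB units, so
+ * the reported value is (0x8002 & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * 4096 =
+ * 0x2000 bytes; a pointer word of 0x0002 is in word units, giving 4 bytes.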
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_sr_pointer(struct ixgbe_hw *hw, u16 offset, u32 *pointer)
+{
+ s32 status;
+ u16 value;
+
+ status = ixgbe_read_ee_aci_E610(hw, offset, &value);
+ if (status)
+ return status;
+
+ /* Determine if the pointer is in 4KB or word units */
+ if (value & IXGBE_SR_NVM_PTR_4KB_UNITS)
+ *pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * 4 * 1024;
+ else
+ *pointer = value * 2;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: the word offset of the Shadow RAM to read
+ * @size: size value read from the Shadow RAM
+ *
+ * Read the given Shadow RAM word, and convert it to an area size value
+ * specified in bytes. This function assumes the specified offset is a valid
+ * area size word.
+ *
+ * Each area size word is specified in 4KB sector units. This function reports
+ * the size in bytes, intended for flat NVM reads.
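+ *
+ * For example (illustrative): an area size word of 0x0004 yields
+ * 4 * 4096 = 16384 bytes.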
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size)
+{
+ s32 status;
+ u16 value;
+
+ status = ixgbe_read_ee_aci_E610(hw, offset, &value);
+ if (status)
+ return status;
+
+ /* Area sizes are always specified in 4KB units */
+ *size = value * 4 * 1024;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_discover_flash_size - Discover the available flash size.
+ * @hw: pointer to the HW struct
+ *
+ * The device flash could be up to 16MB in size. However, it is possible that
+ * the actual size is smaller. Use bisection to determine the accessible size
+ * of flash memory.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_discover_flash_size(struct ixgbe_hw *hw)
+{
+ u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
+ s32 status;
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
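+	/* Bisection invariant: a read at min_size is assumed to succeed and a
+	 * read at max_size to fail with EINVAL. The loop halves the interval
+	 * until max_size is the first inaccessible offset, i.e. the size.
+	 */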
+ while ((max_size - min_size) > 1) {
+ u32 offset = (max_size + min_size) / 2;
+ u32 len = 1;
+ u8 data;
+
+ status = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
+ if (status == IXGBE_ERR_ACI_ERROR &&
+ hw->aci.last_status == IXGBE_ACI_RC_EINVAL) {
+ status = IXGBE_SUCCESS;
+ max_size = offset;
+ } else if (!status) {
+ min_size = offset;
+ } else {
+ /* an unexpected error occurred */
+ goto err_read_flat_nvm;
+ }
+ }
+
+ hw->flash.flash_size = max_size;
+
+err_read_flat_nvm:
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_determine_active_flash_banks - Discover active bank for each module
+ * @hw: pointer to the HW struct
+ *
+ * Read the Shadow RAM control word and determine which banks are active for
+ * the NVM, OROM, and Netlist modules. Also read and calculate the associated
+ * pointer and size. These values are then cached into the ixgbe_flash_info
+ * structure for later use in order to calculate the correct offset to read
+ * from the active module.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw)
+{
+ struct ixgbe_bank_info *banks = &hw->flash.banks;
+ u16 ctrl_word;
+ s32 status;
+
+ status = ixgbe_read_ee_aci_E610(hw, E610_SR_NVM_CTRL_WORD, &ctrl_word);
+	if (status)
+		return status;
+
+ /* Check that the control word indicates validity */
+	if ((ctrl_word & IXGBE_SR_CTRL_WORD_1_M) >> IXGBE_SR_CTRL_WORD_1_S !=
+	    IXGBE_SR_CTRL_WORD_VALID)
+		return IXGBE_ERR_CONFIG;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK))
+ banks->nvm_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->nvm_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK))
+ banks->orom_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->orom_bank = IXGBE_2ND_FLASH_BANK;
+
+ if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK))
+ banks->netlist_bank = IXGBE_1ST_FLASH_BANK;
+ else
+ banks->netlist_bank = IXGBE_2ND_FLASH_BANK;
+
+	status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_NVM_BANK_PTR,
+				       &banks->nvm_ptr);
+	if (status)
+		return status;
+
+	status = ixgbe_read_sr_area_size(hw, E610_SR_NVM_BANK_SIZE,
+					 &banks->nvm_size);
+	if (status)
+		return status;
+
+	status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_OROM_BANK_PTR,
+				       &banks->orom_ptr);
+	if (status)
+		return status;
+
+	status = ixgbe_read_sr_area_size(hw, E610_SR_OROM_BANK_SIZE,
+					 &banks->orom_size);
+	if (status)
+		return status;
+
+	status = ixgbe_read_sr_pointer(hw, E610_SR_NETLIST_BANK_PTR,
+				       &banks->netlist_ptr);
+	if (status)
+		return status;
+
+	status = ixgbe_read_sr_area_size(hw, E610_SR_NETLIST_BANK_SIZE,
+					 &banks->netlist_size);
+	if (status)
+		return status;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_nvm - initializes NVM setting
+ * @hw: pointer to the HW struct
+ *
+ * Read and populate NVM settings such as Shadow RAM size,
+ * max_timeout, and blank_nvm_mode
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_nvm(struct ixgbe_hw *hw)
+{
+ struct ixgbe_flash_info *flash = &hw->flash;
+	u32 fla, gens_stat;
+	s32 status;
+	u8 sr_size;
+
+ /* The SR size is stored regardless of the NVM programming mode
+ * as the blank mode may be used in the factory line.
+ */
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
+
+ /* Switching to words (sr_size contains power of 2) */
+ flash->sr_words = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
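+	/* e.g. (illustrative, assuming IXGBE_SR_WORDS_IN_1KB is 512):
+	 * sr_size = 4 gives BIT(4) = 16 (KB), i.e. 16 * 512 = 8192 words.
+	 */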
+
+ /* Check if we are in the normal or blank NVM programming mode */
+ fla = IXGBE_READ_REG(hw, GLNVM_FLA);
+ if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
+ flash->blank_nvm_mode = false;
+ } else {
+ /* Blank programming mode */
+ flash->blank_nvm_mode = true;
+ return IXGBE_ERR_NVM_BLANK_MODE;
+ }
+
+	status = ixgbe_discover_flash_size(hw);
+	if (status)
+		return status;
+
+	status = ixgbe_determine_active_flash_banks(hw);
+	if (status)
+		return status;
+
+	status = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+					&flash->nvm);
+	if (status)
+		return status;
+
+	/* Read the netlist version information. A failure here is treated as
+	 * non-fatal: flash->netlist is simply left unpopulated and
+	 * IXGBE_SUCCESS is still returned.
+	 */
+	status = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK,
+					&flash->netlist);
+
+	return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_sanitize_operate - Clear the user data
+ * @hw: pointer to the HW struct
+ *
+ * Clear user data from NVM using ACI command (0x070C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_sanitize_operate(struct ixgbe_hw *hw)
+{
+ s32 status;
+ u8 values;
+
+ u8 cmd_flags = IXGBE_ACI_SANITIZE_REQ_OPERATE |
+ IXGBE_ACI_SANITIZE_OPERATE_SUBJECT_CLEAR;
+
+ status = ixgbe_sanitize_nvm(hw, cmd_flags, &values);
+ if (status)
+ return status;
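+	/* The sanitize is considered failed if neither the host nor the BMC
+	 * clean completed, or if either clean completed without reporting
+	 * success.
+	 */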
+ if ((!(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
+ !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE)) ||
+ ((values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
+ !(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS)) ||
+ ((values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE) &&
+ !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS)))
+ return IXGBE_ERR_ACI_ERROR;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_sanitize_nvm - Sanitize NVM
+ * @hw: pointer to the HW struct
+ * @cmd_flags: flag to the ACI command
+ * @values: values returned from the command
+ *
+ * Sanitize NVM using ACI command (0x070C).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_sanitize_nvm(struct ixgbe_hw *hw, u8 cmd_flags, u8 *values)
+{
+ struct ixgbe_aci_desc desc;
+ struct ixgbe_aci_cmd_nvm_sanitization *cmd;
+ s32 status;
+
+ cmd = &desc.params.nvm_sanitization;
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_sanitization);
+ cmd->cmd_flags = cmd_flags;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (values)
+ *values = cmd->values;
+
+ return status;
+}
+
+/**
+ * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @data: word read from the Shadow RAM
+ *
+ * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ u32 bytes = sizeof(u16);
+ __le16 data_local;
+ s32 status;
+
+ status = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
+ (u8 *)&data_local, true);
+ if (status)
+ return status;
+
+ *data = IXGBE_LE16_TO_CPU(data_local);
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_sr_buf_aci - Reads Shadow RAM buf via ACI
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
+ * @words: (in) number of words to read; (out) number of words actually read
+ * @data: words read from the Shadow RAM
+ *
+ * Reads a buffer of 16 bit words from the Shadow RAM. NVM ownership must be
+ * acquired by the caller before reading and released afterwards.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
+ u16 *data)
+{
+ u32 bytes = *words * 2, i;
+ s32 status;
+
+ status = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
+
+ *words = bytes / 2;
+
+ for (i = 0; i < *words; i++)
+ data[i] = IXGBE_LE16_TO_CPU(((__le16 *)data)[i]);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
+ * @hw: pointer to the HW struct
+ * @offset: offset from beginning of NVM
+ * @length: (in) number of bytes to read; (out) number of bytes actually read
+ * @data: buffer to return data in (sized to fit the specified length)
+ * @read_shadow_ram: if true, read from shadow RAM instead of NVM
+ *
+ * Reads a portion of the NVM, as a flat memory space. This function correctly
+ * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
+ * from being exceeded in case of Shadow RAM read requests and ensures that no
+ * single read request exceeds the maximum 4KB read for a single admin command.
+ *
+ * Returns a status code on failure. Note that the data pointer may be
+ * partially updated if some reads succeed before a failure.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram)
+{
+ u32 inlen = *length;
+ u32 bytes_read = 0;
+ bool last_cmd;
+ s32 status;
+
+ *length = 0;
+
+ /* Verify the length of the read if this is for the Shadow RAM */
+	if (read_shadow_ram && ((offset + inlen) >
+	    (hw->eeprom.word_size * 2u)))
+		return IXGBE_ERR_PARAM;
+
+ do {
+ u32 read_size, sector_offset;
+
+ /* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
+ * Additionally, a read from the Shadow RAM may not cross over
+ * a sector boundary. Conveniently, the sector size is also 4KB.
+ */
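+		/* e.g. (illustrative): offset = 0x0F00 with inlen = 0x300 is
+		 * split into a 0x100 byte read up to the 4KB boundary, then
+		 * a 0x200 byte read from the next sector.
+		 */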
+ sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
+ read_size = MIN_T(u32,
+ IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
+ inlen - bytes_read);
+
+ last_cmd = !(bytes_read + read_size < inlen);
+
+ /* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
+ * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
+ * maximum size guarantees that it will fit within the 2 bytes.
+ */
+ status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
+ offset, (u16)read_size,
+ data + bytes_read, last_cmd,
+ read_shadow_ram);
+ if (status)
+ break;
+
+ bytes_read += read_size;
+ offset += read_size;
+ } while (!last_cmd);
+
+ *length = bytes_read;
+ return status;
+}
+
+/**
+ * ixgbe_check_sr_access_params - verify params for Shadow RAM R/W operations.
+ * @hw: pointer to the HW structure
+ * @offset: offset in words from module start
+ * @words: number of words to access
+ *
+ * Check if all the parameters are valid
+ * before performing any Shadow RAM read/write operations.
+ *
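+ * For example (illustrative, assuming IXGBE_SR_SECTOR_SIZE_IN_WORDS is
+ * 2048, i.e. one 4KB sector): an access at word offset 2040 for 16 words
+ * would cross the sector boundary at word 2048 and is rejected.
+ *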
+ * Return: the exit code of the operation.
+ * * - IXGBE_SUCCESS - success.
+ * * - IXGBE_ERR_PARAM - NVM error: the offset is beyond the SR limit,
+ *   more words were requested than the per-command limit, or the access
+ *   would cross a sector boundary.
+ */
+static s32 ixgbe_check_sr_access_params(struct ixgbe_hw *hw, u32 offset,
+ u16 words)
+{
+	if ((offset + words) > hw->eeprom.word_size)
+		return IXGBE_ERR_PARAM;
+
+	/* We can access only up to 4KB (one sector) in one Admin Command
+	 * write.
+	 */
+	if (words > IXGBE_SR_SECTOR_SIZE_IN_WORDS)
+		return IXGBE_ERR_PARAM;
+
+	/* A single access cannot spread over two sectors */
+	if (((offset + (words - 1)) / IXGBE_SR_SECTOR_SIZE_IN_WORDS) !=
+	    (offset / IXGBE_SR_SECTOR_SIZE_IN_WORDS))
+		return IXGBE_ERR_PARAM;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_write_sr_word_aci - Writes Shadow RAM word
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM word to write
+ * @data: word to write to the Shadow RAM
+ *
+ * Writes a 16 bit word to the Shadow RAM using the admin command.
+ * NVM ownership must be acquired before calling this function and released
+ * by the caller. To commit the Shadow RAM to NVM, the update checksum
+ * function should be called.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_sr_word_aci(struct ixgbe_hw *hw, u32 offset, const u16 *data)
+{
+ __le16 data_local = IXGBE_CPU_TO_LE16(*data);
+ s32 status;
+
+ status = ixgbe_check_sr_access_params(hw, offset, 1);
+ if (!status)
+ status = ixgbe_aci_update_nvm(hw, 0, BYTES_PER_WORD * offset,
+ BYTES_PER_WORD, &data_local,
+ false, 0);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_sr_buf_aci - Writes Shadow RAM buf
+ * @hw: pointer to the HW structure
+ * @offset: offset of the Shadow RAM buffer to write
+ * @words: number of words to write
+ * @data: words to write to the Shadow RAM
+ *
+ * Writes a buffer of 16 bit words to the Shadow RAM using the admin command.
+ * NVM ownership must be acquired before calling this function and released
+ * by the caller. To commit the Shadow RAM to NVM, the update checksum
+ * function should be called.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_sr_buf_aci(struct ixgbe_hw *hw, u32 offset, u16 words,
+ const u16 *data)
+{
+ __le16 *data_local;
+ s32 status;
+ void *vmem;
+ u32 i;
+
+ vmem = ixgbe_calloc(hw, words, sizeof(u16));
+ if (!vmem)
+ return IXGBE_ERR_OUT_OF_MEM;
+ data_local = (__le16 *)vmem;
+
+ for (i = 0; i < words; i++)
+ data_local[i] = IXGBE_CPU_TO_LE16(data[i]);
+
+ /* Here we will only write one buffer as the size of the modules
+ * mirrored in the Shadow RAM is always less than 4K.
+ */
+ status = ixgbe_check_sr_access_params(hw, offset, words);
+ if (!status)
+ status = ixgbe_aci_update_nvm(hw, 0, BYTES_PER_WORD * offset,
+ BYTES_PER_WORD * words,
+ data_local, false, 0);
+
+ ixgbe_free(hw, vmem);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_alternate_write - write to alternate structure
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be written
+ * @reg_val0: value to be written under 'reg_addr0'
+ * @reg_addr1: address of second dword to be written
+ * @reg_val1: value to be written under 'reg_addr1'
+ *
+ * Write one or two dwords to alternate structure using ACI command (0x0900).
+ * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_write(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 reg_val0, u32 reg_addr1, u32 reg_val1)
+{
+ struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.read_write_alt_direct;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_alt_direct);
+ cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
+ cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
+ cmd->dword0_value = IXGBE_CPU_TO_LE32(reg_val0);
+ cmd->dword1_value = IXGBE_CPU_TO_LE32(reg_val1);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_alternate_read - read from alternate structure
+ * @hw: pointer to the hardware structure
+ * @reg_addr0: address of first dword to be read
+ * @reg_val0: pointer for data read from 'reg_addr0'
+ * @reg_addr1: address of second dword to be read
+ * @reg_val1: pointer for data read from 'reg_addr1'
+ *
+ * Read one or two dwords from alternate structure using ACI command (0x0902).
+ * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
+ * If 'reg_val1' pointer is not passed then only register at 'reg_addr0'
+ * is read.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_read(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 *reg_val0, u32 reg_addr1, u32 *reg_val1)
+{
+ struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.read_write_alt_direct;
+
+ if (!reg_val0)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_alt_direct);
+ cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
+ cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ if (status == IXGBE_SUCCESS) {
+ *reg_val0 = IXGBE_LE32_TO_CPU(cmd->dword0_value);
+
+ if (reg_val1)
+ *reg_val1 = IXGBE_LE32_TO_CPU(cmd->dword1_value);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_alternate_write_done - check if writing to alternate structure
+ * is done
+ * @hw: pointer to the HW structure.
+ * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
+ * @reset_needed: indicates the SW should trigger GLOBAL reset
+ *
+ * Indicates to the FW that alternate structures have been changed.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_write_done(struct ixgbe_hw *hw, u8 bios_mode,
+ bool *reset_needed)
+{
+ struct ixgbe_aci_cmd_done_alt_write *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.done_alt_write;
+
+ if (!reset_needed)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_done_alt_write);
+ cmd->flags = bios_mode;
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+ if (!status)
+ *reset_needed = (IXGBE_LE16_TO_CPU(cmd->flags) &
+ IXGBE_ACI_RESP_RESET_NEEDED) != 0;
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_alternate_clear - clear alternate structure
+ * @hw: pointer to the HW structure.
+ *
+ * Clear the alternate structures of the port from which the function
+ * is called.
+ *
+ * Return: 0 on success and error code on failure.
+ */
+s32 ixgbe_aci_alternate_clear(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_clear_port_alt_write);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+
+ return status;
+}
+
+/**
+ * ixgbe_aci_get_internal_data - get internal FW/HW data
+ * @hw: pointer to the hardware structure
+ * @cluster_id: specific cluster to dump
+ * @table_id: table ID within cluster
+ * @start: index of line in the block to read
+ * @buf: dump buffer
+ * @buf_size: dump buffer size
+ * @ret_buf_size: return buffer size (returned by FW)
+ * @ret_next_cluster: next cluster to read (returned by FW)
+ * @ret_next_table: next block to read (returned by FW)
+ * @ret_next_index: next index to read (returned by FW)
+ *
+ * Get internal FW/HW data using ACI command (0xFF08) for debug purposes.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_get_internal_data(struct ixgbe_hw *hw, u16 cluster_id,
+ u16 table_id, u32 start, void *buf,
+ u16 buf_size, u16 *ret_buf_size,
+ u16 *ret_next_cluster, u16 *ret_next_table,
+ u32 *ret_next_index)
+{
+ struct ixgbe_aci_cmd_debug_dump_internals *cmd;
+ struct ixgbe_aci_desc desc;
+ s32 status;
+
+ cmd = &desc.params.debug_dump;
+
+ if (buf_size == 0 || !buf)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_debug_dump_internals);
+
+ cmd->cluster_id = IXGBE_CPU_TO_LE16(cluster_id);
+ cmd->table_id = IXGBE_CPU_TO_LE16(table_id);
+ cmd->idx = IXGBE_CPU_TO_LE32(start);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
+
+ if (!status) {
+ if (ret_buf_size)
+ *ret_buf_size = IXGBE_LE16_TO_CPU(desc.datalen);
+ if (ret_next_cluster)
+ *ret_next_cluster = IXGBE_LE16_TO_CPU(cmd->cluster_id);
+ if (ret_next_table)
+ *ret_next_table = IXGBE_LE16_TO_CPU(cmd->table_id);
+ if (ret_next_index)
+ *ret_next_index = IXGBE_LE32_TO_CPU(cmd->idx);
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_validate_nvm_rw_reg - Check that an NVM access request is valid
+ * @cmd: NVM access command structure
+ *
+ * Validates that an NVM access structure is a request to read or write a valid
+ * register offset. First validates that the module and flags are correct, and
+ * then ensures that the register offset is one of the accepted registers.
+ *
+ * Return: 0 if the register access is valid, out of range error code otherwise.
+ */
+static s32
+ixgbe_validate_nvm_rw_reg(struct ixgbe_nvm_access_cmd *cmd)
+{
+ u16 i;
+
+ switch (cmd->offset) {
+ case GL_HICR:
+ case GL_HICR_EN: /* Note, this register is read only */
+ case GL_FWSTS:
+ case GL_MNG_FWSM:
+ case GLNVM_GENS:
+ case GLNVM_FLA:
+ case GL_FWRESETCNT:
+ return 0;
+ default:
+ break;
+ }
+
+ for (i = 0; i <= GL_HIDA_MAX_INDEX; i++)
+ if (cmd->offset == (u32)GL_HIDA(i))
+ return 0;
+
+ for (i = 0; i <= GL_HIBA_MAX_INDEX; i++)
+ if (cmd->offset == (u32)GL_HIBA(i))
+ return 0;
+
+ /* All other register offsets are not valid */
+ return IXGBE_ERR_OUT_OF_RANGE;
+}
+
+/**
+ * ixgbe_nvm_access_read - Handle an NVM read request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command to process
+ * @data: storage for the register value read
+ *
+ * Process an NVM access request to read a register.
+ *
+ * Return: 0 if the register read is valid and successful,
+ * out of range error code otherwise.
+ */
+static s32 ixgbe_nvm_access_read(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data)
+{
+ s32 status;
+
+ /* Always initialize the output data, even on failure */
+ memset(&data->regval, 0, cmd->data_size);
+
+ /* Make sure this is a valid read/write access request */
+ status = ixgbe_validate_nvm_rw_reg(cmd);
+ if (status)
+ return status;
+
+ DEBUGOUT1("NVM access: reading register %08x\n", cmd->offset);
+
+ /* Read the register and store the contents in the data field */
+ data->regval = IXGBE_READ_REG(hw, cmd->offset);
+
+ return 0;
+}
+
+/**
+ * ixgbe_nvm_access_write - Handle an NVM write request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command to process
+ * @data: NVM access data to write
+ *
+ * Process an NVM access request to write a register.
+ *
+ * Return: 0 if the register write is valid and successful,
+ * out of range error code otherwise.
+ */
+static s32 ixgbe_nvm_access_write(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data)
+{
+ s32 status;
+
+ /* Make sure this is a valid read/write access request */
+ status = ixgbe_validate_nvm_rw_reg(cmd);
+ if (status)
+ return status;
+
+ /* Reject requests to write to read-only registers */
+ switch (cmd->offset) {
+ case GL_HICR_EN:
+ return IXGBE_ERR_OUT_OF_RANGE;
+ default:
+ break;
+ }
+
+ DEBUGOUT2("NVM access: writing register %08x with value %08x\n",
+ cmd->offset, data->regval);
+
+ /* Write the data field to the specified register */
+ IXGBE_WRITE_REG(hw, cmd->offset, data->regval);
+
+ return 0;
+}
+
+/**
+ * ixgbe_handle_nvm_access - Handle an NVM access request
+ * @hw: pointer to the HW struct
+ * @cmd: NVM access command info
+ * @data: pointer to read or return data
+ *
+ * Process an NVM access request. Read the command structure information and
+ * determine if it is valid. If not, report an error indicating the command
+ * was invalid.
+ *
+ * For valid commands, perform the necessary function, copying the data into
+ * the provided data buffer.
+ *
+ * Return: 0 if the nvm access request is valid and successful,
+ * error code otherwise.
+ */
+s32 ixgbe_handle_nvm_access(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data)
+{
+ switch (cmd->command) {
+ case IXGBE_NVM_CMD_READ:
+ return ixgbe_nvm_access_read(hw, cmd, data);
+ case IXGBE_NVM_CMD_WRITE:
+ return ixgbe_nvm_access_write(hw, cmd, data);
+ default:
+ return IXGBE_ERR_PARAM;
+ }
+}
+
+/**
+ * ixgbe_aci_set_health_status_config - Configure FW health events
+ * @hw: pointer to the HW struct
+ * @event_source: type of diagnostic events to enable
+ *
+ * Configure the health status event types that the firmware will send to this
+ * PF using ACI command (0xFF20). The supported event types are: PF-specific,
+ * all PFs, and global.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_aci_set_health_status_config(struct ixgbe_hw *hw, u8 event_source)
+{
+ struct ixgbe_aci_cmd_set_health_status_config *cmd;
+ struct ixgbe_aci_desc desc;
+
+ cmd = &desc.params.set_health_status_config;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc,
+ ixgbe_aci_opc_set_health_status_config);
+
+ cmd->event_source = event_source;
+
+ return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
+}
+
+/**
+ * ixgbe_init_ops_E610 - Inits func ptrs and MAC type
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the function pointers and assign the MAC type for E610.
+ * Does not touch the hardware.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ ret_val = ixgbe_init_ops_X550(hw);
+
+ /* MAC */
+ mac->ops.reset_hw = ixgbe_reset_hw_E610;
+ mac->ops.start_hw = ixgbe_start_hw_E610;
+ mac->ops.get_media_type = ixgbe_get_media_type_E610;
+ mac->ops.get_supported_physical_layer =
+ ixgbe_get_supported_physical_layer_E610;
+ mac->ops.get_san_mac_addr = NULL;
+ mac->ops.set_san_mac_addr = NULL;
+ mac->ops.get_wwn_prefix = NULL;
+ mac->ops.setup_link = ixgbe_setup_link_E610;
+ mac->ops.check_link = ixgbe_check_link_E610;
+ mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_E610;
+ mac->ops.setup_fc = ixgbe_setup_fc_E610;
+ mac->ops.fc_autoneg = ixgbe_fc_autoneg_E610;
+ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_E610;
+ mac->ops.disable_rx = ixgbe_disable_rx_E610;
+ mac->ops.setup_eee = ixgbe_setup_eee_E610;
+ mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_E610;
+ mac->ops.fw_rollback_mode = ixgbe_fw_rollback_mode_E610;
+ mac->ops.get_fw_tsam_mode = ixgbe_get_fw_tsam_mode_E610;
+ mac->ops.get_fw_version = ixgbe_aci_get_fw_ver;
+ mac->ops.get_nvm_version = ixgbe_get_active_nvm_ver;
+ mac->ops.get_thermal_sensor_data = NULL;
+ mac->ops.init_thermal_sensor_thresh = NULL;
+
+ /* PHY */
+ phy->ops.init = ixgbe_init_phy_ops_E610;
+ phy->ops.identify = ixgbe_identify_phy_E610;
+ phy->eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
+ IXGBE_LINK_SPEED_100_FULL |
+ IXGBE_LINK_SPEED_1GB_FULL;
+ phy->eee_speeds_advertised = phy->eee_speeds_supported;
+
+ /* Additional ops overrides for e610 to go here */
+ eeprom->ops.init_params = ixgbe_init_eeprom_params_E610;
+ eeprom->ops.read = ixgbe_read_ee_aci_E610;
+ eeprom->ops.read_buffer = ixgbe_read_ee_aci_buffer_E610;
+ eeprom->ops.write = ixgbe_write_ee_aci_E610;
+ eeprom->ops.write_buffer = ixgbe_write_ee_aci_buffer_E610;
+ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_E610;
+ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_E610;
+ eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_E610;
+ eeprom->ops.read_pba_string = ixgbe_read_pba_string_E610;
+
+ /* Initialize bus function number */
+ hw->mac.ops.set_lan_id(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_reset_hw_E610 - Perform hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks
+ * and clears all interrupts, and performs a reset.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_reset_hw_E610(struct ixgbe_hw *hw)
+{
+ u32 swfw_mask = hw->phy.phy_semaphore_mask;
+ u32 ctrl, i;
+ s32 status;
+
+ DEBUGFUNC("ixgbe_reset_hw_E610");
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ status = hw->mac.ops.stop_adapter(hw);
+ if (status != IXGBE_SUCCESS)
+ goto reset_hw_out;
+
+ /* flush pending Tx transactions */
+ ixgbe_clear_tx_pending(hw);
+
+ status = hw->phy.ops.init(hw);
+ if (status != IXGBE_SUCCESS)
+ DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
+ status);
+mac_reset_top:
+ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
+ if (status != IXGBE_SUCCESS) {
+ ERROR_REPORT2(IXGBE_ERROR_CAUTION,
+ "semaphore failed with %d", status);
+ return IXGBE_ERR_SWFW_SYNC;
+ }
+ ctrl = IXGBE_CTRL_RST;
+ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
+ IXGBE_WRITE_FLUSH(hw);
+ hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+ /* Poll for reset bit to self-clear indicating reset is complete */
+ for (i = 0; i < 10; i++) {
+ usec_delay(1);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+ if (!(ctrl & IXGBE_CTRL_RST_MASK))
+ break;
+ }
+
+ if (ctrl & IXGBE_CTRL_RST_MASK) {
+ status = IXGBE_ERR_RESET_FAILED;
+ ERROR_REPORT1(IXGBE_ERROR_POLLING,
+ "Reset polling failed to complete.\n");
+ }
+ msec_delay(100);
+
+ /*
+ * Double resets are required for recovery from certain error
+ * conditions. Between resets, it is necessary to stall to allow time
+ * for any pending HW events to complete.
+ */
+ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
+ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
+ goto mac_reset_top;
+ }
+
+ /* Set the Rx packet buffer size. */
+ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
+
+ /* Store the permanent mac address */
+ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+ /*
+ * Store MAC address from RAR0, clear receive address registers, and
+ * clear the multicast table. Also reset num_rar_entries to 128,
+ * since we modify this value when programming the SAN MAC address.
+ */
+ hw->mac.num_rar_entries = 128;
+ hw->mac.ops.init_rx_addrs(hw);
+
+reset_hw_out:
+ return status;
+}
+
+/**
+ * ixgbe_start_hw_E610 - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Gets the firmware version and, if the API version is compatible, starts
+ * the hardware using the generic start_hw function followed by the gen2
+ * start_hw function, and then performs revision-specific operations, if any.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_start_hw_E610(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_SUCCESS;
+
+ ret_val = hw->mac.ops.get_fw_version(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = ixgbe_start_hw_generic(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ goto out;
+
+ ixgbe_start_hw_gen2(hw);
+
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_get_media_type_E610 - Gets media type
+ * @hw: pointer to the HW struct
+ *
+ * In order to get the media type, the function gets the PHY capabilities
+ * and then uses them to identify the PHY type by checking phy_type_high
+ * and phy_type_low.
+ *
+ * Return: the type of media in form of ixgbe_media_type enum
+ * or ixgbe_media_type_unknown in case of an error.
+ */
+enum ixgbe_media_type ixgbe_get_media_type_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ u64 phy_mask = 0;
+ s32 rc;
+ u8 i;
+
+ rc = ixgbe_update_link_info(hw);
+	if (rc)
+		return ixgbe_media_type_unknown;
+
+ /* If there is no link but PHY (dongle) is available SW should use
+ * Get PHY Caps admin command instead of Get Link Status, find most
+ * significant bit that is set in PHY types reported by the command
+ * and use it to discover media type.
+ */
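+	/* e.g. (illustrative): phy_type_high = 0x5 has bits 0 and 2 set; the
+	 * scan below picks the most significant set bit, i.e. mask 0x4.
+	 */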
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
+ (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
+ /* Get PHY Capabilities */
+ rc = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+		if (rc)
+			return ixgbe_media_type_unknown;
+
+ /* Check if there is some bit set in phy_type_high */
+ for (i = 64; i > 0; i--) {
+ phy_mask = (u64)((u64)1 << (i - 1));
+ if ((pcaps.phy_type_high & phy_mask) != 0) {
+ /* If any bit is set treat it as PHY type */
+ hw->link.link_info.phy_type_high = phy_mask;
+ hw->link.link_info.phy_type_low = 0;
+ break;
+ }
+ phy_mask = 0;
+ }
+
+ /* If nothing found in phy_type_high search in phy_type_low */
+ if (phy_mask == 0) {
+ for (i = 64; i > 0; i--) {
+ phy_mask = (u64)((u64)1 << (i - 1));
+ if ((pcaps.phy_type_low & phy_mask) != 0) {
+ /* If any bit is set treat it as PHY type */
+ hw->link.link_info.phy_type_high = 0;
+ hw->link.link_info.phy_type_low = phy_mask;
+ break;
+ }
+ }
+ }
+	}
+
+ /* Based on link status or search above try to discover media type */
+ hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);
+
+ return hw->phy.media_type;
+}
+
+/**
+ * ixgbe_get_supported_physical_layer_E610 - Returns physical layer type
+ * @hw: pointer to hardware structure
+ *
+ * Determines the physical layer capabilities of the current configuration.
+ *
+ * Return: a bitmask of the supported physical layer types, or
+ * IXGBE_PHYSICAL_LAYER_UNKNOWN if they cannot be determined.
+ */
+u64 ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw *hw)
+{
+ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ u64 phy_type;
+ s32 rc;
+
+ rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc)
+ return IXGBE_PHYSICAL_LAYER_UNKNOWN;
+
+ phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_low);
+	if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_T)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_T)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_100BASE_TX)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_LR)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_LR;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_SR)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_SR;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_SX)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_SX;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_KX)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_KX;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_T)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_T;
+	if (phy_type & IXGBE_PHY_TYPE_LOW_5GBASE_T)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_5000BASE_T;
+
+	phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_high);
+	if (phy_type & IXGBE_PHY_TYPE_HIGH_10BASE_T)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
+
+ return physical_layer;
+}
+
+/**
+ * ixgbe_setup_link_E610 - Set up link
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait: true when waiting for completion is needed
+ *
+ * Set up the link with the specified speed.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait)
+{
+ /* Simply request FW to perform proper PHY setup */
+ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
+}
+
+/**
+ * ixgbe_check_link_E610 - Determine link and speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true when link is up
+ * @link_up_wait_to_complete: bool used to wait for link up or not
+ *
+ * Determine if the link is up and the current link speed
+ * using ACI command (0x0607).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_check_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete)
+{
+ s32 rc;
+ u32 i;
+
+ if (!speed || !link_up)
+ return IXGBE_ERR_PARAM;
+
+ /* Set get_link_info flag to ensure that fresh
+ * link information will be obtained from FW
+ * by sending Get Link Status admin command. */
+ hw->link.get_link_info = true;
+
+ /* Update link information in adapter context. */
+ rc = ixgbe_get_link_status(hw, link_up);
+ if (rc)
+ return rc;
+
+ /* Wait for link up if it was requested. */
+ if (link_up_wait_to_complete && *link_up == false) {
+ for (i = 0; i < hw->mac.max_link_up_time; i++) {
+ msec_delay(100);
+ hw->link.get_link_info = true;
+ rc = ixgbe_get_link_status(hw, link_up);
+ if (rc)
+ return rc;
+ if (*link_up)
+ break;
+ }
+ }
+
+ /* Use link information in adapter context updated by the call
+ * to ixgbe_get_link_status() to determine current link speed.
+ * Link speed information is valid only when link up was
+ * reported by FW. */
+ if (*link_up) {
+ switch (hw->link.link_info.link_speed) {
+ case IXGBE_ACI_LINK_SPEED_10MB:
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_100MB:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_1000MB:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_2500MB:
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_5GB:
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ break;
+ case IXGBE_ACI_LINK_SPEED_10GB:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ break;
+ }
+ } else {
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_link_capabilities_E610 - Determine link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: true when autoneg or autotry is enabled
+ *
+ * Determine speed and AN parameters of a link.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_link_capabilities_E610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg)
+{
+ if (!speed || !autoneg)
+ return IXGBE_ERR_PARAM;
+
+ *autoneg = true;
+ *speed = hw->phy.speeds_supported;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
+ * @hw: pointer to hardware structure
+ * @cfg: PHY configuration data to set FC mode
+ * @req_mode: FC mode to configure
+ *
+ * Configures PHY Flow Control according to the provided configuration.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode)
+{
+	struct ixgbe_aci_cmd_get_phy_caps_data *pcaps = NULL;
+ s32 status = IXGBE_SUCCESS;
+ u8 pause_mask = 0x0;
+
+ if (!cfg)
+ return IXGBE_ERR_PARAM;
+
+ switch (req_mode) {
+ case ixgbe_fc_auto:
+ {
+ pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
+ ixgbe_malloc(hw, sizeof(*pcaps));
+ if (!pcaps) {
+ status = IXGBE_ERR_OUT_OF_MEM;
+ goto out;
+ }
+
+ /* Query the value of FC that both the NIC and the attached
+ * media can do. */
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, pcaps);
+ if (status)
+ goto out;
+
+ pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+
+ break;
+ }
+ case ixgbe_fc_full:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_rx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
+ break;
+ case ixgbe_fc_tx_pause:
+ pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
+ break;
+ default:
+ break;
+ }
+
+ /* clear the old pause settings */
+ cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
+ IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
+
+ /* set the new capabilities */
+ cfg->caps |= pause_mask;
+
+out:
+ if (pcaps)
+ ixgbe_free(hw, pcaps);
+ return status;
+}
+
+/**
+ * ixgbe_setup_fc_E610 - Set up flow control
+ * @hw: pointer to hardware structure
+ *
+ * Set up flow control. This has to be done during init time.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_fc_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data cfg = { 0 };
+ s32 status;
+
+ /* Get the current PHY config */
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
+ if (status)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
+
+ /* Configure the set PHY data */
+ status = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
+ if (status)
+ return status;
+
+ /* If the capabilities have changed, then set the new config */
+ if (cfg.caps != pcaps.caps) {
+ cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ status = ixgbe_aci_set_phy_cfg(hw, &cfg);
+ if (status)
+ return status;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_fc_autoneg_E610 - Configure flow control
+ * @hw: pointer to hardware structure
+ *
+ * Configure Flow Control.
+ */
+void ixgbe_fc_autoneg_E610(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ /* Get current link status.
+ * Current FC mode will be stored in the hw context. */
+ status = ixgbe_aci_get_link_info(hw, false, NULL);
+	if (status)
+		goto out;
+
+ /* Check if the link is up */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)) {
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+ /* Check if auto-negotiation has completed */
+ if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED)) {
+ status = IXGBE_ERR_FC_NOT_NEGOTIATED;
+ goto out;
+ }
+
+out:
+ if (status == IXGBE_SUCCESS) {
+ hw->fc.fc_was_autonegged = true;
+ } else {
+ hw->fc.fc_was_autonegged = false;
+ hw->fc.current_mode = hw->fc.requested_mode;
+ }
+}
+
+/**
+ * ixgbe_set_fw_drv_ver_E610 - Send driver version to FW
+ * @hw: pointer to the HW structure
+ * @maj: driver version major number
+ * @minor: driver version minor number
+ * @build: driver version build number
+ * @sub: driver version sub build number
+ * @len: length of driver_ver string
+ * @driver_ver: driver string
+ *
+ * Send driver version number to Firmware using ACI command (0x0002).
+ *
+ * Return: the exit code of the operation.
+ * IXGBE_SUCCESS - OK
+ * IXGBE_ERR_PARAM - incorrect parameters were given
+ * IXGBE_ERR_ACI_ERROR - encountered an error during sending the command
+ * IXGBE_ERR_ACI_TIMEOUT - a timeout occurred
+ * IXGBE_ERR_OUT_OF_MEM - ran out of memory
+ */
+s32 ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw *hw, u8 maj, u8 minor, u8 build,
+ u8 sub, u16 len, const char *driver_ver)
+{
+ size_t limited_len = min(len, (u16)IXGBE_DRV_VER_STR_LEN_E610);
+ struct ixgbe_driver_ver dv;
+
+ DEBUGFUNC("ixgbe_set_fw_drv_ver_E610");
+
+ if (!len || !driver_ver)
+ return IXGBE_ERR_PARAM;
+
+ dv.major_ver = maj;
+ dv.minor_ver = minor;
+ dv.build_ver = build;
+ dv.subbuild_ver = sub;
+
+ memset(dv.driver_string, 0, IXGBE_DRV_VER_STR_LEN_E610);
+ memcpy(dv.driver_string, driver_ver, limited_len);
+
+ return ixgbe_aci_send_driver_ver(hw, &dv);
+}
+
+/**
+ * ixgbe_disable_rx_E610 - Disable RX unit
+ * @hw: pointer to hardware structure
+ *
+ * Disable the RX DMA unit on E610 using ACI command (0x000C), falling back
+ * to a register write if the command fails.
+ */
+void ixgbe_disable_rx_E610(struct ixgbe_hw *hw)
+{
+ u32 rxctrl;
+
+ DEBUGFUNC("ixgbe_disable_rx_E610");
+
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ u32 pfdtxgswc;
+ s32 status;
+
+ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
+ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
+ hw->mac.set_lben = true;
+ } else {
+ hw->mac.set_lben = false;
+ }
+
+ status = ixgbe_aci_disable_rxen(hw);
+
+ /* If we fail - disable RX using register write */
+ if (status) {
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
+ if (rxctrl & IXGBE_RXCTRL_RXEN) {
+ rxctrl &= ~IXGBE_RXCTRL_RXEN;
+ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
+ }
+ }
+ }
+}
+
+/**
+ * ixgbe_setup_eee_E610 - Enable/disable EEE support
+ * @hw: pointer to the HW structure
+ * @enable_eee: boolean flag to enable EEE
+ *
+ * Enables/disables EEE based on the enable_eee flag.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_eee_E610(struct ixgbe_hw *hw, bool enable_eee)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
+ u16 eee_cap = 0;
+ s32 status;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ if (enable_eee) {
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_100BASE_TX;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_T;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_KX;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_T;
+ if (phy_caps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_KR;
+ if (phy_caps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T)
+ eee_cap |= IXGBE_ACI_PHY_EEE_EN_10BASE_T;
+ }
+
+ /* Set EEE capability for particular PHY types */
+ phy_cfg.eee_cap = IXGBE_CPU_TO_LE16(eee_cap);
+
+ status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+
+ return status;
+}
+
+/**
+ * ixgbe_fw_recovery_mode_E610 - Check FW NVM recovery mode
+ * @hw: pointer to hardware structure
+ *
+ * Checks FW NVM recovery mode by
+ * reading the value of the dedicated register.
+ *
+ * Return: true if FW is in recovery mode, otherwise false.
+ */
+bool ixgbe_fw_recovery_mode_E610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
+
+ return !!(fwsm & GL_MNG_FWSM_FW_MODES_RECOVERY_M);
+}
+
+/**
+ * ixgbe_fw_rollback_mode_E610 - Check FW NVM Rollback
+ * @hw: pointer to hardware structure
+ *
+ * Checks FW NVM Rollback mode by reading the
+ * value of the dedicated register.
+ *
+ * Return: true if FW is in Rollback mode, otherwise false.
+ */
+bool ixgbe_fw_rollback_mode_E610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
+
+ return !!(fwsm & GL_MNG_FWSM_FW_MODES_ROLLBACK_M);
+}
+
+/**
+ * ixgbe_get_fw_tsam_mode_E610 - Check FW NVM Thermal Sensor Autonomous Mode
+ * @hw: pointer to hardware structure
+ *
+ * Checks Thermal Sensor Autonomous Mode by reading the
+ * value of the dedicated register.
+ *
+ * Return: true if FW is in TSAM, otherwise false.
+ */
+bool ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw *hw)
+{
+ u32 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_X550EM_a);
+
+ return !!(fwsm & IXGBE_FWSM_TS_ENABLED);
+}
+
+/**
+ * ixgbe_init_phy_ops_E610 - PHY specific init
+ * @hw: pointer to hardware structure
+ *
+ * Initialize any function pointers that were not able to be
+ * set during init_shared_code because the PHY type was not known.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_phy_ops_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mac_info *mac = &hw->mac;
+ struct ixgbe_phy_info *phy = &hw->phy;
+ s32 ret_val;
+
+ phy->ops.identify_sfp = ixgbe_identify_module_E610;
+ phy->ops.read_reg = NULL; /* PHY reg access is not required */
+ phy->ops.write_reg = NULL;
+ phy->ops.read_reg_mdi = NULL;
+ phy->ops.write_reg_mdi = NULL;
+ phy->ops.setup_link = ixgbe_setup_phy_link_E610;
+ phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_E610;
+ phy->ops.read_i2c_byte = NULL; /* disabled for E610 */
+ phy->ops.write_i2c_byte = NULL; /* disabled for E610 */
+ phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_E610;
+ phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_E610;
+ phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_E610;
+ phy->ops.i2c_bus_clear = NULL; /* do not use generic implementation */
+ phy->ops.check_overtemp = ixgbe_check_overtemp_E610;
+ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
+ phy->ops.set_phy_power = ixgbe_set_phy_power_E610;
+ else
+ phy->ops.set_phy_power = NULL;
+ phy->ops.enter_lplu = ixgbe_enter_lplu_E610;
+ phy->ops.handle_lasi = NULL; /* no implementation for E610 */
+ phy->ops.read_i2c_byte_unlocked = NULL; /* disabled for E610 */
+ phy->ops.write_i2c_byte_unlocked = NULL; /* disabled for E610 */
+
+ /* TODO: Set functions pointers based on device ID */
+
+ /* Identify the PHY */
+ ret_val = phy->ops.identify(hw);
+ if (ret_val != IXGBE_SUCCESS)
+ return ret_val;
+
+ /* TODO: Set functions pointers based on PHY type */
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_identify_phy_E610 - Identify PHY
+ * @hw: pointer to hardware structure
+ *
+ * Determine PHY type, supported speeds and PHY ID.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_identify_phy_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ s32 rc;
+
+ /* Set PHY type */
+ hw->phy.type = ixgbe_phy_fw;
+
+ rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
+ &pcaps);
+ if (rc)
+ return rc;
+
+ if (!(pcaps.module_compliance_enforcement &
+ IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
+ /* Handle lenient mode */
+ rc = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
+ &pcaps);
+ if (rc)
+ return rc;
+ }
+
+ /* Determine supported speeds */
+ hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
+
+ if (pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_LR ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
+
+ /* 2.5 and 5 Gbps link speeds must be excluded from the
+ * auto-negotiation set used during driver initialization due to
+ * compatibility issues with certain switches. Those issues do not
+ * exist in case of E610 2.5G SKU device (0x57b1).
+ */
+ if (!hw->phy.autoneg_advertised &&
+ hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
+
+ if (!hw->phy.autoneg_advertised &&
+ hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
+ hw->phy.autoneg_advertised = hw->phy.speeds_supported;
+
+ if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
+ pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
+ pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
+ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
+
+ /* Set PHY ID */
+ memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_identify_module_E610 - Identify SFP module type
+ * @hw: pointer to hardware structure
+ *
+ * Identify the SFP module type.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_identify_module_E610(struct ixgbe_hw *hw)
+{
+ bool media_available;
+ u8 module_type;
+ s32 rc;
+
+ rc = ixgbe_update_link_info(hw);
+ if (rc)
+ goto err;
+
+ media_available =
+ (hw->link.link_info.link_info &
+ IXGBE_ACI_MEDIA_AVAILABLE) ? true : false;
+
+ if (media_available) {
+ hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+
+ /* Get module type from hw context updated by ixgbe_update_link_info() */
+ module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
+
+ if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+ } else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
+ hw->phy.sfp_type = ixgbe_sfp_type_sr;
+ } else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
+ (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
+ hw->phy.sfp_type = ixgbe_sfp_type_lr;
+ }
+ rc = IXGBE_SUCCESS;
+ } else {
+ hw->phy.sfp_type = ixgbe_sfp_type_not_present;
+ rc = IXGBE_ERR_SFP_NOT_PRESENT;
+ }
+err:
+ return rc;
+}
+
+/**
+ * ixgbe_setup_phy_link_E610 - Sets up firmware-controlled PHYs
+ * @hw: pointer to hardware structure
+ *
+ * Set the parameters for the firmware-controlled PHYs.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_setup_phy_link_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
+ u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
+ u64 sup_phy_type_low, sup_phy_type_high;
+ s32 rc;
+
+ rc = ixgbe_aci_get_link_info(hw, false, NULL);
+ if (rc) {
+ goto err;
+ }
+
+ /* If media is not available get default config */
+ if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
+ rmode = IXGBE_ACI_REPORT_DFLT_CFG;
+
+ rc = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
+ if (rc) {
+ goto err;
+ }
+
+ sup_phy_type_low = pcaps.phy_type_low;
+ sup_phy_type_high = pcaps.phy_type_high;
+
+ /* Get Active configuration to avoid unintended changes */
+ rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &pcaps);
+ if (rc) {
+ goto err;
+ }
+ ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);
+
+ /* Set default PHY types for a given speed */
+ pcfg.phy_type_low = 0;
+ pcfg.phy_type_high = 0;
+
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
+ }
+ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
+ pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
+ pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
+ }
+
+ /* Mask the set values to avoid requesting unsupported link types */
+ pcfg.phy_type_low &= sup_phy_type_low;
+ pcfg.phy_type_high &= sup_phy_type_high;
+
+ if (pcfg.phy_type_high != pcaps.phy_type_high ||
+ pcfg.phy_type_low != pcaps.phy_type_low ||
+ pcfg.caps != pcaps.caps) {
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ rc = ixgbe_aci_set_phy_cfg(hw, &pcfg);
+ }
+
+err:
+ return rc;
+}
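
A usage sketch for the function above (driver context assumed, not part of this hunk): the advertised-speed mask set by the caller is what gets translated into the PHY type bits.

	/* Illustrative: advertise 1G and 10G only, then apply the PHY config. */
	hw->phy.autoneg_advertised = IXGBE_LINK_SPEED_1GB_FULL |
	    IXGBE_LINK_SPEED_10GB_FULL;
	if (ixgbe_setup_phy_link_E610(hw) != IXGBE_SUCCESS)
		DEBUGOUT("Failed to set up PHY link\n");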
+
+/**
+ * ixgbe_get_phy_firmware_version_E610 - Gets the PHY Firmware Version
+ * @hw: pointer to hardware structure
+ * @firmware_version: pointer to the PHY Firmware Version
+ *
+ * Determines PHY FW version based on response to Get PHY Capabilities
+ * admin command (0x0600).
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw *hw,
+ u16 *firmware_version)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
+ s32 status;
+
+ if (!firmware_version)
+ return IXGBE_ERR_PARAM;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG,
+ &pcaps);
+ if (status)
+ return status;
+
+ /* TODO: determine which bytes of the 8-byte phy_fw_ver
+ * field should be written to the 2-byte firmware_version
+ * output argument. */
+ memcpy(firmware_version, pcaps.phy_fw_ver, sizeof(u16));
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_i2c_sff8472_E610 - Reads 8 bit word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: byte offset at address 0xA2
+ * @sff8472_data: value read
+ *
+ * Performs byte read operation from SFP module's SFF-8472 data over I2C.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data)
+{
+ return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR2,
+ byte_offset, 0,
+ IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
+ sff8472_data, 1, false);
+}
+
+/**
+ * ixgbe_read_i2c_eeprom_E610 - Reads 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to read
+ * @eeprom_data: value read
+ *
+ * Performs byte read operation from SFP module's EEPROM over I2C interface.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data)
+{
+ return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
+ byte_offset, 0,
+ IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
+ eeprom_data, 1, false);
+}
+
+/**
+ * ixgbe_write_i2c_eeprom_E610 - Writes 8 bit EEPROM word over I2C interface
+ * @hw: pointer to hardware structure
+ * @byte_offset: EEPROM byte offset to write
+ * @eeprom_data: value to write
+ *
+ * Performs byte write operation to SFP module's EEPROM over I2C interface.
+ *
+ * Return: the exit code of the operation.
+ **/
+s32 ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data)
+{
+ return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
+ byte_offset, 0,
+ IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
+ &eeprom_data, 1, true);
+}
+
+/**
+ * ixgbe_check_overtemp_E610 - Check firmware-controlled PHYs for overtemp
+ * @hw: pointer to hardware structure
+ *
+ * Gets the link status and checks whether a PHY temperature alarm was
+ * detected.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_check_overtemp_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
+ struct ixgbe_aci_cmd_get_link_status *resp;
+ struct ixgbe_aci_desc desc;
+ s32 status = IXGBE_SUCCESS;
+
+ if (!hw)
+ return IXGBE_ERR_PARAM;
+
+ ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
+ resp = &desc.params.get_link_status;
+ resp->cmd_flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_LSE_NOP);
+
+ status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ if (link_data.ext_info & IXGBE_ACI_LINK_PHY_TEMP_ALARM) {
+ ERROR_REPORT1(IXGBE_ERROR_CAUTION,
+ "PHY Temperature Alarm detected");
+ status = IXGBE_ERR_OVERTEMP;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_set_phy_power_E610 - Control power for copper PHY
+ * @hw: pointer to hardware structure
+ * @on: true for on, false for off
+ *
+ * Set the power on/off of the PHY
+ * by getting its capabilities and setting the appropriate
+ * configuration parameters.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_set_phy_power_E610(struct ixgbe_hw *hw, bool on)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
+ s32 status;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ if (on) {
+ phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
+ } else {
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
+ }
+
+ /* PHY is already in requested power mode */
+ if (phy_caps.caps == phy_cfg.caps)
+ return IXGBE_SUCCESS;
+
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
+ phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
+
+ status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+
+ return status;
+}
+
+/**
+ * ixgbe_enter_lplu_E610 - Transition to low power states
+ * @hw: pointer to hardware structure
+ *
+ * Configures Low Power Link Up on transition to low power states
+ * (from D0 to non-D0). Link is required to enter LPLU, so avoid resetting
+ * the PHY immediately prior to entering LPLU.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_enter_lplu_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
+ struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
+ s32 status;
+
+ status = ixgbe_aci_get_phy_caps(hw, false,
+ IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
+ if (status != IXGBE_SUCCESS)
+ return status;
+
+ ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
+
+ phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
+
+ status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
+
+ return status;
+}
+
+/**
+ * ixgbe_init_eeprom_params_E610 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ *
+ * Initializes the EEPROM parameters ixgbe_eeprom_info within the
+ * ixgbe_hw struct in order to set up EEPROM access.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_init_eeprom_params_E610(struct ixgbe_hw *hw)
+{
+ struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+ u32 gens_stat;
+ u8 sr_size;
+
+ if (eeprom->type == ixgbe_eeprom_uninitialized) {
+ eeprom->type = ixgbe_flash;
+
+ gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
+ sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >>
+ GLNVM_GENS_SR_SIZE_S;
+
+ /* Switching to words (sr_size contains power of 2) */
+ eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
+
+ DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
+ eeprom->type, eeprom->word_size);
+ }
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_read_ee_aci_E610 - Read EEPROM word using the admin command.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ *
+ * Reads a 16 bit word from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with reading.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_word_aci(hw, offset, data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_read_ee_aci_buffer_E610 - Read EEPROM word(s) using admin commands.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @words: number of words
+ * @data: word(s) read from the EEPROM
+ *
+ * Reads 16 bit word(s) from the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with reading.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_aci_E610 - Write EEPROM word using the admin command.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Writes a 16 bit word to the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with writing.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ixgbe_write_sr_word_aci(hw, (u32)offset, &data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_write_ee_aci_buffer_E610 - Write EEPROM word(s) using admin commands.
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @words: number of words
+ * @data: word(s) to write to the EEPROM
+ *
+ * Writes 16 bit word(s) to the EEPROM using the ACI.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding with writing.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_write_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ixgbe_write_sr_buf_aci(hw, (u32)offset, words, data);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_E610 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ *
+ * Calculate SW Checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
+ * is customer specific and unknown. Therefore, this function skips the
+ * maximum possible size of VPD (1kB).
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the negative error code on error, or the 16-bit checksum
+ */
+s32 ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw *hw)
+{
+ bool nvm_acquired = false;
+ u16 pcie_alt_module = 0;
+ u16 checksum_local = 0;
+ u16 checksum = 0;
+ u16 vpd_module;
+ void *vmem;
+ s32 status;
+ u16 *data;
+ u16 i;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ vmem = ixgbe_calloc(hw, IXGBE_SR_SECTOR_SIZE_IN_WORDS, sizeof(u16));
+ if (!vmem)
+ return IXGBE_ERR_OUT_OF_MEM;
+ data = (u16 *)vmem;
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ goto ixgbe_calc_sr_checksum_exit;
+ nvm_acquired = true;
+
+ /* read pointer to VPD area */
+ status = ixgbe_read_sr_word_aci(hw, E610_SR_VPD_PTR, &vpd_module);
+ if (status)
+ goto ixgbe_calc_sr_checksum_exit;
+
+ /* read pointer to PCIe Alt Auto-load module */
+ status = ixgbe_read_sr_word_aci(hw, E610_SR_PCIE_ALT_AUTO_LOAD_PTR,
+ &pcie_alt_module);
+ if (status)
+ goto ixgbe_calc_sr_checksum_exit;
+
+ /* Calculate SW checksum that covers the whole 64kB shadow RAM
+ * except the VPD and PCIe ALT Auto-load modules
+ */
+ for (i = 0; i < hw->eeprom.word_size; i++) {
+ /* Read SR page */
+ if ((i % IXGBE_SR_SECTOR_SIZE_IN_WORDS) == 0) {
+ u16 words = IXGBE_SR_SECTOR_SIZE_IN_WORDS;
+
+ status = ixgbe_read_sr_buf_aci(hw, i, &words, data);
+ if (status != IXGBE_SUCCESS)
+ goto ixgbe_calc_sr_checksum_exit;
+ }
+
+ /* Skip Checksum word */
+ if (i == E610_SR_SW_CHECKSUM_WORD)
+ continue;
+ /* Skip VPD module (convert byte size to word count) */
+ if (i >= (u32)vpd_module &&
+ i < ((u32)vpd_module + E610_SR_VPD_SIZE_WORDS))
+ continue;
+ /* Skip PCIe ALT module (convert byte size to word count) */
+ if (i >= (u32)pcie_alt_module &&
+ i < ((u32)pcie_alt_module + E610_SR_PCIE_ALT_SIZE_WORDS))
+ continue;
+
+ checksum_local += data[i % IXGBE_SR_SECTOR_SIZE_IN_WORDS];
+ }
+
+ checksum = (u16)IXGBE_SR_SW_CHECKSUM_BASE - checksum_local;
+
+ixgbe_calc_sr_checksum_exit:
+ if (nvm_acquired)
+ ixgbe_release_nvm(hw);
+ ixgbe_free(hw, vmem);
+
+ if (status)
+ return status;
+
+ return (s32)checksum;
+}
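
The checksum relation used above can be stated as a small helper (hypothetical, not part of this commit): the stored word is chosen so that the 16-bit sum of all covered words plus the checksum equals IXGBE_SR_SW_CHECKSUM_BASE.

	/* Illustrative invariant check: e.g. a word sum of 0x1234 stores
	 * 0xBABA - 0x1234 = 0xA886, and 0x1234 + 0xA886 == 0xBABA. */
	static bool
	ixgbe_sr_checksum_matches(u16 sum_of_covered_words, u16 stored_checksum)
	{
		return ((u16)(sum_of_covered_words + stored_checksum) ==
		    IXGBE_SR_SW_CHECKSUM_BASE);
	}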
+
+/**
+ * ixgbe_update_eeprom_checksum_E610 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing EEPROM to Shadow RAM, software sends the admin command
+ * to recalculate and update EEPROM checksum and instructs the hardware
+ * to update the flash.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_update_eeprom_checksum_E610(struct ixgbe_hw *hw)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_nvm_recalculate_checksum(hw);
+ if (status)
+ return status;
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
+ if (status)
+ return status;
+
+ status = ixgbe_nvm_write_activate(hw, IXGBE_ACI_NVM_ACTIV_REQ_EMPR,
+ NULL);
+ ixgbe_release_nvm(hw);
+
+ return status;
+}
+
+/**
+ * ixgbe_validate_eeprom_checksum_E610 - Validate EEPROM checksum
+ * @hw: pointer to hardware structure
+ * @checksum_val: calculated checksum
+ *
+ * Performs checksum calculation and validates the EEPROM checksum. If the
+ * caller does not need checksum_val, the value can be NULL.
+ * If the EEPROM params are not initialized, the function
+ * initializes them before proceeding.
+ * The function acquires and then releases the NVM ownership.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw *hw, u16 *checksum_val)
+{
+ s32 status;
+
+ if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
+ status = ixgbe_init_eeprom_params(hw);
+ if (status)
+ return status;
+ }
+
+ status = ixgbe_nvm_validate_checksum(hw);
+
+ if (status)
+ return status;
+
+ if (checksum_val) {
+ u16 tmp_checksum;
+ status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
+ if (status)
+ return status;
+
+ status = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD,
+ &tmp_checksum);
+ ixgbe_release_nvm(hw);
+
+ if (!status)
+ *checksum_val = tmp_checksum;
+ }
+
+ return status;
+}
+
+/**
+ * ixgbe_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
+ * @hw: pointer to hardware structure
+ * @module_tlv: pointer to module TLV to return
+ * @module_tlv_len: pointer to module TLV length to return
+ * @module_type: module type requested
+ *
+ * Finds the requested sub module TLV type from the Preserved Field
+ * Area (PFA) and returns the TLV pointer and length. The caller can
+ * use these to read the variable length TLV value.
+ *
+ * Return: the exit code of the operation.
+ */
+static s32 ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv,
+ u16 *module_tlv_len, u16 module_type)
+{
+ u16 pfa_len, pfa_ptr, pfa_end_ptr;
+ u16 next_tlv;
+ s32 status;
+
+ status = ixgbe_read_ee_aci_E610(hw, E610_SR_PFA_PTR, &pfa_ptr);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+ status = ixgbe_read_ee_aci_E610(hw, pfa_ptr, &pfa_len);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+ /* Starting with first TLV after PFA length, iterate through the list
+ * of TLVs to find the requested one.
+ */
+ next_tlv = pfa_ptr + 1;
+ pfa_end_ptr = pfa_ptr + pfa_len;
+ while (next_tlv < pfa_end_ptr) {
+ u16 tlv_sub_module_type, tlv_len;
+
+ /* Read TLV type */
+ status = ixgbe_read_ee_aci_E610(hw, next_tlv,
+ &tlv_sub_module_type);
+ if (status != IXGBE_SUCCESS) {
+ break;
+ }
+ /* Read TLV length */
+ status = ixgbe_read_ee_aci_E610(hw, next_tlv + 1, &tlv_len);
+ if (status != IXGBE_SUCCESS) {
+ break;
+ }
+ if (tlv_sub_module_type == module_type) {
+ if (tlv_len) {
+ *module_tlv = next_tlv;
+ *module_tlv_len = tlv_len;
+ return IXGBE_SUCCESS;
+ }
+ return IXGBE_ERR_INVAL_SIZE;
+ }
+ /* Check next TLV, i.e. current TLV pointer + length + 2 words
+ * (for current TLV's type and length)
+ */
+ next_tlv = next_tlv + tlv_len + 2;
+ }
+ /* Module does not exist */
+ return IXGBE_ERR_DOES_NOT_EXIST;
+}
+
+/**
+ * ixgbe_read_pba_string_E610 - Reads part number string from NVM
+ * @hw: pointer to hardware structure
+ * @pba_num: stores the part number string from the NVM
+ * @pba_num_size: part number string buffer length
+ *
+ * Reads the part number string from the NVM.
+ *
+ * Return: the exit code of the operation.
+ */
+s32 ixgbe_read_pba_string_E610(struct ixgbe_hw *hw, u8 *pba_num,
+ u32 pba_num_size)
+{
+ u16 pba_tlv, pba_tlv_len;
+ u16 pba_word, pba_size;
+ s32 status;
+ u16 i;
+
+ status = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
+ E610_SR_PBA_BLOCK_PTR);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+
+ /* pba_size is the next word */
+ status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2), &pba_size);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+
+ if (pba_tlv_len < pba_size) {
+ return IXGBE_ERR_INVAL_SIZE;
+ }
+
+ /* Subtract one to get PBA word count (PBA Size word is included in
+ * total size)
+ */
+ pba_size--;
+ if (pba_num_size < (((u32)pba_size * 2) + 1)) {
+ return IXGBE_ERR_PARAM;
+ }
+
+ for (i = 0; i < pba_size; i++) {
+ status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2 + 1) + i,
+ &pba_word);
+ if (status != IXGBE_SUCCESS) {
+ return status;
+ }
+
+ pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
+ pba_num[(i * 2) + 1] = pba_word & 0xFF;
+ }
+ pba_num[(pba_size * 2)] = '\0';
+
+ return status;
+}
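
Caller-side sketch for the function above (buffer size is an assumption): pba_num_size must cover two characters per PBA word plus the NUL terminator, or IXGBE_ERR_PARAM is returned.

	/* Illustrative: 32 bytes covers PBA blocks of up to 15 words. */
	u8 pba[32];

	if (ixgbe_read_pba_string_E610(hw, pba, sizeof(pba)) == IXGBE_SUCCESS)
		DEBUGOUT1("PBA: %s\n", pba);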
diff --git a/sys/dev/ixgbe/ixgbe_e610.h b/sys/dev/ixgbe/ixgbe_e610.h
new file mode 100644
index 000000000000..94e600139499
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_e610.h
@@ -0,0 +1,224 @@
+/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
+
+ Copyright (c) 2025, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#ifndef _IXGBE_E610_H_
+#define _IXGBE_E610_H_
+
+#include "ixgbe_type.h"
+
+void ixgbe_init_aci(struct ixgbe_hw *hw);
+void ixgbe_shutdown_aci(struct ixgbe_hw *hw);
+s32 ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
+ void *buf, u16 buf_size);
+bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw);
+s32 ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
+ bool *pending);
+
+void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode);
+
+s32 ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw);
+s32 ixgbe_aci_send_driver_ver(struct ixgbe_hw *hw, struct ixgbe_driver_ver *dv);
+s32 ixgbe_aci_set_pf_context(struct ixgbe_hw *hw, u8 pf_id);
+
+s32 ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
+ enum ixgbe_aci_res_access_type access, u32 timeout);
+void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res);
+s32 ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
+ u32 *cap_count, enum ixgbe_aci_opc opc);
+s32 ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
+ struct ixgbe_hw_dev_caps *dev_caps);
+s32 ixgbe_discover_func_caps(struct ixgbe_hw* hw,
+ struct ixgbe_hw_func_caps* func_caps);
+s32 ixgbe_get_caps(struct ixgbe_hw *hw);
+s32 ixgbe_aci_disable_rxen(struct ixgbe_hw *hw);
+s32 ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
+ struct ixgbe_aci_cmd_get_phy_caps_data *pcaps);
+bool ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+s32 ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg);
+s32 ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link);
+s32 ixgbe_update_link_info(struct ixgbe_hw *hw);
+s32 ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up);
+s32 ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
+ struct ixgbe_link_status *link);
+s32 ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask);
+s32 ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask);
+
+s32 ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_get_link_topo *cmd,
+ u8 *node_part_number, u16 *node_handle);
+s32 ixgbe_find_netlist_node(struct ixgbe_hw *hw, u8 node_type_ctx,
+ u8 node_part_number, u16 *node_handle);
+s32 ixgbe_aci_read_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data);
+s32 ixgbe_aci_write_i2c(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr,
+ u16 bus_addr, __le16 addr, u8 params, u8 *data);
+
+s32 ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode);
+s32 ixgbe_aci_set_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool value);
+s32 ixgbe_aci_get_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool *value);
+s32 ixgbe_aci_sff_eeprom(struct ixgbe_hw *hw, u16 lport, u8 bus_addr,
+ u16 mem_addr, u8 page, u8 page_bank_ctrl, u8 *data,
+ u8 length, bool write);
+s32 ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params);
+s32 ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_link_topo_params *topo_params,
+ u32 start_address, u8 *data, u8 data_size);
+
+s32 ixgbe_acquire_nvm(struct ixgbe_hw *hw,
+ enum ixgbe_aci_res_access_type access);
+void ixgbe_release_nvm(struct ixgbe_hw *hw);
+
+s32 ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
+ u16 length, void *data, bool last_command,
+ bool read_shadow_ram);
+
+s32 ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid);
+s32 ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
+ u32 offset, u16 length, void *data,
+ bool last_command, u8 command_flags);
+
+s32 ixgbe_aci_read_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ u16 field_id, void *data, u16 buf_size,
+ u16 *elem_count);
+s32 ixgbe_aci_write_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
+ void *data, u16 buf_size, u16 elem_count);
+
+s32 ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw);
+s32 ixgbe_nvm_recalculate_checksum(struct ixgbe_hw *hw);
+
+s32 ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
+ u8 *response_flags);
+
+s32 ixgbe_get_nvm_minsrevs(struct ixgbe_hw *hw, struct ixgbe_minsrev_info *minsrevs);
+s32 ixgbe_update_nvm_minsrevs(struct ixgbe_hw *hw, struct ixgbe_minsrev_info *minsrevs);
+
+s32 ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
+s32 ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
+
+s32 ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw, struct ixgbe_netlist_info *netlist);
+s32 ixgbe_init_nvm(struct ixgbe_hw *hw);
+
+s32 ixgbe_sanitize_operate(struct ixgbe_hw *hw);
+s32 ixgbe_sanitize_nvm(struct ixgbe_hw *hw, u8 cmd_flags, u8 *values);
+
+s32 ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words, u16 *data);
+s32 ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
+ u8 *data, bool read_shadow_ram);
+
+s32 ixgbe_write_sr_word_aci(struct ixgbe_hw *hw, u32 offset, const u16 *data);
+s32 ixgbe_write_sr_buf_aci(struct ixgbe_hw *hw, u32 offset, u16 words, const u16 *data);
+
+s32 ixgbe_aci_alternate_write(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 reg_val0, u32 reg_addr1, u32 reg_val1);
+s32 ixgbe_aci_alternate_read(struct ixgbe_hw *hw, u32 reg_addr0,
+ u32 *reg_val0, u32 reg_addr1, u32 *reg_val1);
+s32 ixgbe_aci_alternate_write_done(struct ixgbe_hw *hw, u8 bios_mode,
+ bool *reset_needed);
+s32 ixgbe_aci_alternate_clear(struct ixgbe_hw *hw);
+
+s32 ixgbe_aci_get_internal_data(struct ixgbe_hw *hw, u16 cluster_id,
+ u16 table_id, u32 start, void *buf,
+ u16 buf_size, u16 *ret_buf_size,
+ u16 *ret_next_cluster, u16 *ret_next_table,
+ u32 *ret_next_index);
+
+s32 ixgbe_handle_nvm_access(struct ixgbe_hw *hw,
+ struct ixgbe_nvm_access_cmd *cmd,
+ struct ixgbe_nvm_access_data *data);
+
+s32 ixgbe_aci_set_health_status_config(struct ixgbe_hw *hw, u8 event_source);
+
+/* E610 operations */
+s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw);
+s32 ixgbe_reset_hw_E610(struct ixgbe_hw *hw);
+s32 ixgbe_start_hw_E610(struct ixgbe_hw *hw);
+enum ixgbe_media_type ixgbe_get_media_type_E610(struct ixgbe_hw *hw);
+u64 ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw *hw);
+s32 ixgbe_setup_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait);
+s32 ixgbe_check_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
+s32 ixgbe_get_link_capabilities_E610(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *autoneg);
+s32 ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
+ struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
+ enum ixgbe_fc_mode req_mode);
+s32 ixgbe_setup_fc_E610(struct ixgbe_hw *hw);
+void ixgbe_fc_autoneg_E610(struct ixgbe_hw *hw);
+s32 ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw *hw, u8 maj, u8 minor, u8 build,
+ u8 sub, u16 len, const char *driver_ver);
+void ixgbe_disable_rx_E610(struct ixgbe_hw *hw);
+s32 ixgbe_setup_eee_E610(struct ixgbe_hw *hw, bool enable_eee);
+bool ixgbe_fw_recovery_mode_E610(struct ixgbe_hw *hw);
+bool ixgbe_fw_rollback_mode_E610(struct ixgbe_hw *hw);
+bool ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw *hw);
+s32 ixgbe_init_phy_ops_E610(struct ixgbe_hw *hw);
+s32 ixgbe_identify_phy_E610(struct ixgbe_hw *hw);
+s32 ixgbe_identify_module_E610(struct ixgbe_hw *hw);
+s32 ixgbe_setup_phy_link_E610(struct ixgbe_hw *hw);
+s32 ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw *hw,
+ u16 *firmware_version);
+s32 ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *sff8472_data);
+s32 ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 *eeprom_data);
+s32 ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
+ u8 eeprom_data);
+s32 ixgbe_check_overtemp_E610(struct ixgbe_hw *hw);
+s32 ixgbe_set_phy_power_E610(struct ixgbe_hw *hw, bool on);
+s32 ixgbe_enter_lplu_E610(struct ixgbe_hw *hw);
+s32 ixgbe_init_eeprom_params_E610(struct ixgbe_hw *hw);
+s32 ixgbe_read_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 *data);
+s32 ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_write_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 data);
+s32 ixgbe_write_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
+ u16 words, u16 *data);
+s32 ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw *hw);
+s32 ixgbe_update_eeprom_checksum_E610(struct ixgbe_hw *hw);
+s32 ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw *hw, u16 *checksum_val);
+s32 ixgbe_read_pba_string_E610(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size);
+
+#endif /* _IXGBE_E610_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_osdep.c b/sys/dev/ixgbe/ixgbe_osdep.c
index 892924712c38..9bd9ce63b786 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.c
+++ b/sys/dev/ixgbe/ixgbe_osdep.c
@@ -114,3 +114,29 @@ ixgbe_link_speed_to_baudrate(ixgbe_link_speed speed)
return baudrate;
}
+
+void
+ixgbe_init_lock(struct ixgbe_lock *lock)
+{
+ mtx_init(&lock->mutex, "mutex",
+ "ixgbe ACI lock", MTX_DEF | MTX_DUPOK);
+}
+
+void
+ixgbe_acquire_lock(struct ixgbe_lock *lock)
+{
+ mtx_lock(&lock->mutex);
+}
+
+void
+ixgbe_release_lock(struct ixgbe_lock *lock)
+{
+ mtx_unlock(&lock->mutex);
+}
+
+void
+ixgbe_destroy_lock(struct ixgbe_lock *lock)
+{
+ if (mtx_initialized(&lock->mutex))
+ mtx_destroy(&lock->mutex);
+}
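
Lifecycle sketch for the wrappers above (the hw->aci.lock field name is an assumption based on the ACI additions elsewhere in this change):

	ixgbe_init_lock(&hw->aci.lock);      /* once, during attach */

	ixgbe_acquire_lock(&hw->aci.lock);   /* serialize each ACI command */
	/* ... build descriptor, send command, poll for completion ... */
	ixgbe_release_lock(&hw->aci.lock);

	ixgbe_destroy_lock(&hw->aci.lock);   /* once, during detach */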
diff --git a/sys/dev/ixgbe/ixgbe_osdep.h b/sys/dev/ixgbe/ixgbe_osdep.h
index cf7c578fd684..8cf1d13736ce 100644
--- a/sys/dev/ixgbe/ixgbe_osdep.h
+++ b/sys/dev/ixgbe/ixgbe_osdep.h
@@ -133,7 +133,9 @@ enum {
/* XXX these need to be revisited */
#define IXGBE_CPU_TO_LE16 htole16
#define IXGBE_CPU_TO_LE32 htole32
+#define IXGBE_LE16_TO_CPU le16toh
#define IXGBE_LE32_TO_CPU le32toh
+#define IXGBE_LE64_TO_CPU le64toh
#define IXGBE_LE32_TO_CPUS(x) *(x) = le32dec(x)
#define IXGBE_CPU_TO_BE16 htobe16
#define IXGBE_CPU_TO_BE32 htobe32
@@ -146,6 +148,7 @@ typedef int16_t s16;
typedef uint32_t u32;
typedef int32_t s32;
typedef uint64_t u64;
+typedef int64_t s64;
#ifndef __bool_true_false_are_defined
typedef boolean_t bool;
#endif
@@ -195,6 +198,11 @@ struct ixgbe_osdep
bus_space_handle_t mem_bus_space_handle;
};
+struct ixgbe_lock
+{
+ struct mtx mutex;
+};
+
/* These routines need struct ixgbe_hw declared */
struct ixgbe_hw;
device_t ixgbe_dev_from_hw(struct ixgbe_hw *hw);
@@ -222,4 +230,27 @@ extern void ixgbe_write_reg_array(struct ixgbe_hw *, u32, u32, u32);
#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, val) \
ixgbe_write_reg_array(a, reg, offset, val)
+void ixgbe_init_lock(struct ixgbe_lock *);
+void ixgbe_destroy_lock(struct ixgbe_lock *);
+void ixgbe_acquire_lock(struct ixgbe_lock *);
+void ixgbe_release_lock(struct ixgbe_lock *);
+
+static inline void *
+ixgbe_calloc(struct ixgbe_hw __unused *hw, size_t count, size_t size)
+{
+ return (malloc(count * size, M_DEVBUF, M_ZERO | M_NOWAIT));
+}
+
+static inline void *
+ixgbe_malloc(struct ixgbe_hw __unused *hw, size_t size)
+{
+ return (malloc(size, M_DEVBUF, M_ZERO | M_NOWAIT));
+}
+
+static inline void
+ixgbe_free(struct ixgbe_hw __unused *hw, void *addr)
+{
+ free(addr, M_DEVBUF);
+}
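
Usage sketch for the helpers above: M_NOWAIT allocations can fail, so callers must handle NULL and pair every allocation with ixgbe_free() (the word count here is illustrative).

	u16 *buf = (u16 *)ixgbe_calloc(hw, 64, sizeof(u16));

	if (buf == NULL)
		return IXGBE_ERR_OUT_OF_MEM;
	/* ... fill and consume buf ... */
	ixgbe_free(hw, buf);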
+
#endif /* _IXGBE_OSDEP_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_type.h b/sys/dev/ixgbe/ixgbe_type.h
index 91b46da72c75..0bbe7806d41d 100644
--- a/sys/dev/ixgbe/ixgbe_type.h
+++ b/sys/dev/ixgbe/ixgbe_type.h
@@ -74,6 +74,7 @@
*/
#include "ixgbe_osdep.h"
+#include "ixgbe_type_e610.h"
/* Override this by setting IOMEM in your ixgbe_osdep.h header */
#define IOMEM
@@ -150,12 +151,19 @@
#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD
#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE
#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0
+#define IXGBE_DEV_ID_E610_BACKPLANE 0x57AE
+#define IXGBE_DEV_ID_E610_SFP 0x57AF
+#define IXGBE_DEV_ID_E610_10G_T 0x57B0
+#define IXGBE_DEV_ID_E610_2_5G_T 0x57B1
+#define IXGBE_DEV_ID_E610_SGMII 0x57B2
#define IXGBE_DEV_ID_X550_VF_HV 0x1564
#define IXGBE_DEV_ID_X550_VF 0x1565
#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5
#define IXGBE_DEV_ID_X550EM_A_VF_HV 0x15B4
#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8
#define IXGBE_DEV_ID_X550EM_X_VF_HV 0x15A9
+#define IXGBE_DEV_ID_E610_VF 0x57AD
+#define IXGBE_SUBDEV_ID_E610_VF_HV 0x0001
#define IXGBE_CAT(r, m) IXGBE_##r##m
@@ -1969,6 +1977,7 @@ enum {
#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */
#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */
+#define IXGBE_EICR_FW_EVENT 0x00200000 /* Async FW event */
#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */
#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */
#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */
@@ -2004,6 +2013,7 @@ enum {
#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EICS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
@@ -2025,6 +2035,7 @@ enum {
#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMS_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */
#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
@@ -2047,6 +2058,7 @@ enum {
#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */
#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */
+#define IXGBE_EIMC_FW_EVENT IXGBE_EICR_FW_EVENT /* Async FW event */
#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */
#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */
#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */
@@ -2454,6 +2466,7 @@ enum {
#define IXGBE_82599_SERIAL_NUMBER_MAC_ADDR 0x11
#define IXGBE_X550_SERIAL_NUMBER_MAC_ADDR 0x04
+#define IXGBE_PCIE_MSIX_E610_CAPS 0xB2
#define IXGBE_PCIE_MSIX_82599_CAPS 0x72
#define IXGBE_MAX_MSIX_VECTORS_82599 0x40
#define IXGBE_PCIE_MSIX_82598_CAPS 0x62
@@ -2571,6 +2584,7 @@ enum {
#define IXGBE_PCI_DEVICE_STATUS 0xAA
#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020
#define IXGBE_PCI_LINK_STATUS 0xB2
+#define IXGBE_PCI_LINK_STATUS_E610 0x82
#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
#define IXGBE_PCI_LINK_WIDTH 0x3F0
#define IXGBE_PCI_LINK_WIDTH_1 0x10
@@ -2581,6 +2595,7 @@ enum {
#define IXGBE_PCI_LINK_SPEED_2500 0x1
#define IXGBE_PCI_LINK_SPEED_5000 0x2
#define IXGBE_PCI_LINK_SPEED_8000 0x3
+#define IXGBE_PCI_LINK_SPEED_16000 0x4
#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E
#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
@@ -3743,6 +3758,8 @@ enum ixgbe_mac_type {
ixgbe_mac_X550_vf,
ixgbe_mac_X550EM_x_vf,
ixgbe_mac_X550EM_a_vf,
+ ixgbe_mac_E610,
+ ixgbe_mac_E610_vf,
ixgbe_num_macs
};
@@ -3822,7 +3839,9 @@ enum ixgbe_media_type {
ixgbe_media_type_copper,
ixgbe_media_type_backplane,
ixgbe_media_type_cx4,
- ixgbe_media_type_virtual
+ ixgbe_media_type_virtual,
+ ixgbe_media_type_da,
+ ixgbe_media_type_aui
};
/* Flow Control Settings */
@@ -3831,6 +3850,7 @@ enum ixgbe_fc_mode {
ixgbe_fc_rx_pause,
ixgbe_fc_tx_pause,
ixgbe_fc_full,
+ ixgbe_fc_auto,
ixgbe_fc_default
};
@@ -3863,6 +3883,7 @@ enum ixgbe_bus_speed {
ixgbe_bus_speed_2500 = 2500,
ixgbe_bus_speed_5000 = 5000,
ixgbe_bus_speed_8000 = 8000,
+ ixgbe_bus_speed_16000 = 16000,
ixgbe_bus_speed_reserved
};
@@ -4007,6 +4028,7 @@ struct ixgbe_eeprom_operations {
s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
s32 (*update_checksum)(struct ixgbe_hw *);
s32 (*calc_checksum)(struct ixgbe_hw *);
+ s32 (*read_pba_string)(struct ixgbe_hw *, u8 *, u32);
};
struct ixgbe_mac_operations {
@@ -4118,6 +4140,10 @@ struct ixgbe_mac_operations {
void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap);
void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf);
bool (*fw_recovery_mode)(struct ixgbe_hw *hw);
+ bool (*fw_rollback_mode)(struct ixgbe_hw *hw);
+ bool (*get_fw_tsam_mode)(struct ixgbe_hw *hw);
+ s32 (*get_fw_version)(struct ixgbe_hw *hw);
+ s32 (*get_nvm_version)(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm);
};
struct ixgbe_phy_operations {
@@ -4162,6 +4188,9 @@ struct ixgbe_link_operations {
struct ixgbe_link_info {
struct ixgbe_link_operations ops;
u8 addr;
+ struct ixgbe_link_status link_info;
+ struct ixgbe_link_status link_info_old;
+ u8 get_link_info;
};
struct ixgbe_eeprom_info {
@@ -4233,6 +4262,9 @@ struct ixgbe_phy_info {
bool reset_if_overtemp;
bool qsfp_shared_i2c_bus;
u32 nw_mng_if_sel;
+ u64 phy_type_low;
+ u64 phy_type_high;
+ struct ixgbe_aci_cmd_set_phy_cfg_data curr_user_phy_cfg;
};
#include "ixgbe_mbx.h"
@@ -4261,6 +4293,22 @@ struct ixgbe_hw {
bool wol_enabled;
bool need_crosstalk_fix;
u32 fw_rst_cnt;
+ u8 api_branch;
+ u8 api_maj_ver;
+ u8 api_min_ver;
+ u8 api_patch;
+ u8 fw_branch;
+ u8 fw_maj_ver;
+ u8 fw_min_ver;
+ u8 fw_patch;
+ u32 fw_build;
+ struct ixgbe_aci_info aci;
+ struct ixgbe_flash_info flash;
+ struct ixgbe_hw_dev_caps dev_caps;
+ struct ixgbe_hw_func_caps func_caps;
+ struct ixgbe_fwlog_cfg fwlog_cfg;
+ bool fwlog_support_ena;
+ struct ixgbe_fwlog_ring fwlog_ring;
};
#define ixgbe_call_func(hw, func, params, error) \
@@ -4312,6 +4360,24 @@ struct ixgbe_hw {
#define IXGBE_ERR_MBX_NOMSG -42
#define IXGBE_ERR_TIMEOUT -43
+#define IXGBE_ERR_NOT_SUPPORTED -45
+#define IXGBE_ERR_OUT_OF_RANGE -46
+
+#define IXGBE_ERR_NVM -50
+#define IXGBE_ERR_NVM_CHECKSUM -51
+#define IXGBE_ERR_BUF_TOO_SHORT -52
+#define IXGBE_ERR_NVM_BLANK_MODE -53
+#define IXGBE_ERR_INVAL_SIZE -54
+#define IXGBE_ERR_DOES_NOT_EXIST -55
+
+#define IXGBE_ERR_ACI_ERROR -100
+#define IXGBE_ERR_ACI_DISABLED -101
+#define IXGBE_ERR_ACI_TIMEOUT -102
+#define IXGBE_ERR_ACI_BUSY -103
+#define IXGBE_ERR_ACI_NO_WORK -104
+#define IXGBE_ERR_ACI_NO_EVENTS -105
+#define IXGBE_ERR_FW_API_VER -106
+
#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
@@ -4540,5 +4606,6 @@ struct ixgbe_bypass_eeprom {
#define IXGBE_REQUEST_TASK_FDIR 0x08
#define IXGBE_REQUEST_TASK_PHY 0x10
#define IXGBE_REQUEST_TASK_LSC 0x20
+#define IXGBE_REQUEST_TASK_FWEVENT 0x40
#endif /* _IXGBE_TYPE_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_type_e610.h b/sys/dev/ixgbe/ixgbe_type_e610.h
new file mode 100644
index 000000000000..e300030c3ba4
--- /dev/null
+++ b/sys/dev/ixgbe/ixgbe_type_e610.h
@@ -0,0 +1,2278 @@
+/******************************************************************************
+ SPDX-License-Identifier: BSD-3-Clause
+
+ Copyright (c) 2025, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#ifndef _IXGBE_TYPE_E610_H_
+#define _IXGBE_TYPE_E610_H_
+
+
+/* Generic defines */
+#ifndef BIT
+#define BIT(a) (1UL << (a))
+#endif /* !BIT */
+#ifndef BIT_ULL
+#define BIT_ULL(a) (1ULL << (a))
+#endif /* !BIT_ULL */
+#ifndef BITS_PER_BYTE
+#define BITS_PER_BYTE 8
+#endif /* !BITS_PER_BYTE */
+#ifndef DIVIDE_AND_ROUND_UP
+#define DIVIDE_AND_ROUND_UP(a, b) (((a) + (b) - 1) / (b))
+#endif /* !DIVIDE_AND_ROUND_UP */
+
+#ifndef ROUND_UP
+/**
+ * ROUND_UP - round up to next arbitrary multiple (not a power of 2)
+ * @a: value to round up
+ * @b: arbitrary multiple
+ *
+ * Round up to the next multiple of the arbitrary b.
+ */
+#define ROUND_UP(a, b) ((b) * DIVIDE_AND_ROUND_UP((a), (b)))
+#endif /* !ROUND_UP */
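
Worked arithmetic for the two rounding macros above:

	/* DIVIDE_AND_ROUND_UP(10, 4) = (10 + 4 - 1) / 4 = 13 / 4 = 3
	 * ROUND_UP(10, 4)            = 4 * 3 = 12
	 * ROUND_UP(12, 4)            = 4 * 3 = 12  (already a multiple of 4)
	 */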
+
+#define MAKEMASK(mask, shift) ((mask) << (shift))
+
+#define BYTES_PER_WORD 2
+#define BYTES_PER_DWORD 4
+
+#ifndef BITS_PER_LONG
+#define BITS_PER_LONG 64
+#endif /* !BITS_PER_LONG */
+#ifndef BITS_PER_LONG_LONG
+#define BITS_PER_LONG_LONG 64
+#endif /* !BITS_PER_LONG_LONG */
+#undef GENMASK
+#define GENMASK(h, l) \
+ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+#undef GENMASK_ULL
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
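
Worked examples for the bit-range macros above (both bounds inclusive, given the 64-bit BITS_PER_LONG assumption stated earlier):

	/* GENMASK(7, 0)       = 0x00000000000000FFUL   (bits 0..7)
	 * GENMASK(15, 8)      = 0x000000000000FF00UL   (bits 8..15)
	 * GENMASK_ULL(63, 32) = 0xFFFFFFFF00000000ULL  (bits 32..63)
	 */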
+
+/* Data type manipulation macros. */
+#define HI_DWORD(x) ((u32)((((x) >> 16) >> 16) & 0xFFFFFFFF))
+#define LO_DWORD(x) ((u32)((x) & 0xFFFFFFFF))
+#define HI_WORD(x) ((u16)(((x) >> 16) & 0xFFFF))
+#define LO_WORD(x) ((u16)((x) & 0xFFFF))
+#define HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
+#define LO_BYTE(x) ((u8)((x) & 0xFF))
+
+#ifndef MIN_T
+#define MIN_T(_t, _a, _b) min((_t)(_a), (_t)(_b))
+#endif
+
+#define IS_ASCII(_ch) ((_ch) < 0x80)
+
+/**
+ * ixgbe_struct_size - size of struct with C99 flexible array member
+ * @ptr: pointer to structure
+ * @field: flexible array member (last member of the structure)
+ * @num: number of elements of that flexible array member
+ */
+#define ixgbe_struct_size(ptr, field, num) \
+ (sizeof(*(ptr)) + sizeof(*(ptr)->field) * (num))
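
An illustrative expansion of the macro above with a hypothetical flexible-array struct (not defined in this change):

	/* struct example { u16 type; u16 len; u16 words[]; };
	 *
	 * ixgbe_struct_size(ex, words, 8)
	 *   -> sizeof(*ex) + 8 * sizeof(u16)
	 *   == sizeof(struct example) + 16 bytes
	 */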
+
+/* General E610 defines */
+#define IXGBE_MAX_VSI 768
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define E610_SR_VPD_SIZE_WORDS 512
+#define E610_SR_PCIE_ALT_SIZE_WORDS 512
+
+/* Checksum and Shadow RAM pointers */
+#define E610_SR_NVM_DEV_STARTER_VER 0x18
+#define E610_NVM_VER_LO_SHIFT 0
+#define E610_NVM_VER_LO_MASK (0xff << E610_NVM_VER_LO_SHIFT)
+#define E610_NVM_VER_HI_SHIFT 12
+#define E610_NVM_VER_HI_MASK (0xf << E610_NVM_VER_HI_SHIFT)
+#define E610_SR_NVM_MAP_VER 0x29
+#define E610_SR_NVM_EETRACK_LO 0x2D
+#define E610_SR_NVM_EETRACK_HI 0x2E
+#define E610_SR_VPD_PTR 0x2F
+#define E610_SR_PCIE_ALT_AUTO_LOAD_PTR 0x3E
+#define E610_SR_SW_CHECKSUM_WORD 0x3F
+#define E610_SR_PFA_PTR 0x40
+#define E610_SR_1ST_NVM_BANK_PTR 0x42
+#define E610_SR_NVM_BANK_SIZE 0x43
+#define E610_SR_1ST_OROM_BANK_PTR 0x44
+#define E610_SR_OROM_BANK_SIZE 0x45
+#define E610_SR_NETLIST_BANK_PTR 0x46
+#define E610_SR_NETLIST_BANK_SIZE 0x47
+#define E610_SR_POINTER_TYPE_BIT BIT(15)
+#define E610_SR_POINTER_MASK 0x7fff
+#define E610_SR_HALF_4KB_SECTOR_UNITS 2048
+#define E610_GET_PFA_POINTER_IN_WORDS(offset) \
+ ((((offset) & E610_SR_POINTER_TYPE_BIT) == E610_SR_POINTER_TYPE_BIT) ? \
+ (((offset) & E610_SR_POINTER_MASK) * E610_SR_HALF_4KB_SECTOR_UNITS) : \
+ ((offset) & E610_SR_POINTER_MASK))
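
A worked decode of the macro above (values illustrative): bit 15 selects whether the stored pointer is already in words or in 4KB-half-sector units.

	/* offset = 0x0040 -> type bit clear: already in words -> 0x0040
	 * offset = 0x8002 -> type bit set: 0x0002 * 2048      -> 0x1000 words
	 */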
+
+/* Checksum and Shadow RAM pointers */
+#define E610_SR_NVM_CTRL_WORD 0x00
+#define E610_SR_PBA_BLOCK_PTR 0x16
+
+/* The Orom version topology */
+#define IXGBE_OROM_VER_PATCH_SHIFT 0
+#define IXGBE_OROM_VER_PATCH_MASK (0xff << IXGBE_OROM_VER_PATCH_SHIFT)
+#define IXGBE_OROM_VER_BUILD_SHIFT 8
+#define IXGBE_OROM_VER_BUILD_MASK (0xffff << IXGBE_OROM_VER_BUILD_SHIFT)
+#define IXGBE_OROM_VER_SHIFT 24
+#define IXGBE_OROM_VER_MASK (0xff << IXGBE_OROM_VER_SHIFT)
+
+/* CSS Header words */
+#define IXGBE_NVM_CSS_HDR_LEN_L 0x02
+#define IXGBE_NVM_CSS_HDR_LEN_H 0x03
+#define IXGBE_NVM_CSS_SREV_L 0x14
+#define IXGBE_NVM_CSS_SREV_H 0x15
+
+/* Length of Authentication header section in words */
+#define IXGBE_NVM_AUTH_HEADER_LEN 0x08
+
+/* The Netlist ID Block is located after all of the Link Topology nodes. */
+#define IXGBE_NETLIST_ID_BLK_SIZE 0x30
+#define IXGBE_NETLIST_ID_BLK_OFFSET(n) IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0004 + 2 * (n))
+
+/* netlist ID block field offsets (word offsets) */
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW 0x02
+#define IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH 0x03
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW 0x04
+#define IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH 0x05
+#define IXGBE_NETLIST_ID_BLK_TYPE_LOW 0x06
+#define IXGBE_NETLIST_ID_BLK_TYPE_HIGH 0x07
+#define IXGBE_NETLIST_ID_BLK_REV_LOW 0x08
+#define IXGBE_NETLIST_ID_BLK_REV_HIGH 0x09
+#define IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(n) (0x0A + (n))
+#define IXGBE_NETLIST_ID_BLK_CUST_VER 0x2F
+
+/* The Link Topology Netlist section is stored as a series of words. It is
+ * stored in the NVM as a TLV, with the first two words containing the type
+ * and length.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_MOD_ID 0x011B
+#define IXGBE_NETLIST_TYPE_OFFSET 0x0000
+#define IXGBE_NETLIST_LEN_OFFSET 0x0001
+
+/* The Link Topology section follows the TLV header. When reading the netlist
+ * using ixgbe_read_netlist_module, we need to account for the 2-word TLV
+ * header.
+ */
+#define IXGBE_NETLIST_LINK_TOPO_OFFSET(n) ((n) + 2)
+#define IXGBE_LINK_TOPO_MODULE_LEN IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0000)
+#define IXGBE_LINK_TOPO_NODE_COUNT IXGBE_NETLIST_LINK_TOPO_OFFSET(0x0001)
+#define IXGBE_LINK_TOPO_NODE_COUNT_M MAKEMASK(0x3FF, 0)
+
+/* Auxiliary field, mask and shift definition for Shadow RAM and NVM Flash */
+#define IXGBE_SR_CTRL_WORD_1_S 0x06
+#define IXGBE_SR_CTRL_WORD_1_M (0x03 << IXGBE_SR_CTRL_WORD_1_S)
+#define IXGBE_SR_CTRL_WORD_VALID 0x1
+#define IXGBE_SR_CTRL_WORD_OROM_BANK BIT(3)
+#define IXGBE_SR_CTRL_WORD_NETLIST_BANK BIT(4)
+#define IXGBE_SR_CTRL_WORD_NVM_BANK BIT(5)
+#define IXGBE_SR_NVM_PTR_4KB_UNITS BIT(15)
+
+/* These macros extract a particular part of the NVM version from the NVM
+ * Image Revision word: the major version, the minor version or the image ID.
+ */
+#define E610_NVM_MAJOR_VER(x) (((x) & 0xF000) >> 12)
+#define E610_NVM_MINOR_VER(x) ((x) & 0x00FF)
+
+/* Shadow RAM related */
+#define IXGBE_SR_SECTOR_SIZE_IN_WORDS 0x800
+#define IXGBE_SR_WORDS_IN_1KB 512
+/* Checksum should be calculated such that after adding all the words,
+ * including the checksum word itself, the sum should be 0xBABA.
+ */
+#define IXGBE_SR_SW_CHECKSUM_BASE 0xBABA
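+
+/*
+ * Editorial sketch (not driver code): verifying the Shadow RAM software
+ * checksum. Summing every SR word, including the checksum word itself,
+ * in 16-bit arithmetic must yield IXGBE_SR_SW_CHECKSUM_BASE. The word
+ * buffer below is assumed to hold the whole Shadow RAM.
+ */
+#if 0
+static bool
+sr_checksum_ok(const u16 *sr_words, size_t nwords)
+{
+ u16 sum = 0;
+ size_t i;
+
+ for (i = 0; i < nwords; i++)
+  sum += sr_words[i]; /* u16 addition wraps mod 2^16 */
+ return (sum == IXGBE_SR_SW_CHECKSUM_BASE);
+}
+#endif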
+
+/* Netlist */
+#define IXGBE_MAX_NETLIST_SIZE 10
+
+/* General registers */
+
+/* Firmware Status Register (GL_FWSTS) */
+#define GL_FWSTS 0x00083048 /* Reset Source: POR */
+#define GL_FWSTS_FWS0B_S 0
+#define GL_FWSTS_FWS0B_M MAKEMASK(0xFF, 0)
+#define GL_FWSTS_FWROWD_S 8
+#define GL_FWSTS_FWROWD_M BIT(8)
+#define GL_FWSTS_FWRI_S 9
+#define GL_FWSTS_FWRI_M BIT(9)
+#define GL_FWSTS_FWS1B_S 16
+#define GL_FWSTS_FWS1B_M MAKEMASK(0xFF, 16)
+#define GL_FWSTS_EP_PF0 BIT(24)
+#define GL_FWSTS_EP_PF1 BIT(25)
+
+/* Recovery mode values of Firmware Status 1 Byte (FWS1B) bitfield */
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_CORER_LEGACY 0x0B
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_GLOBR_LEGACY 0x0C
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_CORER 0x30
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_GLOBR 0x31
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_TRANSITION 0x32
+#define GL_FWSTS_FWS1B_RECOVERY_MODE_NVM 0x33
+
+/* Firmware Status (GL_MNG_FWSM) */
+#define GL_MNG_FWSM 0x000B6134 /* Reset Source: POR */
+#define GL_MNG_FWSM_FW_MODES_S 0
+#define GL_MNG_FWSM_FW_MODES_M MAKEMASK(0x7, 0)
+#define GL_MNG_FWSM_RSV0_S 2
+#define GL_MNG_FWSM_RSV0_M MAKEMASK(0xFF, 2)
+#define GL_MNG_FWSM_EEP_RELOAD_IND_S 10
+#define GL_MNG_FWSM_EEP_RELOAD_IND_M BIT(10)
+#define GL_MNG_FWSM_RSV1_S 11
+#define GL_MNG_FWSM_RSV1_M MAKEMASK(0xF, 11)
+#define GL_MNG_FWSM_RSV2_S 15
+#define GL_MNG_FWSM_RSV2_M BIT(15)
+#define GL_MNG_FWSM_PCIR_AL_FAILURE_S 16
+#define GL_MNG_FWSM_PCIR_AL_FAILURE_M BIT(16)
+#define GL_MNG_FWSM_POR_AL_FAILURE_S 17
+#define GL_MNG_FWSM_POR_AL_FAILURE_M BIT(17)
+#define GL_MNG_FWSM_RSV3_S 18
+#define GL_MNG_FWSM_RSV3_M BIT(18)
+#define GL_MNG_FWSM_EXT_ERR_IND_S 19
+#define GL_MNG_FWSM_EXT_ERR_IND_M MAKEMASK(0x3F, 19)
+#define GL_MNG_FWSM_RSV4_S 25
+#define GL_MNG_FWSM_RSV4_M BIT(25)
+#define GL_MNG_FWSM_RESERVED_11_S 26
+#define GL_MNG_FWSM_RESERVED_11_M MAKEMASK(0xF, 26)
+#define GL_MNG_FWSM_RSV5_S 30
+#define GL_MNG_FWSM_RSV5_M MAKEMASK(0x3, 30)
+
+/* FW mode indications */
+#define GL_MNG_FWSM_FW_MODES_DEBUG_M BIT(0)
+#define GL_MNG_FWSM_FW_MODES_RECOVERY_M BIT(1)
+#define GL_MNG_FWSM_FW_MODES_ROLLBACK_M BIT(2)
+
+/* Global NVM General Status Register */
+#define GLNVM_GENS 0x000B6100 /* Reset Source: POR */
+#define GLNVM_GENS_NVM_PRES_S 0
+#define GLNVM_GENS_NVM_PRES_M BIT(0)
+#define GLNVM_GENS_SR_SIZE_S 5
+#define GLNVM_GENS_SR_SIZE_M MAKEMASK(0x7, 5)
+#define GLNVM_GENS_BANK1VAL_S 8
+#define GLNVM_GENS_BANK1VAL_M BIT(8)
+#define GLNVM_GENS_ALT_PRST_S 23
+#define GLNVM_GENS_ALT_PRST_M BIT(23)
+#define GLNVM_GENS_FL_AUTO_RD_S 25
+#define GLNVM_GENS_FL_AUTO_RD_M BIT(25)
+
+/* Flash Access Register */
+#define GLNVM_FLA 0x000B6108 /* Reset Source: POR */
+#define GLNVM_FLA_LOCKED_S 6
+#define GLNVM_FLA_LOCKED_M BIT(6)
+
+/* Bit Bang registers */
+#define RDASB_MSGCTL 0x000B6820
+#define RDASB_MSGCTL_HDR_DWS_S 0
+#define RDASB_MSGCTL_EXP_RDW_S 8
+#define RDASB_MSGCTL_CMDV_M BIT(31)
+#define RDASB_RSPCTL 0x000B6824
+#define RDASB_RSPCTL_BAD_LENGTH_M BIT(30)
+#define RDASB_RSPCTL_NOT_SUCCESS_M BIT(31)
+#define RDASB_WHDR0 0x000B68F4
+#define RDASB_WHDR1 0x000B68F8
+#define RDASB_WHDR2 0x000B68FC
+#define RDASB_WHDR3 0x000B6900
+#define RDASB_WHDR4 0x000B6904
+#define RDASB_RHDR0 0x000B6AFC
+#define RDASB_RHDR0_RESPONSE_S 27
+#define RDASB_RHDR0_RESPONSE_M MAKEMASK(0x7, 27)
+#define RDASB_RDATA0 0x000B6B00
+#define RDASB_RDATA1 0x000B6B04
+
+/* SPI Registers */
+#define SPISB_MSGCTL 0x000B7020
+#define SPISB_MSGCTL_HDR_DWS_S 0
+#define SPISB_MSGCTL_EXP_RDW_S 8
+#define SPISB_MSGCTL_MSG_MODE_S 26
+#define SPISB_MSGCTL_TOKEN_MODE_S 28
+#define SPISB_MSGCTL_BARCLR_S 30
+#define SPISB_MSGCTL_CMDV_S 31
+#define SPISB_MSGCTL_CMDV_M BIT(31)
+#define SPISB_RSPCTL 0x000B7024
+#define SPISB_RSPCTL_BAD_LENGTH_M BIT(30)
+#define SPISB_RSPCTL_NOT_SUCCESS_M BIT(31)
+#define SPISB_WHDR0 0x000B70F4
+#define SPISB_WHDR0_DEST_SEL_S 12
+#define SPISB_WHDR0_OPCODE_SEL_S 16
+#define SPISB_WHDR0_TAG_S 24
+#define SPISB_WHDR1 0x000B70F8
+#define SPISB_WHDR2 0x000B70FC
+#define SPISB_RDATA 0x000B7300
+#define SPISB_WDATA 0x000B7100
+
+/* Firmware Reset Count register */
+#define GL_FWRESETCNT 0x00083100 /* Reset Source: POR */
+#define GL_FWRESETCNT_FWRESETCNT_S 0
+#define GL_FWRESETCNT_FWRESETCNT_M MAKEMASK(0xFFFFFFFF, 0)
+
+/* Admin Command Interface (ACI) registers */
+#define PF_HIDA(_i) (0x00085000 + ((_i) * 4))
+#define PF_HIDA_2(_i) (0x00085020 + ((_i) * 4))
+#define PF_HIBA(_i) (0x00084000 + ((_i) * 4))
+#define PF_HICR 0x00082048
+
+#define PF_HIDA_MAX_INDEX 15
+#define PF_HIBA_MAX_INDEX 1023
+
+#define PF_HICR_EN BIT(0)
+#define PF_HICR_C BIT(1)
+#define PF_HICR_SV BIT(2)
+#define PF_HICR_EV BIT(3)
+
+#define GL_HIDA(_i) (0x00082000 + ((_i) * 4))
+#define GL_HIDA_2(_i) (0x00082020 + ((_i) * 4))
+#define GL_HIBA(_i) (0x00081000 + ((_i) * 4))
+#define GL_HICR 0x00082040
+
+#define GL_HIDA_MAX_INDEX 15
+#define GL_HIBA_MAX_INDEX 1023
+
+#define GL_HICR_C BIT(1)
+#define GL_HICR_SV BIT(2)
+#define GL_HICR_EV BIT(3)
+
+#define GL_HICR_EN 0x00082044
+
+#define GL_HICR_EN_CHECK BIT(0)
+
+/* Admin Command Interface (ACI) defines */
+/* Defines that help manage the driver vs FW API checks.
+ */
+#define IXGBE_FW_API_VER_BRANCH 0x00
+#define IXGBE_FW_API_VER_MAJOR 0x01
+#define IXGBE_FW_API_VER_MINOR 0x07
+#define IXGBE_FW_API_VER_DIFF_ALLOWED 0x02
+
+#define IXGBE_ACI_DESC_SIZE 32
+#define IXGBE_ACI_DESC_SIZE_IN_DWORDS (IXGBE_ACI_DESC_SIZE / BYTES_PER_DWORD)
+
+#define IXGBE_ACI_MAX_BUFFER_SIZE 4096 /* Size in bytes */
+#define IXGBE_ACI_DESC_COOKIE_L_DWORD_OFFSET 3
+#define IXGBE_ACI_SEND_DELAY_TIME_MS 10
+#define IXGBE_ACI_SEND_MAX_EXECUTE 3
+/* [ms] timeout while waiting for a sync response */
+#define IXGBE_ACI_SYNC_RESPONSE_TIMEOUT 100000
+/* [ms] timeout while waiting for an async response */
+#define IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT 150000
+/* [ms] timeout while waiting for a resource release */
+#define IXGBE_ACI_RELEASE_RES_TIMEOUT 10000
+
+/* Timestamp spacing for Tools ACI: queue is active if spacing is within the range [LO..HI] */
+#define IXGBE_TOOLS_ACI_ACTIVE_STAMP_SPACING_LO 0
+#define IXGBE_TOOLS_ACI_ACTIVE_STAMP_SPACING_HI 200
+
+/* Timestamp spacing for Tools ACI: queue is expired if spacing is outside the range [LO..HI] */
+#define IXGBE_TOOLS_ACI_EXPIRED_STAMP_SPACING_LO -5
+#define IXGBE_TOOLS_ACI_EXPIRED_STAMP_SPACING_HI 205
+
+/* FW defined boundary for a large buffer, 4k >= Large buffer > 512 bytes */
+#define IXGBE_ACI_LG_BUF 512
+
+/* Flags sub-structure
+ * |0 |1 |2 |3 |4 |5 |6 |7 |8 |9 |10 |11 |12 |13 |14 |15 |
+ * |DD |CMP|ERR|VFE| * * RESERVED * * |LB |RD |VFC|BUF|SI |EI |FE |
+ */
+
+/* command flags and offsets */
+#define IXGBE_ACI_FLAG_DD_S 0
+#define IXGBE_ACI_FLAG_CMP_S 1
+#define IXGBE_ACI_FLAG_ERR_S 2
+#define IXGBE_ACI_FLAG_VFE_S 3
+#define IXGBE_ACI_FLAG_LB_S 9
+#define IXGBE_ACI_FLAG_RD_S 10
+#define IXGBE_ACI_FLAG_VFC_S 11
+#define IXGBE_ACI_FLAG_BUF_S 12
+#define IXGBE_ACI_FLAG_SI_S 13
+#define IXGBE_ACI_FLAG_EI_S 14
+#define IXGBE_ACI_FLAG_FE_S 15
+
+#define IXGBE_ACI_FLAG_DD BIT(IXGBE_ACI_FLAG_DD_S) /* 0x1 */
+#define IXGBE_ACI_FLAG_CMP BIT(IXGBE_ACI_FLAG_CMP_S) /* 0x2 */
+#define IXGBE_ACI_FLAG_ERR BIT(IXGBE_ACI_FLAG_ERR_S) /* 0x4 */
+#define IXGBE_ACI_FLAG_VFE BIT(IXGBE_ACI_FLAG_VFE_S) /* 0x8 */
+#define IXGBE_ACI_FLAG_LB BIT(IXGBE_ACI_FLAG_LB_S) /* 0x200 */
+#define IXGBE_ACI_FLAG_RD BIT(IXGBE_ACI_FLAG_RD_S) /* 0x400 */
+#define IXGBE_ACI_FLAG_VFC BIT(IXGBE_ACI_FLAG_VFC_S) /* 0x800 */
+#define IXGBE_ACI_FLAG_BUF BIT(IXGBE_ACI_FLAG_BUF_S) /* 0x1000 */
+#define IXGBE_ACI_FLAG_SI BIT(IXGBE_ACI_FLAG_SI_S) /* 0x2000 */
+#define IXGBE_ACI_FLAG_EI BIT(IXGBE_ACI_FLAG_EI_S) /* 0x4000 */
+#define IXGBE_ACI_FLAG_FE BIT(IXGBE_ACI_FLAG_FE_S) /* 0x8000 */
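+
+/*
+ * Editorial sketch (hedged guess from the flag names and IXGBE_ACI_LG_BUF):
+ * an indirect command with an attached buffer would plausibly be flagged as
+ * below; buf_size and fw_reads_buffer are hypothetical.
+ */
+#if 0
+u16 flags = IXGBE_ACI_FLAG_BUF; /* descriptor has an attached buffer */
+if (buf_size > IXGBE_ACI_LG_BUF)
+ flags |= IXGBE_ACI_FLAG_LB; /* buffer larger than 512 bytes */
+if (fw_reads_buffer)
+ flags |= IXGBE_ACI_FLAG_RD; /* buffer carries data for FW to read */
+#endif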
+
+/* Admin Command Interface (ACI) error codes */
+enum ixgbe_aci_err {
+ IXGBE_ACI_RC_OK = 0, /* Success */
+ IXGBE_ACI_RC_EPERM = 1, /* Operation not permitted */
+ IXGBE_ACI_RC_ENOENT = 2, /* No such element */
+ IXGBE_ACI_RC_ESRCH = 3, /* Bad opcode */
+ IXGBE_ACI_RC_EINTR = 4, /* Operation interrupted */
+ IXGBE_ACI_RC_EIO = 5, /* I/O error */
+ IXGBE_ACI_RC_ENXIO = 6, /* No such resource */
+ IXGBE_ACI_RC_E2BIG = 7, /* Arg too long */
+ IXGBE_ACI_RC_EAGAIN = 8, /* Try again */
+ IXGBE_ACI_RC_ENOMEM = 9, /* Out of memory */
+ IXGBE_ACI_RC_EACCES = 10, /* Permission denied */
+ IXGBE_ACI_RC_EFAULT = 11, /* Bad address */
+ IXGBE_ACI_RC_EBUSY = 12, /* Device or resource busy */
+ IXGBE_ACI_RC_EEXIST = 13, /* Object already exists */
+ IXGBE_ACI_RC_EINVAL = 14, /* Invalid argument */
+ IXGBE_ACI_RC_ENOTTY = 15, /* Not a typewriter */
+ IXGBE_ACI_RC_ENOSPC = 16, /* No space left or allocation failure */
+ IXGBE_ACI_RC_ENOSYS = 17, /* Function not implemented */
+ IXGBE_ACI_RC_ERANGE = 18, /* Parameter out of range */
+ IXGBE_ACI_RC_EFLUSHED = 19, /* Cmd flushed due to prev cmd error */
+ IXGBE_ACI_RC_BAD_ADDR = 20, /* Descriptor contains a bad pointer */
+ IXGBE_ACI_RC_EMODE = 21, /* Op not allowed in current dev mode */
+ IXGBE_ACI_RC_EFBIG = 22, /* File too big */
+ IXGBE_ACI_RC_ESBCOMP = 23, /* SB-IOSF completion unsuccessful */
+ IXGBE_ACI_RC_ENOSEC = 24, /* Missing security manifest */
+ IXGBE_ACI_RC_EBADSIG = 25, /* Bad RSA signature */
+ IXGBE_ACI_RC_ESVN = 26, /* SVN number prohibits this package */
+ IXGBE_ACI_RC_EBADMAN = 27, /* Manifest hash mismatch */
+ IXGBE_ACI_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
+ IXGBE_ACI_RC_EACCES_BMCU = 29, /* BMC Update in progress */
+};
+
+/* Admin Command Interface (ACI) opcodes */
+enum ixgbe_aci_opc {
+ ixgbe_aci_opc_get_ver = 0x0001,
+ ixgbe_aci_opc_driver_ver = 0x0002,
+ ixgbe_aci_opc_get_exp_err = 0x0005,
+
+ /* resource ownership */
+ ixgbe_aci_opc_req_res = 0x0008,
+ ixgbe_aci_opc_release_res = 0x0009,
+
+ /* device/function capabilities */
+ ixgbe_aci_opc_list_func_caps = 0x000A,
+ ixgbe_aci_opc_list_dev_caps = 0x000B,
+
+ /* safe disable of RXEN */
+ ixgbe_aci_opc_disable_rxen = 0x000C,
+
+ /* FW events */
+ ixgbe_aci_opc_get_fw_event = 0x0014,
+
+ /* PHY commands */
+ ixgbe_aci_opc_get_phy_caps = 0x0600,
+ ixgbe_aci_opc_set_phy_cfg = 0x0601,
+ ixgbe_aci_opc_restart_an = 0x0605,
+ ixgbe_aci_opc_get_link_status = 0x0607,
+ ixgbe_aci_opc_set_event_mask = 0x0613,
+ ixgbe_aci_opc_get_link_topo = 0x06E0,
+ ixgbe_aci_opc_read_i2c = 0x06E2,
+ ixgbe_aci_opc_write_i2c = 0x06E3,
+ ixgbe_aci_opc_read_mdio = 0x06E4,
+ ixgbe_aci_opc_write_mdio = 0x06E5,
+ ixgbe_aci_opc_set_gpio_by_func = 0x06E6,
+ ixgbe_aci_opc_get_gpio_by_func = 0x06E7,
+ ixgbe_aci_opc_set_port_id_led = 0x06E9,
+ ixgbe_aci_opc_set_gpio = 0x06EC,
+ ixgbe_aci_opc_get_gpio = 0x06ED,
+ ixgbe_aci_opc_sff_eeprom = 0x06EE,
+ ixgbe_aci_opc_prog_topo_dev_nvm = 0x06F2,
+ ixgbe_aci_opc_read_topo_dev_nvm = 0x06F3,
+
+ /* NVM commands */
+ ixgbe_aci_opc_nvm_read = 0x0701,
+ ixgbe_aci_opc_nvm_erase = 0x0702,
+ ixgbe_aci_opc_nvm_write = 0x0703,
+ ixgbe_aci_opc_nvm_cfg_read = 0x0704,
+ ixgbe_aci_opc_nvm_cfg_write = 0x0705,
+ ixgbe_aci_opc_nvm_checksum = 0x0706,
+ ixgbe_aci_opc_nvm_write_activate = 0x0707,
+ ixgbe_aci_opc_nvm_sr_dump = 0x0707,
+ ixgbe_aci_opc_nvm_save_factory_settings = 0x0708,
+ ixgbe_aci_opc_nvm_update_empr = 0x0709,
+ ixgbe_aci_opc_nvm_pkg_data = 0x070A,
+ ixgbe_aci_opc_nvm_pass_component_tbl = 0x070B,
+ ixgbe_aci_opc_nvm_sanitization = 0x070C,
+
+ /* Alternate Structure Commands */
+ ixgbe_aci_opc_write_alt_direct = 0x0900,
+ ixgbe_aci_opc_write_alt_indirect = 0x0901,
+ ixgbe_aci_opc_read_alt_direct = 0x0902,
+ ixgbe_aci_opc_read_alt_indirect = 0x0903,
+ ixgbe_aci_opc_done_alt_write = 0x0904,
+ ixgbe_aci_opc_clear_port_alt_write = 0x0906,
+
+ ixgbe_aci_opc_temp_tca_event = 0x0C94,
+
+ /* debug commands */
+ ixgbe_aci_opc_debug_dump_internals = 0xFF08,
+
+ /* SystemDiagnostic commands */
+ ixgbe_aci_opc_set_health_status_config = 0xFF20,
+ ixgbe_aci_opc_get_supported_health_status_codes = 0xFF21,
+ ixgbe_aci_opc_get_health_status = 0xFF22,
+ ixgbe_aci_opc_clear_health_status = 0xFF23,
+
+ /* FW Logging Commands */
+ ixgbe_aci_opc_fw_logs_config = 0xFF30,
+ ixgbe_aci_opc_fw_logs_register = 0xFF31,
+ ixgbe_aci_opc_fw_logs_query = 0xFF32,
+ ixgbe_aci_opc_fw_logs_event = 0xFF33,
+ ixgbe_aci_opc_fw_logs_get = 0xFF34,
+ ixgbe_aci_opc_fw_logs_clear = 0xFF35
+};
+
+/* This macro is used to generate a compilation error if a structure
+ * is not exactly the correct length. It gives a divide by zero error if the
+ * structure is not of the correct size, otherwise it creates an enum that is
+ * never used.
+ */
+#define IXGBE_CHECK_STRUCT_LEN(n, X) enum ixgbe_static_assert_enum_##X \
+ { ixgbe_static_assert_##X = (n) / ((sizeof(struct X) == (n)) ? 1 : 0) }
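+
+/*
+ * Illustrative expansion: IXGBE_CHECK_STRUCT_LEN(16, foo) becomes
+ *  enum ixgbe_static_assert_enum_foo {
+ *   ixgbe_static_assert_foo = (16) / ((sizeof(struct foo) == (16)) ? 1 : 0)
+ *  };
+ * so a wrong sizeof(struct foo) makes the divisor 0 and the enum initializer
+ * an invalid constant expression, failing the build.
+ */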
+
+/* This macro is used to generate a compilation error if a variable-length
+ * structure is not exactly the correct length assuming a single element of
+ * the variable-length object as the last element of the structure. It gives
+ * a divide by zero error if the structure is not of the correct size,
+ * otherwise it creates an enum that is never used.
+ */
+#define IXGBE_CHECK_VAR_LEN_STRUCT_LEN(n, X, T) enum ixgbe_static_assert_enum_##X \
+ { ixgbe_static_assert_##X = (n) / \
+ (((sizeof(struct X) + sizeof(T)) == (n)) ? 1 : 0) }
+
+/* This macro is used to ensure that parameter structures (i.e. structures
+ * in the params union member of struct ixgbe_aci_desc) are 16 bytes in length.
+ *
+ * NOT intended to be used to check the size of an indirect command/response
+ * additional data buffer (e.g. struct foo) which should just happen to be 16
+ * bytes (instead, use IXGBE_CHECK_STRUCT_LEN(16, foo) for that).
+ */
+#define IXGBE_CHECK_PARAM_LEN(X) IXGBE_CHECK_STRUCT_LEN(16, X)
+
+struct ixgbe_aci_cmd_generic {
+ __le32 param0;
+ __le32 param1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_generic);
+
+/* Get version (direct 0x0001) */
+struct ixgbe_aci_cmd_get_ver {
+ __le32 rom_ver;
+ __le32 fw_build;
+ u8 fw_branch;
+ u8 fw_major;
+ u8 fw_minor;
+ u8 fw_patch;
+ u8 api_branch;
+ u8 api_major;
+ u8 api_minor;
+ u8 api_patch;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_ver);
+
+#define IXGBE_DRV_VER_STR_LEN_E610 32
+
+struct ixgbe_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 driver_string[IXGBE_DRV_VER_STR_LEN_E610];
+};
+
+/* Send driver version (indirect 0x0002) */
+struct ixgbe_aci_cmd_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_driver_ver);
+
+/* Get Expanded Error Code (0x0005, direct) */
+struct ixgbe_aci_cmd_get_exp_err {
+ __le32 reason;
+#define IXGBE_ACI_EXPANDED_ERROR_NOT_PROVIDED 0xFFFFFFFF
+ __le32 identifier;
+ u8 rsvd[8];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_exp_err);
+
+/* FW update timeout definitions are in milliseconds */
+#define IXGBE_NVM_TIMEOUT 180000
+#define IXGBE_CHANGE_LOCK_TIMEOUT 1000
+#define IXGBE_GLOBAL_CFG_LOCK_TIMEOUT 3000
+
+enum ixgbe_aci_res_access_type {
+ IXGBE_RES_READ = 1,
+ IXGBE_RES_WRITE
+};
+
+enum ixgbe_aci_res_ids {
+ IXGBE_NVM_RES_ID = 1,
+ IXGBE_SPD_RES_ID,
+ IXGBE_CHANGE_LOCK_RES_ID,
+ IXGBE_GLOBAL_CFG_LOCK_RES_ID
+};
+
+/* Request resource ownership (direct 0x0008)
+ * Release resource ownership (direct 0x0009)
+ */
+struct ixgbe_aci_cmd_req_res {
+ __le16 res_id;
+#define IXGBE_ACI_RES_ID_NVM 1
+#define IXGBE_ACI_RES_ID_SDP 2
+#define IXGBE_ACI_RES_ID_CHNG_LOCK 3
+#define IXGBE_ACI_RES_ID_GLBL_LOCK 4
+ __le16 access_type;
+#define IXGBE_ACI_RES_ACCESS_READ 1
+#define IXGBE_ACI_RES_ACCESS_WRITE 2
+
+ /* Upon successful completion, FW writes this value and driver is
+ * expected to release resource before timeout. This value is provided
+ * in milliseconds.
+ */
+ __le32 timeout;
+#define IXGBE_ACI_RES_NVM_READ_DFLT_TIMEOUT_MS 3000
+#define IXGBE_ACI_RES_NVM_WRITE_DFLT_TIMEOUT_MS 180000
+#define IXGBE_ACI_RES_CHNG_LOCK_DFLT_TIMEOUT_MS 1000
+#define IXGBE_ACI_RES_GLBL_LOCK_DFLT_TIMEOUT_MS 3000
+ /* For SDP: pin ID of the SDP */
+ __le32 res_number;
+ /* Status is only used for IXGBE_ACI_RES_ID_GLBL_LOCK */
+ __le16 status;
+#define IXGBE_ACI_RES_GLBL_SUCCESS 0
+#define IXGBE_ACI_RES_GLBL_IN_PROG 1
+#define IXGBE_ACI_RES_GLBL_DONE 2
+ u8 reserved[2];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_req_res);
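+
+/*
+ * Editorial sketch (hedged): requesting NVM ownership for reading; cmd is a
+ * hypothetical pointer to a zeroed struct ixgbe_aci_cmd_req_res.
+ */
+#if 0
+cmd->res_id = htole16(IXGBE_ACI_RES_ID_NVM);
+cmd->access_type = htole16(IXGBE_ACI_RES_ACCESS_READ);
+/* On success FW fills cmd->timeout with the hold time in milliseconds;
+ * the driver must release the resource before that timeout expires.
+ */
+#endif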
+
+/* Get function capabilities (indirect 0x000A)
+ * Get device capabilities (indirect 0x000B)
+ */
+struct ixgbe_aci_cmd_list_caps {
+ u8 cmd_flags;
+ u8 pf_index;
+ u8 reserved[2];
+ __le32 count;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_list_caps);
+
+/* Device/Function buffer entry, repeated per reported capability */
+struct ixgbe_aci_cmd_list_caps_elem {
+ __le16 cap;
+#define IXGBE_ACI_CAPS_VALID_FUNCTIONS 0x0005
+#define IXGBE_ACI_MAX_VALID_FUNCTIONS 0x8
+#define IXGBE_ACI_CAPS_SRIOV 0x0012
+#define IXGBE_ACI_CAPS_VF 0x0013
+#define IXGBE_ACI_CAPS_VMDQ 0x0014
+#define IXGBE_ACI_CAPS_VSI 0x0017
+#define IXGBE_ACI_CAPS_DCB 0x0018
+#define IXGBE_ACI_CAPS_RSS 0x0040
+#define IXGBE_ACI_CAPS_RXQS 0x0041
+#define IXGBE_ACI_CAPS_TXQS 0x0042
+#define IXGBE_ACI_CAPS_MSIX 0x0043
+#define IXGBE_ACI_CAPS_FD 0x0045
+#define IXGBE_ACI_CAPS_MAX_MTU 0x0047
+#define IXGBE_ACI_CAPS_NVM_VER 0x0048
+#define IXGBE_ACI_CAPS_INLINE_IPSEC 0x0070
+#define IXGBE_ACI_CAPS_NUM_ENABLED_PORTS 0x0072
+#define IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE 0x0076
+#define IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
+#define IXGBE_ACI_CAPS_NVM_MGMT 0x0080
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0 0x0081
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1 0x0082
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2 0x0083
+#define IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3 0x0084
+#define IXGBE_ACI_CAPS_OROM_RECOVERY_UPDATE 0x0090
+#define IXGBE_ACI_CAPS_NEXT_CLUSTER_ID 0x0096
+ u8 major_ver;
+ u8 minor_ver;
+ /* Number of resources described by this capability */
+ __le32 number;
+ /* Only meaningful for some types of resources */
+ __le32 logical_id;
+ /* Only meaningful for some types of resources */
+ __le32 phys_id;
+ __le64 rsvd1;
+ __le64 rsvd2;
+};
+
+IXGBE_CHECK_STRUCT_LEN(32, ixgbe_aci_cmd_list_caps_elem);
+
+/* Disable RXEN (direct 0x000C) */
+struct ixgbe_aci_cmd_disable_rxen {
+ u8 lport_num;
+ u8 reserved[15];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_disable_rxen);
+
+/* Get FW Event (indirect 0x0014) */
+struct ixgbe_aci_cmd_get_fw_event {
+ __le16 fw_buf_status;
+#define IXGBE_ACI_GET_FW_EVENT_STATUS_OBTAINED BIT(0)
+#define IXGBE_ACI_GET_FW_EVENT_STATUS_PENDING BIT(1)
+ u8 rsvd[14];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_fw_event);
+
+/* Get PHY capabilities (indirect 0x0600) */
+struct ixgbe_aci_cmd_get_phy_caps {
+ u8 lport_num;
+ u8 reserved;
+ __le16 param0;
+ /* 18.0 - Report qualified modules */
+#define IXGBE_ACI_GET_PHY_RQM BIT(0)
+ /* 18.1 - 18.3 : Report mode
+ * 000b - Report topology capabilities, without media
+ * 001b - Report topology capabilities, with media
+ * 010b - Report Active configuration
+ * 011b - Report PHY Type and FEC mode capabilities
+ * 100b - Report Default capabilities
+ */
+#define IXGBE_ACI_REPORT_MODE_S 1
+#define IXGBE_ACI_REPORT_MODE_M (7 << IXGBE_ACI_REPORT_MODE_S)
+#define IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA 0
+#define IXGBE_ACI_REPORT_TOPO_CAP_MEDIA BIT(1)
+#define IXGBE_ACI_REPORT_ACTIVE_CFG BIT(2)
+#define IXGBE_ACI_REPORT_DFLT_CFG BIT(3)
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_phy_caps);
+
+/* PHY type defines (extended).
+ * The first set of defines is for phy_type_low.
+ */
+#define IXGBE_PHY_TYPE_LOW_100BASE_TX BIT_ULL(0)
+#define IXGBE_PHY_TYPE_LOW_100M_SGMII BIT_ULL(1)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_T BIT_ULL(2)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_SX BIT_ULL(3)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_LX BIT_ULL(4)
+#define IXGBE_PHY_TYPE_LOW_1000BASE_KX BIT_ULL(5)
+#define IXGBE_PHY_TYPE_LOW_1G_SGMII BIT_ULL(6)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_T BIT_ULL(7)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_X BIT_ULL(8)
+#define IXGBE_PHY_TYPE_LOW_2500BASE_KX BIT_ULL(9)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_T BIT_ULL(10)
+#define IXGBE_PHY_TYPE_LOW_5GBASE_KR BIT_ULL(11)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_T BIT_ULL(12)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_DA BIT_ULL(13)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_SR BIT_ULL(14)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_LR BIT_ULL(15)
+#define IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 BIT_ULL(16)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC BIT_ULL(17)
+#define IXGBE_PHY_TYPE_LOW_10G_SFI_C2C BIT_ULL(18)
+#define IXGBE_PHY_TYPE_LOW_MAX_INDEX 18
+/* The second set of defines is for phy_type_high. */
+#define IXGBE_PHY_TYPE_HIGH_10BASE_T BIT_ULL(1)
+#define IXGBE_PHY_TYPE_HIGH_10M_SGMII BIT_ULL(2)
+#define IXGBE_PHY_TYPE_HIGH_2500M_SGMII BIT_ULL(56)
+#define IXGBE_PHY_TYPE_HIGH_100M_USXGMII BIT_ULL(57)
+#define IXGBE_PHY_TYPE_HIGH_1G_USXGMII BIT_ULL(58)
+#define IXGBE_PHY_TYPE_HIGH_2500M_USXGMII BIT_ULL(59)
+#define IXGBE_PHY_TYPE_HIGH_5G_USXGMII BIT_ULL(60)
+#define IXGBE_PHY_TYPE_HIGH_10G_USXGMII BIT_ULL(61)
+#define IXGBE_PHY_TYPE_HIGH_MAX_INDEX 61
+
+struct ixgbe_aci_cmd_get_phy_caps_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_EN_TX_LINK_PAUSE BIT(0)
+#define IXGBE_ACI_PHY_EN_RX_LINK_PAUSE BIT(1)
+#define IXGBE_ACI_PHY_LOW_POWER_MODE BIT(2)
+#define IXGBE_ACI_PHY_EN_LINK BIT(3)
+#define IXGBE_ACI_PHY_AN_MODE BIT(4)
+#define IXGBE_ACI_PHY_EN_MOD_QUAL BIT(5)
+#define IXGBE_ACI_PHY_EN_LESM BIT(6)
+#define IXGBE_ACI_PHY_EN_AUTO_FEC BIT(7)
+#define IXGBE_ACI_PHY_CAPS_MASK MAKEMASK(0xff, 0)
+ u8 low_power_ctrl_an;
+#define IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE28 BIT(1)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE73 BIT(2)
+#define IXGBE_ACI_PHY_AN_EN_CLAUSE37 BIT(3)
+ __le16 eee_cap;
+#define IXGBE_ACI_PHY_EEE_EN_100BASE_TX BIT(0)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_T BIT(1)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_T BIT(2)
+#define IXGBE_ACI_PHY_EEE_EN_1000BASE_KX BIT(3)
+#define IXGBE_ACI_PHY_EEE_EN_10GBASE_KR BIT(4)
+#define IXGBE_ACI_PHY_EEE_EN_25GBASE_KR BIT(5)
+#define IXGBE_ACI_PHY_EEE_EN_10BASE_T BIT(11)
+ __le16 eeer_value;
+ u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
+ u8 phy_fw_ver[8];
+ u8 link_fec_options;
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_EN BIT(0)
+#define IXGBE_ACI_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1)
+#define IXGBE_ACI_PHY_FEC_25G_RS_528_REQ BIT(2)
+#define IXGBE_ACI_PHY_FEC_25G_KR_REQ BIT(3)
+#define IXGBE_ACI_PHY_FEC_25G_RS_544_REQ BIT(4)
+#define IXGBE_ACI_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
+#define IXGBE_ACI_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
+#define IXGBE_ACI_PHY_FEC_MASK MAKEMASK(0xdf, 0)
+ u8 module_compliance_enforcement;
+#define IXGBE_ACI_MOD_ENFORCE_STRICT_MODE BIT(0)
+ u8 extended_compliance_code;
+#define IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE 3
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+#define IXGBE_ACI_MOD_TYPE_BYTE0_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE0_QSFP_PLUS 0x80
+#define IXGBE_ACI_MOD_TYPE_IDENT 1
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE BIT(0)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE BIT(1)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR BIT(4)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR BIT(5)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM BIT(6)
+#define IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_ER BIT(7)
+#define IXGBE_ACI_MOD_TYPE_BYTE2_SFP_PLUS 0xA0
+#define IXGBE_ACI_MOD_TYPE_BYTE2_QSFP_PLUS 0x86
+ u8 qualified_module_count;
+ u8 rsvd2[7]; /* Bytes 47:41 reserved */
+#define IXGBE_ACI_QUAL_MOD_COUNT_MAX 16
+ struct {
+ u8 v_oui[3];
+ u8 rsvd3;
+ u8 v_part[16];
+ __le32 v_rev;
+ __le64 rsvd4;
+ } qual_modules[IXGBE_ACI_QUAL_MOD_COUNT_MAX];
+};
+
+IXGBE_CHECK_STRUCT_LEN(560, ixgbe_aci_cmd_get_phy_caps_data);
+
+/* Set PHY capabilities (direct 0x0601)
+ * NOTE: This command must be followed by setup link and restart auto-neg
+ */
+struct ixgbe_aci_cmd_set_phy_cfg {
+ u8 reserved[8];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_phy_cfg);
+
+/* Set PHY config command data structure */
+struct ixgbe_aci_cmd_set_phy_cfg_data {
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 caps;
+#define IXGBE_ACI_PHY_ENA_VALID_MASK MAKEMASK(0xef, 0)
+#define IXGBE_ACI_PHY_ENA_TX_PAUSE_ABILITY BIT(0)
+#define IXGBE_ACI_PHY_ENA_RX_PAUSE_ABILITY BIT(1)
+#define IXGBE_ACI_PHY_ENA_LOW_POWER BIT(2)
+#define IXGBE_ACI_PHY_ENA_LINK BIT(3)
+#define IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT BIT(5)
+#define IXGBE_ACI_PHY_ENA_LESM BIT(6)
+#define IXGBE_ACI_PHY_ENA_AUTO_FEC BIT(7)
+ u8 low_power_ctrl_an;
+ __le16 eee_cap; /* Value from ixgbe_aci_get_phy_caps */
+ __le16 eeer_value; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 link_fec_opt; /* Use defines from ixgbe_aci_get_phy_caps */
+ u8 module_compliance_enforcement;
+};
+
+IXGBE_CHECK_STRUCT_LEN(24, ixgbe_aci_cmd_set_phy_cfg_data);
+
+/* Restart AN command data structure (direct 0x0605)
+ * Also used for response, with only the lport_num field present.
+ */
+struct ixgbe_aci_cmd_restart_an {
+ u8 reserved[2];
+ u8 cmd_flags;
+#define IXGBE_ACI_RESTART_AN_LINK_RESTART BIT(1)
+#define IXGBE_ACI_RESTART_AN_LINK_ENABLE BIT(2)
+ u8 reserved2[13];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_restart_an);
+
+#pragma pack(1)
+/* Get link status (indirect 0x0607), also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status {
+ u8 reserved[2];
+ u8 cmd_flags;
+#define IXGBE_ACI_LSE_M 0x3
+#define IXGBE_ACI_LSE_NOP 0x0
+#define IXGBE_ACI_LSE_DIS 0x2
+#define IXGBE_ACI_LSE_ENA 0x3
+ /* only response uses this flag */
+#define IXGBE_ACI_LSE_IS_ENABLED 0x1
+ u8 reserved2[5];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_link_status);
+
+/* Get link status response data structure, also used for Link Status Event */
+struct ixgbe_aci_cmd_get_link_status_data {
+ u8 topo_media_conflict;
+#define IXGBE_ACI_LINK_TOPO_CONFLICT BIT(0)
+#define IXGBE_ACI_LINK_MEDIA_CONFLICT BIT(1)
+#define IXGBE_ACI_LINK_TOPO_CORRUPT BIT(2)
+#define IXGBE_ACI_LINK_TOPO_UNREACH_PRT BIT(4)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_PRT BIT(5)
+#define IXGBE_ACI_LINK_TOPO_UNDRUTIL_MEDIA BIT(6)
+#define IXGBE_ACI_LINK_TOPO_UNSUPP_MEDIA BIT(7)
+ u8 link_cfg_err;
+#define IXGBE_ACI_LINK_CFG_ERR BIT(0)
+#define IXGBE_ACI_LINK_CFG_COMPLETED BIT(1)
+#define IXGBE_ACI_LINK_ACT_PORT_OPT_INVAL BIT(2)
+#define IXGBE_ACI_LINK_FEAT_ID_OR_CONFIG_ID_INVAL BIT(3)
+#define IXGBE_ACI_LINK_TOPO_CRITICAL_SDP_ERR BIT(4)
+#define IXGBE_ACI_LINK_MODULE_POWER_UNSUPPORTED BIT(5)
+#define IXGBE_ACI_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6)
+#define IXGBE_ACI_LINK_INVAL_MAX_POWER_LIMIT BIT(7)
+ u8 link_info;
+#define IXGBE_ACI_LINK_UP BIT(0) /* Link Status */
+#define IXGBE_ACI_LINK_FAULT BIT(1)
+#define IXGBE_ACI_LINK_FAULT_TX BIT(2)
+#define IXGBE_ACI_LINK_FAULT_RX BIT(3)
+#define IXGBE_ACI_LINK_FAULT_REMOTE BIT(4)
+#define IXGBE_ACI_LINK_UP_PORT BIT(5) /* External Port Link Status */
+#define IXGBE_ACI_MEDIA_AVAILABLE BIT(6)
+#define IXGBE_ACI_SIGNAL_DETECT BIT(7)
+ u8 an_info;
+#define IXGBE_ACI_AN_COMPLETED BIT(0)
+#define IXGBE_ACI_LP_AN_ABILITY BIT(1)
+#define IXGBE_ACI_PD_FAULT BIT(2) /* Parallel Detection Fault */
+#define IXGBE_ACI_FEC_EN BIT(3)
+#define IXGBE_ACI_PHY_LOW_POWER BIT(4) /* Low Power State */
+#define IXGBE_ACI_LINK_PAUSE_TX BIT(5)
+#define IXGBE_ACI_LINK_PAUSE_RX BIT(6)
+#define IXGBE_ACI_QUALIFIED_MODULE BIT(7)
+ u8 ext_info;
+#define IXGBE_ACI_LINK_PHY_TEMP_ALARM BIT(0)
+#define IXGBE_ACI_LINK_EXCESSIVE_ERRORS BIT(1) /* Excessive Link Errors */
+ /* Port Tx Suspended */
+#define IXGBE_ACI_LINK_TX_S 2
+#define IXGBE_ACI_LINK_TX_M (0x03 << IXGBE_ACI_LINK_TX_S)
+#define IXGBE_ACI_LINK_TX_ACTIVE 0
+#define IXGBE_ACI_LINK_TX_DRAINED 1
+#define IXGBE_ACI_LINK_TX_FLUSHED 3
+ u8 lb_status;
+#define IXGBE_ACI_LINK_LB_PHY_LCL BIT(0)
+#define IXGBE_ACI_LINK_LB_PHY_RMT BIT(1)
+#define IXGBE_ACI_LINK_LB_MAC_LCL BIT(2)
+#define IXGBE_ACI_LINK_LB_PHY_IDX_S 3
+#define IXGBE_ACI_LINK_LB_PHY_IDX_M (0x7 << IXGBE_ACI_LINK_LB_PHY_IDX_S)
+ __le16 max_frame_size;
+ u8 cfg;
+#define IXGBE_ACI_LINK_25G_KR_FEC_EN BIT(0)
+#define IXGBE_ACI_LINK_25G_RS_528_FEC_EN BIT(1)
+#define IXGBE_ACI_LINK_25G_RS_544_FEC_EN BIT(2)
+#define IXGBE_ACI_FEC_MASK MAKEMASK(0x7, 0)
+ /* Pacing Config */
+#define IXGBE_ACI_CFG_PACING_S 3
+#define IXGBE_ACI_CFG_PACING_M (0xF << IXGBE_ACI_CFG_PACING_S)
+#define IXGBE_ACI_CFG_PACING_TYPE_M BIT(7)
+#define IXGBE_ACI_CFG_PACING_TYPE_AVG 0
+#define IXGBE_ACI_CFG_PACING_TYPE_FIXED IXGBE_ACI_CFG_PACING_TYPE_M
+ /* External Device Power Ability */
+ u8 power_desc;
+#define IXGBE_ACI_PWR_CLASS_M 0x3F
+#define IXGBE_ACI_LINK_PWR_BASET_LOW_HIGH 0
+#define IXGBE_ACI_LINK_PWR_BASET_HIGH 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_1 0
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_2 1
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_3 2
+#define IXGBE_ACI_LINK_PWR_QSFP_CLASS_4 3
+ __le16 link_speed;
+#define IXGBE_ACI_LINK_SPEED_M 0x7FF
+#define IXGBE_ACI_LINK_SPEED_10MB BIT(0)
+#define IXGBE_ACI_LINK_SPEED_100MB BIT(1)
+#define IXGBE_ACI_LINK_SPEED_1000MB BIT(2)
+#define IXGBE_ACI_LINK_SPEED_2500MB BIT(3)
+#define IXGBE_ACI_LINK_SPEED_5GB BIT(4)
+#define IXGBE_ACI_LINK_SPEED_10GB BIT(5)
+#define IXGBE_ACI_LINK_SPEED_20GB BIT(6)
+#define IXGBE_ACI_LINK_SPEED_25GB BIT(7)
+#define IXGBE_ACI_LINK_SPEED_40GB BIT(8)
+#define IXGBE_ACI_LINK_SPEED_50GB BIT(9)
+#define IXGBE_ACI_LINK_SPEED_100GB BIT(10)
+#define IXGBE_ACI_LINK_SPEED_200GB BIT(11)
+#define IXGBE_ACI_LINK_SPEED_UNKNOWN BIT(15)
+ __le16 reserved3; /* Aligns next field to 8-byte boundary */
+ u8 ext_fec_status;
+#define IXGBE_ACI_LINK_RS_272_FEC_EN BIT(0) /* RS 272 FEC enabled */
+ u8 reserved4;
+ __le64 phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ /* Get link status version 2 link partner data */
+ __le64 lp_phy_type_low; /* Use values from IXGBE_PHY_TYPE_LOW_* */
+ __le64 lp_phy_type_high; /* Use values from IXGBE_PHY_TYPE_HIGH_* */
+ u8 lp_fec_adv;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_CAP BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_CAP BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_CAP BIT(2)
+#define IXGBE_ACI_LINK_LP_50G_KR_272_FEC_CAP BIT(3)
+#define IXGBE_ACI_LINK_LP_100G_KR_272_FEC_CAP BIT(4)
+#define IXGBE_ACI_LINK_LP_200G_KR_272_FEC_CAP BIT(5)
+ u8 lp_fec_req;
+#define IXGBE_ACI_LINK_LP_10G_KR_FEC_REQ BIT(0)
+#define IXGBE_ACI_LINK_LP_25G_KR_FEC_REQ BIT(1)
+#define IXGBE_ACI_LINK_LP_RS_528_FEC_REQ BIT(2)
+#define IXGBE_ACI_LINK_LP_KR_272_FEC_REQ BIT(3)
+ u8 lp_flowcontrol;
+#define IXGBE_ACI_LINK_LP_PAUSE_ADV BIT(0)
+#define IXGBE_ACI_LINK_LP_ASM_DIR_ADV BIT(1)
+ u8 reserved5[5];
+};
+#pragma pack()
+
+IXGBE_CHECK_STRUCT_LEN(56, ixgbe_aci_cmd_get_link_status_data);
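+
+/*
+ * Editorial sketch (hedged): decoding a few fields of a received response;
+ * data is a hypothetical pointer to struct ixgbe_aci_cmd_get_link_status_data.
+ */
+#if 0
+bool link_up = (data->link_info & IXGBE_ACI_LINK_UP) != 0;
+u16 speeds = le16toh(data->link_speed) & IXGBE_ACI_LINK_SPEED_M;
+bool is_10g = (speeds & IXGBE_ACI_LINK_SPEED_10GB) != 0;
+#endif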
+
+/* Set event mask command (direct 0x0613) */
+struct ixgbe_aci_cmd_set_event_mask {
+ u8 reserved[8];
+ __le16 event_mask;
+#define IXGBE_ACI_LINK_EVENT_UPDOWN BIT(1)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_NA BIT(2)
+#define IXGBE_ACI_LINK_EVENT_LINK_FAULT BIT(3)
+#define IXGBE_ACI_LINK_EVENT_PHY_TEMP_ALARM BIT(4)
+#define IXGBE_ACI_LINK_EVENT_EXCESSIVE_ERRORS BIT(5)
+#define IXGBE_ACI_LINK_EVENT_SIGNAL_DETECT BIT(6)
+#define IXGBE_ACI_LINK_EVENT_AN_COMPLETED BIT(7)
+#define IXGBE_ACI_LINK_EVENT_MODULE_QUAL_FAIL BIT(8)
+#define IXGBE_ACI_LINK_EVENT_PORT_TX_SUSPENDED BIT(9)
+#define IXGBE_ACI_LINK_EVENT_TOPO_CONFLICT BIT(10)
+#define IXGBE_ACI_LINK_EVENT_MEDIA_CONFLICT BIT(11)
+#define IXGBE_ACI_LINK_EVENT_PHY_FW_LOAD_FAIL BIT(12)
+ u8 reserved1[6];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_event_mask);
+
+struct ixgbe_aci_cmd_link_topo_params {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_LINK_TOPO_PORT_NUM_VALID BIT(0)
+ u8 node_type_ctx;
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_S 0
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_M (0xF << IXGBE_ACI_LINK_TOPO_NODE_TYPE_S)
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_PHY 0
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPIO_CTRL 1
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MUX_CTRL 2
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED_CTRL 3
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_LED 4
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_THERMAL 5
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE 6
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_MEZZ 7
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_ID_EEPROM 8
+#define IXGBE_ACI_LINK_TOPO_NODE_TYPE_GPS 11
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_S 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_M \
+ (0xF << IXGBE_ACI_LINK_TOPO_NODE_CTX_S)
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_GLOBAL 0
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_BOARD 1
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT 2
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE 3
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE 4
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_DIRECT_BUS_ACCESS 5
+#define IXGBE_ACI_LINK_TOPO_NODE_CTX_NODE_HANDLE_BUS_ADDRESS 6
+ u8 index;
+};
+
+IXGBE_CHECK_STRUCT_LEN(4, ixgbe_aci_cmd_link_topo_params);
+
+struct ixgbe_aci_cmd_link_topo_addr {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ __le16 handle;
+#define IXGBE_ACI_LINK_TOPO_HANDLE_S 0
+#define IXGBE_ACI_LINK_TOPO_HANDLE_M (0x3FF << IXGBE_ACI_LINK_TOPO_HANDLE_S)
+/* Used to decode the handle field */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_M BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_LOM BIT(9)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_BRD_TYPE_MEZZ 0
+#define IXGBE_ACI_LINK_TOPO_HANDLE_NODE_S 0
+/* In case of a Mezzanine type */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_NODE_M \
+ (0x3F << IXGBE_ACI_LINK_TOPO_HANDLE_NODE_S)
+#define IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_S 6
+#define IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_M \
+ (0x7 << IXGBE_ACI_LINK_TOPO_HANDLE_MEZZ_S)
+/* In case of a LOM type */
+#define IXGBE_ACI_LINK_TOPO_HANDLE_LOM_NODE_M \
+ (0x1FF << IXGBE_ACI_LINK_TOPO_HANDLE_NODE_S)
+};
+
+IXGBE_CHECK_STRUCT_LEN(6, ixgbe_aci_cmd_link_topo_addr);
+
+/* Get Link Topology Handle (direct, 0x06E0) */
+struct ixgbe_aci_cmd_get_link_topo {
+ struct ixgbe_aci_cmd_link_topo_addr addr;
+ u8 node_part_num;
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_GEN_GPS 0x48
+#define IXGBE_ACI_GET_LINK_TOPO_NODE_NR_E610_PTC 0x49
+ u8 rsvd[9];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_link_topo);
+
+/* Read/Write I2C (direct, 0x06E2/0x06E3) */
+struct ixgbe_aci_cmd_i2c {
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr;
+ __le16 i2c_addr;
+ u8 i2c_params;
+#define IXGBE_ACI_I2C_DATA_SIZE_S 0
+#define IXGBE_ACI_I2C_DATA_SIZE_M (0xF << IXGBE_ACI_I2C_DATA_SIZE_S)
+#define IXGBE_ACI_I2C_ADDR_TYPE_M BIT(4)
+#define IXGBE_ACI_I2C_ADDR_TYPE_7BIT 0
+#define IXGBE_ACI_I2C_ADDR_TYPE_10BIT IXGBE_ACI_I2C_ADDR_TYPE_M
+#define IXGBE_ACI_I2C_DATA_OFFSET_S 5
+#define IXGBE_ACI_I2C_DATA_OFFSET_M (0x3 << IXGBE_ACI_I2C_DATA_OFFSET_S)
+#define IXGBE_ACI_I2C_USE_REPEATED_START BIT(7)
+ u8 rsvd;
+ __le16 i2c_bus_addr;
+#define IXGBE_ACI_I2C_ADDR_7BIT_MASK 0x7F
+#define IXGBE_ACI_I2C_ADDR_10BIT_MASK 0x3FF
+ u8 i2c_data[4]; /* Used only by write command, reserved in read. */
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_i2c);
+
+/* Read I2C Response (direct, 0x06E2) */
+struct ixgbe_aci_cmd_read_i2c_resp {
+ u8 i2c_data[16];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_i2c_resp);
+
+/* Read/Write MDIO (direct, 0x06E4/0x06E5) */
+struct ixgbe_aci_cmd_mdio {
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr;
+ u8 mdio_device_addr;
+#define IXGBE_ACI_MDIO_DEV_S 0
+#define IXGBE_ACI_MDIO_DEV_M (0x1F << IXGBE_ACI_MDIO_DEV_S)
+#define IXGBE_ACI_MDIO_CLAUSE_22 BIT(5)
+#define IXGBE_ACI_MDIO_CLAUSE_45 BIT(6)
+ u8 mdio_bus_address;
+#define IXGBE_ACI_MDIO_BUS_ADDR_S 0
+#define IXGBE_ACI_MDIO_BUS_ADDR_M (0x1F << IXGBE_ACI_MDIO_BUS_ADDR_S)
+ __le16 offset;
+ __le16 data; /* Input in write cmd, output in read cmd. */
+ u8 rsvd1[4];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_mdio);
+
+/* Set/Get GPIO By Function (direct, 0x06E6/0x06E7) */
+struct ixgbe_aci_cmd_gpio_by_func {
+ struct ixgbe_aci_cmd_link_topo_addr topo_addr;
+ u8 io_func_num;
+#define IXGBE_ACI_GPIO_FUNC_S 0
+#define IXGBE_ACI_GPIO_FUNC_M (0x1F << IXGBE_ACI_GPIO_FUNC_S)
+ u8 io_value; /* Input in write cmd, output in read cmd. */
+#define IXGBE_ACI_GPIO_ON BIT(0)
+#define IXGBE_ACI_GPIO_OFF 0
+ u8 rsvd[8];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_gpio_by_func);
+
+/* Set Port Identification LED (direct, 0x06E9) */
+struct ixgbe_aci_cmd_set_port_id_led {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_PORT_ID_PORT_NUM_VALID BIT(0)
+ u8 ident_mode;
+#define IXGBE_ACI_PORT_IDENT_LED_BLINK BIT(0)
+#define IXGBE_ACI_PORT_IDENT_LED_ORIG 0
+ u8 rsvd[13];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_port_id_led);
+
+/* Set/Get GPIO (direct, 0x06EC/0x06ED) */
+struct ixgbe_aci_cmd_gpio {
+ __le16 gpio_ctrl_handle;
+#define IXGBE_ACI_GPIO_HANDLE_S 0
+#define IXGBE_ACI_GPIO_HANDLE_M (0x3FF << IXGBE_ACI_GPIO_HANDLE_S)
+ u8 gpio_num;
+ u8 gpio_val;
+ u8 rsvd[12];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_gpio);
+
+/* Read/Write SFF EEPROM command (indirect 0x06EE) */
+struct ixgbe_aci_cmd_sff_eeprom {
+ u8 lport_num;
+ u8 lport_num_valid;
+#define IXGBE_ACI_SFF_PORT_NUM_VALID BIT(0)
+ __le16 i2c_bus_addr;
+#define IXGBE_ACI_SFF_I2CBUS_7BIT_M 0x7F
+#define IXGBE_ACI_SFF_I2CBUS_10BIT_M 0x3FF
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_M BIT(10)
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_7BIT 0
+#define IXGBE_ACI_SFF_I2CBUS_TYPE_10BIT IXGBE_ACI_SFF_I2CBUS_TYPE_M
+#define IXGBE_ACI_SFF_PAGE_BANK_CTRL_S 11
+#define IXGBE_ACI_SFF_PAGE_BANK_CTRL_M (0x3 << IXGBE_ACI_SFF_PAGE_BANK_CTRL_S)
+#define IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE 0
+#define IXGBE_ACI_SFF_UPDATE_PAGE 1
+#define IXGBE_ACI_SFF_UPDATE_BANK 2
+#define IXGBE_ACI_SFF_UPDATE_PAGE_BANK 3
+#define IXGBE_ACI_SFF_IS_WRITE BIT(15)
+ __le16 i2c_offset;
+ u8 module_bank;
+ u8 module_page;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_sff_eeprom);
+
+/* Program Topology Device NVM (direct, 0x06F2) */
+struct ixgbe_aci_cmd_prog_topo_dev_nvm {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ u8 rsvd[12];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_prog_topo_dev_nvm);
+
+/* Read Topology Device NVM (direct, 0x06F3) */
+struct ixgbe_aci_cmd_read_topo_dev_nvm {
+ struct ixgbe_aci_cmd_link_topo_params topo_params;
+ __le32 start_address;
+#define IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE 8
+ u8 data_read[IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_topo_dev_nvm);
+
+/* NVM Read command (indirect 0x0701)
+ * NVM Erase commands (direct 0x0702)
+ * NVM Write commands (indirect 0x0703)
+ * NVM Write Activate commands (direct 0x0707)
+ * NVM Shadow RAM Dump commands (direct 0x0707)
+ */
+struct ixgbe_aci_cmd_nvm {
+#define IXGBE_ACI_NVM_MAX_OFFSET 0xFFFFFF
+ __le16 offset_low;
+ u8 offset_high; /* For Write Activate offset_high is used as flags2 */
+ u8 cmd_flags;
+#define IXGBE_ACI_NVM_LAST_CMD BIT(0)
+#define IXGBE_ACI_NVM_PCIR_REQ BIT(0) /* Used by NVM Write reply */
+#define IXGBE_ACI_NVM_PRESERVATION_S 1 /* Used by NVM Write Activate only */
+#define IXGBE_ACI_NVM_PRESERVATION_M (3 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_NO_PRESERVATION (0 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_PRESERVE_ALL BIT(1)
+#define IXGBE_ACI_NVM_FACTORY_DEFAULT (2 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_PRESERVE_SELECTED (3 << IXGBE_ACI_NVM_PRESERVATION_S)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NVM BIT(3) /* Write Activate/SR Dump only */
+#define IXGBE_ACI_NVM_ACTIV_SEL_OROM BIT(4)
+#define IXGBE_ACI_NVM_ACTIV_SEL_NETLIST BIT(5)
+#define IXGBE_ACI_NVM_SPECIAL_UPDATE BIT(6)
+#define IXGBE_ACI_NVM_REVERT_LAST_ACTIV BIT(6) /* Write Activate only */
+#define IXGBE_ACI_NVM_ACTIV_SEL_MASK MAKEMASK(0x7, 3)
+#define IXGBE_ACI_NVM_FLASH_ONLY BIT(7)
+#define IXGBE_ACI_NVM_RESET_LVL_M MAKEMASK(0x3, 0) /* Write reply only */
+#define IXGBE_ACI_NVM_POR_FLAG 0
+#define IXGBE_ACI_NVM_PERST_FLAG 1
+#define IXGBE_ACI_NVM_EMPR_FLAG 2
+#define IXGBE_ACI_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+ /* For Write Activate, several flags are sent as part of a separate
+ * flags2 field using a separate byte. For simplicity of the software
+ * interface, we pass the flags as a 16 bit value so these flags are
+ * all offset by 8 bits
+ */
+#define IXGBE_ACI_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */
+ __le16 module_typeid;
+ __le16 length;
+#define IXGBE_ACI_NVM_ERASE_LEN 0xFFFF
+ __le32 addr_high;
+ __le32 addr_low;
+};
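+
+/*
+ * Worked example (illustrative): the 24-bit flash offset is split across
+ * offset_low and offset_high, so for offset == 0x123456:
+ *  offset_low  = 0x123456 & 0xFFFF        -> 0x3456 (stored little-endian)
+ *  offset_high = (0x123456 >> 16) & 0xFF  -> 0x12
+ * Offsets above IXGBE_ACI_NVM_MAX_OFFSET (0xFFFFFF) cannot be encoded.
+ */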
+
+/* NVM module_typeid values, and the offsets and read lengths needed for
+ * struct ixgbe_aci_cmd_nvm.
+ */
+#define IXGBE_ACI_NVM_SECTOR_UNIT 4096 /* In Bytes */
+#define IXGBE_ACI_NVM_WORD_UNIT 2 /* In Bytes */
+
+#define IXGBE_ACI_NVM_START_POINT 0
+#define IXGBE_ACI_NVM_EMP_SR_PTR_OFFSET 0x90
+#define IXGBE_ACI_NVM_EMP_SR_PTR_RD_LEN 2 /* In Bytes */
+#define IXGBE_ACI_NVM_EMP_SR_PTR_M MAKEMASK(0x7FFF, 0)
+#define IXGBE_ACI_NVM_EMP_SR_PTR_TYPE_S 15
+#define IXGBE_ACI_NVM_EMP_SR_PTR_TYPE_M BIT(15)
+#define IXGBE_ACI_NVM_EMP_SR_PTR_TYPE_SECTOR 1
+
+#define IXGBE_ACI_NVM_LLDP_CFG_PTR_OFFSET 0x46
+#define IXGBE_ACI_NVM_LLDP_CFG_HEADER_LEN 2 /* In Bytes */
+#define IXGBE_ACI_NVM_LLDP_CFG_PTR_RD_LEN 2 /* In Bytes */
+
+#define IXGBE_ACI_NVM_LLDP_PRESERVED_MOD_ID 0x129
+#define IXGBE_ACI_NVM_CUR_LLDP_PERSIST_RD_OFFSET 2 /* In Bytes */
+#define IXGBE_ACI_NVM_LLDP_STATUS_M MAKEMASK(0xF, 0)
+#define IXGBE_ACI_NVM_LLDP_STATUS_M_LEN 4 /* In Bits */
+#define IXGBE_ACI_NVM_LLDP_STATUS_RD_LEN 4 /* In Bytes */
+
+#define IXGBE_ACI_NVM_MINSREV_MOD_ID 0x130
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm);
+
+/* Used for reading and writing MinSRev using 0x0701 and 0x0703. Note that the
+ * type field is excluded from the section when reading and writing from
+ * a module using the module_typeid field with these ACI commands.
+ */
+struct ixgbe_aci_cmd_nvm_minsrev {
+ __le16 length;
+ __le16 validity;
+#define IXGBE_ACI_NVM_MINSREV_NVM_VALID BIT(0)
+#define IXGBE_ACI_NVM_MINSREV_OROM_VALID BIT(1)
+ __le16 nvm_minsrev_l;
+ __le16 nvm_minsrev_h;
+ __le16 orom_minsrev_l;
+ __le16 orom_minsrev_h;
+};
+
+IXGBE_CHECK_STRUCT_LEN(12, ixgbe_aci_cmd_nvm_minsrev);
+
+/* Used for 0x0704 as well as for 0x0705 commands */
+struct ixgbe_aci_cmd_nvm_cfg {
+ u8 cmd_flags;
+#define IXGBE_ACI_ANVM_MULTIPLE_ELEMS BIT(0)
+#define IXGBE_ACI_ANVM_IMMEDIATE_FIELD BIT(1)
+#define IXGBE_ACI_ANVM_NEW_CFG BIT(2)
+ u8 reserved;
+ __le16 count;
+ __le16 id;
+ u8 reserved1[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm_cfg);
+
+struct ixgbe_aci_cmd_nvm_cfg_data {
+ __le16 field_id;
+ __le16 field_options;
+ __le16 field_value;
+};
+
+IXGBE_CHECK_STRUCT_LEN(6, ixgbe_aci_cmd_nvm_cfg_data);
+
+/* NVM Checksum Command (direct, 0x0706) */
+struct ixgbe_aci_cmd_nvm_checksum {
+ u8 flags;
+#define IXGBE_ACI_NVM_CHECKSUM_VERIFY BIT(0)
+#define IXGBE_ACI_NVM_CHECKSUM_RECALC BIT(1)
+ u8 rsvd;
+ __le16 checksum; /* Used only by response */
+#define IXGBE_ACI_NVM_CHECKSUM_CORRECT 0xBABA
+ u8 rsvd2[12];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm_checksum);
+
+/* Used for NVM Sanitization command - 0x070C */
+struct ixgbe_aci_cmd_nvm_sanitization {
+ u8 cmd_flags;
+#define IXGBE_ACI_SANITIZE_REQ_READ 0
+#define IXGBE_ACI_SANITIZE_REQ_OPERATE BIT(0)
+
+#define IXGBE_ACI_SANITIZE_READ_SUBJECT_NVM_BITS 0
+#define IXGBE_ACI_SANITIZE_READ_SUBJECT_NVM_STATE BIT(1)
+#define IXGBE_ACI_SANITIZE_OPERATE_SUBJECT_CLEAR 0
+ u8 values;
+#define IXGBE_ACI_SANITIZE_NVM_BITS_HOST_CLEAN_SUPPORT BIT(0)
+#define IXGBE_ACI_SANITIZE_NVM_BITS_BMC_CLEAN_SUPPORT BIT(2)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_HOST_CLEAN_DONE BIT(0)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_HOST_CLEAN_SUCCESS BIT(1)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_BMC_CLEAN_DONE BIT(2)
+#define IXGBE_ACI_SANITIZE_NVM_STATE_BMC_CLEAN_SUCCESS BIT(3)
+#define IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE BIT(0)
+#define IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS BIT(1)
+#define IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE BIT(2)
+#define IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS BIT(3)
+ u8 reserved[14];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_nvm_sanitization);
+
+/* Write/Read Alternate - Direct (direct 0x0900/0x0902) */
+struct ixgbe_aci_cmd_read_write_alt_direct {
+ __le32 dword0_addr;
+ __le32 dword0_value;
+ __le32 dword1_addr;
+ __le32 dword1_value;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_write_alt_direct);
+
+/* Write/Read Alternate - Indirect (indirect 0x0901/0x0903) */
+struct ixgbe_aci_cmd_read_write_alt_indirect {
+ __le32 base_dword_addr;
+ __le32 num_dwords;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_read_write_alt_indirect);
+
+/* Done Alternate Write (direct 0x0904) */
+struct ixgbe_aci_cmd_done_alt_write {
+ u8 flags;
+#define IXGBE_ACI_CMD_UEFI_BIOS_MODE BIT(0)
+#define IXGBE_ACI_RESP_RESET_NEEDED BIT(1)
+ u8 reserved[15];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_done_alt_write);
+
+/* Clear Port Alternate Write (direct 0x0906) */
+struct ixgbe_aci_cmd_clear_port_alt_write {
+ u8 reserved[16];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_clear_port_alt_write);
+
+/* Get CGU abilities command response data structure (indirect 0x0C61) */
+struct ixgbe_aci_cmd_get_cgu_abilities {
+ u8 num_inputs;
+ u8 num_outputs;
+ u8 pps_dpll_idx;
+ u8 synce_dpll_idx;
+ __le32 max_in_freq;
+ __le32 max_in_phase_adj;
+ __le32 max_out_freq;
+ __le32 max_out_phase_adj;
+ u8 cgu_part_num;
+ u8 rsvd[3];
+};
+
+IXGBE_CHECK_STRUCT_LEN(24, ixgbe_aci_cmd_get_cgu_abilities);
+
+#define IXGBE_ACI_NODE_HANDLE_VALID BIT(10)
+#define IXGBE_ACI_NODE_HANDLE MAKEMASK(0x3FF, 0)
+#define IXGBE_ACI_DRIVING_CLK_NUM_SHIFT 10
+#define IXGBE_ACI_DRIVING_CLK_NUM MAKEMASK(0x3F, IXGBE_ACI_DRIVING_CLK_NUM_SHIFT)
+
+/* Set CGU input config (direct 0x0C62) */
+struct ixgbe_aci_cmd_set_cgu_input_config {
+ u8 input_idx;
+ u8 flags1;
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG1_UPDATE_FREQ BIT(6)
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG1_UPDATE_DELAY BIT(7)
+ u8 flags2;
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5)
+#define IXGBE_ACI_SET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6)
+ u8 rsvd;
+ __le32 freq;
+ __le32 phase_delay;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_input_config);
+
+/* Get CGU input config response descriptor structure (direct 0x0C63) */
+struct ixgbe_aci_cmd_get_cgu_input_config {
+ u8 input_idx;
+ u8 status;
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_LOS BIT(0)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_SCM_FAIL BIT(1)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_CFM_FAIL BIT(2)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_GST_FAIL BIT(3)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_PFM_FAIL BIT(4)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_ESYNC_FAIL BIT(6)
+#define IXGBE_ACI_GET_CGU_IN_CFG_STATUS_ESYNC_CAP BIT(7)
+ u8 type;
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_READ_ONLY BIT(0)
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_GPS BIT(4)
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_EXTERNAL BIT(5)
+#define IXGBE_ACI_GET_CGU_IN_CFG_TYPE_PHY BIT(6)
+ u8 flags1;
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_PHASE_DELAY_SUPP BIT(0)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_1PPS_SUPP BIT(2)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_10MHZ_SUPP BIT(3)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG1_ANYFREQ BIT(7)
+ __le32 freq;
+ __le32 phase_delay;
+ u8 flags2;
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG2_INPUT_EN BIT(5)
+#define IXGBE_ACI_GET_CGU_IN_CFG_FLG2_ESYNC_EN BIT(6)
+ u8 rsvd[1];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_input_config);
+
+/* Set CGU output config (direct 0x0C64) */
+struct ixgbe_aci_cmd_set_cgu_output_config {
+ u8 output_idx;
+ u8 flags;
+#define IXGBE_ACI_SET_CGU_OUT_CFG_OUT_EN BIT(0)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_ESYNC_EN BIT(1)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_UPDATE_FREQ BIT(2)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_UPDATE_PHASE BIT(3)
+#define IXGBE_ACI_SET_CGU_OUT_CFG_UPDATE_SRC_SEL BIT(4)
+ u8 src_sel;
+#define IXGBE_ACI_SET_CGU_OUT_CFG_DPLL_SRC_SEL MAKEMASK(0x1F, 0)
+ u8 rsvd;
+ __le32 freq;
+ __le32 phase_delay;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_output_config);
+
+/* Get CGU output config (direct 0x0C65) */
+struct ixgbe_aci_cmd_get_cgu_output_config {
+ u8 output_idx;
+ u8 flags;
+#define IXGBE_ACI_GET_CGU_OUT_CFG_OUT_EN BIT(0)
+#define IXGBE_ACI_GET_CGU_OUT_CFG_ESYNC_EN BIT(1)
+#define IXGBE_ACI_GET_CGU_OUT_CFG_ESYNC_ABILITY BIT(2)
+ u8 src_sel;
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT 0
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_SRC_SEL \
+ MAKEMASK(0x1F, IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_SRC_SEL_SHIFT)
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT 5
+#define IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_MODE \
+ MAKEMASK(0x7, IXGBE_ACI_GET_CGU_OUT_CFG_DPLL_MODE_SHIFT)
+ u8 rsvd;
+ __le32 freq;
+ __le32 src_freq;
+ u8 rsvd2[2];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_output_config);
+
+/* Get CGU DPLL status (direct 0x0C66) */
+struct ixgbe_aci_cmd_get_cgu_dpll_status {
+ u8 dpll_num;
+ u8 ref_state;
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_LOS BIT(0)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_SCM BIT(1)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_CFM BIT(2)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_GST BIT(3)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_PFM BIT(4)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_FAST_LOCK_EN BIT(5)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_REF_SW_ESYNC BIT(6)
+ __le16 dpll_state;
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_LOCK BIT(0)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_HO BIT(1)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_HO_READY BIT(2)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_FLHIT BIT(5)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_PSLHIT BIT(7)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SHIFT 8
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SEL \
+ MAKEMASK(0x1F, IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_CLK_REF_SHIFT)
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_MODE_SHIFT 13
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_MODE \
+ MAKEMASK(0x7, IXGBE_ACI_GET_CGU_DPLL_STATUS_STATE_MODE_SHIFT)
+ __le32 phase_offset_h;
+ __le32 phase_offset_l;
+ u8 eec_mode;
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_EEC_MODE_1 0xA
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_EEC_MODE_2 0xB
+#define IXGBE_ACI_GET_CGU_DPLL_STATUS_EEC_MODE_UNKNOWN 0xF
+ u8 rsvd[1];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_dpll_status);
+
+/* Set CGU DPLL config (direct 0x0C67) */
+struct ixgbe_aci_cmd_set_cgu_dpll_config {
+ u8 dpll_num;
+ u8 ref_state;
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_LOS BIT(0)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_SCM BIT(1)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_CFM BIT(2)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_GST BIT(3)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_PFM BIT(4)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_FLOCK_EN BIT(5)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_REF_SW_ESYNC BIT(6)
+ u8 rsvd;
+ u8 config;
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_CLK_REF_SEL MAKEMASK(0x1F, 0)
+#define IXGBE_ACI_SET_CGU_DPLL_CONFIG_MODE MAKEMASK(0x7, 5)
+ u8 rsvd2[8];
+ u8 eec_mode;
+ u8 rsvd3[1];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_dpll_config);
+
+/* Set CGU reference priority (direct 0x0C68) */
+struct ixgbe_aci_cmd_set_cgu_ref_prio {
+ u8 dpll_num;
+ u8 ref_idx;
+ u8 ref_priority;
+ u8 rsvd[11];
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_cgu_ref_prio);
+
+/* Get CGU reference priority (direct 0x0C69) */
+struct ixgbe_aci_cmd_get_cgu_ref_prio {
+ u8 dpll_num;
+ u8 ref_idx;
+ u8 ref_priority; /* Valid only in response */
+ u8 rsvd[13];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_ref_prio);
+
+/* Get CGU info (direct 0x0C6A) */
+struct ixgbe_aci_cmd_get_cgu_info {
+ __le32 cgu_id;
+ __le32 cgu_cfg_ver;
+ __le32 cgu_fw_ver;
+ u8 node_part_num;
+ u8 dev_rev;
+ __le16 node_handle;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_cgu_info);
+
+struct ixgbe_aci_cmd_temp_tca_event {
+ u8 event_desc;
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_SHIFT 0
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_NVM 0
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_EVENT_STATE 1
+#define IXGBE_TEMP_TCA_EVENT_DESC_SUBJ_ALL 2
+
+#define IXGBE_TEMP_TCA_EVENT_DESC_ALARM_SHIFT 2
+#define IXGBE_TEMP_TCA_EVENT_DESC_WARNING_CLEARED 0
+#define IXGBE_TEMP_TCA_EVENT_DESC_ALARM_CLEARED 1
+#define IXGBE_TEMP_TCA_EVENT_DESC_WARNING_RAISED 2
+#define IXGBE_TEMP_TCA_EVENT_DESC_ALARM_RAISED 3
+
+ u8 reserved;
+ __le16 temperature;
+ __le16 thermal_sensor_max_value;
+ __le16 thermal_sensor_min_value;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_temp_tca_event);
+
+/* Debug Dump Internal Data (indirect 0xFF08) */
+struct ixgbe_aci_cmd_debug_dump_internals {
+ __le16 cluster_id; /* Expresses next cluster ID in response */
+#define IXGBE_ACI_DBG_DUMP_CLUSTER_ID_LINK 0
+#define IXGBE_ACI_DBG_DUMP_CLUSTER_ID_FULL_CSR_SPACE 1
+ __le16 table_id; /* Used only for non-memory clusters */
+ __le32 idx; /* In table entries for tables, in bytes for memory */
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_debug_dump_internals);
+
+/* Set Health Status (direct 0xFF20) */
+struct ixgbe_aci_cmd_set_health_status_config {
+ u8 event_source;
+#define IXGBE_ACI_HEALTH_STATUS_SET_PF_SPECIFIC_MASK BIT(0)
+#define IXGBE_ACI_HEALTH_STATUS_SET_ALL_PF_MASK BIT(1)
+#define IXGBE_ACI_HEALTH_STATUS_SET_GLOBAL_MASK BIT(2)
+ u8 reserved[15];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_set_health_status_config);
+
+#define IXGBE_ACI_HEALTH_STATUS_ERR_UNKNOWN_MOD_STRICT 0x101
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_TYPE 0x102
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_QUAL 0x103
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_COMM 0x104
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_CONFLICT 0x105
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_NOT_PRESENT 0x106
+#define IXGBE_ACI_HEALTH_STATUS_INFO_MOD_UNDERUTILIZED 0x107
+#define IXGBE_ACI_HEALTH_STATUS_ERR_UNKNOWN_MOD_LENIENT 0x108
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MOD_DIAGNOSTIC_FEATURE 0x109
+#define IXGBE_ACI_HEALTH_STATUS_ERR_INVALID_LINK_CFG 0x10B
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PORT_ACCESS 0x10C
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PORT_UNREACHABLE 0x10D
+#define IXGBE_ACI_HEALTH_STATUS_INFO_PORT_SPEED_MOD_LIMITED 0x10F
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PARALLEL_FAULT 0x110
+#define IXGBE_ACI_HEALTH_STATUS_INFO_PORT_SPEED_PHY_LIMITED 0x111
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NETLIST_TOPO 0x112
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NETLIST 0x113
+#define IXGBE_ACI_HEALTH_STATUS_ERR_TOPO_CONFLICT 0x114
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LINK_HW_ACCESS 0x115
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LINK_RUNTIME 0x116
+#define IXGBE_ACI_HEALTH_STATUS_ERR_DNL_INIT 0x117
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PHY_NVM_PROG 0x120
+#define IXGBE_ACI_HEALTH_STATUS_ERR_PHY_FW_LOAD 0x121
+#define IXGBE_ACI_HEALTH_STATUS_INFO_RECOVERY 0x500
+#define IXGBE_ACI_HEALTH_STATUS_ERR_FLASH_ACCESS 0x501
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NVM_AUTH 0x502
+#define IXGBE_ACI_HEALTH_STATUS_ERR_OROM_AUTH 0x503
+#define IXGBE_ACI_HEALTH_STATUS_ERR_DDP_AUTH 0x504
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NVM_COMPAT 0x505
+#define IXGBE_ACI_HEALTH_STATUS_ERR_OROM_COMPAT 0x506
+#define IXGBE_ACI_HEALTH_STATUS_ERR_NVM_SEC_VIOLATION 0x507
+#define IXGBE_ACI_HEALTH_STATUS_ERR_OROM_SEC_VIOLATION 0x508
+#define IXGBE_ACI_HEALTH_STATUS_ERR_DCB_MIB 0x509
+#define IXGBE_ACI_HEALTH_STATUS_ERR_MNG_TIMEOUT 0x50A
+#define IXGBE_ACI_HEALTH_STATUS_ERR_BMC_RESET 0x50B
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LAST_MNG_FAIL 0x50C
+#define IXGBE_ACI_HEALTH_STATUS_ERR_RESOURCE_ALLOC_FAIL 0x50D
+#define IXGBE_ACI_HEALTH_STATUS_ERR_FW_LOOP 0x1000
+#define IXGBE_ACI_HEALTH_STATUS_ERR_FW_PFR_FAIL 0x1001
+#define IXGBE_ACI_HEALTH_STATUS_ERR_LAST_FAIL_AQ 0x1002
+
+/* Get Health Status codes (indirect 0xFF21) */
+struct ixgbe_aci_cmd_get_supported_health_status_codes {
+ __le16 health_code_count;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_supported_health_status_codes);
+
+/* Get Health Status (indirect 0xFF22) */
+struct ixgbe_aci_cmd_get_health_status {
+ __le16 health_status_count;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_get_health_status);
+
+/* Get Health Status event buffer entry (0xFF22),
+ * repeated once per reported health status
+ */
+struct ixgbe_aci_cmd_health_status_elem {
+ __le16 health_status_code;
+ __le16 event_source;
+#define IXGBE_ACI_HEALTH_STATUS_PF (0x1)
+#define IXGBE_ACI_HEALTH_STATUS_PORT (0x2)
+#define IXGBE_ACI_HEALTH_STATUS_GLOBAL (0x3)
+ __le32 internal_data1;
+#define IXGBE_ACI_HEALTH_STATUS_UNDEFINED_DATA (0xDEADBEEF)
+ __le32 internal_data2;
+};
+
+IXGBE_CHECK_STRUCT_LEN(12, ixgbe_aci_cmd_health_status_elem);
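[Editor's note: a minimal sketch, not part of this patch, of how a consumer might walk the element buffer returned by Get Health Status (0xFF22). The helper name, and taking the element count from the command response, are assumptions for illustration.]

static void
example_print_health_status(const struct ixgbe_aci_cmd_health_status_elem *elems,
    u16 count)
{
        u16 i;

        for (i = 0; i < count; i++) {
                /* Fields are little-endian on the wire. */
                u16 code = le16toh(elems[i].health_status_code);
                u16 src = le16toh(elems[i].event_source);

                printf("health code 0x%04x from source %u\n", code, src);
        }
}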
+
+/* Clear Health Status (direct 0xFF23) */
+struct ixgbe_aci_cmd_clear_health_status {
+ __le32 reserved[4];
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_clear_health_status);
+
+enum ixgbe_aci_fw_logging_mod {
+ IXGBE_ACI_FW_LOG_ID_GENERAL = 0,
+ IXGBE_ACI_FW_LOG_ID_CTRL = 1,
+ IXGBE_ACI_FW_LOG_ID_LINK = 2,
+ IXGBE_ACI_FW_LOG_ID_LINK_TOPO = 3,
+ IXGBE_ACI_FW_LOG_ID_DNL = 4,
+ IXGBE_ACI_FW_LOG_ID_I2C = 5,
+ IXGBE_ACI_FW_LOG_ID_SDP = 6,
+ IXGBE_ACI_FW_LOG_ID_MDIO = 7,
+ IXGBE_ACI_FW_LOG_ID_ADMINQ = 8,
+ IXGBE_ACI_FW_LOG_ID_HDMA = 9,
+ IXGBE_ACI_FW_LOG_ID_LLDP = 10,
+ IXGBE_ACI_FW_LOG_ID_DCBX = 11,
+ IXGBE_ACI_FW_LOG_ID_DCB = 12,
+ IXGBE_ACI_FW_LOG_ID_XLR = 13,
+ IXGBE_ACI_FW_LOG_ID_NVM = 14,
+ IXGBE_ACI_FW_LOG_ID_AUTH = 15,
+ IXGBE_ACI_FW_LOG_ID_VPD = 16,
+ IXGBE_ACI_FW_LOG_ID_IOSF = 17,
+ IXGBE_ACI_FW_LOG_ID_PARSER = 18,
+ IXGBE_ACI_FW_LOG_ID_SW = 19,
+ IXGBE_ACI_FW_LOG_ID_SCHEDULER = 20,
+ IXGBE_ACI_FW_LOG_ID_TXQ = 21,
+ IXGBE_ACI_FW_LOG_ID_ACL = 22,
+ IXGBE_ACI_FW_LOG_ID_POST = 23,
+ IXGBE_ACI_FW_LOG_ID_WATCHDOG = 24,
+ IXGBE_ACI_FW_LOG_ID_TASK_DISPATCH = 25,
+ IXGBE_ACI_FW_LOG_ID_MNG = 26,
+ IXGBE_ACI_FW_LOG_ID_SYNCE = 27,
+ IXGBE_ACI_FW_LOG_ID_HEALTH = 28,
+ IXGBE_ACI_FW_LOG_ID_TSDRV = 29,
+ IXGBE_ACI_FW_LOG_ID_PFREG = 30,
+ IXGBE_ACI_FW_LOG_ID_MDLVER = 31,
+ IXGBE_ACI_FW_LOG_ID_MAX = 32,
+};
+
+/* Only a single log level should be set; every log level below the set value
+ * is also enabled. E.g., if the log level is set to IXGBE_FWLOG_LEVEL_VERBOSE,
+ * all other log levels are included (except IXGBE_FWLOG_LEVEL_NONE).
+ */
+enum ixgbe_fwlog_level {
+ IXGBE_FWLOG_LEVEL_NONE = 0,
+ IXGBE_FWLOG_LEVEL_ERROR = 1,
+ IXGBE_FWLOG_LEVEL_WARNING = 2,
+ IXGBE_FWLOG_LEVEL_NORMAL = 3,
+ IXGBE_FWLOG_LEVEL_VERBOSE = 4,
+ IXGBE_FWLOG_LEVEL_INVALID, /* all values >= this entry are invalid */
+};
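[Editor's note: a sketch of the cumulative rule stated in the comment above; the helper is hypothetical, not driver API. A message at msg_level is emitted when the single configured level is at or above it.]

static bool
example_fwlog_level_enabled(enum ixgbe_fwlog_level cfg,
    enum ixgbe_fwlog_level msg_level)
{
        if (cfg == IXGBE_FWLOG_LEVEL_NONE || cfg >= IXGBE_FWLOG_LEVEL_INVALID)
                return (false);
        /* Everything at or under the configured value (except NONE). */
        return (msg_level != IXGBE_FWLOG_LEVEL_NONE && msg_level <= cfg);
}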
+
+struct ixgbe_fwlog_module_entry {
+ /* module ID for the corresponding firmware logging event */
+ u16 module_id;
+ /* verbosity level for the module_id */
+ u8 log_level;
+};
+
+struct ixgbe_fwlog_cfg {
+ /* list of modules for configuring log level */
+ struct ixgbe_fwlog_module_entry module_entries[IXGBE_ACI_FW_LOG_ID_MAX];
+#define IXGBE_FWLOG_OPTION_ARQ_ENA BIT(0)
+#define IXGBE_FWLOG_OPTION_UART_ENA BIT(1)
+ /* set before calling ixgbe_fwlog_init() so the PF registers for firmware
+ * logging on initialization
+ */
+#define IXGBE_FWLOG_OPTION_REGISTER_ON_INIT BIT(2)
+ /* set in the ixgbe_fwlog_get() response if the PF is registered for FW
+ * logging events over ARQ
+ */
+#define IXGBE_FWLOG_OPTION_IS_REGISTERED BIT(3)
+ /* options used to configure firmware logging */
+ u16 options;
+ /* minimum number of log events sent per Admin Receive Queue event */
+ u8 log_resolution;
+};
+
+struct ixgbe_fwlog_data {
+ u16 data_size;
+ u8 *data;
+};
+
+struct ixgbe_fwlog_ring {
+ struct ixgbe_fwlog_data *rings;
+ u16 size;
+ u16 head;
+ u16 tail;
+};
+
+#define IXGBE_FWLOG_RING_SIZE_DFLT 256
+#define IXGBE_FWLOG_RING_SIZE_MAX 512
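[Editor's note: assuming the head/tail fields implement a conventional circular buffer — an editorial reading, not spelled out in the patch — index arithmetic would wrap modulo size, e.g.:]

static inline bool
example_fwlog_ring_empty(const struct ixgbe_fwlog_ring *rings)
{
        return (rings->head == rings->tail);
}

static inline u16
example_fwlog_ring_next(const struct ixgbe_fwlog_ring *rings, u16 idx)
{
        return ((idx + 1) % rings->size);
}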
+
+/* Set FW Logging configuration (indirect 0xFF30)
+ * Register for FW Logging (indirect 0xFF31)
+ * Query FW Logging (indirect 0xFF32)
+ * FW Log Event (indirect 0xFF33)
+ * Get FW Log (indirect 0xFF34)
+ * Clear FW Log (indirect 0xFF35)
+ */
+struct ixgbe_aci_cmd_fw_log {
+ u8 cmd_flags;
+#define IXGBE_ACI_FW_LOG_CONF_UART_EN BIT(0)
+#define IXGBE_ACI_FW_LOG_CONF_AQ_EN BIT(1)
+#define IXGBE_ACI_FW_LOG_QUERY_REGISTERED BIT(2)
+#define IXGBE_ACI_FW_LOG_CONF_SET_VALID BIT(3)
+#define IXGBE_ACI_FW_LOG_AQ_REGISTER BIT(0)
+#define IXGBE_ACI_FW_LOG_AQ_QUERY BIT(2)
+#define IXGBE_ACI_FW_LOG_PERSISTENT BIT(0)
+ u8 rsp_flag;
+#define IXGBE_ACI_FW_LOG_MORE_DATA BIT(1)
+ __le16 fw_rt_msb;
+ union {
+ struct {
+ __le32 fw_rt_lsb;
+ } sync;
+ struct {
+ __le16 log_resolution;
+#define IXGBE_ACI_FW_LOG_MIN_RESOLUTION (1)
+#define IXGBE_ACI_FW_LOG_MAX_RESOLUTION (128)
+ __le16 mdl_cnt;
+ } cfg;
+ } ops;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+IXGBE_CHECK_PARAM_LEN(ixgbe_aci_cmd_fw_log);
+
+/* Response Buffer for:
+ * Set Firmware Logging Configuration (0xFF30)
+ * Query FW Logging (0xFF32)
+ */
+struct ixgbe_aci_cmd_fw_log_cfg_resp {
+ __le16 module_identifier;
+ u8 log_level;
+ u8 rsvd0;
+};
+
+IXGBE_CHECK_STRUCT_LEN(4, ixgbe_aci_cmd_fw_log_cfg_resp);
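[Editor's note: a hedged sketch of consuming the Query FW Logging (0xFF32) response. The buffer is assumed to carry one ixgbe_aci_cmd_fw_log_cfg_resp entry per module, with the count taken from ops.cfg.mdl_cnt in the descriptor; the helper name is illustrative.]

static void
example_parse_fw_log_cfg(const struct ixgbe_aci_cmd_fw_log_cfg_resp *resp,
    u16 mdl_cnt, struct ixgbe_fwlog_cfg *cfg)
{
        u16 i, module;

        for (i = 0; i < mdl_cnt; i++) {
                module = le16toh(resp[i].module_identifier);
                if (module < IXGBE_ACI_FW_LOG_ID_MAX)
                        cfg->module_entries[module].log_level =
                            resp[i].log_level;
        }
}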
+
+/**
+ * struct ixgbe_aci_desc - Admin Command Interface (ACI) descriptor
+ * @flags: IXGBE_ACI_FLAG_* flags
+ * @opcode: Admin command opcode
+ * @datalen: length in bytes of indirect/external data buffer
+ * @retval: return value from firmware
+ * @cookie_high: opaque data high-half
+ * @cookie_low: opaque data low-half
+ * @params: command-specific parameters
+ *
+ * Descriptor format for commands the driver posts via the Admin Command
+ * Interface (ACI). The firmware writes back onto the command descriptor and
+ * returns the result of the command. Asynchronous events that are not an
+ * immediate result of the command are written back using the same descriptor
+ * format. Descriptors are in little-endian notation with 32-bit words.
+ */
+struct ixgbe_aci_desc {
+ __le16 flags;
+ __le16 opcode;
+ __le16 datalen;
+ __le16 retval;
+ __le32 cookie_high;
+ __le32 cookie_low;
+ union {
+ u8 raw[16];
+ struct ixgbe_aci_cmd_generic generic;
+ struct ixgbe_aci_cmd_get_ver get_ver;
+ struct ixgbe_aci_cmd_driver_ver driver_ver;
+ struct ixgbe_aci_cmd_get_exp_err exp_err;
+ struct ixgbe_aci_cmd_req_res res_owner;
+ struct ixgbe_aci_cmd_list_caps get_cap;
+ struct ixgbe_aci_cmd_disable_rxen disable_rxen;
+ struct ixgbe_aci_cmd_get_fw_event get_fw_event;
+ struct ixgbe_aci_cmd_get_phy_caps get_phy;
+ struct ixgbe_aci_cmd_set_phy_cfg set_phy;
+ struct ixgbe_aci_cmd_restart_an restart_an;
+ struct ixgbe_aci_cmd_get_link_status get_link_status;
+ struct ixgbe_aci_cmd_set_event_mask set_event_mask;
+ struct ixgbe_aci_cmd_get_link_topo get_link_topo;
+ struct ixgbe_aci_cmd_i2c read_write_i2c;
+ struct ixgbe_aci_cmd_read_i2c_resp read_i2c_resp;
+ struct ixgbe_aci_cmd_mdio read_write_mdio;
+ struct ixgbe_aci_cmd_mdio read_mdio;
+ struct ixgbe_aci_cmd_mdio write_mdio;
+ struct ixgbe_aci_cmd_set_port_id_led set_port_id_led;
+ struct ixgbe_aci_cmd_gpio_by_func read_write_gpio_by_func;
+ struct ixgbe_aci_cmd_gpio read_write_gpio;
+ struct ixgbe_aci_cmd_sff_eeprom read_write_sff_param;
+ struct ixgbe_aci_cmd_prog_topo_dev_nvm prog_topo_dev_nvm;
+ struct ixgbe_aci_cmd_read_topo_dev_nvm read_topo_dev_nvm;
+ struct ixgbe_aci_cmd_nvm nvm;
+ struct ixgbe_aci_cmd_nvm_cfg nvm_cfg;
+ struct ixgbe_aci_cmd_nvm_checksum nvm_checksum;
+ struct ixgbe_aci_cmd_read_write_alt_direct read_write_alt_direct;
+ struct ixgbe_aci_cmd_read_write_alt_indirect read_write_alt_indirect;
+ struct ixgbe_aci_cmd_done_alt_write done_alt_write;
+ struct ixgbe_aci_cmd_clear_port_alt_write clear_port_alt_write;
+ struct ixgbe_aci_cmd_debug_dump_internals debug_dump;
+ struct ixgbe_aci_cmd_set_health_status_config
+ set_health_status_config;
+ struct ixgbe_aci_cmd_get_supported_health_status_codes
+ get_supported_health_status_codes;
+ struct ixgbe_aci_cmd_get_health_status get_health_status;
+ struct ixgbe_aci_cmd_clear_health_status clear_health_status;
+ struct ixgbe_aci_cmd_fw_log fw_log;
+ struct ixgbe_aci_cmd_nvm_sanitization nvm_sanitization;
+ } params;
+};
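[Editor's note: as a sketch only — the actual fill/post helpers live elsewhere in the driver and are not shown in this diff — preparing a direct command reuses the descriptor layout above. Flag handling and posting are omitted; the opcode is Get CGU info (0x0C6A) from earlier in this header.]

static void
example_fill_get_cgu_info(struct ixgbe_aci_desc *desc)
{
        memset(desc, 0, sizeof(*desc));
        desc->opcode = htole16(0x0C6A);
        desc->datalen = 0;      /* direct command: no external buffer */
        /* Response fields (cgu_id, versions, ...) land in desc->params. */
}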
+
+/* E610-specific adapter context structures */
+
+struct ixgbe_link_status {
+ /* Refer to ixgbe_aci_phy_type for bits definition */
+ u64 phy_type_low;
+ u64 phy_type_high;
+ u8 topo_media_conflict;
+ u16 max_frame_size;
+ u16 link_speed;
+ u16 req_speeds;
+ u8 link_cfg_err;
+ u8 lse_ena; /* Link Status Event notification */
+ u8 link_info;
+ u8 an_info;
+ u8 ext_info;
+ u8 fec_info;
+ u8 pacing;
+ /* Refer to the module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE] #defines
+ * in the ixgbe_aci_cmd_get_phy_caps structure
+ */
+ u8 module_type[IXGBE_ACI_MODULE_TYPE_TOTAL_BYTE];
+};
+
+/* Common HW capabilities for SW use */
+struct ixgbe_hw_common_caps {
+ /* Write CSR protection */
+ u64 wr_csr_prot;
+ u32 switching_mode;
+ /* switching mode supported - EVB switching (including cloud) */
+#define IXGBE_NVM_IMAGE_TYPE_EVB 0x0
+
+ /* Manageability mode & supported protocols over MCTP */
+ u32 mgmt_mode;
+#define IXGBE_MGMT_MODE_PASS_THRU_MODE_M 0xF
+#define IXGBE_MGMT_MODE_CTL_INTERFACE_M 0xF0
+#define IXGBE_MGMT_MODE_REDIR_SB_INTERFACE_M 0xF00
+
+ u32 mgmt_protocols_mctp;
+#define IXGBE_MGMT_MODE_PROTO_RSVD BIT(0)
+#define IXGBE_MGMT_MODE_PROTO_PLDM BIT(1)
+#define IXGBE_MGMT_MODE_PROTO_OEM BIT(2)
+#define IXGBE_MGMT_MODE_PROTO_NC_SI BIT(3)
+
+ u32 os2bmc;
+ u32 valid_functions;
+ /* DCB capabilities */
+ u32 active_tc_bitmap;
+ u32 maxtc;
+
+ /* RSS related capabilities */
+ u32 rss_table_size; /* 512 for PFs and 64 for VFs */
+ u32 rss_table_entry_width; /* RSS Entry width in bits */
+
+ /* Tx/Rx queues */
+ u32 num_rxq; /* Number/Total Rx queues */
+ u32 rxq_first_id; /* First queue ID for Rx queues */
+ u32 num_txq; /* Number/Total Tx queues */
+ u32 txq_first_id; /* First queue ID for Tx queues */
+
+ /* MSI-X vectors */
+ u32 num_msix_vectors;
+ u32 msix_vector_first_id;
+
+ /* Max MTU for function or device */
+ u32 max_mtu;
+
+ /* WOL related */
+ u32 num_wol_proxy_fltr;
+ u32 wol_proxy_vsi_seid;
+
+ /* LED/SDP pin count */
+ u32 led_pin_num;
+ u32 sdp_pin_num;
+
+ /* LED/SDP - Supports up to 12 LED pins and 8 SDP signals */
+#define IXGBE_MAX_SUPPORTED_GPIO_LED 12
+#define IXGBE_MAX_SUPPORTED_GPIO_SDP 8
+ u8 led[IXGBE_MAX_SUPPORTED_GPIO_LED];
+ u8 sdp[IXGBE_MAX_SUPPORTED_GPIO_SDP];
+ /* SR-IOV virtualization */
+ u8 sr_iov_1_1; /* SR-IOV enabled */
+ /* VMDQ */
+ u8 vmdq; /* VMDQ supported */
+
+ /* EVB capabilities */
+ u8 evb_802_1_qbg; /* Edge Virtual Bridging */
+ u8 evb_802_1_qbh; /* Bridge Port Extension */
+
+ u8 dcb;
+ u8 iscsi;
+ u8 mgmt_cem;
+
+ /* WoL and APM support */
+#define IXGBE_WOL_SUPPORT_M BIT(0)
+#define IXGBE_ACPI_PROG_MTHD_M BIT(1)
+#define IXGBE_PROXY_SUPPORT_M BIT(2)
+ u8 apm_wol_support;
+ u8 acpi_prog_mthd;
+ u8 proxy_support;
+ bool sec_rev_disabled;
+ bool update_disabled;
+ bool nvm_unified_update;
+ bool netlist_auth;
+#define IXGBE_NVM_MGMT_SEC_REV_DISABLED BIT(0)
+#define IXGBE_NVM_MGMT_UPDATE_DISABLED BIT(1)
+#define IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT BIT(3)
+#define IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT BIT(5)
+ bool no_drop_policy_support;
+ /* PCIe reset avoidance */
+ bool pcie_reset_avoidance; /* false: not supported, true: supported */
+ /* Post update reset restriction */
+ bool reset_restrict_support; /* false: not supported, true: supported */
+
+ /* External topology device images within the NVM */
+#define IXGBE_EXT_TOPO_DEV_IMG_COUNT 4
+ u32 ext_topo_dev_img_ver_high[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u32 ext_topo_dev_img_ver_low[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+ u8 ext_topo_dev_img_part_num[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S 8
+#define IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M \
+ MAKEMASK(0xFF, IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S)
+ bool ext_topo_dev_img_load_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN BIT(0)
+ bool ext_topo_dev_img_prog_en[IXGBE_EXT_TOPO_DEV_IMG_COUNT];
+#define IXGBE_EXT_TOPO_DEV_IMG_PROG_EN BIT(1)
+ /* Support for OROM update in Recovery Mode. */
+ bool orom_recovery_update;
+ bool next_cluster_id_support;
+};
+
+#pragma pack(1)
+struct ixgbe_orom_civd_info {
+ u8 signature[4]; /* Must match ASCII '$CIV' characters */
+ u8 checksum; /* Simple modulo 256 sum of all structure bytes must equal 0 */
+ __le32 combo_ver; /* Combo Image Version number */
+ u8 combo_name_len; /* Length of the unicode combo image version string, max of 32 */
+ __le16 combo_name[32]; /* Unicode string representing the Combo Image version */
+};
+#pragma pack()
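[Editor's note: the signature and checksum comments above fully determine a validity check; a minimal sketch with an illustrative helper name follows.]

static bool
example_civd_valid(const struct ixgbe_orom_civd_info *civd)
{
        const u8 *p = (const u8 *)civd;
        u8 sum = 0;
        size_t i;

        if (memcmp(civd->signature, "$CIV", 4) != 0)
                return (false);
        /* Modulo-256 sum of every structure byte must be zero. */
        for (i = 0; i < sizeof(*civd); i++)
                sum += p[i];
        return (sum == 0);
}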
+
+/* Function specific capabilities */
+struct ixgbe_hw_func_caps {
+ struct ixgbe_hw_common_caps common_cap;
+ u32 num_allocd_vfs; /* Number of allocated VFs */
+ u32 vf_base_id; /* Logical ID of the first VF */
+ u32 guar_num_vsi;
+ bool no_drop_policy_ena;
+};
+
+/* Device wide capabilities */
+struct ixgbe_hw_dev_caps {
+ struct ixgbe_hw_common_caps common_cap;
+ u32 num_vfs_exposed; /* Total number of VFs exposed */
+ u32 num_vsi_allocd_to_host; /* Excluding EMP VSI */
+ u32 num_flow_director_fltr; /* Number of FD filters available */
+ u32 num_funcs;
+};
+
+/* ACI event information */
+struct ixgbe_aci_event {
+ struct ixgbe_aci_desc desc;
+ u16 msg_len;
+ u16 buf_len;
+ u8 *msg_buf;
+};
+
+struct ixgbe_aci_info {
+ enum ixgbe_aci_err last_status; /* last status of sent admin command */
+ struct ixgbe_lock lock; /* admin command interface lock */
+};
+
+/* Minimum Security Revision information */
+struct ixgbe_minsrev_info {
+ u32 nvm;
+ u32 orom;
+ u8 nvm_valid : 1;
+ u8 orom_valid : 1;
+};
+
+/* Enumeration of which flash bank to read from, either the active bank or the
+ * inactive bank. Used to abstract the 1st/2nd bank notion from code that just
+ * wants to read the active or inactive flash bank.
+ */
+enum ixgbe_bank_select {
+ IXGBE_ACTIVE_FLASH_BANK,
+ IXGBE_INACTIVE_FLASH_BANK,
+};
+
+/* Option ROM version information */
+struct ixgbe_orom_info {
+ u8 major; /* Major version of OROM */
+ u8 patch; /* Patch version of OROM */
+ u16 build; /* Build version of OROM */
+ u32 srev; /* Security revision */
+};
+
+/* NVM version information */
+struct ixgbe_nvm_info {
+ u32 eetrack;
+ u32 srev;
+ u8 major;
+ u8 minor;
+};
+
+/* netlist version information */
+struct ixgbe_netlist_info {
+ u32 major; /* major high/low */
+ u32 minor; /* minor high/low */
+ u32 type; /* type high/low */
+ u32 rev; /* revision high/low */
+ u32 hash; /* SHA-1 hash word */
+ u16 cust_ver; /* customer version */
+};
+
+/* Enumeration of possible flash banks for the NVM, OROM, and Netlist modules
+ * of the flash image.
+ */
+enum ixgbe_flash_bank {
+ IXGBE_INVALID_FLASH_BANK,
+ IXGBE_1ST_FLASH_BANK,
+ IXGBE_2ND_FLASH_BANK,
+};
+
+/* information for accessing NVM, OROM, and Netlist flash banks */
+struct ixgbe_bank_info {
+ u32 nvm_ptr; /* Pointer to 1st NVM bank */
+ u32 nvm_size; /* Size of NVM bank */
+ u32 orom_ptr; /* Pointer to 1st OROM bank */
+ u32 orom_size; /* Size of OROM bank */
+ u32 netlist_ptr; /* Pointer to 1st Netlist bank */
+ u32 netlist_size; /* Size of Netlist bank */
+ enum ixgbe_flash_bank nvm_bank; /* Active NVM bank */
+ enum ixgbe_flash_bank orom_bank; /* Active OROM bank */
+ enum ixgbe_flash_bank netlist_bank; /* Active Netlist bank */
+};
+
+/* Flash Chip Information */
+struct ixgbe_flash_info {
+ struct ixgbe_orom_info orom; /* Option ROM version info */
+ struct ixgbe_nvm_info nvm; /* NVM version information */
+ struct ixgbe_netlist_info netlist; /* Netlist version info */
+ struct ixgbe_bank_info banks; /* Flash Bank information */
+ u16 sr_words; /* Shadow RAM size in words */
+ u32 flash_size; /* Size of available flash in bytes */
+ u8 blank_nvm_mode; /* is NVM empty (no FW present) */
+};
+
+#define IXGBE_NVM_CMD_READ 0x0000000B
+#define IXGBE_NVM_CMD_WRITE 0x0000000C
+
+/* NVM Access command */
+struct ixgbe_nvm_access_cmd {
+ u32 command; /* NVM command: READ or WRITE */
+ u32 offset; /* Offset to read/write, in bytes */
+ u32 data_size; /* Size of data field, in bytes */
+};
+
+/* NVM Access data */
+struct ixgbe_nvm_access_data {
+ u32 regval; /* Storage for register value */
+};
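[Editor's note: a hedged sketch of preparing a register read through this pass-through interface; only the command layout comes from the structures above, and the offset value is illustrative.]

struct ixgbe_nvm_access_cmd cmd = {
        .command = IXGBE_NVM_CMD_READ,
        .offset = 0x10,                 /* offset to read, in bytes */
        .data_size = sizeof(u32),       /* size of the data field */
};
struct ixgbe_nvm_access_data data = { 0 };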
+
+#endif /* _IXGBE_TYPE_E610_H_ */
diff --git a/sys/dev/ixgbe/ixgbe_vf.c b/sys/dev/ixgbe/ixgbe_vf.c
index cac3c6b5e5e7..4e48f7f33c9d 100644
--- a/sys/dev/ixgbe/ixgbe_vf.c
+++ b/sys/dev/ixgbe/ixgbe_vf.c
@@ -656,7 +656,8 @@ s32 ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
break;
case IXGBE_LINKS_SPEED_100_82599:
*speed = IXGBE_LINK_SPEED_100_FULL;
- if (hw->mac.type == ixgbe_mac_X550_vf) {
+ if (hw->mac.type == ixgbe_mac_X550_vf ||
+ hw->mac.type == ixgbe_mac_E610_vf) {
if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
*speed = IXGBE_LINK_SPEED_5GB_FULL;
}
diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c
index 60e66aeaf579..43c3af056b67 100644
--- a/sys/dev/ixl/if_ixl.c
+++ b/sys/dev/ixl/if_ixl.c
@@ -1151,13 +1151,20 @@ ixl_if_enable_intr(if_ctx_t ctx)
struct ixl_pf *pf = iflib_get_softc(ctx);
struct ixl_vsi *vsi = &pf->vsi;
struct i40e_hw *hw = vsi->hw;
- struct ixl_rx_queue *que = vsi->rx_queues;
+ struct ixl_rx_queue *rx_que = vsi->rx_queues;
ixl_enable_intr0(hw);
/* Enable queue interrupts */
- for (int i = 0; i < vsi->num_rx_queues; i++, que++)
- /* TODO: Queue index parameter is probably wrong */
- ixl_enable_queue(hw, que->rxr.me);
+ if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
+ for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
+ ixl_enable_queue(hw, rx_que->rxr.me);
+ } else {
+ /*
+ * Set PFINT_LNKLST0 FIRSTQ_INDX to 0x0 so that the
+ * queues can trigger interrupts.
+ */
+ wr32(hw, I40E_PFINT_LNKLST0, 0x0);
+ }
}
/*
@@ -1175,11 +1182,13 @@ ixl_if_disable_intr(if_ctx_t ctx)
if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
- ixl_disable_queue(hw, rx_que->msix - 1);
+ ixl_disable_queue(hw, rx_que->rxr.me);
} else {
- // Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF
- // stops queues from triggering interrupts
- wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
+ /*
+ * Set PFINT_LNKLST0 FIRSTQ_INDX to End of List (0x7FF)
+ * to stop queues from triggering interrupts.
+ */
+ wr32(hw, I40E_PFINT_LNKLST0, IXL_QUEUE_EOL);
}
}
diff --git a/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
index 978e5f25ceaf..cc0bc1f3fcd2 100644
--- a/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
+++ b/sys/dev/mlx5/mlx5_accel/mlx5_ipsec_offload.c
@@ -120,7 +120,7 @@ static void mlx5e_ipsec_packet_setup(void *obj, u32 pdn,
switch (attrs->dir) {
case IPSEC_DIR_OUTBOUND:
- if (attrs->replay_esn.replay_window != 0)
+ if (attrs->replay_esn.trigger)
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_INC_SN);
else
MLX5_SET(ipsec_aso, aso_ctx, mode, MLX5_IPSEC_ASO_MODE);
diff --git a/sys/dev/netmap/if_ptnet.c b/sys/dev/netmap/if_ptnet.c
index bf14bfdb73ea..9c06f7fec530 100644
--- a/sys/dev/netmap/if_ptnet.c
+++ b/sys/dev/netmap/if_ptnet.c
@@ -27,8 +27,9 @@
/* Driver for ptnet paravirtualized network device. */
#include <sys/cdefs.h>
+#include "opt_inet.h"
+#include "opt_inet6.h"
-#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
@@ -75,9 +76,6 @@
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
-#include "opt_inet.h"
-#include "opt_inet6.h"
-
#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
diff --git a/sys/dev/psci/smccc_trng.c b/sys/dev/psci/smccc_trng.c
index ab98837d3841..8a2e5508ef48 100644
--- a/sys/dev/psci/smccc_trng.c
+++ b/sys/dev/psci/smccc_trng.c
@@ -58,7 +58,7 @@ static device_attach_t trng_attach;
static unsigned trng_read(void *, unsigned);
-static struct random_source random_trng = {
+static const struct random_source random_trng = {
.rs_ident = "Arm SMCCC TRNG",
.rs_source = RANDOM_PURE_ARM_TRNG,
.rs_read = trng_read,
diff --git a/sys/dev/qcom_rnd/qcom_rnd.c b/sys/dev/qcom_rnd/qcom_rnd.c
index fdd0b553523e..a5ece7e00f28 100644
--- a/sys/dev/qcom_rnd/qcom_rnd.c
+++ b/sys/dev/qcom_rnd/qcom_rnd.c
@@ -63,7 +63,7 @@ static int qcom_rnd_detach(device_t);
static int qcom_rnd_harvest(struct qcom_rnd_softc *, void *, size_t *);
static unsigned qcom_rnd_read(void *, unsigned);
-static struct random_source random_qcom_rnd = {
+static const struct random_source random_qcom_rnd = {
.rs_ident = "Qualcomm Entropy Adapter",
.rs_source = RANDOM_PURE_QUALCOMM,
.rs_read = qcom_rnd_read,
diff --git a/sys/dev/random/armv8rng.c b/sys/dev/random/armv8rng.c
index 61698bfff820..524d80317681 100644
--- a/sys/dev/random/armv8rng.c
+++ b/sys/dev/random/armv8rng.c
@@ -44,7 +44,7 @@
static u_int random_rndr_read(void *, u_int);
static bool has_rndr;
-static struct random_source random_armv8_rndr = {
+static const struct random_source random_armv8_rndr = {
.rs_ident = "Armv8 rndr RNG",
.rs_source = RANDOM_PURE_ARMV8,
.rs_read = random_rndr_read,
diff --git a/sys/dev/random/darn.c b/sys/dev/random/darn.c
index a66754e095fb..9bb4991df82f 100644
--- a/sys/dev/random/darn.c
+++ b/sys/dev/random/darn.c
@@ -56,7 +56,7 @@
static u_int random_darn_read(void *, u_int);
-static struct random_source random_darn = {
+static const struct random_source random_darn = {
.rs_ident = "PowerISA DARN random number generator",
.rs_source = RANDOM_PURE_DARN,
.rs_read = random_darn_read
diff --git a/sys/dev/random/ivy.c b/sys/dev/random/ivy.c
index 05474d977276..fa1e4831f1b9 100644
--- a/sys/dev/random/ivy.c
+++ b/sys/dev/random/ivy.c
@@ -51,7 +51,7 @@
static bool has_rdrand, has_rdseed;
static u_int random_ivy_read(void *, u_int);
-static struct random_source random_ivy = {
+static const struct random_source random_ivy = {
.rs_ident = "Intel Secure Key RNG",
.rs_source = RANDOM_PURE_RDRAND,
.rs_read = random_ivy_read
diff --git a/sys/dev/random/nehemiah.c b/sys/dev/random/nehemiah.c
index f76071290b8f..56f144169dae 100644
--- a/sys/dev/random/nehemiah.c
+++ b/sys/dev/random/nehemiah.c
@@ -44,7 +44,7 @@
static u_int random_nehemiah_read(void *, u_int);
-static struct random_source random_nehemiah = {
+static const struct random_source random_nehemiah = {
.rs_ident = "VIA Nehemiah Padlock RNG",
.rs_source = RANDOM_PURE_NEHEMIAH,
.rs_read = random_nehemiah_read
diff --git a/sys/dev/random/random_harvestq.c b/sys/dev/random/random_harvestq.c
index c7762967c4fb..84ec174bd08e 100644
--- a/sys/dev/random/random_harvestq.c
+++ b/sys/dev/random/random_harvestq.c
@@ -110,7 +110,7 @@ __read_frequently u_int hc_source_mask;
struct random_sources {
CK_LIST_ENTRY(random_sources) rrs_entries;
- struct random_source *rrs_source;
+ const struct random_source *rrs_source;
};
static CK_LIST_HEAD(sources_head, random_sources) source_list =
@@ -493,9 +493,9 @@ random_healthtest_init(enum random_entropy_source source)
* The RCT limit comes from the formula in section 4.4.1.
*
* The APT cutoff is calculated using the formula in section 4.4.2
- * footnote 10 with the window size changed from 512 to 511, since the
- * test as written counts the number of samples equal to the first
- * sample in the window, and thus tests W-1 samples.
+ * footnote 10 with the number of Bernoulli trials changed from W to
+ * W-1, since the test as written counts the number of samples equal to
+ * the first sample in the window, and thus tests W-1 samples.
*/
ht->ht_rct_limit = 35;
ht->ht_apt_cutoff = 330;
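[Editor's note: restating the adjusted APT from the comment above as code — an editorial sketch with byte-wide samples for simplicity; the in-tree implementation differs. The first sample of a W = 512 window is the reference and the remaining W - 1 = 511 samples are the Bernoulli trials, so the test fails when the match count reaches the cutoff.]

static bool
example_apt_failed(const uint8_t *window, size_t w, int cutoff)
{
        int matches = 0;
        size_t i;

        for (i = 1; i < w; i++)
                if (window[i] == window[0])
                        matches++;
        return (matches >= cutoff);
}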
@@ -849,7 +849,7 @@ random_harvest_deregister_source(enum random_entropy_source source)
}
void
-random_source_register(struct random_source *rsource)
+random_source_register(const struct random_source *rsource)
{
struct random_sources *rrs;
@@ -868,7 +868,7 @@ random_source_register(struct random_source *rsource)
}
void
-random_source_deregister(struct random_source *rsource)
+random_source_deregister(const struct random_source *rsource)
{
struct random_sources *rrs = NULL;
diff --git a/sys/dev/random/randomdev.h b/sys/dev/random/randomdev.h
index e1c9ac7b680d..6d742447ea8b 100644
--- a/sys/dev/random/randomdev.h
+++ b/sys/dev/random/randomdev.h
@@ -103,8 +103,8 @@ struct random_source {
random_source_read_t *rs_read;
};
-void random_source_register(struct random_source *);
-void random_source_deregister(struct random_source *);
+void random_source_register(const struct random_source *);
+void random_source_deregister(const struct random_source *);
#endif /* _KERNEL */
diff --git a/sys/dev/ufshci/ufshci.h b/sys/dev/ufshci/ufshci.h
index 9f0faaadeb57..b96d82ff836e 100644
--- a/sys/dev/ufshci/ufshci.h
+++ b/sys/dev/ufshci/ufshci.h
@@ -160,19 +160,19 @@ enum ufshci_data_direction {
UFSHCI_DATA_DIRECTION_RESERVED = 0b11,
};
-enum ufshci_overall_command_status {
- UFSHCI_OCS_SUCCESS = 0x0,
- UFSHCI_OCS_INVALID_COMMAND_TABLE_ATTRIBUTES = 0x01,
- UFSHCI_OCS_INVALID_PRDT_ATTRIBUTES = 0x02,
- UFSHCI_OCS_MISMATCH_DATA_BUFFER_SIZE = 0x03,
- UFSHCI_OCS_MISMATCH_RESPONSE_UPIU_SIZE = 0x04,
- UFSHCI_OCS_COMMUNICATION_FAILURE_WITHIN_UIC_LAYERS = 0x05,
- UFSHCI_OCS_ABORTED = 0x06,
- UFSHCI_OCS_HOST_CONTROLLER_FATAL_ERROR = 0x07,
- UFSHCI_OCS_DEVICE_FATAL_ERROR = 0x08,
- UFSHCI_OCS_INVALID_CRYPTO_CONFIGURATION = 0x09,
- UFSHCI_OCS_GENERAL_CRYPTO_ERROR = 0x0A,
- UFSHCI_OCS_INVALID = 0xF,
+enum ufshci_utr_overall_command_status {
+ UFSHCI_UTR_OCS_SUCCESS = 0x0,
+ UFSHCI_UTR_OCS_INVALID_COMMAND_TABLE_ATTRIBUTES = 0x01,
+ UFSHCI_UTR_OCS_INVALID_PRDT_ATTRIBUTES = 0x02,
+ UFSHCI_UTR_OCS_MISMATCH_DATA_BUFFER_SIZE = 0x03,
+ UFSHCI_UTR_OCS_MISMATCH_RESPONSE_UPIU_SIZE = 0x04,
+ UFSHCI_UTR_OCS_COMMUNICATION_FAILURE_WITHIN_UIC_LAYERS = 0x05,
+ UFSHCI_UTR_OCS_ABORTED = 0x06,
+ UFSHCI_UTR_OCS_HOST_CONTROLLER_FATAL_ERROR = 0x07,
+ UFSHCI_UTR_OCS_DEVICE_FATAL_ERROR = 0x08,
+ UFSHCI_UTR_OCS_INVALID_CRYPTO_CONFIGURATION = 0x09,
+ UFSHCI_UTR_OCS_GENERAL_CRYPTO_ERROR = 0x0A,
+ UFSHCI_UTR_OCS_INVALID = 0xF,
};
struct ufshci_utp_xfer_req_desc {
@@ -271,6 +271,18 @@ _Static_assert(sizeof(struct ufshci_utp_cmd_desc) ==
#define UFSHCI_UTP_TASK_MGMT_REQ_SIZE 32
#define UFSHCI_UTP_TASK_MGMT_RESP_SIZE 32
+enum ufshci_utmr_overall_command_status {
+ UFSHCI_UTMR_OCS_SUCCESS = 0x0,
+ UFSHCI_UTMR_OCS_INVALID_TASK_MANAGEMENT_FUNCTION_ATTRIBUTES = 0x01,
+ UFSHCI_UTMR_OCS_MISMATCH_TASK_MANAGEMENT_REQUEST_SIZE = 0x02,
+ UFSHCI_UTMR_OCS_MISMATCH_TASK_MANAGEMENT_RESPONSE_SIZE = 0x03,
+ UFSHCI_UTMR_OCS_PEER_COMMUNICATION_FAILURE = 0x04,
+ UFSHCI_UTMR_OCS_ABORTED = 0x05,
+ UFSHCI_UTMR_OCS_FATAL_ERROR = 0x06,
+ UFSHCI_UTMR_OCS_DEVICE_FATAL_ERROR = 0x07,
+ UFSHCI_UTMR_OCS_INVALID = 0xF,
+};
+
/* UFSHCI spec 4.1, section 6.3.1 "UTP Task Management Request Descriptor" */
struct ufshci_utp_task_mgmt_req_desc {
/* dword 0 */
@@ -356,6 +368,7 @@ struct ufshci_upiu {
_Static_assert(sizeof(struct ufshci_upiu) == 512,
"ufshci_upiu must be 512 bytes");
+/* UFS Spec 4.1, section 10.7.1 "COMMAND UPIU" */
struct ufshci_cmd_command_upiu {
/* dword 0-2 */
struct ufshci_upiu_header header;
@@ -376,6 +389,7 @@ _Static_assert(sizeof(struct ufshci_cmd_command_upiu) % UFSHCI_UPIU_ALIGNMENT ==
0,
"UPIU requires 64-bit alignment");
+/* UFS Spec 4.1, section 10.7.2 "RESPONSE UPIU" */
struct ufshci_cmd_response_upiu {
/* dword 0-2 */
struct ufshci_upiu_header header;
@@ -403,6 +417,69 @@ _Static_assert(sizeof(struct ufshci_cmd_response_upiu) %
0,
"UPIU requires 64-bit alignment");
+enum task_management_function {
+ UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK = 0x01,
+ UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK_SET = 0x02,
+ UFSHCI_TASK_MGMT_FUNCTION_CLEAR_TASK_SET = 0x04,
+ UFSHCI_TASK_MGMT_FUNCTION_LOGICAL_UNIT_RESET = 0x08,
+ UFSHCI_TASK_MGMT_FUNCTION_QUERY_TASK = 0x80,
+ UFSHCI_TASK_MGMT_FUNCTION_QUERY_TASKSET = 0x81,
+};
+
+/* UFS Spec 4.1, section 10.7.6 "TASK MANAGEMENT REQUEST UPIU" */
+struct ufshci_task_mgmt_request_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3 */
+ uint32_t input_param1; /* (Big-endian) */
+ /* dword 4 */
+ uint32_t input_param2; /* (Big-endian) */
+ /* dword 5 */
+ uint32_t input_param3; /* (Big-endian) */
+ /* dword 6-7 */
+ uint8_t reserved[8];
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) == 32,
+ "bad size for ufshci_task_mgmt_request_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) <=
+ UFSHCI_UTP_XFER_RESP_SIZE,
+ "bad size for ufshci_task_mgmt_request_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_request_upiu) %
+ UFSHCI_UPIU_ALIGNMENT ==
+ 0,
+ "UPIU requires 64-bit alignment");
+
+enum task_management_service_response {
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_COMPLETE = 0x00,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_NOT_SUPPORTED = 0x04,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_FAILED = 0x05,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_FUNCTION_SUCCEEDED = 0x08,
+ UFSHCI_TASK_MGMT_SERVICE_RESPONSE_INCORRECT_LUN = 0x09,
+};
+
+/* UFS Spec 4.1, section 10.7.7 "TASK MANAGEMENT RESPONSE UPIU" */
+struct ufshci_task_mgmt_response_upiu {
+ /* dword 0-2 */
+ struct ufshci_upiu_header header;
+ /* dword 3 */
+ uint32_t output_param1; /* (Big-endian) */
+ /* dword 4 */
+ uint32_t output_param2; /* (Big-endian) */
+ /* dword 5-7 */
+ uint8_t reserved[12];
+} __packed __aligned(4);
+
+_Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) == 32,
+ "bad size for ufshci_task_mgmt_response_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) <=
+ UFSHCI_UTP_XFER_RESP_SIZE,
+ "bad size for ufshci_task_mgmt_response_upiu");
+_Static_assert(sizeof(struct ufshci_task_mgmt_response_upiu) %
+ UFSHCI_UPIU_ALIGNMENT ==
+ 0,
+ "UPIU requires 64-bit alignment");
+
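[Editor's note: the output parameters are big-endian on the wire (per the comments above), so a consumer byte-swaps before use. Assuming the service response code sits in the least significant byte of output parameter 1, as in other UFS host drivers, a sketch looks like:]

static uint8_t
example_utmr_service_response(const struct ufshci_task_mgmt_response_upiu *upiu)
{
        return (be32toh(upiu->output_param1) & 0xff);
}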
/* UFS Spec 4.1, section 10.7.8 "QUERY REQUEST UPIU" */
enum ufshci_query_function {
UFSHCI_QUERY_FUNC_STANDARD_READ_REQUEST = 0x01,
@@ -554,6 +631,7 @@ union ufshci_reponse_upiu {
struct ufshci_upiu_header header;
struct ufshci_cmd_response_upiu cmd_response_upiu;
struct ufshci_query_response_upiu query_response_upiu;
+ struct ufshci_task_mgmt_response_upiu task_mgmt_response_upiu;
struct ufshci_nop_in_upiu nop_in_upiu;
};
diff --git a/sys/dev/ufshci/ufshci_ctrlr.c b/sys/dev/ufshci/ufshci_ctrlr.c
index 55d8363d3287..37bd32665b2b 100644
--- a/sys/dev/ufshci/ufshci_ctrlr.c
+++ b/sys/dev/ufshci/ufshci_ctrlr.c
@@ -154,12 +154,12 @@ ufshci_ctrlr_construct(struct ufshci_controller *ctrlr, device_t dev)
/* TODO: Initialize interrupt Aggregation Control Register (UTRIACR) */
/* Allocate and initialize UTP Task Management Request List. */
- error = ufshci_utm_req_queue_construct(ctrlr);
+ error = ufshci_utmr_req_queue_construct(ctrlr);
if (error)
return (error);
/* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
- error = ufshci_ut_req_queue_construct(ctrlr);
+ error = ufshci_utr_req_queue_construct(ctrlr);
if (error)
return (error);
@@ -179,8 +179,8 @@ ufshci_ctrlr_destruct(struct ufshci_controller *ctrlr, device_t dev)
/* TODO: Flush In-flight IOs */
/* Release resources */
- ufshci_utm_req_queue_destroy(ctrlr);
- ufshci_ut_req_queue_destroy(ctrlr);
+ ufshci_utmr_req_queue_destroy(ctrlr);
+ ufshci_utr_req_queue_destroy(ctrlr);
if (ctrlr->tag)
bus_teardown_intr(ctrlr->dev, ctrlr->res, ctrlr->tag);
@@ -215,8 +215,8 @@ ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
ufshci_mmio_write_4(ctrlr, ie, 0);
/* Release resources */
- ufshci_utm_req_queue_destroy(ctrlr);
- ufshci_ut_req_queue_destroy(ctrlr);
+ ufshci_utmr_req_queue_destroy(ctrlr);
+ ufshci_utr_req_queue_destroy(ctrlr);
/* Reset Host Controller */
error = ufshci_ctrlr_enable_host_ctrlr(ctrlr);
@@ -232,12 +232,12 @@ ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
ufshci_mmio_write_4(ctrlr, ie, ie);
/* Allocate and initialize UTP Task Management Request List. */
- error = ufshci_utm_req_queue_construct(ctrlr);
+ error = ufshci_utmr_req_queue_construct(ctrlr);
if (error)
return (error);
/* Allocate and initialize UTP Transfer Request List or SQ/CQ. */
- error = ufshci_ut_req_queue_construct(ctrlr);
+ error = ufshci_utr_req_queue_construct(ctrlr);
if (error)
return (error);
@@ -245,6 +245,15 @@ ufshci_ctrlr_reset(struct ufshci_controller *ctrlr)
}
int
+ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr,
+ struct ufshci_request *req)
+{
+ return (
+ ufshci_req_queue_submit_request(&ctrlr->task_mgmt_req_queue, req,
+ /*is_admin*/ false));
+}
+
+int
ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr,
struct ufshci_request *req)
{
@@ -360,8 +369,8 @@ ufshci_ctrlr_start_config_hook(void *arg)
TSENTER();
- if (ufshci_utm_req_queue_enable(ctrlr) == 0 &&
- ufshci_ut_req_queue_enable(ctrlr) == 0)
+ if (ufshci_utmr_req_queue_enable(ctrlr) == 0 &&
+ ufshci_utr_req_queue_enable(ctrlr) == 0)
ufshci_ctrlr_start(ctrlr);
else
ufshci_ctrlr_fail(ctrlr, false);
@@ -445,9 +454,9 @@ ufshci_ctrlr_poll(struct ufshci_controller *ctrlr)
}
/* UTP Task Management Request Completion Status */
if (is & UFSHCIM(UFSHCI_IS_REG_UTMRCS)) {
- ufshci_printf(ctrlr, "TODO: Implement UTMR completion\n");
ufshci_mmio_write_4(ctrlr, is, UFSHCIM(UFSHCI_IS_REG_UTMRCS));
- /* TODO: Implement UTMR completion */
+ ufshci_req_queue_process_completions(
+ &ctrlr->task_mgmt_req_queue);
}
/* UTP Transfer Request Completion Status */
if (is & UFSHCIM(UFSHCI_IS_REG_UTRCS)) {
diff --git a/sys/dev/ufshci/ufshci_ctrlr_cmd.c b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
index ddf28c58fa88..71d163d998af 100644
--- a/sys/dev/ufshci/ufshci_ctrlr_cmd.c
+++ b/sys/dev/ufshci/ufshci_ctrlr_cmd.c
@@ -8,6 +8,32 @@
#include "ufshci_private.h"
void
+ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr,
+ ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t function, uint8_t lun,
+ uint8_t task_tag, uint8_t iid)
+{
+ struct ufshci_request *req;
+ struct ufshci_task_mgmt_request_upiu *upiu;
+
+ req = ufshci_allocate_request_vaddr(NULL, 0, M_WAITOK, cb_fn, cb_arg);
+
+ req->request_size = sizeof(struct ufshci_task_mgmt_request_upiu);
+ req->response_size = sizeof(struct ufshci_task_mgmt_response_upiu);
+
+ upiu = (struct ufshci_task_mgmt_request_upiu *)&req->request_upiu;
+ memset(upiu, 0, req->request_size);
+ upiu->header.trans_type =
+ UFSHCI_UPIU_TRANSACTION_CODE_TASK_MANAGEMENT_REQUEST;
+ upiu->header.lun = lun;
+ upiu->header.ext_iid_or_function = function;
+ upiu->input_param1 = lun;
+ upiu->input_param2 = task_tag;
+ upiu->input_param3 = iid;
+
+ ufshci_ctrlr_submit_task_mgmt_request(ctrlr, req);
+}
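[Editor's note: an illustrative call site, not code from this patch. Aborting the task with tag task_tag on LUN 0 through the new helper would look like

        ufshci_ctrlr_cmd_send_task_mgmt_request(ctrlr, cb_fn, cb_arg,
            UFSHCI_TASK_MGMT_FUNCTION_ABORT_TASK, /*lun*/ 0, task_tag,
            /*iid*/ 0);

where cb_fn and cb_arg are the caller's completion callback and context.]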
+
+void
ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr, ufshci_cb_fn_t cb_fn,
void *cb_arg)
{
diff --git a/sys/dev/ufshci/ufshci_private.h b/sys/dev/ufshci/ufshci_private.h
index ac58d44102a0..1a2742ae2e80 100644
--- a/sys/dev/ufshci/ufshci_private.h
+++ b/sys/dev/ufshci/ufshci_private.h
@@ -125,6 +125,8 @@ struct ufshci_qops {
struct ufshci_tracker **tr);
void (*ring_doorbell)(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr);
+ bool (*is_doorbell_cleared)(struct ufshci_controller *ctrlr,
+ uint8_t slot);
void (*clear_cpl_ntf)(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr);
bool (*process_cpl)(struct ufshci_req_queue *req_queue);
@@ -143,7 +145,10 @@ struct ufshci_hw_queue {
int domain;
int cpu;
- struct ufshci_utp_xfer_req_desc *utrd;
+ union {
+ struct ufshci_utp_xfer_req_desc *utrd;
+ struct ufshci_utp_task_mgmt_req_desc *utmrd;
+ };
bus_dma_tag_t dma_tag_queue;
bus_dmamap_t queuemem_map;
@@ -333,6 +338,8 @@ int ufshci_ctrlr_reset(struct ufshci_controller *ctrlr);
void ufshci_ctrlr_start_config_hook(void *arg);
void ufshci_ctrlr_poll(struct ufshci_controller *ctrlr);
+int ufshci_ctrlr_submit_task_mgmt_request(struct ufshci_controller *ctrlr,
+ struct ufshci_request *req);
int ufshci_ctrlr_submit_admin_request(struct ufshci_controller *ctrlr,
struct ufshci_request *req);
int ufshci_ctrlr_submit_io_request(struct ufshci_controller *ctrlr,
@@ -351,6 +358,9 @@ int ufshci_dev_init_ufs_power_mode(struct ufshci_controller *ctrlr);
int ufshci_dev_get_descriptor(struct ufshci_controller *ctrlr);
/* Controller Command */
+void ufshci_ctrlr_cmd_send_task_mgmt_request(struct ufshci_controller *ctrlr,
+ ufshci_cb_fn_t cb_fn, void *cb_arg, uint8_t function, uint8_t lun,
+ uint8_t task_tag, uint8_t iid);
void ufshci_ctrlr_cmd_send_nop(struct ufshci_controller *ctrlr,
ufshci_cb_fn_t cb_fn, void *cb_arg);
void ufshci_ctrlr_cmd_send_query_request(struct ufshci_controller *ctrlr,
@@ -361,12 +371,12 @@ void ufshci_ctrlr_cmd_send_scsi_command(struct ufshci_controller *ctrlr,
/* Request Queue */
bool ufshci_req_queue_process_completions(struct ufshci_req_queue *req_queue);
-int ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr);
-int ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr);
-void ufshci_utm_req_queue_destroy(struct ufshci_controller *ctrlr);
-void ufshci_ut_req_queue_destroy(struct ufshci_controller *ctrlr);
-int ufshci_utm_req_queue_enable(struct ufshci_controller *ctrlr);
-int ufshci_ut_req_queue_enable(struct ufshci_controller *ctrlr);
+int ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr);
+int ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr);
+void ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr);
+void ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr);
+int ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr);
+int ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr);
void ufshci_req_queue_fail(struct ufshci_controller *ctrlr,
struct ufshci_hw_queue *hwq);
int ufshci_req_queue_submit_request(struct ufshci_req_queue *req_queue,
@@ -385,9 +395,17 @@ int ufshci_req_sdb_enable(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
struct ufshci_tracker **tr);
-void ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr,
+void ufshci_req_sdb_utmr_ring_doorbell(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr);
+void ufshci_req_sdb_utr_ring_doorbell(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr);
+bool ufshci_req_sdb_utmr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot);
+bool ufshci_req_sdb_utr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot);
+void ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr);
-void ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr,
+void ufshci_req_sdb_utr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr);
bool ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue);
int ufshci_req_sdb_get_inflight_io(struct ufshci_controller *ctrlr);
diff --git a/sys/dev/ufshci/ufshci_req_queue.c b/sys/dev/ufshci/ufshci_req_queue.c
index cc9a2ddae768..bb6efa6d2ccc 100644
--- a/sys/dev/ufshci/ufshci_req_queue.c
+++ b/sys/dev/ufshci/ufshci_req_queue.c
@@ -19,21 +19,36 @@
static void ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
struct ufshci_tracker *tr, enum ufshci_data_direction data_direction);
-static const struct ufshci_qops sdb_qops = {
+static const struct ufshci_qops sdb_utmr_qops = {
.construct = ufshci_req_sdb_construct,
.destroy = ufshci_req_sdb_destroy,
.get_hw_queue = ufshci_req_sdb_get_hw_queue,
.enable = ufshci_req_sdb_enable,
.reserve_slot = ufshci_req_sdb_reserve_slot,
.reserve_admin_slot = ufshci_req_sdb_reserve_slot,
- .ring_doorbell = ufshci_req_sdb_ring_doorbell,
- .clear_cpl_ntf = ufshci_req_sdb_clear_cpl_ntf,
+ .ring_doorbell = ufshci_req_sdb_utmr_ring_doorbell,
+ .is_doorbell_cleared = ufshci_req_sdb_utmr_is_doorbell_cleared,
+ .clear_cpl_ntf = ufshci_req_sdb_utmr_clear_cpl_ntf,
+ .process_cpl = ufshci_req_sdb_process_cpl,
+ .get_inflight_io = ufshci_req_sdb_get_inflight_io,
+};
+
+static const struct ufshci_qops sdb_utr_qops = {
+ .construct = ufshci_req_sdb_construct,
+ .destroy = ufshci_req_sdb_destroy,
+ .get_hw_queue = ufshci_req_sdb_get_hw_queue,
+ .enable = ufshci_req_sdb_enable,
+ .reserve_slot = ufshci_req_sdb_reserve_slot,
+ .reserve_admin_slot = ufshci_req_sdb_reserve_slot,
+ .ring_doorbell = ufshci_req_sdb_utr_ring_doorbell,
+ .is_doorbell_cleared = ufshci_req_sdb_utr_is_doorbell_cleared,
+ .clear_cpl_ntf = ufshci_req_sdb_utr_clear_cpl_ntf,
.process_cpl = ufshci_req_sdb_process_cpl,
.get_inflight_io = ufshci_req_sdb_get_inflight_io,
};
int
-ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr)
+ufshci_utmr_req_queue_construct(struct ufshci_controller *ctrlr)
{
struct ufshci_req_queue *req_queue;
int error;
@@ -44,7 +59,7 @@ ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr)
*/
req_queue = &ctrlr->task_mgmt_req_queue;
req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
- req_queue->qops = sdb_qops;
+ req_queue->qops = sdb_utmr_qops;
error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTRM_ENTRIES,
/*is_task_mgmt*/ true);
@@ -53,21 +68,21 @@ ufshci_utm_req_queue_construct(struct ufshci_controller *ctrlr)
}
void
-ufshci_utm_req_queue_destroy(struct ufshci_controller *ctrlr)
+ufshci_utmr_req_queue_destroy(struct ufshci_controller *ctrlr)
{
ctrlr->task_mgmt_req_queue.qops.destroy(ctrlr,
&ctrlr->task_mgmt_req_queue);
}
int
-ufshci_utm_req_queue_enable(struct ufshci_controller *ctrlr)
+ufshci_utmr_req_queue_enable(struct ufshci_controller *ctrlr)
{
return (ctrlr->task_mgmt_req_queue.qops.enable(ctrlr,
&ctrlr->task_mgmt_req_queue));
}
int
-ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr)
+ufshci_utr_req_queue_construct(struct ufshci_controller *ctrlr)
{
struct ufshci_req_queue *req_queue;
int error;
@@ -79,7 +94,7 @@ ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr)
*/
req_queue = &ctrlr->transfer_req_queue;
req_queue->queue_mode = UFSHCI_Q_MODE_SDB;
- req_queue->qops = sdb_qops;
+ req_queue->qops = sdb_utr_qops;
error = req_queue->qops.construct(ctrlr, req_queue, UFSHCI_UTR_ENTRIES,
/*is_task_mgmt*/ false);
@@ -88,14 +103,14 @@ ufshci_ut_req_queue_construct(struct ufshci_controller *ctrlr)
}
void
-ufshci_ut_req_queue_destroy(struct ufshci_controller *ctrlr)
+ufshci_utr_req_queue_destroy(struct ufshci_controller *ctrlr)
{
ctrlr->transfer_req_queue.qops.destroy(ctrlr,
&ctrlr->transfer_req_queue);
}
int
-ufshci_ut_req_queue_enable(struct ufshci_controller *ctrlr)
+ufshci_utr_req_queue_enable(struct ufshci_controller *ctrlr)
{
return (ctrlr->transfer_req_queue.qops.enable(ctrlr,
&ctrlr->transfer_req_queue));
@@ -213,20 +228,30 @@ ufshci_req_queue_complete_tracker(struct ufshci_tracker *tr)
struct ufshci_req_queue *req_queue = tr->req_queue;
struct ufshci_request *req = tr->req;
struct ufshci_completion cpl;
- struct ufshci_utp_xfer_req_desc *desc;
uint8_t ocs;
bool retry, error, retriable;
mtx_assert(&tr->hwq->qlock, MA_NOTOWNED);
- bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
- BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
+ /*
+ * Copy the response from the UTP Task Management Request Descriptor
+ * or the UTP Command Descriptor.
+ */
+ if (req_queue->is_task_mgmt) {
+ cpl.size = tr->response_size;
+ memcpy(&cpl.response_upiu,
+ (void *)tr->hwq->utmrd[tr->slot_num].response_upiu,
+ cpl.size);
- cpl.size = tr->response_size;
- memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu, cpl.size);
+ ocs = tr->hwq->utmrd[tr->slot_num].overall_command_status;
+ } else {
+ bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
+ BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- desc = &tr->hwq->utrd[tr->slot_num];
- ocs = desc->overall_command_status;
+ cpl.size = tr->response_size;
+ memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu,
+ cpl.size);
+
+ ocs = tr->hwq->utrd[tr->slot_num].overall_command_status;
+ }
error = ufshci_req_queue_response_is_error(req_queue, ocs,
&cpl.response_upiu);
@@ -358,7 +383,19 @@ ufshci_req_queue_prepare_prdt(struct ufshci_tracker *tr)
}
static void
-ufshci_req_queue_fill_descriptor(struct ufshci_utp_xfer_req_desc *desc,
+ufshci_req_queue_fill_utmr_descriptor(
+ struct ufshci_utp_task_mgmt_req_desc *desc, struct ufshci_request *req)
+{
+ memset(desc, 0, sizeof(struct ufshci_utp_task_mgmt_req_desc));
+ desc->interrupt = true;
+ /* Set the initial value to Invalid. */
+ desc->overall_command_status = UFSHCI_UTMR_OCS_INVALID;
+
+ memcpy(desc->request_upiu, &req->request_upiu, req->request_size);
+}
+
+static void
+ufshci_req_queue_fill_utr_descriptor(struct ufshci_utp_xfer_req_desc *desc,
uint8_t data_direction, const uint64_t paddr, const uint16_t response_off,
const uint16_t response_len, const uint16_t prdt_off,
const uint16_t prdt_entry_cnt)
@@ -378,7 +415,7 @@ ufshci_req_queue_fill_descriptor(struct ufshci_utp_xfer_req_desc *desc,
desc->data_direction = data_direction;
desc->interrupt = true;
/* Set the initial value to Invalid. */
- desc->overall_command_status = UFSHCI_OCS_INVALID;
+ desc->overall_command_status = UFSHCI_UTR_OCS_INVALID;
desc->utp_command_descriptor_base_address = (uint32_t)(paddr &
0xffffffff);
desc->utp_command_descriptor_base_address_upper = (uint32_t)(paddr >>
@@ -407,26 +444,32 @@ ufshci_req_queue_submit_tracker(struct ufshci_req_queue *req_queue,
/* TODO: Check timeout */
- request_len = req->request_size;
- response_off = UFSHCI_UTP_XFER_REQ_SIZE;
- response_len = req->response_size;
-
- /* Prepare UTP Command Descriptor */
- memcpy(tr->ucd, &req->request_upiu, request_len);
- memset((uint8_t *)tr->ucd + response_off, 0, response_len);
-
- /* Prepare PRDT */
- if (req->payload_valid)
- ufshci_req_queue_prepare_prdt(tr);
-
- bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
- BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
-
- /* Prepare UTP Transfer Request Descriptor. */
- ucd_paddr = tr->ucd_bus_addr;
- ufshci_req_queue_fill_descriptor(&tr->hwq->utrd[slot_num],
- data_direction, ucd_paddr, response_off, response_len, tr->prdt_off,
- tr->prdt_entry_cnt);
+ if (req_queue->is_task_mgmt) {
+ /* Prepare UTP Task Management Request Descriptor. */
+ ufshci_req_queue_fill_utmr_descriptor(&tr->hwq->utmrd[slot_num],
+ req);
+ } else {
+ request_len = req->request_size;
+ response_off = UFSHCI_UTP_XFER_REQ_SIZE;
+ response_len = req->response_size;
+
+ /* Prepare UTP Command Descriptor */
+ memcpy(tr->ucd, &req->request_upiu, request_len);
+ memset((uint8_t *)tr->ucd + response_off, 0, response_len);
+
+ /* Prepare PRDT */
+ if (req->payload_valid)
+ ufshci_req_queue_prepare_prdt(tr);
+
+ /* Prepare UTP Transfer Request Descriptor. */
+ ucd_paddr = tr->ucd_bus_addr;
+ ufshci_req_queue_fill_utr_descriptor(&tr->hwq->utrd[slot_num],
+ data_direction, ucd_paddr, response_off, response_len,
+ tr->prdt_off, tr->prdt_entry_cnt);
+
+ bus_dmamap_sync(req_queue->dma_tag_ucd, req_queue->ucdmem_map,
+ BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
+ }
bus_dmamap_sync(tr->hwq->dma_tag_queue, tr->hwq->queuemem_map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
diff --git a/sys/dev/ufshci/ufshci_req_sdb.c b/sys/dev/ufshci/ufshci_req_sdb.c
index b1f303afaef5..834a459d48e3 100644
--- a/sys/dev/ufshci/ufshci_req_sdb.c
+++ b/sys/dev/ufshci/ufshci_req_sdb.c
@@ -26,12 +26,6 @@ ufshci_req_sdb_cmd_desc_destroy(struct ufshci_req_queue *req_queue)
tr = hwq->act_tr[i];
bus_dmamap_destroy(req_queue->dma_tag_payload,
tr->payload_dma_map);
- free(tr, M_UFSHCI);
- }
-
- if (hwq->act_tr) {
- free(hwq->act_tr, M_UFSHCI);
- hwq->act_tr = NULL;
}
if (req_queue->ucd) {
@@ -76,7 +70,6 @@ ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue,
uint32_t num_entries, struct ufshci_controller *ctrlr)
{
struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
- struct ufshci_tracker *tr;
size_t ucd_allocsz, payload_allocsz;
uint8_t *ucdmem;
int i, error;
@@ -134,27 +127,14 @@ ufshci_req_sdb_cmd_desc_construct(struct ufshci_req_queue *req_queue,
goto out;
}
- hwq->act_tr = malloc_domainset(sizeof(struct ufshci_tracker *) *
- req_queue->num_entries,
- M_UFSHCI, DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
-
for (i = 0; i < req_queue->num_trackers; i++) {
- tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI,
- DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
-
bus_dmamap_create(req_queue->dma_tag_payload, 0,
- &tr->payload_dma_map);
+ &hwq->act_tr[i]->payload_dma_map);
- tr->req_queue = req_queue;
- tr->slot_num = i;
- tr->slot_state = UFSHCI_SLOT_STATE_FREE;
-
- tr->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;
- tr->ucd_bus_addr = hwq->ucd_bus_addr[i];
+ hwq->act_tr[i]->ucd = (struct ufshci_utp_cmd_desc *)ucdmem;
+ hwq->act_tr[i]->ucd_bus_addr = hwq->ucd_bus_addr[i];
ucdmem += sizeof(struct ufshci_utp_cmd_desc);
-
- hwq->act_tr[i] = tr;
}
return (0);
@@ -163,25 +143,16 @@ out:
return (ENOMEM);
}
-static bool
-ufshci_req_sdb_is_doorbell_cleared(struct ufshci_controller *ctrlr,
- uint8_t slot)
-{
- uint32_t utrldbr;
-
- utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
- return (!(utrldbr & (1 << slot)));
-}
-
int
ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue, uint32_t num_entries, bool is_task_mgmt)
{
struct ufshci_hw_queue *hwq;
- size_t allocsz;
+ size_t desc_size, alloc_size;
uint64_t queuemem_phys;
uint8_t *queuemem;
- int error;
+ struct ufshci_tracker *tr;
+ int i, error;
req_queue->ctrlr = ctrlr;
req_queue->is_task_mgmt = is_task_mgmt;
@@ -209,10 +180,13 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
* Descriptor (UTRD) or UTP Task Management Request Descriptor (UTMRD))
* Note: UTRD/UTMRD format is restricted to 1024-byte alignment.
*/
- allocsz = num_entries * sizeof(struct ufshci_utp_xfer_req_desc);
+ desc_size = is_task_mgmt ?
+ sizeof(struct ufshci_utp_task_mgmt_req_desc) :
+ sizeof(struct ufshci_utp_xfer_req_desc);
+ alloc_size = num_entries * desc_size;
error = bus_dma_tag_create(bus_get_dma_tag(ctrlr->dev), 1024,
ctrlr->page_size, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
- allocsz, 1, allocsz, 0, NULL, NULL, &hwq->dma_tag_queue);
+ alloc_size, 1, alloc_size, 0, NULL, NULL, &hwq->dma_tag_queue);
if (error != 0) {
ufshci_printf(ctrlr, "request queue tag create failed %d\n",
error);
@@ -227,7 +201,7 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
}
if (bus_dmamap_load(hwq->dma_tag_queue, hwq->queuemem_map, queuemem,
- allocsz, ufshci_single_map, &queuemem_phys, 0) != 0) {
+ alloc_size, ufshci_single_map, &queuemem_phys, 0) != 0) {
ufshci_printf(ctrlr, "failed to load request queue memory\n");
bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
hwq->queuemem_map);
@@ -238,13 +212,30 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
hwq->num_intr_handler_calls = 0;
hwq->num_retries = 0;
hwq->num_failures = 0;
- hwq->utrd = (struct ufshci_utp_xfer_req_desc *)queuemem;
hwq->req_queue_addr = queuemem_phys;
+ /* Allocate trackers */
+ hwq->act_tr = malloc_domainset(sizeof(struct ufshci_tracker *) *
+ req_queue->num_entries,
+ M_UFSHCI, DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
+
+ for (i = 0; i < req_queue->num_trackers; i++) {
+ tr = malloc_domainset(sizeof(struct ufshci_tracker), M_UFSHCI,
+ DOMAINSET_PREF(req_queue->domain), M_ZERO | M_WAITOK);
+
+ tr->req_queue = req_queue;
+ tr->slot_num = i;
+ tr->slot_state = UFSHCI_SLOT_STATE_FREE;
+
+ hwq->act_tr[i] = tr;
+ }
+
if (is_task_mgmt) {
/* UTP Task Management Request (UTMR) */
uint32_t utmrlba, utmrlbau;
+ hwq->utmrd = (struct ufshci_utp_task_mgmt_req_desc *)queuemem;
+
utmrlba = hwq->req_queue_addr & 0xffffffff;
utmrlbau = hwq->req_queue_addr >> 32;
ufshci_mmio_write_4(ctrlr, utmrlba, utmrlba);
@@ -253,6 +244,8 @@ ufshci_req_sdb_construct(struct ufshci_controller *ctrlr,
/* UTP Transfer Request (UTR) */
uint32_t utrlba, utrlbau;
+ hwq->utrd = (struct ufshci_utp_xfer_req_desc *)queuemem;
+
/*
* Allocate physical memory for the command descriptor.
* UTP Transfer Request (UTR) requires memory for a separate
@@ -284,10 +277,22 @@ ufshci_req_sdb_destroy(struct ufshci_controller *ctrlr,
struct ufshci_req_queue *req_queue)
{
struct ufshci_hw_queue *hwq = &req_queue->hwq[UFSHCI_SDB_Q];
+ struct ufshci_tracker *tr;
+ int i;
if (!req_queue->is_task_mgmt)
ufshci_req_sdb_cmd_desc_destroy(&ctrlr->transfer_req_queue);
+ for (i = 0; i < req_queue->num_trackers; i++) {
+ tr = hwq->act_tr[i];
+ free(tr, M_UFSHCI);
+ }
+
+ if (hwq->act_tr) {
+ free(hwq->act_tr, M_UFSHCI);
+ hwq->act_tr = NULL;
+ }
+
if (hwq->utrd != NULL) {
bus_dmamap_unload(hwq->dma_tag_queue, hwq->queuemem_map);
bus_dmamem_free(hwq->dma_tag_queue, hwq->utrd,
@@ -389,7 +394,18 @@ ufshci_req_sdb_reserve_slot(struct ufshci_req_queue *req_queue,
}
void
-ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr,
+ufshci_req_sdb_utmr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr)
+{
+ /*
+ * NOP
+ * UTP Task Management does not have a Completion Notification
+ * Register.
+ */
+}
+
+void
+ufshci_req_sdb_utr_clear_cpl_ntf(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr)
{
uint32_t utrlcnr;
@@ -399,7 +415,19 @@ ufshci_req_sdb_clear_cpl_ntf(struct ufshci_controller *ctrlr,
}
void
-ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr,
+ufshci_req_sdb_utmr_ring_doorbell(struct ufshci_controller *ctrlr,
+ struct ufshci_tracker *tr)
+{
+ uint32_t utmrldbr = 0;
+
+ utmrldbr |= 1 << tr->slot_num;
+ ufshci_mmio_write_4(ctrlr, utmrldbr, utmrldbr);
+
+ tr->req_queue->hwq[UFSHCI_SDB_Q].num_cmds++;
+}
+
+void
+ufshci_req_sdb_utr_ring_doorbell(struct ufshci_controller *ctrlr,
struct ufshci_tracker *tr)
{
uint32_t utrldbr = 0;
@@ -408,9 +436,26 @@ ufshci_req_sdb_ring_doorbell(struct ufshci_controller *ctrlr,
ufshci_mmio_write_4(ctrlr, utrldbr, utrldbr);
tr->req_queue->hwq[UFSHCI_SDB_Q].num_cmds++;
+}
+
+bool
+ufshci_req_sdb_utmr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot)
+{
+ uint32_t utmrldbr;
+
+ utmrldbr = ufshci_mmio_read_4(ctrlr, utmrldbr);
+ return (!(utmrldbr & (1 << slot)));
+}
- // utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
- // printf("DB=0x%08x\n", utrldbr);
+bool
+ufshci_req_sdb_utr_is_doorbell_cleared(struct ufshci_controller *ctrlr,
+ uint8_t slot)
+{
+ uint32_t utrldbr;
+
+ utrldbr = ufshci_mmio_read_4(ctrlr, utrldbr);
+ return (!(utrldbr & (1 << slot)));
}
bool
@@ -435,7 +480,7 @@ ufshci_req_sdb_process_cpl(struct ufshci_req_queue *req_queue)
* is cleared.
*/
if (tr->slot_state == UFSHCI_SLOT_STATE_SCHEDULED &&
- ufshci_req_sdb_is_doorbell_cleared(req_queue->ctrlr,
+ req_queue->qops.is_doorbell_cleared(req_queue->ctrlr,
slot)) {
ufshci_req_queue_complete_tracker(tr);
done = true;
diff --git a/sys/dev/usb/input/uhid.c b/sys/dev/usb/input/uhid.c
index a31081663f0c..e2b97f5accac 100644
--- a/sys/dev/usb/input/uhid.c
+++ b/sys/dev/usb/input/uhid.c
@@ -40,8 +40,6 @@
* HID spec: http://www.usb.org/developers/devclass_docs/HID1_11.pdf
*/
-#include "opt_hid.h"
-
#include <sys/stdint.h>
#include <sys/stddef.h>
#include <sys/param.h>
@@ -928,11 +926,7 @@ static device_method_t uhid_methods[] = {
};
static driver_t uhid_driver = {
-#ifdef HIDRAW_MAKE_UHID_ALIAS
- .name = "hidraw",
-#else
.name = "uhid",
-#endif
.methods = uhid_methods,
.size = sizeof(struct uhid_softc),
};
diff --git a/sys/dev/usb/input/usbhid.c b/sys/dev/usb/input/usbhid.c
index df810012b3f8..cba3f34053e5 100644
--- a/sys/dev/usb/input/usbhid.c
+++ b/sys/dev/usb/input/usbhid.c
@@ -114,6 +114,7 @@ struct usbhid_xfer_ctx {
void *cb_ctx;
int waiters;
bool influx;
+ bool no_readahead;
};
struct usbhid_softc {
@@ -272,7 +273,7 @@ usbhid_intr_handler_cb(struct usbhid_xfer_ctx *xfer_ctx)
sc->sc_intr_handler(sc->sc_intr_ctx, xfer_ctx->buf,
xfer_ctx->req.intr.actlen);
- return (0);
+ return (xfer_ctx->no_readahead ? ECANCELED : 0);
}
static int
@@ -430,6 +431,7 @@ usbhid_intr_start(device_t dev, device_t child __unused)
.cb = usbhid_intr_handler_cb,
.cb_ctx = sc,
.buf = sc->sc_intr_buf,
+ .no_readahead = hid_test_quirk(&sc->sc_hw, HQ_NO_READAHEAD),
};
sc->sc_xfer_ctx[POLL_XFER(USBHID_INTR_IN_DT)] = (struct usbhid_xfer_ctx) {
.req.intr.maxlen =
@@ -705,6 +707,10 @@ usbhid_ioctl(device_t dev, device_t child __unused, unsigned long cmd,
if (error == 0)
ucr->ucr_actlen = UGETW(req.ctrl.wLength);
break;
+ case USB_GET_DEVICEINFO:
+ error = usbd_fill_deviceinfo(sc->sc_udev,
+ (struct usb_device_info *)data);
+ break;
default:
error = EINVAL;
}
diff --git a/sys/dev/usb/usb_device.c b/sys/dev/usb/usb_device.c
index 60c2d6745b3f..f0989972f49f 100644
--- a/sys/dev/usb/usb_device.c
+++ b/sys/dev/usb/usb_device.c
@@ -3111,3 +3111,51 @@ usbd_get_endpoint_mode(struct usb_device *udev, struct usb_endpoint *ep)
{
return (ep->ep_mode);
}
+
+/*------------------------------------------------------------------------*
+ * usbd_fill_deviceinfo
+ *
+ * This function dumps information about a USB device to the
+ * structure pointed to by the "di" argument.
+ *
+ * Returns:
+ * 0: Success
+ * Else: Failure
+ *------------------------------------------------------------------------*/
+int
+usbd_fill_deviceinfo(struct usb_device *udev, struct usb_device_info *di)
+{
+ struct usb_device *hub;
+
+ bzero(di, sizeof(di[0]));
+
+ di->udi_bus = device_get_unit(udev->bus->bdev);
+ di->udi_addr = udev->address;
+ di->udi_index = udev->device_index;
+ strlcpy(di->udi_serial, usb_get_serial(udev), sizeof(di->udi_serial));
+ strlcpy(di->udi_vendor, usb_get_manufacturer(udev), sizeof(di->udi_vendor));
+ strlcpy(di->udi_product, usb_get_product(udev), sizeof(di->udi_product));
+ usb_printbcd(di->udi_release, sizeof(di->udi_release),
+ UGETW(udev->ddesc.bcdDevice));
+ di->udi_vendorNo = UGETW(udev->ddesc.idVendor);
+ di->udi_productNo = UGETW(udev->ddesc.idProduct);
+ di->udi_releaseNo = UGETW(udev->ddesc.bcdDevice);
+ di->udi_class = udev->ddesc.bDeviceClass;
+ di->udi_subclass = udev->ddesc.bDeviceSubClass;
+ di->udi_protocol = udev->ddesc.bDeviceProtocol;
+ di->udi_config_no = udev->curr_config_no;
+ di->udi_config_index = udev->curr_config_index;
+ di->udi_power = udev->flags.self_powered ? 0 : udev->power;
+ di->udi_speed = udev->speed;
+ di->udi_mode = udev->flags.usb_mode;
+ di->udi_power_mode = udev->power_mode;
+ di->udi_suspended = udev->flags.peer_suspended;
+
+ hub = udev->parent_hub;
+ if (hub) {
+ di->udi_hubaddr = hub->address;
+ di->udi_hubindex = hub->device_index;
+ di->udi_hubport = udev->port_no;
+ }
+ return (0);
+}
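
A hedged sketch of a kernel-side caller of the newly exported helper; the printed fields are the struct usb_device_info members filled above, and the surrounding driver context (a valid udev pointer) is assumed.

    /* Sketch: query and log a device's identity via the new helper. */
    struct usb_device_info di;

    if (usbd_fill_deviceinfo(udev, &di) == 0)
            printf("bus %d addr %d: %s %s (0x%04x:0x%04x)\n",
                di.udi_bus, di.udi_addr, di.udi_vendor,
                di.udi_product, di.udi_vendorNo, di.udi_productNo);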
diff --git a/sys/dev/usb/usb_generic.c b/sys/dev/usb/usb_generic.c
index c0af27d77e5d..ccb0b2184ec4 100644
--- a/sys/dev/usb/usb_generic.c
+++ b/sys/dev/usb/usb_generic.c
@@ -831,42 +831,7 @@ ugen_get_iface_driver(struct usb_fifo *f, struct usb_gen_descriptor *ugd)
int
ugen_fill_deviceinfo(struct usb_fifo *f, struct usb_device_info *di)
{
- struct usb_device *udev;
- struct usb_device *hub;
-
- udev = f->udev;
-
- bzero(di, sizeof(di[0]));
-
- di->udi_bus = device_get_unit(udev->bus->bdev);
- di->udi_addr = udev->address;
- di->udi_index = udev->device_index;
- strlcpy(di->udi_serial, usb_get_serial(udev), sizeof(di->udi_serial));
- strlcpy(di->udi_vendor, usb_get_manufacturer(udev), sizeof(di->udi_vendor));
- strlcpy(di->udi_product, usb_get_product(udev), sizeof(di->udi_product));
- usb_printbcd(di->udi_release, sizeof(di->udi_release),
- UGETW(udev->ddesc.bcdDevice));
- di->udi_vendorNo = UGETW(udev->ddesc.idVendor);
- di->udi_productNo = UGETW(udev->ddesc.idProduct);
- di->udi_releaseNo = UGETW(udev->ddesc.bcdDevice);
- di->udi_class = udev->ddesc.bDeviceClass;
- di->udi_subclass = udev->ddesc.bDeviceSubClass;
- di->udi_protocol = udev->ddesc.bDeviceProtocol;
- di->udi_config_no = udev->curr_config_no;
- di->udi_config_index = udev->curr_config_index;
- di->udi_power = udev->flags.self_powered ? 0 : udev->power;
- di->udi_speed = udev->speed;
- di->udi_mode = udev->flags.usb_mode;
- di->udi_power_mode = udev->power_mode;
- di->udi_suspended = udev->flags.peer_suspended;
-
- hub = udev->parent_hub;
- if (hub) {
- di->udi_hubaddr = hub->address;
- di->udi_hubindex = hub->device_index;
- di->udi_hubport = udev->port_no;
- }
- return (0);
+ return (usbd_fill_deviceinfo(f->udev, di));
}
int
diff --git a/sys/dev/usb/usbdi.h b/sys/dev/usb/usbdi.h
index 08d130aa2868..0826d9f078c4 100644
--- a/sys/dev/usb/usbdi.h
+++ b/sys/dev/usb/usbdi.h
@@ -38,6 +38,7 @@ struct usb_process;
struct usb_proc_msg;
struct usb_mbuf;
struct usb_fs_privdata;
+struct usb_device_info;
struct mbuf;
typedef enum { /* keep in sync with usb_errstr_table */
@@ -587,6 +588,8 @@ usb_error_t usbd_set_endpoint_mode(struct usb_device *udev,
struct usb_endpoint *ep, uint8_t ep_mode);
uint8_t usbd_get_endpoint_mode(struct usb_device *udev,
struct usb_endpoint *ep);
+int usbd_fill_deviceinfo(struct usb_device *udev,
+ struct usb_device_info *di);
const struct usb_device_id *usbd_lookup_id_by_info(
const struct usb_device_id *id, usb_size_t sizeof_id,
diff --git a/sys/dev/virtio/mmio/virtio_mmio.c b/sys/dev/virtio/mmio/virtio_mmio.c
index 5a81c8a24779..fe531fced998 100644
--- a/sys/dev/virtio/mmio/virtio_mmio.c
+++ b/sys/dev/virtio/mmio/virtio_mmio.c
@@ -53,7 +53,6 @@
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/mmio/virtio_mmio.h>
-#include "virtio_mmio_if.h"
#include "virtio_bus_if.h"
#include "virtio_if.h"
@@ -79,7 +78,6 @@ static int vtmmio_alloc_virtqueues(device_t, int,
struct vq_alloc_info *);
static int vtmmio_setup_intr(device_t, enum intr_type);
static void vtmmio_stop(device_t);
-static void vtmmio_poll(device_t);
static int vtmmio_reinit(device_t, uint64_t);
static void vtmmio_reinit_complete(device_t);
static void vtmmio_notify_virtqueue(device_t, uint16_t, bus_size_t);
@@ -104,29 +102,11 @@ static void vtmmio_vq_intr(void *);
* I/O port read/write wrappers.
*/
#define vtmmio_write_config_1(sc, o, v) \
-do { \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \
- bus_write_1((sc)->res[0], (o), (v)); \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \
-} while (0)
+ bus_write_1((sc)->res[0], (o), (v))
#define vtmmio_write_config_2(sc, o, v) \
-do { \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \
- bus_write_2((sc)->res[0], (o), (v)); \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \
-} while (0)
+ bus_write_2((sc)->res[0], (o), (v))
#define vtmmio_write_config_4(sc, o, v) \
-do { \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \
- bus_write_4((sc)->res[0], (o), (v)); \
- if (sc->platform != NULL) \
- VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \
-} while (0)
+ bus_write_4((sc)->res[0], (o), (v))
#define vtmmio_read_config_1(sc, o) \
bus_read_1((sc)->res[0], (o))
@@ -157,7 +137,6 @@ static device_method_t vtmmio_methods[] = {
DEVMETHOD(virtio_bus_alloc_virtqueues, vtmmio_alloc_virtqueues),
DEVMETHOD(virtio_bus_setup_intr, vtmmio_setup_intr),
DEVMETHOD(virtio_bus_stop, vtmmio_stop),
- DEVMETHOD(virtio_bus_poll, vtmmio_poll),
DEVMETHOD(virtio_bus_reinit, vtmmio_reinit),
DEVMETHOD(virtio_bus_reinit_complete, vtmmio_reinit_complete),
DEVMETHOD(virtio_bus_notify_vq, vtmmio_notify_virtqueue),
@@ -220,19 +199,9 @@ vtmmio_setup_intr(device_t dev, enum intr_type type)
{
struct vtmmio_softc *sc;
int rid;
- int err;
sc = device_get_softc(dev);
- if (sc->platform != NULL) {
- err = VIRTIO_MMIO_SETUP_INTR(sc->platform, sc->dev,
- vtmmio_vq_intr, sc);
- if (err == 0) {
- /* Okay we have backend-specific interrupts */
- return (0);
- }
- }
-
rid = 0;
sc->res[1] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
RF_ACTIVE);
@@ -597,17 +566,6 @@ vtmmio_stop(device_t dev)
vtmmio_reset(device_get_softc(dev));
}
-static void
-vtmmio_poll(device_t dev)
-{
- struct vtmmio_softc *sc;
-
- sc = device_get_softc(dev);
-
- if (sc->platform != NULL)
- VIRTIO_MMIO_POLL(sc->platform);
-}
-
static int
vtmmio_reinit(device_t dev, uint64_t features)
{
diff --git a/sys/dev/virtio/mmio/virtio_mmio.h b/sys/dev/virtio/mmio/virtio_mmio.h
index ac6a96c1c7fe..edcbf0519acc 100644
--- a/sys/dev/virtio/mmio/virtio_mmio.h
+++ b/sys/dev/virtio/mmio/virtio_mmio.h
@@ -37,7 +37,6 @@ struct vtmmio_virtqueue;
struct vtmmio_softc {
device_t dev;
- device_t platform;
struct resource *res[2];
uint64_t vtmmio_features;
diff --git a/sys/dev/virtio/mmio/virtio_mmio_fdt.c b/sys/dev/virtio/mmio/virtio_mmio_fdt.c
index 7fba8aad8db8..bb9ea8efbaeb 100644
--- a/sys/dev/virtio/mmio/virtio_mmio_fdt.c
+++ b/sys/dev/virtio/mmio/virtio_mmio_fdt.c
@@ -63,12 +63,10 @@
#include <dev/virtio/mmio/virtio_mmio.h>
static int vtmmio_fdt_probe(device_t);
-static int vtmmio_fdt_attach(device_t);
static device_method_t vtmmio_fdt_methods[] = {
/* Device interface. */
DEVMETHOD(device_probe, vtmmio_fdt_probe),
- DEVMETHOD(device_attach, vtmmio_fdt_attach),
DEVMETHOD_END
};
@@ -93,48 +91,3 @@ vtmmio_fdt_probe(device_t dev)
return (vtmmio_probe(dev));
}
-
-static int
-vtmmio_setup_platform(device_t dev, struct vtmmio_softc *sc)
-{
- phandle_t platform_node;
- struct fdt_ic *ic;
- phandle_t xref;
- phandle_t node;
-
- sc->platform = NULL;
-
- if ((node = ofw_bus_get_node(dev)) == -1)
- return (ENXIO);
-
- if (OF_searchencprop(node, "platform", &xref,
- sizeof(xref)) == -1) {
- return (ENXIO);
- }
-
- platform_node = OF_node_from_xref(xref);
-
- SLIST_FOREACH(ic, &fdt_ic_list_head, fdt_ics) {
- if (ic->iph == platform_node) {
- sc->platform = ic->dev;
- break;
- }
- }
-
- if (sc->platform == NULL) {
- /* No platform-specific device. Ignore it. */
- }
-
- return (0);
-}
-
-static int
-vtmmio_fdt_attach(device_t dev)
-{
- struct vtmmio_softc *sc;
-
- sc = device_get_softc(dev);
- vtmmio_setup_platform(dev, sc);
-
- return (vtmmio_attach(dev));
-}
diff --git a/sys/dev/virtio/mmio/virtio_mmio_if.m b/sys/dev/virtio/mmio/virtio_mmio_if.m
deleted file mode 100644
index baebbd9a0b1c..000000000000
--- a/sys/dev/virtio/mmio/virtio_mmio_if.m
+++ /dev/null
@@ -1,99 +0,0 @@
-#-
-# Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
-# All rights reserved.
-#
-# This software was developed by SRI International and the University of
-# Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
-# ("CTSRD"), as part of the DARPA CRASH research programme.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
-# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-# SUCH DAMAGE.
-#
-#
-
-#include <sys/types.h>
-
-#
-# This is optional interface to virtio mmio backend.
-# Useful when backend is implemented not by the hardware but software, e.g.
-# by using another cpu core.
-#
-
-INTERFACE virtio_mmio;
-
-CODE {
- static int
- virtio_mmio_prewrite(device_t dev, size_t offset, int val)
- {
-
- return (1);
- }
-
- static int
- virtio_mmio_note(device_t dev, size_t offset, int val)
- {
-
- return (1);
- }
-
- static int
- virtio_mmio_setup_intr(device_t dev, device_t mmio_dev,
- void *handler, void *ih_user)
- {
-
- return (1);
- }
-};
-
-#
-# Inform backend we are going to write data at offset.
-#
-METHOD int prewrite {
- device_t dev;
- size_t offset;
- int val;
-} DEFAULT virtio_mmio_prewrite;
-
-#
-# Inform backend we have data wrotten to offset.
-#
-METHOD int note {
- device_t dev;
- size_t offset;
- int val;
-} DEFAULT virtio_mmio_note;
-
-#
-# Inform backend we are going to poll virtqueue.
-#
-METHOD int poll {
- device_t dev;
-};
-
-#
-# Setup backend-specific interrupts.
-#
-METHOD int setup_intr {
- device_t dev;
- device_t mmio_dev;
- void *handler;
- void *ih_user;
-} DEFAULT virtio_mmio_setup_intr;
diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c
index 2ff9be9680b8..ecb3dbb370e5 100644
--- a/sys/dev/virtio/network/if_vtnet.c
+++ b/sys/dev/virtio/network/if_vtnet.c
@@ -28,6 +28,9 @@
/* Driver for VirtIO network devices. */
+#include "opt_inet.h"
+#include "opt_inet6.h"
+
#include <sys/param.h>
#include <sys/eventhandler.h>
#include <sys/systm.h>
@@ -82,9 +85,6 @@
#include <dev/virtio/network/if_vtnetvar.h>
#include "virtio_if.h"
-#include "opt_inet.h"
-#include "opt_inet6.h"
-
#if defined(INET) || defined(INET6)
#include <machine/in_cksum.h>
#endif
diff --git a/sys/dev/virtio/random/virtio_random.c b/sys/dev/virtio/random/virtio_random.c
index f938ba99ae53..3f30c8b68f4c 100644
--- a/sys/dev/virtio/random/virtio_random.c
+++ b/sys/dev/virtio/random/virtio_random.c
@@ -77,7 +77,7 @@ static struct virtio_feature_desc vtrnd_feature_desc[] = {
{ 0, NULL }
};
-static struct random_source random_vtrnd = {
+static const struct random_source random_vtrnd = {
.rs_ident = "VirtIO Entropy Adapter",
.rs_source = RANDOM_PURE_VIRTIO,
.rs_read = vtrnd_read,
diff --git a/sys/dev/virtio/virtio_bus_if.m b/sys/dev/virtio/virtio_bus_if.m
index 57ae90bdc917..4181b641faad 100644
--- a/sys/dev/virtio/virtio_bus_if.m
+++ b/sys/dev/virtio/virtio_bus_if.m
@@ -109,7 +109,3 @@ METHOD void write_device_config {
int len;
};
-METHOD void poll {
- device_t dev;
-};
-
diff --git a/sys/dev/virtio/virtqueue.c b/sys/dev/virtio/virtqueue.c
index 8cc3326dc08e..cc7a233d60ee 100644
--- a/sys/dev/virtio/virtqueue.c
+++ b/sys/dev/virtio/virtqueue.c
@@ -605,10 +605,8 @@ virtqueue_poll(struct virtqueue *vq, uint32_t *len)
{
void *cookie;
- VIRTIO_BUS_POLL(vq->vq_dev);
while ((cookie = virtqueue_dequeue(vq, len)) == NULL) {
cpu_spinwait();
- VIRTIO_BUS_POLL(vq->vq_dev);
}
return (cookie);
diff --git a/sys/fs/cd9660/cd9660_vnops.c b/sys/fs/cd9660/cd9660_vnops.c
index c4d0e6ba7b30..4a2b80a7ccdd 100644
--- a/sys/fs/cd9660/cd9660_vnops.c
+++ b/sys/fs/cd9660/cd9660_vnops.c
@@ -124,7 +124,7 @@ cd9660_access(struct vop_access_args *ap)
uid_t uid;
gid_t gid;
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
/*
@@ -162,7 +162,7 @@ cd9660_open(struct vop_open_args *ap)
struct vnode *vp = ap->a_vp;
struct iso_node *ip = VTOI(vp);
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
vnode_create_vobject(vp, ip->i_size, ap->a_td);
@@ -191,7 +191,7 @@ cd9660_getattr(struct vop_getattr_args *ap)
vap->va_atime = ip->inode.iso_atime;
vap->va_mtime = ip->inode.iso_mtime;
vap->va_ctime = ip->inode.iso_ctime;
- vap->va_rdev = ip->inode.iso_rdev;
+ vap->va_rdev = VN_ISDEV(vp) ? ip->inode.iso_rdev : NODEV;
vap->va_size = (u_quad_t) ip->i_size;
if (ip->i_size == 0 && (vap->va_mode & S_IFMT) == S_IFLNK) {
@@ -242,7 +242,7 @@ cd9660_ioctl(struct vop_ioctl_args *ap)
VOP_UNLOCK(vp);
return (EBADF);
}
- if (vp->v_type == VCHR || vp->v_type == VBLK) {
+ if (VN_ISDEV(vp)) {
VOP_UNLOCK(vp);
return (EOPNOTSUPP);
}
@@ -280,7 +280,7 @@ cd9660_read(struct vop_read_args *ap)
int seqcount;
long size, n, on;
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
seqcount = ap->a_ioflag >> IO_SEQSHIFT;
@@ -711,7 +711,7 @@ cd9660_strategy(struct vop_strategy_args *ap)
struct bufobj *bo;
ip = VTOI(vp);
- if (vp->v_type == VBLK || vp->v_type == VCHR)
+ if (VN_ISDEV(vp))
panic("cd9660_strategy: spec");
if (bp->b_blkno == bp->b_lblkno) {
bp->b_blkno = (ip->iso_start + bp->b_lblkno) <<
@@ -818,7 +818,7 @@ cd9660_getpages(struct vop_getpages_args *ap)
struct vnode *vp;
vp = ap->a_vp;
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
if (use_buf_pager)
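
The conversions in this and the following filesystem hunks lean on the VN_ISDEV predicate (and, later, VATTR_ISDEV and VTYPE_ISDEV); a hedged sketch of their presumed shape, with the authoritative definitions living in the vnode headers:

    /* Presumed shape of the device-node predicates used in these hunks. */
    #define VTYPE_ISDEV(t)          ((t) == VCHR || (t) == VBLK)
    #define VN_ISDEV(vp)            VTYPE_ISDEV((vp)->v_type)
    #define VATTR_ISDEV(vap)        VTYPE_ISDEV((vap)->va_type)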
diff --git a/sys/fs/ext2fs/ext2_vnops.c b/sys/fs/ext2fs/ext2_vnops.c
index 064c10bd18b2..00389c927087 100644
--- a/sys/fs/ext2fs/ext2_vnops.c
+++ b/sys/fs/ext2fs/ext2_vnops.c
@@ -222,7 +222,7 @@ ext2_itimes_locked(struct vnode *vp)
ip = VTOI(vp);
if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_UPDATE)) == 0)
return;
- if ((vp->v_type == VBLK || vp->v_type == VCHR))
+ if (VN_ISDEV(vp))
ip->i_flag |= IN_LAZYMOD;
else
ip->i_flag |= IN_MODIFIED;
@@ -276,7 +276,7 @@ static int
ext2_open(struct vop_open_args *ap)
{
- if (ap->a_vp->v_type == VBLK || ap->a_vp->v_type == VCHR)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
/*
@@ -360,7 +360,7 @@ ext2_getattr(struct vop_getattr_args *ap)
vap->va_nlink = ip->i_nlink;
vap->va_uid = ip->i_uid;
vap->va_gid = ip->i_gid;
- vap->va_rdev = ip->i_rdev;
+ vap->va_rdev = VN_ISDEV(vp) ? ip->i_rdev : NODEV;
vap->va_size = ip->i_size;
vap->va_atime.tv_sec = ip->i_atime;
vap->va_atime.tv_nsec = E2DI_HAS_XTIME(ip) ? ip->i_atimensec : 0;
@@ -1571,7 +1571,7 @@ ext2_strategy(struct vop_strategy_args *ap)
daddr_t blkno;
int error;
- if (vp->v_type == VBLK || vp->v_type == VCHR)
+ if (VN_ISDEV(vp))
panic("ext2_strategy: spec");
if (bp->b_blkno == bp->b_lblkno) {
if (VTOI(ap->a_vp)->i_flag & IN_E4EXTENTS)
@@ -1733,7 +1733,7 @@ ext2_deleteextattr(struct vop_deleteextattr_args *ap)
if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR))
return (EOPNOTSUPP);
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
@@ -1771,7 +1771,7 @@ ext2_getextattr(struct vop_getextattr_args *ap)
if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR))
return (EOPNOTSUPP);
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
@@ -1814,7 +1814,7 @@ ext2_listextattr(struct vop_listextattr_args *ap)
if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR))
return (EOPNOTSUPP);
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
@@ -1855,7 +1855,7 @@ ext2_setextattr(struct vop_setextattr_args *ap)
if (!EXT2_HAS_COMPAT_FEATURE(ip->i_e2fs, EXT2F_COMPAT_EXT_ATTR))
return (EOPNOTSUPP);
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
diff --git a/sys/fs/fuse/fuse_vnops.c b/sys/fs/fuse/fuse_vnops.c
index b782146b7278..de60a4717dd4 100644
--- a/sys/fs/fuse/fuse_vnops.c
+++ b/sys/fs/fuse/fuse_vnops.c
@@ -1748,7 +1748,7 @@ fuse_vnop_open(struct vop_open_args *ap)
if (fuse_isdeadfs(vp))
return (EXTERROR(ENXIO, "This FUSE session is about "
"to be closed"));
- if (vp->v_type == VCHR || vp->v_type == VBLK || vp->v_type == VFIFO)
+ if (VN_ISDEV(vp) || vp->v_type == VFIFO)
return (EXTERROR(EOPNOTSUPP, "Unsupported vnode type",
vp->v_type));
if ((a_mode & (FREAD | FWRITE | FEXEC)) == 0)
diff --git a/sys/fs/nfs/nfsport.h b/sys/fs/nfs/nfsport.h
index bd6107187966..4e9aae70da6f 100644
--- a/sys/fs/nfs/nfsport.h
+++ b/sys/fs/nfs/nfsport.h
@@ -909,15 +909,6 @@ int nfsmsleep(void *, void *, int, const char *, struct timespec *);
#define NFSBZERO(s, l) bzero((s), (l))
/*
- * Some queue.h files don't have these dfined in them.
- */
-#ifndef LIST_END
-#define LIST_END(head) NULL
-#define SLIST_END(head) NULL
-#define TAILQ_END(head) NULL
-#endif
-
-/*
* This must be defined to be a global variable that increments once
* per second, but never stops or goes backwards, even when a "date"
* command changes the TOD clock. It is used for delta times for
@@ -1026,7 +1017,7 @@ MALLOC_DECLARE(M_NEWNFSDSESSION);
int nfscl_loadattrcache(struct vnode **, struct nfsvattr *, void *, int, int);
int newnfs_realign(struct mbuf **, int);
bool ncl_pager_setsize(struct vnode *vp, u_quad_t *nsizep);
-void ncl_copy_vattr(struct vattr *dst, struct vattr *src);
+void ncl_copy_vattr(struct vnode *vp, struct vattr *dst, struct vattr *src);
/*
* If the port runs on an SMP box that can enforce Atomic ops with low
@@ -1038,9 +1029,6 @@ void ncl_copy_vattr(struct vattr *dst, struct vattr *src);
#define NFSDECRGLOBAL(a) ((a)--)
/*
- * Assorted funky stuff to make things work under Darwin8.
- */
-/*
* These macros checks for a field in vattr being set.
*/
#define NFSATTRISSET(t, v, a) ((v)->a != (t)VNOVAL)
diff --git a/sys/fs/nfsclient/nfs_clport.c b/sys/fs/nfsclient/nfs_clport.c
index 704aeeeabdf2..e9f1dc23ddbe 100644
--- a/sys/fs/nfsclient/nfs_clport.c
+++ b/sys/fs/nfsclient/nfs_clport.c
@@ -412,7 +412,7 @@ nfscl_warn_fileid(struct nfsmount *nmp, struct nfsvattr *oldnap,
}
void
-ncl_copy_vattr(struct vattr *dst, struct vattr *src)
+ncl_copy_vattr(struct vnode *vp, struct vattr *dst, struct vattr *src)
{
dst->va_type = src->va_type;
dst->va_mode = src->va_mode;
@@ -429,7 +429,7 @@ ncl_copy_vattr(struct vattr *dst, struct vattr *src)
dst->va_birthtime = src->va_birthtime;
dst->va_gen = src->va_gen;
dst->va_flags = src->va_flags;
- dst->va_rdev = src->va_rdev;
+ dst->va_rdev = VN_ISDEV(vp) ? src->va_rdev : NODEV;
dst->va_bytes = src->va_bytes;
dst->va_filerev = src->va_filerev;
}
@@ -595,7 +595,7 @@ nfscl_loadattrcache(struct vnode **vpp, struct nfsvattr *nap, void *nvaper,
KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
}
if (vaper != NULL) {
- ncl_copy_vattr(vaper, vap);
+ ncl_copy_vattr(vp, vaper, vap);
if (np->n_flag & NCHG) {
if (np->n_flag & NACC)
vaper->va_atime = np->n_atim;
diff --git a/sys/fs/nfsclient/nfs_clvnops.c b/sys/fs/nfsclient/nfs_clvnops.c
index 6824ee6ef13d..a8b06fdb261b 100644
--- a/sys/fs/nfsclient/nfs_clvnops.c
+++ b/sys/fs/nfsclient/nfs_clvnops.c
@@ -1026,7 +1026,7 @@ nfs_getattr(struct vop_getattr_args *ap)
* cached attributes should be ignored.
*/
if (nmp->nm_fhsize > 0 && ncl_getattrcache(vp, &vattr) == 0) {
- ncl_copy_vattr(vap, &vattr);
+ ncl_copy_vattr(vp, vap, &vattr);
/*
* Get the local modify time for the case of a write
@@ -1782,7 +1782,7 @@ nfs_mknodrpc(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
int error = 0, attrflag, dattrflag;
u_int32_t rdev;
- if (vap->va_type == VCHR || vap->va_type == VBLK)
+ if (VATTR_ISDEV(vap))
rdev = vap->va_rdev;
else if (vap->va_type == VFIFO || vap->va_type == VSOCK)
rdev = 0xffffffff;
diff --git a/sys/fs/nfsserver/nfs_nfsdport.c b/sys/fs/nfsserver/nfs_nfsdport.c
index 496cac263fa0..b2966934f9b7 100644
--- a/sys/fs/nfsserver/nfs_nfsdport.c
+++ b/sys/fs/nfsserver/nfs_nfsdport.c
@@ -3751,6 +3751,7 @@ nfsrv_v4rootexport(void *argp, struct ucred *cred, struct thread *p)
NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, nfsexargp->fspec);
if ((error = namei(&nd)) != 0)
goto out;
+ NDFREE_PNBUF(&nd);
error = nfsvno_getfh(nd.ni_vp, &fh, p);
vrele(nd.ni_vp);
if (!error) {
diff --git a/sys/fs/nfsserver/nfs_nfsdstate.c b/sys/fs/nfsserver/nfs_nfsdstate.c
index 36e3ce7b7142..111b0f26d0b5 100644
--- a/sys/fs/nfsserver/nfs_nfsdstate.c
+++ b/sys/fs/nfsserver/nfs_nfsdstate.c
@@ -7731,6 +7731,7 @@ nfsrv_setdsserver(char *dspathp, char *mdspathp, NFSPROC_T *p,
NFSD_DEBUG(4, "lookup=%d\n", error);
if (error != 0)
return (error);
+ NDFREE_PNBUF(&nd);
if (nd.ni_vp->v_type != VDIR) {
vput(nd.ni_vp);
NFSD_DEBUG(4, "dspath not dir\n");
@@ -7767,6 +7768,7 @@ nfsrv_setdsserver(char *dspathp, char *mdspathp, NFSPROC_T *p,
NFSD_DEBUG(4, "dsdirpath=%s lookup=%d\n", dsdirpath, error);
if (error != 0)
break;
+ NDFREE_PNBUF(&nd);
if (nd.ni_vp->v_type != VDIR) {
vput(nd.ni_vp);
error = ENOTDIR;
@@ -7795,6 +7797,7 @@ nfsrv_setdsserver(char *dspathp, char *mdspathp, NFSPROC_T *p,
NFSD_DEBUG(4, "mds lookup=%d\n", error);
if (error != 0)
goto out;
+ NDFREE_PNBUF(&nd);
if (nd.ni_vp->v_type != VDIR) {
vput(nd.ni_vp);
error = ENOTDIR;
@@ -8654,6 +8657,7 @@ nfsrv_mdscopymr(char *mdspathp, char *dspathp, char *curdspathp, char *buf,
NFSD_DEBUG(4, "lookup=%d\n", error);
if (error != 0)
return (error);
+ NDFREE_PNBUF(&nd);
if (nd.ni_vp->v_type != VREG) {
vput(nd.ni_vp);
NFSD_DEBUG(4, "mdspath not reg\n");
@@ -8675,6 +8679,7 @@ nfsrv_mdscopymr(char *mdspathp, char *dspathp, char *curdspathp, char *buf,
vput(vp);
return (error);
}
+ NDFREE_PNBUF(&nd);
if (nd.ni_vp->v_type != VDIR) {
vput(nd.ni_vp);
vput(vp);
@@ -8717,6 +8722,7 @@ nfsrv_mdscopymr(char *mdspathp, char *dspathp, char *curdspathp, char *buf,
vput(curvp);
return (error);
}
+ NDFREE_PNBUF(&nd);
if (nd.ni_vp->v_type != VDIR || nd.ni_vp == curvp) {
vput(nd.ni_vp);
vput(vp);
diff --git a/sys/fs/p9fs/p9fs_vnops.c b/sys/fs/p9fs/p9fs_vnops.c
index 227e2b93883e..acb73973d93b 100644
--- a/sys/fs/p9fs/p9fs_vnops.c
+++ b/sys/fs/p9fs/p9fs_vnops.c
@@ -1326,7 +1326,7 @@ p9fs_read(struct vop_read_args *ap)
np = P9FS_VTON(vp);
error = 0;
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
if (vp->v_type != VREG)
return (EISDIR);
diff --git a/sys/fs/tarfs/tarfs_vnops.c b/sys/fs/tarfs/tarfs_vnops.c
index afb8e05f5929..acf18de5ab51 100644
--- a/sys/fs/tarfs/tarfs_vnops.c
+++ b/sys/fs/tarfs/tarfs_vnops.c
@@ -208,8 +208,7 @@ tarfs_getattr(struct vop_getattr_args *ap)
vap->va_birthtime = tnp->birthtime;
vap->va_gen = tnp->gen;
vap->va_flags = tnp->flags;
- vap->va_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
- tnp->rdev : NODEV;
+ vap->va_rdev = VN_ISDEV(vp) ? tnp->rdev : NODEV;
vap->va_bytes = round_page(tnp->physize);
vap->va_filerev = 0;
@@ -515,7 +514,7 @@ tarfs_read(struct vop_read_args *ap)
uiop = ap->a_uio;
vp = ap->a_vp;
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
if (vp->v_type != VREG)
diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c
index 1237f6b92cdb..dd281d18d87d 100644
--- a/sys/fs/tmpfs/tmpfs_subr.c
+++ b/sys/fs/tmpfs/tmpfs_subr.c
@@ -551,7 +551,7 @@ tmpfs_alloc_node(struct mount *mp, struct tmpfs_mount *tmp, __enum_uint8(vtype)
MPASS(IMPLIES(tmp->tm_root == NULL, parent == NULL && type == VDIR));
MPASS((type == VLNK) ^ (target == NULL));
- MPASS((type == VBLK || type == VCHR) ^ (rdev == VNOVAL));
+ MPASS(VTYPE_ISDEV(type) ^ (rdev == VNOVAL));
if (tmp->tm_nodes_inuse >= tmp->tm_nodes_max)
return (ENOSPC);
diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c
index 79b6c8b2e6a1..0f4ea2fdc28c 100644
--- a/sys/fs/tmpfs/tmpfs_vnops.c
+++ b/sys/fs/tmpfs/tmpfs_vnops.c
@@ -280,8 +280,7 @@ tmpfs_mknod(struct vop_mknod_args *v)
struct componentname *cnp = v->a_cnp;
struct vattr *vap = v->a_vap;
- if (vap->va_type != VBLK && vap->va_type != VCHR &&
- vap->va_type != VFIFO)
+ if (!VATTR_ISDEV(vap) && vap->va_type != VFIFO)
return (EINVAL);
return (tmpfs_alloc_file(dvp, vpp, vap, cnp, NULL));
@@ -462,8 +461,7 @@ tmpfs_stat(struct vop_stat_args *v)
sb->st_nlink = node->tn_links;
sb->st_uid = node->tn_uid;
sb->st_gid = node->tn_gid;
- sb->st_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
- node->tn_rdev : NODEV;
+ sb->st_rdev = VN_ISDEV(vp) ? node->tn_rdev : NODEV;
sb->st_size = node->tn_size;
sb->st_atim.tv_sec = node->tn_atime.tv_sec;
sb->st_atim.tv_nsec = node->tn_atime.tv_nsec;
@@ -521,8 +519,7 @@ tmpfs_getattr(struct vop_getattr_args *v)
vap->va_birthtime = node->tn_birthtime;
vap->va_gen = node->tn_gen;
vap->va_flags = node->tn_flags;
- vap->va_rdev = (vp->v_type == VBLK || vp->v_type == VCHR) ?
- node->tn_rdev : NODEV;
+ vap->va_rdev = VN_ISDEV(vp) ? node->tn_rdev : NODEV;
if (vp->v_type == VREG) {
#ifdef __ILP32__
vm_object_t obj = node->tn_reg.tn_aobj;
@@ -1918,7 +1915,7 @@ tmpfs_deleteextattr(struct vop_deleteextattr_args *ap)
node = VP_TO_TMPFS_NODE(vp);
tmp = VFS_TO_TMPFS(vp->v_mount);
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VWRITE);
@@ -1956,7 +1953,7 @@ tmpfs_getextattr(struct vop_getextattr_args *ap)
int error;
node = VP_TO_TMPFS_NODE(vp);
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VREAD);
@@ -1993,7 +1990,7 @@ tmpfs_listextattr(struct vop_listextattr_args *ap)
int error;
node = VP_TO_TMPFS_NODE(vp);
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VREAD);
@@ -2037,7 +2034,7 @@ tmpfs_setextattr(struct vop_setextattr_args *ap)
tmp = VFS_TO_TMPFS(vp->v_mount);
attr_size = ap->a_uio->uio_resid;
diff = 0;
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VWRITE);
diff --git a/sys/geom/part/g_part.c b/sys/geom/part/g_part.c
index 41125f6478ac..88e44b335b29 100644
--- a/sys/geom/part/g_part.c
+++ b/sys/geom/part/g_part.c
@@ -1046,7 +1046,7 @@ g_part_ctl_create(struct gctl_req *req, struct g_part_parms *gpp)
/*
* Synthesize a disk geometry. Some partitioning schemes
* depend on it and since some file systems need it even
- * when the partitition scheme doesn't, we do it here in
+ * when the partition scheme doesn't, we do it here in
* scheme-independent code.
*/
g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
@@ -1539,7 +1539,7 @@ g_part_ctl_undo(struct gctl_req *req, struct g_part_parms *gpp)
/*
* Synthesize a disk geometry. Some partitioning schemes
* depend on it and since some file systems need it even
- * when the partitition scheme doesn't, we do it here in
+ * when the partition scheme doesn't, we do it here in
* scheme-independent code.
*/
pp = cp->provider;
@@ -2023,7 +2023,7 @@ g_part_taste(struct g_class *mp, struct g_provider *pp, int flags __unused)
/*
* Synthesize a disk geometry. Some partitioning schemes
* depend on it and since some file systems need it even
- * when the partitition scheme doesn't, we do it here in
+ * when the partition scheme doesn't, we do it here in
* scheme-independent code.
*/
g_part_geometry(table, cp, pp->mediasize / pp->sectorsize);
diff --git a/sys/i386/conf/GENERIC b/sys/i386/conf/GENERIC
index f577cd07ac7c..88b8967cd693 100644
--- a/sys/i386/conf/GENERIC
+++ b/sys/i386/conf/GENERIC
@@ -343,3 +343,4 @@ options HID_DEBUG # enable debug msgs
device hid # Generic HID support
device hidbus # Generic HID Bus
options IICHID_SAMPLING # Workaround missing GPIO INTR support
+options U2F_MAKE_UHID_ALIAS # install /dev/uhid alias for /dev/u2f/
diff --git a/sys/isa/isareg.h b/sys/isa/isareg.h
index e89136c7e1e5..8b2d55608078 100644
--- a/sys/isa/isareg.h
+++ b/sys/isa/isareg.h
@@ -49,7 +49,7 @@
#define IO_RTC 0x070 /* RTC */
#define IO_ICU2 0x0A0 /* 8259A Interrupt Controller #2 */
-#define IO_MDA 0x3B0 /* Monochome Adapter */
+#define IO_MDA 0x3B0 /* Monochrome Adapter */
#define IO_VGA 0x3C0 /* E/VGA Ports */
#define IO_CGA 0x3D0 /* CGA Ports */
diff --git a/sys/kern/imgact_elf.c b/sys/kern/imgact_elf.c
index 2690ad3b2679..5a53fac50f2c 100644
--- a/sys/kern/imgact_elf.c
+++ b/sys/kern/imgact_elf.c
@@ -84,6 +84,13 @@
#define ELF_NOTE_ROUNDSIZE 4
#define OLD_EI_BRAND 8
+/*
+ * ELF_ABI_NAME is a string name of the ELF ABI. ELF_ABI_ID is used
+ * to build variable names.
+ */
+#define ELF_ABI_NAME __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
+#define ELF_ABI_ID __CONCAT(elf, __ELF_WORD_SIZE)
+
static int __elfN(check_header)(const Elf_Ehdr *hdr);
static Elf_Brandinfo *__elfN(get_brandinfo)(struct image_params *imgp,
const char *interp, int32_t *osrel, uint32_t *fctl0);
@@ -104,14 +111,15 @@ static Elf_Word __elfN(untrans_prot)(vm_prot_t);
static size_t __elfN(prepare_register_notes)(struct thread *td,
struct note_info_list *list, struct thread *target_td);
-SYSCTL_NODE(_kern, OID_AUTO, __CONCAT(elf, __ELF_WORD_SIZE),
- CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
+SYSCTL_NODE(_kern, OID_AUTO, ELF_ABI_ID, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"");
+#define ELF_NODE_OID __CONCAT(_kern_, ELF_ABI_ID)
+
int __elfN(fallback_brand) = -1;
-SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
+SYSCTL_INT(ELF_NODE_OID, OID_AUTO,
fallback_brand, CTLFLAG_RWTUN, &__elfN(fallback_brand), 0,
- __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) " brand of last resort");
+ ELF_ABI_NAME " brand of last resort");
static int elf_legacy_coredump = 0;
SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
@@ -126,22 +134,22 @@ int __elfN(nxstack) =
#else
0;
#endif
-SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
+SYSCTL_INT(ELF_NODE_OID, OID_AUTO,
nxstack, CTLFLAG_RW, &__elfN(nxstack), 0,
- __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": support PT_GNU_STACK for non-executable stack control");
+ ELF_ABI_NAME ": support PT_GNU_STACK for non-executable stack control");
#if defined(__amd64__)
static int __elfN(vdso) = 1;
-SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO,
+SYSCTL_INT(ELF_NODE_OID, OID_AUTO,
vdso, CTLFLAG_RWTUN, &__elfN(vdso), 0,
- __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": enable vdso preloading");
+ ELF_ABI_NAME ": enable vdso preloading");
#else
static int __elfN(vdso) = 0;
#endif
#if __ELF_WORD_SIZE == 32 && (defined(__amd64__) || defined(__i386__))
int i386_read_exec = 0;
-SYSCTL_INT(_kern_elf32, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,
+SYSCTL_INT(ELF_NODE_OID, OID_AUTO, read_exec, CTLFLAG_RW, &i386_read_exec, 0,
"enable execution from readable segments");
#endif
@@ -161,15 +169,15 @@ sysctl_pie_base(SYSCTL_HANDLER_ARGS)
__elfN(pie_base) = val;
return (0);
}
-SYSCTL_PROC(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, pie_base,
+SYSCTL_PROC(ELF_NODE_OID, OID_AUTO, pie_base,
CTLTYPE_ULONG | CTLFLAG_MPSAFE | CTLFLAG_RW, NULL, 0,
sysctl_pie_base, "LU",
"PIE load base without randomization");
-SYSCTL_NODE(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, aslr,
+SYSCTL_NODE(ELF_NODE_OID, OID_AUTO, aslr,
CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"");
-#define ASLR_NODE_OID __CONCAT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), _aslr)
+#define ASLR_NODE_OID __CONCAT(ELF_NODE_OID, _aslr)
/*
* Enable ASLR by default for 64-bit non-PIE binaries. 32-bit architectures
@@ -179,8 +187,7 @@ SYSCTL_NODE(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, aslr,
static int __elfN(aslr_enabled) = __ELF_WORD_SIZE == 64;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, enable, CTLFLAG_RWTUN,
&__elfN(aslr_enabled), 0,
- __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
- ": enable address map randomization");
+ ELF_ABI_NAME ": enable address map randomization");
/*
* Enable ASLR by default for 64-bit PIE binaries.
@@ -188,8 +195,7 @@ SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, enable, CTLFLAG_RWTUN,
static int __elfN(pie_aslr_enabled) = __ELF_WORD_SIZE == 64;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, pie_enable, CTLFLAG_RWTUN,
&__elfN(pie_aslr_enabled), 0,
- __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
- ": enable address map randomization for PIE binaries");
+ ELF_ABI_NAME ": enable address map randomization for PIE binaries");
/*
* Sbrk is deprecated and it can be assumed that in most cases it will not be
@@ -199,27 +205,25 @@ SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, pie_enable, CTLFLAG_RWTUN,
static int __elfN(aslr_honor_sbrk) = 0;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, honor_sbrk, CTLFLAG_RW,
&__elfN(aslr_honor_sbrk), 0,
- __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE)) ": assume sbrk is used");
+ ELF_ABI_NAME ": assume sbrk is used");
static int __elfN(aslr_stack) = __ELF_WORD_SIZE == 64;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, stack, CTLFLAG_RWTUN,
&__elfN(aslr_stack), 0,
- __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
- ": enable stack address randomization");
+ ELF_ABI_NAME ": enable stack address randomization");
static int __elfN(aslr_shared_page) = __ELF_WORD_SIZE == 64;
SYSCTL_INT(ASLR_NODE_OID, OID_AUTO, shared_page, CTLFLAG_RWTUN,
&__elfN(aslr_shared_page), 0,
- __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
- ": enable shared page address randomization");
+ ELF_ABI_NAME ": enable shared page address randomization");
static int __elfN(sigfastblock) = 1;
-SYSCTL_INT(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, sigfastblock,
+SYSCTL_INT(ELF_NODE_OID, OID_AUTO, sigfastblock,
CTLFLAG_RWTUN, &__elfN(sigfastblock), 0,
"enable sigfastblock for new processes");
static bool __elfN(allow_wx) = true;
-SYSCTL_BOOL(__CONCAT(_kern_elf, __ELF_WORD_SIZE), OID_AUTO, allow_wx,
+SYSCTL_BOOL(ELF_NODE_OID, OID_AUTO, allow_wx,
CTLFLAG_RWTUN, &__elfN(allow_wx), 0,
"Allow pages to be mapped simultaneously writable and executable");
@@ -2951,9 +2955,9 @@ __elfN(check_note)(struct image_params *imgp, Elf_Brandnote *brandnote,
*/
static struct execsw __elfN(execsw) = {
.ex_imgact = __CONCAT(exec_, __elfN(imgact)),
- .ex_name = __XSTRING(__CONCAT(ELF, __ELF_WORD_SIZE))
+ .ex_name = ELF_ABI_NAME
};
-EXEC_SET(__CONCAT(elf, __ELF_WORD_SIZE), __elfN(execsw));
+EXEC_SET(ELF_ABI_ID, __elfN(execsw));
static vm_prot_t
__elfN(trans_prot)(Elf_Word flags)
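
A hedged illustration of what the new helper macros evaluate to when __ELF_WORD_SIZE is 64, derived from the definitions above:

    /* Sketch: expansions for the 64-bit case. */
    ELF_ABI_NAME   /* __XSTRING(__CONCAT(ELF, 64)) -> "ELF64"           */
    ELF_ABI_ID     /* __CONCAT(elf, 64)            -> elf64             */
    ELF_NODE_OID   /* __CONCAT(_kern_, elf64)      -> _kern_elf64       */
    ASLR_NODE_OID  /* __CONCAT(_kern_elf64, _aslr) -> _kern_elf64_aslr  */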
diff --git a/sys/kern/kern_prot.c b/sys/kern/kern_prot.c
index beab30a9d157..0ca42d640767 100644
--- a/sys/kern/kern_prot.c
+++ b/sys/kern/kern_prot.c
@@ -332,7 +332,7 @@ freebsd14_getgroups(struct thread *td, struct freebsd14_getgroups_args *uap)
}
error = copyout(&cred->cr_gid, uap->gidset, sizeof(gid_t));
- if (error != 0)
+ if (error == 0)
error = copyout(cred->cr_groups, uap->gidset + 1,
(ngrp - 1) * sizeof(gid_t));
@@ -2921,7 +2921,8 @@ crextend(struct ucred *cr, int n)
* Normalizes a set of groups to be applied to a 'struct ucred'.
*
* Normalization ensures that the supplementary groups are sorted in ascending
- * order and do not contain duplicates.
+ * order and do not contain duplicates. This allows group_is_supplementary
+ * to do a binary search.
*/
static void
groups_normalize(int *ngrp, gid_t *groups)
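
A hedged sketch of the binary search that the normalized (sorted, duplicate-free) group array enables; the real group_is_supplementary may differ in detail.

    #include <stdbool.h>
    #include <sys/types.h>

    /* Sketch: membership test over a sorted, duplicate-free gid array. */
    static bool
    gid_in_set(gid_t gid, const gid_t *groups, int ngrp)
    {
            int lo = 0, hi = ngrp - 1;

            while (lo <= hi) {
                    int mid = lo + (hi - lo) / 2;

                    if (groups[mid] == gid)
                            return (true);
                    if (groups[mid] < gid)
                            lo = mid + 1;
                    else
                            hi = mid - 1;
            }
            return (false);
    }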
diff --git a/sys/kern/uipc_usrreq.c b/sys/kern/uipc_usrreq.c
index 0056dac65c7d..19870e989437 100644
--- a/sys/kern/uipc_usrreq.c
+++ b/sys/kern/uipc_usrreq.c
@@ -154,15 +154,12 @@ static struct task unp_defer_task;
* and don't really want to reserve the sendspace. Their recvspace should be
* large enough for at least one max-size datagram plus address.
*/
-#ifndef PIPSIZ
-#define PIPSIZ 8192
-#endif
-static u_long unpst_sendspace = PIPSIZ;
-static u_long unpst_recvspace = PIPSIZ;
+static u_long unpst_sendspace = 64*1024;
+static u_long unpst_recvspace = 64*1024;
static u_long unpdg_maxdgram = 8*1024; /* support 8KB syslog msgs */
static u_long unpdg_recvspace = 16*1024;
-static u_long unpsp_sendspace = PIPSIZ;
-static u_long unpsp_recvspace = PIPSIZ;
+static u_long unpsp_sendspace = 64*1024;
+static u_long unpsp_recvspace = 64*1024;
static SYSCTL_NODE(_net, PF_LOCAL, local, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
"Local domain");
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index fa655c43d155..19c39e42bafa 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -5170,7 +5170,7 @@ bufstrategy(struct bufobj *bo, struct buf *bp)
vp = bp->b_vp;
KASSERT(vp == bo->bo_private, ("Inconsistent vnode bufstrategy"));
- KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
+ KASSERT(!VN_ISDEV(vp),
("Wrong vnode in bufstrategy(bp=%p, vp=%p)", bp, vp));
i = VOP_STRATEGY(vp, bp);
KASSERT(i == 0, ("VOP_STRATEGY failed bp=%p vp=%p", bp, bp->b_vp));
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index b805e147bd62..bf3ed9d515dc 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -2839,7 +2839,7 @@ setfflags(struct thread *td, struct vnode *vp, u_long flags)
* if they are allowed to set flags and programs assume that
* chown can't fail when done as root.
*/
- if (vp->v_type == VCHR || vp->v_type == VBLK) {
+ if (VN_ISDEV(vp)) {
error = priv_check(td, PRIV_VFS_CHFLAGS_DEV);
if (error != 0)
return (error);
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index 93f87ddae4de..a4f41192f684 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -3444,7 +3444,7 @@ vn_generic_copy_file_range(struct vnode *invp, off_t *inoffp,
dat = NULL;
if ((flags & COPY_FILE_RANGE_CLONE) != 0) {
- error = ENOSYS;
+ error = EOPNOTSUPP;
goto out;
}
diff --git a/sys/libkern/qsort.c b/sys/libkern/qsort.c
index 0255a3d64d76..342b1525dd8a 100644
--- a/sys/libkern/qsort.c
+++ b/sys/libkern/qsort.c
@@ -114,11 +114,10 @@ qsort(void *a, size_t n, size_t es, cmp_t *cmp)
char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
size_t d1, d2;
int cmp_result;
- int swaptype_long, swaptype_int, swap_cnt;
+ int swaptype_long, swaptype_int;
loop: SWAPINIT(long, a, es);
SWAPINIT(int, a, es);
- swap_cnt = 0;
if (n < 7) {
for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es)
for (pl = pm;
@@ -147,7 +146,6 @@ loop: SWAPINIT(long, a, es);
for (;;) {
while (pb <= pc && (cmp_result = CMP(thunk, pb, a)) <= 0) {
if (cmp_result == 0) {
- swap_cnt = 1;
swap(pa, pb);
pa += es;
}
@@ -155,7 +153,6 @@ loop: SWAPINIT(long, a, es);
}
while (pb <= pc && (cmp_result = CMP(thunk, pc, a)) >= 0) {
if (cmp_result == 0) {
- swap_cnt = 1;
swap(pc, pd);
pd -= es;
}
@@ -164,18 +161,9 @@ loop: SWAPINIT(long, a, es);
if (pb > pc)
break;
swap(pb, pc);
- swap_cnt = 1;
pb += es;
pc -= es;
}
- if (swap_cnt == 0) { /* Switch to insertion sort */
- for (pm = (char *)a + es; pm < (char *)a + n * es; pm += es)
- for (pl = pm;
- pl > (char *)a && CMP(thunk, pl - es, pl) > 0;
- pl -= es)
- swap(pl, pl - es);
- return;
- }
pn = (char *)a + n * es;
d1 = MIN(pa - (char *)a, pb - pa);
diff --git a/sys/modules/e6000sw/Makefile b/sys/modules/e6000sw/Makefile
index da08f80b0a29..73cbaea801f0 100644
--- a/sys/modules/e6000sw/Makefile
+++ b/sys/modules/e6000sw/Makefile
@@ -3,6 +3,6 @@
KMOD= e6000sw
SRCS= e6000sw.c
-SRCS+= bus_if.h etherswitch_if.h mdio_if.h miibus_if.h ofw_bus_if.h opt_platform.h
+SRCS+= bus_if.h device_if.h etherswitch_if.h mdio_if.h miibus_if.h ofw_bus_if.h opt_platform.h
.include <bsd.kmod.mk>
diff --git a/sys/modules/etherswitch/Makefile b/sys/modules/etherswitch/Makefile
index 087231545cd4..0b16a19e5117 100644
--- a/sys/modules/etherswitch/Makefile
+++ b/sys/modules/etherswitch/Makefile
@@ -3,7 +3,7 @@
KMOD = etherswitch
SRCS= etherswitch.c
-SRCS+= mdio_if.h miibus_if.h etherswitch_if.h etherswitch_if.c
+SRCS+= bus_if.h device_if.h mdio_if.h miibus_if.h etherswitch_if.h etherswitch_if.c
CFLAGS+= -I${SRCTOP}/sys/dev/etherswitch
.include <bsd.kmod.mk>
diff --git a/sys/modules/evdev/Makefile b/sys/modules/evdev/Makefile
index bd66013885db..20813b73f6dd 100644
--- a/sys/modules/evdev/Makefile
+++ b/sys/modules/evdev/Makefile
@@ -2,7 +2,7 @@
KMOD= evdev
SRCS= cdev.c evdev.c evdev_mt.c evdev_utils.c
-SRCS+= opt_evdev.h bus_if.h device_if.h
+SRCS+= opt_evdev.h opt_kbd.h bus_if.h device_if.h
EXPORT_SYMS= YES
diff --git a/sys/modules/gpio/gpioaei/Makefile b/sys/modules/gpio/gpioaei/Makefile
index 8f856af48eb7..1f0f1d0e53a6 100644
--- a/sys/modules/gpio/gpioaei/Makefile
+++ b/sys/modules/gpio/gpioaei/Makefile
@@ -10,6 +10,8 @@ SRCS+= \
gpio_if.h \
gpiobus_if.h
+SRCS+= opt_acpi.h opt_platform.h
+
CFLAGS+= -I. -I${SRCTOP}/sys/dev/gpio/
.include <bsd.kmod.mk>
diff --git a/sys/modules/gve/Makefile b/sys/modules/gve/Makefile
index 08b26a994e36..ece275485df7 100644
--- a/sys/modules/gve/Makefile
+++ b/sys/modules/gve/Makefile
@@ -40,5 +40,5 @@ SRCS= gve_main.c \
gve_tx_dqo.c \
gve_sysctl.c
SRCS+= device_if.h bus_if.h pci_if.h
-
+SRCS+= opt_inet6.h
.include <bsd.kmod.mk>
diff --git a/sys/modules/hid/Makefile b/sys/modules/hid/Makefile
index 56c3267d8684..10720570deb7 100644
--- a/sys/modules/hid/Makefile
+++ b/sys/modules/hid/Makefile
@@ -17,6 +17,7 @@ SUBDIR += \
hsctrl \
ietp \
ps4dshock \
+ u2f \
xb360gp
.include <bsd.subdir.mk>
diff --git a/sys/modules/hid/u2f/Makefile b/sys/modules/hid/u2f/Makefile
new file mode 100644
index 000000000000..227e7154035b
--- /dev/null
+++ b/sys/modules/hid/u2f/Makefile
@@ -0,0 +1,8 @@
+.PATH: ${SRCTOP}/sys/dev/hid
+
+KMOD= u2f
+SRCS= u2f.c
+SRCS+= opt_hid.h opt_usb.h
+SRCS+= bus_if.h device_if.h
+
+.include <bsd.kmod.mk>
diff --git a/sys/modules/if_infiniband/Makefile b/sys/modules/if_infiniband/Makefile
index 01e3164b1271..7ec343999da1 100644
--- a/sys/modules/if_infiniband/Makefile
+++ b/sys/modules/if_infiniband/Makefile
@@ -3,7 +3,8 @@
KMOD= if_infiniband
SRCS= if_infiniband.c \
opt_inet.h \
- opt_inet6.h
+ opt_inet6.h \
+ opt_kbd.h
EXPORT_SYMS= YES
diff --git a/sys/modules/if_vlan/Makefile b/sys/modules/if_vlan/Makefile
index 3077f4289d5a..0cdab3f7653a 100644
--- a/sys/modules/if_vlan/Makefile
+++ b/sys/modules/if_vlan/Makefile
@@ -2,6 +2,6 @@
KMOD= if_vlan
SRCS= if_vlan.c
-SRCS+= opt_inet.h opt_inet6.h opt_kern_tls.h opt_vlan.h opt_ratelimit.h
+SRCS+= opt_inet.h opt_inet6.h opt_ipsec.h opt_kern_tls.h opt_vlan.h opt_ratelimit.h
.include <bsd.kmod.mk>
diff --git a/sys/modules/ix/Makefile b/sys/modules/ix/Makefile
index ad9f36e054e3..aec6eaabffdd 100644
--- a/sys/modules/ix/Makefile
+++ b/sys/modules/ix/Makefile
@@ -7,7 +7,7 @@ SRCS += if_ix.c if_bypass.c if_fdir.c if_sriov.c ix_txrx.c ixgbe_osdep.c
# Shared source
SRCS += ixgbe_common.c ixgbe_api.c ixgbe_phy.c ixgbe_mbx.c ixgbe_vf.c
SRCS += ixgbe_dcb.c ixgbe_dcb_82598.c ixgbe_dcb_82599.c
-SRCS += ixgbe_82598.c ixgbe_82599.c ixgbe_x540.c ixgbe_x550.c
+SRCS += ixgbe_82598.c ixgbe_82599.c ixgbe_x540.c ixgbe_x550.c ixgbe_e610.c
CFLAGS+= -I${SRCTOP}/sys/dev/ixgbe
.include <bsd.kmod.mk>
diff --git a/sys/modules/ixv/Makefile b/sys/modules/ixv/Makefile
index 1b4431ac11cd..e7066bb7829b 100644
--- a/sys/modules/ixv/Makefile
+++ b/sys/modules/ixv/Makefile
@@ -7,7 +7,7 @@ SRCS += if_ixv.c if_fdir.c ix_txrx.c ixgbe_osdep.c
# Shared source
SRCS += ixgbe_common.c ixgbe_api.c ixgbe_phy.c ixgbe_mbx.c ixgbe_vf.c
SRCS += ixgbe_dcb.c ixgbe_dcb_82598.c ixgbe_dcb_82599.c
-SRCS += ixgbe_82598.c ixgbe_82599.c ixgbe_x540.c ixgbe_x550.c
+SRCS += ixgbe_82598.c ixgbe_82599.c ixgbe_x540.c ixgbe_x550.c ixgbe_e610.c
CFLAGS+= -I${SRCTOP}/sys/dev/ixgbe
.include <bsd.kmod.mk>
diff --git a/sys/modules/linux64/Makefile b/sys/modules/linux64/Makefile
index b23891a65a4f..327da11afdaf 100644
--- a/sys/modules/linux64/Makefile
+++ b/sys/modules/linux64/Makefile
@@ -31,6 +31,7 @@ SRCS= linux_dummy_machdep.c \
opt_ktrace.h \
opt_inet6.h \
opt_posix.h \
+ opt_usb.h \
bus_if.h \
device_if.h \
vnode_if.h \
diff --git a/sys/modules/md/Makefile b/sys/modules/md/Makefile
index 2b0586c44717..3f16e04860a1 100644
--- a/sys/modules/md/Makefile
+++ b/sys/modules/md/Makefile
@@ -1,6 +1,6 @@
.PATH: ${SRCTOP}/sys/dev/md
KMOD= geom_md
-SRCS= md.c opt_md.h opt_geom.h opt_rootdevname.h vnode_if.h
+SRCS= bus_if.h device_if.h md.c opt_md.h opt_geom.h opt_rootdevname.h vnode_if.h
.include <bsd.kmod.mk>
diff --git a/sys/modules/miiproxy/Makefile b/sys/modules/miiproxy/Makefile
index 5173358989da..730bef4220cd 100644
--- a/sys/modules/miiproxy/Makefile
+++ b/sys/modules/miiproxy/Makefile
@@ -3,7 +3,7 @@
KMOD = miiproxy
SRCS= miiproxy.c
-SRCS+= mdio_if.h miibus_if.h
+SRCS+= bus_if.h mdio_if.h miibus_if.h opt_platform.h
CFLAGS+= -I${SRCTOP}/sys/dev/etherswitch
.include <bsd.kmod.mk>
diff --git a/sys/modules/mlx5/Makefile b/sys/modules/mlx5/Makefile
index 506c045ab0ce..65341fdfb8aa 100644
--- a/sys/modules/mlx5/Makefile
+++ b/sys/modules/mlx5/Makefile
@@ -46,7 +46,7 @@ mlx5_ipsec_offload.c \
mlx5_ipsec.c \
mlx5_ipsec_rxtx.c
SRCS+= ${LINUXKPI_GENSRCS}
-SRCS+= opt_inet.h opt_inet6.h opt_rss.h opt_ratelimit.h
+SRCS+= opt_inet.h opt_inet6.h opt_ipsec.h opt_rss.h opt_ratelimit.h
CFLAGS+= -I${SRCTOP}/sys/ofed/include
CFLAGS+= -I${SRCTOP}/sys/ofed/include/uapi
diff --git a/sys/modules/mlx5en/Makefile b/sys/modules/mlx5en/Makefile
index 03bf174e33b0..3697fa65dc83 100644
--- a/sys/modules/mlx5en/Makefile
+++ b/sys/modules/mlx5en/Makefile
@@ -15,7 +15,7 @@ mlx5_en_rl.c \
mlx5_en_txrx.c \
mlx5_en_port_buffer.c
SRCS+= ${LINUXKPI_GENSRCS}
-SRCS+= opt_inet.h opt_inet6.h opt_rss.h opt_ratelimit.h opt_kern_tls.h
+SRCS+= opt_inet.h opt_inet6.h opt_ipsec.h opt_rss.h opt_ratelimit.h opt_kern_tls.h
.if defined(HAVE_PER_CQ_EVENT_PACKET)
CFLAGS+= -DHAVE_PER_CQ_EVENT_PACKET
diff --git a/sys/modules/netgraph/ksocket/Makefile b/sys/modules/netgraph/ksocket/Makefile
index 395fdbd7b3e3..7099648f6219 100644
--- a/sys/modules/netgraph/ksocket/Makefile
+++ b/sys/modules/netgraph/ksocket/Makefile
@@ -1,4 +1,6 @@
KMOD= ng_ksocket
SRCS= ng_ksocket.c
+SRCS+= opt_inet6.h
+
.include <bsd.kmod.mk>
diff --git a/sys/modules/nvmf/nvmf/Makefile b/sys/modules/nvmf/nvmf/Makefile
index 7ebe614998bd..21d73d363d2f 100644
--- a/sys/modules/nvmf/nvmf/Makefile
+++ b/sys/modules/nvmf/nvmf/Makefile
@@ -10,4 +10,7 @@ SRCS= nvmf.c \
nvmf_qpair.c \
nvmf_sim.c
+SRCS+= bus_if.h device_if.h
+SRCS+= opt_cam.h
+
.include <bsd.kmod.mk>
diff --git a/sys/modules/ossl/Makefile b/sys/modules/ossl/Makefile
index 7a92742d6b36..ac2c752e922e 100644
--- a/sys/modules/ossl/Makefile
+++ b/sys/modules/ossl/Makefile
@@ -29,6 +29,7 @@ SRCS.arm= \
SRCS.aarch64= \
chacha-armv8.S \
+ chacha-armv8-sve.S \
poly1305-armv8.S \
sha1-armv8.S \
sha256-armv8.S \
diff --git a/sys/modules/qlnx/qlnxev/Makefile b/sys/modules/qlnx/qlnxev/Makefile
index ed62f1f1dd40..766a5a950032 100644
--- a/sys/modules/qlnx/qlnxev/Makefile
+++ b/sys/modules/qlnx/qlnxev/Makefile
@@ -49,6 +49,7 @@ SRCS+=ecore_vf.c
SRCS+=qlnx_ioctl.c
SRCS+=qlnx_os.c
+SRCS+=opt_inet.h
SRCS+= ${LINUXKPI_GENSRCS}
diff --git a/sys/modules/rtw88/Makefile b/sys/modules/rtw88/Makefile
index 9739ede11073..822be639da43 100644
--- a/sys/modules/rtw88/Makefile
+++ b/sys/modules/rtw88/Makefile
@@ -43,6 +43,7 @@ SRCS+= ${LINUXKPI_GENSRCS}
SRCS+= opt_wlan.h opt_inet6.h opt_inet.h
CFLAGS+= -DKBUILD_MODNAME='"rtw88"'
+CFLAGS+= -DLINUXKPI_VERSION=61400
CFLAGS+= -I${DEVRTW88DIR}
CFLAGS+= ${LINUXKPI_INCLUDES}
diff --git a/sys/modules/rtw89/Makefile b/sys/modules/rtw89/Makefile
index 09580f288c62..e66f85c3ac17 100644
--- a/sys/modules/rtw89/Makefile
+++ b/sys/modules/rtw89/Makefile
@@ -39,6 +39,7 @@ SRCS+= ${LINUXKPI_GENSRCS}
SRCS+= opt_wlan.h opt_inet6.h opt_inet.h opt_acpi.h
CFLAGS+= -DKBUILD_MODNAME='"rtw89"'
+CFLAGS+= -DLINUXKPI_VERSION=61400
CFLAGS+= -DLINUXKPI_WANT_LINUX_ACPI
CFLAGS+= -I${DEVRTW89DIR}
diff --git a/sys/modules/uinput/Makefile b/sys/modules/uinput/Makefile
index 66ade2a5bb33..a9e2ec867b91 100644
--- a/sys/modules/uinput/Makefile
+++ b/sys/modules/uinput/Makefile
@@ -2,6 +2,6 @@
KMOD= uinput
SRCS= uinput.c
-SRCS+= opt_evdev.h
+SRCS+= opt_evdev.h opt_kbd.h
.include <bsd.kmod.mk>
diff --git a/sys/modules/usb/usie/Makefile b/sys/modules/usb/usie/Makefile
index 6a5f79248ff8..9edeed082f8d 100644
--- a/sys/modules/usb/usie/Makefile
+++ b/sys/modules/usb/usie/Makefile
@@ -29,6 +29,6 @@
KMOD = usie
SRCS = if_usie.c
SRCS += opt_bus.h opt_usb.h device_if.h bus_if.h \
- usb_if.h usbdevs.h opt_inet.h
+ usb_if.h usbdevs.h opt_inet.h opt_inet6.h
.include <bsd.kmod.mk>
diff --git a/sys/modules/usb/wmt/Makefile b/sys/modules/usb/wmt/Makefile
index 72cf1d814908..8cb5abd7383e 100644
--- a/sys/modules/usb/wmt/Makefile
+++ b/sys/modules/usb/wmt/Makefile
@@ -3,6 +3,6 @@ S= ${SRCTOP}/sys
.PATH: $S/dev/usb/input
KMOD= wmt
-SRCS= opt_bus.h opt_usb.h device_if.h bus_if.h usb_if.h usbdevs.h wmt.c
+SRCS= opt_bus.h opt_kbd.h opt_usb.h device_if.h bus_if.h usb_if.h usbdevs.h wmt.c
.include <bsd.kmod.mk>
diff --git a/sys/modules/zfs/Makefile b/sys/modules/zfs/Makefile
index 2dd9e2be3f56..ec531ed646a7 100644
--- a/sys/modules/zfs/Makefile
+++ b/sys/modules/zfs/Makefile
@@ -15,6 +15,7 @@ KMOD= zfs
${SRCDIR}/icp/asm-ppc64/sha2 \
${SRCDIR}/icp/asm-ppc64/blake3 \
${SRCDIR}/icp/asm-x86_64/blake3 \
+ ${SRCDIR}/icp/asm-x86_64/modes \
${SRCDIR}/icp/asm-x86_64/sha2 \
${SRCDIR}/os/freebsd/spl \
${SRCDIR}/os/freebsd/zfs \
@@ -40,7 +41,8 @@ CFLAGS+= -D__KERNEL__ -DFREEBSD_NAMECACHE -DBUILDING_ZFS \
.if ${MACHINE_ARCH} == "amd64"
CFLAGS+= -D__x86_64 -DHAVE_SSE2 -DHAVE_SSSE3 -DHAVE_SSE4_1 -DHAVE_SSE4_2 \
- -DHAVE_AVX -DHAVE_AVX2 -DHAVE_AVX512F -DHAVE_AVX512VL -DHAVE_AVX512BW
+ -DHAVE_AVX -DHAVE_AVX2 -DHAVE_AVX512F -DHAVE_AVX512VL -DHAVE_AVX512BW \
+ -DHAVE_VAES -DHAVE_VPCLMULQDQ
.endif
.if ${MACHINE_ARCH} == "i386" || ${MACHINE_ARCH} == "powerpc" || \
@@ -82,6 +84,9 @@ SRCS+= blake3_avx2.S \
blake3_avx512.S \
blake3_sse2.S \
blake3_sse41.S
+
+#icp/asm-x86_64/modes
+SRCS+= aesni-gcm-avx2-vaes.S
.endif
#icp/algs/sha2
diff --git a/sys/modules/zfs/zfs_config.h b/sys/modules/zfs/zfs_config.h
index c79c9eaa1a5f..12274bcceea1 100644
--- a/sys/modules/zfs/zfs_config.h
+++ b/sys/modules/zfs/zfs_config.h
@@ -704,6 +704,11 @@
/* iops->setattr() takes struct user_namespace* */
/* #undef HAVE_USERNS_IOPS_SETATTR */
+#ifdef __amd64__
+/* Define if host toolchain supports VAES */
+#define HAVE_VAES 1
+#endif
+
/* fops->clone_file_range() is available */
/* #undef HAVE_VFS_CLONE_FILE_RANGE */
@@ -743,6 +748,11 @@
/* __vmalloc page flags exists */
/* #undef HAVE_VMALLOC_PAGE_KERNEL */
+#ifdef __amd64__
+/* Define if host toolchain supports VPCLMULQDQ */
+#define HAVE_VPCLMULQDQ 1
+#endif
+
/* int (*writepage_t)() takes struct folio* */
/* #undef HAVE_WRITEPAGE_T_FOLIO */
@@ -830,7 +840,7 @@
/* #undef ZFS_DEVICE_MINOR */
/* Define the project alias string. */
-#define ZFS_META_ALIAS "zfs-2.3.99-539-FreeBSD_g1d0b94c4e"
+#define ZFS_META_ALIAS "zfs-2.3.99-571-FreeBSD_ga9410ccbd"
/* Define the project author. */
#define ZFS_META_AUTHOR "OpenZFS"
@@ -839,7 +849,7 @@
/* #undef ZFS_META_DATA */
/* Define the maximum compatible kernel version. */
-#define ZFS_META_KVER_MAX "6.15"
+#define ZFS_META_KVER_MAX "6.16"
/* Define the minimum compatible kernel version. */
#define ZFS_META_KVER_MIN "4.18"
@@ -860,7 +870,7 @@
#define ZFS_META_NAME "zfs"
/* Define the project release. */
-#define ZFS_META_RELEASE "539-FreeBSD_g1d0b94c4e"
+#define ZFS_META_RELEASE "571-FreeBSD_ga9410ccbd"
/* Define the project version. */
#define ZFS_META_VERSION "2.3.99"
diff --git a/sys/modules/zfs/zfs_gitrev.h b/sys/modules/zfs/zfs_gitrev.h
index 20fd58c620b5..5c265cf5b08e 100644
--- a/sys/modules/zfs/zfs_gitrev.h
+++ b/sys/modules/zfs/zfs_gitrev.h
@@ -1 +1 @@
-#define ZFS_META_GITREV "zfs-2.3.99-539-g1d0b94c4e"
+#define ZFS_META_GITREV "zfs-2.3.99-571-ga9410ccbd"
diff --git a/sys/net/if_bridgevar.h b/sys/net/if_bridgevar.h
index b0f579f688ac..5ed8c19f3128 100644
--- a/sys/net/if_bridgevar.h
+++ b/sys/net/if_bridgevar.h
@@ -159,7 +159,7 @@ struct ifbreq {
uint32_t ifbr_addrexceeded; /* member if addr violations */
ether_vlanid_t ifbr_pvid; /* member if PVID */
uint16_t ifbr_vlanproto; /* member if VLAN protocol */
- uint8_t pad[32];
+ uint8_t pad[28];
};
/* BRDGGIFFLAGS, BRDGSIFFLAGS */
diff --git a/sys/net/if_clone.h b/sys/net/if_clone.h
index 5a74ffa1cc2f..d780e49af25f 100644
--- a/sys/net/if_clone.h
+++ b/sys/net/if_clone.h
@@ -153,7 +153,7 @@ int if_clone_destroy(const char *);
int if_clone_list(struct if_clonereq *);
void if_clone_restoregroup(struct ifnet *);
-/* The below interfaces are used only by epair(4). */
+/* The below interfaces are used only by epair(4) and tun(4)/tap(4). */
void if_clone_addif(struct if_clone *, struct ifnet *);
int if_clone_destroyif(struct if_clone *, struct ifnet *);
diff --git a/sys/net/if_pfsync.h b/sys/net/if_pfsync.h
index 1efc220aa8e1..e99df0b85ccf 100644
--- a/sys/net/if_pfsync.h
+++ b/sys/net/if_pfsync.h
@@ -160,8 +160,8 @@ struct pfsync_ins_ack {
struct pfsync_upd_c {
u_int64_t id;
- struct pfsync_state_peer src;
- struct pfsync_state_peer dst;
+ struct pf_state_peer_export src;
+ struct pf_state_peer_export dst;
u_int32_t creatorid;
u_int32_t expire;
u_int8_t timeout;
diff --git a/sys/net/if_tap.h b/sys/net/if_tap.h
index d84cd2eba6f3..8297b8d9e3d2 100644
--- a/sys/net/if_tap.h
+++ b/sys/net/if_tap.h
@@ -57,6 +57,8 @@
#define TAPGIFNAME TUNGIFNAME
#define TAPSVNETHDR _IOW('t', 91, int)
#define TAPGVNETHDR _IOR('t', 94, int)
+#define TAPSTRANSIENT TUNSTRANSIENT
+#define TAPGTRANSIENT TUNGTRANSIENT
/* VMware ioctl's */
#define VMIO_SIOCSIFFLAGS _IOWINT('V', 0)
diff --git a/sys/net/if_tun.h b/sys/net/if_tun.h
index a8fb61db45a2..ccdc25944823 100644
--- a/sys/net/if_tun.h
+++ b/sys/net/if_tun.h
@@ -43,5 +43,7 @@ struct tuninfo {
#define TUNSIFPID _IO('t', 95)
#define TUNSIFHEAD _IOW('t', 96, int)
#define TUNGIFHEAD _IOR('t', 97, int)
+#define TUNSTRANSIENT _IOW('t', 98, int)
+#define TUNGTRANSIENT _IOR('t', 99, int)
#endif /* !_NET_IF_TUN_H_ */
diff --git a/sys/net/if_tuntap.c b/sys/net/if_tuntap.c
index 5e6f65c04b2f..c8dbb6aa8893 100644
--- a/sys/net/if_tuntap.c
+++ b/sys/net/if_tuntap.c
@@ -132,6 +132,7 @@ struct tuntap_softc {
#define TUN_DYING 0x0200
#define TUN_L2 0x0400
#define TUN_VMNET 0x0800
+#define TUN_TRANSIENT 0x1000
#define TUN_DRIVER_IDENT_MASK (TUN_L2 | TUN_VMNET)
#define TUN_READY (TUN_OPEN | TUN_INITED)
@@ -443,6 +444,18 @@ tuntap_name2info(const char *name, int *outunit, int *outflags)
return (0);
}
+static struct if_clone *
+tuntap_cloner_from_flags(int tun_flags)
+{
+
+ for (u_int i = 0; i < NDRV; i++)
+ if ((tun_flags & TUN_DRIVER_IDENT_MASK) ==
+ tuntap_drivers[i].ident_flags)
+ return (V_tuntap_driver_cloners[i]);
+
+ return (NULL);
+}
+
/*
* Get driver information from a set of flags specified. Masks the identifying
* part of the flags and compares it against all of the available
@@ -615,19 +628,39 @@ out:
CURVNET_RESTORE();
}
-static void
-tun_destroy(struct tuntap_softc *tp)
+static int
+tun_destroy(struct tuntap_softc *tp, bool may_intr)
{
+ int error;
TUN_LOCK(tp);
+
+ /*
+ * Transient tunnels may have set TUN_DYING if we're being destroyed as
+ * a result of the last close, which we'll allow.
+ */
+ MPASS((tp->tun_flags & (TUN_DYING | TUN_TRANSIENT)) != TUN_DYING);
tp->tun_flags |= TUN_DYING;
- if (tp->tun_busy != 0)
- cv_wait_unlock(&tp->tun_cv, &tp->tun_mtx);
- else
- TUN_UNLOCK(tp);
+ error = 0;
+ while (tp->tun_busy != 0) {
+ if (may_intr)
+ error = cv_wait_sig(&tp->tun_cv, &tp->tun_mtx);
+ else
+ cv_wait(&tp->tun_cv, &tp->tun_mtx);
+ if (error != 0) {
+ tp->tun_flags &= ~TUN_DYING;
+ TUN_UNLOCK(tp);
+ return (error);
+ }
+ }
+ TUN_UNLOCK(tp);
CURVNET_SET(TUN2IFP(tp)->if_vnet);
+ mtx_lock(&tunmtx);
+ TAILQ_REMOVE(&tunhead, tp, tun_list);
+ mtx_unlock(&tunmtx);
+
/* destroy_dev will take care of any alias. */
destroy_dev(tp->tun_dev);
seldrain(&tp->tun_rsel);
@@ -648,6 +681,8 @@ tun_destroy(struct tuntap_softc *tp)
cv_destroy(&tp->tun_cv);
free(tp, M_TUN);
CURVNET_RESTORE();
+
+ return (0);
}
static int
@@ -655,12 +690,7 @@ tun_clone_destroy(struct if_clone *ifc __unused, struct ifnet *ifp, uint32_t fla
{
struct tuntap_softc *tp = ifp->if_softc;
- mtx_lock(&tunmtx);
- TAILQ_REMOVE(&tunhead, tp, tun_list);
- mtx_unlock(&tunmtx);
- tun_destroy(tp);
-
- return (0);
+ return (tun_destroy(tp, true));
}
static void
@@ -702,9 +732,9 @@ tun_uninit(const void *unused __unused)
mtx_lock(&tunmtx);
while ((tp = TAILQ_FIRST(&tunhead)) != NULL) {
- TAILQ_REMOVE(&tunhead, tp, tun_list);
mtx_unlock(&tunmtx);
- tun_destroy(tp);
+ /* tun_destroy() will remove it from the tailq. */
+ tun_destroy(tp, false);
mtx_lock(&tunmtx);
}
mtx_unlock(&tunmtx);
@@ -1217,6 +1247,23 @@ out:
tun_vnethdr_set(ifp, 0);
tun_unbusy_locked(tp);
+ if ((tp->tun_flags & TUN_TRANSIENT) != 0) {
+ struct if_clone *cloner;
+ int error __diagused;
+
+ /* Mark it dying so that nothing can re-open it. */
+ tp->tun_flags |= TUN_DYING;
+ TUN_UNLOCK(tp);
+
+ CURVNET_SET_QUIET(ifp->if_home_vnet);
+ cloner = tuntap_cloner_from_flags(tp->tun_flags);
+ CURVNET_RESTORE();
+
+ error = if_clone_destroyif(cloner, ifp);
+ MPASS(error == 0 || error == EINTR || error == ERESTART);
+ return;
+ }
+
TUN_UNLOCK(tp);
}
@@ -1668,6 +1715,19 @@ tunioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
case TUNGDEBUG:
*(int *)data = tundebug;
break;
+ case TUNSTRANSIENT:
+ TUN_LOCK(tp);
+ if (*(int *)data)
+ tp->tun_flags |= TUN_TRANSIENT;
+ else
+ tp->tun_flags &= ~TUN_TRANSIENT;
+ TUN_UNLOCK(tp);
+ break;
+ case TUNGTRANSIENT:
+ TUN_LOCK(tp);
+ *(int *)data = (tp->tun_flags & TUN_TRANSIENT) != 0;
+ TUN_UNLOCK(tp);
+ break;
case FIONBIO:
break;
case FIOASYNC:
diff --git a/sys/net/iflib.c b/sys/net/iflib.c
index 6638c90882aa..98c59e5de988 100644
--- a/sys/net/iflib.c
+++ b/sys/net/iflib.c
@@ -142,6 +142,7 @@ struct iflib_ctx;
static void iru_init(if_rxd_update_t iru, iflib_rxq_t rxq, uint8_t flid);
static void iflib_timer(void *arg);
static void iflib_tqg_detach(if_ctx_t ctx);
+static int iflib_simple_transmit(if_t ifp, struct mbuf *m);
typedef struct iflib_filter_info {
driver_filter_t *ifi_filter;
@@ -198,6 +199,7 @@ struct iflib_ctx {
uint8_t ifc_sysctl_use_logical_cores;
uint16_t ifc_sysctl_extra_msix_vectors;
bool ifc_cpus_are_physical_cores;
+ bool ifc_sysctl_simple_tx;
qidx_t ifc_sysctl_ntxds[8];
qidx_t ifc_sysctl_nrxds[8];
@@ -725,6 +727,7 @@ static void iflib_free_intr_mem(if_ctx_t ctx);
#ifndef __NO_STRICT_ALIGNMENT
static struct mbuf *iflib_fixup_rx(struct mbuf *m);
#endif
+static __inline int iflib_completed_tx_reclaim(iflib_txq_t txq, int thresh);
static SLIST_HEAD(cpu_offset_list, cpu_offset) cpu_offsets =
SLIST_HEAD_INITIALIZER(cpu_offsets);
@@ -2624,8 +2627,10 @@ iflib_stop(if_ctx_t ctx)
#endif /* DEV_NETMAP */
CALLOUT_UNLOCK(txq);
- /* clean any enqueued buffers */
- iflib_ifmp_purge(txq);
+ if (!ctx->ifc_sysctl_simple_tx) {
+ /* clean any enqueued buffers */
+ iflib_ifmp_purge(txq);
+ }
/* Free any existing tx buffers. */
for (j = 0; j < txq->ift_size; j++) {
iflib_txsd_free(ctx, txq, j);
@@ -2910,7 +2915,9 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
struct if_rxd_info ri;
int err, budget_left, rx_bytes, rx_pkts;
iflib_fl_t fl;
+#if defined(INET6) || defined(INET)
int lro_enabled;
+#endif
uint8_t retval = 0;
/*
@@ -2936,7 +2943,9 @@ iflib_rxeof(iflib_rxq_t rxq, qidx_t budget)
return (retval);
}
+#if defined(INET6) || defined(INET)
lro_enabled = (if_getcapenable(ifp) & IFCAP_LRO);
+#endif
/* pfil needs the vnet to be set */
CURVNET_SET_QUIET(if_getvnet(ifp));
@@ -3631,13 +3640,16 @@ defrag:
* cxgb
*/
if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
- txq->ift_no_desc_avail++;
- bus_dmamap_unload(buf_tag, map);
- DBG_COUNTER_INC(encap_txq_avail_fail);
- DBG_COUNTER_INC(encap_txd_encap_fail);
- if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
- GROUPTASK_ENQUEUE(&txq->ift_task);
- return (ENOBUFS);
+ (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
+ if (__predict_false(nsegs + 2 > TXQ_AVAIL(txq))) {
+ txq->ift_no_desc_avail++;
+ bus_dmamap_unload(buf_tag, map);
+ DBG_COUNTER_INC(encap_txq_avail_fail);
+ DBG_COUNTER_INC(encap_txd_encap_fail);
+ if ((txq->ift_task.gt_task.ta_flags & TASK_ENQUEUED) == 0)
+ GROUPTASK_ENQUEUE(&txq->ift_task);
+ return (ENOBUFS);
+ }
}
/*
* On Intel cards we can greatly reduce the number of TX interrupts
@@ -4010,6 +4022,12 @@ _task_fn_tx(void *context)
netmap_tx_irq(ifp, txq->ift_id))
goto skip_ifmp;
#endif
+ if (ctx->ifc_sysctl_simple_tx) {
+ mtx_lock(&txq->ift_mtx);
+ (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
+ mtx_unlock(&txq->ift_mtx);
+ goto skip_ifmp;
+ }
#ifdef ALTQ
if (if_altq_is_enabled(ifp))
iflib_altq_if_start(ifp);
@@ -4023,9 +4041,8 @@ _task_fn_tx(void *context)
*/
if (abdicate)
ifmp_ring_check_drainage(txq->ift_br, TX_BATCH_SIZE);
-#ifdef DEV_NETMAP
+
skip_ifmp:
-#endif
if (ctx->ifc_flags & IFC_LEGACY)
IFDI_INTR_ENABLE(ctx);
else
@@ -5127,7 +5144,14 @@ iflib_device_register(device_t dev, void *sc, if_shared_ctx_t sctx, if_ctx_t *ct
scctx = &ctx->ifc_softc_ctx;
ifp = ctx->ifc_ifp;
-
+ if (ctx->ifc_sysctl_simple_tx) {
+#ifndef ALTQ
+ if_settransmitfn(ifp, iflib_simple_transmit);
+ device_printf(dev, "using simple if_transmit\n");
+#else
+ device_printf(dev, "ALTQ prevents using simple if_transmit\n");
+#endif
+ }
iflib_reset_qvalues(ctx);
IFNET_WLOCK();
CTX_LOCK(ctx);
@@ -6762,6 +6786,9 @@ iflib_add_device_sysctl_pre(if_ctx_t ctx)
SYSCTL_ADD_CONST_STRING(ctx_list, oid_list, OID_AUTO, "driver_version",
CTLFLAG_RD, ctx->ifc_sctx->isc_driver_version, "driver version");
+ SYSCTL_ADD_BOOL(ctx_list, oid_list, OID_AUTO, "simple_tx",
+ CTLFLAG_RDTUN, &ctx->ifc_sysctl_simple_tx, 0,
+ "use simple tx ring");
SYSCTL_ADD_U16(ctx_list, oid_list, OID_AUTO, "override_ntxqs",
CTLFLAG_RWTUN, &ctx->ifc_sysctl_ntxqs, 0,
"# of txqs to use, 0 => use default #");
@@ -7084,3 +7111,48 @@ iflib_debugnet_poll(if_t ifp, int count)
return (0);
}
#endif /* DEBUGNET */
+
+static inline iflib_txq_t
+iflib_simple_select_queue(if_ctx_t ctx, struct mbuf *m)
+{
+ int qidx;
+
+ if ((NTXQSETS(ctx) > 1) && M_HASHTYPE_GET(m))
+ qidx = QIDX(ctx, m);
+ else
+ qidx = NTXQSETS(ctx) + FIRST_QSET(ctx) - 1;
+ return (&ctx->ifc_txqs[qidx]);
+}
+
+static int
+iflib_simple_transmit(if_t ifp, struct mbuf *m)
+{
+ if_ctx_t ctx;
+ iflib_txq_t txq;
+ int error;
+ int bytes_sent = 0, pkt_sent = 0, mcast_sent = 0;
+
+ ctx = if_getsoftc(ifp);
+ if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
+ IFF_DRV_RUNNING)
+ return (EBUSY);
+ txq = iflib_simple_select_queue(ctx, m);
+ mtx_lock(&txq->ift_mtx);
+ error = iflib_encap(txq, &m);
+ if (error == 0) {
+ pkt_sent++;
+ bytes_sent += m->m_pkthdr.len;
+ mcast_sent += !!(m->m_flags & M_MCAST);
+ (void)iflib_txd_db_check(txq, true);
+ }
+ (void)iflib_completed_tx_reclaim(txq, RECLAIM_THRESH(ctx));
+ mtx_unlock(&txq->ift_mtx);
+ if_inc_counter(ifp, IFCOUNTER_OBYTES, bytes_sent);
+ if_inc_counter(ifp, IFCOUNTER_OPACKETS, pkt_sent);
+ if (mcast_sent)
+ if_inc_counter(ifp, IFCOUNTER_OMCASTS, mcast_sent);
+
+ return (error);
+}
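Because the new "simple_tx" sysctl is CTLFLAG_RDTUN, it can only be set as a boot-time tunable, before the device attaches. A hypothetical sketch, assuming iflib's usual dev.<driver>.<unit>.iflib sysctl tree, with an em(4) device as placeholder:

# Enable the simple if_transmit path for em0 on next boot (placeholder device).
echo 'dev.em.0.iflib.simple_tx="1"' >> /boot/loader.conf
# After reboot the attach-time device_printf confirms it; the sysctl itself
# is read-only at runtime:
sysctl dev.em.0.iflib.simple_tx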
diff --git a/sys/net/pfvar.h b/sys/net/pfvar.h
index c397f0b67896..d6c13470f2eb 100644
--- a/sys/net/pfvar.h
+++ b/sys/net/pfvar.h
@@ -1020,7 +1020,7 @@ struct pf_state_scrub_export {
#define PF_SCRUB_FLAG_VALID 0x01
uint8_t scrub_flag;
uint32_t pfss_ts_mod; /* timestamp modulation */
-};
+} __packed;
struct pf_state_key_export {
struct pf_addr addr[2];
@@ -1037,7 +1037,7 @@ struct pf_state_peer_export {
uint8_t state; /* active state level */
uint8_t wscale; /* window scaling factor */
uint8_t dummy[6];
-};
+} __packed;
_Static_assert(sizeof(struct pf_state_peer_export) == 32, "size incorrect");
struct pf_state_export {
@@ -1179,26 +1179,6 @@ struct pf_test_ctx {
* Unified state structures for pulling states out of the kernel
* used by pfsync(4) and the pf(4) ioctl.
*/
-struct pfsync_state_scrub {
- u_int16_t pfss_flags;
- u_int8_t pfss_ttl; /* stashed TTL */
-#define PFSYNC_SCRUB_FLAG_VALID 0x01
- u_int8_t scrub_flag;
- u_int32_t pfss_ts_mod; /* timestamp modulation */
-} __packed;
-
-struct pfsync_state_peer {
- struct pfsync_state_scrub scrub; /* state is scrubbed */
- u_int32_t seqlo; /* Max sequence number sent */
- u_int32_t seqhi; /* Max the other end ACKd + win */
- u_int32_t seqdiff; /* Sequence number modulator */
- u_int16_t max_win; /* largest window (pre scaling) */
- u_int16_t mss; /* Maximum segment size option */
- u_int8_t state; /* active state level */
- u_int8_t wscale; /* window scaling factor */
- u_int8_t pad[6];
-} __packed;
-
struct pfsync_state_key {
struct pf_addr addr[2];
u_int16_t port[2];
@@ -1208,8 +1188,8 @@ struct pfsync_state_1301 {
u_int64_t id;
char ifname[IFNAMSIZ];
struct pfsync_state_key key[2];
- struct pfsync_state_peer src;
- struct pfsync_state_peer dst;
+ struct pf_state_peer_export src;
+ struct pf_state_peer_export dst;
struct pf_addr rt_addr;
u_int32_t rule;
u_int32_t anchor;
@@ -1235,8 +1215,8 @@ struct pfsync_state_1400 {
u_int64_t id;
char ifname[IFNAMSIZ];
struct pfsync_state_key key[2];
- struct pfsync_state_peer src;
- struct pfsync_state_peer dst;
+ struct pf_state_peer_export src;
+ struct pf_state_peer_export dst;
struct pf_addr rt_addr;
u_int32_t rule;
u_int32_t anchor;
@@ -1323,39 +1303,10 @@ extern pflog_packet_t *pflog_packet_ptr;
/* for copies to/from network byte order */
/* ioctl interface also uses network byte order */
-#define pf_state_peer_hton(s,d) do { \
- (d)->seqlo = htonl((s)->seqlo); \
- (d)->seqhi = htonl((s)->seqhi); \
- (d)->seqdiff = htonl((s)->seqdiff); \
- (d)->max_win = htons((s)->max_win); \
- (d)->mss = htons((s)->mss); \
- (d)->state = (s)->state; \
- (d)->wscale = (s)->wscale; \
- if ((s)->scrub) { \
- (d)->scrub.pfss_flags = \
- htons((s)->scrub->pfss_flags & PFSS_TIMESTAMP); \
- (d)->scrub.pfss_ttl = (s)->scrub->pfss_ttl; \
- (d)->scrub.pfss_ts_mod = htonl((s)->scrub->pfss_ts_mod);\
- (d)->scrub.scrub_flag = PFSYNC_SCRUB_FLAG_VALID; \
- } \
-} while (0)
-
-#define pf_state_peer_ntoh(s,d) do { \
- (d)->seqlo = ntohl((s)->seqlo); \
- (d)->seqhi = ntohl((s)->seqhi); \
- (d)->seqdiff = ntohl((s)->seqdiff); \
- (d)->max_win = ntohs((s)->max_win); \
- (d)->mss = ntohs((s)->mss); \
- (d)->state = (s)->state; \
- (d)->wscale = (s)->wscale; \
- if ((s)->scrub.scrub_flag == PFSYNC_SCRUB_FLAG_VALID && \
- (d)->scrub != NULL) { \
- (d)->scrub->pfss_flags = \
- ntohs((s)->scrub.pfss_flags) & PFSS_TIMESTAMP; \
- (d)->scrub->pfss_ttl = (s)->scrub.pfss_ttl; \
- (d)->scrub->pfss_ts_mod = ntohl((s)->scrub.pfss_ts_mod);\
- } \
-} while (0)
+void pf_state_peer_hton(const struct pf_state_peer *,
+ struct pf_state_peer_export *);
+void pf_state_peer_ntoh(const struct pf_state_peer_export *,
+ struct pf_state_peer *);
#define pf_state_counter_hton(s,d) do { \
d[0] = htonl((s>>32)&0xffffffff); \
diff --git a/sys/netinet/ip_icmp.c b/sys/netinet/ip_icmp.c
index 543b846fbba5..fc0848b2c944 100644
--- a/sys/netinet/ip_icmp.c
+++ b/sys/netinet/ip_icmp.c
@@ -391,7 +391,6 @@ stdreply: icmpelen = max(8, min(V_icmp_quotelen, ntohs(oip->ip_len) -
nip->ip_hl = 5;
nip->ip_p = IPPROTO_ICMP;
nip->ip_tos = 0;
- nip->ip_off = 0;
if (V_error_keeptags)
m_tag_copy_chain(m, n, M_NOWAIT);
@@ -872,6 +871,8 @@ match:
mac_netinet_icmp_replyinplace(m);
#endif
ip->ip_src = t;
+ /* ip->ip_tos will be reflected. */
+ ip->ip_off = htons(0);
ip->ip_ttl = V_ip_defttl;
if (optlen > 0) {
diff --git a/sys/netinet/udp_usrreq.c b/sys/netinet/udp_usrreq.c
index 2f2f9abf1c83..3e6519118a40 100644
--- a/sys/netinet/udp_usrreq.c
+++ b/sys/netinet/udp_usrreq.c
@@ -448,7 +448,7 @@ udp_multi_input(struct mbuf *m, int proto, struct sockaddr_in *udp_in)
/*
* No matching pcb found; discard datagram. (No need
* to send an ICMP Port Unreachable for a broadcast
- * or multicast datgram.)
+ * or multicast datagram.)
*/
UDPSTAT_INC(udps_noport);
if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr)))
diff --git a/sys/netinet6/udp6_usrreq.c b/sys/netinet6/udp6_usrreq.c
index 0027cf3bd230..1a32365f5d1d 100644
--- a/sys/netinet6/udp6_usrreq.c
+++ b/sys/netinet6/udp6_usrreq.c
@@ -341,7 +341,7 @@ udp6_multi_input(struct mbuf *m, int off, int proto,
/*
* No matching pcb found; discard datagram. (No need
* to send an ICMP Port Unreachable for a broadcast
- * or multicast datgram.)
+ * or multicast datagram.)
*/
UDPSTAT_INC(udps_noport);
UDPSTAT_INC(udps_noportmcast);
diff --git a/sys/netlink/netlink_io.c b/sys/netlink/netlink_io.c
index e7908d6f3a44..2391d8ea752c 100644
--- a/sys/netlink/netlink_io.c
+++ b/sys/netlink/netlink_io.c
@@ -216,16 +216,17 @@ nl_send(struct nl_writer *nw, struct nlpcb *nlp)
hdr->nlmsg_len);
}
- if (nlp->nl_linux && linux_netlink_p != NULL &&
- __predict_false(!linux_netlink_p->msgs_to_linux(nw, nlp))) {
+ if (nlp->nl_linux && linux_netlink_p != NULL) {
+ nb = linux_netlink_p->msgs_to_linux(nw->buf, nlp);
nl_buf_free(nw->buf);
nw->buf = NULL;
- return (false);
+ if (nb == NULL)
+ return (false);
+ } else {
+ nb = nw->buf;
+ nw->buf = NULL;
}
- nb = nw->buf;
- nw->buf = NULL;
-
SOCK_RECVBUF_LOCK(so);
if (!nw->ignore_limit && __predict_false(sb->sb_hiwat <= sb->sb_ccc)) {
SOCK_RECVBUF_UNLOCK(so);
diff --git a/sys/netlink/netlink_linux.h b/sys/netlink/netlink_linux.h
index d4c451d470b2..794065692901 100644
--- a/sys/netlink/netlink_linux.h
+++ b/sys/netlink/netlink_linux.h
@@ -37,7 +37,7 @@ struct nlpcb;
struct nl_pstate;
struct nl_writer;
-typedef bool msgs_to_linux_cb_t(struct nl_writer *nw, struct nlpcb *nlp);
+typedef struct nl_buf * msgs_to_linux_cb_t(struct nl_buf *, struct nlpcb *);
typedef int msg_from_linux_cb_t(int netlink_family, struct nlmsghdr **hdr,
struct nl_pstate *npt);
diff --git a/sys/netlink/route/iface.c b/sys/netlink/route/iface.c
index 8b871576d0b2..9beb80792af4 100644
--- a/sys/netlink/route/iface.c
+++ b/sys/netlink/route/iface.c
@@ -403,6 +403,7 @@ static const struct nlattr_parser nla_p_linfo[] = {
NL_DECLARE_ATTR_PARSER(linfo_parser, nla_p_linfo);
static const struct nlattr_parser nla_p_if[] = {
+ { .type = IFLA_ADDRESS, .off = _OUT(ifla_address), .cb = nlattr_get_nla },
{ .type = IFLA_IFNAME, .off = _OUT(ifla_ifname), .cb = nlattr_get_string },
{ .type = IFLA_MTU, .off = _OUT(ifla_mtu), .cb = nlattr_get_uint32 },
{ .type = IFLA_LINK, .off = _OUT(ifla_link), .cb = nlattr_get_uint32 },
diff --git a/sys/netlink/route/iface_drivers.c b/sys/netlink/route/iface_drivers.c
index 4bf913d9c978..21db3017df18 100644
--- a/sys/netlink/route/iface_drivers.c
+++ b/sys/netlink/route/iface_drivers.c
@@ -105,6 +105,24 @@ _nl_modify_ifp_generic(struct ifnet *ifp, struct nl_parsed_link *lattrs,
}
}
+ if (lattrs->ifla_address != NULL) {
+ if (nlp_has_priv(npt->nlp, PRIV_NET_SETIFMAC)) {
+ error = if_setlladdr(ifp,
+ NLA_DATA(lattrs->ifla_address),
+ NLA_DATA_LEN(lattrs->ifla_address));
+ if (error != 0) {
+ nlmsg_report_err_msg(npt,
+ "setting IFLA_ADDRESS failed with error code: %d",
+ error);
+ return (error);
+ }
+ } else {
+ nlmsg_report_err_msg(npt,
+ "Not enough privileges to set IFLA_ADDRESS");
+ return (EPERM);
+ }
+ }
+
return (0);
}
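A hedged sketch of a client exercising the new IFLA_ADDRESS handling over a raw NETLINK_ROUTE socket. The interface name and MAC are placeholders, the sockaddr_nl layout follows netlink(4), and the caller needs the PRIV_NET_SETIFMAC privilege (i.e. root); a real client would also read back the NLMSG_ERROR ack.

#include <sys/socket.h>
#include <net/if.h>
#include <netlink/netlink.h>
#include <netlink/netlink_route.h>

#include <err.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	struct sockaddr_nl snl = {
		.nl_len = sizeof(struct sockaddr_nl),
		.nl_family = AF_NETLINK,
	};
	struct {
		struct nlmsghdr hdr;
		struct ifinfomsg ifm;
		struct nlattr nla;
		uint8_t addr[8];	/* 6 bytes of MAC + alignment pad */
	} req;
	int fd;

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		err(1, "socket");

	memset(&req, 0, sizeof(req));
	req.hdr.nlmsg_type = RTM_NEWLINK;
	req.hdr.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.ifm.ifi_family = AF_UNSPEC;
	req.ifm.ifi_index = if_nametoindex("epair0a");	/* placeholder */
	req.nla.nla_type = IFLA_ADDRESS;
	req.nla.nla_len = NLA_HDRLEN + sizeof(mac);
	memcpy(req.addr, mac, sizeof(mac));
	req.hdr.nlmsg_len = sizeof(req.hdr) + sizeof(req.ifm) +
	    NLA_ALIGN(req.nla.nla_len);

	if (sendto(fd, &req, req.hdr.nlmsg_len, 0,
	    (struct sockaddr *)&snl, sizeof(snl)) < 0)
		err(1, "sendto");
	close(fd);
	return (0);
}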
diff --git a/sys/netlink/route/route_var.h b/sys/netlink/route/route_var.h
index b84b34461e35..41f110038b54 100644
--- a/sys/netlink/route/route_var.h
+++ b/sys/netlink/route/route_var.h
@@ -69,6 +69,7 @@ struct nl_parsed_link {
char *ifla_cloner;
char *ifla_ifalias;
struct nlattr *ifla_idata;
+ struct nlattr *ifla_address;
unsigned short ifi_type;
int ifi_index;
uint32_t ifla_link;
diff --git a/sys/netpfil/pf/if_pfsync.c b/sys/netpfil/pf/if_pfsync.c
index e34c08c8c4db..585c196391c0 100644
--- a/sys/netpfil/pf/if_pfsync.c
+++ b/sys/netpfil/pf/if_pfsync.c
@@ -123,8 +123,8 @@ union inet_template {
sizeof(struct pfsync_header) + \
sizeof(struct pfsync_subheader) )
-static int pfsync_upd_tcp(struct pf_kstate *, struct pfsync_state_peer *,
- struct pfsync_state_peer *);
+static int pfsync_upd_tcp(struct pf_kstate *, struct pf_state_peer_export *,
+ struct pf_state_peer_export *);
static int pfsync_in_clr(struct mbuf *, int, int, int, int);
static int pfsync_in_ins(struct mbuf *, int, int, int, int);
static int pfsync_in_iack(struct mbuf *, int, int, int, int);
@@ -330,7 +330,7 @@ SYSCTL_UINT(_net_pfsync, OID_AUTO, defer_delay, CTLFLAG_VNET | CTLFLAG_RW,
static int pfsync_clone_create(struct if_clone *, int, caddr_t);
static void pfsync_clone_destroy(struct ifnet *);
-static int pfsync_alloc_scrub_memory(struct pfsync_state_peer *,
+static int pfsync_alloc_scrub_memory(struct pf_state_peer_export *,
struct pf_state_peer *);
static int pfsyncoutput(struct ifnet *, struct mbuf *,
const struct sockaddr *, struct route *);
@@ -502,7 +502,7 @@ pfsync_clone_destroy(struct ifnet *ifp)
}
static int
-pfsync_alloc_scrub_memory(struct pfsync_state_peer *s,
+pfsync_alloc_scrub_memory(struct pf_state_peer_export *s,
struct pf_state_peer *d)
{
if (s->scrub.scrub_flag && d->scrub == NULL) {
@@ -1172,8 +1172,8 @@ pfsync_in_iack(struct mbuf *m, int offset, int count, int flags, int action)
}
static int
-pfsync_upd_tcp(struct pf_kstate *st, struct pfsync_state_peer *src,
- struct pfsync_state_peer *dst)
+pfsync_upd_tcp(struct pf_kstate *st, struct pf_state_peer_export *src,
+ struct pf_state_peer_export *dst)
{
int sync = 0;
diff --git a/sys/netpfil/pf/pf.c b/sys/netpfil/pf/pf.c
index 9d83e7b82e6f..8cd4fff95b15 100644
--- a/sys/netpfil/pf/pf.c
+++ b/sys/netpfil/pf/pf.c
@@ -2069,6 +2069,44 @@ pf_find_state_all_exists(const struct pf_state_key_cmp *key, u_int dir)
return (false);
}
+void
+pf_state_peer_hton(const struct pf_state_peer *s, struct pf_state_peer_export *d)
+{
+ d->seqlo = htonl(s->seqlo);
+ d->seqhi = htonl(s->seqhi);
+ d->seqdiff = htonl(s->seqdiff);
+ d->max_win = htons(s->max_win);
+ d->mss = htons(s->mss);
+ d->state = s->state;
+ d->wscale = s->wscale;
+ if (s->scrub) {
+ d->scrub.pfss_flags = htons(
+ s->scrub->pfss_flags & PFSS_TIMESTAMP);
+ d->scrub.pfss_ttl = s->scrub->pfss_ttl;
+ d->scrub.pfss_ts_mod = htonl(s->scrub->pfss_ts_mod);
+ d->scrub.scrub_flag = PF_SCRUB_FLAG_VALID;
+ }
+}
+
+void
+pf_state_peer_ntoh(const struct pf_state_peer_export *s, struct pf_state_peer *d)
+{
+ d->seqlo = ntohl(s->seqlo);
+ d->seqhi = ntohl(s->seqhi);
+ d->seqdiff = ntohl(s->seqdiff);
+ d->max_win = ntohs(s->max_win);
+ d->mss = ntohs(s->mss);
+ d->state = s->state;
+ d->wscale = s->wscale;
+ if (s->scrub.scrub_flag == PF_SCRUB_FLAG_VALID &&
+ d->scrub != NULL) {
+ d->scrub->pfss_flags = ntohs(s->scrub.pfss_flags) &
+ PFSS_TIMESTAMP;
+ d->scrub->pfss_ttl = s->scrub.pfss_ttl;
+ d->scrub->pfss_ts_mod = ntohl(s->scrub.pfss_ts_mod);
+ }
+}
+
struct pf_udp_mapping *
pf_udp_mapping_create(sa_family_t af, struct pf_addr *src_addr, uint16_t src_port,
struct pf_addr *nat_addr, uint16_t nat_port)
diff --git a/sys/netpfil/pf/pf_nl.c b/sys/netpfil/pf/pf_nl.c
index 09754359ec2d..45b5b8dd5fef 100644
--- a/sys/netpfil/pf/pf_nl.c
+++ b/sys/netpfil/pf/pf_nl.c
@@ -118,7 +118,7 @@ dump_state_peer(struct nl_writer *nw, int attr, const struct pf_state_peer *peer
nlattr_add_u16(nw, PF_STP_PFSS_FLAGS, pfss_flags);
nlattr_add_u32(nw, PF_STP_PFSS_TS_MOD, sc->pfss_ts_mod);
nlattr_add_u8(nw, PF_STP_PFSS_TTL, sc->pfss_ttl);
- nlattr_add_u8(nw, PF_STP_SCRUB_FLAG, PFSYNC_SCRUB_FLAG_VALID);
+ nlattr_add_u8(nw, PF_STP_SCRUB_FLAG, PF_SCRUB_FLAG_VALID);
}
nlattr_set_len(nw, off);
diff --git a/sys/powerpc/conf/GENERIC64 b/sys/powerpc/conf/GENERIC64
index 85711c8fc3ff..630c88b97dd7 100644
--- a/sys/powerpc/conf/GENERIC64
+++ b/sys/powerpc/conf/GENERIC64
@@ -293,3 +293,4 @@ device virtio_balloon # VirtIO Memory Balloon device
options HID_DEBUG # enable debug msgs
device hid # Generic HID support
device hidbus # Generic HID Bus
+options U2F_MAKE_UHID_ALIAS # install /dev/uhid alias for /dev/u2f/
diff --git a/sys/powerpc/conf/GENERIC64LE b/sys/powerpc/conf/GENERIC64LE
index a56feb6574a4..eb9a9441425d 100644
--- a/sys/powerpc/conf/GENERIC64LE
+++ b/sys/powerpc/conf/GENERIC64LE
@@ -274,3 +274,4 @@ device virtio_balloon # VirtIO Memory Balloon device
options HID_DEBUG # enable debug msgs
device hid # Generic HID support
device hidbus # Generic HID Bus
+options U2F_MAKE_UHID_ALIAS # install /dev/uhid alias for /dev/u2f/
diff --git a/sys/riscv/conf/GENERIC b/sys/riscv/conf/GENERIC
index a8500fe80019..2ff711e80127 100644
--- a/sys/riscv/conf/GENERIC
+++ b/sys/riscv/conf/GENERIC
@@ -132,6 +132,7 @@ device umass # Disks/Mass storage - Requires scbus and da
options HID_DEBUG # enable debug msgs
device hid # Generic HID support
device hidbus # Generic HID Bus
+options U2F_MAKE_UHID_ALIAS # install /dev/uhid alias for /dev/u2f/
# Serial (COM) ports
device uart # Generic UART driver
diff --git a/sys/riscv/starfive/jh7110_pcie.c b/sys/riscv/starfive/jh7110_pcie.c
index 2d0a4be69b2c..5181252ab2dc 100644
--- a/sys/riscv/starfive/jh7110_pcie.c
+++ b/sys/riscv/starfive/jh7110_pcie.c
@@ -483,6 +483,16 @@ jh7110_pcie_msi_enable_intr(device_t dev, struct intr_irqsrc *isrc)
}
static void
+jh7110_pcie_msi_post_filter(device_t dev, struct intr_irqsrc *isrc)
+{
+}
+
+static void
+jh7110_pcie_msi_post_ithread(device_t dev, struct intr_irqsrc *isrc)
+{
+}
+
+static void
jh7110_pcie_msi_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
{
struct jh7110_pcie_softc *sc;
@@ -1008,6 +1018,8 @@ static device_method_t jh7110_pcie_methods[] = {
/* Interrupt controller interface */
DEVMETHOD(pic_enable_intr, jh7110_pcie_msi_enable_intr),
DEVMETHOD(pic_disable_intr, jh7110_pcie_msi_disable_intr),
+ DEVMETHOD(pic_post_filter, jh7110_pcie_msi_post_filter),
+ DEVMETHOD(pic_post_ithread, jh7110_pcie_msi_post_ithread),
DEVMETHOD(pic_pre_ithread, jh7110_pcie_msi_pre_ithread),
/* OFW bus interface */
diff --git a/sys/rpc/auth.h b/sys/rpc/auth.h
index 3d58fb19536b..33c33ffd594d 100644
--- a/sys/rpc/auth.h
+++ b/sys/rpc/auth.h
@@ -246,6 +246,19 @@ extern AUTH *authunix_create_default(void); /* takes no parameters */
extern AUTH *authnone_create(void); /* takes no parameters */
extern AUTH *authtls_create(void); /* takes no parameters */
__END_DECLS
+/*
+ * DES style authentication
+ * AUTH *authsecdes_create(servername, window, timehost, ckey)
+ * char *servername; - network name of server
+ * u_int window; - time to live
+ * const char *timehost; - optional hostname to sync with
+ * des_block *ckey; - optional conversation key to use
+ */
+__BEGIN_DECLS
+extern AUTH *authdes_create (char *, u_int, struct sockaddr *, des_block *);
+extern AUTH *authdes_seccreate (const char *, const u_int, const char *,
+ const des_block *);
+__END_DECLS
__BEGIN_DECLS
extern bool_t xdr_opaque_auth (XDR *, struct opaque_auth *);
@@ -267,6 +280,19 @@ extern void passwd2des ( char *, char * );
__END_DECLS
/*
+ *
+ * These routines interface to the keyserv daemon
+ *
+ */
+__BEGIN_DECLS
+extern int key_decryptsession(const char *, des_block *);
+extern int key_encryptsession(const char *, des_block *);
+extern int key_gendes(des_block *);
+extern int key_setsecret(const char *);
+extern int key_secretkey_is_set(void);
+__END_DECLS
+
+/*
* Publickey routines.
*/
__BEGIN_DECLS
diff --git a/sys/security/mac_do/mac_do.c b/sys/security/mac_do/mac_do.c
index 8856be5fa1a3..7a5ac2e01f75 100644
--- a/sys/security/mac_do/mac_do.c
+++ b/sys/security/mac_do/mac_do.c
@@ -44,7 +44,7 @@ SYSCTL_INT(_security_mac_do, OID_AUTO, print_parse_error, CTLFLAG_RWTUN,
&print_parse_error, 0, "Print parse errors on setting rules "
"(via sysctl(8)).");
-static MALLOC_DEFINE(M_DO, "do_rule", "Rules for mac_do");
+static MALLOC_DEFINE(M_MAC_DO, "mac_do", "mac_do(4) security module");
#define MAC_RULE_STRING_LEN 1024
@@ -319,17 +319,17 @@ toast_rules(struct rules *const rules)
struct rule *rule, *rule_next;
STAILQ_FOREACH_SAFE(rule, head, r_entries, rule_next) {
- free(rule->uids, M_DO);
- free(rule->gids, M_DO);
- free(rule, M_DO);
+ free(rule->uids, M_MAC_DO);
+ free(rule->gids, M_MAC_DO);
+ free(rule, M_MAC_DO);
}
- free(rules, M_DO);
+ free(rules, M_MAC_DO);
}
static struct rules *
alloc_rules(void)
{
- struct rules *const rules = malloc(sizeof(*rules), M_DO, M_WAITOK);
+ struct rules *const rules = malloc(sizeof(*rules), M_MAC_DO, M_WAITOK);
_Static_assert(MAC_RULE_STRING_LEN > 0, "MAC_RULE_STRING_LEN <= 0!");
rules->string[0] = 0;
@@ -433,7 +433,7 @@ static void
make_parse_error(struct parse_error **const parse_error, const size_t pos,
const char *const fmt, ...)
{
- struct parse_error *const err = malloc(sizeof(*err), M_DO, M_WAITOK);
+ struct parse_error *const err = malloc(sizeof(*err), M_MAC_DO, M_WAITOK);
va_list ap;
err->pos = pos;
@@ -448,7 +448,7 @@ make_parse_error(struct parse_error **const parse_error, const size_t pos,
static void
free_parse_error(struct parse_error *const parse_error)
{
- free(parse_error, M_DO);
+ free(parse_error, M_MAC_DO);
}
static int
@@ -733,7 +733,7 @@ parse_target_clause(char *to, struct rule *const rule,
"Too many target clauses of type '%s'.", to_type);
return (EOVERFLOW);
}
- ie = malloc(sizeof(*ie), M_DO, M_WAITOK);
+ ie = malloc(sizeof(*ie), M_MAC_DO, M_WAITOK);
ie->spec = is;
STAILQ_INSERT_TAIL(list, ie, ie_entries);
check_type_and_id_spec(type, &is);
@@ -784,7 +784,7 @@ pour_list_into_rule(const id_type_t type, struct id_list *const list,
STAILQ_FOREACH_SAFE(ie, list, ie_entries, ie_next) {
MPASS(idx < *nb);
array[idx] = ie->spec;
- free(ie, M_DO);
+ free(ie, M_MAC_DO);
++idx;
}
MPASS(idx == *nb);
@@ -874,7 +874,7 @@ parse_single_rule(char *rule, struct rules *const rules,
STAILQ_INIT(&gid_list);
/* Freed when the 'struct rules' container is freed. */
- new = malloc(sizeof(*new), M_DO, M_WAITOK | M_ZERO);
+ new = malloc(sizeof(*new), M_MAC_DO, M_WAITOK | M_ZERO);
from_type = strsep_noblanks(&rule, "=");
MPASS(from_type != NULL); /* Because 'rule' was not NULL. */
@@ -933,7 +933,7 @@ parse_single_rule(char *rule, struct rules *const rules,
} while (to_list != NULL);
if (new->uids_nb != 0) {
- new->uids = malloc(sizeof(*new->uids) * new->uids_nb, M_DO,
+ new->uids = malloc(sizeof(*new->uids) * new->uids_nb, M_MAC_DO,
M_WAITOK);
error = pour_list_into_rule(IT_UID, &uid_list, new->uids,
&new->uids_nb, parse_error);
@@ -949,7 +949,7 @@ parse_single_rule(char *rule, struct rules *const rules,
}
if (new->gids_nb != 0) {
- new->gids = malloc(sizeof(*new->gids) * new->gids_nb, M_DO,
+ new->gids = malloc(sizeof(*new->gids) * new->gids_nb, M_MAC_DO,
M_WAITOK);
error = pour_list_into_rule(IT_GID, &gid_list, new->gids,
&new->gids_nb, parse_error);
@@ -969,13 +969,13 @@ parse_single_rule(char *rule, struct rules *const rules,
return (0);
einval:
- free(new->gids, M_DO);
- free(new->uids, M_DO);
- free(new, M_DO);
+ free(new->gids, M_MAC_DO);
+ free(new->uids, M_MAC_DO);
+ free(new, M_MAC_DO);
STAILQ_FOREACH_SAFE(ie, &gid_list, ie_entries, ie_next)
- free(ie, M_DO);
+ free(ie, M_MAC_DO);
STAILQ_FOREACH_SAFE(ie, &uid_list, ie_entries, ie_next)
- free(ie, M_DO);
+ free(ie, M_MAC_DO);
MPASS(*parse_error != NULL);
return (EINVAL);
}
@@ -1028,7 +1028,7 @@ parse_rules(const char *const string, struct rules **const rulesp,
bcopy(string, rules->string, len + 1);
MPASS(rules->string[len] == '\0'); /* Catch some races. */
- copy = malloc(len + 1, M_DO, M_WAITOK);
+ copy = malloc(len + 1, M_MAC_DO, M_WAITOK);
bcopy(string, copy, len + 1);
MPASS(copy[len] == '\0'); /* Catch some races. */
@@ -1046,7 +1046,7 @@ parse_rules(const char *const string, struct rules **const rulesp,
*rulesp = rules;
out:
- free(copy, M_DO);
+ free(copy, M_MAC_DO);
return (error);
}
@@ -1226,7 +1226,7 @@ parse_and_set_rules(struct prison *const pr, const char *rules_string,
static int
mac_do_sysctl_rules(SYSCTL_HANDLER_ARGS)
{
- char *const buf = malloc(MAC_RULE_STRING_LEN, M_DO, M_WAITOK);
+ char *const buf = malloc(MAC_RULE_STRING_LEN, M_MAC_DO, M_WAITOK);
struct prison *const td_pr = req->td->td_ucred->cr_prison;
struct prison *pr;
struct rules *rules;
@@ -1250,7 +1250,7 @@ mac_do_sysctl_rules(SYSCTL_HANDLER_ARGS)
free_parse_error(parse_error);
}
out:
- free(buf, M_DO);
+ free(buf, M_MAC_DO);
return (error);
}
@@ -1573,7 +1573,7 @@ set_data_header(void *const data, const size_t size, const int priv,
static void *
alloc_data(void *const data, const size_t size)
{
- struct mac_do_data_header *const hdr = realloc(data, size, M_DO,
+ struct mac_do_data_header *const hdr = realloc(data, size, M_MAC_DO,
M_WAITOK);
MPASS(size >= sizeof(struct mac_do_data_header));
@@ -1602,7 +1602,7 @@ alloc_data(void *const data, const size_t size)
static void
dealloc_thread_osd(void *const value)
{
- free(value, M_DO);
+ free(value, M_MAC_DO);
}
/*
diff --git a/sys/sys/conf.h b/sys/sys/conf.h
index 1646aa108701..a830c9d4c622 100644
--- a/sys/sys/conf.h
+++ b/sys/sys/conf.h
@@ -159,6 +159,7 @@ typedef int dumper_hdr_t(struct dumperinfo *di, struct kerneldumpheader *kdh);
#define GID_RT_PRIO 47
#define GID_ID_PRIO 48
#define GID_DIALER 68
+#define GID_U2F 116
#define GID_NOGROUP 65533
#define GID_NOBODY 65534
diff --git a/sys/sys/param.h b/sys/sys/param.h
index c410a6ee666f..fc2a78883f1e 100644
--- a/sys/sys/param.h
+++ b/sys/sys/param.h
@@ -74,7 +74,7 @@
* cannot include sys/param.h and should only be updated here.
*/
#undef __FreeBSD_version
-#define __FreeBSD_version 1500060
+#define __FreeBSD_version 1500063
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,
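Downstream code typically consumes such bumps with a version guard. A minimal, hypothetical sketch (which feature a given bump corresponds to must be confirmed against the commit log; the macro name is illustrative):

#include <sys/param.h>

#if defined(__FreeBSD_version) && __FreeBSD_version >= 1500063
/* Build against interfaces introduced by (or after) this bump. */
#define HAVE_POST_1500063_API 1
#endif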
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index 074769d55c2d..6ef9bbec9446 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -67,6 +67,11 @@ __enum_uint8_decl(vtype) {
VLASTTYPE = VMARKER,
};
+/*
+ * We frequently need to test whether something is a device node.
+ */
+#define VTYPE_ISDEV(vtype) ((vtype) == VCHR || (vtype) == VBLK)
+
__enum_uint8_decl(vstate) {
VSTATE_UNINITIALIZED,
VSTATE_CONSTRUCTED,
@@ -199,6 +204,8 @@ struct vnode {
int v_seqc_users; /* i modifications pending */
};
+#define VN_ISDEV(vp) VTYPE_ISDEV((vp)->v_type)
+
#ifndef DEBUG_LOCKS
#ifdef _LP64
/*
@@ -309,6 +316,8 @@ struct vattr {
long va_spare; /* remain quad aligned */
};
+#define VATTR_ISDEV(vap) VTYPE_ISDEV((vap)->va_type)
+
/*
* Flags for va_vaflags.
*/
diff --git a/sys/tools/amd64_ia32_vdso.sh b/sys/tools/amd64_ia32_vdso.sh
index 85d2299b45d0..e5865639d398 100644
--- a/sys/tools/amd64_ia32_vdso.sh
+++ b/sys/tools/amd64_ia32_vdso.sh
@@ -58,7 +58,7 @@ then
exit 1
fi
-${CC} ${DEBUG} -x assembler-with-cpp -DLOCORE -fPIC -nostdinc -c \
+${CC} -x assembler-with-cpp -DLOCORE -fPIC -nostdinc -c \
-o elf-vdso32.so.o -I. -I"${S}" -include opt_global.h \
-DVDSO_NAME=elf_vdso32_so_1 -DVDSO_FILE=\"elf-vdso32.so.1\" \
"${S}"/tools/vdso_wrap.S
diff --git a/sys/tools/amd64_vdso.sh b/sys/tools/amd64_vdso.sh
index 2a83ae874ab7..ed91ddc8abb5 100644
--- a/sys/tools/amd64_vdso.sh
+++ b/sys/tools/amd64_vdso.sh
@@ -67,7 +67,7 @@ then
exit 1
fi
-${CC} ${DEBUG} -x assembler-with-cpp -DLOCORE -fPIC -nostdinc -c \
+${CC} -x assembler-with-cpp -DLOCORE -fPIC -nostdinc -c \
-o elf-vdso.so.o -I. -I"${S}" -include opt_global.h \
-DVDSO_NAME=elf_vdso_so_1 -DVDSO_FILE=\"elf-vdso.so.1\" \
"${S}"/tools/vdso_wrap.S
diff --git a/sys/ufs/ffs/ffs_vnops.c b/sys/ufs/ffs/ffs_vnops.c
index 897a21032907..c7e2b3f4b8e6 100644
--- a/sys/ufs/ffs/ffs_vnops.c
+++ b/sys/ufs/ffs/ffs_vnops.c
@@ -1550,7 +1550,7 @@ ffs_openextattr(
} */ *ap)
{
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
return (ffs_open_ea(ap->a_vp, ap->a_cred, ap->a_td));
@@ -1572,7 +1572,7 @@ ffs_closeextattr(
struct vnode *vp;
vp = ap->a_vp;
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
if (ap->a_commit && (vp->v_mount->mnt_flag & MNT_RDONLY) != 0)
return (EROFS);
@@ -1610,7 +1610,7 @@ ffs_deleteextattr(
vp = ap->a_vp;
ip = VTOI(vp);
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
if (strlen(ap->a_name) == 0)
return (EINVAL);
@@ -1688,7 +1688,7 @@ ffs_getextattr(
ip = VTOI(ap->a_vp);
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
@@ -1738,7 +1738,7 @@ ffs_listextattr(
ip = VTOI(ap->a_vp);
- if (ap->a_vp->v_type == VCHR || ap->a_vp->v_type == VBLK)
+ if (VN_ISDEV(ap->a_vp))
return (EOPNOTSUPP);
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
@@ -1803,7 +1803,7 @@ ffs_setextattr(
ip = VTOI(vp);
fs = ITOFS(ip);
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
if (strlen(ap->a_name) == 0)
return (EINVAL);
diff --git a/sys/ufs/ufs/ufs_vnops.c b/sys/ufs/ufs/ufs_vnops.c
index ffc993aef9fc..0921eee92b9d 100644
--- a/sys/ufs/ufs/ufs_vnops.c
+++ b/sys/ufs/ufs/ufs_vnops.c
@@ -156,30 +156,30 @@ ufs_itimes_locked(struct vnode *vp)
if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_UPDATE)) == 0)
return;
- if ((vp->v_type == VBLK || vp->v_type == VCHR) && !DOINGSOFTDEP(vp))
+ if (VN_ISDEV(vp) && !DOINGSOFTDEP(vp))
UFS_INODE_SET_FLAG(ip, IN_LAZYMOD);
else if (((vp->v_mount->mnt_kern_flag &
- (MNTK_SUSPENDED | MNTK_SUSPEND)) == 0) ||
- (ip->i_flag & (IN_CHANGE | IN_UPDATE)))
+ (MNTK_SUSPENDED | MNTK_SUSPEND)) == 0) ||
+ (ip->i_flag & (IN_CHANGE | IN_UPDATE)) != 0)
UFS_INODE_SET_FLAG(ip, IN_MODIFIED);
- else if (ip->i_flag & IN_ACCESS)
+ else if ((ip->i_flag & IN_ACCESS) != 0)
UFS_INODE_SET_FLAG(ip, IN_LAZYACCESS);
vfs_timestamp(&ts);
- if (ip->i_flag & IN_ACCESS) {
+ if ((ip->i_flag & IN_ACCESS) != 0) {
DIP_SET(ip, i_atime, ts.tv_sec);
DIP_SET(ip, i_atimensec, ts.tv_nsec);
}
- if (ip->i_flag & IN_UPDATE) {
+ if ((ip->i_flag & IN_UPDATE) != 0) {
DIP_SET(ip, i_mtime, ts.tv_sec);
DIP_SET(ip, i_mtimensec, ts.tv_nsec);
}
- if (ip->i_flag & IN_CHANGE) {
+ if ((ip->i_flag & IN_CHANGE) != 0) {
DIP_SET(ip, i_ctime, ts.tv_sec);
DIP_SET(ip, i_ctimensec, ts.tv_nsec);
DIP_SET(ip, i_modrev, DIP(ip, i_modrev) + 1);
}
- out:
+out:
ip->i_flag &= ~(IN_ACCESS | IN_CHANGE | IN_UPDATE);
}
@@ -319,7 +319,7 @@ ufs_open(struct vop_open_args *ap)
struct vnode *vp = ap->a_vp;
struct inode *ip;
- if (vp->v_type == VCHR || vp->v_type == VBLK)
+ if (VN_ISDEV(vp))
return (EOPNOTSUPP);
ip = VTOI(vp);
@@ -540,7 +540,7 @@ ufs_stat(struct vop_stat_args *ap)
sb->st_uid = ip->i_uid;
sb->st_gid = ip->i_gid;
if (I_IS_UFS1(ip)) {
- sb->st_rdev = ip->i_din1->di_rdev;
+ sb->st_rdev = VN_ISDEV(vp) ? ip->i_din1->di_rdev : NODEV;
sb->st_size = ip->i_din1->di_size;
sb->st_mtim.tv_sec = ip->i_din1->di_mtime;
sb->st_mtim.tv_nsec = ip->i_din1->di_mtimensec;
@@ -551,7 +551,7 @@ ufs_stat(struct vop_stat_args *ap)
sb->st_blocks = dbtob((uint64_t)ip->i_din1->di_blocks) / S_BLKSIZE;
sb->st_filerev = ip->i_din1->di_modrev;
} else {
- sb->st_rdev = ip->i_din2->di_rdev;
+ sb->st_rdev = VN_ISDEV(vp) ? ip->i_din2->di_rdev : NODEV;
sb->st_size = ip->i_din2->di_size;
sb->st_mtim.tv_sec = ip->i_din2->di_mtime;
sb->st_mtim.tv_nsec = ip->i_din2->di_mtimensec;
@@ -603,7 +603,7 @@ ufs_getattr(
vap->va_uid = ip->i_uid;
vap->va_gid = ip->i_gid;
if (I_IS_UFS1(ip)) {
- vap->va_rdev = ip->i_din1->di_rdev;
+ vap->va_rdev = VN_ISDEV(vp) ? ip->i_din1->di_rdev : NODEV;
vap->va_size = ip->i_din1->di_size;
vap->va_mtime.tv_sec = ip->i_din1->di_mtime;
vap->va_mtime.tv_nsec = ip->i_din1->di_mtimensec;
@@ -612,7 +612,7 @@ ufs_getattr(
vap->va_bytes = dbtob((uint64_t)ip->i_din1->di_blocks);
vap->va_filerev = ip->i_din1->di_modrev;
} else {
- vap->va_rdev = ip->i_din2->di_rdev;
+ vap->va_rdev = VN_ISDEV(vp) ? ip->i_din2->di_rdev : NODEV;
vap->va_size = ip->i_din2->di_size;
vap->va_mtime.tv_sec = ip->i_din2->di_mtime;
vap->va_mtime.tv_nsec = ip->i_din2->di_mtimensec;
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index de8a6c52c08f..244aa31ea703 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -901,8 +901,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
int error, before, after, rbehind, rahead, poff, i;
int bytecount, secmask;
- KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
- ("%s does not support devices", __func__));
+ KASSERT(!VN_ISDEV(vp), ("%s does not support devices", __func__));
if (VN_IS_DOOMED(vp))
return (VM_PAGER_BAD);
diff --git a/tests/sys/fs/fusefs/mockfs.cc b/tests/sys/fs/fusefs/mockfs.cc
index 65cdc3919652..e8081dea9604 100644
--- a/tests/sys/fs/fusefs/mockfs.cc
+++ b/tests/sys/fs/fusefs/mockfs.cc
@@ -472,7 +472,7 @@ MockFS::MockFS(int max_read, int max_readahead, bool allow_other,
sprintf(fdstr, "%d", m_fuse_fd);
build_iovec(&iov, &iovlen, "fd", fdstr, -1);
if (m_maxread > 0) {
- char val[10];
+ char val[12];
snprintf(val, sizeof(val), "%d", m_maxread);
build_iovec(&iov, &iovlen, "max_read=", &val, -1);
diff --git a/tests/sys/net/Makefile b/tests/sys/net/Makefile
index 65cc99a3e932..e390c6e8059d 100644
--- a/tests/sys/net/Makefile
+++ b/tests/sys/net/Makefile
@@ -40,6 +40,7 @@ ${PACKAGE}FILESMODE_stp.py= 0555
MAN=
PROGS+= randsleep
+PROGS+= transient_tuntap
CFLAGS+= -I${.CURDIR:H:H}
diff --git a/tests/sys/net/if_tun_test.sh b/tests/sys/net/if_tun_test.sh
index a4ffe66e04ce..f4ce7800272e 100755
--- a/tests/sys/net/if_tun_test.sh
+++ b/tests/sys/net/if_tun_test.sh
@@ -56,8 +56,30 @@ basic_cleanup()
vnet_cleanup
}
+atf_test_case "transient" "cleanup"
+transient_head()
+{
+ atf_set descr "Test transient tunnel support"
+ atf_set require.user root
+}
+transient_body()
+{
+ vnet_init
+ vnet_mkjail one
+
+ tun=$(jexec one ifconfig tun create)
+ atf_check -s exit:0 -o not-empty jexec one ifconfig ${tun}
+ jexec one $(atf_get_srcdir)/transient_tuntap /dev/${tun}
+ atf_check -s not-exit:0 -e not-empty jexec one ifconfig ${tun}
+}
+transient_cleanup()
+{
+ vnet_cleanup
+}
+
atf_init_test_cases()
{
atf_add_test_case "235704"
atf_add_test_case "basic"
+ atf_add_test_case "transient"
}
diff --git a/tests/sys/net/transient_tuntap.c b/tests/sys/net/transient_tuntap.c
new file mode 100644
index 000000000000..b0cf43064317
--- /dev/null
+++ b/tests/sys/net/transient_tuntap.c
@@ -0,0 +1,54 @@
+/*-
+ * Copyright (c) 2024 Kyle Evans <kevans@FreeBSD.org>
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ */
+
+/*
+ * This test simply configures the tunnel as transient and exits. By the time
+ * we return, the tunnel should be gone because the last reference disappears.
+ */
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <net/if_tun.h>
+#include <net/if_tap.h>
+
+#include <assert.h>
+#include <err.h>
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+
+int
+main(int argc, char *argv[])
+{
+ unsigned long tunreq;
+ const char *tundev;
+ int one = 1, tunfd;
+
+ assert(argc > 1);
+ tundev = argv[1];
+
+ tunfd = open(tundev, O_RDWR);
+ assert(tunfd >= 0);
+
+ /*
+ * These are technically the same request, but we'll use the formally
+ * correct one just in case.
+ */
+ if (strstr(tundev, "tun") != NULL) {
+ tunreq = TUNSTRANSIENT;
+ } else {
+ assert(strstr(tundev, "tap") != NULL);
+ tunreq = TAPSTRANSIENT;
+ }
+
+ if (ioctl(tunfd, tunreq, &one) == -1)
+ err(1, "ioctl");
+
+ /* Final close should destroy the tunnel automagically. */
+ close(tunfd);
+
+ return (0);
+}
diff --git a/tests/sys/netpfil/pf/table.sh b/tests/sys/netpfil/pf/table.sh
index c773518e95e4..65492545a13b 100644
--- a/tests/sys/netpfil/pf/table.sh
+++ b/tests/sys/netpfil/pf/table.sh
@@ -641,9 +641,31 @@ large_body()
-e match:"${expected}/${expected} addresses added." \
jexec alcatraz pfctl -t foo -T add -f ${pwd}/foo.lst
actual=$(jexec alcatraz pfctl -t foo -T show | wc -l | awk '{ print $1; }')
- if [[ $actual -ne $expected ]]; then
+ if [ $actual -ne $expected ]; then
atf_fail "Unexpected number of table entries $expected $acual"
fi
+
+ # The second pass should work too, but confirm we've inserted everything
+ atf_check -s exit:0 \
+ -e match:"0/${expected} addresses added." \
+ jexec alcatraz pfctl -t foo -T add -f ${pwd}/foo.lst
+
+ echo '42.42.42.42' >> ${pwd}/foo.lst
+ expected=$((${expected} + 1))
+
+ # And we can also insert one additional address
+ atf_check -s exit:0 \
+ -e match:"1/${expected} addresses added." \
+ jexec alcatraz pfctl -t foo -T add -f ${pwd}/foo.lst
+
+ # Try to delete one address
+ atf_check -s exit:0 \
+ -e match:"1/1 addresses deleted." \
+ jexec alcatraz pfctl -t foo -T delete 42.42.42.42
+ # And again, for the same address
+ atf_check -s exit:0 \
+ -e match:"0/1 addresses deleted." \
+ jexec alcatraz pfctl -t foo -T delete 42.42.42.42
}
large_cleanup()
diff --git a/tools/build/Makefile b/tools/build/Makefile
index 83f589ce3864..3c4e07e3cfc2 100644
--- a/tools/build/Makefile
+++ b/tools/build/Makefile
@@ -495,6 +495,7 @@ INSTALLDIR_LIST= \
bin \
lib/geom \
usr/include/casper \
+ usr/include/openssl \
usr/include/private/ucl \
usr/include/private/zstd \
usr/lib \
diff --git a/tools/build/depend-cleanup.sh b/tools/build/depend-cleanup.sh
index cd51c59ff0e1..22bf34439758 100755
--- a/tools/build/depend-cleanup.sh
+++ b/tools/build/depend-cleanup.sh
@@ -50,12 +50,12 @@
# - Replacing generated files with files committed to the tree. This is special
# case of moving from one directory to another. The stale generated file also
# needs to be deleted, so that it isn't found in make's .PATH. Note the
-# unconditional `rm -f`: there's no need for an extra call to first check for
+# unconditional `rm -fv`: there's no need for an extra call to first check for
# the file's existence.
#
# # 20250110 3863fec1ce2d add strlen SIMD implementation
# clean_dep lib/libc strlen S arm-optimized-routines
-# run rm -f "$OBJTOP"/lib/libc/strlen.S
+# run rm -fv "$OBJTOP"/lib/libc/strlen.S
#
# A rule may be required for only one architecture:
#
@@ -152,6 +152,11 @@ run()
fi
}
+# Clean the depend and object files for a given source file if the
+# depend file matches a regex (which defaults to the source file
+# name). This is typically used if a file was renamed, especially if
+# only its extension was changed (e.g. from .c to .cc).
+#
# $1 directory
# $2 source filename w/o extension
# $3 source extension
@@ -162,13 +167,34 @@ clean_dep()
dirprfx=${libcompat:+obj-lib${libcompat}/}
if egrep -qw "${4:-$2\.$3}" "$OBJTOP"/$dirprfx$1/.depend.$2.*o 2>/dev/null; then
echo "Removing stale ${libcompat:+lib${libcompat} }dependencies and objects for $2.$3"
- run rm -f \
+ run rm -fv \
"$OBJTOP"/$dirprfx$1/.depend.$2.* \
"$OBJTOP"/$dirprfx$1/$2.*o
fi
done
}
+# Clean the object file for a given source file if it exists and
+# matches a regex. This is typically used if a change in CFLAGS or
+# similar caused a change in the generated code without a change in
+# the sources.
+#
+# $1 directory
+# $2 source filename w/o extension
+# $3 source extension
+# $4 regex for egrep -w
+clean_obj()
+{
+ for libcompat in "" $ALL_libcompats; do
+ dirprfx=${libcompat:+obj-lib${libcompat}/}
+ if strings "$OBJTOP"/$dirprfx$1/$2.*o 2>/dev/null | egrep -qw "${4}"; then
+ echo "Removing stale ${libcompat:+lib${libcompat} }objects for $2.$3"
+ run rm -fv \
+ "$OBJTOP"/$dirprfx$1/$2.*o
+ fi
+ done
+}
+
extract_epoch()
{
[ -s "$1" ] || return 0
@@ -243,7 +269,7 @@ fi
if stat "$OBJTOP"/tests/sys/kqueue/libkqueue/*kqtest* \
"$OBJTOP"/tests/sys/kqueue/libkqueue/.depend.kqtest* >/dev/null 2>&1; then
echo "Removing old kqtest"
- run rm -f "$OBJTOP"/tests/sys/kqueue/libkqueue/.depend.* \
+ run rm -fv "$OBJTOP"/tests/sys/kqueue/libkqueue/.depend.* \
"$OBJTOP"/tests/sys/kqueue/libkqueue/*
fi
@@ -317,7 +343,7 @@ fi
if [ -f "$OBJTOP"/rescue/rescue/rescue.mk ] && \
! grep -q 'nvme_util.o' "$OBJTOP"/rescue/rescue/rescue.mk; then
echo "removing rescue.mk without nvme_util.o"
- run rm -f "$OBJTOP"/rescue/rescue/rescue.mk
+ run rm -fv "$OBJTOP"/rescue/rescue/rescue.mk
fi
# 20240910 e2df9bb44109
@@ -337,7 +363,7 @@ if [ ${MACHINE} = riscv ]; then
fi
if ! grep -q 'lib/libc/csu/riscv/reloc\.c' "$f"; then
echo "Removing stale dependencies and objects for libc_start1.c"
- run rm -f \
+ run rm -fv \
"$OBJTOP"/lib/libc/.depend.libc_start1.* \
"$OBJTOP"/lib/libc/libc_start1.*o
break
@@ -351,28 +377,28 @@ f="$p"/arm_mve_builtin_sema.inc
if [ -e "$f" ]; then
if grep -q SemaBuiltinConstantArgRange "$f"; then
echo "Removing pre-llvm19 clang-tblgen output"
- run rm -f "$p"/*.inc
+ run rm -fv "$p"/*.inc
fi
fi
# 20241025 cb5e41b16083 Unbundle hash functions from lib/libcrypt
-clean_dep lib/libcrypt crypt-md5 c
-clean_dep lib/libcrypt crypt-nthash c
-clean_dep lib/libcrypt crypt-sha256 c
-clean_dep lib/libcrypt crypt-sha512 c
+clean_obj lib/libcrypt crypt-md5 c __MD5Init
+clean_obj lib/libcrypt crypt-nthash c __MD4Init
+clean_obj lib/libcrypt crypt-sha256 c __SHA256Init
+clean_obj lib/libcrypt crypt-sha512 c __SHA512Init
# 20241213 b55f5e1c4ae3 jemalloc: Move generated jemalloc.3 into lib/libc tree
if [ -h "$OBJTOP"/lib/libc/jemalloc.3 ]; then
# Have to cleanup the jemalloc.3 in the obj tree since make gets
# confused and won't use the one in lib/libc/malloc/jemalloc/jemalloc.3
echo "Removing stale jemalloc.3 object"
- run rm -f "$OBJTOP"/lib/libc/jemalloc.3
+ run rm -fv "$OBJTOP"/lib/libc/jemalloc.3
fi
if [ $MACHINE_ARCH = aarch64 ]; then
# 20250110 5e7d93a60440 add strcmp SIMD implementation
ALL_libcompats= clean_dep lib/libc strcmp S arm-optimized-routines
- run rm -f "$OBJTOP"/lib/libc/strcmp.S
+ run rm -fv "$OBJTOP"/lib/libc/strcmp.S
# 20250110 b91003acffe7 add strspn optimized implementation
ALL_libcompats= clean_dep lib/libc strspn c
@@ -391,7 +417,7 @@ if [ $MACHINE_ARCH = aarch64 ]; then
# 20250110 25c485e14769 add strncmp SIMD implementation
ALL_libcompats= clean_dep lib/libc strncmp S arm-optimized-routines
- run rm -f "$OBJTOP"/lib/libc/strncmp.S
+ run rm -fv "$OBJTOP"/lib/libc/strncmp.S
# 20250110 bad17991c06d add memccpy SIMD implementation
ALL_libcompats= clean_dep lib/libc memccpy c
@@ -402,11 +428,11 @@ if [ $MACHINE_ARCH = aarch64 ]; then
# 20250110 bea89d038ac5 add strlcat SIMD implementation, and move memchr
ALL_libcompats= clean_dep lib/libc strlcat c "libc.string.strlcat.c"
ALL_libcompats= clean_dep lib/libc memchr S "[[:space:]]memchr.S"
- run rm -f "$OBJTOP"/lib/libc/memchr.S
+ run rm -fv "$OBJTOP"/lib/libc/memchr.S
# 20250110 3863fec1ce2d add strlen SIMD implementation
ALL_libcompats= clean_dep lib/libc strlen S arm-optimized-routines
- run rm -f "$OBJTOP"/lib/libc/strlen.S
+ run rm -fv "$OBJTOP"/lib/libc/strlen.S
# 20250110 79e01e7e643c add bcopy & bzero wrapper
ALL_libcompats= clean_dep lib/libc bcopy c "libc.string.bcopy.c"
@@ -431,15 +457,15 @@ clean_dep usr.sbin/ctld uclparse c
# 20250425 2e47f35be5dc libllvm, libclang and liblldb became shared libraries
if [ -f "$OBJTOP"/lib/clang/libllvm/libllvm.a ]; then
echo "Removing old static libllvm library"
- run rm -f "$OBJTOP"/lib/clang/libllvm/libllvm.a
+ run rm -fv "$OBJTOP"/lib/clang/libllvm/libllvm.a
fi
if [ -f "$OBJTOP"/lib/clang/libclang/libclang.a ]; then
echo "Removing old static libclang library"
- run rm -f "$OBJTOP"/lib/clang/libclang/libclang.a
+ run rm -fv "$OBJTOP"/lib/clang/libclang/libclang.a
fi
if [ -f "$OBJTOP"/lib/clang/liblldb/liblldb.a ]; then
echo "Removing old static liblldb library"
- run rm -f "$OBJTOP"/lib/clang/liblldb/liblldb.a
+ run rm -fv "$OBJTOP"/lib/clang/liblldb/liblldb.a
fi
# 20250813 4f766afc1ca0 tcopy converted to C++
diff --git a/tools/build/mk/OptionalObsoleteFiles.inc b/tools/build/mk/OptionalObsoleteFiles.inc
index 0781dc331e95..fe92cf671ebc 100644
--- a/tools/build/mk/OptionalObsoleteFiles.inc
+++ b/tools/build/mk/OptionalObsoleteFiles.inc
@@ -1709,6 +1709,8 @@ OLD_FILES+=usr/share/examples/hostapd/hostapd.conf
OLD_FILES+=usr/share/examples/hostapd/hostapd.eap_user
OLD_FILES+=usr/share/examples/hostapd/hostapd.wpa_psk
OLD_FILES+=usr/share/examples/indent/indent.pro
+OLD_FILES+=usr/share/examples/inotify/Makefile
+OLD_FILES+=usr/share/examples/inotify/inotify.c
OLD_FILES+=usr/share/examples/ipfilter/BASIC.NAT
OLD_FILES+=usr/share/examples/ipfilter/BASIC_1.FW
OLD_FILES+=usr/share/examples/ipfilter/BASIC_2.FW
@@ -1921,6 +1923,7 @@ OLD_DIRS+=usr/share/examples/hast
OLD_DIRS+=usr/share/examples/ibcs2
OLD_DIRS+=usr/share/examples/hostapd
OLD_DIRS+=usr/share/examples/indent
+OLD_DIRS+=usr/share/examples/inotify
OLD_DIRS+=usr/share/examples/ipfilter
OLD_DIRS+=usr/share/examples/ipfw
OLD_DIRS+=usr/share/examples/jails
@@ -2259,83 +2262,6 @@ OLD_FILES+=usr/share/man/man3/gpio_pin_tristate.3.gz
OLD_FILES+=usr/share/man/man8/gpioctl.8.gz
.endif
-.if ${MK_GSSAPI} == "no" || ${MK_KERBEROS_SUPPORT} == "no"
-OLD_FILES+=etc/rc.d/gssd
-OLD_FILES+=usr/sbin/gssd
-OLD_FILES+=usr/share/man/man8/gssd.8.gz
-.endif
-
-.if ${MK_GSSAPI} == no
-OLD_FILES+=usr/include/gssapi/gssapi.h
-OLD_DIRS+=usr/include/gssapi
-OLD_FILES+=usr/include/gssapi.h
-OLD_FILES+=usr/lib/libgssapi.a
-OLD_FILES+=usr/lib/libgssapi.so
-OLD_LIBS+=usr/lib/libgssapi.so.10
-OLD_FILES+=usr/lib/libgssapi_p.a
-OLD_FILES+=usr/lib/librpcsec_gss.a
-OLD_FILES+=usr/lib/librpcsec_gss.so
-OLD_LIBS+=usr/lib/librpcsec_gss.so.1
-OLD_FILES+=usr/share/man/man3/gss_accept_sec_context.3.gz
-OLD_FILES+=usr/share/man/man3/gss_acquire_cred.3.gz
-OLD_FILES+=usr/share/man/man3/gss_add_cred.3.gz
-OLD_FILES+=usr/share/man/man3/gss_add_oid_set_member.3.gz
-OLD_FILES+=usr/share/man/man3/gss_canonicalize_name.3.gz
-OLD_FILES+=usr/share/man/man3/gss_compare_name.3.gz
-OLD_FILES+=usr/share/man/man3/gss_context_time.3.gz
-OLD_FILES+=usr/share/man/man3/gss_create_empty_oid_set.3.gz
-OLD_FILES+=usr/share/man/man3/gss_delete_sec_context.3.gz
-OLD_FILES+=usr/share/man/man3/gss_display_name.3.gz
-OLD_FILES+=usr/share/man/man3/gss_display_status.3.gz
-OLD_FILES+=usr/share/man/man3/gss_duplicate_name.3.gz
-OLD_FILES+=usr/share/man/man3/gss_export_name.3.gz
-OLD_FILES+=usr/share/man/man3/gss_export_sec_context.3.gz
-OLD_FILES+=usr/share/man/man3/gss_get_mic.3.gz
-OLD_FILES+=usr/share/man/man3/gss_import_name.3.gz
-OLD_FILES+=usr/share/man/man3/gss_import_sec_context.3.gz
-OLD_FILES+=usr/share/man/man3/gss_indicate_mechs.3.gz
-OLD_FILES+=usr/share/man/man3/gss_init_sec_context.3.gz
-OLD_FILES+=usr/share/man/man3/gss_inquire_context.3.gz
-OLD_FILES+=usr/share/man/man3/gss_inquire_cred.3.gz
-OLD_FILES+=usr/share/man/man3/gss_inquire_cred_by_mech.3.gz
-OLD_FILES+=usr/share/man/man3/gss_inquire_mechs_for_name.3.gz
-OLD_FILES+=usr/share/man/man3/gss_inquire_names_for_mech.3.gz
-OLD_FILES+=usr/share/man/man3/gss_process_context_token.3.gz
-OLD_FILES+=usr/share/man/man3/gss_release_buffer.3.gz
-OLD_FILES+=usr/share/man/man3/gss_release_cred.3.gz
-OLD_FILES+=usr/share/man/man3/gss_release_name.3.gz
-OLD_FILES+=usr/share/man/man3/gss_release_oid_set.3.gz
-OLD_FILES+=usr/share/man/man3/gss_seal.3.gz
-OLD_FILES+=usr/share/man/man3/gss_sign.3.gz
-OLD_FILES+=usr/share/man/man3/gss_test_oid_set_member.3.gz
-OLD_FILES+=usr/share/man/man3/gss_unseal.3.gz
-OLD_FILES+=usr/share/man/man3/gss_unwrap.3.gz
-OLD_FILES+=usr/share/man/man3/gss_verify.3.gz
-OLD_FILES+=usr/share/man/man3/gss_verify_mic.3.gz
-OLD_FILES+=usr/share/man/man3/gss_wrap.3.gz
-OLD_FILES+=usr/share/man/man3/gss_wrap_size_limit.3.gz
-OLD_FILES+=usr/share/man/man3/gssapi.3.gz
-OLD_FILES+=usr/share/man/man3/rpc_gss_get_error.3.gz
-OLD_FILES+=usr/share/man/man3/rpc_gss_get_mech_info.3.gz
-OLD_FILES+=usr/share/man/man3/rpc_gss_get_mechanisms.3.gz
-OLD_FILES+=usr/share/man/man3/rpc_gss_get_principal_name.3.gz
-OLD_FILES+=usr/share/man/man3/rpc_gss_get_versions.3.gz
-OLD_FILES+=usr/share/man/man3/rpc_gss_getcred.3.gz
-OLD_FILES+=usr/share/man/man3/rpc_gss_is_installed.3.gz
-OLD_FILES+=usr/share/man/man3/rpc_gss_max_data_length.3.gz
-OLD_FILES+=usr/share/man/man3/rpc_gss_mech_to_oid.3.gz
-OLD_FILES+=usr/share/man/man3/rpc_gss_oid_to_mech.3.gz
-OLD_FILES+=usr/share/man/man3/rpc_gss_qop_to_num.3.gz
-OLD_FILES+=usr/share/man/man3/rpc_gss_seccreate.3.gz
-OLD_FILES+=usr/share/man/man3/rpc_gss_set_callback.3.gz
-OLD_FILES+=usr/share/man/man3/rpc_gss_set_defaults.3.gz
-OLD_FILES+=usr/share/man/man3/rpc_gss_set_svc_name.3.gz
-OLD_FILES+=usr/share/man/man3/rpc_gss_svc_max_data_length.3.gz
-OLD_FILES+=usr/share/man/man3/rpcsec_gss.3.gz
-OLD_FILES+=usr/share/man/man5/mech.5.gz
-OLD_FILES+=usr/share/man/man5/qop.5.gz
-.endif
-
.if ${MK_HAST} == no
OLD_FILES+=etc/rc.d/hastd
OLD_FILES+=sbin/hastctl
@@ -2733,2116 +2659,94 @@ OLD_FILES+=usr/share/man/man1/kdump.1.gz
OLD_FILES+=usr/share/man/man1/truss.1.gz
.endif
-.if ${MK_KERBEROS} == no
-.if ${MK_MITKRB5} == no
-# Remove Heimdal because we do not want Kerberos
-OLD_FILES+=etc/rc.d/ipropd_master
-OLD_FILES+=etc/rc.d/ipropd_slave
-OLD_FILES+=usr/bin/asn1_compile
+.if ${MK_KERBEROS_SUPPORT} == "no"
+OLD_FILES+=etc/rc.d/gssd
+OLD_FILES+=usr/sbin/gssd
+OLD_FILES+=usr/share/man/man8/gssd.8.gz
+.endif
+
+# Kerberos files which are installed by both Heimdal and MIT. These should
+# only be removed if Kerberos is disabled entirely.
+.if ${MK_KERBEROS} == "no"
+
OLD_FILES+=usr/bin/compile_et
-OLD_FILES+=usr/bin/hxtool
+OLD_FILES+=usr/share/man/man1/compile_et.1.gz
+# This is kadmin(1) in MIT but kadmin(8) in Heimdal, therefore the manpage
+# is not listed here.
OLD_FILES+=usr/bin/kadmin
-OLD_FILES+=usr/bin/kcc
OLD_FILES+=usr/bin/kdestroy
-OLD_FILES+=usr/bin/kf
-OLD_FILES+=usr/bin/kgetcred
-OLD_FILES+=usr/bin/kinit
-OLD_FILES+=usr/bin/klist
-OLD_FILES+=usr/bin/kpasswd
-OLD_FILES+=usr/bin/krb5-config
-OLD_FILES+=usr/bin/ksu
-OLD_FILES+=usr/bin/kswitch
-OLD_FILES+=usr/bin/make-roken
-OLD_FILES+=usr/bin/slc
-OLD_FILES+=usr/bin/string2key
-OLD_FILES+=usr/bin/verify_krb5_conf
-OLD_FILES+=usr/include/asn1-common.h
-OLD_FILES+=usr/include/asn1_err.h
-OLD_FILES+=usr/include/base64.h
-OLD_FILES+=usr/include/cms_asn1.h
-OLD_FILES+=usr/include/common.h
-OLD_FILES+=usr/include/crmf_asn1.h
-OLD_FILES+=usr/include/der-private.h
-OLD_FILES+=usr/include/der-protos.h
-OLD_FILES+=usr/include/der.h
-OLD_FILES+=usr/include/digest_asn1.h
-OLD_FILES+=usr/include/edwards25519_fiat.h
-OLD_FILES+=usr/include/edwards25519_tables.h
-OLD_FILES+=usr/include/getarg.h
-OLD_FILES+=usr/include/groups.h
-OLD_FILES+=usr/include/gssapi/gssapi_krb5.h
-OLD_FILES+=usr/include/hdb-protos.h
-OLD_FILES+=usr/include/hdb.h
-OLD_FILES+=usr/include/hdb_asn1.h
-OLD_FILES+=usr/include/hdb_err.h
-OLD_FILES+=usr/include/heim_asn1.h
-OLD_FILES+=usr/include/heim_err.h
-OLD_FILES+=usr/include/heim_threads.h
-OLD_FILES+=usr/include/heimbase.h
-OLD_FILES+=usr/include/heimntlm-protos.h
-OLD_FILES+=usr/include/heimntlm.h
-OLD_FILES+=usr/include/hex.h
-OLD_FILES+=usr/include/hx509-private.h
-OLD_FILES+=usr/include/hx509-protos.h
-OLD_FILES+=usr/include/hx509.h
-OLD_FILES+=usr/include/hx509_err.h
-OLD_FILES+=usr/include/iana.h
-OLD_FILES+=usr/include/k524_err.h
-OLD_FILES+=usr/include/kadm5/admin.h
-OLD_FILES+=usr/include/kadm5/kadm5-private.h
-OLD_FILES+=usr/include/kadm5/kadm5-protos.h
-OLD_FILES+=usr/include/kadm5/kadm5-pwcheck.h
-OLD_FILES+=usr/include/kadm5/kadm5_err.h
-OLD_FILES+=usr/include/kadm5/private.h
-OLD_DIRS+=usr/include/kadm5
-OLD_FILES+=usr/include/kafs.h
-OLD_FILES+=usr/include/kdc-protos.h
-OLD_FILES+=usr/include/kdc.h
-OLD_FILES+=usr/include/krb5-private.h
-OLD_FILES+=usr/include/krb5-protos.h
-OLD_FILES+=usr/include/krb5-types.h
-OLD_FILES+=usr/include/krb5.h
-OLD_FILES+=usr/include/krb5/ccache_plugin.h
-OLD_FILES+=usr/include/krb5/locate_plugin.h
-OLD_FILES+=usr/include/krb5/send_to_kdc_plugin.h
-OLD_FILES+=usr/include/krb5/windc_plugin.h
-OLD_DIRS+=usr/include/krb5
-OLD_FILES+=usr/include/krb5_asn1.h
-OLD_FILES+=usr/include/krb5_ccapi.h
-OLD_FILES+=usr/include/krb5_err.h
-OLD_FILES+=usr/include/kx509_asn1.h
-OLD_FILES+=usr/include/ntlm_err.h
-OLD_FILES+=usr/include/ocsp_asn1.h
-OLD_FILES+=usr/include/parse_bytes.h
-OLD_FILES+=usr/include/parse_time.h
-OLD_FILES+=usr/include/parse_units.h
-OLD_FILES+=usr/include/pkcs10_asn1.h
-OLD_FILES+=usr/include/pkcs12_asn1.h
-OLD_FILES+=usr/include/pkcs8_asn1.h
-OLD_FILES+=usr/include/pkcs9_asn1.h
-OLD_FILES+=usr/include/pkinit_asn1.h
-OLD_FILES+=usr/include/resolve.h
-OLD_FILES+=usr/include/rfc2459_asn1.h
-OLD_FILES+=usr/include/roken-common.h
-OLD_FILES+=usr/include/rtbl.h
-OLD_FILES+=usr/include/trace.h
-OLD_FILES+=usr/include/util.h
-OLD_FILES+=usr/include/wind.h
-OLD_FILES+=usr/include/wind_err.h
-OLD_FILES+=usr/include/xdbm.h
-OLD_FILES+=usr/lib/libasn1.a
-OLD_FILES+=usr/lib/libasn1.so
-OLD_LIBS+=usr/lib/libasn1.so.11
-OLD_FILES+=usr/lib/libasn1_p.a
-OLD_FILES+=usr/lib/libcom_err.a
-OLD_FILES+=usr/lib/libcom_err.so
-OLD_LIBS+=usr/lib/libcom_err.so.5
-OLD_FILES+=usr/lib/libcom_err_p.a
-OLD_FILES+=usr/lib/libgssapi_krb5.a
-OLD_FILES+=usr/lib/libgssapi_krb5.so
-OLD_LIBS+=usr/lib/libgssapi_krb5.so.10
-OLD_FILES+=usr/lib/libgssapi_krb5_p.a
-OLD_FILES+=usr/lib/libgssapi_mech.a
-OLD_FILES+=usr/lib/libgssapi_mech.so
-OLD_LIBS+=usr/lib/libgssapi_mech.so.10
-OLD_FILES+=usr/lib/libgssapi_ntlm.a
-OLD_FILES+=usr/lib/libgssapi_ntlm.so
-OLD_LIBS+=usr/lib/libgssapi_ntlm.so.10
-OLD_FILES+=usr/lib/libgssapi_ntlm_p.a
-OLD_FILES+=usr/lib/libgssapi_spnego.a
-OLD_FILES+=usr/lib/libgssapi_spnego.so
-OLD_LIBS+=usr/lib/libgssapi_spnego.so.10
-OLD_FILES+=usr/lib/libgssapi_spnego_p.a
-OLD_FILES+=usr/lib/libhdb.a
-OLD_FILES+=usr/lib/libhdb.so
-OLD_LIBS+=usr/lib/libhdb.so.11
-OLD_FILES+=usr/lib/libhdb_p.a
-OLD_FILES+=usr/lib/libheimbase.a
-OLD_FILES+=usr/lib/libheimbase.so
-OLD_LIBS+=usr/lib/libheimbase.so.11
-OLD_FILES+=usr/lib/libheimbase_p.a
-OLD_FILES+=usr/lib/libheimntlm.a
-OLD_FILES+=usr/lib/libheimntlm.so
-OLD_LIBS+=usr/lib/libheimntlm.so.11
-OLD_FILES+=usr/lib/libheimntlm_p.a
-OLD_FILES+=usr/lib/libheimsqlite.a
-OLD_FILES+=usr/lib/libheimsqlite.so
-OLD_LIBS+=usr/lib/libheimsqlite.so.11
-OLD_FILES+=usr/lib/libheimsqlite_p.a
-OLD_FILES+=usr/lib/libhx509.a
-OLD_FILES+=usr/lib/libhx509.so
-OLD_LIBS+=usr/lib/libhx509.so.11
-OLD_FILES+=usr/lib/libhx509_p.a
-OLD_FILES+=usr/lib/libkadm5clnt.a
-OLD_FILES+=usr/lib/libkadm5clnt.so
-OLD_LIBS+=usr/lib/libkadm5clnt.so.11
-OLD_FILES+=usr/lib/libkadm5clnt_p.a
-OLD_FILES+=usr/lib/libkadm5srv.a
-OLD_FILES+=usr/lib/libkadm5srv.so
-OLD_LIBS+=usr/lib/libkadm5srv.so.11
-OLD_FILES+=usr/lib/libkadm5srv_p.a
-OLD_FILES+=usr/lib/libkafs5.a
-OLD_FILES+=usr/lib/libkafs5.so
-OLD_LIBS+=usr/lib/libkafs5.so.11
-OLD_FILES+=usr/lib/libkafs5_p.a
-OLD_FILES+=usr/lib/libkdc.a
-OLD_FILES+=usr/lib/libkdc.so
-OLD_LIBS+=usr/lib/libkdc.so.11
-OLD_FILES+=usr/lib/libkdc_p.a
-OLD_FILES+=usr/lib/libkrb5.a
-OLD_FILES+=usr/lib/libkrb5.so
-OLD_LIBS+=usr/lib/libkrb5.so.11
-OLD_FILES+=usr/lib/libkrb5_p.a
-OLD_FILES+=usr/lib/libroken.a
-OLD_FILES+=usr/lib/libroken.so
-OLD_LIBS+=usr/lib/libroken.so.11
-OLD_FILES+=usr/lib/libroken_p.a
-OLD_FILES+=usr/lib/libwind.a
-OLD_FILES+=usr/lib/libwind.so
-OLD_LIBS+=usr/lib/libwind.so.11
-OLD_FILES+=usr/lib/libwind_p.a
-OLD_FILES+=usr/lib/libprivateheimipcc.a
-OLD_FILES+=usr/lib/libprivateheimipcc.so
-OLD_LIBS+=usr/lib/libprivateheimipcc.so.11
-OLD_FILES+=usr/lib/libprivateheimipcc_p.a
-OLD_FILES+=usr/lib/libprivateheimipcs.a
-OLD_FILES+=usr/lib/libprivateheimipcs.so
-OLD_LIBS+=usr/lib/libprivateheimipcs.so.11
-OLD_FILES+=usr/lib/libprivateheimipcs_p.a
-OLD_FILES+=usr/libexec/digest-service
-OLD_FILES+=usr/libexec/hprop
-OLD_FILES+=usr/libexec/hpropd
-OLD_FILES+=usr/libexec/ipropd-master
-OLD_FILES+=usr/libexec/ipropd-slave
-OLD_FILES+=usr/libexec/kadmind
-OLD_FILES+=usr/libexec/kcm
-OLD_FILES+=usr/libexec/kdc
-OLD_FILES+=usr/libexec/kdigest
-OLD_FILES+=usr/libexec/kfd
-OLD_FILES+=usr/libexec/kimpersonate
-OLD_FILES+=usr/libexec/kpasswdd
-OLD_FILES+=usr/sbin/kstash
-OLD_FILES+=usr/sbin/ktutil
-OLD_FILES+=usr/sbin/iprop-log
OLD_FILES+=usr/share/man/man1/kdestroy.1.gz
-OLD_FILES+=usr/share/man/man1/kf.1.gz
+OLD_FILES+=usr/bin/kinit
OLD_FILES+=usr/share/man/man1/kinit.1.gz
+OLD_FILES+=usr/bin/klist
OLD_FILES+=usr/share/man/man1/klist.1.gz
+OLD_FILES+=usr/bin/kpasswd
OLD_FILES+=usr/share/man/man1/kpasswd.1.gz
+OLD_FILES+=usr/bin/krb5-config
OLD_FILES+=usr/share/man/man1/krb5-config.1.gz
+OLD_FILES+=usr/bin/kswitch
OLD_FILES+=usr/share/man/man1/kswitch.1.gz
-OLD_FILES+=usr/share/man/man3/HDB.3.gz
-OLD_FILES+=usr/share/man/man3/hdb__del.3.gz
-OLD_FILES+=usr/share/man/man3/hdb__get.3.gz
-OLD_FILES+=usr/share/man/man3/hdb__put.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_auth_status.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_check_constrained_delegation.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_check_pkinit_ms_upn_match.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_check_s4u2self.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_close.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_destroy.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_entry_ex.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_fetch_kvno.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_firstkey.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_free.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_get_realms.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_lock.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_name.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_nextkey.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_open.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_password.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_remove.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_rename.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_store.3.gz
-OLD_FILES+=usr/share/man/man3/hdb_unlock.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_build_ntlm1_master.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_build_ntlm2_master.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_calculate_lm2.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_calculate_ntlm1.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_calculate_ntlm2.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_decode_targetinfo.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_encode_targetinfo.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_encode_type1.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_encode_type2.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_encode_type3.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_free_buf.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_free_targetinfo.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_free_type1.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_free_type2.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_free_type3.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_keyex_unwrap.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_nt_key.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_ntlmv2_key.3.gz
-OLD_FILES+=usr/share/man/man3/heim_ntlm_verify_ntlm2.3.gz
-OLD_FILES+=usr/share/man/man3/hx509.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_bitstring_print.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_sign.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_sign_self.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_crl_dp_uri.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_eku.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_hostname.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_jid.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_ms_upn.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_otherName.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_pkinit.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_add_san_rfc822name.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_free.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_init.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_ca.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_domaincontroller.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_notAfter.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_notAfter_lifetime.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_notBefore.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_proxy.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_serialnumber.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_spki.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_subject.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_template.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_set_unique.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_subject_expand.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ca_tbs_template_units.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_binary.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_check_eku.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_cmp.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_find_subjectAltName_otherName.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_free.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_get_SPKI.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_get_SPKI_AlgorithmIdentifier.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_get_attribute.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_get_base_subject.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_get_friendly_name.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_get_issuer.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_get_issuer_unique_id.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_get_notAfter.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_get_notBefore.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_get_serialnumber.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_get_subject.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_get_subject_unique_id.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_init.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_init_data.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_keyusage_print.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_ref.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cert_set_friendly_name.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_certs_add.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_certs_append.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_certs_end_seq.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_certs_filter.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_certs_find.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_certs_free.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_certs_info.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_certs_init.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_certs_iter_f.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_certs_merge.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_certs_next_cert.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_certs_start_seq.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_certs_store.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ci_print_names.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_clear_error_string.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cms.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cms_create_signed_1.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cms_envelope_1.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cms_unenvelope.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cms_unwrap_ContentInfo.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cms_verify_signed.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_cms_wrap_ContentInfo.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_context_free.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_context_init.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_context_set_missing_revoke.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_crl_add_revoked_certs.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_crl_alloc.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_crl_free.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_crl_lifetime.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_crl_sign.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_crypto.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_env.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_env_add.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_env_add_binding.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_env_find.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_env_find_binding.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_env_free.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_env_lfind.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_err.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_error.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_free_error_string.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_free_octet_string_list.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_general_name_unparse.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_get_error_string.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_get_one_cert.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_keyset.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_lock.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_misc.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_name.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_name_binary.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_name_cmp.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_name_copy.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_name_expand.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_name_free.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_name_is_null_p.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_name_to_Name.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_name_to_string.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ocsp_request.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_ocsp_verify.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_oid_print.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_oid_sprint.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_parse_name.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_peer.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_peer_info_add_cms_alg.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_peer_info_alloc.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_peer_info_free.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_peer_info_set_cert.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_peer_info_set_cms_algs.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_print.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_print_cert.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_print_stdout.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_query.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_query_alloc.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_query_free.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_query_match_cmp_func.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_query_match_eku.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_query_match_friendly_name.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_query_match_issuer_serial.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_query_match_option.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_query_statistic_file.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_query_unparse_stats.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_revoke.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_revoke_add_crl.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_revoke_add_ocsp.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_revoke_free.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_revoke_init.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_revoke_ocsp_print.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_revoke_verify.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_set_error_string.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_set_error_stringv.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_unparse_der_name.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_validate_cert.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_validate_ctx_add_flags.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_validate_ctx_free.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_validate_ctx_init.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_validate_ctx_set_print.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_verify.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_verify_attach_anchors.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_verify_attach_revoke.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_verify_ctx_f_allow_default_trustanchors.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_verify_destroy_ctx.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_verify_hostname.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_verify_init_ctx.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_verify_path.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_verify_set_max_depth.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_verify_set_proxy_certificate.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_verify_set_strict_rfc3280_verification.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_verify_set_time.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_verify_signature.3.gz
-OLD_FILES+=usr/share/man/man3/hx509_xfree.3.gz
-OLD_FILES+=usr/share/man/man3/k_afs_cell_of_file.3.gz
-OLD_FILES+=usr/share/man/man3/k_hasafs.3.gz
-OLD_FILES+=usr/share/man/man3/k_pioctl.3.gz
-OLD_FILES+=usr/share/man/man3/k_setpag.3.gz
-OLD_FILES+=usr/share/man/man3/k_unlog.3.gz
-OLD_FILES+=usr/share/man/man3/kadm5_pwcheck.3.gz
-OLD_FILES+=usr/share/man/man3/kafs.3.gz
-OLD_FILES+=usr/share/man/man3/kafs5.3.gz
-OLD_FILES+=usr/share/man/man3/kafs_set_verbose.3.gz
-OLD_FILES+=usr/share/man/man3/kafs_settoken.3.gz
-OLD_FILES+=usr/share/man/man3/kafs_settoken5.3.gz
-OLD_FILES+=usr/share/man/man3/kafs_settoken_rxkad.3.gz
-OLD_FILES+=usr/share/man/man3/krb5.3.gz
-OLD_FILES+=usr/share/man/man3/krb524_convert_creds_kdc.3.gz
-OLD_FILES+=usr/share/man/man3/krb524_convert_creds_kdc_ccache.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_425_conv_principal.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_425_conv_principal_ext.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_524_conv_principal.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_acc_ops.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_acl_match_file.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_acl_match_string.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_add_et_list.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_add_extra_addresses.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_add_ignore_addresses.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_addlog_dest.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_addlog_func.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_addr2sockaddr.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_address.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_address_compare.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_address_order.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_address_prefixlen_boundary.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_address_search.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_afslog.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_afslog_uid.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_allow_weak_crypto.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_aname_to_localname.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_anyaddr.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_appdefault.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_appdefault_boolean.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_appdefault_string.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_appdefault_time.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_append_addresses.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_free.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_genaddrs.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_getaddrs.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_getflags.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_getkey.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_getlocalsubkey.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_getrcache.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_getremotesubkey.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_getuserkey.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_init.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_initivector.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_setaddrs.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_setaddrs_from_fd.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_setflags.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_setivector.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_setkey.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_setlocalsubkey.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_setrcache.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_setremotesubkey.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_con_setuserkey.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_context.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_getauthenticator.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_getcksumtype.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_getkeytype.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_getlocalseqnumber.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_getremoteseqnumber.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_setcksumtype.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_setkeytype.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_setlocalseqnumber.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_auth_setremoteseqnumber.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_build_principal.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_build_principal_ext.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_build_principal_va.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_build_principal_va_ext.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_c_enctype_compare.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_c_make_checksum.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_cache_end_seq_get.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_cache_get_first.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_cache_match.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_cache_next.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_clear_mcred.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_close.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_copy_cache.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_copy_creds.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_copy_match_f.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_default.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_default_name.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_destroy.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_end_seq_get.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_gen_new.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_get_config.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_get_flags.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_get_friendly_name.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_get_full_name.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_get_kdc_offset.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_get_lifetime.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_get_name.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_get_ops.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_get_prefix_ops.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_get_principal.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_get_type.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_get_version.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_initialize.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_last_change_time.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_move.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_new_unique.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_next_cred.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_register.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_remove_cred.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_resolve.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_retrieve_cred.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_set_config.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_set_default_name.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_set_flags.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_set_friendly_name.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_set_kdc_offset.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_start_seq_get.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_store_cred.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_support_switch.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cc_switch.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ccache.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ccache_intro.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cccol_cursor_free.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cccol_cursor_new.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cccol_cursor_next.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cccol_last_change_time.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_change_password.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_check_transited.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_checksum_is_collision_proof.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_checksum_is_keyed.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_checksumsize.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_cksumtype_to_enctype.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_clear_error_message.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_clear_error_string.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_closelog.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_compare_creds.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_file_free.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_free_strings.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_get_bool.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_get_bool_default.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_get_list.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_get_string.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_get_string_default.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_get_strings.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_get_time.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_get_time_default.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_parse_file_multi.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_parse_string_multi.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_vget_bool.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_vget_bool_default.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_vget_list.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_vget_string.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_vget_string_default.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_vget_strings.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_vget_time.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_config_vget_time_default.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_copy_address.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_copy_addresses.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_copy_context.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_copy_creds.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_copy_creds_contents.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_copy_data.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_copy_host_realm.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_copy_keyblock.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_copy_keyblock_contents.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_copy_principal.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_copy_ticket.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_create_checksum.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_create_checksum_iov.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_credential.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_creds.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_creds_get_ticket_flags.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_crypto.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_crypto_destroy.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_crypto_fx_cf2.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_crypto_getblocksize.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_crypto_getconfoundersize.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_crypto_getenctype.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_crypto_getpadsize.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_crypto_init.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_crypto_iov.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_data_alloc.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_data_cmp.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_data_copy.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_data_ct_cmp.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_data_free.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_data_realloc.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_data_zero.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_decrypt.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_decrypt_EncryptedData.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_decrypt_iov_ivec.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_deprecated.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_digest.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_digest_probe.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_eai_to_heim_errno.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_encrypt.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_encrypt_EncryptedData.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_encrypt_iov_ivec.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_enctype_disable.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_enctype_enable.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_enctype_valid.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_enctypes_compatible_keys.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_error.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_expand_hostname.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_expand_hostname_realms.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_fcc_ops.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_fileformats.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_find_padata.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_free_address.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_free_addresses.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_free_config_files.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_free_context.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_free_cred_contents.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_free_creds.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_free_creds_contents.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_free_data.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_free_data_contents.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_free_error_string.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_free_host_realm.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_free_keyblock.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_free_keyblock_contents.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_free_krbhst.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_free_principal.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_free_ticket.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_free_unparsed_name.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_fwd_tgt_creds.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_generate_random_block.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_generate_subkey.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_generate_subkey_extended.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_all_client_addrs.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_all_server_addrs.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_cred_from_kdc.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_cred_from_kdc_opt.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_credentials.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_creds.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_default_config_files.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_default_in_tkt_etypes.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_default_principal.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_default_realm.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_default_realms.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_dns_canonicalize_hostname.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_extra_addresses.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_fcache_version.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_forwarded_creds.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_host_realm.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_ignore_addresses.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_in_cred.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_in_tkt_with_keytab.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_in_tkt_with_password.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_in_tkt_with_skey.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_init_creds.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_keyblock.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_keytab.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_opt_alloc.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_opt_free.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_opt_get_error.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_opt_init.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_init_creds_password.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_kdc_sec_offset.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_krb524hst.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_krb_admin_hst.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_krb_changepw_hst.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_krbhst.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_max_time_skew.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_use_admin_kdc.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_get_validated_creds.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_getportbyname.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_h_addr2addr.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_h_addr2sockaddr.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_h_errno_to_heim_errno.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_init_context.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_init_creds_free.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_init_creds_get.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_init_creds_get_error.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_init_creds_init.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_init_creds_intro.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_init_creds_set_keytab.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_init_creds_set_password.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_init_creds_set_service.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_init_creds_step.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_init_ets.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_initlog.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_introduction.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_is_config_principal.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_is_thread_safe.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kerberos_enctypes.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_keyblock_get_enctype.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_keyblock_init.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_keyblock_zero.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_keytab.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_keytab_intro.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_keytab_key_proc.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_keytype_to_enctypes.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_keytype_to_enctypes_default.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_keytype_to_string.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_krbhst_format_string.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_krbhst_free.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_krbhst_get_addrinfo.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_krbhst_init.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_krbhst_next.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_krbhst_next_as_string.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_krbhst_reset.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_add_entry.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_close.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_compare.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_copy_entry_contents.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_default.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_default_modify_name.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_default_name.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_destroy.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_end_seq_get.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_free_entry.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_get_entry.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_get_full_name.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_get_name.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_get_type.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_have_content.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_next_entry.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_read_service_key.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_register.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_remove_entry.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_resolve.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kt_start_seq_get.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_kuserok.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_log.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_log_msg.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_make_addrport.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_make_principal.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_max_sockaddr_size.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_mcc_ops.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_mk_req.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_mk_safe.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_openlog.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_pac.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_pac_get_buffer.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_pac_verify.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_parse_address.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_parse_name.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_parse_name_flags.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_parse_nametype.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_password_key_proc.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_plugin_register.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_prepend_config_files_default.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_princ_realm.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_princ_set_realm.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_principal.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_principal_compare.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_principal_compare_any_realm.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_principal_get_comp_string.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_principal_get_num_comp.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_principal_get_realm.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_principal_get_type.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_principal_intro.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_principal_is_krbtgt.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_principal_match.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_principal_set_realm.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_principal_set_type.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_print_address.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_random_to_key.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_rcache.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_rd_error.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_rd_req_ctx.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_rd_req_in_ctx_alloc.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_rd_req_in_set_keytab.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_rd_req_in_set_pac_check.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_rd_req_out_ctx_free.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_rd_req_out_get_server.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_rd_safe.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_realm_compare.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ret_address.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ret_addrs.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ret_authdata.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ret_creds.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ret_creds_tag.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ret_data.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ret_int16.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ret_int32.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ret_int8.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ret_keyblock.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ret_principal.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ret_string.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ret_stringz.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ret_times.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ret_uint16.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ret_uint32.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ret_uint8.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_set_config_files.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_set_default_in_tkt_etypes.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_set_default_realm.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_set_dns_canonicalize_hostname.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_set_error_message.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_set_error_string.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_set_extra_addresses.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_set_fcache_version.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_set_home_dir_access.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_set_ignore_addresses.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_set_kdc_sec_offset.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_set_max_time_skew.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_set_password.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_set_real_time.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_set_use_admin_kdc.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_sname_to_principal.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_sock_to_principal.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_sockaddr2address.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_sockaddr2port.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_sockaddr_uninteresting.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_clear_flags.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_emem.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_free.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_from_data.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_from_fd.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_from_mem.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_from_readonly_mem.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_get_byteorder.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_get_eof_code.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_is_flags.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_read.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_seek.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_set_byteorder.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_set_eof_code.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_set_flags.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_set_max_alloc.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_to_data.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_truncate.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_storage_write.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_store_address.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_store_addrs.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_store_authdata.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_store_creds.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_store_creds_tag.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_store_data.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_store_int16.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_store_int32.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_store_int8.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_store_keyblock.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_store_principal.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_store_string.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_store_stringz.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_store_times.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_store_uint16.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_store_uint32.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_store_uint8.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_string_to_key.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_string_to_keytype.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_support.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ticket.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ticket_get_authorization_data_type.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ticket_get_client.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ticket_get_endtime.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ticket_get_flags.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_ticket_get_server.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_timeofday.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_unparse_name.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_unparse_name_fixed.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_unparse_name_fixed_flags.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_unparse_name_fixed_short.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_unparse_name_flags.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_unparse_name_short.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_us_timeofday.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_v4compat.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_verify_checksum.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_verify_checksum_iov.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_verify_init_creds.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_verify_opt_init.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_verify_opt_set_flags.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_verify_opt_set_keytab.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_verify_opt_set_secure.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_verify_opt_set_service.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_verify_user.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_verify_user_lrealm.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_verify_user_opt.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_vlog.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_vlog_msg.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_vset_error_string.3.gz
-OLD_FILES+=usr/share/man/man3/krb5_vwarn.3.gz
-OLD_FILES+=usr/share/man/man3/krb_afslog.3.gz
-OLD_FILES+=usr/share/man/man3/krb_afslog_uid.3.gz
-OLD_FILES+=usr/share/man/man3/ntlm_buf.3.gz
-OLD_FILES+=usr/share/man/man3/ntlm_core.3.gz
-OLD_FILES+=usr/share/man/man3/ntlm_type1.3.gz
-OLD_FILES+=usr/share/man/man3/ntlm_type2.3.gz
-OLD_FILES+=usr/share/man/man3/ntlm_type3.3.gz
-OLD_FILES+=usr/share/man/man5/krb5.conf.5.gz
-OLD_FILES+=usr/share/man/man8/hprop.8.gz
-OLD_FILES+=usr/share/man/man8/hpropd.8.gz
-OLD_FILES+=usr/share/man/man8/iprop-log.8.gz
-OLD_FILES+=usr/share/man/man8/iprop.8.gz
-OLD_FILES+=usr/share/man/man8/kadmin.8.gz
-OLD_FILES+=usr/share/man/man8/kadmind.8.gz
-OLD_FILES+=usr/share/man/man8/kcm.8.gz
-OLD_FILES+=usr/share/man/man8/kdc.8.gz
-OLD_FILES+=usr/share/man/man8/kdigest.8.gz
-OLD_FILES+=usr/share/man/man8/kerberos.8.gz
-OLD_FILES+=usr/share/man/man8/kimpersonate.8.gz
-OLD_FILES+=usr/share/man/man8/kpasswdd.8.gz
-OLD_FILES+=usr/share/man/man8/kstash.8.gz
-OLD_FILES+=usr/share/man/man8/ktutil.8.gz
-OLD_FILES+=usr/share/man/man8/string2key.8.gz
-OLD_FILES+=usr/share/man/man8/verify_krb5_conf.8.gz
-.else
-# Remove MIT KRB5 because we do not want Kerberos
-OLD_FILES+=usr/bin/compile_et
-OLD_FILES+=usr/bin/gss-client
-OLD_FILES+=usr/bin/k5srvutil
-OLD_FILES+=usr/bin/kadmin
-OLD_FILES+=usr/bin/kdestroy
-OLD_FILES+=usr/bin/kinit
-OLD_FILES+=usr/bin/klist
-OLD_FILES+=usr/bin/kpasswd
-OLD_FILES+=usr/bin/krb5-config
+# MIT has a manpage for this; Heimdal does not.
OLD_FILES+=usr/bin/ksu
-OLD_FILES+=usr/bin/kswitch
-OLD_FILES+=usr/bin/ktutil
-OLD_FILES+=usr/bin/kvno
-OLD_FILES+=usr/bin/sclient
-OLD_FILES+=usr/bin/sim_client
-OLD_FILES+=usr/bin/uuclient
-OLD_FILES+=etc/rc.d/kpropd
+
OLD_FILES+=usr/include/com_err.h
-OLD_FILES+=usr/include/common.h
-OLD_FILES+=usr/include/edwards25519_fiat.h
-OLD_FILES+=usr/include/edwards25519_tables.h
-OLD_FILES+=usr/include/groups.h
OLD_FILES+=usr/include/gssapi.h
OLD_FILES+=usr/include/gssapi/gssapi.h
-OLD_FILES+=usr/include/gssapi/gssapi_alloc.h
-OLD_FILES+=usr/include/gssapi/gssapi_ext.h
-OLD_FILES+=usr/include/gssapi/gssapi_generic.h
OLD_FILES+=usr/include/gssapi/gssapi_krb5.h
-OLD_FILES+=usr/include/gssapi/mechglue.h
-OLD_FILES+=usr/include/gssrpc/auth.h
-OLD_FILES+=usr/include/gssrpc/auth_gss.h
-OLD_FILES+=usr/include/gssrpc/auth_gssapi.h
-OLD_FILES+=usr/include/gssrpc/auth_unix.h
-OLD_FILES+=usr/include/gssrpc/clnt.h
-OLD_FILES+=usr/include/gssrpc/netdb.h
-OLD_FILES+=usr/include/gssrpc/pmap_clnt.h
-OLD_FILES+=usr/include/gssrpc/pmap_prot.h
-OLD_FILES+=usr/include/gssrpc/pmap_rmt.h
-OLD_FILES+=usr/include/gssrpc/rename.h
-OLD_FILES+=usr/include/gssrpc/rpc.h
-OLD_FILES+=usr/include/gssrpc/rpc_msg.h
-OLD_FILES+=usr/include/gssrpc/svc.h
-OLD_FILES+=usr/include/gssrpc/svc_auth.h
-OLD_FILES+=usr/include/gssrpc/types.h
-OLD_FILES+=usr/include/gssrpc/xdr.h
-OLD_FILES+=usr/include/iana.h
OLD_FILES+=usr/include/kadm5/admin.h
-OLD_FILES+=usr/include/kadm5/chpass_util_strings.h
-OLD_FILES+=usr/include/kadm5/kadm_err.h
-OLD_FILES+=usr/include/kdb.h
-OLD_FILES+=usr/include/krad.h
OLD_FILES+=usr/include/krb5.h
-OLD_FILES+=usr/include/krb5/ccselect_plugin.h
-OLD_FILES+=usr/include/krb5/certauth_plugin.h
-OLD_FILES+=usr/include/krb5/clpreauth_plugin.h
-OLD_FILES+=usr/include/krb5/hostrealm_plugin.h
-OLD_FILES+=usr/include/krb5/kadm5_auth_plugin.h
-OLD_FILES+=usr/include/krb5/kadm5_hook_plugin.h
-OLD_FILES+=usr/include/krb5/kdcpolicy_plugin.h
-OLD_FILES+=usr/include/krb5/kdcpreauth_plugin.h
-OLD_FILES+=usr/include/krb5/krb5.h
-OLD_FILES+=usr/include/krb5/localauth_plugin.h
OLD_FILES+=usr/include/krb5/locate_plugin.h
-OLD_FILES+=usr/include/krb5/plugin.h
-OLD_FILES+=usr/include/krb5/preauth_plugin.h
-OLD_FILES+=usr/include/krb5/pwqual_plugin.h
-OLD_FILES+=usr/include/profile.h
-OLD_FILES+=usr/include/trace.h
-OLD_FILES+=usr/include/util.h
-OLD_FILES+=usr/include/verto-module.h
-OLD_FILES+=usr/include/verto.h
-OLD_FILES+=usr/lib/krb5/plugins/kdb/db2.so
-OLD_FILES+=usr/lib/krb5/plugins/preauth/otp.so
-OLD_FILES+=usr/lib/krb5/plugins/preauth/pkinit.so
-OLD_FILES+=usr/lib/krb5/plugins/preauth/spake.so
-OLD_FILES+=usr/lib/krb5/plugins/preauth/test.so
-OLD_FILES+=usr/lib/krb5/plugins/tls/k5tls.so
+
OLD_FILES+=usr/lib/libcom_err.a
-OLD_LIBS+=usr/lib/libcom_err.so
-OLD_LIBS+=usr/lib/libcom_err.so.122
+OLD_FILES+=usr/lib/libcom_err.so
+OLD_FILES+=usr/lib/libgssapi_krb5.a
OLD_FILES+=usr/lib/libgssapi_krb5.so
-OLD_LIBS+=usr/lib/libgssapi_krb5.so.122
-OLD_FILES+=usr/lib/libgssrpc.so
-OLD_LIBS+=usr/lib/libgssrpc.so.122
-OLD_FILES+=usr/lib/libk5crypto.so
-OLD_LIBS+=usr/lib/libk5crypto.so.122
OLD_FILES+=usr/lib/libkadm5clnt.so
-OLD_FILES+=usr/lib/libkadm5clnt_mit.so
-OLD_LIBS+=usr/lib/libkadm5clnt_mit.so.122
-OLD_FILES+=usr/lib/libkadm5srv.so
-OLD_FILES+=usr/lib/libkadm5srv_mit.so
-OLD_LIBS+=usr/lib/libkadm5srv_mit.so.122
-OLD_FILES+=usr/lib/libkdb5.so
-OLD_LIBS+=usr/lib/libkdb5.so.122
-OLD_FILES+=usr/lib/libkrad.so
-OLD_LIBS+=usr/lib/libkrad.so.122
+OLD_FILES+=usr/lib/libkrb5.a
OLD_FILES+=usr/lib/libkrb5.so
-OLD_LIBS+=usr/lib/libkrb5.so.122
-OLD_FILES+=usr/lib/libkrb5profile.a
-OLD_FILES+=usr/lib/libkrb5profile.so
-OLD_LIBS+=usr/lib/libkrb5profile.so.122
-OLD_FILES+=usr/lib/libkrb5support.a
-OLD_FILES+=usr/lib/libkrb5support.so
-OLD_LIBS+=usr/lib/libkrb5support.so.122
-OLD_FILES+=usr/lib/libverto.so
-OLD_LIBS+=usr/lib/libverto.so.122
-OLD_FILES+=usr/libdata/pkgconfig/gssrpc.pc
-OLD_FILES+=usr/libdata/pkgconfig/kadm-client.pc
-OLD_FILES+=usr/libdata/pkgconfig/kadm-server.pc
-OLD_FILES+=usr/libdata/pkgconfig/kdb.pc
-OLD_FILES+=usr/libdata/pkgconfig/krb5-gssapi.pc
-OLD_FILES+=usr/libdata/pkgconfig/krb5.pc
-OLD_FILES+=usr/libdata/pkgconfig/mit-krb5-gssapi.pc
-OLD_FILES+=usr/libdata/pkgconfig/mit-krb5.pc
-OLD_FILES+=usr/libexec/krb5kdc
+OLD_FILES+=usr/lib/librpcsec_gss.a
+OLD_FILES+=usr/lib/librpcsec_gss.so
+OLD_LIBS+=usr/lib/librpcsec_gss.so.1
+OLD_FILES+=usr/lib/pam_krb5.so
+OLD_LIBS+=usr/lib/pam_krb5.so.6
+OLD_FILES+=usr/share/man/man8/pam_krb5.8.gz
+OLD_FILES+=usr/lib/pam_ksu.so
+OLD_LIBS+=usr/lib/pam_ksu.so.6
+OLD_FILES+=usr/share/man/man8/pam_ksu.8.gz
+
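+# How these lists take effect (a minimal, hypothetical sketch; the real
+# loop lives in Makefile.inc1, not in this file): "make delete-old" walks
+# OLD_FILES and OLD_DIRS and removes each path under ${DESTDIR}, while
+# OLD_LIBS entries such as the versioned .so.N files above are deferred
+# to the separate "make delete-old-libs" step, so installed binaries
+# still linked against the old libraries keep working in the meantime.
+#
+# .for file in ${OLD_FILES}
+#	[ ! -e "${DESTDIR}/${file}" ] || rm -f "${DESTDIR}/${file}"
+# .endfor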
OLD_FILES+=usr/libexec/kadmind
-OLD_FILES+=usr/libexec/kprop
-OLD_FILES+=usr/libexec/kpropd
-OLD_FILES+=usr/sbin/gss-server
-OLD_FILES+=usr/sbin/kadmin.local
-OLD_FILES+=usr/sbin/kdb5_util
-OLD_FILES+=usr/sbin/kproplog
-OLD_FILES+=usr/sbin/krb5-send-pr
-OLD_FILES+=usr/sbin/sim_server
-OLD_FILES+=usr/sbin/sserver
-OLD_FILES+=usr/sbin/uuserver
-OLD_FILES+=usr/share/doc/krb5/doc/html/.buildinfo
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/agogo.css
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/basic.css
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/bgfooter.png
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/bgtop.png
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/doctools.js
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/documentation_options.js
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/file.png
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/jquery.js
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/kerb.css
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/language_data.js
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/minus.png
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/plus.png
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/pygments.css
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/searchtools.js
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/underscore.js
-OLD_FILES+=usr/share/doc/krb5/doc/html/about.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/k5srvutil.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/kadmin_local.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/kadmind.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/kdb5_ldap_util.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/kdb5_util.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/kprop.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/kpropd.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/kproplog.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/krb5kdc.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/ktutil.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/sserver.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/advanced/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/advanced/retiring-des.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/appl_servers.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/auth_indicator.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/backup_host.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/conf_files/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/conf_files/kadm5_acl.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/conf_files/kdc_conf.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/conf_files/krb5_conf.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/conf_ldap.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/database.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/dbtypes.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/dictionary.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/enctypes.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/env_variables.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/host_config.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/https.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/install.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/install_appl_srv.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/install_clients.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/install_kdc.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/lockout.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/otp.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/pkinit.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/princ_dns.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/realm_config.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/spake.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/troubleshoot.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/various_envs.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/gssapi.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/h5l_mit_apidiff.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/init_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/princ_handle.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_425_conv_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_524_conv_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_524_convert_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_address_compare.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_address_order.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_address_search.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_allow_weak_crypto.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_aname_to_localname.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_anonymous_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_anonymous_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_appdefault_boolean.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_appdefault_string.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_free.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_genaddrs.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_get_checksum_func.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getaddrs.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getauthenticator.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getflags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getkey.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getkey_k.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getlocalseqnumber.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getlocalsubkey.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getrcache.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getrecvsubkey.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getrecvsubkey_k.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getremoteseqnumber.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getremotesubkey.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getsendsubkey.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getsendsubkey_k.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_init.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_initivector.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_set_checksum_func.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_set_req_cksumtype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_setaddrs.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_setflags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_setports.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_setrcache.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_setrecvsubkey.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_setrecvsubkey_k.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_setsendsubkey.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_setsendsubkey_k.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_setuseruserkey.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_build_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_build_principal_alloc_va.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_build_principal_ext.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_build_principal_va.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_block_size.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_checksum_length.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_crypto_length.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_crypto_length_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_decrypt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_decrypt_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_derive_prfplus.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_encrypt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_encrypt_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_encrypt_length.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_enctype_compare.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_free_state.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_fx_cf2_simple.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_init_state.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_is_coll_proof_cksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_is_keyed_cksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_keyed_checksum_types.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_keylengths.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_make_checksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_make_checksum_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_make_random_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_padding_length.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_prf.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_prf_length.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_prfplus.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_random_add_entropy.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_random_make_octets.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_random_os_entropy.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_random_seed.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_random_to_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_string_to_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_string_to_key_with_params.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_valid_cksumtype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_valid_enctype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_verify_checksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_verify_checksum_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_calculate_checksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_cache_match.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_close.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_copy_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_default.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_default_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_destroy.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_dup.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_end_seq_get.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_gen_new.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_get_config.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_get_flags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_get_full_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_get_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_get_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_get_type.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_initialize.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_move.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_new_unique.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_next_cred.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_remove_cred.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_resolve.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_retrieve_cred.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_select.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_set_config.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_set_default_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_set_flags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_start_seq_get.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_store_cred.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_support_switch.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_switch.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cccol_cursor_free.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cccol_cursor_new.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cccol_cursor_next.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cccol_have_content.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_change_password.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_check_clockskew.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_checksum_size.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_chpw_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cksumtype_to_string.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_clear_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_addresses.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_authdata.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_authenticator.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_checksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_context.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_keyblock.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_keyblock_contents.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_ticket.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_decode_authdata_container.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_decode_ticket.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_decrypt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_deltat_to_string.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_eblock_enctype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_encode_authdata_container.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_encrypt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_encrypt_size.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_enctype_to_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_enctype_to_string.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_expand_hostname.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_find_authdata.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_finish_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_finish_random_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_addresses.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_ap_rep_enc_part.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_authdata.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_authenticator.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_checksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_checksum_contents.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_cksumtypes.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_context.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_cred_contents.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_data_contents.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_default_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_enctypes.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_error.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_host_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_keyblock.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_keyblock_contents.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_keytab_entry_contents.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_string.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_tgt_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_ticket.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_unparsed_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_fwd_tgt_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_credentials.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_credentials_renew.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_credentials_validate.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_default_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_etype_info.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_fallback_host_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_host_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_in_tkt_with_keytab.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_in_tkt_with_password.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_in_tkt_with_skey.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_keytab.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_alloc.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_free.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_get_fast_flags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_init.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_address_list.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_anonymous.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_canonicalize.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_change_password_prompt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_etype_list.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_expire_callback.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_fast_ccache.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_fast_ccache_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_fast_flags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_forwardable.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_in_ccache.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_out_ccache.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_pa.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_pac_request.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_preauth_list.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_proxiable.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_renew_life.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_responder.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_salt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_tkt_life.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_password.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_permitted_enctypes.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_profile.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_prompt_types.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_renewed_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_server_rcache.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_time_offsets.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_validated_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_context.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_context_profile.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_free.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_get.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_get_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_get_error.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_get_times.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_init.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_set_keytab.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_set_password.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_set_service.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_step.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_keyblock.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_random_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_secure_context.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_is_config_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_is_referral_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_is_thread_safe.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_create_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_decrypt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_decrypt_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_encrypt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_encrypt_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_free_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_key_enctype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_key_keyblock.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_make_checksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_make_checksum_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_prf.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_reference_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_verify_checksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_verify_checksum_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kdc_sign_ticket.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kdc_verify_ticket.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_add_entry.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_client_default.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_close.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_default.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_default_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_dup.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_end_seq_get.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_free_entry.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_get_entry.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_get_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_get_type.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_have_content.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_next_entry.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_read_service_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_remove_entry.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_resolve.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_start_seq_get.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kuserok.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_make_authdata_kdc_issued.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_marshal_credentials.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_merge_authdata.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_mk_1cred.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_mk_error.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_mk_ncred.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_mk_priv.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_mk_rep.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_mk_rep_dce.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_mk_req.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_mk_req_extended.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_mk_safe.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_os_localaddr.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_add_buffer.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_free.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_get_buffer.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_get_client_info.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_get_types.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_init.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_parse.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_sign.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_sign_ext.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_verify.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_verify_ext.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_parse_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_parse_name_flags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_prepend_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_principal2salt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_principal_compare.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_principal_compare_any_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_principal_compare_flags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_process_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_prompter_posix.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_random_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_rd_cred.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_rd_error.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_rd_priv.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_rd_rep.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_rd_rep_dce.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_rd_req.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_rd_safe.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_read_password.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_realm_compare.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_recvauth.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_recvauth_version.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_responder_get_challenge.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_responder_list_questions.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_responder_otp_challenge_free.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_responder_otp_get_challenge.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_responder_otp_set_answer.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_responder_pkinit_challenge_free.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_responder_pkinit_get_challenge.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_responder_pkinit_set_answer.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_responder_set_answer.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_salttype_to_string.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_sendauth.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_server_decrypt_ticket_keytab.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_default_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_default_tgs_enctypes.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_kdc_recv_hook.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_kdc_send_hook.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_password.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_password_using_ccache.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_principal_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_real_time.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_trace_callback.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_trace_filename.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_sname_match.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_sname_to_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_string_to_cksumtype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_string_to_deltat.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_string_to_enctype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_string_to_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_string_to_salttype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_string_to_timestamp.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_timeofday.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_timestamp_to_sfstring.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_timestamp_to_string.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_tkt_creds_free.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_tkt_creds_get.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_tkt_creds_get_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_tkt_creds_get_times.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_tkt_creds_init.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_tkt_creds_step.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_unmarshal_credentials.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_unparse_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_unparse_name_ext.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_unparse_name_flags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_unparse_name_flags_ext.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_us_timeofday.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_use_enctype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_verify_authdata_kdc_issued.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_verify_checksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_verify_init_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_verify_init_creds_opt_init.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_verify_init_creds_opt_set_ap_req_nofail.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_vprepend_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_vset_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_vwrap_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_wrap_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_ADDRPORT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_CHAOS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_DDP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_INET.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_INET6.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_IPPORT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_ISO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_IS_LOCAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_NETBIOS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_XNS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AD_TYPE_EXTERNAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AD_TYPE_FIELD_TYPE_MASK.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AD_TYPE_REGISTERED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AD_TYPE_RESERVED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AP_OPTS_ETYPE_NEGOTIATION.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AP_OPTS_MUTUAL_REQUIRED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AP_OPTS_RESERVED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AP_OPTS_USE_SESSION_KEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AP_OPTS_USE_SUBKEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AP_OPTS_WIRE_MASK.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_CMAC_CAMELLIA128.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_CMAC_CAMELLIA256.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_CRC32.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_DESCBC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_HMAC_MD5_ARCFOUR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_HMAC_SHA1_96_AES128.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_HMAC_SHA1_96_AES256.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_HMAC_SHA1_DES3.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_HMAC_SHA256_128_AES128.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_HMAC_SHA384_192_AES256.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_MD5_HMAC_ARCFOUR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_NIST_SHA.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_RSA_MD4.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_RSA_MD4_DES.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_RSA_MD5.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_RSA_MD5_DES.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_SHA1.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_AES128_CTS_HMAC_SHA1_96.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_AES128_CTS_HMAC_SHA256_128.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_AES256_CTS_HMAC_SHA1_96.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_AES256_CTS_HMAC_SHA384_192.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_ARCFOUR_HMAC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_ARCFOUR_HMAC_EXP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_CAMELLIA128_CTS_CMAC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_CAMELLIA256_CTS_CMAC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DES3_CBC_ENV.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DES3_CBC_RAW.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DES3_CBC_SHA.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DES3_CBC_SHA1.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DES_CBC_CRC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DES_CBC_MD4.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DES_CBC_MD5.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DES_CBC_RAW.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DES_HMAC_SHA1.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DSA_SHA1_CMS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_MD5_RSA_CMS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_NULL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_RC2_CBC_ENV.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_RSA_ENV.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_RSA_ES_OAEP_ENV.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_SHA1_RSA_CMS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_UNKNOWN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_ALLOW_POSTDATE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_CANONICALIZE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_CNAME_IN_ADDL_TKT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_DISABLE_TRANSITED_CHECK.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_ENC_TKT_IN_SKEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_FORWARDABLE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_FORWARDED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_POSTDATED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_PROXIABLE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_PROXY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_RENEW.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_RENEWABLE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_RENEWABLE_OK.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_REQUEST_ANONYMOUS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_VALIDATE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_TKT_COMMON_MASK.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_ALTAUTH_ATT_CHALLENGE_RESPONSE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_ANONYMOUS_PRINCSTR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_ANONYMOUS_REALMSTR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AP_REP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AP_REQ.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AS_REP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AS_REQ.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_AND_OR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_AP_OPTIONS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_AUTH_INDICATOR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_CAMMAC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_ETYPE_NEGOTIATION.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_FX_ARMOR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_IF_RELEVANT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_INITIAL_VERIFIED_CAS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_KDC_ISSUED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_MANDATORY_FOR_KDC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_OSF_DCE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_SESAME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_SIGNTICKET.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_WIN2K_PAC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_DO_SEQUENCE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_DO_TIME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_GENERATE_LOCAL_ADDR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_GENERATE_LOCAL_FULL_ADDR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_GENERATE_REMOTE_ADDR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_GENERATE_REMOTE_FULL_ADDR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_PERMIT_ALL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_RET_SEQUENCE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_RET_TIME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_USE_SUBKEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CRED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CRYPTO_TYPE_CHECKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CRYPTO_TYPE_DATA.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CRYPTO_TYPE_EMPTY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CRYPTO_TYPE_HEADER.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CRYPTO_TYPE_PADDING.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CRYPTO_TYPE_SIGN_ONLY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CRYPTO_TYPE_STREAM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CRYPTO_TYPE_TRAILER.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CYBERSAFE_SECUREID.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_DOMAIN_X500_COMPRESS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_ENCPADATA_REQ_ENC_PA_REP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_ERROR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_FAST_REQUIRED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GC_CACHED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GC_CANONICALIZE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GC_CONSTRAINED_DELEGATION.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GC_FORWARDABLE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GC_NO_STORE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GC_NO_TRANSIT_CHECK.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GC_USER_USER.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_ADDRESS_LIST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_ANONYMOUS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_CANONICALIZE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_CHG_PWD_PRMPT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_ETYPE_LIST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_FORWARDABLE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_PREAUTH_LIST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_PROXIABLE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_RENEW_LIFE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_SALT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_TKT_LIFE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_INIT_CONTEXT_KDC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_INIT_CONTEXT_SECURE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_INIT_CREDS_STEP_FLAG_CONTINUE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_INT16_MAX.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_INT16_MIN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_INT32_MAX.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_INT32_MIN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AD_ITE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AD_KDCISSUED_CKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AD_MTE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AD_SIGNEDPATH.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_APP_DATA_CKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_APP_DATA_ENCRYPT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AP_REP_ENCPART.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AP_REQ_AUTH.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AP_REQ_AUTH_CKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AS_REP_ENCPART.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AS_REQ.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AS_REQ_PA_ENC_TS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_CAMMAC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_ENC_CHALLENGE_CLIENT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_ENC_CHALLENGE_KDC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_FAST_ENC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_FAST_FINISHED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_FAST_REP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_FAST_REQ_CHKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_GSS_TOK_MIC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_GSS_TOK_WRAP_INTEG.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_GSS_TOK_WRAP_PRIV.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_IAKERB_FINISHED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_KDC_REP_TICKET.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_KRB_CRED_ENCPART.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_KRB_ERROR_CKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_KRB_PRIV_ENCPART.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_KRB_SAFE_CKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_PA_AS_FRESHNESS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_PA_FX_COOKIE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_PA_OTP_REQUEST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_PA_PKINIT_KX.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_PA_S4U_X509_USER_REPLY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_PA_SAM_CHALLENGE_CKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_PA_SAM_CHALLENGE_TRACKID.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_PA_SAM_RESPONSE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_SPAKE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_TGS_REP_ENCPART_SESSKEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_TGS_REP_ENCPART_SUBKEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_TGS_REQ_AD_SESSKEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_TGS_REQ_AD_SUBKEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_TGS_REQ_AUTH.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_TGS_REQ_AUTH_CKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KPASSWD_ACCESSDENIED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KPASSWD_AUTHERROR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KPASSWD_BAD_VERSION.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KPASSWD_HARDERROR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KPASSWD_INITIAL_FLAG_NEEDED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KPASSWD_MALFORMED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KPASSWD_SOFTERROR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KPASSWD_SUCCESS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ALL_ACCT_EXPTIME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ALL_LAST_INITIAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ALL_LAST_RENEWAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ALL_LAST_REQ.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ALL_LAST_TGT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ALL_LAST_TGT_ISSUED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ALL_PW_EXPTIME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_NONE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ONE_ACCT_EXPTIME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ONE_LAST_INITIAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ONE_LAST_RENEWAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ONE_LAST_REQ.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ONE_LAST_TGT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ONE_LAST_TGT_ISSUED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ONE_PW_EXPTIME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_ENTERPRISE_PRINCIPAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_ENT_PRINCIPAL_AND_ID.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_MS_PRINCIPAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_MS_PRINCIPAL_AND_ID.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_PRINCIPAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_SMTP_NAME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_SRV_HST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_SRV_INST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_SRV_XHST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_UID.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_UNKNOWN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_WELLKNOWN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_X500_PRINCIPAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_ATTRIBUTES_INFO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_CLIENT_CLAIMS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_CLIENT_INFO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_CREDENTIALS_INFO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_DELEGATION_INFO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_DEVICE_CLAIMS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_DEVICE_INFO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_FULL_CHECKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_LOGON_INFO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_PRIVSVR_CHECKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_REQUESTOR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_SERVER_CHECKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_TICKET_CHECKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_UPN_DNS_INFO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_AFS3_SALT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_AP_REQ.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_AS_CHECKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_AS_FRESHNESS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_ENCRYPTED_CHALLENGE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_ENC_SANDIA_SECURID.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_ENC_TIMESTAMP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_ENC_UNIX_TIME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_ETYPE_INFO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_ETYPE_INFO2.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_FOR_USER.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_FX_COOKIE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_FX_ERROR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_FX_FAST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_GET_FROM_TYPED_DATA.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_NONE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_OSF_DCE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_OTP_CHALLENGE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_OTP_PIN_CHANGE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_OTP_REQUEST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_PAC_OPTIONS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_PAC_REQUEST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_PKINIT_KX.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_PK_AS_REP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_PK_AS_REP_OLD.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_PK_AS_REQ.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_PK_AS_REQ_OLD.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_PW_SALT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_REDHAT_IDP_OAUTH2.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_REDHAT_PASSKEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_REFERRAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_S4U_X509_USER.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_SAM_CHALLENGE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_SAM_CHALLENGE_2.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_SAM_REDIRECT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_SAM_RESPONSE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_SAM_RESPONSE_2.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_SESAME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_SPAKE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_SVR_REFERRAL_INFO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_TGS_REQ.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_USE_SPECIFIED_KVNO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_COMPARE_CASEFOLD.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_COMPARE_ENTERPRISE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_COMPARE_IGNORE_REALM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_COMPARE_UTF8.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_PARSE_ENTERPRISE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_PARSE_IGNORE_REALM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_PARSE_NO_DEF_REALM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_PARSE_NO_REALM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_PARSE_REQUIRE_REALM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_UNPARSE_DISPLAY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_UNPARSE_NO_REALM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_UNPARSE_SHORT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRIV.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PROMPT_TYPE_NEW_PASSWORD.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PROMPT_TYPE_NEW_PASSWORD_AGAIN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PROMPT_TYPE_PASSWORD.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PROMPT_TYPE_PREAUTH.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PVNO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_REALM_BRANCH_CHAR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RECVAUTH_BADAUTHVERS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RECVAUTH_SKIP_VERSION.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_REFERRAL_REALM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_OTP_FLAGS_COLLECT_PIN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_OTP_FLAGS_COLLECT_TOKEN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_OTP_FLAGS_NEXTOTP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_OTP_FLAGS_SEPARATE_PIN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_OTP_FORMAT_ALPHANUMERIC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_OTP_FORMAT_DECIMAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_OTP_FORMAT_HEXADECIMAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_PKINIT_FLAGS_TOKEN_USER_PIN_COUNT_LOW.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_PKINIT_FLAGS_TOKEN_USER_PIN_FINAL_TRY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_PKINIT_FLAGS_TOKEN_USER_PIN_LOCKED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_QUESTION_OTP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_QUESTION_PASSWORD.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_QUESTION_PKINIT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_SAFE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_SAM_MUST_PK_ENCRYPT_SAD.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_SAM_SEND_ENCRYPTED_SAD.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_SAM_USE_SAD_AS_KEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_MATCH_2ND_TKT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_MATCH_AUTHDATA.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_MATCH_FLAGS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_MATCH_FLAGS_EXACT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_MATCH_IS_SKEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_MATCH_KTYPE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_MATCH_SRV_NAMEONLY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_MATCH_TIMES.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_MATCH_TIMES_EXACT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_NOTICKET.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_OPENCLOSE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_SUPPORTED_KTYPES.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TGS_NAME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TGS_NAME_SIZE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TGS_REP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TGS_REQ.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TKT_CREDS_STEP_FLAG_CONTINUE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_VERIFY_INIT_CREDS_OPT_AP_REQ_NOFAIL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_WELLKNOWN_NAMESTR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/LR_TYPE_INTERPRETATION_MASK.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/LR_TYPE_THIS_SERVER_ONLY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/MAX_KEYTAB_NAME_LEN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/MSEC_DIRBIT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/MSEC_VAL_MASK.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/SALT_TYPE_AFS_LENGTH.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/SALT_TYPE_NO_LENGTH.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/THREEPARAMOPEN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_ANONYMOUS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_ENC_PA_REP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_FORWARDABLE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_FORWARDED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_HW_AUTH.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_INITIAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_INVALID.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_MAY_POSTDATE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_OK_AS_DELEGATE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_POSTDATED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_PRE_AUTH.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_PROXIABLE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_PROXY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_RENEWABLE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_TRANSIT_POLICY_CHECKED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/VALID_INT_BITS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/VALID_UINT_BITS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb524_convert_creds_kdc.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb524_init_ets.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_const.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_princ_component.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_princ_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_princ_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_princ_set_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_princ_set_realm_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_princ_set_realm_length.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_princ_size.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_princ_type.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_roundup.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_x.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_xc.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_address.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_addrtype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_ap_rep.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_ap_rep_enc_part.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_ap_req.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_auth_context.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_authdata.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_authdatatype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_authenticator.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_boolean.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_cc_cursor.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_ccache.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_cccol_cursor.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_checksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_cksumtype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_const_pointer.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_const_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_context.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_cred.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_cred_enc_part.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_cred_info.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_crypto_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_cryptotype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_deltat.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_enc_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_enc_kdc_rep_part.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_enc_tkt_part.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_encrypt_block.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_enctype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_error.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_error_code.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_expire_callback_func.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_flags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_get_init_creds_opt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_gic_opt_pa_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_init_creds_context.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_int16.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_int32.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_kdc_rep.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_kdc_req.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_keyblock.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_keytab.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_keytab_entry.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_keyusage.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_kt_cursor.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_kvno.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_last_req_entry.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_magic.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_mk_req_checksum_func.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_msgtype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_octet.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_pa_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_pa_pac_req.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_pa_server_referral_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_pa_svr_referral_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_pac.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_pointer.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_post_recv_fn.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_pre_send_fn.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_preauthtype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_principal_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_prompt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_prompt_type.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_prompter_fct.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_pwd_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_rcache.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_replay_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_responder_context.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_responder_fn.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_responder_otp_challenge.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_responder_otp_tokeninfo.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_responder_pkinit_challenge.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_responder_pkinit_identity.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_response.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_ticket.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_ticket_times.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_timestamp.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_tkt_authent.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_tkt_creds_context.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_trace_callback.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_trace_info.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_transited.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_typed_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_ui_2.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_ui_4.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_verify_init_creds_opt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/passwd_phrase_element.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/y2038.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/basic/ccache_def.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/basic/date_format.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/basic/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/basic/keytab_def.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/basic/rcache_def.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/basic/stash_file_def.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/build/directory_org.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/build/doing_build.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/build/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/build/options2configure.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/build/osconf.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/build_this.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/copyright.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/formats/ccache_file_format.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/formats/cookie.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/formats/freshness_token.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/formats/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/formats/keytab_file_format.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/formats/rcache_file_format.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-A.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-C.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-E.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-K.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-L.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-M.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-P.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-R.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-S.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-T.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-V.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-all.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/mitK5defaults.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/mitK5features.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/mitK5license.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/objects.inv
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/ccselect.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/certauth.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/clpreauth.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/general.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/gssapi.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/hostrealm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/internal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/kadm5_auth.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/kadm5_hook.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/kdcpolicy.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/kdcpreauth.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/localauth.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/locate.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/profile.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/pwqual.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/resources.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/search.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/searchindex.js
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/pwd_mgmt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/tkt_mgmt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/kdestroy.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/kinit.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/klist.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/kpasswd.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/krb5-config.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/ksu.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/kswitch.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/kvno.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/sclient.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_config/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_config/k5identity.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_config/k5login.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_config/kerberos.html
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/GMakefile
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/LICRcyr2utf8.xdy
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/LICRlatin2utf8.xdy
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/LatinRules.xdy
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/admin.pdf
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/admin.tex
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/appdev.pdf
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/appdev.tex
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/basic.pdf
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/basic.tex
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/build.pdf
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/build.tex
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/latexmkjarc
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/latexmkrc
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/make.bat
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/plugindev.pdf
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/plugindev.tex
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/python.ist
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinx.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinx.xdy
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxhighlight.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxhowto.cls
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexadmonitions.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexcontainers.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexgraphics.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexindbibtoc.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexlists.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexliterals.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexnumfig.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexobjects.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexshadowbox.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexstyleheadings.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexstylepage.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexstyletext.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatextables.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxmanual.cls
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxmessages.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxoptionsgeometry.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxoptionshyperref.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxpackagecyrillic.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxpackagefootnote.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/user.pdf
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/user.tex
-OLD_FILES+=usr/share/et/et_c.awk
-OLD_FILES+=usr/share/et/et_h.awk
-OLD_FILES+=usr/share/examples/krb5/kdc.conf
-OLD_FILES+=usr/share/examples/krb5/krb5.conf
-OLD_FILES+=usr/share/examples/krb5/services.append
-OLD_FILES+=usr/share/licenses/krb5-1.21.3_1/LICENSE
-OLD_FILES+=usr/share/licenses/krb5-1.21.3_1/MIT
-OLD_FILES+=usr/share/licenses/krb5-1.21.3_1/catalog.mk
-OLD_FILES+=usr/share/locale/de/LC_MESSAGES/mit-krb5.mo
-OLD_FILES+=usr/share/locale/en_US/LC_MESSAGES/mit-krb5.mo
-OLD_FILES+=usr/share/locale/ka/LC_MESSAGES/mit-krb5.mo
-OLD_FILES+=usr/share/man/man1/compile_et.1.gz
-OLD_FILES+=usr/share/man/man1/k5srvutil.1.gz
-OLD_FILES+=usr/share/man/man1/kadmin.1.gz
-OLD_FILES+=usr/share/man/man1/kdestroy.1.gz
-OLD_FILES+=usr/share/man/man1/kinit.1.gz
-OLD_FILES+=usr/share/man/man1/klist.1.gz
-OLD_FILES+=usr/share/man/man1/kpasswd.1.gz
-OLD_FILES+=usr/share/man/man1/krb5-config.1.gz
-OLD_FILES+=usr/share/man/man1/ksu.1.gz
-OLD_FILES+=usr/share/man/man1/kswitch.1.gz
-OLD_FILES+=usr/share/man/man1/ktutil.1.gz
-OLD_FILES+=usr/share/man/man1/kvno.1.gz
-OLD_FILES+=usr/share/man/man1/sclient.1.gz
-OLD_FILES+=usr/share/man/man5/.k5identity.5.gz
-OLD_FILES+=usr/share/man/man5/.k5login.5.gz
-OLD_FILES+=usr/share/man/man5/k5identity.5.gz
-OLD_FILES+=usr/share/man/man5/k5login.5.gz
-OLD_FILES+=usr/share/man/man5/kadm5.acl.5.gz
-OLD_FILES+=usr/share/man/man5/kdc.conf.5.gz
-OLD_FILES+=usr/share/man/man5/krb5.conf.5.gz
-OLD_FILES+=usr/share/man/man7/kerberos.7.gz
-OLD_FILES+=usr/share/man/man8/kadmin.local.8.gz
OLD_FILES+=usr/share/man/man8/kadmind.8.gz
-OLD_FILES+=usr/share/man/man8/kdb5_ldap_util.8.gz
-OLD_FILES+=usr/share/man/man8/kdb5_util.8.gz
-OLD_FILES+=usr/share/man/man8/kprop.8.gz
-OLD_FILES+=usr/share/man/man8/kpropd.8.gz
-OLD_FILES+=usr/share/man/man8/kproplog.8.gz
-OLD_FILES+=usr/share/man/man8/krb5kdc.8.gz
-OLD_FILES+=usr/share/man/man8/sserver.8.gz
-.endif
-.else
-.if ${MK_MITKRB5} != "no"
-# Remove Heimdal because we want MIT KRB5 but not Heimdal
-OLD_FILES+=etc/gss/qop
-OLD_FILES+=etc/rc.d/ipropd_master
-OLD_FILES+=etc/rc.d/ipropd_slave
+
+OLD_FILES+=usr/share/man/man3/com_err.3.gz
+OLD_FILES+=usr/share/man/man3/rpcsec_gss.3.gz
+OLD_FILES+=usr/share/man/man3/rpc_gss_get_error.3.gz
+OLD_FILES+=usr/share/man/man3/rpc_gss_get_mech_info.3.gz
+OLD_FILES+=usr/share/man/man3/rpc_gss_get_mechanisms.3.gz
+OLD_FILES+=usr/share/man/man3/rpc_gss_get_principal_name.3.gz
+OLD_FILES+=usr/share/man/man3/rpc_gss_get_versions.3.gz
+OLD_FILES+=usr/share/man/man3/rpc_gss_getcred.3.gz
+OLD_FILES+=usr/share/man/man3/rpc_gss_is_installed.3.gz
+OLD_FILES+=usr/share/man/man3/rpc_gss_max_data_length.3.gz
+OLD_FILES+=usr/share/man/man3/rpc_gss_mech_to_oid.3.gz
+OLD_FILES+=usr/share/man/man3/rpc_gss_oid_to_mech.3.gz
+OLD_FILES+=usr/share/man/man3/rpc_gss_qop_to_num.3.gz
+OLD_FILES+=usr/share/man/man3/rpc_gss_seccreate.3.gz
+OLD_FILES+=usr/share/man/man3/rpc_gss_set_callback.3.gz
+OLD_FILES+=usr/share/man/man3/rpc_gss_set_defaults.3.gz
+OLD_FILES+=usr/share/man/man3/rpc_gss_set_svc_name.3.gz
+OLD_FILES+=usr/share/man/man3/rpc_gss_svc_max_data_length.3.gz
+OLD_FILES+=usr/share/man/man5/krb5.conf.5.gz
+.endif # ${MK_KERBEROS} == "no"
+
+# Heimdal-specific files that don't exist in MIT Kerberos. These should be
+# removed if Kerberos is disabled, or if MIT Kerberos is selected.
+.if ${MK_KERBEROS} == "no" || ${MK_MITKRB5} != "no"
+
+# compile_et is a binary in Heimdal, but a shell script in MIT Kerberos.
+# When switching from Heimdal to MIT, we need to remove the debug symbols
+# explicitly.
+OLD_FILES+=usr/lib/debug/usr/bin/compile_et.debug
+
OLD_FILES+=usr/bin/asn1_compile
OLD_FILES+=usr/bin/hxtool
OLD_FILES+=usr/bin/kcc
@@ -4856,16 +2760,13 @@ OLD_FILES+=usr/include/asn1-common.h
OLD_FILES+=usr/include/asn1_err.h
OLD_FILES+=usr/include/base64.h
OLD_FILES+=usr/include/cms_asn1.h
-OLD_FILES+=usr/include/common.h
+OLD_FILES+=usr/include/com_right.h
OLD_FILES+=usr/include/crmf_asn1.h
OLD_FILES+=usr/include/der-private.h
OLD_FILES+=usr/include/der-protos.h
OLD_FILES+=usr/include/der.h
OLD_FILES+=usr/include/digest_asn1.h
-OLD_FILES+=usr/include/edwards25519_fiat.h
-OLD_FILES+=usr/include/edwards25519_tables.h
OLD_FILES+=usr/include/getarg.h
-OLD_FILES+=usr/include/groups.h
OLD_FILES+=usr/include/hdb-protos.h
OLD_FILES+=usr/include/hdb.h
OLD_FILES+=usr/include/hdb_asn1.h
@@ -4881,7 +2782,6 @@ OLD_FILES+=usr/include/hx509-private.h
OLD_FILES+=usr/include/hx509-protos.h
OLD_FILES+=usr/include/hx509.h
OLD_FILES+=usr/include/hx509_err.h
-OLD_FILES+=usr/include/iana.h
OLD_FILES+=usr/include/k524_err.h
OLD_FILES+=usr/include/kadm5/kadm5-private.h
OLD_FILES+=usr/include/kadm5/kadm5-protos.h
@@ -4914,86 +2814,43 @@ OLD_FILES+=usr/include/pkinit_asn1.h
OLD_FILES+=usr/include/resolve.h
OLD_FILES+=usr/include/rfc2459_asn1.h
OLD_FILES+=usr/include/roken-common.h
+OLD_FILES+=usr/include/roken.h
OLD_FILES+=usr/include/rtbl.h
-OLD_FILES+=usr/include/trace.h
-OLD_FILES+=usr/include/util.h
OLD_FILES+=usr/include/wind.h
OLD_FILES+=usr/include/wind_err.h
OLD_FILES+=usr/include/xdbm.h
OLD_FILES+=usr/lib/libasn1.a
OLD_FILES+=usr/lib/libasn1.so
-OLD_LIBS+=usr/lib/libasn1.so.11
-OLD_FILES+=usr/lib/libasn1_p.a
-OLD_LIBS+=usr/lib/libcom_err.so.5
-OLD_FILES+=usr/lib/libcom_err_p.a
-OLD_LIBS+=usr/lib/libgssapi.a
-OLD_LIBS+=usr/lib/libgssapi.so.10
-OLD_LIBS+=usr/lib/libgssapi_krb5.so.10
-OLD_FILES+=usr/lib/libgssapi_krb5_p.a
-OLD_FILES+=usr/lib/libgssapi_mech.a
-OLD_FILES+=usr/lib/libgssapi_mech.so
-OLD_LIBS+=usr/lib/libgssapi_mech.so.10
+OLD_FILES+=usr/lib/libgssapi.a
+OLD_FILES+=usr/lib/libgssapi.so
OLD_FILES+=usr/lib/libgssapi_ntlm.a
OLD_FILES+=usr/lib/libgssapi_ntlm.so
-OLD_LIBS+=usr/lib/libgssapi_ntlm.so.10
-OLD_FILES+=usr/lib/libgssapi_ntlm_p.a
OLD_FILES+=usr/lib/libgssapi_spnego.a
OLD_FILES+=usr/lib/libgssapi_spnego.so
-OLD_LIBS+=usr/lib/libgssapi_spnego.so.10
-OLD_FILES+=usr/lib/libgssapi_spnego_p.a
OLD_FILES+=usr/lib/libhdb.a
OLD_FILES+=usr/lib/libhdb.so
-OLD_LIBS+=usr/lib/libhdb.so.11
-OLD_FILES+=usr/lib/libhdb_p.a
OLD_FILES+=usr/lib/libheimbase.a
OLD_FILES+=usr/lib/libheimbase.so
-OLD_LIBS+=usr/lib/libheimbase.so.11
-OLD_FILES+=usr/lib/libheimbase_p.a
OLD_FILES+=usr/lib/libheimntlm.a
OLD_FILES+=usr/lib/libheimntlm.so
-OLD_LIBS+=usr/lib/libheimntlm.so.11
-OLD_FILES+=usr/lib/libheimntlm_p.a
-OLD_FILES+=usr/lib/libheimsqlite.a
-OLD_FILES+=usr/lib/libheimsqlite.so
-OLD_LIBS+=usr/lib/libheimsqlite.so.11
-OLD_FILES+=usr/lib/libheimsqlite_p.a
OLD_FILES+=usr/lib/libhx509.a
OLD_FILES+=usr/lib/libhx509.so
-OLD_LIBS+=usr/lib/libhx509.so.11
-OLD_FILES+=usr/lib/libhx509_p.a
OLD_FILES+=usr/lib/libkadm5clnt.a
-OLD_LIBS+=usr/lib/libkadm5clnt.so.11
-OLD_FILES+=usr/lib/libkadm5clnt_p.a
OLD_FILES+=usr/lib/libkadm5srv.a
OLD_FILES+=usr/lib/libkadm5srv.so
-OLD_LIBS+=usr/lib/libkadm5srv.so.11
-OLD_FILES+=usr/lib/libkadm5srv_p.a
OLD_FILES+=usr/lib/libkafs5.a
OLD_FILES+=usr/lib/libkafs5.so
-OLD_LIBS+=usr/lib/libkafs5.so.11
-OLD_FILES+=usr/lib/libkafs5_p.a
OLD_FILES+=usr/lib/libkdc.a
OLD_FILES+=usr/lib/libkdc.so
-OLD_LIBS+=usr/lib/libkdc.so.11
-OLD_FILES+=usr/lib/libkdc_p.a
-OLD_LIBS+=usr/lib/libkrb5.so.11
-OLD_FILES+=usr/lib/libkrb5_p.a
-OLD_FILES+=usr/lib/libroken.a
-OLD_FILES+=usr/lib/libroken.so
-OLD_LIBS+=usr/lib/libroken.so.11
-OLD_FILES+=usr/lib/libroken_p.a
-OLD_FILES+=usr/lib/libwind.a
-OLD_FILES+=usr/lib/libwind.so
-OLD_LIBS+=usr/lib/libwind.so.11
-OLD_FILES+=usr/lib/libwind_p.a
OLD_FILES+=usr/lib/libprivateheimipcc.a
OLD_FILES+=usr/lib/libprivateheimipcc.so
-OLD_LIBS+=usr/lib/libprivateheimipcc.so.11
-OLD_FILES+=usr/lib/libprivateheimipcc_p.a
OLD_FILES+=usr/lib/libprivateheimipcs.a
OLD_FILES+=usr/lib/libprivateheimipcs.so
-OLD_LIBS+=usr/lib/libprivateheimipcs.so.11
-OLD_FILES+=usr/lib/libprivateheimipcs_p.a
+OLD_FILES+=usr/lib/libroken.a
+OLD_FILES+=usr/lib/libroken.so
+OLD_FILES+=usr/lib/libwind.a
+OLD_FILES+=usr/lib/libwind.so
+OLD_FILES+=usr/lib/libwind.so.11
OLD_FILES+=usr/libexec/digest-service
OLD_FILES+=usr/libexec/hprop
OLD_FILES+=usr/libexec/hpropd
@@ -5005,11 +2862,51 @@ OLD_FILES+=usr/libexec/kdigest
OLD_FILES+=usr/libexec/kfd
OLD_FILES+=usr/libexec/kimpersonate
OLD_FILES+=usr/libexec/kpasswdd
+OLD_FILES+=usr/sbin/iprop-log
OLD_FILES+=usr/sbin/kstash
OLD_FILES+=usr/sbin/ktutil
-OLD_FILES+=usr/sbin/iprop-log
OLD_FILES+=usr/share/man/man1/kf.1.gz
+OLD_FILES+=usr/share/man/man1/kgetcred.1.gz
OLD_FILES+=usr/share/man/man3/HDB.3.gz
+OLD_FILES+=usr/share/man/man3/gss_accept_sec_context.3.gz
+OLD_FILES+=usr/share/man/man3/gss_acquire_cred.3.gz
+OLD_FILES+=usr/share/man/man3/gss_add_cred.3.gz
+OLD_FILES+=usr/share/man/man3/gss_add_oid_set_member.3.gz
+OLD_FILES+=usr/share/man/man3/gss_canonicalize_name.3.gz
+OLD_FILES+=usr/share/man/man3/gss_compare_name.3.gz
+OLD_FILES+=usr/share/man/man3/gss_context_time.3.gz
+OLD_FILES+=usr/share/man/man3/gss_create_empty_oid_set.3.gz
+OLD_FILES+=usr/share/man/man3/gss_delete_sec_context.3.gz
+OLD_FILES+=usr/share/man/man3/gss_display_name.3.gz
+OLD_FILES+=usr/share/man/man3/gss_display_status.3.gz
+OLD_FILES+=usr/share/man/man3/gss_duplicate_name.3.gz
+OLD_FILES+=usr/share/man/man3/gss_export_name.3.gz
+OLD_FILES+=usr/share/man/man3/gss_export_sec_context.3.gz
+OLD_FILES+=usr/share/man/man3/gss_get_mic.3.gz
+OLD_FILES+=usr/share/man/man3/gss_import_name.3.gz
+OLD_FILES+=usr/share/man/man3/gss_import_sec_context.3.gz
+OLD_FILES+=usr/share/man/man3/gss_indicate_mechs.3.gz
+OLD_FILES+=usr/share/man/man3/gss_init_sec_context.3.gz
+OLD_FILES+=usr/share/man/man3/gss_inquire_context.3.gz
+OLD_FILES+=usr/share/man/man3/gss_inquire_cred.3.gz
+OLD_FILES+=usr/share/man/man3/gss_inquire_cred_by_mech.3.gz
+OLD_FILES+=usr/share/man/man3/gss_inquire_mechs_for_name.3.gz
+OLD_FILES+=usr/share/man/man3/gss_inquire_names_for_mech.3.gz
+OLD_FILES+=usr/share/man/man3/gss_process_context_token.3.gz
+OLD_FILES+=usr/share/man/man3/gss_release_buffer.3.gz
+OLD_FILES+=usr/share/man/man3/gss_release_cred.3.gz
+OLD_FILES+=usr/share/man/man3/gss_release_name.3.gz
+OLD_FILES+=usr/share/man/man3/gss_release_oid_set.3.gz
+OLD_FILES+=usr/share/man/man3/gss_seal.3.gz
+OLD_FILES+=usr/share/man/man3/gss_sign.3.gz
+OLD_FILES+=usr/share/man/man3/gss_test_oid_set_member.3.gz
+OLD_FILES+=usr/share/man/man3/gss_unseal.3.gz
+OLD_FILES+=usr/share/man/man3/gss_unwrap.3.gz
+OLD_FILES+=usr/share/man/man3/gss_verify.3.gz
+OLD_FILES+=usr/share/man/man3/gss_verify_mic.3.gz
+OLD_FILES+=usr/share/man/man3/gss_wrap.3.gz
+OLD_FILES+=usr/share/man/man3/gss_wrap_size_limit.3.gz
+OLD_FILES+=usr/share/man/man3/gssapi.3.gz
OLD_FILES+=usr/share/man/man3/hdb__del.3.gz
OLD_FILES+=usr/share/man/man3/hdb__get.3.gz
OLD_FILES+=usr/share/man/man3/hdb__put.3.gz
@@ -5699,6 +3596,8 @@ OLD_FILES+=usr/share/man/man3/ntlm_core.3.gz
OLD_FILES+=usr/share/man/man3/ntlm_type1.3.gz
OLD_FILES+=usr/share/man/man3/ntlm_type2.3.gz
OLD_FILES+=usr/share/man/man3/ntlm_type3.3.gz
+OLD_FILES+=usr/share/man/man5/mech.5.gz
+OLD_FILES+=usr/share/man/man5/qop.5.gz
OLD_FILES+=usr/share/man/man8/hprop.8.gz
OLD_FILES+=usr/share/man/man8/hpropd.8.gz
OLD_FILES+=usr/share/man/man8/iprop-log.8.gz
@@ -5708,36 +3607,56 @@ OLD_FILES+=usr/share/man/man8/kcm.8.gz
OLD_FILES+=usr/share/man/man8/kdc.8.gz
OLD_FILES+=usr/share/man/man8/kdigest.8.gz
OLD_FILES+=usr/share/man/man8/kerberos.8.gz
+OLD_FILES+=usr/share/man/man8/kfd.8.gz
OLD_FILES+=usr/share/man/man8/kimpersonate.8.gz
OLD_FILES+=usr/share/man/man8/kpasswdd.8.gz
OLD_FILES+=usr/share/man/man8/kstash.8.gz
OLD_FILES+=usr/share/man/man8/ktutil.8.gz
OLD_FILES+=usr/share/man/man8/string2key.8.gz
OLD_FILES+=usr/share/man/man8/verify_krb5_conf.8.gz
-.else
-# Remove MIT KRB5 because we want Heimdal but not MIT
+
+OLD_LIBS+=usr/lib/libasn1.so.11
+OLD_LIBS+=usr/lib/libcom_err.so.5
+OLD_LIBS+=usr/lib/libgssapi.so.10
+OLD_LIBS+=usr/lib/libgssapi_krb5.so.10
+OLD_LIBS+=usr/lib/libgssapi_ntlm.so.10
+OLD_LIBS+=usr/lib/libgssapi_spnego.so.10
+OLD_LIBS+=usr/lib/libheimbase.so.11
+OLD_LIBS+=usr/lib/libheimntlm.so.11
+OLD_LIBS+=usr/lib/libhx509.so.11
+OLD_LIBS+=usr/lib/libhdb.so.11
+OLD_LIBS+=usr/lib/libkadm5clnt.so.11
+OLD_LIBS+=usr/lib/libkadm5srv.so.11
+OLD_LIBS+=usr/lib/libkafs5.so.11
+OLD_LIBS+=usr/lib/libkdc.so.11
+OLD_LIBS+=usr/lib/libkrb5.so.11
+OLD_LIBS+=usr/lib/libprivateheimipcc.so.11
+OLD_LIBS+=usr/lib/libprivateheimipcs.so.11
+OLD_LIBS+=usr/lib/libroken.so.11
+.endif # ${MK_KERBEROS} == "no" || ${MK_MITKRB5} != "no"
+
+# MIT-specific files that don't exist in Heimdal. These should be removed if
+# Kerberos is disabled, or if Heimdal is selected.
+.if ${MK_KERBEROS} == "no" || ${MK_MITKRB5} == "no"
+OLD_DIRS+=usr/lib/debug/usr/lib/krb5/plugins/kdb
+OLD_DIRS+=usr/lib/debug/usr/lib/krb5/plugins/preauth
+OLD_DIRS+=usr/lib/debug/usr/lib/krb5/plugins/tls
+OLD_DIRS+=usr/lib/debug/usr/lib/krb5/plugins
+OLD_DIRS+=usr/lib/debug/usr/lib/krb5
+
+# Heimdal doesn't install debug symbols for these.
+OLD_FILES+=usr/lib/debug/usr/bin/klist.debug
+OLD_FILES+=usr/lib/debug/usr/bin/kswitch.debug
+
OLD_FILES+=usr/bin/gss-client
OLD_FILES+=usr/bin/k5srvutil
OLD_FILES+=usr/bin/ktutil
OLD_FILES+=usr/bin/kvno
OLD_FILES+=usr/bin/sclient
-OLD_FILES+=usr/bin/sim_client
-OLD_FILES+=usr/bin/uuclient
-OLD_FILES+=etc/rc.d/kpropd
-OLD_FILES+=usr/include/common.h
-OLD_FILES+=usr/include/edwards25519_fiat.h
-OLD_FILES+=usr/include/edwards25519_tables.h
-OLD_FILES+=usr/include/groups.h
-OLD_FILES+=usr/include/gssapi/gssapi_ext.h
-OLD_FILES+=usr/include/gssapi/gssapi_oid.h
OLD_FILES+=usr/include/gssapi/gssapi_alloc.h
+OLD_FILES+=usr/include/gssapi/gssapi_ext.h
OLD_FILES+=usr/include/gssapi/gssapi_generic.h
-OLD_FILES+=usr/include/gssapi/gssapi_spnego.h
-OLD_FILES+=usr/include/gssapi/gssapi_asn1-priv.h
-OLD_FILES+=usr/include/gssapi/spnego_asn1-priv.h
-OLD_FILES+=usr/include/gssapi/gssapi_asn1.h
-OLD_FILES+=usr/include/gssapi/gssapi_ntlm.h
-OLD_FILES+=usr/include/gssapi/spnego_asn1.h
+OLD_FILES+=usr/include/gssapi/mechglue.h
OLD_FILES+=usr/include/gssrpc/auth.h
OLD_FILES+=usr/include/gssrpc/auth_gss.h
OLD_FILES+=usr/include/gssrpc/auth_gssapi.h
@@ -5754,7 +3673,6 @@ OLD_FILES+=usr/include/gssrpc/svc.h
OLD_FILES+=usr/include/gssrpc/svc_auth.h
OLD_FILES+=usr/include/gssrpc/types.h
OLD_FILES+=usr/include/gssrpc/xdr.h
-OLD_FILES+=usr/include/iana.h
OLD_FILES+=usr/include/kadm5/chpass_util_strings.h
OLD_FILES+=usr/include/kadm5/kadm_err.h
OLD_FILES+=usr/include/kdb.h
@@ -5762,1118 +3680,72 @@ OLD_FILES+=usr/include/krad.h
OLD_FILES+=usr/include/krb5/ccselect_plugin.h
OLD_FILES+=usr/include/krb5/certauth_plugin.h
OLD_FILES+=usr/include/krb5/clpreauth_plugin.h
+OLD_FILES+=usr/include/krb5/gssapi_err_generic.h
+OLD_FILES+=usr/include/krb5/gssapi_err_krb5.h
OLD_FILES+=usr/include/krb5/hostrealm_plugin.h
OLD_FILES+=usr/include/krb5/kadm5_auth_plugin.h
OLD_FILES+=usr/include/krb5/kadm5_hook_plugin.h
OLD_FILES+=usr/include/krb5/kdcpolicy_plugin.h
OLD_FILES+=usr/include/krb5/kdcpreauth_plugin.h
+OLD_FILES+=usr/include/krb5/krb5.h
OLD_FILES+=usr/include/krb5/localauth_plugin.h
OLD_FILES+=usr/include/krb5/plugin.h
OLD_FILES+=usr/include/krb5/preauth_plugin.h
OLD_FILES+=usr/include/krb5/pwqual_plugin.h
OLD_FILES+=usr/include/profile.h
-OLD_FILES+=usr/include/trace.h
-OLD_FILES+=usr/include/util.h
OLD_FILES+=usr/include/verto-module.h
OLD_FILES+=usr/include/verto.h
OLD_FILES+=usr/lib/krb5/plugins/kdb/db2.so
+OLD_FILES+=usr/lib/krb5/plugins/kdb/db2.so.122
OLD_FILES+=usr/lib/krb5/plugins/preauth/otp.so
+OLD_FILES+=usr/lib/krb5/plugins/preauth/otp.so.122
OLD_FILES+=usr/lib/krb5/plugins/preauth/pkinit.so
+OLD_FILES+=usr/lib/krb5/plugins/preauth/pkinit.so.122
OLD_FILES+=usr/lib/krb5/plugins/preauth/spake.so
+OLD_FILES+=usr/lib/krb5/plugins/preauth/spake.so.122
OLD_FILES+=usr/lib/krb5/plugins/preauth/test.so
+OLD_FILES+=usr/lib/krb5/plugins/preauth/test.so.122
OLD_FILES+=usr/lib/krb5/plugins/tls/k5tls.so
-OLD_LIBS+=usr/lib/libcom_err.so.122
-OLD_LIBS+=usr/lib/libgssapi_krb5.so.122
+OLD_FILES+=usr/lib/krb5/plugins/tls/k5tls.so.122
OLD_FILES+=usr/lib/libgssrpc.a
OLD_FILES+=usr/lib/libgssrpc.so
-OLD_LIBS+=usr/lib/libgssrpc.so.122
OLD_FILES+=usr/lib/libk5crypto.a
OLD_FILES+=usr/lib/libk5crypto.so
-OLD_LIBS+=usr/lib/libk5crypto.so.122
OLD_FILES+=usr/lib/libkadm5clnt_mit.a
OLD_FILES+=usr/lib/libkadm5clnt_mit.so
-OLD_LIBS+=usr/lib/libkadm5clnt_mit.so.122
OLD_FILES+=usr/lib/libkadm5srv_mit.a
OLD_FILES+=usr/lib/libkadm5srv_mit.so
-OLD_LIBS+=usr/lib/libkadm5srv_mit.so.122
OLD_FILES+=usr/lib/libkdb5.a
OLD_FILES+=usr/lib/libkdb5.so
-OLD_LIBS+=usr/lib/libkdb5.so.122
-OLD_FILES+=usr/lib/libkrad.so
OLD_FILES+=usr/lib/libkrad.a
-OLD_LIBS+=usr/lib/libkrad.so.122
-OLD_LIBS+=usr/lib/libkrb5.so.122
+OLD_FILES+=usr/lib/libkrad.so
OLD_FILES+=usr/lib/libkrb5profile.a
OLD_FILES+=usr/lib/libkrb5profile.so
-OLD_LIBS+=usr/lib/libkrb5profile.so.122
OLD_FILES+=usr/lib/libkrb5support.a
OLD_FILES+=usr/lib/libkrb5support.so
-OLD_LIBS+=usr/lib/libkrb5support.so.122
OLD_FILES+=usr/lib/libverto.a
OLD_FILES+=usr/lib/libverto.so
-OLD_LIBS+=usr/lib/libverto.so.122
OLD_FILES+=usr/libdata/pkgconfig/gssrpc.pc
-OLD_FILES+=usr/libdata/pkgconfig/kadm-client.pc
OLD_FILES+=usr/libdata/pkgconfig/kadm-server.pc
+OLD_FILES+=usr/libdata/pkgconfig/kadm-client.pc
OLD_FILES+=usr/libdata/pkgconfig/kdb.pc
OLD_FILES+=usr/libdata/pkgconfig/krb5-gssapi.pc
OLD_FILES+=usr/libdata/pkgconfig/krb5.pc
-OLD_FILES+=usr/libdata/pkgconfig/mit-krb5-gssapi.pc
OLD_FILES+=usr/libdata/pkgconfig/mit-krb5.pc
+OLD_FILES+=usr/libdata/pkgconfig/mit-krb5-gssapi.pc
+OLD_FILES+=usr/libexec/kprop
+OLD_FILES+=usr/libexec/kpropd
+OLD_FILES+=usr/libexec/kproplog
+OLD_FILES+=usr/libexec/krb5kdc
OLD_FILES+=usr/sbin/gss-server
OLD_FILES+=usr/sbin/kadmin.local
-OLD_FILES+=usr/sbin/kadmind
OLD_FILES+=usr/sbin/kdb5_util
-OLD_FILES+=usr/sbin/kprop
-OLD_FILES+=usr/sbin/kpropd
-OLD_FILES+=usr/sbin/kproplog
-OLD_FILES+=usr/sbin/krb5-send-pr
-OLD_FILES+=usr/sbin/krb5kdc
OLD_FILES+=usr/sbin/sim_server
-OLD_FILES+=usr/sbin/sserver
-OLD_FILES+=usr/sbin/uuserver
-OLD_FILES+=usr/share/doc/krb5/doc/html/.buildinfo
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/agogo.css
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/basic.css
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/bgfooter.png
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/bgtop.png
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/doctools.js
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/documentation_options.js
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/file.png
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/jquery.js
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/kerb.css
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/language_data.js
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/minus.png
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/plus.png
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/pygments.css
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/searchtools.js
-OLD_FILES+=usr/share/doc/krb5/doc/html/_static/underscore.js
-OLD_FILES+=usr/share/doc/krb5/doc/html/about.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/k5srvutil.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/kadmin_local.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/kadmind.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/kdb5_ldap_util.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/kdb5_util.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/kprop.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/kpropd.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/kproplog.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/krb5kdc.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/ktutil.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/admin_commands/sserver.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/advanced/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/advanced/retiring-des.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/appl_servers.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/auth_indicator.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/backup_host.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/conf_files/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/conf_files/kadm5_acl.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/conf_files/kdc_conf.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/conf_files/krb5_conf.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/conf_ldap.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/database.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/dbtypes.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/dictionary.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/enctypes.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/env_variables.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/host_config.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/https.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/install.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/install_appl_srv.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/install_clients.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/install_kdc.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/lockout.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/otp.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/pkinit.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/princ_dns.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/realm_config.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/spake.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/troubleshoot.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/admin/various_envs.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/gssapi.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/h5l_mit_apidiff.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/init_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/princ_handle.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_425_conv_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_524_conv_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_524_convert_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_address_compare.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_address_order.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_address_search.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_allow_weak_crypto.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_aname_to_localname.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_anonymous_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_anonymous_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_appdefault_boolean.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_appdefault_string.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_free.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_genaddrs.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_get_checksum_func.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getaddrs.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getauthenticator.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getflags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getkey.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getkey_k.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getlocalseqnumber.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getlocalsubkey.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getrcache.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getrecvsubkey.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getrecvsubkey_k.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getremoteseqnumber.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getremotesubkey.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getsendsubkey.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_getsendsubkey_k.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_init.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_initivector.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_set_checksum_func.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_set_req_cksumtype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_setaddrs.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_setflags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_setports.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_setrcache.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_setrecvsubkey.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_setrecvsubkey_k.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_setsendsubkey.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_setsendsubkey_k.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_auth_con_setuseruserkey.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_build_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_build_principal_alloc_va.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_build_principal_ext.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_build_principal_va.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_block_size.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_checksum_length.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_crypto_length.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_crypto_length_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_decrypt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_decrypt_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_derive_prfplus.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_encrypt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_encrypt_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_encrypt_length.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_enctype_compare.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_free_state.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_fx_cf2_simple.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_init_state.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_is_coll_proof_cksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_is_keyed_cksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_keyed_checksum_types.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_keylengths.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_make_checksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_make_checksum_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_make_random_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_padding_length.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_prf.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_prf_length.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_prfplus.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_random_add_entropy.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_random_make_octets.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_random_os_entropy.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_random_seed.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_random_to_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_string_to_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_string_to_key_with_params.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_valid_cksumtype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_valid_enctype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_verify_checksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_c_verify_checksum_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_calculate_checksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_cache_match.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_close.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_copy_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_default.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_default_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_destroy.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_dup.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_end_seq_get.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_gen_new.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_get_config.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_get_flags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_get_full_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_get_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_get_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_get_type.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_initialize.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_move.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_new_unique.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_next_cred.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_remove_cred.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_resolve.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_retrieve_cred.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_select.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_set_config.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_set_default_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_set_flags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_start_seq_get.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_store_cred.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_support_switch.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cc_switch.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cccol_cursor_free.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cccol_cursor_new.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cccol_cursor_next.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cccol_have_content.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_change_password.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_check_clockskew.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_checksum_size.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_chpw_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_cksumtype_to_string.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_clear_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_addresses.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_authdata.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_authenticator.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_checksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_context.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_keyblock.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_keyblock_contents.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_copy_ticket.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_decode_authdata_container.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_decode_ticket.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_decrypt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_deltat_to_string.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_eblock_enctype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_encode_authdata_container.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_encrypt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_encrypt_size.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_enctype_to_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_enctype_to_string.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_expand_hostname.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_find_authdata.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_finish_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_finish_random_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_addresses.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_ap_rep_enc_part.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_authdata.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_authenticator.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_checksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_checksum_contents.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_cksumtypes.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_context.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_cred_contents.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_data_contents.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_default_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_enctypes.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_error.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_host_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_keyblock.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_keyblock_contents.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_keytab_entry_contents.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_string.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_tgt_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_ticket.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_free_unparsed_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_fwd_tgt_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_credentials.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_credentials_renew.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_credentials_validate.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_default_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_etype_info.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_fallback_host_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_host_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_in_tkt_with_keytab.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_in_tkt_with_password.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_in_tkt_with_skey.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_keytab.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_alloc.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_free.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_get_fast_flags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_init.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_address_list.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_anonymous.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_canonicalize.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_change_password_prompt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_etype_list.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_expire_callback.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_fast_ccache.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_fast_ccache_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_fast_flags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_forwardable.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_in_ccache.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_out_ccache.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_pa.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_pac_request.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_preauth_list.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_proxiable.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_renew_life.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_responder.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_salt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_opt_set_tkt_life.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_init_creds_password.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_permitted_enctypes.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_profile.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_prompt_types.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_renewed_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_server_rcache.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_time_offsets.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_get_validated_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_context.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_context_profile.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_free.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_get.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_get_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_get_error.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_get_times.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_init.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_set_keytab.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_set_password.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_set_service.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_creds_step.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_keyblock.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_random_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_init_secure_context.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_is_config_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_is_referral_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_is_thread_safe.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_create_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_decrypt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_decrypt_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_encrypt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_encrypt_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_free_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_key_enctype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_key_keyblock.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_make_checksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_make_checksum_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_prf.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_reference_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_verify_checksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_k_verify_checksum_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kdc_sign_ticket.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kdc_verify_ticket.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_add_entry.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_client_default.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_close.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_default.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_default_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_dup.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_end_seq_get.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_free_entry.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_get_entry.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_get_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_get_type.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_have_content.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_next_entry.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_read_service_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_remove_entry.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_resolve.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kt_start_seq_get.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_kuserok.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_make_authdata_kdc_issued.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_marshal_credentials.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_merge_authdata.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_mk_1cred.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_mk_error.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_mk_ncred.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_mk_priv.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_mk_rep.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_mk_rep_dce.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_mk_req.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_mk_req_extended.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_mk_safe.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_os_localaddr.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_add_buffer.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_free.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_get_buffer.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_get_client_info.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_get_types.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_init.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_parse.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_sign.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_sign_ext.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_verify.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_pac_verify_ext.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_parse_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_parse_name_flags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_prepend_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_principal2salt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_principal_compare.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_principal_compare_any_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_principal_compare_flags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_process_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_prompter_posix.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_random_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_rd_cred.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_rd_error.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_rd_priv.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_rd_rep.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_rd_rep_dce.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_rd_req.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_rd_safe.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_read_password.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_realm_compare.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_recvauth.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_recvauth_version.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_responder_get_challenge.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_responder_list_questions.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_responder_otp_challenge_free.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_responder_otp_get_challenge.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_responder_otp_set_answer.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_responder_pkinit_challenge_free.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_responder_pkinit_get_challenge.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_responder_pkinit_set_answer.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_responder_set_answer.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_salttype_to_string.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_sendauth.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_server_decrypt_ticket_keytab.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_default_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_default_tgs_enctypes.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_kdc_recv_hook.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_kdc_send_hook.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_password.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_password_using_ccache.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_principal_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_real_time.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_trace_callback.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_set_trace_filename.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_sname_match.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_sname_to_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_string_to_cksumtype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_string_to_deltat.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_string_to_enctype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_string_to_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_string_to_salttype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_string_to_timestamp.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_timeofday.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_timestamp_to_sfstring.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_timestamp_to_string.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_tkt_creds_free.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_tkt_creds_get.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_tkt_creds_get_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_tkt_creds_get_times.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_tkt_creds_init.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_tkt_creds_step.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_unmarshal_credentials.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_unparse_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_unparse_name_ext.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_unparse_name_flags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_unparse_name_flags_ext.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_us_timeofday.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_use_enctype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_verify_authdata_kdc_issued.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_verify_checksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_verify_init_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_verify_init_creds_opt_init.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_verify_init_creds_opt_set_ap_req_nofail.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_vprepend_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_vset_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_vwrap_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/api/krb5_wrap_error_message.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_ADDRPORT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_CHAOS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_DDP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_INET.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_INET6.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_IPPORT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_ISO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_IS_LOCAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_NETBIOS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ADDRTYPE_XNS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AD_TYPE_EXTERNAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AD_TYPE_FIELD_TYPE_MASK.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AD_TYPE_REGISTERED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AD_TYPE_RESERVED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AP_OPTS_ETYPE_NEGOTIATION.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AP_OPTS_MUTUAL_REQUIRED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AP_OPTS_RESERVED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AP_OPTS_USE_SESSION_KEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AP_OPTS_USE_SUBKEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/AP_OPTS_WIRE_MASK.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_CMAC_CAMELLIA128.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_CMAC_CAMELLIA256.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_CRC32.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_DESCBC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_HMAC_MD5_ARCFOUR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_HMAC_SHA1_96_AES128.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_HMAC_SHA1_96_AES256.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_HMAC_SHA1_DES3.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_HMAC_SHA256_128_AES128.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_HMAC_SHA384_192_AES256.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_MD5_HMAC_ARCFOUR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_NIST_SHA.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_RSA_MD4.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_RSA_MD4_DES.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_RSA_MD5.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_RSA_MD5_DES.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/CKSUMTYPE_SHA1.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_AES128_CTS_HMAC_SHA1_96.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_AES128_CTS_HMAC_SHA256_128.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_AES256_CTS_HMAC_SHA1_96.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_AES256_CTS_HMAC_SHA384_192.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_ARCFOUR_HMAC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_ARCFOUR_HMAC_EXP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_CAMELLIA128_CTS_CMAC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_CAMELLIA256_CTS_CMAC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DES3_CBC_ENV.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DES3_CBC_RAW.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DES3_CBC_SHA.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DES3_CBC_SHA1.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DES_CBC_CRC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DES_CBC_MD4.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DES_CBC_MD5.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DES_CBC_RAW.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DES_HMAC_SHA1.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_DSA_SHA1_CMS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_MD5_RSA_CMS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_NULL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_RC2_CBC_ENV.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_RSA_ENV.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_RSA_ES_OAEP_ENV.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_SHA1_RSA_CMS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/ENCTYPE_UNKNOWN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_ALLOW_POSTDATE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_CANONICALIZE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_CNAME_IN_ADDL_TKT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_DISABLE_TRANSITED_CHECK.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_ENC_TKT_IN_SKEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_FORWARDABLE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_FORWARDED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_POSTDATED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_PROXIABLE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_PROXY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_RENEW.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_RENEWABLE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_RENEWABLE_OK.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_REQUEST_ANONYMOUS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_OPT_VALIDATE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KDC_TKT_COMMON_MASK.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_ALTAUTH_ATT_CHALLENGE_RESPONSE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_ANONYMOUS_PRINCSTR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_ANONYMOUS_REALMSTR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AP_REP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AP_REQ.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AS_REP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AS_REQ.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_AND_OR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_AP_OPTIONS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_AUTH_INDICATOR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_CAMMAC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_ETYPE_NEGOTIATION.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_FX_ARMOR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_IF_RELEVANT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_INITIAL_VERIFIED_CAS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_KDC_ISSUED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_MANDATORY_FOR_KDC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_OSF_DCE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_SESAME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_SIGNTICKET.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTHDATA_WIN2K_PAC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_DO_SEQUENCE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_DO_TIME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_GENERATE_LOCAL_ADDR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_GENERATE_LOCAL_FULL_ADDR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_GENERATE_REMOTE_ADDR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_GENERATE_REMOTE_FULL_ADDR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_PERMIT_ALL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_RET_SEQUENCE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_RET_TIME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_AUTH_CONTEXT_USE_SUBKEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CRED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CRYPTO_TYPE_CHECKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CRYPTO_TYPE_DATA.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CRYPTO_TYPE_EMPTY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CRYPTO_TYPE_HEADER.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CRYPTO_TYPE_PADDING.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CRYPTO_TYPE_SIGN_ONLY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CRYPTO_TYPE_STREAM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CRYPTO_TYPE_TRAILER.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_CYBERSAFE_SECUREID.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_DOMAIN_X500_COMPRESS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_ENCPADATA_REQ_ENC_PA_REP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_ERROR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_FAST_REQUIRED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GC_CACHED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GC_CANONICALIZE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GC_CONSTRAINED_DELEGATION.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GC_FORWARDABLE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GC_NO_STORE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GC_NO_TRANSIT_CHECK.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GC_USER_USER.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_ADDRESS_LIST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_ANONYMOUS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_CANONICALIZE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_CHG_PWD_PRMPT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_ETYPE_LIST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_FORWARDABLE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_PREAUTH_LIST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_PROXIABLE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_RENEW_LIFE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_SALT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_GET_INIT_CREDS_OPT_TKT_LIFE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_INIT_CONTEXT_KDC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_INIT_CONTEXT_SECURE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_INIT_CREDS_STEP_FLAG_CONTINUE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_INT16_MAX.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_INT16_MIN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_INT32_MAX.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_INT32_MIN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AD_ITE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AD_KDCISSUED_CKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AD_MTE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AD_SIGNEDPATH.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_APP_DATA_CKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_APP_DATA_ENCRYPT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AP_REP_ENCPART.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AP_REQ_AUTH.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AP_REQ_AUTH_CKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AS_REP_ENCPART.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AS_REQ.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_AS_REQ_PA_ENC_TS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_CAMMAC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_ENC_CHALLENGE_CLIENT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_ENC_CHALLENGE_KDC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_FAST_ENC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_FAST_FINISHED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_FAST_REP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_FAST_REQ_CHKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_GSS_TOK_MIC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_GSS_TOK_WRAP_INTEG.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_GSS_TOK_WRAP_PRIV.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_IAKERB_FINISHED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_KDC_REP_TICKET.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_KRB_CRED_ENCPART.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_KRB_ERROR_CKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_KRB_PRIV_ENCPART.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_KRB_SAFE_CKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_PA_AS_FRESHNESS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_PA_FX_COOKIE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_PA_OTP_REQUEST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_PA_PKINIT_KX.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_PA_S4U_X509_USER_REPLY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_PA_S4U_X509_USER_REQUEST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_PA_SAM_CHALLENGE_CKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_PA_SAM_CHALLENGE_TRACKID.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_PA_SAM_RESPONSE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_SPAKE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_TGS_REP_ENCPART_SESSKEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_TGS_REP_ENCPART_SUBKEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_TGS_REQ_AD_SESSKEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_TGS_REQ_AD_SUBKEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_TGS_REQ_AUTH.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KEYUSAGE_TGS_REQ_AUTH_CKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KPASSWD_ACCESSDENIED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KPASSWD_AUTHERROR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KPASSWD_BAD_VERSION.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KPASSWD_HARDERROR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KPASSWD_INITIAL_FLAG_NEEDED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KPASSWD_MALFORMED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KPASSWD_SOFTERROR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_KPASSWD_SUCCESS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ALL_ACCT_EXPTIME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ALL_LAST_INITIAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ALL_LAST_RENEWAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ALL_LAST_REQ.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ALL_LAST_TGT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ALL_LAST_TGT_ISSUED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ALL_PW_EXPTIME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_NONE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ONE_ACCT_EXPTIME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ONE_LAST_INITIAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ONE_LAST_RENEWAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ONE_LAST_REQ.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ONE_LAST_TGT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ONE_LAST_TGT_ISSUED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_LRQ_ONE_PW_EXPTIME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_ENTERPRISE_PRINCIPAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_ENT_PRINCIPAL_AND_ID.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_MS_PRINCIPAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_MS_PRINCIPAL_AND_ID.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_PRINCIPAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_SMTP_NAME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_SRV_HST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_SRV_INST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_SRV_XHST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_UID.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_UNKNOWN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_WELLKNOWN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_NT_X500_PRINCIPAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_ATTRIBUTES_INFO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_CLIENT_CLAIMS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_CLIENT_INFO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_CREDENTIALS_INFO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_DELEGATION_INFO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_DEVICE_CLAIMS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_DEVICE_INFO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_FULL_CHECKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_LOGON_INFO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_PRIVSVR_CHECKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_REQUESTOR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_SERVER_CHECKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_TICKET_CHECKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PAC_UPN_DNS_INFO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_AFS3_SALT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_AP_REQ.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_AS_CHECKSUM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_AS_FRESHNESS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_ENCRYPTED_CHALLENGE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_ENC_SANDIA_SECURID.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_ENC_TIMESTAMP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_ENC_UNIX_TIME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_ETYPE_INFO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_ETYPE_INFO2.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_FOR_USER.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_FX_COOKIE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_FX_ERROR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_FX_FAST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_GET_FROM_TYPED_DATA.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_NONE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_OSF_DCE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_OTP_CHALLENGE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_OTP_PIN_CHANGE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_OTP_REQUEST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_PAC_OPTIONS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_PAC_REQUEST.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_PKINIT_KX.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_PK_AS_REP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_PK_AS_REP_OLD.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_PK_AS_REQ.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_PK_AS_REQ_OLD.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_PW_SALT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_REDHAT_IDP_OAUTH2.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_REDHAT_PASSKEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_REFERRAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_S4U_X509_USER.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_SAM_CHALLENGE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_SAM_CHALLENGE_2.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_SAM_REDIRECT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_SAM_RESPONSE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_SAM_RESPONSE_2.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_SESAME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_SPAKE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_SVR_REFERRAL_INFO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_TGS_REQ.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PADATA_USE_SPECIFIED_KVNO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_COMPARE_CASEFOLD.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_COMPARE_ENTERPRISE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_COMPARE_IGNORE_REALM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_COMPARE_UTF8.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_PARSE_ENTERPRISE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_PARSE_IGNORE_REALM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_PARSE_NO_DEF_REALM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_PARSE_NO_REALM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_PARSE_REQUIRE_REALM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_UNPARSE_DISPLAY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_UNPARSE_NO_REALM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRINCIPAL_UNPARSE_SHORT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PRIV.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PROMPT_TYPE_NEW_PASSWORD.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PROMPT_TYPE_NEW_PASSWORD_AGAIN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PROMPT_TYPE_PASSWORD.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PROMPT_TYPE_PREAUTH.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_PVNO.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_REALM_BRANCH_CHAR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RECVAUTH_BADAUTHVERS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RECVAUTH_SKIP_VERSION.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_REFERRAL_REALM.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_OTP_FLAGS_COLLECT_PIN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_OTP_FLAGS_COLLECT_TOKEN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_OTP_FLAGS_NEXTOTP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_OTP_FLAGS_SEPARATE_PIN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_OTP_FORMAT_ALPHANUMERIC.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_OTP_FORMAT_DECIMAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_OTP_FORMAT_HEXADECIMAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_PKINIT_FLAGS_TOKEN_USER_PIN_COUNT_LOW.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_PKINIT_FLAGS_TOKEN_USER_PIN_FINAL_TRY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_PKINIT_FLAGS_TOKEN_USER_PIN_LOCKED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_QUESTION_OTP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_QUESTION_PASSWORD.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_RESPONDER_QUESTION_PKINIT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_SAFE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_SAM_MUST_PK_ENCRYPT_SAD.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_SAM_SEND_ENCRYPTED_SAD.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_SAM_USE_SAD_AS_KEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_MATCH_2ND_TKT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_MATCH_AUTHDATA.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_MATCH_FLAGS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_MATCH_FLAGS_EXACT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_MATCH_IS_SKEY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_MATCH_KTYPE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_MATCH_SRV_NAMEONLY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_MATCH_TIMES.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_MATCH_TIMES_EXACT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_NOTICKET.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_OPENCLOSE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TC_SUPPORTED_KTYPES.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TGS_NAME.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TGS_NAME_SIZE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TGS_REP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TGS_REQ.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_TKT_CREDS_STEP_FLAG_CONTINUE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_VERIFY_INIT_CREDS_OPT_AP_REQ_NOFAIL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/KRB5_WELLKNOWN_NAMESTR.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/LR_TYPE_INTERPRETATION_MASK.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/LR_TYPE_THIS_SERVER_ONLY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/MAX_KEYTAB_NAME_LEN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/MSEC_DIRBIT.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/MSEC_VAL_MASK.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/SALT_TYPE_AFS_LENGTH.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/SALT_TYPE_NO_LENGTH.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/THREEPARAMOPEN.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_ANONYMOUS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_ENC_PA_REP.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_FORWARDABLE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_FORWARDED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_HW_AUTH.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_INITIAL.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_INVALID.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_MAY_POSTDATE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_OK_AS_DELEGATE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_POSTDATED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_PRE_AUTH.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_PROXIABLE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_PROXY.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_RENEWABLE.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/TKT_FLG_TRANSIT_POLICY_CHECKED.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/VALID_INT_BITS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/VALID_UINT_BITS.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb524_convert_creds_kdc.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb524_init_ets.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_const.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_princ_component.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_princ_name.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_princ_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_princ_set_realm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_princ_set_realm_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_princ_set_realm_length.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_princ_size.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_princ_type.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_roundup.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_x.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/macros/krb5_xc.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_address.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_addrtype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_ap_rep.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_ap_rep_enc_part.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_ap_req.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_auth_context.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_authdata.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_authdatatype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_authenticator.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_boolean.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_cc_cursor.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_ccache.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_cccol_cursor.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_checksum.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_cksumtype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_const_pointer.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_const_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_context.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_cred.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_cred_enc_part.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_cred_info.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_creds.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_crypto_iov.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_cryptotype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_deltat.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_enc_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_enc_kdc_rep_part.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_enc_tkt_part.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_encrypt_block.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_enctype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_error.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_error_code.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_expire_callback_func.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_flags.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_get_init_creds_opt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_gic_opt_pa_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_init_creds_context.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_int16.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_int32.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_kdc_rep.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_kdc_req.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_key.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_keyblock.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_keytab.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_keytab_entry.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_keyusage.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_kt_cursor.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_kvno.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_last_req_entry.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_magic.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_mk_req_checksum_func.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_msgtype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_octet.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_pa_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_pa_pac_req.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_pa_server_referral_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_pa_svr_referral_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_pac.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_pointer.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_post_recv_fn.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_pre_send_fn.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_preauthtype.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_principal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_principal_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_prompt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_prompt_type.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_prompter_fct.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_pwd_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_rcache.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_replay_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_responder_context.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_responder_fn.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_responder_otp_challenge.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_responder_otp_tokeninfo.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_responder_pkinit_challenge.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_responder_pkinit_identity.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_response.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_ticket.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_ticket_times.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_timestamp.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_tkt_authent.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_tkt_creds_context.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_trace_callback.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_trace_info.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_transited.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_typed_data.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_ui_2.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_ui_4.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/krb5_verify_init_creds_opt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/refs/types/passwd_phrase_element.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/appdev/y2038.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/basic/ccache_def.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/basic/date_format.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/basic/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/basic/keytab_def.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/basic/rcache_def.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/basic/stash_file_def.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/build/directory_org.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/build/doing_build.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/build/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/build/options2configure.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/build/osconf.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/build_this.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/copyright.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/formats/ccache_file_format.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/formats/cookie.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/formats/freshness_token.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/formats/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/formats/keytab_file_format.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/formats/rcache_file_format.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-A.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-C.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-E.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-K.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-L.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-M.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-P.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-R.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-S.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-T.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-V.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex-all.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/genindex.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/mitK5defaults.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/mitK5features.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/mitK5license.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/objects.inv
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/ccselect.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/certauth.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/clpreauth.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/general.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/gssapi.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/hostrealm.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/internal.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/kadm5_auth.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/kadm5_hook.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/kdcpolicy.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/kdcpreauth.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/localauth.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/locate.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/profile.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/plugindev/pwqual.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/resources.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/search.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/searchindex.js
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/pwd_mgmt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/tkt_mgmt.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/kdestroy.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/kinit.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/klist.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/kpasswd.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/krb5-config.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/ksu.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/kswitch.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/kvno.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_commands/sclient.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_config/index.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_config/k5identity.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_config/k5login.html
-OLD_FILES+=usr/share/doc/krb5/doc/html/user/user_config/kerberos.html
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/GMakefile
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/LICRcyr2utf8.xdy
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/LICRlatin2utf8.xdy
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/LatinRules.xdy
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/admin.pdf
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/admin.tex
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/appdev.pdf
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/appdev.tex
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/basic.pdf
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/basic.tex
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/build.pdf
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/build.tex
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/latexmkjarc
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/latexmkrc
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/make.bat
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/plugindev.pdf
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/plugindev.tex
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/python.ist
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinx.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinx.xdy
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxhighlight.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxhowto.cls
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexadmonitions.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexcontainers.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexgraphics.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexindbibtoc.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexlists.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexliterals.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexnumfig.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexobjects.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexshadowbox.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexstyleheadings.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexstylepage.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatexstyletext.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxlatextables.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxmanual.cls
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxmessages.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxoptionsgeometry.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxoptionshyperref.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxpackagecyrillic.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/sphinxpackagefootnote.sty
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/user.pdf
-OLD_FILES+=usr/share/doc/krb5/doc/pdf/user.tex
-OLD_FILES+=usr/share/et/et_c.awk
OLD_FILES+=usr/share/et/et_h.awk
-OLD_FILES+=usr/share/examples/krb5/kdc.conf
-OLD_FILES+=usr/share/examples/krb5/krb5.conf
-OLD_FILES+=usr/share/examples/krb5/services.append
-OLD_FILES+=usr/share/licenses/krb5-1.21.3_1/LICENSE
-OLD_FILES+=usr/share/licenses/krb5-1.21.3_1/MIT
-OLD_FILES+=usr/share/licenses/krb5-1.21.3_1/catalog.mk
-OLD_FILES+=usr/share/locale/de/LC_MESSAGES/mit-krb5.mo
-OLD_FILES+=usr/share/locale/en_US/LC_MESSAGES/mit-krb5.mo
-OLD_FILES+=usr/share/locale/ka/LC_MESSAGES/mit-krb5.mo
+OLD_FILES+=usr/share/et/et_c.awk
OLD_FILES+=usr/share/man/man1/k5srvutil.1.gz
OLD_FILES+=usr/share/man/man1/kadmin.1.gz
+OLD_FILES+=usr/share/man/man1/ksu.1.gz
OLD_FILES+=usr/share/man/man1/ktutil.1.gz
OLD_FILES+=usr/share/man/man1/kvno.1.gz
OLD_FILES+=usr/share/man/man1/sclient.1.gz
@@ -6885,29 +3757,26 @@ OLD_FILES+=usr/share/man/man5/kadm5.acl.5.gz
OLD_FILES+=usr/share/man/man5/kdc.conf.5.gz
OLD_FILES+=usr/share/man/man7/kerberos.7.gz
OLD_FILES+=usr/share/man/man8/kadmin.local.8.gz
-OLD_FILES+=usr/share/man/man8/kdb5_ldap_util.8.gz
OLD_FILES+=usr/share/man/man8/kdb5_util.8.gz
OLD_FILES+=usr/share/man/man8/kprop.8.gz
OLD_FILES+=usr/share/man/man8/kpropd.8.gz
OLD_FILES+=usr/share/man/man8/kproplog.8.gz
OLD_FILES+=usr/share/man/man8/krb5kdc.8.gz
-OLD_FILES+=usr/share/man/man8/sserver.8.gz
-.endif
-.endif
+OLD_FILES+=usr/share/man/man8/pam-krb5.8.gz
-.if ${MK_KERBEROS_SUPPORT} == no
-.if ${MK_MITKRB5} == no
-OLD_FILES+=usr/bin/compile_et
-OLD_FILES+=usr/include/com_err.h
-OLD_FILES+=usr/include/com_right.h
-OLD_FILES+=usr/lib/libcom_err.a
-OLD_FILES+=usr/lib/libcom_err.so
-OLD_LIBS+=usr/lib/libcom_err.so.5
-OLD_FILES+=usr/lib/libcom_err_p.a
-OLD_FILES+=usr/share/man/man1/compile_et.1.gz
-OLD_FILES+=usr/share/man/man3/com_err.3.gz
-.endif
-.endif
+OLD_LIBS+=usr/lib/libcom_err.so.122
+OLD_LIBS+=usr/lib/libgssapi_krb5.so.122
+OLD_LIBS+=usr/lib/libgssrpc.so.122
+OLD_LIBS+=usr/lib/libk5crypto.so.122
+OLD_LIBS+=usr/lib/libkadm5clnt_mit.so.122
+OLD_LIBS+=usr/lib/libkadm5srv_mit.so.122
+OLD_LIBS+=usr/lib/libkdb5.so.122
+OLD_LIBS+=usr/lib/libkrad.so.122
+OLD_LIBS+=usr/lib/libkrb5.so.122
+OLD_LIBS+=usr/lib/libkrb5profile.so.122
+OLD_LIBS+=usr/lib/libkrb5support.so.122
+OLD_LIBS+=usr/lib/libverto.so.122
+.endif # ${MK_KERBEROS} == "no" || ${MK_MITKRB5} == "no"
.if ${MK_LDNS} == no
OLD_FILES+=usr/lib/libprivateldns.a
@@ -8874,6 +5743,7 @@ OLD_FILES+=etc/rc.d/ypldap
OLD_FILES+=etc/rc.d/yppasswdd
OLD_FILES+=etc/rc.d/ypserv
OLD_FILES+=etc/rc.d/ypset
+OLD_FILES+=etc/rc.d/ypupdated
OLD_FILES+=etc/rc.d/ypxfrd
OLD_FILES+=usr/bin/ypcat
OLD_FILES+=usr/bin/ypchfn
@@ -8891,6 +5761,7 @@ OLD_FILES+=usr/libexec/mknetid
OLD_FILES+=usr/libexec/yppwupdate
OLD_FILES+=usr/libexec/ypxfr
OLD_FILES+=usr/sbin/rpc.yppasswdd
+OLD_FILES+=usr/sbin/rpc.ypupdated
OLD_FILES+=usr/sbin/rpc.ypxfrd
OLD_FILES+=usr/sbin/yp_mkdb
OLD_FILES+=usr/sbin/ypbind
diff --git a/tools/build/options/WITHOUT_GSSAPI b/tools/build/options/WITHOUT_GSSAPI
deleted file mode 100644
index 3b208b6edecd..000000000000
--- a/tools/build/options/WITHOUT_GSSAPI
+++ /dev/null
@@ -1 +0,0 @@
-Do not build libgssapi.
diff --git a/tools/build/options/WITHOUT_KERBEROS b/tools/build/options/WITHOUT_KERBEROS
index 98e1ffe3721d..e0301ee1d786 100644
--- a/tools/build/options/WITHOUT_KERBEROS
+++ b/tools/build/options/WITHOUT_KERBEROS
@@ -1 +1 @@
-Set this to not build Kerberos 5 (KTH Heimdal).
+Set this to not build Kerberos.
diff --git a/tools/build/options/WITH_CLEAN b/tools/build/options/WITH_CLEAN
new file mode 100644
index 000000000000..0bb05e33371b
--- /dev/null
+++ b/tools/build/options/WITH_CLEAN
@@ -0,0 +1,4 @@
+Clean before building world and/or kernel.
+Note that recording a new epoch in
+.Pa .clean_build_epoch
+in the root of the source tree will also force a clean world build.
diff --git a/tools/test/netfibs/reflector.sh b/tools/test/netfibs/reflector.sh
index b9500689a321..a68019532010 100755
--- a/tools/test/netfibs/reflector.sh
+++ b/tools/test/netfibs/reflector.sh
@@ -897,7 +897,7 @@ testrx_run_test()
*) _opts="-d" ;;
esac
- # Convert netcat options to reflect aguments.
+ # Convert netcat options to reflect arguments.
case "${_o}" in
-i) _opts="${_opts} -T TCP6" ;; # Use TCP for START/DONE.
-u) _opts="${_opts} -T UDP6" ;;
diff --git a/tools/tools/usbtest/usb_msc_test.c b/tools/tools/usbtest/usb_msc_test.c
index 713da381820e..1b9c3192a472 100644
--- a/tools/tools/usbtest/usb_msc_test.c
+++ b/tools/tools/usbtest/usb_msc_test.c
@@ -952,7 +952,6 @@ find_usb_endpoints(struct libusb20_device *pdev, uint8_t class,
struct libusb20_interface *iface;
struct libusb20_endpoint *ep;
uint8_t x;
- uint8_t y;
uint8_t z;
*in_ep = 0;
@@ -966,9 +965,6 @@ find_usb_endpoints(struct libusb20_device *pdev, uint8_t class,
return;
for (x = 0; x != pcfg->num_interface; x++) {
-
- y = alt_setting;
-
iface = (pcfg->interface + x);
if ((iface->desc.bInterfaceClass == class) &&
diff --git a/usr.bin/Makefile b/usr.bin/Makefile
index 512f75b5d093..da1a9b3a681f 100644
--- a/usr.bin/Makefile
+++ b/usr.bin/Makefile
@@ -219,7 +219,7 @@ SUBDIR.${MK_ISCSI}+= iscsictl
SUBDIR.${MK_KDUMP}+= kdump
SUBDIR.${MK_KDUMP}+= truss
.if ${MK_MITKRB5} == "no"
-SUBDIR.${MK_KERBEROS_SUPPORT}+= compile_et
+SUBDIR.${MK_KERBEROS}+= compile_et
.endif
SUBDIR.${MK_LDNS_UTILS}+= drill
SUBDIR.${MK_LDNS_UTILS}+= host
diff --git a/usr.bin/kyua/Makefile b/usr.bin/kyua/Makefile
index a4f95f1106d9..daefedbf8bca 100644
--- a/usr.bin/kyua/Makefile
+++ b/usr.bin/kyua/Makefile
@@ -129,7 +129,8 @@ SRCS+= engine/atf.cpp \
engine/execenv/execenv_host.cpp
SRCS+= os/freebsd/execenv_jail_manager.cpp \
- os/freebsd/main.cpp
+ os/freebsd/main.cpp \
+ os/freebsd/reqs_checker_kmods.cpp
SRCS+= store/dbtypes.cpp \
store/exceptions.cpp \
diff --git a/usr.bin/localedef/localedef.1 b/usr.bin/localedef/localedef.1
index be37715f490d..918b57961c6c 100644
--- a/usr.bin/localedef/localedef.1
+++ b/usr.bin/localedef/localedef.1
@@ -164,7 +164,7 @@ unless instructed otherwise by the
.Fl D (
BSD
output) option.
-The contants of this directory should generally be copied into the
+The content of this directory should generally be copied into the
appropriate subdirectory of
.Pa /usr/share/locale
in order for the definitions to be visible to programs linked with libc.
diff --git a/usr.bin/tcopy/tcopy.cc b/usr.bin/tcopy/tcopy.cc
index 891c37f871e5..37a146376c2e 100644
--- a/usr.bin/tcopy/tcopy.cc
+++ b/usr.bin/tcopy/tcopy.cc
@@ -738,7 +738,7 @@ main(int argc, char *argv[])
warnx("illegal block size");
usage();
}
- if (maxblk <= 0) {
+ if (tmp <= 0) {
warnx("illegal block size");
usage();
}
diff --git a/usr.sbin/Makefile b/usr.sbin/Makefile
index 90c23dc26cc5..b97c22ffeb08 100644
--- a/usr.sbin/Makefile
+++ b/usr.sbin/Makefile
@@ -139,9 +139,7 @@ SUBDIR.${MK_FLOPPY}+= fdformat
SUBDIR.${MK_FLOPPY}+= fdread
SUBDIR.${MK_FLOPPY}+= fdwrite
SUBDIR.${MK_FREEBSD_UPDATE}+= freebsd-update
-.if ${MK_KERBEROS_SUPPORT} != "no"
-SUBDIR.${MK_GSSAPI}+= gssd
-.endif
+SUBDIR.${MK_KERBEROS_SUPPORT}+= gssd
SUBDIR.${MK_GPIO}+= gpioctl
SUBDIR.${MK_HYPERV}+= hyperv
SUBDIR.${MK_INET6}+= ip6addrctl
@@ -174,6 +172,7 @@ SUBDIR.${MK_NETGRAPH}+= flowctl
SUBDIR.${MK_NETGRAPH}+= ngctl
SUBDIR.${MK_NETGRAPH}+= nghook
SUBDIR.${MK_NIS}+= rpc.yppasswdd
+SUBDIR.${MK_NIS}+= rpc.ypupdated
SUBDIR.${MK_NIS}+= rpc.ypxfrd
SUBDIR.${MK_NIS}+= ypbind
SUBDIR.${MK_NIS}+= ypldap
diff --git a/usr.sbin/certctl/certctl.8 b/usr.sbin/certctl/certctl.8
index 97bdc840c359..edf993e1361a 100644
--- a/usr.sbin/certctl/certctl.8
+++ b/usr.sbin/certctl/certctl.8
@@ -24,7 +24,7 @@
.\" IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
.\" POSSIBILITY OF SUCH DAMAGE.
.\"
-.Dd August 11, 2025
+.Dd August 18, 2025
.Dt CERTCTL 8
.Os
.Sh NAME
@@ -63,6 +63,8 @@ This option is only valid in conjunction with the
command.
.It Fl D Ar destdir
Specify the DESTDIR (overriding values from the environment).
+.It Fl d Ar distbase
+Specify the DISTBASE (overriding values from the environment).
.It Fl l
When listing installed (trusted or untrusted) certificates, show the
full path and distinguished name for each certificate.
@@ -117,7 +119,13 @@ Remove the specified file from the untrusted list.
.Sh ENVIRONMENT
.Bl -tag -width UNTRUSTDESTDIR
.It Ev DESTDIR
-Alternate destination directory to operate on.
+Absolute path to an alternate destination directory to operate on
+instead of the file system root, e.g.
+.Dq Li /tmp/install .
+.It Ev DISTBASE
+Additional path component to include when operating on certificate directories.
+This must start with a slash, e.g.
+.Dq Li /base .
.It Ev LOCALBASE
Location for local programs.
Defaults to the value of the user.localbase sysctl which is usually
@@ -125,22 +133,22 @@ Defaults to the value of the user.localbase sysctl which is usually
.It Ev TRUSTPATH
List of paths to search for trusted certificates.
Default:
-.Pa ${DESTDIR}/usr/share/certs/trusted
+.Pa ${DESTDIR}${DISTBASE}/usr/share/certs/trusted
.Pa ${DESTDIR}${LOCALBASE}/share/certs/trusted
.Pa ${DESTDIR}${LOCALBASE}/share/certs
.It Ev UNTRUSTPATH
List of paths to search for untrusted certificates.
Default:
-.Pa ${DESTDIR}/usr/share/certs/untrusted
+.Pa ${DESTDIR}${DISTBASE}/usr/share/certs/untrusted
.Pa ${DESTDIR}${LOCALBASE}/share/certs/untrusted
.It Ev TRUSTDESTDIR
Destination directory for symbolic links to trusted certificates.
Default:
-.Pa ${DESTDIR}/etc/ssl/certs
+.Pa ${DESTDIR}${DISTBASE}/etc/ssl/certs
.It Ev UNTRUSTDESTDIR
Destination directory for symbolic links to untrusted certificates.
Default:
-.Pa ${DESTDIR}/etc/ssl/untrusted
+.Pa ${DESTDIR}${DISTBASE}/etc/ssl/untrusted
.It Ev BUNDLE
File name of bundle to produce.
.El
diff --git a/usr.sbin/certctl/certctl.c b/usr.sbin/certctl/certctl.c
index 365870167aeb..3601f6929fc4 100644
--- a/usr.sbin/certctl/certctl.c
+++ b/usr.sbin/certctl/certctl.c
@@ -63,6 +63,7 @@ static bool verbose;
static const char *localbase;
static const char *destdir;
+static const char *distbase;
static const char *metalog;
static const char *uname = "root";
@@ -100,6 +101,50 @@ static char *bundle_dest;
static FILE *mlf;
/*
+ * Create a directory and its parents as needed.
+ */
+static void
+mkdirp(const char *dir)
+{
+ struct stat sb;
+ const char *sep;
+ char *parent;
+
+ if (stat(dir, &sb) == 0)
+ return;
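+	/* Recurse to create the parent directory first. */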
+ if ((sep = strrchr(dir, '/')) != NULL) {
+ parent = xasprintf("%.*s", (int)(sep - dir), dir);
+ mkdirp(parent);
+ free(parent);
+ }
+ info("creating %s", dir);
+ if (mkdir(dir, 0755) != 0)
+ err(1, "mkdir %s", dir);
+}
+
+/*
+ * Remove duplicate and trailing slashes from a path.
+ */
+static char *
+normalize_path(const char *str)
+{
+ char *buf, *dst;
+
+ if ((buf = malloc(strlen(str) + 1)) == NULL)
+ err(1, NULL);
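+	/* Copy, collapsing runs of '/' and dropping any trailing '/'. */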
+ for (dst = buf; *str != '\0'; dst++) {
+ if ((*dst = *str++) == '/') {
+ while (*str == '/')
+ str++;
+ if (*str == '\0')
+ break;
+ }
+ }
+ *dst = '\0';
+ return (buf);
+}
+
+/*
* Split a colon-separated list into a NULL-terminated array.
*/
static char **
@@ -124,14 +169,14 @@ split_paths(const char *str)
}
/*
- * Expand %L into LOCALBASE and prefix DESTDIR.
+ * Expand %L into LOCALBASE and prefix DESTDIR and DISTBASE as needed.
*/
static char *
expand_path(const char *template)
{
if (template[0] == '%' && template[1] == 'L')
return (xasprintf("%s%s%s", destdir, localbase, template + 2));
- return (xasprintf("%s%s", destdir, template));
+ return (xasprintf("%s%s%s", destdir, distbase, template));
}
/*
@@ -155,6 +200,9 @@ expand_paths(const char *const *templates)
/*
* If destdir is a prefix of path, returns a pointer to the rest of path,
* otherwise returns path.
+ *
+ * Note that this intentionally does not strip distbase from the path!
+ * Unlike destdir, distbase is expected to be included in the metalog.
*/
static const char *
unexpand_path(const char *path)
@@ -268,7 +316,7 @@ read_cert(const char *path, struct cert_tree *tree, struct cert_tree *exclude)
X509_NAME *name;
struct cert *cert;
unsigned long hash;
- int ni, no;
+ int len, ni, no;
if ((f = fopen(path, "r")) == NULL) {
warn("%s", path);
@@ -293,11 +341,21 @@ read_cert(const char *path, struct cert_tree *tree, struct cert_tree *exclude)
cert->x509 = x509;
name = X509_get_subject_name(x509);
cert->hash = X509_NAME_hash_ex(name, NULL, NULL, NULL);
- cert->name = X509_NAME_oneline(name, NULL, 0);
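+	/*
+	 * With a NULL buffer, X509_NAME_get_text_by_NID() returns the
+	 * length of the common name, or -1 if there is none.
+	 */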
+ len = X509_NAME_get_text_by_NID(name, NID_commonName,
+ NULL, 0);
+ if (len > 0) {
+ if ((cert->name = malloc(len + 1)) == NULL)
+ err(1, NULL);
+ X509_NAME_get_text_by_NID(name, NID_commonName,
+ cert->name, len + 1);
+ } else {
+ /* fallback for certificates without CN */
+ cert->name = X509_NAME_oneline(name, NULL, 0);
+ }
cert->path = xstrdup(unexpand_path(path));
if (RB_INSERT(cert_tree, tree, cert) != NULL)
errx(1, "unexpected duplicate");
- info("%08lx: %s", cert->hash, strrchr(cert->name, '=') + 1);
+ info("%08lx: %s", cert->hash, cert->name);
no++;
}
/*
@@ -488,9 +546,10 @@ write_certs(const char *dir, struct cert_tree *tree)
free(tmppath);
tmppath = NULL;
}
+ fflush(f);
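+	/* Flush so ftell() below reports the final size for the metalog. */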
/* emit metalog */
if (mlf != NULL) {
- fprintf(mlf, "%s/%s type=file "
+ fprintf(mlf, ".%s/%s type=file "
"uname=%s gname=%s mode=%#o size=%ld\n",
unexpand_path(dir), path,
uname, gname, mode, ftell(f));
@@ -561,7 +620,7 @@ write_bundle(const char *dir, const char *file, struct cert_tree *tree)
}
if (ret == 0 && mlf != NULL) {
fprintf(mlf,
- "%s/%s type=file uname=%s gname=%s mode=%#o size=%ld\n",
+ ".%s/%s type=file uname=%s gname=%s mode=%#o size=%ld\n",
unexpand_path(dir), file, uname, gname, mode, ftell(f));
}
fclose(f);
@@ -648,7 +707,7 @@ save_trusted(void)
{
int ret;
- /* save untrusted certs */
+ mkdirp(trusted_dest);
ret = write_certs(trusted_dest, &trusted);
return (ret);
}
@@ -663,6 +722,7 @@ save_untrusted(void)
{
int ret;
+ mkdirp(untrusted_dest);
ret = write_certs(untrusted_dest, &untrusted);
return (ret);
}
@@ -684,6 +744,7 @@ save_bundle(void)
} else {
dir = xasprintf("%.*s", (int)(sep - bundle_dest), bundle_dest);
file = sep + 1;
+ mkdirp(dir);
}
ret = write_bundle(dir, file, &trusted);
free(dir);
@@ -925,6 +986,14 @@ set_defaults(void)
if (destdir == NULL &&
(destdir = getenv("DESTDIR")) == NULL)
destdir = "";
+ destdir = normalize_path(destdir);
+
+ if (distbase == NULL &&
+ (distbase = getenv("DISTBASE")) == NULL)
+ distbase = "";
+ if (*distbase != '\0' && *distbase != '/')
+ errx(1, "DISTBASE=%s does not begin with a slash", distbase);
+ distbase = normalize_path(distbase);
if (unprivileged && metalog == NULL &&
(metalog = getenv("METALOG")) == NULL)
@@ -950,22 +1019,23 @@ set_defaults(void)
if ((value = getenv("TRUSTDESTDIR")) != NULL ||
(value = getenv("CERTDESTDIR")) != NULL)
- trusted_dest = xstrdup(value);
+ trusted_dest = normalize_path(value);
else
trusted_dest = expand_path(TRUSTED_PATH);
if ((value = getenv("UNTRUSTDESTDIR")) != NULL)
- untrusted_dest = xstrdup(value);
+ untrusted_dest = normalize_path(value);
else
untrusted_dest = expand_path(UNTRUSTED_PATH);
if ((value = getenv("BUNDLE")) != NULL)
- bundle_dest = xstrdup(value);
+ bundle_dest = normalize_path(value);
else
bundle_dest = expand_path(BUNDLE_PATH);
info("localbase:\t%s", localbase);
info("destdir:\t%s", destdir);
+ info("distbase:\t%s", distbase);
info("unprivileged:\t%s", unprivileged ? "true" : "false");
info("verbose:\t%s", verbose ? "true" : "false");
}
@@ -987,11 +1057,11 @@ static struct {
static void
usage(void)
{
- fprintf(stderr, "usage: certctl [-lv] [-D destdir] list\n"
- " certctl [-lv] [-D destdir] untrusted\n"
- " certctl [-BnUv] [-D destdir] [-M metalog] rehash\n"
- " certctl [-nv] [-D destdir] untrust <file>\n"
- " certctl [-nv] [-D destdir] trust <file>\n");
+ fprintf(stderr, "usage: certctl [-lv] [-D destdir] [-d distbase] list\n"
+ " certctl [-lv] [-D destdir] [-d distbase] untrusted\n"
+ " certctl [-BnUv] [-D destdir] [-d distbase] [-M metalog] rehash\n"
+ " certctl [-nv] [-D destdir] [-d distbase] untrust <file>\n"
+ " certctl [-nv] [-D destdir] [-d distbase] trust <file>\n");
exit(1);
}
@@ -1001,7 +1071,7 @@ main(int argc, char *argv[])
const char *command;
int opt;
- while ((opt = getopt(argc, argv, "BcD:g:lL:M:no:Uv")) != -1)
+ while ((opt = getopt(argc, argv, "BcD:d:g:lL:M:no:Uv")) != -1)
switch (opt) {
case 'B':
nobundle = true;
@@ -1012,6 +1082,9 @@ main(int argc, char *argv[])
case 'D':
destdir = optarg;
break;
+ case 'd':
+ distbase = optarg;
+ break;
case 'g':
gname = optarg;
break;
diff --git a/usr.sbin/certctl/tests/certctl_test.sh b/usr.sbin/certctl/tests/certctl_test.sh
index 4e236d5bfae8..74749db0b3f5 100644
--- a/usr.sbin/certctl/tests/certctl_test.sh
+++ b/usr.sbin/certctl/tests/certctl_test.sh
@@ -60,41 +60,77 @@ ZWUPHYWKKTVEFBJOLLPDAIKGRDFVXZID $collhash
EOF
}
+sortfile() {
+ for filename; do
+ sort "${filename}" >"${filename}"-
+ mv "${filename}"- "${filename}"
+ done
+}
+
certctl_setup()
{
export DESTDIR="$PWD"
# Create input directories
- mkdir -p usr/share/certs/trusted
- mkdir -p usr/share/certs/untrusted
- mkdir -p usr/local/share/certs
+ mkdir -p ${DESTDIR}${DISTBASE}/usr/share/certs/trusted
+ mkdir -p ${DESTDIR}${DISTBASE}/usr/share/certs/untrusted
+ mkdir -p ${DESTDIR}/usr/local/share/certs
- # Create output directories
- mkdir -p etc/ssl/certs
- mkdir -p etc/ssl/untrusted
+ # Do not create output directories; certctl will take care of it
+ #mkdir -p ${DESTDIR}${DISTBASE}/etc/ssl/certs
+ #mkdir -p ${DESTDIR}${DISTBASE}/etc/ssl/untrusted
# Generate a random key
keyname="testkey"
gen_key ${keyname}
# Generate certificates
+ :>metalog.expect
+ :>trusted.expect
+ :>untrusted.expect
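+	# Helpers that record the expected list output and metalog
+	# entries as the certificates are generated below.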
+ metalog() {
+ echo ".${DISTBASE}$@ type=file" >>metalog.expect
+ }
+ trusted() {
+ local crtname=$1
+ local filename=$2
+ printf "%s\t%s\n" "${filename}" "${crtname}" >>trusted.expect
+ metalog "/etc/ssl/certs/${filename}"
+ }
+ untrusted() {
+ local crtname=$1
+ local filename=$2
+ printf "%s\t%s\n" "${filename}" "${crtname}" >>untrusted.expect
+ metalog "/etc/ssl/untrusted/${filename}"
+ }
set1 | while read crtname hash ; do
gen_crt ${crtname} ${keyname}
- mv ${crtname}.crt usr/share/certs/trusted
+ mv ${crtname}.crt ${DESTDIR}${DISTBASE}/usr/share/certs/trusted
+ trusted "${crtname}" "${hash}.0"
done
+ local c=0
coll | while read crtname hash ; do
gen_crt ${crtname} ${keyname}
- mv ${crtname}.crt usr/share/certs/trusted
+ mv ${crtname}.crt ${DESTDIR}${DISTBASE}/usr/share/certs/trusted
+ trusted "${crtname}" "${hash}.${c}"
+ c=$((c+1))
done
set2 | while read crtname hash ; do
gen_crt ${crtname} ${keyname}
openssl x509 -in ${crtname}.crt
rm ${crtname}.crt
+ trusted "${crtname}" "${hash}.0"
done >usr/local/share/certs/bundle.crt
set3 | while read crtname hash ; do
gen_crt ${crtname} ${keyname}
- mv ${crtname}.crt usr/share/certs/untrusted
+ mv ${crtname}.crt ${DESTDIR}${DISTBASE}/usr/share/certs/untrusted
+ untrusted "${crtname}" "${hash}.0"
done
+ metalog "/etc/ssl/cert.pem"
+ unset -f untrusted
+ unset -f trusted
+ unset -f metalog
+ sortfile *.expect
}
check_trusted() {
@@ -102,12 +138,12 @@ check_trusted() {
local subject="$(subject ${crtname})"
local c=${2:-1}
- atf_check -o match:"found: ${c}\$" \
+ atf_check -e ignore -o match:"found: ${c}\$" \
openssl storeutl -noout -subject "${subject}" \
- etc/ssl/certs
- atf_check -o match:"found: 0\$" \
+ ${DESTDIR}${DISTBASE}/etc/ssl/certs
+ atf_check -e ignore -o not-match:"found: [1-9]" \
openssl storeutl -noout -subject "${subject}" \
- etc/ssl/untrusted
+ ${DESTDIR}${DISTBASE}/etc/ssl/untrusted
}
check_untrusted() {
@@ -115,23 +151,25 @@ check_untrusted() {
local subject="$(subject ${crtname})"
local c=${2:-1}
- atf_check -o match:"found: 0\$" \
+ atf_check -e ignore -o not-match:"found: [1-9]" \
openssl storeutl -noout -subject "${subject}" \
- etc/ssl/certs
- atf_check -o match:"found: ${c}\$" \
+		${DESTDIR}${DISTBASE}/etc/ssl/certs
+ atf_check -e ignore -o match:"found: ${c}\$" \
openssl storeutl -noout -subject "${subject}" \
- etc/ssl/untrusted
+		${DESTDIR}${DISTBASE}/etc/ssl/untrusted
}
check_in_bundle() {
+ local b=${DISTBASE}${DISTBASE+/}
local crtfile=$1
local line
line=$(tail +5 "${crtfile}" | head -1)
- atf_check grep -q "${line}" etc/ssl/cert.pem
+ atf_check grep -q "${line}" ${DESTDIR}${DISTBASE}/etc/ssl/cert.pem
}
check_not_in_bundle() {
+ local b=${DISTBASE}${DISTBASE+/}
local crtfile=$1
local line
@@ -150,7 +188,7 @@ rehash_body()
atf_check certctl rehash
# Verify non-colliding trusted certificates
- (set1 ; set2) > trusted
+ (set1; set2) >trusted
while read crtname hash ; do
check_trusted "${crtname}"
done <trusted
@@ -167,7 +205,7 @@ rehash_body()
check_untrusted "${crtname}"
done <untrusted
- # Verify bundle; storeutl is no help here
+ # Verify bundle
for f in etc/ssl/certs/*.? ; do
check_in_bundle "${f}"
done
@@ -176,6 +214,31 @@ rehash_body()
done
}
+atf_test_case list
+list_head()
+{
+ atf_set "descr" "Test the list and untrusted commands"
+}
+list_body()
+{
+ certctl_setup
+ atf_check certctl rehash
+
+ atf_check -o save:trusted.out certctl list
+ sortfile trusted.out
+ # the ordering of the colliding certificates is partly
+ # determined by fields that change every time we regenerate
+ # them, so ignore them in the diff
+ atf_check diff -u \
+ --ignore-matching-lines $collhash \
+ trusted.expect trusted.out
+
+ atf_check -o save:untrusted.out certctl untrusted
+ sortfile untrusted.out
+ atf_check diff -u \
+ untrusted.expect untrusted.out
+}
+
atf_test_case trust
trust_head()
{
@@ -185,7 +248,7 @@ trust_body()
{
certctl_setup
atf_check certctl rehash
- crtname=NJWIRLPWAIICVJBKXXHFHLCPAERZATRL
+ crtname=$(set3 | (read crtname hash ; echo ${crtname}))
crtfile=usr/share/certs/untrusted/${crtname}.crt
check_untrusted ${crtname}
check_not_in_bundle ${crtfile}
@@ -204,7 +267,7 @@ untrust_body()
{
certctl_setup
atf_check certctl rehash
- crtname=AVOYKJHSLFHWPVQMKBHENUAHJTEGMCCB
+ crtname=$(set1 | (read crtname hash ; echo ${crtname}))
crtfile=usr/share/certs/trusted/${crtname}.crt
check_trusted "${crtname}"
check_in_bundle ${crtfile}
@@ -213,9 +276,57 @@ untrust_body()
check_not_in_bundle ${crtfile}
}
+atf_test_case metalog
+metalog_head()
+{
+ atf_set "descr" "Verify the metalog"
+}
+metalog_body()
+{
+ export DISTBASE=/base
+ certctl_setup
+
+ # certctl gets DESTDIR and DISTBASE from environment
+ rm -f metalog.orig
+ atf_check certctl -U -M metalog.orig rehash
+ sed -E 's/(type=file) .*/\1/' metalog.orig | sort >metalog.short
+ atf_check diff -u metalog.expect metalog.short
+
+ # certctl gets DESTDIR and DISTBASE from command line
+ rm -f metalog.orig
+ atf_check env -uDESTDIR -uDISTBASE \
+ certctl -D ${DESTDIR} -d ${DISTBASE} -U -M metalog.orig rehash
+ sed -E 's/(type=file) .*/\1/' metalog.orig | sort >metalog.short
+ atf_check diff -u metalog.expect metalog.short
+
+ # as above, but intentionally add trailing slashes
+ rm -f metalog.orig
+ atf_check env -uDESTDIR -uDISTBASE \
+ certctl -D ${DESTDIR}// -d ${DISTBASE}/ -U -M metalog.orig rehash
+ sed -E 's/(type=file) .*/\1/' metalog.orig | sort >metalog.short
+ atf_check diff -u metalog.expect metalog.short
+}
+
+atf_test_case misc
+misc_head()
+{
+ atf_set "descr" "Test miscellaneous edge cases"
+}
+misc_body()
+{
+ # certctl rejects DISTBASE that does not begin with a slash
+ atf_check -s exit:1 -e match:"begin with a slash" \
+ certctl -d base -n rehash
+ atf_check -s exit:1 -e match:"begin with a slash" \
+ env DISTBASE=base certctl -n rehash
+}
+
atf_init_test_cases()
{
atf_add_test_case rehash
+ atf_add_test_case list
atf_add_test_case trust
atf_add_test_case untrust
+ atf_add_test_case metalog
+ atf_add_test_case misc
}
diff --git a/usr.sbin/freebsd-update/freebsd-update.sh b/usr.sbin/freebsd-update/freebsd-update.sh
index c388e76644dc..81040431ea79 100644
--- a/usr.sbin/freebsd-update/freebsd-update.sh
+++ b/usr.sbin/freebsd-update/freebsd-update.sh
@@ -3000,7 +3000,7 @@ install_from_index () {
if [ -z "${LINK}" ]; then
# Create a file, without setting flags.
gunzip < files/${HASH}.gz > ${HASH}
- install -S -o ${OWNER} -g ${GROUP} \
+ install -o ${OWNER} -g ${GROUP} \
-m ${PERM} ${HASH} ${BASEDIR}/${FPATH}
rm ${HASH}
else
diff --git a/usr.sbin/jail/config.c b/usr.sbin/jail/config.c
index 70de82e662e7..1bad04ccde68 100644
--- a/usr.sbin/jail/config.c
+++ b/usr.sbin/jail/config.c
@@ -189,7 +189,7 @@ load_config(const char *cfname)
* jail is created or found.
*/
if (j->intparams[KP_NAME] == NULL)
- add_param(j, j->intparams[KP_JID], KP_NAME, NULL);
+ add_param(j, NULL, KP_NAME, j->name);
/* Resolve any variable substitutions. */
pgen = 0;
diff --git a/usr.sbin/jail/tests/jail_basic_test.sh b/usr.sbin/jail/tests/jail_basic_test.sh
index 509900e8569c..6802da7b049a 100755
--- a/usr.sbin/jail/tests/jail_basic_test.sh
+++ b/usr.sbin/jail/tests/jail_basic_test.sh
@@ -198,7 +198,7 @@ clean_jails()
fi
while read jail; do
- if jls -e -j "$jail"; then
+ if jls -c -j "$jail"; then
jail -r "$jail"
fi
done < jails.lst
@@ -211,10 +211,23 @@ jid_name_set_body()
echo "basejail" >> jails.lst
echo "$jid { name = basejail; persist; }" > jail.conf
atf_check -o match:"$jid: created" jail -f jail.conf -c "$jid"
+ # Confirm that we didn't override the explicitly-set name with the jid
+ # as the name.
+ atf_check -o match:"basejail" jls -j "$jid" name
+ atf_check -o match:"$jid: removed" jail -f jail.conf -r "$jid"
+
+ echo "$jid { host.hostname = \"\${name}\"; persist; }" > jail.conf
+ atf_check -o match:"$jid: created" jail -f jail.conf -c "$jid"
+	# Confirm that ${name} was expanded, and that it expanded to the
+ # jid-implied name.
+ atf_check -o match:"$jid" jls -j "$jid" host.hostname
atf_check -o match:"$jid: removed" jail -f jail.conf -r "$jid"
echo "basejail { jid = $jid; persist; }" > jail.conf
atf_check -o match:"basejail: created" jail -f jail.conf -c basejail
+	# Confirm that our jid assignment in the definition worked out and we
+	# did in fact create the jail there.
+ atf_check -o match:"$jid" jls -j "basejail" jid
atf_check -o match:"basejail: removed" jail -f jail.conf -r basejail
}
diff --git a/usr.sbin/pw/pw.8 b/usr.sbin/pw/pw.8
index c72623ee05b3..5eae810b6732 100644
--- a/usr.sbin/pw/pw.8
+++ b/usr.sbin/pw/pw.8
@@ -22,7 +22,7 @@
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
-.Dd July 29, 2024
+.Dd August 19, 2025
.Dt PW 8
.Os
.Sh NAME
@@ -191,7 +191,12 @@ utility handles updating the
.Xr master.passwd 5 ,
.Xr group 5
and the secure and insecure
-password database files, and must be run as root.
+password database files, and must be run as root
+.Po except when using
+.Fl R
+or
+.Fl V
+.Pc .
.Pp
The first one or two keywords provided to
.Nm
diff --git a/usr.sbin/pw/pw.c b/usr.sbin/pw/pw.c
index fc17f6dba022..a4c95258f3bb 100644
--- a/usr.sbin/pw/pw.c
+++ b/usr.sbin/pw/pw.c
@@ -162,6 +162,7 @@ main(int argc, char *argv[])
snprintf(conf.etcpath, sizeof(conf.etcpath),
"%s%s", optarg, arg == 'R' ?
_PATH_PWD : "");
+ conf.altroot = true;
} else
break;
}
diff --git a/usr.sbin/pw/pw_user.c b/usr.sbin/pw/pw_user.c
index d9fd8c77c13e..8a9a4342f5ef 100644
--- a/usr.sbin/pw/pw_user.c
+++ b/usr.sbin/pw/pw_user.c
@@ -238,6 +238,13 @@ perform_chgpwent(const char *name, struct passwd *pwd, char *nispasswd)
}
}
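+/*
+ * Operations on an alternate root or config directory (-R or -V, which
+ * set conf.altroot) do not require root privileges.
+ */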
+static void
+pw_check_root(void)
+{
+ if (!conf.altroot && geteuid() != 0)
+ errx(EX_NOPERM, "you must be root");
+}
+
/*
* The M_LOCK and M_UNLOCK functions simply add or remove
* a "*LOCKED*" prefix from in front of the password to
@@ -256,8 +263,7 @@ pw_userlock(char *arg1, int mode)
bool locked = false;
uid_t id = (uid_t)-1;
- if (geteuid() != 0)
- errx(EX_NOPERM, "you must be root");
+ pw_check_root();
if (arg1 == NULL)
errx(EX_DATAERR, "username or id required");
@@ -1324,8 +1330,8 @@ pw_user_add(int argc, char **argv, char *arg1)
if (argc > 0)
usage();
- if (geteuid() != 0 && ! dryrun)
- errx(EX_NOPERM, "you must be root");
+ if (!dryrun)
+ pw_check_root();
if (quiet)
freopen(_PATH_DEVNULL, "w", stderr);
@@ -1641,8 +1647,8 @@ pw_user_mod(int argc, char **argv, char *arg1)
if (argc > 0)
usage();
- if (geteuid() != 0 && ! dryrun)
- errx(EX_NOPERM, "you must be root");
+ if (!dryrun)
+ pw_check_root();
if (quiet)
freopen(_PATH_DEVNULL, "w", stderr);
diff --git a/usr.sbin/pw/pwupd.h b/usr.sbin/pw/pwupd.h
index 262b044e07fc..a39a022ca309 100644
--- a/usr.sbin/pw/pwupd.h
+++ b/usr.sbin/pw/pwupd.h
@@ -78,6 +78,7 @@ struct pwconf {
char etcpath[MAXPATHLEN];
int fd;
int rootfd;
+ bool altroot;
bool checkduplicate;
};
diff --git a/usr.sbin/rpc.ypupdated/Makefile b/usr.sbin/rpc.ypupdated/Makefile
new file mode 100644
index 000000000000..78ee19fc7a6d
--- /dev/null
+++ b/usr.sbin/rpc.ypupdated/Makefile
@@ -0,0 +1,32 @@
+.PATH: ${SRCTOP}/usr.sbin/ypserv ${SRCTOP}/libexec/ypxfr
+
+PACKAGE=yp
+PROG= rpc.ypupdated
+MAN=
+SRCS= ypupdate_prot_svc.c ypupdate_prot.h ypupdated_main.c \
+ yp_error.c update.c ypupdated_server.c \
+ yp_dblookup.c yp_dbwrite.c yp_dbdelete.c yp_dbupdate.c
+
+#CFLAGS+= -DYP
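+# Defining YP selects the NIS mapupdate() path in update.c instead of
+# the flat-file localupdate() path.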
+CFLAGS+= -I${SRCTOP}/usr.sbin/ypserv -I. -I${SRCTOP}/libexec/ypxfr
+
+WARNS?= 1
+
+LIBADD= rpcsvc
+
+CLEANFILES= ypupdate_prot_svc.c ypupdate_prot.h
+
+RPCDIR= ${SYSROOT:U${DESTDIR}}/usr/include/rpcsvc
+RPCGEN= RPCGEN_CPP=${CPP:Q} rpcgen -I -C
+
+# We need to remove the 'static' keyword from _rpcsvcstate so that
+# ypupdated_main.c can see it.
+ypupdate_prot_svc.c: ${RPCDIR}/ypupdate_prot.x
+ rm -f ${.TARGET}
+ ${RPCGEN} -m ${.ALLSRC} | \
+ sed s/"static int _rpcsvcstate"/"int _rpcsvcstate"/g > ${.TARGET}
+
+ypupdate_prot.h: ${RPCDIR}/ypupdate_prot.x
+ ${RPCGEN} -h -o ${.TARGET} ${.ALLSRC}
+
+.include <bsd.prog.mk>
diff --git a/usr.sbin/rpc.ypupdated/Makefile.depend b/usr.sbin/rpc.ypupdated/Makefile.depend
new file mode 100644
index 000000000000..352a225b19c6
--- /dev/null
+++ b/usr.sbin/rpc.ypupdated/Makefile.depend
@@ -0,0 +1,18 @@
+# Autogenerated - do NOT edit!
+
+DIRDEPS = \
+ include \
+ include/rpc \
+ include/rpcsvc \
+ include/xlocale \
+ lib/${CSU_DIR} \
+ lib/libc \
+ lib/libcompiler_rt \
+ lib/librpcsvc \
+
+
+.include <dirdeps.mk>
+
+.if ${DEP_RELDIR} == ${_DEP_RELDIR}
+# local dependencies - needed for -jN in clean tree
+.endif
diff --git a/usr.sbin/rpc.ypupdated/update.c b/usr.sbin/rpc.ypupdated/update.c
new file mode 100644
index 000000000000..332ed752acc4
--- /dev/null
+++ b/usr.sbin/rpc.ypupdated/update.c
@@ -0,0 +1,328 @@
+/*
+ * Sun RPC is a product of Sun Microsystems, Inc. and is provided for
+ * unrestricted use provided that this legend is included on all tape
+ * media and as a part of the software program in whole or part. Users
+ * may copy or modify Sun RPC without charge, but are not authorized
+ * to license or distribute it to anyone else except as part of a product or
+ * program developed by the user or with the express written consent of
+ * Sun Microsystems, Inc.
+ *
+ * SUN RPC IS PROVIDED AS IS WITH NO WARRANTIES OF ANY KIND INCLUDING THE
+ * WARRANTIES OF DESIGN, MERCHANTIBILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE, OR ARISING FROM A COURSE OF DEALING, USAGE OR TRADE PRACTICE.
+ *
+ * Sun RPC is provided with no support and without any obligation on the
+ * part of Sun Microsystems, Inc. to assist in its use, correction,
+ * modification or enhancement.
+ *
+ * SUN MICROSYSTEMS, INC. SHALL HAVE NO LIABILITY WITH RESPECT TO THE
+ * INFRINGEMENT OF COPYRIGHTS, TRADE SECRETS OR ANY PATENTS BY SUN RPC
+ * OR ANY PART THEREOF.
+ *
+ * In no event will Sun Microsystems, Inc. be liable for any lost revenue
+ * or profits or other special, indirect and consequential damages, even if
+ * Sun has been advised of the possibility of such damages.
+ *
+ * Sun Microsystems, Inc.
+ * 2550 Garcia Avenue
+ * Mountain View, California 94043
+ */
+
+/*
+ * Copyright (C) 1986, 1989, Sun Microsystems, Inc.
+ */
+
+/*
+ * Administrative tool to add a new user to the publickey database
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <rpc/rpc.h>
+#include <rpc/key_prot.h>
+#ifdef YP
+#include <rpcsvc/yp_prot.h>
+#include <rpcsvc/ypclnt.h>
+#include <sys/wait.h>
+#include <netdb.h>
+#endif /* YP */
+#include <pwd.h>
+#include <string.h>
+#include <sys/resource.h>
+#include "ypupdated_extern.h"
+
+#ifdef YP
+#define MAXMAPNAMELEN 256
+#else
+#define YPOP_CHANGE 1 /* change, do not add */
+#define YPOP_INSERT 2 /* add, do not change */
+#define YPOP_DELETE 3 /* delete this entry */
+#define YPOP_STORE 4 /* add, or change */
+#endif
+
+#ifdef YP
+static char SHELL[] = "/bin/sh";
+static char YPDBPATH[]="/var/yp";
+static char PKMAP[] = "publickey.byname";
+static char UPDATEFILE[] = "updaters";
+static char PKFILE[] = "/etc/publickey";
+#endif /* YP */
+
+#ifdef YP
+static int _openchild(char *, FILE **, FILE **);
+static char *basename(char *);
+
+/*
+ * Determine if requester is allowed to update the given map,
+ * and update it if so. Returns the yp status, which is zero
+ * if there is no access violation.
+ */
+int
+mapupdate(char *requester, char *mapname, u_int op, u_int keylen, char *key,
+ u_int datalen, char *data)
+{
+ char updater[MAXMAPNAMELEN + 40];
+ FILE *childargs;
+ FILE *childrslt;
+#ifdef WEXITSTATUS
+ int status;
+#else
+ union wait status;
+#endif
+ pid_t pid;
+ u_int yperrno;
+
+#ifdef DEBUG
+ printf("%s %s\n", key, data);
+#endif
+ (void)sprintf(updater, "make -s -f %s/%s %s", YPDBPATH, /* !!! */
+ UPDATEFILE, mapname);
+ pid = _openchild(updater, &childargs, &childrslt);
+ if (pid < 0) {
+ return (YPERR_YPERR);
+ }
+
+ /*
+ * Write to child
+ */
+ (void)fprintf(childargs, "%s\n", requester);
+ (void)fprintf(childargs, "%u\n", op);
+ (void)fprintf(childargs, "%u\n", keylen);
+ (void)fwrite(key, (int)keylen, 1, childargs);
+ (void)fprintf(childargs, "\n");
+ (void)fprintf(childargs, "%u\n", datalen);
+ (void)fwrite(data, (int)datalen, 1, childargs);
+ (void)fprintf(childargs, "\n");
+ (void)fclose(childargs);
+
+ /*
+ * Read from child
+ */
+	(void)fscanf(childrslt, "%u", &yperrno);
+ (void)fclose(childrslt);
+
+ (void)wait(&status);
+#ifdef WEXITSTATUS
+ if (WEXITSTATUS(status) != 0)
+#else
+ if (status.w_retcode != 0)
+#endif
+ return (YPERR_YPERR);
+ return (yperrno);
+}
+
+/*
+ * returns pid, or -1 for failure
+ */
+static int
+_openchild(char *command, FILE **fto, FILE **ffrom)
+{
+ int i;
+ pid_t pid;
+ int pdto[2];
+ int pdfrom[2];
+ char *com;
+ struct rlimit rl;
+
+ if (pipe(pdto) < 0) {
+ goto error1;
+ }
+ if (pipe(pdfrom) < 0) {
+ goto error2;
+ }
+ switch (pid = fork()) {
+ case -1:
+ goto error3;
+
+ case 0:
+ /*
+ * child: read from pdto[0], write into pdfrom[1]
+ */
+ (void)close(0);
+ (void)dup(pdto[0]);
+ (void)close(1);
+ (void)dup(pdfrom[1]);
+ getrlimit(RLIMIT_NOFILE, &rl);
+ for (i = rl.rlim_max - 1; i >= 3; i--) {
+ (void) close(i);
+ }
+ com = malloc((unsigned) strlen(command) + 6);
+ if (com == NULL) {
+ _exit(~0);
+ }
+ (void)sprintf(com, "exec %s", command);
+ execl(SHELL, basename(SHELL), "-c", com, (char *)NULL);
+ _exit(~0);
+
+ default:
+ /*
+ * parent: write into pdto[1], read from pdfrom[0]
+ */
+ *fto = fdopen(pdto[1], "w");
+ (void)close(pdto[0]);
+ *ffrom = fdopen(pdfrom[0], "r");
+ (void)close(pdfrom[1]);
+ break;
+ }
+ return (pid);
+
+ /*
+ * error cleanup and return
+ */
+error3:
+ (void)close(pdfrom[0]);
+ (void)close(pdfrom[1]);
+error2:
+ (void)close(pdto[0]);
+ (void)close(pdto[1]);
+error1:
+ return (-1);
+}
+
+static char *
+basename(char *path)
+{
+ char *p;
+
+ p = strrchr(path, '/');
+ if (p == NULL) {
+ return (path);
+ } else {
+ return (p + 1);
+ }
+}
+
+#else /* YP */
+
+static int match(char *, char *);
+
+/*
+ * Determine if requester is allowed to update the given map,
+ * and update it if so. Returns the status, which is zero
+ * if there is no access violation. This function updates
+ * the local file and then shuts up.
+ */
+int
+localupdate(char *name, char *filename, u_int op, u_int keylen __unused,
+ char *key, u_int datalen __unused, char *data)
+{
+ char line[256];
+ FILE *rf;
+ FILE *wf;
+ char *tmpname;
+ int err;
+
+ /*
+ * Check permission
+ */
+ if (strcmp(name, key) != 0) {
+ return (ERR_ACCESS);
+ }
+ if (strcmp(name, "nobody") == 0) {
+ /*
+ * Can't change "nobody"s key.
+ */
+ return (ERR_ACCESS);
+ }
+
+ /*
+ * Open files
+ */
+ tmpname = malloc(strlen(filename) + 4);
+ if (tmpname == NULL) {
+ return (ERR_MALLOC);
+ }
+ sprintf(tmpname, "%s.tmp", filename);
+ rf = fopen(filename, "r");
+ if (rf == NULL) {
+ err = ERR_READ;
+ goto cleanup;
+ }
+ wf = fopen(tmpname, "w");
+ if (wf == NULL) {
+ fclose(rf);
+ err = ERR_WRITE;
+ goto cleanup;
+ }
+ err = -1;
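+	/* err stays -1 until a line matching the key has been handled. */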
+ while (fgets(line, sizeof (line), rf)) {
+ if (err < 0 && match(line, name)) {
+ switch (op) {
+ case YPOP_INSERT:
+ err = ERR_KEY;
+ break;
+ case YPOP_STORE:
+ case YPOP_CHANGE:
+ fprintf(wf, "%s %s\n", key, data);
+ err = 0;
+ break;
+ case YPOP_DELETE:
+ /* do nothing */
+ err = 0;
+ break;
+ }
+ } else {
+ fputs(line, wf);
+ }
+ }
+ if (err < 0) {
+ switch (op) {
+ case YPOP_CHANGE:
+ case YPOP_DELETE:
+ err = ERR_KEY;
+ break;
+ case YPOP_INSERT:
+ case YPOP_STORE:
+ err = 0;
+ fprintf(wf, "%s %s\n", key, data);
+ break;
+ }
+ }
+ fclose(wf);
+ fclose(rf);
+ if (err == 0) {
+ if (rename(tmpname, filename) < 0) {
+ err = ERR_DBASE;
+ goto cleanup;
+ }
+ } else {
+ if (unlink(tmpname) < 0) {
+ err = ERR_DBASE;
+ goto cleanup;
+ }
+ }
+cleanup:
+ free(tmpname);
+ return (err);
+}
+
+static int
+match(char *line, char *name)
+{
+ int len;
+
+ len = strlen(name);
+ return (strncmp(line, name, len) == 0 &&
+ (line[len] == ' ' || line[len] == '\t'));
+}
+#endif /* !YP */
diff --git a/usr.sbin/rpc.ypupdated/yp_dbdelete.c b/usr.sbin/rpc.ypupdated/yp_dbdelete.c
new file mode 100644
index 000000000000..d07a8fcd2ff9
--- /dev/null
+++ b/usr.sbin/rpc.ypupdated/yp_dbdelete.c
@@ -0,0 +1,68 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 1995, 1996
+ * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Bill Paul.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <string.h>
+#include <limits.h>
+#include <unistd.h>
+#include <db.h>
+#include <sys/stat.h>
+#include <errno.h>
+#include <paths.h>
+#include <rpcsvc/yp.h>
+#include "ypxfr_extern.h"
+
+int
+yp_del_record(DB *dbp, DBT *key)
+{
+ int rval;
+
+ if ((rval = (dbp->del)(dbp,key,0))) {
+ switch (rval) {
+ case 1:
+ return(YP_FALSE);
+ break;
+ case -1:
+ default:
+ (void)(dbp->close)(dbp);
+ return(YP_BADDB);
+ break;
+ }
+ }
+
+ return(YP_TRUE);
+}
diff --git a/usr.sbin/rpc.ypupdated/yp_dbupdate.c b/usr.sbin/rpc.ypupdated/yp_dbupdate.c
new file mode 100644
index 000000000000..af17bf5244ef
--- /dev/null
+++ b/usr.sbin/rpc.ypupdated/yp_dbupdate.c
@@ -0,0 +1,147 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 1996
+ * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Bill Paul.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include <sys/fcntl.h>
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <limits.h>
+#include <db.h>
+#include <time.h>
+#include <unistd.h>
+#include <rpcsvc/ypclnt.h>
+#include <rpcsvc/ypupdate_prot.h>
+#include "ypxfr_extern.h"
+#include "ypupdated_extern.h"
+
+static int
+yp_domake(char *map, char *domain)
+{
+ int pid;
+
+ switch ((pid = fork())) {
+ case 0:
+ execlp(MAP_UPDATE_PATH, MAP_UPDATE, map, domain, (char *)NULL);
+ yp_error("couldn't exec map update process: %s",
+ strerror(errno));
+ exit(1);
+ break;
+ case -1:
+ yp_error("fork() failed: %s", strerror(errno));
+ return(YPERR_YPERR);
+ break;
+ default:
+ children++;
+ break;
+ }
+
+ return(0);
+}
+
+int
+ypmap_update(char *netname, char *map, unsigned int op, unsigned int keylen,
+ char *keyval, unsigned int datlen, char *datval)
+{
+ DB *dbp;
+ DBT key = { NULL, 0 }, data = { NULL, 0 };
+ char *yp_last = "YP_LAST_MODIFIED";
+ char yplastbuf[32];
+ char *domptr;
+ int rval = 0;
+
+ if ((domptr = strchr(netname, '@')) == NULL)
+ return(ERR_ACCESS);
+ domptr++;
+
+ dbp = yp_open_db_rw(domptr, map, O_RDWR);
+ if (dbp == NULL)
+ return(ERR_DBASE);
+
+ key.data = keyval;
+ key.size = keylen;
+ data.data = datval;
+ data.size = datlen;
+
+ switch (op) {
+ case YPOP_DELETE: /* delete this entry */
+ rval = yp_del_record(dbp, &key);
+ if (rval == YP_TRUE)
+ rval = 0;
+ break;
+ case YPOP_INSERT: /* add, do not change */
+ rval = yp_put_record(dbp, &key, &data, 0);
+ if (rval == YP_TRUE)
+ rval = 0;
+ break;
+ case YPOP_STORE: /* add, or change */
+ rval = yp_put_record(dbp, &key, &data, 1);
+ if (rval == YP_TRUE)
+ rval = 0;
+ break;
+ case YPOP_CHANGE: /* change, do not add */
+ if (yp_get_record(domptr, map, &key, &data, 0) != YP_TRUE) {
+ rval = ERR_KEY;
+ break;
+ }
+ rval = yp_put_record(dbp, &key, &data, 1);
+ if (rval == YP_TRUE)
+ rval = 0;
+ break;
+ default:
+ yp_error("unknown update command: (%d)", op);
+ }
+
+ if (rval) {
+ (void)(dbp->close)(dbp);
+ return(rval);
+ }
+
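+	/*
+	 * Bump the map's YP_LAST_MODIFIED order number so that slave
+	 * servers can tell the map has changed.
+	 */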
+ snprintf(yplastbuf, sizeof(yplastbuf), "%jd", (intmax_t)time(NULL));
+ key.data = yp_last;
+ key.size = strlen(yp_last);
+ data.data = (char *)&yplastbuf;
+ data.size = strlen(yplastbuf);
+ if (yp_put_record(dbp, &key, &data, 1) != YP_TRUE) {
+ yp_error("failed to update timestamp in %s/%s", domptr, map);
+ (void)(dbp->close)(dbp);
+ return(ERR_DBASE);
+ }
+
+ (void)(dbp->close)(dbp);
+ return(yp_domake(map, domptr));
+}
diff --git a/usr.sbin/rpc.ypupdated/ypupdate b/usr.sbin/rpc.ypupdated/ypupdate
new file mode 100755
index 000000000000..8795ef3baf80
--- /dev/null
+++ b/usr.sbin/rpc.ypupdated/ypupdate
@@ -0,0 +1,32 @@
+#!/bin/sh
+#
+# This script is invoked by rpc.ypupdated to propagate NIS maps
+# after the master map databases have been modified. It expects
+# to be passed two arguments: the name of the map that was updated
+# and the name of the domain where the map resides.
+# These are passed to /var/yp/Makefile.
+#
+# Comment out the LOG=yes line to disable logging.
+#
+#
+
+LOG=yes
+LOGFILE=/var/yp/ypupdate.log
+
+umask 077
+
+if [ ! -f $LOGFILE ];
+then
+ /usr/bin/touch $LOGFILE
+	echo "# Edit /usr/libexec/ypupdate to disable" >> $LOGFILE
+	echo "# logging to this file from rpc.ypupdated." >> $LOGFILE
+ echo -n "# Log started on: " >> $LOGFILE
+ /bin/date >> $LOGFILE
+fi
+
+if [ ! $LOG ];
+then
+ cd /var/yp/$2; /usr/bin/make -f ../Makefile $1 2>&1
+else
+ cd /var/yp/$2; /usr/bin/make -f ../Makefile $1 >> $LOGFILE
+fi
diff --git a/usr.sbin/rpc.ypupdated/ypupdated_extern.h b/usr.sbin/rpc.ypupdated/ypupdated_extern.h
new file mode 100644
index 000000000000..90968df36748
--- /dev/null
+++ b/usr.sbin/rpc.ypupdated/ypupdated_extern.h
@@ -0,0 +1,32 @@
+/*
+ */
+
+#include <db.h>
+
+#define YPOP_CHANGE 1 /* change, do not add */
+#define YPOP_INSERT 2 /* add, do not change */
+#define YPOP_DELETE 3 /* delete this entry */
+#define YPOP_STORE 4 /* add, or change */
+
+#define ERR_ACCESS 1
+#define ERR_MALLOC 2
+#define ERR_READ 3
+#define ERR_WRITE 4
+#define ERR_DBASE 5
+#define ERR_KEY 6
+
+#ifndef YPLIBDIR
+#define YPLIBDIR "/usr/libexec/"
+#endif
+
+#ifndef MAP_UPDATE
+#define MAP_UPDATE "ypupdate"
+#endif
+
+#define MAP_UPDATE_PATH YPLIBDIR MAP_UPDATE
+
+extern int children;
+extern void ypu_prog_1(struct svc_req *, register SVCXPRT *);
+extern int localupdate(char *, char *, u_int, u_int, char *, u_int, char *);
+extern int ypmap_update(char *, char *, u_int, u_int, char *, u_int, char *);
+extern int yp_del_record(DB *, DBT *);
diff --git a/usr.sbin/rpc.ypupdated/ypupdated_main.c b/usr.sbin/rpc.ypupdated/ypupdated_main.c
new file mode 100644
index 000000000000..dae956594da0
--- /dev/null
+++ b/usr.sbin/rpc.ypupdated/ypupdated_main.c
@@ -0,0 +1,287 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 1995, 1996
+ * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Bill Paul.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+#include "ypupdate_prot.h"
+#include <stdio.h>
+#include <stdlib.h> /* getenv, exit */
+#include <rpc/pmap_clnt.h> /* for pmap_unset */
+#include <rpc/rpc_com.h>
+#include <string.h> /* strcmp */
+#include <signal.h>
+#ifdef __cplusplus
+#include <sysent.h> /* getdtablesize, open */
+#endif /* __cplusplus */
+#include <memory.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <syslog.h>
+#include <sys/wait.h>
+#include <errno.h>
+#include <err.h>
+#include <unistd.h>
+#include "ypupdated_extern.h"
+#include "yp_extern.h"
+
+#ifndef SIG_PF
+#define SIG_PF void(*)(int)
+#endif
+
+#ifdef DEBUG
+#define RPC_SVC_FG
+#endif
+
+#define _RPCSVC_CLOSEDOWN 120
+int _rpcpmstart; /* Started by a port monitor ? */
+static int _rpcfdtype;
+ /* Whether Stream or Datagram ? */
+ /* States a server can be in wrt request */
+
+#define _IDLE 0
+#define _SERVED 1
+#define _SERVING 2
+
+extern int _rpcsvcstate; /* Set when a request is serviced */
+
+int debug;
+
+char *progname = "rpc.ypupdated";
+char *yp_dir = "/var/yp/";
+
+static void
+_msgout(char* msg)
+{
+#ifdef RPC_SVC_FG
+ if (_rpcpmstart)
+ syslog(LOG_ERR, "%s", msg);
+ else
+ warnx("%s", msg);
+#else
+ syslog(LOG_ERR, "%s", msg);
+#endif
+}
+
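+/*
+ * When started by a port monitor, exit once the service has been idle
+ * for _RPCSVC_CLOSEDOWN seconds.
+ */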
+static void
+closedown(int sig)
+{
+ if (_rpcsvcstate == _IDLE) {
+ extern fd_set svc_fdset;
+ static int size;
+ int i, openfd;
+
+ if (_rpcfdtype == SOCK_DGRAM)
+ exit(0);
+ if (size == 0) {
+ size = getdtablesize();
+ }
+ for (i = 0, openfd = 0; i < size && openfd < 2; i++)
+ if (FD_ISSET(i, &svc_fdset))
+ openfd++;
+ if (openfd <= 1)
+ exit(0);
+ }
+ if (_rpcsvcstate == _SERVED)
+ _rpcsvcstate = _IDLE;
+
+ (void) signal(SIGALRM, (SIG_PF) closedown);
+ (void) alarm(_RPCSVC_CLOSEDOWN/2);
+}
+
+static void
+ypupdated_svc_run(void)
+{
+#ifdef FD_SETSIZE
+ fd_set readfds;
+#else
+ int readfds;
+#endif /* def FD_SETSIZE */
+ extern int forked;
+ int pid;
+ int fd_setsize = _rpc_dtablesize();
+
+ /* Establish the identity of the parent ypupdated process. */
+ pid = getpid();
+
+ for (;;) {
+#ifdef FD_SETSIZE
+ readfds = svc_fdset;
+#else
+ readfds = svc_fds;
+#endif /* def FD_SETSIZE */
+ switch (select(fd_setsize, &readfds, NULL, NULL,
+ (struct timeval *)0)) {
+ case -1:
+ if (errno == EINTR) {
+ continue;
+ }
+ warn("svc_run: - select failed");
+ return;
+ case 0:
+ continue;
+ default:
+ svc_getreqset(&readfds);
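+			/* Forked children exit after servicing their request. */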
+ if (forked && pid != getpid())
+ exit(0);
+ }
+ }
+}
+
+static void
+reaper(int sig)
+{
+ int status;
+
+ if (sig == SIGHUP) {
+#ifdef foo
+ load_securenets();
+#endif
+ return;
+ }
+
+ if (sig == SIGCHLD) {
+ while (wait3(&status, WNOHANG, NULL) > 0)
+ children--;
+ } else {
+ (void) pmap_unset(YPU_PROG, YPU_VERS);
+ exit(0);
+ }
+}
+
+void
+usage(void)
+{
+	fprintf(stderr, "usage: rpc.ypupdated [-p path]\n");
+	exit(1);
+}
+
+int
+main(int argc, char *argv[])
+{
+ register SVCXPRT *transp = NULL;
+ int sock;
+ int proto = 0;
+ struct sockaddr_in saddr;
+	socklen_t asize = sizeof(saddr);
+ int ch;
+
+ while ((ch = getopt(argc, argv, "p:h")) != -1) {
+ switch (ch) {
+ case 'p':
+ yp_dir = optarg;
+ break;
+ default:
+ usage();
+ break;
+ }
+ }
+#ifdef foo
+ load_securenets();
+#endif
+
+ if (svc_auth_reg(AUTH_DES, _svcauth_des) == -1) {
+ yp_error("failed to register AUTH_DES flavor");
+ exit(1);
+ }
+
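+	/*
+	 * If descriptor 0 is a socket, assume we were started by a port
+	 * monitor such as inetd: inherit the socket, record its type, and
+	 * log via syslog since there is no terminal to write to.
+	 */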
+ if (getsockname(0, (struct sockaddr *)&saddr, &asize) == 0) {
+		socklen_t ssize = sizeof(int);
+
+ if (saddr.sin_family != AF_INET)
+ exit(1);
+ if (getsockopt(0, SOL_SOCKET, SO_TYPE,
+ (char *)&_rpcfdtype, &ssize) == -1)
+ exit(1);
+ sock = 0;
+ _rpcpmstart = 1;
+ proto = 0;
+		openlog("rpc.ypupdated", LOG_PID, LOG_DAEMON);
+ } else {
+#ifndef RPC_SVC_FG
+ if (daemon(0,0)) {
+ err(1, "cannot fork");
+ }
+ openlog("rpc.ypupdated", LOG_PID, LOG_DAEMON);
+#endif
+ sock = RPC_ANYSOCK;
+ (void) pmap_unset(YPU_PROG, YPU_VERS);
+ }
+
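+	/*
+	 * Create and register UDP and TCP transports; when started by a
+	 * port monitor, only the transport matching the inherited socket
+	 * type is set up.
+	 */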
+ if ((_rpcfdtype == 0) || (_rpcfdtype == SOCK_DGRAM)) {
+ transp = svcudp_create(sock);
+ if (transp == NULL) {
+ _msgout("cannot create udp service.");
+ exit(1);
+ }
+ if (!_rpcpmstart)
+ proto = IPPROTO_UDP;
+ if (!svc_register(transp, YPU_PROG, YPU_VERS, ypu_prog_1, proto)) {
+ _msgout("unable to register (YPU_PROG, YPU_VERS, udp).");
+ exit(1);
+ }
+ }
+
+ if ((_rpcfdtype == 0) || (_rpcfdtype == SOCK_STREAM)) {
+ transp = svctcp_create(sock, 0, 0);
+ if (transp == NULL) {
+ _msgout("cannot create tcp service.");
+ exit(1);
+ }
+ if (!_rpcpmstart)
+ proto = IPPROTO_TCP;
+ if (!svc_register(transp, YPU_PROG, YPU_VERS, ypu_prog_1, proto)) {
+ _msgout("unable to register (YPU_PROG, YPU_VERS, tcp).");
+ exit(1);
+ }
+ }
+
+ if (transp == (SVCXPRT *)NULL) {
+ _msgout("could not create a handle");
+ exit(1);
+ }
+ if (_rpcpmstart) {
+ (void) signal(SIGALRM, (SIG_PF) closedown);
+ (void) alarm(_RPCSVC_CLOSEDOWN/2);
+ }
+
+ (void) signal(SIGPIPE, SIG_IGN);
+ (void) signal(SIGCHLD, (SIG_PF) reaper);
+ (void) signal(SIGTERM, (SIG_PF) reaper);
+ (void) signal(SIGINT, (SIG_PF) reaper);
+ (void) signal(SIGHUP, (SIG_PF) reaper);
+
+ ypupdated_svc_run();
+ _msgout("svc_run returned");
+ exit(1);
+ /* NOTREACHED */
+}
diff --git a/usr.sbin/rpc.ypupdated/ypupdated_server.c b/usr.sbin/rpc.ypupdated/ypupdated_server.c
new file mode 100644
index 000000000000..47e52401cd6e
--- /dev/null
+++ b/usr.sbin/rpc.ypupdated/ypupdated_server.c
@@ -0,0 +1,227 @@
+/*-
+ * SPDX-License-Identifier: BSD-4-Clause
+ *
+ * Copyright (c) 1995, 1996
+ * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Bill Paul.
+ * 4. Neither the name of the author nor the names of any co-contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * ypupdate server implementation
+ *
+ * Written by Bill Paul <wpaul@ctr.columbia.edu>
+ * Center for Telecommunications Research
+ * Columbia University, New York City
+ */
+
+#include <sys/cdefs.h>
+#include <stdio.h>
+#include <rpc/rpc.h>
+#include <rpc/key_prot.h>
+#include <sys/param.h>
+#include <rpcsvc/yp.h>
+#include "ypupdate_prot.h"
+#include "ypupdated_extern.h"
+#include "yp_extern.h"
+#include "ypxfr_extern.h"
+
+int children = 0;
+int forked = 0;
+
+/*
+ * Try to avoid spoofing: if a client chooses to use a very large
+ * window and then tries a bunch of randomly chosen encrypted timestamps,
+ * there's a chance he might stumble onto a valid combination.
+ * We therefore reject any RPCs with a window size larger than a preset
+ * value.
+ */
+#ifndef WINDOW
+#define WINDOW (60*60)
+#endif
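+/* The window value is in seconds; (60*60) allows at most one hour. */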
+
+static enum auth_stat
+yp_checkauth(struct svc_req *svcreq)
+{
+ struct authdes_cred *des_cred;
+
+ switch (svcreq->rq_cred.oa_flavor) {
+ case AUTH_DES:
+ des_cred = (struct authdes_cred *) svcreq->rq_clntcred;
+ if (des_cred->adc_fullname.window > WINDOW) {
+			yp_error("warning: client-specified window size "
+			    "was too large -- possible spoof attempt");
+ return(AUTH_BADCRED);
+ }
+ return(AUTH_OK);
+ case AUTH_UNIX:
+ case AUTH_NONE:
+ yp_error("warning: client didn't use DES authentication");
+ return(AUTH_TOOWEAK);
+ default:
+ yp_error("client used unknown auth flavor");
+ return(AUTH_REJECTEDCRED);
+ }
+}
+
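+/*
+ * Each of the four service routines below follows the same pattern:
+ * verify the caller's AUTH_DES credentials, apply the update to the
+ * local /etc/publickey file, and then propagate it to the YP map.
+ * A non-zero result from the local update aborts the map update.
+ */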
+unsigned int *
+ypu_change_1_svc(struct ypupdate_args *args, struct svc_req *svcreq)
+{
+ struct authdes_cred *des_cred;
+ static int res;
+ char *netname;
+ enum auth_stat astat;
+
+ res = 0;
+
+ astat = yp_checkauth(svcreq);
+
+ if (astat != AUTH_OK) {
+ svcerr_auth(svcreq->rq_xprt, astat);
+ return(&res);
+ }
+
+ des_cred = (struct authdes_cred *) svcreq->rq_clntcred;
+ netname = des_cred->adc_fullname.name;
+
+ res = localupdate(netname, "/etc/publickey", YPOP_CHANGE,
+ args->key.yp_buf_len, args->key.yp_buf_val,
+ args->datum.yp_buf_len, args->datum.yp_buf_val);
+
+ if (res)
+ return (&res);
+
+ res = ypmap_update(netname, args->mapname, YPOP_CHANGE,
+ args->key.yp_buf_len, args->key.yp_buf_val,
+ args->datum.yp_buf_len, args->datum.yp_buf_val);
+
+ return (&res);
+}
+
+unsigned int *
+ypu_insert_1_svc(struct ypupdate_args *args, struct svc_req *svcreq)
+{
+ struct authdes_cred *des_cred;
+ static int res;
+ char *netname;
+ enum auth_stat astat;
+
+ res = 0;
+
+ astat = yp_checkauth(svcreq);
+
+ if (astat != AUTH_OK) {
+ svcerr_auth(svcreq->rq_xprt, astat);
+ return(&res);
+ }
+
+ des_cred = (struct authdes_cred *) svcreq->rq_clntcred;
+ netname = des_cred->adc_fullname.name;
+
+ res = localupdate(netname, "/etc/publickey", YPOP_INSERT,
+ args->key.yp_buf_len, args->key.yp_buf_val,
+ args->datum.yp_buf_len, args->datum.yp_buf_val);
+
+ if (res)
+ return (&res);
+
+ res = ypmap_update(netname, args->mapname, YPOP_INSERT,
+ args->key.yp_buf_len, args->key.yp_buf_val,
+ args->datum.yp_buf_len, args->datum.yp_buf_val);
+
+ return (&res);
+}
+
+unsigned int *
+ypu_delete_1_svc(struct ypdelete_args *args, struct svc_req *svcreq)
+{
+ struct authdes_cred *des_cred;
+ static int res;
+ char *netname;
+ enum auth_stat astat;
+
+ res = 0;
+
+ astat = yp_checkauth(svcreq);
+
+ if (astat != AUTH_OK) {
+ svcerr_auth(svcreq->rq_xprt, astat);
+ return(&res);
+ }
+
+ des_cred = (struct authdes_cred *) svcreq->rq_clntcred;
+ netname = des_cred->adc_fullname.name;
+
+ res = localupdate(netname, "/etc/publickey", YPOP_DELETE,
+ args->key.yp_buf_len, args->key.yp_buf_val,
+ 0, NULL);
+
+ if (res)
+ return (&res);
+
+ res = ypmap_update(netname, args->mapname, YPOP_DELETE,
+ args->key.yp_buf_len, args->key.yp_buf_val,
+ 0, NULL);
+
+ return (&res);
+}
+
+unsigned int *
+ypu_store_1_svc(struct ypupdate_args *args, struct svc_req *svcreq)
+{
+ struct authdes_cred *des_cred;
+ static int res;
+ char *netname;
+ enum auth_stat astat;
+
+ res = 0;
+
+ astat = yp_checkauth(svcreq);
+
+ if (astat != AUTH_OK) {
+ svcerr_auth(svcreq->rq_xprt, astat);
+ return(&res);
+ }
+
+ des_cred = (struct authdes_cred *) svcreq->rq_clntcred;
+ netname = des_cred->adc_fullname.name;
+
+ res = localupdate(netname, "/etc/publickey", YPOP_STORE,
+ args->key.yp_buf_len, args->key.yp_buf_val,
+ args->datum.yp_buf_len, args->datum.yp_buf_val);
+
+ if (res)
+ return (&res);
+
+ res = ypmap_update(netname, args->mapname, YPOP_STORE,
+ args->key.yp_buf_len, args->key.yp_buf_val,
+ args->datum.yp_buf_len, args->datum.yp_buf_val);
+
+ return (&res);
+}
diff --git a/usr.sbin/services_mkdb/services b/usr.sbin/services_mkdb/services
index 4a5b6863d92d..c5f950831767 100644
--- a/usr.sbin/services_mkdb/services
+++ b/usr.sbin/services_mkdb/services
@@ -893,7 +893,7 @@ biff 512/udp comsat #used by mail system to notify users
# processes on the same machine
login 513/tcp #remote login a la telnet;
# automatic authentication performed
-# based on priviledged port numbers
+# based on privileged port numbers
# and distributed data bases which
# identify "authentication domains"
who 513/udp whod #maintains data bases showing who's
diff --git a/usr.sbin/syslogd/syslogd.c b/usr.sbin/syslogd/syslogd.c
index fe7427130b78..81bbbbe66be8 100644
--- a/usr.sbin/syslogd/syslogd.c
+++ b/usr.sbin/syslogd/syslogd.c
@@ -1830,15 +1830,14 @@ fprintlog_write(struct filed *f, struct iovlist *il, int flags)
case EHOSTUNREACH:
case EHOSTDOWN:
case EADDRNOTAVAIL:
+ case EAGAIN:
+ case ECONNREFUSED:
break;
/* case EBADF: */
/* case EACCES: */
/* case ENOTSOCK: */
/* case EFAULT: */
/* case EMSGSIZE: */
- /* case EAGAIN: */
- /* case ENOBUFS: */
- /* case ECONNREFUSED: */
default:
dprintf("removing entry: errno=%d\n", e);
f->f_type = F_UNUSED;
diff --git a/usr.sbin/unbound/setup/local-unbound-setup.sh b/usr.sbin/unbound/setup/local-unbound-setup.sh
index d52534b46fa3..d57d74952fc7 100755
--- a/usr.sbin/unbound/setup/local-unbound-setup.sh
+++ b/usr.sbin/unbound/setup/local-unbound-setup.sh
@@ -259,7 +259,7 @@ gen_unbound_conf() {
echo " pidfile: ${pidfile}"
echo " auto-trust-anchor-file: ${anchor}"
if [ "${use_tls}" = "yes" ] ; then
- echo " tls-system-cert: yes"
+ echo " tls-cert-bundle: /etc/ssl/cert.pem"
fi
echo ""
if [ -f "${forward_conf}" ] ; then
diff --git a/usr.sbin/watch/watch.8 b/usr.sbin/watch/watch.8
index 7acd79df8710..3cc72267f207 100644
--- a/usr.sbin/watch/watch.8
+++ b/usr.sbin/watch/watch.8
@@ -28,7 +28,7 @@ The
utility writes to standard output.
.Pp
The options are as follows:
-.Bl -tag -width indent
+.Bl -tag -width "-f snpdev"
.It Fl c
Reconnect on close.
If the tty observed by