aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorRobert Nagy <rnagy@FreeBSD.org>2024-02-03 16:07:19 +0000
committerRobert Nagy <rnagy@FreeBSD.org>2024-02-03 16:14:38 +0000
commit52eee8b78d38ef94e15a5ce4f36c80468e99cb9a (patch)
tree84135b98b94a454ceac2e1ccf6e724a54fcce08e
parenta9e0d2d682e081b5853ec6c2cba4b4b4effa88c3 (diff)
downloadports-52eee8b78d38ef94e15a5ce4f36c80468e99cb9a.tar.gz
ports-52eee8b78d38ef94e15a5ce4f36c80468e99cb9a.zip
www/ungoogled-chromium: update to 121.0.6167.139
-rw-r--r--www/ungoogled-chromium/Makefile14
-rw-r--r--www/ungoogled-chromium/distinfo14
-rw-r--r--www/ungoogled-chromium/files/patch-BUILD.gn20
-rw-r--r--www/ungoogled-chromium/files/patch-apps_ui_views_app__window__frame__view.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-base_BUILD.gn20
-rw-r--r--www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc.gni4
-rw-r--r--www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_BUILD.gn10
-rw-r--r--www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_debug_stack__trace__posix.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_threading_platform__thread__posix.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__config.h27
-rw-r--r--www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__constants.h6
-rw-r--r--www/ungoogled-chromium/files/patch-base_compiler__specific.h8
-rw-r--r--www/ungoogled-chromium/files/patch-base_files_file__path__watcher.h4
-rw-r--r--www/ungoogled-chromium/files/patch-base_files_file__path__watcher__unittest.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-base_files_file__util__posix.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-base_logging__unittest.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-base_native__library__unittest.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-base_process_memory__linux.cc10
-rw-r--r--www/ungoogled-chromium/files/patch-base_process_process__handle.h4
-rw-r--r--www/ungoogled-chromium/files/patch-base_rand__util.h4
-rw-r--r--www/ungoogled-chromium/files/patch-base_system_sys__info.h6
-rw-r--r--www/ungoogled-chromium/files/patch-base_test_launcher_test__launcher.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-build_config_compiler_BUILD.gn20
-rw-r--r--www/ungoogled-chromium/files/patch-build_config_linux_atspi2_BUILD.gn25
-rw-r--r--www/ungoogled-chromium/files/patch-cc_BUILD.gn4
-rw-r--r--www/ungoogled-chromium/files/patch-cc_base_features.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_app_app__management__strings.grdp4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_app_chrome__main.cc15
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_app_chrome__main__delegate.cc24
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_app_chromium__strings.grd8
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_app_generated__resources.grd20
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_app_google__chrome__strings.grd6
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_app_theme_chrome__unscaled__resources.grd39
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_app_theme_theme__resources.grd69
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_BUILD.gn4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_about__flags.cc93
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_accessibility_pdf__ocr__controller.cc11
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_apps_app__service_publishers_extension__apps.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_browser__features.cc18
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_browser__features.h11
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.h4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__interface__binders.cc20
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__main.cc42
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.cc34
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.h4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_component__updater_registration.cc11
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_compose_chrome__compose__client.cc19
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_content__settings_one__time__permission__provider.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_download_chrome__download__manager__delegate.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_download_download__item__model.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_download_download__prefs.cc14
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_analysis_analysis__service__settings.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_extensions_BUILD.gn4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_api__browser__context__keyed__service__factories.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_enterprise__reporting__private_chrome__desktop__report__request__helper.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_enterprise__reporting__private_enterprise__reporting__private__api.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_passwords__private_passwords__private__delegate__impl.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_settings__private_prefs__util.cc20
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_webrtc__logging__private_webrtc__logging__private__api.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_webstore__private_webstore__private__api.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_file__system__access_chrome__file__system__access__permission__context.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.cc24
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.h22
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_media__galleries_media__file__system__registry.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__event__log__uploader.cc11
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__log__uploader.cc20
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__logging__controller.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__logging__controller.h6
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_memory__details.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__browser__main__extra__parts__metrics.cc10
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__browser__main__extra__parts__metrics.h4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__metrics__service__client.cc42
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.cc24
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.h8
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_password__manager_password__reuse__manager__factory.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_policy_chrome__browser__cloud__management__controller__desktop.cc10
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_policy_configuration__policy__handler__list__factory.cc68
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_prefs_browser__prefs.cc14
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_printing_printer__query.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_profiles_chrome__browser__main__extra__parts__profiles.cc20
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_renderer__preferences__util.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_resources_settings_route.ts4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_cloud__content__scanning_binary__upload__service.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_download__protection_file__analyzer.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_search__engine__choice_search__engine__choice__client__side__trial.cc11
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.h4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_startup_startup__browser__creator.cc10
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_tab__helpers.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_ui__features.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_ui__features.h4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_views_dark__mode__manager__linux.h10
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_browser__view.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_picture__in__picture__browser__frame__view.cc22
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_views_profiles_dice__web__signin__interception__backdrop__layer.cc11
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_views_profiles_profile__menu__view__base.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tab__search__bubble__host.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_views_user__education_browser__user__education__service.cc11
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_web__applications_web__app__dialogs.h4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_about_about__ui.cc (renamed from www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_about__ui.cc)8
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_chrome__web__ui__controller__factory.cc14
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_realbox_realbox__handler.cc19
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_settings_accessibility__main__handler.cc37
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_settings_accessibility__main__handler.h38
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_settings_settings__localized__strings__provider.cc16
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_browser_webauthn_chrome__authenticator__request__delegate.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_common_chrome__features.cc10
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_common_chrome__features.h10
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_common_chrome__paths.cc12
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_common_chrome__paths.h14
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_common_chrome__switches.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_common_pref__names.h20
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_common_url__constants.h4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.cc12
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.h10
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_renderer_chrome__render__frame__observer.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_services_printing_print__backend__service__impl.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_test_BUILD.gn4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_updater_configurator.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_updater_util_posix__util.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-chrome_utility_services.cc14
-rw-r--r--www/ungoogled-chromium/files/patch-chromecast_browser_cast__content__browser__client.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-components_autofill_core_browser_data__model_autofill__i18n__api.h16
-rw-r--r--www/ungoogled-chromium/files/patch-components_autofill_core_browser_payments_iban__save__manager.cc14
-rw-r--r--www/ungoogled-chromium/files/patch-components_autofill_core_browser_personal__data__manager.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__payments__features.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__util.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-components_commerce__strings.grdp4
-rw-r--r--www/ungoogled-chromium/files/patch-components_commerce_core_commerce__feature__list.cc13
-rw-r--r--www/ungoogled-chromium/files/patch-components_components__chromium__strings.grd4
-rw-r--r--www/ungoogled-chromium/files/patch-components_components__google__chrome__strings.grd4
-rw-r--r--www/ungoogled-chromium/files/patch-components_cookie__config_cookie__store__util.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-components_crash_core_app_crashpad__handler__main.cc20
-rw-r--r--www/ungoogled-chromium/files/patch-components_embedder__support_user__agent__utils.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__configurations.cc15
-rw-r--r--www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.cc13
-rw-r--r--www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.h15
-rw-r--r--www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.cc17
-rw-r--r--www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.h32
-rw-r--r--www/ungoogled-chromium/files/patch-components_gwp__asan_client_gwp__asan.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-components_gwp__asan_client_gwp__asan__features.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-components_management__strings.grdp4
-rw-r--r--www/ungoogled-chromium/files/patch-components_metrics_metrics__log.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-components_neterror_resources_neterror.js4
-rw-r--r--www/ungoogled-chromium/files/patch-components_network__session__configurator_browser_network__session__configurator.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-components_network__session__configurator_browser_network__session__configurator__unittest.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-components_omnibox_browser_omnibox__edit__model.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-components_optimization__guide_core_optimization__guide__util.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-components_optimization__guide_core_tflite__model__executor.h12
-rw-r--r--www/ungoogled-chromium/files/patch-components_paint__preview_browser_paint__preview__client.cc18
-rw-r--r--www/ungoogled-chromium/files/patch-components_paint__preview_browser_paint__preview__client__unittest.cc18
-rw-r--r--www/ungoogled-chromium/files/patch-components_paint__preview_player_player__compositor__delegate.cc18
-rw-r--r--www/ungoogled-chromium/files/patch-components_password__manager_core_browser_features_password__features.cc13
-rw-r--r--www/ungoogled-chromium/files/patch-components_password__manager_core_browser_features_password__features.h11
-rw-r--r--www/ungoogled-chromium/files/patch-components_password__manager_core_browser_password__store_login__database__unittest.cc (renamed from www/ungoogled-chromium/files/patch-components_password__manager_core_browser_login__database__unittest.cc)6
-rw-r--r--www/ungoogled-chromium/files/patch-components_policy_core_browser_policy__pref__mapping__test.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-components_policy_core_common_cloud_cloud__policy__client.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-components_policy_core_common_policy__loader__common.cc10
-rw-r--r--www/ungoogled-chromium/files/patch-components_policy_core_common_policy__paths.cc10
-rw-r--r--www/ungoogled-chromium/files/patch-components_policy_core_common_policy__utils.cc11
-rw-r--r--www/ungoogled-chromium/files/patch-components_qr__code__generator_BUILD.gn32
-rw-r--r--www/ungoogled-chromium/files/patch-components_qr__code__generator_features.cc11
-rw-r--r--www/ungoogled-chromium/files/patch-components_qr__code__generator_features.h14
-rw-r--r--www/ungoogled-chromium/files/patch-components_qr__code__generator_qr__code__generator.cc47
-rw-r--r--www/ungoogled-chromium/files/patch-components_search__engines_template__url__service.cc11
-rw-r--r--www/ungoogled-chromium/files/patch-components_services_paint__preview__compositor_paint__preview__compositor__collection__impl.cc10
-rw-r--r--www/ungoogled-chromium/files/patch-components_services_screen__ai_sandbox_screen__ai__sandbox__hook__linux.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-components_services_screen__ai_screen__ai__library__wrapper.cc29
-rw-r--r--www/ungoogled-chromium/files/patch-components_supervised__user_core_browser_proto__fetcher.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-components_supervised__user_core_browser_supervised__user__service.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-components_supervised__user_core_common_features.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-components_sync_base_features.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-components_user__education_common_product__messaging__controller.cc10
-rw-r--r--www/ungoogled-chromium/files/patch-components_user__education_views_help__bubble__view.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-components_viz_host_host__display__client.cc11
-rw-r--r--www/ungoogled-chromium/files/patch-components_viz_host_host__display__client.h11
-rw-r--r--www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_skia__output__surface__impl.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_skia__output__surface__impl__on__gpu.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-components_viz_service_display_skia__renderer.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-components_viz_service_frame__sinks_root__compositor__frame__sink__impl.cc20
-rw-r--r--www/ungoogled-chromium/files/patch-components_viz_service_frame__sinks_root__compositor__frame__sink__impl.h11
-rw-r--r--www/ungoogled-chromium/files/patch-components_viz_test_fake__display__client.cc11
-rw-r--r--www/ungoogled-chromium/files/patch-components_viz_test_fake__display__client.h11
-rw-r--r--www/ungoogled-chromium/files/patch-components_viz_test_mock__display__client.h11
-rw-r--r--www/ungoogled-chromium/files/patch-content_app_content__main__runner__impl.cc20
-rw-r--r--www/ungoogled-chromium/files/patch-content_browser_BUILD.gn4
-rw-r--r--www/ungoogled-chromium/files/patch-content_browser_audio_audio__service.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-content_browser_browser__main__loop.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-content_browser_compositor_viz__process__transport__factory.cc11
-rw-r--r--www/ungoogled-chromium/files/patch-content_browser_generic__sensor_frame__sensor__provider__proxy.cc14
-rw-r--r--www/ungoogled-chromium/files/patch-content_browser_gpu_compositor__util.cc26
-rw-r--r--www/ungoogled-chromium/files/patch-content_browser_interest__group_header__direct__from__seller__signals.cc10
-rw-r--r--www/ungoogled-chromium/files/patch-content_browser_media_media__keys__listener__manager__impl.cc14
-rw-r--r--www/ungoogled-chromium/files/patch-content_browser_network__service__instance__impl.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-content_browser_renderer__host_delegated__frame__host.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__process__host__impl.cc12
-rw-r--r--www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__aura.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__aura.h4
-rw-r--r--www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__event__handler.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-content_browser_utility__sandbox__delegate.cc14
-rw-r--r--www/ungoogled-chromium/files/patch-content_browser_zygote__host_zygote__host__impl__linux.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-content_child_BUILD.gn4
-rw-r--r--www/ungoogled-chromium/files/patch-content_common_BUILD.gn39
-rw-r--r--www/ungoogled-chromium/files/patch-content_common_features.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-content_common_features.h6
-rw-r--r--www/ungoogled-chromium/files/patch-content_common_gpu__pre__sandbox__hook__bsd.cc (renamed from www/ungoogled-chromium/files/patch-content_gpu_gpu__sandbox__hook__bsd.cc)8
-rw-r--r--www/ungoogled-chromium/files/patch-content_common_gpu__pre__sandbox__hook__bsd.h23
-rw-r--r--www/ungoogled-chromium/files/patch-content_gpu_BUILD.gn20
-rw-r--r--www/ungoogled-chromium/files/patch-content_gpu_gpu__main.cc12
-rw-r--r--www/ungoogled-chromium/files/patch-content_gpu_gpu__sandbox__hook__bsd.h19
-rw-r--r--www/ungoogled-chromium/files/patch-content_public_browser_content__browser__client.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-content_public_common_content__features.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-content_public_common_content__switches.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-content_public_common_content__switches.h4
-rw-r--r--www/ungoogled-chromium/files/patch-content_renderer_render__process__impl.cc27
-rw-r--r--www/ungoogled-chromium/files/patch-content_renderer_render__thread__impl.cc10
-rw-r--r--www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.cc12
-rw-r--r--www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.h4
-rw-r--r--www/ungoogled-chromium/files/patch-content_shell_BUILD.gn4
-rw-r--r--www/ungoogled-chromium/files/patch-content_test_BUILD.gn4
-rw-r--r--www/ungoogled-chromium/files/patch-content_utility_services.cc12
-rw-r--r--www/ungoogled-chromium/files/patch-content_utility_utility__main.cc31
-rw-r--r--www/ungoogled-chromium/files/patch-extensions_common_api_runtime.json4
-rw-r--r--www/ungoogled-chromium/files/patch-gpu_command__buffer_service_dawn__context__provider.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_external__vk__image__backing.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_external__vk__image__backing__factory.cc13
-rw-r--r--www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_shared__image__manager.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-gpu_command__buffer_service_webgpu__decoder__impl.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-gpu_config_gpu__control__list.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-gpu_config_gpu__finch__features.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-gpu_config_gpu__info__collector.cc11
-rw-r--r--www/ungoogled-chromium/files/patch-gpu_ipc_service_gpu__init.cc14
-rw-r--r--www/ungoogled-chromium/files/patch-gpu_vulkan_vulkan__function__pointers.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-ipc_ipc__channel.h4
-rw-r--r--www/ungoogled-chromium/files/patch-media_base_media__switches.cc25
-rw-r--r--www/ungoogled-chromium/files/patch-media_base_media__switches.h12
-rw-r--r--www/ungoogled-chromium/files/patch-media_base_video__frame.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-media_capture_video_fake__video__capture__device__factory.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-media_capture_video_linux_v4l2__capture__delegate.cc30
-rw-r--r--www/ungoogled-chromium/files/patch-media_gpu_chromeos_libyuv__image__processor__backend.cc29
-rw-r--r--www/ungoogled-chromium/files/patch-media_gpu_chromeos_mailbox__video__frame__converter.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-media_gpu_chromeos_platform__video__frame__utils.cc11
-rw-r--r--www/ungoogled-chromium/files/patch-media_gpu_chromeos_video__decoder__pipeline.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__video__decoder.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__wrapper.cc31
-rw-r--r--www/ungoogled-chromium/files/patch-media_video_gpu__memory__buffer__video__frame__pool.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-media_video_video__encode__accelerator__adapter.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-mojo_public_c_system_thunks.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-mojo_public_tools_bindings_mojom.gni4
-rw-r--r--www/ungoogled-chromium/files/patch-net_BUILD.gn14
-rw-r--r--www/ungoogled-chromium/files/patch-net_base_features.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-net_cert_cert__verify__proc.h4
-rw-r--r--www/ungoogled-chromium/files/patch-net_cert_pki_general__names.h10
-rw-r--r--www/ungoogled-chromium/files/patch-net_filter_zstd__source__stream.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-net_socket_udp__socket__posix.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-net_socket_udp__socket__unittest.cc12
-rw-r--r--www/ungoogled-chromium/files/patch-net_tools_cert__verify__tool_cert__verify__comparision__tool.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-net_tools_cert__verify__tool_cert__verify__tool.cc12
-rw-r--r--www/ungoogled-chromium/files/patch-remoting_host_base_desktop__environment__options.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-remoting_host_client__session.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-remoting_host_host__attributes.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-remoting_host_it2me_it2me__host.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-remoting_host_me2me__desktop__environment.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-remoting_host_remoting__me2me__host.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-remoting_host_setup_start__host__main.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-remoting_host_webauthn_remote__webauthn__caller__security__utils.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-sandbox_policy_features.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-sandbox_policy_features.h4
-rw-r--r--www/ungoogled-chromium/files/patch-services_audio_audio__sandbox__hook__linux.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-services_device_compute__pressure_cpu__probe.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-services_device_geolocation_location__arbitrator.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-services_device_hid_BUILD.gn4
-rw-r--r--www/ungoogled-chromium/files/patch-services_device_usb_BUILD.gn8
-rw-r--r--www/ungoogled-chromium/files/patch-services_network_BUILD.gn4
-rw-r--r--www/ungoogled-chromium/files/patch-services_network_network__context.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-services_network_network__context.h4
-rw-r--r--www/ungoogled-chromium/files/patch-services_network_network__service.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-services_network_network__service.h8
-rw-r--r--www/ungoogled-chromium/files/patch-services_network_public_mojom_BUILD.gn6
-rw-r--r--www/ungoogled-chromium/files/patch-services_on__device__model_on__device__model__service.h22
-rw-r--r--www/ungoogled-chromium/files/patch-services_on__device__model_pre__sandbox__init.cc38
-rw-r--r--www/ungoogled-chromium/files/patch-services_resource__coordinator_memory__instrumentation_queued__request__dispatcher.cc16
-rw-r--r--www/ungoogled-chromium/files/patch-services_tracing_public_cpp_stack__sampling_tracing__sampler__profiler.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-services_video__capture_video__capture__service__impl.cc10
-rw-r--r--www/ungoogled-chromium/files/patch-services_video__capture_video__capture__service__impl.h8
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_abseil-cpp_absl_base_internal_raw__logging.cc16
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_angle_BUILD.gn8
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_angle_src_common_system__utils.cpp11
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_angle_src_common_system__utils__linux.cpp35
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_common_renderer__preferences_renderer__preferences__mojom__traits.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_public_common_renderer__preferences_renderer__preferences.h4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_public_common_renderer__preferences_renderer__preferences__mojom__traits.h4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_public_platform_platform.h4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_public_platform_web__vector.h4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_controller_blink__initializer.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_exported_web__view__impl.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_frame_web__frame__test.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_html_canvas_canvas__async__blob__creator.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_inspector_inspector__memory__agent.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_layout__view.cc8
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_ng_physical__fragment__rare__data.h10
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_scroll_scrollbar__theme__aura.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_xml_xslt__processor.h14
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_xml_xslt__processor__libxslt.cc14
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_ml_webnn_features.gni16
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_ml_webnn_ml__graph__xnnpack.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgl_webgl__rendering__context__base.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgpu_gpu__queue.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_BUILD.gn4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_fonts_font__cache.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_fonts_skia_font__cache__skia.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_graphics_video__frame__submitter.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_peerconnection_rtc__video__encoder__factory.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_runtime__enabled__features.json514
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_crashpad_crashpad_util_misc_uuid.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_distributed__point__functions_BUILD.gn12
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_ffmpeg_configure4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_ffmpeg_libavutil_x86_x86inc.asm4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_leveldatabase_env__chromium.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_libc++_src_src_chrono.cpp11
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_libc++_src_src_filesystem_filesystem__clock.cpp11
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_libc++abi_src_src_cxa__guard__impl.h24
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_libvpx_source_libvpx_vpx__ports_aarch64__cpudetect.c4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_perfetto_include_perfetto_base_time.h8
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_perfetto_src_trace__processor_db_storage_numeric__storage.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_perfetto_src_tracing_core_tracing__service__impl.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_speech-dispatcher_libspeechd__version.h4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_vulkan-deps_vulkan-loader_src_loader_vk__loader__platform.h4
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_vulkan__memory__allocator_include_vk__mem__alloc.h39390
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_wayland_include_config.h23
-rw-r--r--www/ungoogled-chromium/files/patch-third__party_webrtc_modules_audio__device_BUILD.gn4
-rw-r--r--www/ungoogled-chromium/files/patch-tools_gn_build_gen.py4
-rw-r--r--www/ungoogled-chromium/files/patch-tools_json__schema__compiler_feature__compiler.py4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_base_resource_resource__bundle.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_base_ui__base__features.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-ui_base_webui_web__ui__util.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-ui_base_x_x11__cursor__loader.cc12
-rw-r--r--www/ungoogled-chromium/files/patch-ui_base_x_x11__display__manager.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_color_color__id.h4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_compositor_compositor.cc11
-rw-r--r--www/ungoogled-chromium/files/patch-ui_compositor_compositor.h20
-rw-r--r--www/ungoogled-chromium/files/patch-ui_compositor_compositor__observer.h20
-rw-r--r--www/ungoogled-chromium/files/patch-ui_display_screen.h4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_events_devices_x11_device__data__manager__x11.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_gfx_BUILD.gn4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_gfx_font__fallback__linux.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_gfx_font__render__params.h10
-rw-r--r--www/ungoogled-chromium/files/patch-ui_gfx_platform__font__skia.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_dri3.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_shm.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_gl_BUILD.gn10
-rw-r--r--www/ungoogled-chromium/files/patch-ui_gl_gl__context.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_gl_gl__switches.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_native__theme_native__theme__features.cc20
-rw-r--r--www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_common_wayland__util.cc11
-rw-r--r--www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_wayland__toplevel__window.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_wayland__toplevel__window.h4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_wayland__window.cc10
-rw-r--r--www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_xdg__toplevel__wrapper__impl.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_zwp__text__input__wrapper__v1.cc21
-rw-r--r--www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_ozone__platform__wayland.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_ozone_public_platform__screen.h11
-rw-r--r--www/ungoogled-chromium/files/patch-ui_qt_BUILD.gn11
-rw-r--r--www/ungoogled-chromium/files/patch-ui_views_controls_textfield_textfield.cc14
-rw-r--r--www/ungoogled-chromium/files/patch-ui_views_controls_textfield_textfield.h6
-rw-r--r--www/ungoogled-chromium/files/patch-ui_views_views__delegate.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_views_views__delegate.h4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_views_widget_desktop__aura_desktop__window__tree__host__platform.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_views_widget_widget.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-ui_views_widget_widget.h4
-rw-r--r--www/ungoogled-chromium/files/patch-v8_BUILD.gn10
-rw-r--r--www/ungoogled-chromium/files/patch-v8_src_api_api.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-v8_src_baseline_x64_baseline-assembler-x64-inl.h13
-rw-r--r--www/ungoogled-chromium/files/patch-v8_src_builtins_x64_builtins-x64.cc46
-rw-r--r--www/ungoogled-chromium/files/patch-v8_src_codegen_x64_assembler-x64.cc42
-rw-r--r--www/ungoogled-chromium/files/patch-v8_src_codegen_x64_assembler-x64.h21
-rw-r--r--www/ungoogled-chromium/files/patch-v8_src_codegen_x64_macro-assembler-x64.cc20
-rw-r--r--www/ungoogled-chromium/files/patch-v8_src_codegen_x64_macro-assembler-x64.h20
-rw-r--r--www/ungoogled-chromium/files/patch-v8_src_compiler_backend_x64_code-generator-x64.cc16
-rw-r--r--www/ungoogled-chromium/files/patch-v8_src_deoptimizer_x64_deoptimizer-x64.cc11
-rw-r--r--www/ungoogled-chromium/files/patch-v8_src_execution_isolate.cc6
-rw-r--r--www/ungoogled-chromium/files/patch-v8_src_flags_flags.cc4
-rw-r--r--www/ungoogled-chromium/files/patch-v8_src_maglev_x64_maglev-assembler-x64-inl.h14
-rw-r--r--www/ungoogled-chromium/files/patch-v8_src_maglev_x64_maglev-assembler-x64.cc16
-rw-r--r--www/ungoogled-chromium/files/patch-v8_src_regexp_x64_regexp-macro-assembler-x64.cc22
-rw-r--r--www/ungoogled-chromium/files/patch-v8_src_regexp_x64_regexp-macro-assembler-x64.h12
-rw-r--r--www/ungoogled-chromium/files/patch-v8_src_wasm_jump-table-assembler.cc51
-rw-r--r--www/ungoogled-chromium/files/patch-v8_src_wasm_jump-table-assembler.h16
-rw-r--r--www/ungoogled-chromium/pkg-plist1
391 files changed, 2223 insertions, 41342 deletions
diff --git a/www/ungoogled-chromium/Makefile b/www/ungoogled-chromium/Makefile
index f31d9b89b70a..e71b90602b0a 100644
--- a/www/ungoogled-chromium/Makefile
+++ b/www/ungoogled-chromium/Makefile
@@ -1,5 +1,5 @@
PORTNAME= ungoogled-chromium
-PORTVERSION= 120.0.6099.224
+PORTVERSION= 121.0.6167.139
UGVERSION= ${DISTVERSION}-1
CATEGORIES= www wayland
MASTER_SITES= https://commondatastorage.googleapis.com/chromium-browser-official/ \
@@ -37,7 +37,6 @@ BUILD_DEPENDS= bash:shells/bash \
LIB_DEPENDS= libatk-bridge-2.0.so:accessibility/at-spi2-core \
libatspi.so:accessibility/at-spi2-core \
libspeechd.so:accessibility/speech-dispatcher \
- libsnappy.so:archivers/snappy \
libFLAC.so:audio/flac \
libopus.so:audio/opus \
libspeex.so:audio/speex \
@@ -108,7 +107,8 @@ GN_ARGS+= enable_widevine=true \
toolkit_views=true \
use_allocator_shim=false \
use_aura=true \
- use_custom_libcxx=false \
+ use_custom_libcxx=true \
+ use_custom_libunwind=true \
use_lld=true \
use_partition_alloc=true \
use_partition_alloc_as_malloc=false \
@@ -128,6 +128,7 @@ GN_ARGS+= build_with_tflite_lib=false \
clang_use_chrome_plugins=false \
disable_fieldtrial_testing_config=true \
enable_backup_ref_ptr_support=false \
+ enable_log_error_not_reached=true \
enable_hangout_services_extension=false \
enable_mdns=false \
enable_nacl=false \
@@ -242,8 +243,8 @@ TEST_ALL_TARGET= ${TEST_TARGETS}
IGNORE= you have selected HEIMDAL_BASE but do not have Heimdal installed in base
.endif
-.if ${COMPILER_VERSION} < 160
-LLVM_DEFAULT= 16
+.if ${COMPILER_VERSION} < 170
+LLVM_DEFAULT= 17
BUILD_DEPENDS+= clang${LLVM_DEFAULT}:devel/llvm${LLVM_DEFAULT}
BINARY_ALIAS+= cpp=${LOCALBASE}/bin/clang-cpp${LLVM_DEFAULT} \
cc=${LOCALBASE}/bin/clang${LLVM_DEFAULT} \
@@ -311,7 +312,7 @@ pre-configure:
cd ${WRKSRC} && ${SETENV} ${CONFIGURE_ENV} ${PYTHON_CMD} \
./build/linux/unbundle/replace_gn_files.py --system-libraries \
dav1d flac fontconfig freetype harfbuzz-ng icu libdrm libevent libpng \
- libusb libwebp libxml libxslt openh264 opus snappy || ${FALSE}
+ libusb libwebp libxml libxslt openh264 opus || ${FALSE}
# Chromium uses an unreleased version of FFmpeg, so configure it
cd ${WRKSRC}/third_party/ffmpeg && \
${PYTHON_CMD} chromium/scripts/build_ffmpeg.py freebsd ${FFMPEG_TARGET} \
@@ -384,6 +385,7 @@ do-install:
# SwiftShader
.if ${ARCH} != aarch64
${INSTALL_LIB} ${WRKSRC}/out/${BUILDTYPE}/libvk_swiftshader.so ${STAGEDIR}${DATADIR}
+ ${INSTALL_DATA} ${WRKSRC}/out/${BUILDTYPE}/vk_swiftshader_icd.json ${STAGEDIR}${DATADIR}
.endif
post-install-DEBUG-on:
diff --git a/www/ungoogled-chromium/distinfo b/www/ungoogled-chromium/distinfo
index abeeb60bfdf9..67fae4ef012a 100644
--- a/www/ungoogled-chromium/distinfo
+++ b/www/ungoogled-chromium/distinfo
@@ -1,9 +1,9 @@
-TIMESTAMP = 1705683725
-SHA256 (chromium-120.0.6099.224.tar.xz) = 850a85c8d8a01041a07dfaaea8289fa5f8294b4e375e6b77997b61434e0a2f1a
-SIZE (chromium-120.0.6099.224.tar.xz) = 3299542904
-SHA256 (ungoogled-chromium-120.0.6099.224-1.tar.gz) = b96fd3d5d64ffd5efa3bc52966adfd7fd1dd3e85ebf3517924054b577ac03750
-SIZE (ungoogled-chromium-120.0.6099.224-1.tar.gz) = 665887
-SHA256 (chromium-120.0.6099.224-testdata.tar.xz) = db8ef8f50dd759f548fd269202f9d084e3d1bb92b587bd5e0023707ac29518ea
-SIZE (chromium-120.0.6099.224-testdata.tar.xz) = 268575524
+TIMESTAMP = 1706973655
+SHA256 (chromium-121.0.6167.139.tar.xz) = e12cc967bef7a79630828792f02d95297a06eb905c98e4c6e065fd5e74d6f9ff
+SIZE (chromium-121.0.6167.139.tar.xz) = 3340424948
+SHA256 (ungoogled-chromium-121.0.6167.139-1.tar.gz) = a4f389f9159effbbe47a07ade0b8b9b86ebb64a56230ea7a4b43664cd499f829
+SIZE (ungoogled-chromium-121.0.6167.139-1.tar.gz) = 666180
+SHA256 (chromium-121.0.6167.139-testdata.tar.xz) = ea1431ca694fd314ec542642f1ef5112df737792106dd991edf46eb012732a15
+SIZE (chromium-121.0.6167.139-testdata.tar.xz) = 272290844
SHA256 (test_fonts-336e775eec536b2d785cc80eff6ac39051931286.tar.gz) = a2ca2962daf482a8f943163541e1c73ba4b2694fabcd2510981f2db4eda493c8
SIZE (test_fonts-336e775eec536b2d785cc80eff6ac39051931286.tar.gz) = 32624734
diff --git a/www/ungoogled-chromium/files/patch-BUILD.gn b/www/ungoogled-chromium/files/patch-BUILD.gn
index cb8b72117f53..a0b35734ba84 100644
--- a/www/ungoogled-chromium/files/patch-BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-BUILD.gn
@@ -1,6 +1,6 @@
---- BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ BUILD.gn
-@@ -61,7 +61,7 @@ declare_args() {
+@@ -62,7 +62,7 @@ declare_args() {
root_extra_deps = []
}
@@ -9,7 +9,7 @@
# An official (maximally optimized!) component (optimized for build times)
# build doesn't make sense and usually doesn't work.
assert(!is_component_build)
-@@ -95,7 +95,6 @@ group("gn_all") {
+@@ -94,7 +94,6 @@ group("gn_all") {
"//codelabs",
"//components:components_unittests",
"//components/gwp_asan:gwp_asan_unittests",
@@ -17,7 +17,7 @@
"//net:net_unittests",
"//sandbox:sandbox_unittests",
"//services:services_unittests",
-@@ -415,7 +414,7 @@ group("gn_all") {
+@@ -417,7 +416,7 @@ group("gn_all") {
}
}
@@ -26,7 +26,7 @@
deps += [
"//third_party/breakpad:breakpad_unittests",
"//third_party/breakpad:core-2-minidump",
-@@ -607,6 +606,15 @@ group("gn_all") {
+@@ -609,6 +608,15 @@ group("gn_all") {
}
}
@@ -42,7 +42,7 @@
if (is_mac) {
deps += [
"//third_party/breakpad:dump_syms",
-@@ -656,7 +664,7 @@ group("gn_all") {
+@@ -655,7 +663,7 @@ group("gn_all") {
host_os == "win") {
deps += [ "//chrome/test/mini_installer:mini_installer_tests" ]
}
@@ -51,7 +51,7 @@
deps += [ "//third_party/breakpad:symupload($host_toolchain)" ]
}
-@@ -1077,7 +1085,7 @@ if (use_blink && !is_cronet_build) {
+@@ -1076,7 +1084,7 @@ if (use_blink && !is_cronet_build) {
]
}
@@ -60,7 +60,7 @@
script_test("chrome_wpt_tests") {
script = "//third_party/blink/tools/run_wpt_tests.py"
args = [
-@@ -1150,7 +1158,7 @@ if (use_blink && !is_cronet_build) {
+@@ -1166,7 +1174,7 @@ if (use_blink && !is_cronet_build) {
data_deps += [ "//content/web_test:web_test_common_mojom_js_data_deps" ]
}
@@ -69,7 +69,7 @@
data_deps +=
[ "//third_party/breakpad:minidump_stackwalk($host_toolchain)" ]
}
-@@ -1159,7 +1167,7 @@ if (use_blink && !is_cronet_build) {
+@@ -1175,7 +1183,7 @@ if (use_blink && !is_cronet_build) {
data_deps += [ "//third_party/breakpad:dump_syms($host_toolchain)" ]
}
@@ -78,7 +78,7 @@
data_deps += [ "//third_party/breakpad:dump_syms($host_toolchain)" ]
}
-@@ -1624,7 +1632,7 @@ group("chromium_builder_perf") {
+@@ -1616,7 +1624,7 @@ group("chromium_builder_perf") {
data_deps += [ "//chrome/test:performance_browser_tests" ]
}
diff --git a/www/ungoogled-chromium/files/patch-apps_ui_views_app__window__frame__view.cc b/www/ungoogled-chromium/files/patch-apps_ui_views_app__window__frame__view.cc
index 91ad45d16a5a..9060c0d436ee 100644
--- a/www/ungoogled-chromium/files/patch-apps_ui_views_app__window__frame__view.cc
+++ b/www/ungoogled-chromium/files/patch-apps_ui_views_app__window__frame__view.cc
@@ -1,6 +1,6 @@
---- apps/ui/views/app_window_frame_view.cc.orig 2023-12-23 12:33:28 UTC
+--- apps/ui/views/app_window_frame_view.cc.orig 2024-02-03 15:42:55 UTC
+++ apps/ui/views/app_window_frame_view.cc
-@@ -149,7 +149,7 @@ gfx::Rect AppWindowFrameView::GetWindowBoundsForClient
+@@ -148,7 +148,7 @@ gfx::Rect AppWindowFrameView::GetWindowBoundsForClient
gfx::Rect window_bounds = client_bounds;
// TODO(crbug.com/1052397): Revisit once build flag switch of lacros-chrome is
// complete.
diff --git a/www/ungoogled-chromium/files/patch-base_BUILD.gn b/www/ungoogled-chromium/files/patch-base_BUILD.gn
index a9c528e7a0b8..16ea2291458f 100644
--- a/www/ungoogled-chromium/files/patch-base_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-base_BUILD.gn
@@ -1,4 +1,4 @@
---- base/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- base/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ base/BUILD.gn
@@ -186,7 +186,7 @@ buildflag_header("ios_cronet_buildflags") {
flags = [ "CRONET_BUILD=$is_cronet_build" ]
@@ -9,7 +9,7 @@
buildflag_header("message_pump_buildflags") {
header = "message_pump_buildflags.h"
header_dir = "base/message_loop"
-@@ -1105,11 +1105,23 @@ component("base") {
+@@ -1098,11 +1098,23 @@ component("base") {
# Needed for <atomic> if using newer C++ library than sysroot, except if
# building inside the cros_sdk environment - use host_toolchain as a
# more robust check for this.
@@ -34,7 +34,7 @@
if (use_allocator_shim) {
if (is_apple) {
sources += [ "allocator/early_zone_registration_apple.h" ]
-@@ -1129,7 +1141,7 @@ component("base") {
+@@ -1122,7 +1134,7 @@ component("base") {
# Allow more direct string conversions on platforms with native utf8
# strings
@@ -43,7 +43,7 @@
defines += [ "SYSTEM_NATIVE_UTF8" ]
}
-@@ -2088,6 +2100,22 @@ component("base") {
+@@ -2077,6 +2089,22 @@ component("base") {
]
}
@@ -66,7 +66,7 @@
# iOS
if (is_ios) {
sources += [
-@@ -2220,6 +2248,29 @@ component("base") {
+@@ -2207,6 +2235,29 @@ component("base") {
}
}
@@ -96,7 +96,7 @@
if (use_blink) {
sources += [
"files/file_path_watcher.cc",
-@@ -2230,7 +2281,7 @@ component("base") {
+@@ -2217,7 +2268,7 @@ component("base") {
}
if (dep_libevent) {
@@ -105,7 +105,7 @@
}
if (use_libevent) {
-@@ -3619,7 +3670,7 @@ test("base_unittests") {
+@@ -3615,7 +3666,7 @@ test("base_unittests") {
]
}
@@ -114,7 +114,7 @@
sources += [
"debug/proc_maps_linux_unittest.cc",
"files/scoped_file_linux_unittest.cc",
-@@ -3640,7 +3691,7 @@ test("base_unittests") {
+@@ -3637,7 +3688,7 @@ test("base_unittests") {
"posix/file_descriptor_shuffle_unittest.cc",
"posix/unix_domain_socket_unittest.cc",
]
@@ -123,7 +123,7 @@
sources += [
"profiler/stack_base_address_posix_unittest.cc",
"profiler/stack_copier_signal_unittest.cc",
-@@ -3651,7 +3702,7 @@ test("base_unittests") {
+@@ -3648,7 +3699,7 @@ test("base_unittests") {
# Allow more direct string conversions on platforms with native utf8
# strings
@@ -132,7 +132,7 @@
defines += [ "SYSTEM_NATIVE_UTF8" ]
}
-@@ -3913,7 +3964,7 @@ test("base_unittests") {
+@@ -3909,7 +3960,7 @@ test("base_unittests") {
}
}
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc.gni b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc.gni
index 042ffbd2d64c..dcc48c6c3a9b 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc.gni
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_partition__alloc.gni
@@ -1,6 +1,6 @@
---- base/allocator/partition_allocator/partition_alloc.gni.orig 2023-11-04 07:08:51 UTC
+--- base/allocator/partition_allocator/partition_alloc.gni.orig 2024-02-03 15:42:55 UTC
+++ base/allocator/partition_allocator/partition_alloc.gni
-@@ -319,7 +319,7 @@ declare_args() {
+@@ -333,7 +333,7 @@ declare_args() {
# pkeys support is explicitly disabled in all Cronet builds, as some test
# dependencies that use partition_allocator are compiled in AOSP against a
# version of glibc that does not include pkeys syscall numbers.
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_BUILD.gn b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_BUILD.gn
index 1ea41bbd5f5f..05b7ea1ba80d 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_BUILD.gn
@@ -1,11 +1,11 @@
---- base/allocator/partition_allocator/src/partition_alloc/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- base/allocator/partition_allocator/src/partition_alloc/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ base/allocator/partition_allocator/src/partition_alloc/BUILD.gn
-@@ -582,7 +582,7 @@ source_set("allocator_shim") {
- ]
- configs += [ ":mac_no_default_new_delete_symbols" ]
+@@ -855,7 +855,7 @@ if (is_clang_or_gcc) {
+ configs -= [ "//build/config/compiler:enable_arc" ]
+ }
}
- if (is_chromeos || is_linux) {
+ if ((is_chromeos || is_linux) && !is_bsd) {
- sources += [
+ shim_headers += [
"shim/allocator_shim_override_cpp_symbols.h",
"shim/allocator_shim_override_glibc_weak_symbols.h",
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_debug_stack__trace__posix.cc b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_debug_stack__trace__posix.cc
index 7a76584417bf..1691ac5222bf 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_debug_stack__trace__posix.cc
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_debug_stack__trace__posix.cc
@@ -1,4 +1,4 @@
---- base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_posix.cc.orig 2023-12-23 12:33:28 UTC
+--- base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_posix.cc.orig 2024-02-03 15:42:55 UTC
+++ base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/debug/stack_trace_posix.cc
@@ -12,11 +12,11 @@
#include <string.h>
@@ -11,10 +11,10 @@
-#if BUILDFLAG(IS_APPLE)
+#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_BSD)
- #define HAVE_DLADDR
#include <dlfcn.h>
#endif
-@@ -25,7 +25,7 @@ namespace partition_alloc::internal::base::debug {
+
+@@ -24,7 +24,7 @@ namespace partition_alloc::internal::base::debug {
namespace {
@@ -23,7 +23,7 @@
constexpr size_t kBufferSize = 4096u;
-@@ -359,7 +359,7 @@ void PrintStackTraceInternal(const void** trace, size_
+@@ -358,7 +358,7 @@ void PrintStackTraceInternal(const void** trace, size_
}
#endif // !BUILDFLAG(IS_APPLE)
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_threading_platform__thread__posix.cc b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_threading_platform__thread__posix.cc
index 4dc47dee29c3..04d72ef5dc44 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_threading_platform__thread__posix.cc
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__base_threading_platform__thread__posix.cc
@@ -1,8 +1,8 @@
---- base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_posix.cc.orig 2023-12-23 12:33:28 UTC
+--- base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_posix.cc.orig 2024-02-03 15:42:55 UTC
+++ base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_posix.cc
@@ -17,7 +17,7 @@
- #include "base/allocator/partition_allocator/src/partition_alloc/partition_alloc_base/threading/platform_thread_internal_posix.h"
- #include "build/build_config.h"
+ #include "partition_alloc/partition_alloc_base/logging.h"
+ #include "partition_alloc/partition_alloc_base/threading/platform_thread_internal_posix.h"
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__config.h b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__config.h
index 85a99bacad9e..2c7dcf90d524 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__config.h
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__config.h
@@ -1,15 +1,6 @@
---- base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h.orig 2023-12-23 12:33:28 UTC
+--- base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h.orig 2024-02-03 15:42:55 UTC
+++ base/allocator/partition_allocator/src/partition_alloc/partition_alloc_config.h
-@@ -94,7 +94,7 @@ static_assert(sizeof(void*) != 8, "");
- // POSIX is not only UNIX, e.g. macOS and other OSes. We do use Linux-specific
- // features such as futex(2).
- #define PA_CONFIG_HAS_LINUX_KERNEL() \
-- (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID))
-+ (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_BSD))
-
- // On some platforms, we implement locking by spinning in userspace, then going
- // into the kernel only if there is contention. This requires platform support,
-@@ -241,7 +241,7 @@ constexpr bool kUseLazyCommit = false;
+@@ -255,7 +255,7 @@ constexpr bool kUseLazyCommit = false;
// On these platforms, lock all the partitions before fork(), and unlock after.
// This may be required on more platforms in the future.
#define PA_CONFIG_HAS_ATFORK_HANDLER() \
@@ -18,12 +9,12 @@
// PartitionAlloc uses PartitionRootEnumerator to acquire all
// PartitionRoots at BeforeFork and to release at AfterFork.
-@@ -288,7 +288,7 @@ constexpr bool kUseLazyCommit = false;
+@@ -301,7 +301,7 @@ constexpr bool kUseLazyCommit = false;
+ //
// Also enabled on ARM64 macOS, as the 16kiB pages on this platform lead to
// larger slot spans.
- #define PA_CONFIG_PREFER_SMALLER_SLOT_SPANS() \
-- (BUILDFLAG(IS_LINUX) || (BUILDFLAG(IS_MAC) && defined(ARCH_CPU_ARM64)))
-+ (BUILDFLAG(IS_LINUX) || (BUILDFLAG(IS_MAC) && defined(ARCH_CPU_ARM64)) || BUILDFLAG(IS_BSD))
-
- // Enable shadow metadata.
- //
+-#if BUILDFLAG(IS_LINUX) || (BUILDFLAG(IS_MAC) && defined(ARCH_CPU_ARM64))
++#if BUILDFLAG(IS_LINUX) || (BUILDFLAG(IS_MAC) && defined(ARCH_CPU_ARM64)) || BUILDFLAG(IS_BSD)
+ #define PA_CONFIG_PREFER_SMALLER_SLOT_SPANS() 1
+ #else
+ #define PA_CONFIG_PREFER_SMALLER_SLOT_SPANS() 0
diff --git a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__constants.h b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__constants.h
index b2aeb5832373..69d14e15a403 100644
--- a/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__constants.h
+++ b/www/ungoogled-chromium/files/patch-base_allocator_partition__allocator_src_partition__alloc_partition__alloc__constants.h
@@ -1,6 +1,6 @@
---- base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h.orig 2023-12-23 12:33:28 UTC
+--- base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h.orig 2024-02-03 15:42:55 UTC
+++ base/allocator/partition_allocator/src/partition_alloc/partition_alloc_constants.h
-@@ -107,7 +107,7 @@ PartitionPageShift() {
+@@ -111,7 +111,7 @@ PartitionPageShift() {
return 18; // 256 KiB
}
#elif (BUILDFLAG(IS_APPLE) && defined(ARCH_CPU_64_BITS)) || \
@@ -9,7 +9,7 @@
PA_ALWAYS_INLINE PAGE_ALLOCATOR_CONSTANTS_DECLARE_CONSTEXPR size_t
PartitionPageShift() {
return PageAllocationGranularityShift() + 2;
-@@ -309,7 +309,8 @@ constexpr size_t kNumPools = kMaxPoolHandle - 1;
+@@ -313,7 +313,8 @@ constexpr size_t kNumPools = kMaxPoolHandle - 1;
// 8GB for each of the glued pools).
#if BUILDFLAG(HAS_64_BIT_POINTERS)
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_IOS) || \
diff --git a/www/ungoogled-chromium/files/patch-base_compiler__specific.h b/www/ungoogled-chromium/files/patch-base_compiler__specific.h
index 97e912d487ba..993dd32e52f5 100644
--- a/www/ungoogled-chromium/files/patch-base_compiler__specific.h
+++ b/www/ungoogled-chromium/files/patch-base_compiler__specific.h
@@ -1,4 +1,4 @@
---- base/compiler_specific.h.orig 2023-05-05 12:12:41 UTC
+--- base/compiler_specific.h.orig 2024-02-03 15:42:55 UTC
+++ base/compiler_specific.h
@@ -41,9 +41,9 @@
// Annotate a function indicating it should not be inlined.
@@ -12,8 +12,8 @@
#define NOINLINE __attribute__((noinline))
#elif defined(COMPILER_MSVC)
#define NOINLINE __declspec(noinline)
-@@ -51,9 +51,9 @@
- #define NOINLINE
+@@ -60,9 +60,9 @@
+ #define NOOPT
#endif
-#if defined(__clang__) && defined(NDEBUG) && HAS_ATTRIBUTE(always_inline)
@@ -24,7 +24,7 @@
#define ALWAYS_INLINE inline __attribute__((__always_inline__))
#elif defined(COMPILER_MSVC) && defined(NDEBUG)
#define ALWAYS_INLINE __forceinline
-@@ -69,7 +69,7 @@
+@@ -78,7 +78,7 @@
// prevent code folding, see NO_CODE_FOLDING() in base/debug/alias.h.
// Use like:
// NOT_TAIL_CALLED void FooBar();
diff --git a/www/ungoogled-chromium/files/patch-base_files_file__path__watcher.h b/www/ungoogled-chromium/files/patch-base_files_file__path__watcher.h
index 39142906151a..b280ba971e02 100644
--- a/www/ungoogled-chromium/files/patch-base_files_file__path__watcher.h
+++ b/www/ungoogled-chromium/files/patch-base_files_file__path__watcher.h
@@ -1,6 +1,6 @@
---- base/files/file_path_watcher.h.orig 2023-02-11 09:11:04 UTC
+--- base/files/file_path_watcher.h.orig 2024-02-03 15:42:55 UTC
+++ base/files/file_path_watcher.h
-@@ -59,7 +59,7 @@ class BASE_EXPORT FilePathWatcher {
+@@ -105,7 +105,7 @@ class BASE_EXPORT FilePathWatcher {
Type type = Type::kNonRecursive;
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || \
diff --git a/www/ungoogled-chromium/files/patch-base_files_file__path__watcher__unittest.cc b/www/ungoogled-chromium/files/patch-base_files_file__path__watcher__unittest.cc
index ae01847cfb6b..b597267cc7f9 100644
--- a/www/ungoogled-chromium/files/patch-base_files_file__path__watcher__unittest.cc
+++ b/www/ungoogled-chromium/files/patch-base_files_file__path__watcher__unittest.cc
@@ -1,6 +1,6 @@
---- base/files/file_path_watcher_unittest.cc.orig 2023-12-23 12:33:28 UTC
+--- base/files/file_path_watcher_unittest.cc.orig 2024-02-03 15:42:55 UTC
+++ base/files/file_path_watcher_unittest.cc
-@@ -703,7 +703,7 @@ TEST_F(FilePathWatcherTest, WatchDirectory) {
+@@ -836,7 +836,7 @@ TEST_F(FilePathWatcherTest, WatchDirectory) {
}
delegate.RunUntilEventsMatch(event_expecter);
@@ -9,7 +9,7 @@
ASSERT_TRUE(WriteFile(file1, "content v2"));
// Mac implementation does not detect files modified in a directory.
// TODO(https://crbug.com/1432064): Expect that no events are fired on Mac.
-@@ -1639,7 +1639,7 @@ namespace {
+@@ -1772,7 +1772,7 @@ namespace {
enum Permission { Read, Write, Execute };
@@ -18,7 +18,7 @@
bool ChangeFilePermissions(const FilePath& path, Permission perm, bool allow) {
struct stat stat_buf;
-@@ -1673,7 +1673,7 @@ bool ChangeFilePermissions(const FilePath& path, Permi
+@@ -1806,7 +1806,7 @@ bool ChangeFilePermissions(const FilePath& path, Permi
} // namespace
diff --git a/www/ungoogled-chromium/files/patch-base_files_file__util__posix.cc b/www/ungoogled-chromium/files/patch-base_files_file__util__posix.cc
index 66d45ef47f19..55558c3efc75 100644
--- a/www/ungoogled-chromium/files/patch-base_files_file__util__posix.cc
+++ b/www/ungoogled-chromium/files/patch-base_files_file__util__posix.cc
@@ -1,6 +1,6 @@
---- base/files/file_util_posix.cc.orig 2023-11-04 07:08:51 UTC
+--- base/files/file_util_posix.cc.orig 2024-02-03 15:42:55 UTC
+++ base/files/file_util_posix.cc
-@@ -756,33 +756,34 @@ bool CreateDirectoryAndGetError(const FilePath& full_p
+@@ -772,33 +772,34 @@ bool CreateDirectoryAndGetError(const FilePath& full_p
File::Error* error) {
ScopedBlockingCall scoped_blocking_call(
FROM_HERE, BlockingType::MAY_BLOCK); // For call to mkdir().
diff --git a/www/ungoogled-chromium/files/patch-base_logging__unittest.cc b/www/ungoogled-chromium/files/patch-base_logging__unittest.cc
index 5264a5d646fe..ca8f3e9ab943 100644
--- a/www/ungoogled-chromium/files/patch-base_logging__unittest.cc
+++ b/www/ungoogled-chromium/files/patch-base_logging__unittest.cc
@@ -1,4 +1,4 @@
---- base/logging_unittest.cc.orig 2023-02-11 09:11:04 UTC
+--- base/logging_unittest.cc.orig 2024-02-03 15:42:55 UTC
+++ base/logging_unittest.cc
@@ -32,7 +32,7 @@
#include "base/posix/eintr_wrapper.h"
@@ -9,7 +9,7 @@
#include <ucontext.h>
#endif
-@@ -575,14 +575,18 @@ void CheckCrashTestSighandler(int, siginfo_t* info, vo
+@@ -570,14 +570,18 @@ void CheckCrashTestSighandler(int, siginfo_t* info, vo
// need the arch-specific boilerplate below, which is inspired by breakpad.
// At the same time, on OSX, ucontext.h is deprecated but si_addr works fine.
uintptr_t crash_addr = 0;
diff --git a/www/ungoogled-chromium/files/patch-base_native__library__unittest.cc b/www/ungoogled-chromium/files/patch-base_native__library__unittest.cc
index 366ca9740016..8d6f7841fee7 100644
--- a/www/ungoogled-chromium/files/patch-base_native__library__unittest.cc
+++ b/www/ungoogled-chromium/files/patch-base_native__library__unittest.cc
@@ -1,6 +1,6 @@
---- base/native_library_unittest.cc.orig 2022-10-29 17:50:56 UTC
+--- base/native_library_unittest.cc.orig 2024-02-03 15:42:55 UTC
+++ base/native_library_unittest.cc
-@@ -134,7 +134,7 @@ TEST(NativeLibraryTest, LoadLibrary) {
+@@ -135,7 +135,7 @@ TEST(NativeLibraryTest, LoadLibrary) {
// versions with respect to symbol resolution scope.
// TSan and MSan error out on RTLD_DEEPBIND, https://crbug.com/705255
#if !BUILDFLAG(IS_ANDROID) && !defined(THREAD_SANITIZER) && \
diff --git a/www/ungoogled-chromium/files/patch-base_process_memory__linux.cc b/www/ungoogled-chromium/files/patch-base_process_memory__linux.cc
index 2df2a75c31ae..e2285cb45b47 100644
--- a/www/ungoogled-chromium/files/patch-base_process_memory__linux.cc
+++ b/www/ungoogled-chromium/files/patch-base_process_memory__linux.cc
@@ -1,6 +1,6 @@
---- base/process/memory_linux.cc.orig 2023-02-11 09:11:04 UTC
+--- base/process/memory_linux.cc.orig 2024-02-03 15:42:55 UTC
+++ base/process/memory_linux.cc
-@@ -20,6 +20,7 @@
+@@ -28,6 +28,7 @@ void __libc_free(void*);
namespace base {
@@ -8,7 +8,7 @@
namespace {
void ReleaseReservationOrTerminate() {
-@@ -29,12 +30,14 @@ void ReleaseReservationOrTerminate() {
+@@ -37,12 +38,14 @@ void ReleaseReservationOrTerminate() {
}
} // namespace
@@ -23,7 +23,7 @@
// Set the new-out of memory handler.
std::set_new_handler(&ReleaseReservationOrTerminate);
// If we're using glibc's allocator, the above functions will override
-@@ -43,8 +46,10 @@ void EnableTerminationOnOutOfMemory() {
+@@ -51,8 +54,10 @@ void EnableTerminationOnOutOfMemory() {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
allocator_shim::SetCallNewHandlerOnMallocFailure(true);
#endif
@@ -34,7 +34,7 @@
// ScopedAllowBlocking() has private constructor and it can only be used in
// friend classes/functions. Declaring a class is easier in this situation to
// avoid adding more dependency to thread_restrictions.h because of the
-@@ -104,6 +109,7 @@ bool AdjustOOMScoreHelper::AdjustOOMScore(ProcessId pr
+@@ -112,6 +117,7 @@ bool AdjustOOMScoreHelper::AdjustOOMScore(ProcessId pr
bool AdjustOOMScore(ProcessId process, int score) {
return AdjustOOMScoreHelper::AdjustOOMScore(process, score);
}
diff --git a/www/ungoogled-chromium/files/patch-base_process_process__handle.h b/www/ungoogled-chromium/files/patch-base_process_process__handle.h
index 9015f4dbd03d..af8d6ebc83c0 100644
--- a/www/ungoogled-chromium/files/patch-base_process_process__handle.h
+++ b/www/ungoogled-chromium/files/patch-base_process_process__handle.h
@@ -1,6 +1,6 @@
---- base/process/process_handle.h.orig 2022-10-01 07:40:07 UTC
+--- base/process/process_handle.h.orig 2024-02-03 15:42:55 UTC
+++ base/process/process_handle.h
-@@ -106,7 +106,7 @@ BASE_EXPORT ProcessId GetCurrentProcId();
+@@ -86,7 +86,7 @@ BASE_EXPORT ProcessId GetCurrentProcId();
// processes may be reused.
BASE_EXPORT UniqueProcId GetUniqueIdForProcess();
diff --git a/www/ungoogled-chromium/files/patch-base_rand__util.h b/www/ungoogled-chromium/files/patch-base_rand__util.h
index 8588ca910ef5..114f20ea622e 100644
--- a/www/ungoogled-chromium/files/patch-base_rand__util.h
+++ b/www/ungoogled-chromium/files/patch-base_rand__util.h
@@ -1,6 +1,6 @@
---- base/rand_util.h.orig 2023-12-23 12:33:28 UTC
+--- base/rand_util.h.orig 2024-02-03 15:42:55 UTC
+++ base/rand_util.h
-@@ -136,7 +136,7 @@ void RandomShuffle(Itr first, Itr last) {
+@@ -146,7 +146,7 @@ void RandomShuffle(Itr first, Itr last) {
std::shuffle(first, last, RandomBitGenerator());
}
diff --git a/www/ungoogled-chromium/files/patch-base_system_sys__info.h b/www/ungoogled-chromium/files/patch-base_system_sys__info.h
index e25e3466169b..4ba8bcc9e887 100644
--- a/www/ungoogled-chromium/files/patch-base_system_sys__info.h
+++ b/www/ungoogled-chromium/files/patch-base_system_sys__info.h
@@ -1,6 +1,6 @@
---- base/system/sys_info.h.orig 2023-12-23 12:33:28 UTC
+--- base/system/sys_info.h.orig 2024-02-03 15:42:55 UTC
+++ base/system/sys_info.h
-@@ -321,6 +321,8 @@ class BASE_EXPORT SysInfo {
+@@ -318,6 +318,8 @@ class BASE_EXPORT SysInfo {
static void ResetCpuSecurityMitigationsEnabledForTesting();
#endif
@@ -9,7 +9,7 @@
private:
friend class test::ScopedAmountOfPhysicalMemoryOverride;
FRIEND_TEST_ALL_PREFIXES(SysInfoTest, AmountOfAvailablePhysicalMemory);
-@@ -333,7 +335,7 @@ class BASE_EXPORT SysInfo {
+@@ -330,7 +332,7 @@ class BASE_EXPORT SysInfo {
static HardwareInfo GetHardwareInfoSync();
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || \
diff --git a/www/ungoogled-chromium/files/patch-base_test_launcher_test__launcher.cc b/www/ungoogled-chromium/files/patch-base_test_launcher_test__launcher.cc
index 4b5d43684baf..68696a0e50d2 100644
--- a/www/ungoogled-chromium/files/patch-base_test_launcher_test__launcher.cc
+++ b/www/ungoogled-chromium/files/patch-base_test_launcher_test__launcher.cc
@@ -1,6 +1,6 @@
---- base/test/launcher/test_launcher.cc.orig 2023-11-04 07:08:51 UTC
+--- base/test/launcher/test_launcher.cc.orig 2024-02-03 15:42:55 UTC
+++ base/test/launcher/test_launcher.cc
-@@ -70,6 +70,7 @@
+@@ -69,6 +69,7 @@
#include "testing/gtest/include/gtest/gtest.h"
#if BUILDFLAG(IS_POSIX)
diff --git a/www/ungoogled-chromium/files/patch-build_config_compiler_BUILD.gn b/www/ungoogled-chromium/files/patch-build_config_compiler_BUILD.gn
index e34084854863..37c34ba3af4b 100644
--- a/www/ungoogled-chromium/files/patch-build_config_compiler_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-build_config_compiler_BUILD.gn
@@ -1,4 +1,4 @@
---- build/config/compiler/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- build/config/compiler/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ build/config/compiler/BUILD.gn
@@ -202,7 +202,7 @@ declare_args() {
# This greatly reduces the size of debug builds, at the cost of
@@ -56,7 +56,7 @@
# TODO(https://crbug.com/972449): turn on for ChromeOS when that
# toolchain has this flag.
# We only use one version of LLVM within a build so there's no need to
-@@ -1173,7 +1180,7 @@ config("compiler_cpu_abi") {
+@@ -1175,7 +1182,7 @@ config("compiler_cpu_abi") {
]
}
} else if (current_cpu == "arm") {
@@ -65,7 +65,7 @@
!(is_chromeos_lacros && is_chromeos_device)) {
cflags += [ "--target=arm-linux-gnueabihf" ]
ldflags += [ "--target=arm-linux-gnueabihf" ]
-@@ -1188,7 +1195,7 @@ config("compiler_cpu_abi") {
+@@ -1190,7 +1197,7 @@ config("compiler_cpu_abi") {
cflags += [ "-mtune=$arm_tune" ]
}
} else if (current_cpu == "arm64") {
@@ -74,7 +74,7 @@
!(is_chromeos_lacros && is_chromeos_device)) {
cflags += [ "--target=aarch64-linux-gnu" ]
ldflags += [ "--target=aarch64-linux-gnu" ]
-@@ -1523,7 +1530,7 @@ config("compiler_deterministic") {
+@@ -1525,7 +1532,7 @@ config("compiler_deterministic") {
# different build directory like "out/feature_a" and "out/feature_b" if
# we build same files with same compile flag.
# Other paths are already given in relative, no need to normalize them.
@@ -83,7 +83,7 @@
# TODO(https://crbug.com/1231236): Use -ffile-compilation-dir= here.
cflags += [
"-Xclang",
-@@ -1575,7 +1582,7 @@ config("compiler_deterministic") {
+@@ -1577,7 +1584,7 @@ config("compiler_deterministic") {
}
config("clang_revision") {
@@ -92,7 +92,7 @@
update_args = [
"--print-revision",
"--verify-version=$clang_version",
-@@ -1860,7 +1867,7 @@ config("default_warnings") {
+@@ -1862,7 +1869,7 @@ config("default_warnings") {
"-Wno-ignored-pragma-optimize",
]
@@ -101,7 +101,7 @@
cflags += [
# TODO(crbug.com/1343975) Evaluate and possibly enable.
"-Wno-deprecated-builtins",
-@@ -2066,7 +2073,7 @@ config("no_chromium_code") {
+@@ -2065,7 +2072,7 @@ config("no_chromium_code") {
# third-party libraries.
"-Wno-c++11-narrowing",
]
@@ -110,7 +110,7 @@
cflags += [
# Disabled for similar reasons as -Wunused-variable.
"-Wno-unused-but-set-variable",
-@@ -2595,7 +2602,7 @@ config("afdo_optimize_size") {
+@@ -2594,7 +2601,7 @@ config("afdo_optimize_size") {
# There are some targeted places that AFDO regresses, so we provide a separate
# config to allow AFDO to be disabled per-target.
config("afdo") {
@@ -119,7 +119,7 @@
cflags = []
if (clang_emit_debug_info_for_profiling) {
# Add the following flags to generate debug info for profiling.
-@@ -2622,7 +2629,7 @@ config("afdo") {
+@@ -2621,7 +2628,7 @@ config("afdo") {
cflags += [ "-Wno-backend-plugin" ]
inputs = [ _clang_sample_profile ]
}
@@ -128,7 +128,7 @@
cflags = [ "-fauto-profile=${auto_profile_path}" ]
inputs = [ auto_profile_path ]
}
-@@ -2786,7 +2793,8 @@ config("symbols") {
+@@ -2785,7 +2792,8 @@ config("symbols") {
configs += [ "//build/config:compress_debug_sections" ]
}
diff --git a/www/ungoogled-chromium/files/patch-build_config_linux_atspi2_BUILD.gn b/www/ungoogled-chromium/files/patch-build_config_linux_atspi2_BUILD.gn
deleted file mode 100644
index 7db5ce0b19d7..000000000000
--- a/www/ungoogled-chromium/files/patch-build_config_linux_atspi2_BUILD.gn
+++ /dev/null
@@ -1,25 +0,0 @@
---- build/config/linux/atspi2/BUILD.gn.orig 2023-11-04 07:08:51 UTC
-+++ build/config/linux/atspi2/BUILD.gn
-@@ -21,10 +21,21 @@ if (use_atk) {
- minor = atspi_version[1]
- micro = atspi_version[2]
-
-+ # These aren't necessarily used if atspi is not old enough to require them.
-+ # Also, gn considers variables unused if the only use of them is
-+ # short-circuited away, so for example if major == 2 and minor == 48, micro
-+ # would be unused.
-+ not_needed([
-+ "major",
-+ "minor",
-+ "micro",
-+ ])
-+
- # ATSPI 2.49.90 now defines these for us and it's an error for us to
- # redefine them on the compiler command line.
- # See ATSPI 927344a34cd5bf81fc64da4968241735ecb4f03b
-- if (minor < 49 || (minor == 49 && micro < 90)) {
-+ if (major < 2 || (major == 2 && minor < 49) ||
-+ (major == 2 && minor == 49 && micro < 90)) {
- defines = [
- "ATSPI_MAJOR_VERSION=$major",
- "ATSPI_MINOR_VERSION=$minor",
diff --git a/www/ungoogled-chromium/files/patch-cc_BUILD.gn b/www/ungoogled-chromium/files/patch-cc_BUILD.gn
index a000d3ec36d6..fd0d5ae41680 100644
--- a/www/ungoogled-chromium/files/patch-cc_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-cc_BUILD.gn
@@ -1,4 +1,4 @@
---- cc/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- cc/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ cc/BUILD.gn
@@ -676,7 +676,7 @@ cc_test_static_library("test_support") {
if (enable_vulkan) {
@@ -9,7 +9,7 @@
data_deps = [ "//third_party/mesa_headers" ]
}
if (skia_use_dawn) {
-@@ -931,7 +931,6 @@ cc_test("cc_unittests") {
+@@ -935,7 +935,6 @@ cc_test("cc_unittests") {
data = [ "//components/test/data/viz/" ]
data_deps = [
"//testing/buildbot/filters:cc_unittests_filters",
diff --git a/www/ungoogled-chromium/files/patch-cc_base_features.cc b/www/ungoogled-chromium/files/patch-cc_base_features.cc
index d0a1240cdb76..4f02e6d54c5c 100644
--- a/www/ungoogled-chromium/files/patch-cc_base_features.cc
+++ b/www/ungoogled-chromium/files/patch-cc_base_features.cc
@@ -1,6 +1,6 @@
---- cc/base/features.cc.orig 2023-11-04 07:08:51 UTC
+--- cc/base/features.cc.orig 2024-02-03 15:42:55 UTC
+++ cc/base/features.cc
-@@ -84,7 +84,7 @@ BASE_FEATURE(kNormalPriorityImageDecoding,
+@@ -88,7 +88,7 @@ BASE_FEATURE(kNormalPriorityImageDecoding,
// be using a separate flag to control the launch on GL.
BASE_FEATURE(kUseDMSAAForTiles,
"UseDMSAAForTiles",
diff --git a/www/ungoogled-chromium/files/patch-chrome_app_app__management__strings.grdp b/www/ungoogled-chromium/files/patch-chrome_app_app__management__strings.grdp
index 414d9b737109..e03479dc5c5b 100644
--- a/www/ungoogled-chromium/files/patch-chrome_app_app__management__strings.grdp
+++ b/www/ungoogled-chromium/files/patch-chrome_app_app__management__strings.grdp
@@ -1,6 +1,6 @@
---- chrome/app/app_management_strings.grdp.orig 2023-12-23 12:33:28 UTC
+--- chrome/app/app_management_strings.grdp.orig 2024-02-03 15:42:55 UTC
+++ chrome/app/app_management_strings.grdp
-@@ -194,7 +194,7 @@
+@@ -197,7 +197,7 @@
You can open and edit supported files with this app from Finder or other apps. To control which files open this app by default, <ph name="BEGIN_LINK">&lt;a href="#"&gt;</ph>learn how to set default apps on your device<ph name="END_LINK">&lt;/a&gt;</ph>.
</message>
</if>
diff --git a/www/ungoogled-chromium/files/patch-chrome_app_chrome__main.cc b/www/ungoogled-chromium/files/patch-chrome_app_chrome__main.cc
index 67152198dec3..e3a135d7cf5f 100644
--- a/www/ungoogled-chromium/files/patch-chrome_app_chrome__main.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_app_chrome__main.cc
@@ -1,4 +1,4 @@
---- chrome/app/chrome_main.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/app/chrome_main.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/app/chrome_main.cc
@@ -29,11 +29,11 @@
#include "chrome/app/notification_metrics.h"
@@ -14,7 +14,16 @@
#include "chrome/app/chrome_main_linux.h"
#endif
-@@ -138,7 +138,7 @@ int ChromeMain(int argc, const char** argv) {
+@@ -81,7 +81,7 @@ int ChromeMain(int argc, const char** argv) {
+ #error Unknown platform.
+ #endif
+
+-#if BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ PossiblyDetermineFallbackChromeChannel(argv[0]);
+ #endif
+
+@@ -142,7 +142,7 @@ int ChromeMain(int argc, const char** argv) {
SetUpBundleOverrides();
#endif
@@ -23,7 +32,7 @@
AppendExtraArgumentsToCommandLine(command_line);
#endif
-@@ -167,7 +167,7 @@ int ChromeMain(int argc, const char** argv) {
+@@ -171,7 +171,7 @@ int ChromeMain(int argc, const char** argv) {
headless_mode_handle = headless::InitHeadlessMode();
} else {
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_app_chrome__main__delegate.cc b/www/ungoogled-chromium/files/patch-chrome_app_chrome__main__delegate.cc
index 10fa79af0ded..1c4bd9a9c12e 100644
--- a/www/ungoogled-chromium/files/patch-chrome_app_chrome__main__delegate.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_app_chrome__main__delegate.cc
@@ -1,6 +1,6 @@
---- chrome/app/chrome_main_delegate.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/app/chrome_main_delegate.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/app/chrome_main_delegate.cc
-@@ -145,7 +145,7 @@
+@@ -146,7 +146,7 @@
#include "components/about_ui/credit_utils.h"
#endif
@@ -9,7 +9,7 @@
#include "components/nacl/common/nacl_paths.h"
#include "components/nacl/zygote/nacl_fork_delegate_linux.h"
#endif
-@@ -189,16 +189,16 @@
+@@ -190,16 +190,16 @@
#include "v8/include/v8.h"
#endif
@@ -29,7 +29,7 @@
#include "chrome/browser/policy/policy_path_parser.h"
#include "components/crash/core/app/crashpad.h"
#endif
-@@ -337,7 +337,7 @@ void AdjustLinuxOOMScore(const std::string& process_ty
+@@ -338,7 +338,7 @@ void AdjustLinuxOOMScore(const std::string& process_ty
// and resources loaded.
bool SubprocessNeedsResourceBundle(const std::string& process_type) {
return
@@ -38,7 +38,7 @@
// The zygote process opens the resources for the renderers.
process_type == switches::kZygoteProcess ||
#endif
-@@ -422,7 +422,7 @@ bool HandleVersionSwitches(const base::CommandLine& co
+@@ -423,7 +423,7 @@ bool HandleVersionSwitches(const base::CommandLine& co
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -47,7 +47,7 @@
// Show the man page if --help or -h is on the command line.
void HandleHelpSwitches(const base::CommandLine& command_line) {
if (command_line.HasSwitch(switches::kHelp) ||
-@@ -600,7 +600,7 @@ void InitializeUserDataDir(base::CommandLine* command_
+@@ -601,7 +601,7 @@ void InitializeUserDataDir(base::CommandLine* command_
std::string process_type =
command_line->GetSwitchValueASCII(switches::kProcessType);
@@ -56,7 +56,7 @@
// On Linux, Chrome does not support running multiple copies under different
// DISPLAYs, so the profile directory can be specified in the environment to
// support the virtual desktop use-case.
-@@ -690,7 +690,7 @@ void RecordMainStartupMetrics(base::TimeTicks applicat
+@@ -691,7 +691,7 @@ void RecordMainStartupMetrics(base::TimeTicks applicat
#endif
#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || \
@@ -65,7 +65,7 @@
// Record the startup process creation time on supported platforms. On Android
// this is recorded in ChromeMainDelegateAndroid.
startup_metric_utils::GetCommon().RecordStartupProcessCreationTime(
-@@ -1044,7 +1044,7 @@ void ChromeMainDelegate::CommonEarlyInitialization(Inv
+@@ -1048,7 +1048,7 @@ void ChromeMainDelegate::CommonEarlyInitialization(Inv
base::InitializeCpuReductionExperiment();
base::sequence_manager::internal::SequenceManagerImpl::InitializeFeatures();
base::sequence_manager::internal::ThreadController::InitializeFeatures();
@@ -74,7 +74,7 @@
base::MessagePumpLibevent::InitializeFeatures();
#elif BUILDFLAG(IS_MAC)
base::PlatformThread::InitFeaturesPostFieldTrial();
-@@ -1196,7 +1196,7 @@ absl::optional<int> ChromeMainDelegate::BasicStartupCo
+@@ -1200,7 +1200,7 @@ absl::optional<int> ChromeMainDelegate::BasicStartupCo
// TODO(crbug.com/1052397): Revisit the macro expression once build flag
// switch of lacros-chrome is complete.
@@ -83,7 +83,7 @@
// This will directly exit if the user asked for help.
HandleHelpSwitches(command_line);
#endif
-@@ -1226,7 +1226,7 @@ absl::optional<int> ChromeMainDelegate::BasicStartupCo
+@@ -1230,7 +1230,7 @@ absl::optional<int> ChromeMainDelegate::BasicStartupCo
#if BUILDFLAG(IS_CHROMEOS)
chromeos::dbus_paths::RegisterPathProvider();
#endif
@@ -92,7 +92,7 @@
nacl::RegisterPathProvider();
#endif
-@@ -1637,7 +1637,7 @@ void ChromeMainDelegate::PreSandboxStartup() {
+@@ -1641,7 +1641,7 @@ void ChromeMainDelegate::PreSandboxStartup() {
CHECK(!loaded_locale.empty()) << "Locale could not be found for " << locale;
}
@@ -101,7 +101,7 @@
// Zygote needs to call InitCrashReporter() in RunZygote().
if (process_type != switches::kZygoteProcess) {
if (command_line.HasSwitch(switches::kPreCrashpadCrashTest)) {
-@@ -1746,7 +1746,7 @@ absl::variant<int, content::MainFunctionParams> Chrome
+@@ -1750,7 +1750,7 @@ absl::variant<int, content::MainFunctionParams> Chrome
// This entry is not needed on Linux, where the NaCl loader
// process is launched via nacl_helper instead.
diff --git a/www/ungoogled-chromium/files/patch-chrome_app_chromium__strings.grd b/www/ungoogled-chromium/files/patch-chrome_app_chromium__strings.grd
index 15677f01c8b2..c75fad5e0529 100644
--- a/www/ungoogled-chromium/files/patch-chrome_app_chromium__strings.grd
+++ b/www/ungoogled-chromium/files/patch-chrome_app_chromium__strings.grd
@@ -1,4 +1,4 @@
---- chrome/app/chromium_strings.grd.orig 2023-12-23 12:33:28 UTC
+--- chrome/app/chromium_strings.grd.orig 2024-02-03 15:42:55 UTC
+++ chrome/app/chromium_strings.grd
@@ -315,7 +315,7 @@ If you update this file, be sure also to update google
Welcome to Chromium; new browser window opened
@@ -9,7 +9,7 @@
<message name="IDS_FIRST_RUN_DIALOG_WINDOW_TITLE" desc="Window title of First Run dialog on Mac and Linux, displayed in title bar">
Welcome to Chromium
</message>
-@@ -465,7 +465,7 @@ If you update this file, be sure also to update google
+@@ -483,7 +483,7 @@ If you update this file, be sure also to update google
To get future Chromium updates, you'll need Windows 10 or later. This computer is using Windows 8.1.
</message>
</if>
@@ -18,7 +18,7 @@
<message name="IDS_LINUX_OBSOLETE" desc="A message displayed on an at-launch infobar and about:help warning the user that the OS version they are using is no longer supported.">
Chromium may not function correctly because it is no longer supported on this Linux distribution
</message>
-@@ -892,7 +892,7 @@ Permissions you've already given to websites and apps
+@@ -920,7 +920,7 @@ Permissions you've already given to websites and apps
</message>
</if>
@@ -27,7 +27,7 @@
<message name="IDS_RELAUNCH_TO_UPDATE_ALT" desc="Alternate text label of the relaunch to update Chrome menu item" translateable="false">
Not used in Chromium. Placeholder to keep resource maps in sync.
</message>
-@@ -1277,7 +1277,7 @@ Permissions you've already given to websites and apps
+@@ -1299,7 +1299,7 @@ Permissions you've already given to websites and apps
</message>
</if>
diff --git a/www/ungoogled-chromium/files/patch-chrome_app_generated__resources.grd b/www/ungoogled-chromium/files/patch-chrome_app_generated__resources.grd
index dd28a6a845f5..5e1c92e4ad19 100644
--- a/www/ungoogled-chromium/files/patch-chrome_app_generated__resources.grd
+++ b/www/ungoogled-chromium/files/patch-chrome_app_generated__resources.grd
@@ -1,4 +1,4 @@
---- chrome/app/generated_resources.grd.orig 2023-12-23 12:33:28 UTC
+--- chrome/app/generated_resources.grd.orig 2024-02-03 15:42:55 UTC
+++ chrome/app/generated_resources.grd
@@ -2,7 +2,7 @@
@@ -9,7 +9,7 @@
for making strings OS specific. Other platform defines such as use_titlecase
are declared in tools/grit/grit_rule.gni.
-->
-@@ -3609,7 +3609,7 @@ are declared in tools/grit/grit_rule.gni.
+@@ -3665,7 +3665,7 @@ are declared in tools/grit/grit_rule.gni.
</if>
<!-- Bluetooth Device Credentials (i.e. PIN/Passkey) dialog -->
@@ -18,7 +18,7 @@
<message name="IDS_BLUETOOTH_DEVICE_CREDENTIALS_TITLE" desc="Title of the Bluetooth device credentials prompt dialog.">
Device Credentials
</message>
-@@ -5494,7 +5494,7 @@ are declared in tools/grit/grit_rule.gni.
+@@ -5553,7 +5553,7 @@ are declared in tools/grit/grit_rule.gni.
Read information about your browser, OS, device, installed software, registry values and files
</message>
</if>
@@ -27,7 +27,7 @@
<message name="IDS_EXTENSION_PROMPT_WARNING_ENTERPRISE_REPORTING_PRIVATE_ENABLED_LINUX_AND_MACOS" desc="Permission string for enterprise private reporting permission on Linux and MacOS.">
Read information about your browser, OS, device, installed software and files
</message>
-@@ -6207,7 +6207,7 @@ Keep your key file in a safe place. You will need it t
+@@ -6284,7 +6284,7 @@ Keep your key file in a safe place. You will need it t
Old versions of Chrome Apps won't open on Windows devices after December 2022. Contact your administrator to update to a new version or remove this app.
</message>
</if>
@@ -36,7 +36,7 @@
<message name="IDS_FORCE_INSTALLED_DEPRECATED_APPS_CONTENT" desc="Content of the force installed deprecated app dialog">
Old versions of Chrome Apps won't open on Linux devices after December 2022. Contact your administrator to update to a new version or remove this app.
</message>
-@@ -6253,7 +6253,7 @@ Keep your key file in a safe place. You will need it t
+@@ -6330,7 +6330,7 @@ Keep your key file in a safe place. You will need it t
Old versions of Chrome apps won't open on Windows devices after December 2022. You can check if there's a new version available.
</message>
</if>
@@ -45,7 +45,7 @@
<message name="IDS_DEPRECATED_APPS_MONITOR_RENDERER" desc="Dialog content that educates users that Chrome Apps will soon no longer launch.">
Old versions of Chrome apps won't open on Linux devices after December 2022. You can check if there's a new version available.
</message>
-@@ -10611,7 +10611,7 @@ Check your passwords anytime in <ph name="GOOGLE_PASSW
+@@ -10956,7 +10956,7 @@ Check your passwords anytime in <ph name="GOOGLE_PASSW
<message name="IDS_APP_MENU_BUTTON_UPDATE" desc="Short label next to app-menu button when an update is available.">
Update
</message>
@@ -54,7 +54,7 @@
<message name="IDS_APP_MENU_BUTTON_UPDATE_ALT1" desc="Alternate short label next to app-menu button when an update is available.">
Finish update
</message>
-@@ -10958,7 +10958,7 @@ Check your passwords anytime in <ph name="GOOGLE_PASSW
+@@ -11303,7 +11303,7 @@ Check your passwords anytime in <ph name="GOOGLE_PASSW
Google Pay
</message>
@@ -63,7 +63,7 @@
<message name="IDS_SHOW_WINDOW_DECORATIONS" desc="The label of a radio button in the options dialog for using the system title bar and borders.">
Use system title bar and borders
</message>
-@@ -11956,7 +11956,7 @@ Check your passwords anytime in <ph name="GOOGLE_PASSW
+@@ -12301,7 +12301,7 @@ Check your passwords anytime in <ph name="GOOGLE_PASSW
</message>
<!-- Device Trust Consent dialog -->
@@ -72,7 +72,7 @@
<message name="IDS_DEVICE_SIGNALS_CONSENT_DIALOG_TITLE" desc="Title of the dialog shown when user consent is required to share device signals.">
Share information about your device?
</message>
-@@ -12364,7 +12364,7 @@ Please help our engineers fix this problem. Tell us wh
+@@ -12709,7 +12709,7 @@ Please help our engineers fix this problem. Tell us wh
Set as default
</message>
@@ -81,7 +81,7 @@
<message name="IDS_MINIMIZE_WINDOW_MENU" desc="The Linux browser window menu item text for minimizing the window.">
Minimize
</message>
-@@ -14571,7 +14571,7 @@ Please help our engineers fix this problem. Tell us wh
+@@ -14940,7 +14940,7 @@ Please help our engineers fix this problem. Tell us wh
Open Anyway
</message>
diff --git a/www/ungoogled-chromium/files/patch-chrome_app_google__chrome__strings.grd b/www/ungoogled-chromium/files/patch-chrome_app_google__chrome__strings.grd
index ebcc1f209c09..7010821a6d3e 100644
--- a/www/ungoogled-chromium/files/patch-chrome_app_google__chrome__strings.grd
+++ b/www/ungoogled-chromium/files/patch-chrome_app_google__chrome__strings.grd
@@ -1,4 +1,4 @@
---- chrome/app/google_chrome_strings.grd.orig 2023-12-23 12:33:28 UTC
+--- chrome/app/google_chrome_strings.grd.orig 2024-02-03 15:42:55 UTC
+++ chrome/app/google_chrome_strings.grd
@@ -301,7 +301,7 @@ chromium_strings.grd. -->
Welcome to Chrome; new browser window opened
@@ -18,7 +18,7 @@
<message name="IDS_LINUX_OBSOLETE" desc="A message displayed on an at-launch infobar and about:help warning the user that the OS version they are using is no longer supported.">
Google Chrome may not function correctly because it is no longer supported on this Linux distribution
</message>
-@@ -877,7 +877,7 @@ Permissions you've already given to websites and apps
+@@ -887,7 +887,7 @@ Permissions you've already given to websites and apps
</if>
</if>
@@ -27,7 +27,7 @@
<if expr="use_titlecase">
<message name="IDS_RELAUNCH_TO_UPDATE_ALT" desc="Alternate text label of the relaunch to update Chrome menu item">
Relaunch to Update - Your tabs will reopen
-@@ -1302,7 +1302,7 @@ Permissions you've already given to websites and apps
+@@ -1306,7 +1306,7 @@ Permissions you've already given to websites and apps
</message>
</if>
diff --git a/www/ungoogled-chromium/files/patch-chrome_app_theme_chrome__unscaled__resources.grd b/www/ungoogled-chromium/files/patch-chrome_app_theme_chrome__unscaled__resources.grd
index 196d15c1de10..92a05007281d 100644
--- a/www/ungoogled-chromium/files/patch-chrome_app_theme_chrome__unscaled__resources.grd
+++ b/www/ungoogled-chromium/files/patch-chrome_app_theme_chrome__unscaled__resources.grd
@@ -1,29 +1,20 @@
---- chrome/app/theme/chrome_unscaled_resources.grd.orig 2023-12-23 12:33:28 UTC
+--- chrome/app/theme/chrome_unscaled_resources.grd.orig 2024-02-03 15:42:55 UTC
+++ chrome/app/theme/chrome_unscaled_resources.grd
-@@ -16,7 +16,7 @@
+@@ -14,7 +14,7 @@
+ </outputs>
+ <release seq="1">
<includes>
- <if expr="_google_chrome">
+- <if expr="is_linux">
++ <if expr="is_posix">
<then>
+ <include name="IDR_PRODUCT_LOGO_64" file="${branding_path_component}/linux/product_logo_64.png" type="BINDATA" />
+ <include name="IDR_PRODUCT_LOGO_128" file="${branding_path_component}/linux/product_logo_128.png" type="BINDATA" />
+@@ -40,7 +40,7 @@
+ <then>
+ <include name="IDR_PRODUCT_LOGO_SVG" file="${branding_path_component}/chrome_logo.svg" type="BINDATA" />
+ <include name="IDR_PRODUCT_LOGO_ANIMATION_SVG" file="${branding_path_component}/chrome_logo_animation.svg" type="BINDATA" />
- <if expr="is_linux">
+ <if expr="is_posix">
- <then>
- <include name="IDR_PRODUCT_LOGO_64" file="google_chrome/linux/product_logo_64.png" type="BINDATA" />
- <include name="IDR_PRODUCT_LOGO_128" file="google_chrome/linux/product_logo_128.png" type="BINDATA" />
-@@ -63,7 +63,7 @@
- <else> <!-- not _google_chrome -->
- <if expr="_is_chrome_for_testing_branded">
- <then>
-- <if expr="is_linux">
-+ <if expr="is_posix">
- <then>
- <include name="IDR_PRODUCT_LOGO_64" file="google_chrome/google_chrome_for_testing/linux/product_logo_64.png" type="BINDATA" />
- <include name="IDR_PRODUCT_LOGO_128" file="google_chrome/google_chrome_for_testing/linux/product_logo_128.png" type="BINDATA" />
-@@ -77,7 +77,7 @@
- </if>
- </then>
- <else> <!-- not _is_chrome_for_testing_branded -->
-- <if expr="is_linux">
-+ <if expr="is_posix">
- <then>
- <include name="IDR_PRODUCT_LOGO_64" file="chromium/linux/product_logo_64.png" type="BINDATA" />
- <include name="IDR_PRODUCT_LOGO_128" file="chromium/linux/product_logo_128.png" type="BINDATA" />
+ <include name="IDR_PRODUCT_LOGO_128_BETA" file="${branding_path_component}/linux/product_logo_128_beta.png" type="BINDATA" />
+ <include name="IDR_PRODUCT_LOGO_128_DEV" file="${branding_path_component}/linux/product_logo_128_dev.png" type="BINDATA" />
+ </if>
diff --git a/www/ungoogled-chromium/files/patch-chrome_app_theme_theme__resources.grd b/www/ungoogled-chromium/files/patch-chrome_app_theme_theme__resources.grd
index 9cbc7cac2c56..70f3a27eab57 100644
--- a/www/ungoogled-chromium/files/patch-chrome_app_theme_theme__resources.grd
+++ b/www/ungoogled-chromium/files/patch-chrome_app_theme_theme__resources.grd
@@ -1,59 +1,46 @@
---- chrome/app/theme/theme_resources.grd.orig 2023-10-13 13:20:35 UTC
+--- chrome/app/theme/theme_resources.grd.orig 2024-02-03 15:42:55 UTC
+++ chrome/app/theme/theme_resources.grd
-@@ -146,14 +146,14 @@
+@@ -153,14 +153,14 @@
</if>
<if expr="_google_chrome">
<then>
- <if expr="is_linux">
+ <if expr="is_posix">
<then>
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_16" file="google_chrome/linux/product_logo_16.png" />
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32" file="google_chrome/linux/product_logo_32.png" />
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32_BETA" file="google_chrome/linux/product_logo_32_beta.png" />
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32_DEV" file="google_chrome/linux/product_logo_32_dev.png" />
+ <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_16" file="${branding_path_component}/linux/product_logo_16.png" />
+ <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32" file="${branding_path_component}/linux/product_logo_32.png" />
+ <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32_BETA" file="${branding_path_component}/linux/product_logo_32_beta.png" />
+ <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32_DEV" file="${branding_path_component}/linux/product_logo_32_dev.png" />
</then>
- <else> <!-- not is_linux -->
+ <else> <!-- not is_posix -->
<if expr="not is_android">
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_16" file="google_chrome/product_logo_16.png" />
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32" file="google_chrome/product_logo_32.png" />
-@@ -161,7 +161,7 @@
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32_BETA" file="google_chrome/product_logo_32_beta.png" />
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32_DEV" file="google_chrome/product_logo_32_dev.png" />
+ <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_16" file="${branding_path_component}/product_logo_16.png" />
+ <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32" file="${branding_path_component}/product_logo_32.png" />
+@@ -168,7 +168,7 @@
+ <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32_BETA" file="${branding_path_component}/product_logo_32_beta.png" />
+ <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32_DEV" file="${branding_path_component}/product_logo_32_dev.png" />
</else>
- </if> <!-- is_linux -->
+ </if> <!-- is_posix -->
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32_CANARY" file="google_chrome/product_logo_32_canary.png" />
+ <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32_CANARY" file="${branding_path_component}/product_logo_32_canary.png" />
<if expr="not is_android">
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_NAME_22" file="google_chrome/product_logo_name_22.png" />
-@@ -173,7 +173,7 @@
+ <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_NAME_22" file="${branding_path_component}/product_logo_name_22.png" />
+@@ -178,7 +178,7 @@
+ <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_ENTERPRISE_WHITE" file="${branding_path_component}/product_logo_enterprise_white.png" />
+ </then>
<else> <!-- not _google_chrome -->
- <if expr="_is_chrome_for_testing_branded">
+- <if expr="is_linux">
++ <if expr="is_posix">
<then>
-- <if expr="is_linux">
-+ <if expr="is_posix">
- <then>
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_16" file="google_chrome/google_chrome_for_testing/linux/product_logo_16.png" />
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32" file="google_chrome/google_chrome_for_testing/linux/product_logo_32.png" />
-@@ -182,10 +182,10 @@
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_16" file="google_chrome/google_chrome_for_testing/product_logo_16.png" />
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32" file="google_chrome/google_chrome_for_testing/product_logo_32.png" />
- </else>
-- </if> <!-- is_linux -->
-+ </if> <!-- is_posix -->
- </then>
- <else> <!-- not _is_chrome_for_testing_branded -->
-- <if expr="is_linux">
-+ <if expr="is_posix">
- <then>
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_16" file="chromium/linux/product_logo_16.png" />
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32" file="chromium/linux/product_logo_32.png" />
-@@ -194,7 +194,7 @@
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_16" file="chromium/product_logo_16.png" />
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32" file="chromium/product_logo_32.png" />
- </else>
-- </if> <!-- is_linux -->
-+ </if> <!-- is_posix -->
+ <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_16" file="${branding_path_component}/linux/product_logo_16.png" />
+ <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32" file="${branding_path_component}/linux/product_logo_32.png" />
+@@ -187,7 +187,7 @@
+ <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_16" file="${branding_path_component}/product_logo_16.png" />
+ <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_32" file="${branding_path_component}/product_logo_32.png" />
</else>
- </if> <!-- _is_chrome_for_testing_branded -->
- <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_NAME_22" file="chromium/product_logo_name_22.png" />
+- </if> <!-- is_linux -->
++ </if> <!-- is_posix -->
+ <if expr="_is_chrome_for_testing_branded">
+ <then>
+ <structure type="chrome_scaled_image" name="IDR_PRODUCT_LOGO_NAME_22" file="chromium/product_logo_name_22.png" />
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_BUILD.gn b/www/ungoogled-chromium/files/patch-chrome_browser_BUILD.gn
index 4fef52ab5e8c..b471a7a1320d 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_BUILD.gn
@@ -1,6 +1,6 @@
---- chrome/browser/BUILD.gn.orig 2024-01-17 09:38:09 UTC
+--- chrome/browser/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/BUILD.gn
-@@ -6526,6 +6526,13 @@ static_library("browser") {
+@@ -6515,6 +6515,13 @@ static_library("browser") {
}
}
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_about__flags.cc b/www/ungoogled-chromium/files/patch-chrome_browser_about__flags.cc
index d0fd06a31585..6875e193fa0f 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_about__flags.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_about__flags.cc
@@ -1,4 +1,4 @@
---- chrome/browser/about_flags.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/about_flags.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/about_flags.cc
@@ -232,7 +232,7 @@
#include "ui/ui_features.h"
@@ -9,7 +9,7 @@
#include "base/allocator/buildflags.h"
#endif
-@@ -331,7 +331,7 @@
+@@ -330,7 +330,7 @@
#include "device/vr/public/cpp/features.h"
#endif
@@ -18,7 +18,7 @@
#include "ui/ozone/buildflags.h"
#include "ui/ozone/public/ozone_switches.h"
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH)
-@@ -345,7 +345,7 @@
+@@ -344,7 +344,7 @@
#include "chrome/browser/win/titlebar_config.h"
#endif
@@ -27,7 +27,7 @@
#include "chrome/browser/enterprise/profile_management/profile_management_features.h"
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
-@@ -475,7 +475,7 @@ const FeatureEntry::FeatureVariation kDXGIWaitableSwap
+@@ -482,7 +482,7 @@ const FeatureEntry::FeatureVariation kDXGIWaitableSwap
{"Max 3 Frames", &kDXGIWaitableSwapChain3Frames, 1, nullptr}};
#endif
@@ -36,16 +36,34 @@
const FeatureEntry::Choice kOzonePlatformHintRuntimeChoices[] = {
{flag_descriptions::kOzonePlatformHintChoiceDefault, "", ""},
{flag_descriptions::kOzonePlatformHintChoiceAuto,
-@@ -1464,7 +1464,7 @@ const FeatureEntry::FeatureVariation kChromeRefresh202
+@@ -1452,7 +1452,7 @@ const FeatureEntry::FeatureVariation kChromeRefresh202
nullptr}};
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || \
- BUILDFLAG(IS_WIN) || BUILDFLAG(IS_FUCHSIA)
+ BUILDFLAG(IS_WIN) || BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_BSD)
- const FeatureEntry::FeatureParam kShortcutBoostSearchAndUrl1414[] = {
- {"ShortcutBoostSearchScore", "1414"},
- {"ShortcutBoostUrlScore", "1414"}};
-@@ -5304,13 +5304,13 @@ const FeatureEntry kFeatureEntries[] = {
+ const FeatureEntry::FeatureParam kShortcutBoostSingleUrl[] = {
+ {"ShortcutBoostSearchScore", "0"},
+ {"ShortcutBoostNonTopHitThreshold", "0"},
+@@ -3935,7 +3935,7 @@ const flags_ui::FeatureEntry::FeatureVariation
+ std::size(kParcelTrackingTestDataOutForDelivery), nullptr},
+ };
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+ const flags_ui::FeatureEntry::FeatureParam
+ kDesktopPWAsLinkCapturingDefaultOn[] = {{"on_by_default", "true"}};
+ const flags_ui::FeatureEntry::FeatureParam
+@@ -4684,7 +4684,7 @@ const FeatureEntry kFeatureEntries[] = {
+ "DXGIWaitableSwapChain"),
+ },
+ #endif // BUILDFLAG(IS_WIN)
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ {
+ "fluent-overlay-scrollbars",
+ flag_descriptions::kFluentOverlayScrollbarsName,
+@@ -5228,13 +5228,13 @@ const FeatureEntry kFeatureEntries[] = {
FEATURE_VALUE_TYPE(features::kWebShare)},
#endif // BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC)
@@ -61,8 +79,8 @@
{"skip-undecryptable-passwords",
flag_descriptions::kSkipUndecryptablePasswordsName,
flag_descriptions::kSkipUndecryptablePasswordsDescription,
-@@ -5621,7 +5621,7 @@ const FeatureEntry kFeatureEntries[] = {
- FEATURE_VALUE_TYPE(feed::kFeedSportsCard)},
+@@ -5548,7 +5548,7 @@ const FeatureEntry kFeatureEntries[] = {
+ FEATURE_VALUE_TYPE(feed::kRefreshFeedOnRestart)},
#endif // BUILDFLAG(IS_ANDROID)
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || \
- BUILDFLAG(IS_WIN) || BUILDFLAG(IS_FUCHSIA)
@@ -70,7 +88,7 @@
{"following-feed-sidepanel", flag_descriptions::kFollowingFeedSidepanelName,
flag_descriptions::kFollowingFeedSidepanelDescription, kOsDesktop,
FEATURE_VALUE_TYPE(feed::kWebUiFeed)},
-@@ -6260,7 +6260,7 @@ const FeatureEntry kFeatureEntries[] = {
+@@ -6201,7 +6201,7 @@ const FeatureEntry kFeatureEntries[] = {
FEATURE_VALUE_TYPE(omnibox::kZeroSuggestInMemoryCaching)},
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || \
@@ -79,7 +97,7 @@
{"omnibox-actions-ui-simplification",
flag_descriptions::kOmniboxActionsUISimplificationName,
flag_descriptions::kOmniboxActionsUISimplificationDescription, kOsDesktop,
-@@ -7096,7 +7096,7 @@ const FeatureEntry kFeatureEntries[] = {
+@@ -7056,7 +7056,7 @@ const FeatureEntry kFeatureEntries[] = {
flag_descriptions::kParallelDownloadingDescription, kOsAll,
FEATURE_VALUE_TYPE(download::features::kParallelDownloading)},
@@ -88,7 +106,7 @@
{"enable-async-dns", flag_descriptions::kAsyncDnsName,
flag_descriptions::kAsyncDnsDescription, kOsWin | kOsLinux,
FEATURE_VALUE_TYPE(features::kAsyncDns)},
-@@ -8131,7 +8131,7 @@ const FeatureEntry kFeatureEntries[] = {
+@@ -8075,7 +8075,7 @@ const FeatureEntry kFeatureEntries[] = {
FEATURE_VALUE_TYPE(supervised_user::kEnableProtoApiForClassifyUrl)},
#endif // BUILDFLAG(ENABLE_SUPERVISED_USERS)
@@ -97,7 +115,7 @@
{"enable-network-service-sandbox",
flag_descriptions::kEnableNetworkServiceSandboxName,
flag_descriptions::kEnableNetworkServiceSandboxDescription,
-@@ -8156,7 +8156,7 @@ const FeatureEntry kFeatureEntries[] = {
+@@ -8100,7 +8100,7 @@ const FeatureEntry kFeatureEntries[] = {
FEATURE_VALUE_TYPE(
supervised_user::kFilterWebsitesForSupervisedUsersOnDesktopAndIOS)},
@@ -106,7 +124,7 @@
{"enable-family-link-extensions-permissions",
flag_descriptions::
kEnableExtensionsPermissionsForSupervisedUsersOnDesktopName,
-@@ -8826,7 +8826,7 @@ const FeatureEntry kFeatureEntries[] = {
+@@ -8750,7 +8750,7 @@ const FeatureEntry kFeatureEntries[] = {
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -115,7 +133,7 @@
{"quick-commands", flag_descriptions::kQuickCommandsName,
flag_descriptions::kQuickCommandsDescription, kOsDesktop,
FEATURE_VALUE_TYPE(features::kQuickCommands)},
-@@ -9097,7 +9097,7 @@ const FeatureEntry kFeatureEntries[] = {
+@@ -9024,7 +9024,7 @@ const FeatureEntry kFeatureEntries[] = {
FEATURE_VALUE_TYPE(ash::features::kWallpaperPerDesk)},
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
@@ -124,7 +142,7 @@
{"enable-get-all-screens-media", flag_descriptions::kGetAllScreensMediaName,
flag_descriptions::kGetAllScreensMediaDescription,
kOsCrOS | kOsLacros | kOsLinux,
-@@ -9139,7 +9139,7 @@ const FeatureEntry kFeatureEntries[] = {
+@@ -9066,7 +9066,7 @@ const FeatureEntry kFeatureEntries[] = {
#if BUILDFLAG(IS_WIN) || \
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)) || \
@@ -133,25 +151,25 @@
{
"ui-debug-tools",
flag_descriptions::kUIDebugToolsName,
-@@ -9697,7 +9697,7 @@ const FeatureEntry kFeatureEntries[] = {
+@@ -9657,7 +9657,7 @@ const FeatureEntry kFeatureEntries[] = {
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || \
-- BUILDFLAG(IS_CHROMEOS_ASH)
-+ BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_BSD)
+- BUILDFLAG(IS_CHROMEOS)
++ BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
{"document-picture-in-picture-api",
flag_descriptions::kDocumentPictureInPictureApiName,
flag_descriptions::kDocumentPictureInPictureApiDescription,
-@@ -10804,7 +10804,7 @@ const FeatureEntry kFeatureEntries[] = {
+@@ -10781,7 +10781,7 @@ const FeatureEntry kFeatureEntries[] = {
+ kOsDesktop | kOsAndroid,
FEATURE_VALUE_TYPE(features::kProcessPerSiteUpToMainFrameThreshold)},
- #if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || \
-- BUILDFLAG(IS_FUCHSIA)
-+ BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_BSD)
+-#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
{"camera-mic-effects", flag_descriptions::kCameraMicEffectsName,
flag_descriptions::kCameraMicEffectsDescription,
- static_cast<unsigned short>(kOsMac | kOsWin | kOsLinux | kOsFuchsia),
-@@ -10986,7 +10986,7 @@ const FeatureEntry kFeatureEntries[] = {
+ static_cast<unsigned short>(kOsMac | kOsWin | kOsLinux),
+@@ -10947,7 +10947,7 @@ const FeatureEntry kFeatureEntries[] = {
password_manager::features::kFillingAcrossAffiliatedWebsitesAndroid)},
#endif
@@ -160,21 +178,12 @@
{"third-party-profile-management",
flag_descriptions::kThirdPartyProfileManagementName,
flag_descriptions::kThirdPartyProfileManagementDescription,
-@@ -11300,7 +11300,7 @@ const FeatureEntry kFeatureEntries[] = {
- kOsDesktop, FEATURE_VALUE_TYPE(blink::features::kPasswordStrongLabel)},
- #endif
-
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
- {"attach-logs-to-autofill-rater-extentsion-report",
- flag_descriptions::kAttachLogsToAutofillRaterExtensionReportName,
- flag_descriptions::kAttachLogsToAutofillRaterExtensionReportDescription,
-@@ -11309,7 +11309,7 @@ const FeatureEntry kFeatureEntries[] = {
- kAttachLogsToAutofillRaterExtensionReport)},
- #endif
+@@ -11338,7 +11338,7 @@ const FeatureEntry kFeatureEntries[] = {
+ flag_descriptions::kAutofillEnableCardBenefitsDescription, kOsAll,
+ FEATURE_VALUE_TYPE(autofill::features::kAutofillEnableCardBenefits)},
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
- {"fill-multi-line", flag_descriptions::kFillMultiLineName,
- flag_descriptions::kFillMultiLineDescription, kOsWin | kOsLinux | kOsMac,
- FEATURE_VALUE_TYPE(compose::features::kFillMultiLine)},
+ {flag_descriptions::kAutofillContentEditablesId,
+ flag_descriptions::kAutofillContentEditablesName,
+ flag_descriptions::kAutofillContentEditablesDescription,
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_accessibility_pdf__ocr__controller.cc b/www/ungoogled-chromium/files/patch-chrome_browser_accessibility_pdf__ocr__controller.cc
new file mode 100644
index 000000000000..37545e2bd4e5
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_accessibility_pdf__ocr__controller.cc
@@ -0,0 +1,11 @@
+--- chrome/browser/accessibility/pdf_ocr_controller.cc.orig 2024-02-03 15:42:55 UTC
++++ chrome/browser/accessibility/pdf_ocr_controller.cc
+@@ -86,7 +86,7 @@ void AnnounceToScreenReader(const int message_id) {
+ // TODO(crbug.com/1442928): Sending announcements results in a failure in
+ // `AuraLinuxAccessibilityInProcessBrowserTest::IndexInParentWithModal` and
+ // flaky fail when running Chrome.
+-#if !BUILDFLAG(IS_LINUX)
++#if !BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_BSD)
+ const Browser* browser = BrowserList::GetInstance()->GetLastActive();
+ if (!browser) {
+ VLOG(2) << "Browser is not ready to announce";
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_apps_app__service_publishers_extension__apps.cc b/www/ungoogled-chromium/files/patch-chrome_browser_apps_app__service_publishers_extension__apps.cc
index bf6ce9ac8fd4..404a68d59f09 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_apps_app__service_publishers_extension__apps.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_apps_app__service_publishers_extension__apps.cc
@@ -1,6 +1,6 @@
---- chrome/browser/apps/app_service/publishers/extension_apps.cc.orig 2023-06-05 19:39:05 UTC
+--- chrome/browser/apps/app_service/publishers/extension_apps.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/apps/app_service/publishers/extension_apps.cc
-@@ -25,7 +25,7 @@ ExtensionApps::~ExtensionApps() = default;
+@@ -24,7 +24,7 @@ ExtensionApps::~ExtensionApps() = default;
bool ExtensionApps::Accepts(const extensions::Extension* extension) {
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_browser__features.cc b/www/ungoogled-chromium/files/patch-chrome_browser_browser__features.cc
index 96dce6508dba..5d63c1e5cd26 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_browser__features.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_browser__features.cc
@@ -1,4 +1,4 @@
---- chrome/browser/browser_features.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/browser_features.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/browser_features.cc
@@ -28,7 +28,7 @@ BASE_FEATURE(kClosedTabCache,
BASE_FEATURE(kDestroyProfileOnBrowserClose,
@@ -9,12 +9,12 @@
base::FEATURE_ENABLED_BY_DEFAULT);
#else
base::FEATURE_DISABLED_BY_DEFAULT);
-@@ -292,7 +292,7 @@ BASE_FEATURE(kOmniboxTriggerForNoStatePrefetch,
- "OmniboxTriggerForNoStatePrefetch",
- base::FEATURE_DISABLED_BY_DEFAULT);
-
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
- BASE_FEATURE(kPayloadTestComponent,
- "PayloadTestComponent",
+@@ -286,7 +286,7 @@ BASE_FEATURE(kNewTabPageTriggerForPrerender2,
+ BASE_FEATURE(kSupportSearchSuggestionForPrerender2,
+ "SupportSearchSuggestionForPrerender2",
+ #if BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_CHROMEOS_LACROS) || \
+- BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
++ BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+ base::FEATURE_ENABLED_BY_DEFAULT);
+ #else
base::FEATURE_DISABLED_BY_DEFAULT);
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_browser__features.h b/www/ungoogled-chromium/files/patch-chrome_browser_browser__features.h
deleted file mode 100644
index 5fa408811eee..000000000000
--- a/www/ungoogled-chromium/files/patch-chrome_browser_browser__features.h
+++ /dev/null
@@ -1,11 +0,0 @@
---- chrome/browser/browser_features.h.orig 2023-12-23 12:33:28 UTC
-+++ chrome/browser/browser_features.h
-@@ -121,7 +121,7 @@ BASE_DECLARE_FEATURE(kAutocompleteActionPredictorConfi
-
- BASE_DECLARE_FEATURE(kOmniboxTriggerForNoStatePrefetch);
-
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
- BASE_DECLARE_FEATURE(kPayloadTestComponent);
- #endif
-
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.cc b/www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.cc
index 1ead4591c186..cd2a55388044 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.cc
@@ -1,4 +1,4 @@
---- chrome/browser/browser_process_impl.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/browser_process_impl.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/browser_process_impl.cc
@@ -218,7 +218,7 @@
#include "components/enterprise/browser/controller/chrome_browser_cloud_management_controller.h"
@@ -9,7 +9,7 @@
#include "chrome/browser/error_reporting/chrome_js_error_report_processor.h" // nogncheck
#endif
-@@ -1217,7 +1217,7 @@ void BrowserProcessImpl::PreMainMessageLoopRun() {
+@@ -1224,7 +1224,7 @@ void BrowserProcessImpl::PreMainMessageLoopRun() {
ApplyMetricsReportingPolicy();
@@ -18,7 +18,7 @@
ChromeJsErrorReportProcessor::Create();
#endif
-@@ -1462,7 +1462,7 @@ void BrowserProcessImpl::Unpin() {
+@@ -1470,7 +1470,7 @@ void BrowserProcessImpl::Unpin() {
// Mac is currently not supported.
// TODO(crbug.com/1052397): Revisit once build flag switch of lacros-chrome is
// complete.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.h b/www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.h
index e348b2a30c7e..0eb8f45a5164 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_browser__process__impl.h
@@ -1,6 +1,6 @@
---- chrome/browser/browser_process_impl.h.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/browser_process_impl.h.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/browser_process_impl.h
-@@ -384,7 +384,7 @@ class BrowserProcessImpl : public BrowserProcess,
+@@ -386,7 +386,7 @@ class BrowserProcessImpl : public BrowserProcess,
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__interface__binders.cc b/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__interface__binders.cc
index bb2823ce451a..d26ece805458 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__interface__binders.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__interface__binders.cc
@@ -1,6 +1,6 @@
---- chrome/browser/chrome_browser_interface_binders.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/chrome_browser_interface_binders.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/chrome_browser_interface_binders.cc
-@@ -130,13 +130,13 @@
+@@ -126,13 +126,13 @@
#endif // BUILDFLAG(FULL_SAFE_BROWSING)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -16,16 +16,16 @@
#include "chrome/browser/ui/webui/app_settings/web_app_settings_ui.h"
#include "ui/webui/resources/cr_components/app_management/app_management.mojom.h"
#endif
-@@ -224,7 +224,7 @@
+@@ -218,7 +218,7 @@
#endif // BUILDFLAG(IS_ANDROID)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
- BUILDFLAG(IS_CHROMEOS)
+ BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
- #include "chrome/browser/companion/visual_search/visual_search_suggestions_service_factory.h"
+ #include "chrome/browser/companion/visual_query/visual_query_suggestions_service_factory.h"
#include "chrome/browser/ui/web_applications/sub_apps_service_impl.h"
#include "chrome/browser/ui/webui/discards/discards.mojom.h"
-@@ -863,7 +863,7 @@ void BindScreen2xMainContentExtractor(
+@@ -862,7 +862,7 @@ void BindScreen2xMainContentExtractor(
#endif
#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || \
@@ -34,7 +34,7 @@
void BindVisualSuggestionsModelProvider(
content::RenderFrameHost* frame_host,
mojo::PendingReceiver<
-@@ -1009,7 +1009,7 @@ void PopulateChromeFrameBinders(
+@@ -1004,7 +1004,7 @@ void PopulateChromeFrameBinders(
#endif // BUILDFLAG(ENABLE_SPEECH_SERVICE)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -43,7 +43,7 @@
if (!render_frame_host->GetParent()) {
map->Add<chrome::mojom::DraggableRegions>(
base::BindRepeating(&DraggableRegionsHostImpl::CreateIfAllowed));
-@@ -1017,7 +1017,7 @@ void PopulateChromeFrameBinders(
+@@ -1012,7 +1012,7 @@ void PopulateChromeFrameBinders(
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -52,7 +52,7 @@
if (base::FeatureList::IsEnabled(blink::features::kDesktopPWAsSubApps) &&
!render_frame_host->GetParentOrOuterDocument()) {
// The service binder will reject non-primary main frames, but we still need
-@@ -1097,7 +1097,7 @@ void PopulateChromeWebUIFrameBinders(
+@@ -1094,7 +1094,7 @@ void PopulateChromeWebUIFrameBinders(
commerce::CommerceInternalsUI>(map);
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -61,7 +61,7 @@
RegisterWebUIControllerInterfaceBinder<
connectors_internals::mojom::PageHandler,
enterprise_connectors::ConnectorsInternalsUI>(map);
-@@ -1118,7 +1118,7 @@ void PopulateChromeWebUIFrameBinders(
+@@ -1115,7 +1115,7 @@ void PopulateChromeWebUIFrameBinders(
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -70,7 +70,7 @@
RegisterWebUIControllerInterfaceBinder<
app_management::mojom::PageHandlerFactory, WebAppSettingsUI>(map);
#endif
-@@ -1651,7 +1651,7 @@ void PopulateChromeWebUIFrameBinders(
+@@ -1653,7 +1653,7 @@ void PopulateChromeWebUIFrameBinders(
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__main.cc b/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__main.cc
index 1079adeec055..72552d59f9fc 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__main.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_chrome__browser__main.cc
@@ -1,6 +1,6 @@
---- chrome/browser/chrome_browser_main.cc.orig 2024-01-17 09:38:09 UTC
+--- chrome/browser/chrome_browser_main.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/chrome_browser_main.cc
-@@ -246,11 +246,11 @@
+@@ -247,11 +247,11 @@
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -14,7 +14,7 @@
#include "components/crash/core/app/crashpad.h"
#endif
-@@ -284,14 +284,14 @@
+@@ -285,14 +285,14 @@
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -31,7 +31,24 @@
#include "chrome/browser/headless/headless_mode_metrics.h" // nogncheck
#include "chrome/browser/headless/headless_mode_util.h" // nogncheck
#include "components/headless/select_file_dialog/headless_select_file_dialog.h"
-@@ -1039,7 +1039,7 @@ int ChromeBrowserMainParts::PreCreateThreadsImpl() {
+@@ -354,14 +354,14 @@
+ #endif // BUILDFLAG(IS_WIN) && BUILDFLAG(USE_BROWSER_SPELLCHECKER)
+
+ #if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
+- BUILDFLAG(IS_WIN) || BUILDFLAG(IS_ANDROID)
++ BUILDFLAG(IS_WIN) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_BSD)
+ #include "sql/database.h"
+ #endif // BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) ||
+ // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_ANDROID)
+
+ namespace {
+ #if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
+- BUILDFLAG(IS_WIN) || BUILDFLAG(IS_ANDROID)
++ BUILDFLAG(IS_WIN) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_BSD)
+ constexpr base::FilePath::CharType kMediaHistoryDatabaseName[] =
+ FILE_PATH_LITERAL("Media History");
+
+@@ -1068,7 +1068,7 @@ int ChromeBrowserMainParts::PreCreateThreadsImpl() {
browser_creator_->AddFirstRunTabs(master_prefs_->new_tabs);
}
@@ -40,7 +57,7 @@
// Create directory for user-level Native Messaging manifest files. This
// makes it less likely that the directory will be created by third-party
// software with incorrect owner or permission. See crbug.com/725513 .
-@@ -1095,7 +1095,7 @@ int ChromeBrowserMainParts::PreCreateThreadsImpl() {
+@@ -1124,7 +1124,7 @@ int ChromeBrowserMainParts::PreCreateThreadsImpl() {
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -49,7 +66,16 @@
metrics::DesktopSessionDurationTracker::Initialize();
ProfileActivityMetricsRecorder::Initialize();
TouchModeStatsTracker::Initialize(
-@@ -1331,7 +1331,7 @@ void ChromeBrowserMainParts::PostProfileInit(Profile*
+@@ -1323,7 +1323,7 @@ void ChromeBrowserMainParts::PostProfileInit(Profile*
+ #endif // BUILDFLAG(IS_WIN)
+
+ #if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
+- BUILDFLAG(IS_WIN) || BUILDFLAG(IS_ANDROID)
++ BUILDFLAG(IS_WIN) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_BSD)
+ // Delete the media history database if it still exists.
+ // TODO(crbug.com/1198344): Remove this.
+ base::ThreadPool::PostTask(
+@@ -1372,7 +1372,7 @@ void ChromeBrowserMainParts::PostProfileInit(Profile*
*UrlLanguageHistogramFactory::GetForBrowserContext(profile));
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
@@ -58,7 +84,7 @@
if (headless::IsHeadlessMode()) {
headless::ReportHeadlessActionMetrics();
}
-@@ -1437,7 +1437,7 @@ int ChromeBrowserMainParts::PreMainMessageLoopRunImpl(
+@@ -1478,7 +1478,7 @@ int ChromeBrowserMainParts::PreMainMessageLoopRunImpl(
// In headless mode provide alternate SelectFileDialog factory overriding
// any platform specific SelectFileDialog implementation that may have been
// set.
@@ -67,7 +93,7 @@
if (headless::IsHeadlessMode()) {
headless::HeadlessSelectFileDialogFactory::SetUp();
}
-@@ -1969,7 +1969,7 @@ bool ChromeBrowserMainParts::ProcessSingletonNotificat
+@@ -2008,7 +2008,7 @@ bool ChromeBrowserMainParts::ProcessSingletonNotificat
// Drop the request if headless mode is in effect or the request is from
// a headless Chrome process.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.cc b/www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.cc
index 250a74c43658..56ceeeb1ed88 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.cc
@@ -1,6 +1,6 @@
---- chrome/browser/chrome_content_browser_client.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/chrome_content_browser_client.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/chrome_content_browser_client.cc
-@@ -446,7 +446,7 @@
+@@ -458,7 +458,7 @@
#include "storage/browser/file_system/external_mount_points.h"
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -9,7 +9,7 @@
#include "chrome/browser/chrome_browser_main_linux.h"
#include "chrome/browser/ui/views/chrome_browser_main_extra_parts_views_linux.h"
#elif BUILDFLAG(IS_ANDROID)
-@@ -547,12 +547,12 @@
+@@ -561,12 +561,12 @@
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -24,7 +24,7 @@
#include "components/crash/core/app/crash_switches.h"
#include "components/crash/core/app/crashpad.h"
#endif
-@@ -563,14 +563,14 @@
+@@ -577,14 +577,14 @@
#include "chrome/browser/apps/link_capturing/web_app_link_capturing_delegate.h"
#endif
@@ -41,7 +41,7 @@
#include "chrome/browser/enterprise/connectors/device_trust/navigation_throttle.h"
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) ||
// BUILDFLAG(IS_CHROMEOS_ASH)
-@@ -586,7 +586,7 @@
+@@ -600,7 +600,7 @@
#include "components/lens/lens_features.h"
#endif
@@ -50,7 +50,7 @@
#include "chrome/browser/chrome_browser_main_extra_parts_linux.h"
#elif BUILDFLAG(IS_OZONE)
#include "chrome/browser/chrome_browser_main_extra_parts_ozone.h"
-@@ -1566,7 +1566,7 @@ void ChromeContentBrowserClient::RegisterLocalStatePre
+@@ -1535,7 +1535,7 @@ void ChromeContentBrowserClient::RegisterLocalStatePre
registry->RegisterBooleanPref(prefs::kNativeClientForceAllowed, false);
registry->RegisterBooleanPref(
policy::policy_prefs::kPPAPISharedImagesForVideoDecoderAllowed, true);
@@ -59,7 +59,7 @@
registry->RegisterBooleanPref(prefs::kOutOfProcessSystemDnsResolutionEnabled,
true);
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_ANDROID)
-@@ -1690,7 +1690,7 @@ ChromeContentBrowserClient::CreateBrowserMainParts(boo
+@@ -1659,7 +1659,7 @@ ChromeContentBrowserClient::CreateBrowserMainParts(boo
#elif BUILDFLAG(IS_CHROMEOS_LACROS)
main_parts = std::make_unique<ChromeBrowserMainPartsLacros>(
is_integration_test, &startup_data_);
@@ -68,7 +68,7 @@
main_parts = std::make_unique<ChromeBrowserMainPartsLinux>(
is_integration_test, &startup_data_);
#elif BUILDFLAG(IS_ANDROID)
-@@ -1727,7 +1727,7 @@ ChromeContentBrowserClient::CreateBrowserMainParts(boo
+@@ -1696,7 +1696,7 @@ ChromeContentBrowserClient::CreateBrowserMainParts(boo
std::make_unique<ChromeBrowserMainExtraPartsViewsLacros>());
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -77,7 +77,7 @@
main_parts->AddParts(
std::make_unique<ChromeBrowserMainExtraPartsViewsLinux>());
#else
-@@ -1748,7 +1748,7 @@ ChromeContentBrowserClient::CreateBrowserMainParts(boo
+@@ -1717,7 +1717,7 @@ ChromeContentBrowserClient::CreateBrowserMainParts(boo
main_parts->AddParts(std::make_unique<ChromeBrowserMainExtraPartsLacros>());
#endif
@@ -86,7 +86,7 @@
main_parts->AddParts(std::make_unique<ChromeBrowserMainExtraPartsLinux>());
#elif BUILDFLAG(IS_OZONE)
main_parts->AddParts(std::make_unique<ChromeBrowserMainExtraPartsOzone>());
-@@ -1767,7 +1767,7 @@ ChromeContentBrowserClient::CreateBrowserMainParts(boo
+@@ -1736,7 +1736,7 @@ ChromeContentBrowserClient::CreateBrowserMainParts(boo
chrome::AddMetricsExtraParts(main_parts.get());
@@ -95,7 +95,7 @@
main_parts->AddParts(
std::make_unique<
chrome::enterprise_util::ChromeBrowserMainExtraPartsEnterprise>());
-@@ -2601,6 +2601,8 @@ void ChromeContentBrowserClient::AppendExtraCommandLin
+@@ -2574,6 +2574,8 @@ void ChromeContentBrowserClient::AppendExtraCommandLin
#if(0)
#if BUILDFLAG(IS_ANDROID)
bool enable_crash_reporter = true;
@@ -104,7 +104,7 @@
#elif BUILDFLAG(IS_CHROMEOS)
bool enable_crash_reporter = false;
if (crash_reporter::IsCrashpadEnabled()) {
-@@ -2966,7 +2968,7 @@ void ChromeContentBrowserClient::AppendExtraCommandLin
+@@ -2915,7 +2917,7 @@ void ChromeContentBrowserClient::AppendExtraCommandLin
ThreadProfilerConfiguration::Get()->AppendCommandLineSwitchForChildProcess(
command_line);
@@ -113,7 +113,7 @@
// Opt into a hardened stack canary mitigation if it hasn't already been
// force-disabled.
if (!browser_command_line.HasSwitch(switches::kChangeStackGuardOnFork)) {
-@@ -4637,7 +4639,7 @@ void ChromeContentBrowserClient::GetAdditionalFileSyst
+@@ -4597,7 +4599,7 @@ void ChromeContentBrowserClient::GetAdditionalFileSyst
}
}
@@ -122,7 +122,7 @@
void ChromeContentBrowserClient::GetAdditionalMappedFilesForChildProcess(
const base::CommandLine& command_line,
int child_process_id,
-@@ -5192,7 +5194,7 @@ ChromeContentBrowserClient::CreateThrottlesForNavigati
+@@ -5160,7 +5162,7 @@ ChromeContentBrowserClient::CreateThrottlesForNavigati
&throttles);
}
@@ -131,7 +131,7 @@
MaybeAddThrottle(
WebAppSettingsNavigationThrottle::MaybeCreateThrottleFor(handle),
&throttles);
-@@ -5202,7 +5204,7 @@ ChromeContentBrowserClient::CreateThrottlesForNavigati
+@@ -5170,7 +5172,7 @@ ChromeContentBrowserClient::CreateThrottlesForNavigati
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || \
@@ -140,7 +140,7 @@
MaybeAddThrottle(enterprise_connectors::DeviceTrustNavigationThrottle::
MaybeCreateThrottleFor(handle),
&throttles);
-@@ -5233,7 +5235,7 @@ ChromeContentBrowserClient::CreateThrottlesForNavigati
+@@ -5201,7 +5203,7 @@ ChromeContentBrowserClient::CreateThrottlesForNavigati
}
#endif
@@ -149,7 +149,7 @@
MaybeAddThrottle(browser_switcher::BrowserSwitcherNavigationThrottle::
MaybeCreateThrottleFor(handle),
&throttles);
-@@ -7101,7 +7103,7 @@ bool ChromeContentBrowserClient::ShouldSandboxNetworkS
+@@ -7105,7 +7107,7 @@ bool ChromeContentBrowserClient::ShouldSandboxNetworkS
bool ChromeContentBrowserClient::ShouldRunOutOfProcessSystemDnsResolution() {
// This enterprise policy is supported on Android, but the feature will not be
// launched there.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.h b/www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.h
index 75b592168657..7b5d60dbee23 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_chrome__content__browser__client.h
@@ -1,6 +1,6 @@
---- chrome/browser/chrome_content_browser_client.h.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/chrome_content_browser_client.h.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/chrome_content_browser_client.h
-@@ -484,7 +484,7 @@ class ChromeContentBrowserClient : public content::Con
+@@ -495,7 +495,7 @@ class ChromeContentBrowserClient : public content::Con
void OverridePageVisibilityState(
content::RenderFrameHost* render_frame_host,
content::PageVisibilityState* visibility_state) override;
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_component__updater_registration.cc b/www/ungoogled-chromium/files/patch-chrome_browser_component__updater_registration.cc
deleted file mode 100644
index fdcdb8513898..000000000000
--- a/www/ungoogled-chromium/files/patch-chrome_browser_component__updater_registration.cc
+++ /dev/null
@@ -1,11 +0,0 @@
---- chrome/browser/component_updater/registration.cc.orig 2023-12-23 12:33:28 UTC
-+++ chrome/browser/component_updater/registration.cc
-@@ -223,7 +223,7 @@ void RegisterComponentsForUpdate() {
-
- RegisterTpcdMetadataComponent(cus);
-
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
- // TODO(crbug.com/1490685): Remove this test component once the
- // experiment has concluded.
- if (base::FeatureList::IsEnabled(features::kPayloadTestComponent)) {
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_compose_chrome__compose__client.cc b/www/ungoogled-chromium/files/patch-chrome_browser_compose_chrome__compose__client.cc
new file mode 100644
index 000000000000..7dd1017f2706
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_compose_chrome__compose__client.cc
@@ -0,0 +1,19 @@
+--- chrome/browser/compose/chrome_compose_client.cc.orig 2024-02-03 15:42:55 UTC
++++ chrome/browser/compose/chrome_compose_client.cc
+@@ -170,14 +170,14 @@ void ChromeComposeClient::CloseUI(compose::mojom::Clos
+ void ChromeComposeClient::ApproveConsent() {
+ pref_service_->SetBoolean(
+ unified_consent::prefs::kPageContentCollectionEnabled, true);
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ pref_service_->SetBoolean(prefs::kPrefHasAcceptedComposeConsent, true);
+ #endif
+ UpdateAllSessionsWithConsentApproved();
+ }
+
+ void ChromeComposeClient::AcknowledgeConsentDisclaimer() {
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ pref_service_->SetBoolean(prefs::kPrefHasAcceptedComposeConsent, true);
+ #endif
+ UpdateAllSessionsWithConsentApproved();
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_content__settings_one__time__permission__provider.cc b/www/ungoogled-chromium/files/patch-chrome_browser_content__settings_one__time__permission__provider.cc
index b399a104be8f..2face7964fe6 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_content__settings_one__time__permission__provider.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_content__settings_one__time__permission__provider.cc
@@ -1,6 +1,6 @@
---- chrome/browser/content_settings/one_time_permission_provider.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/content_settings/one_time_permission_provider.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/content_settings/one_time_permission_provider.cc
-@@ -226,8 +226,13 @@ void OneTimePermissionProvider::OnSuspend() {
+@@ -231,8 +231,13 @@ void OneTimePermissionProvider::OnSuspend() {
while (rule_iterator && rule_iterator->HasNext()) {
auto rule = rule_iterator->Next();
@@ -14,7 +14,7 @@
permissions::PermissionUmaUtil::RecordOneTimePermissionEvent(
setting_type,
permissions::OneTimePermissionEvent::EXPIRED_ON_SUSPEND);
-@@ -329,8 +334,13 @@ void OneTimePermissionProvider::DeleteEntriesMatchingG
+@@ -334,8 +339,13 @@ void OneTimePermissionProvider::DeleteEntriesMatchingG
auto rule = rule_iterator->Next();
if (rule->primary_pattern.Matches(origin_gurl) &&
rule->secondary_pattern.Matches(origin_gurl)) {
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_download_chrome__download__manager__delegate.cc b/www/ungoogled-chromium/files/patch-chrome_browser_download_chrome__download__manager__delegate.cc
index 18c7c76ed788..0b94540c52d3 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_download_chrome__download__manager__delegate.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_download_chrome__download__manager__delegate.cc
@@ -1,6 +1,6 @@
---- chrome/browser/download/chrome_download_manager_delegate.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/download/chrome_download_manager_delegate.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/download/chrome_download_manager_delegate.cc
-@@ -1594,7 +1594,7 @@ void ChromeDownloadManagerDelegate::OnDownloadTargetDe
+@@ -1604,7 +1604,7 @@ void ChromeDownloadManagerDelegate::OnDownloadTargetDe
bool ChromeDownloadManagerDelegate::IsOpenInBrowserPreferreredForFile(
const base::FilePath& path) {
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -9,7 +9,7 @@
if (path.MatchesExtension(FILE_PATH_LITERAL(".pdf"))) {
return !download_prefs_->ShouldOpenPdfInSystemReader();
}
-@@ -1660,7 +1660,7 @@ void ChromeDownloadManagerDelegate::CheckDownloadAllow
+@@ -1670,7 +1670,7 @@ void ChromeDownloadManagerDelegate::CheckDownloadAllow
content::CheckDownloadAllowedCallback check_download_allowed_cb) {
DCHECK_CURRENTLY_ON(BrowserThread::UI);
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_download_download__item__model.cc b/www/ungoogled-chromium/files/patch-chrome_browser_download_download__item__model.cc
index 896bea41fc9d..e5e821de0a9a 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_download_download__item__model.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_download_download__item__model.cc
@@ -1,6 +1,6 @@
---- chrome/browser/download/download_item_model.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/download/download_item_model.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/download/download_item_model.cc
-@@ -752,7 +752,7 @@ bool DownloadItemModel::IsCommandChecked(
+@@ -753,7 +753,7 @@ bool DownloadItemModel::IsCommandChecked(
download_crx_util::IsExtensionDownload(*download_);
case DownloadCommands::ALWAYS_OPEN_TYPE:
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -9,7 +9,7 @@
if (download_commands->CanOpenPdfInSystemViewer()) {
DownloadPrefs* prefs = DownloadPrefs::FromBrowserContext(profile());
return prefs->ShouldOpenPdfInSystemReader();
-@@ -798,7 +798,7 @@ void DownloadItemModel::ExecuteCommand(DownloadCommand
+@@ -799,7 +799,7 @@ void DownloadItemModel::ExecuteCommand(DownloadCommand
DownloadCommands::ALWAYS_OPEN_TYPE);
DownloadPrefs* prefs = DownloadPrefs::FromBrowserContext(profile());
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -18,7 +18,7 @@
if (download_commands->CanOpenPdfInSystemViewer()) {
prefs->SetShouldOpenPdfInSystemReader(!is_checked);
SetShouldPreferOpeningInBrowser(is_checked);
-@@ -1078,7 +1078,7 @@ void DownloadItemModel::DetermineAndSetShouldPreferOpe
+@@ -1079,7 +1079,7 @@ void DownloadItemModel::DetermineAndSetShouldPreferOpe
return;
}
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_download_download__prefs.cc b/www/ungoogled-chromium/files/patch-chrome_browser_download_download__prefs.cc
index 3b01f73d9a1f..5be8ba6ace66 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_download_download__prefs.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_download_download__prefs.cc
@@ -1,4 +1,4 @@
---- chrome/browser/download/download_prefs.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/download/download_prefs.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/download/download_prefs.cc
@@ -11,6 +11,7 @@
#include <vector>
@@ -28,7 +28,7 @@
base::FilePath home_dir = base::GetHomeDir();
if (download_path == home_dir) {
return true;
-@@ -181,7 +186,7 @@ DownloadPrefs::DownloadPrefs(Profile* profile) : profi
+@@ -179,7 +184,7 @@ DownloadPrefs::DownloadPrefs(Profile* profile) : profi
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -37,7 +37,7 @@
should_open_pdf_in_system_reader_ =
prefs->GetBoolean(prefs::kOpenPdfDownloadInSystemReader);
#endif
-@@ -303,7 +308,7 @@ void DownloadPrefs::RegisterProfilePrefs(
+@@ -301,7 +306,7 @@ void DownloadPrefs::RegisterProfilePrefs(
registry->RegisterFilePathPref(prefs::kSaveFileDefaultDirectory,
default_download_path);
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -46,7 +46,7 @@
registry->RegisterBooleanPref(prefs::kOpenPdfDownloadInSystemReader, false);
#endif
#if BUILDFLAG(IS_ANDROID)
-@@ -463,7 +468,7 @@ void DownloadPrefs::DisableAutoOpenByUserBasedOnExtens
+@@ -461,7 +466,7 @@ void DownloadPrefs::DisableAutoOpenByUserBasedOnExtens
}
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -55,7 +55,7 @@
void DownloadPrefs::SetShouldOpenPdfInSystemReader(bool should_open) {
if (should_open_pdf_in_system_reader_ == should_open)
return;
-@@ -495,7 +500,7 @@ bool DownloadPrefs::ShouldOpenPdfInSystemReader() cons
+@@ -493,7 +498,7 @@ bool DownloadPrefs::ShouldOpenPdfInSystemReader() cons
void DownloadPrefs::ResetAutoOpenByUser() {
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -64,7 +64,7 @@
SetShouldOpenPdfInSystemReader(false);
#endif
auto_open_by_user_.clear();
-@@ -526,7 +531,7 @@ void DownloadPrefs::SaveAutoOpenState() {
+@@ -524,7 +529,7 @@ void DownloadPrefs::SaveAutoOpenState() {
bool DownloadPrefs::CanPlatformEnableAutoOpenForPdf() const {
#if BUILDFLAG(IS_CHROMEOS)
return false; // There is no UI for auto-open on ChromeOS.
@@ -73,7 +73,7 @@
return ShouldOpenPdfInSystemReader();
#else
return false;
-@@ -650,7 +655,14 @@ base::FilePath DownloadPrefs::SanitizeDownloadTargetPa
+@@ -648,7 +653,14 @@ base::FilePath DownloadPrefs::SanitizeDownloadTargetPa
#else
// If the stored download directory is an absolute path, we presume it's
// correct; there's not really much more validation we can do here.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_analysis_analysis__service__settings.cc b/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_analysis_analysis__service__settings.cc
index ef116a33cc9d..aeb244aa14f2 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_analysis_analysis__service__settings.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_enterprise_connectors_analysis_analysis__service__settings.cc
@@ -1,6 +1,6 @@
---- chrome/browser/enterprise/connectors/analysis/analysis_service_settings.cc.orig 2023-10-13 13:20:35 UTC
+--- chrome/browser/enterprise/connectors/analysis/analysis_service_settings.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/enterprise/connectors/analysis/analysis_service_settings.cc
-@@ -143,7 +143,7 @@ AnalysisServiceSettings::AnalysisServiceSettings(
+@@ -150,7 +150,7 @@ AnalysisServiceSettings::AnalysisServiceSettings(
const char* verification_key = kKeyWindowsVerification;
#elif BUILDFLAG(IS_MAC)
const char* verification_key = kKeyMacVerification;
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_BUILD.gn b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_BUILD.gn
index 013de666cc0d..51d963b0a2fd 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_BUILD.gn
@@ -1,6 +1,6 @@
---- chrome/browser/extensions/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/extensions/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/extensions/BUILD.gn
-@@ -1366,6 +1366,10 @@ static_library("extensions") {
+@@ -1371,6 +1371,10 @@ static_library("extensions") {
deps += [ "//chrome/services/printing/public/mojom" ]
}
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_api__browser__context__keyed__service__factories.cc b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_api__browser__context__keyed__service__factories.cc
index 58ff5224bd5c..9750db8e83d7 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_api__browser__context__keyed__service__factories.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_api__browser__context__keyed__service__factories.cc
@@ -1,15 +1,15 @@
---- chrome/browser/extensions/api/api_browser_context_keyed_service_factories.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/extensions/api/api_browser_context_keyed_service_factories.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/extensions/api/api_browser_context_keyed_service_factories.cc
-@@ -43,7 +43,7 @@
- #include "extensions/browser/api/bluetooth_low_energy/bluetooth_low_energy_api.h"
+@@ -44,7 +44,7 @@
#include "extensions/browser/api/networking_private/networking_private_delegate_factory.h"
+ #include "printing/buildflags/buildflags.h"
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
#include "chrome/browser/extensions/api/system_indicator/system_indicator_manager_factory.h"
#endif
-@@ -118,7 +118,7 @@ void EnsureApiBrowserContextKeyedServiceFactoriesBuilt
+@@ -126,7 +126,7 @@ void EnsureApiBrowserContextKeyedServiceFactoriesBuilt
extensions::SettingsPrivateEventRouterFactory::GetInstance();
extensions::SettingsOverridesAPI::GetFactoryInstance();
extensions::SidePanelService::GetFactoryInstance();
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_enterprise__reporting__private_chrome__desktop__report__request__helper.cc b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_enterprise__reporting__private_chrome__desktop__report__request__helper.cc
index a1298a5a5312..21a210163251 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_enterprise__reporting__private_chrome__desktop__report__request__helper.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_enterprise__reporting__private_chrome__desktop__report__request__helper.cc
@@ -1,4 +1,4 @@
---- chrome/browser/extensions/api/enterprise_reporting_private/chrome_desktop_report_request_helper.cc.orig 2022-10-01 07:40:07 UTC
+--- chrome/browser/extensions/api/enterprise_reporting_private/chrome_desktop_report_request_helper.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/extensions/api/enterprise_reporting_private/chrome_desktop_report_request_helper.cc
@@ -24,7 +24,7 @@
#include "base/win/registry.h"
@@ -9,7 +9,7 @@
#include "base/environment.h"
#include "base/nix/xdg_util.h"
#endif
-@@ -256,7 +256,7 @@ base::FilePath GetEndpointVerificationDir() {
+@@ -255,7 +255,7 @@ base::FilePath GetEndpointVerificationDir() {
bool got_path = false;
#if BUILDFLAG(IS_WIN)
got_path = base::PathService::Get(base::DIR_LOCAL_APP_DATA, &path);
@@ -18,7 +18,7 @@
std::unique_ptr<base::Environment> env(base::Environment::Create());
path = base::nix::GetXDGDirectory(env.get(), base::nix::kXdgConfigHomeEnvVar,
base::nix::kDotConfigDir);
-@@ -267,7 +267,7 @@ base::FilePath GetEndpointVerificationDir() {
+@@ -266,7 +266,7 @@ base::FilePath GetEndpointVerificationDir() {
if (!got_path)
return path;
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_enterprise__reporting__private_enterprise__reporting__private__api.cc b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_enterprise__reporting__private_enterprise__reporting__private__api.cc
index e3f255d946ae..9eb96eabdc78 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_enterprise__reporting__private_enterprise__reporting__private__api.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_enterprise__reporting__private_enterprise__reporting__private__api.cc
@@ -1,4 +1,4 @@
---- chrome/browser/extensions/api/enterprise_reporting_private/enterprise_reporting_private_api.cc.orig 2023-05-05 12:12:41 UTC
+--- chrome/browser/extensions/api/enterprise_reporting_private/enterprise_reporting_private_api.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/extensions/api/enterprise_reporting_private/enterprise_reporting_private_api.cc
@@ -32,7 +32,7 @@
#include "components/reporting/util/statusor.h"
@@ -27,7 +27,7 @@
EnterpriseReportingPrivateGetPersistentSecretFunction::
EnterpriseReportingPrivateGetPersistentSecretFunction() = default;
-@@ -593,7 +593,7 @@ void EnterpriseReportingPrivateEnqueueRecordFunction::
+@@ -592,7 +592,7 @@ void EnterpriseReportingPrivateEnqueueRecordFunction::
}
#endif
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_passwords__private_passwords__private__delegate__impl.cc b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_passwords__private_passwords__private__delegate__impl.cc
index 650059c88b6b..70260fc8c6bf 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_passwords__private_passwords__private__delegate__impl.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_passwords__private_passwords__private__delegate__impl.cc
@@ -1,6 +1,6 @@
---- chrome/browser/extensions/api/passwords_private/passwords_private_delegate_impl.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/extensions/api/passwords_private/passwords_private_delegate_impl.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/extensions/api/passwords_private/passwords_private_delegate_impl.cc
-@@ -601,7 +601,7 @@ void PasswordsPrivateDelegateImpl::OnFetchingFamilyMem
+@@ -597,7 +597,7 @@ void PasswordsPrivateDelegateImpl::OnFetchingFamilyMem
}
void PasswordsPrivateDelegateImpl::OsReauthTimeoutCall() {
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_settings__private_prefs__util.cc b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_settings__private_prefs__util.cc
index 0500e77eb932..fe834aef36d1 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_settings__private_prefs__util.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_settings__private_prefs__util.cc
@@ -1,20 +1,20 @@
---- chrome/browser/extensions/api/settings_private/prefs_util.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/extensions/api/settings_private/prefs_util.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/extensions/api/settings_private/prefs_util.cc
-@@ -193,7 +193,7 @@ const PrefsUtil::TypedPrefMap& PrefsUtil::GetAllowlist
+@@ -194,7 +194,7 @@ const PrefsUtil::TypedPrefMap& PrefsUtil::GetAllowlist
(*s_allowlist)[::prefs::kSidePanelHorizontalAlignment] =
- settings_api::PrefType::PREF_TYPE_BOOLEAN;
+ settings_api::PrefType::kBoolean;
-#if BUILDFLAG(IS_LINUX)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
(*s_allowlist)[::prefs::kUseCustomChromeFrame] =
- settings_api::PrefType::PREF_TYPE_BOOLEAN;
+ settings_api::PrefType::kBoolean;
#endif
-@@ -205,7 +205,7 @@ const PrefsUtil::TypedPrefMap& PrefsUtil::GetAllowlist
- settings_api::PrefType::PREF_TYPE_STRING;
- (*s_allowlist)[::prefs::kPolicyThemeColor] =
- settings_api::PrefType::PREF_TYPE_NUMBER;
+@@ -203,7 +203,7 @@ const PrefsUtil::TypedPrefMap& PrefsUtil::GetAllowlist
+ // Appearance settings.
+ (*s_allowlist)[::prefs::kCurrentThemeID] = settings_api::PrefType::kString;
+ (*s_allowlist)[::prefs::kPolicyThemeColor] = settings_api::PrefType::kNumber;
-#if BUILDFLAG(IS_LINUX)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
- (*s_allowlist)[::prefs::kSystemTheme] =
- settings_api::PrefType::PREF_TYPE_NUMBER;
+ (*s_allowlist)[::prefs::kSystemTheme] = settings_api::PrefType::kNumber;
#endif
+ (*s_allowlist)[::prefs::kHomePage] = settings_api::PrefType::kUrl;
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_webrtc__logging__private_webrtc__logging__private__api.cc b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_webrtc__logging__private_webrtc__logging__private__api.cc
index 562190140f2e..e932e7b72d73 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_webrtc__logging__private_webrtc__logging__private__api.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_webrtc__logging__private_webrtc__logging__private__api.cc
@@ -1,4 +1,4 @@
---- chrome/browser/extensions/api/webrtc_logging_private/webrtc_logging_private_api.cc.orig 2022-10-01 07:40:07 UTC
+--- chrome/browser/extensions/api/webrtc_logging_private/webrtc_logging_private_api.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/extensions/api/webrtc_logging_private/webrtc_logging_private_api.cc
@@ -29,7 +29,7 @@
#include "extensions/browser/process_manager.h"
@@ -18,7 +18,7 @@
if (extension) {
enabled_by_permissions =
extension->permissions_data()->active_permissions().HasAPIPermission(
-@@ -580,7 +580,7 @@ void WebrtcLoggingPrivateStartEventLoggingFunction::Fi
+@@ -576,7 +576,7 @@ void WebrtcLoggingPrivateStartEventLoggingFunction::Fi
ExtensionFunction::ResponseAction
WebrtcLoggingPrivateGetLogsDirectoryFunction::Run() {
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_webstore__private_webstore__private__api.cc b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_webstore__private_webstore__private__api.cc
index 76a832c4d16e..fc107f3eb034 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_webstore__private_webstore__private__api.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_extensions_api_webstore__private_webstore__private__api.cc
@@ -1,6 +1,6 @@
---- chrome/browser/extensions/api/webstore_private/webstore_private_api.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/extensions/api/webstore_private/webstore_private_api.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/extensions/api/webstore_private/webstore_private_api.cc
-@@ -907,7 +907,7 @@ void WebstorePrivateBeginInstallWithManifest3Function:
+@@ -892,7 +892,7 @@ void WebstorePrivateBeginInstallWithManifest3Function:
RequestExtensionApproval(contents);
return;
}
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_file__system__access_chrome__file__system__access__permission__context.cc b/www/ungoogled-chromium/files/patch-chrome_browser_file__system__access_chrome__file__system__access__permission__context.cc
index a38aaf39990d..ef67ef69c571 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_file__system__access_chrome__file__system__access__permission__context.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_file__system__access_chrome__file__system__access__permission__context.cc
@@ -1,6 +1,6 @@
---- chrome/browser/file_system_access/chrome_file_system_access_permission_context.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/file_system_access/chrome_file_system_access_permission_context.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/file_system_access/chrome_file_system_access_permission_context.cc
-@@ -321,7 +321,7 @@ const struct {
+@@ -323,7 +323,7 @@ const struct {
FILE_PATH_LITERAL("Library/Mobile Documents/com~apple~CloudDocs"),
kDontBlockChildren},
#endif
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.cc b/www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.cc
index 355357c4f623..8f2209bd649d 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.cc
@@ -1,6 +1,6 @@
---- chrome/browser/flag_descriptions.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/flag_descriptions.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/flag_descriptions.cc
-@@ -7255,7 +7255,7 @@ const char kLacrosMergeIcuDataFileDescription[] =
+@@ -7424,7 +7424,7 @@ const char kLacrosMergeIcuDataFileDescription[] =
"Enables sharing common areas of icudtl.dat between Ash and Lacros.";
#endif // #if BUILDFLAG(IS_CHROMEOS_LACROS)
@@ -9,7 +9,7 @@
const char kGetAllScreensMediaName[] = "GetAllScreensMedia API";
const char kGetAllScreensMediaDescription[] =
"When enabled, the getAllScreensMedia API for capturing multiple screens "
-@@ -7494,7 +7494,7 @@ const char kSearchWebInSidePanelDescription[] =
+@@ -7664,7 +7664,7 @@ const char kSearchWebInSidePanelDescription[] =
// Random platform combinations -----------------------------------------------
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -18,7 +18,7 @@
const char kQuickCommandsName[] = "Quick Commands";
const char kQuickCommandsDescription[] =
"Enable a text interface to browser features. Invoke with Ctrl-Space.";
-@@ -7503,7 +7503,7 @@ const char kQuickCommandsDescription[] =
+@@ -7673,7 +7673,7 @@ const char kQuickCommandsDescription[] =
// BUILDFLAG(IS_FUCHSIA)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -27,7 +27,7 @@
const char kFollowingFeedSidepanelName[] = "Following feed in the sidepanel";
const char kFollowingFeedSidepanelDescription[] =
"Enables the following feed in the sidepanel.";
-@@ -7518,7 +7518,7 @@ const char kEnableProtoApiForClassifyUrlDescription[]
+@@ -7688,7 +7688,7 @@ const char kEnableProtoApiForClassifyUrlDescription[]
"instead of JSON.";
#endif
@@ -36,7 +36,7 @@
const char kEnableNetworkServiceSandboxName[] =
"Enable the network service sandbox.";
const char kEnableNetworkServiceSandboxDescription[] =
-@@ -7542,7 +7542,7 @@ const char kWebShareDescription[] =
+@@ -7712,7 +7712,7 @@ const char kWebShareDescription[] =
"platforms.";
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC)
@@ -45,7 +45,7 @@
const char kOzonePlatformHintChoiceDefault[] = "Default";
const char kOzonePlatformHintChoiceAuto[] = "Auto";
const char kOzonePlatformHintChoiceX11[] = "X11";
-@@ -7562,7 +7562,7 @@ const char kWebBluetoothConfirmPairingSupportDescripti
+@@ -7732,7 +7732,7 @@ const char kWebBluetoothConfirmPairingSupportDescripti
"Bluetooth";
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX)
@@ -54,7 +54,7 @@
const char kSkipUndecryptablePasswordsName[] =
"Skip undecryptable passwords to use the available decryptable "
"passwords.";
-@@ -7576,7 +7576,7 @@ const char kForcePasswordInitialSyncWhenDecryptionFail
+@@ -7746,7 +7746,7 @@ const char kForcePasswordInitialSyncWhenDecryptionFail
"storage and requests initial sync.";
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
@@ -63,7 +63,7 @@
const char kAsyncDnsName[] = "Async DNS resolver";
const char kAsyncDnsDescription[] = "Enables the built-in DNS resolver.";
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX)
-@@ -7675,7 +7675,7 @@ const char kElasticOverscrollDescription[] =
+@@ -7852,7 +7852,7 @@ const char kElementCaptureDescription[] =
#if BUILDFLAG(IS_WIN) || \
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)) || \
@@ -72,9 +72,9 @@
const char kUIDebugToolsName[] = "Debugging tools for UI";
const char kUIDebugToolsDescription[] =
"Enables additional keyboard shortcuts to help debugging.";
-@@ -7721,7 +7721,7 @@ const char kEnableAudioFocusEnforcementDescription[] =
- "any one time. Requires #enable-media-session-service to be enabled too.";
- #endif // BUILDFLAG(IS_CHROMEOS_ASH)
+@@ -7904,7 +7904,7 @@ const char kComposeName[] = "CCO Edits";
+ const char kComposeDescription[] = "Enables CCO editing feature";
+ #endif // BUILDFLAG(ENABLE_COMPOSE)
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.h b/www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.h
index 096de91b0cff..e3fe84bb172a 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_flag__descriptions.h
@@ -1,6 +1,6 @@
---- chrome/browser/flag_descriptions.h.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/flag_descriptions.h.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/flag_descriptions.h
-@@ -4172,7 +4172,7 @@ extern const char kLacrosMergeIcuDataFileName[];
+@@ -4260,7 +4260,7 @@ extern const char kLacrosMergeIcuDataFileName[];
extern const char kLacrosMergeIcuDataFileDescription[];
#endif // #if BUILDFLAG(IS_CHROMEOS_LACROS)
@@ -9,7 +9,7 @@
extern const char kGetAllScreensMediaName[];
extern const char kGetAllScreensMediaDescription[];
#endif // BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)
-@@ -4312,14 +4312,14 @@ extern const char kSearchWebInSidePanelDescription[];
+@@ -4400,14 +4400,14 @@ extern const char kSearchWebInSidePanelDescription[];
// Random platform combinations -----------------------------------------------
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -26,7 +26,7 @@
extern const char kWebShareName[];
extern const char kWebShareDescription[];
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC)
-@@ -4329,7 +4329,7 @@ extern const char kWebBluetoothConfirmPairingSupportNa
+@@ -4417,7 +4417,7 @@ extern const char kWebBluetoothConfirmPairingSupportNa
extern const char kWebBluetoothConfirmPairingSupportDescription[];
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX)
@@ -35,7 +35,7 @@
extern const char kOzonePlatformHintChoiceDefault[];
extern const char kOzonePlatformHintChoiceAuto[];
extern const char kOzonePlatformHintChoiceX11[];
-@@ -4339,7 +4339,7 @@ extern const char kOzonePlatformHintName[];
+@@ -4427,7 +4427,7 @@ extern const char kOzonePlatformHintName[];
extern const char kOzonePlatformHintDescription[];
#endif // BUILDFLAG(IS_LINUX)
@@ -44,7 +44,7 @@
extern const char kSkipUndecryptablePasswordsName[];
extern const char kSkipUndecryptablePasswordsDescription[];
-@@ -4347,13 +4347,13 @@ extern const char kForcePasswordInitialSyncWhenDecrypt
+@@ -4435,13 +4435,13 @@ extern const char kForcePasswordInitialSyncWhenDecrypt
extern const char kForcePasswordInitialSyncWhenDecryptionFailsDescription[];
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
@@ -60,7 +60,7 @@
extern const char kFollowingFeedSidepanelName[];
extern const char kFollowingFeedSidepanelDescription[];
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) ||
-@@ -4364,7 +4364,7 @@ extern const char kEnableProtoApiForClassifyUrlName[];
+@@ -4452,7 +4452,7 @@ extern const char kEnableProtoApiForClassifyUrlName[];
extern const char kEnableProtoApiForClassifyUrlDescription[];
#endif // BUILDFLAG(ENABLE_SUPERVISED_USERS)
@@ -69,7 +69,7 @@
extern const char kEnableNetworkServiceSandboxName[];
extern const char kEnableNetworkServiceSandboxDescription[];
-@@ -4441,7 +4441,7 @@ extern const char kElasticOverscrollDescription[];
+@@ -4534,7 +4534,7 @@ extern const char kElementCaptureDescription[];
#if BUILDFLAG(IS_WIN) || \
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)) || \
@@ -78,9 +78,9 @@
extern const char kUIDebugToolsName[];
extern const char kUIDebugToolsDescription[];
-@@ -4471,7 +4471,7 @@ extern const char kEnableAudioFocusEnforcementName[];
- extern const char kEnableAudioFocusEnforcementDescription[];
- #endif // BUILDFLAG(IS_CHROMEOS_ASH)
+@@ -4570,7 +4570,7 @@ extern const char kComposeName[];
+ extern const char kComposeDescription[];
+ #endif // BUILDFLAG(ENABLE_COMPOSE)
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_media__galleries_media__file__system__registry.cc b/www/ungoogled-chromium/files/patch-chrome_browser_media__galleries_media__file__system__registry.cc
index 2ad587fe56a5..7b34614eaebf 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_media__galleries_media__file__system__registry.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_media__galleries_media__file__system__registry.cc
@@ -1,6 +1,6 @@
---- chrome/browser/media_galleries/media_file_system_registry.cc.orig 2023-08-18 10:26:52 UTC
+--- chrome/browser/media_galleries/media_file_system_registry.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/media_galleries/media_file_system_registry.cc
-@@ -586,7 +586,12 @@ class MediaFileSystemRegistry::MediaFileSystemContextI
+@@ -584,7 +584,12 @@ class MediaFileSystemRegistry::MediaFileSystemContextI
// Constructor in 'private' section because depends on private class definition.
MediaFileSystemRegistry::MediaFileSystemRegistry()
: file_system_context_(new MediaFileSystemContextImpl) {
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__event__log__uploader.cc b/www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__event__log__uploader.cc
deleted file mode 100644
index 2d734b26fc96..000000000000
--- a/www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__event__log__uploader.cc
+++ /dev/null
@@ -1,11 +0,0 @@
---- chrome/browser/media/webrtc/webrtc_event_log_uploader.cc.orig 2023-07-21 09:49:17 UTC
-+++ chrome/browser/media/webrtc/webrtc_event_log_uploader.cc
-@@ -42,7 +42,7 @@ const char kProduct[] = "Chrome";
- const char kProduct[] = "Chrome_Mac";
- #elif BUILDFLAG(IS_CHROMEOS_ASH)
- const char kProduct[] = "Chrome_ChromeOS";
--#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)
-+#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_BSD)
- const char kProduct[] = "Chrome_Linux";
- #elif BUILDFLAG(IS_ANDROID)
- const char kProduct[] = "Chrome_Android";
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__log__uploader.cc b/www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__log__uploader.cc
index 90eb8474fc66..5fb1f8f16b15 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__log__uploader.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__log__uploader.cc
@@ -1,13 +1,11 @@
---- chrome/browser/media/webrtc/webrtc_log_uploader.cc.orig 2023-08-18 10:26:52 UTC
+--- chrome/browser/media/webrtc/webrtc_log_uploader.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/media/webrtc/webrtc_log_uploader.cc
-@@ -291,6 +291,10 @@ void WebRtcLogUploader::SetupMultipart(
- const char product[] = "Chrome_ChromeOS";
- #elif BUILDFLAG(IS_FUCHSIA)
- const char product[] = "Chrome_Fuchsia";
-+#elif defined(OS_OPENBSD)
-+ const char product[] = "Chrome_OpenBSD";
-+#elif defined(OS_FREEBSD)
-+ const char product[] = "Chrome_FreeBSD";
+@@ -100,7 +100,7 @@ std::string GetLogUploadProduct() {
+ const char product[] = "Chrome_Mac";
+ // TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
+ // of lacros-chrome is complete.
+-#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)
++#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_BSD)
+ #if !defined(ADDRESS_SANITIZER)
+ const char product[] = "Chrome_Linux";
#else
- #error Platform not supported.
- #endif
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__logging__controller.cc b/www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__logging__controller.cc
index 81e65df077ef..e954a3a19498 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__logging__controller.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__logging__controller.cc
@@ -1,4 +1,4 @@
---- chrome/browser/media/webrtc/webrtc_logging_controller.cc.orig 2023-05-05 12:12:41 UTC
+--- chrome/browser/media/webrtc/webrtc_logging_controller.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/media/webrtc/webrtc_logging_controller.cc
@@ -25,10 +25,10 @@
#include "components/webrtc_logging/browser/text_log_list.h"
@@ -13,7 +13,7 @@
using webrtc_event_logging::WebRtcEventLogManager;
-@@ -284,7 +284,7 @@ void WebRtcLoggingController::StartEventLogging(
+@@ -288,7 +288,7 @@ void WebRtcLoggingController::StartEventLogging(
web_app_id, callback);
}
@@ -22,7 +22,7 @@
void WebRtcLoggingController::GetLogsDirectory(
LogsDirectoryCallback callback,
LogsDirectoryErrorCallback error_callback) {
-@@ -329,7 +329,7 @@ void WebRtcLoggingController::GrantLogsDirectoryAccess
+@@ -334,7 +334,7 @@ void WebRtcLoggingController::GrantLogsDirectoryAccess
FROM_HERE,
base::BindOnce(std::move(callback), file_system.id(), registered_name));
}
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__logging__controller.h b/www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__logging__controller.h
index c7248c4978bf..01d95274459d 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__logging__controller.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_media_webrtc_webrtc__logging__controller.h
@@ -1,6 +1,6 @@
---- chrome/browser/media/webrtc/webrtc_logging_controller.h.orig 2023-05-05 12:12:41 UTC
+--- chrome/browser/media/webrtc/webrtc_logging_controller.h.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/media/webrtc/webrtc_logging_controller.h
-@@ -134,7 +134,7 @@ class WebRtcLoggingController
+@@ -133,7 +133,7 @@ class WebRtcLoggingController
size_t web_app_id,
const StartEventLoggingCallback& callback);
@@ -9,7 +9,7 @@
// Ensures that the WebRTC Logs directory exists and then grants render
// process access to the 'WebRTC Logs' directory, and invokes |callback| with
// the ids necessary to create a DirectoryEntry object.
-@@ -199,7 +199,7 @@ class WebRtcLoggingController
+@@ -197,7 +197,7 @@ class WebRtcLoggingController
content::BrowserContext* GetBrowserContext() const;
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_memory__details.cc b/www/ungoogled-chromium/files/patch-chrome_browser_memory__details.cc
index fbdd427c4108..4e1da4080755 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_memory__details.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_memory__details.cc
@@ -1,4 +1,4 @@
---- chrome/browser/memory_details.cc.orig 2023-09-17 07:59:53 UTC
+--- chrome/browser/memory_details.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/memory_details.cc
@@ -38,7 +38,7 @@
#include "services/resource_coordinator/public/cpp/memory_instrumentation/memory_instrumentation.h"
@@ -10,7 +10,7 @@
#endif
@@ -335,7 +335,7 @@ void MemoryDetails::CollectChildInfoOnUIThread() {
- std::ref(process)));
+ });
}
-#if BUILDFLAG(IS_POSIX) && !BUILDFLAG(IS_MAC) && !BUILDFLAG(IS_ANDROID)
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__browser__main__extra__parts__metrics.cc b/www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__browser__main__extra__parts__metrics.cc
index 1bb40b2461e5..0f0589ee302e 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__browser__main__extra__parts__metrics.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__browser__main__extra__parts__metrics.cc
@@ -1,4 +1,4 @@
---- chrome/browser/metrics/chrome_browser_main_extra_parts_metrics.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/metrics/chrome_browser_main_extra_parts_metrics.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/metrics/chrome_browser_main_extra_parts_metrics.cc
@@ -78,8 +78,10 @@
@@ -12,7 +12,7 @@
#include "base/linux_util.h"
#include "base/strings/string_split.h"
-@@ -108,7 +110,7 @@
+@@ -104,7 +106,7 @@
#include "chromeos/crosapi/cpp/crosapi_constants.h"
#endif // BUILDFLAG(IS_CHROMEOS_LACROS)
@@ -21,7 +21,7 @@
#include "chrome/browser/metrics/pressure/pressure_metrics_reporter.h"
#endif // BUILDFLAG(IS_LINUX)
-@@ -117,7 +119,7 @@
+@@ -113,7 +115,7 @@
#include "components/user_manager/user_manager.h"
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
@@ -30,7 +30,7 @@
#include "components/power_metrics/system_power_monitor.h"
#endif
-@@ -917,7 +919,7 @@ void RecordStartupMetrics() {
+@@ -837,7 +839,7 @@ void RecordStartupMetrics() {
// Record whether Chrome is the default browser or not.
// Disabled on Linux due to hanging browser tests, see crbug.com/1216328.
@@ -39,7 +39,7 @@
shell_integration::DefaultWebClientState default_state =
shell_integration::GetDefaultBrowser();
base::UmaHistogramEnumeration("DefaultBrowser.State", default_state,
-@@ -1239,11 +1241,11 @@ void ChromeBrowserMainExtraPartsMetrics::PostBrowserSt
+@@ -1145,11 +1147,11 @@ void ChromeBrowserMainExtraPartsMetrics::PostBrowserSt
}
#endif // !BUILDFLAG(IS_ANDROID)
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__browser__main__extra__parts__metrics.h b/www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__browser__main__extra__parts__metrics.h
index 618333dd53d0..884b8ffe80c2 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__browser__main__extra__parts__metrics.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__browser__main__extra__parts__metrics.h
@@ -1,4 +1,4 @@
---- chrome/browser/metrics/chrome_browser_main_extra_parts_metrics.h.orig 2023-09-01 04:52:40 UTC
+--- chrome/browser/metrics/chrome_browser_main_extra_parts_metrics.h.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/metrics/chrome_browser_main_extra_parts_metrics.h
@@ -33,7 +33,7 @@ class PowerMetricsReporter;
class ProcessMonitor;
@@ -9,7 +9,7 @@
class PressureMetricsReporter;
#endif // BUILDFLAG(IS_LINUX)
-@@ -139,7 +139,7 @@ class ChromeBrowserMainExtraPartsMetrics : public Chro
+@@ -131,7 +131,7 @@ class ChromeBrowserMainExtraPartsMetrics : public Chro
std::unique_ptr<BatteryDischargeReporter> battery_discharge_reporter_;
#endif // !BUILDFLAG(IS_ANDROID)
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__metrics__service__client.cc b/www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__metrics__service__client.cc
index 765b133fbd40..8ace2339f351 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__metrics__service__client.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_metrics_chrome__metrics__service__client.cc
@@ -1,6 +1,6 @@
---- chrome/browser/metrics/chrome_metrics_service_client.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/metrics/chrome_metrics_service_client.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/metrics/chrome_metrics_service_client.cc
-@@ -184,7 +184,7 @@
+@@ -185,7 +185,7 @@
#include "chrome/notification_helper/notification_helper_constants.h"
#endif
@@ -9,16 +9,16 @@
#include "components/metrics/motherboard_metrics_provider.h"
#endif
-@@ -210,7 +210,7 @@
+@@ -202,7 +202,7 @@
+ #include "chrome/browser/metrics/power/power_metrics_provider_mac.h"
+ #endif
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+ #include "chrome/browser/metrics/bluetooth_metrics_provider.h"
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
- #if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
-- BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_ANDROID)
-+ BUILDFLAG(IS_CHROMEOS_LACROS) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_BSD)
- #include "chrome/browser/metrics/family_link_user_metrics_provider.h"
- #endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || (BUILDFLAG(IS_LINUX) ||
- // BUILDFLAG(IS_CHROMEOS_LACROS))||BUILDFLAG(IS_ANDROID))
-@@ -810,7 +810,7 @@ void ChromeMetricsServiceClient::RegisterMetricsServic
+@@ -806,7 +806,7 @@ void ChromeMetricsServiceClient::RegisterMetricsServic
metrics_service_->RegisterMetricsProvider(
std::make_unique<metrics::CPUMetricsProvider>());
@@ -27,16 +27,16 @@
metrics_service_->RegisterMetricsProvider(
std::make_unique<metrics::MotherboardMetricsProvider>());
#endif
-@@ -894,7 +894,7 @@ void ChromeMetricsServiceClient::RegisterMetricsServic
-
+@@ -891,7 +891,7 @@ void ChromeMetricsServiceClient::RegisterMetricsServic
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
--#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || \
-+#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD) || \
- (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS))
+ #if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || \
+- (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS))
++ (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)) || BUILDFLAG(IS_BSD)
metrics_service_->RegisterMetricsProvider(
std::make_unique<DesktopPlatformFeaturesMetricsProvider>());
-@@ -1013,7 +1013,7 @@ void ChromeMetricsServiceClient::RegisterMetricsServic
+ #endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || (BUILDFLAG(IS_LINUX) ||
+@@ -1004,7 +1004,7 @@ void ChromeMetricsServiceClient::RegisterMetricsServic
std::make_unique<PowerMetricsProvider>());
#endif
@@ -45,12 +45,12 @@
metrics_service_->RegisterMetricsProvider(
metrics::CreateDesktopSessionMetricsProvider());
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || (BUILDFLAG(IS_LINUX)
-@@ -1212,7 +1212,7 @@ bool ChromeMetricsServiceClient::RegisterForProfileEve
- #endif
+@@ -1204,7 +1204,7 @@ bool ChromeMetricsServiceClient::RegisterForProfileEve
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
--#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || \
-+#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD) || \
- (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS))
+ #if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || \
+- (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS))
++ (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)) || BUILDFLAG(IS_BSD)
// This creates the DesktopProfileSessionDurationsServices if it didn't exist
// already.
+ metrics::DesktopProfileSessionDurationsServiceFactory::GetForBrowserContext(
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.cc b/www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.cc
index 40e8b406f606..579260e4a05e 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.cc
@@ -1,4 +1,4 @@
---- chrome/browser/net/system_network_context_manager.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/net/system_network_context_manager.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/net/system_network_context_manager.cc
@@ -92,7 +92,7 @@
@@ -104,8 +104,8 @@
pref_change_registrar_.Add(kGssapiDesiredPref, auth_pref_callback);
#endif // BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)
-@@ -577,7 +577,7 @@ SystemNetworkContextManager::SystemNetworkContextManag
- #endif // BUILDFLAG(CHROME_ROOT_STORE_POLICY_SUPPORTED)
+@@ -570,7 +570,7 @@ SystemNetworkContextManager::SystemNetworkContextManag
+ #endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
- BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
@@ -113,7 +113,7 @@
pref_change_registrar_.Add(
prefs::kEnforceLocalAnchorConstraintsEnabled,
base::BindRepeating(&SystemNetworkContextManager::
-@@ -632,7 +632,7 @@ void SystemNetworkContextManager::RegisterPrefs(PrefRe
+@@ -625,7 +625,7 @@ void SystemNetworkContextManager::RegisterPrefs(PrefRe
registry->RegisterBooleanPref(prefs::kKerberosEnabled, false);
#endif // BUILDFLAG(IS_CHROMEOS_LACROS)
@@ -122,16 +122,16 @@
registry->RegisterBooleanPref(prefs::kAuthNegotiateDelegateByKdcPolicy,
false);
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_CHROMEOS)
-@@ -661,7 +661,7 @@ void SystemNetworkContextManager::RegisterPrefs(PrefRe
- registry->RegisterBooleanPref(prefs::kChromeRootStoreEnabled, false);
- #endif // BUILDFLAG(CHROME_ROOT_STORE_POLICY_SUPPORTED)
+@@ -649,7 +649,7 @@ void SystemNetworkContextManager::RegisterPrefs(PrefRe
+ registry->RegisterIntegerPref(prefs::kMaxConnectionsPerProxy, -1);
+
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
- BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
+ BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_BSD)
// Note that the default value is not relevant because the pref is only
// evaluated when it is managed.
registry->RegisterBooleanPref(prefs::kEnforceLocalAnchorConstraintsEnabled,
-@@ -670,11 +670,11 @@ void SystemNetworkContextManager::RegisterPrefs(PrefRe
+@@ -658,11 +658,11 @@ void SystemNetworkContextManager::RegisterPrefs(PrefRe
registry->RegisterListPref(prefs::kExplicitlyAllowedNetworkPorts);
@@ -145,7 +145,7 @@
registry->RegisterBooleanPref(prefs::kReceivedHttpAuthNegotiateHeader, false);
#endif // BUILDFLAG(IS_LINUX)
-@@ -727,7 +727,7 @@ void SystemNetworkContextManager::OnNetworkServiceCrea
+@@ -715,7 +715,7 @@ void SystemNetworkContextManager::OnNetworkServiceCrea
OnNewHttpAuthDynamicParams(http_auth_dynamic_params);
network_service->ConfigureHttpAuthPrefs(std::move(http_auth_dynamic_params));
@@ -154,7 +154,7 @@
gssapi_library_loader_observer_.Install(network_service);
#endif // BUILDFLAG(IS_LINUX)
-@@ -938,7 +938,7 @@ bool SystemNetworkContextManager::IsNetworkSandboxEnab
+@@ -929,7 +929,7 @@ bool SystemNetworkContextManager::IsNetworkSandboxEnab
break;
}
@@ -163,8 +163,8 @@
if (!enabled) {
g_network_service_will_allow_gssapi_library_load = true;
}
-@@ -1052,7 +1052,7 @@ void SystemNetworkContextManager::UpdateChromeRootStor
- #endif // BUILDFLAG(CHROME_ROOT_STORE_POLICY_SUPPORTED)
+@@ -1010,7 +1010,7 @@ void SystemNetworkContextManager::UpdateExplicitlyAllo
+ }
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
- BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.h b/www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.h
index e6d3afbe29e6..9dfce83009dd 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_net_system__network__context__manager.h
@@ -1,4 +1,4 @@
---- chrome/browser/net/system_network_context_manager.h.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/net/system_network_context_manager.h.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/net/system_network_context_manager.h
@@ -185,7 +185,7 @@ class SystemNetworkContextManager {
class URLLoaderFactoryForSystem;
@@ -9,8 +9,8 @@
class GssapiLibraryLoadObserver
: public network::mojom::GssapiLibraryLoadObserver {
public:
-@@ -229,7 +229,7 @@ class SystemNetworkContextManager {
- #endif // BUILDFLAG(CHROME_ROOT_STORE_POLICY_SUPPORTED)
+@@ -221,7 +221,7 @@ class SystemNetworkContextManager {
+ void UpdateExplicitlyAllowedNetworkPorts();
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
- BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
@@ -18,7 +18,7 @@
// Applies the current value of the kEnforceLocalAnchorConstraintsEnabled
// pref to the enforcement state.
void UpdateEnforceLocalAnchorConstraintsEnabled();
-@@ -277,7 +277,7 @@ class SystemNetworkContextManager {
+@@ -269,7 +269,7 @@ class SystemNetworkContextManager {
static absl::optional<bool> certificate_transparency_enabled_for_testing_;
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_password__manager_password__reuse__manager__factory.cc b/www/ungoogled-chromium/files/patch-chrome_browser_password__manager_password__reuse__manager__factory.cc
index 012af53dc653..1614b36758d1 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_password__manager_password__reuse__manager__factory.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_password__manager_password__reuse__manager__factory.cc
@@ -1,6 +1,6 @@
---- chrome/browser/password_manager/password_reuse_manager_factory.cc.orig 2023-09-17 07:59:53 UTC
+--- chrome/browser/password_manager/password_reuse_manager_factory.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/password_manager/password_reuse_manager_factory.cc
-@@ -115,7 +115,7 @@ PasswordReuseManagerFactory::BuildServiceInstanceForBr
+@@ -133,7 +133,7 @@ PasswordReuseManagerFactory::BuildServiceInstanceForBr
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_policy_chrome__browser__cloud__management__controller__desktop.cc b/www/ungoogled-chromium/files/patch-chrome_browser_policy_chrome__browser__cloud__management__controller__desktop.cc
index 22b95bef86c3..02bd1963c2da 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_policy_chrome__browser__cloud__management__controller__desktop.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_policy_chrome__browser__cloud__management__controller__desktop.cc
@@ -1,6 +1,6 @@
---- chrome/browser/policy/chrome_browser_cloud_management_controller_desktop.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/policy/chrome_browser_cloud_management_controller_desktop.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/policy/chrome_browser_cloud_management_controller_desktop.cc
-@@ -49,7 +49,7 @@
+@@ -50,7 +50,7 @@
#include "chrome/browser/policy/browser_dm_token_storage_mac.h"
#endif // BUILDFLAG(IS_MAC)
@@ -9,7 +9,7 @@
#include "chrome/browser/policy/browser_dm_token_storage_linux.h"
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-@@ -58,7 +58,7 @@
+@@ -59,7 +59,7 @@
#include "chrome/install_static/install_util.h"
#endif // BUILDFLAG(IS_WIN)
@@ -18,7 +18,7 @@
#include "chrome/browser/enterprise/connectors/device_trust/key_management/browser/device_trust_key_manager_impl.h"
#include "chrome/browser/enterprise/connectors/device_trust/key_management/browser/key_rotation_launcher.h"
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
-@@ -90,7 +90,7 @@ void ChromeBrowserCloudManagementControllerDesktop::
+@@ -91,7 +91,7 @@ void ChromeBrowserCloudManagementControllerDesktop::
#if BUILDFLAG(IS_MAC)
storage_delegate = std::make_unique<BrowserDMTokenStorageMac>();
@@ -27,7 +27,7 @@
storage_delegate = std::make_unique<BrowserDMTokenStorageLinux>();
#elif BUILDFLAG(IS_WIN)
storage_delegate = std::make_unique<BrowserDMTokenStorageWin>();
-@@ -245,7 +245,7 @@ ChromeBrowserCloudManagementControllerDesktop::CreateC
+@@ -261,7 +261,7 @@ ChromeBrowserCloudManagementControllerDesktop::CreateC
std::unique_ptr<enterprise_connectors::DeviceTrustKeyManager>
ChromeBrowserCloudManagementControllerDesktop::CreateDeviceTrustKeyManager() {
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_policy_configuration__policy__handler__list__factory.cc b/www/ungoogled-chromium/files/patch-chrome_browser_policy_configuration__policy__handler__list__factory.cc
index 7fe4326af89e..c16ee001da08 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_policy_configuration__policy__handler__list__factory.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_policy_configuration__policy__handler__list__factory.cc
@@ -1,6 +1,6 @@
---- chrome/browser/policy/configuration_policy_handler_list_factory.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/policy/configuration_policy_handler_list_factory.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/policy/configuration_policy_handler_list_factory.cc
-@@ -216,19 +216,20 @@
+@@ -221,19 +221,20 @@
#include "components/spellcheck/browser/pref_names.h"
#endif // BUILDFLAG(ENABLE_SPELLCHECK)
@@ -24,7 +24,16 @@
#include "chrome/browser/privacy_sandbox/privacy_sandbox_policy_handler.h"
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) ||
// BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA) ||
-@@ -798,14 +799,14 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -244,7 +245,7 @@
+ #endif
+
+ #if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
+- BUILDFLAG(IS_CHROMEOS_ASH)
++ BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_BSD)
+ #include "chrome/browser/policy/battery_saver_policy_handler.h"
+ #endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) ||
+ // BUILDFLAG(IS_CHROMEOS_ASH)
+@@ -806,14 +807,14 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
prefs::kManagedDefaultGeolocationSetting,
base::Value::Type::INTEGER },
#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) \
@@ -41,7 +50,7 @@
{ key::kFullscreenAllowed,
prefs::kFullscreenAllowed,
base::Value::Type::BOOLEAN },
-@@ -1537,7 +1538,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -1545,7 +1546,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
base::Value::Type::LIST },
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
@@ -50,7 +59,7 @@
{ key::kGSSAPILibraryName,
prefs::kGSSAPILibraryName,
base::Value::Type::STRING },
-@@ -1582,7 +1583,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -1590,7 +1591,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
base::Value::Type::BOOLEAN },
#endif // BUILDFLAG(IS_WIN)
@@ -59,7 +68,7 @@
{ key::kNetworkServiceSandboxEnabled,
prefs::kNetworkServiceSandboxEnabled,
base::Value::Type::BOOLEAN },
-@@ -1608,18 +1609,18 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -1616,18 +1617,18 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
prefs::kTotalMemoryLimitMb,
base::Value::Type::INTEGER },
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC)
@@ -81,7 +90,7 @@
{ key::kDefaultBrowserSettingEnabled,
prefs::kDefaultBrowserSettingEnabled,
base::Value::Type::BOOLEAN },
-@@ -1632,7 +1633,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -1640,7 +1641,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
// || BUILDFLAG(IS_FUCHSIA)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) \
@@ -90,7 +99,7 @@
{ key::kAutoplayAllowed,
prefs::kAutoplayAllowed,
base::Value::Type::BOOLEAN },
-@@ -1730,7 +1731,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -1738,7 +1739,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
base::Value::Type::BOOLEAN },
#endif // !BUILDFLAG(IS_ANDROID) && !BUILDFLAG(IS_CHROMEOS)
@@ -99,7 +108,7 @@
{ key::kAlternativeBrowserPath,
browser_switcher::prefs::kAlternativeBrowserPath,
base::Value::Type::STRING },
-@@ -1823,7 +1824,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -1831,7 +1832,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
base::Value::Type::BOOLEAN },
#endif // BUILDFLAG(IS_CHROMEOS)
@@ -108,7 +117,7 @@
// TODO(crbug.com/1454054): replace the
// kGetDisplayMediaSetSelectAllScreensAllowedForUrls policy by a policy that
// matches the name of the new `getAllScreensMedia` API.
-@@ -1832,7 +1833,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -1840,7 +1841,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
base::Value::Type::LIST },
#endif // BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)
@@ -117,8 +126,8 @@
{ key::kAuthNegotiateDelegateByKdcPolicy,
prefs::kAuthNegotiateDelegateByKdcPolicy,
base::Value::Type::BOOLEAN },
-@@ -1863,7 +1864,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
- #endif // BUILDFLAG(CHROME_ROOT_STORE_POLICY_SUPPORTED)
+@@ -1865,7 +1866,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+ #endif // BUILDFLAG(ENABLE_EXTENSIONS)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
- BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
@@ -126,16 +135,16 @@
{ key::kEnforceLocalAnchorConstraintsEnabled,
prefs::kEnforceLocalAnchorConstraintsEnabled,
base::Value::Type::BOOLEAN },
-@@ -1939,7 +1940,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -1941,7 +1942,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
ash::prefs::kUrlParameterToAutofillSAMLUsername,
base::Value::Type::STRING },
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH)
+#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_BSD)
- { key::kBatterySaverModeAvailability,
- performance_manager::user_tuning::prefs::kBatterySaverModeState,
- base::Value::Type::INTEGER },
-@@ -1970,7 +1971,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+ { key::kTabDiscardingExceptions,
+ performance_manager::user_tuning::prefs::kManagedTabDiscardingExceptions,
+ base::Value::Type::LIST },
+@@ -1969,7 +1970,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
prefs::kCloudApAuthEnabled,
base::Value::Type::INTEGER },
#endif // BUILDFLAG(IS_WIN)
@@ -144,7 +153,7 @@
{ key::kOutOfProcessSystemDnsResolutionEnabled,
prefs::kOutOfProcessSystemDnsResolutionEnabled,
base::Value::Type::BOOLEAN },
-@@ -2007,7 +2008,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
+@@ -2006,7 +2007,7 @@ const PolicyToPreferenceMapEntry kSimplePolicyMap[] =
prefs::kManagedPrivateNetworkAccessRestrictionsEnabled,
base::Value::Type::BOOLEAN },
#if BUILDFLAG(ENABLE_EXTENSIONS)
@@ -153,7 +162,7 @@
{ key::kExtensionInstallTypeBlocklist,
extensions::pref_names::kExtensionInstallTypeBlocklist,
base::Value::Type::LIST},
-@@ -2102,7 +2103,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
+@@ -2116,7 +2117,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
// Policies for all platforms - End
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -161,8 +170,8 @@
+ BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_BSD)
handlers->AddHandler(
std::make_unique<performance_manager::HighEfficiencyPolicyHandler>());
- #endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) ||
-@@ -2278,7 +2279,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
+ // Note: This needs to be created after `DefaultSearchPolicyHandler`.
+@@ -2295,7 +2296,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
#endif // BUILDFLAG(IS_ANDROID)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -171,7 +180,7 @@
handlers->AddHandler(
std::make_unique<enterprise_idle::IdleTimeoutPolicyHandler>());
handlers->AddHandler(
-@@ -2336,7 +2337,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
+@@ -2353,7 +2354,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
key::kBrowsingDataLifetime, browsing_data::prefs::kBrowsingDataLifetime,
chrome_schema));
@@ -180,7 +189,7 @@
handlers->AddHandler(std::make_unique<LocalSyncPolicyHandler>());
handlers->AddHandler(std::make_unique<ThemeColorPolicyHandler>());
handlers->AddHandler(
-@@ -2730,7 +2731,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
+@@ -2747,7 +2748,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
#endif
#if BUILDFLAG(ENABLE_SPELLCHECK)
@@ -189,7 +198,7 @@
handlers->AddHandler(std::make_unique<SpellcheckLanguagePolicyHandler>());
handlers->AddHandler(
std::make_unique<SpellcheckLanguageBlocklistPolicyHandler>(
-@@ -2738,7 +2739,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
+@@ -2755,7 +2756,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
#endif // BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN)
#endif // BUILDFLAG(ENABLE_SPELLCHECK)
@@ -198,7 +207,7 @@
handlers->AddHandler(std::make_unique<SimplePolicyHandler>(
key::kAllowSystemNotifications, prefs::kAllowSystemNotifications,
base::Value::Type::BOOLEAN));
-@@ -2755,7 +2756,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
+@@ -2772,7 +2773,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
/*new_policy_handler=*/std::make_unique<
first_party_sets::FirstPartySetsOverridesPolicyHandler>(
policy::key::kRelatedWebsiteSetsOverrides, chrome_schema)));
@@ -207,3 +216,12 @@
BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_ANDROID)
handlers->AddHandler(std::make_unique<PrivacySandboxPolicyHandler>());
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) ||
+@@ -2797,7 +2798,7 @@ std::unique_ptr<ConfigurationPolicyHandlerList> BuildH
+ base::Value::Type::BOOLEAN)));
+
+ #if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
+- BUILDFLAG(IS_CHROMEOS_ASH)
++ BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_BSD)
+ handlers->AddHandler(std::make_unique<BatterySaverPolicyHandler>());
+ #endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) ||
+ // BUILDFLAG(IS_CHROMEOS_ASH)
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_prefs_browser__prefs.cc b/www/ungoogled-chromium/files/patch-chrome_browser_prefs_browser__prefs.cc
index 6b9205a044d7..5713b9a7e612 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_prefs_browser__prefs.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_prefs_browser__prefs.cc
@@ -1,6 +1,6 @@
---- chrome/browser/prefs/browser_prefs.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/prefs/browser_prefs.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/prefs/browser_prefs.cc
-@@ -482,18 +482,18 @@
+@@ -490,18 +490,18 @@
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -22,7 +22,7 @@
#include "chrome/browser/enterprise/signin/enterprise_signin_prefs.h"
#endif
-@@ -525,7 +525,7 @@
+@@ -533,7 +533,7 @@
#include "chrome/browser/sessions/session_service_log.h"
#endif
@@ -31,7 +31,7 @@
#include "ui/color/system_theme.h"
#endif
-@@ -649,7 +649,7 @@ const char kPluginsPluginsList[] = "plugins.plugins_li
+@@ -657,7 +657,7 @@ const char kPluginsPluginsList[] = "plugins.plugins_li
const char kPluginsShowDetails[] = "plugins.show_details";
// Deprecated 02/2023.
@@ -40,7 +40,7 @@
const char kWebAppsUrlHandlerInfo[] = "web_apps.url_handler_info";
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
-@@ -979,7 +979,7 @@ void RegisterLocalStatePrefsForMigration(PrefRegistryS
+@@ -1017,7 +1017,7 @@ void RegisterLocalStatePrefsForMigration(PrefRegistryS
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
// Deprecated 02/2023.
@@ -49,7 +49,7 @@
registry->RegisterDictionaryPref(kWebAppsUrlHandlerInfo);
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
-@@ -1952,12 +1952,12 @@ void RegisterProfilePrefs(user_prefs::PrefRegistrySync
+@@ -2022,12 +2022,12 @@ void RegisterProfilePrefs(user_prefs::PrefRegistrySync
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -64,7 +64,7 @@
browser_switcher::BrowserSwitcherPrefs::RegisterProfilePrefs(registry);
enterprise_signin::RegisterProfilePrefs(registry);
#endif
-@@ -2114,7 +2114,7 @@ void MigrateObsoleteLocalStatePrefs(PrefService* local
+@@ -2187,7 +2187,7 @@ void MigrateObsoleteLocalStatePrefs(PrefService* local
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
// Added 02/2023
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_printing_printer__query.cc b/www/ungoogled-chromium/files/patch-chrome_browser_printing_printer__query.cc
index cced7ce1f221..6ee1b2033231 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_printing_printer__query.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_printing_printer__query.cc
@@ -1,8 +1,8 @@
---- chrome/browser/printing/printer_query.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/printing/printer_query.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/printing/printer_query.cc
@@ -341,7 +341,7 @@ void PrinterQuery::UpdatePrintSettings(base::Value::Di
crash_key = std::make_unique<crash_keys::ScopedPrinterInfo>(
- print_backend->GetPrinterDriverInfo(printer_name));
+ printer_name, print_backend->GetPrinterDriverInfo(printer_name));
-#if BUILDFLAG(IS_LINUX) && BUILDFLAG(USE_CUPS)
+#if (BUILDFLAG(IS_BSD) || BUILDFLAG(IS_LINUX)) && BUILDFLAG(USE_CUPS)
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_profiles_chrome__browser__main__extra__parts__profiles.cc b/www/ungoogled-chromium/files/patch-chrome_browser_profiles_chrome__browser__main__extra__parts__profiles.cc
index 75f5eee09229..466db643ca3c 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_profiles_chrome__browser__main__extra__parts__profiles.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_profiles_chrome__browser__main__extra__parts__profiles.cc
@@ -1,6 +1,6 @@
---- chrome/browser/profiles/chrome_browser_main_extra_parts_profiles.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/profiles/chrome_browser_main_extra_parts_profiles.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/profiles/chrome_browser_main_extra_parts_profiles.cc
-@@ -400,18 +400,18 @@
+@@ -401,18 +401,18 @@
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -22,16 +22,16 @@
#include "chrome/browser/browser_switcher/browser_switcher_service_factory.h"
#include "chrome/browser/enterprise/connectors/analysis/local_binary_upload_service_factory.h"
#include "chrome/browser/enterprise/signals/signals_aggregator_factory.h"
-@@ -620,7 +620,7 @@ void ChromeBrowserMainExtraPartsProfiles::
- if (breadcrumbs::IsEnabled()) {
- BreadcrumbManagerKeyedServiceFactory::GetInstance();
- }
+@@ -643,7 +643,7 @@ void ChromeBrowserMainExtraPartsProfiles::
+ DiceBoundSessionCookieServiceFactory::GetInstance();
+ #endif // BUILDFLAG(ENABLE_DICE_SUPPORT)
+ #endif // BUILDFLAG(ENABLE_BOUND_SESSION_CREDENTIALS)
-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
+#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
browser_switcher::BrowserSwitcherServiceFactory::GetInstance();
#endif
browser_sync::UserEventServiceFactory::GetInstance();
-@@ -726,26 +726,26 @@ void ChromeBrowserMainExtraPartsProfiles::
+@@ -763,26 +763,26 @@ void ChromeBrowserMainExtraPartsProfiles::
enterprise_commands::UserRemoteCommandsServiceFactory::GetInstance();
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -63,7 +63,7 @@
enterprise_signin::EnterpriseSigninServiceFactory::GetInstance();
#endif
#if BUILDFLAG(ENABLE_SESSION_SERVICE)
-@@ -861,7 +861,7 @@ void ChromeBrowserMainExtraPartsProfiles::
+@@ -900,7 +900,7 @@ void ChromeBrowserMainExtraPartsProfiles::
#endif
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -72,7 +72,7 @@
(BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS))
metrics::DesktopProfileSessionDurationsServiceFactory::GetInstance();
#endif
-@@ -958,7 +958,7 @@ void ChromeBrowserMainExtraPartsProfiles::
+@@ -998,7 +998,7 @@ void ChromeBrowserMainExtraPartsProfiles::
#if BUILDFLAG(IS_CHROMEOS)
policy::PolicyCertServiceFactory::GetInstance();
#endif
@@ -81,7 +81,7 @@
policy::ProfileTokenPolicyWebSigninServiceFactory::GetInstance();
#endif
policy::UserCloudPolicyInvalidatorFactory::GetInstance();
-@@ -1002,7 +1002,7 @@ void ChromeBrowserMainExtraPartsProfiles::
+@@ -1042,7 +1042,7 @@ void ChromeBrowserMainExtraPartsProfiles::
#if !BUILDFLAG(IS_ANDROID)
ProfileThemeUpdateServiceFactory::GetInstance();
#endif
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_renderer__preferences__util.cc b/www/ungoogled-chromium/files/patch-chrome_browser_renderer__preferences__util.cc
index e4cae54149d2..abefa3235fa4 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_renderer__preferences__util.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_renderer__preferences__util.cc
@@ -1,4 +1,4 @@
---- chrome/browser/renderer_preferences_util.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/renderer_preferences_util.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/renderer_preferences_util.cc
@@ -38,7 +38,7 @@
#include "ui/views/controls/textfield/textfield.h"
@@ -9,7 +9,7 @@
#include "chrome/browser/themes/theme_service.h"
#include "chrome/browser/themes/theme_service_factory.h"
#include "ui/linux/linux_ui.h"
-@@ -152,7 +152,7 @@ void UpdateFromSystemSettings(blink::RendererPreferenc
+@@ -150,7 +150,7 @@ void UpdateFromSystemSettings(blink::RendererPreferenc
prefs->caret_blink_interval = views::Textfield::GetCaretBlinkInterval();
#endif
@@ -18,7 +18,7 @@
auto* linux_ui_theme = ui::LinuxUiTheme::GetForProfile(profile);
if (linux_ui_theme) {
if (ThemeServiceFactory::GetForProfile(profile)->UsingSystemTheme()) {
-@@ -175,7 +175,7 @@ void UpdateFromSystemSettings(blink::RendererPreferenc
+@@ -173,7 +173,7 @@ void UpdateFromSystemSettings(blink::RendererPreferenc
#endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_resources_settings_route.ts b/www/ungoogled-chromium/files/patch-chrome_browser_resources_settings_route.ts
index 576d1db37c95..c8253a535b32 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_resources_settings_route.ts
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_resources_settings_route.ts
@@ -1,6 +1,6 @@
---- chrome/browser/resources/settings/route.ts.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/resources/settings/route.ts.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/resources/settings/route.ts
-@@ -242,7 +242,7 @@ function createBrowserSettingsRoutes(): SettingsRoutes
+@@ -245,7 +245,7 @@ function createBrowserSettingsRoutes(): SettingsRoutes
r.ACCESSIBILITY = r.ADVANCED.createSection(
'/accessibility', 'a11y', loadTimeData.getString('a11yPageTitle'));
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_cloud__content__scanning_binary__upload__service.cc b/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_cloud__content__scanning_binary__upload__service.cc
index 6382cc88f723..9d9077489c9b 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_cloud__content__scanning_binary__upload__service.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_cloud__content__scanning_binary__upload__service.cc
@@ -1,4 +1,4 @@
---- chrome/browser/safe_browsing/cloud_content_scanning/binary_upload_service.cc.orig 2023-10-13 13:20:35 UTC
+--- chrome/browser/safe_browsing/cloud_content_scanning/binary_upload_service.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/safe_browsing/cloud_content_scanning/binary_upload_service.cc
@@ -18,7 +18,7 @@
#include "net/base/url_util.h"
@@ -9,7 +9,7 @@
#include "chrome/browser/enterprise/connectors/analysis/local_binary_upload_service_factory.h"
#endif
-@@ -421,7 +421,7 @@ BinaryUploadService* BinaryUploadService::GetForProfil
+@@ -417,7 +417,7 @@ BinaryUploadService* BinaryUploadService::GetForProfil
Profile* profile,
const enterprise_connectors::AnalysisSettings& settings) {
// Local content analysis is supported only on desktop platforms.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_download__protection_file__analyzer.cc b/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_download__protection_file__analyzer.cc
index b01714448ca9..0e60d126047e 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_download__protection_file__analyzer.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_safe__browsing_download__protection_file__analyzer.cc
@@ -1,4 +1,4 @@
---- chrome/browser/safe_browsing/download_protection/file_analyzer.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/safe_browsing/download_protection/file_analyzer.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/safe_browsing/download_protection/file_analyzer.cc
@@ -21,7 +21,7 @@
#include "content/public/browser/browser_thread.h"
@@ -18,7 +18,7 @@
} else if (inspection_type == DownloadFileType::OFFICE_DOCUMENT) {
StartExtractDocumentFeatures();
#endif
-@@ -314,7 +314,7 @@ void FileAnalyzer::OnDmgAnalysisFinished(
+@@ -318,7 +318,7 @@ void FileAnalyzer::OnDmgAnalysisFinished(
}
#endif // BUILDFLAG(IS_MAC)
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_search__engine__choice_search__engine__choice__client__side__trial.cc b/www/ungoogled-chromium/files/patch-chrome_browser_search__engine__choice_search__engine__choice__client__side__trial.cc
new file mode 100644
index 000000000000..1b41343b7acc
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_search__engine__choice_search__engine__choice__client__side__trial.cc
@@ -0,0 +1,11 @@
+--- chrome/browser/search_engine_choice/search_engine_choice_client_side_trial.cc.orig 2024-02-03 15:42:55 UTC
++++ chrome/browser/search_engine_choice/search_engine_choice_client_side_trial.cc
+@@ -133,7 +133,7 @@ void RegisterLocalStatePrefs(PrefRegistrySimple* regis
+ void SetUpIfNeeded(const base::FieldTrial::EntropyProvider& entropy_provider,
+ base::FeatureList* feature_list,
+ PrefService* local_state) {
+-#if !BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_MAC)
++#if !BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_MAC) && !BUILDFLAG(IS_BSD)
+ // Platform not in scope for this client-side trial.
+ return;
+ #else
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.cc
index de286bc517b7..b0b1ad95d2e9 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/chrome_pages.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/ui/chrome_pages.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/ui/chrome_pages.cc
-@@ -78,7 +78,7 @@
+@@ -79,7 +79,7 @@
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -9,7 +9,7 @@
#include "chrome/browser/web_applications/web_app_utils.h"
#endif
-@@ -671,7 +671,7 @@ void ShowShortcutCustomizationApp(Profile* profile,
+@@ -701,7 +701,7 @@ void ShowShortcutCustomizationApp(Profile* profile,
}
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.h b/www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.h
index b455b16f2562..d2306a5edcbe 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_chrome__pages.h
@@ -1,4 +1,4 @@
---- chrome/browser/ui/chrome_pages.h.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/ui/chrome_pages.h.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/ui/chrome_pages.h
@@ -38,7 +38,7 @@ enum class ConsentLevel;
} // namespace signin
@@ -9,7 +9,7 @@
namespace web_app {
enum class AppSettingsPageEntryPoint;
} // namespace web_app
-@@ -255,7 +255,7 @@ void ShowShortcutCustomizationApp(Profile* profile,
+@@ -258,7 +258,7 @@ void ShowShortcutCustomizationApp(Profile* profile,
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_startup_startup__browser__creator.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_startup_startup__browser__creator.cc
index 6ff8648c67a5..7fcaa43ce758 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_startup_startup__browser__creator.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_startup_startup__browser__creator.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/startup/startup_browser_creator.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/startup/startup_browser_creator.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/ui/startup/startup_browser_creator.cc
-@@ -133,7 +133,7 @@
+@@ -135,7 +135,7 @@
#include "chrome/credential_provider/common/gcp_strings.h"
#endif // BUILDFLAG(IS_WIN)
@@ -9,7 +9,7 @@
#include "chrome/browser/headless/headless_mode_util.h"
#include "chrome/browser/ui/startup/web_app_info_recorder_utils.h"
#include "components/headless/policy/headless_mode_policy.h"
-@@ -925,7 +925,7 @@ bool StartupBrowserCreator::ProcessCmdLineImpl(
+@@ -935,7 +935,7 @@ bool StartupBrowserCreator::ProcessCmdLineImpl(
TRACE_EVENT0("startup", "StartupBrowserCreator::ProcessCmdLineImpl");
ComputeAndRecordLaunchMode(command_line);
@@ -18,7 +18,7 @@
if (headless::IsHeadlessMode() &&
headless::HeadlessModePolicy::IsHeadlessModeDisabled(
g_browser_process->local_state())) {
-@@ -1029,7 +1029,7 @@ bool StartupBrowserCreator::ProcessCmdLineImpl(
+@@ -1040,7 +1040,7 @@ bool StartupBrowserCreator::ProcessCmdLineImpl(
silent_launch = true;
}
@@ -27,7 +27,7 @@
// Writes open and installed web apps to the specified file without
// launching a new browser window or tab.
if (base::FeatureList::IsEnabled(features::kListWebAppsSwitch) &&
-@@ -1242,7 +1242,7 @@ bool StartupBrowserCreator::ProcessCmdLineImpl(
+@@ -1251,7 +1251,7 @@ bool StartupBrowserCreator::ProcessCmdLineImpl(
CHECK_EQ(profile_info.mode, StartupProfileMode::kBrowserWindow)
<< "Failed launch with app: couldn't pick a profile";
std::string app_id = command_line.GetSwitchValueASCII(switches::kAppId);
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_tab__helpers.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_tab__helpers.cc
index b427cf4831a4..182245ec2438 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_tab__helpers.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_tab__helpers.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/tab_helpers.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/ui/tab_helpers.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/ui/tab_helpers.cc
-@@ -225,7 +225,7 @@
+@@ -226,7 +226,7 @@
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -9,7 +9,7 @@
#include "chrome/browser/ui/blocked_content/framebust_block_tab_helper.h"
#include "chrome/browser/ui/browser_finder.h"
#include "chrome/browser/ui/hats/hats_helper.h"
-@@ -641,12 +641,12 @@ void TabHelpers::AttachTabHelpers(WebContents* web_con
+@@ -645,12 +645,12 @@ void TabHelpers::AttachTabHelpers(WebContents* web_con
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_ui__features.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_ui__features.cc
index 49b1b836aadf..c9035beb5c7a 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_ui__features.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_ui__features.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/ui_features.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/ui/ui_features.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/ui/ui_features.cc
-@@ -315,7 +315,7 @@ BASE_FEATURE(kTopChromeWebUIUsesSpareRenderer,
+@@ -345,7 +345,7 @@ BASE_FEATURE(kTopChromeWebUIUsesSpareRenderer,
"TopChromeWebUIUsesSpareRenderer",
base::FEATURE_ENABLED_BY_DEFAULT);
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_ui__features.h b/www/ungoogled-chromium/files/patch-chrome_browser_ui_ui__features.h
index c9caefdd0d78..74bc3326ec94 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_ui__features.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_ui__features.h
@@ -1,6 +1,6 @@
---- chrome/browser/ui/ui_features.h.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/ui_features.h.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/ui/ui_features.h
-@@ -195,7 +195,7 @@ BASE_DECLARE_FEATURE(kToolbarUseHardwareBitmapDraw);
+@@ -219,7 +219,7 @@ BASE_DECLARE_FEATURE(kToolbarUseHardwareBitmapDraw);
BASE_DECLARE_FEATURE(kTopChromeWebUIUsesSpareRenderer);
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_dark__mode__manager__linux.h b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_dark__mode__manager__linux.h
deleted file mode 100644
index 95d6fd1c40bd..000000000000
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_dark__mode__manager__linux.h
+++ /dev/null
@@ -1,10 +0,0 @@
---- chrome/browser/ui/views/dark_mode_manager_linux.h.orig 2023-10-13 13:20:35 UTC
-+++ chrome/browser/ui/views/dark_mode_manager_linux.h
-@@ -6,6 +6,7 @@
- #define CHROME_BROWSER_UI_VIEWS_DARK_MODE_MANAGER_LINUX_H_
-
- #include <string>
-+#include <vector>
-
- #include "base/gtest_prod_util.h"
- #include "base/memory/scoped_refptr.h"
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_browser__view.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_browser__view.cc
index f2d8fe3a3cb8..3d09c4446d06 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_browser__view.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_browser__view.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/views/frame/browser_view.cc.orig 2024-01-06 08:40:52 UTC
+--- chrome/browser/ui/views/frame/browser_view.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/ui/views/frame/browser_view.cc
-@@ -2172,7 +2172,7 @@ void BrowserView::TabDraggingStatusChanged(bool is_dra
+@@ -2154,7 +2154,7 @@ void BrowserView::TabDraggingStatusChanged(bool is_dra
// CrOS cleanup is done.
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_picture__in__picture__browser__frame__view.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_picture__in__picture__browser__frame__view.cc
index 1628cdf0e82b..d1b21a926df4 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_picture__in__picture__browser__frame__view.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_frame_picture__in__picture__browser__frame__view.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/views/frame/picture_in_picture_browser_frame_view.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/ui/views/frame/picture_in_picture_browser_frame_view.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/ui/views/frame/picture_in_picture_browser_frame_view.cc
-@@ -55,7 +55,7 @@
+@@ -57,7 +57,7 @@
#include "ui/aura/window.h"
#endif
@@ -9,7 +9,7 @@
#include "chrome/browser/ui/views/frame/browser_frame_view_paint_utils_linux.h"
#include "chrome/browser/ui/views/frame/desktop_browser_frame_aura_linux.h"
#endif
-@@ -83,7 +83,7 @@ constexpr int kContentSettingIconSize = 16;
+@@ -85,7 +85,7 @@ constexpr int kContentSettingIconSize = 16;
// The height of the controls bar at the top of the window.
constexpr int kTopControlsHeight = 30;
@@ -18,7 +18,7 @@
// Frame border when window shadow is not drawn.
constexpr int kFrameBorderThickness = 4;
#endif
-@@ -178,7 +178,7 @@ class WindowEventObserver : public ui::EventObserver {
+@@ -180,7 +180,7 @@ class WindowEventObserver : public ui::EventObserver {
gfx::Rect input_bounds = pip_browser_frame_view_->GetLocalBounds();
@@ -27,7 +27,7 @@
// Calculate input bounds for Linux. This is needed because the input bounds
// is not necessary the same as the local bounds on Linux.
if (pip_browser_frame_view_->ShouldDrawFrameShadow()) {
-@@ -560,7 +560,7 @@ PictureInPictureBrowserFrameView::PictureInPictureBrow
+@@ -570,7 +570,7 @@ PictureInPictureBrowserFrameView::PictureInPictureBrow
AddChildView(std::move(auto_pip_setting_overlay));
}
@@ -36,7 +36,7 @@
frame_background_ = std::make_unique<views::FrameBackground>();
#endif
-@@ -736,7 +736,7 @@ void PictureInPictureBrowserFrameView::OnThemeChanged(
+@@ -746,7 +746,7 @@ void PictureInPictureBrowserFrameView::OnThemeChanged(
for (ContentSettingImageView* view : content_setting_views_)
view->SetIconColor(color_provider->GetColor(kColorPipWindowForeground));
@@ -45,7 +45,7 @@
// On Linux the top bar background will be drawn in OnPaint().
top_bar_container_view_->SetBackground(views::CreateSolidBackground(
color_provider->GetColor(kColorPipWindowTopBarBackground)));
-@@ -811,7 +811,7 @@ void PictureInPictureBrowserFrameView::RemovedFromWidg
+@@ -825,7 +825,7 @@ void PictureInPictureBrowserFrameView::RemovedFromWidg
BrowserNonClientFrameView::RemovedFromWidget();
}
@@ -54,7 +54,7 @@
gfx::Insets PictureInPictureBrowserFrameView::MirroredFrameBorderInsets()
const {
auto border = FrameBorderInsets();
-@@ -1058,7 +1058,7 @@ void PictureInPictureBrowserFrameView::AnimationProgre
+@@ -1079,7 +1079,7 @@ void PictureInPictureBrowserFrameView::AnimationProgre
// views::View implementations:
void PictureInPictureBrowserFrameView::OnPaint(gfx::Canvas* canvas) {
@@ -63,7 +63,7 @@
// Draw the PiP window frame borders and shadows, including the top bar
// background.
if (window_frame_provider_) {
-@@ -1182,7 +1182,7 @@ void PictureInPictureBrowserFrameView::UpdateTopBarVie
+@@ -1203,7 +1203,7 @@ void PictureInPictureBrowserFrameView::UpdateTopBarVie
}
gfx::Insets PictureInPictureBrowserFrameView::FrameBorderInsets() const {
@@ -72,7 +72,7 @@
if (window_frame_provider_) {
const auto insets = window_frame_provider_->GetFrameThicknessDip();
const auto tiled_edges = frame()->tiled_edges();
-@@ -1203,7 +1203,7 @@ gfx::Insets PictureInPictureBrowserFrameView::FrameBor
+@@ -1224,7 +1224,7 @@ gfx::Insets PictureInPictureBrowserFrameView::FrameBor
}
gfx::Insets PictureInPictureBrowserFrameView::ResizeBorderInsets() const {
@@ -81,7 +81,7 @@
return FrameBorderInsets();
#elif BUILDFLAG(IS_CHROMEOS_ASH)
return gfx::Insets(chromeos::kResizeInsideBoundsSize);
-@@ -1224,7 +1224,7 @@ gfx::Size PictureInPictureBrowserFrameView::GetNonClie
+@@ -1245,7 +1245,7 @@ gfx::Size PictureInPictureBrowserFrameView::GetNonClie
top_height + border_thickness.bottom());
}
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_profiles_dice__web__signin__interception__backdrop__layer.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_profiles_dice__web__signin__interception__backdrop__layer.cc
deleted file mode 100644
index 8b8dcee0dd1b..000000000000
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_profiles_dice__web__signin__interception__backdrop__layer.cc
+++ /dev/null
@@ -1,11 +0,0 @@
---- chrome/browser/ui/views/profiles/dice_web_signin_interception_backdrop_layer.cc.orig 2023-11-04 07:08:51 UTC
-+++ chrome/browser/ui/views/profiles/dice_web_signin_interception_backdrop_layer.cc
-@@ -89,7 +89,7 @@ void DiceWebSigninInterceptionBackdropLayer::DrawDarkB
- // The dark layer should be drawn on top of this region, but it's not easily
- // accessible in code, and requires specific implementations.
- // After the 2023 refresh, this drag area no longer exists.
--#if BUILDFLAG(IS_LINUX)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
- // On linux, the drag area is accessible through the
- // `MirroredFrameBorderInsets()` function, which crashes on non-Linux
- // platforms.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_profiles_profile__menu__view__base.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_profiles_profile__menu__view__base.cc
index 7bf20a0225aa..956917593de9 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_profiles_profile__menu__view__base.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_profiles_profile__menu__view__base.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/views/profiles/profile_menu_view_base.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/ui/views/profiles/profile_menu_view_base.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/ui/views/profiles/profile_menu_view_base.cc
-@@ -660,7 +660,7 @@ void ProfileMenuViewBase::SetProfileIdentityInfo(
+@@ -666,7 +666,7 @@ void ProfileMenuViewBase::SetProfileIdentityInfo(
// TODO(crbug.com/1052397): Revisit once build flag switch of lacros-chrome is
// complete.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tab__search__bubble__host.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tab__search__bubble__host.cc
index 8990ee8f867f..a8f45dea4c54 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tab__search__bubble__host.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tab__search__bubble__host.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/views/tab_search_bubble_host.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/ui/views/tab_search_bubble_host.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/ui/views/tab_search_bubble_host.cc
-@@ -222,7 +222,7 @@ bool TabSearchBubbleHost::ShouldTabSearchRenderBeforeT
+@@ -249,7 +249,7 @@ bool TabSearchBubbleHost::ShouldTabSearchRenderBeforeT
// Mac should have tabsearch on the right side. Windows >= Win10 has the
// Tab Search button as a FrameCaptionButton, but it still needs to be on the
// left if it exists.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab.cc
index fa2848338a2b..cb765ded8f72 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_tabs_tab.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/views/tabs/tab.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/ui/views/tabs/tab.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/ui/views/tabs/tab.cc
-@@ -612,7 +612,7 @@ void Tab::MaybeUpdateHoverStatus(const ui::MouseEvent&
+@@ -611,7 +611,7 @@ void Tab::MaybeUpdateHoverStatus(const ui::MouseEvent&
if (mouse_hovered_ || !GetWidget()->IsMouseEventsEnabled())
return;
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_user__education_browser__user__education__service.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_user__education_browser__user__education__service.cc
new file mode 100644
index 000000000000..033ab8620bc7
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_views_user__education_browser__user__education__service.cc
@@ -0,0 +1,11 @@
+--- chrome/browser/ui/views/user_education/browser_user_education_service.cc.orig 2024-02-03 15:42:55 UTC
++++ chrome/browser/ui/views/user_education/browser_user_education_service.cc
+@@ -716,7 +716,7 @@ void MaybeRegisterChromeFeaturePromos(
+ FeaturePromoSpecification::AcceleratorInfo())
+ .SetBubbleArrow(HelpBubbleArrow::kTopLeft)));
+
+-#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN)
++#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+ // kIPHDesktopPWAsLinkCapturingLaunch:
+ registry.RegisterFeature(std::move(
+ FeaturePromoSpecification::CreateForCustomAction(
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_web__applications_web__app__dialogs.h b/www/ungoogled-chromium/files/patch-chrome_browser_ui_web__applications_web__app__dialogs.h
index f5c30e1db9b6..3e860acb2b02 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_web__applications_web__app__dialogs.h
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_web__applications_web__app__dialogs.h
@@ -1,6 +1,6 @@
---- chrome/browser/ui/web_applications/web_app_dialogs.h.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/ui/web_applications/web_app_dialogs.h.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/ui/web_applications/web_app_dialogs.h
-@@ -20,7 +20,7 @@
+@@ -21,7 +21,7 @@
#include "ui/gfx/native_widget_types.h"
static_assert(BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) ||
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_about__ui.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_about_about__ui.cc
index 9460753140a2..4d73fac74ede 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_about__ui.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_about_about__ui.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/webui/about_ui.cc.orig 2023-07-21 09:49:17 UTC
-+++ chrome/browser/ui/webui/about_ui.cc
-@@ -635,7 +635,7 @@ std::string ChromeURLs() {
+--- chrome/browser/ui/webui/about/about_ui.cc.orig 2024-02-03 15:42:55 UTC
++++ chrome/browser/ui/webui/about/about_ui.cc
+@@ -592,7 +592,7 @@ std::string ChromeURLs(content::BrowserContext* browse
return html;
}
@@ -9,7 +9,7 @@
std::string AboutLinuxProxyConfig() {
std::string data;
AppendHeader(&data,
-@@ -689,7 +689,7 @@ void AboutUIHTMLSource::StartDataRequest(
+@@ -646,7 +646,7 @@ void AboutUIHTMLSource::StartDataRequest(
response =
ui::ResourceBundle::GetSharedInstance().LoadDataResourceString(idr);
}
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_chrome__web__ui__controller__factory.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_chrome__web__ui__controller__factory.cc
index 6ef87ebe30ac..33468735a7fc 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_chrome__web__ui__controller__factory.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_chrome__web__ui__controller__factory.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/webui/chrome_web_ui_controller_factory.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/ui/webui/chrome_web_ui_controller_factory.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/ui/webui/chrome_web_ui_controller_factory.cc
-@@ -211,7 +211,7 @@
+@@ -210,7 +210,7 @@
#include "chrome/browser/ui/webui/chromeos/chrome_url_disabled/chrome_url_disabled_ui.h"
#endif
@@ -9,7 +9,7 @@
#include "chrome/browser/ui/webui/webui_js_error/webui_js_error_ui.h"
#endif
-@@ -237,17 +237,17 @@
+@@ -236,17 +236,17 @@
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -30,7 +30,7 @@
#include "chrome/browser/ui/webui/connectors_internals/connectors_internals_ui.h"
#endif
-@@ -404,7 +404,7 @@ bool IsAboutUI(const GURL& url) {
+@@ -399,7 +399,7 @@ bool IsAboutUI(const GURL& url) {
#if !BUILDFLAG(IS_ANDROID)
|| url.host_piece() == chrome::kChromeUITermsHost
#endif
@@ -39,7 +39,7 @@
|| url.host_piece() == chrome::kChromeUILinuxProxyConfigHost
#endif
#if BUILDFLAG(IS_CHROMEOS_ASH)
-@@ -668,7 +668,7 @@ WebUIFactoryFunction GetWebUIFactoryFunction(WebUI* we
+@@ -653,7 +653,7 @@ WebUIFactoryFunction GetWebUIFactoryFunction(WebUI* we
if (url.host_piece() == chrome::kChromeUIMobileSetupHost)
return &NewWebUI<ash::cellular_setup::MobileSetupUI>;
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
@@ -48,7 +48,7 @@
if (url.host_piece() == chrome::kChromeUIWebUIJsErrorHost)
return &NewWebUI<WebUIJsErrorUI>;
#endif
-@@ -731,7 +731,7 @@ WebUIFactoryFunction GetWebUIFactoryFunction(WebUI* we
+@@ -718,7 +718,7 @@ WebUIFactoryFunction GetWebUIFactoryFunction(WebUI* we
if (url.host_piece() == chrome::kChromeUINaClHost)
return &NewWebUI<NaClUI>;
#endif
@@ -57,7 +57,7 @@
defined(TOOLKIT_VIEWS)) || \
defined(USE_AURA)
if (url.host_piece() == chrome::kChromeUITabModalConfirmDialogHost)
-@@ -792,27 +792,27 @@ WebUIFactoryFunction GetWebUIFactoryFunction(WebUI* we
+@@ -779,27 +779,27 @@ WebUIFactoryFunction GetWebUIFactoryFunction(WebUI* we
}
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_realbox_realbox__handler.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_realbox_realbox__handler.cc
index 6c37e7f790e5..0d249bc944bf 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_realbox_realbox__handler.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_realbox_realbox__handler.cc
@@ -1,15 +1,24 @@
---- chrome/browser/ui/webui/realbox/realbox_handler.cc.orig 2023-10-13 13:20:35 UTC
+--- chrome/browser/ui/webui/realbox/realbox_handler.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/ui/webui/realbox/realbox_handler.cc
-@@ -146,7 +146,7 @@ constexpr char kMacShareIconResourceName[] =
+@@ -146,7 +146,7 @@ const char* kMacShareIconResourceName =
#elif BUILDFLAG(IS_WIN)
- constexpr char kWinShareIconResourceName[] =
+ const char* kWinShareIconResourceName =
"//resources/cr_components/omnibox/icons/win_share.svg";
-#elif BUILDFLAG(IS_LINUX)
+#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
- constexpr char kLinuxShareIconResourceName[] =
+ const char* kLinuxShareIconResourceName =
"//resources/cr_components/omnibox/icons/share.svg";
#else
-@@ -745,7 +745,7 @@ std::string RealboxHandler::PedalVectorIconToResourceN
+@@ -203,7 +203,7 @@ static void DefineChromeRefreshRealboxIcons() {
+ #elif BUILDFLAG(IS_WIN)
+ kWinShareIconResourceName =
+ "//resources/cr_components/omnibox/icons/win_share_cr23.svg";
+-#elif BUILDFLAG(IS_LINUX)
++#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ kLinuxShareIconResourceName =
+ "//resources/cr_components/omnibox/icons/share_cr23.svg";
+ #else
+@@ -818,7 +818,7 @@ std::string RealboxHandler::PedalVectorIconToResourceN
icon.name == omnibox::kShareWinChromeRefreshIcon.name) {
return kWinShareIconResourceName;
}
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_settings_accessibility__main__handler.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_settings_accessibility__main__handler.cc
new file mode 100644
index 000000000000..34b34c062c6b
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_settings_accessibility__main__handler.cc
@@ -0,0 +1,37 @@
+--- chrome/browser/ui/webui/settings/accessibility_main_handler.cc.orig 2024-02-03 15:42:55 UTC
++++ chrome/browser/ui/webui/settings/accessibility_main_handler.cc
+@@ -19,7 +19,7 @@
+ #include "content/public/browser/web_contents.h"
+ #include "content/public/browser/web_ui.h"
+
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
+ #include "ui/accessibility/accessibility_features.h"
+ #endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
+
+@@ -49,7 +49,7 @@ void AccessibilityMainHandler::OnJavascriptAllowed() {
+ base::Unretained(this)));
+ #endif // BUILDFLAG(IS_CHROMEOS_ASH)
+
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
+ if (features::IsPdfOcrEnabled()) {
+ CHECK(!component_ready_observer_.IsObserving());
+ component_ready_observer_.Observe(
+@@ -63,14 +63,14 @@ void AccessibilityMainHandler::OnJavascriptDisallowed(
+ accessibility_subscription_ = {};
+ #endif // BUILDFLAG(IS_CHROMEOS_ASH)
+
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
+ if (features::IsPdfOcrEnabled()) {
+ component_ready_observer_.Reset();
+ }
+ #endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
+ }
+
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
+ void AccessibilityMainHandler::DownloadProgressChanged(double progress) {
+ CHECK_GE(progress, 0.0);
+ CHECK_LE(progress, 1.0);
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_settings_accessibility__main__handler.h b/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_settings_accessibility__main__handler.h
new file mode 100644
index 000000000000..d8c33eb570a3
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_settings_accessibility__main__handler.h
@@ -0,0 +1,38 @@
+--- chrome/browser/ui/webui/settings/accessibility_main_handler.h.orig 2024-02-03 15:42:55 UTC
++++ chrome/browser/ui/webui/settings/accessibility_main_handler.h
+@@ -13,7 +13,7 @@
+ #include "chrome/browser/ash/accessibility/accessibility_manager.h"
+ #endif // BUILDFLAG(IS_CHROMEOS_ASH)
+
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
+ #include "base/scoped_observation.h"
+ #include "chrome/browser/screen_ai/screen_ai_install_state.h"
+ #endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
+@@ -24,7 +24,7 @@ namespace settings {
+ // chrome://settings/accessibility.
+ class AccessibilityMainHandler
+ : public ::settings::SettingsPageUIHandler
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
+ ,
+ public screen_ai::ScreenAIInstallState::Observer
+ #endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
+@@ -40,7 +40,7 @@ class AccessibilityMainHandler
+ void OnJavascriptAllowed() override;
+ void OnJavascriptDisallowed() override;
+
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
+ // screen_ai::ScreenAIInstallState::Observer:
+ void DownloadProgressChanged(double progress) override;
+ void StateChanged(screen_ai::ScreenAIInstallState::State state) override;
+@@ -59,7 +59,7 @@ class AccessibilityMainHandler
+ base::CallbackListSubscription accessibility_subscription_;
+ #endif // BUILDFLAG(IS_CHROMEOS_ASH)
+
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
+ base::ScopedObservation<screen_ai::ScreenAIInstallState,
+ screen_ai::ScreenAIInstallState::Observer>
+ component_ready_observer_{this};
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_settings_settings__localized__strings__provider.cc b/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_settings_settings__localized__strings__provider.cc
index 55c5353432be..ec977f17a280 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_settings_settings__localized__strings__provider.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_ui_webui_settings_settings__localized__strings__provider.cc
@@ -1,6 +1,6 @@
---- chrome/browser/ui/webui/settings/settings_localized_strings_provider.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/browser/ui/webui/settings/settings_localized_strings_provider.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/ui/webui/settings/settings_localized_strings_provider.cc
-@@ -136,7 +136,7 @@
+@@ -137,7 +137,7 @@
#include "ash/webui/settings/public/constants/routes.mojom.h"
#endif
@@ -9,7 +9,7 @@
#include "ui/display/screen.h"
#endif
-@@ -155,7 +155,7 @@
+@@ -156,7 +156,7 @@
#include "chrome/browser/ui/webui/certificate_manager_localized_strings_provider.h"
#endif
@@ -18,7 +18,7 @@
#include "ui/linux/linux_ui_factory.h"
#include "ui/ozone/public/ozone_platform.h"
#endif
-@@ -263,7 +263,7 @@ void AddCommonStrings(content::WebUIDataSource* html_s
+@@ -262,7 +262,7 @@ void AddCommonStrings(content::WebUIDataSource* html_s
base::FeatureList::IsEnabled(
supervised_user::kClearingCookiesKeepsSupervisedUsersSignedIn));
@@ -27,7 +27,7 @@
bool allow_qt_theme = base::FeatureList::IsEnabled(ui::kAllowQt);
#else
bool allow_qt_theme = false;
-@@ -294,7 +294,7 @@ void AddA11yStrings(content::WebUIDataSource* html_sou
+@@ -291,7 +291,7 @@ void AddA11yStrings(content::WebUIDataSource* html_sou
{"focusHighlightLabel",
IDS_SETTINGS_ACCESSIBILITY_FOCUS_HIGHLIGHT_DESCRIPTION},
#endif
@@ -36,7 +36,7 @@
{"overscrollHistoryNavigationTitle",
IDS_SETTINGS_OVERSCROLL_HISTORY_NAVIGATION_TITLE},
{"overscrollHistoryNavigationSubtitle",
-@@ -433,7 +433,7 @@ void AddAppearanceStrings(content::WebUIDataSource* ht
+@@ -436,7 +436,7 @@ void AddAppearanceStrings(content::WebUIDataSource* ht
{"huge", IDS_SETTINGS_HUGE_FONT_SIZE},
{"sidePanelAlignLeft", IDS_SETTINGS_SIDE_PANEL_ALIGN_LEFT},
{"sidePanelAlignRight", IDS_SETTINGS_SIDE_PANEL_ALIGN_RIGHT},
@@ -45,7 +45,7 @@
{"gtkTheme", IDS_SETTINGS_GTK_THEME},
{"useGtkTheme", IDS_SETTINGS_USE_GTK_THEME},
{"qtTheme", IDS_SETTINGS_QT_THEME},
-@@ -443,7 +443,7 @@ void AddAppearanceStrings(content::WebUIDataSource* ht
+@@ -446,7 +446,7 @@ void AddAppearanceStrings(content::WebUIDataSource* ht
#else
{"resetToDefaultTheme", IDS_SETTINGS_RESET_TO_DEFAULT_THEME},
#endif
@@ -54,7 +54,7 @@
{"showWindowDecorations", IDS_SHOW_WINDOW_DECORATIONS},
#endif
#if BUILDFLAG(IS_MAC)
-@@ -468,7 +468,7 @@ void AddAppearanceStrings(content::WebUIDataSource* ht
+@@ -471,7 +471,7 @@ void AddAppearanceStrings(content::WebUIDataSource* ht
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-chrome_browser_webauthn_chrome__authenticator__request__delegate.cc b/www/ungoogled-chromium/files/patch-chrome_browser_webauthn_chrome__authenticator__request__delegate.cc
index f69ac2c627df..6e417941e071 100644
--- a/www/ungoogled-chromium/files/patch-chrome_browser_webauthn_chrome__authenticator__request__delegate.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_browser_webauthn_chrome__authenticator__request__delegate.cc
@@ -1,6 +1,6 @@
---- chrome/browser/webauthn/chrome_authenticator_request_delegate.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/browser/webauthn/chrome_authenticator_request_delegate.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/browser/webauthn/chrome_authenticator_request_delegate.cc
-@@ -677,7 +677,7 @@ void ChromeAuthenticatorRequestDelegate::ConfigureDisc
+@@ -669,7 +669,7 @@ void ChromeAuthenticatorRequestDelegate::ConfigureDisc
g_observer->ConfiguringCable(request_type);
}
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_chrome__features.cc b/www/ungoogled-chromium/files/patch-chrome_common_chrome__features.cc
index e3a49b249012..8e999c0d7375 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_chrome__features.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_common_chrome__features.cc
@@ -1,4 +1,4 @@
---- chrome/common/chrome_features.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/common/chrome_features.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/common/chrome_features.cc
@@ -76,7 +76,7 @@ BASE_FEATURE(kAppShimNotificationAttribution,
BASE_FEATURE(kAsyncDns,
@@ -36,7 +36,7 @@
base::FEATURE_ENABLED_BY_DEFAULT
#else
base::FEATURE_DISABLED_BY_DEFAULT
-@@ -338,7 +338,7 @@ BASE_FEATURE(kDesktopPWAsWebBundles,
+@@ -350,7 +350,7 @@ BASE_FEATURE(kDesktopPWAsWebBundles,
base::FEATURE_DISABLED_BY_DEFAULT);
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -45,7 +45,7 @@
// Controls whether Chrome Apps are supported. See https://crbug.com/1221251.
// If the feature is disabled, Chrome Apps continue to work. If enabled, Chrome
// Apps will not launch and will be marked in the UI as deprecated.
-@@ -377,7 +377,7 @@ const base::FeatureParam<bool> kDnsOverHttpsFallbackPa
+@@ -389,7 +389,7 @@ const base::FeatureParam<bool> kDnsOverHttpsFallbackPa
const base::FeatureParam<bool> kDnsOverHttpsShowUiParam {
&kDnsOverHttps, "ShowUi",
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || \
@@ -54,7 +54,7 @@
true
#else
false
-@@ -907,7 +907,7 @@ BASE_FEATURE(kLacrosSharedComponentsDir,
+@@ -946,7 +946,7 @@ BASE_FEATURE(kLacrosSharedComponentsDir,
base::FEATURE_ENABLED_BY_DEFAULT);
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
@@ -63,7 +63,7 @@
BASE_FEATURE(kLinuxLowMemoryMonitor,
"LinuxLowMemoryMonitor",
base::FEATURE_DISABLED_BY_DEFAULT);
-@@ -920,7 +920,7 @@ constexpr base::FeatureParam<int> kLinuxLowMemoryMonit
+@@ -959,7 +959,7 @@ constexpr base::FeatureParam<int> kLinuxLowMemoryMonit
&kLinuxLowMemoryMonitor, "critical_level", 255};
#endif // BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_CHROMEOS)
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_chrome__features.h b/www/ungoogled-chromium/files/patch-chrome_common_chrome__features.h
index 73773b5bcd79..dee73b2b7303 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_chrome__features.h
+++ b/www/ungoogled-chromium/files/patch-chrome_common_chrome__features.h
@@ -1,6 +1,6 @@
---- chrome/common/chrome_features.h.orig 2023-12-23 12:33:28 UTC
+--- chrome/common/chrome_features.h.orig 2024-02-03 15:42:55 UTC
+++ chrome/common/chrome_features.h
-@@ -61,13 +61,13 @@ BASE_DECLARE_FEATURE(kAppShimNotificationAttribution);
+@@ -62,13 +62,13 @@ BASE_DECLARE_FEATURE(kAppShimNotificationAttribution);
COMPONENT_EXPORT(CHROME_FEATURES) BASE_DECLARE_FEATURE(kAsyncDns);
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -16,7 +16,7 @@
COMPONENT_EXPORT(CHROME_FEATURES)
BASE_DECLARE_FEATURE(kBackgroundModeAllowRestart);
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-@@ -204,7 +204,7 @@ BASE_DECLARE_FEATURE(kDesktopPWAsTabStripSettings);
+@@ -227,7 +227,7 @@ BASE_DECLARE_FEATURE(kDesktopPWAsTabStripSettings);
COMPONENT_EXPORT(CHROME_FEATURES) BASE_DECLARE_FEATURE(kDesktopPWAsWebBundles);
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -25,7 +25,7 @@
COMPONENT_EXPORT(CHROME_FEATURES) BASE_DECLARE_FEATURE(kChromeAppsDeprecation);
COMPONENT_EXPORT(CHROME_FEATURES)
BASE_DECLARE_FEATURE(kKeepForceInstalledPreinstalledApps);
-@@ -523,7 +523,7 @@ COMPONENT_EXPORT(CHROME_FEATURES)
+@@ -562,7 +562,7 @@ COMPONENT_EXPORT(CHROME_FEATURES)
BASE_DECLARE_FEATURE(kLacrosSharedComponentsDir);
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
@@ -34,7 +34,7 @@
COMPONENT_EXPORT(CHROME_FEATURES) BASE_DECLARE_FEATURE(kLinuxLowMemoryMonitor);
COMPONENT_EXPORT(CHROME_FEATURES)
extern const base::FeatureParam<int> kLinuxLowMemoryMonitorModerateLevel;
-@@ -531,7 +531,7 @@ COMPONENT_EXPORT(CHROME_FEATURES)
+@@ -570,7 +570,7 @@ COMPONENT_EXPORT(CHROME_FEATURES)
extern const base::FeatureParam<int> kLinuxLowMemoryMonitorCriticalLevel;
#endif // BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_CHROMEOS)
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_chrome__paths.cc b/www/ungoogled-chromium/files/patch-chrome_common_chrome__paths.cc
index 24b0a5573cb2..e5acec16a29c 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_chrome__paths.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_common_chrome__paths.cc
@@ -1,4 +1,4 @@
---- chrome/common/chrome_paths.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/common/chrome_paths.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/common/chrome_paths.cc
@@ -30,7 +30,7 @@
#include "base/apple/foundation_util.h"
@@ -63,16 +63,16 @@
case chrome::DIR_STANDALONE_EXTERNAL_EXTENSIONS: {
cur = base::FilePath(kFilepathSinglePrefExtensions);
break;
-@@ -590,7 +590,7 @@ bool PathProvider(int key, base::FilePath* result) {
- break;
+@@ -591,7 +591,7 @@ bool PathProvider(int key, base::FilePath* result) {
#endif
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
+ #if BUILDFLAG(ENABLE_EXTENSIONS) && \
+- (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC))
++ (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD))
case chrome::DIR_NATIVE_MESSAGING:
#if BUILDFLAG(IS_MAC)
#if BUILDFLAG(GOOGLE_CHROME_BRANDING)
-@@ -604,9 +604,12 @@ bool PathProvider(int key, base::FilePath* result) {
+@@ -605,9 +605,12 @@ bool PathProvider(int key, base::FilePath* result) {
#if BUILDFLAG(GOOGLE_CHROME_BRANDING)
cur = base::FilePath(
FILE_PATH_LITERAL("/etc/opt/chrome/native-messaging-hosts"));
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_chrome__paths.h b/www/ungoogled-chromium/files/patch-chrome_common_chrome__paths.h
index e2a79acceb73..d8a27de8d80b 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_chrome__paths.h
+++ b/www/ungoogled-chromium/files/patch-chrome_common_chrome__paths.h
@@ -1,6 +1,6 @@
---- chrome/common/chrome_paths.h.orig 2023-09-17 07:59:53 UTC
+--- chrome/common/chrome_paths.h.orig 2024-02-03 15:42:55 UTC
+++ chrome/common/chrome_paths.h
-@@ -57,7 +57,7 @@ enum {
+@@ -58,7 +58,7 @@ enum {
#if BUILDFLAG(IS_CHROMEOS_ASH) || \
((BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)) && \
BUILDFLAG(CHROMIUM_BRANDING)) || \
@@ -9,7 +9,7 @@
DIR_USER_EXTERNAL_EXTENSIONS, // Directory for per-user external extensions
// on Chrome Mac and Chromium Linux.
// On Chrome OS, this path is used for OEM
-@@ -65,7 +65,7 @@ enum {
+@@ -66,7 +66,7 @@ enum {
// create it.
#endif
@@ -18,12 +18,12 @@
DIR_STANDALONE_EXTERNAL_EXTENSIONS, // Directory for 'per-extension'
// definition manifest files that
// describe extensions which are to be
-@@ -123,7 +123,7 @@ enum {
- // must be cleared on device reboot.
+@@ -125,7 +125,7 @@ enum {
#endif
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
+ #if BUILDFLAG(ENABLE_EXTENSIONS) && \
+- (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC))
++ (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD))
DIR_NATIVE_MESSAGING, // System directory where native messaging host
// manifest files are stored.
DIR_USER_NATIVE_MESSAGING, // Directory with Native Messaging Hosts
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_chrome__switches.cc b/www/ungoogled-chromium/files/patch-chrome_common_chrome__switches.cc
index a89942245543..b10af2d00142 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_chrome__switches.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_common_chrome__switches.cc
@@ -1,6 +1,6 @@
---- chrome/common/chrome_switches.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/common/chrome_switches.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/common/chrome_switches.cc
-@@ -857,14 +857,14 @@ const char kAllowNaClSocketAPI[] = "allow-nacl-socket-
+@@ -858,14 +858,14 @@ const char kAllowNaClSocketAPI[] = "allow-nacl-socket-
#endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_pref__names.h b/www/ungoogled-chromium/files/patch-chrome_common_pref__names.h
index 543263c9af41..691e3358409a 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_pref__names.h
+++ b/www/ungoogled-chromium/files/patch-chrome_common_pref__names.h
@@ -1,6 +1,6 @@
---- chrome/common/pref_names.h.orig 2023-12-23 12:33:28 UTC
+--- chrome/common/pref_names.h.orig 2024-02-03 15:42:55 UTC
+++ chrome/common/pref_names.h
-@@ -1315,7 +1315,7 @@ inline constexpr char kUseAshProxy[] = "lacros.proxy.u
+@@ -1330,7 +1330,7 @@ inline constexpr char kUseAshProxy[] = "lacros.proxy.u
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -9,7 +9,7 @@
// Linux specific preference on whether we should match the system theme.
inline constexpr char kSystemTheme[] = "extensions.theme.system_theme";
#endif
-@@ -1441,7 +1441,7 @@ inline constexpr char kShowUpdatePromotionInfoBar[] =
+@@ -1456,7 +1456,7 @@ inline constexpr char kShowUpdatePromotionInfoBar[] =
"browser.show_update_promotion_info_bar";
#endif
@@ -18,7 +18,7 @@
// Boolean that is false if we should show window manager decorations. If
// true, we draw a custom chrome frame (thicker title bar and blue border).
inline constexpr char kUseCustomChromeFrame[] = "browser.custom_chrome_frame";
-@@ -1989,7 +1989,7 @@ inline constexpr char kDownloadDefaultDirectory[] =
+@@ -2007,7 +2007,7 @@ inline constexpr char kDownloadDefaultDirectory[] =
inline constexpr char kDownloadDirUpgraded[] = "download.directory_upgrade";
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -27,7 +27,7 @@
inline constexpr char kOpenPdfDownloadInSystemReader[] =
"download.open_pdf_in_system_reader";
#endif
-@@ -2407,14 +2407,14 @@ inline constexpr char kMediaStorageIdSalt[] = "media.s
+@@ -2439,14 +2439,14 @@ inline constexpr char kMediaStorageIdSalt[] = "media.s
inline constexpr char kMediaCdmOriginData[] = "media.cdm.origin_data";
#endif // BUILDFLAG(IS_WIN)
@@ -44,7 +44,7 @@
// Records whether the user has seen an HTTP auth "negotiate" header.
inline constexpr char kReceivedHttpAuthNegotiateHeader[] =
"net.received_http_auth_negotiate_headers";
-@@ -2492,7 +2492,7 @@ inline constexpr char kAmbientAuthenticationInPrivateM
+@@ -2524,7 +2524,7 @@ inline constexpr char kAmbientAuthenticationInPrivateM
inline constexpr char kBasicAuthOverHttpEnabled[] =
"auth.basic_over_http_enabled";
@@ -53,7 +53,7 @@
// Boolean that specifies whether OK-AS-DELEGATE flag from KDC is respected
// along with kAuthNegotiateDelegateAllowlist.
inline constexpr char kAuthNegotiateDelegateByKdcPolicy[] =
-@@ -3533,7 +3533,7 @@ inline constexpr char kFileOrDirectoryPickerWithoutGes
+@@ -3563,7 +3563,7 @@ inline constexpr char kFileOrDirectoryPickerWithoutGes
inline constexpr char kSandboxExternalProtocolBlocked[] =
"profile.sandbox_external_protocol_blocked";
@@ -62,16 +62,16 @@
// Boolean that indicates if system notifications are allowed to be used in
// place of Chrome notifications.
inline constexpr char kAllowSystemNotifications[] =
-@@ -3589,7 +3589,7 @@ inline constexpr char kCACertificateManagementAllowed[
- inline constexpr char kChromeRootStoreEnabled[] = "chrome_root_store_enabled";
+@@ -3612,7 +3612,7 @@ inline constexpr char kCACertificateManagementAllowed[
#endif
+
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
- BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
+ BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_BSD)
inline constexpr char kEnforceLocalAnchorConstraintsEnabled[] =
"enforce_local_anchor_constraints_enabled";
#endif
-@@ -3903,7 +3903,7 @@ inline constexpr char kThrottleNonVisibleCrossOriginIf
+@@ -3924,7 +3924,7 @@ inline constexpr char kThrottleNonVisibleCrossOriginIf
inline constexpr char kNewBaseUrlInheritanceBehaviorAllowed[] =
"new_base_url_inheritance_behavior_allowed";
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_url__constants.h b/www/ungoogled-chromium/files/patch-chrome_common_url__constants.h
index 993a4c2de308..e0079d7ea876 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_url__constants.h
+++ b/www/ungoogled-chromium/files/patch-chrome_common_url__constants.h
@@ -1,6 +1,6 @@
---- chrome/common/url_constants.h.orig 2023-12-23 12:33:28 UTC
+--- chrome/common/url_constants.h.orig 2024-02-03 15:42:55 UTC
+++ chrome/common/url_constants.h
-@@ -824,7 +824,7 @@ inline constexpr char kPhoneHubPermissionLearnMoreURL[
+@@ -821,7 +821,7 @@ inline constexpr char kPhoneHubPermissionLearnMoreURL[
"https://support.9oo91e.qjz9zk/chromebook?p=multidevice";
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.cc b/www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.cc
index 94aebdf4f32c..2d368eb9196d 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.cc
@@ -1,6 +1,6 @@
---- chrome/common/webui_url_constants.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/common/webui_url_constants.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/common/webui_url_constants.cc
-@@ -512,18 +512,18 @@ const char kOsUIShortcutCustomizationAppURL[] = "os://
+@@ -508,18 +508,18 @@ const char kOsUIShortcutCustomizationAppURL[] = "os://
const char kOsUIVersionURL[] = "os://version";
#endif
@@ -22,7 +22,7 @@
const char kChromeUIDiscardsHost[] = "discards";
const char kChromeUIDiscardsURL[] = "chrome://discards/";
#endif
-@@ -538,14 +538,14 @@ const char kChromeUILinuxProxyConfigHost[] = "linux-pr
+@@ -534,14 +534,14 @@ const char kChromeUILinuxProxyConfigHost[] = "linux-pr
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -39,7 +39,7 @@
const char kChromeUIBrowserSwitchHost[] = "browser-switch";
const char kChromeUIBrowserSwitchURL[] = "chrome://browser-switch/";
const char kChromeUIEnterpriseProfileWelcomeHost[] =
-@@ -564,7 +564,7 @@ const char kChromeUIProfilePickerUrl[] = "chrome://pro
+@@ -560,7 +560,7 @@ const char kChromeUIProfilePickerUrl[] = "chrome://pro
const char kChromeUIProfilePickerStartupQuery[] = "startup";
#endif
@@ -48,7 +48,7 @@
defined(TOOLKIT_VIEWS)) || \
defined(USE_AURA)
const char kChromeUITabModalConfirmDialogHost[] = "tab-modal-confirm-dialog";
-@@ -650,7 +650,7 @@ const char kCookiesSubPagePath[] = "/cookies";
+@@ -643,7 +643,7 @@ const char kCookiesSubPagePath[] = "/cookies";
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -57,7 +57,7 @@
const char kChromeUIWebAppSettingsURL[] = "chrome://app-settings/";
const char kChromeUIWebAppSettingsHost[] = "app-settings";
#endif
-@@ -890,7 +890,7 @@ const char* const kChromeDebugURLs[] = {
+@@ -877,7 +877,7 @@ const char* const kChromeDebugURLs[] = {
blink::kChromeUIGpuJavaCrashURL,
kChromeUIJavaCrashURL,
#endif
diff --git a/www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.h b/www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.h
index 6da30d328a94..ed248625cd91 100644
--- a/www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.h
+++ b/www/ungoogled-chromium/files/patch-chrome_common_webui__url__constants.h
@@ -1,6 +1,6 @@
---- chrome/common/webui_url_constants.h.orig 2023-12-23 12:33:28 UTC
+--- chrome/common/webui_url_constants.h.orig 2024-02-03 15:42:55 UTC
+++ chrome/common/webui_url_constants.h
-@@ -426,24 +426,24 @@ extern const char kOsUIShortcutCustomizationAppURL[];
+@@ -424,24 +424,24 @@ extern const char kOsUIShortcutCustomizationAppURL[];
extern const char kOsUIVersionURL[];
#endif
@@ -29,7 +29,7 @@
extern const char kChromeUIWebAppSettingsURL[];
extern const char kChromeUIWebAppSettingsHost[];
#endif
-@@ -458,7 +458,7 @@ extern const char kChromeUILinuxProxyConfigHost[];
+@@ -456,7 +456,7 @@ extern const char kChromeUILinuxProxyConfigHost[];
#endif
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
@@ -38,7 +38,7 @@
extern const char kChromeUISandboxHost[];
#endif
-@@ -470,7 +470,7 @@ extern const char kChromeUISearchEngineChoiceHost[];
+@@ -468,7 +468,7 @@ extern const char kChromeUISearchEngineChoiceHost[];
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_FUCHSIA) || \
@@ -47,7 +47,7 @@
extern const char kChromeUIBrowserSwitchHost[];
extern const char kChromeUIBrowserSwitchURL[];
extern const char kChromeUIEnterpriseProfileWelcomeHost[];
-@@ -486,7 +486,7 @@ extern const char kChromeUIProfilePickerUrl[];
+@@ -484,7 +484,7 @@ extern const char kChromeUIProfilePickerUrl[];
extern const char kChromeUIProfilePickerStartupQuery[];
#endif
diff --git a/www/ungoogled-chromium/files/patch-chrome_renderer_chrome__render__frame__observer.cc b/www/ungoogled-chromium/files/patch-chrome_renderer_chrome__render__frame__observer.cc
index 25b570c45369..3b5806157ebc 100644
--- a/www/ungoogled-chromium/files/patch-chrome_renderer_chrome__render__frame__observer.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_renderer_chrome__render__frame__observer.cc
@@ -1,6 +1,6 @@
---- chrome/renderer/chrome_render_frame_observer.cc.orig 2023-09-17 07:59:53 UTC
+--- chrome/renderer/chrome_render_frame_observer.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/renderer/chrome_render_frame_observer.cc
-@@ -351,7 +351,7 @@ void ChromeRenderFrameObserver::OnDestruct() {
+@@ -355,7 +355,7 @@ void ChromeRenderFrameObserver::WillDetach(blink::Deta
void ChromeRenderFrameObserver::DraggableRegionsChanged() {
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-chrome_services_printing_print__backend__service__impl.cc b/www/ungoogled-chromium/files/patch-chrome_services_printing_print__backend__service__impl.cc
index 1d31d0fb8ce0..7d01d1f1adef 100644
--- a/www/ungoogled-chromium/files/patch-chrome_services_printing_print__backend__service__impl.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_services_printing_print__backend__service__impl.cc
@@ -1,4 +1,4 @@
---- chrome/services/printing/print_backend_service_impl.cc.orig 2023-12-23 12:33:28 UTC
+--- chrome/services/printing/print_backend_service_impl.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/services/printing/print_backend_service_impl.cc
@@ -46,7 +46,7 @@
#include "printing/backend/cups_connection_pool.h"
@@ -38,7 +38,7 @@
// are using `TestPrintingContext`.
@@ -681,7 +681,7 @@ void PrintBackendServiceImpl::UpdatePrintSettings(
crash_keys_ = std::make_unique<crash_keys::ScopedPrinterInfo>(
- print_backend_->GetPrinterDriverInfo(*printer_name));
+ *printer_name, print_backend_->GetPrinterDriverInfo(*printer_name));
-#if BUILDFLAG(IS_LINUX) && BUILDFLAG(USE_CUPS)
+#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)) && BUILDFLAG(USE_CUPS)
diff --git a/www/ungoogled-chromium/files/patch-chrome_test_BUILD.gn b/www/ungoogled-chromium/files/patch-chrome_test_BUILD.gn
index 17f16dee3f92..8bb63e912baf 100644
--- a/www/ungoogled-chromium/files/patch-chrome_test_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-chrome_test_BUILD.gn
@@ -1,6 +1,6 @@
---- chrome/test/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- chrome/test/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ chrome/test/BUILD.gn
-@@ -11236,7 +11236,7 @@ test("chrome_app_unittests") {
+@@ -11475,7 +11475,7 @@ test("chrome_app_unittests") {
"//components/heap_profiling/in_process",
"//components/safe_browsing:buildflags",
]
diff --git a/www/ungoogled-chromium/files/patch-chrome_updater_configurator.cc b/www/ungoogled-chromium/files/patch-chrome_updater_configurator.cc
index af613b28fba0..9347ad66bf92 100644
--- a/www/ungoogled-chromium/files/patch-chrome_updater_configurator.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_updater_configurator.cc
@@ -1,7 +1,7 @@
---- chrome/updater/configurator.cc.orig 2023-07-21 09:49:17 UTC
+--- chrome/updater/configurator.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/updater/configurator.cc
@@ -63,7 +63,7 @@ Configurator::Configurator(scoped_refptr<UpdaterPrefs>
- return absl::nullopt;
+ return std::nullopt;
#endif
}()) {
-#if BUILDFLAG(IS_LINUX)
diff --git a/www/ungoogled-chromium/files/patch-chrome_updater_util_posix__util.cc b/www/ungoogled-chromium/files/patch-chrome_updater_util_posix__util.cc
index c9cee8818b99..3f184cecebd6 100644
--- a/www/ungoogled-chromium/files/patch-chrome_updater_util_posix__util.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_updater_util_posix__util.cc
@@ -1,6 +1,6 @@
---- chrome/updater/util/posix_util.cc.orig 2023-05-05 12:12:41 UTC
+--- chrome/updater/util/posix_util.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/updater/util/posix_util.cc
-@@ -16,7 +16,7 @@
+@@ -18,7 +18,7 @@
#include "chrome/updater/updater_branding.h"
#include "chrome/updater/util/util.h"
diff --git a/www/ungoogled-chromium/files/patch-chrome_utility_services.cc b/www/ungoogled-chromium/files/patch-chrome_utility_services.cc
index e64548f55b1c..e51f76135ec9 100644
--- a/www/ungoogled-chromium/files/patch-chrome_utility_services.cc
+++ b/www/ungoogled-chromium/files/patch-chrome_utility_services.cc
@@ -1,6 +1,6 @@
---- chrome/utility/services.cc.orig 2023-11-04 07:08:51 UTC
+--- chrome/utility/services.cc.orig 2024-02-03 15:42:55 UTC
+++ chrome/utility/services.cc
-@@ -56,7 +56,7 @@
+@@ -57,7 +57,7 @@
#include "chrome/services/system_signals/mac/mac_system_signals_service.h"
#endif // BUILDFLAG(IS_MAC)
@@ -9,7 +9,7 @@
#include "chrome/services/system_signals/linux/linux_system_signals_service.h"
#endif // BUILDFLAG(IS_LINUX)
-@@ -82,7 +82,7 @@
+@@ -83,7 +83,7 @@
#include "chrome/services/file_util/file_util_service.h" // nogncheck
#endif
@@ -18,7 +18,7 @@
#include "chrome/services/file_util/document_analysis_service.h" // nogncheck
#endif
-@@ -218,7 +218,7 @@ auto RunMacNotificationService(
+@@ -219,7 +219,7 @@ auto RunMacNotificationService(
}
#endif // BUILDFLAG(IS_MAC)
@@ -27,7 +27,7 @@
auto RunSystemSignalsService(
mojo::PendingReceiver<device_signals::mojom::SystemSignalsService>
receiver) {
-@@ -278,7 +278,7 @@ auto RunCupsIppParser(
+@@ -279,7 +279,7 @@ auto RunCupsIppParser(
}
#endif
@@ -36,7 +36,7 @@
auto RunDocumentAnalysis(
mojo::PendingReceiver<chrome::mojom::DocumentAnalysisService> receiver) {
return std::make_unique<DocumentAnalysisService>(std::move(receiver));
-@@ -466,7 +466,7 @@ void RegisterMainThreadServices(mojo::ServiceFactory&
+@@ -467,7 +467,7 @@ void RegisterMainThreadServices(mojo::ServiceFactory&
services.Add(RunWindowsIconReader);
#endif // BUILDFLAG(IS_WIN)
@@ -45,7 +45,7 @@
services.Add(RunSystemSignalsService);
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
-@@ -482,7 +482,7 @@ void RegisterMainThreadServices(mojo::ServiceFactory&
+@@ -483,7 +483,7 @@ void RegisterMainThreadServices(mojo::ServiceFactory&
services.Add(RunFileUtil);
#endif
diff --git a/www/ungoogled-chromium/files/patch-chromecast_browser_cast__content__browser__client.cc b/www/ungoogled-chromium/files/patch-chromecast_browser_cast__content__browser__client.cc
index 69fe61c1d1d6..5b840a6664f5 100644
--- a/www/ungoogled-chromium/files/patch-chromecast_browser_cast__content__browser__client.cc
+++ b/www/ungoogled-chromium/files/patch-chromecast_browser_cast__content__browser__client.cc
@@ -1,6 +1,6 @@
---- chromecast/browser/cast_content_browser_client.cc.orig 2023-11-04 07:08:51 UTC
+--- chromecast/browser/cast_content_browser_client.cc.orig 2024-02-03 15:42:55 UTC
+++ chromecast/browser/cast_content_browser_client.cc
-@@ -435,7 +435,7 @@ void CastContentBrowserClient::AppendExtraCommandLineS
+@@ -432,7 +432,7 @@ void CastContentBrowserClient::AppendExtraCommandLineS
switches::kAudioOutputChannels));
}
} else if (process_type == switches::kGpuProcess) {
diff --git a/www/ungoogled-chromium/files/patch-components_autofill_core_browser_data__model_autofill__i18n__api.h b/www/ungoogled-chromium/files/patch-components_autofill_core_browser_data__model_autofill__i18n__api.h
deleted file mode 100644
index 05ca4c2bc4c1..000000000000
--- a/www/ungoogled-chromium/files/patch-components_autofill_core_browser_data__model_autofill__i18n__api.h
+++ /dev/null
@@ -1,16 +0,0 @@
---- components/autofill/core/browser/data_model/autofill_i18n_api.h.orig 2023-12-23 12:33:28 UTC
-+++ components/autofill/core/browser/data_model/autofill_i18n_api.h
-@@ -15,9 +15,13 @@ namespace autofill::i18n_model_definition {
- // Country code that represents autofill's legacy address hierarchy model as
- // stored `kAutofillModelRules`. As a workaround for GCC we declare the
- // std::string constexpr first.
-+// XXX
-+#if 0
- constexpr inline std::string kLegacyHierarchyCountryCodeString{"XX"};
- constexpr AddressCountryCode kLegacyHierarchyCountryCode =
- AddressCountryCode(kLegacyHierarchyCountryCodeString);
-+#endif
-+inline AddressCountryCode kLegacyHierarchyCountryCode = AddressCountryCode("XX");
-
- // Creates an instance of the address hierarchy model corresponding to the
- // provided country. All the nodes have empty values, except for the country
diff --git a/www/ungoogled-chromium/files/patch-components_autofill_core_browser_payments_iban__save__manager.cc b/www/ungoogled-chromium/files/patch-components_autofill_core_browser_payments_iban__save__manager.cc
deleted file mode 100644
index d47301736a88..000000000000
--- a/www/ungoogled-chromium/files/patch-components_autofill_core_browser_payments_iban__save__manager.cc
+++ /dev/null
@@ -1,14 +0,0 @@
---- components/autofill/core/browser/payments/iban_save_manager.cc.orig 2023-12-23 12:33:28 UTC
-+++ components/autofill/core/browser/payments/iban_save_manager.cc
-@@ -123,7 +123,11 @@ bool IbanSaveManager::ShouldOfferUploadSave(
-
- // Offer server save for this IBAN if it doesn't already match an existing
- // server IBAN.
-+#if (_LIBCPP_VERSION >= 160000)
- return std::ranges::none_of(
-+#else
-+ return base::ranges::none_of(
-+#endif
- personal_data_manager_->GetServerIbans(),
- [&iban_import_candidate](const auto& iban) {
- return iban->MatchesPrefixSuffixAndLength(iban_import_candidate);
diff --git a/www/ungoogled-chromium/files/patch-components_autofill_core_browser_personal__data__manager.cc b/www/ungoogled-chromium/files/patch-components_autofill_core_browser_personal__data__manager.cc
index 52b6b0d2a2db..9cfe94abc3ec 100644
--- a/www/ungoogled-chromium/files/patch-components_autofill_core_browser_personal__data__manager.cc
+++ b/www/ungoogled-chromium/files/patch-components_autofill_core_browser_personal__data__manager.cc
@@ -1,6 +1,6 @@
---- components/autofill/core/browser/personal_data_manager.cc.orig 2023-12-23 12:33:28 UTC
+--- components/autofill/core/browser/personal_data_manager.cc.orig 2024-02-03 15:42:55 UTC
+++ components/autofill/core/browser/personal_data_manager.cc
-@@ -2397,7 +2397,8 @@ bool PersonalDataManager::ShouldShowCardsFromAccountOp
+@@ -2380,7 +2380,8 @@ bool PersonalDataManager::ShouldShowCardsFromAccountOp
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS) || \
diff --git a/www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__payments__features.cc b/www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__payments__features.cc
index fe704330166f..eff10a30d525 100644
--- a/www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__payments__features.cc
+++ b/www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__payments__features.cc
@@ -1,6 +1,6 @@
---- components/autofill/core/common/autofill_payments_features.cc.orig 2023-12-23 12:33:28 UTC
+--- components/autofill/core/common/autofill_payments_features.cc.orig 2024-02-03 15:42:55 UTC
+++ components/autofill/core/common/autofill_payments_features.cc
-@@ -261,7 +261,7 @@ BASE_FEATURE(kEnablePixPayments,
+@@ -282,7 +282,7 @@ BASE_FEATURE(kEnablePixPayments,
bool ShouldShowImprovedUserConsentForCreditCardSave() {
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__util.cc b/www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__util.cc
index 867c199e0b4d..6931999518e5 100644
--- a/www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__util.cc
+++ b/www/ungoogled-chromium/files/patch-components_autofill_core_common_autofill__util.cc
@@ -1,6 +1,6 @@
---- components/autofill/core/common/autofill_util.cc.orig 2023-12-23 12:33:28 UTC
+--- components/autofill/core/common/autofill_util.cc.orig 2024-02-03 15:42:55 UTC
+++ components/autofill/core/common/autofill_util.cc
-@@ -207,7 +207,7 @@ size_t LevenshteinDistance(std::u16string_view a,
+@@ -143,7 +143,7 @@ bool SanitizedFieldIsEmpty(const std::u16string& value
bool ShouldAutoselectFirstSuggestionOnArrowDown() {
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-components_commerce__strings.grdp b/www/ungoogled-chromium/files/patch-components_commerce__strings.grdp
index a6759af5596b..8e532afd44d4 100644
--- a/www/ungoogled-chromium/files/patch-components_commerce__strings.grdp
+++ b/www/ungoogled-chromium/files/patch-components_commerce__strings.grdp
@@ -1,6 +1,6 @@
---- components/commerce_strings.grdp.orig 2023-11-04 07:08:51 UTC
+--- components/commerce_strings.grdp.orig 2024-02-03 15:42:55 UTC
+++ components/commerce_strings.grdp
-@@ -304,7 +304,7 @@
+@@ -309,7 +309,7 @@
<message name="IDS_PRICE_HISTORY_DESCRIPTION" desc="The description of the Price History section in the Shopping Insights side panel explaining how the prices are gathered.">
Typical prices are based on stores across the web over the past 90 days.
</message>
diff --git a/www/ungoogled-chromium/files/patch-components_commerce_core_commerce__feature__list.cc b/www/ungoogled-chromium/files/patch-components_commerce_core_commerce__feature__list.cc
index a2e7b9dec04b..f35dc3c191d9 100644
--- a/www/ungoogled-chromium/files/patch-components_commerce_core_commerce__feature__list.cc
+++ b/www/ungoogled-chromium/files/patch-components_commerce_core_commerce__feature__list.cc
@@ -1,6 +1,15 @@
---- components/commerce/core/commerce_feature_list.cc.orig 2023-12-23 12:33:28 UTC
+--- components/commerce/core/commerce_feature_list.cc.orig 2024-02-03 15:42:55 UTC
+++ components/commerce/core/commerce_feature_list.cc
-@@ -225,7 +225,7 @@ BASE_FEATURE(kShoppingCollection,
+@@ -168,7 +168,7 @@ BASE_FEATURE(kPriceInsights,
+ "PriceInsights",
+ base::FEATURE_DISABLED_BY_DEFAULT);
+ #if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
+- BUILDFLAG(IS_CHROMEOS)
++ BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+ BASE_FEATURE(kPriceInsightsRegionLaunched,
+ "PriceInsightsRegionLaunched",
+ base::FEATURE_ENABLED_BY_DEFAULT);
+@@ -249,7 +249,7 @@ BASE_FEATURE(kShoppingCollection,
BASE_FEATURE(kShoppingList, "ShoppingList", base::FEATURE_DISABLED_BY_DEFAULT);
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || \
diff --git a/www/ungoogled-chromium/files/patch-components_components__chromium__strings.grd b/www/ungoogled-chromium/files/patch-components_components__chromium__strings.grd
index 33a2403ebe42..a5f39f1d2607 100644
--- a/www/ungoogled-chromium/files/patch-components_components__chromium__strings.grd
+++ b/www/ungoogled-chromium/files/patch-components_components__chromium__strings.grd
@@ -1,6 +1,6 @@
---- components/components_chromium_strings.grd.orig 2023-08-18 10:26:52 UTC
+--- components/components_chromium_strings.grd.orig 2024-02-03 15:42:55 UTC
+++ components/components_chromium_strings.grd
-@@ -226,7 +226,7 @@
+@@ -220,7 +220,7 @@
</message>
</if>
<!-- The ChromeOS version of this string is defined in //components/error_page_strings.grdp. -->
diff --git a/www/ungoogled-chromium/files/patch-components_components__google__chrome__strings.grd b/www/ungoogled-chromium/files/patch-components_components__google__chrome__strings.grd
index c7921db24989..78b6b322c422 100644
--- a/www/ungoogled-chromium/files/patch-components_components__google__chrome__strings.grd
+++ b/www/ungoogled-chromium/files/patch-components_components__google__chrome__strings.grd
@@ -1,6 +1,6 @@
---- components/components_google_chrome_strings.grd.orig 2023-08-18 10:26:52 UTC
+--- components/components_google_chrome_strings.grd.orig 2024-02-03 15:42:55 UTC
+++ components/components_google_chrome_strings.grd
-@@ -226,7 +226,7 @@
+@@ -220,7 +220,7 @@
</message>
</if>
<!-- The ChromeOS version of this string is defined in //components/error_page_strings.grdp. -->
diff --git a/www/ungoogled-chromium/files/patch-components_cookie__config_cookie__store__util.cc b/www/ungoogled-chromium/files/patch-components_cookie__config_cookie__store__util.cc
index b8cfb001b843..7202960affdb 100644
--- a/www/ungoogled-chromium/files/patch-components_cookie__config_cookie__store__util.cc
+++ b/www/ungoogled-chromium/files/patch-components_cookie__config_cookie__store__util.cc
@@ -1,9 +1,9 @@
---- components/cookie_config/cookie_store_util.cc.orig 2022-10-01 07:40:07 UTC
+--- components/cookie_config/cookie_store_util.cc.orig 2024-02-03 15:42:55 UTC
+++ components/cookie_config/cookie_store_util.cc
-@@ -12,7 +12,7 @@
+@@ -13,7 +13,7 @@
namespace cookie_config {
- #if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || \
+ #if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
- BUILDFLAG(IS_CHROMEOS)
+ BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
namespace {
diff --git a/www/ungoogled-chromium/files/patch-components_crash_core_app_crashpad__handler__main.cc b/www/ungoogled-chromium/files/patch-components_crash_core_app_crashpad__handler__main.cc
new file mode 100644
index 000000000000..8906203801c1
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_crash_core_app_crashpad__handler__main.cc
@@ -0,0 +1,20 @@
+--- components/crash/core/app/crashpad_handler_main.cc.orig 2024-02-03 15:42:55 UTC
++++ components/crash/core/app/crashpad_handler_main.cc
+@@ -10,7 +10,7 @@
+ #include "third_party/crashpad/crashpad/handler/handler_main.h"
+ #include "third_party/crashpad/crashpad/handler/user_stream_data_source.h"
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
+ #include "components/stability_report/user_stream_data_source_posix.h"
+ #endif
+
+@@ -31,7 +31,7 @@ __attribute__((visibility("default"), used)) int Crash
+ char* argv[]) {
+ crashpad::UserStreamDataSources user_stream_data_sources;
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
+ user_stream_data_sources.push_back(
+ std::make_unique<stability_report::UserStreamDataSourcePosix>());
+ #endif
diff --git a/www/ungoogled-chromium/files/patch-components_embedder__support_user__agent__utils.cc b/www/ungoogled-chromium/files/patch-components_embedder__support_user__agent__utils.cc
index f6829ded62ab..ccb070499af6 100644
--- a/www/ungoogled-chromium/files/patch-components_embedder__support_user__agent__utils.cc
+++ b/www/ungoogled-chromium/files/patch-components_embedder__support_user__agent__utils.cc
@@ -1,6 +1,6 @@
---- components/embedder_support/user_agent_utils.cc.orig 2023-10-13 13:20:35 UTC
+--- components/embedder_support/user_agent_utils.cc.orig 2024-02-03 15:42:55 UTC
+++ components/embedder_support/user_agent_utils.cc
-@@ -432,6 +432,9 @@ std::string GetPlatformForUAMetadata() {
+@@ -445,6 +445,9 @@ std::string GetPlatformForUAMetadata() {
# else
return "Chromium OS";
# endif
diff --git a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__configurations.cc b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__configurations.cc
index 356461cb3726..07da73e397bb 100644
--- a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__configurations.cc
+++ b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__configurations.cc
@@ -1,15 +1,24 @@
---- components/feature_engagement/public/feature_configurations.cc.orig 2023-12-23 12:33:28 UTC
+--- components/feature_engagement/public/feature_configurations.cc.orig 2024-02-03 15:42:55 UTC
+++ components/feature_engagement/public/feature_configurations.cc
-@@ -49,7 +49,7 @@ FeatureConfig CreateAlwaysTriggerConfig(const base::Fe
+@@ -48,7 +48,7 @@ FeatureConfig CreateAlwaysTriggerConfig(const base::Fe
+
absl::optional<FeatureConfig> GetClientSideFeatureConfig(
const base::Feature* feature) {
+-#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN)
++#if BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
+
+ // The IPH bubble for link capturing has a trigger set to ANY so that it
+ // always shows up. The per app specific guardrails are independently stored
+@@ -67,7 +67,7 @@ absl::optional<FeatureConfig> GetClientSideFeatureConf
+
+ #endif // BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || \
- BUILDFLAG(IS_CHROMEOS)
+ BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
if (kIPHPasswordsAccountStorageFeature.name == feature->name) {
absl::optional<FeatureConfig> config = FeatureConfig();
config->valid = true;
-@@ -1440,7 +1440,8 @@ absl::optional<FeatureConfig> GetClientSideFeatureConf
+@@ -1498,7 +1498,8 @@ absl::optional<FeatureConfig> GetClientSideFeatureConf
#endif // BUILDFLAG(IS_ANDROID)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.cc b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.cc
index 24f3ebbfb923..ce76cdc97ade 100644
--- a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.cc
+++ b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.cc
@@ -1,4 +1,4 @@
---- components/feature_engagement/public/feature_constants.cc.orig 2023-12-23 12:33:28 UTC
+--- components/feature_engagement/public/feature_constants.cc.orig 2024-02-03 15:42:55 UTC
+++ components/feature_engagement/public/feature_constants.cc
@@ -21,7 +21,7 @@ BASE_FEATURE(kUseClientConfigIPH,
BASE_FEATURE(kIPHDummyFeature, "IPH_Dummy", base::FEATURE_DISABLED_BY_DEFAULT);
@@ -9,7 +9,7 @@
BASE_FEATURE(kIPHBatterySaverModeFeature,
"IPH_BatterySaverMode",
base::FEATURE_ENABLED_BY_DEFAULT);
-@@ -560,7 +560,7 @@ constexpr base::FeatureParam<int> kDefaultBrowserEligi
+@@ -583,7 +583,7 @@ constexpr base::FeatureParam<int> kDefaultBrowserEligi
/*default_value=*/365};
#endif // BUILDFLAG(IS_IOS)
@@ -18,3 +18,12 @@
BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
BASE_FEATURE(kIPHAutofillExternalAccountProfileSuggestionFeature,
"IPH_AutofillExternalAccountProfileSuggestion",
+@@ -709,7 +709,7 @@ BASE_FEATURE(kIPHScalableIphGamingFeature,
+ base::FEATURE_DISABLED_BY_DEFAULT);
+ #endif
+
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ // This can be enabled by default, as the DesktopPWAsLinkCapturing flag is
+ // needed for the IPH linked to this feature to work, and use-cases to show
+ // the IPH are guarded by that flag.
diff --git a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.h b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.h
index c4632760c9e7..daabcd04d99a 100644
--- a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.h
+++ b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__constants.h
@@ -1,6 +1,6 @@
---- components/feature_engagement/public/feature_constants.h.orig 2023-12-23 12:33:28 UTC
+--- components/feature_engagement/public/feature_constants.h.orig 2024-02-03 15:42:55 UTC
+++ components/feature_engagement/public/feature_constants.h
-@@ -24,7 +24,7 @@ BASE_DECLARE_FEATURE(kUseClientConfigIPH);
+@@ -25,7 +25,7 @@ BASE_DECLARE_FEATURE(kUseClientConfigIPH);
BASE_DECLARE_FEATURE(kIPHDummyFeature);
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || \
@@ -9,7 +9,7 @@
BASE_DECLARE_FEATURE(kIPHBatterySaverModeFeature);
BASE_DECLARE_FEATURE(kIPHCompanionSidePanelFeature);
BASE_DECLARE_FEATURE(kIPHCompanionSidePanelRegionSearchFeature);
-@@ -233,7 +233,7 @@ extern const base::FeatureParam<int>
+@@ -243,7 +243,7 @@ extern const base::FeatureParam<int>
kDefaultBrowserEligibilitySlidingWindowParam;
#endif // BUILDFLAG(IS_IOS)
@@ -18,3 +18,12 @@
BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
BASE_DECLARE_FEATURE(kIPHAutofillExternalAccountProfileSuggestionFeature);
BASE_DECLARE_FEATURE(kIPHAutofillVirtualCardCVCSuggestionFeature);
+@@ -291,7 +291,7 @@ BASE_DECLARE_FEATURE(kIPHScalableIphHelpAppBasedTenFea
+ BASE_DECLARE_FEATURE(kIPHScalableIphGamingFeature);
+ #endif
+
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ BASE_DECLARE_FEATURE(kIPHDesktopPWAsLinkCapturingLaunch);
+ #endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
+
diff --git a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.cc b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.cc
index 525645b64e51..68fe461750b7 100644
--- a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.cc
+++ b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.cc
@@ -1,7 +1,7 @@
---- components/feature_engagement/public/feature_list.cc.orig 2023-12-23 12:33:28 UTC
+--- components/feature_engagement/public/feature_list.cc.orig 2024-02-03 15:42:55 UTC
+++ components/feature_engagement/public/feature_list.cc
-@@ -146,7 +146,7 @@ const base::Feature* const kAllFeatures[] = {
- &kIPHiOSParcelTrackingFeature,
+@@ -150,7 +150,7 @@ const base::Feature* const kAllFeatures[] = {
+ &kIPHiOSBlueDotPromoEnhancedSafeBrowsingFeature,
#endif // BUILDFLAG(IS_IOS)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || \
- BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA)
@@ -9,7 +9,7 @@
&kIPHBatterySaverModeFeature,
&kIPHCompanionSidePanelFeature,
&kIPHCompanionSidePanelRegionSearchFeature,
-@@ -196,7 +196,7 @@ const base::Feature* const kAllFeatures[] = {
+@@ -205,7 +205,7 @@ const base::Feature* const kAllFeatures[] = {
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) ||
// BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA)
@@ -18,3 +18,12 @@
BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
&kIPHAutofillExternalAccountProfileSuggestionFeature,
&kIPHAutofillVirtualCardCVCSuggestionFeature,
+@@ -253,7 +253,7 @@ const base::Feature* const kAllFeatures[] = {
+ &kIPHScalableIphGamingFeature,
+ #endif // BUILDFLAG(IS_CHROMEOS_ASH)
+
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ &kIPHDesktopPWAsLinkCapturingLaunch,
+ #endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
+
diff --git a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.h b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.h
index 6106a2312f83..0e7330b19d0f 100644
--- a/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.h
+++ b/www/ungoogled-chromium/files/patch-components_feature__engagement_public_feature__list.h
@@ -1,6 +1,6 @@
---- components/feature_engagement/public/feature_list.h.orig 2023-12-23 12:33:28 UTC
+--- components/feature_engagement/public/feature_list.h.orig 2024-02-03 15:42:55 UTC
+++ components/feature_engagement/public/feature_list.h
-@@ -269,7 +269,7 @@ DEFINE_VARIATION_PARAM(kIPHiOSParcelTrackingFeature,
+@@ -278,7 +278,7 @@ DEFINE_VARIATION_PARAM(kIPHiOSBlueDotPromoEnhancedSafe
#endif // BUILDFLAG(IS_IOS)
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || \
@@ -9,7 +9,7 @@
DEFINE_VARIATION_PARAM(kIPHBatterySaverModeFeature, "IPH_BatterySaverMode");
DEFINE_VARIATION_PARAM(kIPHCompanionSidePanelFeature, "IPH_CompanionSidePanel");
DEFINE_VARIATION_PARAM(kIPHCompanionSidePanelRegionSearchFeature,
-@@ -352,7 +352,7 @@ DEFINE_VARIATION_PARAM(kIPHBackNavigationMenuFeature,
+@@ -369,7 +369,7 @@ DEFINE_VARIATION_PARAM(kIPHBackNavigationMenuFeature,
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) ||
// BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA)
@@ -18,16 +18,25 @@
BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
DEFINE_VARIATION_PARAM(kIPHAutofillExternalAccountProfileSuggestionFeature,
"IPH_AutofillExternalAccountProfileSuggestion");
-@@ -567,7 +567,7 @@ constexpr flags_ui::FeatureEntry::FeatureVariation
- VARIATION_ENTRY(kIPHiOSChoiceScreenFeature),
- VARIATION_ENTRY(kIPHiOSParcelTrackingFeature),
+@@ -453,7 +453,7 @@ DEFINE_VARIATION_PARAM(kIPHScalableIphHelpAppBasedTenF
+ DEFINE_VARIATION_PARAM(kIPHScalableIphGamingFeature, "IPH_ScalableIphGaming");
+ #endif // BUILDFLAG(IS_CHROMEOS_ASH)
+
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ DEFINE_VARIATION_PARAM(kIPHDesktopPWAsLinkCapturingLaunch,
+ "IPH_DesktopPWAsLinkCapturingLaunch");
+ #endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
+@@ -592,7 +592,7 @@ constexpr flags_ui::FeatureEntry::FeatureVariation
+ VARIATION_ENTRY(kIPHiOSPullToRefreshFeature),
+ VARIATION_ENTRY(kIPHiOSReplaceSyncPromosWithSignInPromos),
#elif BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
- BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA)
+ BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_BSD)
VARIATION_ENTRY(kIPHBatterySaverModeFeature),
VARIATION_ENTRY(kIPHCompanionSidePanelFeature),
VARIATION_ENTRY(kIPHCompanionSidePanelRegionSearchFeature),
-@@ -617,7 +617,7 @@ constexpr flags_ui::FeatureEntry::FeatureVariation
+@@ -647,7 +647,7 @@ constexpr flags_ui::FeatureEntry::FeatureVariation
#endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) ||
// BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA)
@@ -36,3 +45,12 @@
BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_FUCHSIA)
VARIATION_ENTRY(kIPHAutofillExternalAccountProfileSuggestionFeature),
VARIATION_ENTRY(kIPHAutofillVirtualCardCVCSuggestionFeature),
+@@ -695,7 +695,7 @@ constexpr flags_ui::FeatureEntry::FeatureVariation
+ VARIATION_ENTRY(kIPHScalableIphGamingFeature),
+ #endif // BUILDFLAG(IS_CHROMEOS_ASH)
+
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ VARIATION_ENTRY(kIPHDesktopPWAsLinkCapturingLaunch),
+ #endif // BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
+
diff --git a/www/ungoogled-chromium/files/patch-components_gwp__asan_client_gwp__asan.cc b/www/ungoogled-chromium/files/patch-components_gwp__asan_client_gwp__asan.cc
index 7ec400f40446..8544e53758a0 100644
--- a/www/ungoogled-chromium/files/patch-components_gwp__asan_client_gwp__asan.cc
+++ b/www/ungoogled-chromium/files/patch-components_gwp__asan_client_gwp__asan.cc
@@ -1,6 +1,6 @@
---- components/gwp_asan/client/gwp_asan.cc.orig 2023-11-04 07:08:51 UTC
+--- components/gwp_asan/client/gwp_asan.cc.orig 2024-02-03 15:42:55 UTC
+++ components/gwp_asan/client/gwp_asan.cc
-@@ -66,7 +66,7 @@ constexpr bool kCpuIs64Bit =
+@@ -70,7 +70,7 @@ namespace {
// ProcessSamplingBoost is the multiplier to increase the
// ProcessSamplingProbability in scenarios where we want to perform additional
// testing (e.g., on canary/dev builds).
diff --git a/www/ungoogled-chromium/files/patch-components_gwp__asan_client_gwp__asan__features.cc b/www/ungoogled-chromium/files/patch-components_gwp__asan_client_gwp__asan__features.cc
index 86da65e7a2d1..4d6141f7f61b 100644
--- a/www/ungoogled-chromium/files/patch-components_gwp__asan_client_gwp__asan__features.cc
+++ b/www/ungoogled-chromium/files/patch-components_gwp__asan_client_gwp__asan__features.cc
@@ -1,11 +1,11 @@
---- components/gwp_asan/client/gwp_asan_features.cc.orig 2023-11-04 07:08:51 UTC
+--- components/gwp_asan/client/gwp_asan_features.cc.orig 2024-02-03 15:42:55 UTC
+++ components/gwp_asan/client/gwp_asan_features.cc
@@ -9,7 +9,7 @@
namespace gwp_asan::internal {
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || \
-- BUILDFLAG(IS_CHROMEOS)
-+ BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+- BUILDFLAG(IS_CHROMEOS) || \
++ BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD) || \
+ (BUILDFLAG(IS_ANDROID) && defined(ARCH_CPU_64_BITS))
constexpr base::FeatureState kDefaultEnabled = base::FEATURE_ENABLED_BY_DEFAULT;
#else
- constexpr base::FeatureState kDefaultEnabled =
diff --git a/www/ungoogled-chromium/files/patch-components_management__strings.grdp b/www/ungoogled-chromium/files/patch-components_management__strings.grdp
index de63d9c13b01..e00e42c38e3f 100644
--- a/www/ungoogled-chromium/files/patch-components_management__strings.grdp
+++ b/www/ungoogled-chromium/files/patch-components_management__strings.grdp
@@ -1,6 +1,6 @@
---- components/management_strings.grdp.orig 2023-10-13 13:20:35 UTC
+--- components/management_strings.grdp.orig 2024-02-03 15:42:55 UTC
+++ components/management_strings.grdp
-@@ -346,7 +346,7 @@
+@@ -345,7 +345,7 @@
</if>
<!-- Strings related to Chrome Enterprise Device Signals Sharing -->
diff --git a/www/ungoogled-chromium/files/patch-components_metrics_metrics__log.cc b/www/ungoogled-chromium/files/patch-components_metrics_metrics__log.cc
index 8d451716d7a6..2b99ebbca668 100644
--- a/www/ungoogled-chromium/files/patch-components_metrics_metrics__log.cc
+++ b/www/ungoogled-chromium/files/patch-components_metrics_metrics__log.cc
@@ -1,4 +1,4 @@
---- components/metrics/metrics_log.cc.orig 2023-10-13 13:20:35 UTC
+--- components/metrics/metrics_log.cc.orig 2024-02-03 15:42:55 UTC
+++ components/metrics/metrics_log.cc
@@ -53,7 +53,7 @@
#include "base/win/current_module.h"
@@ -9,7 +9,7 @@
#include "base/environment.h"
#include "base/nix/xdg_util.h"
#endif
-@@ -139,7 +139,7 @@ void RecordCurrentTime(
+@@ -142,7 +142,7 @@ void RecordCurrentTime(
}
}
@@ -18,7 +18,7 @@
metrics::SystemProfileProto::OS::XdgSessionType ToProtoSessionType(
base::nix::SessionType session_type) {
switch (session_type) {
-@@ -399,7 +399,7 @@ void MetricsLog::RecordCoreSystemProfile(
+@@ -393,7 +393,7 @@ void MetricsLog::RecordCoreSystemProfile(
// OperatingSystemVersion refers to the ChromeOS release version.
#if BUILDFLAG(IS_CHROMEOS_ASH)
os->set_kernel_version(base::SysInfo::KernelVersion());
@@ -27,7 +27,7 @@
// Linux operating system version is copied over into kernel version to be
// consistent.
os->set_kernel_version(base::SysInfo::OperatingSystemVersion());
-@@ -416,7 +416,7 @@ void MetricsLog::RecordCoreSystemProfile(
+@@ -410,7 +410,7 @@ void MetricsLog::RecordCoreSystemProfile(
os->set_build_number(base::SysInfo::GetIOSBuildNumber());
#endif
diff --git a/www/ungoogled-chromium/files/patch-components_neterror_resources_neterror.js b/www/ungoogled-chromium/files/patch-components_neterror_resources_neterror.js
index a15572eb9d3f..88a5e7416cc4 100644
--- a/www/ungoogled-chromium/files/patch-components_neterror_resources_neterror.js
+++ b/www/ungoogled-chromium/files/patch-components_neterror_resources_neterror.js
@@ -1,6 +1,6 @@
---- components/neterror/resources/neterror.js.orig 2023-12-23 12:33:28 UTC
+--- components/neterror/resources/neterror.js.orig 2024-02-03 15:42:55 UTC
+++ components/neterror/resources/neterror.js
-@@ -141,7 +141,7 @@ function detailsButtonClick() {
+@@ -142,7 +142,7 @@ function detailsButtonClick() {
let primaryControlOnLeft = true;
// clang-format off
diff --git a/www/ungoogled-chromium/files/patch-components_network__session__configurator_browser_network__session__configurator.cc b/www/ungoogled-chromium/files/patch-components_network__session__configurator_browser_network__session__configurator.cc
index 5eff2e034085..3ea4c4224f9a 100644
--- a/www/ungoogled-chromium/files/patch-components_network__session__configurator_browser_network__session__configurator.cc
+++ b/www/ungoogled-chromium/files/patch-components_network__session__configurator_browser_network__session__configurator.cc
@@ -1,6 +1,6 @@
---- components/network_session_configurator/browser/network_session_configurator.cc.orig 2023-11-04 07:08:51 UTC
+--- components/network_session_configurator/browser/network_session_configurator.cc.orig 2024-02-03 15:42:55 UTC
+++ components/network_session_configurator/browser/network_session_configurator.cc
-@@ -801,7 +801,7 @@ net::URLRequestContextBuilder::HttpCacheParams::Type C
+@@ -813,7 +813,7 @@ net::URLRequestContextBuilder::HttpCacheParams::Type C
// backport, having it behave differently than in stable would be a bigger
// problem. TODO: Does this work in later macOS releases?
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
diff --git a/www/ungoogled-chromium/files/patch-components_network__session__configurator_browser_network__session__configurator__unittest.cc b/www/ungoogled-chromium/files/patch-components_network__session__configurator_browser_network__session__configurator__unittest.cc
index 750baf161d3f..ba5fcbc914f4 100644
--- a/www/ungoogled-chromium/files/patch-components_network__session__configurator_browser_network__session__configurator__unittest.cc
+++ b/www/ungoogled-chromium/files/patch-components_network__session__configurator_browser_network__session__configurator__unittest.cc
@@ -1,6 +1,6 @@
---- components/network_session_configurator/browser/network_session_configurator_unittest.cc.orig 2023-09-17 07:59:53 UTC
+--- components/network_session_configurator/browser/network_session_configurator_unittest.cc.orig 2024-02-03 15:42:55 UTC
+++ components/network_session_configurator/browser/network_session_configurator_unittest.cc
-@@ -775,7 +775,7 @@ TEST_F(NetworkSessionConfiguratorTest, HostRules) {
+@@ -788,7 +788,7 @@ TEST_F(NetworkSessionConfiguratorTest, HostRules) {
TEST_F(NetworkSessionConfiguratorTest, DefaultCacheBackend) {
#if BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
diff --git a/www/ungoogled-chromium/files/patch-components_omnibox_browser_omnibox__edit__model.cc b/www/ungoogled-chromium/files/patch-components_omnibox_browser_omnibox__edit__model.cc
index b583321cabf2..c09a09c17567 100644
--- a/www/ungoogled-chromium/files/patch-components_omnibox_browser_omnibox__edit__model.cc
+++ b/www/ungoogled-chromium/files/patch-components_omnibox_browser_omnibox__edit__model.cc
@@ -1,8 +1,8 @@
---- components/omnibox/browser/omnibox_edit_model.cc.orig 2023-06-05 19:39:05 UTC
+--- components/omnibox/browser/omnibox_edit_model.cc.orig 2024-02-03 15:42:55 UTC
+++ components/omnibox/browser/omnibox_edit_model.cc
-@@ -23,6 +23,7 @@
- #include "base/trace_event/trace_event.h"
+@@ -24,6 +24,7 @@
#include "base/trace_event/typed_macros.h"
+ #include "build/branding_buildflags.h"
#include "build/build_config.h"
+#include "build/branding_buildflags.h"
#include "components/bookmarks/browser/bookmark_model.h"
diff --git a/www/ungoogled-chromium/files/patch-components_optimization__guide_core_optimization__guide__util.cc b/www/ungoogled-chromium/files/patch-components_optimization__guide_core_optimization__guide__util.cc
index 77d43e6cdc4e..06b3bd133710 100644
--- a/www/ungoogled-chromium/files/patch-components_optimization__guide_core_optimization__guide__util.cc
+++ b/www/ungoogled-chromium/files/patch-components_optimization__guide_core_optimization__guide__util.cc
@@ -1,6 +1,6 @@
---- components/optimization_guide/core/optimization_guide_util.cc.orig 2023-12-23 12:33:28 UTC
+--- components/optimization_guide/core/optimization_guide_util.cc.orig 2024-02-03 15:42:55 UTC
+++ components/optimization_guide/core/optimization_guide_util.cc
-@@ -34,7 +34,7 @@ optimization_guide::proto::Platform GetPlatform() {
+@@ -39,7 +39,7 @@ optimization_guide::proto::Platform GetPlatform() {
return optimization_guide::proto::PLATFORM_CHROMEOS;
#elif BUILDFLAG(IS_ANDROID)
return optimization_guide::proto::PLATFORM_ANDROID;
diff --git a/www/ungoogled-chromium/files/patch-components_optimization__guide_core_tflite__model__executor.h b/www/ungoogled-chromium/files/patch-components_optimization__guide_core_tflite__model__executor.h
index 6570359db5dd..d7e2a32077ad 100644
--- a/www/ungoogled-chromium/files/patch-components_optimization__guide_core_tflite__model__executor.h
+++ b/www/ungoogled-chromium/files/patch-components_optimization__guide_core_tflite__model__executor.h
@@ -1,6 +1,6 @@
---- components/optimization_guide/core/tflite_model_executor.h.orig 2023-10-13 13:20:35 UTC
+--- components/optimization_guide/core/tflite_model_executor.h.orig 2024-02-03 15:42:55 UTC
+++ components/optimization_guide/core/tflite_model_executor.h
-@@ -240,7 +240,11 @@ class TFLiteModelExecutor : public ModelExecutor<Outpu
+@@ -241,7 +241,11 @@ class TFLiteModelExecutor : public ModelExecutor<Outpu
void SendForBatchExecution(
BatchExecutionCallback callback_on_complete,
base::TimeTicks start_time,
@@ -12,7 +12,7 @@
override {
DCHECK(execution_task_runner_->RunsTasksInCurrentSequence());
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
-@@ -262,7 +266,11 @@ class TFLiteModelExecutor : public ModelExecutor<Outpu
+@@ -263,7 +267,11 @@ class TFLiteModelExecutor : public ModelExecutor<Outpu
// Starts the synchronous execution of the model. Returns model outputs.
// Model needs to be loaded. Synchronous calls do not load or unload model.
std::vector<absl::optional<OutputType>> SendForBatchExecutionSync(
@@ -24,7 +24,7 @@
override {
DCHECK(execution_task_runner_->RunsTasksInCurrentSequence());
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
-@@ -417,7 +425,11 @@ class TFLiteModelExecutor : public ModelExecutor<Outpu
+@@ -421,7 +429,11 @@ class TFLiteModelExecutor : public ModelExecutor<Outpu
// executes it on the model execution thread.
void LoadModelFileAndBatchExecute(
BatchExecutionCallback callback_on_complete,
@@ -36,7 +36,7 @@
DCHECK(execution_task_runner_->RunsTasksInCurrentSequence());
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
-@@ -434,7 +446,11 @@ class TFLiteModelExecutor : public ModelExecutor<Outpu
+@@ -438,7 +450,11 @@ class TFLiteModelExecutor : public ModelExecutor<Outpu
// Batch executes the loaded model for inputs.
void BatchExecuteLoadedModel(
@@ -48,7 +48,7 @@
std::vector<absl::optional<OutputType>>* outputs) {
DCHECK(execution_task_runner_->RunsTasksInCurrentSequence());
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
-@@ -494,7 +510,11 @@ class TFLiteModelExecutor : public ModelExecutor<Outpu
+@@ -498,7 +514,11 @@ class TFLiteModelExecutor : public ModelExecutor<Outpu
// Unloads the model if needed.
void BatchExecuteLoadedModelAndRunCallback(
BatchExecutionCallback callback_on_complete,
diff --git a/www/ungoogled-chromium/files/patch-components_paint__preview_browser_paint__preview__client.cc b/www/ungoogled-chromium/files/patch-components_paint__preview_browser_paint__preview__client.cc
index 3f1ab3625f94..e6d1708b0d60 100644
--- a/www/ungoogled-chromium/files/patch-components_paint__preview_browser_paint__preview__client.cc
+++ b/www/ungoogled-chromium/files/patch-components_paint__preview_browser_paint__preview__client.cc
@@ -1,13 +1,13 @@
---- components/paint_preview/browser/paint_preview_client.cc.orig 2023-03-10 11:01:21 UTC
+--- components/paint_preview/browser/paint_preview_client.cc.orig 2024-02-03 15:42:55 UTC
+++ components/paint_preview/browser/paint_preview_client.cc
-@@ -310,8 +310,8 @@ void PaintPreviewClient::CapturePaintPreview(
- metadata->set_url(url.spec());
+@@ -313,8 +313,8 @@ void PaintPreviewClient::CapturePaintPreview(
metadata->set_version(kPaintPreviewVersion);
auto* chromeVersion = metadata->mutable_chrome_version();
-- chromeVersion->set_major(CHROME_VERSION_MAJOR);
-- chromeVersion->set_minor(CHROME_VERSION_MINOR);
-+ chromeVersion->set_gmajor(CHROME_VERSION_MAJOR);
-+ chromeVersion->set_gminor(CHROME_VERSION_MINOR);
- chromeVersion->set_build(CHROME_VERSION_BUILD);
- chromeVersion->set_patch(CHROME_VERSION_PATCH);
+ const auto& current_chrome_version = version_info::GetVersion();
+- chromeVersion->set_major(current_chrome_version.components()[0]);
+- chromeVersion->set_minor(current_chrome_version.components()[1]);
++ chromeVersion->set_gmajor(current_chrome_version.components()[0]);
++ chromeVersion->set_gminor(current_chrome_version.components()[1]);
+ chromeVersion->set_build(current_chrome_version.components()[2]);
+ chromeVersion->set_patch(current_chrome_version.components()[3]);
document_data.callback = std::move(callback);
diff --git a/www/ungoogled-chromium/files/patch-components_paint__preview_browser_paint__preview__client__unittest.cc b/www/ungoogled-chromium/files/patch-components_paint__preview_browser_paint__preview__client__unittest.cc
index 4a711056abdd..871e53829153 100644
--- a/www/ungoogled-chromium/files/patch-components_paint__preview_browser_paint__preview__client__unittest.cc
+++ b/www/ungoogled-chromium/files/patch-components_paint__preview_browser_paint__preview__client__unittest.cc
@@ -1,13 +1,13 @@
---- components/paint_preview/browser/paint_preview_client_unittest.cc.orig 2022-10-01 07:40:07 UTC
+--- components/paint_preview/browser/paint_preview_client_unittest.cc.orig 2024-02-03 15:42:55 UTC
+++ components/paint_preview/browser/paint_preview_client_unittest.cc
-@@ -171,8 +171,8 @@ TEST_P(PaintPreviewClientRenderViewHostTest, CaptureMa
- metadata->set_url(expected_url.spec());
+@@ -174,8 +174,8 @@ TEST_P(PaintPreviewClientRenderViewHostTest, CaptureMa
metadata->set_version(kPaintPreviewVersion);
auto* chromeVersion = metadata->mutable_chrome_version();
-- chromeVersion->set_major(CHROME_VERSION_MAJOR);
-- chromeVersion->set_minor(CHROME_VERSION_MINOR);
-+ chromeVersion->set_gmajor(CHROME_VERSION_MAJOR);
-+ chromeVersion->set_gminor(CHROME_VERSION_MINOR);
- chromeVersion->set_build(CHROME_VERSION_BUILD);
- chromeVersion->set_patch(CHROME_VERSION_PATCH);
+ const auto& current_chrome_version = version_info::GetVersion();
+- chromeVersion->set_major(current_chrome_version.components()[0]);
+- chromeVersion->set_minor(current_chrome_version.components()[1]);
++ chromeVersion->set_gmajor(current_chrome_version.components()[0]);
++ chromeVersion->set_gminor(current_chrome_version.components()[1]);
+ chromeVersion->set_build(current_chrome_version.components()[2]);
+ chromeVersion->set_patch(current_chrome_version.components()[3]);
PaintPreviewFrameProto* main_frame = expected_proto.mutable_root_frame();
diff --git a/www/ungoogled-chromium/files/patch-components_paint__preview_player_player__compositor__delegate.cc b/www/ungoogled-chromium/files/patch-components_paint__preview_player_player__compositor__delegate.cc
index 6d6e7184185a..3d2e83f72365 100644
--- a/www/ungoogled-chromium/files/patch-components_paint__preview_player_player__compositor__delegate.cc
+++ b/www/ungoogled-chromium/files/patch-components_paint__preview_player_player__compositor__delegate.cc
@@ -1,13 +1,13 @@
---- components/paint_preview/player/player_compositor_delegate.cc.orig 2023-03-10 11:01:21 UTC
+--- components/paint_preview/player/player_compositor_delegate.cc.orig 2024-02-03 15:42:55 UTC
+++ components/paint_preview/player/player_compositor_delegate.cc
-@@ -452,8 +452,8 @@ void PlayerCompositorDelegate::ValidateProtoAndLoadAXT
- // use the AXTreeUpdate.
+@@ -455,8 +455,8 @@ void PlayerCompositorDelegate::ValidateProtoAndLoadAXT
auto chrome_version = capture_result_->proto.metadata().chrome_version();
+ const auto& current_chrome_version = version_info::GetVersion();
if (capture_result_->proto.metadata().has_chrome_version() &&
-- chrome_version.major() == CHROME_VERSION_MAJOR &&
-- chrome_version.minor() == CHROME_VERSION_MINOR &&
-+ chrome_version.gmajor() == CHROME_VERSION_MAJOR &&
-+ chrome_version.gminor() == CHROME_VERSION_MINOR &&
- chrome_version.build() == CHROME_VERSION_BUILD &&
- chrome_version.patch() == CHROME_VERSION_PATCH) {
+- chrome_version.major() == current_chrome_version.components()[0] &&
+- chrome_version.minor() == current_chrome_version.components()[1] &&
++ chrome_version.gmajor() == current_chrome_version.components()[0] &&
++ chrome_version.gminor() == current_chrome_version.components()[1] &&
+ chrome_version.build() == current_chrome_version.components()[2] &&
+ chrome_version.patch() == current_chrome_version.components()[3]) {
paint_preview_service_->GetFileMixin()->GetAXTreeUpdate(
diff --git a/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_features_password__features.cc b/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_features_password__features.cc
index 3ae8b65fbabd..1782f6251da1 100644
--- a/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_features_password__features.cc
+++ b/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_features_password__features.cc
@@ -1,15 +1,6 @@
---- components/password_manager/core/browser/features/password_features.cc.orig 2023-12-23 12:33:28 UTC
+--- components/password_manager/core/browser/features/password_features.cc.orig 2024-02-03 15:42:55 UTC
+++ components/password_manager/core/browser/features/password_features.cc
-@@ -8,7 +8,7 @@
-
- namespace password_manager::features {
-
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
- // Enables attaching password manager and autofill internals logs to an Autofill
- // Rater Extension Report.
- BASE_FEATURE(kAttachLogsToAutofillRaterExtensionReport,
-@@ -30,7 +30,7 @@ BASE_FEATURE(kBiometricTouchToFill,
+@@ -22,7 +22,7 @@ BASE_FEATURE(kBiometricTouchToFill,
// Delete undecryptable passwords from the store when Sync is active.
BASE_FEATURE(kClearUndecryptablePasswordsOnSync,
"ClearUndecryptablePasswordsInSync",
diff --git a/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_features_password__features.h b/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_features_password__features.h
deleted file mode 100644
index 7ab44347a8cd..000000000000
--- a/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_features_password__features.h
+++ /dev/null
@@ -1,11 +0,0 @@
---- components/password_manager/core/browser/features/password_features.h.orig 2023-12-23 12:33:28 UTC
-+++ components/password_manager/core/browser/features/password_features.h
-@@ -15,7 +15,7 @@ namespace password_manager::features {
- // All features in alphabetical order. The features should be documented
- // alongside the definition of their values in the .cc file.
-
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
- BASE_DECLARE_FEATURE(kAttachLogsToAutofillRaterExtensionReport);
- #endif
-
diff --git a/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_login__database__unittest.cc b/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_password__store_login__database__unittest.cc
index 82b928d178d8..a0eadaaf32c6 100644
--- a/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_login__database__unittest.cc
+++ b/www/ungoogled-chromium/files/patch-components_password__manager_core_browser_password__store_login__database__unittest.cc
@@ -1,6 +1,6 @@
---- components/password_manager/core/browser/login_database_unittest.cc.orig 2023-12-23 12:33:28 UTC
-+++ components/password_manager/core/browser/login_database_unittest.cc
-@@ -2102,7 +2102,7 @@ INSTANTIATE_TEST_SUITE_P(MigrationToVCurrent,
+--- components/password_manager/core/browser/password_store/login_database_unittest.cc.orig 2024-02-03 15:42:55 UTC
++++ components/password_manager/core/browser/password_store/login_database_unittest.cc
+@@ -2106,7 +2106,7 @@ INSTANTIATE_TEST_SUITE_P(MigrationToVCurrent,
LoginDatabaseMigrationTestBroken,
testing::Values(1, 2, 3, 24));
diff --git a/www/ungoogled-chromium/files/patch-components_policy_core_browser_policy__pref__mapping__test.cc b/www/ungoogled-chromium/files/patch-components_policy_core_browser_policy__pref__mapping__test.cc
index 2a01a193027f..aafb8351d68c 100644
--- a/www/ungoogled-chromium/files/patch-components_policy_core_browser_policy__pref__mapping__test.cc
+++ b/www/ungoogled-chromium/files/patch-components_policy_core_browser_policy__pref__mapping__test.cc
@@ -1,6 +1,6 @@
---- components/policy/core/browser/policy_pref_mapping_test.cc.orig 2023-05-05 12:12:41 UTC
+--- components/policy/core/browser/policy_pref_mapping_test.cc.orig 2024-02-03 15:42:55 UTC
+++ components/policy/core/browser/policy_pref_mapping_test.cc
-@@ -327,7 +327,7 @@ class PolicyTestCase {
+@@ -318,7 +318,7 @@ class PolicyTestCase {
const std::string os("chromeos_lacros");
#elif BUILDFLAG(IS_IOS)
const std::string os("ios");
diff --git a/www/ungoogled-chromium/files/patch-components_policy_core_common_cloud_cloud__policy__client.cc b/www/ungoogled-chromium/files/patch-components_policy_core_common_cloud_cloud__policy__client.cc
index f4519d4f45e6..66ac3ce5bc26 100644
--- a/www/ungoogled-chromium/files/patch-components_policy_core_common_cloud_cloud__policy__client.cc
+++ b/www/ungoogled-chromium/files/patch-components_policy_core_common_cloud_cloud__policy__client.cc
@@ -1,6 +1,6 @@
---- components/policy/core/common/cloud/cloud_policy_client.cc.orig 2023-12-23 12:33:28 UTC
+--- components/policy/core/common/cloud/cloud_policy_client.cc.orig 2024-02-03 15:42:55 UTC
+++ components/policy/core/common/cloud/cloud_policy_client.cc
-@@ -508,7 +508,7 @@ void CloudPolicyClient::FetchPolicy(PolicyFetchReason
+@@ -516,7 +516,7 @@ void CloudPolicyClient::FetchPolicy(PolicyFetchReason
fetch_request->set_invalidation_payload(invalidation_payload_);
}
}
diff --git a/www/ungoogled-chromium/files/patch-components_policy_core_common_policy__loader__common.cc b/www/ungoogled-chromium/files/patch-components_policy_core_common_policy__loader__common.cc
index 35580deac1fe..580f31e777c3 100644
--- a/www/ungoogled-chromium/files/patch-components_policy_core_common_policy__loader__common.cc
+++ b/www/ungoogled-chromium/files/patch-components_policy_core_common_policy__loader__common.cc
@@ -1,4 +1,4 @@
---- components/policy/core/common/policy_loader_common.cc.orig 2023-09-17 07:59:53 UTC
+--- components/policy/core/common/policy_loader_common.cc.orig 2024-02-03 15:42:55 UTC
+++ components/policy/core/common/policy_loader_common.cc
@@ -46,7 +46,7 @@ const char* kSensitivePolicies[] = {
key::kDefaultSearchProviderEnabled,
@@ -7,11 +7,11 @@
- BUILDFLAG(IS_CHROMEOS)
+ BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
key::kAutoOpenFileTypes,
- key::kSafeBrowsingAllowlistDomains,
key::kHomepageIsNewTabPage,
-@@ -55,7 +55,7 @@ const char* kSensitivePolicies[] = {
- key::kRestoreOnStartup,
- key::kRestoreOnStartupURLs,
+ key::kPasswordProtectionChangePasswordURL,
+@@ -56,7 +56,7 @@ const char* kSensitivePolicies[] = {
+ key::kSafeBrowsingAllowlistDomains,
+ key::kSiteSearchSettings,
#endif
-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
+#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
diff --git a/www/ungoogled-chromium/files/patch-components_policy_core_common_policy__paths.cc b/www/ungoogled-chromium/files/patch-components_policy_core_common_policy__paths.cc
index e191efba1473..48cc79a820c4 100644
--- a/www/ungoogled-chromium/files/patch-components_policy_core_common_policy__paths.cc
+++ b/www/ungoogled-chromium/files/patch-components_policy_core_common_policy__paths.cc
@@ -1,11 +1,11 @@
---- components/policy/core/common/policy_paths.cc.orig 2023-10-13 13:20:35 UTC
+--- components/policy/core/common/policy_paths.cc.orig 2024-02-03 15:42:55 UTC
+++ components/policy/core/common/policy_paths.cc
-@@ -16,6 +16,8 @@ namespace policy {
- #if BUILDFLAG(GOOGLE_CHROME_BRANDING) || \
- BUILDFLAG(GOOGLE_CHROME_FOR_TESTING_BRANDING)
+@@ -17,6 +17,8 @@ namespace policy {
const char kPolicyPath[] = "/etc/opt/chrome/policies";
+ #elif BUILDFLAG(GOOGLE_CHROME_FOR_TESTING_BRANDING)
+ const char kPolicyPath[] = "/etc/opt/chrome_for_testing/policies";
+#elif BUILDFLAG(IS_FREEBSD)
+const char kPolicyPath[] = "/usr/local/etc/chromium/policies";
#else
const char kPolicyPath[] = "/etc/chromium/policies";
- #endif // BUILDFLAG(GOOGLE_CHROME_BRANDING) || \
+ #endif // BUILDFLAG(GOOGLE_CHROME_BRANDING)
diff --git a/www/ungoogled-chromium/files/patch-components_policy_core_common_policy__utils.cc b/www/ungoogled-chromium/files/patch-components_policy_core_common_policy__utils.cc
new file mode 100644
index 000000000000..e74592a74610
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_policy_core_common_policy__utils.cc
@@ -0,0 +1,11 @@
+--- components/policy/core/common/policy_utils.cc.orig 2024-02-03 15:42:55 UTC
++++ components/policy/core/common/policy_utils.cc
+@@ -28,7 +28,7 @@ bool IsPolicyTestingEnabled(PrefService* pref_service,
+ return true;
+ }
+
+-#if BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ if (channel == version_info::Channel::DEV) {
+ return true;
+ }
diff --git a/www/ungoogled-chromium/files/patch-components_qr__code__generator_BUILD.gn b/www/ungoogled-chromium/files/patch-components_qr__code__generator_BUILD.gn
new file mode 100644
index 000000000000..d875fc1d0e7d
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_qr__code__generator_BUILD.gn
@@ -0,0 +1,32 @@
+--- components/qr_code_generator/BUILD.gn.orig 2024-02-03 15:42:55 UTC
++++ components/qr_code_generator/BUILD.gn
+@@ -39,20 +39,20 @@ source_set("qr_code_generator") {
+ ]
+ deps = [
+ ":qr_code_generator_features",
+- ":qr_code_generator_ffi_glue",
++# ":qr_code_generator_ffi_glue",
+ "//base",
+ ]
+ public_deps = [ "//base" ]
+ }
+
+-rust_static_library("qr_code_generator_ffi_glue") {
+- allow_unsafe = true # Needed for FFI that underpins the `cxx` crate.
+- crate_root = "qr_code_generator_ffi_glue.rs"
+- sources = [ "qr_code_generator_ffi_glue.rs" ]
+- cxx_bindings = [ "qr_code_generator_ffi_glue.rs" ]
+- visibility = [ ":qr_code_generator" ]
+- deps = [ "//third_party/rust/qr_code/v2:lib" ]
+-}
++#rust_static_library("qr_code_generator_ffi_glue") {
++# allow_unsafe = true # Needed for FFI that underpins the `cxx` crate.
++# crate_root = "qr_code_generator_ffi_glue.rs"
++# sources = [ "qr_code_generator_ffi_glue.rs" ]
++# cxx_bindings = [ "qr_code_generator_ffi_glue.rs" ]
++# visibility = [ ":qr_code_generator" ]
++# deps = [ "//third_party/rust/qr_code/v2:lib" ]
++#}
+
+ source_set("unit_tests") {
+ testonly = true
diff --git a/www/ungoogled-chromium/files/patch-components_qr__code__generator_features.cc b/www/ungoogled-chromium/files/patch-components_qr__code__generator_features.cc
new file mode 100644
index 000000000000..3ef382e44cd7
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_qr__code__generator_features.cc
@@ -0,0 +1,11 @@
+--- components/qr_code_generator/features.cc.orig 2024-02-03 15:42:55 UTC
++++ components/qr_code_generator/features.cc
+@@ -12,7 +12,7 @@ namespace qr_code_generator {
+ BASE_FEATURE(kRustyQrCodeGeneratorFeature,
+ "RustyQrCodeGenerator",
+ #if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
+- BUILDFLAG(IS_ANDROID)
++ BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_BSD)
+ base::FEATURE_ENABLED_BY_DEFAULT);
+ #else
+ base::FEATURE_DISABLED_BY_DEFAULT);
diff --git a/www/ungoogled-chromium/files/patch-components_qr__code__generator_features.h b/www/ungoogled-chromium/files/patch-components_qr__code__generator_features.h
new file mode 100644
index 000000000000..13207fa9e9ef
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_qr__code__generator_features.h
@@ -0,0 +1,14 @@
+--- components/qr_code_generator/features.h.orig 2024-02-03 15:42:55 UTC
++++ components/qr_code_generator/features.h
+@@ -26,7 +26,11 @@ BASE_DECLARE_FEATURE(kRustyQrCodeGeneratorFeature);
+ // See https://crbug.com/1431991 for more details about the feature and the
+ // Rust QR Code Generator project.
+ inline bool IsRustyQrCodeGeneratorFeatureEnabled() {
++#ifdef notyet
+ return base::FeatureList::IsEnabled(kRustyQrCodeGeneratorFeature);
++#else
++ return false;
++#endif
+ }
+
+ } // namespace qr_code_generator
diff --git a/www/ungoogled-chromium/files/patch-components_qr__code__generator_qr__code__generator.cc b/www/ungoogled-chromium/files/patch-components_qr__code__generator_qr__code__generator.cc
new file mode 100644
index 000000000000..3f40ca70edf6
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_qr__code__generator_qr__code__generator.cc
@@ -0,0 +1,47 @@
+--- components/qr_code_generator/qr_code_generator.cc.orig 2024-02-03 15:42:55 UTC
++++ components/qr_code_generator/qr_code_generator.cc
+@@ -11,12 +11,16 @@
+ #include <vector>
+
+ #include "base/check_op.h"
++#ifdef notyet
+ #include "base/containers/span_rust.h"
++#endif
+ #include "base/memory/raw_ptr.h"
+ #include "base/notreached.h"
+ #include "base/numerics/safe_conversions.h"
+ #include "components/qr_code_generator/features.h"
++#ifdef notyet
+ #include "components/qr_code_generator/qr_code_generator_ffi_glue.rs.h"
++#endif
+
+ namespace qr_code_generator {
+
+@@ -572,6 +576,7 @@ size_t SegmentSpanLength(base::span<const QRCodeGenera
+ return sum;
+ }
+
++#ifdef notyet
+ absl::optional<QRCodeGenerator::GeneratedCode> GenerateQrCodeUsingRust(
+ base::span<const uint8_t> in,
+ absl::optional<int> min_version) {
+@@ -596,6 +601,7 @@ absl::optional<QRCodeGenerator::GeneratedCode> Generat
+ CHECK_EQ(code.data.size(), static_cast<size_t>(code.qr_size * code.qr_size));
+ return code;
+ }
++#endif
+
+ } // namespace
+
+@@ -617,9 +623,11 @@ absl::optional<QRCodeGenerator::GeneratedCode> QRCodeG
+ return absl::nullopt;
+ }
+
++#ifdef notyet
+ if (IsRustyQrCodeGeneratorFeatureEnabled()) {
+ return GenerateQrCodeUsingRust(in, min_version);
+ }
++#endif
+
+ std::vector<Segment> segments;
+ const QRVersionInfo* version_info = nullptr;
diff --git a/www/ungoogled-chromium/files/patch-components_search__engines_template__url__service.cc b/www/ungoogled-chromium/files/patch-components_search__engines_template__url__service.cc
new file mode 100644
index 000000000000..6a4f73686db9
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_search__engines_template__url__service.cc
@@ -0,0 +1,11 @@
+--- components/search_engines/template_url_service.cc.orig 2024-02-03 15:42:55 UTC
++++ components/search_engines/template_url_service.cc
+@@ -2588,7 +2588,7 @@ bool TemplateURLService::MatchesDefaultSearchProvider(
+ std::unique_ptr<EnterpriseSiteSearchManager>
+ TemplateURLService::GetEnterpriseSiteSearchManager(PrefService* prefs) {
+ #if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
+- BUILDFLAG(IS_CHROMEOS_ASH)
++ BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_BSD)
+ return base::FeatureList::IsEnabled(omnibox::kSiteSearchSettingsPolicy)
+ ? std::make_unique<EnterpriseSiteSearchManager>(
+ prefs, base::BindRepeating(
diff --git a/www/ungoogled-chromium/files/patch-components_services_paint__preview__compositor_paint__preview__compositor__collection__impl.cc b/www/ungoogled-chromium/files/patch-components_services_paint__preview__compositor_paint__preview__compositor__collection__impl.cc
index cad028161414..65dd7bed4046 100644
--- a/www/ungoogled-chromium/files/patch-components_services_paint__preview__compositor_paint__preview__compositor__collection__impl.cc
+++ b/www/ungoogled-chromium/files/patch-components_services_paint__preview__compositor_paint__preview__compositor__collection__impl.cc
@@ -1,6 +1,6 @@
---- components/services/paint_preview_compositor/paint_preview_compositor_collection_impl.cc.orig 2023-09-17 07:59:53 UTC
+--- components/services/paint_preview_compositor/paint_preview_compositor_collection_impl.cc.orig 2024-02-03 15:42:55 UTC
+++ components/services/paint_preview_compositor/paint_preview_compositor_collection_impl.cc
-@@ -21,7 +21,7 @@
+@@ -22,7 +22,7 @@
#if BUILDFLAG(IS_WIN)
#include "content/public/child/dwrite_font_proxy_init_win.h"
@@ -9,7 +9,7 @@
#include "components/services/font/public/cpp/font_loader.h"
#endif
-@@ -78,7 +78,7 @@ PaintPreviewCompositorCollectionImpl::PaintPreviewComp
+@@ -79,7 +79,7 @@ PaintPreviewCompositorCollectionImpl::PaintPreviewComp
// Initialize font access for Skia.
#if BUILDFLAG(IS_WIN)
content::InitializeDWriteFontProxy();
@@ -18,8 +18,8 @@
mojo::PendingRemote<font_service::mojom::FontService> font_service;
content::UtilityThread::Get()->BindHostReceiver(
font_service.InitWithNewPipeAndPassReceiver());
-@@ -101,7 +101,7 @@ PaintPreviewCompositorCollectionImpl::PaintPreviewComp
- base::BindOnce([] { SkFontMgr::RefDefault(); }));
+@@ -102,7 +102,7 @@ PaintPreviewCompositorCollectionImpl::PaintPreviewComp
+ base::BindOnce([] { skia::DefaultFontMgr(); }));
// Sanity check that fonts are working.
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
diff --git a/www/ungoogled-chromium/files/patch-components_services_screen__ai_sandbox_screen__ai__sandbox__hook__linux.cc b/www/ungoogled-chromium/files/patch-components_services_screen__ai_sandbox_screen__ai__sandbox__hook__linux.cc
index d84000473787..9c50afad25ad 100644
--- a/www/ungoogled-chromium/files/patch-components_services_screen__ai_sandbox_screen__ai__sandbox__hook__linux.cc
+++ b/www/ungoogled-chromium/files/patch-components_services_screen__ai_sandbox_screen__ai__sandbox__hook__linux.cc
@@ -1,4 +1,4 @@
---- components/services/screen_ai/sandbox/screen_ai_sandbox_hook_linux.cc.orig 2023-09-17 07:59:53 UTC
+--- components/services/screen_ai/sandbox/screen_ai_sandbox_hook_linux.cc.orig 2024-02-03 15:42:55 UTC
+++ components/services/screen_ai/sandbox/screen_ai_sandbox_hook_linux.cc
@@ -53,6 +53,7 @@ bool ScreenAIPreSandboxHook(sandbox::policy::SandboxLi
}
@@ -8,7 +8,7 @@
auto* instance = sandbox::policy::SandboxLinux::GetInstance();
std::vector<BrokerFilePermission> permissions{
-@@ -82,6 +83,7 @@ bool ScreenAIPreSandboxHook(sandbox::policy::SandboxLi
+@@ -75,6 +76,7 @@ bool ScreenAIPreSandboxHook(sandbox::policy::SandboxLi
sandbox::syscall_broker::COMMAND_OPEN}),
permissions, sandbox::policy::SandboxLinux::PreSandboxHook(), options);
instance->EngageNamespaceSandboxIfPossible();
diff --git a/www/ungoogled-chromium/files/patch-components_services_screen__ai_screen__ai__library__wrapper.cc b/www/ungoogled-chromium/files/patch-components_services_screen__ai_screen__ai__library__wrapper.cc
deleted file mode 100644
index b7324ba3188a..000000000000
--- a/www/ungoogled-chromium/files/patch-components_services_screen__ai_screen__ai__library__wrapper.cc
+++ /dev/null
@@ -1,29 +0,0 @@
---- components/services/screen_ai/screen_ai_library_wrapper.cc.orig 2023-11-04 07:08:51 UTC
-+++ components/services/screen_ai/screen_ai_library_wrapper.cc
-@@ -190,7 +190,7 @@ ScreenAILibraryWrapper::PerformOcr(const SkBitmap& ima
-
- // TODO(crbug.com/1443341): Remove this after fixing the crash issue on Linux
- // official.
--#if BUILDFLAG(IS_LINUX)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
- free_library_allocated_char_array_(library_buffer.release());
- #endif
-
-@@ -222,7 +222,7 @@ ScreenAILibraryWrapper::ExtractLayout(const SkBitmap&
-
- // TODO(crbug.com/1443341): Remove this after fixing the crash issue on Linux
- // official.
--#if BUILDFLAG(IS_LINUX)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
- free_library_allocated_char_array_(library_buffer.release());
- #endif
-
-@@ -256,7 +256,7 @@ absl::optional<std::vector<int32_t>> ScreenAILibraryWr
-
- // TODO(crbug.com/1443341): Remove this after fixing the crash issue on Linux
- // official.
--#if BUILDFLAG(IS_LINUX)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
- free_library_allocated_int32_array_(library_buffer.release());
- #endif
-
diff --git a/www/ungoogled-chromium/files/patch-components_supervised__user_core_browser_proto__fetcher.cc b/www/ungoogled-chromium/files/patch-components_supervised__user_core_browser_proto__fetcher.cc
index 515e561211d0..9e4aca87feac 100644
--- a/www/ungoogled-chromium/files/patch-components_supervised__user_core_browser_proto__fetcher.cc
+++ b/www/ungoogled-chromium/files/patch-components_supervised__user_core_browser_proto__fetcher.cc
@@ -1,6 +1,6 @@
---- components/supervised_user/core/browser/proto_fetcher.cc.orig 2023-11-04 07:08:51 UTC
+--- components/supervised_user/core/browser/proto_fetcher.cc.orig 2024-02-03 15:42:55 UTC
+++ components/supervised_user/core/browser/proto_fetcher.cc
-@@ -525,8 +525,11 @@ class RetryingFetcherImpl final : public DeferredFetch
+@@ -527,8 +527,11 @@ class RetryingFetcherImpl final : public DeferredFetch
// Not copyable.
RetryingFetcherImpl(const RetryingFetcherImpl&) = delete;
RetryingFetcherImpl& operator=(const RetryingFetcherImpl&) = delete;
@@ -13,7 +13,7 @@
callback_ = std::move(callback);
Retry();
}
-@@ -564,7 +567,11 @@ class RetryingFetcherImpl final : public DeferredFetch
+@@ -566,7 +569,11 @@ class RetryingFetcherImpl final : public DeferredFetch
}
// Client callback.
@@ -25,7 +25,7 @@
// Retry controls.
base::OneShotTimer timer_;
-@@ -707,7 +714,11 @@ ParallelFetchManager<Request, Response>::ParallelFetch
+@@ -709,7 +716,11 @@ ParallelFetchManager<Request, Response>::ParallelFetch
template <typename Request, typename Response>
void ParallelFetchManager<Request, Response>::Fetch(
const Request& request,
diff --git a/www/ungoogled-chromium/files/patch-components_supervised__user_core_browser_supervised__user__service.cc b/www/ungoogled-chromium/files/patch-components_supervised__user_core_browser_supervised__user__service.cc
index 4b7b22ad7ea2..3d1ef98d5d60 100644
--- a/www/ungoogled-chromium/files/patch-components_supervised__user_core_browser_supervised__user__service.cc
+++ b/www/ungoogled-chromium/files/patch-components_supervised__user_core_browser_supervised__user__service.cc
@@ -1,6 +1,6 @@
---- components/supervised_user/core/browser/supervised_user_service.cc.orig 2023-12-23 12:33:28 UTC
+--- components/supervised_user/core/browser/supervised_user_service.cc.orig 2024-02-03 15:42:55 UTC
+++ components/supervised_user/core/browser/supervised_user_service.cc
-@@ -212,7 +212,7 @@ FirstTimeInterstitialBannerState SupervisedUserService
+@@ -162,7 +162,7 @@ FirstTimeInterstitialBannerState SupervisedUserService
const FirstTimeInterstitialBannerState original_state) {
FirstTimeInterstitialBannerState target_state = original_state;
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-components_supervised__user_core_common_features.cc b/www/ungoogled-chromium/files/patch-components_supervised__user_core_common_features.cc
index 7297e0f0d14e..fc76e3d4d6ba 100644
--- a/www/ungoogled-chromium/files/patch-components_supervised__user_core_common_features.cc
+++ b/www/ungoogled-chromium/files/patch-components_supervised__user_core_common_features.cc
@@ -1,6 +1,6 @@
---- components/supervised_user/core/common/features.cc.orig 2023-12-23 12:33:28 UTC
+--- components/supervised_user/core/common/features.cc.orig 2024-02-03 15:42:55 UTC
+++ components/supervised_user/core/common/features.cc
-@@ -91,7 +91,7 @@ BASE_FEATURE(kEnableManagedByParentUi,
+@@ -85,7 +85,7 @@ BASE_FEATURE(kEnableManagedByParentUi,
"EnableManagedByParentUi",
base::FEATURE_DISABLED_BY_DEFAULT);
@@ -9,7 +9,7 @@
BASE_FEATURE(kEnableExtensionsPermissionsForSupervisedUsersOnDesktop,
"EnableExtensionsPermissionsForSupervisedUsersOnDesktop",
base::FEATURE_DISABLED_BY_DEFAULT);
-@@ -149,7 +149,7 @@ bool IsChildAccountSupervisionEnabled() {
+@@ -143,7 +143,7 @@ bool IsChildAccountSupervisionEnabled() {
return base::FeatureList::IsEnabled(
supervised_user::
kFilterWebsitesForSupervisedUsersOnDesktopAndIOS) ||
diff --git a/www/ungoogled-chromium/files/patch-components_sync_base_features.cc b/www/ungoogled-chromium/files/patch-components_sync_base_features.cc
index d9b1b79eda88..fc5b7d003f05 100644
--- a/www/ungoogled-chromium/files/patch-components_sync_base_features.cc
+++ b/www/ungoogled-chromium/files/patch-components_sync_base_features.cc
@@ -1,8 +1,8 @@
---- components/sync/base/features.cc.orig 2023-12-23 12:33:28 UTC
+--- components/sync/base/features.cc.orig 2024-02-03 15:42:55 UTC
+++ components/sync/base/features.cc
@@ -101,7 +101,7 @@ BASE_FEATURE(kEnablePreferencesAccountStorage,
BASE_FEATURE(kSyncPollImmediatelyOnEveryStartup,
- "SyncPollImmediatelyOnEveryStartup",
+ "SyncPollImmediatelyOnEveryStartup2",
#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || \
- BUILDFLAG(IS_WIN)
+ BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
diff --git a/www/ungoogled-chromium/files/patch-components_user__education_common_product__messaging__controller.cc b/www/ungoogled-chromium/files/patch-components_user__education_common_product__messaging__controller.cc
deleted file mode 100644
index 7ae8348dc317..000000000000
--- a/www/ungoogled-chromium/files/patch-components_user__education_common_product__messaging__controller.cc
+++ /dev/null
@@ -1,10 +0,0 @@
---- components/user_education/common/product_messaging_controller.cc.orig 2023-11-04 07:08:51 UTC
-+++ components/user_education/common/product_messaging_controller.cc
-@@ -6,6 +6,7 @@
-
- #include <sstream>
- #include <utility>
-+#include <vector>
-
- #include "base/containers/contains.h"
- #include "base/functional/callback.h"
diff --git a/www/ungoogled-chromium/files/patch-components_user__education_views_help__bubble__view.cc b/www/ungoogled-chromium/files/patch-components_user__education_views_help__bubble__view.cc
index f38a157a54bc..324ce2898837 100644
--- a/www/ungoogled-chromium/files/patch-components_user__education_views_help__bubble__view.cc
+++ b/www/ungoogled-chromium/files/patch-components_user__education_views_help__bubble__view.cc
@@ -1,6 +1,6 @@
---- components/user_education/views/help_bubble_view.cc.orig 2023-12-23 12:33:28 UTC
+--- components/user_education/views/help_bubble_view.cc.orig 2024-02-03 15:42:55 UTC
+++ components/user_education/views/help_bubble_view.cc
-@@ -1003,7 +1003,7 @@ gfx::Rect HelpBubbleView::GetAnchorRect() const {
+@@ -1009,7 +1009,7 @@ gfx::Rect HelpBubbleView::GetAnchorRect() const {
void HelpBubbleView::OnBeforeBubbleWidgetInit(views::Widget::InitParams* params,
views::Widget* widget) const {
BubbleDialogDelegateView::OnBeforeBubbleWidgetInit(params, widget);
diff --git a/www/ungoogled-chromium/files/patch-components_viz_host_host__display__client.cc b/www/ungoogled-chromium/files/patch-components_viz_host_host__display__client.cc
new file mode 100644
index 000000000000..5114daf0ae4a
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_viz_host_host__display__client.cc
@@ -0,0 +1,11 @@
+--- components/viz/host/host_display_client.cc.orig 2024-02-03 15:42:55 UTC
++++ components/viz/host/host_display_client.cc
+@@ -64,7 +64,7 @@ void HostDisplayClient::AddChildWindowToBrowser(
+ }
+ #endif
+
+-#if BUILDFLAG(IS_LINUX) && BUILDFLAG(IS_OZONE_X11)
++#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)) && BUILDFLAG(IS_OZONE_X11)
+ void HostDisplayClient::DidCompleteSwapWithNewSize(const gfx::Size& size) {
+ NOTIMPLEMENTED();
+ }
diff --git a/www/ungoogled-chromium/files/patch-components_viz_host_host__display__client.h b/www/ungoogled-chromium/files/patch-components_viz_host_host__display__client.h
new file mode 100644
index 000000000000..052bd733f3d5
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_viz_host_host__display__client.h
@@ -0,0 +1,11 @@
+--- components/viz/host/host_display_client.h.orig 2024-02-03 15:42:55 UTC
++++ components/viz/host/host_display_client.h
+@@ -52,7 +52,7 @@ class VIZ_HOST_EXPORT HostDisplayClient : public mojom
+ void AddChildWindowToBrowser(gpu::SurfaceHandle child_window) override;
+ #endif
+
+-#if BUILDFLAG(IS_LINUX) && BUILDFLAG(IS_OZONE_X11)
++#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)) && BUILDFLAG(IS_OZONE_X11)
+ void DidCompleteSwapWithNewSize(const gfx::Size& size) override;
+ #endif // BUILDFLAG(IS_LINUX) && BUILDFLAG(IS_OZONE_X11)
+
diff --git a/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_skia__output__surface__impl.cc b/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_skia__output__surface__impl.cc
index 8472e0a48b64..407292cd2e97 100644
--- a/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_skia__output__surface__impl.cc
+++ b/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_skia__output__surface__impl.cc
@@ -1,6 +1,6 @@
---- components/viz/service/display_embedder/skia_output_surface_impl.cc.orig 2023-11-04 07:08:51 UTC
+--- components/viz/service/display_embedder/skia_output_surface_impl.cc.orig 2024-02-03 15:42:55 UTC
+++ components/viz/service/display_embedder/skia_output_surface_impl.cc
-@@ -1512,7 +1512,7 @@ GrBackendFormat SkiaOutputSurfaceImpl::GetGrBackendFor
+@@ -1528,7 +1528,7 @@ GrBackendFormat SkiaOutputSurfaceImpl::GetGrBackendFor
->GetDeviceQueue()
->GetVulkanPhysicalDevice(),
VK_IMAGE_TILING_OPTIMAL, vk_format, yuv_color_space, ycbcr_info);
diff --git a/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_skia__output__surface__impl__on__gpu.cc b/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_skia__output__surface__impl__on__gpu.cc
index 18a956045818..d24c1851b7dd 100644
--- a/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_skia__output__surface__impl__on__gpu.cc
+++ b/www/ungoogled-chromium/files/patch-components_viz_service_display__embedder_skia__output__surface__impl__on__gpu.cc
@@ -1,6 +1,6 @@
---- components/viz/service/display_embedder/skia_output_surface_impl_on_gpu.cc.orig 2023-12-23 12:33:28 UTC
+--- components/viz/service/display_embedder/skia_output_surface_impl_on_gpu.cc.orig 2024-02-03 15:42:55 UTC
+++ components/viz/service/display_embedder/skia_output_surface_impl_on_gpu.cc
-@@ -1567,7 +1567,12 @@ void SkiaOutputSurfaceImplOnGpu::CopyOutputNV12(
+@@ -1564,7 +1564,12 @@ void SkiaOutputSurfaceImplOnGpu::CopyOutputNV12(
// Issue readbacks from the surfaces:
for (size_t i = 0; i < CopyOutputResult::kNV12MaxPlanes; ++i) {
diff --git a/www/ungoogled-chromium/files/patch-components_viz_service_display_skia__renderer.cc b/www/ungoogled-chromium/files/patch-components_viz_service_display_skia__renderer.cc
index 1af9a5547977..a78b8ba26bf8 100644
--- a/www/ungoogled-chromium/files/patch-components_viz_service_display_skia__renderer.cc
+++ b/www/ungoogled-chromium/files/patch-components_viz_service_display_skia__renderer.cc
@@ -1,6 +1,6 @@
---- components/viz/service/display/skia_renderer.cc.orig 2023-11-04 07:08:51 UTC
+--- components/viz/service/display/skia_renderer.cc.orig 2024-02-03 15:42:55 UTC
+++ components/viz/service/display/skia_renderer.cc
-@@ -1219,7 +1219,7 @@ void SkiaRenderer::ClearFramebuffer() {
+@@ -1340,7 +1340,7 @@ void SkiaRenderer::ClearFramebuffer() {
if (current_frame()->current_render_pass->has_transparent_background) {
ClearCanvas(SkColors::kTransparent);
} else {
diff --git a/www/ungoogled-chromium/files/patch-components_viz_service_frame__sinks_root__compositor__frame__sink__impl.cc b/www/ungoogled-chromium/files/patch-components_viz_service_frame__sinks_root__compositor__frame__sink__impl.cc
new file mode 100644
index 000000000000..f74e8160250e
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_viz_service_frame__sinks_root__compositor__frame__sink__impl.cc
@@ -0,0 +1,20 @@
+--- components/viz/service/frame_sinks/root_compositor_frame_sink_impl.cc.orig 2024-02-03 15:42:55 UTC
++++ components/viz/service/frame_sinks/root_compositor_frame_sink_impl.cc
+@@ -109,7 +109,7 @@ RootCompositorFrameSinkImpl::Create(
+ output_surface->SetNeedsSwapSizeNotifications(
+ params->send_swap_size_notifications);
+
+-#if BUILDFLAG(IS_LINUX) && BUILDFLAG(IS_OZONE_X11)
++#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)) && BUILDFLAG(IS_OZONE_X11)
+ // For X11, we need notify client about swap completion after resizing, so the
+ // client can use it for synchronize with X11 WM.
+ output_surface->SetNeedsSwapSizeNotifications(true);
+@@ -713,7 +713,7 @@ void RootCompositorFrameSinkImpl::DisplayDidCompleteSw
+ #if BUILDFLAG(IS_ANDROID)
+ if (display_client_ && enable_swap_competion_callback_)
+ display_client_->DidCompleteSwapWithSize(pixel_size);
+-#elif BUILDFLAG(IS_LINUX) && BUILDFLAG(IS_OZONE_X11)
++#elif (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)) && BUILDFLAG(IS_OZONE_X11)
+ if (display_client_ && pixel_size != last_swap_pixel_size_) {
+ last_swap_pixel_size_ = pixel_size;
+ display_client_->DidCompleteSwapWithNewSize(last_swap_pixel_size_);
diff --git a/www/ungoogled-chromium/files/patch-components_viz_service_frame__sinks_root__compositor__frame__sink__impl.h b/www/ungoogled-chromium/files/patch-components_viz_service_frame__sinks_root__compositor__frame__sink__impl.h
new file mode 100644
index 000000000000..f5ae7066bc75
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_viz_service_frame__sinks_root__compositor__frame__sink__impl.h
@@ -0,0 +1,11 @@
+--- components/viz/service/frame_sinks/root_compositor_frame_sink_impl.h.orig 2024-02-03 15:42:55 UTC
++++ components/viz/service/frame_sinks/root_compositor_frame_sink_impl.h
+@@ -210,7 +210,7 @@ class VIZ_SERVICE_EXPORT RootCompositorFrameSinkImpl
+ // to actually unref.
+ LocalSurfaceId to_evict_on_next_draw_and_swap_ = LocalSurfaceId();
+
+-#if BUILDFLAG(IS_LINUX) && BUILDFLAG(IS_OZONE_X11)
++#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)) && BUILDFLAG(IS_OZONE_X11)
+ gfx::Size last_swap_pixel_size_;
+ #endif // BUILDFLAG(IS_LINUX) && BUILDFLAG(IS_OZONE_X11)
+
diff --git a/www/ungoogled-chromium/files/patch-components_viz_test_fake__display__client.cc b/www/ungoogled-chromium/files/patch-components_viz_test_fake__display__client.cc
new file mode 100644
index 000000000000..0e88e07ff33d
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_viz_test_fake__display__client.cc
@@ -0,0 +1,11 @@
+--- components/viz/test/fake_display_client.cc.orig 2024-02-03 15:42:55 UTC
++++ components/viz/test/fake_display_client.cc
+@@ -27,7 +27,7 @@ void FakeDisplayClient::AddChildWindowToBrowser(
+ gpu::SurfaceHandle child_window) {}
+ #endif
+
+-#if BUILDFLAG(IS_LINUX) && BUILDFLAG(IS_OZONE_X11)
++#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)) && BUILDFLAG(IS_OZONE_X11)
+ void FakeDisplayClient::DidCompleteSwapWithNewSize(const gfx::Size& size) {}
+ #endif // BUILDFLAG(IS_LINUX) && BUILDFLAG(IS_OZONE_X11)
+
diff --git a/www/ungoogled-chromium/files/patch-components_viz_test_fake__display__client.h b/www/ungoogled-chromium/files/patch-components_viz_test_fake__display__client.h
new file mode 100644
index 000000000000..a9d29794d04e
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_viz_test_fake__display__client.h
@@ -0,0 +1,11 @@
+--- components/viz/test/fake_display_client.h.orig 2024-02-03 15:42:55 UTC
++++ components/viz/test/fake_display_client.h
+@@ -36,7 +36,7 @@ class FakeDisplayClient : public mojom::DisplayClient
+ void AddChildWindowToBrowser(gpu::SurfaceHandle child_window) override;
+ #endif
+
+-#if BUILDFLAG(IS_LINUX) && BUILDFLAG(IS_OZONE_X11)
++#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)) && BUILDFLAG(IS_OZONE_X11)
+ void DidCompleteSwapWithNewSize(const gfx::Size& size) override;
+ #endif // BUILDFLAG(IS_LINUX) && BUILDFLAG(IS_OZONE_X11)
+
diff --git a/www/ungoogled-chromium/files/patch-components_viz_test_mock__display__client.h b/www/ungoogled-chromium/files/patch-components_viz_test_mock__display__client.h
new file mode 100644
index 000000000000..f933949438b1
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-components_viz_test_mock__display__client.h
@@ -0,0 +1,11 @@
+--- components/viz/test/mock_display_client.h.orig 2024-02-03 15:42:55 UTC
++++ components/viz/test/mock_display_client.h
+@@ -43,7 +43,7 @@ class MockDisplayClient : public mojom::DisplayClient
+ MOCK_METHOD1(SetWideColorEnabled, void(bool enabled));
+ MOCK_METHOD1(SetPreferredRefreshRate, void(float refresh_rate));
+ #endif
+-#if BUILDFLAG(IS_LINUX) && BUILDFLAG(IS_OZONE_X11)
++#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)) && BUILDFLAG(IS_OZONE_X11)
+ MOCK_METHOD1(DidCompleteSwapWithNewSize, void(const gfx::Size&));
+ #endif // BUILDFLAG(IS_LINUX) && BUILDFLAG(IS_OZONE_X11)
+
diff --git a/www/ungoogled-chromium/files/patch-content_app_content__main__runner__impl.cc b/www/ungoogled-chromium/files/patch-content_app_content__main__runner__impl.cc
index dfb3212bb0e3..2a2061155425 100644
--- a/www/ungoogled-chromium/files/patch-content_app_content__main__runner__impl.cc
+++ b/www/ungoogled-chromium/files/patch-content_app_content__main__runner__impl.cc
@@ -1,6 +1,6 @@
---- content/app/content_main_runner_impl.cc.orig 2023-11-04 07:08:51 UTC
+--- content/app/content_main_runner_impl.cc.orig 2024-02-03 15:42:55 UTC
+++ content/app/content_main_runner_impl.cc
-@@ -144,13 +144,13 @@
+@@ -143,13 +143,13 @@
#include "content/browser/posix_file_descriptor_info_impl.h"
#include "content/public/common/content_descriptors.h"
@@ -16,7 +16,7 @@
#include "base/files/file_path_watcher_inotify.h"
#include "base/native_library.h"
#include "base/rand_util.h"
-@@ -188,6 +188,10 @@
+@@ -187,6 +187,10 @@
#include "media/base/media_switches.h"
#endif
@@ -27,7 +27,7 @@
#if BUILDFLAG(IS_ANDROID)
#include "base/system/sys_info.h"
#include "content/browser/android/battery_metrics.h"
-@@ -374,7 +378,7 @@ void InitializeZygoteSandboxForBrowserProcess(
+@@ -373,7 +377,7 @@ void InitializeZygoteSandboxForBrowserProcess(
}
#endif // BUILDFLAG(USE_ZYGOTE)
@@ -36,7 +36,7 @@
#if BUILDFLAG(ENABLE_PPAPI)
// Loads the (native) libraries but does not initialize them (i.e., does not
-@@ -412,7 +416,10 @@ void PreloadLibraryCdms() {
+@@ -411,7 +415,10 @@ void PreloadLibraryCdms() {
void PreSandboxInit() {
// Ensure the /dev/urandom is opened.
@@ -47,7 +47,7 @@
// May use sysinfo(), sched_getaffinity(), and open various /sys/ and /proc/
// files.
-@@ -424,9 +431,16 @@ void PreSandboxInit() {
+@@ -423,9 +430,16 @@ void PreSandboxInit() {
// https://boringssl.9oo91esource.qjz9zk/boringssl/+/HEAD/SANDBOXING.md
CRYPTO_pre_sandbox_init();
@@ -64,7 +64,7 @@
#if BUILDFLAG(ENABLE_PPAPI)
// Ensure access to the Pepper plugins before the sandbox is turned on.
-@@ -833,11 +847,10 @@ int ContentMainRunnerImpl::Initialize(ContentMainParam
+@@ -832,11 +846,10 @@ int ContentMainRunnerImpl::Initialize(ContentMainParam
kFieldTrialDescriptor + base::GlobalDescriptors::kBaseDescriptor);
#endif // !BUILDFLAG(IS_ANDROID)
@@ -78,7 +78,7 @@
#endif // !BUILDFLAG(IS_WIN)
-@@ -1033,8 +1046,20 @@ int ContentMainRunnerImpl::Initialize(ContentMainParam
+@@ -1029,8 +1042,20 @@ int ContentMainRunnerImpl::Initialize(ContentMainParam
process_type == switches::kZygoteProcess) {
PreSandboxInit();
}
@@ -99,7 +99,7 @@
delegate_->SandboxInitialized(process_type);
#if BUILDFLAG(USE_ZYGOTE)
-@@ -1102,7 +1127,7 @@ int NO_STACK_PROTECTOR ContentMainRunnerImpl::Run() {
+@@ -1098,7 +1123,7 @@ int NO_STACK_PROTECTOR ContentMainRunnerImpl::Run() {
->ReconfigureAfterFeatureListInit(process_type);
}
@@ -108,7 +108,7 @@
// If dynamic Mojo Core is being used, ensure that it's loaded very early in
// the child/zygote process, before any sandbox is initialized. The library
// is not fully initialized with IPC support until a ChildProcess is later
-@@ -1137,6 +1162,11 @@ int NO_STACK_PROTECTOR ContentMainRunnerImpl::Run() {
+@@ -1133,6 +1158,11 @@ int NO_STACK_PROTECTOR ContentMainRunnerImpl::Run() {
content_main_params_.reset();
RegisterMainThreadFactories();
diff --git a/www/ungoogled-chromium/files/patch-content_browser_BUILD.gn b/www/ungoogled-chromium/files/patch-content_browser_BUILD.gn
index fe6bdde7874d..20a02a72cd15 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-content_browser_BUILD.gn
@@ -1,6 +1,6 @@
---- content/browser/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- content/browser/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ content/browser/BUILD.gn
-@@ -2474,6 +2474,13 @@ source_set("browser") {
+@@ -2453,6 +2453,13 @@ source_set("browser") {
deps += [ "//media/mojo/mojom/stable:stable_video_decoder" ]
}
diff --git a/www/ungoogled-chromium/files/patch-content_browser_audio_audio__service.cc b/www/ungoogled-chromium/files/patch-content_browser_audio_audio__service.cc
index 6e8d91909525..675da3a96881 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_audio_audio__service.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_audio_audio__service.cc
@@ -1,4 +1,4 @@
---- content/browser/audio/audio_service.cc.orig 2023-04-08 11:38:38 UTC
+--- content/browser/audio/audio_service.cc.orig 2024-02-03 15:42:55 UTC
+++ content/browser/audio/audio_service.cc
@@ -33,7 +33,7 @@
#if BUILDFLAG(ENABLE_PASSTHROUGH_AUDIO_CODECS)
@@ -9,7 +9,7 @@
#include "ui/display/display_util.h"
#endif // BUILDFLAG(IS_LINUX)
-@@ -225,7 +225,7 @@ audio::mojom::AudioService& GetAudioService() {
+@@ -196,7 +196,7 @@ audio::mojom::AudioService& GetAudioService() {
->PostTaskAndReplyWithResult(
FROM_HERE, base::BindOnce(&ScanEdidBitstreams),
base::BindOnce(&LaunchAudioService, std::move(receiver)));
diff --git a/www/ungoogled-chromium/files/patch-content_browser_browser__main__loop.cc b/www/ungoogled-chromium/files/patch-content_browser_browser__main__loop.cc
index af79fb42ce12..ded1b1ccff11 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_browser__main__loop.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_browser__main__loop.cc
@@ -1,6 +1,6 @@
---- content/browser/browser_main_loop.cc.orig 2023-12-23 12:33:28 UTC
+--- content/browser/browser_main_loop.cc.orig 2024-02-03 15:42:55 UTC
+++ content/browser/browser_main_loop.cc
-@@ -247,6 +247,12 @@
+@@ -248,6 +248,12 @@
#include "mojo/public/cpp/bindings/lib/test_random_mojo_delays.h"
#endif
@@ -13,7 +13,7 @@
// One of the linux specific headers defines this as a macro.
#ifdef DestroyAll
#undef DestroyAll
-@@ -552,6 +558,12 @@ int BrowserMainLoop::EarlyInitialization() {
+@@ -553,6 +559,12 @@ int BrowserMainLoop::EarlyInitialization() {
// by now since a thread to start the ServiceManager has been created
// before the browser main loop starts.
DCHECK(SandboxHostLinux::GetInstance()->IsInitialized());
@@ -26,7 +26,7 @@
#endif
// GLib's spawning of new processes is buggy, so it's important that at this
-@@ -589,7 +601,7 @@ int BrowserMainLoop::EarlyInitialization() {
+@@ -590,7 +602,7 @@ int BrowserMainLoop::EarlyInitialization() {
base::PlatformThread::SetCurrentThreadType(base::ThreadType::kCompositing);
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
diff --git a/www/ungoogled-chromium/files/patch-content_browser_compositor_viz__process__transport__factory.cc b/www/ungoogled-chromium/files/patch-content_browser_compositor_viz__process__transport__factory.cc
new file mode 100644
index 000000000000..0553009913e1
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-content_browser_compositor_viz__process__transport__factory.cc
@@ -0,0 +1,11 @@
+--- content/browser/compositor/viz_process_transport_factory.cc.orig 2024-02-03 15:42:55 UTC
++++ content/browser/compositor/viz_process_transport_factory.cc
+@@ -118,7 +118,7 @@ class HostDisplayClient : public viz::HostDisplayClien
+ HostDisplayClient& operator=(const HostDisplayClient&) = delete;
+
+ // viz::HostDisplayClient:
+-#if BUILDFLAG(IS_LINUX) && BUILDFLAG(IS_OZONE_X11)
++#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)) && BUILDFLAG(IS_OZONE_X11)
+ void DidCompleteSwapWithNewSize(const gfx::Size& size) override {
+ compositor_->OnCompleteSwapWithNewSize(size);
+ }
diff --git a/www/ungoogled-chromium/files/patch-content_browser_generic__sensor_frame__sensor__provider__proxy.cc b/www/ungoogled-chromium/files/patch-content_browser_generic__sensor_frame__sensor__provider__proxy.cc
deleted file mode 100644
index 920a631de1a7..000000000000
--- a/www/ungoogled-chromium/files/patch-content_browser_generic__sensor_frame__sensor__provider__proxy.cc
+++ /dev/null
@@ -1,14 +0,0 @@
---- content/browser/generic_sensor/frame_sensor_provider_proxy.cc.orig 2023-12-23 12:33:28 UTC
-+++ content/browser/generic_sensor/frame_sensor_provider_proxy.cc
-@@ -20,7 +20,11 @@ namespace content {
-
- namespace {
-
-+#if (_LIBCPP_VERSION >= 160000)
- constexpr std::vector<blink::mojom::PermissionsPolicyFeature>
-+#else
-+std::vector<blink::mojom::PermissionsPolicyFeature>
-+#endif
- SensorTypeToPermissionsPolicyFeatures(SensorType type) {
- switch (type) {
- case SensorType::AMBIENT_LIGHT:
diff --git a/www/ungoogled-chromium/files/patch-content_browser_gpu_compositor__util.cc b/www/ungoogled-chromium/files/patch-content_browser_gpu_compositor__util.cc
index ab38be8135c5..8672a4a1766f 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_gpu_compositor__util.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_gpu_compositor__util.cc
@@ -1,20 +1,20 @@
---- content/browser/gpu/compositor_util.cc.orig 2023-01-13 08:56:02 UTC
+--- content/browser/gpu/compositor_util.cc.orig 2024-02-03 15:42:55 UTC
+++ content/browser/gpu/compositor_util.cc
-@@ -148,7 +148,7 @@ const GpuFeatureData GetGpuFeatureData(
- {"video_decode",
- SafeGetFeatureStatus(gpu_feature_info,
- gpu::GPU_FEATURE_TYPE_ACCELERATED_VIDEO_DECODE),
+@@ -149,7 +149,7 @@ const GpuFeatureData GetGpuFeatureData(
+ {"video_decode",
+ SafeGetFeatureStatus(gpu_feature_info,
+ gpu::GPU_FEATURE_TYPE_ACCELERATED_VIDEO_DECODE),
-#if BUILDFLAG(IS_LINUX)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
- !base::FeatureList::IsEnabled(media::kVaapiVideoDecodeLinux) ||
+ !base::FeatureList::IsEnabled(media::kVaapiVideoDecodeLinux) ||
#endif // BUILDFLAG(IS_LINUX)
- command_line.HasSwitch(switches::kDisableAcceleratedVideoDecode),
-@@ -159,7 +159,7 @@ const GpuFeatureData GetGpuFeatureData(
- {"video_encode",
- SafeGetFeatureStatus(gpu_feature_info,
- gpu::GPU_FEATURE_TYPE_ACCELERATED_VIDEO_ENCODE),
+ command_line.HasSwitch(switches::kDisableAcceleratedVideoDecode),
+@@ -160,7 +160,7 @@ const GpuFeatureData GetGpuFeatureData(
+ {"video_encode",
+ SafeGetFeatureStatus(gpu_feature_info,
+ gpu::GPU_FEATURE_TYPE_ACCELERATED_VIDEO_ENCODE),
-#if BUILDFLAG(IS_LINUX)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
- !base::FeatureList::IsEnabled(media::kVaapiVideoEncodeLinux),
+ !base::FeatureList::IsEnabled(media::kVaapiVideoEncodeLinux),
#else
- command_line.HasSwitch(switches::kDisableAcceleratedVideoEncode),
+ command_line.HasSwitch(switches::kDisableAcceleratedVideoEncode),
diff --git a/www/ungoogled-chromium/files/patch-content_browser_interest__group_header__direct__from__seller__signals.cc b/www/ungoogled-chromium/files/patch-content_browser_interest__group_header__direct__from__seller__signals.cc
index 8d66a28779e3..41e6eeaa728c 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_interest__group_header__direct__from__seller__signals.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_interest__group_header__direct__from__seller__signals.cc
@@ -1,8 +1,8 @@
---- content/browser/interest_group/header_direct_from_seller_signals.cc.orig 2023-11-04 07:08:51 UTC
+--- content/browser/interest_group/header_direct_from_seller_signals.cc.orig 2024-02-03 15:42:55 UTC
+++ content/browser/interest_group/header_direct_from_seller_signals.cc
-@@ -206,7 +206,11 @@ void OnJsonDecoded(
+@@ -58,7 +58,11 @@ HeaderDirectFromSellerSignals::Result::Result(
- } // namespace
+ HeaderDirectFromSellerSignals::Result::~Result() = default;
+#if defined(__clang__) && (__clang_major__ >= 16)
HeaderDirectFromSellerSignals::HeaderDirectFromSellerSignals() = default;
@@ -10,5 +10,5 @@
+HeaderDirectFromSellerSignals::HeaderDirectFromSellerSignals() {}
+#endif
- HeaderDirectFromSellerSignals::~HeaderDirectFromSellerSignals() = default;
-
+ HeaderDirectFromSellerSignals::~HeaderDirectFromSellerSignals() {
+ base::UmaHistogramCounts10000(
diff --git a/www/ungoogled-chromium/files/patch-content_browser_media_media__keys__listener__manager__impl.cc b/www/ungoogled-chromium/files/patch-content_browser_media_media__keys__listener__manager__impl.cc
index e33e979c7b2b..13e95b0227e2 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_media_media__keys__listener__manager__impl.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_media_media__keys__listener__manager__impl.cc
@@ -1,11 +1,11 @@
---- content/browser/media/media_keys_listener_manager_impl.cc.orig 2023-12-23 12:33:28 UTC
+--- content/browser/media/media_keys_listener_manager_impl.cc.orig 2024-02-03 15:42:55 UTC
+++ content/browser/media/media_keys_listener_manager_impl.cc
-@@ -252,7 +252,7 @@ void MediaKeysListenerManagerImpl::StartListeningForMe
- // TODO(crbug.com/1052397): Revisit once build flag switch of lacros-chrome is
- // complete.
+@@ -300,7 +300,7 @@ void MediaKeysListenerManagerImpl::StartListeningForMe
+ }
+
#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_LACROS)) || \
- BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC)
+ BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
- system_media_controls_ = system_media_controls::SystemMediaControls::Create(
- media::AudioManager::GetGlobalAppName());
- #endif
+ // Create SystemMediaControls with the SingletonHwnd.
+ browser_system_media_controls_ =
+ system_media_controls::SystemMediaControls::Create(
diff --git a/www/ungoogled-chromium/files/patch-content_browser_network__service__instance__impl.cc b/www/ungoogled-chromium/files/patch-content_browser_network__service__instance__impl.cc
index 53453e580e24..c6858e9796e0 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_network__service__instance__impl.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_network__service__instance__impl.cc
@@ -1,6 +1,6 @@
---- content/browser/network_service_instance_impl.cc.orig 2023-12-23 12:33:28 UTC
+--- content/browser/network_service_instance_impl.cc.orig 2024-02-03 15:42:55 UTC
+++ content/browser/network_service_instance_impl.cc
-@@ -80,7 +80,7 @@
+@@ -81,7 +81,7 @@
#include "content/browser/network/network_service_process_tracker_win.h"
#endif
@@ -9,7 +9,7 @@
#include "content/browser/system_dns_resolution/system_dns_resolver.h"
#include "services/network/public/mojom/system_dns_resolution.mojom-forward.h"
#endif
-@@ -357,7 +357,7 @@ void CreateInProcessNetworkService(
+@@ -359,7 +359,7 @@ void CreateInProcessNetworkService(
std::move(receiver)));
}
@@ -18,7 +18,7 @@
// Runs a self-owned SystemDnsResolverMojoImpl. This is meant to run on a
// high-priority thread pool.
void RunSystemDnsResolverOnThreadPool(
-@@ -426,7 +426,7 @@ network::mojom::NetworkServiceParamsPtr CreateNetworkS
+@@ -428,7 +428,7 @@ network::mojom::NetworkServiceParamsPtr CreateNetworkS
}
#endif // BUILDFLAG(IS_POSIX)
diff --git a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_delegated__frame__host.cc b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_delegated__frame__host.cc
index bd7b0c44d2b2..5d72b42587e0 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_delegated__frame__host.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_delegated__frame__host.cc
@@ -1,6 +1,6 @@
---- content/browser/renderer_host/delegated_frame_host.cc.orig 2023-09-17 07:59:53 UTC
+--- content/browser/renderer_host/delegated_frame_host.cc.orig 2024-02-03 15:42:55 UTC
+++ content/browser/renderer_host/delegated_frame_host.cc
-@@ -312,7 +312,7 @@ void DelegatedFrameHost::EmbedSurface(
+@@ -317,7 +317,7 @@ void DelegatedFrameHost::EmbedSurface(
if (!primary_surface_id ||
primary_surface_id->local_surface_id() != local_surface_id_) {
diff --git a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__process__host__impl.cc b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__process__host__impl.cc
index 818762f9fa59..953906dd9060 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__process__host__impl.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__process__host__impl.cc
@@ -1,4 +1,4 @@
---- content/browser/renderer_host/render_process_host_impl.cc.orig 2023-12-23 12:33:28 UTC
+--- content/browser/renderer_host/render_process_host_impl.cc.orig 2024-02-03 15:42:55 UTC
+++ content/browser/renderer_host/render_process_host_impl.cc
@@ -223,7 +223,7 @@
#include "third_party/blink/public/mojom/android_font_lookup/android_font_lookup.mojom.h"
@@ -9,7 +9,7 @@
#include <sys/resource.h>
#include "components/services/font/public/mojom/font_service.mojom.h" // nogncheck
-@@ -953,7 +953,7 @@ static constexpr size_t kUnknownPlatformProcessLimit =
+@@ -952,7 +952,7 @@ static constexpr size_t kUnknownPlatformProcessLimit =
// to indicate failure and std::numeric_limits<size_t>::max() to indicate
// unlimited.
size_t GetPlatformProcessLimit() {
@@ -18,7 +18,7 @@
struct rlimit limit;
if (getrlimit(RLIMIT_NPROC, &limit) != 0)
return kUnknownPlatformProcessLimit;
-@@ -1160,7 +1160,7 @@ class RenderProcessHostImpl::IOThreadHostImpl : public
+@@ -1157,7 +1157,7 @@ class RenderProcessHostImpl::IOThreadHostImpl : public
return;
}
@@ -27,7 +27,7 @@
if (auto font_receiver = receiver.As<font_service::mojom::FontService>()) {
ConnectToFontService(std::move(font_receiver));
return;
-@@ -1254,7 +1254,7 @@ class RenderProcessHostImpl::IOThreadHostImpl : public
+@@ -1251,7 +1251,7 @@ class RenderProcessHostImpl::IOThreadHostImpl : public
std::unique_ptr<service_manager::BinderRegistry> binders_;
mojo::Receiver<mojom::ChildProcessHost> receiver_{this};
@@ -36,7 +36,7 @@
mojo::Remote<media::mojom::VideoEncodeAcceleratorProviderFactory>
video_encode_accelerator_factory_remote_;
ChildThreadTypeSwitcher child_thread_type_switcher_;
-@@ -3331,7 +3331,7 @@ void RenderProcessHostImpl::AppendRendererCommandLine(
+@@ -3328,7 +3328,7 @@ void RenderProcessHostImpl::AppendRendererCommandLine(
base::TimeTicks::UnixEpoch().since_origin().InMicroseconds()));
}
@@ -45,7 +45,7 @@
// Append `kDisableVideoCaptureUseGpuMemoryBuffer` flag if there is no support
// for NV12 GPU memory buffer.
if (switches::IsVideoCaptureUseGpuMemoryBufferEnabled() &&
-@@ -3391,6 +3391,7 @@ void RenderProcessHostImpl::PropagateBrowserCommandLin
+@@ -3388,6 +3388,7 @@ void RenderProcessHostImpl::PropagateBrowserCommandLin
switches::kDisableSpeechAPI,
switches::kDisableThreadedCompositing,
switches::kDisableTouchDragDrop,
diff --git a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__aura.cc b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__aura.cc
index 616ec7e61f28..7d862a1edf98 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__aura.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__aura.cc
@@ -1,4 +1,4 @@
---- content/browser/renderer_host/render_widget_host_view_aura.cc.orig 2023-12-23 12:33:28 UTC
+--- content/browser/renderer_host/render_widget_host_view_aura.cc.orig 2024-02-03 15:42:55 UTC
+++ content/browser/renderer_host/render_widget_host_view_aura.cc
@@ -121,7 +121,7 @@
#include "ui/gfx/gdi_util.h"
@@ -27,7 +27,7 @@
bool RenderWidgetHostViewAura::SetCompositionFromExistingText(
const gfx::Range& range,
const std::vector<ui::ImeTextSpan>& ui_ime_text_spans) {
-@@ -2562,7 +2562,7 @@ bool RenderWidgetHostViewAura::NeedsInputGrab() {
+@@ -2568,7 +2568,7 @@ bool RenderWidgetHostViewAura::NeedsInputGrab() {
}
bool RenderWidgetHostViewAura::NeedsMouseCapture() {
@@ -36,7 +36,7 @@
return NeedsInputGrab();
#else
return false;
-@@ -2745,7 +2745,7 @@ void RenderWidgetHostViewAura::ForwardKeyboardEventWit
+@@ -2751,7 +2751,7 @@ void RenderWidgetHostViewAura::ForwardKeyboardEventWit
if (!target_host)
return;
diff --git a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__aura.h b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__aura.h
index b77efd688359..b4876116b414 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__aura.h
+++ b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__aura.h
@@ -1,6 +1,6 @@
---- content/browser/renderer_host/render_widget_host_view_aura.h.orig 2023-10-13 13:20:35 UTC
+--- content/browser/renderer_host/render_widget_host_view_aura.h.orig 2024-02-03 15:42:55 UTC
+++ content/browser/renderer_host/render_widget_host_view_aura.h
-@@ -257,7 +257,7 @@ class CONTENT_EXPORT RenderWidgetHostViewAura
+@@ -258,7 +258,7 @@ class CONTENT_EXPORT RenderWidgetHostViewAura
ukm::SourceId GetClientSourceForMetrics() const override;
bool ShouldDoLearning() override;
diff --git a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__event__handler.cc b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__event__handler.cc
index d109d25c8aaa..634e7b5df1a8 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__event__handler.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_renderer__host_render__widget__host__view__event__handler.cc
@@ -1,6 +1,6 @@
---- content/browser/renderer_host/render_widget_host_view_event_handler.cc.orig 2023-07-21 09:49:17 UTC
+--- content/browser/renderer_host/render_widget_host_view_event_handler.cc.orig 2024-02-03 15:42:55 UTC
+++ content/browser/renderer_host/render_widget_host_view_event_handler.cc
-@@ -597,7 +597,7 @@ bool RenderWidgetHostViewEventHandler::CanRendererHand
+@@ -599,7 +599,7 @@ bool RenderWidgetHostViewEventHandler::CanRendererHand
if (event->type() == ui::ET_MOUSE_EXITED) {
if (mouse_locked || selection_popup)
return false;
diff --git a/www/ungoogled-chromium/files/patch-content_browser_utility__sandbox__delegate.cc b/www/ungoogled-chromium/files/patch-content_browser_utility__sandbox__delegate.cc
index 4a08305ce7d5..1784f0f436ac 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_utility__sandbox__delegate.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_utility__sandbox__delegate.cc
@@ -1,11 +1,7 @@
---- content/browser/utility_sandbox_delegate.cc.orig 2023-12-23 12:33:28 UTC
+--- content/browser/utility_sandbox_delegate.cc.orig 2024-02-03 15:42:55 UTC
+++ content/browser/utility_sandbox_delegate.cc
-@@ -64,13 +64,13 @@ UtilitySandboxedProcessLauncherDelegate::
- #if BUILDFLAG(ENABLE_PPAPI)
- sandbox_type_ == sandbox::mojom::Sandbox::kPpapi ||
- #endif
--#if BUILDFLAG(IS_FUCHSIA)
-+#if BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_BSD)
+@@ -67,10 +67,10 @@ UtilitySandboxedProcessLauncherDelegate::
+ #if BUILDFLAG(IS_FUCHSIA)
sandbox_type_ == sandbox::mojom::Sandbox::kVideoCapture ||
#endif
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH)
@@ -17,10 +13,10 @@
sandbox_type_ == sandbox::mojom::Sandbox::kHardwareVideoEncoding ||
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
#if BUILDFLAG(IS_CHROMEOS_ASH)
-@@ -125,10 +125,10 @@ ZygoteCommunication* UtilitySandboxedProcessLauncherDe
- // unsandboxed zygote and then apply their actual sandboxes in the forked
+@@ -126,10 +126,10 @@ ZygoteCommunication* UtilitySandboxedProcessLauncherDe
// process upon startup.
if (sandbox_type_ == sandbox::mojom::Sandbox::kNetwork ||
+ sandbox_type_ == sandbox::mojom::Sandbox::kOnDeviceModelExecution ||
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH) || BUILDFLAG(IS_BSD)
sandbox_type_ == sandbox::mojom::Sandbox::kHardwareVideoDecoding ||
diff --git a/www/ungoogled-chromium/files/patch-content_browser_zygote__host_zygote__host__impl__linux.cc b/www/ungoogled-chromium/files/patch-content_browser_zygote__host_zygote__host__impl__linux.cc
index dc208ce38bd5..888867630fc8 100644
--- a/www/ungoogled-chromium/files/patch-content_browser_zygote__host_zygote__host__impl__linux.cc
+++ b/www/ungoogled-chromium/files/patch-content_browser_zygote__host_zygote__host__impl__linux.cc
@@ -1,4 +1,4 @@
---- content/browser/zygote_host/zygote_host_impl_linux.cc.orig 2022-12-02 17:56:32 UTC
+--- content/browser/zygote_host/zygote_host_impl_linux.cc.orig 2024-02-03 15:42:55 UTC
+++ content/browser/zygote_host/zygote_host_impl_linux.cc
@@ -19,8 +19,10 @@
#include "build/chromeos_buildflags.h"
@@ -63,9 +63,9 @@
base::FileHandleMappingVector additional_remapped_fds) {
+#if !BUILDFLAG(IS_BSD)
int fds[2];
- CHECK_EQ(0, socketpair(AF_UNIX, SOCK_SEQPACKET, 0, fds));
+ CHECK_EQ(0, socketpair(AF_UNIX, SOCK_SEQPACKET | SOCK_CLOEXEC, 0, fds));
CHECK(base::UnixDomainSocket::EnableReceiveProcessId(fds[0]));
-@@ -224,9 +235,12 @@ pid_t ZygoteHostImpl::LaunchZygote(
+@@ -225,9 +236,12 @@ pid_t ZygoteHostImpl::LaunchZygote(
AddZygotePid(pid);
return pid;
diff --git a/www/ungoogled-chromium/files/patch-content_child_BUILD.gn b/www/ungoogled-chromium/files/patch-content_child_BUILD.gn
index 81140ab6a872..69b71cc77ea3 100644
--- a/www/ungoogled-chromium/files/patch-content_child_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-content_child_BUILD.gn
@@ -1,6 +1,6 @@
---- content/child/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- content/child/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ content/child/BUILD.gn
-@@ -129,6 +129,13 @@ target(link_target_type, "child") {
+@@ -131,6 +131,13 @@ target(link_target_type, "child") {
]
}
diff --git a/www/ungoogled-chromium/files/patch-content_common_BUILD.gn b/www/ungoogled-chromium/files/patch-content_common_BUILD.gn
index c7e5e189815c..1574526bed99 100644
--- a/www/ungoogled-chromium/files/patch-content_common_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-content_common_BUILD.gn
@@ -1,6 +1,41 @@
---- content/common/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- content/common/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ content/common/BUILD.gn
-@@ -438,6 +438,9 @@ if (is_linux || is_chromeos) {
+@@ -404,20 +404,29 @@ source_set("common") {
+ }
+
+ if (is_linux || is_chromeos) {
+- sources += [
+- "gpu_pre_sandbox_hook_linux.cc",
+- "gpu_pre_sandbox_hook_linux.h",
+- ]
++ if (is_bsd) {
++ sources += [
++ "gpu_pre_sandbox_hook_bsd.cc",
++ "gpu_pre_sandbox_hook_bsd.h",
++ ]
++ } else {
++ sources += [
++ "gpu_pre_sandbox_hook_linux.cc",
++ "gpu_pre_sandbox_hook_linux.h",
++ ]
++ }
+ public_deps += [ "//sandbox/policy" ]
+ deps += [
+ ":sandbox_support_linux",
+ ":set_process_title_linux",
+ "//media/gpu:buildflags",
+ "//sandbox/linux:sandbox_services",
+- "//sandbox/linux:seccomp_bpf",
+ "//sandbox/policy:chromecast_sandbox_allowlist_buildflags",
+ "//third_party/fontconfig",
+ ]
++ if (use_seccomp_bpf) {
++ deps += [ "//sandbox/linux:seccomp_bpf" ]
++ }
+ if (use_v4l2_codec) {
+ deps += [ "//media/gpu/v4l2" ]
+ }
+@@ -487,6 +496,9 @@ if (is_linux || is_chromeos) {
public = [ "set_process_title_linux.h" ]
sources = [ "set_process_title_linux.cc" ]
deps = [ "//base" ]
diff --git a/www/ungoogled-chromium/files/patch-content_common_features.cc b/www/ungoogled-chromium/files/patch-content_common_features.cc
index 60982604be55..4c542f897601 100644
--- a/www/ungoogled-chromium/files/patch-content_common_features.cc
+++ b/www/ungoogled-chromium/files/patch-content_common_features.cc
@@ -1,6 +1,6 @@
---- content/common/features.cc.orig 2023-12-23 12:33:28 UTC
+--- content/common/features.cc.orig 2024-02-03 15:42:55 UTC
+++ content/common/features.cc
-@@ -154,7 +154,7 @@ BASE_FEATURE(kEnableBackForwardCacheForScreenReader,
+@@ -174,7 +174,7 @@ BASE_FEATURE(kEnableBackForwardCacheForScreenReader,
base::FEATURE_ENABLED_BY_DEFAULT);
// Enables error reporting for JS errors inside DevTools frontend host
@@ -9,7 +9,7 @@
BASE_FEATURE(kEnableDevToolsJsErrorReporting,
"EnableDevToolsJsErrorReporting",
base::FEATURE_DISABLED_BY_DEFAULT);
-@@ -246,7 +246,7 @@ BASE_FEATURE(kGroupNIKByJoiningOrigin,
+@@ -256,7 +256,7 @@ BASE_FEATURE(kGroupNIKByJoiningOrigin,
// process and having browser process handle adjusting thread properties (nice
// value, c-group, latency sensitivity...) for children which have sandbox
// restrictions.
diff --git a/www/ungoogled-chromium/files/patch-content_common_features.h b/www/ungoogled-chromium/files/patch-content_common_features.h
index 3dca36b0de1f..eb03d5218987 100644
--- a/www/ungoogled-chromium/files/patch-content_common_features.h
+++ b/www/ungoogled-chromium/files/patch-content_common_features.h
@@ -1,8 +1,8 @@
---- content/common/features.h.orig 2023-12-23 12:33:28 UTC
+--- content/common/features.h.orig 2024-02-03 15:42:55 UTC
+++ content/common/features.h
-@@ -37,7 +37,7 @@ CONTENT_EXPORT BASE_DECLARE_FEATURE(kDeviceMonitorMac)
+@@ -38,7 +38,7 @@ CONTENT_EXPORT BASE_DECLARE_FEATURE(kDeviceMonitorMac)
+ #endif
CONTENT_EXPORT BASE_DECLARE_FEATURE(kDocumentPolicyNegotiation);
- CONTENT_EXPORT BASE_DECLARE_FEATURE(kEnumerateDevicesHideDeviceIDs);
CONTENT_EXPORT BASE_DECLARE_FEATURE(kEnableBackForwardCacheForScreenReader);
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
diff --git a/www/ungoogled-chromium/files/patch-content_gpu_gpu__sandbox__hook__bsd.cc b/www/ungoogled-chromium/files/patch-content_common_gpu__pre__sandbox__hook__bsd.cc
index 6cb80ac48962..1524c25783b1 100644
--- a/www/ungoogled-chromium/files/patch-content_gpu_gpu__sandbox__hook__bsd.cc
+++ b/www/ungoogled-chromium/files/patch-content_common_gpu__pre__sandbox__hook__bsd.cc
@@ -1,11 +1,11 @@
---- content/gpu/gpu_sandbox_hook_bsd.cc.orig 2023-05-05 12:12:41 UTC
-+++ content/gpu/gpu_sandbox_hook_bsd.cc
+--- content/common/gpu_pre_sandbox_hook_bsd.cc.orig 2024-02-03 15:42:55 UTC
++++ content/common/gpu_pre_sandbox_hook_bsd.cc
@@ -0,0 +1,69 @@
+// Copyright 2023 The Chromium Authors
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
-+#include "content/gpu/gpu_sandbox_hook_bsd.h"
++#include "content/common/gpu_pre_sandbox_hook_bsd.h"
+
+#include <dlfcn.h>
+#include <errno.h>
@@ -59,7 +59,7 @@
+
+} // namespace
+
-+bool GpuProcessPreSandboxHook(sandbox::policy::SandboxLinux::Options options) {
++bool GpuPreSandboxHook(sandbox::policy::SandboxLinux::Options options) {
+ if (!LoadLibrariesForGpu(options))
+ return false;
+
diff --git a/www/ungoogled-chromium/files/patch-content_common_gpu__pre__sandbox__hook__bsd.h b/www/ungoogled-chromium/files/patch-content_common_gpu__pre__sandbox__hook__bsd.h
new file mode 100644
index 000000000000..581ba1cfe5e4
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-content_common_gpu__pre__sandbox__hook__bsd.h
@@ -0,0 +1,23 @@
+--- content/common/gpu_pre_sandbox_hook_bsd.h.orig 2024-02-03 15:42:55 UTC
++++ content/common/gpu_pre_sandbox_hook_bsd.h
+@@ -0,0 +1,20 @@
++// Copyright 2017 The Chromium Authors
++// Use of this source code is governed by a BSD-style license that can be
++// found in the LICENSE file.
++
++#ifndef CONTENT_COMMON_GPU_PRE_SANDBOX_HOOK_BSD_H_
++#define CONTENT_COMMON_GPU_PRE_SANDBOX_HOOK_BSD_H_
++
++#include "base/component_export.h"
++#include "sandbox/policy/sandbox.h"
++
++namespace content {
++
++// A pre-sandbox hook to use on Linux-based systems in sandboxed processes that
++// require general GPU usage.
++COMPONENT_EXPORT(GPU_PRE_SANDBOX_HOOK)
++bool GpuPreSandboxHook(sandbox::policy::SandboxLinux::Options options);
++
++} // namespace content
++
++#endif // CONTENT_COMMON_GPU_PRE_SANDBOX_HOOK_BSD_H_
diff --git a/www/ungoogled-chromium/files/patch-content_gpu_BUILD.gn b/www/ungoogled-chromium/files/patch-content_gpu_BUILD.gn
deleted file mode 100644
index 98b2322a6551..000000000000
--- a/www/ungoogled-chromium/files/patch-content_gpu_BUILD.gn
+++ /dev/null
@@ -1,20 +0,0 @@
---- content/gpu/BUILD.gn.orig 2023-06-05 19:39:05 UTC
-+++ content/gpu/BUILD.gn
-@@ -104,6 +104,17 @@ target(link_target_type, "gpu_sources") {
- }
- }
-
-+ if (is_bsd) {
-+ sources -= [
-+ "gpu_sandbox_hook_linux.cc",
-+ "gpu_sandbox_hook_linux.h",
-+ ]
-+ sources += [
-+ "gpu_sandbox_hook_bsd.cc",
-+ "gpu_sandbox_hook_bsd.h",
-+ ]
-+ }
-+
- if (is_chromeos_ash) {
- deps += [
- "//components/services/font/public/cpp",
diff --git a/www/ungoogled-chromium/files/patch-content_gpu_gpu__main.cc b/www/ungoogled-chromium/files/patch-content_gpu_gpu__main.cc
index 91ec5755fa97..34ee29699e2c 100644
--- a/www/ungoogled-chromium/files/patch-content_gpu_gpu__main.cc
+++ b/www/ungoogled-chromium/files/patch-content_gpu_gpu__main.cc
@@ -1,4 +1,4 @@
---- content/gpu/gpu_main.cc.orig 2023-12-23 12:33:28 UTC
+--- content/gpu/gpu_main.cc.orig 2024-02-03 15:42:55 UTC
+++ content/gpu/gpu_main.cc
@@ -91,7 +91,7 @@
#include "sandbox/win/src/sandbox.h"
@@ -7,7 +7,7 @@
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
#include "content/child/sandboxed_process_thread_type_handler.h"
- #include "content/gpu/gpu_sandbox_hook_linux.h"
+ #include "content/common/gpu_pre_sandbox_hook_linux.h"
#include "sandbox/policy/linux/sandbox_linux.h"
@@ -114,7 +114,7 @@ namespace content {
@@ -27,7 +27,7 @@
return StartSandboxLinux(watchdog_thread, gpu_info, gpu_prefs);
#elif BUILDFLAG(IS_WIN)
return StartSandboxWindows(sandbox_info_);
-@@ -289,7 +289,7 @@ int GpuMain(MainFunctionParams parameters) {
+@@ -291,7 +291,7 @@ int GpuMain(MainFunctionParams parameters) {
std::make_unique<base::SingleThreadTaskExecutor>(
gpu_preferences.message_pump_type);
}
@@ -36,7 +36,7 @@
#error "Unsupported Linux platform."
#elif BUILDFLAG(IS_MAC)
// Cross-process CoreAnimation requires a CFRunLoop to function at all, and
-@@ -328,7 +328,8 @@ int GpuMain(MainFunctionParams parameters) {
+@@ -330,7 +330,8 @@ int GpuMain(MainFunctionParams parameters) {
// before it.
InitializeSkia();
@@ -46,7 +46,7 @@
// Thread type delegate of the process should be registered before
// first thread type change in ChildProcess constructor.
// It also needs to be registered before the process has multiple threads,
-@@ -436,7 +437,7 @@ int GpuMain(MainFunctionParams parameters) {
+@@ -438,7 +439,7 @@ int GpuMain(MainFunctionParams parameters) {
namespace {
@@ -55,7 +55,7 @@
bool StartSandboxLinux(gpu::GpuWatchdogThread* watchdog_thread,
const gpu::GPUInfo* gpu_info,
const gpu::GpuPreferences& gpu_prefs) {
-@@ -476,7 +477,7 @@ bool StartSandboxLinux(gpu::GpuWatchdogThread* watchdo
+@@ -478,7 +479,7 @@ bool StartSandboxLinux(gpu::GpuWatchdogThread* watchdo
sandbox_options.accelerated_video_encode_enabled =
!gpu_prefs.disable_accelerated_video_encode;
diff --git a/www/ungoogled-chromium/files/patch-content_gpu_gpu__sandbox__hook__bsd.h b/www/ungoogled-chromium/files/patch-content_gpu_gpu__sandbox__hook__bsd.h
deleted file mode 100644
index 253d32561200..000000000000
--- a/www/ungoogled-chromium/files/patch-content_gpu_gpu__sandbox__hook__bsd.h
+++ /dev/null
@@ -1,19 +0,0 @@
---- content/gpu/gpu_sandbox_hook_bsd.h.orig 2023-05-05 12:12:41 UTC
-+++ content/gpu/gpu_sandbox_hook_bsd.h
-@@ -0,0 +1,16 @@
-+// Copyright 2023 The Chromium Authors
-+// Use of this source code is governed by a BSD-style license that can be
-+// found in the LICENSE file.
-+
-+#ifndef CONTENT_GPU_GPU_SANDBOX_HOOK_BSD_H_
-+#define CONTENT_GPU_GPU_SANDBOX_HOOK_BSD_H_
-+
-+#include "sandbox/policy/sandbox.h"
-+
-+namespace content {
-+
-+bool GpuProcessPreSandboxHook(sandbox::policy::SandboxLinux::Options options);
-+
-+} // namespace content
-+
-+#endif // CONTENT_GPU_GPU_SANDBOX_HOOK_BSD_H_
diff --git a/www/ungoogled-chromium/files/patch-content_public_browser_content__browser__client.cc b/www/ungoogled-chromium/files/patch-content_public_browser_content__browser__client.cc
index 76b11df472f9..b708c0c3f47a 100644
--- a/www/ungoogled-chromium/files/patch-content_public_browser_content__browser__client.cc
+++ b/www/ungoogled-chromium/files/patch-content_public_browser_content__browser__client.cc
@@ -1,6 +1,6 @@
---- content/public/browser/content_browser_client.cc.orig 2023-12-23 12:33:28 UTC
+--- content/public/browser/content_browser_client.cc.orig 2024-02-03 15:42:55 UTC
+++ content/public/browser/content_browser_client.cc
-@@ -1273,7 +1273,7 @@ bool ContentBrowserClient::ShouldRunOutOfProcessSystem
+@@ -1286,7 +1286,7 @@ bool ContentBrowserClient::ShouldRunOutOfProcessSystem
// that can be adequately sandboxed.
// Currently Android's network service will not run out of process or sandboxed,
// so OutOfProcessSystemDnsResolution is not currently enabled on Android.
diff --git a/www/ungoogled-chromium/files/patch-content_public_common_content__features.cc b/www/ungoogled-chromium/files/patch-content_public_common_content__features.cc
index 2a98684ad6fe..2e2dd2c67ad5 100644
--- a/www/ungoogled-chromium/files/patch-content_public_common_content__features.cc
+++ b/www/ungoogled-chromium/files/patch-content_public_common_content__features.cc
@@ -1,4 +1,4 @@
---- content/public/common/content_features.cc.orig 2023-12-23 12:33:28 UTC
+--- content/public/common/content_features.cc.orig 2024-02-03 15:42:55 UTC
+++ content/public/common/content_features.cc
@@ -40,7 +40,7 @@ BASE_FEATURE(kAudioServiceOutOfProcess,
"AudioServiceOutOfProcess",
@@ -18,7 +18,7 @@
base::FEATURE_ENABLED_BY_DEFAULT
#else
base::FEATURE_DISABLED_BY_DEFAULT
-@@ -1170,7 +1170,7 @@ BASE_FEATURE(kWebAssemblyTiering,
+@@ -1190,7 +1190,7 @@ BASE_FEATURE(kWebAssemblyTiering,
BASE_FEATURE(kWebAssemblyTrapHandler,
"WebAssemblyTrapHandler",
#if ((BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_WIN) || \
@@ -27,7 +27,7 @@
defined(ARCH_CPU_X86_64)) || \
(BUILDFLAG(IS_MAC) && defined(ARCH_CPU_ARM64))
base::FEATURE_ENABLED_BY_DEFAULT
-@@ -1212,7 +1212,11 @@ BASE_FEATURE(kWebUICodeCache,
+@@ -1232,7 +1232,11 @@ BASE_FEATURE(kWebUICodeCache,
// Controls whether the WebUSB API is enabled:
// https://wicg.github.io/webusb
diff --git a/www/ungoogled-chromium/files/patch-content_public_common_content__switches.cc b/www/ungoogled-chromium/files/patch-content_public_common_content__switches.cc
index 8e88023a0de9..1012c1bb664a 100644
--- a/www/ungoogled-chromium/files/patch-content_public_common_content__switches.cc
+++ b/www/ungoogled-chromium/files/patch-content_public_common_content__switches.cc
@@ -1,4 +1,4 @@
---- content/public/common/content_switches.cc.orig 2023-12-23 12:33:28 UTC
+--- content/public/common/content_switches.cc.orig 2024-02-03 15:42:55 UTC
+++ content/public/common/content_switches.cc
@@ -364,6 +364,8 @@ const char kEnableIsolatedWebAppsInRenderer[] =
// builds.
@@ -9,7 +9,7 @@
// Enables the type, downlinkMax attributes of the NetInfo API. Also, enables
// triggering of change attribute of the NetInfo API when there is a change in
// the connection type.
-@@ -994,7 +996,7 @@ const char kEnableAutomation[] = "enable-automation";
+@@ -1007,7 +1009,7 @@ const char kPreventResizingContentsForTesting[] =
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-content_public_common_content__switches.h b/www/ungoogled-chromium/files/patch-content_public_common_content__switches.h
index c9ceac0379ff..94cf18dd9a31 100644
--- a/www/ungoogled-chromium/files/patch-content_public_common_content__switches.h
+++ b/www/ungoogled-chromium/files/patch-content_public_common_content__switches.h
@@ -1,4 +1,4 @@
---- content/public/common/content_switches.h.orig 2023-12-23 12:33:28 UTC
+--- content/public/common/content_switches.h.orig 2024-02-03 15:42:55 UTC
+++ content/public/common/content_switches.h
@@ -115,6 +115,7 @@ CONTENT_EXPORT extern const char kEnableGpuMemoryBuffe
CONTENT_EXPORT extern const char kEnableIsolatedWebAppsInRenderer[];
@@ -8,7 +8,7 @@
CONTENT_EXPORT extern const char kEnableNetworkInformationDownlinkMax[];
CONTENT_EXPORT extern const char kEnableCanvas2DLayers[];
CONTENT_EXPORT extern const char kEnablePluginPlaceholderTesting[];
-@@ -265,7 +266,7 @@ CONTENT_EXPORT extern const char kRendererWaitForJavaD
+@@ -269,7 +270,7 @@ CONTENT_EXPORT extern const char kPreventResizingConte
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-content_renderer_render__process__impl.cc b/www/ungoogled-chromium/files/patch-content_renderer_render__process__impl.cc
index d3b1d49d98f7..9dffb196f1e7 100644
--- a/www/ungoogled-chromium/files/patch-content_renderer_render__process__impl.cc
+++ b/www/ungoogled-chromium/files/patch-content_renderer_render__process__impl.cc
@@ -1,20 +1,11 @@
---- content/renderer/render_process_impl.cc.orig 2023-12-23 12:33:28 UTC
+--- content/renderer/render_process_impl.cc.orig 2024-02-03 15:42:55 UTC
+++ content/renderer/render_process_impl.cc
-@@ -47,7 +47,7 @@
- #if BUILDFLAG(IS_WIN)
- #include "base/win/win_util.h"
- #endif
--#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && defined(ARCH_CPU_X86_64)
-+#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)) && defined(ARCH_CPU_X86_64)
- #include "v8/include/v8-wasm-trap-handler-posix.h"
- #endif
+@@ -44,7 +44,7 @@
+ #include "third_party/blink/public/web/web_frame.h"
+ #include "v8/include/v8-initialization.h"
-@@ -224,7 +224,7 @@ RenderProcessImpl::RenderProcessImpl()
- v8::V8::SetFlagsFromString(kSABPerContextFlag, sizeof(kSABPerContextFlag));
- }
-
--#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && defined(ARCH_CPU_X86_64)
-+#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)) && defined(ARCH_CPU_X86_64)
- if (base::FeatureList::IsEnabled(features::kWebAssemblyTrapHandler)) {
- base::CommandLine* const command_line =
- base::CommandLine::ForCurrentProcess();
+-#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)) && \
++#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)) && \
+ (defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_ARM64))
+ #define ENABLE_WEB_ASSEMBLY_TRAP_HANDLER_LINUX
+ #endif
diff --git a/www/ungoogled-chromium/files/patch-content_renderer_render__thread__impl.cc b/www/ungoogled-chromium/files/patch-content_renderer_render__thread__impl.cc
index a1872fb8a085..2e03ee417f95 100644
--- a/www/ungoogled-chromium/files/patch-content_renderer_render__thread__impl.cc
+++ b/www/ungoogled-chromium/files/patch-content_renderer_render__thread__impl.cc
@@ -1,6 +1,6 @@
---- content/renderer/render_thread_impl.cc.orig 2023-12-23 12:33:28 UTC
+--- content/renderer/render_thread_impl.cc.orig 2024-02-03 15:42:55 UTC
+++ content/renderer/render_thread_impl.cc
-@@ -205,6 +205,8 @@
+@@ -206,6 +206,8 @@
#if BUILDFLAG(IS_APPLE)
#include <malloc/malloc.h>
@@ -9,7 +9,7 @@
#else
#include <malloc.h>
#endif
-@@ -1020,7 +1022,7 @@ media::GpuVideoAcceleratorFactories* RenderThreadImpl:
+@@ -1013,7 +1015,7 @@ media::GpuVideoAcceleratorFactories* RenderThreadImpl:
kGpuStreamIdMedia, kGpuStreamPriorityMedia);
const bool enable_video_decode_accelerator =
@@ -18,7 +18,7 @@
base::FeatureList::IsEnabled(media::kVaapiVideoDecodeLinux) &&
#endif // BUILDFLAG(IS_LINUX)
!cmd_line->HasSwitch(switches::kDisableAcceleratedVideoDecode) &&
-@@ -1029,7 +1031,7 @@ media::GpuVideoAcceleratorFactories* RenderThreadImpl:
+@@ -1022,7 +1024,7 @@ media::GpuVideoAcceleratorFactories* RenderThreadImpl:
gpu::kGpuFeatureStatusEnabled);
const bool enable_video_encode_accelerator =
@@ -27,7 +27,7 @@
base::FeatureList::IsEnabled(media::kVaapiVideoEncodeLinux) &&
#else
!cmd_line->HasSwitch(switches::kDisableAcceleratedVideoEncode) &&
-@@ -1807,7 +1809,7 @@ std::unique_ptr<CodecFactory> RenderThreadImpl::Create
+@@ -1797,7 +1799,7 @@ std::unique_ptr<CodecFactory> RenderThreadImpl::Create
bool enable_video_encode_accelerator) {
mojo::PendingRemote<media::mojom::VideoEncodeAcceleratorProvider>
vea_provider;
diff --git a/www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.cc b/www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.cc
index 582dac289c64..dc2e5301031b 100644
--- a/www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.cc
+++ b/www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.cc
@@ -1,4 +1,4 @@
---- content/renderer/renderer_blink_platform_impl.cc.orig 2023-12-23 12:33:28 UTC
+--- content/renderer/renderer_blink_platform_impl.cc.orig 2024-02-03 15:42:55 UTC
+++ content/renderer/renderer_blink_platform_impl.cc
@@ -113,7 +113,7 @@
@@ -9,10 +9,10 @@
#include "content/child/child_process_sandbox_support_impl_linux.h"
#include "content/child/sandboxed_process_thread_type_handler.h"
#endif
-@@ -182,13 +182,13 @@ RendererBlinkPlatformImpl::RendererBlinkPlatformImpl(
- sudden_termination_disables_(0),
+@@ -183,13 +183,13 @@ RendererBlinkPlatformImpl::RendererBlinkPlatformImpl(
is_locked_to_site_(false),
- main_thread_scheduler_(main_thread_scheduler) {
+ main_thread_scheduler_(main_thread_scheduler),
+ next_frame_sink_id_(uint32_t{std::numeric_limits<int32_t>::max()} + 1) {
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
sk_sp<font_service::FontLoader> font_loader;
@@ -25,7 +25,7 @@
mojo::PendingRemote<font_service::mojom::FontService> font_service;
RenderThreadImpl::current()->BindHostReceiver(
font_service.InitWithNewPipeAndPassReceiver());
-@@ -197,7 +197,7 @@ RendererBlinkPlatformImpl::RendererBlinkPlatformImpl(
+@@ -198,7 +198,7 @@ RendererBlinkPlatformImpl::RendererBlinkPlatformImpl(
#endif
}
@@ -34,7 +34,7 @@
if (sandboxEnabled()) {
#if BUILDFLAG(IS_MAC)
sandbox_support_ = std::make_unique<WebSandboxSupportMac>();
-@@ -258,7 +258,7 @@ void RendererBlinkPlatformImpl::SetThreadType(base::Pl
+@@ -259,7 +259,7 @@ void RendererBlinkPlatformImpl::SetThreadType(base::Pl
#endif
blink::WebSandboxSupport* RendererBlinkPlatformImpl::GetSandboxSupport() {
diff --git a/www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.h b/www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.h
index 933e6c843204..a3fbb683138d 100644
--- a/www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.h
+++ b/www/ungoogled-chromium/files/patch-content_renderer_renderer__blink__platform__impl.h
@@ -1,6 +1,6 @@
---- content/renderer/renderer_blink_platform_impl.h.orig 2023-12-23 12:33:28 UTC
+--- content/renderer/renderer_blink_platform_impl.h.orig 2024-02-03 15:42:55 UTC
+++ content/renderer/renderer_blink_platform_impl.h
-@@ -234,7 +234,7 @@ class CONTENT_EXPORT RendererBlinkPlatformImpl : publi
+@@ -244,7 +244,7 @@ class CONTENT_EXPORT RendererBlinkPlatformImpl : publi
void Collect3DContextInformation(blink::Platform::GraphicsInfo* gl_info,
const gpu::GPUInfo& gpu_info) const;
diff --git a/www/ungoogled-chromium/files/patch-content_shell_BUILD.gn b/www/ungoogled-chromium/files/patch-content_shell_BUILD.gn
index 228195a4f068..07d22da58822 100644
--- a/www/ungoogled-chromium/files/patch-content_shell_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-content_shell_BUILD.gn
@@ -1,6 +1,6 @@
---- content/shell/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- content/shell/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ content/shell/BUILD.gn
-@@ -1023,7 +1023,7 @@ group("content_shell_crash_test") {
+@@ -1027,7 +1027,7 @@ group("content_shell_crash_test") {
if (is_win) {
data_deps += [ "//build/win:copy_cdb_to_output" ]
}
diff --git a/www/ungoogled-chromium/files/patch-content_test_BUILD.gn b/www/ungoogled-chromium/files/patch-content_test_BUILD.gn
index 24262177f7b3..254f1cea1c82 100644
--- a/www/ungoogled-chromium/files/patch-content_test_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-content_test_BUILD.gn
@@ -1,6 +1,6 @@
---- content/test/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- content/test/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ content/test/BUILD.gn
-@@ -2269,7 +2269,7 @@ static_library("run_all_unittests") {
+@@ -2265,7 +2265,7 @@ static_library("run_all_unittests") {
":test_support",
"//base/test:test_support",
]
diff --git a/www/ungoogled-chromium/files/patch-content_utility_services.cc b/www/ungoogled-chromium/files/patch-content_utility_services.cc
index 9ee56dc42d26..1db9b73f2744 100644
--- a/www/ungoogled-chromium/files/patch-content_utility_services.cc
+++ b/www/ungoogled-chromium/files/patch-content_utility_services.cc
@@ -1,4 +1,4 @@
---- content/utility/services.cc.orig 2023-12-23 12:33:28 UTC
+--- content/utility/services.cc.orig 2024-02-03 15:42:55 UTC
+++ content/utility/services.cc
@@ -67,7 +67,7 @@
extern sandbox::TargetServices* g_utility_target_services;
@@ -27,7 +27,7 @@
#include "media/capture/capture_switches.h"
#include "services/viz/public/cpp/gpu/gpu.h"
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN)
-@@ -224,7 +224,7 @@ auto RunAudio(mojo::PendingReceiver<audio::mojom::Audi
+@@ -225,7 +225,7 @@ auto RunAudio(mojo::PendingReceiver<audio::mojom::Audi
<< "task_policy_set TASK_QOS_POLICY";
#endif
@@ -36,7 +36,7 @@
auto* command_line = base::CommandLine::ForCurrentProcess();
if (sandbox::policy::SandboxTypeFromCommandLine(*command_line) ==
sandbox::mojom::Sandbox::kNoSandbox) {
-@@ -306,7 +306,7 @@ auto RunVideoCapture(
+@@ -307,7 +307,7 @@ auto RunVideoCapture(
mojo::PendingReceiver<video_capture::mojom::VideoCaptureService> receiver) {
auto service = std::make_unique<UtilityThreadVideoCaptureServiceImpl>(
std::move(receiver), base::SingleThreadTaskRunner::GetCurrentDefault());
@@ -45,7 +45,7 @@
if (switches::IsVideoCaptureUseGpuMemoryBufferEnabled()) {
mojo::PendingRemote<viz::mojom::Gpu> remote_gpu;
content::UtilityThread::Get()->BindHostReceiver(
-@@ -345,7 +345,7 @@ auto RunOOPArcVideoAcceleratorFactoryService(
+@@ -346,7 +346,7 @@ auto RunOOPArcVideoAcceleratorFactoryService(
#endif // BUILDFLAG(IS_CHROMEOS_ASH) && (BUILDFLAG(USE_VAAPI) ||
// BUILDFLAG(USE_V4L2_CODEC))
@@ -54,7 +54,7 @@
(BUILDFLAG(USE_VAAPI) || BUILDFLAG(USE_V4L2_CODEC))
auto RunStableVideoDecoderFactoryProcessService(
mojo::PendingReceiver<
-@@ -356,7 +356,7 @@ auto RunStableVideoDecoderFactoryProcessService(
+@@ -357,7 +357,7 @@ auto RunStableVideoDecoderFactoryProcessService(
#endif // (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH)) &&
// (BUILDFLAG(USE_VAAPI) || BUILDFLAG(USE_V4L2_CODEC))
@@ -63,7 +63,7 @@
auto RunVideoEncodeAcceleratorProviderFactory(
mojo::PendingReceiver<media::mojom::VideoEncodeAcceleratorProviderFactory>
receiver) {
-@@ -379,7 +379,7 @@ void RegisterIOThreadServices(mojo::ServiceFactory& se
+@@ -380,7 +380,7 @@ void RegisterIOThreadServices(mojo::ServiceFactory& se
// loop of type IO that can get notified when pipes have data.
services.Add(RunNetworkService);
diff --git a/www/ungoogled-chromium/files/patch-content_utility_utility__main.cc b/www/ungoogled-chromium/files/patch-content_utility_utility__main.cc
index 4239a3025a30..83df8019fd70 100644
--- a/www/ungoogled-chromium/files/patch-content_utility_utility__main.cc
+++ b/www/ungoogled-chromium/files/patch-content_utility_utility__main.cc
@@ -1,6 +1,6 @@
---- content/utility/utility_main.cc.orig 2023-12-23 12:33:28 UTC
+--- content/utility/utility_main.cc.orig 2024-02-03 15:42:55 UTC
+++ content/utility/utility_main.cc
-@@ -37,7 +37,7 @@
+@@ -36,17 +36,21 @@
#include "third_party/icu/source/common/unicode/unistr.h"
#include "third_party/icu/source/i18n/unicode/timezone.h"
@@ -9,7 +9,11 @@
#include "base/file_descriptor_store.h"
#include "base/files/file_util.h"
#include "base/pickle.h"
-@@ -46,7 +46,9 @@
+ #include "content/child/sandboxed_process_thread_type_handler.h"
++#if BUILDFLAG(IS_LINUX)
+ #include "content/common/gpu_pre_sandbox_hook_linux.h"
++#endif
+ #include "content/public/common/content_descriptor_keys.h"
#include "content/utility/speech/speech_recognition_sandbox_hook_linux.h"
#include "gpu/config/gpu_info_collector.h"
#include "media/gpu/sandbox/hardware_video_encoding_sandbox_hook_linux.h"
@@ -19,7 +23,7 @@
#include "services/audio/audio_sandbox_hook_linux.h"
#include "services/network/network_sandbox_hook_linux.h"
// gn check is not smart enough to realize that this include only applies to
-@@ -58,10 +60,14 @@
+@@ -58,10 +62,15 @@
#endif
#endif
@@ -30,12 +34,13 @@
+#if BUILDFLAG(IS_BSD)
+#include "sandbox/policy/sandbox.h"
++#include "content/common/gpu_pre_sandbox_hook_bsd.h"
+#endif
+
#if BUILDFLAG(IS_CHROMEOS_ASH)
#include "chromeos/ash/components/assistant/buildflags.h"
#include "chromeos/ash/services/ime/ime_sandbox_hook.h"
-@@ -73,7 +79,7 @@
+@@ -73,7 +82,7 @@
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
#if (BUILDFLAG(ENABLE_SCREEN_AI_SERVICE) && \
@@ -44,7 +49,7 @@
#include "components/services/screen_ai/sandbox/screen_ai_sandbox_hook_linux.h" // nogncheck
#endif
-@@ -99,7 +105,7 @@ namespace content {
+@@ -99,7 +108,7 @@ namespace content {
namespace {
@@ -53,7 +58,7 @@
std::vector<std::string> GetNetworkContextsParentDirectories() {
base::MemoryMappedFile::Region region;
base::ScopedFD read_pipe_fd = base::FileDescriptorStore::GetInstance().TakeFD(
-@@ -127,7 +133,7 @@ std::vector<std::string> GetNetworkContextsParentDirec
+@@ -127,7 +136,7 @@ std::vector<std::string> GetNetworkContextsParentDirec
bool ShouldUseAmdGpuPolicy(sandbox::mojom::Sandbox sandbox_type) {
const bool obtain_gpu_info =
@@ -62,7 +67,7 @@
sandbox_type == sandbox::mojom::Sandbox::kHardwareVideoDecoding ||
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS_ASH)
sandbox_type == sandbox::mojom::Sandbox::kHardwareVideoEncoding;
-@@ -248,7 +254,8 @@ int UtilityMain(MainFunctionParams parameters) {
+@@ -250,7 +259,8 @@ int UtilityMain(MainFunctionParams parameters) {
}
}
@@ -72,7 +77,7 @@
// Thread type delegate of the process should be registered before
// first thread type change in ChildProcess constructor.
// It also needs to be registered before the process has multiple threads,
-@@ -259,7 +266,7 @@ int UtilityMain(MainFunctionParams parameters) {
+@@ -261,7 +271,7 @@ int UtilityMain(MainFunctionParams parameters) {
}
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
@@ -81,7 +86,7 @@
// Initializes the sandbox before any threads are created.
// TODO(jorgelo): move this after GTK initialization when we enable a strict
// Seccomp-BPF policy.
-@@ -288,7 +295,7 @@ int UtilityMain(MainFunctionParams parameters) {
+@@ -296,7 +306,7 @@ int UtilityMain(MainFunctionParams parameters) {
pre_sandbox_hook = base::BindOnce(&screen_ai::ScreenAIPreSandboxHook);
break;
#endif
@@ -90,15 +95,15 @@
case sandbox::mojom::Sandbox::kHardwareVideoDecoding:
pre_sandbox_hook =
base::BindOnce(&media::HardwareVideoDecodingPreSandboxHook);
-@@ -315,6 +322,7 @@ int UtilityMain(MainFunctionParams parameters) {
+@@ -323,6 +333,7 @@ int UtilityMain(MainFunctionParams parameters) {
default:
break;
}
+#if !BUILDFLAG(IS_BSD)
if (!sandbox::policy::IsUnsandboxedSandboxType(sandbox_type) &&
(parameters.zygote_child || !pre_sandbox_hook.is_null())) {
- sandbox::policy::SandboxLinux::Options sandbox_options;
-@@ -323,6 +331,11 @@ int UtilityMain(MainFunctionParams parameters) {
+ sandbox_options.use_amd_specific_policies =
+@@ -330,6 +341,11 @@ int UtilityMain(MainFunctionParams parameters) {
sandbox::policy::Sandbox::Initialize(
sandbox_type, std::move(pre_sandbox_hook), sandbox_options);
}
diff --git a/www/ungoogled-chromium/files/patch-extensions_common_api_runtime.json b/www/ungoogled-chromium/files/patch-extensions_common_api_runtime.json
index 594e9fa1e44e..2145cb725a6f 100644
--- a/www/ungoogled-chromium/files/patch-extensions_common_api_runtime.json
+++ b/www/ungoogled-chromium/files/patch-extensions_common_api_runtime.json
@@ -1,6 +1,6 @@
---- extensions/common/api/runtime.json.orig 2023-09-17 07:59:53 UTC
+--- extensions/common/api/runtime.json.orig 2024-02-03 15:42:55 UTC
+++ extensions/common/api/runtime.json
-@@ -89,6 +89,7 @@
+@@ -86,6 +86,7 @@
{"name": "cros", "description": "Specifies the Chrome operating system."},
{"name": "linux", "description": "Specifies the Linux operating system."},
{"name": "openbsd", "description": "Specifies the OpenBSD operating system."},
diff --git a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_dawn__context__provider.cc b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_dawn__context__provider.cc
index 0665bdab9d2d..0ac84acbd562 100644
--- a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_dawn__context__provider.cc
+++ b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_dawn__context__provider.cc
@@ -1,11 +1,11 @@
---- gpu/command_buffer/service/dawn_context_provider.cc.orig 2023-12-23 12:33:28 UTC
+--- gpu/command_buffer/service/dawn_context_provider.cc.orig 2024-02-03 15:42:55 UTC
+++ gpu/command_buffer/service/dawn_context_provider.cc
-@@ -178,7 +178,7 @@ wgpu::BackendType DawnContextProvider::GetDefaultBacke
+@@ -186,7 +186,7 @@ wgpu::BackendType DawnContextProvider::GetDefaultBacke
return base::FeatureList::IsEnabled(features::kSkiaGraphiteDawnUseD3D12)
? wgpu::BackendType::D3D12
: wgpu::BackendType::D3D11;
--#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-+#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+-#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
++#elif BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_BSD)
return wgpu::BackendType::Vulkan;
#elif BUILDFLAG(IS_APPLE)
return wgpu::BackendType::Metal;
diff --git a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_external__vk__image__backing.cc b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_external__vk__image__backing.cc
index fac12b99ffb1..543920db54d4 100644
--- a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_external__vk__image__backing.cc
+++ b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_external__vk__image__backing.cc
@@ -1,4 +1,4 @@
---- gpu/command_buffer/service/shared_image/external_vk_image_backing.cc.orig 2023-11-04 07:08:51 UTC
+--- gpu/command_buffer/service/shared_image/external_vk_image_backing.cc.orig 2024-02-03 15:42:55 UTC
+++ gpu/command_buffer/service/shared_image/external_vk_image_backing.cc
@@ -47,7 +47,7 @@
#include "ui/gl/gl_version_info.h"
@@ -7,9 +7,9 @@
-#if BUILDFLAG(IS_LINUX) && BUILDFLAG(USE_DAWN)
+#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)) && BUILDFLAG(USE_DAWN)
#include "gpu/command_buffer/service/shared_image/external_vk_image_dawn_representation.h"
- #endif
-
-@@ -679,7 +679,7 @@ std::unique_ptr<DawnImageRepresentation> ExternalVkIma
+ #if BUILDFLAG(DAWN_ENABLE_BACKEND_OPENGLES)
+ #include "gpu/command_buffer/service/shared_image/dawn_gl_texture_representation.h"
+@@ -682,7 +682,7 @@ std::unique_ptr<DawnImageRepresentation> ExternalVkIma
const wgpu::Device& wgpuDevice,
wgpu::BackendType backend_type,
std::vector<wgpu::TextureFormat> view_formats) {
diff --git a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_external__vk__image__backing__factory.cc b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_external__vk__image__backing__factory.cc
index bf59491df6c3..ee17776f6d82 100644
--- a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_external__vk__image__backing__factory.cc
+++ b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_external__vk__image__backing__factory.cc
@@ -1,6 +1,6 @@
---- gpu/command_buffer/service/shared_image/external_vk_image_backing_factory.cc.orig 2023-11-04 07:08:51 UTC
+--- gpu/command_buffer/service/shared_image/external_vk_image_backing_factory.cc.orig 2024-02-03 15:42:55 UTC
+++ gpu/command_buffer/service/shared_image/external_vk_image_backing_factory.cc
-@@ -99,7 +99,7 @@ base::flat_map<VkFormat, VkImageUsageFlags> CreateImag
+@@ -139,7 +139,7 @@ bool IsFormatSupported(viz::SharedImageFormat format,
} // namespace
constexpr uint32_t kSupportedUsage =
@@ -9,12 +9,3 @@
SHARED_IMAGE_USAGE_WEBGPU | SHARED_IMAGE_USAGE_WEBGPU_SWAP_CHAIN_TEXTURE |
SHARED_IMAGE_USAGE_WEBGPU_STORAGE_TEXTURE |
#endif
-@@ -268,7 +268,7 @@ bool ExternalVkImageBackingFactory::IsSupported(
- return false;
- }
-
--#if BUILDFLAG(IS_LINUX)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
- if (format.IsLegacyMultiplanar()) {
- // ExternalVkImageBacking doesn't work properly with external sampler
- // multi-planar formats on Linux, see https://crbug.com/1394888.
diff --git a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_shared__image__manager.cc b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_shared__image__manager.cc
index f5ab58a223a3..a44a95c72425 100644
--- a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_shared__image__manager.cc
+++ b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_shared__image_shared__image__manager.cc
@@ -1,6 +1,6 @@
---- gpu/command_buffer/service/shared_image/shared_image_manager.cc.orig 2023-09-17 07:59:53 UTC
+--- gpu/command_buffer/service/shared_image/shared_image_manager.cc.orig 2024-02-03 15:42:55 UTC
+++ gpu/command_buffer/service/shared_image/shared_image_manager.cc
-@@ -507,7 +507,7 @@ bool SharedImageManager::SupportsScanoutImages() {
+@@ -548,7 +548,7 @@ bool SharedImageManager::SupportsScanoutImages() {
return true;
#elif BUILDFLAG(IS_ANDROID)
return base::AndroidHardwareBufferCompat::IsSupportAvailable();
diff --git a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_webgpu__decoder__impl.cc b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_webgpu__decoder__impl.cc
index b9a8f49ecae2..86df4b577178 100644
--- a/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_webgpu__decoder__impl.cc
+++ b/www/ungoogled-chromium/files/patch-gpu_command__buffer_service_webgpu__decoder__impl.cc
@@ -1,4 +1,4 @@
---- gpu/command_buffer/service/webgpu_decoder_impl.cc.orig 2023-12-23 12:33:28 UTC
+--- gpu/command_buffer/service/webgpu_decoder_impl.cc.orig 2024-02-03 15:42:55 UTC
+++ gpu/command_buffer/service/webgpu_decoder_impl.cc
@@ -1252,7 +1252,7 @@ void WebGPUDecoderImpl::RequestAdapterImpl(
force_fallback_adapter = true;
@@ -9,7 +9,7 @@
if (!shared_context_state_->GrContextIsVulkan() &&
!shared_context_state_->IsGraphiteDawnVulkan() &&
use_webgpu_adapter_ != WebGPUAdapterName::kOpenGLES) {
-@@ -1879,7 +1879,7 @@ WebGPUDecoderImpl::AssociateMailboxDawn(
+@@ -1889,7 +1889,7 @@ WebGPUDecoderImpl::AssociateMailboxDawn(
}
#if !BUILDFLAG(IS_WIN) && !BUILDFLAG(IS_CHROMEOS) && !BUILDFLAG(IS_APPLE) && \
diff --git a/www/ungoogled-chromium/files/patch-gpu_config_gpu__control__list.cc b/www/ungoogled-chromium/files/patch-gpu_config_gpu__control__list.cc
index 7d6087e62dea..679093115c56 100644
--- a/www/ungoogled-chromium/files/patch-gpu_config_gpu__control__list.cc
+++ b/www/ungoogled-chromium/files/patch-gpu_config_gpu__control__list.cc
@@ -1,4 +1,4 @@
---- gpu/config/gpu_control_list.cc.orig 2023-11-04 07:08:51 UTC
+--- gpu/config/gpu_control_list.cc.orig 2024-02-03 15:42:55 UTC
+++ gpu/config/gpu_control_list.cc
@@ -276,7 +276,7 @@ bool GpuControlList::More::GLVersionInfoMismatch(
GpuControlList::GLType GpuControlList::More::GetDefaultGLType() {
@@ -9,7 +9,7 @@
return kGLTypeGL;
#elif BUILDFLAG(IS_MAC)
return kGLTypeGL;
-@@ -820,7 +820,7 @@ GpuControlList::OsType GpuControlList::GetOsType() {
+@@ -811,7 +811,7 @@ GpuControlList::OsType GpuControlList::GetOsType() {
return kOsAndroid;
#elif BUILDFLAG(IS_FUCHSIA)
return kOsFuchsia;
diff --git a/www/ungoogled-chromium/files/patch-gpu_config_gpu__finch__features.cc b/www/ungoogled-chromium/files/patch-gpu_config_gpu__finch__features.cc
index 897e33753c74..1e94f2171e9c 100644
--- a/www/ungoogled-chromium/files/patch-gpu_config_gpu__finch__features.cc
+++ b/www/ungoogled-chromium/files/patch-gpu_config_gpu__finch__features.cc
@@ -1,4 +1,4 @@
---- gpu/config/gpu_finch_features.cc.orig 2023-11-04 07:08:51 UTC
+--- gpu/config/gpu_finch_features.cc.orig 2024-02-03 15:42:55 UTC
+++ gpu/config/gpu_finch_features.cc
@@ -63,7 +63,7 @@ bool IsDeviceBlocked(const char* field, const std::str
BASE_FEATURE(kUseGles2ForOopR,
@@ -9,7 +9,7 @@
base::FEATURE_DISABLED_BY_DEFAULT
#else
base::FEATURE_ENABLED_BY_DEFAULT
-@@ -147,7 +147,8 @@ BASE_FEATURE(kAggressiveSkiaGpuResourcePurge,
+@@ -143,7 +143,8 @@ BASE_FEATURE(kAggressiveSkiaGpuResourcePurge,
BASE_FEATURE(kDefaultEnableGpuRasterization,
"DefaultEnableGpuRasterization",
#if BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_CHROMEOS) || \
diff --git a/www/ungoogled-chromium/files/patch-gpu_config_gpu__info__collector.cc b/www/ungoogled-chromium/files/patch-gpu_config_gpu__info__collector.cc
new file mode 100644
index 000000000000..647704decfc5
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-gpu_config_gpu__info__collector.cc
@@ -0,0 +1,11 @@
+--- gpu/config/gpu_info_collector.cc.orig 2024-02-03 15:42:55 UTC
++++ gpu/config/gpu_info_collector.cc
+@@ -362,7 +362,7 @@ void ReportWebGPUAdapterMetrics(dawn::native::Instance
+ void ReportWebGPUSupportMetrics(dawn::native::Instance* instance) {
+ static BASE_FEATURE(kCollectWebGPUSupportMetrics,
+ "CollectWebGPUSupportMetrics",
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_ANDROID) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ base::FEATURE_DISABLED_BY_DEFAULT);
+ #else
+ base::FEATURE_ENABLED_BY_DEFAULT);
diff --git a/www/ungoogled-chromium/files/patch-gpu_ipc_service_gpu__init.cc b/www/ungoogled-chromium/files/patch-gpu_ipc_service_gpu__init.cc
index d2e24a50017e..4d9f91a6bdbf 100644
--- a/www/ungoogled-chromium/files/patch-gpu_ipc_service_gpu__init.cc
+++ b/www/ungoogled-chromium/files/patch-gpu_ipc_service_gpu__init.cc
@@ -1,4 +1,4 @@
---- gpu/ipc/service/gpu_init.cc.orig 2023-12-23 12:33:28 UTC
+--- gpu/ipc/service/gpu_init.cc.orig 2024-02-03 15:42:55 UTC
+++ gpu/ipc/service/gpu_init.cc
@@ -357,7 +357,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandL
enable_watchdog = false;
@@ -18,7 +18,7 @@
// On Chrome OS ARM Mali, GPU driver userspace creates threads when
// initializing a GL context, so start the sandbox early.
// TODO(zmo): Need to collect OS version before this.
-@@ -491,7 +491,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandL
+@@ -495,7 +495,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandL
gpu_preferences_.gr_context_type = GrContextType::kGL;
}
@@ -27,7 +27,7 @@
// The ContentSandboxHelper is currently the only one implementation of
// GpuSandboxHelper and it has no dependency. Except on Linux where
// VaapiWrapper checks the GL implementation to determine which display
-@@ -573,7 +573,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandL
+@@ -577,7 +577,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandL
command_line, gpu_feature_info_,
gpu_preferences_.disable_software_rasterizer, false);
if (gl_use_swiftshader_) {
@@ -36,7 +36,7 @@
VLOG(1) << "Quit GPU process launch to fallback to SwiftShader cleanly "
<< "on Linux";
return false;
-@@ -726,7 +726,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandL
+@@ -733,7 +733,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandL
InitializePlatformOverlaySettings(&gpu_info_, gpu_feature_info_);
@@ -45,7 +45,7 @@
// Driver may create a compatibility profile context when collect graphics
// information on Linux platform. Try to collect graphics information
// based on core profile context after disabling platform extensions.
-@@ -781,7 +781,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandL
+@@ -788,7 +788,7 @@ bool GpuInit::InitializeAndStartSandbox(base::CommandL
}
}
}
@@ -54,7 +54,7 @@
(BUILDFLAG(IS_CHROMEOS) && !BUILDFLAG(IS_CHROMEOS_DEVICE))
if (!gl_disabled && !gl_use_swiftshader_ && std::getenv("RUNNING_UNDER_RR")) {
// https://rr-project.org/ is a Linux-only record-and-replay debugger that
-@@ -935,7 +935,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* c
+@@ -941,7 +941,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* c
}
bool gl_disabled = gl::GetGLImplementation() == gl::kGLImplementationDisabled;
@@ -63,7 +63,7 @@
(BUILDFLAG(IS_CHROMEOS) && !BUILDFLAG(IS_CHROMEOS_DEVICE))
if (!gl_disabled && !gl_use_swiftshader_ && std::getenv("RUNNING_UNDER_RR")) {
// https://rr-project.org/ is a Linux-only record-and-replay debugger that
-@@ -1005,7 +1005,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* c
+@@ -1011,7 +1011,7 @@ void GpuInit::InitializeInProcess(base::CommandLine* c
}
}
diff --git a/www/ungoogled-chromium/files/patch-gpu_vulkan_vulkan__function__pointers.cc b/www/ungoogled-chromium/files/patch-gpu_vulkan_vulkan__function__pointers.cc
index 194afa826aa6..43f7f16e6b80 100644
--- a/www/ungoogled-chromium/files/patch-gpu_vulkan_vulkan__function__pointers.cc
+++ b/www/ungoogled-chromium/files/patch-gpu_vulkan_vulkan__function__pointers.cc
@@ -1,6 +1,6 @@
---- gpu/vulkan/vulkan_function_pointers.cc.orig 2023-11-04 07:08:51 UTC
+--- gpu/vulkan/vulkan_function_pointers.cc.orig 2024-02-03 15:42:55 UTC
+++ gpu/vulkan/vulkan_function_pointers.cc
-@@ -1241,7 +1241,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointer
+@@ -1215,7 +1215,7 @@ bool VulkanFunctionPointers::BindDeviceFunctionPointer
}
}
@@ -9,7 +9,7 @@
if (gfx::HasExtension(enabled_extensions,
VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME)) {
vkGetImageDrmFormatModifierPropertiesEXT =
-@@ -1440,7 +1440,7 @@ void VulkanFunctionPointers::ResetForTesting() {
+@@ -1414,7 +1414,7 @@ void VulkanFunctionPointers::ResetForTesting() {
vkGetSwapchainImagesKHR = nullptr;
vkQueuePresentKHR = nullptr;
diff --git a/www/ungoogled-chromium/files/patch-ipc_ipc__channel.h b/www/ungoogled-chromium/files/patch-ipc_ipc__channel.h
index b02bad5b4d05..17341fbf9237 100644
--- a/www/ungoogled-chromium/files/patch-ipc_ipc__channel.h
+++ b/www/ungoogled-chromium/files/patch-ipc_ipc__channel.h
@@ -1,6 +1,6 @@
---- ipc/ipc_channel.h.orig 2023-03-10 11:01:21 UTC
+--- ipc/ipc_channel.h.orig 2024-02-03 15:42:55 UTC
+++ ipc/ipc_channel.h
-@@ -233,7 +233,7 @@ class COMPONENT_EXPORT(IPC) Channel : public Sender {
+@@ -240,7 +240,7 @@ class COMPONENT_EXPORT(IPC) Channel : public Sender {
static std::string GenerateUniqueRandomChannelID();
#endif
diff --git a/www/ungoogled-chromium/files/patch-media_base_media__switches.cc b/www/ungoogled-chromium/files/patch-media_base_media__switches.cc
index cadcdde513ae..a28305438146 100644
--- a/www/ungoogled-chromium/files/patch-media_base_media__switches.cc
+++ b/www/ungoogled-chromium/files/patch-media_base_media__switches.cc
@@ -1,6 +1,6 @@
---- media/base/media_switches.cc.orig 2023-12-23 12:33:28 UTC
+--- media/base/media_switches.cc.orig 2024-02-03 15:42:55 UTC
+++ media/base/media_switches.cc
-@@ -17,7 +17,7 @@
+@@ -21,7 +21,7 @@
#include "ui/gl/gl_features.h"
#include "ui/gl/gl_utils.h"
@@ -9,7 +9,7 @@
#include "base/cpu.h"
#endif
-@@ -702,7 +702,7 @@ BASE_FEATURE(kFallbackAfterDecodeError,
+@@ -708,7 +708,7 @@ BASE_FEATURE(kFallbackAfterDecodeError,
// Show toolbar button that opens dialog for controlling media sessions.
BASE_FEATURE(kGlobalMediaControls,
"GlobalMediaControls",
@@ -18,7 +18,7 @@
base::FEATURE_ENABLED_BY_DEFAULT
#else
base::FEATURE_DISABLED_BY_DEFAULT
-@@ -725,7 +725,7 @@ BASE_FEATURE(kGlobalMediaControlsCrOSUpdatedUI,
+@@ -731,7 +731,7 @@ BASE_FEATURE(kGlobalMediaControlsCrOSUpdatedUI,
// If enabled, users can request Media Remoting without fullscreen-in-tab.
BASE_FEATURE(kMediaRemotingWithoutFullscreen,
"MediaRemotingWithoutFullscreen",
@@ -27,7 +27,7 @@
base::FEATURE_ENABLED_BY_DEFAULT
#else
base::FEATURE_DISABLED_BY_DEFAULT
-@@ -737,7 +737,7 @@ BASE_FEATURE(kMediaRemotingWithoutFullscreen,
+@@ -743,7 +743,7 @@ BASE_FEATURE(kMediaRemotingWithoutFullscreen,
BASE_FEATURE(kGlobalMediaControlsPictureInPicture,
"GlobalMediaControlsPictureInPicture",
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
@@ -36,7 +36,7 @@
base::FEATURE_ENABLED_BY_DEFAULT
#else
base::FEATURE_DISABLED_BY_DEFAULT
-@@ -776,7 +776,7 @@ BASE_FEATURE(kUnifiedAutoplay,
+@@ -777,7 +777,7 @@ BASE_FEATURE(kUnifiedAutoplay,
"UnifiedAutoplay",
base::FEATURE_ENABLED_BY_DEFAULT);
@@ -45,7 +45,7 @@
// Enable vaapi video decoding on linux. This is already enabled by default on
// chromeos, but needs an experiment on linux.
BASE_FEATURE(kVaapiVideoDecodeLinux,
-@@ -856,7 +856,7 @@ BASE_FEATURE(kVaapiVp9SModeHWEncoding,
+@@ -863,7 +863,7 @@ BASE_FEATURE(kVaapiVp9SModeHWEncoding,
"VaapiVp9SModeHWEncoding",
base::FEATURE_DISABLED_BY_DEFAULT);
#endif // defined(ARCH_CPU_X86_FAMILY) && BUILDFLAG(IS_CHROMEOS)
@@ -54,7 +54,16 @@
// Enables the new V4L2StatefulVideoDecoder instead of V4L2VideoDecoder.
BASE_FEATURE(kV4L2FlatStatelessVideoDecoder,
"V4L2FlatStatelessVideoDecoder",
-@@ -1384,7 +1384,7 @@ const base::Feature MEDIA_EXPORT kUseOutOfProcessVideo
+@@ -967,7 +967,7 @@ BASE_FEATURE(kLiveCaptionUseWaitK,
+ // Live Caption can be used in multiple languages, as opposed to just English.
+ BASE_FEATURE(kLiveCaptionMultiLanguage,
+ "LiveCaptionMultiLanguage",
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ base::FEATURE_ENABLED_BY_DEFAULT
+ #else
+ base::FEATURE_DISABLED_BY_DEFAULT
+@@ -1402,7 +1402,7 @@ const base::Feature MEDIA_EXPORT kUseOutOfProcessVideo
};
#endif // BUILDFLAG(ALLOW_OOP_VIDEO_DECODER)
diff --git a/www/ungoogled-chromium/files/patch-media_base_media__switches.h b/www/ungoogled-chromium/files/patch-media_base_media__switches.h
index 097363ff628a..1e898273c29e 100644
--- a/www/ungoogled-chromium/files/patch-media_base_media__switches.h
+++ b/www/ungoogled-chromium/files/patch-media_base_media__switches.h
@@ -1,15 +1,15 @@
---- media/base/media_switches.h.orig 2023-12-23 12:33:28 UTC
+--- media/base/media_switches.h.orig 2024-02-03 15:42:55 UTC
+++ media/base/media_switches.h
-@@ -322,7 +322,7 @@ MEDIA_EXPORT BASE_DECLARE_FEATURE(kUseDecoderStreamFor
- MEDIA_EXPORT BASE_DECLARE_FEATURE(kUseElementInsteadOfRegionCapture);
+@@ -323,7 +323,7 @@ MEDIA_EXPORT BASE_DECLARE_FEATURE(kUnifiedAutoplay);
+ MEDIA_EXPORT BASE_DECLARE_FEATURE(kUseAndroidOverlayForSecureOnly);
+ MEDIA_EXPORT BASE_DECLARE_FEATURE(kUseDecoderStreamForWebRTC);
MEDIA_EXPORT BASE_DECLARE_FEATURE(kUseFakeDeviceForMediaStream);
- MEDIA_EXPORT BASE_DECLARE_FEATURE(kUseMediaHistoryStore);
-#if BUILDFLAG(IS_LINUX)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
MEDIA_EXPORT BASE_DECLARE_FEATURE(kVaapiVideoDecodeLinux);
MEDIA_EXPORT BASE_DECLARE_FEATURE(kVaapiVideoDecodeLinuxGL);
MEDIA_EXPORT BASE_DECLARE_FEATURE(kVaapiVideoEncodeLinux);
-@@ -340,7 +340,7 @@ MEDIA_EXPORT BASE_DECLARE_FEATURE(kVaapiH264TemporalLa
+@@ -342,7 +342,7 @@ MEDIA_EXPORT BASE_DECLARE_FEATURE(kVaapiH264TemporalLa
MEDIA_EXPORT BASE_DECLARE_FEATURE(kVaapiVp8TemporalLayerHWEncoding);
MEDIA_EXPORT BASE_DECLARE_FEATURE(kVaapiVp9SModeHWEncoding);
#endif // defined(ARCH_CPU_X86_FAMILY) && BUILDFLAG(IS_CHROMEOS)
@@ -18,7 +18,7 @@
MEDIA_EXPORT BASE_DECLARE_FEATURE(kV4L2FlatStatelessVideoDecoder);
MEDIA_EXPORT BASE_DECLARE_FEATURE(kV4L2FlatStatefulVideoDecoder);
#endif // BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX)
-@@ -461,7 +461,7 @@ MEDIA_EXPORT BASE_DECLARE_FEATURE(kExposeOutOfProcessV
+@@ -463,7 +463,7 @@ MEDIA_EXPORT BASE_DECLARE_FEATURE(kExposeOutOfProcessV
MEDIA_EXPORT BASE_DECLARE_FEATURE(kUseOutOfProcessVideoDecoding);
#endif // BUILDFLAG(ALLOW_OOP_VIDEO_DECODER)
diff --git a/www/ungoogled-chromium/files/patch-media_base_video__frame.cc b/www/ungoogled-chromium/files/patch-media_base_video__frame.cc
index 6a615018885f..55a517ab43e7 100644
--- a/www/ungoogled-chromium/files/patch-media_base_video__frame.cc
+++ b/www/ungoogled-chromium/files/patch-media_base_video__frame.cc
@@ -1,4 +1,4 @@
---- media/base/video_frame.cc.orig 2023-11-04 07:08:51 UTC
+--- media/base/video_frame.cc.orig 2024-02-03 15:42:55 UTC
+++ media/base/video_frame.cc
@@ -80,7 +80,7 @@ std::string VideoFrame::StorageTypeToString(
return "OWNED_MEMORY";
@@ -45,7 +45,7 @@
// static
scoped_refptr<VideoFrame> VideoFrame::WrapExternalDmabufs(
const VideoFrameLayout& layout,
-@@ -903,7 +903,7 @@ scoped_refptr<VideoFrame> VideoFrame::WrapVideoFrame(
+@@ -901,7 +901,7 @@ scoped_refptr<VideoFrame> VideoFrame::WrapVideoFrame(
}
}
@@ -54,7 +54,7 @@
DCHECK(frame->dmabuf_fds_);
// If there are any |dmabuf_fds_| plugged in, we should refer them too.
wrapping_frame->dmabuf_fds_ = frame->dmabuf_fds_;
-@@ -1311,7 +1311,7 @@ const gpu::MailboxHolder& VideoFrame::mailbox_holder(
+@@ -1312,7 +1312,7 @@ const gpu::MailboxHolder& VideoFrame::mailbox_holder(
: mailbox_holders_[texture_index];
}
@@ -63,7 +63,7 @@
const std::vector<base::ScopedFD>& VideoFrame::DmabufFds() const {
DCHECK_EQ(storage_type_, STORAGE_DMABUFS);
-@@ -1424,7 +1424,7 @@ VideoFrame::VideoFrame(const VideoFrameLayout& layout,
+@@ -1425,7 +1425,7 @@ VideoFrame::VideoFrame(const VideoFrameLayout& layout,
storage_type_(storage_type),
visible_rect_(Intersection(visible_rect, gfx::Rect(layout.coded_size()))),
natural_size_(natural_size),
diff --git a/www/ungoogled-chromium/files/patch-media_capture_video_fake__video__capture__device__factory.cc b/www/ungoogled-chromium/files/patch-media_capture_video_fake__video__capture__device__factory.cc
index afdc0359e849..ff91c2e0d0a2 100644
--- a/www/ungoogled-chromium/files/patch-media_capture_video_fake__video__capture__device__factory.cc
+++ b/www/ungoogled-chromium/files/patch-media_capture_video_fake__video__capture__device__factory.cc
@@ -1,6 +1,6 @@
---- media/capture/video/fake_video_capture_device_factory.cc.orig 2023-08-18 10:26:52 UTC
+--- media/capture/video/fake_video_capture_device_factory.cc.orig 2024-02-03 15:42:55 UTC
+++ media/capture/video/fake_video_capture_device_factory.cc
-@@ -229,7 +229,7 @@ void FakeVideoCaptureDeviceFactory::GetDevicesInfo(
+@@ -230,7 +230,7 @@ void FakeVideoCaptureDeviceFactory::GetDevicesInfo(
int entry_index = 0;
for (const auto& entry : devices_config_) {
VideoCaptureApi api =
diff --git a/www/ungoogled-chromium/files/patch-media_capture_video_linux_v4l2__capture__delegate.cc b/www/ungoogled-chromium/files/patch-media_capture_video_linux_v4l2__capture__delegate.cc
index 50cd194d1544..b389b3c8800e 100644
--- a/www/ungoogled-chromium/files/patch-media_capture_video_linux_v4l2__capture__delegate.cc
+++ b/www/ungoogled-chromium/files/patch-media_capture_video_linux_v4l2__capture__delegate.cc
@@ -1,4 +1,4 @@
---- media/capture/video/linux/v4l2_capture_delegate.cc.orig 2023-11-04 07:08:51 UTC
+--- media/capture/video/linux/v4l2_capture_delegate.cc.orig 2024-02-03 15:42:55 UTC
+++ media/capture/video/linux/v4l2_capture_delegate.cc
@@ -5,8 +5,10 @@
#include "media/capture/video/linux/v4l2_capture_delegate.h"
@@ -11,7 +11,7 @@
#include <poll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
-@@ -26,17 +28,19 @@
+@@ -27,17 +29,19 @@
#include "media/capture/video/blob_utils.h"
#include "media/capture/video/linux/video_capture_device_linux.h"
@@ -32,7 +32,7 @@
// TODO(aleksandar.stojiljkovic): Wrap this with kernel version check once the
// format is introduced to kernel.
-@@ -46,6 +50,14 @@ using media::mojom::MeteringMode;
+@@ -47,6 +51,14 @@ using media::mojom::MeteringMode;
#define V4L2_PIX_FMT_INVZ v4l2_fourcc('I', 'N', 'V', 'Z')
#endif
@@ -47,7 +47,7 @@
namespace media {
namespace {
-@@ -260,7 +272,7 @@ bool V4L2CaptureDelegate::IsBlockedControl(int control
+@@ -265,7 +277,7 @@ bool V4L2CaptureDelegate::IsBlockedControl(int control
// static
bool V4L2CaptureDelegate::IsControllableControl(
int control_id,
@@ -56,7 +56,7 @@
const int special_control_id = GetControllingSpecialControl(control_id);
if (!special_control_id) {
// The control is not controlled by a special control thus the control is
-@@ -316,7 +328,7 @@ V4L2CaptureDelegate::V4L2CaptureDelegate(
+@@ -321,7 +333,7 @@ V4L2CaptureDelegate::V4L2CaptureDelegate(
is_capturing_(false),
timeout_count_(0),
rotation_(rotation) {
@@ -65,7 +65,7 @@
use_gpu_buffer_ = switches::IsVideoCaptureUseGpuMemoryBufferEnabled();
#endif // BUILDFLAG(IS_LINUX)
}
-@@ -443,7 +455,7 @@ void V4L2CaptureDelegate::AllocateAndStart(
+@@ -448,7 +460,7 @@ void V4L2CaptureDelegate::AllocateAndStart(
client_->OnStarted();
@@ -74,7 +74,7 @@
if (use_gpu_buffer_) {
v4l2_gpu_helper_ = std::make_unique<V4L2CaptureDelegateGpuHelper>(
std::move(gmb_support_test_));
-@@ -793,7 +805,7 @@ void V4L2CaptureDelegate::SetGPUEnvironmentForTesting(
+@@ -798,7 +810,7 @@ void V4L2CaptureDelegate::SetGPUEnvironmentForTesting(
V4L2CaptureDelegate::~V4L2CaptureDelegate() = default;
@@ -83,7 +83,7 @@
int num_retries = 0;
for (; DoIoctl(request, argp) < 0 && num_retries < kMaxIOCtrlRetries;
++num_retries) {
-@@ -803,7 +815,7 @@ bool V4L2CaptureDelegate::RunIoctl(int request, void*
+@@ -808,7 +820,7 @@ bool V4L2CaptureDelegate::RunIoctl(int request, void*
return num_retries != kMaxIOCtrlRetries;
}
@@ -92,7 +92,7 @@
return HANDLE_EINTR(v4l2_->ioctl(device_fd_.get(), request, argp));
}
-@@ -814,6 +826,7 @@ bool V4L2CaptureDelegate::IsControllableControl(int co
+@@ -819,6 +831,7 @@ bool V4L2CaptureDelegate::IsControllableControl(int co
}
void V4L2CaptureDelegate::ReplaceControlEventSubscriptions() {
@@ -100,7 +100,7 @@
constexpr uint32_t kControlIds[] = {V4L2_CID_AUTO_EXPOSURE_BIAS,
V4L2_CID_AUTO_WHITE_BALANCE,
V4L2_CID_BRIGHTNESS,
-@@ -841,6 +854,7 @@ void V4L2CaptureDelegate::ReplaceControlEventSubscript
+@@ -846,6 +859,7 @@ void V4L2CaptureDelegate::ReplaceControlEventSubscript
<< ", {type = V4L2_EVENT_CTRL, id = " << control_id << "}";
}
}
@@ -108,7 +108,7 @@
}
mojom::RangePtr V4L2CaptureDelegate::RetrieveUserControlRange(int control_id) {
-@@ -1021,7 +1035,11 @@ void V4L2CaptureDelegate::DoCapture() {
+@@ -1026,7 +1040,11 @@ void V4L2CaptureDelegate::DoCapture() {
pollfd device_pfd = {};
device_pfd.fd = device_fd_.get();
@@ -120,7 +120,7 @@
const int result =
HANDLE_EINTR(v4l2_->poll(&device_pfd, 1, kCaptureTimeoutMs));
-@@ -1059,6 +1077,7 @@ void V4L2CaptureDelegate::DoCapture() {
+@@ -1064,6 +1082,7 @@ void V4L2CaptureDelegate::DoCapture() {
timeout_count_ = 0;
}
@@ -128,7 +128,7 @@
// Dequeue events if the driver has filled in some.
if (device_pfd.revents & POLLPRI) {
bool controls_changed = false;
-@@ -1093,6 +1112,7 @@ void V4L2CaptureDelegate::DoCapture() {
+@@ -1098,6 +1117,7 @@ void V4L2CaptureDelegate::DoCapture() {
client_->OnCaptureConfigurationChanged();
}
}
@@ -136,7 +136,7 @@
// Deenqueue, send and reenqueue a buffer if the driver has filled one in.
if (device_pfd.revents & POLLIN) {
-@@ -1146,7 +1166,7 @@ void V4L2CaptureDelegate::DoCapture() {
+@@ -1151,7 +1171,7 @@ void V4L2CaptureDelegate::DoCapture() {
// workable on Linux.
// See http://crbug.com/959919.
@@ -145,7 +145,7 @@
if (use_gpu_buffer_) {
v4l2_gpu_helper_->OnIncomingCapturedData(
client_.get(), buffer_tracker->start(),
-@@ -1219,7 +1239,7 @@ void V4L2CaptureDelegate::SetErrorState(VideoCaptureEr
+@@ -1224,7 +1244,7 @@ void V4L2CaptureDelegate::SetErrorState(VideoCaptureEr
client_->OnError(error, from_here, reason);
}
diff --git a/www/ungoogled-chromium/files/patch-media_gpu_chromeos_libyuv__image__processor__backend.cc b/www/ungoogled-chromium/files/patch-media_gpu_chromeos_libyuv__image__processor__backend.cc
new file mode 100644
index 000000000000..a3800f548426
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-media_gpu_chromeos_libyuv__image__processor__backend.cc
@@ -0,0 +1,29 @@
+--- media/gpu/chromeos/libyuv_image_processor_backend.cc.orig 2024-02-03 15:42:55 UTC
++++ media/gpu/chromeos/libyuv_image_processor_backend.cc
+@@ -47,7 +47,7 @@ static constexpr struct {
+ #define CONV(in, out, trans, result) \
+ {Fourcc::in, Fourcc::out, Transform::trans, SupportResult::result}
+ // Conversion.
+-#if BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ CONV(NV12, AR24, kConversion, Supported),
+ #endif
+ CONV(NV12, NV12, kConversion, Supported),
+@@ -430,7 +430,7 @@ int LibYUVImageProcessorBackend::DoConversion(const Vi
+ fr->GetWritableVisibleData(VideoFrame::kUVPlane)), \
+ fr->stride(VideoFrame::kUVPlane)
+
+-#if BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ #define ARGB_DATA(fr) \
+ fr->GetWritableVisibleData(VideoFrame::kARGBPlane), \
+ fr->stride(VideoFrame::kARGBPlane)
+@@ -573,7 +573,7 @@ int LibYUVImageProcessorBackend::DoConversion(const Vi
+ }
+ }
+
+-#if BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ if (output->format() == PIXEL_FORMAT_ARGB) {
+ if (input_config_.fourcc == Fourcc(Fourcc::NV12)) {
+ return LIBYUV_FUNC(NV12ToARGB, Y_UV_DATA(input),
diff --git a/www/ungoogled-chromium/files/patch-media_gpu_chromeos_mailbox__video__frame__converter.cc b/www/ungoogled-chromium/files/patch-media_gpu_chromeos_mailbox__video__frame__converter.cc
index 8ac777ee5f7f..ce3cf63e58b9 100644
--- a/www/ungoogled-chromium/files/patch-media_gpu_chromeos_mailbox__video__frame__converter.cc
+++ b/www/ungoogled-chromium/files/patch-media_gpu_chromeos_mailbox__video__frame__converter.cc
@@ -1,6 +1,6 @@
---- media/gpu/chromeos/mailbox_video_frame_converter.cc.orig 2023-11-04 07:08:51 UTC
+--- media/gpu/chromeos/mailbox_video_frame_converter.cc.orig 2024-02-03 15:42:55 UTC
+++ media/gpu/chromeos/mailbox_video_frame_converter.cc
-@@ -59,7 +59,7 @@ viz::SharedImageFormat GetSharedImageFormat(gfx::Buffe
+@@ -61,7 +61,7 @@ viz::SharedImageFormat GetSharedImageFormat(gfx::Buffe
<< static_cast<int>(buffer_format);
NOTREACHED_NORETURN();
}
diff --git a/www/ungoogled-chromium/files/patch-media_gpu_chromeos_platform__video__frame__utils.cc b/www/ungoogled-chromium/files/patch-media_gpu_chromeos_platform__video__frame__utils.cc
new file mode 100644
index 000000000000..c849f0f21bf7
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-media_gpu_chromeos_platform__video__frame__utils.cc
@@ -0,0 +1,11 @@
+--- media/gpu/chromeos/platform_video_frame_utils.cc.orig 2024-02-03 15:42:55 UTC
++++ media/gpu/chromeos/platform_video_frame_utils.cc
+@@ -119,7 +119,7 @@ class GbmDeviceWrapper {
+ // TODO(b/313513760): don't guard base::File::FLAG_WRITE behind
+ // BUILDFLAG(IS_LINUX) && BUILDFLAG(USE_V4L2_CODEC) once the hardware video
+ // decoding sandbox allows R+W access to the render nodes.
+-#if BUILDFLAG(IS_LINUX) && BUILDFLAG(USE_V4L2_CODEC)
++#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)) && BUILDFLAG(USE_V4L2_CODEC)
+ // Needed on Linux for gbm_create_device().
+ | base::File::FLAG_WRITE
+ #endif
diff --git a/www/ungoogled-chromium/files/patch-media_gpu_chromeos_video__decoder__pipeline.cc b/www/ungoogled-chromium/files/patch-media_gpu_chromeos_video__decoder__pipeline.cc
index 7145be00c95f..38f5d6cf9ce9 100644
--- a/www/ungoogled-chromium/files/patch-media_gpu_chromeos_video__decoder__pipeline.cc
+++ b/www/ungoogled-chromium/files/patch-media_gpu_chromeos_video__decoder__pipeline.cc
@@ -1,6 +1,6 @@
---- media/gpu/chromeos/video_decoder_pipeline.cc.orig 2023-12-23 12:33:28 UTC
+--- media/gpu/chromeos/video_decoder_pipeline.cc.orig 2024-02-03 15:42:55 UTC
+++ media/gpu/chromeos/video_decoder_pipeline.cc
-@@ -1048,14 +1048,14 @@ VideoDecoderPipeline::PickDecoderOutputFormat(
+@@ -999,14 +999,14 @@ VideoDecoderPipeline::PickDecoderOutputFormat(
}
#endif
@@ -17,7 +17,7 @@
// Linux w/ V4L2 should not use a custom allocator
// Only tested with video_decode_accelerator_tests
// TODO(wenst@) Test with full Chromium Browser
-@@ -1192,7 +1192,7 @@ VideoDecoderPipeline::PickDecoderOutputFormat(
+@@ -1143,7 +1143,7 @@ VideoDecoderPipeline::PickDecoderOutputFormat(
<< " VideoFrames";
auxiliary_frame_pool_->set_parent_task_runner(decoder_task_runner_);
diff --git a/www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__video__decoder.cc b/www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__video__decoder.cc
index 6ae70645f1f4..8116c94bd933 100644
--- a/www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__video__decoder.cc
+++ b/www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__video__decoder.cc
@@ -1,6 +1,6 @@
---- media/gpu/vaapi/vaapi_video_decoder.cc.orig 2023-12-23 12:33:28 UTC
+--- media/gpu/vaapi/vaapi_video_decoder.cc.orig 2024-02-03 15:42:55 UTC
+++ media/gpu/vaapi/vaapi_video_decoder.cc
-@@ -776,7 +776,7 @@ void VaapiVideoDecoder::ApplyResolutionChangeWithScree
+@@ -799,7 +799,7 @@ void VaapiVideoDecoder::ApplyResolutionChangeWithScree
const gfx::Size decoder_natural_size =
aspect_ratio_.GetNaturalSize(decoder_visible_rect);
diff --git a/www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__wrapper.cc b/www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__wrapper.cc
index a2df0937a157..d510086ea1bb 100644
--- a/www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__wrapper.cc
+++ b/www/ungoogled-chromium/files/patch-media_gpu_vaapi_vaapi__wrapper.cc
@@ -1,4 +1,4 @@
---- media/gpu/vaapi/vaapi_wrapper.cc.orig 2023-12-23 12:33:28 UTC
+--- media/gpu/vaapi/vaapi_wrapper.cc.orig 2024-02-03 15:42:55 UTC
+++ media/gpu/vaapi/vaapi_wrapper.cc
@@ -71,7 +71,7 @@
using media_gpu_vaapi::kModuleVa_prot;
@@ -9,34 +9,7 @@
#include "base/files/file_util.h"
#include "base/strings/string_split.h"
#endif
-@@ -1445,7 +1445,7 @@ bool IsVBREncodingSupported(VAProfile va_profile) {
- return VASupportedProfiles::Get().IsProfileSupported(mode, va_profile);
- }
-
--#if BUILDFLAG(IS_LINUX)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
- // Some VA-API drivers (vdpau-va-driver) will crash if used with VA/DRM on
- // NVIDIA GPUs. This function checks if such drivers are present.
- bool IsBrokenNvidiaVaapiDriverPresent() {
-@@ -1506,7 +1506,7 @@ void VADisplayStateSingleton::PreSandboxInitialization
- VADisplayStateSingleton& va_display_state = GetInstance();
- base::AutoLock lock(va_display_state.lock_);
-
--#if BUILDFLAG(IS_LINUX)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
- std::string va_driver_name;
- auto env = base::Environment::Create();
- if (env->GetVar("LIBVA_DRIVER_NAME", &va_driver_name) &&
-@@ -1543,7 +1543,7 @@ void VADisplayStateSingleton::PreSandboxInitialization
- if (base::EqualsCaseInsensitiveASCII(version_name, "vgem")) {
- continue;
- }
--#if BUILDFLAG(IS_LINUX)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
- // Skip NVIDIA GPUs if the VA-API driver used for them is known for crashing
- // with VA/DRM.
- if (is_nvidia_va_drm_broken &&
-@@ -1576,7 +1576,7 @@ VADisplayStateHandle VADisplayStateSingleton::GetHandl
+@@ -1514,7 +1514,7 @@ VADisplayStateHandle VADisplayStateSingleton::GetHandl
return {};
}
diff --git a/www/ungoogled-chromium/files/patch-media_video_gpu__memory__buffer__video__frame__pool.cc b/www/ungoogled-chromium/files/patch-media_video_gpu__memory__buffer__video__frame__pool.cc
index ea8532f595de..0ae6aeaa67ee 100644
--- a/www/ungoogled-chromium/files/patch-media_video_gpu__memory__buffer__video__frame__pool.cc
+++ b/www/ungoogled-chromium/files/patch-media_video_gpu__memory__buffer__video__frame__pool.cc
@@ -1,6 +1,6 @@
---- media/video/gpu_memory_buffer_video_frame_pool.cc.orig 2023-12-23 12:33:28 UTC
+--- media/video/gpu_memory_buffer_video_frame_pool.cc.orig 2024-02-03 15:42:55 UTC
+++ media/video/gpu_memory_buffer_video_frame_pool.cc
-@@ -774,7 +774,7 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::CreateHa
+@@ -790,7 +790,7 @@ void GpuMemoryBufferVideoFramePool::PoolImpl::CreateHa
}
bool is_software_backed_video_frame = !video_frame->HasTextures();
@@ -9,7 +9,7 @@
is_software_backed_video_frame &= !video_frame->HasDmaBufs();
#endif
-@@ -1224,7 +1224,7 @@ scoped_refptr<VideoFrame> GpuMemoryBufferVideoFramePoo
+@@ -1240,7 +1240,7 @@ scoped_refptr<VideoFrame> GpuMemoryBufferVideoFramePoo
}
#endif
@@ -18,7 +18,7 @@
is_webgpu_compatible = (gpu_memory_buffer != nullptr);
if (is_webgpu_compatible) {
is_webgpu_compatible &=
-@@ -1243,7 +1243,7 @@ scoped_refptr<VideoFrame> GpuMemoryBufferVideoFramePoo
+@@ -1259,7 +1259,7 @@ scoped_refptr<VideoFrame> GpuMemoryBufferVideoFramePoo
gpu::SHARED_IMAGE_USAGE_DISPLAY_READ |
gpu::SHARED_IMAGE_USAGE_SCANOUT;
diff --git a/www/ungoogled-chromium/files/patch-media_video_video__encode__accelerator__adapter.cc b/www/ungoogled-chromium/files/patch-media_video_video__encode__accelerator__adapter.cc
index 1529957e7ab9..cf6bd776f88a 100644
--- a/www/ungoogled-chromium/files/patch-media_video_video__encode__accelerator__adapter.cc
+++ b/www/ungoogled-chromium/files/patch-media_video_video__encode__accelerator__adapter.cc
@@ -1,6 +1,6 @@
---- media/video/video_encode_accelerator_adapter.cc.orig 2023-12-23 12:33:28 UTC
+--- media/video/video_encode_accelerator_adapter.cc.orig 2024-02-03 15:42:55 UTC
+++ media/video/video_encode_accelerator_adapter.cc
-@@ -153,7 +153,7 @@ VideoEncodeAccelerator::Config SetUpVeaConfig(
+@@ -154,7 +154,7 @@ VideoEncodeAccelerator::Config SetUpVeaConfig(
if (is_rgb)
config.input_format = PIXEL_FORMAT_I420;
@@ -9,7 +9,7 @@
if (format != PIXEL_FORMAT_I420 ||
!VideoFrame::IsStorageTypeMappable(storage_type)) {
// ChromeOS/Linux hardware video encoders supports I420 on-memory
-@@ -478,7 +478,7 @@ void VideoEncodeAcceleratorAdapter::InitializeInternal
+@@ -479,7 +479,7 @@ void VideoEncodeAcceleratorAdapter::InitializeInternal
SetUpVeaConfig(profile_, options_, format, first_frame->storage_type(),
supported_rc_modes_, required_encoder_type_);
diff --git a/www/ungoogled-chromium/files/patch-mojo_public_c_system_thunks.cc b/www/ungoogled-chromium/files/patch-mojo_public_c_system_thunks.cc
index 707e39658eb1..18abce4d2479 100644
--- a/www/ungoogled-chromium/files/patch-mojo_public_c_system_thunks.cc
+++ b/www/ungoogled-chromium/files/patch-mojo_public_c_system_thunks.cc
@@ -1,4 +1,4 @@
---- mojo/public/c/system/thunks.cc.orig 2023-11-04 07:08:51 UTC
+--- mojo/public/c/system/thunks.cc.orig 2024-02-03 15:42:55 UTC
+++ mojo/public/c/system/thunks.cc
@@ -24,7 +24,7 @@
#include "mojo/public/c/system/message_pipe.h"
@@ -6,9 +6,9 @@
#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) || \
- BUILDFLAG(IS_FUCHSIA)
+ BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_BSD)
+ #include <optional>
#include "base/environment.h"
#include "base/files/file_path.h"
- #include "base/scoped_native_library.h"
@@ -73,7 +73,7 @@ class CoreLibraryInitializer {
MojoResult LoadLibrary(base::FilePath library_path) {
@@ -33,6 +33,6 @@
#if BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN) || \
- BUILDFLAG(IS_FUCHSIA)
+ BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_BSD)
- absl::optional<base::ScopedNativeLibrary> library_;
+ std::optional<base::ScopedNativeLibrary> library_;
#endif
};
diff --git a/www/ungoogled-chromium/files/patch-mojo_public_tools_bindings_mojom.gni b/www/ungoogled-chromium/files/patch-mojo_public_tools_bindings_mojom.gni
index d99980f5fddf..ffcd86640c21 100644
--- a/www/ungoogled-chromium/files/patch-mojo_public_tools_bindings_mojom.gni
+++ b/www/ungoogled-chromium/files/patch-mojo_public_tools_bindings_mojom.gni
@@ -1,6 +1,6 @@
---- mojo/public/tools/bindings/mojom.gni.orig 2023-12-23 12:33:28 UTC
+--- mojo/public/tools/bindings/mojom.gni.orig 2024-02-03 15:42:55 UTC
+++ mojo/public/tools/bindings/mojom.gni
-@@ -758,6 +758,16 @@ template("mojom") {
+@@ -760,6 +760,16 @@ template("mojom") {
enabled_features += [ "is_apple" ]
}
diff --git a/www/ungoogled-chromium/files/patch-net_BUILD.gn b/www/ungoogled-chromium/files/patch-net_BUILD.gn
index 3c23ef8bb76b..cb91e33dd6ef 100644
--- a/www/ungoogled-chromium/files/patch-net_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-net_BUILD.gn
@@ -1,4 +1,4 @@
---- net/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- net/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ net/BUILD.gn
@@ -122,7 +122,7 @@ net_configs = [
"//build/config/compiler:wexit_time_destructors",
@@ -9,7 +9,7 @@
net_configs += [ "//build/config/linux:libresolv" ]
}
-@@ -1242,6 +1242,19 @@ component("net") {
+@@ -1179,6 +1179,19 @@ component("net") {
]
}
@@ -29,7 +29,7 @@
if (is_mac) {
sources += [
"base/network_notification_thread_mac.cc",
-@@ -1398,7 +1411,7 @@ component("net") {
+@@ -1335,7 +1348,7 @@ component("net") {
}
# Use getifaddrs() on POSIX platforms, except Linux.
@@ -38,7 +38,7 @@
sources += [
"base/network_interfaces_getifaddrs.cc",
"base/network_interfaces_getifaddrs.h",
-@@ -2888,7 +2901,7 @@ test("net_unittests") {
+@@ -2794,7 +2807,7 @@ test("net_unittests") {
]
}
@@ -47,7 +47,7 @@
sources += [
"base/address_tracker_linux_unittest.cc",
"base/network_interfaces_linux_unittest.cc",
-@@ -2976,6 +2989,10 @@ test("net_unittests") {
+@@ -2882,6 +2895,10 @@ test("net_unittests") {
]
}
@@ -58,7 +58,7 @@
if (enable_websockets) {
deps += [ "//net/server:tests" ]
}
-@@ -3046,7 +3063,7 @@ test("net_unittests") {
+@@ -2952,7 +2969,7 @@ test("net_unittests") {
]
}
@@ -67,7 +67,7 @@
sources += [ "tools/quic/quic_simple_server_test.cc" ]
}
-@@ -3181,7 +3198,7 @@ test("net_unittests") {
+@@ -3087,7 +3104,7 @@ test("net_unittests") {
}
# Use getifaddrs() on POSIX platforms, except Linux.
diff --git a/www/ungoogled-chromium/files/patch-net_base_features.cc b/www/ungoogled-chromium/files/patch-net_base_features.cc
index cd4a602cc266..d5236988d91e 100644
--- a/www/ungoogled-chromium/files/patch-net_base_features.cc
+++ b/www/ungoogled-chromium/files/patch-net_base_features.cc
@@ -1,6 +1,6 @@
---- net/base/features.cc.orig 2023-12-23 12:33:28 UTC
+--- net/base/features.cc.orig 2024-02-03 15:42:55 UTC
+++ net/base/features.cc
-@@ -470,7 +470,12 @@ BASE_FEATURE(kSpdyHeadersToHttpResponseUseBuilder,
+@@ -477,7 +477,12 @@ BASE_FEATURE(kSpdyHeadersToHttpResponseUseBuilder,
"SpdyHeadersToHttpResponseUseBuilder",
base::FEATURE_DISABLED_BY_DEFAULT);
diff --git a/www/ungoogled-chromium/files/patch-net_cert_cert__verify__proc.h b/www/ungoogled-chromium/files/patch-net_cert_cert__verify__proc.h
index 9d3ecd3a221d..05a2f4c1409e 100644
--- a/www/ungoogled-chromium/files/patch-net_cert_cert__verify__proc.h
+++ b/www/ungoogled-chromium/files/patch-net_cert_cert__verify__proc.h
@@ -1,6 +1,6 @@
---- net/cert/cert_verify_proc.h.orig 2023-08-18 10:26:52 UTC
+--- net/cert/cert_verify_proc.h.orig 2024-02-03 15:42:55 UTC
+++ net/cert/cert_verify_proc.h
-@@ -79,7 +79,7 @@ class NET_EXPORT CertVerifyProc
+@@ -130,7 +130,7 @@ class NET_EXPORT CertVerifyProc
kMaxValue = kChainLengthOne
};
diff --git a/www/ungoogled-chromium/files/patch-net_cert_pki_general__names.h b/www/ungoogled-chromium/files/patch-net_cert_pki_general__names.h
deleted file mode 100644
index 438450760ea8..000000000000
--- a/www/ungoogled-chromium/files/patch-net_cert_pki_general__names.h
+++ /dev/null
@@ -1,10 +0,0 @@
---- net/cert/pki/general_names.h.orig 2023-11-04 07:08:51 UTC
-+++ net/cert/pki/general_names.h
-@@ -8,6 +8,7 @@
- #include <memory>
- #include <string_view>
- #include <vector>
-+#include <string>
-
- #include "net/base/net_export.h"
- #include "net/cert/pki/cert_error_id.h"
diff --git a/www/ungoogled-chromium/files/patch-net_filter_zstd__source__stream.cc b/www/ungoogled-chromium/files/patch-net_filter_zstd__source__stream.cc
index 7a6d8cbf0674..105ac8652cd0 100644
--- a/www/ungoogled-chromium/files/patch-net_filter_zstd__source__stream.cc
+++ b/www/ungoogled-chromium/files/patch-net_filter_zstd__source__stream.cc
@@ -1,8 +1,8 @@
---- net/filter/zstd_source_stream.cc.orig 2023-10-13 13:20:35 UTC
+--- net/filter/zstd_source_stream.cc.orig 2024-02-03 15:42:55 UTC
+++ net/filter/zstd_source_stream.cc
-@@ -6,6 +6,7 @@
-
+@@ -7,6 +7,7 @@
#include <algorithm>
+ #include <unordered_map>
#include <utility>
+#include <unordered_map>
diff --git a/www/ungoogled-chromium/files/patch-net_socket_udp__socket__posix.cc b/www/ungoogled-chromium/files/patch-net_socket_udp__socket__posix.cc
index f46d960787b4..00931e2fe3f8 100644
--- a/www/ungoogled-chromium/files/patch-net_socket_udp__socket__posix.cc
+++ b/www/ungoogled-chromium/files/patch-net_socket_udp__socket__posix.cc
@@ -1,6 +1,6 @@
---- net/socket/udp_socket_posix.cc.orig 2023-12-23 12:33:28 UTC
+--- net/socket/udp_socket_posix.cc.orig 2024-02-03 15:42:55 UTC
+++ net/socket/udp_socket_posix.cc
-@@ -612,12 +612,17 @@ int UDPSocketPosix::SetRecvEcn() {
+@@ -610,12 +610,17 @@ int UDPSocketPosix::SetRecvEcn() {
}
}
@@ -19,7 +19,7 @@
if (confirm) {
sendto_flags_ |= MSG_CONFIRM;
} else {
-@@ -638,7 +643,7 @@ int UDPSocketPosix::SetBroadcast(bool broadcast) {
+@@ -636,7 +641,7 @@ int UDPSocketPosix::SetBroadcast(bool broadcast) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
int value = broadcast ? 1 : 0;
int rv;
@@ -28,7 +28,7 @@
// SO_REUSEPORT on OSX permits multiple processes to each receive
// UDP multicast or broadcast datagrams destined for the bound
// port.
-@@ -951,7 +956,7 @@ int UDPSocketPosix::DoBind(const IPEndPoint& address)
+@@ -949,7 +954,7 @@ int UDPSocketPosix::DoBind(const IPEndPoint& address)
#if BUILDFLAG(IS_CHROMEOS_ASH)
if (last_error == EINVAL)
return ERR_ADDRESS_IN_USE;
diff --git a/www/ungoogled-chromium/files/patch-net_socket_udp__socket__unittest.cc b/www/ungoogled-chromium/files/patch-net_socket_udp__socket__unittest.cc
index 0929885f9be9..1458f8c0c98d 100644
--- a/www/ungoogled-chromium/files/patch-net_socket_udp__socket__unittest.cc
+++ b/www/ungoogled-chromium/files/patch-net_socket_udp__socket__unittest.cc
@@ -1,6 +1,6 @@
---- net/socket/udp_socket_unittest.cc.orig 2023-04-08 11:38:38 UTC
+--- net/socket/udp_socket_unittest.cc.orig 2024-02-03 15:42:55 UTC
+++ net/socket/udp_socket_unittest.cc
-@@ -345,7 +345,7 @@ TEST_F(UDPSocketTest, PartialRecv) {
+@@ -344,7 +344,7 @@ TEST_F(UDPSocketTest, PartialRecv) {
EXPECT_EQ(second_packet, received);
}
@@ -9,7 +9,7 @@
// - MacOS: requires root permissions on OSX 10.7+.
// - Android: devices attached to testbots don't have default network, so
// broadcasting to 255.255.255.255 returns error -109 (Address not reachable).
-@@ -656,7 +656,7 @@ TEST_F(UDPSocketTest, ClientSetDoNotFragment) {
+@@ -655,7 +655,7 @@ TEST_F(UDPSocketTest, ClientSetDoNotFragment) {
EXPECT_THAT(rv, IsOk());
rv = client.SetDoNotFragment();
@@ -18,7 +18,7 @@
// TODO(crbug.com/945590): IP_MTU_DISCOVER is not implemented on Fuchsia.
EXPECT_THAT(rv, IsError(ERR_NOT_IMPLEMENTED));
#elif BUILDFLAG(IS_MAC)
-@@ -684,7 +684,7 @@ TEST_F(UDPSocketTest, ServerSetDoNotFragment) {
+@@ -683,7 +683,7 @@ TEST_F(UDPSocketTest, ServerSetDoNotFragment) {
EXPECT_THAT(rv, IsOk());
rv = server.SetDoNotFragment();
@@ -27,7 +27,7 @@
// TODO(crbug.com/945590): IP_MTU_DISCOVER is not implemented on Fuchsia.
EXPECT_THAT(rv, IsError(ERR_NOT_IMPLEMENTED));
#elif BUILDFLAG(IS_MAC)
-@@ -749,7 +749,7 @@ TEST_F(UDPSocketTest, JoinMulticastGroup) {
+@@ -748,7 +748,7 @@ TEST_F(UDPSocketTest, JoinMulticastGroup) {
// TODO(https://crbug.com/947115): failing on device on iOS 12.2.
// TODO(https://crbug.com/1227554): flaky on Mac 11.
@@ -36,7 +36,7 @@
#define MAYBE_SharedMulticastAddress DISABLED_SharedMulticastAddress
#else
#define MAYBE_SharedMulticastAddress SharedMulticastAddress
-@@ -803,7 +803,7 @@ TEST_F(UDPSocketTest, MAYBE_SharedMulticastAddress) {
+@@ -802,7 +802,7 @@ TEST_F(UDPSocketTest, MAYBE_SharedMulticastAddress) {
NetLogSource());
ASSERT_THAT(client_socket.Connect(send_address), IsOk());
diff --git a/www/ungoogled-chromium/files/patch-net_tools_cert__verify__tool_cert__verify__comparision__tool.cc b/www/ungoogled-chromium/files/patch-net_tools_cert__verify__tool_cert__verify__comparision__tool.cc
index 5f3cc8b7932c..2fbe0f06883f 100644
--- a/www/ungoogled-chromium/files/patch-net_tools_cert__verify__tool_cert__verify__comparision__tool.cc
+++ b/www/ungoogled-chromium/files/patch-net_tools_cert__verify__tool_cert__verify__comparision__tool.cc
@@ -1,4 +1,4 @@
---- net/tools/cert_verify_tool/cert_verify_comparision_tool.cc.orig 2023-11-04 07:08:51 UTC
+--- net/tools/cert_verify_tool/cert_verify_comparision_tool.cc.orig 2024-02-03 15:42:55 UTC
+++ net/tools/cert_verify_tool/cert_verify_comparision_tool.cc
@@ -34,7 +34,7 @@
#include "net/url_request/url_request_context_builder.h"
@@ -18,7 +18,7 @@
// On Linux, use a fixed ProxyConfigService, since the default one
// depends on glib.
//
-@@ -125,7 +125,7 @@ class CertVerifyImpl {
+@@ -122,7 +122,7 @@ class CertVerifyImpl {
std::unique_ptr<CertVerifyImpl> CreateCertVerifyImplFromName(
base::StringPiece impl_name,
scoped_refptr<net::CertNetFetcher> cert_net_fetcher) {
diff --git a/www/ungoogled-chromium/files/patch-net_tools_cert__verify__tool_cert__verify__tool.cc b/www/ungoogled-chromium/files/patch-net_tools_cert__verify__tool_cert__verify__tool.cc
index 3555c12e8bfc..51955ad226c2 100644
--- a/www/ungoogled-chromium/files/patch-net_tools_cert__verify__tool_cert__verify__tool.cc
+++ b/www/ungoogled-chromium/files/patch-net_tools_cert__verify__tool_cert__verify__tool.cc
@@ -1,15 +1,15 @@
---- net/tools/cert_verify_tool/cert_verify_tool.cc.orig 2023-12-23 12:33:28 UTC
+--- net/tools/cert_verify_tool/cert_verify_tool.cc.orig 2024-02-03 15:42:55 UTC
+++ net/tools/cert_verify_tool/cert_verify_tool.cc
-@@ -31,7 +31,7 @@
- #include "net/url_request/url_request_context_builder.h"
- #include "net/url_request/url_request_context_getter.h"
+@@ -32,7 +32,7 @@
+ #include "third_party/boringssl/src/pki/trust_store.h"
+ #include "third_party/boringssl/src/pki/trust_store_collection.h"
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
#include "net/proxy_resolution/proxy_config.h"
#include "net/proxy_resolution/proxy_config_service_fixed.h"
#endif
-@@ -63,7 +63,7 @@ void SetUpOnNetworkThread(
+@@ -64,7 +64,7 @@ void SetUpOnNetworkThread(
base::WaitableEvent* initialization_complete_event) {
net::URLRequestContextBuilder url_request_context_builder;
url_request_context_builder.set_user_agent(GetUserAgent());
@@ -18,7 +18,7 @@
// On Linux, use a fixed ProxyConfigService, since the default one
// depends on glib.
//
-@@ -545,7 +545,7 @@ int main(int argc, char** argv) {
+@@ -562,7 +562,7 @@ int main(int argc, char** argv) {
std::string impls_str = command_line.GetSwitchValueASCII("impls");
if (impls_str.empty()) {
// Default value.
diff --git a/www/ungoogled-chromium/files/patch-remoting_host_base_desktop__environment__options.cc b/www/ungoogled-chromium/files/patch-remoting_host_base_desktop__environment__options.cc
index 628af848ec45..cd198691cad0 100644
--- a/www/ungoogled-chromium/files/patch-remoting_host_base_desktop__environment__options.cc
+++ b/www/ungoogled-chromium/files/patch-remoting_host_base_desktop__environment__options.cc
@@ -1,6 +1,6 @@
---- remoting/host/base/desktop_environment_options.cc.orig 2023-03-10 11:01:21 UTC
+--- remoting/host/base/desktop_environment_options.cc.orig 2024-02-03 15:42:55 UTC
+++ remoting/host/base/desktop_environment_options.cc
-@@ -119,7 +119,7 @@ bool DesktopEnvironmentOptions::capture_video_on_dedic
+@@ -118,7 +118,7 @@ bool DesktopEnvironmentOptions::capture_video_on_dedic
// TODO(joedow): Determine whether we can migrate additional platforms to
// using the DesktopCaptureWrapper instead of the DesktopCaptureProxy. Then
// clean up DesktopCapturerProxy::Core::CreateCapturer().
diff --git a/www/ungoogled-chromium/files/patch-remoting_host_client__session.cc b/www/ungoogled-chromium/files/patch-remoting_host_client__session.cc
index efc9a595baee..11a101d35c64 100644
--- a/www/ungoogled-chromium/files/patch-remoting_host_client__session.cc
+++ b/www/ungoogled-chromium/files/patch-remoting_host_client__session.cc
@@ -1,6 +1,6 @@
---- remoting/host/client_session.cc.orig 2023-11-04 07:08:51 UTC
+--- remoting/host/client_session.cc.orig 2024-02-03 15:42:55 UTC
+++ remoting/host/client_session.cc
-@@ -157,7 +157,7 @@ void ClientSession::NotifyClientResolution(
+@@ -159,7 +159,7 @@ void ClientSession::NotifyClientResolution(
if (desktop_environment_options_.enable_curtaining()) {
dpi_vector.set(resolution.x_dpi(), resolution.y_dpi());
}
diff --git a/www/ungoogled-chromium/files/patch-remoting_host_host__attributes.cc b/www/ungoogled-chromium/files/patch-remoting_host_host__attributes.cc
index 34b59bebe2df..0d8b40a47309 100644
--- a/www/ungoogled-chromium/files/patch-remoting_host_host__attributes.cc
+++ b/www/ungoogled-chromium/files/patch-remoting_host_host__attributes.cc
@@ -1,6 +1,6 @@
---- remoting/host/host_attributes.cc.orig 2023-06-05 19:39:05 UTC
+--- remoting/host/host_attributes.cc.orig 2024-02-03 15:42:55 UTC
+++ remoting/host/host_attributes.cc
-@@ -105,7 +105,7 @@ std::string GetHostAttributes() {
+@@ -104,7 +104,7 @@ std::string GetHostAttributes() {
if (media::InitializeMediaFoundation()) {
result.push_back("HWEncoder");
}
diff --git a/www/ungoogled-chromium/files/patch-remoting_host_it2me_it2me__host.cc b/www/ungoogled-chromium/files/patch-remoting_host_it2me_it2me__host.cc
index a7fd374314d1..a90205fdaab7 100644
--- a/www/ungoogled-chromium/files/patch-remoting_host_it2me_it2me__host.cc
+++ b/www/ungoogled-chromium/files/patch-remoting_host_it2me_it2me__host.cc
@@ -1,6 +1,6 @@
---- remoting/host/it2me/it2me_host.cc.orig 2023-09-17 07:59:53 UTC
+--- remoting/host/it2me/it2me_host.cc.orig 2024-02-03 15:42:55 UTC
+++ remoting/host/it2me/it2me_host.cc
-@@ -49,7 +49,7 @@
+@@ -52,7 +52,7 @@
#include "remoting/host/chromeos/features.h"
#endif
@@ -9,7 +9,7 @@
#include "remoting/host/linux/wayland_manager.h"
#include "remoting/host/linux/wayland_utils.h"
#endif // BUILDFLAG(IS_LINUX)
-@@ -121,7 +121,7 @@ void It2MeHost::Connect(
+@@ -178,7 +178,7 @@ void It2MeHost::Connect(
OnPolicyUpdate(std::move(policies));
@@ -18,7 +18,7 @@
if (IsRunningWayland()) {
WaylandManager::Get()->Init(host_context_->ui_task_runner());
}
-@@ -261,7 +261,7 @@ void It2MeHost::ConnectOnNetworkThread(
+@@ -338,7 +338,7 @@ void It2MeHost::ConnectOnNetworkThread(
// Set up the desktop environment options.
DesktopEnvironmentOptions options(DesktopEnvironmentOptions::CreateDefault());
diff --git a/www/ungoogled-chromium/files/patch-remoting_host_me2me__desktop__environment.cc b/www/ungoogled-chromium/files/patch-remoting_host_me2me__desktop__environment.cc
index 9be23b1853e9..ce7f94619d11 100644
--- a/www/ungoogled-chromium/files/patch-remoting_host_me2me__desktop__environment.cc
+++ b/www/ungoogled-chromium/files/patch-remoting_host_me2me__desktop__environment.cc
@@ -1,4 +1,4 @@
---- remoting/host/me2me_desktop_environment.cc.orig 2023-12-23 12:33:28 UTC
+--- remoting/host/me2me_desktop_environment.cc.orig 2024-02-03 15:42:55 UTC
+++ remoting/host/me2me_desktop_environment.cc
@@ -125,7 +125,7 @@ std::string Me2MeDesktopEnvironment::GetCapabilities()
capabilities += protocol::kRemoteWebAuthnCapability;
@@ -9,7 +9,7 @@
if (!IsRunningWayland()) {
capabilities += " ";
capabilities += protocol::kMultiStreamCapability;
-@@ -171,7 +171,7 @@ Me2MeDesktopEnvironment::Me2MeDesktopEnvironment(
+@@ -164,7 +164,7 @@ Me2MeDesktopEnvironment::Me2MeDesktopEnvironment(
// properly under Xvfb.
mutable_desktop_capture_options()->set_use_update_notifications(true);
@@ -18,7 +18,7 @@
// Setting this option to false means that the capture differ wrapper will not
// be used when the X11 capturer is selected. This reduces the X11 capture
// time by a few milliseconds per frame and is safe because we can rely on
-@@ -180,7 +180,7 @@ Me2MeDesktopEnvironment::Me2MeDesktopEnvironment(
+@@ -173,7 +173,7 @@ Me2MeDesktopEnvironment::Me2MeDesktopEnvironment(
mutable_desktop_capture_options()->set_detect_updated_region(false);
#endif
@@ -27,7 +27,7 @@
if (IsRunningWayland()) {
mutable_desktop_capture_options()->set_prefer_cursor_embedded(false);
}
-@@ -205,7 +205,7 @@ bool Me2MeDesktopEnvironment::InitializeSecurity(
+@@ -198,7 +198,7 @@ bool Me2MeDesktopEnvironment::InitializeSecurity(
// Otherwise, if the session is shared with the local user start monitoring
// the local input and create the in-session UI.
diff --git a/www/ungoogled-chromium/files/patch-remoting_host_remoting__me2me__host.cc b/www/ungoogled-chromium/files/patch-remoting_host_remoting__me2me__host.cc
index 02d2503311f7..b5b3ab9d0564 100644
--- a/www/ungoogled-chromium/files/patch-remoting_host_remoting__me2me__host.cc
+++ b/www/ungoogled-chromium/files/patch-remoting_host_remoting__me2me__host.cc
@@ -1,4 +1,4 @@
---- remoting/host/remoting_me2me_host.cc.orig 2023-12-23 12:33:28 UTC
+--- remoting/host/remoting_me2me_host.cc.orig 2024-02-03 15:42:55 UTC
+++ remoting/host/remoting_me2me_host.cc
@@ -127,7 +127,7 @@
#include "remoting/host/mac/permission_utils.h"
@@ -119,7 +119,7 @@
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_BSD)
- absl::optional<bool> host_username_match_required =
+ std::optional<bool> host_username_match_required =
policies.FindBool(policy::key::kRemoteAccessHostMatchUsername);
if (!host_username_match_required.has_value()) {
@@ -1800,7 +1800,7 @@ void HostProcess::StartHost() {
diff --git a/www/ungoogled-chromium/files/patch-remoting_host_setup_start__host__main.cc b/www/ungoogled-chromium/files/patch-remoting_host_setup_start__host__main.cc
index 5a51c6e6955d..6e7027be97be 100644
--- a/www/ungoogled-chromium/files/patch-remoting_host_setup_start__host__main.cc
+++ b/www/ungoogled-chromium/files/patch-remoting_host_setup_start__host__main.cc
@@ -1,4 +1,4 @@
---- remoting/host/setup/start_host_main.cc.orig 2023-12-23 12:33:28 UTC
+--- remoting/host/setup/start_host_main.cc.orig 2024-02-03 15:42:55 UTC
+++ remoting/host/setup/start_host_main.cc
@@ -36,7 +36,7 @@
#include <unistd.h>
@@ -9,7 +9,7 @@
#include "remoting/host/setup/daemon_controller_delegate_linux.h"
#include "remoting/host/setup/start_host_as_root.h"
#endif // BUILDFLAG(IS_LINUX)
-@@ -242,7 +242,7 @@ bool InitializeCorpMachineParams(HostStarter::Params&
+@@ -250,7 +250,7 @@ bool InitializeCorpMachineParams(HostStarter::Params&
} // namespace
int StartHostMain(int argc, char** argv) {
@@ -18,7 +18,7 @@
// Minimize the amount of code that runs as root on Posix systems.
if (getuid() == 0) {
return remoting::StartHostAsRoot(argc, argv);
-@@ -274,7 +274,7 @@ int StartHostMain(int argc, char** argv) {
+@@ -281,7 +281,7 @@ int StartHostMain(int argc, char** argv) {
mojo::core::Init();
diff --git a/www/ungoogled-chromium/files/patch-remoting_host_webauthn_remote__webauthn__caller__security__utils.cc b/www/ungoogled-chromium/files/patch-remoting_host_webauthn_remote__webauthn__caller__security__utils.cc
index cc87f25fd08e..095f8ff9648d 100644
--- a/www/ungoogled-chromium/files/patch-remoting_host_webauthn_remote__webauthn__caller__security__utils.cc
+++ b/www/ungoogled-chromium/files/patch-remoting_host_webauthn_remote__webauthn__caller__security__utils.cc
@@ -1,6 +1,6 @@
---- remoting/host/webauthn/remote_webauthn_caller_security_utils.cc.orig 2022-10-29 17:50:56 UTC
+--- remoting/host/webauthn/remote_webauthn_caller_security_utils.cc.orig 2024-02-03 15:42:55 UTC
+++ remoting/host/webauthn/remote_webauthn_caller_security_utils.cc
-@@ -9,7 +9,7 @@
+@@ -10,7 +10,7 @@
#include "base/strings/utf_string_conversions.h"
#include "build/build_config.h"
@@ -9,7 +9,7 @@
#include "base/containers/fixed_flat_set.h"
#include "base/files/file_path.h"
#include "base/process/process_handle.h"
-@@ -37,7 +37,7 @@ namespace {
+@@ -38,7 +38,7 @@ namespace {
// No static variables needed for debug builds.
@@ -18,7 +18,7 @@
constexpr auto kAllowedCallerPrograms =
base::MakeFixedFlatSet<base::FilePath::StringPieceType>({
-@@ -81,7 +81,7 @@ bool IsLaunchedByTrustedProcess() {
+@@ -82,7 +82,7 @@ bool IsLaunchedByTrustedProcess() {
#if !defined(NDEBUG)
// Just return true on debug builds for the convenience of development.
return true;
diff --git a/www/ungoogled-chromium/files/patch-sandbox_policy_features.cc b/www/ungoogled-chromium/files/patch-sandbox_policy_features.cc
index 92c31045660e..2ad6bb42b041 100644
--- a/www/ungoogled-chromium/files/patch-sandbox_policy_features.cc
+++ b/www/ungoogled-chromium/files/patch-sandbox_policy_features.cc
@@ -1,6 +1,6 @@
---- sandbox/policy/features.cc.orig 2023-11-11 14:10:41 UTC
+--- sandbox/policy/features.cc.orig 2024-02-03 15:42:55 UTC
+++ sandbox/policy/features.cc
-@@ -19,7 +19,11 @@ namespace sandbox::policy::features {
+@@ -20,7 +20,11 @@ namespace sandbox::policy::features {
// (Only causes an effect when feature kNetworkServiceInProcess is disabled.)
BASE_FEATURE(kNetworkServiceSandbox,
"NetworkServiceSandbox",
@@ -12,7 +12,7 @@
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
// Enables a fine-grained seccomp-BPF syscall filter for the network service.
-@@ -126,7 +130,7 @@ BASE_FEATURE(kForceSpectreVariant2Mitigation,
+@@ -128,7 +132,7 @@ BASE_FEATURE(kForceSpectreVariant2Mitigation,
base::FEATURE_DISABLED_BY_DEFAULT);
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
diff --git a/www/ungoogled-chromium/files/patch-sandbox_policy_features.h b/www/ungoogled-chromium/files/patch-sandbox_policy_features.h
index c0e1e8739077..2b0a887ac3fc 100644
--- a/www/ungoogled-chromium/files/patch-sandbox_policy_features.h
+++ b/www/ungoogled-chromium/files/patch-sandbox_policy_features.h
@@ -1,4 +1,4 @@
---- sandbox/policy/features.h.orig 2023-11-11 14:10:41 UTC
+--- sandbox/policy/features.h.orig 2024-02-03 15:42:55 UTC
+++ sandbox/policy/features.h
@@ -44,7 +44,7 @@ SANDBOX_POLICY_EXPORT BASE_DECLARE_FEATURE(kSpectreVar
SANDBOX_POLICY_EXPORT BASE_DECLARE_FEATURE(kForceSpectreVariant2Mitigation);
@@ -8,4 +8,4 @@
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
SANDBOX_POLICY_EXPORT BASE_DECLARE_FEATURE(
kForceDisableSpectreVariant2MitigationInNetworkService);
- #endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
+
diff --git a/www/ungoogled-chromium/files/patch-services_audio_audio__sandbox__hook__linux.cc b/www/ungoogled-chromium/files/patch-services_audio_audio__sandbox__hook__linux.cc
index 1e7db60c2b89..6fc93cf06fde 100644
--- a/www/ungoogled-chromium/files/patch-services_audio_audio__sandbox__hook__linux.cc
+++ b/www/ungoogled-chromium/files/patch-services_audio_audio__sandbox__hook__linux.cc
@@ -1,6 +1,6 @@
---- services/audio/audio_sandbox_hook_linux.cc.orig 2023-09-17 07:59:53 UTC
+--- services/audio/audio_sandbox_hook_linux.cc.orig 2024-02-03 15:42:55 UTC
+++ services/audio/audio_sandbox_hook_linux.cc
-@@ -143,6 +143,7 @@ void AddPulseAudioFilePermissions(
+@@ -144,6 +144,7 @@ void AddPulseAudioFilePermissions(
}
#endif
@@ -8,7 +8,7 @@
std::vector<BrokerFilePermission> GetAudioFilePermissions() {
std::vector<BrokerFilePermission> permissions{
BrokerFilePermission::ReadOnly("/dev/urandom"),
-@@ -171,10 +172,12 @@ void LoadAudioLibraries() {
+@@ -172,10 +173,12 @@ void LoadAudioLibraries() {
}
}
}
@@ -21,7 +21,7 @@
LoadAudioLibraries();
auto* instance = sandbox::policy::SandboxLinux::GetInstance();
instance->StartBrokerProcess(MakeBrokerCommandSet({
-@@ -194,6 +197,7 @@ bool AudioPreSandboxHook(sandbox::policy::SandboxLinux
+@@ -195,6 +198,7 @@ bool AudioPreSandboxHook(sandbox::policy::SandboxLinux
// TODO(https://crbug.com/850878) enable namespace sandbox. Currently, if
// enabled, connect() on pulse native socket fails with ENOENT (called from
// pa_context_connect).
diff --git a/www/ungoogled-chromium/files/patch-services_device_compute__pressure_cpu__probe.cc b/www/ungoogled-chromium/files/patch-services_device_compute__pressure_cpu__probe.cc
index 1378a2940b46..6864c5ee3da7 100644
--- a/www/ungoogled-chromium/files/patch-services_device_compute__pressure_cpu__probe.cc
+++ b/www/ungoogled-chromium/files/patch-services_device_compute__pressure_cpu__probe.cc
@@ -1,6 +1,6 @@
---- services/device/compute_pressure/cpu_probe.cc.orig 2023-05-05 12:12:41 UTC
+--- services/device/compute_pressure/cpu_probe.cc.orig 2024-02-03 15:42:55 UTC
+++ services/device/compute_pressure/cpu_probe.cc
-@@ -44,6 +44,7 @@ std::unique_ptr<CpuProbe> CpuProbe::Create(
+@@ -60,6 +60,7 @@ std::unique_ptr<CpuProbe> CpuProbe::Create(
#elif BUILDFLAG(IS_MAC)
return CpuProbeMac::Create(sampling_interval, std::move(sampling_callback));
#else
diff --git a/www/ungoogled-chromium/files/patch-services_device_geolocation_location__arbitrator.cc b/www/ungoogled-chromium/files/patch-services_device_geolocation_location__arbitrator.cc
index 5930f534ca7a..8d01281b633f 100644
--- a/www/ungoogled-chromium/files/patch-services_device_geolocation_location__arbitrator.cc
+++ b/www/ungoogled-chromium/files/patch-services_device_geolocation_location__arbitrator.cc
@@ -1,6 +1,6 @@
---- services/device/geolocation/location_arbitrator.cc.orig 2023-12-23 12:33:28 UTC
+--- services/device/geolocation/location_arbitrator.cc.orig 2024-02-03 15:42:55 UTC
+++ services/device/geolocation/location_arbitrator.cc
-@@ -193,7 +193,7 @@ LocationArbitrator::NewNetworkLocationProvider(
+@@ -194,7 +194,7 @@ LocationArbitrator::NewNetworkLocationProvider(
std::unique_ptr<LocationProvider>
LocationArbitrator::NewSystemLocationProvider() {
diff --git a/www/ungoogled-chromium/files/patch-services_device_hid_BUILD.gn b/www/ungoogled-chromium/files/patch-services_device_hid_BUILD.gn
index c6988fb161dc..ac8047fb73bb 100644
--- a/www/ungoogled-chromium/files/patch-services_device_hid_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-services_device_hid_BUILD.gn
@@ -1,6 +1,6 @@
---- services/device/hid/BUILD.gn.orig 2023-07-21 09:49:17 UTC
+--- services/device/hid/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ services/device/hid/BUILD.gn
-@@ -34,7 +34,26 @@ source_set("hid") {
+@@ -36,7 +36,26 @@ source_set("hid") {
"//services/device/public/mojom",
]
diff --git a/www/ungoogled-chromium/files/patch-services_device_usb_BUILD.gn b/www/ungoogled-chromium/files/patch-services_device_usb_BUILD.gn
index bf0f26b44369..04497c96078a 100644
--- a/www/ungoogled-chromium/files/patch-services_device_usb_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-services_device_usb_BUILD.gn
@@ -1,6 +1,6 @@
---- services/device/usb/BUILD.gn.orig 2023-11-04 07:08:51 UTC
+--- services/device/usb/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ services/device/usb/BUILD.gn
-@@ -90,15 +90,17 @@ static_library("usb") {
+@@ -92,15 +92,17 @@ static_library("usb") {
deps += [ "//third_party/re2" ]
}
@@ -27,7 +27,7 @@
# These sources and deps are required for libusb.
# TODO(https://crbug.com/1096743) Remove these sources.
-@@ -122,6 +124,13 @@ static_library("usb") {
+@@ -124,6 +126,13 @@ static_library("usb") {
deps += [ "//third_party/libusb" ]
}
@@ -41,7 +41,7 @@
if (is_linux || is_chromeos) {
sources += [
"usb_device_linux.cc",
-@@ -139,7 +148,7 @@ static_library("usb") {
+@@ -141,7 +150,7 @@ static_library("usb") {
deps += [ "//device/udev_linux" ]
}
diff --git a/www/ungoogled-chromium/files/patch-services_network_BUILD.gn b/www/ungoogled-chromium/files/patch-services_network_BUILD.gn
index 0cdba42b2519..685787ca90e5 100644
--- a/www/ungoogled-chromium/files/patch-services_network_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-services_network_BUILD.gn
@@ -1,6 +1,6 @@
---- services/network/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- services/network/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ services/network/BUILD.gn
-@@ -394,7 +394,6 @@ if (is_linux || is_chromeos) {
+@@ -397,7 +397,6 @@ if (is_linux || is_chromeos) {
]
deps = [
"//base:base",
diff --git a/www/ungoogled-chromium/files/patch-services_network_network__context.cc b/www/ungoogled-chromium/files/patch-services_network_network__context.cc
index e1250b4ada0a..ebbd0df336bc 100644
--- a/www/ungoogled-chromium/files/patch-services_network_network__context.cc
+++ b/www/ungoogled-chromium/files/patch-services_network_network__context.cc
@@ -1,6 +1,6 @@
---- services/network/network_context.cc.orig 2023-12-23 12:33:28 UTC
+--- services/network/network_context.cc.orig 2024-02-03 15:42:55 UTC
+++ services/network/network_context.cc
-@@ -470,7 +470,7 @@ NetworkContext::NetworkContextHttpAuthPreferences::
+@@ -471,7 +471,7 @@ NetworkContext::NetworkContextHttpAuthPreferences::
NetworkContext::NetworkContextHttpAuthPreferences::
~NetworkContextHttpAuthPreferences() = default;
@@ -9,7 +9,7 @@
bool NetworkContext::NetworkContextHttpAuthPreferences::AllowGssapiLibraryLoad()
const {
if (network_service_) {
-@@ -2385,7 +2385,7 @@ void NetworkContext::OnHttpAuthDynamicParamsChanged(
+@@ -2360,7 +2360,7 @@ void NetworkContext::OnHttpAuthDynamicParamsChanged(
http_auth_dynamic_network_service_params->android_negotiate_account_type);
#endif // BUILDFLAG(IS_ANDROID)
diff --git a/www/ungoogled-chromium/files/patch-services_network_network__context.h b/www/ungoogled-chromium/files/patch-services_network_network__context.h
index 753099c096d1..67a28365272f 100644
--- a/www/ungoogled-chromium/files/patch-services_network_network__context.h
+++ b/www/ungoogled-chromium/files/patch-services_network_network__context.h
@@ -1,6 +1,6 @@
---- services/network/network_context.h.orig 2023-12-23 12:33:28 UTC
+--- services/network/network_context.h.orig 2024-02-03 15:42:55 UTC
+++ services/network/network_context.h
-@@ -677,7 +677,7 @@ class COMPONENT_EXPORT(NETWORK_SERVICE) NetworkContext
+@@ -674,7 +674,7 @@ class COMPONENT_EXPORT(NETWORK_SERVICE) NetworkContext
public:
explicit NetworkContextHttpAuthPreferences(NetworkService* network_service);
~NetworkContextHttpAuthPreferences() override;
diff --git a/www/ungoogled-chromium/files/patch-services_network_network__service.cc b/www/ungoogled-chromium/files/patch-services_network_network__service.cc
index 075b8ca407fb..48c213feb1a6 100644
--- a/www/ungoogled-chromium/files/patch-services_network_network__service.cc
+++ b/www/ungoogled-chromium/files/patch-services_network_network__service.cc
@@ -1,6 +1,6 @@
---- services/network/network_service.cc.orig 2023-12-23 12:33:28 UTC
+--- services/network/network_service.cc.orig 2024-02-03 15:42:55 UTC
+++ services/network/network_service.cc
-@@ -96,7 +96,7 @@
+@@ -100,7 +100,7 @@
#include "third_party/boringssl/src/include/openssl/cpu.h"
#endif
@@ -9,7 +9,7 @@
BUILDFLAG(IS_CHROMEOS_LACROS)
#include "components/os_crypt/sync/key_storage_config_linux.h"
-@@ -998,7 +998,7 @@ void NetworkService::SetExplicitlyAllowedPorts(
+@@ -1070,7 +1070,7 @@ void NetworkService::SetExplicitlyAllowedPorts(
net::SetExplicitlyAllowedPorts(ports);
}
@@ -18,7 +18,7 @@
void NetworkService::SetGssapiLibraryLoadObserver(
mojo::PendingRemote<mojom::GssapiLibraryLoadObserver>
gssapi_library_load_observer) {
-@@ -1080,7 +1080,7 @@ NetworkService::CreateHttpAuthHandlerFactory(NetworkCo
+@@ -1160,7 +1160,7 @@ NetworkService::CreateHttpAuthHandlerFactory(NetworkCo
);
}
diff --git a/www/ungoogled-chromium/files/patch-services_network_network__service.h b/www/ungoogled-chromium/files/patch-services_network_network__service.h
index 6ee082caae20..a8becf1a8f8e 100644
--- a/www/ungoogled-chromium/files/patch-services_network_network__service.h
+++ b/www/ungoogled-chromium/files/patch-services_network_network__service.h
@@ -1,6 +1,6 @@
---- services/network/network_service.h.orig 2023-12-23 12:33:28 UTC
+--- services/network/network_service.h.orig 2024-02-03 15:42:55 UTC
+++ services/network/network_service.h
-@@ -225,7 +225,7 @@ class COMPONENT_EXPORT(NETWORK_SERVICE) NetworkService
+@@ -224,7 +224,7 @@ class COMPONENT_EXPORT(NETWORK_SERVICE) NetworkService
mojo::PendingReceiver<mojom::NetworkServiceTest> receiver) override;
void SetFirstPartySets(net::GlobalFirstPartySets sets) override;
void SetExplicitlyAllowedPorts(const std::vector<uint16_t>& ports) override;
@@ -9,7 +9,7 @@
void SetGssapiLibraryLoadObserver(
mojo::PendingRemote<mojom::GssapiLibraryLoadObserver>
gssapi_library_load_observer) override;
-@@ -252,7 +252,7 @@ class COMPONENT_EXPORT(NETWORK_SERVICE) NetworkService
+@@ -253,7 +253,7 @@ class COMPONENT_EXPORT(NETWORK_SERVICE) NetworkService
std::unique_ptr<net::HttpAuthHandlerFactory> CreateHttpAuthHandlerFactory(
NetworkContext* network_context);
@@ -18,7 +18,7 @@
// This is called just before a GSSAPI library may be loaded.
void OnBeforeGssapiLibraryLoad();
#endif // BUILDFLAG(IS_LINUX)
-@@ -494,7 +494,7 @@ class COMPONENT_EXPORT(NETWORK_SERVICE) NetworkService
+@@ -506,7 +506,7 @@ class COMPONENT_EXPORT(NETWORK_SERVICE) NetworkService
// leaking stale listeners between tests.
std::unique_ptr<net::NetworkChangeNotifier> mock_network_change_notifier_;
diff --git a/www/ungoogled-chromium/files/patch-services_network_public_mojom_BUILD.gn b/www/ungoogled-chromium/files/patch-services_network_public_mojom_BUILD.gn
index 5c9ef60e4ac0..4570ffbf4df4 100644
--- a/www/ungoogled-chromium/files/patch-services_network_public_mojom_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-services_network_public_mojom_BUILD.gn
@@ -1,6 +1,6 @@
---- services/network/public/mojom/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- services/network/public/mojom/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ services/network/public/mojom/BUILD.gn
-@@ -521,11 +521,11 @@ mojom("url_loader_base") {
+@@ -519,11 +519,11 @@ mojom("url_loader_base") {
}
enabled_features = []
@@ -14,7 +14,7 @@
# TODO(crbug.com/1431866): Remove this once is_linux in the mojom IDL does
# not include lacros.
enabled_features += [ "use_network_interface_change_listener" ]
-@@ -1442,7 +1442,7 @@ mojom("mojom") {
+@@ -1439,7 +1439,7 @@ mojom("mojom") {
}
}
diff --git a/www/ungoogled-chromium/files/patch-services_on__device__model_on__device__model__service.h b/www/ungoogled-chromium/files/patch-services_on__device__model_on__device__model__service.h
new file mode 100644
index 000000000000..97f59bbb6453
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-services_on__device__model_on__device__model__service.h
@@ -0,0 +1,22 @@
+--- services/on_device_model/on_device_model_service.h.orig 2024-02-03 15:42:55 UTC
++++ services/on_device_model/on_device_model_service.h
+@@ -18,6 +18,10 @@
+ #include "sandbox/policy/linux/sandbox_linux.h"
+ #endif
+
++#if BUILDFLAG(IS_BSD)
++#include "sandbox/policy/sandbox.h"
++#endif
++
+ namespace on_device_model {
+
+ class COMPONENT_EXPORT(ON_DEVICE_MODEL) OnDeviceModelService
+@@ -27,7 +31,7 @@ class COMPONENT_EXPORT(ON_DEVICE_MODEL) OnDeviceModelS
+ // These are defined separately in pre_sandbox_init.cc for explicit security
+ // review coverage.
+ [[nodiscard]] static bool PreSandboxInit();
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+ static void AddSandboxLinuxOptions(
+ sandbox::policy::SandboxLinux::Options& options);
+ #endif
diff --git a/www/ungoogled-chromium/files/patch-services_on__device__model_pre__sandbox__init.cc b/www/ungoogled-chromium/files/patch-services_on__device__model_pre__sandbox__init.cc
new file mode 100644
index 000000000000..d0c11516d9e9
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-services_on__device__model_pre__sandbox__init.cc
@@ -0,0 +1,38 @@
+--- services/on_device_model/pre_sandbox_init.cc.orig 2024-02-03 15:42:55 UTC
++++ services/on_device_model/pre_sandbox_init.cc
+@@ -14,7 +14,7 @@
+ #include "services/on_device_model/ml/chrome_ml.h" // nogncheck
+ #endif
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+ #include "gpu/config/gpu_info_collector.h" // nogncheck
+ #include "third_party/dawn/include/dawn/dawn_proc.h" // nogncheck
+ #include "third_party/dawn/include/dawn/native/DawnNative.h" // nogncheck
+@@ -25,7 +25,7 @@ namespace on_device_model {
+
+ namespace {
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+ constexpr uint32_t kVendorIdAMD = 0x1002;
+ constexpr uint32_t kVendorIdIntel = 0x8086;
+ constexpr uint32_t kVendorIdNVIDIA = 0x10DE;
+@@ -74,7 +74,7 @@ bool OnDeviceModelService::PreSandboxInit() {
+ }
+ #endif
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+ // Warm any relevant drivers before attempting to bring up the sandbox. For
+ // good measure we initialize a device instance for any adapter with an
+ // appropriate backend on top of any integrated or discrete GPU.
+@@ -100,7 +100,7 @@ bool OnDeviceModelService::PreSandboxInit() {
+ return true;
+ }
+
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+ // static
+ void OnDeviceModelService::AddSandboxLinuxOptions(
+ sandbox::policy::SandboxLinux::Options& options) {
diff --git a/www/ungoogled-chromium/files/patch-services_resource__coordinator_memory__instrumentation_queued__request__dispatcher.cc b/www/ungoogled-chromium/files/patch-services_resource__coordinator_memory__instrumentation_queued__request__dispatcher.cc
index fa9b47c8c519..59d670254adf 100644
--- a/www/ungoogled-chromium/files/patch-services_resource__coordinator_memory__instrumentation_queued__request__dispatcher.cc
+++ b/www/ungoogled-chromium/files/patch-services_resource__coordinator_memory__instrumentation_queued__request__dispatcher.cc
@@ -1,6 +1,6 @@
---- services/resource_coordinator/memory_instrumentation/queued_request_dispatcher.cc.orig 2023-10-13 13:20:35 UTC
+--- services/resource_coordinator/memory_instrumentation/queued_request_dispatcher.cc.orig 2024-02-03 15:42:55 UTC
+++ services/resource_coordinator/memory_instrumentation/queued_request_dispatcher.cc
-@@ -53,7 +53,7 @@ uint32_t CalculatePrivateFootprintKb(const mojom::RawO
+@@ -54,7 +54,7 @@ uint32_t CalculatePrivateFootprintKb(const mojom::RawO
uint32_t shared_resident_kb) {
DCHECK(os_dump.platform_private_footprint);
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || \
@@ -9,7 +9,7 @@
uint64_t rss_anon_bytes = os_dump.platform_private_footprint->rss_anon_bytes;
uint64_t vm_swap_bytes = os_dump.platform_private_footprint->vm_swap_bytes;
return (rss_anon_bytes + vm_swap_bytes) / 1024;
-@@ -81,7 +81,7 @@ memory_instrumentation::mojom::OSMemDumpPtr CreatePubl
+@@ -83,7 +83,7 @@ memory_instrumentation::mojom::OSMemDumpPtr CreatePubl
os_dump->is_peak_rss_resettable = internal_os_dump.is_peak_rss_resettable;
os_dump->private_footprint_kb =
CalculatePrivateFootprintKb(internal_os_dump, shared_resident_kb);
@@ -18,7 +18,7 @@
os_dump->private_footprint_swap_kb =
internal_os_dump.platform_private_footprint->vm_swap_bytes / 1024;
#endif
-@@ -217,7 +217,7 @@ void QueuedRequestDispatcher::SetUpAndDispatch(
+@@ -219,7 +219,7 @@ void QueuedRequestDispatcher::SetUpAndDispatch(
// On most platforms each process can dump data about their own process
// so ask each process to do so Linux is special see below.
@@ -27,7 +27,7 @@
request->pending_responses.insert({client_info.pid, ResponseType::kOSDump});
client->RequestOSMemoryDump(request->memory_map_option(),
{base::kNullProcessId},
-@@ -232,7 +232,7 @@ void QueuedRequestDispatcher::SetUpAndDispatch(
+@@ -234,7 +234,7 @@ void QueuedRequestDispatcher::SetUpAndDispatch(
// In some cases, OS stats can only be dumped from a privileged process to
// get around to sandboxing/selinux restrictions (see crbug.com/461788).
@@ -36,7 +36,7 @@
std::vector<base::ProcessId> pids;
mojom::ClientProcess* browser_client = nullptr;
base::ProcessId browser_client_pid = base::kNullProcessId;
-@@ -278,7 +278,7 @@ void QueuedRequestDispatcher::SetUpAndDispatchVmRegion
+@@ -280,7 +280,7 @@ void QueuedRequestDispatcher::SetUpAndDispatchVmRegion
const OsCallback& os_callback) {
// On Linux, OS stats can only be dumped from a privileged process to
// get around to sandboxing/selinux restrictions (see crbug.com/461788).
@@ -45,7 +45,7 @@
mojom::ClientProcess* browser_client = nullptr;
base::ProcessId browser_client_pid = 0;
for (const auto& client_info : clients) {
-@@ -328,7 +328,7 @@ QueuedRequestDispatcher::FinalizeVmRegionRequest(
+@@ -330,7 +330,7 @@ QueuedRequestDispatcher::FinalizeVmRegionRequest(
// each client process provides 1 OS dump, % the case where the client is
// disconnected mid dump.
OSMemDumpMap& extra_os_dumps = response.second.os_dumps;
@@ -54,7 +54,7 @@
for (auto& kv : extra_os_dumps) {
auto pid = kv.first == base::kNullProcessId ? original_pid : kv.first;
DCHECK(results.find(pid) == results.end());
-@@ -389,7 +389,7 @@ void QueuedRequestDispatcher::Finalize(QueuedRequest*
+@@ -391,7 +391,7 @@ void QueuedRequestDispatcher::Finalize(QueuedRequest*
// crash). In the latter case (OS_LINUX) we expect the full map to come
// from the browser process response.
OSMemDumpMap& extra_os_dumps = response.second.os_dumps;
diff --git a/www/ungoogled-chromium/files/patch-services_tracing_public_cpp_stack__sampling_tracing__sampler__profiler.cc b/www/ungoogled-chromium/files/patch-services_tracing_public_cpp_stack__sampling_tracing__sampler__profiler.cc
index d0e94811411c..8393a9ee8cdf 100644
--- a/www/ungoogled-chromium/files/patch-services_tracing_public_cpp_stack__sampling_tracing__sampler__profiler.cc
+++ b/www/ungoogled-chromium/files/patch-services_tracing_public_cpp_stack__sampling_tracing__sampler__profiler.cc
@@ -1,6 +1,6 @@
---- services/tracing/public/cpp/stack_sampling/tracing_sampler_profiler.cc.orig 2023-02-11 09:11:04 UTC
+--- services/tracing/public/cpp/stack_sampling/tracing_sampler_profiler.cc.orig 2024-02-03 15:42:55 UTC
+++ services/tracing/public/cpp/stack_sampling/tracing_sampler_profiler.cc
-@@ -37,7 +37,7 @@
+@@ -38,7 +38,7 @@
#include "third_party/perfetto/protos/perfetto/trace/track_event/process_descriptor.pbzero.h"
#include "third_party/perfetto/protos/perfetto/trace/track_event/thread_descriptor.pbzero.h"
diff --git a/www/ungoogled-chromium/files/patch-services_video__capture_video__capture__service__impl.cc b/www/ungoogled-chromium/files/patch-services_video__capture_video__capture__service__impl.cc
index 32ce52b7c1cc..dc08359e662b 100644
--- a/www/ungoogled-chromium/files/patch-services_video__capture_video__capture__service__impl.cc
+++ b/www/ungoogled-chromium/files/patch-services_video__capture_video__capture__service__impl.cc
@@ -1,6 +1,6 @@
---- services/video_capture/video_capture_service_impl.cc.orig 2023-10-13 13:20:35 UTC
+--- services/video_capture/video_capture_service_impl.cc.orig 2024-02-03 15:42:55 UTC
+++ services/video_capture/video_capture_service_impl.cc
-@@ -40,7 +40,7 @@
+@@ -41,7 +41,7 @@
#include "services/video_capture/lacros/device_factory_adapter_lacros.h"
#endif // BUILDFLAG(IS_CHROMEOS_LACROS)
@@ -9,7 +9,7 @@
#include "media/capture/capture_switches.h"
#include "media/capture/video/video_capture_gpu_channel_host.h"
#include "services/viz/public/cpp/gpu/context_provider_command_buffer.h"
-@@ -107,7 +107,7 @@ class VideoCaptureServiceImpl::GpuDependenciesContext
+@@ -108,7 +108,7 @@ class VideoCaptureServiceImpl::GpuDependenciesContext
this};
};
@@ -18,7 +18,7 @@
// Intended usage of this class is to create viz::Gpu in utility process and
// connect to viz::GpuClient of browser process, which will call to Gpu service.
// Also, this class holds the viz::ContextProvider to listen and monitor Gpu
-@@ -288,7 +288,7 @@ void VideoCaptureServiceImpl::LazyInitializeGpuDepende
+@@ -299,7 +299,7 @@ void VideoCaptureServiceImpl::LazyInitializeGpuDepende
if (!gpu_dependencies_context_)
gpu_dependencies_context_ = std::make_unique<GpuDependenciesContext>();
@@ -27,7 +27,7 @@
if (switches::IsVideoCaptureUseGpuMemoryBufferEnabled()) {
if (!viz_gpu_context_provider_) {
viz_gpu_context_provider_ =
-@@ -384,7 +384,7 @@ void VideoCaptureServiceImpl::OnGpuInfoUpdate(const CH
+@@ -409,7 +409,7 @@ void VideoCaptureServiceImpl::OnGpuInfoUpdate(const CH
}
#endif
diff --git a/www/ungoogled-chromium/files/patch-services_video__capture_video__capture__service__impl.h b/www/ungoogled-chromium/files/patch-services_video__capture_video__capture__service__impl.h
index 4ef5b7e38910..5495bfd96a7c 100644
--- a/www/ungoogled-chromium/files/patch-services_video__capture_video__capture__service__impl.h
+++ b/www/ungoogled-chromium/files/patch-services_video__capture_video__capture__service__impl.h
@@ -1,6 +1,6 @@
---- services/video_capture/video_capture_service_impl.h.orig 2023-09-17 07:59:53 UTC
+--- services/video_capture/video_capture_service_impl.h.orig 2024-02-03 15:42:55 UTC
+++ services/video_capture/video_capture_service_impl.h
-@@ -24,7 +24,7 @@
+@@ -26,7 +26,7 @@
#include "services/video_capture/ash/video_capture_device_factory_ash.h"
#endif // BUILDFLAG(IS_CHROMEOS_ASH)
@@ -9,7 +9,7 @@
#include "services/viz/public/cpp/gpu/gpu.h"
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN)
-@@ -62,7 +62,7 @@ class VideoCaptureServiceImpl : public mojom::VideoCap
+@@ -69,7 +69,7 @@ class VideoCaptureServiceImpl : public mojom::VideoCap
#if BUILDFLAG(IS_WIN)
void OnGpuInfoUpdate(const CHROME_LUID& luid) override;
#endif
@@ -18,7 +18,7 @@
void SetVizGpu(std::unique_ptr<viz::Gpu> viz_gpu);
#endif // BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_WIN)
private:
-@@ -87,7 +87,7 @@ class VideoCaptureServiceImpl : public mojom::VideoCap
+@@ -105,7 +105,7 @@ class VideoCaptureServiceImpl : public mojom::VideoCap
factory_receivers_ash_;
#endif
diff --git a/www/ungoogled-chromium/files/patch-third__party_abseil-cpp_absl_base_internal_raw__logging.cc b/www/ungoogled-chromium/files/patch-third__party_abseil-cpp_absl_base_internal_raw__logging.cc
deleted file mode 100644
index b61d636a13e8..000000000000
--- a/www/ungoogled-chromium/files/patch-third__party_abseil-cpp_absl_base_internal_raw__logging.cc
+++ /dev/null
@@ -1,16 +0,0 @@
---- third_party/abseil-cpp/absl/base/internal/raw_logging.cc.orig 2023-08-18 10:26:52 UTC
-+++ third_party/abseil-cpp/absl/base/internal/raw_logging.cc
-@@ -56,10 +56,12 @@
- // ABSL_HAVE_SYSCALL_WRITE is defined when the platform provides the syscall
- // syscall(SYS_write, /*int*/ fd, /*char* */ buf, /*size_t*/ len);
- // for low level operations that want to avoid libc.
--#if (defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__)) && \
-+#if (defined(__linux__) || defined(__FreeBSD__)) && \
- !defined(__ANDROID__)
- #include <sys/syscall.h>
- #define ABSL_HAVE_SYSCALL_WRITE 1
-+#define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
-+#elif defined(__OpenBSD__)
- #define ABSL_LOW_LEVEL_WRITE_SUPPORTED 1
- #else
- #undef ABSL_HAVE_SYSCALL_WRITE
diff --git a/www/ungoogled-chromium/files/patch-third__party_angle_BUILD.gn b/www/ungoogled-chromium/files/patch-third__party_angle_BUILD.gn
index 07393a2531c4..06bfed10b2ce 100644
--- a/www/ungoogled-chromium/files/patch-third__party_angle_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-third__party_angle_BUILD.gn
@@ -1,6 +1,6 @@
---- third_party/angle/BUILD.gn.orig 2023-10-13 13:20:35 UTC
+--- third_party/angle/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ third_party/angle/BUILD.gn
-@@ -325,7 +325,6 @@ config("extra_warnings") {
+@@ -326,7 +326,6 @@ config("extra_warnings") {
"-Wtautological-type-limit-compare",
"-Wundefined-reinterpret-cast",
"-Wunneeded-internal-declaration",
@@ -8,7 +8,7 @@
"-Wsuggest-destructor-override",
"-Wsuggest-override",
-@@ -496,7 +495,7 @@ template("angle_common_lib") {
+@@ -497,7 +496,7 @@ template("angle_common_lib") {
all_dependent_configs = [ ":angle_disable_pool_alloc" ]
}
@@ -17,7 +17,7 @@
libs = [ "dl" ]
}
-@@ -651,6 +650,9 @@ angle_static_library("angle_gpu_info_util") {
+@@ -652,6 +651,9 @@ angle_static_library("angle_gpu_info_util") {
"Xi",
"Xext",
]
diff --git a/www/ungoogled-chromium/files/patch-third__party_angle_src_common_system__utils.cpp b/www/ungoogled-chromium/files/patch-third__party_angle_src_common_system__utils.cpp
new file mode 100644
index 000000000000..b8ce2542ca1b
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-third__party_angle_src_common_system__utils.cpp
@@ -0,0 +1,11 @@
+--- third_party/angle/src/common/system_utils.cpp.orig 2024-02-03 15:42:55 UTC
++++ third_party/angle/src/common/system_utils.cpp
+@@ -25,7 +25,7 @@ namespace angle
+ {
+ std::string GetExecutableName()
+ {
+-#if defined(ANGLE_PLATFORM_ANDROID) && __ANDROID_API__ >= 21
++#if (defined(ANGLE_PLATFORM_ANDROID) && __ANDROID_API__ >= 21) || defined(ANGLE_PLATFORM_BSD)
+ // Support for "getprogname" function in bionic was introduced in L (API level 21)
+ const char *executableName = getprogname();
+ return (executableName) ? std::string(executableName) : "ANGLE";
diff --git a/www/ungoogled-chromium/files/patch-third__party_angle_src_common_system__utils__linux.cpp b/www/ungoogled-chromium/files/patch-third__party_angle_src_common_system__utils__linux.cpp
new file mode 100644
index 000000000000..b313113f4c63
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-third__party_angle_src_common_system__utils__linux.cpp
@@ -0,0 +1,35 @@
+--- third_party/angle/src/common/system_utils_linux.cpp.orig 2024-02-03 15:42:55 UTC
++++ third_party/angle/src/common/system_utils_linux.cpp
+@@ -15,8 +15,13 @@
+
+ #include <array>
+
++#if ANGLE_PLATFORM_OPENBSD
++#include <pthread_np.h>
++#endif
++
+ namespace angle
+ {
++#if ANGLE_PLATFORM_LINUX
+ std::string GetExecutablePath()
+ {
+ // We cannot use lstat to get the size of /proc/self/exe as it always returns 0
+@@ -32,6 +37,7 @@ std::string GetExecutablePath()
+ path[result] = '\0';
+ return path;
+ }
++#endif
+
+ std::string GetExecutableDirectory()
+ {
+@@ -56,6 +62,10 @@ void SetCurrentThreadName(const char *name)
+ {
+ // There's a 15-character (16 including '\0') limit. If the name is too big (and ERANGE is
+ // returned), just ignore the name.
++#if ANGLE_PLATFORM_OPENBSD
++ pthread_set_name_np(pthread_self(), name);
++#else
+ pthread_setname_np(pthread_self(), name);
++#endif
+ }
+ } // namespace angle
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_common_renderer__preferences_renderer__preferences__mojom__traits.cc b/www/ungoogled-chromium/files/patch-third__party_blink_common_renderer__preferences_renderer__preferences__mojom__traits.cc
index 9d51e9534a48..6607453b7ff2 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_common_renderer__preferences_renderer__preferences__mojom__traits.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_common_renderer__preferences_renderer__preferences__mojom__traits.cc
@@ -1,6 +1,6 @@
---- third_party/blink/common/renderer_preferences/renderer_preferences_mojom_traits.cc.orig 2023-08-18 10:26:52 UTC
+--- third_party/blink/common/renderer_preferences/renderer_preferences_mojom_traits.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/common/renderer_preferences/renderer_preferences_mojom_traits.cc
-@@ -66,7 +66,7 @@ bool StructTraits<blink::mojom::RendererPreferencesDat
+@@ -63,7 +63,7 @@ bool StructTraits<blink::mojom::RendererPreferencesDat
out->send_subresource_notification = data.send_subresource_notification();
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_public_common_renderer__preferences_renderer__preferences.h b/www/ungoogled-chromium/files/patch-third__party_blink_public_common_renderer__preferences_renderer__preferences.h
index 1f6d9549da41..4c327174072f 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_public_common_renderer__preferences_renderer__preferences.h
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_public_common_renderer__preferences_renderer__preferences.h
@@ -1,6 +1,6 @@
---- third_party/blink/public/common/renderer_preferences/renderer_preferences.h.orig 2023-08-18 10:26:52 UTC
+--- third_party/blink/public/common/renderer_preferences/renderer_preferences.h.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/public/common/renderer_preferences/renderer_preferences.h
-@@ -64,7 +64,7 @@ struct BLINK_COMMON_EXPORT RendererPreferences {
+@@ -63,7 +63,7 @@ struct BLINK_COMMON_EXPORT RendererPreferences {
UserAgentOverride user_agent_override;
std::string accept_languages;
bool send_subresource_notification{false};
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_public_common_renderer__preferences_renderer__preferences__mojom__traits.h b/www/ungoogled-chromium/files/patch-third__party_blink_public_common_renderer__preferences_renderer__preferences__mojom__traits.h
index 9f9d575306ff..d41ba2c9be4d 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_public_common_renderer__preferences_renderer__preferences__mojom__traits.h
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_public_common_renderer__preferences_renderer__preferences__mojom__traits.h
@@ -1,6 +1,6 @@
---- third_party/blink/public/common/renderer_preferences/renderer_preferences_mojom_traits.h.orig 2023-08-18 10:26:52 UTC
+--- third_party/blink/public/common/renderer_preferences/renderer_preferences_mojom_traits.h.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/public/common/renderer_preferences/renderer_preferences_mojom_traits.h
-@@ -162,7 +162,7 @@ struct BLINK_COMMON_EXPORT
+@@ -157,7 +157,7 @@ struct BLINK_COMMON_EXPORT
return data.send_subresource_notification;
}
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_public_platform_platform.h b/www/ungoogled-chromium/files/patch-third__party_blink_public_platform_platform.h
index d1fbba07d061..e75f239e01df 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_public_platform_platform.h
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_public_platform_platform.h
@@ -1,6 +1,6 @@
---- third_party/blink/public/platform/platform.h.orig 2023-11-04 07:08:51 UTC
+--- third_party/blink/public/platform/platform.h.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/public/platform/platform.h
-@@ -344,7 +344,7 @@ class BLINK_PLATFORM_EXPORT Platform {
+@@ -345,7 +345,7 @@ class BLINK_PLATFORM_EXPORT Platform {
return nullptr;
}
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_public_platform_web__vector.h b/www/ungoogled-chromium/files/patch-third__party_blink_public_platform_web__vector.h
index 135e7aeb131e..9297af44a15d 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_public_platform_web__vector.h
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_public_platform_web__vector.h
@@ -1,6 +1,6 @@
---- third_party/blink/public/platform/web_vector.h.orig 2022-10-01 07:40:07 UTC
+--- third_party/blink/public/platform/web_vector.h.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/public/platform/web_vector.h
-@@ -81,7 +81,7 @@ class WebVector {
+@@ -86,7 +86,7 @@ class WebVector {
// The vector can be populated using reserve() and emplace_back().
WebVector() = default;
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_controller_blink__initializer.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_controller_blink__initializer.cc
index ae38a7b88e74..cea470a6d52b 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_controller_blink__initializer.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_controller_blink__initializer.cc
@@ -1,4 +1,4 @@
---- third_party/blink/renderer/controller/blink_initializer.cc.orig 2023-12-23 12:33:28 UTC
+--- third_party/blink/renderer/controller/blink_initializer.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/renderer/controller/blink_initializer.cc
@@ -78,12 +78,12 @@
#include "third_party/blink/renderer/controller/private_memory_footprint_provider.h"
@@ -15,7 +15,7 @@
#include "third_party/blink/renderer/controller/highest_pmf_reporter.h"
#include "third_party/blink/renderer/controller/user_level_memory_pressure_signal_generator.h"
#endif
-@@ -232,7 +232,7 @@ void BlinkInitializer::RegisterInterfaces(mojo::Binder
+@@ -251,7 +251,7 @@ void BlinkInitializer::RegisterInterfaces(mojo::Binder
main_thread_task_runner);
#endif
@@ -24,7 +24,7 @@
binders.Add<mojom::blink::MemoryUsageMonitorLinux>(
ConvertToBaseRepeatingCallback(
CrossThreadBindRepeating(&MemoryUsageMonitorPosix::Bind)),
-@@ -271,7 +271,7 @@ void BlinkInitializer::RegisterMemoryWatchers(Platform
+@@ -290,7 +290,7 @@ void BlinkInitializer::RegisterMemoryWatchers(Platform
#endif
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID) || \
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_exported_web__view__impl.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_exported_web__view__impl.cc
index bf36a9571a5b..05cca7baf218 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_exported_web__view__impl.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_exported_web__view__impl.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/core/exported/web_view_impl.cc.orig 2023-12-23 12:33:28 UTC
+--- third_party/blink/renderer/core/exported/web_view_impl.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/renderer/core/exported/web_view_impl.cc
-@@ -424,7 +424,7 @@ SkFontHinting RendererPreferencesToSkiaHinting(
+@@ -425,7 +425,7 @@ SkFontHinting RendererPreferencesToSkiaHinting(
const blink::RendererPreferences& prefs) {
// TODO(crbug.com/1052397): Revisit once build flag switch of lacros-chrome is
// complete.
@@ -9,7 +9,7 @@
if (!prefs.should_antialias_text) {
// When anti-aliasing is off, GTK maps all non-zero hinting settings to
// 'Normal' hinting so we do the same. Otherwise, folks who have 'Slight'
-@@ -3351,7 +3351,7 @@ void WebViewImpl::UpdateFontRenderingFromRendererPrefs
+@@ -3360,7 +3360,7 @@ void WebViewImpl::UpdateFontRenderingFromRendererPrefs
renderer_preferences_.use_subpixel_positioning);
// TODO(crbug.com/1052397): Revisit once build flag switch of lacros-chrome is
// complete.
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_frame_web__frame__test.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_frame_web__frame__test.cc
index a0baeb575a87..1220c1003118 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_frame_web__frame__test.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_frame_web__frame__test.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/core/frame/web_frame_test.cc.orig 2023-12-23 12:33:28 UTC
+--- third_party/blink/renderer/core/frame/web_frame_test.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/renderer/core/frame/web_frame_test.cc
-@@ -6468,7 +6468,7 @@ TEST_F(WebFrameTest, DISABLED_PositionForPointTest) {
+@@ -6465,7 +6465,7 @@ TEST_F(WebFrameTest, DISABLED_PositionForPointTest) {
}
#if BUILDFLAG(IS_FUCHSIA) || BUILDFLAG(IS_APPLE) || BUILDFLAG(IS_LINUX) || \
@@ -9,7 +9,7 @@
// TODO(crbug.com/1090246): Fix these tests on Fuchsia and re-enable.
// TODO(crbug.com/1317375): Build these tests on all platforms.
#define MAYBE_SelectRangeStaysHorizontallyAlignedWhenMoved \
-@@ -6877,7 +6877,7 @@ TEST_F(CompositedSelectionBoundsTest, LargeSelectionSc
+@@ -6874,7 +6874,7 @@ TEST_F(CompositedSelectionBoundsTest, LargeSelectionSc
TEST_F(CompositedSelectionBoundsTest, LargeSelectionNoScroll) {
RunTest("composited_selection_bounds_large_selection_noscroll.html");
}
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_html_canvas_canvas__async__blob__creator.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_html_canvas_canvas__async__blob__creator.cc
index 4002c561aeab..8b3648e02a6c 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_html_canvas_canvas__async__blob__creator.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_html_canvas_canvas__async__blob__creator.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/core/html/canvas/canvas_async_blob_creator.cc.orig 2023-10-13 13:20:35 UTC
+--- third_party/blink/renderer/core/html/canvas/canvas_async_blob_creator.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/renderer/core/html/canvas/canvas_async_blob_creator.cc
-@@ -48,7 +48,7 @@ constexpr base::TimeDelta kEncodeRowSlackBeforeDeadlin
+@@ -50,7 +50,7 @@ constexpr base::TimeDelta kEncodeRowSlackBeforeDeadlin
/* The value is based on user statistics on Nov 2017. */
#if (BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_MAC) || \
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_inspector_inspector__memory__agent.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_inspector_inspector__memory__agent.cc
index a83a8218ef78..2e6144cc35c9 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_inspector_inspector__memory__agent.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_inspector_inspector__memory__agent.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/core/inspector/inspector_memory_agent.cc.orig 2023-11-04 07:08:51 UTC
+--- third_party/blink/renderer/core/inspector/inspector_memory_agent.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/renderer/core/inspector/inspector_memory_agent.cc
-@@ -190,7 +190,7 @@ InspectorMemoryAgent::GetSamplingProfileById(uint32_t
+@@ -192,7 +192,7 @@ InspectorMemoryAgent::GetSamplingProfileById(uint32_t
Vector<String> InspectorMemoryAgent::Symbolize(
const WebVector<const void*>& addresses) {
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_layout__view.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_layout__view.cc
index f019344f4cdd..56da89bb067f 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_layout__view.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_layout__view.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/core/layout/layout_view.cc.orig 2023-12-23 12:33:28 UTC
+--- third_party/blink/renderer/core/layout/layout_view.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/renderer/core/layout/layout_view.cc
-@@ -73,7 +73,7 @@
+@@ -72,7 +72,7 @@
#include "ui/gfx/geometry/quad_f.h"
#include "ui/gfx/geometry/size_conversions.h"
@@ -9,8 +9,8 @@
#include "third_party/blink/renderer/platform/fonts/font_cache.h"
#endif
-@@ -808,7 +808,7 @@ void LayoutView::UpdateLayout() {
- fragmentation_context_.Clear();
+@@ -813,7 +813,7 @@ void LayoutView::UpdateLayout() {
+ intrinsic_logical_widths_ = LogicalWidth();
}
-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_ng_physical__fragment__rare__data.h b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_ng_physical__fragment__rare__data.h
deleted file mode 100644
index 36e7d6193ed9..000000000000
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_layout_ng_physical__fragment__rare__data.h
+++ /dev/null
@@ -1,10 +0,0 @@
---- third_party/blink/renderer/core/layout/ng/physical_fragment_rare_data.h.orig 2023-09-17 07:59:53 UTC
-+++ third_party/blink/renderer/core/layout/ng/physical_fragment_rare_data.h
-@@ -5,6 +5,7 @@
- #ifndef THIRD_PARTY_BLINK_RENDERER_CORE_LAYOUT_NG_PHYSICAL_FRAGMENT_RARE_DATA_H_
- #define THIRD_PARTY_BLINK_RENDERER_CORE_LAYOUT_NG_PHYSICAL_FRAGMENT_RARE_DATA_H_
-
-+#include <bit>
- #include <climits>
-
- #include "third_party/blink/renderer/core/layout/geometry/logical_rect.h"
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_scroll_scrollbar__theme__aura.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_scroll_scrollbar__theme__aura.cc
index 138df21d9064..efe8d9bdf4a4 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_scroll_scrollbar__theme__aura.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_scroll_scrollbar__theme__aura.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/core/scroll/scrollbar_theme_aura.cc.orig 2023-12-23 12:33:28 UTC
+--- third_party/blink/renderer/core/scroll/scrollbar_theme_aura.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/renderer/core/scroll/scrollbar_theme_aura.cc
-@@ -157,7 +157,7 @@ bool ScrollbarThemeAura::SupportsDragSnapBack() const
+@@ -150,7 +150,7 @@ bool ScrollbarThemeAura::SupportsDragSnapBack() const
// is true for at least GTK and QT apps).
// TODO(crbug.com/1052397): Revisit once build flag switch of lacros-chrome is
// complete.
@@ -9,7 +9,7 @@
return false;
#else
return true;
-@@ -360,7 +360,7 @@ bool ScrollbarThemeAura::ShouldCenterOnThumb(const Scr
+@@ -354,7 +354,7 @@ bool ScrollbarThemeAura::ShouldCenterOnThumb(const Scr
const WebMouseEvent& event) {
// TODO(crbug.com/1052397): Revisit once build flag switch of lacros-chrome is
// complete.
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_xml_xslt__processor.h b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_xml_xslt__processor.h
new file mode 100644
index 000000000000..c3359d6b9be6
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_xml_xslt__processor.h
@@ -0,0 +1,14 @@
+--- third_party/blink/renderer/core/xml/xslt_processor.h.orig 2024-02-03 15:42:55 UTC
++++ third_party/blink/renderer/core/xml/xslt_processor.h
+@@ -77,7 +77,11 @@ class XSLTProcessor final : public ScriptWrappable {
+
+ void reset();
+
++#if (LIBXML_VERSION > 21106)
+ static void ParseErrorFunc(void* user_data, const xmlError*);
++#else
++ static void ParseErrorFunc(void* user_data, xmlError*);
++#endif
+ static void GenericErrorFunc(void* user_data, const char* msg, ...);
+
+ // Only for libXSLT callbacks
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_xml_xslt__processor__libxslt.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_xml_xslt__processor__libxslt.cc
new file mode 100644
index 000000000000..5af5353c3fcc
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_core_xml_xslt__processor__libxslt.cc
@@ -0,0 +1,14 @@
+--- third_party/blink/renderer/core/xml/xslt_processor_libxslt.cc.orig 2024-02-03 15:42:55 UTC
++++ third_party/blink/renderer/core/xml/xslt_processor_libxslt.cc
+@@ -66,7 +66,11 @@ void XSLTProcessor::GenericErrorFunc(void*, const char
+ // It would be nice to do something with this error message.
+ }
+
++#if (LIBXML_VERSION > 21106)
+ void XSLTProcessor::ParseErrorFunc(void* user_data, const xmlError* error) {
++#else
++void XSLTProcessor::ParseErrorFunc(void* user_data, xmlError* error) {
++#endif
+ FrameConsole* console = static_cast<FrameConsole*>(user_data);
+ if (!console)
+ return;
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_ml_webnn_features.gni b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_ml_webnn_features.gni
index 7399ed3eb028..aac4ab7dd831 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_ml_webnn_features.gni
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_ml_webnn_features.gni
@@ -1,11 +1,11 @@
---- third_party/blink/renderer/modules/ml/webnn/features.gni.orig 2023-06-05 19:39:05 UTC
+--- third_party/blink/renderer/modules/ml/webnn/features.gni.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/renderer/modules/ml/webnn/features.gni
-@@ -7,7 +7,7 @@ declare_args() {
+@@ -6,7 +6,7 @@ import("//build/config/chrome_build.gni")
+ declare_args() {
# This enables building WebNN with XNNPACK. Currently only available for
- # Windows and Linux on x64 or x86.
- build_webnn_with_xnnpack =
-- (is_linux || is_win) && (current_cpu == "x64" || current_cpu == "x86")
-+ !is_bsd && (is_win && (current_cpu == "x64" || current_cpu == "x86"))
+ # Windows, macOS and Linux on x64, x86 and arm64.
+- build_webnn_with_xnnpack = (is_linux || is_win || is_mac) &&
++ build_webnn_with_xnnpack = !is_bsd && (is_linux || is_win || is_mac) &&
+ (current_cpu == "x64" || current_cpu == "x86" ||
+ (current_cpu == "arm64" && !is_win))
- # This build flag enables WebNN on ChromeOS platform to access hardware
- # acceleration by using ModelLoader mojo interface.
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_ml_webnn_ml__graph__xnnpack.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_ml_webnn_ml__graph__xnnpack.cc
index 656405787501..c8b381969e51 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_ml_webnn_ml__graph__xnnpack.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_ml_webnn_ml__graph__xnnpack.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/modules/ml/webnn/ml_graph_xnnpack.cc.orig 2023-12-23 12:33:28 UTC
+--- third_party/blink/renderer/modules/ml/webnn/ml_graph_xnnpack.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/renderer/modules/ml/webnn/ml_graph_xnnpack.cc
-@@ -196,7 +196,7 @@ class SharedXnnpackContext : public ThreadSafeRefCount
+@@ -197,7 +197,7 @@ class SharedXnnpackContext : public ThreadSafeRefCount
~SharedXnnpackContext() {
base::AutoLock auto_lock(SharedXnnpackContextLock());
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgl_webgl__rendering__context__base.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgl_webgl__rendering__context__base.cc
index ee261f45ca31..f51dfded8c30 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgl_webgl__rendering__context__base.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgl_webgl__rendering__context__base.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/modules/webgl/webgl_rendering_context_base.cc.orig 2023-12-23 12:33:28 UTC
+--- third_party/blink/renderer/modules/webgl/webgl_rendering_context_base.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/renderer/modules/webgl/webgl_rendering_context_base.cc
-@@ -6312,7 +6312,7 @@ void WebGLRenderingContextBase::TexImageHelperMediaVid
+@@ -6358,7 +6358,7 @@ void WebGLRenderingContextBase::TexImageHelperMediaVid
constexpr bool kAllowZeroCopyImages = true;
#endif
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgpu_gpu__queue.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgpu_gpu__queue.cc
index a90e004c9d99..8a50945f5409 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgpu_gpu__queue.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_modules_webgpu_gpu__queue.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/modules/webgpu/gpu_queue.cc.orig 2023-12-23 12:33:28 UTC
+--- third_party/blink/renderer/modules/webgpu/gpu_queue.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/renderer/modules/webgpu/gpu_queue.cc
-@@ -746,7 +746,7 @@ bool GPUQueue::CopyFromCanvasSourceImage(
+@@ -755,7 +755,7 @@ bool GPUQueue::CopyFromCanvasSourceImage(
// on linux platform.
// TODO(crbug.com/1424119): using a webgpu mailbox texture on the OpenGLES
// backend is failing for unknown reasons.
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_BUILD.gn b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_BUILD.gn
index f67ecc10916a..1388b8f3fc04 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_BUILD.gn
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/platform/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- third_party/blink/renderer/platform/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/renderer/platform/BUILD.gn
-@@ -1928,7 +1928,7 @@ static_library("test_support") {
+@@ -1933,7 +1933,7 @@ static_library("test_support") {
]
# fuzzed_data_provider may not work with a custom toolchain.
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_fonts_font__cache.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_fonts_font__cache.cc
index 91f5f6742a86..34e8b6d1169a 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_fonts_font__cache.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_fonts_font__cache.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/platform/fonts/font_cache.cc.orig 2023-08-18 10:26:52 UTC
+--- third_party/blink/renderer/platform/fonts/font_cache.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/renderer/platform/fonts/font_cache.cc
-@@ -72,7 +72,7 @@ extern const char kNotoColorEmojiCompat[] = "Noto Colo
+@@ -73,7 +73,7 @@ extern const char kNotoColorEmojiCompat[] = "Noto Colo
SkFontMgr* FontCache::static_font_manager_ = nullptr;
@@ -9,7 +9,7 @@
float FontCache::device_scale_factor_ = 1.0;
#endif
-@@ -118,7 +118,7 @@ FontPlatformData* FontCache::SystemFontPlatformData(
+@@ -119,7 +119,7 @@ FontPlatformData* FontCache::SystemFontPlatformData(
const FontDescription& font_description) {
const AtomicString& family = FontCache::SystemFontFamily();
#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_FUCHSIA) || \
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_fonts_skia_font__cache__skia.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_fonts_skia_font__cache__skia.cc
index 782b17bad5b7..1fdabbbb4f2f 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_fonts_skia_font__cache__skia.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_fonts_skia_font__cache__skia.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/platform/fonts/skia/font_cache_skia.cc.orig 2023-05-05 12:12:41 UTC
+--- third_party/blink/renderer/platform/fonts/skia/font_cache_skia.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/renderer/platform/fonts/skia/font_cache_skia.cc
-@@ -64,7 +64,7 @@ AtomicString ToAtomicString(const SkString& str) {
+@@ -65,7 +65,7 @@ AtomicString ToAtomicString(const SkString& str) {
return AtomicString::FromUTF8(str.c_str(), str.size());
}
@@ -9,7 +9,7 @@
// This function is called on android or when we are emulating android fonts on
// linux and the embedder has overriden the default fontManager with
// WebFontRendering::setSkiaFontMgr.
-@@ -246,7 +246,7 @@ std::unique_ptr<FontPlatformData> FontCache::CreateFon
+@@ -247,7 +247,7 @@ std::unique_ptr<FontPlatformData> FontCache::CreateFon
std::string name;
sk_sp<SkTypeface> typeface;
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_graphics_video__frame__submitter.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_graphics_video__frame__submitter.cc
index f81d045e7d3f..2d89b9744eff 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_graphics_video__frame__submitter.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_graphics_video__frame__submitter.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/platform/graphics/video_frame_submitter.cc.orig 2023-11-04 07:08:51 UTC
+--- third_party/blink/renderer/platform/graphics/video_frame_submitter.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/renderer/platform/graphics/video_frame_submitter.cc
-@@ -358,7 +358,7 @@ void VideoFrameSubmitter::OnBeginFrame(
+@@ -365,7 +365,7 @@ void VideoFrameSubmitter::OnBeginFrame(
continue;
auto& feedback =
timing_details.find(frame_token)->value.presentation_feedback;
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_peerconnection_rtc__video__encoder__factory.cc b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_peerconnection_rtc__video__encoder__factory.cc
index 3792bf7405b1..fe0c0b4e67f3 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_peerconnection_rtc__video__encoder__factory.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_peerconnection_rtc__video__encoder__factory.cc
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/platform/peerconnection/rtc_video_encoder_factory.cc.orig 2023-10-13 13:20:35 UTC
+--- third_party/blink/renderer/platform/peerconnection/rtc_video_encoder_factory.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/renderer/platform/peerconnection/rtc_video_encoder_factory.cc
-@@ -198,12 +198,12 @@ SupportedFormats GetSupportedFormatsInternal(
+@@ -202,12 +202,12 @@ SupportedFormats GetSupportedFormatsInternal(
supported_formats.scalability_modes.push_back(profile.scalability_modes);
supported_formats.sdp_formats.push_back(std::move(*format));
diff --git a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_runtime__enabled__features.json5 b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_runtime__enabled__features.json5
index fa96b4e88617..5cfc09f992a2 100644
--- a/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_runtime__enabled__features.json5
+++ b/www/ungoogled-chromium/files/patch-third__party_blink_renderer_platform_runtime__enabled__features.json5
@@ -1,6 +1,6 @@
---- third_party/blink/renderer/platform/runtime_enabled_features.json5.orig 2023-12-23 12:33:28 UTC
+--- third_party/blink/renderer/platform/runtime_enabled_features.json5.orig 2024-02-03 15:42:55 UTC
+++ third_party/blink/renderer/platform/runtime_enabled_features.json5
-@@ -2006,7 +2006,7 @@
+@@ -1930,7 +1930,7 @@
base_feature_status: "enabled",
copied_from_base_feature_if: "overridden",
origin_trial_feature_name: "FullscreenPopupWindows",
@@ -9,7 +9,7 @@
},
{
name: "GamepadButtonAxisEvents",
-@@ -2929,7 +2929,7 @@
+@@ -2828,7 +2828,7 @@
name: "PaymentHandlerMinimalHeaderUX",
origin_trial_feature_name: "PaymentHandlerMinimalHeaderUX",
origin_trial_allows_third_party: true,
@@ -18,16 +18,16 @@
status: "stable",
},
{
-@@ -3976,7 +3976,7 @@
+@@ -3842,7 +3842,7 @@
name: "UnrestrictedSharedArrayBuffer",
base_feature: "none",
origin_trial_feature_name: "UnrestrictedSharedArrayBuffer",
- origin_trial_os: ["win", "mac", "linux", "fuchsia", "chromeos"],
+ origin_trial_os: ["win", "mac", "linux", "fuchsia", "chromeos", "openbsd", "freebsd"],
},
- {
- // This flag makes IDL reflected attributes with the "URL" IDL attribute
-@@ -4194,12 +4194,12 @@
+ // Enables using policy-controlled feature "usb-unrestricted" to allow
+ // isolated context to access protected USB interface classes and to
+@@ -4072,12 +4072,12 @@
status: "experimental",
base_feature: "none",
origin_trial_feature_name: "WebAppUrlHandling",
diff --git a/www/ungoogled-chromium/files/patch-third__party_crashpad_crashpad_util_misc_uuid.cc b/www/ungoogled-chromium/files/patch-third__party_crashpad_crashpad_util_misc_uuid.cc
index 2786281df1db..f5c31f5d5c0d 100644
--- a/www/ungoogled-chromium/files/patch-third__party_crashpad_crashpad_util_misc_uuid.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_crashpad_crashpad_util_misc_uuid.cc
@@ -1,6 +1,6 @@
---- third_party/crashpad/crashpad/util/misc/uuid.cc.orig 2022-10-01 07:40:07 UTC
+--- third_party/crashpad/crashpad/util/misc/uuid.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/crashpad/crashpad/util/misc/uuid.cc
-@@ -101,7 +101,7 @@ bool UUID::InitializeWithNew() {
+@@ -102,7 +102,7 @@ bool UUID::InitializeWithNew() {
InitializeFromBytes(uuid);
return true;
#elif BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
diff --git a/www/ungoogled-chromium/files/patch-third__party_distributed__point__functions_BUILD.gn b/www/ungoogled-chromium/files/patch-third__party_distributed__point__functions_BUILD.gn
deleted file mode 100644
index 531d1cf719b3..000000000000
--- a/www/ungoogled-chromium/files/patch-third__party_distributed__point__functions_BUILD.gn
+++ /dev/null
@@ -1,12 +0,0 @@
---- third_party/distributed_point_functions/BUILD.gn.orig 2023-02-11 09:11:04 UTC
-+++ third_party/distributed_point_functions/BUILD.gn
-@@ -61,6 +61,9 @@ source_set("distributed_point_functions") {
- configs -= [ "//build/config/compiler:chromium_code" ]
- configs += [ "//build/config/compiler:no_chromium_code" ]
-
-+ # XXX clang13 crashes with optimizations
-+ configs += [ "//build/config/compiler:no_optimize" ]
-+
- public_configs = [ ":distributed_point_functions_includes" ]
- }
-
diff --git a/www/ungoogled-chromium/files/patch-third__party_ffmpeg_configure b/www/ungoogled-chromium/files/patch-third__party_ffmpeg_configure
index cf279d0d359f..7ffc26518b01 100644
--- a/www/ungoogled-chromium/files/patch-third__party_ffmpeg_configure
+++ b/www/ungoogled-chromium/files/patch-third__party_ffmpeg_configure
@@ -1,6 +1,6 @@
---- third_party/ffmpeg/configure.orig 2023-12-23 12:33:28 UTC
+--- third_party/ffmpeg/configure.orig 2024-02-03 15:42:55 UTC
+++ third_party/ffmpeg/configure
-@@ -5604,6 +5604,7 @@ case $target_os in
+@@ -5615,6 +5615,7 @@ case $target_os in
disable symver
;;
freebsd)
diff --git a/www/ungoogled-chromium/files/patch-third__party_ffmpeg_libavutil_x86_x86inc.asm b/www/ungoogled-chromium/files/patch-third__party_ffmpeg_libavutil_x86_x86inc.asm
index 643c1091b5e9..19bd8bd907c0 100644
--- a/www/ungoogled-chromium/files/patch-third__party_ffmpeg_libavutil_x86_x86inc.asm
+++ b/www/ungoogled-chromium/files/patch-third__party_ffmpeg_libavutil_x86_x86inc.asm
@@ -1,4 +1,4 @@
---- third_party/ffmpeg/libavutil/x86/x86inc.asm.orig 2023-07-21 09:49:17 UTC
+--- third_party/ffmpeg/libavutil/x86/x86inc.asm.orig 2024-02-03 15:42:55 UTC
+++ third_party/ffmpeg/libavutil/x86/x86inc.asm
@@ -53,6 +53,12 @@
%endif
@@ -13,7 +13,7 @@
%define WIN64 0
%define UNIX64 0
%if ARCH_X86_64
-@@ -768,6 +774,7 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg,
+@@ -778,6 +784,7 @@ BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg,
%endif
align function_align
%2:
diff --git a/www/ungoogled-chromium/files/patch-third__party_leveldatabase_env__chromium.cc b/www/ungoogled-chromium/files/patch-third__party_leveldatabase_env__chromium.cc
index 2fc78eb5c6e3..6f5eaa9c7210 100644
--- a/www/ungoogled-chromium/files/patch-third__party_leveldatabase_env__chromium.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_leveldatabase_env__chromium.cc
@@ -1,6 +1,6 @@
---- third_party/leveldatabase/env_chromium.cc.orig 2023-09-17 07:59:53 UTC
+--- third_party/leveldatabase/env_chromium.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/leveldatabase/env_chromium.cc
-@@ -320,7 +320,8 @@ ChromiumWritableFile::ChromiumWritableFile(const std::
+@@ -325,7 +325,8 @@ ChromiumWritableFile::ChromiumWritableFile(const std::
Status ChromiumWritableFile::SyncParent() {
TRACE_EVENT0("leveldb", "SyncParent");
diff --git a/www/ungoogled-chromium/files/patch-third__party_libc++_src_src_chrono.cpp b/www/ungoogled-chromium/files/patch-third__party_libc++_src_src_chrono.cpp
new file mode 100644
index 000000000000..1c934631045c
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-third__party_libc++_src_src_chrono.cpp
@@ -0,0 +1,11 @@
+--- third_party/libc++/src/src/chrono.cpp.orig 2024-02-03 15:42:55 UTC
++++ third_party/libc++/src/src/chrono.cpp
+@@ -31,7 +31,7 @@
+ # include <sys/time.h> // for gettimeofday and timeval
+ #endif
+
+-#if defined(__APPLE__) || defined (__gnu_hurd__) || (defined(_POSIX_TIMERS) && _POSIX_TIMERS > 0)
++#if defined(__APPLE__) || defined (__gnu_hurd__) || (defined(_POSIX_TIMERS) && _POSIX_TIMERS > 0) || defined(__OpenBSD__)
+ # define _LIBCPP_HAS_CLOCK_GETTIME
+ #endif
+
diff --git a/www/ungoogled-chromium/files/patch-third__party_libc++_src_src_filesystem_filesystem__clock.cpp b/www/ungoogled-chromium/files/patch-third__party_libc++_src_src_filesystem_filesystem__clock.cpp
new file mode 100644
index 000000000000..75e982a50ff4
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-third__party_libc++_src_src_filesystem_filesystem__clock.cpp
@@ -0,0 +1,11 @@
+--- third_party/libc++/src/src/filesystem/filesystem_clock.cpp.orig 2024-02-03 15:42:55 UTC
++++ third_party/libc++/src/src/filesystem/filesystem_clock.cpp
+@@ -29,7 +29,7 @@
+ # include <sys/time.h> // for gettimeofday and timeval
+ #endif
+
+-#if defined(__APPLE__) || defined (__gnu_hurd__) || (defined(_POSIX_TIMERS) && _POSIX_TIMERS > 0)
++#if defined(__APPLE__) || defined (__gnu_hurd__) || (defined(_POSIX_TIMERS) && _POSIX_TIMERS > 0) || defined(__OpenBSD__)
+ # define _LIBCPP_HAS_CLOCK_GETTIME
+ #endif
+
diff --git a/www/ungoogled-chromium/files/patch-third__party_libc++abi_src_src_cxa__guard__impl.h b/www/ungoogled-chromium/files/patch-third__party_libc++abi_src_src_cxa__guard__impl.h
new file mode 100644
index 000000000000..f9dfa8f7ee68
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-third__party_libc++abi_src_src_cxa__guard__impl.h
@@ -0,0 +1,24 @@
+--- third_party/libc++abi/src/src/cxa_guard_impl.h.orig 2024-02-03 15:42:55 UTC
++++ third_party/libc++abi/src/src/cxa_guard_impl.h
+@@ -411,7 +411,20 @@ struct InitByteGlobalMutex { (private)
+ // Futex Implementation
+ //===----------------------------------------------------------------------===//
+
+-#if defined(SYS_futex)
++#ifdef __OpenBSD__
++#include <sys/futex.h>
++
++void PlatformFutexWait(int* addr, int expect) {
++ constexpr int WAIT = 0;
++ futex((volatile uint32_t *)addr, WAIT, expect, NULL, NULL);
++ __tsan_acquire(addr);
++}
++void PlatformFutexWake(int* addr) {
++ constexpr int WAKE = 1;
++ __tsan_release(addr);
++ futex((volatile uint32_t *)addr, WAKE, INT_MAX, NULL, NULL);
++}
++#elif defined(SYS_futex)
+ void PlatformFutexWait(int* addr, int expect) {
+ constexpr int WAIT = 0;
+ syscall(SYS_futex, addr, WAIT, expect, 0);
diff --git a/www/ungoogled-chromium/files/patch-third__party_libvpx_source_libvpx_vpx__ports_aarch64__cpudetect.c b/www/ungoogled-chromium/files/patch-third__party_libvpx_source_libvpx_vpx__ports_aarch64__cpudetect.c
index ceeec71c950d..b7886c1b37ba 100644
--- a/www/ungoogled-chromium/files/patch-third__party_libvpx_source_libvpx_vpx__ports_aarch64__cpudetect.c
+++ b/www/ungoogled-chromium/files/patch-third__party_libvpx_source_libvpx_vpx__ports_aarch64__cpudetect.c
@@ -1,4 +1,4 @@
---- third_party/libvpx/source/libvpx/vpx_ports/aarch64_cpudetect.c.orig 2024-01-09 18:29:07 UTC
+--- third_party/libvpx/source/libvpx/vpx_ports/aarch64_cpudetect.c.orig 2024-02-03 15:42:55 UTC
+++ third_party/libvpx/source/libvpx/vpx_ports/aarch64_cpudetect.c
@@ -91,9 +91,23 @@ static int arm_get_cpu_caps(void) {
return flags;
@@ -21,7 +21,7 @@
+ elf_aux_info(type, &ret, sizeof(ret));
+ return ret;
+}
-+#endif
++#endif
// Define hwcap values ourselves: building with an old auxv header where these
// hwcap values are not defined should not prevent features from being enabled.
diff --git a/www/ungoogled-chromium/files/patch-third__party_perfetto_include_perfetto_base_time.h b/www/ungoogled-chromium/files/patch-third__party_perfetto_include_perfetto_base_time.h
index 780047685cde..a90ffcda2fa5 100644
--- a/www/ungoogled-chromium/files/patch-third__party_perfetto_include_perfetto_base_time.h
+++ b/www/ungoogled-chromium/files/patch-third__party_perfetto_include_perfetto_base_time.h
@@ -1,6 +1,6 @@
---- third_party/perfetto/include/perfetto/base/time.h.orig 2023-12-23 12:33:28 UTC
+--- third_party/perfetto/include/perfetto/base/time.h.orig 2024-02-03 15:42:55 UTC
+++ third_party/perfetto/include/perfetto/base/time.h
-@@ -167,6 +167,9 @@ inline TimeNanos GetTimeInternalNs(clockid_t clk_id) {
+@@ -193,6 +193,9 @@ inline TimeNanos GetTimeInternalNs(clockid_t clk_id) {
// Return ns from boot. Conversely to GetWallTimeNs, this clock counts also time
// during suspend (when supported).
inline TimeNanos GetBootTimeNs() {
@@ -10,7 +10,7 @@
// Determine if CLOCK_BOOTTIME is available on the first call.
static const clockid_t kBootTimeClockSource = [] {
struct timespec ts = {};
-@@ -174,6 +177,7 @@ inline TimeNanos GetBootTimeNs() {
+@@ -200,6 +203,7 @@ inline TimeNanos GetBootTimeNs() {
return res == 0 ? CLOCK_BOOTTIME : kWallTimeClockSource;
}();
return GetTimeInternalNs(kBootTimeClockSource);
@@ -18,7 +18,7 @@
}
inline TimeNanos GetWallTimeNs() {
-@@ -181,7 +185,13 @@ inline TimeNanos GetWallTimeNs() {
+@@ -207,7 +211,13 @@ inline TimeNanos GetWallTimeNs() {
}
inline TimeNanos GetWallTimeRawNs() {
diff --git a/www/ungoogled-chromium/files/patch-third__party_perfetto_src_trace__processor_db_storage_numeric__storage.cc b/www/ungoogled-chromium/files/patch-third__party_perfetto_src_trace__processor_db_storage_numeric__storage.cc
index a948ded56936..e8ca7dd8b578 100644
--- a/www/ungoogled-chromium/files/patch-third__party_perfetto_src_trace__processor_db_storage_numeric__storage.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_perfetto_src_trace__processor_db_storage_numeric__storage.cc
@@ -1,6 +1,6 @@
---- third_party/perfetto/src/trace_processor/db/storage/numeric_storage.cc.orig 2023-12-23 12:33:28 UTC
+--- third_party/perfetto/src/trace_processor/db/storage/numeric_storage.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/perfetto/src/trace_processor/db/storage/numeric_storage.cc
-@@ -251,8 +251,13 @@ BitVector NumericStorage::LinearSearchInternal(FilterO
+@@ -267,8 +267,13 @@ BitVector NumericStorageBase::LinearSearchInternal(Fil
} else if (const auto* i32 = std::get_if<int32_t>(&*val)) {
auto* start = static_cast<const int32_t*>(data_) + range.start;
TypedLinearSearch(*i32, start, op, builder);
diff --git a/www/ungoogled-chromium/files/patch-third__party_perfetto_src_tracing_core_tracing__service__impl.cc b/www/ungoogled-chromium/files/patch-third__party_perfetto_src_tracing_core_tracing__service__impl.cc
index eb388ea2051a..d1df26e326eb 100644
--- a/www/ungoogled-chromium/files/patch-third__party_perfetto_src_tracing_core_tracing__service__impl.cc
+++ b/www/ungoogled-chromium/files/patch-third__party_perfetto_src_tracing_core_tracing__service__impl.cc
@@ -1,6 +1,6 @@
---- third_party/perfetto/src/tracing/core/tracing_service_impl.cc.orig 2023-10-13 13:20:35 UTC
+--- third_party/perfetto/src/tracing/core/tracing_service_impl.cc.orig 2024-02-03 15:42:55 UTC
+++ third_party/perfetto/src/tracing/core/tracing_service_impl.cc
-@@ -3213,7 +3213,8 @@ bool TracingServiceImpl::SnapshotClocks(
+@@ -3223,7 +3223,8 @@ bool TracingServiceImpl::SnapshotClocks(
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_APPLE) && \
!PERFETTO_BUILDFLAG(PERFETTO_OS_WIN) && \
diff --git a/www/ungoogled-chromium/files/patch-third__party_speech-dispatcher_libspeechd__version.h b/www/ungoogled-chromium/files/patch-third__party_speech-dispatcher_libspeechd__version.h
index 6b0aebb7e6fe..8d17171d30f9 100644
--- a/www/ungoogled-chromium/files/patch-third__party_speech-dispatcher_libspeechd__version.h
+++ b/www/ungoogled-chromium/files/patch-third__party_speech-dispatcher_libspeechd__version.h
@@ -1,4 +1,4 @@
---- third_party/speech-dispatcher/libspeechd_version.h.orig 2023-12-23 12:33:28 UTC
+--- third_party/speech-dispatcher/libspeechd_version.h.orig 2024-02-03 15:42:55 UTC
+++ third_party/speech-dispatcher/libspeechd_version.h
@@ -0,0 +1,29 @@
+/*
@@ -19,7 +19,7 @@
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ *
-+ * $Id: patch-third_party_speech-dispatcher_libspeechd_version_h,v 1.1 2023/12/11 14:26:54 robert Exp $
++ * $Id: patch-third_party_speech-dispatcher_libspeechd_version_h,v 1.2 2024/02/02 19:52:06 robert Exp $
+ */
+
+#ifndef _LIBSPEECHD_VERSION_H
diff --git a/www/ungoogled-chromium/files/patch-third__party_vulkan-deps_vulkan-loader_src_loader_vk__loader__platform.h b/www/ungoogled-chromium/files/patch-third__party_vulkan-deps_vulkan-loader_src_loader_vk__loader__platform.h
index 97a59703ee88..800b95449e1d 100644
--- a/www/ungoogled-chromium/files/patch-third__party_vulkan-deps_vulkan-loader_src_loader_vk__loader__platform.h
+++ b/www/ungoogled-chromium/files/patch-third__party_vulkan-deps_vulkan-loader_src_loader_vk__loader__platform.h
@@ -1,6 +1,6 @@
---- third_party/vulkan-deps/vulkan-loader/src/loader/vk_loader_platform.h.orig 2023-11-04 07:08:51 UTC
+--- third_party/vulkan-deps/vulkan-loader/src/loader/vk_loader_platform.h.orig 2024-02-03 15:42:55 UTC
+++ third_party/vulkan-deps/vulkan-loader/src/loader/vk_loader_platform.h
-@@ -312,7 +312,15 @@ static inline char *loader_platform_executable_path(ch
+@@ -316,7 +316,15 @@ static inline char *loader_platform_executable_path(ch
return buffer;
}
diff --git a/www/ungoogled-chromium/files/patch-third__party_vulkan__memory__allocator_include_vk__mem__alloc.h b/www/ungoogled-chromium/files/patch-third__party_vulkan__memory__allocator_include_vk__mem__alloc.h
index 1ceb82d0ae31..652f5d77e410 100644
--- a/www/ungoogled-chromium/files/patch-third__party_vulkan__memory__allocator_include_vk__mem__alloc.h
+++ b/www/ungoogled-chromium/files/patch-third__party_vulkan__memory__allocator_include_vk__mem__alloc.h
@@ -1,39383 +1,11 @@
---- third_party/vulkan_memory_allocator/include/vk_mem_alloc.h.orig 2023-12-23 12:33:28 UTC
+--- third_party/vulkan_memory_allocator/include/vk_mem_alloc.h.orig 2024-02-03 15:42:55 UTC
+++ third_party/vulkan_memory_allocator/include/vk_mem_alloc.h
-@@ -1,19690 +1,19690 @@
--//
--// Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved.
--//
--// Permission is hereby granted, free of charge, to any person obtaining a copy
--// of this software and associated documentation files (the "Software"), to deal
--// in the Software without restriction, including without limitation the rights
--// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
--// copies of the Software, and to permit persons to whom the Software is
--// furnished to do so, subject to the following conditions:
--//
--// The above copyright notice and this permission notice shall be included in
--// all copies or substantial portions of the Software.
--//
--// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
--// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
--// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
--// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
--// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
--// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
--// THE SOFTWARE.
--//
--
--#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
--#define AMD_VULKAN_MEMORY_ALLOCATOR_H
--
--/** \mainpage Vulkan Memory Allocator
--
--<b>Version 3.1.0-development</b>
--
--Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. \n
--License: MIT
--
--<b>API documentation divided into groups:</b> [Modules](modules.html)
--
--\section main_table_of_contents Table of contents
--
--- <b>User guide</b>
-- - \subpage quick_start
-- - [Project setup](@ref quick_start_project_setup)
-- - [Initialization](@ref quick_start_initialization)
-- - [Resource allocation](@ref quick_start_resource_allocation)
-- - \subpage choosing_memory_type
-- - [Usage](@ref choosing_memory_type_usage)
-- - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags)
-- - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types)
-- - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools)
-- - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations)
-- - \subpage memory_mapping
-- - [Mapping functions](@ref memory_mapping_mapping_functions)
-- - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory)
-- - [Cache flush and invalidate](@ref memory_mapping_cache_control)
-- - \subpage staying_within_budget
-- - [Querying for budget](@ref staying_within_budget_querying_for_budget)
-- - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage)
-- - \subpage resource_aliasing
-- - \subpage custom_memory_pools
-- - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex)
-- - [Linear allocation algorithm](@ref linear_algorithm)
-- - [Free-at-once](@ref linear_algorithm_free_at_once)
-- - [Stack](@ref linear_algorithm_stack)
-- - [Double stack](@ref linear_algorithm_double_stack)
-- - [Ring buffer](@ref linear_algorithm_ring_buffer)
-- - \subpage defragmentation
-- - \subpage statistics
-- - [Numeric statistics](@ref statistics_numeric_statistics)
-- - [JSON dump](@ref statistics_json_dump)
-- - \subpage allocation_annotation
-- - [Allocation user data](@ref allocation_user_data)
-- - [Allocation names](@ref allocation_names)
-- - \subpage virtual_allocator
-- - \subpage debugging_memory_usage
-- - [Memory initialization](@ref debugging_memory_usage_initialization)
-- - [Margins](@ref debugging_memory_usage_margins)
-- - [Corruption detection](@ref debugging_memory_usage_corruption_detection)
-- - \subpage opengl_interop
--- \subpage usage_patterns
-- - [GPU-only resource](@ref usage_patterns_gpu_only)
-- - [Staging copy for upload](@ref usage_patterns_staging_copy_upload)
-- - [Readback](@ref usage_patterns_readback)
-- - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading)
-- - [Other use cases](@ref usage_patterns_other_use_cases)
--- \subpage configuration
-- - [Pointers to Vulkan functions](@ref config_Vulkan_functions)
-- - [Custom host memory allocator](@ref custom_memory_allocator)
-- - [Device memory allocation callbacks](@ref allocation_callbacks)
-- - [Device heap memory limit](@ref heap_memory_limit)
--- <b>Extension support</b>
-- - \subpage vk_khr_dedicated_allocation
-- - \subpage enabling_buffer_device_address
-- - \subpage vk_ext_memory_priority
-- - \subpage vk_amd_device_coherent_memory
--- \subpage general_considerations
-- - [Thread safety](@ref general_considerations_thread_safety)
-- - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility)
-- - [Validation layer warnings](@ref general_considerations_validation_layer_warnings)
-- - [Allocation algorithm](@ref general_considerations_allocation_algorithm)
-- - [Features not supported](@ref general_considerations_features_not_supported)
--
--\section main_see_also See also
--
--- [**Product page on GPUOpen**](https://gpuopen.com/gaming-product/vulkan-memory-allocator/)
--- [**Source repository on GitHub**](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator)
--
--\defgroup group_init Library initialization
--
--\brief API elements related to the initialization and management of the entire library, especially #VmaAllocator object.
--
--\defgroup group_alloc Memory allocation
--
--\brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, images.
--Most basic ones being: vmaCreateBuffer(), vmaCreateImage().
--
--\defgroup group_virtual Virtual allocator
--
--\brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm
--for user-defined purpose without allocating any real GPU memory.
--
--\defgroup group_stats Statistics
--
--\brief API elements that query current status of the allocator, from memory usage, budget, to full dump of the internal state in JSON format.
--See documentation chapter: \ref statistics.
--*/
--
--
--#ifdef __cplusplus
--extern "C" {
--#endif
--
--#include <vulkan/vulkan.h>
--
--#if !defined(VMA_VULKAN_VERSION)
-- #if defined(VK_VERSION_1_3)
-- #define VMA_VULKAN_VERSION 1003000
-- #elif defined(VK_VERSION_1_2)
-- #define VMA_VULKAN_VERSION 1002000
-- #elif defined(VK_VERSION_1_1)
-- #define VMA_VULKAN_VERSION 1001000
-- #else
-- #define VMA_VULKAN_VERSION 1000000
-- #endif
--#endif
--
--#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
-- extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
-- extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
-- extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
-- extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
-- extern PFN_vkAllocateMemory vkAllocateMemory;
-- extern PFN_vkFreeMemory vkFreeMemory;
-- extern PFN_vkMapMemory vkMapMemory;
-- extern PFN_vkUnmapMemory vkUnmapMemory;
-- extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
-- extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
-- extern PFN_vkBindBufferMemory vkBindBufferMemory;
-- extern PFN_vkBindImageMemory vkBindImageMemory;
-- extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
-- extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
-- extern PFN_vkCreateBuffer vkCreateBuffer;
-- extern PFN_vkDestroyBuffer vkDestroyBuffer;
-- extern PFN_vkCreateImage vkCreateImage;
-- extern PFN_vkDestroyImage vkDestroyImage;
-- extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
-- #if VMA_VULKAN_VERSION >= 1001000
-- extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
-- extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
-- extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
-- extern PFN_vkBindImageMemory2 vkBindImageMemory2;
-- extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
-- #endif // #if VMA_VULKAN_VERSION >= 1001000
--#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES
--
--#if !defined(VMA_DEDICATED_ALLOCATION)
-- #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
-- #define VMA_DEDICATED_ALLOCATION 1
-- #else
-- #define VMA_DEDICATED_ALLOCATION 0
-- #endif
--#endif
--
--#if !defined(VMA_BIND_MEMORY2)
-- #if VK_KHR_bind_memory2
-- #define VMA_BIND_MEMORY2 1
-- #else
-- #define VMA_BIND_MEMORY2 0
-- #endif
--#endif
--
--#if !defined(VMA_MEMORY_BUDGET)
-- #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
-- #define VMA_MEMORY_BUDGET 1
-- #else
-- #define VMA_MEMORY_BUDGET 0
-- #endif
--#endif
--
--// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
--#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
-- #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
-- #define VMA_BUFFER_DEVICE_ADDRESS 1
-- #else
-- #define VMA_BUFFER_DEVICE_ADDRESS 0
-- #endif
--#endif
--
--// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers.
--#if !defined(VMA_MEMORY_PRIORITY)
-- #if VK_EXT_memory_priority
-- #define VMA_MEMORY_PRIORITY 1
-- #else
-- #define VMA_MEMORY_PRIORITY 0
-- #endif
--#endif
--
--// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers.
--#if !defined(VMA_EXTERNAL_MEMORY)
-- #if VK_KHR_external_memory
-- #define VMA_EXTERNAL_MEMORY 1
-- #else
-- #define VMA_EXTERNAL_MEMORY 0
-- #endif
--#endif
--
--// Define these macros to decorate all public functions with additional code,
--// before and after returned type, appropriately. This may be useful for
--// exporting the functions when compiling VMA as a separate library. Example:
--// #define VMA_CALL_PRE __declspec(dllexport)
--// #define VMA_CALL_POST __cdecl
--#ifndef VMA_CALL_PRE
-- #define VMA_CALL_PRE
--#endif
--#ifndef VMA_CALL_POST
-- #define VMA_CALL_POST
--#endif
--
--// Define this macro to decorate pNext pointers with an attribute specifying the Vulkan
--// structure that will be extended via the pNext chain.
--#ifndef VMA_EXTENDS_VK_STRUCT
-- #define VMA_EXTENDS_VK_STRUCT(vkStruct)
--#endif
--
--// Define this macro to decorate pointers with an attribute specifying the
--// length of the array they point to if they are not null.
--//
--// The length may be one of
--// - The name of another parameter in the argument list where the pointer is declared
--// - The name of another member in the struct where the pointer is declared
--// - The name of a member of a struct type, meaning the value of that member in
--// the context of the call. For example
--// VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"),
--// this means the number of memory heaps available in the device associated
--// with the VmaAllocator being dealt with.
--#ifndef VMA_LEN_IF_NOT_NULL
-- #define VMA_LEN_IF_NOT_NULL(len)
--#endif
--
--// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
--// see: https://clang.llvm.org/docs/AttributeReference.html#nullable
--#ifndef VMA_NULLABLE
-- #ifdef __clang__
-- #define VMA_NULLABLE _Nullable
-- #else
-- #define VMA_NULLABLE
-- #endif
--#endif
--
--// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
--// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
--#ifndef VMA_NOT_NULL
-- #ifdef __clang__
-- #define VMA_NOT_NULL _Nonnull
-- #else
-- #define VMA_NOT_NULL
-- #endif
--#endif
--
--// If non-dispatchable handles are represented as pointers then we can give
--// then nullability annotations
--#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
-- #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
-- #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
-- #else
-- #define VMA_NOT_NULL_NON_DISPATCHABLE
-- #endif
--#endif
--
--#ifndef VMA_NULLABLE_NON_DISPATCHABLE
-- #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
-- #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
-- #else
-- #define VMA_NULLABLE_NON_DISPATCHABLE
-- #endif
--#endif
--
--#ifndef VMA_STATS_STRING_ENABLED
-- #define VMA_STATS_STRING_ENABLED 1
--#endif
--
--////////////////////////////////////////////////////////////////////////////////
--////////////////////////////////////////////////////////////////////////////////
--//
--// INTERFACE
--//
--////////////////////////////////////////////////////////////////////////////////
--////////////////////////////////////////////////////////////////////////////////
--
--// Sections for managing code placement in file, only for development purposes e.g. for convenient folding inside an IDE.
--#ifndef _VMA_ENUM_DECLARATIONS
--
--/**
--\addtogroup group_init
--@{
--*/
--
--/// Flags for created #VmaAllocator.
--typedef enum VmaAllocatorCreateFlagBits
--{
-- /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.
--
-- Using this flag may increase performance because internal mutexes are not used.
-- */
-- VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
-- /** \brief Enables usage of VK_KHR_dedicated_allocation extension.
--
-- The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
-- When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
--
-- Using this extension will automatically allocate dedicated blocks of memory for
-- some buffers and images instead of suballocating place for them out of bigger
-- memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
-- flag) when it is recommended by the driver. It may improve performance on some
-- GPUs.
--
-- You may set this flag only if you found out that following device extensions are
-- supported, you enabled them while creating Vulkan device passed as
-- VmaAllocatorCreateInfo::device, and you want them to be used internally by this
-- library:
--
-- - VK_KHR_get_memory_requirements2 (device extension)
-- - VK_KHR_dedicated_allocation (device extension)
--
-- When this flag is set, you can experience following warnings reported by Vulkan
-- validation layer. You can ignore them.
--
-- > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.
-- */
-- VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
-- /**
-- Enables usage of VK_KHR_bind_memory2 extension.
--
-- The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
-- When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
--
-- You may set this flag only if you found out that this device extension is supported,
-- you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
-- and you want it to be used internally by this library.
--
-- The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`,
-- which allow to pass a chain of `pNext` structures while binding.
-- This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2().
-- */
-- VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004,
-- /**
-- Enables usage of VK_EXT_memory_budget extension.
--
-- You may set this flag only if you found out that this device extension is supported,
-- you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
-- and you want it to be used internally by this library, along with another instance extension
-- VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted).
--
-- The extension provides query for current memory usage and budget, which will probably
-- be more accurate than an estimation used by the library otherwise.
-- */
-- VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,
-- /**
-- Enables usage of VK_AMD_device_coherent_memory extension.
--
-- You may set this flag only if you:
--
-- - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
-- - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device,
-- - want it to be used internally by this library.
--
-- The extension and accompanying device feature provide access to memory types with
-- `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags.
-- They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR.
--
-- When the extension is not enabled, such memory types are still enumerated, but their usage is illegal.
-- To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type,
-- returning `VK_ERROR_FEATURE_NOT_PRESENT`.
-- */
-- VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010,
-- /**
-- Enables usage of "buffer device address" feature, which allows you to use function
-- `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader.
--
-- You may set this flag only if you:
--
-- 1. (For Vulkan version < 1.2) Found as available and enabled device extension
-- VK_KHR_buffer_device_address.
-- This extension is promoted to core Vulkan 1.2.
-- 2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`.
--
-- When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA.
-- The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to
-- allocated memory blocks wherever it might be needed.
--
-- For more information, see documentation chapter \ref enabling_buffer_device_address.
-- */
-- VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020,
-- /**
-- Enables usage of VK_EXT_memory_priority extension in the library.
--
-- You may set this flag only if you found available and enabled this device extension,
-- along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`,
-- while creating Vulkan device passed as VmaAllocatorCreateInfo::device.
--
-- When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority
-- are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored.
--
-- A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
-- Larger values are higher priority. The granularity of the priorities is implementation-dependent.
-- It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`.
-- The value to be used for default priority is 0.5.
-- For more details, see the documentation of the VK_EXT_memory_priority extension.
-- */
-- VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040,
--
-- VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
--} VmaAllocatorCreateFlagBits;
--/// See #VmaAllocatorCreateFlagBits.
--typedef VkFlags VmaAllocatorCreateFlags;
--
--/** @} */
--
--/**
--\addtogroup group_alloc
--@{
--*/
--
--/// \brief Intended usage of the allocated memory.
--typedef enum VmaMemoryUsage
--{
-- /** No intended memory usage specified.
-- Use other members of VmaAllocationCreateInfo to specify your requirements.
-- */
-- VMA_MEMORY_USAGE_UNKNOWN = 0,
-- /**
-- \deprecated Obsolete, preserved for backward compatibility.
-- Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
-- */
-- VMA_MEMORY_USAGE_GPU_ONLY = 1,
-- /**
-- \deprecated Obsolete, preserved for backward compatibility.
-- Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`.
-- */
-- VMA_MEMORY_USAGE_CPU_ONLY = 2,
-- /**
-- \deprecated Obsolete, preserved for backward compatibility.
-- Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
-- */
-- VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
-- /**
-- \deprecated Obsolete, preserved for backward compatibility.
-- Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
-- */
-- VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
-- /**
-- \deprecated Obsolete, preserved for backward compatibility.
-- Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
-- */
-- VMA_MEMORY_USAGE_CPU_COPY = 5,
-- /**
-- Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`.
-- Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation.
--
-- Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`.
--
-- Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
-- */
-- VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,
-- /**
-- Selects best memory type automatically.
-- This flag is recommended for most common use cases.
--
-- When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
-- you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
-- in VmaAllocationCreateInfo::flags.
--
-- It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
-- vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
-- and not with generic memory allocation functions.
-- */
-- VMA_MEMORY_USAGE_AUTO = 7,
-- /**
-- Selects best memory type automatically with preference for GPU (device) memory.
--
-- When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
-- you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
-- in VmaAllocationCreateInfo::flags.
--
-- It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
-- vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
-- and not with generic memory allocation functions.
-- */
-- VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8,
-- /**
-- Selects best memory type automatically with preference for CPU (host) memory.
--
-- When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
-- you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
-- in VmaAllocationCreateInfo::flags.
--
-- It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
-- vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
-- and not with generic memory allocation functions.
-- */
-- VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9,
--
-- VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
--} VmaMemoryUsage;
--
--/// Flags to be passed as VmaAllocationCreateInfo::flags.
--typedef enum VmaAllocationCreateFlagBits
--{
-- /** \brief Set this flag if the allocation should have its own memory block.
--
-- Use it for special, big resources, like fullscreen images used as attachments.
-- */
-- VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
--
-- /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block.
--
-- If new allocation cannot be placed in any of the existing blocks, allocation
-- fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
--
-- You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and
-- #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.
-- */
-- VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
-- /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
--
-- Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData.
--
-- It is valid to use this flag for allocation made from memory type that is not
-- `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is
-- useful if you need an allocation that is efficient to use on GPU
-- (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
-- support it (e.g. Intel GPU).
-- */
-- VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
-- /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead.
--
-- Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a
-- null-terminated string. Instead of copying pointer value, a local copy of the
-- string is made and stored in allocation's `pName`. The string is automatically
-- freed together with the allocation. It is also used in vmaBuildStatsString().
-- */
-- VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
-- /** Allocation will be created from upper stack in a double stack pool.
--
-- This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.
-- */
-- VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
-- /** Create both buffer/image and allocation, but don't bind them together.
-- It is useful when you want to bind yourself to do some more advanced binding, e.g. using some extensions.
-- The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage().
-- Otherwise it is ignored.
--
-- If you want to make sure the new buffer/image is not tied to the new memory allocation
-- through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block,
-- use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT.
-- */
-- VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
-- /** Create allocation only if additional device memory required for it, if any, won't exceed
-- memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-- */
-- VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,
-- /** \brief Set this flag if the allocated memory will have aliasing resources.
--
-- Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified.
-- Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors.
-- */
-- VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200,
-- /**
-- Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
--
-- - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,
-- you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
-- - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
-- This includes allocations created in \ref custom_memory_pools.
--
-- Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number,
-- never read or accessed randomly, so a memory type can be selected that is uncached and write-combined.
--
-- \warning Violating this declaration may work correctly, but will likely be very slow.
-- Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;`
-- Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once.
-- */
-- VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400,
-- /**
-- Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
--
-- - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,
-- you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
-- - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
-- This includes allocations created in \ref custom_memory_pools.
--
-- Declares that mapped memory can be read, written, and accessed in random order,
-- so a `HOST_CACHED` memory type is required.
-- */
-- VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800,
-- /**
-- Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT,
-- it says that despite request for host access, a not-`HOST_VISIBLE` memory type can be selected
-- if it may improve performance.
--
-- By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type
-- (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and
-- issue an explicit transfer to write/read your data.
-- To prepare for this possibility, don't forget to add appropriate flags like
-- `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image.
-- */
-- VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000,
-- /** Allocation strategy that chooses smallest possible free range for the allocation
-- to minimize memory usage and fragmentation, possibly at the expense of allocation time.
-- */
-- VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000,
-- /** Allocation strategy that chooses first suitable free range for the allocation -
-- not necessarily in terms of the smallest offset but the one that is easiest and fastest to find
-- to minimize allocation time, possibly at the expense of allocation quality.
-- */
-- VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000,
-- /** Allocation strategy that chooses always the lowest offset in available space.
-- This is not the most efficient strategy but achieves highly packed data.
-- Used internally by defragmentation, not recommended in typical usage.
-- */
-- VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = 0x00040000,
-- /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT.
-- */
-- VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
-- /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT.
-- */
-- VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
-- /** A bit mask to extract only `STRATEGY` bits from entire set of flags.
-- */
-- VMA_ALLOCATION_CREATE_STRATEGY_MASK =
-- VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT |
-- VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT |
-- VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
--
-- VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
--} VmaAllocationCreateFlagBits;
--/// See #VmaAllocationCreateFlagBits.
--typedef VkFlags VmaAllocationCreateFlags;
--
--/// Flags to be passed as VmaPoolCreateInfo::flags.
--typedef enum VmaPoolCreateFlagBits
--{
-- /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.
--
-- This is an optional optimization flag.
--
-- If you always allocate using vmaCreateBuffer(), vmaCreateImage(),
-- vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator
-- knows exact type of your allocations so it can handle Buffer-Image Granularity
-- in the optimal way.
--
-- If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(),
-- exact type of such allocations is not known, so allocator must be conservative
-- in handling Buffer-Image Granularity, which can lead to suboptimal allocation
-- (wasted memory). In that case, if you can make sure you always allocate only
-- buffers and linear images or only optimal images out of this pool, use this flag
-- to make allocator disregard Buffer-Image Granularity and so make allocations
-- faster and more optimal.
-- */
-- VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
--
-- /** \brief Enables alternative, linear allocation algorithm in this pool.
--
-- Specify this flag to enable linear allocation algorithm, which always creates
-- new allocations after last one and doesn't reuse space from allocations freed in
-- between. It trades memory consumption for simplified algorithm and data
-- structure, which has better performance and uses less memory for metadata.
--
-- By using this flag, you can achieve behavior of free-at-once, stack,
-- ring buffer, and double stack.
-- For details, see documentation chapter \ref linear_algorithm.
-- */
-- VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
--
-- /** Bit mask to extract only `ALGORITHM` bits from entire set of flags.
-- */
-- VMA_POOL_CREATE_ALGORITHM_MASK =
-- VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT,
--
-- VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
--} VmaPoolCreateFlagBits;
--/// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits.
--typedef VkFlags VmaPoolCreateFlags;
--
--/// Flags to be passed as VmaDefragmentationInfo::flags.
--typedef enum VmaDefragmentationFlagBits
--{
-- /* \brief Use simple but fast algorithm for defragmentation.
-- May not achieve best results but will require least time to compute and least allocations to copy.
-- */
-- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1,
-- /* \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified.
-- Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved.
-- */
-- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2,
-- /* \brief Perform full defragmentation of memory.
-- Can result in notably more time to compute and allocations to copy, but will achieve best memory packing.
-- */
-- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4,
-- /** \brief Use the most roboust algorithm at the cost of time to compute and number of copies to make.
-- Only available when bufferImageGranularity is greater than 1, since it aims to reduce
-- alignment issues between different types of resources.
-- Otherwise falls back to same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT.
-- */
-- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8,
--
-- /// A bit mask to extract only `ALGORITHM` bits from entire set of flags.
-- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK =
-- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT |
-- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT |
-- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT |
-- VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT,
--
-- VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
--} VmaDefragmentationFlagBits;
--/// See #VmaDefragmentationFlagBits.
--typedef VkFlags VmaDefragmentationFlags;
--
--/// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove.
--typedef enum VmaDefragmentationMoveOperation
--{
-- /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass().
-- VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0,
-- /// Set this value if you cannot move the allocation. New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged.
-- VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1,
-- /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed.
-- VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2,
--} VmaDefragmentationMoveOperation;
--
--/** @} */
--
--/**
--\addtogroup group_virtual
--@{
--*/
--
--/// Flags to be passed as VmaVirtualBlockCreateInfo::flags.
--typedef enum VmaVirtualBlockCreateFlagBits
--{
-- /** \brief Enables alternative, linear allocation algorithm in this virtual block.
--
-- Specify this flag to enable linear allocation algorithm, which always creates
-- new allocations after last one and doesn't reuse space from allocations freed in
-- between. It trades memory consumption for simplified algorithm and data
-- structure, which has better performance and uses less memory for metadata.
--
-- By using this flag, you can achieve behavior of free-at-once, stack,
-- ring buffer, and double stack.
-- For details, see documentation chapter \ref linear_algorithm.
-- */
-- VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001,
--
-- /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags.
-- */
-- VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK =
-- VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT,
--
-- VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
--} VmaVirtualBlockCreateFlagBits;
--/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits.
--typedef VkFlags VmaVirtualBlockCreateFlags;
--
--/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags.
--typedef enum VmaVirtualAllocationCreateFlagBits
--{
-- /** \brief Allocation will be created from upper stack in a double stack pool.
--
-- This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag.
-- */
-- VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT,
-- /** \brief Allocation strategy that tries to minimize memory usage.
-- */
-- VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
-- /** \brief Allocation strategy that tries to minimize allocation time.
-- */
-- VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
-- /** Allocation strategy that chooses always the lowest offset in available space.
-- This is not the most efficient strategy but achieves highly packed data.
-- */
-- VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
-- /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags.
--
-- These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits.
-- */
-- VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK,
--
-- VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
--} VmaVirtualAllocationCreateFlagBits;
--/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits.
--typedef VkFlags VmaVirtualAllocationCreateFlags;
--
--/** @} */
--
--#endif // _VMA_ENUM_DECLARATIONS
--
--#ifndef _VMA_DATA_TYPES_DECLARATIONS
--
--/**
--\addtogroup group_init
--@{ */
--
--/** \struct VmaAllocator
--\brief Represents main object of this library initialized.
--
--Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it.
--Call function vmaDestroyAllocator() to destroy it.
--
--It is recommended to create just one object of this type per `VkDevice` object,
--right after Vulkan is initialized and keep it alive until before Vulkan device is destroyed.
--*/
--VK_DEFINE_HANDLE(VmaAllocator)
--
--/** @} */
--
--/**
--\addtogroup group_alloc
--@{
--*/
--
--/** \struct VmaPool
--\brief Represents custom memory pool
--
--Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it.
--Call function vmaDestroyPool() to destroy it.
--
--For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools).
--*/
--VK_DEFINE_HANDLE(VmaPool)
--
--/** \struct VmaAllocation
--\brief Represents single memory allocation.
--
--It may be either dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type
--plus unique offset.
--
--There are multiple ways to create such object.
--You need to fill structure VmaAllocationCreateInfo.
--For more information see [Choosing memory type](@ref choosing_memory_type).
--
--Although the library provides convenience functions that create Vulkan buffer or image,
--allocate memory for it and bind them together,
--binding of the allocation to a buffer or an image is out of scope of the allocation itself.
--Allocation object can exist without buffer/image bound,
--binding can be done manually by the user, and destruction of it can be done
--independently of destruction of the allocation.
--
--The object also remembers its size and some other information.
--To retrieve this information, use function vmaGetAllocationInfo() and inspect
--returned structure VmaAllocationInfo.
--*/
--VK_DEFINE_HANDLE(VmaAllocation)
--
--/** \struct VmaDefragmentationContext
--\brief An opaque object that represents started defragmentation process.
--
--Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it.
--Call function vmaEndDefragmentation() to destroy it.
--*/
--VK_DEFINE_HANDLE(VmaDefragmentationContext)
--
--/** @} */
--
--/**
--\addtogroup group_virtual
--@{
--*/
--
--/** \struct VmaVirtualAllocation
--\brief Represents single memory allocation done inside VmaVirtualBlock.
--
--Use it as a unique identifier to virtual allocation within the single block.
--
--Use value `VK_NULL_HANDLE` to represent a null/invalid allocation.
--*/
--VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation)
--
--/** @} */
--
--/**
--\addtogroup group_virtual
--@{
--*/
--
--/** \struct VmaVirtualBlock
--\brief Handle to a virtual block object that allows to use core allocation algorithm without allocating any real GPU memory.
--
--Fill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it.
--For more information, see documentation chapter \ref virtual_allocator.
--
--This object is not thread-safe - should not be used from multiple threads simultaneously, must be synchronized externally.
--*/
--VK_DEFINE_HANDLE(VmaVirtualBlock)
--
--/** @} */
--
--/**
--\addtogroup group_init
--@{
--*/
--
--/// Callback function called after successful vkAllocateMemory.
--typedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)(
-- VmaAllocator VMA_NOT_NULL allocator,
-- uint32_t memoryType,
-- VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
-- VkDeviceSize size,
-- void* VMA_NULLABLE pUserData);
--
--/// Callback function called before vkFreeMemory.
--typedef void (VKAPI_PTR* PFN_vmaFreeDeviceMemoryFunction)(
-- VmaAllocator VMA_NOT_NULL allocator,
-- uint32_t memoryType,
-- VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
-- VkDeviceSize size,
-- void* VMA_NULLABLE pUserData);
--
--/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`.
--
--Provided for informative purpose, e.g. to gather statistics about number of
--allocations or total amount of memory allocated in Vulkan.
--
--Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
--*/
--typedef struct VmaDeviceMemoryCallbacks
--{
-- /// Optional, can be null.
-- PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;
-- /// Optional, can be null.
-- PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;
-- /// Optional, can be null.
-- void* VMA_NULLABLE pUserData;
--} VmaDeviceMemoryCallbacks;
--
--/** \brief Pointers to some Vulkan functions - a subset used by the library.
--
--Used in VmaAllocatorCreateInfo::pVulkanFunctions.
--*/
--typedef struct VmaVulkanFunctions
--{
-- /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
-- PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr;
-- /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
-- PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr;
-- PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
-- PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
-- PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
-- PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
-- PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
-- PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
-- PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
-- PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
-- PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
-- PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
-- PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
-- PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
-- PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
-- PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
-- PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
-- PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
-- PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
--#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-- /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
-- PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
-- /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
-- PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
--#endif
--#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
-- /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension.
-- PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
-- /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension.
-- PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
--#endif
--#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
-- PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
--#endif
--#if VMA_VULKAN_VERSION >= 1003000
-- /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
-- PFN_vkGetDeviceBufferMemoryRequirements VMA_NULLABLE vkGetDeviceBufferMemoryRequirements;
-- /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
-- PFN_vkGetDeviceImageMemoryRequirements VMA_NULLABLE vkGetDeviceImageMemoryRequirements;
--#endif
--} VmaVulkanFunctions;
--
--/// Description of a Allocator to be created.
--typedef struct VmaAllocatorCreateInfo
--{
-- /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
-- VmaAllocatorCreateFlags flags;
-- /// Vulkan physical device.
-- /** It must be valid throughout whole lifetime of created allocator. */
-- VkPhysicalDevice VMA_NOT_NULL physicalDevice;
-- /// Vulkan device.
-- /** It must be valid throughout whole lifetime of created allocator. */
-- VkDevice VMA_NOT_NULL device;
-- /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
-- /** Set to 0 to use default, which is currently 256 MiB. */
-- VkDeviceSize preferredLargeHeapBlockSize;
-- /// Custom CPU memory allocation callbacks. Optional.
-- /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
-- const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
-- /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
-- /** Optional, can be null. */
-- const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
-- /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
--
-- If not NULL, it must be a pointer to an array of
-- `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on
-- maximum number of bytes that can be allocated out of particular Vulkan memory
-- heap.
--
-- Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
-- heap. This is also the default in case of `pHeapSizeLimit` = NULL.
--
-- If there is a limit defined for a heap:
--
-- - If user tries to allocate more memory from that heap using this allocator,
-- the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-- - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the
-- value of this limit will be reported instead when using vmaGetMemoryProperties().
--
-- Warning! Using this feature may not be equivalent to installing a GPU with
-- smaller amount of memory, because graphics driver doesn't necessary fail new
-- allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is
-- exceeded. It may return success and just silently migrate some device memory
-- blocks to system RAM. This driver behavior can also be controlled using
-- VK_AMD_memory_overallocation_behavior extension.
-- */
-- const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
--
-- /** \brief Pointers to Vulkan functions. Can be null.
--
-- For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions).
-- */
-- const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;
-- /** \brief Handle to Vulkan instance object.
--
-- Starting from version 3.0.0 this member is no longer optional, it must be set!
-- */
-- VkInstance VMA_NOT_NULL instance;
-- /** \brief Optional. The highest version of Vulkan that the application is designed to use.
--
-- It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`.
-- The patch version number specified is ignored. Only the major and minor versions are considered.
-- It must be less or equal (preferably equal) to value as passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`.
-- Only versions 1.0, 1.1, 1.2, 1.3 are supported by the current implementation.
-- Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`.
-- */
-- uint32_t vulkanApiVersion;
--#if VMA_EXTERNAL_MEMORY
-- /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type.
--
-- If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount`
-- elements, defining external memory handle types of particular Vulkan memory type,
-- to be passed using `VkExportMemoryAllocateInfoKHR`.
--
-- Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type.
-- This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL.
-- */
-- const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes;
--#endif // #if VMA_EXTERNAL_MEMORY
--} VmaAllocatorCreateInfo;
--
--/// Information about existing #VmaAllocator object.
--typedef struct VmaAllocatorInfo
--{
-- /** \brief Handle to Vulkan instance object.
--
-- This is the same value as has been passed through VmaAllocatorCreateInfo::instance.
-- */
-- VkInstance VMA_NOT_NULL instance;
-- /** \brief Handle to Vulkan physical device object.
--
-- This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice.
-- */
-- VkPhysicalDevice VMA_NOT_NULL physicalDevice;
-- /** \brief Handle to Vulkan device object.
--
-- This is the same value as has been passed through VmaAllocatorCreateInfo::device.
-- */
-- VkDevice VMA_NOT_NULL device;
--} VmaAllocatorInfo;
--
--/** @} */
--
--/**
--\addtogroup group_stats
--@{
--*/
--
--/** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total.
--
--These are fast to calculate.
--See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics().
--*/
--typedef struct VmaStatistics
--{
-- /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated.
-- */
-- uint32_t blockCount;
-- /** \brief Number of #VmaAllocation objects allocated.
--
-- Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`.
-- */
-- uint32_t allocationCount;
-- /** \brief Number of bytes allocated in `VkDeviceMemory` blocks.
--
-- \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object
-- (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls
-- "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image.
-- */
-- VkDeviceSize blockBytes;
-- /** \brief Total number of bytes occupied by all #VmaAllocation objects.
--
-- Always less or equal than `blockBytes`.
-- Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan
-- but unused by any #VmaAllocation.
-- */
-- VkDeviceSize allocationBytes;
--} VmaStatistics;
--
--/** \brief More detailed statistics than #VmaStatistics.
--
--These are slower to calculate. Use for debugging purposes.
--See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics().
--
--Previous version of the statistics API provided averages, but they have been removed
--because they can be easily calculated as:
--
--\code
--VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount;
--VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes;
--VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount;
--\endcode
--*/
--typedef struct VmaDetailedStatistics
--{
-- /// Basic statistics.
-- VmaStatistics statistics;
-- /// Number of free ranges of memory between allocations.
-- uint32_t unusedRangeCount;
-- /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations.
-- VkDeviceSize allocationSizeMin;
-- /// Largest allocation size. 0 if there are 0 allocations.
-- VkDeviceSize allocationSizeMax;
-- /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges.
-- VkDeviceSize unusedRangeSizeMin;
-- /// Largest empty range size. 0 if there are 0 empty ranges.
-- VkDeviceSize unusedRangeSizeMax;
--} VmaDetailedStatistics;
--
--/** \brief General statistics from current state of the Allocator -
--total memory usage across all memory heaps and types.
--
--These are slower to calculate. Use for debugging purposes.
--See function vmaCalculateStatistics().
--*/
--typedef struct VmaTotalStatistics
--{
-- VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES];
-- VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS];
-- VmaDetailedStatistics total;
--} VmaTotalStatistics;
--
--/** \brief Statistics of current memory usage and available budget for a specific memory heap.
--
--These are fast to calculate.
--See function vmaGetHeapBudgets().
--*/
--typedef struct VmaBudget
--{
-- /** \brief Statistics fetched from the library.
-- */
-- VmaStatistics statistics;
-- /** \brief Estimated current memory usage of the program, in bytes.
--
-- Fetched from system using VK_EXT_memory_budget extension if enabled.
--
-- It might be different than `statistics.blockBytes` (usually higher) due to additional implicit objects
-- also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or
-- `VkDeviceMemory` blocks allocated outside of this library, if any.
-- */
-- VkDeviceSize usage;
-- /** \brief Estimated amount of memory available to the program, in bytes.
--
-- Fetched from system using VK_EXT_memory_budget extension if enabled.
--
-- It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors
-- external to the program, decided by the operating system.
-- Difference `budget - usage` is the amount of additional memory that can probably
-- be allocated without problems. Exceeding the budget may result in various problems.
-- */
-- VkDeviceSize budget;
--} VmaBudget;
--
--/** @} */
--
--/**
--\addtogroup group_alloc
--@{
--*/
--
--/** \brief Parameters of new #VmaAllocation.
--
--To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others.
--*/
--typedef struct VmaAllocationCreateInfo
--{
-- /// Use #VmaAllocationCreateFlagBits enum.
-- VmaAllocationCreateFlags flags;
-- /** \brief Intended usage of memory.
--
-- You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n
-- If `pool` is not null, this member is ignored.
-- */
-- VmaMemoryUsage usage;
-- /** \brief Flags that must be set in a Memory Type chosen for an allocation.
--
-- Leave 0 if you specify memory requirements in other way. \n
-- If `pool` is not null, this member is ignored.*/
-- VkMemoryPropertyFlags requiredFlags;
-- /** \brief Flags that preferably should be set in a memory type chosen for an allocation.
--
-- Set to 0 if no additional flags are preferred. \n
-- If `pool` is not null, this member is ignored. */
-- VkMemoryPropertyFlags preferredFlags;
-- /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
--
-- Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if
-- it meets other requirements specified by this structure, with no further
-- restrictions on memory type index. \n
-- If `pool` is not null, this member is ignored.
-- */
-- uint32_t memoryTypeBits;
-- /** \brief Pool that this allocation should be created in.
--
-- Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members:
-- `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.
-- */
-- VmaPool VMA_NULLABLE pool;
-- /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
--
-- If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either
-- null or pointer to a null-terminated string. The string will be then copied to
-- internal buffer, so it doesn't need to be valid after allocation call.
-- */
-- void* VMA_NULLABLE pUserData;
-- /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
--
-- It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object
-- and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
-- Otherwise, it has the priority of a memory block where it is placed and this variable is ignored.
-- */
-- float priority;
--} VmaAllocationCreateInfo;
--
--/// Describes parameter of created #VmaPool.
--typedef struct VmaPoolCreateInfo
--{
-- /** \brief Vulkan memory type index to allocate this pool from.
-- */
-- uint32_t memoryTypeIndex;
-- /** \brief Use combination of #VmaPoolCreateFlagBits.
-- */
-- VmaPoolCreateFlags flags;
-- /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional.
--
-- Specify nonzero to set explicit, constant size of memory blocks used by this
-- pool.
--
-- Leave 0 to use default and let the library manage block sizes automatically.
-- Sizes of particular blocks may vary.
-- In this case, the pool will also support dedicated allocations.
-- */
-- VkDeviceSize blockSize;
-- /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty.
--
-- Set to 0 to have no preallocated blocks and allow the pool be completely empty.
-- */
-- size_t minBlockCount;
-- /** \brief Maximum number of blocks that can be allocated in this pool. Optional.
--
-- Set to 0 to use default, which is `SIZE_MAX`, which means no limit.
--
-- Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated
-- throughout whole lifetime of this pool.
-- */
-- size_t maxBlockCount;
-- /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations.
--
-- It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object.
-- Otherwise, this variable is ignored.
-- */
-- float priority;
-- /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0.
--
-- Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two.
-- It can be useful in cases where alignment returned by Vulkan by functions like `vkGetBufferMemoryRequirements` is not enough,
-- e.g. when doing interop with OpenGL.
-- */
-- VkDeviceSize minAllocationAlignment;
-- /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional.
--
-- Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`.
-- It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`.
-- Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool.
--
-- Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`,
-- can be attached automatically by this library when using other, more convenient of its features.
-- */
-- void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkMemoryAllocateInfo) pMemoryAllocateNext;
--} VmaPoolCreateInfo;
--
--/** @} */
--
--/**
--\addtogroup group_alloc
--@{
--*/
--
--/// Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
--typedef struct VmaAllocationInfo
--{
-- /** \brief Memory type index that this allocation was allocated from.
--
-- It never changes.
-- */
-- uint32_t memoryType;
-- /** \brief Handle to Vulkan memory object.
--
-- Same memory object can be shared by multiple allocations.
--
-- It can change after the allocation is moved during \ref defragmentation.
-- */
-- VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
-- /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation.
--
-- You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. function
-- vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image,
-- not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation
-- and apply this offset automatically.
--
-- It can change after the allocation is moved during \ref defragmentation.
-- */
-- VkDeviceSize offset;
-- /** \brief Size of this allocation, in bytes.
--
-- It never changes.
--
-- \note Allocation size returned in this variable may be greater than the size
-- requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the
-- allocation is accessible for operations on memory e.g. using a pointer after
-- mapping with vmaMapMemory(), but operations on the resource e.g. using
-- `vkCmdCopyBuffer` must be limited to the size of the resource.
-- */
-- VkDeviceSize size;
-- /** \brief Pointer to the beginning of this allocation as mapped data.
--
-- If the allocation hasn't been mapped using vmaMapMemory() and hasn't been
-- created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null.
--
-- It can change after call to vmaMapMemory(), vmaUnmapMemory().
-- It can also change after the allocation is moved during \ref defragmentation.
-- */
-- void* VMA_NULLABLE pMappedData;
-- /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
--
-- It can change after call to vmaSetAllocationUserData() for this allocation.
-- */
-- void* VMA_NULLABLE pUserData;
-- /** \brief Custom allocation name that was set with vmaSetAllocationName().
--
-- It can change after call to vmaSetAllocationName() for this allocation.
--
-- Another way to set custom name is to pass it in VmaAllocationCreateInfo::pUserData with
-- additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED].
-- */
-- const char* VMA_NULLABLE pName;
--} VmaAllocationInfo;
--
--/** Callback function called during vmaBeginDefragmentation() to check custom criterion about ending current defragmentation pass.
--
--Should return true if the defragmentation needs to stop current pass.
--*/
--typedef VkBool32 (VKAPI_PTR* PFN_vmaCheckDefragmentationBreakFunction)(void* VMA_NULLABLE pUserData);
--
--/** \brief Parameters for defragmentation.
--
--To be used with function vmaBeginDefragmentation().
--*/
--typedef struct VmaDefragmentationInfo
--{
-- /// \brief Use combination of #VmaDefragmentationFlagBits.
-- VmaDefragmentationFlags flags;
-- /** \brief Custom pool to be defragmented.
--
-- If null then default pools will undergo defragmentation process.
-- */
-- VmaPool VMA_NULLABLE pool;
-- /** \brief Maximum numbers of bytes that can be copied during single pass, while moving allocations to different places.
--
-- `0` means no limit.
-- */
-- VkDeviceSize maxBytesPerPass;
-- /** \brief Maximum number of allocations that can be moved during single pass to a different place.
--
-- `0` means no limit.
-- */
-- uint32_t maxAllocationsPerPass;
-- /** \brief Optional custom callback for stopping vmaBeginDefragmentation().
--
-- Have to return true for breaking current defragmentation pass.
-- */
-- PFN_vmaCheckDefragmentationBreakFunction VMA_NULLABLE pfnBreakCallback;
-- /// \brief Optional data to pass to custom callback for stopping pass of defragmentation.
-- void* VMA_NULLABLE pBreakCallbackUserData;
--} VmaDefragmentationInfo;
--
--/// Single move of an allocation to be done for defragmentation.
--typedef struct VmaDefragmentationMove
--{
-- /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it.
-- VmaDefragmentationMoveOperation operation;
-- /// Allocation that should be moved.
-- VmaAllocation VMA_NOT_NULL srcAllocation;
-- /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`.
--
-- \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass,
-- to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory().
-- vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory.
-- */
-- VmaAllocation VMA_NOT_NULL dstTmpAllocation;
--} VmaDefragmentationMove;
--
--/** \brief Parameters for incremental defragmentation steps.
--
--To be used with function vmaBeginDefragmentationPass().
--*/
--typedef struct VmaDefragmentationPassMoveInfo
--{
-- /// Number of elements in the `pMoves` array.
-- uint32_t moveCount;
-- /** \brief Array of moves to be performed by the user in the current defragmentation pass.
--
-- Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass().
--
-- For each element, you should:
--
-- 1. Create a new buffer/image in the place pointed by VmaDefragmentationMove::dstMemory + VmaDefragmentationMove::dstOffset.
-- 2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`.
-- 3. Make sure these commands finished executing on the GPU.
-- 4. Destroy the old buffer/image.
--
-- Only then you can finish defragmentation pass by calling vmaEndDefragmentationPass().
-- After this call, the allocation will point to the new place in memory.
--
-- Alternatively, if you cannot move specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
--
-- Alternatively, if you decide you want to completely remove the allocation:
--
-- 1. Destroy its buffer/image.
-- 2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
--
-- Then, after vmaEndDefragmentationPass() the allocation will be freed.
-- */
-- VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
--} VmaDefragmentationPassMoveInfo;
--
--/// Statistics returned for defragmentation process in function vmaEndDefragmentation().
--typedef struct VmaDefragmentationStats
--{
-- /// Total number of bytes that have been copied while moving allocations to different places.
-- VkDeviceSize bytesMoved;
-- /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
-- VkDeviceSize bytesFreed;
-- /// Number of allocations that have been moved to different places.
-- uint32_t allocationsMoved;
-- /// Number of empty `VkDeviceMemory` objects that have been released to the system.
-- uint32_t deviceMemoryBlocksFreed;
--} VmaDefragmentationStats;
--
--/** @} */
--
--/**
--\addtogroup group_virtual
--@{
--*/
--
--/// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock().
--typedef struct VmaVirtualBlockCreateInfo
--{
-- /** \brief Total size of the virtual block.
--
-- Sizes can be expressed in bytes or any units you want as long as you are consistent in using them.
-- For example, if you allocate from some array of structures, 1 can mean single instance of entire structure.
-- */
-- VkDeviceSize size;
--
-- /** \brief Use combination of #VmaVirtualBlockCreateFlagBits.
-- */
-- VmaVirtualBlockCreateFlags flags;
--
-- /** \brief Custom CPU memory allocation callbacks. Optional.
--
-- Optional, can be null. When specified, they will be used for all CPU-side memory allocations.
-- */
-- const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
--} VmaVirtualBlockCreateInfo;
--
--/// Parameters of created virtual allocation to be passed to vmaVirtualAllocate().
--typedef struct VmaVirtualAllocationCreateInfo
--{
-- /** \brief Size of the allocation.
--
-- Cannot be zero.
-- */
-- VkDeviceSize size;
-- /** \brief Required alignment of the allocation. Optional.
--
-- Must be power of two. Special value 0 has the same meaning as 1 - means no special alignment is required, so allocation can start at any offset.
-- */
-- VkDeviceSize alignment;
-- /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits.
-- */
-- VmaVirtualAllocationCreateFlags flags;
-- /** \brief Custom pointer to be associated with the allocation. Optional.
--
-- It can be any value and can be used for user-defined purposes. It can be fetched or changed later.
-- */
-- void* VMA_NULLABLE pUserData;
--} VmaVirtualAllocationCreateInfo;
--
--/// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo().
--typedef struct VmaVirtualAllocationInfo
--{
-- /** \brief Offset of the allocation.
--
-- Offset at which the allocation was made.
-- */
-- VkDeviceSize offset;
-- /** \brief Size of the allocation.
--
-- Same value as passed in VmaVirtualAllocationCreateInfo::size.
-- */
-- VkDeviceSize size;
-- /** \brief Custom pointer associated with the allocation.
--
-- Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData().
-- */
-- void* VMA_NULLABLE pUserData;
--} VmaVirtualAllocationInfo;
--
--/** @} */
--
--#endif // _VMA_DATA_TYPES_DECLARATIONS
--
--#ifndef _VMA_FUNCTION_HEADERS
--
--/**
--\addtogroup group_init
--@{
--*/
--
--/// Creates #VmaAllocator object.
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
-- const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
-- VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator);
--
--/// Destroys allocator object.
--VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
-- VmaAllocator VMA_NULLABLE allocator);
--
--/** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc.
--
--It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to
--`VkPhysicalDevice`, `VkDevice` etc. every time using this function.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);
--
--/**
--PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
--You can access it here, without fetching it again on your own.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
-- VmaAllocator VMA_NOT_NULL allocator,
-- const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties);
--
--/**
--PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
--You can access it here, without fetching it again on your own.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
-- VmaAllocator VMA_NOT_NULL allocator,
-- const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);
--
--/**
--\brief Given Memory Type Index, returns Property Flags of this memory type.
--
--This is just a convenience function. Same information can be obtained using
--vmaGetMemoryProperties().
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
-- VmaAllocator VMA_NOT_NULL allocator,
-- uint32_t memoryTypeIndex,
-- VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
--
--/** \brief Sets index of the current frame.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
-- VmaAllocator VMA_NOT_NULL allocator,
-- uint32_t frameIndex);
--
--/** @} */
--
--/**
--\addtogroup group_stats
--@{
--*/
--
--/** \brief Retrieves statistics from current state of the Allocator.
--
--This function is called "calculate" not "get" because it has to traverse all
--internal data structures, so it may be quite slow. Use it for debugging purposes.
--For faster but more brief statistics suitable to be called every frame or every allocation,
--use vmaGetHeapBudgets().
--
--Note that when using allocator from multiple threads, returned information may immediately
--become outdated.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaTotalStatistics* VMA_NOT_NULL pStats);
--
--/** \brief Retrieves information about current memory usage and budget for all memory heaps.
--
--\param allocator
--\param[out] pBudgets Must point to array with number of elements at least equal to number of memory heaps in physical device used.
--
--This function is called "get" not "calculate" because it is very fast, suitable to be called
--every frame or every allocation. For more detailed statistics use vmaCalculateStatistics().
--
--Note that when using allocator from multiple threads, returned information may immediately
--become outdated.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets);
--
--/** @} */
--
--/**
--\addtogroup group_alloc
--@{
--*/
--
--/**
--\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
--
--This algorithm tries to find a memory type that:
--
--- Is allowed by memoryTypeBits.
--- Contains all the flags from pAllocationCreateInfo->requiredFlags.
--- Matches intended usage.
--- Has as many flags from pAllocationCreateInfo->preferredFlags as possible.
--
--\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result
--from this function or any other allocating function probably means that your
--device doesn't support any memory type with requested features for the specific
--type of resource you want to use it for. Please check parameters of your
--resource, like image layout (OPTIMAL versus LINEAR) or mip level count.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
-- VmaAllocator VMA_NOT_NULL allocator,
-- uint32_t memoryTypeBits,
-- const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
-- uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
--
--/**
--\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
--
--It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
--It internally creates a temporary, dummy buffer that never has memory bound.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
-- VmaAllocator VMA_NOT_NULL allocator,
-- const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
-- const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
-- uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
--
--/**
--\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
--
--It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
--It internally creates a temporary, dummy image that never has memory bound.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
-- VmaAllocator VMA_NOT_NULL allocator,
-- const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
-- const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
-- uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
--
--/** \brief Allocates Vulkan device memory and creates #VmaPool object.
--
--\param allocator Allocator object.
--\param pCreateInfo Parameters of pool to create.
--\param[out] pPool Handle to created pool.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
-- VmaAllocator VMA_NOT_NULL allocator,
-- const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
-- VmaPool VMA_NULLABLE* VMA_NOT_NULL pPool);
--
--/** \brief Destroys #VmaPool object and frees Vulkan device memory.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaPool VMA_NULLABLE pool);
--
--/** @} */
--
--/**
--\addtogroup group_stats
--@{
--*/
--
--/** \brief Retrieves statistics of existing #VmaPool object.
--
--\param allocator Allocator object.
--\param pool Pool object.
--\param[out] pPoolStats Statistics of specified pool.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaPool VMA_NOT_NULL pool,
-- VmaStatistics* VMA_NOT_NULL pPoolStats);
--
--/** \brief Retrieves detailed statistics of existing #VmaPool object.
--
--\param allocator Allocator object.
--\param pool Pool object.
--\param[out] pPoolStats Statistics of specified pool.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaPool VMA_NOT_NULL pool,
-- VmaDetailedStatistics* VMA_NOT_NULL pPoolStats);
--
--/** @} */
--
--/**
--\addtogroup group_alloc
--@{
--*/
--
--/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions.
--
--Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
--`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is
--`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
--
--Possible return values:
--
--- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool.
--- `VK_SUCCESS` - corruption detection has been performed and succeeded.
--- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
-- `VMA_ASSERT` is also fired in that case.
--- Other value: Error returned by Vulkan, e.g. memory mapping failure.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaPool VMA_NOT_NULL pool);
--
--/** \brief Retrieves name of a custom pool.
--
--After the call `ppName` is either null or points to an internally-owned null-terminated string
--containing name of the pool that was previously set. The pointer becomes invalid when the pool is
--destroyed or its name is changed using vmaSetPoolName().
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaPool VMA_NOT_NULL pool,
-- const char* VMA_NULLABLE* VMA_NOT_NULL ppName);
--
--/** \brief Sets name of a custom pool.
--
--`pName` can be either null or pointer to a null-terminated string with new name for the pool.
--Function makes internal copy of the string, so it can be changed or freed immediately after this call.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaPool VMA_NOT_NULL pool,
-- const char* VMA_NULLABLE pName);
--
--/** \brief General purpose memory allocation.
--
--\param allocator
--\param pVkMemoryRequirements
--\param pCreateInfo
--\param[out] pAllocation Handle to allocated memory.
--\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
--
--You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
--
--It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(),
--vmaCreateBuffer(), vmaCreateImage() instead whenever possible.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
-- VmaAllocator VMA_NOT_NULL allocator,
-- const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
-- const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
-- VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
-- VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
--
--/** \brief General purpose memory allocation for multiple allocation objects at once.
--
--\param allocator Allocator object.
--\param pVkMemoryRequirements Memory requirements for each allocation.
--\param pCreateInfo Creation parameters for each allocation.
--\param allocationCount Number of allocations to make.
--\param[out] pAllocations Pointer to array that will be filled with handles to created allocations.
--\param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
--
--You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
--
--Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.
--It is just a general purpose allocation function able to make multiple allocations at once.
--It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.
--
--All allocations are made using same parameters. All of them are created out of the same memory pool and type.
--If any allocation fails, all allocations already made within this function call are also freed, so that when
--returned result is not `VK_SUCCESS`, `pAllocation` array is always entirely filled with `VK_NULL_HANDLE`.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
-- VmaAllocator VMA_NOT_NULL allocator,
-- const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
-- const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
-- size_t allocationCount,
-- VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
-- VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
--
--/** \brief Allocates memory suitable for given `VkBuffer`.
--
--\param allocator
--\param buffer
--\param pCreateInfo
--\param[out] pAllocation Handle to allocated memory.
--\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
--
--It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory().
--
--This is a special-purpose function. In most cases you should use vmaCreateBuffer().
--
--You must free the allocation using vmaFreeMemory() when no longer needed.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
-- const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
-- VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
-- VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
--
--/** \brief Allocates memory suitable for given `VkImage`.
--
--\param allocator
--\param image
--\param pCreateInfo
--\param[out] pAllocation Handle to allocated memory.
--\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
--
--It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindImageMemory().
--
--This is a special-purpose function. In most cases you should use vmaCreateImage().
--
--You must free the allocation using vmaFreeMemory() when no longer needed.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
-- const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
-- VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
-- VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
--
--/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
--
--Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
-- VmaAllocator VMA_NOT_NULL allocator,
-- const VmaAllocation VMA_NULLABLE allocation);
--
--/** \brief Frees memory and destroys multiple allocations.
--
--Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
--It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
--vmaAllocateMemoryPages() and other functions.
--It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
--
--Allocations in `pAllocations` array can come from any memory pools and types.
--Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
-- VmaAllocator VMA_NOT_NULL allocator,
-- size_t allocationCount,
-- const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
--
--/** \brief Returns current information about specified allocation.
--
--Current parameters of given allocation are returned in `pAllocationInfo`.
--
--Although this function doesn't lock any mutex, so it should be quite efficient,
--you should avoid calling it too often.
--You can retrieve same VmaAllocationInfo structure while creating your resource, from function
--vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure parameters don't change
--(e.g. due to defragmentation).
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
--
--/** \brief Sets pUserData in given allocation to new value.
--
--The value of pointer `pUserData` is copied to allocation's `pUserData`.
--It is opaque, so you can use it however you want - e.g.
--as a pointer, ordinal number or some handle to you own data.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- void* VMA_NULLABLE pUserData);
--
--/** \brief Sets pName in given allocation to new value.
--
--`pName` must be either null, or pointer to a null-terminated string. The function
--makes local copy of the string and sets it as allocation's `pName`. String
--passed as pName doesn't need to be valid for whole lifetime of the allocation -
--you can free it after this call. String previously pointed by allocation's
--`pName` is freed from memory.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- const char* VMA_NULLABLE pName);
--
--/**
--\brief Given an allocation, returns Property Flags of its memory type.
--
--This is just a convenience function. Same information can be obtained using
--vmaGetAllocationInfo() + vmaGetMemoryProperties().
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
--
--/** \brief Maps memory represented by given allocation and returns pointer to it.
--
--Maps memory represented by given allocation to make it accessible to CPU code.
--When succeeded, `*ppData` contains pointer to first byte of this memory.
--
--\warning
--If the allocation is part of a bigger `VkDeviceMemory` block, returned pointer is
--correctly offsetted to the beginning of region assigned to this particular allocation.
--Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block.
--You should not add VmaAllocationInfo::offset to it!
--
--Mapping is internally reference-counted and synchronized, so despite raw Vulkan
--function `vkMapMemory()` cannot be used to map same block of `VkDeviceMemory`
--multiple times simultaneously, it is safe to call this function on allocations
--assigned to the same memory block. Actual Vulkan memory will be mapped on first
--mapping and unmapped on last unmapping.
--
--If the function succeeded, you must call vmaUnmapMemory() to unmap the
--allocation when mapping is no longer needed or before freeing the allocation, at
--the latest.
--
--It also safe to call this function multiple times on the same allocation. You
--must call vmaUnmapMemory() same number of times as you called vmaMapMemory().
--
--It is also safe to call this function on allocation created with
--#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
--You must still call vmaUnmapMemory() same number of times as you called
--vmaMapMemory(). You must not call vmaUnmapMemory() additional time to free the
--"0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
--
--This function fails when used on allocation made in memory type that is not
--`HOST_VISIBLE`.
--
--This function doesn't automatically flush or invalidate caches.
--If the allocation is made from a memory types that is not `HOST_COHERENT`,
--you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- void* VMA_NULLABLE* VMA_NOT_NULL ppData);
--
--/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
--
--For details, see description of vmaMapMemory().
--
--This function doesn't automatically flush or invalidate caches.
--If the allocation is made from a memory types that is not `HOST_COHERENT`,
--you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation);
--
--/** \brief Flushes memory of given allocation.
--
--Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation.
--It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`.
--Unmap operation doesn't do that automatically.
--
--- `offset` must be relative to the beginning of allocation.
--- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` the the end of given allocation.
--- `offset` and `size` don't have to be aligned.
-- They are internally rounded down/up to multiply of `nonCoherentAtomSize`.
--- If `size` is 0, this call is ignored.
--- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
-- this call is ignored.
--
--Warning! `offset` and `size` are relative to the contents of given `allocation`.
--If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
--Do not pass allocation's offset as `offset`!!!
--
--This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
--called, otherwise `VK_SUCCESS`.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- VkDeviceSize offset,
-- VkDeviceSize size);
--
--/** \brief Invalidates memory of given allocation.
--
--Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation.
--It needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`.
--Map operation doesn't do that automatically.
--
--- `offset` must be relative to the beginning of allocation.
--- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` the the end of given allocation.
--- `offset` and `size` don't have to be aligned.
-- They are internally rounded down/up to multiply of `nonCoherentAtomSize`.
--- If `size` is 0, this call is ignored.
--- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
-- this call is ignored.
--
--Warning! `offset` and `size` are relative to the contents of given `allocation`.
--If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
--Do not pass allocation's offset as `offset`!!!
--
--This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
--it is called, otherwise `VK_SUCCESS`.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- VkDeviceSize offset,
-- VkDeviceSize size);
--
--/** \brief Flushes memory of given set of allocations.
--
--Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations.
--For more information, see documentation of vmaFlushAllocation().
--
--\param allocator
--\param allocationCount
--\param allocations
--\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all ofsets are zero.
--\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
--
--This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
--called, otherwise `VK_SUCCESS`.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
-- VmaAllocator VMA_NOT_NULL allocator,
-- uint32_t allocationCount,
-- const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
-- const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
-- const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
--
--/** \brief Invalidates memory of given set of allocations.
--
--Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations.
--For more information, see documentation of vmaInvalidateAllocation().
--
--\param allocator
--\param allocationCount
--\param allocations
--\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all ofsets are zero.
--\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
--
--This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
--called, otherwise `VK_SUCCESS`.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
-- VmaAllocator VMA_NOT_NULL allocator,
-- uint32_t allocationCount,
-- const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
-- const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
-- const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
--
--/** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
--
--\param allocator
--\param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
--
--Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
--`VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are
--`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
--
--Possible return values:
--
--- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types.
--- `VK_SUCCESS` - corruption detection has been performed and succeeded.
--- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
-- `VMA_ASSERT` is also fired in that case.
--- Other value: Error returned by Vulkan, e.g. memory mapping failure.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(
-- VmaAllocator VMA_NOT_NULL allocator,
-- uint32_t memoryTypeBits);
--
--/** \brief Begins defragmentation process.
--
--\param allocator Allocator object.
--\param pInfo Structure filled with parameters of defragmentation.
--\param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation.
--\returns
--- `VK_SUCCESS` if defragmentation can begin.
--- `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported.
--
--For more information about defragmentation, see documentation chapter:
--[Defragmentation](@ref defragmentation).
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(
-- VmaAllocator VMA_NOT_NULL allocator,
-- const VmaDefragmentationInfo* VMA_NOT_NULL pInfo,
-- VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext);
--
--/** \brief Ends defragmentation process.
--
--\param allocator Allocator object.
--\param context Context object that has been created by vmaBeginDefragmentation().
--\param[out] pStats Optional stats for the defragmentation. Can be null.
--
--Use this function to finish defragmentation started by vmaBeginDefragmentation().
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaDefragmentationContext VMA_NOT_NULL context,
-- VmaDefragmentationStats* VMA_NULLABLE pStats);
--
--/** \brief Starts single defragmentation pass.
--
--\param allocator Allocator object.
--\param context Context object that has been created by vmaBeginDefragmentation().
--\param[out] pPassInfo Computed information for current pass.
--\returns
--- `VK_SUCCESS` if no more moves are possible. Then you can omit call to vmaEndDefragmentationPass() and simply end whole defragmentation.
--- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(),
-- and then preferably try another pass with vmaBeginDefragmentationPass().
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaDefragmentationContext VMA_NOT_NULL context,
-- VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
--
--/** \brief Ends single defragmentation pass.
--
--\param allocator Allocator object.
--\param context Context object that has been created by vmaBeginDefragmentation().
--\param pPassInfo Computed information for current pass filled by vmaBeginDefragmentationPass() and possibly modified by you.
--
--Returns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if more defragmentations are possible.
--
--Ends incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`.
--After this call:
--
--- Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY
-- (which is the default) will be pointing to the new destination place.
--- Allocation at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY
-- will be freed.
--
--If no more moves are possible you can end whole defragmentation.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaDefragmentationContext VMA_NOT_NULL context,
-- VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
--
--/** \brief Binds buffer to allocation.
--
--Binds specified buffer to region of memory represented by specified allocation.
--Gets `VkDeviceMemory` handle and offset from the allocation.
--If you want to create a buffer, allocate memory for it and bind them together separately,
--you should use this function for binding instead of standard `vkBindBufferMemory()`,
--because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
--allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
--(which is illegal in Vulkan).
--
--It is recommended to use function vmaCreateBuffer() instead of this one.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
--
--/** \brief Binds buffer to allocation with additional parameters.
--
--\param allocator
--\param allocation
--\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
--\param buffer
--\param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null.
--
--This function is similar to vmaBindBufferMemory(), but it provides additional parameters.
--
--If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
--or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- VkDeviceSize allocationLocalOffset,
-- VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
-- const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindBufferMemoryInfoKHR) pNext);
--
--/** \brief Binds image to allocation.
--
--Binds specified image to region of memory represented by specified allocation.
--Gets `VkDeviceMemory` handle and offset from the allocation.
--If you want to create an image, allocate memory for it and bind them together separately,
--you should use this function for binding instead of standard `vkBindImageMemory()`,
--because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
--allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
--(which is illegal in Vulkan).
--
--It is recommended to use function vmaCreateImage() instead of this one.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);
--
--/** \brief Binds image to allocation with additional parameters.
--
--\param allocator
--\param allocation
--\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
--\param image
--\param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null.
--
--This function is similar to vmaBindImageMemory(), but it provides additional parameters.
--
--If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
--or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- VkDeviceSize allocationLocalOffset,
-- VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
-- const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindImageMemoryInfoKHR) pNext);
--
--/** \brief Creates a new `VkBuffer`, allocates and binds memory for it.
--
--\param allocator
--\param pBufferCreateInfo
--\param pAllocationCreateInfo
--\param[out] pBuffer Buffer that was created.
--\param[out] pAllocation Allocation that was created.
--\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
--
--This function automatically:
--
---# Creates buffer.
---# Allocates appropriate memory for it.
---# Binds the buffer with the memory.
--
--If any of these operations fail, buffer and allocation are not created,
--returned value is negative error code, `*pBuffer` and `*pAllocation` are null.
--
--If the function succeeded, you must destroy both buffer and allocation when you
--no longer need them using either convenience function vmaDestroyBuffer() or
--separately, using `vkDestroyBuffer()` and vmaFreeMemory().
--
--If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used,
--VK_KHR_dedicated_allocation extension is used internally to query driver whether
--it requires or prefers the new buffer to have dedicated allocation. If yes,
--and if dedicated allocation is possible
--(#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated
--allocation for this buffer, just like when using
--#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
--
--\note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer,
--although recommended as a good practice, is out of scope of this library and could be implemented
--by the user as a higher-level logic on top of VMA.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
-- VmaAllocator VMA_NOT_NULL allocator,
-- const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
-- const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
-- VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
-- VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
-- VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
--
--/** \brief Creates a buffer with additional minimum alignment.
--
--Similar to vmaCreateBuffer() but provides additional parameter `minAlignment` which allows to specify custom,
--minimum alignment to be used when placing the buffer inside a larger memory block, which may be needed e.g.
--for interop with OpenGL.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
-- VmaAllocator VMA_NOT_NULL allocator,
-- const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
-- const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
-- VkDeviceSize minAlignment,
-- VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
-- VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
-- VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
--
--/** \brief Creates a new `VkBuffer`, binds already created memory for it.
--
--\param allocator
--\param allocation Allocation that provides memory to be used for binding new buffer to it.
--\param pBufferCreateInfo
--\param[out] pBuffer Buffer that was created.
--
--This function automatically:
--
---# Creates buffer.
---# Binds the buffer with the supplied memory.
--
--If any of these operations fail, buffer is not created,
--returned value is negative error code and `*pBuffer` is null.
--
--If the function succeeded, you must destroy the buffer when you
--no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
--allocation you can use convenience function vmaDestroyBuffer().
--
--\note There is a new version of this function augmented with parameter `allocationLocalOffset` - see vmaCreateAliasingBuffer2().
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
-- VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
--
--/** \brief Creates a new `VkBuffer`, binds already created memory for it.
--
--\param allocator
--\param allocation Allocation that provides memory to be used for binding new buffer to it.
--\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the allocation. Normally it should be 0.
--\param pBufferCreateInfo
--\param[out] pBuffer Buffer that was created.
--
--This function automatically:
--
---# Creates buffer.
---# Binds the buffer with the supplied memory.
--
--If any of these operations fail, buffer is not created,
--returned value is negative error code and `*pBuffer` is null.
--
--If the function succeeded, you must destroy the buffer when you
--no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
--allocation you can use convenience function vmaDestroyBuffer().
--
--\note This is a new version of the function augmented with parameter `allocationLocalOffset`.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- VkDeviceSize allocationLocalOffset,
-- const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
-- VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
--
--/** \brief Destroys Vulkan buffer and frees allocated memory.
--
--This is just a convenience function equivalent to:
--
--\code
--vkDestroyBuffer(device, buffer, allocationCallbacks);
--vmaFreeMemory(allocator, allocation);
--\endcode
--
--It is safe to pass null as buffer and/or allocation.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
-- VmaAllocation VMA_NULLABLE allocation);
--
--/// Function similar to vmaCreateBuffer().
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
-- VmaAllocator VMA_NOT_NULL allocator,
-- const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
-- const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
-- VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage,
-- VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
-- VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
--
--/// Function similar to vmaCreateAliasingBuffer() but for images.
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
-- VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
--
--/// Function similar to vmaCreateAliasingBuffer2() but for images.
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- VkDeviceSize allocationLocalOffset,
-- const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
-- VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
--
--/** \brief Destroys Vulkan image and frees allocated memory.
--
--This is just a convenience function equivalent to:
--
--\code
--vkDestroyImage(device, image, allocationCallbacks);
--vmaFreeMemory(allocator, allocation);
--\endcode
--
--It is safe to pass null as image and/or allocation.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
-- VmaAllocation VMA_NULLABLE allocation);
--
--/** @} */
--
--/**
--\addtogroup group_virtual
--@{
--*/
--
--/** \brief Creates new #VmaVirtualBlock object.
--
--\param pCreateInfo Parameters for creation.
--\param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
-- const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
-- VmaVirtualBlock VMA_NULLABLE* VMA_NOT_NULL pVirtualBlock);
--
--/** \brief Destroys #VmaVirtualBlock object.
--
--Please note that you should consciously handle virtual allocations that could remain unfreed in the block.
--You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock()
--if you are sure this is what you want. If you do neither, an assert is called.
--
--If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`,
--don't forget to free them.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(
-- VmaVirtualBlock VMA_NULLABLE virtualBlock);
--
--/** \brief Returns true of the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations.
--*/
--VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(
-- VmaVirtualBlock VMA_NOT_NULL virtualBlock);
--
--/** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(
-- VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-- VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo);
--
--/** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.
--
--If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned
--(despite the function doesn't ever allocate actual GPU memory).
--`pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, it set to `UINT64_MAX`.
--
--\param virtualBlock Virtual block
--\param pCreateInfo Parameters for the allocation
--\param[out] pAllocation Returned handle of the new allocation
--\param[out] pOffset Returned offset of the new allocation. Optional, can be null.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(
-- VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-- const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
-- VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
-- VkDeviceSize* VMA_NULLABLE pOffset);
--
--/** \brief Frees virtual allocation inside given #VmaVirtualBlock.
--
--It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(
-- VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-- VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation);
--
--/** \brief Frees all virtual allocations inside given #VmaVirtualBlock.
--
--You must either call this function or free each virtual allocation individually with vmaVirtualFree()
--before destroying a virtual block. Otherwise, an assert is called.
--
--If you keep pointer to some additional metadata associated with your virtual allocation in its `pUserData`,
--don't forget to free it as well.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(
-- VmaVirtualBlock VMA_NOT_NULL virtualBlock);
--
--/** \brief Changes custom pointer associated with given virtual allocation.
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(
-- VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-- VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation,
-- void* VMA_NULLABLE pUserData);
--
--/** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
--
--This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics().
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(
-- VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-- VmaStatistics* VMA_NOT_NULL pStats);
--
--/** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
--
--This function is slow to call. Use for debugging purposes.
--For less detailed statistics, see vmaGetVirtualBlockStatistics().
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(
-- VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-- VmaDetailedStatistics* VMA_NOT_NULL pStats);
--
--/** @} */
--
--#if VMA_STATS_STRING_ENABLED
--/**
--\addtogroup group_stats
--@{
--*/
--
--/** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock.
--\param virtualBlock Virtual block.
--\param[out] ppStatsString Returned string.
--\param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces.
--
--Returned string must be freed using vmaFreeVirtualBlockStatsString().
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(
-- VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-- char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
-- VkBool32 detailedMap);
--
--/// Frees a string returned by vmaBuildVirtualBlockStatsString().
--VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(
-- VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-- char* VMA_NULLABLE pStatsString);
--
--/** \brief Builds and returns statistics as a null-terminated string in JSON format.
--\param allocator
--\param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
--\param detailedMap
--*/
--VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
-- VmaAllocator VMA_NOT_NULL allocator,
-- char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
-- VkBool32 detailedMap);
--
--VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
-- VmaAllocator VMA_NOT_NULL allocator,
-- char* VMA_NULLABLE pStatsString);
--
--/** @} */
--
--#endif // VMA_STATS_STRING_ENABLED
--
--#endif // _VMA_FUNCTION_HEADERS
--
--#ifdef __cplusplus
--}
--#endif
--
--#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
--
--////////////////////////////////////////////////////////////////////////////////
--////////////////////////////////////////////////////////////////////////////////
--//
--// IMPLEMENTATION
--//
--////////////////////////////////////////////////////////////////////////////////
--////////////////////////////////////////////////////////////////////////////////
--
--// For Visual Studio IntelliSense.
--#if defined(__cplusplus) && defined(__INTELLISENSE__)
--#define VMA_IMPLEMENTATION
--#endif
--
--#ifdef VMA_IMPLEMENTATION
--#undef VMA_IMPLEMENTATION
--
--#include <cstdint>
--#include <cstdlib>
--#include <cstring>
--#include <utility>
--#include <type_traits>
--
--#ifdef _MSC_VER
-- #include <intrin.h> // For functions like __popcnt, _BitScanForward etc.
--#endif
--#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
-- #include <bit> // For std::popcount
--#endif
--
--#if VMA_STATS_STRING_ENABLED
-- #include <cstdio> // For snprintf
--#endif
--
--/*******************************************************************************
--CONFIGURATION SECTION
--
--Define some of these macros before each #include of this header or change them
--here if you need other then default behavior depending on your environment.
--*/
--#ifndef _VMA_CONFIGURATION
--
--/*
--Define this macro to 1 to make the library fetch pointers to Vulkan functions
--internally, like:
--
-- vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
--*/
--#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
-- #define VMA_STATIC_VULKAN_FUNCTIONS 1
--#endif
--
--/*
--Define this macro to 1 to make the library fetch pointers to Vulkan functions
--internally, like:
--
-- vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
--
--To use this feature in new versions of VMA you now have to pass
--VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as
--VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null.
--*/
--#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
-- #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
--#endif
--
--#ifndef VMA_USE_STL_SHARED_MUTEX
-- #if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
-- #define VMA_USE_STL_SHARED_MUTEX 1
-- // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
-- // Otherwise it is always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
-- #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
-- #define VMA_USE_STL_SHARED_MUTEX 1
-- #else
-- #define VMA_USE_STL_SHARED_MUTEX 0
-- #endif
--#endif
--
--/*
--Define this macro to include custom header files without having to edit this file directly, e.g.:
--
-- // Inside of "my_vma_configuration_user_includes.h":
--
-- #include "my_custom_assert.h" // for MY_CUSTOM_ASSERT
-- #include "my_custom_min.h" // for my_custom_min
-- #include <algorithm>
-- #include <mutex>
--
-- // Inside a different file, which includes "vk_mem_alloc.h":
--
-- #define VMA_CONFIGURATION_USER_INCLUDES_H "my_vma_configuration_user_includes.h"
-- #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr)
-- #define VMA_MIN(v1, v2) (my_custom_min(v1, v2))
-- #include "vk_mem_alloc.h"
-- ...
--
--The following headers are used in this CONFIGURATION section only, so feel free to
--remove them if not needed.
--*/
--#if !defined(VMA_CONFIGURATION_USER_INCLUDES_H)
-- #include <cassert> // for assert
-- #include <algorithm> // for min, max
-- #include <mutex>
--#else
-- #include VMA_CONFIGURATION_USER_INCLUDES_H
--#endif
--
--#ifndef VMA_NULL
-- // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
-- #define VMA_NULL nullptr
--#endif
--
--// Used to silence warnings for implicit fallthrough.
--#ifndef VMA_FALLTHROUGH
-- #if __has_cpp_attribute(clang::fallthrough)
-- #define VMA_FALLTHROUGH [[clang::fallthrough]];
-- #elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
-- #define VMA_FALLTHROUGH [[fallthrough]]
-- #else
-- #define VMA_FALLTHROUGH
-- #endif
--#endif
--
--// Normal assert to check for programmer's errors, especially in Debug configuration.
--#ifndef VMA_ASSERT
-- #ifdef NDEBUG
-- #define VMA_ASSERT(expr)
-- #else
-- #define VMA_ASSERT(expr) assert(expr)
-- #endif
--#endif
--
--// Assert that will be called very often, like inside data structures e.g. operator[].
--// Making it non-empty can make program slow.
--#ifndef VMA_HEAVY_ASSERT
-- #ifdef NDEBUG
-- #define VMA_HEAVY_ASSERT(expr)
-- #else
-- #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
-- #endif
--#endif
--
--// If your compiler is not compatible with C++17 and definition of
--// aligned_alloc() function is missing, uncommenting following line may help:
--
--//#include <malloc.h>
--
--#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
--#include <cstdlib>
--void* vma_aligned_alloc(size_t alignment, size_t size)
--{
-- // alignment must be >= sizeof(void*)
-- if(alignment < sizeof(void*))
-- {
-- alignment = sizeof(void*);
-- }
--
-- return memalign(alignment, size);
--}
+@@ -2760,7 +2760,7 @@ void* vma_aligned_alloc(size_t alignment, size_t size)
+
+ return memalign(alignment, size);
+ }
-#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC))
--#include <cstdlib>
--
--#if defined(__APPLE__)
--#include <AvailabilityMacros.h>
--#endif
--
--void *vma_aligned_alloc(size_t alignment, size_t size)
--{
-- // Unfortunately, aligned_alloc causes VMA to crash due to it returning null pointers. (At least under 11.4)
-- // Therefore, for now disable this specific exception until a proper solution is found.
-- //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
-- //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
-- // // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only
-- // // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
-- // // MAC_OS_X_VERSION_10_16), even though the function is marked
-- // // available for 10.15. That is why the preprocessor checks for 10.16 but
-- // // the __builtin_available checks for 10.15.
-- // // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
-- // if (__builtin_available(macOS 10.15, iOS 13, *))
-- // return aligned_alloc(alignment, size);
-- //#endif
-- //#endif
--
-- // alignment must be >= sizeof(void*)
-- if(alignment < sizeof(void*))
-- {
-- alignment = sizeof(void*);
-- }
--
-- void *pointer;
-- if(posix_memalign(&pointer, alignment, size) == 0)
-- return pointer;
-- return VMA_NULL;
--}
--#elif defined(_WIN32)
--void* vma_aligned_alloc(size_t alignment, size_t size)
--{
-- return _aligned_malloc(size, alignment);
--}
--#elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
--void* vma_aligned_alloc(size_t alignment, size_t size)
--{
-- return aligned_alloc(alignment, size);
--}
--#else
--void* vma_aligned_alloc(size_t alignment, size_t size)
--{
-- VMA_ASSERT(0 && "Could not implement aligned_alloc automatically. Please enable C++17 or later in your compiler or provide custom implementation of macro VMA_SYSTEM_ALIGNED_MALLOC (and VMA_SYSTEM_ALIGNED_FREE if needed) using the API of your system.");
-- return VMA_NULL;
--}
--#endif
--
--#if defined(_WIN32)
--static void vma_aligned_free(void* ptr)
--{
-- _aligned_free(ptr);
--}
--#else
--static void vma_aligned_free(void* VMA_NULLABLE ptr)
--{
-- free(ptr);
--}
--#endif
--
--#ifndef VMA_ALIGN_OF
-- #define VMA_ALIGN_OF(type) (alignof(type))
--#endif
--
--#ifndef VMA_SYSTEM_ALIGNED_MALLOC
-- #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
--#endif
--
--#ifndef VMA_SYSTEM_ALIGNED_FREE
-- // VMA_SYSTEM_FREE is the old name, but might have been defined by the user
-- #if defined(VMA_SYSTEM_FREE)
-- #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
-- #else
-- #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
-- #endif
--#endif
--
--#ifndef VMA_COUNT_BITS_SET
-- // Returns number of bits set to 1 in (v)
-- #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v)
--#endif
--
--#ifndef VMA_BITSCAN_LSB
-- // Scans integer for index of first nonzero value from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX
-- #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask)
--#endif
--
--#ifndef VMA_BITSCAN_MSB
-- // Scans integer for index of first nonzero value from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX
-- #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask)
--#endif
--
--#ifndef VMA_MIN
-- #define VMA_MIN(v1, v2) ((std::min)((v1), (v2)))
--#endif
--
--#ifndef VMA_MAX
-- #define VMA_MAX(v1, v2) ((std::max)((v1), (v2)))
--#endif
--
--#ifndef VMA_SWAP
-- #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
--#endif
--
--#ifndef VMA_SORT
-- #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
--#endif
--
--#ifndef VMA_DEBUG_LOG_FORMAT
-- #define VMA_DEBUG_LOG_FORMAT(format, ...)
-- /*
-- #define VMA_DEBUG_LOG_FORMAT(format, ...) do { \
-- printf((format), __VA_ARGS__); \
-- printf("\n"); \
-- } while(false)
-- */
--#endif
--
--#ifndef VMA_DEBUG_LOG
-- #define VMA_DEBUG_LOG(str) VMA_DEBUG_LOG_FORMAT("%s", (str))
--#endif
--
--#ifndef VMA_CLASS_NO_COPY
-- #define VMA_CLASS_NO_COPY(className) \
-- private: \
-- className(const className&) = delete; \
-- className& operator=(const className&) = delete;
--#endif
--#ifndef VMA_CLASS_NO_COPY_NO_MOVE
-- #define VMA_CLASS_NO_COPY_NO_MOVE(className) \
-- private: \
-- className(const className&) = delete; \
-- className(className&&) = delete; \
-- className& operator=(const className&) = delete; \
-- className& operator=(className&&) = delete;
--#endif
--
--// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
--#if VMA_STATS_STRING_ENABLED
-- static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
-- {
-- snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
-- }
-- static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
-- {
-- snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
-- }
-- static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
-- {
-- snprintf(outStr, strLen, "%p", ptr);
-- }
--#endif
--
--#ifndef VMA_MUTEX
-- class VmaMutex
-- {
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaMutex)
-- public:
-- VmaMutex() { }
-- void Lock() { m_Mutex.lock(); }
-- void Unlock() { m_Mutex.unlock(); }
-- bool TryLock() { return m_Mutex.try_lock(); }
-- private:
-- std::mutex m_Mutex;
-- };
-- #define VMA_MUTEX VmaMutex
--#endif
--
--// Read-write mutex, where "read" is shared access, "write" is exclusive access.
--#ifndef VMA_RW_MUTEX
-- #if VMA_USE_STL_SHARED_MUTEX
-- // Use std::shared_mutex from C++17.
-- #include <shared_mutex>
-- class VmaRWMutex
-- {
-- public:
-- void LockRead() { m_Mutex.lock_shared(); }
-- void UnlockRead() { m_Mutex.unlock_shared(); }
-- bool TryLockRead() { return m_Mutex.try_lock_shared(); }
-- void LockWrite() { m_Mutex.lock(); }
-- void UnlockWrite() { m_Mutex.unlock(); }
-- bool TryLockWrite() { return m_Mutex.try_lock(); }
-- private:
-- std::shared_mutex m_Mutex;
-- };
-- #define VMA_RW_MUTEX VmaRWMutex
-- #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
-- // Use SRWLOCK from WinAPI.
-- // Minimum supported client = Windows Vista, server = Windows Server 2008.
-- class VmaRWMutex
-- {
-- public:
-- VmaRWMutex() { InitializeSRWLock(&m_Lock); }
-- void LockRead() { AcquireSRWLockShared(&m_Lock); }
-- void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
-- bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
-- void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
-- void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
-- bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
-- private:
-- SRWLOCK m_Lock;
-- };
-- #define VMA_RW_MUTEX VmaRWMutex
-- #else
-- // Less efficient fallback: Use normal mutex.
-- class VmaRWMutex
-- {
-- public:
-- void LockRead() { m_Mutex.Lock(); }
-- void UnlockRead() { m_Mutex.Unlock(); }
-- bool TryLockRead() { return m_Mutex.TryLock(); }
-- void LockWrite() { m_Mutex.Lock(); }
-- void UnlockWrite() { m_Mutex.Unlock(); }
-- bool TryLockWrite() { return m_Mutex.TryLock(); }
-- private:
-- VMA_MUTEX m_Mutex;
-- };
-- #define VMA_RW_MUTEX VmaRWMutex
-- #endif // #if VMA_USE_STL_SHARED_MUTEX
--#endif // #ifndef VMA_RW_MUTEX
--
--/*
--If providing your own implementation, you need to implement a subset of std::atomic.
--*/
--#ifndef VMA_ATOMIC_UINT32
-- #include <atomic>
-- #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
--#endif
--
--#ifndef VMA_ATOMIC_UINT64
-- #include <atomic>
-- #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
--#endif
--
--#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
-- /**
-- Every allocation will have its own memory block.
-- Define to 1 for debugging purposes only.
-- */
-- #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
--#endif
--
--#ifndef VMA_MIN_ALIGNMENT
-- /**
-- Minimum alignment of all allocations, in bytes.
-- Set to more than 1 for debugging purposes. Must be power of two.
-- */
-- #ifdef VMA_DEBUG_ALIGNMENT // Old name
-- #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
-- #else
-- #define VMA_MIN_ALIGNMENT (1)
-- #endif
--#endif
--
--#ifndef VMA_DEBUG_MARGIN
-- /**
-- Minimum margin after every allocation, in bytes.
-- Set nonzero for debugging purposes only.
-- */
-- #define VMA_DEBUG_MARGIN (0)
--#endif
--
--#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
-- /**
-- Define this macro to 1 to automatically fill new allocations and destroyed
-- allocations with some bit pattern.
-- */
-- #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
--#endif
--
--#ifndef VMA_DEBUG_DETECT_CORRUPTION
-- /**
-- Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to
-- enable writing magic value to the margin after every allocation and
-- validating it, so that memory corruptions (out-of-bounds writes) are detected.
-- */
-- #define VMA_DEBUG_DETECT_CORRUPTION (0)
--#endif
--
--#ifndef VMA_DEBUG_GLOBAL_MUTEX
-- /**
-- Set this to 1 for debugging purposes only, to enable single mutex protecting all
-- entry calls to the library. Can be useful for debugging multithreading issues.
-- */
-- #define VMA_DEBUG_GLOBAL_MUTEX (0)
--#endif
--
--#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
-- /**
-- Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
-- Set to more than 1 for debugging purposes only. Must be power of two.
-- */
-- #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
--#endif
--
--#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
-- /*
-- Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount
-- and return error instead of leaving up to Vulkan implementation what to do in such cases.
-- */
-- #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0)
--#endif
--
--#ifndef VMA_SMALL_HEAP_MAX_SIZE
-- /// Maximum size of a memory heap in Vulkan to consider it "small".
-- #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
--#endif
--
--#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
-- /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
-- #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
--#endif
--
--/*
--Mapping hysteresis is a logic that launches when vmaMapMemory/vmaUnmapMemory is called
--or a persistently mapped allocation is created and destroyed several times in a row.
--It keeps additional +1 mapping of a device memory block to prevent calling actual
--vkMapMemory/vkUnmapMemory too many times, which may improve performance and help
--tools like RenderDoc.
--*/
--#ifndef VMA_MAPPING_HYSTERESIS_ENABLED
-- #define VMA_MAPPING_HYSTERESIS_ENABLED 1
--#endif
--
--#define VMA_VALIDATE(cond) do { if(!(cond)) { \
-- VMA_ASSERT(0 && "Validation failed: " #cond); \
-- return false; \
-- } } while(false)
--
--/*******************************************************************************
--END OF CONFIGURATION
--*/
--#endif // _VMA_CONFIGURATION
--
--
--static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
--static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
--// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
--static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
--
--// Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants.
--static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
--static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
--static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
--static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200;
--static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000;
--static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
--static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
--static const uint32_t VMA_VENDOR_ID_AMD = 4098;
--
--// This one is tricky. Vulkan specification defines this code as available since
--// Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131.
--// See pull request #207.
--#define VK_ERROR_UNKNOWN_COPY ((VkResult)-13)
--
--
--#if VMA_STATS_STRING_ENABLED
--// Correspond to values of enum VmaSuballocationType.
--static const char* VMA_SUBALLOCATION_TYPE_NAMES[] =
--{
-- "FREE",
-- "UNKNOWN",
-- "BUFFER",
-- "IMAGE_UNKNOWN",
-- "IMAGE_LINEAR",
-- "IMAGE_OPTIMAL",
--};
--#endif
--
--static VkAllocationCallbacks VmaEmptyAllocationCallbacks =
-- { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
--
--
--#ifndef _VMA_ENUM_DECLARATIONS
--
--enum VmaSuballocationType
--{
-- VMA_SUBALLOCATION_TYPE_FREE = 0,
-- VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
-- VMA_SUBALLOCATION_TYPE_BUFFER = 2,
-- VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
-- VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
-- VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
-- VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
--};
--
--enum VMA_CACHE_OPERATION
--{
-- VMA_CACHE_FLUSH,
-- VMA_CACHE_INVALIDATE
--};
--
--enum class VmaAllocationRequestType
--{
-- Normal,
-- TLSF,
-- // Used by "Linear" algorithm.
-- UpperAddress,
-- EndOf1st,
-- EndOf2nd,
--};
--
--#endif // _VMA_ENUM_DECLARATIONS
--
--#ifndef _VMA_FORWARD_DECLARATIONS
--// Opaque handle used by allocation algorithms to identify single allocation in any conforming way.
--VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle)
--
--struct VmaMutexLock;
--struct VmaMutexLockRead;
--struct VmaMutexLockWrite;
--
--template<typename T>
--struct AtomicTransactionalIncrement;
--
--template<typename T>
--struct VmaStlAllocator;
--
--template<typename T, typename AllocatorT>
--class VmaVector;
--
--template<typename T, typename AllocatorT, size_t N>
--class VmaSmallVector;
--
--template<typename T>
--class VmaPoolAllocator;
--
--template<typename T>
--struct VmaListItem;
--
--template<typename T>
--class VmaRawList;
--
--template<typename T, typename AllocatorT>
--class VmaList;
--
--template<typename ItemTypeTraits>
--class VmaIntrusiveLinkedList;
--
--// Unused in this version
--#if 0
--template<typename T1, typename T2>
--struct VmaPair;
--template<typename FirstT, typename SecondT>
--struct VmaPairFirstLess;
--
--template<typename KeyT, typename ValueT>
--class VmaMap;
--#endif
--
--#if VMA_STATS_STRING_ENABLED
--class VmaStringBuilder;
--class VmaJsonWriter;
--#endif
--
--class VmaDeviceMemoryBlock;
--
--struct VmaDedicatedAllocationListItemTraits;
--class VmaDedicatedAllocationList;
--
--struct VmaSuballocation;
--struct VmaSuballocationOffsetLess;
--struct VmaSuballocationOffsetGreater;
--struct VmaSuballocationItemSizeLess;
--
--typedef VmaList<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> VmaSuballocationList;
--
--struct VmaAllocationRequest;
--
--class VmaBlockMetadata;
--class VmaBlockMetadata_Linear;
--class VmaBlockMetadata_TLSF;
--
--class VmaBlockVector;
--
--struct VmaPoolListItemTraits;
--
--struct VmaCurrentBudgetData;
--
--class VmaAllocationObjectAllocator;
--
--#endif // _VMA_FORWARD_DECLARATIONS
--
--
--#ifndef _VMA_FUNCTIONS
--
--/*
--Returns number of bits set to 1 in (v).
--
--On specific platforms and compilers you can use instrinsics like:
--
--Visual Studio:
-- return __popcnt(v);
--GCC, Clang:
-- return static_cast<uint32_t>(__builtin_popcount(v));
--
--Define macro VMA_COUNT_BITS_SET to provide your optimized implementation.
--But you need to check in runtime whether user's CPU supports these, as some old processors don't.
--*/
--static inline uint32_t VmaCountBitsSet(uint32_t v)
--{
--#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
-- return std::popcount(v);
--#else
-- uint32_t c = v - ((v >> 1) & 0x55555555);
-- c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
-- c = ((c >> 4) + c) & 0x0F0F0F0F;
-- c = ((c >> 8) + c) & 0x00FF00FF;
-- c = ((c >> 16) + c) & 0x0000FFFF;
-- return c;
--#endif
--}
--
--static inline uint8_t VmaBitScanLSB(uint64_t mask)
--{
--#if defined(_MSC_VER) && defined(_WIN64)
-- unsigned long pos;
-- if (_BitScanForward64(&pos, mask))
-- return static_cast<uint8_t>(pos);
-- return UINT8_MAX;
--#elif defined __GNUC__ || defined __clang__
-- return static_cast<uint8_t>(__builtin_ffsll(mask)) - 1U;
--#else
-- uint8_t pos = 0;
-- uint64_t bit = 1;
-- do
-- {
-- if (mask & bit)
-- return pos;
-- bit <<= 1;
-- } while (pos++ < 63);
-- return UINT8_MAX;
--#endif
--}
--
--static inline uint8_t VmaBitScanLSB(uint32_t mask)
--{
--#ifdef _MSC_VER
-- unsigned long pos;
-- if (_BitScanForward(&pos, mask))
-- return static_cast<uint8_t>(pos);
-- return UINT8_MAX;
--#elif defined __GNUC__ || defined __clang__
-- return static_cast<uint8_t>(__builtin_ffs(mask)) - 1U;
--#else
-- uint8_t pos = 0;
-- uint32_t bit = 1;
-- do
-- {
-- if (mask & bit)
-- return pos;
-- bit <<= 1;
-- } while (pos++ < 31);
-- return UINT8_MAX;
--#endif
--}
--
--static inline uint8_t VmaBitScanMSB(uint64_t mask)
--{
--#if defined(_MSC_VER) && defined(_WIN64)
-- unsigned long pos;
-- if (_BitScanReverse64(&pos, mask))
-- return static_cast<uint8_t>(pos);
--#elif defined __GNUC__ || defined __clang__
-- if (mask)
-- return 63 - static_cast<uint8_t>(__builtin_clzll(mask));
--#else
-- uint8_t pos = 63;
-- uint64_t bit = 1ULL << 63;
-- do
-- {
-- if (mask & bit)
-- return pos;
-- bit >>= 1;
-- } while (pos-- > 0);
--#endif
-- return UINT8_MAX;
--}
--
--static inline uint8_t VmaBitScanMSB(uint32_t mask)
--{
--#ifdef _MSC_VER
-- unsigned long pos;
-- if (_BitScanReverse(&pos, mask))
-- return static_cast<uint8_t>(pos);
--#elif defined __GNUC__ || defined __clang__
-- if (mask)
-- return 31 - static_cast<uint8_t>(__builtin_clz(mask));
--#else
-- uint8_t pos = 31;
-- uint32_t bit = 1UL << 31;
-- do
-- {
-- if (mask & bit)
-- return pos;
-- bit >>= 1;
-- } while (pos-- > 0);
--#endif
-- return UINT8_MAX;
--}
--
--/*
--Returns true if given number is a power of two.
--T must be unsigned integer number or signed integer but always nonnegative.
--For 0 returns true.
--*/
--template <typename T>
--inline bool VmaIsPow2(T x)
--{
-- return (x & (x - 1)) == 0;
--}
--
--// Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
--// Use types like uint32_t, uint64_t as T.
--template <typename T>
--static inline T VmaAlignUp(T val, T alignment)
--{
-- VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
-- return (val + alignment - 1) & ~(alignment - 1);
--}
--
--// Aligns given value down to nearest multiply of align value. For example: VmaAlignDown(11, 8) = 8.
--// Use types like uint32_t, uint64_t as T.
--template <typename T>
--static inline T VmaAlignDown(T val, T alignment)
--{
-- VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
-- return val & ~(alignment - 1);
--}
--
--// Division with mathematical rounding to nearest number.
--template <typename T>
--static inline T VmaRoundDiv(T x, T y)
--{
-- return (x + (y / (T)2)) / y;
--}
--
--// Divide by 'y' and round up to nearest integer.
--template <typename T>
--static inline T VmaDivideRoundingUp(T x, T y)
--{
-- return (x + y - (T)1) / y;
--}
--
--// Returns smallest power of 2 greater or equal to v.
--static inline uint32_t VmaNextPow2(uint32_t v)
--{
-- v--;
-- v |= v >> 1;
-- v |= v >> 2;
-- v |= v >> 4;
-- v |= v >> 8;
-- v |= v >> 16;
-- v++;
-- return v;
--}
--
--static inline uint64_t VmaNextPow2(uint64_t v)
--{
-- v--;
-- v |= v >> 1;
-- v |= v >> 2;
-- v |= v >> 4;
-- v |= v >> 8;
-- v |= v >> 16;
-- v |= v >> 32;
-- v++;
-- return v;
--}
--
--// Returns largest power of 2 less or equal to v.
--static inline uint32_t VmaPrevPow2(uint32_t v)
--{
-- v |= v >> 1;
-- v |= v >> 2;
-- v |= v >> 4;
-- v |= v >> 8;
-- v |= v >> 16;
-- v = v ^ (v >> 1);
-- return v;
--}
--
--static inline uint64_t VmaPrevPow2(uint64_t v)
--{
-- v |= v >> 1;
-- v |= v >> 2;
-- v |= v >> 4;
-- v |= v >> 8;
-- v |= v >> 16;
-- v |= v >> 32;
-- v = v ^ (v >> 1);
-- return v;
--}
--
--static inline bool VmaStrIsEmpty(const char* pStr)
--{
-- return pStr == VMA_NULL || *pStr == '\0';
--}
--
--/*
--Returns true if two memory blocks occupy overlapping pages.
--ResourceA must be in less memory offset than ResourceB.
--
--Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
--chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
--*/
--static inline bool VmaBlocksOnSamePage(
-- VkDeviceSize resourceAOffset,
-- VkDeviceSize resourceASize,
-- VkDeviceSize resourceBOffset,
-- VkDeviceSize pageSize)
--{
-- VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
-- VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
-- VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
-- VkDeviceSize resourceBStart = resourceBOffset;
-- VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
-- return resourceAEndPage == resourceBStartPage;
--}
--
--/*
--Returns true if given suballocation types could conflict and must respect
--VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
--or linear image and another one is optimal image. If type is unknown, behave
--conservatively.
--*/
--static inline bool VmaIsBufferImageGranularityConflict(
-- VmaSuballocationType suballocType1,
-- VmaSuballocationType suballocType2)
--{
-- if (suballocType1 > suballocType2)
-- {
-- VMA_SWAP(suballocType1, suballocType2);
-- }
--
-- switch (suballocType1)
-- {
-- case VMA_SUBALLOCATION_TYPE_FREE:
-- return false;
-- case VMA_SUBALLOCATION_TYPE_UNKNOWN:
-- return true;
-- case VMA_SUBALLOCATION_TYPE_BUFFER:
-- return
-- suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
-- suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
-- case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
-- return
-- suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
-- suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
-- suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
-- case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
-- return
-- suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
-- case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
-- return false;
-- default:
-- VMA_ASSERT(0);
-- return true;
-- }
--}
--
--static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
--{
--#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
-- uint32_t* pDst = (uint32_t*)((char*)pData + offset);
-- const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
-- for (size_t i = 0; i < numberCount; ++i, ++pDst)
-- {
-- *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
-- }
--#else
-- // no-op
--#endif
--}
--
--static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
--{
--#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
-- const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
-- const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
-- for (size_t i = 0; i < numberCount; ++i, ++pSrc)
-- {
-- if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
-- {
-- return false;
-- }
-- }
--#endif
-- return true;
--}
--
--/*
--Fills structure with parameters of an example buffer to be used for transfers
--during GPU memory defragmentation.
--*/
--static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
--{
-- memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
-- outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
-- outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-- outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
--}
--
--
--/*
--Performs binary search and returns iterator to first element that is greater or
--equal to (key), according to comparison (cmp).
--
--Cmp should return true if first argument is less than second argument.
--
--Returned value is the found element, if present in the collection or place where
--new element with value (key) should be inserted.
--*/
--template <typename CmpLess, typename IterT, typename KeyT>
--static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
--{
-- size_t down = 0, up = size_t(end - beg);
-- while (down < up)
-- {
-- const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation
-- if (cmp(*(beg + mid), key))
-- {
-- down = mid + 1;
-- }
-- else
-- {
-- up = mid;
-- }
-- }
-- return beg + down;
--}
--
--template<typename CmpLess, typename IterT, typename KeyT>
--IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
--{
-- IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
-- beg, end, value, cmp);
-- if (it == end ||
-- (!cmp(*it, value) && !cmp(value, *it)))
-- {
-- return it;
-- }
-- return end;
--}
--
--/*
--Returns true if all pointers in the array are not-null and unique.
--Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
--T must be pointer type, e.g. VmaAllocation, VmaPool.
--*/
--template<typename T>
--static bool VmaValidatePointerArray(uint32_t count, const T* arr)
--{
-- for (uint32_t i = 0; i < count; ++i)
-- {
-- const T iPtr = arr[i];
-- if (iPtr == VMA_NULL)
-- {
-- return false;
-- }
-- for (uint32_t j = i + 1; j < count; ++j)
-- {
-- if (iPtr == arr[j])
-- {
-- return false;
-- }
-- }
-- }
-- return true;
--}
--
--template<typename MainT, typename NewT>
--static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
--{
-- newStruct->pNext = mainStruct->pNext;
-- mainStruct->pNext = newStruct;
--}
--
--// This is the main algorithm that guides the selection of a memory type best for an allocation -
--// converts usage to required/preferred/not preferred flags.
--static bool FindMemoryPreferences(
-- bool isIntegratedGPU,
-- const VmaAllocationCreateInfo& allocCreateInfo,
-- VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown.
-- VkMemoryPropertyFlags& outRequiredFlags,
-- VkMemoryPropertyFlags& outPreferredFlags,
-- VkMemoryPropertyFlags& outNotPreferredFlags)
--{
-- outRequiredFlags = allocCreateInfo.requiredFlags;
-- outPreferredFlags = allocCreateInfo.preferredFlags;
-- outNotPreferredFlags = 0;
--
-- switch(allocCreateInfo.usage)
-- {
-- case VMA_MEMORY_USAGE_UNKNOWN:
-- break;
-- case VMA_MEMORY_USAGE_GPU_ONLY:
-- if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-- {
-- outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-- }
-- break;
-- case VMA_MEMORY_USAGE_CPU_ONLY:
-- outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
-- break;
-- case VMA_MEMORY_USAGE_CPU_TO_GPU:
-- outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-- if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-- {
-- outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-- }
-- break;
-- case VMA_MEMORY_USAGE_GPU_TO_CPU:
-- outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-- outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-- break;
-- case VMA_MEMORY_USAGE_CPU_COPY:
-- outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-- break;
-- case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
-- outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
-- break;
-- case VMA_MEMORY_USAGE_AUTO:
-- case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE:
-- case VMA_MEMORY_USAGE_AUTO_PREFER_HOST:
-- {
-- if(bufImgUsage == UINT32_MAX)
-- {
-- VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known.");
-- return false;
-- }
-- // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same VK_BUFFER_IMAGE_TRANSFER*.
-- const bool deviceAccess = (bufImgUsage & ~(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0;
-- const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0;
-- const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0;
-- const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0;
-- const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
-- const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
--
-- // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU.
-- if(hostAccessRandom)
-- {
-- if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
-- {
-- // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL.
-- // Omitting HOST_VISIBLE here is intentional.
-- // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one.
-- // Otherwise, this will give same weight to DEVICE_LOCAL as HOST_VISIBLE | HOST_CACHED and select the former if occurs first on the list.
-- outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-- }
-- else
-- {
-- // Always CPU memory, cached.
-- outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-- }
-- }
-- // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined.
-- else if(hostAccessSequentialWrite)
-- {
-- // Want uncached and write-combined.
-- outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
--
-- if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
-- {
-- outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-- }
-- else
-- {
-- outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-- // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame)
-- if(deviceAccess)
-- {
-- // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory.
-- if(preferHost)
-- outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-- else
-- outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-- }
-- // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU)
-- else
-- {
-- // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory.
-- if(preferDevice)
-- outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-- else
-- outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-- }
-- }
-- }
-- // No CPU access
-- else
-- {
-- // if(deviceAccess)
-- //
-- // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory,
-- // unless there is a clear preference from the user not to do so.
-- //
-- // else:
-- //
-- // No direct GPU access, no CPU access, just transfers.
-- // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or
-- // a "swap file" copy to free some GPU memory (then better CPU memory).
-- // Up to the user to decide. If no preferece, assume the former and choose GPU memory.
--
-- if(preferHost)
-- outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-- else
-- outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-- }
-- break;
-- }
-- default:
-- VMA_ASSERT(0);
-- }
--
-- // Avoid DEVICE_COHERENT unless explicitly requested.
-- if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) &
-- (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
-- {
-- outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY;
-- }
--
-- return true;
--}
--
--////////////////////////////////////////////////////////////////////////////////
--// Memory allocation
--
--static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
--{
-- void* result = VMA_NULL;
-- if ((pAllocationCallbacks != VMA_NULL) &&
-- (pAllocationCallbacks->pfnAllocation != VMA_NULL))
-- {
-- result = (*pAllocationCallbacks->pfnAllocation)(
-- pAllocationCallbacks->pUserData,
-- size,
-- alignment,
-- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-- }
-- else
-- {
-- result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
-- }
-- VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
-- return result;
--}
--
--static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
--{
-- if ((pAllocationCallbacks != VMA_NULL) &&
-- (pAllocationCallbacks->pfnFree != VMA_NULL))
-- {
-- (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
-- }
-- else
-- {
-- VMA_SYSTEM_ALIGNED_FREE(ptr);
-- }
--}
--
--template<typename T>
--static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
--{
-- return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
--}
--
--template<typename T>
--static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
--{
-- return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
--}
--
--#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
--
--#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
--
--template<typename T>
--static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
--{
-- ptr->~T();
-- VmaFree(pAllocationCallbacks, ptr);
--}
--
--template<typename T>
--static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
--{
-- if (ptr != VMA_NULL)
-- {
-- for (size_t i = count; i--; )
-- {
-- ptr[i].~T();
-- }
-- VmaFree(pAllocationCallbacks, ptr);
-- }
--}
--
--static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
--{
-- if (srcStr != VMA_NULL)
-- {
-- const size_t len = strlen(srcStr);
-- char* const result = vma_new_array(allocs, char, len + 1);
-- memcpy(result, srcStr, len + 1);
-- return result;
-- }
-- return VMA_NULL;
--}
--
--#if VMA_STATS_STRING_ENABLED
--static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen)
--{
-- if (srcStr != VMA_NULL)
-- {
-- char* const result = vma_new_array(allocs, char, strLen + 1);
-- memcpy(result, srcStr, strLen);
-- result[strLen] = '\0';
-- return result;
-- }
-- return VMA_NULL;
--}
--#endif // VMA_STATS_STRING_ENABLED
--
--static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
--{
-- if (str != VMA_NULL)
-- {
-- const size_t len = strlen(str);
-- vma_delete_array(allocs, str, len + 1);
-- }
--}
--
--template<typename CmpLess, typename VectorT>
--size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
--{
-- const size_t indexToInsert = VmaBinaryFindFirstNotLess(
-- vector.data(),
-- vector.data() + vector.size(),
-- value,
-- CmpLess()) - vector.data();
-- VmaVectorInsert(vector, indexToInsert, value);
-- return indexToInsert;
--}
--
--template<typename CmpLess, typename VectorT>
--bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
--{
-- CmpLess comparator;
-- typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
-- vector.begin(),
-- vector.end(),
-- value,
-- comparator);
-- if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
-- {
-- size_t indexToRemove = it - vector.begin();
-- VmaVectorRemove(vector, indexToRemove);
-- return true;
-- }
-- return false;
--}
--#endif // _VMA_FUNCTIONS
--
--#ifndef _VMA_STATISTICS_FUNCTIONS
--
--static void VmaClearStatistics(VmaStatistics& outStats)
--{
-- outStats.blockCount = 0;
-- outStats.allocationCount = 0;
-- outStats.blockBytes = 0;
-- outStats.allocationBytes = 0;
--}
--
--static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src)
--{
-- inoutStats.blockCount += src.blockCount;
-- inoutStats.allocationCount += src.allocationCount;
-- inoutStats.blockBytes += src.blockBytes;
-- inoutStats.allocationBytes += src.allocationBytes;
--}
--
--static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats)
--{
-- VmaClearStatistics(outStats.statistics);
-- outStats.unusedRangeCount = 0;
-- outStats.allocationSizeMin = VK_WHOLE_SIZE;
-- outStats.allocationSizeMax = 0;
-- outStats.unusedRangeSizeMin = VK_WHOLE_SIZE;
-- outStats.unusedRangeSizeMax = 0;
--}
--
--static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
--{
-- inoutStats.statistics.allocationCount++;
-- inoutStats.statistics.allocationBytes += size;
-- inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size);
-- inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size);
--}
--
--static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
--{
-- inoutStats.unusedRangeCount++;
-- inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size);
-- inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size);
--}
--
--static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src)
--{
-- VmaAddStatistics(inoutStats.statistics, src.statistics);
-- inoutStats.unusedRangeCount += src.unusedRangeCount;
-- inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin);
-- inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax);
-- inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin);
-- inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax);
--}
--
--#endif // _VMA_STATISTICS_FUNCTIONS
--
--#ifndef _VMA_MUTEX_LOCK
--// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
--struct VmaMutexLock
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLock)
--public:
-- VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
-- m_pMutex(useMutex ? &mutex : VMA_NULL)
-- {
-- if (m_pMutex) { m_pMutex->Lock(); }
-- }
-- ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } }
--
--private:
-- VMA_MUTEX* m_pMutex;
--};
--
--// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
--struct VmaMutexLockRead
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockRead)
--public:
-- VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
-- m_pMutex(useMutex ? &mutex : VMA_NULL)
-- {
-- if (m_pMutex) { m_pMutex->LockRead(); }
-- }
-- ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } }
--
--private:
-- VMA_RW_MUTEX* m_pMutex;
--};
--
--// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
--struct VmaMutexLockWrite
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockWrite)
--public:
-- VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex)
-- : m_pMutex(useMutex ? &mutex : VMA_NULL)
-- {
-- if (m_pMutex) { m_pMutex->LockWrite(); }
-- }
-- ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } }
--
--private:
-- VMA_RW_MUTEX* m_pMutex;
--};
--
--#if VMA_DEBUG_GLOBAL_MUTEX
-- static VMA_MUTEX gDebugGlobalMutex;
-- #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
--#else
-- #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
--#endif
--#endif // _VMA_MUTEX_LOCK
--
--#ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
--// An object that increments given atomic but decrements it back in the destructor unless Commit() is called.
--template<typename AtomicT>
--struct AtomicTransactionalIncrement
--{
--public:
-- using T = decltype(AtomicT().load());
--
-- ~AtomicTransactionalIncrement()
-- {
-- if(m_Atomic)
-- --(*m_Atomic);
-- }
--
-- void Commit() { m_Atomic = nullptr; }
-- T Increment(AtomicT* atomic)
-- {
-- m_Atomic = atomic;
-- return m_Atomic->fetch_add(1);
-- }
--
--private:
-- AtomicT* m_Atomic = nullptr;
--};
--#endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
--
--#ifndef _VMA_STL_ALLOCATOR
--// STL-compatible allocator.
--template<typename T>
--struct VmaStlAllocator
--{
-- const VkAllocationCallbacks* const m_pCallbacks;
-- typedef T value_type;
--
-- VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {}
-- template<typename U>
-- VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) {}
-- VmaStlAllocator(const VmaStlAllocator&) = default;
-- VmaStlAllocator& operator=(const VmaStlAllocator&) = delete;
--
-- T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
-- void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
--
-- template<typename U>
-- bool operator==(const VmaStlAllocator<U>& rhs) const
-- {
-- return m_pCallbacks == rhs.m_pCallbacks;
-- }
-- template<typename U>
-- bool operator!=(const VmaStlAllocator<U>& rhs) const
-- {
-- return m_pCallbacks != rhs.m_pCallbacks;
-- }
--};
--#endif // _VMA_STL_ALLOCATOR
--
--#ifndef _VMA_VECTOR
--/* Class with interface compatible with subset of std::vector.
--T must be POD because constructors and destructors are not called and memcpy is
--used for these objects. */
--template<typename T, typename AllocatorT>
--class VmaVector
--{
--public:
-- typedef T value_type;
-- typedef T* iterator;
-- typedef const T* const_iterator;
--
-- VmaVector(const AllocatorT& allocator);
-- VmaVector(size_t count, const AllocatorT& allocator);
-- // This version of the constructor is here for compatibility with pre-C++14 std::vector.
-- // value is unused.
-- VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {}
-- VmaVector(const VmaVector<T, AllocatorT>& src);
-- VmaVector& operator=(const VmaVector& rhs);
-- ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); }
--
-- bool empty() const { return m_Count == 0; }
-- size_t size() const { return m_Count; }
-- T* data() { return m_pArray; }
-- T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
-- T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }
-- const T* data() const { return m_pArray; }
-- const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
-- const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }
--
-- iterator begin() { return m_pArray; }
-- iterator end() { return m_pArray + m_Count; }
-- const_iterator cbegin() const { return m_pArray; }
-- const_iterator cend() const { return m_pArray + m_Count; }
-- const_iterator begin() const { return cbegin(); }
-- const_iterator end() const { return cend(); }
--
-- void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
-- void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
-- void push_front(const T& src) { insert(0, src); }
--
-- void push_back(const T& src);
-- void reserve(size_t newCapacity, bool freeMemory = false);
-- void resize(size_t newCount);
-- void clear() { resize(0); }
-- void shrink_to_fit();
-- void insert(size_t index, const T& src);
-- void remove(size_t index);
--
-- T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }
-- const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }
--
--private:
-- AllocatorT m_Allocator;
-- T* m_pArray;
-- size_t m_Count;
-- size_t m_Capacity;
--};
--
--#ifndef _VMA_VECTOR_FUNCTIONS
--template<typename T, typename AllocatorT>
--VmaVector<T, AllocatorT>::VmaVector(const AllocatorT& allocator)
-- : m_Allocator(allocator),
-- m_pArray(VMA_NULL),
-- m_Count(0),
-- m_Capacity(0) {}
--
--template<typename T, typename AllocatorT>
--VmaVector<T, AllocatorT>::VmaVector(size_t count, const AllocatorT& allocator)
-- : m_Allocator(allocator),
-- m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
-- m_Count(count),
-- m_Capacity(count) {}
--
--template<typename T, typename AllocatorT>
--VmaVector<T, AllocatorT>::VmaVector(const VmaVector& src)
-- : m_Allocator(src.m_Allocator),
-- m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
-- m_Count(src.m_Count),
-- m_Capacity(src.m_Count)
--{
-- if (m_Count != 0)
-- {
-- memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
-- }
--}
--
--template<typename T, typename AllocatorT>
--VmaVector<T, AllocatorT>& VmaVector<T, AllocatorT>::operator=(const VmaVector& rhs)
--{
-- if (&rhs != this)
-- {
-- resize(rhs.m_Count);
-- if (m_Count != 0)
-- {
-- memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
-- }
-- }
-- return *this;
--}
--
--template<typename T, typename AllocatorT>
--void VmaVector<T, AllocatorT>::push_back(const T& src)
--{
-- const size_t newIndex = size();
-- resize(newIndex + 1);
-- m_pArray[newIndex] = src;
--}
--
--template<typename T, typename AllocatorT>
--void VmaVector<T, AllocatorT>::reserve(size_t newCapacity, bool freeMemory)
--{
-- newCapacity = VMA_MAX(newCapacity, m_Count);
--
-- if ((newCapacity < m_Capacity) && !freeMemory)
-- {
-- newCapacity = m_Capacity;
-- }
--
-- if (newCapacity != m_Capacity)
-- {
-- T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
-- if (m_Count != 0)
-- {
-- memcpy(newArray, m_pArray, m_Count * sizeof(T));
-- }
-- VmaFree(m_Allocator.m_pCallbacks, m_pArray);
-- m_Capacity = newCapacity;
-- m_pArray = newArray;
-- }
--}
--
--template<typename T, typename AllocatorT>
--void VmaVector<T, AllocatorT>::resize(size_t newCount)
--{
-- size_t newCapacity = m_Capacity;
-- if (newCount > m_Capacity)
-- {
-- newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
-- }
--
-- if (newCapacity != m_Capacity)
-- {
-- T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
-- const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
-- if (elementsToCopy != 0)
-- {
-- memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
-- }
-- VmaFree(m_Allocator.m_pCallbacks, m_pArray);
-- m_Capacity = newCapacity;
-- m_pArray = newArray;
-- }
--
-- m_Count = newCount;
--}
--
--template<typename T, typename AllocatorT>
--void VmaVector<T, AllocatorT>::shrink_to_fit()
--{
-- if (m_Capacity > m_Count)
-- {
-- T* newArray = VMA_NULL;
-- if (m_Count > 0)
-- {
-- newArray = VmaAllocateArray<T>(m_Allocator.m_pCallbacks, m_Count);
-- memcpy(newArray, m_pArray, m_Count * sizeof(T));
-- }
-- VmaFree(m_Allocator.m_pCallbacks, m_pArray);
-- m_Capacity = m_Count;
-- m_pArray = newArray;
-- }
--}
--
--template<typename T, typename AllocatorT>
--void VmaVector<T, AllocatorT>::insert(size_t index, const T& src)
--{
-- VMA_HEAVY_ASSERT(index <= m_Count);
-- const size_t oldCount = size();
-- resize(oldCount + 1);
-- if (index < oldCount)
-- {
-- memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
-- }
-- m_pArray[index] = src;
--}
--
--template<typename T, typename AllocatorT>
--void VmaVector<T, AllocatorT>::remove(size_t index)
--{
-- VMA_HEAVY_ASSERT(index < m_Count);
-- const size_t oldCount = size();
-- if (index < oldCount - 1)
-- {
-- memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
-- }
-- resize(oldCount - 1);
--}
--#endif // _VMA_VECTOR_FUNCTIONS
--
--template<typename T, typename allocatorT>
--static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
--{
-- vec.insert(index, item);
--}
--
--template<typename T, typename allocatorT>
--static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
--{
-- vec.remove(index);
--}
--#endif // _VMA_VECTOR
--
--#ifndef _VMA_SMALL_VECTOR
--/*
--This is a vector (a variable-sized array), optimized for the case when the array is small.
--
--It contains some number of elements in-place, which allows it to avoid heap allocation
--when the actual number of elements is below that threshold. This allows normal "small"
--cases to be fast without losing generality for large inputs.
--*/
--template<typename T, typename AllocatorT, size_t N>
--class VmaSmallVector
--{
--public:
-- typedef T value_type;
-- typedef T* iterator;
--
-- VmaSmallVector(const AllocatorT& allocator);
-- VmaSmallVector(size_t count, const AllocatorT& allocator);
-- template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
-- VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
-- template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
-- VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
-- ~VmaSmallVector() = default;
--
-- bool empty() const { return m_Count == 0; }
-- size_t size() const { return m_Count; }
-- T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
-- T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
-- T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }
-- const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
-- const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
-- const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }
--
-- iterator begin() { return data(); }
-- iterator end() { return data() + m_Count; }
--
-- void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
-- void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
-- void push_front(const T& src) { insert(0, src); }
--
-- void push_back(const T& src);
-- void resize(size_t newCount, bool freeMemory = false);
-- void clear(bool freeMemory = false);
-- void insert(size_t index, const T& src);
-- void remove(size_t index);
--
-- T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }
-- const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }
--
--private:
-- size_t m_Count;
-- T m_StaticArray[N]; // Used when m_Size <= N
-- VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Size > N
--};
--
--#ifndef _VMA_SMALL_VECTOR_FUNCTIONS
--template<typename T, typename AllocatorT, size_t N>
--VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(const AllocatorT& allocator)
-- : m_Count(0),
-- m_DynamicArray(allocator) {}
--
--template<typename T, typename AllocatorT, size_t N>
--VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(size_t count, const AllocatorT& allocator)
-- : m_Count(count),
-- m_DynamicArray(count > N ? count : 0, allocator) {}
--
--template<typename T, typename AllocatorT, size_t N>
--void VmaSmallVector<T, AllocatorT, N>::push_back(const T& src)
--{
-- const size_t newIndex = size();
-- resize(newIndex + 1);
-- data()[newIndex] = src;
--}
--
--template<typename T, typename AllocatorT, size_t N>
--void VmaSmallVector<T, AllocatorT, N>::resize(size_t newCount, bool freeMemory)
--{
-- if (newCount > N && m_Count > N)
-- {
-- // Any direction, staying in m_DynamicArray
-- m_DynamicArray.resize(newCount);
-- if (freeMemory)
-- {
-- m_DynamicArray.shrink_to_fit();
-- }
-- }
-- else if (newCount > N && m_Count <= N)
-- {
-- // Growing, moving from m_StaticArray to m_DynamicArray
-- m_DynamicArray.resize(newCount);
-- if (m_Count > 0)
-- {
-- memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
-- }
-- }
-- else if (newCount <= N && m_Count > N)
-- {
-- // Shrinking, moving from m_DynamicArray to m_StaticArray
-- if (newCount > 0)
-- {
-- memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
-- }
-- m_DynamicArray.resize(0);
-- if (freeMemory)
-- {
-- m_DynamicArray.shrink_to_fit();
-- }
-- }
-- else
-- {
-- // Any direction, staying in m_StaticArray - nothing to do here
-- }
-- m_Count = newCount;
--}
--
--template<typename T, typename AllocatorT, size_t N>
--void VmaSmallVector<T, AllocatorT, N>::clear(bool freeMemory)
--{
-- m_DynamicArray.clear();
-- if (freeMemory)
-- {
-- m_DynamicArray.shrink_to_fit();
-- }
-- m_Count = 0;
--}
--
--template<typename T, typename AllocatorT, size_t N>
--void VmaSmallVector<T, AllocatorT, N>::insert(size_t index, const T& src)
--{
-- VMA_HEAVY_ASSERT(index <= m_Count);
-- const size_t oldCount = size();
-- resize(oldCount + 1);
-- T* const dataPtr = data();
-- if (index < oldCount)
-- {
-- // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray.
-- memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
-- }
-- dataPtr[index] = src;
--}
--
--template<typename T, typename AllocatorT, size_t N>
--void VmaSmallVector<T, AllocatorT, N>::remove(size_t index)
--{
-- VMA_HEAVY_ASSERT(index < m_Count);
-- const size_t oldCount = size();
-- if (index < oldCount - 1)
-- {
-- // I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray.
-- T* const dataPtr = data();
-- memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
-- }
-- resize(oldCount - 1);
--}
--#endif // _VMA_SMALL_VECTOR_FUNCTIONS
--#endif // _VMA_SMALL_VECTOR
--
--#ifndef _VMA_POOL_ALLOCATOR
--/*
--Allocator for objects of type T using a list of arrays (pools) to speed up
--allocation. Number of elements that can be allocated is not bounded because
--allocator can create multiple blocks.
--*/
--template<typename T>
--class VmaPoolAllocator
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaPoolAllocator)
--public:
-- VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
-- ~VmaPoolAllocator();
-- template<typename... Types> T* Alloc(Types&&... args);
-- void Free(T* ptr);
--
--private:
-- union Item
-- {
-- uint32_t NextFreeIndex;
-- alignas(T) char Value[sizeof(T)];
-- };
-- struct ItemBlock
-- {
-- Item* pItems;
-- uint32_t Capacity;
-- uint32_t FirstFreeIndex;
-- };
--
-- const VkAllocationCallbacks* m_pAllocationCallbacks;
-- const uint32_t m_FirstBlockCapacity;
-- VmaVector<ItemBlock, VmaStlAllocator<ItemBlock>> m_ItemBlocks;
--
-- ItemBlock& CreateNewBlock();
--};
--
--#ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS
--template<typename T>
--VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity)
-- : m_pAllocationCallbacks(pAllocationCallbacks),
-- m_FirstBlockCapacity(firstBlockCapacity),
-- m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
--{
-- VMA_ASSERT(m_FirstBlockCapacity > 1);
--}
--
--template<typename T>
--VmaPoolAllocator<T>::~VmaPoolAllocator()
--{
-- for (size_t i = m_ItemBlocks.size(); i--;)
-- vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
-- m_ItemBlocks.clear();
--}
--
--template<typename T>
--template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types&&... args)
--{
-- for (size_t i = m_ItemBlocks.size(); i--; )
-- {
-- ItemBlock& block = m_ItemBlocks[i];
-- // This block has some free items: Use first one.
-- if (block.FirstFreeIndex != UINT32_MAX)
-- {
-- Item* const pItem = &block.pItems[block.FirstFreeIndex];
-- block.FirstFreeIndex = pItem->NextFreeIndex;
-- T* result = (T*)&pItem->Value;
-- new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
-- return result;
-- }
-- }
--
-- // No block has free item: Create new one and use it.
-- ItemBlock& newBlock = CreateNewBlock();
-- Item* const pItem = &newBlock.pItems[0];
-- newBlock.FirstFreeIndex = pItem->NextFreeIndex;
-- T* result = (T*)&pItem->Value;
-- new(result) T(std::forward<Types>(args)...); // Explicit constructor call.
-- return result;
--}
--
--template<typename T>
--void VmaPoolAllocator<T>::Free(T* ptr)
--{
-- // Search all memory blocks to find ptr.
-- for (size_t i = m_ItemBlocks.size(); i--; )
-- {
-- ItemBlock& block = m_ItemBlocks[i];
--
-- // Casting to union.
-- Item* pItemPtr;
-- memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
--
-- // Check if pItemPtr is in address range of this block.
-- if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
-- {
-- ptr->~T(); // Explicit destructor call.
-- const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
-- pItemPtr->NextFreeIndex = block.FirstFreeIndex;
-- block.FirstFreeIndex = index;
-- return;
-- }
-- }
-- VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
--}
--
--template<typename T>
--typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
--{
-- const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
-- m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
--
-- const ItemBlock newBlock =
-- {
-- vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
-- newBlockCapacity,
-- 0
-- };
--
-- m_ItemBlocks.push_back(newBlock);
--
-- // Setup singly-linked list of all free items in this block.
-- for (uint32_t i = 0; i < newBlockCapacity - 1; ++i)
-- newBlock.pItems[i].NextFreeIndex = i + 1;
-- newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
-- return m_ItemBlocks.back();
--}
--#endif // _VMA_POOL_ALLOCATOR_FUNCTIONS
--#endif // _VMA_POOL_ALLOCATOR
--
--#ifndef _VMA_RAW_LIST
--template<typename T>
--struct VmaListItem
--{
-- VmaListItem* pPrev;
-- VmaListItem* pNext;
-- T Value;
--};
--
--// Doubly linked list.
--template<typename T>
--class VmaRawList
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaRawList)
--public:
-- typedef VmaListItem<T> ItemType;
--
-- VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
-- // Intentionally not calling Clear, because that would be unnecessary
-- // computations to return all items to m_ItemAllocator as free.
-- ~VmaRawList() = default;
--
-- size_t GetCount() const { return m_Count; }
-- bool IsEmpty() const { return m_Count == 0; }
--
-- ItemType* Front() { return m_pFront; }
-- ItemType* Back() { return m_pBack; }
-- const ItemType* Front() const { return m_pFront; }
-- const ItemType* Back() const { return m_pBack; }
--
-- ItemType* PushFront();
-- ItemType* PushBack();
-- ItemType* PushFront(const T& value);
-- ItemType* PushBack(const T& value);
-- void PopFront();
-- void PopBack();
--
-- // Item can be null - it means PushBack.
-- ItemType* InsertBefore(ItemType* pItem);
-- // Item can be null - it means PushFront.
-- ItemType* InsertAfter(ItemType* pItem);
-- ItemType* InsertBefore(ItemType* pItem, const T& value);
-- ItemType* InsertAfter(ItemType* pItem, const T& value);
--
-- void Clear();
-- void Remove(ItemType* pItem);
--
--private:
-- const VkAllocationCallbacks* const m_pAllocationCallbacks;
-- VmaPoolAllocator<ItemType> m_ItemAllocator;
-- ItemType* m_pFront;
-- ItemType* m_pBack;
-- size_t m_Count;
--};
--
--#ifndef _VMA_RAW_LIST_FUNCTIONS
--template<typename T>
--VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks)
-- : m_pAllocationCallbacks(pAllocationCallbacks),
-- m_ItemAllocator(pAllocationCallbacks, 128),
-- m_pFront(VMA_NULL),
-- m_pBack(VMA_NULL),
-- m_Count(0) {}
--
--template<typename T>
--VmaListItem<T>* VmaRawList<T>::PushFront()
--{
-- ItemType* const pNewItem = m_ItemAllocator.Alloc();
-- pNewItem->pPrev = VMA_NULL;
-- if (IsEmpty())
-- {
-- pNewItem->pNext = VMA_NULL;
-- m_pFront = pNewItem;
-- m_pBack = pNewItem;
-- m_Count = 1;
-- }
-- else
-- {
-- pNewItem->pNext = m_pFront;
-- m_pFront->pPrev = pNewItem;
-- m_pFront = pNewItem;
-- ++m_Count;
-- }
-- return pNewItem;
--}
--
--template<typename T>
--VmaListItem<T>* VmaRawList<T>::PushBack()
--{
-- ItemType* const pNewItem = m_ItemAllocator.Alloc();
-- pNewItem->pNext = VMA_NULL;
-- if(IsEmpty())
-- {
-- pNewItem->pPrev = VMA_NULL;
-- m_pFront = pNewItem;
-- m_pBack = pNewItem;
-- m_Count = 1;
-- }
-- else
-- {
-- pNewItem->pPrev = m_pBack;
-- m_pBack->pNext = pNewItem;
-- m_pBack = pNewItem;
-- ++m_Count;
-- }
-- return pNewItem;
--}
--
--template<typename T>
--VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
--{
-- ItemType* const pNewItem = PushFront();
-- pNewItem->Value = value;
-- return pNewItem;
--}
--
--template<typename T>
--VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
--{
-- ItemType* const pNewItem = PushBack();
-- pNewItem->Value = value;
-- return pNewItem;
--}
--
--template<typename T>
--void VmaRawList<T>::PopFront()
--{
-- VMA_HEAVY_ASSERT(m_Count > 0);
-- ItemType* const pFrontItem = m_pFront;
-- ItemType* const pNextItem = pFrontItem->pNext;
-- if (pNextItem != VMA_NULL)
-- {
-- pNextItem->pPrev = VMA_NULL;
-- }
-- m_pFront = pNextItem;
-- m_ItemAllocator.Free(pFrontItem);
-- --m_Count;
--}
--
--template<typename T>
--void VmaRawList<T>::PopBack()
--{
-- VMA_HEAVY_ASSERT(m_Count > 0);
-- ItemType* const pBackItem = m_pBack;
-- ItemType* const pPrevItem = pBackItem->pPrev;
-- if(pPrevItem != VMA_NULL)
-- {
-- pPrevItem->pNext = VMA_NULL;
-- }
-- m_pBack = pPrevItem;
-- m_ItemAllocator.Free(pBackItem);
-- --m_Count;
--}
--
--template<typename T>
--void VmaRawList<T>::Clear()
--{
-- if (IsEmpty() == false)
-- {
-- ItemType* pItem = m_pBack;
-- while (pItem != VMA_NULL)
-- {
-- ItemType* const pPrevItem = pItem->pPrev;
-- m_ItemAllocator.Free(pItem);
-- pItem = pPrevItem;
-- }
-- m_pFront = VMA_NULL;
-- m_pBack = VMA_NULL;
-- m_Count = 0;
-- }
--}
--
--template<typename T>
--void VmaRawList<T>::Remove(ItemType* pItem)
--{
-- VMA_HEAVY_ASSERT(pItem != VMA_NULL);
-- VMA_HEAVY_ASSERT(m_Count > 0);
--
-- if(pItem->pPrev != VMA_NULL)
-- {
-- pItem->pPrev->pNext = pItem->pNext;
-- }
-- else
-- {
-- VMA_HEAVY_ASSERT(m_pFront == pItem);
-- m_pFront = pItem->pNext;
-- }
--
-- if(pItem->pNext != VMA_NULL)
-- {
-- pItem->pNext->pPrev = pItem->pPrev;
-- }
-- else
-- {
-- VMA_HEAVY_ASSERT(m_pBack == pItem);
-- m_pBack = pItem->pPrev;
-- }
--
-- m_ItemAllocator.Free(pItem);
-- --m_Count;
--}
--
--template<typename T>
--VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
--{
-- if(pItem != VMA_NULL)
-- {
-- ItemType* const prevItem = pItem->pPrev;
-- ItemType* const newItem = m_ItemAllocator.Alloc();
-- newItem->pPrev = prevItem;
-- newItem->pNext = pItem;
-- pItem->pPrev = newItem;
-- if(prevItem != VMA_NULL)
-- {
-- prevItem->pNext = newItem;
-- }
-- else
-- {
-- VMA_HEAVY_ASSERT(m_pFront == pItem);
-- m_pFront = newItem;
-- }
-- ++m_Count;
-- return newItem;
-- }
-- else
-- return PushBack();
--}
--
--template<typename T>
--VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
--{
-- if(pItem != VMA_NULL)
-- {
-- ItemType* const nextItem = pItem->pNext;
-- ItemType* const newItem = m_ItemAllocator.Alloc();
-- newItem->pNext = nextItem;
-- newItem->pPrev = pItem;
-- pItem->pNext = newItem;
-- if(nextItem != VMA_NULL)
-- {
-- nextItem->pPrev = newItem;
-- }
-- else
-- {
-- VMA_HEAVY_ASSERT(m_pBack == pItem);
-- m_pBack = newItem;
-- }
-- ++m_Count;
-- return newItem;
-- }
-- else
-- return PushFront();
--}
--
--template<typename T>
--VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
--{
-- ItemType* const newItem = InsertBefore(pItem);
-- newItem->Value = value;
-- return newItem;
--}
--
--template<typename T>
--VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
--{
-- ItemType* const newItem = InsertAfter(pItem);
-- newItem->Value = value;
-- return newItem;
--}
--#endif // _VMA_RAW_LIST_FUNCTIONS
--#endif // _VMA_RAW_LIST
--
--#ifndef _VMA_LIST
--template<typename T, typename AllocatorT>
--class VmaList
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaList)
--public:
-- class reverse_iterator;
-- class const_iterator;
-- class const_reverse_iterator;
--
-- class iterator
-- {
-- friend class const_iterator;
-- friend class VmaList<T, AllocatorT>;
-- public:
-- iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
-- iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
--
-- T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
-- T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
--
-- bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
-- bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
--
-- iterator operator++(int) { iterator result = *this; ++*this; return result; }
-- iterator operator--(int) { iterator result = *this; --*this; return result; }
--
-- iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
-- iterator& operator--();
--
-- private:
-- VmaRawList<T>* m_pList;
-- VmaListItem<T>* m_pItem;
--
-- iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
-- };
-- class reverse_iterator
-- {
-- friend class const_reverse_iterator;
-- friend class VmaList<T, AllocatorT>;
-- public:
-- reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
-- reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
--
-- T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
-- T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
--
-- bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
-- bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
--
-- reverse_iterator operator++(int) { reverse_iterator result = *this; ++* this; return result; }
-- reverse_iterator operator--(int) { reverse_iterator result = *this; --* this; return result; }
--
-- reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
-- reverse_iterator& operator--();
--
-- private:
-- VmaRawList<T>* m_pList;
-- VmaListItem<T>* m_pItem;
--
-- reverse_iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
-- };
-- class const_iterator
-- {
-- friend class VmaList<T, AllocatorT>;
-- public:
-- const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
-- const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
-- const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
--
-- iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }
--
-- const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
-- const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
--
-- bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
-- bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
--
-- const_iterator operator++(int) { const_iterator result = *this; ++* this; return result; }
-- const_iterator operator--(int) { const_iterator result = *this; --* this; return result; }
--
-- const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
-- const_iterator& operator--();
--
-- private:
-- const VmaRawList<T>* m_pList;
-- const VmaListItem<T>* m_pItem;
--
-- const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
-- };
-- class const_reverse_iterator
-- {
-- friend class VmaList<T, AllocatorT>;
-- public:
-- const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
-- const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
-- const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
--
-- reverse_iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }
--
-- const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
-- const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
--
-- bool operator==(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
-- bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
--
-- const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++* this; return result; }
-- const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --* this; return result; }
--
-- const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
-- const_reverse_iterator& operator--();
--
-- private:
-- const VmaRawList<T>* m_pList;
-- const VmaListItem<T>* m_pItem;
--
-- const_reverse_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
-- };
--
-- VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {}
--
-- bool empty() const { return m_RawList.IsEmpty(); }
-- size_t size() const { return m_RawList.GetCount(); }
--
-- iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
-- iterator end() { return iterator(&m_RawList, VMA_NULL); }
--
-- const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
-- const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
--
-- const_iterator begin() const { return cbegin(); }
-- const_iterator end() const { return cend(); }
--
-- reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); }
-- reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); }
--
-- const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); }
-- const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); }
--
-- const_reverse_iterator rbegin() const { return crbegin(); }
-- const_reverse_iterator rend() const { return crend(); }
--
-- void push_back(const T& value) { m_RawList.PushBack(value); }
-- iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
--
-- void clear() { m_RawList.Clear(); }
-- void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
--
--private:
-- VmaRawList<T> m_RawList;
--};
--
--#ifndef _VMA_LIST_FUNCTIONS
--template<typename T, typename AllocatorT>
--typename VmaList<T, AllocatorT>::iterator& VmaList<T, AllocatorT>::iterator::operator--()
--{
-- if (m_pItem != VMA_NULL)
-- {
-- m_pItem = m_pItem->pPrev;
-- }
-- else
-- {
-- VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
-- m_pItem = m_pList->Back();
-- }
-- return *this;
--}
--
--template<typename T, typename AllocatorT>
--typename VmaList<T, AllocatorT>::reverse_iterator& VmaList<T, AllocatorT>::reverse_iterator::operator--()
--{
-- if (m_pItem != VMA_NULL)
-- {
-- m_pItem = m_pItem->pNext;
-- }
-- else
-- {
-- VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
-- m_pItem = m_pList->Front();
-- }
-- return *this;
--}
--
--template<typename T, typename AllocatorT>
--typename VmaList<T, AllocatorT>::const_iterator& VmaList<T, AllocatorT>::const_iterator::operator--()
--{
-- if (m_pItem != VMA_NULL)
-- {
-- m_pItem = m_pItem->pPrev;
-- }
-- else
-- {
-- VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
-- m_pItem = m_pList->Back();
-- }
-- return *this;
--}
--
--template<typename T, typename AllocatorT>
--typename VmaList<T, AllocatorT>::const_reverse_iterator& VmaList<T, AllocatorT>::const_reverse_iterator::operator--()
--{
-- if (m_pItem != VMA_NULL)
-- {
-- m_pItem = m_pItem->pNext;
-- }
-- else
-- {
-- VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
-- m_pItem = m_pList->Back();
-- }
-- return *this;
--}
--#endif // _VMA_LIST_FUNCTIONS
--#endif // _VMA_LIST
--
--#ifndef _VMA_INTRUSIVE_LINKED_LIST
--/*
--Expected interface of ItemTypeTraits:
--struct MyItemTypeTraits
--{
-- typedef MyItem ItemType;
-- static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
-- static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
-- static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
-- static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
--};
--*/
--template<typename ItemTypeTraits>
--class VmaIntrusiveLinkedList
--{
--public:
-- typedef typename ItemTypeTraits::ItemType ItemType;
-- static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
-- static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }
--
-- // Movable, not copyable.
-- VmaIntrusiveLinkedList() = default;
-- VmaIntrusiveLinkedList(VmaIntrusiveLinkedList && src);
-- VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete;
-- VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src);
-- VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete;
-- ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); }
--
-- size_t GetCount() const { return m_Count; }
-- bool IsEmpty() const { return m_Count == 0; }
-- ItemType* Front() { return m_Front; }
-- ItemType* Back() { return m_Back; }
-- const ItemType* Front() const { return m_Front; }
-- const ItemType* Back() const { return m_Back; }
--
-- void PushBack(ItemType* item);
-- void PushFront(ItemType* item);
-- ItemType* PopBack();
-- ItemType* PopFront();
--
-- // MyItem can be null - it means PushBack.
-- void InsertBefore(ItemType* existingItem, ItemType* newItem);
-- // MyItem can be null - it means PushFront.
-- void InsertAfter(ItemType* existingItem, ItemType* newItem);
-- void Remove(ItemType* item);
-- void RemoveAll();
--
--private:
-- ItemType* m_Front = VMA_NULL;
-- ItemType* m_Back = VMA_NULL;
-- size_t m_Count = 0;
--};
--
--#ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
--template<typename ItemTypeTraits>
--VmaIntrusiveLinkedList<ItemTypeTraits>::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src)
-- : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
--{
-- src.m_Front = src.m_Back = VMA_NULL;
-- src.m_Count = 0;
--}
--
--template<typename ItemTypeTraits>
--VmaIntrusiveLinkedList<ItemTypeTraits>& VmaIntrusiveLinkedList<ItemTypeTraits>::operator=(VmaIntrusiveLinkedList&& src)
--{
-- if (&src != this)
-- {
-- VMA_HEAVY_ASSERT(IsEmpty());
-- m_Front = src.m_Front;
-- m_Back = src.m_Back;
-- m_Count = src.m_Count;
-- src.m_Front = src.m_Back = VMA_NULL;
-- src.m_Count = 0;
-- }
-- return *this;
--}
--
--template<typename ItemTypeTraits>
--void VmaIntrusiveLinkedList<ItemTypeTraits>::PushBack(ItemType* item)
--{
-- VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
-- if (IsEmpty())
-- {
-- m_Front = item;
-- m_Back = item;
-- m_Count = 1;
-- }
-- else
-- {
-- ItemTypeTraits::AccessPrev(item) = m_Back;
-- ItemTypeTraits::AccessNext(m_Back) = item;
-- m_Back = item;
-- ++m_Count;
-- }
--}
--
--template<typename ItemTypeTraits>
--void VmaIntrusiveLinkedList<ItemTypeTraits>::PushFront(ItemType* item)
--{
-- VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
-- if (IsEmpty())
-- {
-- m_Front = item;
-- m_Back = item;
-- m_Count = 1;
-- }
-- else
-- {
-- ItemTypeTraits::AccessNext(item) = m_Front;
-- ItemTypeTraits::AccessPrev(m_Front) = item;
-- m_Front = item;
-- ++m_Count;
-- }
--}
--
--template<typename ItemTypeTraits>
--typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopBack()
--{
-- VMA_HEAVY_ASSERT(m_Count > 0);
-- ItemType* const backItem = m_Back;
-- ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
-- if (prevItem != VMA_NULL)
-- {
-- ItemTypeTraits::AccessNext(prevItem) = VMA_NULL;
-- }
-- m_Back = prevItem;
-- --m_Count;
-- ItemTypeTraits::AccessPrev(backItem) = VMA_NULL;
-- ItemTypeTraits::AccessNext(backItem) = VMA_NULL;
-- return backItem;
--}
--
--template<typename ItemTypeTraits>
--typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopFront()
--{
-- VMA_HEAVY_ASSERT(m_Count > 0);
-- ItemType* const frontItem = m_Front;
-- ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
-- if (nextItem != VMA_NULL)
-- {
-- ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL;
-- }
-- m_Front = nextItem;
-- --m_Count;
-- ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL;
-- ItemTypeTraits::AccessNext(frontItem) = VMA_NULL;
-- return frontItem;
--}
--
--template<typename ItemTypeTraits>
--void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertBefore(ItemType* existingItem, ItemType* newItem)
--{
-- VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
-- if (existingItem != VMA_NULL)
-- {
-- ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
-- ItemTypeTraits::AccessPrev(newItem) = prevItem;
-- ItemTypeTraits::AccessNext(newItem) = existingItem;
-- ItemTypeTraits::AccessPrev(existingItem) = newItem;
-- if (prevItem != VMA_NULL)
-- {
-- ItemTypeTraits::AccessNext(prevItem) = newItem;
-- }
-- else
-- {
-- VMA_HEAVY_ASSERT(m_Front == existingItem);
-- m_Front = newItem;
-- }
-- ++m_Count;
-- }
-- else
-- PushBack(newItem);
--}
--
--template<typename ItemTypeTraits>
--void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertAfter(ItemType* existingItem, ItemType* newItem)
--{
-- VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
-- if (existingItem != VMA_NULL)
-- {
-- ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
-- ItemTypeTraits::AccessNext(newItem) = nextItem;
-- ItemTypeTraits::AccessPrev(newItem) = existingItem;
-- ItemTypeTraits::AccessNext(existingItem) = newItem;
-- if (nextItem != VMA_NULL)
-- {
-- ItemTypeTraits::AccessPrev(nextItem) = newItem;
-- }
-- else
-- {
-- VMA_HEAVY_ASSERT(m_Back == existingItem);
-- m_Back = newItem;
-- }
-- ++m_Count;
-- }
-- else
-- return PushFront(newItem);
--}
--
--template<typename ItemTypeTraits>
--void VmaIntrusiveLinkedList<ItemTypeTraits>::Remove(ItemType* item)
--{
-- VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0);
-- if (ItemTypeTraits::GetPrev(item) != VMA_NULL)
-- {
-- ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
-- }
-- else
-- {
-- VMA_HEAVY_ASSERT(m_Front == item);
-- m_Front = ItemTypeTraits::GetNext(item);
-- }
--
-- if (ItemTypeTraits::GetNext(item) != VMA_NULL)
-- {
-- ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
-- }
-- else
-- {
-- VMA_HEAVY_ASSERT(m_Back == item);
-- m_Back = ItemTypeTraits::GetPrev(item);
-- }
-- ItemTypeTraits::AccessPrev(item) = VMA_NULL;
-- ItemTypeTraits::AccessNext(item) = VMA_NULL;
-- --m_Count;
--}
--
--template<typename ItemTypeTraits>
--void VmaIntrusiveLinkedList<ItemTypeTraits>::RemoveAll()
--{
-- if (!IsEmpty())
-- {
-- ItemType* item = m_Back;
-- while (item != VMA_NULL)
-- {
-- ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
-- ItemTypeTraits::AccessPrev(item) = VMA_NULL;
-- ItemTypeTraits::AccessNext(item) = VMA_NULL;
-- item = prevItem;
-- }
-- m_Front = VMA_NULL;
-- m_Back = VMA_NULL;
-- m_Count = 0;
-- }
--}
--#endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
--#endif // _VMA_INTRUSIVE_LINKED_LIST
--
--// Unused in this version.
--#if 0
--
--#ifndef _VMA_PAIR
--template<typename T1, typename T2>
--struct VmaPair
--{
-- T1 first;
-- T2 second;
--
-- VmaPair() : first(), second() {}
-- VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) {}
--};
--
--template<typename FirstT, typename SecondT>
--struct VmaPairFirstLess
--{
-- bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
-- {
-- return lhs.first < rhs.first;
-- }
-- bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
-- {
-- return lhs.first < rhsFirst;
-- }
--};
--#endif // _VMA_PAIR
--
--#ifndef _VMA_MAP
--/* Class compatible with subset of interface of std::unordered_map.
--KeyT, ValueT must be POD because they will be stored in VmaVector.
--*/
--template<typename KeyT, typename ValueT>
--class VmaMap
--{
--public:
-- typedef VmaPair<KeyT, ValueT> PairType;
-- typedef PairType* iterator;
--
-- VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) {}
--
-- iterator begin() { return m_Vector.begin(); }
-- iterator end() { return m_Vector.end(); }
-- size_t size() { return m_Vector.size(); }
--
-- void insert(const PairType& pair);
-- iterator find(const KeyT& key);
-- void erase(iterator it);
--
--private:
-- VmaVector< PairType, VmaStlAllocator<PairType>> m_Vector;
--};
--
--#ifndef _VMA_MAP_FUNCTIONS
--template<typename KeyT, typename ValueT>
--void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
--{
-- const size_t indexToInsert = VmaBinaryFindFirstNotLess(
-- m_Vector.data(),
-- m_Vector.data() + m_Vector.size(),
-- pair,
-- VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
-- VmaVectorInsert(m_Vector, indexToInsert, pair);
--}
--
--template<typename KeyT, typename ValueT>
--VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
--{
-- PairType* it = VmaBinaryFindFirstNotLess(
-- m_Vector.data(),
-- m_Vector.data() + m_Vector.size(),
-- key,
-- VmaPairFirstLess<KeyT, ValueT>());
-- if ((it != m_Vector.end()) && (it->first == key))
-- {
-- return it;
-- }
-- else
-- {
-- return m_Vector.end();
-- }
--}
--
--template<typename KeyT, typename ValueT>
--void VmaMap<KeyT, ValueT>::erase(iterator it)
--{
-- VmaVectorRemove(m_Vector, it - m_Vector.begin());
--}
--#endif // _VMA_MAP_FUNCTIONS
--#endif // _VMA_MAP
--
--#endif // #if 0
--
--#if !defined(_VMA_STRING_BUILDER) && VMA_STATS_STRING_ENABLED
--class VmaStringBuilder
--{
--public:
-- VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator<char>(allocationCallbacks)) {}
-- ~VmaStringBuilder() = default;
--
-- size_t GetLength() const { return m_Data.size(); }
-- const char* GetData() const { return m_Data.data(); }
-- void AddNewLine() { Add('\n'); }
-- void Add(char ch) { m_Data.push_back(ch); }
--
-- void Add(const char* pStr);
-- void AddNumber(uint32_t num);
-- void AddNumber(uint64_t num);
-- void AddPointer(const void* ptr);
--
--private:
-- VmaVector<char, VmaStlAllocator<char>> m_Data;
--};
--
--#ifndef _VMA_STRING_BUILDER_FUNCTIONS
--void VmaStringBuilder::Add(const char* pStr)
--{
-- const size_t strLen = strlen(pStr);
-- if (strLen > 0)
-- {
-- const size_t oldCount = m_Data.size();
-- m_Data.resize(oldCount + strLen);
-- memcpy(m_Data.data() + oldCount, pStr, strLen);
-- }
--}
--
--void VmaStringBuilder::AddNumber(uint32_t num)
--{
-- char buf[11];
-- buf[10] = '\0';
-- char* p = &buf[10];
-- do
-- {
-- *--p = '0' + (char)(num % 10);
-- num /= 10;
-- } while (num);
-- Add(p);
--}
--
--void VmaStringBuilder::AddNumber(uint64_t num)
--{
-- char buf[21];
-- buf[20] = '\0';
-- char* p = &buf[20];
-- do
-- {
-- *--p = '0' + (char)(num % 10);
-- num /= 10;
-- } while (num);
-- Add(p);
--}
--
--void VmaStringBuilder::AddPointer(const void* ptr)
--{
-- char buf[21];
-- VmaPtrToStr(buf, sizeof(buf), ptr);
-- Add(buf);
--}
--#endif //_VMA_STRING_BUILDER_FUNCTIONS
--#endif // _VMA_STRING_BUILDER
--
--#if !defined(_VMA_JSON_WRITER) && VMA_STATS_STRING_ENABLED
--/*
--Allows to conveniently build a correct JSON document to be written to the
--VmaStringBuilder passed to the constructor.
--*/
--class VmaJsonWriter
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaJsonWriter)
--public:
-- // sb - string builder to write the document to. Must remain alive for the whole lifetime of this object.
-- VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
-- ~VmaJsonWriter();
--
-- // Begins object by writing "{".
-- // Inside an object, you must call pairs of WriteString and a value, e.g.:
-- // j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject();
-- // Will write: { "A": 1, "B": 2 }
-- void BeginObject(bool singleLine = false);
-- // Ends object by writing "}".
-- void EndObject();
--
-- // Begins array by writing "[".
-- // Inside an array, you can write a sequence of any values.
-- void BeginArray(bool singleLine = false);
-- // Ends array by writing "[".
-- void EndArray();
--
-- // Writes a string value inside "".
-- // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped.
-- void WriteString(const char* pStr);
--
-- // Begins writing a string value.
-- // Call BeginString, ContinueString, ContinueString, ..., EndString instead of
-- // WriteString to conveniently build the string content incrementally, made of
-- // parts including numbers.
-- void BeginString(const char* pStr = VMA_NULL);
-- // Posts next part of an open string.
-- void ContinueString(const char* pStr);
-- // Posts next part of an open string. The number is converted to decimal characters.
-- void ContinueString(uint32_t n);
-- void ContinueString(uint64_t n);
-- // Posts next part of an open string. Pointer value is converted to characters
-- // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00
-- void ContinueString_Pointer(const void* ptr);
-- // Ends writing a string value by writing '"'.
-- void EndString(const char* pStr = VMA_NULL);
--
-- // Writes a number value.
-- void WriteNumber(uint32_t n);
-- void WriteNumber(uint64_t n);
-- // Writes a boolean value - false or true.
-- void WriteBool(bool b);
-- // Writes a null value.
-- void WriteNull();
--
--private:
-- enum COLLECTION_TYPE
-- {
-- COLLECTION_TYPE_OBJECT,
-- COLLECTION_TYPE_ARRAY,
-- };
-- struct StackItem
-- {
-- COLLECTION_TYPE type;
-- uint32_t valueCount;
-- bool singleLineMode;
-- };
--
-- static const char* const INDENT;
--
-- VmaStringBuilder& m_SB;
-- VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
-- bool m_InsideString;
--
-- void BeginValue(bool isString);
-- void WriteIndent(bool oneLess = false);
--};
--const char* const VmaJsonWriter::INDENT = " ";
--
--#ifndef _VMA_JSON_WRITER_FUNCTIONS
--VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb)
-- : m_SB(sb),
-- m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
-- m_InsideString(false) {}
--
--VmaJsonWriter::~VmaJsonWriter()
--{
-- VMA_ASSERT(!m_InsideString);
-- VMA_ASSERT(m_Stack.empty());
--}
--
--void VmaJsonWriter::BeginObject(bool singleLine)
--{
-- VMA_ASSERT(!m_InsideString);
--
-- BeginValue(false);
-- m_SB.Add('{');
--
-- StackItem item;
-- item.type = COLLECTION_TYPE_OBJECT;
-- item.valueCount = 0;
-- item.singleLineMode = singleLine;
-- m_Stack.push_back(item);
--}
--
--void VmaJsonWriter::EndObject()
--{
-- VMA_ASSERT(!m_InsideString);
--
-- WriteIndent(true);
-- m_SB.Add('}');
--
-- VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
-- m_Stack.pop_back();
--}
--
--void VmaJsonWriter::BeginArray(bool singleLine)
--{
-- VMA_ASSERT(!m_InsideString);
--
-- BeginValue(false);
-- m_SB.Add('[');
--
-- StackItem item;
-- item.type = COLLECTION_TYPE_ARRAY;
-- item.valueCount = 0;
-- item.singleLineMode = singleLine;
-- m_Stack.push_back(item);
--}
--
--void VmaJsonWriter::EndArray()
--{
-- VMA_ASSERT(!m_InsideString);
--
-- WriteIndent(true);
-- m_SB.Add(']');
--
-- VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
-- m_Stack.pop_back();
--}
--
--void VmaJsonWriter::WriteString(const char* pStr)
--{
-- BeginString(pStr);
-- EndString();
--}
--
--void VmaJsonWriter::BeginString(const char* pStr)
--{
-- VMA_ASSERT(!m_InsideString);
--
-- BeginValue(true);
-- m_SB.Add('"');
-- m_InsideString = true;
-- if (pStr != VMA_NULL && pStr[0] != '\0')
-- {
-- ContinueString(pStr);
-- }
--}
--
--void VmaJsonWriter::ContinueString(const char* pStr)
--{
-- VMA_ASSERT(m_InsideString);
--
-- const size_t strLen = strlen(pStr);
-- for (size_t i = 0; i < strLen; ++i)
-- {
-- char ch = pStr[i];
-- if (ch == '\\')
-- {
-- m_SB.Add("\\\\");
-- }
-- else if (ch == '"')
-- {
-- m_SB.Add("\\\"");
-- }
-- else if (ch >= 32)
-- {
-- m_SB.Add(ch);
-- }
-- else switch (ch)
-- {
-- case '\b':
-- m_SB.Add("\\b");
-- break;
-- case '\f':
-- m_SB.Add("\\f");
-- break;
-- case '\n':
-- m_SB.Add("\\n");
-- break;
-- case '\r':
-- m_SB.Add("\\r");
-- break;
-- case '\t':
-- m_SB.Add("\\t");
-- break;
-- default:
-- VMA_ASSERT(0 && "Character not currently supported.");
-- }
-- }
--}
--
--void VmaJsonWriter::ContinueString(uint32_t n)
--{
-- VMA_ASSERT(m_InsideString);
-- m_SB.AddNumber(n);
--}
--
--void VmaJsonWriter::ContinueString(uint64_t n)
--{
-- VMA_ASSERT(m_InsideString);
-- m_SB.AddNumber(n);
--}
--
--void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
--{
-- VMA_ASSERT(m_InsideString);
-- m_SB.AddPointer(ptr);
--}
--
--void VmaJsonWriter::EndString(const char* pStr)
--{
-- VMA_ASSERT(m_InsideString);
-- if (pStr != VMA_NULL && pStr[0] != '\0')
-- {
-- ContinueString(pStr);
-- }
-- m_SB.Add('"');
-- m_InsideString = false;
--}
--
--void VmaJsonWriter::WriteNumber(uint32_t n)
--{
-- VMA_ASSERT(!m_InsideString);
-- BeginValue(false);
-- m_SB.AddNumber(n);
--}
--
--void VmaJsonWriter::WriteNumber(uint64_t n)
--{
-- VMA_ASSERT(!m_InsideString);
-- BeginValue(false);
-- m_SB.AddNumber(n);
--}
--
--void VmaJsonWriter::WriteBool(bool b)
--{
-- VMA_ASSERT(!m_InsideString);
-- BeginValue(false);
-- m_SB.Add(b ? "true" : "false");
--}
--
--void VmaJsonWriter::WriteNull()
--{
-- VMA_ASSERT(!m_InsideString);
-- BeginValue(false);
-- m_SB.Add("null");
--}
--
--void VmaJsonWriter::BeginValue(bool isString)
--{
-- if (!m_Stack.empty())
-- {
-- StackItem& currItem = m_Stack.back();
-- if (currItem.type == COLLECTION_TYPE_OBJECT &&
-- currItem.valueCount % 2 == 0)
-- {
-- VMA_ASSERT(isString);
-- }
--
-- if (currItem.type == COLLECTION_TYPE_OBJECT &&
-- currItem.valueCount % 2 != 0)
-- {
-- m_SB.Add(": ");
-- }
-- else if (currItem.valueCount > 0)
-- {
-- m_SB.Add(", ");
-- WriteIndent();
-- }
-- else
-- {
-- WriteIndent();
-- }
-- ++currItem.valueCount;
-- }
--}
--
--void VmaJsonWriter::WriteIndent(bool oneLess)
--{
-- if (!m_Stack.empty() && !m_Stack.back().singleLineMode)
-- {
-- m_SB.AddNewLine();
--
-- size_t count = m_Stack.size();
-- if (count > 0 && oneLess)
-- {
-- --count;
-- }
-- for (size_t i = 0; i < count; ++i)
-- {
-- m_SB.Add(INDENT);
-- }
-- }
--}
--#endif // _VMA_JSON_WRITER_FUNCTIONS
--
--static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat)
--{
-- json.BeginObject();
--
-- json.WriteString("BlockCount");
-- json.WriteNumber(stat.statistics.blockCount);
-- json.WriteString("BlockBytes");
-- json.WriteNumber(stat.statistics.blockBytes);
-- json.WriteString("AllocationCount");
-- json.WriteNumber(stat.statistics.allocationCount);
-- json.WriteString("AllocationBytes");
-- json.WriteNumber(stat.statistics.allocationBytes);
-- json.WriteString("UnusedRangeCount");
-- json.WriteNumber(stat.unusedRangeCount);
--
-- if (stat.statistics.allocationCount > 1)
-- {
-- json.WriteString("AllocationSizeMin");
-- json.WriteNumber(stat.allocationSizeMin);
-- json.WriteString("AllocationSizeMax");
-- json.WriteNumber(stat.allocationSizeMax);
-- }
-- if (stat.unusedRangeCount > 1)
-- {
-- json.WriteString("UnusedRangeSizeMin");
-- json.WriteNumber(stat.unusedRangeSizeMin);
-- json.WriteString("UnusedRangeSizeMax");
-- json.WriteNumber(stat.unusedRangeSizeMax);
-- }
-- json.EndObject();
--}
--#endif // _VMA_JSON_WRITER
--
--#ifndef _VMA_MAPPING_HYSTERESIS
--
--class VmaMappingHysteresis
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaMappingHysteresis)
--public:
-- VmaMappingHysteresis() = default;
--
-- uint32_t GetExtraMapping() const { return m_ExtraMapping; }
--
-- // Call when Map was called.
-- // Returns true if switched to extra +1 mapping reference count.
-- bool PostMap()
-- {
--#if VMA_MAPPING_HYSTERESIS_ENABLED
-- if(m_ExtraMapping == 0)
-- {
-- ++m_MajorCounter;
-- if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING)
-- {
-- m_ExtraMapping = 1;
-- m_MajorCounter = 0;
-- m_MinorCounter = 0;
-- return true;
-- }
-- }
-- else // m_ExtraMapping == 1
-- PostMinorCounter();
--#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
-- return false;
-- }
--
-- // Call when Unmap was called.
-- void PostUnmap()
-- {
--#if VMA_MAPPING_HYSTERESIS_ENABLED
-- if(m_ExtraMapping == 0)
-- ++m_MajorCounter;
-- else // m_ExtraMapping == 1
-- PostMinorCounter();
--#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
-- }
--
-- // Call when allocation was made from the memory block.
-- void PostAlloc()
-- {
--#if VMA_MAPPING_HYSTERESIS_ENABLED
-- if(m_ExtraMapping == 1)
-- ++m_MajorCounter;
-- else // m_ExtraMapping == 0
-- PostMinorCounter();
--#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
-- }
--
-- // Call when allocation was freed from the memory block.
-- // Returns true if switched to extra -1 mapping reference count.
-- bool PostFree()
-- {
--#if VMA_MAPPING_HYSTERESIS_ENABLED
-- if(m_ExtraMapping == 1)
-- {
-- ++m_MajorCounter;
-- if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING &&
-- m_MajorCounter > m_MinorCounter + 1)
-- {
-- m_ExtraMapping = 0;
-- m_MajorCounter = 0;
-- m_MinorCounter = 0;
-- return true;
-- }
-- }
-- else // m_ExtraMapping == 0
-- PostMinorCounter();
--#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
-- return false;
-- }
--
--private:
-- static const int32_t COUNTER_MIN_EXTRA_MAPPING = 7;
--
-- uint32_t m_MinorCounter = 0;
-- uint32_t m_MajorCounter = 0;
-- uint32_t m_ExtraMapping = 0; // 0 or 1.
--
-- void PostMinorCounter()
-- {
-- if(m_MinorCounter < m_MajorCounter)
-- {
-- ++m_MinorCounter;
-- }
-- else if(m_MajorCounter > 0)
-- {
-- --m_MajorCounter;
-- --m_MinorCounter;
-- }
-- }
--};
--
--#endif // _VMA_MAPPING_HYSTERESIS
--
--#ifndef _VMA_DEVICE_MEMORY_BLOCK
--/*
--Represents a single block of device memory (`VkDeviceMemory`) with all the
--data about its regions (aka suballocations, #VmaAllocation), assigned and free.
--
--Thread-safety:
--- Access to m_pMetadata must be externally synchronized.
--- Map, Unmap, Bind* are synchronized internally.
--*/
--class VmaDeviceMemoryBlock
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaDeviceMemoryBlock)
--public:
-- VmaBlockMetadata* m_pMetadata;
--
-- VmaDeviceMemoryBlock(VmaAllocator hAllocator);
-- ~VmaDeviceMemoryBlock();
--
-- // Always call after construction.
-- void Init(
-- VmaAllocator hAllocator,
-- VmaPool hParentPool,
-- uint32_t newMemoryTypeIndex,
-- VkDeviceMemory newMemory,
-- VkDeviceSize newSize,
-- uint32_t id,
-- uint32_t algorithm,
-- VkDeviceSize bufferImageGranularity);
-- // Always call before destruction.
-- void Destroy(VmaAllocator allocator);
--
-- VmaPool GetParentPool() const { return m_hParentPool; }
-- VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
-- uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
-- uint32_t GetId() const { return m_Id; }
-- void* GetMappedData() const { return m_pMappedData; }
-- uint32_t GetMapRefCount() const { return m_MapCount; }
--
-- // Call when allocation/free was made from m_pMetadata.
-- // Used for m_MappingHysteresis.
-- void PostAlloc(VmaAllocator hAllocator);
-- void PostFree(VmaAllocator hAllocator);
--
-- // Validates all data structures inside this object. If not valid, returns false.
-- bool Validate() const;
-- VkResult CheckCorruption(VmaAllocator hAllocator);
--
-- // ppData can be null.
-- VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
-- void Unmap(VmaAllocator hAllocator, uint32_t count);
--
-- VkResult WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
-- VkResult ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
--
-- VkResult BindBufferMemory(
-- const VmaAllocator hAllocator,
-- const VmaAllocation hAllocation,
-- VkDeviceSize allocationLocalOffset,
-- VkBuffer hBuffer,
-- const void* pNext);
-- VkResult BindImageMemory(
-- const VmaAllocator hAllocator,
-- const VmaAllocation hAllocation,
-- VkDeviceSize allocationLocalOffset,
-- VkImage hImage,
-- const void* pNext);
--
--private:
-- VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
-- uint32_t m_MemoryTypeIndex;
-- uint32_t m_Id;
-- VkDeviceMemory m_hMemory;
--
-- /*
-- Protects access to m_hMemory so it is not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
-- Also protects m_MapCount, m_pMappedData.
-- Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
-- */
-- VMA_MUTEX m_MapAndBindMutex;
-- VmaMappingHysteresis m_MappingHysteresis;
-- uint32_t m_MapCount;
-- void* m_pMappedData;
--};
--#endif // _VMA_DEVICE_MEMORY_BLOCK
--
--#ifndef _VMA_ALLOCATION_T
--struct VmaAllocation_T
--{
-- friend struct VmaDedicatedAllocationListItemTraits;
--
-- enum FLAGS
-- {
-- FLAG_PERSISTENT_MAP = 0x01,
-- FLAG_MAPPING_ALLOWED = 0x02,
-- };
--
--public:
-- enum ALLOCATION_TYPE
-- {
-- ALLOCATION_TYPE_NONE,
-- ALLOCATION_TYPE_BLOCK,
-- ALLOCATION_TYPE_DEDICATED,
-- };
--
-- // This struct is allocated using VmaPoolAllocator.
-- VmaAllocation_T(bool mappingAllowed);
-- ~VmaAllocation_T();
--
-- void InitBlockAllocation(
-- VmaDeviceMemoryBlock* block,
-- VmaAllocHandle allocHandle,
-- VkDeviceSize alignment,
-- VkDeviceSize size,
-- uint32_t memoryTypeIndex,
-- VmaSuballocationType suballocationType,
-- bool mapped);
-- // pMappedData not null means allocation is created with MAPPED flag.
-- void InitDedicatedAllocation(
-- VmaPool hParentPool,
-- uint32_t memoryTypeIndex,
-- VkDeviceMemory hMemory,
-- VmaSuballocationType suballocationType,
-- void* pMappedData,
-- VkDeviceSize size);
--
-- ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
-- VkDeviceSize GetAlignment() const { return m_Alignment; }
-- VkDeviceSize GetSize() const { return m_Size; }
-- void* GetUserData() const { return m_pUserData; }
-- const char* GetName() const { return m_pName; }
-- VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
--
-- VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; }
-- uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
-- bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; }
-- bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; }
--
-- void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; }
-- void SetName(VmaAllocator hAllocator, const char* pName);
-- void FreeName(VmaAllocator hAllocator);
-- uint8_t SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation);
-- VmaAllocHandle GetAllocHandle() const;
-- VkDeviceSize GetOffset() const;
-- VmaPool GetParentPool() const;
-- VkDeviceMemory GetMemory() const;
-- void* GetMappedData() const;
--
-- void BlockAllocMap();
-- void BlockAllocUnmap();
-- VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
-- void DedicatedAllocUnmap(VmaAllocator hAllocator);
--
--#if VMA_STATS_STRING_ENABLED
-- uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
--
-- void InitBufferImageUsage(uint32_t bufferImageUsage);
-- void PrintParameters(class VmaJsonWriter& json) const;
--#endif
--
--private:
-- // Allocation out of VmaDeviceMemoryBlock.
-- struct BlockAllocation
-- {
-- VmaDeviceMemoryBlock* m_Block;
-- VmaAllocHandle m_AllocHandle;
-- };
-- // Allocation for an object that has its own private VkDeviceMemory.
-- struct DedicatedAllocation
-- {
-- VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
-- VkDeviceMemory m_hMemory;
-- void* m_pMappedData; // Not null means memory is mapped.
-- VmaAllocation_T* m_Prev;
-- VmaAllocation_T* m_Next;
-- };
-- union
-- {
-- // Allocation out of VmaDeviceMemoryBlock.
-- BlockAllocation m_BlockAllocation;
-- // Allocation for an object that has its own private VkDeviceMemory.
-- DedicatedAllocation m_DedicatedAllocation;
-- };
--
-- VkDeviceSize m_Alignment;
-- VkDeviceSize m_Size;
-- void* m_pUserData;
-- char* m_pName;
-- uint32_t m_MemoryTypeIndex;
-- uint8_t m_Type; // ALLOCATION_TYPE
-- uint8_t m_SuballocationType; // VmaSuballocationType
-- // Reference counter for vmaMapMemory()/vmaUnmapMemory().
-- uint8_t m_MapCount;
-- uint8_t m_Flags; // enum FLAGS
--#if VMA_STATS_STRING_ENABLED
-- uint32_t m_BufferImageUsage; // 0 if unknown.
--#endif
--};
--#endif // _VMA_ALLOCATION_T
--
--#ifndef _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS
--struct VmaDedicatedAllocationListItemTraits
--{
-- typedef VmaAllocation_T ItemType;
--
-- static ItemType* GetPrev(const ItemType* item)
-- {
-- VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
-- return item->m_DedicatedAllocation.m_Prev;
-- }
-- static ItemType* GetNext(const ItemType* item)
-- {
-- VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
-- return item->m_DedicatedAllocation.m_Next;
-- }
-- static ItemType*& AccessPrev(ItemType* item)
-- {
-- VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
-- return item->m_DedicatedAllocation.m_Prev;
-- }
-- static ItemType*& AccessNext(ItemType* item)
-- {
-- VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
-- return item->m_DedicatedAllocation.m_Next;
-- }
--};
--#endif // _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS
--
--#ifndef _VMA_DEDICATED_ALLOCATION_LIST
--/*
--Stores linked list of VmaAllocation_T objects.
--Thread-safe, synchronized internally.
--*/
--class VmaDedicatedAllocationList
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaDedicatedAllocationList)
--public:
-- VmaDedicatedAllocationList() {}
-- ~VmaDedicatedAllocationList();
--
-- void Init(bool useMutex) { m_UseMutex = useMutex; }
-- bool Validate();
--
-- void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
-- void AddStatistics(VmaStatistics& inoutStats);
--#if VMA_STATS_STRING_ENABLED
-- // Writes JSON array with the list of allocations.
-- void BuildStatsString(VmaJsonWriter& json);
--#endif
--
-- bool IsEmpty();
-- void Register(VmaAllocation alloc);
-- void Unregister(VmaAllocation alloc);
--
--private:
-- typedef VmaIntrusiveLinkedList<VmaDedicatedAllocationListItemTraits> DedicatedAllocationLinkedList;
--
-- bool m_UseMutex = true;
-- VMA_RW_MUTEX m_Mutex;
-- DedicatedAllocationLinkedList m_AllocationList;
--};
--
--#ifndef _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS
--
--VmaDedicatedAllocationList::~VmaDedicatedAllocationList()
--{
-- VMA_HEAVY_ASSERT(Validate());
--
-- if (!m_AllocationList.IsEmpty())
-- {
-- VMA_ASSERT(false && "Unfreed dedicated allocations found!");
-- }
--}
--
--bool VmaDedicatedAllocationList::Validate()
--{
-- const size_t declaredCount = m_AllocationList.GetCount();
-- size_t actualCount = 0;
-- VmaMutexLockRead lock(m_Mutex, m_UseMutex);
-- for (VmaAllocation alloc = m_AllocationList.Front();
-- alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
-- {
-- ++actualCount;
-- }
-- VMA_VALIDATE(actualCount == declaredCount);
--
-- return true;
--}
--
--void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
--{
-- for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item))
-- {
-- const VkDeviceSize size = item->GetSize();
-- inoutStats.statistics.blockCount++;
-- inoutStats.statistics.blockBytes += size;
-- VmaAddDetailedStatisticsAllocation(inoutStats, item->GetSize());
-- }
--}
--
--void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats)
--{
-- VmaMutexLockRead lock(m_Mutex, m_UseMutex);
--
-- const uint32_t allocCount = (uint32_t)m_AllocationList.GetCount();
-- inoutStats.blockCount += allocCount;
-- inoutStats.allocationCount += allocCount;
--
-- for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item))
-- {
-- const VkDeviceSize size = item->GetSize();
-- inoutStats.blockBytes += size;
-- inoutStats.allocationBytes += size;
-- }
--}
--
--#if VMA_STATS_STRING_ENABLED
--void VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter& json)
--{
-- VmaMutexLockRead lock(m_Mutex, m_UseMutex);
-- json.BeginArray();
-- for (VmaAllocation alloc = m_AllocationList.Front();
-- alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
-- {
-- json.BeginObject(true);
-- alloc->PrintParameters(json);
-- json.EndObject();
-- }
-- json.EndArray();
--}
--#endif // VMA_STATS_STRING_ENABLED
--
--bool VmaDedicatedAllocationList::IsEmpty()
--{
-- VmaMutexLockRead lock(m_Mutex, m_UseMutex);
-- return m_AllocationList.IsEmpty();
--}
--
--void VmaDedicatedAllocationList::Register(VmaAllocation alloc)
--{
-- VmaMutexLockWrite lock(m_Mutex, m_UseMutex);
-- m_AllocationList.PushBack(alloc);
--}
--
--void VmaDedicatedAllocationList::Unregister(VmaAllocation alloc)
--{
-- VmaMutexLockWrite lock(m_Mutex, m_UseMutex);
-- m_AllocationList.Remove(alloc);
--}
--#endif // _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS
--#endif // _VMA_DEDICATED_ALLOCATION_LIST
--
--#ifndef _VMA_SUBALLOCATION
--/*
--Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
--allocated memory block or free.
--*/
--struct VmaSuballocation
--{
-- VkDeviceSize offset;
-- VkDeviceSize size;
-- void* userData;
-- VmaSuballocationType type;
--};
--
--// Comparator for offsets.
--struct VmaSuballocationOffsetLess
--{
-- bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
-- {
-- return lhs.offset < rhs.offset;
-- }
--};
--
--struct VmaSuballocationOffsetGreater
--{
-- bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
-- {
-- return lhs.offset > rhs.offset;
-- }
--};
--
--struct VmaSuballocationItemSizeLess
--{
-- bool operator()(const VmaSuballocationList::iterator lhs,
-- const VmaSuballocationList::iterator rhs) const
-- {
-- return lhs->size < rhs->size;
-- }
--
-- bool operator()(const VmaSuballocationList::iterator lhs,
-- VkDeviceSize rhsSize) const
-- {
-- return lhs->size < rhsSize;
-- }
--};
--#endif // _VMA_SUBALLOCATION
--
--#ifndef _VMA_ALLOCATION_REQUEST
--/*
--Parameters of planned allocation inside a VmaDeviceMemoryBlock.
--item points to a FREE suballocation.
--*/
--struct VmaAllocationRequest
--{
-- VmaAllocHandle allocHandle;
-- VkDeviceSize size;
-- VmaSuballocationList::iterator item;
-- void* customData;
-- uint64_t algorithmData;
-- VmaAllocationRequestType type;
--};
--#endif // _VMA_ALLOCATION_REQUEST
--
--#ifndef _VMA_BLOCK_METADATA
--/*
--Data structure used for bookkeeping of allocations and unused ranges of memory
--in a single VkDeviceMemory block.
--*/
--class VmaBlockMetadata
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata)
--public:
-- // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole lifetime of this object.
-- VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
-- VkDeviceSize bufferImageGranularity, bool isVirtual);
-- virtual ~VmaBlockMetadata() = default;
--
-- virtual void Init(VkDeviceSize size) { m_Size = size; }
-- bool IsVirtual() const { return m_IsVirtual; }
-- VkDeviceSize GetSize() const { return m_Size; }
--
-- // Validates all data structures inside this object. If not valid, returns false.
-- virtual bool Validate() const = 0;
-- virtual size_t GetAllocationCount() const = 0;
-- virtual size_t GetFreeRegionsCount() const = 0;
-- virtual VkDeviceSize GetSumFreeSize() const = 0;
-- // Returns true if this block is empty - contains only single free suballocation.
-- virtual bool IsEmpty() const = 0;
-- virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0;
-- virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0;
-- virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0;
--
-- virtual VmaAllocHandle GetAllocationListBegin() const = 0;
-- virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0;
-- virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0;
--
-- // Shouldn't modify blockCount.
-- virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0;
-- virtual void AddStatistics(VmaStatistics& inoutStats) const = 0;
--
--#if VMA_STATS_STRING_ENABLED
-- virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
--#endif
--
-- // Tries to find a place for suballocation with given parameters inside this block.
-- // If succeeded, fills pAllocationRequest and returns true.
-- // If failed, returns false.
-- virtual bool CreateAllocationRequest(
-- VkDeviceSize allocSize,
-- VkDeviceSize allocAlignment,
-- bool upperAddress,
-- VmaSuballocationType allocType,
-- // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
-- uint32_t strategy,
-- VmaAllocationRequest* pAllocationRequest) = 0;
--
-- virtual VkResult CheckCorruption(const void* pBlockData) = 0;
--
-- // Makes actual allocation based on request. Request must already be checked and valid.
-- virtual void Alloc(
-- const VmaAllocationRequest& request,
-- VmaSuballocationType type,
-- void* userData) = 0;
--
-- // Frees suballocation assigned to given memory region.
-- virtual void Free(VmaAllocHandle allocHandle) = 0;
--
-- // Frees all allocations.
-- // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations!
-- virtual void Clear() = 0;
--
-- virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0;
-- virtual void DebugLogAllAllocations() const = 0;
--
--protected:
-- const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
-- VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
-- VkDeviceSize GetDebugMargin() const { return VkDeviceSize(IsVirtual() ? 0 : VMA_DEBUG_MARGIN); }
--
-- void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const;
--#if VMA_STATS_STRING_ENABLED
-- // mapRefCount == UINT32_MAX means unspecified.
-- void PrintDetailedMap_Begin(class VmaJsonWriter& json,
-- VkDeviceSize unusedBytes,
-- size_t allocationCount,
-- size_t unusedRangeCount) const;
-- void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
-- VkDeviceSize offset, VkDeviceSize size, void* userData) const;
-- void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
-- VkDeviceSize offset,
-- VkDeviceSize size) const;
-- void PrintDetailedMap_End(class VmaJsonWriter& json) const;
--#endif
--
--private:
-- VkDeviceSize m_Size;
-- const VkAllocationCallbacks* m_pAllocationCallbacks;
-- const VkDeviceSize m_BufferImageGranularity;
-- const bool m_IsVirtual;
--};
--
--#ifndef _VMA_BLOCK_METADATA_FUNCTIONS
--VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
-- VkDeviceSize bufferImageGranularity, bool isVirtual)
-- : m_Size(0),
-- m_pAllocationCallbacks(pAllocationCallbacks),
-- m_BufferImageGranularity(bufferImageGranularity),
-- m_IsVirtual(isVirtual) {}
--
--void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const
--{
-- if (IsVirtual())
-- {
-- VMA_DEBUG_LOG_FORMAT("UNFREED VIRTUAL ALLOCATION; Offset: %llu; Size: %llu; UserData: %p", offset, size, userData);
-- }
-- else
-- {
-- VMA_ASSERT(userData != VMA_NULL);
-- VmaAllocation allocation = reinterpret_cast<VmaAllocation>(userData);
--
-- userData = allocation->GetUserData();
-- const char* name = allocation->GetName();
--
--#if VMA_STATS_STRING_ENABLED
-- VMA_DEBUG_LOG_FORMAT("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %s; Usage: %u",
-- offset, size, userData, name ? name : "vma_empty",
-- VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()],
-- allocation->GetBufferImageUsage());
--#else
-- VMA_DEBUG_LOG_FORMAT("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %u",
-- offset, size, userData, name ? name : "vma_empty",
-- (uint32_t)allocation->GetSuballocationType());
--#endif // VMA_STATS_STRING_ENABLED
-- }
--
--}
--
--#if VMA_STATS_STRING_ENABLED
--void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
-- VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const
--{
-- json.WriteString("TotalBytes");
-- json.WriteNumber(GetSize());
--
-- json.WriteString("UnusedBytes");
-- json.WriteNumber(unusedBytes);
--
-- json.WriteString("Allocations");
-- json.WriteNumber((uint64_t)allocationCount);
--
-- json.WriteString("UnusedRanges");
-- json.WriteNumber((uint64_t)unusedRangeCount);
--
-- json.WriteString("Suballocations");
-- json.BeginArray();
--}
--
--void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
-- VkDeviceSize offset, VkDeviceSize size, void* userData) const
--{
-- json.BeginObject(true);
--
-- json.WriteString("Offset");
-- json.WriteNumber(offset);
--
-- if (IsVirtual())
-- {
-- json.WriteString("Size");
-- json.WriteNumber(size);
-- if (userData)
-- {
-- json.WriteString("CustomData");
-- json.BeginString();
-- json.ContinueString_Pointer(userData);
-- json.EndString();
-- }
-- }
-- else
-- {
-- ((VmaAllocation)userData)->PrintParameters(json);
-- }
--
-- json.EndObject();
--}
--
--void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
-- VkDeviceSize offset, VkDeviceSize size) const
--{
-- json.BeginObject(true);
--
-- json.WriteString("Offset");
-- json.WriteNumber(offset);
--
-- json.WriteString("Type");
-- json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
--
-- json.WriteString("Size");
-- json.WriteNumber(size);
--
-- json.EndObject();
--}
--
--void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
--{
-- json.EndArray();
--}
--#endif // VMA_STATS_STRING_ENABLED
--#endif // _VMA_BLOCK_METADATA_FUNCTIONS
--#endif // _VMA_BLOCK_METADATA
--
--#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY
--// Before deleting object of this class remember to call 'Destroy()'
--class VmaBlockBufferImageGranularity final
--{
--public:
-- struct ValidationContext
-- {
-- const VkAllocationCallbacks* allocCallbacks;
-- uint16_t* pageAllocs;
-- };
--
-- VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity);
-- ~VmaBlockBufferImageGranularity();
--
-- bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_BUFFER_IMAGE_GRANULARITY; }
--
-- void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size);
-- // Before destroying object you must call free it's memory
-- void Destroy(const VkAllocationCallbacks* pAllocationCallbacks);
--
-- void RoundupAllocRequest(VmaSuballocationType allocType,
-- VkDeviceSize& inOutAllocSize,
-- VkDeviceSize& inOutAllocAlignment) const;
--
-- bool CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
-- VkDeviceSize allocSize,
-- VkDeviceSize blockOffset,
-- VkDeviceSize blockSize,
-- VmaSuballocationType allocType) const;
--
-- void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size);
-- void FreePages(VkDeviceSize offset, VkDeviceSize size);
-- void Clear();
--
-- ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks,
-- bool isVirutal) const;
-- bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const;
-- bool FinishValidation(ValidationContext& ctx) const;
--
--private:
-- static const uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256;
--
-- struct RegionInfo
-- {
-- uint8_t allocType;
-- uint16_t allocCount;
-- };
--
-- VkDeviceSize m_BufferImageGranularity;
-- uint32_t m_RegionCount;
-- RegionInfo* m_RegionInfo;
--
-- uint32_t GetStartPage(VkDeviceSize offset) const { return OffsetToPageIndex(offset & ~(m_BufferImageGranularity - 1)); }
-- uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return OffsetToPageIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); }
--
-- uint32_t OffsetToPageIndex(VkDeviceSize offset) const;
-- void AllocPage(RegionInfo& page, uint8_t allocType);
--};
--
--#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
--VmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity)
-- : m_BufferImageGranularity(bufferImageGranularity),
-- m_RegionCount(0),
-- m_RegionInfo(VMA_NULL) {}
--
--VmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity()
--{
-- VMA_ASSERT(m_RegionInfo == VMA_NULL && "Free not called before destroying object!");
--}
--
--void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size)
--{
-- if (IsEnabled())
-- {
-- m_RegionCount = static_cast<uint32_t>(VmaDivideRoundingUp(size, m_BufferImageGranularity));
-- m_RegionInfo = vma_new_array(pAllocationCallbacks, RegionInfo, m_RegionCount);
-- memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
-- }
--}
--
--void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks)
--{
-- if (m_RegionInfo)
-- {
-- vma_delete_array(pAllocationCallbacks, m_RegionInfo, m_RegionCount);
-- m_RegionInfo = VMA_NULL;
-- }
--}
--
--void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType,
-- VkDeviceSize& inOutAllocSize,
-- VkDeviceSize& inOutAllocAlignment) const
--{
-- if (m_BufferImageGranularity > 1 &&
-- m_BufferImageGranularity <= MAX_LOW_BUFFER_IMAGE_GRANULARITY)
-- {
-- if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
-- allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
-- allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
-- {
-- inOutAllocAlignment = VMA_MAX(inOutAllocAlignment, m_BufferImageGranularity);
-- inOutAllocSize = VmaAlignUp(inOutAllocSize, m_BufferImageGranularity);
-- }
-- }
--}
--
--bool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
-- VkDeviceSize allocSize,
-- VkDeviceSize blockOffset,
-- VkDeviceSize blockSize,
-- VmaSuballocationType allocType) const
--{
-- if (IsEnabled())
-- {
-- uint32_t startPage = GetStartPage(inOutAllocOffset);
-- if (m_RegionInfo[startPage].allocCount > 0 &&
-- VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[startPage].allocType), allocType))
-- {
-- inOutAllocOffset = VmaAlignUp(inOutAllocOffset, m_BufferImageGranularity);
-- if (blockSize < allocSize + inOutAllocOffset - blockOffset)
-- return true;
-- ++startPage;
-- }
-- uint32_t endPage = GetEndPage(inOutAllocOffset, allocSize);
-- if (endPage != startPage &&
-- m_RegionInfo[endPage].allocCount > 0 &&
-- VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[endPage].allocType), allocType))
-- {
-- return true;
-- }
-- }
-- return false;
--}
--
--void VmaBlockBufferImageGranularity::AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size)
--{
-- if (IsEnabled())
-- {
-- uint32_t startPage = GetStartPage(offset);
-- AllocPage(m_RegionInfo[startPage], allocType);
--
-- uint32_t endPage = GetEndPage(offset, size);
-- if (startPage != endPage)
-- AllocPage(m_RegionInfo[endPage], allocType);
-- }
--}
--
--void VmaBlockBufferImageGranularity::FreePages(VkDeviceSize offset, VkDeviceSize size)
--{
-- if (IsEnabled())
-- {
-- uint32_t startPage = GetStartPage(offset);
-- --m_RegionInfo[startPage].allocCount;
-- if (m_RegionInfo[startPage].allocCount == 0)
-- m_RegionInfo[startPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;
-- uint32_t endPage = GetEndPage(offset, size);
-- if (startPage != endPage)
-- {
-- --m_RegionInfo[endPage].allocCount;
-- if (m_RegionInfo[endPage].allocCount == 0)
-- m_RegionInfo[endPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;
-- }
-- }
--}
--
--void VmaBlockBufferImageGranularity::Clear()
--{
-- if (m_RegionInfo)
-- memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
--}
--
--VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation(
-- const VkAllocationCallbacks* pAllocationCallbacks, bool isVirutal) const
--{
-- ValidationContext ctx{ pAllocationCallbacks, VMA_NULL };
-- if (!isVirutal && IsEnabled())
-- {
-- ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount);
-- memset(ctx.pageAllocs, 0, m_RegionCount * sizeof(uint16_t));
-- }
-- return ctx;
--}
--
--bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx,
-- VkDeviceSize offset, VkDeviceSize size) const
--{
-- if (IsEnabled())
-- {
-- uint32_t start = GetStartPage(offset);
-- ++ctx.pageAllocs[start];
-- VMA_VALIDATE(m_RegionInfo[start].allocCount > 0);
--
-- uint32_t end = GetEndPage(offset, size);
-- if (start != end)
-- {
-- ++ctx.pageAllocs[end];
-- VMA_VALIDATE(m_RegionInfo[end].allocCount > 0);
-- }
-- }
-- return true;
--}
--
--bool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const
--{
-- // Check proper page structure
-- if (IsEnabled())
-- {
-- VMA_ASSERT(ctx.pageAllocs != VMA_NULL && "Validation context not initialized!");
--
-- for (uint32_t page = 0; page < m_RegionCount; ++page)
-- {
-- VMA_VALIDATE(ctx.pageAllocs[page] == m_RegionInfo[page].allocCount);
-- }
-- vma_delete_array(ctx.allocCallbacks, ctx.pageAllocs, m_RegionCount);
-- ctx.pageAllocs = VMA_NULL;
-- }
-- return true;
--}
--
--uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) const
--{
-- return static_cast<uint32_t>(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity));
--}
--
--void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType)
--{
-- // When current alloc type is free then it can be overridden by new type
-- if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE))
-- page.allocType = allocType;
--
-- ++page.allocCount;
--}
--#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
--#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY
--
--#if 0
--#ifndef _VMA_BLOCK_METADATA_GENERIC
--class VmaBlockMetadata_Generic : public VmaBlockMetadata
--{
-- friend class VmaDefragmentationAlgorithm_Generic;
-- friend class VmaDefragmentationAlgorithm_Fast;
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Generic)
--public:
-- VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks,
-- VkDeviceSize bufferImageGranularity, bool isVirtual);
-- virtual ~VmaBlockMetadata_Generic() = default;
--
-- size_t GetAllocationCount() const override { return m_Suballocations.size() - m_FreeCount; }
-- VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; }
-- bool IsEmpty() const override { return (m_Suballocations.size() == 1) && (m_FreeCount == 1); }
-- void Free(VmaAllocHandle allocHandle) override { FreeSuballocation(FindAtOffset((VkDeviceSize)allocHandle - 1)); }
-- VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }
--
-- void Init(VkDeviceSize size) override;
-- bool Validate() const override;
--
-- void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
-- void AddStatistics(VmaStatistics& inoutStats) const override;
--
--#if VMA_STATS_STRING_ENABLED
-- void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override;
--#endif
--
-- bool CreateAllocationRequest(
-- VkDeviceSize allocSize,
-- VkDeviceSize allocAlignment,
-- bool upperAddress,
-- VmaSuballocationType allocType,
-- uint32_t strategy,
-- VmaAllocationRequest* pAllocationRequest) override;
--
-- VkResult CheckCorruption(const void* pBlockData) override;
--
-- void Alloc(
-- const VmaAllocationRequest& request,
-- VmaSuballocationType type,
-- void* userData) override;
--
-- void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
-- void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
-- VmaAllocHandle GetAllocationListBegin() const override;
-- VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
-- void Clear() override;
-- void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
-- void DebugLogAllAllocations() const override;
--
--private:
-- uint32_t m_FreeCount;
-- VkDeviceSize m_SumFreeSize;
-- VmaSuballocationList m_Suballocations;
-- // Suballocations that are free. Sorted by size, ascending.
-- VmaVector<VmaSuballocationList::iterator, VmaStlAllocator<VmaSuballocationList::iterator>> m_FreeSuballocationsBySize;
--
-- VkDeviceSize AlignAllocationSize(VkDeviceSize size) const { return IsVirtual() ? size : VmaAlignUp(size, (VkDeviceSize)16); }
--
-- VmaSuballocationList::iterator FindAtOffset(VkDeviceSize offset) const;
-- bool ValidateFreeSuballocationList() const;
--
-- // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
-- // If yes, fills pOffset and returns true. If no, returns false.
-- bool CheckAllocation(
-- VkDeviceSize allocSize,
-- VkDeviceSize allocAlignment,
-- VmaSuballocationType allocType,
-- VmaSuballocationList::const_iterator suballocItem,
-- VmaAllocHandle* pAllocHandle) const;
--
-- // Given free suballocation, it merges it with following one, which must also be free.
-- void MergeFreeWithNext(VmaSuballocationList::iterator item);
-- // Releases given suballocation, making it free.
-- // Merges it with adjacent free suballocations if applicable.
-- // Returns iterator to new free suballocation at this place.
-- VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
-- // Given free suballocation, it inserts it into sorted list of
-- // m_FreeSuballocationsBySize if it is suitable.
-- void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
-- // Given free suballocation, it removes it from sorted list of
-- // m_FreeSuballocationsBySize if it is suitable.
-- void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
--};
--
--#ifndef _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS
--VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks,
-- VkDeviceSize bufferImageGranularity, bool isVirtual)
-- : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
-- m_FreeCount(0),
-- m_SumFreeSize(0),
-- m_Suballocations(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
-- m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(pAllocationCallbacks)) {}
--
--void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
--{
-- VmaBlockMetadata::Init(size);
--
-- m_FreeCount = 1;
-- m_SumFreeSize = size;
--
-- VmaSuballocation suballoc = {};
-- suballoc.offset = 0;
-- suballoc.size = size;
-- suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
--
-- m_Suballocations.push_back(suballoc);
-- m_FreeSuballocationsBySize.push_back(m_Suballocations.begin());
--}
--
--bool VmaBlockMetadata_Generic::Validate() const
--{
-- VMA_VALIDATE(!m_Suballocations.empty());
--
-- // Expected offset of new suballocation as calculated from previous ones.
-- VkDeviceSize calculatedOffset = 0;
-- // Expected number of free suballocations as calculated from traversing their list.
-- uint32_t calculatedFreeCount = 0;
-- // Expected sum size of free suballocations as calculated from traversing their list.
-- VkDeviceSize calculatedSumFreeSize = 0;
-- // Expected number of free suballocations that should be registered in
-- // m_FreeSuballocationsBySize calculated from traversing their list.
-- size_t freeSuballocationsToRegister = 0;
-- // True if previous visited suballocation was free.
-- bool prevFree = false;
--
-- const VkDeviceSize debugMargin = GetDebugMargin();
--
-- for (const auto& subAlloc : m_Suballocations)
-- {
-- // Actual offset of this suballocation doesn't match expected one.
-- VMA_VALIDATE(subAlloc.offset == calculatedOffset);
--
-- const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
-- // Two adjacent free suballocations are invalid. They should be merged.
-- VMA_VALIDATE(!prevFree || !currFree);
--
-- VmaAllocation alloc = (VmaAllocation)subAlloc.userData;
-- if (!IsVirtual())
-- {
-- VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
-- }
--
-- if (currFree)
-- {
-- calculatedSumFreeSize += subAlloc.size;
-- ++calculatedFreeCount;
-- ++freeSuballocationsToRegister;
--
-- // Margin required between allocations - every free space must be at least that large.
-- VMA_VALIDATE(subAlloc.size >= debugMargin);
-- }
-- else
-- {
-- if (!IsVirtual())
-- {
-- VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == subAlloc.offset + 1);
-- VMA_VALIDATE(alloc->GetSize() == subAlloc.size);
-- }
--
-- // Margin required between allocations - previous allocation must be free.
-- VMA_VALIDATE(debugMargin == 0 || prevFree);
-- }
--
-- calculatedOffset += subAlloc.size;
-- prevFree = currFree;
-- }
--
-- // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
-- // match expected one.
-- VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
--
-- VkDeviceSize lastSize = 0;
-- for (size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
-- {
-- VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
--
-- // Only free suballocations can be registered in m_FreeSuballocationsBySize.
-- VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
-- // They must be sorted by size ascending.
-- VMA_VALIDATE(suballocItem->size >= lastSize);
--
-- lastSize = suballocItem->size;
-- }
--
-- // Check if totals match calculated values.
-- VMA_VALIDATE(ValidateFreeSuballocationList());
-- VMA_VALIDATE(calculatedOffset == GetSize());
-- VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
-- VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
--
-- return true;
--}
--
--void VmaBlockMetadata_Generic::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
--{
-- const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
-- inoutStats.statistics.blockCount++;
-- inoutStats.statistics.blockBytes += GetSize();
--
-- for (const auto& suballoc : m_Suballocations)
-- {
-- if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-- VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
-- else
-- VmaAddDetailedStatisticsUnusedRange(inoutStats, suballoc.size);
-- }
--}
--
--void VmaBlockMetadata_Generic::AddStatistics(VmaStatistics& inoutStats) const
--{
-- inoutStats.blockCount++;
-- inoutStats.allocationCount += (uint32_t)m_Suballocations.size() - m_FreeCount;
-- inoutStats.blockBytes += GetSize();
-- inoutStats.allocationBytes += GetSize() - m_SumFreeSize;
--}
--
--#if VMA_STATS_STRING_ENABLED
--void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const
--{
-- PrintDetailedMap_Begin(json,
-- m_SumFreeSize, // unusedBytes
-- m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
-- m_FreeCount, // unusedRangeCount
-- mapRefCount);
--
-- for (const auto& suballoc : m_Suballocations)
-- {
-- if (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
-- {
-- PrintDetailedMap_UnusedRange(json, suballoc.offset, suballoc.size);
-- }
-- else
-- {
-- PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
-- }
-- }
--
-- PrintDetailedMap_End(json);
--}
--#endif // VMA_STATS_STRING_ENABLED
--
--bool VmaBlockMetadata_Generic::CreateAllocationRequest(
-- VkDeviceSize allocSize,
-- VkDeviceSize allocAlignment,
-- bool upperAddress,
-- VmaSuballocationType allocType,
-- uint32_t strategy,
-- VmaAllocationRequest* pAllocationRequest)
--{
-- VMA_ASSERT(allocSize > 0);
-- VMA_ASSERT(!upperAddress);
-- VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
-- VMA_ASSERT(pAllocationRequest != VMA_NULL);
-- VMA_HEAVY_ASSERT(Validate());
--
-- allocSize = AlignAllocationSize(allocSize);
--
-- pAllocationRequest->type = VmaAllocationRequestType::Normal;
-- pAllocationRequest->size = allocSize;
--
-- const VkDeviceSize debugMargin = GetDebugMargin();
--
-- // There is not enough total free space in this block to fulfill the request: Early return.
-- if (m_SumFreeSize < allocSize + debugMargin)
-- {
-- return false;
-- }
--
-- // New algorithm, efficiently searching freeSuballocationsBySize.
-- const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
-- if (freeSuballocCount > 0)
-- {
-- if (strategy == 0 ||
-- strategy == VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT)
-- {
-- // Find first free suballocation with size not less than allocSize + debugMargin.
-- VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
-- m_FreeSuballocationsBySize.data(),
-- m_FreeSuballocationsBySize.data() + freeSuballocCount,
-- allocSize + debugMargin,
-- VmaSuballocationItemSizeLess());
-- size_t index = it - m_FreeSuballocationsBySize.data();
-- for (; index < freeSuballocCount; ++index)
-- {
-- if (CheckAllocation(
-- allocSize,
-- allocAlignment,
-- allocType,
-- m_FreeSuballocationsBySize[index],
-- &pAllocationRequest->allocHandle))
-- {
-- pAllocationRequest->item = m_FreeSuballocationsBySize[index];
-- return true;
-- }
-- }
-- }
-- else if (strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
-- {
-- for (VmaSuballocationList::iterator it = m_Suballocations.begin();
-- it != m_Suballocations.end();
-- ++it)
-- {
-- if (it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
-- allocSize,
-- allocAlignment,
-- allocType,
-- it,
-- &pAllocationRequest->allocHandle))
-- {
-- pAllocationRequest->item = it;
-- return true;
-- }
-- }
-- }
-- else
-- {
-- VMA_ASSERT(strategy & (VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT ));
-- // Search staring from biggest suballocations.
-- for (size_t index = freeSuballocCount; index--; )
-- {
-- if (CheckAllocation(
-- allocSize,
-- allocAlignment,
-- allocType,
-- m_FreeSuballocationsBySize[index],
-- &pAllocationRequest->allocHandle))
-- {
-- pAllocationRequest->item = m_FreeSuballocationsBySize[index];
-- return true;
-- }
-- }
-- }
-- }
--
-- return false;
--}
--
--VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
--{
-- for (auto& suballoc : m_Suballocations)
-- {
-- if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-- {
-- if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
-- {
-- VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
-- return VK_ERROR_UNKNOWN_COPY;
-- }
-- }
-- }
--
-- return VK_SUCCESS;
--}
--
--void VmaBlockMetadata_Generic::Alloc(
-- const VmaAllocationRequest& request,
-- VmaSuballocationType type,
-- void* userData)
--{
-- VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
-- VMA_ASSERT(request.item != m_Suballocations.end());
-- VmaSuballocation& suballoc = *request.item;
-- // Given suballocation is a free block.
-- VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
--
-- // Given offset is inside this suballocation.
-- VMA_ASSERT((VkDeviceSize)request.allocHandle - 1 >= suballoc.offset);
-- const VkDeviceSize paddingBegin = (VkDeviceSize)request.allocHandle - suballoc.offset - 1;
-- VMA_ASSERT(suballoc.size >= paddingBegin + request.size);
-- const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - request.size;
--
-- // Unregister this free suballocation from m_FreeSuballocationsBySize and update
-- // it to become used.
-- UnregisterFreeSuballocation(request.item);
--
-- suballoc.offset = (VkDeviceSize)request.allocHandle - 1;
-- suballoc.size = request.size;
-- suballoc.type = type;
-- suballoc.userData = userData;
--
-- // If there are any free bytes remaining at the end, insert new free suballocation after current one.
-- if (paddingEnd)
-- {
-- VmaSuballocation paddingSuballoc = {};
-- paddingSuballoc.offset = suballoc.offset + suballoc.size;
-- paddingSuballoc.size = paddingEnd;
-- paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-- VmaSuballocationList::iterator next = request.item;
-- ++next;
-- const VmaSuballocationList::iterator paddingEndItem =
-- m_Suballocations.insert(next, paddingSuballoc);
-- RegisterFreeSuballocation(paddingEndItem);
-- }
--
-- // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
-- if (paddingBegin)
-- {
-- VmaSuballocation paddingSuballoc = {};
-- paddingSuballoc.offset = suballoc.offset - paddingBegin;
-- paddingSuballoc.size = paddingBegin;
-- paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-- const VmaSuballocationList::iterator paddingBeginItem =
-- m_Suballocations.insert(request.item, paddingSuballoc);
-- RegisterFreeSuballocation(paddingBeginItem);
-- }
--
-- // Update totals.
-- m_FreeCount = m_FreeCount - 1;
-- if (paddingBegin > 0)
-- {
-- ++m_FreeCount;
-- }
-- if (paddingEnd > 0)
-- {
-- ++m_FreeCount;
-- }
-- m_SumFreeSize -= request.size;
--}
--
--void VmaBlockMetadata_Generic::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
--{
-- outInfo.offset = (VkDeviceSize)allocHandle - 1;
-- const VmaSuballocation& suballoc = *FindAtOffset(outInfo.offset);
-- outInfo.size = suballoc.size;
-- outInfo.pUserData = suballoc.userData;
--}
--
--void* VmaBlockMetadata_Generic::GetAllocationUserData(VmaAllocHandle allocHandle) const
--{
-- return FindAtOffset((VkDeviceSize)allocHandle - 1)->userData;
--}
--
--VmaAllocHandle VmaBlockMetadata_Generic::GetAllocationListBegin() const
--{
-- if (IsEmpty())
-- return VK_NULL_HANDLE;
--
-- for (const auto& suballoc : m_Suballocations)
-- {
-- if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-- return (VmaAllocHandle)(suballoc.offset + 1);
-- }
-- VMA_ASSERT(false && "Should contain at least 1 allocation!");
-- return VK_NULL_HANDLE;
--}
--
--VmaAllocHandle VmaBlockMetadata_Generic::GetNextAllocation(VmaAllocHandle prevAlloc) const
--{
-- VmaSuballocationList::const_iterator prev = FindAtOffset((VkDeviceSize)prevAlloc - 1);
--
-- for (VmaSuballocationList::const_iterator it = ++prev; it != m_Suballocations.end(); ++it)
-- {
-- if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
-- return (VmaAllocHandle)(it->offset + 1);
-- }
-- return VK_NULL_HANDLE;
--}
--
--void VmaBlockMetadata_Generic::Clear()
--{
-- const VkDeviceSize size = GetSize();
--
-- VMA_ASSERT(IsVirtual());
-- m_FreeCount = 1;
-- m_SumFreeSize = size;
-- m_Suballocations.clear();
-- m_FreeSuballocationsBySize.clear();
--
-- VmaSuballocation suballoc = {};
-- suballoc.offset = 0;
-- suballoc.size = size;
-- suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-- m_Suballocations.push_back(suballoc);
--
-- m_FreeSuballocationsBySize.push_back(m_Suballocations.begin());
--}
--
--void VmaBlockMetadata_Generic::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
--{
-- VmaSuballocation& suballoc = *FindAtOffset((VkDeviceSize)allocHandle - 1);
-- suballoc.userData = userData;
--}
--
--void VmaBlockMetadata_Generic::DebugLogAllAllocations() const
--{
-- for (const auto& suballoc : m_Suballocations)
-- {
-- if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-- DebugLogAllocation(suballoc.offset, suballoc.size, suballoc.userData);
-- }
--}
--
--VmaSuballocationList::iterator VmaBlockMetadata_Generic::FindAtOffset(VkDeviceSize offset) const
--{
-- VMA_HEAVY_ASSERT(!m_Suballocations.empty());
-- const VkDeviceSize last = m_Suballocations.rbegin()->offset;
-- if (last == offset)
-- return m_Suballocations.rbegin().drop_const();
-- const VkDeviceSize first = m_Suballocations.begin()->offset;
-- if (first == offset)
-- return m_Suballocations.begin().drop_const();
--
-- const size_t suballocCount = m_Suballocations.size();
-- const VkDeviceSize step = (last - first + m_Suballocations.begin()->size) / suballocCount;
-- auto findSuballocation = [&](auto begin, auto end) -> VmaSuballocationList::iterator
-- {
-- for (auto suballocItem = begin;
-- suballocItem != end;
-- ++suballocItem)
-- {
-- if (suballocItem->offset == offset)
-- return suballocItem.drop_const();
-- }
-- VMA_ASSERT(false && "Not found!");
-- return m_Suballocations.end().drop_const();
-- };
-- // If requested offset is closer to the end of range, search from the end
-- if (offset - first > suballocCount * step / 2)
-- {
-- return findSuballocation(m_Suballocations.rbegin(), m_Suballocations.rend());
-- }
-- return findSuballocation(m_Suballocations.begin(), m_Suballocations.end());
--}
--
--bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
--{
-- VkDeviceSize lastSize = 0;
-- for (size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
-- {
-- const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
--
-- VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
-- VMA_VALIDATE(it->size >= lastSize);
-- lastSize = it->size;
-- }
-- return true;
--}
--
--bool VmaBlockMetadata_Generic::CheckAllocation(
-- VkDeviceSize allocSize,
-- VkDeviceSize allocAlignment,
-- VmaSuballocationType allocType,
-- VmaSuballocationList::const_iterator suballocItem,
-- VmaAllocHandle* pAllocHandle) const
--{
-- VMA_ASSERT(allocSize > 0);
-- VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
-- VMA_ASSERT(suballocItem != m_Suballocations.cend());
-- VMA_ASSERT(pAllocHandle != VMA_NULL);
--
-- const VkDeviceSize debugMargin = GetDebugMargin();
-- const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
--
-- const VmaSuballocation& suballoc = *suballocItem;
-- VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
--
-- // Size of this suballocation is too small for this request: Early return.
-- if (suballoc.size < allocSize)
-- {
-- return false;
-- }
--
-- // Start from offset equal to beginning of this suballocation.
-- VkDeviceSize offset = suballoc.offset + (suballocItem == m_Suballocations.cbegin() ? 0 : GetDebugMargin());
--
-- // Apply debugMargin from the end of previous alloc.
-- if (debugMargin > 0)
-- {
-- offset += debugMargin;
-- }
--
-- // Apply alignment.
-- offset = VmaAlignUp(offset, allocAlignment);
--
-- // Check previous suballocations for BufferImageGranularity conflicts.
-- // Make bigger alignment if necessary.
-- if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
-- {
-- bool bufferImageGranularityConflict = false;
-- VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
-- while (prevSuballocItem != m_Suballocations.cbegin())
-- {
-- --prevSuballocItem;
-- const VmaSuballocation& prevSuballoc = *prevSuballocItem;
-- if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, offset, bufferImageGranularity))
-- {
-- if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
-- {
-- bufferImageGranularityConflict = true;
-- break;
-- }
-- }
-- else
-- // Already on previous page.
-- break;
-- }
-- if (bufferImageGranularityConflict)
-- {
-- offset = VmaAlignUp(offset, bufferImageGranularity);
-- }
-- }
--
-- // Calculate padding at the beginning based on current offset.
-- const VkDeviceSize paddingBegin = offset - suballoc.offset;
--
-- // Fail if requested size plus margin after is bigger than size of this suballocation.
-- if (paddingBegin + allocSize + debugMargin > suballoc.size)
-- {
-- return false;
-- }
--
-- // Check next suballocations for BufferImageGranularity conflicts.
-- // If conflict exists, allocation cannot be made here.
-- if (allocSize % bufferImageGranularity || offset % bufferImageGranularity)
-- {
-- VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
-- ++nextSuballocItem;
-- while (nextSuballocItem != m_Suballocations.cend())
-- {
-- const VmaSuballocation& nextSuballoc = *nextSuballocItem;
-- if (VmaBlocksOnSamePage(offset, allocSize, nextSuballoc.offset, bufferImageGranularity))
-- {
-- if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
-- {
-- return false;
-- }
-- }
-- else
-- {
-- // Already on next page.
-- break;
-- }
-- ++nextSuballocItem;
-- }
-- }
--
-- *pAllocHandle = (VmaAllocHandle)(offset + 1);
-- // All tests passed: Success. pAllocHandle is already filled.
-- return true;
--}
--
--void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
--{
-- VMA_ASSERT(item != m_Suballocations.end());
-- VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
--
-- VmaSuballocationList::iterator nextItem = item;
-- ++nextItem;
-- VMA_ASSERT(nextItem != m_Suballocations.end());
-- VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
--
-- item->size += nextItem->size;
-- --m_FreeCount;
-- m_Suballocations.erase(nextItem);
--}
--
--VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
--{
-- // Change this suballocation to be marked as free.
-- VmaSuballocation& suballoc = *suballocItem;
-- suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-- suballoc.userData = VMA_NULL;
--
-- // Update totals.
-- ++m_FreeCount;
-- m_SumFreeSize += suballoc.size;
--
-- // Merge with previous and/or next suballocation if it's also free.
-- bool mergeWithNext = false;
-- bool mergeWithPrev = false;
--
-- VmaSuballocationList::iterator nextItem = suballocItem;
-- ++nextItem;
-- if ((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
-- {
-- mergeWithNext = true;
-- }
--
-- VmaSuballocationList::iterator prevItem = suballocItem;
-- if (suballocItem != m_Suballocations.begin())
-- {
-- --prevItem;
-- if (prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
-- {
-- mergeWithPrev = true;
-- }
-- }
--
-- if (mergeWithNext)
-- {
-- UnregisterFreeSuballocation(nextItem);
-- MergeFreeWithNext(suballocItem);
-- }
--
-- if (mergeWithPrev)
-- {
-- UnregisterFreeSuballocation(prevItem);
-- MergeFreeWithNext(prevItem);
-- RegisterFreeSuballocation(prevItem);
-- return prevItem;
-- }
-- else
-- {
-- RegisterFreeSuballocation(suballocItem);
-- return suballocItem;
-- }
--}
--
--void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
--{
-- VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
-- VMA_ASSERT(item->size > 0);
--
-- // You may want to enable this validation at the beginning or at the end of
-- // this function, depending on what do you want to check.
-- VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
--
-- if (m_FreeSuballocationsBySize.empty())
-- {
-- m_FreeSuballocationsBySize.push_back(item);
-- }
-- else
-- {
-- VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
-- }
--
-- //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
--}
--
--void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
--{
-- VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
-- VMA_ASSERT(item->size > 0);
--
-- // You may want to enable this validation at the beginning or at the end of
-- // this function, depending on what do you want to check.
-- VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
--
-- VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
-- m_FreeSuballocationsBySize.data(),
-- m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
-- item,
-- VmaSuballocationItemSizeLess());
-- for (size_t index = it - m_FreeSuballocationsBySize.data();
-- index < m_FreeSuballocationsBySize.size();
-- ++index)
-- {
-- if (m_FreeSuballocationsBySize[index] == item)
-- {
-- VmaVectorRemove(m_FreeSuballocationsBySize, index);
-- return;
-- }
-- VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
-- }
-- VMA_ASSERT(0 && "Not found.");
--
-- //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
--}
--#endif // _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS
--#endif // _VMA_BLOCK_METADATA_GENERIC
--#endif // #if 0
--
--#ifndef _VMA_BLOCK_METADATA_LINEAR
--/*
--Allocations and their references in internal data structure look like this:
--
--if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
--
-- 0 +-------+
-- | |
-- | |
-- | |
-- +-------+
-- | Alloc | 1st[m_1stNullItemsBeginCount]
-- +-------+
-- | Alloc | 1st[m_1stNullItemsBeginCount + 1]
-- +-------+
-- | ... |
-- +-------+
-- | Alloc | 1st[1st.size() - 1]
-- +-------+
-- | |
-- | |
-- | |
--GetSize() +-------+
--
--if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
--
-- 0 +-------+
-- | Alloc | 2nd[0]
-- +-------+
-- | Alloc | 2nd[1]
-- +-------+
-- | ... |
-- +-------+
-- | Alloc | 2nd[2nd.size() - 1]
-- +-------+
-- | |
-- | |
-- | |
-- +-------+
-- | Alloc | 1st[m_1stNullItemsBeginCount]
-- +-------+
-- | Alloc | 1st[m_1stNullItemsBeginCount + 1]
-- +-------+
-- | ... |
-- +-------+
-- | Alloc | 1st[1st.size() - 1]
-- +-------+
-- | |
--GetSize() +-------+
--
--if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
--
-- 0 +-------+
-- | |
-- | |
-- | |
-- +-------+
-- | Alloc | 1st[m_1stNullItemsBeginCount]
-- +-------+
-- | Alloc | 1st[m_1stNullItemsBeginCount + 1]
-- +-------+
-- | ... |
-- +-------+
-- | Alloc | 1st[1st.size() - 1]
-- +-------+
-- | |
-- | |
-- | |
-- +-------+
-- | Alloc | 2nd[2nd.size() - 1]
-- +-------+
-- | ... |
-- +-------+
-- | Alloc | 2nd[1]
-- +-------+
-- | Alloc | 2nd[0]
--GetSize() +-------+
--
--*/
--class VmaBlockMetadata_Linear : public VmaBlockMetadata
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Linear)
--public:
-- VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
-- VkDeviceSize bufferImageGranularity, bool isVirtual);
-- virtual ~VmaBlockMetadata_Linear() = default;
--
-- VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; }
-- bool IsEmpty() const override { return GetAllocationCount() == 0; }
-- VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }
--
-- void Init(VkDeviceSize size) override;
-- bool Validate() const override;
-- size_t GetAllocationCount() const override;
-- size_t GetFreeRegionsCount() const override;
--
-- void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
-- void AddStatistics(VmaStatistics& inoutStats) const override;
--
--#if VMA_STATS_STRING_ENABLED
-- void PrintDetailedMap(class VmaJsonWriter& json) const override;
--#endif
--
-- bool CreateAllocationRequest(
-- VkDeviceSize allocSize,
-- VkDeviceSize allocAlignment,
-- bool upperAddress,
-- VmaSuballocationType allocType,
-- uint32_t strategy,
-- VmaAllocationRequest* pAllocationRequest) override;
--
-- VkResult CheckCorruption(const void* pBlockData) override;
--
-- void Alloc(
-- const VmaAllocationRequest& request,
-- VmaSuballocationType type,
-- void* userData) override;
--
-- void Free(VmaAllocHandle allocHandle) override;
-- void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
-- void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
-- VmaAllocHandle GetAllocationListBegin() const override;
-- VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
-- VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
-- void Clear() override;
-- void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
-- void DebugLogAllAllocations() const override;
--
--private:
-- /*
-- There are two suballocation vectors, used in ping-pong way.
-- The one with index m_1stVectorIndex is called 1st.
-- The one with index (m_1stVectorIndex ^ 1) is called 2nd.
-- 2nd can be non-empty only when 1st is not empty.
-- When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
-- */
-- typedef VmaVector<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> SuballocationVectorType;
--
-- enum SECOND_VECTOR_MODE
-- {
-- SECOND_VECTOR_EMPTY,
-- /*
-- Suballocations in 2nd vector are created later than the ones in 1st, but they
-- all have smaller offset.
-- */
-- SECOND_VECTOR_RING_BUFFER,
-- /*
-- Suballocations in 2nd vector are upper side of double stack.
-- They all have offsets higher than those in 1st vector.
-- Top of this stack means smaller offsets, but higher indices in this vector.
-- */
-- SECOND_VECTOR_DOUBLE_STACK,
-- };
--
-- VkDeviceSize m_SumFreeSize;
-- SuballocationVectorType m_Suballocations0, m_Suballocations1;
-- uint32_t m_1stVectorIndex;
-- SECOND_VECTOR_MODE m_2ndVectorMode;
-- // Number of items in 1st vector with hAllocation = null at the beginning.
-- size_t m_1stNullItemsBeginCount;
-- // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
-- size_t m_1stNullItemsMiddleCount;
-- // Number of items in 2nd vector with hAllocation = null.
-- size_t m_2ndNullItemsCount;
--
-- SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
-- SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
-- const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
-- const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
--
-- VmaSuballocation& FindSuballocation(VkDeviceSize offset) const;
-- bool ShouldCompact1st() const;
-- void CleanupAfterFree();
--
-- bool CreateAllocationRequest_LowerAddress(
-- VkDeviceSize allocSize,
-- VkDeviceSize allocAlignment,
-- VmaSuballocationType allocType,
-- uint32_t strategy,
-- VmaAllocationRequest* pAllocationRequest);
-- bool CreateAllocationRequest_UpperAddress(
-- VkDeviceSize allocSize,
-- VkDeviceSize allocAlignment,
-- VmaSuballocationType allocType,
-- uint32_t strategy,
-- VmaAllocationRequest* pAllocationRequest);
--};
--
--#ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
--VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
-- VkDeviceSize bufferImageGranularity, bool isVirtual)
-- : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
-- m_SumFreeSize(0),
-- m_Suballocations0(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
-- m_Suballocations1(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
-- m_1stVectorIndex(0),
-- m_2ndVectorMode(SECOND_VECTOR_EMPTY),
-- m_1stNullItemsBeginCount(0),
-- m_1stNullItemsMiddleCount(0),
-- m_2ndNullItemsCount(0) {}
--
--void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
--{
-- VmaBlockMetadata::Init(size);
-- m_SumFreeSize = size;
--}
--
--bool VmaBlockMetadata_Linear::Validate() const
--{
-- const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-- const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
--
-- VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
-- VMA_VALIDATE(!suballocations1st.empty() ||
-- suballocations2nd.empty() ||
-- m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
--
-- if (!suballocations1st.empty())
-- {
-- // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
-- VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != VMA_SUBALLOCATION_TYPE_FREE);
-- // Null item at the end should be just pop_back().
-- VMA_VALIDATE(suballocations1st.back().type != VMA_SUBALLOCATION_TYPE_FREE);
-- }
-- if (!suballocations2nd.empty())
-- {
-- // Null item at the end should be just pop_back().
-- VMA_VALIDATE(suballocations2nd.back().type != VMA_SUBALLOCATION_TYPE_FREE);
-- }
--
-- VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
-- VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
--
-- VkDeviceSize sumUsedSize = 0;
-- const size_t suballoc1stCount = suballocations1st.size();
-- const VkDeviceSize debugMargin = GetDebugMargin();
-- VkDeviceSize offset = 0;
--
-- if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-- {
-- const size_t suballoc2ndCount = suballocations2nd.size();
-- size_t nullItem2ndCount = 0;
-- for (size_t i = 0; i < suballoc2ndCount; ++i)
-- {
-- const VmaSuballocation& suballoc = suballocations2nd[i];
-- const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
--
-- VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
-- if (!IsVirtual())
-- {
-- VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
-- }
-- VMA_VALIDATE(suballoc.offset >= offset);
--
-- if (!currFree)
-- {
-- if (!IsVirtual())
-- {
-- VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
-- VMA_VALIDATE(alloc->GetSize() == suballoc.size);
-- }
-- sumUsedSize += suballoc.size;
-- }
-- else
-- {
-- ++nullItem2ndCount;
-- }
--
-- offset = suballoc.offset + suballoc.size + debugMargin;
-- }
--
-- VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
-- }
--
-- for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
-- {
-- const VmaSuballocation& suballoc = suballocations1st[i];
-- VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
-- suballoc.userData == VMA_NULL);
-- }
--
-- size_t nullItem1stCount = m_1stNullItemsBeginCount;
--
-- for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
-- {
-- const VmaSuballocation& suballoc = suballocations1st[i];
-- const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
--
-- VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
-- if (!IsVirtual())
-- {
-- VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
-- }
-- VMA_VALIDATE(suballoc.offset >= offset);
-- VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
--
-- if (!currFree)
-- {
-- if (!IsVirtual())
-- {
-- VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
-- VMA_VALIDATE(alloc->GetSize() == suballoc.size);
-- }
-- sumUsedSize += suballoc.size;
-- }
-- else
-- {
-- ++nullItem1stCount;
-- }
--
-- offset = suballoc.offset + suballoc.size + debugMargin;
-- }
-- VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
--
-- if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-- {
-- const size_t suballoc2ndCount = suballocations2nd.size();
-- size_t nullItem2ndCount = 0;
-- for (size_t i = suballoc2ndCount; i--; )
-- {
-- const VmaSuballocation& suballoc = suballocations2nd[i];
-- const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
--
-- VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
-- if (!IsVirtual())
-- {
-- VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
-- }
-- VMA_VALIDATE(suballoc.offset >= offset);
--
-- if (!currFree)
-- {
-- if (!IsVirtual())
-- {
-- VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
-- VMA_VALIDATE(alloc->GetSize() == suballoc.size);
-- }
-- sumUsedSize += suballoc.size;
-- }
-- else
-- {
-- ++nullItem2ndCount;
-- }
--
-- offset = suballoc.offset + suballoc.size + debugMargin;
-- }
--
-- VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
-- }
--
-- VMA_VALIDATE(offset <= GetSize());
-- VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
--
-- return true;
--}
--
--size_t VmaBlockMetadata_Linear::GetAllocationCount() const
--{
-- return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount +
-- AccessSuballocations2nd().size() - m_2ndNullItemsCount;
--}
--
--size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const
--{
-- // Function only used for defragmentation, which is disabled for this algorithm
-- VMA_ASSERT(0);
-- return SIZE_MAX;
--}
--
--void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
--{
-- const VkDeviceSize size = GetSize();
-- const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-- const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-- const size_t suballoc1stCount = suballocations1st.size();
-- const size_t suballoc2ndCount = suballocations2nd.size();
--
-- inoutStats.statistics.blockCount++;
-- inoutStats.statistics.blockBytes += size;
--
-- VkDeviceSize lastOffset = 0;
--
-- if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-- {
-- const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
-- size_t nextAlloc2ndIndex = 0;
-- while (lastOffset < freeSpace2ndTo1stEnd)
-- {
-- // Find next non-null allocation or move nextAllocIndex to the end.
-- while (nextAlloc2ndIndex < suballoc2ndCount &&
-- suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
-- {
-- ++nextAlloc2ndIndex;
-- }
--
-- // Found non-null allocation.
-- if (nextAlloc2ndIndex < suballoc2ndCount)
-- {
-- const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
--
-- // 1. Process free space before this allocation.
-- if (lastOffset < suballoc.offset)
-- {
-- // There is free space from lastOffset to suballoc.offset.
-- const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-- VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
-- }
--
-- // 2. Process this allocation.
-- // There is allocation with suballoc.offset, suballoc.size.
-- VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
--
-- // 3. Prepare for next iteration.
-- lastOffset = suballoc.offset + suballoc.size;
-- ++nextAlloc2ndIndex;
-- }
-- // We are at the end.
-- else
-- {
-- // There is free space from lastOffset to freeSpace2ndTo1stEnd.
-- if (lastOffset < freeSpace2ndTo1stEnd)
-- {
-- const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
-- VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
-- }
--
-- // End of loop.
-- lastOffset = freeSpace2ndTo1stEnd;
-- }
-- }
-- }
--
-- size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
-- const VkDeviceSize freeSpace1stTo2ndEnd =
-- m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
-- while (lastOffset < freeSpace1stTo2ndEnd)
-- {
-- // Find next non-null allocation or move nextAllocIndex to the end.
-- while (nextAlloc1stIndex < suballoc1stCount &&
-- suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
-- {
-- ++nextAlloc1stIndex;
-- }
--
-- // Found non-null allocation.
-- if (nextAlloc1stIndex < suballoc1stCount)
-- {
-- const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
--
-- // 1. Process free space before this allocation.
-- if (lastOffset < suballoc.offset)
-- {
-- // There is free space from lastOffset to suballoc.offset.
-- const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-- VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
-- }
--
-- // 2. Process this allocation.
-- // There is allocation with suballoc.offset, suballoc.size.
-- VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
--
-- // 3. Prepare for next iteration.
-- lastOffset = suballoc.offset + suballoc.size;
-- ++nextAlloc1stIndex;
-- }
-- // We are at the end.
-- else
-- {
-- // There is free space from lastOffset to freeSpace1stTo2ndEnd.
-- if (lastOffset < freeSpace1stTo2ndEnd)
-- {
-- const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
-- VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
-- }
--
-- // End of loop.
-- lastOffset = freeSpace1stTo2ndEnd;
-- }
-- }
--
-- if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-- {
-- size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
-- while (lastOffset < size)
-- {
-- // Find next non-null allocation or move nextAllocIndex to the end.
-- while (nextAlloc2ndIndex != SIZE_MAX &&
-- suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
-- {
-- --nextAlloc2ndIndex;
-- }
--
-- // Found non-null allocation.
-- if (nextAlloc2ndIndex != SIZE_MAX)
-- {
-- const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
--
-- // 1. Process free space before this allocation.
-- if (lastOffset < suballoc.offset)
-- {
-- // There is free space from lastOffset to suballoc.offset.
-- const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-- VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
-- }
--
-- // 2. Process this allocation.
-- // There is allocation with suballoc.offset, suballoc.size.
-- VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
--
-- // 3. Prepare for next iteration.
-- lastOffset = suballoc.offset + suballoc.size;
-- --nextAlloc2ndIndex;
-- }
-- // We are at the end.
-- else
-- {
-- // There is free space from lastOffset to size.
-- if (lastOffset < size)
-- {
-- const VkDeviceSize unusedRangeSize = size - lastOffset;
-- VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
-- }
--
-- // End of loop.
-- lastOffset = size;
-- }
-- }
-- }
--}
--
--void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const
--{
-- const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-- const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-- const VkDeviceSize size = GetSize();
-- const size_t suballoc1stCount = suballocations1st.size();
-- const size_t suballoc2ndCount = suballocations2nd.size();
--
-- inoutStats.blockCount++;
-- inoutStats.blockBytes += size;
-- inoutStats.allocationBytes += size - m_SumFreeSize;
--
-- VkDeviceSize lastOffset = 0;
--
-- if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-- {
-- const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
-- size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
-- while (lastOffset < freeSpace2ndTo1stEnd)
-- {
-- // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-- while (nextAlloc2ndIndex < suballoc2ndCount &&
-- suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
-- {
-- ++nextAlloc2ndIndex;
-- }
--
-- // Found non-null allocation.
-- if (nextAlloc2ndIndex < suballoc2ndCount)
-- {
-- const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
--
-- // Process this allocation.
-- // There is allocation with suballoc.offset, suballoc.size.
-- ++inoutStats.allocationCount;
--
-- // Prepare for next iteration.
-- lastOffset = suballoc.offset + suballoc.size;
-- ++nextAlloc2ndIndex;
-- }
-- // We are at the end.
-- else
-- {
-- // End of loop.
-- lastOffset = freeSpace2ndTo1stEnd;
-- }
-- }
-- }
--
-- size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
-- const VkDeviceSize freeSpace1stTo2ndEnd =
-- m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
-- while (lastOffset < freeSpace1stTo2ndEnd)
-- {
-- // Find next non-null allocation or move nextAllocIndex to the end.
-- while (nextAlloc1stIndex < suballoc1stCount &&
-- suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
-- {
-- ++nextAlloc1stIndex;
-- }
--
-- // Found non-null allocation.
-- if (nextAlloc1stIndex < suballoc1stCount)
-- {
-- const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
--
-- // Process this allocation.
-- // There is allocation with suballoc.offset, suballoc.size.
-- ++inoutStats.allocationCount;
--
-- // Prepare for next iteration.
-- lastOffset = suballoc.offset + suballoc.size;
-- ++nextAlloc1stIndex;
-- }
-- // We are at the end.
-- else
-- {
-- // End of loop.
-- lastOffset = freeSpace1stTo2ndEnd;
-- }
-- }
--
-- if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-- {
-- size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
-- while (lastOffset < size)
-- {
-- // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-- while (nextAlloc2ndIndex != SIZE_MAX &&
-- suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
-- {
-- --nextAlloc2ndIndex;
-- }
--
-- // Found non-null allocation.
-- if (nextAlloc2ndIndex != SIZE_MAX)
-- {
-- const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
--
-- // Process this allocation.
-- // There is allocation with suballoc.offset, suballoc.size.
-- ++inoutStats.allocationCount;
--
-- // Prepare for next iteration.
-- lastOffset = suballoc.offset + suballoc.size;
-- --nextAlloc2ndIndex;
-- }
-- // We are at the end.
-- else
-- {
-- // End of loop.
-- lastOffset = size;
-- }
-- }
-- }
--}
--
--#if VMA_STATS_STRING_ENABLED
--void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
--{
-- const VkDeviceSize size = GetSize();
-- const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-- const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-- const size_t suballoc1stCount = suballocations1st.size();
-- const size_t suballoc2ndCount = suballocations2nd.size();
--
-- // FIRST PASS
--
-- size_t unusedRangeCount = 0;
-- VkDeviceSize usedBytes = 0;
--
-- VkDeviceSize lastOffset = 0;
--
-- size_t alloc2ndCount = 0;
-- if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-- {
-- const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
-- size_t nextAlloc2ndIndex = 0;
-- while (lastOffset < freeSpace2ndTo1stEnd)
-- {
-- // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-- while (nextAlloc2ndIndex < suballoc2ndCount &&
-- suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
-- {
-- ++nextAlloc2ndIndex;
-- }
--
-- // Found non-null allocation.
-- if (nextAlloc2ndIndex < suballoc2ndCount)
-- {
-- const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
--
-- // 1. Process free space before this allocation.
-- if (lastOffset < suballoc.offset)
-- {
-- // There is free space from lastOffset to suballoc.offset.
-- ++unusedRangeCount;
-- }
--
-- // 2. Process this allocation.
-- // There is allocation with suballoc.offset, suballoc.size.
-- ++alloc2ndCount;
-- usedBytes += suballoc.size;
--
-- // 3. Prepare for next iteration.
-- lastOffset = suballoc.offset + suballoc.size;
-- ++nextAlloc2ndIndex;
-- }
-- // We are at the end.
-- else
-- {
-- if (lastOffset < freeSpace2ndTo1stEnd)
-- {
-- // There is free space from lastOffset to freeSpace2ndTo1stEnd.
-- ++unusedRangeCount;
-- }
--
-- // End of loop.
-- lastOffset = freeSpace2ndTo1stEnd;
-- }
-- }
-- }
--
-- size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
-- size_t alloc1stCount = 0;
-- const VkDeviceSize freeSpace1stTo2ndEnd =
-- m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
-- while (lastOffset < freeSpace1stTo2ndEnd)
-- {
-- // Find next non-null allocation or move nextAllocIndex to the end.
-- while (nextAlloc1stIndex < suballoc1stCount &&
-- suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
-- {
-- ++nextAlloc1stIndex;
-- }
--
-- // Found non-null allocation.
-- if (nextAlloc1stIndex < suballoc1stCount)
-- {
-- const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
--
-- // 1. Process free space before this allocation.
-- if (lastOffset < suballoc.offset)
-- {
-- // There is free space from lastOffset to suballoc.offset.
-- ++unusedRangeCount;
-- }
--
-- // 2. Process this allocation.
-- // There is allocation with suballoc.offset, suballoc.size.
-- ++alloc1stCount;
-- usedBytes += suballoc.size;
--
-- // 3. Prepare for next iteration.
-- lastOffset = suballoc.offset + suballoc.size;
-- ++nextAlloc1stIndex;
-- }
-- // We are at the end.
-- else
-- {
-- if (lastOffset < size)
-- {
-- // There is free space from lastOffset to freeSpace1stTo2ndEnd.
-- ++unusedRangeCount;
-- }
--
-- // End of loop.
-- lastOffset = freeSpace1stTo2ndEnd;
-- }
-- }
--
-- if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-- {
-- size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
-- while (lastOffset < size)
-- {
-- // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-- while (nextAlloc2ndIndex != SIZE_MAX &&
-- suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
-- {
-- --nextAlloc2ndIndex;
-- }
--
-- // Found non-null allocation.
-- if (nextAlloc2ndIndex != SIZE_MAX)
-- {
-- const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
--
-- // 1. Process free space before this allocation.
-- if (lastOffset < suballoc.offset)
-- {
-- // There is free space from lastOffset to suballoc.offset.
-- ++unusedRangeCount;
-- }
--
-- // 2. Process this allocation.
-- // There is allocation with suballoc.offset, suballoc.size.
-- ++alloc2ndCount;
-- usedBytes += suballoc.size;
--
-- // 3. Prepare for next iteration.
-- lastOffset = suballoc.offset + suballoc.size;
-- --nextAlloc2ndIndex;
-- }
-- // We are at the end.
-- else
-- {
-- if (lastOffset < size)
-- {
-- // There is free space from lastOffset to size.
-- ++unusedRangeCount;
-- }
--
-- // End of loop.
-- lastOffset = size;
-- }
-- }
-- }
--
-- const VkDeviceSize unusedBytes = size - usedBytes;
-- PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
--
-- // SECOND PASS
-- lastOffset = 0;
--
-- if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-- {
-- const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
-- size_t nextAlloc2ndIndex = 0;
-- while (lastOffset < freeSpace2ndTo1stEnd)
-- {
-- // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-- while (nextAlloc2ndIndex < suballoc2ndCount &&
-- suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
-- {
-- ++nextAlloc2ndIndex;
-- }
--
-- // Found non-null allocation.
-- if (nextAlloc2ndIndex < suballoc2ndCount)
-- {
-- const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
--
-- // 1. Process free space before this allocation.
-- if (lastOffset < suballoc.offset)
-- {
-- // There is free space from lastOffset to suballoc.offset.
-- const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-- PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
-- }
--
-- // 2. Process this allocation.
-- // There is allocation with suballoc.offset, suballoc.size.
-- PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
--
-- // 3. Prepare for next iteration.
-- lastOffset = suballoc.offset + suballoc.size;
-- ++nextAlloc2ndIndex;
-- }
-- // We are at the end.
-- else
-- {
-- if (lastOffset < freeSpace2ndTo1stEnd)
-- {
-- // There is free space from lastOffset to freeSpace2ndTo1stEnd.
-- const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
-- PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
-- }
--
-- // End of loop.
-- lastOffset = freeSpace2ndTo1stEnd;
-- }
-- }
-- }
--
-- nextAlloc1stIndex = m_1stNullItemsBeginCount;
-- while (lastOffset < freeSpace1stTo2ndEnd)
-- {
-- // Find next non-null allocation or move nextAllocIndex to the end.
-- while (nextAlloc1stIndex < suballoc1stCount &&
-- suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
-- {
-- ++nextAlloc1stIndex;
-- }
--
-- // Found non-null allocation.
-- if (nextAlloc1stIndex < suballoc1stCount)
-- {
-- const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
--
-- // 1. Process free space before this allocation.
-- if (lastOffset < suballoc.offset)
-- {
-- // There is free space from lastOffset to suballoc.offset.
-- const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-- PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
-- }
--
-- // 2. Process this allocation.
-- // There is allocation with suballoc.offset, suballoc.size.
-- PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
--
-- // 3. Prepare for next iteration.
-- lastOffset = suballoc.offset + suballoc.size;
-- ++nextAlloc1stIndex;
-- }
-- // We are at the end.
-- else
-- {
-- if (lastOffset < freeSpace1stTo2ndEnd)
-- {
-- // There is free space from lastOffset to freeSpace1stTo2ndEnd.
-- const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
-- PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
-- }
--
-- // End of loop.
-- lastOffset = freeSpace1stTo2ndEnd;
-- }
-- }
--
-- if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-- {
-- size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
-- while (lastOffset < size)
-- {
-- // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-- while (nextAlloc2ndIndex != SIZE_MAX &&
-- suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
-- {
-- --nextAlloc2ndIndex;
-- }
--
-- // Found non-null allocation.
-- if (nextAlloc2ndIndex != SIZE_MAX)
-- {
-- const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
--
-- // 1. Process free space before this allocation.
-- if (lastOffset < suballoc.offset)
-- {
-- // There is free space from lastOffset to suballoc.offset.
-- const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-- PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
-- }
--
-- // 2. Process this allocation.
-- // There is allocation with suballoc.offset, suballoc.size.
-- PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
--
-- // 3. Prepare for next iteration.
-- lastOffset = suballoc.offset + suballoc.size;
-- --nextAlloc2ndIndex;
-- }
-- // We are at the end.
-- else
-- {
-- if (lastOffset < size)
-- {
-- // There is free space from lastOffset to size.
-- const VkDeviceSize unusedRangeSize = size - lastOffset;
-- PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
-- }
--
-- // End of loop.
-- lastOffset = size;
-- }
-- }
-- }
--
-- PrintDetailedMap_End(json);
--}
--#endif // VMA_STATS_STRING_ENABLED
--
--bool VmaBlockMetadata_Linear::CreateAllocationRequest(
-- VkDeviceSize allocSize,
-- VkDeviceSize allocAlignment,
-- bool upperAddress,
-- VmaSuballocationType allocType,
-- uint32_t strategy,
-- VmaAllocationRequest* pAllocationRequest)
--{
-- VMA_ASSERT(allocSize > 0);
-- VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
-- VMA_ASSERT(pAllocationRequest != VMA_NULL);
-- VMA_HEAVY_ASSERT(Validate());
-- pAllocationRequest->size = allocSize;
-- return upperAddress ?
-- CreateAllocationRequest_UpperAddress(
-- allocSize, allocAlignment, allocType, strategy, pAllocationRequest) :
-- CreateAllocationRequest_LowerAddress(
-- allocSize, allocAlignment, allocType, strategy, pAllocationRequest);
--}
--
--VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
--{
-- VMA_ASSERT(!IsVirtual());
-- SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-- for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
-- {
-- const VmaSuballocation& suballoc = suballocations1st[i];
-- if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-- {
-- if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
-- {
-- VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
-- return VK_ERROR_UNKNOWN_COPY;
-- }
-- }
-- }
--
-- SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-- for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
-- {
-- const VmaSuballocation& suballoc = suballocations2nd[i];
-- if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-- {
-- if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
-- {
-- VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
-- return VK_ERROR_UNKNOWN_COPY;
-- }
-- }
-- }
--
-- return VK_SUCCESS;
--}
--
--void VmaBlockMetadata_Linear::Alloc(
-- const VmaAllocationRequest& request,
-- VmaSuballocationType type,
-- void* userData)
--{
-- const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;
-- const VmaSuballocation newSuballoc = { offset, request.size, userData, type };
--
-- switch (request.type)
-- {
-- case VmaAllocationRequestType::UpperAddress:
-- {
-- VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
-- "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
-- SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-- suballocations2nd.push_back(newSuballoc);
-- m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
-- }
-- break;
-- case VmaAllocationRequestType::EndOf1st:
-- {
-- SuballocationVectorType& suballocations1st = AccessSuballocations1st();
--
-- VMA_ASSERT(suballocations1st.empty() ||
-- offset >= suballocations1st.back().offset + suballocations1st.back().size);
-- // Check if it fits before the end of the block.
-- VMA_ASSERT(offset + request.size <= GetSize());
--
-- suballocations1st.push_back(newSuballoc);
-- }
-- break;
-- case VmaAllocationRequestType::EndOf2nd:
-- {
-- SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-- // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
-- VMA_ASSERT(!suballocations1st.empty() &&
-- offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset);
-- SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
--
-- switch (m_2ndVectorMode)
-- {
-- case SECOND_VECTOR_EMPTY:
-- // First allocation from second part ring buffer.
-- VMA_ASSERT(suballocations2nd.empty());
-- m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
-- break;
-- case SECOND_VECTOR_RING_BUFFER:
-- // 2-part ring buffer is already started.
-- VMA_ASSERT(!suballocations2nd.empty());
-- break;
-- case SECOND_VECTOR_DOUBLE_STACK:
-- VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
-- break;
-- default:
-- VMA_ASSERT(0);
-- }
--
-- suballocations2nd.push_back(newSuballoc);
-- }
-- break;
-- default:
-- VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
-- }
--
-- m_SumFreeSize -= newSuballoc.size;
--}
--
--void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle)
--{
-- SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-- SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-- VkDeviceSize offset = (VkDeviceSize)allocHandle - 1;
--
-- if (!suballocations1st.empty())
-- {
-- // First allocation: Mark it as next empty at the beginning.
-- VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
-- if (firstSuballoc.offset == offset)
-- {
-- firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-- firstSuballoc.userData = VMA_NULL;
-- m_SumFreeSize += firstSuballoc.size;
-- ++m_1stNullItemsBeginCount;
-- CleanupAfterFree();
-- return;
-- }
-- }
--
-- // Last allocation in 2-part ring buffer or top of upper stack (same logic).
-- if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
-- m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-- {
-- VmaSuballocation& lastSuballoc = suballocations2nd.back();
-- if (lastSuballoc.offset == offset)
-- {
-- m_SumFreeSize += lastSuballoc.size;
-- suballocations2nd.pop_back();
-- CleanupAfterFree();
-- return;
-- }
-- }
-- // Last allocation in 1st vector.
-- else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY)
-- {
-- VmaSuballocation& lastSuballoc = suballocations1st.back();
-- if (lastSuballoc.offset == offset)
-- {
-- m_SumFreeSize += lastSuballoc.size;
-- suballocations1st.pop_back();
-- CleanupAfterFree();
-- return;
-- }
-- }
--
-- VmaSuballocation refSuballoc;
-- refSuballoc.offset = offset;
-- // Rest of members stays uninitialized intentionally for better performance.
--
-- // Item from the middle of 1st vector.
-- {
-- const SuballocationVectorType::iterator it = VmaBinaryFindSorted(
-- suballocations1st.begin() + m_1stNullItemsBeginCount,
-- suballocations1st.end(),
-- refSuballoc,
-- VmaSuballocationOffsetLess());
-- if (it != suballocations1st.end())
-- {
-- it->type = VMA_SUBALLOCATION_TYPE_FREE;
-- it->userData = VMA_NULL;
-- ++m_1stNullItemsMiddleCount;
-- m_SumFreeSize += it->size;
-- CleanupAfterFree();
-- return;
-- }
-- }
--
-- if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
-- {
-- // Item from the middle of 2nd vector.
-- const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
-- VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
-- VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
-- if (it != suballocations2nd.end())
-- {
-- it->type = VMA_SUBALLOCATION_TYPE_FREE;
-- it->userData = VMA_NULL;
-- ++m_2ndNullItemsCount;
-- m_SumFreeSize += it->size;
-- CleanupAfterFree();
-- return;
-- }
-- }
--
-- VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
--}
--
--void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
--{
-- outInfo.offset = (VkDeviceSize)allocHandle - 1;
-- VmaSuballocation& suballoc = FindSuballocation(outInfo.offset);
-- outInfo.size = suballoc.size;
-- outInfo.pUserData = suballoc.userData;
--}
--
--void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const
--{
-- return FindSuballocation((VkDeviceSize)allocHandle - 1).userData;
--}
--
--VmaAllocHandle VmaBlockMetadata_Linear::GetAllocationListBegin() const
--{
-- // Function only used for defragmentation, which is disabled for this algorithm
-- VMA_ASSERT(0);
-- return VK_NULL_HANDLE;
--}
--
--VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAlloc) const
--{
-- // Function only used for defragmentation, which is disabled for this algorithm
-- VMA_ASSERT(0);
-- return VK_NULL_HANDLE;
--}
--
--VkDeviceSize VmaBlockMetadata_Linear::GetNextFreeRegionSize(VmaAllocHandle alloc) const
--{
-- // Function only used for defragmentation, which is disabled for this algorithm
-- VMA_ASSERT(0);
-- return 0;
--}
--
--void VmaBlockMetadata_Linear::Clear()
--{
-- m_SumFreeSize = GetSize();
-- m_Suballocations0.clear();
-- m_Suballocations1.clear();
-- // Leaving m_1stVectorIndex unchanged - it doesn't matter.
-- m_2ndVectorMode = SECOND_VECTOR_EMPTY;
-- m_1stNullItemsBeginCount = 0;
-- m_1stNullItemsMiddleCount = 0;
-- m_2ndNullItemsCount = 0;
--}
--
--void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
--{
-- VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1);
-- suballoc.userData = userData;
--}
--
--void VmaBlockMetadata_Linear::DebugLogAllAllocations() const
--{
-- const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-- for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it)
-- if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
-- DebugLogAllocation(it->offset, it->size, it->userData);
--
-- const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-- for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it)
-- if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
-- DebugLogAllocation(it->offset, it->size, it->userData);
--}
--
--VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const
--{
-- const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-- const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
--
-- VmaSuballocation refSuballoc;
-- refSuballoc.offset = offset;
-- // Rest of members stays uninitialized intentionally for better performance.
--
-- // Item from the 1st vector.
-- {
-- SuballocationVectorType::const_iterator it = VmaBinaryFindSorted(
-- suballocations1st.begin() + m_1stNullItemsBeginCount,
-- suballocations1st.end(),
-- refSuballoc,
-- VmaSuballocationOffsetLess());
-- if (it != suballocations1st.end())
-- {
-- return const_cast<VmaSuballocation&>(*it);
-- }
-- }
--
-- if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
-- {
-- // Rest of members stays uninitialized intentionally for better performance.
-- SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
-- VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
-- VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
-- if (it != suballocations2nd.end())
-- {
-- return const_cast<VmaSuballocation&>(*it);
-- }
-- }
--
-- VMA_ASSERT(0 && "Allocation not found in linear allocator!");
-- return const_cast<VmaSuballocation&>(suballocations1st.back()); // Should never occur.
--}
--
--bool VmaBlockMetadata_Linear::ShouldCompact1st() const
--{
-- const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
-- const size_t suballocCount = AccessSuballocations1st().size();
-- return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
--}
--
--void VmaBlockMetadata_Linear::CleanupAfterFree()
--{
-- SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-- SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
--
-- if (IsEmpty())
-- {
-- suballocations1st.clear();
-- suballocations2nd.clear();
-- m_1stNullItemsBeginCount = 0;
-- m_1stNullItemsMiddleCount = 0;
-- m_2ndNullItemsCount = 0;
-- m_2ndVectorMode = SECOND_VECTOR_EMPTY;
-- }
-- else
-- {
-- const size_t suballoc1stCount = suballocations1st.size();
-- const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
-- VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
--
-- // Find more null items at the beginning of 1st vector.
-- while (m_1stNullItemsBeginCount < suballoc1stCount &&
-- suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
-- {
-- ++m_1stNullItemsBeginCount;
-- --m_1stNullItemsMiddleCount;
-- }
--
-- // Find more null items at the end of 1st vector.
-- while (m_1stNullItemsMiddleCount > 0 &&
-- suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE)
-- {
-- --m_1stNullItemsMiddleCount;
-- suballocations1st.pop_back();
-- }
--
-- // Find more null items at the end of 2nd vector.
-- while (m_2ndNullItemsCount > 0 &&
-- suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE)
-- {
-- --m_2ndNullItemsCount;
-- suballocations2nd.pop_back();
-- }
--
-- // Find more null items at the beginning of 2nd vector.
-- while (m_2ndNullItemsCount > 0 &&
-- suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE)
-- {
-- --m_2ndNullItemsCount;
-- VmaVectorRemove(suballocations2nd, 0);
-- }
--
-- if (ShouldCompact1st())
-- {
-- const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
-- size_t srcIndex = m_1stNullItemsBeginCount;
-- for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
-- {
-- while (suballocations1st[srcIndex].type == VMA_SUBALLOCATION_TYPE_FREE)
-- {
-- ++srcIndex;
-- }
-- if (dstIndex != srcIndex)
-- {
-- suballocations1st[dstIndex] = suballocations1st[srcIndex];
-- }
-- ++srcIndex;
-- }
-- suballocations1st.resize(nonNullItemCount);
-- m_1stNullItemsBeginCount = 0;
-- m_1stNullItemsMiddleCount = 0;
-- }
--
-- // 2nd vector became empty.
-- if (suballocations2nd.empty())
-- {
-- m_2ndVectorMode = SECOND_VECTOR_EMPTY;
-- }
--
-- // 1st vector became empty.
-- if (suballocations1st.size() - m_1stNullItemsBeginCount == 0)
-- {
-- suballocations1st.clear();
-- m_1stNullItemsBeginCount = 0;
--
-- if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-- {
-- // Swap 1st with 2nd. Now 2nd is empty.
-- m_2ndVectorMode = SECOND_VECTOR_EMPTY;
-- m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
-- while (m_1stNullItemsBeginCount < suballocations2nd.size() &&
-- suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
-- {
-- ++m_1stNullItemsBeginCount;
-- --m_1stNullItemsMiddleCount;
-- }
-- m_2ndNullItemsCount = 0;
-- m_1stVectorIndex ^= 1;
-- }
-- }
-- }
--
-- VMA_HEAVY_ASSERT(Validate());
--}
--
--bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
-- VkDeviceSize allocSize,
-- VkDeviceSize allocAlignment,
-- VmaSuballocationType allocType,
-- uint32_t strategy,
-- VmaAllocationRequest* pAllocationRequest)
--{
-- const VkDeviceSize blockSize = GetSize();
-- const VkDeviceSize debugMargin = GetDebugMargin();
-- const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
-- SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-- SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
--
-- if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-- {
-- // Try to allocate at the end of 1st vector.
--
-- VkDeviceSize resultBaseOffset = 0;
-- if (!suballocations1st.empty())
-- {
-- const VmaSuballocation& lastSuballoc = suballocations1st.back();
-- resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin;
-- }
--
-- // Start from offset equal to beginning of free space.
-- VkDeviceSize resultOffset = resultBaseOffset;
--
-- // Apply alignment.
-- resultOffset = VmaAlignUp(resultOffset, allocAlignment);
--
-- // Check previous suballocations for BufferImageGranularity conflicts.
-- // Make bigger alignment if necessary.
-- if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty())
-- {
-- bool bufferImageGranularityConflict = false;
-- for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
-- {
-- const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
-- if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
-- {
-- if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
-- {
-- bufferImageGranularityConflict = true;
-- break;
-- }
-- }
-- else
-- // Already on previous page.
-- break;
-- }
-- if (bufferImageGranularityConflict)
-- {
-- resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
-- }
-- }
--
-- const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
-- suballocations2nd.back().offset : blockSize;
--
-- // There is enough free space at the end after alignment.
-- if (resultOffset + allocSize + debugMargin <= freeSpaceEnd)
-- {
-- // Check next suballocations for BufferImageGranularity conflicts.
-- // If conflict exists, allocation cannot be made here.
-- if ((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-- {
-- for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
-- {
-- const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
-- if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
-- {
-- if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
-- {
-- return false;
-- }
-- }
-- else
-- {
-- // Already on previous page.
-- break;
-- }
-- }
-- }
--
-- // All tests passed: Success.
-- pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
-- // pAllocationRequest->item, customData unused.
-- pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
-- return true;
-- }
-- }
--
-- // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
-- // beginning of 1st vector as the end of free space.
-- if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-- {
-- VMA_ASSERT(!suballocations1st.empty());
--
-- VkDeviceSize resultBaseOffset = 0;
-- if (!suballocations2nd.empty())
-- {
-- const VmaSuballocation& lastSuballoc = suballocations2nd.back();
-- resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin;
-- }
--
-- // Start from offset equal to beginning of free space.
-- VkDeviceSize resultOffset = resultBaseOffset;
--
-- // Apply alignment.
-- resultOffset = VmaAlignUp(resultOffset, allocAlignment);
--
-- // Check previous suballocations for BufferImageGranularity conflicts.
-- // Make bigger alignment if necessary.
-- if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
-- {
-- bool bufferImageGranularityConflict = false;
-- for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
-- {
-- const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
-- if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
-- {
-- if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
-- {
-- bufferImageGranularityConflict = true;
-- break;
-- }
-- }
-- else
-- // Already on previous page.
-- break;
-- }
-- if (bufferImageGranularityConflict)
-- {
-- resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
-- }
-- }
--
-- size_t index1st = m_1stNullItemsBeginCount;
--
-- // There is enough free space at the end after alignment.
-- if ((index1st == suballocations1st.size() && resultOffset + allocSize + debugMargin <= blockSize) ||
-- (index1st < suballocations1st.size() && resultOffset + allocSize + debugMargin <= suballocations1st[index1st].offset))
-- {
-- // Check next suballocations for BufferImageGranularity conflicts.
-- // If conflict exists, allocation cannot be made here.
-- if (allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
-- {
-- for (size_t nextSuballocIndex = index1st;
-- nextSuballocIndex < suballocations1st.size();
-- nextSuballocIndex++)
-- {
-- const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
-- if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
-- {
-- if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
-- {
-- return false;
-- }
-- }
-- else
-- {
-- // Already on next page.
-- break;
-- }
-- }
-- }
--
-- // All tests passed: Success.
-- pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
-- pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
-- // pAllocationRequest->item, customData unused.
-- return true;
-- }
-- }
--
-- return false;
--}
--
--bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
-- VkDeviceSize allocSize,
-- VkDeviceSize allocAlignment,
-- VmaSuballocationType allocType,
-- uint32_t strategy,
-- VmaAllocationRequest* pAllocationRequest)
--{
-- const VkDeviceSize blockSize = GetSize();
-- const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
-- SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-- SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
--
-- if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-- {
-- VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
-- return false;
-- }
--
-- // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
-- if (allocSize > blockSize)
-- {
-- return false;
-- }
-- VkDeviceSize resultBaseOffset = blockSize - allocSize;
-- if (!suballocations2nd.empty())
-- {
-- const VmaSuballocation& lastSuballoc = suballocations2nd.back();
-- resultBaseOffset = lastSuballoc.offset - allocSize;
-- if (allocSize > lastSuballoc.offset)
-- {
-- return false;
-- }
-- }
--
-- // Start from offset equal to end of free space.
-- VkDeviceSize resultOffset = resultBaseOffset;
--
-- const VkDeviceSize debugMargin = GetDebugMargin();
--
-- // Apply debugMargin at the end.
-- if (debugMargin > 0)
-- {
-- if (resultOffset < debugMargin)
-- {
-- return false;
-- }
-- resultOffset -= debugMargin;
-- }
--
-- // Apply alignment.
-- resultOffset = VmaAlignDown(resultOffset, allocAlignment);
--
-- // Check next suballocations from 2nd for BufferImageGranularity conflicts.
-- // Make bigger alignment if necessary.
-- if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
-- {
-- bool bufferImageGranularityConflict = false;
-- for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
-- {
-- const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
-- if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
-- {
-- if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
-- {
-- bufferImageGranularityConflict = true;
-- break;
-- }
-- }
-- else
-- // Already on previous page.
-- break;
-- }
-- if (bufferImageGranularityConflict)
-- {
-- resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
-- }
-- }
--
-- // There is enough free space.
-- const VkDeviceSize endOf1st = !suballocations1st.empty() ?
-- suballocations1st.back().offset + suballocations1st.back().size :
-- 0;
-- if (endOf1st + debugMargin <= resultOffset)
-- {
-- // Check previous suballocations for BufferImageGranularity conflicts.
-- // If conflict exists, allocation cannot be made here.
-- if (bufferImageGranularity > 1)
-- {
-- for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
-- {
-- const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
-- if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
-- {
-- if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
-- {
-- return false;
-- }
-- }
-- else
-- {
-- // Already on next page.
-- break;
-- }
-- }
-- }
--
-- // All tests passed: Success.
-- pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
-- // pAllocationRequest->item unused.
-- pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
-- return true;
-- }
--
-- return false;
--}
--#endif // _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
--#endif // _VMA_BLOCK_METADATA_LINEAR
--
--#if 0
--#ifndef _VMA_BLOCK_METADATA_BUDDY
--/*
--- GetSize() is the original size of allocated memory block.
--- m_UsableSize is this size aligned down to a power of two.
-- All allocations and calculations happen relative to m_UsableSize.
--- GetUnusableSize() is the difference between them.
-- It is reported as separate, unused range, not available for allocations.
--
--Node at level 0 has size = m_UsableSize.
--Each next level contains nodes with size 2 times smaller than current level.
--m_LevelCount is the maximum number of levels to use in the current object.
--*/
--class VmaBlockMetadata_Buddy : public VmaBlockMetadata
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Buddy)
--public:
-- VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks,
-- VkDeviceSize bufferImageGranularity, bool isVirtual);
-- virtual ~VmaBlockMetadata_Buddy();
--
-- size_t GetAllocationCount() const override { return m_AllocationCount; }
-- VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize + GetUnusableSize(); }
-- bool IsEmpty() const override { return m_Root->type == Node::TYPE_FREE; }
-- VkResult CheckCorruption(const void* pBlockData) override { return VK_ERROR_FEATURE_NOT_PRESENT; }
-- VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }
-- void DebugLogAllAllocations() const override { DebugLogAllAllocationNode(m_Root, 0); }
--
-- void Init(VkDeviceSize size) override;
-- bool Validate() const override;
--
-- void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
-- void AddStatistics(VmaStatistics& inoutStats) const override;
--
--#if VMA_STATS_STRING_ENABLED
-- void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override;
--#endif
--
-- bool CreateAllocationRequest(
-- VkDeviceSize allocSize,
-- VkDeviceSize allocAlignment,
-- bool upperAddress,
-- VmaSuballocationType allocType,
-- uint32_t strategy,
-- VmaAllocationRequest* pAllocationRequest) override;
--
-- void Alloc(
-- const VmaAllocationRequest& request,
-- VmaSuballocationType type,
-- void* userData) override;
--
-- void Free(VmaAllocHandle allocHandle) override;
-- void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
-- void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
-- VmaAllocHandle GetAllocationListBegin() const override;
-- VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
-- void Clear() override;
-- void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
--
--private:
-- static const size_t MAX_LEVELS = 48;
--
-- struct ValidationContext
-- {
-- size_t calculatedAllocationCount = 0;
-- size_t calculatedFreeCount = 0;
-- VkDeviceSize calculatedSumFreeSize = 0;
-- };
-- struct Node
-- {
-- VkDeviceSize offset;
-- enum TYPE
-- {
-- TYPE_FREE,
-- TYPE_ALLOCATION,
-- TYPE_SPLIT,
-- TYPE_COUNT
-- } type;
-- Node* parent;
-- Node* buddy;
--
-- union
-- {
-- struct
-- {
-- Node* prev;
-- Node* next;
-- } free;
-- struct
-- {
-- void* userData;
-- } allocation;
-- struct
-- {
-- Node* leftChild;
-- } split;
-- };
-- };
--
-- // Size of the memory block aligned down to a power of two.
-- VkDeviceSize m_UsableSize;
-- uint32_t m_LevelCount;
-- VmaPoolAllocator<Node> m_NodeAllocator;
-- Node* m_Root;
-- struct
-- {
-- Node* front;
-- Node* back;
-- } m_FreeList[MAX_LEVELS];
--
-- // Number of nodes in the tree with type == TYPE_ALLOCATION.
-- size_t m_AllocationCount;
-- // Number of nodes in the tree with type == TYPE_FREE.
-- size_t m_FreeCount;
-- // Doesn't include space wasted due to internal fragmentation - allocation sizes are just aligned up to node sizes.
-- // Doesn't include unusable size.
-- VkDeviceSize m_SumFreeSize;
--
-- VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
-- VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
--
-- VkDeviceSize AlignAllocationSize(VkDeviceSize size) const
-- {
-- if (!IsVirtual())
-- {
-- size = VmaAlignUp(size, (VkDeviceSize)16);
-- }
-- return VmaNextPow2(size);
-- }
-- Node* FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const;
-- void DeleteNodeChildren(Node* node);
-- bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
-- uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
-- void AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const;
-- // Adds node to the front of FreeList at given level.
-- // node->type must be FREE.
-- // node->free.prev, next can be undefined.
-- void AddToFreeListFront(uint32_t level, Node* node);
-- // Removes node from FreeList at given level.
-- // node->type must be FREE.
-- // node->free.prev, next stay untouched.
-- void RemoveFromFreeList(uint32_t level, Node* node);
-- void DebugLogAllAllocationNode(Node* node, uint32_t level) const;
--
--#if VMA_STATS_STRING_ENABLED
-- void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
--#endif
--};
--
--#ifndef _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
--VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks,
-- VkDeviceSize bufferImageGranularity, bool isVirtual)
-- : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
-- m_NodeAllocator(pAllocationCallbacks, 32), // firstBlockCapacity
-- m_Root(VMA_NULL),
-- m_AllocationCount(0),
-- m_FreeCount(1),
-- m_SumFreeSize(0)
--{
-- memset(m_FreeList, 0, sizeof(m_FreeList));
--}
--
--VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
--{
-- DeleteNodeChildren(m_Root);
-- m_NodeAllocator.Free(m_Root);
--}
--
--void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
--{
-- VmaBlockMetadata::Init(size);
--
-- m_UsableSize = VmaPrevPow2(size);
-- m_SumFreeSize = m_UsableSize;
--
-- // Calculate m_LevelCount.
-- const VkDeviceSize minNodeSize = IsVirtual() ? 1 : 16;
-- m_LevelCount = 1;
-- while (m_LevelCount < MAX_LEVELS &&
-- LevelToNodeSize(m_LevelCount) >= minNodeSize)
-- {
-- ++m_LevelCount;
-- }
--
-- Node* rootNode = m_NodeAllocator.Alloc();
-- rootNode->offset = 0;
-- rootNode->type = Node::TYPE_FREE;
-- rootNode->parent = VMA_NULL;
-- rootNode->buddy = VMA_NULL;
--
-- m_Root = rootNode;
-- AddToFreeListFront(0, rootNode);
--}
--
--bool VmaBlockMetadata_Buddy::Validate() const
--{
-- // Validate tree.
-- ValidationContext ctx;
-- if (!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
-- {
-- VMA_VALIDATE(false && "ValidateNode failed.");
-- }
-- VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
-- VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
--
-- // Validate free node lists.
-- for (uint32_t level = 0; level < m_LevelCount; ++level)
-- {
-- VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
-- m_FreeList[level].front->free.prev == VMA_NULL);
--
-- for (Node* node = m_FreeList[level].front;
-- node != VMA_NULL;
-- node = node->free.next)
-- {
-- VMA_VALIDATE(node->type == Node::TYPE_FREE);
--
-- if (node->free.next == VMA_NULL)
-- {
-- VMA_VALIDATE(m_FreeList[level].back == node);
-- }
-- else
-- {
-- VMA_VALIDATE(node->free.next->free.prev == node);
-- }
-- }
-- }
--
-- // Validate that free lists ar higher levels are empty.
-- for (uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
-- {
-- VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
-- }
--
-- return true;
--}
--
--void VmaBlockMetadata_Buddy::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
--{
-- inoutStats.statistics.blockCount++;
-- inoutStats.statistics.blockBytes += GetSize();
--
-- AddNodeToDetailedStatistics(inoutStats, m_Root, LevelToNodeSize(0));
--
-- const VkDeviceSize unusableSize = GetUnusableSize();
-- if (unusableSize > 0)
-- VmaAddDetailedStatisticsUnusedRange(inoutStats, unusableSize);
--}
--
--void VmaBlockMetadata_Buddy::AddStatistics(VmaStatistics& inoutStats) const
--{
-- inoutStats.blockCount++;
-- inoutStats.allocationCount += (uint32_t)m_AllocationCount;
-- inoutStats.blockBytes += GetSize();
-- inoutStats.allocationBytes += GetSize() - m_SumFreeSize;
--}
--
--#if VMA_STATS_STRING_ENABLED
--void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const
--{
-- VmaDetailedStatistics stats;
-- VmaClearDetailedStatistics(stats);
-- AddDetailedStatistics(stats);
--
-- PrintDetailedMap_Begin(
-- json,
-- stats.statistics.blockBytes - stats.statistics.allocationBytes,
-- stats.statistics.allocationCount,
-- stats.unusedRangeCount,
-- mapRefCount);
--
-- PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
--
-- const VkDeviceSize unusableSize = GetUnusableSize();
-- if (unusableSize > 0)
-- {
-- PrintDetailedMap_UnusedRange(json,
-- m_UsableSize, // offset
-- unusableSize); // size
-- }
--
-- PrintDetailedMap_End(json);
--}
--#endif // VMA_STATS_STRING_ENABLED
--
--bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
-- VkDeviceSize allocSize,
-- VkDeviceSize allocAlignment,
-- bool upperAddress,
-- VmaSuballocationType allocType,
-- uint32_t strategy,
-- VmaAllocationRequest* pAllocationRequest)
--{
-- VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
--
-- allocSize = AlignAllocationSize(allocSize);
--
-- // Simple way to respect bufferImageGranularity. May be optimized some day.
-- // Whenever it might be an OPTIMAL image...
-- if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
-- allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
-- allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
-- {
-- allocAlignment = VMA_MAX(allocAlignment, GetBufferImageGranularity());
-- allocSize = VmaAlignUp(allocSize, GetBufferImageGranularity());
-- }
--
-- if (allocSize > m_UsableSize)
-- {
-- return false;
-- }
--
-- const uint32_t targetLevel = AllocSizeToLevel(allocSize);
-- for (uint32_t level = targetLevel; level--; )
-- {
-- for (Node* freeNode = m_FreeList[level].front;
-- freeNode != VMA_NULL;
-- freeNode = freeNode->free.next)
-- {
-- if (freeNode->offset % allocAlignment == 0)
-- {
-- pAllocationRequest->type = VmaAllocationRequestType::Normal;
-- pAllocationRequest->allocHandle = (VmaAllocHandle)(freeNode->offset + 1);
-- pAllocationRequest->size = allocSize;
-- pAllocationRequest->customData = (void*)(uintptr_t)level;
-- return true;
-- }
-- }
-- }
--
-- return false;
--}
--
--void VmaBlockMetadata_Buddy::Alloc(
-- const VmaAllocationRequest& request,
-- VmaSuballocationType type,
-- void* userData)
--{
-- VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
--
-- const uint32_t targetLevel = AllocSizeToLevel(request.size);
-- uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
--
-- Node* currNode = m_FreeList[currLevel].front;
-- VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
-- const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;
-- while (currNode->offset != offset)
-- {
-- currNode = currNode->free.next;
-- VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
-- }
--
-- // Go down, splitting free nodes.
-- while (currLevel < targetLevel)
-- {
-- // currNode is already first free node at currLevel.
-- // Remove it from list of free nodes at this currLevel.
-- RemoveFromFreeList(currLevel, currNode);
--
-- const uint32_t childrenLevel = currLevel + 1;
--
-- // Create two free sub-nodes.
-- Node* leftChild = m_NodeAllocator.Alloc();
-- Node* rightChild = m_NodeAllocator.Alloc();
--
-- leftChild->offset = currNode->offset;
-- leftChild->type = Node::TYPE_FREE;
-- leftChild->parent = currNode;
-- leftChild->buddy = rightChild;
--
-- rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
-- rightChild->type = Node::TYPE_FREE;
-- rightChild->parent = currNode;
-- rightChild->buddy = leftChild;
--
-- // Convert current currNode to split type.
-- currNode->type = Node::TYPE_SPLIT;
-- currNode->split.leftChild = leftChild;
--
-- // Add child nodes to free list. Order is important!
-- AddToFreeListFront(childrenLevel, rightChild);
-- AddToFreeListFront(childrenLevel, leftChild);
--
-- ++m_FreeCount;
-- ++currLevel;
-- currNode = m_FreeList[currLevel].front;
--
-- /*
-- We can be sure that currNode, as left child of node previously split,
-- also fulfills the alignment requirement.
-- */
-- }
--
-- // Remove from free list.
-- VMA_ASSERT(currLevel == targetLevel &&
-- currNode != VMA_NULL &&
-- currNode->type == Node::TYPE_FREE);
-- RemoveFromFreeList(currLevel, currNode);
--
-- // Convert to allocation node.
-- currNode->type = Node::TYPE_ALLOCATION;
-- currNode->allocation.userData = userData;
--
-- ++m_AllocationCount;
-- --m_FreeCount;
-- m_SumFreeSize -= request.size;
--}
--
--void VmaBlockMetadata_Buddy::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
--{
-- uint32_t level = 0;
-- outInfo.offset = (VkDeviceSize)allocHandle - 1;
-- const Node* const node = FindAllocationNode(outInfo.offset, level);
-- outInfo.size = LevelToNodeSize(level);
-- outInfo.pUserData = node->allocation.userData;
--}
--
--void* VmaBlockMetadata_Buddy::GetAllocationUserData(VmaAllocHandle allocHandle) const
--{
-- uint32_t level = 0;
-- const Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
-- return node->allocation.userData;
--}
--
--VmaAllocHandle VmaBlockMetadata_Buddy::GetAllocationListBegin() const
--{
-- // Function only used for defragmentation, which is disabled for this algorithm
-- return VK_NULL_HANDLE;
--}
--
--VmaAllocHandle VmaBlockMetadata_Buddy::GetNextAllocation(VmaAllocHandle prevAlloc) const
--{
-- // Function only used for defragmentation, which is disabled for this algorithm
-- return VK_NULL_HANDLE;
--}
--
--void VmaBlockMetadata_Buddy::DeleteNodeChildren(Node* node)
--{
-- if (node->type == Node::TYPE_SPLIT)
-- {
-- DeleteNodeChildren(node->split.leftChild->buddy);
-- DeleteNodeChildren(node->split.leftChild);
-- const VkAllocationCallbacks* allocationCallbacks = GetAllocationCallbacks();
-- m_NodeAllocator.Free(node->split.leftChild->buddy);
-- m_NodeAllocator.Free(node->split.leftChild);
-- }
--}
--
--void VmaBlockMetadata_Buddy::Clear()
--{
-- DeleteNodeChildren(m_Root);
-- m_Root->type = Node::TYPE_FREE;
-- m_AllocationCount = 0;
-- m_FreeCount = 1;
-- m_SumFreeSize = m_UsableSize;
--}
--
--void VmaBlockMetadata_Buddy::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
--{
-- uint32_t level = 0;
-- Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
-- node->allocation.userData = userData;
--}
--
--VmaBlockMetadata_Buddy::Node* VmaBlockMetadata_Buddy::FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const
--{
-- Node* node = m_Root;
-- VkDeviceSize nodeOffset = 0;
-- outLevel = 0;
-- VkDeviceSize levelNodeSize = LevelToNodeSize(0);
-- while (node->type == Node::TYPE_SPLIT)
-- {
-- const VkDeviceSize nextLevelNodeSize = levelNodeSize >> 1;
-- if (offset < nodeOffset + nextLevelNodeSize)
-- {
-- node = node->split.leftChild;
-- }
-- else
-- {
-- node = node->split.leftChild->buddy;
-- nodeOffset += nextLevelNodeSize;
-- }
-- ++outLevel;
-- levelNodeSize = nextLevelNodeSize;
-- }
--
-- VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
-- return node;
--}
--
--bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
--{
-- VMA_VALIDATE(level < m_LevelCount);
-- VMA_VALIDATE(curr->parent == parent);
-- VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
-- VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
-- switch (curr->type)
-- {
-- case Node::TYPE_FREE:
-- // curr->free.prev, next are validated separately.
-- ctx.calculatedSumFreeSize += levelNodeSize;
-- ++ctx.calculatedFreeCount;
-- break;
-- case Node::TYPE_ALLOCATION:
-- ++ctx.calculatedAllocationCount;
-- if (!IsVirtual())
-- {
-- VMA_VALIDATE(curr->allocation.userData != VMA_NULL);
-- }
-- break;
-- case Node::TYPE_SPLIT:
-- {
-- const uint32_t childrenLevel = level + 1;
-- const VkDeviceSize childrenLevelNodeSize = levelNodeSize >> 1;
-- const Node* const leftChild = curr->split.leftChild;
-- VMA_VALIDATE(leftChild != VMA_NULL);
-- VMA_VALIDATE(leftChild->offset == curr->offset);
-- if (!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
-- {
-- VMA_VALIDATE(false && "ValidateNode for left child failed.");
-- }
-- const Node* const rightChild = leftChild->buddy;
-- VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
-- if (!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
-- {
-- VMA_VALIDATE(false && "ValidateNode for right child failed.");
-- }
-- }
-- break;
-- default:
-- return false;
-- }
--
-- return true;
--}
--
--uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
--{
-- // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
-- uint32_t level = 0;
-- VkDeviceSize currLevelNodeSize = m_UsableSize;
-- VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
-- while (allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
-- {
-- ++level;
-- currLevelNodeSize >>= 1;
-- nextLevelNodeSize >>= 1;
-- }
-- return level;
--}
--
--void VmaBlockMetadata_Buddy::Free(VmaAllocHandle allocHandle)
--{
-- uint32_t level = 0;
-- Node* node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
--
-- ++m_FreeCount;
-- --m_AllocationCount;
-- m_SumFreeSize += LevelToNodeSize(level);
--
-- node->type = Node::TYPE_FREE;
--
-- // Join free nodes if possible.
-- while (level > 0 && node->buddy->type == Node::TYPE_FREE)
-- {
-- RemoveFromFreeList(level, node->buddy);
-- Node* const parent = node->parent;
--
-- m_NodeAllocator.Free(node->buddy);
-- m_NodeAllocator.Free(node);
-- parent->type = Node::TYPE_FREE;
--
-- node = parent;
-- --level;
-- --m_FreeCount;
-- }
--
-- AddToFreeListFront(level, node);
--}
--
--void VmaBlockMetadata_Buddy::AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const
--{
-- switch (node->type)
-- {
-- case Node::TYPE_FREE:
-- VmaAddDetailedStatisticsUnusedRange(inoutStats, levelNodeSize);
-- break;
-- case Node::TYPE_ALLOCATION:
-- VmaAddDetailedStatisticsAllocation(inoutStats, levelNodeSize);
-- break;
-- case Node::TYPE_SPLIT:
-- {
-- const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
-- const Node* const leftChild = node->split.leftChild;
-- AddNodeToDetailedStatistics(inoutStats, leftChild, childrenNodeSize);
-- const Node* const rightChild = leftChild->buddy;
-- AddNodeToDetailedStatistics(inoutStats, rightChild, childrenNodeSize);
-- }
-- break;
-- default:
-- VMA_ASSERT(0);
-- }
--}
--
--void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
--{
-- VMA_ASSERT(node->type == Node::TYPE_FREE);
--
-- // List is empty.
-- Node* const frontNode = m_FreeList[level].front;
-- if (frontNode == VMA_NULL)
-- {
-- VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
-- node->free.prev = node->free.next = VMA_NULL;
-- m_FreeList[level].front = m_FreeList[level].back = node;
-- }
-- else
-- {
-- VMA_ASSERT(frontNode->free.prev == VMA_NULL);
-- node->free.prev = VMA_NULL;
-- node->free.next = frontNode;
-- frontNode->free.prev = node;
-- m_FreeList[level].front = node;
-- }
--}
--
--void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
--{
-- VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
--
-- // It is at the front.
-- if (node->free.prev == VMA_NULL)
-- {
-- VMA_ASSERT(m_FreeList[level].front == node);
-- m_FreeList[level].front = node->free.next;
-- }
-- else
-- {
-- Node* const prevFreeNode = node->free.prev;
-- VMA_ASSERT(prevFreeNode->free.next == node);
-- prevFreeNode->free.next = node->free.next;
-- }
--
-- // It is at the back.
-- if (node->free.next == VMA_NULL)
-- {
-- VMA_ASSERT(m_FreeList[level].back == node);
-- m_FreeList[level].back = node->free.prev;
-- }
-- else
-- {
-- Node* const nextFreeNode = node->free.next;
-- VMA_ASSERT(nextFreeNode->free.prev == node);
-- nextFreeNode->free.prev = node->free.prev;
-- }
--}
--
--void VmaBlockMetadata_Buddy::DebugLogAllAllocationNode(Node* node, uint32_t level) const
--{
-- switch (node->type)
-- {
-- case Node::TYPE_FREE:
-- break;
-- case Node::TYPE_ALLOCATION:
-- DebugLogAllocation(node->offset, LevelToNodeSize(level), node->allocation.userData);
-- break;
-- case Node::TYPE_SPLIT:
-- {
-- ++level;
-- DebugLogAllAllocationNode(node->split.leftChild, level);
-- DebugLogAllAllocationNode(node->split.leftChild->buddy, level);
-- }
-- break;
-- default:
-- VMA_ASSERT(0);
-- }
--}
--
--#if VMA_STATS_STRING_ENABLED
--void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
--{
-- switch (node->type)
-- {
-- case Node::TYPE_FREE:
-- PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
-- break;
-- case Node::TYPE_ALLOCATION:
-- PrintDetailedMap_Allocation(json, node->offset, levelNodeSize, node->allocation.userData);
-- break;
-- case Node::TYPE_SPLIT:
-- {
-- const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
-- const Node* const leftChild = node->split.leftChild;
-- PrintDetailedMapNode(json, leftChild, childrenNodeSize);
-- const Node* const rightChild = leftChild->buddy;
-- PrintDetailedMapNode(json, rightChild, childrenNodeSize);
-- }
-- break;
-- default:
-- VMA_ASSERT(0);
-- }
--}
--#endif // VMA_STATS_STRING_ENABLED
--#endif // _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
--#endif // _VMA_BLOCK_METADATA_BUDDY
--#endif // #if 0
--
--#ifndef _VMA_BLOCK_METADATA_TLSF
--// To not search current larger region if first allocation won't succeed and skip to smaller range
--// use with VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT as strategy in CreateAllocationRequest().
--// When fragmentation and reusal of previous blocks doesn't matter then use with
--// VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT for fastest alloc time possible.
--class VmaBlockMetadata_TLSF : public VmaBlockMetadata
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_TLSF)
--public:
-- VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
-- VkDeviceSize bufferImageGranularity, bool isVirtual);
-- virtual ~VmaBlockMetadata_TLSF();
--
-- size_t GetAllocationCount() const override { return m_AllocCount; }
-- size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; }
-- VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; }
-- bool IsEmpty() const override { return m_NullBlock->offset == 0; }
-- VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; }
--
-- void Init(VkDeviceSize size) override;
-- bool Validate() const override;
--
-- void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
-- void AddStatistics(VmaStatistics& inoutStats) const override;
--
--#if VMA_STATS_STRING_ENABLED
-- void PrintDetailedMap(class VmaJsonWriter& json) const override;
--#endif
--
-- bool CreateAllocationRequest(
-- VkDeviceSize allocSize,
-- VkDeviceSize allocAlignment,
-- bool upperAddress,
-- VmaSuballocationType allocType,
-- uint32_t strategy,
-- VmaAllocationRequest* pAllocationRequest) override;
--
-- VkResult CheckCorruption(const void* pBlockData) override;
-- void Alloc(
-- const VmaAllocationRequest& request,
-- VmaSuballocationType type,
-- void* userData) override;
--
-- void Free(VmaAllocHandle allocHandle) override;
-- void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
-- void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
-- VmaAllocHandle GetAllocationListBegin() const override;
-- VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
-- VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
-- void Clear() override;
-- void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
-- void DebugLogAllAllocations() const override;
--
--private:
-- // According to original paper it should be preferable 4 or 5:
-- // M. Masmano, I. Ripoll, A. Crespo, and J. Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems"
-- // http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf
-- static const uint8_t SECOND_LEVEL_INDEX = 5;
-- static const uint16_t SMALL_BUFFER_SIZE = 256;
-- static const uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16;
-- static const uint8_t MEMORY_CLASS_SHIFT = 7;
-- static const uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT;
--
-- class Block
-- {
-- public:
-- VkDeviceSize offset;
-- VkDeviceSize size;
-- Block* prevPhysical;
-- Block* nextPhysical;
--
-- void MarkFree() { prevFree = VMA_NULL; }
-- void MarkTaken() { prevFree = this; }
-- bool IsFree() const { return prevFree != this; }
-- void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; }
-- Block*& PrevFree() { return prevFree; }
-- Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; }
--
-- private:
-- Block* prevFree; // Address of the same block here indicates that block is taken
-- union
-- {
-- Block* nextFree;
-- void* userData;
-- };
-- };
--
-- size_t m_AllocCount;
-- // Total number of free blocks besides null block
-- size_t m_BlocksFreeCount;
-- // Total size of free blocks excluding null block
-- VkDeviceSize m_BlocksFreeSize;
-- uint32_t m_IsFreeBitmap;
-- uint8_t m_MemoryClasses;
-- uint32_t m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES];
-- uint32_t m_ListsCount;
-- /*
-- * 0: 0-3 lists for small buffers
-- * 1+: 0-(2^SLI-1) lists for normal buffers
-- */
-- Block** m_FreeList;
-- VmaPoolAllocator<Block> m_BlockAllocator;
-- Block* m_NullBlock;
-- VmaBlockBufferImageGranularity m_GranularityHandler;
--
-- uint8_t SizeToMemoryClass(VkDeviceSize size) const;
-- uint16_t SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const;
-- uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const;
-- uint32_t GetListIndex(VkDeviceSize size) const;
--
-- void RemoveFreeBlock(Block* block);
-- void InsertFreeBlock(Block* block);
-- void MergeBlock(Block* block, Block* prev);
--
-- Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const;
-- bool CheckBlock(
-- Block& block,
-- uint32_t listIndex,
-- VkDeviceSize allocSize,
-- VkDeviceSize allocAlignment,
-- VmaSuballocationType allocType,
-- VmaAllocationRequest* pAllocationRequest);
--};
--
--#ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
--VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
-- VkDeviceSize bufferImageGranularity, bool isVirtual)
-- : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
-- m_AllocCount(0),
-- m_BlocksFreeCount(0),
-- m_BlocksFreeSize(0),
-- m_IsFreeBitmap(0),
-- m_MemoryClasses(0),
-- m_ListsCount(0),
-- m_FreeList(VMA_NULL),
-- m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT),
-- m_NullBlock(VMA_NULL),
-- m_GranularityHandler(bufferImageGranularity) {}
--
--VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF()
--{
-- if (m_FreeList)
-- vma_delete_array(GetAllocationCallbacks(), m_FreeList, m_ListsCount);
-- m_GranularityHandler.Destroy(GetAllocationCallbacks());
--}
--
--void VmaBlockMetadata_TLSF::Init(VkDeviceSize size)
--{
-- VmaBlockMetadata::Init(size);
--
-- if (!IsVirtual())
-- m_GranularityHandler.Init(GetAllocationCallbacks(), size);
--
-- m_NullBlock = m_BlockAllocator.Alloc();
-- m_NullBlock->size = size;
-- m_NullBlock->offset = 0;
-- m_NullBlock->prevPhysical = VMA_NULL;
-- m_NullBlock->nextPhysical = VMA_NULL;
-- m_NullBlock->MarkFree();
-- m_NullBlock->NextFree() = VMA_NULL;
-- m_NullBlock->PrevFree() = VMA_NULL;
-- uint8_t memoryClass = SizeToMemoryClass(size);
-- uint16_t sli = SizeToSecondIndex(size, memoryClass);
-- m_ListsCount = (memoryClass == 0 ? 0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1;
-- if (IsVirtual())
-- m_ListsCount += 1UL << SECOND_LEVEL_INDEX;
-- else
-- m_ListsCount += 4;
--
-- m_MemoryClasses = memoryClass + uint8_t(2);
-- memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(uint32_t));
--
-- m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount);
-- memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
--}
--
--bool VmaBlockMetadata_TLSF::Validate() const
--{
-- VMA_VALIDATE(GetSumFreeSize() <= GetSize());
--
-- VkDeviceSize calculatedSize = m_NullBlock->size;
-- VkDeviceSize calculatedFreeSize = m_NullBlock->size;
-- size_t allocCount = 0;
-- size_t freeCount = 0;
--
-- // Check integrity of free lists
-- for (uint32_t list = 0; list < m_ListsCount; ++list)
-- {
-- Block* block = m_FreeList[list];
-- if (block != VMA_NULL)
-- {
-- VMA_VALIDATE(block->IsFree());
-- VMA_VALIDATE(block->PrevFree() == VMA_NULL);
-- while (block->NextFree())
-- {
-- VMA_VALIDATE(block->NextFree()->IsFree());
-- VMA_VALIDATE(block->NextFree()->PrevFree() == block);
-- block = block->NextFree();
-- }
-- }
-- }
--
-- VkDeviceSize nextOffset = m_NullBlock->offset;
-- auto validateCtx = m_GranularityHandler.StartValidation(GetAllocationCallbacks(), IsVirtual());
--
-- VMA_VALIDATE(m_NullBlock->nextPhysical == VMA_NULL);
-- if (m_NullBlock->prevPhysical)
-- {
-- VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock);
-- }
-- // Check all blocks
-- for (Block* prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical)
-- {
-- VMA_VALIDATE(prev->offset + prev->size == nextOffset);
-- nextOffset = prev->offset;
-- calculatedSize += prev->size;
--
-- uint32_t listIndex = GetListIndex(prev->size);
-- if (prev->IsFree())
-- {
-- ++freeCount;
-- // Check if free block belongs to free list
-- Block* freeBlock = m_FreeList[listIndex];
-- VMA_VALIDATE(freeBlock != VMA_NULL);
--
-- bool found = false;
-- do
-- {
-- if (freeBlock == prev)
-- found = true;
--
-- freeBlock = freeBlock->NextFree();
-- } while (!found && freeBlock != VMA_NULL);
--
-- VMA_VALIDATE(found);
-- calculatedFreeSize += prev->size;
-- }
-- else
-- {
-- ++allocCount;
-- // Check if taken block is not on a free list
-- Block* freeBlock = m_FreeList[listIndex];
-- while (freeBlock)
-- {
-- VMA_VALIDATE(freeBlock != prev);
-- freeBlock = freeBlock->NextFree();
-- }
--
-- if (!IsVirtual())
-- {
-- VMA_VALIDATE(m_GranularityHandler.Validate(validateCtx, prev->offset, prev->size));
-- }
-- }
--
-- if (prev->prevPhysical)
-- {
-- VMA_VALIDATE(prev->prevPhysical->nextPhysical == prev);
-- }
-- }
--
-- if (!IsVirtual())
-- {
-- VMA_VALIDATE(m_GranularityHandler.FinishValidation(validateCtx));
-- }
--
-- VMA_VALIDATE(nextOffset == 0);
-- VMA_VALIDATE(calculatedSize == GetSize());
-- VMA_VALIDATE(calculatedFreeSize == GetSumFreeSize());
-- VMA_VALIDATE(allocCount == m_AllocCount);
-- VMA_VALIDATE(freeCount == m_BlocksFreeCount);
--
-- return true;
--}
--
--void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
--{
-- inoutStats.statistics.blockCount++;
-- inoutStats.statistics.blockBytes += GetSize();
-- if (m_NullBlock->size > 0)
-- VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size);
--
-- for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
-- {
-- if (block->IsFree())
-- VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size);
-- else
-- VmaAddDetailedStatisticsAllocation(inoutStats, block->size);
-- }
--}
--
--void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const
--{
-- inoutStats.blockCount++;
-- inoutStats.allocationCount += (uint32_t)m_AllocCount;
-- inoutStats.blockBytes += GetSize();
-- inoutStats.allocationBytes += GetSize() - GetSumFreeSize();
--}
--
--#if VMA_STATS_STRING_ENABLED
--void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const
--{
-- size_t blockCount = m_AllocCount + m_BlocksFreeCount;
-- VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
-- VmaVector<Block*, VmaStlAllocator<Block*>> blockList(blockCount, allocator);
--
-- size_t i = blockCount;
-- for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
-- {
-- blockList[--i] = block;
-- }
-- VMA_ASSERT(i == 0);
--
-- VmaDetailedStatistics stats;
-- VmaClearDetailedStatistics(stats);
-- AddDetailedStatistics(stats);
--
-- PrintDetailedMap_Begin(json,
-- stats.statistics.blockBytes - stats.statistics.allocationBytes,
-- stats.statistics.allocationCount,
-- stats.unusedRangeCount);
--
-- for (; i < blockCount; ++i)
-- {
-- Block* block = blockList[i];
-- if (block->IsFree())
-- PrintDetailedMap_UnusedRange(json, block->offset, block->size);
-- else
-- PrintDetailedMap_Allocation(json, block->offset, block->size, block->UserData());
-- }
-- if (m_NullBlock->size > 0)
-- PrintDetailedMap_UnusedRange(json, m_NullBlock->offset, m_NullBlock->size);
--
-- PrintDetailedMap_End(json);
--}
--#endif
--
--bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
-- VkDeviceSize allocSize,
-- VkDeviceSize allocAlignment,
-- bool upperAddress,
-- VmaSuballocationType allocType,
-- uint32_t strategy,
-- VmaAllocationRequest* pAllocationRequest)
--{
-- VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
-- VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
--
-- // For small granularity round up
-- if (!IsVirtual())
-- m_GranularityHandler.RoundupAllocRequest(allocType, allocSize, allocAlignment);
--
-- allocSize += GetDebugMargin();
-- // Quick check for too small pool
-- if (allocSize > GetSumFreeSize())
-- return false;
--
-- // If no free blocks in pool then check only null block
-- if (m_BlocksFreeCount == 0)
-- return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest);
--
-- // Round up to the next block
-- VkDeviceSize sizeForNextList = allocSize;
-- VkDeviceSize smallSizeStep = VkDeviceSize(SMALL_BUFFER_SIZE / (IsVirtual() ? 1 << SECOND_LEVEL_INDEX : 4));
-- if (allocSize > SMALL_BUFFER_SIZE)
-- {
-- sizeForNextList += (1ULL << (VMA_BITSCAN_MSB(allocSize) - SECOND_LEVEL_INDEX));
-- }
-- else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep)
-- sizeForNextList = SMALL_BUFFER_SIZE + 1;
-- else
-- sizeForNextList += smallSizeStep;
--
-- uint32_t nextListIndex = m_ListsCount;
-- uint32_t prevListIndex = m_ListsCount;
-- Block* nextListBlock = VMA_NULL;
-- Block* prevListBlock = VMA_NULL;
--
-- // Check blocks according to strategies
-- if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT)
-- {
-- // Quick check for larger block first
-- nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
-- if (nextListBlock != VMA_NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-- return true;
--
-- // If not fitted then null block
-- if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
-- return true;
--
-- // Null block failed, search larger bucket
-- while (nextListBlock)
-- {
-- if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-- return true;
-- nextListBlock = nextListBlock->NextFree();
-- }
--
-- // Failed again, check best fit bucket
-- prevListBlock = FindFreeBlock(allocSize, prevListIndex);
-- while (prevListBlock)
-- {
-- if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-- return true;
-- prevListBlock = prevListBlock->NextFree();
-- }
-- }
-- else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT)
-- {
-- // Check best fit bucket
-- prevListBlock = FindFreeBlock(allocSize, prevListIndex);
-- while (prevListBlock)
-- {
-- if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-- return true;
-- prevListBlock = prevListBlock->NextFree();
-- }
--
-- // If failed check null block
-- if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
-- return true;
--
-- // Check larger bucket
-- nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
-- while (nextListBlock)
-- {
-- if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-- return true;
-- nextListBlock = nextListBlock->NextFree();
-- }
-- }
-- else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT )
-- {
-- // Perform search from the start
-- VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
-- VmaVector<Block*, VmaStlAllocator<Block*>> blockList(m_BlocksFreeCount, allocator);
--
-- size_t i = m_BlocksFreeCount;
-- for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
-- {
-- if (block->IsFree() && block->size >= allocSize)
-- blockList[--i] = block;
-- }
--
-- for (; i < m_BlocksFreeCount; ++i)
-- {
-- Block& block = *blockList[i];
-- if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest))
-- return true;
-- }
--
-- // If failed check null block
-- if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
-- return true;
--
-- // Whole range searched, no more memory
-- return false;
-- }
-- else
-- {
-- // Check larger bucket
-- nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
-- while (nextListBlock)
-- {
-- if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-- return true;
-- nextListBlock = nextListBlock->NextFree();
-- }
--
-- // If failed check null block
-- if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
-- return true;
--
-- // Check best fit bucket
-- prevListBlock = FindFreeBlock(allocSize, prevListIndex);
-- while (prevListBlock)
-- {
-- if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-- return true;
-- prevListBlock = prevListBlock->NextFree();
-- }
-- }
--
-- // Worst case, full search has to be done
-- while (++nextListIndex < m_ListsCount)
-- {
-- nextListBlock = m_FreeList[nextListIndex];
-- while (nextListBlock)
-- {
-- if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-- return true;
-- nextListBlock = nextListBlock->NextFree();
-- }
-- }
--
-- // No more memory sadly
-- return false;
--}
--
--VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData)
--{
-- for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
-- {
-- if (!block->IsFree())
-- {
-- if (!VmaValidateMagicValue(pBlockData, block->offset + block->size))
-- {
-- VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
-- return VK_ERROR_UNKNOWN_COPY;
-- }
-- }
-- }
--
-- return VK_SUCCESS;
--}
--
--void VmaBlockMetadata_TLSF::Alloc(
-- const VmaAllocationRequest& request,
-- VmaSuballocationType type,
-- void* userData)
--{
-- VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF);
--
-- // Get block and pop it from the free list
-- Block* currentBlock = (Block*)request.allocHandle;
-- VkDeviceSize offset = request.algorithmData;
-- VMA_ASSERT(currentBlock != VMA_NULL);
-- VMA_ASSERT(currentBlock->offset <= offset);
--
-- if (currentBlock != m_NullBlock)
-- RemoveFreeBlock(currentBlock);
--
-- VkDeviceSize debugMargin = GetDebugMargin();
-- VkDeviceSize misssingAlignment = offset - currentBlock->offset;
--
-- // Append missing alignment to prev block or create new one
-- if (misssingAlignment)
-- {
-- Block* prevBlock = currentBlock->prevPhysical;
-- VMA_ASSERT(prevBlock != VMA_NULL && "There should be no missing alignment at offset 0!");
--
-- if (prevBlock->IsFree() && prevBlock->size != debugMargin)
-- {
-- uint32_t oldList = GetListIndex(prevBlock->size);
-- prevBlock->size += misssingAlignment;
-- // Check if new size crosses list bucket
-- if (oldList != GetListIndex(prevBlock->size))
-- {
-- prevBlock->size -= misssingAlignment;
-- RemoveFreeBlock(prevBlock);
-- prevBlock->size += misssingAlignment;
-- InsertFreeBlock(prevBlock);
-- }
-- else
-- m_BlocksFreeSize += misssingAlignment;
-- }
-- else
-- {
-- Block* newBlock = m_BlockAllocator.Alloc();
-- currentBlock->prevPhysical = newBlock;
-- prevBlock->nextPhysical = newBlock;
-- newBlock->prevPhysical = prevBlock;
-- newBlock->nextPhysical = currentBlock;
-- newBlock->size = misssingAlignment;
-- newBlock->offset = currentBlock->offset;
-- newBlock->MarkTaken();
--
-- InsertFreeBlock(newBlock);
-- }
--
-- currentBlock->size -= misssingAlignment;
-- currentBlock->offset += misssingAlignment;
-- }
--
-- VkDeviceSize size = request.size + debugMargin;
-- if (currentBlock->size == size)
-- {
-- if (currentBlock == m_NullBlock)
-- {
-- // Setup new null block
-- m_NullBlock = m_BlockAllocator.Alloc();
-- m_NullBlock->size = 0;
-- m_NullBlock->offset = currentBlock->offset + size;
-- m_NullBlock->prevPhysical = currentBlock;
-- m_NullBlock->nextPhysical = VMA_NULL;
-- m_NullBlock->MarkFree();
-- m_NullBlock->PrevFree() = VMA_NULL;
-- m_NullBlock->NextFree() = VMA_NULL;
-- currentBlock->nextPhysical = m_NullBlock;
-- currentBlock->MarkTaken();
-- }
-- }
-- else
-- {
-- VMA_ASSERT(currentBlock->size > size && "Proper block already found, shouldn't find smaller one!");
--
-- // Create new free block
-- Block* newBlock = m_BlockAllocator.Alloc();
-- newBlock->size = currentBlock->size - size;
-- newBlock->offset = currentBlock->offset + size;
-- newBlock->prevPhysical = currentBlock;
-- newBlock->nextPhysical = currentBlock->nextPhysical;
-- currentBlock->nextPhysical = newBlock;
-- currentBlock->size = size;
--
-- if (currentBlock == m_NullBlock)
-- {
-- m_NullBlock = newBlock;
-- m_NullBlock->MarkFree();
-- m_NullBlock->NextFree() = VMA_NULL;
-- m_NullBlock->PrevFree() = VMA_NULL;
-- currentBlock->MarkTaken();
-- }
-- else
-- {
-- newBlock->nextPhysical->prevPhysical = newBlock;
-- newBlock->MarkTaken();
-- InsertFreeBlock(newBlock);
-- }
-- }
-- currentBlock->UserData() = userData;
--
-- if (debugMargin > 0)
-- {
-- currentBlock->size -= debugMargin;
-- Block* newBlock = m_BlockAllocator.Alloc();
-- newBlock->size = debugMargin;
-- newBlock->offset = currentBlock->offset + currentBlock->size;
-- newBlock->prevPhysical = currentBlock;
-- newBlock->nextPhysical = currentBlock->nextPhysical;
-- newBlock->MarkTaken();
-- currentBlock->nextPhysical->prevPhysical = newBlock;
-- currentBlock->nextPhysical = newBlock;
-- InsertFreeBlock(newBlock);
-- }
--
-- if (!IsVirtual())
-- m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData,
-- currentBlock->offset, currentBlock->size);
-- ++m_AllocCount;
--}
--
--void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle)
--{
-- Block* block = (Block*)allocHandle;
-- Block* next = block->nextPhysical;
-- VMA_ASSERT(!block->IsFree() && "Block is already free!");
--
-- if (!IsVirtual())
-- m_GranularityHandler.FreePages(block->offset, block->size);
-- --m_AllocCount;
--
-- VkDeviceSize debugMargin = GetDebugMargin();
-- if (debugMargin > 0)
-- {
-- RemoveFreeBlock(next);
-- MergeBlock(next, block);
-- block = next;
-- next = next->nextPhysical;
-- }
--
-- // Try merging
-- Block* prev = block->prevPhysical;
-- if (prev != VMA_NULL && prev->IsFree() && prev->size != debugMargin)
-- {
-- RemoveFreeBlock(prev);
-- MergeBlock(block, prev);
-- }
--
-- if (!next->IsFree())
-- InsertFreeBlock(block);
-- else if (next == m_NullBlock)
-- MergeBlock(m_NullBlock, block);
-- else
-- {
-- RemoveFreeBlock(next);
-- MergeBlock(next, block);
-- InsertFreeBlock(next);
-- }
--}
--
--void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
--{
-- Block* block = (Block*)allocHandle;
-- VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!");
-- outInfo.offset = block->offset;
-- outInfo.size = block->size;
-- outInfo.pUserData = block->UserData();
--}
--
--void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const
--{
-- Block* block = (Block*)allocHandle;
-- VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!");
-- return block->UserData();
--}
--
--VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const
--{
-- if (m_AllocCount == 0)
-- return VK_NULL_HANDLE;
--
-- for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical)
-- {
-- if (!block->IsFree())
-- return (VmaAllocHandle)block;
-- }
-- VMA_ASSERT(false && "If m_AllocCount > 0 then should find any allocation!");
-- return VK_NULL_HANDLE;
--}
--
--VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const
--{
-- Block* startBlock = (Block*)prevAlloc;
-- VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!");
--
-- for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical)
-- {
-- if (!block->IsFree())
-- return (VmaAllocHandle)block;
-- }
-- return VK_NULL_HANDLE;
--}
--
--VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const
--{
-- Block* block = (Block*)alloc;
-- VMA_ASSERT(!block->IsFree() && "Incorrect block!");
--
-- if (block->prevPhysical)
-- return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0;
-- return 0;
--}
--
--void VmaBlockMetadata_TLSF::Clear()
--{
-- m_AllocCount = 0;
-- m_BlocksFreeCount = 0;
-- m_BlocksFreeSize = 0;
-- m_IsFreeBitmap = 0;
-- m_NullBlock->offset = 0;
-- m_NullBlock->size = GetSize();
-- Block* block = m_NullBlock->prevPhysical;
-- m_NullBlock->prevPhysical = VMA_NULL;
-- while (block)
-- {
-- Block* prev = block->prevPhysical;
-- m_BlockAllocator.Free(block);
-- block = prev;
-- }
-- memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
-- memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(uint32_t));
-- m_GranularityHandler.Clear();
--}
--
--void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
--{
-- Block* block = (Block*)allocHandle;
-- VMA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!");
-- block->UserData() = userData;
--}
--
--void VmaBlockMetadata_TLSF::DebugLogAllAllocations() const
--{
-- for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
-- if (!block->IsFree())
-- DebugLogAllocation(block->offset, block->size, block->UserData());
--}
--
--uint8_t VmaBlockMetadata_TLSF::SizeToMemoryClass(VkDeviceSize size) const
--{
-- if (size > SMALL_BUFFER_SIZE)
-- return uint8_t(VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT);
-- return 0;
--}
--
--uint16_t VmaBlockMetadata_TLSF::SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const
--{
-- if (memoryClass == 0)
-- {
-- if (IsVirtual())
-- return static_cast<uint16_t>((size - 1) / 8);
-- else
-- return static_cast<uint16_t>((size - 1) / 64);
-- }
-- return static_cast<uint16_t>((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX));
--}
--
--uint32_t VmaBlockMetadata_TLSF::GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const
--{
-- if (memoryClass == 0)
-- return secondIndex;
--
-- const uint32_t index = static_cast<uint32_t>(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex;
-- if (IsVirtual())
-- return index + (1 << SECOND_LEVEL_INDEX);
-- else
-- return index + 4;
--}
--
--uint32_t VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const
--{
-- uint8_t memoryClass = SizeToMemoryClass(size);
-- return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass));
--}
--
--void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block)
--{
-- VMA_ASSERT(block != m_NullBlock);
-- VMA_ASSERT(block->IsFree());
--
-- if (block->NextFree() != VMA_NULL)
-- block->NextFree()->PrevFree() = block->PrevFree();
-- if (block->PrevFree() != VMA_NULL)
-- block->PrevFree()->NextFree() = block->NextFree();
-- else
-- {
-- uint8_t memClass = SizeToMemoryClass(block->size);
-- uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
-- uint32_t index = GetListIndex(memClass, secondIndex);
-- VMA_ASSERT(m_FreeList[index] == block);
-- m_FreeList[index] = block->NextFree();
-- if (block->NextFree() == VMA_NULL)
-- {
-- m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex);
-- if (m_InnerIsFreeBitmap[memClass] == 0)
-- m_IsFreeBitmap &= ~(1UL << memClass);
-- }
-- }
-- block->MarkTaken();
-- block->UserData() = VMA_NULL;
-- --m_BlocksFreeCount;
-- m_BlocksFreeSize -= block->size;
--}
--
--void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block)
--{
-- VMA_ASSERT(block != m_NullBlock);
-- VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!");
--
-- uint8_t memClass = SizeToMemoryClass(block->size);
-- uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
-- uint32_t index = GetListIndex(memClass, secondIndex);
-- VMA_ASSERT(index < m_ListsCount);
-- block->PrevFree() = VMA_NULL;
-- block->NextFree() = m_FreeList[index];
-- m_FreeList[index] = block;
-- if (block->NextFree() != VMA_NULL)
-- block->NextFree()->PrevFree() = block;
-- else
-- {
-- m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex;
-- m_IsFreeBitmap |= 1UL << memClass;
-- }
-- ++m_BlocksFreeCount;
-- m_BlocksFreeSize += block->size;
--}
--
--void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev)
--{
-- VMA_ASSERT(block->prevPhysical == prev && "Cannot merge separate physical regions!");
-- VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!");
--
-- block->offset = prev->offset;
-- block->size += prev->size;
-- block->prevPhysical = prev->prevPhysical;
-- if (block->prevPhysical)
-- block->prevPhysical->nextPhysical = block;
-- m_BlockAllocator.Free(prev);
--}
--
--VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const
--{
-- uint8_t memoryClass = SizeToMemoryClass(size);
-- uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass));
-- if (!innerFreeMap)
-- {
-- // Check higher levels for available blocks
-- uint32_t freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1));
-- if (!freeMap)
-- return VMA_NULL; // No more memory available
--
-- // Find lowest free region
-- memoryClass = VMA_BITSCAN_LSB(freeMap);
-- innerFreeMap = m_InnerIsFreeBitmap[memoryClass];
-- VMA_ASSERT(innerFreeMap != 0);
-- }
-- // Find lowest free subregion
-- listIndex = GetListIndex(memoryClass, VMA_BITSCAN_LSB(innerFreeMap));
-- VMA_ASSERT(m_FreeList[listIndex]);
-- return m_FreeList[listIndex];
--}
--
--bool VmaBlockMetadata_TLSF::CheckBlock(
-- Block& block,
-- uint32_t listIndex,
-- VkDeviceSize allocSize,
-- VkDeviceSize allocAlignment,
-- VmaSuballocationType allocType,
-- VmaAllocationRequest* pAllocationRequest)
--{
-- VMA_ASSERT(block.IsFree() && "Block is already taken!");
--
-- VkDeviceSize alignedOffset = VmaAlignUp(block.offset, allocAlignment);
-- if (block.size < allocSize + alignedOffset - block.offset)
-- return false;
--
-- // Check for granularity conflicts
-- if (!IsVirtual() &&
-- m_GranularityHandler.CheckConflictAndAlignUp(alignedOffset, allocSize, block.offset, block.size, allocType))
-- return false;
--
-- // Alloc successful
-- pAllocationRequest->type = VmaAllocationRequestType::TLSF;
-- pAllocationRequest->allocHandle = (VmaAllocHandle)&block;
-- pAllocationRequest->size = allocSize - GetDebugMargin();
-- pAllocationRequest->customData = (void*)allocType;
-- pAllocationRequest->algorithmData = alignedOffset;
--
-- // Place block at the start of list if it's normal block
-- if (listIndex != m_ListsCount && block.PrevFree())
-- {
-- block.PrevFree()->NextFree() = block.NextFree();
-- if (block.NextFree())
-- block.NextFree()->PrevFree() = block.PrevFree();
-- block.PrevFree() = VMA_NULL;
-- block.NextFree() = m_FreeList[listIndex];
-- m_FreeList[listIndex] = &block;
-- if (block.NextFree())
-- block.NextFree()->PrevFree() = &block;
-- }
--
-- return true;
--}
--#endif // _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
--#endif // _VMA_BLOCK_METADATA_TLSF
--
--#ifndef _VMA_BLOCK_VECTOR
--/*
--Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
--Vulkan memory type.
--
--Synchronized internally with a mutex.
--*/
--class VmaBlockVector
--{
-- friend struct VmaDefragmentationContext_T;
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockVector)
--public:
-- VmaBlockVector(
-- VmaAllocator hAllocator,
-- VmaPool hParentPool,
-- uint32_t memoryTypeIndex,
-- VkDeviceSize preferredBlockSize,
-- size_t minBlockCount,
-- size_t maxBlockCount,
-- VkDeviceSize bufferImageGranularity,
-- bool explicitBlockSize,
-- uint32_t algorithm,
-- float priority,
-- VkDeviceSize minAllocationAlignment,
-- void* pMemoryAllocateNext);
-- ~VmaBlockVector();
--
-- VmaAllocator GetAllocator() const { return m_hAllocator; }
-- VmaPool GetParentPool() const { return m_hParentPool; }
-- bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
-- uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
-- VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
-- VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
-- uint32_t GetAlgorithm() const { return m_Algorithm; }
-- bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; }
-- float GetPriority() const { return m_Priority; }
-- const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; }
-- // To be used only while the m_Mutex is locked. Used during defragmentation.
-- size_t GetBlockCount() const { return m_Blocks.size(); }
-- // To be used only while the m_Mutex is locked. Used during defragmentation.
-- VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
-- VMA_RW_MUTEX &GetMutex() { return m_Mutex; }
--
-- VkResult CreateMinBlocks();
-- void AddStatistics(VmaStatistics& inoutStats);
-- void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
-- bool IsEmpty();
-- bool IsCorruptionDetectionEnabled() const;
--
-- VkResult Allocate(
-- VkDeviceSize size,
-- VkDeviceSize alignment,
-- const VmaAllocationCreateInfo& createInfo,
-- VmaSuballocationType suballocType,
-- size_t allocationCount,
-- VmaAllocation* pAllocations);
--
-- void Free(const VmaAllocation hAllocation);
--
--#if VMA_STATS_STRING_ENABLED
-- void PrintDetailedMap(class VmaJsonWriter& json);
--#endif
--
-- VkResult CheckCorruption();
--
--private:
-- const VmaAllocator m_hAllocator;
-- const VmaPool m_hParentPool;
-- const uint32_t m_MemoryTypeIndex;
-- const VkDeviceSize m_PreferredBlockSize;
-- const size_t m_MinBlockCount;
-- const size_t m_MaxBlockCount;
-- const VkDeviceSize m_BufferImageGranularity;
-- const bool m_ExplicitBlockSize;
-- const uint32_t m_Algorithm;
-- const float m_Priority;
-- const VkDeviceSize m_MinAllocationAlignment;
--
-- void* const m_pMemoryAllocateNext;
-- VMA_RW_MUTEX m_Mutex;
-- // Incrementally sorted by sumFreeSize, ascending.
-- VmaVector<VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*>> m_Blocks;
-- uint32_t m_NextBlockId;
-- bool m_IncrementalSort = true;
--
-- void SetIncrementalSort(bool val) { m_IncrementalSort = val; }
--
-- VkDeviceSize CalcMaxBlockSize() const;
-- // Finds and removes given block from vector.
-- void Remove(VmaDeviceMemoryBlock* pBlock);
-- // Performs single step in sorting m_Blocks. They may not be fully sorted
-- // after this call.
-- void IncrementallySortBlocks();
-- void SortByFreeSize();
--
-- VkResult AllocatePage(
-- VkDeviceSize size,
-- VkDeviceSize alignment,
-- const VmaAllocationCreateInfo& createInfo,
-- VmaSuballocationType suballocType,
-- VmaAllocation* pAllocation);
--
-- VkResult AllocateFromBlock(
-- VmaDeviceMemoryBlock* pBlock,
-- VkDeviceSize size,
-- VkDeviceSize alignment,
-- VmaAllocationCreateFlags allocFlags,
-- void* pUserData,
-- VmaSuballocationType suballocType,
-- uint32_t strategy,
-- VmaAllocation* pAllocation);
--
-- VkResult CommitAllocationRequest(
-- VmaAllocationRequest& allocRequest,
-- VmaDeviceMemoryBlock* pBlock,
-- VkDeviceSize alignment,
-- VmaAllocationCreateFlags allocFlags,
-- void* pUserData,
-- VmaSuballocationType suballocType,
-- VmaAllocation* pAllocation);
--
-- VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
-- bool HasEmptyBlock();
--};
--#endif // _VMA_BLOCK_VECTOR
--
--#ifndef _VMA_DEFRAGMENTATION_CONTEXT
--struct VmaDefragmentationContext_T
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaDefragmentationContext_T)
--public:
-- VmaDefragmentationContext_T(
-- VmaAllocator hAllocator,
-- const VmaDefragmentationInfo& info);
-- ~VmaDefragmentationContext_T();
--
-- void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; }
--
-- VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo);
-- VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo);
--
--private:
-- // Max number of allocations to ignore due to size constraints before ending single pass
-- static const uint8_t MAX_ALLOCS_TO_IGNORE = 16;
-- enum class CounterStatus { Pass, Ignore, End };
--
-- struct FragmentedBlock
-- {
-- uint32_t data;
-- VmaDeviceMemoryBlock* block;
-- };
-- struct StateBalanced
-- {
-- VkDeviceSize avgFreeSize = 0;
-- VkDeviceSize avgAllocSize = UINT64_MAX;
-- };
-- struct StateExtensive
-- {
-- enum class Operation : uint8_t
-- {
-- FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll,
-- MoveBuffers, MoveTextures, MoveAll,
-- Cleanup, Done
-- };
--
-- Operation operation = Operation::FindFreeBlockTexture;
-- size_t firstFreeBlock = SIZE_MAX;
-- };
-- struct MoveAllocationData
-- {
-- VkDeviceSize size;
-- VkDeviceSize alignment;
-- VmaSuballocationType type;
-- VmaAllocationCreateFlags flags;
-- VmaDefragmentationMove move = {};
-- };
--
-- const VkDeviceSize m_MaxPassBytes;
-- const uint32_t m_MaxPassAllocations;
-- const PFN_vmaCheckDefragmentationBreakFunction m_BreakCallback;
-- void* m_BreakCallbackUserData;
--
-- VmaStlAllocator<VmaDefragmentationMove> m_MoveAllocator;
-- VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>> m_Moves;
--
-- uint8_t m_IgnoredAllocs = 0;
-- uint32_t m_Algorithm;
-- uint32_t m_BlockVectorCount;
-- VmaBlockVector* m_PoolBlockVector;
-- VmaBlockVector** m_pBlockVectors;
-- size_t m_ImmovableBlockCount = 0;
-- VmaDefragmentationStats m_GlobalStats = { 0 };
-- VmaDefragmentationStats m_PassStats = { 0 };
-- void* m_AlgorithmState = VMA_NULL;
--
-- static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata);
-- CounterStatus CheckCounters(VkDeviceSize bytes);
-- bool IncrementCounters(VkDeviceSize bytes);
-- bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block);
-- bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector);
--
-- bool ComputeDefragmentation(VmaBlockVector& vector, size_t index);
-- bool ComputeDefragmentation_Fast(VmaBlockVector& vector);
-- bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update);
-- bool ComputeDefragmentation_Full(VmaBlockVector& vector);
-- bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index);
--
-- void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state);
-- bool MoveDataToFreeBlocks(VmaSuballocationType currentType,
-- VmaBlockVector& vector, size_t firstFreeBlock,
-- bool& texturePresent, bool& bufferPresent, bool& otherPresent);
--};
--#endif // _VMA_DEFRAGMENTATION_CONTEXT
--
--#ifndef _VMA_POOL_T
--struct VmaPool_T
--{
-- friend struct VmaPoolListItemTraits;
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaPool_T)
--public:
-- VmaBlockVector m_BlockVector;
-- VmaDedicatedAllocationList m_DedicatedAllocations;
--
-- VmaPool_T(
-- VmaAllocator hAllocator,
-- const VmaPoolCreateInfo& createInfo,
-- VkDeviceSize preferredBlockSize);
-- ~VmaPool_T();
--
-- uint32_t GetId() const { return m_Id; }
-- void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
--
-- const char* GetName() const { return m_Name; }
-- void SetName(const char* pName);
--
--#if VMA_STATS_STRING_ENABLED
-- //void PrintDetailedMap(class VmaStringBuilder& sb);
--#endif
--
--private:
-- uint32_t m_Id;
-- char* m_Name;
-- VmaPool_T* m_PrevPool = VMA_NULL;
-- VmaPool_T* m_NextPool = VMA_NULL;
--};
--
--struct VmaPoolListItemTraits
--{
-- typedef VmaPool_T ItemType;
--
-- static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
-- static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
-- static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
-- static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
--};
--#endif // _VMA_POOL_T
--
--#ifndef _VMA_CURRENT_BUDGET_DATA
--struct VmaCurrentBudgetData
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaCurrentBudgetData)
--public:
--
-- VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS];
-- VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS];
-- VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
-- VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
--
--#if VMA_MEMORY_BUDGET
-- VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
-- VMA_RW_MUTEX m_BudgetMutex;
-- uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
-- uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
-- uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
--#endif // VMA_MEMORY_BUDGET
--
-- VmaCurrentBudgetData();
--
-- void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize);
-- void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize);
--};
--
--#ifndef _VMA_CURRENT_BUDGET_DATA_FUNCTIONS
--VmaCurrentBudgetData::VmaCurrentBudgetData()
--{
-- for (uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
-- {
-- m_BlockCount[heapIndex] = 0;
-- m_AllocationCount[heapIndex] = 0;
-- m_BlockBytes[heapIndex] = 0;
-- m_AllocationBytes[heapIndex] = 0;
--#if VMA_MEMORY_BUDGET
-- m_VulkanUsage[heapIndex] = 0;
-- m_VulkanBudget[heapIndex] = 0;
-- m_BlockBytesAtBudgetFetch[heapIndex] = 0;
--#endif
-- }
--
--#if VMA_MEMORY_BUDGET
-- m_OperationsSinceBudgetFetch = 0;
--#endif
--}
--
--void VmaCurrentBudgetData::AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
--{
-- m_AllocationBytes[heapIndex] += allocationSize;
-- ++m_AllocationCount[heapIndex];
--#if VMA_MEMORY_BUDGET
-- ++m_OperationsSinceBudgetFetch;
--#endif
--}
--
--void VmaCurrentBudgetData::RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
--{
-- VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
-- m_AllocationBytes[heapIndex] -= allocationSize;
-- VMA_ASSERT(m_AllocationCount[heapIndex] > 0);
-- --m_AllocationCount[heapIndex];
--#if VMA_MEMORY_BUDGET
-- ++m_OperationsSinceBudgetFetch;
--#endif
--}
--#endif // _VMA_CURRENT_BUDGET_DATA_FUNCTIONS
--#endif // _VMA_CURRENT_BUDGET_DATA
--
--#ifndef _VMA_ALLOCATION_OBJECT_ALLOCATOR
--/*
--Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
--*/
--class VmaAllocationObjectAllocator
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocationObjectAllocator)
--public:
-- VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks)
-- : m_Allocator(pAllocationCallbacks, 1024) {}
--
-- template<typename... Types> VmaAllocation Allocate(Types&&... args);
-- void Free(VmaAllocation hAlloc);
--
--private:
-- VMA_MUTEX m_Mutex;
-- VmaPoolAllocator<VmaAllocation_T> m_Allocator;
--};
--
--template<typename... Types>
--VmaAllocation VmaAllocationObjectAllocator::Allocate(Types&&... args)
--{
-- VmaMutexLock mutexLock(m_Mutex);
-- return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
--}
--
--void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
--{
-- VmaMutexLock mutexLock(m_Mutex);
-- m_Allocator.Free(hAlloc);
--}
--#endif // _VMA_ALLOCATION_OBJECT_ALLOCATOR
--
--#ifndef _VMA_VIRTUAL_BLOCK_T
--struct VmaVirtualBlock_T
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaVirtualBlock_T)
--public:
-- const bool m_AllocationCallbacksSpecified;
-- const VkAllocationCallbacks m_AllocationCallbacks;
--
-- VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo);
-- ~VmaVirtualBlock_T();
--
-- VkResult Init() { return VK_SUCCESS; }
-- bool IsEmpty() const { return m_Metadata->IsEmpty(); }
-- void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); }
-- void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); }
-- void Clear() { m_Metadata->Clear(); }
--
-- const VkAllocationCallbacks* GetAllocationCallbacks() const;
-- void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo);
-- VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
-- VkDeviceSize* outOffset);
-- void GetStatistics(VmaStatistics& outStats) const;
-- void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const;
--#if VMA_STATS_STRING_ENABLED
-- void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const;
--#endif
--
--private:
-- VmaBlockMetadata* m_Metadata;
--};
--
--#ifndef _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
--VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo)
-- : m_AllocationCallbacksSpecified(createInfo.pAllocationCallbacks != VMA_NULL),
-- m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? *createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks)
--{
-- const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK;
-- switch (algorithm)
-- {
-- case 0:
-- m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
-- break;
-- case VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT:
-- m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true);
-- break;
-- default:
-- VMA_ASSERT(0);
-- m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
-- }
--
-- m_Metadata->Init(createInfo.size);
--}
--
--VmaVirtualBlock_T::~VmaVirtualBlock_T()
--{
-- // Define macro VMA_DEBUG_LOG_FORMAT to receive the list of the unfreed allocations
-- if (!m_Metadata->IsEmpty())
-- m_Metadata->DebugLogAllAllocations();
-- // This is the most important assert in the entire library.
-- // Hitting it means you have some memory leak - unreleased virtual allocations.
-- VMA_ASSERT(m_Metadata->IsEmpty() && "Some virtual allocations were not freed before destruction of this virtual block!");
--
-- vma_delete(GetAllocationCallbacks(), m_Metadata);
--}
--
--const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const
--{
-- return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
--}
--
--void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo)
--{
-- m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo);
--}
--
--VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
-- VkDeviceSize* outOffset)
--{
-- VmaAllocationRequest request = {};
-- if (m_Metadata->CreateAllocationRequest(
-- createInfo.size, // allocSize
-- VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment
-- (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress
-- VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant
-- createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy
-- &request))
-- {
-- m_Metadata->Alloc(request,
-- VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant
-- createInfo.pUserData);
-- outAllocation = (VmaVirtualAllocation)request.allocHandle;
-- if(outOffset)
-- *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle);
-- return VK_SUCCESS;
-- }
-- outAllocation = (VmaVirtualAllocation)VK_NULL_HANDLE;
-- if (outOffset)
-- *outOffset = UINT64_MAX;
-- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
--}
--
--void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const
--{
-- VmaClearStatistics(outStats);
-- m_Metadata->AddStatistics(outStats);
--}
--
--void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const
--{
-- VmaClearDetailedStatistics(outStats);
-- m_Metadata->AddDetailedStatistics(outStats);
--}
--
--#if VMA_STATS_STRING_ENABLED
--void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const
--{
-- VmaJsonWriter json(GetAllocationCallbacks(), sb);
-- json.BeginObject();
--
-- VmaDetailedStatistics stats;
-- CalculateDetailedStatistics(stats);
--
-- json.WriteString("Stats");
-- VmaPrintDetailedStatistics(json, stats);
--
-- if (detailedMap)
-- {
-- json.WriteString("Details");
-- json.BeginObject();
-- m_Metadata->PrintDetailedMap(json);
-- json.EndObject();
-- }
--
-- json.EndObject();
--}
--#endif // VMA_STATS_STRING_ENABLED
--#endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
--#endif // _VMA_VIRTUAL_BLOCK_T
--
--
--// Main allocator object.
--struct VmaAllocator_T
--{
-- VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocator_T)
--public:
-- bool m_UseMutex;
-- uint32_t m_VulkanApiVersion;
-- bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
-- bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
-- bool m_UseExtMemoryBudget;
-- bool m_UseAmdDeviceCoherentMemory;
-- bool m_UseKhrBufferDeviceAddress;
-- bool m_UseExtMemoryPriority;
-- VkDevice m_hDevice;
-- VkInstance m_hInstance;
-- bool m_AllocationCallbacksSpecified;
-- VkAllocationCallbacks m_AllocationCallbacks;
-- VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
-- VmaAllocationObjectAllocator m_AllocationObjectAllocator;
--
-- // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size.
-- uint32_t m_HeapSizeLimitMask;
--
-- VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
-- VkPhysicalDeviceMemoryProperties m_MemProps;
--
-- // Default pools.
-- VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
-- VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES];
--
-- VmaCurrentBudgetData m_Budget;
-- VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects.
--
-- VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
-- VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
-- ~VmaAllocator_T();
--
-- const VkAllocationCallbacks* GetAllocationCallbacks() const
-- {
-- return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
-- }
-- const VmaVulkanFunctions& GetVulkanFunctions() const
-- {
-- return m_VulkanFunctions;
-- }
--
-- VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
--
-- VkDeviceSize GetBufferImageGranularity() const
-- {
-- return VMA_MAX(
-- static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
-- m_PhysicalDeviceProperties.limits.bufferImageGranularity);
-- }
--
-- uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
-- uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
--
-- uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
-- {
-- VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
-- return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
-- }
-- // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
-- bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
-- {
-- return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
-- VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-- }
-- // Minimum alignment for all allocations in specific memory type.
-- VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
-- {
-- return IsMemoryTypeNonCoherent(memTypeIndex) ?
-- VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
-- (VkDeviceSize)VMA_MIN_ALIGNMENT;
-- }
--
-- bool IsIntegratedGpu() const
-- {
-- return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
-- }
--
-- uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
--
-- void GetBufferMemoryRequirements(
-- VkBuffer hBuffer,
-- VkMemoryRequirements& memReq,
-- bool& requiresDedicatedAllocation,
-- bool& prefersDedicatedAllocation) const;
-- void GetImageMemoryRequirements(
-- VkImage hImage,
-- VkMemoryRequirements& memReq,
-- bool& requiresDedicatedAllocation,
-- bool& prefersDedicatedAllocation) const;
-- VkResult FindMemoryTypeIndex(
-- uint32_t memoryTypeBits,
-- const VmaAllocationCreateInfo* pAllocationCreateInfo,
-- VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown.
-- uint32_t* pMemoryTypeIndex) const;
--
-- // Main allocation function.
-- VkResult AllocateMemory(
-- const VkMemoryRequirements& vkMemReq,
-- bool requiresDedicatedAllocation,
-- bool prefersDedicatedAllocation,
-- VkBuffer dedicatedBuffer,
-- VkImage dedicatedImage,
-- VkFlags dedicatedBufferImageUsage, // UINT32_MAX if unknown.
-- const VmaAllocationCreateInfo& createInfo,
-- VmaSuballocationType suballocType,
-- size_t allocationCount,
-- VmaAllocation* pAllocations);
--
-- // Main deallocation function.
-- void FreeMemory(
-- size_t allocationCount,
-- const VmaAllocation* pAllocations);
--
-- void CalculateStatistics(VmaTotalStatistics* pStats);
--
-- void GetHeapBudgets(
-- VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount);
--
--#if VMA_STATS_STRING_ENABLED
-- void PrintDetailedMap(class VmaJsonWriter& json);
--#endif
--
-- void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
--
-- VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
-- void DestroyPool(VmaPool pool);
-- void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats);
-- void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats);
--
-- void SetCurrentFrameIndex(uint32_t frameIndex);
-- uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
--
-- VkResult CheckPoolCorruption(VmaPool hPool);
-- VkResult CheckCorruption(uint32_t memoryTypeBits);
--
-- // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
-- VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
-- // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
-- void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
-- // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
-- VkResult BindVulkanBuffer(
-- VkDeviceMemory memory,
-- VkDeviceSize memoryOffset,
-- VkBuffer buffer,
-- const void* pNext);
-- // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
-- VkResult BindVulkanImage(
-- VkDeviceMemory memory,
-- VkDeviceSize memoryOffset,
-- VkImage image,
-- const void* pNext);
--
-- VkResult Map(VmaAllocation hAllocation, void** ppData);
-- void Unmap(VmaAllocation hAllocation);
--
-- VkResult BindBufferMemory(
-- VmaAllocation hAllocation,
-- VkDeviceSize allocationLocalOffset,
-- VkBuffer hBuffer,
-- const void* pNext);
-- VkResult BindImageMemory(
-- VmaAllocation hAllocation,
-- VkDeviceSize allocationLocalOffset,
-- VkImage hImage,
-- const void* pNext);
--
-- VkResult FlushOrInvalidateAllocation(
-- VmaAllocation hAllocation,
-- VkDeviceSize offset, VkDeviceSize size,
-- VMA_CACHE_OPERATION op);
-- VkResult FlushOrInvalidateAllocations(
-- uint32_t allocationCount,
-- const VmaAllocation* allocations,
-- const VkDeviceSize* offsets, const VkDeviceSize* sizes,
-- VMA_CACHE_OPERATION op);
--
-- void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
--
-- /*
-- Returns bit mask of memory types that can support defragmentation on GPU as
-- they support creation of required buffer for copy operations.
-- */
-- uint32_t GetGpuDefragmentationMemoryTypeBits();
--
--#if VMA_EXTERNAL_MEMORY
-- VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const
-- {
-- return m_TypeExternalMemoryHandleTypes[memTypeIndex];
-- }
--#endif // #if VMA_EXTERNAL_MEMORY
--
--private:
-- VkDeviceSize m_PreferredLargeHeapBlockSize;
--
-- VkPhysicalDevice m_PhysicalDevice;
-- VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
-- VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
--#if VMA_EXTERNAL_MEMORY
-- VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES];
--#endif // #if VMA_EXTERNAL_MEMORY
--
-- VMA_RW_MUTEX m_PoolsMutex;
-- typedef VmaIntrusiveLinkedList<VmaPoolListItemTraits> PoolList;
-- // Protected by m_PoolsMutex.
-- PoolList m_Pools;
-- uint32_t m_NextPoolId;
--
-- VmaVulkanFunctions m_VulkanFunctions;
--
-- // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
-- uint32_t m_GlobalMemoryTypeBits;
--
-- void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
--
--#if VMA_STATIC_VULKAN_FUNCTIONS == 1
-- void ImportVulkanFunctions_Static();
--#endif
--
-- void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
--
--#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
-- void ImportVulkanFunctions_Dynamic();
--#endif
--
-- void ValidateVulkanFunctions();
--
-- VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
--
-- VkResult AllocateMemoryOfType(
-- VmaPool pool,
-- VkDeviceSize size,
-- VkDeviceSize alignment,
-- bool dedicatedPreferred,
-- VkBuffer dedicatedBuffer,
-- VkImage dedicatedImage,
-- VkFlags dedicatedBufferImageUsage,
-- const VmaAllocationCreateInfo& createInfo,
-- uint32_t memTypeIndex,
-- VmaSuballocationType suballocType,
-- VmaDedicatedAllocationList& dedicatedAllocations,
-- VmaBlockVector& blockVector,
-- size_t allocationCount,
-- VmaAllocation* pAllocations);
--
-- // Helper function only to be used inside AllocateDedicatedMemory.
-- VkResult AllocateDedicatedMemoryPage(
-- VmaPool pool,
-- VkDeviceSize size,
-- VmaSuballocationType suballocType,
-- uint32_t memTypeIndex,
-- const VkMemoryAllocateInfo& allocInfo,
-- bool map,
-- bool isUserDataString,
-- bool isMappingAllowed,
-- void* pUserData,
-- VmaAllocation* pAllocation);
--
-- // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
-- VkResult AllocateDedicatedMemory(
-- VmaPool pool,
-- VkDeviceSize size,
-- VmaSuballocationType suballocType,
-- VmaDedicatedAllocationList& dedicatedAllocations,
-- uint32_t memTypeIndex,
-- bool map,
-- bool isUserDataString,
-- bool isMappingAllowed,
-- bool canAliasMemory,
-- void* pUserData,
-- float priority,
-- VkBuffer dedicatedBuffer,
-- VkImage dedicatedImage,
-- VkFlags dedicatedBufferImageUsage,
-- size_t allocationCount,
-- VmaAllocation* pAllocations,
-- const void* pNextChain = nullptr);
--
-- void FreeDedicatedMemory(const VmaAllocation allocation);
--
-- VkResult CalcMemTypeParams(
-- VmaAllocationCreateInfo& outCreateInfo,
-- uint32_t memTypeIndex,
-- VkDeviceSize size,
-- size_t allocationCount);
-- VkResult CalcAllocationParams(
-- VmaAllocationCreateInfo& outCreateInfo,
-- bool dedicatedRequired,
-- bool dedicatedPreferred);
--
-- /*
-- Calculates and returns bit mask of memory types that can support defragmentation
-- on GPU as they support creation of required buffer for copy operations.
-- */
-- uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
-- uint32_t CalculateGlobalMemoryTypeBits() const;
--
-- bool GetFlushOrInvalidateRange(
-- VmaAllocation allocation,
-- VkDeviceSize offset, VkDeviceSize size,
-- VkMappedMemoryRange& outRange) const;
--
--#if VMA_MEMORY_BUDGET
-- void UpdateVulkanBudget();
--#endif // #if VMA_MEMORY_BUDGET
--};
--
--
--#ifndef _VMA_MEMORY_FUNCTIONS
--static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
--{
-- return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
--}
--
--static void VmaFree(VmaAllocator hAllocator, void* ptr)
--{
-- VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
--}
--
--template<typename T>
--static T* VmaAllocate(VmaAllocator hAllocator)
--{
-- return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
--}
--
--template<typename T>
--static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
--{
-- return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
--}
--
--template<typename T>
--static void vma_delete(VmaAllocator hAllocator, T* ptr)
--{
-- if(ptr != VMA_NULL)
-- {
-- ptr->~T();
-- VmaFree(hAllocator, ptr);
-- }
--}
--
--template<typename T>
--static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
--{
-- if(ptr != VMA_NULL)
-- {
-- for(size_t i = count; i--; )
-- ptr[i].~T();
-- VmaFree(hAllocator, ptr);
-- }
--}
--#endif // _VMA_MEMORY_FUNCTIONS
--
--#ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
--VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator)
-- : m_pMetadata(VMA_NULL),
-- m_MemoryTypeIndex(UINT32_MAX),
-- m_Id(0),
-- m_hMemory(VK_NULL_HANDLE),
-- m_MapCount(0),
-- m_pMappedData(VMA_NULL) {}
--
--VmaDeviceMemoryBlock::~VmaDeviceMemoryBlock()
--{
-- VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
-- VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
--}
--
--void VmaDeviceMemoryBlock::Init(
-- VmaAllocator hAllocator,
-- VmaPool hParentPool,
-- uint32_t newMemoryTypeIndex,
-- VkDeviceMemory newMemory,
-- VkDeviceSize newSize,
-- uint32_t id,
-- uint32_t algorithm,
-- VkDeviceSize bufferImageGranularity)
--{
-- VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
--
-- m_hParentPool = hParentPool;
-- m_MemoryTypeIndex = newMemoryTypeIndex;
-- m_Id = id;
-- m_hMemory = newMemory;
--
-- switch (algorithm)
-- {
-- case 0:
-- m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
-- bufferImageGranularity, false); // isVirtual
-- break;
-- case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
-- m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(),
-- bufferImageGranularity, false); // isVirtual
-- break;
-- default:
-- VMA_ASSERT(0);
-- m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
-- bufferImageGranularity, false); // isVirtual
-- }
-- m_pMetadata->Init(newSize);
--}
--
--void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
--{
-- // Define macro VMA_DEBUG_LOG_FORMAT to receive the list of the unfreed allocations
-- if (!m_pMetadata->IsEmpty())
-- m_pMetadata->DebugLogAllAllocations();
-- // This is the most important assert in the entire library.
-- // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
-- VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
--
-- VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
-- allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
-- m_hMemory = VK_NULL_HANDLE;
--
-- vma_delete(allocator, m_pMetadata);
-- m_pMetadata = VMA_NULL;
--}
--
--void VmaDeviceMemoryBlock::PostAlloc(VmaAllocator hAllocator)
--{
-- VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
-- m_MappingHysteresis.PostAlloc();
--}
--
--void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator)
--{
-- VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
-- if(m_MappingHysteresis.PostFree())
-- {
-- VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0);
-- if (m_MapCount == 0)
-- {
-- m_pMappedData = VMA_NULL;
-- (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
-- }
-- }
--}
--
--bool VmaDeviceMemoryBlock::Validate() const
--{
-- VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
-- (m_pMetadata->GetSize() != 0));
--
-- return m_pMetadata->Validate();
--}
--
--VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
--{
-- void* pData = nullptr;
-- VkResult res = Map(hAllocator, 1, &pData);
-- if (res != VK_SUCCESS)
-- {
-- return res;
-- }
--
-- res = m_pMetadata->CheckCorruption(pData);
--
-- Unmap(hAllocator, 1);
--
-- return res;
--}
--
--VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
--{
-- if (count == 0)
-- {
-- return VK_SUCCESS;
-- }
--
-- VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
-- const uint32_t oldTotalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
-- m_MappingHysteresis.PostMap();
-- if (oldTotalMapCount != 0)
-- {
-- m_MapCount += count;
-- VMA_ASSERT(m_pMappedData != VMA_NULL);
-- if (ppData != VMA_NULL)
-- {
-- *ppData = m_pMappedData;
-- }
-- return VK_SUCCESS;
-- }
-- else
-- {
-- VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
-- hAllocator->m_hDevice,
-- m_hMemory,
-- 0, // offset
-- VK_WHOLE_SIZE,
-- 0, // flags
-- &m_pMappedData);
-- if (result == VK_SUCCESS)
-- {
-- if (ppData != VMA_NULL)
-- {
-- *ppData = m_pMappedData;
-- }
-- m_MapCount = count;
-- }
-- return result;
-- }
--}
--
--void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
--{
-- if (count == 0)
-- {
-- return;
-- }
--
-- VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
-- if (m_MapCount >= count)
-- {
-- m_MapCount -= count;
-- const uint32_t totalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
-- if (totalMapCount == 0)
-- {
-- m_pMappedData = VMA_NULL;
-- (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
-- }
-- m_MappingHysteresis.PostUnmap();
-- }
-- else
-- {
-- VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
-- }
--}
--
--VkResult VmaDeviceMemoryBlock::WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
--{
-- VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
--
-- void* pData;
-- VkResult res = Map(hAllocator, 1, &pData);
-- if (res != VK_SUCCESS)
-- {
-- return res;
-- }
--
-- VmaWriteMagicValue(pData, allocOffset + allocSize);
--
-- Unmap(hAllocator, 1);
-- return VK_SUCCESS;
--}
--
--VkResult VmaDeviceMemoryBlock::ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
--{
-- VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
--
-- void* pData;
-- VkResult res = Map(hAllocator, 1, &pData);
-- if (res != VK_SUCCESS)
-- {
-- return res;
-- }
--
-- if (!VmaValidateMagicValue(pData, allocOffset + allocSize))
-- {
-- VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
-- }
--
-- Unmap(hAllocator, 1);
-- return VK_SUCCESS;
--}
--
--VkResult VmaDeviceMemoryBlock::BindBufferMemory(
-- const VmaAllocator hAllocator,
-- const VmaAllocation hAllocation,
-- VkDeviceSize allocationLocalOffset,
-- VkBuffer hBuffer,
-- const void* pNext)
--{
-- VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
-- hAllocation->GetBlock() == this);
-- VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
-- "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
-- const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
-- // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
-- VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
-- return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
--}
--
--VkResult VmaDeviceMemoryBlock::BindImageMemory(
-- const VmaAllocator hAllocator,
-- const VmaAllocation hAllocation,
-- VkDeviceSize allocationLocalOffset,
-- VkImage hImage,
-- const void* pNext)
--{
-- VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
-- hAllocation->GetBlock() == this);
-- VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
-- "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
-- const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
-- // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
-- VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
-- return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
--}
--#endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
--
--#ifndef _VMA_ALLOCATION_T_FUNCTIONS
--VmaAllocation_T::VmaAllocation_T(bool mappingAllowed)
-- : m_Alignment{ 1 },
-- m_Size{ 0 },
-- m_pUserData{ VMA_NULL },
-- m_pName{ VMA_NULL },
-- m_MemoryTypeIndex{ 0 },
-- m_Type{ (uint8_t)ALLOCATION_TYPE_NONE },
-- m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN },
-- m_MapCount{ 0 },
-- m_Flags{ 0 }
--{
-- if(mappingAllowed)
-- m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED;
--
--#if VMA_STATS_STRING_ENABLED
-- m_BufferImageUsage = 0;
--#endif
--}
--
--VmaAllocation_T::~VmaAllocation_T()
--{
-- VMA_ASSERT(m_MapCount == 0 && "Allocation was not unmapped before destruction.");
--
-- // Check if owned string was freed.
-- VMA_ASSERT(m_pName == VMA_NULL);
--}
--
--void VmaAllocation_T::InitBlockAllocation(
-- VmaDeviceMemoryBlock* block,
-- VmaAllocHandle allocHandle,
-- VkDeviceSize alignment,
-- VkDeviceSize size,
-- uint32_t memoryTypeIndex,
-- VmaSuballocationType suballocationType,
-- bool mapped)
--{
-- VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
-- VMA_ASSERT(block != VMA_NULL);
-- m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
-- m_Alignment = alignment;
-- m_Size = size;
-- m_MemoryTypeIndex = memoryTypeIndex;
-- if(mapped)
-- {
-- VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
-- m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP;
-- }
-- m_SuballocationType = (uint8_t)suballocationType;
-- m_BlockAllocation.m_Block = block;
-- m_BlockAllocation.m_AllocHandle = allocHandle;
--}
--
--void VmaAllocation_T::InitDedicatedAllocation(
-- VmaPool hParentPool,
-- uint32_t memoryTypeIndex,
-- VkDeviceMemory hMemory,
-- VmaSuballocationType suballocationType,
-- void* pMappedData,
-- VkDeviceSize size)
--{
-- VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
-- VMA_ASSERT(hMemory != VK_NULL_HANDLE);
-- m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
-- m_Alignment = 0;
-- m_Size = size;
-- m_MemoryTypeIndex = memoryTypeIndex;
-- m_SuballocationType = (uint8_t)suballocationType;
-- if(pMappedData != VMA_NULL)
-- {
-- VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
-- m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP;
-- }
-- m_DedicatedAllocation.m_hParentPool = hParentPool;
-- m_DedicatedAllocation.m_hMemory = hMemory;
-- m_DedicatedAllocation.m_pMappedData = pMappedData;
-- m_DedicatedAllocation.m_Prev = VMA_NULL;
-- m_DedicatedAllocation.m_Next = VMA_NULL;
--}
--
--void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName)
--{
-- VMA_ASSERT(pName == VMA_NULL || pName != m_pName);
--
-- FreeName(hAllocator);
--
-- if (pName != VMA_NULL)
-- m_pName = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), pName);
--}
--
--uint8_t VmaAllocation_T::SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation)
--{
-- VMA_ASSERT(allocation != VMA_NULL);
-- VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
-- VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK);
--
-- if (m_MapCount != 0)
-- m_BlockAllocation.m_Block->Unmap(hAllocator, m_MapCount);
--
-- m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation);
-- VMA_SWAP(m_BlockAllocation, allocation->m_BlockAllocation);
-- m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this);
--
--#if VMA_STATS_STRING_ENABLED
-- VMA_SWAP(m_BufferImageUsage, allocation->m_BufferImageUsage);
--#endif
-- return m_MapCount;
--}
--
--VmaAllocHandle VmaAllocation_T::GetAllocHandle() const
--{
-- switch (m_Type)
-- {
-- case ALLOCATION_TYPE_BLOCK:
-- return m_BlockAllocation.m_AllocHandle;
-- case ALLOCATION_TYPE_DEDICATED:
-- return VK_NULL_HANDLE;
-- default:
-- VMA_ASSERT(0);
-- return VK_NULL_HANDLE;
-- }
--}
--
--VkDeviceSize VmaAllocation_T::GetOffset() const
--{
-- switch (m_Type)
-- {
-- case ALLOCATION_TYPE_BLOCK:
-- return m_BlockAllocation.m_Block->m_pMetadata->GetAllocationOffset(m_BlockAllocation.m_AllocHandle);
-- case ALLOCATION_TYPE_DEDICATED:
-- return 0;
-- default:
-- VMA_ASSERT(0);
-- return 0;
-- }
--}
--
--VmaPool VmaAllocation_T::GetParentPool() const
--{
-- switch (m_Type)
-- {
-- case ALLOCATION_TYPE_BLOCK:
-- return m_BlockAllocation.m_Block->GetParentPool();
-- case ALLOCATION_TYPE_DEDICATED:
-- return m_DedicatedAllocation.m_hParentPool;
-- default:
-- VMA_ASSERT(0);
-- return VK_NULL_HANDLE;
-- }
--}
--
--VkDeviceMemory VmaAllocation_T::GetMemory() const
--{
-- switch (m_Type)
-- {
-- case ALLOCATION_TYPE_BLOCK:
-- return m_BlockAllocation.m_Block->GetDeviceMemory();
-- case ALLOCATION_TYPE_DEDICATED:
-- return m_DedicatedAllocation.m_hMemory;
-- default:
-- VMA_ASSERT(0);
-- return VK_NULL_HANDLE;
-- }
--}
--
--void* VmaAllocation_T::GetMappedData() const
--{
-- switch (m_Type)
-- {
-- case ALLOCATION_TYPE_BLOCK:
-- if (m_MapCount != 0 || IsPersistentMap())
-- {
-- void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
-- VMA_ASSERT(pBlockData != VMA_NULL);
-- return (char*)pBlockData + GetOffset();
-- }
-- else
-- {
-- return VMA_NULL;
-- }
-- break;
-- case ALLOCATION_TYPE_DEDICATED:
-- VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0 || IsPersistentMap()));
-- return m_DedicatedAllocation.m_pMappedData;
-- default:
-- VMA_ASSERT(0);
-- return VMA_NULL;
-- }
--}
--
--void VmaAllocation_T::BlockAllocMap()
--{
-- VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
-- VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
--
-- if (m_MapCount < 0xFF)
-- {
-- ++m_MapCount;
-- }
-- else
-- {
-- VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
-- }
--}
--
--void VmaAllocation_T::BlockAllocUnmap()
--{
-- VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
--
-- if (m_MapCount > 0)
-- {
-- --m_MapCount;
-- }
-- else
-- {
-- VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
-- }
--}
--
--VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
--{
-- VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
-- VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
--
-- if (m_MapCount != 0 || IsPersistentMap())
-- {
-- if (m_MapCount < 0xFF)
-- {
-- VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
-- *ppData = m_DedicatedAllocation.m_pMappedData;
-- ++m_MapCount;
-- return VK_SUCCESS;
-- }
-- else
-- {
-- VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
-- return VK_ERROR_MEMORY_MAP_FAILED;
-- }
-- }
-- else
-- {
-- VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
-- hAllocator->m_hDevice,
-- m_DedicatedAllocation.m_hMemory,
-- 0, // offset
-- VK_WHOLE_SIZE,
-- 0, // flags
-- ppData);
-- if (result == VK_SUCCESS)
-- {
-- m_DedicatedAllocation.m_pMappedData = *ppData;
-- m_MapCount = 1;
-- }
-- return result;
-- }
--}
--
--void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
--{
-- VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
--
-- if (m_MapCount > 0)
-- {
-- --m_MapCount;
-- if (m_MapCount == 0 && !IsPersistentMap())
-- {
-- m_DedicatedAllocation.m_pMappedData = VMA_NULL;
-- (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
-- hAllocator->m_hDevice,
-- m_DedicatedAllocation.m_hMemory);
-- }
-- }
-- else
-- {
-- VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
-- }
--}
--
--#if VMA_STATS_STRING_ENABLED
--void VmaAllocation_T::InitBufferImageUsage(uint32_t bufferImageUsage)
--{
-- VMA_ASSERT(m_BufferImageUsage == 0);
-- m_BufferImageUsage = bufferImageUsage;
--}
--
--void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
--{
-- json.WriteString("Type");
-- json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
--
-- json.WriteString("Size");
-- json.WriteNumber(m_Size);
-- json.WriteString("Usage");
-- json.WriteNumber(m_BufferImageUsage);
--
-- if (m_pUserData != VMA_NULL)
-- {
-- json.WriteString("CustomData");
-- json.BeginString();
-- json.ContinueString_Pointer(m_pUserData);
-- json.EndString();
-- }
-- if (m_pName != VMA_NULL)
-- {
-- json.WriteString("Name");
-- json.WriteString(m_pName);
-- }
--}
--#endif // VMA_STATS_STRING_ENABLED
--
--void VmaAllocation_T::FreeName(VmaAllocator hAllocator)
--{
-- if(m_pName)
-- {
-- VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName);
-- m_pName = VMA_NULL;
-- }
--}
--#endif // _VMA_ALLOCATION_T_FUNCTIONS
--
--#ifndef _VMA_BLOCK_VECTOR_FUNCTIONS
--VmaBlockVector::VmaBlockVector(
-- VmaAllocator hAllocator,
-- VmaPool hParentPool,
-- uint32_t memoryTypeIndex,
-- VkDeviceSize preferredBlockSize,
-- size_t minBlockCount,
-- size_t maxBlockCount,
-- VkDeviceSize bufferImageGranularity,
-- bool explicitBlockSize,
-- uint32_t algorithm,
-- float priority,
-- VkDeviceSize minAllocationAlignment,
-- void* pMemoryAllocateNext)
-- : m_hAllocator(hAllocator),
-- m_hParentPool(hParentPool),
-- m_MemoryTypeIndex(memoryTypeIndex),
-- m_PreferredBlockSize(preferredBlockSize),
-- m_MinBlockCount(minBlockCount),
-- m_MaxBlockCount(maxBlockCount),
-- m_BufferImageGranularity(bufferImageGranularity),
-- m_ExplicitBlockSize(explicitBlockSize),
-- m_Algorithm(algorithm),
-- m_Priority(priority),
-- m_MinAllocationAlignment(minAllocationAlignment),
-- m_pMemoryAllocateNext(pMemoryAllocateNext),
-- m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
-- m_NextBlockId(0) {}
--
--VmaBlockVector::~VmaBlockVector()
--{
-- for (size_t i = m_Blocks.size(); i--; )
-- {
-- m_Blocks[i]->Destroy(m_hAllocator);
-- vma_delete(m_hAllocator, m_Blocks[i]);
-- }
--}
--
--VkResult VmaBlockVector::CreateMinBlocks()
--{
-- for (size_t i = 0; i < m_MinBlockCount; ++i)
-- {
-- VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
-- if (res != VK_SUCCESS)
-- {
-- return res;
-- }
-- }
-- return VK_SUCCESS;
--}
--
--void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats)
--{
-- VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
--
-- const size_t blockCount = m_Blocks.size();
-- for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
-- {
-- const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
-- VMA_ASSERT(pBlock);
-- VMA_HEAVY_ASSERT(pBlock->Validate());
-- pBlock->m_pMetadata->AddStatistics(inoutStats);
-- }
--}
--
--void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
--{
-- VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
--
-- const size_t blockCount = m_Blocks.size();
-- for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
-- {
-- const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
-- VMA_ASSERT(pBlock);
-- VMA_HEAVY_ASSERT(pBlock->Validate());
-- pBlock->m_pMetadata->AddDetailedStatistics(inoutStats);
-- }
--}
--
--bool VmaBlockVector::IsEmpty()
--{
-- VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
-- return m_Blocks.empty();
--}
--
--bool VmaBlockVector::IsCorruptionDetectionEnabled() const
--{
-- const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
-- return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
-- (VMA_DEBUG_MARGIN > 0) &&
-- (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
-- (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
--}
--
--VkResult VmaBlockVector::Allocate(
-- VkDeviceSize size,
-- VkDeviceSize alignment,
-- const VmaAllocationCreateInfo& createInfo,
-- VmaSuballocationType suballocType,
-- size_t allocationCount,
-- VmaAllocation* pAllocations)
--{
-- size_t allocIndex;
-- VkResult res = VK_SUCCESS;
--
-- alignment = VMA_MAX(alignment, m_MinAllocationAlignment);
--
-- if (IsCorruptionDetectionEnabled())
-- {
-- size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
-- alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
-- }
--
-- {
-- VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
-- for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-- {
-- res = AllocatePage(
-- size,
-- alignment,
-- createInfo,
-- suballocType,
-- pAllocations + allocIndex);
-- if (res != VK_SUCCESS)
-- {
-- break;
-- }
-- }
-- }
--
-- if (res != VK_SUCCESS)
-- {
-- // Free all already created allocations.
-- while (allocIndex--)
-- Free(pAllocations[allocIndex]);
-- memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
-- }
--
-- return res;
--}
--
--VkResult VmaBlockVector::AllocatePage(
-- VkDeviceSize size,
-- VkDeviceSize alignment,
-- const VmaAllocationCreateInfo& createInfo,
-- VmaSuballocationType suballocType,
-- VmaAllocation* pAllocation)
--{
-- const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
--
-- VkDeviceSize freeMemory;
-- {
-- const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
-- VmaBudget heapBudget = {};
-- m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
-- freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
-- }
--
-- const bool canFallbackToDedicated = !HasExplicitBlockSize() &&
-- (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0;
-- const bool canCreateNewBlock =
-- ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
-- (m_Blocks.size() < m_MaxBlockCount) &&
-- (freeMemory >= size || !canFallbackToDedicated);
-- uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
--
-- // Upper address can only be used with linear allocator and within single memory block.
-- if (isUpperAddress &&
-- (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
-- {
-- return VK_ERROR_FEATURE_NOT_PRESENT;
-- }
--
-- // Early reject: requested allocation size is larger that maximum block size for this block vector.
-- if (size + VMA_DEBUG_MARGIN > m_PreferredBlockSize)
-- {
-- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-- }
--
-- // 1. Search existing allocations. Try to allocate.
-- if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
-- {
-- // Use only last block.
-- if (!m_Blocks.empty())
-- {
-- VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
-- VMA_ASSERT(pCurrBlock);
-- VkResult res = AllocateFromBlock(
-- pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
-- if (res == VK_SUCCESS)
-- {
-- VMA_DEBUG_LOG_FORMAT(" Returned from last block #%u", pCurrBlock->GetId());
-- IncrementallySortBlocks();
-- return VK_SUCCESS;
-- }
-- }
-- }
-- else
-- {
-- if (strategy != VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) // MIN_MEMORY or default
-- {
-- const bool isHostVisible =
-- (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
-- if(isHostVisible)
-- {
-- const bool isMappingAllowed = (createInfo.flags &
-- (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0;
-- /*
-- For non-mappable allocations, check blocks that are not mapped first.
-- For mappable allocations, check blocks that are already mapped first.
-- This way, having many blocks, we will separate mappable and non-mappable allocations,
-- hopefully limiting the number of blocks that are mapped, which will help tools like RenderDoc.
-- */
-- for(size_t mappingI = 0; mappingI < 2; ++mappingI)
-- {
-- // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
-- for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
-- {
-- VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
-- VMA_ASSERT(pCurrBlock);
-- const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL;
-- if((mappingI == 0) == (isMappingAllowed == isBlockMapped))
-- {
-- VkResult res = AllocateFromBlock(
-- pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
-- if (res == VK_SUCCESS)
-- {
-- VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%u", pCurrBlock->GetId());
-- IncrementallySortBlocks();
-- return VK_SUCCESS;
-- }
-- }
-- }
-- }
-- }
-- else
-- {
-- // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
-- for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
-- {
-- VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
-- VMA_ASSERT(pCurrBlock);
-- VkResult res = AllocateFromBlock(
-- pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
-- if (res == VK_SUCCESS)
-- {
-- VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%u", pCurrBlock->GetId());
-- IncrementallySortBlocks();
-- return VK_SUCCESS;
-- }
-- }
-- }
-- }
-- else // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
-- {
-- // Backward order in m_Blocks - prefer blocks with largest amount of free space.
-- for (size_t blockIndex = m_Blocks.size(); blockIndex--; )
-- {
-- VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
-- VMA_ASSERT(pCurrBlock);
-- VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
-- if (res == VK_SUCCESS)
-- {
-- VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%u", pCurrBlock->GetId());
-- IncrementallySortBlocks();
-- return VK_SUCCESS;
-- }
-- }
-- }
-- }
--
-- // 2. Try to create new block.
-- if (canCreateNewBlock)
-- {
-- // Calculate optimal size for new block.
-- VkDeviceSize newBlockSize = m_PreferredBlockSize;
-- uint32_t newBlockSizeShift = 0;
-- const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
--
-- if (!m_ExplicitBlockSize)
-- {
-- // Allocate 1/8, 1/4, 1/2 as first blocks.
-- const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
-- for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
-- {
-- const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
-- if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
-- {
-- newBlockSize = smallerNewBlockSize;
-- ++newBlockSizeShift;
-- }
-- else
-- {
-- break;
-- }
-- }
-- }
--
-- size_t newBlockIndex = 0;
-- VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
-- CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
-- // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
-- if (!m_ExplicitBlockSize)
-- {
-- while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
-- {
-- const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
-- if (smallerNewBlockSize >= size)
-- {
-- newBlockSize = smallerNewBlockSize;
-- ++newBlockSizeShift;
-- res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
-- CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
-- }
-- else
-- {
-- break;
-- }
-- }
-- }
--
-- if (res == VK_SUCCESS)
-- {
-- VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
-- VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
--
-- res = AllocateFromBlock(
-- pBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
-- if (res == VK_SUCCESS)
-- {
-- VMA_DEBUG_LOG_FORMAT(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
-- IncrementallySortBlocks();
-- return VK_SUCCESS;
-- }
-- else
-- {
-- // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
-- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-- }
-- }
-- }
--
-- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
--}
--
--void VmaBlockVector::Free(const VmaAllocation hAllocation)
--{
-- VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
--
-- bool budgetExceeded = false;
-- {
-- const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
-- VmaBudget heapBudget = {};
-- m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
-- budgetExceeded = heapBudget.usage >= heapBudget.budget;
-- }
--
-- // Scope for lock.
-- {
-- VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
--
-- VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
--
-- if (IsCorruptionDetectionEnabled())
-- {
-- VkResult res = pBlock->ValidateMagicValueAfterAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
-- VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
-- }
--
-- if (hAllocation->IsPersistentMap())
-- {
-- pBlock->Unmap(m_hAllocator, 1);
-- }
--
-- const bool hadEmptyBlockBeforeFree = HasEmptyBlock();
-- pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle());
-- pBlock->PostFree(m_hAllocator);
-- VMA_HEAVY_ASSERT(pBlock->Validate());
--
-- VMA_DEBUG_LOG_FORMAT(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
--
-- const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
-- // pBlock became empty after this deallocation.
-- if (pBlock->m_pMetadata->IsEmpty())
-- {
-- // Already had empty block. We don't want to have two, so delete this one.
-- if ((hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock)
-- {
-- pBlockToDelete = pBlock;
-- Remove(pBlock);
-- }
-- // else: We now have one empty block - leave it. A hysteresis to avoid allocating whole block back and forth.
-- }
-- // pBlock didn't become empty, but we have another empty block - find and free that one.
-- // (This is optional, heuristics.)
-- else if (hadEmptyBlockBeforeFree && canDeleteBlock)
-- {
-- VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
-- if (pLastBlock->m_pMetadata->IsEmpty())
-- {
-- pBlockToDelete = pLastBlock;
-- m_Blocks.pop_back();
-- }
-- }
--
-- IncrementallySortBlocks();
-- }
--
-- // Destruction of a free block. Deferred until this point, outside of mutex
-- // lock, for performance reason.
-- if (pBlockToDelete != VMA_NULL)
-- {
-- VMA_DEBUG_LOG_FORMAT(" Deleted empty block #%u", pBlockToDelete->GetId());
-- pBlockToDelete->Destroy(m_hAllocator);
-- vma_delete(m_hAllocator, pBlockToDelete);
-- }
--
-- m_hAllocator->m_Budget.RemoveAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), hAllocation->GetSize());
-- m_hAllocator->m_AllocationObjectAllocator.Free(hAllocation);
--}
--
--VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
--{
-- VkDeviceSize result = 0;
-- for (size_t i = m_Blocks.size(); i--; )
-- {
-- result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
-- if (result >= m_PreferredBlockSize)
-- {
-- break;
-- }
-- }
-- return result;
--}
--
--void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
--{
-- for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
-- {
-- if (m_Blocks[blockIndex] == pBlock)
-- {
-- VmaVectorRemove(m_Blocks, blockIndex);
-- return;
-- }
-- }
-- VMA_ASSERT(0);
--}
--
--void VmaBlockVector::IncrementallySortBlocks()
--{
-- if (!m_IncrementalSort)
-- return;
-- if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
-- {
-- // Bubble sort only until first swap.
-- for (size_t i = 1; i < m_Blocks.size(); ++i)
-- {
-- if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
-- {
-- VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
-- return;
-- }
-- }
-- }
--}
--
--void VmaBlockVector::SortByFreeSize()
--{
-- VMA_SORT(m_Blocks.begin(), m_Blocks.end(),
-- [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool
-- {
-- return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize();
-- });
--}
--
--VkResult VmaBlockVector::AllocateFromBlock(
-- VmaDeviceMemoryBlock* pBlock,
-- VkDeviceSize size,
-- VkDeviceSize alignment,
-- VmaAllocationCreateFlags allocFlags,
-- void* pUserData,
-- VmaSuballocationType suballocType,
-- uint32_t strategy,
-- VmaAllocation* pAllocation)
--{
-- const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
--
-- VmaAllocationRequest currRequest = {};
-- if (pBlock->m_pMetadata->CreateAllocationRequest(
-- size,
-- alignment,
-- isUpperAddress,
-- suballocType,
-- strategy,
-- &currRequest))
-- {
-- return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation);
-- }
-- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
--}
--
--VkResult VmaBlockVector::CommitAllocationRequest(
-- VmaAllocationRequest& allocRequest,
-- VmaDeviceMemoryBlock* pBlock,
-- VkDeviceSize alignment,
-- VmaAllocationCreateFlags allocFlags,
-- void* pUserData,
-- VmaSuballocationType suballocType,
-- VmaAllocation* pAllocation)
--{
-- const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
-- const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
-- const bool isMappingAllowed = (allocFlags &
-- (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0;
--
-- pBlock->PostAlloc(m_hAllocator);
-- // Allocate from pCurrBlock.
-- if (mapped)
-- {
-- VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
-- if (res != VK_SUCCESS)
-- {
-- return res;
-- }
-- }
--
-- *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isMappingAllowed);
-- pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation);
-- (*pAllocation)->InitBlockAllocation(
-- pBlock,
-- allocRequest.allocHandle,
-- alignment,
-- allocRequest.size, // Not size, as actual allocation size may be larger than requested!
-- m_MemoryTypeIndex,
-- suballocType,
-- mapped);
-- VMA_HEAVY_ASSERT(pBlock->Validate());
-- if (isUserDataString)
-- (*pAllocation)->SetName(m_hAllocator, (const char*)pUserData);
-- else
-- (*pAllocation)->SetUserData(m_hAllocator, pUserData);
-- m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size);
-- if (VMA_DEBUG_INITIALIZE_ALLOCATIONS)
-- {
-- m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
-- }
-- if (IsCorruptionDetectionEnabled())
-- {
-- VkResult res = pBlock->WriteMagicValueAfterAllocation(m_hAllocator, (*pAllocation)->GetOffset(), allocRequest.size);
-- VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
-- }
-- return VK_SUCCESS;
--}
--
--VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
--{
-- VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
-- allocInfo.pNext = m_pMemoryAllocateNext;
-- allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
-- allocInfo.allocationSize = blockSize;
--
--#if VMA_BUFFER_DEVICE_ADDRESS
-- // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
-- VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
-- if (m_hAllocator->m_UseKhrBufferDeviceAddress)
-- {
-- allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
-- VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
-- }
--#endif // VMA_BUFFER_DEVICE_ADDRESS
--
--#if VMA_MEMORY_PRIORITY
-- VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
-- if (m_hAllocator->m_UseExtMemoryPriority)
-- {
-- VMA_ASSERT(m_Priority >= 0.f && m_Priority <= 1.f);
-- priorityInfo.priority = m_Priority;
-- VmaPnextChainPushFront(&allocInfo, &priorityInfo);
-- }
--#endif // VMA_MEMORY_PRIORITY
--
--#if VMA_EXTERNAL_MEMORY
-- // Attach VkExportMemoryAllocateInfoKHR if necessary.
-- VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
-- exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex);
-- if (exportMemoryAllocInfo.handleTypes != 0)
-- {
-- VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
-- }
--#endif // VMA_EXTERNAL_MEMORY
--
-- VkDeviceMemory mem = VK_NULL_HANDLE;
-- VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
-- if (res < 0)
-- {
-- return res;
-- }
--
-- // New VkDeviceMemory successfully created.
--
-- // Create new Allocation for it.
-- VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
-- pBlock->Init(
-- m_hAllocator,
-- m_hParentPool,
-- m_MemoryTypeIndex,
-- mem,
-- allocInfo.allocationSize,
-- m_NextBlockId++,
-- m_Algorithm,
-- m_BufferImageGranularity);
--
-- m_Blocks.push_back(pBlock);
-- if (pNewBlockIndex != VMA_NULL)
-- {
-- *pNewBlockIndex = m_Blocks.size() - 1;
-- }
--
-- return VK_SUCCESS;
--}
--
--bool VmaBlockVector::HasEmptyBlock()
--{
-- for (size_t index = 0, count = m_Blocks.size(); index < count; ++index)
-- {
-- VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
-- if (pBlock->m_pMetadata->IsEmpty())
-- {
-- return true;
-- }
-- }
-- return false;
--}
--
--#if VMA_STATS_STRING_ENABLED
--void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
--{
-- VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
--
--
-- json.BeginObject();
-- for (size_t i = 0; i < m_Blocks.size(); ++i)
-- {
-- json.BeginString();
-- json.ContinueString(m_Blocks[i]->GetId());
-- json.EndString();
--
-- json.BeginObject();
-- json.WriteString("MapRefCount");
-- json.WriteNumber(m_Blocks[i]->GetMapRefCount());
--
-- m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
-- json.EndObject();
-- }
-- json.EndObject();
--}
--#endif // VMA_STATS_STRING_ENABLED
--
--VkResult VmaBlockVector::CheckCorruption()
--{
-- if (!IsCorruptionDetectionEnabled())
-- {
-- return VK_ERROR_FEATURE_NOT_PRESENT;
-- }
--
-- VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
-- for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
-- {
-- VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
-- VMA_ASSERT(pBlock);
-- VkResult res = pBlock->CheckCorruption(m_hAllocator);
-- if (res != VK_SUCCESS)
-- {
-- return res;
-- }
-- }
-- return VK_SUCCESS;
--}
--
--#endif // _VMA_BLOCK_VECTOR_FUNCTIONS
--
--#ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
--VmaDefragmentationContext_T::VmaDefragmentationContext_T(
-- VmaAllocator hAllocator,
-- const VmaDefragmentationInfo& info)
-- : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass),
-- m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? UINT32_MAX : info.maxAllocationsPerPass),
-- m_BreakCallback(info.pfnBreakCallback),
-- m_BreakCallbackUserData(info.pBreakCallbackUserData),
-- m_MoveAllocator(hAllocator->GetAllocationCallbacks()),
-- m_Moves(m_MoveAllocator)
--{
-- m_Algorithm = info.flags & VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK;
--
-- if (info.pool != VMA_NULL)
-- {
-- m_BlockVectorCount = 1;
-- m_PoolBlockVector = &info.pool->m_BlockVector;
-- m_pBlockVectors = &m_PoolBlockVector;
-- m_PoolBlockVector->SetIncrementalSort(false);
-- m_PoolBlockVector->SortByFreeSize();
-- }
-- else
-- {
-- m_BlockVectorCount = hAllocator->GetMemoryTypeCount();
-- m_PoolBlockVector = VMA_NULL;
-- m_pBlockVectors = hAllocator->m_pBlockVectors;
-- for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
-- {
-- VmaBlockVector* vector = m_pBlockVectors[i];
-- if (vector != VMA_NULL)
-- {
-- vector->SetIncrementalSort(false);
-- vector->SortByFreeSize();
-- }
-- }
-- }
--
-- switch (m_Algorithm)
-- {
-- case 0: // Default algorithm
-- m_Algorithm = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT;
-- m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
-- break;
-- case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
-- m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
-- break;
-- case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
-- if (hAllocator->GetBufferImageGranularity() > 1)
-- {
-- m_AlgorithmState = vma_new_array(hAllocator, StateExtensive, m_BlockVectorCount);
-- }
-- break;
-- }
--}
--
--VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
--{
-- if (m_PoolBlockVector != VMA_NULL)
-- {
-- m_PoolBlockVector->SetIncrementalSort(true);
-- }
-- else
-- {
-- for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
-- {
-- VmaBlockVector* vector = m_pBlockVectors[i];
-- if (vector != VMA_NULL)
-- vector->SetIncrementalSort(true);
-- }
-- }
--
-- if (m_AlgorithmState)
-- {
-- switch (m_Algorithm)
-- {
-- case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
-- vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateBalanced*>(m_AlgorithmState), m_BlockVectorCount);
-- break;
-- case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
-- vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateExtensive*>(m_AlgorithmState), m_BlockVectorCount);
-- break;
-- default:
-- VMA_ASSERT(0);
-- }
-- }
--}
--
--VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo)
--{
-- if (m_PoolBlockVector != VMA_NULL)
-- {
-- VmaMutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->GetAllocator()->m_UseMutex);
--
-- if (m_PoolBlockVector->GetBlockCount() > 1)
-- ComputeDefragmentation(*m_PoolBlockVector, 0);
-- else if (m_PoolBlockVector->GetBlockCount() == 1)
-- ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0));
-- }
-- else
-- {
-- for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
-- {
-- if (m_pBlockVectors[i] != VMA_NULL)
-- {
-- VmaMutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->GetAllocator()->m_UseMutex);
--
-- if (m_pBlockVectors[i]->GetBlockCount() > 1)
-- {
-- if (ComputeDefragmentation(*m_pBlockVectors[i], i))
-- break;
-- }
-- else if (m_pBlockVectors[i]->GetBlockCount() == 1)
-- {
-- if (ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0)))
-- break;
-- }
-- }
-- }
-- }
--
-- moveInfo.moveCount = static_cast<uint32_t>(m_Moves.size());
-- if (moveInfo.moveCount > 0)
-- {
-- moveInfo.pMoves = m_Moves.data();
-- return VK_INCOMPLETE;
-- }
--
-- moveInfo.pMoves = VMA_NULL;
-- return VK_SUCCESS;
--}
--
--VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo)
--{
-- VMA_ASSERT(moveInfo.moveCount > 0 ? moveInfo.pMoves != VMA_NULL : true);
--
-- VkResult result = VK_SUCCESS;
-- VmaStlAllocator<FragmentedBlock> blockAllocator(m_MoveAllocator.m_pCallbacks);
-- VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> immovableBlocks(blockAllocator);
-- VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> mappedBlocks(blockAllocator);
--
-- VmaAllocator allocator = VMA_NULL;
-- for (uint32_t i = 0; i < moveInfo.moveCount; ++i)
-- {
-- VmaDefragmentationMove& move = moveInfo.pMoves[i];
-- size_t prevCount = 0, currentCount = 0;
-- VkDeviceSize freedBlockSize = 0;
--
-- uint32_t vectorIndex;
-- VmaBlockVector* vector;
-- if (m_PoolBlockVector != VMA_NULL)
-- {
-- vectorIndex = 0;
-- vector = m_PoolBlockVector;
-- }
-- else
-- {
-- vectorIndex = move.srcAllocation->GetMemoryTypeIndex();
-- vector = m_pBlockVectors[vectorIndex];
-- VMA_ASSERT(vector != VMA_NULL);
-- }
--
-- switch (move.operation)
-- {
-- case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY:
-- {
-- uint8_t mapCount = move.srcAllocation->SwapBlockAllocation(vector->m_hAllocator, move.dstTmpAllocation);
-- if (mapCount > 0)
-- {
-- allocator = vector->m_hAllocator;
-- VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock();
-- bool notPresent = true;
-- for (FragmentedBlock& block : mappedBlocks)
-- {
-- if (block.block == newMapBlock)
-- {
-- notPresent = false;
-- block.data += mapCount;
-- break;
-- }
-- }
-- if (notPresent)
-- mappedBlocks.push_back({ mapCount, newMapBlock });
-- }
--
-- // Scope for locks, Free have it's own lock
-- {
-- VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-- prevCount = vector->GetBlockCount();
-- freedBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
-- }
-- vector->Free(move.dstTmpAllocation);
-- {
-- VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-- currentCount = vector->GetBlockCount();
-- }
--
-- result = VK_INCOMPLETE;
-- break;
-- }
-- case VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE:
-- {
-- m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
-- --m_PassStats.allocationsMoved;
-- vector->Free(move.dstTmpAllocation);
--
-- VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock();
-- bool notPresent = true;
-- for (const FragmentedBlock& block : immovableBlocks)
-- {
-- if (block.block == newBlock)
-- {
-- notPresent = false;
-- break;
-- }
-- }
-- if (notPresent)
-- immovableBlocks.push_back({ vectorIndex, newBlock });
-- break;
-- }
-- case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY:
-- {
-- m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
-- --m_PassStats.allocationsMoved;
-- // Scope for locks, Free have it's own lock
-- {
-- VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-- prevCount = vector->GetBlockCount();
-- freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize();
-- }
-- vector->Free(move.srcAllocation);
-- {
-- VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-- currentCount = vector->GetBlockCount();
-- }
-- freedBlockSize *= prevCount - currentCount;
--
-- VkDeviceSize dstBlockSize;
-- {
-- VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-- dstBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
-- }
-- vector->Free(move.dstTmpAllocation);
-- {
-- VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-- freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount());
-- currentCount = vector->GetBlockCount();
-- }
--
-- result = VK_INCOMPLETE;
-- break;
-- }
-- default:
-- VMA_ASSERT(0);
-- }
--
-- if (prevCount > currentCount)
-- {
-- size_t freedBlocks = prevCount - currentCount;
-- m_PassStats.deviceMemoryBlocksFreed += static_cast<uint32_t>(freedBlocks);
-- m_PassStats.bytesFreed += freedBlockSize;
-- }
--
-- if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT &&
-- m_AlgorithmState != VMA_NULL)
-- {
-- // Avoid unnecessary tries to allocate when new free block is available
-- StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[vectorIndex];
-- if (state.firstFreeBlock != SIZE_MAX)
-- {
-- const size_t diff = prevCount - currentCount;
-- if (state.firstFreeBlock >= diff)
-- {
-- state.firstFreeBlock -= diff;
-- if (state.firstFreeBlock != 0)
-- state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty();
-- }
-- else
-- state.firstFreeBlock = 0;
-- }
-- }
-- }
-- moveInfo.moveCount = 0;
-- moveInfo.pMoves = VMA_NULL;
-- m_Moves.clear();
--
-- // Update stats
-- m_GlobalStats.allocationsMoved += m_PassStats.allocationsMoved;
-- m_GlobalStats.bytesFreed += m_PassStats.bytesFreed;
-- m_GlobalStats.bytesMoved += m_PassStats.bytesMoved;
-- m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed;
-- m_PassStats = { 0 };
--
-- // Move blocks with immovable allocations according to algorithm
-- if (immovableBlocks.size() > 0)
-- {
-- do
-- {
-- if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT)
-- {
-- if (m_AlgorithmState != VMA_NULL)
-- {
-- bool swapped = false;
-- // Move to the start of free blocks range
-- for (const FragmentedBlock& block : immovableBlocks)
-- {
-- StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[block.data];
-- if (state.operation != StateExtensive::Operation::Cleanup)
-- {
-- VmaBlockVector* vector = m_pBlockVectors[block.data];
-- VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
--
-- for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i)
-- {
-- if (vector->GetBlock(i) == block.block)
-- {
-- VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]);
-- if (state.firstFreeBlock != SIZE_MAX)
-- {
-- if (i + 1 < state.firstFreeBlock)
-- {
-- if (state.firstFreeBlock > 1)
-- VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]);
-- else
-- --state.firstFreeBlock;
-- }
-- }
-- swapped = true;
-- break;
-- }
-- }
-- }
-- }
-- if (swapped)
-- result = VK_INCOMPLETE;
-- break;
-- }
-- }
--
-- // Move to the beginning
-- for (const FragmentedBlock& block : immovableBlocks)
-- {
-- VmaBlockVector* vector = m_pBlockVectors[block.data];
-- VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
--
-- for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i)
-- {
-- if (vector->GetBlock(i) == block.block)
-- {
-- VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]);
-- break;
-- }
-- }
-- }
-- } while (false);
-- }
--
-- // Bulk-map destination blocks
-- for (const FragmentedBlock& block : mappedBlocks)
-- {
-- VkResult res = block.block->Map(allocator, block.data, VMA_NULL);
-- VMA_ASSERT(res == VK_SUCCESS);
-- }
-- return result;
--}
--
--bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index)
--{
-- switch (m_Algorithm)
-- {
-- case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT:
-- return ComputeDefragmentation_Fast(vector);
-- case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
-- return ComputeDefragmentation_Balanced(vector, index, true);
-- case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT:
-- return ComputeDefragmentation_Full(vector);
-- case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
-- return ComputeDefragmentation_Extensive(vector, index);
-- default:
-- VMA_ASSERT(0);
-- return ComputeDefragmentation_Balanced(vector, index, true);
-- }
--}
--
--VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData(
-- VmaAllocHandle handle, VmaBlockMetadata* metadata)
--{
-- MoveAllocationData moveData;
-- moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle);
-- moveData.size = moveData.move.srcAllocation->GetSize();
-- moveData.alignment = moveData.move.srcAllocation->GetAlignment();
-- moveData.type = moveData.move.srcAllocation->GetSuballocationType();
-- moveData.flags = 0;
--
-- if (moveData.move.srcAllocation->IsPersistentMap())
-- moveData.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
-- if (moveData.move.srcAllocation->IsMappingAllowed())
-- moveData.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
--
-- return moveData;
--}
--
--VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes)
--{
-- // Check custom criteria if exists
-- if (m_BreakCallback && m_BreakCallback(m_BreakCallbackUserData))
-- return CounterStatus::End;
--
-- // Ignore allocation if will exceed max size for copy
-- if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes)
-- {
-- if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE)
-- return CounterStatus::Ignore;
-- else
-- return CounterStatus::End;
-- }
-- else
-- m_IgnoredAllocs = 0;
-- return CounterStatus::Pass;
--}
--
--bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes)
--{
-- m_PassStats.bytesMoved += bytes;
-- // Early return when max found
-- if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes)
-- {
-- VMA_ASSERT((m_PassStats.allocationsMoved == m_MaxPassAllocations ||
-- m_PassStats.bytesMoved == m_MaxPassBytes) && "Exceeded maximal pass threshold!");
-- return true;
-- }
-- return false;
--}
--
--bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block)
--{
-- VmaBlockMetadata* metadata = block->m_pMetadata;
--
-- for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
-- handle != VK_NULL_HANDLE;
-- handle = metadata->GetNextAllocation(handle))
-- {
-- MoveAllocationData moveData = GetMoveData(handle, metadata);
-- // Ignore newly created allocations by defragmentation algorithm
-- if (moveData.move.srcAllocation->GetUserData() == this)
-- continue;
-- switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
-- {
-- case CounterStatus::Ignore:
-- continue;
-- case CounterStatus::End:
-- return true;
-- case CounterStatus::Pass:
-- break;
-- default:
-- VMA_ASSERT(0);
-- }
--
-- VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
-- if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
-- {
-- VmaAllocationRequest request = {};
-- if (metadata->CreateAllocationRequest(
-- moveData.size,
-- moveData.alignment,
-- false,
-- moveData.type,
-- VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
-- &request))
-- {
-- if (metadata->GetAllocationOffset(request.allocHandle) < offset)
-- {
-- if (vector.CommitAllocationRequest(
-- request,
-- block,
-- moveData.alignment,
-- moveData.flags,
-- this,
-- moveData.type,
-- &moveData.move.dstTmpAllocation) == VK_SUCCESS)
-- {
-- m_Moves.push_back(moveData.move);
-- if (IncrementCounters(moveData.size))
-- return true;
-- }
-- }
-- }
-- }
-- }
-- return false;
--}
--
--bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector)
--{
-- for (; start < end; ++start)
-- {
-- VmaDeviceMemoryBlock* dstBlock = vector.GetBlock(start);
-- if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size)
-- {
-- if (vector.AllocateFromBlock(dstBlock,
-- data.size,
-- data.alignment,
-- data.flags,
-- this,
-- data.type,
-- 0,
-- &data.move.dstTmpAllocation) == VK_SUCCESS)
-- {
-- m_Moves.push_back(data.move);
-- if (IncrementCounters(data.size))
-- return true;
-- break;
-- }
-- }
-- }
-- return false;
--}
--
--bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector)
--{
-- // Move only between blocks
--
-- // Go through allocations in last blocks and try to fit them inside first ones
-- for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
-- {
-- VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
--
-- for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
-- handle != VK_NULL_HANDLE;
-- handle = metadata->GetNextAllocation(handle))
-- {
-- MoveAllocationData moveData = GetMoveData(handle, metadata);
-- // Ignore newly created allocations by defragmentation algorithm
-- if (moveData.move.srcAllocation->GetUserData() == this)
-- continue;
-- switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
-- {
-- case CounterStatus::Ignore:
-- continue;
-- case CounterStatus::End:
-- return true;
-- case CounterStatus::Pass:
-- break;
-- default:
-- VMA_ASSERT(0);
-- }
--
-- // Check all previous blocks for free space
-- if (AllocInOtherBlock(0, i, moveData, vector))
-- return true;
-- }
-- }
-- return false;
--}
--
--bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update)
--{
-- // Go over every allocation and try to fit it in previous blocks at lowest offsets,
-- // if not possible: realloc within single block to minimize offset (exclude offset == 0),
-- // but only if there are noticeable gaps between them (some heuristic, ex. average size of allocation in block)
-- VMA_ASSERT(m_AlgorithmState != VMA_NULL);
--
-- StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index];
-- if (update && vectorState.avgAllocSize == UINT64_MAX)
-- UpdateVectorStatistics(vector, vectorState);
--
-- const size_t startMoveCount = m_Moves.size();
-- VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2;
-- for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
-- {
-- VmaDeviceMemoryBlock* block = vector.GetBlock(i);
-- VmaBlockMetadata* metadata = block->m_pMetadata;
-- VkDeviceSize prevFreeRegionSize = 0;
--
-- for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
-- handle != VK_NULL_HANDLE;
-- handle = metadata->GetNextAllocation(handle))
-- {
-- MoveAllocationData moveData = GetMoveData(handle, metadata);
-- // Ignore newly created allocations by defragmentation algorithm
-- if (moveData.move.srcAllocation->GetUserData() == this)
-- continue;
-- switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
-- {
-- case CounterStatus::Ignore:
-- continue;
-- case CounterStatus::End:
-- return true;
-- case CounterStatus::Pass:
-- break;
-- default:
-- VMA_ASSERT(0);
-- }
--
-- // Check all previous blocks for free space
-- const size_t prevMoveCount = m_Moves.size();
-- if (AllocInOtherBlock(0, i, moveData, vector))
-- return true;
--
-- VkDeviceSize nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle);
-- // If no room found then realloc within block for lower offset
-- VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
-- if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
-- {
-- // Check if realloc will make sense
-- if (prevFreeRegionSize >= minimalFreeRegion ||
-- nextFreeRegionSize >= minimalFreeRegion ||
-- moveData.size <= vectorState.avgFreeSize ||
-- moveData.size <= vectorState.avgAllocSize)
-- {
-- VmaAllocationRequest request = {};
-- if (metadata->CreateAllocationRequest(
-- moveData.size,
-- moveData.alignment,
-- false,
-- moveData.type,
-- VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
-- &request))
-- {
-- if (metadata->GetAllocationOffset(request.allocHandle) < offset)
-- {
-- if (vector.CommitAllocationRequest(
-- request,
-- block,
-- moveData.alignment,
-- moveData.flags,
-- this,
-- moveData.type,
-- &moveData.move.dstTmpAllocation) == VK_SUCCESS)
-- {
-- m_Moves.push_back(moveData.move);
-- if (IncrementCounters(moveData.size))
-- return true;
-- }
-- }
-- }
-- }
-- }
-- prevFreeRegionSize = nextFreeRegionSize;
-- }
-- }
--
-- // No moves performed, update statistics to current vector state
-- if (startMoveCount == m_Moves.size() && !update)
-- {
-- vectorState.avgAllocSize = UINT64_MAX;
-- return ComputeDefragmentation_Balanced(vector, index, false);
-- }
-- return false;
--}
--
--bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector)
--{
-- // Go over every allocation and try to fit it in previous blocks at lowest offsets,
-- // if not possible: realloc within single block to minimize offset (exclude offset == 0)
--
-- for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
-- {
-- VmaDeviceMemoryBlock* block = vector.GetBlock(i);
-- VmaBlockMetadata* metadata = block->m_pMetadata;
--
-- for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
-- handle != VK_NULL_HANDLE;
-- handle = metadata->GetNextAllocation(handle))
-- {
-- MoveAllocationData moveData = GetMoveData(handle, metadata);
-- // Ignore newly created allocations by defragmentation algorithm
-- if (moveData.move.srcAllocation->GetUserData() == this)
-- continue;
-- switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
-- {
-- case CounterStatus::Ignore:
-- continue;
-- case CounterStatus::End:
-- return true;
-- case CounterStatus::Pass:
-- break;
-- default:
-- VMA_ASSERT(0);
-- }
--
-- // Check all previous blocks for free space
-- const size_t prevMoveCount = m_Moves.size();
-- if (AllocInOtherBlock(0, i, moveData, vector))
-- return true;
--
-- // If no room found then realloc within block for lower offset
-- VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
-- if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
-- {
-- VmaAllocationRequest request = {};
-- if (metadata->CreateAllocationRequest(
-- moveData.size,
-- moveData.alignment,
-- false,
-- moveData.type,
-- VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
-- &request))
-- {
-- if (metadata->GetAllocationOffset(request.allocHandle) < offset)
-- {
-- if (vector.CommitAllocationRequest(
-- request,
-- block,
-- moveData.alignment,
-- moveData.flags,
-- this,
-- moveData.type,
-- &moveData.move.dstTmpAllocation) == VK_SUCCESS)
-- {
-- m_Moves.push_back(moveData.move);
-- if (IncrementCounters(moveData.size))
-- return true;
-- }
-- }
-- }
-- }
-- }
-- }
-- return false;
--}
--
--bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index)
--{
-- // First free single block, then populate it to the brim, then free another block, and so on
--
-- // Fallback to previous algorithm since without granularity conflicts it can achieve max packing
-- if (vector.m_BufferImageGranularity == 1)
-- return ComputeDefragmentation_Full(vector);
--
-- VMA_ASSERT(m_AlgorithmState != VMA_NULL);
--
-- StateExtensive& vectorState = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[index];
--
-- bool texturePresent = false, bufferPresent = false, otherPresent = false;
-- switch (vectorState.operation)
-- {
-- case StateExtensive::Operation::Done: // Vector defragmented
-- return false;
-- case StateExtensive::Operation::FindFreeBlockBuffer:
-- case StateExtensive::Operation::FindFreeBlockTexture:
-- case StateExtensive::Operation::FindFreeBlockAll:
-- {
-- // No more blocks to free, just perform fast realloc and move to cleanup
-- if (vectorState.firstFreeBlock == 0)
-- {
-- vectorState.operation = StateExtensive::Operation::Cleanup;
-- return ComputeDefragmentation_Fast(vector);
-- }
--
-- // No free blocks, have to clear last one
-- size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? vector.GetBlockCount() : vectorState.firstFreeBlock) - 1;
-- VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata;
--
-- const size_t prevMoveCount = m_Moves.size();
-- for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin();
-- handle != VK_NULL_HANDLE;
-- handle = freeMetadata->GetNextAllocation(handle))
-- {
-- MoveAllocationData moveData = GetMoveData(handle, freeMetadata);
-- switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
-- {
-- case CounterStatus::Ignore:
-- continue;
-- case CounterStatus::End:
-- return true;
-- case CounterStatus::Pass:
-- break;
-- default:
-- VMA_ASSERT(0);
-- }
--
-- // Check all previous blocks for free space
-- if (AllocInOtherBlock(0, last, moveData, vector))
-- {
-- // Full clear performed already
-- if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE)
-- vectorState.firstFreeBlock = last;
-- return true;
-- }
-- }
--
-- if (prevMoveCount == m_Moves.size())
-- {
-- // Cannot perform full clear, have to move data in other blocks around
-- if (last != 0)
-- {
-- for (size_t i = last - 1; i; --i)
-- {
-- if (ReallocWithinBlock(vector, vector.GetBlock(i)))
-- return true;
-- }
-- }
--
-- if (prevMoveCount == m_Moves.size())
-- {
-- // No possible reallocs within blocks, try to move them around fast
-- return ComputeDefragmentation_Fast(vector);
-- }
-- }
-- else
-- {
-- switch (vectorState.operation)
-- {
-- case StateExtensive::Operation::FindFreeBlockBuffer:
-- vectorState.operation = StateExtensive::Operation::MoveBuffers;
-- break;
-- case StateExtensive::Operation::FindFreeBlockTexture:
-- vectorState.operation = StateExtensive::Operation::MoveTextures;
-- break;
-- case StateExtensive::Operation::FindFreeBlockAll:
-- vectorState.operation = StateExtensive::Operation::MoveAll;
-- break;
-- default:
-- VMA_ASSERT(0);
-- vectorState.operation = StateExtensive::Operation::MoveTextures;
-- }
-- vectorState.firstFreeBlock = last;
-- // Nothing done, block found without reallocations, can perform another reallocs in same pass
-- return ComputeDefragmentation_Extensive(vector, index);
-- }
-- break;
-- }
-- case StateExtensive::Operation::MoveTextures:
-- {
-- if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector,
-- vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
-- {
-- if (texturePresent)
-- {
-- vectorState.operation = StateExtensive::Operation::FindFreeBlockTexture;
-- return ComputeDefragmentation_Extensive(vector, index);
-- }
--
-- if (!bufferPresent && !otherPresent)
-- {
-- vectorState.operation = StateExtensive::Operation::Cleanup;
-- break;
-- }
--
-- // No more textures to move, check buffers
-- vectorState.operation = StateExtensive::Operation::MoveBuffers;
-- bufferPresent = false;
-- otherPresent = false;
-- }
-- else
-- break;
-- VMA_FALLTHROUGH; // Fallthrough
-- }
-- case StateExtensive::Operation::MoveBuffers:
-- {
-- if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector,
-- vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
-- {
-- if (bufferPresent)
-- {
-- vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
-- return ComputeDefragmentation_Extensive(vector, index);
-- }
--
-- if (!otherPresent)
-- {
-- vectorState.operation = StateExtensive::Operation::Cleanup;
-- break;
-- }
--
-- // No more buffers to move, check all others
-- vectorState.operation = StateExtensive::Operation::MoveAll;
-- otherPresent = false;
-- }
-- else
-- break;
-- VMA_FALLTHROUGH; // Fallthrough
-- }
-- case StateExtensive::Operation::MoveAll:
-- {
-- if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, vector,
-- vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
-- {
-- if (otherPresent)
-- {
-- vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
-- return ComputeDefragmentation_Extensive(vector, index);
-- }
-- // Everything moved
-- vectorState.operation = StateExtensive::Operation::Cleanup;
-- }
-- break;
-- }
-- case StateExtensive::Operation::Cleanup:
-- // Cleanup is handled below so that other operations may reuse the cleanup code. This case is here to prevent the unhandled enum value warning (C4062).
-- break;
-- }
--
-- if (vectorState.operation == StateExtensive::Operation::Cleanup)
-- {
-- // All other work done, pack data in blocks even tighter if possible
-- const size_t prevMoveCount = m_Moves.size();
-- for (size_t i = 0; i < vector.GetBlockCount(); ++i)
-- {
-- if (ReallocWithinBlock(vector, vector.GetBlock(i)))
-- return true;
-- }
--
-- if (prevMoveCount == m_Moves.size())
-- vectorState.operation = StateExtensive::Operation::Done;
-- }
-- return false;
--}
--
--void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state)
--{
-- size_t allocCount = 0;
-- size_t freeCount = 0;
-- state.avgFreeSize = 0;
-- state.avgAllocSize = 0;
--
-- for (size_t i = 0; i < vector.GetBlockCount(); ++i)
-- {
-- VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
--
-- allocCount += metadata->GetAllocationCount();
-- freeCount += metadata->GetFreeRegionsCount();
-- state.avgFreeSize += metadata->GetSumFreeSize();
-- state.avgAllocSize += metadata->GetSize();
-- }
--
-- state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount;
-- state.avgFreeSize /= freeCount;
--}
--
--bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType,
-- VmaBlockVector& vector, size_t firstFreeBlock,
-- bool& texturePresent, bool& bufferPresent, bool& otherPresent)
--{
-- const size_t prevMoveCount = m_Moves.size();
-- for (size_t i = firstFreeBlock ; i;)
-- {
-- VmaDeviceMemoryBlock* block = vector.GetBlock(--i);
-- VmaBlockMetadata* metadata = block->m_pMetadata;
--
-- for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
-- handle != VK_NULL_HANDLE;
-- handle = metadata->GetNextAllocation(handle))
-- {
-- MoveAllocationData moveData = GetMoveData(handle, metadata);
-- // Ignore newly created allocations by defragmentation algorithm
-- if (moveData.move.srcAllocation->GetUserData() == this)
-- continue;
-- switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
-- {
-- case CounterStatus::Ignore:
-- continue;
-- case CounterStatus::End:
-- return true;
-- case CounterStatus::Pass:
-- break;
-- default:
-- VMA_ASSERT(0);
-- }
--
-- // Move only single type of resources at once
-- if (!VmaIsBufferImageGranularityConflict(moveData.type, currentType))
-- {
-- // Try to fit allocation into free blocks
-- if (AllocInOtherBlock(firstFreeBlock, vector.GetBlockCount(), moveData, vector))
-- return false;
-- }
--
-- if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL))
-- texturePresent = true;
-- else if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_BUFFER))
-- bufferPresent = true;
-- else
-- otherPresent = true;
-- }
-- }
-- return prevMoveCount == m_Moves.size();
--}
--#endif // _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
--
--#ifndef _VMA_POOL_T_FUNCTIONS
--VmaPool_T::VmaPool_T(
-- VmaAllocator hAllocator,
-- const VmaPoolCreateInfo& createInfo,
-- VkDeviceSize preferredBlockSize)
-- : m_BlockVector(
-- hAllocator,
-- this, // hParentPool
-- createInfo.memoryTypeIndex,
-- createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
-- createInfo.minBlockCount,
-- createInfo.maxBlockCount,
-- (createInfo.flags& VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
-- createInfo.blockSize != 0, // explicitBlockSize
-- createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
-- createInfo.priority,
-- VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment),
-- createInfo.pMemoryAllocateNext),
-- m_Id(0),
-- m_Name(VMA_NULL) {}
--
--VmaPool_T::~VmaPool_T()
--{
-- VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL);
--}
--
--void VmaPool_T::SetName(const char* pName)
--{
-- const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
-- VmaFreeString(allocs, m_Name);
--
-- if (pName != VMA_NULL)
-- {
-- m_Name = VmaCreateStringCopy(allocs, pName);
-- }
-- else
-- {
-- m_Name = VMA_NULL;
-- }
--}
--#endif // _VMA_POOL_T_FUNCTIONS
--
--#ifndef _VMA_ALLOCATOR_T_FUNCTIONS
--VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
-- m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
-- m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
-- m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
-- m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
-- m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
-- m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
-- m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
-- m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0),
-- m_hDevice(pCreateInfo->device),
-- m_hInstance(pCreateInfo->instance),
-- m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
-- m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
-- *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
-- m_AllocationObjectAllocator(&m_AllocationCallbacks),
-- m_HeapSizeLimitMask(0),
-- m_DeviceMemoryCount(0),
-- m_PreferredLargeHeapBlockSize(0),
-- m_PhysicalDevice(pCreateInfo->physicalDevice),
-- m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
-- m_NextPoolId(0),
-- m_GlobalMemoryTypeBits(UINT32_MAX)
--{
-- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-- {
-- m_UseKhrDedicatedAllocation = false;
-- m_UseKhrBindMemory2 = false;
-- }
--
-- if(VMA_DEBUG_DETECT_CORRUPTION)
-- {
-- // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
-- VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
-- }
--
-- VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
--
-- if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
-- {
--#if !(VMA_DEDICATED_ALLOCATION)
-- if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
-- {
-- VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
-- }
--#endif
--#if !(VMA_BIND_MEMORY2)
-- if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
-- {
-- VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
-- }
--#endif
-- }
--#if !(VMA_MEMORY_BUDGET)
-- if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
-- {
-- VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
-- }
--#endif
--#if !(VMA_BUFFER_DEVICE_ADDRESS)
-- if(m_UseKhrBufferDeviceAddress)
-- {
-- VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
-- }
--#endif
--#if VMA_VULKAN_VERSION < 1003000
-- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
-- {
-- VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_3 but required Vulkan version is disabled by preprocessor macros.");
-- }
--#endif
--#if VMA_VULKAN_VERSION < 1002000
-- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
-- {
-- VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
-- }
--#endif
--#if VMA_VULKAN_VERSION < 1001000
-- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-- {
-- VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
-- }
--#endif
--#if !(VMA_MEMORY_PRIORITY)
-- if(m_UseExtMemoryPriority)
-- {
-- VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
-- }
--#endif
--
-- memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
-- memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
-- memset(&m_MemProps, 0, sizeof(m_MemProps));
--
-- memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
-- memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
--
--#if VMA_EXTERNAL_MEMORY
-- memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes));
--#endif // #if VMA_EXTERNAL_MEMORY
--
-- if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
-- {
-- m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
-- m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
-- m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
-- }
--
-- ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
--
-- (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
-- (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
--
-- VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT));
-- VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
-- VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
-- VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
--
-- m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
-- pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
--
-- m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
--
--#if VMA_EXTERNAL_MEMORY
-- if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL)
-- {
-- memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes,
-- sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount());
-- }
--#endif // #if VMA_EXTERNAL_MEMORY
--
-- if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
-- {
-- for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
-- {
-- const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
-- if(limit != VK_WHOLE_SIZE)
-- {
-- m_HeapSizeLimitMask |= 1u << heapIndex;
-- if(limit < m_MemProps.memoryHeaps[heapIndex].size)
-- {
-- m_MemProps.memoryHeaps[heapIndex].size = limit;
-- }
-- }
-- }
-- }
--
-- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-- {
-- // Create only supported types
-- if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0)
-- {
-- const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
-- m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
-- this,
-- VK_NULL_HANDLE, // hParentPool
-- memTypeIndex,
-- preferredBlockSize,
-- 0,
-- SIZE_MAX,
-- GetBufferImageGranularity(),
-- false, // explicitBlockSize
-- 0, // algorithm
-- 0.5f, // priority (0.5 is the default per Vulkan spec)
-- GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment
-- VMA_NULL); // // pMemoryAllocateNext
-- // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
-- // becase minBlockCount is 0.
-- }
-- }
--}
--
--VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
--{
-- VkResult res = VK_SUCCESS;
--
--#if VMA_MEMORY_BUDGET
-- if(m_UseExtMemoryBudget)
-- {
-- UpdateVulkanBudget();
-- }
--#endif // #if VMA_MEMORY_BUDGET
--
-- return res;
--}
--
--VmaAllocator_T::~VmaAllocator_T()
--{
-- VMA_ASSERT(m_Pools.IsEmpty());
--
-- for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
-- {
-- vma_delete(this, m_pBlockVectors[memTypeIndex]);
-- }
--}
--
--void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
--{
--#if VMA_STATIC_VULKAN_FUNCTIONS == 1
-- ImportVulkanFunctions_Static();
--#endif
--
-- if(pVulkanFunctions != VMA_NULL)
-- {
-- ImportVulkanFunctions_Custom(pVulkanFunctions);
-- }
--
--#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
-- ImportVulkanFunctions_Dynamic();
--#endif
--
-- ValidateVulkanFunctions();
--}
--
--#if VMA_STATIC_VULKAN_FUNCTIONS == 1
--
--void VmaAllocator_T::ImportVulkanFunctions_Static()
--{
-- // Vulkan 1.0
-- m_VulkanFunctions.vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)vkGetInstanceProcAddr;
-- m_VulkanFunctions.vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)vkGetDeviceProcAddr;
-- m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
-- m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
-- m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
-- m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
-- m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
-- m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
-- m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
-- m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
-- m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
-- m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
-- m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
-- m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
-- m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
-- m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
-- m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
-- m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
-- m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
--
-- // Vulkan 1.1
--#if VMA_VULKAN_VERSION >= 1001000
-- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-- {
-- m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
-- m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
-- m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
-- m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
-- }
--#endif
--
--#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
-- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-- {
-- m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
-- }
--#endif
--
--#if VMA_VULKAN_VERSION >= 1003000
-- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
-- {
-- m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements;
-- m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements;
-- }
--#endif
--}
--
--#endif // VMA_STATIC_VULKAN_FUNCTIONS == 1
--
--void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
--{
-- VMA_ASSERT(pVulkanFunctions != VMA_NULL);
--
--#define VMA_COPY_IF_NOT_NULL(funcName) \
-- if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
--
-- VMA_COPY_IF_NOT_NULL(vkGetInstanceProcAddr);
-- VMA_COPY_IF_NOT_NULL(vkGetDeviceProcAddr);
-- VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
-- VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
-- VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
-- VMA_COPY_IF_NOT_NULL(vkFreeMemory);
-- VMA_COPY_IF_NOT_NULL(vkMapMemory);
-- VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
-- VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
-- VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
-- VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
-- VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
-- VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
-- VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
-- VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
-- VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
-- VMA_COPY_IF_NOT_NULL(vkCreateImage);
-- VMA_COPY_IF_NOT_NULL(vkDestroyImage);
-- VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
--
--#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-- VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
-- VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
--#endif
--
--#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
-- VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
-- VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
--#endif
--
--#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
-- VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
--#endif
--
--#if VMA_VULKAN_VERSION >= 1003000
-- VMA_COPY_IF_NOT_NULL(vkGetDeviceBufferMemoryRequirements);
-- VMA_COPY_IF_NOT_NULL(vkGetDeviceImageMemoryRequirements);
--#endif
--
--#undef VMA_COPY_IF_NOT_NULL
--}
--
--#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
--
--void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
--{
-- VMA_ASSERT(m_VulkanFunctions.vkGetInstanceProcAddr && m_VulkanFunctions.vkGetDeviceProcAddr &&
-- "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass "
-- "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. "
-- "Other members can be null.");
--
--#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
-- if(m_VulkanFunctions.memberName == VMA_NULL) \
-- m_VulkanFunctions.memberName = \
-- (functionPointerType)m_VulkanFunctions.vkGetInstanceProcAddr(m_hInstance, functionNameString);
--#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
-- if(m_VulkanFunctions.memberName == VMA_NULL) \
-- m_VulkanFunctions.memberName = \
-- (functionPointerType)m_VulkanFunctions.vkGetDeviceProcAddr(m_hDevice, functionNameString);
--
-- VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
-- VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
-- VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
-- VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
-- VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
-- VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
-- VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
-- VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
-- VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
-- VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
-- VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
-- VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
-- VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
-- VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
-- VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
-- VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
-- VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
--
--#if VMA_VULKAN_VERSION >= 1001000
-- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-- {
-- VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
-- VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
-- VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
-- VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
-- }
--#endif
--
--#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
-- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-- {
-- VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
-- }
-- else if(m_UseExtMemoryBudget)
-- {
-- VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2KHR");
-- }
--#endif
--
--#if VMA_DEDICATED_ALLOCATION
-- if(m_UseKhrDedicatedAllocation)
-- {
-- VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
-- VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
-- }
--#endif
--
--#if VMA_BIND_MEMORY2
-- if(m_UseKhrBindMemory2)
-- {
-- VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
-- VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
-- }
--#endif // #if VMA_BIND_MEMORY2
--
--#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
-- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-- {
-- VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2");
-- }
-- else if(m_UseExtMemoryBudget)
-- {
-- VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
-- }
--#endif // #if VMA_MEMORY_BUDGET
--
--#if VMA_VULKAN_VERSION >= 1003000
-- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
-- {
-- VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, "vkGetDeviceBufferMemoryRequirements");
-- VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements");
-- }
--#endif
--
--#undef VMA_FETCH_DEVICE_FUNC
--#undef VMA_FETCH_INSTANCE_FUNC
--}
--
--#endif // VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
--
--void VmaAllocator_T::ValidateVulkanFunctions()
--{
-- VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
--
--#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
-- {
-- VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
-- }
--#endif
--
--#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
-- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
-- {
-- VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
-- }
--#endif
--
--#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
-- if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-- {
-- VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
-- }
--#endif
--
--#if VMA_VULKAN_VERSION >= 1003000
-- if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
-- {
-- VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL);
-- VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL);
-- }
--#endif
--}
--
--VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
--{
-- const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
-- const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
-- const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
-- return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
--}
--
--VkResult VmaAllocator_T::AllocateMemoryOfType(
-- VmaPool pool,
-- VkDeviceSize size,
-- VkDeviceSize alignment,
-- bool dedicatedPreferred,
-- VkBuffer dedicatedBuffer,
-- VkImage dedicatedImage,
-- VkFlags dedicatedBufferImageUsage,
-- const VmaAllocationCreateInfo& createInfo,
-- uint32_t memTypeIndex,
-- VmaSuballocationType suballocType,
-- VmaDedicatedAllocationList& dedicatedAllocations,
-- VmaBlockVector& blockVector,
-- size_t allocationCount,
-- VmaAllocation* pAllocations)
--{
-- VMA_ASSERT(pAllocations != VMA_NULL);
-- VMA_DEBUG_LOG_FORMAT(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
--
-- VmaAllocationCreateInfo finalCreateInfo = createInfo;
-- VkResult res = CalcMemTypeParams(
-- finalCreateInfo,
-- memTypeIndex,
-- size,
-- allocationCount);
-- if(res != VK_SUCCESS)
-- return res;
--
-- if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
-- {
-- return AllocateDedicatedMemory(
-- pool,
-- size,
-- suballocType,
-- dedicatedAllocations,
-- memTypeIndex,
-- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
-- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
-- (finalCreateInfo.flags &
-- (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
-- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
-- finalCreateInfo.pUserData,
-- finalCreateInfo.priority,
-- dedicatedBuffer,
-- dedicatedImage,
-- dedicatedBufferImageUsage,
-- allocationCount,
-- pAllocations,
-- blockVector.GetAllocationNextPtr());
-- }
-- else
-- {
-- const bool canAllocateDedicated =
-- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
-- (pool == VK_NULL_HANDLE || !blockVector.HasExplicitBlockSize());
--
-- if(canAllocateDedicated)
-- {
-- // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
-- if(size > blockVector.GetPreferredBlockSize() / 2)
-- {
-- dedicatedPreferred = true;
-- }
-- // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget,
-- // which can quickly deplete maxMemoryAllocationCount: Don't prefer dedicated allocations when above
-- // 3/4 of the maximum allocation count.
-- if(m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount < UINT32_MAX / 4 &&
-- m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4)
-- {
-- dedicatedPreferred = false;
-- }
--
-- if(dedicatedPreferred)
-- {
-- res = AllocateDedicatedMemory(
-- pool,
-- size,
-- suballocType,
-- dedicatedAllocations,
-- memTypeIndex,
-- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
-- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
-- (finalCreateInfo.flags &
-- (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
-- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
-- finalCreateInfo.pUserData,
-- finalCreateInfo.priority,
-- dedicatedBuffer,
-- dedicatedImage,
-- dedicatedBufferImageUsage,
-- allocationCount,
-- pAllocations,
-- blockVector.GetAllocationNextPtr());
-- if(res == VK_SUCCESS)
-- {
-- // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here.
-- VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
-- return VK_SUCCESS;
-- }
-- }
-- }
--
-- res = blockVector.Allocate(
-- size,
-- alignment,
-- finalCreateInfo,
-- suballocType,
-- allocationCount,
-- pAllocations);
-- if(res == VK_SUCCESS)
-- return VK_SUCCESS;
--
-- // Try dedicated memory.
-- if(canAllocateDedicated && !dedicatedPreferred)
-- {
-- res = AllocateDedicatedMemory(
-- pool,
-- size,
-- suballocType,
-- dedicatedAllocations,
-- memTypeIndex,
-- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
-- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
-- (finalCreateInfo.flags &
-- (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
-- (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
-- finalCreateInfo.pUserData,
-- finalCreateInfo.priority,
-- dedicatedBuffer,
-- dedicatedImage,
-- dedicatedBufferImageUsage,
-- allocationCount,
-- pAllocations,
-- blockVector.GetAllocationNextPtr());
-- if(res == VK_SUCCESS)
-- {
-- // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here.
-- VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
-- return VK_SUCCESS;
-- }
-- }
-- // Everything failed: Return error code.
-- VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
-- return res;
-- }
--}
--
--VkResult VmaAllocator_T::AllocateDedicatedMemory(
-- VmaPool pool,
-- VkDeviceSize size,
-- VmaSuballocationType suballocType,
-- VmaDedicatedAllocationList& dedicatedAllocations,
-- uint32_t memTypeIndex,
-- bool map,
-- bool isUserDataString,
-- bool isMappingAllowed,
-- bool canAliasMemory,
-- void* pUserData,
-- float priority,
-- VkBuffer dedicatedBuffer,
-- VkImage dedicatedImage,
-- VkFlags dedicatedBufferImageUsage,
-- size_t allocationCount,
-- VmaAllocation* pAllocations,
-- const void* pNextChain)
--{
-- VMA_ASSERT(allocationCount > 0 && pAllocations);
--
-- VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
-- allocInfo.memoryTypeIndex = memTypeIndex;
-- allocInfo.allocationSize = size;
-- allocInfo.pNext = pNextChain;
--
--#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-- VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
-- if(!canAliasMemory)
-- {
-- if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-- {
-- if(dedicatedBuffer != VK_NULL_HANDLE)
-- {
-- VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
-- dedicatedAllocInfo.buffer = dedicatedBuffer;
-- VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
-- }
-- else if(dedicatedImage != VK_NULL_HANDLE)
-- {
-- dedicatedAllocInfo.image = dedicatedImage;
-- VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
-- }
-- }
-- }
--#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
--
--#if VMA_BUFFER_DEVICE_ADDRESS
-- VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
-- if(m_UseKhrBufferDeviceAddress)
-- {
-- bool canContainBufferWithDeviceAddress = true;
-- if(dedicatedBuffer != VK_NULL_HANDLE)
-- {
-- canContainBufferWithDeviceAddress = dedicatedBufferImageUsage == UINT32_MAX || // Usage flags unknown
-- (dedicatedBufferImageUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
-- }
-- else if(dedicatedImage != VK_NULL_HANDLE)
-- {
-- canContainBufferWithDeviceAddress = false;
-- }
-- if(canContainBufferWithDeviceAddress)
-- {
-- allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
-- VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
-- }
-- }
--#endif // #if VMA_BUFFER_DEVICE_ADDRESS
--
--#if VMA_MEMORY_PRIORITY
-- VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
-- if(m_UseExtMemoryPriority)
-- {
-- VMA_ASSERT(priority >= 0.f && priority <= 1.f);
-- priorityInfo.priority = priority;
-- VmaPnextChainPushFront(&allocInfo, &priorityInfo);
-- }
--#endif // #if VMA_MEMORY_PRIORITY
--
--#if VMA_EXTERNAL_MEMORY
-- // Attach VkExportMemoryAllocateInfoKHR if necessary.
-- VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
-- exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex);
-- if(exportMemoryAllocInfo.handleTypes != 0)
-- {
-- VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
-- }
--#endif // #if VMA_EXTERNAL_MEMORY
--
-- size_t allocIndex;
-- VkResult res = VK_SUCCESS;
-- for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-- {
-- res = AllocateDedicatedMemoryPage(
-- pool,
-- size,
-- suballocType,
-- memTypeIndex,
-- allocInfo,
-- map,
-- isUserDataString,
-- isMappingAllowed,
-- pUserData,
-- pAllocations + allocIndex);
-- if(res != VK_SUCCESS)
-- {
-- break;
-- }
-- }
--
-- if(res == VK_SUCCESS)
-- {
-- for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-- {
-- dedicatedAllocations.Register(pAllocations[allocIndex]);
-- }
-- VMA_DEBUG_LOG_FORMAT(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
-- }
-- else
-- {
-- // Free all already created allocations.
-- while(allocIndex--)
-- {
-- VmaAllocation currAlloc = pAllocations[allocIndex];
-- VkDeviceMemory hMemory = currAlloc->GetMemory();
--
-- /*
-- There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
-- before vkFreeMemory.
--
-- if(currAlloc->GetMappedData() != VMA_NULL)
-- {
-- (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
-- }
-- */
--
-- FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
-- m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
-- m_AllocationObjectAllocator.Free(currAlloc);
-- }
--
-- memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
-- }
--
-- return res;
--}
--
--VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
-- VmaPool pool,
-- VkDeviceSize size,
-- VmaSuballocationType suballocType,
-- uint32_t memTypeIndex,
-- const VkMemoryAllocateInfo& allocInfo,
-- bool map,
-- bool isUserDataString,
-- bool isMappingAllowed,
-- void* pUserData,
-- VmaAllocation* pAllocation)
--{
-- VkDeviceMemory hMemory = VK_NULL_HANDLE;
-- VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
-- if(res < 0)
-- {
-- VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
-- return res;
-- }
--
-- void* pMappedData = VMA_NULL;
-- if(map)
-- {
-- res = (*m_VulkanFunctions.vkMapMemory)(
-- m_hDevice,
-- hMemory,
-- 0,
-- VK_WHOLE_SIZE,
-- 0,
-- &pMappedData);
-- if(res < 0)
-- {
-- VMA_DEBUG_LOG(" vkMapMemory FAILED");
-- FreeVulkanMemory(memTypeIndex, size, hMemory);
-- return res;
-- }
-- }
--
-- *pAllocation = m_AllocationObjectAllocator.Allocate(isMappingAllowed);
-- (*pAllocation)->InitDedicatedAllocation(pool, memTypeIndex, hMemory, suballocType, pMappedData, size);
-- if (isUserDataString)
-- (*pAllocation)->SetName(this, (const char*)pUserData);
-- else
-- (*pAllocation)->SetUserData(this, pUserData);
-- m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
-- if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
-- {
-- FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
-- }
--
-- return VK_SUCCESS;
--}
--
--void VmaAllocator_T::GetBufferMemoryRequirements(
-- VkBuffer hBuffer,
-- VkMemoryRequirements& memReq,
-- bool& requiresDedicatedAllocation,
-- bool& prefersDedicatedAllocation) const
--{
--#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-- if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-- {
-- VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
-- memReqInfo.buffer = hBuffer;
--
-- VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
--
-- VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
-- VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
--
-- (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
--
-- memReq = memReq2.memoryRequirements;
-- requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
-- prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
-- }
-- else
--#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-- {
-- (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
-- requiresDedicatedAllocation = false;
-- prefersDedicatedAllocation = false;
-- }
--}
--
--void VmaAllocator_T::GetImageMemoryRequirements(
-- VkImage hImage,
-- VkMemoryRequirements& memReq,
-- bool& requiresDedicatedAllocation,
-- bool& prefersDedicatedAllocation) const
--{
--#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-- if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-- {
-- VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
-- memReqInfo.image = hImage;
--
-- VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
--
-- VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
-- VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
--
-- (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
--
-- memReq = memReq2.memoryRequirements;
-- requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
-- prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
-- }
-- else
--#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-- {
-- (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
-- requiresDedicatedAllocation = false;
-- prefersDedicatedAllocation = false;
-- }
--}
--
--VkResult VmaAllocator_T::FindMemoryTypeIndex(
-- uint32_t memoryTypeBits,
-- const VmaAllocationCreateInfo* pAllocationCreateInfo,
-- VkFlags bufImgUsage,
-- uint32_t* pMemoryTypeIndex) const
--{
-- memoryTypeBits &= GetGlobalMemoryTypeBits();
--
-- if(pAllocationCreateInfo->memoryTypeBits != 0)
-- {
-- memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
-- }
--
-- VkMemoryPropertyFlags requiredFlags = 0, preferredFlags = 0, notPreferredFlags = 0;
-- if(!FindMemoryPreferences(
-- IsIntegratedGpu(),
-- *pAllocationCreateInfo,
-- bufImgUsage,
-- requiredFlags, preferredFlags, notPreferredFlags))
-- {
-- return VK_ERROR_FEATURE_NOT_PRESENT;
-- }
--
-- *pMemoryTypeIndex = UINT32_MAX;
-- uint32_t minCost = UINT32_MAX;
-- for(uint32_t memTypeIndex = 0, memTypeBit = 1;
-- memTypeIndex < GetMemoryTypeCount();
-- ++memTypeIndex, memTypeBit <<= 1)
-- {
-- // This memory type is acceptable according to memoryTypeBits bitmask.
-- if((memTypeBit & memoryTypeBits) != 0)
-- {
-- const VkMemoryPropertyFlags currFlags =
-- m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
-- // This memory type contains requiredFlags.
-- if((requiredFlags & ~currFlags) == 0)
-- {
-- // Calculate cost as number of bits from preferredFlags not present in this memory type.
-- uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) +
-- VMA_COUNT_BITS_SET(currFlags & notPreferredFlags);
-- // Remember memory type with lowest cost.
-- if(currCost < minCost)
-- {
-- *pMemoryTypeIndex = memTypeIndex;
-- if(currCost == 0)
-- {
-- return VK_SUCCESS;
-- }
-- minCost = currCost;
-- }
-- }
-- }
-- }
-- return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
--}
--
--VkResult VmaAllocator_T::CalcMemTypeParams(
-- VmaAllocationCreateInfo& inoutCreateInfo,
-- uint32_t memTypeIndex,
-- VkDeviceSize size,
-- size_t allocationCount)
--{
-- // If memory type is not HOST_VISIBLE, disable MAPPED.
-- if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
-- (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-- {
-- inoutCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
-- }
--
-- if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
-- (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0)
-- {
-- const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
-- VmaBudget heapBudget = {};
-- GetHeapBudgets(&heapBudget, heapIndex, 1);
-- if(heapBudget.usage + size * allocationCount > heapBudget.budget)
-- {
-- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-- }
-- }
-- return VK_SUCCESS;
--}
--
--VkResult VmaAllocator_T::CalcAllocationParams(
-- VmaAllocationCreateInfo& inoutCreateInfo,
-- bool dedicatedRequired,
-- bool dedicatedPreferred)
--{
-- VMA_ASSERT((inoutCreateInfo.flags &
-- (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) !=
-- (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) &&
-- "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect.");
-- VMA_ASSERT((((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) == 0 ||
-- (inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) &&
-- "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.");
-- if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST)
-- {
-- if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0)
-- {
-- VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0 &&
-- "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.");
-- }
-- }
--
-- // If memory is lazily allocated, it should be always dedicated.
-- if(dedicatedRequired ||
-- inoutCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
-- {
-- inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
-- }
--
-- if(inoutCreateInfo.pool != VK_NULL_HANDLE)
-- {
-- if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() &&
-- (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
-- {
-- VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations.");
-- return VK_ERROR_FEATURE_NOT_PRESENT;
-- }
-- inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority();
-- }
--
-- if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
-- (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
-- {
-- VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
-- return VK_ERROR_FEATURE_NOT_PRESENT;
-- }
--
-- if(VMA_DEBUG_ALWAYS_DEDICATED_MEMORY &&
-- (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
-- {
-- inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
-- }
--
-- // Non-auto USAGE values imply HOST_ACCESS flags.
-- // And so does VMA_MEMORY_USAGE_UNKNOWN because it is used with custom pools.
-- // Which specific flag is used doesn't matter. They change things only when used with VMA_MEMORY_USAGE_AUTO*.
-- // Otherwise they just protect from assert on mapping.
-- if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO &&
-- inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE &&
-- inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST)
-- {
-- if((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0)
-- {
-- inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
-- }
-- }
--
-- return VK_SUCCESS;
--}
--
--VkResult VmaAllocator_T::AllocateMemory(
-- const VkMemoryRequirements& vkMemReq,
-- bool requiresDedicatedAllocation,
-- bool prefersDedicatedAllocation,
-- VkBuffer dedicatedBuffer,
-- VkImage dedicatedImage,
-- VkFlags dedicatedBufferImageUsage,
-- const VmaAllocationCreateInfo& createInfo,
-- VmaSuballocationType suballocType,
-- size_t allocationCount,
-- VmaAllocation* pAllocations)
--{
-- memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
--
-- VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
--
-- if(vkMemReq.size == 0)
-- {
-- return VK_ERROR_INITIALIZATION_FAILED;
-- }
--
-- VmaAllocationCreateInfo createInfoFinal = createInfo;
-- VkResult res = CalcAllocationParams(createInfoFinal, requiresDedicatedAllocation, prefersDedicatedAllocation);
-- if(res != VK_SUCCESS)
-- return res;
--
-- if(createInfoFinal.pool != VK_NULL_HANDLE)
-- {
-- VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector;
-- return AllocateMemoryOfType(
-- createInfoFinal.pool,
-- vkMemReq.size,
-- vkMemReq.alignment,
-- prefersDedicatedAllocation,
-- dedicatedBuffer,
-- dedicatedImage,
-- dedicatedBufferImageUsage,
-- createInfoFinal,
-- blockVector.GetMemoryTypeIndex(),
-- suballocType,
-- createInfoFinal.pool->m_DedicatedAllocations,
-- blockVector,
-- allocationCount,
-- pAllocations);
-- }
-- else
-- {
-- // Bit mask of memory Vulkan types acceptable for this allocation.
-- uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
-- uint32_t memTypeIndex = UINT32_MAX;
-- res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);
-- // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
-- if(res != VK_SUCCESS)
-- return res;
-- do
-- {
-- VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex];
-- VMA_ASSERT(blockVector && "Trying to use unsupported memory type!");
-- res = AllocateMemoryOfType(
-- VK_NULL_HANDLE,
-- vkMemReq.size,
-- vkMemReq.alignment,
-- requiresDedicatedAllocation || prefersDedicatedAllocation,
-- dedicatedBuffer,
-- dedicatedImage,
-- dedicatedBufferImageUsage,
-- createInfoFinal,
-- memTypeIndex,
-- suballocType,
-- m_DedicatedAllocations[memTypeIndex],
-- *blockVector,
-- allocationCount,
-- pAllocations);
-- // Allocation succeeded
-- if(res == VK_SUCCESS)
-- return VK_SUCCESS;
--
-- // Remove old memTypeIndex from list of possibilities.
-- memoryTypeBits &= ~(1u << memTypeIndex);
-- // Find alternative memTypeIndex.
-- res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);
-- } while(res == VK_SUCCESS);
--
-- // No other matching memory type index could be found.
-- // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
-- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-- }
--}
--
--void VmaAllocator_T::FreeMemory(
-- size_t allocationCount,
-- const VmaAllocation* pAllocations)
--{
-- VMA_ASSERT(pAllocations);
--
-- for(size_t allocIndex = allocationCount; allocIndex--; )
-- {
-- VmaAllocation allocation = pAllocations[allocIndex];
--
-- if(allocation != VK_NULL_HANDLE)
-- {
-- if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
-- {
-- FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
-- }
--
-- allocation->FreeName(this);
--
-- switch(allocation->GetType())
-- {
-- case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-- {
-- VmaBlockVector* pBlockVector = VMA_NULL;
-- VmaPool hPool = allocation->GetParentPool();
-- if(hPool != VK_NULL_HANDLE)
-- {
-- pBlockVector = &hPool->m_BlockVector;
-- }
-- else
-- {
-- const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
-- pBlockVector = m_pBlockVectors[memTypeIndex];
-- VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!");
-- }
-- pBlockVector->Free(allocation);
-- }
-- break;
-- case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-- FreeDedicatedMemory(allocation);
-- break;
-- default:
-- VMA_ASSERT(0);
-- }
-- }
-- }
--}
--
--void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats)
--{
-- // Initialize.
-- VmaClearDetailedStatistics(pStats->total);
-- for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
-- VmaClearDetailedStatistics(pStats->memoryType[i]);
-- for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
-- VmaClearDetailedStatistics(pStats->memoryHeap[i]);
--
-- // Process default pools.
-- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-- {
-- VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
-- if (pBlockVector != VMA_NULL)
-- pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
-- }
--
-- // Process custom pools.
-- {
-- VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
-- for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
-- {
-- VmaBlockVector& blockVector = pool->m_BlockVector;
-- const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex();
-- blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
-- pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
-- }
-- }
--
-- // Process dedicated allocations.
-- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-- {
-- m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
-- }
--
-- // Sum from memory types to memory heaps.
-- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-- {
-- const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex;
-- VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]);
-- }
--
-- // Sum from memory heaps to total.
-- for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex)
-- VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]);
--
-- VMA_ASSERT(pStats->total.statistics.allocationCount == 0 ||
-- pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin);
-- VMA_ASSERT(pStats->total.unusedRangeCount == 0 ||
-- pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin);
--}
--
--void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount)
--{
--#if VMA_MEMORY_BUDGET
-- if(m_UseExtMemoryBudget)
-- {
-- if(m_Budget.m_OperationsSinceBudgetFetch < 30)
-- {
-- VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
-- for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)
-- {
-- const uint32_t heapIndex = firstHeap + i;
--
-- outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];
-- outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];
-- outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];
-- outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
--
-- if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
-- {
-- outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] +
-- outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
-- }
-- else
-- {
-- outBudgets->usage = 0;
-- }
--
-- // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
-- outBudgets->budget = VMA_MIN(
-- m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
-- }
-- }
-- else
-- {
-- UpdateVulkanBudget(); // Outside of mutex lock
-- GetHeapBudgets(outBudgets, firstHeap, heapCount); // Recursion
-- }
-- }
-- else
--#endif
-- {
-- for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)
-- {
-- const uint32_t heapIndex = firstHeap + i;
--
-- outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];
-- outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];
-- outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];
-- outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
--
-- outBudgets->usage = outBudgets->statistics.blockBytes;
-- outBudgets->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
-- }
-- }
--}
--
--void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
--{
-- pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
-- pAllocationInfo->deviceMemory = hAllocation->GetMemory();
-- pAllocationInfo->offset = hAllocation->GetOffset();
-- pAllocationInfo->size = hAllocation->GetSize();
-- pAllocationInfo->pMappedData = hAllocation->GetMappedData();
-- pAllocationInfo->pUserData = hAllocation->GetUserData();
-- pAllocationInfo->pName = hAllocation->GetName();
--}
--
--VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
--{
-- VMA_DEBUG_LOG_FORMAT(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
--
-- VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
--
-- // Protection against uninitialized new structure member. If garbage data are left there, this pointer dereference would crash.
-- if(pCreateInfo->pMemoryAllocateNext)
-- {
-- VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0);
-- }
--
-- if(newCreateInfo.maxBlockCount == 0)
-- {
-- newCreateInfo.maxBlockCount = SIZE_MAX;
-- }
-- if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
-- {
-- return VK_ERROR_INITIALIZATION_FAILED;
-- }
-- // Memory type index out of range or forbidden.
-- if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
-- ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
-- {
-- return VK_ERROR_FEATURE_NOT_PRESENT;
-- }
-- if(newCreateInfo.minAllocationAlignment > 0)
-- {
-- VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment));
-- }
--
-- const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
--
-- *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
--
-- VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
-- if(res != VK_SUCCESS)
-- {
-- vma_delete(this, *pPool);
-- *pPool = VMA_NULL;
-- return res;
-- }
--
-- // Add to m_Pools.
-- {
-- VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
-- (*pPool)->SetId(m_NextPoolId++);
-- m_Pools.PushBack(*pPool);
-- }
--
-- return VK_SUCCESS;
--}
--
--void VmaAllocator_T::DestroyPool(VmaPool pool)
--{
-- // Remove from m_Pools.
-- {
-- VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
-- m_Pools.Remove(pool);
-- }
--
-- vma_delete(this, pool);
--}
--
--void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats)
--{
-- VmaClearStatistics(*pPoolStats);
-- pool->m_BlockVector.AddStatistics(*pPoolStats);
-- pool->m_DedicatedAllocations.AddStatistics(*pPoolStats);
--}
--
--void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats)
--{
-- VmaClearDetailedStatistics(*pPoolStats);
-- pool->m_BlockVector.AddDetailedStatistics(*pPoolStats);
-- pool->m_DedicatedAllocations.AddDetailedStatistics(*pPoolStats);
--}
--
--void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
--{
-- m_CurrentFrameIndex.store(frameIndex);
--
--#if VMA_MEMORY_BUDGET
-- if(m_UseExtMemoryBudget)
-- {
-- UpdateVulkanBudget();
-- }
--#endif // #if VMA_MEMORY_BUDGET
--}
--
--VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
--{
-- return hPool->m_BlockVector.CheckCorruption();
--}
--
--VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
--{
-- VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
--
-- // Process default pools.
-- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-- {
-- VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
-- if(pBlockVector != VMA_NULL)
-- {
-- VkResult localRes = pBlockVector->CheckCorruption();
-- switch(localRes)
-- {
-- case VK_ERROR_FEATURE_NOT_PRESENT:
-- break;
-- case VK_SUCCESS:
-- finalRes = VK_SUCCESS;
-- break;
-- default:
-- return localRes;
-- }
-- }
-- }
--
-- // Process custom pools.
-- {
-- VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
-- for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
-- {
-- if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
-- {
-- VkResult localRes = pool->m_BlockVector.CheckCorruption();
-- switch(localRes)
-- {
-- case VK_ERROR_FEATURE_NOT_PRESENT:
-- break;
-- case VK_SUCCESS:
-- finalRes = VK_SUCCESS;
-- break;
-- default:
-- return localRes;
-- }
-- }
-- }
-- }
--
-- return finalRes;
--}
--
--VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
--{
-- AtomicTransactionalIncrement<VMA_ATOMIC_UINT32> deviceMemoryCountIncrement;
-- const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount);
--#if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
-- if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount)
-- {
-- return VK_ERROR_TOO_MANY_OBJECTS;
-- }
--#endif
--
-- const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
--
-- // HeapSizeLimit is in effect for this heap.
-- if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
-- {
-- const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
-- VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
-- for(;;)
-- {
-- const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
-- if(blockBytesAfterAllocation > heapSize)
-- {
-- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-- }
-- if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
-- {
-- break;
-- }
-- }
-- }
-- else
-- {
-- m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
-- }
-- ++m_Budget.m_BlockCount[heapIndex];
--
-- // VULKAN CALL vkAllocateMemory.
-- VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
--
-- if(res == VK_SUCCESS)
-- {
--#if VMA_MEMORY_BUDGET
-- ++m_Budget.m_OperationsSinceBudgetFetch;
--#endif
--
-- // Informative callback.
-- if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
-- {
-- (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
-- }
--
-- deviceMemoryCountIncrement.Commit();
-- }
-- else
-- {
-- --m_Budget.m_BlockCount[heapIndex];
-- m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
-- }
--
-- return res;
--}
--
--void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
--{
-- // Informative callback.
-- if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
-- {
-- (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
-- }
--
-- // VULKAN CALL vkFreeMemory.
-- (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
--
-- const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
-- --m_Budget.m_BlockCount[heapIndex];
-- m_Budget.m_BlockBytes[heapIndex] -= size;
--
-- --m_DeviceMemoryCount;
--}
--
--VkResult VmaAllocator_T::BindVulkanBuffer(
-- VkDeviceMemory memory,
-- VkDeviceSize memoryOffset,
-- VkBuffer buffer,
-- const void* pNext)
--{
-- if(pNext != VMA_NULL)
-- {
--#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
-- if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
-- m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
-- {
-- VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
-- bindBufferMemoryInfo.pNext = pNext;
-- bindBufferMemoryInfo.buffer = buffer;
-- bindBufferMemoryInfo.memory = memory;
-- bindBufferMemoryInfo.memoryOffset = memoryOffset;
-- return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
-- }
-- else
--#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
-- {
-- return VK_ERROR_EXTENSION_NOT_PRESENT;
-- }
-- }
-- else
-- {
-- return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
-- }
--}
--
--VkResult VmaAllocator_T::BindVulkanImage(
-- VkDeviceMemory memory,
-- VkDeviceSize memoryOffset,
-- VkImage image,
-- const void* pNext)
--{
-- if(pNext != VMA_NULL)
-- {
--#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
-- if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
-- m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
-- {
-- VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
-- bindBufferMemoryInfo.pNext = pNext;
-- bindBufferMemoryInfo.image = image;
-- bindBufferMemoryInfo.memory = memory;
-- bindBufferMemoryInfo.memoryOffset = memoryOffset;
-- return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
-- }
-- else
--#endif // #if VMA_BIND_MEMORY2
-- {
-- return VK_ERROR_EXTENSION_NOT_PRESENT;
-- }
-- }
-- else
-- {
-- return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
-- }
--}
--
--VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
--{
-- switch(hAllocation->GetType())
-- {
-- case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-- {
-- VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
-- char *pBytes = VMA_NULL;
-- VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
-- if(res == VK_SUCCESS)
-- {
-- *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
-- hAllocation->BlockAllocMap();
-- }
-- return res;
-- }
-- case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-- return hAllocation->DedicatedAllocMap(this, ppData);
-- default:
-- VMA_ASSERT(0);
-- return VK_ERROR_MEMORY_MAP_FAILED;
-- }
--}
--
--void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
--{
-- switch(hAllocation->GetType())
-- {
-- case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-- {
-- VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
-- hAllocation->BlockAllocUnmap();
-- pBlock->Unmap(this, 1);
-- }
-- break;
-- case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-- hAllocation->DedicatedAllocUnmap(this);
-- break;
-- default:
-- VMA_ASSERT(0);
-- }
--}
--
--VkResult VmaAllocator_T::BindBufferMemory(
-- VmaAllocation hAllocation,
-- VkDeviceSize allocationLocalOffset,
-- VkBuffer hBuffer,
-- const void* pNext)
--{
-- VkResult res = VK_ERROR_UNKNOWN;
-- switch(hAllocation->GetType())
-- {
-- case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-- res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
-- break;
-- case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-- {
-- VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
-- VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block.");
-- res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
-- break;
-- }
-- default:
-- VMA_ASSERT(0);
-- }
-- return res;
--}
--
--VkResult VmaAllocator_T::BindImageMemory(
-- VmaAllocation hAllocation,
-- VkDeviceSize allocationLocalOffset,
-- VkImage hImage,
-- const void* pNext)
--{
-- VkResult res = VK_ERROR_UNKNOWN;
-- switch(hAllocation->GetType())
-- {
-- case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-- res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
-- break;
-- case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-- {
-- VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
-- VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block.");
-- res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
-- break;
-- }
-- default:
-- VMA_ASSERT(0);
-- }
-- return res;
--}
--
--VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
-- VmaAllocation hAllocation,
-- VkDeviceSize offset, VkDeviceSize size,
-- VMA_CACHE_OPERATION op)
--{
-- VkResult res = VK_SUCCESS;
--
-- VkMappedMemoryRange memRange = {};
-- if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
-- {
-- switch(op)
-- {
-- case VMA_CACHE_FLUSH:
-- res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
-- break;
-- case VMA_CACHE_INVALIDATE:
-- res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
-- break;
-- default:
-- VMA_ASSERT(0);
-- }
-- }
-- // else: Just ignore this call.
-- return res;
--}
--
--VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
-- uint32_t allocationCount,
-- const VmaAllocation* allocations,
-- const VkDeviceSize* offsets, const VkDeviceSize* sizes,
-- VMA_CACHE_OPERATION op)
--{
-- typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
-- typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
-- RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
--
-- for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-- {
-- const VmaAllocation alloc = allocations[allocIndex];
-- const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
-- const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
-- VkMappedMemoryRange newRange;
-- if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
-- {
-- ranges.push_back(newRange);
-- }
-- }
--
-- VkResult res = VK_SUCCESS;
-- if(!ranges.empty())
-- {
-- switch(op)
-- {
-- case VMA_CACHE_FLUSH:
-- res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
-- break;
-- case VMA_CACHE_INVALIDATE:
-- res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
-- break;
-- default:
-- VMA_ASSERT(0);
-- }
-- }
-- // else: Just ignore this call.
-- return res;
--}
--
--void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
--{
-- VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
--
-- const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
-- VmaPool parentPool = allocation->GetParentPool();
-- if(parentPool == VK_NULL_HANDLE)
-- {
-- // Default pool
-- m_DedicatedAllocations[memTypeIndex].Unregister(allocation);
-- }
-- else
-- {
-- // Custom pool
-- parentPool->m_DedicatedAllocations.Unregister(allocation);
-- }
--
-- VkDeviceMemory hMemory = allocation->GetMemory();
--
-- /*
-- There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
-- before vkFreeMemory.
--
-- if(allocation->GetMappedData() != VMA_NULL)
-- {
-- (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
-- }
-- */
--
-- FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
--
-- m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
-- m_AllocationObjectAllocator.Free(allocation);
--
-- VMA_DEBUG_LOG_FORMAT(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
--}
--
--uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
--{
-- VkBufferCreateInfo dummyBufCreateInfo;
-- VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
--
-- uint32_t memoryTypeBits = 0;
--
-- // Create buffer.
-- VkBuffer buf = VK_NULL_HANDLE;
-- VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
-- m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
-- if(res == VK_SUCCESS)
-- {
-- // Query for supported memory types.
-- VkMemoryRequirements memReq;
-- (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
-- memoryTypeBits = memReq.memoryTypeBits;
--
-- // Destroy buffer.
-- (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
-- }
--
-- return memoryTypeBits;
--}
--
--uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
--{
-- // Make sure memory information is already fetched.
-- VMA_ASSERT(GetMemoryTypeCount() > 0);
--
-- uint32_t memoryTypeBits = UINT32_MAX;
--
-- if(!m_UseAmdDeviceCoherentMemory)
-- {
-- // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
-- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-- {
-- if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
-- {
-- memoryTypeBits &= ~(1u << memTypeIndex);
-- }
-- }
-- }
--
-- return memoryTypeBits;
--}
--
--bool VmaAllocator_T::GetFlushOrInvalidateRange(
-- VmaAllocation allocation,
-- VkDeviceSize offset, VkDeviceSize size,
-- VkMappedMemoryRange& outRange) const
--{
-- const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
-- if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
-- {
-- const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
-- const VkDeviceSize allocationSize = allocation->GetSize();
-- VMA_ASSERT(offset <= allocationSize);
--
-- outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
-- outRange.pNext = VMA_NULL;
-- outRange.memory = allocation->GetMemory();
--
-- switch(allocation->GetType())
-- {
-- case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-- outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
-- if(size == VK_WHOLE_SIZE)
-- {
-- outRange.size = allocationSize - outRange.offset;
-- }
-- else
-- {
-- VMA_ASSERT(offset + size <= allocationSize);
-- outRange.size = VMA_MIN(
-- VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
-- allocationSize - outRange.offset);
-- }
-- break;
-- case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-- {
-- // 1. Still within this allocation.
-- outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
-- if(size == VK_WHOLE_SIZE)
-- {
-- size = allocationSize - offset;
-- }
-- else
-- {
-- VMA_ASSERT(offset + size <= allocationSize);
-- }
-- outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
--
-- // 2. Adjust to whole block.
-- const VkDeviceSize allocationOffset = allocation->GetOffset();
-- VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
-- const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
-- outRange.offset += allocationOffset;
-- outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
--
-- break;
-- }
-- default:
-- VMA_ASSERT(0);
-- }
-- return true;
-- }
-- return false;
--}
--
--#if VMA_MEMORY_BUDGET
--void VmaAllocator_T::UpdateVulkanBudget()
--{
-- VMA_ASSERT(m_UseExtMemoryBudget);
--
-- VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
--
-- VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
-- VmaPnextChainPushFront(&memProps, &budgetProps);
--
-- GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
--
-- {
-- VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
--
-- for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
-- {
-- m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
-- m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
-- m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
--
-- // Some bugged drivers return the budget incorrectly, e.g. 0 or much bigger than heap size.
-- if(m_Budget.m_VulkanBudget[heapIndex] == 0)
-- {
-- m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
-- }
-- else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
-- {
-- m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
-- }
-- if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
-- {
-- m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
-- }
-- }
-- m_Budget.m_OperationsSinceBudgetFetch = 0;
-- }
--}
--#endif // VMA_MEMORY_BUDGET
--
--void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
--{
-- if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
-- hAllocation->IsMappingAllowed() &&
-- (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
-- {
-- void* pData = VMA_NULL;
-- VkResult res = Map(hAllocation, &pData);
-- if(res == VK_SUCCESS)
-- {
-- memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
-- FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
-- Unmap(hAllocation);
-- }
-- else
-- {
-- VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
-- }
-- }
--}
--
--uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
--{
-- uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
-- if(memoryTypeBits == UINT32_MAX)
-- {
-- memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
-- m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
-- }
-- return memoryTypeBits;
--}
--
--#if VMA_STATS_STRING_ENABLED
--void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
--{
-- json.WriteString("DefaultPools");
-- json.BeginObject();
-- {
-- for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-- {
-- VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex];
-- VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
-- if (pBlockVector != VMA_NULL)
-- {
-- json.BeginString("Type ");
-- json.ContinueString(memTypeIndex);
-- json.EndString();
-- json.BeginObject();
-- {
-- json.WriteString("PreferredBlockSize");
-- json.WriteNumber(pBlockVector->GetPreferredBlockSize());
--
-- json.WriteString("Blocks");
-- pBlockVector->PrintDetailedMap(json);
--
-- json.WriteString("DedicatedAllocations");
-- dedicatedAllocList.BuildStatsString(json);
-- }
-- json.EndObject();
-- }
-- }
-- }
-- json.EndObject();
--
-- json.WriteString("CustomPools");
-- json.BeginObject();
-- {
-- VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
-- if (!m_Pools.IsEmpty())
-- {
-- for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-- {
-- bool displayType = true;
-- size_t index = 0;
-- for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
-- {
-- VmaBlockVector& blockVector = pool->m_BlockVector;
-- if (blockVector.GetMemoryTypeIndex() == memTypeIndex)
-- {
-- if (displayType)
-- {
-- json.BeginString("Type ");
-- json.ContinueString(memTypeIndex);
-- json.EndString();
-- json.BeginArray();
-- displayType = false;
-- }
--
-- json.BeginObject();
-- {
-- json.WriteString("Name");
-- json.BeginString();
-- json.ContinueString((uint64_t)index++);
-- if (pool->GetName())
-- {
-- json.ContinueString(" - ");
-- json.ContinueString(pool->GetName());
-- }
-- json.EndString();
--
-- json.WriteString("PreferredBlockSize");
-- json.WriteNumber(blockVector.GetPreferredBlockSize());
--
-- json.WriteString("Blocks");
-- blockVector.PrintDetailedMap(json);
--
-- json.WriteString("DedicatedAllocations");
-- pool->m_DedicatedAllocations.BuildStatsString(json);
-- }
-- json.EndObject();
-- }
-- }
--
-- if (!displayType)
-- json.EndArray();
-- }
-- }
-- }
-- json.EndObject();
--}
--#endif // VMA_STATS_STRING_ENABLED
--#endif // _VMA_ALLOCATOR_T_FUNCTIONS
--
--
--#ifndef _VMA_PUBLIC_INTERFACE
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
-- const VmaAllocatorCreateInfo* pCreateInfo,
-- VmaAllocator* pAllocator)
--{
-- VMA_ASSERT(pCreateInfo && pAllocator);
-- VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
-- (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 3));
-- VMA_DEBUG_LOG("vmaCreateAllocator");
-- *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
-- VkResult result = (*pAllocator)->Init(pCreateInfo);
-- if(result < 0)
-- {
-- vma_delete(pCreateInfo->pAllocationCallbacks, *pAllocator);
-- *pAllocator = VK_NULL_HANDLE;
-- }
-- return result;
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
-- VmaAllocator allocator)
--{
-- if(allocator != VK_NULL_HANDLE)
-- {
-- VMA_DEBUG_LOG("vmaDestroyAllocator");
-- VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; // Have to copy the callbacks when destroying.
-- vma_delete(&allocationCallbacks, allocator);
-- }
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
--{
-- VMA_ASSERT(allocator && pAllocatorInfo);
-- pAllocatorInfo->instance = allocator->m_hInstance;
-- pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
-- pAllocatorInfo->device = allocator->m_hDevice;
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
-- VmaAllocator allocator,
-- const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
--{
-- VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
-- *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
-- VmaAllocator allocator,
-- const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
--{
-- VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
-- *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
-- VmaAllocator allocator,
-- uint32_t memoryTypeIndex,
-- VkMemoryPropertyFlags* pFlags)
--{
-- VMA_ASSERT(allocator && pFlags);
-- VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
-- *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
-- VmaAllocator allocator,
-- uint32_t frameIndex)
--{
-- VMA_ASSERT(allocator);
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- allocator->SetCurrentFrameIndex(frameIndex);
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(
-- VmaAllocator allocator,
-- VmaTotalStatistics* pStats)
--{
-- VMA_ASSERT(allocator && pStats);
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
-- allocator->CalculateStatistics(pStats);
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(
-- VmaAllocator allocator,
-- VmaBudget* pBudgets)
--{
-- VMA_ASSERT(allocator && pBudgets);
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
-- allocator->GetHeapBudgets(pBudgets, 0, allocator->GetMemoryHeapCount());
--}
--
--#if VMA_STATS_STRING_ENABLED
--
--VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
-- VmaAllocator allocator,
-- char** ppStatsString,
-- VkBool32 detailedMap)
--{
-- VMA_ASSERT(allocator && ppStatsString);
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- VmaStringBuilder sb(allocator->GetAllocationCallbacks());
-- {
-- VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
-- allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount());
--
-- VmaTotalStatistics stats;
-- allocator->CalculateStatistics(&stats);
--
-- VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
-- json.BeginObject();
-- {
-- json.WriteString("General");
-- json.BeginObject();
-- {
-- const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties;
-- const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps;
--
-- json.WriteString("API");
-- json.WriteString("Vulkan");
--
-- json.WriteString("apiVersion");
-- json.BeginString();
-- json.ContinueString(VK_VERSION_MAJOR(deviceProperties.apiVersion));
-- json.ContinueString(".");
-- json.ContinueString(VK_VERSION_MINOR(deviceProperties.apiVersion));
-- json.ContinueString(".");
-- json.ContinueString(VK_VERSION_PATCH(deviceProperties.apiVersion));
-- json.EndString();
--
-- json.WriteString("GPU");
-- json.WriteString(deviceProperties.deviceName);
-- json.WriteString("deviceType");
-- json.WriteNumber(static_cast<uint32_t>(deviceProperties.deviceType));
--
-- json.WriteString("maxMemoryAllocationCount");
-- json.WriteNumber(deviceProperties.limits.maxMemoryAllocationCount);
-- json.WriteString("bufferImageGranularity");
-- json.WriteNumber(deviceProperties.limits.bufferImageGranularity);
-- json.WriteString("nonCoherentAtomSize");
-- json.WriteNumber(deviceProperties.limits.nonCoherentAtomSize);
--
-- json.WriteString("memoryHeapCount");
-- json.WriteNumber(memoryProperties.memoryHeapCount);
-- json.WriteString("memoryTypeCount");
-- json.WriteNumber(memoryProperties.memoryTypeCount);
-- }
-- json.EndObject();
-- }
-- {
-- json.WriteString("Total");
-- VmaPrintDetailedStatistics(json, stats.total);
-- }
-- {
-- json.WriteString("MemoryInfo");
-- json.BeginObject();
-- {
-- for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
-- {
-- json.BeginString("Heap ");
-- json.ContinueString(heapIndex);
-- json.EndString();
-- json.BeginObject();
-- {
-- const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex];
-- json.WriteString("Flags");
-- json.BeginArray(true);
-- {
-- if (heapInfo.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)
-- json.WriteString("DEVICE_LOCAL");
-- #if VMA_VULKAN_VERSION >= 1001000
-- if (heapInfo.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT)
-- json.WriteString("MULTI_INSTANCE");
-- #endif
--
-- VkMemoryHeapFlags flags = heapInfo.flags &
-- ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT
-- #if VMA_VULKAN_VERSION >= 1001000
-- | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT
-- #endif
-- );
-- if (flags != 0)
-- json.WriteNumber(flags);
-- }
-- json.EndArray();
--
-- json.WriteString("Size");
-- json.WriteNumber(heapInfo.size);
--
-- json.WriteString("Budget");
-- json.BeginObject();
-- {
-- json.WriteString("BudgetBytes");
-- json.WriteNumber(budgets[heapIndex].budget);
-- json.WriteString("UsageBytes");
-- json.WriteNumber(budgets[heapIndex].usage);
-- }
-- json.EndObject();
--
-- json.WriteString("Stats");
-- VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]);
--
-- json.WriteString("MemoryPools");
-- json.BeginObject();
-- {
-- for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
-- {
-- if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
-- {
-- json.BeginString("Type ");
-- json.ContinueString(typeIndex);
-- json.EndString();
-- json.BeginObject();
-- {
-- json.WriteString("Flags");
-- json.BeginArray(true);
-- {
-- VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
-- if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
-- json.WriteString("DEVICE_LOCAL");
-- if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
-- json.WriteString("HOST_VISIBLE");
-- if (flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
-- json.WriteString("HOST_COHERENT");
-- if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT)
-- json.WriteString("HOST_CACHED");
-- if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT)
-- json.WriteString("LAZILY_ALLOCATED");
-- #if VMA_VULKAN_VERSION >= 1001000
-- if (flags & VK_MEMORY_PROPERTY_PROTECTED_BIT)
-- json.WriteString("PROTECTED");
-- #endif
-- #if VK_AMD_device_coherent_memory
-- if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY)
-- json.WriteString("DEVICE_COHERENT_AMD");
-- if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)
-- json.WriteString("DEVICE_UNCACHED_AMD");
-- #endif
--
-- flags &= ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
-- #if VMA_VULKAN_VERSION >= 1001000
-- | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT
-- #endif
-- #if VK_AMD_device_coherent_memory
-- | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY
-- | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY
-- #endif
-- | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
-- | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
-- | VK_MEMORY_PROPERTY_HOST_CACHED_BIT);
-- if (flags != 0)
-- json.WriteNumber(flags);
-- }
-- json.EndArray();
--
-- json.WriteString("Stats");
-- VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]);
-- }
-- json.EndObject();
-- }
-- }
--
-- }
-- json.EndObject();
-- }
-- json.EndObject();
-- }
-- }
-- json.EndObject();
-- }
--
-- if (detailedMap == VK_TRUE)
-- allocator->PrintDetailedMap(json);
--
-- json.EndObject();
-- }
--
-- *ppStatsString = VmaCreateStringCopy(allocator->GetAllocationCallbacks(), sb.GetData(), sb.GetLength());
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
-- VmaAllocator allocator,
-- char* pStatsString)
--{
-- if(pStatsString != VMA_NULL)
-- {
-- VMA_ASSERT(allocator);
-- VmaFreeString(allocator->GetAllocationCallbacks(), pStatsString);
-- }
--}
--
--#endif // VMA_STATS_STRING_ENABLED
--
--/*
--This function is not protected by any mutex because it just reads immutable data.
--*/
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
-- VmaAllocator allocator,
-- uint32_t memoryTypeBits,
-- const VmaAllocationCreateInfo* pAllocationCreateInfo,
-- uint32_t* pMemoryTypeIndex)
--{
-- VMA_ASSERT(allocator != VK_NULL_HANDLE);
-- VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
-- VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
--
-- return allocator->FindMemoryTypeIndex(memoryTypeBits, pAllocationCreateInfo, UINT32_MAX, pMemoryTypeIndex);
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
-- VmaAllocator allocator,
-- const VkBufferCreateInfo* pBufferCreateInfo,
-- const VmaAllocationCreateInfo* pAllocationCreateInfo,
-- uint32_t* pMemoryTypeIndex)
--{
-- VMA_ASSERT(allocator != VK_NULL_HANDLE);
-- VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
-- VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
-- VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
--
-- const VkDevice hDev = allocator->m_hDevice;
-- const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions();
-- VkResult res;
--
--#if VMA_VULKAN_VERSION >= 1003000
-- if(funcs->vkGetDeviceBufferMemoryRequirements)
-- {
-- // Can query straight from VkBufferCreateInfo :)
-- VkDeviceBufferMemoryRequirements devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS};
-- devBufMemReq.pCreateInfo = pBufferCreateInfo;
--
-- VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};
-- (*funcs->vkGetDeviceBufferMemoryRequirements)(hDev, &devBufMemReq, &memReq);
--
-- res = allocator->FindMemoryTypeIndex(
-- memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex);
-- }
-- else
--#endif // #if VMA_VULKAN_VERSION >= 1003000
-- {
-- // Must create a dummy buffer to query :(
-- VkBuffer hBuffer = VK_NULL_HANDLE;
-- res = funcs->vkCreateBuffer(
-- hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
-- if(res == VK_SUCCESS)
-- {
-- VkMemoryRequirements memReq = {};
-- funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, &memReq);
--
-- res = allocator->FindMemoryTypeIndex(
-- memReq.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex);
--
-- funcs->vkDestroyBuffer(
-- hDev, hBuffer, allocator->GetAllocationCallbacks());
-- }
-- }
-- return res;
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
-- VmaAllocator allocator,
-- const VkImageCreateInfo* pImageCreateInfo,
-- const VmaAllocationCreateInfo* pAllocationCreateInfo,
-- uint32_t* pMemoryTypeIndex)
--{
-- VMA_ASSERT(allocator != VK_NULL_HANDLE);
-- VMA_ASSERT(pImageCreateInfo != VMA_NULL);
-- VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
-- VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
--
-- const VkDevice hDev = allocator->m_hDevice;
-- const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions();
-- VkResult res;
--
--#if VMA_VULKAN_VERSION >= 1003000
-- if(funcs->vkGetDeviceImageMemoryRequirements)
-- {
-- // Can query straight from VkImageCreateInfo :)
-- VkDeviceImageMemoryRequirements devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS};
-- devImgMemReq.pCreateInfo = pImageCreateInfo;
-- VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 &&
-- "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect.");
--
-- VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};
-- (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq);
--
-- res = allocator->FindMemoryTypeIndex(
-- memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex);
-- }
-- else
--#endif // #if VMA_VULKAN_VERSION >= 1003000
-- {
-- // Must create a dummy image to query :(
-- VkImage hImage = VK_NULL_HANDLE;
-- res = funcs->vkCreateImage(
-- hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
-- if(res == VK_SUCCESS)
-- {
-- VkMemoryRequirements memReq = {};
-- funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq);
--
-- res = allocator->FindMemoryTypeIndex(
-- memReq.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex);
--
-- funcs->vkDestroyImage(
-- hDev, hImage, allocator->GetAllocationCallbacks());
-- }
-- }
-- return res;
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
-- VmaAllocator allocator,
-- const VmaPoolCreateInfo* pCreateInfo,
-- VmaPool* pPool)
--{
-- VMA_ASSERT(allocator && pCreateInfo && pPool);
--
-- VMA_DEBUG_LOG("vmaCreatePool");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- return allocator->CreatePool(pCreateInfo, pPool);
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
-- VmaAllocator allocator,
-- VmaPool pool)
--{
-- VMA_ASSERT(allocator);
--
-- if(pool == VK_NULL_HANDLE)
-- {
-- return;
-- }
--
-- VMA_DEBUG_LOG("vmaDestroyPool");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- allocator->DestroyPool(pool);
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(
-- VmaAllocator allocator,
-- VmaPool pool,
-- VmaStatistics* pPoolStats)
--{
-- VMA_ASSERT(allocator && pool && pPoolStats);
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- allocator->GetPoolStatistics(pool, pPoolStats);
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(
-- VmaAllocator allocator,
-- VmaPool pool,
-- VmaDetailedStatistics* pPoolStats)
--{
-- VMA_ASSERT(allocator && pool && pPoolStats);
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- allocator->CalculatePoolStatistics(pool, pPoolStats);
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
--{
-- VMA_ASSERT(allocator && pool);
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- VMA_DEBUG_LOG("vmaCheckPoolCorruption");
--
-- return allocator->CheckPoolCorruption(pool);
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
-- VmaAllocator allocator,
-- VmaPool pool,
-- const char** ppName)
--{
-- VMA_ASSERT(allocator && pool && ppName);
--
-- VMA_DEBUG_LOG("vmaGetPoolName");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- *ppName = pool->GetName();
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
-- VmaAllocator allocator,
-- VmaPool pool,
-- const char* pName)
--{
-- VMA_ASSERT(allocator && pool);
--
-- VMA_DEBUG_LOG("vmaSetPoolName");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- pool->SetName(pName);
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
-- VmaAllocator allocator,
-- const VkMemoryRequirements* pVkMemoryRequirements,
-- const VmaAllocationCreateInfo* pCreateInfo,
-- VmaAllocation* pAllocation,
-- VmaAllocationInfo* pAllocationInfo)
--{
-- VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
--
-- VMA_DEBUG_LOG("vmaAllocateMemory");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- VkResult result = allocator->AllocateMemory(
-- *pVkMemoryRequirements,
-- false, // requiresDedicatedAllocation
-- false, // prefersDedicatedAllocation
-- VK_NULL_HANDLE, // dedicatedBuffer
-- VK_NULL_HANDLE, // dedicatedImage
-- UINT32_MAX, // dedicatedBufferImageUsage
-- *pCreateInfo,
-- VMA_SUBALLOCATION_TYPE_UNKNOWN,
-- 1, // allocationCount
-- pAllocation);
--
-- if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
-- {
-- allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-- }
--
-- return result;
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
-- VmaAllocator allocator,
-- const VkMemoryRequirements* pVkMemoryRequirements,
-- const VmaAllocationCreateInfo* pCreateInfo,
-- size_t allocationCount,
-- VmaAllocation* pAllocations,
-- VmaAllocationInfo* pAllocationInfo)
--{
-- if(allocationCount == 0)
-- {
-- return VK_SUCCESS;
-- }
--
-- VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
--
-- VMA_DEBUG_LOG("vmaAllocateMemoryPages");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- VkResult result = allocator->AllocateMemory(
-- *pVkMemoryRequirements,
-- false, // requiresDedicatedAllocation
-- false, // prefersDedicatedAllocation
-- VK_NULL_HANDLE, // dedicatedBuffer
-- VK_NULL_HANDLE, // dedicatedImage
-- UINT32_MAX, // dedicatedBufferImageUsage
-- *pCreateInfo,
-- VMA_SUBALLOCATION_TYPE_UNKNOWN,
-- allocationCount,
-- pAllocations);
--
-- if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
-- {
-- for(size_t i = 0; i < allocationCount; ++i)
-- {
-- allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
-- }
-- }
--
-- return result;
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
-- VmaAllocator allocator,
-- VkBuffer buffer,
-- const VmaAllocationCreateInfo* pCreateInfo,
-- VmaAllocation* pAllocation,
-- VmaAllocationInfo* pAllocationInfo)
--{
-- VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
--
-- VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- VkMemoryRequirements vkMemReq = {};
-- bool requiresDedicatedAllocation = false;
-- bool prefersDedicatedAllocation = false;
-- allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
-- requiresDedicatedAllocation,
-- prefersDedicatedAllocation);
--
-- VkResult result = allocator->AllocateMemory(
-- vkMemReq,
-- requiresDedicatedAllocation,
-- prefersDedicatedAllocation,
-- buffer, // dedicatedBuffer
-- VK_NULL_HANDLE, // dedicatedImage
-- UINT32_MAX, // dedicatedBufferImageUsage
-- *pCreateInfo,
-- VMA_SUBALLOCATION_TYPE_BUFFER,
-- 1, // allocationCount
-- pAllocation);
--
-- if(pAllocationInfo && result == VK_SUCCESS)
-- {
-- allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-- }
--
-- return result;
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
-- VmaAllocator allocator,
-- VkImage image,
-- const VmaAllocationCreateInfo* pCreateInfo,
-- VmaAllocation* pAllocation,
-- VmaAllocationInfo* pAllocationInfo)
--{
-- VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
--
-- VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- VkMemoryRequirements vkMemReq = {};
-- bool requiresDedicatedAllocation = false;
-- bool prefersDedicatedAllocation = false;
-- allocator->GetImageMemoryRequirements(image, vkMemReq,
-- requiresDedicatedAllocation, prefersDedicatedAllocation);
--
-- VkResult result = allocator->AllocateMemory(
-- vkMemReq,
-- requiresDedicatedAllocation,
-- prefersDedicatedAllocation,
-- VK_NULL_HANDLE, // dedicatedBuffer
-- image, // dedicatedImage
-- UINT32_MAX, // dedicatedBufferImageUsage
-- *pCreateInfo,
-- VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
-- 1, // allocationCount
-- pAllocation);
--
-- if(pAllocationInfo && result == VK_SUCCESS)
-- {
-- allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-- }
--
-- return result;
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
-- VmaAllocator allocator,
-- VmaAllocation allocation)
--{
-- VMA_ASSERT(allocator);
--
-- if(allocation == VK_NULL_HANDLE)
-- {
-- return;
-- }
--
-- VMA_DEBUG_LOG("vmaFreeMemory");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- allocator->FreeMemory(
-- 1, // allocationCount
-- &allocation);
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
-- VmaAllocator allocator,
-- size_t allocationCount,
-- const VmaAllocation* pAllocations)
--{
-- if(allocationCount == 0)
-- {
-- return;
-- }
--
-- VMA_ASSERT(allocator);
--
-- VMA_DEBUG_LOG("vmaFreeMemoryPages");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- allocator->FreeMemory(allocationCount, pAllocations);
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
-- VmaAllocator allocator,
-- VmaAllocation allocation,
-- VmaAllocationInfo* pAllocationInfo)
--{
-- VMA_ASSERT(allocator && allocation && pAllocationInfo);
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- allocator->GetAllocationInfo(allocation, pAllocationInfo);
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
-- VmaAllocator allocator,
-- VmaAllocation allocation,
-- void* pUserData)
--{
-- VMA_ASSERT(allocator && allocation);
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- allocation->SetUserData(allocator, pUserData);
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- const char* VMA_NULLABLE pName)
--{
-- allocation->SetName(allocator, pName);
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- VkMemoryPropertyFlags* VMA_NOT_NULL pFlags)
--{
-- VMA_ASSERT(allocator && allocation && pFlags);
-- const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
-- *pFlags = allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
-- VmaAllocator allocator,
-- VmaAllocation allocation,
-- void** ppData)
--{
-- VMA_ASSERT(allocator && allocation && ppData);
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- return allocator->Map(allocation, ppData);
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
-- VmaAllocator allocator,
-- VmaAllocation allocation)
--{
-- VMA_ASSERT(allocator && allocation);
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- allocator->Unmap(allocation);
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
-- VmaAllocator allocator,
-- VmaAllocation allocation,
-- VkDeviceSize offset,
-- VkDeviceSize size)
--{
-- VMA_ASSERT(allocator && allocation);
--
-- VMA_DEBUG_LOG("vmaFlushAllocation");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
--
-- return res;
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
-- VmaAllocator allocator,
-- VmaAllocation allocation,
-- VkDeviceSize offset,
-- VkDeviceSize size)
--{
-- VMA_ASSERT(allocator && allocation);
--
-- VMA_DEBUG_LOG("vmaInvalidateAllocation");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
--
-- return res;
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
-- VmaAllocator allocator,
-- uint32_t allocationCount,
-- const VmaAllocation* allocations,
-- const VkDeviceSize* offsets,
-- const VkDeviceSize* sizes)
--{
-- VMA_ASSERT(allocator);
--
-- if(allocationCount == 0)
-- {
-- return VK_SUCCESS;
-- }
--
-- VMA_ASSERT(allocations);
--
-- VMA_DEBUG_LOG("vmaFlushAllocations");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
--
-- return res;
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
-- VmaAllocator allocator,
-- uint32_t allocationCount,
-- const VmaAllocation* allocations,
-- const VkDeviceSize* offsets,
-- const VkDeviceSize* sizes)
--{
-- VMA_ASSERT(allocator);
--
-- if(allocationCount == 0)
-- {
-- return VK_SUCCESS;
-- }
--
-- VMA_ASSERT(allocations);
--
-- VMA_DEBUG_LOG("vmaInvalidateAllocations");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
--
-- return res;
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(
-- VmaAllocator allocator,
-- uint32_t memoryTypeBits)
--{
-- VMA_ASSERT(allocator);
--
-- VMA_DEBUG_LOG("vmaCheckCorruption");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- return allocator->CheckCorruption(memoryTypeBits);
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(
-- VmaAllocator allocator,
-- const VmaDefragmentationInfo* pInfo,
-- VmaDefragmentationContext* pContext)
--{
-- VMA_ASSERT(allocator && pInfo && pContext);
--
-- VMA_DEBUG_LOG("vmaBeginDefragmentation");
--
-- if (pInfo->pool != VMA_NULL)
-- {
-- // Check if run on supported algorithms
-- if (pInfo->pool->m_BlockVector.GetAlgorithm() & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
-- return VK_ERROR_FEATURE_NOT_PRESENT;
-- }
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo);
-- return VK_SUCCESS;
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(
-- VmaAllocator allocator,
-- VmaDefragmentationContext context,
-- VmaDefragmentationStats* pStats)
--{
-- VMA_ASSERT(allocator && context);
--
-- VMA_DEBUG_LOG("vmaEndDefragmentation");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- if (pStats)
-- context->GetStats(*pStats);
-- vma_delete(allocator, context);
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaDefragmentationContext VMA_NOT_NULL context,
-- VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo)
--{
-- VMA_ASSERT(context && pPassInfo);
--
-- VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- return context->DefragmentPassBegin(*pPassInfo);
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaDefragmentationContext VMA_NOT_NULL context,
-- VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo)
--{
-- VMA_ASSERT(context && pPassInfo);
--
-- VMA_DEBUG_LOG("vmaEndDefragmentationPass");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- return context->DefragmentPassEnd(*pPassInfo);
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
-- VmaAllocator allocator,
-- VmaAllocation allocation,
-- VkBuffer buffer)
--{
-- VMA_ASSERT(allocator && allocation && buffer);
--
-- VMA_DEBUG_LOG("vmaBindBufferMemory");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
-- VmaAllocator allocator,
-- VmaAllocation allocation,
-- VkDeviceSize allocationLocalOffset,
-- VkBuffer buffer,
-- const void* pNext)
--{
-- VMA_ASSERT(allocator && allocation && buffer);
--
-- VMA_DEBUG_LOG("vmaBindBufferMemory2");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
-- VmaAllocator allocator,
-- VmaAllocation allocation,
-- VkImage image)
--{
-- VMA_ASSERT(allocator && allocation && image);
--
-- VMA_DEBUG_LOG("vmaBindImageMemory");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
-- VmaAllocator allocator,
-- VmaAllocation allocation,
-- VkDeviceSize allocationLocalOffset,
-- VkImage image,
-- const void* pNext)
--{
-- VMA_ASSERT(allocator && allocation && image);
--
-- VMA_DEBUG_LOG("vmaBindImageMemory2");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
-- VmaAllocator allocator,
-- const VkBufferCreateInfo* pBufferCreateInfo,
-- const VmaAllocationCreateInfo* pAllocationCreateInfo,
-- VkBuffer* pBuffer,
-- VmaAllocation* pAllocation,
-- VmaAllocationInfo* pAllocationInfo)
--{
-- VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
--
-- if(pBufferCreateInfo->size == 0)
-- {
-- return VK_ERROR_INITIALIZATION_FAILED;
-- }
-- if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
-- !allocator->m_UseKhrBufferDeviceAddress)
-- {
-- VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
-- return VK_ERROR_INITIALIZATION_FAILED;
-- }
--
-- VMA_DEBUG_LOG("vmaCreateBuffer");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- *pBuffer = VK_NULL_HANDLE;
-- *pAllocation = VK_NULL_HANDLE;
--
-- // 1. Create VkBuffer.
-- VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
-- allocator->m_hDevice,
-- pBufferCreateInfo,
-- allocator->GetAllocationCallbacks(),
-- pBuffer);
-- if(res >= 0)
-- {
-- // 2. vkGetBufferMemoryRequirements.
-- VkMemoryRequirements vkMemReq = {};
-- bool requiresDedicatedAllocation = false;
-- bool prefersDedicatedAllocation = false;
-- allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
-- requiresDedicatedAllocation, prefersDedicatedAllocation);
--
-- // 3. Allocate memory using allocator.
-- res = allocator->AllocateMemory(
-- vkMemReq,
-- requiresDedicatedAllocation,
-- prefersDedicatedAllocation,
-- *pBuffer, // dedicatedBuffer
-- VK_NULL_HANDLE, // dedicatedImage
-- pBufferCreateInfo->usage, // dedicatedBufferImageUsage
-- *pAllocationCreateInfo,
-- VMA_SUBALLOCATION_TYPE_BUFFER,
-- 1, // allocationCount
-- pAllocation);
--
-- if(res >= 0)
-- {
-- // 3. Bind buffer with memory.
-- if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
-- {
-- res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
-- }
-- if(res >= 0)
-- {
-- // All steps succeeded.
-- #if VMA_STATS_STRING_ENABLED
-- (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
-- #endif
-- if(pAllocationInfo != VMA_NULL)
-- {
-- allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-- }
--
-- return VK_SUCCESS;
-- }
-- allocator->FreeMemory(
-- 1, // allocationCount
-- pAllocation);
-- *pAllocation = VK_NULL_HANDLE;
-- (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
-- *pBuffer = VK_NULL_HANDLE;
-- return res;
-- }
-- (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
-- *pBuffer = VK_NULL_HANDLE;
-- return res;
-- }
-- return res;
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
-- VmaAllocator allocator,
-- const VkBufferCreateInfo* pBufferCreateInfo,
-- const VmaAllocationCreateInfo* pAllocationCreateInfo,
-- VkDeviceSize minAlignment,
-- VkBuffer* pBuffer,
-- VmaAllocation* pAllocation,
-- VmaAllocationInfo* pAllocationInfo)
--{
-- VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation);
--
-- if(pBufferCreateInfo->size == 0)
-- {
-- return VK_ERROR_INITIALIZATION_FAILED;
-- }
-- if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
-- !allocator->m_UseKhrBufferDeviceAddress)
-- {
-- VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
-- return VK_ERROR_INITIALIZATION_FAILED;
-- }
--
-- VMA_DEBUG_LOG("vmaCreateBufferWithAlignment");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- *pBuffer = VK_NULL_HANDLE;
-- *pAllocation = VK_NULL_HANDLE;
--
-- // 1. Create VkBuffer.
-- VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
-- allocator->m_hDevice,
-- pBufferCreateInfo,
-- allocator->GetAllocationCallbacks(),
-- pBuffer);
-- if(res >= 0)
-- {
-- // 2. vkGetBufferMemoryRequirements.
-- VkMemoryRequirements vkMemReq = {};
-- bool requiresDedicatedAllocation = false;
-- bool prefersDedicatedAllocation = false;
-- allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
-- requiresDedicatedAllocation, prefersDedicatedAllocation);
--
-- // 2a. Include minAlignment
-- vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment);
--
-- // 3. Allocate memory using allocator.
-- res = allocator->AllocateMemory(
-- vkMemReq,
-- requiresDedicatedAllocation,
-- prefersDedicatedAllocation,
-- *pBuffer, // dedicatedBuffer
-- VK_NULL_HANDLE, // dedicatedImage
-- pBufferCreateInfo->usage, // dedicatedBufferImageUsage
-- *pAllocationCreateInfo,
-- VMA_SUBALLOCATION_TYPE_BUFFER,
-- 1, // allocationCount
-- pAllocation);
--
-- if(res >= 0)
-- {
-- // 3. Bind buffer with memory.
-- if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
-- {
-- res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
-- }
-- if(res >= 0)
-- {
-- // All steps succeeded.
-- #if VMA_STATS_STRING_ENABLED
-- (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
-- #endif
-- if(pAllocationInfo != VMA_NULL)
-- {
-- allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-- }
--
-- return VK_SUCCESS;
-- }
-- allocator->FreeMemory(
-- 1, // allocationCount
-- pAllocation);
-- *pAllocation = VK_NULL_HANDLE;
-- (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
-- *pBuffer = VK_NULL_HANDLE;
-- return res;
-- }
-- (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
-- *pBuffer = VK_NULL_HANDLE;
-- return res;
-- }
-- return res;
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
-- VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer)
--{
-- return vmaCreateAliasingBuffer2(allocator, allocation, 0, pBufferCreateInfo, pBuffer);
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- VkDeviceSize allocationLocalOffset,
-- const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
-- VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer)
--{
-- VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation);
-- VMA_ASSERT(allocationLocalOffset + pBufferCreateInfo->size <= allocation->GetSize());
--
-- VMA_DEBUG_LOG("vmaCreateAliasingBuffer2");
--
-- *pBuffer = VK_NULL_HANDLE;
--
-- if (pBufferCreateInfo->size == 0)
-- {
-- return VK_ERROR_INITIALIZATION_FAILED;
-- }
-- if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
-- !allocator->m_UseKhrBufferDeviceAddress)
-- {
-- VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
-- return VK_ERROR_INITIALIZATION_FAILED;
-- }
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- // 1. Create VkBuffer.
-- VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
-- allocator->m_hDevice,
-- pBufferCreateInfo,
-- allocator->GetAllocationCallbacks(),
-- pBuffer);
-- if (res >= 0)
-- {
-- // 2. Bind buffer with memory.
-- res = allocator->BindBufferMemory(allocation, allocationLocalOffset, *pBuffer, VMA_NULL);
-- if (res >= 0)
-- {
-- return VK_SUCCESS;
-- }
-- (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
-- }
-- return res;
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
-- VmaAllocator allocator,
-- VkBuffer buffer,
-- VmaAllocation allocation)
--{
-- VMA_ASSERT(allocator);
--
-- if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
-- {
-- return;
-- }
--
-- VMA_DEBUG_LOG("vmaDestroyBuffer");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- if(buffer != VK_NULL_HANDLE)
-- {
-- (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
-- }
--
-- if(allocation != VK_NULL_HANDLE)
-- {
-- allocator->FreeMemory(
-- 1, // allocationCount
-- &allocation);
-- }
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
-- VmaAllocator allocator,
-- const VkImageCreateInfo* pImageCreateInfo,
-- const VmaAllocationCreateInfo* pAllocationCreateInfo,
-- VkImage* pImage,
-- VmaAllocation* pAllocation,
-- VmaAllocationInfo* pAllocationInfo)
--{
-- VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
--
-- if(pImageCreateInfo->extent.width == 0 ||
-- pImageCreateInfo->extent.height == 0 ||
-- pImageCreateInfo->extent.depth == 0 ||
-- pImageCreateInfo->mipLevels == 0 ||
-- pImageCreateInfo->arrayLayers == 0)
-- {
-- return VK_ERROR_INITIALIZATION_FAILED;
-- }
--
-- VMA_DEBUG_LOG("vmaCreateImage");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- *pImage = VK_NULL_HANDLE;
-- *pAllocation = VK_NULL_HANDLE;
--
-- // 1. Create VkImage.
-- VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
-- allocator->m_hDevice,
-- pImageCreateInfo,
-- allocator->GetAllocationCallbacks(),
-- pImage);
-- if(res >= 0)
-- {
-- VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
-- VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
-- VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
--
-- // 2. Allocate memory using allocator.
-- VkMemoryRequirements vkMemReq = {};
-- bool requiresDedicatedAllocation = false;
-- bool prefersDedicatedAllocation = false;
-- allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
-- requiresDedicatedAllocation, prefersDedicatedAllocation);
--
-- res = allocator->AllocateMemory(
-- vkMemReq,
-- requiresDedicatedAllocation,
-- prefersDedicatedAllocation,
-- VK_NULL_HANDLE, // dedicatedBuffer
-- *pImage, // dedicatedImage
-- pImageCreateInfo->usage, // dedicatedBufferImageUsage
-- *pAllocationCreateInfo,
-- suballocType,
-- 1, // allocationCount
-- pAllocation);
--
-- if(res >= 0)
-- {
-- // 3. Bind image with memory.
-- if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
-- {
-- res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
-- }
-- if(res >= 0)
-- {
-- // All steps succeeded.
-- #if VMA_STATS_STRING_ENABLED
-- (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
-- #endif
-- if(pAllocationInfo != VMA_NULL)
-- {
-- allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-- }
--
-- return VK_SUCCESS;
-- }
-- allocator->FreeMemory(
-- 1, // allocationCount
-- pAllocation);
-- *pAllocation = VK_NULL_HANDLE;
-- (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
-- *pImage = VK_NULL_HANDLE;
-- return res;
-- }
-- (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
-- *pImage = VK_NULL_HANDLE;
-- return res;
-- }
-- return res;
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
-- VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage)
--{
-- return vmaCreateAliasingImage2(allocator, allocation, 0, pImageCreateInfo, pImage);
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VmaAllocation VMA_NOT_NULL allocation,
-- VkDeviceSize allocationLocalOffset,
-- const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
-- VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage)
--{
-- VMA_ASSERT(allocator && pImageCreateInfo && pImage && allocation);
--
-- *pImage = VK_NULL_HANDLE;
--
-- VMA_DEBUG_LOG("vmaCreateImage2");
--
-- if (pImageCreateInfo->extent.width == 0 ||
-- pImageCreateInfo->extent.height == 0 ||
-- pImageCreateInfo->extent.depth == 0 ||
-- pImageCreateInfo->mipLevels == 0 ||
-- pImageCreateInfo->arrayLayers == 0)
-- {
-- return VK_ERROR_INITIALIZATION_FAILED;
-- }
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- // 1. Create VkImage.
-- VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
-- allocator->m_hDevice,
-- pImageCreateInfo,
-- allocator->GetAllocationCallbacks(),
-- pImage);
-- if (res >= 0)
-- {
-- // 2. Bind image with memory.
-- res = allocator->BindImageMemory(allocation, allocationLocalOffset, *pImage, VMA_NULL);
-- if (res >= 0)
-- {
-- return VK_SUCCESS;
-- }
-- (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
-- }
-- return res;
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
-- VmaAllocator VMA_NOT_NULL allocator,
-- VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
-- VmaAllocation VMA_NULLABLE allocation)
--{
-- VMA_ASSERT(allocator);
--
-- if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
-- {
-- return;
-- }
--
-- VMA_DEBUG_LOG("vmaDestroyImage");
--
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK
--
-- if(image != VK_NULL_HANDLE)
-- {
-- (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
-- }
-- if(allocation != VK_NULL_HANDLE)
-- {
-- allocator->FreeMemory(
-- 1, // allocationCount
-- &allocation);
-- }
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
-- const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
-- VmaVirtualBlock VMA_NULLABLE * VMA_NOT_NULL pVirtualBlock)
--{
-- VMA_ASSERT(pCreateInfo && pVirtualBlock);
-- VMA_ASSERT(pCreateInfo->size > 0);
-- VMA_DEBUG_LOG("vmaCreateVirtualBlock");
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-- *pVirtualBlock = vma_new(pCreateInfo->pAllocationCallbacks, VmaVirtualBlock_T)(*pCreateInfo);
-- VkResult res = (*pVirtualBlock)->Init();
-- if(res < 0)
-- {
-- vma_delete(pCreateInfo->pAllocationCallbacks, *pVirtualBlock);
-- *pVirtualBlock = VK_NULL_HANDLE;
-- }
-- return res;
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(VmaVirtualBlock VMA_NULLABLE virtualBlock)
--{
-- if(virtualBlock != VK_NULL_HANDLE)
-- {
-- VMA_DEBUG_LOG("vmaDestroyVirtualBlock");
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-- VkAllocationCallbacks allocationCallbacks = virtualBlock->m_AllocationCallbacks; // Have to copy the callbacks when destroying.
-- vma_delete(&allocationCallbacks, virtualBlock);
-- }
--}
--
--VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_NOT_NULL virtualBlock)
--{
-- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
-- VMA_DEBUG_LOG("vmaIsVirtualBlockEmpty");
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-- return virtualBlock->IsEmpty() ? VK_TRUE : VK_FALSE;
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-- VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo)
--{
-- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL);
-- VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo");
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-- virtualBlock->GetAllocationInfo(allocation, *pVirtualAllocInfo);
--}
--
--VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-- const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
-- VkDeviceSize* VMA_NULLABLE pOffset)
--{
-- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL);
-- VMA_DEBUG_LOG("vmaVirtualAllocate");
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-- return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset);
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation)
--{
-- if(allocation != VK_NULL_HANDLE)
-- {
-- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
-- VMA_DEBUG_LOG("vmaVirtualFree");
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-- virtualBlock->Free(allocation);
-- }
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NULL virtualBlock)
--{
-- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
-- VMA_DEBUG_LOG("vmaClearVirtualBlock");
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-- virtualBlock->Clear();
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-- VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData)
--{
-- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
-- VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData");
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-- virtualBlock->SetAllocationUserData(allocation, pUserData);
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-- VmaStatistics* VMA_NOT_NULL pStats)
--{
-- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL);
-- VMA_DEBUG_LOG("vmaGetVirtualBlockStatistics");
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-- virtualBlock->GetStatistics(*pStats);
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-- VmaDetailedStatistics* VMA_NOT_NULL pStats)
--{
-- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL);
-- VMA_DEBUG_LOG("vmaCalculateVirtualBlockStatistics");
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-- virtualBlock->CalculateDetailedStatistics(*pStats);
--}
--
--#if VMA_STATS_STRING_ENABLED
--
--VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-- char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, VkBool32 detailedMap)
--{
-- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && ppStatsString != VMA_NULL);
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-- const VkAllocationCallbacks* allocationCallbacks = virtualBlock->GetAllocationCallbacks();
-- VmaStringBuilder sb(allocationCallbacks);
-- virtualBlock->BuildStatsString(detailedMap != VK_FALSE, sb);
-- *ppStatsString = VmaCreateStringCopy(allocationCallbacks, sb.GetData(), sb.GetLength());
--}
--
--VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-- char* VMA_NULLABLE pStatsString)
--{
-- if(pStatsString != VMA_NULL)
-- {
-- VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
-- VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-- VmaFreeString(virtualBlock->GetAllocationCallbacks(), pStatsString);
-- }
--}
--#endif // VMA_STATS_STRING_ENABLED
--#endif // _VMA_PUBLIC_INTERFACE
--#endif // VMA_IMPLEMENTATION
--
--/**
--\page quick_start Quick start
--
--\section quick_start_project_setup Project setup
--
--Vulkan Memory Allocator comes in form of a "stb-style" single header file.
--You don't need to build it as a separate library project.
--You can add this file directly to your project and submit it to code repository next to your other source files.
--
--"Single header" doesn't mean that everything is contained in C/C++ declarations,
--like it tends to be in case of inline functions or C++ templates.
--It means that implementation is bundled with interface in a single file and needs to be extracted using preprocessor macro.
--If you don't do it properly, you will get linker errors.
--
--To do it properly:
--
---# Include "vk_mem_alloc.h" file in each CPP file where you want to use the library.
-- This includes declarations of all members of the library.
---# In exactly one CPP file define following macro before this include.
-- It enables also internal definitions.
--
--\code
--#define VMA_IMPLEMENTATION
--#include "vk_mem_alloc.h"
--\endcode
--
--It may be a good idea to create dedicated CPP file just for this purpose.
--
--This library includes header `<vulkan/vulkan.h>`, which in turn
--includes `<windows.h>` on Windows. If you need some specific macros defined
--before including these headers (like `WIN32_LEAN_AND_MEAN` or
--`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
--them before every `#include` of this library.
--
--This library is written in C++, but has C-compatible interface.
--Thus you can include and use vk_mem_alloc.h in C or C++ code, but full
--implementation with `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.
--Some features of C++14 are used. STL containers, RTTI, or C++ exceptions are not used.
--
--
--\section quick_start_initialization Initialization
--
--At program startup:
--
---# Initialize Vulkan to have `VkPhysicalDevice`, `VkDevice` and `VkInstance` object.
---# Fill VmaAllocatorCreateInfo structure and create #VmaAllocator object by
-- calling vmaCreateAllocator().
--
--Only members `physicalDevice`, `device`, `instance` are required.
--However, you should inform the library which Vulkan version do you use by setting
--VmaAllocatorCreateInfo::vulkanApiVersion and which extensions did you enable
--by setting VmaAllocatorCreateInfo::flags (like #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT for VK_KHR_buffer_device_address).
--Otherwise, VMA would use only features of Vulkan 1.0 core with no extensions.
--
--\subsection quick_start_initialization_selecting_vulkan_version Selecting Vulkan version
--
--VMA supports Vulkan version down to 1.0, for backward compatibility.
--If you want to use higher version, you need to inform the library about it.
--This is a two-step process.
--
--<b>Step 1: Compile time.</b> By default, VMA compiles with code supporting the highest
--Vulkan version found in the included `<vulkan/vulkan.h>` that is also supported by the library.
--If this is OK, you don't need to do anything.
--However, if you want to compile VMA as if only some lower Vulkan version was available,
--define macro `VMA_VULKAN_VERSION` before every `#include "vk_mem_alloc.h"`.
--It should have decimal numeric value in form of ABBBCCC, where A = major, BBB = minor, CCC = patch Vulkan version.
--For example, to compile against Vulkan 1.2:
--
--\code
--#define VMA_VULKAN_VERSION 1002000 // Vulkan 1.2
--#include "vk_mem_alloc.h"
--\endcode
--
--<b>Step 2: Runtime.</b> Even when compiled with higher Vulkan version available,
--VMA can use only features of a lower version, which is configurable during creation of the #VmaAllocator object.
--By default, only Vulkan 1.0 is used.
--To initialize the allocator with support for higher Vulkan version, you need to set member
--VmaAllocatorCreateInfo::vulkanApiVersion to an appropriate value, e.g. using constants like `VK_API_VERSION_1_2`.
--See code sample below.
--
--\subsection quick_start_initialization_importing_vulkan_functions Importing Vulkan functions
--
--You may need to configure importing Vulkan functions. There are 3 ways to do this:
--
---# **If you link with Vulkan static library** (e.g. "vulkan-1.lib" on Windows):
-- - You don't need to do anything.
-- - VMA will use these, as macro `VMA_STATIC_VULKAN_FUNCTIONS` is defined to 1 by default.
---# **If you want VMA to fetch pointers to Vulkan functions dynamically** using `vkGetInstanceProcAddr`,
-- `vkGetDeviceProcAddr` (this is the option presented in the example below):
-- - Define `VMA_STATIC_VULKAN_FUNCTIONS` to 0, `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 1.
-- - Provide pointers to these two functions via VmaVulkanFunctions::vkGetInstanceProcAddr,
-- VmaVulkanFunctions::vkGetDeviceProcAddr.
-- - The library will fetch pointers to all other functions it needs internally.
---# **If you fetch pointers to all Vulkan functions in a custom way**, e.g. using some loader like
-- [Volk](https://github.com/zeux/volk):
-- - Define `VMA_STATIC_VULKAN_FUNCTIONS` and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 0.
-- - Pass these pointers via structure #VmaVulkanFunctions.
--
--Example for case 2:
--
--\code
--#define VMA_STATIC_VULKAN_FUNCTIONS 0
--#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
--#include "vk_mem_alloc.h"
--
--...
--
--VmaVulkanFunctions vulkanFunctions = {};
--vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr;
--vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr;
--
--VmaAllocatorCreateInfo allocatorCreateInfo = {};
--allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
--allocatorCreateInfo.physicalDevice = physicalDevice;
--allocatorCreateInfo.device = device;
--allocatorCreateInfo.instance = instance;
--allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
--
--VmaAllocator allocator;
--vmaCreateAllocator(&allocatorCreateInfo, &allocator);
--\endcode
--
--
--\section quick_start_resource_allocation Resource allocation
--
--When you want to create a buffer or image:
--
---# Fill `VkBufferCreateInfo` / `VkImageCreateInfo` structure.
---# Fill VmaAllocationCreateInfo structure.
---# Call vmaCreateBuffer() / vmaCreateImage() to get `VkBuffer`/`VkImage` with memory
-- already allocated and bound to it, plus #VmaAllocation objects that represents its underlying memory.
--
--\code
--VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
--bufferInfo.size = 65536;
--bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
--
--VmaAllocationCreateInfo allocInfo = {};
--allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
--
--VkBuffer buffer;
--VmaAllocation allocation;
--vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
--\endcode
--
--Don't forget to destroy your objects when no longer needed:
--
--\code
--vmaDestroyBuffer(allocator, buffer, allocation);
--vmaDestroyAllocator(allocator);
--\endcode
--
--
--\page choosing_memory_type Choosing memory type
--
--Physical devices in Vulkan support various combinations of memory heaps and
--types. Help with choosing correct and optimal memory type for your specific
--resource is one of the key features of this library. You can use it by filling
--appropriate members of VmaAllocationCreateInfo structure, as described below.
--You can also combine multiple methods.
--
---# If you just want to find memory type index that meets your requirements, you
-- can use function: vmaFindMemoryTypeIndexForBufferInfo(),
-- vmaFindMemoryTypeIndexForImageInfo(), vmaFindMemoryTypeIndex().
---# If you want to allocate a region of device memory without association with any
-- specific image or buffer, you can use function vmaAllocateMemory(). Usage of
-- this function is not recommended and usually not needed.
-- vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once,
-- which may be useful for sparse binding.
---# If you already have a buffer or an image created, you want to allocate memory
-- for it and then you will bind it yourself, you can use function
-- vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage().
-- For binding you should use functions: vmaBindBufferMemory(), vmaBindImageMemory()
-- or their extended versions: vmaBindBufferMemory2(), vmaBindImageMemory2().
---# **This is the easiest and recommended way to use this library:**
-- If you want to create a buffer or an image, allocate memory for it and bind
-- them together, all in one call, you can use function vmaCreateBuffer(),
-- vmaCreateImage().
--
--When using 3. or 4., the library internally queries Vulkan for memory types
--supported for that buffer or image (function `vkGetBufferMemoryRequirements()`)
--and uses only one of these types.
--
--If no memory type can be found that meets all the requirements, these functions
--return `VK_ERROR_FEATURE_NOT_PRESENT`.
--
--You can leave VmaAllocationCreateInfo structure completely filled with zeros.
--It means no requirements are specified for memory type.
--It is valid, although not very useful.
--
--\section choosing_memory_type_usage Usage
--
--The easiest way to specify memory requirements is to fill member
--VmaAllocationCreateInfo::usage using one of the values of enum #VmaMemoryUsage.
--It defines high level, common usage types.
--Since version 3 of the library, it is recommended to use #VMA_MEMORY_USAGE_AUTO to let it select best memory type for your resource automatically.
--
--For example, if you want to create a uniform buffer that will be filled using
--transfer only once or infrequently and then used for rendering every frame as a uniform buffer, you can
--do it using following code. The buffer will most likely end up in a memory type with
--`VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT` to be fast to access by the GPU device.
--
--\code
--VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
--bufferInfo.size = 65536;
--bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
--
--VmaAllocationCreateInfo allocInfo = {};
--allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
--
--VkBuffer buffer;
--VmaAllocation allocation;
--vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
--\endcode
--
--If you have a preference for putting the resource in GPU (device) memory or CPU (host) memory
--on systems with discrete graphics card that have the memories separate, you can use
--#VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST.
--
--When using `VMA_MEMORY_USAGE_AUTO*` while you want to map the allocated memory,
--you also need to specify one of the host access flags:
--#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
--This will help the library decide about preferred memory type to ensure it has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
--so you can map it.
--
--For example, a staging buffer that will be filled via mapped pointer and then
--used as a source of transfer to the buffer described previously can be created like this.
--It will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT`
--but not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM).
--
--\code
--VkBufferCreateInfo stagingBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
--stagingBufferInfo.size = 65536;
--stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
--
--VmaAllocationCreateInfo stagingAllocInfo = {};
--stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
--stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
--
--VkBuffer stagingBuffer;
--VmaAllocation stagingAllocation;
--vmaCreateBuffer(allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr);
--\endcode
--
--For more examples of creating different kinds of resources, see chapter \ref usage_patterns.
--
--Usage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library knows
--about the resource being created by having `VkBufferCreateInfo` / `VkImageCreateInfo` passed,
--so they work with functions like: vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo() etc.
--If you allocate raw memory using function vmaAllocateMemory(), you have to use other means of selecting
--memory type, as described below.
--
--\note
--Old usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`,
--`VMA_MEMORY_USAGE_CPU_TO_GPU`, `VMA_MEMORY_USAGE_GPU_TO_CPU`, `VMA_MEMORY_USAGE_CPU_COPY`)
--are still available and work same way as in previous versions of the library
--for backward compatibility, but they are not recommended.
--
--\section choosing_memory_type_required_preferred_flags Required and preferred flags
--
--You can specify more detailed requirements by filling members
--VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags
--with a combination of bits from enum `VkMemoryPropertyFlags`. For example,
--if you want to create a buffer that will be persistently mapped on host (so it
--must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`,
--use following code:
--
--\code
--VmaAllocationCreateInfo allocInfo = {};
--allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
--allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
--allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
--
--VkBuffer buffer;
--VmaAllocation allocation;
--vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
--\endcode
--
--A memory type is chosen that has all the required flags and as many preferred
--flags set as possible.
--
--Value passed in VmaAllocationCreateInfo::usage is internally converted to a set of required and preferred flags,
--plus some extra "magic" (heuristics).
--
--\section choosing_memory_type_explicit_memory_types Explicit memory types
--
--If you inspected memory types available on the physical device and you have
--a preference for memory types that you want to use, you can fill member
--VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set
--means that a memory type with that index is allowed to be used for the
--allocation. Special value 0, just like `UINT32_MAX`, means there are no
--restrictions to memory type index.
--
--Please note that this member is NOT just a memory type index.
--Still you can use it to choose just one, specific memory type.
--For example, if you already determined that your buffer should be created in
--memory type 2, use following code:
--
--\code
--uint32_t memoryTypeIndex = 2;
--
--VmaAllocationCreateInfo allocInfo = {};
--allocInfo.memoryTypeBits = 1u << memoryTypeIndex;
--
--VkBuffer buffer;
--VmaAllocation allocation;
--vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
--\endcode
--
--
--\section choosing_memory_type_custom_memory_pools Custom memory pools
--
--If you allocate from custom memory pool, all the ways of specifying memory
--requirements described above are not applicable and the aforementioned members
--of VmaAllocationCreateInfo structure are ignored. Memory type is selected
--explicitly when creating the pool and then used to make all the allocations from
--that pool. For further details, see \ref custom_memory_pools.
--
--\section choosing_memory_type_dedicated_allocations Dedicated allocations
--
--Memory for allocations is reserved out of larger block of `VkDeviceMemory`
--allocated from Vulkan internally. That is the main feature of this whole library.
--You can still request a separate memory block to be created for an allocation,
--just like you would do in a trivial solution without using any allocator.
--In that case, a buffer or image is always bound to that memory at offset 0.
--This is called a "dedicated allocation".
--You can explicitly request it by using flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
--The library can also internally decide to use dedicated allocation in some cases, e.g.:
--
--- When the size of the allocation is large.
--- When [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled
-- and it reports that dedicated allocation is required or recommended for the resource.
--- When allocation of next big memory block fails due to not enough device memory,
-- but allocation with the exact requested size succeeds.
--
--
--\page memory_mapping Memory mapping
--
--To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`,
--to be able to read from it or write to it in CPU code.
--Mapping is possible only of memory allocated from a memory type that has
--`VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
--Functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose.
--You can use them directly with memory allocated by this library,
--but it is not recommended because of following issue:
--Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed.
--This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan.
--Because of this, Vulkan Memory Allocator provides following facilities:
--
--\note If you want to be able to map an allocation, you need to specify one of the flags
--#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
--in VmaAllocationCreateInfo::flags. These flags are required for an allocation to be mappable
--when using #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` enum values.
--For other usage values they are ignored and every such allocation made in `HOST_VISIBLE` memory type is mappable,
--but they can still be used for consistency.
--
--\section memory_mapping_mapping_functions Mapping functions
--
--The library provides following functions for mapping of a specific #VmaAllocation: vmaMapMemory(), vmaUnmapMemory().
--They are safer and more convenient to use than standard Vulkan functions.
--You can map an allocation multiple times simultaneously - mapping is reference-counted internally.
--You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block.
--The way it is implemented is that the library always maps entire memory block, not just region of the allocation.
--For further details, see description of vmaMapMemory() function.
--Example:
--
--\code
--// Having these objects initialized:
--struct ConstantBuffer
--{
-- ...
--};
--ConstantBuffer constantBufferData = ...
--
--VmaAllocator allocator = ...
--VkBuffer constantBuffer = ...
--VmaAllocation constantBufferAllocation = ...
--
--// You can map and fill your buffer using following code:
--
--void* mappedData;
--vmaMapMemory(allocator, constantBufferAllocation, &mappedData);
--memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
--vmaUnmapMemory(allocator, constantBufferAllocation);
--\endcode
--
--When mapping, you may see a warning from Vulkan validation layer similar to this one:
--
--<i>Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.</i>
--
--It happens because the library maps entire `VkDeviceMemory` block, where different
--types of images and buffers may end up together, especially on GPUs with unified memory like Intel.
--You can safely ignore it if you are sure you access only memory of the intended
--object that you wanted to map.
--
--
--\section memory_mapping_persistently_mapped_memory Persistently mapped memory
--
--Keeping your memory persistently mapped is generally OK in Vulkan.
--You don't need to unmap it before using its data on the GPU.
--The library provides a special feature designed for that:
--Allocations made with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in
--VmaAllocationCreateInfo::flags stay mapped all the time,
--so you can just access CPU pointer to it any time
--without a need to call any "map" or "unmap" function.
--Example:
--
--\code
--VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
--bufCreateInfo.size = sizeof(ConstantBuffer);
--bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
--
--VmaAllocationCreateInfo allocCreateInfo = {};
--allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
--allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
-- VMA_ALLOCATION_CREATE_MAPPED_BIT;
--
--VkBuffer buf;
--VmaAllocation alloc;
--VmaAllocationInfo allocInfo;
--vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
--
--// Buffer is already mapped. You can access its memory.
--memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
--\endcode
--
--\note #VMA_ALLOCATION_CREATE_MAPPED_BIT by itself doesn't guarantee that the allocation will end up
--in a mappable memory type.
--For this, you need to also specify #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or
--#VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
--#VMA_ALLOCATION_CREATE_MAPPED_BIT only guarantees that if the memory is `HOST_VISIBLE`, the allocation will be mapped on creation.
--For an example of how to make use of this fact, see section \ref usage_patterns_advanced_data_uploading.
--
--\section memory_mapping_cache_control Cache flush and invalidate
--
--Memory in Vulkan doesn't need to be unmapped before using it on GPU,
--but unless a memory types has `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
--you need to manually **invalidate** cache before reading of mapped pointer
--and **flush** cache after writing to mapped pointer.
--Map/unmap operations don't do that automatically.
--Vulkan provides following functions for this purpose `vkFlushMappedMemoryRanges()`,
--`vkInvalidateMappedMemoryRanges()`, but this library provides more convenient
--functions that refer to given allocation object: vmaFlushAllocation(),
--vmaInvalidateAllocation(),
--or multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations().
--
--Regions of memory specified for flush/invalidate must be aligned to
--`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
--In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
--within blocks are aligned to this value, so their offsets are always multiply of
--`nonCoherentAtomSize` and two different allocations never share same "line" of this size.
--
--Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA)
--currently provide `HOST_COHERENT` flag on all memory types that are
--`HOST_VISIBLE`, so on PC you may not need to bother.
--
--
--\page staying_within_budget Staying within budget
--
--When developing a graphics-intensive game or program, it is important to avoid allocating
--more GPU memory than it is physically available. When the memory is over-committed,
--various bad things can happen, depending on the specific GPU, graphics driver, and
--operating system:
--
--- It may just work without any problems.
--- The application may slow down because some memory blocks are moved to system RAM
-- and the GPU has to access them through PCI Express bus.
--- A new allocation may take very long time to complete, even few seconds, and possibly
-- freeze entire system.
--- The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
--- It may even result in GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST`
-- returned somewhere later.
--
--\section staying_within_budget_querying_for_budget Querying for budget
--
--To query for current memory usage and available budget, use function vmaGetHeapBudgets().
--Returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap.
--
--Please note that this function returns different information and works faster than
--vmaCalculateStatistics(). vmaGetHeapBudgets() can be called every frame or even before every
--allocation, while vmaCalculateStatistics() is intended to be used rarely,
--only to obtain statistical information, e.g. for debugging purposes.
--
--It is recommended to use <b>VK_EXT_memory_budget</b> device extension to obtain information
--about the budget from Vulkan device. VMA is able to use this extension automatically.
--When not enabled, the allocator behaves same way, but then it estimates current usage
--and available budget based on its internal information and Vulkan memory heap sizes,
--which may be less precise. In order to use this extension:
--
--1. Make sure extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2
-- required by it are available and enable them. Please note that the first is a device
-- extension and the second is instance extension!
--2. Use flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating #VmaAllocator object.
--3. Make sure to call vmaSetCurrentFrameIndex() every frame. Budget is queried from
-- Vulkan inside of it to avoid overhead of querying it with every allocation.
--
--\section staying_within_budget_controlling_memory_usage Controlling memory usage
--
--There are many ways in which you can try to stay within the budget.
--
--First, when making new allocation requires allocating a new memory block, the library
--tries not to exceed the budget automatically. If a block with default recommended size
--(e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even
--dedicated memory for just this resource.
--
--If the size of the requested resource plus current memory usage is more than the
--budget, by default the library still tries to create it, leaving it to the Vulkan
--implementation whether the allocation succeeds or fails. You can change this behavior
--by using #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is
--not made if it would exceed the budget or if the budget is already exceeded.
--VMA then tries to make the allocation from the next eligible Vulkan memory type.
--The all of them fail, the call then fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
--Example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag
--when creating resources that are not essential for the application (e.g. the texture
--of a specific object) and not to pass it when creating critically important resources
--(e.g. render targets).
--
--On AMD graphics cards there is a custom vendor extension available: <b>VK_AMD_memory_overallocation_behavior</b>
--that allows to control the behavior of the Vulkan implementation in out-of-memory cases -
--whether it should fail with an error code or still allow the allocation.
--Usage of this extension involves only passing extra structure on Vulkan device creation,
--so it is out of scope of this library.
--
--Finally, you can also use #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
--a new allocation is created only when it fits inside one of the existing memory blocks.
--If it would require to allocate a new block, if fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
--This also ensures that the function call is very fast because it never goes to Vulkan
--to obtain a new block.
--
--\note Creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
--set to more than 0 will currently try to allocate memory blocks without checking whether they
--fit within budget.
--
--
--\page resource_aliasing Resource aliasing (overlap)
--
--New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory
--management, give an opportunity to alias (overlap) multiple resources in the
--same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL).
--It can be useful to save video memory, but it must be used with caution.
--
--For example, if you know the flow of your whole render frame in advance, you
--are going to use some intermediate textures or buffers only during a small range of render passes,
--and you know these ranges don't overlap in time, you can bind these resources to
--the same place in memory, even if they have completely different parameters (width, height, format etc.).
--
--![Resource aliasing (overlap)](../gfx/Aliasing.png)
--
--Such scenario is possible using VMA, but you need to create your images manually.
--Then you need to calculate parameters of an allocation to be made using formula:
--
--- allocation size = max(size of each image)
--- allocation alignment = max(alignment of each image)
--- allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image)
--
--Following example shows two different images bound to the same place in memory,
--allocated to fit largest of them.
--
--\code
--// A 512x512 texture to be sampled.
--VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
--img1CreateInfo.imageType = VK_IMAGE_TYPE_2D;
--img1CreateInfo.extent.width = 512;
--img1CreateInfo.extent.height = 512;
--img1CreateInfo.extent.depth = 1;
--img1CreateInfo.mipLevels = 10;
--img1CreateInfo.arrayLayers = 1;
--img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
--img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
--img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
--img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
--img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
--
--// A full screen texture to be used as color attachment.
--VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
--img2CreateInfo.imageType = VK_IMAGE_TYPE_2D;
--img2CreateInfo.extent.width = 1920;
--img2CreateInfo.extent.height = 1080;
--img2CreateInfo.extent.depth = 1;
--img2CreateInfo.mipLevels = 1;
--img2CreateInfo.arrayLayers = 1;
--img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
--img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
--img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
--img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
--img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
--
--VkImage img1;
--res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1);
--VkImage img2;
--res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2);
--
--VkMemoryRequirements img1MemReq;
--vkGetImageMemoryRequirements(device, img1, &img1MemReq);
--VkMemoryRequirements img2MemReq;
--vkGetImageMemoryRequirements(device, img2, &img2MemReq);
--
--VkMemoryRequirements finalMemReq = {};
--finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size);
--finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment);
--finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits;
--// Validate if(finalMemReq.memoryTypeBits != 0)
--
--VmaAllocationCreateInfo allocCreateInfo = {};
--allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
--
--VmaAllocation alloc;
--res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);
--
--res = vmaBindImageMemory(allocator, alloc, img1);
--res = vmaBindImageMemory(allocator, alloc, img2);
--
--// You can use img1, img2 here, but not at the same time!
--
--vmaFreeMemory(allocator, alloc);
--vkDestroyImage(allocator, img2, nullptr);
--vkDestroyImage(allocator, img1, nullptr);
--\endcode
--
--VMA also provides convenience functions that create a buffer or image and bind it to memory
--represented by an existing #VmaAllocation:
--vmaCreateAliasingBuffer(), vmaCreateAliasingBuffer2(),
--vmaCreateAliasingImage(), vmaCreateAliasingImage2().
--Versions with "2" offer additional parameter `allocationLocalOffset`.
--
--Remember that using resources that alias in memory requires proper synchronization.
--You need to issue a memory barrier to make sure commands that use `img1` and `img2`
--don't overlap on GPU timeline.
--You also need to treat a resource after aliasing as uninitialized - containing garbage data.
--For example, if you use `img1` and then want to use `img2`, you need to issue
--an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`.
--
--Additional considerations:
--
--- Vulkan also allows to interpret contents of memory between aliasing resources consistently in some cases.
--See chapter 11.8. "Memory Aliasing" of Vulkan specification or `VK_IMAGE_CREATE_ALIAS_BIT` flag.
--- You can create more complex layout where different images and buffers are bound
--at different offsets inside one large allocation. For example, one can imagine
--a big texture used in some render passes, aliasing with a set of many small buffers
--used in some further passes. To bind a resource at non-zero offset in an allocation,
--use vmaBindBufferMemory2() / vmaBindImageMemory2().
--- Before allocating memory for the resources you want to alias, check `memoryTypeBits`
--returned in memory requirements of each resource to make sure the bits overlap.
--Some GPUs may expose multiple memory types suitable e.g. only for buffers or
--images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your
--resources may be disjoint. Aliasing them is not possible in that case.
--
--
--\page custom_memory_pools Custom memory pools
--
--A memory pool contains a number of `VkDeviceMemory` blocks.
--The library automatically creates and manages default pool for each memory type available on the device.
--Default memory pool automatically grows in size.
--Size of allocated blocks is also variable and managed automatically.
--
--You can create custom pool and allocate memory out of it.
--It can be useful if you want to:
--
--- Keep certain kind of allocations separate from others.
--- Enforce particular, fixed size of Vulkan memory blocks.
--- Limit maximum amount of Vulkan memory allocated for that pool.
--- Reserve minimum or fixed amount of Vulkan memory always preallocated for that pool.
--- Use extra parameters for a set of your allocations that are available in #VmaPoolCreateInfo but not in
-- #VmaAllocationCreateInfo - e.g., custom minimum alignment, custom `pNext` chain.
--- Perform defragmentation on a specific subset of your allocations.
--
--To use custom memory pools:
--
---# Fill VmaPoolCreateInfo structure.
---# Call vmaCreatePool() to obtain #VmaPool handle.
---# When making an allocation, set VmaAllocationCreateInfo::pool to this handle.
-- You don't need to specify any other parameters of this structure, like `usage`.
--
--Example:
--
--\code
--// Find memoryTypeIndex for the pool.
--VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
--sampleBufCreateInfo.size = 0x10000; // Doesn't matter.
--sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
--
--VmaAllocationCreateInfo sampleAllocCreateInfo = {};
--sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
--
--uint32_t memTypeIndex;
--VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
-- &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);
--// Check res...
--
--// Create a pool that can have at most 2 blocks, 128 MiB each.
--VmaPoolCreateInfo poolCreateInfo = {};
--poolCreateInfo.memoryTypeIndex = memTypeIndex;
--poolCreateInfo.blockSize = 128ull * 1024 * 1024;
--poolCreateInfo.maxBlockCount = 2;
--
--VmaPool pool;
--res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
--// Check res...
--
--// Allocate a buffer out of it.
--VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
--bufCreateInfo.size = 1024;
--bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
--
--VmaAllocationCreateInfo allocCreateInfo = {};
--allocCreateInfo.pool = pool;
--
--VkBuffer buf;
--VmaAllocation alloc;
--res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
--// Check res...
--\endcode
--
--You have to free all allocations made from this pool before destroying it.
--
--\code
--vmaDestroyBuffer(allocator, buf, alloc);
--vmaDestroyPool(allocator, pool);
--\endcode
--
--New versions of this library support creating dedicated allocations in custom pools.
--It is supported only when VmaPoolCreateInfo::blockSize = 0.
--To use this feature, set VmaAllocationCreateInfo::pool to the pointer to your custom pool and
--VmaAllocationCreateInfo::flags to #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
--
--\note Excessive use of custom pools is a common mistake when using this library.
--Custom pools may be useful for special purposes - when you want to
--keep certain type of resources separate e.g. to reserve minimum amount of memory
--for them or limit maximum amount of memory they can occupy. For most
--resources this is not needed and so it is not recommended to create #VmaPool
--objects and allocations out of them. Allocating from the default pool is sufficient.
--
--
--\section custom_memory_pools_MemTypeIndex Choosing memory type index
--
--When creating a pool, you must explicitly specify memory type index.
--To find the one suitable for your buffers or images, you can use helper functions
--vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo().
--You need to provide structures with example parameters of buffers or images
--that you are going to create in that pool.
--
--\code
--VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
--exampleBufCreateInfo.size = 1024; // Doesn't matter
--exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
--
--VmaAllocationCreateInfo allocCreateInfo = {};
--allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
--
--uint32_t memTypeIndex;
--vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);
--
--VmaPoolCreateInfo poolCreateInfo = {};
--poolCreateInfo.memoryTypeIndex = memTypeIndex;
--// ...
--\endcode
--
--When creating buffers/images allocated in that pool, provide following parameters:
--
--- `VkBufferCreateInfo`: Prefer to pass same parameters as above.
-- Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior.
-- Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers
-- or the other way around.
--- VmaAllocationCreateInfo: You don't need to pass same parameters. Fill only `pool` member.
-- Other members are ignored anyway.
--
--\section linear_algorithm Linear allocation algorithm
--
--Each Vulkan memory block managed by this library has accompanying metadata that
--keeps track of used and unused regions. By default, the metadata structure and
--algorithm tries to find best place for new allocations among free regions to
--optimize memory usage. This way you can allocate and free objects in any order.
--
--![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png)
--
--Sometimes there is a need to use simpler, linear allocation algorithm. You can
--create custom pool that uses such algorithm by adding flag
--#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating
--#VmaPool object. Then an alternative metadata management is used. It always
--creates new allocations after last one and doesn't reuse free regions after
--allocations freed in the middle. It results in better allocation performance and
--less memory consumed by metadata.
--
--![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png)
--
--With this one flag, you can create a custom pool that can be used in many ways:
--free-at-once, stack, double stack, and ring buffer. See below for details.
--You don't need to specify explicitly which of these options you are going to use - it is detected automatically.
--
--\subsection linear_algorithm_free_at_once Free-at-once
--
--In a pool that uses linear algorithm, you still need to free all the allocations
--individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free
--them in any order. New allocations are always made after last one - free space
--in the middle is not reused. However, when you release all the allocations and
--the pool becomes empty, allocation starts from the beginning again. This way you
--can use linear algorithm to speed up creation of allocations that you are going
--to release all at once.
--
--![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png)
--
--This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount
--value that allows multiple memory blocks.
--
--\subsection linear_algorithm_stack Stack
--
--When you free an allocation that was created last, its space can be reused.
--Thanks to this, if you always release allocations in the order opposite to their
--creation (LIFO - Last In First Out), you can achieve behavior of a stack.
--
--![Stack](../gfx/Linear_allocator_4_stack.png)
--
--This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount
--value that allows multiple memory blocks.
--
--\subsection linear_algorithm_double_stack Double stack
--
--The space reserved by a custom pool with linear algorithm may be used by two
--stacks:
--
--- First, default one, growing up from offset 0.
--- Second, "upper" one, growing down from the end towards lower offsets.
--
--To make allocation from the upper stack, add flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
--to VmaAllocationCreateInfo::flags.
--
--![Double stack](../gfx/Linear_allocator_7_double_stack.png)
--
--Double stack is available only in pools with one memory block -
--VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
--
--When the two stacks' ends meet so there is not enough space between them for a
--new allocation, such allocation fails with usual
--`VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
--
--\subsection linear_algorithm_ring_buffer Ring buffer
--
--When you free some allocations from the beginning and there is not enough free space
--for a new one at the end of a pool, allocator's "cursor" wraps around to the
--beginning and starts allocation there. Thanks to this, if you always release
--allocations in the same order as you created them (FIFO - First In First Out),
--you can achieve behavior of a ring buffer / queue.
--
--![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png)
--
--Ring buffer is available only in pools with one memory block -
--VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
--
--\note \ref defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
--
--
--\page defragmentation Defragmentation
--
--Interleaved allocations and deallocations of many objects of varying size can
--cause fragmentation over time, which can lead to a situation where the library is unable
--to find a continuous range of free memory for a new allocation despite there is
--enough free space, just scattered across many small free ranges between existing
--allocations.
--
--To mitigate this problem, you can use defragmentation feature.
--It doesn't happen automatically though and needs your cooperation,
--because VMA is a low level library that only allocates memory.
--It cannot recreate buffers and images in a new place as it doesn't remember the contents of `VkBufferCreateInfo` / `VkImageCreateInfo` structures.
--It cannot copy their contents as it doesn't record any commands to a command buffer.
--
--Example:
--
--\code
--VmaDefragmentationInfo defragInfo = {};
--defragInfo.pool = myPool;
--defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;
--
--VmaDefragmentationContext defragCtx;
--VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
--// Check res...
--
--for(;;)
--{
-- VmaDefragmentationPassMoveInfo pass;
-- res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
-- if(res == VK_SUCCESS)
-- break;
-- else if(res != VK_INCOMPLETE)
-- // Handle error...
--
-- for(uint32_t i = 0; i < pass.moveCount; ++i)
-- {
-- // Inspect pass.pMoves[i].srcAllocation, identify what buffer/image it represents.
-- VmaAllocationInfo allocInfo;
-- vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &allocInfo);
-- MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData;
--
-- // Recreate and bind this buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset.
-- VkImageCreateInfo imgCreateInfo = ...
-- VkImage newImg;
-- res = vkCreateImage(device, &imgCreateInfo, nullptr, &newImg);
-- // Check res...
-- res = vmaBindImageMemory(allocator, pass.pMoves[i].dstTmpAllocation, newImg);
-- // Check res...
--
-- // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place.
-- vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...);
-- }
--
-- // Make sure the copy commands finished executing.
-- vkWaitForFences(...);
--
-- // Destroy old buffers/images bound with pass.pMoves[i].srcAllocation.
-- for(uint32_t i = 0; i < pass.moveCount; ++i)
-- {
-- // ...
-- vkDestroyImage(device, resData->img, nullptr);
-- }
--
-- // Update appropriate descriptors to point to the new places...
--
-- res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
-- if(res == VK_SUCCESS)
-- break;
-- else if(res != VK_INCOMPLETE)
-- // Handle error...
--}
--
--vmaEndDefragmentation(allocator, defragCtx, nullptr);
--\endcode
--
--Although functions like vmaCreateBuffer(), vmaCreateImage(), vmaDestroyBuffer(), vmaDestroyImage()
--create/destroy an allocation and a buffer/image at once, these are just a shortcut for
--creating the resource, allocating memory, and binding them together.
--Defragmentation works on memory allocations only. You must handle the rest manually.
--Defragmentation is an iterative process that should repeat "passes" as long as related functions
--return `VK_INCOMPLETE` not `VK_SUCCESS`.
--In each pass:
--
--1. vmaBeginDefragmentationPass() function call:
-- - Calculates and returns the list of allocations to be moved in this pass.
-- Note this can be a time-consuming process.
-- - Reserves destination memory for them by creating temporary destination allocations
-- that you can query for their `VkDeviceMemory` + offset using vmaGetAllocationInfo().
--2. Inside the pass, **you should**:
-- - Inspect the returned list of allocations to be moved.
-- - Create new buffers/images and bind them at the returned destination temporary allocations.
-- - Copy data from source to destination resources if necessary.
-- - Destroy the source buffers/images, but NOT their allocations.
--3. vmaEndDefragmentationPass() function call:
-- - Frees the source memory reserved for the allocations that are moved.
-- - Modifies source #VmaAllocation objects that are moved to point to the destination reserved memory.
-- - Frees `VkDeviceMemory` blocks that became empty.
--
--Unlike in previous iterations of the defragmentation API, there is no list of "movable" allocations passed as a parameter.
--Defragmentation algorithm tries to move all suitable allocations.
--You can, however, refuse to move some of them inside a defragmentation pass, by setting
--`pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
--This is not recommended and may result in suboptimal packing of the allocations after defragmentation.
--If you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool.
--
--Inside a pass, for each allocation that should be moved:
--
--- You should copy its data from the source to the destination place by calling e.g. `vkCmdCopyBuffer()`, `vkCmdCopyImage()`.
-- - You need to make sure these commands finished executing before destroying the source buffers/images and before calling vmaEndDefragmentationPass().
--- If a resource doesn't contain any meaningful data, e.g. it is a transient color attachment image to be cleared,
-- filled, and used temporarily in each rendering frame, you can just recreate this image
-- without copying its data.
--- If the resource is in `HOST_VISIBLE` and `HOST_CACHED` memory, you can copy its data on the CPU
-- using `memcpy()`.
--- If you cannot move the allocation, you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
-- This will cancel the move.
-- - vmaEndDefragmentationPass() will then free the destination memory
-- not the source memory of the allocation, leaving it unchanged.
--- If you decide the allocation is unimportant and can be destroyed instead of moved (e.g. it wasn't used for long time),
-- you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
-- - vmaEndDefragmentationPass() will then free both source and destination memory, and will destroy the source #VmaAllocation object.
--
--You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool
--(like in the example above) or all the default pools by setting this member to null.
--
--Defragmentation is always performed in each pool separately.
--Allocations are never moved between different Vulkan memory types.
--The size of the destination memory reserved for a moved allocation is the same as the original one.
--Alignment of an allocation as it was determined using `vkGetBufferMemoryRequirements()` etc. is also respected after defragmentation.
--Buffers/images should be recreated with the same `VkBufferCreateInfo` / `VkImageCreateInfo` parameters as the original ones.
--
--You can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved
--in each pass, e.g. to call it in sync with render frames and not to experience too big hitches.
--See members: VmaDefragmentationInfo::maxBytesPerPass, VmaDefragmentationInfo::maxAllocationsPerPass.
--
--It is also safe to perform the defragmentation asynchronously to render frames and other Vulkan and VMA
--usage, possibly from multiple threads, with the exception that allocations
--returned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended.
--
--<b>Mapping</b> is preserved on allocations that are moved during defragmentation.
--Whether through #VMA_ALLOCATION_CREATE_MAPPED_BIT or vmaMapMemory(), the allocations
--are mapped at their new place. Of course, pointer to the mapped data changes, so it needs to be queried
--using VmaAllocationInfo::pMappedData.
--
--\note Defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
--
--
--\page statistics Statistics
--
--This library contains several functions that return information about its internal state,
--especially the amount of memory allocated from Vulkan.
--
--\section statistics_numeric_statistics Numeric statistics
--
--If you need to obtain basic statistics about memory usage per heap, together with current budget,
--you can call function vmaGetHeapBudgets() and inspect structure #VmaBudget.
--This is useful to keep track of memory usage and stay within budget
--(see also \ref staying_within_budget).
--Example:
--
--\code
--uint32_t heapIndex = ...
--
--VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
--vmaGetHeapBudgets(allocator, budgets);
--
--printf("My heap currently has %u allocations taking %llu B,\n",
-- budgets[heapIndex].statistics.allocationCount,
-- budgets[heapIndex].statistics.allocationBytes);
--printf("allocated out of %u Vulkan device memory blocks taking %llu B,\n",
-- budgets[heapIndex].statistics.blockCount,
-- budgets[heapIndex].statistics.blockBytes);
--printf("Vulkan reports total usage %llu B with budget %llu B.\n",
-- budgets[heapIndex].usage,
-- budgets[heapIndex].budget);
--\endcode
--
--You can query for more detailed statistics per memory heap, type, and totals,
--including minimum and maximum allocation size and unused range size,
--by calling function vmaCalculateStatistics() and inspecting structure #VmaTotalStatistics.
--This function is slower though, as it has to traverse all the internal data structures,
--so it should be used only for debugging purposes.
--
--You can query for statistics of a custom pool using function vmaGetPoolStatistics()
--or vmaCalculatePoolStatistics().
--
--You can query for information about a specific allocation using function vmaGetAllocationInfo().
--It fills structure #VmaAllocationInfo.
--
--\section statistics_json_dump JSON dump
--
--You can dump internal state of the allocator to a string in JSON format using function vmaBuildStatsString().
--The result is guaranteed to be correct JSON.
--It uses ANSI encoding.
--Any strings provided by user (see [Allocation names](@ref allocation_names))
--are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,
--this JSON string can be treated as using this encoding.
--It must be freed using function vmaFreeStatsString().
--
--The format of this JSON string is not part of official documentation of the library,
--but it will not change in backward-incompatible way without increasing library major version number
--and appropriate mention in changelog.
--
--The JSON string contains all the data that can be obtained using vmaCalculateStatistics().
--It can also contain detailed map of allocated memory blocks and their regions -
--free and occupied by allocations.
--This allows e.g. to visualize the memory or assess fragmentation.
--
--
--\page allocation_annotation Allocation names and user data
--
--\section allocation_user_data Allocation user data
--
--You can annotate allocations with your own information, e.g. for debugging purposes.
--To do that, fill VmaAllocationCreateInfo::pUserData field when creating
--an allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer,
--some handle, index, key, ordinal number or any other value that would associate
--the allocation with your custom metadata.
--It is useful to identify appropriate data structures in your engine given #VmaAllocation,
--e.g. when doing \ref defragmentation.
--
--\code
--VkBufferCreateInfo bufCreateInfo = ...
--
--MyBufferMetadata* pMetadata = CreateBufferMetadata();
--
--VmaAllocationCreateInfo allocCreateInfo = {};
--allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
--allocCreateInfo.pUserData = pMetadata;
--
--VkBuffer buffer;
--VmaAllocation allocation;
--vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
--\endcode
--
--The pointer may be later retrieved as VmaAllocationInfo::pUserData:
--
--\code
--VmaAllocationInfo allocInfo;
--vmaGetAllocationInfo(allocator, allocation, &allocInfo);
--MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData;
--\endcode
--
--It can also be changed using function vmaSetAllocationUserData().
--
--Values of (non-zero) allocations' `pUserData` are printed in JSON report created by
--vmaBuildStatsString() in hexadecimal form.
--
--\section allocation_names Allocation names
--
--An allocation can also carry a null-terminated string, giving a name to the allocation.
--To set it, call vmaSetAllocationName().
--The library creates internal copy of the string, so the pointer you pass doesn't need
--to be valid for whole lifetime of the allocation. You can free it after the call.
--
--\code
--std::string imageName = "Texture: ";
--imageName += fileName;
--vmaSetAllocationName(allocator, allocation, imageName.c_str());
--\endcode
--
--The string can be later retrieved by inspecting VmaAllocationInfo::pName.
--It is also printed in JSON report created by vmaBuildStatsString().
--
--\note Setting string name to VMA allocation doesn't automatically set it to the Vulkan buffer or image created with it.
--You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library.
--
--
--\page virtual_allocator Virtual allocator
--
--As an extra feature, the core allocation algorithm of the library is exposed through a simple and convenient API of "virtual allocator".
--It doesn't allocate any real GPU memory. It just keeps track of used and free regions of a "virtual block".
--You can use it to allocate your own memory or other objects, even completely unrelated to Vulkan.
--A common use case is sub-allocation of pieces of one large GPU buffer.
--
--\section virtual_allocator_creating_virtual_block Creating virtual block
--
--To use this functionality, there is no main "allocator" object.
--You don't need to have #VmaAllocator object created.
--All you need to do is to create a separate #VmaVirtualBlock object for each block of memory you want to be managed by the allocator:
--
---# Fill in #VmaVirtualBlockCreateInfo structure.
---# Call vmaCreateVirtualBlock(). Get new #VmaVirtualBlock object.
--
--Example:
--
--\code
--VmaVirtualBlockCreateInfo blockCreateInfo = {};
--blockCreateInfo.size = 1048576; // 1 MB
--
--VmaVirtualBlock block;
--VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
--\endcode
--
--\section virtual_allocator_making_virtual_allocations Making virtual allocations
--
--#VmaVirtualBlock object contains internal data structure that keeps track of free and occupied regions
--using the same code as the main Vulkan memory allocator.
--Similarly to #VmaAllocation for standard GPU allocations, there is #VmaVirtualAllocation type
--that represents an opaque handle to an allocation within the virtual block.
--
--In order to make such allocation:
--
---# Fill in #VmaVirtualAllocationCreateInfo structure.
---# Call vmaVirtualAllocate(). Get new #VmaVirtualAllocation object that represents the allocation.
-- You can also receive `VkDeviceSize offset` that was assigned to the allocation.
--
--Example:
--
--\code
--VmaVirtualAllocationCreateInfo allocCreateInfo = {};
--allocCreateInfo.size = 4096; // 4 KB
--
--VmaVirtualAllocation alloc;
--VkDeviceSize offset;
--res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
--if(res == VK_SUCCESS)
--{
-- // Use the 4 KB of your memory starting at offset.
--}
--else
--{
-- // Allocation failed - no space for it could be found. Handle this error!
--}
--\endcode
--
--\section virtual_allocator_deallocation Deallocation
--
--When no longer needed, an allocation can be freed by calling vmaVirtualFree().
--You can only pass to this function an allocation that was previously returned by vmaVirtualAllocate()
--called for the same #VmaVirtualBlock.
--
--When whole block is no longer needed, the block object can be released by calling vmaDestroyVirtualBlock().
--All allocations must be freed before the block is destroyed, which is checked internally by an assert.
--However, if you don't want to call vmaVirtualFree() for each allocation, you can use vmaClearVirtualBlock() to free them all at once -
--a feature not available in normal Vulkan memory allocator. Example:
--
--\code
--vmaVirtualFree(block, alloc);
--vmaDestroyVirtualBlock(block);
--\endcode
--
--\section virtual_allocator_allocation_parameters Allocation parameters
--
--You can attach a custom pointer to each allocation by using vmaSetVirtualAllocationUserData().
--Its default value is null.
--It can be used to store any data that needs to be associated with that allocation - e.g. an index, a handle, or a pointer to some
--larger data structure containing more information. Example:
--
--\code
--struct CustomAllocData
--{
-- std::string m_AllocName;
--};
--CustomAllocData* allocData = new CustomAllocData();
--allocData->m_AllocName = "My allocation 1";
--vmaSetVirtualAllocationUserData(block, alloc, allocData);
--\endcode
--
--The pointer can later be fetched, along with allocation offset and size, by passing the allocation handle to function
--vmaGetVirtualAllocationInfo() and inspecting returned structure #VmaVirtualAllocationInfo.
--If you allocated a new object to be used as the custom pointer, don't forget to delete that object before freeing the allocation!
--Example:
--
--\code
--VmaVirtualAllocationInfo allocInfo;
--vmaGetVirtualAllocationInfo(block, alloc, &allocInfo);
--delete (CustomAllocData*)allocInfo.pUserData;
--
--vmaVirtualFree(block, alloc);
--\endcode
--
--\section virtual_allocator_alignment_and_units Alignment and units
--
--It feels natural to express sizes and offsets in bytes.
--If an offset of an allocation needs to be aligned to a multiple of some number (e.g. 4 bytes), you can fill optional member
--VmaVirtualAllocationCreateInfo::alignment to request it. Example:
--
--\code
--VmaVirtualAllocationCreateInfo allocCreateInfo = {};
--allocCreateInfo.size = 4096; // 4 KB
--allocCreateInfo.alignment = 4; // Returned offset must be a multiple of 4 B
--
--VmaVirtualAllocation alloc;
--res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, nullptr);
--\endcode
--
--Alignments of different allocations made from one block may vary.
--However, if all alignments and sizes are always a multiple of some size e.g. 4 B or `sizeof(MyDataStruct)`,
--you can express all sizes, alignments, and offsets in multiples of that size instead of individual bytes.
--It might be more convenient, but you need to make sure to use this new unit consistently in all the places:
--
--- VmaVirtualBlockCreateInfo::size
--- VmaVirtualAllocationCreateInfo::size and VmaVirtualAllocationCreateInfo::alignment
--- Using offset returned by vmaVirtualAllocate() or in VmaVirtualAllocationInfo::offset
--
--\section virtual_allocator_statistics Statistics
--
--You can obtain statistics of a virtual block using vmaGetVirtualBlockStatistics()
--(to get brief statistics that are fast to calculate)
--or vmaCalculateVirtualBlockStatistics() (to get more detailed statistics, slower to calculate).
--The functions fill structures #VmaStatistics, #VmaDetailedStatistics respectively - same as used by the normal Vulkan memory allocator.
--Example:
--
--\code
--VmaStatistics stats;
--vmaGetVirtualBlockStatistics(block, &stats);
--printf("My virtual block has %llu bytes used by %u virtual allocations\n",
-- stats.allocationBytes, stats.allocationCount);
--\endcode
--
--You can also request a full list of allocations and free regions as a string in JSON format by calling
--vmaBuildVirtualBlockStatsString().
--Returned string must be later freed using vmaFreeVirtualBlockStatsString().
--The format of this string differs from the one returned by the main Vulkan allocator, but it is similar.
--
--\section virtual_allocator_additional_considerations Additional considerations
--
--The "virtual allocator" functionality is implemented on a level of individual memory blocks.
--Keeping track of a whole collection of blocks, allocating new ones when out of free space,
--deleting empty ones, and deciding which one to try first for a new allocation must be implemented by the user.
--
--Alternative allocation algorithms are supported, just like in custom pools of the real GPU memory.
--See enum #VmaVirtualBlockCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT).
--You can find their description in chapter \ref custom_memory_pools.
--Allocation strategies are also supported.
--See enum #VmaVirtualAllocationCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT).
--
--Following features are supported only by the allocator of the real GPU memory and not by virtual allocations:
--buffer-image granularity, `VMA_DEBUG_MARGIN`, `VMA_MIN_ALIGNMENT`.
--
--
--\page debugging_memory_usage Debugging incorrect memory usage
--
--If you suspect a bug with memory usage, like usage of uninitialized memory or
--memory being overwritten out of bounds of an allocation,
--you can use debug features of this library to verify this.
--
--\section debugging_memory_usage_initialization Memory initialization
--
--If you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used,
--you can enable automatic memory initialization to verify this.
--To do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1.
--
--\code
--#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
--#include "vk_mem_alloc.h"
--\endcode
--
--It makes memory of new allocations initialized to bit pattern `0xDCDCDCDC`.
--Before an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`.
--Memory is automatically mapped and unmapped if necessary.
--
--If you find these values while debugging your program, good chances are that you incorrectly
--read Vulkan memory that is allocated but not initialized, or already freed, respectively.
--
--Memory initialization works only with memory types that are `HOST_VISIBLE` and with allocations that can be mapped.
--It works also with dedicated allocations.
--
--\section debugging_memory_usage_margins Margins
--
--By default, allocations are laid out in memory blocks next to each other if possible
--(considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`).
--
--![Allocations without margin](../gfx/Margins_1.png)
--
--Define macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce specified
--number of bytes as a margin after every allocation.
--
--\code
--#define VMA_DEBUG_MARGIN 16
--#include "vk_mem_alloc.h"
--\endcode
--
--![Allocations with margin](../gfx/Margins_2.png)
--
--If your bug goes away after enabling margins, it means it may be caused by memory
--being overwritten outside of allocation boundaries. It is not 100% certain though.
--Change in application behavior may also be caused by different order and distribution
--of allocations across memory blocks after margins are applied.
--
--Margins work with all types of memory.
--
--Margin is applied only to allocations made out of memory blocks and not to dedicated
--allocations, which have their own memory block of specific size.
--It is thus not applied to allocations made using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
--or those automatically decided to put into dedicated allocations, e.g. due to its
--large size or recommended by VK_KHR_dedicated_allocation extension.
--
--Margins appear in [JSON dump](@ref statistics_json_dump) as part of free space.
--
--Note that enabling margins increases memory usage and fragmentation.
--
--Margins do not apply to \ref virtual_allocator.
--
--\section debugging_memory_usage_corruption_detection Corruption detection
--
--You can additionally define macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
--of contents of the margins.
--
--\code
--#define VMA_DEBUG_MARGIN 16
--#define VMA_DEBUG_DETECT_CORRUPTION 1
--#include "vk_mem_alloc.h"
--\endcode
--
--When this feature is enabled, number of bytes specified as `VMA_DEBUG_MARGIN`
--(it must be multiply of 4) after every allocation is filled with a magic number.
--This idea is also know as "canary".
--Memory is automatically mapped and unmapped if necessary.
--
--This number is validated automatically when the allocation is destroyed.
--If it is not equal to the expected value, `VMA_ASSERT()` is executed.
--It clearly means that either CPU or GPU overwritten the memory outside of boundaries of the allocation,
--which indicates a serious bug.
--
--You can also explicitly request checking margins of all allocations in all memory blocks
--that belong to specified memory types by using function vmaCheckCorruption(),
--or in memory blocks that belong to specified custom pool, by using function
--vmaCheckPoolCorruption().
--
--Margin validation (corruption detection) works only for memory types that are
--`HOST_VISIBLE` and `HOST_COHERENT`.
--
--
--\page opengl_interop OpenGL Interop
--
--VMA provides some features that help with interoperability with OpenGL.
--
--\section opengl_interop_exporting_memory Exporting memory
--
--If you want to attach `VkExportMemoryAllocateInfoKHR` structure to `pNext` chain of memory allocations made by the library:
--
--It is recommended to create \ref custom_memory_pools for such allocations.
--Define and fill in your `VkExportMemoryAllocateInfoKHR` structure and attach it to VmaPoolCreateInfo::pMemoryAllocateNext
--while creating the custom pool.
--Please note that the structure must remain alive and unchanged for the whole lifetime of the #VmaPool,
--not only while creating it, as no copy of the structure is made,
--but its original pointer is used for each allocation instead.
--
--If you want to export all memory allocated by the library from certain memory types,
--also dedicated allocations or other allocations made from default pools,
--an alternative solution is to fill in VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes.
--It should point to an array with `VkExternalMemoryHandleTypeFlagsKHR` to be automatically passed by the library
--through `VkExportMemoryAllocateInfoKHR` on each allocation made from a specific memory type.
--Please note that new versions of the library also support dedicated allocations created in custom pools.
--
--You should not mix these two methods in a way that allows to apply both to the same memory type.
--Otherwise, `VkExportMemoryAllocateInfoKHR` structure would be attached twice to the `pNext` chain of `VkMemoryAllocateInfo`.
--
--
--\section opengl_interop_custom_alignment Custom alignment
--
--Buffers or images exported to a different API like OpenGL may require a different alignment,
--higher than the one used by the library automatically, queried from functions like `vkGetBufferMemoryRequirements`.
--To impose such alignment:
--
--It is recommended to create \ref custom_memory_pools for such allocations.
--Set VmaPoolCreateInfo::minAllocationAlignment member to the minimum alignment required for each allocation
--to be made out of this pool.
--The alignment actually used will be the maximum of this member and the alignment returned for the specific buffer or image
--from a function like `vkGetBufferMemoryRequirements`, which is called by VMA automatically.
--
--If you want to create a buffer with a specific minimum alignment out of default pools,
--use special function vmaCreateBufferWithAlignment(), which takes additional parameter `minAlignment`.
--
--Note the problem of alignment affects only resources placed inside bigger `VkDeviceMemory` blocks and not dedicated
--allocations, as these, by definition, always have alignment = 0 because the resource is bound to the beginning of its dedicated block.
--Contrary to Direct3D 12, Vulkan doesn't have a concept of alignment of the entire memory block passed on its allocation.
--
--
--\page usage_patterns Recommended usage patterns
--
--Vulkan gives great flexibility in memory allocation.
--This chapter shows the most common patterns.
--
--See also slides from talk:
--[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New)
--
--
--\section usage_patterns_gpu_only GPU-only resource
--
--<b>When:</b>
--Any resources that you frequently write and read on GPU,
--e.g. images used as color attachments (aka "render targets"), depth-stencil attachments,
--images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)").
--
--<b>What to do:</b>
--Let the library select the optimal memory type, which will likely have `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
--
--\code
--VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
--imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
--imgCreateInfo.extent.width = 3840;
--imgCreateInfo.extent.height = 2160;
--imgCreateInfo.extent.depth = 1;
--imgCreateInfo.mipLevels = 1;
--imgCreateInfo.arrayLayers = 1;
--imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
--imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
--imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
--imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
--imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
--
--VmaAllocationCreateInfo allocCreateInfo = {};
--allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
--allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
--allocCreateInfo.priority = 1.0f;
--
--VkImage img;
--VmaAllocation alloc;
--vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
--\endcode
--
--<b>Also consider:</b>
--Consider creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
--especially if they are large or if you plan to destroy and recreate them with different sizes
--e.g. when display resolution changes.
--Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.
--When VK_EXT_memory_priority extension is enabled, it is also worth setting high priority to such allocation
--to decrease chances to be evicted to system memory by the operating system.
--
--\section usage_patterns_staging_copy_upload Staging copy for upload
--
--<b>When:</b>
--A "staging" buffer than you want to map and fill from CPU code, then use as a source of transfer
--to some GPU resource.
--
--<b>What to do:</b>
--Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT.
--Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`.
--
--\code
--VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
--bufCreateInfo.size = 65536;
--bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
--
--VmaAllocationCreateInfo allocCreateInfo = {};
--allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
--allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
-- VMA_ALLOCATION_CREATE_MAPPED_BIT;
--
--VkBuffer buf;
--VmaAllocation alloc;
--VmaAllocationInfo allocInfo;
--vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
--
--...
--
--memcpy(allocInfo.pMappedData, myData, myDataSize);
--\endcode
--
--<b>Also consider:</b>
--You can map the allocation using vmaMapMemory() or you can create it as persistenly mapped
--using #VMA_ALLOCATION_CREATE_MAPPED_BIT, as in the example above.
--
--
--\section usage_patterns_readback Readback
--
--<b>When:</b>
--Buffers for data written by or transferred from the GPU that you want to read back on the CPU,
--e.g. results of some computations.
--
--<b>What to do:</b>
--Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
--Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
--and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
--
--\code
--VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
--bufCreateInfo.size = 65536;
--bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
--
--VmaAllocationCreateInfo allocCreateInfo = {};
--allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
--allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
-- VMA_ALLOCATION_CREATE_MAPPED_BIT;
--
--VkBuffer buf;
--VmaAllocation alloc;
--VmaAllocationInfo allocInfo;
--vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
--
--...
--
--const float* downloadedData = (const float*)allocInfo.pMappedData;
--\endcode
--
--
--\section usage_patterns_advanced_data_uploading Advanced data uploading
--
--For resources that you frequently write on CPU via mapped pointer and
--frequently read on GPU e.g. as a uniform buffer (also called "dynamic"), multiple options are possible:
--
---# Easiest solution is to have one copy of the resource in `HOST_VISIBLE` memory,
-- even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card,
-- and make the device reach out to that resource directly.
-- - Reads performed by the device will then go through PCI Express bus.
-- The performance of this access may be limited, but it may be fine depending on the size
-- of this resource (whether it is small enough to quickly end up in GPU cache) and the sparsity
-- of access.
---# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips),
-- a memory type may be available that is both `HOST_VISIBLE` (available for mapping) and `DEVICE_LOCAL`
-- (fast to access from the GPU). Then, it is likely the best choice for such type of resource.
---# Systems with a discrete graphics card and separate video memory may or may not expose
-- a memory type that is both `HOST_VISIBLE` and `DEVICE_LOCAL`, also known as Base Address Register (BAR).
-- If they do, it represents a piece of VRAM (or entire VRAM, if ReBAR is enabled in the motherboard BIOS)
-- that is available to CPU for mapping.
-- - Writes performed by the host to that memory go through PCI Express bus.
-- The performance of these writes may be limited, but it may be fine, especially on PCIe 4.0,
-- as long as rules of using uncached and write-combined memory are followed - only sequential writes and no reads.
---# Finally, you may need or prefer to create a separate copy of the resource in `DEVICE_LOCAL` memory,
-- a separate "staging" copy in `HOST_VISIBLE` memory and perform an explicit transfer command between them.
--
--Thankfully, VMA offers an aid to create and use such resources in the the way optimal
--for the current Vulkan device. To help the library make the best choice,
--use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT together with
--#VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT.
--It will then prefer a memory type that is both `DEVICE_LOCAL` and `HOST_VISIBLE` (integrated memory or BAR),
--but if no such memory type is available or allocation from it fails
--(PC graphics cards have only 256 MB of BAR by default, unless ReBAR is supported and enabled in BIOS),
--it will fall back to `DEVICE_LOCAL` memory for fast GPU access.
--It is then up to you to detect that the allocation ended up in a memory type that is not `HOST_VISIBLE`,
--so you need to create another "staging" allocation and perform explicit transfers.
--
--\code
--VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
--bufCreateInfo.size = 65536;
--bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
--
--VmaAllocationCreateInfo allocCreateInfo = {};
--allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
--allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
-- VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
-- VMA_ALLOCATION_CREATE_MAPPED_BIT;
--
--VkBuffer buf;
--VmaAllocation alloc;
--VmaAllocationInfo allocInfo;
--vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
--
--VkMemoryPropertyFlags memPropFlags;
--vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);
--
--if(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
--{
-- // Allocation ended up in a mappable memory and is already mapped - write to it directly.
--
-- // [Executed in runtime]:
-- memcpy(allocInfo.pMappedData, myData, myDataSize);
--}
--else
--{
-- // Allocation ended up in a non-mappable memory - need to transfer.
-- VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-- stagingBufCreateInfo.size = 65536;
-- stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
--
-- VmaAllocationCreateInfo stagingAllocCreateInfo = {};
-- stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-- stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
-- VMA_ALLOCATION_CREATE_MAPPED_BIT;
--
-- VkBuffer stagingBuf;
-- VmaAllocation stagingAlloc;
-- VmaAllocationInfo stagingAllocInfo;
-- vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo,
-- &stagingBuf, &stagingAlloc, stagingAllocInfo);
--
-- // [Executed in runtime]:
-- memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);
-- vmaFlushAllocation(allocator, stagingAlloc, 0, VK_WHOLE_SIZE);
-- //vkCmdPipelineBarrier: VK_ACCESS_HOST_WRITE_BIT --> VK_ACCESS_TRANSFER_READ_BIT
-- VkBufferCopy bufCopy = {
-- 0, // srcOffset
-- 0, // dstOffset,
-- myDataSize); // size
-- vkCmdCopyBuffer(cmdBuf, stagingBuf, buf, 1, &bufCopy);
--}
--\endcode
--
--\section usage_patterns_other_use_cases Other use cases
--
--Here are some other, less obvious use cases and their recommended settings:
--
--- An image that is used only as transfer source and destination, but it should stay on the device,
-- as it is used to temporarily store a copy of some texture, e.g. from the current to the next frame,
-- for temporal antialiasing or other temporal effects.
-- - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
-- - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO
--- An image that is used only as transfer source and destination, but it should be placed
-- in the system RAM despite it doesn't need to be mapped, because it serves as a "swap" copy to evict
-- least recently used textures from VRAM.
-- - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
-- - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_HOST,
-- as VMA needs a hint here to differentiate from the previous case.
--- A buffer that you want to map and write from the CPU, directly read from the GPU
-- (e.g. as a uniform or vertex buffer), but you have a clear preference to place it in device or
-- host memory due to its large size.
-- - Use `VkBufferCreateInfo::usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT`
-- - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST
-- - Use VmaAllocationCreateInfo::flags = #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT
--
--
--\page configuration Configuration
--
--Please check "CONFIGURATION SECTION" in the code to find macros that you can define
--before each include of this file or change directly in this file to provide
--your own implementation of basic facilities like assert, `min()` and `max()` functions,
--mutex, atomic etc.
--The library uses its own implementation of containers by default, but you can switch to using
--STL containers instead.
--
--For example, define `VMA_ASSERT(expr)` before including the library to provide
--custom implementation of the assertion, compatible with your project.
--By default it is defined to standard C `assert(expr)` in `_DEBUG` configuration
--and empty otherwise.
--
--\section config_Vulkan_functions Pointers to Vulkan functions
--
--There are multiple ways to import pointers to Vulkan functions in the library.
--In the simplest case you don't need to do anything.
--If the compilation or linking of your program or the initialization of the #VmaAllocator
--doesn't work for you, you can try to reconfigure it.
--
--First, the allocator tries to fetch pointers to Vulkan functions linked statically,
--like this:
--
--\code
--m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
--\endcode
--
--If you want to disable this feature, set configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`.
--
--Second, you can provide the pointers yourself by setting member VmaAllocatorCreateInfo::pVulkanFunctions.
--You can fetch them e.g. using functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or
--by using a helper library like [volk](https://github.com/zeux/volk).
--
--Third, VMA tries to fetch remaining pointers that are still null by calling
--`vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own.
--You need to only fill in VmaVulkanFunctions::vkGetInstanceProcAddr and VmaVulkanFunctions::vkGetDeviceProcAddr.
--Other pointers will be fetched automatically.
--If you want to disable this feature, set configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`.
--
--Finally, all the function pointers required by the library (considering selected
--Vulkan version and enabled extensions) are checked with `VMA_ASSERT` if they are not null.
--
--
--\section custom_memory_allocator Custom host memory allocator
--
--If you use custom allocator for CPU memory rather than default operator `new`
--and `delete` from C++, you can make this library using your allocator as well
--by filling optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These
--functions will be passed to Vulkan, as well as used by the library itself to
--make any CPU-side allocations.
--
--\section allocation_callbacks Device memory allocation callbacks
--
--The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.
--You can setup callbacks to be informed about these calls, e.g. for the purpose
--of gathering some statistics. To do it, fill optional member
--VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
--
--\section heap_memory_limit Device heap memory limit
--
--When device memory of certain heap runs out of free space, new allocations may
--fail (returning error code) or they may succeed, silently pushing some existing_
--memory blocks from GPU VRAM to system RAM (which degrades performance). This
--behavior is implementation-dependent - it depends on GPU vendor and graphics
--driver.
--
--On AMD cards it can be controlled while creating Vulkan device object by using
--VK_AMD_memory_overallocation_behavior extension, if available.
--
--Alternatively, if you want to test how your program behaves with limited amount of Vulkan device
--memory available without switching your graphics card to one that really has
--smaller VRAM, you can use a feature of this library intended for this purpose.
--To do it, fill optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
--
--
--
--\page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
--
--VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
--performance on some GPUs. It augments Vulkan API with possibility to query
--driver whether it prefers particular buffer or image to have its own, dedicated
--allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
--to do some internal optimizations. The extension is supported by this library.
--It will be used automatically when enabled.
--
--It has been promoted to core Vulkan 1.1, so if you use eligible Vulkan version
--and inform VMA about it by setting VmaAllocatorCreateInfo::vulkanApiVersion,
--you are all set.
--
--Otherwise, if you want to use it as an extension:
--
--1 . When creating Vulkan device, check if following 2 device extensions are
--supported (call `vkEnumerateDeviceExtensionProperties()`).
--If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
--
--- VK_KHR_get_memory_requirements2
--- VK_KHR_dedicated_allocation
--
--If you enabled these extensions:
--
--2 . Use #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
--your #VmaAllocator to inform the library that you enabled required extensions
--and you want the library to use them.
--
--\code
--allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
--
--vmaCreateAllocator(&allocatorInfo, &allocator);
--\endcode
--
--That is all. The extension will be automatically used whenever you create a
--buffer using vmaCreateBuffer() or image using vmaCreateImage().
--
--When using the extension together with Vulkan Validation Layer, you will receive
--warnings like this:
--
--_vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer._
--
--It is OK, you should just ignore it. It happens because you use function
--`vkGetBufferMemoryRequirements2KHR()` instead of standard
--`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
--unaware of it.
--
--To learn more about this extension, see:
--
--- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap50.html#VK_KHR_dedicated_allocation)
--- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
--
--
--
--\page vk_ext_memory_priority VK_EXT_memory_priority
--
--VK_EXT_memory_priority is a device extension that allows to pass additional "priority"
--value to Vulkan memory allocations that the implementation may use prefer certain
--buffers and images that are critical for performance to stay in device-local memory
--in cases when the memory is over-subscribed, while some others may be moved to the system memory.
--
--VMA offers convenient usage of this extension.
--If you enable it, you can pass "priority" parameter when creating allocations or custom pools
--and the library automatically passes the value to Vulkan using this extension.
--
--If you want to use this extension in connection with VMA, follow these steps:
--
--\section vk_ext_memory_priority_initialization Initialization
--
--1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
--Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_EXT_memory_priority".
--
--2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
--Attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
--Check if the device feature is really supported - check if `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority` is true.
--
--3) While creating device with `vkCreateDevice`, enable this extension - add "VK_EXT_memory_priority"
--to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
--
--4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
--Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
--Enable this device feature - attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to
--`VkPhysicalDeviceFeatures2::pNext` chain and set its member `memoryPriority` to `VK_TRUE`.
--
--5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
--have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
--to VmaAllocatorCreateInfo::flags.
--
--\section vk_ext_memory_priority_usage Usage
--
--When using this extension, you should initialize following member:
--
--- VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
--- VmaPoolCreateInfo::priority when creating a custom pool.
--
--It should be a floating-point value between `0.0f` and `1.0f`, where recommended default is `0.5f`.
--Memory allocated with higher value can be treated by the Vulkan implementation as higher priority
--and so it can have lower chances of being pushed out to system memory, experiencing degraded performance.
--
--It might be a good idea to create performance-critical resources like color-attachment or depth-stencil images
--as dedicated and set high priority to them. For example:
--
--\code
--VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
--imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
--imgCreateInfo.extent.width = 3840;
--imgCreateInfo.extent.height = 2160;
--imgCreateInfo.extent.depth = 1;
--imgCreateInfo.mipLevels = 1;
--imgCreateInfo.arrayLayers = 1;
--imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
--imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
--imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
--imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
--imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
--
--VmaAllocationCreateInfo allocCreateInfo = {};
--allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
--allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
--allocCreateInfo.priority = 1.0f;
--
--VkImage img;
--VmaAllocation alloc;
--vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
--\endcode
--
--`priority` member is ignored in the following situations:
--
--- Allocations created in custom pools: They inherit the priority, along with all other allocation parameters
-- from the parametrs passed in #VmaPoolCreateInfo when the pool was created.
--- Allocations created in default pools: They inherit the priority from the parameters
-- VMA used when creating default pools, which means `priority == 0.5f`.
--
--
--\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory
--
--VK_AMD_device_coherent_memory is a device extension that enables access to
--additional memory types with `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and
--`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flag. It is useful mostly for
--allocation of buffers intended for writing "breadcrumb markers" in between passes
--or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases.
--
--When the extension is available but has not been enabled, Vulkan physical device
--still exposes those memory types, but their usage is forbidden. VMA automatically
--takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt
--to allocate memory of such type is made.
--
--If you want to use this extension in connection with VMA, follow these steps:
--
--\section vk_amd_device_coherent_memory_initialization Initialization
--
--1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
--Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory".
--
--2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
--Attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
--Check if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true.
--
--3) While creating device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory"
--to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
--
--4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
--Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
--Enable this device feature - attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to
--`VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`.
--
--5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
--have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
--to VmaAllocatorCreateInfo::flags.
--
--\section vk_amd_device_coherent_memory_usage Usage
--
--After following steps described above, you can create VMA allocations and custom pools
--out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible
--devices. There are multiple ways to do it, for example:
--
--- You can request or prefer to allocate out of such memory types by adding
-- `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags
-- or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with
-- other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage.
--- If you manually found memory type index to use for this purpose, force allocation
-- from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`.
--
--\section vk_amd_device_coherent_memory_more_information More information
--
--To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_AMD_device_coherent_memory.html)
--
--Example use of this extension can be found in the code of the sample and test suite
--accompanying this library.
--
--
--\page enabling_buffer_device_address Enabling buffer device address
--
--Device extension VK_KHR_buffer_device_address
--allow to fetch raw GPU pointer to a buffer and pass it for usage in a shader code.
--It has been promoted to core Vulkan 1.2.
--
--If you want to use this feature in connection with VMA, follow these steps:
--
--\section enabling_buffer_device_address_initialization Initialization
--
--1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
--Check if the extension is supported - if returned array of `VkExtensionProperties` contains
--"VK_KHR_buffer_device_address".
--
--2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
--Attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
--Check if the device feature is really supported - check if `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` is true.
--
--3) (For Vulkan version < 1.2) While creating device with `vkCreateDevice`, enable this extension - add
--"VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
--
--4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
--Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
--Enable this device feature - attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to
--`VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`.
--
--5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
--have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
--to VmaAllocatorCreateInfo::flags.
--
--\section enabling_buffer_device_address_usage Usage
--
--After following steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA.
--The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to
--allocated memory blocks wherever it might be needed.
--
--Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`.
--The second part of this functionality related to "capture and replay" is not supported,
--as it is intended for usage in debugging tools like RenderDoc, not in everyday Vulkan usage.
--
--\section enabling_buffer_device_address_more_information More information
--
--To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address)
--
--Example use of this extension can be found in the code of the sample and test suite
--accompanying this library.
--
--\page general_considerations General considerations
--
--\section general_considerations_thread_safety Thread safety
--
--- The library has no global state, so separate #VmaAllocator objects can be used
-- independently.
-- There should be no need to create multiple such objects though - one per `VkDevice` is enough.
--- By default, all calls to functions that take #VmaAllocator as first parameter
-- are safe to call from multiple threads simultaneously because they are
-- synchronized internally when needed.
-- This includes allocation and deallocation from default memory pool, as well as custom #VmaPool.
--- When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
-- flag, calls to functions that take such #VmaAllocator object must be
-- synchronized externally.
--- Access to a #VmaAllocation object must be externally synchronized. For example,
-- you must not call vmaGetAllocationInfo() and vmaMapMemory() from different
-- threads at the same time if you pass the same #VmaAllocation object to these
-- functions.
--- #VmaVirtualBlock is not safe to be used from multiple threads simultaneously.
--
--\section general_considerations_versioning_and_compatibility Versioning and compatibility
--
--The library uses [**Semantic Versioning**](https://semver.org/),
--which means version numbers follow convention: Major.Minor.Patch (e.g. 2.3.0), where:
--
--- Incremented Patch version means a release is backward- and forward-compatible,
-- introducing only some internal improvements, bug fixes, optimizations etc.
-- or changes that are out of scope of the official API described in this documentation.
--- Incremented Minor version means a release is backward-compatible,
-- so existing code that uses the library should continue to work, while some new
-- symbols could have been added: new structures, functions, new values in existing
-- enums and bit flags, new structure members, but not new function parameters.
--- Incrementing Major version means a release could break some backward compatibility.
--
--All changes between official releases are documented in file "CHANGELOG.md".
--
--\warning Backward compatibility is considered on the level of C++ source code, not binary linkage.
--Adding new members to existing structures is treated as backward compatible if initializing
--the new members to binary zero results in the old behavior.
--You should always fully initialize all library structures to zeros and not rely on their
--exact binary size.
--
--\section general_considerations_validation_layer_warnings Validation layer warnings
--
--When using this library, you can meet following types of warnings issued by
--Vulkan validation layer. They don't necessarily indicate a bug, so you may need
--to just ignore them.
--
--- *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.*
-- - It happens when VK_KHR_dedicated_allocation extension is enabled.
-- `vkGetBufferMemoryRequirements2KHR` function is used instead, while validation layer seems to be unaware of it.
--- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.*
-- - It happens when you map a buffer or image, because the library maps entire
-- `VkDeviceMemory` block, where different types of images and buffers may end
-- up together, especially on GPUs with unified memory like Intel.
--- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
-- - It may happen when you use [defragmentation](@ref defragmentation).
--
--\section general_considerations_allocation_algorithm Allocation algorithm
--
--The library uses following algorithm for allocation, in order:
--
---# Try to find free range of memory in existing blocks.
---# If failed, try to create a new block of `VkDeviceMemory`, with preferred block size.
---# If failed, try to create such block with size / 2, size / 4, size / 8.
---# If failed, try to allocate separate `VkDeviceMemory` for this allocation,
-- just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
---# If failed, choose other memory type that meets the requirements specified in
-- VmaAllocationCreateInfo and go to point 1.
---# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
--
--\section general_considerations_features_not_supported Features not supported
--
--Features deliberately excluded from the scope of this library:
--
---# **Data transfer.** Uploading (streaming) and downloading data of buffers and images
-- between CPU and GPU memory and related synchronization is responsibility of the user.
-- Defining some "texture" object that would automatically stream its data from a
-- staging copy in CPU memory to GPU memory would rather be a feature of another,
-- higher-level library implemented on top of VMA.
-- VMA doesn't record any commands to a `VkCommandBuffer`. It just allocates memory.
---# **Recreation of buffers and images.** Although the library has functions for
-- buffer and image creation: vmaCreateBuffer(), vmaCreateImage(), you need to
-- recreate these objects yourself after defragmentation. That is because the big
-- structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in
-- #VmaAllocation object.
---# **Handling CPU memory allocation failures.** When dynamically creating small C++
-- objects in CPU memory (not Vulkan memory), allocation failures are not checked
-- and handled gracefully, because that would complicate code significantly and
-- is usually not needed in desktop PC applications anyway.
-- Success of an allocation is just checked with an assert.
---# **Code free of any compiler warnings.** Maintaining the library to compile and
-- work correctly on so many different platforms is hard enough. Being free of
-- any warnings, on any version of any compiler, is simply not feasible.
-- There are many preprocessor macros that make some variables unused, function parameters unreferenced,
-- or conditional expressions constant in some configurations.
-- The code of this library should not be bigger or more complicated just to silence these warnings.
-- It is recommended to disable such warnings instead.
---# This is a C++ library with C interface. **Bindings or ports to any other programming languages** are welcome as external projects but
-- are not going to be included into this repository.
--*/
-+//
-+// Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved.
-+//
-+// Permission is hereby granted, free of charge, to any person obtaining a copy
-+// of this software and associated documentation files (the "Software"), to deal
-+// in the Software without restriction, including without limitation the rights
-+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-+// copies of the Software, and to permit persons to whom the Software is
-+// furnished to do so, subject to the following conditions:
-+//
-+// The above copyright notice and this permission notice shall be included in
-+// all copies or substantial portions of the Software.
-+//
-+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-+// THE SOFTWARE.
-+//
-+
-+#ifndef AMD_VULKAN_MEMORY_ALLOCATOR_H
-+#define AMD_VULKAN_MEMORY_ALLOCATOR_H
-+
-+/** \mainpage Vulkan Memory Allocator
-+
-+<b>Version 3.1.0-development</b>
-+
-+Copyright (c) 2017-2022 Advanced Micro Devices, Inc. All rights reserved. \n
-+License: MIT
-+
-+<b>API documentation divided into groups:</b> [Modules](modules.html)
-+
-+\section main_table_of_contents Table of contents
-+
-+- <b>User guide</b>
-+ - \subpage quick_start
-+ - [Project setup](@ref quick_start_project_setup)
-+ - [Initialization](@ref quick_start_initialization)
-+ - [Resource allocation](@ref quick_start_resource_allocation)
-+ - \subpage choosing_memory_type
-+ - [Usage](@ref choosing_memory_type_usage)
-+ - [Required and preferred flags](@ref choosing_memory_type_required_preferred_flags)
-+ - [Explicit memory types](@ref choosing_memory_type_explicit_memory_types)
-+ - [Custom memory pools](@ref choosing_memory_type_custom_memory_pools)
-+ - [Dedicated allocations](@ref choosing_memory_type_dedicated_allocations)
-+ - \subpage memory_mapping
-+ - [Mapping functions](@ref memory_mapping_mapping_functions)
-+ - [Persistently mapped memory](@ref memory_mapping_persistently_mapped_memory)
-+ - [Cache flush and invalidate](@ref memory_mapping_cache_control)
-+ - \subpage staying_within_budget
-+ - [Querying for budget](@ref staying_within_budget_querying_for_budget)
-+ - [Controlling memory usage](@ref staying_within_budget_controlling_memory_usage)
-+ - \subpage resource_aliasing
-+ - \subpage custom_memory_pools
-+ - [Choosing memory type index](@ref custom_memory_pools_MemTypeIndex)
-+ - [Linear allocation algorithm](@ref linear_algorithm)
-+ - [Free-at-once](@ref linear_algorithm_free_at_once)
-+ - [Stack](@ref linear_algorithm_stack)
-+ - [Double stack](@ref linear_algorithm_double_stack)
-+ - [Ring buffer](@ref linear_algorithm_ring_buffer)
-+ - \subpage defragmentation
-+ - \subpage statistics
-+ - [Numeric statistics](@ref statistics_numeric_statistics)
-+ - [JSON dump](@ref statistics_json_dump)
-+ - \subpage allocation_annotation
-+ - [Allocation user data](@ref allocation_user_data)
-+ - [Allocation names](@ref allocation_names)
-+ - \subpage virtual_allocator
-+ - \subpage debugging_memory_usage
-+ - [Memory initialization](@ref debugging_memory_usage_initialization)
-+ - [Margins](@ref debugging_memory_usage_margins)
-+ - [Corruption detection](@ref debugging_memory_usage_corruption_detection)
-+ - \subpage opengl_interop
-+- \subpage usage_patterns
-+ - [GPU-only resource](@ref usage_patterns_gpu_only)
-+ - [Staging copy for upload](@ref usage_patterns_staging_copy_upload)
-+ - [Readback](@ref usage_patterns_readback)
-+ - [Advanced data uploading](@ref usage_patterns_advanced_data_uploading)
-+ - [Other use cases](@ref usage_patterns_other_use_cases)
-+- \subpage configuration
-+ - [Pointers to Vulkan functions](@ref config_Vulkan_functions)
-+ - [Custom host memory allocator](@ref custom_memory_allocator)
-+ - [Device memory allocation callbacks](@ref allocation_callbacks)
-+ - [Device heap memory limit](@ref heap_memory_limit)
-+- <b>Extension support</b>
-+ - \subpage vk_khr_dedicated_allocation
-+ - \subpage enabling_buffer_device_address
-+ - \subpage vk_ext_memory_priority
-+ - \subpage vk_amd_device_coherent_memory
-+- \subpage general_considerations
-+ - [Thread safety](@ref general_considerations_thread_safety)
-+ - [Versioning and compatibility](@ref general_considerations_versioning_and_compatibility)
-+ - [Validation layer warnings](@ref general_considerations_validation_layer_warnings)
-+ - [Allocation algorithm](@ref general_considerations_allocation_algorithm)
-+ - [Features not supported](@ref general_considerations_features_not_supported)
-+
-+\section main_see_also See also
-+
-+- [**Product page on GPUOpen**](https://gpuopen.com/gaming-product/vulkan-memory-allocator/)
-+- [**Source repository on GitHub**](https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator)
-+
-+\defgroup group_init Library initialization
-+
-+\brief API elements related to the initialization and management of the entire library, especially #VmaAllocator object.
-+
-+\defgroup group_alloc Memory allocation
-+
-+\brief API elements related to the allocation, deallocation, and management of Vulkan memory, buffers, images.
-+Most basic ones being: vmaCreateBuffer(), vmaCreateImage().
-+
-+\defgroup group_virtual Virtual allocator
-+
-+\brief API elements related to the mechanism of \ref virtual_allocator - using the core allocation algorithm
-+for user-defined purpose without allocating any real GPU memory.
-+
-+\defgroup group_stats Statistics
-+
-+\brief API elements that query current status of the allocator, from memory usage, budget, to full dump of the internal state in JSON format.
-+See documentation chapter: \ref statistics.
-+*/
-+
-+
-+#ifdef __cplusplus
-+extern "C" {
-+#endif
-+
-+#include <vulkan/vulkan.h>
-+
-+#if !defined(VMA_VULKAN_VERSION)
-+ #if defined(VK_VERSION_1_3)
-+ #define VMA_VULKAN_VERSION 1003000
-+ #elif defined(VK_VERSION_1_2)
-+ #define VMA_VULKAN_VERSION 1002000
-+ #elif defined(VK_VERSION_1_1)
-+ #define VMA_VULKAN_VERSION 1001000
-+ #else
-+ #define VMA_VULKAN_VERSION 1000000
-+ #endif
-+#endif
-+
-+#if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS
-+ extern PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr;
-+ extern PFN_vkGetDeviceProcAddr vkGetDeviceProcAddr;
-+ extern PFN_vkGetPhysicalDeviceProperties vkGetPhysicalDeviceProperties;
-+ extern PFN_vkGetPhysicalDeviceMemoryProperties vkGetPhysicalDeviceMemoryProperties;
-+ extern PFN_vkAllocateMemory vkAllocateMemory;
-+ extern PFN_vkFreeMemory vkFreeMemory;
-+ extern PFN_vkMapMemory vkMapMemory;
-+ extern PFN_vkUnmapMemory vkUnmapMemory;
-+ extern PFN_vkFlushMappedMemoryRanges vkFlushMappedMemoryRanges;
-+ extern PFN_vkInvalidateMappedMemoryRanges vkInvalidateMappedMemoryRanges;
-+ extern PFN_vkBindBufferMemory vkBindBufferMemory;
-+ extern PFN_vkBindImageMemory vkBindImageMemory;
-+ extern PFN_vkGetBufferMemoryRequirements vkGetBufferMemoryRequirements;
-+ extern PFN_vkGetImageMemoryRequirements vkGetImageMemoryRequirements;
-+ extern PFN_vkCreateBuffer vkCreateBuffer;
-+ extern PFN_vkDestroyBuffer vkDestroyBuffer;
-+ extern PFN_vkCreateImage vkCreateImage;
-+ extern PFN_vkDestroyImage vkDestroyImage;
-+ extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer;
-+ #if VMA_VULKAN_VERSION >= 1001000
-+ extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2;
-+ extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2;
-+ extern PFN_vkBindBufferMemory2 vkBindBufferMemory2;
-+ extern PFN_vkBindImageMemory2 vkBindImageMemory2;
-+ extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2;
-+ #endif // #if VMA_VULKAN_VERSION >= 1001000
-+#endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES
-+
-+#if !defined(VMA_DEDICATED_ALLOCATION)
-+ #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation
-+ #define VMA_DEDICATED_ALLOCATION 1
-+ #else
-+ #define VMA_DEDICATED_ALLOCATION 0
-+ #endif
-+#endif
-+
-+#if !defined(VMA_BIND_MEMORY2)
-+ #if VK_KHR_bind_memory2
-+ #define VMA_BIND_MEMORY2 1
-+ #else
-+ #define VMA_BIND_MEMORY2 0
-+ #endif
-+#endif
-+
-+#if !defined(VMA_MEMORY_BUDGET)
-+ #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000)
-+ #define VMA_MEMORY_BUDGET 1
-+ #else
-+ #define VMA_MEMORY_BUDGET 0
-+ #endif
-+#endif
-+
-+// Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers.
-+#if !defined(VMA_BUFFER_DEVICE_ADDRESS)
-+ #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000
-+ #define VMA_BUFFER_DEVICE_ADDRESS 1
-+ #else
-+ #define VMA_BUFFER_DEVICE_ADDRESS 0
-+ #endif
-+#endif
-+
-+// Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers.
-+#if !defined(VMA_MEMORY_PRIORITY)
-+ #if VK_EXT_memory_priority
-+ #define VMA_MEMORY_PRIORITY 1
-+ #else
-+ #define VMA_MEMORY_PRIORITY 0
-+ #endif
-+#endif
-+
-+// Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers.
-+#if !defined(VMA_EXTERNAL_MEMORY)
-+ #if VK_KHR_external_memory
-+ #define VMA_EXTERNAL_MEMORY 1
-+ #else
-+ #define VMA_EXTERNAL_MEMORY 0
-+ #endif
-+#endif
-+
-+// Define these macros to decorate all public functions with additional code,
-+// before and after returned type, appropriately. This may be useful for
-+// exporting the functions when compiling VMA as a separate library. Example:
-+// #define VMA_CALL_PRE __declspec(dllexport)
-+// #define VMA_CALL_POST __cdecl
-+#ifndef VMA_CALL_PRE
-+ #define VMA_CALL_PRE
-+#endif
-+#ifndef VMA_CALL_POST
-+ #define VMA_CALL_POST
-+#endif
-+
-+// Define this macro to decorate pNext pointers with an attribute specifying the Vulkan
-+// structure that will be extended via the pNext chain.
-+#ifndef VMA_EXTENDS_VK_STRUCT
-+ #define VMA_EXTENDS_VK_STRUCT(vkStruct)
-+#endif
-+
-+// Define this macro to decorate pointers with an attribute specifying the
-+// length of the array they point to if they are not null.
-+//
-+// The length may be one of
-+// - The name of another parameter in the argument list where the pointer is declared
-+// - The name of another member in the struct where the pointer is declared
-+// - The name of a member of a struct type, meaning the value of that member in
-+// the context of the call. For example
-+// VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount"),
-+// this means the number of memory heaps available in the device associated
-+// with the VmaAllocator being dealt with.
-+#ifndef VMA_LEN_IF_NOT_NULL
-+ #define VMA_LEN_IF_NOT_NULL(len)
-+#endif
-+
-+// The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
-+// see: https://clang.llvm.org/docs/AttributeReference.html#nullable
-+#ifndef VMA_NULLABLE
-+ #ifdef __clang__
-+ #define VMA_NULLABLE _Nullable
-+ #else
-+ #define VMA_NULLABLE
-+ #endif
-+#endif
-+
-+// The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
-+// see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
-+#ifndef VMA_NOT_NULL
-+ #ifdef __clang__
-+ #define VMA_NOT_NULL _Nonnull
-+ #else
-+ #define VMA_NOT_NULL
-+ #endif
-+#endif
-+
-+// If non-dispatchable handles are represented as pointers then we can give
-+// then nullability annotations
-+#ifndef VMA_NOT_NULL_NON_DISPATCHABLE
-+ #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
-+ #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
-+ #else
-+ #define VMA_NOT_NULL_NON_DISPATCHABLE
-+ #endif
-+#endif
-+
-+#ifndef VMA_NULLABLE_NON_DISPATCHABLE
-+ #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
-+ #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
-+ #else
-+ #define VMA_NULLABLE_NON_DISPATCHABLE
-+ #endif
-+#endif
-+
-+#ifndef VMA_STATS_STRING_ENABLED
-+ #define VMA_STATS_STRING_ENABLED 1
-+#endif
-+
-+////////////////////////////////////////////////////////////////////////////////
-+////////////////////////////////////////////////////////////////////////////////
-+//
-+// INTERFACE
-+//
-+////////////////////////////////////////////////////////////////////////////////
-+////////////////////////////////////////////////////////////////////////////////
-+
-+// Sections for managing code placement in file, only for development purposes e.g. for convenient folding inside an IDE.
-+#ifndef _VMA_ENUM_DECLARATIONS
-+
-+/**
-+\addtogroup group_init
-+@{
-+*/
-+
-+/// Flags for created #VmaAllocator.
-+typedef enum VmaAllocatorCreateFlagBits
-+{
-+ /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you.
-+
-+ Using this flag may increase performance because internal mutexes are not used.
-+ */
-+ VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001,
-+ /** \brief Enables usage of VK_KHR_dedicated_allocation extension.
-+
-+ The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
-+ When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
-+
-+ Using this extension will automatically allocate dedicated blocks of memory for
-+ some buffers and images instead of suballocating place for them out of bigger
-+ memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
-+ flag) when it is recommended by the driver. It may improve performance on some
-+ GPUs.
-+
-+ You may set this flag only if you found out that following device extensions are
-+ supported, you enabled them while creating Vulkan device passed as
-+ VmaAllocatorCreateInfo::device, and you want them to be used internally by this
-+ library:
-+
-+ - VK_KHR_get_memory_requirements2 (device extension)
-+ - VK_KHR_dedicated_allocation (device extension)
-+
-+ When this flag is set, you can experience following warnings reported by Vulkan
-+ validation layer. You can ignore them.
-+
-+ > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer.
-+ */
-+ VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002,
-+ /**
-+ Enables usage of VK_KHR_bind_memory2 extension.
-+
-+ The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`.
-+ When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1.
-+
-+ You may set this flag only if you found out that this device extension is supported,
-+ you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
-+ and you want it to be used internally by this library.
-+
-+ The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`,
-+ which allow to pass a chain of `pNext` structures while binding.
-+ This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2().
-+ */
-+ VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004,
-+ /**
-+ Enables usage of VK_EXT_memory_budget extension.
-+
-+ You may set this flag only if you found out that this device extension is supported,
-+ you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
-+ and you want it to be used internally by this library, along with another instance extension
-+ VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted).
-+
-+ The extension provides query for current memory usage and budget, which will probably
-+ be more accurate than an estimation used by the library otherwise.
-+ */
-+ VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008,
-+ /**
-+ Enables usage of VK_AMD_device_coherent_memory extension.
-+
-+ You may set this flag only if you:
-+
-+ - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device,
-+ - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device,
-+ - want it to be used internally by this library.
-+
-+ The extension and accompanying device feature provide access to memory types with
-+ `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags.
-+ They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR.
-+
-+ When the extension is not enabled, such memory types are still enumerated, but their usage is illegal.
-+ To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type,
-+ returning `VK_ERROR_FEATURE_NOT_PRESENT`.
-+ */
-+ VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010,
-+ /**
-+ Enables usage of "buffer device address" feature, which allows you to use function
-+ `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader.
-+
-+ You may set this flag only if you:
-+
-+ 1. (For Vulkan version < 1.2) Found as available and enabled device extension
-+ VK_KHR_buffer_device_address.
-+ This extension is promoted to core Vulkan 1.2.
-+ 2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`.
-+
-+ When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA.
-+ The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to
-+ allocated memory blocks wherever it might be needed.
-+
-+ For more information, see documentation chapter \ref enabling_buffer_device_address.
-+ */
-+ VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020,
-+ /**
-+ Enables usage of VK_EXT_memory_priority extension in the library.
-+
-+ You may set this flag only if you found available and enabled this device extension,
-+ along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`,
-+ while creating Vulkan device passed as VmaAllocatorCreateInfo::device.
-+
-+ When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority
-+ are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored.
-+
-+ A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
-+ Larger values are higher priority. The granularity of the priorities is implementation-dependent.
-+ It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`.
-+ The value to be used for default priority is 0.5.
-+ For more details, see the documentation of the VK_EXT_memory_priority extension.
-+ */
-+ VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040,
-+
-+ VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
-+} VmaAllocatorCreateFlagBits;
-+/// See #VmaAllocatorCreateFlagBits.
-+typedef VkFlags VmaAllocatorCreateFlags;
-+
-+/** @} */
-+
-+/**
-+\addtogroup group_alloc
-+@{
-+*/
-+
-+/// \brief Intended usage of the allocated memory.
-+typedef enum VmaMemoryUsage
-+{
-+ /** No intended memory usage specified.
-+ Use other members of VmaAllocationCreateInfo to specify your requirements.
-+ */
-+ VMA_MEMORY_USAGE_UNKNOWN = 0,
-+ /**
-+ \deprecated Obsolete, preserved for backward compatibility.
-+ Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
-+ */
-+ VMA_MEMORY_USAGE_GPU_ONLY = 1,
-+ /**
-+ \deprecated Obsolete, preserved for backward compatibility.
-+ Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`.
-+ */
-+ VMA_MEMORY_USAGE_CPU_ONLY = 2,
-+ /**
-+ \deprecated Obsolete, preserved for backward compatibility.
-+ Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
-+ */
-+ VMA_MEMORY_USAGE_CPU_TO_GPU = 3,
-+ /**
-+ \deprecated Obsolete, preserved for backward compatibility.
-+ Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
-+ */
-+ VMA_MEMORY_USAGE_GPU_TO_CPU = 4,
-+ /**
-+ \deprecated Obsolete, preserved for backward compatibility.
-+ Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
-+ */
-+ VMA_MEMORY_USAGE_CPU_COPY = 5,
-+ /**
-+ Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`.
-+ Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation.
-+
-+ Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`.
-+
-+ Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
-+ */
-+ VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6,
-+ /**
-+ Selects best memory type automatically.
-+ This flag is recommended for most common use cases.
-+
-+ When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
-+ you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
-+ in VmaAllocationCreateInfo::flags.
-+
-+ It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
-+ vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
-+ and not with generic memory allocation functions.
-+ */
-+ VMA_MEMORY_USAGE_AUTO = 7,
-+ /**
-+ Selects best memory type automatically with preference for GPU (device) memory.
-+
-+ When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
-+ you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
-+ in VmaAllocationCreateInfo::flags.
-+
-+ It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
-+ vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
-+ and not with generic memory allocation functions.
-+ */
-+ VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8,
-+ /**
-+ Selects best memory type automatically with preference for CPU (host) memory.
-+
-+ When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT),
-+ you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
-+ in VmaAllocationCreateInfo::flags.
-+
-+ It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g.
-+ vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo()
-+ and not with generic memory allocation functions.
-+ */
-+ VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9,
-+
-+ VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF
-+} VmaMemoryUsage;
-+
-+/// Flags to be passed as VmaAllocationCreateInfo::flags.
-+typedef enum VmaAllocationCreateFlagBits
-+{
-+ /** \brief Set this flag if the allocation should have its own memory block.
-+
-+ Use it for special, big resources, like fullscreen images used as attachments.
-+ */
-+ VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001,
-+
-+ /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block.
-+
-+ If new allocation cannot be placed in any of the existing blocks, allocation
-+ fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
-+
-+ You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and
-+ #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense.
-+ */
-+ VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002,
-+ /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it.
-+
-+ Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData.
-+
-+ It is valid to use this flag for allocation made from memory type that is not
-+ `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is
-+ useful if you need an allocation that is efficient to use on GPU
-+ (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that
-+ support it (e.g. Intel GPU).
-+ */
-+ VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004,
-+ /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead.
-+
-+ Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a
-+ null-terminated string. Instead of copying pointer value, a local copy of the
-+ string is made and stored in allocation's `pName`. The string is automatically
-+ freed together with the allocation. It is also used in vmaBuildStatsString().
-+ */
-+ VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020,
-+ /** Allocation will be created from upper stack in a double stack pool.
-+
-+ This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag.
-+ */
-+ VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040,
-+ /** Create both buffer/image and allocation, but don't bind them together.
-+ It is useful when you want to bind yourself to do some more advanced binding, e.g. using some extensions.
-+ The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage().
-+ Otherwise it is ignored.
-+
-+ If you want to make sure the new buffer/image is not tied to the new memory allocation
-+ through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block,
-+ use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT.
-+ */
-+ VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080,
-+ /** Create allocation only if additional device memory required for it, if any, won't exceed
-+ memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-+ */
-+ VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100,
-+ /** \brief Set this flag if the allocated memory will have aliasing resources.
-+
-+ Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified.
-+ Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors.
-+ */
-+ VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200,
-+ /**
-+ Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
-+
-+ - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,
-+ you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
-+ - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
-+ This includes allocations created in \ref custom_memory_pools.
-+
-+ Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number,
-+ never read or accessed randomly, so a memory type can be selected that is uncached and write-combined.
-+
-+ \warning Violating this declaration may work correctly, but will likely be very slow.
-+ Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;`
-+ Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once.
-+ */
-+ VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400,
-+ /**
-+ Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT).
-+
-+ - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value,
-+ you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect.
-+ - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`.
-+ This includes allocations created in \ref custom_memory_pools.
-+
-+ Declares that mapped memory can be read, written, and accessed in random order,
-+ so a `HOST_CACHED` memory type is required.
-+ */
-+ VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800,
-+ /**
-+ Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT,
-+ it says that despite request for host access, a not-`HOST_VISIBLE` memory type can be selected
-+ if it may improve performance.
-+
-+ By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type
-+ (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and
-+ issue an explicit transfer to write/read your data.
-+ To prepare for this possibility, don't forget to add appropriate flags like
-+ `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image.
-+ */
-+ VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000,
-+ /** Allocation strategy that chooses smallest possible free range for the allocation
-+ to minimize memory usage and fragmentation, possibly at the expense of allocation time.
-+ */
-+ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000,
-+ /** Allocation strategy that chooses first suitable free range for the allocation -
-+ not necessarily in terms of the smallest offset but the one that is easiest and fastest to find
-+ to minimize allocation time, possibly at the expense of allocation quality.
-+ */
-+ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000,
-+ /** Allocation strategy that chooses always the lowest offset in available space.
-+ This is not the most efficient strategy but achieves highly packed data.
-+ Used internally by defragmentation, not recommended in typical usage.
-+ */
-+ VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = 0x00040000,
-+ /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT.
-+ */
-+ VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
-+ /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT.
-+ */
-+ VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
-+ /** A bit mask to extract only `STRATEGY` bits from entire set of flags.
-+ */
-+ VMA_ALLOCATION_CREATE_STRATEGY_MASK =
-+ VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT |
-+ VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT |
-+ VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
-+
-+ VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
-+} VmaAllocationCreateFlagBits;
-+/// See #VmaAllocationCreateFlagBits.
-+typedef VkFlags VmaAllocationCreateFlags;
-+
-+/// Flags to be passed as VmaPoolCreateInfo::flags.
-+typedef enum VmaPoolCreateFlagBits
-+{
-+ /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored.
-+
-+ This is an optional optimization flag.
-+
-+ If you always allocate using vmaCreateBuffer(), vmaCreateImage(),
-+ vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator
-+ knows exact type of your allocations so it can handle Buffer-Image Granularity
-+ in the optimal way.
-+
-+ If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(),
-+ exact type of such allocations is not known, so allocator must be conservative
-+ in handling Buffer-Image Granularity, which can lead to suboptimal allocation
-+ (wasted memory). In that case, if you can make sure you always allocate only
-+ buffers and linear images or only optimal images out of this pool, use this flag
-+ to make allocator disregard Buffer-Image Granularity and so make allocations
-+ faster and more optimal.
-+ */
-+ VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002,
-+
-+ /** \brief Enables alternative, linear allocation algorithm in this pool.
-+
-+ Specify this flag to enable linear allocation algorithm, which always creates
-+ new allocations after last one and doesn't reuse space from allocations freed in
-+ between. It trades memory consumption for simplified algorithm and data
-+ structure, which has better performance and uses less memory for metadata.
-+
-+ By using this flag, you can achieve behavior of free-at-once, stack,
-+ ring buffer, and double stack.
-+ For details, see documentation chapter \ref linear_algorithm.
-+ */
-+ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004,
-+
-+ /** Bit mask to extract only `ALGORITHM` bits from entire set of flags.
-+ */
-+ VMA_POOL_CREATE_ALGORITHM_MASK =
-+ VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT,
-+
-+ VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
-+} VmaPoolCreateFlagBits;
-+/// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits.
-+typedef VkFlags VmaPoolCreateFlags;
-+
-+/// Flags to be passed as VmaDefragmentationInfo::flags.
-+typedef enum VmaDefragmentationFlagBits
-+{
-+ /* \brief Use simple but fast algorithm for defragmentation.
-+ May not achieve best results but will require least time to compute and least allocations to copy.
-+ */
-+ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1,
-+ /* \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified.
-+ Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved.
-+ */
-+ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2,
-+ /* \brief Perform full defragmentation of memory.
-+ Can result in notably more time to compute and allocations to copy, but will achieve best memory packing.
-+ */
-+ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4,
-+ /** \brief Use the most roboust algorithm at the cost of time to compute and number of copies to make.
-+ Only available when bufferImageGranularity is greater than 1, since it aims to reduce
-+ alignment issues between different types of resources.
-+ Otherwise falls back to same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT.
-+ */
-+ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8,
-+
-+ /// A bit mask to extract only `ALGORITHM` bits from entire set of flags.
-+ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK =
-+ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT |
-+ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT |
-+ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT |
-+ VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT,
-+
-+ VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
-+} VmaDefragmentationFlagBits;
-+/// See #VmaDefragmentationFlagBits.
-+typedef VkFlags VmaDefragmentationFlags;
-+
-+/// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove.
-+typedef enum VmaDefragmentationMoveOperation
-+{
-+ /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass().
-+ VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0,
-+ /// Set this value if you cannot move the allocation. New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged.
-+ VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1,
-+ /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed.
-+ VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2,
-+} VmaDefragmentationMoveOperation;
-+
-+/** @} */
-+
-+/**
-+\addtogroup group_virtual
-+@{
-+*/
-+
-+/// Flags to be passed as VmaVirtualBlockCreateInfo::flags.
-+typedef enum VmaVirtualBlockCreateFlagBits
-+{
-+ /** \brief Enables alternative, linear allocation algorithm in this virtual block.
-+
-+ Specify this flag to enable linear allocation algorithm, which always creates
-+ new allocations after last one and doesn't reuse space from allocations freed in
-+ between. It trades memory consumption for simplified algorithm and data
-+ structure, which has better performance and uses less memory for metadata.
-+
-+ By using this flag, you can achieve behavior of free-at-once, stack,
-+ ring buffer, and double stack.
-+ For details, see documentation chapter \ref linear_algorithm.
-+ */
-+ VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001,
-+
-+ /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags.
-+ */
-+ VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK =
-+ VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT,
-+
-+ VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
-+} VmaVirtualBlockCreateFlagBits;
-+/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits.
-+typedef VkFlags VmaVirtualBlockCreateFlags;
-+
-+/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags.
-+typedef enum VmaVirtualAllocationCreateFlagBits
-+{
-+ /** \brief Allocation will be created from upper stack in a double stack pool.
-+
-+ This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag.
-+ */
-+ VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT,
-+ /** \brief Allocation strategy that tries to minimize memory usage.
-+ */
-+ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT,
-+ /** \brief Allocation strategy that tries to minimize allocation time.
-+ */
-+ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT,
-+ /** Allocation strategy that chooses always the lowest offset in available space.
-+ This is not the most efficient strategy but achieves highly packed data.
-+ */
-+ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
-+ /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags.
-+
-+ These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits.
-+ */
-+ VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK,
-+
-+ VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF
-+} VmaVirtualAllocationCreateFlagBits;
-+/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits.
-+typedef VkFlags VmaVirtualAllocationCreateFlags;
-+
-+/** @} */
-+
-+#endif // _VMA_ENUM_DECLARATIONS
-+
-+#ifndef _VMA_DATA_TYPES_DECLARATIONS
-+
-+/**
-+\addtogroup group_init
-+@{ */
-+
-+/** \struct VmaAllocator
-+\brief Represents main object of this library initialized.
-+
-+Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it.
-+Call function vmaDestroyAllocator() to destroy it.
-+
-+It is recommended to create just one object of this type per `VkDevice` object,
-+right after Vulkan is initialized and keep it alive until before Vulkan device is destroyed.
-+*/
-+VK_DEFINE_HANDLE(VmaAllocator)
-+
-+/** @} */
-+
-+/**
-+\addtogroup group_alloc
-+@{
-+*/
-+
-+/** \struct VmaPool
-+\brief Represents custom memory pool
-+
-+Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it.
-+Call function vmaDestroyPool() to destroy it.
-+
-+For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools).
-+*/
-+VK_DEFINE_HANDLE(VmaPool)
-+
-+/** \struct VmaAllocation
-+\brief Represents single memory allocation.
-+
-+It may be either dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type
-+plus unique offset.
-+
-+There are multiple ways to create such object.
-+You need to fill structure VmaAllocationCreateInfo.
-+For more information see [Choosing memory type](@ref choosing_memory_type).
-+
-+Although the library provides convenience functions that create Vulkan buffer or image,
-+allocate memory for it and bind them together,
-+binding of the allocation to a buffer or an image is out of scope of the allocation itself.
-+Allocation object can exist without buffer/image bound,
-+binding can be done manually by the user, and destruction of it can be done
-+independently of destruction of the allocation.
-+
-+The object also remembers its size and some other information.
-+To retrieve this information, use function vmaGetAllocationInfo() and inspect
-+returned structure VmaAllocationInfo.
-+*/
-+VK_DEFINE_HANDLE(VmaAllocation)
-+
-+/** \struct VmaDefragmentationContext
-+\brief An opaque object that represents started defragmentation process.
-+
-+Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it.
-+Call function vmaEndDefragmentation() to destroy it.
-+*/
-+VK_DEFINE_HANDLE(VmaDefragmentationContext)
-+
-+/** @} */
-+
-+/**
-+\addtogroup group_virtual
-+@{
-+*/
-+
-+/** \struct VmaVirtualAllocation
-+\brief Represents single memory allocation done inside VmaVirtualBlock.
-+
-+Use it as a unique identifier to virtual allocation within the single block.
-+
-+Use value `VK_NULL_HANDLE` to represent a null/invalid allocation.
-+*/
-+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation)
-+
-+/** @} */
-+
-+/**
-+\addtogroup group_virtual
-+@{
-+*/
-+
-+/** \struct VmaVirtualBlock
-+\brief Handle to a virtual block object that allows to use core allocation algorithm without allocating any real GPU memory.
-+
-+Fill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it.
-+For more information, see documentation chapter \ref virtual_allocator.
-+
-+This object is not thread-safe - should not be used from multiple threads simultaneously, must be synchronized externally.
-+*/
-+VK_DEFINE_HANDLE(VmaVirtualBlock)
-+
-+/** @} */
-+
-+/**
-+\addtogroup group_init
-+@{
-+*/
-+
-+/// Callback function called after successful vkAllocateMemory.
-+typedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ uint32_t memoryType,
-+ VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
-+ VkDeviceSize size,
-+ void* VMA_NULLABLE pUserData);
-+
-+/// Callback function called before vkFreeMemory.
-+typedef void (VKAPI_PTR* PFN_vmaFreeDeviceMemoryFunction)(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ uint32_t memoryType,
-+ VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory,
-+ VkDeviceSize size,
-+ void* VMA_NULLABLE pUserData);
-+
-+/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`.
-+
-+Provided for informative purpose, e.g. to gather statistics about number of
-+allocations or total amount of memory allocated in Vulkan.
-+
-+Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
-+*/
-+typedef struct VmaDeviceMemoryCallbacks
-+{
-+ /// Optional, can be null.
-+ PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate;
-+ /// Optional, can be null.
-+ PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree;
-+ /// Optional, can be null.
-+ void* VMA_NULLABLE pUserData;
-+} VmaDeviceMemoryCallbacks;
-+
-+/** \brief Pointers to some Vulkan functions - a subset used by the library.
-+
-+Used in VmaAllocatorCreateInfo::pVulkanFunctions.
-+*/
-+typedef struct VmaVulkanFunctions
-+{
-+ /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
-+ PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr;
-+ /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS.
-+ PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr;
-+ PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties;
-+ PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties;
-+ PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory;
-+ PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory;
-+ PFN_vkMapMemory VMA_NULLABLE vkMapMemory;
-+ PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory;
-+ PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges;
-+ PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges;
-+ PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory;
-+ PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory;
-+ PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements;
-+ PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements;
-+ PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer;
-+ PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer;
-+ PFN_vkCreateImage VMA_NULLABLE vkCreateImage;
-+ PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage;
-+ PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer;
-+#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-+ /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
-+ PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR;
-+ /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension.
-+ PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR;
-+#endif
-+#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
-+ /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension.
-+ PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR;
-+ /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension.
-+ PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR;
-+#endif
-+#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
-+ PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR;
-+#endif
-+#if VMA_VULKAN_VERSION >= 1003000
-+ /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
-+ PFN_vkGetDeviceBufferMemoryRequirements VMA_NULLABLE vkGetDeviceBufferMemoryRequirements;
-+ /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4.
-+ PFN_vkGetDeviceImageMemoryRequirements VMA_NULLABLE vkGetDeviceImageMemoryRequirements;
-+#endif
-+} VmaVulkanFunctions;
-+
-+/// Description of a Allocator to be created.
-+typedef struct VmaAllocatorCreateInfo
-+{
-+ /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum.
-+ VmaAllocatorCreateFlags flags;
-+ /// Vulkan physical device.
-+ /** It must be valid throughout whole lifetime of created allocator. */
-+ VkPhysicalDevice VMA_NOT_NULL physicalDevice;
-+ /// Vulkan device.
-+ /** It must be valid throughout whole lifetime of created allocator. */
-+ VkDevice VMA_NOT_NULL device;
-+ /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional.
-+ /** Set to 0 to use default, which is currently 256 MiB. */
-+ VkDeviceSize preferredLargeHeapBlockSize;
-+ /// Custom CPU memory allocation callbacks. Optional.
-+ /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */
-+ const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
-+ /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional.
-+ /** Optional, can be null. */
-+ const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks;
-+ /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap.
-+
-+ If not NULL, it must be a pointer to an array of
-+ `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on
-+ maximum number of bytes that can be allocated out of particular Vulkan memory
-+ heap.
-+
-+ Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that
-+ heap. This is also the default in case of `pHeapSizeLimit` = NULL.
-+
-+ If there is a limit defined for a heap:
-+
-+ - If user tries to allocate more memory from that heap using this allocator,
-+ the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-+ - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the
-+ value of this limit will be reported instead when using vmaGetMemoryProperties().
-+
-+ Warning! Using this feature may not be equivalent to installing a GPU with
-+ smaller amount of memory, because graphics driver doesn't necessary fail new
-+ allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is
-+ exceeded. It may return success and just silently migrate some device memory
-+ blocks to system RAM. This driver behavior can also be controlled using
-+ VK_AMD_memory_overallocation_behavior extension.
-+ */
-+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit;
-+
-+ /** \brief Pointers to Vulkan functions. Can be null.
-+
-+ For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions).
-+ */
-+ const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions;
-+ /** \brief Handle to Vulkan instance object.
-+
-+ Starting from version 3.0.0 this member is no longer optional, it must be set!
-+ */
-+ VkInstance VMA_NOT_NULL instance;
-+ /** \brief Optional. The highest version of Vulkan that the application is designed to use.
-+
-+ It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`.
-+ The patch version number specified is ignored. Only the major and minor versions are considered.
-+ It must be less or equal (preferably equal) to value as passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`.
-+ Only versions 1.0, 1.1, 1.2, 1.3 are supported by the current implementation.
-+ Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`.
-+ */
-+ uint32_t vulkanApiVersion;
-+#if VMA_EXTERNAL_MEMORY
-+ /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type.
-+
-+ If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount`
-+ elements, defining external memory handle types of particular Vulkan memory type,
-+ to be passed using `VkExportMemoryAllocateInfoKHR`.
-+
-+ Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type.
-+ This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL.
-+ */
-+ const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes;
-+#endif // #if VMA_EXTERNAL_MEMORY
-+} VmaAllocatorCreateInfo;
-+
-+/// Information about existing #VmaAllocator object.
-+typedef struct VmaAllocatorInfo
-+{
-+ /** \brief Handle to Vulkan instance object.
-+
-+ This is the same value as has been passed through VmaAllocatorCreateInfo::instance.
-+ */
-+ VkInstance VMA_NOT_NULL instance;
-+ /** \brief Handle to Vulkan physical device object.
-+
-+ This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice.
-+ */
-+ VkPhysicalDevice VMA_NOT_NULL physicalDevice;
-+ /** \brief Handle to Vulkan device object.
-+
-+ This is the same value as has been passed through VmaAllocatorCreateInfo::device.
-+ */
-+ VkDevice VMA_NOT_NULL device;
-+} VmaAllocatorInfo;
-+
-+/** @} */
-+
-+/**
-+\addtogroup group_stats
-+@{
-+*/
-+
-+/** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total.
-+
-+These are fast to calculate.
-+See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics().
-+*/
-+typedef struct VmaStatistics
-+{
-+ /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated.
-+ */
-+ uint32_t blockCount;
-+ /** \brief Number of #VmaAllocation objects allocated.
-+
-+ Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`.
-+ */
-+ uint32_t allocationCount;
-+ /** \brief Number of bytes allocated in `VkDeviceMemory` blocks.
-+
-+ \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object
-+ (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls
-+ "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image.
-+ */
-+ VkDeviceSize blockBytes;
-+ /** \brief Total number of bytes occupied by all #VmaAllocation objects.
-+
-+ Always less or equal than `blockBytes`.
-+ Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan
-+ but unused by any #VmaAllocation.
-+ */
-+ VkDeviceSize allocationBytes;
-+} VmaStatistics;
-+
-+/** \brief More detailed statistics than #VmaStatistics.
-+
-+These are slower to calculate. Use for debugging purposes.
-+See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics().
-+
-+Previous version of the statistics API provided averages, but they have been removed
-+because they can be easily calculated as:
-+
-+\code
-+VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount;
-+VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes;
-+VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount;
-+\endcode
-+*/
-+typedef struct VmaDetailedStatistics
-+{
-+ /// Basic statistics.
-+ VmaStatistics statistics;
-+ /// Number of free ranges of memory between allocations.
-+ uint32_t unusedRangeCount;
-+ /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations.
-+ VkDeviceSize allocationSizeMin;
-+ /// Largest allocation size. 0 if there are 0 allocations.
-+ VkDeviceSize allocationSizeMax;
-+ /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges.
-+ VkDeviceSize unusedRangeSizeMin;
-+ /// Largest empty range size. 0 if there are 0 empty ranges.
-+ VkDeviceSize unusedRangeSizeMax;
-+} VmaDetailedStatistics;
-+
-+/** \brief General statistics from current state of the Allocator -
-+total memory usage across all memory heaps and types.
-+
-+These are slower to calculate. Use for debugging purposes.
-+See function vmaCalculateStatistics().
-+*/
-+typedef struct VmaTotalStatistics
-+{
-+ VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES];
-+ VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS];
-+ VmaDetailedStatistics total;
-+} VmaTotalStatistics;
-+
-+/** \brief Statistics of current memory usage and available budget for a specific memory heap.
-+
-+These are fast to calculate.
-+See function vmaGetHeapBudgets().
-+*/
-+typedef struct VmaBudget
-+{
-+ /** \brief Statistics fetched from the library.
-+ */
-+ VmaStatistics statistics;
-+ /** \brief Estimated current memory usage of the program, in bytes.
-+
-+ Fetched from system using VK_EXT_memory_budget extension if enabled.
-+
-+ It might be different than `statistics.blockBytes` (usually higher) due to additional implicit objects
-+ also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or
-+ `VkDeviceMemory` blocks allocated outside of this library, if any.
-+ */
-+ VkDeviceSize usage;
-+ /** \brief Estimated amount of memory available to the program, in bytes.
-+
-+ Fetched from system using VK_EXT_memory_budget extension if enabled.
-+
-+ It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors
-+ external to the program, decided by the operating system.
-+ Difference `budget - usage` is the amount of additional memory that can probably
-+ be allocated without problems. Exceeding the budget may result in various problems.
-+ */
-+ VkDeviceSize budget;
-+} VmaBudget;
-+
-+/** @} */
-+
-+/**
-+\addtogroup group_alloc
-+@{
-+*/
-+
-+/** \brief Parameters of new #VmaAllocation.
-+
-+To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others.
-+*/
-+typedef struct VmaAllocationCreateInfo
-+{
-+ /// Use #VmaAllocationCreateFlagBits enum.
-+ VmaAllocationCreateFlags flags;
-+ /** \brief Intended usage of memory.
-+
-+ You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n
-+ If `pool` is not null, this member is ignored.
-+ */
-+ VmaMemoryUsage usage;
-+ /** \brief Flags that must be set in a Memory Type chosen for an allocation.
-+
-+ Leave 0 if you specify memory requirements in other way. \n
-+ If `pool` is not null, this member is ignored.*/
-+ VkMemoryPropertyFlags requiredFlags;
-+ /** \brief Flags that preferably should be set in a memory type chosen for an allocation.
-+
-+ Set to 0 if no additional flags are preferred. \n
-+ If `pool` is not null, this member is ignored. */
-+ VkMemoryPropertyFlags preferredFlags;
-+ /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
-+
-+ Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if
-+ it meets other requirements specified by this structure, with no further
-+ restrictions on memory type index. \n
-+ If `pool` is not null, this member is ignored.
-+ */
-+ uint32_t memoryTypeBits;
-+ /** \brief Pool that this allocation should be created in.
-+
-+ Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members:
-+ `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.
-+ */
-+ VmaPool VMA_NULLABLE pool;
-+ /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
-+
-+ If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either
-+ null or pointer to a null-terminated string. The string will be then copied to
-+ internal buffer, so it doesn't need to be valid after allocation call.
-+ */
-+ void* VMA_NULLABLE pUserData;
-+ /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations.
-+
-+ It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object
-+ and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
-+ Otherwise, it has the priority of a memory block where it is placed and this variable is ignored.
-+ */
-+ float priority;
-+} VmaAllocationCreateInfo;
-+
-+/// Describes parameter of created #VmaPool.
-+typedef struct VmaPoolCreateInfo
-+{
-+ /** \brief Vulkan memory type index to allocate this pool from.
-+ */
-+ uint32_t memoryTypeIndex;
-+ /** \brief Use combination of #VmaPoolCreateFlagBits.
-+ */
-+ VmaPoolCreateFlags flags;
-+ /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional.
-+
-+ Specify nonzero to set explicit, constant size of memory blocks used by this
-+ pool.
-+
-+ Leave 0 to use default and let the library manage block sizes automatically.
-+ Sizes of particular blocks may vary.
-+ In this case, the pool will also support dedicated allocations.
-+ */
-+ VkDeviceSize blockSize;
-+ /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty.
-+
-+ Set to 0 to have no preallocated blocks and allow the pool be completely empty.
-+ */
-+ size_t minBlockCount;
-+ /** \brief Maximum number of blocks that can be allocated in this pool. Optional.
-+
-+ Set to 0 to use default, which is `SIZE_MAX`, which means no limit.
-+
-+ Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated
-+ throughout whole lifetime of this pool.
-+ */
-+ size_t maxBlockCount;
-+ /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations.
-+
-+ It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object.
-+ Otherwise, this variable is ignored.
-+ */
-+ float priority;
-+ /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0.
-+
-+ Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two.
-+ It can be useful in cases where alignment returned by Vulkan by functions like `vkGetBufferMemoryRequirements` is not enough,
-+ e.g. when doing interop with OpenGL.
-+ */
-+ VkDeviceSize minAllocationAlignment;
-+ /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional.
-+
-+ Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`.
-+ It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`.
-+ Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool.
-+
-+ Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`,
-+ can be attached automatically by this library when using other, more convenient of its features.
-+ */
-+ void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkMemoryAllocateInfo) pMemoryAllocateNext;
-+} VmaPoolCreateInfo;
-+
-+/** @} */
-+
-+/**
-+\addtogroup group_alloc
-+@{
-+*/
-+
-+/// Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo().
-+typedef struct VmaAllocationInfo
-+{
-+ /** \brief Memory type index that this allocation was allocated from.
-+
-+ It never changes.
-+ */
-+ uint32_t memoryType;
-+ /** \brief Handle to Vulkan memory object.
-+
-+ Same memory object can be shared by multiple allocations.
-+
-+ It can change after the allocation is moved during \ref defragmentation.
-+ */
-+ VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory;
-+ /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation.
-+
-+ You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. function
-+ vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image,
-+ not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation
-+ and apply this offset automatically.
-+
-+ It can change after the allocation is moved during \ref defragmentation.
-+ */
-+ VkDeviceSize offset;
-+ /** \brief Size of this allocation, in bytes.
-+
-+ It never changes.
-+
-+ \note Allocation size returned in this variable may be greater than the size
-+ requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the
-+ allocation is accessible for operations on memory e.g. using a pointer after
-+ mapping with vmaMapMemory(), but operations on the resource e.g. using
-+ `vkCmdCopyBuffer` must be limited to the size of the resource.
-+ */
-+ VkDeviceSize size;
-+ /** \brief Pointer to the beginning of this allocation as mapped data.
-+
-+ If the allocation hasn't been mapped using vmaMapMemory() and hasn't been
-+ created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null.
-+
-+ It can change after call to vmaMapMemory(), vmaUnmapMemory().
-+ It can also change after the allocation is moved during \ref defragmentation.
-+ */
-+ void* VMA_NULLABLE pMappedData;
-+ /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData().
-+
-+ It can change after call to vmaSetAllocationUserData() for this allocation.
-+ */
-+ void* VMA_NULLABLE pUserData;
-+ /** \brief Custom allocation name that was set with vmaSetAllocationName().
-+
-+ It can change after call to vmaSetAllocationName() for this allocation.
-+
-+ Another way to set custom name is to pass it in VmaAllocationCreateInfo::pUserData with
-+ additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED].
-+ */
-+ const char* VMA_NULLABLE pName;
-+} VmaAllocationInfo;
-+
-+/** Callback function called during vmaBeginDefragmentation() to check custom criterion about ending current defragmentation pass.
-+
-+Should return true if the defragmentation needs to stop current pass.
-+*/
-+typedef VkBool32 (VKAPI_PTR* PFN_vmaCheckDefragmentationBreakFunction)(void* VMA_NULLABLE pUserData);
-+
-+/** \brief Parameters for defragmentation.
-+
-+To be used with function vmaBeginDefragmentation().
-+*/
-+typedef struct VmaDefragmentationInfo
-+{
-+ /// \brief Use combination of #VmaDefragmentationFlagBits.
-+ VmaDefragmentationFlags flags;
-+ /** \brief Custom pool to be defragmented.
-+
-+ If null then default pools will undergo defragmentation process.
-+ */
-+ VmaPool VMA_NULLABLE pool;
-+ /** \brief Maximum numbers of bytes that can be copied during single pass, while moving allocations to different places.
-+
-+ `0` means no limit.
-+ */
-+ VkDeviceSize maxBytesPerPass;
-+ /** \brief Maximum number of allocations that can be moved during single pass to a different place.
-+
-+ `0` means no limit.
-+ */
-+ uint32_t maxAllocationsPerPass;
-+ /** \brief Optional custom callback for stopping vmaBeginDefragmentation().
-+
-+ Have to return true for breaking current defragmentation pass.
-+ */
-+ PFN_vmaCheckDefragmentationBreakFunction VMA_NULLABLE pfnBreakCallback;
-+ /// \brief Optional data to pass to custom callback for stopping pass of defragmentation.
-+ void* VMA_NULLABLE pBreakCallbackUserData;
-+} VmaDefragmentationInfo;
-+
-+/// Single move of an allocation to be done for defragmentation.
-+typedef struct VmaDefragmentationMove
-+{
-+ /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it.
-+ VmaDefragmentationMoveOperation operation;
-+ /// Allocation that should be moved.
-+ VmaAllocation VMA_NOT_NULL srcAllocation;
-+ /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`.
-+
-+ \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass,
-+ to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory().
-+ vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory.
-+ */
-+ VmaAllocation VMA_NOT_NULL dstTmpAllocation;
-+} VmaDefragmentationMove;
-+
-+/** \brief Parameters for incremental defragmentation steps.
-+
-+To be used with function vmaBeginDefragmentationPass().
-+*/
-+typedef struct VmaDefragmentationPassMoveInfo
-+{
-+ /// Number of elements in the `pMoves` array.
-+ uint32_t moveCount;
-+ /** \brief Array of moves to be performed by the user in the current defragmentation pass.
-+
-+ Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass().
-+
-+ For each element, you should:
-+
-+ 1. Create a new buffer/image in the place pointed by VmaDefragmentationMove::dstMemory + VmaDefragmentationMove::dstOffset.
-+ 2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`.
-+ 3. Make sure these commands finished executing on the GPU.
-+ 4. Destroy the old buffer/image.
-+
-+ Only then you can finish defragmentation pass by calling vmaEndDefragmentationPass().
-+ After this call, the allocation will point to the new place in memory.
-+
-+ Alternatively, if you cannot move specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
-+
-+ Alternatively, if you decide you want to completely remove the allocation:
-+
-+ 1. Destroy its buffer/image.
-+ 2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
-+
-+ Then, after vmaEndDefragmentationPass() the allocation will be freed.
-+ */
-+ VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves;
-+} VmaDefragmentationPassMoveInfo;
-+
-+/// Statistics returned for defragmentation process in function vmaEndDefragmentation().
-+typedef struct VmaDefragmentationStats
-+{
-+ /// Total number of bytes that have been copied while moving allocations to different places.
-+ VkDeviceSize bytesMoved;
-+ /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects.
-+ VkDeviceSize bytesFreed;
-+ /// Number of allocations that have been moved to different places.
-+ uint32_t allocationsMoved;
-+ /// Number of empty `VkDeviceMemory` objects that have been released to the system.
-+ uint32_t deviceMemoryBlocksFreed;
-+} VmaDefragmentationStats;
-+
-+/** @} */
-+
-+/**
-+\addtogroup group_virtual
-+@{
-+*/
-+
-+/// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock().
-+typedef struct VmaVirtualBlockCreateInfo
-+{
-+ /** \brief Total size of the virtual block.
-+
-+ Sizes can be expressed in bytes or any units you want as long as you are consistent in using them.
-+ For example, if you allocate from some array of structures, 1 can mean single instance of entire structure.
-+ */
-+ VkDeviceSize size;
-+
-+ /** \brief Use combination of #VmaVirtualBlockCreateFlagBits.
-+ */
-+ VmaVirtualBlockCreateFlags flags;
-+
-+ /** \brief Custom CPU memory allocation callbacks. Optional.
-+
-+ Optional, can be null. When specified, they will be used for all CPU-side memory allocations.
-+ */
-+ const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks;
-+} VmaVirtualBlockCreateInfo;
-+
-+/// Parameters of created virtual allocation to be passed to vmaVirtualAllocate().
-+typedef struct VmaVirtualAllocationCreateInfo
-+{
-+ /** \brief Size of the allocation.
-+
-+ Cannot be zero.
-+ */
-+ VkDeviceSize size;
-+ /** \brief Required alignment of the allocation. Optional.
-+
-+ Must be power of two. Special value 0 has the same meaning as 1 - means no special alignment is required, so allocation can start at any offset.
-+ */
-+ VkDeviceSize alignment;
-+ /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits.
-+ */
-+ VmaVirtualAllocationCreateFlags flags;
-+ /** \brief Custom pointer to be associated with the allocation. Optional.
-+
-+ It can be any value and can be used for user-defined purposes. It can be fetched or changed later.
-+ */
-+ void* VMA_NULLABLE pUserData;
-+} VmaVirtualAllocationCreateInfo;
-+
-+/// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo().
-+typedef struct VmaVirtualAllocationInfo
-+{
-+ /** \brief Offset of the allocation.
-+
-+ Offset at which the allocation was made.
-+ */
-+ VkDeviceSize offset;
-+ /** \brief Size of the allocation.
-+
-+ Same value as passed in VmaVirtualAllocationCreateInfo::size.
-+ */
-+ VkDeviceSize size;
-+ /** \brief Custom pointer associated with the allocation.
-+
-+ Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData().
-+ */
-+ void* VMA_NULLABLE pUserData;
-+} VmaVirtualAllocationInfo;
-+
-+/** @} */
-+
-+#endif // _VMA_DATA_TYPES_DECLARATIONS
-+
-+#ifndef _VMA_FUNCTION_HEADERS
-+
-+/**
-+\addtogroup group_init
-+@{
-+*/
-+
-+/// Creates #VmaAllocator object.
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
-+ const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo,
-+ VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator);
-+
-+/// Destroys allocator object.
-+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
-+ VmaAllocator VMA_NULLABLE allocator);
-+
-+/** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc.
-+
-+It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to
-+`VkPhysicalDevice`, `VkDevice` etc. every time using this function.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo);
-+
-+/**
-+PhysicalDeviceProperties are fetched from physicalDevice by the allocator.
-+You can access it here, without fetching it again on your own.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties);
-+
-+/**
-+PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator.
-+You can access it here, without fetching it again on your own.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties);
-+
-+/**
-+\brief Given Memory Type Index, returns Property Flags of this memory type.
-+
-+This is just a convenience function. Same information can be obtained using
-+vmaGetMemoryProperties().
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ uint32_t memoryTypeIndex,
-+ VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
-+
-+/** \brief Sets index of the current frame.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ uint32_t frameIndex);
-+
-+/** @} */
-+
-+/**
-+\addtogroup group_stats
-+@{
-+*/
-+
-+/** \brief Retrieves statistics from current state of the Allocator.
-+
-+This function is called "calculate" not "get" because it has to traverse all
-+internal data structures, so it may be quite slow. Use it for debugging purposes.
-+For faster but more brief statistics suitable to be called every frame or every allocation,
-+use vmaGetHeapBudgets().
-+
-+Note that when using allocator from multiple threads, returned information may immediately
-+become outdated.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaTotalStatistics* VMA_NOT_NULL pStats);
-+
-+/** \brief Retrieves information about current memory usage and budget for all memory heaps.
-+
-+\param allocator
-+\param[out] pBudgets Must point to array with number of elements at least equal to number of memory heaps in physical device used.
-+
-+This function is called "get" not "calculate" because it is very fast, suitable to be called
-+every frame or every allocation. For more detailed statistics use vmaCalculateStatistics().
-+
-+Note that when using allocator from multiple threads, returned information may immediately
-+become outdated.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets);
-+
-+/** @} */
-+
-+/**
-+\addtogroup group_alloc
-+@{
-+*/
-+
-+/**
-+\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo.
-+
-+This algorithm tries to find a memory type that:
-+
-+- Is allowed by memoryTypeBits.
-+- Contains all the flags from pAllocationCreateInfo->requiredFlags.
-+- Matches intended usage.
-+- Has as many flags from pAllocationCreateInfo->preferredFlags as possible.
-+
-+\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result
-+from this function or any other allocating function probably means that your
-+device doesn't support any memory type with requested features for the specific
-+type of resource you want to use it for. Please check parameters of your
-+resource, like image layout (OPTIMAL versus LINEAR) or mip level count.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ uint32_t memoryTypeBits,
-+ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
-+ uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
-+
-+/**
-+\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo.
-+
-+It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
-+It internally creates a temporary, dummy buffer that never has memory bound.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
-+ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
-+ uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
-+
-+/**
-+\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo.
-+
-+It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex.
-+It internally creates a temporary, dummy image that never has memory bound.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
-+ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
-+ uint32_t* VMA_NOT_NULL pMemoryTypeIndex);
-+
-+/** \brief Allocates Vulkan device memory and creates #VmaPool object.
-+
-+\param allocator Allocator object.
-+\param pCreateInfo Parameters of pool to create.
-+\param[out] pPool Handle to created pool.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo,
-+ VmaPool VMA_NULLABLE* VMA_NOT_NULL pPool);
-+
-+/** \brief Destroys #VmaPool object and frees Vulkan device memory.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaPool VMA_NULLABLE pool);
-+
-+/** @} */
-+
-+/**
-+\addtogroup group_stats
-+@{
-+*/
-+
-+/** \brief Retrieves statistics of existing #VmaPool object.
-+
-+\param allocator Allocator object.
-+\param pool Pool object.
-+\param[out] pPoolStats Statistics of specified pool.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaPool VMA_NOT_NULL pool,
-+ VmaStatistics* VMA_NOT_NULL pPoolStats);
-+
-+/** \brief Retrieves detailed statistics of existing #VmaPool object.
-+
-+\param allocator Allocator object.
-+\param pool Pool object.
-+\param[out] pPoolStats Statistics of specified pool.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaPool VMA_NOT_NULL pool,
-+ VmaDetailedStatistics* VMA_NOT_NULL pPoolStats);
-+
-+/** @} */
-+
-+/**
-+\addtogroup group_alloc
-+@{
-+*/
-+
-+/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions.
-+
-+Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
-+`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is
-+`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
-+
-+Possible return values:
-+
-+- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool.
-+- `VK_SUCCESS` - corruption detection has been performed and succeeded.
-+- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
-+ `VMA_ASSERT` is also fired in that case.
-+- Other value: Error returned by Vulkan, e.g. memory mapping failure.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaPool VMA_NOT_NULL pool);
-+
-+/** \brief Retrieves name of a custom pool.
-+
-+After the call `ppName` is either null or points to an internally-owned null-terminated string
-+containing name of the pool that was previously set. The pointer becomes invalid when the pool is
-+destroyed or its name is changed using vmaSetPoolName().
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaPool VMA_NOT_NULL pool,
-+ const char* VMA_NULLABLE* VMA_NOT_NULL ppName);
-+
-+/** \brief Sets name of a custom pool.
-+
-+`pName` can be either null or pointer to a null-terminated string with new name for the pool.
-+Function makes internal copy of the string, so it can be changed or freed immediately after this call.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaPool VMA_NOT_NULL pool,
-+ const char* VMA_NULLABLE pName);
-+
-+/** \brief General purpose memory allocation.
-+
-+\param allocator
-+\param pVkMemoryRequirements
-+\param pCreateInfo
-+\param[out] pAllocation Handle to allocated memory.
-+\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
-+
-+You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
-+
-+It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(),
-+vmaCreateBuffer(), vmaCreateImage() instead whenever possible.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements,
-+ const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
-+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
-+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
-+
-+/** \brief General purpose memory allocation for multiple allocation objects at once.
-+
-+\param allocator Allocator object.
-+\param pVkMemoryRequirements Memory requirements for each allocation.
-+\param pCreateInfo Creation parameters for each allocation.
-+\param allocationCount Number of allocations to make.
-+\param[out] pAllocations Pointer to array that will be filled with handles to created allocations.
-+\param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations.
-+
-+You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages().
-+
-+Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding.
-+It is just a general purpose allocation function able to make multiple allocations at once.
-+It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times.
-+
-+All allocations are made using same parameters. All of them are created out of the same memory pool and type.
-+If any allocation fails, all allocations already made within this function call are also freed, so that when
-+returned result is not `VK_SUCCESS`, `pAllocation` array is always entirely filled with `VK_NULL_HANDLE`.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements,
-+ const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo,
-+ size_t allocationCount,
-+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations,
-+ VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo);
-+
-+/** \brief Allocates memory suitable for given `VkBuffer`.
-+
-+\param allocator
-+\param buffer
-+\param pCreateInfo
-+\param[out] pAllocation Handle to allocated memory.
-+\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
-+
-+It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory().
-+
-+This is a special-purpose function. In most cases you should use vmaCreateBuffer().
-+
-+You must free the allocation using vmaFreeMemory() when no longer needed.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
-+ const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
-+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
-+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
-+
-+/** \brief Allocates memory suitable for given `VkImage`.
-+
-+\param allocator
-+\param image
-+\param pCreateInfo
-+\param[out] pAllocation Handle to allocated memory.
-+\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
-+
-+It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindImageMemory().
-+
-+This is a special-purpose function. In most cases you should use vmaCreateImage().
-+
-+You must free the allocation using vmaFreeMemory() when no longer needed.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
-+ const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
-+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
-+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
-+
-+/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage().
-+
-+Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ const VmaAllocation VMA_NULLABLE allocation);
-+
-+/** \brief Frees memory and destroys multiple allocations.
-+
-+Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding.
-+It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(),
-+vmaAllocateMemoryPages() and other functions.
-+It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times.
-+
-+Allocations in `pAllocations` array can come from any memory pools and types.
-+Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ size_t allocationCount,
-+ const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations);
-+
-+/** \brief Returns current information about specified allocation.
-+
-+Current parameters of given allocation are returned in `pAllocationInfo`.
-+
-+Although this function doesn't lock any mutex, so it should be quite efficient,
-+you should avoid calling it too often.
-+You can retrieve same VmaAllocationInfo structure while creating your resource, from function
-+vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure parameters don't change
-+(e.g. due to defragmentation).
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo);
-+
-+/** \brief Sets pUserData in given allocation to new value.
-+
-+The value of pointer `pUserData` is copied to allocation's `pUserData`.
-+It is opaque, so you can use it however you want - e.g.
-+as a pointer, ordinal number or some handle to you own data.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ void* VMA_NULLABLE pUserData);
-+
-+/** \brief Sets pName in given allocation to new value.
-+
-+`pName` must be either null, or pointer to a null-terminated string. The function
-+makes local copy of the string and sets it as allocation's `pName`. String
-+passed as pName doesn't need to be valid for whole lifetime of the allocation -
-+you can free it after this call. String previously pointed by allocation's
-+`pName` is freed from memory.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ const char* VMA_NULLABLE pName);
-+
-+/**
-+\brief Given an allocation, returns Property Flags of its memory type.
-+
-+This is just a convenience function. Same information can be obtained using
-+vmaGetAllocationInfo() + vmaGetMemoryProperties().
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ VkMemoryPropertyFlags* VMA_NOT_NULL pFlags);
-+
-+/** \brief Maps memory represented by given allocation and returns pointer to it.
-+
-+Maps memory represented by given allocation to make it accessible to CPU code.
-+When succeeded, `*ppData` contains pointer to first byte of this memory.
-+
-+\warning
-+If the allocation is part of a bigger `VkDeviceMemory` block, returned pointer is
-+correctly offsetted to the beginning of region assigned to this particular allocation.
-+Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block.
-+You should not add VmaAllocationInfo::offset to it!
-+
-+Mapping is internally reference-counted and synchronized, so despite raw Vulkan
-+function `vkMapMemory()` cannot be used to map same block of `VkDeviceMemory`
-+multiple times simultaneously, it is safe to call this function on allocations
-+assigned to the same memory block. Actual Vulkan memory will be mapped on first
-+mapping and unmapped on last unmapping.
-+
-+If the function succeeded, you must call vmaUnmapMemory() to unmap the
-+allocation when mapping is no longer needed or before freeing the allocation, at
-+the latest.
-+
-+It also safe to call this function multiple times on the same allocation. You
-+must call vmaUnmapMemory() same number of times as you called vmaMapMemory().
-+
-+It is also safe to call this function on allocation created with
-+#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time.
-+You must still call vmaUnmapMemory() same number of times as you called
-+vmaMapMemory(). You must not call vmaUnmapMemory() additional time to free the
-+"0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag.
-+
-+This function fails when used on allocation made in memory type that is not
-+`HOST_VISIBLE`.
-+
-+This function doesn't automatically flush or invalidate caches.
-+If the allocation is made from a memory types that is not `HOST_COHERENT`,
-+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ void* VMA_NULLABLE* VMA_NOT_NULL ppData);
-+
-+/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory().
-+
-+For details, see description of vmaMapMemory().
-+
-+This function doesn't automatically flush or invalidate caches.
-+If the allocation is made from a memory types that is not `HOST_COHERENT`,
-+you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation);
-+
-+/** \brief Flushes memory of given allocation.
-+
-+Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation.
-+It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`.
-+Unmap operation doesn't do that automatically.
-+
-+- `offset` must be relative to the beginning of allocation.
-+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` the the end of given allocation.
-+- `offset` and `size` don't have to be aligned.
-+ They are internally rounded down/up to multiply of `nonCoherentAtomSize`.
-+- If `size` is 0, this call is ignored.
-+- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
-+ this call is ignored.
-+
-+Warning! `offset` and `size` are relative to the contents of given `allocation`.
-+If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
-+Do not pass allocation's offset as `offset`!!!
-+
-+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
-+called, otherwise `VK_SUCCESS`.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ VkDeviceSize offset,
-+ VkDeviceSize size);
-+
-+/** \brief Invalidates memory of given allocation.
-+
-+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation.
-+It needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`.
-+Map operation doesn't do that automatically.
-+
-+- `offset` must be relative to the beginning of allocation.
-+- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` the the end of given allocation.
-+- `offset` and `size` don't have to be aligned.
-+ They are internally rounded down/up to multiply of `nonCoherentAtomSize`.
-+- If `size` is 0, this call is ignored.
-+- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`,
-+ this call is ignored.
-+
-+Warning! `offset` and `size` are relative to the contents of given `allocation`.
-+If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively.
-+Do not pass allocation's offset as `offset`!!!
-+
-+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if
-+it is called, otherwise `VK_SUCCESS`.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ VkDeviceSize offset,
-+ VkDeviceSize size);
-+
-+/** \brief Flushes memory of given set of allocations.
-+
-+Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations.
-+For more information, see documentation of vmaFlushAllocation().
-+
-+\param allocator
-+\param allocationCount
-+\param allocations
-+\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all ofsets are zero.
-+\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
-+
-+This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is
-+called, otherwise `VK_SUCCESS`.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ uint32_t allocationCount,
-+ const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
-+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
-+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
-+
-+/** \brief Invalidates memory of given set of allocations.
-+
-+Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations.
-+For more information, see documentation of vmaInvalidateAllocation().
-+
-+\param allocator
-+\param allocationCount
-+\param allocations
-+\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all ofsets are zero.
-+\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations.
-+
-+This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is
-+called, otherwise `VK_SUCCESS`.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ uint32_t allocationCount,
-+ const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations,
-+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets,
-+ const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes);
-+
-+/** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions.
-+
-+\param allocator
-+\param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
-+
-+Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero,
-+`VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are
-+`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection).
-+
-+Possible return values:
-+
-+- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types.
-+- `VK_SUCCESS` - corruption detection has been performed and succeeded.
-+- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations.
-+ `VMA_ASSERT` is also fired in that case.
-+- Other value: Error returned by Vulkan, e.g. memory mapping failure.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ uint32_t memoryTypeBits);
-+
-+/** \brief Begins defragmentation process.
-+
-+\param allocator Allocator object.
-+\param pInfo Structure filled with parameters of defragmentation.
-+\param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation.
-+\returns
-+- `VK_SUCCESS` if defragmentation can begin.
-+- `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported.
-+
-+For more information about defragmentation, see documentation chapter:
-+[Defragmentation](@ref defragmentation).
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ const VmaDefragmentationInfo* VMA_NOT_NULL pInfo,
-+ VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext);
-+
-+/** \brief Ends defragmentation process.
-+
-+\param allocator Allocator object.
-+\param context Context object that has been created by vmaBeginDefragmentation().
-+\param[out] pStats Optional stats for the defragmentation. Can be null.
-+
-+Use this function to finish defragmentation started by vmaBeginDefragmentation().
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaDefragmentationContext VMA_NOT_NULL context,
-+ VmaDefragmentationStats* VMA_NULLABLE pStats);
-+
-+/** \brief Starts single defragmentation pass.
-+
-+\param allocator Allocator object.
-+\param context Context object that has been created by vmaBeginDefragmentation().
-+\param[out] pPassInfo Computed information for current pass.
-+\returns
-+- `VK_SUCCESS` if no more moves are possible. Then you can omit call to vmaEndDefragmentationPass() and simply end whole defragmentation.
-+- `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(),
-+ and then preferably try another pass with vmaBeginDefragmentationPass().
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaDefragmentationContext VMA_NOT_NULL context,
-+ VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
-+
-+/** \brief Ends single defragmentation pass.
-+
-+\param allocator Allocator object.
-+\param context Context object that has been created by vmaBeginDefragmentation().
-+\param pPassInfo Computed information for current pass filled by vmaBeginDefragmentationPass() and possibly modified by you.
-+
-+Returns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if more defragmentations are possible.
-+
-+Ends incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`.
-+After this call:
-+
-+- Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY
-+ (which is the default) will be pointing to the new destination place.
-+- Allocation at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY
-+ will be freed.
-+
-+If no more moves are possible you can end whole defragmentation.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaDefragmentationContext VMA_NOT_NULL context,
-+ VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo);
-+
-+/** \brief Binds buffer to allocation.
-+
-+Binds specified buffer to region of memory represented by specified allocation.
-+Gets `VkDeviceMemory` handle and offset from the allocation.
-+If you want to create a buffer, allocate memory for it and bind them together separately,
-+you should use this function for binding instead of standard `vkBindBufferMemory()`,
-+because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
-+allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
-+(which is illegal in Vulkan).
-+
-+It is recommended to use function vmaCreateBuffer() instead of this one.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer);
-+
-+/** \brief Binds buffer to allocation with additional parameters.
-+
-+\param allocator
-+\param allocation
-+\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
-+\param buffer
-+\param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null.
-+
-+This function is similar to vmaBindBufferMemory(), but it provides additional parameters.
-+
-+If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
-+or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ VkDeviceSize allocationLocalOffset,
-+ VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer,
-+ const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindBufferMemoryInfoKHR) pNext);
-+
-+/** \brief Binds image to allocation.
-+
-+Binds specified image to region of memory represented by specified allocation.
-+Gets `VkDeviceMemory` handle and offset from the allocation.
-+If you want to create an image, allocate memory for it and bind them together separately,
-+you should use this function for binding instead of standard `vkBindImageMemory()`,
-+because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple
-+allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously
-+(which is illegal in Vulkan).
-+
-+It is recommended to use function vmaCreateImage() instead of this one.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ VkImage VMA_NOT_NULL_NON_DISPATCHABLE image);
-+
-+/** \brief Binds image to allocation with additional parameters.
-+
-+\param allocator
-+\param allocation
-+\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0.
-+\param image
-+\param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null.
-+
-+This function is similar to vmaBindImageMemory(), but it provides additional parameters.
-+
-+If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag
-+or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ VkDeviceSize allocationLocalOffset,
-+ VkImage VMA_NOT_NULL_NON_DISPATCHABLE image,
-+ const void* VMA_NULLABLE VMA_EXTENDS_VK_STRUCT(VkBindImageMemoryInfoKHR) pNext);
-+
-+/** \brief Creates a new `VkBuffer`, allocates and binds memory for it.
-+
-+\param allocator
-+\param pBufferCreateInfo
-+\param pAllocationCreateInfo
-+\param[out] pBuffer Buffer that was created.
-+\param[out] pAllocation Allocation that was created.
-+\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo().
-+
-+This function automatically:
-+
-+-# Creates buffer.
-+-# Allocates appropriate memory for it.
-+-# Binds the buffer with the memory.
-+
-+If any of these operations fail, buffer and allocation are not created,
-+returned value is negative error code, `*pBuffer` and `*pAllocation` are null.
-+
-+If the function succeeded, you must destroy both buffer and allocation when you
-+no longer need them using either convenience function vmaDestroyBuffer() or
-+separately, using `vkDestroyBuffer()` and vmaFreeMemory().
-+
-+If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used,
-+VK_KHR_dedicated_allocation extension is used internally to query driver whether
-+it requires or prefers the new buffer to have dedicated allocation. If yes,
-+and if dedicated allocation is possible
-+(#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated
-+allocation for this buffer, just like when using
-+#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
-+
-+\note This function creates a new `VkBuffer`. Sub-allocation of parts of one large buffer,
-+although recommended as a good practice, is out of scope of this library and could be implemented
-+by the user as a higher-level logic on top of VMA.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
-+ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
-+ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
-+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
-+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
-+
-+/** \brief Creates a buffer with additional minimum alignment.
-+
-+Similar to vmaCreateBuffer() but provides additional parameter `minAlignment` which allows to specify custom,
-+minimum alignment to be used when placing the buffer inside a larger memory block, which may be needed e.g.
-+for interop with OpenGL.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
-+ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
-+ VkDeviceSize minAlignment,
-+ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer,
-+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
-+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
-+
-+/** \brief Creates a new `VkBuffer`, binds already created memory for it.
-+
-+\param allocator
-+\param allocation Allocation that provides memory to be used for binding new buffer to it.
-+\param pBufferCreateInfo
-+\param[out] pBuffer Buffer that was created.
-+
-+This function automatically:
-+
-+-# Creates buffer.
-+-# Binds the buffer with the supplied memory.
-+
-+If any of these operations fail, buffer is not created,
-+returned value is negative error code and `*pBuffer` is null.
-+
-+If the function succeeded, you must destroy the buffer when you
-+no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
-+allocation you can use convenience function vmaDestroyBuffer().
-+
-+\note There is a new version of this function augmented with parameter `allocationLocalOffset` - see vmaCreateAliasingBuffer2().
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
-+ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
-+
-+/** \brief Creates a new `VkBuffer`, binds already created memory for it.
-+
-+\param allocator
-+\param allocation Allocation that provides memory to be used for binding new buffer to it.
-+\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the allocation. Normally it should be 0.
-+\param pBufferCreateInfo
-+\param[out] pBuffer Buffer that was created.
-+
-+This function automatically:
-+
-+-# Creates buffer.
-+-# Binds the buffer with the supplied memory.
-+
-+If any of these operations fail, buffer is not created,
-+returned value is negative error code and `*pBuffer` is null.
-+
-+If the function succeeded, you must destroy the buffer when you
-+no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding
-+allocation you can use convenience function vmaDestroyBuffer().
-+
-+\note This is a new version of the function augmented with parameter `allocationLocalOffset`.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ VkDeviceSize allocationLocalOffset,
-+ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
-+ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer);
-+
-+/** \brief Destroys Vulkan buffer and frees allocated memory.
-+
-+This is just a convenience function equivalent to:
-+
-+\code
-+vkDestroyBuffer(device, buffer, allocationCallbacks);
-+vmaFreeMemory(allocator, allocation);
-+\endcode
-+
-+It is safe to pass null as buffer and/or allocation.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer,
-+ VmaAllocation VMA_NULLABLE allocation);
-+
-+/// Function similar to vmaCreateBuffer().
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
-+ const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo,
-+ VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage,
-+ VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation,
-+ VmaAllocationInfo* VMA_NULLABLE pAllocationInfo);
-+
-+/// Function similar to vmaCreateAliasingBuffer() but for images.
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
-+ VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
-+
-+/// Function similar to vmaCreateAliasingBuffer2() but for images.
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ VkDeviceSize allocationLocalOffset,
-+ const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
-+ VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage);
-+
-+/** \brief Destroys Vulkan image and frees allocated memory.
-+
-+This is just a convenience function equivalent to:
-+
-+\code
-+vkDestroyImage(device, image, allocationCallbacks);
-+vmaFreeMemory(allocator, allocation);
-+\endcode
-+
-+It is safe to pass null as image and/or allocation.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
-+ VmaAllocation VMA_NULLABLE allocation);
-+
-+/** @} */
-+
-+/**
-+\addtogroup group_virtual
-+@{
-+*/
-+
-+/** \brief Creates new #VmaVirtualBlock object.
-+
-+\param pCreateInfo Parameters for creation.
-+\param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
-+ const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
-+ VmaVirtualBlock VMA_NULLABLE* VMA_NOT_NULL pVirtualBlock);
-+
-+/** \brief Destroys #VmaVirtualBlock object.
-+
-+Please note that you should consciously handle virtual allocations that could remain unfreed in the block.
-+You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock()
-+if you are sure this is what you want. If you do neither, an assert is called.
-+
-+If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`,
-+don't forget to free them.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(
-+ VmaVirtualBlock VMA_NULLABLE virtualBlock);
-+
-+/** \brief Returns true of the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations.
-+*/
-+VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(
-+ VmaVirtualBlock VMA_NOT_NULL virtualBlock);
-+
-+/** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(
-+ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-+ VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo);
-+
-+/** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.
-+
-+If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned
-+(despite the function doesn't ever allocate actual GPU memory).
-+`pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, it set to `UINT64_MAX`.
-+
-+\param virtualBlock Virtual block
-+\param pCreateInfo Parameters for the allocation
-+\param[out] pAllocation Returned handle of the new allocation
-+\param[out] pOffset Returned offset of the new allocation. Optional, can be null.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(
-+ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-+ const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
-+ VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
-+ VkDeviceSize* VMA_NULLABLE pOffset);
-+
-+/** \brief Frees virtual allocation inside given #VmaVirtualBlock.
-+
-+It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(
-+ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-+ VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation);
-+
-+/** \brief Frees all virtual allocations inside given #VmaVirtualBlock.
-+
-+You must either call this function or free each virtual allocation individually with vmaVirtualFree()
-+before destroying a virtual block. Otherwise, an assert is called.
-+
-+If you keep pointer to some additional metadata associated with your virtual allocation in its `pUserData`,
-+don't forget to free it as well.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(
-+ VmaVirtualBlock VMA_NOT_NULL virtualBlock);
-+
-+/** \brief Changes custom pointer associated with given virtual allocation.
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(
-+ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-+ VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation,
-+ void* VMA_NULLABLE pUserData);
-+
-+/** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
-+
-+This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics().
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(
-+ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-+ VmaStatistics* VMA_NOT_NULL pStats);
-+
-+/** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
-+
-+This function is slow to call. Use for debugging purposes.
-+For less detailed statistics, see vmaGetVirtualBlockStatistics().
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(
-+ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-+ VmaDetailedStatistics* VMA_NOT_NULL pStats);
-+
-+/** @} */
-+
-+#if VMA_STATS_STRING_ENABLED
-+/**
-+\addtogroup group_stats
-+@{
-+*/
-+
-+/** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock.
-+\param virtualBlock Virtual block.
-+\param[out] ppStatsString Returned string.
-+\param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces.
-+
-+Returned string must be freed using vmaFreeVirtualBlockStatsString().
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(
-+ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-+ char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
-+ VkBool32 detailedMap);
-+
-+/// Frees a string returned by vmaBuildVirtualBlockStatsString().
-+VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(
-+ VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-+ char* VMA_NULLABLE pStatsString);
-+
-+/** \brief Builds and returns statistics as a null-terminated string in JSON format.
-+\param allocator
-+\param[out] ppStatsString Must be freed using vmaFreeStatsString() function.
-+\param detailedMap
-+*/
-+VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString,
-+ VkBool32 detailedMap);
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ char* VMA_NULLABLE pStatsString);
-+
-+/** @} */
-+
-+#endif // VMA_STATS_STRING_ENABLED
-+
-+#endif // _VMA_FUNCTION_HEADERS
-+
-+#ifdef __cplusplus
-+}
-+#endif
-+
-+#endif // AMD_VULKAN_MEMORY_ALLOCATOR_H
-+
-+////////////////////////////////////////////////////////////////////////////////
-+////////////////////////////////////////////////////////////////////////////////
-+//
-+// IMPLEMENTATION
-+//
-+////////////////////////////////////////////////////////////////////////////////
-+////////////////////////////////////////////////////////////////////////////////
-+
-+// For Visual Studio IntelliSense.
-+#if defined(__cplusplus) && defined(__INTELLISENSE__)
-+#define VMA_IMPLEMENTATION
-+#endif
-+
-+#ifdef VMA_IMPLEMENTATION
-+#undef VMA_IMPLEMENTATION
-+
-+#include <cstdint>
-+#include <cstdlib>
-+#include <cstring>
-+#include <utility>
-+#include <type_traits>
-+
-+#ifdef _MSC_VER
-+ #include <intrin.h> // For functions like __popcnt, _BitScanForward etc.
-+#endif
-+#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
-+ #include <bit> // For std::popcount
-+#endif
-+
-+#if VMA_STATS_STRING_ENABLED
-+ #include <cstdio> // For snprintf
-+#endif
-+
-+/*******************************************************************************
-+CONFIGURATION SECTION
-+
-+Define some of these macros before each #include of this header or change them
-+here if you need other then default behavior depending on your environment.
-+*/
-+#ifndef _VMA_CONFIGURATION
-+
-+/*
-+Define this macro to 1 to make the library fetch pointers to Vulkan functions
-+internally, like:
-+
-+ vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
-+*/
-+#if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
-+ #define VMA_STATIC_VULKAN_FUNCTIONS 1
-+#endif
-+
-+/*
-+Define this macro to 1 to make the library fetch pointers to Vulkan functions
-+internally, like:
-+
-+ vulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkGetDeviceProcAddr(device, "vkAllocateMemory");
-+
-+To use this feature in new versions of VMA you now have to pass
-+VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as
-+VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null.
-+*/
-+#if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
-+ #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
-+#endif
-+
-+#ifndef VMA_USE_STL_SHARED_MUTEX
-+ #if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
-+ #define VMA_USE_STL_SHARED_MUTEX 1
-+ // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
-+ // Otherwise it is always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
-+ #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
-+ #define VMA_USE_STL_SHARED_MUTEX 1
-+ #else
-+ #define VMA_USE_STL_SHARED_MUTEX 0
-+ #endif
-+#endif
-+
-+/*
-+Define this macro to include custom header files without having to edit this file directly, e.g.:
-+
-+ // Inside of "my_vma_configuration_user_includes.h":
-+
-+ #include "my_custom_assert.h" // for MY_CUSTOM_ASSERT
-+ #include "my_custom_min.h" // for my_custom_min
-+ #include <algorithm>
-+ #include <mutex>
-+
-+ // Inside a different file, which includes "vk_mem_alloc.h":
-+
-+ #define VMA_CONFIGURATION_USER_INCLUDES_H "my_vma_configuration_user_includes.h"
-+ #define VMA_ASSERT(expr) MY_CUSTOM_ASSERT(expr)
-+ #define VMA_MIN(v1, v2) (my_custom_min(v1, v2))
-+ #include "vk_mem_alloc.h"
-+ ...
-+
-+The following headers are used in this CONFIGURATION section only, so feel free to
-+remove them if not needed.
-+*/
-+#if !defined(VMA_CONFIGURATION_USER_INCLUDES_H)
-+ #include <cassert> // for assert
-+ #include <algorithm> // for min, max
-+ #include <mutex>
-+#else
-+ #include VMA_CONFIGURATION_USER_INCLUDES_H
-+#endif
-+
-+#ifndef VMA_NULL
-+ // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
-+ #define VMA_NULL nullptr
-+#endif
-+
-+// Used to silence warnings for implicit fallthrough.
-+#ifndef VMA_FALLTHROUGH
-+ #if __has_cpp_attribute(clang::fallthrough)
-+ #define VMA_FALLTHROUGH [[clang::fallthrough]];
-+ #elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
-+ #define VMA_FALLTHROUGH [[fallthrough]]
-+ #else
-+ #define VMA_FALLTHROUGH
-+ #endif
-+#endif
-+
-+// Normal assert to check for programmer's errors, especially in Debug configuration.
-+#ifndef VMA_ASSERT
-+ #ifdef NDEBUG
-+ #define VMA_ASSERT(expr)
-+ #else
-+ #define VMA_ASSERT(expr) assert(expr)
-+ #endif
-+#endif
-+
-+// Assert that will be called very often, like inside data structures e.g. operator[].
-+// Making it non-empty can make program slow.
-+#ifndef VMA_HEAVY_ASSERT
-+ #ifdef NDEBUG
-+ #define VMA_HEAVY_ASSERT(expr)
-+ #else
-+ #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr)
-+ #endif
-+#endif
-+
-+// If your compiler is not compatible with C++17 and definition of
-+// aligned_alloc() function is missing, uncommenting following line may help:
-+
-+//#include <malloc.h>
-+
-+#if defined(__ANDROID_API__) && (__ANDROID_API__ < 16)
-+#include <cstdlib>
-+void* vma_aligned_alloc(size_t alignment, size_t size)
-+{
-+ // alignment must be >= sizeof(void*)
-+ if(alignment < sizeof(void*))
-+ {
-+ alignment = sizeof(void*);
-+ }
-+
-+ return memalign(alignment, size);
-+}
-+#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC)) || defined(__OpenBSD__) || defined(__FreeBSD__)
-+#include <cstdlib>
-+
-+#if defined(__APPLE__)
-+#include <AvailabilityMacros.h>
-+#endif
-+
-+void *vma_aligned_alloc(size_t alignment, size_t size)
-+{
-+ // Unfortunately, aligned_alloc causes VMA to crash due to it returning null pointers. (At least under 11.4)
-+ // Therefore, for now disable this specific exception until a proper solution is found.
-+ //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0))
-+ //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0
-+ // // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only
-+ // // with the MacOSX11.0 SDK in Xcode 12 (which is what adds
-+ // // MAC_OS_X_VERSION_10_16), even though the function is marked
-+ // // available for 10.15. That is why the preprocessor checks for 10.16 but
-+ // // the __builtin_available checks for 10.15.
-+ // // People who use C++17 could call aligned_alloc with the 10.15 SDK already.
-+ // if (__builtin_available(macOS 10.15, iOS 13, *))
-+ // return aligned_alloc(alignment, size);
-+ //#endif
-+ //#endif
-+
-+ // alignment must be >= sizeof(void*)
-+ if(alignment < sizeof(void*))
-+ {
-+ alignment = sizeof(void*);
-+ }
-+
-+ void *pointer;
-+ if(posix_memalign(&pointer, alignment, size) == 0)
-+ return pointer;
-+ return VMA_NULL;
-+}
-+#elif defined(_WIN32)
-+void* vma_aligned_alloc(size_t alignment, size_t size)
-+{
-+ return _aligned_malloc(size, alignment);
-+}
-+#elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
-+void* vma_aligned_alloc(size_t alignment, size_t size)
-+{
-+ return aligned_alloc(alignment, size);
-+}
-+#else
-+void* vma_aligned_alloc(size_t alignment, size_t size)
-+{
-+ VMA_ASSERT(0 && "Could not implement aligned_alloc automatically. Please enable C++17 or later in your compiler or provide custom implementation of macro VMA_SYSTEM_ALIGNED_MALLOC (and VMA_SYSTEM_ALIGNED_FREE if needed) using the API of your system.");
-+ return VMA_NULL;
-+}
-+#endif
-+
-+#if defined(_WIN32)
-+static void vma_aligned_free(void* ptr)
-+{
-+ _aligned_free(ptr);
-+}
-+#else
-+static void vma_aligned_free(void* VMA_NULLABLE ptr)
-+{
-+ free(ptr);
-+}
-+#endif
-+
-+#ifndef VMA_ALIGN_OF
-+ #define VMA_ALIGN_OF(type) (alignof(type))
-+#endif
-+
-+#ifndef VMA_SYSTEM_ALIGNED_MALLOC
-+ #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size))
-+#endif
-+
-+#ifndef VMA_SYSTEM_ALIGNED_FREE
-+ // VMA_SYSTEM_FREE is the old name, but might have been defined by the user
-+ #if defined(VMA_SYSTEM_FREE)
-+ #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr)
-+ #else
-+ #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr)
-+ #endif
-+#endif
-+
-+#ifndef VMA_COUNT_BITS_SET
-+ // Returns number of bits set to 1 in (v)
-+ #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v)
-+#endif
-+
-+#ifndef VMA_BITSCAN_LSB
-+ // Scans integer for index of first nonzero value from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX
-+ #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask)
-+#endif
-+
-+#ifndef VMA_BITSCAN_MSB
-+ // Scans integer for index of first nonzero value from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX
-+ #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask)
-+#endif
-+
-+#ifndef VMA_MIN
-+ #define VMA_MIN(v1, v2) ((std::min)((v1), (v2)))
-+#endif
-+
-+#ifndef VMA_MAX
-+ #define VMA_MAX(v1, v2) ((std::max)((v1), (v2)))
-+#endif
-+
-+#ifndef VMA_SWAP
-+ #define VMA_SWAP(v1, v2) std::swap((v1), (v2))
-+#endif
-+
-+#ifndef VMA_SORT
-+ #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp)
-+#endif
-+
-+#ifndef VMA_DEBUG_LOG_FORMAT
-+ #define VMA_DEBUG_LOG_FORMAT(format, ...)
-+ /*
-+ #define VMA_DEBUG_LOG_FORMAT(format, ...) do { \
-+ printf((format), __VA_ARGS__); \
-+ printf("\n"); \
-+ } while(false)
-+ */
-+#endif
-+
-+#ifndef VMA_DEBUG_LOG
-+ #define VMA_DEBUG_LOG(str) VMA_DEBUG_LOG_FORMAT("%s", (str))
-+#endif
-+
-+#ifndef VMA_CLASS_NO_COPY
-+ #define VMA_CLASS_NO_COPY(className) \
-+ private: \
-+ className(const className&) = delete; \
-+ className& operator=(const className&) = delete;
-+#endif
-+#ifndef VMA_CLASS_NO_COPY_NO_MOVE
-+ #define VMA_CLASS_NO_COPY_NO_MOVE(className) \
-+ private: \
-+ className(const className&) = delete; \
-+ className(className&&) = delete; \
-+ className& operator=(const className&) = delete; \
-+ className& operator=(className&&) = delete;
-+#endif
-+
-+// Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString.
-+#if VMA_STATS_STRING_ENABLED
-+ static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
-+ {
-+ snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
-+ }
-+ static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
-+ {
-+ snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
-+ }
-+ static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
-+ {
-+ snprintf(outStr, strLen, "%p", ptr);
-+ }
-+#endif
-+
-+#ifndef VMA_MUTEX
-+ class VmaMutex
-+ {
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaMutex)
-+ public:
-+ VmaMutex() { }
-+ void Lock() { m_Mutex.lock(); }
-+ void Unlock() { m_Mutex.unlock(); }
-+ bool TryLock() { return m_Mutex.try_lock(); }
-+ private:
-+ std::mutex m_Mutex;
-+ };
-+ #define VMA_MUTEX VmaMutex
-+#endif
-+
-+// Read-write mutex, where "read" is shared access, "write" is exclusive access.
-+#ifndef VMA_RW_MUTEX
-+ #if VMA_USE_STL_SHARED_MUTEX
-+ // Use std::shared_mutex from C++17.
-+ #include <shared_mutex>
-+ class VmaRWMutex
-+ {
-+ public:
-+ void LockRead() { m_Mutex.lock_shared(); }
-+ void UnlockRead() { m_Mutex.unlock_shared(); }
-+ bool TryLockRead() { return m_Mutex.try_lock_shared(); }
-+ void LockWrite() { m_Mutex.lock(); }
-+ void UnlockWrite() { m_Mutex.unlock(); }
-+ bool TryLockWrite() { return m_Mutex.try_lock(); }
-+ private:
-+ std::shared_mutex m_Mutex;
-+ };
-+ #define VMA_RW_MUTEX VmaRWMutex
-+ #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
-+ // Use SRWLOCK from WinAPI.
-+ // Minimum supported client = Windows Vista, server = Windows Server 2008.
-+ class VmaRWMutex
-+ {
-+ public:
-+ VmaRWMutex() { InitializeSRWLock(&m_Lock); }
-+ void LockRead() { AcquireSRWLockShared(&m_Lock); }
-+ void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
-+ bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
-+ void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
-+ void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
-+ bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
-+ private:
-+ SRWLOCK m_Lock;
-+ };
-+ #define VMA_RW_MUTEX VmaRWMutex
-+ #else
-+ // Less efficient fallback: Use normal mutex.
-+ class VmaRWMutex
-+ {
-+ public:
-+ void LockRead() { m_Mutex.Lock(); }
-+ void UnlockRead() { m_Mutex.Unlock(); }
-+ bool TryLockRead() { return m_Mutex.TryLock(); }
-+ void LockWrite() { m_Mutex.Lock(); }
-+ void UnlockWrite() { m_Mutex.Unlock(); }
-+ bool TryLockWrite() { return m_Mutex.TryLock(); }
-+ private:
-+ VMA_MUTEX m_Mutex;
-+ };
-+ #define VMA_RW_MUTEX VmaRWMutex
-+ #endif // #if VMA_USE_STL_SHARED_MUTEX
-+#endif // #ifndef VMA_RW_MUTEX
-+
-+/*
-+If providing your own implementation, you need to implement a subset of std::atomic.
-+*/
-+#ifndef VMA_ATOMIC_UINT32
-+ #include <atomic>
-+ #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
-+#endif
-+
-+#ifndef VMA_ATOMIC_UINT64
-+ #include <atomic>
-+ #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
-+#endif
-+
-+#ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
-+ /**
-+ Every allocation will have its own memory block.
-+ Define to 1 for debugging purposes only.
-+ */
-+ #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
-+#endif
-+
-+#ifndef VMA_MIN_ALIGNMENT
-+ /**
-+ Minimum alignment of all allocations, in bytes.
-+ Set to more than 1 for debugging purposes. Must be power of two.
-+ */
-+ #ifdef VMA_DEBUG_ALIGNMENT // Old name
-+ #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
-+ #else
-+ #define VMA_MIN_ALIGNMENT (1)
-+ #endif
-+#endif
-+
-+#ifndef VMA_DEBUG_MARGIN
-+ /**
-+ Minimum margin after every allocation, in bytes.
-+ Set nonzero for debugging purposes only.
-+ */
-+ #define VMA_DEBUG_MARGIN (0)
-+#endif
-+
-+#ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
-+ /**
-+ Define this macro to 1 to automatically fill new allocations and destroyed
-+ allocations with some bit pattern.
-+ */
-+ #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
-+#endif
-+
-+#ifndef VMA_DEBUG_DETECT_CORRUPTION
-+ /**
-+ Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to
-+ enable writing magic value to the margin after every allocation and
-+ validating it, so that memory corruptions (out-of-bounds writes) are detected.
-+ */
-+ #define VMA_DEBUG_DETECT_CORRUPTION (0)
-+#endif
-+
-+#ifndef VMA_DEBUG_GLOBAL_MUTEX
-+ /**
-+ Set this to 1 for debugging purposes only, to enable single mutex protecting all
-+ entry calls to the library. Can be useful for debugging multithreading issues.
-+ */
-+ #define VMA_DEBUG_GLOBAL_MUTEX (0)
-+#endif
-+
-+#ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY
-+ /**
-+ Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity.
-+ Set to more than 1 for debugging purposes only. Must be power of two.
-+ */
-+ #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1)
-+#endif
-+
-+#ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
-+ /*
-+ Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount
-+ and return error instead of leaving up to Vulkan implementation what to do in such cases.
-+ */
-+ #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0)
-+#endif
-+
-+#ifndef VMA_SMALL_HEAP_MAX_SIZE
-+ /// Maximum size of a memory heap in Vulkan to consider it "small".
-+ #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024)
-+#endif
-+
-+#ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE
-+ /// Default size of a block allocated as single VkDeviceMemory from a "large" heap.
-+ #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024)
-+#endif
-+
-+/*
-+Mapping hysteresis is a logic that launches when vmaMapMemory/vmaUnmapMemory is called
-+or a persistently mapped allocation is created and destroyed several times in a row.
-+It keeps additional +1 mapping of a device memory block to prevent calling actual
-+vkMapMemory/vkUnmapMemory too many times, which may improve performance and help
-+tools like RenderDoc.
-+*/
-+#ifndef VMA_MAPPING_HYSTERESIS_ENABLED
-+ #define VMA_MAPPING_HYSTERESIS_ENABLED 1
-+#endif
-+
-+#define VMA_VALIDATE(cond) do { if(!(cond)) { \
-+ VMA_ASSERT(0 && "Validation failed: " #cond); \
-+ return false; \
-+ } } while(false)
-+
-+/*******************************************************************************
-+END OF CONFIGURATION
-+*/
-+#endif // _VMA_CONFIGURATION
-+
-+
-+static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
-+static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
-+// Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
-+static const uint32_t VMA_CORRUPTION_DETECTION_MAGIC_VALUE = 0x7F84E666;
-+
-+// Copy of some Vulkan definitions so we don't need to check their existence just to handle few constants.
-+static const uint32_t VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY = 0x00000040;
-+static const uint32_t VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY = 0x00000080;
-+static const uint32_t VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY = 0x00020000;
-+static const uint32_t VK_IMAGE_CREATE_DISJOINT_BIT_COPY = 0x00000200;
-+static const int32_t VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY = 1000158000;
-+static const uint32_t VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET = 0x10000000u;
-+static const uint32_t VMA_ALLOCATION_TRY_COUNT = 32;
-+static const uint32_t VMA_VENDOR_ID_AMD = 4098;
-+
-+// This one is tricky. Vulkan specification defines this code as available since
-+// Vulkan 1.0, but doesn't actually define it in Vulkan SDK earlier than 1.2.131.
-+// See pull request #207.
-+#define VK_ERROR_UNKNOWN_COPY ((VkResult)-13)
-+
-+
-+#if VMA_STATS_STRING_ENABLED
-+// Correspond to values of enum VmaSuballocationType.
-+static const char* VMA_SUBALLOCATION_TYPE_NAMES[] =
-+{
-+ "FREE",
-+ "UNKNOWN",
-+ "BUFFER",
-+ "IMAGE_UNKNOWN",
-+ "IMAGE_LINEAR",
-+ "IMAGE_OPTIMAL",
-+};
-+#endif
-+
-+static VkAllocationCallbacks VmaEmptyAllocationCallbacks =
-+ { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
-+
-+
-+#ifndef _VMA_ENUM_DECLARATIONS
-+
-+enum VmaSuballocationType
-+{
-+ VMA_SUBALLOCATION_TYPE_FREE = 0,
-+ VMA_SUBALLOCATION_TYPE_UNKNOWN = 1,
-+ VMA_SUBALLOCATION_TYPE_BUFFER = 2,
-+ VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN = 3,
-+ VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR = 4,
-+ VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL = 5,
-+ VMA_SUBALLOCATION_TYPE_MAX_ENUM = 0x7FFFFFFF
-+};
-+
-+enum VMA_CACHE_OPERATION
-+{
-+ VMA_CACHE_FLUSH,
-+ VMA_CACHE_INVALIDATE
-+};
-+
-+enum class VmaAllocationRequestType
-+{
-+ Normal,
-+ TLSF,
-+ // Used by "Linear" algorithm.
-+ UpperAddress,
-+ EndOf1st,
-+ EndOf2nd,
-+};
-+
-+#endif // _VMA_ENUM_DECLARATIONS
-+
-+#ifndef _VMA_FORWARD_DECLARATIONS
-+// Opaque handle used by allocation algorithms to identify single allocation in any conforming way.
-+VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaAllocHandle)
-+
-+struct VmaMutexLock;
-+struct VmaMutexLockRead;
-+struct VmaMutexLockWrite;
-+
-+template<typename T>
-+struct AtomicTransactionalIncrement;
-+
-+template<typename T>
-+struct VmaStlAllocator;
-+
-+template<typename T, typename AllocatorT>
-+class VmaVector;
-+
-+template<typename T, typename AllocatorT, size_t N>
-+class VmaSmallVector;
-+
-+template<typename T>
-+class VmaPoolAllocator;
-+
-+template<typename T>
-+struct VmaListItem;
-+
-+template<typename T>
-+class VmaRawList;
-+
-+template<typename T, typename AllocatorT>
-+class VmaList;
-+
-+template<typename ItemTypeTraits>
-+class VmaIntrusiveLinkedList;
-+
-+// Unused in this version
-+#if 0
-+template<typename T1, typename T2>
-+struct VmaPair;
-+template<typename FirstT, typename SecondT>
-+struct VmaPairFirstLess;
-+
-+template<typename KeyT, typename ValueT>
-+class VmaMap;
-+#endif
-+
-+#if VMA_STATS_STRING_ENABLED
-+class VmaStringBuilder;
-+class VmaJsonWriter;
-+#endif
-+
-+class VmaDeviceMemoryBlock;
-+
-+struct VmaDedicatedAllocationListItemTraits;
-+class VmaDedicatedAllocationList;
-+
-+struct VmaSuballocation;
-+struct VmaSuballocationOffsetLess;
-+struct VmaSuballocationOffsetGreater;
-+struct VmaSuballocationItemSizeLess;
-+
-+typedef VmaList<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> VmaSuballocationList;
-+
-+struct VmaAllocationRequest;
-+
-+class VmaBlockMetadata;
-+class VmaBlockMetadata_Linear;
-+class VmaBlockMetadata_TLSF;
-+
-+class VmaBlockVector;
-+
-+struct VmaPoolListItemTraits;
-+
-+struct VmaCurrentBudgetData;
-+
-+class VmaAllocationObjectAllocator;
-+
-+#endif // _VMA_FORWARD_DECLARATIONS
-+
-+
-+#ifndef _VMA_FUNCTIONS
-+
-+/*
-+Returns number of bits set to 1 in (v).
-+
-+On specific platforms and compilers you can use instrinsics like:
-+
-+Visual Studio:
-+ return __popcnt(v);
-+GCC, Clang:
-+ return static_cast<uint32_t>(__builtin_popcount(v));
-+
-+Define macro VMA_COUNT_BITS_SET to provide your optimized implementation.
-+But you need to check in runtime whether user's CPU supports these, as some old processors don't.
-+*/
-+static inline uint32_t VmaCountBitsSet(uint32_t v)
-+{
-+#if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
-+ return std::popcount(v);
-+#else
-+ uint32_t c = v - ((v >> 1) & 0x55555555);
-+ c = ((c >> 2) & 0x33333333) + (c & 0x33333333);
-+ c = ((c >> 4) + c) & 0x0F0F0F0F;
-+ c = ((c >> 8) + c) & 0x00FF00FF;
-+ c = ((c >> 16) + c) & 0x0000FFFF;
-+ return c;
-+#endif
-+}
-+
-+static inline uint8_t VmaBitScanLSB(uint64_t mask)
-+{
-+#if defined(_MSC_VER) && defined(_WIN64)
-+ unsigned long pos;
-+ if (_BitScanForward64(&pos, mask))
-+ return static_cast<uint8_t>(pos);
-+ return UINT8_MAX;
-+#elif defined __GNUC__ || defined __clang__
-+ return static_cast<uint8_t>(__builtin_ffsll(mask)) - 1U;
-+#else
-+ uint8_t pos = 0;
-+ uint64_t bit = 1;
-+ do
-+ {
-+ if (mask & bit)
-+ return pos;
-+ bit <<= 1;
-+ } while (pos++ < 63);
-+ return UINT8_MAX;
-+#endif
-+}
-+
-+static inline uint8_t VmaBitScanLSB(uint32_t mask)
-+{
-+#ifdef _MSC_VER
-+ unsigned long pos;
-+ if (_BitScanForward(&pos, mask))
-+ return static_cast<uint8_t>(pos);
-+ return UINT8_MAX;
-+#elif defined __GNUC__ || defined __clang__
-+ return static_cast<uint8_t>(__builtin_ffs(mask)) - 1U;
-+#else
-+ uint8_t pos = 0;
-+ uint32_t bit = 1;
-+ do
-+ {
-+ if (mask & bit)
-+ return pos;
-+ bit <<= 1;
-+ } while (pos++ < 31);
-+ return UINT8_MAX;
-+#endif
-+}
-+
-+static inline uint8_t VmaBitScanMSB(uint64_t mask)
-+{
-+#if defined(_MSC_VER) && defined(_WIN64)
-+ unsigned long pos;
-+ if (_BitScanReverse64(&pos, mask))
-+ return static_cast<uint8_t>(pos);
-+#elif defined __GNUC__ || defined __clang__
-+ if (mask)
-+ return 63 - static_cast<uint8_t>(__builtin_clzll(mask));
-+#else
-+ uint8_t pos = 63;
-+ uint64_t bit = 1ULL << 63;
-+ do
-+ {
-+ if (mask & bit)
-+ return pos;
-+ bit >>= 1;
-+ } while (pos-- > 0);
-+#endif
-+ return UINT8_MAX;
-+}
-+
-+static inline uint8_t VmaBitScanMSB(uint32_t mask)
-+{
-+#ifdef _MSC_VER
-+ unsigned long pos;
-+ if (_BitScanReverse(&pos, mask))
-+ return static_cast<uint8_t>(pos);
-+#elif defined __GNUC__ || defined __clang__
-+ if (mask)
-+ return 31 - static_cast<uint8_t>(__builtin_clz(mask));
-+#else
-+ uint8_t pos = 31;
-+ uint32_t bit = 1UL << 31;
-+ do
-+ {
-+ if (mask & bit)
-+ return pos;
-+ bit >>= 1;
-+ } while (pos-- > 0);
-+#endif
-+ return UINT8_MAX;
-+}
-+
-+/*
-+Returns true if given number is a power of two.
-+T must be unsigned integer number or signed integer but always nonnegative.
-+For 0 returns true.
-+*/
-+template <typename T>
-+inline bool VmaIsPow2(T x)
-+{
-+ return (x & (x - 1)) == 0;
-+}
-+
-+// Aligns given value up to nearest multiply of align value. For example: VmaAlignUp(11, 8) = 16.
-+// Use types like uint32_t, uint64_t as T.
-+template <typename T>
-+static inline T VmaAlignUp(T val, T alignment)
-+{
-+ VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
-+ return (val + alignment - 1) & ~(alignment - 1);
-+}
-+
-+// Aligns given value down to nearest multiply of align value. For example: VmaAlignDown(11, 8) = 8.
-+// Use types like uint32_t, uint64_t as T.
-+template <typename T>
-+static inline T VmaAlignDown(T val, T alignment)
-+{
-+ VMA_HEAVY_ASSERT(VmaIsPow2(alignment));
-+ return val & ~(alignment - 1);
-+}
-+
-+// Division with mathematical rounding to nearest number.
-+template <typename T>
-+static inline T VmaRoundDiv(T x, T y)
-+{
-+ return (x + (y / (T)2)) / y;
-+}
-+
-+// Divide by 'y' and round up to nearest integer.
-+template <typename T>
-+static inline T VmaDivideRoundingUp(T x, T y)
-+{
-+ return (x + y - (T)1) / y;
-+}
-+
-+// Returns smallest power of 2 greater or equal to v.
-+static inline uint32_t VmaNextPow2(uint32_t v)
-+{
-+ v--;
-+ v |= v >> 1;
-+ v |= v >> 2;
-+ v |= v >> 4;
-+ v |= v >> 8;
-+ v |= v >> 16;
-+ v++;
-+ return v;
-+}
-+
-+static inline uint64_t VmaNextPow2(uint64_t v)
-+{
-+ v--;
-+ v |= v >> 1;
-+ v |= v >> 2;
-+ v |= v >> 4;
-+ v |= v >> 8;
-+ v |= v >> 16;
-+ v |= v >> 32;
-+ v++;
-+ return v;
-+}
-+
-+// Returns largest power of 2 less or equal to v.
-+static inline uint32_t VmaPrevPow2(uint32_t v)
-+{
-+ v |= v >> 1;
-+ v |= v >> 2;
-+ v |= v >> 4;
-+ v |= v >> 8;
-+ v |= v >> 16;
-+ v = v ^ (v >> 1);
-+ return v;
-+}
-+
-+static inline uint64_t VmaPrevPow2(uint64_t v)
-+{
-+ v |= v >> 1;
-+ v |= v >> 2;
-+ v |= v >> 4;
-+ v |= v >> 8;
-+ v |= v >> 16;
-+ v |= v >> 32;
-+ v = v ^ (v >> 1);
-+ return v;
-+}
-+
-+static inline bool VmaStrIsEmpty(const char* pStr)
-+{
-+ return pStr == VMA_NULL || *pStr == '\0';
-+}
-+
-+/*
-+Returns true if two memory blocks occupy overlapping pages.
-+ResourceA must be in less memory offset than ResourceB.
-+
-+Algorithm is based on "Vulkan 1.0.39 - A Specification (with all registered Vulkan extensions)"
-+chapter 11.6 "Resource Memory Association", paragraph "Buffer-Image Granularity".
-+*/
-+static inline bool VmaBlocksOnSamePage(
-+ VkDeviceSize resourceAOffset,
-+ VkDeviceSize resourceASize,
-+ VkDeviceSize resourceBOffset,
-+ VkDeviceSize pageSize)
-+{
-+ VMA_ASSERT(resourceAOffset + resourceASize <= resourceBOffset && resourceASize > 0 && pageSize > 0);
-+ VkDeviceSize resourceAEnd = resourceAOffset + resourceASize - 1;
-+ VkDeviceSize resourceAEndPage = resourceAEnd & ~(pageSize - 1);
-+ VkDeviceSize resourceBStart = resourceBOffset;
-+ VkDeviceSize resourceBStartPage = resourceBStart & ~(pageSize - 1);
-+ return resourceAEndPage == resourceBStartPage;
-+}
-+
-+/*
-+Returns true if given suballocation types could conflict and must respect
-+VkPhysicalDeviceLimits::bufferImageGranularity. They conflict if one is buffer
-+or linear image and another one is optimal image. If type is unknown, behave
-+conservatively.
-+*/
-+static inline bool VmaIsBufferImageGranularityConflict(
-+ VmaSuballocationType suballocType1,
-+ VmaSuballocationType suballocType2)
-+{
-+ if (suballocType1 > suballocType2)
-+ {
-+ VMA_SWAP(suballocType1, suballocType2);
-+ }
-+
-+ switch (suballocType1)
-+ {
-+ case VMA_SUBALLOCATION_TYPE_FREE:
-+ return false;
-+ case VMA_SUBALLOCATION_TYPE_UNKNOWN:
-+ return true;
-+ case VMA_SUBALLOCATION_TYPE_BUFFER:
-+ return
-+ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
-+ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
-+ case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
-+ return
-+ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
-+ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
-+ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
-+ case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
-+ return
-+ suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
-+ case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
-+ return false;
-+ default:
-+ VMA_ASSERT(0);
-+ return true;
-+ }
-+}
-+
-+static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
-+{
-+#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
-+ uint32_t* pDst = (uint32_t*)((char*)pData + offset);
-+ const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
-+ for (size_t i = 0; i < numberCount; ++i, ++pDst)
-+ {
-+ *pDst = VMA_CORRUPTION_DETECTION_MAGIC_VALUE;
-+ }
-+#else
-+ // no-op
-+#endif
-+}
-+
-+static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
-+{
-+#if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
-+ const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
-+ const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
-+ for (size_t i = 0; i < numberCount; ++i, ++pSrc)
-+ {
-+ if (*pSrc != VMA_CORRUPTION_DETECTION_MAGIC_VALUE)
-+ {
-+ return false;
-+ }
-+ }
-+#endif
-+ return true;
-+}
-+
-+/*
-+Fills structure with parameters of an example buffer to be used for transfers
-+during GPU memory defragmentation.
-+*/
-+static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
-+{
-+ memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
-+ outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
-+ outBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-+ outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
-+}
-+
-+
-+/*
-+Performs binary search and returns iterator to first element that is greater or
-+equal to (key), according to comparison (cmp).
-+
-+Cmp should return true if first argument is less than second argument.
-+
-+Returned value is the found element, if present in the collection or place where
-+new element with value (key) should be inserted.
-+*/
-+template <typename CmpLess, typename IterT, typename KeyT>
-+static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
-+{
-+ size_t down = 0, up = size_t(end - beg);
-+ while (down < up)
-+ {
-+ const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation
-+ if (cmp(*(beg + mid), key))
-+ {
-+ down = mid + 1;
-+ }
-+ else
-+ {
-+ up = mid;
-+ }
-+ }
-+ return beg + down;
-+}
-+
-+template<typename CmpLess, typename IterT, typename KeyT>
-+IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
-+{
-+ IterT it = VmaBinaryFindFirstNotLess<CmpLess, IterT, KeyT>(
-+ beg, end, value, cmp);
-+ if (it == end ||
-+ (!cmp(*it, value) && !cmp(value, *it)))
-+ {
-+ return it;
-+ }
-+ return end;
-+}
-+
-+/*
-+Returns true if all pointers in the array are not-null and unique.
-+Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
-+T must be pointer type, e.g. VmaAllocation, VmaPool.
-+*/
-+template<typename T>
-+static bool VmaValidatePointerArray(uint32_t count, const T* arr)
-+{
-+ for (uint32_t i = 0; i < count; ++i)
-+ {
-+ const T iPtr = arr[i];
-+ if (iPtr == VMA_NULL)
-+ {
-+ return false;
-+ }
-+ for (uint32_t j = i + 1; j < count; ++j)
-+ {
-+ if (iPtr == arr[j])
-+ {
-+ return false;
-+ }
-+ }
-+ }
-+ return true;
-+}
-+
-+template<typename MainT, typename NewT>
-+static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct)
-+{
-+ newStruct->pNext = mainStruct->pNext;
-+ mainStruct->pNext = newStruct;
-+}
-+
-+// This is the main algorithm that guides the selection of a memory type best for an allocation -
-+// converts usage to required/preferred/not preferred flags.
-+static bool FindMemoryPreferences(
-+ bool isIntegratedGPU,
-+ const VmaAllocationCreateInfo& allocCreateInfo,
-+ VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown.
-+ VkMemoryPropertyFlags& outRequiredFlags,
-+ VkMemoryPropertyFlags& outPreferredFlags,
-+ VkMemoryPropertyFlags& outNotPreferredFlags)
-+{
-+ outRequiredFlags = allocCreateInfo.requiredFlags;
-+ outPreferredFlags = allocCreateInfo.preferredFlags;
-+ outNotPreferredFlags = 0;
-+
-+ switch(allocCreateInfo.usage)
-+ {
-+ case VMA_MEMORY_USAGE_UNKNOWN:
-+ break;
-+ case VMA_MEMORY_USAGE_GPU_ONLY:
-+ if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-+ {
-+ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-+ }
-+ break;
-+ case VMA_MEMORY_USAGE_CPU_ONLY:
-+ outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
-+ break;
-+ case VMA_MEMORY_USAGE_CPU_TO_GPU:
-+ outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-+ if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-+ {
-+ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-+ }
-+ break;
-+ case VMA_MEMORY_USAGE_GPU_TO_CPU:
-+ outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-+ outPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-+ break;
-+ case VMA_MEMORY_USAGE_CPU_COPY:
-+ outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-+ break;
-+ case VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED:
-+ outRequiredFlags |= VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT;
-+ break;
-+ case VMA_MEMORY_USAGE_AUTO:
-+ case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE:
-+ case VMA_MEMORY_USAGE_AUTO_PREFER_HOST:
-+ {
-+ if(bufImgUsage == UINT32_MAX)
-+ {
-+ VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known.");
-+ return false;
-+ }
-+ // This relies on values of VK_IMAGE_USAGE_TRANSFER* being the same VK_BUFFER_IMAGE_TRANSFER*.
-+ const bool deviceAccess = (bufImgUsage & ~(VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT)) != 0;
-+ const bool hostAccessSequentialWrite = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT) != 0;
-+ const bool hostAccessRandom = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) != 0;
-+ const bool hostAccessAllowTransferInstead = (allocCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) != 0;
-+ const bool preferDevice = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE;
-+ const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST;
-+
-+ // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU.
-+ if(hostAccessRandom)
-+ {
-+ if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
-+ {
-+ // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL.
-+ // Omitting HOST_VISIBLE here is intentional.
-+ // In case there is DEVICE_LOCAL | HOST_VISIBLE | HOST_CACHED, it will pick that one.
-+ // Otherwise, this will give same weight to DEVICE_LOCAL as HOST_VISIBLE | HOST_CACHED and select the former if occurs first on the list.
-+ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-+ }
-+ else
-+ {
-+ // Always CPU memory, cached.
-+ outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-+ }
-+ }
-+ // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined.
-+ else if(hostAccessSequentialWrite)
-+ {
-+ // Want uncached and write-combined.
-+ outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-+
-+ if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost)
-+ {
-+ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-+ }
-+ else
-+ {
-+ outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-+ // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame)
-+ if(deviceAccess)
-+ {
-+ // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory.
-+ if(preferHost)
-+ outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-+ else
-+ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-+ }
-+ // GPU no direct access, CPU sequential write (e.g. an upload buffer to be transferred to the GPU)
-+ else
-+ {
-+ // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory.
-+ if(preferDevice)
-+ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-+ else
-+ outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-+ }
-+ }
-+ }
-+ // No CPU access
-+ else
-+ {
-+ // if(deviceAccess)
-+ //
-+ // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory,
-+ // unless there is a clear preference from the user not to do so.
-+ //
-+ // else:
-+ //
-+ // No direct GPU access, no CPU access, just transfers.
-+ // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or
-+ // a "swap file" copy to free some GPU memory (then better CPU memory).
-+ // Up to the user to decide. If no preferece, assume the former and choose GPU memory.
-+
-+ if(preferHost)
-+ outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-+ else
-+ outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-+ }
-+ break;
-+ }
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+
-+ // Avoid DEVICE_COHERENT unless explicitly requested.
-+ if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) &
-+ (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0)
-+ {
-+ outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY;
-+ }
-+
-+ return true;
-+}
-+
-+////////////////////////////////////////////////////////////////////////////////
-+// Memory allocation
-+
-+static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment)
-+{
-+ void* result = VMA_NULL;
-+ if ((pAllocationCallbacks != VMA_NULL) &&
-+ (pAllocationCallbacks->pfnAllocation != VMA_NULL))
-+ {
-+ result = (*pAllocationCallbacks->pfnAllocation)(
-+ pAllocationCallbacks->pUserData,
-+ size,
-+ alignment,
-+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
-+ }
-+ else
-+ {
-+ result = VMA_SYSTEM_ALIGNED_MALLOC(size, alignment);
-+ }
-+ VMA_ASSERT(result != VMA_NULL && "CPU memory allocation failed.");
-+ return result;
-+}
-+
-+static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr)
-+{
-+ if ((pAllocationCallbacks != VMA_NULL) &&
-+ (pAllocationCallbacks->pfnFree != VMA_NULL))
-+ {
-+ (*pAllocationCallbacks->pfnFree)(pAllocationCallbacks->pUserData, ptr);
-+ }
-+ else
-+ {
-+ VMA_SYSTEM_ALIGNED_FREE(ptr);
-+ }
-+}
-+
-+template<typename T>
-+static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks)
-+{
-+ return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T));
-+}
-+
-+template<typename T>
-+static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count)
-+{
-+ return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T));
-+}
-+
-+#define vma_new(allocator, type) new(VmaAllocate<type>(allocator))(type)
-+
-+#define vma_new_array(allocator, type, count) new(VmaAllocateArray<type>((allocator), (count)))(type)
-+
-+template<typename T>
-+static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr)
-+{
-+ ptr->~T();
-+ VmaFree(pAllocationCallbacks, ptr);
-+}
-+
-+template<typename T>
-+static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count)
-+{
-+ if (ptr != VMA_NULL)
-+ {
-+ for (size_t i = count; i--; )
-+ {
-+ ptr[i].~T();
-+ }
-+ VmaFree(pAllocationCallbacks, ptr);
-+ }
-+}
-+
-+static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr)
-+{
-+ if (srcStr != VMA_NULL)
-+ {
-+ const size_t len = strlen(srcStr);
-+ char* const result = vma_new_array(allocs, char, len + 1);
-+ memcpy(result, srcStr, len + 1);
-+ return result;
-+ }
-+ return VMA_NULL;
-+}
-+
-+#if VMA_STATS_STRING_ENABLED
-+static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen)
-+{
-+ if (srcStr != VMA_NULL)
-+ {
-+ char* const result = vma_new_array(allocs, char, strLen + 1);
-+ memcpy(result, srcStr, strLen);
-+ result[strLen] = '\0';
-+ return result;
-+ }
-+ return VMA_NULL;
-+}
-+#endif // VMA_STATS_STRING_ENABLED
-+
-+static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str)
-+{
-+ if (str != VMA_NULL)
-+ {
-+ const size_t len = strlen(str);
-+ vma_delete_array(allocs, str, len + 1);
-+ }
-+}
-+
-+template<typename CmpLess, typename VectorT>
-+size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value)
-+{
-+ const size_t indexToInsert = VmaBinaryFindFirstNotLess(
-+ vector.data(),
-+ vector.data() + vector.size(),
-+ value,
-+ CmpLess()) - vector.data();
-+ VmaVectorInsert(vector, indexToInsert, value);
-+ return indexToInsert;
-+}
-+
-+template<typename CmpLess, typename VectorT>
-+bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value)
-+{
-+ CmpLess comparator;
-+ typename VectorT::iterator it = VmaBinaryFindFirstNotLess(
-+ vector.begin(),
-+ vector.end(),
-+ value,
-+ comparator);
-+ if ((it != vector.end()) && !comparator(*it, value) && !comparator(value, *it))
-+ {
-+ size_t indexToRemove = it - vector.begin();
-+ VmaVectorRemove(vector, indexToRemove);
-+ return true;
-+ }
-+ return false;
-+}
-+#endif // _VMA_FUNCTIONS
-+
-+#ifndef _VMA_STATISTICS_FUNCTIONS
-+
-+static void VmaClearStatistics(VmaStatistics& outStats)
-+{
-+ outStats.blockCount = 0;
-+ outStats.allocationCount = 0;
-+ outStats.blockBytes = 0;
-+ outStats.allocationBytes = 0;
-+}
-+
-+static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src)
-+{
-+ inoutStats.blockCount += src.blockCount;
-+ inoutStats.allocationCount += src.allocationCount;
-+ inoutStats.blockBytes += src.blockBytes;
-+ inoutStats.allocationBytes += src.allocationBytes;
-+}
-+
-+static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats)
-+{
-+ VmaClearStatistics(outStats.statistics);
-+ outStats.unusedRangeCount = 0;
-+ outStats.allocationSizeMin = VK_WHOLE_SIZE;
-+ outStats.allocationSizeMax = 0;
-+ outStats.unusedRangeSizeMin = VK_WHOLE_SIZE;
-+ outStats.unusedRangeSizeMax = 0;
-+}
-+
-+static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
-+{
-+ inoutStats.statistics.allocationCount++;
-+ inoutStats.statistics.allocationBytes += size;
-+ inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, size);
-+ inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size);
-+}
-+
-+static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize size)
-+{
-+ inoutStats.unusedRangeCount++;
-+ inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size);
-+ inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size);
-+}
-+
-+static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src)
-+{
-+ VmaAddStatistics(inoutStats.statistics, src.statistics);
-+ inoutStats.unusedRangeCount += src.unusedRangeCount;
-+ inoutStats.allocationSizeMin = VMA_MIN(inoutStats.allocationSizeMin, src.allocationSizeMin);
-+ inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, src.allocationSizeMax);
-+ inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, src.unusedRangeSizeMin);
-+ inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, src.unusedRangeSizeMax);
-+}
-+
-+#endif // _VMA_STATISTICS_FUNCTIONS
-+
-+#ifndef _VMA_MUTEX_LOCK
-+// Helper RAII class to lock a mutex in constructor and unlock it in destructor (at the end of scope).
-+struct VmaMutexLock
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLock)
-+public:
-+ VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) :
-+ m_pMutex(useMutex ? &mutex : VMA_NULL)
-+ {
-+ if (m_pMutex) { m_pMutex->Lock(); }
-+ }
-+ ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } }
-+
-+private:
-+ VMA_MUTEX* m_pMutex;
-+};
-+
-+// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading.
-+struct VmaMutexLockRead
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockRead)
-+public:
-+ VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) :
-+ m_pMutex(useMutex ? &mutex : VMA_NULL)
-+ {
-+ if (m_pMutex) { m_pMutex->LockRead(); }
-+ }
-+ ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } }
-+
-+private:
-+ VMA_RW_MUTEX* m_pMutex;
-+};
-+
-+// Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing.
-+struct VmaMutexLockWrite
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockWrite)
-+public:
-+ VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex)
-+ : m_pMutex(useMutex ? &mutex : VMA_NULL)
-+ {
-+ if (m_pMutex) { m_pMutex->LockWrite(); }
-+ }
-+ ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } }
-+
-+private:
-+ VMA_RW_MUTEX* m_pMutex;
-+};
-+
-+#if VMA_DEBUG_GLOBAL_MUTEX
-+ static VMA_MUTEX gDebugGlobalMutex;
-+ #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true);
-+#else
-+ #define VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+#endif
-+#endif // _VMA_MUTEX_LOCK
-+
-+#ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
-+// An object that increments given atomic but decrements it back in the destructor unless Commit() is called.
-+template<typename AtomicT>
-+struct AtomicTransactionalIncrement
-+{
-+public:
-+ using T = decltype(AtomicT().load());
-+
-+ ~AtomicTransactionalIncrement()
-+ {
-+ if(m_Atomic)
-+ --(*m_Atomic);
-+ }
-+
-+ void Commit() { m_Atomic = nullptr; }
-+ T Increment(AtomicT* atomic)
-+ {
-+ m_Atomic = atomic;
-+ return m_Atomic->fetch_add(1);
-+ }
-+
-+private:
-+ AtomicT* m_Atomic = nullptr;
-+};
-+#endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT
-+
-+#ifndef _VMA_STL_ALLOCATOR
-+// STL-compatible allocator.
-+template<typename T>
-+struct VmaStlAllocator
-+{
-+ const VkAllocationCallbacks* const m_pCallbacks;
-+ typedef T value_type;
-+
-+ VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {}
-+ template<typename U>
-+ VmaStlAllocator(const VmaStlAllocator<U>& src) : m_pCallbacks(src.m_pCallbacks) {}
-+ VmaStlAllocator(const VmaStlAllocator&) = default;
-+ VmaStlAllocator& operator=(const VmaStlAllocator&) = delete;
-+
-+ T* allocate(size_t n) { return VmaAllocateArray<T>(m_pCallbacks, n); }
-+ void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); }
-+
-+ template<typename U>
-+ bool operator==(const VmaStlAllocator<U>& rhs) const
-+ {
-+ return m_pCallbacks == rhs.m_pCallbacks;
-+ }
-+ template<typename U>
-+ bool operator!=(const VmaStlAllocator<U>& rhs) const
-+ {
-+ return m_pCallbacks != rhs.m_pCallbacks;
-+ }
-+};
-+#endif // _VMA_STL_ALLOCATOR
-+
-+#ifndef _VMA_VECTOR
-+/* Class with interface compatible with subset of std::vector.
-+T must be POD because constructors and destructors are not called and memcpy is
-+used for these objects. */
-+template<typename T, typename AllocatorT>
-+class VmaVector
-+{
-+public:
-+ typedef T value_type;
-+ typedef T* iterator;
-+ typedef const T* const_iterator;
-+
-+ VmaVector(const AllocatorT& allocator);
-+ VmaVector(size_t count, const AllocatorT& allocator);
-+ // This version of the constructor is here for compatibility with pre-C++14 std::vector.
-+ // value is unused.
-+ VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {}
-+ VmaVector(const VmaVector<T, AllocatorT>& src);
-+ VmaVector& operator=(const VmaVector& rhs);
-+ ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); }
-+
-+ bool empty() const { return m_Count == 0; }
-+ size_t size() const { return m_Count; }
-+ T* data() { return m_pArray; }
-+ T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
-+ T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }
-+ const T* data() const { return m_pArray; }
-+ const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; }
-+ const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; }
-+
-+ iterator begin() { return m_pArray; }
-+ iterator end() { return m_pArray + m_Count; }
-+ const_iterator cbegin() const { return m_pArray; }
-+ const_iterator cend() const { return m_pArray + m_Count; }
-+ const_iterator begin() const { return cbegin(); }
-+ const_iterator end() const { return cend(); }
-+
-+ void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
-+ void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
-+ void push_front(const T& src) { insert(0, src); }
-+
-+ void push_back(const T& src);
-+ void reserve(size_t newCapacity, bool freeMemory = false);
-+ void resize(size_t newCount);
-+ void clear() { resize(0); }
-+ void shrink_to_fit();
-+ void insert(size_t index, const T& src);
-+ void remove(size_t index);
-+
-+ T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }
-+ const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; }
-+
-+private:
-+ AllocatorT m_Allocator;
-+ T* m_pArray;
-+ size_t m_Count;
-+ size_t m_Capacity;
-+};
-+
-+#ifndef _VMA_VECTOR_FUNCTIONS
-+template<typename T, typename AllocatorT>
-+VmaVector<T, AllocatorT>::VmaVector(const AllocatorT& allocator)
-+ : m_Allocator(allocator),
-+ m_pArray(VMA_NULL),
-+ m_Count(0),
-+ m_Capacity(0) {}
-+
-+template<typename T, typename AllocatorT>
-+VmaVector<T, AllocatorT>::VmaVector(size_t count, const AllocatorT& allocator)
-+ : m_Allocator(allocator),
-+ m_pArray(count ? (T*)VmaAllocateArray<T>(allocator.m_pCallbacks, count) : VMA_NULL),
-+ m_Count(count),
-+ m_Capacity(count) {}
-+
-+template<typename T, typename AllocatorT>
-+VmaVector<T, AllocatorT>::VmaVector(const VmaVector& src)
-+ : m_Allocator(src.m_Allocator),
-+ m_pArray(src.m_Count ? (T*)VmaAllocateArray<T>(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL),
-+ m_Count(src.m_Count),
-+ m_Capacity(src.m_Count)
-+{
-+ if (m_Count != 0)
-+ {
-+ memcpy(m_pArray, src.m_pArray, m_Count * sizeof(T));
-+ }
-+}
-+
-+template<typename T, typename AllocatorT>
-+VmaVector<T, AllocatorT>& VmaVector<T, AllocatorT>::operator=(const VmaVector& rhs)
-+{
-+ if (&rhs != this)
-+ {
-+ resize(rhs.m_Count);
-+ if (m_Count != 0)
-+ {
-+ memcpy(m_pArray, rhs.m_pArray, m_Count * sizeof(T));
-+ }
-+ }
-+ return *this;
-+}
-+
-+template<typename T, typename AllocatorT>
-+void VmaVector<T, AllocatorT>::push_back(const T& src)
-+{
-+ const size_t newIndex = size();
-+ resize(newIndex + 1);
-+ m_pArray[newIndex] = src;
-+}
-+
-+template<typename T, typename AllocatorT>
-+void VmaVector<T, AllocatorT>::reserve(size_t newCapacity, bool freeMemory)
-+{
-+ newCapacity = VMA_MAX(newCapacity, m_Count);
-+
-+ if ((newCapacity < m_Capacity) && !freeMemory)
-+ {
-+ newCapacity = m_Capacity;
-+ }
-+
-+ if (newCapacity != m_Capacity)
-+ {
-+ T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator, newCapacity) : VMA_NULL;
-+ if (m_Count != 0)
-+ {
-+ memcpy(newArray, m_pArray, m_Count * sizeof(T));
-+ }
-+ VmaFree(m_Allocator.m_pCallbacks, m_pArray);
-+ m_Capacity = newCapacity;
-+ m_pArray = newArray;
-+ }
-+}
-+
-+template<typename T, typename AllocatorT>
-+void VmaVector<T, AllocatorT>::resize(size_t newCount)
-+{
-+ size_t newCapacity = m_Capacity;
-+ if (newCount > m_Capacity)
-+ {
-+ newCapacity = VMA_MAX(newCount, VMA_MAX(m_Capacity * 3 / 2, (size_t)8));
-+ }
-+
-+ if (newCapacity != m_Capacity)
-+ {
-+ T* const newArray = newCapacity ? VmaAllocateArray<T>(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL;
-+ const size_t elementsToCopy = VMA_MIN(m_Count, newCount);
-+ if (elementsToCopy != 0)
-+ {
-+ memcpy(newArray, m_pArray, elementsToCopy * sizeof(T));
-+ }
-+ VmaFree(m_Allocator.m_pCallbacks, m_pArray);
-+ m_Capacity = newCapacity;
-+ m_pArray = newArray;
-+ }
-+
-+ m_Count = newCount;
-+}
-+
-+template<typename T, typename AllocatorT>
-+void VmaVector<T, AllocatorT>::shrink_to_fit()
-+{
-+ if (m_Capacity > m_Count)
-+ {
-+ T* newArray = VMA_NULL;
-+ if (m_Count > 0)
-+ {
-+ newArray = VmaAllocateArray<T>(m_Allocator.m_pCallbacks, m_Count);
-+ memcpy(newArray, m_pArray, m_Count * sizeof(T));
-+ }
-+ VmaFree(m_Allocator.m_pCallbacks, m_pArray);
-+ m_Capacity = m_Count;
-+ m_pArray = newArray;
-+ }
-+}
-+
-+template<typename T, typename AllocatorT>
-+void VmaVector<T, AllocatorT>::insert(size_t index, const T& src)
-+{
-+ VMA_HEAVY_ASSERT(index <= m_Count);
-+ const size_t oldCount = size();
-+ resize(oldCount + 1);
-+ if (index < oldCount)
-+ {
-+ memmove(m_pArray + (index + 1), m_pArray + index, (oldCount - index) * sizeof(T));
-+ }
-+ m_pArray[index] = src;
-+}
-+
-+template<typename T, typename AllocatorT>
-+void VmaVector<T, AllocatorT>::remove(size_t index)
-+{
-+ VMA_HEAVY_ASSERT(index < m_Count);
-+ const size_t oldCount = size();
-+ if (index < oldCount - 1)
-+ {
-+ memmove(m_pArray + index, m_pArray + (index + 1), (oldCount - index - 1) * sizeof(T));
-+ }
-+ resize(oldCount - 1);
-+}
-+#endif // _VMA_VECTOR_FUNCTIONS
-+
-+template<typename T, typename allocatorT>
-+static void VmaVectorInsert(VmaVector<T, allocatorT>& vec, size_t index, const T& item)
-+{
-+ vec.insert(index, item);
-+}
-+
-+template<typename T, typename allocatorT>
-+static void VmaVectorRemove(VmaVector<T, allocatorT>& vec, size_t index)
-+{
-+ vec.remove(index);
-+}
-+#endif // _VMA_VECTOR
-+
-+#ifndef _VMA_SMALL_VECTOR
-+/*
-+This is a vector (a variable-sized array), optimized for the case when the array is small.
-+
-+It contains some number of elements in-place, which allows it to avoid heap allocation
-+when the actual number of elements is below that threshold. This allows normal "small"
-+cases to be fast without losing generality for large inputs.
-+*/
-+template<typename T, typename AllocatorT, size_t N>
-+class VmaSmallVector
-+{
-+public:
-+ typedef T value_type;
-+ typedef T* iterator;
-+
-+ VmaSmallVector(const AllocatorT& allocator);
-+ VmaSmallVector(size_t count, const AllocatorT& allocator);
-+ template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
-+ VmaSmallVector(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
-+ template<typename SrcT, typename SrcAllocatorT, size_t SrcN>
-+ VmaSmallVector<T, AllocatorT, N>& operator=(const VmaSmallVector<SrcT, SrcAllocatorT, SrcN>&) = delete;
-+ ~VmaSmallVector() = default;
-+
-+ bool empty() const { return m_Count == 0; }
-+ size_t size() const { return m_Count; }
-+ T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
-+ T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
-+ T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }
-+ const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; }
-+ const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; }
-+ const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; }
-+
-+ iterator begin() { return data(); }
-+ iterator end() { return data() + m_Count; }
-+
-+ void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); }
-+ void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); }
-+ void push_front(const T& src) { insert(0, src); }
-+
-+ void push_back(const T& src);
-+ void resize(size_t newCount, bool freeMemory = false);
-+ void clear(bool freeMemory = false);
-+ void insert(size_t index, const T& src);
-+ void remove(size_t index);
-+
-+ T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }
-+ const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; }
-+
-+private:
-+ size_t m_Count;
-+ T m_StaticArray[N]; // Used when m_Size <= N
-+ VmaVector<T, AllocatorT> m_DynamicArray; // Used when m_Size > N
-+};
-+
-+#ifndef _VMA_SMALL_VECTOR_FUNCTIONS
-+template<typename T, typename AllocatorT, size_t N>
-+VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(const AllocatorT& allocator)
-+ : m_Count(0),
-+ m_DynamicArray(allocator) {}
-+
-+template<typename T, typename AllocatorT, size_t N>
-+VmaSmallVector<T, AllocatorT, N>::VmaSmallVector(size_t count, const AllocatorT& allocator)
-+ : m_Count(count),
-+ m_DynamicArray(count > N ? count : 0, allocator) {}
-+
-+template<typename T, typename AllocatorT, size_t N>
-+void VmaSmallVector<T, AllocatorT, N>::push_back(const T& src)
-+{
-+ const size_t newIndex = size();
-+ resize(newIndex + 1);
-+ data()[newIndex] = src;
-+}
-+
-+template<typename T, typename AllocatorT, size_t N>
-+void VmaSmallVector<T, AllocatorT, N>::resize(size_t newCount, bool freeMemory)
-+{
-+ if (newCount > N && m_Count > N)
-+ {
-+ // Any direction, staying in m_DynamicArray
-+ m_DynamicArray.resize(newCount);
-+ if (freeMemory)
-+ {
-+ m_DynamicArray.shrink_to_fit();
-+ }
-+ }
-+ else if (newCount > N && m_Count <= N)
-+ {
-+ // Growing, moving from m_StaticArray to m_DynamicArray
-+ m_DynamicArray.resize(newCount);
-+ if (m_Count > 0)
-+ {
-+ memcpy(m_DynamicArray.data(), m_StaticArray, m_Count * sizeof(T));
-+ }
-+ }
-+ else if (newCount <= N && m_Count > N)
-+ {
-+ // Shrinking, moving from m_DynamicArray to m_StaticArray
-+ if (newCount > 0)
-+ {
-+ memcpy(m_StaticArray, m_DynamicArray.data(), newCount * sizeof(T));
-+ }
-+ m_DynamicArray.resize(0);
-+ if (freeMemory)
-+ {
-+ m_DynamicArray.shrink_to_fit();
-+ }
-+ }
-+ else
-+ {
-+ // Any direction, staying in m_StaticArray - nothing to do here
-+ }
-+ m_Count = newCount;
-+}
-+
-+template<typename T, typename AllocatorT, size_t N>
-+void VmaSmallVector<T, AllocatorT, N>::clear(bool freeMemory)
-+{
-+ m_DynamicArray.clear();
-+ if (freeMemory)
-+ {
-+ m_DynamicArray.shrink_to_fit();
-+ }
-+ m_Count = 0;
-+}
-+
-+template<typename T, typename AllocatorT, size_t N>
-+void VmaSmallVector<T, AllocatorT, N>::insert(size_t index, const T& src)
-+{
-+ VMA_HEAVY_ASSERT(index <= m_Count);
-+ const size_t oldCount = size();
-+ resize(oldCount + 1);
-+ T* const dataPtr = data();
-+ if (index < oldCount)
-+ {
-+ // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray.
-+ memmove(dataPtr + (index + 1), dataPtr + index, (oldCount - index) * sizeof(T));
-+ }
-+ dataPtr[index] = src;
-+}
-+
-+template<typename T, typename AllocatorT, size_t N>
-+void VmaSmallVector<T, AllocatorT, N>::remove(size_t index)
-+{
-+ VMA_HEAVY_ASSERT(index < m_Count);
-+ const size_t oldCount = size();
-+ if (index < oldCount - 1)
-+ {
-+ // I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray.
-+ T* const dataPtr = data();
-+ memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T));
-+ }
-+ resize(oldCount - 1);
-+}
-+#endif // _VMA_SMALL_VECTOR_FUNCTIONS
-+#endif // _VMA_SMALL_VECTOR
-+
-+#ifndef _VMA_POOL_ALLOCATOR
-+/*
-+Allocator for objects of type T using a list of arrays (pools) to speed up
-+allocation. Number of elements that can be allocated is not bounded because
-+allocator can create multiple blocks.
-+*/
-+template<typename T>
-+class VmaPoolAllocator
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaPoolAllocator)
-+public:
-+ VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity);
-+ ~VmaPoolAllocator();
-+ template<typename... Types> T* Alloc(Types&&... args);
-+ void Free(T* ptr);
-+
-+private:
-+ union Item
-+ {
-+ uint32_t NextFreeIndex;
-+ alignas(T) char Value[sizeof(T)];
-+ };
-+ struct ItemBlock
-+ {
-+ Item* pItems;
-+ uint32_t Capacity;
-+ uint32_t FirstFreeIndex;
-+ };
-+
-+ const VkAllocationCallbacks* m_pAllocationCallbacks;
-+ const uint32_t m_FirstBlockCapacity;
-+ VmaVector<ItemBlock, VmaStlAllocator<ItemBlock>> m_ItemBlocks;
-+
-+ ItemBlock& CreateNewBlock();
-+};
-+
-+#ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS
-+template<typename T>
-+VmaPoolAllocator<T>::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity)
-+ : m_pAllocationCallbacks(pAllocationCallbacks),
-+ m_FirstBlockCapacity(firstBlockCapacity),
-+ m_ItemBlocks(VmaStlAllocator<ItemBlock>(pAllocationCallbacks))
-+{
-+ VMA_ASSERT(m_FirstBlockCapacity > 1);
-+}
-+
-+template<typename T>
-+VmaPoolAllocator<T>::~VmaPoolAllocator()
-+{
-+ for (size_t i = m_ItemBlocks.size(); i--;)
-+ vma_delete_array(m_pAllocationCallbacks, m_ItemBlocks[i].pItems, m_ItemBlocks[i].Capacity);
-+ m_ItemBlocks.clear();
-+}
-+
-+template<typename T>
-+template<typename... Types> T* VmaPoolAllocator<T>::Alloc(Types&&... args)
-+{
-+ for (size_t i = m_ItemBlocks.size(); i--; )
-+ {
-+ ItemBlock& block = m_ItemBlocks[i];
-+ // This block has some free items: Use first one.
-+ if (block.FirstFreeIndex != UINT32_MAX)
-+ {
-+ Item* const pItem = &block.pItems[block.FirstFreeIndex];
-+ block.FirstFreeIndex = pItem->NextFreeIndex;
-+ T* result = (T*)&pItem->Value;
-+ new(result)T(std::forward<Types>(args)...); // Explicit constructor call.
-+ return result;
-+ }
-+ }
-+
-+ // No block has free item: Create new one and use it.
-+ ItemBlock& newBlock = CreateNewBlock();
-+ Item* const pItem = &newBlock.pItems[0];
-+ newBlock.FirstFreeIndex = pItem->NextFreeIndex;
-+ T* result = (T*)&pItem->Value;
-+ new(result) T(std::forward<Types>(args)...); // Explicit constructor call.
-+ return result;
-+}
-+
-+template<typename T>
-+void VmaPoolAllocator<T>::Free(T* ptr)
-+{
-+ // Search all memory blocks to find ptr.
-+ for (size_t i = m_ItemBlocks.size(); i--; )
-+ {
-+ ItemBlock& block = m_ItemBlocks[i];
-+
-+ // Casting to union.
-+ Item* pItemPtr;
-+ memcpy(&pItemPtr, &ptr, sizeof(pItemPtr));
-+
-+ // Check if pItemPtr is in address range of this block.
-+ if ((pItemPtr >= block.pItems) && (pItemPtr < block.pItems + block.Capacity))
-+ {
-+ ptr->~T(); // Explicit destructor call.
-+ const uint32_t index = static_cast<uint32_t>(pItemPtr - block.pItems);
-+ pItemPtr->NextFreeIndex = block.FirstFreeIndex;
-+ block.FirstFreeIndex = index;
-+ return;
-+ }
-+ }
-+ VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool.");
-+}
-+
-+template<typename T>
-+typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
-+{
-+ const uint32_t newBlockCapacity = m_ItemBlocks.empty() ?
-+ m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
-+
-+ const ItemBlock newBlock =
-+ {
-+ vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
-+ newBlockCapacity,
-+ 0
-+ };
-+
-+ m_ItemBlocks.push_back(newBlock);
-+
-+ // Setup singly-linked list of all free items in this block.
-+ for (uint32_t i = 0; i < newBlockCapacity - 1; ++i)
-+ newBlock.pItems[i].NextFreeIndex = i + 1;
-+ newBlock.pItems[newBlockCapacity - 1].NextFreeIndex = UINT32_MAX;
-+ return m_ItemBlocks.back();
-+}
-+#endif // _VMA_POOL_ALLOCATOR_FUNCTIONS
-+#endif // _VMA_POOL_ALLOCATOR
-+
-+#ifndef _VMA_RAW_LIST
-+template<typename T>
-+struct VmaListItem
-+{
-+ VmaListItem* pPrev;
-+ VmaListItem* pNext;
-+ T Value;
-+};
-+
-+// Doubly linked list.
-+template<typename T>
-+class VmaRawList
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaRawList)
-+public:
-+ typedef VmaListItem<T> ItemType;
-+
-+ VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
-+ // Intentionally not calling Clear, because that would be unnecessary
-+ // computations to return all items to m_ItemAllocator as free.
-+ ~VmaRawList() = default;
-+
-+ size_t GetCount() const { return m_Count; }
-+ bool IsEmpty() const { return m_Count == 0; }
-+
-+ ItemType* Front() { return m_pFront; }
-+ ItemType* Back() { return m_pBack; }
-+ const ItemType* Front() const { return m_pFront; }
-+ const ItemType* Back() const { return m_pBack; }
-+
-+ ItemType* PushFront();
-+ ItemType* PushBack();
-+ ItemType* PushFront(const T& value);
-+ ItemType* PushBack(const T& value);
-+ void PopFront();
-+ void PopBack();
-+
-+ // Item can be null - it means PushBack.
-+ ItemType* InsertBefore(ItemType* pItem);
-+ // Item can be null - it means PushFront.
-+ ItemType* InsertAfter(ItemType* pItem);
-+ ItemType* InsertBefore(ItemType* pItem, const T& value);
-+ ItemType* InsertAfter(ItemType* pItem, const T& value);
-+
-+ void Clear();
-+ void Remove(ItemType* pItem);
-+
-+private:
-+ const VkAllocationCallbacks* const m_pAllocationCallbacks;
-+ VmaPoolAllocator<ItemType> m_ItemAllocator;
-+ ItemType* m_pFront;
-+ ItemType* m_pBack;
-+ size_t m_Count;
-+};
-+
-+#ifndef _VMA_RAW_LIST_FUNCTIONS
-+template<typename T>
-+VmaRawList<T>::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks)
-+ : m_pAllocationCallbacks(pAllocationCallbacks),
-+ m_ItemAllocator(pAllocationCallbacks, 128),
-+ m_pFront(VMA_NULL),
-+ m_pBack(VMA_NULL),
-+ m_Count(0) {}
-+
-+template<typename T>
-+VmaListItem<T>* VmaRawList<T>::PushFront()
-+{
-+ ItemType* const pNewItem = m_ItemAllocator.Alloc();
-+ pNewItem->pPrev = VMA_NULL;
-+ if (IsEmpty())
-+ {
-+ pNewItem->pNext = VMA_NULL;
-+ m_pFront = pNewItem;
-+ m_pBack = pNewItem;
-+ m_Count = 1;
-+ }
-+ else
-+ {
-+ pNewItem->pNext = m_pFront;
-+ m_pFront->pPrev = pNewItem;
-+ m_pFront = pNewItem;
-+ ++m_Count;
-+ }
-+ return pNewItem;
-+}
-+
-+template<typename T>
-+VmaListItem<T>* VmaRawList<T>::PushBack()
-+{
-+ ItemType* const pNewItem = m_ItemAllocator.Alloc();
-+ pNewItem->pNext = VMA_NULL;
-+ if(IsEmpty())
-+ {
-+ pNewItem->pPrev = VMA_NULL;
-+ m_pFront = pNewItem;
-+ m_pBack = pNewItem;
-+ m_Count = 1;
-+ }
-+ else
-+ {
-+ pNewItem->pPrev = m_pBack;
-+ m_pBack->pNext = pNewItem;
-+ m_pBack = pNewItem;
-+ ++m_Count;
-+ }
-+ return pNewItem;
-+}
-+
-+template<typename T>
-+VmaListItem<T>* VmaRawList<T>::PushFront(const T& value)
-+{
-+ ItemType* const pNewItem = PushFront();
-+ pNewItem->Value = value;
-+ return pNewItem;
-+}
-+
-+template<typename T>
-+VmaListItem<T>* VmaRawList<T>::PushBack(const T& value)
-+{
-+ ItemType* const pNewItem = PushBack();
-+ pNewItem->Value = value;
-+ return pNewItem;
-+}
-+
-+template<typename T>
-+void VmaRawList<T>::PopFront()
-+{
-+ VMA_HEAVY_ASSERT(m_Count > 0);
-+ ItemType* const pFrontItem = m_pFront;
-+ ItemType* const pNextItem = pFrontItem->pNext;
-+ if (pNextItem != VMA_NULL)
-+ {
-+ pNextItem->pPrev = VMA_NULL;
-+ }
-+ m_pFront = pNextItem;
-+ m_ItemAllocator.Free(pFrontItem);
-+ --m_Count;
-+}
-+
-+template<typename T>
-+void VmaRawList<T>::PopBack()
-+{
-+ VMA_HEAVY_ASSERT(m_Count > 0);
-+ ItemType* const pBackItem = m_pBack;
-+ ItemType* const pPrevItem = pBackItem->pPrev;
-+ if(pPrevItem != VMA_NULL)
-+ {
-+ pPrevItem->pNext = VMA_NULL;
-+ }
-+ m_pBack = pPrevItem;
-+ m_ItemAllocator.Free(pBackItem);
-+ --m_Count;
-+}
-+
-+template<typename T>
-+void VmaRawList<T>::Clear()
-+{
-+ if (IsEmpty() == false)
-+ {
-+ ItemType* pItem = m_pBack;
-+ while (pItem != VMA_NULL)
-+ {
-+ ItemType* const pPrevItem = pItem->pPrev;
-+ m_ItemAllocator.Free(pItem);
-+ pItem = pPrevItem;
-+ }
-+ m_pFront = VMA_NULL;
-+ m_pBack = VMA_NULL;
-+ m_Count = 0;
-+ }
-+}
-+
-+template<typename T>
-+void VmaRawList<T>::Remove(ItemType* pItem)
-+{
-+ VMA_HEAVY_ASSERT(pItem != VMA_NULL);
-+ VMA_HEAVY_ASSERT(m_Count > 0);
-+
-+ if(pItem->pPrev != VMA_NULL)
-+ {
-+ pItem->pPrev->pNext = pItem->pNext;
-+ }
-+ else
-+ {
-+ VMA_HEAVY_ASSERT(m_pFront == pItem);
-+ m_pFront = pItem->pNext;
-+ }
-+
-+ if(pItem->pNext != VMA_NULL)
-+ {
-+ pItem->pNext->pPrev = pItem->pPrev;
-+ }
-+ else
-+ {
-+ VMA_HEAVY_ASSERT(m_pBack == pItem);
-+ m_pBack = pItem->pPrev;
-+ }
-+
-+ m_ItemAllocator.Free(pItem);
-+ --m_Count;
-+}
-+
-+template<typename T>
-+VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem)
-+{
-+ if(pItem != VMA_NULL)
-+ {
-+ ItemType* const prevItem = pItem->pPrev;
-+ ItemType* const newItem = m_ItemAllocator.Alloc();
-+ newItem->pPrev = prevItem;
-+ newItem->pNext = pItem;
-+ pItem->pPrev = newItem;
-+ if(prevItem != VMA_NULL)
-+ {
-+ prevItem->pNext = newItem;
-+ }
-+ else
-+ {
-+ VMA_HEAVY_ASSERT(m_pFront == pItem);
-+ m_pFront = newItem;
-+ }
-+ ++m_Count;
-+ return newItem;
-+ }
-+ else
-+ return PushBack();
-+}
-+
-+template<typename T>
-+VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem)
-+{
-+ if(pItem != VMA_NULL)
-+ {
-+ ItemType* const nextItem = pItem->pNext;
-+ ItemType* const newItem = m_ItemAllocator.Alloc();
-+ newItem->pNext = nextItem;
-+ newItem->pPrev = pItem;
-+ pItem->pNext = newItem;
-+ if(nextItem != VMA_NULL)
-+ {
-+ nextItem->pPrev = newItem;
-+ }
-+ else
-+ {
-+ VMA_HEAVY_ASSERT(m_pBack == pItem);
-+ m_pBack = newItem;
-+ }
-+ ++m_Count;
-+ return newItem;
-+ }
-+ else
-+ return PushFront();
-+}
-+
-+template<typename T>
-+VmaListItem<T>* VmaRawList<T>::InsertBefore(ItemType* pItem, const T& value)
-+{
-+ ItemType* const newItem = InsertBefore(pItem);
-+ newItem->Value = value;
-+ return newItem;
-+}
-+
-+template<typename T>
-+VmaListItem<T>* VmaRawList<T>::InsertAfter(ItemType* pItem, const T& value)
-+{
-+ ItemType* const newItem = InsertAfter(pItem);
-+ newItem->Value = value;
-+ return newItem;
-+}
-+#endif // _VMA_RAW_LIST_FUNCTIONS
-+#endif // _VMA_RAW_LIST
-+
-+#ifndef _VMA_LIST
-+template<typename T, typename AllocatorT>
-+class VmaList
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaList)
-+public:
-+ class reverse_iterator;
-+ class const_iterator;
-+ class const_reverse_iterator;
-+
-+ class iterator
-+ {
-+ friend class const_iterator;
-+ friend class VmaList<T, AllocatorT>;
-+ public:
-+ iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
-+ iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
-+
-+ T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
-+ T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
-+
-+ bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
-+ bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
-+
-+ iterator operator++(int) { iterator result = *this; ++*this; return result; }
-+ iterator operator--(int) { iterator result = *this; --*this; return result; }
-+
-+ iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
-+ iterator& operator--();
-+
-+ private:
-+ VmaRawList<T>* m_pList;
-+ VmaListItem<T>* m_pItem;
-+
-+ iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
-+ };
-+ class reverse_iterator
-+ {
-+ friend class const_reverse_iterator;
-+ friend class VmaList<T, AllocatorT>;
-+ public:
-+ reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
-+ reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
-+
-+ T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
-+ T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
-+
-+ bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
-+ bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
-+
-+ reverse_iterator operator++(int) { reverse_iterator result = *this; ++* this; return result; }
-+ reverse_iterator operator--(int) { reverse_iterator result = *this; --* this; return result; }
-+
-+ reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
-+ reverse_iterator& operator--();
-+
-+ private:
-+ VmaRawList<T>* m_pList;
-+ VmaListItem<T>* m_pItem;
-+
-+ reverse_iterator(VmaRawList<T>* pList, VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
-+ };
-+ class const_iterator
-+ {
-+ friend class VmaList<T, AllocatorT>;
-+ public:
-+ const_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
-+ const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
-+ const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
-+
-+ iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }
-+
-+ const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
-+ const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
-+
-+ bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
-+ bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
-+
-+ const_iterator operator++(int) { const_iterator result = *this; ++* this; return result; }
-+ const_iterator operator--(int) { const_iterator result = *this; --* this; return result; }
-+
-+ const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; }
-+ const_iterator& operator--();
-+
-+ private:
-+ const VmaRawList<T>* m_pList;
-+ const VmaListItem<T>* m_pItem;
-+
-+ const_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
-+ };
-+ class const_reverse_iterator
-+ {
-+ friend class VmaList<T, AllocatorT>;
-+ public:
-+ const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {}
-+ const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
-+ const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {}
-+
-+ reverse_iterator drop_const() { return { const_cast<VmaRawList<T>*>(m_pList), const_cast<VmaListItem<T>*>(m_pItem) }; }
-+
-+ const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; }
-+ const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; }
-+
-+ bool operator==(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; }
-+ bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; }
-+
-+ const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++* this; return result; }
-+ const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --* this; return result; }
-+
-+ const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; }
-+ const_reverse_iterator& operator--();
-+
-+ private:
-+ const VmaRawList<T>* m_pList;
-+ const VmaListItem<T>* m_pItem;
-+
-+ const_reverse_iterator(const VmaRawList<T>* pList, const VmaListItem<T>* pItem) : m_pList(pList), m_pItem(pItem) {}
-+ };
-+
-+ VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {}
-+
-+ bool empty() const { return m_RawList.IsEmpty(); }
-+ size_t size() const { return m_RawList.GetCount(); }
-+
-+ iterator begin() { return iterator(&m_RawList, m_RawList.Front()); }
-+ iterator end() { return iterator(&m_RawList, VMA_NULL); }
-+
-+ const_iterator cbegin() const { return const_iterator(&m_RawList, m_RawList.Front()); }
-+ const_iterator cend() const { return const_iterator(&m_RawList, VMA_NULL); }
-+
-+ const_iterator begin() const { return cbegin(); }
-+ const_iterator end() const { return cend(); }
-+
-+ reverse_iterator rbegin() { return reverse_iterator(&m_RawList, m_RawList.Back()); }
-+ reverse_iterator rend() { return reverse_iterator(&m_RawList, VMA_NULL); }
-+
-+ const_reverse_iterator crbegin() const { return const_reverse_iterator(&m_RawList, m_RawList.Back()); }
-+ const_reverse_iterator crend() const { return const_reverse_iterator(&m_RawList, VMA_NULL); }
-+
-+ const_reverse_iterator rbegin() const { return crbegin(); }
-+ const_reverse_iterator rend() const { return crend(); }
-+
-+ void push_back(const T& value) { m_RawList.PushBack(value); }
-+ iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); }
-+
-+ void clear() { m_RawList.Clear(); }
-+ void erase(iterator it) { m_RawList.Remove(it.m_pItem); }
-+
-+private:
-+ VmaRawList<T> m_RawList;
-+};
-+
-+#ifndef _VMA_LIST_FUNCTIONS
-+template<typename T, typename AllocatorT>
-+typename VmaList<T, AllocatorT>::iterator& VmaList<T, AllocatorT>::iterator::operator--()
-+{
-+ if (m_pItem != VMA_NULL)
-+ {
-+ m_pItem = m_pItem->pPrev;
-+ }
-+ else
-+ {
-+ VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
-+ m_pItem = m_pList->Back();
-+ }
-+ return *this;
-+}
-+
-+template<typename T, typename AllocatorT>
-+typename VmaList<T, AllocatorT>::reverse_iterator& VmaList<T, AllocatorT>::reverse_iterator::operator--()
-+{
-+ if (m_pItem != VMA_NULL)
-+ {
-+ m_pItem = m_pItem->pNext;
-+ }
-+ else
-+ {
-+ VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
-+ m_pItem = m_pList->Front();
-+ }
-+ return *this;
-+}
-+
-+template<typename T, typename AllocatorT>
-+typename VmaList<T, AllocatorT>::const_iterator& VmaList<T, AllocatorT>::const_iterator::operator--()
-+{
-+ if (m_pItem != VMA_NULL)
-+ {
-+ m_pItem = m_pItem->pPrev;
-+ }
-+ else
-+ {
-+ VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
-+ m_pItem = m_pList->Back();
-+ }
-+ return *this;
-+}
-+
-+template<typename T, typename AllocatorT>
-+typename VmaList<T, AllocatorT>::const_reverse_iterator& VmaList<T, AllocatorT>::const_reverse_iterator::operator--()
-+{
-+ if (m_pItem != VMA_NULL)
-+ {
-+ m_pItem = m_pItem->pNext;
-+ }
-+ else
-+ {
-+ VMA_HEAVY_ASSERT(!m_pList->IsEmpty());
-+ m_pItem = m_pList->Back();
-+ }
-+ return *this;
-+}
-+#endif // _VMA_LIST_FUNCTIONS
-+#endif // _VMA_LIST
-+
-+#ifndef _VMA_INTRUSIVE_LINKED_LIST
-+/*
-+Expected interface of ItemTypeTraits:
-+struct MyItemTypeTraits
-+{
-+ typedef MyItem ItemType;
-+ static ItemType* GetPrev(const ItemType* item) { return item->myPrevPtr; }
-+ static ItemType* GetNext(const ItemType* item) { return item->myNextPtr; }
-+ static ItemType*& AccessPrev(ItemType* item) { return item->myPrevPtr; }
-+ static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
-+};
-+*/
-+template<typename ItemTypeTraits>
-+class VmaIntrusiveLinkedList
-+{
-+public:
-+ typedef typename ItemTypeTraits::ItemType ItemType;
-+ static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
-+ static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }
-+
-+ // Movable, not copyable.
-+ VmaIntrusiveLinkedList() = default;
-+ VmaIntrusiveLinkedList(VmaIntrusiveLinkedList && src);
-+ VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete;
-+ VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src);
-+ VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete;
-+ ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); }
-+
-+ size_t GetCount() const { return m_Count; }
-+ bool IsEmpty() const { return m_Count == 0; }
-+ ItemType* Front() { return m_Front; }
-+ ItemType* Back() { return m_Back; }
-+ const ItemType* Front() const { return m_Front; }
-+ const ItemType* Back() const { return m_Back; }
-+
-+ void PushBack(ItemType* item);
-+ void PushFront(ItemType* item);
-+ ItemType* PopBack();
-+ ItemType* PopFront();
-+
-+ // MyItem can be null - it means PushBack.
-+ void InsertBefore(ItemType* existingItem, ItemType* newItem);
-+ // MyItem can be null - it means PushFront.
-+ void InsertAfter(ItemType* existingItem, ItemType* newItem);
-+ void Remove(ItemType* item);
-+ void RemoveAll();
-+
-+private:
-+ ItemType* m_Front = VMA_NULL;
-+ ItemType* m_Back = VMA_NULL;
-+ size_t m_Count = 0;
-+};
-+
-+#ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
-+template<typename ItemTypeTraits>
-+VmaIntrusiveLinkedList<ItemTypeTraits>::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src)
-+ : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count)
-+{
-+ src.m_Front = src.m_Back = VMA_NULL;
-+ src.m_Count = 0;
-+}
-+
-+template<typename ItemTypeTraits>
-+VmaIntrusiveLinkedList<ItemTypeTraits>& VmaIntrusiveLinkedList<ItemTypeTraits>::operator=(VmaIntrusiveLinkedList&& src)
-+{
-+ if (&src != this)
-+ {
-+ VMA_HEAVY_ASSERT(IsEmpty());
-+ m_Front = src.m_Front;
-+ m_Back = src.m_Back;
-+ m_Count = src.m_Count;
-+ src.m_Front = src.m_Back = VMA_NULL;
-+ src.m_Count = 0;
-+ }
-+ return *this;
-+}
-+
-+template<typename ItemTypeTraits>
-+void VmaIntrusiveLinkedList<ItemTypeTraits>::PushBack(ItemType* item)
-+{
-+ VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
-+ if (IsEmpty())
-+ {
-+ m_Front = item;
-+ m_Back = item;
-+ m_Count = 1;
-+ }
-+ else
-+ {
-+ ItemTypeTraits::AccessPrev(item) = m_Back;
-+ ItemTypeTraits::AccessNext(m_Back) = item;
-+ m_Back = item;
-+ ++m_Count;
-+ }
-+}
-+
-+template<typename ItemTypeTraits>
-+void VmaIntrusiveLinkedList<ItemTypeTraits>::PushFront(ItemType* item)
-+{
-+ VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL);
-+ if (IsEmpty())
-+ {
-+ m_Front = item;
-+ m_Back = item;
-+ m_Count = 1;
-+ }
-+ else
-+ {
-+ ItemTypeTraits::AccessNext(item) = m_Front;
-+ ItemTypeTraits::AccessPrev(m_Front) = item;
-+ m_Front = item;
-+ ++m_Count;
-+ }
-+}
-+
-+template<typename ItemTypeTraits>
-+typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopBack()
-+{
-+ VMA_HEAVY_ASSERT(m_Count > 0);
-+ ItemType* const backItem = m_Back;
-+ ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem);
-+ if (prevItem != VMA_NULL)
-+ {
-+ ItemTypeTraits::AccessNext(prevItem) = VMA_NULL;
-+ }
-+ m_Back = prevItem;
-+ --m_Count;
-+ ItemTypeTraits::AccessPrev(backItem) = VMA_NULL;
-+ ItemTypeTraits::AccessNext(backItem) = VMA_NULL;
-+ return backItem;
-+}
-+
-+template<typename ItemTypeTraits>
-+typename VmaIntrusiveLinkedList<ItemTypeTraits>::ItemType* VmaIntrusiveLinkedList<ItemTypeTraits>::PopFront()
-+{
-+ VMA_HEAVY_ASSERT(m_Count > 0);
-+ ItemType* const frontItem = m_Front;
-+ ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem);
-+ if (nextItem != VMA_NULL)
-+ {
-+ ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL;
-+ }
-+ m_Front = nextItem;
-+ --m_Count;
-+ ItemTypeTraits::AccessPrev(frontItem) = VMA_NULL;
-+ ItemTypeTraits::AccessNext(frontItem) = VMA_NULL;
-+ return frontItem;
-+}
-+
-+template<typename ItemTypeTraits>
-+void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertBefore(ItemType* existingItem, ItemType* newItem)
-+{
-+ VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
-+ if (existingItem != VMA_NULL)
-+ {
-+ ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem);
-+ ItemTypeTraits::AccessPrev(newItem) = prevItem;
-+ ItemTypeTraits::AccessNext(newItem) = existingItem;
-+ ItemTypeTraits::AccessPrev(existingItem) = newItem;
-+ if (prevItem != VMA_NULL)
-+ {
-+ ItemTypeTraits::AccessNext(prevItem) = newItem;
-+ }
-+ else
-+ {
-+ VMA_HEAVY_ASSERT(m_Front == existingItem);
-+ m_Front = newItem;
-+ }
-+ ++m_Count;
-+ }
-+ else
-+ PushBack(newItem);
-+}
-+
-+template<typename ItemTypeTraits>
-+void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertAfter(ItemType* existingItem, ItemType* newItem)
-+{
-+ VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL);
-+ if (existingItem != VMA_NULL)
-+ {
-+ ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem);
-+ ItemTypeTraits::AccessNext(newItem) = nextItem;
-+ ItemTypeTraits::AccessPrev(newItem) = existingItem;
-+ ItemTypeTraits::AccessNext(existingItem) = newItem;
-+ if (nextItem != VMA_NULL)
-+ {
-+ ItemTypeTraits::AccessPrev(nextItem) = newItem;
-+ }
-+ else
-+ {
-+ VMA_HEAVY_ASSERT(m_Back == existingItem);
-+ m_Back = newItem;
-+ }
-+ ++m_Count;
-+ }
-+ else
-+ return PushFront(newItem);
-+}
-+
-+template<typename ItemTypeTraits>
-+void VmaIntrusiveLinkedList<ItemTypeTraits>::Remove(ItemType* item)
-+{
-+ VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0);
-+ if (ItemTypeTraits::GetPrev(item) != VMA_NULL)
-+ {
-+ ItemTypeTraits::AccessNext(ItemTypeTraits::AccessPrev(item)) = ItemTypeTraits::GetNext(item);
-+ }
-+ else
-+ {
-+ VMA_HEAVY_ASSERT(m_Front == item);
-+ m_Front = ItemTypeTraits::GetNext(item);
-+ }
-+
-+ if (ItemTypeTraits::GetNext(item) != VMA_NULL)
-+ {
-+ ItemTypeTraits::AccessPrev(ItemTypeTraits::AccessNext(item)) = ItemTypeTraits::GetPrev(item);
-+ }
-+ else
-+ {
-+ VMA_HEAVY_ASSERT(m_Back == item);
-+ m_Back = ItemTypeTraits::GetPrev(item);
-+ }
-+ ItemTypeTraits::AccessPrev(item) = VMA_NULL;
-+ ItemTypeTraits::AccessNext(item) = VMA_NULL;
-+ --m_Count;
-+}
-+
-+template<typename ItemTypeTraits>
-+void VmaIntrusiveLinkedList<ItemTypeTraits>::RemoveAll()
-+{
-+ if (!IsEmpty())
-+ {
-+ ItemType* item = m_Back;
-+ while (item != VMA_NULL)
-+ {
-+ ItemType* const prevItem = ItemTypeTraits::AccessPrev(item);
-+ ItemTypeTraits::AccessPrev(item) = VMA_NULL;
-+ ItemTypeTraits::AccessNext(item) = VMA_NULL;
-+ item = prevItem;
-+ }
-+ m_Front = VMA_NULL;
-+ m_Back = VMA_NULL;
-+ m_Count = 0;
-+ }
-+}
-+#endif // _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS
-+#endif // _VMA_INTRUSIVE_LINKED_LIST
-+
-+// Unused in this version.
-+#if 0
-+
-+#ifndef _VMA_PAIR
-+template<typename T1, typename T2>
-+struct VmaPair
-+{
-+ T1 first;
-+ T2 second;
-+
-+ VmaPair() : first(), second() {}
-+ VmaPair(const T1& firstSrc, const T2& secondSrc) : first(firstSrc), second(secondSrc) {}
-+};
-+
-+template<typename FirstT, typename SecondT>
-+struct VmaPairFirstLess
-+{
-+ bool operator()(const VmaPair<FirstT, SecondT>& lhs, const VmaPair<FirstT, SecondT>& rhs) const
-+ {
-+ return lhs.first < rhs.first;
-+ }
-+ bool operator()(const VmaPair<FirstT, SecondT>& lhs, const FirstT& rhsFirst) const
-+ {
-+ return lhs.first < rhsFirst;
-+ }
-+};
-+#endif // _VMA_PAIR
-+
-+#ifndef _VMA_MAP
-+/* Class compatible with subset of interface of std::unordered_map.
-+KeyT, ValueT must be POD because they will be stored in VmaVector.
-+*/
-+template<typename KeyT, typename ValueT>
-+class VmaMap
-+{
-+public:
-+ typedef VmaPair<KeyT, ValueT> PairType;
-+ typedef PairType* iterator;
-+
-+ VmaMap(const VmaStlAllocator<PairType>& allocator) : m_Vector(allocator) {}
-+
-+ iterator begin() { return m_Vector.begin(); }
-+ iterator end() { return m_Vector.end(); }
-+ size_t size() { return m_Vector.size(); }
-+
-+ void insert(const PairType& pair);
-+ iterator find(const KeyT& key);
-+ void erase(iterator it);
-+
-+private:
-+ VmaVector< PairType, VmaStlAllocator<PairType>> m_Vector;
-+};
-+
-+#ifndef _VMA_MAP_FUNCTIONS
-+template<typename KeyT, typename ValueT>
-+void VmaMap<KeyT, ValueT>::insert(const PairType& pair)
-+{
-+ const size_t indexToInsert = VmaBinaryFindFirstNotLess(
-+ m_Vector.data(),
-+ m_Vector.data() + m_Vector.size(),
-+ pair,
-+ VmaPairFirstLess<KeyT, ValueT>()) - m_Vector.data();
-+ VmaVectorInsert(m_Vector, indexToInsert, pair);
-+}
-+
-+template<typename KeyT, typename ValueT>
-+VmaPair<KeyT, ValueT>* VmaMap<KeyT, ValueT>::find(const KeyT& key)
-+{
-+ PairType* it = VmaBinaryFindFirstNotLess(
-+ m_Vector.data(),
-+ m_Vector.data() + m_Vector.size(),
-+ key,
-+ VmaPairFirstLess<KeyT, ValueT>());
-+ if ((it != m_Vector.end()) && (it->first == key))
-+ {
-+ return it;
-+ }
-+ else
-+ {
-+ return m_Vector.end();
-+ }
-+}
-+
-+template<typename KeyT, typename ValueT>
-+void VmaMap<KeyT, ValueT>::erase(iterator it)
-+{
-+ VmaVectorRemove(m_Vector, it - m_Vector.begin());
-+}
-+#endif // _VMA_MAP_FUNCTIONS
-+#endif // _VMA_MAP
-+
-+#endif // #if 0
-+
-+#if !defined(_VMA_STRING_BUILDER) && VMA_STATS_STRING_ENABLED
-+class VmaStringBuilder
-+{
-+public:
-+ VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator<char>(allocationCallbacks)) {}
-+ ~VmaStringBuilder() = default;
-+
-+ size_t GetLength() const { return m_Data.size(); }
-+ const char* GetData() const { return m_Data.data(); }
-+ void AddNewLine() { Add('\n'); }
-+ void Add(char ch) { m_Data.push_back(ch); }
-+
-+ void Add(const char* pStr);
-+ void AddNumber(uint32_t num);
-+ void AddNumber(uint64_t num);
-+ void AddPointer(const void* ptr);
-+
-+private:
-+ VmaVector<char, VmaStlAllocator<char>> m_Data;
-+};
-+
-+#ifndef _VMA_STRING_BUILDER_FUNCTIONS
-+void VmaStringBuilder::Add(const char* pStr)
-+{
-+ const size_t strLen = strlen(pStr);
-+ if (strLen > 0)
-+ {
-+ const size_t oldCount = m_Data.size();
-+ m_Data.resize(oldCount + strLen);
-+ memcpy(m_Data.data() + oldCount, pStr, strLen);
-+ }
-+}
-+
-+void VmaStringBuilder::AddNumber(uint32_t num)
-+{
-+ char buf[11];
-+ buf[10] = '\0';
-+ char* p = &buf[10];
-+ do
-+ {
-+ *--p = '0' + (char)(num % 10);
-+ num /= 10;
-+ } while (num);
-+ Add(p);
-+}
-+
-+void VmaStringBuilder::AddNumber(uint64_t num)
-+{
-+ char buf[21];
-+ buf[20] = '\0';
-+ char* p = &buf[20];
-+ do
-+ {
-+ *--p = '0' + (char)(num % 10);
-+ num /= 10;
-+ } while (num);
-+ Add(p);
-+}
-+
-+void VmaStringBuilder::AddPointer(const void* ptr)
-+{
-+ char buf[21];
-+ VmaPtrToStr(buf, sizeof(buf), ptr);
-+ Add(buf);
-+}
-+#endif //_VMA_STRING_BUILDER_FUNCTIONS
-+#endif // _VMA_STRING_BUILDER
-+
-+#if !defined(_VMA_JSON_WRITER) && VMA_STATS_STRING_ENABLED
-+/*
-+Allows to conveniently build a correct JSON document to be written to the
-+VmaStringBuilder passed to the constructor.
-+*/
-+class VmaJsonWriter
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaJsonWriter)
-+public:
-+ // sb - string builder to write the document to. Must remain alive for the whole lifetime of this object.
-+ VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb);
-+ ~VmaJsonWriter();
-+
-+ // Begins object by writing "{".
-+ // Inside an object, you must call pairs of WriteString and a value, e.g.:
-+ // j.BeginObject(true); j.WriteString("A"); j.WriteNumber(1); j.WriteString("B"); j.WriteNumber(2); j.EndObject();
-+ // Will write: { "A": 1, "B": 2 }
-+ void BeginObject(bool singleLine = false);
-+ // Ends object by writing "}".
-+ void EndObject();
-+
-+ // Begins array by writing "[".
-+ // Inside an array, you can write a sequence of any values.
-+ void BeginArray(bool singleLine = false);
-+ // Ends array by writing "[".
-+ void EndArray();
-+
-+ // Writes a string value inside "".
-+ // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped.
-+ void WriteString(const char* pStr);
-+
-+ // Begins writing a string value.
-+ // Call BeginString, ContinueString, ContinueString, ..., EndString instead of
-+ // WriteString to conveniently build the string content incrementally, made of
-+ // parts including numbers.
-+ void BeginString(const char* pStr = VMA_NULL);
-+ // Posts next part of an open string.
-+ void ContinueString(const char* pStr);
-+ // Posts next part of an open string. The number is converted to decimal characters.
-+ void ContinueString(uint32_t n);
-+ void ContinueString(uint64_t n);
-+ // Posts next part of an open string. Pointer value is converted to characters
-+ // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00
-+ void ContinueString_Pointer(const void* ptr);
-+ // Ends writing a string value by writing '"'.
-+ void EndString(const char* pStr = VMA_NULL);
-+
-+ // Writes a number value.
-+ void WriteNumber(uint32_t n);
-+ void WriteNumber(uint64_t n);
-+ // Writes a boolean value - false or true.
-+ void WriteBool(bool b);
-+ // Writes a null value.
-+ void WriteNull();
-+
-+private:
-+ enum COLLECTION_TYPE
-+ {
-+ COLLECTION_TYPE_OBJECT,
-+ COLLECTION_TYPE_ARRAY,
-+ };
-+ struct StackItem
-+ {
-+ COLLECTION_TYPE type;
-+ uint32_t valueCount;
-+ bool singleLineMode;
-+ };
-+
-+ static const char* const INDENT;
-+
-+ VmaStringBuilder& m_SB;
-+ VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack;
-+ bool m_InsideString;
-+
-+ void BeginValue(bool isString);
-+ void WriteIndent(bool oneLess = false);
-+};
-+const char* const VmaJsonWriter::INDENT = " ";
-+
-+#ifndef _VMA_JSON_WRITER_FUNCTIONS
-+VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb)
-+ : m_SB(sb),
-+ m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)),
-+ m_InsideString(false) {}
-+
-+VmaJsonWriter::~VmaJsonWriter()
-+{
-+ VMA_ASSERT(!m_InsideString);
-+ VMA_ASSERT(m_Stack.empty());
-+}
-+
-+void VmaJsonWriter::BeginObject(bool singleLine)
-+{
-+ VMA_ASSERT(!m_InsideString);
-+
-+ BeginValue(false);
-+ m_SB.Add('{');
-+
-+ StackItem item;
-+ item.type = COLLECTION_TYPE_OBJECT;
-+ item.valueCount = 0;
-+ item.singleLineMode = singleLine;
-+ m_Stack.push_back(item);
-+}
-+
-+void VmaJsonWriter::EndObject()
-+{
-+ VMA_ASSERT(!m_InsideString);
-+
-+ WriteIndent(true);
-+ m_SB.Add('}');
-+
-+ VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_OBJECT);
-+ m_Stack.pop_back();
-+}
-+
-+void VmaJsonWriter::BeginArray(bool singleLine)
-+{
-+ VMA_ASSERT(!m_InsideString);
-+
-+ BeginValue(false);
-+ m_SB.Add('[');
-+
-+ StackItem item;
-+ item.type = COLLECTION_TYPE_ARRAY;
-+ item.valueCount = 0;
-+ item.singleLineMode = singleLine;
-+ m_Stack.push_back(item);
-+}
-+
-+void VmaJsonWriter::EndArray()
-+{
-+ VMA_ASSERT(!m_InsideString);
-+
-+ WriteIndent(true);
-+ m_SB.Add(']');
-+
-+ VMA_ASSERT(!m_Stack.empty() && m_Stack.back().type == COLLECTION_TYPE_ARRAY);
-+ m_Stack.pop_back();
-+}
-+
-+void VmaJsonWriter::WriteString(const char* pStr)
-+{
-+ BeginString(pStr);
-+ EndString();
-+}
-+
-+void VmaJsonWriter::BeginString(const char* pStr)
-+{
-+ VMA_ASSERT(!m_InsideString);
-+
-+ BeginValue(true);
-+ m_SB.Add('"');
-+ m_InsideString = true;
-+ if (pStr != VMA_NULL && pStr[0] != '\0')
-+ {
-+ ContinueString(pStr);
-+ }
-+}
-+
-+void VmaJsonWriter::ContinueString(const char* pStr)
-+{
-+ VMA_ASSERT(m_InsideString);
-+
-+ const size_t strLen = strlen(pStr);
-+ for (size_t i = 0; i < strLen; ++i)
-+ {
-+ char ch = pStr[i];
-+ if (ch == '\\')
-+ {
-+ m_SB.Add("\\\\");
-+ }
-+ else if (ch == '"')
-+ {
-+ m_SB.Add("\\\"");
-+ }
-+ else if (ch >= 32)
-+ {
-+ m_SB.Add(ch);
-+ }
-+ else switch (ch)
-+ {
-+ case '\b':
-+ m_SB.Add("\\b");
-+ break;
-+ case '\f':
-+ m_SB.Add("\\f");
-+ break;
-+ case '\n':
-+ m_SB.Add("\\n");
-+ break;
-+ case '\r':
-+ m_SB.Add("\\r");
-+ break;
-+ case '\t':
-+ m_SB.Add("\\t");
-+ break;
-+ default:
-+ VMA_ASSERT(0 && "Character not currently supported.");
-+ }
-+ }
-+}
-+
-+void VmaJsonWriter::ContinueString(uint32_t n)
-+{
-+ VMA_ASSERT(m_InsideString);
-+ m_SB.AddNumber(n);
-+}
-+
-+void VmaJsonWriter::ContinueString(uint64_t n)
-+{
-+ VMA_ASSERT(m_InsideString);
-+ m_SB.AddNumber(n);
-+}
-+
-+void VmaJsonWriter::ContinueString_Pointer(const void* ptr)
-+{
-+ VMA_ASSERT(m_InsideString);
-+ m_SB.AddPointer(ptr);
-+}
-+
-+void VmaJsonWriter::EndString(const char* pStr)
-+{
-+ VMA_ASSERT(m_InsideString);
-+ if (pStr != VMA_NULL && pStr[0] != '\0')
-+ {
-+ ContinueString(pStr);
-+ }
-+ m_SB.Add('"');
-+ m_InsideString = false;
-+}
-+
-+void VmaJsonWriter::WriteNumber(uint32_t n)
-+{
-+ VMA_ASSERT(!m_InsideString);
-+ BeginValue(false);
-+ m_SB.AddNumber(n);
-+}
-+
-+void VmaJsonWriter::WriteNumber(uint64_t n)
-+{
-+ VMA_ASSERT(!m_InsideString);
-+ BeginValue(false);
-+ m_SB.AddNumber(n);
-+}
-+
-+void VmaJsonWriter::WriteBool(bool b)
-+{
-+ VMA_ASSERT(!m_InsideString);
-+ BeginValue(false);
-+ m_SB.Add(b ? "true" : "false");
-+}
-+
-+void VmaJsonWriter::WriteNull()
-+{
-+ VMA_ASSERT(!m_InsideString);
-+ BeginValue(false);
-+ m_SB.Add("null");
-+}
-+
-+void VmaJsonWriter::BeginValue(bool isString)
-+{
-+ if (!m_Stack.empty())
-+ {
-+ StackItem& currItem = m_Stack.back();
-+ if (currItem.type == COLLECTION_TYPE_OBJECT &&
-+ currItem.valueCount % 2 == 0)
-+ {
-+ VMA_ASSERT(isString);
-+ }
-+
-+ if (currItem.type == COLLECTION_TYPE_OBJECT &&
-+ currItem.valueCount % 2 != 0)
-+ {
-+ m_SB.Add(": ");
-+ }
-+ else if (currItem.valueCount > 0)
-+ {
-+ m_SB.Add(", ");
-+ WriteIndent();
-+ }
-+ else
-+ {
-+ WriteIndent();
-+ }
-+ ++currItem.valueCount;
-+ }
-+}
-+
-+void VmaJsonWriter::WriteIndent(bool oneLess)
-+{
-+ if (!m_Stack.empty() && !m_Stack.back().singleLineMode)
-+ {
-+ m_SB.AddNewLine();
-+
-+ size_t count = m_Stack.size();
-+ if (count > 0 && oneLess)
-+ {
-+ --count;
-+ }
-+ for (size_t i = 0; i < count; ++i)
-+ {
-+ m_SB.Add(INDENT);
-+ }
-+ }
-+}
-+#endif // _VMA_JSON_WRITER_FUNCTIONS
-+
-+static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat)
-+{
-+ json.BeginObject();
-+
-+ json.WriteString("BlockCount");
-+ json.WriteNumber(stat.statistics.blockCount);
-+ json.WriteString("BlockBytes");
-+ json.WriteNumber(stat.statistics.blockBytes);
-+ json.WriteString("AllocationCount");
-+ json.WriteNumber(stat.statistics.allocationCount);
-+ json.WriteString("AllocationBytes");
-+ json.WriteNumber(stat.statistics.allocationBytes);
-+ json.WriteString("UnusedRangeCount");
-+ json.WriteNumber(stat.unusedRangeCount);
-+
-+ if (stat.statistics.allocationCount > 1)
-+ {
-+ json.WriteString("AllocationSizeMin");
-+ json.WriteNumber(stat.allocationSizeMin);
-+ json.WriteString("AllocationSizeMax");
-+ json.WriteNumber(stat.allocationSizeMax);
-+ }
-+ if (stat.unusedRangeCount > 1)
-+ {
-+ json.WriteString("UnusedRangeSizeMin");
-+ json.WriteNumber(stat.unusedRangeSizeMin);
-+ json.WriteString("UnusedRangeSizeMax");
-+ json.WriteNumber(stat.unusedRangeSizeMax);
-+ }
-+ json.EndObject();
-+}
-+#endif // _VMA_JSON_WRITER
-+
-+#ifndef _VMA_MAPPING_HYSTERESIS
-+
-+class VmaMappingHysteresis
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaMappingHysteresis)
-+public:
-+ VmaMappingHysteresis() = default;
-+
-+ uint32_t GetExtraMapping() const { return m_ExtraMapping; }
-+
-+ // Call when Map was called.
-+ // Returns true if switched to extra +1 mapping reference count.
-+ bool PostMap()
-+ {
-+#if VMA_MAPPING_HYSTERESIS_ENABLED
-+ if(m_ExtraMapping == 0)
-+ {
-+ ++m_MajorCounter;
-+ if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING)
-+ {
-+ m_ExtraMapping = 1;
-+ m_MajorCounter = 0;
-+ m_MinorCounter = 0;
-+ return true;
-+ }
-+ }
-+ else // m_ExtraMapping == 1
-+ PostMinorCounter();
-+#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
-+ return false;
-+ }
-+
-+ // Call when Unmap was called.
-+ void PostUnmap()
-+ {
-+#if VMA_MAPPING_HYSTERESIS_ENABLED
-+ if(m_ExtraMapping == 0)
-+ ++m_MajorCounter;
-+ else // m_ExtraMapping == 1
-+ PostMinorCounter();
-+#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
-+ }
-+
-+ // Call when allocation was made from the memory block.
-+ void PostAlloc()
-+ {
-+#if VMA_MAPPING_HYSTERESIS_ENABLED
-+ if(m_ExtraMapping == 1)
-+ ++m_MajorCounter;
-+ else // m_ExtraMapping == 0
-+ PostMinorCounter();
-+#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
-+ }
-+
-+ // Call when allocation was freed from the memory block.
-+ // Returns true if switched to extra -1 mapping reference count.
-+ bool PostFree()
-+ {
-+#if VMA_MAPPING_HYSTERESIS_ENABLED
-+ if(m_ExtraMapping == 1)
-+ {
-+ ++m_MajorCounter;
-+ if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING &&
-+ m_MajorCounter > m_MinorCounter + 1)
-+ {
-+ m_ExtraMapping = 0;
-+ m_MajorCounter = 0;
-+ m_MinorCounter = 0;
-+ return true;
-+ }
-+ }
-+ else // m_ExtraMapping == 0
-+ PostMinorCounter();
-+#endif // #if VMA_MAPPING_HYSTERESIS_ENABLED
-+ return false;
-+ }
-+
-+private:
-+ static const int32_t COUNTER_MIN_EXTRA_MAPPING = 7;
-+
-+ uint32_t m_MinorCounter = 0;
-+ uint32_t m_MajorCounter = 0;
-+ uint32_t m_ExtraMapping = 0; // 0 or 1.
-+
-+ void PostMinorCounter()
-+ {
-+ if(m_MinorCounter < m_MajorCounter)
-+ {
-+ ++m_MinorCounter;
-+ }
-+ else if(m_MajorCounter > 0)
-+ {
-+ --m_MajorCounter;
-+ --m_MinorCounter;
-+ }
-+ }
-+};
-+
-+#endif // _VMA_MAPPING_HYSTERESIS
-+
-+#ifndef _VMA_DEVICE_MEMORY_BLOCK
-+/*
-+Represents a single block of device memory (`VkDeviceMemory`) with all the
-+data about its regions (aka suballocations, #VmaAllocation), assigned and free.
-+
-+Thread-safety:
-+- Access to m_pMetadata must be externally synchronized.
-+- Map, Unmap, Bind* are synchronized internally.
-+*/
-+class VmaDeviceMemoryBlock
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaDeviceMemoryBlock)
-+public:
-+ VmaBlockMetadata* m_pMetadata;
-+
-+ VmaDeviceMemoryBlock(VmaAllocator hAllocator);
-+ ~VmaDeviceMemoryBlock();
-+
-+ // Always call after construction.
-+ void Init(
-+ VmaAllocator hAllocator,
-+ VmaPool hParentPool,
-+ uint32_t newMemoryTypeIndex,
-+ VkDeviceMemory newMemory,
-+ VkDeviceSize newSize,
-+ uint32_t id,
-+ uint32_t algorithm,
-+ VkDeviceSize bufferImageGranularity);
-+ // Always call before destruction.
-+ void Destroy(VmaAllocator allocator);
-+
-+ VmaPool GetParentPool() const { return m_hParentPool; }
-+ VkDeviceMemory GetDeviceMemory() const { return m_hMemory; }
-+ uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
-+ uint32_t GetId() const { return m_Id; }
-+ void* GetMappedData() const { return m_pMappedData; }
-+ uint32_t GetMapRefCount() const { return m_MapCount; }
-+
-+ // Call when allocation/free was made from m_pMetadata.
-+ // Used for m_MappingHysteresis.
-+ void PostAlloc(VmaAllocator hAllocator);
-+ void PostFree(VmaAllocator hAllocator);
-+
-+ // Validates all data structures inside this object. If not valid, returns false.
-+ bool Validate() const;
-+ VkResult CheckCorruption(VmaAllocator hAllocator);
-+
-+ // ppData can be null.
-+ VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData);
-+ void Unmap(VmaAllocator hAllocator, uint32_t count);
-+
-+ VkResult WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
-+ VkResult ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize);
-+
-+ VkResult BindBufferMemory(
-+ const VmaAllocator hAllocator,
-+ const VmaAllocation hAllocation,
-+ VkDeviceSize allocationLocalOffset,
-+ VkBuffer hBuffer,
-+ const void* pNext);
-+ VkResult BindImageMemory(
-+ const VmaAllocator hAllocator,
-+ const VmaAllocation hAllocation,
-+ VkDeviceSize allocationLocalOffset,
-+ VkImage hImage,
-+ const void* pNext);
-+
-+private:
-+ VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
-+ uint32_t m_MemoryTypeIndex;
-+ uint32_t m_Id;
-+ VkDeviceMemory m_hMemory;
-+
-+ /*
-+ Protects access to m_hMemory so it is not used by multiple threads simultaneously, e.g. vkMapMemory, vkBindBufferMemory.
-+ Also protects m_MapCount, m_pMappedData.
-+ Allocations, deallocations, any change in m_pMetadata is protected by parent's VmaBlockVector::m_Mutex.
-+ */
-+ VMA_MUTEX m_MapAndBindMutex;
-+ VmaMappingHysteresis m_MappingHysteresis;
-+ uint32_t m_MapCount;
-+ void* m_pMappedData;
-+};
-+#endif // _VMA_DEVICE_MEMORY_BLOCK
-+
-+#ifndef _VMA_ALLOCATION_T
-+struct VmaAllocation_T
-+{
-+ friend struct VmaDedicatedAllocationListItemTraits;
-+
-+ enum FLAGS
-+ {
-+ FLAG_PERSISTENT_MAP = 0x01,
-+ FLAG_MAPPING_ALLOWED = 0x02,
-+ };
-+
-+public:
-+ enum ALLOCATION_TYPE
-+ {
-+ ALLOCATION_TYPE_NONE,
-+ ALLOCATION_TYPE_BLOCK,
-+ ALLOCATION_TYPE_DEDICATED,
-+ };
-+
-+ // This struct is allocated using VmaPoolAllocator.
-+ VmaAllocation_T(bool mappingAllowed);
-+ ~VmaAllocation_T();
-+
-+ void InitBlockAllocation(
-+ VmaDeviceMemoryBlock* block,
-+ VmaAllocHandle allocHandle,
-+ VkDeviceSize alignment,
-+ VkDeviceSize size,
-+ uint32_t memoryTypeIndex,
-+ VmaSuballocationType suballocationType,
-+ bool mapped);
-+ // pMappedData not null means allocation is created with MAPPED flag.
-+ void InitDedicatedAllocation(
-+ VmaPool hParentPool,
-+ uint32_t memoryTypeIndex,
-+ VkDeviceMemory hMemory,
-+ VmaSuballocationType suballocationType,
-+ void* pMappedData,
-+ VkDeviceSize size);
-+
-+ ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; }
-+ VkDeviceSize GetAlignment() const { return m_Alignment; }
-+ VkDeviceSize GetSize() const { return m_Size; }
-+ void* GetUserData() const { return m_pUserData; }
-+ const char* GetName() const { return m_pName; }
-+ VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; }
-+
-+ VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; }
-+ uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
-+ bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; }
-+ bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; }
-+
-+ void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; }
-+ void SetName(VmaAllocator hAllocator, const char* pName);
-+ void FreeName(VmaAllocator hAllocator);
-+ uint8_t SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation);
-+ VmaAllocHandle GetAllocHandle() const;
-+ VkDeviceSize GetOffset() const;
-+ VmaPool GetParentPool() const;
-+ VkDeviceMemory GetMemory() const;
-+ void* GetMappedData() const;
-+
-+ void BlockAllocMap();
-+ void BlockAllocUnmap();
-+ VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData);
-+ void DedicatedAllocUnmap(VmaAllocator hAllocator);
-+
-+#if VMA_STATS_STRING_ENABLED
-+ uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; }
-+
-+ void InitBufferImageUsage(uint32_t bufferImageUsage);
-+ void PrintParameters(class VmaJsonWriter& json) const;
-+#endif
-+
-+private:
-+ // Allocation out of VmaDeviceMemoryBlock.
-+ struct BlockAllocation
-+ {
-+ VmaDeviceMemoryBlock* m_Block;
-+ VmaAllocHandle m_AllocHandle;
-+ };
-+ // Allocation for an object that has its own private VkDeviceMemory.
-+ struct DedicatedAllocation
-+ {
-+ VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool.
-+ VkDeviceMemory m_hMemory;
-+ void* m_pMappedData; // Not null means memory is mapped.
-+ VmaAllocation_T* m_Prev;
-+ VmaAllocation_T* m_Next;
-+ };
-+ union
-+ {
-+ // Allocation out of VmaDeviceMemoryBlock.
-+ BlockAllocation m_BlockAllocation;
-+ // Allocation for an object that has its own private VkDeviceMemory.
-+ DedicatedAllocation m_DedicatedAllocation;
-+ };
-+
-+ VkDeviceSize m_Alignment;
-+ VkDeviceSize m_Size;
-+ void* m_pUserData;
-+ char* m_pName;
-+ uint32_t m_MemoryTypeIndex;
-+ uint8_t m_Type; // ALLOCATION_TYPE
-+ uint8_t m_SuballocationType; // VmaSuballocationType
-+ // Reference counter for vmaMapMemory()/vmaUnmapMemory().
-+ uint8_t m_MapCount;
-+ uint8_t m_Flags; // enum FLAGS
-+#if VMA_STATS_STRING_ENABLED
-+ uint32_t m_BufferImageUsage; // 0 if unknown.
-+#endif
-+};
-+#endif // _VMA_ALLOCATION_T
-+
-+#ifndef _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS
-+struct VmaDedicatedAllocationListItemTraits
-+{
-+ typedef VmaAllocation_T ItemType;
-+
-+ static ItemType* GetPrev(const ItemType* item)
-+ {
-+ VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
-+ return item->m_DedicatedAllocation.m_Prev;
-+ }
-+ static ItemType* GetNext(const ItemType* item)
-+ {
-+ VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
-+ return item->m_DedicatedAllocation.m_Next;
-+ }
-+ static ItemType*& AccessPrev(ItemType* item)
-+ {
-+ VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
-+ return item->m_DedicatedAllocation.m_Prev;
-+ }
-+ static ItemType*& AccessNext(ItemType* item)
-+ {
-+ VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
-+ return item->m_DedicatedAllocation.m_Next;
-+ }
-+};
-+#endif // _VMA_DEDICATED_ALLOCATION_LIST_ITEM_TRAITS
-+
-+#ifndef _VMA_DEDICATED_ALLOCATION_LIST
-+/*
-+Stores linked list of VmaAllocation_T objects.
-+Thread-safe, synchronized internally.
-+*/
-+class VmaDedicatedAllocationList
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaDedicatedAllocationList)
-+public:
-+ VmaDedicatedAllocationList() {}
-+ ~VmaDedicatedAllocationList();
-+
-+ void Init(bool useMutex) { m_UseMutex = useMutex; }
-+ bool Validate();
-+
-+ void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
-+ void AddStatistics(VmaStatistics& inoutStats);
-+#if VMA_STATS_STRING_ENABLED
-+ // Writes JSON array with the list of allocations.
-+ void BuildStatsString(VmaJsonWriter& json);
-+#endif
-+
-+ bool IsEmpty();
-+ void Register(VmaAllocation alloc);
-+ void Unregister(VmaAllocation alloc);
-+
-+private:
-+ typedef VmaIntrusiveLinkedList<VmaDedicatedAllocationListItemTraits> DedicatedAllocationLinkedList;
-+
-+ bool m_UseMutex = true;
-+ VMA_RW_MUTEX m_Mutex;
-+ DedicatedAllocationLinkedList m_AllocationList;
-+};
-+
-+#ifndef _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS
-+
-+VmaDedicatedAllocationList::~VmaDedicatedAllocationList()
-+{
-+ VMA_HEAVY_ASSERT(Validate());
-+
-+ if (!m_AllocationList.IsEmpty())
-+ {
-+ VMA_ASSERT(false && "Unfreed dedicated allocations found!");
-+ }
-+}
-+
-+bool VmaDedicatedAllocationList::Validate()
-+{
-+ const size_t declaredCount = m_AllocationList.GetCount();
-+ size_t actualCount = 0;
-+ VmaMutexLockRead lock(m_Mutex, m_UseMutex);
-+ for (VmaAllocation alloc = m_AllocationList.Front();
-+ alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
-+ {
-+ ++actualCount;
-+ }
-+ VMA_VALIDATE(actualCount == declaredCount);
-+
-+ return true;
-+}
-+
-+void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
-+{
-+ for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item))
-+ {
-+ const VkDeviceSize size = item->GetSize();
-+ inoutStats.statistics.blockCount++;
-+ inoutStats.statistics.blockBytes += size;
-+ VmaAddDetailedStatisticsAllocation(inoutStats, item->GetSize());
-+ }
-+}
-+
-+void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats)
-+{
-+ VmaMutexLockRead lock(m_Mutex, m_UseMutex);
-+
-+ const uint32_t allocCount = (uint32_t)m_AllocationList.GetCount();
-+ inoutStats.blockCount += allocCount;
-+ inoutStats.allocationCount += allocCount;
-+
-+ for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item))
-+ {
-+ const VkDeviceSize size = item->GetSize();
-+ inoutStats.blockBytes += size;
-+ inoutStats.allocationBytes += size;
-+ }
-+}
-+
-+#if VMA_STATS_STRING_ENABLED
-+void VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter& json)
-+{
-+ VmaMutexLockRead lock(m_Mutex, m_UseMutex);
-+ json.BeginArray();
-+ for (VmaAllocation alloc = m_AllocationList.Front();
-+ alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc))
-+ {
-+ json.BeginObject(true);
-+ alloc->PrintParameters(json);
-+ json.EndObject();
-+ }
-+ json.EndArray();
-+}
-+#endif // VMA_STATS_STRING_ENABLED
-+
-+bool VmaDedicatedAllocationList::IsEmpty()
-+{
-+ VmaMutexLockRead lock(m_Mutex, m_UseMutex);
-+ return m_AllocationList.IsEmpty();
-+}
-+
-+void VmaDedicatedAllocationList::Register(VmaAllocation alloc)
-+{
-+ VmaMutexLockWrite lock(m_Mutex, m_UseMutex);
-+ m_AllocationList.PushBack(alloc);
-+}
-+
-+void VmaDedicatedAllocationList::Unregister(VmaAllocation alloc)
-+{
-+ VmaMutexLockWrite lock(m_Mutex, m_UseMutex);
-+ m_AllocationList.Remove(alloc);
-+}
-+#endif // _VMA_DEDICATED_ALLOCATION_LIST_FUNCTIONS
-+#endif // _VMA_DEDICATED_ALLOCATION_LIST
-+
-+#ifndef _VMA_SUBALLOCATION
-+/*
-+Represents a region of VmaDeviceMemoryBlock that is either assigned and returned as
-+allocated memory block or free.
-+*/
-+struct VmaSuballocation
-+{
-+ VkDeviceSize offset;
-+ VkDeviceSize size;
-+ void* userData;
-+ VmaSuballocationType type;
-+};
-+
-+// Comparator for offsets.
-+struct VmaSuballocationOffsetLess
-+{
-+ bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
-+ {
-+ return lhs.offset < rhs.offset;
-+ }
-+};
-+
-+struct VmaSuballocationOffsetGreater
-+{
-+ bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const
-+ {
-+ return lhs.offset > rhs.offset;
-+ }
-+};
-+
-+struct VmaSuballocationItemSizeLess
-+{
-+ bool operator()(const VmaSuballocationList::iterator lhs,
-+ const VmaSuballocationList::iterator rhs) const
-+ {
-+ return lhs->size < rhs->size;
-+ }
-+
-+ bool operator()(const VmaSuballocationList::iterator lhs,
-+ VkDeviceSize rhsSize) const
-+ {
-+ return lhs->size < rhsSize;
-+ }
-+};
-+#endif // _VMA_SUBALLOCATION
-+
-+#ifndef _VMA_ALLOCATION_REQUEST
-+/*
-+Parameters of planned allocation inside a VmaDeviceMemoryBlock.
-+item points to a FREE suballocation.
-+*/
-+struct VmaAllocationRequest
-+{
-+ VmaAllocHandle allocHandle;
-+ VkDeviceSize size;
-+ VmaSuballocationList::iterator item;
-+ void* customData;
-+ uint64_t algorithmData;
-+ VmaAllocationRequestType type;
-+};
-+#endif // _VMA_ALLOCATION_REQUEST
-+
-+#ifndef _VMA_BLOCK_METADATA
-+/*
-+Data structure used for bookkeeping of allocations and unused ranges of memory
-+in a single VkDeviceMemory block.
-+*/
-+class VmaBlockMetadata
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata)
-+public:
-+ // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole lifetime of this object.
-+ VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
-+ VkDeviceSize bufferImageGranularity, bool isVirtual);
-+ virtual ~VmaBlockMetadata() = default;
-+
-+ virtual void Init(VkDeviceSize size) { m_Size = size; }
-+ bool IsVirtual() const { return m_IsVirtual; }
-+ VkDeviceSize GetSize() const { return m_Size; }
-+
-+ // Validates all data structures inside this object. If not valid, returns false.
-+ virtual bool Validate() const = 0;
-+ virtual size_t GetAllocationCount() const = 0;
-+ virtual size_t GetFreeRegionsCount() const = 0;
-+ virtual VkDeviceSize GetSumFreeSize() const = 0;
-+ // Returns true if this block is empty - contains only single free suballocation.
-+ virtual bool IsEmpty() const = 0;
-+ virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0;
-+ virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0;
-+ virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0;
-+
-+ virtual VmaAllocHandle GetAllocationListBegin() const = 0;
-+ virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0;
-+ virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0;
-+
-+ // Shouldn't modify blockCount.
-+ virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0;
-+ virtual void AddStatistics(VmaStatistics& inoutStats) const = 0;
-+
-+#if VMA_STATS_STRING_ENABLED
-+ virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0;
-+#endif
-+
-+ // Tries to find a place for suballocation with given parameters inside this block.
-+ // If succeeded, fills pAllocationRequest and returns true.
-+ // If failed, returns false.
-+ virtual bool CreateAllocationRequest(
-+ VkDeviceSize allocSize,
-+ VkDeviceSize allocAlignment,
-+ bool upperAddress,
-+ VmaSuballocationType allocType,
-+ // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags.
-+ uint32_t strategy,
-+ VmaAllocationRequest* pAllocationRequest) = 0;
-+
-+ virtual VkResult CheckCorruption(const void* pBlockData) = 0;
-+
-+ // Makes actual allocation based on request. Request must already be checked and valid.
-+ virtual void Alloc(
-+ const VmaAllocationRequest& request,
-+ VmaSuballocationType type,
-+ void* userData) = 0;
-+
-+ // Frees suballocation assigned to given memory region.
-+ virtual void Free(VmaAllocHandle allocHandle) = 0;
-+
-+ // Frees all allocations.
-+ // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations!
-+ virtual void Clear() = 0;
-+
-+ virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0;
-+ virtual void DebugLogAllAllocations() const = 0;
-+
-+protected:
-+ const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; }
-+ VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
-+ VkDeviceSize GetDebugMargin() const { return VkDeviceSize(IsVirtual() ? 0 : VMA_DEBUG_MARGIN); }
-+
-+ void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const;
-+#if VMA_STATS_STRING_ENABLED
-+ // mapRefCount == UINT32_MAX means unspecified.
-+ void PrintDetailedMap_Begin(class VmaJsonWriter& json,
-+ VkDeviceSize unusedBytes,
-+ size_t allocationCount,
-+ size_t unusedRangeCount) const;
-+ void PrintDetailedMap_Allocation(class VmaJsonWriter& json,
-+ VkDeviceSize offset, VkDeviceSize size, void* userData) const;
-+ void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
-+ VkDeviceSize offset,
-+ VkDeviceSize size) const;
-+ void PrintDetailedMap_End(class VmaJsonWriter& json) const;
-+#endif
-+
-+private:
-+ VkDeviceSize m_Size;
-+ const VkAllocationCallbacks* m_pAllocationCallbacks;
-+ const VkDeviceSize m_BufferImageGranularity;
-+ const bool m_IsVirtual;
-+};
-+
-+#ifndef _VMA_BLOCK_METADATA_FUNCTIONS
-+VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks,
-+ VkDeviceSize bufferImageGranularity, bool isVirtual)
-+ : m_Size(0),
-+ m_pAllocationCallbacks(pAllocationCallbacks),
-+ m_BufferImageGranularity(bufferImageGranularity),
-+ m_IsVirtual(isVirtual) {}
-+
-+void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const
-+{
-+ if (IsVirtual())
-+ {
-+ VMA_DEBUG_LOG_FORMAT("UNFREED VIRTUAL ALLOCATION; Offset: %llu; Size: %llu; UserData: %p", offset, size, userData);
-+ }
-+ else
-+ {
-+ VMA_ASSERT(userData != VMA_NULL);
-+ VmaAllocation allocation = reinterpret_cast<VmaAllocation>(userData);
-+
-+ userData = allocation->GetUserData();
-+ const char* name = allocation->GetName();
-+
-+#if VMA_STATS_STRING_ENABLED
-+ VMA_DEBUG_LOG_FORMAT("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %s; Usage: %u",
-+ offset, size, userData, name ? name : "vma_empty",
-+ VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()],
-+ allocation->GetBufferImageUsage());
-+#else
-+ VMA_DEBUG_LOG_FORMAT("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %u",
-+ offset, size, userData, name ? name : "vma_empty",
-+ (uint32_t)allocation->GetSuballocationType());
-+#endif // VMA_STATS_STRING_ENABLED
-+ }
-+
-+}
-+
-+#if VMA_STATS_STRING_ENABLED
-+void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json,
-+ VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const
-+{
-+ json.WriteString("TotalBytes");
-+ json.WriteNumber(GetSize());
-+
-+ json.WriteString("UnusedBytes");
-+ json.WriteNumber(unusedBytes);
-+
-+ json.WriteString("Allocations");
-+ json.WriteNumber((uint64_t)allocationCount);
-+
-+ json.WriteString("UnusedRanges");
-+ json.WriteNumber((uint64_t)unusedRangeCount);
-+
-+ json.WriteString("Suballocations");
-+ json.BeginArray();
-+}
-+
-+void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json,
-+ VkDeviceSize offset, VkDeviceSize size, void* userData) const
-+{
-+ json.BeginObject(true);
-+
-+ json.WriteString("Offset");
-+ json.WriteNumber(offset);
-+
-+ if (IsVirtual())
-+ {
-+ json.WriteString("Size");
-+ json.WriteNumber(size);
-+ if (userData)
-+ {
-+ json.WriteString("CustomData");
-+ json.BeginString();
-+ json.ContinueString_Pointer(userData);
-+ json.EndString();
-+ }
-+ }
-+ else
-+ {
-+ ((VmaAllocation)userData)->PrintParameters(json);
-+ }
-+
-+ json.EndObject();
-+}
-+
-+void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json,
-+ VkDeviceSize offset, VkDeviceSize size) const
-+{
-+ json.BeginObject(true);
-+
-+ json.WriteString("Offset");
-+ json.WriteNumber(offset);
-+
-+ json.WriteString("Type");
-+ json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[VMA_SUBALLOCATION_TYPE_FREE]);
-+
-+ json.WriteString("Size");
-+ json.WriteNumber(size);
-+
-+ json.EndObject();
-+}
-+
-+void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const
-+{
-+ json.EndArray();
-+}
-+#endif // VMA_STATS_STRING_ENABLED
-+#endif // _VMA_BLOCK_METADATA_FUNCTIONS
-+#endif // _VMA_BLOCK_METADATA
-+
-+#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY
-+// Before deleting object of this class remember to call 'Destroy()'
-+class VmaBlockBufferImageGranularity final
-+{
-+public:
-+ struct ValidationContext
-+ {
-+ const VkAllocationCallbacks* allocCallbacks;
-+ uint16_t* pageAllocs;
-+ };
-+
-+ VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity);
-+ ~VmaBlockBufferImageGranularity();
-+
-+ bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_BUFFER_IMAGE_GRANULARITY; }
-+
-+ void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size);
-+ // Before destroying object you must call free it's memory
-+ void Destroy(const VkAllocationCallbacks* pAllocationCallbacks);
-+
-+ void RoundupAllocRequest(VmaSuballocationType allocType,
-+ VkDeviceSize& inOutAllocSize,
-+ VkDeviceSize& inOutAllocAlignment) const;
-+
-+ bool CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
-+ VkDeviceSize allocSize,
-+ VkDeviceSize blockOffset,
-+ VkDeviceSize blockSize,
-+ VmaSuballocationType allocType) const;
-+
-+ void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size);
-+ void FreePages(VkDeviceSize offset, VkDeviceSize size);
-+ void Clear();
-+
-+ ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks,
-+ bool isVirutal) const;
-+ bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const;
-+ bool FinishValidation(ValidationContext& ctx) const;
-+
-+private:
-+ static const uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256;
-+
-+ struct RegionInfo
-+ {
-+ uint8_t allocType;
-+ uint16_t allocCount;
-+ };
-+
-+ VkDeviceSize m_BufferImageGranularity;
-+ uint32_t m_RegionCount;
-+ RegionInfo* m_RegionInfo;
-+
-+ uint32_t GetStartPage(VkDeviceSize offset) const { return OffsetToPageIndex(offset & ~(m_BufferImageGranularity - 1)); }
-+ uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return OffsetToPageIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); }
-+
-+ uint32_t OffsetToPageIndex(VkDeviceSize offset) const;
-+ void AllocPage(RegionInfo& page, uint8_t allocType);
-+};
-+
-+#ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
-+VmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity)
-+ : m_BufferImageGranularity(bufferImageGranularity),
-+ m_RegionCount(0),
-+ m_RegionInfo(VMA_NULL) {}
-+
-+VmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity()
-+{
-+ VMA_ASSERT(m_RegionInfo == VMA_NULL && "Free not called before destroying object!");
-+}
-+
-+void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size)
-+{
-+ if (IsEnabled())
-+ {
-+ m_RegionCount = static_cast<uint32_t>(VmaDivideRoundingUp(size, m_BufferImageGranularity));
-+ m_RegionInfo = vma_new_array(pAllocationCallbacks, RegionInfo, m_RegionCount);
-+ memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
-+ }
-+}
-+
-+void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks)
-+{
-+ if (m_RegionInfo)
-+ {
-+ vma_delete_array(pAllocationCallbacks, m_RegionInfo, m_RegionCount);
-+ m_RegionInfo = VMA_NULL;
-+ }
-+}
-+
-+void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType,
-+ VkDeviceSize& inOutAllocSize,
-+ VkDeviceSize& inOutAllocAlignment) const
-+{
-+ if (m_BufferImageGranularity > 1 &&
-+ m_BufferImageGranularity <= MAX_LOW_BUFFER_IMAGE_GRANULARITY)
-+ {
-+ if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
-+ allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
-+ allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
-+ {
-+ inOutAllocAlignment = VMA_MAX(inOutAllocAlignment, m_BufferImageGranularity);
-+ inOutAllocSize = VmaAlignUp(inOutAllocSize, m_BufferImageGranularity);
-+ }
-+ }
-+}
-+
-+bool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset,
-+ VkDeviceSize allocSize,
-+ VkDeviceSize blockOffset,
-+ VkDeviceSize blockSize,
-+ VmaSuballocationType allocType) const
-+{
-+ if (IsEnabled())
-+ {
-+ uint32_t startPage = GetStartPage(inOutAllocOffset);
-+ if (m_RegionInfo[startPage].allocCount > 0 &&
-+ VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[startPage].allocType), allocType))
-+ {
-+ inOutAllocOffset = VmaAlignUp(inOutAllocOffset, m_BufferImageGranularity);
-+ if (blockSize < allocSize + inOutAllocOffset - blockOffset)
-+ return true;
-+ ++startPage;
-+ }
-+ uint32_t endPage = GetEndPage(inOutAllocOffset, allocSize);
-+ if (endPage != startPage &&
-+ m_RegionInfo[endPage].allocCount > 0 &&
-+ VmaIsBufferImageGranularityConflict(static_cast<VmaSuballocationType>(m_RegionInfo[endPage].allocType), allocType))
-+ {
-+ return true;
-+ }
-+ }
-+ return false;
-+}
-+
-+void VmaBlockBufferImageGranularity::AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size)
-+{
-+ if (IsEnabled())
-+ {
-+ uint32_t startPage = GetStartPage(offset);
-+ AllocPage(m_RegionInfo[startPage], allocType);
-+
-+ uint32_t endPage = GetEndPage(offset, size);
-+ if (startPage != endPage)
-+ AllocPage(m_RegionInfo[endPage], allocType);
-+ }
-+}
-+
-+void VmaBlockBufferImageGranularity::FreePages(VkDeviceSize offset, VkDeviceSize size)
-+{
-+ if (IsEnabled())
-+ {
-+ uint32_t startPage = GetStartPage(offset);
-+ --m_RegionInfo[startPage].allocCount;
-+ if (m_RegionInfo[startPage].allocCount == 0)
-+ m_RegionInfo[startPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;
-+ uint32_t endPage = GetEndPage(offset, size);
-+ if (startPage != endPage)
-+ {
-+ --m_RegionInfo[endPage].allocCount;
-+ if (m_RegionInfo[endPage].allocCount == 0)
-+ m_RegionInfo[endPage].allocType = VMA_SUBALLOCATION_TYPE_FREE;
-+ }
-+ }
-+}
-+
-+void VmaBlockBufferImageGranularity::Clear()
-+{
-+ if (m_RegionInfo)
-+ memset(m_RegionInfo, 0, m_RegionCount * sizeof(RegionInfo));
-+}
-+
-+VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation(
-+ const VkAllocationCallbacks* pAllocationCallbacks, bool isVirutal) const
-+{
-+ ValidationContext ctx{ pAllocationCallbacks, VMA_NULL };
-+ if (!isVirutal && IsEnabled())
-+ {
-+ ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount);
-+ memset(ctx.pageAllocs, 0, m_RegionCount * sizeof(uint16_t));
-+ }
-+ return ctx;
-+}
-+
-+bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx,
-+ VkDeviceSize offset, VkDeviceSize size) const
-+{
-+ if (IsEnabled())
-+ {
-+ uint32_t start = GetStartPage(offset);
-+ ++ctx.pageAllocs[start];
-+ VMA_VALIDATE(m_RegionInfo[start].allocCount > 0);
-+
-+ uint32_t end = GetEndPage(offset, size);
-+ if (start != end)
-+ {
-+ ++ctx.pageAllocs[end];
-+ VMA_VALIDATE(m_RegionInfo[end].allocCount > 0);
-+ }
-+ }
-+ return true;
-+}
-+
-+bool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const
-+{
-+ // Check proper page structure
-+ if (IsEnabled())
-+ {
-+ VMA_ASSERT(ctx.pageAllocs != VMA_NULL && "Validation context not initialized!");
-+
-+ for (uint32_t page = 0; page < m_RegionCount; ++page)
-+ {
-+ VMA_VALIDATE(ctx.pageAllocs[page] == m_RegionInfo[page].allocCount);
-+ }
-+ vma_delete_array(ctx.allocCallbacks, ctx.pageAllocs, m_RegionCount);
-+ ctx.pageAllocs = VMA_NULL;
-+ }
-+ return true;
-+}
-+
-+uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) const
-+{
-+ return static_cast<uint32_t>(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity));
-+}
-+
-+void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType)
-+{
-+ // When current alloc type is free then it can be overridden by new type
-+ if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE))
-+ page.allocType = allocType;
-+
-+ ++page.allocCount;
-+}
-+#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS
-+#endif // _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY
-+
-+#if 0
-+#ifndef _VMA_BLOCK_METADATA_GENERIC
-+class VmaBlockMetadata_Generic : public VmaBlockMetadata
-+{
-+ friend class VmaDefragmentationAlgorithm_Generic;
-+ friend class VmaDefragmentationAlgorithm_Fast;
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Generic)
-+public:
-+ VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks,
-+ VkDeviceSize bufferImageGranularity, bool isVirtual);
-+ virtual ~VmaBlockMetadata_Generic() = default;
-+
-+ size_t GetAllocationCount() const override { return m_Suballocations.size() - m_FreeCount; }
-+ VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; }
-+ bool IsEmpty() const override { return (m_Suballocations.size() == 1) && (m_FreeCount == 1); }
-+ void Free(VmaAllocHandle allocHandle) override { FreeSuballocation(FindAtOffset((VkDeviceSize)allocHandle - 1)); }
-+ VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }
-+
-+ void Init(VkDeviceSize size) override;
-+ bool Validate() const override;
-+
-+ void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
-+ void AddStatistics(VmaStatistics& inoutStats) const override;
-+
-+#if VMA_STATS_STRING_ENABLED
-+ void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override;
-+#endif
-+
-+ bool CreateAllocationRequest(
-+ VkDeviceSize allocSize,
-+ VkDeviceSize allocAlignment,
-+ bool upperAddress,
-+ VmaSuballocationType allocType,
-+ uint32_t strategy,
-+ VmaAllocationRequest* pAllocationRequest) override;
-+
-+ VkResult CheckCorruption(const void* pBlockData) override;
-+
-+ void Alloc(
-+ const VmaAllocationRequest& request,
-+ VmaSuballocationType type,
-+ void* userData) override;
-+
-+ void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
-+ void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
-+ VmaAllocHandle GetAllocationListBegin() const override;
-+ VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
-+ void Clear() override;
-+ void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
-+ void DebugLogAllAllocations() const override;
-+
-+private:
-+ uint32_t m_FreeCount;
-+ VkDeviceSize m_SumFreeSize;
-+ VmaSuballocationList m_Suballocations;
-+ // Suballocations that are free. Sorted by size, ascending.
-+ VmaVector<VmaSuballocationList::iterator, VmaStlAllocator<VmaSuballocationList::iterator>> m_FreeSuballocationsBySize;
-+
-+ VkDeviceSize AlignAllocationSize(VkDeviceSize size) const { return IsVirtual() ? size : VmaAlignUp(size, (VkDeviceSize)16); }
-+
-+ VmaSuballocationList::iterator FindAtOffset(VkDeviceSize offset) const;
-+ bool ValidateFreeSuballocationList() const;
-+
-+ // Checks if requested suballocation with given parameters can be placed in given pFreeSuballocItem.
-+ // If yes, fills pOffset and returns true. If no, returns false.
-+ bool CheckAllocation(
-+ VkDeviceSize allocSize,
-+ VkDeviceSize allocAlignment,
-+ VmaSuballocationType allocType,
-+ VmaSuballocationList::const_iterator suballocItem,
-+ VmaAllocHandle* pAllocHandle) const;
-+
-+ // Given free suballocation, it merges it with following one, which must also be free.
-+ void MergeFreeWithNext(VmaSuballocationList::iterator item);
-+ // Releases given suballocation, making it free.
-+ // Merges it with adjacent free suballocations if applicable.
-+ // Returns iterator to new free suballocation at this place.
-+ VmaSuballocationList::iterator FreeSuballocation(VmaSuballocationList::iterator suballocItem);
-+ // Given free suballocation, it inserts it into sorted list of
-+ // m_FreeSuballocationsBySize if it is suitable.
-+ void RegisterFreeSuballocation(VmaSuballocationList::iterator item);
-+ // Given free suballocation, it removes it from sorted list of
-+ // m_FreeSuballocationsBySize if it is suitable.
-+ void UnregisterFreeSuballocation(VmaSuballocationList::iterator item);
-+};
-+
-+#ifndef _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS
-+VmaBlockMetadata_Generic::VmaBlockMetadata_Generic(const VkAllocationCallbacks* pAllocationCallbacks,
-+ VkDeviceSize bufferImageGranularity, bool isVirtual)
-+ : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
-+ m_FreeCount(0),
-+ m_SumFreeSize(0),
-+ m_Suballocations(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
-+ m_FreeSuballocationsBySize(VmaStlAllocator<VmaSuballocationList::iterator>(pAllocationCallbacks)) {}
-+
-+void VmaBlockMetadata_Generic::Init(VkDeviceSize size)
-+{
-+ VmaBlockMetadata::Init(size);
-+
-+ m_FreeCount = 1;
-+ m_SumFreeSize = size;
-+
-+ VmaSuballocation suballoc = {};
-+ suballoc.offset = 0;
-+ suballoc.size = size;
-+ suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-+
-+ m_Suballocations.push_back(suballoc);
-+ m_FreeSuballocationsBySize.push_back(m_Suballocations.begin());
-+}
-+
-+bool VmaBlockMetadata_Generic::Validate() const
-+{
-+ VMA_VALIDATE(!m_Suballocations.empty());
-+
-+ // Expected offset of new suballocation as calculated from previous ones.
-+ VkDeviceSize calculatedOffset = 0;
-+ // Expected number of free suballocations as calculated from traversing their list.
-+ uint32_t calculatedFreeCount = 0;
-+ // Expected sum size of free suballocations as calculated from traversing their list.
-+ VkDeviceSize calculatedSumFreeSize = 0;
-+ // Expected number of free suballocations that should be registered in
-+ // m_FreeSuballocationsBySize calculated from traversing their list.
-+ size_t freeSuballocationsToRegister = 0;
-+ // True if previous visited suballocation was free.
-+ bool prevFree = false;
-+
-+ const VkDeviceSize debugMargin = GetDebugMargin();
-+
-+ for (const auto& subAlloc : m_Suballocations)
-+ {
-+ // Actual offset of this suballocation doesn't match expected one.
-+ VMA_VALIDATE(subAlloc.offset == calculatedOffset);
-+
-+ const bool currFree = (subAlloc.type == VMA_SUBALLOCATION_TYPE_FREE);
-+ // Two adjacent free suballocations are invalid. They should be merged.
-+ VMA_VALIDATE(!prevFree || !currFree);
-+
-+ VmaAllocation alloc = (VmaAllocation)subAlloc.userData;
-+ if (!IsVirtual())
-+ {
-+ VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
-+ }
-+
-+ if (currFree)
-+ {
-+ calculatedSumFreeSize += subAlloc.size;
-+ ++calculatedFreeCount;
-+ ++freeSuballocationsToRegister;
-+
-+ // Margin required between allocations - every free space must be at least that large.
-+ VMA_VALIDATE(subAlloc.size >= debugMargin);
-+ }
-+ else
-+ {
-+ if (!IsVirtual())
-+ {
-+ VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == subAlloc.offset + 1);
-+ VMA_VALIDATE(alloc->GetSize() == subAlloc.size);
-+ }
-+
-+ // Margin required between allocations - previous allocation must be free.
-+ VMA_VALIDATE(debugMargin == 0 || prevFree);
-+ }
-+
-+ calculatedOffset += subAlloc.size;
-+ prevFree = currFree;
-+ }
-+
-+ // Number of free suballocations registered in m_FreeSuballocationsBySize doesn't
-+ // match expected one.
-+ VMA_VALIDATE(m_FreeSuballocationsBySize.size() == freeSuballocationsToRegister);
-+
-+ VkDeviceSize lastSize = 0;
-+ for (size_t i = 0; i < m_FreeSuballocationsBySize.size(); ++i)
-+ {
-+ VmaSuballocationList::iterator suballocItem = m_FreeSuballocationsBySize[i];
-+
-+ // Only free suballocations can be registered in m_FreeSuballocationsBySize.
-+ VMA_VALIDATE(suballocItem->type == VMA_SUBALLOCATION_TYPE_FREE);
-+ // They must be sorted by size ascending.
-+ VMA_VALIDATE(suballocItem->size >= lastSize);
-+
-+ lastSize = suballocItem->size;
-+ }
-+
-+ // Check if totals match calculated values.
-+ VMA_VALIDATE(ValidateFreeSuballocationList());
-+ VMA_VALIDATE(calculatedOffset == GetSize());
-+ VMA_VALIDATE(calculatedSumFreeSize == m_SumFreeSize);
-+ VMA_VALIDATE(calculatedFreeCount == m_FreeCount);
-+
-+ return true;
-+}
-+
-+void VmaBlockMetadata_Generic::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
-+{
-+ const uint32_t rangeCount = (uint32_t)m_Suballocations.size();
-+ inoutStats.statistics.blockCount++;
-+ inoutStats.statistics.blockBytes += GetSize();
-+
-+ for (const auto& suballoc : m_Suballocations)
-+ {
-+ if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-+ VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
-+ else
-+ VmaAddDetailedStatisticsUnusedRange(inoutStats, suballoc.size);
-+ }
-+}
-+
-+void VmaBlockMetadata_Generic::AddStatistics(VmaStatistics& inoutStats) const
-+{
-+ inoutStats.blockCount++;
-+ inoutStats.allocationCount += (uint32_t)m_Suballocations.size() - m_FreeCount;
-+ inoutStats.blockBytes += GetSize();
-+ inoutStats.allocationBytes += GetSize() - m_SumFreeSize;
-+}
-+
-+#if VMA_STATS_STRING_ENABLED
-+void VmaBlockMetadata_Generic::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const
-+{
-+ PrintDetailedMap_Begin(json,
-+ m_SumFreeSize, // unusedBytes
-+ m_Suballocations.size() - (size_t)m_FreeCount, // allocationCount
-+ m_FreeCount, // unusedRangeCount
-+ mapRefCount);
-+
-+ for (const auto& suballoc : m_Suballocations)
-+ {
-+ if (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE)
-+ {
-+ PrintDetailedMap_UnusedRange(json, suballoc.offset, suballoc.size);
-+ }
-+ else
-+ {
-+ PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
-+ }
-+ }
-+
-+ PrintDetailedMap_End(json);
-+}
-+#endif // VMA_STATS_STRING_ENABLED
-+
-+bool VmaBlockMetadata_Generic::CreateAllocationRequest(
-+ VkDeviceSize allocSize,
-+ VkDeviceSize allocAlignment,
-+ bool upperAddress,
-+ VmaSuballocationType allocType,
-+ uint32_t strategy,
-+ VmaAllocationRequest* pAllocationRequest)
-+{
-+ VMA_ASSERT(allocSize > 0);
-+ VMA_ASSERT(!upperAddress);
-+ VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
-+ VMA_ASSERT(pAllocationRequest != VMA_NULL);
-+ VMA_HEAVY_ASSERT(Validate());
-+
-+ allocSize = AlignAllocationSize(allocSize);
-+
-+ pAllocationRequest->type = VmaAllocationRequestType::Normal;
-+ pAllocationRequest->size = allocSize;
-+
-+ const VkDeviceSize debugMargin = GetDebugMargin();
-+
-+ // There is not enough total free space in this block to fulfill the request: Early return.
-+ if (m_SumFreeSize < allocSize + debugMargin)
-+ {
-+ return false;
-+ }
-+
-+ // New algorithm, efficiently searching freeSuballocationsBySize.
-+ const size_t freeSuballocCount = m_FreeSuballocationsBySize.size();
-+ if (freeSuballocCount > 0)
-+ {
-+ if (strategy == 0 ||
-+ strategy == VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT)
-+ {
-+ // Find first free suballocation with size not less than allocSize + debugMargin.
-+ VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
-+ m_FreeSuballocationsBySize.data(),
-+ m_FreeSuballocationsBySize.data() + freeSuballocCount,
-+ allocSize + debugMargin,
-+ VmaSuballocationItemSizeLess());
-+ size_t index = it - m_FreeSuballocationsBySize.data();
-+ for (; index < freeSuballocCount; ++index)
-+ {
-+ if (CheckAllocation(
-+ allocSize,
-+ allocAlignment,
-+ allocType,
-+ m_FreeSuballocationsBySize[index],
-+ &pAllocationRequest->allocHandle))
-+ {
-+ pAllocationRequest->item = m_FreeSuballocationsBySize[index];
-+ return true;
-+ }
-+ }
-+ }
-+ else if (strategy == VMA_ALLOCATION_INTERNAL_STRATEGY_MIN_OFFSET)
-+ {
-+ for (VmaSuballocationList::iterator it = m_Suballocations.begin();
-+ it != m_Suballocations.end();
-+ ++it)
-+ {
-+ if (it->type == VMA_SUBALLOCATION_TYPE_FREE && CheckAllocation(
-+ allocSize,
-+ allocAlignment,
-+ allocType,
-+ it,
-+ &pAllocationRequest->allocHandle))
-+ {
-+ pAllocationRequest->item = it;
-+ return true;
-+ }
-+ }
-+ }
-+ else
-+ {
-+ VMA_ASSERT(strategy & (VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT ));
-+ // Search staring from biggest suballocations.
-+ for (size_t index = freeSuballocCount; index--; )
-+ {
-+ if (CheckAllocation(
-+ allocSize,
-+ allocAlignment,
-+ allocType,
-+ m_FreeSuballocationsBySize[index],
-+ &pAllocationRequest->allocHandle))
-+ {
-+ pAllocationRequest->item = m_FreeSuballocationsBySize[index];
-+ return true;
-+ }
-+ }
-+ }
-+ }
-+
-+ return false;
-+}
-+
-+VkResult VmaBlockMetadata_Generic::CheckCorruption(const void* pBlockData)
-+{
-+ for (auto& suballoc : m_Suballocations)
-+ {
-+ if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-+ {
-+ if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
-+ {
-+ VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
-+ return VK_ERROR_UNKNOWN_COPY;
-+ }
-+ }
-+ }
-+
-+ return VK_SUCCESS;
-+}
-+
-+void VmaBlockMetadata_Generic::Alloc(
-+ const VmaAllocationRequest& request,
-+ VmaSuballocationType type,
-+ void* userData)
-+{
-+ VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
-+ VMA_ASSERT(request.item != m_Suballocations.end());
-+ VmaSuballocation& suballoc = *request.item;
-+ // Given suballocation is a free block.
-+ VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
-+
-+ // Given offset is inside this suballocation.
-+ VMA_ASSERT((VkDeviceSize)request.allocHandle - 1 >= suballoc.offset);
-+ const VkDeviceSize paddingBegin = (VkDeviceSize)request.allocHandle - suballoc.offset - 1;
-+ VMA_ASSERT(suballoc.size >= paddingBegin + request.size);
-+ const VkDeviceSize paddingEnd = suballoc.size - paddingBegin - request.size;
-+
-+ // Unregister this free suballocation from m_FreeSuballocationsBySize and update
-+ // it to become used.
-+ UnregisterFreeSuballocation(request.item);
-+
-+ suballoc.offset = (VkDeviceSize)request.allocHandle - 1;
-+ suballoc.size = request.size;
-+ suballoc.type = type;
-+ suballoc.userData = userData;
-+
-+ // If there are any free bytes remaining at the end, insert new free suballocation after current one.
-+ if (paddingEnd)
-+ {
-+ VmaSuballocation paddingSuballoc = {};
-+ paddingSuballoc.offset = suballoc.offset + suballoc.size;
-+ paddingSuballoc.size = paddingEnd;
-+ paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-+ VmaSuballocationList::iterator next = request.item;
-+ ++next;
-+ const VmaSuballocationList::iterator paddingEndItem =
-+ m_Suballocations.insert(next, paddingSuballoc);
-+ RegisterFreeSuballocation(paddingEndItem);
-+ }
-+
-+ // If there are any free bytes remaining at the beginning, insert new free suballocation before current one.
-+ if (paddingBegin)
-+ {
-+ VmaSuballocation paddingSuballoc = {};
-+ paddingSuballoc.offset = suballoc.offset - paddingBegin;
-+ paddingSuballoc.size = paddingBegin;
-+ paddingSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-+ const VmaSuballocationList::iterator paddingBeginItem =
-+ m_Suballocations.insert(request.item, paddingSuballoc);
-+ RegisterFreeSuballocation(paddingBeginItem);
-+ }
-+
-+ // Update totals.
-+ m_FreeCount = m_FreeCount - 1;
-+ if (paddingBegin > 0)
-+ {
-+ ++m_FreeCount;
-+ }
-+ if (paddingEnd > 0)
-+ {
-+ ++m_FreeCount;
-+ }
-+ m_SumFreeSize -= request.size;
-+}
-+
-+void VmaBlockMetadata_Generic::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
-+{
-+ outInfo.offset = (VkDeviceSize)allocHandle - 1;
-+ const VmaSuballocation& suballoc = *FindAtOffset(outInfo.offset);
-+ outInfo.size = suballoc.size;
-+ outInfo.pUserData = suballoc.userData;
-+}
-+
-+void* VmaBlockMetadata_Generic::GetAllocationUserData(VmaAllocHandle allocHandle) const
-+{
-+ return FindAtOffset((VkDeviceSize)allocHandle - 1)->userData;
-+}
-+
-+VmaAllocHandle VmaBlockMetadata_Generic::GetAllocationListBegin() const
-+{
-+ if (IsEmpty())
-+ return VK_NULL_HANDLE;
-+
-+ for (const auto& suballoc : m_Suballocations)
-+ {
-+ if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-+ return (VmaAllocHandle)(suballoc.offset + 1);
-+ }
-+ VMA_ASSERT(false && "Should contain at least 1 allocation!");
-+ return VK_NULL_HANDLE;
-+}
-+
-+VmaAllocHandle VmaBlockMetadata_Generic::GetNextAllocation(VmaAllocHandle prevAlloc) const
-+{
-+ VmaSuballocationList::const_iterator prev = FindAtOffset((VkDeviceSize)prevAlloc - 1);
-+
-+ for (VmaSuballocationList::const_iterator it = ++prev; it != m_Suballocations.end(); ++it)
-+ {
-+ if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
-+ return (VmaAllocHandle)(it->offset + 1);
-+ }
-+ return VK_NULL_HANDLE;
-+}
-+
-+void VmaBlockMetadata_Generic::Clear()
-+{
-+ const VkDeviceSize size = GetSize();
-+
-+ VMA_ASSERT(IsVirtual());
-+ m_FreeCount = 1;
-+ m_SumFreeSize = size;
-+ m_Suballocations.clear();
-+ m_FreeSuballocationsBySize.clear();
-+
-+ VmaSuballocation suballoc = {};
-+ suballoc.offset = 0;
-+ suballoc.size = size;
-+ suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-+ m_Suballocations.push_back(suballoc);
-+
-+ m_FreeSuballocationsBySize.push_back(m_Suballocations.begin());
-+}
-+
-+void VmaBlockMetadata_Generic::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
-+{
-+ VmaSuballocation& suballoc = *FindAtOffset((VkDeviceSize)allocHandle - 1);
-+ suballoc.userData = userData;
-+}
-+
-+void VmaBlockMetadata_Generic::DebugLogAllAllocations() const
-+{
-+ for (const auto& suballoc : m_Suballocations)
-+ {
-+ if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-+ DebugLogAllocation(suballoc.offset, suballoc.size, suballoc.userData);
-+ }
-+}
-+
-+VmaSuballocationList::iterator VmaBlockMetadata_Generic::FindAtOffset(VkDeviceSize offset) const
-+{
-+ VMA_HEAVY_ASSERT(!m_Suballocations.empty());
-+ const VkDeviceSize last = m_Suballocations.rbegin()->offset;
-+ if (last == offset)
-+ return m_Suballocations.rbegin().drop_const();
-+ const VkDeviceSize first = m_Suballocations.begin()->offset;
-+ if (first == offset)
-+ return m_Suballocations.begin().drop_const();
-+
-+ const size_t suballocCount = m_Suballocations.size();
-+ const VkDeviceSize step = (last - first + m_Suballocations.begin()->size) / suballocCount;
-+ auto findSuballocation = [&](auto begin, auto end) -> VmaSuballocationList::iterator
-+ {
-+ for (auto suballocItem = begin;
-+ suballocItem != end;
-+ ++suballocItem)
-+ {
-+ if (suballocItem->offset == offset)
-+ return suballocItem.drop_const();
-+ }
-+ VMA_ASSERT(false && "Not found!");
-+ return m_Suballocations.end().drop_const();
-+ };
-+ // If requested offset is closer to the end of range, search from the end
-+ if (offset - first > suballocCount * step / 2)
-+ {
-+ return findSuballocation(m_Suballocations.rbegin(), m_Suballocations.rend());
-+ }
-+ return findSuballocation(m_Suballocations.begin(), m_Suballocations.end());
-+}
-+
-+bool VmaBlockMetadata_Generic::ValidateFreeSuballocationList() const
-+{
-+ VkDeviceSize lastSize = 0;
-+ for (size_t i = 0, count = m_FreeSuballocationsBySize.size(); i < count; ++i)
-+ {
-+ const VmaSuballocationList::iterator it = m_FreeSuballocationsBySize[i];
-+
-+ VMA_VALIDATE(it->type == VMA_SUBALLOCATION_TYPE_FREE);
-+ VMA_VALIDATE(it->size >= lastSize);
-+ lastSize = it->size;
-+ }
-+ return true;
-+}
-+
-+bool VmaBlockMetadata_Generic::CheckAllocation(
-+ VkDeviceSize allocSize,
-+ VkDeviceSize allocAlignment,
-+ VmaSuballocationType allocType,
-+ VmaSuballocationList::const_iterator suballocItem,
-+ VmaAllocHandle* pAllocHandle) const
-+{
-+ VMA_ASSERT(allocSize > 0);
-+ VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
-+ VMA_ASSERT(suballocItem != m_Suballocations.cend());
-+ VMA_ASSERT(pAllocHandle != VMA_NULL);
-+
-+ const VkDeviceSize debugMargin = GetDebugMargin();
-+ const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
-+
-+ const VmaSuballocation& suballoc = *suballocItem;
-+ VMA_ASSERT(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
-+
-+ // Size of this suballocation is too small for this request: Early return.
-+ if (suballoc.size < allocSize)
-+ {
-+ return false;
-+ }
-+
-+ // Start from offset equal to beginning of this suballocation.
-+ VkDeviceSize offset = suballoc.offset + (suballocItem == m_Suballocations.cbegin() ? 0 : GetDebugMargin());
-+
-+ // Apply debugMargin from the end of previous alloc.
-+ if (debugMargin > 0)
-+ {
-+ offset += debugMargin;
-+ }
-+
-+ // Apply alignment.
-+ offset = VmaAlignUp(offset, allocAlignment);
-+
-+ // Check previous suballocations for BufferImageGranularity conflicts.
-+ // Make bigger alignment if necessary.
-+ if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment)
-+ {
-+ bool bufferImageGranularityConflict = false;
-+ VmaSuballocationList::const_iterator prevSuballocItem = suballocItem;
-+ while (prevSuballocItem != m_Suballocations.cbegin())
-+ {
-+ --prevSuballocItem;
-+ const VmaSuballocation& prevSuballoc = *prevSuballocItem;
-+ if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, offset, bufferImageGranularity))
-+ {
-+ if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
-+ {
-+ bufferImageGranularityConflict = true;
-+ break;
-+ }
-+ }
-+ else
-+ // Already on previous page.
-+ break;
-+ }
-+ if (bufferImageGranularityConflict)
-+ {
-+ offset = VmaAlignUp(offset, bufferImageGranularity);
-+ }
-+ }
-+
-+ // Calculate padding at the beginning based on current offset.
-+ const VkDeviceSize paddingBegin = offset - suballoc.offset;
-+
-+ // Fail if requested size plus margin after is bigger than size of this suballocation.
-+ if (paddingBegin + allocSize + debugMargin > suballoc.size)
-+ {
-+ return false;
-+ }
-+
-+ // Check next suballocations for BufferImageGranularity conflicts.
-+ // If conflict exists, allocation cannot be made here.
-+ if (allocSize % bufferImageGranularity || offset % bufferImageGranularity)
-+ {
-+ VmaSuballocationList::const_iterator nextSuballocItem = suballocItem;
-+ ++nextSuballocItem;
-+ while (nextSuballocItem != m_Suballocations.cend())
-+ {
-+ const VmaSuballocation& nextSuballoc = *nextSuballocItem;
-+ if (VmaBlocksOnSamePage(offset, allocSize, nextSuballoc.offset, bufferImageGranularity))
-+ {
-+ if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
-+ {
-+ return false;
-+ }
-+ }
-+ else
-+ {
-+ // Already on next page.
-+ break;
-+ }
-+ ++nextSuballocItem;
-+ }
-+ }
-+
-+ *pAllocHandle = (VmaAllocHandle)(offset + 1);
-+ // All tests passed: Success. pAllocHandle is already filled.
-+ return true;
-+}
-+
-+void VmaBlockMetadata_Generic::MergeFreeWithNext(VmaSuballocationList::iterator item)
-+{
-+ VMA_ASSERT(item != m_Suballocations.end());
-+ VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
-+
-+ VmaSuballocationList::iterator nextItem = item;
-+ ++nextItem;
-+ VMA_ASSERT(nextItem != m_Suballocations.end());
-+ VMA_ASSERT(nextItem->type == VMA_SUBALLOCATION_TYPE_FREE);
-+
-+ item->size += nextItem->size;
-+ --m_FreeCount;
-+ m_Suballocations.erase(nextItem);
-+}
-+
-+VmaSuballocationList::iterator VmaBlockMetadata_Generic::FreeSuballocation(VmaSuballocationList::iterator suballocItem)
-+{
-+ // Change this suballocation to be marked as free.
-+ VmaSuballocation& suballoc = *suballocItem;
-+ suballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-+ suballoc.userData = VMA_NULL;
-+
-+ // Update totals.
-+ ++m_FreeCount;
-+ m_SumFreeSize += suballoc.size;
-+
-+ // Merge with previous and/or next suballocation if it's also free.
-+ bool mergeWithNext = false;
-+ bool mergeWithPrev = false;
-+
-+ VmaSuballocationList::iterator nextItem = suballocItem;
-+ ++nextItem;
-+ if ((nextItem != m_Suballocations.end()) && (nextItem->type == VMA_SUBALLOCATION_TYPE_FREE))
-+ {
-+ mergeWithNext = true;
-+ }
-+
-+ VmaSuballocationList::iterator prevItem = suballocItem;
-+ if (suballocItem != m_Suballocations.begin())
-+ {
-+ --prevItem;
-+ if (prevItem->type == VMA_SUBALLOCATION_TYPE_FREE)
-+ {
-+ mergeWithPrev = true;
-+ }
-+ }
-+
-+ if (mergeWithNext)
-+ {
-+ UnregisterFreeSuballocation(nextItem);
-+ MergeFreeWithNext(suballocItem);
-+ }
-+
-+ if (mergeWithPrev)
-+ {
-+ UnregisterFreeSuballocation(prevItem);
-+ MergeFreeWithNext(prevItem);
-+ RegisterFreeSuballocation(prevItem);
-+ return prevItem;
-+ }
-+ else
-+ {
-+ RegisterFreeSuballocation(suballocItem);
-+ return suballocItem;
-+ }
-+}
-+
-+void VmaBlockMetadata_Generic::RegisterFreeSuballocation(VmaSuballocationList::iterator item)
-+{
-+ VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
-+ VMA_ASSERT(item->size > 0);
-+
-+ // You may want to enable this validation at the beginning or at the end of
-+ // this function, depending on what do you want to check.
-+ VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
-+
-+ if (m_FreeSuballocationsBySize.empty())
-+ {
-+ m_FreeSuballocationsBySize.push_back(item);
-+ }
-+ else
-+ {
-+ VmaVectorInsertSorted<VmaSuballocationItemSizeLess>(m_FreeSuballocationsBySize, item);
-+ }
-+
-+ //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
-+}
-+
-+void VmaBlockMetadata_Generic::UnregisterFreeSuballocation(VmaSuballocationList::iterator item)
-+{
-+ VMA_ASSERT(item->type == VMA_SUBALLOCATION_TYPE_FREE);
-+ VMA_ASSERT(item->size > 0);
-+
-+ // You may want to enable this validation at the beginning or at the end of
-+ // this function, depending on what do you want to check.
-+ VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
-+
-+ VmaSuballocationList::iterator* const it = VmaBinaryFindFirstNotLess(
-+ m_FreeSuballocationsBySize.data(),
-+ m_FreeSuballocationsBySize.data() + m_FreeSuballocationsBySize.size(),
-+ item,
-+ VmaSuballocationItemSizeLess());
-+ for (size_t index = it - m_FreeSuballocationsBySize.data();
-+ index < m_FreeSuballocationsBySize.size();
-+ ++index)
-+ {
-+ if (m_FreeSuballocationsBySize[index] == item)
-+ {
-+ VmaVectorRemove(m_FreeSuballocationsBySize, index);
-+ return;
-+ }
-+ VMA_ASSERT((m_FreeSuballocationsBySize[index]->size == item->size) && "Not found.");
-+ }
-+ VMA_ASSERT(0 && "Not found.");
-+
-+ //VMA_HEAVY_ASSERT(ValidateFreeSuballocationList());
-+}
-+#endif // _VMA_BLOCK_METADATA_GENERIC_FUNCTIONS
-+#endif // _VMA_BLOCK_METADATA_GENERIC
-+#endif // #if 0
-+
-+#ifndef _VMA_BLOCK_METADATA_LINEAR
-+/*
-+Allocations and their references in internal data structure look like this:
-+
-+if(m_2ndVectorMode == SECOND_VECTOR_EMPTY):
-+
-+ 0 +-------+
-+ | |
-+ | |
-+ | |
-+ +-------+
-+ | Alloc | 1st[m_1stNullItemsBeginCount]
-+ +-------+
-+ | Alloc | 1st[m_1stNullItemsBeginCount + 1]
-+ +-------+
-+ | ... |
-+ +-------+
-+ | Alloc | 1st[1st.size() - 1]
-+ +-------+
-+ | |
-+ | |
-+ | |
-+GetSize() +-------+
-+
-+if(m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER):
-+
-+ 0 +-------+
-+ | Alloc | 2nd[0]
-+ +-------+
-+ | Alloc | 2nd[1]
-+ +-------+
-+ | ... |
-+ +-------+
-+ | Alloc | 2nd[2nd.size() - 1]
-+ +-------+
-+ | |
-+ | |
-+ | |
-+ +-------+
-+ | Alloc | 1st[m_1stNullItemsBeginCount]
-+ +-------+
-+ | Alloc | 1st[m_1stNullItemsBeginCount + 1]
-+ +-------+
-+ | ... |
-+ +-------+
-+ | Alloc | 1st[1st.size() - 1]
-+ +-------+
-+ | |
-+GetSize() +-------+
-+
-+if(m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK):
-+
-+ 0 +-------+
-+ | |
-+ | |
-+ | |
-+ +-------+
-+ | Alloc | 1st[m_1stNullItemsBeginCount]
-+ +-------+
-+ | Alloc | 1st[m_1stNullItemsBeginCount + 1]
-+ +-------+
-+ | ... |
-+ +-------+
-+ | Alloc | 1st[1st.size() - 1]
-+ +-------+
-+ | |
-+ | |
-+ | |
-+ +-------+
-+ | Alloc | 2nd[2nd.size() - 1]
-+ +-------+
-+ | ... |
-+ +-------+
-+ | Alloc | 2nd[1]
-+ +-------+
-+ | Alloc | 2nd[0]
-+GetSize() +-------+
-+
-+*/
-+class VmaBlockMetadata_Linear : public VmaBlockMetadata
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Linear)
-+public:
-+ VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
-+ VkDeviceSize bufferImageGranularity, bool isVirtual);
-+ virtual ~VmaBlockMetadata_Linear() = default;
-+
-+ VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; }
-+ bool IsEmpty() const override { return GetAllocationCount() == 0; }
-+ VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }
-+
-+ void Init(VkDeviceSize size) override;
-+ bool Validate() const override;
-+ size_t GetAllocationCount() const override;
-+ size_t GetFreeRegionsCount() const override;
-+
-+ void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
-+ void AddStatistics(VmaStatistics& inoutStats) const override;
-+
-+#if VMA_STATS_STRING_ENABLED
-+ void PrintDetailedMap(class VmaJsonWriter& json) const override;
-+#endif
-+
-+ bool CreateAllocationRequest(
-+ VkDeviceSize allocSize,
-+ VkDeviceSize allocAlignment,
-+ bool upperAddress,
-+ VmaSuballocationType allocType,
-+ uint32_t strategy,
-+ VmaAllocationRequest* pAllocationRequest) override;
-+
-+ VkResult CheckCorruption(const void* pBlockData) override;
-+
-+ void Alloc(
-+ const VmaAllocationRequest& request,
-+ VmaSuballocationType type,
-+ void* userData) override;
-+
-+ void Free(VmaAllocHandle allocHandle) override;
-+ void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
-+ void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
-+ VmaAllocHandle GetAllocationListBegin() const override;
-+ VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
-+ VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
-+ void Clear() override;
-+ void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
-+ void DebugLogAllAllocations() const override;
-+
-+private:
-+ /*
-+ There are two suballocation vectors, used in ping-pong way.
-+ The one with index m_1stVectorIndex is called 1st.
-+ The one with index (m_1stVectorIndex ^ 1) is called 2nd.
-+ 2nd can be non-empty only when 1st is not empty.
-+ When 2nd is not empty, m_2ndVectorMode indicates its mode of operation.
-+ */
-+ typedef VmaVector<VmaSuballocation, VmaStlAllocator<VmaSuballocation>> SuballocationVectorType;
-+
-+ enum SECOND_VECTOR_MODE
-+ {
-+ SECOND_VECTOR_EMPTY,
-+ /*
-+ Suballocations in 2nd vector are created later than the ones in 1st, but they
-+ all have smaller offset.
-+ */
-+ SECOND_VECTOR_RING_BUFFER,
-+ /*
-+ Suballocations in 2nd vector are upper side of double stack.
-+ They all have offsets higher than those in 1st vector.
-+ Top of this stack means smaller offsets, but higher indices in this vector.
-+ */
-+ SECOND_VECTOR_DOUBLE_STACK,
-+ };
-+
-+ VkDeviceSize m_SumFreeSize;
-+ SuballocationVectorType m_Suballocations0, m_Suballocations1;
-+ uint32_t m_1stVectorIndex;
-+ SECOND_VECTOR_MODE m_2ndVectorMode;
-+ // Number of items in 1st vector with hAllocation = null at the beginning.
-+ size_t m_1stNullItemsBeginCount;
-+ // Number of other items in 1st vector with hAllocation = null somewhere in the middle.
-+ size_t m_1stNullItemsMiddleCount;
-+ // Number of items in 2nd vector with hAllocation = null.
-+ size_t m_2ndNullItemsCount;
-+
-+ SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
-+ SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
-+ const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; }
-+ const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; }
-+
-+ VmaSuballocation& FindSuballocation(VkDeviceSize offset) const;
-+ bool ShouldCompact1st() const;
-+ void CleanupAfterFree();
-+
-+ bool CreateAllocationRequest_LowerAddress(
-+ VkDeviceSize allocSize,
-+ VkDeviceSize allocAlignment,
-+ VmaSuballocationType allocType,
-+ uint32_t strategy,
-+ VmaAllocationRequest* pAllocationRequest);
-+ bool CreateAllocationRequest_UpperAddress(
-+ VkDeviceSize allocSize,
-+ VkDeviceSize allocAlignment,
-+ VmaSuballocationType allocType,
-+ uint32_t strategy,
-+ VmaAllocationRequest* pAllocationRequest);
-+};
-+
-+#ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
-+VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks,
-+ VkDeviceSize bufferImageGranularity, bool isVirtual)
-+ : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
-+ m_SumFreeSize(0),
-+ m_Suballocations0(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
-+ m_Suballocations1(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)),
-+ m_1stVectorIndex(0),
-+ m_2ndVectorMode(SECOND_VECTOR_EMPTY),
-+ m_1stNullItemsBeginCount(0),
-+ m_1stNullItemsMiddleCount(0),
-+ m_2ndNullItemsCount(0) {}
-+
-+void VmaBlockMetadata_Linear::Init(VkDeviceSize size)
-+{
-+ VmaBlockMetadata::Init(size);
-+ m_SumFreeSize = size;
-+}
-+
-+bool VmaBlockMetadata_Linear::Validate() const
-+{
-+ const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-+ const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-+
-+ VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY));
-+ VMA_VALIDATE(!suballocations1st.empty() ||
-+ suballocations2nd.empty() ||
-+ m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER);
-+
-+ if (!suballocations1st.empty())
-+ {
-+ // Null item at the beginning should be accounted into m_1stNullItemsBeginCount.
-+ VMA_VALIDATE(suballocations1st[m_1stNullItemsBeginCount].type != VMA_SUBALLOCATION_TYPE_FREE);
-+ // Null item at the end should be just pop_back().
-+ VMA_VALIDATE(suballocations1st.back().type != VMA_SUBALLOCATION_TYPE_FREE);
-+ }
-+ if (!suballocations2nd.empty())
-+ {
-+ // Null item at the end should be just pop_back().
-+ VMA_VALIDATE(suballocations2nd.back().type != VMA_SUBALLOCATION_TYPE_FREE);
-+ }
-+
-+ VMA_VALIDATE(m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount <= suballocations1st.size());
-+ VMA_VALIDATE(m_2ndNullItemsCount <= suballocations2nd.size());
-+
-+ VkDeviceSize sumUsedSize = 0;
-+ const size_t suballoc1stCount = suballocations1st.size();
-+ const VkDeviceSize debugMargin = GetDebugMargin();
-+ VkDeviceSize offset = 0;
-+
-+ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-+ {
-+ const size_t suballoc2ndCount = suballocations2nd.size();
-+ size_t nullItem2ndCount = 0;
-+ for (size_t i = 0; i < suballoc2ndCount; ++i)
-+ {
-+ const VmaSuballocation& suballoc = suballocations2nd[i];
-+ const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
-+
-+ VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
-+ if (!IsVirtual())
-+ {
-+ VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
-+ }
-+ VMA_VALIDATE(suballoc.offset >= offset);
-+
-+ if (!currFree)
-+ {
-+ if (!IsVirtual())
-+ {
-+ VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
-+ VMA_VALIDATE(alloc->GetSize() == suballoc.size);
-+ }
-+ sumUsedSize += suballoc.size;
-+ }
-+ else
-+ {
-+ ++nullItem2ndCount;
-+ }
-+
-+ offset = suballoc.offset + suballoc.size + debugMargin;
-+ }
-+
-+ VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
-+ }
-+
-+ for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i)
-+ {
-+ const VmaSuballocation& suballoc = suballocations1st[i];
-+ VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE &&
-+ suballoc.userData == VMA_NULL);
-+ }
-+
-+ size_t nullItem1stCount = m_1stNullItemsBeginCount;
-+
-+ for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i)
-+ {
-+ const VmaSuballocation& suballoc = suballocations1st[i];
-+ const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
-+
-+ VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
-+ if (!IsVirtual())
-+ {
-+ VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
-+ }
-+ VMA_VALIDATE(suballoc.offset >= offset);
-+ VMA_VALIDATE(i >= m_1stNullItemsBeginCount || currFree);
-+
-+ if (!currFree)
-+ {
-+ if (!IsVirtual())
-+ {
-+ VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
-+ VMA_VALIDATE(alloc->GetSize() == suballoc.size);
-+ }
-+ sumUsedSize += suballoc.size;
-+ }
-+ else
-+ {
-+ ++nullItem1stCount;
-+ }
-+
-+ offset = suballoc.offset + suballoc.size + debugMargin;
-+ }
-+ VMA_VALIDATE(nullItem1stCount == m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount);
-+
-+ if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-+ {
-+ const size_t suballoc2ndCount = suballocations2nd.size();
-+ size_t nullItem2ndCount = 0;
-+ for (size_t i = suballoc2ndCount; i--; )
-+ {
-+ const VmaSuballocation& suballoc = suballocations2nd[i];
-+ const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE);
-+
-+ VmaAllocation const alloc = (VmaAllocation)suballoc.userData;
-+ if (!IsVirtual())
-+ {
-+ VMA_VALIDATE(currFree == (alloc == VK_NULL_HANDLE));
-+ }
-+ VMA_VALIDATE(suballoc.offset >= offset);
-+
-+ if (!currFree)
-+ {
-+ if (!IsVirtual())
-+ {
-+ VMA_VALIDATE((VkDeviceSize)alloc->GetAllocHandle() == suballoc.offset + 1);
-+ VMA_VALIDATE(alloc->GetSize() == suballoc.size);
-+ }
-+ sumUsedSize += suballoc.size;
-+ }
-+ else
-+ {
-+ ++nullItem2ndCount;
-+ }
-+
-+ offset = suballoc.offset + suballoc.size + debugMargin;
-+ }
-+
-+ VMA_VALIDATE(nullItem2ndCount == m_2ndNullItemsCount);
-+ }
-+
-+ VMA_VALIDATE(offset <= GetSize());
-+ VMA_VALIDATE(m_SumFreeSize == GetSize() - sumUsedSize);
-+
-+ return true;
-+}
-+
-+size_t VmaBlockMetadata_Linear::GetAllocationCount() const
-+{
-+ return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount +
-+ AccessSuballocations2nd().size() - m_2ndNullItemsCount;
-+}
-+
-+size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const
-+{
-+ // Function only used for defragmentation, which is disabled for this algorithm
-+ VMA_ASSERT(0);
-+ return SIZE_MAX;
-+}
-+
-+void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
-+{
-+ const VkDeviceSize size = GetSize();
-+ const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-+ const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-+ const size_t suballoc1stCount = suballocations1st.size();
-+ const size_t suballoc2ndCount = suballocations2nd.size();
-+
-+ inoutStats.statistics.blockCount++;
-+ inoutStats.statistics.blockBytes += size;
-+
-+ VkDeviceSize lastOffset = 0;
-+
-+ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-+ {
-+ const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
-+ size_t nextAlloc2ndIndex = 0;
-+ while (lastOffset < freeSpace2ndTo1stEnd)
-+ {
-+ // Find next non-null allocation or move nextAllocIndex to the end.
-+ while (nextAlloc2ndIndex < suballoc2ndCount &&
-+ suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
-+ {
-+ ++nextAlloc2ndIndex;
-+ }
-+
-+ // Found non-null allocation.
-+ if (nextAlloc2ndIndex < suballoc2ndCount)
-+ {
-+ const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-+
-+ // 1. Process free space before this allocation.
-+ if (lastOffset < suballoc.offset)
-+ {
-+ // There is free space from lastOffset to suballoc.offset.
-+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-+ VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
-+ }
-+
-+ // 2. Process this allocation.
-+ // There is allocation with suballoc.offset, suballoc.size.
-+ VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
-+
-+ // 3. Prepare for next iteration.
-+ lastOffset = suballoc.offset + suballoc.size;
-+ ++nextAlloc2ndIndex;
-+ }
-+ // We are at the end.
-+ else
-+ {
-+ // There is free space from lastOffset to freeSpace2ndTo1stEnd.
-+ if (lastOffset < freeSpace2ndTo1stEnd)
-+ {
-+ const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
-+ VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
-+ }
-+
-+ // End of loop.
-+ lastOffset = freeSpace2ndTo1stEnd;
-+ }
-+ }
-+ }
-+
-+ size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
-+ const VkDeviceSize freeSpace1stTo2ndEnd =
-+ m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
-+ while (lastOffset < freeSpace1stTo2ndEnd)
-+ {
-+ // Find next non-null allocation or move nextAllocIndex to the end.
-+ while (nextAlloc1stIndex < suballoc1stCount &&
-+ suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
-+ {
-+ ++nextAlloc1stIndex;
-+ }
-+
-+ // Found non-null allocation.
-+ if (nextAlloc1stIndex < suballoc1stCount)
-+ {
-+ const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
-+
-+ // 1. Process free space before this allocation.
-+ if (lastOffset < suballoc.offset)
-+ {
-+ // There is free space from lastOffset to suballoc.offset.
-+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-+ VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
-+ }
-+
-+ // 2. Process this allocation.
-+ // There is allocation with suballoc.offset, suballoc.size.
-+ VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
-+
-+ // 3. Prepare for next iteration.
-+ lastOffset = suballoc.offset + suballoc.size;
-+ ++nextAlloc1stIndex;
-+ }
-+ // We are at the end.
-+ else
-+ {
-+ // There is free space from lastOffset to freeSpace1stTo2ndEnd.
-+ if (lastOffset < freeSpace1stTo2ndEnd)
-+ {
-+ const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
-+ VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
-+ }
-+
-+ // End of loop.
-+ lastOffset = freeSpace1stTo2ndEnd;
-+ }
-+ }
-+
-+ if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-+ {
-+ size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
-+ while (lastOffset < size)
-+ {
-+ // Find next non-null allocation or move nextAllocIndex to the end.
-+ while (nextAlloc2ndIndex != SIZE_MAX &&
-+ suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
-+ {
-+ --nextAlloc2ndIndex;
-+ }
-+
-+ // Found non-null allocation.
-+ if (nextAlloc2ndIndex != SIZE_MAX)
-+ {
-+ const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-+
-+ // 1. Process free space before this allocation.
-+ if (lastOffset < suballoc.offset)
-+ {
-+ // There is free space from lastOffset to suballoc.offset.
-+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-+ VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
-+ }
-+
-+ // 2. Process this allocation.
-+ // There is allocation with suballoc.offset, suballoc.size.
-+ VmaAddDetailedStatisticsAllocation(inoutStats, suballoc.size);
-+
-+ // 3. Prepare for next iteration.
-+ lastOffset = suballoc.offset + suballoc.size;
-+ --nextAlloc2ndIndex;
-+ }
-+ // We are at the end.
-+ else
-+ {
-+ // There is free space from lastOffset to size.
-+ if (lastOffset < size)
-+ {
-+ const VkDeviceSize unusedRangeSize = size - lastOffset;
-+ VmaAddDetailedStatisticsUnusedRange(inoutStats, unusedRangeSize);
-+ }
-+
-+ // End of loop.
-+ lastOffset = size;
-+ }
-+ }
-+ }
-+}
-+
-+void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const
-+{
-+ const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-+ const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-+ const VkDeviceSize size = GetSize();
-+ const size_t suballoc1stCount = suballocations1st.size();
-+ const size_t suballoc2ndCount = suballocations2nd.size();
-+
-+ inoutStats.blockCount++;
-+ inoutStats.blockBytes += size;
-+ inoutStats.allocationBytes += size - m_SumFreeSize;
-+
-+ VkDeviceSize lastOffset = 0;
-+
-+ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-+ {
-+ const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
-+ size_t nextAlloc2ndIndex = m_1stNullItemsBeginCount;
-+ while (lastOffset < freeSpace2ndTo1stEnd)
-+ {
-+ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-+ while (nextAlloc2ndIndex < suballoc2ndCount &&
-+ suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
-+ {
-+ ++nextAlloc2ndIndex;
-+ }
-+
-+ // Found non-null allocation.
-+ if (nextAlloc2ndIndex < suballoc2ndCount)
-+ {
-+ const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-+
-+ // Process this allocation.
-+ // There is allocation with suballoc.offset, suballoc.size.
-+ ++inoutStats.allocationCount;
-+
-+ // Prepare for next iteration.
-+ lastOffset = suballoc.offset + suballoc.size;
-+ ++nextAlloc2ndIndex;
-+ }
-+ // We are at the end.
-+ else
-+ {
-+ // End of loop.
-+ lastOffset = freeSpace2ndTo1stEnd;
-+ }
-+ }
-+ }
-+
-+ size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
-+ const VkDeviceSize freeSpace1stTo2ndEnd =
-+ m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
-+ while (lastOffset < freeSpace1stTo2ndEnd)
-+ {
-+ // Find next non-null allocation or move nextAllocIndex to the end.
-+ while (nextAlloc1stIndex < suballoc1stCount &&
-+ suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
-+ {
-+ ++nextAlloc1stIndex;
-+ }
-+
-+ // Found non-null allocation.
-+ if (nextAlloc1stIndex < suballoc1stCount)
-+ {
-+ const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
-+
-+ // Process this allocation.
-+ // There is allocation with suballoc.offset, suballoc.size.
-+ ++inoutStats.allocationCount;
-+
-+ // Prepare for next iteration.
-+ lastOffset = suballoc.offset + suballoc.size;
-+ ++nextAlloc1stIndex;
-+ }
-+ // We are at the end.
-+ else
-+ {
-+ // End of loop.
-+ lastOffset = freeSpace1stTo2ndEnd;
-+ }
-+ }
-+
-+ if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-+ {
-+ size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
-+ while (lastOffset < size)
-+ {
-+ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-+ while (nextAlloc2ndIndex != SIZE_MAX &&
-+ suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
-+ {
-+ --nextAlloc2ndIndex;
-+ }
-+
-+ // Found non-null allocation.
-+ if (nextAlloc2ndIndex != SIZE_MAX)
-+ {
-+ const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-+
-+ // Process this allocation.
-+ // There is allocation with suballoc.offset, suballoc.size.
-+ ++inoutStats.allocationCount;
-+
-+ // Prepare for next iteration.
-+ lastOffset = suballoc.offset + suballoc.size;
-+ --nextAlloc2ndIndex;
-+ }
-+ // We are at the end.
-+ else
-+ {
-+ // End of loop.
-+ lastOffset = size;
-+ }
-+ }
-+ }
-+}
-+
-+#if VMA_STATS_STRING_ENABLED
-+void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const
-+{
-+ const VkDeviceSize size = GetSize();
-+ const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-+ const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-+ const size_t suballoc1stCount = suballocations1st.size();
-+ const size_t suballoc2ndCount = suballocations2nd.size();
-+
-+ // FIRST PASS
-+
-+ size_t unusedRangeCount = 0;
-+ VkDeviceSize usedBytes = 0;
-+
-+ VkDeviceSize lastOffset = 0;
-+
-+ size_t alloc2ndCount = 0;
-+ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-+ {
-+ const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
-+ size_t nextAlloc2ndIndex = 0;
-+ while (lastOffset < freeSpace2ndTo1stEnd)
-+ {
-+ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-+ while (nextAlloc2ndIndex < suballoc2ndCount &&
-+ suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
-+ {
-+ ++nextAlloc2ndIndex;
-+ }
-+
-+ // Found non-null allocation.
-+ if (nextAlloc2ndIndex < suballoc2ndCount)
-+ {
-+ const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-+
-+ // 1. Process free space before this allocation.
-+ if (lastOffset < suballoc.offset)
-+ {
-+ // There is free space from lastOffset to suballoc.offset.
-+ ++unusedRangeCount;
-+ }
-+
-+ // 2. Process this allocation.
-+ // There is allocation with suballoc.offset, suballoc.size.
-+ ++alloc2ndCount;
-+ usedBytes += suballoc.size;
-+
-+ // 3. Prepare for next iteration.
-+ lastOffset = suballoc.offset + suballoc.size;
-+ ++nextAlloc2ndIndex;
-+ }
-+ // We are at the end.
-+ else
-+ {
-+ if (lastOffset < freeSpace2ndTo1stEnd)
-+ {
-+ // There is free space from lastOffset to freeSpace2ndTo1stEnd.
-+ ++unusedRangeCount;
-+ }
-+
-+ // End of loop.
-+ lastOffset = freeSpace2ndTo1stEnd;
-+ }
-+ }
-+ }
-+
-+ size_t nextAlloc1stIndex = m_1stNullItemsBeginCount;
-+ size_t alloc1stCount = 0;
-+ const VkDeviceSize freeSpace1stTo2ndEnd =
-+ m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : size;
-+ while (lastOffset < freeSpace1stTo2ndEnd)
-+ {
-+ // Find next non-null allocation or move nextAllocIndex to the end.
-+ while (nextAlloc1stIndex < suballoc1stCount &&
-+ suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
-+ {
-+ ++nextAlloc1stIndex;
-+ }
-+
-+ // Found non-null allocation.
-+ if (nextAlloc1stIndex < suballoc1stCount)
-+ {
-+ const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
-+
-+ // 1. Process free space before this allocation.
-+ if (lastOffset < suballoc.offset)
-+ {
-+ // There is free space from lastOffset to suballoc.offset.
-+ ++unusedRangeCount;
-+ }
-+
-+ // 2. Process this allocation.
-+ // There is allocation with suballoc.offset, suballoc.size.
-+ ++alloc1stCount;
-+ usedBytes += suballoc.size;
-+
-+ // 3. Prepare for next iteration.
-+ lastOffset = suballoc.offset + suballoc.size;
-+ ++nextAlloc1stIndex;
-+ }
-+ // We are at the end.
-+ else
-+ {
-+ if (lastOffset < size)
-+ {
-+ // There is free space from lastOffset to freeSpace1stTo2ndEnd.
-+ ++unusedRangeCount;
-+ }
-+
-+ // End of loop.
-+ lastOffset = freeSpace1stTo2ndEnd;
-+ }
-+ }
-+
-+ if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-+ {
-+ size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
-+ while (lastOffset < size)
-+ {
-+ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-+ while (nextAlloc2ndIndex != SIZE_MAX &&
-+ suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
-+ {
-+ --nextAlloc2ndIndex;
-+ }
-+
-+ // Found non-null allocation.
-+ if (nextAlloc2ndIndex != SIZE_MAX)
-+ {
-+ const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-+
-+ // 1. Process free space before this allocation.
-+ if (lastOffset < suballoc.offset)
-+ {
-+ // There is free space from lastOffset to suballoc.offset.
-+ ++unusedRangeCount;
-+ }
-+
-+ // 2. Process this allocation.
-+ // There is allocation with suballoc.offset, suballoc.size.
-+ ++alloc2ndCount;
-+ usedBytes += suballoc.size;
-+
-+ // 3. Prepare for next iteration.
-+ lastOffset = suballoc.offset + suballoc.size;
-+ --nextAlloc2ndIndex;
-+ }
-+ // We are at the end.
-+ else
-+ {
-+ if (lastOffset < size)
-+ {
-+ // There is free space from lastOffset to size.
-+ ++unusedRangeCount;
-+ }
-+
-+ // End of loop.
-+ lastOffset = size;
-+ }
-+ }
-+ }
-+
-+ const VkDeviceSize unusedBytes = size - usedBytes;
-+ PrintDetailedMap_Begin(json, unusedBytes, alloc1stCount + alloc2ndCount, unusedRangeCount);
-+
-+ // SECOND PASS
-+ lastOffset = 0;
-+
-+ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-+ {
-+ const VkDeviceSize freeSpace2ndTo1stEnd = suballocations1st[m_1stNullItemsBeginCount].offset;
-+ size_t nextAlloc2ndIndex = 0;
-+ while (lastOffset < freeSpace2ndTo1stEnd)
-+ {
-+ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-+ while (nextAlloc2ndIndex < suballoc2ndCount &&
-+ suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
-+ {
-+ ++nextAlloc2ndIndex;
-+ }
-+
-+ // Found non-null allocation.
-+ if (nextAlloc2ndIndex < suballoc2ndCount)
-+ {
-+ const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-+
-+ // 1. Process free space before this allocation.
-+ if (lastOffset < suballoc.offset)
-+ {
-+ // There is free space from lastOffset to suballoc.offset.
-+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-+ PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
-+ }
-+
-+ // 2. Process this allocation.
-+ // There is allocation with suballoc.offset, suballoc.size.
-+ PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
-+
-+ // 3. Prepare for next iteration.
-+ lastOffset = suballoc.offset + suballoc.size;
-+ ++nextAlloc2ndIndex;
-+ }
-+ // We are at the end.
-+ else
-+ {
-+ if (lastOffset < freeSpace2ndTo1stEnd)
-+ {
-+ // There is free space from lastOffset to freeSpace2ndTo1stEnd.
-+ const VkDeviceSize unusedRangeSize = freeSpace2ndTo1stEnd - lastOffset;
-+ PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
-+ }
-+
-+ // End of loop.
-+ lastOffset = freeSpace2ndTo1stEnd;
-+ }
-+ }
-+ }
-+
-+ nextAlloc1stIndex = m_1stNullItemsBeginCount;
-+ while (lastOffset < freeSpace1stTo2ndEnd)
-+ {
-+ // Find next non-null allocation or move nextAllocIndex to the end.
-+ while (nextAlloc1stIndex < suballoc1stCount &&
-+ suballocations1st[nextAlloc1stIndex].userData == VMA_NULL)
-+ {
-+ ++nextAlloc1stIndex;
-+ }
-+
-+ // Found non-null allocation.
-+ if (nextAlloc1stIndex < suballoc1stCount)
-+ {
-+ const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex];
-+
-+ // 1. Process free space before this allocation.
-+ if (lastOffset < suballoc.offset)
-+ {
-+ // There is free space from lastOffset to suballoc.offset.
-+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-+ PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
-+ }
-+
-+ // 2. Process this allocation.
-+ // There is allocation with suballoc.offset, suballoc.size.
-+ PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
-+
-+ // 3. Prepare for next iteration.
-+ lastOffset = suballoc.offset + suballoc.size;
-+ ++nextAlloc1stIndex;
-+ }
-+ // We are at the end.
-+ else
-+ {
-+ if (lastOffset < freeSpace1stTo2ndEnd)
-+ {
-+ // There is free space from lastOffset to freeSpace1stTo2ndEnd.
-+ const VkDeviceSize unusedRangeSize = freeSpace1stTo2ndEnd - lastOffset;
-+ PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
-+ }
-+
-+ // End of loop.
-+ lastOffset = freeSpace1stTo2ndEnd;
-+ }
-+ }
-+
-+ if (m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-+ {
-+ size_t nextAlloc2ndIndex = suballocations2nd.size() - 1;
-+ while (lastOffset < size)
-+ {
-+ // Find next non-null allocation or move nextAlloc2ndIndex to the end.
-+ while (nextAlloc2ndIndex != SIZE_MAX &&
-+ suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL)
-+ {
-+ --nextAlloc2ndIndex;
-+ }
-+
-+ // Found non-null allocation.
-+ if (nextAlloc2ndIndex != SIZE_MAX)
-+ {
-+ const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex];
-+
-+ // 1. Process free space before this allocation.
-+ if (lastOffset < suballoc.offset)
-+ {
-+ // There is free space from lastOffset to suballoc.offset.
-+ const VkDeviceSize unusedRangeSize = suballoc.offset - lastOffset;
-+ PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
-+ }
-+
-+ // 2. Process this allocation.
-+ // There is allocation with suballoc.offset, suballoc.size.
-+ PrintDetailedMap_Allocation(json, suballoc.offset, suballoc.size, suballoc.userData);
-+
-+ // 3. Prepare for next iteration.
-+ lastOffset = suballoc.offset + suballoc.size;
-+ --nextAlloc2ndIndex;
-+ }
-+ // We are at the end.
-+ else
-+ {
-+ if (lastOffset < size)
-+ {
-+ // There is free space from lastOffset to size.
-+ const VkDeviceSize unusedRangeSize = size - lastOffset;
-+ PrintDetailedMap_UnusedRange(json, lastOffset, unusedRangeSize);
-+ }
-+
-+ // End of loop.
-+ lastOffset = size;
-+ }
-+ }
-+ }
-+
-+ PrintDetailedMap_End(json);
-+}
-+#endif // VMA_STATS_STRING_ENABLED
-+
-+bool VmaBlockMetadata_Linear::CreateAllocationRequest(
-+ VkDeviceSize allocSize,
-+ VkDeviceSize allocAlignment,
-+ bool upperAddress,
-+ VmaSuballocationType allocType,
-+ uint32_t strategy,
-+ VmaAllocationRequest* pAllocationRequest)
-+{
-+ VMA_ASSERT(allocSize > 0);
-+ VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE);
-+ VMA_ASSERT(pAllocationRequest != VMA_NULL);
-+ VMA_HEAVY_ASSERT(Validate());
-+ pAllocationRequest->size = allocSize;
-+ return upperAddress ?
-+ CreateAllocationRequest_UpperAddress(
-+ allocSize, allocAlignment, allocType, strategy, pAllocationRequest) :
-+ CreateAllocationRequest_LowerAddress(
-+ allocSize, allocAlignment, allocType, strategy, pAllocationRequest);
-+}
-+
-+VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData)
-+{
-+ VMA_ASSERT(!IsVirtual());
-+ SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-+ for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i)
-+ {
-+ const VmaSuballocation& suballoc = suballocations1st[i];
-+ if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-+ {
-+ if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
-+ {
-+ VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
-+ return VK_ERROR_UNKNOWN_COPY;
-+ }
-+ }
-+ }
-+
-+ SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-+ for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i)
-+ {
-+ const VmaSuballocation& suballoc = suballocations2nd[i];
-+ if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE)
-+ {
-+ if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size))
-+ {
-+ VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
-+ return VK_ERROR_UNKNOWN_COPY;
-+ }
-+ }
-+ }
-+
-+ return VK_SUCCESS;
-+}
-+
-+void VmaBlockMetadata_Linear::Alloc(
-+ const VmaAllocationRequest& request,
-+ VmaSuballocationType type,
-+ void* userData)
-+{
-+ const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;
-+ const VmaSuballocation newSuballoc = { offset, request.size, userData, type };
-+
-+ switch (request.type)
-+ {
-+ case VmaAllocationRequestType::UpperAddress:
-+ {
-+ VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER &&
-+ "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer.");
-+ SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-+ suballocations2nd.push_back(newSuballoc);
-+ m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK;
-+ }
-+ break;
-+ case VmaAllocationRequestType::EndOf1st:
-+ {
-+ SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-+
-+ VMA_ASSERT(suballocations1st.empty() ||
-+ offset >= suballocations1st.back().offset + suballocations1st.back().size);
-+ // Check if it fits before the end of the block.
-+ VMA_ASSERT(offset + request.size <= GetSize());
-+
-+ suballocations1st.push_back(newSuballoc);
-+ }
-+ break;
-+ case VmaAllocationRequestType::EndOf2nd:
-+ {
-+ SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-+ // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector.
-+ VMA_ASSERT(!suballocations1st.empty() &&
-+ offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset);
-+ SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-+
-+ switch (m_2ndVectorMode)
-+ {
-+ case SECOND_VECTOR_EMPTY:
-+ // First allocation from second part ring buffer.
-+ VMA_ASSERT(suballocations2nd.empty());
-+ m_2ndVectorMode = SECOND_VECTOR_RING_BUFFER;
-+ break;
-+ case SECOND_VECTOR_RING_BUFFER:
-+ // 2-part ring buffer is already started.
-+ VMA_ASSERT(!suballocations2nd.empty());
-+ break;
-+ case SECOND_VECTOR_DOUBLE_STACK:
-+ VMA_ASSERT(0 && "CRITICAL ERROR: Trying to use linear allocator as ring buffer while it was already used as double stack.");
-+ break;
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+
-+ suballocations2nd.push_back(newSuballoc);
-+ }
-+ break;
-+ default:
-+ VMA_ASSERT(0 && "CRITICAL INTERNAL ERROR.");
-+ }
-+
-+ m_SumFreeSize -= newSuballoc.size;
-+}
-+
-+void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle)
-+{
-+ SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-+ SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-+ VkDeviceSize offset = (VkDeviceSize)allocHandle - 1;
-+
-+ if (!suballocations1st.empty())
-+ {
-+ // First allocation: Mark it as next empty at the beginning.
-+ VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount];
-+ if (firstSuballoc.offset == offset)
-+ {
-+ firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE;
-+ firstSuballoc.userData = VMA_NULL;
-+ m_SumFreeSize += firstSuballoc.size;
-+ ++m_1stNullItemsBeginCount;
-+ CleanupAfterFree();
-+ return;
-+ }
-+ }
-+
-+ // Last allocation in 2-part ring buffer or top of upper stack (same logic).
-+ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ||
-+ m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-+ {
-+ VmaSuballocation& lastSuballoc = suballocations2nd.back();
-+ if (lastSuballoc.offset == offset)
-+ {
-+ m_SumFreeSize += lastSuballoc.size;
-+ suballocations2nd.pop_back();
-+ CleanupAfterFree();
-+ return;
-+ }
-+ }
-+ // Last allocation in 1st vector.
-+ else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY)
-+ {
-+ VmaSuballocation& lastSuballoc = suballocations1st.back();
-+ if (lastSuballoc.offset == offset)
-+ {
-+ m_SumFreeSize += lastSuballoc.size;
-+ suballocations1st.pop_back();
-+ CleanupAfterFree();
-+ return;
-+ }
-+ }
-+
-+ VmaSuballocation refSuballoc;
-+ refSuballoc.offset = offset;
-+ // Rest of members stays uninitialized intentionally for better performance.
-+
-+ // Item from the middle of 1st vector.
-+ {
-+ const SuballocationVectorType::iterator it = VmaBinaryFindSorted(
-+ suballocations1st.begin() + m_1stNullItemsBeginCount,
-+ suballocations1st.end(),
-+ refSuballoc,
-+ VmaSuballocationOffsetLess());
-+ if (it != suballocations1st.end())
-+ {
-+ it->type = VMA_SUBALLOCATION_TYPE_FREE;
-+ it->userData = VMA_NULL;
-+ ++m_1stNullItemsMiddleCount;
-+ m_SumFreeSize += it->size;
-+ CleanupAfterFree();
-+ return;
-+ }
-+ }
-+
-+ if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
-+ {
-+ // Item from the middle of 2nd vector.
-+ const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
-+ VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
-+ VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
-+ if (it != suballocations2nd.end())
-+ {
-+ it->type = VMA_SUBALLOCATION_TYPE_FREE;
-+ it->userData = VMA_NULL;
-+ ++m_2ndNullItemsCount;
-+ m_SumFreeSize += it->size;
-+ CleanupAfterFree();
-+ return;
-+ }
-+ }
-+
-+ VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
-+}
-+
-+void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
-+{
-+ outInfo.offset = (VkDeviceSize)allocHandle - 1;
-+ VmaSuballocation& suballoc = FindSuballocation(outInfo.offset);
-+ outInfo.size = suballoc.size;
-+ outInfo.pUserData = suballoc.userData;
-+}
-+
-+void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const
-+{
-+ return FindSuballocation((VkDeviceSize)allocHandle - 1).userData;
-+}
-+
-+VmaAllocHandle VmaBlockMetadata_Linear::GetAllocationListBegin() const
-+{
-+ // Function only used for defragmentation, which is disabled for this algorithm
-+ VMA_ASSERT(0);
-+ return VK_NULL_HANDLE;
-+}
-+
-+VmaAllocHandle VmaBlockMetadata_Linear::GetNextAllocation(VmaAllocHandle prevAlloc) const
-+{
-+ // Function only used for defragmentation, which is disabled for this algorithm
-+ VMA_ASSERT(0);
-+ return VK_NULL_HANDLE;
-+}
-+
-+VkDeviceSize VmaBlockMetadata_Linear::GetNextFreeRegionSize(VmaAllocHandle alloc) const
-+{
-+ // Function only used for defragmentation, which is disabled for this algorithm
-+ VMA_ASSERT(0);
-+ return 0;
-+}
-+
-+void VmaBlockMetadata_Linear::Clear()
-+{
-+ m_SumFreeSize = GetSize();
-+ m_Suballocations0.clear();
-+ m_Suballocations1.clear();
-+ // Leaving m_1stVectorIndex unchanged - it doesn't matter.
-+ m_2ndVectorMode = SECOND_VECTOR_EMPTY;
-+ m_1stNullItemsBeginCount = 0;
-+ m_1stNullItemsMiddleCount = 0;
-+ m_2ndNullItemsCount = 0;
-+}
-+
-+void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
-+{
-+ VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1);
-+ suballoc.userData = userData;
-+}
-+
-+void VmaBlockMetadata_Linear::DebugLogAllAllocations() const
-+{
-+ const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-+ for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it)
-+ if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
-+ DebugLogAllocation(it->offset, it->size, it->userData);
-+
-+ const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-+ for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it)
-+ if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
-+ DebugLogAllocation(it->offset, it->size, it->userData);
-+}
-+
-+VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const
-+{
-+ const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-+ const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-+
-+ VmaSuballocation refSuballoc;
-+ refSuballoc.offset = offset;
-+ // Rest of members stays uninitialized intentionally for better performance.
-+
-+ // Item from the 1st vector.
-+ {
-+ SuballocationVectorType::const_iterator it = VmaBinaryFindSorted(
-+ suballocations1st.begin() + m_1stNullItemsBeginCount,
-+ suballocations1st.end(),
-+ refSuballoc,
-+ VmaSuballocationOffsetLess());
-+ if (it != suballocations1st.end())
-+ {
-+ return const_cast<VmaSuballocation&>(*it);
-+ }
-+ }
-+
-+ if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
-+ {
-+ // Rest of members stays uninitialized intentionally for better performance.
-+ SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
-+ VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
-+ VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
-+ if (it != suballocations2nd.end())
-+ {
-+ return const_cast<VmaSuballocation&>(*it);
-+ }
-+ }
-+
-+ VMA_ASSERT(0 && "Allocation not found in linear allocator!");
-+ return const_cast<VmaSuballocation&>(suballocations1st.back()); // Should never occur.
-+}
-+
-+bool VmaBlockMetadata_Linear::ShouldCompact1st() const
-+{
-+ const size_t nullItemCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
-+ const size_t suballocCount = AccessSuballocations1st().size();
-+ return suballocCount > 32 && nullItemCount * 2 >= (suballocCount - nullItemCount) * 3;
-+}
-+
-+void VmaBlockMetadata_Linear::CleanupAfterFree()
-+{
-+ SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-+ SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-+
-+ if (IsEmpty())
-+ {
-+ suballocations1st.clear();
-+ suballocations2nd.clear();
-+ m_1stNullItemsBeginCount = 0;
-+ m_1stNullItemsMiddleCount = 0;
-+ m_2ndNullItemsCount = 0;
-+ m_2ndVectorMode = SECOND_VECTOR_EMPTY;
-+ }
-+ else
-+ {
-+ const size_t suballoc1stCount = suballocations1st.size();
-+ const size_t nullItem1stCount = m_1stNullItemsBeginCount + m_1stNullItemsMiddleCount;
-+ VMA_ASSERT(nullItem1stCount <= suballoc1stCount);
-+
-+ // Find more null items at the beginning of 1st vector.
-+ while (m_1stNullItemsBeginCount < suballoc1stCount &&
-+ suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
-+ {
-+ ++m_1stNullItemsBeginCount;
-+ --m_1stNullItemsMiddleCount;
-+ }
-+
-+ // Find more null items at the end of 1st vector.
-+ while (m_1stNullItemsMiddleCount > 0 &&
-+ suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE)
-+ {
-+ --m_1stNullItemsMiddleCount;
-+ suballocations1st.pop_back();
-+ }
-+
-+ // Find more null items at the end of 2nd vector.
-+ while (m_2ndNullItemsCount > 0 &&
-+ suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE)
-+ {
-+ --m_2ndNullItemsCount;
-+ suballocations2nd.pop_back();
-+ }
-+
-+ // Find more null items at the beginning of 2nd vector.
-+ while (m_2ndNullItemsCount > 0 &&
-+ suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE)
-+ {
-+ --m_2ndNullItemsCount;
-+ VmaVectorRemove(suballocations2nd, 0);
-+ }
-+
-+ if (ShouldCompact1st())
-+ {
-+ const size_t nonNullItemCount = suballoc1stCount - nullItem1stCount;
-+ size_t srcIndex = m_1stNullItemsBeginCount;
-+ for (size_t dstIndex = 0; dstIndex < nonNullItemCount; ++dstIndex)
-+ {
-+ while (suballocations1st[srcIndex].type == VMA_SUBALLOCATION_TYPE_FREE)
-+ {
-+ ++srcIndex;
-+ }
-+ if (dstIndex != srcIndex)
-+ {
-+ suballocations1st[dstIndex] = suballocations1st[srcIndex];
-+ }
-+ ++srcIndex;
-+ }
-+ suballocations1st.resize(nonNullItemCount);
-+ m_1stNullItemsBeginCount = 0;
-+ m_1stNullItemsMiddleCount = 0;
-+ }
-+
-+ // 2nd vector became empty.
-+ if (suballocations2nd.empty())
-+ {
-+ m_2ndVectorMode = SECOND_VECTOR_EMPTY;
-+ }
-+
-+ // 1st vector became empty.
-+ if (suballocations1st.size() - m_1stNullItemsBeginCount == 0)
-+ {
-+ suballocations1st.clear();
-+ m_1stNullItemsBeginCount = 0;
-+
-+ if (!suballocations2nd.empty() && m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-+ {
-+ // Swap 1st with 2nd. Now 2nd is empty.
-+ m_2ndVectorMode = SECOND_VECTOR_EMPTY;
-+ m_1stNullItemsMiddleCount = m_2ndNullItemsCount;
-+ while (m_1stNullItemsBeginCount < suballocations2nd.size() &&
-+ suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
-+ {
-+ ++m_1stNullItemsBeginCount;
-+ --m_1stNullItemsMiddleCount;
-+ }
-+ m_2ndNullItemsCount = 0;
-+ m_1stVectorIndex ^= 1;
-+ }
-+ }
-+ }
-+
-+ VMA_HEAVY_ASSERT(Validate());
-+}
-+
-+bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress(
-+ VkDeviceSize allocSize,
-+ VkDeviceSize allocAlignment,
-+ VmaSuballocationType allocType,
-+ uint32_t strategy,
-+ VmaAllocationRequest* pAllocationRequest)
-+{
-+ const VkDeviceSize blockSize = GetSize();
-+ const VkDeviceSize debugMargin = GetDebugMargin();
-+ const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
-+ SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-+ SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-+
-+ if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-+ {
-+ // Try to allocate at the end of 1st vector.
-+
-+ VkDeviceSize resultBaseOffset = 0;
-+ if (!suballocations1st.empty())
-+ {
-+ const VmaSuballocation& lastSuballoc = suballocations1st.back();
-+ resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin;
-+ }
-+
-+ // Start from offset equal to beginning of free space.
-+ VkDeviceSize resultOffset = resultBaseOffset;
-+
-+ // Apply alignment.
-+ resultOffset = VmaAlignUp(resultOffset, allocAlignment);
-+
-+ // Check previous suballocations for BufferImageGranularity conflicts.
-+ // Make bigger alignment if necessary.
-+ if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty())
-+ {
-+ bool bufferImageGranularityConflict = false;
-+ for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
-+ {
-+ const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
-+ if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
-+ {
-+ if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
-+ {
-+ bufferImageGranularityConflict = true;
-+ break;
-+ }
-+ }
-+ else
-+ // Already on previous page.
-+ break;
-+ }
-+ if (bufferImageGranularityConflict)
-+ {
-+ resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
-+ }
-+ }
-+
-+ const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ?
-+ suballocations2nd.back().offset : blockSize;
-+
-+ // There is enough free space at the end after alignment.
-+ if (resultOffset + allocSize + debugMargin <= freeSpaceEnd)
-+ {
-+ // Check next suballocations for BufferImageGranularity conflicts.
-+ // If conflict exists, allocation cannot be made here.
-+ if ((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK)
-+ {
-+ for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
-+ {
-+ const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
-+ if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
-+ {
-+ if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
-+ {
-+ return false;
-+ }
-+ }
-+ else
-+ {
-+ // Already on previous page.
-+ break;
-+ }
-+ }
-+ }
-+
-+ // All tests passed: Success.
-+ pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
-+ // pAllocationRequest->item, customData unused.
-+ pAllocationRequest->type = VmaAllocationRequestType::EndOf1st;
-+ return true;
-+ }
-+ }
-+
-+ // Wrap-around to end of 2nd vector. Try to allocate there, watching for the
-+ // beginning of 1st vector as the end of free space.
-+ if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-+ {
-+ VMA_ASSERT(!suballocations1st.empty());
-+
-+ VkDeviceSize resultBaseOffset = 0;
-+ if (!suballocations2nd.empty())
-+ {
-+ const VmaSuballocation& lastSuballoc = suballocations2nd.back();
-+ resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin;
-+ }
-+
-+ // Start from offset equal to beginning of free space.
-+ VkDeviceSize resultOffset = resultBaseOffset;
-+
-+ // Apply alignment.
-+ resultOffset = VmaAlignUp(resultOffset, allocAlignment);
-+
-+ // Check previous suballocations for BufferImageGranularity conflicts.
-+ // Make bigger alignment if necessary.
-+ if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
-+ {
-+ bool bufferImageGranularityConflict = false;
-+ for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; )
-+ {
-+ const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex];
-+ if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
-+ {
-+ if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType))
-+ {
-+ bufferImageGranularityConflict = true;
-+ break;
-+ }
-+ }
-+ else
-+ // Already on previous page.
-+ break;
-+ }
-+ if (bufferImageGranularityConflict)
-+ {
-+ resultOffset = VmaAlignUp(resultOffset, bufferImageGranularity);
-+ }
-+ }
-+
-+ size_t index1st = m_1stNullItemsBeginCount;
-+
-+ // There is enough free space at the end after alignment.
-+ if ((index1st == suballocations1st.size() && resultOffset + allocSize + debugMargin <= blockSize) ||
-+ (index1st < suballocations1st.size() && resultOffset + allocSize + debugMargin <= suballocations1st[index1st].offset))
-+ {
-+ // Check next suballocations for BufferImageGranularity conflicts.
-+ // If conflict exists, allocation cannot be made here.
-+ if (allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity)
-+ {
-+ for (size_t nextSuballocIndex = index1st;
-+ nextSuballocIndex < suballocations1st.size();
-+ nextSuballocIndex++)
-+ {
-+ const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex];
-+ if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
-+ {
-+ if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type))
-+ {
-+ return false;
-+ }
-+ }
-+ else
-+ {
-+ // Already on next page.
-+ break;
-+ }
-+ }
-+ }
-+
-+ // All tests passed: Success.
-+ pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
-+ pAllocationRequest->type = VmaAllocationRequestType::EndOf2nd;
-+ // pAllocationRequest->item, customData unused.
-+ return true;
-+ }
-+ }
-+
-+ return false;
-+}
-+
-+bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress(
-+ VkDeviceSize allocSize,
-+ VkDeviceSize allocAlignment,
-+ VmaSuballocationType allocType,
-+ uint32_t strategy,
-+ VmaAllocationRequest* pAllocationRequest)
-+{
-+ const VkDeviceSize blockSize = GetSize();
-+ const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity();
-+ SuballocationVectorType& suballocations1st = AccessSuballocations1st();
-+ SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
-+
-+ if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER)
-+ {
-+ VMA_ASSERT(0 && "Trying to use pool with linear algorithm as double stack, while it is already being used as ring buffer.");
-+ return false;
-+ }
-+
-+ // Try to allocate before 2nd.back(), or end of block if 2nd.empty().
-+ if (allocSize > blockSize)
-+ {
-+ return false;
-+ }
-+ VkDeviceSize resultBaseOffset = blockSize - allocSize;
-+ if (!suballocations2nd.empty())
-+ {
-+ const VmaSuballocation& lastSuballoc = suballocations2nd.back();
-+ resultBaseOffset = lastSuballoc.offset - allocSize;
-+ if (allocSize > lastSuballoc.offset)
-+ {
-+ return false;
-+ }
-+ }
-+
-+ // Start from offset equal to end of free space.
-+ VkDeviceSize resultOffset = resultBaseOffset;
-+
-+ const VkDeviceSize debugMargin = GetDebugMargin();
-+
-+ // Apply debugMargin at the end.
-+ if (debugMargin > 0)
-+ {
-+ if (resultOffset < debugMargin)
-+ {
-+ return false;
-+ }
-+ resultOffset -= debugMargin;
-+ }
-+
-+ // Apply alignment.
-+ resultOffset = VmaAlignDown(resultOffset, allocAlignment);
-+
-+ // Check next suballocations from 2nd for BufferImageGranularity conflicts.
-+ // Make bigger alignment if necessary.
-+ if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty())
-+ {
-+ bool bufferImageGranularityConflict = false;
-+ for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; )
-+ {
-+ const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex];
-+ if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity))
-+ {
-+ if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType))
-+ {
-+ bufferImageGranularityConflict = true;
-+ break;
-+ }
-+ }
-+ else
-+ // Already on previous page.
-+ break;
-+ }
-+ if (bufferImageGranularityConflict)
-+ {
-+ resultOffset = VmaAlignDown(resultOffset, bufferImageGranularity);
-+ }
-+ }
-+
-+ // There is enough free space.
-+ const VkDeviceSize endOf1st = !suballocations1st.empty() ?
-+ suballocations1st.back().offset + suballocations1st.back().size :
-+ 0;
-+ if (endOf1st + debugMargin <= resultOffset)
-+ {
-+ // Check previous suballocations for BufferImageGranularity conflicts.
-+ // If conflict exists, allocation cannot be made here.
-+ if (bufferImageGranularity > 1)
-+ {
-+ for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; )
-+ {
-+ const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex];
-+ if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity))
-+ {
-+ if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type))
-+ {
-+ return false;
-+ }
-+ }
-+ else
-+ {
-+ // Already on next page.
-+ break;
-+ }
-+ }
-+ }
-+
-+ // All tests passed: Success.
-+ pAllocationRequest->allocHandle = (VmaAllocHandle)(resultOffset + 1);
-+ // pAllocationRequest->item unused.
-+ pAllocationRequest->type = VmaAllocationRequestType::UpperAddress;
-+ return true;
-+ }
-+
-+ return false;
-+}
-+#endif // _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS
-+#endif // _VMA_BLOCK_METADATA_LINEAR
-+
-+#if 0
-+#ifndef _VMA_BLOCK_METADATA_BUDDY
-+/*
-+- GetSize() is the original size of allocated memory block.
-+- m_UsableSize is this size aligned down to a power of two.
-+ All allocations and calculations happen relative to m_UsableSize.
-+- GetUnusableSize() is the difference between them.
-+ It is reported as separate, unused range, not available for allocations.
-+
-+Node at level 0 has size = m_UsableSize.
-+Each next level contains nodes with size 2 times smaller than current level.
-+m_LevelCount is the maximum number of levels to use in the current object.
-+*/
-+class VmaBlockMetadata_Buddy : public VmaBlockMetadata
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Buddy)
-+public:
-+ VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks,
-+ VkDeviceSize bufferImageGranularity, bool isVirtual);
-+ virtual ~VmaBlockMetadata_Buddy();
-+
-+ size_t GetAllocationCount() const override { return m_AllocationCount; }
-+ VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize + GetUnusableSize(); }
-+ bool IsEmpty() const override { return m_Root->type == Node::TYPE_FREE; }
-+ VkResult CheckCorruption(const void* pBlockData) override { return VK_ERROR_FEATURE_NOT_PRESENT; }
-+ VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return (VkDeviceSize)allocHandle - 1; }
-+ void DebugLogAllAllocations() const override { DebugLogAllAllocationNode(m_Root, 0); }
-+
-+ void Init(VkDeviceSize size) override;
-+ bool Validate() const override;
-+
-+ void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
-+ void AddStatistics(VmaStatistics& inoutStats) const override;
-+
-+#if VMA_STATS_STRING_ENABLED
-+ void PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const override;
-+#endif
-+
-+ bool CreateAllocationRequest(
-+ VkDeviceSize allocSize,
-+ VkDeviceSize allocAlignment,
-+ bool upperAddress,
-+ VmaSuballocationType allocType,
-+ uint32_t strategy,
-+ VmaAllocationRequest* pAllocationRequest) override;
-+
-+ void Alloc(
-+ const VmaAllocationRequest& request,
-+ VmaSuballocationType type,
-+ void* userData) override;
-+
-+ void Free(VmaAllocHandle allocHandle) override;
-+ void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
-+ void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
-+ VmaAllocHandle GetAllocationListBegin() const override;
-+ VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
-+ void Clear() override;
-+ void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
-+
-+private:
-+ static const size_t MAX_LEVELS = 48;
-+
-+ struct ValidationContext
-+ {
-+ size_t calculatedAllocationCount = 0;
-+ size_t calculatedFreeCount = 0;
-+ VkDeviceSize calculatedSumFreeSize = 0;
-+ };
-+ struct Node
-+ {
-+ VkDeviceSize offset;
-+ enum TYPE
-+ {
-+ TYPE_FREE,
-+ TYPE_ALLOCATION,
-+ TYPE_SPLIT,
-+ TYPE_COUNT
-+ } type;
-+ Node* parent;
-+ Node* buddy;
-+
-+ union
-+ {
-+ struct
-+ {
-+ Node* prev;
-+ Node* next;
-+ } free;
-+ struct
-+ {
-+ void* userData;
-+ } allocation;
-+ struct
-+ {
-+ Node* leftChild;
-+ } split;
-+ };
-+ };
-+
-+ // Size of the memory block aligned down to a power of two.
-+ VkDeviceSize m_UsableSize;
-+ uint32_t m_LevelCount;
-+ VmaPoolAllocator<Node> m_NodeAllocator;
-+ Node* m_Root;
-+ struct
-+ {
-+ Node* front;
-+ Node* back;
-+ } m_FreeList[MAX_LEVELS];
-+
-+ // Number of nodes in the tree with type == TYPE_ALLOCATION.
-+ size_t m_AllocationCount;
-+ // Number of nodes in the tree with type == TYPE_FREE.
-+ size_t m_FreeCount;
-+ // Doesn't include space wasted due to internal fragmentation - allocation sizes are just aligned up to node sizes.
-+ // Doesn't include unusable size.
-+ VkDeviceSize m_SumFreeSize;
-+
-+ VkDeviceSize GetUnusableSize() const { return GetSize() - m_UsableSize; }
-+ VkDeviceSize LevelToNodeSize(uint32_t level) const { return m_UsableSize >> level; }
-+
-+ VkDeviceSize AlignAllocationSize(VkDeviceSize size) const
-+ {
-+ if (!IsVirtual())
-+ {
-+ size = VmaAlignUp(size, (VkDeviceSize)16);
-+ }
-+ return VmaNextPow2(size);
-+ }
-+ Node* FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const;
-+ void DeleteNodeChildren(Node* node);
-+ bool ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const;
-+ uint32_t AllocSizeToLevel(VkDeviceSize allocSize) const;
-+ void AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const;
-+ // Adds node to the front of FreeList at given level.
-+ // node->type must be FREE.
-+ // node->free.prev, next can be undefined.
-+ void AddToFreeListFront(uint32_t level, Node* node);
-+ // Removes node from FreeList at given level.
-+ // node->type must be FREE.
-+ // node->free.prev, next stay untouched.
-+ void RemoveFromFreeList(uint32_t level, Node* node);
-+ void DebugLogAllAllocationNode(Node* node, uint32_t level) const;
-+
-+#if VMA_STATS_STRING_ENABLED
-+ void PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const;
-+#endif
-+};
-+
-+#ifndef _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
-+VmaBlockMetadata_Buddy::VmaBlockMetadata_Buddy(const VkAllocationCallbacks* pAllocationCallbacks,
-+ VkDeviceSize bufferImageGranularity, bool isVirtual)
-+ : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
-+ m_NodeAllocator(pAllocationCallbacks, 32), // firstBlockCapacity
-+ m_Root(VMA_NULL),
-+ m_AllocationCount(0),
-+ m_FreeCount(1),
-+ m_SumFreeSize(0)
-+{
-+ memset(m_FreeList, 0, sizeof(m_FreeList));
-+}
-+
-+VmaBlockMetadata_Buddy::~VmaBlockMetadata_Buddy()
-+{
-+ DeleteNodeChildren(m_Root);
-+ m_NodeAllocator.Free(m_Root);
-+}
-+
-+void VmaBlockMetadata_Buddy::Init(VkDeviceSize size)
-+{
-+ VmaBlockMetadata::Init(size);
-+
-+ m_UsableSize = VmaPrevPow2(size);
-+ m_SumFreeSize = m_UsableSize;
-+
-+ // Calculate m_LevelCount.
-+ const VkDeviceSize minNodeSize = IsVirtual() ? 1 : 16;
-+ m_LevelCount = 1;
-+ while (m_LevelCount < MAX_LEVELS &&
-+ LevelToNodeSize(m_LevelCount) >= minNodeSize)
-+ {
-+ ++m_LevelCount;
-+ }
-+
-+ Node* rootNode = m_NodeAllocator.Alloc();
-+ rootNode->offset = 0;
-+ rootNode->type = Node::TYPE_FREE;
-+ rootNode->parent = VMA_NULL;
-+ rootNode->buddy = VMA_NULL;
-+
-+ m_Root = rootNode;
-+ AddToFreeListFront(0, rootNode);
-+}
-+
-+bool VmaBlockMetadata_Buddy::Validate() const
-+{
-+ // Validate tree.
-+ ValidationContext ctx;
-+ if (!ValidateNode(ctx, VMA_NULL, m_Root, 0, LevelToNodeSize(0)))
-+ {
-+ VMA_VALIDATE(false && "ValidateNode failed.");
-+ }
-+ VMA_VALIDATE(m_AllocationCount == ctx.calculatedAllocationCount);
-+ VMA_VALIDATE(m_SumFreeSize == ctx.calculatedSumFreeSize);
-+
-+ // Validate free node lists.
-+ for (uint32_t level = 0; level < m_LevelCount; ++level)
-+ {
-+ VMA_VALIDATE(m_FreeList[level].front == VMA_NULL ||
-+ m_FreeList[level].front->free.prev == VMA_NULL);
-+
-+ for (Node* node = m_FreeList[level].front;
-+ node != VMA_NULL;
-+ node = node->free.next)
-+ {
-+ VMA_VALIDATE(node->type == Node::TYPE_FREE);
-+
-+ if (node->free.next == VMA_NULL)
-+ {
-+ VMA_VALIDATE(m_FreeList[level].back == node);
-+ }
-+ else
-+ {
-+ VMA_VALIDATE(node->free.next->free.prev == node);
-+ }
-+ }
-+ }
-+
-+ // Validate that free lists ar higher levels are empty.
-+ for (uint32_t level = m_LevelCount; level < MAX_LEVELS; ++level)
-+ {
-+ VMA_VALIDATE(m_FreeList[level].front == VMA_NULL && m_FreeList[level].back == VMA_NULL);
-+ }
-+
-+ return true;
-+}
-+
-+void VmaBlockMetadata_Buddy::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
-+{
-+ inoutStats.statistics.blockCount++;
-+ inoutStats.statistics.blockBytes += GetSize();
-+
-+ AddNodeToDetailedStatistics(inoutStats, m_Root, LevelToNodeSize(0));
-+
-+ const VkDeviceSize unusableSize = GetUnusableSize();
-+ if (unusableSize > 0)
-+ VmaAddDetailedStatisticsUnusedRange(inoutStats, unusableSize);
-+}
-+
-+void VmaBlockMetadata_Buddy::AddStatistics(VmaStatistics& inoutStats) const
-+{
-+ inoutStats.blockCount++;
-+ inoutStats.allocationCount += (uint32_t)m_AllocationCount;
-+ inoutStats.blockBytes += GetSize();
-+ inoutStats.allocationBytes += GetSize() - m_SumFreeSize;
-+}
-+
-+#if VMA_STATS_STRING_ENABLED
-+void VmaBlockMetadata_Buddy::PrintDetailedMap(class VmaJsonWriter& json, uint32_t mapRefCount) const
-+{
-+ VmaDetailedStatistics stats;
-+ VmaClearDetailedStatistics(stats);
-+ AddDetailedStatistics(stats);
-+
-+ PrintDetailedMap_Begin(
-+ json,
-+ stats.statistics.blockBytes - stats.statistics.allocationBytes,
-+ stats.statistics.allocationCount,
-+ stats.unusedRangeCount,
-+ mapRefCount);
-+
-+ PrintDetailedMapNode(json, m_Root, LevelToNodeSize(0));
-+
-+ const VkDeviceSize unusableSize = GetUnusableSize();
-+ if (unusableSize > 0)
-+ {
-+ PrintDetailedMap_UnusedRange(json,
-+ m_UsableSize, // offset
-+ unusableSize); // size
-+ }
-+
-+ PrintDetailedMap_End(json);
-+}
-+#endif // VMA_STATS_STRING_ENABLED
-+
-+bool VmaBlockMetadata_Buddy::CreateAllocationRequest(
-+ VkDeviceSize allocSize,
-+ VkDeviceSize allocAlignment,
-+ bool upperAddress,
-+ VmaSuballocationType allocType,
-+ uint32_t strategy,
-+ VmaAllocationRequest* pAllocationRequest)
-+{
-+ VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
-+
-+ allocSize = AlignAllocationSize(allocSize);
-+
-+ // Simple way to respect bufferImageGranularity. May be optimized some day.
-+ // Whenever it might be an OPTIMAL image...
-+ if (allocType == VMA_SUBALLOCATION_TYPE_UNKNOWN ||
-+ allocType == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
-+ allocType == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL)
-+ {
-+ allocAlignment = VMA_MAX(allocAlignment, GetBufferImageGranularity());
-+ allocSize = VmaAlignUp(allocSize, GetBufferImageGranularity());
-+ }
-+
-+ if (allocSize > m_UsableSize)
-+ {
-+ return false;
-+ }
-+
-+ const uint32_t targetLevel = AllocSizeToLevel(allocSize);
-+ for (uint32_t level = targetLevel; level--; )
-+ {
-+ for (Node* freeNode = m_FreeList[level].front;
-+ freeNode != VMA_NULL;
-+ freeNode = freeNode->free.next)
-+ {
-+ if (freeNode->offset % allocAlignment == 0)
-+ {
-+ pAllocationRequest->type = VmaAllocationRequestType::Normal;
-+ pAllocationRequest->allocHandle = (VmaAllocHandle)(freeNode->offset + 1);
-+ pAllocationRequest->size = allocSize;
-+ pAllocationRequest->customData = (void*)(uintptr_t)level;
-+ return true;
-+ }
-+ }
-+ }
-+
-+ return false;
-+}
-+
-+void VmaBlockMetadata_Buddy::Alloc(
-+ const VmaAllocationRequest& request,
-+ VmaSuballocationType type,
-+ void* userData)
-+{
-+ VMA_ASSERT(request.type == VmaAllocationRequestType::Normal);
-+
-+ const uint32_t targetLevel = AllocSizeToLevel(request.size);
-+ uint32_t currLevel = (uint32_t)(uintptr_t)request.customData;
-+
-+ Node* currNode = m_FreeList[currLevel].front;
-+ VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
-+ const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1;
-+ while (currNode->offset != offset)
-+ {
-+ currNode = currNode->free.next;
-+ VMA_ASSERT(currNode != VMA_NULL && currNode->type == Node::TYPE_FREE);
-+ }
-+
-+ // Go down, splitting free nodes.
-+ while (currLevel < targetLevel)
-+ {
-+ // currNode is already first free node at currLevel.
-+ // Remove it from list of free nodes at this currLevel.
-+ RemoveFromFreeList(currLevel, currNode);
-+
-+ const uint32_t childrenLevel = currLevel + 1;
-+
-+ // Create two free sub-nodes.
-+ Node* leftChild = m_NodeAllocator.Alloc();
-+ Node* rightChild = m_NodeAllocator.Alloc();
-+
-+ leftChild->offset = currNode->offset;
-+ leftChild->type = Node::TYPE_FREE;
-+ leftChild->parent = currNode;
-+ leftChild->buddy = rightChild;
-+
-+ rightChild->offset = currNode->offset + LevelToNodeSize(childrenLevel);
-+ rightChild->type = Node::TYPE_FREE;
-+ rightChild->parent = currNode;
-+ rightChild->buddy = leftChild;
-+
-+ // Convert current currNode to split type.
-+ currNode->type = Node::TYPE_SPLIT;
-+ currNode->split.leftChild = leftChild;
-+
-+ // Add child nodes to free list. Order is important!
-+ AddToFreeListFront(childrenLevel, rightChild);
-+ AddToFreeListFront(childrenLevel, leftChild);
-+
-+ ++m_FreeCount;
-+ ++currLevel;
-+ currNode = m_FreeList[currLevel].front;
-+
-+ /*
-+ We can be sure that currNode, as left child of node previously split,
-+ also fulfills the alignment requirement.
-+ */
-+ }
-+
-+ // Remove from free list.
-+ VMA_ASSERT(currLevel == targetLevel &&
-+ currNode != VMA_NULL &&
-+ currNode->type == Node::TYPE_FREE);
-+ RemoveFromFreeList(currLevel, currNode);
-+
-+ // Convert to allocation node.
-+ currNode->type = Node::TYPE_ALLOCATION;
-+ currNode->allocation.userData = userData;
-+
-+ ++m_AllocationCount;
-+ --m_FreeCount;
-+ m_SumFreeSize -= request.size;
-+}
-+
-+void VmaBlockMetadata_Buddy::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
-+{
-+ uint32_t level = 0;
-+ outInfo.offset = (VkDeviceSize)allocHandle - 1;
-+ const Node* const node = FindAllocationNode(outInfo.offset, level);
-+ outInfo.size = LevelToNodeSize(level);
-+ outInfo.pUserData = node->allocation.userData;
-+}
-+
-+void* VmaBlockMetadata_Buddy::GetAllocationUserData(VmaAllocHandle allocHandle) const
-+{
-+ uint32_t level = 0;
-+ const Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
-+ return node->allocation.userData;
-+}
-+
-+VmaAllocHandle VmaBlockMetadata_Buddy::GetAllocationListBegin() const
-+{
-+ // Function only used for defragmentation, which is disabled for this algorithm
-+ return VK_NULL_HANDLE;
-+}
-+
-+VmaAllocHandle VmaBlockMetadata_Buddy::GetNextAllocation(VmaAllocHandle prevAlloc) const
-+{
-+ // Function only used for defragmentation, which is disabled for this algorithm
-+ return VK_NULL_HANDLE;
-+}
-+
-+void VmaBlockMetadata_Buddy::DeleteNodeChildren(Node* node)
-+{
-+ if (node->type == Node::TYPE_SPLIT)
-+ {
-+ DeleteNodeChildren(node->split.leftChild->buddy);
-+ DeleteNodeChildren(node->split.leftChild);
-+ const VkAllocationCallbacks* allocationCallbacks = GetAllocationCallbacks();
-+ m_NodeAllocator.Free(node->split.leftChild->buddy);
-+ m_NodeAllocator.Free(node->split.leftChild);
-+ }
-+}
-+
-+void VmaBlockMetadata_Buddy::Clear()
-+{
-+ DeleteNodeChildren(m_Root);
-+ m_Root->type = Node::TYPE_FREE;
-+ m_AllocationCount = 0;
-+ m_FreeCount = 1;
-+ m_SumFreeSize = m_UsableSize;
-+}
-+
-+void VmaBlockMetadata_Buddy::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
-+{
-+ uint32_t level = 0;
-+ Node* const node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
-+ node->allocation.userData = userData;
-+}
-+
-+VmaBlockMetadata_Buddy::Node* VmaBlockMetadata_Buddy::FindAllocationNode(VkDeviceSize offset, uint32_t& outLevel) const
-+{
-+ Node* node = m_Root;
-+ VkDeviceSize nodeOffset = 0;
-+ outLevel = 0;
-+ VkDeviceSize levelNodeSize = LevelToNodeSize(0);
-+ while (node->type == Node::TYPE_SPLIT)
-+ {
-+ const VkDeviceSize nextLevelNodeSize = levelNodeSize >> 1;
-+ if (offset < nodeOffset + nextLevelNodeSize)
-+ {
-+ node = node->split.leftChild;
-+ }
-+ else
-+ {
-+ node = node->split.leftChild->buddy;
-+ nodeOffset += nextLevelNodeSize;
-+ }
-+ ++outLevel;
-+ levelNodeSize = nextLevelNodeSize;
-+ }
-+
-+ VMA_ASSERT(node != VMA_NULL && node->type == Node::TYPE_ALLOCATION);
-+ return node;
-+}
-+
-+bool VmaBlockMetadata_Buddy::ValidateNode(ValidationContext& ctx, const Node* parent, const Node* curr, uint32_t level, VkDeviceSize levelNodeSize) const
-+{
-+ VMA_VALIDATE(level < m_LevelCount);
-+ VMA_VALIDATE(curr->parent == parent);
-+ VMA_VALIDATE((curr->buddy == VMA_NULL) == (parent == VMA_NULL));
-+ VMA_VALIDATE(curr->buddy == VMA_NULL || curr->buddy->buddy == curr);
-+ switch (curr->type)
-+ {
-+ case Node::TYPE_FREE:
-+ // curr->free.prev, next are validated separately.
-+ ctx.calculatedSumFreeSize += levelNodeSize;
-+ ++ctx.calculatedFreeCount;
-+ break;
-+ case Node::TYPE_ALLOCATION:
-+ ++ctx.calculatedAllocationCount;
-+ if (!IsVirtual())
-+ {
-+ VMA_VALIDATE(curr->allocation.userData != VMA_NULL);
-+ }
-+ break;
-+ case Node::TYPE_SPLIT:
-+ {
-+ const uint32_t childrenLevel = level + 1;
-+ const VkDeviceSize childrenLevelNodeSize = levelNodeSize >> 1;
-+ const Node* const leftChild = curr->split.leftChild;
-+ VMA_VALIDATE(leftChild != VMA_NULL);
-+ VMA_VALIDATE(leftChild->offset == curr->offset);
-+ if (!ValidateNode(ctx, curr, leftChild, childrenLevel, childrenLevelNodeSize))
-+ {
-+ VMA_VALIDATE(false && "ValidateNode for left child failed.");
-+ }
-+ const Node* const rightChild = leftChild->buddy;
-+ VMA_VALIDATE(rightChild->offset == curr->offset + childrenLevelNodeSize);
-+ if (!ValidateNode(ctx, curr, rightChild, childrenLevel, childrenLevelNodeSize))
-+ {
-+ VMA_VALIDATE(false && "ValidateNode for right child failed.");
-+ }
-+ }
-+ break;
-+ default:
-+ return false;
-+ }
-+
-+ return true;
-+}
-+
-+uint32_t VmaBlockMetadata_Buddy::AllocSizeToLevel(VkDeviceSize allocSize) const
-+{
-+ // I know this could be optimized somehow e.g. by using std::log2p1 from C++20.
-+ uint32_t level = 0;
-+ VkDeviceSize currLevelNodeSize = m_UsableSize;
-+ VkDeviceSize nextLevelNodeSize = currLevelNodeSize >> 1;
-+ while (allocSize <= nextLevelNodeSize && level + 1 < m_LevelCount)
-+ {
-+ ++level;
-+ currLevelNodeSize >>= 1;
-+ nextLevelNodeSize >>= 1;
-+ }
-+ return level;
-+}
-+
-+void VmaBlockMetadata_Buddy::Free(VmaAllocHandle allocHandle)
-+{
-+ uint32_t level = 0;
-+ Node* node = FindAllocationNode((VkDeviceSize)allocHandle - 1, level);
-+
-+ ++m_FreeCount;
-+ --m_AllocationCount;
-+ m_SumFreeSize += LevelToNodeSize(level);
-+
-+ node->type = Node::TYPE_FREE;
-+
-+ // Join free nodes if possible.
-+ while (level > 0 && node->buddy->type == Node::TYPE_FREE)
-+ {
-+ RemoveFromFreeList(level, node->buddy);
-+ Node* const parent = node->parent;
-+
-+ m_NodeAllocator.Free(node->buddy);
-+ m_NodeAllocator.Free(node);
-+ parent->type = Node::TYPE_FREE;
-+
-+ node = parent;
-+ --level;
-+ --m_FreeCount;
-+ }
-+
-+ AddToFreeListFront(level, node);
-+}
-+
-+void VmaBlockMetadata_Buddy::AddNodeToDetailedStatistics(VmaDetailedStatistics& inoutStats, const Node* node, VkDeviceSize levelNodeSize) const
-+{
-+ switch (node->type)
-+ {
-+ case Node::TYPE_FREE:
-+ VmaAddDetailedStatisticsUnusedRange(inoutStats, levelNodeSize);
-+ break;
-+ case Node::TYPE_ALLOCATION:
-+ VmaAddDetailedStatisticsAllocation(inoutStats, levelNodeSize);
-+ break;
-+ case Node::TYPE_SPLIT:
-+ {
-+ const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
-+ const Node* const leftChild = node->split.leftChild;
-+ AddNodeToDetailedStatistics(inoutStats, leftChild, childrenNodeSize);
-+ const Node* const rightChild = leftChild->buddy;
-+ AddNodeToDetailedStatistics(inoutStats, rightChild, childrenNodeSize);
-+ }
-+ break;
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+}
-+
-+void VmaBlockMetadata_Buddy::AddToFreeListFront(uint32_t level, Node* node)
-+{
-+ VMA_ASSERT(node->type == Node::TYPE_FREE);
-+
-+ // List is empty.
-+ Node* const frontNode = m_FreeList[level].front;
-+ if (frontNode == VMA_NULL)
-+ {
-+ VMA_ASSERT(m_FreeList[level].back == VMA_NULL);
-+ node->free.prev = node->free.next = VMA_NULL;
-+ m_FreeList[level].front = m_FreeList[level].back = node;
-+ }
-+ else
-+ {
-+ VMA_ASSERT(frontNode->free.prev == VMA_NULL);
-+ node->free.prev = VMA_NULL;
-+ node->free.next = frontNode;
-+ frontNode->free.prev = node;
-+ m_FreeList[level].front = node;
-+ }
-+}
-+
-+void VmaBlockMetadata_Buddy::RemoveFromFreeList(uint32_t level, Node* node)
-+{
-+ VMA_ASSERT(m_FreeList[level].front != VMA_NULL);
-+
-+ // It is at the front.
-+ if (node->free.prev == VMA_NULL)
-+ {
-+ VMA_ASSERT(m_FreeList[level].front == node);
-+ m_FreeList[level].front = node->free.next;
-+ }
-+ else
-+ {
-+ Node* const prevFreeNode = node->free.prev;
-+ VMA_ASSERT(prevFreeNode->free.next == node);
-+ prevFreeNode->free.next = node->free.next;
-+ }
-+
-+ // It is at the back.
-+ if (node->free.next == VMA_NULL)
-+ {
-+ VMA_ASSERT(m_FreeList[level].back == node);
-+ m_FreeList[level].back = node->free.prev;
-+ }
-+ else
-+ {
-+ Node* const nextFreeNode = node->free.next;
-+ VMA_ASSERT(nextFreeNode->free.prev == node);
-+ nextFreeNode->free.prev = node->free.prev;
-+ }
-+}
-+
-+void VmaBlockMetadata_Buddy::DebugLogAllAllocationNode(Node* node, uint32_t level) const
-+{
-+ switch (node->type)
-+ {
-+ case Node::TYPE_FREE:
-+ break;
-+ case Node::TYPE_ALLOCATION:
-+ DebugLogAllocation(node->offset, LevelToNodeSize(level), node->allocation.userData);
-+ break;
-+ case Node::TYPE_SPLIT:
-+ {
-+ ++level;
-+ DebugLogAllAllocationNode(node->split.leftChild, level);
-+ DebugLogAllAllocationNode(node->split.leftChild->buddy, level);
-+ }
-+ break;
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+}
-+
-+#if VMA_STATS_STRING_ENABLED
-+void VmaBlockMetadata_Buddy::PrintDetailedMapNode(class VmaJsonWriter& json, const Node* node, VkDeviceSize levelNodeSize) const
-+{
-+ switch (node->type)
-+ {
-+ case Node::TYPE_FREE:
-+ PrintDetailedMap_UnusedRange(json, node->offset, levelNodeSize);
-+ break;
-+ case Node::TYPE_ALLOCATION:
-+ PrintDetailedMap_Allocation(json, node->offset, levelNodeSize, node->allocation.userData);
-+ break;
-+ case Node::TYPE_SPLIT:
-+ {
-+ const VkDeviceSize childrenNodeSize = levelNodeSize / 2;
-+ const Node* const leftChild = node->split.leftChild;
-+ PrintDetailedMapNode(json, leftChild, childrenNodeSize);
-+ const Node* const rightChild = leftChild->buddy;
-+ PrintDetailedMapNode(json, rightChild, childrenNodeSize);
-+ }
-+ break;
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+}
-+#endif // VMA_STATS_STRING_ENABLED
-+#endif // _VMA_BLOCK_METADATA_BUDDY_FUNCTIONS
-+#endif // _VMA_BLOCK_METADATA_BUDDY
-+#endif // #if 0
-+
-+#ifndef _VMA_BLOCK_METADATA_TLSF
-+// To not search current larger region if first allocation won't succeed and skip to smaller range
-+// use with VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT as strategy in CreateAllocationRequest().
-+// When fragmentation and reusal of previous blocks doesn't matter then use with
-+// VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT for fastest alloc time possible.
-+class VmaBlockMetadata_TLSF : public VmaBlockMetadata
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_TLSF)
-+public:
-+ VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
-+ VkDeviceSize bufferImageGranularity, bool isVirtual);
-+ virtual ~VmaBlockMetadata_TLSF();
-+
-+ size_t GetAllocationCount() const override { return m_AllocCount; }
-+ size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; }
-+ VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; }
-+ bool IsEmpty() const override { return m_NullBlock->offset == 0; }
-+ VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; }
-+
-+ void Init(VkDeviceSize size) override;
-+ bool Validate() const override;
-+
-+ void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override;
-+ void AddStatistics(VmaStatistics& inoutStats) const override;
-+
-+#if VMA_STATS_STRING_ENABLED
-+ void PrintDetailedMap(class VmaJsonWriter& json) const override;
-+#endif
-+
-+ bool CreateAllocationRequest(
-+ VkDeviceSize allocSize,
-+ VkDeviceSize allocAlignment,
-+ bool upperAddress,
-+ VmaSuballocationType allocType,
-+ uint32_t strategy,
-+ VmaAllocationRequest* pAllocationRequest) override;
-+
-+ VkResult CheckCorruption(const void* pBlockData) override;
-+ void Alloc(
-+ const VmaAllocationRequest& request,
-+ VmaSuballocationType type,
-+ void* userData) override;
-+
-+ void Free(VmaAllocHandle allocHandle) override;
-+ void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
-+ void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
-+ VmaAllocHandle GetAllocationListBegin() const override;
-+ VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
-+ VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
-+ void Clear() override;
-+ void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
-+ void DebugLogAllAllocations() const override;
-+
-+private:
-+ // According to original paper it should be preferable 4 or 5:
-+ // M. Masmano, I. Ripoll, A. Crespo, and J. Real "TLSF: a New Dynamic Memory Allocator for Real-Time Systems"
-+ // http://www.gii.upv.es/tlsf/files/ecrts04_tlsf.pdf
-+ static const uint8_t SECOND_LEVEL_INDEX = 5;
-+ static const uint16_t SMALL_BUFFER_SIZE = 256;
-+ static const uint32_t INITIAL_BLOCK_ALLOC_COUNT = 16;
-+ static const uint8_t MEMORY_CLASS_SHIFT = 7;
-+ static const uint8_t MAX_MEMORY_CLASSES = 65 - MEMORY_CLASS_SHIFT;
-+
-+ class Block
-+ {
-+ public:
-+ VkDeviceSize offset;
-+ VkDeviceSize size;
-+ Block* prevPhysical;
-+ Block* nextPhysical;
-+
-+ void MarkFree() { prevFree = VMA_NULL; }
-+ void MarkTaken() { prevFree = this; }
-+ bool IsFree() const { return prevFree != this; }
-+ void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; }
-+ Block*& PrevFree() { return prevFree; }
-+ Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; }
-+
-+ private:
-+ Block* prevFree; // Address of the same block here indicates that block is taken
-+ union
-+ {
-+ Block* nextFree;
-+ void* userData;
-+ };
-+ };
-+
-+ size_t m_AllocCount;
-+ // Total number of free blocks besides null block
-+ size_t m_BlocksFreeCount;
-+ // Total size of free blocks excluding null block
-+ VkDeviceSize m_BlocksFreeSize;
-+ uint32_t m_IsFreeBitmap;
-+ uint8_t m_MemoryClasses;
-+ uint32_t m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES];
-+ uint32_t m_ListsCount;
-+ /*
-+ * 0: 0-3 lists for small buffers
-+ * 1+: 0-(2^SLI-1) lists for normal buffers
-+ */
-+ Block** m_FreeList;
-+ VmaPoolAllocator<Block> m_BlockAllocator;
-+ Block* m_NullBlock;
-+ VmaBlockBufferImageGranularity m_GranularityHandler;
-+
-+ uint8_t SizeToMemoryClass(VkDeviceSize size) const;
-+ uint16_t SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const;
-+ uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const;
-+ uint32_t GetListIndex(VkDeviceSize size) const;
-+
-+ void RemoveFreeBlock(Block* block);
-+ void InsertFreeBlock(Block* block);
-+ void MergeBlock(Block* block, Block* prev);
-+
-+ Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const;
-+ bool CheckBlock(
-+ Block& block,
-+ uint32_t listIndex,
-+ VkDeviceSize allocSize,
-+ VkDeviceSize allocAlignment,
-+ VmaSuballocationType allocType,
-+ VmaAllocationRequest* pAllocationRequest);
-+};
-+
-+#ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
-+VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks,
-+ VkDeviceSize bufferImageGranularity, bool isVirtual)
-+ : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual),
-+ m_AllocCount(0),
-+ m_BlocksFreeCount(0),
-+ m_BlocksFreeSize(0),
-+ m_IsFreeBitmap(0),
-+ m_MemoryClasses(0),
-+ m_ListsCount(0),
-+ m_FreeList(VMA_NULL),
-+ m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT),
-+ m_NullBlock(VMA_NULL),
-+ m_GranularityHandler(bufferImageGranularity) {}
-+
-+VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF()
-+{
-+ if (m_FreeList)
-+ vma_delete_array(GetAllocationCallbacks(), m_FreeList, m_ListsCount);
-+ m_GranularityHandler.Destroy(GetAllocationCallbacks());
-+}
-+
-+void VmaBlockMetadata_TLSF::Init(VkDeviceSize size)
-+{
-+ VmaBlockMetadata::Init(size);
-+
-+ if (!IsVirtual())
-+ m_GranularityHandler.Init(GetAllocationCallbacks(), size);
-+
-+ m_NullBlock = m_BlockAllocator.Alloc();
-+ m_NullBlock->size = size;
-+ m_NullBlock->offset = 0;
-+ m_NullBlock->prevPhysical = VMA_NULL;
-+ m_NullBlock->nextPhysical = VMA_NULL;
-+ m_NullBlock->MarkFree();
-+ m_NullBlock->NextFree() = VMA_NULL;
-+ m_NullBlock->PrevFree() = VMA_NULL;
-+ uint8_t memoryClass = SizeToMemoryClass(size);
-+ uint16_t sli = SizeToSecondIndex(size, memoryClass);
-+ m_ListsCount = (memoryClass == 0 ? 0 : (memoryClass - 1) * (1UL << SECOND_LEVEL_INDEX) + sli) + 1;
-+ if (IsVirtual())
-+ m_ListsCount += 1UL << SECOND_LEVEL_INDEX;
-+ else
-+ m_ListsCount += 4;
-+
-+ m_MemoryClasses = memoryClass + uint8_t(2);
-+ memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(uint32_t));
-+
-+ m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount);
-+ memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
-+}
-+
-+bool VmaBlockMetadata_TLSF::Validate() const
-+{
-+ VMA_VALIDATE(GetSumFreeSize() <= GetSize());
-+
-+ VkDeviceSize calculatedSize = m_NullBlock->size;
-+ VkDeviceSize calculatedFreeSize = m_NullBlock->size;
-+ size_t allocCount = 0;
-+ size_t freeCount = 0;
-+
-+ // Check integrity of free lists
-+ for (uint32_t list = 0; list < m_ListsCount; ++list)
-+ {
-+ Block* block = m_FreeList[list];
-+ if (block != VMA_NULL)
-+ {
-+ VMA_VALIDATE(block->IsFree());
-+ VMA_VALIDATE(block->PrevFree() == VMA_NULL);
-+ while (block->NextFree())
-+ {
-+ VMA_VALIDATE(block->NextFree()->IsFree());
-+ VMA_VALIDATE(block->NextFree()->PrevFree() == block);
-+ block = block->NextFree();
-+ }
-+ }
-+ }
-+
-+ VkDeviceSize nextOffset = m_NullBlock->offset;
-+ auto validateCtx = m_GranularityHandler.StartValidation(GetAllocationCallbacks(), IsVirtual());
-+
-+ VMA_VALIDATE(m_NullBlock->nextPhysical == VMA_NULL);
-+ if (m_NullBlock->prevPhysical)
-+ {
-+ VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock);
-+ }
-+ // Check all blocks
-+ for (Block* prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical)
-+ {
-+ VMA_VALIDATE(prev->offset + prev->size == nextOffset);
-+ nextOffset = prev->offset;
-+ calculatedSize += prev->size;
-+
-+ uint32_t listIndex = GetListIndex(prev->size);
-+ if (prev->IsFree())
-+ {
-+ ++freeCount;
-+ // Check if free block belongs to free list
-+ Block* freeBlock = m_FreeList[listIndex];
-+ VMA_VALIDATE(freeBlock != VMA_NULL);
-+
-+ bool found = false;
-+ do
-+ {
-+ if (freeBlock == prev)
-+ found = true;
-+
-+ freeBlock = freeBlock->NextFree();
-+ } while (!found && freeBlock != VMA_NULL);
-+
-+ VMA_VALIDATE(found);
-+ calculatedFreeSize += prev->size;
-+ }
-+ else
-+ {
-+ ++allocCount;
-+ // Check if taken block is not on a free list
-+ Block* freeBlock = m_FreeList[listIndex];
-+ while (freeBlock)
-+ {
-+ VMA_VALIDATE(freeBlock != prev);
-+ freeBlock = freeBlock->NextFree();
-+ }
-+
-+ if (!IsVirtual())
-+ {
-+ VMA_VALIDATE(m_GranularityHandler.Validate(validateCtx, prev->offset, prev->size));
-+ }
-+ }
-+
-+ if (prev->prevPhysical)
-+ {
-+ VMA_VALIDATE(prev->prevPhysical->nextPhysical == prev);
-+ }
-+ }
-+
-+ if (!IsVirtual())
-+ {
-+ VMA_VALIDATE(m_GranularityHandler.FinishValidation(validateCtx));
-+ }
-+
-+ VMA_VALIDATE(nextOffset == 0);
-+ VMA_VALIDATE(calculatedSize == GetSize());
-+ VMA_VALIDATE(calculatedFreeSize == GetSumFreeSize());
-+ VMA_VALIDATE(allocCount == m_AllocCount);
-+ VMA_VALIDATE(freeCount == m_BlocksFreeCount);
-+
-+ return true;
-+}
-+
-+void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const
-+{
-+ inoutStats.statistics.blockCount++;
-+ inoutStats.statistics.blockBytes += GetSize();
-+ if (m_NullBlock->size > 0)
-+ VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size);
-+
-+ for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
-+ {
-+ if (block->IsFree())
-+ VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size);
-+ else
-+ VmaAddDetailedStatisticsAllocation(inoutStats, block->size);
-+ }
-+}
-+
-+void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const
-+{
-+ inoutStats.blockCount++;
-+ inoutStats.allocationCount += (uint32_t)m_AllocCount;
-+ inoutStats.blockBytes += GetSize();
-+ inoutStats.allocationBytes += GetSize() - GetSumFreeSize();
-+}
-+
-+#if VMA_STATS_STRING_ENABLED
-+void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const
-+{
-+ size_t blockCount = m_AllocCount + m_BlocksFreeCount;
-+ VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
-+ VmaVector<Block*, VmaStlAllocator<Block*>> blockList(blockCount, allocator);
-+
-+ size_t i = blockCount;
-+ for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
-+ {
-+ blockList[--i] = block;
-+ }
-+ VMA_ASSERT(i == 0);
-+
-+ VmaDetailedStatistics stats;
-+ VmaClearDetailedStatistics(stats);
-+ AddDetailedStatistics(stats);
-+
-+ PrintDetailedMap_Begin(json,
-+ stats.statistics.blockBytes - stats.statistics.allocationBytes,
-+ stats.statistics.allocationCount,
-+ stats.unusedRangeCount);
-+
-+ for (; i < blockCount; ++i)
-+ {
-+ Block* block = blockList[i];
-+ if (block->IsFree())
-+ PrintDetailedMap_UnusedRange(json, block->offset, block->size);
-+ else
-+ PrintDetailedMap_Allocation(json, block->offset, block->size, block->UserData());
-+ }
-+ if (m_NullBlock->size > 0)
-+ PrintDetailedMap_UnusedRange(json, m_NullBlock->offset, m_NullBlock->size);
-+
-+ PrintDetailedMap_End(json);
-+}
-+#endif
-+
-+bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
-+ VkDeviceSize allocSize,
-+ VkDeviceSize allocAlignment,
-+ bool upperAddress,
-+ VmaSuballocationType allocType,
-+ uint32_t strategy,
-+ VmaAllocationRequest* pAllocationRequest)
-+{
-+ VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
-+ VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
-+
-+ // For small granularity round up
-+ if (!IsVirtual())
-+ m_GranularityHandler.RoundupAllocRequest(allocType, allocSize, allocAlignment);
-+
-+ allocSize += GetDebugMargin();
-+ // Quick check for too small pool
-+ if (allocSize > GetSumFreeSize())
-+ return false;
-+
-+ // If no free blocks in pool then check only null block
-+ if (m_BlocksFreeCount == 0)
-+ return CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest);
-+
-+ // Round up to the next block
-+ VkDeviceSize sizeForNextList = allocSize;
-+ VkDeviceSize smallSizeStep = VkDeviceSize(SMALL_BUFFER_SIZE / (IsVirtual() ? 1 << SECOND_LEVEL_INDEX : 4));
-+ if (allocSize > SMALL_BUFFER_SIZE)
-+ {
-+ sizeForNextList += (1ULL << (VMA_BITSCAN_MSB(allocSize) - SECOND_LEVEL_INDEX));
-+ }
-+ else if (allocSize > SMALL_BUFFER_SIZE - smallSizeStep)
-+ sizeForNextList = SMALL_BUFFER_SIZE + 1;
-+ else
-+ sizeForNextList += smallSizeStep;
-+
-+ uint32_t nextListIndex = m_ListsCount;
-+ uint32_t prevListIndex = m_ListsCount;
-+ Block* nextListBlock = VMA_NULL;
-+ Block* prevListBlock = VMA_NULL;
-+
-+ // Check blocks according to strategies
-+ if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT)
-+ {
-+ // Quick check for larger block first
-+ nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
-+ if (nextListBlock != VMA_NULL && CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-+ return true;
-+
-+ // If not fitted then null block
-+ if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
-+ return true;
-+
-+ // Null block failed, search larger bucket
-+ while (nextListBlock)
-+ {
-+ if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-+ return true;
-+ nextListBlock = nextListBlock->NextFree();
-+ }
-+
-+ // Failed again, check best fit bucket
-+ prevListBlock = FindFreeBlock(allocSize, prevListIndex);
-+ while (prevListBlock)
-+ {
-+ if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-+ return true;
-+ prevListBlock = prevListBlock->NextFree();
-+ }
-+ }
-+ else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT)
-+ {
-+ // Check best fit bucket
-+ prevListBlock = FindFreeBlock(allocSize, prevListIndex);
-+ while (prevListBlock)
-+ {
-+ if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-+ return true;
-+ prevListBlock = prevListBlock->NextFree();
-+ }
-+
-+ // If failed check null block
-+ if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
-+ return true;
-+
-+ // Check larger bucket
-+ nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
-+ while (nextListBlock)
-+ {
-+ if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-+ return true;
-+ nextListBlock = nextListBlock->NextFree();
-+ }
-+ }
-+ else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT )
-+ {
-+ // Perform search from the start
-+ VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
-+ VmaVector<Block*, VmaStlAllocator<Block*>> blockList(m_BlocksFreeCount, allocator);
-+
-+ size_t i = m_BlocksFreeCount;
-+ for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
-+ {
-+ if (block->IsFree() && block->size >= allocSize)
-+ blockList[--i] = block;
-+ }
-+
-+ for (; i < m_BlocksFreeCount; ++i)
-+ {
-+ Block& block = *blockList[i];
-+ if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest))
-+ return true;
-+ }
-+
-+ // If failed check null block
-+ if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
-+ return true;
-+
-+ // Whole range searched, no more memory
-+ return false;
-+ }
-+ else
-+ {
-+ // Check larger bucket
-+ nextListBlock = FindFreeBlock(sizeForNextList, nextListIndex);
-+ while (nextListBlock)
-+ {
-+ if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-+ return true;
-+ nextListBlock = nextListBlock->NextFree();
-+ }
-+
-+ // If failed check null block
-+ if (CheckBlock(*m_NullBlock, m_ListsCount, allocSize, allocAlignment, allocType, pAllocationRequest))
-+ return true;
-+
-+ // Check best fit bucket
-+ prevListBlock = FindFreeBlock(allocSize, prevListIndex);
-+ while (prevListBlock)
-+ {
-+ if (CheckBlock(*prevListBlock, prevListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-+ return true;
-+ prevListBlock = prevListBlock->NextFree();
-+ }
-+ }
-+
-+ // Worst case, full search has to be done
-+ while (++nextListIndex < m_ListsCount)
-+ {
-+ nextListBlock = m_FreeList[nextListIndex];
-+ while (nextListBlock)
-+ {
-+ if (CheckBlock(*nextListBlock, nextListIndex, allocSize, allocAlignment, allocType, pAllocationRequest))
-+ return true;
-+ nextListBlock = nextListBlock->NextFree();
-+ }
-+ }
-+
-+ // No more memory sadly
-+ return false;
-+}
-+
-+VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData)
-+{
-+ for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
-+ {
-+ if (!block->IsFree())
-+ {
-+ if (!VmaValidateMagicValue(pBlockData, block->offset + block->size))
-+ {
-+ VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER VALIDATED ALLOCATION!");
-+ return VK_ERROR_UNKNOWN_COPY;
-+ }
-+ }
-+ }
-+
-+ return VK_SUCCESS;
-+}
-+
-+void VmaBlockMetadata_TLSF::Alloc(
-+ const VmaAllocationRequest& request,
-+ VmaSuballocationType type,
-+ void* userData)
-+{
-+ VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF);
-+
-+ // Get block and pop it from the free list
-+ Block* currentBlock = (Block*)request.allocHandle;
-+ VkDeviceSize offset = request.algorithmData;
-+ VMA_ASSERT(currentBlock != VMA_NULL);
-+ VMA_ASSERT(currentBlock->offset <= offset);
-+
-+ if (currentBlock != m_NullBlock)
-+ RemoveFreeBlock(currentBlock);
-+
-+ VkDeviceSize debugMargin = GetDebugMargin();
-+ VkDeviceSize misssingAlignment = offset - currentBlock->offset;
-+
-+ // Append missing alignment to prev block or create new one
-+ if (misssingAlignment)
-+ {
-+ Block* prevBlock = currentBlock->prevPhysical;
-+ VMA_ASSERT(prevBlock != VMA_NULL && "There should be no missing alignment at offset 0!");
-+
-+ if (prevBlock->IsFree() && prevBlock->size != debugMargin)
-+ {
-+ uint32_t oldList = GetListIndex(prevBlock->size);
-+ prevBlock->size += misssingAlignment;
-+ // Check if new size crosses list bucket
-+ if (oldList != GetListIndex(prevBlock->size))
-+ {
-+ prevBlock->size -= misssingAlignment;
-+ RemoveFreeBlock(prevBlock);
-+ prevBlock->size += misssingAlignment;
-+ InsertFreeBlock(prevBlock);
-+ }
-+ else
-+ m_BlocksFreeSize += misssingAlignment;
-+ }
-+ else
-+ {
-+ Block* newBlock = m_BlockAllocator.Alloc();
-+ currentBlock->prevPhysical = newBlock;
-+ prevBlock->nextPhysical = newBlock;
-+ newBlock->prevPhysical = prevBlock;
-+ newBlock->nextPhysical = currentBlock;
-+ newBlock->size = misssingAlignment;
-+ newBlock->offset = currentBlock->offset;
-+ newBlock->MarkTaken();
-+
-+ InsertFreeBlock(newBlock);
-+ }
-+
-+ currentBlock->size -= misssingAlignment;
-+ currentBlock->offset += misssingAlignment;
-+ }
-+
-+ VkDeviceSize size = request.size + debugMargin;
-+ if (currentBlock->size == size)
-+ {
-+ if (currentBlock == m_NullBlock)
-+ {
-+ // Setup new null block
-+ m_NullBlock = m_BlockAllocator.Alloc();
-+ m_NullBlock->size = 0;
-+ m_NullBlock->offset = currentBlock->offset + size;
-+ m_NullBlock->prevPhysical = currentBlock;
-+ m_NullBlock->nextPhysical = VMA_NULL;
-+ m_NullBlock->MarkFree();
-+ m_NullBlock->PrevFree() = VMA_NULL;
-+ m_NullBlock->NextFree() = VMA_NULL;
-+ currentBlock->nextPhysical = m_NullBlock;
-+ currentBlock->MarkTaken();
-+ }
-+ }
-+ else
-+ {
-+ VMA_ASSERT(currentBlock->size > size && "Proper block already found, shouldn't find smaller one!");
-+
-+ // Create new free block
-+ Block* newBlock = m_BlockAllocator.Alloc();
-+ newBlock->size = currentBlock->size - size;
-+ newBlock->offset = currentBlock->offset + size;
-+ newBlock->prevPhysical = currentBlock;
-+ newBlock->nextPhysical = currentBlock->nextPhysical;
-+ currentBlock->nextPhysical = newBlock;
-+ currentBlock->size = size;
-+
-+ if (currentBlock == m_NullBlock)
-+ {
-+ m_NullBlock = newBlock;
-+ m_NullBlock->MarkFree();
-+ m_NullBlock->NextFree() = VMA_NULL;
-+ m_NullBlock->PrevFree() = VMA_NULL;
-+ currentBlock->MarkTaken();
-+ }
-+ else
-+ {
-+ newBlock->nextPhysical->prevPhysical = newBlock;
-+ newBlock->MarkTaken();
-+ InsertFreeBlock(newBlock);
-+ }
-+ }
-+ currentBlock->UserData() = userData;
-+
-+ if (debugMargin > 0)
-+ {
-+ currentBlock->size -= debugMargin;
-+ Block* newBlock = m_BlockAllocator.Alloc();
-+ newBlock->size = debugMargin;
-+ newBlock->offset = currentBlock->offset + currentBlock->size;
-+ newBlock->prevPhysical = currentBlock;
-+ newBlock->nextPhysical = currentBlock->nextPhysical;
-+ newBlock->MarkTaken();
-+ currentBlock->nextPhysical->prevPhysical = newBlock;
-+ currentBlock->nextPhysical = newBlock;
-+ InsertFreeBlock(newBlock);
-+ }
-+
-+ if (!IsVirtual())
-+ m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData,
-+ currentBlock->offset, currentBlock->size);
-+ ++m_AllocCount;
-+}
-+
-+void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle)
-+{
-+ Block* block = (Block*)allocHandle;
-+ Block* next = block->nextPhysical;
-+ VMA_ASSERT(!block->IsFree() && "Block is already free!");
-+
-+ if (!IsVirtual())
-+ m_GranularityHandler.FreePages(block->offset, block->size);
-+ --m_AllocCount;
-+
-+ VkDeviceSize debugMargin = GetDebugMargin();
-+ if (debugMargin > 0)
-+ {
-+ RemoveFreeBlock(next);
-+ MergeBlock(next, block);
-+ block = next;
-+ next = next->nextPhysical;
-+ }
-+
-+ // Try merging
-+ Block* prev = block->prevPhysical;
-+ if (prev != VMA_NULL && prev->IsFree() && prev->size != debugMargin)
-+ {
-+ RemoveFreeBlock(prev);
-+ MergeBlock(block, prev);
-+ }
-+
-+ if (!next->IsFree())
-+ InsertFreeBlock(block);
-+ else if (next == m_NullBlock)
-+ MergeBlock(m_NullBlock, block);
-+ else
-+ {
-+ RemoveFreeBlock(next);
-+ MergeBlock(next, block);
-+ InsertFreeBlock(next);
-+ }
-+}
-+
-+void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
-+{
-+ Block* block = (Block*)allocHandle;
-+ VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!");
-+ outInfo.offset = block->offset;
-+ outInfo.size = block->size;
-+ outInfo.pUserData = block->UserData();
-+}
-+
-+void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const
-+{
-+ Block* block = (Block*)allocHandle;
-+ VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!");
-+ return block->UserData();
-+}
-+
-+VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const
-+{
-+ if (m_AllocCount == 0)
-+ return VK_NULL_HANDLE;
-+
-+ for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical)
-+ {
-+ if (!block->IsFree())
-+ return (VmaAllocHandle)block;
-+ }
-+ VMA_ASSERT(false && "If m_AllocCount > 0 then should find any allocation!");
-+ return VK_NULL_HANDLE;
-+}
-+
-+VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const
-+{
-+ Block* startBlock = (Block*)prevAlloc;
-+ VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!");
-+
-+ for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical)
-+ {
-+ if (!block->IsFree())
-+ return (VmaAllocHandle)block;
-+ }
-+ return VK_NULL_HANDLE;
-+}
-+
-+VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const
-+{
-+ Block* block = (Block*)alloc;
-+ VMA_ASSERT(!block->IsFree() && "Incorrect block!");
-+
-+ if (block->prevPhysical)
-+ return block->prevPhysical->IsFree() ? block->prevPhysical->size : 0;
-+ return 0;
-+}
-+
-+void VmaBlockMetadata_TLSF::Clear()
-+{
-+ m_AllocCount = 0;
-+ m_BlocksFreeCount = 0;
-+ m_BlocksFreeSize = 0;
-+ m_IsFreeBitmap = 0;
-+ m_NullBlock->offset = 0;
-+ m_NullBlock->size = GetSize();
-+ Block* block = m_NullBlock->prevPhysical;
-+ m_NullBlock->prevPhysical = VMA_NULL;
-+ while (block)
-+ {
-+ Block* prev = block->prevPhysical;
-+ m_BlockAllocator.Free(block);
-+ block = prev;
-+ }
-+ memset(m_FreeList, 0, m_ListsCount * sizeof(Block*));
-+ memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(uint32_t));
-+ m_GranularityHandler.Clear();
-+}
-+
-+void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
-+{
-+ Block* block = (Block*)allocHandle;
-+ VMA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!");
-+ block->UserData() = userData;
-+}
-+
-+void VmaBlockMetadata_TLSF::DebugLogAllAllocations() const
-+{
-+ for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
-+ if (!block->IsFree())
-+ DebugLogAllocation(block->offset, block->size, block->UserData());
-+}
-+
-+uint8_t VmaBlockMetadata_TLSF::SizeToMemoryClass(VkDeviceSize size) const
-+{
-+ if (size > SMALL_BUFFER_SIZE)
-+ return uint8_t(VMA_BITSCAN_MSB(size) - MEMORY_CLASS_SHIFT);
-+ return 0;
-+}
-+
-+uint16_t VmaBlockMetadata_TLSF::SizeToSecondIndex(VkDeviceSize size, uint8_t memoryClass) const
-+{
-+ if (memoryClass == 0)
-+ {
-+ if (IsVirtual())
-+ return static_cast<uint16_t>((size - 1) / 8);
-+ else
-+ return static_cast<uint16_t>((size - 1) / 64);
-+ }
-+ return static_cast<uint16_t>((size >> (memoryClass + MEMORY_CLASS_SHIFT - SECOND_LEVEL_INDEX)) ^ (1U << SECOND_LEVEL_INDEX));
-+}
-+
-+uint32_t VmaBlockMetadata_TLSF::GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const
-+{
-+ if (memoryClass == 0)
-+ return secondIndex;
-+
-+ const uint32_t index = static_cast<uint32_t>(memoryClass - 1) * (1 << SECOND_LEVEL_INDEX) + secondIndex;
-+ if (IsVirtual())
-+ return index + (1 << SECOND_LEVEL_INDEX);
-+ else
-+ return index + 4;
-+}
-+
-+uint32_t VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const
-+{
-+ uint8_t memoryClass = SizeToMemoryClass(size);
-+ return GetListIndex(memoryClass, SizeToSecondIndex(size, memoryClass));
-+}
-+
-+void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block)
-+{
-+ VMA_ASSERT(block != m_NullBlock);
-+ VMA_ASSERT(block->IsFree());
-+
-+ if (block->NextFree() != VMA_NULL)
-+ block->NextFree()->PrevFree() = block->PrevFree();
-+ if (block->PrevFree() != VMA_NULL)
-+ block->PrevFree()->NextFree() = block->NextFree();
-+ else
-+ {
-+ uint8_t memClass = SizeToMemoryClass(block->size);
-+ uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
-+ uint32_t index = GetListIndex(memClass, secondIndex);
-+ VMA_ASSERT(m_FreeList[index] == block);
-+ m_FreeList[index] = block->NextFree();
-+ if (block->NextFree() == VMA_NULL)
-+ {
-+ m_InnerIsFreeBitmap[memClass] &= ~(1U << secondIndex);
-+ if (m_InnerIsFreeBitmap[memClass] == 0)
-+ m_IsFreeBitmap &= ~(1UL << memClass);
-+ }
-+ }
-+ block->MarkTaken();
-+ block->UserData() = VMA_NULL;
-+ --m_BlocksFreeCount;
-+ m_BlocksFreeSize -= block->size;
-+}
-+
-+void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block)
-+{
-+ VMA_ASSERT(block != m_NullBlock);
-+ VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!");
-+
-+ uint8_t memClass = SizeToMemoryClass(block->size);
-+ uint16_t secondIndex = SizeToSecondIndex(block->size, memClass);
-+ uint32_t index = GetListIndex(memClass, secondIndex);
-+ VMA_ASSERT(index < m_ListsCount);
-+ block->PrevFree() = VMA_NULL;
-+ block->NextFree() = m_FreeList[index];
-+ m_FreeList[index] = block;
-+ if (block->NextFree() != VMA_NULL)
-+ block->NextFree()->PrevFree() = block;
-+ else
-+ {
-+ m_InnerIsFreeBitmap[memClass] |= 1U << secondIndex;
-+ m_IsFreeBitmap |= 1UL << memClass;
-+ }
-+ ++m_BlocksFreeCount;
-+ m_BlocksFreeSize += block->size;
-+}
-+
-+void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev)
-+{
-+ VMA_ASSERT(block->prevPhysical == prev && "Cannot merge separate physical regions!");
-+ VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!");
-+
-+ block->offset = prev->offset;
-+ block->size += prev->size;
-+ block->prevPhysical = prev->prevPhysical;
-+ if (block->prevPhysical)
-+ block->prevPhysical->nextPhysical = block;
-+ m_BlockAllocator.Free(prev);
-+}
-+
-+VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const
-+{
-+ uint8_t memoryClass = SizeToMemoryClass(size);
-+ uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass));
-+ if (!innerFreeMap)
-+ {
-+ // Check higher levels for available blocks
-+ uint32_t freeMap = m_IsFreeBitmap & (~0UL << (memoryClass + 1));
-+ if (!freeMap)
-+ return VMA_NULL; // No more memory available
-+
-+ // Find lowest free region
-+ memoryClass = VMA_BITSCAN_LSB(freeMap);
-+ innerFreeMap = m_InnerIsFreeBitmap[memoryClass];
-+ VMA_ASSERT(innerFreeMap != 0);
-+ }
-+ // Find lowest free subregion
-+ listIndex = GetListIndex(memoryClass, VMA_BITSCAN_LSB(innerFreeMap));
-+ VMA_ASSERT(m_FreeList[listIndex]);
-+ return m_FreeList[listIndex];
-+}
-+
-+bool VmaBlockMetadata_TLSF::CheckBlock(
-+ Block& block,
-+ uint32_t listIndex,
-+ VkDeviceSize allocSize,
-+ VkDeviceSize allocAlignment,
-+ VmaSuballocationType allocType,
-+ VmaAllocationRequest* pAllocationRequest)
-+{
-+ VMA_ASSERT(block.IsFree() && "Block is already taken!");
-+
-+ VkDeviceSize alignedOffset = VmaAlignUp(block.offset, allocAlignment);
-+ if (block.size < allocSize + alignedOffset - block.offset)
-+ return false;
-+
-+ // Check for granularity conflicts
-+ if (!IsVirtual() &&
-+ m_GranularityHandler.CheckConflictAndAlignUp(alignedOffset, allocSize, block.offset, block.size, allocType))
-+ return false;
-+
-+ // Alloc successful
-+ pAllocationRequest->type = VmaAllocationRequestType::TLSF;
-+ pAllocationRequest->allocHandle = (VmaAllocHandle)&block;
-+ pAllocationRequest->size = allocSize - GetDebugMargin();
-+ pAllocationRequest->customData = (void*)allocType;
-+ pAllocationRequest->algorithmData = alignedOffset;
-+
-+ // Place block at the start of list if it's normal block
-+ if (listIndex != m_ListsCount && block.PrevFree())
-+ {
-+ block.PrevFree()->NextFree() = block.NextFree();
-+ if (block.NextFree())
-+ block.NextFree()->PrevFree() = block.PrevFree();
-+ block.PrevFree() = VMA_NULL;
-+ block.NextFree() = m_FreeList[listIndex];
-+ m_FreeList[listIndex] = &block;
-+ if (block.NextFree())
-+ block.NextFree()->PrevFree() = &block;
-+ }
-+
-+ return true;
-+}
-+#endif // _VMA_BLOCK_METADATA_TLSF_FUNCTIONS
-+#endif // _VMA_BLOCK_METADATA_TLSF
-+
-+#ifndef _VMA_BLOCK_VECTOR
-+/*
-+Sequence of VmaDeviceMemoryBlock. Represents memory blocks allocated for a specific
-+Vulkan memory type.
-+
-+Synchronized internally with a mutex.
-+*/
-+class VmaBlockVector
-+{
-+ friend struct VmaDefragmentationContext_T;
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockVector)
-+public:
-+ VmaBlockVector(
-+ VmaAllocator hAllocator,
-+ VmaPool hParentPool,
-+ uint32_t memoryTypeIndex,
-+ VkDeviceSize preferredBlockSize,
-+ size_t minBlockCount,
-+ size_t maxBlockCount,
-+ VkDeviceSize bufferImageGranularity,
-+ bool explicitBlockSize,
-+ uint32_t algorithm,
-+ float priority,
-+ VkDeviceSize minAllocationAlignment,
-+ void* pMemoryAllocateNext);
-+ ~VmaBlockVector();
-+
-+ VmaAllocator GetAllocator() const { return m_hAllocator; }
-+ VmaPool GetParentPool() const { return m_hParentPool; }
-+ bool IsCustomPool() const { return m_hParentPool != VMA_NULL; }
-+ uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; }
-+ VkDeviceSize GetPreferredBlockSize() const { return m_PreferredBlockSize; }
-+ VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; }
-+ uint32_t GetAlgorithm() const { return m_Algorithm; }
-+ bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; }
-+ float GetPriority() const { return m_Priority; }
-+ const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; }
-+ // To be used only while the m_Mutex is locked. Used during defragmentation.
-+ size_t GetBlockCount() const { return m_Blocks.size(); }
-+ // To be used only while the m_Mutex is locked. Used during defragmentation.
-+ VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
-+ VMA_RW_MUTEX &GetMutex() { return m_Mutex; }
-+
-+ VkResult CreateMinBlocks();
-+ void AddStatistics(VmaStatistics& inoutStats);
-+ void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
-+ bool IsEmpty();
-+ bool IsCorruptionDetectionEnabled() const;
-+
-+ VkResult Allocate(
-+ VkDeviceSize size,
-+ VkDeviceSize alignment,
-+ const VmaAllocationCreateInfo& createInfo,
-+ VmaSuballocationType suballocType,
-+ size_t allocationCount,
-+ VmaAllocation* pAllocations);
-+
-+ void Free(const VmaAllocation hAllocation);
-+
-+#if VMA_STATS_STRING_ENABLED
-+ void PrintDetailedMap(class VmaJsonWriter& json);
-+#endif
-+
-+ VkResult CheckCorruption();
-+
-+private:
-+ const VmaAllocator m_hAllocator;
-+ const VmaPool m_hParentPool;
-+ const uint32_t m_MemoryTypeIndex;
-+ const VkDeviceSize m_PreferredBlockSize;
-+ const size_t m_MinBlockCount;
-+ const size_t m_MaxBlockCount;
-+ const VkDeviceSize m_BufferImageGranularity;
-+ const bool m_ExplicitBlockSize;
-+ const uint32_t m_Algorithm;
-+ const float m_Priority;
-+ const VkDeviceSize m_MinAllocationAlignment;
-+
-+ void* const m_pMemoryAllocateNext;
-+ VMA_RW_MUTEX m_Mutex;
-+ // Incrementally sorted by sumFreeSize, ascending.
-+ VmaVector<VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*>> m_Blocks;
-+ uint32_t m_NextBlockId;
-+ bool m_IncrementalSort = true;
-+
-+ void SetIncrementalSort(bool val) { m_IncrementalSort = val; }
-+
-+ VkDeviceSize CalcMaxBlockSize() const;
-+ // Finds and removes given block from vector.
-+ void Remove(VmaDeviceMemoryBlock* pBlock);
-+ // Performs single step in sorting m_Blocks. They may not be fully sorted
-+ // after this call.
-+ void IncrementallySortBlocks();
-+ void SortByFreeSize();
-+
-+ VkResult AllocatePage(
-+ VkDeviceSize size,
-+ VkDeviceSize alignment,
-+ const VmaAllocationCreateInfo& createInfo,
-+ VmaSuballocationType suballocType,
-+ VmaAllocation* pAllocation);
-+
-+ VkResult AllocateFromBlock(
-+ VmaDeviceMemoryBlock* pBlock,
-+ VkDeviceSize size,
-+ VkDeviceSize alignment,
-+ VmaAllocationCreateFlags allocFlags,
-+ void* pUserData,
-+ VmaSuballocationType suballocType,
-+ uint32_t strategy,
-+ VmaAllocation* pAllocation);
-+
-+ VkResult CommitAllocationRequest(
-+ VmaAllocationRequest& allocRequest,
-+ VmaDeviceMemoryBlock* pBlock,
-+ VkDeviceSize alignment,
-+ VmaAllocationCreateFlags allocFlags,
-+ void* pUserData,
-+ VmaSuballocationType suballocType,
-+ VmaAllocation* pAllocation);
-+
-+ VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
-+ bool HasEmptyBlock();
-+};
-+#endif // _VMA_BLOCK_VECTOR
-+
-+#ifndef _VMA_DEFRAGMENTATION_CONTEXT
-+struct VmaDefragmentationContext_T
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaDefragmentationContext_T)
-+public:
-+ VmaDefragmentationContext_T(
-+ VmaAllocator hAllocator,
-+ const VmaDefragmentationInfo& info);
-+ ~VmaDefragmentationContext_T();
-+
-+ void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; }
-+
-+ VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo);
-+ VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo);
-+
-+private:
-+ // Max number of allocations to ignore due to size constraints before ending single pass
-+ static const uint8_t MAX_ALLOCS_TO_IGNORE = 16;
-+ enum class CounterStatus { Pass, Ignore, End };
-+
-+ struct FragmentedBlock
-+ {
-+ uint32_t data;
-+ VmaDeviceMemoryBlock* block;
-+ };
-+ struct StateBalanced
-+ {
-+ VkDeviceSize avgFreeSize = 0;
-+ VkDeviceSize avgAllocSize = UINT64_MAX;
-+ };
-+ struct StateExtensive
-+ {
-+ enum class Operation : uint8_t
-+ {
-+ FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll,
-+ MoveBuffers, MoveTextures, MoveAll,
-+ Cleanup, Done
-+ };
-+
-+ Operation operation = Operation::FindFreeBlockTexture;
-+ size_t firstFreeBlock = SIZE_MAX;
-+ };
-+ struct MoveAllocationData
-+ {
-+ VkDeviceSize size;
-+ VkDeviceSize alignment;
-+ VmaSuballocationType type;
-+ VmaAllocationCreateFlags flags;
-+ VmaDefragmentationMove move = {};
-+ };
-+
-+ const VkDeviceSize m_MaxPassBytes;
-+ const uint32_t m_MaxPassAllocations;
-+ const PFN_vmaCheckDefragmentationBreakFunction m_BreakCallback;
-+ void* m_BreakCallbackUserData;
-+
-+ VmaStlAllocator<VmaDefragmentationMove> m_MoveAllocator;
-+ VmaVector<VmaDefragmentationMove, VmaStlAllocator<VmaDefragmentationMove>> m_Moves;
-+
-+ uint8_t m_IgnoredAllocs = 0;
-+ uint32_t m_Algorithm;
-+ uint32_t m_BlockVectorCount;
-+ VmaBlockVector* m_PoolBlockVector;
-+ VmaBlockVector** m_pBlockVectors;
-+ size_t m_ImmovableBlockCount = 0;
-+ VmaDefragmentationStats m_GlobalStats = { 0 };
-+ VmaDefragmentationStats m_PassStats = { 0 };
-+ void* m_AlgorithmState = VMA_NULL;
-+
-+ static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata);
-+ CounterStatus CheckCounters(VkDeviceSize bytes);
-+ bool IncrementCounters(VkDeviceSize bytes);
-+ bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block);
-+ bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector);
-+
-+ bool ComputeDefragmentation(VmaBlockVector& vector, size_t index);
-+ bool ComputeDefragmentation_Fast(VmaBlockVector& vector);
-+ bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update);
-+ bool ComputeDefragmentation_Full(VmaBlockVector& vector);
-+ bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index);
-+
-+ void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state);
-+ bool MoveDataToFreeBlocks(VmaSuballocationType currentType,
-+ VmaBlockVector& vector, size_t firstFreeBlock,
-+ bool& texturePresent, bool& bufferPresent, bool& otherPresent);
-+};
-+#endif // _VMA_DEFRAGMENTATION_CONTEXT
-+
-+#ifndef _VMA_POOL_T
-+struct VmaPool_T
-+{
-+ friend struct VmaPoolListItemTraits;
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaPool_T)
-+public:
-+ VmaBlockVector m_BlockVector;
-+ VmaDedicatedAllocationList m_DedicatedAllocations;
-+
-+ VmaPool_T(
-+ VmaAllocator hAllocator,
-+ const VmaPoolCreateInfo& createInfo,
-+ VkDeviceSize preferredBlockSize);
-+ ~VmaPool_T();
-+
-+ uint32_t GetId() const { return m_Id; }
-+ void SetId(uint32_t id) { VMA_ASSERT(m_Id == 0); m_Id = id; }
-+
-+ const char* GetName() const { return m_Name; }
-+ void SetName(const char* pName);
-+
-+#if VMA_STATS_STRING_ENABLED
-+ //void PrintDetailedMap(class VmaStringBuilder& sb);
-+#endif
-+
-+private:
-+ uint32_t m_Id;
-+ char* m_Name;
-+ VmaPool_T* m_PrevPool = VMA_NULL;
-+ VmaPool_T* m_NextPool = VMA_NULL;
-+};
-+
-+struct VmaPoolListItemTraits
-+{
-+ typedef VmaPool_T ItemType;
-+
-+ static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
-+ static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
-+ static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
-+ static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
-+};
-+#endif // _VMA_POOL_T
-+
-+#ifndef _VMA_CURRENT_BUDGET_DATA
-+struct VmaCurrentBudgetData
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaCurrentBudgetData)
-+public:
-+
-+ VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS];
-+ VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS];
-+ VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
-+ VMA_ATOMIC_UINT64 m_AllocationBytes[VK_MAX_MEMORY_HEAPS];
-+
-+#if VMA_MEMORY_BUDGET
-+ VMA_ATOMIC_UINT32 m_OperationsSinceBudgetFetch;
-+ VMA_RW_MUTEX m_BudgetMutex;
-+ uint64_t m_VulkanUsage[VK_MAX_MEMORY_HEAPS];
-+ uint64_t m_VulkanBudget[VK_MAX_MEMORY_HEAPS];
-+ uint64_t m_BlockBytesAtBudgetFetch[VK_MAX_MEMORY_HEAPS];
-+#endif // VMA_MEMORY_BUDGET
-+
-+ VmaCurrentBudgetData();
-+
-+ void AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize);
-+ void RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize);
-+};
-+
-+#ifndef _VMA_CURRENT_BUDGET_DATA_FUNCTIONS
-+VmaCurrentBudgetData::VmaCurrentBudgetData()
-+{
-+ for (uint32_t heapIndex = 0; heapIndex < VK_MAX_MEMORY_HEAPS; ++heapIndex)
-+ {
-+ m_BlockCount[heapIndex] = 0;
-+ m_AllocationCount[heapIndex] = 0;
-+ m_BlockBytes[heapIndex] = 0;
-+ m_AllocationBytes[heapIndex] = 0;
-+#if VMA_MEMORY_BUDGET
-+ m_VulkanUsage[heapIndex] = 0;
-+ m_VulkanBudget[heapIndex] = 0;
-+ m_BlockBytesAtBudgetFetch[heapIndex] = 0;
-+#endif
-+ }
-+
-+#if VMA_MEMORY_BUDGET
-+ m_OperationsSinceBudgetFetch = 0;
-+#endif
-+}
-+
-+void VmaCurrentBudgetData::AddAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
-+{
-+ m_AllocationBytes[heapIndex] += allocationSize;
-+ ++m_AllocationCount[heapIndex];
-+#if VMA_MEMORY_BUDGET
-+ ++m_OperationsSinceBudgetFetch;
-+#endif
-+}
-+
-+void VmaCurrentBudgetData::RemoveAllocation(uint32_t heapIndex, VkDeviceSize allocationSize)
-+{
-+ VMA_ASSERT(m_AllocationBytes[heapIndex] >= allocationSize);
-+ m_AllocationBytes[heapIndex] -= allocationSize;
-+ VMA_ASSERT(m_AllocationCount[heapIndex] > 0);
-+ --m_AllocationCount[heapIndex];
-+#if VMA_MEMORY_BUDGET
-+ ++m_OperationsSinceBudgetFetch;
-+#endif
-+}
-+#endif // _VMA_CURRENT_BUDGET_DATA_FUNCTIONS
-+#endif // _VMA_CURRENT_BUDGET_DATA
-+
-+#ifndef _VMA_ALLOCATION_OBJECT_ALLOCATOR
-+/*
-+Thread-safe wrapper over VmaPoolAllocator free list, for allocation of VmaAllocation_T objects.
-+*/
-+class VmaAllocationObjectAllocator
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocationObjectAllocator)
-+public:
-+ VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks)
-+ : m_Allocator(pAllocationCallbacks, 1024) {}
-+
-+ template<typename... Types> VmaAllocation Allocate(Types&&... args);
-+ void Free(VmaAllocation hAlloc);
-+
-+private:
-+ VMA_MUTEX m_Mutex;
-+ VmaPoolAllocator<VmaAllocation_T> m_Allocator;
-+};
-+
-+template<typename... Types>
-+VmaAllocation VmaAllocationObjectAllocator::Allocate(Types&&... args)
-+{
-+ VmaMutexLock mutexLock(m_Mutex);
-+ return m_Allocator.Alloc<Types...>(std::forward<Types>(args)...);
-+}
-+
-+void VmaAllocationObjectAllocator::Free(VmaAllocation hAlloc)
-+{
-+ VmaMutexLock mutexLock(m_Mutex);
-+ m_Allocator.Free(hAlloc);
-+}
-+#endif // _VMA_ALLOCATION_OBJECT_ALLOCATOR
-+
-+#ifndef _VMA_VIRTUAL_BLOCK_T
-+struct VmaVirtualBlock_T
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaVirtualBlock_T)
-+public:
-+ const bool m_AllocationCallbacksSpecified;
-+ const VkAllocationCallbacks m_AllocationCallbacks;
-+
-+ VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo);
-+ ~VmaVirtualBlock_T();
-+
-+ VkResult Init() { return VK_SUCCESS; }
-+ bool IsEmpty() const { return m_Metadata->IsEmpty(); }
-+ void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); }
-+ void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); }
-+ void Clear() { m_Metadata->Clear(); }
-+
-+ const VkAllocationCallbacks* GetAllocationCallbacks() const;
-+ void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo);
-+ VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
-+ VkDeviceSize* outOffset);
-+ void GetStatistics(VmaStatistics& outStats) const;
-+ void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const;
-+#if VMA_STATS_STRING_ENABLED
-+ void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const;
-+#endif
-+
-+private:
-+ VmaBlockMetadata* m_Metadata;
-+};
-+
-+#ifndef _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
-+VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo)
-+ : m_AllocationCallbacksSpecified(createInfo.pAllocationCallbacks != VMA_NULL),
-+ m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? *createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks)
-+{
-+ const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK;
-+ switch (algorithm)
-+ {
-+ case 0:
-+ m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
-+ break;
-+ case VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT:
-+ m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_Linear)(VK_NULL_HANDLE, 1, true);
-+ break;
-+ default:
-+ VMA_ASSERT(0);
-+ m_Metadata = vma_new(GetAllocationCallbacks(), VmaBlockMetadata_TLSF)(VK_NULL_HANDLE, 1, true);
-+ }
-+
-+ m_Metadata->Init(createInfo.size);
-+}
-+
-+VmaVirtualBlock_T::~VmaVirtualBlock_T()
-+{
-+ // Define macro VMA_DEBUG_LOG_FORMAT to receive the list of the unfreed allocations
-+ if (!m_Metadata->IsEmpty())
-+ m_Metadata->DebugLogAllAllocations();
-+ // This is the most important assert in the entire library.
-+ // Hitting it means you have some memory leak - unreleased virtual allocations.
-+ VMA_ASSERT(m_Metadata->IsEmpty() && "Some virtual allocations were not freed before destruction of this virtual block!");
-+
-+ vma_delete(GetAllocationCallbacks(), m_Metadata);
-+}
-+
-+const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const
-+{
-+ return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
-+}
-+
-+void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo)
-+{
-+ m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo);
-+}
-+
-+VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
-+ VkDeviceSize* outOffset)
-+{
-+ VmaAllocationRequest request = {};
-+ if (m_Metadata->CreateAllocationRequest(
-+ createInfo.size, // allocSize
-+ VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment
-+ (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress
-+ VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant
-+ createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy
-+ &request))
-+ {
-+ m_Metadata->Alloc(request,
-+ VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant
-+ createInfo.pUserData);
-+ outAllocation = (VmaVirtualAllocation)request.allocHandle;
-+ if(outOffset)
-+ *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle);
-+ return VK_SUCCESS;
-+ }
-+ outAllocation = (VmaVirtualAllocation)VK_NULL_HANDLE;
-+ if (outOffset)
-+ *outOffset = UINT64_MAX;
-+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-+}
-+
-+void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const
-+{
-+ VmaClearStatistics(outStats);
-+ m_Metadata->AddStatistics(outStats);
-+}
-+
-+void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const
-+{
-+ VmaClearDetailedStatistics(outStats);
-+ m_Metadata->AddDetailedStatistics(outStats);
-+}
-+
-+#if VMA_STATS_STRING_ENABLED
-+void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const
-+{
-+ VmaJsonWriter json(GetAllocationCallbacks(), sb);
-+ json.BeginObject();
-+
-+ VmaDetailedStatistics stats;
-+ CalculateDetailedStatistics(stats);
-+
-+ json.WriteString("Stats");
-+ VmaPrintDetailedStatistics(json, stats);
-+
-+ if (detailedMap)
-+ {
-+ json.WriteString("Details");
-+ json.BeginObject();
-+ m_Metadata->PrintDetailedMap(json);
-+ json.EndObject();
-+ }
-+
-+ json.EndObject();
-+}
-+#endif // VMA_STATS_STRING_ENABLED
-+#endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
-+#endif // _VMA_VIRTUAL_BLOCK_T
-+
-+
-+// Main allocator object.
-+struct VmaAllocator_T
-+{
-+ VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocator_T)
-+public:
-+ bool m_UseMutex;
-+ uint32_t m_VulkanApiVersion;
-+ bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
-+ bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0).
-+ bool m_UseExtMemoryBudget;
-+ bool m_UseAmdDeviceCoherentMemory;
-+ bool m_UseKhrBufferDeviceAddress;
-+ bool m_UseExtMemoryPriority;
-+ VkDevice m_hDevice;
-+ VkInstance m_hInstance;
-+ bool m_AllocationCallbacksSpecified;
-+ VkAllocationCallbacks m_AllocationCallbacks;
-+ VmaDeviceMemoryCallbacks m_DeviceMemoryCallbacks;
-+ VmaAllocationObjectAllocator m_AllocationObjectAllocator;
-+
-+ // Each bit (1 << i) is set if HeapSizeLimit is enabled for that heap, so cannot allocate more than the heap size.
-+ uint32_t m_HeapSizeLimitMask;
-+
-+ VkPhysicalDeviceProperties m_PhysicalDeviceProperties;
-+ VkPhysicalDeviceMemoryProperties m_MemProps;
-+
-+ // Default pools.
-+ VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
-+ VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES];
-+
-+ VmaCurrentBudgetData m_Budget;
-+ VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects.
-+
-+ VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo);
-+ VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo);
-+ ~VmaAllocator_T();
-+
-+ const VkAllocationCallbacks* GetAllocationCallbacks() const
-+ {
-+ return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL;
-+ }
-+ const VmaVulkanFunctions& GetVulkanFunctions() const
-+ {
-+ return m_VulkanFunctions;
-+ }
-+
-+ VkPhysicalDevice GetPhysicalDevice() const { return m_PhysicalDevice; }
-+
-+ VkDeviceSize GetBufferImageGranularity() const
-+ {
-+ return VMA_MAX(
-+ static_cast<VkDeviceSize>(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY),
-+ m_PhysicalDeviceProperties.limits.bufferImageGranularity);
-+ }
-+
-+ uint32_t GetMemoryHeapCount() const { return m_MemProps.memoryHeapCount; }
-+ uint32_t GetMemoryTypeCount() const { return m_MemProps.memoryTypeCount; }
-+
-+ uint32_t MemoryTypeIndexToHeapIndex(uint32_t memTypeIndex) const
-+ {
-+ VMA_ASSERT(memTypeIndex < m_MemProps.memoryTypeCount);
-+ return m_MemProps.memoryTypes[memTypeIndex].heapIndex;
-+ }
-+ // True when specific memory type is HOST_VISIBLE but not HOST_COHERENT.
-+ bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const
-+ {
-+ return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ==
-+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-+ }
-+ // Minimum alignment for all allocations in specific memory type.
-+ VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const
-+ {
-+ return IsMemoryTypeNonCoherent(memTypeIndex) ?
-+ VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) :
-+ (VkDeviceSize)VMA_MIN_ALIGNMENT;
-+ }
-+
-+ bool IsIntegratedGpu() const
-+ {
-+ return m_PhysicalDeviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU;
-+ }
-+
-+ uint32_t GetGlobalMemoryTypeBits() const { return m_GlobalMemoryTypeBits; }
-+
-+ void GetBufferMemoryRequirements(
-+ VkBuffer hBuffer,
-+ VkMemoryRequirements& memReq,
-+ bool& requiresDedicatedAllocation,
-+ bool& prefersDedicatedAllocation) const;
-+ void GetImageMemoryRequirements(
-+ VkImage hImage,
-+ VkMemoryRequirements& memReq,
-+ bool& requiresDedicatedAllocation,
-+ bool& prefersDedicatedAllocation) const;
-+ VkResult FindMemoryTypeIndex(
-+ uint32_t memoryTypeBits,
-+ const VmaAllocationCreateInfo* pAllocationCreateInfo,
-+ VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown.
-+ uint32_t* pMemoryTypeIndex) const;
-+
-+ // Main allocation function.
-+ VkResult AllocateMemory(
-+ const VkMemoryRequirements& vkMemReq,
-+ bool requiresDedicatedAllocation,
-+ bool prefersDedicatedAllocation,
-+ VkBuffer dedicatedBuffer,
-+ VkImage dedicatedImage,
-+ VkFlags dedicatedBufferImageUsage, // UINT32_MAX if unknown.
-+ const VmaAllocationCreateInfo& createInfo,
-+ VmaSuballocationType suballocType,
-+ size_t allocationCount,
-+ VmaAllocation* pAllocations);
-+
-+ // Main deallocation function.
-+ void FreeMemory(
-+ size_t allocationCount,
-+ const VmaAllocation* pAllocations);
-+
-+ void CalculateStatistics(VmaTotalStatistics* pStats);
-+
-+ void GetHeapBudgets(
-+ VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount);
-+
-+#if VMA_STATS_STRING_ENABLED
-+ void PrintDetailedMap(class VmaJsonWriter& json);
-+#endif
-+
-+ void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo);
-+
-+ VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool);
-+ void DestroyPool(VmaPool pool);
-+ void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats);
-+ void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats);
-+
-+ void SetCurrentFrameIndex(uint32_t frameIndex);
-+ uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); }
-+
-+ VkResult CheckPoolCorruption(VmaPool hPool);
-+ VkResult CheckCorruption(uint32_t memoryTypeBits);
-+
-+ // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping.
-+ VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory);
-+ // Call to Vulkan function vkFreeMemory with accompanying bookkeeping.
-+ void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory);
-+ // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR.
-+ VkResult BindVulkanBuffer(
-+ VkDeviceMemory memory,
-+ VkDeviceSize memoryOffset,
-+ VkBuffer buffer,
-+ const void* pNext);
-+ // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR.
-+ VkResult BindVulkanImage(
-+ VkDeviceMemory memory,
-+ VkDeviceSize memoryOffset,
-+ VkImage image,
-+ const void* pNext);
-+
-+ VkResult Map(VmaAllocation hAllocation, void** ppData);
-+ void Unmap(VmaAllocation hAllocation);
-+
-+ VkResult BindBufferMemory(
-+ VmaAllocation hAllocation,
-+ VkDeviceSize allocationLocalOffset,
-+ VkBuffer hBuffer,
-+ const void* pNext);
-+ VkResult BindImageMemory(
-+ VmaAllocation hAllocation,
-+ VkDeviceSize allocationLocalOffset,
-+ VkImage hImage,
-+ const void* pNext);
-+
-+ VkResult FlushOrInvalidateAllocation(
-+ VmaAllocation hAllocation,
-+ VkDeviceSize offset, VkDeviceSize size,
-+ VMA_CACHE_OPERATION op);
-+ VkResult FlushOrInvalidateAllocations(
-+ uint32_t allocationCount,
-+ const VmaAllocation* allocations,
-+ const VkDeviceSize* offsets, const VkDeviceSize* sizes,
-+ VMA_CACHE_OPERATION op);
-+
-+ void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern);
-+
-+ /*
-+ Returns bit mask of memory types that can support defragmentation on GPU as
-+ they support creation of required buffer for copy operations.
-+ */
-+ uint32_t GetGpuDefragmentationMemoryTypeBits();
-+
-+#if VMA_EXTERNAL_MEMORY
-+ VkExternalMemoryHandleTypeFlagsKHR GetExternalMemoryHandleTypeFlags(uint32_t memTypeIndex) const
-+ {
-+ return m_TypeExternalMemoryHandleTypes[memTypeIndex];
-+ }
-+#endif // #if VMA_EXTERNAL_MEMORY
-+
-+private:
-+ VkDeviceSize m_PreferredLargeHeapBlockSize;
-+
-+ VkPhysicalDevice m_PhysicalDevice;
-+ VMA_ATOMIC_UINT32 m_CurrentFrameIndex;
-+ VMA_ATOMIC_UINT32 m_GpuDefragmentationMemoryTypeBits; // UINT32_MAX means uninitialized.
-+#if VMA_EXTERNAL_MEMORY
-+ VkExternalMemoryHandleTypeFlagsKHR m_TypeExternalMemoryHandleTypes[VK_MAX_MEMORY_TYPES];
-+#endif // #if VMA_EXTERNAL_MEMORY
-+
-+ VMA_RW_MUTEX m_PoolsMutex;
-+ typedef VmaIntrusiveLinkedList<VmaPoolListItemTraits> PoolList;
-+ // Protected by m_PoolsMutex.
-+ PoolList m_Pools;
-+ uint32_t m_NextPoolId;
-+
-+ VmaVulkanFunctions m_VulkanFunctions;
-+
-+ // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types.
-+ uint32_t m_GlobalMemoryTypeBits;
-+
-+ void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions);
-+
-+#if VMA_STATIC_VULKAN_FUNCTIONS == 1
-+ void ImportVulkanFunctions_Static();
-+#endif
-+
-+ void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions);
-+
-+#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
-+ void ImportVulkanFunctions_Dynamic();
-+#endif
-+
-+ void ValidateVulkanFunctions();
-+
-+ VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
-+
-+ VkResult AllocateMemoryOfType(
-+ VmaPool pool,
-+ VkDeviceSize size,
-+ VkDeviceSize alignment,
-+ bool dedicatedPreferred,
-+ VkBuffer dedicatedBuffer,
-+ VkImage dedicatedImage,
-+ VkFlags dedicatedBufferImageUsage,
-+ const VmaAllocationCreateInfo& createInfo,
-+ uint32_t memTypeIndex,
-+ VmaSuballocationType suballocType,
-+ VmaDedicatedAllocationList& dedicatedAllocations,
-+ VmaBlockVector& blockVector,
-+ size_t allocationCount,
-+ VmaAllocation* pAllocations);
-+
-+ // Helper function only to be used inside AllocateDedicatedMemory.
-+ VkResult AllocateDedicatedMemoryPage(
-+ VmaPool pool,
-+ VkDeviceSize size,
-+ VmaSuballocationType suballocType,
-+ uint32_t memTypeIndex,
-+ const VkMemoryAllocateInfo& allocInfo,
-+ bool map,
-+ bool isUserDataString,
-+ bool isMappingAllowed,
-+ void* pUserData,
-+ VmaAllocation* pAllocation);
-+
-+ // Allocates and registers new VkDeviceMemory specifically for dedicated allocations.
-+ VkResult AllocateDedicatedMemory(
-+ VmaPool pool,
-+ VkDeviceSize size,
-+ VmaSuballocationType suballocType,
-+ VmaDedicatedAllocationList& dedicatedAllocations,
-+ uint32_t memTypeIndex,
-+ bool map,
-+ bool isUserDataString,
-+ bool isMappingAllowed,
-+ bool canAliasMemory,
-+ void* pUserData,
-+ float priority,
-+ VkBuffer dedicatedBuffer,
-+ VkImage dedicatedImage,
-+ VkFlags dedicatedBufferImageUsage,
-+ size_t allocationCount,
-+ VmaAllocation* pAllocations,
-+ const void* pNextChain = nullptr);
-+
-+ void FreeDedicatedMemory(const VmaAllocation allocation);
-+
-+ VkResult CalcMemTypeParams(
-+ VmaAllocationCreateInfo& outCreateInfo,
-+ uint32_t memTypeIndex,
-+ VkDeviceSize size,
-+ size_t allocationCount);
-+ VkResult CalcAllocationParams(
-+ VmaAllocationCreateInfo& outCreateInfo,
-+ bool dedicatedRequired,
-+ bool dedicatedPreferred);
-+
-+ /*
-+ Calculates and returns bit mask of memory types that can support defragmentation
-+ on GPU as they support creation of required buffer for copy operations.
-+ */
-+ uint32_t CalculateGpuDefragmentationMemoryTypeBits() const;
-+ uint32_t CalculateGlobalMemoryTypeBits() const;
-+
-+ bool GetFlushOrInvalidateRange(
-+ VmaAllocation allocation,
-+ VkDeviceSize offset, VkDeviceSize size,
-+ VkMappedMemoryRange& outRange) const;
-+
-+#if VMA_MEMORY_BUDGET
-+ void UpdateVulkanBudget();
-+#endif // #if VMA_MEMORY_BUDGET
-+};
-+
-+
-+#ifndef _VMA_MEMORY_FUNCTIONS
-+static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment)
-+{
-+ return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment);
-+}
-+
-+static void VmaFree(VmaAllocator hAllocator, void* ptr)
-+{
-+ VmaFree(&hAllocator->m_AllocationCallbacks, ptr);
-+}
-+
-+template<typename T>
-+static T* VmaAllocate(VmaAllocator hAllocator)
-+{
-+ return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T));
-+}
-+
-+template<typename T>
-+static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count)
-+{
-+ return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T));
-+}
-+
-+template<typename T>
-+static void vma_delete(VmaAllocator hAllocator, T* ptr)
-+{
-+ if(ptr != VMA_NULL)
-+ {
-+ ptr->~T();
-+ VmaFree(hAllocator, ptr);
-+ }
-+}
-+
-+template<typename T>
-+static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count)
-+{
-+ if(ptr != VMA_NULL)
-+ {
-+ for(size_t i = count; i--; )
-+ ptr[i].~T();
-+ VmaFree(hAllocator, ptr);
-+ }
-+}
-+#endif // _VMA_MEMORY_FUNCTIONS
-+
-+#ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
-+VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator)
-+ : m_pMetadata(VMA_NULL),
-+ m_MemoryTypeIndex(UINT32_MAX),
-+ m_Id(0),
-+ m_hMemory(VK_NULL_HANDLE),
-+ m_MapCount(0),
-+ m_pMappedData(VMA_NULL) {}
-+
-+VmaDeviceMemoryBlock::~VmaDeviceMemoryBlock()
-+{
-+ VMA_ASSERT(m_MapCount == 0 && "VkDeviceMemory block is being destroyed while it is still mapped.");
-+ VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
-+}
-+
-+void VmaDeviceMemoryBlock::Init(
-+ VmaAllocator hAllocator,
-+ VmaPool hParentPool,
-+ uint32_t newMemoryTypeIndex,
-+ VkDeviceMemory newMemory,
-+ VkDeviceSize newSize,
-+ uint32_t id,
-+ uint32_t algorithm,
-+ VkDeviceSize bufferImageGranularity)
-+{
-+ VMA_ASSERT(m_hMemory == VK_NULL_HANDLE);
-+
-+ m_hParentPool = hParentPool;
-+ m_MemoryTypeIndex = newMemoryTypeIndex;
-+ m_Id = id;
-+ m_hMemory = newMemory;
-+
-+ switch (algorithm)
-+ {
-+ case 0:
-+ m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
-+ bufferImageGranularity, false); // isVirtual
-+ break;
-+ case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT:
-+ m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(),
-+ bufferImageGranularity, false); // isVirtual
-+ break;
-+ default:
-+ VMA_ASSERT(0);
-+ m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(),
-+ bufferImageGranularity, false); // isVirtual
-+ }
-+ m_pMetadata->Init(newSize);
-+}
-+
-+void VmaDeviceMemoryBlock::Destroy(VmaAllocator allocator)
-+{
-+ // Define macro VMA_DEBUG_LOG_FORMAT to receive the list of the unfreed allocations
-+ if (!m_pMetadata->IsEmpty())
-+ m_pMetadata->DebugLogAllAllocations();
-+ // This is the most important assert in the entire library.
-+ // Hitting it means you have some memory leak - unreleased VmaAllocation objects.
-+ VMA_ASSERT(m_pMetadata->IsEmpty() && "Some allocations were not freed before destruction of this memory block!");
-+
-+ VMA_ASSERT(m_hMemory != VK_NULL_HANDLE);
-+ allocator->FreeVulkanMemory(m_MemoryTypeIndex, m_pMetadata->GetSize(), m_hMemory);
-+ m_hMemory = VK_NULL_HANDLE;
-+
-+ vma_delete(allocator, m_pMetadata);
-+ m_pMetadata = VMA_NULL;
-+}
-+
-+void VmaDeviceMemoryBlock::PostAlloc(VmaAllocator hAllocator)
-+{
-+ VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
-+ m_MappingHysteresis.PostAlloc();
-+}
-+
-+void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator)
-+{
-+ VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
-+ if(m_MappingHysteresis.PostFree())
-+ {
-+ VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0);
-+ if (m_MapCount == 0)
-+ {
-+ m_pMappedData = VMA_NULL;
-+ (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
-+ }
-+ }
-+}
-+
-+bool VmaDeviceMemoryBlock::Validate() const
-+{
-+ VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) &&
-+ (m_pMetadata->GetSize() != 0));
-+
-+ return m_pMetadata->Validate();
-+}
-+
-+VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator)
-+{
-+ void* pData = nullptr;
-+ VkResult res = Map(hAllocator, 1, &pData);
-+ if (res != VK_SUCCESS)
-+ {
-+ return res;
-+ }
-+
-+ res = m_pMetadata->CheckCorruption(pData);
-+
-+ Unmap(hAllocator, 1);
-+
-+ return res;
-+}
-+
-+VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData)
-+{
-+ if (count == 0)
-+ {
-+ return VK_SUCCESS;
-+ }
-+
-+ VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
-+ const uint32_t oldTotalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
-+ m_MappingHysteresis.PostMap();
-+ if (oldTotalMapCount != 0)
-+ {
-+ m_MapCount += count;
-+ VMA_ASSERT(m_pMappedData != VMA_NULL);
-+ if (ppData != VMA_NULL)
-+ {
-+ *ppData = m_pMappedData;
-+ }
-+ return VK_SUCCESS;
-+ }
-+ else
-+ {
-+ VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
-+ hAllocator->m_hDevice,
-+ m_hMemory,
-+ 0, // offset
-+ VK_WHOLE_SIZE,
-+ 0, // flags
-+ &m_pMappedData);
-+ if (result == VK_SUCCESS)
-+ {
-+ if (ppData != VMA_NULL)
-+ {
-+ *ppData = m_pMappedData;
-+ }
-+ m_MapCount = count;
-+ }
-+ return result;
-+ }
-+}
-+
-+void VmaDeviceMemoryBlock::Unmap(VmaAllocator hAllocator, uint32_t count)
-+{
-+ if (count == 0)
-+ {
-+ return;
-+ }
-+
-+ VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
-+ if (m_MapCount >= count)
-+ {
-+ m_MapCount -= count;
-+ const uint32_t totalMapCount = m_MapCount + m_MappingHysteresis.GetExtraMapping();
-+ if (totalMapCount == 0)
-+ {
-+ m_pMappedData = VMA_NULL;
-+ (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(hAllocator->m_hDevice, m_hMemory);
-+ }
-+ m_MappingHysteresis.PostUnmap();
-+ }
-+ else
-+ {
-+ VMA_ASSERT(0 && "VkDeviceMemory block is being unmapped while it was not previously mapped.");
-+ }
-+}
-+
-+VkResult VmaDeviceMemoryBlock::WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
-+{
-+ VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
-+
-+ void* pData;
-+ VkResult res = Map(hAllocator, 1, &pData);
-+ if (res != VK_SUCCESS)
-+ {
-+ return res;
-+ }
-+
-+ VmaWriteMagicValue(pData, allocOffset + allocSize);
-+
-+ Unmap(hAllocator, 1);
-+ return VK_SUCCESS;
-+}
-+
-+VkResult VmaDeviceMemoryBlock::ValidateMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize)
-+{
-+ VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION);
-+
-+ void* pData;
-+ VkResult res = Map(hAllocator, 1, &pData);
-+ if (res != VK_SUCCESS)
-+ {
-+ return res;
-+ }
-+
-+ if (!VmaValidateMagicValue(pData, allocOffset + allocSize))
-+ {
-+ VMA_ASSERT(0 && "MEMORY CORRUPTION DETECTED AFTER FREED ALLOCATION!");
-+ }
-+
-+ Unmap(hAllocator, 1);
-+ return VK_SUCCESS;
-+}
-+
-+VkResult VmaDeviceMemoryBlock::BindBufferMemory(
-+ const VmaAllocator hAllocator,
-+ const VmaAllocation hAllocation,
-+ VkDeviceSize allocationLocalOffset,
-+ VkBuffer hBuffer,
-+ const void* pNext)
-+{
-+ VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
-+ hAllocation->GetBlock() == this);
-+ VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
-+ "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
-+ const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
-+ // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
-+ VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
-+ return hAllocator->BindVulkanBuffer(m_hMemory, memoryOffset, hBuffer, pNext);
-+}
-+
-+VkResult VmaDeviceMemoryBlock::BindImageMemory(
-+ const VmaAllocator hAllocator,
-+ const VmaAllocation hAllocation,
-+ VkDeviceSize allocationLocalOffset,
-+ VkImage hImage,
-+ const void* pNext)
-+{
-+ VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK &&
-+ hAllocation->GetBlock() == this);
-+ VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() &&
-+ "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?");
-+ const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset;
-+ // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads.
-+ VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex);
-+ return hAllocator->BindVulkanImage(m_hMemory, memoryOffset, hImage, pNext);
-+}
-+#endif // _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS
-+
-+#ifndef _VMA_ALLOCATION_T_FUNCTIONS
-+VmaAllocation_T::VmaAllocation_T(bool mappingAllowed)
-+ : m_Alignment{ 1 },
-+ m_Size{ 0 },
-+ m_pUserData{ VMA_NULL },
-+ m_pName{ VMA_NULL },
-+ m_MemoryTypeIndex{ 0 },
-+ m_Type{ (uint8_t)ALLOCATION_TYPE_NONE },
-+ m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN },
-+ m_MapCount{ 0 },
-+ m_Flags{ 0 }
-+{
-+ if(mappingAllowed)
-+ m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED;
-+
-+#if VMA_STATS_STRING_ENABLED
-+ m_BufferImageUsage = 0;
-+#endif
-+}
-+
-+VmaAllocation_T::~VmaAllocation_T()
-+{
-+ VMA_ASSERT(m_MapCount == 0 && "Allocation was not unmapped before destruction.");
-+
-+ // Check if owned string was freed.
-+ VMA_ASSERT(m_pName == VMA_NULL);
-+}
-+
-+void VmaAllocation_T::InitBlockAllocation(
-+ VmaDeviceMemoryBlock* block,
-+ VmaAllocHandle allocHandle,
-+ VkDeviceSize alignment,
-+ VkDeviceSize size,
-+ uint32_t memoryTypeIndex,
-+ VmaSuballocationType suballocationType,
-+ bool mapped)
-+{
-+ VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
-+ VMA_ASSERT(block != VMA_NULL);
-+ m_Type = (uint8_t)ALLOCATION_TYPE_BLOCK;
-+ m_Alignment = alignment;
-+ m_Size = size;
-+ m_MemoryTypeIndex = memoryTypeIndex;
-+ if(mapped)
-+ {
-+ VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
-+ m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP;
-+ }
-+ m_SuballocationType = (uint8_t)suballocationType;
-+ m_BlockAllocation.m_Block = block;
-+ m_BlockAllocation.m_AllocHandle = allocHandle;
-+}
-+
-+void VmaAllocation_T::InitDedicatedAllocation(
-+ VmaPool hParentPool,
-+ uint32_t memoryTypeIndex,
-+ VkDeviceMemory hMemory,
-+ VmaSuballocationType suballocationType,
-+ void* pMappedData,
-+ VkDeviceSize size)
-+{
-+ VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE);
-+ VMA_ASSERT(hMemory != VK_NULL_HANDLE);
-+ m_Type = (uint8_t)ALLOCATION_TYPE_DEDICATED;
-+ m_Alignment = 0;
-+ m_Size = size;
-+ m_MemoryTypeIndex = memoryTypeIndex;
-+ m_SuballocationType = (uint8_t)suballocationType;
-+ if(pMappedData != VMA_NULL)
-+ {
-+ VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
-+ m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP;
-+ }
-+ m_DedicatedAllocation.m_hParentPool = hParentPool;
-+ m_DedicatedAllocation.m_hMemory = hMemory;
-+ m_DedicatedAllocation.m_pMappedData = pMappedData;
-+ m_DedicatedAllocation.m_Prev = VMA_NULL;
-+ m_DedicatedAllocation.m_Next = VMA_NULL;
-+}
-+
-+void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName)
-+{
-+ VMA_ASSERT(pName == VMA_NULL || pName != m_pName);
-+
-+ FreeName(hAllocator);
-+
-+ if (pName != VMA_NULL)
-+ m_pName = VmaCreateStringCopy(hAllocator->GetAllocationCallbacks(), pName);
-+}
-+
-+uint8_t VmaAllocation_T::SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation)
-+{
-+ VMA_ASSERT(allocation != VMA_NULL);
-+ VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK);
-+ VMA_ASSERT(allocation->m_Type == ALLOCATION_TYPE_BLOCK);
-+
-+ if (m_MapCount != 0)
-+ m_BlockAllocation.m_Block->Unmap(hAllocator, m_MapCount);
-+
-+ m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, allocation);
-+ VMA_SWAP(m_BlockAllocation, allocation->m_BlockAllocation);
-+ m_BlockAllocation.m_Block->m_pMetadata->SetAllocationUserData(m_BlockAllocation.m_AllocHandle, this);
-+
-+#if VMA_STATS_STRING_ENABLED
-+ VMA_SWAP(m_BufferImageUsage, allocation->m_BufferImageUsage);
-+#endif
-+ return m_MapCount;
-+}
-+
-+VmaAllocHandle VmaAllocation_T::GetAllocHandle() const
-+{
-+ switch (m_Type)
-+ {
-+ case ALLOCATION_TYPE_BLOCK:
-+ return m_BlockAllocation.m_AllocHandle;
-+ case ALLOCATION_TYPE_DEDICATED:
-+ return VK_NULL_HANDLE;
-+ default:
-+ VMA_ASSERT(0);
-+ return VK_NULL_HANDLE;
-+ }
-+}
-+
-+VkDeviceSize VmaAllocation_T::GetOffset() const
-+{
-+ switch (m_Type)
-+ {
-+ case ALLOCATION_TYPE_BLOCK:
-+ return m_BlockAllocation.m_Block->m_pMetadata->GetAllocationOffset(m_BlockAllocation.m_AllocHandle);
-+ case ALLOCATION_TYPE_DEDICATED:
-+ return 0;
-+ default:
-+ VMA_ASSERT(0);
-+ return 0;
-+ }
-+}
-+
-+VmaPool VmaAllocation_T::GetParentPool() const
-+{
-+ switch (m_Type)
-+ {
-+ case ALLOCATION_TYPE_BLOCK:
-+ return m_BlockAllocation.m_Block->GetParentPool();
-+ case ALLOCATION_TYPE_DEDICATED:
-+ return m_DedicatedAllocation.m_hParentPool;
-+ default:
-+ VMA_ASSERT(0);
-+ return VK_NULL_HANDLE;
-+ }
-+}
-+
-+VkDeviceMemory VmaAllocation_T::GetMemory() const
-+{
-+ switch (m_Type)
-+ {
-+ case ALLOCATION_TYPE_BLOCK:
-+ return m_BlockAllocation.m_Block->GetDeviceMemory();
-+ case ALLOCATION_TYPE_DEDICATED:
-+ return m_DedicatedAllocation.m_hMemory;
-+ default:
-+ VMA_ASSERT(0);
-+ return VK_NULL_HANDLE;
-+ }
-+}
-+
-+void* VmaAllocation_T::GetMappedData() const
-+{
-+ switch (m_Type)
-+ {
-+ case ALLOCATION_TYPE_BLOCK:
-+ if (m_MapCount != 0 || IsPersistentMap())
-+ {
-+ void* pBlockData = m_BlockAllocation.m_Block->GetMappedData();
-+ VMA_ASSERT(pBlockData != VMA_NULL);
-+ return (char*)pBlockData + GetOffset();
-+ }
-+ else
-+ {
-+ return VMA_NULL;
-+ }
-+ break;
-+ case ALLOCATION_TYPE_DEDICATED:
-+ VMA_ASSERT((m_DedicatedAllocation.m_pMappedData != VMA_NULL) == (m_MapCount != 0 || IsPersistentMap()));
-+ return m_DedicatedAllocation.m_pMappedData;
-+ default:
-+ VMA_ASSERT(0);
-+ return VMA_NULL;
-+ }
-+}
-+
-+void VmaAllocation_T::BlockAllocMap()
-+{
-+ VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
-+ VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
-+
-+ if (m_MapCount < 0xFF)
-+ {
-+ ++m_MapCount;
-+ }
-+ else
-+ {
-+ VMA_ASSERT(0 && "Allocation mapped too many times simultaneously.");
-+ }
-+}
-+
-+void VmaAllocation_T::BlockAllocUnmap()
-+{
-+ VMA_ASSERT(GetType() == ALLOCATION_TYPE_BLOCK);
-+
-+ if (m_MapCount > 0)
-+ {
-+ --m_MapCount;
-+ }
-+ else
-+ {
-+ VMA_ASSERT(0 && "Unmapping allocation not previously mapped.");
-+ }
-+}
-+
-+VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData)
-+{
-+ VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
-+ VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it.");
-+
-+ if (m_MapCount != 0 || IsPersistentMap())
-+ {
-+ if (m_MapCount < 0xFF)
-+ {
-+ VMA_ASSERT(m_DedicatedAllocation.m_pMappedData != VMA_NULL);
-+ *ppData = m_DedicatedAllocation.m_pMappedData;
-+ ++m_MapCount;
-+ return VK_SUCCESS;
-+ }
-+ else
-+ {
-+ VMA_ASSERT(0 && "Dedicated allocation mapped too many times simultaneously.");
-+ return VK_ERROR_MEMORY_MAP_FAILED;
-+ }
-+ }
-+ else
-+ {
-+ VkResult result = (*hAllocator->GetVulkanFunctions().vkMapMemory)(
-+ hAllocator->m_hDevice,
-+ m_DedicatedAllocation.m_hMemory,
-+ 0, // offset
-+ VK_WHOLE_SIZE,
-+ 0, // flags
-+ ppData);
-+ if (result == VK_SUCCESS)
-+ {
-+ m_DedicatedAllocation.m_pMappedData = *ppData;
-+ m_MapCount = 1;
-+ }
-+ return result;
-+ }
-+}
-+
-+void VmaAllocation_T::DedicatedAllocUnmap(VmaAllocator hAllocator)
-+{
-+ VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED);
-+
-+ if (m_MapCount > 0)
-+ {
-+ --m_MapCount;
-+ if (m_MapCount == 0 && !IsPersistentMap())
-+ {
-+ m_DedicatedAllocation.m_pMappedData = VMA_NULL;
-+ (*hAllocator->GetVulkanFunctions().vkUnmapMemory)(
-+ hAllocator->m_hDevice,
-+ m_DedicatedAllocation.m_hMemory);
-+ }
-+ }
-+ else
-+ {
-+ VMA_ASSERT(0 && "Unmapping dedicated allocation not previously mapped.");
-+ }
-+}
-+
-+#if VMA_STATS_STRING_ENABLED
-+void VmaAllocation_T::InitBufferImageUsage(uint32_t bufferImageUsage)
-+{
-+ VMA_ASSERT(m_BufferImageUsage == 0);
-+ m_BufferImageUsage = bufferImageUsage;
-+}
-+
-+void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const
-+{
-+ json.WriteString("Type");
-+ json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]);
-+
-+ json.WriteString("Size");
-+ json.WriteNumber(m_Size);
-+ json.WriteString("Usage");
-+ json.WriteNumber(m_BufferImageUsage);
-+
-+ if (m_pUserData != VMA_NULL)
-+ {
-+ json.WriteString("CustomData");
-+ json.BeginString();
-+ json.ContinueString_Pointer(m_pUserData);
-+ json.EndString();
-+ }
-+ if (m_pName != VMA_NULL)
-+ {
-+ json.WriteString("Name");
-+ json.WriteString(m_pName);
-+ }
-+}
-+#endif // VMA_STATS_STRING_ENABLED
-+
-+void VmaAllocation_T::FreeName(VmaAllocator hAllocator)
-+{
-+ if(m_pName)
-+ {
-+ VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName);
-+ m_pName = VMA_NULL;
-+ }
-+}
-+#endif // _VMA_ALLOCATION_T_FUNCTIONS
-+
-+#ifndef _VMA_BLOCK_VECTOR_FUNCTIONS
-+VmaBlockVector::VmaBlockVector(
-+ VmaAllocator hAllocator,
-+ VmaPool hParentPool,
-+ uint32_t memoryTypeIndex,
-+ VkDeviceSize preferredBlockSize,
-+ size_t minBlockCount,
-+ size_t maxBlockCount,
-+ VkDeviceSize bufferImageGranularity,
-+ bool explicitBlockSize,
-+ uint32_t algorithm,
-+ float priority,
-+ VkDeviceSize minAllocationAlignment,
-+ void* pMemoryAllocateNext)
-+ : m_hAllocator(hAllocator),
-+ m_hParentPool(hParentPool),
-+ m_MemoryTypeIndex(memoryTypeIndex),
-+ m_PreferredBlockSize(preferredBlockSize),
-+ m_MinBlockCount(minBlockCount),
-+ m_MaxBlockCount(maxBlockCount),
-+ m_BufferImageGranularity(bufferImageGranularity),
-+ m_ExplicitBlockSize(explicitBlockSize),
-+ m_Algorithm(algorithm),
-+ m_Priority(priority),
-+ m_MinAllocationAlignment(minAllocationAlignment),
-+ m_pMemoryAllocateNext(pMemoryAllocateNext),
-+ m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())),
-+ m_NextBlockId(0) {}
-+
-+VmaBlockVector::~VmaBlockVector()
-+{
-+ for (size_t i = m_Blocks.size(); i--; )
-+ {
-+ m_Blocks[i]->Destroy(m_hAllocator);
-+ vma_delete(m_hAllocator, m_Blocks[i]);
-+ }
-+}
-+
-+VkResult VmaBlockVector::CreateMinBlocks()
-+{
-+ for (size_t i = 0; i < m_MinBlockCount; ++i)
-+ {
-+ VkResult res = CreateBlock(m_PreferredBlockSize, VMA_NULL);
-+ if (res != VK_SUCCESS)
-+ {
-+ return res;
-+ }
-+ }
-+ return VK_SUCCESS;
-+}
-+
-+void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats)
-+{
-+ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
-+
-+ const size_t blockCount = m_Blocks.size();
-+ for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
-+ {
-+ const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
-+ VMA_ASSERT(pBlock);
-+ VMA_HEAVY_ASSERT(pBlock->Validate());
-+ pBlock->m_pMetadata->AddStatistics(inoutStats);
-+ }
-+}
-+
-+void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats)
-+{
-+ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
-+
-+ const size_t blockCount = m_Blocks.size();
-+ for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex)
-+ {
-+ const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
-+ VMA_ASSERT(pBlock);
-+ VMA_HEAVY_ASSERT(pBlock->Validate());
-+ pBlock->m_pMetadata->AddDetailedStatistics(inoutStats);
-+ }
-+}
-+
-+bool VmaBlockVector::IsEmpty()
-+{
-+ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
-+ return m_Blocks.empty();
-+}
-+
-+bool VmaBlockVector::IsCorruptionDetectionEnabled() const
-+{
-+ const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
-+ return (VMA_DEBUG_DETECT_CORRUPTION != 0) &&
-+ (VMA_DEBUG_MARGIN > 0) &&
-+ (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) &&
-+ (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags;
-+}
-+
-+VkResult VmaBlockVector::Allocate(
-+ VkDeviceSize size,
-+ VkDeviceSize alignment,
-+ const VmaAllocationCreateInfo& createInfo,
-+ VmaSuballocationType suballocType,
-+ size_t allocationCount,
-+ VmaAllocation* pAllocations)
-+{
-+ size_t allocIndex;
-+ VkResult res = VK_SUCCESS;
-+
-+ alignment = VMA_MAX(alignment, m_MinAllocationAlignment);
-+
-+ if (IsCorruptionDetectionEnabled())
-+ {
-+ size = VmaAlignUp<VkDeviceSize>(size, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
-+ alignment = VmaAlignUp<VkDeviceSize>(alignment, sizeof(VMA_CORRUPTION_DETECTION_MAGIC_VALUE));
-+ }
-+
-+ {
-+ VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
-+ for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-+ {
-+ res = AllocatePage(
-+ size,
-+ alignment,
-+ createInfo,
-+ suballocType,
-+ pAllocations + allocIndex);
-+ if (res != VK_SUCCESS)
-+ {
-+ break;
-+ }
-+ }
-+ }
-+
-+ if (res != VK_SUCCESS)
-+ {
-+ // Free all already created allocations.
-+ while (allocIndex--)
-+ Free(pAllocations[allocIndex]);
-+ memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
-+ }
-+
-+ return res;
-+}
-+
-+VkResult VmaBlockVector::AllocatePage(
-+ VkDeviceSize size,
-+ VkDeviceSize alignment,
-+ const VmaAllocationCreateInfo& createInfo,
-+ VmaSuballocationType suballocType,
-+ VmaAllocation* pAllocation)
-+{
-+ const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
-+
-+ VkDeviceSize freeMemory;
-+ {
-+ const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
-+ VmaBudget heapBudget = {};
-+ m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
-+ freeMemory = (heapBudget.usage < heapBudget.budget) ? (heapBudget.budget - heapBudget.usage) : 0;
-+ }
-+
-+ const bool canFallbackToDedicated = !HasExplicitBlockSize() &&
-+ (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0;
-+ const bool canCreateNewBlock =
-+ ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) &&
-+ (m_Blocks.size() < m_MaxBlockCount) &&
-+ (freeMemory >= size || !canFallbackToDedicated);
-+ uint32_t strategy = createInfo.flags & VMA_ALLOCATION_CREATE_STRATEGY_MASK;
-+
-+ // Upper address can only be used with linear allocator and within single memory block.
-+ if (isUpperAddress &&
-+ (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT || m_MaxBlockCount > 1))
-+ {
-+ return VK_ERROR_FEATURE_NOT_PRESENT;
-+ }
-+
-+ // Early reject: requested allocation size is larger that maximum block size for this block vector.
-+ if (size + VMA_DEBUG_MARGIN > m_PreferredBlockSize)
-+ {
-+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-+ }
-+
-+ // 1. Search existing allocations. Try to allocate.
-+ if (m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
-+ {
-+ // Use only last block.
-+ if (!m_Blocks.empty())
-+ {
-+ VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back();
-+ VMA_ASSERT(pCurrBlock);
-+ VkResult res = AllocateFromBlock(
-+ pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
-+ if (res == VK_SUCCESS)
-+ {
-+ VMA_DEBUG_LOG_FORMAT(" Returned from last block #%u", pCurrBlock->GetId());
-+ IncrementallySortBlocks();
-+ return VK_SUCCESS;
-+ }
-+ }
-+ }
-+ else
-+ {
-+ if (strategy != VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT) // MIN_MEMORY or default
-+ {
-+ const bool isHostVisible =
-+ (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0;
-+ if(isHostVisible)
-+ {
-+ const bool isMappingAllowed = (createInfo.flags &
-+ (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0;
-+ /*
-+ For non-mappable allocations, check blocks that are not mapped first.
-+ For mappable allocations, check blocks that are already mapped first.
-+ This way, having many blocks, we will separate mappable and non-mappable allocations,
-+ hopefully limiting the number of blocks that are mapped, which will help tools like RenderDoc.
-+ */
-+ for(size_t mappingI = 0; mappingI < 2; ++mappingI)
-+ {
-+ // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
-+ for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
-+ {
-+ VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
-+ VMA_ASSERT(pCurrBlock);
-+ const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL;
-+ if((mappingI == 0) == (isMappingAllowed == isBlockMapped))
-+ {
-+ VkResult res = AllocateFromBlock(
-+ pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
-+ if (res == VK_SUCCESS)
-+ {
-+ VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%u", pCurrBlock->GetId());
-+ IncrementallySortBlocks();
-+ return VK_SUCCESS;
-+ }
-+ }
-+ }
-+ }
-+ }
-+ else
-+ {
-+ // Forward order in m_Blocks - prefer blocks with smallest amount of free space.
-+ for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
-+ {
-+ VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
-+ VMA_ASSERT(pCurrBlock);
-+ VkResult res = AllocateFromBlock(
-+ pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
-+ if (res == VK_SUCCESS)
-+ {
-+ VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%u", pCurrBlock->GetId());
-+ IncrementallySortBlocks();
-+ return VK_SUCCESS;
-+ }
-+ }
-+ }
-+ }
-+ else // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT
-+ {
-+ // Backward order in m_Blocks - prefer blocks with largest amount of free space.
-+ for (size_t blockIndex = m_Blocks.size(); blockIndex--; )
-+ {
-+ VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex];
-+ VMA_ASSERT(pCurrBlock);
-+ VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
-+ if (res == VK_SUCCESS)
-+ {
-+ VMA_DEBUG_LOG_FORMAT(" Returned from existing block #%u", pCurrBlock->GetId());
-+ IncrementallySortBlocks();
-+ return VK_SUCCESS;
-+ }
-+ }
-+ }
-+ }
-+
-+ // 2. Try to create new block.
-+ if (canCreateNewBlock)
-+ {
-+ // Calculate optimal size for new block.
-+ VkDeviceSize newBlockSize = m_PreferredBlockSize;
-+ uint32_t newBlockSizeShift = 0;
-+ const uint32_t NEW_BLOCK_SIZE_SHIFT_MAX = 3;
-+
-+ if (!m_ExplicitBlockSize)
-+ {
-+ // Allocate 1/8, 1/4, 1/2 as first blocks.
-+ const VkDeviceSize maxExistingBlockSize = CalcMaxBlockSize();
-+ for (uint32_t i = 0; i < NEW_BLOCK_SIZE_SHIFT_MAX; ++i)
-+ {
-+ const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
-+ if (smallerNewBlockSize > maxExistingBlockSize && smallerNewBlockSize >= size * 2)
-+ {
-+ newBlockSize = smallerNewBlockSize;
-+ ++newBlockSizeShift;
-+ }
-+ else
-+ {
-+ break;
-+ }
-+ }
-+ }
-+
-+ size_t newBlockIndex = 0;
-+ VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
-+ CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
-+ // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize.
-+ if (!m_ExplicitBlockSize)
-+ {
-+ while (res < 0 && newBlockSizeShift < NEW_BLOCK_SIZE_SHIFT_MAX)
-+ {
-+ const VkDeviceSize smallerNewBlockSize = newBlockSize / 2;
-+ if (smallerNewBlockSize >= size)
-+ {
-+ newBlockSize = smallerNewBlockSize;
-+ ++newBlockSizeShift;
-+ res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ?
-+ CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY;
-+ }
-+ else
-+ {
-+ break;
-+ }
-+ }
-+ }
-+
-+ if (res == VK_SUCCESS)
-+ {
-+ VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex];
-+ VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size);
-+
-+ res = AllocateFromBlock(
-+ pBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation);
-+ if (res == VK_SUCCESS)
-+ {
-+ VMA_DEBUG_LOG_FORMAT(" Created new block #%u Size=%llu", pBlock->GetId(), newBlockSize);
-+ IncrementallySortBlocks();
-+ return VK_SUCCESS;
-+ }
-+ else
-+ {
-+ // Allocation from new block failed, possibly due to VMA_DEBUG_MARGIN or alignment.
-+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-+ }
-+ }
-+ }
-+
-+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-+}
-+
-+void VmaBlockVector::Free(const VmaAllocation hAllocation)
-+{
-+ VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL;
-+
-+ bool budgetExceeded = false;
-+ {
-+ const uint32_t heapIndex = m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex);
-+ VmaBudget heapBudget = {};
-+ m_hAllocator->GetHeapBudgets(&heapBudget, heapIndex, 1);
-+ budgetExceeded = heapBudget.usage >= heapBudget.budget;
-+ }
-+
-+ // Scope for lock.
-+ {
-+ VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex);
-+
-+ VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
-+
-+ if (IsCorruptionDetectionEnabled())
-+ {
-+ VkResult res = pBlock->ValidateMagicValueAfterAllocation(m_hAllocator, hAllocation->GetOffset(), hAllocation->GetSize());
-+ VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to validate magic value.");
-+ }
-+
-+ if (hAllocation->IsPersistentMap())
-+ {
-+ pBlock->Unmap(m_hAllocator, 1);
-+ }
-+
-+ const bool hadEmptyBlockBeforeFree = HasEmptyBlock();
-+ pBlock->m_pMetadata->Free(hAllocation->GetAllocHandle());
-+ pBlock->PostFree(m_hAllocator);
-+ VMA_HEAVY_ASSERT(pBlock->Validate());
-+
-+ VMA_DEBUG_LOG_FORMAT(" Freed from MemoryTypeIndex=%u", m_MemoryTypeIndex);
-+
-+ const bool canDeleteBlock = m_Blocks.size() > m_MinBlockCount;
-+ // pBlock became empty after this deallocation.
-+ if (pBlock->m_pMetadata->IsEmpty())
-+ {
-+ // Already had empty block. We don't want to have two, so delete this one.
-+ if ((hadEmptyBlockBeforeFree || budgetExceeded) && canDeleteBlock)
-+ {
-+ pBlockToDelete = pBlock;
-+ Remove(pBlock);
-+ }
-+ // else: We now have one empty block - leave it. A hysteresis to avoid allocating whole block back and forth.
-+ }
-+ // pBlock didn't become empty, but we have another empty block - find and free that one.
-+ // (This is optional, heuristics.)
-+ else if (hadEmptyBlockBeforeFree && canDeleteBlock)
-+ {
-+ VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back();
-+ if (pLastBlock->m_pMetadata->IsEmpty())
-+ {
-+ pBlockToDelete = pLastBlock;
-+ m_Blocks.pop_back();
-+ }
-+ }
-+
-+ IncrementallySortBlocks();
-+ }
-+
-+ // Destruction of a free block. Deferred until this point, outside of mutex
-+ // lock, for performance reason.
-+ if (pBlockToDelete != VMA_NULL)
-+ {
-+ VMA_DEBUG_LOG_FORMAT(" Deleted empty block #%u", pBlockToDelete->GetId());
-+ pBlockToDelete->Destroy(m_hAllocator);
-+ vma_delete(m_hAllocator, pBlockToDelete);
-+ }
-+
-+ m_hAllocator->m_Budget.RemoveAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), hAllocation->GetSize());
-+ m_hAllocator->m_AllocationObjectAllocator.Free(hAllocation);
-+}
-+
-+VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const
-+{
-+ VkDeviceSize result = 0;
-+ for (size_t i = m_Blocks.size(); i--; )
-+ {
-+ result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize());
-+ if (result >= m_PreferredBlockSize)
-+ {
-+ break;
-+ }
-+ }
-+ return result;
-+}
-+
-+void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock)
-+{
-+ for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
-+ {
-+ if (m_Blocks[blockIndex] == pBlock)
-+ {
-+ VmaVectorRemove(m_Blocks, blockIndex);
-+ return;
-+ }
-+ }
-+ VMA_ASSERT(0);
-+}
-+
-+void VmaBlockVector::IncrementallySortBlocks()
-+{
-+ if (!m_IncrementalSort)
-+ return;
-+ if (m_Algorithm != VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
-+ {
-+ // Bubble sort only until first swap.
-+ for (size_t i = 1; i < m_Blocks.size(); ++i)
-+ {
-+ if (m_Blocks[i - 1]->m_pMetadata->GetSumFreeSize() > m_Blocks[i]->m_pMetadata->GetSumFreeSize())
-+ {
-+ VMA_SWAP(m_Blocks[i - 1], m_Blocks[i]);
-+ return;
-+ }
-+ }
-+ }
-+}
-+
-+void VmaBlockVector::SortByFreeSize()
-+{
-+ VMA_SORT(m_Blocks.begin(), m_Blocks.end(),
-+ [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool
-+ {
-+ return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize();
-+ });
-+}
-+
-+VkResult VmaBlockVector::AllocateFromBlock(
-+ VmaDeviceMemoryBlock* pBlock,
-+ VkDeviceSize size,
-+ VkDeviceSize alignment,
-+ VmaAllocationCreateFlags allocFlags,
-+ void* pUserData,
-+ VmaSuballocationType suballocType,
-+ uint32_t strategy,
-+ VmaAllocation* pAllocation)
-+{
-+ const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0;
-+
-+ VmaAllocationRequest currRequest = {};
-+ if (pBlock->m_pMetadata->CreateAllocationRequest(
-+ size,
-+ alignment,
-+ isUpperAddress,
-+ suballocType,
-+ strategy,
-+ &currRequest))
-+ {
-+ return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation);
-+ }
-+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-+}
-+
-+VkResult VmaBlockVector::CommitAllocationRequest(
-+ VmaAllocationRequest& allocRequest,
-+ VmaDeviceMemoryBlock* pBlock,
-+ VkDeviceSize alignment,
-+ VmaAllocationCreateFlags allocFlags,
-+ void* pUserData,
-+ VmaSuballocationType suballocType,
-+ VmaAllocation* pAllocation)
-+{
-+ const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0;
-+ const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0;
-+ const bool isMappingAllowed = (allocFlags &
-+ (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0;
-+
-+ pBlock->PostAlloc(m_hAllocator);
-+ // Allocate from pCurrBlock.
-+ if (mapped)
-+ {
-+ VkResult res = pBlock->Map(m_hAllocator, 1, VMA_NULL);
-+ if (res != VK_SUCCESS)
-+ {
-+ return res;
-+ }
-+ }
-+
-+ *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isMappingAllowed);
-+ pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation);
-+ (*pAllocation)->InitBlockAllocation(
-+ pBlock,
-+ allocRequest.allocHandle,
-+ alignment,
-+ allocRequest.size, // Not size, as actual allocation size may be larger than requested!
-+ m_MemoryTypeIndex,
-+ suballocType,
-+ mapped);
-+ VMA_HEAVY_ASSERT(pBlock->Validate());
-+ if (isUserDataString)
-+ (*pAllocation)->SetName(m_hAllocator, (const char*)pUserData);
-+ else
-+ (*pAllocation)->SetUserData(m_hAllocator, pUserData);
-+ m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size);
-+ if (VMA_DEBUG_INITIALIZE_ALLOCATIONS)
-+ {
-+ m_hAllocator->FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
-+ }
-+ if (IsCorruptionDetectionEnabled())
-+ {
-+ VkResult res = pBlock->WriteMagicValueAfterAllocation(m_hAllocator, (*pAllocation)->GetOffset(), allocRequest.size);
-+ VMA_ASSERT(res == VK_SUCCESS && "Couldn't map block memory to write magic value.");
-+ }
-+ return VK_SUCCESS;
-+}
-+
-+VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex)
-+{
-+ VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
-+ allocInfo.pNext = m_pMemoryAllocateNext;
-+ allocInfo.memoryTypeIndex = m_MemoryTypeIndex;
-+ allocInfo.allocationSize = blockSize;
-+
-+#if VMA_BUFFER_DEVICE_ADDRESS
-+ // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature.
-+ VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
-+ if (m_hAllocator->m_UseKhrBufferDeviceAddress)
-+ {
-+ allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
-+ VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
-+ }
-+#endif // VMA_BUFFER_DEVICE_ADDRESS
-+
-+#if VMA_MEMORY_PRIORITY
-+ VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
-+ if (m_hAllocator->m_UseExtMemoryPriority)
-+ {
-+ VMA_ASSERT(m_Priority >= 0.f && m_Priority <= 1.f);
-+ priorityInfo.priority = m_Priority;
-+ VmaPnextChainPushFront(&allocInfo, &priorityInfo);
-+ }
-+#endif // VMA_MEMORY_PRIORITY
-+
-+#if VMA_EXTERNAL_MEMORY
-+ // Attach VkExportMemoryAllocateInfoKHR if necessary.
-+ VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
-+ exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex);
-+ if (exportMemoryAllocInfo.handleTypes != 0)
-+ {
-+ VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
-+ }
-+#endif // VMA_EXTERNAL_MEMORY
-+
-+ VkDeviceMemory mem = VK_NULL_HANDLE;
-+ VkResult res = m_hAllocator->AllocateVulkanMemory(&allocInfo, &mem);
-+ if (res < 0)
-+ {
-+ return res;
-+ }
-+
-+ // New VkDeviceMemory successfully created.
-+
-+ // Create new Allocation for it.
-+ VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator);
-+ pBlock->Init(
-+ m_hAllocator,
-+ m_hParentPool,
-+ m_MemoryTypeIndex,
-+ mem,
-+ allocInfo.allocationSize,
-+ m_NextBlockId++,
-+ m_Algorithm,
-+ m_BufferImageGranularity);
-+
-+ m_Blocks.push_back(pBlock);
-+ if (pNewBlockIndex != VMA_NULL)
-+ {
-+ *pNewBlockIndex = m_Blocks.size() - 1;
-+ }
-+
-+ return VK_SUCCESS;
-+}
-+
-+bool VmaBlockVector::HasEmptyBlock()
-+{
-+ for (size_t index = 0, count = m_Blocks.size(); index < count; ++index)
-+ {
-+ VmaDeviceMemoryBlock* const pBlock = m_Blocks[index];
-+ if (pBlock->m_pMetadata->IsEmpty())
-+ {
-+ return true;
-+ }
-+ }
-+ return false;
-+}
-+
-+#if VMA_STATS_STRING_ENABLED
-+void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json)
-+{
-+ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
-+
-+
-+ json.BeginObject();
-+ for (size_t i = 0; i < m_Blocks.size(); ++i)
-+ {
-+ json.BeginString();
-+ json.ContinueString(m_Blocks[i]->GetId());
-+ json.EndString();
-+
-+ json.BeginObject();
-+ json.WriteString("MapRefCount");
-+ json.WriteNumber(m_Blocks[i]->GetMapRefCount());
-+
-+ m_Blocks[i]->m_pMetadata->PrintDetailedMap(json);
-+ json.EndObject();
-+ }
-+ json.EndObject();
-+}
-+#endif // VMA_STATS_STRING_ENABLED
-+
-+VkResult VmaBlockVector::CheckCorruption()
-+{
-+ if (!IsCorruptionDetectionEnabled())
-+ {
-+ return VK_ERROR_FEATURE_NOT_PRESENT;
-+ }
-+
-+ VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex);
-+ for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex)
-+ {
-+ VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex];
-+ VMA_ASSERT(pBlock);
-+ VkResult res = pBlock->CheckCorruption(m_hAllocator);
-+ if (res != VK_SUCCESS)
-+ {
-+ return res;
-+ }
-+ }
-+ return VK_SUCCESS;
-+}
-+
-+#endif // _VMA_BLOCK_VECTOR_FUNCTIONS
-+
-+#ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
-+VmaDefragmentationContext_T::VmaDefragmentationContext_T(
-+ VmaAllocator hAllocator,
-+ const VmaDefragmentationInfo& info)
-+ : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass),
-+ m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? UINT32_MAX : info.maxAllocationsPerPass),
-+ m_BreakCallback(info.pfnBreakCallback),
-+ m_BreakCallbackUserData(info.pBreakCallbackUserData),
-+ m_MoveAllocator(hAllocator->GetAllocationCallbacks()),
-+ m_Moves(m_MoveAllocator)
-+{
-+ m_Algorithm = info.flags & VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK;
-+
-+ if (info.pool != VMA_NULL)
-+ {
-+ m_BlockVectorCount = 1;
-+ m_PoolBlockVector = &info.pool->m_BlockVector;
-+ m_pBlockVectors = &m_PoolBlockVector;
-+ m_PoolBlockVector->SetIncrementalSort(false);
-+ m_PoolBlockVector->SortByFreeSize();
-+ }
-+ else
-+ {
-+ m_BlockVectorCount = hAllocator->GetMemoryTypeCount();
-+ m_PoolBlockVector = VMA_NULL;
-+ m_pBlockVectors = hAllocator->m_pBlockVectors;
-+ for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
-+ {
-+ VmaBlockVector* vector = m_pBlockVectors[i];
-+ if (vector != VMA_NULL)
-+ {
-+ vector->SetIncrementalSort(false);
-+ vector->SortByFreeSize();
-+ }
-+ }
-+ }
-+
-+ switch (m_Algorithm)
-+ {
-+ case 0: // Default algorithm
-+ m_Algorithm = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT;
-+ m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
-+ break;
-+ case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
-+ m_AlgorithmState = vma_new_array(hAllocator, StateBalanced, m_BlockVectorCount);
-+ break;
-+ case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
-+ if (hAllocator->GetBufferImageGranularity() > 1)
-+ {
-+ m_AlgorithmState = vma_new_array(hAllocator, StateExtensive, m_BlockVectorCount);
-+ }
-+ break;
-+ }
-+}
-+
-+VmaDefragmentationContext_T::~VmaDefragmentationContext_T()
-+{
-+ if (m_PoolBlockVector != VMA_NULL)
-+ {
-+ m_PoolBlockVector->SetIncrementalSort(true);
-+ }
-+ else
-+ {
-+ for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
-+ {
-+ VmaBlockVector* vector = m_pBlockVectors[i];
-+ if (vector != VMA_NULL)
-+ vector->SetIncrementalSort(true);
-+ }
-+ }
-+
-+ if (m_AlgorithmState)
-+ {
-+ switch (m_Algorithm)
-+ {
-+ case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
-+ vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateBalanced*>(m_AlgorithmState), m_BlockVectorCount);
-+ break;
-+ case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
-+ vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateExtensive*>(m_AlgorithmState), m_BlockVectorCount);
-+ break;
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+ }
-+}
-+
-+VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo)
-+{
-+ if (m_PoolBlockVector != VMA_NULL)
-+ {
-+ VmaMutexLockWrite lock(m_PoolBlockVector->GetMutex(), m_PoolBlockVector->GetAllocator()->m_UseMutex);
-+
-+ if (m_PoolBlockVector->GetBlockCount() > 1)
-+ ComputeDefragmentation(*m_PoolBlockVector, 0);
-+ else if (m_PoolBlockVector->GetBlockCount() == 1)
-+ ReallocWithinBlock(*m_PoolBlockVector, m_PoolBlockVector->GetBlock(0));
-+ }
-+ else
-+ {
-+ for (uint32_t i = 0; i < m_BlockVectorCount; ++i)
-+ {
-+ if (m_pBlockVectors[i] != VMA_NULL)
-+ {
-+ VmaMutexLockWrite lock(m_pBlockVectors[i]->GetMutex(), m_pBlockVectors[i]->GetAllocator()->m_UseMutex);
-+
-+ if (m_pBlockVectors[i]->GetBlockCount() > 1)
-+ {
-+ if (ComputeDefragmentation(*m_pBlockVectors[i], i))
-+ break;
-+ }
-+ else if (m_pBlockVectors[i]->GetBlockCount() == 1)
-+ {
-+ if (ReallocWithinBlock(*m_pBlockVectors[i], m_pBlockVectors[i]->GetBlock(0)))
-+ break;
-+ }
-+ }
-+ }
-+ }
-+
-+ moveInfo.moveCount = static_cast<uint32_t>(m_Moves.size());
-+ if (moveInfo.moveCount > 0)
-+ {
-+ moveInfo.pMoves = m_Moves.data();
-+ return VK_INCOMPLETE;
-+ }
-+
-+ moveInfo.pMoves = VMA_NULL;
-+ return VK_SUCCESS;
-+}
-+
-+VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo)
-+{
-+ VMA_ASSERT(moveInfo.moveCount > 0 ? moveInfo.pMoves != VMA_NULL : true);
-+
-+ VkResult result = VK_SUCCESS;
-+ VmaStlAllocator<FragmentedBlock> blockAllocator(m_MoveAllocator.m_pCallbacks);
-+ VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> immovableBlocks(blockAllocator);
-+ VmaVector<FragmentedBlock, VmaStlAllocator<FragmentedBlock>> mappedBlocks(blockAllocator);
-+
-+ VmaAllocator allocator = VMA_NULL;
-+ for (uint32_t i = 0; i < moveInfo.moveCount; ++i)
-+ {
-+ VmaDefragmentationMove& move = moveInfo.pMoves[i];
-+ size_t prevCount = 0, currentCount = 0;
-+ VkDeviceSize freedBlockSize = 0;
-+
-+ uint32_t vectorIndex;
-+ VmaBlockVector* vector;
-+ if (m_PoolBlockVector != VMA_NULL)
-+ {
-+ vectorIndex = 0;
-+ vector = m_PoolBlockVector;
-+ }
-+ else
-+ {
-+ vectorIndex = move.srcAllocation->GetMemoryTypeIndex();
-+ vector = m_pBlockVectors[vectorIndex];
-+ VMA_ASSERT(vector != VMA_NULL);
-+ }
-+
-+ switch (move.operation)
-+ {
-+ case VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY:
-+ {
-+ uint8_t mapCount = move.srcAllocation->SwapBlockAllocation(vector->m_hAllocator, move.dstTmpAllocation);
-+ if (mapCount > 0)
-+ {
-+ allocator = vector->m_hAllocator;
-+ VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock();
-+ bool notPresent = true;
-+ for (FragmentedBlock& block : mappedBlocks)
-+ {
-+ if (block.block == newMapBlock)
-+ {
-+ notPresent = false;
-+ block.data += mapCount;
-+ break;
-+ }
-+ }
-+ if (notPresent)
-+ mappedBlocks.push_back({ mapCount, newMapBlock });
-+ }
-+
-+ // Scope for locks, Free have it's own lock
-+ {
-+ VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-+ prevCount = vector->GetBlockCount();
-+ freedBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
-+ }
-+ vector->Free(move.dstTmpAllocation);
-+ {
-+ VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-+ currentCount = vector->GetBlockCount();
-+ }
-+
-+ result = VK_INCOMPLETE;
-+ break;
-+ }
-+ case VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE:
-+ {
-+ m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
-+ --m_PassStats.allocationsMoved;
-+ vector->Free(move.dstTmpAllocation);
-+
-+ VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock();
-+ bool notPresent = true;
-+ for (const FragmentedBlock& block : immovableBlocks)
-+ {
-+ if (block.block == newBlock)
-+ {
-+ notPresent = false;
-+ break;
-+ }
-+ }
-+ if (notPresent)
-+ immovableBlocks.push_back({ vectorIndex, newBlock });
-+ break;
-+ }
-+ case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY:
-+ {
-+ m_PassStats.bytesMoved -= move.srcAllocation->GetSize();
-+ --m_PassStats.allocationsMoved;
-+ // Scope for locks, Free have it's own lock
-+ {
-+ VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-+ prevCount = vector->GetBlockCount();
-+ freedBlockSize = move.srcAllocation->GetBlock()->m_pMetadata->GetSize();
-+ }
-+ vector->Free(move.srcAllocation);
-+ {
-+ VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-+ currentCount = vector->GetBlockCount();
-+ }
-+ freedBlockSize *= prevCount - currentCount;
-+
-+ VkDeviceSize dstBlockSize;
-+ {
-+ VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-+ dstBlockSize = move.dstTmpAllocation->GetBlock()->m_pMetadata->GetSize();
-+ }
-+ vector->Free(move.dstTmpAllocation);
-+ {
-+ VmaMutexLockRead lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-+ freedBlockSize += dstBlockSize * (currentCount - vector->GetBlockCount());
-+ currentCount = vector->GetBlockCount();
-+ }
-+
-+ result = VK_INCOMPLETE;
-+ break;
-+ }
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+
-+ if (prevCount > currentCount)
-+ {
-+ size_t freedBlocks = prevCount - currentCount;
-+ m_PassStats.deviceMemoryBlocksFreed += static_cast<uint32_t>(freedBlocks);
-+ m_PassStats.bytesFreed += freedBlockSize;
-+ }
-+
-+ if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT &&
-+ m_AlgorithmState != VMA_NULL)
-+ {
-+ // Avoid unnecessary tries to allocate when new free block is available
-+ StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[vectorIndex];
-+ if (state.firstFreeBlock != SIZE_MAX)
-+ {
-+ const size_t diff = prevCount - currentCount;
-+ if (state.firstFreeBlock >= diff)
-+ {
-+ state.firstFreeBlock -= diff;
-+ if (state.firstFreeBlock != 0)
-+ state.firstFreeBlock -= vector->GetBlock(state.firstFreeBlock - 1)->m_pMetadata->IsEmpty();
-+ }
-+ else
-+ state.firstFreeBlock = 0;
-+ }
-+ }
-+ }
-+ moveInfo.moveCount = 0;
-+ moveInfo.pMoves = VMA_NULL;
-+ m_Moves.clear();
-+
-+ // Update stats
-+ m_GlobalStats.allocationsMoved += m_PassStats.allocationsMoved;
-+ m_GlobalStats.bytesFreed += m_PassStats.bytesFreed;
-+ m_GlobalStats.bytesMoved += m_PassStats.bytesMoved;
-+ m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed;
-+ m_PassStats = { 0 };
-+
-+ // Move blocks with immovable allocations according to algorithm
-+ if (immovableBlocks.size() > 0)
-+ {
-+ do
-+ {
-+ if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT)
-+ {
-+ if (m_AlgorithmState != VMA_NULL)
-+ {
-+ bool swapped = false;
-+ // Move to the start of free blocks range
-+ for (const FragmentedBlock& block : immovableBlocks)
-+ {
-+ StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[block.data];
-+ if (state.operation != StateExtensive::Operation::Cleanup)
-+ {
-+ VmaBlockVector* vector = m_pBlockVectors[block.data];
-+ VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-+
-+ for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i)
-+ {
-+ if (vector->GetBlock(i) == block.block)
-+ {
-+ VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[vector->GetBlockCount() - ++m_ImmovableBlockCount]);
-+ if (state.firstFreeBlock != SIZE_MAX)
-+ {
-+ if (i + 1 < state.firstFreeBlock)
-+ {
-+ if (state.firstFreeBlock > 1)
-+ VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[--state.firstFreeBlock]);
-+ else
-+ --state.firstFreeBlock;
-+ }
-+ }
-+ swapped = true;
-+ break;
-+ }
-+ }
-+ }
-+ }
-+ if (swapped)
-+ result = VK_INCOMPLETE;
-+ break;
-+ }
-+ }
-+
-+ // Move to the beginning
-+ for (const FragmentedBlock& block : immovableBlocks)
-+ {
-+ VmaBlockVector* vector = m_pBlockVectors[block.data];
-+ VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex);
-+
-+ for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i)
-+ {
-+ if (vector->GetBlock(i) == block.block)
-+ {
-+ VMA_SWAP(vector->m_Blocks[i], vector->m_Blocks[m_ImmovableBlockCount++]);
-+ break;
-+ }
-+ }
-+ }
-+ } while (false);
-+ }
-+
-+ // Bulk-map destination blocks
-+ for (const FragmentedBlock& block : mappedBlocks)
-+ {
-+ VkResult res = block.block->Map(allocator, block.data, VMA_NULL);
-+ VMA_ASSERT(res == VK_SUCCESS);
-+ }
-+ return result;
-+}
-+
-+bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index)
-+{
-+ switch (m_Algorithm)
-+ {
-+ case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT:
-+ return ComputeDefragmentation_Fast(vector);
-+ case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT:
-+ return ComputeDefragmentation_Balanced(vector, index, true);
-+ case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT:
-+ return ComputeDefragmentation_Full(vector);
-+ case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT:
-+ return ComputeDefragmentation_Extensive(vector, index);
-+ default:
-+ VMA_ASSERT(0);
-+ return ComputeDefragmentation_Balanced(vector, index, true);
-+ }
-+}
-+
-+VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData(
-+ VmaAllocHandle handle, VmaBlockMetadata* metadata)
-+{
-+ MoveAllocationData moveData;
-+ moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle);
-+ moveData.size = moveData.move.srcAllocation->GetSize();
-+ moveData.alignment = moveData.move.srcAllocation->GetAlignment();
-+ moveData.type = moveData.move.srcAllocation->GetSuballocationType();
-+ moveData.flags = 0;
-+
-+ if (moveData.move.srcAllocation->IsPersistentMap())
-+ moveData.flags |= VMA_ALLOCATION_CREATE_MAPPED_BIT;
-+ if (moveData.move.srcAllocation->IsMappingAllowed())
-+ moveData.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
-+
-+ return moveData;
-+}
-+
-+VmaDefragmentationContext_T::CounterStatus VmaDefragmentationContext_T::CheckCounters(VkDeviceSize bytes)
-+{
-+ // Check custom criteria if exists
-+ if (m_BreakCallback && m_BreakCallback(m_BreakCallbackUserData))
-+ return CounterStatus::End;
-+
-+ // Ignore allocation if will exceed max size for copy
-+ if (m_PassStats.bytesMoved + bytes > m_MaxPassBytes)
-+ {
-+ if (++m_IgnoredAllocs < MAX_ALLOCS_TO_IGNORE)
-+ return CounterStatus::Ignore;
-+ else
-+ return CounterStatus::End;
-+ }
-+ else
-+ m_IgnoredAllocs = 0;
-+ return CounterStatus::Pass;
-+}
-+
-+bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes)
-+{
-+ m_PassStats.bytesMoved += bytes;
-+ // Early return when max found
-+ if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes)
-+ {
-+ VMA_ASSERT((m_PassStats.allocationsMoved == m_MaxPassAllocations ||
-+ m_PassStats.bytesMoved == m_MaxPassBytes) && "Exceeded maximal pass threshold!");
-+ return true;
-+ }
-+ return false;
-+}
-+
-+bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block)
-+{
-+ VmaBlockMetadata* metadata = block->m_pMetadata;
-+
-+ for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
-+ handle != VK_NULL_HANDLE;
-+ handle = metadata->GetNextAllocation(handle))
-+ {
-+ MoveAllocationData moveData = GetMoveData(handle, metadata);
-+ // Ignore newly created allocations by defragmentation algorithm
-+ if (moveData.move.srcAllocation->GetUserData() == this)
-+ continue;
-+ switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
-+ {
-+ case CounterStatus::Ignore:
-+ continue;
-+ case CounterStatus::End:
-+ return true;
-+ case CounterStatus::Pass:
-+ break;
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+
-+ VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
-+ if (offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
-+ {
-+ VmaAllocationRequest request = {};
-+ if (metadata->CreateAllocationRequest(
-+ moveData.size,
-+ moveData.alignment,
-+ false,
-+ moveData.type,
-+ VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
-+ &request))
-+ {
-+ if (metadata->GetAllocationOffset(request.allocHandle) < offset)
-+ {
-+ if (vector.CommitAllocationRequest(
-+ request,
-+ block,
-+ moveData.alignment,
-+ moveData.flags,
-+ this,
-+ moveData.type,
-+ &moveData.move.dstTmpAllocation) == VK_SUCCESS)
-+ {
-+ m_Moves.push_back(moveData.move);
-+ if (IncrementCounters(moveData.size))
-+ return true;
-+ }
-+ }
-+ }
-+ }
-+ }
-+ return false;
-+}
-+
-+bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector)
-+{
-+ for (; start < end; ++start)
-+ {
-+ VmaDeviceMemoryBlock* dstBlock = vector.GetBlock(start);
-+ if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size)
-+ {
-+ if (vector.AllocateFromBlock(dstBlock,
-+ data.size,
-+ data.alignment,
-+ data.flags,
-+ this,
-+ data.type,
-+ 0,
-+ &data.move.dstTmpAllocation) == VK_SUCCESS)
-+ {
-+ m_Moves.push_back(data.move);
-+ if (IncrementCounters(data.size))
-+ return true;
-+ break;
-+ }
-+ }
-+ }
-+ return false;
-+}
-+
-+bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector)
-+{
-+ // Move only between blocks
-+
-+ // Go through allocations in last blocks and try to fit them inside first ones
-+ for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
-+ {
-+ VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
-+
-+ for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
-+ handle != VK_NULL_HANDLE;
-+ handle = metadata->GetNextAllocation(handle))
-+ {
-+ MoveAllocationData moveData = GetMoveData(handle, metadata);
-+ // Ignore newly created allocations by defragmentation algorithm
-+ if (moveData.move.srcAllocation->GetUserData() == this)
-+ continue;
-+ switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
-+ {
-+ case CounterStatus::Ignore:
-+ continue;
-+ case CounterStatus::End:
-+ return true;
-+ case CounterStatus::Pass:
-+ break;
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+
-+ // Check all previous blocks for free space
-+ if (AllocInOtherBlock(0, i, moveData, vector))
-+ return true;
-+ }
-+ }
-+ return false;
-+}
-+
-+bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update)
-+{
-+ // Go over every allocation and try to fit it in previous blocks at lowest offsets,
-+ // if not possible: realloc within single block to minimize offset (exclude offset == 0),
-+ // but only if there are noticeable gaps between them (some heuristic, ex. average size of allocation in block)
-+ VMA_ASSERT(m_AlgorithmState != VMA_NULL);
-+
-+ StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index];
-+ if (update && vectorState.avgAllocSize == UINT64_MAX)
-+ UpdateVectorStatistics(vector, vectorState);
-+
-+ const size_t startMoveCount = m_Moves.size();
-+ VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2;
-+ for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
-+ {
-+ VmaDeviceMemoryBlock* block = vector.GetBlock(i);
-+ VmaBlockMetadata* metadata = block->m_pMetadata;
-+ VkDeviceSize prevFreeRegionSize = 0;
-+
-+ for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
-+ handle != VK_NULL_HANDLE;
-+ handle = metadata->GetNextAllocation(handle))
-+ {
-+ MoveAllocationData moveData = GetMoveData(handle, metadata);
-+ // Ignore newly created allocations by defragmentation algorithm
-+ if (moveData.move.srcAllocation->GetUserData() == this)
-+ continue;
-+ switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
-+ {
-+ case CounterStatus::Ignore:
-+ continue;
-+ case CounterStatus::End:
-+ return true;
-+ case CounterStatus::Pass:
-+ break;
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+
-+ // Check all previous blocks for free space
-+ const size_t prevMoveCount = m_Moves.size();
-+ if (AllocInOtherBlock(0, i, moveData, vector))
-+ return true;
-+
-+ VkDeviceSize nextFreeRegionSize = metadata->GetNextFreeRegionSize(handle);
-+ // If no room found then realloc within block for lower offset
-+ VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
-+ if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
-+ {
-+ // Check if realloc will make sense
-+ if (prevFreeRegionSize >= minimalFreeRegion ||
-+ nextFreeRegionSize >= minimalFreeRegion ||
-+ moveData.size <= vectorState.avgFreeSize ||
-+ moveData.size <= vectorState.avgAllocSize)
-+ {
-+ VmaAllocationRequest request = {};
-+ if (metadata->CreateAllocationRequest(
-+ moveData.size,
-+ moveData.alignment,
-+ false,
-+ moveData.type,
-+ VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
-+ &request))
-+ {
-+ if (metadata->GetAllocationOffset(request.allocHandle) < offset)
-+ {
-+ if (vector.CommitAllocationRequest(
-+ request,
-+ block,
-+ moveData.alignment,
-+ moveData.flags,
-+ this,
-+ moveData.type,
-+ &moveData.move.dstTmpAllocation) == VK_SUCCESS)
-+ {
-+ m_Moves.push_back(moveData.move);
-+ if (IncrementCounters(moveData.size))
-+ return true;
-+ }
-+ }
-+ }
-+ }
-+ }
-+ prevFreeRegionSize = nextFreeRegionSize;
-+ }
-+ }
-+
-+ // No moves performed, update statistics to current vector state
-+ if (startMoveCount == m_Moves.size() && !update)
-+ {
-+ vectorState.avgAllocSize = UINT64_MAX;
-+ return ComputeDefragmentation_Balanced(vector, index, false);
-+ }
-+ return false;
-+}
-+
-+bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector)
-+{
-+ // Go over every allocation and try to fit it in previous blocks at lowest offsets,
-+ // if not possible: realloc within single block to minimize offset (exclude offset == 0)
-+
-+ for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i)
-+ {
-+ VmaDeviceMemoryBlock* block = vector.GetBlock(i);
-+ VmaBlockMetadata* metadata = block->m_pMetadata;
-+
-+ for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
-+ handle != VK_NULL_HANDLE;
-+ handle = metadata->GetNextAllocation(handle))
-+ {
-+ MoveAllocationData moveData = GetMoveData(handle, metadata);
-+ // Ignore newly created allocations by defragmentation algorithm
-+ if (moveData.move.srcAllocation->GetUserData() == this)
-+ continue;
-+ switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
-+ {
-+ case CounterStatus::Ignore:
-+ continue;
-+ case CounterStatus::End:
-+ return true;
-+ case CounterStatus::Pass:
-+ break;
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+
-+ // Check all previous blocks for free space
-+ const size_t prevMoveCount = m_Moves.size();
-+ if (AllocInOtherBlock(0, i, moveData, vector))
-+ return true;
-+
-+ // If no room found then realloc within block for lower offset
-+ VkDeviceSize offset = moveData.move.srcAllocation->GetOffset();
-+ if (prevMoveCount == m_Moves.size() && offset != 0 && metadata->GetSumFreeSize() >= moveData.size)
-+ {
-+ VmaAllocationRequest request = {};
-+ if (metadata->CreateAllocationRequest(
-+ moveData.size,
-+ moveData.alignment,
-+ false,
-+ moveData.type,
-+ VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT,
-+ &request))
-+ {
-+ if (metadata->GetAllocationOffset(request.allocHandle) < offset)
-+ {
-+ if (vector.CommitAllocationRequest(
-+ request,
-+ block,
-+ moveData.alignment,
-+ moveData.flags,
-+ this,
-+ moveData.type,
-+ &moveData.move.dstTmpAllocation) == VK_SUCCESS)
-+ {
-+ m_Moves.push_back(moveData.move);
-+ if (IncrementCounters(moveData.size))
-+ return true;
-+ }
-+ }
-+ }
-+ }
-+ }
-+ }
-+ return false;
-+}
-+
-+bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index)
-+{
-+ // First free single block, then populate it to the brim, then free another block, and so on
-+
-+ // Fallback to previous algorithm since without granularity conflicts it can achieve max packing
-+ if (vector.m_BufferImageGranularity == 1)
-+ return ComputeDefragmentation_Full(vector);
-+
-+ VMA_ASSERT(m_AlgorithmState != VMA_NULL);
-+
-+ StateExtensive& vectorState = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[index];
-+
-+ bool texturePresent = false, bufferPresent = false, otherPresent = false;
-+ switch (vectorState.operation)
-+ {
-+ case StateExtensive::Operation::Done: // Vector defragmented
-+ return false;
-+ case StateExtensive::Operation::FindFreeBlockBuffer:
-+ case StateExtensive::Operation::FindFreeBlockTexture:
-+ case StateExtensive::Operation::FindFreeBlockAll:
-+ {
-+ // No more blocks to free, just perform fast realloc and move to cleanup
-+ if (vectorState.firstFreeBlock == 0)
-+ {
-+ vectorState.operation = StateExtensive::Operation::Cleanup;
-+ return ComputeDefragmentation_Fast(vector);
-+ }
-+
-+ // No free blocks, have to clear last one
-+ size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? vector.GetBlockCount() : vectorState.firstFreeBlock) - 1;
-+ VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata;
-+
-+ const size_t prevMoveCount = m_Moves.size();
-+ for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin();
-+ handle != VK_NULL_HANDLE;
-+ handle = freeMetadata->GetNextAllocation(handle))
-+ {
-+ MoveAllocationData moveData = GetMoveData(handle, freeMetadata);
-+ switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
-+ {
-+ case CounterStatus::Ignore:
-+ continue;
-+ case CounterStatus::End:
-+ return true;
-+ case CounterStatus::Pass:
-+ break;
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+
-+ // Check all previous blocks for free space
-+ if (AllocInOtherBlock(0, last, moveData, vector))
-+ {
-+ // Full clear performed already
-+ if (prevMoveCount != m_Moves.size() && freeMetadata->GetNextAllocation(handle) == VK_NULL_HANDLE)
-+ vectorState.firstFreeBlock = last;
-+ return true;
-+ }
-+ }
-+
-+ if (prevMoveCount == m_Moves.size())
-+ {
-+ // Cannot perform full clear, have to move data in other blocks around
-+ if (last != 0)
-+ {
-+ for (size_t i = last - 1; i; --i)
-+ {
-+ if (ReallocWithinBlock(vector, vector.GetBlock(i)))
-+ return true;
-+ }
-+ }
-+
-+ if (prevMoveCount == m_Moves.size())
-+ {
-+ // No possible reallocs within blocks, try to move them around fast
-+ return ComputeDefragmentation_Fast(vector);
-+ }
-+ }
-+ else
-+ {
-+ switch (vectorState.operation)
-+ {
-+ case StateExtensive::Operation::FindFreeBlockBuffer:
-+ vectorState.operation = StateExtensive::Operation::MoveBuffers;
-+ break;
-+ case StateExtensive::Operation::FindFreeBlockTexture:
-+ vectorState.operation = StateExtensive::Operation::MoveTextures;
-+ break;
-+ case StateExtensive::Operation::FindFreeBlockAll:
-+ vectorState.operation = StateExtensive::Operation::MoveAll;
-+ break;
-+ default:
-+ VMA_ASSERT(0);
-+ vectorState.operation = StateExtensive::Operation::MoveTextures;
-+ }
-+ vectorState.firstFreeBlock = last;
-+ // Nothing done, block found without reallocations, can perform another reallocs in same pass
-+ return ComputeDefragmentation_Extensive(vector, index);
-+ }
-+ break;
-+ }
-+ case StateExtensive::Operation::MoveTextures:
-+ {
-+ if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector,
-+ vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
-+ {
-+ if (texturePresent)
-+ {
-+ vectorState.operation = StateExtensive::Operation::FindFreeBlockTexture;
-+ return ComputeDefragmentation_Extensive(vector, index);
-+ }
-+
-+ if (!bufferPresent && !otherPresent)
-+ {
-+ vectorState.operation = StateExtensive::Operation::Cleanup;
-+ break;
-+ }
-+
-+ // No more textures to move, check buffers
-+ vectorState.operation = StateExtensive::Operation::MoveBuffers;
-+ bufferPresent = false;
-+ otherPresent = false;
-+ }
-+ else
-+ break;
-+ VMA_FALLTHROUGH; // Fallthrough
-+ }
-+ case StateExtensive::Operation::MoveBuffers:
-+ {
-+ if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector,
-+ vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
-+ {
-+ if (bufferPresent)
-+ {
-+ vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
-+ return ComputeDefragmentation_Extensive(vector, index);
-+ }
-+
-+ if (!otherPresent)
-+ {
-+ vectorState.operation = StateExtensive::Operation::Cleanup;
-+ break;
-+ }
-+
-+ // No more buffers to move, check all others
-+ vectorState.operation = StateExtensive::Operation::MoveAll;
-+ otherPresent = false;
-+ }
-+ else
-+ break;
-+ VMA_FALLTHROUGH; // Fallthrough
-+ }
-+ case StateExtensive::Operation::MoveAll:
-+ {
-+ if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, vector,
-+ vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent))
-+ {
-+ if (otherPresent)
-+ {
-+ vectorState.operation = StateExtensive::Operation::FindFreeBlockBuffer;
-+ return ComputeDefragmentation_Extensive(vector, index);
-+ }
-+ // Everything moved
-+ vectorState.operation = StateExtensive::Operation::Cleanup;
-+ }
-+ break;
-+ }
-+ case StateExtensive::Operation::Cleanup:
-+ // Cleanup is handled below so that other operations may reuse the cleanup code. This case is here to prevent the unhandled enum value warning (C4062).
-+ break;
-+ }
-+
-+ if (vectorState.operation == StateExtensive::Operation::Cleanup)
-+ {
-+ // All other work done, pack data in blocks even tighter if possible
-+ const size_t prevMoveCount = m_Moves.size();
-+ for (size_t i = 0; i < vector.GetBlockCount(); ++i)
-+ {
-+ if (ReallocWithinBlock(vector, vector.GetBlock(i)))
-+ return true;
-+ }
-+
-+ if (prevMoveCount == m_Moves.size())
-+ vectorState.operation = StateExtensive::Operation::Done;
-+ }
-+ return false;
-+}
-+
-+void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state)
-+{
-+ size_t allocCount = 0;
-+ size_t freeCount = 0;
-+ state.avgFreeSize = 0;
-+ state.avgAllocSize = 0;
-+
-+ for (size_t i = 0; i < vector.GetBlockCount(); ++i)
-+ {
-+ VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata;
-+
-+ allocCount += metadata->GetAllocationCount();
-+ freeCount += metadata->GetFreeRegionsCount();
-+ state.avgFreeSize += metadata->GetSumFreeSize();
-+ state.avgAllocSize += metadata->GetSize();
-+ }
-+
-+ state.avgAllocSize = (state.avgAllocSize - state.avgFreeSize) / allocCount;
-+ state.avgFreeSize /= freeCount;
-+}
-+
-+bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType,
-+ VmaBlockVector& vector, size_t firstFreeBlock,
-+ bool& texturePresent, bool& bufferPresent, bool& otherPresent)
-+{
-+ const size_t prevMoveCount = m_Moves.size();
-+ for (size_t i = firstFreeBlock ; i;)
-+ {
-+ VmaDeviceMemoryBlock* block = vector.GetBlock(--i);
-+ VmaBlockMetadata* metadata = block->m_pMetadata;
-+
-+ for (VmaAllocHandle handle = metadata->GetAllocationListBegin();
-+ handle != VK_NULL_HANDLE;
-+ handle = metadata->GetNextAllocation(handle))
-+ {
-+ MoveAllocationData moveData = GetMoveData(handle, metadata);
-+ // Ignore newly created allocations by defragmentation algorithm
-+ if (moveData.move.srcAllocation->GetUserData() == this)
-+ continue;
-+ switch (CheckCounters(moveData.move.srcAllocation->GetSize()))
-+ {
-+ case CounterStatus::Ignore:
-+ continue;
-+ case CounterStatus::End:
-+ return true;
-+ case CounterStatus::Pass:
-+ break;
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+
-+ // Move only single type of resources at once
-+ if (!VmaIsBufferImageGranularityConflict(moveData.type, currentType))
-+ {
-+ // Try to fit allocation into free blocks
-+ if (AllocInOtherBlock(firstFreeBlock, vector.GetBlockCount(), moveData, vector))
-+ return false;
-+ }
-+
-+ if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL))
-+ texturePresent = true;
-+ else if (!VmaIsBufferImageGranularityConflict(moveData.type, VMA_SUBALLOCATION_TYPE_BUFFER))
-+ bufferPresent = true;
-+ else
-+ otherPresent = true;
-+ }
-+ }
-+ return prevMoveCount == m_Moves.size();
-+}
-+#endif // _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS
-+
-+#ifndef _VMA_POOL_T_FUNCTIONS
-+VmaPool_T::VmaPool_T(
-+ VmaAllocator hAllocator,
-+ const VmaPoolCreateInfo& createInfo,
-+ VkDeviceSize preferredBlockSize)
-+ : m_BlockVector(
-+ hAllocator,
-+ this, // hParentPool
-+ createInfo.memoryTypeIndex,
-+ createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
-+ createInfo.minBlockCount,
-+ createInfo.maxBlockCount,
-+ (createInfo.flags& VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
-+ createInfo.blockSize != 0, // explicitBlockSize
-+ createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
-+ createInfo.priority,
-+ VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment),
-+ createInfo.pMemoryAllocateNext),
-+ m_Id(0),
-+ m_Name(VMA_NULL) {}
-+
-+VmaPool_T::~VmaPool_T()
-+{
-+ VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL);
-+}
-+
-+void VmaPool_T::SetName(const char* pName)
-+{
-+ const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
-+ VmaFreeString(allocs, m_Name);
-+
-+ if (pName != VMA_NULL)
-+ {
-+ m_Name = VmaCreateStringCopy(allocs, pName);
-+ }
-+ else
-+ {
-+ m_Name = VMA_NULL;
-+ }
-+}
-+#endif // _VMA_POOL_T_FUNCTIONS
-+
-+#ifndef _VMA_ALLOCATOR_T_FUNCTIONS
-+VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) :
-+ m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0),
-+ m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0),
-+ m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0),
-+ m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0),
-+ m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0),
-+ m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0),
-+ m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0),
-+ m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0),
-+ m_hDevice(pCreateInfo->device),
-+ m_hInstance(pCreateInfo->instance),
-+ m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL),
-+ m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ?
-+ *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks),
-+ m_AllocationObjectAllocator(&m_AllocationCallbacks),
-+ m_HeapSizeLimitMask(0),
-+ m_DeviceMemoryCount(0),
-+ m_PreferredLargeHeapBlockSize(0),
-+ m_PhysicalDevice(pCreateInfo->physicalDevice),
-+ m_GpuDefragmentationMemoryTypeBits(UINT32_MAX),
-+ m_NextPoolId(0),
-+ m_GlobalMemoryTypeBits(UINT32_MAX)
-+{
-+ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-+ {
-+ m_UseKhrDedicatedAllocation = false;
-+ m_UseKhrBindMemory2 = false;
-+ }
-+
-+ if(VMA_DEBUG_DETECT_CORRUPTION)
-+ {
-+ // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it.
-+ VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0);
-+ }
-+
-+ VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance);
-+
-+ if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0))
-+ {
-+#if !(VMA_DEDICATED_ALLOCATION)
-+ if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0)
-+ {
-+ VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros.");
-+ }
-+#endif
-+#if !(VMA_BIND_MEMORY2)
-+ if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0)
-+ {
-+ VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros.");
-+ }
-+#endif
-+ }
-+#if !(VMA_MEMORY_BUDGET)
-+ if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0)
-+ {
-+ VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros.");
-+ }
-+#endif
-+#if !(VMA_BUFFER_DEVICE_ADDRESS)
-+ if(m_UseKhrBufferDeviceAddress)
-+ {
-+ VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
-+ }
-+#endif
-+#if VMA_VULKAN_VERSION < 1003000
-+ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
-+ {
-+ VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_3 but required Vulkan version is disabled by preprocessor macros.");
-+ }
-+#endif
-+#if VMA_VULKAN_VERSION < 1002000
-+ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0))
-+ {
-+ VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros.");
-+ }
-+#endif
-+#if VMA_VULKAN_VERSION < 1001000
-+ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-+ {
-+ VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros.");
-+ }
-+#endif
-+#if !(VMA_MEMORY_PRIORITY)
-+ if(m_UseExtMemoryPriority)
-+ {
-+ VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro.");
-+ }
-+#endif
-+
-+ memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks));
-+ memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties));
-+ memset(&m_MemProps, 0, sizeof(m_MemProps));
-+
-+ memset(&m_pBlockVectors, 0, sizeof(m_pBlockVectors));
-+ memset(&m_VulkanFunctions, 0, sizeof(m_VulkanFunctions));
-+
-+#if VMA_EXTERNAL_MEMORY
-+ memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes));
-+#endif // #if VMA_EXTERNAL_MEMORY
-+
-+ if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL)
-+ {
-+ m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData;
-+ m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate;
-+ m_DeviceMemoryCallbacks.pfnFree = pCreateInfo->pDeviceMemoryCallbacks->pfnFree;
-+ }
-+
-+ ImportVulkanFunctions(pCreateInfo->pVulkanFunctions);
-+
-+ (*m_VulkanFunctions.vkGetPhysicalDeviceProperties)(m_PhysicalDevice, &m_PhysicalDeviceProperties);
-+ (*m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties)(m_PhysicalDevice, &m_MemProps);
-+
-+ VMA_ASSERT(VmaIsPow2(VMA_MIN_ALIGNMENT));
-+ VMA_ASSERT(VmaIsPow2(VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY));
-+ VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity));
-+ VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize));
-+
-+ m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ?
-+ pCreateInfo->preferredLargeHeapBlockSize : static_cast<VkDeviceSize>(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE);
-+
-+ m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits();
-+
-+#if VMA_EXTERNAL_MEMORY
-+ if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL)
-+ {
-+ memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes,
-+ sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount());
-+ }
-+#endif // #if VMA_EXTERNAL_MEMORY
-+
-+ if(pCreateInfo->pHeapSizeLimit != VMA_NULL)
-+ {
-+ for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
-+ {
-+ const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex];
-+ if(limit != VK_WHOLE_SIZE)
-+ {
-+ m_HeapSizeLimitMask |= 1u << heapIndex;
-+ if(limit < m_MemProps.memoryHeaps[heapIndex].size)
-+ {
-+ m_MemProps.memoryHeaps[heapIndex].size = limit;
-+ }
-+ }
-+ }
-+ }
-+
-+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-+ {
-+ // Create only supported types
-+ if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0)
-+ {
-+ const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex);
-+ m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)(
-+ this,
-+ VK_NULL_HANDLE, // hParentPool
-+ memTypeIndex,
-+ preferredBlockSize,
-+ 0,
-+ SIZE_MAX,
-+ GetBufferImageGranularity(),
-+ false, // explicitBlockSize
-+ 0, // algorithm
-+ 0.5f, // priority (0.5 is the default per Vulkan spec)
-+ GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment
-+ VMA_NULL); // // pMemoryAllocateNext
-+ // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here,
-+ // becase minBlockCount is 0.
-+ }
-+ }
-+}
-+
-+VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo)
-+{
-+ VkResult res = VK_SUCCESS;
-+
-+#if VMA_MEMORY_BUDGET
-+ if(m_UseExtMemoryBudget)
-+ {
-+ UpdateVulkanBudget();
-+ }
-+#endif // #if VMA_MEMORY_BUDGET
-+
-+ return res;
-+}
-+
-+VmaAllocator_T::~VmaAllocator_T()
-+{
-+ VMA_ASSERT(m_Pools.IsEmpty());
-+
-+ for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; )
-+ {
-+ vma_delete(this, m_pBlockVectors[memTypeIndex]);
-+ }
-+}
-+
-+void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions)
-+{
-+#if VMA_STATIC_VULKAN_FUNCTIONS == 1
-+ ImportVulkanFunctions_Static();
-+#endif
-+
-+ if(pVulkanFunctions != VMA_NULL)
-+ {
-+ ImportVulkanFunctions_Custom(pVulkanFunctions);
-+ }
-+
-+#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
-+ ImportVulkanFunctions_Dynamic();
-+#endif
-+
-+ ValidateVulkanFunctions();
-+}
-+
-+#if VMA_STATIC_VULKAN_FUNCTIONS == 1
-+
-+void VmaAllocator_T::ImportVulkanFunctions_Static()
-+{
-+ // Vulkan 1.0
-+ m_VulkanFunctions.vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr)vkGetInstanceProcAddr;
-+ m_VulkanFunctions.vkGetDeviceProcAddr = (PFN_vkGetDeviceProcAddr)vkGetDeviceProcAddr;
-+ m_VulkanFunctions.vkGetPhysicalDeviceProperties = (PFN_vkGetPhysicalDeviceProperties)vkGetPhysicalDeviceProperties;
-+ m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties = (PFN_vkGetPhysicalDeviceMemoryProperties)vkGetPhysicalDeviceMemoryProperties;
-+ m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
-+ m_VulkanFunctions.vkFreeMemory = (PFN_vkFreeMemory)vkFreeMemory;
-+ m_VulkanFunctions.vkMapMemory = (PFN_vkMapMemory)vkMapMemory;
-+ m_VulkanFunctions.vkUnmapMemory = (PFN_vkUnmapMemory)vkUnmapMemory;
-+ m_VulkanFunctions.vkFlushMappedMemoryRanges = (PFN_vkFlushMappedMemoryRanges)vkFlushMappedMemoryRanges;
-+ m_VulkanFunctions.vkInvalidateMappedMemoryRanges = (PFN_vkInvalidateMappedMemoryRanges)vkInvalidateMappedMemoryRanges;
-+ m_VulkanFunctions.vkBindBufferMemory = (PFN_vkBindBufferMemory)vkBindBufferMemory;
-+ m_VulkanFunctions.vkBindImageMemory = (PFN_vkBindImageMemory)vkBindImageMemory;
-+ m_VulkanFunctions.vkGetBufferMemoryRequirements = (PFN_vkGetBufferMemoryRequirements)vkGetBufferMemoryRequirements;
-+ m_VulkanFunctions.vkGetImageMemoryRequirements = (PFN_vkGetImageMemoryRequirements)vkGetImageMemoryRequirements;
-+ m_VulkanFunctions.vkCreateBuffer = (PFN_vkCreateBuffer)vkCreateBuffer;
-+ m_VulkanFunctions.vkDestroyBuffer = (PFN_vkDestroyBuffer)vkDestroyBuffer;
-+ m_VulkanFunctions.vkCreateImage = (PFN_vkCreateImage)vkCreateImage;
-+ m_VulkanFunctions.vkDestroyImage = (PFN_vkDestroyImage)vkDestroyImage;
-+ m_VulkanFunctions.vkCmdCopyBuffer = (PFN_vkCmdCopyBuffer)vkCmdCopyBuffer;
-+
-+ // Vulkan 1.1
-+#if VMA_VULKAN_VERSION >= 1001000
-+ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-+ {
-+ m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2;
-+ m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2;
-+ m_VulkanFunctions.vkBindBufferMemory2KHR = (PFN_vkBindBufferMemory2)vkBindBufferMemory2;
-+ m_VulkanFunctions.vkBindImageMemory2KHR = (PFN_vkBindImageMemory2)vkBindImageMemory2;
-+ }
-+#endif
-+
-+#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
-+ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-+ {
-+ m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2;
-+ }
-+#endif
-+
-+#if VMA_VULKAN_VERSION >= 1003000
-+ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
-+ {
-+ m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements;
-+ m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements;
-+ }
-+#endif
-+}
-+
-+#endif // VMA_STATIC_VULKAN_FUNCTIONS == 1
-+
-+void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions)
-+{
-+ VMA_ASSERT(pVulkanFunctions != VMA_NULL);
-+
-+#define VMA_COPY_IF_NOT_NULL(funcName) \
-+ if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName;
-+
-+ VMA_COPY_IF_NOT_NULL(vkGetInstanceProcAddr);
-+ VMA_COPY_IF_NOT_NULL(vkGetDeviceProcAddr);
-+ VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceProperties);
-+ VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties);
-+ VMA_COPY_IF_NOT_NULL(vkAllocateMemory);
-+ VMA_COPY_IF_NOT_NULL(vkFreeMemory);
-+ VMA_COPY_IF_NOT_NULL(vkMapMemory);
-+ VMA_COPY_IF_NOT_NULL(vkUnmapMemory);
-+ VMA_COPY_IF_NOT_NULL(vkFlushMappedMemoryRanges);
-+ VMA_COPY_IF_NOT_NULL(vkInvalidateMappedMemoryRanges);
-+ VMA_COPY_IF_NOT_NULL(vkBindBufferMemory);
-+ VMA_COPY_IF_NOT_NULL(vkBindImageMemory);
-+ VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements);
-+ VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements);
-+ VMA_COPY_IF_NOT_NULL(vkCreateBuffer);
-+ VMA_COPY_IF_NOT_NULL(vkDestroyBuffer);
-+ VMA_COPY_IF_NOT_NULL(vkCreateImage);
-+ VMA_COPY_IF_NOT_NULL(vkDestroyImage);
-+ VMA_COPY_IF_NOT_NULL(vkCmdCopyBuffer);
-+
-+#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-+ VMA_COPY_IF_NOT_NULL(vkGetBufferMemoryRequirements2KHR);
-+ VMA_COPY_IF_NOT_NULL(vkGetImageMemoryRequirements2KHR);
-+#endif
-+
-+#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
-+ VMA_COPY_IF_NOT_NULL(vkBindBufferMemory2KHR);
-+ VMA_COPY_IF_NOT_NULL(vkBindImageMemory2KHR);
-+#endif
-+
-+#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
-+ VMA_COPY_IF_NOT_NULL(vkGetPhysicalDeviceMemoryProperties2KHR);
-+#endif
-+
-+#if VMA_VULKAN_VERSION >= 1003000
-+ VMA_COPY_IF_NOT_NULL(vkGetDeviceBufferMemoryRequirements);
-+ VMA_COPY_IF_NOT_NULL(vkGetDeviceImageMemoryRequirements);
-+#endif
-+
-+#undef VMA_COPY_IF_NOT_NULL
-+}
-+
-+#if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
-+
-+void VmaAllocator_T::ImportVulkanFunctions_Dynamic()
-+{
-+ VMA_ASSERT(m_VulkanFunctions.vkGetInstanceProcAddr && m_VulkanFunctions.vkGetDeviceProcAddr &&
-+ "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass "
-+ "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. "
-+ "Other members can be null.");
-+
-+#define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \
-+ if(m_VulkanFunctions.memberName == VMA_NULL) \
-+ m_VulkanFunctions.memberName = \
-+ (functionPointerType)m_VulkanFunctions.vkGetInstanceProcAddr(m_hInstance, functionNameString);
-+#define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \
-+ if(m_VulkanFunctions.memberName == VMA_NULL) \
-+ m_VulkanFunctions.memberName = \
-+ (functionPointerType)m_VulkanFunctions.vkGetDeviceProcAddr(m_hDevice, functionNameString);
-+
-+ VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties");
-+ VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties, PFN_vkGetPhysicalDeviceMemoryProperties, "vkGetPhysicalDeviceMemoryProperties");
-+ VMA_FETCH_DEVICE_FUNC(vkAllocateMemory, PFN_vkAllocateMemory, "vkAllocateMemory");
-+ VMA_FETCH_DEVICE_FUNC(vkFreeMemory, PFN_vkFreeMemory, "vkFreeMemory");
-+ VMA_FETCH_DEVICE_FUNC(vkMapMemory, PFN_vkMapMemory, "vkMapMemory");
-+ VMA_FETCH_DEVICE_FUNC(vkUnmapMemory, PFN_vkUnmapMemory, "vkUnmapMemory");
-+ VMA_FETCH_DEVICE_FUNC(vkFlushMappedMemoryRanges, PFN_vkFlushMappedMemoryRanges, "vkFlushMappedMemoryRanges");
-+ VMA_FETCH_DEVICE_FUNC(vkInvalidateMappedMemoryRanges, PFN_vkInvalidateMappedMemoryRanges, "vkInvalidateMappedMemoryRanges");
-+ VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory, PFN_vkBindBufferMemory, "vkBindBufferMemory");
-+ VMA_FETCH_DEVICE_FUNC(vkBindImageMemory, PFN_vkBindImageMemory, "vkBindImageMemory");
-+ VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements, PFN_vkGetBufferMemoryRequirements, "vkGetBufferMemoryRequirements");
-+ VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements, PFN_vkGetImageMemoryRequirements, "vkGetImageMemoryRequirements");
-+ VMA_FETCH_DEVICE_FUNC(vkCreateBuffer, PFN_vkCreateBuffer, "vkCreateBuffer");
-+ VMA_FETCH_DEVICE_FUNC(vkDestroyBuffer, PFN_vkDestroyBuffer, "vkDestroyBuffer");
-+ VMA_FETCH_DEVICE_FUNC(vkCreateImage, PFN_vkCreateImage, "vkCreateImage");
-+ VMA_FETCH_DEVICE_FUNC(vkDestroyImage, PFN_vkDestroyImage, "vkDestroyImage");
-+ VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer");
-+
-+#if VMA_VULKAN_VERSION >= 1001000
-+ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-+ {
-+ VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2");
-+ VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2");
-+ VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2, "vkBindBufferMemory2");
-+ VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2, "vkBindImageMemory2");
-+ }
-+#endif
-+
-+#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
-+ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-+ {
-+ VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2");
-+ }
-+ else if(m_UseExtMemoryBudget)
-+ {
-+ VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2KHR");
-+ }
-+#endif
-+
-+#if VMA_DEDICATED_ALLOCATION
-+ if(m_UseKhrDedicatedAllocation)
-+ {
-+ VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR");
-+ VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR");
-+ }
-+#endif
-+
-+#if VMA_BIND_MEMORY2
-+ if(m_UseKhrBindMemory2)
-+ {
-+ VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR");
-+ VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR");
-+ }
-+#endif // #if VMA_BIND_MEMORY2
-+
-+#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
-+ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-+ {
-+ VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2");
-+ }
-+ else if(m_UseExtMemoryBudget)
-+ {
-+ VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR");
-+ }
-+#endif // #if VMA_MEMORY_BUDGET
-+
-+#if VMA_VULKAN_VERSION >= 1003000
-+ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
-+ {
-+ VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, "vkGetDeviceBufferMemoryRequirements");
-+ VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements");
-+ }
-+#endif
-+
-+#undef VMA_FETCH_DEVICE_FUNC
-+#undef VMA_FETCH_INSTANCE_FUNC
-+}
-+
-+#endif // VMA_DYNAMIC_VULKAN_FUNCTIONS == 1
-+
-+void VmaAllocator_T::ValidateVulkanFunctions()
-+{
-+ VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceProperties != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkAllocateMemory != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkFreeMemory != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkMapMemory != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkUnmapMemory != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkFlushMappedMemoryRanges != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkInvalidateMappedMemoryRanges != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkCreateBuffer != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkDestroyBuffer != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkCreateImage != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkDestroyImage != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL);
-+
-+#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-+ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation)
-+ {
-+ VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL);
-+ }
-+#endif
-+
-+#if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000
-+ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2)
-+ {
-+ VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL);
-+ }
-+#endif
-+
-+#if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000
-+ if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-+ {
-+ VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL);
-+ }
-+#endif
-+
-+#if VMA_VULKAN_VERSION >= 1003000
-+ if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0))
-+ {
-+ VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL);
-+ VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL);
-+ }
-+#endif
-+}
-+
-+VkDeviceSize VmaAllocator_T::CalcPreferredBlockSize(uint32_t memTypeIndex)
-+{
-+ const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
-+ const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
-+ const bool isSmallHeap = heapSize <= VMA_SMALL_HEAP_MAX_SIZE;
-+ return VmaAlignUp(isSmallHeap ? (heapSize / 8) : m_PreferredLargeHeapBlockSize, (VkDeviceSize)32);
-+}
-+
-+VkResult VmaAllocator_T::AllocateMemoryOfType(
-+ VmaPool pool,
-+ VkDeviceSize size,
-+ VkDeviceSize alignment,
-+ bool dedicatedPreferred,
-+ VkBuffer dedicatedBuffer,
-+ VkImage dedicatedImage,
-+ VkFlags dedicatedBufferImageUsage,
-+ const VmaAllocationCreateInfo& createInfo,
-+ uint32_t memTypeIndex,
-+ VmaSuballocationType suballocType,
-+ VmaDedicatedAllocationList& dedicatedAllocations,
-+ VmaBlockVector& blockVector,
-+ size_t allocationCount,
-+ VmaAllocation* pAllocations)
-+{
-+ VMA_ASSERT(pAllocations != VMA_NULL);
-+ VMA_DEBUG_LOG_FORMAT(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size);
-+
-+ VmaAllocationCreateInfo finalCreateInfo = createInfo;
-+ VkResult res = CalcMemTypeParams(
-+ finalCreateInfo,
-+ memTypeIndex,
-+ size,
-+ allocationCount);
-+ if(res != VK_SUCCESS)
-+ return res;
-+
-+ if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
-+ {
-+ return AllocateDedicatedMemory(
-+ pool,
-+ size,
-+ suballocType,
-+ dedicatedAllocations,
-+ memTypeIndex,
-+ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
-+ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
-+ (finalCreateInfo.flags &
-+ (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
-+ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
-+ finalCreateInfo.pUserData,
-+ finalCreateInfo.priority,
-+ dedicatedBuffer,
-+ dedicatedImage,
-+ dedicatedBufferImageUsage,
-+ allocationCount,
-+ pAllocations,
-+ blockVector.GetAllocationNextPtr());
-+ }
-+ else
-+ {
-+ const bool canAllocateDedicated =
-+ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 &&
-+ (pool == VK_NULL_HANDLE || !blockVector.HasExplicitBlockSize());
-+
-+ if(canAllocateDedicated)
-+ {
-+ // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size.
-+ if(size > blockVector.GetPreferredBlockSize() / 2)
-+ {
-+ dedicatedPreferred = true;
-+ }
-+ // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget,
-+ // which can quickly deplete maxMemoryAllocationCount: Don't prefer dedicated allocations when above
-+ // 3/4 of the maximum allocation count.
-+ if(m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount < UINT32_MAX / 4 &&
-+ m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4)
-+ {
-+ dedicatedPreferred = false;
-+ }
-+
-+ if(dedicatedPreferred)
-+ {
-+ res = AllocateDedicatedMemory(
-+ pool,
-+ size,
-+ suballocType,
-+ dedicatedAllocations,
-+ memTypeIndex,
-+ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
-+ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
-+ (finalCreateInfo.flags &
-+ (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
-+ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
-+ finalCreateInfo.pUserData,
-+ finalCreateInfo.priority,
-+ dedicatedBuffer,
-+ dedicatedImage,
-+ dedicatedBufferImageUsage,
-+ allocationCount,
-+ pAllocations,
-+ blockVector.GetAllocationNextPtr());
-+ if(res == VK_SUCCESS)
-+ {
-+ // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here.
-+ VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
-+ return VK_SUCCESS;
-+ }
-+ }
-+ }
-+
-+ res = blockVector.Allocate(
-+ size,
-+ alignment,
-+ finalCreateInfo,
-+ suballocType,
-+ allocationCount,
-+ pAllocations);
-+ if(res == VK_SUCCESS)
-+ return VK_SUCCESS;
-+
-+ // Try dedicated memory.
-+ if(canAllocateDedicated && !dedicatedPreferred)
-+ {
-+ res = AllocateDedicatedMemory(
-+ pool,
-+ size,
-+ suballocType,
-+ dedicatedAllocations,
-+ memTypeIndex,
-+ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0,
-+ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0,
-+ (finalCreateInfo.flags &
-+ (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0,
-+ (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0,
-+ finalCreateInfo.pUserData,
-+ finalCreateInfo.priority,
-+ dedicatedBuffer,
-+ dedicatedImage,
-+ dedicatedBufferImageUsage,
-+ allocationCount,
-+ pAllocations,
-+ blockVector.GetAllocationNextPtr());
-+ if(res == VK_SUCCESS)
-+ {
-+ // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here.
-+ VMA_DEBUG_LOG(" Allocated as DedicatedMemory");
-+ return VK_SUCCESS;
-+ }
-+ }
-+ // Everything failed: Return error code.
-+ VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
-+ return res;
-+ }
-+}
-+
-+VkResult VmaAllocator_T::AllocateDedicatedMemory(
-+ VmaPool pool,
-+ VkDeviceSize size,
-+ VmaSuballocationType suballocType,
-+ VmaDedicatedAllocationList& dedicatedAllocations,
-+ uint32_t memTypeIndex,
-+ bool map,
-+ bool isUserDataString,
-+ bool isMappingAllowed,
-+ bool canAliasMemory,
-+ void* pUserData,
-+ float priority,
-+ VkBuffer dedicatedBuffer,
-+ VkImage dedicatedImage,
-+ VkFlags dedicatedBufferImageUsage,
-+ size_t allocationCount,
-+ VmaAllocation* pAllocations,
-+ const void* pNextChain)
-+{
-+ VMA_ASSERT(allocationCount > 0 && pAllocations);
-+
-+ VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
-+ allocInfo.memoryTypeIndex = memTypeIndex;
-+ allocInfo.allocationSize = size;
-+ allocInfo.pNext = pNextChain;
-+
-+#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-+ VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR };
-+ if(!canAliasMemory)
-+ {
-+ if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-+ {
-+ if(dedicatedBuffer != VK_NULL_HANDLE)
-+ {
-+ VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE);
-+ dedicatedAllocInfo.buffer = dedicatedBuffer;
-+ VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
-+ }
-+ else if(dedicatedImage != VK_NULL_HANDLE)
-+ {
-+ dedicatedAllocInfo.image = dedicatedImage;
-+ VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo);
-+ }
-+ }
-+ }
-+#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-+
-+#if VMA_BUFFER_DEVICE_ADDRESS
-+ VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR };
-+ if(m_UseKhrBufferDeviceAddress)
-+ {
-+ bool canContainBufferWithDeviceAddress = true;
-+ if(dedicatedBuffer != VK_NULL_HANDLE)
-+ {
-+ canContainBufferWithDeviceAddress = dedicatedBufferImageUsage == UINT32_MAX || // Usage flags unknown
-+ (dedicatedBufferImageUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0;
-+ }
-+ else if(dedicatedImage != VK_NULL_HANDLE)
-+ {
-+ canContainBufferWithDeviceAddress = false;
-+ }
-+ if(canContainBufferWithDeviceAddress)
-+ {
-+ allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR;
-+ VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo);
-+ }
-+ }
-+#endif // #if VMA_BUFFER_DEVICE_ADDRESS
-+
-+#if VMA_MEMORY_PRIORITY
-+ VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT };
-+ if(m_UseExtMemoryPriority)
-+ {
-+ VMA_ASSERT(priority >= 0.f && priority <= 1.f);
-+ priorityInfo.priority = priority;
-+ VmaPnextChainPushFront(&allocInfo, &priorityInfo);
-+ }
-+#endif // #if VMA_MEMORY_PRIORITY
-+
-+#if VMA_EXTERNAL_MEMORY
-+ // Attach VkExportMemoryAllocateInfoKHR if necessary.
-+ VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR };
-+ exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex);
-+ if(exportMemoryAllocInfo.handleTypes != 0)
-+ {
-+ VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo);
-+ }
-+#endif // #if VMA_EXTERNAL_MEMORY
-+
-+ size_t allocIndex;
-+ VkResult res = VK_SUCCESS;
-+ for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-+ {
-+ res = AllocateDedicatedMemoryPage(
-+ pool,
-+ size,
-+ suballocType,
-+ memTypeIndex,
-+ allocInfo,
-+ map,
-+ isUserDataString,
-+ isMappingAllowed,
-+ pUserData,
-+ pAllocations + allocIndex);
-+ if(res != VK_SUCCESS)
-+ {
-+ break;
-+ }
-+ }
-+
-+ if(res == VK_SUCCESS)
-+ {
-+ for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-+ {
-+ dedicatedAllocations.Register(pAllocations[allocIndex]);
-+ }
-+ VMA_DEBUG_LOG_FORMAT(" Allocated DedicatedMemory Count=%zu, MemoryTypeIndex=#%u", allocationCount, memTypeIndex);
-+ }
-+ else
-+ {
-+ // Free all already created allocations.
-+ while(allocIndex--)
-+ {
-+ VmaAllocation currAlloc = pAllocations[allocIndex];
-+ VkDeviceMemory hMemory = currAlloc->GetMemory();
-+
-+ /*
-+ There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
-+ before vkFreeMemory.
-+
-+ if(currAlloc->GetMappedData() != VMA_NULL)
-+ {
-+ (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
-+ }
-+ */
-+
-+ FreeVulkanMemory(memTypeIndex, currAlloc->GetSize(), hMemory);
-+ m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), currAlloc->GetSize());
-+ m_AllocationObjectAllocator.Free(currAlloc);
-+ }
-+
-+ memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
-+ }
-+
-+ return res;
-+}
-+
-+VkResult VmaAllocator_T::AllocateDedicatedMemoryPage(
-+ VmaPool pool,
-+ VkDeviceSize size,
-+ VmaSuballocationType suballocType,
-+ uint32_t memTypeIndex,
-+ const VkMemoryAllocateInfo& allocInfo,
-+ bool map,
-+ bool isUserDataString,
-+ bool isMappingAllowed,
-+ void* pUserData,
-+ VmaAllocation* pAllocation)
-+{
-+ VkDeviceMemory hMemory = VK_NULL_HANDLE;
-+ VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory);
-+ if(res < 0)
-+ {
-+ VMA_DEBUG_LOG(" vkAllocateMemory FAILED");
-+ return res;
-+ }
-+
-+ void* pMappedData = VMA_NULL;
-+ if(map)
-+ {
-+ res = (*m_VulkanFunctions.vkMapMemory)(
-+ m_hDevice,
-+ hMemory,
-+ 0,
-+ VK_WHOLE_SIZE,
-+ 0,
-+ &pMappedData);
-+ if(res < 0)
-+ {
-+ VMA_DEBUG_LOG(" vkMapMemory FAILED");
-+ FreeVulkanMemory(memTypeIndex, size, hMemory);
-+ return res;
-+ }
-+ }
-+
-+ *pAllocation = m_AllocationObjectAllocator.Allocate(isMappingAllowed);
-+ (*pAllocation)->InitDedicatedAllocation(pool, memTypeIndex, hMemory, suballocType, pMappedData, size);
-+ if (isUserDataString)
-+ (*pAllocation)->SetName(this, (const char*)pUserData);
-+ else
-+ (*pAllocation)->SetUserData(this, pUserData);
-+ m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size);
-+ if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
-+ {
-+ FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED);
-+ }
-+
-+ return VK_SUCCESS;
-+}
-+
-+void VmaAllocator_T::GetBufferMemoryRequirements(
-+ VkBuffer hBuffer,
-+ VkMemoryRequirements& memReq,
-+ bool& requiresDedicatedAllocation,
-+ bool& prefersDedicatedAllocation) const
-+{
-+#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-+ if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-+ {
-+ VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR };
-+ memReqInfo.buffer = hBuffer;
-+
-+ VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
-+
-+ VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
-+ VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
-+
-+ (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
-+
-+ memReq = memReq2.memoryRequirements;
-+ requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
-+ prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
-+ }
-+ else
-+#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-+ {
-+ (*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq);
-+ requiresDedicatedAllocation = false;
-+ prefersDedicatedAllocation = false;
-+ }
-+}
-+
-+void VmaAllocator_T::GetImageMemoryRequirements(
-+ VkImage hImage,
-+ VkMemoryRequirements& memReq,
-+ bool& requiresDedicatedAllocation,
-+ bool& prefersDedicatedAllocation) const
-+{
-+#if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-+ if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0))
-+ {
-+ VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR };
-+ memReqInfo.image = hImage;
-+
-+ VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR };
-+
-+ VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR };
-+ VmaPnextChainPushFront(&memReq2, &memDedicatedReq);
-+
-+ (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2);
-+
-+ memReq = memReq2.memoryRequirements;
-+ requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE);
-+ prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE);
-+ }
-+ else
-+#endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000
-+ {
-+ (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq);
-+ requiresDedicatedAllocation = false;
-+ prefersDedicatedAllocation = false;
-+ }
-+}
-+
-+VkResult VmaAllocator_T::FindMemoryTypeIndex(
-+ uint32_t memoryTypeBits,
-+ const VmaAllocationCreateInfo* pAllocationCreateInfo,
-+ VkFlags bufImgUsage,
-+ uint32_t* pMemoryTypeIndex) const
-+{
-+ memoryTypeBits &= GetGlobalMemoryTypeBits();
-+
-+ if(pAllocationCreateInfo->memoryTypeBits != 0)
-+ {
-+ memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits;
-+ }
-+
-+ VkMemoryPropertyFlags requiredFlags = 0, preferredFlags = 0, notPreferredFlags = 0;
-+ if(!FindMemoryPreferences(
-+ IsIntegratedGpu(),
-+ *pAllocationCreateInfo,
-+ bufImgUsage,
-+ requiredFlags, preferredFlags, notPreferredFlags))
-+ {
-+ return VK_ERROR_FEATURE_NOT_PRESENT;
-+ }
-+
-+ *pMemoryTypeIndex = UINT32_MAX;
-+ uint32_t minCost = UINT32_MAX;
-+ for(uint32_t memTypeIndex = 0, memTypeBit = 1;
-+ memTypeIndex < GetMemoryTypeCount();
-+ ++memTypeIndex, memTypeBit <<= 1)
-+ {
-+ // This memory type is acceptable according to memoryTypeBits bitmask.
-+ if((memTypeBit & memoryTypeBits) != 0)
-+ {
-+ const VkMemoryPropertyFlags currFlags =
-+ m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
-+ // This memory type contains requiredFlags.
-+ if((requiredFlags & ~currFlags) == 0)
-+ {
-+ // Calculate cost as number of bits from preferredFlags not present in this memory type.
-+ uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) +
-+ VMA_COUNT_BITS_SET(currFlags & notPreferredFlags);
-+ // Remember memory type with lowest cost.
-+ if(currCost < minCost)
-+ {
-+ *pMemoryTypeIndex = memTypeIndex;
-+ if(currCost == 0)
-+ {
-+ return VK_SUCCESS;
-+ }
-+ minCost = currCost;
-+ }
-+ }
-+ }
-+ }
-+ return (*pMemoryTypeIndex != UINT32_MAX) ? VK_SUCCESS : VK_ERROR_FEATURE_NOT_PRESENT;
-+}
-+
-+VkResult VmaAllocator_T::CalcMemTypeParams(
-+ VmaAllocationCreateInfo& inoutCreateInfo,
-+ uint32_t memTypeIndex,
-+ VkDeviceSize size,
-+ size_t allocationCount)
-+{
-+ // If memory type is not HOST_VISIBLE, disable MAPPED.
-+ if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 &&
-+ (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
-+ {
-+ inoutCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT;
-+ }
-+
-+ if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
-+ (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0)
-+ {
-+ const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
-+ VmaBudget heapBudget = {};
-+ GetHeapBudgets(&heapBudget, heapIndex, 1);
-+ if(heapBudget.usage + size * allocationCount > heapBudget.budget)
-+ {
-+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-+ }
-+ }
-+ return VK_SUCCESS;
-+}
-+
-+VkResult VmaAllocator_T::CalcAllocationParams(
-+ VmaAllocationCreateInfo& inoutCreateInfo,
-+ bool dedicatedRequired,
-+ bool dedicatedPreferred)
-+{
-+ VMA_ASSERT((inoutCreateInfo.flags &
-+ (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) !=
-+ (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) &&
-+ "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect.");
-+ VMA_ASSERT((((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) == 0 ||
-+ (inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) &&
-+ "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.");
-+ if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST)
-+ {
-+ if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0)
-+ {
-+ VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0 &&
-+ "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.");
-+ }
-+ }
-+
-+ // If memory is lazily allocated, it should be always dedicated.
-+ if(dedicatedRequired ||
-+ inoutCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED)
-+ {
-+ inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
-+ }
-+
-+ if(inoutCreateInfo.pool != VK_NULL_HANDLE)
-+ {
-+ if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() &&
-+ (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
-+ {
-+ VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations.");
-+ return VK_ERROR_FEATURE_NOT_PRESENT;
-+ }
-+ inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority();
-+ }
-+
-+ if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
-+ (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
-+ {
-+ VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense.");
-+ return VK_ERROR_FEATURE_NOT_PRESENT;
-+ }
-+
-+ if(VMA_DEBUG_ALWAYS_DEDICATED_MEMORY &&
-+ (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0)
-+ {
-+ inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
-+ }
-+
-+ // Non-auto USAGE values imply HOST_ACCESS flags.
-+ // And so does VMA_MEMORY_USAGE_UNKNOWN because it is used with custom pools.
-+ // Which specific flag is used doesn't matter. They change things only when used with VMA_MEMORY_USAGE_AUTO*.
-+ // Otherwise they just protect from assert on mapping.
-+ if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO &&
-+ inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE &&
-+ inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST)
-+ {
-+ if((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0)
-+ {
-+ inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT;
-+ }
-+ }
-+
-+ return VK_SUCCESS;
-+}
-+
-+VkResult VmaAllocator_T::AllocateMemory(
-+ const VkMemoryRequirements& vkMemReq,
-+ bool requiresDedicatedAllocation,
-+ bool prefersDedicatedAllocation,
-+ VkBuffer dedicatedBuffer,
-+ VkImage dedicatedImage,
-+ VkFlags dedicatedBufferImageUsage,
-+ const VmaAllocationCreateInfo& createInfo,
-+ VmaSuballocationType suballocType,
-+ size_t allocationCount,
-+ VmaAllocation* pAllocations)
-+{
-+ memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount);
-+
-+ VMA_ASSERT(VmaIsPow2(vkMemReq.alignment));
-+
-+ if(vkMemReq.size == 0)
-+ {
-+ return VK_ERROR_INITIALIZATION_FAILED;
-+ }
-+
-+ VmaAllocationCreateInfo createInfoFinal = createInfo;
-+ VkResult res = CalcAllocationParams(createInfoFinal, requiresDedicatedAllocation, prefersDedicatedAllocation);
-+ if(res != VK_SUCCESS)
-+ return res;
-+
-+ if(createInfoFinal.pool != VK_NULL_HANDLE)
-+ {
-+ VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector;
-+ return AllocateMemoryOfType(
-+ createInfoFinal.pool,
-+ vkMemReq.size,
-+ vkMemReq.alignment,
-+ prefersDedicatedAllocation,
-+ dedicatedBuffer,
-+ dedicatedImage,
-+ dedicatedBufferImageUsage,
-+ createInfoFinal,
-+ blockVector.GetMemoryTypeIndex(),
-+ suballocType,
-+ createInfoFinal.pool->m_DedicatedAllocations,
-+ blockVector,
-+ allocationCount,
-+ pAllocations);
-+ }
-+ else
-+ {
-+ // Bit mask of memory Vulkan types acceptable for this allocation.
-+ uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
-+ uint32_t memTypeIndex = UINT32_MAX;
-+ res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);
-+ // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
-+ if(res != VK_SUCCESS)
-+ return res;
-+ do
-+ {
-+ VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex];
-+ VMA_ASSERT(blockVector && "Trying to use unsupported memory type!");
-+ res = AllocateMemoryOfType(
-+ VK_NULL_HANDLE,
-+ vkMemReq.size,
-+ vkMemReq.alignment,
-+ requiresDedicatedAllocation || prefersDedicatedAllocation,
-+ dedicatedBuffer,
-+ dedicatedImage,
-+ dedicatedBufferImageUsage,
-+ createInfoFinal,
-+ memTypeIndex,
-+ suballocType,
-+ m_DedicatedAllocations[memTypeIndex],
-+ *blockVector,
-+ allocationCount,
-+ pAllocations);
-+ // Allocation succeeded
-+ if(res == VK_SUCCESS)
-+ return VK_SUCCESS;
-+
-+ // Remove old memTypeIndex from list of possibilities.
-+ memoryTypeBits &= ~(1u << memTypeIndex);
-+ // Find alternative memTypeIndex.
-+ res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex);
-+ } while(res == VK_SUCCESS);
-+
-+ // No other matching memory type index could be found.
-+ // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
-+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-+ }
-+}
-+
-+void VmaAllocator_T::FreeMemory(
-+ size_t allocationCount,
-+ const VmaAllocation* pAllocations)
-+{
-+ VMA_ASSERT(pAllocations);
-+
-+ for(size_t allocIndex = allocationCount; allocIndex--; )
-+ {
-+ VmaAllocation allocation = pAllocations[allocIndex];
-+
-+ if(allocation != VK_NULL_HANDLE)
-+ {
-+ if(VMA_DEBUG_INITIALIZE_ALLOCATIONS)
-+ {
-+ FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED);
-+ }
-+
-+ allocation->FreeName(this);
-+
-+ switch(allocation->GetType())
-+ {
-+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-+ {
-+ VmaBlockVector* pBlockVector = VMA_NULL;
-+ VmaPool hPool = allocation->GetParentPool();
-+ if(hPool != VK_NULL_HANDLE)
-+ {
-+ pBlockVector = &hPool->m_BlockVector;
-+ }
-+ else
-+ {
-+ const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
-+ pBlockVector = m_pBlockVectors[memTypeIndex];
-+ VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!");
-+ }
-+ pBlockVector->Free(allocation);
-+ }
-+ break;
-+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-+ FreeDedicatedMemory(allocation);
-+ break;
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+ }
-+ }
-+}
-+
-+void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats)
-+{
-+ // Initialize.
-+ VmaClearDetailedStatistics(pStats->total);
-+ for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i)
-+ VmaClearDetailedStatistics(pStats->memoryType[i]);
-+ for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i)
-+ VmaClearDetailedStatistics(pStats->memoryHeap[i]);
-+
-+ // Process default pools.
-+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-+ {
-+ VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
-+ if (pBlockVector != VMA_NULL)
-+ pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
-+ }
-+
-+ // Process custom pools.
-+ {
-+ VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
-+ for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
-+ {
-+ VmaBlockVector& blockVector = pool->m_BlockVector;
-+ const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex();
-+ blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
-+ pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
-+ }
-+ }
-+
-+ // Process dedicated allocations.
-+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-+ {
-+ m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]);
-+ }
-+
-+ // Sum from memory types to memory heaps.
-+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-+ {
-+ const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex;
-+ VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]);
-+ }
-+
-+ // Sum from memory heaps to total.
-+ for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex)
-+ VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]);
-+
-+ VMA_ASSERT(pStats->total.statistics.allocationCount == 0 ||
-+ pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin);
-+ VMA_ASSERT(pStats->total.unusedRangeCount == 0 ||
-+ pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin);
-+}
-+
-+void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount)
-+{
-+#if VMA_MEMORY_BUDGET
-+ if(m_UseExtMemoryBudget)
-+ {
-+ if(m_Budget.m_OperationsSinceBudgetFetch < 30)
-+ {
-+ VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex);
-+ for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)
-+ {
-+ const uint32_t heapIndex = firstHeap + i;
-+
-+ outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];
-+ outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];
-+ outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];
-+ outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
-+
-+ if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex])
-+ {
-+ outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] +
-+ outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
-+ }
-+ else
-+ {
-+ outBudgets->usage = 0;
-+ }
-+
-+ // Have to take MIN with heap size because explicit HeapSizeLimit is included in it.
-+ outBudgets->budget = VMA_MIN(
-+ m_Budget.m_VulkanBudget[heapIndex], m_MemProps.memoryHeaps[heapIndex].size);
-+ }
-+ }
-+ else
-+ {
-+ UpdateVulkanBudget(); // Outside of mutex lock
-+ GetHeapBudgets(outBudgets, firstHeap, heapCount); // Recursion
-+ }
-+ }
-+ else
-+#endif
-+ {
-+ for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets)
-+ {
-+ const uint32_t heapIndex = firstHeap + i;
-+
-+ outBudgets->statistics.blockCount = m_Budget.m_BlockCount[heapIndex];
-+ outBudgets->statistics.allocationCount = m_Budget.m_AllocationCount[heapIndex];
-+ outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex];
-+ outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex];
-+
-+ outBudgets->usage = outBudgets->statistics.blockBytes;
-+ outBudgets->budget = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
-+ }
-+ }
-+}
-+
-+void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo)
-+{
-+ pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex();
-+ pAllocationInfo->deviceMemory = hAllocation->GetMemory();
-+ pAllocationInfo->offset = hAllocation->GetOffset();
-+ pAllocationInfo->size = hAllocation->GetSize();
-+ pAllocationInfo->pMappedData = hAllocation->GetMappedData();
-+ pAllocationInfo->pUserData = hAllocation->GetUserData();
-+ pAllocationInfo->pName = hAllocation->GetName();
-+}
-+
-+VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool)
-+{
-+ VMA_DEBUG_LOG_FORMAT(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags);
-+
-+ VmaPoolCreateInfo newCreateInfo = *pCreateInfo;
-+
-+ // Protection against uninitialized new structure member. If garbage data are left there, this pointer dereference would crash.
-+ if(pCreateInfo->pMemoryAllocateNext)
-+ {
-+ VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0);
-+ }
-+
-+ if(newCreateInfo.maxBlockCount == 0)
-+ {
-+ newCreateInfo.maxBlockCount = SIZE_MAX;
-+ }
-+ if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount)
-+ {
-+ return VK_ERROR_INITIALIZATION_FAILED;
-+ }
-+ // Memory type index out of range or forbidden.
-+ if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
-+ ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
-+ {
-+ return VK_ERROR_FEATURE_NOT_PRESENT;
-+ }
-+ if(newCreateInfo.minAllocationAlignment > 0)
-+ {
-+ VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment));
-+ }
-+
-+ const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
-+
-+ *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
-+
-+ VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
-+ if(res != VK_SUCCESS)
-+ {
-+ vma_delete(this, *pPool);
-+ *pPool = VMA_NULL;
-+ return res;
-+ }
-+
-+ // Add to m_Pools.
-+ {
-+ VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
-+ (*pPool)->SetId(m_NextPoolId++);
-+ m_Pools.PushBack(*pPool);
-+ }
-+
-+ return VK_SUCCESS;
-+}
-+
-+void VmaAllocator_T::DestroyPool(VmaPool pool)
-+{
-+ // Remove from m_Pools.
-+ {
-+ VmaMutexLockWrite lock(m_PoolsMutex, m_UseMutex);
-+ m_Pools.Remove(pool);
-+ }
-+
-+ vma_delete(this, pool);
-+}
-+
-+void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats)
-+{
-+ VmaClearStatistics(*pPoolStats);
-+ pool->m_BlockVector.AddStatistics(*pPoolStats);
-+ pool->m_DedicatedAllocations.AddStatistics(*pPoolStats);
-+}
-+
-+void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats)
-+{
-+ VmaClearDetailedStatistics(*pPoolStats);
-+ pool->m_BlockVector.AddDetailedStatistics(*pPoolStats);
-+ pool->m_DedicatedAllocations.AddDetailedStatistics(*pPoolStats);
-+}
-+
-+void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
-+{
-+ m_CurrentFrameIndex.store(frameIndex);
-+
-+#if VMA_MEMORY_BUDGET
-+ if(m_UseExtMemoryBudget)
-+ {
-+ UpdateVulkanBudget();
-+ }
-+#endif // #if VMA_MEMORY_BUDGET
-+}
-+
-+VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
-+{
-+ return hPool->m_BlockVector.CheckCorruption();
-+}
-+
-+VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
-+{
-+ VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT;
-+
-+ // Process default pools.
-+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-+ {
-+ VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex];
-+ if(pBlockVector != VMA_NULL)
-+ {
-+ VkResult localRes = pBlockVector->CheckCorruption();
-+ switch(localRes)
-+ {
-+ case VK_ERROR_FEATURE_NOT_PRESENT:
-+ break;
-+ case VK_SUCCESS:
-+ finalRes = VK_SUCCESS;
-+ break;
-+ default:
-+ return localRes;
-+ }
-+ }
-+ }
-+
-+ // Process custom pools.
-+ {
-+ VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
-+ for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
-+ {
-+ if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
-+ {
-+ VkResult localRes = pool->m_BlockVector.CheckCorruption();
-+ switch(localRes)
-+ {
-+ case VK_ERROR_FEATURE_NOT_PRESENT:
-+ break;
-+ case VK_SUCCESS:
-+ finalRes = VK_SUCCESS;
-+ break;
-+ default:
-+ return localRes;
-+ }
-+ }
-+ }
-+ }
-+
-+ return finalRes;
-+}
-+
-+VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory)
-+{
-+ AtomicTransactionalIncrement<VMA_ATOMIC_UINT32> deviceMemoryCountIncrement;
-+ const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount);
-+#if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT
-+ if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount)
-+ {
-+ return VK_ERROR_TOO_MANY_OBJECTS;
-+ }
-+#endif
-+
-+ const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex);
-+
-+ // HeapSizeLimit is in effect for this heap.
-+ if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0)
-+ {
-+ const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size;
-+ VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex];
-+ for(;;)
-+ {
-+ const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize;
-+ if(blockBytesAfterAllocation > heapSize)
-+ {
-+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-+ }
-+ if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation))
-+ {
-+ break;
-+ }
-+ }
-+ }
-+ else
-+ {
-+ m_Budget.m_BlockBytes[heapIndex] += pAllocateInfo->allocationSize;
-+ }
-+ ++m_Budget.m_BlockCount[heapIndex];
-+
-+ // VULKAN CALL vkAllocateMemory.
-+ VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory);
-+
-+ if(res == VK_SUCCESS)
-+ {
-+#if VMA_MEMORY_BUDGET
-+ ++m_Budget.m_OperationsSinceBudgetFetch;
-+#endif
-+
-+ // Informative callback.
-+ if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL)
-+ {
-+ (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData);
-+ }
-+
-+ deviceMemoryCountIncrement.Commit();
-+ }
-+ else
-+ {
-+ --m_Budget.m_BlockCount[heapIndex];
-+ m_Budget.m_BlockBytes[heapIndex] -= pAllocateInfo->allocationSize;
-+ }
-+
-+ return res;
-+}
-+
-+void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory)
-+{
-+ // Informative callback.
-+ if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL)
-+ {
-+ (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData);
-+ }
-+
-+ // VULKAN CALL vkFreeMemory.
-+ (*m_VulkanFunctions.vkFreeMemory)(m_hDevice, hMemory, GetAllocationCallbacks());
-+
-+ const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memoryType);
-+ --m_Budget.m_BlockCount[heapIndex];
-+ m_Budget.m_BlockBytes[heapIndex] -= size;
-+
-+ --m_DeviceMemoryCount;
-+}
-+
-+VkResult VmaAllocator_T::BindVulkanBuffer(
-+ VkDeviceMemory memory,
-+ VkDeviceSize memoryOffset,
-+ VkBuffer buffer,
-+ const void* pNext)
-+{
-+ if(pNext != VMA_NULL)
-+ {
-+#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
-+ if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
-+ m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL)
-+ {
-+ VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR };
-+ bindBufferMemoryInfo.pNext = pNext;
-+ bindBufferMemoryInfo.buffer = buffer;
-+ bindBufferMemoryInfo.memory = memory;
-+ bindBufferMemoryInfo.memoryOffset = memoryOffset;
-+ return (*m_VulkanFunctions.vkBindBufferMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
-+ }
-+ else
-+#endif // #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
-+ {
-+ return VK_ERROR_EXTENSION_NOT_PRESENT;
-+ }
-+ }
-+ else
-+ {
-+ return (*m_VulkanFunctions.vkBindBufferMemory)(m_hDevice, buffer, memory, memoryOffset);
-+ }
-+}
-+
-+VkResult VmaAllocator_T::BindVulkanImage(
-+ VkDeviceMemory memory,
-+ VkDeviceSize memoryOffset,
-+ VkImage image,
-+ const void* pNext)
-+{
-+ if(pNext != VMA_NULL)
-+ {
-+#if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2
-+ if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) &&
-+ m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL)
-+ {
-+ VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR };
-+ bindBufferMemoryInfo.pNext = pNext;
-+ bindBufferMemoryInfo.image = image;
-+ bindBufferMemoryInfo.memory = memory;
-+ bindBufferMemoryInfo.memoryOffset = memoryOffset;
-+ return (*m_VulkanFunctions.vkBindImageMemory2KHR)(m_hDevice, 1, &bindBufferMemoryInfo);
-+ }
-+ else
-+#endif // #if VMA_BIND_MEMORY2
-+ {
-+ return VK_ERROR_EXTENSION_NOT_PRESENT;
-+ }
-+ }
-+ else
-+ {
-+ return (*m_VulkanFunctions.vkBindImageMemory)(m_hDevice, image, memory, memoryOffset);
-+ }
-+}
-+
-+VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData)
-+{
-+ switch(hAllocation->GetType())
-+ {
-+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-+ {
-+ VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
-+ char *pBytes = VMA_NULL;
-+ VkResult res = pBlock->Map(this, 1, (void**)&pBytes);
-+ if(res == VK_SUCCESS)
-+ {
-+ *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset();
-+ hAllocation->BlockAllocMap();
-+ }
-+ return res;
-+ }
-+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-+ return hAllocation->DedicatedAllocMap(this, ppData);
-+ default:
-+ VMA_ASSERT(0);
-+ return VK_ERROR_MEMORY_MAP_FAILED;
-+ }
-+}
-+
-+void VmaAllocator_T::Unmap(VmaAllocation hAllocation)
-+{
-+ switch(hAllocation->GetType())
-+ {
-+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-+ {
-+ VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
-+ hAllocation->BlockAllocUnmap();
-+ pBlock->Unmap(this, 1);
-+ }
-+ break;
-+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-+ hAllocation->DedicatedAllocUnmap(this);
-+ break;
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+}
-+
-+VkResult VmaAllocator_T::BindBufferMemory(
-+ VmaAllocation hAllocation,
-+ VkDeviceSize allocationLocalOffset,
-+ VkBuffer hBuffer,
-+ const void* pNext)
-+{
-+ VkResult res = VK_ERROR_UNKNOWN;
-+ switch(hAllocation->GetType())
-+ {
-+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-+ res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext);
-+ break;
-+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-+ {
-+ VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock();
-+ VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block.");
-+ res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext);
-+ break;
-+ }
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+ return res;
-+}
-+
-+VkResult VmaAllocator_T::BindImageMemory(
-+ VmaAllocation hAllocation,
-+ VkDeviceSize allocationLocalOffset,
-+ VkImage hImage,
-+ const void* pNext)
-+{
-+ VkResult res = VK_ERROR_UNKNOWN;
-+ switch(hAllocation->GetType())
-+ {
-+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-+ res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext);
-+ break;
-+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-+ {
-+ VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock();
-+ VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block.");
-+ res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext);
-+ break;
-+ }
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+ return res;
-+}
-+
-+VkResult VmaAllocator_T::FlushOrInvalidateAllocation(
-+ VmaAllocation hAllocation,
-+ VkDeviceSize offset, VkDeviceSize size,
-+ VMA_CACHE_OPERATION op)
-+{
-+ VkResult res = VK_SUCCESS;
-+
-+ VkMappedMemoryRange memRange = {};
-+ if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange))
-+ {
-+ switch(op)
-+ {
-+ case VMA_CACHE_FLUSH:
-+ res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange);
-+ break;
-+ case VMA_CACHE_INVALIDATE:
-+ res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, 1, &memRange);
-+ break;
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+ }
-+ // else: Just ignore this call.
-+ return res;
-+}
-+
-+VkResult VmaAllocator_T::FlushOrInvalidateAllocations(
-+ uint32_t allocationCount,
-+ const VmaAllocation* allocations,
-+ const VkDeviceSize* offsets, const VkDeviceSize* sizes,
-+ VMA_CACHE_OPERATION op)
-+{
-+ typedef VmaStlAllocator<VkMappedMemoryRange> RangeAllocator;
-+ typedef VmaSmallVector<VkMappedMemoryRange, RangeAllocator, 16> RangeVector;
-+ RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks()));
-+
-+ for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex)
-+ {
-+ const VmaAllocation alloc = allocations[allocIndex];
-+ const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0;
-+ const VkDeviceSize size = sizes != VMA_NULL ? sizes[allocIndex] : VK_WHOLE_SIZE;
-+ VkMappedMemoryRange newRange;
-+ if(GetFlushOrInvalidateRange(alloc, offset, size, newRange))
-+ {
-+ ranges.push_back(newRange);
-+ }
-+ }
-+
-+ VkResult res = VK_SUCCESS;
-+ if(!ranges.empty())
-+ {
-+ switch(op)
-+ {
-+ case VMA_CACHE_FLUSH:
-+ res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
-+ break;
-+ case VMA_CACHE_INVALIDATE:
-+ res = (*GetVulkanFunctions().vkInvalidateMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data());
-+ break;
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+ }
-+ // else: Just ignore this call.
-+ return res;
-+}
-+
-+void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
-+{
-+ VMA_ASSERT(allocation && allocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED);
-+
-+ const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
-+ VmaPool parentPool = allocation->GetParentPool();
-+ if(parentPool == VK_NULL_HANDLE)
-+ {
-+ // Default pool
-+ m_DedicatedAllocations[memTypeIndex].Unregister(allocation);
-+ }
-+ else
-+ {
-+ // Custom pool
-+ parentPool->m_DedicatedAllocations.Unregister(allocation);
-+ }
-+
-+ VkDeviceMemory hMemory = allocation->GetMemory();
-+
-+ /*
-+ There is no need to call this, because Vulkan spec allows to skip vkUnmapMemory
-+ before vkFreeMemory.
-+
-+ if(allocation->GetMappedData() != VMA_NULL)
-+ {
-+ (*m_VulkanFunctions.vkUnmapMemory)(m_hDevice, hMemory);
-+ }
-+ */
-+
-+ FreeVulkanMemory(memTypeIndex, allocation->GetSize(), hMemory);
-+
-+ m_Budget.RemoveAllocation(MemoryTypeIndexToHeapIndex(allocation->GetMemoryTypeIndex()), allocation->GetSize());
-+ m_AllocationObjectAllocator.Free(allocation);
-+
-+ VMA_DEBUG_LOG_FORMAT(" Freed DedicatedMemory MemoryTypeIndex=%u", memTypeIndex);
-+}
-+
-+uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const
-+{
-+ VkBufferCreateInfo dummyBufCreateInfo;
-+ VmaFillGpuDefragmentationBufferCreateInfo(dummyBufCreateInfo);
-+
-+ uint32_t memoryTypeBits = 0;
-+
-+ // Create buffer.
-+ VkBuffer buf = VK_NULL_HANDLE;
-+ VkResult res = (*GetVulkanFunctions().vkCreateBuffer)(
-+ m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf);
-+ if(res == VK_SUCCESS)
-+ {
-+ // Query for supported memory types.
-+ VkMemoryRequirements memReq;
-+ (*GetVulkanFunctions().vkGetBufferMemoryRequirements)(m_hDevice, buf, &memReq);
-+ memoryTypeBits = memReq.memoryTypeBits;
-+
-+ // Destroy buffer.
-+ (*GetVulkanFunctions().vkDestroyBuffer)(m_hDevice, buf, GetAllocationCallbacks());
-+ }
-+
-+ return memoryTypeBits;
-+}
-+
-+uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const
-+{
-+ // Make sure memory information is already fetched.
-+ VMA_ASSERT(GetMemoryTypeCount() > 0);
-+
-+ uint32_t memoryTypeBits = UINT32_MAX;
-+
-+ if(!m_UseAmdDeviceCoherentMemory)
-+ {
-+ // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD.
-+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-+ {
-+ if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0)
-+ {
-+ memoryTypeBits &= ~(1u << memTypeIndex);
-+ }
-+ }
-+ }
-+
-+ return memoryTypeBits;
-+}
-+
-+bool VmaAllocator_T::GetFlushOrInvalidateRange(
-+ VmaAllocation allocation,
-+ VkDeviceSize offset, VkDeviceSize size,
-+ VkMappedMemoryRange& outRange) const
-+{
-+ const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
-+ if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex))
-+ {
-+ const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize;
-+ const VkDeviceSize allocationSize = allocation->GetSize();
-+ VMA_ASSERT(offset <= allocationSize);
-+
-+ outRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
-+ outRange.pNext = VMA_NULL;
-+ outRange.memory = allocation->GetMemory();
-+
-+ switch(allocation->GetType())
-+ {
-+ case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED:
-+ outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
-+ if(size == VK_WHOLE_SIZE)
-+ {
-+ outRange.size = allocationSize - outRange.offset;
-+ }
-+ else
-+ {
-+ VMA_ASSERT(offset + size <= allocationSize);
-+ outRange.size = VMA_MIN(
-+ VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize),
-+ allocationSize - outRange.offset);
-+ }
-+ break;
-+ case VmaAllocation_T::ALLOCATION_TYPE_BLOCK:
-+ {
-+ // 1. Still within this allocation.
-+ outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize);
-+ if(size == VK_WHOLE_SIZE)
-+ {
-+ size = allocationSize - offset;
-+ }
-+ else
-+ {
-+ VMA_ASSERT(offset + size <= allocationSize);
-+ }
-+ outRange.size = VmaAlignUp(size + (offset - outRange.offset), nonCoherentAtomSize);
-+
-+ // 2. Adjust to whole block.
-+ const VkDeviceSize allocationOffset = allocation->GetOffset();
-+ VMA_ASSERT(allocationOffset % nonCoherentAtomSize == 0);
-+ const VkDeviceSize blockSize = allocation->GetBlock()->m_pMetadata->GetSize();
-+ outRange.offset += allocationOffset;
-+ outRange.size = VMA_MIN(outRange.size, blockSize - outRange.offset);
-+
-+ break;
-+ }
-+ default:
-+ VMA_ASSERT(0);
-+ }
-+ return true;
-+ }
-+ return false;
-+}
-+
-+#if VMA_MEMORY_BUDGET
-+void VmaAllocator_T::UpdateVulkanBudget()
-+{
-+ VMA_ASSERT(m_UseExtMemoryBudget);
-+
-+ VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR };
-+
-+ VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT };
-+ VmaPnextChainPushFront(&memProps, &budgetProps);
-+
-+ GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps);
-+
-+ {
-+ VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex);
-+
-+ for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex)
-+ {
-+ m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex];
-+ m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex];
-+ m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load();
-+
-+ // Some bugged drivers return the budget incorrectly, e.g. 0 or much bigger than heap size.
-+ if(m_Budget.m_VulkanBudget[heapIndex] == 0)
-+ {
-+ m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics.
-+ }
-+ else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size)
-+ {
-+ m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size;
-+ }
-+ if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0)
-+ {
-+ m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex];
-+ }
-+ }
-+ m_Budget.m_OperationsSinceBudgetFetch = 0;
-+ }
-+}
-+#endif // VMA_MEMORY_BUDGET
-+
-+void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern)
-+{
-+ if(VMA_DEBUG_INITIALIZE_ALLOCATIONS &&
-+ hAllocation->IsMappingAllowed() &&
-+ (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
-+ {
-+ void* pData = VMA_NULL;
-+ VkResult res = Map(hAllocation, &pData);
-+ if(res == VK_SUCCESS)
-+ {
-+ memset(pData, (int)pattern, (size_t)hAllocation->GetSize());
-+ FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH);
-+ Unmap(hAllocation);
-+ }
-+ else
-+ {
-+ VMA_ASSERT(0 && "VMA_DEBUG_INITIALIZE_ALLOCATIONS is enabled, but couldn't map memory to fill allocation.");
-+ }
-+ }
-+}
-+
-+uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits()
-+{
-+ uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load();
-+ if(memoryTypeBits == UINT32_MAX)
-+ {
-+ memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits();
-+ m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits);
-+ }
-+ return memoryTypeBits;
-+}
-+
-+#if VMA_STATS_STRING_ENABLED
-+void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
-+{
-+ json.WriteString("DefaultPools");
-+ json.BeginObject();
-+ {
-+ for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-+ {
-+ VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex];
-+ VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex];
-+ if (pBlockVector != VMA_NULL)
-+ {
-+ json.BeginString("Type ");
-+ json.ContinueString(memTypeIndex);
-+ json.EndString();
-+ json.BeginObject();
-+ {
-+ json.WriteString("PreferredBlockSize");
-+ json.WriteNumber(pBlockVector->GetPreferredBlockSize());
-+
-+ json.WriteString("Blocks");
-+ pBlockVector->PrintDetailedMap(json);
-+
-+ json.WriteString("DedicatedAllocations");
-+ dedicatedAllocList.BuildStatsString(json);
-+ }
-+ json.EndObject();
-+ }
-+ }
-+ }
-+ json.EndObject();
-+
-+ json.WriteString("CustomPools");
-+ json.BeginObject();
-+ {
-+ VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
-+ if (!m_Pools.IsEmpty())
-+ {
-+ for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
-+ {
-+ bool displayType = true;
-+ size_t index = 0;
-+ for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
-+ {
-+ VmaBlockVector& blockVector = pool->m_BlockVector;
-+ if (blockVector.GetMemoryTypeIndex() == memTypeIndex)
-+ {
-+ if (displayType)
-+ {
-+ json.BeginString("Type ");
-+ json.ContinueString(memTypeIndex);
-+ json.EndString();
-+ json.BeginArray();
-+ displayType = false;
-+ }
-+
-+ json.BeginObject();
-+ {
-+ json.WriteString("Name");
-+ json.BeginString();
-+ json.ContinueString((uint64_t)index++);
-+ if (pool->GetName())
-+ {
-+ json.ContinueString(" - ");
-+ json.ContinueString(pool->GetName());
-+ }
-+ json.EndString();
-+
-+ json.WriteString("PreferredBlockSize");
-+ json.WriteNumber(blockVector.GetPreferredBlockSize());
-+
-+ json.WriteString("Blocks");
-+ blockVector.PrintDetailedMap(json);
-+
-+ json.WriteString("DedicatedAllocations");
-+ pool->m_DedicatedAllocations.BuildStatsString(json);
-+ }
-+ json.EndObject();
-+ }
-+ }
-+
-+ if (!displayType)
-+ json.EndArray();
-+ }
-+ }
-+ }
-+ json.EndObject();
-+}
-+#endif // VMA_STATS_STRING_ENABLED
-+#endif // _VMA_ALLOCATOR_T_FUNCTIONS
-+
-+
-+#ifndef _VMA_PUBLIC_INTERFACE
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator(
-+ const VmaAllocatorCreateInfo* pCreateInfo,
-+ VmaAllocator* pAllocator)
-+{
-+ VMA_ASSERT(pCreateInfo && pAllocator);
-+ VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 ||
-+ (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 3));
-+ VMA_DEBUG_LOG("vmaCreateAllocator");
-+ *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo);
-+ VkResult result = (*pAllocator)->Init(pCreateInfo);
-+ if(result < 0)
-+ {
-+ vma_delete(pCreateInfo->pAllocationCallbacks, *pAllocator);
-+ *pAllocator = VK_NULL_HANDLE;
-+ }
-+ return result;
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator(
-+ VmaAllocator allocator)
-+{
-+ if(allocator != VK_NULL_HANDLE)
-+ {
-+ VMA_DEBUG_LOG("vmaDestroyAllocator");
-+ VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; // Have to copy the callbacks when destroying.
-+ vma_delete(&allocationCallbacks, allocator);
-+ }
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo)
-+{
-+ VMA_ASSERT(allocator && pAllocatorInfo);
-+ pAllocatorInfo->instance = allocator->m_hInstance;
-+ pAllocatorInfo->physicalDevice = allocator->GetPhysicalDevice();
-+ pAllocatorInfo->device = allocator->m_hDevice;
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties(
-+ VmaAllocator allocator,
-+ const VkPhysicalDeviceProperties **ppPhysicalDeviceProperties)
-+{
-+ VMA_ASSERT(allocator && ppPhysicalDeviceProperties);
-+ *ppPhysicalDeviceProperties = &allocator->m_PhysicalDeviceProperties;
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties(
-+ VmaAllocator allocator,
-+ const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties)
-+{
-+ VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties);
-+ *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps;
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties(
-+ VmaAllocator allocator,
-+ uint32_t memoryTypeIndex,
-+ VkMemoryPropertyFlags* pFlags)
-+{
-+ VMA_ASSERT(allocator && pFlags);
-+ VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount());
-+ *pFlags = allocator->m_MemProps.memoryTypes[memoryTypeIndex].propertyFlags;
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex(
-+ VmaAllocator allocator,
-+ uint32_t frameIndex)
-+{
-+ VMA_ASSERT(allocator);
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ allocator->SetCurrentFrameIndex(frameIndex);
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics(
-+ VmaAllocator allocator,
-+ VmaTotalStatistics* pStats)
-+{
-+ VMA_ASSERT(allocator && pStats);
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+ allocator->CalculateStatistics(pStats);
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets(
-+ VmaAllocator allocator,
-+ VmaBudget* pBudgets)
-+{
-+ VMA_ASSERT(allocator && pBudgets);
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+ allocator->GetHeapBudgets(pBudgets, 0, allocator->GetMemoryHeapCount());
-+}
-+
-+#if VMA_STATS_STRING_ENABLED
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
-+ VmaAllocator allocator,
-+ char** ppStatsString,
-+ VkBool32 detailedMap)
-+{
-+ VMA_ASSERT(allocator && ppStatsString);
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ VmaStringBuilder sb(allocator->GetAllocationCallbacks());
-+ {
-+ VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
-+ allocator->GetHeapBudgets(budgets, 0, allocator->GetMemoryHeapCount());
-+
-+ VmaTotalStatistics stats;
-+ allocator->CalculateStatistics(&stats);
-+
-+ VmaJsonWriter json(allocator->GetAllocationCallbacks(), sb);
-+ json.BeginObject();
-+ {
-+ json.WriteString("General");
-+ json.BeginObject();
-+ {
-+ const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties;
-+ const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps;
-+
-+ json.WriteString("API");
-+ json.WriteString("Vulkan");
-+
-+ json.WriteString("apiVersion");
-+ json.BeginString();
-+ json.ContinueString(VK_VERSION_MAJOR(deviceProperties.apiVersion));
-+ json.ContinueString(".");
-+ json.ContinueString(VK_VERSION_MINOR(deviceProperties.apiVersion));
-+ json.ContinueString(".");
-+ json.ContinueString(VK_VERSION_PATCH(deviceProperties.apiVersion));
-+ json.EndString();
-+
-+ json.WriteString("GPU");
-+ json.WriteString(deviceProperties.deviceName);
-+ json.WriteString("deviceType");
-+ json.WriteNumber(static_cast<uint32_t>(deviceProperties.deviceType));
-+
-+ json.WriteString("maxMemoryAllocationCount");
-+ json.WriteNumber(deviceProperties.limits.maxMemoryAllocationCount);
-+ json.WriteString("bufferImageGranularity");
-+ json.WriteNumber(deviceProperties.limits.bufferImageGranularity);
-+ json.WriteString("nonCoherentAtomSize");
-+ json.WriteNumber(deviceProperties.limits.nonCoherentAtomSize);
-+
-+ json.WriteString("memoryHeapCount");
-+ json.WriteNumber(memoryProperties.memoryHeapCount);
-+ json.WriteString("memoryTypeCount");
-+ json.WriteNumber(memoryProperties.memoryTypeCount);
-+ }
-+ json.EndObject();
-+ }
-+ {
-+ json.WriteString("Total");
-+ VmaPrintDetailedStatistics(json, stats.total);
-+ }
-+ {
-+ json.WriteString("MemoryInfo");
-+ json.BeginObject();
-+ {
-+ for (uint32_t heapIndex = 0; heapIndex < allocator->GetMemoryHeapCount(); ++heapIndex)
-+ {
-+ json.BeginString("Heap ");
-+ json.ContinueString(heapIndex);
-+ json.EndString();
-+ json.BeginObject();
-+ {
-+ const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex];
-+ json.WriteString("Flags");
-+ json.BeginArray(true);
-+ {
-+ if (heapInfo.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT)
-+ json.WriteString("DEVICE_LOCAL");
-+ #if VMA_VULKAN_VERSION >= 1001000
-+ if (heapInfo.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT)
-+ json.WriteString("MULTI_INSTANCE");
-+ #endif
-+
-+ VkMemoryHeapFlags flags = heapInfo.flags &
-+ ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT
-+ #if VMA_VULKAN_VERSION >= 1001000
-+ | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT
-+ #endif
-+ );
-+ if (flags != 0)
-+ json.WriteNumber(flags);
-+ }
-+ json.EndArray();
-+
-+ json.WriteString("Size");
-+ json.WriteNumber(heapInfo.size);
-+
-+ json.WriteString("Budget");
-+ json.BeginObject();
-+ {
-+ json.WriteString("BudgetBytes");
-+ json.WriteNumber(budgets[heapIndex].budget);
-+ json.WriteString("UsageBytes");
-+ json.WriteNumber(budgets[heapIndex].usage);
-+ }
-+ json.EndObject();
-+
-+ json.WriteString("Stats");
-+ VmaPrintDetailedStatistics(json, stats.memoryHeap[heapIndex]);
-+
-+ json.WriteString("MemoryPools");
-+ json.BeginObject();
-+ {
-+ for (uint32_t typeIndex = 0; typeIndex < allocator->GetMemoryTypeCount(); ++typeIndex)
-+ {
-+ if (allocator->MemoryTypeIndexToHeapIndex(typeIndex) == heapIndex)
-+ {
-+ json.BeginString("Type ");
-+ json.ContinueString(typeIndex);
-+ json.EndString();
-+ json.BeginObject();
-+ {
-+ json.WriteString("Flags");
-+ json.BeginArray(true);
-+ {
-+ VkMemoryPropertyFlags flags = allocator->m_MemProps.memoryTypes[typeIndex].propertyFlags;
-+ if (flags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)
-+ json.WriteString("DEVICE_LOCAL");
-+ if (flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
-+ json.WriteString("HOST_VISIBLE");
-+ if (flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)
-+ json.WriteString("HOST_COHERENT");
-+ if (flags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT)
-+ json.WriteString("HOST_CACHED");
-+ if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT)
-+ json.WriteString("LAZILY_ALLOCATED");
-+ #if VMA_VULKAN_VERSION >= 1001000
-+ if (flags & VK_MEMORY_PROPERTY_PROTECTED_BIT)
-+ json.WriteString("PROTECTED");
-+ #endif
-+ #if VK_AMD_device_coherent_memory
-+ if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY)
-+ json.WriteString("DEVICE_COHERENT_AMD");
-+ if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)
-+ json.WriteString("DEVICE_UNCACHED_AMD");
-+ #endif
-+
-+ flags &= ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
-+ #if VMA_VULKAN_VERSION >= 1001000
-+ | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT
-+ #endif
-+ #if VK_AMD_device_coherent_memory
-+ | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY
-+ | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY
-+ #endif
-+ | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
-+ | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT
-+ | VK_MEMORY_PROPERTY_HOST_CACHED_BIT);
-+ if (flags != 0)
-+ json.WriteNumber(flags);
-+ }
-+ json.EndArray();
-+
-+ json.WriteString("Stats");
-+ VmaPrintDetailedStatistics(json, stats.memoryType[typeIndex]);
-+ }
-+ json.EndObject();
-+ }
-+ }
-+
-+ }
-+ json.EndObject();
-+ }
-+ json.EndObject();
-+ }
-+ }
-+ json.EndObject();
-+ }
-+
-+ if (detailedMap == VK_TRUE)
-+ allocator->PrintDetailedMap(json);
-+
-+ json.EndObject();
-+ }
-+
-+ *ppStatsString = VmaCreateStringCopy(allocator->GetAllocationCallbacks(), sb.GetData(), sb.GetLength());
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
-+ VmaAllocator allocator,
-+ char* pStatsString)
-+{
-+ if(pStatsString != VMA_NULL)
-+ {
-+ VMA_ASSERT(allocator);
-+ VmaFreeString(allocator->GetAllocationCallbacks(), pStatsString);
-+ }
-+}
-+
-+#endif // VMA_STATS_STRING_ENABLED
-+
-+/*
-+This function is not protected by any mutex because it just reads immutable data.
-+*/
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex(
-+ VmaAllocator allocator,
-+ uint32_t memoryTypeBits,
-+ const VmaAllocationCreateInfo* pAllocationCreateInfo,
-+ uint32_t* pMemoryTypeIndex)
-+{
-+ VMA_ASSERT(allocator != VK_NULL_HANDLE);
-+ VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
-+ VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
-+
-+ return allocator->FindMemoryTypeIndex(memoryTypeBits, pAllocationCreateInfo, UINT32_MAX, pMemoryTypeIndex);
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo(
-+ VmaAllocator allocator,
-+ const VkBufferCreateInfo* pBufferCreateInfo,
-+ const VmaAllocationCreateInfo* pAllocationCreateInfo,
-+ uint32_t* pMemoryTypeIndex)
-+{
-+ VMA_ASSERT(allocator != VK_NULL_HANDLE);
-+ VMA_ASSERT(pBufferCreateInfo != VMA_NULL);
-+ VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
-+ VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
-+
-+ const VkDevice hDev = allocator->m_hDevice;
-+ const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions();
-+ VkResult res;
-+
-+#if VMA_VULKAN_VERSION >= 1003000
-+ if(funcs->vkGetDeviceBufferMemoryRequirements)
-+ {
-+ // Can query straight from VkBufferCreateInfo :)
-+ VkDeviceBufferMemoryRequirements devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS};
-+ devBufMemReq.pCreateInfo = pBufferCreateInfo;
-+
-+ VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};
-+ (*funcs->vkGetDeviceBufferMemoryRequirements)(hDev, &devBufMemReq, &memReq);
-+
-+ res = allocator->FindMemoryTypeIndex(
-+ memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex);
-+ }
-+ else
-+#endif // #if VMA_VULKAN_VERSION >= 1003000
-+ {
-+ // Must create a dummy buffer to query :(
-+ VkBuffer hBuffer = VK_NULL_HANDLE;
-+ res = funcs->vkCreateBuffer(
-+ hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer);
-+ if(res == VK_SUCCESS)
-+ {
-+ VkMemoryRequirements memReq = {};
-+ funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, &memReq);
-+
-+ res = allocator->FindMemoryTypeIndex(
-+ memReq.memoryTypeBits, pAllocationCreateInfo, pBufferCreateInfo->usage, pMemoryTypeIndex);
-+
-+ funcs->vkDestroyBuffer(
-+ hDev, hBuffer, allocator->GetAllocationCallbacks());
-+ }
-+ }
-+ return res;
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo(
-+ VmaAllocator allocator,
-+ const VkImageCreateInfo* pImageCreateInfo,
-+ const VmaAllocationCreateInfo* pAllocationCreateInfo,
-+ uint32_t* pMemoryTypeIndex)
-+{
-+ VMA_ASSERT(allocator != VK_NULL_HANDLE);
-+ VMA_ASSERT(pImageCreateInfo != VMA_NULL);
-+ VMA_ASSERT(pAllocationCreateInfo != VMA_NULL);
-+ VMA_ASSERT(pMemoryTypeIndex != VMA_NULL);
-+
-+ const VkDevice hDev = allocator->m_hDevice;
-+ const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions();
-+ VkResult res;
-+
-+#if VMA_VULKAN_VERSION >= 1003000
-+ if(funcs->vkGetDeviceImageMemoryRequirements)
-+ {
-+ // Can query straight from VkImageCreateInfo :)
-+ VkDeviceImageMemoryRequirements devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS};
-+ devImgMemReq.pCreateInfo = pImageCreateInfo;
-+ VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 &&
-+ "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect.");
-+
-+ VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2};
-+ (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq);
-+
-+ res = allocator->FindMemoryTypeIndex(
-+ memReq.memoryRequirements.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex);
-+ }
-+ else
-+#endif // #if VMA_VULKAN_VERSION >= 1003000
-+ {
-+ // Must create a dummy image to query :(
-+ VkImage hImage = VK_NULL_HANDLE;
-+ res = funcs->vkCreateImage(
-+ hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage);
-+ if(res == VK_SUCCESS)
-+ {
-+ VkMemoryRequirements memReq = {};
-+ funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq);
-+
-+ res = allocator->FindMemoryTypeIndex(
-+ memReq.memoryTypeBits, pAllocationCreateInfo, pImageCreateInfo->usage, pMemoryTypeIndex);
-+
-+ funcs->vkDestroyImage(
-+ hDev, hImage, allocator->GetAllocationCallbacks());
-+ }
-+ }
-+ return res;
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool(
-+ VmaAllocator allocator,
-+ const VmaPoolCreateInfo* pCreateInfo,
-+ VmaPool* pPool)
-+{
-+ VMA_ASSERT(allocator && pCreateInfo && pPool);
-+
-+ VMA_DEBUG_LOG("vmaCreatePool");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ return allocator->CreatePool(pCreateInfo, pPool);
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool(
-+ VmaAllocator allocator,
-+ VmaPool pool)
-+{
-+ VMA_ASSERT(allocator);
-+
-+ if(pool == VK_NULL_HANDLE)
-+ {
-+ return;
-+ }
-+
-+ VMA_DEBUG_LOG("vmaDestroyPool");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ allocator->DestroyPool(pool);
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics(
-+ VmaAllocator allocator,
-+ VmaPool pool,
-+ VmaStatistics* pPoolStats)
-+{
-+ VMA_ASSERT(allocator && pool && pPoolStats);
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ allocator->GetPoolStatistics(pool, pPoolStats);
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics(
-+ VmaAllocator allocator,
-+ VmaPool pool,
-+ VmaDetailedStatistics* pPoolStats)
-+{
-+ VMA_ASSERT(allocator && pool && pPoolStats);
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ allocator->CalculatePoolStatistics(pool, pPoolStats);
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocator, VmaPool pool)
-+{
-+ VMA_ASSERT(allocator && pool);
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ VMA_DEBUG_LOG("vmaCheckPoolCorruption");
-+
-+ return allocator->CheckPoolCorruption(pool);
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName(
-+ VmaAllocator allocator,
-+ VmaPool pool,
-+ const char** ppName)
-+{
-+ VMA_ASSERT(allocator && pool && ppName);
-+
-+ VMA_DEBUG_LOG("vmaGetPoolName");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ *ppName = pool->GetName();
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName(
-+ VmaAllocator allocator,
-+ VmaPool pool,
-+ const char* pName)
-+{
-+ VMA_ASSERT(allocator && pool);
-+
-+ VMA_DEBUG_LOG("vmaSetPoolName");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ pool->SetName(pName);
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory(
-+ VmaAllocator allocator,
-+ const VkMemoryRequirements* pVkMemoryRequirements,
-+ const VmaAllocationCreateInfo* pCreateInfo,
-+ VmaAllocation* pAllocation,
-+ VmaAllocationInfo* pAllocationInfo)
-+{
-+ VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation);
-+
-+ VMA_DEBUG_LOG("vmaAllocateMemory");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ VkResult result = allocator->AllocateMemory(
-+ *pVkMemoryRequirements,
-+ false, // requiresDedicatedAllocation
-+ false, // prefersDedicatedAllocation
-+ VK_NULL_HANDLE, // dedicatedBuffer
-+ VK_NULL_HANDLE, // dedicatedImage
-+ UINT32_MAX, // dedicatedBufferImageUsage
-+ *pCreateInfo,
-+ VMA_SUBALLOCATION_TYPE_UNKNOWN,
-+ 1, // allocationCount
-+ pAllocation);
-+
-+ if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
-+ {
-+ allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-+ }
-+
-+ return result;
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages(
-+ VmaAllocator allocator,
-+ const VkMemoryRequirements* pVkMemoryRequirements,
-+ const VmaAllocationCreateInfo* pCreateInfo,
-+ size_t allocationCount,
-+ VmaAllocation* pAllocations,
-+ VmaAllocationInfo* pAllocationInfo)
-+{
-+ if(allocationCount == 0)
-+ {
-+ return VK_SUCCESS;
-+ }
-+
-+ VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocations);
-+
-+ VMA_DEBUG_LOG("vmaAllocateMemoryPages");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ VkResult result = allocator->AllocateMemory(
-+ *pVkMemoryRequirements,
-+ false, // requiresDedicatedAllocation
-+ false, // prefersDedicatedAllocation
-+ VK_NULL_HANDLE, // dedicatedBuffer
-+ VK_NULL_HANDLE, // dedicatedImage
-+ UINT32_MAX, // dedicatedBufferImageUsage
-+ *pCreateInfo,
-+ VMA_SUBALLOCATION_TYPE_UNKNOWN,
-+ allocationCount,
-+ pAllocations);
-+
-+ if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS)
-+ {
-+ for(size_t i = 0; i < allocationCount; ++i)
-+ {
-+ allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i);
-+ }
-+ }
-+
-+ return result;
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer(
-+ VmaAllocator allocator,
-+ VkBuffer buffer,
-+ const VmaAllocationCreateInfo* pCreateInfo,
-+ VmaAllocation* pAllocation,
-+ VmaAllocationInfo* pAllocationInfo)
-+{
-+ VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation);
-+
-+ VMA_DEBUG_LOG("vmaAllocateMemoryForBuffer");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ VkMemoryRequirements vkMemReq = {};
-+ bool requiresDedicatedAllocation = false;
-+ bool prefersDedicatedAllocation = false;
-+ allocator->GetBufferMemoryRequirements(buffer, vkMemReq,
-+ requiresDedicatedAllocation,
-+ prefersDedicatedAllocation);
-+
-+ VkResult result = allocator->AllocateMemory(
-+ vkMemReq,
-+ requiresDedicatedAllocation,
-+ prefersDedicatedAllocation,
-+ buffer, // dedicatedBuffer
-+ VK_NULL_HANDLE, // dedicatedImage
-+ UINT32_MAX, // dedicatedBufferImageUsage
-+ *pCreateInfo,
-+ VMA_SUBALLOCATION_TYPE_BUFFER,
-+ 1, // allocationCount
-+ pAllocation);
-+
-+ if(pAllocationInfo && result == VK_SUCCESS)
-+ {
-+ allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-+ }
-+
-+ return result;
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage(
-+ VmaAllocator allocator,
-+ VkImage image,
-+ const VmaAllocationCreateInfo* pCreateInfo,
-+ VmaAllocation* pAllocation,
-+ VmaAllocationInfo* pAllocationInfo)
-+{
-+ VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation);
-+
-+ VMA_DEBUG_LOG("vmaAllocateMemoryForImage");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ VkMemoryRequirements vkMemReq = {};
-+ bool requiresDedicatedAllocation = false;
-+ bool prefersDedicatedAllocation = false;
-+ allocator->GetImageMemoryRequirements(image, vkMemReq,
-+ requiresDedicatedAllocation, prefersDedicatedAllocation);
-+
-+ VkResult result = allocator->AllocateMemory(
-+ vkMemReq,
-+ requiresDedicatedAllocation,
-+ prefersDedicatedAllocation,
-+ VK_NULL_HANDLE, // dedicatedBuffer
-+ image, // dedicatedImage
-+ UINT32_MAX, // dedicatedBufferImageUsage
-+ *pCreateInfo,
-+ VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN,
-+ 1, // allocationCount
-+ pAllocation);
-+
-+ if(pAllocationInfo && result == VK_SUCCESS)
-+ {
-+ allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-+ }
-+
-+ return result;
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory(
-+ VmaAllocator allocator,
-+ VmaAllocation allocation)
-+{
-+ VMA_ASSERT(allocator);
-+
-+ if(allocation == VK_NULL_HANDLE)
-+ {
-+ return;
-+ }
-+
-+ VMA_DEBUG_LOG("vmaFreeMemory");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ allocator->FreeMemory(
-+ 1, // allocationCount
-+ &allocation);
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages(
-+ VmaAllocator allocator,
-+ size_t allocationCount,
-+ const VmaAllocation* pAllocations)
-+{
-+ if(allocationCount == 0)
-+ {
-+ return;
-+ }
-+
-+ VMA_ASSERT(allocator);
-+
-+ VMA_DEBUG_LOG("vmaFreeMemoryPages");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ allocator->FreeMemory(allocationCount, pAllocations);
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo(
-+ VmaAllocator allocator,
-+ VmaAllocation allocation,
-+ VmaAllocationInfo* pAllocationInfo)
-+{
-+ VMA_ASSERT(allocator && allocation && pAllocationInfo);
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ allocator->GetAllocationInfo(allocation, pAllocationInfo);
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData(
-+ VmaAllocator allocator,
-+ VmaAllocation allocation,
-+ void* pUserData)
-+{
-+ VMA_ASSERT(allocator && allocation);
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ allocation->SetUserData(allocator, pUserData);
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ const char* VMA_NULLABLE pName)
-+{
-+ allocation->SetName(allocator, pName);
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ VkMemoryPropertyFlags* VMA_NOT_NULL pFlags)
-+{
-+ VMA_ASSERT(allocator && allocation && pFlags);
-+ const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
-+ *pFlags = allocator->m_MemProps.memoryTypes[memTypeIndex].propertyFlags;
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory(
-+ VmaAllocator allocator,
-+ VmaAllocation allocation,
-+ void** ppData)
-+{
-+ VMA_ASSERT(allocator && allocation && ppData);
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ return allocator->Map(allocation, ppData);
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory(
-+ VmaAllocator allocator,
-+ VmaAllocation allocation)
-+{
-+ VMA_ASSERT(allocator && allocation);
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ allocator->Unmap(allocation);
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation(
-+ VmaAllocator allocator,
-+ VmaAllocation allocation,
-+ VkDeviceSize offset,
-+ VkDeviceSize size)
-+{
-+ VMA_ASSERT(allocator && allocation);
-+
-+ VMA_DEBUG_LOG("vmaFlushAllocation");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_FLUSH);
-+
-+ return res;
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation(
-+ VmaAllocator allocator,
-+ VmaAllocation allocation,
-+ VkDeviceSize offset,
-+ VkDeviceSize size)
-+{
-+ VMA_ASSERT(allocator && allocation);
-+
-+ VMA_DEBUG_LOG("vmaInvalidateAllocation");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ const VkResult res = allocator->FlushOrInvalidateAllocation(allocation, offset, size, VMA_CACHE_INVALIDATE);
-+
-+ return res;
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations(
-+ VmaAllocator allocator,
-+ uint32_t allocationCount,
-+ const VmaAllocation* allocations,
-+ const VkDeviceSize* offsets,
-+ const VkDeviceSize* sizes)
-+{
-+ VMA_ASSERT(allocator);
-+
-+ if(allocationCount == 0)
-+ {
-+ return VK_SUCCESS;
-+ }
-+
-+ VMA_ASSERT(allocations);
-+
-+ VMA_DEBUG_LOG("vmaFlushAllocations");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_FLUSH);
-+
-+ return res;
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations(
-+ VmaAllocator allocator,
-+ uint32_t allocationCount,
-+ const VmaAllocation* allocations,
-+ const VkDeviceSize* offsets,
-+ const VkDeviceSize* sizes)
-+{
-+ VMA_ASSERT(allocator);
-+
-+ if(allocationCount == 0)
-+ {
-+ return VK_SUCCESS;
-+ }
-+
-+ VMA_ASSERT(allocations);
-+
-+ VMA_DEBUG_LOG("vmaInvalidateAllocations");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ const VkResult res = allocator->FlushOrInvalidateAllocations(allocationCount, allocations, offsets, sizes, VMA_CACHE_INVALIDATE);
-+
-+ return res;
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption(
-+ VmaAllocator allocator,
-+ uint32_t memoryTypeBits)
-+{
-+ VMA_ASSERT(allocator);
-+
-+ VMA_DEBUG_LOG("vmaCheckCorruption");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ return allocator->CheckCorruption(memoryTypeBits);
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation(
-+ VmaAllocator allocator,
-+ const VmaDefragmentationInfo* pInfo,
-+ VmaDefragmentationContext* pContext)
-+{
-+ VMA_ASSERT(allocator && pInfo && pContext);
-+
-+ VMA_DEBUG_LOG("vmaBeginDefragmentation");
-+
-+ if (pInfo->pool != VMA_NULL)
-+ {
-+ // Check if run on supported algorithms
-+ if (pInfo->pool->m_BlockVector.GetAlgorithm() & VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT)
-+ return VK_ERROR_FEATURE_NOT_PRESENT;
-+ }
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ *pContext = vma_new(allocator, VmaDefragmentationContext_T)(allocator, *pInfo);
-+ return VK_SUCCESS;
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation(
-+ VmaAllocator allocator,
-+ VmaDefragmentationContext context,
-+ VmaDefragmentationStats* pStats)
-+{
-+ VMA_ASSERT(allocator && context);
-+
-+ VMA_DEBUG_LOG("vmaEndDefragmentation");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ if (pStats)
-+ context->GetStats(*pStats);
-+ vma_delete(allocator, context);
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaDefragmentationContext VMA_NOT_NULL context,
-+ VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo)
-+{
-+ VMA_ASSERT(context && pPassInfo);
-+
-+ VMA_DEBUG_LOG("vmaBeginDefragmentationPass");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ return context->DefragmentPassBegin(*pPassInfo);
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaDefragmentationContext VMA_NOT_NULL context,
-+ VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo)
-+{
-+ VMA_ASSERT(context && pPassInfo);
-+
-+ VMA_DEBUG_LOG("vmaEndDefragmentationPass");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ return context->DefragmentPassEnd(*pPassInfo);
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory(
-+ VmaAllocator allocator,
-+ VmaAllocation allocation,
-+ VkBuffer buffer)
-+{
-+ VMA_ASSERT(allocator && allocation && buffer);
-+
-+ VMA_DEBUG_LOG("vmaBindBufferMemory");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ return allocator->BindBufferMemory(allocation, 0, buffer, VMA_NULL);
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2(
-+ VmaAllocator allocator,
-+ VmaAllocation allocation,
-+ VkDeviceSize allocationLocalOffset,
-+ VkBuffer buffer,
-+ const void* pNext)
-+{
-+ VMA_ASSERT(allocator && allocation && buffer);
-+
-+ VMA_DEBUG_LOG("vmaBindBufferMemory2");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ return allocator->BindBufferMemory(allocation, allocationLocalOffset, buffer, pNext);
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory(
-+ VmaAllocator allocator,
-+ VmaAllocation allocation,
-+ VkImage image)
-+{
-+ VMA_ASSERT(allocator && allocation && image);
-+
-+ VMA_DEBUG_LOG("vmaBindImageMemory");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ return allocator->BindImageMemory(allocation, 0, image, VMA_NULL);
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2(
-+ VmaAllocator allocator,
-+ VmaAllocation allocation,
-+ VkDeviceSize allocationLocalOffset,
-+ VkImage image,
-+ const void* pNext)
-+{
-+ VMA_ASSERT(allocator && allocation && image);
-+
-+ VMA_DEBUG_LOG("vmaBindImageMemory2");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext);
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer(
-+ VmaAllocator allocator,
-+ const VkBufferCreateInfo* pBufferCreateInfo,
-+ const VmaAllocationCreateInfo* pAllocationCreateInfo,
-+ VkBuffer* pBuffer,
-+ VmaAllocation* pAllocation,
-+ VmaAllocationInfo* pAllocationInfo)
-+{
-+ VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation);
-+
-+ if(pBufferCreateInfo->size == 0)
-+ {
-+ return VK_ERROR_INITIALIZATION_FAILED;
-+ }
-+ if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
-+ !allocator->m_UseKhrBufferDeviceAddress)
-+ {
-+ VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
-+ return VK_ERROR_INITIALIZATION_FAILED;
-+ }
-+
-+ VMA_DEBUG_LOG("vmaCreateBuffer");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ *pBuffer = VK_NULL_HANDLE;
-+ *pAllocation = VK_NULL_HANDLE;
-+
-+ // 1. Create VkBuffer.
-+ VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
-+ allocator->m_hDevice,
-+ pBufferCreateInfo,
-+ allocator->GetAllocationCallbacks(),
-+ pBuffer);
-+ if(res >= 0)
-+ {
-+ // 2. vkGetBufferMemoryRequirements.
-+ VkMemoryRequirements vkMemReq = {};
-+ bool requiresDedicatedAllocation = false;
-+ bool prefersDedicatedAllocation = false;
-+ allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
-+ requiresDedicatedAllocation, prefersDedicatedAllocation);
-+
-+ // 3. Allocate memory using allocator.
-+ res = allocator->AllocateMemory(
-+ vkMemReq,
-+ requiresDedicatedAllocation,
-+ prefersDedicatedAllocation,
-+ *pBuffer, // dedicatedBuffer
-+ VK_NULL_HANDLE, // dedicatedImage
-+ pBufferCreateInfo->usage, // dedicatedBufferImageUsage
-+ *pAllocationCreateInfo,
-+ VMA_SUBALLOCATION_TYPE_BUFFER,
-+ 1, // allocationCount
-+ pAllocation);
-+
-+ if(res >= 0)
-+ {
-+ // 3. Bind buffer with memory.
-+ if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
-+ {
-+ res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
-+ }
-+ if(res >= 0)
-+ {
-+ // All steps succeeded.
-+ #if VMA_STATS_STRING_ENABLED
-+ (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
-+ #endif
-+ if(pAllocationInfo != VMA_NULL)
-+ {
-+ allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-+ }
-+
-+ return VK_SUCCESS;
-+ }
-+ allocator->FreeMemory(
-+ 1, // allocationCount
-+ pAllocation);
-+ *pAllocation = VK_NULL_HANDLE;
-+ (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
-+ *pBuffer = VK_NULL_HANDLE;
-+ return res;
-+ }
-+ (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
-+ *pBuffer = VK_NULL_HANDLE;
-+ return res;
-+ }
-+ return res;
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment(
-+ VmaAllocator allocator,
-+ const VkBufferCreateInfo* pBufferCreateInfo,
-+ const VmaAllocationCreateInfo* pAllocationCreateInfo,
-+ VkDeviceSize minAlignment,
-+ VkBuffer* pBuffer,
-+ VmaAllocation* pAllocation,
-+ VmaAllocationInfo* pAllocationInfo)
-+{
-+ VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation);
-+
-+ if(pBufferCreateInfo->size == 0)
-+ {
-+ return VK_ERROR_INITIALIZATION_FAILED;
-+ }
-+ if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
-+ !allocator->m_UseKhrBufferDeviceAddress)
-+ {
-+ VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
-+ return VK_ERROR_INITIALIZATION_FAILED;
-+ }
-+
-+ VMA_DEBUG_LOG("vmaCreateBufferWithAlignment");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ *pBuffer = VK_NULL_HANDLE;
-+ *pAllocation = VK_NULL_HANDLE;
-+
-+ // 1. Create VkBuffer.
-+ VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
-+ allocator->m_hDevice,
-+ pBufferCreateInfo,
-+ allocator->GetAllocationCallbacks(),
-+ pBuffer);
-+ if(res >= 0)
-+ {
-+ // 2. vkGetBufferMemoryRequirements.
-+ VkMemoryRequirements vkMemReq = {};
-+ bool requiresDedicatedAllocation = false;
-+ bool prefersDedicatedAllocation = false;
-+ allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq,
-+ requiresDedicatedAllocation, prefersDedicatedAllocation);
-+
-+ // 2a. Include minAlignment
-+ vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment);
-+
-+ // 3. Allocate memory using allocator.
-+ res = allocator->AllocateMemory(
-+ vkMemReq,
-+ requiresDedicatedAllocation,
-+ prefersDedicatedAllocation,
-+ *pBuffer, // dedicatedBuffer
-+ VK_NULL_HANDLE, // dedicatedImage
-+ pBufferCreateInfo->usage, // dedicatedBufferImageUsage
-+ *pAllocationCreateInfo,
-+ VMA_SUBALLOCATION_TYPE_BUFFER,
-+ 1, // allocationCount
-+ pAllocation);
-+
-+ if(res >= 0)
-+ {
-+ // 3. Bind buffer with memory.
-+ if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
-+ {
-+ res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL);
-+ }
-+ if(res >= 0)
-+ {
-+ // All steps succeeded.
-+ #if VMA_STATS_STRING_ENABLED
-+ (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage);
-+ #endif
-+ if(pAllocationInfo != VMA_NULL)
-+ {
-+ allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-+ }
-+
-+ return VK_SUCCESS;
-+ }
-+ allocator->FreeMemory(
-+ 1, // allocationCount
-+ pAllocation);
-+ *pAllocation = VK_NULL_HANDLE;
-+ (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
-+ *pBuffer = VK_NULL_HANDLE;
-+ return res;
-+ }
-+ (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
-+ *pBuffer = VK_NULL_HANDLE;
-+ return res;
-+ }
-+ return res;
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
-+ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer)
-+{
-+ return vmaCreateAliasingBuffer2(allocator, allocation, 0, pBufferCreateInfo, pBuffer);
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ VkDeviceSize allocationLocalOffset,
-+ const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo,
-+ VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer)
-+{
-+ VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation);
-+ VMA_ASSERT(allocationLocalOffset + pBufferCreateInfo->size <= allocation->GetSize());
-+
-+ VMA_DEBUG_LOG("vmaCreateAliasingBuffer2");
-+
-+ *pBuffer = VK_NULL_HANDLE;
-+
-+ if (pBufferCreateInfo->size == 0)
-+ {
-+ return VK_ERROR_INITIALIZATION_FAILED;
-+ }
-+ if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 &&
-+ !allocator->m_UseKhrBufferDeviceAddress)
-+ {
-+ VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used.");
-+ return VK_ERROR_INITIALIZATION_FAILED;
-+ }
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ // 1. Create VkBuffer.
-+ VkResult res = (*allocator->GetVulkanFunctions().vkCreateBuffer)(
-+ allocator->m_hDevice,
-+ pBufferCreateInfo,
-+ allocator->GetAllocationCallbacks(),
-+ pBuffer);
-+ if (res >= 0)
-+ {
-+ // 2. Bind buffer with memory.
-+ res = allocator->BindBufferMemory(allocation, allocationLocalOffset, *pBuffer, VMA_NULL);
-+ if (res >= 0)
-+ {
-+ return VK_SUCCESS;
-+ }
-+ (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, *pBuffer, allocator->GetAllocationCallbacks());
-+ }
-+ return res;
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer(
-+ VmaAllocator allocator,
-+ VkBuffer buffer,
-+ VmaAllocation allocation)
-+{
-+ VMA_ASSERT(allocator);
-+
-+ if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
-+ {
-+ return;
-+ }
-+
-+ VMA_DEBUG_LOG("vmaDestroyBuffer");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ if(buffer != VK_NULL_HANDLE)
-+ {
-+ (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks());
-+ }
-+
-+ if(allocation != VK_NULL_HANDLE)
-+ {
-+ allocator->FreeMemory(
-+ 1, // allocationCount
-+ &allocation);
-+ }
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage(
-+ VmaAllocator allocator,
-+ const VkImageCreateInfo* pImageCreateInfo,
-+ const VmaAllocationCreateInfo* pAllocationCreateInfo,
-+ VkImage* pImage,
-+ VmaAllocation* pAllocation,
-+ VmaAllocationInfo* pAllocationInfo)
-+{
-+ VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation);
-+
-+ if(pImageCreateInfo->extent.width == 0 ||
-+ pImageCreateInfo->extent.height == 0 ||
-+ pImageCreateInfo->extent.depth == 0 ||
-+ pImageCreateInfo->mipLevels == 0 ||
-+ pImageCreateInfo->arrayLayers == 0)
-+ {
-+ return VK_ERROR_INITIALIZATION_FAILED;
-+ }
-+
-+ VMA_DEBUG_LOG("vmaCreateImage");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ *pImage = VK_NULL_HANDLE;
-+ *pAllocation = VK_NULL_HANDLE;
-+
-+ // 1. Create VkImage.
-+ VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
-+ allocator->m_hDevice,
-+ pImageCreateInfo,
-+ allocator->GetAllocationCallbacks(),
-+ pImage);
-+ if(res >= 0)
-+ {
-+ VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ?
-+ VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL :
-+ VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR;
-+
-+ // 2. Allocate memory using allocator.
-+ VkMemoryRequirements vkMemReq = {};
-+ bool requiresDedicatedAllocation = false;
-+ bool prefersDedicatedAllocation = false;
-+ allocator->GetImageMemoryRequirements(*pImage, vkMemReq,
-+ requiresDedicatedAllocation, prefersDedicatedAllocation);
-+
-+ res = allocator->AllocateMemory(
-+ vkMemReq,
-+ requiresDedicatedAllocation,
-+ prefersDedicatedAllocation,
-+ VK_NULL_HANDLE, // dedicatedBuffer
-+ *pImage, // dedicatedImage
-+ pImageCreateInfo->usage, // dedicatedBufferImageUsage
-+ *pAllocationCreateInfo,
-+ suballocType,
-+ 1, // allocationCount
-+ pAllocation);
-+
-+ if(res >= 0)
-+ {
-+ // 3. Bind image with memory.
-+ if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0)
-+ {
-+ res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL);
-+ }
-+ if(res >= 0)
-+ {
-+ // All steps succeeded.
-+ #if VMA_STATS_STRING_ENABLED
-+ (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage);
-+ #endif
-+ if(pAllocationInfo != VMA_NULL)
-+ {
-+ allocator->GetAllocationInfo(*pAllocation, pAllocationInfo);
-+ }
-+
-+ return VK_SUCCESS;
-+ }
-+ allocator->FreeMemory(
-+ 1, // allocationCount
-+ pAllocation);
-+ *pAllocation = VK_NULL_HANDLE;
-+ (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
-+ *pImage = VK_NULL_HANDLE;
-+ return res;
-+ }
-+ (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
-+ *pImage = VK_NULL_HANDLE;
-+ return res;
-+ }
-+ return res;
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
-+ VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage)
-+{
-+ return vmaCreateAliasingImage2(allocator, allocation, 0, pImageCreateInfo, pImage);
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VmaAllocation VMA_NOT_NULL allocation,
-+ VkDeviceSize allocationLocalOffset,
-+ const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo,
-+ VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage)
-+{
-+ VMA_ASSERT(allocator && pImageCreateInfo && pImage && allocation);
-+
-+ *pImage = VK_NULL_HANDLE;
-+
-+ VMA_DEBUG_LOG("vmaCreateImage2");
-+
-+ if (pImageCreateInfo->extent.width == 0 ||
-+ pImageCreateInfo->extent.height == 0 ||
-+ pImageCreateInfo->extent.depth == 0 ||
-+ pImageCreateInfo->mipLevels == 0 ||
-+ pImageCreateInfo->arrayLayers == 0)
-+ {
-+ return VK_ERROR_INITIALIZATION_FAILED;
-+ }
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ // 1. Create VkImage.
-+ VkResult res = (*allocator->GetVulkanFunctions().vkCreateImage)(
-+ allocator->m_hDevice,
-+ pImageCreateInfo,
-+ allocator->GetAllocationCallbacks(),
-+ pImage);
-+ if (res >= 0)
-+ {
-+ // 2. Bind image with memory.
-+ res = allocator->BindImageMemory(allocation, allocationLocalOffset, *pImage, VMA_NULL);
-+ if (res >= 0)
-+ {
-+ return VK_SUCCESS;
-+ }
-+ (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, *pImage, allocator->GetAllocationCallbacks());
-+ }
-+ return res;
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage(
-+ VmaAllocator VMA_NOT_NULL allocator,
-+ VkImage VMA_NULLABLE_NON_DISPATCHABLE image,
-+ VmaAllocation VMA_NULLABLE allocation)
-+{
-+ VMA_ASSERT(allocator);
-+
-+ if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE)
-+ {
-+ return;
-+ }
-+
-+ VMA_DEBUG_LOG("vmaDestroyImage");
-+
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK
-+
-+ if(image != VK_NULL_HANDLE)
-+ {
-+ (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks());
-+ }
-+ if(allocation != VK_NULL_HANDLE)
-+ {
-+ allocator->FreeMemory(
-+ 1, // allocationCount
-+ &allocation);
-+ }
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
-+ const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo,
-+ VmaVirtualBlock VMA_NULLABLE * VMA_NOT_NULL pVirtualBlock)
-+{
-+ VMA_ASSERT(pCreateInfo && pVirtualBlock);
-+ VMA_ASSERT(pCreateInfo->size > 0);
-+ VMA_DEBUG_LOG("vmaCreateVirtualBlock");
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-+ *pVirtualBlock = vma_new(pCreateInfo->pAllocationCallbacks, VmaVirtualBlock_T)(*pCreateInfo);
-+ VkResult res = (*pVirtualBlock)->Init();
-+ if(res < 0)
-+ {
-+ vma_delete(pCreateInfo->pAllocationCallbacks, *pVirtualBlock);
-+ *pVirtualBlock = VK_NULL_HANDLE;
-+ }
-+ return res;
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(VmaVirtualBlock VMA_NULLABLE virtualBlock)
-+{
-+ if(virtualBlock != VK_NULL_HANDLE)
-+ {
-+ VMA_DEBUG_LOG("vmaDestroyVirtualBlock");
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-+ VkAllocationCallbacks allocationCallbacks = virtualBlock->m_AllocationCallbacks; // Have to copy the callbacks when destroying.
-+ vma_delete(&allocationCallbacks, virtualBlock);
-+ }
-+}
-+
-+VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_NOT_NULL virtualBlock)
-+{
-+ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
-+ VMA_DEBUG_LOG("vmaIsVirtualBlockEmpty");
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-+ return virtualBlock->IsEmpty() ? VK_TRUE : VK_FALSE;
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-+ VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo)
-+{
-+ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL);
-+ VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo");
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-+ virtualBlock->GetAllocationInfo(allocation, *pVirtualAllocInfo);
-+}
-+
-+VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-+ const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
-+ VkDeviceSize* VMA_NULLABLE pOffset)
-+{
-+ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL);
-+ VMA_DEBUG_LOG("vmaVirtualAllocate");
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-+ return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset);
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation)
-+{
-+ if(allocation != VK_NULL_HANDLE)
-+ {
-+ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
-+ VMA_DEBUG_LOG("vmaVirtualFree");
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-+ virtualBlock->Free(allocation);
-+ }
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NULL virtualBlock)
-+{
-+ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
-+ VMA_DEBUG_LOG("vmaClearVirtualBlock");
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-+ virtualBlock->Clear();
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-+ VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData)
-+{
-+ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
-+ VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData");
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-+ virtualBlock->SetAllocationUserData(allocation, pUserData);
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-+ VmaStatistics* VMA_NOT_NULL pStats)
-+{
-+ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL);
-+ VMA_DEBUG_LOG("vmaGetVirtualBlockStatistics");
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-+ virtualBlock->GetStatistics(*pStats);
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-+ VmaDetailedStatistics* VMA_NOT_NULL pStats)
-+{
-+ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL);
-+ VMA_DEBUG_LOG("vmaCalculateVirtualBlockStatistics");
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-+ virtualBlock->CalculateDetailedStatistics(*pStats);
-+}
-+
-+#if VMA_STATS_STRING_ENABLED
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-+ char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, VkBool32 detailedMap)
-+{
-+ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && ppStatsString != VMA_NULL);
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-+ const VkAllocationCallbacks* allocationCallbacks = virtualBlock->GetAllocationCallbacks();
-+ VmaStringBuilder sb(allocationCallbacks);
-+ virtualBlock->BuildStatsString(detailedMap != VK_FALSE, sb);
-+ *ppStatsString = VmaCreateStringCopy(allocationCallbacks, sb.GetData(), sb.GetLength());
-+}
-+
-+VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-+ char* VMA_NULLABLE pStatsString)
-+{
-+ if(pStatsString != VMA_NULL)
-+ {
-+ VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
-+ VMA_DEBUG_GLOBAL_MUTEX_LOCK;
-+ VmaFreeString(virtualBlock->GetAllocationCallbacks(), pStatsString);
-+ }
-+}
-+#endif // VMA_STATS_STRING_ENABLED
-+#endif // _VMA_PUBLIC_INTERFACE
-+#endif // VMA_IMPLEMENTATION
-+
-+/**
-+\page quick_start Quick start
-+
-+\section quick_start_project_setup Project setup
-+
-+Vulkan Memory Allocator comes in form of a "stb-style" single header file.
-+You don't need to build it as a separate library project.
-+You can add this file directly to your project and submit it to code repository next to your other source files.
-+
-+"Single header" doesn't mean that everything is contained in C/C++ declarations,
-+like it tends to be in case of inline functions or C++ templates.
-+It means that implementation is bundled with interface in a single file and needs to be extracted using preprocessor macro.
-+If you don't do it properly, you will get linker errors.
-+
-+To do it properly:
-+
-+-# Include "vk_mem_alloc.h" file in each CPP file where you want to use the library.
-+ This includes declarations of all members of the library.
-+-# In exactly one CPP file define following macro before this include.
-+ It enables also internal definitions.
-+
-+\code
-+#define VMA_IMPLEMENTATION
-+#include "vk_mem_alloc.h"
-+\endcode
-+
-+It may be a good idea to create dedicated CPP file just for this purpose.
-+
-+This library includes header `<vulkan/vulkan.h>`, which in turn
-+includes `<windows.h>` on Windows. If you need some specific macros defined
-+before including these headers (like `WIN32_LEAN_AND_MEAN` or
-+`WINVER` for Windows, `VK_USE_PLATFORM_WIN32_KHR` for Vulkan), you must define
-+them before every `#include` of this library.
-+
-+This library is written in C++, but has C-compatible interface.
-+Thus you can include and use vk_mem_alloc.h in C or C++ code, but full
-+implementation with `VMA_IMPLEMENTATION` macro must be compiled as C++, NOT as C.
-+Some features of C++14 are used. STL containers, RTTI, or C++ exceptions are not used.
-+
-+
-+\section quick_start_initialization Initialization
-+
-+At program startup:
-+
-+-# Initialize Vulkan to have `VkPhysicalDevice`, `VkDevice` and `VkInstance` object.
-+-# Fill VmaAllocatorCreateInfo structure and create #VmaAllocator object by
-+ calling vmaCreateAllocator().
-+
-+Only members `physicalDevice`, `device`, `instance` are required.
-+However, you should inform the library which Vulkan version do you use by setting
-+VmaAllocatorCreateInfo::vulkanApiVersion and which extensions did you enable
-+by setting VmaAllocatorCreateInfo::flags (like #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT for VK_KHR_buffer_device_address).
-+Otherwise, VMA would use only features of Vulkan 1.0 core with no extensions.
-+
-+\subsection quick_start_initialization_selecting_vulkan_version Selecting Vulkan version
-+
-+VMA supports Vulkan version down to 1.0, for backward compatibility.
-+If you want to use higher version, you need to inform the library about it.
-+This is a two-step process.
-+
-+<b>Step 1: Compile time.</b> By default, VMA compiles with code supporting the highest
-+Vulkan version found in the included `<vulkan/vulkan.h>` that is also supported by the library.
-+If this is OK, you don't need to do anything.
-+However, if you want to compile VMA as if only some lower Vulkan version was available,
-+define macro `VMA_VULKAN_VERSION` before every `#include "vk_mem_alloc.h"`.
-+It should have decimal numeric value in form of ABBBCCC, where A = major, BBB = minor, CCC = patch Vulkan version.
-+For example, to compile against Vulkan 1.2:
-+
-+\code
-+#define VMA_VULKAN_VERSION 1002000 // Vulkan 1.2
-+#include "vk_mem_alloc.h"
-+\endcode
-+
-+<b>Step 2: Runtime.</b> Even when compiled with higher Vulkan version available,
-+VMA can use only features of a lower version, which is configurable during creation of the #VmaAllocator object.
-+By default, only Vulkan 1.0 is used.
-+To initialize the allocator with support for higher Vulkan version, you need to set member
-+VmaAllocatorCreateInfo::vulkanApiVersion to an appropriate value, e.g. using constants like `VK_API_VERSION_1_2`.
-+See code sample below.
-+
-+\subsection quick_start_initialization_importing_vulkan_functions Importing Vulkan functions
-+
-+You may need to configure importing Vulkan functions. There are 3 ways to do this:
-+
-+-# **If you link with Vulkan static library** (e.g. "vulkan-1.lib" on Windows):
-+ - You don't need to do anything.
-+ - VMA will use these, as macro `VMA_STATIC_VULKAN_FUNCTIONS` is defined to 1 by default.
-+-# **If you want VMA to fetch pointers to Vulkan functions dynamically** using `vkGetInstanceProcAddr`,
-+ `vkGetDeviceProcAddr` (this is the option presented in the example below):
-+ - Define `VMA_STATIC_VULKAN_FUNCTIONS` to 0, `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 1.
-+ - Provide pointers to these two functions via VmaVulkanFunctions::vkGetInstanceProcAddr,
-+ VmaVulkanFunctions::vkGetDeviceProcAddr.
-+ - The library will fetch pointers to all other functions it needs internally.
-+-# **If you fetch pointers to all Vulkan functions in a custom way**, e.g. using some loader like
-+ [Volk](https://github.com/zeux/volk):
-+ - Define `VMA_STATIC_VULKAN_FUNCTIONS` and `VMA_DYNAMIC_VULKAN_FUNCTIONS` to 0.
-+ - Pass these pointers via structure #VmaVulkanFunctions.
-+
-+Example for case 2:
-+
-+\code
-+#define VMA_STATIC_VULKAN_FUNCTIONS 0
-+#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
-+#include "vk_mem_alloc.h"
-+
-+...
-+
-+VmaVulkanFunctions vulkanFunctions = {};
-+vulkanFunctions.vkGetInstanceProcAddr = &vkGetInstanceProcAddr;
-+vulkanFunctions.vkGetDeviceProcAddr = &vkGetDeviceProcAddr;
-+
-+VmaAllocatorCreateInfo allocatorCreateInfo = {};
-+allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_2;
-+allocatorCreateInfo.physicalDevice = physicalDevice;
-+allocatorCreateInfo.device = device;
-+allocatorCreateInfo.instance = instance;
-+allocatorCreateInfo.pVulkanFunctions = &vulkanFunctions;
-+
-+VmaAllocator allocator;
-+vmaCreateAllocator(&allocatorCreateInfo, &allocator);
-+\endcode
-+
-+
-+\section quick_start_resource_allocation Resource allocation
-+
-+When you want to create a buffer or image:
-+
-+-# Fill `VkBufferCreateInfo` / `VkImageCreateInfo` structure.
-+-# Fill VmaAllocationCreateInfo structure.
-+-# Call vmaCreateBuffer() / vmaCreateImage() to get `VkBuffer`/`VkImage` with memory
-+ already allocated and bound to it, plus #VmaAllocation object that represents its underlying memory.
-+
-+\code
-+VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-+bufferInfo.size = 65536;
-+bufferInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-+
-+VmaAllocationCreateInfo allocInfo = {};
-+allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
-+
-+VkBuffer buffer;
-+VmaAllocation allocation;
-+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
-+\endcode
-+
-+Don't forget to destroy your objects when no longer needed:
-+
-+\code
-+vmaDestroyBuffer(allocator, buffer, allocation);
-+vmaDestroyAllocator(allocator);
-+\endcode
-+
-+
-+\page choosing_memory_type Choosing memory type
-+
-+Physical devices in Vulkan support various combinations of memory heaps and
-+types. Help with choosing correct and optimal memory type for your specific
-+resource is one of the key features of this library. You can use it by filling
-+appropriate members of VmaAllocationCreateInfo structure, as described below.
-+You can also combine multiple methods.
-+
-+-# If you just want to find memory type index that meets your requirements, you
-+ can use function: vmaFindMemoryTypeIndexForBufferInfo(),
-+ vmaFindMemoryTypeIndexForImageInfo(), vmaFindMemoryTypeIndex().
-+-# If you want to allocate a region of device memory without association with any
-+ specific image or buffer, you can use function vmaAllocateMemory(). Usage of
-+ this function is not recommended and usually not needed.
-+ vmaAllocateMemoryPages() function is also provided for creating multiple allocations at once,
-+ which may be useful for sparse binding.
-+-# If you already have a buffer or an image created, you want to allocate memory
-+ for it and then you will bind it yourself, you can use function
-+ vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage().
-+ For binding you should use functions: vmaBindBufferMemory(), vmaBindImageMemory()
-+ or their extended versions: vmaBindBufferMemory2(), vmaBindImageMemory2().
-+-# **This is the easiest and recommended way to use this library:**
-+ If you want to create a buffer or an image, allocate memory for it and bind
-+ them together, all in one call, you can use function vmaCreateBuffer(),
-+ vmaCreateImage().
-+
-+When using 3. or 4., the library internally queries Vulkan for memory types
-+supported for that buffer or image (function `vkGetBufferMemoryRequirements()`)
-+and uses only one of these types.
-+
-+If no memory type can be found that meets all the requirements, these functions
-+return `VK_ERROR_FEATURE_NOT_PRESENT`.
-+
-+You can leave VmaAllocationCreateInfo structure completely filled with zeros.
-+It means no requirements are specified for memory type.
-+It is valid, although not very useful.
-+
-+\section choosing_memory_type_usage Usage
-+
-+The easiest way to specify memory requirements is to fill member
-+VmaAllocationCreateInfo::usage using one of the values of enum #VmaMemoryUsage.
-+It defines high level, common usage types.
-+Since version 3 of the library, it is recommended to use #VMA_MEMORY_USAGE_AUTO to let it select best memory type for your resource automatically.
-+
-+For example, if you want to create a uniform buffer that will be filled using
-+transfer only once or infrequently and then used for rendering every frame as a uniform buffer, you can
-+do it using following code. The buffer will most likely end up in a memory type with
-+`VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT` to be fast to access by the GPU device.
-+
-+\code
-+VkBufferCreateInfo bufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-+bufferInfo.size = 65536;
-+bufferInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-+
-+VmaAllocationCreateInfo allocInfo = {};
-+allocInfo.usage = VMA_MEMORY_USAGE_AUTO;
-+
-+VkBuffer buffer;
-+VmaAllocation allocation;
-+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
-+\endcode
-+
-+If you have a preference for putting the resource in GPU (device) memory or CPU (host) memory
-+on systems with discrete graphics card that have the memories separate, you can use
-+#VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST.
-+
-+When using `VMA_MEMORY_USAGE_AUTO*` while you want to map the allocated memory,
-+you also need to specify one of the host access flags:
-+#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
-+This will help the library decide about preferred memory type to ensure it has `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
-+so you can map it.
-+
-+For example, a staging buffer that will be filled via mapped pointer and then
-+used as a source of transfer to the buffer described previously can be created like this.
-+It will likely end up in a memory type that is `HOST_VISIBLE` and `HOST_COHERENT`
-+but not `HOST_CACHED` (meaning uncached, write-combined) and not `DEVICE_LOCAL` (meaning system RAM).
-+
-+\code
-+VkBufferCreateInfo stagingBufferInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-+stagingBufferInfo.size = 65536;
-+stagingBufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
-+
-+VmaAllocationCreateInfo stagingAllocInfo = {};
-+stagingAllocInfo.usage = VMA_MEMORY_USAGE_AUTO;
-+stagingAllocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;
-+
-+VkBuffer stagingBuffer;
-+VmaAllocation stagingAllocation;
-+vmaCreateBuffer(allocator, &stagingBufferInfo, &stagingAllocInfo, &stagingBuffer, &stagingAllocation, nullptr);
-+\endcode
-+
-+For more examples of creating different kinds of resources, see chapter \ref usage_patterns.
-+
-+Usage values `VMA_MEMORY_USAGE_AUTO*` are legal to use only when the library knows
-+about the resource being created by having `VkBufferCreateInfo` / `VkImageCreateInfo` passed,
-+so they work with functions like: vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo() etc.
-+If you allocate raw memory using function vmaAllocateMemory(), you have to use other means of selecting
-+memory type, as described below.
-+
-+\note
-+Old usage values (`VMA_MEMORY_USAGE_GPU_ONLY`, `VMA_MEMORY_USAGE_CPU_ONLY`,
-+`VMA_MEMORY_USAGE_CPU_TO_GPU`, `VMA_MEMORY_USAGE_GPU_TO_CPU`, `VMA_MEMORY_USAGE_CPU_COPY`)
-+are still available and work same way as in previous versions of the library
-+for backward compatibility, but they are not recommended.
-+
-+\section choosing_memory_type_required_preferred_flags Required and preferred flags
-+
-+You can specify more detailed requirements by filling members
-+VmaAllocationCreateInfo::requiredFlags and VmaAllocationCreateInfo::preferredFlags
-+with a combination of bits from enum `VkMemoryPropertyFlags`. For example,
-+if you want to create a buffer that will be persistently mapped on host (so it
-+must be `HOST_VISIBLE`) and preferably will also be `HOST_COHERENT` and `HOST_CACHED`,
-+use following code:
-+
-+\code
-+VmaAllocationCreateInfo allocInfo = {};
-+allocInfo.requiredFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
-+allocInfo.preferredFlags = VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
-+allocInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;
-+
-+VkBuffer buffer;
-+VmaAllocation allocation;
-+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
-+\endcode
-+
-+A memory type is chosen that has all the required flags and as many preferred
-+flags set as possible.
-+
-+Value passed in VmaAllocationCreateInfo::usage is internally converted to a set of required and preferred flags,
-+plus some extra "magic" (heuristics).
-+
-+\section choosing_memory_type_explicit_memory_types Explicit memory types
-+
-+If you inspected memory types available on the physical device and you have
-+a preference for memory types that you want to use, you can fill member
-+VmaAllocationCreateInfo::memoryTypeBits. It is a bit mask, where each bit set
-+means that a memory type with that index is allowed to be used for the
-+allocation. Special value 0, just like `UINT32_MAX`, means there are no
-+restrictions to memory type index.
-+
-+Please note that this member is NOT just a memory type index.
-+Still you can use it to choose just one, specific memory type.
-+For example, if you already determined that your buffer should be created in
-+memory type 2, use following code:
-+
-+\code
-+uint32_t memoryTypeIndex = 2;
-+
-+VmaAllocationCreateInfo allocInfo = {};
-+allocInfo.memoryTypeBits = 1u << memoryTypeIndex;
-+
-+VkBuffer buffer;
-+VmaAllocation allocation;
-+vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &buffer, &allocation, nullptr);
-+\endcode
-+
-+
-+\section choosing_memory_type_custom_memory_pools Custom memory pools
-+
-+If you allocate from custom memory pool, all the ways of specifying memory
-+requirements described above are not applicable and the aforementioned members
-+of VmaAllocationCreateInfo structure are ignored. Memory type is selected
-+explicitly when creating the pool and then used to make all the allocations from
-+that pool. For further details, see \ref custom_memory_pools.
-+
-+\section choosing_memory_type_dedicated_allocations Dedicated allocations
-+
-+Memory for allocations is reserved out of larger block of `VkDeviceMemory`
-+allocated from Vulkan internally. That is the main feature of this whole library.
-+You can still request a separate memory block to be created for an allocation,
-+just like you would do in a trivial solution without using any allocator.
-+In that case, a buffer or image is always bound to that memory at offset 0.
-+This is called a "dedicated allocation".
-+You can explicitly request it by using flag #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
-+The library can also internally decide to use dedicated allocation in some cases, e.g.:
-+
-+- When the size of the allocation is large.
-+- When [VK_KHR_dedicated_allocation](@ref vk_khr_dedicated_allocation) extension is enabled
-+ and it reports that dedicated allocation is required or recommended for the resource.
-+- When allocation of next big memory block fails due to not enough device memory,
-+ but allocation with the exact requested size succeeds.
-+
-+
-+\page memory_mapping Memory mapping
-+
-+To "map memory" in Vulkan means to obtain a CPU pointer to `VkDeviceMemory`,
-+to be able to read from it or write to it in CPU code.
-+Mapping is possible only for memory allocated from a memory type that has
-+`VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` flag.
-+Functions `vkMapMemory()`, `vkUnmapMemory()` are designed for this purpose.
-+You can use them directly with memory allocated by this library,
-+but it is not recommended because of following issue:
-+Mapping the same `VkDeviceMemory` block multiple times is illegal - only one mapping at a time is allowed.
-+This includes mapping disjoint regions. Mapping is not reference-counted internally by Vulkan.
-+Because of this, Vulkan Memory Allocator provides following facilities:
-+
-+\note If you want to be able to map an allocation, you need to specify one of the flags
-+#VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT
-+in VmaAllocationCreateInfo::flags. These flags are required for an allocation to be mappable
-+when using #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` enum values.
-+For other usage values they are ignored and every such allocation made in `HOST_VISIBLE` memory type is mappable,
-+but they can still be used for consistency.
-+
-+\section memory_mapping_mapping_functions Mapping functions
-+
-+The library provides following functions for mapping of a specific #VmaAllocation: vmaMapMemory(), vmaUnmapMemory().
-+They are safer and more convenient to use than standard Vulkan functions.
-+You can map an allocation multiple times simultaneously - mapping is reference-counted internally.
-+You can also map different allocations simultaneously regardless of whether they use the same `VkDeviceMemory` block.
-+The way it is implemented is that the library always maps entire memory block, not just region of the allocation.
-+For further details, see description of vmaMapMemory() function.
-+Example:
-+
-+\code
-+// Having these objects initialized:
-+struct ConstantBuffer
-+{
-+ ...
-+};
-+ConstantBuffer constantBufferData = ...
-+
-+VmaAllocator allocator = ...
-+VkBuffer constantBuffer = ...
-+VmaAllocation constantBufferAllocation = ...
-+
-+// You can map and fill your buffer using following code:
-+
-+void* mappedData;
-+vmaMapMemory(allocator, constantBufferAllocation, &mappedData);
-+memcpy(mappedData, &constantBufferData, sizeof(constantBufferData));
-+vmaUnmapMemory(allocator, constantBufferAllocation);
-+\endcode
-+
-+When mapping, you may see a warning from Vulkan validation layer similar to this one:
-+
-+<i>Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.</i>
-+
-+It happens because the library maps entire `VkDeviceMemory` block, where different
-+types of images and buffers may end up together, especially on GPUs with unified memory like Intel.
-+You can safely ignore it if you are sure you access only memory of the intended
-+object that you wanted to map.
-+
-+
-+\section memory_mapping_persistently_mapped_memory Persistently mapped memory
-+
-+Keeping your memory persistently mapped is generally OK in Vulkan.
-+You don't need to unmap it before using its data on the GPU.
-+The library provides a special feature designed for that:
-+Allocations made with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag set in
-+VmaAllocationCreateInfo::flags stay mapped all the time,
-+so you can just access CPU pointer to it any time
-+without a need to call any "map" or "unmap" function.
-+Example:
-+
-+\code
-+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-+bufCreateInfo.size = sizeof(ConstantBuffer);
-+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
-+
-+VmaAllocationCreateInfo allocCreateInfo = {};
-+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
-+ VMA_ALLOCATION_CREATE_MAPPED_BIT;
-+
-+VkBuffer buf;
-+VmaAllocation alloc;
-+VmaAllocationInfo allocInfo;
-+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-+
-+// Buffer is already mapped. You can access its memory.
-+memcpy(allocInfo.pMappedData, &constantBufferData, sizeof(constantBufferData));
-+\endcode
-+
-+\note #VMA_ALLOCATION_CREATE_MAPPED_BIT by itself doesn't guarantee that the allocation will end up
-+in a mappable memory type.
-+For this, you need to also specify #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or
-+#VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
-+#VMA_ALLOCATION_CREATE_MAPPED_BIT only guarantees that if the memory is `HOST_VISIBLE`, the allocation will be mapped on creation.
-+For an example of how to make use of this fact, see section \ref usage_patterns_advanced_data_uploading.
-+
-+\section memory_mapping_cache_control Cache flush and invalidate
-+
-+Memory in Vulkan doesn't need to be unmapped before using it on GPU,
-+but unless a memory type has `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT` flag set,
-+you need to manually **invalidate** cache before reading of mapped pointer
-+and **flush** cache after writing to mapped pointer.
-+Map/unmap operations don't do that automatically.
-+Vulkan provides following functions for this purpose `vkFlushMappedMemoryRanges()`,
-+`vkInvalidateMappedMemoryRanges()`, but this library provides more convenient
-+functions that refer to given allocation object: vmaFlushAllocation(),
-+vmaInvalidateAllocation(),
-+or multiple objects at once: vmaFlushAllocations(), vmaInvalidateAllocations().
-+
-+Regions of memory specified for flush/invalidate must be aligned to
-+`VkPhysicalDeviceLimits::nonCoherentAtomSize`. This is automatically ensured by the library.
-+In any memory type that is `HOST_VISIBLE` but not `HOST_COHERENT`, all allocations
-+within blocks are aligned to this value, so their offsets are always multiples of
-+`nonCoherentAtomSize` and two different allocations never share the same "line" of this size.
-+
-+Also, Windows drivers from all 3 PC GPU vendors (AMD, Intel, NVIDIA)
-+currently provide `HOST_COHERENT` flag on all memory types that are
-+`HOST_VISIBLE`, so on PC you may not need to bother.
-+
-+
-+\page staying_within_budget Staying within budget
-+
-+When developing a graphics-intensive game or program, it is important to avoid allocating
-+more GPU memory than it is physically available. When the memory is over-committed,
-+various bad things can happen, depending on the specific GPU, graphics driver, and
-+operating system:
-+
-+- It may just work without any problems.
-+- The application may slow down because some memory blocks are moved to system RAM
-+ and the GPU has to access them through PCI Express bus.
-+- A new allocation may take very long time to complete, even few seconds, and possibly
-+ freeze entire system.
-+- The new allocation may fail with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-+- It may even result in GPU crash (TDR), observed as `VK_ERROR_DEVICE_LOST`
-+ returned somewhere later.
-+
-+\section staying_within_budget_querying_for_budget Querying for budget
-+
-+To query for current memory usage and available budget, use function vmaGetHeapBudgets().
-+Returned structure #VmaBudget contains quantities expressed in bytes, per Vulkan memory heap.
-+
-+Please note that this function returns different information and works faster than
-+vmaCalculateStatistics(). vmaGetHeapBudgets() can be called every frame or even before every
-+allocation, while vmaCalculateStatistics() is intended to be used rarely,
-+only to obtain statistical information, e.g. for debugging purposes.
-+
-+It is recommended to use <b>VK_EXT_memory_budget</b> device extension to obtain information
-+about the budget from Vulkan device. VMA is able to use this extension automatically.
-+When not enabled, the allocator behaves the same way, but then it estimates current usage
-+and available budget based on its internal information and Vulkan memory heap sizes,
-+which may be less precise. In order to use this extension:
-+
-+1. Make sure extensions VK_EXT_memory_budget and VK_KHR_get_physical_device_properties2
-+ required by it are available and enable them. Please note that the first is a device
-+ extension and the second is instance extension!
-+2. Use flag #VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT when creating #VmaAllocator object.
-+3. Make sure to call vmaSetCurrentFrameIndex() every frame. Budget is queried from
-+ Vulkan inside of it to avoid overhead of querying it with every allocation.
-+
-+\section staying_within_budget_controlling_memory_usage Controlling memory usage
-+
-+There are many ways in which you can try to stay within the budget.
-+
-+First, when making new allocation requires allocating a new memory block, the library
-+tries not to exceed the budget automatically. If a block with default recommended size
-+(e.g. 256 MB) would go over budget, a smaller block is allocated, possibly even
-+dedicated memory for just this resource.
-+
-+If the size of the requested resource plus current memory usage is more than the
-+budget, by default the library still tries to create it, leaving it to the Vulkan
-+implementation whether the allocation succeeds or fails. You can change this behavior
-+by using #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag. With it, the allocation is
-+not made if it would exceed the budget or if the budget is already exceeded.
-+VMA then tries to make the allocation from the next eligible Vulkan memory type.
-+If all of them fail, the call then fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-+Example usage pattern may be to pass the #VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT flag
-+when creating resources that are not essential for the application (e.g. the texture
-+of a specific object) and not to pass it when creating critically important resources
-+(e.g. render targets).
-+
-+On AMD graphics cards there is a custom vendor extension available: <b>VK_AMD_memory_overallocation_behavior</b>
-+that allows to control the behavior of the Vulkan implementation in out-of-memory cases -
-+whether it should fail with an error code or still allow the allocation.
-+Usage of this extension involves only passing extra structure on Vulkan device creation,
-+so it is out of scope of this library.
-+
-+Finally, you can also use #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT flag to make sure
-+a new allocation is created only when it fits inside one of the existing memory blocks.
-+If it would require allocating a new block, it fails instead with `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-+This also ensures that the function call is very fast because it never goes to Vulkan
-+to obtain a new block.
-+
-+\note Creating \ref custom_memory_pools with VmaPoolCreateInfo::minBlockCount
-+set to more than 0 will currently try to allocate memory blocks without checking whether they
-+fit within budget.
-+
-+
-+\page resource_aliasing Resource aliasing (overlap)
-+
-+New explicit graphics APIs (Vulkan and Direct3D 12), thanks to manual memory
-+management, give an opportunity to alias (overlap) multiple resources in the
-+same region of memory - a feature not available in the old APIs (Direct3D 11, OpenGL).
-+It can be useful to save video memory, but it must be used with caution.
-+
-+For example, if you know the flow of your whole render frame in advance, you
-+are going to use some intermediate textures or buffers only during a small range of render passes,
-+and you know these ranges don't overlap in time, you can bind these resources to
-+the same place in memory, even if they have completely different parameters (width, height, format etc.).
-+
-+![Resource aliasing (overlap)](../gfx/Aliasing.png)
-+
-+Such scenario is possible using VMA, but you need to create your images manually.
-+Then you need to calculate parameters of an allocation to be made using formula:
-+
-+- allocation size = max(size of each image)
-+- allocation alignment = max(alignment of each image)
-+- allocation memoryTypeBits = bitwise AND(memoryTypeBits of each image)
-+
-+Following example shows two different images bound to the same place in memory,
-+allocated to fit largest of them.
-+
-+\code
-+// A 512x512 texture to be sampled.
-+VkImageCreateInfo img1CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
-+img1CreateInfo.imageType = VK_IMAGE_TYPE_2D;
-+img1CreateInfo.extent.width = 512;
-+img1CreateInfo.extent.height = 512;
-+img1CreateInfo.extent.depth = 1;
-+img1CreateInfo.mipLevels = 10;
-+img1CreateInfo.arrayLayers = 1;
-+img1CreateInfo.format = VK_FORMAT_R8G8B8A8_SRGB;
-+img1CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
-+img1CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-+img1CreateInfo.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
-+img1CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
-+
-+// A full screen texture to be used as color attachment.
-+VkImageCreateInfo img2CreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
-+img2CreateInfo.imageType = VK_IMAGE_TYPE_2D;
-+img2CreateInfo.extent.width = 1920;
-+img2CreateInfo.extent.height = 1080;
-+img2CreateInfo.extent.depth = 1;
-+img2CreateInfo.mipLevels = 1;
-+img2CreateInfo.arrayLayers = 1;
-+img2CreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
-+img2CreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
-+img2CreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-+img2CreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
-+img2CreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
-+
-+VkImage img1;
-+res = vkCreateImage(device, &img1CreateInfo, nullptr, &img1);
-+VkImage img2;
-+res = vkCreateImage(device, &img2CreateInfo, nullptr, &img2);
-+
-+VkMemoryRequirements img1MemReq;
-+vkGetImageMemoryRequirements(device, img1, &img1MemReq);
-+VkMemoryRequirements img2MemReq;
-+vkGetImageMemoryRequirements(device, img2, &img2MemReq);
-+
-+VkMemoryRequirements finalMemReq = {};
-+finalMemReq.size = std::max(img1MemReq.size, img2MemReq.size);
-+finalMemReq.alignment = std::max(img1MemReq.alignment, img2MemReq.alignment);
-+finalMemReq.memoryTypeBits = img1MemReq.memoryTypeBits & img2MemReq.memoryTypeBits;
-+// Validate if(finalMemReq.memoryTypeBits != 0)
-+
-+VmaAllocationCreateInfo allocCreateInfo = {};
-+allocCreateInfo.preferredFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
-+
-+VmaAllocation alloc;
-+res = vmaAllocateMemory(allocator, &finalMemReq, &allocCreateInfo, &alloc, nullptr);
-+
-+res = vmaBindImageMemory(allocator, alloc, img1);
-+res = vmaBindImageMemory(allocator, alloc, img2);
-+
-+// You can use img1, img2 here, but not at the same time!
-+
-+vmaFreeMemory(allocator, alloc);
-+vkDestroyImage(device, img2, nullptr);
-+vkDestroyImage(device, img1, nullptr);
-+\endcode
-+
-+VMA also provides convenience functions that create a buffer or image and bind it to memory
-+represented by an existing #VmaAllocation:
-+vmaCreateAliasingBuffer(), vmaCreateAliasingBuffer2(),
-+vmaCreateAliasingImage(), vmaCreateAliasingImage2().
-+Versions with "2" offer additional parameter `allocationLocalOffset`.
-+
-+Remember that using resources that alias in memory requires proper synchronization.
-+You need to issue a memory barrier to make sure commands that use `img1` and `img2`
-+don't overlap on GPU timeline.
-+You also need to treat a resource after aliasing as uninitialized - containing garbage data.
-+For example, if you use `img1` and then want to use `img2`, you need to issue
-+an image memory barrier for `img2` with `oldLayout` = `VK_IMAGE_LAYOUT_UNDEFINED`.
-+
-+Additional considerations:
-+
-+- Vulkan also allows to interpret contents of memory between aliasing resources consistently in some cases.
-+See chapter 11.8. "Memory Aliasing" of Vulkan specification or `VK_IMAGE_CREATE_ALIAS_BIT` flag.
-+- You can create more complex layout where different images and buffers are bound
-+at different offsets inside one large allocation. For example, one can imagine
-+a big texture used in some render passes, aliasing with a set of many small buffers
-+used in some further passes. To bind a resource at non-zero offset in an allocation,
-+use vmaBindBufferMemory2() / vmaBindImageMemory2().
-+- Before allocating memory for the resources you want to alias, check `memoryTypeBits`
-+returned in memory requirements of each resource to make sure the bits overlap.
-+Some GPUs may expose multiple memory types suitable e.g. only for buffers or
-+images with `COLOR_ATTACHMENT` usage, so the sets of memory types supported by your
-+resources may be disjoint. Aliasing them is not possible in that case.
-+
-+
-+\page custom_memory_pools Custom memory pools
-+
-+A memory pool contains a number of `VkDeviceMemory` blocks.
-+The library automatically creates and manages default pool for each memory type available on the device.
-+Default memory pool automatically grows in size.
-+Size of allocated blocks is also variable and managed automatically.
-+
-+You can create custom pool and allocate memory out of it.
-+It can be useful if you want to:
-+
-+- Keep certain kind of allocations separate from others.
-+- Enforce particular, fixed size of Vulkan memory blocks.
-+- Limit maximum amount of Vulkan memory allocated for that pool.
-+- Reserve minimum or fixed amount of Vulkan memory always preallocated for that pool.
-+- Use extra parameters for a set of your allocations that are available in #VmaPoolCreateInfo but not in
-+ #VmaAllocationCreateInfo - e.g., custom minimum alignment, custom `pNext` chain.
-+- Perform defragmentation on a specific subset of your allocations.
-+
-+To use custom memory pools:
-+
-+-# Fill VmaPoolCreateInfo structure.
-+-# Call vmaCreatePool() to obtain #VmaPool handle.
-+-# When making an allocation, set VmaAllocationCreateInfo::pool to this handle.
-+ You don't need to specify any other parameters of this structure, like `usage`.
-+
-+Example:
-+
-+\code
-+// Find memoryTypeIndex for the pool.
-+VkBufferCreateInfo sampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-+sampleBufCreateInfo.size = 0x10000; // Doesn't matter.
-+sampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-+
-+VmaAllocationCreateInfo sampleAllocCreateInfo = {};
-+sampleAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-+
-+uint32_t memTypeIndex;
-+VkResult res = vmaFindMemoryTypeIndexForBufferInfo(allocator,
-+ &sampleBufCreateInfo, &sampleAllocCreateInfo, &memTypeIndex);
-+// Check res...
-+
-+// Create a pool that can have at most 2 blocks, 128 MiB each.
-+VmaPoolCreateInfo poolCreateInfo = {};
-+poolCreateInfo.memoryTypeIndex = memTypeIndex;
-+poolCreateInfo.blockSize = 128ull * 1024 * 1024;
-+poolCreateInfo.maxBlockCount = 2;
-+
-+VmaPool pool;
-+res = vmaCreatePool(allocator, &poolCreateInfo, &pool);
-+// Check res...
-+
-+// Allocate a buffer out of it.
-+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-+bufCreateInfo.size = 1024;
-+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-+
-+VmaAllocationCreateInfo allocCreateInfo = {};
-+allocCreateInfo.pool = pool;
-+
-+VkBuffer buf;
-+VmaAllocation alloc;
-+res = vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr);
-+// Check res...
-+\endcode
-+
-+You have to free all allocations made from this pool before destroying it.
-+
-+\code
-+vmaDestroyBuffer(allocator, buf, alloc);
-+vmaDestroyPool(allocator, pool);
-+\endcode
-+
-+New versions of this library support creating dedicated allocations in custom pools.
-+It is supported only when VmaPoolCreateInfo::blockSize = 0.
-+To use this feature, set VmaAllocationCreateInfo::pool to the pointer to your custom pool and
-+VmaAllocationCreateInfo::flags to #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
-+
-+\note Excessive use of custom pools is a common mistake when using this library.
-+Custom pools may be useful for special purposes - when you want to
-+keep certain type of resources separate e.g. to reserve minimum amount of memory
-+for them or limit maximum amount of memory they can occupy. For most
-+resources this is not needed and so it is not recommended to create #VmaPool
-+objects and allocations out of them. Allocating from the default pool is sufficient.
-+
-+
-+\section custom_memory_pools_MemTypeIndex Choosing memory type index
-+
-+When creating a pool, you must explicitly specify memory type index.
-+To find the one suitable for your buffers or images, you can use helper functions
-+vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo().
-+You need to provide structures with example parameters of buffers or images
-+that you are going to create in that pool.
-+
-+\code
-+VkBufferCreateInfo exampleBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-+exampleBufCreateInfo.size = 1024; // Doesn't matter
-+exampleBufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-+
-+VmaAllocationCreateInfo allocCreateInfo = {};
-+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-+
-+uint32_t memTypeIndex;
-+vmaFindMemoryTypeIndexForBufferInfo(allocator, &exampleBufCreateInfo, &allocCreateInfo, &memTypeIndex);
-+
-+VmaPoolCreateInfo poolCreateInfo = {};
-+poolCreateInfo.memoryTypeIndex = memTypeIndex;
-+// ...
-+\endcode
-+
-+When creating buffers/images allocated in that pool, provide following parameters:
-+
-+- `VkBufferCreateInfo`: Prefer to pass same parameters as above.
-+ Otherwise you risk creating resources in a memory type that is not suitable for them, which may result in undefined behavior.
-+ Using different `VK_BUFFER_USAGE_` flags may work, but you shouldn't create images in a pool intended for buffers
-+ or the other way around.
-+- VmaAllocationCreateInfo: You don't need to pass same parameters. Fill only `pool` member.
-+ Other members are ignored anyway.
-+
-+\section linear_algorithm Linear allocation algorithm
-+
-+Each Vulkan memory block managed by this library has accompanying metadata that
-+keeps track of used and unused regions. By default, the metadata structure and
-+algorithm tries to find best place for new allocations among free regions to
-+optimize memory usage. This way you can allocate and free objects in any order.
-+
-+![Default allocation algorithm](../gfx/Linear_allocator_1_algo_default.png)
-+
-+Sometimes there is a need to use simpler, linear allocation algorithm. You can
-+create custom pool that uses such algorithm by adding flag
-+#VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT to VmaPoolCreateInfo::flags while creating
-+#VmaPool object. Then an alternative metadata management is used. It always
-+creates new allocations after last one and doesn't reuse free regions after
-+allocations freed in the middle. It results in better allocation performance and
-+less memory consumed by metadata.
-+
-+![Linear allocation algorithm](../gfx/Linear_allocator_2_algo_linear.png)
-+
-+With this one flag, you can create a custom pool that can be used in many ways:
-+free-at-once, stack, double stack, and ring buffer. See below for details.
-+You don't need to specify explicitly which of these options you are going to use - it is detected automatically.
-+
-+\subsection linear_algorithm_free_at_once Free-at-once
-+
-+In a pool that uses linear algorithm, you still need to free all the allocations
-+individually, e.g. by using vmaFreeMemory() or vmaDestroyBuffer(). You can free
-+them in any order. New allocations are always made after last one - free space
-+in the middle is not reused. However, when you release all the allocation and
-+the pool becomes empty, allocation starts from the beginning again. This way you
-+can use linear algorithm to speed up creation of allocations that you are going
-+to release all at once.
-+
-+![Free-at-once](../gfx/Linear_allocator_3_free_at_once.png)
-+
-+This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount
-+value that allows multiple memory blocks.
-+
-+\subsection linear_algorithm_stack Stack
-+
-+When you free an allocation that was created last, its space can be reused.
-+Thanks to this, if you always release allocations in the order opposite to their
-+creation (LIFO - Last In First Out), you can achieve behavior of a stack.
-+
-+![Stack](../gfx/Linear_allocator_4_stack.png)
-+
-+This mode is also available for pools created with VmaPoolCreateInfo::maxBlockCount
-+value that allows multiple memory blocks.
-+
-+\subsection linear_algorithm_double_stack Double stack
-+
-+The space reserved by a custom pool with linear algorithm may be used by two
-+stacks:
-+
-+- First, default one, growing up from offset 0.
-+- Second, "upper" one, growing down from the end towards lower offsets.
-+
-+To make allocation from the upper stack, add flag #VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT
-+to VmaAllocationCreateInfo::flags.
-+
-+![Double stack](../gfx/Linear_allocator_7_double_stack.png)
-+
-+Double stack is available only in pools with one memory block -
-+VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
-+
-+When the two stacks' ends meet so there is not enough space between them for a
-+new allocation, such allocation fails with usual
-+`VK_ERROR_OUT_OF_DEVICE_MEMORY` error.
-+
-+\subsection linear_algorithm_ring_buffer Ring buffer
-+
-+When you free some allocations from the beginning and there is not enough free space
-+for a new one at the end of a pool, allocator's "cursor" wraps around to the
-+beginning and starts allocation there. Thanks to this, if you always release
-+allocations in the same order as you created them (FIFO - First In First Out),
-+you can achieve behavior of a ring buffer / queue.
-+
-+![Ring buffer](../gfx/Linear_allocator_5_ring_buffer.png)
-+
-+Ring buffer is available only in pools with one memory block -
-+VmaPoolCreateInfo::maxBlockCount must be 1. Otherwise behavior is undefined.
-+
-+\note \ref defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
-+
-+
-+\page defragmentation Defragmentation
-+
-+Interleaved allocations and deallocations of many objects of varying size can
-+cause fragmentation over time, which can lead to a situation where the library is unable
-+to find a continuous range of free memory for a new allocation despite there is
-+enough free space, just scattered across many small free ranges between existing
-+allocations.
-+
-+To mitigate this problem, you can use defragmentation feature.
-+It doesn't happen automatically though and needs your cooperation,
-+because VMA is a low level library that only allocates memory.
-+It cannot recreate buffers and images in a new place as it doesn't remember the contents of `VkBufferCreateInfo` / `VkImageCreateInfo` structures.
-+It cannot copy their contents as it doesn't record any commands to a command buffer.
-+
-+Example:
-+
-+\code
-+VmaDefragmentationInfo defragInfo = {};
-+defragInfo.pool = myPool;
-+defragInfo.flags = VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT;
-+
-+VmaDefragmentationContext defragCtx;
-+VkResult res = vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx);
-+// Check res...
-+
-+for(;;)
-+{
-+ VmaDefragmentationPassMoveInfo pass;
-+ res = vmaBeginDefragmentationPass(allocator, defragCtx, &pass);
-+ if(res == VK_SUCCESS)
-+ break;
-+ else if(res != VK_INCOMPLETE)
-+ // Handle error...
-+
-+ for(uint32_t i = 0; i < pass.moveCount; ++i)
-+ {
-+ // Inspect pass.pMoves[i].srcAllocation, identify what buffer/image it represents.
-+ VmaAllocationInfo allocInfo;
-+ vmaGetAllocationInfo(allocator, pass.pMoves[i].srcAllocation, &allocInfo);
-+ MyEngineResourceData* resData = (MyEngineResourceData*)allocInfo.pUserData;
-+
-+ // Recreate and bind this buffer/image at: pass.pMoves[i].dstMemory, pass.pMoves[i].dstOffset.
-+ VkImageCreateInfo imgCreateInfo = ...
-+ VkImage newImg;
-+ res = vkCreateImage(device, &imgCreateInfo, nullptr, &newImg);
-+ // Check res...
-+ res = vmaBindImageMemory(allocator, pass.pMoves[i].dstTmpAllocation, newImg);
-+ // Check res...
-+
-+ // Issue a vkCmdCopyBuffer/vkCmdCopyImage to copy its content to the new place.
-+ vkCmdCopyImage(cmdBuf, resData->img, ..., newImg, ...);
-+ }
-+
-+ // Make sure the copy commands finished executing.
-+ vkWaitForFences(...);
-+
-+ // Destroy old buffers/images bound with pass.pMoves[i].srcAllocation.
-+ for(uint32_t i = 0; i < pass.moveCount; ++i)
-+ {
-+ // ...
-+ vkDestroyImage(device, resData->img, nullptr);
-+ }
-+
-+ // Update appropriate descriptors to point to the new places...
-+
-+ res = vmaEndDefragmentationPass(allocator, defragCtx, &pass);
-+ if(res == VK_SUCCESS)
-+ break;
-+ else if(res != VK_INCOMPLETE)
-+ // Handle error...
-+}
-+
-+vmaEndDefragmentation(allocator, defragCtx, nullptr);
-+\endcode
-+
-+Although functions like vmaCreateBuffer(), vmaCreateImage(), vmaDestroyBuffer(), vmaDestroyImage()
-+create/destroy an allocation and a buffer/image at once, these are just a shortcut for
-+creating the resource, allocating memory, and binding them together.
-+Defragmentation works on memory allocations only. You must handle the rest manually.
-+Defragmentation is an iterative process that should repreat "passes" as long as related functions
-+return `VK_INCOMPLETE` not `VK_SUCCESS`.
-+In each pass:
-+
-+1. vmaBeginDefragmentationPass() function call:
-+ - Calculates and returns the list of allocations to be moved in this pass.
-+ Note this can be a time-consuming process.
-+ - Reserves destination memory for them by creating temporary destination allocations
-+ that you can query for their `VkDeviceMemory` + offset using vmaGetAllocationInfo().
-+2. Inside the pass, **you should**:
-+ - Inspect the returned list of allocations to be moved.
-+ - Create new buffers/images and bind them at the returned destination temporary allocations.
-+ - Copy data from source to destination resources if necessary.
-+ - Destroy the source buffers/images, but NOT their allocations.
-+3. vmaEndDefragmentationPass() function call:
-+ - Frees the source memory reserved for the allocations that are moved.
-+ - Modifies source #VmaAllocation objects that are moved to point to the destination reserved memory.
-+ - Frees `VkDeviceMemory` blocks that became empty.
-+
-+Unlike in previous iterations of the defragmentation API, there is no list of "movable" allocations passed as a parameter.
-+Defragmentation algorithm tries to move all suitable allocations.
-+You can, however, refuse to move some of them inside a defragmentation pass, by setting
-+`pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
-+This is not recommended and may result in suboptimal packing of the allocations after defragmentation.
-+If you cannot ensure any allocation can be moved, it is better to keep movable allocations separate in a custom pool.
-+
-+Inside a pass, for each allocation that should be moved:
-+
-+- You should copy its data from the source to the destination place by calling e.g. `vkCmdCopyBuffer()`, `vkCmdCopyImage()`.
-+ - You need to make sure these commands finished executing before destroying the source buffers/images and before calling vmaEndDefragmentationPass().
-+- If a resource doesn't contain any meaningful data, e.g. it is a transient color attachment image to be cleared,
-+ filled, and used temporarily in each rendering frame, you can just recreate this image
-+ without copying its data.
-+- If the resource is in `HOST_VISIBLE` and `HOST_CACHED` memory, you can copy its data on the CPU
-+ using `memcpy()`.
-+- If you cannot move the allocation, you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE.
-+ This will cancel the move.
-+ - vmaEndDefragmentationPass() will then free the destination memory
-+ not the source memory of the allocation, leaving it unchanged.
-+- If you decide the allocation is unimportant and can be destroyed instead of moved (e.g. it wasn't used for long time),
-+ you can set `pass.pMoves[i].operation` to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY.
-+ - vmaEndDefragmentationPass() will then free both source and destination memory, and will destroy the source #VmaAllocation object.
-+
-+You can defragment a specific custom pool by setting VmaDefragmentationInfo::pool
-+(like in the example above) or all the default pools by setting this member to null.
-+
-+Defragmentation is always performed in each pool separately.
-+Allocations are never moved between different Vulkan memory types.
-+The size of the destination memory reserved for a moved allocation is the same as the original one.
-+Alignment of an allocation as it was determined using `vkGetBufferMemoryRequirements()` etc. is also respected after defragmentation.
-+Buffers/images should be recreated with the same `VkBufferCreateInfo` / `VkImageCreateInfo` parameters as the original ones.
-+
-+You can perform the defragmentation incrementally to limit the number of allocations and bytes to be moved
-+in each pass, e.g. to call it in sync with render frames and not to experience too big hitches.
-+See members: VmaDefragmentationInfo::maxBytesPerPass, VmaDefragmentationInfo::maxAllocationsPerPass.
-+
-+It is also safe to perform the defragmentation asynchronously to render frames and other Vulkan and VMA
-+usage, possibly from multiple threads, with the exception that allocations
-+returned in VmaDefragmentationPassMoveInfo::pMoves shouldn't be destroyed until the defragmentation pass is ended.
-+
-+<b>Mapping</b> is preserved on allocations that are moved during defragmentation.
-+Whether through #VMA_ALLOCATION_CREATE_MAPPED_BIT or vmaMapMemory(), the allocations
-+are mapped at their new place. Of course, pointer to the mapped data changes, so it needs to be queried
-+using VmaAllocationInfo::pMappedData.
-+
-+\note Defragmentation is not supported in custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT.
-+
-+
-+\page statistics Statistics
-+
-+This library contains several functions that return information about its internal state,
-+especially the amount of memory allocated from Vulkan.
-+
-+\section statistics_numeric_statistics Numeric statistics
-+
-+If you need to obtain basic statistics about memory usage per heap, together with current budget,
-+you can call function vmaGetHeapBudgets() and inspect structure #VmaBudget.
-+This is useful to keep track of memory usage and stay within budget
-+(see also \ref staying_within_budget).
-+Example:
-+
-+\code
-+uint32_t heapIndex = ...
-+
-+VmaBudget budgets[VK_MAX_MEMORY_HEAPS];
-+vmaGetHeapBudgets(allocator, budgets);
-+
-+printf("My heap currently has %u allocations taking %llu B,\n",
-+ budgets[heapIndex].statistics.allocationCount,
-+ budgets[heapIndex].statistics.allocationBytes);
-+printf("allocated out of %u Vulkan device memory blocks taking %llu B,\n",
-+ budgets[heapIndex].statistics.blockCount,
-+ budgets[heapIndex].statistics.blockBytes);
-+printf("Vulkan reports total usage %llu B with budget %llu B.\n",
-+ budgets[heapIndex].usage,
-+ budgets[heapIndex].budget);
-+\endcode
-+
-+You can query for more detailed statistics per memory heap, type, and totals,
-+including minimum and maximum allocation size and unused range size,
-+by calling function vmaCalculateStatistics() and inspecting structure #VmaTotalStatistics.
-+This function is slower though, as it has to traverse all the internal data structures,
-+so it should be used only for debugging purposes.
-+
-+You can query for statistics of a custom pool using function vmaGetPoolStatistics()
-+or vmaCalculatePoolStatistics().
-+
-+You can query for information about a specific allocation using function vmaGetAllocationInfo().
-+It fill structure #VmaAllocationInfo.
-+
-+\section statistics_json_dump JSON dump
-+
-+You can dump internal state of the allocator to a string in JSON format using function vmaBuildStatsString().
-+The result is guaranteed to be correct JSON.
-+It uses ANSI encoding.
-+Any strings provided by user (see [Allocation names](@ref allocation_names))
-+are copied as-is and properly escaped for JSON, so if they use UTF-8, ISO-8859-2 or any other encoding,
-+this JSON string can be treated as using this encoding.
-+It must be freed using function vmaFreeStatsString().
-+
-+The format of this JSON string is not part of official documentation of the library,
-+but it will not change in backward-incompatible way without increasing library major version number
-+and appropriate mention in changelog.
-+
-+The JSON string contains all the data that can be obtained using vmaCalculateStatistics().
-+It can also contain detailed map of allocated memory blocks and their regions -
-+free and occupied by allocations.
-+This allows e.g. to visualize the memory or assess fragmentation.
-+
-+
-+\page allocation_annotation Allocation names and user data
-+
-+\section allocation_user_data Allocation user data
-+
-+You can annotate allocations with your own information, e.g. for debugging purposes.
-+To do that, fill VmaAllocationCreateInfo::pUserData field when creating
-+an allocation. It is an opaque `void*` pointer. You can use it e.g. as a pointer,
-+some handle, index, key, ordinal number or any other value that would associate
-+the allocation with your custom metadata.
-+It is useful to identify appropriate data structures in your engine given #VmaAllocation,
-+e.g. when doing \ref defragmentation.
-+
-+\code
-+VkBufferCreateInfo bufCreateInfo = ...
-+
-+MyBufferMetadata* pMetadata = CreateBufferMetadata();
-+
-+VmaAllocationCreateInfo allocCreateInfo = {};
-+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-+allocCreateInfo.pUserData = pMetadata;
-+
-+VkBuffer buffer;
-+VmaAllocation allocation;
-+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buffer, &allocation, nullptr);
-+\endcode
-+
-+The pointer may be later retrieved as VmaAllocationInfo::pUserData:
-+
-+\code
-+VmaAllocationInfo allocInfo;
-+vmaGetAllocationInfo(allocator, allocation, &allocInfo);
-+MyBufferMetadata* pMetadata = (MyBufferMetadata*)allocInfo.pUserData;
-+\endcode
-+
-+It can also be changed using function vmaSetAllocationUserData().
-+
-+Values of (non-zero) allocations' `pUserData` are printed in JSON report created by
-+vmaBuildStatsString() in hexadecimal form.
-+
-+\section allocation_names Allocation names
-+
-+An allocation can also carry a null-terminated string, giving a name to the allocation.
-+To set it, call vmaSetAllocationName().
-+The library creates internal copy of the string, so the pointer you pass doesn't need
-+to be valid for whole lifetime of the allocation. You can free it after the call.
-+
-+\code
-+std::string imageName = "Texture: ";
-+imageName += fileName;
-+vmaSetAllocationName(allocator, allocation, imageName.c_str());
-+\endcode
-+
-+The string can be later retrieved by inspecting VmaAllocationInfo::pName.
-+It is also printed in JSON report created by vmaBuildStatsString().
-+
-+\note Setting string name to VMA allocation doesn't automatically set it to the Vulkan buffer or image created with it.
-+You must do it manually using an extension like VK_EXT_debug_utils, which is independent of this library.
-+
-+
-+\page virtual_allocator Virtual allocator
-+
-+As an extra feature, the core allocation algorithm of the library is exposed through a simple and convenient API of "virtual allocator".
-+It doesn't allocate any real GPU memory. It just keeps track of used and free regions of a "virtual block".
-+You can use it to allocate your own memory or other objects, even completely unrelated to Vulkan.
-+A common use case is sub-allocation of pieces of one large GPU buffer.
-+
-+\section virtual_allocator_creating_virtual_block Creating virtual block
-+
-+To use this functionality, there is no main "allocator" object.
-+You don't need to have #VmaAllocator object created.
-+All you need to do is to create a separate #VmaVirtualBlock object for each block of memory you want to be managed by the allocator:
-+
-+-# Fill in #VmaVirtualBlockCreateInfo structure.
-+-# Call vmaCreateVirtualBlock(). Get new #VmaVirtualBlock object.
-+
-+Example:
-+
-+\code
-+VmaVirtualBlockCreateInfo blockCreateInfo = {};
-+blockCreateInfo.size = 1048576; // 1 MB
-+
-+VmaVirtualBlock block;
-+VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);
-+\endcode
-+
-+\section virtual_allocator_making_virtual_allocations Making virtual allocations
-+
-+#VmaVirtualBlock object contains internal data structure that keeps track of free and occupied regions
-+using the same code as the main Vulkan memory allocator.
-+Similarly to #VmaAllocation for standard GPU allocations, there is #VmaVirtualAllocation type
-+that represents an opaque handle to an allocation within the virtual block.
-+
-+In order to make such allocation:
-+
-+-# Fill in #VmaVirtualAllocationCreateInfo structure.
-+-# Call vmaVirtualAllocate(). Get new #VmaVirtualAllocation object that represents the allocation.
-+ You can also receive `VkDeviceSize offset` that was assigned to the allocation.
-+
-+Example:
-+
-+\code
-+VmaVirtualAllocationCreateInfo allocCreateInfo = {};
-+allocCreateInfo.size = 4096; // 4 KB
-+
-+VmaVirtualAllocation alloc;
-+VkDeviceSize offset;
-+res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);
-+if(res == VK_SUCCESS)
-+{
-+ // Use the 4 KB of your memory starting at offset.
-+}
-+else
-+{
-+ // Allocation failed - no space for it could be found. Handle this error!
-+}
-+\endcode
-+
-+\section virtual_allocator_deallocation Deallocation
-+
-+When no longer needed, an allocation can be freed by calling vmaVirtualFree().
-+You can only pass to this function an allocation that was previously returned by vmaVirtualAllocate()
-+called for the same #VmaVirtualBlock.
-+
-+When whole block is no longer needed, the block object can be released by calling vmaDestroyVirtualBlock().
-+All allocations must be freed before the block is destroyed, which is checked internally by an assert.
-+However, if you don't want to call vmaVirtualFree() for each allocation, you can use vmaClearVirtualBlock() to free them all at once -
-+a feature not available in normal Vulkan memory allocator. Example:
-+
-+\code
-+vmaVirtualFree(block, alloc);
-+vmaDestroyVirtualBlock(block);
-+\endcode
-+
-+\section virtual_allocator_allocation_parameters Allocation parameters
-+
-+You can attach a custom pointer to each allocation by using vmaSetVirtualAllocationUserData().
-+Its default value is null.
-+It can be used to store any data that needs to be associated with that allocation - e.g. an index, a handle, or a pointer to some
-+larger data structure containing more information. Example:
-+
-+\code
-+struct CustomAllocData
-+{
-+ std::string m_AllocName;
-+};
-+CustomAllocData* allocData = new CustomAllocData();
-+allocData->m_AllocName = "My allocation 1";
-+vmaSetVirtualAllocationUserData(block, alloc, allocData);
-+\endcode
-+
-+The pointer can later be fetched, along with allocation offset and size, by passing the allocation handle to function
-+vmaGetVirtualAllocationInfo() and inspecting returned structure #VmaVirtualAllocationInfo.
-+If you allocated a new object to be used as the custom pointer, don't forget to delete that object before freeing the allocation!
-+Example:
-+
-+\code
-+VmaVirtualAllocationInfo allocInfo;
-+vmaGetVirtualAllocationInfo(block, alloc, &allocInfo);
-+delete (CustomAllocData*)allocInfo.pUserData;
-+
-+vmaVirtualFree(block, alloc);
-+\endcode
-+
-+\section virtual_allocator_alignment_and_units Alignment and units
-+
-+It feels natural to express sizes and offsets in bytes.
-+If an offset of an allocation needs to be aligned to a multiply of some number (e.g. 4 bytes), you can fill optional member
-+VmaVirtualAllocationCreateInfo::alignment to request it. Example:
-+
-+\code
-+VmaVirtualAllocationCreateInfo allocCreateInfo = {};
-+allocCreateInfo.size = 4096; // 4 KB
-+allocCreateInfo.alignment = 4; // Returned offset must be a multiply of 4 B
-+
-+VmaVirtualAllocation alloc;
-+res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, nullptr);
-+\endcode
-+
-+Alignments of different allocations made from one block may vary.
-+However, if all alignments and sizes are always multiply of some size e.g. 4 B or `sizeof(MyDataStruct)`,
-+you can express all sizes, alignments, and offsets in multiples of that size instead of individual bytes.
-+It might be more convenient, but you need to make sure to use this new unit consistently in all the places:
-+
-+- VmaVirtualBlockCreateInfo::size
-+- VmaVirtualAllocationCreateInfo::size and VmaVirtualAllocationCreateInfo::alignment
-+- Using offset returned by vmaVirtualAllocate() or in VmaVirtualAllocationInfo::offset
-+
-+\section virtual_allocator_statistics Statistics
-+
-+You can obtain statistics of a virtual block using vmaGetVirtualBlockStatistics()
-+(to get brief statistics that are fast to calculate)
-+or vmaCalculateVirtualBlockStatistics() (to get more detailed statistics, slower to calculate).
-+The functions fill structures #VmaStatistics, #VmaDetailedStatistics respectively - same as used by the normal Vulkan memory allocator.
-+Example:
-+
-+\code
-+VmaStatistics stats;
-+vmaGetVirtualBlockStatistics(block, &stats);
-+printf("My virtual block has %llu bytes used by %u virtual allocations\n",
-+ stats.allocationBytes, stats.allocationCount);
-+\endcode
-+
-+You can also request a full list of allocations and free regions as a string in JSON format by calling
-+vmaBuildVirtualBlockStatsString().
-+Returned string must be later freed using vmaFreeVirtualBlockStatsString().
-+The format of this string differs from the one returned by the main Vulkan allocator, but it is similar.
-+
-+\section virtual_allocator_additional_considerations Additional considerations
-+
-+The "virtual allocator" functionality is implemented on a level of individual memory blocks.
-+Keeping track of a whole collection of blocks, allocating new ones when out of free space,
-+deleting empty ones, and deciding which one to try first for a new allocation must be implemented by the user.
-+
-+Alternative allocation algorithms are supported, just like in custom pools of the real GPU memory.
-+See enum #VmaVirtualBlockCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT).
-+You can find their description in chapter \ref custom_memory_pools.
-+Allocation strategies are also supported.
-+See enum #VmaVirtualAllocationCreateFlagBits to learn how to specify them (e.g. #VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT).
-+
-+Following features are supported only by the allocator of the real GPU memory and not by virtual allocations:
-+buffer-image granularity, `VMA_DEBUG_MARGIN`, `VMA_MIN_ALIGNMENT`.
-+
-+
-+\page debugging_memory_usage Debugging incorrect memory usage
-+
-+If you suspect a bug with memory usage, like usage of uninitialized memory or
-+memory being overwritten out of bounds of an allocation,
-+you can use debug features of this library to verify this.
-+
-+\section debugging_memory_usage_initialization Memory initialization
-+
-+If you experience a bug with incorrect and nondeterministic data in your program and you suspect uninitialized memory to be used,
-+you can enable automatic memory initialization to verify this.
-+To do it, define macro `VMA_DEBUG_INITIALIZE_ALLOCATIONS` to 1.
-+
-+\code
-+#define VMA_DEBUG_INITIALIZE_ALLOCATIONS 1
-+#include "vk_mem_alloc.h"
-+\endcode
-+
-+It makes memory of new allocations initialized to bit pattern `0xDCDCDCDC`.
-+Before an allocation is destroyed, its memory is filled with bit pattern `0xEFEFEFEF`.
-+Memory is automatically mapped and unmapped if necessary.
-+
-+If you find these values while debugging your program, good chances are that you incorrectly
-+read Vulkan memory that is allocated but not initialized, or already freed, respectively.
-+
-+Memory initialization works only with memory types that are `HOST_VISIBLE` and with allocations that can be mapped.
-+It works also with dedicated allocations.
-+
-+\section debugging_memory_usage_margins Margins
-+
-+By default, allocations are laid out in memory blocks next to each other if possible
-+(considering required alignment, `bufferImageGranularity`, and `nonCoherentAtomSize`).
-+
-+![Allocations without margin](../gfx/Margins_1.png)
-+
-+Define macro `VMA_DEBUG_MARGIN` to some non-zero value (e.g. 16) to enforce specified
-+number of bytes as a margin after every allocation.
-+
-+\code
-+#define VMA_DEBUG_MARGIN 16
-+#include "vk_mem_alloc.h"
-+\endcode
-+
-+![Allocations with margin](../gfx/Margins_2.png)
-+
-+If your bug goes away after enabling margins, it means it may be caused by memory
-+being overwritten outside of allocation boundaries. It is not 100% certain though.
-+Change in application behavior may also be caused by different order and distribution
-+of allocations across memory blocks after margins are applied.
-+
-+Margins work with all types of memory.
-+
-+Margin is applied only to allocations made out of memory blocks and not to dedicated
-+allocations, which have their own memory block of specific size.
-+It is thus not applied to allocations made using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT flag
-+or those automatically decided to put into dedicated allocations, e.g. due to its
-+large size or recommended by VK_KHR_dedicated_allocation extension.
-+
-+Margins appear in [JSON dump](@ref statistics_json_dump) as part of free space.
-+
-+Note that enabling margins increases memory usage and fragmentation.
-+
-+Margins do not apply to \ref virtual_allocator.
-+
-+\section debugging_memory_usage_corruption_detection Corruption detection
-+
-+You can additionally define macro `VMA_DEBUG_DETECT_CORRUPTION` to 1 to enable validation
-+of contents of the margins.
-+
-+\code
-+#define VMA_DEBUG_MARGIN 16
-+#define VMA_DEBUG_DETECT_CORRUPTION 1
-+#include "vk_mem_alloc.h"
-+\endcode
-+
-+When this feature is enabled, number of bytes specified as `VMA_DEBUG_MARGIN`
-+(it must be multiply of 4) after every allocation is filled with a magic number.
-+This idea is also know as "canary".
-+Memory is automatically mapped and unmapped if necessary.
-+
-+This number is validated automatically when the allocation is destroyed.
-+If it is not equal to the expected value, `VMA_ASSERT()` is executed.
-+It clearly means that either CPU or GPU overwritten the memory outside of boundaries of the allocation,
-+which indicates a serious bug.
-+
-+You can also explicitly request checking margins of all allocations in all memory blocks
-+that belong to specified memory types by using function vmaCheckCorruption(),
-+or in memory blocks that belong to specified custom pool, by using function
-+vmaCheckPoolCorruption().
-+
-+Margin validation (corruption detection) works only for memory types that are
-+`HOST_VISIBLE` and `HOST_COHERENT`.
-+
-+
-+\page opengl_interop OpenGL Interop
-+
-+VMA provides some features that help with interoperability with OpenGL.
-+
-+\section opengl_interop_exporting_memory Exporting memory
-+
-+If you want to attach `VkExportMemoryAllocateInfoKHR` structure to `pNext` chain of memory allocations made by the library:
-+
-+It is recommended to create \ref custom_memory_pools for such allocations.
-+Define and fill in your `VkExportMemoryAllocateInfoKHR` structure and attach it to VmaPoolCreateInfo::pMemoryAllocateNext
-+while creating the custom pool.
-+Please note that the structure must remain alive and unchanged for the whole lifetime of the #VmaPool,
-+not only while creating it, as no copy of the structure is made,
-+but its original pointer is used for each allocation instead.
-+
-+If you want to export all memory allocated by the library from certain memory types,
-+also dedicated allocations or other allocations made from default pools,
-+an alternative solution is to fill in VmaAllocatorCreateInfo::pTypeExternalMemoryHandleTypes.
-+It should point to an array with `VkExternalMemoryHandleTypeFlagsKHR` to be automatically passed by the library
-+through `VkExportMemoryAllocateInfoKHR` on each allocation made from a specific memory type.
-+Please note that new versions of the library also support dedicated allocations created in custom pools.
-+
-+You should not mix these two methods in a way that allows to apply both to the same memory type.
-+Otherwise, `VkExportMemoryAllocateInfoKHR` structure would be attached twice to the `pNext` chain of `VkMemoryAllocateInfo`.
-+
-+
-+\section opengl_interop_custom_alignment Custom alignment
-+
-+Buffers or images exported to a different API like OpenGL may require a different alignment,
-+higher than the one used by the library automatically, queried from functions like `vkGetBufferMemoryRequirements`.
-+To impose such alignment:
-+
-+It is recommended to create \ref custom_memory_pools for such allocations.
-+Set VmaPoolCreateInfo::minAllocationAlignment member to the minimum alignment required for each allocation
-+to be made out of this pool.
-+The alignment actually used will be the maximum of this member and the alignment returned for the specific buffer or image
-+from a function like `vkGetBufferMemoryRequirements`, which is called by VMA automatically.
-+
-+If you want to create a buffer with a specific minimum alignment out of default pools,
-+use special function vmaCreateBufferWithAlignment(), which takes additional parameter `minAlignment`.
-+
-+Note the problem of alignment affects only resources placed inside bigger `VkDeviceMemory` blocks and not dedicated
-+allocations, as these, by definition, always have alignment = 0 because the resource is bound to the beginning of its dedicated block.
-+Contrary to Direct3D 12, Vulkan doesn't have a concept of alignment of the entire memory block passed on its allocation.
-+
-+
-+\page usage_patterns Recommended usage patterns
-+
-+Vulkan gives great flexibility in memory allocation.
-+This chapter shows the most common patterns.
-+
-+See also slides from talk:
-+[Sawicki, Adam. Advanced Graphics Techniques Tutorial: Memory management in Vulkan and DX12. Game Developers Conference, 2018](https://www.gdcvault.com/play/1025458/Advanced-Graphics-Techniques-Tutorial-New)
-+
-+
-+\section usage_patterns_gpu_only GPU-only resource
-+
-+<b>When:</b>
-+Any resources that you frequently write and read on GPU,
-+e.g. images used as color attachments (aka "render targets"), depth-stencil attachments,
-+images/buffers used as storage image/buffer (aka "Unordered Access View (UAV)").
-+
-+<b>What to do:</b>
-+Let the library select the optimal memory type, which will likely have `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`.
-+
-+\code
-+VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
-+imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
-+imgCreateInfo.extent.width = 3840;
-+imgCreateInfo.extent.height = 2160;
-+imgCreateInfo.extent.depth = 1;
-+imgCreateInfo.mipLevels = 1;
-+imgCreateInfo.arrayLayers = 1;
-+imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
-+imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
-+imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-+imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
-+imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
-+
-+VmaAllocationCreateInfo allocCreateInfo = {};
-+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
-+allocCreateInfo.priority = 1.0f;
-+
-+VkImage img;
-+VmaAllocation alloc;
-+vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
-+\endcode
-+
-+<b>Also consider:</b>
-+Consider creating them as dedicated allocations using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT,
-+especially if they are large or if you plan to destroy and recreate them with different sizes
-+e.g. when display resolution changes.
-+Prefer to create such resources first and all other GPU resources (like textures and vertex buffers) later.
-+When VK_EXT_memory_priority extension is enabled, it is also worth setting high priority to such allocation
-+to decrease chances to be evicted to system memory by the operating system.
-+
-+\section usage_patterns_staging_copy_upload Staging copy for upload
-+
-+<b>When:</b>
-+A "staging" buffer than you want to map and fill from CPU code, then use as a source of transfer
-+to some GPU resource.
-+
-+<b>What to do:</b>
-+Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT.
-+Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`.
-+
-+\code
-+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-+bufCreateInfo.size = 65536;
-+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
-+
-+VmaAllocationCreateInfo allocCreateInfo = {};
-+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
-+ VMA_ALLOCATION_CREATE_MAPPED_BIT;
-+
-+VkBuffer buf;
-+VmaAllocation alloc;
-+VmaAllocationInfo allocInfo;
-+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-+
-+...
-+
-+memcpy(allocInfo.pMappedData, myData, myDataSize);
-+\endcode
-+
-+<b>Also consider:</b>
-+You can map the allocation using vmaMapMemory() or you can create it as persistenly mapped
-+using #VMA_ALLOCATION_CREATE_MAPPED_BIT, as in the example above.
-+
-+
-+\section usage_patterns_readback Readback
-+
-+<b>When:</b>
-+Buffers for data written by or transferred from the GPU that you want to read back on the CPU,
-+e.g. results of some computations.
-+
-+<b>What to do:</b>
-+Use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT.
-+Let the library select the optimal memory type, which will always have `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`
-+and `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`.
-+
-+\code
-+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-+bufCreateInfo.size = 65536;
-+bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-+
-+VmaAllocationCreateInfo allocCreateInfo = {};
-+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT |
-+ VMA_ALLOCATION_CREATE_MAPPED_BIT;
-+
-+VkBuffer buf;
-+VmaAllocation alloc;
-+VmaAllocationInfo allocInfo;
-+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-+
-+...
-+
-+const float* downloadedData = (const float*)allocInfo.pMappedData;
-+\endcode
-+
-+
-+\section usage_patterns_advanced_data_uploading Advanced data uploading
-+
-+For resources that you frequently write on CPU via mapped pointer and
-+frequently read on GPU e.g. as a uniform buffer (also called "dynamic"), multiple options are possible:
-+
-+-# Easiest solution is to have one copy of the resource in `HOST_VISIBLE` memory,
-+ even if it means system RAM (not `DEVICE_LOCAL`) on systems with a discrete graphics card,
-+ and make the device reach out to that resource directly.
-+ - Reads performed by the device will then go through PCI Express bus.
-+ The performance of this access may be limited, but it may be fine depending on the size
-+ of this resource (whether it is small enough to quickly end up in GPU cache) and the sparsity
-+ of access.
-+-# On systems with unified memory (e.g. AMD APU or Intel integrated graphics, mobile chips),
-+ a memory type may be available that is both `HOST_VISIBLE` (available for mapping) and `DEVICE_LOCAL`
-+ (fast to access from the GPU). Then, it is likely the best choice for such type of resource.
-+-# Systems with a discrete graphics card and separate video memory may or may not expose
-+ a memory type that is both `HOST_VISIBLE` and `DEVICE_LOCAL`, also known as Base Address Register (BAR).
-+ If they do, it represents a piece of VRAM (or entire VRAM, if ReBAR is enabled in the motherboard BIOS)
-+ that is available to CPU for mapping.
-+ - Writes performed by the host to that memory go through PCI Express bus.
-+ The performance of these writes may be limited, but it may be fine, especially on PCIe 4.0,
-+ as long as rules of using uncached and write-combined memory are followed - only sequential writes and no reads.
-+-# Finally, you may need or prefer to create a separate copy of the resource in `DEVICE_LOCAL` memory,
-+ a separate "staging" copy in `HOST_VISIBLE` memory and perform an explicit transfer command between them.
-+
-+Thankfully, VMA offers an aid to create and use such resources in the the way optimal
-+for the current Vulkan device. To help the library make the best choice,
-+use flag #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT together with
-+#VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT.
-+It will then prefer a memory type that is both `DEVICE_LOCAL` and `HOST_VISIBLE` (integrated memory or BAR),
-+but if no such memory type is available or allocation from it fails
-+(PC graphics cards have only 256 MB of BAR by default, unless ReBAR is supported and enabled in BIOS),
-+it will fall back to `DEVICE_LOCAL` memory for fast GPU access.
-+It is then up to you to detect that the allocation ended up in a memory type that is not `HOST_VISIBLE`,
-+so you need to create another "staging" allocation and perform explicit transfers.
-+
-+\code
-+VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-+bufCreateInfo.size = 65536;
-+bufCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;
-+
-+VmaAllocationCreateInfo allocCreateInfo = {};
-+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
-+ VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT |
-+ VMA_ALLOCATION_CREATE_MAPPED_BIT;
-+
-+VkBuffer buf;
-+VmaAllocation alloc;
-+VmaAllocationInfo allocInfo;
-+vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, &allocInfo);
-+
-+VkMemoryPropertyFlags memPropFlags;
-+vmaGetAllocationMemoryProperties(allocator, alloc, &memPropFlags);
-+
-+if(memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
-+{
-+ // Allocation ended up in a mappable memory and is already mapped - write to it directly.
-+
-+ // [Executed in runtime]:
-+ memcpy(allocInfo.pMappedData, myData, myDataSize);
-+}
-+else
-+{
-+ // Allocation ended up in a non-mappable memory - need to transfer.
-+ VkBufferCreateInfo stagingBufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
-+ stagingBufCreateInfo.size = 65536;
-+ stagingBufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
-+
-+ VmaAllocationCreateInfo stagingAllocCreateInfo = {};
-+ stagingAllocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-+ stagingAllocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT |
-+ VMA_ALLOCATION_CREATE_MAPPED_BIT;
-+
-+ VkBuffer stagingBuf;
-+ VmaAllocation stagingAlloc;
-+ VmaAllocationInfo stagingAllocInfo;
-+ vmaCreateBuffer(allocator, &stagingBufCreateInfo, &stagingAllocCreateInfo,
-+ &stagingBuf, &stagingAlloc, stagingAllocInfo);
-+
-+ // [Executed in runtime]:
-+ memcpy(stagingAllocInfo.pMappedData, myData, myDataSize);
-+ vmaFlushAllocation(allocator, stagingAlloc, 0, VK_WHOLE_SIZE);
-+ //vkCmdPipelineBarrier: VK_ACCESS_HOST_WRITE_BIT --> VK_ACCESS_TRANSFER_READ_BIT
-+ VkBufferCopy bufCopy = {
-+ 0, // srcOffset
-+ 0, // dstOffset,
-+ myDataSize); // size
-+ vkCmdCopyBuffer(cmdBuf, stagingBuf, buf, 1, &bufCopy);
-+}
-+\endcode
-+
-+\section usage_patterns_other_use_cases Other use cases
-+
-+Here are some other, less obvious use cases and their recommended settings:
-+
-+- An image that is used only as transfer source and destination, but it should stay on the device,
-+ as it is used to temporarily store a copy of some texture, e.g. from the current to the next frame,
-+ for temporal antialiasing or other temporal effects.
-+ - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
-+ - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO
-+- An image that is used only as transfer source and destination, but it should be placed
-+ in the system RAM despite it doesn't need to be mapped, because it serves as a "swap" copy to evict
-+ least recently used textures from VRAM.
-+ - Use `VkImageCreateInfo::usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT`
-+ - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_HOST,
-+ as VMA needs a hint here to differentiate from the previous case.
-+- A buffer that you want to map and write from the CPU, directly read from the GPU
-+ (e.g. as a uniform or vertex buffer), but you have a clear preference to place it in device or
-+ host memory due to its large size.
-+ - Use `VkBufferCreateInfo::usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT`
-+ - Use VmaAllocationCreateInfo::usage = #VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE or #VMA_MEMORY_USAGE_AUTO_PREFER_HOST
-+ - Use VmaAllocationCreateInfo::flags = #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT
-+
-+
-+\page configuration Configuration
-+
-+Please check "CONFIGURATION SECTION" in the code to find macros that you can define
-+before each include of this file or change directly in this file to provide
-+your own implementation of basic facilities like assert, `min()` and `max()` functions,
-+mutex, atomic etc.
-+The library uses its own implementation of containers by default, but you can switch to using
-+STL containers instead.
-+
-+For example, define `VMA_ASSERT(expr)` before including the library to provide
-+custom implementation of the assertion, compatible with your project.
-+By default it is defined to standard C `assert(expr)` in `_DEBUG` configuration
-+and empty otherwise.
-+
-+\section config_Vulkan_functions Pointers to Vulkan functions
-+
-+There are multiple ways to import pointers to Vulkan functions in the library.
-+In the simplest case you don't need to do anything.
-+If the compilation or linking of your program or the initialization of the #VmaAllocator
-+doesn't work for you, you can try to reconfigure it.
-+
-+First, the allocator tries to fetch pointers to Vulkan functions linked statically,
-+like this:
-+
-+\code
-+m_VulkanFunctions.vkAllocateMemory = (PFN_vkAllocateMemory)vkAllocateMemory;
-+\endcode
-+
-+If you want to disable this feature, set configuration macro: `#define VMA_STATIC_VULKAN_FUNCTIONS 0`.
-+
-+Second, you can provide the pointers yourself by setting member VmaAllocatorCreateInfo::pVulkanFunctions.
-+You can fetch them e.g. using functions `vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` or
-+by using a helper library like [volk](https://github.com/zeux/volk).
-+
-+Third, VMA tries to fetch remaining pointers that are still null by calling
-+`vkGetInstanceProcAddr` and `vkGetDeviceProcAddr` on its own.
-+You need to only fill in VmaVulkanFunctions::vkGetInstanceProcAddr and VmaVulkanFunctions::vkGetDeviceProcAddr.
-+Other pointers will be fetched automatically.
-+If you want to disable this feature, set configuration macro: `#define VMA_DYNAMIC_VULKAN_FUNCTIONS 0`.
-+
-+Finally, all the function pointers required by the library (considering selected
-+Vulkan version and enabled extensions) are checked with `VMA_ASSERT` if they are not null.
-+
-+
-+\section custom_memory_allocator Custom host memory allocator
-+
-+If you use custom allocator for CPU memory rather than default operator `new`
-+and `delete` from C++, you can make this library using your allocator as well
-+by filling optional member VmaAllocatorCreateInfo::pAllocationCallbacks. These
-+functions will be passed to Vulkan, as well as used by the library itself to
-+make any CPU-side allocations.
-+
-+\section allocation_callbacks Device memory allocation callbacks
-+
-+The library makes calls to `vkAllocateMemory()` and `vkFreeMemory()` internally.
-+You can setup callbacks to be informed about these calls, e.g. for the purpose
-+of gathering some statistics. To do it, fill optional member
-+VmaAllocatorCreateInfo::pDeviceMemoryCallbacks.
-+
-+\section heap_memory_limit Device heap memory limit
-+
-+When device memory of certain heap runs out of free space, new allocations may
-+fail (returning error code) or they may succeed, silently pushing some existing_
-+memory blocks from GPU VRAM to system RAM (which degrades performance). This
-+behavior is implementation-dependent - it depends on GPU vendor and graphics
-+driver.
-+
-+On AMD cards it can be controlled while creating Vulkan device object by using
-+VK_AMD_memory_overallocation_behavior extension, if available.
-+
-+Alternatively, if you want to test how your program behaves with limited amount of Vulkan device
-+memory available without switching your graphics card to one that really has
-+smaller VRAM, you can use a feature of this library intended for this purpose.
-+To do it, fill optional member VmaAllocatorCreateInfo::pHeapSizeLimit.
-+
-+
-+
-+\page vk_khr_dedicated_allocation VK_KHR_dedicated_allocation
-+
-+VK_KHR_dedicated_allocation is a Vulkan extension which can be used to improve
-+performance on some GPUs. It augments Vulkan API with possibility to query
-+driver whether it prefers particular buffer or image to have its own, dedicated
-+allocation (separate `VkDeviceMemory` block) for better efficiency - to be able
-+to do some internal optimizations. The extension is supported by this library.
-+It will be used automatically when enabled.
-+
-+It has been promoted to core Vulkan 1.1, so if you use eligible Vulkan version
-+and inform VMA about it by setting VmaAllocatorCreateInfo::vulkanApiVersion,
-+you are all set.
-+
-+Otherwise, if you want to use it as an extension:
-+
-+1 . When creating Vulkan device, check if following 2 device extensions are
-+supported (call `vkEnumerateDeviceExtensionProperties()`).
-+If yes, enable them (fill `VkDeviceCreateInfo::ppEnabledExtensionNames`).
-+
-+- VK_KHR_get_memory_requirements2
-+- VK_KHR_dedicated_allocation
-+
-+If you enabled these extensions:
-+
-+2 . Use #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag when creating
-+your #VmaAllocator to inform the library that you enabled required extensions
-+and you want the library to use them.
-+
-+\code
-+allocatorInfo.flags |= VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
-+
-+vmaCreateAllocator(&allocatorInfo, &allocator);
-+\endcode
-+
-+That is all. The extension will be automatically used whenever you create a
-+buffer using vmaCreateBuffer() or image using vmaCreateImage().
-+
-+When using the extension together with Vulkan Validation Layer, you will receive
-+warnings like this:
-+
-+_vkBindBufferMemory(): Binding memory to buffer 0x33 but vkGetBufferMemoryRequirements() has not been called on that buffer._
-+
-+It is OK, you should just ignore it. It happens because you use function
-+`vkGetBufferMemoryRequirements2KHR()` instead of standard
-+`vkGetBufferMemoryRequirements()`, while the validation layer seems to be
-+unaware of it.
-+
-+To learn more about this extension, see:
-+
-+- [VK_KHR_dedicated_allocation in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap50.html#VK_KHR_dedicated_allocation)
-+- [VK_KHR_dedicated_allocation unofficial manual](http://asawicki.info/articles/VK_KHR_dedicated_allocation.php5)
-+
-+
-+
-+\page vk_ext_memory_priority VK_EXT_memory_priority
-+
-+VK_EXT_memory_priority is a device extension that allows to pass additional "priority"
-+value to Vulkan memory allocations that the implementation may use prefer certain
-+buffers and images that are critical for performance to stay in device-local memory
-+in cases when the memory is over-subscribed, while some others may be moved to the system memory.
-+
-+VMA offers convenient usage of this extension.
-+If you enable it, you can pass "priority" parameter when creating allocations or custom pools
-+and the library automatically passes the value to Vulkan using this extension.
-+
-+If you want to use this extension in connection with VMA, follow these steps:
-+
-+\section vk_ext_memory_priority_initialization Initialization
-+
-+1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
-+Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_EXT_memory_priority".
-+
-+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
-+Attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
-+Check if the device feature is really supported - check if `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority` is true.
-+
-+3) While creating device with `vkCreateDevice`, enable this extension - add "VK_EXT_memory_priority"
-+to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
-+
-+4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
-+Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
-+Enable this device feature - attach additional structure `VkPhysicalDeviceMemoryPriorityFeaturesEXT` to
-+`VkPhysicalDeviceFeatures2::pNext` chain and set its member `memoryPriority` to `VK_TRUE`.
-+
-+5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
-+have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT
-+to VmaAllocatorCreateInfo::flags.
-+
-+\section vk_ext_memory_priority_usage Usage
-+
-+When using this extension, you should initialize following member:
-+
-+- VmaAllocationCreateInfo::priority when creating a dedicated allocation with #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
-+- VmaPoolCreateInfo::priority when creating a custom pool.
-+
-+It should be a floating-point value between `0.0f` and `1.0f`, where recommended default is `0.5f`.
-+Memory allocated with higher value can be treated by the Vulkan implementation as higher priority
-+and so it can have lower chances of being pushed out to system memory, experiencing degraded performance.
-+
-+It might be a good idea to create performance-critical resources like color-attachment or depth-stencil images
-+as dedicated and set high priority to them. For example:
-+
-+\code
-+VkImageCreateInfo imgCreateInfo = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
-+imgCreateInfo.imageType = VK_IMAGE_TYPE_2D;
-+imgCreateInfo.extent.width = 3840;
-+imgCreateInfo.extent.height = 2160;
-+imgCreateInfo.extent.depth = 1;
-+imgCreateInfo.mipLevels = 1;
-+imgCreateInfo.arrayLayers = 1;
-+imgCreateInfo.format = VK_FORMAT_R8G8B8A8_UNORM;
-+imgCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
-+imgCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
-+imgCreateInfo.usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
-+imgCreateInfo.samples = VK_SAMPLE_COUNT_1_BIT;
-+
-+VmaAllocationCreateInfo allocCreateInfo = {};
-+allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
-+allocCreateInfo.flags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
-+allocCreateInfo.priority = 1.0f;
-+
-+VkImage img;
-+VmaAllocation alloc;
-+vmaCreateImage(allocator, &imgCreateInfo, &allocCreateInfo, &img, &alloc, nullptr);
-+\endcode
-+
-+`priority` member is ignored in the following situations:
-+
-+- Allocations created in custom pools: They inherit the priority, along with all other allocation parameters
-+ from the parametrs passed in #VmaPoolCreateInfo when the pool was created.
-+- Allocations created in default pools: They inherit the priority from the parameters
-+ VMA used when creating default pools, which means `priority == 0.5f`.
-+
-+
-+\page vk_amd_device_coherent_memory VK_AMD_device_coherent_memory
-+
-+VK_AMD_device_coherent_memory is a device extension that enables access to
-+additional memory types with `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and
-+`VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flag. It is useful mostly for
-+allocation of buffers intended for writing "breadcrumb markers" in between passes
-+or draw calls, which in turn are useful for debugging GPU crash/hang/TDR cases.
-+
-+When the extension is available but has not been enabled, Vulkan physical device
-+still exposes those memory types, but their usage is forbidden. VMA automatically
-+takes care of that - it returns `VK_ERROR_FEATURE_NOT_PRESENT` when an attempt
-+to allocate memory of such type is made.
-+
-+If you want to use this extension in connection with VMA, follow these steps:
-+
-+\section vk_amd_device_coherent_memory_initialization Initialization
-+
-+1) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
-+Check if the extension is supported - if returned array of `VkExtensionProperties` contains "VK_AMD_device_coherent_memory".
-+
-+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
-+Attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
-+Check if the device feature is really supported - check if `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true.
-+
-+3) While creating device with `vkCreateDevice`, enable this extension - add "VK_AMD_device_coherent_memory"
-+to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
-+
-+4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
-+Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
-+Enable this device feature - attach additional structure `VkPhysicalDeviceCoherentMemoryFeaturesAMD` to
-+`VkPhysicalDeviceFeatures2::pNext` and set its member `deviceCoherentMemory` to `VK_TRUE`.
-+
-+5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
-+have enabled this extension and feature - add #VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT
-+to VmaAllocatorCreateInfo::flags.
-+
-+\section vk_amd_device_coherent_memory_usage Usage
-+
-+After following steps described above, you can create VMA allocations and custom pools
-+out of the special `DEVICE_COHERENT` and `DEVICE_UNCACHED` memory types on eligible
-+devices. There are multiple ways to do it, for example:
-+
-+- You can request or prefer to allocate out of such memory types by adding
-+ `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` to VmaAllocationCreateInfo::requiredFlags
-+ or VmaAllocationCreateInfo::preferredFlags. Those flags can be freely mixed with
-+ other ways of \ref choosing_memory_type, like setting VmaAllocationCreateInfo::usage.
-+- If you manually found memory type index to use for this purpose, force allocation
-+ from this specific index by setting VmaAllocationCreateInfo::memoryTypeBits `= 1u << index`.
-+
-+\section vk_amd_device_coherent_memory_more_information More information
-+
-+To learn more about this extension, see [VK_AMD_device_coherent_memory in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/man/html/VK_AMD_device_coherent_memory.html)
-+
-+Example use of this extension can be found in the code of the sample and test suite
-+accompanying this library.
-+
-+
-+\page enabling_buffer_device_address Enabling buffer device address
-+
-+Device extension VK_KHR_buffer_device_address
-+allow to fetch raw GPU pointer to a buffer and pass it for usage in a shader code.
-+It has been promoted to core Vulkan 1.2.
-+
-+If you want to use this feature in connection with VMA, follow these steps:
-+
-+\section enabling_buffer_device_address_initialization Initialization
-+
-+1) (For Vulkan version < 1.2) Call `vkEnumerateDeviceExtensionProperties` for the physical device.
-+Check if the extension is supported - if returned array of `VkExtensionProperties` contains
-+"VK_KHR_buffer_device_address".
-+
-+2) Call `vkGetPhysicalDeviceFeatures2` for the physical device instead of old `vkGetPhysicalDeviceFeatures`.
-+Attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to `VkPhysicalDeviceFeatures2::pNext` to be returned.
-+Check if the device feature is really supported - check if `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress` is true.
-+
-+3) (For Vulkan version < 1.2) While creating device with `vkCreateDevice`, enable this extension - add
-+"VK_KHR_buffer_device_address" to the list passed as `VkDeviceCreateInfo::ppEnabledExtensionNames`.
-+
-+4) While creating the device, also don't set `VkDeviceCreateInfo::pEnabledFeatures`.
-+Fill in `VkPhysicalDeviceFeatures2` structure instead and pass it as `VkDeviceCreateInfo::pNext`.
-+Enable this device feature - attach additional structure `VkPhysicalDeviceBufferDeviceAddressFeatures*` to
-+`VkPhysicalDeviceFeatures2::pNext` and set its member `bufferDeviceAddress` to `VK_TRUE`.
-+
-+5) While creating #VmaAllocator with vmaCreateAllocator() inform VMA that you
-+have enabled this feature - add #VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT
-+to VmaAllocatorCreateInfo::flags.
-+
-+\section enabling_buffer_device_address_usage Usage
-+
-+After following steps described above, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*` using VMA.
-+The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT*` to
-+allocated memory blocks wherever it might be needed.
-+
-+Please note that the library supports only `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT*`.
-+The second part of this functionality related to "capture and replay" is not supported,
-+as it is intended for usage in debugging tools like RenderDoc, not in everyday Vulkan usage.
-+
-+\section enabling_buffer_device_address_more_information More information
-+
-+To learn more about this extension, see [VK_KHR_buffer_device_address in Vulkan specification](https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap46.html#VK_KHR_buffer_device_address)
-+
-+Example use of this extension can be found in the code of the sample and test suite
-+accompanying this library.
-+
-+\page general_considerations General considerations
-+
-+\section general_considerations_thread_safety Thread safety
-+
-+- The library has no global state, so separate #VmaAllocator objects can be used
-+ independently.
-+ There should be no need to create multiple such objects though - one per `VkDevice` is enough.
-+- By default, all calls to functions that take #VmaAllocator as first parameter
-+ are safe to call from multiple threads simultaneously because they are
-+ synchronized internally when needed.
-+ This includes allocation and deallocation from default memory pool, as well as custom #VmaPool.
-+- When the allocator is created with #VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT
-+ flag, calls to functions that take such #VmaAllocator object must be
-+ synchronized externally.
-+- Access to a #VmaAllocation object must be externally synchronized. For example,
-+ you must not call vmaGetAllocationInfo() and vmaMapMemory() from different
-+ threads at the same time if you pass the same #VmaAllocation object to these
-+ functions.
-+- #VmaVirtualBlock is not safe to be used from multiple threads simultaneously.
-+
-+\section general_considerations_versioning_and_compatibility Versioning and compatibility
-+
-+The library uses [**Semantic Versioning**](https://semver.org/),
-+which means version numbers follow convention: Major.Minor.Patch (e.g. 2.3.0), where:
-+
-+- Incremented Patch version means a release is backward- and forward-compatible,
-+ introducing only some internal improvements, bug fixes, optimizations etc.
-+ or changes that are out of scope of the official API described in this documentation.
-+- Incremented Minor version means a release is backward-compatible,
-+ so existing code that uses the library should continue to work, while some new
-+ symbols could have been added: new structures, functions, new values in existing
-+ enums and bit flags, new structure members, but not new function parameters.
-+- Incrementing Major version means a release could break some backward compatibility.
-+
-+All changes between official releases are documented in file "CHANGELOG.md".
-+
-+\warning Backward compatibility is considered on the level of C++ source code, not binary linkage.
-+Adding new members to existing structures is treated as backward compatible if initializing
-+the new members to binary zero results in the old behavior.
-+You should always fully initialize all library structures to zeros and not rely on their
-+exact binary size.
-+
-+\section general_considerations_validation_layer_warnings Validation layer warnings
-+
-+When using this library, you can meet following types of warnings issued by
-+Vulkan validation layer. They don't necessarily indicate a bug, so you may need
-+to just ignore them.
-+
-+- *vkBindBufferMemory(): Binding memory to buffer 0xeb8e4 but vkGetBufferMemoryRequirements() has not been called on that buffer.*
-+ - It happens when VK_KHR_dedicated_allocation extension is enabled.
-+ `vkGetBufferMemoryRequirements2KHR` function is used instead, while validation layer seems to be unaware of it.
-+- *Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used.*
-+ - It happens when you map a buffer or image, because the library maps entire
-+ `VkDeviceMemory` block, where different types of images and buffers may end
-+ up together, especially on GPUs with unified memory like Intel.
-+- *Non-linear image 0xebc91 is aliased with linear buffer 0xeb8e4 which may indicate a bug.*
-+ - It may happen when you use [defragmentation](@ref defragmentation).
-+
-+\section general_considerations_allocation_algorithm Allocation algorithm
-+
-+The library uses following algorithm for allocation, in order:
-+
-+-# Try to find free range of memory in existing blocks.
-+-# If failed, try to create a new block of `VkDeviceMemory`, with preferred block size.
-+-# If failed, try to create such block with size / 2, size / 4, size / 8.
-+-# If failed, try to allocate separate `VkDeviceMemory` for this allocation,
-+ just like when you use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT.
-+-# If failed, choose other memory type that meets the requirements specified in
-+ VmaAllocationCreateInfo and go to point 1.
-+-# If failed, return `VK_ERROR_OUT_OF_DEVICE_MEMORY`.
-+
-+\section general_considerations_features_not_supported Features not supported
-+
-+Features deliberately excluded from the scope of this library:
-+
-+-# **Data transfer.** Uploading (streaming) and downloading data of buffers and images
-+ between CPU and GPU memory and related synchronization is responsibility of the user.
-+ Defining some "texture" object that would automatically stream its data from a
-+ staging copy in CPU memory to GPU memory would rather be a feature of another,
-+ higher-level library implemented on top of VMA.
-+ VMA doesn't record any commands to a `VkCommandBuffer`. It just allocates memory.
-+-# **Recreation of buffers and images.** Although the library has functions for
-+ buffer and image creation: vmaCreateBuffer(), vmaCreateImage(), you need to
-+ recreate these objects yourself after defragmentation. That is because the big
-+ structures `VkBufferCreateInfo`, `VkImageCreateInfo` are not stored in
-+ #VmaAllocation object.
-+-# **Handling CPU memory allocation failures.** When dynamically creating small C++
-+ objects in CPU memory (not Vulkan memory), allocation failures are not checked
-+ and handled gracefully, because that would complicate code significantly and
-+ is usually not needed in desktop PC applications anyway.
-+ Success of an allocation is just checked with an assert.
-+-# **Code free of any compiler warnings.** Maintaining the library to compile and
-+ work correctly on so many different platforms is hard enough. Being free of
-+ any warnings, on any version of any compiler, is simply not feasible.
-+ There are many preprocessor macros that make some variables unused, function parameters unreferenced,
-+ or conditional expressions constant in some configurations.
-+ The code of this library should not be bigger or more complicated just to silence these warnings.
-+ It is recommended to disable such warnings instead.
-+-# This is a C++ library with C interface. **Bindings or ports to any other programming languages** are welcome as external projects but
-+ are not going to be included into this repository.
-+*/
++#elif defined(__APPLE__) || defined(__ANDROID__) || (defined(__linux__) && defined(__GLIBCXX__) && !defined(_GLIBCXX_HAVE_ALIGNED_ALLOC)) || defined(__OpenBSD__) || defined(__FreeBSD__)
+ #include <cstdlib>
+
+ #if defined(__APPLE__)
diff --git a/www/ungoogled-chromium/files/patch-third__party_wayland_include_config.h b/www/ungoogled-chromium/files/patch-third__party_wayland_include_config.h
index 14da7a98faf5..a477d9e98822 100644
--- a/www/ungoogled-chromium/files/patch-third__party_wayland_include_config.h
+++ b/www/ungoogled-chromium/files/patch-third__party_wayland_include_config.h
@@ -1,6 +1,21 @@
---- third_party/wayland/include/config.h.orig 2023-11-28 23:10:47 UTC
+--- third_party/wayland/include/config.h.orig 2024-02-03 15:42:55 UTC
+++ third_party/wayland/include/config.h
-@@ -25,11 +25,14 @@
+@@ -9,7 +9,14 @@
+
+ #define HAVE_BROKEN_MSG_CMSG_CLOEXEC 0
+
++#if defined(__FreeBSD__)
++#include <osreldate.h>
++#if defined(__FreeBSD_version) && __FreeBSD_version < 1300048
++#undef HAVE_MEMFD_CREATE
++#else
+ #define HAVE_MEMFD_CREATE
++#endif
++#endif
+
+ #define HAVE_MKOSTEMP
+
+@@ -25,7 +32,11 @@
#undef HAVE_SYS_PROCCTL_H
@@ -12,7 +27,3 @@
#define HAVE_XUCRED_CR_PID 0
- #define PACKAGE "wayland"
-
- #define PACKAGE_VERSION "1.21.0"
--
diff --git a/www/ungoogled-chromium/files/patch-third__party_webrtc_modules_audio__device_BUILD.gn b/www/ungoogled-chromium/files/patch-third__party_webrtc_modules_audio__device_BUILD.gn
index 770ad7806f53..263b1fae562f 100644
--- a/www/ungoogled-chromium/files/patch-third__party_webrtc_modules_audio__device_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-third__party_webrtc_modules_audio__device_BUILD.gn
@@ -1,6 +1,6 @@
---- third_party/webrtc/modules/audio_device/BUILD.gn.orig 2023-09-17 07:59:53 UTC
+--- third_party/webrtc/modules/audio_device/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ third_party/webrtc/modules/audio_device/BUILD.gn
-@@ -349,7 +349,7 @@ rtc_library("audio_device_impl") {
+@@ -350,7 +350,7 @@ rtc_library("audio_device_impl") {
if (rtc_use_dummy_audio_file_devices) {
defines += [ "WEBRTC_DUMMY_FILE_DEVICES" ]
} else {
diff --git a/www/ungoogled-chromium/files/patch-tools_gn_build_gen.py b/www/ungoogled-chromium/files/patch-tools_gn_build_gen.py
index cf8866a733dd..306133e3fccf 100644
--- a/www/ungoogled-chromium/files/patch-tools_gn_build_gen.py
+++ b/www/ungoogled-chromium/files/patch-tools_gn_build_gen.py
@@ -1,4 +1,4 @@
---- tools/gn/build/gen.py.orig 2023-09-17 07:59:53 UTC
+--- tools/gn/build/gen.py.orig 2024-02-03 15:42:55 UTC
+++ tools/gn/build/gen.py
@@ -94,6 +94,12 @@ class Platform(object):
def is_solaris(self):
@@ -22,7 +22,7 @@
'haiku': 'build_haiku.ninja.template',
'solaris': 'build_linux.ninja.template',
'netbsd': 'build_linux.ninja.template',
-@@ -512,6 +518,9 @@ def WriteGNNinja(path, platform, host, options, args_l
+@@ -514,6 +520,9 @@ def WriteGNNinja(path, platform, host, options, args_l
if platform.is_posix() and not platform.is_haiku():
ldflags.append('-pthread')
diff --git a/www/ungoogled-chromium/files/patch-tools_json__schema__compiler_feature__compiler.py b/www/ungoogled-chromium/files/patch-tools_json__schema__compiler_feature__compiler.py
index 624c84e002b0..9adf4cb6417a 100644
--- a/www/ungoogled-chromium/files/patch-tools_json__schema__compiler_feature__compiler.py
+++ b/www/ungoogled-chromium/files/patch-tools_json__schema__compiler_feature__compiler.py
@@ -1,6 +1,6 @@
---- tools/json_schema_compiler/feature_compiler.py.orig 2023-06-05 19:39:05 UTC
+--- tools/json_schema_compiler/feature_compiler.py.orig 2024-02-03 15:42:55 UTC
+++ tools/json_schema_compiler/feature_compiler.py
-@@ -272,6 +272,8 @@ FEATURE_GRAMMAR = ({
+@@ -273,6 +273,8 @@ FEATURE_GRAMMAR = ({
'linux': 'Feature::LINUX_PLATFORM',
'mac': 'Feature::MACOSX_PLATFORM',
'win': 'Feature::WIN_PLATFORM',
diff --git a/www/ungoogled-chromium/files/patch-ui_base_resource_resource__bundle.cc b/www/ungoogled-chromium/files/patch-ui_base_resource_resource__bundle.cc
index d65ec663af26..746f9ea3df89 100644
--- a/www/ungoogled-chromium/files/patch-ui_base_resource_resource__bundle.cc
+++ b/www/ungoogled-chromium/files/patch-ui_base_resource_resource__bundle.cc
@@ -1,6 +1,6 @@
---- ui/base/resource/resource_bundle.cc.orig 2023-11-04 07:08:51 UTC
+--- ui/base/resource/resource_bundle.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/base/resource/resource_bundle.cc
-@@ -925,7 +925,7 @@ void ResourceBundle::ReloadFonts() {
+@@ -926,7 +926,7 @@ void ResourceBundle::ReloadFonts() {
}
ResourceScaleFactor ResourceBundle::GetMaxResourceScaleFactor() const {
diff --git a/www/ungoogled-chromium/files/patch-ui_base_ui__base__features.cc b/www/ungoogled-chromium/files/patch-ui_base_ui__base__features.cc
index 26db96fa7a18..362581b75ce8 100644
--- a/www/ungoogled-chromium/files/patch-ui_base_ui__base__features.cc
+++ b/www/ungoogled-chromium/files/patch-ui_base_ui__base__features.cc
@@ -1,6 +1,6 @@
---- ui/base/ui_base_features.cc.orig 2023-12-23 12:33:28 UTC
+--- ui/base/ui_base_features.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/base/ui_base_features.cc
-@@ -224,7 +224,7 @@ BASE_FEATURE(kExperimentalFlingAnimation,
+@@ -217,7 +217,7 @@ BASE_FEATURE(kExperimentalFlingAnimation,
"ExperimentalFlingAnimation",
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
@@ -9,7 +9,7 @@
(BUILDFLAG(IS_LINUX) && !BUILDFLAG(IS_CHROMEOS_ASH) && \
!BUILDFLAG(IS_CHROMEOS_LACROS))
base::FEATURE_ENABLED_BY_DEFAULT
-@@ -337,7 +337,7 @@ bool IsForcedColorsEnabled() {
+@@ -318,7 +318,7 @@ bool IsForcedColorsEnabled() {
BASE_FEATURE(kEyeDropper,
"EyeDropper",
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_MAC) || BUILDFLAG(IS_LINUX) || \
diff --git a/www/ungoogled-chromium/files/patch-ui_base_webui_web__ui__util.cc b/www/ungoogled-chromium/files/patch-ui_base_webui_web__ui__util.cc
index 37d7817d23f5..946aac0abbb0 100644
--- a/www/ungoogled-chromium/files/patch-ui_base_webui_web__ui__util.cc
+++ b/www/ungoogled-chromium/files/patch-ui_base_webui_web__ui__util.cc
@@ -1,6 +1,6 @@
---- ui/base/webui/web_ui_util.cc.orig 2023-12-23 12:33:28 UTC
+--- ui/base/webui/web_ui_util.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/base/webui/web_ui_util.cc
-@@ -38,7 +38,7 @@ namespace {
+@@ -39,7 +39,7 @@ namespace {
constexpr float kMaxScaleFactor = 1000.0f;
std::string GetFontFamilyMd() {
@@ -9,7 +9,7 @@
return "Roboto, " + GetFontFamily();
#else
return GetFontFamily();
-@@ -216,7 +216,7 @@ std::string GetFontFamily() {
+@@ -217,7 +217,7 @@ std::string GetFontFamily() {
// TODO(crbug.com/1052397): Revisit the macro expression once build flag switch
// of lacros-chrome is complete.
diff --git a/www/ungoogled-chromium/files/patch-ui_base_x_x11__cursor__loader.cc b/www/ungoogled-chromium/files/patch-ui_base_x_x11__cursor__loader.cc
index 1c8f2dc331db..812c50683302 100644
--- a/www/ungoogled-chromium/files/patch-ui_base_x_x11__cursor__loader.cc
+++ b/www/ungoogled-chromium/files/patch-ui_base_x_x11__cursor__loader.cc
@@ -1,15 +1,15 @@
---- ui/base/x/x11_cursor_loader.cc.orig 2023-11-04 07:08:51 UTC
+--- ui/base/x/x11_cursor_loader.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/base/x/x11_cursor_loader.cc
-@@ -32,7 +32,7 @@
+@@ -31,7 +31,7 @@
+ #include "ui/gfx/x/connection.h"
#include "ui/gfx/x/xproto.h"
- #include "ui/gfx/x/xproto_util.h"
-#if BUILDFLAG(IS_LINUX)
+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
#include "ui/linux/linux_ui.h"
#endif
-@@ -138,7 +138,11 @@ std::string CursorPathFromLibXcursor() {
+@@ -137,7 +137,11 @@ std::string CursorPathFromLibXcursor() {
void operator()(void* ptr) const { dlclose(ptr); }
};
@@ -21,7 +21,7 @@
if (!lib)
return "";
-@@ -249,7 +253,7 @@ scoped_refptr<base::RefCountedMemory> ReadCursorFile(
+@@ -248,7 +252,7 @@ scoped_refptr<base::RefCountedMemory> ReadCursorFile(
const std::string& rm_xcursor_theme) {
constexpr const char kDefaultTheme[] = "default";
std::string themes[] = {
@@ -30,7 +30,7 @@
// The toolkit theme has the highest priority.
LinuxUi::instance() ? LinuxUi::instance()->GetCursorThemeName()
: std::string(),
-@@ -448,7 +452,7 @@ uint32_t XCursorLoader::GetPreferredCursorSize() const
+@@ -440,7 +444,7 @@ uint32_t XCursorLoader::GetPreferredCursorSize() const
return size;
}
diff --git a/www/ungoogled-chromium/files/patch-ui_base_x_x11__display__manager.cc b/www/ungoogled-chromium/files/patch-ui_base_x_x11__display__manager.cc
index 019c7d2aa431..86bc2c392789 100644
--- a/www/ungoogled-chromium/files/patch-ui_base_x_x11__display__manager.cc
+++ b/www/ungoogled-chromium/files/patch-ui_base_x_x11__display__manager.cc
@@ -1,7 +1,7 @@
---- ui/base/x/x11_display_manager.cc.orig 2023-11-04 07:08:51 UTC
+--- ui/base/x/x11_display_manager.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/base/x/x11_display_manager.cc
@@ -15,7 +15,7 @@
- #include "ui/gfx/x/x11_atom_cache.h"
+ #include "ui/gfx/x/randr.h"
#include "ui/gfx/x/xproto.h"
-#if BUILDFLAG(IS_LINUX)
diff --git a/www/ungoogled-chromium/files/patch-ui_color_color__id.h b/www/ungoogled-chromium/files/patch-ui_color_color__id.h
index 8654d8ef42a8..b440b52215c3 100644
--- a/www/ungoogled-chromium/files/patch-ui_color_color__id.h
+++ b/www/ungoogled-chromium/files/patch-ui_color_color__id.h
@@ -1,6 +1,6 @@
---- ui/color/color_id.h.orig 2023-12-23 12:33:28 UTC
+--- ui/color/color_id.h.orig 2024-02-03 15:42:55 UTC
+++ ui/color/color_id.h
-@@ -606,7 +606,7 @@
+@@ -610,7 +610,7 @@
E_CPONLY(kColorNativeColor6) \
E_CPONLY(kColorNativeBaseColor) \
E_CPONLY(kColorNativeSecondaryColor)
diff --git a/www/ungoogled-chromium/files/patch-ui_compositor_compositor.cc b/www/ungoogled-chromium/files/patch-ui_compositor_compositor.cc
new file mode 100644
index 000000000000..535b07e54837
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-ui_compositor_compositor.cc
@@ -0,0 +1,11 @@
+--- ui/compositor/compositor.cc.orig 2024-02-03 15:42:55 UTC
++++ ui/compositor/compositor.cc
+@@ -893,7 +893,7 @@ void Compositor::OnResume() {
+ obs.ResetIfActive();
+ }
+
+-#if BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ #if BUILDFLAG(OZONE_PLATFORM_X11)
+ void Compositor::OnCompleteSwapWithNewSize(const gfx::Size& size) {
+ for (auto& observer : observer_list_)
diff --git a/www/ungoogled-chromium/files/patch-ui_compositor_compositor.h b/www/ungoogled-chromium/files/patch-ui_compositor_compositor.h
new file mode 100644
index 000000000000..b0f8affe5266
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-ui_compositor_compositor.h
@@ -0,0 +1,20 @@
+--- ui/compositor/compositor.h.orig 2024-02-03 15:42:55 UTC
++++ ui/compositor/compositor.h
+@@ -56,7 +56,7 @@
+ #include "ui/gfx/native_widget_types.h"
+ #include "ui/gfx/overlay_transform.h"
+
+-#if BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ #include "ui/ozone/buildflags.h"
+ #endif
+
+@@ -464,7 +464,7 @@ class COMPOSITOR_EXPORT Compositor : public base::Powe
+ // base::PowerSuspendObserver:
+ void OnResume() override;
+
+-#if BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ #if BUILDFLAG(OZONE_PLATFORM_X11)
+ void OnCompleteSwapWithNewSize(const gfx::Size& size);
+ #endif // BUILDFLAG(OZONE_PLATFORM_X11)
diff --git a/www/ungoogled-chromium/files/patch-ui_compositor_compositor__observer.h b/www/ungoogled-chromium/files/patch-ui_compositor_compositor__observer.h
new file mode 100644
index 000000000000..f1274db937dc
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-ui_compositor_compositor__observer.h
@@ -0,0 +1,20 @@
+--- ui/compositor/compositor_observer.h.orig 2024-02-03 15:42:55 UTC
++++ ui/compositor/compositor_observer.h
+@@ -11,7 +11,7 @@
+ #include "components/viz/common/surfaces/frame_sink_id.h"
+ #include "ui/compositor/compositor_export.h"
+
+-#if BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ #include "ui/ozone/buildflags.h"
+ #endif
+
+@@ -49,7 +49,7 @@ class COMPOSITOR_EXPORT CompositorObserver {
+ // Called when a child of the compositor is resizing.
+ virtual void OnCompositingChildResizing(Compositor* compositor) {}
+
+-#if BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ #if BUILDFLAG(OZONE_PLATFORM_X11)
+ // Called when a swap with new size is completed.
+ virtual void OnCompositingCompleteSwapWithNewSize(ui::Compositor* compositor,
diff --git a/www/ungoogled-chromium/files/patch-ui_display_screen.h b/www/ungoogled-chromium/files/patch-ui_display_screen.h
index ac26e9161ec7..8b01964b928b 100644
--- a/www/ungoogled-chromium/files/patch-ui_display_screen.h
+++ b/www/ungoogled-chromium/files/patch-ui_display_screen.h
@@ -1,4 +1,4 @@
---- ui/display/screen.h.orig 2023-02-11 09:11:04 UTC
+--- ui/display/screen.h.orig 2024-02-03 15:42:55 UTC
+++ ui/display/screen.h
@@ -131,7 +131,7 @@ class DISPLAY_EXPORT Screen {
// (both of which may or may not be `nearest_id`).
@@ -9,7 +9,7 @@
// Object which suspends the platform-specific screensaver for the duration of
// its existence.
class ScreenSaverSuspender {
-@@ -232,7 +232,7 @@ class DISPLAY_EXPORT Screen {
+@@ -231,7 +231,7 @@ class DISPLAY_EXPORT Screen {
int64_t display_id_for_new_windows_;
int64_t scoped_display_id_for_new_windows_ = display::kInvalidDisplayId;
diff --git a/www/ungoogled-chromium/files/patch-ui_events_devices_x11_device__data__manager__x11.cc b/www/ungoogled-chromium/files/patch-ui_events_devices_x11_device__data__manager__x11.cc
index 827cbfc677a2..a55f999e28d6 100644
--- a/www/ungoogled-chromium/files/patch-ui_events_devices_x11_device__data__manager__x11.cc
+++ b/www/ungoogled-chromium/files/patch-ui_events_devices_x11_device__data__manager__x11.cc
@@ -1,6 +1,6 @@
---- ui/events/devices/x11/device_data_manager_x11.cc.orig 2023-07-21 09:49:17 UTC
+--- ui/events/devices/x11/device_data_manager_x11.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/events/devices/x11/device_data_manager_x11.cc
-@@ -876,6 +876,7 @@ void DeviceDataManagerX11::SetDisabledKeyboardAllowedK
+@@ -844,6 +844,7 @@ void DeviceDataManagerX11::SetDisabledKeyboardAllowedK
}
void DeviceDataManagerX11::DisableDevice(x11::Input::DeviceId deviceid) {
diff --git a/www/ungoogled-chromium/files/patch-ui_gfx_BUILD.gn b/www/ungoogled-chromium/files/patch-ui_gfx_BUILD.gn
index 3efb07eb0846..684ee49219e9 100644
--- a/www/ungoogled-chromium/files/patch-ui_gfx_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-ui_gfx_BUILD.gn
@@ -1,6 +1,6 @@
---- ui/gfx/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- ui/gfx/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ ui/gfx/BUILD.gn
-@@ -664,7 +664,7 @@ source_set("memory_buffer_sources") {
+@@ -667,7 +667,7 @@ source_set("memory_buffer_sources") {
deps += [ "//build/config/linux/libdrm" ]
}
diff --git a/www/ungoogled-chromium/files/patch-ui_gfx_font__fallback__linux.cc b/www/ungoogled-chromium/files/patch-ui_gfx_font__fallback__linux.cc
index 6e4638f8addd..323416925311 100644
--- a/www/ungoogled-chromium/files/patch-ui_gfx_font__fallback__linux.cc
+++ b/www/ungoogled-chromium/files/patch-ui_gfx_font__fallback__linux.cc
@@ -1,6 +1,6 @@
---- ui/gfx/font_fallback_linux.cc.orig 2022-10-01 07:40:07 UTC
+--- ui/gfx/font_fallback_linux.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/gfx/font_fallback_linux.cc
-@@ -26,6 +26,8 @@
+@@ -27,6 +27,8 @@
#include "ui/gfx/linux/fontconfig_util.h"
#include "ui/gfx/platform_font.h"
diff --git a/www/ungoogled-chromium/files/patch-ui_gfx_font__render__params.h b/www/ungoogled-chromium/files/patch-ui_gfx_font__render__params.h
index c20e7cc0fa0d..f43e3384b030 100644
--- a/www/ungoogled-chromium/files/patch-ui_gfx_font__render__params.h
+++ b/www/ungoogled-chromium/files/patch-ui_gfx_font__render__params.h
@@ -1,15 +1,15 @@
---- ui/gfx/font_render_params.h.orig 2022-10-01 07:40:07 UTC
+--- ui/gfx/font_render_params.h.orig 2024-02-03 15:42:55 UTC
+++ ui/gfx/font_render_params.h
-@@ -109,7 +109,7 @@ GFX_EXPORT FontRenderParams GetFontRenderParams(
+@@ -118,7 +118,7 @@ GFX_EXPORT FontRenderParams GetFontRenderParams(
const FontRenderParamsQuery& query,
std::string* family_out);
--#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_BSD)
+-#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_WIN)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_WIN) || BUILDFLAG(IS_BSD)
// Clears GetFontRenderParams()'s cache. Intended to be called by tests that are
// changing Fontconfig's configuration.
GFX_EXPORT void ClearFontRenderParamsCacheForTest();
-@@ -119,7 +119,7 @@ GFX_EXPORT void ClearFontRenderParamsCacheForTest();
+@@ -128,7 +128,7 @@ GFX_EXPORT void ClearFontRenderParamsCacheForTest();
GFX_EXPORT float GetFontRenderParamsDeviceScaleFactor();
#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || \
diff --git a/www/ungoogled-chromium/files/patch-ui_gfx_platform__font__skia.cc b/www/ungoogled-chromium/files/patch-ui_gfx_platform__font__skia.cc
index 76951b235582..369de47fe69d 100644
--- a/www/ungoogled-chromium/files/patch-ui_gfx_platform__font__skia.cc
+++ b/www/ungoogled-chromium/files/patch-ui_gfx_platform__font__skia.cc
@@ -1,6 +1,6 @@
---- ui/gfx/platform_font_skia.cc.orig 2022-10-01 07:40:07 UTC
+--- ui/gfx/platform_font_skia.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/gfx/platform_font_skia.cc
-@@ -28,7 +28,7 @@
+@@ -30,7 +30,7 @@
#include "ui/gfx/system_fonts_win.h"
#endif
@@ -9,7 +9,7 @@
#include "ui/linux/linux_ui.h"
#endif
-@@ -166,7 +166,7 @@ void PlatformFontSkia::EnsuresDefaultFontIsInitialized
+@@ -168,7 +168,7 @@ void PlatformFontSkia::EnsuresDefaultFontIsInitialized
weight = system_font.GetWeight();
#endif // BUILDFLAG(IS_WIN)
diff --git a/www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_dri3.cc b/www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_dri3.cc
index 256e7991a54c..a990fa707f00 100644
--- a/www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_dri3.cc
+++ b/www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_dri3.cc
@@ -1,4 +1,4 @@
---- ui/gfx/x/generated_protos/dri3.cc.orig 2023-12-23 12:33:28 UTC
+--- ui/gfx/x/generated_protos/dri3.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/gfx/x/generated_protos/dri3.cc
@@ -27,6 +27,8 @@
#include <xcb/xcb.h>
@@ -8,4 +8,4 @@
+
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
- #include "ui/gfx/x/xproto_internal.h"
+ #include "ui/gfx/x/connection.h"
diff --git a/www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_shm.cc b/www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_shm.cc
index 23e9250c94f2..fdaa00397c25 100644
--- a/www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_shm.cc
+++ b/www/ungoogled-chromium/files/patch-ui_gfx_x_generated__protos_shm.cc
@@ -1,4 +1,4 @@
---- ui/gfx/x/generated_protos/shm.cc.orig 2023-12-23 12:33:28 UTC
+--- ui/gfx/x/generated_protos/shm.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/gfx/x/generated_protos/shm.cc
@@ -27,6 +27,8 @@
#include <xcb/xcb.h>
@@ -8,4 +8,4 @@
+
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
- #include "ui/gfx/x/xproto_internal.h"
+ #include "ui/gfx/x/connection.h"
diff --git a/www/ungoogled-chromium/files/patch-ui_gl_BUILD.gn b/www/ungoogled-chromium/files/patch-ui_gl_BUILD.gn
index 51b1d5a791d5..9b7f2bd67f9b 100644
--- a/www/ungoogled-chromium/files/patch-ui_gl_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-ui_gl_BUILD.gn
@@ -1,6 +1,6 @@
---- ui/gl/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- ui/gl/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ ui/gl/BUILD.gn
-@@ -151,8 +151,6 @@ component("gl") {
+@@ -154,8 +154,6 @@ component("gl") {
defines += [ "GPU_ENABLE_SERVICE_LOGGING" ]
}
@@ -9,7 +9,7 @@
all_dependent_configs = [ ":gl_config" ]
public_configs = [ "//third_party/khronos:khronos_headers" ]
-@@ -169,7 +167,6 @@ component("gl") {
+@@ -172,7 +170,6 @@ component("gl") {
]
public_deps = [
"//base",
@@ -17,7 +17,7 @@
"//ui/events/platform",
"//ui/gfx",
"//ui/gfx/geometry",
-@@ -377,7 +374,6 @@ component("gl") {
+@@ -380,7 +377,6 @@ component("gl") {
data_deps += [
"//third_party/angle:libEGL",
"//third_party/angle:libGLESv2",
@@ -25,7 +25,7 @@
]
if (enable_swiftshader) {
data_deps += [
-@@ -582,7 +578,6 @@ test("gl_unittests") {
+@@ -584,7 +580,6 @@ test("gl_unittests") {
data_deps = [
"//testing/buildbot/filters:gl_unittests_filters",
diff --git a/www/ungoogled-chromium/files/patch-ui_gl_gl__context.cc b/www/ungoogled-chromium/files/patch-ui_gl_gl__context.cc
index 6e4275291b8a..76b32a6a2ec4 100644
--- a/www/ungoogled-chromium/files/patch-ui_gl_gl__context.cc
+++ b/www/ungoogled-chromium/files/patch-ui_gl_gl__context.cc
@@ -1,6 +1,6 @@
---- ui/gl/gl_context.cc.orig 2023-12-23 12:33:28 UTC
+--- ui/gl/gl_context.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/gl/gl_context.cc
-@@ -486,7 +486,7 @@ bool GLContext::MakeVirtuallyCurrent(
+@@ -495,7 +495,7 @@ bool GLContext::MakeVirtuallyCurrent(
DCHECK(virtual_context->IsCurrent(surface));
if (switched_real_contexts || virtual_context != current_virtual_context_) {
diff --git a/www/ungoogled-chromium/files/patch-ui_gl_gl__switches.cc b/www/ungoogled-chromium/files/patch-ui_gl_gl__switches.cc
index 7f7c539ba433..dc994bc17f47 100644
--- a/www/ungoogled-chromium/files/patch-ui_gl_gl__switches.cc
+++ b/www/ungoogled-chromium/files/patch-ui_gl_gl__switches.cc
@@ -1,4 +1,4 @@
---- ui/gl/gl_switches.cc.orig 2023-10-13 13:20:35 UTC
+--- ui/gl/gl_switches.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/gl/gl_switches.cc
@@ -11,7 +11,7 @@
#include "base/android/build_info.h"
@@ -9,7 +9,7 @@
#include <vulkan/vulkan_core.h>
#include "third_party/angle/src/gpu_info_util/SystemInfo.h" // nogncheck
#endif
-@@ -294,7 +294,7 @@ bool IsDefaultANGLEVulkan() {
+@@ -307,7 +307,7 @@ bool IsDefaultANGLEVulkan() {
base::android::SDK_VERSION_Q)
return false;
#endif // BUILDFLAG(IS_ANDROID)
diff --git a/www/ungoogled-chromium/files/patch-ui_native__theme_native__theme__features.cc b/www/ungoogled-chromium/files/patch-ui_native__theme_native__theme__features.cc
new file mode 100644
index 000000000000..9d0c28673525
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-ui_native__theme_native__theme__features.cc
@@ -0,0 +1,20 @@
+--- ui/native_theme/native_theme_features.cc.orig 2024-02-03 15:42:55 UTC
++++ ui/native_theme/native_theme_features.cc
+@@ -54,7 +54,7 @@ bool IsOverlayScrollbarEnabled() {
+
+ bool IsFluentScrollbarEnabled() {
+ // Fluent scrollbars are only used for some OSes due to UI design guidelines.
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ return base::FeatureList::IsEnabled(features::kFluentScrollbar) ||
+ IsFluentOverlayScrollbarEnabled();
+ #else
+@@ -63,7 +63,7 @@ bool IsFluentScrollbarEnabled() {
+ }
+ bool IsFluentOverlayScrollbarEnabled() {
+ // Fluent scrollbars are only used for some OSes due to UI design guidelines.
+-#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_WIN) || BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ return base::FeatureList::IsEnabled(features::kFluentOverlayScrollbar);
+ #else
+ return false;
diff --git a/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_common_wayland__util.cc b/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_common_wayland__util.cc
new file mode 100644
index 000000000000..2986de3d0401
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_common_wayland__util.cc
@@ -0,0 +1,11 @@
+--- ui/ozone/platform/wayland/common/wayland_util.cc.orig 2024-02-03 15:42:55 UTC
++++ ui/ozone/platform/wayland/common/wayland_util.cc
+@@ -335,7 +335,7 @@ void TransformToWlArray(
+ }
+
+ base::TimeTicks EventMillisecondsToTimeTicks(uint32_t milliseconds) {
+-#if BUILDFLAG(IS_LINUX)
++#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
+ // TODO(crbug.com/1499638): `milliseconds` comes from Weston that
+ // uses timestamp from libinput, which is different from TimeTicks.
+ // Use EventTimeForNow(), for now.
diff --git a/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_wayland__toplevel__window.cc b/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_wayland__toplevel__window.cc
index 8ef45fc42bc3..22bb5ceab193 100644
--- a/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_wayland__toplevel__window.cc
+++ b/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_wayland__toplevel__window.cc
@@ -1,6 +1,6 @@
---- ui/ozone/platform/wayland/host/wayland_toplevel_window.cc.orig 2023-11-04 07:08:51 UTC
+--- ui/ozone/platform/wayland/host/wayland_toplevel_window.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/ozone/platform/wayland/host/wayland_toplevel_window.cc
-@@ -570,7 +570,7 @@ void WaylandToplevelWindow::HandleAuraToplevelConfigur
+@@ -573,7 +573,7 @@ void WaylandToplevelWindow::HandleAuraToplevelConfigur
const bool did_active_change = is_active_ != window_states.is_activated;
is_active_ = window_states.is_activated;
diff --git a/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_wayland__toplevel__window.h b/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_wayland__toplevel__window.h
index 5067d8cf2c90..343c5fe64957 100644
--- a/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_wayland__toplevel__window.h
+++ b/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_wayland__toplevel__window.h
@@ -1,6 +1,6 @@
---- ui/ozone/platform/wayland/host/wayland_toplevel_window.h.orig 2023-11-04 07:08:51 UTC
+--- ui/ozone/platform/wayland/host/wayland_toplevel_window.h.orig 2024-02-03 15:42:55 UTC
+++ ui/ozone/platform/wayland/host/wayland_toplevel_window.h
-@@ -256,7 +256,7 @@ class WaylandToplevelWindow : public WaylandWindow,
+@@ -259,7 +259,7 @@ class WaylandToplevelWindow : public WaylandWindow,
// The display ID to switch to in case the state is `kFullscreen`.
int64_t fullscreen_display_id_ = display::kInvalidDisplayId;
diff --git a/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_wayland__window.cc b/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_wayland__window.cc
index 15ba510e6d56..3eed4c6aff8f 100644
--- a/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_wayland__window.cc
+++ b/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_wayland__window.cc
@@ -1,6 +1,6 @@
---- ui/ozone/platform/wayland/host/wayland_window.cc.orig 2023-10-13 13:20:35 UTC
+--- ui/ozone/platform/wayland/host/wayland_window.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/ozone/platform/wayland/host/wayland_window.cc
-@@ -233,7 +233,7 @@ void WaylandWindow::OnPointerFocusChanged(bool focused
+@@ -234,7 +234,7 @@ void WaylandWindow::OnPointerFocusChanged(bool focused
// Whenever the window gets the pointer focus back, the cursor shape must be
// updated. Otherwise, it is invalidated upon wl_pointer::leave and is not
// restored by the Wayland compositor.
@@ -9,7 +9,7 @@
if (focused && async_cursor_) {
async_cursor_->AddCursorLoadedCallback(
base::BindOnce(&WaylandWindow::OnCursorLoaded,
-@@ -490,7 +490,7 @@ bool WaylandWindow::ShouldUseNativeFrame() const {
+@@ -491,7 +491,7 @@ bool WaylandWindow::ShouldUseNativeFrame() const {
void WaylandWindow::SetCursor(scoped_refptr<PlatformCursor> platform_cursor) {
DCHECK(platform_cursor);
@@ -18,7 +18,7 @@
auto async_cursor = WaylandAsyncCursor::FromPlatformCursor(platform_cursor);
if (async_cursor_ == async_cursor) {
-@@ -661,7 +661,7 @@ std::string WaylandWindow::WindowStates::ToString() co
+@@ -662,7 +662,7 @@ std::string WaylandWindow::WindowStates::ToString() co
} else {
base::TrimString(states, " ", &states);
}
@@ -27,7 +27,7 @@
states += "; tiled_edges: ";
std::string tiled = "";
if (tiled_edges.left) {
-@@ -1111,12 +1111,12 @@ void WaylandWindow::UpdateCursorShape(scoped_refptr<Bi
+@@ -1117,12 +1117,12 @@ void WaylandWindow::UpdateCursorShape(scoped_refptr<Bi
cursor->bitmaps(), hotspot_in_dips,
std::ceil(cursor->cursor_image_scale_factor()));
}
diff --git a/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_xdg__toplevel__wrapper__impl.cc b/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_xdg__toplevel__wrapper__impl.cc
index a0626f9ca5b0..999357094905 100644
--- a/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_xdg__toplevel__wrapper__impl.cc
+++ b/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_xdg__toplevel__wrapper__impl.cc
@@ -1,6 +1,6 @@
---- ui/ozone/platform/wayland/host/xdg_toplevel_wrapper_impl.cc.orig 2023-11-04 07:08:51 UTC
+--- ui/ozone/platform/wayland/host/xdg_toplevel_wrapper_impl.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/ozone/platform/wayland/host/xdg_toplevel_wrapper_impl.cc
-@@ -310,7 +310,7 @@ void XDGToplevelWrapperImpl::OnToplevelConfigure(void*
+@@ -322,7 +322,7 @@ void XDGToplevelWrapperImpl::OnToplevelConfigure(void*
CheckIfWlArrayHasValue(states, XDG_TOPLEVEL_STATE_ACTIVATED),
};
diff --git a/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_zwp__text__input__wrapper__v1.cc b/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_zwp__text__input__wrapper__v1.cc
new file mode 100644
index 000000000000..3adfb0722c3b
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_host_zwp__text__input__wrapper__v1.cc
@@ -0,0 +1,21 @@
+--- ui/ozone/platform/wayland/host/zwp_text_input_wrapper_v1.cc.orig 2024-02-03 15:42:55 UTC
++++ ui/ozone/platform/wayland/host/zwp_text_input_wrapper_v1.cc
+@@ -226,6 +226,10 @@ void ZWPTextInputWrapperV1::SetSurroundingText(
+ // so if it exceeds 16 bits, it may be broken.
+ static constexpr size_t kSizeLimit = 60000;
+ if (HasAdvancedSurroundingTextSupport() && text.length() > kSizeLimit) {
++#if defined(__FreeBSD_version) && __FreeBSD_version < 1300048
++ PLOG(ERROR) << "memfd is not supported";
++ return;
++#else
+ base::ScopedFD memfd(memfd_create("surrounding_text", MFD_CLOEXEC));
+ if (!memfd.get()) {
+ PLOG(ERROR) << "Failed to create memfd";
+@@ -238,6 +242,7 @@ void ZWPTextInputWrapperV1::SetSurroundingText(
+ zcr_extended_text_input_v1_set_large_surrounding_text(
+ extended_obj_.get(), memfd.get(), text.length(),
+ selection_range.start(), selection_range.end());
++#endif
+ } else {
+ zwp_text_input_v1_set_surrounding_text(obj_.get(), text.c_str(),
+ selection_range.start(),
diff --git a/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_ozone__platform__wayland.cc b/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_ozone__platform__wayland.cc
index ded8c423a305..5d2e58627508 100644
--- a/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_ozone__platform__wayland.cc
+++ b/www/ungoogled-chromium/files/patch-ui_ozone_platform_wayland_ozone__platform__wayland.cc
@@ -1,4 +1,4 @@
---- ui/ozone/platform/wayland/ozone_platform_wayland.cc.orig 2023-11-04 07:08:51 UTC
+--- ui/ozone/platform/wayland/ozone_platform_wayland.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/ozone/platform/wayland/ozone_platform_wayland.cc
@@ -66,13 +66,13 @@
#include "ui/events/ozone/layout/stub/stub_keyboard_layout_engine.h"
@@ -34,7 +34,7 @@
linux_ui_delegate_ =
std::make_unique<LinuxUiDelegateWayland>(connection_.get());
#endif
-@@ -507,7 +507,7 @@ class OzonePlatformWayland : public OzonePlatform,
+@@ -509,7 +509,7 @@ class OzonePlatformWayland : public OzonePlatform,
DrmRenderNodePathFinder path_finder_;
#endif
diff --git a/www/ungoogled-chromium/files/patch-ui_ozone_public_platform__screen.h b/www/ungoogled-chromium/files/patch-ui_ozone_public_platform__screen.h
deleted file mode 100644
index 701c997fc7bc..000000000000
--- a/www/ungoogled-chromium/files/patch-ui_ozone_public_platform__screen.h
+++ /dev/null
@@ -1,11 +0,0 @@
---- ui/ozone/public/platform_screen.h.orig 2023-11-04 07:08:51 UTC
-+++ ui/ozone/public/platform_screen.h
-@@ -15,7 +15,7 @@
- #include "ui/gfx/gpu_extra_info.h"
- #include "ui/gfx/native_widget_types.h"
-
--#if BUILDFLAG(IS_LINUX)
-+#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_BSD)
- #include "ui/linux/linux_ui.h"
- #endif
-
diff --git a/www/ungoogled-chromium/files/patch-ui_qt_BUILD.gn b/www/ungoogled-chromium/files/patch-ui_qt_BUILD.gn
new file mode 100644
index 000000000000..a260ac425831
--- /dev/null
+++ b/www/ungoogled-chromium/files/patch-ui_qt_BUILD.gn
@@ -0,0 +1,11 @@
+--- ui/qt/BUILD.gn.orig 2024-02-03 15:42:55 UTC
++++ ui/qt/BUILD.gn
+@@ -20,7 +20,7 @@ config("qt_internal_config") {
+
+ # It's OK to depend on the system libstdc++ since it's a dependency of QT, so
+ # it will get loaded into the process anyway.
+- libs = [ "stdc++" ]
++ # libs = [ "stdc++" ]
+
+ configs = [
+ "//build/config/linux:runtime_library",
diff --git a/www/ungoogled-chromium/files/patch-ui_views_controls_textfield_textfield.cc b/www/ungoogled-chromium/files/patch-ui_views_controls_textfield_textfield.cc
index fa06c1e18ee8..1f195ef852e6 100644
--- a/www/ungoogled-chromium/files/patch-ui_views_controls_textfield_textfield.cc
+++ b/www/ungoogled-chromium/files/patch-ui_views_controls_textfield_textfield.cc
@@ -1,6 +1,6 @@
---- ui/views/controls/textfield/textfield.cc.orig 2024-01-06 08:40:52 UTC
+--- ui/views/controls/textfield/textfield.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/views/controls/textfield/textfield.cc
-@@ -81,7 +81,7 @@
+@@ -84,7 +84,7 @@
#include "base/win/win_util.h"
#endif
@@ -9,7 +9,7 @@
#include "ui/base/ime/linux/text_edit_command_auralinux.h"
#include "ui/base/ime/text_input_flags.h"
#include "ui/linux/linux_ui.h"
-@@ -176,7 +176,7 @@ bool IsControlKeyModifier(int flags) {
+@@ -183,7 +183,7 @@ bool IsControlKeyModifier(int flags) {
// Control-modified key combination, but we cannot extend it to other platforms
// as Control has different meanings and behaviors.
// https://crrev.com/2580483002/#msg46
@@ -18,7 +18,7 @@
return flags & ui::EF_CONTROL_DOWN;
#else
return false;
-@@ -747,7 +747,7 @@ bool Textfield::OnKeyPressed(const ui::KeyEvent& event
+@@ -755,7 +755,7 @@ bool Textfield::OnKeyPressed(const ui::KeyEvent& event
if (!textfield)
return handled;
@@ -27,7 +27,7 @@
auto* linux_ui = ui::LinuxUi::instance();
std::vector<ui::TextEditCommandAuraLinux> commands;
if (!handled && linux_ui &&
-@@ -930,7 +930,7 @@ void Textfield::AboutToRequestFocusFromTabTraversal(bo
+@@ -938,7 +938,7 @@ void Textfield::AboutToRequestFocusFromTabTraversal(bo
}
bool Textfield::SkipDefaultKeyEventProcessing(const ui::KeyEvent& event) {
@@ -36,7 +36,7 @@
// Skip any accelerator handling that conflicts with custom keybindings.
auto* linux_ui = ui::LinuxUi::instance();
std::vector<ui::TextEditCommandAuraLinux> commands;
-@@ -1941,7 +1941,7 @@ bool Textfield::ShouldDoLearning() {
+@@ -1980,7 +1980,7 @@ bool Textfield::ShouldDoLearning() {
return false;
}
@@ -45,7 +45,7 @@
// TODO(https://crbug.com/952355): Implement this method to support Korean IME
// reconversion feature on native text fields (e.g. find bar).
bool Textfield::SetCompositionFromExistingText(
-@@ -2437,14 +2437,14 @@ ui::TextEditCommand Textfield::GetCommandForKeyEvent(
+@@ -2476,14 +2476,14 @@ ui::TextEditCommand Textfield::GetCommandForKeyEvent(
#endif
return ui::TextEditCommand::DELETE_BACKWARD;
}
diff --git a/www/ungoogled-chromium/files/patch-ui_views_controls_textfield_textfield.h b/www/ungoogled-chromium/files/patch-ui_views_controls_textfield_textfield.h
index 971d1c35c045..8f506adcee4c 100644
--- a/www/ungoogled-chromium/files/patch-ui_views_controls_textfield_textfield.h
+++ b/www/ungoogled-chromium/files/patch-ui_views_controls_textfield_textfield.h
@@ -1,6 +1,6 @@
---- ui/views/controls/textfield/textfield.h.orig 2023-10-13 13:20:35 UTC
+--- ui/views/controls/textfield/textfield.h.orig 2024-02-03 15:42:55 UTC
+++ ui/views/controls/textfield/textfield.h
-@@ -44,7 +44,7 @@
+@@ -46,7 +46,7 @@
#include "ui/views/view.h"
#include "ui/views/word_lookup_client.h"
@@ -9,7 +9,7 @@
#include <vector>
#endif
-@@ -452,7 +452,7 @@ class VIEWS_EXPORT Textfield : public View,
+@@ -454,7 +454,7 @@ class VIEWS_EXPORT Textfield : public View,
// Set whether the text should be used to improve typing suggestions.
void SetShouldDoLearning(bool value) { should_do_learning_ = value; }
diff --git a/www/ungoogled-chromium/files/patch-ui_views_views__delegate.cc b/www/ungoogled-chromium/files/patch-ui_views_views__delegate.cc
index 3e8af15e20a2..65ca7fa333eb 100644
--- a/www/ungoogled-chromium/files/patch-ui_views_views__delegate.cc
+++ b/www/ungoogled-chromium/files/patch-ui_views_views__delegate.cc
@@ -1,6 +1,6 @@
---- ui/views/views_delegate.cc.orig 2022-10-01 07:40:07 UTC
+--- ui/views/views_delegate.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/views/views_delegate.cc
-@@ -95,7 +95,7 @@ bool ViewsDelegate::IsWindowInMetro(gfx::NativeWindow
+@@ -88,7 +88,7 @@ bool ViewsDelegate::IsWindowInMetro(gfx::NativeWindow
return false;
}
#elif BUILDFLAG(ENABLE_DESKTOP_AURA) && \
diff --git a/www/ungoogled-chromium/files/patch-ui_views_views__delegate.h b/www/ungoogled-chromium/files/patch-ui_views_views__delegate.h
index 18829b81bb5b..de6fdaf8c24a 100644
--- a/www/ungoogled-chromium/files/patch-ui_views_views__delegate.h
+++ b/www/ungoogled-chromium/files/patch-ui_views_views__delegate.h
@@ -1,6 +1,6 @@
---- ui/views/views_delegate.h.orig 2022-10-01 07:40:07 UTC
+--- ui/views/views_delegate.h.orig 2024-02-03 15:42:55 UTC
+++ ui/views/views_delegate.h
-@@ -139,7 +139,7 @@ class VIEWS_EXPORT ViewsDelegate {
+@@ -138,7 +138,7 @@ class VIEWS_EXPORT ViewsDelegate {
// environment.
virtual bool IsWindowInMetro(gfx::NativeWindow window) const;
#elif BUILDFLAG(ENABLE_DESKTOP_AURA) && \
diff --git a/www/ungoogled-chromium/files/patch-ui_views_widget_desktop__aura_desktop__window__tree__host__platform.cc b/www/ungoogled-chromium/files/patch-ui_views_widget_desktop__aura_desktop__window__tree__host__platform.cc
index a44f32e59157..c244fd59ccc1 100644
--- a/www/ungoogled-chromium/files/patch-ui_views_widget_desktop__aura_desktop__window__tree__host__platform.cc
+++ b/www/ungoogled-chromium/files/patch-ui_views_widget_desktop__aura_desktop__window__tree__host__platform.cc
@@ -1,4 +1,4 @@
---- ui/views/widget/desktop_aura/desktop_window_tree_host_platform.cc.orig 2023-11-04 07:08:51 UTC
+--- ui/views/widget/desktop_aura/desktop_window_tree_host_platform.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/views/widget/desktop_aura/desktop_window_tree_host_platform.cc
@@ -44,7 +44,7 @@
#include "ui/wm/core/window_util.h"
@@ -27,7 +27,7 @@
std::make_unique<DesktopDragDropClientOzoneLinux>(window(), drag_handler);
#else
std::make_unique<DesktopDragDropClientOzone>(window(), drag_handler);
-@@ -1084,7 +1084,7 @@ bool DesktopWindowTreeHostPlatform::RotateFocusForWidg
+@@ -1096,7 +1096,7 @@ bool DesktopWindowTreeHostPlatform::RotateFocusForWidg
// DesktopWindowTreeHost:
// Linux subclasses this host and adds some Linux specific bits.
diff --git a/www/ungoogled-chromium/files/patch-ui_views_widget_widget.cc b/www/ungoogled-chromium/files/patch-ui_views_widget_widget.cc
index d481fe04d903..c627207c64d1 100644
--- a/www/ungoogled-chromium/files/patch-ui_views_widget_widget.cc
+++ b/www/ungoogled-chromium/files/patch-ui_views_widget_widget.cc
@@ -1,4 +1,4 @@
---- ui/views/widget/widget.cc.orig 2023-12-23 12:33:28 UTC
+--- ui/views/widget/widget.cc.orig 2024-02-03 15:42:55 UTC
+++ ui/views/widget/widget.cc
@@ -54,7 +54,7 @@
#include "ui/views/window/custom_frame_view.h"
@@ -9,7 +9,7 @@
#include "ui/linux/linux_ui.h"
#endif
-@@ -2068,7 +2068,7 @@ const ui::NativeTheme* Widget::GetNativeTheme() const
+@@ -2103,7 +2103,7 @@ const ui::NativeTheme* Widget::GetNativeTheme() const
if (parent_)
return parent_->GetNativeTheme();
diff --git a/www/ungoogled-chromium/files/patch-ui_views_widget_widget.h b/www/ungoogled-chromium/files/patch-ui_views_widget_widget.h
index b7a2dcb44ee6..6d82a0df6649 100644
--- a/www/ungoogled-chromium/files/patch-ui_views_widget_widget.h
+++ b/www/ungoogled-chromium/files/patch-ui_views_widget_widget.h
@@ -1,6 +1,6 @@
---- ui/views/widget/widget.h.orig 2023-11-04 07:08:51 UTC
+--- ui/views/widget/widget.h.orig 2024-02-03 15:42:55 UTC
+++ ui/views/widget/widget.h
-@@ -417,7 +417,7 @@ class VIEWS_EXPORT Widget : public internal::NativeWid
+@@ -423,7 +423,7 @@ class VIEWS_EXPORT Widget : public internal::NativeWid
// If set, the widget was created in headless mode.
bool headless_mode = false;
diff --git a/www/ungoogled-chromium/files/patch-v8_BUILD.gn b/www/ungoogled-chromium/files/patch-v8_BUILD.gn
index bf064b2fdce7..e09ada4ad0f5 100644
--- a/www/ungoogled-chromium/files/patch-v8_BUILD.gn
+++ b/www/ungoogled-chromium/files/patch-v8_BUILD.gn
@@ -1,6 +1,6 @@
---- v8/BUILD.gn.orig 2023-12-23 12:33:28 UTC
+--- v8/BUILD.gn.orig 2024-02-03 15:42:55 UTC
+++ v8/BUILD.gn
-@@ -1425,6 +1425,14 @@ config("toolchain") {
+@@ -1441,6 +1441,14 @@ config("toolchain") {
} else if (target_os == "chromeos") {
defines += [ "V8_HAVE_TARGET_OS" ]
defines += [ "V8_TARGET_OS_CHROMEOS" ]
@@ -15,7 +15,7 @@
}
# TODO(infra): Support v8_enable_prof on Windows.
-@@ -2388,6 +2396,12 @@ template("run_mksnapshot") {
+@@ -2404,6 +2412,12 @@ template("run_mksnapshot") {
if (!v8_enable_builtins_profiling && v8_enable_builtins_reordering) {
args += [ "--reorder-builtins" ]
}
@@ -28,7 +28,7 @@
}
# This is needed to distinguish between generating code for the simulator
-@@ -6248,7 +6262,7 @@ v8_component("v8_libbase") {
+@@ -6324,7 +6338,7 @@ v8_component("v8_libbase") {
}
}
@@ -37,7 +37,7 @@
sources += [
"src/base/debug/stack_trace_posix.cc",
"src/base/platform/platform-linux.cc",
-@@ -6259,6 +6273,18 @@ v8_component("v8_libbase") {
+@@ -6335,6 +6349,18 @@ v8_component("v8_libbase") {
"dl",
"rt",
]
diff --git a/www/ungoogled-chromium/files/patch-v8_src_api_api.cc b/www/ungoogled-chromium/files/patch-v8_src_api_api.cc
index 014cd7d19c25..7e2960920adf 100644
--- a/www/ungoogled-chromium/files/patch-v8_src_api_api.cc
+++ b/www/ungoogled-chromium/files/patch-v8_src_api_api.cc
@@ -1,4 +1,4 @@
---- v8/src/api/api.cc.orig 2023-12-23 12:33:28 UTC
+--- v8/src/api/api.cc.orig 2024-02-03 15:42:55 UTC
+++ v8/src/api/api.cc
@@ -141,7 +141,7 @@
#include "src/wasm/wasm-serialization.h"
@@ -9,7 +9,7 @@
#include <signal.h>
#include <unistd.h>
-@@ -6356,7 +6356,7 @@ bool v8::V8::Initialize(const int build_config) {
+@@ -6394,7 +6394,7 @@ bool v8::V8::Initialize(const int build_config) {
return true;
}
diff --git a/www/ungoogled-chromium/files/patch-v8_src_baseline_x64_baseline-assembler-x64-inl.h b/www/ungoogled-chromium/files/patch-v8_src_baseline_x64_baseline-assembler-x64-inl.h
deleted file mode 100644
index 0474871deca0..000000000000
--- a/www/ungoogled-chromium/files/patch-v8_src_baseline_x64_baseline-assembler-x64-inl.h
+++ /dev/null
@@ -1,13 +0,0 @@
---- v8/src/baseline/x64/baseline-assembler-x64-inl.h.orig 2023-11-04 07:08:51 UTC
-+++ v8/src/baseline/x64/baseline-assembler-x64-inl.h
-@@ -76,9 +76,7 @@ MemOperand BaselineAssembler::FeedbackCellOperand() {
-
- void BaselineAssembler::Bind(Label* label) { __ bind(label); }
-
--void BaselineAssembler::JumpTarget() {
-- // NOP on x64.
--}
-+void BaselineAssembler::JumpTarget() { __ endbr64(); }
-
- void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
- __ jmp(target, distance);
diff --git a/www/ungoogled-chromium/files/patch-v8_src_builtins_x64_builtins-x64.cc b/www/ungoogled-chromium/files/patch-v8_src_builtins_x64_builtins-x64.cc
deleted file mode 100644
index f327aba22943..000000000000
--- a/www/ungoogled-chromium/files/patch-v8_src_builtins_x64_builtins-x64.cc
+++ /dev/null
@@ -1,46 +0,0 @@
---- v8/src/builtins/x64/builtins-x64.cc.orig 2023-12-23 12:33:28 UTC
-+++ v8/src/builtins/x64/builtins-x64.cc
-@@ -44,6 +44,8 @@ namespace internal {
- #define __ ACCESS_MASM(masm)
-
- void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
-+ __ CodeEntry();
-+
- __ LoadAddress(kJavaScriptCallExtraArg1Register,
- ExternalReference::Create(address));
- __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
-@@ -430,7 +432,7 @@ void Generate_JSEntryVariant(MacroAssembler* masm, Sta
- // Jump to a faked try block that does the invoke, with a faked catch
- // block that sets the pending exception.
- __ jmp(&invoke);
-- __ bind(&handler_entry);
-+ __ BindExceptionHandler(&handler_entry);
-
- // Store the current pc as the handler offset. It's used later to create the
- // handler table.
-@@ -3339,6 +3341,9 @@ void SwitchBackAndReturnPromise(MacroAssembler* masm,
- void GenerateExceptionHandlingLandingPad(MacroAssembler* masm,
- Label* return_promise) {
- int catch_handler = __ pc_offset();
-+
-+ __ endbr64();
-+
- // Restore rsp to free the reserved stack slots for the sections.
- __ leaq(rsp, MemOperand(rbp, StackSwitchFrameConstants::kLastSpillOffset));
-
-@@ -3696,6 +3701,7 @@ void Builtins::Generate_WasmSuspend(MacroAssembler* ma
- LoadJumpBuffer(masm, jmpbuf, true);
- __ Trap();
- __ bind(&resume);
-+ __ endbr64();
- __ LeaveFrame(StackFrame::STACK_SWITCH);
- __ ret(0);
- }
-@@ -3828,6 +3834,7 @@ void Generate_WasmResumeHelper(MacroAssembler* masm, w
- }
- __ Trap();
- __ bind(&suspend);
-+ __ endbr64();
- __ LeaveFrame(StackFrame::STACK_SWITCH);
- // Pop receiver + parameter.
- __ ret(2 * kSystemPointerSize);
diff --git a/www/ungoogled-chromium/files/patch-v8_src_codegen_x64_assembler-x64.cc b/www/ungoogled-chromium/files/patch-v8_src_codegen_x64_assembler-x64.cc
deleted file mode 100644
index 2ea5490d8765..000000000000
--- a/www/ungoogled-chromium/files/patch-v8_src_codegen_x64_assembler-x64.cc
+++ /dev/null
@@ -1,42 +0,0 @@
---- v8/src/codegen/x64/assembler-x64.cc.orig 2023-09-17 07:59:53 UTC
-+++ v8/src/codegen/x64/assembler-x64.cc
-@@ -1316,6 +1316,14 @@ void Assembler::hlt() {
- emit(0xF4);
- }
-
-+void Assembler::endbr64() {
-+ EnsureSpace ensure_space(this);
-+ emit(0xF3);
-+ emit(0x0f);
-+ emit(0x1e);
-+ emit(0xfa);
-+}
-+
- void Assembler::emit_idiv(Register src, int size) {
- EnsureSpace ensure_space(this);
- emit_rex(src, size);
-@@ -1583,16 +1591,22 @@ void Assembler::jmp(Handle<Code> target, RelocInfo::Mo
- emitl(code_target_index);
- }
-
--void Assembler::jmp(Register target) {
-+void Assembler::jmp(Register target, bool notrack) {
- EnsureSpace ensure_space(this);
-+ if (notrack) {
-+ emit(0x3e);
-+ }
- // Opcode FF/4 r64.
- emit_optional_rex_32(target);
- emit(0xFF);
- emit_modrm(0x4, target);
- }
-
--void Assembler::jmp(Operand src) {
-+void Assembler::jmp(Operand src, bool notrack) {
- EnsureSpace ensure_space(this);
-+ if (notrack) {
-+ emit(0x3e);
-+ }
- // Opcode FF/4 m64.
- emit_optional_rex_32(src);
- emit(0xFF);
diff --git a/www/ungoogled-chromium/files/patch-v8_src_codegen_x64_assembler-x64.h b/www/ungoogled-chromium/files/patch-v8_src_codegen_x64_assembler-x64.h
deleted file mode 100644
index fba881372347..000000000000
--- a/www/ungoogled-chromium/files/patch-v8_src_codegen_x64_assembler-x64.h
+++ /dev/null
@@ -1,21 +0,0 @@
---- v8/src/codegen/x64/assembler-x64.h.orig 2023-11-04 07:08:51 UTC
-+++ v8/src/codegen/x64/assembler-x64.h
-@@ -860,6 +860,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBa
- void ret(int imm16);
- void ud2();
- void setcc(Condition cc, Register reg);
-+ void endbr64();
-
- void pblendw(XMMRegister dst, Operand src, uint8_t mask);
- void pblendw(XMMRegister dst, XMMRegister src, uint8_t mask);
-@@ -918,8 +919,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBa
- void jmp(Handle<Code> target, RelocInfo::Mode rmode);
-
- // Jump near absolute indirect (r64)
-- void jmp(Register adr);
-- void jmp(Operand src);
-+ void jmp(Register adr, bool notrack = false);
-+ void jmp(Operand src, bool notrack = false);
-
- // Unconditional jump relative to the current address. Low-level routine,
- // use with caution!
diff --git a/www/ungoogled-chromium/files/patch-v8_src_codegen_x64_macro-assembler-x64.cc b/www/ungoogled-chromium/files/patch-v8_src_codegen_x64_macro-assembler-x64.cc
deleted file mode 100644
index 0389b3197f8c..000000000000
--- a/www/ungoogled-chromium/files/patch-v8_src_codegen_x64_macro-assembler-x64.cc
+++ /dev/null
@@ -1,20 +0,0 @@
---- v8/src/codegen/x64/macro-assembler-x64.cc.orig 2023-12-23 12:33:28 UTC
-+++ v8/src/codegen/x64/macro-assembler-x64.cc
-@@ -51,6 +51,8 @@ Operand StackArgumentsAccessor::GetArgumentOperand(int
- return Operand(rsp, kPCOnStackSize + index * kSystemPointerSize);
- }
-
-+void MacroAssembler::CodeEntry() { endbr64(); }
-+
- void MacroAssembler::Load(Register destination, ExternalReference source) {
- if (root_array_available_ && options().enable_root_relative_access) {
- intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
-@@ -2144,7 +2146,7 @@ void MacroAssembler::Switch(Register scratch, Register
- cmpq(reg, Immediate(num_labels));
- j(above_equal, &fallthrough);
- leaq(table, MemOperand(&jump_table));
-- jmp(MemOperand(table, reg, times_8, 0));
-+ jmp(MemOperand(table, reg, times_8, 0), /*notrack=*/true);
- // Emit the jump table inline, under the assumption that it's not too big.
- Align(kSystemPointerSize);
- bind(&jump_table);
diff --git a/www/ungoogled-chromium/files/patch-v8_src_codegen_x64_macro-assembler-x64.h b/www/ungoogled-chromium/files/patch-v8_src_codegen_x64_macro-assembler-x64.h
deleted file mode 100644
index 9243cc4a7f92..000000000000
--- a/www/ungoogled-chromium/files/patch-v8_src_codegen_x64_macro-assembler-x64.h
+++ /dev/null
@@ -1,20 +0,0 @@
---- v8/src/codegen/x64/macro-assembler-x64.h.orig 2023-11-04 07:08:51 UTC
-+++ v8/src/codegen/x64/macro-assembler-x64.h
-@@ -655,11 +655,14 @@ class V8_EXPORT_PRIVATE MacroAssembler
-
- // Define a function entrypoint. This doesn't emit any code for this
- // architecture, as control-flow integrity is not supported for it.
-- void CodeEntry() {}
-+ void CodeEntry();
- // Define an exception handler.
-- void ExceptionHandler() {}
-+ void ExceptionHandler() { CodeEntry(); }
- // Define an exception handler and bind a label.
-- void BindExceptionHandler(Label* label) { bind(label); }
-+ void BindExceptionHandler(Label* label) {
-+ bind(label);
-+ CodeEntry();
-+ }
-
- // ---------------------------------------------------------------------------
- // Pointer compression support
diff --git a/www/ungoogled-chromium/files/patch-v8_src_compiler_backend_x64_code-generator-x64.cc b/www/ungoogled-chromium/files/patch-v8_src_compiler_backend_x64_code-generator-x64.cc
deleted file mode 100644
index 7d822e389c42..000000000000
--- a/www/ungoogled-chromium/files/patch-v8_src_compiler_backend_x64_code-generator-x64.cc
+++ /dev/null
@@ -1,16 +0,0 @@
---- v8/src/compiler/backend/x64/code-generator-x64.cc.orig 2023-12-23 12:33:28 UTC
-+++ v8/src/compiler/backend/x64/code-generator-x64.cc
-@@ -6850,11 +6850,11 @@ void CodeGenerator::AssembleArchTableSwitch(Instructio
- // target = table + (target - table)
- __ addq(input, kScratchRegister);
- // Jump to the target.
-- __ jmp(input);
-+ __ jmp(input, /*notrack=*/true);
- } else {
- // For non builtins, the value in the table is 'target_address' (8 bytes)
- // jmp [table + index*8]
-- __ jmp(Operand(kScratchRegister, input, times_8, 0));
-+ __ jmp(Operand(kScratchRegister, input, times_8, 0), /*notrack=*/true);
- }
- }
-
diff --git a/www/ungoogled-chromium/files/patch-v8_src_deoptimizer_x64_deoptimizer-x64.cc b/www/ungoogled-chromium/files/patch-v8_src_deoptimizer_x64_deoptimizer-x64.cc
deleted file mode 100644
index 02fad132444e..000000000000
--- a/www/ungoogled-chromium/files/patch-v8_src_deoptimizer_x64_deoptimizer-x64.cc
+++ /dev/null
@@ -1,11 +0,0 @@
---- v8/src/deoptimizer/x64/deoptimizer-x64.cc.orig 2023-07-21 09:49:17 UTC
-+++ v8/src/deoptimizer/x64/deoptimizer-x64.cc
-@@ -21,7 +21,7 @@ ASSERT_OFFSET(Builtin::kDeoptimizationEntry_Lazy);
- #undef ASSERT_OFFSET
-
- const int Deoptimizer::kEagerDeoptExitSize = 4;
--const int Deoptimizer::kLazyDeoptExitSize = 4;
-+const int Deoptimizer::kLazyDeoptExitSize = 8;
-
- Float32 RegisterValues::GetFloatRegister(unsigned n) const {
- return Float32::FromBits(
diff --git a/www/ungoogled-chromium/files/patch-v8_src_execution_isolate.cc b/www/ungoogled-chromium/files/patch-v8_src_execution_isolate.cc
index 6504d9d7b9b4..cc245a0f24f2 100644
--- a/www/ungoogled-chromium/files/patch-v8_src_execution_isolate.cc
+++ b/www/ungoogled-chromium/files/patch-v8_src_execution_isolate.cc
@@ -1,6 +1,6 @@
---- v8/src/execution/isolate.cc.orig 2023-12-23 12:33:28 UTC
+--- v8/src/execution/isolate.cc.orig 2024-02-03 15:42:55 UTC
+++ v8/src/execution/isolate.cc
-@@ -147,6 +147,10 @@
+@@ -148,6 +148,10 @@
#include "src/execution/simulator-base.h"
#endif
@@ -11,7 +11,7 @@
extern "C" const uint8_t v8_Default_embedded_blob_code_[];
extern "C" uint32_t v8_Default_embedded_blob_code_size_;
extern "C" const uint8_t v8_Default_embedded_blob_data_[];
-@@ -4190,6 +4194,11 @@ void Isolate::InitializeDefaultEmbeddedBlob() {
+@@ -4154,6 +4158,11 @@ void Isolate::InitializeDefaultEmbeddedBlob() {
uint32_t code_size = DefaultEmbeddedBlobCodeSize();
const uint8_t* data = DefaultEmbeddedBlobData();
uint32_t data_size = DefaultEmbeddedBlobDataSize();
diff --git a/www/ungoogled-chromium/files/patch-v8_src_flags_flags.cc b/www/ungoogled-chromium/files/patch-v8_src_flags_flags.cc
index 0563285ad4a6..0d04614804b2 100644
--- a/www/ungoogled-chromium/files/patch-v8_src_flags_flags.cc
+++ b/www/ungoogled-chromium/files/patch-v8_src_flags_flags.cc
@@ -1,4 +1,4 @@
---- v8/src/flags/flags.cc.orig 2023-11-04 07:08:51 UTC
+--- v8/src/flags/flags.cc.orig 2024-02-03 15:42:55 UTC
+++ v8/src/flags/flags.cc
@@ -13,6 +13,10 @@
#include <set>
@@ -23,7 +23,7 @@
// {v8_flags} needs to be aligned to a memory page, and the size needs to be a
// multiple of a page size. This is required for memory-protection of the memory
-@@ -905,6 +913,10 @@ void FlagList::FreezeFlags() {
+@@ -912,6 +920,10 @@ void FlagList::FreezeFlags() {
// Note that for string flags we only protect the pointer itself, but not the
// string storage. TODO(12887): Fix this.
base::OS::SetDataReadOnly(&v8_flags, sizeof(v8_flags));
diff --git a/www/ungoogled-chromium/files/patch-v8_src_maglev_x64_maglev-assembler-x64-inl.h b/www/ungoogled-chromium/files/patch-v8_src_maglev_x64_maglev-assembler-x64-inl.h
deleted file mode 100644
index dd699baba2f2..000000000000
--- a/www/ungoogled-chromium/files/patch-v8_src_maglev_x64_maglev-assembler-x64-inl.h
+++ /dev/null
@@ -1,14 +0,0 @@
---- v8/src/maglev/x64/maglev-assembler-x64-inl.h.orig 2023-12-23 12:33:28 UTC
-+++ v8/src/maglev/x64/maglev-assembler-x64-inl.h
-@@ -232,7 +232,10 @@ void MaglevAssembler::PushReverse(T... vals) {
- detail::PushAllHelper<T...>::PushReverse(this, vals...);
- }
-
--inline void MaglevAssembler::BindJumpTarget(Label* label) { bind(label); }
-+inline void MaglevAssembler::BindJumpTarget(Label* label) {
-+ bind(label);
-+ endbr64();
-+}
-
- inline void MaglevAssembler::BindBlock(BasicBlock* block) {
- bind(block->label());
diff --git a/www/ungoogled-chromium/files/patch-v8_src_maglev_x64_maglev-assembler-x64.cc b/www/ungoogled-chromium/files/patch-v8_src_maglev_x64_maglev-assembler-x64.cc
deleted file mode 100644
index 79dac0642ae6..000000000000
--- a/www/ungoogled-chromium/files/patch-v8_src_maglev_x64_maglev-assembler-x64.cc
+++ /dev/null
@@ -1,16 +0,0 @@
---- v8/src/maglev/x64/maglev-assembler-x64.cc.orig 2023-09-17 07:59:53 UTC
-+++ v8/src/maglev/x64/maglev-assembler-x64.cc
-@@ -433,10 +433,12 @@ void MaglevAssembler::OSRPrologue(Graph* graph) {
- void MaglevAssembler::Prologue(Graph* graph) {
- DCHECK(!graph->is_osr());
-
-+ CodeEntry();
-+
- BailoutIfDeoptimized(rbx);
-
- if (graph->has_recursive_calls()) {
-- bind(code_gen_state()->entry_label());
-+ BindJumpTarget(code_gen_state()->entry_label());
- }
-
- // Tiering support.
diff --git a/www/ungoogled-chromium/files/patch-v8_src_regexp_x64_regexp-macro-assembler-x64.cc b/www/ungoogled-chromium/files/patch-v8_src_regexp_x64_regexp-macro-assembler-x64.cc
deleted file mode 100644
index 09b8c6bd5532..000000000000
--- a/www/ungoogled-chromium/files/patch-v8_src_regexp_x64_regexp-macro-assembler-x64.cc
+++ /dev/null
@@ -1,22 +0,0 @@
---- v8/src/regexp/x64/regexp-macro-assembler-x64.cc.orig 2023-07-21 09:49:17 UTC
-+++ v8/src/regexp/x64/regexp-macro-assembler-x64.cc
-@@ -110,6 +110,7 @@ RegExpMacroAssemblerX64::RegExpMacroAssemblerX64(Isola
- backtrack_label_(),
- exit_label_() {
- DCHECK_EQ(0, registers_to_save % 2);
-+ __ CodeEntry();
- __ jmp(&entry_label_); // We'll write the entry code when we know more.
- __ bind(&start_label_); // And then continue from here.
- }
-@@ -714,6 +715,11 @@ bool RegExpMacroAssemblerX64::CheckSpecialClassRanges(
- // Match any character.
- return true;
- }
-+}
-+
-+void RegExpMacroAssemblerX64::BindJumpTarget(Label* label) {
-+ Bind(label);
-+ __ CodeEntry();
- }
-
- void RegExpMacroAssemblerX64::Fail() {
diff --git a/www/ungoogled-chromium/files/patch-v8_src_regexp_x64_regexp-macro-assembler-x64.h b/www/ungoogled-chromium/files/patch-v8_src_regexp_x64_regexp-macro-assembler-x64.h
deleted file mode 100644
index d7c0e58e6051..000000000000
--- a/www/ungoogled-chromium/files/patch-v8_src_regexp_x64_regexp-macro-assembler-x64.h
+++ /dev/null
@@ -1,12 +0,0 @@
---- v8/src/regexp/x64/regexp-macro-assembler-x64.h.orig 2023-07-21 09:49:17 UTC
-+++ v8/src/regexp/x64/regexp-macro-assembler-x64.h
-@@ -59,6 +59,9 @@ class V8_EXPORT_PRIVATE RegExpMacroAssemblerX64
- void CheckPosition(int cp_offset, Label* on_outside_input) override;
- bool CheckSpecialClassRanges(StandardCharacterSet type,
- Label* on_no_match) override;
-+
-+ void BindJumpTarget(Label* label) override;
-+
- void Fail() override;
- Handle<HeapObject> GetCode(Handle<String> source) override;
- void GoTo(Label* label) override;
diff --git a/www/ungoogled-chromium/files/patch-v8_src_wasm_jump-table-assembler.cc b/www/ungoogled-chromium/files/patch-v8_src_wasm_jump-table-assembler.cc
deleted file mode 100644
index 2ca6edf1af90..000000000000
--- a/www/ungoogled-chromium/files/patch-v8_src_wasm_jump-table-assembler.cc
+++ /dev/null
@@ -1,51 +0,0 @@
---- v8/src/wasm/jump-table-assembler.cc.orig 2023-11-04 07:08:51 UTC
-+++ v8/src/wasm/jump-table-assembler.cc
-@@ -74,15 +74,21 @@ void JumpTableAssembler::InitializeJumpsToLazyCompileT
- #if V8_TARGET_ARCH_X64
- void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
- Address lazy_compile_target) {
-+ intptr_t displacement =
-+ static_cast<intptr_t>(reinterpret_cast<uint8_t*>(lazy_compile_target) -
-+ (pc_ + 9) - kNearJmpInstrSize);
-+ if (!is_int32(displacement)) return;
-+ CodeEntry(); // 4 bytes
- // Use a push, because mov to an extended register takes 6 bytes.
-- pushq_imm32(func_index); // 5 bytes
-- EmitJumpSlot(lazy_compile_target); // 5 bytes
-+ pushq_imm32(func_index); // 5 bytes
-+ near_jmp(displacement, RelocInfo::NO_INFO); // 5 bytes
- }
-
- bool JumpTableAssembler::EmitJumpSlot(Address target) {
- intptr_t displacement = static_cast<intptr_t>(
-- reinterpret_cast<uint8_t*>(target) - pc_ - kNearJmpInstrSize);
-+ reinterpret_cast<uint8_t*>(target) - (pc_ + 4) - kNearJmpInstrSize);
- if (!is_int32(displacement)) return false;
-+ CodeEntry(); // 4 bytes
- near_jmp(displacement, RelocInfo::NO_INFO); // 5 bytes
- return true;
- }
-@@ -90,11 +96,12 @@ bool JumpTableAssembler::EmitJumpSlot(Address target)
- void JumpTableAssembler::EmitFarJumpSlot(Address target) {
- Label data;
- int start_offset = pc_offset();
-+ CodeEntry(); // 4 bytes
- jmp(Operand(&data)); // 6 bytes
-- Nop(2); // 2 bytes
-+ Nop(6); // 6 bytes
- // The data must be properly aligned, so it can be patched atomically (see
- // {PatchFarJumpSlot}).
-- DCHECK_EQ(start_offset + kSystemPointerSize, pc_offset());
-+ DCHECK_EQ(start_offset + kFarJumpTableSlotOffset, pc_offset());
- USE(start_offset);
- bind(&data);
- dq(target); // 8 bytes
-@@ -105,7 +112,7 @@ void JumpTableAssembler::PatchFarJumpSlot(Address slot
- // The slot needs to be pointer-size aligned so we can atomically update it.
- DCHECK(IsAligned(slot, kSystemPointerSize));
- // Offset of the target is at 8 bytes, see {EmitFarJumpSlot}.
-- reinterpret_cast<std::atomic<Address>*>(slot + kSystemPointerSize)
-+ reinterpret_cast<std::atomic<Address>*>(slot + kFarJumpTableSlotOffset)
- ->store(target, std::memory_order_relaxed);
- // The update is atomic because the address is properly aligned.
- // Because of cache coherence, the data update will eventually be seen by all
diff --git a/www/ungoogled-chromium/files/patch-v8_src_wasm_jump-table-assembler.h b/www/ungoogled-chromium/files/patch-v8_src_wasm_jump-table-assembler.h
deleted file mode 100644
index 5ed729e879a4..000000000000
--- a/www/ungoogled-chromium/files/patch-v8_src_wasm_jump-table-assembler.h
+++ /dev/null
@@ -1,16 +0,0 @@
---- v8/src/wasm/jump-table-assembler.h.orig 2023-11-04 07:08:51 UTC
-+++ v8/src/wasm/jump-table-assembler.h
-@@ -174,9 +174,10 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public Ma
- // boundaries. The jump table line size has been chosen to satisfy this.
- #if V8_TARGET_ARCH_X64
- static constexpr int kJumpTableLineSize = 64;
-- static constexpr int kJumpTableSlotSize = 5;
-- static constexpr int kFarJumpTableSlotSize = 16;
-- static constexpr int kLazyCompileTableSlotSize = 10;
-+ static constexpr int kJumpTableSlotSize = 5 + 4;
-+ static constexpr int kFarJumpTableSlotOffset = 2 * kSystemPointerSize;
-+ static constexpr int kFarJumpTableSlotSize = 16 + 8;
-+ static constexpr int kLazyCompileTableSlotSize = 10 + 4;
- #elif V8_TARGET_ARCH_IA32
- static constexpr int kJumpTableLineSize = 64;
- static constexpr int kJumpTableSlotSize = 5;
diff --git a/www/ungoogled-chromium/pkg-plist b/www/ungoogled-chromium/pkg-plist
index 25256b942fa6..946b24aa0f0c 100644
--- a/www/ungoogled-chromium/pkg-plist
+++ b/www/ungoogled-chromium/pkg-plist
@@ -132,6 +132,7 @@ bin/ungoogled-chromium
%%DATADIR%%/resources/inspector_overlay/main.js
%%DATADIR%%/snapshot_blob.bin
%%NOT_AARCH64%%%%DATADIR%%/libvk_swiftshader.so
+%%NOT_AARCH64%%%%DATADIR%%/vk_swiftshader_icd.json
%%DATADIR%%/v8_context_snapshot.bin
%%DEBUG%%%%DATADIR%%/character_data_generator
%%DEBUG%%%%DATADIR%%/libVkLayer_khronos_validation.so