From bcc9700ee9717524853cd256e1c6d2dc7cd4bc82 Mon Sep 17 00:00:00 2001 From: Pedro Boechat Date: Sun, 12 Nov 2023 22:27:49 +0000 Subject: [PATCH] First iteration of the Android port. --- CMakeLists.txt | 2 +- FastCG/CMakeLists.txt | 15 +- FastCG/assets/shaders/ImGui.frag | 6 - FastCG/assets/shaders/ImGui.vert | 6 - FastCG/assets/shaders/Lighting.glsl | 4 +- FastCG/assets/shaders/Noise.glsl | 6 +- FastCG/assets/shaders/Shadow.glsl | 2 +- .../shaders/deferred/BumpedDiffuse.frag | 6 - .../shaders/deferred/BumpedDiffuse.vert | 6 - .../shaders/deferred/BumpedSpecular.frag | 6 - .../shaders/deferred/BumpedSpecular.vert | 6 - FastCG/assets/shaders/deferred/Diffuse.frag | 6 - FastCG/assets/shaders/deferred/Diffuse.vert | 6 - .../deferred/DirectionalLightPass.frag | 6 - .../deferred/DirectionalLightPass.vert | 10 +- FastCG/assets/shaders/deferred/FastCG.glsl | 4 +- .../shaders/deferred/PointLightPass.frag | 6 - .../shaders/deferred/PointLightPass.vert | 6 - .../assets/shaders/deferred/SolidColor.frag | 6 - .../assets/shaders/deferred/SolidColor.vert | 6 - FastCG/assets/shaders/deferred/Specular.frag | 6 - FastCG/assets/shaders/deferred/Specular.vert | 6 - .../assets/shaders/deferred/StencilPass.frag | 2 - .../assets/shaders/deferred/StencilPass.vert | 6 - .../assets/shaders/forward/BumpedDiffuse.frag | 6 - .../assets/shaders/forward/BumpedDiffuse.vert | 6 - .../shaders/forward/BumpedSpecular.frag | 6 - .../shaders/forward/BumpedSpecular.vert | 8 +- FastCG/assets/shaders/forward/Diffuse.frag | 6 - FastCG/assets/shaders/forward/Diffuse.vert | 8 +- FastCG/assets/shaders/forward/SolidColor.frag | 6 - FastCG/assets/shaders/forward/SolidColor.vert | 8 +- FastCG/assets/shaders/forward/Specular.frag | 6 - FastCG/assets/shaders/forward/Specular.vert | 8 +- FastCG/assets/shaders/pcss/PCSS.glsl | 16 +- FastCG/assets/shaders/pcss/ShadowMapPass.frag | 2 - FastCG/assets/shaders/pcss/ShadowMapPass.vert | 6 - FastCG/assets/shaders/ssao/SSAOBlurPass.frag | 14 +- FastCG/assets/shaders/ssao/SSAOBlurPass.vert | 6 +- .../shaders/ssao/SSAOHighFrequencyPass.frag | 24 +- .../shaders/ssao/SSAOHighFrequencyPass.vert | 12 +- .../assets/shaders/tonemap/TonemapPass.frag | 6 - .../assets/shaders/tonemap/TonemapPass.vert | 6 +- FastCG/include/FastCG/Assets/AssetSystem.inc | 12 +- FastCG/include/FastCG/Core/Log.h | 30 + FastCG/include/FastCG/Core/Macros.h | 28 +- FastCG/include/FastCG/Core/MsgBox.h | 3 + .../FastCG/Graphics/BaseGraphicsContext.h | 2 +- .../FastCG/Graphics/BaseGraphicsSystem.h | 4 + .../FastCG/Graphics/BaseGraphicsSystem.inc | 2 +- .../include/FastCG/Graphics/OpenGL/OpenGL.h | 9 + .../FastCG/Graphics/OpenGL/OpenGLExceptions.h | 60 +- .../Graphics/OpenGL/OpenGLGraphicsContext.h | 2 +- .../Graphics/OpenGL/OpenGLGraphicsSystem.h | 20 +- .../FastCG/Graphics/OpenGL/OpenGLUtils.h | 14 + .../include/FastCG/Graphics/RenderingPath.h | 10 +- FastCG/include/FastCG/Graphics/ShaderSource.h | 6 +- .../include/FastCG/Graphics/ShaderSource.inc | 24 +- .../include/FastCG/Graphics/Vulkan/Vulkan.h | 10 + .../FastCG/Graphics/Vulkan/VulkanBuffer.h | 4 + .../Graphics/Vulkan/VulkanGraphicsContext.h | 2 +- .../Graphics/Vulkan/VulkanGraphicsSystem.h | 44 +- .../FastCG/Graphics/Vulkan/VulkanTexture.h | 8 +- .../FastCG/Graphics/Vulkan/VulkanUtils.h | 40 +- .../Platform/Android/AndroidApplication.h | 67 + FastCG/include/FastCG/Platform/Application.h | 6 + .../include/FastCG/Platform/BaseApplication.h | 18 + FastCG/include/FastCG/Platform/Directory.inc | 10 +- FastCG/include/FastCG/Platform/File.inc | 4 +- 
FastCG/include/FastCG/Platform/FileWriter.inc | 2 +- FastCG/include/FastCG/Platform/Thread.h | 4 +- FastCG/include/FastCG/Platform/Timer.h | 8 +- .../Platform/Windows/WindowsApplication.h | 4 +- .../include/FastCG/Reflection/Inspectable.h | 6 +- .../FastCG/Rendering/BaseWorldRenderer.inc | 4 +- .../include/FastCG/Rendering/IWorldRenderer.h | 2 + FastCG/src/Assets/AssetSystem.cpp | 24 +- FastCG/src/Graphics/OpenGL/OpenGLBuffer.cpp | 7 + .../Graphics/OpenGL/OpenGLGraphicsContext.cpp | 30 +- .../Graphics/OpenGL/OpenGLGraphicsSystem.cpp | 223 +- FastCG/src/Graphics/OpenGL/OpenGLShader.cpp | 149 +- FastCG/src/Graphics/OpenGL/OpenGLTexture.cpp | 7 +- FastCG/src/Graphics/ShaderImporter.cpp | 14 +- FastCG/src/Graphics/Vulkan/VulkanBuffer.cpp | 2 +- .../Graphics/Vulkan/VulkanGraphicsContext.cpp | 48 +- .../Graphics/Vulkan/VulkanGraphicsSystem.cpp | 393 +- FastCG/src/Graphics/Vulkan/VulkanShader.cpp | 4 +- FastCG/src/Graphics/Vulkan/VulkanTexture.cpp | 9 +- .../Platform/Android/AndroidApplication.cpp | 314 + FastCG/src/Platform/BaseApplication.cpp | 114 +- FastCG/src/Platform/Linux/X11Application.cpp | 5 +- .../Platform/Windows/WindowsApplication.cpp | 91 +- .../src/Rendering/DeferredWorldRenderer.cpp | 2 +- FastCG/src/Rendering/ForwardWorldRenderer.cpp | 2 +- .../Rendering/MaterialDefinitionImporter.cpp | 6 +- FastCG/src/Rendering/RenderingSystem.cpp | 4 +- FastCG/src/World/WorldSystem.cpp | 18 +- cmake/fastcg_glsl_processor.cmake | 25 + cmake/fastcg_setup.cmake | 71 +- cmake/fastcg_targets.cmake | 188 +- cmake/fastcg_template_engine.cmake | 25 + dependencies/CMakeLists.txt | 7 +- dependencies/SPIRV-Cross/CMakeLists.txt | 2 +- dependencies/SPIRV-Cross/README.md | 2 +- .../VulkanMemoryAllocator/CMakeLists.txt | 2 +- .../include/vk_mem_alloc.h | 8279 +++++++++-------- .../android_native_app_glue/CMakeLists.txt | 16 + dependencies/glew/CMakeLists.txt | 2 +- dependencies/glm/CMakeLists.txt | 2 +- dependencies/imgui/CMakeLists.txt | 2 +- dependencies/messagebox-x11/CMakeLists.txt | 2 +- dependencies/rapidjson/CMakeLists.txt | 2 +- dependencies/stb/CMakeLists.txt | 2 +- dependencies/tinyobj_loader_c/CMakeLists.txt | 2 +- disable_cross_compiling.bat | 3 + disable_cross_compiling.sh | 3 + enable_android_cross_compiling.bat | 9 + enable_android_cross_compiling.sh | 9 + .../gradle/project/app/build.gradle.template | 39 + .../app/src/main/AndroidManifest.xml.template | 18 + .../main/java/com/fastcg/MainActivity.java | 154 + .../src/main/res/values/strings.xml.template | 3 + resources/Android/gradle/project/build.gradle | 22 + .../Android/gradle/project/gradle.properties | 2 + .../project/gradle/wrapper/gradle-wrapper.jar | Bin 0 -> 63721 bytes .../gradle/wrapper/gradle-wrapper.properties | 7 + resources/Android/gradle/project/gradlew | 249 + resources/Android/gradle/project/gradlew.bat | 92 + .../gradle/project/settings.gradle.template | 2 + samples/CMakeLists.txt | 2 +- samples/bump_mapping/CMakeLists.txt | 2 +- .../src/BumpMappingApplication.cpp | 4 +- samples/bump_mapping/src/main.cpp | 7 - samples/deferred_rendering/CMakeLists.txt | 2 +- .../src/DeferredRenderingApplication.cpp | 2 + samples/deferred_rendering/src/main.cpp | 7 - samples/pcss/CMakeLists.txt | 2 +- samples/pcss/src/PCSSApplication.cpp | 4 +- samples/pcss/src/main.cpp | 7 - samples/ssao/CMakeLists.txt | 2 +- samples/ssao/src/SSAOApplication.cpp | 4 +- samples/ssao/src/main.cpp | 7 - 142 files changed, 6721 insertions(+), 4810 deletions(-) create mode 100644 FastCG/include/FastCG/Core/Log.h create mode 100644 
FastCG/include/FastCG/Platform/Android/AndroidApplication.h create mode 100644 FastCG/src/Platform/Android/AndroidApplication.cpp create mode 100644 cmake/fastcg_glsl_processor.cmake create mode 100644 cmake/fastcg_template_engine.cmake create mode 100644 dependencies/android_native_app_glue/CMakeLists.txt create mode 100644 disable_cross_compiling.bat create mode 100644 disable_cross_compiling.sh create mode 100644 enable_android_cross_compiling.bat create mode 100644 enable_android_cross_compiling.sh create mode 100644 resources/Android/gradle/project/app/build.gradle.template create mode 100644 resources/Android/gradle/project/app/src/main/AndroidManifest.xml.template create mode 100644 resources/Android/gradle/project/app/src/main/java/com/fastcg/MainActivity.java create mode 100644 resources/Android/gradle/project/app/src/main/res/values/strings.xml.template create mode 100644 resources/Android/gradle/project/build.gradle create mode 100644 resources/Android/gradle/project/gradle.properties create mode 100644 resources/Android/gradle/project/gradle/wrapper/gradle-wrapper.jar create mode 100644 resources/Android/gradle/project/gradle/wrapper/gradle-wrapper.properties create mode 100644 resources/Android/gradle/project/gradlew create mode 100644 resources/Android/gradle/project/gradlew.bat create mode 100644 resources/Android/gradle/project/settings.gradle.template delete mode 100644 samples/bump_mapping/src/main.cpp delete mode 100644 samples/deferred_rendering/src/main.cpp delete mode 100644 samples/pcss/src/main.cpp delete mode 100644 samples/ssao/src/main.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 964824b4..fdf325a4 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.8) +cmake_minimum_required(VERSION 3.20) project(FastCG) diff --git a/FastCG/CMakeLists.txt b/FastCG/CMakeLists.txt index a6587c2c..8f0e3744 100644 --- a/FastCG/CMakeLists.txt +++ b/FastCG/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.8) +cmake_minimum_required(VERSION 3.20) if(FASTCG_GRAPHICS_SYSTEM STREQUAL "Vulkan") find_package(Vulkan REQUIRED) @@ -40,12 +40,19 @@ set(SHADERS ${FORWARD_SHADERS} ${DEFERRED_SHADERS} ${GENERAL_SHADERS}) if(FASTCG_PLATFORM STREQUAL "Linux") set(PLATFORM_INCLUDE_DIRS ${messagebox_x11_INCLUDE_DIRS} ${X11_INCLUDE} ${X11_Xrender_INCLUDE_PATH}) set(PLATFORM_LIBRARIES ${messagebox_x11_LIBRARIES} ${X11_LIBRARIES} ${X11_Xrender_LIB}) +elseif(FASTCG_PLATFORM STREQUAL "Android") + set(PLATFORM_INCLUDE_DIRS ${android_native_app_glue_INCLUDE_DIRS}) + set(PLATFORM_LIBRARIES android log android_native_app_glue) endif() if(FASTCG_GRAPHICS_SYSTEM STREQUAL "OpenGL") - set(GRAPHICS_SYSTEM_INCLUDE_DIRS ${glew_INCLUDE_DIRS}) - set(GRAPHICS_SYSTEM_LIBRARIES ${glew_LIBRARIES}) - set(GRAPHICS_SYSTEM_DEFINES -DGLEW_STATIC) + if(FASTCG_PLATFORM STREQUAL "Android") + set(GRAPHICS_SYSTEM_LIBRARIES EGL GLESv3) + else() + set(GRAPHICS_SYSTEM_INCLUDE_DIRS ${glew_INCLUDE_DIRS}) + set(GRAPHICS_SYSTEM_LIBRARIES ${glew_LIBRARIES}) + set(GRAPHICS_SYSTEM_DEFINES -DGLEW_STATIC) + endif() elseif(FASTCG_GRAPHICS_SYSTEM STREQUAL "Vulkan") set(GRAPHICS_SYSTEM_INCLUDE_DIRS ${Vulkan_INCLUDE_DIRS} ${VulkanMemoryAllocator_INCLUDE_DIRS}) set(GRAPHICS_SYSTEM_LIBRARIES ${Vulkan_LIBRARIES} ${VulkanMemoryAllocator_LIBRARIES} spirv-cross-core) diff --git a/FastCG/assets/shaders/ImGui.frag b/FastCG/assets/shaders/ImGui.frag index 9555d194..3847356f 100644 --- a/FastCG/assets/shaders/ImGui.frag +++ b/FastCG/assets/shaders/ImGui.frag @@ -1,9 +1,3 @@ -#version 430 
core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" layout(BINDING_0_1) uniform sampler2D uColorMap; diff --git a/FastCG/assets/shaders/ImGui.vert b/FastCG/assets/shaders/ImGui.vert index e2edba1f..1d311af2 100644 --- a/FastCG/assets/shaders/ImGui.vert +++ b/FastCG/assets/shaders/ImGui.vert @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "ImGui.glsl" diff --git a/FastCG/assets/shaders/Lighting.glsl b/FastCG/assets/shaders/Lighting.glsl index 58ba09de..6c2e1e4f 100644 --- a/FastCG/assets/shaders/Lighting.glsl +++ b/FastCG/assets/shaders/Lighting.glsl @@ -51,7 +51,7 @@ vec4 Phong(vec4 diffuse, vec4 specular, float shininess, vec3 lightDirection, ve vec4 diffuseContribution = uLight0DiffuseColor * uLight0Intensity * diffuse * diffuseAttenuation; vec3 reflectionDirection = normalize(reflect(-lightDirection, normal)); - float specularAttenuation = max(pow(max(dot(reflectionDirection, viewerDirection), 0.0), shininess), 0); + float specularAttenuation = max(pow(max(dot(reflectionDirection, viewerDirection), 0.0), shininess), 0.0); vec4 specularContribution = uLight0SpecularColor * uLight0Intensity * specular * specularAttenuation; return DistanceAttenuation(worldPosition) * (uAmbientColor + diffuseContribution + specularContribution) * GetShadow(uPCSSData, uShadowMap, worldPosition) * GetAmbientOcclusion(uAmbientOcclusionMap, screenCoords); @@ -71,7 +71,7 @@ vec4 BlinnPhong(vec4 diffuse, vec4 specular, float shininess, vec3 lightDirectio vec4 diffuseContribution = uLight0DiffuseColor * uLight0Intensity * diffuse * diffuseAttenuation; vec3 halfwayVector = normalize(lightDirection + viewerDirection); - float specularAttenuation = max(pow(max(dot(halfwayVector, normal), 0.0), shininess), 0); + float specularAttenuation = max(pow(max(dot(halfwayVector, normal), 0.0), shininess), 0.0); vec4 specularContribution = uLight0SpecularColor * uLight0Intensity * specular * specularAttenuation; return DistanceAttenuation(worldPosition) * (uAmbientColor + diffuseContribution + specularContribution) * GetShadow(uPCSSData, uShadowMap, worldPosition) * GetAmbientOcclusion(uAmbientOcclusionMap, screenCoords); diff --git a/FastCG/assets/shaders/Noise.glsl b/FastCG/assets/shaders/Noise.glsl index a6ac7c05..fb828b6f 100644 --- a/FastCG/assets/shaders/Noise.glsl +++ b/FastCG/assets/shaders/Noise.glsl @@ -1,7 +1,7 @@ #ifndef FASTCG_NOISE_GLSL #define FASTCG_NOISE_GLSL -const vec2 poissonDisk[16] = { +const vec2 poissonDisk[16] = vec2[]( vec2( -0.94201624, -0.39906216 ), vec2( 0.94558609, -0.76890725 ), vec2( -0.094184101, -0.92938870 ), @@ -18,11 +18,11 @@ const vec2 poissonDisk[16] = { vec2( -0.81409955, 0.91437590 ), vec2( 0.19984126, 0.78641367 ), vec2( 0.14383161, -0.14100790 ) -}; +); vec2 PoissonDiskSample(float d) { - return poissonDisk[int(d * 16)]; + return poissonDisk[int(d * 16.0)]; } #endif \ No newline at end of file diff --git a/FastCG/assets/shaders/Shadow.glsl b/FastCG/assets/shaders/Shadow.glsl index e2824ba1..4ce8bc9d 100644 --- a/FastCG/assets/shaders/Shadow.glsl +++ b/FastCG/assets/shaders/Shadow.glsl @@ -11,7 +11,7 @@ vec3 GetShadowMapCoordinates(ShadowMapData shadowMapData, vec3 worldPosition) { vec4 clipPos = shadowMapData.viewProjection * vec4(worldPosition, 1); vec3 ndc = clipPos.xyz / clipPos.w; -#if VULKAN +#ifdef VULKAN return vec3(ndc.x * 0.5 + 0.5, ndc.y * -0.5 + 0.5, // flip y ndc.z); // z 
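Two portability details are folded into the Shadow.glsl hunk above: `#if VULKAN` becomes `#ifdef VULKAN` because strict GLSL ES preprocessors treat an undefined identifier inside `#if` as an error rather than silently evaluating it as 0, and the two branches encode the different NDC conventions of the two backends. Below is a C++ mirror of that mapping, useful when validating shadow lookups CPU-side; it is an illustrative sketch, not FastCG code, and `NdcToShadowMapCoords` is a made-up name:

    #include <array>

    // NDC -> shadow-map texture coordinates.
    // OpenGL: x, y, z are all in [-1, 1], y points up, depth needs * 0.5 + 0.5.
    // Vulkan: y points down (hence the flip), z is already in [0, 1].
    std::array<float, 3> NdcToShadowMapCoords(float x, float y, float z, bool vulkan)
    {
        if (vulkan)
        {
            return {x * 0.5f + 0.5f, y * -0.5f + 0.5f, z};
        }
        return {x * 0.5f + 0.5f, y * 0.5f + 0.5f, z * 0.5f + 0.5f};
    }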
already in [0, 1] diff --git a/FastCG/assets/shaders/deferred/BumpedDiffuse.frag b/FastCG/assets/shaders/deferred/BumpedDiffuse.frag index d6852f6f..c7b7db19 100644 --- a/FastCG/assets/shaders/deferred/BumpedDiffuse.frag +++ b/FastCG/assets/shaders/deferred/BumpedDiffuse.frag @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Material.glsl" diff --git a/FastCG/assets/shaders/deferred/BumpedDiffuse.vert b/FastCG/assets/shaders/deferred/BumpedDiffuse.vert index 97d7cff0..33de8dcf 100644 --- a/FastCG/assets/shaders/deferred/BumpedDiffuse.vert +++ b/FastCG/assets/shaders/deferred/BumpedDiffuse.vert @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Instance.glsl" diff --git a/FastCG/assets/shaders/deferred/BumpedSpecular.frag b/FastCG/assets/shaders/deferred/BumpedSpecular.frag index 4c5c6488..a4ef9d78 100644 --- a/FastCG/assets/shaders/deferred/BumpedSpecular.frag +++ b/FastCG/assets/shaders/deferred/BumpedSpecular.frag @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Material.glsl" diff --git a/FastCG/assets/shaders/deferred/BumpedSpecular.vert b/FastCG/assets/shaders/deferred/BumpedSpecular.vert index 97d7cff0..33de8dcf 100644 --- a/FastCG/assets/shaders/deferred/BumpedSpecular.vert +++ b/FastCG/assets/shaders/deferred/BumpedSpecular.vert @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Instance.glsl" diff --git a/FastCG/assets/shaders/deferred/Diffuse.frag b/FastCG/assets/shaders/deferred/Diffuse.frag index 83566d79..bd1cc342 100644 --- a/FastCG/assets/shaders/deferred/Diffuse.frag +++ b/FastCG/assets/shaders/deferred/Diffuse.frag @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Material.glsl" diff --git a/FastCG/assets/shaders/deferred/Diffuse.vert b/FastCG/assets/shaders/deferred/Diffuse.vert index f25ff062..ed30b943 100644 --- a/FastCG/assets/shaders/deferred/Diffuse.vert +++ b/FastCG/assets/shaders/deferred/Diffuse.vert @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Instance.glsl" diff --git a/FastCG/assets/shaders/deferred/DirectionalLightPass.frag b/FastCG/assets/shaders/deferred/DirectionalLightPass.frag index d4670790..ac42fa2f 100644 --- a/FastCG/assets/shaders/deferred/DirectionalLightPass.frag +++ b/FastCG/assets/shaders/deferred/DirectionalLightPass.frag @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Scene.glsl" #include "Lighting.glsl" diff --git a/FastCG/assets/shaders/deferred/DirectionalLightPass.vert b/FastCG/assets/shaders/deferred/DirectionalLightPass.vert index b9be30b5..aac2514e 100644 --- a/FastCG/assets/shaders/deferred/DirectionalLightPass.vert +++ b/FastCG/assets/shaders/deferred/DirectionalLightPass.vert @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension 
GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" layout(location = 0) in vec3 iPosition; @@ -15,8 +9,8 @@ layout(location = 0) out vec2 vUV; void main() { vUV = iUV; -#if VULKAN - vUV.y = 1 - vUV.y; +#ifdef VULKAN + vUV.y = 1.0 - vUV.y; #endif gl_Position = vec4(iPosition, 1); } \ No newline at end of file diff --git a/FastCG/assets/shaders/deferred/FastCG.glsl b/FastCG/assets/shaders/deferred/FastCG.glsl index 469e7033..130c7c5f 100644 --- a/FastCG/assets/shaders/deferred/FastCG.glsl +++ b/FastCG/assets/shaders/deferred/FastCG.glsl @@ -17,7 +17,7 @@ vec3 GetViewPositionFromScreenPositionAndDepth(mat4 inverseProjection, vec2 scre { vec3 ndc; ndc.x = 2.0 * (screenPos.x / screenSize.x) - 1.0; -#if VULKAN +#ifdef VULKAN ndc.y = 2.0 * ((screenSize.y - screenPos.y) / screenSize.y) - 1.0; // FIXME: ndc.z = depth; // [0, 1] @@ -32,7 +32,7 @@ vec3 GetViewPositionFromScreenPositionAndDepth(mat4 inverseProjection, vec2 scre bool HasBump(vec4 tangent, vec4 extraData) { - return extraData.x != 0 && extraData.y != 0 && extraData.z != 0; + return extraData.x != 0.0 && extraData.y != 0.0 && extraData.z != 0.0; } #endif \ No newline at end of file diff --git a/FastCG/assets/shaders/deferred/PointLightPass.frag b/FastCG/assets/shaders/deferred/PointLightPass.frag index 6ab1888c..5f8bcf03 100644 --- a/FastCG/assets/shaders/deferred/PointLightPass.frag +++ b/FastCG/assets/shaders/deferred/PointLightPass.frag @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Scene.glsl" #include "Lighting.glsl" diff --git a/FastCG/assets/shaders/deferred/PointLightPass.vert b/FastCG/assets/shaders/deferred/PointLightPass.vert index 49b01f2f..e98cde1e 100644 --- a/FastCG/assets/shaders/deferred/PointLightPass.vert +++ b/FastCG/assets/shaders/deferred/PointLightPass.vert @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Instance.glsl" diff --git a/FastCG/assets/shaders/deferred/SolidColor.frag b/FastCG/assets/shaders/deferred/SolidColor.frag index 3773f055..9dcedb91 100644 --- a/FastCG/assets/shaders/deferred/SolidColor.frag +++ b/FastCG/assets/shaders/deferred/SolidColor.frag @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Material.glsl" diff --git a/FastCG/assets/shaders/deferred/SolidColor.vert b/FastCG/assets/shaders/deferred/SolidColor.vert index afae45e9..3c0221a5 100644 --- a/FastCG/assets/shaders/deferred/SolidColor.vert +++ b/FastCG/assets/shaders/deferred/SolidColor.vert @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Instance.glsl" diff --git a/FastCG/assets/shaders/deferred/Specular.frag b/FastCG/assets/shaders/deferred/Specular.frag index aa65c55b..ef9bbb07 100644 --- a/FastCG/assets/shaders/deferred/Specular.frag +++ b/FastCG/assets/shaders/deferred/Specular.frag @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Material.glsl" diff --git a/FastCG/assets/shaders/deferred/Specular.vert b/FastCG/assets/shaders/deferred/Specular.vert index f25ff062..ed30b943 100644 --- 
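The recurring `vUV.y = 1.0 - vUV.y` hunks compensate for Vulkan's framebuffer y axis pointing down relative to OpenGL's. A commonly used alternative that avoids touching every shader is a negative-height viewport, core behavior since Vulkan 1.1 (VK_KHR_maintenance1); this sketch shows that option for comparison, it is not what this patch does:

    #include <vulkan/vulkan.h>

    // Flip the viewport instead of patching every shader: with a negative
    // height, Vulkan's y axis matches OpenGL's for everything that follows.
    void SetGlStyleViewport(VkCommandBuffer cb, uint32_t width, uint32_t height)
    {
        VkViewport viewport{};
        viewport.x = 0.0f;
        viewport.y = static_cast<float>(height);           // origin moves to the bottom
        viewport.width = static_cast<float>(width);
        viewport.height = -static_cast<float>(height);     // negative height flips y
        viewport.minDepth = 0.0f;
        viewport.maxDepth = 1.0f;
        vkCmdSetViewport(cb, 0, 1, &viewport);
    }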
a/FastCG/assets/shaders/deferred/Specular.vert +++ b/FastCG/assets/shaders/deferred/Specular.vert @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Instance.glsl" diff --git a/FastCG/assets/shaders/deferred/StencilPass.frag b/FastCG/assets/shaders/deferred/StencilPass.frag index 3c492c4c..e8a09945 100644 --- a/FastCG/assets/shaders/deferred/StencilPass.frag +++ b/FastCG/assets/shaders/deferred/StencilPass.frag @@ -1,5 +1,3 @@ -#version 430 core - void main() { } \ No newline at end of file diff --git a/FastCG/assets/shaders/deferred/StencilPass.vert b/FastCG/assets/shaders/deferred/StencilPass.vert index dde9ac20..eea3e15c 100644 --- a/FastCG/assets/shaders/deferred/StencilPass.vert +++ b/FastCG/assets/shaders/deferred/StencilPass.vert @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Instance.glsl" diff --git a/FastCG/assets/shaders/forward/BumpedDiffuse.frag b/FastCG/assets/shaders/forward/BumpedDiffuse.frag index b6c65793..7b4c99f6 100644 --- a/FastCG/assets/shaders/forward/BumpedDiffuse.frag +++ b/FastCG/assets/shaders/forward/BumpedDiffuse.frag @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Scene.glsl" #include "Lighting.glsl" diff --git a/FastCG/assets/shaders/forward/BumpedDiffuse.vert b/FastCG/assets/shaders/forward/BumpedDiffuse.vert index 480d07ad..f411c5d6 100644 --- a/FastCG/assets/shaders/forward/BumpedDiffuse.vert +++ b/FastCG/assets/shaders/forward/BumpedDiffuse.vert @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Scene.glsl" #include "Instance.glsl" diff --git a/FastCG/assets/shaders/forward/BumpedSpecular.frag b/FastCG/assets/shaders/forward/BumpedSpecular.frag index 0ae1baf7..19e3de7b 100644 --- a/FastCG/assets/shaders/forward/BumpedSpecular.frag +++ b/FastCG/assets/shaders/forward/BumpedSpecular.frag @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Scene.glsl" #include "Lighting.glsl" diff --git a/FastCG/assets/shaders/forward/BumpedSpecular.vert b/FastCG/assets/shaders/forward/BumpedSpecular.vert index 9b1db5b2..fac3b02f 100644 --- a/FastCG/assets/shaders/forward/BumpedSpecular.vert +++ b/FastCG/assets/shaders/forward/BumpedSpecular.vert @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Scene.glsl" #include "Instance.glsl" @@ -29,7 +23,7 @@ void main() vec4 worldPosition = GetInstanceData().model * vec4(iPosition, 1); vec3 viewPosition = vec3(uView * worldPosition); - vLightDirection = tangentSpaceMatrix * normalize(uLight0ViewPosition.xyz - (step(0, GetLightType()) * viewPosition)); + vLightDirection = tangentSpaceMatrix * normalize(uLight0ViewPosition.xyz - (step(0.0, GetLightType()) * viewPosition)); vViewerDirection = tangentSpaceMatrix * normalize(-viewPosition); vPosition = worldPosition.xyz; vUV = iUV; diff --git a/FastCG/assets/shaders/forward/Diffuse.frag b/FastCG/assets/shaders/forward/Diffuse.frag index d1258878..c2f0e290 100644 --- 
a/FastCG/assets/shaders/forward/Diffuse.frag +++ b/FastCG/assets/shaders/forward/Diffuse.frag @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Scene.glsl" #include "Lighting.glsl" diff --git a/FastCG/assets/shaders/forward/Diffuse.vert b/FastCG/assets/shaders/forward/Diffuse.vert index 9919b0f3..1c52660f 100644 --- a/FastCG/assets/shaders/forward/Diffuse.vert +++ b/FastCG/assets/shaders/forward/Diffuse.vert @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Scene.glsl" #include "Lighting.glsl" @@ -22,7 +16,7 @@ void main() { vec4 worldPosition = GetInstanceData().model * vec4(iPosition, 1); vec3 viewPosition = vec3(uView * worldPosition); - vLightDirection = normalize(uLight0ViewPosition.xyz - (step(0, GetLightType()) * viewPosition)); + vLightDirection = normalize(uLight0ViewPosition.xyz - (step(0.0, GetLightType()) * viewPosition)); vPosition = worldPosition.xyz; vNormal = normalize(mat3(GetInstanceData().modelViewInverseTranspose) * iNormal); vUV = iUV; diff --git a/FastCG/assets/shaders/forward/SolidColor.frag b/FastCG/assets/shaders/forward/SolidColor.frag index eaf5d22c..29405a92 100644 --- a/FastCG/assets/shaders/forward/SolidColor.frag +++ b/FastCG/assets/shaders/forward/SolidColor.frag @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Scene.glsl" #include "Lighting.glsl" diff --git a/FastCG/assets/shaders/forward/SolidColor.vert b/FastCG/assets/shaders/forward/SolidColor.vert index e901d909..9b916b38 100644 --- a/FastCG/assets/shaders/forward/SolidColor.vert +++ b/FastCG/assets/shaders/forward/SolidColor.vert @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "Lighting.glsl" #include "FastCG.glsl" #include "Scene.glsl" @@ -23,7 +17,7 @@ void main() { vec4 worldPosition = GetInstanceData().model * vec4(iPosition, 1); vec3 viewPosition = vec3(uView * worldPosition); - vLightDirection = normalize(uLight0ViewPosition.xyz - (step(0, GetLightType()) * viewPosition)); + vLightDirection = normalize(uLight0ViewPosition.xyz - (step(0.0, GetLightType()) * viewPosition)); vViewerDirection = normalize(-viewPosition); vPosition = worldPosition.xyz; vNormal = normalize(mat3(GetInstanceData().modelViewInverseTranspose) * iNormal); diff --git a/FastCG/assets/shaders/forward/Specular.frag b/FastCG/assets/shaders/forward/Specular.frag index c1fb49a6..69517e8b 100644 --- a/FastCG/assets/shaders/forward/Specular.frag +++ b/FastCG/assets/shaders/forward/Specular.frag @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Scene.glsl" #include "Lighting.glsl" diff --git a/FastCG/assets/shaders/forward/Specular.vert b/FastCG/assets/shaders/forward/Specular.vert index d06ca997..00acd749 100644 --- a/FastCG/assets/shaders/forward/Specular.vert +++ b/FastCG/assets/shaders/forward/Specular.vert @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "FastCG.glsl" #include "Scene.glsl" #include "Lighting.glsl" @@ -23,7 +17,7 @@ void main() { vec4 
worldPosition = GetInstanceData().model * vec4(iPosition, 1); vec3 viewPosition = vec3(uView * worldPosition); - vLightDirection = normalize(uLight0ViewPosition.xyz - (step(0, GetLightType()) * viewPosition)); + vLightDirection = normalize(uLight0ViewPosition.xyz - (step(0.0, GetLightType()) * viewPosition)); vViewerDirection = normalize(-viewPosition); vPosition = worldPosition.xyz; vNormal = normalize(mat3(GetInstanceData().modelViewInverseTranspose) * iNormal); diff --git a/FastCG/assets/shaders/pcss/PCSS.glsl b/FastCG/assets/shaders/pcss/PCSS.glsl index 1c8a3d56..5577768e 100644 --- a/FastCG/assets/shaders/pcss/PCSS.glsl +++ b/FastCG/assets/shaders/pcss/PCSS.glsl @@ -19,30 +19,30 @@ struct PCSSData void FindBlocker(PCSSData pcssData, sampler2D shadowMap, vec3 shadowMapCoords, out float avgBlockerDistance, out int numBlockers) { numBlockers = 0; - float blockerDistanceSum = 0; + float blockerDistanceSum = 0.0; float zThreshold = shadowMapCoords.z - pcssData.shadowMapData.bias; for (int i = 0; i < pcssData.blockerSearchSamples; i++) { - float z = texture(shadowMap, shadowMapCoords.xy + PoissonDiskSample(i / float(pcssData.blockerSearchSamples)) * pcssData.uvScale).x; + float z = texture(shadowMap, shadowMapCoords.xy + PoissonDiskSample(float(i) / float(pcssData.blockerSearchSamples)) * pcssData.uvScale).x; if (z < zThreshold) { blockerDistanceSum += z; numBlockers++; } } - avgBlockerDistance = blockerDistanceSum / numBlockers; + avgBlockerDistance = blockerDistanceSum / float(numBlockers); } float PCF(PCSSData pcssData, sampler2D shadowMap, vec3 shadowMapCoords, float uvRadius) { - float sum = 0; + float sum = 0.0; float zThreshold = shadowMapCoords.z - pcssData.shadowMapData.bias; for (int i = 0; i < pcssData.pcfSamples; i++) { - float z = texture(shadowMap, shadowMapCoords.xy + PoissonDiskSample(i / float(pcssData.pcfSamples)) * uvRadius).x; + float z = texture(shadowMap, shadowMapCoords.xy + PoissonDiskSample(float(i) / float(pcssData.pcfSamples)) * uvRadius).x; sum += float(z <= zThreshold); } - return sum / pcssData.pcfSamples; + return sum / float(pcssData.pcfSamples); } float GetPCSS(PCSSData pcssData, sampler2D shadowMap, vec3 worldPosition) @@ -55,7 +55,7 @@ float GetPCSS(PCSSData pcssData, sampler2D shadowMap, vec3 worldPosition) FindBlocker(pcssData, shadowMap, shadowMapCoords, avgBlockerDistance, numBlockers); if (numBlockers < 1) { - return 1; + return 1.0; } // penumbra estimation @@ -63,7 +63,7 @@ float GetPCSS(PCSSData pcssData, sampler2D shadowMap, vec3 worldPosition) // percentage-close filtering float uvRadius = penumbraWidth * pcssData.uvScale; - return 1 - PCF(pcssData, shadowMap, shadowMapCoords, uvRadius); + return 1.0 - PCF(pcssData, shadowMap, shadowMapCoords, uvRadius); } // for debugging purposes diff --git a/FastCG/assets/shaders/pcss/ShadowMapPass.frag b/FastCG/assets/shaders/pcss/ShadowMapPass.frag index 3c492c4c..e8a09945 100644 --- a/FastCG/assets/shaders/pcss/ShadowMapPass.frag +++ b/FastCG/assets/shaders/pcss/ShadowMapPass.frag @@ -1,5 +1,3 @@ -#version 430 core - void main() { } \ No newline at end of file diff --git a/FastCG/assets/shaders/pcss/ShadowMapPass.vert b/FastCG/assets/shaders/pcss/ShadowMapPass.vert index 904a7e6c..21f05960 100644 --- a/FastCG/assets/shaders/pcss/ShadowMapPass.vert +++ b/FastCG/assets/shaders/pcss/ShadowMapPass.vert @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "../FastCG.glsl" #include "ShadowMapPass.glsl" diff --git 
a/FastCG/assets/shaders/ssao/SSAOBlurPass.frag b/FastCG/assets/shaders/ssao/SSAOBlurPass.frag index 674c94cc..ed951410 100644 --- a/FastCG/assets/shaders/ssao/SSAOBlurPass.frag +++ b/FastCG/assets/shaders/ssao/SSAOBlurPass.frag @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #define NOISE_TEXTURE_SIDE 4 #define NOISE_TEXTURE_SIZE 16 @@ -17,11 +11,11 @@ layout(location = 0) out float oAmbientOcclusion; void main() { - vec2 texelSize = 1.0 / textureSize(uAmbientOcclusionMap, 0); + vec2 texelSize = 1.0 / vec2(textureSize(uAmbientOcclusionMap, 0)); // simple mean filter - float result = 0; - vec2 stride = vec2(-NOISE_TEXTURE_SIDE * 0.5); + float result = 0.0; + vec2 stride = vec2(float(-NOISE_TEXTURE_SIDE) * 0.5); for (int x = 0; x < NOISE_TEXTURE_SIDE; x++) { for (int y = 0; y < NOISE_TEXTURE_SIDE; y++) @@ -34,5 +28,5 @@ void main() } } - oAmbientOcclusion = result / NOISE_TEXTURE_SIZE; + oAmbientOcclusion = result / float(NOISE_TEXTURE_SIZE); } \ No newline at end of file diff --git a/FastCG/assets/shaders/ssao/SSAOBlurPass.vert b/FastCG/assets/shaders/ssao/SSAOBlurPass.vert index 1d0ea666..5ba9e635 100644 --- a/FastCG/assets/shaders/ssao/SSAOBlurPass.vert +++ b/FastCG/assets/shaders/ssao/SSAOBlurPass.vert @@ -1,5 +1,3 @@ -#version 430 core - layout(location = 0) in vec3 iPosition; layout(location = 2) in vec2 iUV; @@ -8,8 +6,8 @@ layout(location = 0) out vec2 vUV; void main() { vUV = iUV; -#if VULKAN - vUV.y = 1 - vUV.y; +#ifdef VULKAN + vUV.y = 1.0 - vUV.y; #endif gl_Position = vec4(iPosition.xy, 0.0, 1.0); } \ No newline at end of file diff --git a/FastCG/assets/shaders/ssao/SSAOHighFrequencyPass.frag b/FastCG/assets/shaders/ssao/SSAOHighFrequencyPass.frag index f36f4dd8..9a8f17b4 100644 --- a/FastCG/assets/shaders/ssao/SSAOHighFrequencyPass.frag +++ b/FastCG/assets/shaders/ssao/SSAOHighFrequencyPass.frag @@ -1,17 +1,11 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "../FastCG.glsl" -#include "SSAOHighFrequencyPass.glsl" +#include "SSAOHighFrequencyPass.glsl" layout(BINDING_0_1) uniform sampler2D uNoiseMap; layout(BINDING_0_2) uniform sampler2D uDepth; layout(location = 0) in vec2 vUV; -layout(location = 1) noperspective in vec3 vViewRay; +layout(location = 1) in vec3 vViewRay; layout(location = 0) out float oAmbientOcclusion; @@ -19,7 +13,7 @@ layout(location = 0) out float oAmbientOcclusion; void main() { - vec2 noiseUv = textureSize(uDepth, 0) / textureSize(uNoiseMap, 0) * vUV; + vec2 noiseUv = vec2(textureSize(uDepth, 0) / textureSize(uNoiseMap, 0)) * vUV; vec3 randomVector = texture(uNoiseMap, noiseUv).xyz; float z = GetViewSpaceZ(uProjection, texture(uDepth, vUV).x); @@ -36,9 +30,9 @@ void main() // of the current and neighbouring fragments vec3 normal = normalize(cross(viewPosRight, viewPosBottom)); -#if VULKAN +#ifdef VULKAN // FIXME: - normal *= -1; + normal *= -1.0; #endif // compute the sampling tangent space matrix from normal, @@ -47,16 +41,16 @@ void main() vec3 binormal = cross(normal, tangent); mat3 tangentSpaceMatrix = mat3(tangent, binormal, normal); - float occlusion = 0; + float occlusion = 0.0; for (int i = 0; i < NUMBER_OF_RANDOM_SAMPLES; i++) { vec3 sampleViewPos = viewPos + (tangentSpaceMatrix * uRandomSamples[i].xyz) * uAspectRatio; - vec4 sampleClipPos = uProjection * vec4(sampleViewPos, 1); + vec4 sampleClipPos = uProjection * vec4(sampleViewPos, 1.0); vec2 sampleUv = sampleClipPos.xy / 
sampleClipPos.w; sampleUv.x = sampleUv.x * 0.5 + 0.5; -#if VULKAN +#ifdef VULKAN sampleUv.y = sampleUv.y * -0.5 + 0.5; #else sampleUv.y = sampleUv.y * 0.5 + 0.5; @@ -70,5 +64,5 @@ void main() occlusion += (actualViewPos.z <= sampleViewPos.z - uBias ? 1.0 : 0.0) * rangeCheck; } - oAmbientOcclusion = 1 - (occlusion / NUMBER_OF_RANDOM_SAMPLES); + oAmbientOcclusion = 1.0 - (occlusion / float(NUMBER_OF_RANDOM_SAMPLES)); } \ No newline at end of file diff --git a/FastCG/assets/shaders/ssao/SSAOHighFrequencyPass.vert b/FastCG/assets/shaders/ssao/SSAOHighFrequencyPass.vert index 2b13a759..6ff9bfed 100644 --- a/FastCG/assets/shaders/ssao/SSAOHighFrequencyPass.vert +++ b/FastCG/assets/shaders/ssao/SSAOHighFrequencyPass.vert @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "../FastCG.glsl" #include "SSAOHighFrequencyPass.glsl" @@ -11,13 +5,13 @@ layout(location = 0) in vec3 iPosition; layout(location = 2) in vec2 iUV; layout(location = 0) out vec2 vUV; -layout(location = 1) noperspective out vec3 vViewRay; +layout(location = 1) out vec3 vViewRay; void main() { vUV = iUV; -#if VULKAN - vUV.y = 1 - vUV.y; +#ifdef VULKAN + vUV.y = 1.0 - vUV.y; #endif vViewRay = normalize(vec3(iPosition.x * uTanHalfFov * uAspectRatio, iPosition.y * uTanHalfFov, diff --git a/FastCG/assets/shaders/tonemap/TonemapPass.frag b/FastCG/assets/shaders/tonemap/TonemapPass.frag index 619e0ddf..5098bdc3 100644 --- a/FastCG/assets/shaders/tonemap/TonemapPass.frag +++ b/FastCG/assets/shaders/tonemap/TonemapPass.frag @@ -1,9 +1,3 @@ -#version 430 core - -#ifdef ENABLE_INCLUDE_EXTENSION_DIRECTIVE -#extension GL_GOOGLE_include_directive : enable -#endif - #include "../FastCG.glsl" layout(BINDING_0_0) uniform sampler2D uSource; diff --git a/FastCG/assets/shaders/tonemap/TonemapPass.vert b/FastCG/assets/shaders/tonemap/TonemapPass.vert index ac6f2931..e5d39fb0 100644 --- a/FastCG/assets/shaders/tonemap/TonemapPass.vert +++ b/FastCG/assets/shaders/tonemap/TonemapPass.vert @@ -1,5 +1,3 @@ -#version 430 core - layout(location = 0) in vec3 iPosition; layout(location = 0) out vec2 vUV; @@ -7,8 +5,8 @@ layout(location = 0) out vec2 vUV; void main() { vUV = iPosition.xy * 0.5 + 0.5; -#if VULKAN - vUV.y = 1 - vUV.y; +#ifdef VULKAN + vUV.y = 1.0 - vUV.y; #endif gl_Position = vec4(iPosition.xy, 0.0, 1.0); } \ No newline at end of file diff --git a/FastCG/include/FastCG/Assets/AssetSystem.inc b/FastCG/include/FastCG/Assets/AssetSystem.inc index b9c9a89c..f1fbd081 100644 --- a/FastCG/include/FastCG/Assets/AssetSystem.inc +++ b/FastCG/include/FastCG/Assets/AssetSystem.inc @@ -8,9 +8,9 @@ namespace FastCG std::vector AssetSystem::List(const std::string &rRelDirectoryPath, bool recursive /* = false*/) const { std::vector fileList; - for (const auto &bundleRoot : mBundleRoots) + for (const auto &rBundleRoot : mBundleRoots) { - auto absDirPath = bundleRoot + "/" + rRelDirectoryPath; + auto absDirPath = rBundleRoot + "/" + rRelDirectoryPath; auto partialFileList = Directory::List(absDirPath, recursive); fileList.insert(fileList.end(), partialFileList.begin(), partialFileList.end()); } @@ -19,9 +19,9 @@ namespace FastCG bool AssetSystem::Resolve(const std::string &rRelFilePath, std::string &rAbsFilePath) const { - for (const auto &bundleRoot : mBundleRoots) + for (const auto &rBundleRoot : mBundleRoots) { - auto absFilePath = bundleRoot + "/" + rRelFilePath; + auto absFilePath = rBundleRoot + "/" + rRelFilePath; if (File::Exists(absFilePath)) { rAbsFilePath = 
absFilePath; @@ -41,9 +41,9 @@ namespace FastCG void AssetSystem::Expand(const std::string &rRelFilePath, std::vector<std::string> &rAbsFilePaths, bool includeNonExistant /* = false*/) const { rAbsFilePaths.clear(); - for (const auto &bundleRoot : mBundleRoots) + for (const auto &rBundleRoot : mBundleRoots) { - auto absFilePath = bundleRoot + "/" + rRelFilePath; + auto absFilePath = rBundleRoot + "/" + rRelFilePath; if (includeNonExistant || File::Exists(absFilePath)) { rAbsFilePaths.emplace_back(absFilePath);
diff --git a/FastCG/include/FastCG/Core/Log.h b/FastCG/include/FastCG/Core/Log.h new file mode 100644 index 00000000..d6bb06ed --- /dev/null +++ b/FastCG/include/FastCG/Core/Log.h @@ -0,0 +1,30 @@ +#ifndef FASTCG_LOG_H +#define FASTCG_LOG_H + +#if defined FASTCG_WINDOWS || defined FASTCG_LINUX +// TODO: write proper log functions +#include <cstdio> +#define FASTCG_LOG_VERBOSE(...) \ + printf(__VA_ARGS__); \ + printf("\n") +#define FASTCG_LOG_DEBUG(...) \ + printf(__VA_ARGS__); \ + printf("\n") +#define FASTCG_LOG_INFO(...) \ + printf(__VA_ARGS__); \ + printf("\n") +#define FASTCG_LOG_ERROR(...) \ + printf(__VA_ARGS__); \ + printf("\n") +#elif defined FASTCG_ANDROID +#include <android/log.h> +#define FASTCG_LOG_(severity, ...) __android_log_print(ANDROID_LOG_##severity, "FASTCG", __VA_ARGS__) +#define FASTCG_LOG_VERBOSE(...) FASTCG_LOG_(VERBOSE, __VA_ARGS__) +#define FASTCG_LOG_DEBUG(...) FASTCG_LOG_(DEBUG, __VA_ARGS__) +#define FASTCG_LOG_INFO(...) FASTCG_LOG_(INFO, __VA_ARGS__) +#define FASTCG_LOG_ERROR(...) FASTCG_LOG_(ERROR, __VA_ARGS__) +#else +#error "FASTCG_LOG_*() is not implemented on the current platform" +#endif + +#endif \ No newline at end of file
diff --git a/FastCG/include/FastCG/Core/Macros.h b/FastCG/include/FastCG/Core/Macros.h index 491fa767..e63a617b 100644 --- a/FastCG/include/FastCG/Core/Macros.h +++ b/FastCG/include/FastCG/Core/Macros.h @@ -1,12 +1,6 @@ #ifndef FASTCG_MACROS_H #define FASTCG_MACROS_H -#if defined FASTCG_WINDOWS -#include -#elif defined FASTCG_LINUX -#include -#endif - #define FASTCG_EXPAND(x) x // source: https://gist.github.com/thwarted/8ce47e1897a578f4e80a @@ -74,27 +68,31 @@ #define FASTCG_ARRAYSIZE(x) (sizeof(x) / sizeof(x[0])) #ifdef _DEBUG -#if defined FASTCG_WINDOWS +#if defined _MSC_VER #define FASTCG_BREAK_TO_DEBUGGER() __debugbreak() -#elif defined FASTCG_LINUX -#define FASTCG_BREAK_TO_DEBUGGER() raise(SIGTRAP) +#elif defined __GNUC__ || defined __clang__ +#define FASTCG_BREAK_TO_DEBUGGER() // __builtin_trap() #else -#error "FASTCG_BREAK_TO_DEBUGGER() is not implemented on the current platform" +#error "FASTCG_BREAK_TO_DEBUGGER() is not implemented on the current compiler" #endif #else #define FASTCG_BREAK_TO_DEBUGGER() #endif -#if defined FASTCG_WINDOWS +#if defined _MSC_VER #define FASTCG_WARN_PUSH _Pragma("warning(push)") -#define FASTCG_WARN_IGNORE_MACRO_ARGS _Pragma("warning(disable:4003)") +#define FASTCG_WARN_IGNORE_DEPRECATED_DECLARATIONS _Pragma("warning(disable:4996)") #define FASTCG_WARN_POP _Pragma("warning(pop)") -#elif defined FASTCG_LINUX +#elif defined __GNUC__ #define FASTCG_WARN_PUSH _Pragma("GCC diagnostic push") -#define FASTCG_WARN_IGNORE_MACRO_ARGS _Pragma("GCC diagnostic ignored \"-W\"") +#define FASTCG_WARN_IGNORE_DEPRECATED_DECLARATIONS _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") #define FASTCG_WARN_POP _Pragma("GCC diagnostic pop") +#elif defined __clang__ +#define FASTCG_WARN_PUSH _Pragma("clang diagnostic push") +#define FASTCG_WARN_IGNORE_DEPRECATED_DECLARATIONS _Pragma("clang diagnostic ignored \"-Wdeprecated-declarations\"") +#define FASTCG_WARN_POP _Pragma("clang diagnostic pop") #else -#error "FASTCG_WARN_* macros are not implemented on the current platform" +#error "FASTCG_WARN_* macros are not implemented on the current compiler" #endif #endif \ No newline at end of file
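The Macros.h hunk above rekeys the warning-control macros from platform to compiler, which is what Android cross-compilation needs (clang regardless of host OS), and repurposes them for deprecation warnings. Note that clang also defines __GNUC__, so the __clang__ branch is mostly a fallback; that is harmless, since clang accepts the GCC diagnostic pragmas. A typical call site would look like this; SomeDeprecatedCall() is a made-up name for illustration:

    #include <FastCG/Core/Macros.h>

    void Foo()
    {
        FASTCG_WARN_PUSH
        FASTCG_WARN_IGNORE_DEPRECATED_DECLARATIONS
        SomeDeprecatedCall(); // hypothetical deprecated API being silenced
        FASTCG_WARN_POP
    }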
\"-Wdeprecated-declarations\""") +#define FASTCG_WARN_POP _Pragma("clang diagnostic pop") #else -#error "FASTCG_WARN_* macros are not implemented on the current platform" +#error "FASTCG_WARN_* macros are not implemented on the current compiler" #endif #endif \ No newline at end of file diff --git a/FastCG/include/FastCG/Core/MsgBox.h b/FastCG/include/FastCG/Core/MsgBox.h index f6733533..45f1b0f5 100644 --- a/FastCG/include/FastCG/Core/MsgBox.h +++ b/FastCG/include/FastCG/Core/MsgBox.h @@ -33,6 +33,9 @@ namespace FastCG button.result = 0; \ Messagebox(title, wMsg, &button, 1); \ } +#elif defined FASTCG_ANDROID +// TODO: implement a dialog box on Android - although I guess there's no easy way to do it +#define FASTCG_MSG_BOX(...) #else #error "FASTCG_MSG_BOX() is not implemented on the current platform" #endif diff --git a/FastCG/include/FastCG/Graphics/BaseGraphicsContext.h b/FastCG/include/FastCG/Graphics/BaseGraphicsContext.h index e052369a..919794d5 100644 --- a/FastCG/include/FastCG/Graphics/BaseGraphicsContext.h +++ b/FastCG/include/FastCG/Graphics/BaseGraphicsContext.h @@ -33,7 +33,7 @@ namespace FastCG } // Template interface - void Begin(); + bool Begin(); void PushDebugMarker(const char *pName); void PopDebugMarker(); void SetViewport(int32_t x, int32_t y, uint32_t width, uint32_t height); diff --git a/FastCG/include/FastCG/Graphics/BaseGraphicsSystem.h b/FastCG/include/FastCG/Graphics/BaseGraphicsSystem.h index f6b7bdc5..b8d3280b 100644 --- a/FastCG/include/FastCG/Graphics/BaseGraphicsSystem.h +++ b/FastCG/include/FastCG/Graphics/BaseGraphicsSystem.h @@ -95,6 +95,10 @@ namespace FastCG inline virtual void DestroyTexture(const Texture *pTexture); inline const Shader *FindShader(const std::string &rName) const; inline const Texture *GetMissingTexture(TextureType textureType) const; +#if defined FASTCG_ANDROID + inline void OnWindowInitialized(); + inline void OnWindowTerminated(); +#endif protected: const GraphicsSystemArgs mArgs; diff --git a/FastCG/include/FastCG/Graphics/BaseGraphicsSystem.inc b/FastCG/include/FastCG/Graphics/BaseGraphicsSystem.inc index 2bfa9d3e..65472050 100644 --- a/FastCG/include/FastCG/Graphics/BaseGraphicsSystem.inc +++ b/FastCG/include/FastCG/Graphics/BaseGraphicsSystem.inc @@ -150,7 +150,7 @@ namespace FastCG TextureType::TEXTURE_2D, TextureUsageFlagBit::RENDER_TARGET, TextureFormat::RGBA, - {32, 32, 32, 32}, + {8, 8, 8, 8}, TextureDataType::FLOAT, TextureFilter::POINT_FILTER, TextureWrapMode::CLAMP, diff --git a/FastCG/include/FastCG/Graphics/OpenGL/OpenGL.h b/FastCG/include/FastCG/Graphics/OpenGL/OpenGL.h index 74ab3254..e83364d1 100644 --- a/FastCG/include/FastCG/Graphics/OpenGL/OpenGL.h +++ b/FastCG/include/FastCG/Graphics/OpenGL/OpenGL.h @@ -3,13 +3,22 @@ #ifdef FASTCG_OPENGL +#if !defined FASTCG_ANDROID #include +#endif + +#if defined FASTCG_ANDROID +#include +#else #include +#endif #if defined FASTCG_WINDOWS #include #elif defined FASTCG_LINUX #include +#elif defined FASTCG_ANDROID +#include #endif #endif diff --git a/FastCG/include/FastCG/Graphics/OpenGL/OpenGLExceptions.h b/FastCG/include/FastCG/Graphics/OpenGL/OpenGLExceptions.h index f1dbcee8..47e55494 100644 --- a/FastCG/include/FastCG/Graphics/OpenGL/OpenGLExceptions.h +++ b/FastCG/include/FastCG/Graphics/OpenGL/OpenGLExceptions.h @@ -6,7 +6,7 @@ #include #include -#include +#include namespace FastCG { @@ -21,25 +21,51 @@ namespace FastCG } +inline const char *glGetErrorString(GLenum error) +{ +#if defined FASTCG_ANDROID + +#ifdef CASE_RETURN_STRING +#undef CASE_RETURN_STRING +#endif + 
diff --git a/FastCG/include/FastCG/Graphics/OpenGL/OpenGLGraphicsContext.h b/FastCG/include/FastCG/Graphics/OpenGL/OpenGLGraphicsContext.h index 8aed067c..bb78d897 100644 --- a/FastCG/include/FastCG/Graphics/OpenGL/OpenGLGraphicsContext.h +++ b/FastCG/include/FastCG/Graphics/OpenGL/OpenGLGraphicsContext.h @@ -22,7 +22,7 @@ namespace FastCG OpenGLGraphicsContext(const Args &rArgs); virtual ~OpenGLGraphicsContext(); - void Begin(); + bool Begin(); void PushDebugMarker(const char *pName); void PopDebugMarker(); void SetViewport(int32_t x, int32_t y, uint32_t width, uint32_t height);
diff --git a/FastCG/include/FastCG/Graphics/OpenGL/OpenGLGraphicsSystem.h b/FastCG/include/FastCG/Graphics/OpenGL/OpenGLGraphicsSystem.h index ace0f789..44865afa 100644 --- a/FastCG/include/FastCG/Graphics/OpenGL/OpenGLGraphicsSystem.h +++ b/FastCG/include/FastCG/Graphics/OpenGL/OpenGLGraphicsSystem.h @@ -44,10 +44,19 @@ namespace FastCG { return mDeviceProperties; } - void DestroyTexture(const OpenGLTexture *pTexture); + void DestroyTexture(const OpenGLTexture *pTexture) override; GLuint GetOrCreateFramebuffer(const OpenGLTexture *const *pRenderTargets, uint32_t renderTargetCount, const OpenGLTexture *pDepthStencilBuffer); GLuint GetOrCreateVertexArray(const OpenGLBuffer *const *pBuffers, uint32_t bufferCount); +#if defined FASTCG_ANDROID + inline bool IsHeadless() const + { + return mHeadedContext == EGL_NO_CONTEXT; + } + + void OnWindowInitialized(); + void OnWindowTerminated(); +#endif protected: OpenGLGraphicsSystem(const GraphicsSystemArgs &rArgs); virtual ~OpenGLGraphicsSystem(); @@ -58,6 +67,13 @@ HGLRC mHGLRC{0}; #elif defined FASTCG_LINUX GLXContext mpRenderContext{nullptr}; +#elif defined FASTCG_ANDROID + EGLDisplay mDisplay{nullptr}; + EGLConfig mConfig{nullptr}; + EGLContext mHeadlessContext{EGL_NO_CONTEXT}; + EGLSurface mPbufferSurface{EGL_NO_SURFACE}; + EGLContext mHeadedContext{EGL_NO_CONTEXT}; + EGLSurface mWindowSurface{EGL_NO_SURFACE}; #endif std::unordered_map> mFboIds; std::unordered_map, IdentityHasher> mTextureToFboHashes; @@ -69,7 +85,9 @@ namespace FastCG void Resize() {} void Present(); double GetGpuElapsedTime() const; +#if !defined FASTCG_ANDROID void InitializeGlew(); +#endif void CreateOpenGLContext(); void QueryDeviceProperties(); void DestroyOpenGLContext();
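The new EGL members above describe a two-surface scheme: a long-lived headless context bound to a tiny pbuffer keeps GL resources alive while there is no window, and a headed context plus window surface come and go with OnWindowInitialized()/OnWindowTerminated(). The implementation is in the .cpp, which this excerpt does not show; the following is a minimal sketch of that idea, assuming the display and a config with EGL_PBUFFER_BIT | EGL_WINDOW_BIT in EGL_SURFACE_TYPE were already set up via eglInitialize()/eglChooseConfig():

    #include <EGL/egl.h>

    // Create a context that can run without a window, backed by a 1x1 pbuffer.
    bool CreateHeadlessContext(EGLDisplay display, EGLConfig config,
                               EGLContext &rContext, EGLSurface &rSurface)
    {
        const EGLint contextAttribs[] = {EGL_CONTEXT_CLIENT_VERSION, 3, EGL_NONE};
        rContext = eglCreateContext(display, config, EGL_NO_CONTEXT, contextAttribs);
        if (rContext == EGL_NO_CONTEXT)
        {
            return false;
        }
        const EGLint pbufferAttribs[] = {EGL_WIDTH, 1, EGL_HEIGHT, 1, EGL_NONE};
        rSurface = eglCreatePbufferSurface(display, config, pbufferAttribs);
        return rSurface != EGL_NO_SURFACE &&
               eglMakeCurrent(display, rSurface, rSurface, rContext) == EGL_TRUE;
    }

    // Later, when the ANativeWindow arrives (APP_CMD_INIT_WINDOW), a second
    // context that shares objects with the headless one can target the window:
    //   windowSurface = eglCreateWindowSurface(display, config, nativeWindow, nullptr);
    //   headedContext = eglCreateContext(display, config, headlessContext, contextAttribs);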
diff --git a/FastCG/include/FastCG/Graphics/OpenGL/OpenGLUtils.h b/FastCG/include/FastCG/Graphics/OpenGL/OpenGLUtils.h index b73d9e64..60d32c34 100644 --- a/FastCG/include/FastCG/Graphics/OpenGL/OpenGLUtils.h +++ b/FastCG/include/FastCG/Graphics/OpenGL/OpenGLUtils.h @@ -265,10 +265,12 @@ namespace FastCG { return GL_R16F; } +#if defined GL_R16 else if (bitsPerChannel.r == 16) { return GL_R16; } +#endif else if (bitsPerChannel.r == 32 && dataType == TextureDataType::FLOAT) { return GL_R32F; @@ -282,10 +284,12 @@ namespace FastCG { return GL_RG8; } +#if defined GL_RG16 else if (bitsPerChannel.r == 16 && bitsPerChannel.g == 16) { return GL_RG16; } +#endif else if (bitsPerChannel.r == 16 && bitsPerChannel.g == 16 && dataType == TextureDataType::FLOAT) { return GL_RG16F; @@ -308,18 +312,22 @@ namespace FastCG { return GL_R11F_G11F_B10F; } +#if defined GL_RGB12 else if (bitsPerChannel.r == 12 && bitsPerChannel.g == 12 && bitsPerChannel.b == 12) { return GL_RGB12; } +#endif else if (bitsPerChannel.r == 16 && bitsPerChannel.g == 16 && bitsPerChannel.b == 16 && dataType == TextureDataType::FLOAT) { return GL_RGB16F; } +#if defined GL_RGB16 else if (bitsPerChannel.r == 16 && bitsPerChannel.g == 16 && bitsPerChannel.b == 16) { return GL_RGB16; } +#endif else { return GL_RGB; @@ -372,10 +380,12 @@ namespace FastCG { return GL_DEPTH_COMPONENT32F; } +#if defined GL_DEPTH_COMPONENT32 else if (bitsPerChannel.r == 32) { return GL_DEPTH_COMPONENT32; } +#endif else { return GL_DEPTH_COMPONENT; @@ -398,10 +408,14 @@ namespace FastCG return GL_RGB; case TextureFormat::RGBA: return GL_RGBA; +#if defined GL_BGR case TextureFormat::BGR: return GL_BGR; +#endif +#if defined GL_BGRA case TextureFormat::BGRA: return GL_BGRA; +#endif case TextureFormat::DEPTH_STENCIL: return GL_DEPTH_STENCIL; case TextureFormat::DEPTH:
diff --git a/FastCG/include/FastCG/Graphics/RenderingPath.h b/FastCG/include/FastCG/Graphics/RenderingPath.h index 9ccaa44d..07a94e2f 100644 --- a/FastCG/include/FastCG/Graphics/RenderingPath.h +++ b/FastCG/include/FastCG/Graphics/RenderingPath.h @@ -1,18 +1,14 @@ #ifndef FASTCG_RENDERING_PATH_H #define FASTCG_RENDERING_PATH_H +#include + #include #include namespace FastCG { - enum class RenderingPath : uint8_t - { - FORWARD, - DEFERRED, - LAST - - }; + FASTCG_DECLARE_SCOPED_ENUM(RenderingPath, uint8_t, FORWARD, DEFERRED); using RenderingPathInt = std::underlying_type_t<RenderingPath>; using RenderingPathMask = uint8_t;
diff --git a/FastCG/include/FastCG/Graphics/ShaderSource.h b/FastCG/include/FastCG/Graphics/ShaderSource.h index c4130943..79f9e26d 100644 --- a/FastCG/include/FastCG/Graphics/ShaderSource.h +++ b/FastCG/include/FastCG/Graphics/ShaderSource.h @@ -1,6 +1,8 @@ #ifndef FASTCG_SHADER_SOURCE_H #define FASTCG_SHADER_SOURCE_H +#include + #include namespace FastCG { class ShaderSource { public: - inline static std::string ParseFile(const std::string &rFileName); - inline static void ParseSource(std::string &rSource, const std::string &rIncludePath); + inline static std::string ParseFile(const std::string &rFileName, ShaderType shaderType, bool isIncluded = false); + inline static void ParseSource(std::string &rSource, const std::string &rIncludePath, ShaderType shaderType, bool isIncluded = false); private: ShaderSource() = delete;
diff --git
a/FastCG/include/FastCG/Graphics/ShaderSource.inc b/FastCG/include/FastCG/Graphics/ShaderSource.inc index 7cd717c3..8f5ba6dd 100644 --- a/FastCG/include/FastCG/Graphics/ShaderSource.inc +++ b/FastCG/include/FastCG/Graphics/ShaderSource.inc @@ -4,18 +4,34 @@ namespace FastCG { - std::string ShaderSource::ParseFile(const std::string &rFilePath) + std::string ShaderSource::ParseFile(const std::string &rFilePath, ShaderType shaderType, bool isIncluded /* = false*/) { auto includePath = File::GetBasePath(rFilePath); size_t fileSize; auto data = FileReader::ReadText(rFilePath, fileSize); std::string source(data.get(), data.get() + fileSize); - ParseSource(source, includePath); + ParseSource(source, includePath, shaderType, isIncluded); return source; } - void ShaderSource::ParseSource(std::string &rSource, const std::string &rIncludePath) + void ShaderSource::ParseSource(std::string &rSource, const std::string &rIncludePath, ShaderType shaderType, bool isIncluded /* = false*/) { + if (!isIncluded) + { + std::string sourcePrefix; +#if defined FASTCG_ANDROID + sourcePrefix = "#version 320 es\n"; +#else + sourcePrefix = "#version 430\n"; +#endif +#if defined FASTCG_ANDROID + if (shaderType == ShaderType::FRAGMENT) + { + sourcePrefix += "precision mediump float;\n"; + } +#endif + rSource = sourcePrefix + rSource; + } size_t includePosition = 0; while ((includePosition = rSource.find("#include ", includePosition)) != std::string::npos) { @@ -28,7 +44,7 @@ namespace FastCG StringUtils::Replace(includeFileName, "<", ""); StringUtils::Replace(includeFileName, ">", ""); StringUtils::Trim(includeFileName); - auto rSourceToInclude = ParseFile(rIncludePath + includeFileName); + auto rSourceToInclude = ParseFile(rIncludePath + includeFileName, shaderType, true); StringUtils::Replace(rSource, includeStatement, rSourceToInclude); } } diff --git a/FastCG/include/FastCG/Graphics/Vulkan/Vulkan.h b/FastCG/include/FastCG/Graphics/Vulkan/Vulkan.h index 6eef8a3e..c528211a 100644 --- a/FastCG/include/FastCG/Graphics/Vulkan/Vulkan.h +++ b/FastCG/include/FastCG/Graphics/Vulkan/Vulkan.h @@ -15,6 +15,8 @@ #define VK_USE_PLATFORM_WIN32_KHR #elif defined FASTCG_LINUX #define VK_USE_PLATFORM_XLIB_KHR +#elif defined FASTCG_ANDROID +#define VK_USE_PLATFORM_ANDROID_KHR #else #error "FASTCG: Unhandled platform" #endif @@ -30,6 +32,14 @@ #include +#if defined VK_VERSION_1_3 +#define VK_API_VERSION VK_API_VERSION_1_3; +#elif defined VK_VERSION_1_1 +#define VK_API_VERSION VK_API_VERSION_1_1; +#else +#error "FastCG: Unexpected Vulkan API version" +#endif + #endif #endif \ No newline at end of file diff --git a/FastCG/include/FastCG/Graphics/Vulkan/VulkanBuffer.h b/FastCG/include/FastCG/Graphics/Vulkan/VulkanBuffer.h index 337df12d..0154fff0 100644 --- a/FastCG/include/FastCG/Graphics/Vulkan/VulkanBuffer.h +++ b/FastCG/include/FastCG/Graphics/Vulkan/VulkanBuffer.h @@ -72,6 +72,10 @@ namespace FastCG { return (GetUsage() & BufferUsageFlagBit::DYNAMIC) != 0; } + inline bool UsesMappableMemory() const + { + return IsDynamic(); + } void CreateBuffer(); void DestroyBuffer(); diff --git a/FastCG/include/FastCG/Graphics/Vulkan/VulkanGraphicsContext.h b/FastCG/include/FastCG/Graphics/Vulkan/VulkanGraphicsContext.h index 627836f6..ddf7631b 100644 --- a/FastCG/include/FastCG/Graphics/Vulkan/VulkanGraphicsContext.h +++ b/FastCG/include/FastCG/Graphics/Vulkan/VulkanGraphicsContext.h @@ -25,7 +25,7 @@ namespace FastCG VulkanGraphicsContext(const Args &rArgs); virtual ~VulkanGraphicsContext(); - void Begin(); + bool Begin(); void 
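Begin() changing from void to bool (in VulkanGraphicsContext above, and in the OpenGL context earlier) gives callers a way to skip work instead of crashing; presumably it reports false when nothing can be recorded, for example while an Android app is paused and has no surface. A sketch of the calling convention under that assumption; RecordFrame is a made-up name:

    // Hypothetical caller; ContextT stands for any backend context type
    // (OpenGLGraphicsContext, VulkanGraphicsContext) whose Begin() now reports failure.
    template <typename ContextT>
    void RecordFrame(ContextT *pContext)
    {
        if (!pContext->Begin())
        {
            // no usable surface/swap chain right now (e.g., paused Android app):
            // skip this frame instead of asserting
            return;
        }
        // ... record the frame's commands here ...
    }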
PushDebugMarker(const char *pName); void PopDebugMarker(); void SetViewport(int32_t x, int32_t y, uint32_t width, uint32_t height); diff --git a/FastCG/include/FastCG/Graphics/Vulkan/VulkanGraphicsSystem.h b/FastCG/include/FastCG/Graphics/Vulkan/VulkanGraphicsSystem.h index c1fef72e..013ea728 100644 --- a/FastCG/include/FastCG/Graphics/Vulkan/VulkanGraphicsSystem.h +++ b/FastCG/include/FastCG/Graphics/Vulkan/VulkanGraphicsSystem.h @@ -25,6 +25,24 @@ namespace FastCG { +#define FASTCG_DECL_VK_EXT_FN(fn) extern PFN_##fn fn + + namespace VkExt + { + FASTCG_DECL_VK_EXT_FN(vkCreateRenderPass2KHR); + FASTCG_DECL_VK_EXT_FN(vkCmdBeginRenderPass2KHR); + FASTCG_DECL_VK_EXT_FN(vkCmdEndRenderPass2KHR); +#if _DEBUG + FASTCG_DECL_VK_EXT_FN(vkCreateDebugUtilsMessengerEXT); + FASTCG_DECL_VK_EXT_FN(vkDestroyDebugUtilsMessengerEXT); + FASTCG_DECL_VK_EXT_FN(vkCmdBeginDebugUtilsLabelEXT); + FASTCG_DECL_VK_EXT_FN(vkCmdEndDebugUtilsLabelEXT); + FASTCG_DECL_VK_EXT_FN(vkSetDebugUtilsObjectNameEXT); +#endif + } + +#undef FASTCG_DECL_VK_EXT_FN + struct VulkanImageMemoryTransition { VkImageLayout layout; @@ -45,9 +63,18 @@ namespace FastCG { return mCurrentFrame; } - inline void DestroyBuffer(const VulkanBuffer *pBuffer); - inline void DestroyShader(const VulkanShader *pShader); - inline void DestroyTexture(const VulkanTexture *pTexture); + inline void DestroyBuffer(const VulkanBuffer *pBuffer) override; + inline void DestroyShader(const VulkanShader *pShader) override; + inline void DestroyTexture(const VulkanTexture *pTexture) override; +#if defined FASTCG_ANDROID + inline bool IsHeadless() const + { + return mSurface == VK_NULL_HANDLE; + } + + void OnWindowInitialized(); + void OnWindowTerminated(); +#endif protected: using Super = BaseGraphicsSystem; @@ -97,6 +124,7 @@ namespace FastCG }; VkInstance mInstance{VK_NULL_HANDLE}; + std::vector mInstanceExtensions; VkSurfaceKHR mSurface{VK_NULL_HANDLE}; VkPhysicalDevice mPhysicalDevice{VK_NULL_HANDLE}; VkPhysicalDeviceProperties mPhysicalDeviceProperties{}; @@ -104,15 +132,16 @@ namespace FastCG VkFormatProperties mFormatProperties[((size_t)LAST_FORMAT) + 1]{}; std::unique_ptr mAllocationCallbacks{nullptr}; VkDevice mDevice{VK_NULL_HANDLE}; + std::vector mDeviceExtensions; uint32_t mGraphicsAndPresentQueueFamilyIndex{~0u}; VkQueue mGraphicsAndPresentQueue{VK_NULL_HANDLE}; - VkSurfaceTransformFlagBitsKHR mPreTransform; - VkPresentModeKHR mPresentMode; - uint32_t mMaxSimultaneousFrames{0}; + VkSurfaceTransformFlagBitsKHR mPreTransform{(VkSurfaceTransformFlagBitsKHR)0}; + VkPresentModeKHR mPresentMode{VK_PRESENT_MODE_IMMEDIATE_KHR}; + VkSurfaceFormatKHR mSwapChainSurfaceFormat{}; + uint32_t mMaxSimultaneousFrames{1}; uint32_t mCurrentFrame{0}; uint32_t mSwapChainIndex{0}; VkSwapchainKHR mSwapChain{VK_NULL_HANDLE}; - VkSurfaceFormatKHR mSwapChainSurfaceFormat; std::vector mSwapChainTextures; std::vector mAcquireSwapChainImageSemaphores; std::vector mSubmitFinishedSemaphores; @@ -154,6 +183,7 @@ namespace FastCG void SelectPhysicalDevice(); void CreateAllocator(); void AcquirePhysicalDeviceProperties(); + void AcquirePhysicalDeviceSurfaceProperties(); void CreateDeviceAndGetQueues(); void RecreateSwapChainAndGetImages(); void AcquireNextSwapChainImage(); diff --git a/FastCG/include/FastCG/Graphics/Vulkan/VulkanTexture.h b/FastCG/include/FastCG/Graphics/Vulkan/VulkanTexture.h index 51721fa1..adf27fbd 100644 --- a/FastCG/include/FastCG/Graphics/Vulkan/VulkanTexture.h +++ b/FastCG/include/FastCG/Graphics/Vulkan/VulkanTexture.h @@ -61,6 +61,7 @@ namespace FastCG 
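The VkExt namespace above holds function pointers for entry points that the Android Vulkan loader may not export statically. They are presumably resolved once at startup; the header does not show the loader code, so this is a sketch of the usual pattern, with LoadRenderPass2Fns as a made-up helper name:

    #include <vulkan/vulkan.h>

    namespace VkExt
    {
        PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR;
        PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR;
    }

    // Device-level entry points resolve through vkGetDeviceProcAddr;
    // instance-level ones (e.g., the debug utils set) would use vkGetInstanceProcAddr.
    void LoadRenderPass2Fns(VkDevice device)
    {
        VkExt::vkCreateRenderPass2KHR = reinterpret_cast<PFN_vkCreateRenderPass2KHR>(
            vkGetDeviceProcAddr(device, "vkCreateRenderPass2KHR"));
        VkExt::vkCmdBeginRenderPass2KHR = reinterpret_cast<PFN_vkCmdBeginRenderPass2KHR>(
            vkGetDeviceProcAddr(device, "vkCmdBeginRenderPass2KHR"));
        // null results mean VK_KHR_create_renderpass2 wasn't enabled on this device
    }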
VmaAllocationInfo mAllocationInfo; VkImageView mDefaultImageView{VK_NULL_HANDLE}; VkSampler mDefaultSampler{VK_NULL_HANDLE}; + bool mUsesMappableMemory{false}; inline VkImage GetImage() const { @@ -92,7 +93,7 @@ namespace FastCG } inline VkImageAspectFlags GetAspectFlags() const { - return GetVkImageAspectFlags(GetFormat()); + return GetVkImageAspectFlags(GetFormat(), GetDataType()); } inline VkImageLayout GetRestingLayout() const { @@ -106,6 +107,11 @@ namespace FastCG { return GetVkImagePipelineStageFlags(GetUsage()); } + inline bool UsesMappableMemory() const + { + return mUsesMappableMemory; + } + void CreateImage(); void DestroyImage(); void TransitionToRestingLayout(); diff --git a/FastCG/include/FastCG/Graphics/Vulkan/VulkanUtils.h b/FastCG/include/FastCG/Graphics/Vulkan/VulkanUtils.h index ca3243c4..697c977c 100644 --- a/FastCG/include/FastCG/Graphics/Vulkan/VulkanUtils.h +++ b/FastCG/include/FastCG/Graphics/Vulkan/VulkanUtils.h @@ -41,12 +41,8 @@ namespace FastCG CASE_RETURN_STRING(VK_ERROR_TOO_MANY_OBJECTS); CASE_RETURN_STRING(VK_ERROR_FORMAT_NOT_SUPPORTED); CASE_RETURN_STRING(VK_ERROR_FRAGMENTED_POOL); - CASE_RETURN_STRING(VK_ERROR_UNKNOWN); CASE_RETURN_STRING(VK_ERROR_OUT_OF_POOL_MEMORY); CASE_RETURN_STRING(VK_ERROR_INVALID_EXTERNAL_HANDLE); - CASE_RETURN_STRING(VK_ERROR_FRAGMENTATION); - CASE_RETURN_STRING(VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS); - CASE_RETURN_STRING(VK_PIPELINE_COMPILE_REQUIRED); CASE_RETURN_STRING(VK_ERROR_SURFACE_LOST_KHR); CASE_RETURN_STRING(VK_ERROR_NATIVE_WINDOW_IN_USE_KHR); CASE_RETURN_STRING(VK_SUBOPTIMAL_KHR); @@ -55,12 +51,14 @@ namespace FastCG CASE_RETURN_STRING(VK_ERROR_VALIDATION_FAILED_EXT); CASE_RETURN_STRING(VK_ERROR_INVALID_SHADER_NV); CASE_RETURN_STRING(VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT); - CASE_RETURN_STRING(VK_ERROR_NOT_PERMITTED_KHR); CASE_RETURN_STRING(VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT); - CASE_RETURN_STRING(VK_THREAD_IDLE_KHR); - CASE_RETURN_STRING(VK_THREAD_DONE_KHR); - CASE_RETURN_STRING(VK_OPERATION_DEFERRED_KHR); - CASE_RETURN_STRING(VK_OPERATION_NOT_DEFERRED_KHR); +#if !defined FASTCG_ANDROID + // FIXME: Vulkan header in NDK 21 doesn't define VK_ERROR_UNKNOWN + CASE_RETURN_STRING(VK_ERROR_UNKNOWN); +#endif +#if defined VK_API_VERSION_1_2 + CASE_RETURN_STRING(VK_ERROR_FRAGMENTATION); +#endif default: FASTCG_THROW_EXCEPTION(Exception, "Vulkan: Unhandled VkResult %d", (int)vkResult); return nullptr; @@ -304,13 +302,17 @@ namespace FastCG CASE_RETURN_STRING(VK_IMAGE_LAYOUT_PREINITIALIZED); CASE_RETURN_STRING(VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL); CASE_RETURN_STRING(VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL); + CASE_RETURN_STRING(VK_IMAGE_LAYOUT_PRESENT_SRC_KHR); +#if defined VK_API_VERSION_1_2 CASE_RETURN_STRING(VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL); CASE_RETURN_STRING(VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL); CASE_RETURN_STRING(VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL); CASE_RETURN_STRING(VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL); +#endif +#if defined VK_API_VERSION_1_3 CASE_RETURN_STRING(VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL); CASE_RETURN_STRING(VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL); - CASE_RETURN_STRING(VK_IMAGE_LAYOUT_PRESENT_SRC_KHR); +#endif default: FASTCG_THROW_EXCEPTION(Exception, "Vulkan: Unhandled image layout %d", (int)layout); return nullptr; @@ -354,7 +356,7 @@ namespace FastCG return usageFlags; } - inline VkImageAspectFlags GetVkImageAspectFlags(TextureFormat format) + inline VkImageAspectFlags GetVkImageAspectFlags(TextureFormat format, 
TextureDataType dataType) { switch (format) { @@ -366,7 +368,14 @@ case TextureFormat::BGRA: return VK_IMAGE_ASPECT_COLOR_BIT; case TextureFormat::DEPTH_STENCIL: - return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; + if (dataType == TextureDataType::FLOAT) + { + return VK_IMAGE_ASPECT_DEPTH_BIT; + } + else + { + return VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT; + } case TextureFormat::DEPTH: return VK_IMAGE_ASPECT_DEPTH_BIT; default: @@ -721,6 +730,7 @@ {VK_FORMAT_B8G8R8A8_UINT, TextureFormat::RGBA, BitsPerChannel{8, 8, 8, 8}, TextureDataType::UNSIGNED_CHAR}, {VK_FORMAT_D24_UNORM_S8_UINT, TextureFormat::DEPTH_STENCIL, BitsPerChannel{24, 8}, TextureDataType::UNSIGNED_INT}, {VK_FORMAT_D24_UNORM_S8_UINT, TextureFormat::DEPTH_STENCIL, BitsPerChannel{24, 8}, TextureDataType::UNSIGNED_CHAR}, + {VK_FORMAT_X8_D24_UNORM_PACK32, TextureFormat::DEPTH_STENCIL, BitsPerChannel{24, 8}, TextureDataType::FLOAT}, {VK_FORMAT_D32_SFLOAT, TextureFormat::DEPTH, BitsPerChannel{32}, TextureDataType::FLOAT}, {VK_FORMAT_D16_UNORM, TextureFormat::DEPTH, BitsPerChannel{16}, TextureDataType::FLOAT}, }; @@ -738,7 +748,11 @@ rTableEntry.dataType == dataType; }); if (it == pTableEnd) { - FASTCG_THROW_EXCEPTION(Exception, "Vulkan: Unhandled format conversion"); + FASTCG_THROW_EXCEPTION(Exception, + "Vulkan: Unhandled format conversion (format: %s, rgba: %d/%d/%d/%d, dataType: %s)", + GetTextureFormatString(format), + bitsPerChannel.r, bitsPerChannel.g, bitsPerChannel.b, bitsPerChannel.a, + GetTextureDataTypeString(dataType)); return (VkFormat)0; } return it->vkFormat; diff --git a/FastCG/include/FastCG/Platform/Android/AndroidApplication.h b/FastCG/include/FastCG/Platform/Android/AndroidApplication.h new file mode 100644 index 00000000..224ecf2f --- /dev/null +++ b/FastCG/include/FastCG/Platform/Android/AndroidApplication.h @@ -0,0 +1,67 @@ +#ifndef FASTCG_ANDROID_APPLICATION_H +#define FASTCG_ANDROID_APPLICATION_H + +#ifdef FASTCG_ANDROID + +#include <FastCG/Platform/BaseApplication.h> + +#include <android_native_app_glue.h> + +void onAppCmd(android_app *, int32_t); +int32_t onInputEvent(android_app *, AInputEvent *); + +namespace FastCG +{ + class AndroidApplication : public BaseApplication + { + public: + AndroidApplication(const ApplicationSettings &applicationSettings) : BaseApplication(applicationSettings) {} + + inline static AndroidApplication *GetInstance() + { + return static_cast<AndroidApplication *>(BaseApplication::GetInstance()); + } + inline ANativeWindow *GetWindow() + { + return mAndroidApp->window; + } + inline const char *GetInternalDataPath() const + { + return mAndroidApp->activity->internalDataPath; + } + inline JNIEnv *GetJniEnv() + { + return mJniEnv; + } + inline bool IsPaused() const + { + return mPaused; + } + uint64_t GetNativeKey(Key key) const override; + + protected: + void RunMainLoop() override; + void OnPreInitialize() override; + void OnPostFinalize() override; + + private: + android_app *mAndroidApp{nullptr}; + JNIEnv *mJniEnv{nullptr}; + bool mPaused{false}; + + void SetAndroidApp(android_app *androidApp); + + inline void SetPaused(bool paused) + { + mPaused = paused; + } + + friend void ::onAppCmd(android_app *, int32_t); + friend int32_t(::onInputEvent)(android_app *, AInputEvent *); + friend void ::android_main(android_app *); + }; +} + +#endif + +#endif \ No newline at end of file diff --git a/FastCG/include/FastCG/Platform/Application.h b/FastCG/include/FastCG/Platform/Application.h index 5ac75875..0dba916a 100644 --- a/FastCG/include/FastCG/Platform/Application.h +++
b/FastCG/include/FastCG/Platform/Application.h @@ -13,6 +13,12 @@ namespace FastCG { using Application = X11Application; } +#elif defined FASTCG_ANDROID +#include +namespace FastCG +{ + using Application = AndroidApplication; +} #else #error "FASTCG: Unhandled platform" #endif diff --git a/FastCG/include/FastCG/Platform/BaseApplication.h b/FastCG/include/FastCG/Platform/BaseApplication.h index ebb1eff0..27c48218 100644 --- a/FastCG/include/FastCG/Platform/BaseApplication.h +++ b/FastCG/include/FastCG/Platform/BaseApplication.h @@ -93,6 +93,7 @@ namespace FastCG return mSettings.rendering.path; } + int Run(); int Run(int argc, char **argv); inline void Exit() @@ -147,4 +148,21 @@ namespace FastCG } +#if defined FASTCG_ANDROID +#define FASTCG_MAIN(appType) \ + void android_main(android_app *androidApp) \ + { \ + appType app; \ + app.SetAndroidApp(androidApp); \ + app.Run(); \ + } +#else +#define FASTCG_MAIN(appType) \ + int main(int argc, char **argv) \ + { \ + appType app; \ + return app.Run(argc, argv); \ + } +#endif + #endif \ No newline at end of file diff --git a/FastCG/include/FastCG/Platform/Directory.inc b/FastCG/include/FastCG/Platform/Directory.inc index 8a6bf127..a6619115 100644 --- a/FastCG/include/FastCG/Platform/Directory.inc +++ b/FastCG/include/FastCG/Platform/Directory.inc @@ -1,10 +1,12 @@ #if defined FASTCG_WINDOWS #include #include -#elif defined FASTCG_LINUX +#elif defined FASTCG_POSIX #include +#if defined FASTCG_POSIX #include #endif +#endif namespace { @@ -33,12 +35,12 @@ namespace } FindClose(findHandle); } -#elif defined FASTCG_LINUX +#elif defined FASTCG_POSIX if (auto dir = opendir(rDirectoryPath.c_str())) { while (auto file = readdir(dir)) { - if (!file->d_name || file->d_name[0] == '.') + if (!file->d_name[0] || file->d_name[0] == '.') { continue; } @@ -66,7 +68,7 @@ namespace FastCG #if defined FASTCG_WINDOWS auto directoryAttributes = GetFileAttributes(rDirectoryPath.c_str()); return (directoryAttributes != INVALID_FILE_ATTRIBUTES && (directoryAttributes & FILE_ATTRIBUTE_DIRECTORY)); -#elif defined FASTCG_LINUX +#elif defined FASTCG_POSIX struct stat fileStat; return stat(rDirectoryPath.c_str(), &fileStat) == 0 && (fileStat.st_mode & S_IFDIR) != 0; #else diff --git a/FastCG/include/FastCG/Platform/File.inc b/FastCG/include/FastCG/Platform/File.inc index 4f5fae87..025e2b37 100644 --- a/FastCG/include/FastCG/Platform/File.inc +++ b/FastCG/include/FastCG/Platform/File.inc @@ -3,7 +3,7 @@ #if defined FASTCG_WINDOWS #include -#elif defined FASTCG_LINUX +#elif defined FASTCG_POSIX #include #endif @@ -80,7 +80,7 @@ namespace FastCG #if defined FASTCG_WINDOWS unsigned long fileAttributes = GetFileAttributes(rFilePath.c_str()); return (fileAttributes != INVALID_FILE_ATTRIBUTES); -#elif defined FASTCG_LINUX +#elif defined FASTCG_POSIX struct stat fileStat; return stat(rFilePath.c_str(), &fileStat) == 0; #else diff --git a/FastCG/include/FastCG/Platform/FileWriter.inc b/FastCG/include/FastCG/Platform/FileWriter.inc index 4271cbaf..754271ec 100644 --- a/FastCG/include/FastCG/Platform/FileWriter.inc +++ b/FastCG/include/FastCG/Platform/FileWriter.inc @@ -16,7 +16,7 @@ namespace if (!fileStream.is_open()) { - FASTCG_THROW_EXCEPTION(FastCG::Exception, "Error opening file: %s", rFilePath.c_str()); + FASTCG_THROW_EXCEPTION(FastCG::Exception, "Couldn't open file: %s", rFilePath.c_str()); } fileStream.write((const char *)pData, dataSize); diff --git a/FastCG/include/FastCG/Platform/Thread.h b/FastCG/include/FastCG/Platform/Thread.h index 9ea3b0f9..9dc8435b 100644 --- 
a/FastCG/include/FastCG/Platform/Thread.h +++ b/FastCG/include/FastCG/Platform/Thread.h @@ -3,7 +3,7 @@ #if defined FASTCG_WINDOWS #include -#elif defined FASTCG_LINUX +#elif defined FASTCG_POSIX #include #endif @@ -16,7 +16,7 @@ namespace FastCG { #if defined FASTCG_WINDOWS ::Sleep(static_cast(seconds * 1e3)); -#elif defined FASTCG_LINUX +#elif defined FASTCG_POSIX sleep((unsigned int)seconds); #else #error "Thread::Sleep() not implemented on current platform" diff --git a/FastCG/include/FastCG/Platform/Timer.h b/FastCG/include/FastCG/Platform/Timer.h index a8587e08..baf13efe 100644 --- a/FastCG/include/FastCG/Platform/Timer.h +++ b/FastCG/include/FastCG/Platform/Timer.h @@ -5,7 +5,7 @@ #if defined FASTCG_WINDOWS #include -#elif defined FASTCG_LINUX +#elif defined FASTCG_POSIX #include #endif @@ -30,7 +30,7 @@ namespace FastCG } else { - FASTCG_THROW_EXCEPTION(Exception, "Cannot query performance counter: %d", 0); + FASTCG_THROW_EXCEPTION(Exception, "Couldn't query performance counter: %d", 0); return 0; } } @@ -65,11 +65,11 @@ namespace FastCG } else { - FASTCG_THROW_EXCEPTION(Exception, "Cannot query performance counter frequency: %d", 0); + FASTCG_THROW_EXCEPTION(Exception, "Couldn't query performance counter frequency: %d", 0); } } }; -#elif defined FASTCG_LINUX +#elif defined FASTCG_POSIX class Timer { public: diff --git a/FastCG/include/FastCG/Platform/Windows/WindowsApplication.h b/FastCG/include/FastCG/Platform/Windows/WindowsApplication.h index 93ba13a8..6fd96450 100644 --- a/FastCG/include/FastCG/Platform/Windows/WindowsApplication.h +++ b/FastCG/include/FastCG/Platform/Windows/WindowsApplication.h @@ -7,6 +7,8 @@ #include +LRESULT WndProc(HWND, UINT, WPARAM, LPARAM); + namespace FastCG { class WindowsApplication : public BaseApplication @@ -28,7 +30,7 @@ namespace FastCG } uint64_t GetNativeKey(Key key) const override; - friend LRESULT WndProc(HWND, UINT, WPARAM, LPARAM); + friend LRESULT(::WndProc)(HWND, UINT, WPARAM, LPARAM); protected: void OnPreInitialize() override; diff --git a/FastCG/include/FastCG/Reflection/Inspectable.h b/FastCG/include/FastCG/Reflection/Inspectable.h index 13cb800a..86a99fc9 100644 --- a/FastCG/include/FastCG/Reflection/Inspectable.h +++ b/FastCG/include/FastCG/Reflection/Inspectable.h @@ -154,6 +154,8 @@ namespace FastCG class IInspectableProperty { public: + virtual ~IInspectableProperty() = default; + virtual InspectablePropertyType GetType() const = 0; virtual const std::string &GetName() const = 0; virtual void GetValue(void *pValue) const = 0; @@ -220,7 +222,7 @@ namespace FastCG { } - inline InspectablePropertyType GetType() const + inline InspectablePropertyType GetType() const override { return GetInspectablePropertyTypeFromType::value; } @@ -325,7 +327,7 @@ namespace FastCG { } - inline InspectablePropertyType GetType() const + inline InspectablePropertyType GetType() const override { return GetInspectablePropertyTypeFromType::value; } diff --git a/FastCG/include/FastCG/Rendering/BaseWorldRenderer.inc b/FastCG/include/FastCG/Rendering/BaseWorldRenderer.inc index 534d3767..9c0f9f79 100644 --- a/FastCG/include/FastCG/Rendering/BaseWorldRenderer.inc +++ b/FastCG/include/FastCG/Rendering/BaseWorldRenderer.inc @@ -30,8 +30,8 @@ namespace height, FastCG::TextureType::TEXTURE_2D, FastCG::TextureUsageFlagBit::SAMPLED | FastCG::TextureUsageFlagBit::RENDER_TARGET, - FastCG::TextureFormat::DEPTH, - {32}, + FastCG::TextureFormat::DEPTH_STENCIL, + {24, 8}, FastCG::TextureDataType::FLOAT, FastCG::TextureFilter::POINT_FILTER, 
FastCG::TextureWrapMode::CLAMP, diff --git a/FastCG/include/FastCG/Rendering/IWorldRenderer.h b/FastCG/include/FastCG/Rendering/IWorldRenderer.h index 5a71a602..79ae7667 100644 --- a/FastCG/include/FastCG/Rendering/IWorldRenderer.h +++ b/FastCG/include/FastCG/Rendering/IWorldRenderer.h @@ -9,6 +9,8 @@ namespace FastCG class IWorldRenderer { public: + virtual ~IWorldRenderer() = default; + virtual float GetShadowMapBias() const = 0; virtual void SetShadowMapBias(float shadowMapBias) = 0; virtual float GetPCSSUvScale() const = 0; diff --git a/FastCG/src/Assets/AssetSystem.cpp b/FastCG/src/Assets/AssetSystem.cpp index 26b87091..7db00bd9 100644 --- a/FastCG/src/Assets/AssetSystem.cpp +++ b/FastCG/src/Assets/AssetSystem.cpp @@ -1,12 +1,30 @@ +#if defined FASTCG_ANDROID +#include +#endif +#include #include namespace FastCG { AssetSystem::AssetSystem(const AssetSystemArgs &rArgs) { - mBundleRoots.emplace_back("../assets/FastCG"); - std::transform(rArgs.rBundles.cbegin(), rArgs.rBundles.cend(), std::back_inserter(mBundleRoots), [](const auto &bundle) - { return "../assets/" + bundle; }); + std::string assetsRoot = +#if defined FASTCG_ANDROID + // assets packaged in the APK are copied to the internal storage at app startup + AndroidApplication::GetInstance()->GetInternalDataPath() + std::string("/assets") +#else + // binaries are deployed to a sibling of the assets directory + "../assets" +#endif + ; + mBundleRoots.emplace_back(assetsRoot + "/FastCG"); + std::transform(rArgs.rBundles.cbegin(), rArgs.rBundles.cend(), std::back_inserter(mBundleRoots), [&](const auto &bundle) + { return assetsRoot + "/" + bundle; }); + FASTCG_LOG_VERBOSE("Bundle roots:"); + for (const auto &rBundleRoot : mBundleRoots) + { + FASTCG_LOG_VERBOSE("- %s", rBundleRoot.c_str()); + } } } diff --git a/FastCG/src/Graphics/OpenGL/OpenGLBuffer.cpp b/FastCG/src/Graphics/OpenGL/OpenGLBuffer.cpp index ea0aaa6e..2e676bad 100644 --- a/FastCG/src/Graphics/OpenGL/OpenGLBuffer.cpp +++ b/FastCG/src/Graphics/OpenGL/OpenGLBuffer.cpp @@ -13,7 +13,11 @@ namespace FastCG auto target = GetOpenGLTarget(mUsage); glGenBuffers(1, &mBufferId); + FASTCG_CHECK_OPENGL_ERROR("Couldn't generate buffer (buffer: %s)", rArgs.name.c_str()); + glBindBuffer(target, mBufferId); + FASTCG_CHECK_OPENGL_ERROR("Couldn't bind buffer (buffer: %s)", rArgs.name.c_str()); + #ifdef _DEBUG { auto bufferLabel = mName + " (GL_BUFFER)"; @@ -23,9 +27,12 @@ namespace FastCG if (rArgs.dataSize > 0) { glBufferData(target, (GLsizeiptr)rArgs.dataSize, rArgs.pData, GetOpenGLUsageHint(mUsage)); + FASTCG_CHECK_OPENGL_ERROR("Couldn't create buffer data store (buffer: %s, usage: %d)", rArgs.name.c_str(), (int)mUsage); + if (rArgs.pData != nullptr) { glBufferSubData(target, 0, (GLsizeiptr)rArgs.dataSize, (const GLvoid *)rArgs.pData); + FASTCG_CHECK_OPENGL_ERROR("Couldn't update the buffer data store (buffer: %s)", rArgs.name.c_str()); } } } diff --git a/FastCG/src/Graphics/OpenGL/OpenGLGraphicsContext.cpp b/FastCG/src/Graphics/OpenGL/OpenGLGraphicsContext.cpp index 7dac538a..c6f15e32 100644 --- a/FastCG/src/Graphics/OpenGL/OpenGLGraphicsContext.cpp +++ b/FastCG/src/Graphics/OpenGL/OpenGLGraphicsContext.cpp @@ -1,5 +1,6 @@ #ifdef FASTCG_OPENGL +#include #include #include #include @@ -17,7 +18,7 @@ namespace FastCG { #if !defined FASTCG_DISABLE_GPU_TIMING glGenQueries(FASTCG_ARRAYSIZE(mTimeElapsedQueries), mTimeElapsedQueries); - FASTCG_CHECK_OPENGL_ERROR(); + FASTCG_CHECK_OPENGL_ERROR("Couldn't generate time queries"); #endif } @@ -28,15 +29,23 @@ namespace FastCG #endif } - void 
OpenGLGraphicsContext::Begin() + bool OpenGLGraphicsContext::Begin() { +#if defined FASTCG_ANDROID + if (OpenGLGraphicsSystem::GetInstance()->IsHeadless() || AndroidApplication::GetInstance()->IsPaused()) + { + return false; + } +#endif + assert(mEnded); mEnded = false; #if !defined FASTCG_DISABLE_GPU_TIMING mElapsedTime = 0; glBeginQuery(GL_TIME_ELAPSED, mTimeElapsedQueries[mCurrentQuery]); - FASTCG_CHECK_OPENGL_ERROR(); + FASTCG_CHECK_OPENGL_ERROR("Couldn't begin time queries"); #endif + return true; } void OpenGLGraphicsContext::PushDebugMarker(const char *pName) @@ -224,7 +233,7 @@ namespace FastCG return; } glBindBufferBase(GetOpenGLTarget(pBuffer->GetUsage()), rResourceInfo.binding, *pBuffer); - FASTCG_CHECK_OPENGL_ERROR(); + FASTCG_CHECK_OPENGL_ERROR("Couldn't bind buffer to resource (buffer: %s, resource: %s, binding: %d)", pBuffer->GetName().c_str(), pName, rResourceInfo.binding); mResourceUsage.emplace(pName); } @@ -246,7 +255,7 @@ namespace FastCG glActiveTexture(GL_TEXTURE0 + rResourceInfo.binding); glBindTexture(GL_TEXTURE_2D, *pTexture); glUniform1i(rResourceInfo.location, rResourceInfo.binding); - FASTCG_CHECK_OPENGL_ERROR(); + FASTCG_CHECK_OPENGL_ERROR("Couldn't bind texture to resource (texture: %s, location: %d, binding: %d)", pTexture->GetName().c_str(), rResourceInfo.location, rResourceInfo.binding); } void OpenGLGraphicsContext::Blit(const OpenGLTexture *pSrc, const OpenGLTexture *pDst) @@ -278,7 +287,8 @@ namespace FastCG { auto drawFbo = OpenGLGraphicsSystem::GetInstance()->GetOrCreateFramebuffer(&pDst, 1, nullptr); glBindFramebuffer(GL_DRAW_FRAMEBUFFER, drawFbo); - glDrawBuffer(GL_COLOR_ATTACHMENT0); + GLenum drawBuffers[1] = {GL_COLOR_ATTACHMENT0}; + glDrawBuffers(1, drawBuffers); dstWidth = (GLint)pDst->GetWidth(); dstHeight = (GLint)pDst->GetHeight(); } @@ -401,7 +411,7 @@ namespace FastCG { SetupDraw(); glDrawElementsBaseVertex(GetOpenGLPrimitiveType(primitiveType), (GLsizei)indexCount, GL_UNSIGNED_INT, (GLvoid *)(uintptr_t)(firstIndex * sizeof(uint32_t)), (GLint)vertexOffset); - FASTCG_CHECK_OPENGL_ERROR(); + FASTCG_CHECK_OPENGL_ERROR("Couldn't draw"); } void OpenGLGraphicsContext::DrawInstancedIndexed(PrimitiveType primitiveType, uint32_t firstInstance, uint32_t instanceCount, uint32_t firstIndex, uint32_t indexCount, int32_t vertexOffset) @@ -409,18 +419,18 @@ namespace FastCG assert(firstInstance == 0); SetupDraw(); glDrawElementsInstancedBaseVertex(GetOpenGLPrimitiveType(primitiveType), (GLsizei)indexCount, GL_UNSIGNED_INT, (GLvoid *)(uintptr_t)(firstIndex * sizeof(uint32_t)), (GLsizei)instanceCount, (GLint)vertexOffset); - FASTCG_CHECK_OPENGL_ERROR(); + FASTCG_CHECK_OPENGL_ERROR("Couldn't draw instanced"); } void OpenGLGraphicsContext::End() { assert(!mEnded); mpBoundShader = nullptr; - FASTCG_CHECK_OPENGL_ERROR(); + FASTCG_CHECK_OPENGL_ERROR("Couldn't end graphics context"); #if !defined FASTCG_DISABLE_GPU_TIMING glEndQuery(GL_TIME_ELAPSED); - FASTCG_CHECK_OPENGL_ERROR(); + FASTCG_CHECK_OPENGL_ERROR("Couldn't end time queries"); mEndedQuery[mCurrentQuery] = true; #endif mEnded = true; diff --git a/FastCG/src/Graphics/OpenGL/OpenGLGraphicsSystem.cpp b/FastCG/src/Graphics/OpenGL/OpenGLGraphicsSystem.cpp index b8a06f3c..f8220c91 100644 --- a/FastCG/src/Graphics/OpenGL/OpenGLGraphicsSystem.cpp +++ b/FastCG/src/Graphics/OpenGL/OpenGLGraphicsSystem.cpp @@ -1,10 +1,6 @@ #ifdef FASTCG_OPENGL -#if defined FASTCG_WINDOWS -#include -#elif defined FASTCG_LINUX -#include -#endif +#include #include #include #include @@ -16,17 +12,74 @@ #include #endif +#include 
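For orientation, the Android branch added below brings up GL ES through EGL in two stages: at startup it creates a "headless" context bound to a 1x1 pbuffer (so GL resources can be created before Android hands over a native window), and once the window arrives OnWindowInitialized() creates a "headed" context that shares objects with it. The headless stage reduces to the following sketch (condensed from the hunk below; error checks elided):

    EGLDisplay display = eglGetDisplay(EGL_DEFAULT_DISPLAY);
    eglInitialize(display, nullptr, nullptr);
    const EGLint configAttribs[] = {EGL_SURFACE_TYPE, EGL_WINDOW_BIT,
                                    EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT,
                                    EGL_RED_SIZE, 8, EGL_GREEN_SIZE, 8,
                                    EGL_BLUE_SIZE, 8, EGL_ALPHA_SIZE, 8,
                                    EGL_NONE};
    EGLConfig config;
    EGLint numConfigs;
    eglChooseConfig(display, configAttribs, &config, 1, &numConfigs);
    const EGLint contextAttribs[] = {EGL_CONTEXT_MAJOR_VERSION, 3, // GL ES 3.2, matching the
                                     EGL_CONTEXT_MINOR_VERSION, 2, // "#version 320 es" shader prefix
                                     EGL_NONE};
    EGLContext headless = eglCreateContext(display, config, EGL_NO_CONTEXT, contextAttribs);
    const EGLint pbufferAttribs[] = {EGL_WIDTH, 1, EGL_HEIGHT, 1, EGL_NONE};
    EGLSurface pbuffer = eglCreatePbufferSurface(display, config, pbufferAttribs);
    eglMakeCurrent(display, pbuffer, pbuffer, headless); // GL ES is usable without a window from here on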
#include #include namespace { -#ifdef FASTCG_LINUX +#if defined FASTCG_LINUX int GLXContextErrorHandler(Display *dpy, XErrorEvent *ev) { - FASTCG_THROW_EXCEPTION(FastCG::Exception, "Error creating GLX context (error_code: %d)", ev->error_code); + FASTCG_THROW_EXCEPTION(FastCG::Exception, "Couldn't create the GLX context (error: %d)", ev->error_code); return 0; } +#elif defined FASTCG_ANDROID + inline const char *eglGetErrorString(EGLint error) + { +#ifdef CASE_RETURN_STRING +#undef CASE_RETURN_STRING +#endif + +#define CASE_RETURN_STRING(str) \ + case str: \ + return #str + + switch (error) + { + CASE_RETURN_STRING(EGL_SUCCESS); + CASE_RETURN_STRING(EGL_NOT_INITIALIZED); + CASE_RETURN_STRING(EGL_BAD_ACCESS); + CASE_RETURN_STRING(EGL_BAD_ALLOC); + CASE_RETURN_STRING(EGL_BAD_ATTRIBUTE); + CASE_RETURN_STRING(EGL_BAD_CONTEXT); + CASE_RETURN_STRING(EGL_BAD_CONFIG); + CASE_RETURN_STRING(EGL_BAD_CURRENT_SURFACE); + CASE_RETURN_STRING(EGL_BAD_DISPLAY); + CASE_RETURN_STRING(EGL_BAD_SURFACE); + CASE_RETURN_STRING(EGL_BAD_MATCH); + CASE_RETURN_STRING(EGL_BAD_PARAMETER); + CASE_RETURN_STRING(EGL_BAD_NATIVE_PIXMAP); + CASE_RETURN_STRING(EGL_BAD_NATIVE_WINDOW); + CASE_RETURN_STRING(EGL_CONTEXT_LOST); + default: + FASTCG_THROW_EXCEPTION(FastCG::Exception, "EGL: Unhandled error %d", (int)error); + return nullptr; + } + +#undef CASE_RETURN_STRING + } + +#ifdef _DEBUG +#define FASTCG_CHECK_EGL_ERROR(fmt, ...) \ + { \ + EGLint __error; \ + if ((__error = eglGetError()) != EGL_SUCCESS) \ + { \ + char msg[4096]; \ + sprintf(msg, fmt, ##__VA_ARGS__); \ + FASTCG_THROW_EXCEPTION(FastCG::Exception, "%s (error: %s)", msg, eglGetErrorString(__error)); \ + } \ + } +#else +#define FASTCG_CHECK_EGL_ERROR(...) +#endif + + const EGLint EGL_CONTEXT_ATTRIBS[] = { + EGL_CONTEXT_MAJOR_VERSION, 3, + EGL_CONTEXT_MINOR_VERSION, 2, + EGL_NONE}; + #endif void OpenGLDebugCallback(GLenum source, GLenum type, GLuint id, GLenum severity, GLsizei length, const GLchar *message, const GLvoid *userParam) @@ -39,7 +92,6 @@ namespace << " - " << message << std::endl; } - } namespace FastCG @@ -54,13 +106,15 @@ namespace FastCG { BaseGraphicsSystem::OnInitialize(); +#if !defined FASTCG_ANDROID InitializeGlew(); +#endif CreateOpenGLContext(); #ifdef _DEBUG glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS); - glDebugMessageCallbackARB(OpenGLDebugCallback, nullptr); + glDebugMessageCallback(OpenGLDebugCallback, nullptr); glDebugMessageControl(GL_DEBUG_SOURCE_APPLICATION, GL_DEBUG_TYPE_PUSH_GROUP, GL_DEBUG_SEVERITY_NOTIFICATION, 0, nullptr, false); glDebugMessageControl(GL_DEBUG_SOURCE_APPLICATION, GL_DEBUG_TYPE_POP_GROUP, GL_DEBUG_SEVERITY_NOTIFICATION, 0, nullptr, false); #endif @@ -172,12 +226,10 @@ namespace FastCG glFramebufferTexture2D(GL_FRAMEBUFFER, attachment, GetOpenGLTarget(pDepthStencilBuffer->GetType()), *pDepthStencilBuffer, 0); } - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, oldFboId); - - auto status = glCheckNamedFramebufferStatusEXT(fboId, GL_DRAW_FRAMEBUFFER); + auto status = glCheckFramebufferStatus(GL_DRAW_FRAMEBUFFER); if (status != GL_FRAMEBUFFER_COMPLETE) { - FASTCG_THROW_EXCEPTION(OpenGLException, "Error creating FBO: 0x%x\n", status); + FASTCG_THROW_EXCEPTION(OpenGLException, "Couldn't create FBO: 0x%x\n", status); } mFboIds.emplace(fboHash, fboId); @@ -190,6 +242,8 @@ namespace FastCG mTextureToFboHashes[*pDepthStencilBuffer].emplace_back(fboHash); } + glBindFramebuffer(GL_DRAW_FRAMEBUFFER, oldFboId); + return fboId; } @@ -235,6 +289,7 @@ namespace FastCG return vaoId; } +#if !defined FASTCG_ANDROID void 
OpenGLGraphicsSystem::InitializeGlew() { #if defined FASTCG_WINDOWS @@ -264,18 +319,18 @@ namespace FastCG auto pixelFormat = ChoosePixelFormat(mHDC, &pixelFormatDescr); if (!SetPixelFormat(mHDC, pixelFormat, &pixelFormatDescr)) { - FASTCG_THROW_EXCEPTION(FastCG::Exception, "Error setting current pixel format"); + FASTCG_THROW_EXCEPTION(FastCG::Exception, "WGL: Couldn't set the current pixel format"); } mHGLRC = wglCreateContext(mHDC); if (mHGLRC == 0) { - FASTCG_THROW_EXCEPTION(Exception, "Error creating a temporary WGL context"); + FASTCG_THROW_EXCEPTION(Exception, "WGL: Couldn't create the temporary WGL context"); } if (!wglMakeCurrent(mHDC, mHGLRC)) { - FASTCG_THROW_EXCEPTION(Exception, "Error making a temporary WGL context current"); + FASTCG_THROW_EXCEPTION(Exception, "WGL: Couldn't make the temporary WGL context current"); } #elif defined FASTCG_LINUX auto *pDisplay = X11Application::GetInstance()->GetDisplay(); @@ -302,9 +357,10 @@ namespace FastCG GLenum glewInitRes; if ((glewInitRes = glewInit()) != GLEW_OK) { - FASTCG_THROW_EXCEPTION(Exception, "Error intializing Glew: %s", glewGetErrorString(glewInitRes)); + FASTCG_THROW_EXCEPTION(Exception, "Couldn't initialize Glew: %s", glewGetErrorString(glewInitRes)); } } +#endif void OpenGLGraphicsSystem::CreateOpenGLContext() { @@ -325,12 +381,12 @@ namespace FastCG mHGLRC = wglCreateContextAttribsARB(mHDC, mHGLRC, attribs); if (mHGLRC == 0) { - FASTCG_THROW_EXCEPTION(Exception, "Error creating the WGL context"); + FASTCG_THROW_EXCEPTION(Exception, "WGL: Couldn't create the WGL context"); } if (!wglMakeCurrent(mHDC, mHGLRC)) { - FASTCG_THROW_EXCEPTION(Exception, "Error making the WGL context current"); + FASTCG_THROW_EXCEPTION(Exception, "WGL: Couldn't make the WGL context current"); } wglDeleteContext(oldHGLRC); @@ -401,7 +457,7 @@ namespace FastCG if (fbConfig == nullptr) { - FASTCG_THROW_EXCEPTION(Exception, "No matching framebuffer config"); + FASTCG_THROW_EXCEPTION(Exception, "GLX: Couldn't find an appropriate framebuffer configuration"); } auto &rWindow = X11Application::GetInstance()->CreateWindow(pVisualInfo); @@ -416,17 +472,60 @@ namespace FastCG if (mpRenderContext == nullptr) { - FASTCG_THROW_EXCEPTION(Exception, "Error creating the GLX context"); + FASTCG_THROW_EXCEPTION(Exception, "GLX: Couldn't create the GLX context"); } XSync(pDisplay, False); if (!glXMakeContextCurrent(pDisplay, rWindow, rWindow, mpRenderContext)) { - FASTCG_THROW_EXCEPTION(Exception, "Error making the GLX context current"); + FASTCG_THROW_EXCEPTION(Exception, "GLX: Couldn't make the GLX context current"); } glXSwapIntervalEXT(pDisplay, rWindow, mArgs.vsync ? 
1 : 0); +#elif defined FASTCG_ANDROID + mDisplay = eglGetDisplay(EGL_DEFAULT_DISPLAY); + if (!eglInitialize(mDisplay, 0, 0)) + { + FASTCG_CHECK_EGL_ERROR("EGL: Couldn't initialize EGL"); + } + + const EGLint configAttribs[] = { + EGL_SURFACE_TYPE, EGL_WINDOW_BIT, + EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, + EGL_BLUE_SIZE, 8, + EGL_GREEN_SIZE, 8, + EGL_RED_SIZE, 8, + EGL_ALPHA_SIZE, 8, + EGL_NONE}; + + EGLint numConfigs; + if (!eglChooseConfig(mDisplay, configAttribs, &mConfig, 1, &numConfigs)) + { + FASTCG_CHECK_EGL_ERROR("EGL: Couldn't find an appropriate EGL configuration"); + } + + mHeadlessContext = eglCreateContext(mDisplay, mConfig, EGL_NO_CONTEXT, EGL_CONTEXT_ATTRIBS); + if (mHeadlessContext == EGL_NO_CONTEXT) + { + FASTCG_THROW_EXCEPTION(Exception, "EGL: Couldn't create the headless EGL context"); + } + + const EGLint pbufferAttribs[] = { + EGL_WIDTH, 1, + EGL_HEIGHT, 1, + EGL_NONE}; + + mPbufferSurface = eglCreatePbufferSurface(mDisplay, mConfig, pbufferAttribs); + if (mPbufferSurface == EGL_NO_SURFACE) + { + FASTCG_THROW_EXCEPTION(Exception, "EGL: Couldn't create the Pbuffer EGL surface"); + } + + if (!eglMakeCurrent(mDisplay, mPbufferSurface, mPbufferSurface, mHeadlessContext)) + { + FASTCG_CHECK_EGL_ERROR("EGL: Couldn't make the headless EGL context current"); + } #else #error FastCG::OpenGLRenderingSystem::CreateOpenGLContext() is not implemented on the current platform #endif @@ -468,6 +567,35 @@ mpRenderContext = nullptr; } +#elif defined FASTCG_ANDROID + if (mDisplay != EGL_NO_DISPLAY) + { + if (mWindowSurface != EGL_NO_SURFACE) + { + eglDestroySurface(mDisplay, mWindowSurface); + + mWindowSurface = EGL_NO_SURFACE; + } + if (mHeadedContext != EGL_NO_CONTEXT) + { + eglDestroyContext(mDisplay, mHeadedContext); + + mHeadedContext = EGL_NO_CONTEXT; + } + if (mPbufferSurface != EGL_NO_SURFACE) + { + eglDestroySurface(mDisplay, mPbufferSurface); + + mPbufferSurface = EGL_NO_SURFACE; + } + if (mHeadlessContext != EGL_NO_CONTEXT) + { + eglDestroyContext(mDisplay, mHeadlessContext); + + mHeadlessContext = EGL_NO_CONTEXT; + } + eglTerminate(mDisplay); + } #else #error "FastCG::OpenGLRenderingSystem::DestroyOpenGLContext() is not implemented on the current platform" #endif @@ -481,6 +609,12 @@ auto *pDisplay = X11Application::GetInstance()->GetDisplay(); auto &rWindow = X11Application::GetInstance()->GetWindow(); glXSwapBuffers(pDisplay, rWindow); +#elif defined FASTCG_ANDROID + if (IsHeadless() || AndroidApplication::GetInstance()->IsPaused()) + { + return; + } + eglSwapBuffers(mDisplay, mWindowSurface); #else #error "OpenGLRenderingSystem::Present() not implemented on the current platform" #endif @@ -505,6 +639,51 @@ return 0; #endif } + +#if defined FASTCG_ANDROID + void OpenGLGraphicsSystem::OnWindowInitialized() + { + assert(mHeadlessContext != EGL_NO_CONTEXT); + assert(mHeadedContext == EGL_NO_CONTEXT); + assert(mWindowSurface == EGL_NO_SURFACE); + + auto *pWindow = AndroidApplication::GetInstance()->GetWindow(); + assert(pWindow != nullptr); + + mHeadedContext = eglCreateContext(mDisplay, mConfig, mHeadlessContext, EGL_CONTEXT_ATTRIBS); + if (mHeadedContext == EGL_NO_CONTEXT) + { + FASTCG_THROW_EXCEPTION(Exception, "Couldn't create the headed EGL context"); + } + + mWindowSurface = eglCreateWindowSurface(mDisplay, mConfig, pWindow, NULL); + if (mWindowSurface == EGL_NO_SURFACE) + { + FASTCG_CHECK_EGL_ERROR("Couldn't create the EGL window surface"); + } + + if (!eglMakeCurrent(mDisplay, mWindowSurface, mWindowSurface, mHeadedContext))
+ { + FASTCG_CHECK_EGL_ERROR("Couldn't make the headed EGL context current"); + } + } + + void OpenGLGraphicsSystem::OnWindowTerminated() + { + if (mWindowSurface != EGL_NO_SURFACE) + { + eglDestroySurface(mDisplay, mWindowSurface); + + mWindowSurface = EGL_NO_SURFACE; + } + if (mHeadedContext != EGL_NO_CONTEXT) + { + eglDestroyContext(mDisplay, mHeadedContext); + + mHeadedContext = EGL_NO_CONTEXT; + } + } +#endif } #endif diff --git a/FastCG/src/Graphics/OpenGL/OpenGLShader.cpp b/FastCG/src/Graphics/OpenGL/OpenGLShader.cpp index 880599e0..23fb815c 100644 --- a/FastCG/src/Graphics/OpenGL/OpenGLShader.cpp +++ b/FastCG/src/Graphics/OpenGL/OpenGLShader.cpp @@ -5,51 +5,50 @@ #include #include #include -#include +#include + +#include namespace { -#define DECLARE_CHECK_STATUS_FN(object) \ - void Check##object##Status(GLuint objectId, GLenum status, const std::string &rIdentifier) \ - { \ - GLint statusValue; \ - glGet##object##iv(objectId, status, &statusValue); \ - GLint infoLogLength; \ - glGet##object##iv(objectId, GL_INFO_LOG_LENGTH, &infoLogLength); \ - if (infoLogLength > 0) \ - { \ - std::string infoLog; \ - infoLog.reserve(infoLogLength); \ - glGet##object##InfoLog(objectId, infoLogLength, &infoLogLength, &infoLog[0]); \ - if (statusValue == GL_TRUE) \ - { \ - FASTCG_MSG_BOX("OpenGLShader", #object " info log ('%s'): %s", rIdentifier.c_str(), infoLog.c_str()) \ - } \ - else \ - { \ - FASTCG_THROW_EXCEPTION(FastCG::Exception, #object " info log ('%s'): %s", rIdentifier.c_str(), infoLog.c_str()); \ - } \ - } \ +#define DECLARE_CHECK_STATUS_FN(object) \ + bool Check##object##Status(GLuint objectId, GLenum status, std::string &rInfoLog) \ + { \ + GLint statusValue; \ + glGet##object##iv(objectId, status, &statusValue); \ + if (!statusValue) \ + { \ + GLint infoLogLength; \ + glGet##object##iv(objectId, GL_INFO_LOG_LENGTH, &infoLogLength); \ + rInfoLog.resize(infoLogLength + 1); \ + if (infoLogLength > 0) \ + { \ + glGet##object##InfoLog(objectId, infoLogLength, &infoLogLength, &rInfoLog[0]); \ + } \ + rInfoLog[infoLogLength] = '\0'; \ + return false; \ + } \ + return true; \ } DECLARE_CHECK_STATUS_FN(Shader) DECLARE_CHECK_STATUS_FN(Program) - void GetResourceLocations(GLuint programId, std::unordered_map &rResourceInfos) + void GetShaderResourceLocations(const std::string &rIdentifier, GLuint programId, std::unordered_map &rResourceInfos) { for (GLenum iface : {GL_UNIFORM_BLOCK, GL_SHADER_STORAGE_BLOCK, GL_UNIFORM}) { - GLint numActiveResources = 0; - glGetProgramInterfaceiv(programId, iface, GL_ACTIVE_RESOURCES, &numActiveResources); - FASTCG_CHECK_OPENGL_ERROR(); + GLint activeResourcesCount = 0; + glGetProgramInterfaceiv(programId, iface, GL_ACTIVE_RESOURCES, &activeResourcesCount); + FASTCG_CHECK_OPENGL_ERROR("Couldn't get shader active resources count (program: %s)", rIdentifier.c_str()); - for (GLint i = 0; i < numActiveResources; ++i) + for (GLint i = 0; i < activeResourcesCount; ++i) { GLsizei length = 0; GLchar buffer[128]; glGetProgramResourceName(programId, iface, i, FASTCG_ARRAYSIZE(buffer), &length, buffer); - FASTCG_CHECK_OPENGL_ERROR(); + FASTCG_CHECK_OPENGL_ERROR("Couldn't get shader resource name (program: %s, resource idx: %d)", rIdentifier.c_str(), i); std::string resourceName(buffer, length); @@ -67,7 +66,7 @@ namespace { property = GL_LOCATION; glGetProgramResourceiv(programId, iface, i, 1, &property, 1, nullptr, &location); - FASTCG_CHECK_OPENGL_ERROR(); + FASTCG_CHECK_OPENGL_ERROR("Couldn't get uniform location (program: %s, resource: %s)", rIdentifier.c_str(), 
resourceName.c_str()); if (location == -1) { @@ -78,13 +77,14 @@ namespace property = GL_TYPE; glGetProgramResourceiv(programId, iface, i, 1, &property, 1, nullptr, &type); + FASTCG_CHECK_OPENGL_ERROR("Couldn't get uniform type (program: %s, resource: %s)", rIdentifier.c_str(), resourceName.c_str()); } else { property = GL_BUFFER_BINDING; glGetProgramResourceiv(programId, iface, i, 1, &property, 1, nullptr, &binding); + FASTCG_CHECK_OPENGL_ERROR("Couldn't get buffer binding (program: %s, resource idx: %d)", rIdentifier.c_str(), i); } - FASTCG_CHECK_OPENGL_ERROR(); if (binding == -1) { @@ -120,7 +120,52 @@ namespace FastCG { glShaderSource(shaderId, 1, (const char **)&rProgramData.pData, nullptr); glCompileShader(shaderId); + + std::string infoLog; + if (!CheckShaderStatus(shaderId, GL_COMPILE_STATUS, infoLog)) + { + std::vector tokens; + StringUtils::Split(infoLog, ":", tokens); + assert(tokens.size() >= 4); + + std::vector lines; + StringUtils::Split((const char *)rProgramData.pData, "\n", lines); + + auto errorLine = (size_t)atoll(tokens[1].c_str()); + assert(errorLine > 0); + assert(lines.size() > errorLine); + std::string snippet; + if (errorLine > 1) + { + snippet = " " + lines[errorLine - 2] + "\n"; + } + snippet += "--> " + lines[errorLine - 1] + "\n"; + if (lines.size() > errorLine) + { + snippet += " " + lines[errorLine] + "\n"; + } + + std::vector subTokens; + CollectionUtils::Slice(tokens, 2, tokens.size(), subTokens); + auto cause = StringUtils::Join(subTokens, ":"); + + FASTCG_THROW_EXCEPTION(FastCG::Exception, + "Couldn't compile shader module (program: %s, type: %s):\n%s\n%s", + mName.c_str(), + GetShaderTypeString(shaderType), + cause.c_str(), + snippet.c_str()); + } } +#if defined FASTCG_ANDROID + else + { + FASTCG_THROW_EXCEPTION(Exception, + "Couldn't find shader code (program: %s, type: %s)", + mName.c_str(), + GetShaderTypeString(shaderType)); + } +#else else { if (GLEW_ARB_gl_spirv) @@ -130,13 +175,23 @@ namespace FastCG } else { - FASTCG_THROW_EXCEPTION(Exception, "OpenGL: Cannot use SPIR-V shaders!"); + FASTCG_THROW_EXCEPTION(Exception, + "Can't use shader SPIR-V module (program: %s, type: %s)", + mName.c_str(), + GetShaderTypeString(shaderType)); } - } - FASTCG_CHECK_OPENGL_ERROR(); - - CheckShaderStatus(shaderId, GL_COMPILE_STATUS, mName); + std::string infoLog; + if (!CheckShaderStatus(shaderId, GL_COMPILE_STATUS, infoLog)) + { + FASTCG_THROW_EXCEPTION(Exception, + "Couldn't compile shader module (program: %s, type: %s):\n%s", + mName.c_str(), + GetShaderTypeString(shaderType), + infoLog.c_str()); + } + } +#endif mShadersIds[i] = shaderId; @@ -154,9 +209,18 @@ namespace FastCG } glLinkProgram(mProgramId); - CheckProgramStatus(mProgramId, GL_LINK_STATUS, GetName()); + { + std::string infoLog; + if (!CheckProgramStatus(mProgramId, GL_LINK_STATUS, infoLog)) + { + FASTCG_THROW_EXCEPTION(Exception, + "Couldn't link shader program (program: %s):\n%s", + mName.c_str(), + infoLog.c_str()); + } + } - GetResourceLocations(mProgramId, mResourceInfo); + GetShaderResourceLocations(mName, mProgramId, mResourceInfo); for (const auto &shaderId : mShadersIds) { @@ -165,7 +229,16 @@ namespace FastCG #ifdef _DEBUG glValidateProgram(mProgramId); - CheckProgramStatus(mProgramId, GL_VALIDATE_STATUS, GetName()); + { + std::string infoLog; + if (!CheckProgramStatus(mProgramId, GL_VALIDATE_STATUS, infoLog)) + { + FASTCG_THROW_EXCEPTION(Exception, + "Couldn't validate shader program (program: %s):\n%s", + mName.c_str(), + infoLog.c_str()); + } + } #endif #ifdef _DEBUG diff --git 
a/FastCG/src/Graphics/OpenGL/OpenGLTexture.cpp b/FastCG/src/Graphics/OpenGL/OpenGLTexture.cpp index 8bbc77bb..532754b9 100644 --- a/FastCG/src/Graphics/OpenGL/OpenGLTexture.cpp +++ b/FastCG/src/Graphics/OpenGL/OpenGLTexture.cpp @@ -15,7 +15,10 @@ namespace FastCG auto format = GetOpenGLFormat(mFormat); glGenTextures(1, &mTextureId); + FASTCG_CHECK_OPENGL_ERROR("Couldn't generate texture (texture: %s)", rArgs.name.c_str()); + glBindTexture(target, mTextureId); + FASTCG_CHECK_OPENGL_ERROR("Couldn't bind texture (texture: %s)", rArgs.name.c_str()); #ifdef _DEBUG { std::string textureLabel = GetName() + " (GL_TEXTURE)"; @@ -32,13 +35,13 @@ namespace FastCG glTexParameteri(target, GL_TEXTURE_COMPARE_MODE, GL_NONE); } glTexImage2D(target, 0, internalFormat, (GLsizei)mWidth, (GLsizei)mHeight, 0, format, GetOpenGLDataType(mFormat, mBitsPerChannel), rArgs.pData); + FASTCG_CHECK_OPENGL_ERROR("Couldn't create texture image (texture: %s)", rArgs.name.c_str()); if (rArgs.generateMipmap) { glGenerateMipmap(target); + FASTCG_CHECK_OPENGL_ERROR("Couldn't generate mipmaps (texture: %s)", rArgs.name.c_str()); } - - FASTCG_CHECK_OPENGL_ERROR(); } OpenGLTexture::~OpenGLTexture() diff --git a/FastCG/src/Graphics/ShaderImporter.cpp b/FastCG/src/Graphics/ShaderImporter.cpp index 572cf4cf..4ce5eb76 100644 --- a/FastCG/src/Graphics/ShaderImporter.cpp +++ b/FastCG/src/Graphics/ShaderImporter.cpp @@ -1,3 +1,4 @@ +#include #include #include #include @@ -5,6 +6,7 @@ #include #include #include +#include #include #include @@ -70,11 +72,12 @@ namespace FastCG ShaderTypeValueArray programFileNames; }; - const auto allowedRenderingPathMask = 1 << (RenderingPathMask)Application::GetInstance()->GetRenderingPath(); + const auto renderingPath = Application::GetInstance()->GetRenderingPath(); + const auto allowedRenderingPathMask = 1 << (RenderingPathMask)renderingPath; std::unordered_map shaderInfos; for (const auto &rShaderFileName : AssetSystem::GetInstance()->List("shaders", true)) { - // only import shaders fro mthe selected rendering path + // only import shaders from the selected rendering path if ((allowedRenderingPathMask & GetSuitableRenderingPathMask(rShaderFileName)) == 0) { continue; @@ -105,6 +108,9 @@ namespace FastCG rShaderInfo.programFileNames[(ShaderTypeInt)shaderType] = rShaderFileName; } + + FASTCG_LOG_VERBOSE("Importing shaders (%s):", GetRenderingPathString(renderingPath)); + Shader::Args shaderArgs{}; for (const auto &rEntry : shaderInfos) { @@ -119,9 +125,11 @@ namespace FastCG continue; } + FASTCG_LOG_VERBOSE("- %s (%s)", shaderArgs.name.c_str(), shaderArgs.text ? 
"text" : "binary"); + if (shaderArgs.text) { - auto programSource = ShaderSource::ParseFile(rShaderInfo.programFileNames[i]); + auto programSource = ShaderSource::ParseFile(rShaderInfo.programFileNames[i], (ShaderType)i); shaderArgs.programsData[i].dataSize = programSource.size() + 1; programsData[i] = std::make_unique(shaderArgs.programsData[i].dataSize); std::copy(programSource.cbegin(), programSource.cend(), (char *)programsData[i].get()); diff --git a/FastCG/src/Graphics/Vulkan/VulkanBuffer.cpp b/FastCG/src/Graphics/Vulkan/VulkanBuffer.cpp index 48f40767..d9804223 100644 --- a/FastCG/src/Graphics/Vulkan/VulkanBuffer.cpp +++ b/FastCG/src/Graphics/Vulkan/VulkanBuffer.cpp @@ -36,7 +36,7 @@ namespace FastCG for (uint32_t i = 0; i < frameDataCount; ++i) { VmaAllocationCreateInfo allocationCreateInfo; - if (IsDynamic()) + if (UsesMappableMemory()) { allocationCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT; } diff --git a/FastCG/src/Graphics/Vulkan/VulkanGraphicsContext.cpp b/FastCG/src/Graphics/Vulkan/VulkanGraphicsContext.cpp index eee18408..f2b77492 100644 --- a/FastCG/src/Graphics/Vulkan/VulkanGraphicsContext.cpp +++ b/FastCG/src/Graphics/Vulkan/VulkanGraphicsContext.cpp @@ -1,5 +1,6 @@ #ifdef FASTCG_VULKAN +#include #include #include #include @@ -31,7 +32,7 @@ namespace FastCG VulkanGraphicsContext::~VulkanGraphicsContext() = default; - void VulkanGraphicsContext::Begin() + bool VulkanGraphicsContext::Begin() { assert(mEnded); mEnded = false; @@ -43,6 +44,12 @@ namespace FastCG mVertexBuffers.resize(0); mpIndexBuffer = VK_NULL_HANDLE; mPipelineResourcesUsage.resize(0); +#if defined FASTCG_ANDROID + if (VulkanGraphicsSystem::GetInstance()->IsHeadless() || AndroidApplication::GetInstance()->IsPaused()) + { + return false; + } +#endif #if !defined FASTCG_DISABLE_GPU_TIMING if (mTimeElapsedQueries.empty()) { @@ -53,6 +60,7 @@ namespace FastCG EnqueueTimestampQuery(mTimeElapsedQueries[VulkanGraphicsSystem::GetInstance()->GetCurrentFrame()].start); mElapsedTimes[VulkanGraphicsSystem::GetInstance()->GetCurrentFrame()] = 0; #endif + return true; } void VulkanGraphicsContext::PushDebugMarker(const char *pName) @@ -199,12 +207,7 @@ namespace FastCG auto &rBufferFrameData = pBuffer->GetFrameData(frameIndex); assert(rBufferFrameData.allocation != VK_NULL_HANDLE); - VkMemoryPropertyFlags memPropFlags; - vmaGetAllocationMemoryProperties(VulkanGraphicsSystem::GetInstance()->GetAllocator(), - rBufferFrameData.allocation, - &memPropFlags); - - if ((memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) + if (pBuffer->UsesMappableMemory()) { void *mappedData; FASTCG_CHECK_VK_RESULT(vmaMapMemory(VulkanGraphicsSystem::GetInstance()->GetAllocator(), @@ -214,6 +217,10 @@ namespace FastCG vmaUnmapMemory(VulkanGraphicsSystem::GetInstance()->GetAllocator(), rBufferFrameData.allocation); + VkMemoryPropertyFlags memPropFlags; + vmaGetAllocationMemoryProperties(VulkanGraphicsSystem::GetInstance()->GetAllocator(), + rBufferFrameData.allocation, + &memPropFlags); if ((memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0) { FASTCG_CHECK_VK_RESULT(vmaFlushAllocation(VulkanGraphicsSystem::GetInstance()->GetAllocator(), @@ -244,12 +251,7 @@ namespace FastCG assert(dataSize > 0); assert(pData != nullptr); - VkMemoryPropertyFlags memPropFlags; - vmaGetAllocationMemoryProperties(VulkanGraphicsSystem::GetInstance()->GetAllocator(), - pTexture->GetAllocation(), - &memPropFlags); - - if ((memPropFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) + if (pTexture->UsesMappableMemory()) { void 
*mappedData; FASTCG_CHECK_VK_RESULT(vmaMapMemory(VulkanGraphicsSystem::GetInstance()->GetAllocator(), @@ -259,6 +261,10 @@ vmaUnmapMemory(VulkanGraphicsSystem::GetInstance()->GetAllocator(), pTexture->GetAllocation()); + VkMemoryPropertyFlags memPropFlags; + vmaGetAllocationMemoryProperties(VulkanGraphicsSystem::GetInstance()->GetAllocator(), + pTexture->GetAllocation(), + &memPropFlags); if ((memPropFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT) == 0) { FASTCG_CHECK_VK_RESULT(vmaFlushAllocation(VulkanGraphicsSystem::GetInstance()->GetAllocator(), @@ -1013,14 +1019,14 @@ renderPassBeginInfo.clearValueCount = (uint32_t)rRenderPassCommand.clearValues.size(); renderPassBeginInfo.pClearValues = rRenderPassCommand.clearValues.empty() ? nullptr : &rRenderPassCommand.clearValues[0]; - VkSubpassBeginInfo subpassBeginInfo; - subpassBeginInfo.sType = VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO; + VkSubpassBeginInfoKHR subpassBeginInfo; + subpassBeginInfo.sType = VK_STRUCTURE_TYPE_SUBPASS_BEGIN_INFO_KHR; subpassBeginInfo.pNext = nullptr; subpassBeginInfo.contents = VK_SUBPASS_CONTENTS_INLINE; - vkCmdBeginRenderPass2(VulkanGraphicsSystem::GetInstance()->GetCurrentCommandBuffer(), - &renderPassBeginInfo, - &subpassBeginInfo); + VkExt::vkCmdBeginRenderPass2KHR(VulkanGraphicsSystem::GetInstance()->GetCurrentCommandBuffer(), - &renderPassBeginInfo, - &subpassBeginInfo); VkPipeline currentPipeline{VK_NULL_HANDLE}; for (; lastUsedPipelineCommandIdx < rRenderPassCommand.lastPipelineCommandIdx; ++lastUsedPipelineCommandIdx) @@ -1095,10 +1101,10 @@ } } - VkSubpassEndInfo subpassEndInfo; - subpassEndInfo.sType = VK_STRUCTURE_TYPE_SUBPASS_END_INFO; + VkSubpassEndInfoKHR subpassEndInfo; + subpassEndInfo.sType = VK_STRUCTURE_TYPE_SUBPASS_END_INFO_KHR; subpassEndInfo.pNext = nullptr; - vkCmdEndRenderPass2(VulkanGraphicsSystem::GetInstance()->GetCurrentCommandBuffer(), &subpassEndInfo); + VkExt::vkCmdEndRenderPass2KHR(VulkanGraphicsSystem::GetInstance()->GetCurrentCommandBuffer(), &subpassEndInfo); auto TransitionRenderTargetToFinalLayout = [&](const auto *pRenderTarget) { diff --git a/FastCG/src/Graphics/Vulkan/VulkanGraphicsSystem.cpp b/FastCG/src/Graphics/Vulkan/VulkanGraphicsSystem.cpp index a805dd03..9627f0a9 100644 --- a/FastCG/src/Graphics/Vulkan/VulkanGraphicsSystem.cpp +++ b/FastCG/src/Graphics/Vulkan/VulkanGraphicsSystem.cpp @@ -1,20 +1,17 @@ #ifdef FASTCG_VULKAN -#if defined FASTCG_WINDOWS -#include -#elif defined FASTCG_LINUX -#include -#endif +#include #include #include #include #include #include +#include #include #include +#include #include -#include #include #include @@ -23,6 +20,15 @@ namespace template <typename T> struct StrComparer; + template <> + struct StrComparer<const char *> + { + static bool Compare(const char *a, const char *b) + { + return strcmp(a, b) == 0; + } + }; + template <> struct StrComparer { @@ -56,9 +62,12 @@ #elif defined FASTCG_LINUX auto *pDisplay = FastCG::X11Application::GetInstance()->GetDisplay(); assert(pDisplay != nullptr); - // Uses visual ID from default visual. Only works because we're using a "simple window". + // uses visual ID from default visual. Only works because we're using a "simple window".
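// On X11, presentation support is a property of the (physical device, queue family, visual) triple, which is why the default visual's ID is looked up below and passed to vkGetPhysicalDeviceXlibPresentationSupportKHR.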
auto visualId = XVisualIDFromVisual(DefaultVisual(pDisplay, DefaultScreen(pDisplay))); supportsPresentation = vkGetPhysicalDeviceXlibPresentationSupportKHR(physicalDevice, queueFamilyIdx, pDisplay, visualId); +#elif defined FASTCG_ANDROID + // TODO: apparently, there's no need for checking whether a queue family supports presentation on Android + supportsPresentation = true; #else #error "FASTCG: Don't know how to check presentation support" #endif @@ -77,8 +86,9 @@ namespace const VkDebugUtilsMessengerCallbackDataEXT *pCallbackData, void *pUserData) { - std::cerr << "[VULKAN]" - << " - " << FastCG::GetVkDebugUtilsMessageSeverityFlagBitsString(messageSeverity); + std::stringstream stream; + stream << "[VULKAN]" + << " - " << FastCG::GetVkDebugUtilsMessageSeverityFlagBitsString(messageSeverity); // if ((messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT) != 0) // { @@ -88,71 +98,64 @@ namespace bool prevType = false; if ((messageType & VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT) != 0) { - std::cerr << " - " << FastCG::GetVkDebugUtilsMessageTypeFlagBitsString(VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT); + stream << " - " << FastCG::GetVkDebugUtilsMessageTypeFlagBitsString(VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT); prevType = true; } if ((messageType & VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT) != 0) { - std::cerr << (prevType ? "|" : " - ") << FastCG::GetVkDebugUtilsMessageTypeFlagBitsString(VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT); + stream << (prevType ? "|" : " - ") << FastCG::GetVkDebugUtilsMessageTypeFlagBitsString(VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT); prevType = true; } if ((messageType & VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT) != 0) { - std::cerr << (prevType ? "|" : " - ") << FastCG::GetVkDebugUtilsMessageTypeFlagBitsString(VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT); + stream << (prevType ? 
"|" : " - ") << FastCG::GetVkDebugUtilsMessageTypeFlagBitsString(VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT); prevType = true; } - std::cerr << " - " << pCallbackData->messageIdNumber - << " - " << pCallbackData->pMessage - << std::endl; - return VK_FALSE; - } -#endif + stream << " - " << pCallbackData->messageIdNumber + << " - " << pCallbackData->pMessage + << std::endl; -#define FASTCG_VK_EXT_FN(fn) \ - PFN_##fn fn = nullptr + FASTCG_LOG_DEBUG("%s", stream.str().c_str()); - namespace VkExt - { -#if _DEBUG - FASTCG_VK_EXT_FN(vkCreateDebugUtilsMessengerEXT); - FASTCG_VK_EXT_FN(vkDestroyDebugUtilsMessengerEXT); - FASTCG_VK_EXT_FN(vkCmdBeginDebugUtilsLabelEXT); - FASTCG_VK_EXT_FN(vkCmdEndDebugUtilsLabelEXT); - FASTCG_VK_EXT_FN(vkSetDebugUtilsObjectNameEXT); -#endif + return VK_FALSE; } - -#undef FASTCG_VK_EXT_FN +#endif #define FASTCG_LOAD_VK_INSTANCE_EXT_FN(instance, fn) \ - VkExt::fn = (PFN_##fn)vkGetInstanceProcAddr(instance, #fn); \ - if (VkExt::fn == nullptr) \ + FastCG::VkExt::fn = (PFN_##fn)vkGetInstanceProcAddr(instance, #fn); \ + if (FastCG::VkExt::fn == nullptr) \ { \ FASTCG_THROW_EXCEPTION(FastCG::Exception, "Vulkan: Failed to load Vulkan instance extension function " #fn); \ } - void LoadVulkanInstanceExtensionFunctions(VkInstance instance) + void LoadVulkanInstanceExtensionFunctions(VkInstance instance, const std::vector &rExtensions) { #if _DEBUG - FASTCG_LOAD_VK_INSTANCE_EXT_FN(instance, vkCreateDebugUtilsMessengerEXT); - FASTCG_LOAD_VK_INSTANCE_EXT_FN(instance, vkDestroyDebugUtilsMessengerEXT); - FASTCG_LOAD_VK_INSTANCE_EXT_FN(instance, vkCmdBeginDebugUtilsLabelEXT); - FASTCG_LOAD_VK_INSTANCE_EXT_FN(instance, vkCmdEndDebugUtilsLabelEXT); - FASTCG_LOAD_VK_INSTANCE_EXT_FN(instance, vkSetDebugUtilsObjectNameEXT); + if (Contains(rExtensions, VK_EXT_DEBUG_UTILS_EXTENSION_NAME)) + { + FASTCG_LOAD_VK_INSTANCE_EXT_FN(instance, vkCreateDebugUtilsMessengerEXT); + FASTCG_LOAD_VK_INSTANCE_EXT_FN(instance, vkDestroyDebugUtilsMessengerEXT); + FASTCG_LOAD_VK_INSTANCE_EXT_FN(instance, vkCmdBeginDebugUtilsLabelEXT); + FASTCG_LOAD_VK_INSTANCE_EXT_FN(instance, vkCmdEndDebugUtilsLabelEXT); + FASTCG_LOAD_VK_INSTANCE_EXT_FN(instance, vkSetDebugUtilsObjectNameEXT); + } #endif } #undef FASTCG_LOAD_VK_INSTANCE_EXT_FN -#define FASTCG_LOAD_VK_DEVICE_EXT_FN(instance, fn) \ - VkExt::fn = (PFN_##fn)vkGetDeviceProcAddr(instance, #fn); \ - if (VkExt::fn == nullptr) \ +#define FASTCG_LOAD_VK_DEVICE_EXT_FN(device, fn) \ + FastCG::VkExt::fn = (PFN_##fn)vkGetDeviceProcAddr(device, #fn); \ + if (FastCG::VkExt::fn == nullptr) \ { \ FASTCG_THROW_EXCEPTION(FastCG::Exception, "Vulkan: Failed to load Vulkan device extension function " #fn); \ } void LoadVulkanDeviceExtensionFunctions(VkDevice device) { + FASTCG_LOAD_VK_DEVICE_EXT_FN(device, vkCreateRenderPass2KHR); + FASTCG_LOAD_VK_DEVICE_EXT_FN(device, vkCmdBeginRenderPass2KHR); + FASTCG_LOAD_VK_DEVICE_EXT_FN(device, vkCmdEndRenderPass2KHR); } #undef FASTCG_LOAD_VK_DEVICE_EXT_FN @@ -193,6 +196,24 @@ namespace namespace FastCG { +#define FASTCG_IMPL_VK_EXT_FN(fn) PFN_##fn fn = nullptr + + namespace VkExt + { + FASTCG_IMPL_VK_EXT_FN(vkCreateRenderPass2KHR); + FASTCG_IMPL_VK_EXT_FN(vkCmdBeginRenderPass2KHR); + FASTCG_IMPL_VK_EXT_FN(vkCmdEndRenderPass2KHR); +#if _DEBUG + FASTCG_IMPL_VK_EXT_FN(vkCreateDebugUtilsMessengerEXT); + FASTCG_IMPL_VK_EXT_FN(vkDestroyDebugUtilsMessengerEXT); + FASTCG_IMPL_VK_EXT_FN(vkCmdBeginDebugUtilsLabelEXT); + FASTCG_IMPL_VK_EXT_FN(vkCmdEndDebugUtilsLabelEXT); + FASTCG_IMPL_VK_EXT_FN(vkSetDebugUtilsObjectNameEXT); +#endif + } + +#undef 
FASTCG_IMPL_VK_EXT_FN + VulkanGraphicsSystem::VulkanGraphicsSystem(const GraphicsSystemArgs &rArgs) : BaseGraphicsSystem(rArgs) { } @@ -207,6 +228,7 @@ namespace FastCG CreateSurface(); SelectPhysicalDevice(); AcquirePhysicalDeviceProperties(); + AcquirePhysicalDeviceSurfaceProperties(); CreateDeviceAndGetQueues(); CreateAllocator(); CreateSynchronizationObjects(); @@ -252,12 +274,12 @@ namespace FastCG VkApplicationInfo applicationInfo; applicationInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO; applicationInfo.pNext = nullptr; - applicationInfo.pApplicationName = ""; + applicationInfo.pApplicationName = FASTCG_PROJECT_NAME; // TODO: provide a mechanism for users to specify their app versions applicationInfo.applicationVersion = VK_MAKE_VERSION(1, 0, 0); applicationInfo.pEngineName = "FastCG"; applicationInfo.engineVersion = VK_MAKE_VERSION(MAJOR_VERSION, MINOR_VERSION, PATCH_VERSION); - applicationInfo.apiVersion = VK_API_VERSION_1_3; + applicationInfo.apiVersion = VK_API_VERSION; #if _DEBUG uint32_t availableLayerCount; @@ -265,10 +287,10 @@ namespace FastCG std::vector availableLayers(availableLayerCount); vkEnumerateInstanceLayerProperties(&availableLayerCount, &availableLayers[0]); - std::cout << "Available layers:" << std::endl; + FASTCG_LOG_DEBUG("Available layers:"); for (const auto &rLayer : availableLayers) { - std::cout << rLayer.layerName << std::endl; + FASTCG_LOG_DEBUG("- %s", rLayer.layerName); } #endif @@ -280,7 +302,7 @@ namespace FastCG } else { - std::cout << "VK_LAYER_KHRONOS_validation not available, ignoring it" << std::endl; + FASTCG_LOG_DEBUG("VK_LAYER_KHRONOS_validation not available, ignoring it"); } #endif @@ -290,26 +312,26 @@ namespace FastCG vkEnumerateInstanceExtensionProperties(nullptr, &availableExtensionCount, &availableExtensions[0]); #if _DEBUG - std::cout << "Available extensions:" << std::endl; + FASTCG_LOG_DEBUG("Available extensions:"); for (const auto &rExtension : availableExtensions) { - std::cout << rExtension.extensionName << std::endl; + FASTCG_LOG_DEBUG("- %s", rExtension.extensionName); } #endif - std::vector extensions; - if (!Contains(availableExtensions, "VK_KHR_surface")) { FASTCG_THROW_EXCEPTION(Exception, "Couldn't find VK_KHR_surface extension"); } - extensions.emplace_back("VK_KHR_surface"); + mInstanceExtensions.emplace_back("VK_KHR_surface"); const char *platformSurfaceExtName = #if defined FASTCG_WINDOWS "VK_KHR_win32_surface" #elif defined FASTCG_LINUX VK_KHR_XLIB_SURFACE_EXTENSION_NAME +#elif defined FASTCG_ANDROID + "VK_KHR_android_surface" #else #error "FASTCG: Don't know how to enable surfaces in the current platform" #endif @@ -318,14 +340,17 @@ namespace FastCG { FASTCG_THROW_EXCEPTION(Exception, "Couldn't find platform surface extension"); } - extensions.emplace_back(platformSurfaceExtName); + mInstanceExtensions.emplace_back(platformSurfaceExtName); #if _DEBUG - if (!Contains(availableExtensions, VK_EXT_DEBUG_UTILS_EXTENSION_NAME)) + if (Contains(availableExtensions, VK_EXT_DEBUG_UTILS_EXTENSION_NAME)) + { + mInstanceExtensions.emplace_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME); + } + else { - FASTCG_THROW_EXCEPTION(Exception, "Couldn't find debug utils extension"); + FASTCG_LOG_DEBUG("VK_EXT_DEBUG_UTILS_EXTENSION_NAME not available, ignoring it"); } - extensions.emplace_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME); #endif VkInstanceCreateInfo instanceCreateInfo; @@ -335,29 +360,32 @@ namespace FastCG instanceCreateInfo.pApplicationInfo = &applicationInfo; instanceCreateInfo.enabledLayerCount = (uint32_t)usedLayers.size(); 
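// Requesting a layer or extension that wasn't enumerated makes vkCreateInstance fail with VK_ERROR_LAYER_NOT_PRESENT / VK_ERROR_EXTENSION_NOT_PRESENT, hence the Contains() checks gating every name added above.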
instanceCreateInfo.ppEnabledLayerNames = usedLayers.empty() ? nullptr : &usedLayers[0]; - instanceCreateInfo.enabledExtensionCount = (uint32_t)extensions.size(); - instanceCreateInfo.ppEnabledExtensionNames = extensions.empty() ? nullptr : &extensions[0]; + instanceCreateInfo.enabledExtensionCount = (uint32_t)mInstanceExtensions.size(); + instanceCreateInfo.ppEnabledExtensionNames = mInstanceExtensions.empty() ? nullptr : &mInstanceExtensions[0]; FASTCG_CHECK_VK_RESULT(vkCreateInstance(&instanceCreateInfo, mAllocationCallbacks.get(), &mInstance)); - LoadVulkanInstanceExtensionFunctions(mInstance); + LoadVulkanInstanceExtensionFunctions(mInstance, mInstanceExtensions); #if _DEBUG - VkDebugUtilsMessengerCreateInfoEXT debugUtilsMessengerCreateInfo; - debugUtilsMessengerCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT; - debugUtilsMessengerCreateInfo.pNext = nullptr; - debugUtilsMessengerCreateInfo.flags = 0; - debugUtilsMessengerCreateInfo.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT | - VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT | - VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | - VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT; - debugUtilsMessengerCreateInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | - VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | - VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT; - debugUtilsMessengerCreateInfo.pfnUserCallback = VulkanDebugCallback; - debugUtilsMessengerCreateInfo.pUserData = nullptr; - - FASTCG_CHECK_VK_RESULT(VkExt::vkCreateDebugUtilsMessengerEXT(mInstance, &debugUtilsMessengerCreateInfo, mAllocationCallbacks.get(), &mDebugMessenger)); + if (Contains(mInstanceExtensions, VK_EXT_DEBUG_UTILS_EXTENSION_NAME)) + { + VkDebugUtilsMessengerCreateInfoEXT debugUtilsMessengerCreateInfo; + debugUtilsMessengerCreateInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT; + debugUtilsMessengerCreateInfo.pNext = nullptr; + debugUtilsMessengerCreateInfo.flags = 0; + debugUtilsMessengerCreateInfo.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_SEVERITY_INFO_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT; + debugUtilsMessengerCreateInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | + VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT; + debugUtilsMessengerCreateInfo.pfnUserCallback = VulkanDebugCallback; + debugUtilsMessengerCreateInfo.pUserData = nullptr; + + FASTCG_CHECK_VK_RESULT(VkExt::vkCreateDebugUtilsMessengerEXT(mInstance, &debugUtilsMessengerCreateInfo, mAllocationCallbacks.get(), &mDebugMessenger)); + } #endif } @@ -382,6 +410,17 @@ namespace FastCG assert(surfaceCreateInfo.dpy != nullptr); surfaceCreateInfo.window = X11Application::GetInstance()->CreateSimpleWindow(); FASTCG_CHECK_VK_RESULT(vkCreateXlibSurfaceKHR(mInstance, &surfaceCreateInfo, mAllocationCallbacks.get(), &mSurface)); +#elif defined FASTCG_ANDROID + auto *pWindow = AndroidApplication::GetInstance()->GetWindow(); + if (pWindow != nullptr) + { + VkAndroidSurfaceCreateInfoKHR surfaceCreateInfo; + surfaceCreateInfo.sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR; + surfaceCreateInfo.pNext = NULL; + surfaceCreateInfo.flags = 0; + surfaceCreateInfo.window = pWindow; + FASTCG_CHECK_VK_RESULT(vkCreateAndroidSurfaceKHR(mInstance, &surfaceCreateInfo, mAllocationCallbacks.get(), &mSurface)); + } #else #error "FASTCG: Don't know how to create 
presentation surface" #endif @@ -397,12 +436,12 @@ namespace FastCG FASTCG_CHECK_VK_RESULT(vkEnumeratePhysicalDevices(mInstance, &numPhysicalDevices, &physicalDevices[0])); #if _DEBUG - std::cout << "Devices:" << std::endl; + FASTCG_LOG_DEBUG("Devices:"); for (auto &rPhysicalDevice : physicalDevices) { VkPhysicalDeviceProperties properties; vkGetPhysicalDeviceProperties(rPhysicalDevice, &properties); - std::cout << properties.deviceName << std::endl; + FASTCG_LOG_DEBUG("- %s", properties.deviceName); } #endif @@ -452,7 +491,7 @@ namespace FastCG allocatorCreateInfo.pHeapSizeLimit = nullptr; allocatorCreateInfo.pVulkanFunctions = nullptr; allocatorCreateInfo.instance = mInstance; - allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION_1_3; + allocatorCreateInfo.vulkanApiVersion = VK_API_VERSION; FASTCG_CHECK_VK_RESULT(vmaCreateAllocator(&allocatorCreateInfo, &mAllocator)); } @@ -466,6 +505,14 @@ namespace FastCG { vkGetPhysicalDeviceFormatProperties(mPhysicalDevice, format, &mFormatProperties[format]); } + } + + void VulkanGraphicsSystem::AcquirePhysicalDeviceSurfaceProperties() + { + if (mSurface == VK_NULL_HANDLE) + { + return; + } VkSurfaceCapabilitiesKHR surfaceCapabilities; FASTCG_CHECK_VK_RESULT(vkGetPhysicalDeviceSurfaceCapabilitiesKHR(mPhysicalDevice, mSurface, &surfaceCapabilities)); @@ -530,9 +577,10 @@ namespace FastCG deviceQueueCreateInfo.queueCount = 1; deviceQueueCreateInfo.pQueuePriorities = sc_queuePriorities; - std::vector<const char *> extensions; + // TODO: check available extensions - extensions.push_back("VK_KHR_swapchain"); + mDeviceExtensions.push_back("VK_KHR_swapchain"); + mDeviceExtensions.push_back("VK_KHR_create_renderpass2"); VkDeviceCreateInfo deviceCreateInfo; deviceCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; @@ -540,14 +588,16 @@ namespace FastCG deviceCreateInfo.flags = 0; deviceCreateInfo.queueCreateInfoCount = queueCount; deviceCreateInfo.pQueueCreateInfos = &deviceQueueCreateInfos[0]; - deviceCreateInfo.enabledExtensionCount = (uint32_t)extensions.size(); - deviceCreateInfo.ppEnabledExtensionNames = extensions.empty() ? nullptr : &extensions[0]; + deviceCreateInfo.enabledExtensionCount = (uint32_t)mDeviceExtensions.size(); + deviceCreateInfo.ppEnabledExtensionNames = mDeviceExtensions.empty() ?
nullptr : &mDeviceExtensions[0]; deviceCreateInfo.enabledLayerCount = 0; deviceCreateInfo.ppEnabledLayerNames = nullptr; deviceCreateInfo.pEnabledFeatures = nullptr; FASTCG_CHECK_VK_RESULT(vkCreateDevice(mPhysicalDevice, &deviceCreateInfo, mAllocationCallbacks.get(), &mDevice)); + LoadVulkanDeviceExtensionFunctions(mDevice); + vkGetDeviceQueue(mDevice, mGraphicsAndPresentQueueFamilyIndex, 0, &mGraphicsAndPresentQueue); } @@ -555,6 +605,11 @@ namespace FastCG { DestroySwapChainAndClearImages(); + if (mSurface == VK_NULL_HANDLE) + { + return; + } + VkSwapchainCreateInfoKHR swapChainCreateInfo; swapChainCreateInfo.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR; swapChainCreateInfo.pNext = nullptr; @@ -880,7 +935,10 @@ namespace FastCG if (mInstance != VK_NULL_HANDLE) { #if _DEBUG - VkExt::vkDestroyDebugUtilsMessengerEXT(mInstance, mDebugMessenger, nullptr); + if (Contains(mInstanceExtensions, VK_EXT_DEBUG_UTILS_EXTENSION_NAME)) + { + VkExt::vkDestroyDebugUtilsMessengerEXT(mInstance, mDebugMessenger, nullptr); + } #endif vkDestroyInstance(mInstance, mAllocationCallbacks.get()); @@ -890,15 +948,20 @@ namespace FastCG void VulkanGraphicsSystem::Present() { - auto imageMemoryTransition = GetLastImageMemoryTransition(GetCurrentSwapChainTexture()); - if (imageMemoryTransition.layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) +#if defined FASTCG_ANDROID + if (!IsHeadless() && !AndroidApplication::GetInstance()->IsPaused()) +#endif { - GetImmediateGraphicsContext()->AddTextureMemoryBarrier(GetCurrentSwapChainTexture(), - VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, - imageMemoryTransition.accessMask, - 0, - imageMemoryTransition.stageMask, - VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT); + auto imageMemoryTransition = GetLastImageMemoryTransition(GetCurrentSwapChainTexture()); + if (imageMemoryTransition.layout != VK_IMAGE_LAYOUT_PRESENT_SRC_KHR) + { + GetImmediateGraphicsContext()->AddTextureMemoryBarrier(GetCurrentSwapChainTexture(), + VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, + imageMemoryTransition.accessMask, + 0, + imageMemoryTransition.stageMask, + VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT); + } } GetImmediateGraphicsContext()->End(); @@ -909,9 +972,20 @@ namespace FastCG VkSubmitInfo submitInfo; submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO; submitInfo.pNext = nullptr; - submitInfo.waitSemaphoreCount = 1; - submitInfo.pWaitSemaphores = &mAcquireSwapChainImageSemaphores[mCurrentFrame]; - submitInfo.pWaitDstStageMask = &waitDstStageMask; +#if defined FASTCG_ANDROID + if (IsHeadless() || AndroidApplication::GetInstance()->IsPaused()) + { + submitInfo.waitSemaphoreCount = 0; + submitInfo.pWaitSemaphores = nullptr; + submitInfo.pWaitDstStageMask = nullptr; + } + else +#endif + { + submitInfo.waitSemaphoreCount = 1; + submitInfo.pWaitSemaphores = &mAcquireSwapChainImageSemaphores[mCurrentFrame]; + submitInfo.pWaitDstStageMask = &waitDstStageMask; + } submitInfo.commandBufferCount = 1; submitInfo.pCommandBuffers = &mCommandBuffers[mCurrentFrame]; submitInfo.signalSemaphoreCount = 1; @@ -921,29 +995,34 @@ namespace FastCG FASTCG_THROW_EXCEPTION(Exception, "Vulkan: Couldn't submit commands"); } - VkPresentInfoKHR presentInfo; - presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; - presentInfo.pNext = nullptr; - presentInfo.waitSemaphoreCount = 1; - presentInfo.pWaitSemaphores = &mSubmitFinishedSemaphores[mCurrentFrame]; - presentInfo.swapchainCount = 1; - presentInfo.pSwapchains = &mSwapChain; - presentInfo.pImageIndices = &mSwapChainIndex; - presentInfo.pResults = nullptr; - bool outdatedSwapchain = false; - auto result = 
vkQueuePresentKHR(mGraphicsAndPresentQueue, &presentInfo); - switch (result) +#if defined FASTCG_ANDROID + if (!IsHeadless() && !AndroidApplication::GetInstance()->IsPaused()) +#endif { - case VK_SUCCESS: - case VK_SUBOPTIMAL_KHR: - break; - case VK_ERROR_OUT_OF_DATE_KHR: - outdatedSwapchain = true; - break; - default: - FASTCG_THROW_EXCEPTION(Exception, "Vulkan: Couldn't present"); - break; + VkPresentInfoKHR presentInfo; + presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR; + presentInfo.pNext = nullptr; + presentInfo.waitSemaphoreCount = 1; + presentInfo.pWaitSemaphores = &mSubmitFinishedSemaphores[mCurrentFrame]; + presentInfo.swapchainCount = 1; + presentInfo.pSwapchains = &mSwapChain; + presentInfo.pImageIndices = &mSwapChainIndex; + presentInfo.pResults = nullptr; + + auto result = vkQueuePresentKHR(mGraphicsAndPresentQueue, &presentInfo); + switch (result) + { + case VK_SUCCESS: + case VK_SUBOPTIMAL_KHR: + break; + case VK_ERROR_OUT_OF_DATE_KHR: + outdatedSwapchain = true; + break; + default: + FASTCG_THROW_EXCEPTION(Exception, "Vulkan: Couldn't present"); + break; + } } mCurrentFrame = (mCurrentFrame + 1) % mMaxSimultaneousFrames; @@ -971,13 +1050,18 @@ namespace FastCG GetImmediateGraphicsContext()->Begin(); - if (outdatedSwapchain) - { - RecreateSwapChainAndGetImages(); - } - else +#if defined FASTCG_ANDROID + if (!IsHeadless() && !AndroidApplication::GetInstance()->IsPaused()) +#endif { - AcquireNextSwapChainImage(); + if (outdatedSwapchain) + { + RecreateSwapChainAndGetImages(); + } + else + { + AcquireNextSwapChainImage(); + } } } @@ -1066,8 +1150,8 @@ namespace FastCG return {it->first, it->second}; } - VkRenderPassCreateInfo2 renderPassCreateInfo; - renderPassCreateInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2; + VkRenderPassCreateInfo2KHR renderPassCreateInfo; + renderPassCreateInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO_2_KHR; renderPassCreateInfo.pNext = nullptr; renderPassCreateInfo.flags = 0; renderPassCreateInfo.dependencyCount = 0; @@ -1075,8 +1159,8 @@ namespace FastCG renderPassCreateInfo.correlatedViewMaskCount = 0; renderPassCreateInfo.pCorrelatedViewMasks = nullptr; - VkSubpassDescription2 subpassDescription; - subpassDescription.sType = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2; + VkSubpassDescription2KHR subpassDescription; + subpassDescription.sType = VK_STRUCTURE_TYPE_SUBPASS_DESCRIPTION_2_KHR; subpassDescription.pNext = nullptr; subpassDescription.flags = 0; subpassDescription.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; @@ -1090,8 +1174,8 @@ namespace FastCG renderPassCreateInfo.subpassCount = 1; renderPassCreateInfo.pSubpasses = &subpassDescription; - std::vector<VkAttachmentDescription2> attachmentDescriptions; - std::vector<VkAttachmentReference2> colorAttachmentReferences; + std::vector<VkAttachmentDescription2KHR> attachmentDescriptions; + std::vector<VkAttachmentReference2KHR> colorAttachmentReferences; subpassDescription.pDepthStencilAttachment = nullptr; @@ -1108,7 +1192,7 @@ namespace FastCG attachmentDescriptions.emplace_back(); auto &rAttachmentDescription = attachmentDescriptions.back(); - rAttachmentDescription.sType = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2; + rAttachmentDescription.sType = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2_KHR; rAttachmentDescription.pNext = nullptr; rAttachmentDescription.flags = 0; rAttachmentDescription.format = pRenderTarget->GetVulkanFormat(); @@ -1122,14 +1206,14 @@ namespace FastCG colorAttachmentReferences.emplace_back(); auto &rColorAttachmentReference = colorAttachmentReferences.back(); - rColorAttachmentReference.sType = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2; +
rColorAttachmentReference.sType = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR; rColorAttachmentReference.pNext = nullptr; rColorAttachmentReference.attachment = attachmentIdx; rColorAttachmentReference.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; rColorAttachmentReference.aspectMask = pRenderTarget->GetAspectFlags(); } - VkAttachmentReference2 depthAttachmentReference; + VkAttachmentReference2KHR depthAttachmentReference; if (rRenderPassDescription.pDepthStencilBuffer != nullptr) { auto attachmentIdx = (uint32_t)attachmentDescriptions.size(); @@ -1138,7 +1222,7 @@ namespace FastCG attachmentDescriptions.emplace_back(); auto &rAttachmentDescription = attachmentDescriptions.back(); - rAttachmentDescription.sType = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2; + rAttachmentDescription.sType = VK_STRUCTURE_TYPE_ATTACHMENT_DESCRIPTION_2_KHR; rAttachmentDescription.pNext = nullptr; rAttachmentDescription.flags = 0; rAttachmentDescription.format = rRenderPassDescription.pDepthStencilBuffer->GetVulkanFormat(); @@ -1158,7 +1242,7 @@ namespace FastCG finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL; } - depthAttachmentReference.sType = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2; + depthAttachmentReference.sType = VK_STRUCTURE_TYPE_ATTACHMENT_REFERENCE_2_KHR; depthAttachmentReference.pNext = nullptr; depthAttachmentReference.attachment = attachmentIdx; depthAttachmentReference.layout = initialLayout; @@ -1186,10 +1270,10 @@ namespace FastCG subpassDescription.pColorAttachments = colorAttachmentReferences.empty() ? nullptr : &colorAttachmentReferences[0]; VkRenderPass renderPass; - FASTCG_CHECK_VK_RESULT(vkCreateRenderPass2(mDevice, - &renderPassCreateInfo, - mAllocationCallbacks.get(), - &renderPass)); + FASTCG_CHECK_VK_RESULT(VkExt::vkCreateRenderPass2KHR(mDevice, + &renderPassCreateInfo, + mAllocationCallbacks.get(), + &renderPass)); it = mRenderPasses.emplace(renderPassHash, renderPass).first; @@ -1467,7 +1551,7 @@ namespace FastCG VkPipeline pipeline; FASTCG_CHECK_VK_RESULT(vkCreateGraphicsPipelines(mDevice, - nullptr, + VK_NULL_HANDLE, 1, &pipelineCreateInfo, mAllocationCallbacks.get(), @@ -1497,7 +1581,7 @@ namespace FastCG } else { - setLayouts.emplace_back(VK_NULL_HANDLE); + setLayouts.emplace_back(nullptr); } } @@ -1603,6 +1687,11 @@ namespace FastCG #ifdef _DEBUG void VulkanGraphicsSystem::PushDebugMarker(VkCommandBuffer commandBuffer, const char *pName) { + if (!Contains(mInstanceExtensions, VK_EXT_DEBUG_UTILS_EXTENSION_NAME)) + { + return; + } + VkDebugUtilsLabelEXT debugLabel; debugLabel.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_LABEL_EXT; debugLabel.pNext = nullptr; @@ -1616,11 +1705,21 @@ namespace FastCG void VulkanGraphicsSystem::PopDebugMarker(VkCommandBuffer commandBuffer) { + if (!Contains(mInstanceExtensions, VK_EXT_DEBUG_UTILS_EXTENSION_NAME)) + { + return; + } + VkExt::vkCmdEndDebugUtilsLabelEXT(commandBuffer); } void VulkanGraphicsSystem::SetObjectName(const char *pObjectName, VkObjectType objectType, uint64_t objectHandle) { + if (!Contains(mInstanceExtensions, VK_EXT_DEBUG_UTILS_EXTENSION_NAME)) + { + return; + } + VkDebugUtilsObjectNameInfoEXT debugUtilsObjectNameInfo; debugUtilsObjectNameInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT; debugUtilsObjectNameInfo.pNext = nullptr; @@ -1685,6 +1784,26 @@ namespace FastCG } mDeferredDestroyRequests.clear(); } + +#if defined FASTCG_ANDROID + void VulkanGraphicsSystem::OnWindowInitialized() + { + CreateSurface(); + AcquirePhysicalDeviceSurfaceProperties(); + RecreateSwapChainAndGetImages(); + } + + void 
VulkanGraphicsSystem::OnWindowTerminated() + { + mPreTransform = (VkSurfaceTransformFlagBitsKHR)0; + mPresentMode = VK_PRESENT_MODE_IMMEDIATE_KHR; + mSwapChainSurfaceFormat = {}; + mMaxSimultaneousFrames = 1; + mCurrentFrame = 0; + DestroySwapChainAndClearImages(); + DestroySurface(); + } +#endif } #endif \ No newline at end of file diff --git a/FastCG/src/Graphics/Vulkan/VulkanShader.cpp b/FastCG/src/Graphics/Vulkan/VulkanShader.cpp index 56149662..be898039 100644 --- a/FastCG/src/Graphics/Vulkan/VulkanShader.cpp +++ b/FastCG/src/Graphics/Vulkan/VulkanShader.cpp @@ -5,12 +5,12 @@ #include #include +#include + #include #include #include -#include - namespace { inline VkFormat GetVkFormatFromSpirType(const spirv_cross::SPIRType &rType) diff --git a/FastCG/src/Graphics/Vulkan/VulkanTexture.cpp b/FastCG/src/Graphics/Vulkan/VulkanTexture.cpp index 5bf11bd8..97fb81df 100644 --- a/FastCG/src/Graphics/Vulkan/VulkanTexture.cpp +++ b/FastCG/src/Graphics/Vulkan/VulkanTexture.cpp @@ -51,7 +51,6 @@ namespace FastCG const auto *pFormatProperties = VulkanGraphicsSystem::GetInstance()->GetFormatProperties(imageCreateInfo.format); assert(pFormatProperties != nullptr); - bool usesMappableMemory = false; if ((pFormatProperties->optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != 0) { imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL; @@ -60,11 +59,11 @@ namespace FastCG { imageCreateInfo.tiling = VK_IMAGE_TILING_LINEAR; // linearly tiled images should use mappable memory at the moment - usesMappableMemory = true; + mUsesMappableMemory = true; } else { - FASTCG_THROW_EXCEPTION(Exception, "Vulkan: No tiling features found for format %s", GetVkFormatString(imageCreateInfo.format)); + FASTCG_THROW_EXCEPTION(Exception, "Vulkan: No tiling features found for format %s (texture: %s)", GetVkFormatString(imageCreateInfo.format), mName.c_str()); } imageCreateInfo.usage = GetVkImageUsageFlags(GetUsage(), GetFormat()); @@ -72,7 +71,7 @@ imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; VmaAllocationCreateInfo allocationCreateInfo; - if (usesMappableMemory) + if (mUsesMappableMemory) { allocationCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT; } @@ -155,7 +154,7 @@ &mDefaultImageView)); break; default: - FASTCG_THROW_EXCEPTION(Exception, "Vulkan: Can't create image view for texture type %s", GetTextureTypeString(GetType())); + FASTCG_THROW_EXCEPTION(Exception, "Vulkan: Can't create image view for texture type %s (texture: %s)", GetTextureTypeString(GetType()), mName.c_str()); break; } diff --git a/FastCG/src/Platform/Android/AndroidApplication.cpp b/FastCG/src/Platform/Android/AndroidApplication.cpp new file mode 100644 index 00000000..375850f5 --- /dev/null +++ b/FastCG/src/Platform/Android/AndroidApplication.cpp @@ -0,0 +1,314 @@ +#ifdef FASTCG_ANDROID + +#include +#include +#include +#include + +#include + +#include + +namespace +{ +#define KEY(a, b) \ + { \ + (uint64_t) b, FastCG::Key::a \ + } + + std::unordered_map<uint64_t, FastCG::Key> gKeyLut = { + KEY(BACKSPACE, AKEYCODE_BACK), + KEY(RETURN, AKEYCODE_ENTER), + KEY(ESCAPE, AKEYCODE_ESCAPE), + KEY(SPACE, AKEYCODE_SPACE), + KEY(ASTERISK, AKEYCODE_STAR), + KEY(PLUS, AKEYCODE_PLUS), + KEY(COMMA, AKEYCODE_COMMA), + KEY(MINUS, AKEYCODE_MINUS), + KEY(DOT, AKEYCODE_PERIOD), + KEY(SLASH, AKEYCODE_SLASH), + KEY(NUMBER_0, AKEYCODE_NUMPAD_0), + KEY(NUMBER_1, AKEYCODE_NUMPAD_1), + KEY(NUMBER_2, AKEYCODE_NUMPAD_2), + KEY(NUMBER_3, AKEYCODE_NUMPAD_3), + KEY(NUMBER_4, AKEYCODE_NUMPAD_4), + KEY(NUMBER_5, AKEYCODE_NUMPAD_5),
+ KEY(NUMBER_6, AKEYCODE_NUMPAD_6), + KEY(NUMBER_7, AKEYCODE_NUMPAD_7), + KEY(NUMBER_8, AKEYCODE_NUMPAD_8), + KEY(NUMBER_9, AKEYCODE_NUMPAD_9), + // KEY(COLON, AKEYCODE_UNKNOWN), + KEY(SEMI_COLON, AKEYCODE_SEMICOLON), + KEY(EQUALS, AKEYCODE_EQUALS), + KEY(LEFT_ARROW, AKEYCODE_DPAD_LEFT), + KEY(UP_ARROW, AKEYCODE_DPAD_UP), + KEY(RIGHT_ARROW, AKEYCODE_DPAD_RIGHT), + KEY(DOWN_ARROW, AKEYCODE_DPAD_DOWN), + KEY(F1, AKEYCODE_F1), + KEY(F2, AKEYCODE_F2), + KEY(F3, AKEYCODE_F3), + KEY(F4, AKEYCODE_F4), + KEY(F5, AKEYCODE_F5), + KEY(F6, AKEYCODE_F6), + KEY(F7, AKEYCODE_F7), + KEY(F8, AKEYCODE_F8), + KEY(F9, AKEYCODE_F9), + KEY(F10, AKEYCODE_F10), + KEY(F11, AKEYCODE_F11), + KEY(F12, AKEYCODE_F12), + KEY(PAGE_UP, AKEYCODE_PAGE_UP), + KEY(PAGE_DOWN, AKEYCODE_PAGE_DOWN), + KEY(END, AKEYCODE_MOVE_END), + KEY(HOME, AKEYCODE_MOVE_HOME), + KEY(INSERT, AKEYCODE_INSERT), + KEY(SHIFT, AKEYCODE_SHIFT_LEFT), + KEY(CONTROL, AKEYCODE_CTRL_LEFT), + KEY(OPEN_SQUARE_BRACKET, AKEYCODE_LEFT_BRACKET), + KEY(BACKSLASH, AKEYCODE_BACKSLASH), + KEY(CLOSE_SQUARE_BRACKET, AKEYCODE_RIGHT_BRACKET), + KEY(LETTER_A, AKEYCODE_A), + KEY(LETTER_B, AKEYCODE_B), + KEY(LETTER_C, AKEYCODE_C), + KEY(LETTER_D, AKEYCODE_D), + KEY(LETTER_E, AKEYCODE_E), + KEY(LETTER_F, AKEYCODE_F), + KEY(LETTER_G, AKEYCODE_G), + KEY(LETTER_H, AKEYCODE_H), + KEY(LETTER_I, AKEYCODE_I), + KEY(LETTER_J, AKEYCODE_J), + KEY(LETTER_K, AKEYCODE_K), + KEY(LETTER_L, AKEYCODE_L), + KEY(LETTER_M, AKEYCODE_M), + KEY(LETTER_N, AKEYCODE_N), + KEY(LETTER_O, AKEYCODE_O), + KEY(LETTER_P, AKEYCODE_P), + KEY(LETTER_Q, AKEYCODE_Q), + KEY(LETTER_R, AKEYCODE_R), + KEY(LETTER_S, AKEYCODE_S), + KEY(LETTER_T, AKEYCODE_T), + KEY(LETTER_U, AKEYCODE_U), + KEY(LETTER_V, AKEYCODE_V), + KEY(LETTER_W, AKEYCODE_W), + KEY(LETTER_X, AKEYCODE_X), + KEY(LETTER_Y, AKEYCODE_Y), + KEY(LETTER_Z, AKEYCODE_Z), + // KEY(TILDE, AKEYCODE_UNKNOWN), + KEY(DEL, AKEYCODE_DEL), + }; + + FastCG::Key TranslateKey(uint64_t key) + { + auto it = gKeyLut.find(key); + if (it == gKeyLut.end()) + { + return FastCG::Key::UNKNOWN; + } + return it->second; + } + +} + +void onAppCmd(android_app *app, int32_t cmd) +{ + switch (cmd) + { + case APP_CMD_INIT_WINDOW: + if (app->window != nullptr) + { + FastCG::GraphicsSystem::GetInstance()->OnWindowInitialized(); + // obtain the actual size of the window + FastCG::AndroidApplication::GetInstance()->WindowResizeCallback( + (uint32_t)ANativeWindow_getWidth(app->window), + (uint32_t)ANativeWindow_getHeight(app->window)); + } + break; + case APP_CMD_TERM_WINDOW: + FastCG::GraphicsSystem::GetInstance()->OnWindowTerminated(); + break; + case APP_CMD_WINDOW_RESIZED: + // obtain the actual size of the window + FastCG::AndroidApplication::GetInstance()->WindowResizeCallback( + (uint32_t)ANativeWindow_getWidth(app->window), + (uint32_t)ANativeWindow_getHeight(app->window)); + break; + case APP_CMD_WINDOW_REDRAW_NEEDED: + // not implemented + break; + case APP_CMD_CONTENT_RECT_CHANGED: + // not implemented + break; + case APP_CMD_GAINED_FOCUS: + // not implemented + break; + case APP_CMD_LOST_FOCUS: + // not implemented + break; + case APP_CMD_CONFIG_CHANGED: + // not implemented + break; + case APP_CMD_LOW_MEMORY: + // not implemented + break; + case APP_CMD_START: + // not implemented + break; + case APP_CMD_RESUME: + FastCG::AndroidApplication::GetInstance()->SetPaused(false); + break; + case APP_CMD_SAVE_STATE: + // not implemented + break; + case APP_CMD_PAUSE: + FastCG::AndroidApplication::GetInstance()->SetPaused(true); + break; + case APP_CMD_STOP: + // not implemented + 
break; + case APP_CMD_DESTROY: + // not implemented + break; + } +} + +int32_t onInputEvent(android_app *app, AInputEvent *event) +{ + int32_t eventType = AInputEvent_getType(event); + switch (eventType) + { + case AINPUT_EVENT_TYPE_MOTION: + { + size_t pointerCount = AMotionEvent_getPointerCount(event); + // TODO: support multiple pointers + assert(pointerCount > 0); + + float x = AMotionEvent_getX(event, 0); + float y = AMotionEvent_getY(event, 0); + assert(x >= 0 && y >= 0); + int32_t action = AMotionEvent_getAction(event); + switch (action) + { + case AMOTION_EVENT_ACTION_DOWN: + case AMOTION_EVENT_ACTION_UP: + FastCG::AndroidApplication::GetInstance()->MouseButtonCallback(FastCG::MouseButton::RIGHT_BUTTON, + action == AMOTION_EVENT_ACTION_DOWN ? FastCG::MouseButtonState::PRESSED : FastCG::MouseButtonState::RELEASED); + break; + case AMOTION_EVENT_ACTION_MOVE: + FastCG::AndroidApplication::GetInstance()->MouseMoveCallback((uint32_t)x, (uint32_t)y); + break; + case AMOTION_EVENT_ACTION_CANCEL: + // not implemented + break; + case AMOTION_EVENT_ACTION_OUTSIDE: + // not implemented + break; + case AMOTION_EVENT_ACTION_POINTER_DOWN: + case AMOTION_EVENT_ACTION_POINTER_UP: + // TODO: support multiple pointers + break; + } + return 1; + } + case AINPUT_EVENT_TYPE_KEY: + { + int32_t keyCode = AKeyEvent_getKeyCode(event); + int32_t action = AKeyEvent_getAction(event); + bool pressed = action == AKEY_EVENT_ACTION_DOWN; // AKEY_EVENT_ACTION_MULTIPLE is considered an "unpress" action + FastCG::AndroidApplication::GetInstance()->KeyboardCallback(TranslateKey(keyCode), pressed); + return 1; + } + case AINPUT_EVENT_TYPE_FOCUS: + // not implemented + break; + } + return 0; +} + +namespace FastCG +{ + void AndroidApplication::SetAndroidApp(android_app *androidApp) + { + assert(androidApp != nullptr); + + // FIXME: that call shouldn't be necessary anymore + // https://github.com/android/ndk/issues/381 + FASTCG_WARN_PUSH + FASTCG_WARN_IGNORE_DEPRECATED_DECLARATIONS + app_dummy(); + FASTCG_WARN_POP + + mAndroidApp = androidApp; + + mAndroidApp->onAppCmd = ::onAppCmd; + mAndroidApp->onInputEvent = ::onInputEvent; + } + + void AndroidApplication::RunMainLoop() + { + int events; + android_poll_source *source; + + while (mRunning) + { + auto osStart = Timer::GetTime(); + + while (ALooper_pollAll(0, nullptr, &events, (void **)&source) >= 0) + { + if (source != nullptr) + { + source->process(mAndroidApp, source); + } + + if (mAndroidApp->destroyRequested != 0) + { + mRunning = false; + goto __exit; + } + } + + RunMainLoopIteration(Timer::GetTime() - osStart); + } + __exit: + return; + } + + void AndroidApplication::OnPreInitialize() + { + BaseApplication::OnPreInitialize(); + + if (mAndroidApp->activity->vm->AttachCurrentThread(&mJniEnv, NULL) != JNI_OK) + { + FASTCG_THROW_EXCEPTION(Exception, "Couldn't obtain the JNI environment for the current thread"); + } + } + + void AndroidApplication::OnPostFinalize() + { + // since ending the current thread doesn't make the Android application end, + // we need to call the main activity's finish() method explicitly + + jobject activity = mAndroidApp->activity->clazz; + jclass activityClass = mJniEnv->GetObjectClass(activity); + + jmethodID safeFinishMethod = mJniEnv->GetMethodID(activityClass, "finish", "()V"); + mJniEnv->CallVoidMethod(activity, safeFinishMethod); + + mAndroidApp->activity->vm->DetachCurrentThread(); + + BaseApplication::OnPostFinalize(); + } + + uint64_t AndroidApplication::GetNativeKey(Key key) const + { + for (auto it = gKeyLut.cbegin(); it !=
gKeyLut.cend(); ++it) + { + if (it->second == key) + { + return it->first; + } + } + return uint64_t(~0); + } + +} + +#endif \ No newline at end of file diff --git a/FastCG/src/Platform/BaseApplication.cpp b/FastCG/src/Platform/BaseApplication.cpp index 85e5f33a..7f383f62 100644 --- a/FastCG/src/Platform/BaseApplication.cpp +++ b/FastCG/src/Platform/BaseApplication.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -106,14 +107,8 @@ namespace FastCG smpInstance = nullptr; } - int BaseApplication::Run(int argc, char **argv) + int BaseApplication::Run() { - if (!ParseCommandLineArguments(argc, argv)) - { - OnPrintUsage(); - return -1; - } - try { Initialize(); @@ -131,68 +126,99 @@ namespace FastCG } catch (Exception &e) { + FASTCG_LOG_ERROR("Fatal Exception: %s", e.GetFullDescription().c_str()); FASTCG_MSG_BOX("Error", "Fatal Exception: %s", e.GetFullDescription().c_str()); return -1; } } + int BaseApplication::Run(int argc, char **argv) + { + if (!ParseCommandLineArguments(argc, argv)) + { + FASTCG_LOG_ERROR("Failed to parse command line arguments"); + OnPrintUsage(); + return -1; + } + + return Run(); + } + void BaseApplication::Initialize() { + FASTCG_LOG_VERBOSE("Pre-initializing application"); OnPreInitialize(); - AssetSystem::Create({mSettings.assets.bundles}); + { + FASTCG_LOG_VERBOSE("Creating systems"); + + AssetSystem::Create({mSettings.assets.bundles}); #ifdef _DEBUG - DebugMenuSystem::Create({}); + DebugMenuSystem::Create({}); #endif - GraphicsSystem::Create({mScreenWidth, - mScreenHeight, - mSettings.maxSimultaneousFrames, - mSettings.vsync}); - InputSystem::Create({}); - ImGuiSystem::Create({mScreenWidth, - mScreenHeight}); - RenderingSystem::Create({mSettings.rendering.path, - mScreenWidth, - mScreenHeight, - mSettings.rendering.clearColor, - mSettings.rendering.ambientLight, - mRenderingStatistics}); - WorldSystem::Create({mScreenWidth, - mScreenHeight}); - - GraphicsSystem::GetInstance()->Initialize(); - - for (const auto &rImportCallback : mSettings.assets.importCallbacks) - { - rImportCallback(); + GraphicsSystem::Create({mScreenWidth, + mScreenHeight, + mSettings.maxSimultaneousFrames, + mSettings.vsync}); + InputSystem::Create({}); + ImGuiSystem::Create({mScreenWidth, + mScreenHeight}); + RenderingSystem::Create({mSettings.rendering.path, + mScreenWidth, + mScreenHeight, + mSettings.rendering.clearColor, + mSettings.rendering.ambientLight, + mRenderingStatistics}); + WorldSystem::Create({mScreenWidth, + mScreenHeight}); } - ImGuiSystem::GetInstance()->Initialize(); - RenderingSystem::GetInstance()->Initialize(); - WorldSystem::GetInstance()->Initialize(); + { + FASTCG_LOG_VERBOSE("Initializing systems"); + + GraphicsSystem::GetInstance()->Initialize(); + + for (const auto &rImportCallback : mSettings.assets.importCallbacks) + { + rImportCallback(); + } + + ImGuiSystem::GetInstance()->Initialize(); + RenderingSystem::GetInstance()->Initialize(); + WorldSystem::GetInstance()->Initialize(); + } + FASTCG_LOG_VERBOSE("Post-initializing application"); OnPostInitialize(); } void BaseApplication::Finalize() { + FASTCG_LOG_VERBOSE("Pre-finalizing application"); OnPreFinalize(); - WorldSystem::GetInstance()->Finalize(); - RenderingSystem::GetInstance()->Finalize(); - ImGuiSystem::GetInstance()->Finalize(); - GraphicsSystem::GetInstance()->Finalize(); + { + FASTCG_LOG_VERBOSE("Finalizing systems"); + WorldSystem::GetInstance()->Finalize(); + RenderingSystem::GetInstance()->Finalize(); + ImGuiSystem::GetInstance()->Finalize(); + 
GraphicsSystem::GetInstance()->Finalize(); + } - WorldSystem::Destroy(); - RenderingSystem::Destroy(); - ImGuiSystem::Destroy(); - InputSystem::Destroy(); - GraphicsSystem::Destroy(); + { + FASTCG_LOG_VERBOSE("Destroying systems"); + WorldSystem::Destroy(); + RenderingSystem::Destroy(); + ImGuiSystem::Destroy(); + InputSystem::Destroy(); + GraphicsSystem::Destroy(); #ifdef _DEBUG - DebugMenuSystem::Destroy(); + DebugMenuSystem::Destroy(); #endif - AssetSystem::Destroy(); + AssetSystem::Destroy(); + } + FASTCG_LOG_VERBOSE("Post-finalizing application"); OnPostFinalize(); } diff --git a/FastCG/src/Platform/Linux/X11Application.cpp b/FastCG/src/Platform/Linux/X11Application.cpp index 52d3f840..42666fe2 100644 --- a/FastCG/src/Platform/Linux/X11Application.cpp +++ b/FastCG/src/Platform/Linux/X11Application.cpp @@ -186,12 +186,15 @@ namespace FastCG if (static_cast(event.xclient.data.l[0]) == mDeleteWindowAtom) { mRunning = false; + goto __exit; } } } RunMainLoopIteration(Timer::GetTime() - osStart); } + __exit: + return; } Window &X11Application::CreateSimpleWindow() @@ -239,7 +242,7 @@ namespace FastCG &windowAttribs); if (mWindow == None) { - FASTCG_THROW_EXCEPTION(Exception, "Failed to create a window"); + FASTCG_THROW_EXCEPTION(Exception, "Couldn't create a window"); } if (windowAttribs.colormap != None) diff --git a/FastCG/src/Platform/Windows/WindowsApplication.cpp b/FastCG/src/Platform/Windows/WindowsApplication.cpp index 498eb4b8..6129e871 100644 --- a/FastCG/src/Platform/Windows/WindowsApplication.cpp +++ b/FastCG/src/Platform/Windows/WindowsApplication.cpp @@ -105,53 +105,53 @@ namespace } } -namespace FastCG +LRESULT WndProc(HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam) { - LRESULT WndProc(HWND hWnd, UINT uMsg, WPARAM wParam, LPARAM lParam) + auto *pApplication = FastCG::WindowsApplication::GetInstance(); + switch (uMsg) { - auto *app = WindowsApplication::GetInstance(); - switch (uMsg) - { - case WM_DESTROY: - case WM_QUIT: - case WM_CLOSE: - PostQuitMessage(0); - break; - case WM_SIZE: - app->WindowResizeCallback((uint32_t)LOWORD(lParam), (uint32_t)HIWORD(lParam)); - break; - case WM_LBUTTONDOWN: - app->MouseButtonCallback(MouseButton::LEFT_BUTTON, MouseButtonState::PRESSED); - break; - case WM_RBUTTONDOWN: - app->MouseButtonCallback(MouseButton::RIGHT_BUTTON, MouseButtonState::PRESSED); - break; - case WM_MBUTTONDOWN: - app->MouseButtonCallback(MouseButton::MIDDLE_BUTTON, MouseButtonState::PRESSED); - break; - case WM_LBUTTONUP: - app->MouseButtonCallback(MouseButton::LEFT_BUTTON, MouseButtonState::RELEASED); - break; - case WM_RBUTTONUP: - app->MouseButtonCallback(MouseButton::RIGHT_BUTTON, MouseButtonState::RELEASED); - break; - case WM_MBUTTONUP: - app->MouseButtonCallback(MouseButton::MIDDLE_BUTTON, MouseButtonState::RELEASED); - break; - case WM_MOUSEMOVE: - app->MouseMoveCallback((uint32_t)LOWORD(lParam), (uint32_t)HIWORD(lParam)); - break; - case WM_KEYDOWN: - case WM_KEYUP: - app->KeyboardCallback(TranslateKey((uint64_t)wParam), uMsg == WM_KEYDOWN || uMsg == WM_SYSKEYDOWN); - break; - default: - break; - } - - return DefWindowProc(hWnd, uMsg, wParam, lParam); + case WM_DESTROY: + case WM_QUIT: + case WM_CLOSE: + PostQuitMessage(0); + break; + case WM_SIZE: + pApplication->WindowResizeCallback((uint32_t)LOWORD(lParam), (uint32_t)HIWORD(lParam)); + break; + case WM_LBUTTONDOWN: + pApplication->MouseButtonCallback(FastCG::MouseButton::LEFT_BUTTON, FastCG::MouseButtonState::PRESSED); + break; + case WM_RBUTTONDOWN: + 
pApplication->MouseButtonCallback(FastCG::MouseButton::RIGHT_BUTTON, FastCG::MouseButtonState::PRESSED); + break; + case WM_MBUTTONDOWN: + pApplication->MouseButtonCallback(FastCG::MouseButton::MIDDLE_BUTTON, FastCG::MouseButtonState::PRESSED); + break; + case WM_LBUTTONUP: + pApplication->MouseButtonCallback(FastCG::MouseButton::LEFT_BUTTON, FastCG::MouseButtonState::RELEASED); + break; + case WM_RBUTTONUP: + pApplication->MouseButtonCallback(FastCG::MouseButton::RIGHT_BUTTON, FastCG::MouseButtonState::RELEASED); + break; + case WM_MBUTTONUP: + pApplication->MouseButtonCallback(FastCG::MouseButton::MIDDLE_BUTTON, FastCG::MouseButtonState::RELEASED); + break; + case WM_MOUSEMOVE: + pApplication->MouseMoveCallback((uint32_t)LOWORD(lParam), (uint32_t)HIWORD(lParam)); + break; + case WM_KEYDOWN: + case WM_KEYUP: + pApplication->KeyboardCallback(TranslateKey((uint64_t)wParam), uMsg == WM_KEYDOWN || uMsg == WM_SYSKEYDOWN); + break; + default: + break; } + return DefWindowProc(hWnd, uMsg, wParam, lParam); +} + +namespace FastCG +{ void WindowsApplication::OnPreInitialize() { BaseApplication::OnPreInitialize(); @@ -180,7 +180,7 @@ namespace FastCG if (!RegisterClassEx(&windowClass)) { - FASTCG_THROW_EXCEPTION(Exception, "Error registering window class"); + FASTCG_THROW_EXCEPTION(Exception, "Couldn't register window class"); } auto dwExStyle = WS_EX_APPWINDOW | WS_EX_WINDOWEDGE; @@ -201,7 +201,7 @@ namespace FastCG if (mHWnd == 0) { - FASTCG_THROW_EXCEPTION(Exception, "Error creating window"); + FASTCG_THROW_EXCEPTION(Exception, "Couldn't create window"); } ShowWindow(mHWnd, SW_SHOW); @@ -230,6 +230,7 @@ namespace FastCG { if (!GetMessage(&msg, NULL, 0, 0)) { + mRunning = false; goto __exit; } diff --git a/FastCG/src/Rendering/DeferredWorldRenderer.cpp b/FastCG/src/Rendering/DeferredWorldRenderer.cpp index f81a56b9..efb5190a 100644 --- a/FastCG/src/Rendering/DeferredWorldRenderer.cpp +++ b/FastCG/src/Rendering/DeferredWorldRenderer.cpp @@ -100,7 +100,7 @@ namespace FastCG TextureType::TEXTURE_2D, TextureUsageFlagBit::SAMPLED | TextureUsageFlagBit::RENDER_TARGET, TextureFormat::RGBA, - {10, 10, 10, 2}, + {8, 8, 8, 8}, TextureDataType::FLOAT, TextureFilter::LINEAR_FILTER, TextureWrapMode::CLAMP, diff --git a/FastCG/src/Rendering/ForwardWorldRenderer.cpp b/FastCG/src/Rendering/ForwardWorldRenderer.cpp index 42d5416e..e881d0f8 100644 --- a/FastCG/src/Rendering/ForwardWorldRenderer.cpp +++ b/FastCG/src/Rendering/ForwardWorldRenderer.cpp @@ -36,7 +36,7 @@ namespace FastCG TextureType::TEXTURE_2D, TextureUsageFlagBit::SAMPLED | TextureUsageFlagBit::RENDER_TARGET, TextureFormat::RGBA, - {10, 10, 10, 2}, + {8, 8, 8, 8}, TextureDataType::FLOAT, TextureFilter::LINEAR_FILTER, TextureWrapMode::CLAMP, diff --git a/FastCG/src/Rendering/MaterialDefinitionImporter.cpp b/FastCG/src/Rendering/MaterialDefinitionImporter.cpp index c1831933..9acdd26a 100644 --- a/FastCG/src/Rendering/MaterialDefinitionImporter.cpp +++ b/FastCG/src/Rendering/MaterialDefinitionImporter.cpp @@ -1,15 +1,19 @@ #include #include #include +#include #include namespace FastCG { void MaterialDefinitionImporter::Import() { + FASTCG_LOG_VERBOSE("Importing materials:"); for (const auto &rMaterialFileName : AssetSystem::GetInstance()->List("materials", true)) { - MaterialDefinitionRegistry::GetInstance()->AddMaterialDefinition(MaterialDefinitionLoader::Load(rMaterialFileName)); + auto pMaterialDefinition = MaterialDefinitionLoader::Load(rMaterialFileName); + FASTCG_LOG_VERBOSE("- %s", pMaterialDefinition->GetName().c_str()); + 
MaterialDefinitionRegistry::GetInstance()->AddMaterialDefinition(std::move(pMaterialDefinition)); } } } \ No newline at end of file diff --git a/FastCG/src/Rendering/RenderingSystem.cpp b/FastCG/src/Rendering/RenderingSystem.cpp index b48629f3..6b2c6b17 100644 --- a/FastCG/src/Rendering/RenderingSystem.cpp +++ b/FastCG/src/Rendering/RenderingSystem.cpp @@ -70,12 +70,12 @@ namespace FastCG ImGui::Render(); auto *pImDrawData = ImGui::GetDrawData(); - mpGraphicsContext->Begin(); + if (mpGraphicsContext->Begin()) { mpWorldRenderer->Render(pMainCamera, mpGraphicsContext); mpImGuiRenderer->Render(pImDrawData, mpGraphicsContext); + mpGraphicsContext->End(); } - mpGraphicsContext->End(); } void RenderingSystem::Finalize() diff --git a/FastCG/src/World/WorldSystem.cpp b/FastCG/src/World/WorldSystem.cpp index 8d33e818..3ae803da 100644 --- a/FastCG/src/World/WorldSystem.cpp +++ b/FastCG/src/World/WorldSystem.cpp @@ -26,15 +26,15 @@ m##className##s.emplace_back(static_cast<className *>(component)); \ } -#define FASTCG_UNTRACK_COMPONENT(className, component) \ - if (component->GetType().IsDerived(className::TYPE)) \ - { \ - auto it = std::find(m##className##s.begin(), m##className##s.end(), component); \ - if (it == m##className##s.end()) \ - { \ - FASTCG_THROW_EXCEPTION(Exception, "Error unregistering: %s", #className); \ - } \ - m##className##s.erase(it); \ +#define FASTCG_UNTRACK_COMPONENT(className, component) \ + if (component->GetType().IsDerived(className::TYPE)) \ + { \ + auto it = std::find(m##className##s.begin(), m##className##s.end(), component); \ + if (it == m##className##s.end()) \ + { \ + FASTCG_THROW_EXCEPTION(Exception, "Couldn't untrack component (component: %s)", #className); \ + } \ + m##className##s.erase(it); \ } namespace diff --git a/cmake/fastcg_glsl_processor.cmake b/cmake/fastcg_glsl_processor.cmake new file mode 100644 index 00000000..92c2c9ac --- /dev/null +++ b/cmake/fastcg_glsl_processor.cmake @@ -0,0 +1,25 @@ +cmake_minimum_required(VERSION 3.10) + +# Usage: +# cmake -P fastcg_glsl_processor.cmake "path/to/input" "path/to/output" "glsl_version" + +set(input_file ${CMAKE_ARGV3} CACHE FILEPATH "Path to the input file") +set(output_file ${CMAKE_ARGV4} CACHE FILEPATH "Path to the output file") +set(glsl_version ${CMAKE_ARGV5} CACHE STRING "GLSL version") + +if(input_file STREQUAL "" OR output_file STREQUAL "" OR glsl_version STREQUAL "") + message(FATAL_ERROR "You must provide input_file, output_file and glsl_version") +endif() + +file(READ ${input_file} file_content) + +set(glsl_prefix "#version ${glsl_version}") +set(glsl_prefix "${glsl_prefix}\n#extension GL_GOOGLE_include_directive : enable") +if(input_file MATCHES "\\.frag$") + set(glsl_prefix "${glsl_prefix}\nprecision mediump float;") +endif() + +file(WRITE ${output_file} "${glsl_prefix}\n\n${file_content}") + +message(STATUS "Prepended GLSL preamble to: ${output_file}") \ No newline at end of file diff --git a/cmake/fastcg_setup.cmake b/cmake/fastcg_setup.cmake index 3ebbe5d3..0cd6f758 100644 --- a/cmake/fastcg_setup.cmake +++ b/cmake/fastcg_setup.cmake @@ -1,48 +1,77 @@ -# Declare platform variable +# Setup platform -if (DEFINED FASTCG_PLATFORM) - unset(FASTCG_PLATFORM) +set(FASTCG_PLATFORM ${CMAKE_SYSTEM_NAME}) +set(FASTCG_PLATFORM ${FASTCG_PLATFORM} CACHE STRING "FastCG platform") +set(FASTCG_PLATFORMS "Windows" "Linux" "Android") +set_property(CACHE FASTCG_PLATFORM PROPERTY STRINGS ${FASTCG_PLATFORMS}) +if (NOT FASTCG_PLATFORM IN_LIST FASTCG_PLATFORMS) + message(FATAL_ERROR "Invalid platform: ${FASTCG_PLATFORM}")
endif() -if(WIN32) - set(FASTCG_PLATFORM "Windows" CACHE STRING "FastCG platform") -elseif(UNIX AND NOT APPLE) - set(FASTCG_PLATFORM "Linux" CACHE STRING "FastCG platform") -endif() -message("FastCG platform: ${FASTCG_PLATFORM}") +message(STATUS "FastCG platform: ${FASTCG_PLATFORM}") -# Declare graphics system variable +# Setup graphics system set(FASTCG_GRAPHICS_SYSTEM "OpenGL" CACHE STRING "FastCG graphics system") -message("FastCG graphics system: ${FASTCG_GRAPHICS_SYSTEM}") -set_property(CACHE FASTCG_GRAPHICS_SYSTEM PROPERTY STRINGS OpenGL Vulkan) +set(FASTCG_GRAPHICS_SYSTEMS "OpenGL" "Vulkan") +set_property(CACHE FASTCG_GRAPHICS_SYSTEM PROPERTY STRINGS ${FASTCG_GRAPHICS_SYSTEMS}) +if (NOT FASTCG_GRAPHICS_SYSTEM IN_LIST FASTCG_GRAPHICS_SYSTEMS) + message(FATAL_ERROR "Invalid graphics system: ${FASTCG_GRAPHICS_SYSTEM}") +endif() +message(STATUS "FastCG graphics system: ${FASTCG_GRAPHICS_SYSTEM}") -# Declare deploy variable +# Setup deploy directory if (DEFINED FASTCG_DEPLOY) set(FASTCG_DEPLOY_VALUE ${FASTCG_DEPLOY}) unset(FASTCG_DEPLOY) else() - set(FASTCG_DEPLOY_VALUE ${CMAKE_SOURCE_DIR}/deploy) + set(FASTCG_DEPLOY_VALUE ${CMAKE_SOURCE_DIR}/deploy/${FASTCG_PLATFORM}) endif() set(FASTCG_DEPLOY ${FASTCG_DEPLOY_VALUE} CACHE STRING "FastCG deploy directory") -message("FastCG deploy directory: ${FASTCG_DEPLOY}") set_property(DIRECTORY PROPERTY ADDITIONAL_MAKE_CLEAN_FILES ${FASTCG_DEPLOY}) +message(STATUS "FastCG deploy directory: ${FASTCG_DEPLOY}") -# Declare options +# Setup options -option(FASTCG_BUILD_SAMPLES "Build FastCG samples" ON) -option(FASTCG_USE_TEXT_SHADERS "Use text shaders" OFF) -option(FASTCG_DISABLE_GPU_TIMING "Disable GPU timing" OFF) +option(FASTCG_BUILD_SAMPLES "Build samples" ON) +if (FASTCG_PLATFORM STREQUAL "Android" AND FASTCG_GRAPHICS_SYSTEM STREQUAL "OpenGL") + # TODO: use precompiled shaders in GLES if possible at all + set(FASTCG_USE_TEXT_SHADERS ON) + # time queries require either vendor specific solutions or an extension in GLES + set(FASTCG_DISABLE_GPU_TIMING ON) +else() + option(FASTCG_USE_TEXT_SHADERS "Use text shaders" OFF) + option(FASTCG_DISABLE_GPU_TIMING "Disable GPU timing" OFF) +endif() option(FASTCG_DISABLE_GPU_VALIDATION "Disable GPU validation" OFF) +message(STATUS "FastCG - Build samples = ${FASTCG_BUILD_SAMPLES}") +message(STATUS "FastCG - Use text shaders = ${FASTCG_USE_TEXT_SHADERS}") +message(STATUS "FastCG - Disable GPU timing = ${FASTCG_DISABLE_GPU_TIMING}") +message(STATUS "FastCG - Disable GPU validation = ${FASTCG_DISABLE_GPU_VALIDATION}") + # Find necessary programs if (NOT FASTCG_USE_TEXT_SHADERS) find_program(FASTCG_GLSLANGVALIDATOR glslangValidator DOC "Path to the glslangValidator executable") if (FASTCG_GLSLANGVALIDATOR) - message(STATUS "Found glslangValidator: ${FASTCG_GLSLANGVALIDATOR}") + message(STATUS "Found glslangValidator: ${FASTCG_GLSLANGVALIDATOR}") else() - message(FATAL_ERROR "Could not find the glslangValidator executable!") + message(FATAL_ERROR "Couldn't find the glslangValidator executable!") endif() +endif() + +if (FASTCG_PLATFORM STREQUAL "Android") + find_program(FASTCG_ADB adb DOC "Path to the adb executable") + if (NOT FASTCG_ADB) + message(FATAL_ERROR "Couldn't find the adb executable!") + endif() + message(STATUS "Found adb: ${FASTCG_ADB}") +endif() + +# Setup compiler options + +if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" OR CMAKE_CXX_COMPILER_ID MATCHES "Clang") + add_compile_options(-Werror) endif() \ No newline at end of file diff --git a/cmake/fastcg_targets.cmake b/cmake/fastcg_targets.cmake index 
ed2ccfd2..9250db10 100644 --- a/cmake/fastcg_targets.cmake +++ b/cmake/fastcg_targets.cmake @@ -6,48 +6,82 @@ function(_fastcg_remove) endforeach() endfunction() -function(_fastcg_compile_glsl_shaders) +function(_fastcg_add_glsl_shader_target) get_target_property(SOURCE_DIR ${ARGV0} SOURCE_DIR) set(SRC_SHADERS_DIR "${SOURCE_DIR}/assets/shaders") set(DST_SHADERS_DIR "${FASTCG_DEPLOY}/assets/${ARGV0}/shaders") + set(TMP_SHADERS_DIR "${CMAKE_BINARY_DIR}/assets/shaders/${ARGV0}") - set(FASTCG_SHADER_COMPILER_ARGS -e main --source-entrypoint main) + set(SHADER_COMPILER_ARGS -e main --source-entrypoint main) - if(FASTCG_GRAPHICS_SYSTEM STREQUAL "OpenGL") - set(FASTCG_SHADER_COMPILER_ARGS ${FASTCG_SHADER_COMPILER_ARGS} --target-env opengl) - elseif(FASTCG_GRAPHICS_SYSTEM STREQUAL "Vulkan") - set(FASTCG_SHADER_COMPILER_ARGS ${FASTCG_SHADER_COMPILER_ARGS} --target-env vulkan1.3) + if(FASTCG_PLATFORM STREQUAL "Android") + set(GLSL_VERSION "320 es") else() - message(FATAL_ERROR "Don't know how to compile GLSL shaders for ${FASTCG_GRAPHICS_SYSTEM}") + set(GLSL_VERSION "430") endif() - set(FASTCG_SHADER_COMPILER_ARGS ${FASTCG_SHADER_COMPILER_ARGS} -DENABLE_INCLUDE_EXTENSION_DIRECTIVE) + if(FASTCG_GRAPHICS_SYSTEM STREQUAL "OpenGL") + set(SHADER_COMPILER_ARGS ${SHADER_COMPILER_ARGS} --target-env opengl) + else() + set(SHADER_COMPILER_ARGS ${SHADER_COMPILER_ARGS} --target-env vulkan1.1) + endif() file(GLOB_RECURSE GLSL_HEADERS "${SRC_SHADERS_DIR}/*.glsl") + foreach(GLSL_HEADER IN LISTS GLSL_HEADERS) + file(RELATIVE_PATH REL_GLSL_HEADER ${SRC_SHADERS_DIR} ${GLSL_HEADER}) + if(FASTCG_USE_TEXT_SHADERS) + set(NEW_GLSL_HEADER "${DST_SHADERS_DIR}/${REL_GLSL_HEADER}") + else() + set(NEW_GLSL_HEADER "${TMP_SHADERS_DIR}/${REL_GLSL_HEADER}") + endif() + add_custom_command( + OUTPUT ${NEW_GLSL_HEADER} + COMMAND ${CMAKE_COMMAND} -E copy ${GLSL_HEADER} ${NEW_GLSL_HEADER} + DEPENDS ${GLSL_HEADER} + ) + list(APPEND NEW_GLSL_HEADERS ${NEW_GLSL_HEADER}) + endforeach(GLSL_HEADER) + file(GLOB_RECURSE GLSL_SOURCES "${SRC_SHADERS_DIR}/*.vert" "${SRC_SHADERS_DIR}/*.frag") foreach(GLSL_SOURCE IN LISTS GLSL_SOURCES) file(RELATIVE_PATH REL_GLSL_SOURCE ${SRC_SHADERS_DIR} ${GLSL_SOURCE}) - get_filename_component(GLSL_SOURCE_DIR ${REL_GLSL_SOURCE} DIRECTORY) - get_filename_component(GLSL_SOURCE_BASENAME ${REL_GLSL_SOURCE} NAME_WE) - get_filename_component(GLSL_SOURCE_EXT ${REL_GLSL_SOURCE} EXT) - string(SUBSTRING ${GLSL_SOURCE_EXT} 1 -1 GLSL_SOURCE_EXT) - set(SPIRV_DIR ${DST_SHADERS_DIR}/${GLSL_SOURCE_DIR}) - set(SPIRV_FILE "${SPIRV_DIR}/${GLSL_SOURCE_BASENAME}.${GLSL_SOURCE_EXT}_spv") - add_custom_command( - OUTPUT ${SPIRV_FILE} - COMMAND ${CMAKE_COMMAND} -E make_directory ${SPIRV_DIR} - COMMAND ${FASTCG_GLSLANGVALIDATOR} ${FASTCG_SHADER_COMPILER_ARGS} ${GLSL_SOURCE} -o ${SPIRV_FILE} $<IF:$<CONFIG:Debug>,-g,> # generate debug info if in Debug config - DEPENDS ${GLSL_SOURCE} ${GLSL_HEADERS} - ) - list(APPEND GLSL_SPIRV_FILES ${SPIRV_FILE}) + + if(FASTCG_USE_TEXT_SHADERS) + set(DST_GLSL_SOURCE "${DST_SHADERS_DIR}/${REL_GLSL_SOURCE}") + add_custom_command( + OUTPUT ${DST_GLSL_SOURCE} + COMMAND ${CMAKE_COMMAND} -E copy ${GLSL_SOURCE} ${DST_GLSL_SOURCE} + DEPENDS ${GLSL_SOURCE} ${NEW_GLSL_HEADERS} + ) + else() + get_filename_component(GLSL_SOURCE_DIR ${REL_GLSL_SOURCE} DIRECTORY) + get_filename_component(GLSL_SOURCE_BASENAME ${REL_GLSL_SOURCE} NAME_WE) + get_filename_component(GLSL_SOURCE_EXT ${REL_GLSL_SOURCE} EXT) + + string(SUBSTRING ${GLSL_SOURCE_EXT} 1 -1 GLSL_SOURCE_EXT) + set(SPIRV_DIR ${DST_SHADERS_DIR}/${GLSL_SOURCE_DIR}) + set(DST_GLSL_SOURCE
"${SPIRV_DIR}/${GLSL_SOURCE_BASENAME}.${GLSL_SOURCE_EXT}_spv") + + set(TMP_GLSL_SOURCE "${TMP_SHADERS_DIR}/${REL_GLSL_SOURCE}") + + add_custom_command( + OUTPUT ${DST_GLSL_SOURCE} + COMMAND ${CMAKE_COMMAND} -E make_directory ${SPIRV_DIR} + COMMAND ${CMAKE_COMMAND} -P ${CMAKE_SOURCE_DIR}/cmake/fastcg_glsl_processor.cmake "${GLSL_SOURCE}" "${TMP_GLSL_SOURCE}" "${GLSL_VERSION}" + COMMAND ${FASTCG_GLSLANGVALIDATOR} ${SHADER_COMPILER_ARGS} ${TMP_GLSL_SOURCE} -o ${DST_GLSL_SOURCE} $,-g,-g0> # generate debug info iif in Debug config + DEPENDS ${GLSL_SOURCE} ${NEW_GLSL_HEADERS} + ) + endif() + list(APPEND DST_GLSL_SOURCES ${DST_GLSL_SOURCE}) endforeach(GLSL_SOURCE) - if(GLSL_SPIRV_FILES) + + if(DST_GLSL_SOURCES) add_custom_target( - ${ARGV0}_FASTCG_COMPILE_GLSL_SHADERS - DEPENDS ${GLSL_SPIRV_FILES} + ${ARGV0}_GLSL_SHADERS + DEPENDS ${DST_GLSL_SOURCES} ) - add_dependencies(${ARGV0} ${ARGV0}_FASTCG_COMPILE_GLSL_SHADERS) + add_dependencies(${ARGV0} ${ARGV0}_GLSL_SHADERS) endif() endfunction() @@ -60,7 +94,7 @@ function(_fastcg_copy_assets) file(GLOB_RECURSE SRC_ASSET_FILES "${SRC_ASSETS_DIR}/*.*") foreach(SRC_ASSET_FILE IN LISTS SRC_ASSET_FILES) file(RELATIVE_PATH REL_ASSET_FILE ${SRC_ASSETS_DIR} ${SRC_ASSET_FILE}) - if(REL_ASSET_FILE MATCHES "shaders" AND NOT FASTCG_USE_TEXT_SHADERS) + if(REL_ASSET_FILE MATCHES "shaders") continue() endif() set(DST_ASSET_FILE "${DST_ASSETS_DIR}/${REL_ASSET_FILE}") @@ -73,26 +107,27 @@ function(_fastcg_copy_assets) endforeach(SRC_ASSET_FILE) if(DST_ASSET_FILES) add_custom_target( - ${ARGV0}_FASTCG_COPY_ASSETS + ${ARGV0}_COPY_ASSETS DEPENDS ${DST_ASSET_FILES} ) - add_dependencies(${ARGV0} ${ARGV0}_FASTCG_COPY_ASSETS) + add_dependencies(${ARGV0} ${ARGV0}_COPY_ASSETS) endif() endfunction() function(_fastcg_prepare_assets) - if(NOT FASTCG_USE_TEXT_SHADERS) - _fastcg_compile_glsl_shaders(${ARGV}) - endif() + _fastcg_add_glsl_shader_target(${ARGV}) _fastcg_copy_assets(${ARGV}) endfunction() function(_fastcg_add_definitions) + add_definitions(-DFASTCG_PROJECT_NAME="${ARGV0}") add_definitions(-DFASTCG_PLATFORM="${FASTCG_PLATFORM}") if(FASTCG_PLATFORM STREQUAL "Windows") add_definitions(-DFASTCG_WINDOWS) elseif(FASTCG_PLATFORM STREQUAL "Linux") - add_definitions(-DFASTCG_LINUX) + add_definitions(-DFASTCG_LINUX -DFASTCG_POSIX) + elseif(FASTCG_PLATFORM STREQUAL "Android") + add_definitions(-DFASTCG_ANDROID -DFASTCG_POSIX) endif() add_definitions(-DFASTCG_GRAPHICS_SYSTEM="${FASTCG_GRAPHICS_SYSTEM}") if(FASTCG_GRAPHICS_SYSTEM STREQUAL "OpenGL") @@ -107,15 +142,15 @@ function(_fastcg_add_definitions) if (FASTCG_DISABLE_GPU_VALIDATION) add_definitions(-DFASTCG_DISABLE_GPU_VALIDATION) endif() - if(FASTCG_PLATFORM STREQUAL "Linux") - target_compile_definitions(${ARGV0} PUBLIC $,_DEBUG=1,>) - endif() + target_compile_definitions(${ARGV0} PUBLIC $,_DEBUG=1,>) endfunction() function(_fastcg_set_target_properties) set(DEPLOY_DIR "${FASTCG_DEPLOY}/${ARGV0}") + set_target_properties(${ARGV0} PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${DEPLOY_DIR} + LIBRARY_OUTPUT_DIRECTORY ${DEPLOY_DIR} DEBUG_POSTFIX "d" ) @@ -123,6 +158,7 @@ function(_fastcg_set_target_properties) string(TOUPPER ${CMAKE_CONFIGURATION_TYPE} CMAKE_CONFIGURATION_TYPE) set_target_properties(${ARGV0} PROPERTIES RUNTIME_OUTPUT_DIRECTORY_${CMAKE_CONFIGURATION_TYPE} ${DEPLOY_DIR} + LIBRARY_OUTPUT_DIRECTORY_${CMAKE_CONFIGURATION_TYPE} ${DEPLOY_DIR} VS_DEBUGGER_WORKING_DIRECTORY ${DEPLOY_DIR} DEBUG_POSTFIX "d" ) @@ -135,8 +171,86 @@ function(_fastcg_add_library) _fastcg_prepare_assets(${ARGV0}) endfunction() 
+function(_fastcg_add_apk_targets) + set(SRC_GRADLE_PROJECT_DIR "${CMAKE_SOURCE_DIR}/resources/Android/gradle/project") + set(DST_GRADLE_PROJECT_DIR "${CMAKE_BINARY_DIR}/gradle/${ARGV0}") + + file(GLOB_RECURSE SRC_GRADLE_PROJECT_FILES "${SRC_GRADLE_PROJECT_DIR}/*.*") + foreach(SRC_GRADLE_PROJECT_FILE IN LISTS SRC_GRADLE_PROJECT_FILES) + file(RELATIVE_PATH REL_GRADLE_PROJECT_FILE ${SRC_GRADLE_PROJECT_DIR} ${SRC_GRADLE_PROJECT_FILE}) + set(DST_GRADLE_PROJECT_FILE "${DST_GRADLE_PROJECT_DIR}/${REL_GRADLE_PROJECT_FILE}") + if(DST_GRADLE_PROJECT_FILE MATCHES "\\.template$") + string(REGEX REPLACE "\\.template$" "" DST_GRADLE_PROJECT_FILE "${DST_GRADLE_PROJECT_FILE}") + add_custom_command( + OUTPUT ${DST_GRADLE_PROJECT_FILE} + COMMAND ${CMAKE_COMMAND} -P ${CMAKE_SOURCE_DIR}/cmake/fastcg_template_engine.cmake "${SRC_GRADLE_PROJECT_FILE}" "${DST_GRADLE_PROJECT_FILE}" "project_name=${ARGV0};debuggable=$<IF:$<CONFIG:Debug>,true,false>" + DEPENDS ${SRC_GRADLE_PROJECT_FILE} + ) + else() + add_custom_command( + OUTPUT ${DST_GRADLE_PROJECT_FILE} + COMMAND ${CMAKE_COMMAND} -E copy ${SRC_GRADLE_PROJECT_FILE} ${DST_GRADLE_PROJECT_FILE} + DEPENDS ${SRC_GRADLE_PROJECT_FILE} + ) + endif() + list(APPEND DST_GRADLE_RESOURCE_FILES ${DST_GRADLE_PROJECT_FILE}) + endforeach(SRC_GRADLE_PROJECT_FILE) + + if(DST_GRADLE_RESOURCE_FILES) + add_custom_target( + ${ARGV0}_PREPARE_GRADLE_PROJECT + DEPENDS ${ARGV0} ${DST_GRADLE_RESOURCE_FILES} + ) + else() + add_custom_target( + ${ARGV0}_PREPARE_GRADLE_PROJECT + DEPENDS ${ARGV0} + ) + endif() + + set(FASTCG_ASSETS_DIR "${FASTCG_DEPLOY}/assets/FastCG") + set(ASSETS_DIR "${FASTCG_DEPLOY}/assets/${ARGV0}") + + add_custom_command( + TARGET ${ARGV0}_PREPARE_GRADLE_PROJECT PRE_BUILD + COMMAND ${CMAKE_COMMAND} -E remove_directory ${DST_GRADLE_PROJECT_DIR}/app/src/main/assets/FastCG + COMMAND ${CMAKE_COMMAND} -E copy_directory ${FASTCG_ASSETS_DIR} ${DST_GRADLE_PROJECT_DIR}/app/src/main/assets/FastCG + COMMAND ${CMAKE_COMMAND} -E remove_directory ${DST_GRADLE_PROJECT_DIR}/app/src/main/assets/${ARGV0} + COMMAND ${CMAKE_COMMAND} -E copy_directory ${ASSETS_DIR} ${DST_GRADLE_PROJECT_DIR}/app/src/main/assets/${ARGV0} + COMMAND ${CMAKE_COMMAND} -E copy $<TARGET_FILE:${ARGV0}> ${DST_GRADLE_PROJECT_DIR}/app/src/main/jniLibs/arm64-v8a/lib${ARGV0}.so + ) + + add_custom_target( + ${ARGV0}_BUILD_APK + WORKING_DIRECTORY ${DST_GRADLE_PROJECT_DIR} + COMMAND gradlew assemble$<CONFIG> + BYPRODUCTS ${DST_GRADLE_PROJECT_DIR}/app/build/outputs/apk/$<LOWER_CASE:$<CONFIG>>/app-$<LOWER_CASE:$<CONFIG>>.apk + DEPENDS ${ARGV0}_PREPARE_GRADLE_PROJECT + ) + + add_custom_target( + ${ARGV0}_DEPLOY_APK + COMMAND ${FASTCG_ADB} shell am force-stop com.fastcg.${ARGV0} + COMMAND ${FASTCG_ADB} install ${DST_GRADLE_PROJECT_DIR}/app/build/outputs/apk/$<LOWER_CASE:$<CONFIG>>/app-$<LOWER_CASE:$<CONFIG>>.apk + DEPENDS ${ARGV0}_BUILD_APK + ) +endfunction() + +function(_fastcg_add_android_executable) + set(REMAINING_ARGS ${ARGN}) + list(REMOVE_AT REMAINING_ARGS 0) + # turn executables into shared libraries + add_library(${ARGV0} SHARED ${REMAINING_ARGS}) + + _fastcg_add_apk_targets(${ARGV0}) +endfunction() + function(fastcg_add_executable) - add_executable(${ARGN}) + if(FASTCG_PLATFORM STREQUAL "Android") + _fastcg_add_android_executable(${ARGN}) + else() + add_executable(${ARGN}) + endif() _fastcg_add_definitions(${ARGV0}) _fastcg_set_target_properties(${ARGV0}) target_link_libraries(${ARGV0} ${FastCG_LIBRARIES}) @@ -152,6 +266,8 @@ function(fastcg_add_library) _fastcg_add_definitions(${ARGV0}) endfunction() +# Custom commands + if(DEFINED FASTCG_EXEC) separate_arguments(FASTCG_EXEC_ARGS) if(FASTCG_EXEC STREQUAL "remove") diff --git
a/cmake/fastcg_template_engine.cmake b/cmake/fastcg_template_engine.cmake new file mode 100644 index 00000000..c09a986e --- /dev/null +++ b/cmake/fastcg_template_engine.cmake @@ -0,0 +1,25 @@ +cmake_minimum_required(VERSION 3.10) + +# Usage: +# cmake -P fastcg_template_engine.cmake "path/to/template" "path/to/output" "key1=value1;key2=value2;key3=value3" + +set(template_file ${CMAKE_ARGV3} CACHE FILEPATH "Path to the template file") +set(output_file ${CMAKE_ARGV4} CACHE FILEPATH "Path to the output file") +set(params ${CMAKE_ARGV5} CACHE STRING "Template params as key=value pairs") + +if(template_file STREQUAL "" OR output_file STREQUAL "" OR params STREQUAL "") + message(FATAL_ERROR "You must provide template_file, output_file and params") +endif() + +file(READ ${template_file} file_content) + +foreach(param IN LISTS params) + string(REGEX MATCHALL "([^=]+)=([^=]+)" _ ${param}) + set(key ${CMAKE_MATCH_1}) + set(value ${CMAKE_MATCH_2}) + string(REGEX REPLACE "\\$\\{${key}\\}" "${value}" file_content "${file_content}") +endforeach() + +file(WRITE ${output_file} "${file_content}") + +message(STATUS "Template processing completed for: ${output_file}") \ No newline at end of file diff --git a/dependencies/CMakeLists.txt b/dependencies/CMakeLists.txt index 6b6e3228..fd30c57a 100644 --- a/dependencies/CMakeLists.txt +++ b/dependencies/CMakeLists.txt @@ -1,6 +1,9 @@ -cmake_minimum_required(VERSION 3.8) +cmake_minimum_required(VERSION 3.20) -if(FASTCG_GRAPHICS_SYSTEM STREQUAL "OpenGL") +if(FASTCG_PLATFORM STREQUAL "Android") + add_subdirectory(android_native_app_glue) +endif() +if(FASTCG_GRAPHICS_SYSTEM STREQUAL "OpenGL" AND NOT FASTCG_PLATFORM STREQUAL "Android") add_subdirectory(glew) endif() add_subdirectory(glm) diff --git a/dependencies/SPIRV-Cross/CMakeLists.txt b/dependencies/SPIRV-Cross/CMakeLists.txt index 974c1902..01bb13fc 100644 --- a/dependencies/SPIRV-Cross/CMakeLists.txt +++ b/dependencies/SPIRV-Cross/CMakeLists.txt @@ -19,7 +19,7 @@ # 2. The MIT License, found at . # -cmake_minimum_required(VERSION 3.0) +cmake_minimum_required(VERSION 3.20) set(CMAKE_CXX_STANDARD 11) set(CMAKE_CXX_EXTENSIONS OFF) diff --git a/dependencies/SPIRV-Cross/README.md b/dependencies/SPIRV-Cross/README.md index 3a89e6c2..7389dcb5 100644 --- a/dependencies/SPIRV-Cross/README.md +++ b/dependencies/SPIRV-Cross/README.md @@ -227,7 +227,7 @@ $ pkg-config spirv-cross-c-shared --libs --cflags If the project is installed, it can be found with `find_package()`, e.g.: ``` -cmake_minimum_required(VERSION 3.5) +cmake_minimum_required(VERSION 3.20) set(CMAKE_C_STANDARD 99) project(Test LANGUAGES C) diff --git a/dependencies/VulkanMemoryAllocator/CMakeLists.txt b/dependencies/VulkanMemoryAllocator/CMakeLists.txt index b8c582ef..ba8075f4 100644 --- a/dependencies/VulkanMemoryAllocator/CMakeLists.txt +++ b/dependencies/VulkanMemoryAllocator/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.8) +cmake_minimum_required(VERSION 3.20) project(VulkanMemoryAllocator) diff --git a/dependencies/VulkanMemoryAllocator/include/vk_mem_alloc.h b/dependencies/VulkanMemoryAllocator/include/vk_mem_alloc.h index ef46fc5f..76267483 100644 --- a/dependencies/VulkanMemoryAllocator/include/vk_mem_alloc.h +++ b/dependencies/VulkanMemoryAllocator/include/vk_mem_alloc.h @@ -121,25 +121,25 @@ for user-defined purpose without allocating any real GPU memory. See documentation chapter: \ref statistics.
*/ - #ifdef __cplusplus -extern "C" { +extern "C" +{ #endif #ifndef VULKAN_H_ - #include <vulkan/vulkan.h> +#include <vulkan/vulkan.h> #endif #if !defined(VMA_VULKAN_VERSION) - #if defined(VK_VERSION_1_3) - #define VMA_VULKAN_VERSION 1003000 - #elif defined(VK_VERSION_1_2) - #define VMA_VULKAN_VERSION 1002000 - #elif defined(VK_VERSION_1_1) - #define VMA_VULKAN_VERSION 1001000 - #else - #define VMA_VULKAN_VERSION 1000000 - #endif +#if defined(VK_VERSION_1_3) +#define VMA_VULKAN_VERSION 1003000 +#elif defined(VK_VERSION_1_2) +#define VMA_VULKAN_VERSION 1002000 +#elif defined(VK_VERSION_1_1) +#define VMA_VULKAN_VERSION 1001000 +#else +#define VMA_VULKAN_VERSION 1000000 +#endif #endif #if defined(__ANDROID__) && defined(VK_NO_PROTOTYPES) && VMA_STATIC_VULKAN_FUNCTIONS @@ -162,64 +162,71 @@ extern "C" { extern PFN_vkCreateImage vkCreateImage; extern PFN_vkDestroyImage vkDestroyImage; extern PFN_vkCmdCopyBuffer vkCmdCopyBuffer; - #if VMA_VULKAN_VERSION >= 1001000 - extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2; - extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2; - extern PFN_vkBindBufferMemory2 vkBindBufferMemory2; - extern PFN_vkBindImageMemory2 vkBindImageMemory2; - extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2; - #endif // #if VMA_VULKAN_VERSION >= 1001000 +#if VMA_VULKAN_VERSION >= 1001000 + extern PFN_vkGetBufferMemoryRequirements2 vkGetBufferMemoryRequirements2; + extern PFN_vkGetImageMemoryRequirements2 vkGetImageMemoryRequirements2; + extern PFN_vkBindBufferMemory2 vkBindBufferMemory2; + extern PFN_vkBindImageMemory2 vkBindImageMemory2; + extern PFN_vkGetPhysicalDeviceMemoryProperties2 vkGetPhysicalDeviceMemoryProperties2; +#endif // #if VMA_VULKAN_VERSION >= 1001000 #endif // #if defined(__ANDROID__) && VMA_STATIC_VULKAN_FUNCTIONS && VK_NO_PROTOTYPES +#if defined(__ANDROID__) +#ifdef VK_ERROR_UNKNOWN +#undef VK_ERROR_UNKNOWN +#endif +#define VK_ERROR_UNKNOWN ((VkResult)-13) +#endif + #if !defined(VMA_DEDICATED_ALLOCATION) - #if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation - #define VMA_DEDICATED_ALLOCATION 1 - #else - #define VMA_DEDICATED_ALLOCATION 0 - #endif +#if VK_KHR_get_memory_requirements2 && VK_KHR_dedicated_allocation +#define VMA_DEDICATED_ALLOCATION 1 +#else +#define VMA_DEDICATED_ALLOCATION 0 +#endif #endif #if !defined(VMA_BIND_MEMORY2) - #if VK_KHR_bind_memory2 - #define VMA_BIND_MEMORY2 1 - #else - #define VMA_BIND_MEMORY2 0 - #endif +#if VK_KHR_bind_memory2 +#define VMA_BIND_MEMORY2 1 +#else +#define VMA_BIND_MEMORY2 0 +#endif #endif #if !defined(VMA_MEMORY_BUDGET) - #if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000) - #define VMA_MEMORY_BUDGET 1 - #else - #define VMA_MEMORY_BUDGET 0 - #endif +#if VK_EXT_memory_budget && (VK_KHR_get_physical_device_properties2 || VMA_VULKAN_VERSION >= 1001000) +#define VMA_MEMORY_BUDGET 1 +#else +#define VMA_MEMORY_BUDGET 0 +#endif #endif // Defined to 1 when VK_KHR_buffer_device_address device extension or equivalent core Vulkan 1.2 feature is defined in its headers. #if !defined(VMA_BUFFER_DEVICE_ADDRESS) - #if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000 - #define VMA_BUFFER_DEVICE_ADDRESS 1 - #else - #define VMA_BUFFER_DEVICE_ADDRESS 0 - #endif +#if VK_KHR_buffer_device_address || VMA_VULKAN_VERSION >= 1002000 +#define VMA_BUFFER_DEVICE_ADDRESS 1 +#else +#define VMA_BUFFER_DEVICE_ADDRESS 0 +#endif #endif // Defined to 1 when VK_EXT_memory_priority device extension is defined in Vulkan headers.
 #if !defined(VMA_MEMORY_PRIORITY)
-    #if VK_EXT_memory_priority
-        #define VMA_MEMORY_PRIORITY 1
-    #else
-        #define VMA_MEMORY_PRIORITY 0
-    #endif
+#if VK_EXT_memory_priority
+#define VMA_MEMORY_PRIORITY 1
+#else
+#define VMA_MEMORY_PRIORITY 0
+#endif
 #endif
 
 // Defined to 1 when VK_KHR_external_memory device extension is defined in Vulkan headers.
 #if !defined(VMA_EXTERNAL_MEMORY)
-    #if VK_KHR_external_memory
-        #define VMA_EXTERNAL_MEMORY 1
-    #else
-        #define VMA_EXTERNAL_MEMORY 0
-    #endif
+#if VK_KHR_external_memory
+#define VMA_EXTERNAL_MEMORY 1
+#else
+#define VMA_EXTERNAL_MEMORY 0
+#endif
 #endif
 
 // Define these macros to decorate all public functions with additional code,
@@ -228,10 +235,10 @@ extern "C" {
 // #define VMA_CALL_PRE __declspec(dllexport)
 // #define VMA_CALL_POST __cdecl
 #ifndef VMA_CALL_PRE
-    #define VMA_CALL_PRE
+#define VMA_CALL_PRE
 #endif
 #ifndef VMA_CALL_POST
-    #define VMA_CALL_POST
+#define VMA_CALL_POST
 #endif
 
 // Define this macro to decorate pointers with an attribute specifying the
@@ -246,49 +253,49 @@ extern "C" {
 //   this means the number of memory heaps available in the device associated
 //   with the VmaAllocator being dealt with.
 #ifndef VMA_LEN_IF_NOT_NULL
-    #define VMA_LEN_IF_NOT_NULL(len)
+#define VMA_LEN_IF_NOT_NULL(len)
 #endif
 
 // The VMA_NULLABLE macro is defined to be _Nullable when compiling with Clang.
 // see: https://clang.llvm.org/docs/AttributeReference.html#nullable
 #ifndef VMA_NULLABLE
-    #ifdef __clang__
-        #define VMA_NULLABLE _Nullable
-    #else
-        #define VMA_NULLABLE
-    #endif
+#ifdef __clang__
+#define VMA_NULLABLE _Nullable
+#else
+#define VMA_NULLABLE
+#endif
 #endif
 
 // The VMA_NOT_NULL macro is defined to be _Nonnull when compiling with Clang.
 // see: https://clang.llvm.org/docs/AttributeReference.html#nonnull
 #ifndef VMA_NOT_NULL
-    #ifdef __clang__
-        #define VMA_NOT_NULL _Nonnull
-    #else
-        #define VMA_NOT_NULL
-    #endif
+#ifdef __clang__
+#define VMA_NOT_NULL _Nonnull
+#else
+#define VMA_NOT_NULL
+#endif
 #endif
 
 // If non-dispatchable handles are represented as pointers then we can give
 // then nullability annotations
 #ifndef VMA_NOT_NULL_NON_DISPATCHABLE
-    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
-        #define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
-    #else
-        #define VMA_NOT_NULL_NON_DISPATCHABLE
-    #endif
+#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+#define VMA_NOT_NULL_NON_DISPATCHABLE VMA_NOT_NULL
+#else
+#define VMA_NOT_NULL_NON_DISPATCHABLE
+#endif
 #endif
 
 #ifndef VMA_NULLABLE_NON_DISPATCHABLE
-    #if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__) ) || defined(_M_X64) || defined(__ia64) || defined (_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
-        #define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
-    #else
-        #define VMA_NULLABLE_NON_DISPATCHABLE
-    #endif
+#if defined(__LP64__) || defined(_WIN64) || (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || defined(__powerpc64__)
+#define VMA_NULLABLE_NON_DISPATCHABLE VMA_NULLABLE
+#else
+#define VMA_NULLABLE_NON_DISPATCHABLE
+#endif
 #endif
 
 #ifndef VMA_STATS_STRING_ENABLED
-    #define VMA_STATS_STRING_ENABLED 1
+#define VMA_STATS_STRING_ENABLED 1
 #endif
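Because every configuration macro above is wrapped in a `#if !defined(...)` or `#ifndef` guard, a consumer can set them from the build system instead of patching the vendored header. A minimal sketch, assuming a hypothetical target name; the macro names are VMA's own, and `VMA_STATIC_VULKAN_FUNCTIONS`/`VMA_DYNAMIC_VULKAN_FUNCTIONS` are the header's standard function-loading switches, relevant here because Android typically resolves Vulkan entry points at runtime:

```cmake
# Sketch: configure VMA from CMake rather than editing vk_mem_alloc.h.
# "MyVulkanApp" is an illustrative target name, not one from this patch.
target_compile_definitions(MyVulkanApp PRIVATE
    VMA_STATIC_VULKAN_FUNCTIONS=0  # do not assume statically linked Vulkan functions
    VMA_DYNAMIC_VULKAN_FUNCTIONS=1 # let VMA fetch them via vkGet*ProcAddr
    VMA_STATS_STRING_ENABLED=0     # compile out the JSON statistics dump
)
```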
//////////////////////////////////////////////////////////////////////////////// @@ -302,2278 +309,2278 @@ extern "C" { // Sections for managing code placement in file, only for development purposes e.g. for convenient folding inside an IDE. #ifndef _VMA_ENUM_DECLARATIONS -/** -\addtogroup group_init -@{ -*/ - -/// Flags for created #VmaAllocator. -typedef enum VmaAllocatorCreateFlagBits -{ - /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you. - - Using this flag may increase performance because internal mutexes are not used. + /** + \addtogroup group_init + @{ */ - VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001, - /** \brief Enables usage of VK_KHR_dedicated_allocation extension. - - The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. - When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. - - Using this extension will automatically allocate dedicated blocks of memory for - some buffers and images instead of suballocating place for them out of bigger - memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT - flag) when it is recommended by the driver. It may improve performance on some - GPUs. - You may set this flag only if you found out that following device extensions are - supported, you enabled them while creating Vulkan device passed as - VmaAllocatorCreateInfo::device, and you want them to be used internally by this - library: + /// Flags for created #VmaAllocator. + typedef enum VmaAllocatorCreateFlagBits + { + /** \brief Allocator and all objects created from it will not be synchronized internally, so you must guarantee they are used from only one thread at a time or synchronized externally by you. - - VK_KHR_get_memory_requirements2 (device extension) - - VK_KHR_dedicated_allocation (device extension) + Using this flag may increase performance because internal mutexes are not used. + */ + VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT = 0x00000001, + /** \brief Enables usage of VK_KHR_dedicated_allocation extension. - When this flag is set, you can experience following warnings reported by Vulkan - validation layer. You can ignore them. + The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. + When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. - > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer. - */ - VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002, - /** - Enables usage of VK_KHR_bind_memory2 extension. + Using this extension will automatically allocate dedicated blocks of memory for + some buffers and images instead of suballocating place for them out of bigger + memory blocks (as if you explicitly used #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT + flag) when it is recommended by the driver. It may improve performance on some + GPUs. - The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. - When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. 
+ You may set this flag only if you found out that following device extensions are + supported, you enabled them while creating Vulkan device passed as + VmaAllocatorCreateInfo::device, and you want them to be used internally by this + library: - You may set this flag only if you found out that this device extension is supported, - you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, - and you want it to be used internally by this library. + - VK_KHR_get_memory_requirements2 (device extension) + - VK_KHR_dedicated_allocation (device extension) - The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`, - which allow to pass a chain of `pNext` structures while binding. - This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2(). - */ - VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004, - /** - Enables usage of VK_EXT_memory_budget extension. + When this flag is set, you can experience following warnings reported by Vulkan + validation layer. You can ignore them. - You may set this flag only if you found out that this device extension is supported, - you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, - and you want it to be used internally by this library, along with another instance extension - VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted). + > vkBindBufferMemory(): Binding memory to buffer 0x2d but vkGetBufferMemoryRequirements() has not been called on that buffer. + */ + VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT = 0x00000002, + /** + Enables usage of VK_KHR_bind_memory2 extension. - The extension provides query for current memory usage and budget, which will probably - be more accurate than an estimation used by the library otherwise. - */ - VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008, - /** - Enables usage of VK_AMD_device_coherent_memory extension. + The flag works only if VmaAllocatorCreateInfo::vulkanApiVersion `== VK_API_VERSION_1_0`. + When it is `VK_API_VERSION_1_1`, the flag is ignored because the extension has been promoted to Vulkan 1.1. - You may set this flag only if you: + You may set this flag only if you found out that this device extension is supported, + you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + and you want it to be used internally by this library. - - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, - - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device, - - want it to be used internally by this library. + The extension provides functions `vkBindBufferMemory2KHR` and `vkBindImageMemory2KHR`, + which allow to pass a chain of `pNext` structures while binding. + This flag is required if you use `pNext` parameter in vmaBindBufferMemory2() or vmaBindImageMemory2(). + */ + VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT = 0x00000004, + /** + Enables usage of VK_EXT_memory_budget extension. - The extension and accompanying device feature provide access to memory types with - `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. - They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR. 
+ You may set this flag only if you found out that this device extension is supported, + you enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + and you want it to be used internally by this library, along with another instance extension + VK_KHR_get_physical_device_properties2, which is required by it (or Vulkan 1.1, where this extension is promoted). - When the extension is not enabled, such memory types are still enumerated, but their usage is illegal. - To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type, - returning `VK_ERROR_FEATURE_NOT_PRESENT`. - */ - VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010, - /** - Enables usage of "buffer device address" feature, which allows you to use function - `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader. + The extension provides query for current memory usage and budget, which will probably + be more accurate than an estimation used by the library otherwise. + */ + VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT = 0x00000008, + /** + Enables usage of VK_AMD_device_coherent_memory extension. - You may set this flag only if you: + You may set this flag only if you: - 1. (For Vulkan version < 1.2) Found as available and enabled device extension - VK_KHR_buffer_device_address. - This extension is promoted to core Vulkan 1.2. - 2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`. + - found out that this device extension is supported and enabled it while creating Vulkan device passed as VmaAllocatorCreateInfo::device, + - checked that `VkPhysicalDeviceCoherentMemoryFeaturesAMD::deviceCoherentMemory` is true and set it while creating the Vulkan device, + - want it to be used internally by this library. - When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA. - The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to - allocated memory blocks wherever it might be needed. + The extension and accompanying device feature provide access to memory types with + `VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD` and `VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD` flags. + They are useful mostly for writing breadcrumb markers - a common method for debugging GPU crash/hang/TDR. - For more information, see documentation chapter \ref enabling_buffer_device_address. - */ - VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020, - /** - Enables usage of VK_EXT_memory_priority extension in the library. + When the extension is not enabled, such memory types are still enumerated, but their usage is illegal. + To protect from this error, if you don't create the allocator with this flag, it will refuse to allocate any memory or create a custom pool in such memory type, + returning `VK_ERROR_FEATURE_NOT_PRESENT`. + */ + VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT = 0x00000010, + /** + Enables usage of "buffer device address" feature, which allows you to use function + `vkGetBufferDeviceAddress*` to get raw GPU pointer to a buffer and pass it for usage inside a shader. - You may set this flag only if you found available and enabled this device extension, - along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`, - while creating Vulkan device passed as VmaAllocatorCreateInfo::device. 
+ You may set this flag only if you: - When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority - are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored. + 1. (For Vulkan version < 1.2) Found as available and enabled device extension + VK_KHR_buffer_device_address. + This extension is promoted to core Vulkan 1.2. + 2. Found as available and enabled device feature `VkPhysicalDeviceBufferDeviceAddressFeatures::bufferDeviceAddress`. - A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. - Larger values are higher priority. The granularity of the priorities is implementation-dependent. - It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`. - The value to be used for default priority is 0.5. - For more details, see the documentation of the VK_EXT_memory_priority extension. - */ - VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040, + When this flag is set, you can create buffers with `VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT` using VMA. + The library automatically adds `VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT` to + allocated memory blocks wherever it might be needed. - VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF -} VmaAllocatorCreateFlagBits; -/// See #VmaAllocatorCreateFlagBits. -typedef VkFlags VmaAllocatorCreateFlags; + For more information, see documentation chapter \ref enabling_buffer_device_address. + */ + VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT = 0x00000020, + /** + Enables usage of VK_EXT_memory_priority extension in the library. + + You may set this flag only if you found available and enabled this device extension, + along with `VkPhysicalDeviceMemoryPriorityFeaturesEXT::memoryPriority == VK_TRUE`, + while creating Vulkan device passed as VmaAllocatorCreateInfo::device. + + When this flag is used, VmaAllocationCreateInfo::priority and VmaPoolCreateInfo::priority + are used to set priorities of allocated Vulkan memory. Without it, these variables are ignored. + + A priority must be a floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. + Larger values are higher priority. The granularity of the priorities is implementation-dependent. + It is automatically passed to every call to `vkAllocateMemory` done by the library using structure `VkMemoryPriorityAllocateInfoEXT`. + The value to be used for default priority is 0.5. + For more details, see the documentation of the VK_EXT_memory_priority extension. + */ + VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT = 0x00000040, -/** @} */ + VMA_ALLOCATOR_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF + } VmaAllocatorCreateFlagBits; + /// See #VmaAllocatorCreateFlagBits. + typedef VkFlags VmaAllocatorCreateFlags; -/** -\addtogroup group_alloc -@{ -*/ + /** @} */ -/// \brief Intended usage of the allocated memory. -typedef enum VmaMemoryUsage -{ - /** No intended memory usage specified. - Use other members of VmaAllocationCreateInfo to specify your requirements. - */ - VMA_MEMORY_USAGE_UNKNOWN = 0, - /** - \deprecated Obsolete, preserved for backward compatibility. - Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. - */ - VMA_MEMORY_USAGE_GPU_ONLY = 1, - /** - \deprecated Obsolete, preserved for backward compatibility. - Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`. 
- */ - VMA_MEMORY_USAGE_CPU_ONLY = 2, - /** - \deprecated Obsolete, preserved for backward compatibility. - Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. - */ - VMA_MEMORY_USAGE_CPU_TO_GPU = 3, - /** - \deprecated Obsolete, preserved for backward compatibility. - Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`. - */ - VMA_MEMORY_USAGE_GPU_TO_CPU = 4, /** - \deprecated Obsolete, preserved for backward compatibility. - Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + \addtogroup group_alloc + @{ */ - VMA_MEMORY_USAGE_CPU_COPY = 5, - /** - Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`. - Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation. - Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`. - - Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. - */ - VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6, - /** - Selects best memory type automatically. - This flag is recommended for most common use cases. + /// \brief Intended usage of the allocated memory. + typedef enum VmaMemoryUsage + { + /** No intended memory usage specified. + Use other members of VmaAllocationCreateInfo to specify your requirements. + */ + VMA_MEMORY_USAGE_UNKNOWN = 0, + /** + \deprecated Obsolete, preserved for backward compatibility. + Prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + */ + VMA_MEMORY_USAGE_GPU_ONLY = 1, + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT` and `VK_MEMORY_PROPERTY_HOST_COHERENT_BIT`. + */ + VMA_MEMORY_USAGE_CPU_ONLY = 2, + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + */ + VMA_MEMORY_USAGE_CPU_TO_GPU = 3, + /** + \deprecated Obsolete, preserved for backward compatibility. + Guarantees `VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT`, prefers `VK_MEMORY_PROPERTY_HOST_CACHED_BIT`. + */ + VMA_MEMORY_USAGE_GPU_TO_CPU = 4, + /** + \deprecated Obsolete, preserved for backward compatibility. + Prefers not `VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT`. + */ + VMA_MEMORY_USAGE_CPU_COPY = 5, + /** + Lazily allocated GPU memory having `VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT`. + Exists mostly on mobile platforms. Using it on desktop PC or other GPUs with no such memory type present will fail the allocation. - When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), - you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT - in VmaAllocationCreateInfo::flags. + Usage: Memory for transient attachment images (color attachments, depth attachments etc.), created with `VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT`. - It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. - vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() - and not with generic memory allocation functions. - */ - VMA_MEMORY_USAGE_AUTO = 7, - /** - Selects best memory type automatically with preference for GPU (device) memory. 
+ Allocations with this usage are always created as dedicated - it implies #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + */ + VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED = 6, + /** + Selects best memory type automatically. + This flag is recommended for most common use cases. + + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. + + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. + vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO = 7, + /** + Selects best memory type automatically with preference for GPU (device) memory. - When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), - you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT - in VmaAllocationCreateInfo::flags. + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. - It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. - vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() - and not with generic memory allocation functions. - */ - VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8, - /** - Selects best memory type automatically with preference for CPU (host) memory. + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. + vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE = 8, + /** + Selects best memory type automatically with preference for CPU (host) memory. - When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), - you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT - in VmaAllocationCreateInfo::flags. + When using this flag, if you want to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT), + you must pass one of the flags: #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT + in VmaAllocationCreateInfo::flags. - It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. - vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() - and not with generic memory allocation functions. - */ - VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9, + It can be used only with functions that let the library know `VkBufferCreateInfo` or `VkImageCreateInfo`, e.g. 
+ vmaCreateBuffer(), vmaCreateImage(), vmaFindMemoryTypeIndexForBufferInfo(), vmaFindMemoryTypeIndexForImageInfo() + and not with generic memory allocation functions. + */ + VMA_MEMORY_USAGE_AUTO_PREFER_HOST = 9, - VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF -} VmaMemoryUsage; + VMA_MEMORY_USAGE_MAX_ENUM = 0x7FFFFFFF + } VmaMemoryUsage; -/// Flags to be passed as VmaAllocationCreateInfo::flags. -typedef enum VmaAllocationCreateFlagBits -{ - /** \brief Set this flag if the allocation should have its own memory block. + /// Flags to be passed as VmaAllocationCreateInfo::flags. + typedef enum VmaAllocationCreateFlagBits + { + /** \brief Set this flag if the allocation should have its own memory block. - Use it for special, big resources, like fullscreen images used as attachments. - */ - VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001, + Use it for special, big resources, like fullscreen images used as attachments. + */ + VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT = 0x00000001, - /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block. + /** \brief Set this flag to only try to allocate from existing `VkDeviceMemory` blocks and never create new such block. - If new allocation cannot be placed in any of the existing blocks, allocation - fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error. + If new allocation cannot be placed in any of the existing blocks, allocation + fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY` error. - You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and - #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense. - */ - VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002, - /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it. + You should not use #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT and + #VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT at the same time. It makes no sense. + */ + VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT = 0x00000002, + /** \brief Set this flag to use a memory that will be persistently mapped and retrieve pointer to it. - Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData. + Pointer to mapped memory will be returned through VmaAllocationInfo::pMappedData. - It is valid to use this flag for allocation made from memory type that is not - `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is - useful if you need an allocation that is efficient to use on GPU - (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that - support it (e.g. Intel GPU). - */ - VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004, - /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead. + It is valid to use this flag for allocation made from memory type that is not + `HOST_VISIBLE`. This flag is then ignored and memory is not mapped. This is + useful if you need an allocation that is efficient to use on GPU + (`DEVICE_LOCAL`) and still want to map it directly if possible on platforms that + support it (e.g. Intel GPU). + */ + VMA_ALLOCATION_CREATE_MAPPED_BIT = 0x00000004, + /** \deprecated Preserved for backward compatibility. Consider using vmaSetAllocationName() instead. - Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a - null-terminated string. Instead of copying pointer value, a local copy of the - string is made and stored in allocation's `pName`. 
The string is automatically - freed together with the allocation. It is also used in vmaBuildStatsString(). - */ - VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020, - /** Allocation will be created from upper stack in a double stack pool. + Set this flag to treat VmaAllocationCreateInfo::pUserData as pointer to a + null-terminated string. Instead of copying pointer value, a local copy of the + string is made and stored in allocation's `pName`. The string is automatically + freed together with the allocation. It is also used in vmaBuildStatsString(). + */ + VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT = 0x00000020, + /** Allocation will be created from upper stack in a double stack pool. - This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag. - */ - VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040, - /** Create both buffer/image and allocation, but don't bind them together. - It is useful when you want to bind yourself to do some more advanced binding, e.g. using some extensions. - The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage(). - Otherwise it is ignored. - - If you want to make sure the new buffer/image is not tied to the new memory allocation - through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block, - use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT. - */ - VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080, - /** Create allocation only if additional device memory required for it, if any, won't exceed - memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`. - */ - VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100, - /** \brief Set this flag if the allocated memory will have aliasing resources. + This flag is only allowed for custom pools created with #VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT flag. + */ + VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = 0x00000040, + /** Create both buffer/image and allocation, but don't bind them together. + It is useful when you want to bind yourself to do some more advanced binding, e.g. using some extensions. + The flag is meaningful only with functions that bind by default: vmaCreateBuffer(), vmaCreateImage(). + Otherwise it is ignored. + + If you want to make sure the new buffer/image is not tied to the new memory allocation + through `VkMemoryDedicatedAllocateInfoKHR` structure in case the allocation ends up in its own memory block, + use also flag #VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT. + */ + VMA_ALLOCATION_CREATE_DONT_BIND_BIT = 0x00000080, + /** Create allocation only if additional device memory required for it, if any, won't exceed + memory budget. Otherwise return `VK_ERROR_OUT_OF_DEVICE_MEMORY`. + */ + VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT = 0x00000100, + /** \brief Set this flag if the allocated memory will have aliasing resources. - Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified. - Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors. - */ - VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200, - /** - Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). + Usage of this flag prevents supplying `VkMemoryDedicatedAllocateInfoKHR` when #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT is specified. 
+ Otherwise created dedicated memory will not be suitable for aliasing resources, resulting in Vulkan Validation Layer errors. + */ + VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT = 0x00000200, + /** + Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). - - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, - you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. - - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. - This includes allocations created in \ref custom_memory_pools. + - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, + you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. + - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. + This includes allocations created in \ref custom_memory_pools. - Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number, - never read or accessed randomly, so a memory type can be selected that is uncached and write-combined. + Declares that mapped memory will only be written sequentially, e.g. using `memcpy()` or a loop writing number-by-number, + never read or accessed randomly, so a memory type can be selected that is uncached and write-combined. - \warning Violating this declaration may work correctly, but will likely be very slow. - Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;` - Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once. - */ - VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400, - /** - Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). + \warning Violating this declaration may work correctly, but will likely be very slow. + Watch out for implicit reads introduced by doing e.g. `pMappedData[i] += x;` + Better prepare your data in a local variable and `memcpy()` it to the mapped pointer all at once. + */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT = 0x00000400, + /** + Requests possibility to map the allocation (using vmaMapMemory() or #VMA_ALLOCATION_CREATE_MAPPED_BIT). - - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, - you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. - - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. - This includes allocations created in \ref custom_memory_pools. + - If you use #VMA_MEMORY_USAGE_AUTO or other `VMA_MEMORY_USAGE_AUTO*` value, + you must use this flag to be able to map the allocation. Otherwise, mapping is incorrect. + - If you use other value of #VmaMemoryUsage, this flag is ignored and mapping is always possible in memory types that are `HOST_VISIBLE`. + This includes allocations created in \ref custom_memory_pools. - Declares that mapped memory can be read, written, and accessed in random order, - so a `HOST_CACHED` memory type is required. 
- */ - VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800, - /** - Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT, - it says that despite request for host access, a not-`HOST_VISIBLE` memory type can be selected - if it may improve performance. - - By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type - (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and - issue an explicit transfer to write/read your data. - To prepare for this possibility, don't forget to add appropriate flags like - `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image. - */ - VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000, - /** Allocation strategy that chooses smallest possible free range for the allocation - to minimize memory usage and fragmentation, possibly at the expense of allocation time. - */ - VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000, - /** Allocation strategy that chooses first suitable free range for the allocation - - not necessarily in terms of the smallest offset but the one that is easiest and fastest to find - to minimize allocation time, possibly at the expense of allocation quality. - */ - VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000, - /** Allocation strategy that chooses always the lowest offset in available space. - This is not the most efficient strategy but achieves highly packed data. - Used internally by defragmentation, not recommended in typical usage. - */ - VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = 0x00040000, - /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT. - */ - VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, - /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT. - */ - VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, - /** A bit mask to extract only `STRATEGY` bits from entire set of flags. - */ - VMA_ALLOCATION_CREATE_STRATEGY_MASK = - VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT | - VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | - VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, - - VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF -} VmaAllocationCreateFlagBits; -/// See #VmaAllocationCreateFlagBits. -typedef VkFlags VmaAllocationCreateFlags; - -/// Flags to be passed as VmaPoolCreateInfo::flags. -typedef enum VmaPoolCreateFlagBits -{ - /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored. - - This is an optional optimization flag. - - If you always allocate using vmaCreateBuffer(), vmaCreateImage(), - vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator - knows exact type of your allocations so it can handle Buffer-Image Granularity - in the optimal way. - - If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(), - exact type of such allocations is not known, so allocator must be conservative - in handling Buffer-Image Granularity, which can lead to suboptimal allocation - (wasted memory). 
In that case, if you can make sure you always allocate only - buffers and linear images or only optimal images out of this pool, use this flag - to make allocator disregard Buffer-Image Granularity and so make allocations - faster and more optimal. - */ - VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002, + Declares that mapped memory can be read, written, and accessed in random order, + so a `HOST_CACHED` memory type is required. + */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT = 0x00000800, + /** + Together with #VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or #VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT, + it says that despite request for host access, a not-`HOST_VISIBLE` memory type can be selected + if it may improve performance. + + By using this flag, you declare that you will check if the allocation ended up in a `HOST_VISIBLE` memory type + (e.g. using vmaGetAllocationMemoryProperties()) and if not, you will create some "staging" buffer and + issue an explicit transfer to write/read your data. + To prepare for this possibility, don't forget to add appropriate flags like + `VK_BUFFER_USAGE_TRANSFER_DST_BIT`, `VK_BUFFER_USAGE_TRANSFER_SRC_BIT` to the parameters of created buffer or image. + */ + VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT = 0x00001000, + /** Allocation strategy that chooses smallest possible free range for the allocation + to minimize memory usage and fragmentation, possibly at the expense of allocation time. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = 0x00010000, + /** Allocation strategy that chooses first suitable free range for the allocation - + not necessarily in terms of the smallest offset but the one that is easiest and fastest to find + to minimize allocation time, possibly at the expense of allocation quality. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = 0x00020000, + /** Allocation strategy that chooses always the lowest offset in available space. + This is not the most efficient strategy but achieves highly packed data. + Used internally by defragmentation, not recommended in typical usage. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = 0x00040000, + /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT. + */ + VMA_ALLOCATION_CREATE_STRATEGY_BEST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, + /** Alias to #VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT. + */ + VMA_ALLOCATION_CREATE_STRATEGY_FIRST_FIT_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, + /** A bit mask to extract only `STRATEGY` bits from entire set of flags. + */ + VMA_ALLOCATION_CREATE_STRATEGY_MASK = + VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT | + VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT | + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + + VMA_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF + } VmaAllocationCreateFlagBits; + /// See #VmaAllocationCreateFlagBits. + typedef VkFlags VmaAllocationCreateFlags; + + /// Flags to be passed as VmaPoolCreateInfo::flags. + typedef enum VmaPoolCreateFlagBits + { + /** \brief Use this flag if you always allocate only buffers and linear images or only optimal images out of this pool and so Buffer-Image Granularity can be ignored. + + This is an optional optimization flag. + + If you always allocate using vmaCreateBuffer(), vmaCreateImage(), + vmaAllocateMemoryForBuffer(), then you don't need to use it because allocator + knows exact type of your allocations so it can handle Buffer-Image Granularity + in the optimal way. 
+ + If you also allocate using vmaAllocateMemoryForImage() or vmaAllocateMemory(), + exact type of such allocations is not known, so allocator must be conservative + in handling Buffer-Image Granularity, which can lead to suboptimal allocation + (wasted memory). In that case, if you can make sure you always allocate only + buffers and linear images or only optimal images out of this pool, use this flag + to make allocator disregard Buffer-Image Granularity and so make allocations + faster and more optimal. + */ + VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT = 0x00000002, - /** \brief Enables alternative, linear allocation algorithm in this pool. + /** \brief Enables alternative, linear allocation algorithm in this pool. - Specify this flag to enable linear allocation algorithm, which always creates - new allocations after last one and doesn't reuse space from allocations freed in - between. It trades memory consumption for simplified algorithm and data - structure, which has better performance and uses less memory for metadata. + Specify this flag to enable linear allocation algorithm, which always creates + new allocations after last one and doesn't reuse space from allocations freed in + between. It trades memory consumption for simplified algorithm and data + structure, which has better performance and uses less memory for metadata. - By using this flag, you can achieve behavior of free-at-once, stack, - ring buffer, and double stack. - For details, see documentation chapter \ref linear_algorithm. - */ - VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004, + By using this flag, you can achieve behavior of free-at-once, stack, + ring buffer, and double stack. + For details, see documentation chapter \ref linear_algorithm. + */ + VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT = 0x00000004, - /** Bit mask to extract only `ALGORITHM` bits from entire set of flags. - */ - VMA_POOL_CREATE_ALGORITHM_MASK = - VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT, + /** Bit mask to extract only `ALGORITHM` bits from entire set of flags. + */ + VMA_POOL_CREATE_ALGORITHM_MASK = + VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT, - VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF -} VmaPoolCreateFlagBits; -/// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits. -typedef VkFlags VmaPoolCreateFlags; + VMA_POOL_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF + } VmaPoolCreateFlagBits; + /// Flags to be passed as VmaPoolCreateInfo::flags. See #VmaPoolCreateFlagBits. + typedef VkFlags VmaPoolCreateFlags; -/// Flags to be passed as VmaDefragmentationInfo::flags. -typedef enum VmaDefragmentationFlagBits -{ - /* \brief Use simple but fast algorithm for defragmentation. - May not achieve best results but will require least time to compute and least allocations to copy. - */ - VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1, - /* \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified. - Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved. - */ - VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2, - /* \brief Perform full defragmentation of memory. - Can result in notably more time to compute and allocations to copy, but will achieve best memory packing. - */ - VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4, - /** \brief Use the most roboust algorithm at the cost of time to compute and number of copies to make. 
- Only available when bufferImageGranularity is greater than 1, since it aims to reduce - alignment issues between different types of resources. - Otherwise falls back to same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT. - */ - VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8, + /// Flags to be passed as VmaDefragmentationInfo::flags. + typedef enum VmaDefragmentationFlagBits + { + /* \brief Use simple but fast algorithm for defragmentation. + May not achieve best results but will require least time to compute and least allocations to copy. + */ + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT = 0x1, + /* \brief Default defragmentation algorithm, applied also when no `ALGORITHM` flag is specified. + Offers a balance between defragmentation quality and the amount of allocations and bytes that need to be moved. + */ + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT = 0x2, + /* \brief Perform full defragmentation of memory. + Can result in notably more time to compute and allocations to copy, but will achieve best memory packing. + */ + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT = 0x4, + /** \brief Use the most roboust algorithm at the cost of time to compute and number of copies to make. + Only available when bufferImageGranularity is greater than 1, since it aims to reduce + alignment issues between different types of resources. + Otherwise falls back to same behavior as #VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT. + */ + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT = 0x8, - /// A bit mask to extract only `ALGORITHM` bits from entire set of flags. - VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK = - VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT | - VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT | - VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT | - VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT, + /// A bit mask to extract only `ALGORITHM` bits from entire set of flags. + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK = + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FAST_BIT | + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT | + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_FULL_BIT | + VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT, - VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF -} VmaDefragmentationFlagBits; -/// See #VmaDefragmentationFlagBits. -typedef VkFlags VmaDefragmentationFlags; + VMA_DEFRAGMENTATION_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF + } VmaDefragmentationFlagBits; + /// See #VmaDefragmentationFlagBits. + typedef VkFlags VmaDefragmentationFlags; -/// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove. -typedef enum VmaDefragmentationMoveOperation -{ - /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass(). - VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0, - /// Set this value if you cannot move the allocation. New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged. - VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1, - /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed. - VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2, -} VmaDefragmentationMoveOperation; + /// Operation performed on single defragmentation move. See structure #VmaDefragmentationMove. 
+ typedef enum VmaDefragmentationMoveOperation + { + /// Buffer/image has been recreated at `dstTmpAllocation`, data has been copied, old buffer/image has been destroyed. `srcAllocation` should be changed to point to the new place. This is the default value set by vmaBeginDefragmentationPass(). + VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY = 0, + /// Set this value if you cannot move the allocation. New place reserved at `dstTmpAllocation` will be freed. `srcAllocation` will remain unchanged. + VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE = 1, + /// Set this value if you decide to abandon the allocation and you destroyed the buffer/image. New place reserved at `dstTmpAllocation` will be freed, along with `srcAllocation`, which will be destroyed. + VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY = 2, + } VmaDefragmentationMoveOperation; -/** @} */ + /** @} */ -/** -\addtogroup group_virtual -@{ -*/ + /** + \addtogroup group_virtual + @{ + */ -/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. -typedef enum VmaVirtualBlockCreateFlagBits -{ - /** \brief Enables alternative, linear allocation algorithm in this virtual block. + /// Flags to be passed as VmaVirtualBlockCreateInfo::flags. + typedef enum VmaVirtualBlockCreateFlagBits + { + /** \brief Enables alternative, linear allocation algorithm in this virtual block. - Specify this flag to enable linear allocation algorithm, which always creates - new allocations after last one and doesn't reuse space from allocations freed in - between. It trades memory consumption for simplified algorithm and data - structure, which has better performance and uses less memory for metadata. + Specify this flag to enable linear allocation algorithm, which always creates + new allocations after last one and doesn't reuse space from allocations freed in + between. It trades memory consumption for simplified algorithm and data + structure, which has better performance and uses less memory for metadata. - By using this flag, you can achieve behavior of free-at-once, stack, - ring buffer, and double stack. - For details, see documentation chapter \ref linear_algorithm. - */ - VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001, + By using this flag, you can achieve behavior of free-at-once, stack, + ring buffer, and double stack. + For details, see documentation chapter \ref linear_algorithm. + */ + VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT = 0x00000001, - /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags. - */ - VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK = - VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT, + /** \brief Bit mask to extract only `ALGORITHM` bits from entire set of flags. + */ + VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK = + VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT, - VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF -} VmaVirtualBlockCreateFlagBits; -/// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits. -typedef VkFlags VmaVirtualBlockCreateFlags; + VMA_VIRTUAL_BLOCK_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF + } VmaVirtualBlockCreateFlagBits; + /// Flags to be passed as VmaVirtualBlockCreateInfo::flags. See #VmaVirtualBlockCreateFlagBits. + typedef VkFlags VmaVirtualBlockCreateFlags; -/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. -typedef enum VmaVirtualAllocationCreateFlagBits -{ - /** \brief Allocation will be created from upper stack in a double stack pool. + /// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. 
+ typedef enum VmaVirtualAllocationCreateFlagBits + { + /** \brief Allocation will be created from upper stack in a double stack pool. - This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag. - */ - VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT, - /** \brief Allocation strategy that tries to minimize memory usage. - */ - VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, - /** \brief Allocation strategy that tries to minimize allocation time. - */ - VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, - /** Allocation strategy that chooses always the lowest offset in available space. - This is not the most efficient strategy but achieves highly packed data. - */ - VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, - /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags. + This flag is only allowed for virtual blocks created with #VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT flag. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT = VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT, + /** \brief Allocation strategy that tries to minimize memory usage. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_MEMORY_BIT, + /** \brief Allocation strategy that tries to minimize allocation time. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT, + /** Allocation strategy that chooses always the lowest offset in available space. + This is not the most efficient strategy but achieves highly packed data. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT = VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + /** \brief A bit mask to extract only `STRATEGY` bits from entire set of flags. - These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits. - */ - VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK, + These strategy flags are binary compatible with equivalent flags in #VmaAllocationCreateFlagBits. + */ + VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK = VMA_ALLOCATION_CREATE_STRATEGY_MASK, - VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF -} VmaVirtualAllocationCreateFlagBits; -/// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits. -typedef VkFlags VmaVirtualAllocationCreateFlags; + VMA_VIRTUAL_ALLOCATION_CREATE_FLAG_BITS_MAX_ENUM = 0x7FFFFFFF + } VmaVirtualAllocationCreateFlagBits; + /// Flags to be passed as VmaVirtualAllocationCreateInfo::flags. See #VmaVirtualAllocationCreateFlagBits. + typedef VkFlags VmaVirtualAllocationCreateFlags; -/** @} */ + /** @} */ #endif // _VMA_ENUM_DECLARATIONS #ifndef _VMA_DATA_TYPES_DECLARATIONS -/** -\addtogroup group_init -@{ */ + /** + \addtogroup group_init + @{ */ -/** \struct VmaAllocator -\brief Represents main object of this library initialized. + /** \struct VmaAllocator + \brief Represents main object of this library initialized. -Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it. -Call function vmaDestroyAllocator() to destroy it. + Fill structure #VmaAllocatorCreateInfo and call function vmaCreateAllocator() to create it. + Call function vmaDestroyAllocator() to destroy it. 
-It is recommended to create just one object of this type per `VkDevice` object, -right after Vulkan is initialized and keep it alive until before Vulkan device is destroyed. -*/ -VK_DEFINE_HANDLE(VmaAllocator) + It is recommended to create just one object of this type per `VkDevice` object, + right after Vulkan is initialized and keep it alive until before Vulkan device is destroyed. + */ + VK_DEFINE_HANDLE(VmaAllocator) -/** @} */ + /** @} */ -/** -\addtogroup group_alloc -@{ -*/ + /** + \addtogroup group_alloc + @{ + */ -/** \struct VmaPool -\brief Represents custom memory pool + /** \struct VmaPool + \brief Represents custom memory pool -Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it. -Call function vmaDestroyPool() to destroy it. + Fill structure VmaPoolCreateInfo and call function vmaCreatePool() to create it. + Call function vmaDestroyPool() to destroy it. -For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools). -*/ -VK_DEFINE_HANDLE(VmaPool) + For more information see [Custom memory pools](@ref choosing_memory_type_custom_memory_pools). + */ + VK_DEFINE_HANDLE(VmaPool) -/** \struct VmaAllocation -\brief Represents single memory allocation. + /** \struct VmaAllocation + \brief Represents single memory allocation. -It may be either dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type -plus unique offset. + It may be either dedicated block of `VkDeviceMemory` or a specific region of a bigger block of this type + plus unique offset. -There are multiple ways to create such object. -You need to fill structure VmaAllocationCreateInfo. -For more information see [Choosing memory type](@ref choosing_memory_type). + There are multiple ways to create such object. + You need to fill structure VmaAllocationCreateInfo. + For more information see [Choosing memory type](@ref choosing_memory_type). -Although the library provides convenience functions that create Vulkan buffer or image, -allocate memory for it and bind them together, -binding of the allocation to a buffer or an image is out of scope of the allocation itself. -Allocation object can exist without buffer/image bound, -binding can be done manually by the user, and destruction of it can be done -independently of destruction of the allocation. + Although the library provides convenience functions that create Vulkan buffer or image, + allocate memory for it and bind them together, + binding of the allocation to a buffer or an image is out of scope of the allocation itself. + Allocation object can exist without buffer/image bound, + binding can be done manually by the user, and destruction of it can be done + independently of destruction of the allocation. -The object also remembers its size and some other information. -To retrieve this information, use function vmaGetAllocationInfo() and inspect -returned structure VmaAllocationInfo. -*/ -VK_DEFINE_HANDLE(VmaAllocation) + The object also remembers its size and some other information. + To retrieve this information, use function vmaGetAllocationInfo() and inspect + returned structure VmaAllocationInfo. + */ + VK_DEFINE_HANDLE(VmaAllocation) -/** \struct VmaDefragmentationContext -\brief An opaque object that represents started defragmentation process. + /** \struct VmaDefragmentationContext + \brief An opaque object that represents started defragmentation process. -Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it. 
-Call function vmaEndDefragmentation() to destroy it. -*/ -VK_DEFINE_HANDLE(VmaDefragmentationContext) + Fill structure #VmaDefragmentationInfo and call function vmaBeginDefragmentation() to create it. + Call function vmaEndDefragmentation() to destroy it. + */ + VK_DEFINE_HANDLE(VmaDefragmentationContext) -/** @} */ + /** @} */ -/** -\addtogroup group_virtual -@{ -*/ + /** + \addtogroup group_virtual + @{ + */ -/** \struct VmaVirtualAllocation -\brief Represents single memory allocation done inside VmaVirtualBlock. + /** \struct VmaVirtualAllocation + \brief Represents single memory allocation done inside VmaVirtualBlock. -Use it as a unique identifier to virtual allocation within the single block. + Use it as a unique identifier to virtual allocation within the single block. -Use value `VK_NULL_HANDLE` to represent a null/invalid allocation. -*/ -VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation) + Use value `VK_NULL_HANDLE` to represent a null/invalid allocation. + */ + VK_DEFINE_NON_DISPATCHABLE_HANDLE(VmaVirtualAllocation) -/** @} */ + /** @} */ -/** -\addtogroup group_virtual -@{ -*/ + /** + \addtogroup group_virtual + @{ + */ -/** \struct VmaVirtualBlock -\brief Handle to a virtual block object that allows to use core allocation algorithm without allocating any real GPU memory. + /** \struct VmaVirtualBlock + \brief Handle to a virtual block object that allows to use core allocation algorithm without allocating any real GPU memory. -Fill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it. -For more information, see documentation chapter \ref virtual_allocator. + Fill in #VmaVirtualBlockCreateInfo structure and use vmaCreateVirtualBlock() to create it. Use vmaDestroyVirtualBlock() to destroy it. + For more information, see documentation chapter \ref virtual_allocator. -This object is not thread-safe - should not be used from multiple threads simultaneously, must be synchronized externally. -*/ -VK_DEFINE_HANDLE(VmaVirtualBlock) + This object is not thread-safe - should not be used from multiple threads simultaneously, must be synchronized externally. + */ + VK_DEFINE_HANDLE(VmaVirtualBlock) -/** @} */ + /** @} */ -/** -\addtogroup group_init -@{ -*/ + /** + \addtogroup group_init + @{ + */ -/// Callback function called after successful vkAllocateMemory. -typedef void (VKAPI_PTR* PFN_vmaAllocateDeviceMemoryFunction)( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t memoryType, - VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, - VkDeviceSize size, - void* VMA_NULLABLE pUserData); + /// Callback function called after successful vkAllocateMemory. + typedef void(VKAPI_PTR *PFN_vmaAllocateDeviceMemoryFunction)( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryType, + VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, + VkDeviceSize size, + void *VMA_NULLABLE pUserData); -/// Callback function called before vkFreeMemory. -typedef void (VKAPI_PTR* PFN_vmaFreeDeviceMemoryFunction)( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t memoryType, - VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, - VkDeviceSize size, - void* VMA_NULLABLE pUserData); + /// Callback function called before vkFreeMemory. 
+ typedef void(VKAPI_PTR *PFN_vmaFreeDeviceMemoryFunction)( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryType, + VkDeviceMemory VMA_NOT_NULL_NON_DISPATCHABLE memory, + VkDeviceSize size, + void *VMA_NULLABLE pUserData); -/** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`. + /** \brief Set of callbacks that the library will call for `vkAllocateMemory` and `vkFreeMemory`. -Provided for informative purpose, e.g. to gather statistics about number of -allocations or total amount of memory allocated in Vulkan. + Provided for informative purpose, e.g. to gather statistics about number of + allocations or total amount of memory allocated in Vulkan. -Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks. -*/ -typedef struct VmaDeviceMemoryCallbacks -{ - /// Optional, can be null. - PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate; - /// Optional, can be null. - PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree; - /// Optional, can be null. - void* VMA_NULLABLE pUserData; -} VmaDeviceMemoryCallbacks; + Used in VmaAllocatorCreateInfo::pDeviceMemoryCallbacks. + */ + typedef struct VmaDeviceMemoryCallbacks + { + /// Optional, can be null. + PFN_vmaAllocateDeviceMemoryFunction VMA_NULLABLE pfnAllocate; + /// Optional, can be null. + PFN_vmaFreeDeviceMemoryFunction VMA_NULLABLE pfnFree; + /// Optional, can be null. + void *VMA_NULLABLE pUserData; + } VmaDeviceMemoryCallbacks; -/** \brief Pointers to some Vulkan functions - a subset used by the library. + /** \brief Pointers to some Vulkan functions - a subset used by the library. -Used in VmaAllocatorCreateInfo::pVulkanFunctions. -*/ -typedef struct VmaVulkanFunctions -{ - /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. - PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr; - /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. - PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr; - PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties; - PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties; - PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory; - PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory; - PFN_vkMapMemory VMA_NULLABLE vkMapMemory; - PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory; - PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges; - PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges; - PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory; - PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory; - PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements; - PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements; - PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer; - PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer; - PFN_vkCreateImage VMA_NULLABLE vkCreateImage; - PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage; - PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer; + Used in VmaAllocatorCreateInfo::pVulkanFunctions. + */ + typedef struct VmaVulkanFunctions + { + /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. + PFN_vkGetInstanceProcAddr VMA_NULLABLE vkGetInstanceProcAddr; + /// Required when using VMA_DYNAMIC_VULKAN_FUNCTIONS. 
+ PFN_vkGetDeviceProcAddr VMA_NULLABLE vkGetDeviceProcAddr; + PFN_vkGetPhysicalDeviceProperties VMA_NULLABLE vkGetPhysicalDeviceProperties; + PFN_vkGetPhysicalDeviceMemoryProperties VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties; + PFN_vkAllocateMemory VMA_NULLABLE vkAllocateMemory; + PFN_vkFreeMemory VMA_NULLABLE vkFreeMemory; + PFN_vkMapMemory VMA_NULLABLE vkMapMemory; + PFN_vkUnmapMemory VMA_NULLABLE vkUnmapMemory; + PFN_vkFlushMappedMemoryRanges VMA_NULLABLE vkFlushMappedMemoryRanges; + PFN_vkInvalidateMappedMemoryRanges VMA_NULLABLE vkInvalidateMappedMemoryRanges; + PFN_vkBindBufferMemory VMA_NULLABLE vkBindBufferMemory; + PFN_vkBindImageMemory VMA_NULLABLE vkBindImageMemory; + PFN_vkGetBufferMemoryRequirements VMA_NULLABLE vkGetBufferMemoryRequirements; + PFN_vkGetImageMemoryRequirements VMA_NULLABLE vkGetImageMemoryRequirements; + PFN_vkCreateBuffer VMA_NULLABLE vkCreateBuffer; + PFN_vkDestroyBuffer VMA_NULLABLE vkDestroyBuffer; + PFN_vkCreateImage VMA_NULLABLE vkCreateImage; + PFN_vkDestroyImage VMA_NULLABLE vkDestroyImage; + PFN_vkCmdCopyBuffer VMA_NULLABLE vkCmdCopyBuffer; #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. - PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR; - /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. - PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR; + /// Fetch "vkGetBufferMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetBufferMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. + PFN_vkGetBufferMemoryRequirements2KHR VMA_NULLABLE vkGetBufferMemoryRequirements2KHR; + /// Fetch "vkGetImageMemoryRequirements2" on Vulkan >= 1.1, fetch "vkGetImageMemoryRequirements2KHR" when using VK_KHR_dedicated_allocation extension. + PFN_vkGetImageMemoryRequirements2KHR VMA_NULLABLE vkGetImageMemoryRequirements2KHR; #endif #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 - /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension. - PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR; - /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension. - PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR; + /// Fetch "vkBindBufferMemory2" on Vulkan >= 1.1, fetch "vkBindBufferMemory2KHR" when using VK_KHR_bind_memory2 extension. + PFN_vkBindBufferMemory2KHR VMA_NULLABLE vkBindBufferMemory2KHR; + /// Fetch "vkBindImageMemory2" on Vulkan >= 1.1, fetch "vkBindImageMemory2KHR" when using VK_KHR_bind_memory2 extension. + PFN_vkBindImageMemory2KHR VMA_NULLABLE vkBindImageMemory2KHR; #endif #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 - PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR; + PFN_vkGetPhysicalDeviceMemoryProperties2KHR VMA_NULLABLE vkGetPhysicalDeviceMemoryProperties2KHR; #endif #if VMA_VULKAN_VERSION >= 1003000 - /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4. 
- PFN_vkGetDeviceBufferMemoryRequirements VMA_NULLABLE vkGetDeviceBufferMemoryRequirements; - /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4. - PFN_vkGetDeviceImageMemoryRequirements VMA_NULLABLE vkGetDeviceImageMemoryRequirements; + /// Fetch from "vkGetDeviceBufferMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceBufferMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4. + PFN_vkGetDeviceBufferMemoryRequirements VMA_NULLABLE vkGetDeviceBufferMemoryRequirements; + /// Fetch from "vkGetDeviceImageMemoryRequirements" on Vulkan >= 1.3, but you can also fetch it from "vkGetDeviceImageMemoryRequirementsKHR" if you enabled extension VK_KHR_maintenance4. + PFN_vkGetDeviceImageMemoryRequirements VMA_NULLABLE vkGetDeviceImageMemoryRequirements; #endif -} VmaVulkanFunctions; - -/// Description of a Allocator to be created. -typedef struct VmaAllocatorCreateInfo -{ - /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum. - VmaAllocatorCreateFlags flags; - /// Vulkan physical device. - /** It must be valid throughout whole lifetime of created allocator. */ - VkPhysicalDevice VMA_NOT_NULL physicalDevice; - /// Vulkan device. - /** It must be valid throughout whole lifetime of created allocator. */ - VkDevice VMA_NOT_NULL device; - /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional. - /** Set to 0 to use default, which is currently 256 MiB. */ - VkDeviceSize preferredLargeHeapBlockSize; - /// Custom CPU memory allocation callbacks. Optional. - /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */ - const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks; - /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional. - /** Optional, can be null. */ - const VmaDeviceMemoryCallbacks* VMA_NULLABLE pDeviceMemoryCallbacks; - /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap. - - If not NULL, it must be a pointer to an array of - `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on - maximum number of bytes that can be allocated out of particular Vulkan memory - heap. - - Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that - heap. This is also the default in case of `pHeapSizeLimit` = NULL. - - If there is a limit defined for a heap: - - - If user tries to allocate more memory from that heap using this allocator, - the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. - - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the - value of this limit will be reported instead when using vmaGetMemoryProperties(). - - Warning! Using this feature may not be equivalent to installing a GPU with - smaller amount of memory, because graphics driver doesn't necessary fail new - allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is - exceeded. It may return success and just silently migrate some device memory - blocks to system RAM. This driver behavior can also be controlled using - VK_AMD_memory_overallocation_behavior extension. 
- */ - const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit; + } VmaVulkanFunctions; + + /// Description of a Allocator to be created. + typedef struct VmaAllocatorCreateInfo + { + /// Flags for created allocator. Use #VmaAllocatorCreateFlagBits enum. + VmaAllocatorCreateFlags flags; + /// Vulkan physical device. + /** It must be valid throughout whole lifetime of created allocator. */ + VkPhysicalDevice VMA_NOT_NULL physicalDevice; + /// Vulkan device. + /** It must be valid throughout whole lifetime of created allocator. */ + VkDevice VMA_NOT_NULL device; + /// Preferred size of a single `VkDeviceMemory` block to be allocated from large heaps > 1 GiB. Optional. + /** Set to 0 to use default, which is currently 256 MiB. */ + VkDeviceSize preferredLargeHeapBlockSize; + /// Custom CPU memory allocation callbacks. Optional. + /** Optional, can be null. When specified, will also be used for all CPU-side memory allocations. */ + const VkAllocationCallbacks *VMA_NULLABLE pAllocationCallbacks; + /// Informative callbacks for `vkAllocateMemory`, `vkFreeMemory`. Optional. + /** Optional, can be null. */ + const VmaDeviceMemoryCallbacks *VMA_NULLABLE pDeviceMemoryCallbacks; + /** \brief Either null or a pointer to an array of limits on maximum number of bytes that can be allocated out of particular Vulkan memory heap. + + If not NULL, it must be a pointer to an array of + `VkPhysicalDeviceMemoryProperties::memoryHeapCount` elements, defining limit on + maximum number of bytes that can be allocated out of particular Vulkan memory + heap. + + Any of the elements may be equal to `VK_WHOLE_SIZE`, which means no limit on that + heap. This is also the default in case of `pHeapSizeLimit` = NULL. + + If there is a limit defined for a heap: + + - If user tries to allocate more memory from that heap using this allocator, + the allocation fails with `VK_ERROR_OUT_OF_DEVICE_MEMORY`. + - If the limit is smaller than heap size reported in `VkMemoryHeap::size`, the + value of this limit will be reported instead when using vmaGetMemoryProperties(). + + Warning! Using this feature may not be equivalent to installing a GPU with + smaller amount of memory, because graphics driver doesn't necessary fail new + allocations with `VK_ERROR_OUT_OF_DEVICE_MEMORY` result when memory capacity is + exceeded. It may return success and just silently migrate some device memory + blocks to system RAM. This driver behavior can also be controlled using + VK_AMD_memory_overallocation_behavior extension. + */ + const VkDeviceSize *VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pHeapSizeLimit; - /** \brief Pointers to Vulkan functions. Can be null. + /** \brief Pointers to Vulkan functions. Can be null. - For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions). - */ - const VmaVulkanFunctions* VMA_NULLABLE pVulkanFunctions; - /** \brief Handle to Vulkan instance object. + For details see [Pointers to Vulkan functions](@ref config_Vulkan_functions). + */ + const VmaVulkanFunctions *VMA_NULLABLE pVulkanFunctions; + /** \brief Handle to Vulkan instance object. - Starting from version 3.0.0 this member is no longer optional, it must be set! - */ - VkInstance VMA_NOT_NULL instance; - /** \brief Optional. The highest version of Vulkan that the application is designed to use. - - It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`. 
- The patch version number specified is ignored. Only the major and minor versions are considered. - It must be less or equal (preferably equal) to value as passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`. - Only versions 1.0, 1.1, 1.2, 1.3 are supported by the current implementation. - Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`. - */ - uint32_t vulkanApiVersion; + Starting from version 3.0.0 this member is no longer optional, it must be set! + */ + VkInstance VMA_NOT_NULL instance; + /** \brief Optional. The highest version of Vulkan that the application is designed to use. + + It must be a value in the format as created by macro `VK_MAKE_VERSION` or a constant like: `VK_API_VERSION_1_1`, `VK_API_VERSION_1_0`. + The patch version number specified is ignored. Only the major and minor versions are considered. + It must be less or equal (preferably equal) to value as passed to `vkCreateInstance` as `VkApplicationInfo::apiVersion`. + Only versions 1.0, 1.1, 1.2, 1.3 are supported by the current implementation. + Leaving it initialized to zero is equivalent to `VK_API_VERSION_1_0`. + */ + uint32_t vulkanApiVersion; #if VMA_EXTERNAL_MEMORY - /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type. + /** \brief Either null or a pointer to an array of external memory handle types for each Vulkan memory type. - If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount` - elements, defining external memory handle types of particular Vulkan memory type, - to be passed using `VkExportMemoryAllocateInfoKHR`. + If not NULL, it must be a pointer to an array of `VkPhysicalDeviceMemoryProperties::memoryTypeCount` + elements, defining external memory handle types of particular Vulkan memory type, + to be passed using `VkExportMemoryAllocateInfoKHR`. - Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type. - This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL. - */ - const VkExternalMemoryHandleTypeFlagsKHR* VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes; + Any of the elements may be equal to 0, which means not to use `VkExportMemoryAllocateInfoKHR` on this memory type. + This is also the default in case of `pTypeExternalMemoryHandleTypes` = NULL. + */ + const VkExternalMemoryHandleTypeFlagsKHR *VMA_NULLABLE VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryTypeCount") pTypeExternalMemoryHandleTypes; #endif // #if VMA_EXTERNAL_MEMORY -} VmaAllocatorCreateInfo; - -/// Information about existing #VmaAllocator object. -typedef struct VmaAllocatorInfo -{ - /** \brief Handle to Vulkan instance object. - - This is the same value as has been passed through VmaAllocatorCreateInfo::instance. - */ - VkInstance VMA_NOT_NULL instance; - /** \brief Handle to Vulkan physical device object. + } VmaAllocatorCreateInfo; - This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice. - */ - VkPhysicalDevice VMA_NOT_NULL physicalDevice; - /** \brief Handle to Vulkan device object. + /// Information about existing #VmaAllocator object. + typedef struct VmaAllocatorInfo + { + /** \brief Handle to Vulkan instance object. - This is the same value as has been passed through VmaAllocatorCreateInfo::device. 
- */ - VkDevice VMA_NOT_NULL device; -} VmaAllocatorInfo; + This is the same value as has been passed through VmaAllocatorCreateInfo::instance. + */ + VkInstance VMA_NOT_NULL instance; + /** \brief Handle to Vulkan physical device object. -/** @} */ + This is the same value as has been passed through VmaAllocatorCreateInfo::physicalDevice. + */ + VkPhysicalDevice VMA_NOT_NULL physicalDevice; + /** \brief Handle to Vulkan device object. -/** -\addtogroup group_stats -@{ -*/ + This is the same value as has been passed through VmaAllocatorCreateInfo::device. + */ + VkDevice VMA_NOT_NULL device; + } VmaAllocatorInfo; -/** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total. + /** @} */ -These are fast to calculate. -See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics(). -*/ -typedef struct VmaStatistics -{ - /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated. + /** + \addtogroup group_stats + @{ */ - uint32_t blockCount; - /** \brief Number of #VmaAllocation objects allocated. - Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`. - */ - uint32_t allocationCount; - /** \brief Number of bytes allocated in `VkDeviceMemory` blocks. + /** \brief Calculated statistics of memory usage e.g. in a specific memory type, heap, custom pool, or total. - \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object - (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls - "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image. + These are fast to calculate. + See functions: vmaGetHeapBudgets(), vmaGetPoolStatistics(). */ - VkDeviceSize blockBytes; - /** \brief Total number of bytes occupied by all #VmaAllocation objects. + typedef struct VmaStatistics + { + /** \brief Number of `VkDeviceMemory` objects - Vulkan memory blocks allocated. + */ + uint32_t blockCount; + /** \brief Number of #VmaAllocation objects allocated. - Always less or equal than `blockBytes`. - Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan - but unused by any #VmaAllocation. - */ - VkDeviceSize allocationBytes; -} VmaStatistics; + Dedicated allocations have their own blocks, so each one adds 1 to `allocationCount` as well as `blockCount`. + */ + uint32_t allocationCount; + /** \brief Number of bytes allocated in `VkDeviceMemory` blocks. -/** \brief More detailed statistics than #VmaStatistics. + \note To avoid confusion, please be aware that what Vulkan calls an "allocation" - a whole `VkDeviceMemory` object + (e.g. as in `VkPhysicalDeviceLimits::maxMemoryAllocationCount`) is called a "block" in VMA, while VMA calls + "allocation" a #VmaAllocation object that represents a memory region sub-allocated from such block, usually for a single buffer or image. + */ + VkDeviceSize blockBytes; + /** \brief Total number of bytes occupied by all #VmaAllocation objects. -These are slower to calculate. Use for debugging purposes. -See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics(). + Always less or equal than `blockBytes`. + Difference `(blockBytes - allocationBytes)` is the amount of memory allocated from Vulkan + but unused by any #VmaAllocation. 
+ */ + VkDeviceSize allocationBytes; + } VmaStatistics; -Previous version of the statistics API provided averages, but they have been removed -because they can be easily calculated as: + /** \brief More detailed statistics than #VmaStatistics. -\code -VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount; -VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes; -VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount; -\endcode -*/ -typedef struct VmaDetailedStatistics -{ - /// Basic statistics. - VmaStatistics statistics; - /// Number of free ranges of memory between allocations. - uint32_t unusedRangeCount; - /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations. - VkDeviceSize allocationSizeMin; - /// Largest allocation size. 0 if there are 0 allocations. - VkDeviceSize allocationSizeMax; - /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges. - VkDeviceSize unusedRangeSizeMin; - /// Largest empty range size. 0 if there are 0 empty ranges. - VkDeviceSize unusedRangeSizeMax; -} VmaDetailedStatistics; - -/** \brief General statistics from current state of the Allocator - -total memory usage across all memory heaps and types. - -These are slower to calculate. Use for debugging purposes. -See function vmaCalculateStatistics(). -*/ -typedef struct VmaTotalStatistics -{ - VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES]; - VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS]; - VmaDetailedStatistics total; -} VmaTotalStatistics; + These are slower to calculate. Use for debugging purposes. + See functions: vmaCalculateStatistics(), vmaCalculatePoolStatistics(). -/** \brief Statistics of current memory usage and available budget for a specific memory heap. + Previous version of the statistics API provided averages, but they have been removed + because they can be easily calculated as: -These are fast to calculate. -See function vmaGetHeapBudgets(). -*/ -typedef struct VmaBudget -{ - /** \brief Statistics fetched from the library. + \code + VkDeviceSize allocationSizeAvg = detailedStats.statistics.allocationBytes / detailedStats.statistics.allocationCount; + VkDeviceSize unusedBytes = detailedStats.statistics.blockBytes - detailedStats.statistics.allocationBytes; + VkDeviceSize unusedRangeSizeAvg = unusedBytes / detailedStats.unusedRangeCount; + \endcode */ - VmaStatistics statistics; - /** \brief Estimated current memory usage of the program, in bytes. - - Fetched from system using VK_EXT_memory_budget extension if enabled. - - It might be different than `statistics.blockBytes` (usually higher) due to additional implicit objects - also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or - `VkDeviceMemory` blocks allocated outside of this library, if any. + typedef struct VmaDetailedStatistics + { + /// Basic statistics. + VmaStatistics statistics; + /// Number of free ranges of memory between allocations. + uint32_t unusedRangeCount; + /// Smallest allocation size. `VK_WHOLE_SIZE` if there are 0 allocations. + VkDeviceSize allocationSizeMin; + /// Largest allocation size. 0 if there are 0 allocations. + VkDeviceSize allocationSizeMax; + /// Smallest empty range size. `VK_WHOLE_SIZE` if there are 0 empty ranges. + VkDeviceSize unusedRangeSizeMin; + /// Largest empty range size. 0 if there are 0 empty ranges. 
+ VkDeviceSize unusedRangeSizeMax; + } VmaDetailedStatistics; + + /** \brief General statistics from current state of the Allocator - + total memory usage across all memory heaps and types. + + These are slower to calculate. Use for debugging purposes. + See function vmaCalculateStatistics(). */ - VkDeviceSize usage; - /** \brief Estimated amount of memory available to the program, in bytes. + typedef struct VmaTotalStatistics + { + VmaDetailedStatistics memoryType[VK_MAX_MEMORY_TYPES]; + VmaDetailedStatistics memoryHeap[VK_MAX_MEMORY_HEAPS]; + VmaDetailedStatistics total; + } VmaTotalStatistics; - Fetched from system using VK_EXT_memory_budget extension if enabled. + /** \brief Statistics of current memory usage and available budget for a specific memory heap. - It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors - external to the program, decided by the operating system. - Difference `budget - usage` is the amount of additional memory that can probably - be allocated without problems. Exceeding the budget may result in various problems. + These are fast to calculate. + See function vmaGetHeapBudgets(). */ - VkDeviceSize budget; -} VmaBudget; + typedef struct VmaBudget + { + /** \brief Statistics fetched from the library. + */ + VmaStatistics statistics; + /** \brief Estimated current memory usage of the program, in bytes. -/** @} */ + Fetched from system using VK_EXT_memory_budget extension if enabled. -/** -\addtogroup group_alloc -@{ -*/ + It might be different than `statistics.blockBytes` (usually higher) due to additional implicit objects + also occupying the memory, like swapchain, pipelines, descriptor heaps, command buffers, or + `VkDeviceMemory` blocks allocated outside of this library, if any. + */ + VkDeviceSize usage; + /** \brief Estimated amount of memory available to the program, in bytes. -/** \brief Parameters of new #VmaAllocation. + Fetched from system using VK_EXT_memory_budget extension if enabled. -To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others. -*/ -typedef struct VmaAllocationCreateInfo -{ - /// Use #VmaAllocationCreateFlagBits enum. - VmaAllocationCreateFlags flags; - /** \brief Intended usage of memory. + It might be different (most probably smaller) than `VkMemoryHeap::size[heapIndex]` due to factors + external to the program, decided by the operating system. + Difference `budget - usage` is the amount of additional memory that can probably + be allocated without problems. Exceeding the budget may result in various problems. + */ + VkDeviceSize budget; + } VmaBudget; - You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n - If `pool` is not null, this member is ignored. - */ - VmaMemoryUsage usage; - /** \brief Flags that must be set in a Memory Type chosen for an allocation. - - Leave 0 if you specify memory requirements in other way. \n - If `pool` is not null, this member is ignored.*/ - VkMemoryPropertyFlags requiredFlags; - /** \brief Flags that preferably should be set in a memory type chosen for an allocation. - - Set to 0 if no additional flags are preferred. \n - If `pool` is not null, this member is ignored. */ - VkMemoryPropertyFlags preferredFlags; - /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation. 
- - Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if - it meets other requirements specified by this structure, with no further - restrictions on memory type index. \n - If `pool` is not null, this member is ignored. - */ - uint32_t memoryTypeBits; - /** \brief Pool that this allocation should be created in. + /** @} */ - Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members: - `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored. + /** + \addtogroup group_alloc + @{ */ - VmaPool VMA_NULLABLE pool; - /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData(). - If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either - null or pointer to a null-terminated string. The string will be then copied to - internal buffer, so it doesn't need to be valid after allocation call. - */ - void* VMA_NULLABLE pUserData; - /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. + /** \brief Parameters of new #VmaAllocation. - It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object - and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. - Otherwise, it has the priority of a memory block where it is placed and this variable is ignored. + To be used with functions like vmaCreateBuffer(), vmaCreateImage(), and many others. */ - float priority; -} VmaAllocationCreateInfo; + typedef struct VmaAllocationCreateInfo + { + /// Use #VmaAllocationCreateFlagBits enum. + VmaAllocationCreateFlags flags; + /** \brief Intended usage of memory. -/// Describes parameter of created #VmaPool. -typedef struct VmaPoolCreateInfo -{ - /** \brief Vulkan memory type index to allocate this pool from. - */ - uint32_t memoryTypeIndex; - /** \brief Use combination of #VmaPoolCreateFlagBits. - */ - VmaPoolCreateFlags flags; - /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional. + You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n + If `pool` is not null, this member is ignored. + */ + VmaMemoryUsage usage; + /** \brief Flags that must be set in a Memory Type chosen for an allocation. + + Leave 0 if you specify memory requirements in other way. \n + If `pool` is not null, this member is ignored.*/ + VkMemoryPropertyFlags requiredFlags; + /** \brief Flags that preferably should be set in a memory type chosen for an allocation. + + Set to 0 if no additional flags are preferred. \n + If `pool` is not null, this member is ignored. */ + VkMemoryPropertyFlags preferredFlags; + /** \brief Bitmask containing one bit set for every memory type acceptable for this allocation. + + Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if + it meets other requirements specified by this structure, with no further + restrictions on memory type index. \n + If `pool` is not null, this member is ignored. + */ + uint32_t memoryTypeBits; + /** \brief Pool that this allocation should be created in. - Specify nonzero to set explicit, constant size of memory blocks used by this - pool. - - Leave 0 to use default and let the library manage block sizes automatically. - Sizes of particular blocks may vary. 
- In this case, the pool will also support dedicated allocations. - */ - VkDeviceSize blockSize; - /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty. - - Set to 0 to have no preallocated blocks and allow the pool be completely empty. - */ - size_t minBlockCount; - /** \brief Maximum number of blocks that can be allocated in this pool. Optional. - - Set to 0 to use default, which is `SIZE_MAX`, which means no limit. + Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members: + `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored. + */ + VmaPool VMA_NULLABLE pool; + /** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData(). - Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated - throughout whole lifetime of this pool. - */ - size_t maxBlockCount; - /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations. + If #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT is used, it must be either + null or pointer to a null-terminated string. The string will be then copied to + internal buffer, so it doesn't need to be valid after allocation call. + */ + void *VMA_NULLABLE pUserData; + /** \brief A floating-point value between 0 and 1, indicating the priority of the allocation relative to other memory allocations. - It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object. - Otherwise, this variable is ignored. - */ - float priority; - /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0. + It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object + and this allocation ends up as dedicated or is explicitly forced as dedicated using #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + Otherwise, it has the priority of a memory block where it is placed and this variable is ignored. + */ + float priority; + } VmaAllocationCreateInfo; + + /// Describes parameter of created #VmaPool. + typedef struct VmaPoolCreateInfo + { + /** \brief Vulkan memory type index to allocate this pool from. + */ + uint32_t memoryTypeIndex; + /** \brief Use combination of #VmaPoolCreateFlagBits. + */ + VmaPoolCreateFlags flags; + /** \brief Size of a single `VkDeviceMemory` block to be allocated as part of this pool, in bytes. Optional. + + Specify nonzero to set explicit, constant size of memory blocks used by this + pool. + + Leave 0 to use default and let the library manage block sizes automatically. + Sizes of particular blocks may vary. + In this case, the pool will also support dedicated allocations. + */ + VkDeviceSize blockSize; + /** \brief Minimum number of blocks to be always allocated in this pool, even if they stay empty. - Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two. - It can be useful in cases where alignment returned by Vulkan by functions like `vkGetBufferMemoryRequirements` is not enough, - e.g. when doing interop with OpenGL. - */ - VkDeviceSize minAllocationAlignment; - /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional. 
+ Set to 0 to have no preallocated blocks and allow the pool be completely empty. + */ + size_t minBlockCount; + /** \brief Maximum number of blocks that can be allocated in this pool. Optional. - Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`. - It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`. - Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool. + Set to 0 to use default, which is `SIZE_MAX`, which means no limit. - Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`, - can be attached automatically by this library when using other, more convenient of its features. - */ - void* VMA_NULLABLE pMemoryAllocateNext; -} VmaPoolCreateInfo; + Set to same value as VmaPoolCreateInfo::minBlockCount to have fixed amount of memory allocated + throughout whole lifetime of this pool. + */ + size_t maxBlockCount; + /** \brief A floating-point value between 0 and 1, indicating the priority of the allocations in this pool relative to other memory allocations. -/** @} */ + It is used only when #VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT flag was used during creation of the #VmaAllocator object. + Otherwise, this variable is ignored. + */ + float priority; + /** \brief Additional minimum alignment to be used for all allocations created from this pool. Can be 0. -/** -\addtogroup group_alloc -@{ -*/ + Leave 0 (default) not to impose any additional alignment. If not 0, it must be a power of two. + It can be useful in cases where alignment returned by Vulkan by functions like `vkGetBufferMemoryRequirements` is not enough, + e.g. when doing interop with OpenGL. + */ + VkDeviceSize minAllocationAlignment; + /** \brief Additional `pNext` chain to be attached to `VkMemoryAllocateInfo` used for every allocation made by this pool. Optional. -/// Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo(). -typedef struct VmaAllocationInfo -{ - /** \brief Memory type index that this allocation was allocated from. + Optional, can be null. If not null, it must point to a `pNext` chain of structures that can be attached to `VkMemoryAllocateInfo`. + It can be useful for special needs such as adding `VkExportMemoryAllocateInfoKHR`. + Structures pointed by this member must remain alive and unchanged for the whole lifetime of the custom pool. - It never changes. - */ - uint32_t memoryType; - /** \brief Handle to Vulkan memory object. + Please note that some structures, e.g. `VkMemoryPriorityAllocateInfoEXT`, `VkMemoryDedicatedAllocateInfoKHR`, + can be attached automatically by this library when using other, more convenient of its features. + */ + void *VMA_NULLABLE pMemoryAllocateNext; + } VmaPoolCreateInfo; - Same memory object can be shared by multiple allocations. + /** @} */ - It can change after the allocation is moved during \ref defragmentation. + /** + \addtogroup group_alloc + @{ */ - VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory; - /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation. - You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. 
function - vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image, - not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation - and apply this offset automatically. + /// Parameters of #VmaAllocation objects, that can be retrieved using function vmaGetAllocationInfo(). + typedef struct VmaAllocationInfo + { + /** \brief Memory type index that this allocation was allocated from. - It can change after the allocation is moved during \ref defragmentation. - */ - VkDeviceSize offset; - /** \brief Size of this allocation, in bytes. + It never changes. + */ + uint32_t memoryType; + /** \brief Handle to Vulkan memory object. - It never changes. + Same memory object can be shared by multiple allocations. - \note Allocation size returned in this variable may be greater than the size - requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the - allocation is accessible for operations on memory e.g. using a pointer after - mapping with vmaMapMemory(), but operations on the resource e.g. using - `vkCmdCopyBuffer` must be limited to the size of the resource. - */ - VkDeviceSize size; - /** \brief Pointer to the beginning of this allocation as mapped data. - - If the allocation hasn't been mapped using vmaMapMemory() and hasn't been - created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null. + It can change after the allocation is moved during \ref defragmentation. + */ + VkDeviceMemory VMA_NULLABLE_NON_DISPATCHABLE deviceMemory; + /** \brief Offset in `VkDeviceMemory` object to the beginning of this allocation, in bytes. `(deviceMemory, offset)` pair is unique to this allocation. - It can change after call to vmaMapMemory(), vmaUnmapMemory(). - It can also change after the allocation is moved during \ref defragmentation. - */ - void* VMA_NULLABLE pMappedData; - /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData(). + You usually don't need to use this offset. If you create a buffer or an image together with the allocation using e.g. function + vmaCreateBuffer(), vmaCreateImage(), functions that operate on these resources refer to the beginning of the buffer or image, + not entire device memory block. Functions like vmaMapMemory(), vmaBindBufferMemory() also refer to the beginning of the allocation + and apply this offset automatically. - It can change after call to vmaSetAllocationUserData() for this allocation. - */ - void* VMA_NULLABLE pUserData; - /** \brief Custom allocation name that was set with vmaSetAllocationName(). + It can change after the allocation is moved during \ref defragmentation. + */ + VkDeviceSize offset; + /** \brief Size of this allocation, in bytes. - It can change after call to vmaSetAllocationName() for this allocation. + It never changes. - Another way to set custom name is to pass it in VmaAllocationCreateInfo::pUserData with - additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED]. - */ - const char* VMA_NULLABLE pName; -} VmaAllocationInfo; + \note Allocation size returned in this variable may be greater than the size + requested for the resource e.g. as `VkBufferCreateInfo::size`. Whole size of the + allocation is accessible for operations on memory e.g. using a pointer after + mapping with vmaMapMemory(), but operations on the resource e.g. 
using + `vkCmdCopyBuffer` must be limited to the size of the resource. + */ + VkDeviceSize size; + /** \brief Pointer to the beginning of this allocation as mapped data. -/** \brief Parameters for defragmentation. + If the allocation hasn't been mapped using vmaMapMemory() and hasn't been + created with #VMA_ALLOCATION_CREATE_MAPPED_BIT flag, this value is null. -To be used with function vmaBeginDefragmentation(). -*/ -typedef struct VmaDefragmentationInfo -{ - /// \brief Use combination of #VmaDefragmentationFlagBits. - VmaDefragmentationFlags flags; - /** \brief Custom pool to be defragmented. + It can change after call to vmaMapMemory(), vmaUnmapMemory(). + It can also change after the allocation is moved during \ref defragmentation. + */ + void *VMA_NULLABLE pMappedData; + /** \brief Custom general-purpose pointer that was passed as VmaAllocationCreateInfo::pUserData or set using vmaSetAllocationUserData(). - If null then default pools will undergo defragmentation process. - */ - VmaPool VMA_NULLABLE pool; - /** \brief Maximum numbers of bytes that can be copied during single pass, while moving allocations to different places. + It can change after call to vmaSetAllocationUserData() for this allocation. + */ + void *VMA_NULLABLE pUserData; + /** \brief Custom allocation name that was set with vmaSetAllocationName(). - `0` means no limit. - */ - VkDeviceSize maxBytesPerPass; - /** \brief Maximum number of allocations that can be moved during single pass to a different place. + It can change after call to vmaSetAllocationName() for this allocation. - `0` means no limit. - */ - uint32_t maxAllocationsPerPass; -} VmaDefragmentationInfo; + Another way to set custom name is to pass it in VmaAllocationCreateInfo::pUserData with + additional flag #VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT set [DEPRECATED]. + */ + const char *VMA_NULLABLE pName; + } VmaAllocationInfo; -/// Single move of an allocation to be done for defragmentation. -typedef struct VmaDefragmentationMove -{ - /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it. - VmaDefragmentationMoveOperation operation; - /// Allocation that should be moved. - VmaAllocation VMA_NOT_NULL srcAllocation; - /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`. + /** \brief Parameters for defragmentation. - \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass, - to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory(). - vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory. + To be used with function vmaBeginDefragmentation(). */ - VmaAllocation VMA_NOT_NULL dstTmpAllocation; -} VmaDefragmentationMove; - -/** \brief Parameters for incremental defragmentation steps. - -To be used with function vmaBeginDefragmentationPass(). -*/ -typedef struct VmaDefragmentationPassMoveInfo -{ - /// Number of elements in the `pMoves` array. - uint32_t moveCount; - /** \brief Array of moves to be performed by the user in the current defragmentation pass. - - Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass(). - - For each element, you should: - - 1. 
Create a new buffer/image in the place pointed by VmaDefragmentationMove::dstMemory + VmaDefragmentationMove::dstOffset. - 2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`. - 3. Make sure these commands finished executing on the GPU. - 4. Destroy the old buffer/image. - - Only then you can finish defragmentation pass by calling vmaEndDefragmentationPass(). - After this call, the allocation will point to the new place in memory. - - Alternatively, if you cannot move specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. - - Alternatively, if you decide you want to completely remove the allocation: + typedef struct VmaDefragmentationInfo + { + /// \brief Use combination of #VmaDefragmentationFlagBits. + VmaDefragmentationFlags flags; + /** \brief Custom pool to be defragmented. - 1. Destroy its buffer/image. - 2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY. + If null then default pools will undergo defragmentation process. + */ + VmaPool VMA_NULLABLE pool; + /** \brief Maximum numbers of bytes that can be copied during single pass, while moving allocations to different places. - Then, after vmaEndDefragmentationPass() the allocation will be freed. - */ - VmaDefragmentationMove* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves; -} VmaDefragmentationPassMoveInfo; + `0` means no limit. + */ + VkDeviceSize maxBytesPerPass; + /** \brief Maximum number of allocations that can be moved during single pass to a different place. -/// Statistics returned for defragmentation process in function vmaEndDefragmentation(). -typedef struct VmaDefragmentationStats -{ - /// Total number of bytes that have been copied while moving allocations to different places. - VkDeviceSize bytesMoved; - /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects. - VkDeviceSize bytesFreed; - /// Number of allocations that have been moved to different places. - uint32_t allocationsMoved; - /// Number of empty `VkDeviceMemory` objects that have been released to the system. - uint32_t deviceMemoryBlocksFreed; -} VmaDefragmentationStats; + `0` means no limit. + */ + uint32_t maxAllocationsPerPass; + } VmaDefragmentationInfo; -/** @} */ + /// Single move of an allocation to be done for defragmentation. + typedef struct VmaDefragmentationMove + { + /// Operation to be performed on the allocation by vmaEndDefragmentationPass(). Default value is #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY. You can modify it. + VmaDefragmentationMoveOperation operation; + /// Allocation that should be moved. + VmaAllocation VMA_NOT_NULL srcAllocation; + /** \brief Temporary allocation pointing to destination memory that will replace `srcAllocation`. -/** -\addtogroup group_virtual -@{ -*/ + \warning Do not store this allocation in your data structures! It exists only temporarily, for the duration of the defragmentation pass, + to be used for binding new buffer/image to the destination memory using e.g. vmaBindBufferMemory(). + vmaEndDefragmentationPass() will destroy it and make `srcAllocation` point to this memory. + */ + VmaAllocation VMA_NOT_NULL dstTmpAllocation; + } VmaDefragmentationMove; -/// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock(). -typedef struct VmaVirtualBlockCreateInfo -{ - /** \brief Total size of the virtual block. + /** \brief Parameters for incremental defragmentation steps. 
- Sizes can be expressed in bytes or any units you want as long as you are consistent in using them. - For example, if you allocate from some array of structures, 1 can mean single instance of entire structure. + To be used with function vmaBeginDefragmentationPass(). */ - VkDeviceSize size; + typedef struct VmaDefragmentationPassMoveInfo + { + /// Number of elements in the `pMoves` array. + uint32_t moveCount; + /** \brief Array of moves to be performed by the user in the current defragmentation pass. - /** \brief Use combination of #VmaVirtualBlockCreateFlagBits. - */ - VmaVirtualBlockCreateFlags flags; + Pointer to an array of `moveCount` elements, owned by VMA, created in vmaBeginDefragmentationPass(), destroyed in vmaEndDefragmentationPass(). - /** \brief Custom CPU memory allocation callbacks. Optional. + For each element, you should: - Optional, can be null. When specified, they will be used for all CPU-side memory allocations. - */ - const VkAllocationCallbacks* VMA_NULLABLE pAllocationCallbacks; -} VmaVirtualBlockCreateInfo; + 1. Create a new buffer/image in the place pointed by VmaDefragmentationMove::dstMemory + VmaDefragmentationMove::dstOffset. + 2. Copy data from the VmaDefragmentationMove::srcAllocation e.g. using `vkCmdCopyBuffer`, `vkCmdCopyImage`. + 3. Make sure these commands finished executing on the GPU. + 4. Destroy the old buffer/image. -/// Parameters of created virtual allocation to be passed to vmaVirtualAllocate(). -typedef struct VmaVirtualAllocationCreateInfo -{ - /** \brief Size of the allocation. + Only then you can finish defragmentation pass by calling vmaEndDefragmentationPass(). + After this call, the allocation will point to the new place in memory. - Cannot be zero. - */ - VkDeviceSize size; - /** \brief Required alignment of the allocation. Optional. + Alternatively, if you cannot move specific allocation, you can set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE. - Must be power of two. Special value 0 has the same meaning as 1 - means no special alignment is required, so allocation can start at any offset. - */ - VkDeviceSize alignment; - /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits. - */ - VmaVirtualAllocationCreateFlags flags; - /** \brief Custom pointer to be associated with the allocation. Optional. + Alternatively, if you decide you want to completely remove the allocation: - It can be any value and can be used for user-defined purposes. It can be fetched or changed later. - */ - void* VMA_NULLABLE pUserData; -} VmaVirtualAllocationCreateInfo; + 1. Destroy its buffer/image. + 2. Set VmaDefragmentationMove::operation to #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY. -/// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo(). -typedef struct VmaVirtualAllocationInfo -{ - /** \brief Offset of the allocation. + Then, after vmaEndDefragmentationPass() the allocation will be freed. + */ + VmaDefragmentationMove *VMA_NULLABLE VMA_LEN_IF_NOT_NULL(moveCount) pMoves; + } VmaDefragmentationPassMoveInfo; - Offset at which the allocation was made. - */ - VkDeviceSize offset; - /** \brief Size of the allocation. + /// Statistics returned for defragmentation process in function vmaEndDefragmentation(). + typedef struct VmaDefragmentationStats + { + /// Total number of bytes that have been copied while moving allocations to different places. + VkDeviceSize bytesMoved; + /// Total number of bytes that have been released to the system by freeing empty `VkDeviceMemory` objects. 
+ VkDeviceSize bytesFreed; + /// Number of allocations that have been moved to different places. + uint32_t allocationsMoved; + /// Number of empty `VkDeviceMemory` objects that have been released to the system. + uint32_t deviceMemoryBlocksFreed; + } VmaDefragmentationStats; - Same value as passed in VmaVirtualAllocationCreateInfo::size. - */ - VkDeviceSize size; - /** \brief Custom pointer associated with the allocation. + /** @} */ - Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData(). + /** + \addtogroup group_virtual + @{ */ - void* VMA_NULLABLE pUserData; -} VmaVirtualAllocationInfo; - -/** @} */ - -#endif // _VMA_DATA_TYPES_DECLARATIONS - -#ifndef _VMA_FUNCTION_HEADERS - -/** -\addtogroup group_init -@{ -*/ - -/// Creates #VmaAllocator object. -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( - const VmaAllocatorCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaAllocator VMA_NULLABLE* VMA_NOT_NULL pAllocator); - -/// Destroys allocator object. -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( - VmaAllocator VMA_NULLABLE allocator); - -/** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc. - -It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to -`VkPhysicalDevice`, `VkDevice` etc. every time using this function. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocatorInfo* VMA_NOT_NULL pAllocatorInfo); - -/** -PhysicalDeviceProperties are fetched from physicalDevice by the allocator. -You can access it here, without fetching it again on your own. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( - VmaAllocator VMA_NOT_NULL allocator, - const VkPhysicalDeviceProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceProperties); - -/** -PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator. -You can access it here, without fetching it again on your own. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( - VmaAllocator VMA_NOT_NULL allocator, - const VkPhysicalDeviceMemoryProperties* VMA_NULLABLE* VMA_NOT_NULL ppPhysicalDeviceMemoryProperties); - -/** -\brief Given Memory Type Index, returns Property Flags of this memory type. - -This is just a convenience function. Same information can be obtained using -vmaGetMemoryProperties(). -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t memoryTypeIndex, - VkMemoryPropertyFlags* VMA_NOT_NULL pFlags); - -/** \brief Sets index of the current frame. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t frameIndex); - -/** @} */ -/** -\addtogroup group_stats -@{ -*/ - -/** \brief Retrieves statistics from current state of the Allocator. - -This function is called "calculate" not "get" because it has to traverse all -internal data structures, so it may be quite slow. Use it for debugging purposes. -For faster but more brief statistics suitable to be called every frame or every allocation, -use vmaGetHeapBudgets(). - -Note that when using allocator from multiple threads, returned information may immediately -become outdated. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( - VmaAllocator VMA_NOT_NULL allocator, - VmaTotalStatistics* VMA_NOT_NULL pStats); - -/** \brief Retrieves information about current memory usage and budget for all memory heaps. 
- -\param allocator -\param[out] pBudgets Must point to array with number of elements at least equal to number of memory heaps in physical device used. - -This function is called "get" not "calculate" because it is very fast, suitable to be called -every frame or every allocation. For more detailed statistics use vmaCalculateStatistics(). - -Note that when using allocator from multiple threads, returned information may immediately -become outdated. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( - VmaAllocator VMA_NOT_NULL allocator, - VmaBudget* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets); - -/** @} */ - -/** -\addtogroup group_alloc -@{ -*/ + /// Parameters of created #VmaVirtualBlock object to be passed to vmaCreateVirtualBlock(). + typedef struct VmaVirtualBlockCreateInfo + { + /** \brief Total size of the virtual block. -/** -\brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo. + Sizes can be expressed in bytes or any units you want as long as you are consistent in using them. + For example, if you allocate from some array of structures, 1 can mean single instance of entire structure. + */ + VkDeviceSize size; -This algorithm tries to find a memory type that: + /** \brief Use combination of #VmaVirtualBlockCreateFlagBits. + */ + VmaVirtualBlockCreateFlags flags; -- Is allowed by memoryTypeBits. -- Contains all the flags from pAllocationCreateInfo->requiredFlags. -- Matches intended usage. -- Has as many flags from pAllocationCreateInfo->preferredFlags as possible. + /** \brief Custom CPU memory allocation callbacks. Optional. -\return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result -from this function or any other allocating function probably means that your -device doesn't support any memory type with requested features for the specific -type of resource you want to use it for. Please check parameters of your -resource, like image layout (OPTIMAL versus LINEAR) or mip level count. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t memoryTypeBits, - const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, - uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + Optional, can be null. When specified, they will be used for all CPU-side memory allocations. + */ + const VkAllocationCallbacks *VMA_NULLABLE pAllocationCallbacks; + } VmaVirtualBlockCreateInfo; -/** -\brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo. + /// Parameters of created virtual allocation to be passed to vmaVirtualAllocate(). + typedef struct VmaVirtualAllocationCreateInfo + { + /** \brief Size of the allocation. -It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. -It internally creates a temporary, dummy buffer that never has memory bound. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( - VmaAllocator VMA_NOT_NULL allocator, - const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, - const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, - uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + Cannot be zero. + */ + VkDeviceSize size; + /** \brief Required alignment of the allocation. Optional. -/** -\brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo. + Must be power of two. 
Special value 0 has the same meaning as 1 - means no special alignment is required, so allocation can start at any offset. + */ + VkDeviceSize alignment; + /** \brief Use combination of #VmaVirtualAllocationCreateFlagBits. + */ + VmaVirtualAllocationCreateFlags flags; + /** \brief Custom pointer to be associated with the allocation. Optional. -It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. -It internally creates a temporary, dummy image that never has memory bound. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( - VmaAllocator VMA_NOT_NULL allocator, - const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, - const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, - uint32_t* VMA_NOT_NULL pMemoryTypeIndex); + It can be any value and can be used for user-defined purposes. It can be fetched or changed later. + */ + void *VMA_NULLABLE pUserData; + } VmaVirtualAllocationCreateInfo; -/** \brief Allocates Vulkan device memory and creates #VmaPool object. + /// Parameters of an existing virtual allocation, returned by vmaGetVirtualAllocationInfo(). + typedef struct VmaVirtualAllocationInfo + { + /** \brief Offset of the allocation. -\param allocator Allocator object. -\param pCreateInfo Parameters of pool to create. -\param[out] pPool Handle to created pool. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( - VmaAllocator VMA_NOT_NULL allocator, - const VmaPoolCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaPool VMA_NULLABLE* VMA_NOT_NULL pPool); + Offset at which the allocation was made. + */ + VkDeviceSize offset; + /** \brief Size of the allocation. -/** \brief Destroys #VmaPool object and frees Vulkan device memory. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( - VmaAllocator VMA_NOT_NULL allocator, - VmaPool VMA_NULLABLE pool); + Same value as passed in VmaVirtualAllocationCreateInfo::size. + */ + VkDeviceSize size; + /** \brief Custom pointer associated with the allocation. -/** @} */ + Same value as passed in VmaVirtualAllocationCreateInfo::pUserData or to vmaSetVirtualAllocationUserData(). + */ + void *VMA_NULLABLE pUserData; + } VmaVirtualAllocationInfo; -/** -\addtogroup group_stats -@{ -*/ + /** @} */ -/** \brief Retrieves statistics of existing #VmaPool object. +#endif // _VMA_DATA_TYPES_DECLARATIONS -\param allocator Allocator object. -\param pool Pool object. -\param[out] pPoolStats Statistics of specified pool. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( - VmaAllocator VMA_NOT_NULL allocator, - VmaPool VMA_NOT_NULL pool, - VmaStatistics* VMA_NOT_NULL pPoolStats); +#ifndef _VMA_FUNCTION_HEADERS -/** \brief Retrieves detailed statistics of existing #VmaPool object. + /** + \addtogroup group_init + @{ + */ -\param allocator Allocator object. -\param pool Pool object. -\param[out] pPoolStats Statistics of specified pool. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( - VmaAllocator VMA_NOT_NULL allocator, - VmaPool VMA_NOT_NULL pool, - VmaDetailedStatistics* VMA_NOT_NULL pPoolStats); + /// Creates #VmaAllocator object. + VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( + const VmaAllocatorCreateInfo *VMA_NOT_NULL pCreateInfo, + VmaAllocator VMA_NULLABLE *VMA_NOT_NULL pAllocator); -/** @} */ + /// Destroys allocator object. + VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( + VmaAllocator VMA_NULLABLE allocator); -/** -\addtogroup group_alloc -@{ -*/ + /** \brief Returns information about existing #VmaAllocator object - handle to Vulkan device etc. 
-/** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions. + It might be useful if you want to keep just the #VmaAllocator handle and fetch other required handles to + `VkPhysicalDevice`, `VkDevice` etc. every time using this function. + */ + VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocatorInfo *VMA_NOT_NULL pAllocatorInfo); -Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, -`VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is -`HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). + /** + PhysicalDeviceProperties are fetched from physicalDevice by the allocator. + You can access it here, without fetching it again on your own. + */ + VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( + VmaAllocator VMA_NOT_NULL allocator, + const VkPhysicalDeviceProperties *VMA_NULLABLE *VMA_NOT_NULL ppPhysicalDeviceProperties); -Possible return values: + /** + PhysicalDeviceMemoryProperties are fetched from physicalDevice by the allocator. + You can access it here, without fetching it again on your own. + */ + VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( + VmaAllocator VMA_NOT_NULL allocator, + const VkPhysicalDeviceMemoryProperties *VMA_NULLABLE *VMA_NOT_NULL ppPhysicalDeviceMemoryProperties); -- `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool. -- `VK_SUCCESS` - corruption detection has been performed and succeeded. -- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations. - `VMA_ASSERT` is also fired in that case. -- Other value: Error returned by Vulkan, e.g. memory mapping failure. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption( - VmaAllocator VMA_NOT_NULL allocator, - VmaPool VMA_NOT_NULL pool); + /** + \brief Given Memory Type Index, returns Property Flags of this memory type. -/** \brief Retrieves name of a custom pool. + This is just a convenience function. Same information can be obtained using + vmaGetMemoryProperties(). + */ + VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryTypeIndex, + VkMemoryPropertyFlags *VMA_NOT_NULL pFlags); -After the call `ppName` is either null or points to an internally-owned null-terminated string -containing name of the pool that was previously set. The pointer becomes invalid when the pool is -destroyed or its name is changed using vmaSetPoolName(). -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( - VmaAllocator VMA_NOT_NULL allocator, - VmaPool VMA_NOT_NULL pool, - const char* VMA_NULLABLE* VMA_NOT_NULL ppName); + /** \brief Sets index of the current frame. + */ + VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t frameIndex); -/** \brief Sets name of a custom pool. + /** @} */ -`pName` can be either null or pointer to a null-terminated string with new name for the pool. -Function makes internal copy of the string, so it can be changed or freed immediately after this call. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( - VmaAllocator VMA_NOT_NULL allocator, - VmaPool VMA_NOT_NULL pool, - const char* VMA_NULLABLE pName); + /** + \addtogroup group_stats + @{ + */ -/** \brief General purpose memory allocation. 
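For illustration, a minimal C++ sketch of the query functions added above; `allocator` and `memTypeIndex` are assumed to be provided by surrounding code:

    VkMemoryPropertyFlags flags = 0;
    vmaGetMemoryTypeProperties(allocator, memTypeIndex, &flags);
    if ((flags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0)
    {
        // Memory of this type can be mapped, e.g. with vmaMapMemory().
    }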
+ /** \brief Retrieves statistics from current state of the Allocator. -\param allocator -\param pVkMemoryRequirements -\param pCreateInfo -\param[out] pAllocation Handle to allocated memory. -\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). + This function is called "calculate" not "get" because it has to traverse all + internal data structures, so it may be quite slow. Use it for debugging purposes. + For faster but more brief statistics suitable to be called every frame or every allocation, + use vmaGetHeapBudgets(). -You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). + Note that when using allocator from multiple threads, returned information may immediately + become outdated. + */ + VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( + VmaAllocator VMA_NOT_NULL allocator, + VmaTotalStatistics *VMA_NOT_NULL pStats); -It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(), -vmaCreateBuffer(), vmaCreateImage() instead whenever possible. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( - VmaAllocator VMA_NOT_NULL allocator, - const VkMemoryRequirements* VMA_NOT_NULL pVkMemoryRequirements, - const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, - VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + /** \brief Retrieves information about current memory usage and budget for all memory heaps. -/** \brief General purpose memory allocation for multiple allocation objects at once. + \param allocator + \param[out] pBudgets Must point to array with number of elements at least equal to number of memory heaps in physical device used. -\param allocator Allocator object. -\param pVkMemoryRequirements Memory requirements for each allocation. -\param pCreateInfo Creation parameters for each allocation. -\param allocationCount Number of allocations to make. -\param[out] pAllocations Pointer to array that will be filled with handles to created allocations. -\param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations. + This function is called "get" not "calculate" because it is very fast, suitable to be called + every frame or every allocation. For more detailed statistics use vmaCalculateStatistics(). -You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). + Note that when using allocator from multiple threads, returned information may immediately + become outdated. + */ + VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( + VmaAllocator VMA_NOT_NULL allocator, + VmaBudget *VMA_NOT_NULL VMA_LEN_IF_NOT_NULL("VkPhysicalDeviceMemoryProperties::memoryHeapCount") pBudgets); -Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding. -It is just a general purpose allocation function able to make multiple allocations at once. -It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times. + /** @} */ -All allocations are made using same parameters. All of them are created out of the same memory pool and type. -If any allocation fails, all allocations already made within this function call are also freed, so that when -returned result is not `VK_SUCCESS`, `pAllocation` array is always entirely filled with `VK_NULL_HANDLE`. 
-*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( - VmaAllocator VMA_NOT_NULL allocator, - const VkMemoryRequirements* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements, - const VmaAllocationCreateInfo* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo, - size_t allocationCount, - VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations, - VmaAllocationInfo* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo); + /** + \addtogroup group_alloc + @{ + */ -/** \brief Allocates memory suitable for given `VkBuffer`. + /** + \brief Helps to find memoryTypeIndex, given memoryTypeBits and VmaAllocationCreateInfo. -\param allocator -\param buffer -\param pCreateInfo -\param[out] pAllocation Handle to allocated memory. -\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). + This algorithm tries to find a memory type that: -It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory(). + - Is allowed by memoryTypeBits. + - Contains all the flags from pAllocationCreateInfo->requiredFlags. + - Matches intended usage. + - Has as many flags from pAllocationCreateInfo->preferredFlags as possible. -This is a special-purpose function. In most cases you should use vmaCreateBuffer(). + \return Returns VK_ERROR_FEATURE_NOT_PRESENT if not found. Receiving such result + from this function or any other allocating function probably means that your + device doesn't support any memory type with requested features for the specific + type of resource you want to use it for. Please check parameters of your + resource, like image layout (OPTIMAL versus LINEAR) or mip level count. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryTypeBits, + const VmaAllocationCreateInfo *VMA_NOT_NULL pAllocationCreateInfo, + uint32_t *VMA_NOT_NULL pMemoryTypeIndex); -You must free the allocation using vmaFreeMemory() when no longer needed. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( - VmaAllocator VMA_NOT_NULL allocator, - VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer, - const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, - VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + /** + \brief Helps to find memoryTypeIndex, given VkBufferCreateInfo and VmaAllocationCreateInfo. -/** \brief Allocates memory suitable for given `VkImage`. + It can be useful e.g. to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. + It internally creates a temporary, dummy buffer that never has memory bound. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( + VmaAllocator VMA_NOT_NULL allocator, + const VkBufferCreateInfo *VMA_NOT_NULL pBufferCreateInfo, + const VmaAllocationCreateInfo *VMA_NOT_NULL pAllocationCreateInfo, + uint32_t *VMA_NOT_NULL pMemoryTypeIndex); -\param allocator -\param image -\param pCreateInfo -\param[out] pAllocation Handle to allocated memory. -\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). + /** + \brief Helps to find memoryTypeIndex, given VkImageCreateInfo and VmaAllocationCreateInfo. -It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindImageMemory(). + It can be useful e.g. 
to determine value to be used as VmaPoolCreateInfo::memoryTypeIndex. + It internally creates a temporary, dummy image that never has memory bound. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( + VmaAllocator VMA_NOT_NULL allocator, + const VkImageCreateInfo *VMA_NOT_NULL pImageCreateInfo, + const VmaAllocationCreateInfo *VMA_NOT_NULL pAllocationCreateInfo, + uint32_t *VMA_NOT_NULL pMemoryTypeIndex); -This is a special-purpose function. In most cases you should use vmaCreateImage(). + /** \brief Allocates Vulkan device memory and creates #VmaPool object. -You must free the allocation using vmaFreeMemory() when no longer needed. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( - VmaAllocator VMA_NOT_NULL allocator, - VkImage VMA_NOT_NULL_NON_DISPATCHABLE image, - const VmaAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, - VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + \param allocator Allocator object. + \param pCreateInfo Parameters of pool to create. + \param[out] pPool Handle to created pool. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( + VmaAllocator VMA_NOT_NULL allocator, + const VmaPoolCreateInfo *VMA_NOT_NULL pCreateInfo, + VmaPool VMA_NULLABLE *VMA_NOT_NULL pPool); -/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage(). + /** \brief Destroys #VmaPool object and frees Vulkan device memory. + */ + VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NULLABLE pool); -Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( - VmaAllocator VMA_NOT_NULL allocator, - const VmaAllocation VMA_NULLABLE allocation); + /** @} */ -/** \brief Frees memory and destroys multiple allocations. + /** + \addtogroup group_stats + @{ + */ -Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding. -It is just a general purpose function to free memory and destroy allocations made using e.g. vmaAllocateMemory(), -vmaAllocateMemoryPages() and other functions. -It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times. + /** \brief Retrieves statistics of existing #VmaPool object. -Allocations in `pAllocations` array can come from any memory pools and types. -Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( - VmaAllocator VMA_NOT_NULL allocator, - size_t allocationCount, - const VmaAllocation VMA_NULLABLE* VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations); + \param allocator Allocator object. + \param pool Pool object. + \param[out] pPoolStats Statistics of specified pool. + */ + VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + VmaStatistics *VMA_NOT_NULL pPoolStats); -/** \brief Returns current information about specified allocation. + /** \brief Retrieves detailed statistics of existing #VmaPool object. -Current parameters of given allocation are returned in `pAllocationInfo`. + \param allocator Allocator object. + \param pool Pool object. + \param[out] pPoolStats Statistics of specified pool. 
+ */ + VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + VmaDetailedStatistics *VMA_NOT_NULL pPoolStats); -Although this function doesn't lock any mutex, so it should be quite efficient, -you should avoid calling it too often. -You can retrieve same VmaAllocationInfo structure while creating your resource, from function -vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure parameters don't change -(e.g. due to defragmentation). -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VmaAllocationInfo* VMA_NOT_NULL pAllocationInfo); + /** @} */ -/** \brief Sets pUserData in given allocation to new value. + /** + \addtogroup group_alloc + @{ + */ -The value of pointer `pUserData` is copied to allocation's `pUserData`. -It is opaque, so you can use it however you want - e.g. -as a pointer, ordinal number or some handle to you own data. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - void* VMA_NULLABLE pUserData); + /** \brief Checks magic number in margins around all allocations in given memory pool in search for corruptions. -/** \brief Sets pName in given allocation to new value. + Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, + `VMA_DEBUG_MARGIN` is defined to nonzero and the pool is created in memory type that is + `HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). -`pName` must be either null, or pointer to a null-terminated string. The function -makes local copy of the string and sets it as allocation's `pName`. String -passed as pName doesn't need to be valid for whole lifetime of the allocation - -you can free it after this call. String previously pointed by allocation's -`pName` is freed from memory. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - const char* VMA_NULLABLE pName); + Possible return values: -/** -\brief Given an allocation, returns Property Flags of its memory type. + - `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for specified pool. + - `VK_SUCCESS` - corruption detection has been performed and succeeded. + - `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations. + `VMA_ASSERT` is also fired in that case. + - Other value: Error returned by Vulkan, e.g. memory mapping failure. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool); -This is just a convenience function. Same information can be obtained using -vmaGetAllocationInfo() + vmaGetMemoryProperties(). -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkMemoryPropertyFlags* VMA_NOT_NULL pFlags); + /** \brief Retrieves name of a custom pool. -/** \brief Maps memory represented by given allocation and returns pointer to it. + After the call `ppName` is either null or points to an internally-owned null-terminated string + containing name of the pool that was previously set. 
The pointer becomes invalid when the pool is + destroyed or its name is changed using vmaSetPoolName(). + */ + VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + const char *VMA_NULLABLE *VMA_NOT_NULL ppName); -Maps memory represented by given allocation to make it accessible to CPU code. -When succeeded, `*ppData` contains pointer to first byte of this memory. + /** \brief Sets name of a custom pool. -\warning -If the allocation is part of a bigger `VkDeviceMemory` block, returned pointer is -correctly offsetted to the beginning of region assigned to this particular allocation. -Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block. -You should not add VmaAllocationInfo::offset to it! + `pName` can be either null or pointer to a null-terminated string with new name for the pool. + Function makes internal copy of the string, so it can be changed or freed immediately after this call. + */ + VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( + VmaAllocator VMA_NOT_NULL allocator, + VmaPool VMA_NOT_NULL pool, + const char *VMA_NULLABLE pName); -Mapping is internally reference-counted and synchronized, so despite raw Vulkan -function `vkMapMemory()` cannot be used to map same block of `VkDeviceMemory` -multiple times simultaneously, it is safe to call this function on allocations -assigned to the same memory block. Actual Vulkan memory will be mapped on first -mapping and unmapped on last unmapping. + /** \brief General purpose memory allocation. -If the function succeeded, you must call vmaUnmapMemory() to unmap the -allocation when mapping is no longer needed or before freeing the allocation, at -the latest. + \param allocator + \param pVkMemoryRequirements + \param pCreateInfo + \param[out] pAllocation Handle to allocated memory. + \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). -It also safe to call this function multiple times on the same allocation. You -must call vmaUnmapMemory() same number of times as you called vmaMapMemory(). + You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). -It is also safe to call this function on allocation created with -#VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time. -You must still call vmaUnmapMemory() same number of times as you called -vmaMapMemory(). You must not call vmaUnmapMemory() additional time to free the -"0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag. + It is recommended to use vmaAllocateMemoryForBuffer(), vmaAllocateMemoryForImage(), + vmaCreateBuffer(), vmaCreateImage() instead whenever possible. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( + VmaAllocator VMA_NOT_NULL allocator, + const VkMemoryRequirements *VMA_NOT_NULL pVkMemoryRequirements, + const VmaAllocationCreateInfo *VMA_NOT_NULL pCreateInfo, + VmaAllocation VMA_NULLABLE *VMA_NOT_NULL pAllocation, + VmaAllocationInfo *VMA_NULLABLE pAllocationInfo); + + /** \brief General purpose memory allocation for multiple allocation objects at once. + + \param allocator Allocator object. + \param pVkMemoryRequirements Memory requirements for each allocation. + \param pCreateInfo Creation parameters for each allocation. + \param allocationCount Number of allocations to make. + \param[out] pAllocations Pointer to array that will be filled with handles to created allocations. 
+ \param[out] pAllocationInfo Optional. Pointer to array that will be filled with parameters of created allocations. + + You should free the memory using vmaFreeMemory() or vmaFreeMemoryPages(). + + Word "pages" is just a suggestion to use this function to allocate pieces of memory needed for sparse binding. + It is just a general purpose allocation function able to make multiple allocations at once. + It may be internally optimized to be more efficient than calling vmaAllocateMemory() `allocationCount` times. + + All allocations are made using same parameters. All of them are created out of the same memory pool and type. + If any allocation fails, all allocations already made within this function call are also freed, so that when + returned result is not `VK_SUCCESS`, `pAllocation` array is always entirely filled with `VK_NULL_HANDLE`. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( + VmaAllocator VMA_NOT_NULL allocator, + const VkMemoryRequirements *VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pVkMemoryRequirements, + const VmaAllocationCreateInfo *VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pCreateInfo, + size_t allocationCount, + VmaAllocation VMA_NULLABLE *VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations, + VmaAllocationInfo *VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) pAllocationInfo); -This function fails when used on allocation made in memory type that is not -`HOST_VISIBLE`. + /** \brief Allocates memory suitable for given `VkBuffer`. -This function doesn't automatically flush or invalidate caches. -If the allocation is made from a memory types that is not `HOST_COHERENT`, -you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - void* VMA_NULLABLE* VMA_NOT_NULL ppData); + \param allocator + \param buffer + \param pCreateInfo + \param[out] pAllocation Handle to allocated memory. + \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). -/** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory(). + It only creates #VmaAllocation. To bind the memory to the buffer, use vmaBindBufferMemory(). -For details, see description of vmaMapMemory(). + This is a special-purpose function. In most cases you should use vmaCreateBuffer(). -This function doesn't automatically flush or invalidate caches. -If the allocation is made from a memory types that is not `HOST_COHERENT`, -you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation); + You must free the allocation using vmaFreeMemory() when no longer needed. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( + VmaAllocator VMA_NOT_NULL allocator, + VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer, + const VmaAllocationCreateInfo *VMA_NOT_NULL pCreateInfo, + VmaAllocation VMA_NULLABLE *VMA_NOT_NULL pAllocation, + VmaAllocationInfo *VMA_NULLABLE pAllocationInfo); -/** \brief Flushes memory of given allocation. + /** \brief Allocates memory suitable for given `VkImage`. -Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation. 
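For illustration, a minimal sketch of the separate create/allocate/bind path that vmaAllocateMemoryForBuffer() enables; as the documentation notes, vmaCreateBuffer() is usually the better choice. `device` and `allocator` are assumed to exist, error handling is omitted, and `VMA_MEMORY_USAGE_AUTO` is assumed to be available in this header revision:

    VkBufferCreateInfo bufferCI{VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO};
    bufferCI.size = 65536;
    bufferCI.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
    VkBuffer buffer;
    vkCreateBuffer(device, &bufferCI, nullptr, &buffer);

    VmaAllocationCreateInfo allocCI{};
    allocCI.usage = VMA_MEMORY_USAGE_AUTO;
    VmaAllocation allocation;
    vmaAllocateMemoryForBuffer(allocator, buffer, &allocCI, &allocation, nullptr);
    vmaBindBufferMemory(allocator, allocation, buffer); // use this instead of vkBindBufferMemory()

    // ... use the buffer ...
    vkDestroyBuffer(device, buffer, nullptr);
    vmaFreeMemory(allocator, allocation);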
-It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`. -Unmap operation doesn't do that automatically. + \param allocator + \param image + \param pCreateInfo + \param[out] pAllocation Handle to allocated memory. + \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). -- `offset` must be relative to the beginning of allocation. -- `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` the the end of given allocation. -- `offset` and `size` don't have to be aligned. - They are internally rounded down/up to multiply of `nonCoherentAtomSize`. -- If `size` is 0, this call is ignored. -- If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`, - this call is ignored. + It only creates #VmaAllocation. To bind the memory to the image, use vmaBindImageMemory(). -Warning! `offset` and `size` are relative to the contents of given `allocation`. -If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively. -Do not pass allocation's offset as `offset`!!! + This is a special-purpose function. In most cases you should use vmaCreateImage(). -This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is -called, otherwise `VK_SUCCESS`. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkDeviceSize offset, - VkDeviceSize size); + You must free the allocation using vmaFreeMemory() when no longer needed. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( + VmaAllocator VMA_NOT_NULL allocator, + VkImage VMA_NOT_NULL_NON_DISPATCHABLE image, + const VmaAllocationCreateInfo *VMA_NOT_NULL pCreateInfo, + VmaAllocation VMA_NULLABLE *VMA_NOT_NULL pAllocation, + VmaAllocationInfo *VMA_NULLABLE pAllocationInfo); -/** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage(). + /** \brief Frees memory previously allocated using vmaAllocateMemory(), vmaAllocateMemoryForBuffer(), or vmaAllocateMemoryForImage(). -Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( - VmaAllocator VMA_NOT_NULL allocator, - const VmaAllocation VMA_NULLABLE allocation); + Passing `VK_NULL_HANDLE` as `allocation` is valid. Such function call is just skipped. + */ + VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( + VmaAllocator VMA_NOT_NULL allocator, + const VmaAllocation VMA_NULLABLE allocation); -/** \brief Frees memory and destroys multiple allocations. + /** \brief Frees memory and destroys multiple allocations. -Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding. -It is just a general purpose function to free memory and destroy allocations made using e.g. + Word "pages" is just a suggestion to use this function to free pieces of memory used for sparse binding. + It is just a general purpose function to free memory and destroy allocations made using e.g.
vmaAllocateMemory(), + vmaAllocateMemoryPages() and other functions. + It may be internally optimized to be more efficient than calling vmaFreeMemory() `allocationCount` times. -This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if -it is called, otherwise `VK_SUCCESS`. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkDeviceSize offset, - VkDeviceSize size); + Allocations in `pAllocations` array can come from any memory pools and types. + Passing `VK_NULL_HANDLE` as elements of `pAllocations` array is valid. Such entries are just skipped. + */ + VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( + VmaAllocator VMA_NOT_NULL allocator, + size_t allocationCount, + const VmaAllocation VMA_NULLABLE *VMA_NOT_NULL VMA_LEN_IF_NOT_NULL(allocationCount) pAllocations); -/** \brief Flushes memory of given set of allocations. + /** \brief Returns current information about specified allocation. -Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations. -For more information, see documentation of vmaFlushAllocation(). + Current parameters of given allocation are returned in `pAllocationInfo`. -\param allocator -\param allocationCount -\param allocations -\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all ofsets are zero. -\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations. + Although this function doesn't lock any mutex and so should be quite efficient, + you should avoid calling it too often. + You can retrieve the same VmaAllocationInfo structure while creating your resource, from function + vmaCreateBuffer(), vmaCreateImage(). You can remember it if you are sure parameters don't change + (e.g. due to defragmentation). + */ + VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VmaAllocationInfo *VMA_NOT_NULL pAllocationInfo); -This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is -called, otherwise `VK_SUCCESS`. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t allocationCount, - const VmaAllocation VMA_NOT_NULL* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations, - const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets, - const VkDeviceSize* VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes); + /** \brief Sets pUserData in given allocation to new value. -/** \brief Invalidates memory of given set of allocations. + The value of pointer `pUserData` is copied to allocation's `pUserData`. + It is opaque, so you can use it however you want - e.g. + as a pointer, ordinal number or some handle to your own data. + */ + VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + void *VMA_NULLABLE pUserData); + + /** \brief Sets pName in given allocation to new value. + + `pName` must be either null, or pointer to a null-terminated string. The function + makes local copy of the string and sets it as allocation's `pName`. String + passed as pName doesn't need to be valid for whole lifetime of the allocation - + you can free it after this call.
String previously pointed by allocation's + `pName` is freed from memory. + */ + VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const char *VMA_NULLABLE pName); -Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations. -For more information, see documentation of vmaInvalidateAllocation(). + /** + \brief Given an allocation, returns Property Flags of its memory type. -\param allocator -\param allocationCount -\param allocations -\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all ofsets are zero. -\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations. + This is just a convenience function. Same information can be obtained using + vmaGetAllocationInfo() + vmaGetMemoryProperties(). + */ + VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkMemoryPropertyFlags *VMA_NOT_NULL pFlags); + + /** \brief Maps memory represented by given allocation and returns pointer to it. + + Maps memory represented by given allocation to make it accessible to CPU code. + On success, `*ppData` contains a pointer to the first byte of this memory. + + \warning + If the allocation is part of a bigger `VkDeviceMemory` block, returned pointer is + correctly offset to the beginning of the region assigned to this particular allocation. + Unlike the result of `vkMapMemory`, it points to the allocation, not to the beginning of the whole block. + You should not add VmaAllocationInfo::offset to it! + + Mapping is internally reference-counted and synchronized, so even though the raw Vulkan + function `vkMapMemory()` cannot be used to map the same block of `VkDeviceMemory` + multiple times simultaneously, it is safe to call this function on allocations + assigned to the same memory block. Actual Vulkan memory will be mapped on first + mapping and unmapped on last unmapping. + + If the function succeeded, you must call vmaUnmapMemory() to unmap the + allocation when mapping is no longer needed or before freeing the allocation, at + the latest. + + It is also safe to call this function multiple times on the same allocation. You + must call vmaUnmapMemory() the same number of times as you called vmaMapMemory(). + + It is also safe to call this function on allocation created with + #VMA_ALLOCATION_CREATE_MAPPED_BIT flag. Its memory stays mapped all the time. + You must still call vmaUnmapMemory() the same number of times as you called + vmaMapMemory(). You must not call vmaUnmapMemory() an additional time to free the + "0-th" mapping made automatically due to #VMA_ALLOCATION_CREATE_MAPPED_BIT flag. + + This function fails when used on an allocation made in a memory type that is not + `HOST_VISIBLE`. + + This function doesn't automatically flush or invalidate caches. + If the allocation is made from a memory type that is not `HOST_COHERENT`, + you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification.
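A minimal sketch of the map/write/flush/unmap sequence described above, assuming `allocation` was made in a `HOST_VISIBLE` memory type and that `srcData`/`srcSize` are hypothetical caller-provided inputs:

    void* mapped = nullptr;
    if (vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS)
    {
        memcpy(mapped, srcData, srcSize); // write through the mapped pointer
        // Required when the memory type is not HOST_COHERENT; harmless otherwise.
        vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
        vmaUnmapMemory(allocator, allocation);
    }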
+ */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + void *VMA_NULLABLE *VMA_NOT_NULL ppData); + /** \brief Unmaps memory represented by given allocation, mapped previously using vmaMapMemory(). + For details, see description of vmaMapMemory(). + This function doesn't automatically flush or invalidate caches. + If the allocation is made from a memory type that is not `HOST_COHERENT`, + you also need to use vmaInvalidateAllocation() / vmaFlushAllocation(), as required by Vulkan specification. + */ + VMA_CALL_PRE void VMA_CALL_POST vmaUnmapMemory( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation); + + /** \brief Flushes memory of given allocation. + + Calls `vkFlushMappedMemoryRanges()` for memory associated with given range of given allocation. + It needs to be called after writing to a mapped memory for memory types that are not `HOST_COHERENT`. + Unmap operation doesn't do that automatically. + + - `offset` must be relative to the beginning of allocation. + - `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation. + - `offset` and `size` don't have to be aligned. + They are internally rounded down/up to a multiple of `nonCoherentAtomSize`. + - If `size` is 0, this call is ignored. + - If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`, + this call is ignored. + + Warning! `offset` and `size` are relative to the contents of given `allocation`. + If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively. + Do not pass allocation's offset as `offset`!!! + + This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is + called, otherwise `VK_SUCCESS`. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocation( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize offset, + VkDeviceSize size); -/** \brief Invalidates memory of given allocation. + /** \brief Invalidates memory of given allocation. -Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given range of given allocation. -It needs to be called before reading from a mapped memory for memory types that are not `HOST_COHERENT`. -Map operation doesn't do that automatically.
-- `VK_SUCCESS` - corruption detection has been performed and succeeded. -- `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations. - `VMA_ASSERT` is also fired in that case. -- Other value: Error returned by Vulkan, e.g. memory mapping failure. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( - VmaAllocator VMA_NOT_NULL allocator, - uint32_t memoryTypeBits); + - `offset` must be relative to the beginning of allocation. + - `size` can be `VK_WHOLE_SIZE`. It means all memory from `offset` to the end of given allocation. + - `offset` and `size` don't have to be aligned. + They are internally rounded down/up to a multiple of `nonCoherentAtomSize`. + - If `size` is 0, this call is ignored. + - If memory type that the `allocation` belongs to is not `HOST_VISIBLE` or it is `HOST_COHERENT`, + this call is ignored. -/** \brief Begins defragmentation process. + Warning! `offset` and `size` are relative to the contents of given `allocation`. + If you mean whole allocation, you can pass 0 and `VK_WHOLE_SIZE`, respectively. + Do not pass allocation's offset as `offset`!!! -\param allocator Allocator object. -\param pInfo Structure filled with parameters of defragmentation. -\param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation. -\returns -- `VK_SUCCESS` if defragmentation can begin. -- `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported. + This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if + it is called, otherwise `VK_SUCCESS`. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize offset, + VkDeviceSize size); -For more information about defragmentation, see documentation chapter: -[Defragmentation](@ref defragmentation). -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( - VmaAllocator VMA_NOT_NULL allocator, - const VmaDefragmentationInfo* VMA_NOT_NULL pInfo, - VmaDefragmentationContext VMA_NULLABLE* VMA_NOT_NULL pContext); + /** \brief Flushes memory of given set of allocations. -/** \brief Ends defragmentation process. + Calls `vkFlushMappedMemoryRanges()` for memory associated with given ranges of given allocations. + For more information, see documentation of vmaFlushAllocation(). -\param allocator Allocator object. -\param context Context object that has been created by vmaBeginDefragmentation(). -\param[out] pStats Optional stats for the defragmentation. Can be null. + \param allocator + \param allocationCount + \param allocations + \param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all offsets are zero. + \param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations. -Use this function to finish defragmentation started by vmaBeginDefragmentation(). -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( - VmaAllocator VMA_NOT_NULL allocator, - VmaDefragmentationContext VMA_NOT_NULL context, - VmaDefragmentationStats* VMA_NULLABLE pStats); + This function returns the `VkResult` from `vkFlushMappedMemoryRanges` if it is + called, otherwise `VK_SUCCESS`.
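A short sketch of the batched variant documented above; `allocA`, `allocB`, and `allocC` are hypothetical mapped allocations, and passing null for the `offsets`/`sizes` arrays means offset 0 and `VK_WHOLE_SIZE` for every element:

    VmaAllocation allocs[] = {allocA, allocB, allocC};
    vmaFlushAllocations(allocator, 3, allocs, nullptr, nullptr);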
+ */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t allocationCount, + const VmaAllocation VMA_NOT_NULL *VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations, + const VkDeviceSize *VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets, + const VkDeviceSize *VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes); -/** \brief Invalidates memory of given set of allocations. + /** \brief Invalidates memory of given set of allocations. -Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations. -For more information, see documentation of vmaInvalidateAllocation(). + Calls `vkInvalidateMappedMemoryRanges()` for memory associated with given ranges of given allocations. + For more information, see documentation of vmaInvalidateAllocation(). -\param allocator -\param allocationCount -\param allocations -\param offsets If not null, it must point to an array of offsets of regions to flush, relative to the beginning of respective allocations. Null means all ofsets are zero. -\param sizes If not null, it must point to an array of sizes of regions to flush in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations. + \param allocator + \param allocationCount + \param allocations + \param offsets If not null, it must point to an array of offsets of regions to invalidate, relative to the beginning of respective allocations. Null means all offsets are zero. + \param sizes If not null, it must point to an array of sizes of regions to invalidate in respective allocations. Null means `VK_WHOLE_SIZE` for all allocations. + This function returns the `VkResult` from `vkInvalidateMappedMemoryRanges` if it is + called, otherwise `VK_SUCCESS`. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t allocationCount, + const VmaAllocation VMA_NOT_NULL *VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) allocations, + const VkDeviceSize *VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) offsets, + const VkDeviceSize *VMA_NULLABLE VMA_LEN_IF_NOT_NULL(allocationCount) sizes); + /** \brief Checks magic number in margins around all allocations in given memory types (in both default and custom pools) in search for corruptions. + \param allocator + \param memoryTypeBits Bit mask, where each bit set means that a memory type with that index should be checked.
+ Corruption detection is enabled only when `VMA_DEBUG_DETECT_CORRUPTION` macro is defined to nonzero, + `VMA_DEBUG_MARGIN` is defined to nonzero and only for memory types that are + `HOST_VISIBLE` and `HOST_COHERENT`. For more information, see [Corruption detection](@ref debugging_memory_usage_corruption_detection). -If no more moves are possible you can end whole defragmentation. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( - VmaAllocator VMA_NOT_NULL allocator, - VmaDefragmentationContext VMA_NOT_NULL context, - VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo); + Possible return values: -/** \brief Binds buffer to allocation. + - `VK_ERROR_FEATURE_NOT_PRESENT` - corruption detection is not enabled for any of specified memory types. + - `VK_SUCCESS` - corruption detection has been performed and succeeded. + - `VK_ERROR_UNKNOWN` - corruption detection has been performed and found memory corruptions around one of the allocations. + `VMA_ASSERT` is also fired in that case. + - Other value: Error returned by Vulkan, e.g. memory mapping failure. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( + VmaAllocator VMA_NOT_NULL allocator, + uint32_t memoryTypeBits); -Binds specified buffer to region of memory represented by specified allocation. -Gets `VkDeviceMemory` handle and offset from the allocation. -If you want to create a buffer, allocate memory for it and bind them together separately, -you should use this function for binding instead of standard `vkBindBufferMemory()`, -because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple -allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously -(which is illegal in Vulkan). + /** \brief Begins defragmentation process. -It is recommended to use function vmaCreateBuffer() instead of this one. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer); + \param allocator Allocator object. + \param pInfo Structure filled with parameters of defragmentation. + \param[out] pContext Context object that must be passed to vmaEndDefragmentation() to finish defragmentation. + \returns + - `VK_SUCCESS` if defragmentation can begin. + - `VK_ERROR_FEATURE_NOT_PRESENT` if defragmentation is not supported. -/** \brief Binds buffer to allocation with additional parameters. + For more information about defragmentation, see documentation chapter: + [Defragmentation](@ref defragmentation). + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( + VmaAllocator VMA_NOT_NULL allocator, + const VmaDefragmentationInfo *VMA_NOT_NULL pInfo, + VmaDefragmentationContext VMA_NULLABLE *VMA_NOT_NULL pContext); -\param allocator -\param allocation -\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0. -\param buffer -\param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null. + /** \brief Ends defragmentation process. -This function is similar to vmaBindBufferMemory(), but it provides additional parameters. + \param allocator Allocator object. + \param context Context object that has been created by vmaBeginDefragmentation(). + \param[out] pStats Optional stats for the defragmentation. Can be null. 
-If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag -or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkDeviceSize allocationLocalOffset, - VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer, - const void* VMA_NULLABLE pNext); + Use this function to finish defragmentation started by vmaBeginDefragmentation(). + */ + VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationStats *VMA_NULLABLE pStats); + + /** \brief Starts single defragmentation pass. + + \param allocator Allocator object. + \param context Context object that has been created by vmaBeginDefragmentation(). + \param[out] pPassInfo Computed information for current pass. + \returns + - `VK_SUCCESS` if no more moves are possible. Then you can omit call to vmaEndDefragmentationPass() and simply end whole defragmentation. + - `VK_INCOMPLETE` if there are pending moves returned in `pPassInfo`. You need to perform them, call vmaEndDefragmentationPass(), + and then preferably try another pass with vmaBeginDefragmentationPass(). + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo *VMA_NOT_NULL pPassInfo); -/** \brief Binds image to allocation. + /** \brief Ends single defragmentation pass. -Binds specified image to region of memory represented by specified allocation. -Gets `VkDeviceMemory` handle and offset from the allocation. -If you want to create an image, allocate memory for it and bind them together separately, -you should use this function for binding instead of standard `vkBindImageMemory()`, -because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple -allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously -(which is illegal in Vulkan). + \param allocator Allocator object. + \param context Context object that has been created by vmaBeginDefragmentation(). + \param pPassInfo Computed information for current pass filled by vmaBeginDefragmentationPass() and possibly modified by you. -It is recommended to use function vmaCreateImage() instead of this one. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkImage VMA_NOT_NULL_NON_DISPATCHABLE image); + Returns `VK_SUCCESS` if no more moves are possible or `VK_INCOMPLETE` if more defragmentations are possible. -/** \brief Binds image to allocation with additional parameters. + Ends incremental defragmentation pass and commits all defragmentation moves from `pPassInfo`. + After this call: -\param allocator -\param allocation -\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0. -\param image -\param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null. 
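Tying the defragmentation entry points above together, a condensed C++ sketch of one possible driving loop. A zero-initialized VmaDefragmentationInfo is assumed to be acceptable here (defragmenting the default pools); recreating resources and recording the actual GPU copies is application-specific and only indicated by comments:

    VmaDefragmentationInfo defragInfo = {};
    VmaDefragmentationContext ctx = VK_NULL_HANDLE;
    if (vmaBeginDefragmentation(allocator, &defragInfo, &ctx) == VK_SUCCESS)
    {
        for (;;)
        {
            VmaDefragmentationPassMoveInfo pass = {};
            if (vmaBeginDefragmentationPass(allocator, ctx, &pass) == VK_SUCCESS)
            {
                break; // no more moves are possible, end the whole defragmentation
            }
            for (uint32_t i = 0; i < pass.moveCount; ++i)
            {
                // Application-specific work (not shown): create a replacement resource
                // at pass.pMoves[i].dstMemory + pass.pMoves[i].dstOffset, copy from
                // pass.pMoves[i].srcAllocation, wait for the GPU, destroy the old one.
                // To skip a move instead:
                // pass.pMoves[i].operation = VMA_DEFRAGMENTATION_MOVE_OPERATION_IGNORE;
            }
            if (vmaEndDefragmentationPass(allocator, ctx, &pass) == VK_SUCCESS)
            {
                break; // all moves committed, defragmentation is complete
            }
        }
        VmaDefragmentationStats stats = {};
        vmaEndDefragmentation(allocator, ctx, &stats);
    }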
+ - Allocations at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_COPY + (which is the default) will be pointing to the new destination place. + - Allocation at `pPassInfo[i].srcAllocation` that had `pPassInfo[i].operation ==` #VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY + will be freed. -This function is similar to vmaBindImageMemory(), but it provides additional parameters. + If no more moves are possible you can end whole defragmentation. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( + VmaAllocator VMA_NOT_NULL allocator, + VmaDefragmentationContext VMA_NOT_NULL context, + VmaDefragmentationPassMoveInfo *VMA_NOT_NULL pPassInfo); + + /** \brief Binds buffer to allocation. + + Binds specified buffer to region of memory represented by specified allocation. + Gets `VkDeviceMemory` handle and offset from the allocation. + If you want to create a buffer, allocate memory for it and bind them together separately, + you should use this function for binding instead of standard `vkBindBufferMemory()`, + because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple + allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously + (which is illegal in Vulkan). + + It is recommended to use function vmaCreateBuffer() instead of this one. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer); -If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag -or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkDeviceSize allocationLocalOffset, - VkImage VMA_NOT_NULL_NON_DISPATCHABLE image, - const void* VMA_NULLABLE pNext); - -/** \brief Creates a new `VkBuffer`, allocates and binds memory for it. - -\param allocator -\param pBufferCreateInfo -\param pAllocationCreateInfo -\param[out] pBuffer Buffer that was created. -\param[out] pAllocation Allocation that was created. -\param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). - -This function automatically: - --# Creates buffer. --# Allocates appropriate memory for it. --# Binds the buffer with the memory. - -If any of these operations fail, buffer and allocation are not created, -returned value is negative error code, `*pBuffer` and `*pAllocation` are null. - -If the function succeeded, you must destroy both buffer and allocation when you -no longer need them using either convenience function vmaDestroyBuffer() or -separately, using `vkDestroyBuffer()` and vmaFreeMemory(). - -If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used, -VK_KHR_dedicated_allocation extension is used internally to query driver whether -it requires or prefers the new buffer to have dedicated allocation. If yes, -and if dedicated allocation is possible -(#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated -allocation for this buffer, just like when using -#VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. - -\note This function creates a new `VkBuffer`. 
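Taken together, vmaBeginDefragmentation(), vmaBeginDefragmentationPass(), vmaEndDefragmentationPass() and
vmaEndDefragmentation() above form a begin/pass/end protocol. A sketch of the loop they imply, assuming a
valid `allocator`; the actual copying of data for each returned move is elided:

\code
VmaDefragmentationInfo defragInfo = {};
VmaDefragmentationContext defragCtx = VMA_NULL;
if (vmaBeginDefragmentation(allocator, &defragInfo, &defragCtx) == VK_SUCCESS)
{
    for (;;)
    {
        VmaDefragmentationPassMoveInfo passInfo = {};
        if (vmaBeginDefragmentationPass(allocator, defragCtx, &passInfo) == VK_SUCCESS)
            break; // no more moves are possible - end the whole defragmentation
        // VK_INCOMPLETE: perform the moves listed in passInfo here (copy data, recreate
        // buffers/images), or change each move's `operation` as documented above.
        if (vmaEndDefragmentationPass(allocator, defragCtx, &passInfo) == VK_SUCCESS)
            break;
    }
    VmaDefragmentationStats stats = {};
    vmaEndDefragmentation(allocator, defragCtx, &stats);
}
\endcode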
Sub-allocation of parts of one large buffer, -although recommended as a good practice, is out of scope of this library and could be implemented -by the user as a higher-level logic on top of VMA. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( - VmaAllocator VMA_NOT_NULL allocator, - const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, - const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, - VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer, - VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, - VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + /** \brief Binds buffer to allocation with additional parameters. -/** \brief Creates a buffer with additional minimum alignment. + \param allocator + \param allocation + \param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0. + \param buffer + \param pNext A chain of structures to be attached to `VkBindBufferMemoryInfoKHR` structure used internally. Normally it should be null. -Similar to vmaCreateBuffer() but provides additional parameter `minAlignment` which allows to specify custom, -minimum alignment to be used when placing the buffer inside a larger memory block, which may be needed e.g. -for interop with OpenGL. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( - VmaAllocator VMA_NOT_NULL allocator, - const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, - const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, - VkDeviceSize minAlignment, - VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer, - VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, - VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + This function is similar to vmaBindBufferMemory(), but it provides additional parameters. -/** \brief Creates a new `VkBuffer`, binds already created memory for it. + If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag + or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + VkBuffer VMA_NOT_NULL_NON_DISPATCHABLE buffer, + const void *VMA_NULLABLE pNext); -\param allocator -\param allocation Allocation that provides memory to be used for binding new buffer to it. -\param pBufferCreateInfo -\param[out] pBuffer Buffer that was created. + /** \brief Binds image to allocation. -This function automatically: + Binds specified image to region of memory represented by specified allocation. + Gets `VkDeviceMemory` handle and offset from the allocation. + If you want to create an image, allocate memory for it and bind them together separately, + you should use this function for binding instead of standard `vkBindImageMemory()`, + because it ensures proper synchronization so that when a `VkDeviceMemory` object is used by multiple + allocations, calls to `vkBind*Memory()` or `vkMapMemory()` won't happen from multiple threads simultaneously + (which is illegal in Vulkan). --# Creates buffer. --# Binds the buffer with the supplied memory. + It is recommended to use function vmaCreateImage() instead of this one. 
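The separate create/allocate/bind path that vmaBindBufferMemory() above exists for looks roughly like
this; `vmaAllocateMemoryForBuffer()` is declared earlier in this header, outside this hunk, and error
handling is omitted for brevity:

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;

VkBuffer buf;
vkCreateBuffer(device, &bufCreateInfo, nullptr, &buf);

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

VmaAllocation alloc;
vmaAllocateMemoryForBuffer(allocator, buf, &allocCreateInfo, &alloc, nullptr);
vmaBindBufferMemory(allocator, alloc, buf); // instead of vkBindBufferMemory()
\endcode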
+ */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkImage VMA_NOT_NULL_NON_DISPATCHABLE image); -If any of these operations fail, buffer is not created, -returned value is negative error code and `*pBuffer` is null. + /** \brief Binds image to allocation with additional parameters. -If the function succeeded, you must destroy the buffer when you -no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding -allocation you can use convenience function vmaDestroyBuffer(). + \param allocator + \param allocation + \param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the `allocation`. Normally it should be 0. + \param image + \param pNext A chain of structures to be attached to `VkBindImageMemoryInfoKHR` structure used internally. Normally it should be null. -\note There is a new version of this function augmented with parameter `allocationLocalOffset` - see vmaCreateAliasingBuffer2(). -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, - VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer); + This function is similar to vmaBindImageMemory(), but it provides additional parameters. -/** \brief Creates a new `VkBuffer`, binds already created memory for it. + If `pNext` is not null, #VmaAllocator object must have been created with #VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT flag + or with VmaAllocatorCreateInfo::vulkanApiVersion `>= VK_API_VERSION_1_1`. Otherwise the call fails. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + VkImage VMA_NOT_NULL_NON_DISPATCHABLE image, + const void *VMA_NULLABLE pNext); + + /** \brief Creates a new `VkBuffer`, allocates and binds memory for it. + + \param allocator + \param pBufferCreateInfo + \param pAllocationCreateInfo + \param[out] pBuffer Buffer that was created. + \param[out] pAllocation Allocation that was created. + \param[out] pAllocationInfo Optional. Information about allocated memory. It can be later fetched using function vmaGetAllocationInfo(). + + This function automatically: + + -# Creates buffer. + -# Allocates appropriate memory for it. + -# Binds the buffer with the memory. + + If any of these operations fail, buffer and allocation are not created, + returned value is negative error code, `*pBuffer` and `*pAllocation` are null. + + If the function succeeded, you must destroy both buffer and allocation when you + no longer need them using either convenience function vmaDestroyBuffer() or + separately, using `vkDestroyBuffer()` and vmaFreeMemory(). + + If #VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT flag was used, + VK_KHR_dedicated_allocation extension is used internally to query driver whether + it requires or prefers the new buffer to have dedicated allocation. If yes, + and if dedicated allocation is possible + (#VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT is not used), it creates dedicated + allocation for this buffer, just like when using + #VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT. + + \note This function creates a new `VkBuffer`. 
Sub-allocation of parts of one large buffer, + although recommended as a good practice, is out of scope of this library and could be implemented + by the user as a higher-level logic on top of VMA. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( + VmaAllocator VMA_NOT_NULL allocator, + const VkBufferCreateInfo *VMA_NOT_NULL pBufferCreateInfo, + const VmaAllocationCreateInfo *VMA_NOT_NULL pAllocationCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pBuffer, + VmaAllocation VMA_NULLABLE *VMA_NOT_NULL pAllocation, + VmaAllocationInfo *VMA_NULLABLE pAllocationInfo); + + /** \brief Creates a buffer with additional minimum alignment. + + Similar to vmaCreateBuffer() but provides additional parameter `minAlignment` which allows to specify custom, + minimum alignment to be used when placing the buffer inside a larger memory block, which may be needed e.g. + for interop with OpenGL. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( + VmaAllocator VMA_NOT_NULL allocator, + const VkBufferCreateInfo *VMA_NOT_NULL pBufferCreateInfo, + const VmaAllocationCreateInfo *VMA_NOT_NULL pAllocationCreateInfo, + VkDeviceSize minAlignment, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pBuffer, + VmaAllocation VMA_NULLABLE *VMA_NOT_NULL pAllocation, + VmaAllocationInfo *VMA_NULLABLE pAllocationInfo); -\param allocator -\param allocation Allocation that provides memory to be used for binding new buffer to it. -\param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the allocation. Normally it should be 0. -\param pBufferCreateInfo -\param[out] pBuffer Buffer that was created. + /** \brief Creates a new `VkBuffer`, binds already created memory for it. -This function automatically: + \param allocator + \param allocation Allocation that provides memory to be used for binding new buffer to it. + \param pBufferCreateInfo + \param[out] pBuffer Buffer that was created. --# Creates buffer. --# Binds the buffer with the supplied memory. + This function automatically: -If any of these operations fail, buffer is not created, -returned value is negative error code and `*pBuffer` is null. + -# Creates buffer. + -# Binds the buffer with the supplied memory. -If the function succeeded, you must destroy the buffer when you -no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding -allocation you can use convenience function vmaDestroyBuffer(). + If any of these operations fail, buffer is not created, + returned value is negative error code and `*pBuffer` is null. -\note This is a new version of the function augmented with parameter `allocationLocalOffset`. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkDeviceSize allocationLocalOffset, - const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, - VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer); + If the function succeeded, you must destroy the buffer when you + no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding + allocation you can use convenience function vmaDestroyBuffer(). -/** \brief Destroys Vulkan buffer and frees allocated memory. + \note There is a new version of this function augmented with parameter `allocationLocalOffset` - see vmaCreateAliasingBuffer2(). 
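The one-call path recommended above, paired with vmaDestroyBuffer() (declared further down in this hunk),
reduces to a sketch like this, assuming a valid `allocator`:

\code
VkBufferCreateInfo bufCreateInfo = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
bufCreateInfo.size = 65536;
bufCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT;

VmaAllocationCreateInfo allocCreateInfo = {};
allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;

VkBuffer buf;
VmaAllocation alloc;
if (vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo, &buf, &alloc, nullptr) == VK_SUCCESS)
{
    // ... use the buffer ...
    vmaDestroyBuffer(allocator, buf, alloc); // destroys the buffer and frees its allocation
}
\endcode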
+ */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkBufferCreateInfo *VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pBuffer); -This is just a convenience function equivalent to: + /** \brief Creates a new `VkBuffer`, binds already created memory for it. -\code -vkDestroyBuffer(device, buffer, allocationCallbacks); -vmaFreeMemory(allocator, allocation); -\endcode + \param allocator + \param allocation Allocation that provides memory to be used for binding new buffer to it. + \param allocationLocalOffset Additional offset to be added while binding, relative to the beginning of the allocation. Normally it should be 0. + \param pBufferCreateInfo + \param[out] pBuffer Buffer that was created. -It is safe to pass null as buffer and/or allocation. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( - VmaAllocator VMA_NOT_NULL allocator, - VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer, - VmaAllocation VMA_NULLABLE allocation); + This function automatically: -/// Function similar to vmaCreateBuffer(). -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( - VmaAllocator VMA_NOT_NULL allocator, - const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, - const VmaAllocationCreateInfo* VMA_NOT_NULL pAllocationCreateInfo, - VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage, - VmaAllocation VMA_NULLABLE* VMA_NOT_NULL pAllocation, - VmaAllocationInfo* VMA_NULLABLE pAllocationInfo); + -# Creates buffer. + -# Binds the buffer with the supplied memory. -/// Function similar to vmaCreateAliasingBuffer() but for images. -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, - VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage); + If any of these operations fail, buffer is not created, + returned value is negative error code and `*pBuffer` is null. -/// Function similar to vmaCreateAliasingBuffer2() but for images. -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2( - VmaAllocator VMA_NOT_NULL allocator, - VmaAllocation VMA_NOT_NULL allocation, - VkDeviceSize allocationLocalOffset, - const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, - VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage); + If the function succeeded, you must destroy the buffer when you + no longer need it using `vkDestroyBuffer()`. If you want to also destroy the corresponding + allocation you can use convenience function vmaDestroyBuffer(). -/** \brief Destroys Vulkan image and frees allocated memory. + \note This is a new version of the function augmented with parameter `allocationLocalOffset`. + */ + VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + const VkBufferCreateInfo *VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pBuffer); -This is just a convenience function equivalent to: + /** \brief Destroys Vulkan buffer and frees allocated memory. -\code -vkDestroyImage(device, image, allocationCallbacks); -vmaFreeMemory(allocator, allocation); -\endcode + This is just a convenience function equivalent to: -It is safe to pass null as image and/or allocation. 
-*/ -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( - VmaAllocator VMA_NOT_NULL allocator, - VkImage VMA_NULLABLE_NON_DISPATCHABLE image, - VmaAllocation VMA_NULLABLE allocation); + \code + vkDestroyBuffer(device, buffer, allocationCallbacks); + vmaFreeMemory(allocator, allocation); + \endcode -/** @} */ + It is safe to pass null as buffer and/or allocation. + */ + VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( + VmaAllocator VMA_NOT_NULL allocator, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE buffer, + VmaAllocation VMA_NULLABLE allocation); + + /// Function similar to vmaCreateBuffer(). + VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( + VmaAllocator VMA_NOT_NULL allocator, + const VkImageCreateInfo *VMA_NOT_NULL pImageCreateInfo, + const VmaAllocationCreateInfo *VMA_NOT_NULL pAllocationCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pImage, + VmaAllocation VMA_NULLABLE *VMA_NOT_NULL pAllocation, + VmaAllocationInfo *VMA_NULLABLE pAllocationInfo); + + /// Function similar to vmaCreateAliasingBuffer() but for images. + VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + const VkImageCreateInfo *VMA_NOT_NULL pImageCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pImage); + + /// Function similar to vmaCreateAliasingBuffer2() but for images. + VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2( + VmaAllocator VMA_NOT_NULL allocator, + VmaAllocation VMA_NOT_NULL allocation, + VkDeviceSize allocationLocalOffset, + const VkImageCreateInfo *VMA_NOT_NULL pImageCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pImage); -/** -\addtogroup group_virtual -@{ -*/ + /** \brief Destroys Vulkan image and frees allocated memory. -/** \brief Creates new #VmaVirtualBlock object. + This is just a convenience function equivalent to: -\param pCreateInfo Parameters for creation. -\param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed. -*/ -VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock( - const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaVirtualBlock VMA_NULLABLE* VMA_NOT_NULL pVirtualBlock); + \code + vkDestroyImage(device, image, allocationCallbacks); + vmaFreeMemory(allocator, allocation); + \endcode -/** \brief Destroys #VmaVirtualBlock object. + It is safe to pass null as image and/or allocation. + */ + VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( + VmaAllocator VMA_NOT_NULL allocator, + VkImage VMA_NULLABLE_NON_DISPATCHABLE image, + VmaAllocation VMA_NULLABLE allocation); -Please note that you should consciously handle virtual allocations that could remain unfreed in the block. -You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock() -if you are sure this is what you want. If you do neither, an assert is called. + /** @} */ -If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`, -don't forget to free them. -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock( - VmaVirtualBlock VMA_NULLABLE virtualBlock); + /** + \addtogroup group_virtual + @{ + */ -/** \brief Returns true of the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations. -*/ -VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty( - VmaVirtualBlock VMA_NOT_NULL virtualBlock); + /** \brief Creates new #VmaVirtualBlock object. 
-/** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(
-    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-    VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo);
+    \param pCreateInfo Parameters for creation.
+    \param[out] pVirtualBlock Returned virtual block object or `VMA_NULL` if creation failed.
+    */
+    VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock(
+        const VmaVirtualBlockCreateInfo *VMA_NOT_NULL pCreateInfo,
+        VmaVirtualBlock VMA_NULLABLE *VMA_NOT_NULL pVirtualBlock);
-/** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.
+    /** \brief Destroys #VmaVirtualBlock object.
-If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned
-(despite the function doesn't ever allocate actual GPU memory).
-`pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, it set to `UINT64_MAX`.
+    Please note that you should consciously handle virtual allocations that could remain unfreed in the block.
+    You should either free them individually using vmaVirtualFree() or call vmaClearVirtualBlock()
+    if you are sure this is what you want. If you do neither, an assert is called.
-\param virtualBlock Virtual block
-\param pCreateInfo Parameters for the allocation
-\param[out] pAllocation Returned handle of the new allocation
-\param[out] pOffset Returned offset of the new allocation. Optional, can be null.
-*/
-VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(
-    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-    const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
-    VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
-    VkDeviceSize* VMA_NULLABLE pOffset);
+    If you keep pointers to some additional metadata associated with your virtual allocations in their `pUserData`,
+    don't forget to free them.
+    */
+    VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(
+        VmaVirtualBlock VMA_NULLABLE virtualBlock);
+
+    /** \brief Returns true if the #VmaVirtualBlock is empty - contains 0 virtual allocations and has all its space available for new allocations.
+    */
+    VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(
+        VmaVirtualBlock VMA_NOT_NULL virtualBlock);
+
+    /** \brief Returns information about a specific virtual allocation within a virtual block, like its size and `pUserData` pointer.
+    */
+    VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(
+        VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+        VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo *VMA_NOT_NULL pVirtualAllocInfo);
+
+    /** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.
+
+    If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF_DEVICE_MEMORY` is returned
+    (even though the function doesn't ever allocate actual GPU memory).
+    `pAllocation` is then set to `VK_NULL_HANDLE` and `pOffset`, if not null, is set to `UINT64_MAX`.
+
+    \param virtualBlock Virtual block
+    \param pCreateInfo Parameters for the allocation
+    \param[out] pAllocation Returned handle of the new allocation
+    \param[out] pOffset Returned offset of the new allocation. Optional, can be null.
+    */
+    VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(
+        VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+        const VmaVirtualAllocationCreateInfo *VMA_NOT_NULL pCreateInfo,
+        VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pAllocation,
+        VkDeviceSize *VMA_NULLABLE pOffset);
-/** \brief Frees virtual allocation inside given #VmaVirtualBlock.
+    /** \brief Frees virtual allocation inside given #VmaVirtualBlock.
-It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(
-    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-    VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation);
+    It is correct to call this function with `allocation == VK_NULL_HANDLE` - it does nothing.
+    */
+    VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(
+        VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+        VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation);
-/** \brief Frees all virtual allocations inside given #VmaVirtualBlock.
+    /** \brief Frees all virtual allocations inside given #VmaVirtualBlock.
-You must either call this function or free each virtual allocation individually with vmaVirtualFree()
-before destroying a virtual block. Otherwise, an assert is called.
+    You must either call this function or free each virtual allocation individually with vmaVirtualFree()
+    before destroying a virtual block. Otherwise, an assert is called.
-If you keep pointer to some additional metadata associated with your virtual allocation in its `pUserData`,
-don't forget to free it as well.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(
-    VmaVirtualBlock VMA_NOT_NULL virtualBlock);
+    If you keep a pointer to some additional metadata associated with your virtual allocation in its `pUserData`,
+    don't forget to free it as well.
+    */
+    VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(
+        VmaVirtualBlock VMA_NOT_NULL virtualBlock);
-/** \brief Changes custom pointer associated with given virtual allocation.
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(
-    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-    VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation,
-    void* VMA_NULLABLE pUserData);
+    /** \brief Changes custom pointer associated with given virtual allocation.
+    */
+    VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(
+        VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+        VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation,
+        void *VMA_NULLABLE pUserData);
-/** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
+    /** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
-This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics().
-*/
-VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(
-    VmaVirtualBlock VMA_NOT_NULL virtualBlock,
-    VmaStatistics* VMA_NOT_NULL pStats);
+    This function is fast to call. For more detailed statistics, see vmaCalculateVirtualBlockStatistics().
+    */
+    VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(
+        VmaVirtualBlock VMA_NOT_NULL virtualBlock,
+        VmaStatistics *VMA_NOT_NULL pStats);
-/** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
+    /** \brief Calculates and returns detailed statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
-This function is slow to call.
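The virtual-allocation functions above compose into a simple lifecycle. A sketch, assuming the `size`
members of the two create-info structs defined earlier in this header:

\code
VmaVirtualBlockCreateInfo blockCreateInfo = {};
blockCreateInfo.size = 1048576; // 1 MiB of "virtual" space - no GPU memory is allocated

VmaVirtualBlock block;
if (vmaCreateVirtualBlock(&blockCreateInfo, &block) == VK_SUCCESS)
{
    VmaVirtualAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.size = 4096;

    VmaVirtualAllocation alloc;
    VkDeviceSize offset;
    if (vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset) == VK_SUCCESS)
    {
        // ... use [offset, offset + 4096) within your own resource ...
        vmaVirtualFree(block, alloc);
    }
    vmaDestroyVirtualBlock(block); // the block must be empty (or cleared) at this point
}
\endcode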
Use for debugging purposes. -For less detailed statistics, see vmaGetVirtualBlockStatistics(). -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics( - VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaDetailedStatistics* VMA_NOT_NULL pStats); + This function is slow to call. Use for debugging purposes. + For less detailed statistics, see vmaGetVirtualBlockStatistics(). + */ + VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + VmaDetailedStatistics *VMA_NOT_NULL pStats); -/** @} */ + /** @} */ #if VMA_STATS_STRING_ENABLED -/** -\addtogroup group_stats -@{ -*/ - -/** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock. -\param virtualBlock Virtual block. -\param[out] ppStatsString Returned string. -\param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces. + /** + \addtogroup group_stats + @{ + */ -Returned string must be freed using vmaFreeVirtualBlockStatsString(). -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString( - VmaVirtualBlock VMA_NOT_NULL virtualBlock, - char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString, - VkBool32 detailedMap); - -/// Frees a string returned by vmaBuildVirtualBlockStatsString(). -VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString( - VmaVirtualBlock VMA_NOT_NULL virtualBlock, - char* VMA_NULLABLE pStatsString); - -/** \brief Builds and returns statistics as a null-terminated string in JSON format. -\param allocator -\param[out] ppStatsString Must be freed using vmaFreeStatsString() function. -\param detailedMap -*/ -VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( - VmaAllocator VMA_NOT_NULL allocator, - char* VMA_NULLABLE* VMA_NOT_NULL ppStatsString, - VkBool32 detailedMap); + /** \brief Builds and returns a null-terminated string in JSON format with information about given #VmaVirtualBlock. + \param virtualBlock Virtual block. + \param[out] ppStatsString Returned string. + \param detailedMap Pass `VK_FALSE` to only obtain statistics as returned by vmaCalculateVirtualBlockStatistics(). Pass `VK_TRUE` to also obtain full list of allocations and free spaces. -VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( - VmaAllocator VMA_NOT_NULL allocator, - char* VMA_NULLABLE pStatsString); + Returned string must be freed using vmaFreeVirtualBlockStatsString(). + */ + VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + char *VMA_NULLABLE *VMA_NOT_NULL ppStatsString, + VkBool32 detailedMap); + + /// Frees a string returned by vmaBuildVirtualBlockStatsString(). + VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString( + VmaVirtualBlock VMA_NOT_NULL virtualBlock, + char *VMA_NULLABLE pStatsString); + + /** \brief Builds and returns statistics as a null-terminated string in JSON format. + \param allocator + \param[out] ppStatsString Must be freed using vmaFreeStatsString() function. 
+    \param detailedMap
+    */
+    VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString(
+        VmaAllocator VMA_NOT_NULL allocator,
+        char *VMA_NULLABLE *VMA_NOT_NULL ppStatsString,
+        VkBool32 detailedMap);
-/** @} */
+    VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
+        VmaAllocator VMA_NOT_NULL allocator,
+        char *VMA_NULLABLE pStatsString);
+
+    /** @} */
 #endif // VMA_STATS_STRING_ENABLED
@@ -2608,14 +2615,14 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString(
 #include
 #ifdef _MSC_VER
-    #include <intrin.h> // For functions like __popcnt, _BitScanForward etc.
+#include <intrin.h> // For functions like __popcnt, _BitScanForward etc.
 #endif
 #if __cplusplus >= 202002L || _MSVC_LANG >= 202002L // C++20
-    #include <bit> // For std::popcount
+#include <bit> // For std::popcount
 #endif
 #if VMA_STATS_STRING_ENABLED
-    #include <cstdio> // For snprintf
+#include <cstdio> // For snprintf
 #endif
 /*******************************************************************************
@@ -2633,7 +2640,7 @@ internally, like:
 vulkanFunctions.vkAllocateMemory = &vkAllocateMemory;
 */
 #if !defined(VMA_STATIC_VULKAN_FUNCTIONS) && !defined(VK_NO_PROTOTYPES)
-    #define VMA_STATIC_VULKAN_FUNCTIONS 1
+#define VMA_STATIC_VULKAN_FUNCTIONS 1
 #endif
 /*
@@ -2647,19 +2654,19 @@ VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as
 VmaAllocatorCreateInfo::pVulkanFunctions. Other members can be null.
 */
 #if !defined(VMA_DYNAMIC_VULKAN_FUNCTIONS)
-    #define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
+#define VMA_DYNAMIC_VULKAN_FUNCTIONS 1
 #endif
 #ifndef VMA_USE_STL_SHARED_MUTEX
-    #if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
-        #define VMA_USE_STL_SHARED_MUTEX 1
-        // Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
-        // Otherwise it is always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
-    #elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
-        #define VMA_USE_STL_SHARED_MUTEX 1
-    #else
-        #define VMA_USE_STL_SHARED_MUTEX 0
-    #endif
+#if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17
+#define VMA_USE_STL_SHARED_MUTEX 1
+// Visual studio defines __cplusplus properly only when passed additional parameter: /Zc:__cplusplus
+// Otherwise it is always 199711L, despite shared_mutex works since Visual Studio 2015 Update 2.
+#elif defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 190023918 && __cplusplus == 199711L && _MSVC_LANG >= 201703L
+#define VMA_USE_STL_SHARED_MUTEX 1
+#else
+#define VMA_USE_STL_SHARED_MUTEX 0
+#endif
 #endif
 /*
@@ -2684,58 +2691,58 @@ The following headers are used in this CONFIGURATION section only, so feel free
 remove them if not needed.
 */
 #if !defined(VMA_CONFIGURATION_USER_INCLUDES_H)
-    #include <cassert> // for assert
-    #include <algorithm> // for min, max
-    #include <mutex>
+#include <cassert> // for assert
+#include <algorithm> // for min, max
+#include <mutex>
 #else
-    #include VMA_CONFIGURATION_USER_INCLUDES_H
+#include VMA_CONFIGURATION_USER_INCLUDES_H
 #endif
 #ifndef VMA_NULL
-    // Value used as null pointer. Define it to e.g.: nullptr, NULL, 0, (void*)0.
+#define VMA_NULL nullptr #endif #ifndef VMA_FALLTHROUGH - #if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17 - #define VMA_FALLTHROUGH [[fallthrough]] - #else - #define VMA_FALLTHROUGH - #endif +#if __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17 +#define VMA_FALLTHROUGH [[fallthrough]] +#else +#define VMA_FALLTHROUGH +#endif #endif // Normal assert to check for programmer's errors, especially in Debug configuration. #ifndef VMA_ASSERT - #ifdef NDEBUG - #define VMA_ASSERT(expr) - #else - #define VMA_ASSERT(expr) assert(expr) - #endif +#ifdef NDEBUG +#define VMA_ASSERT(expr) +#else +#define VMA_ASSERT(expr) assert(expr) +#endif #endif // Assert that will be called very often, like inside data structures e.g. operator[]. // Making it non-empty can make program slow. #ifndef VMA_HEAVY_ASSERT - #ifdef NDEBUG - #define VMA_HEAVY_ASSERT(expr) - #else - #define VMA_HEAVY_ASSERT(expr) //VMA_ASSERT(expr) - #endif +#ifdef NDEBUG +#define VMA_HEAVY_ASSERT(expr) +#else +#define VMA_HEAVY_ASSERT(expr) // VMA_ASSERT(expr) +#endif #endif // If your compiler is not compatible with C++17 and definition of // aligned_alloc() function is missing, uncommenting following line may help: -//#include +// #include #if defined(__ANDROID_API__) && (__ANDROID_API__ < 16) #include -static void* vma_aligned_alloc(size_t alignment, size_t size) +static void *vma_aligned_alloc(size_t alignment, size_t size) { // alignment must be >= sizeof(void*) - if(alignment < sizeof(void*)) + if (alignment < sizeof(void *)) { - alignment = sizeof(void*); + alignment = sizeof(void *); } return memalign(alignment, size); @@ -2747,12 +2754,12 @@ static void* vma_aligned_alloc(size_t alignment, size_t size) #include #endif -static void* vma_aligned_alloc(size_t alignment, size_t size) +static void *vma_aligned_alloc(size_t alignment, size_t size) { // Unfortunately, aligned_alloc causes VMA to crash due to it returning null pointers. (At least under 11.4) // Therefore, for now disable this specific exception until a proper solution is found. - //#if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0)) - //#if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0 + // #if defined(__APPLE__) && (defined(MAC_OS_X_VERSION_10_16) || defined(__IPHONE_14_0)) + // #if MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_16 || __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_14_0 // // For C++14, usr/include/malloc/_malloc.h declares aligned_alloc()) only // // with the MacOSX11.0 SDK in Xcode 12 (which is what adds // // MAC_OS_X_VERSION_10_16), even though the function is marked @@ -2761,22 +2768,22 @@ static void* vma_aligned_alloc(size_t alignment, size_t size) // // People who use C++17 could call aligned_alloc with the 10.15 SDK already. 
// if (__builtin_available(macOS 10.15, iOS 13, *)) // return aligned_alloc(alignment, size); - //#endif - //#endif + // #endif + // #endif // alignment must be >= sizeof(void*) - if(alignment < sizeof(void*)) + if (alignment < sizeof(void *)) { - alignment = sizeof(void*); + alignment = sizeof(void *); } void *pointer; - if(posix_memalign(&pointer, alignment, size) == 0) + if (posix_memalign(&pointer, alignment, size) == 0) return pointer; return VMA_NULL; } #elif defined(_WIN32) -static void* vma_aligned_alloc(size_t alignment, size_t size) +static void *vma_aligned_alloc(size_t alignment, size_t size) { return _aligned_malloc(size, alignment); } @@ -2787,12 +2794,12 @@ static void *vma_aligned_alloc(size_t alignment, size_t size) return aligned_alloc(alignment, size); } #elif __cplusplus >= 201703L || _MSVC_LANG >= 201703L // C++17 -static void* vma_aligned_alloc(size_t alignment, size_t size) +static void *vma_aligned_alloc(size_t alignment, size_t size) { return aligned_alloc(alignment, size); } #else -static void* vma_aligned_alloc(size_t alignment, size_t size) +static void *vma_aligned_alloc(size_t alignment, size_t size) { VMA_ASSERT(0 && "Could not implement aligned_alloc automatically. Please enable C++17 or later in your compiler or provide custom implementation of macro VMA_SYSTEM_ALIGNED_MALLOC (and VMA_SYSTEM_ALIGNED_FREE if needed) using the API of your system."); return VMA_NULL; @@ -2800,268 +2807,272 @@ static void* vma_aligned_alloc(size_t alignment, size_t size) #endif #if defined(_WIN32) -static void vma_aligned_free(void* ptr) +static void vma_aligned_free(void *ptr) { _aligned_free(ptr); } #else -static void vma_aligned_free(void* VMA_NULLABLE ptr) +static void vma_aligned_free(void *VMA_NULLABLE ptr) { free(ptr); } #endif #ifndef VMA_ALIGN_OF - #define VMA_ALIGN_OF(type) (alignof(type)) +#define VMA_ALIGN_OF(type) (alignof(type)) #endif #ifndef VMA_SYSTEM_ALIGNED_MALLOC - #define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size)) +#define VMA_SYSTEM_ALIGNED_MALLOC(size, alignment) vma_aligned_alloc((alignment), (size)) #endif #ifndef VMA_SYSTEM_ALIGNED_FREE - // VMA_SYSTEM_FREE is the old name, but might have been defined by the user - #if defined(VMA_SYSTEM_FREE) - #define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr) - #else - #define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr) - #endif +// VMA_SYSTEM_FREE is the old name, but might have been defined by the user +#if defined(VMA_SYSTEM_FREE) +#define VMA_SYSTEM_ALIGNED_FREE(ptr) VMA_SYSTEM_FREE(ptr) +#else +#define VMA_SYSTEM_ALIGNED_FREE(ptr) vma_aligned_free(ptr) +#endif #endif #ifndef VMA_COUNT_BITS_SET - // Returns number of bits set to 1 in (v) - #define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v) +// Returns number of bits set to 1 in (v) +#define VMA_COUNT_BITS_SET(v) VmaCountBitsSet(v) #endif #ifndef VMA_BITSCAN_LSB - // Scans integer for index of first nonzero value from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX - #define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask) +// Scans integer for index of first nonzero value from the Least Significant Bit (LSB). If mask is 0 then returns UINT8_MAX +#define VMA_BITSCAN_LSB(mask) VmaBitScanLSB(mask) #endif #ifndef VMA_BITSCAN_MSB - // Scans integer for index of first nonzero value from the Most Significant Bit (MSB). If mask is 0 then returns UINT8_MAX - #define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask) +// Scans integer for index of first nonzero value from the Most Significant Bit (MSB). 
If mask is 0 then returns UINT8_MAX +#define VMA_BITSCAN_MSB(mask) VmaBitScanMSB(mask) #endif #ifndef VMA_MIN - #define VMA_MIN(v1, v2) ((std::min)((v1), (v2))) +#define VMA_MIN(v1, v2) ((std::min)((v1), (v2))) #endif #ifndef VMA_MAX - #define VMA_MAX(v1, v2) ((std::max)((v1), (v2))) +#define VMA_MAX(v1, v2) ((std::max)((v1), (v2))) #endif #ifndef VMA_SWAP - #define VMA_SWAP(v1, v2) std::swap((v1), (v2)) +#define VMA_SWAP(v1, v2) std::swap((v1), (v2)) #endif #ifndef VMA_SORT - #define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp) +#define VMA_SORT(beg, end, cmp) std::sort(beg, end, cmp) #endif #ifndef VMA_DEBUG_LOG_FORMAT - #define VMA_DEBUG_LOG_FORMAT(format, ...) - /* - #define VMA_DEBUG_LOG_FORMAT(format, ...) do { \ - printf((format), __VA_ARGS__); \ - printf("\n"); \ - } while(false) - */ +#define VMA_DEBUG_LOG_FORMAT(format, ...) +/* +#define VMA_DEBUG_LOG_FORMAT(format, ...) do { \ + printf((format), __VA_ARGS__); \ + printf("\n"); \ +} while(false) +*/ #endif #ifndef VMA_DEBUG_LOG - #define VMA_DEBUG_LOG(str) VMA_DEBUG_LOG_FORMAT("%s", (str)) +#define VMA_DEBUG_LOG(str) VMA_DEBUG_LOG_FORMAT("%s", (str)) #endif #ifndef VMA_CLASS_NO_COPY - #define VMA_CLASS_NO_COPY(className) \ - private: \ - className(const className&) = delete; \ - className& operator=(const className&) = delete; +#define VMA_CLASS_NO_COPY(className) \ +private: \ + className(const className &) = delete; \ + className &operator=(const className &) = delete; #endif #ifndef VMA_CLASS_NO_COPY_NO_MOVE - #define VMA_CLASS_NO_COPY_NO_MOVE(className) \ - private: \ - className(const className&) = delete; \ - className(className&&) = delete; \ - className& operator=(const className&) = delete; \ - className& operator=(className&&) = delete; +#define VMA_CLASS_NO_COPY_NO_MOVE(className) \ +private: \ + className(const className &) = delete; \ + className(className &&) = delete; \ + className &operator=(const className &) = delete; \ + className &operator=(className &&) = delete; #endif // Define this macro to 1 to enable functions: vmaBuildStatsString, vmaFreeStatsString. 
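The commented-out printf variant shown earlier in this hunk illustrates the shape of a user-provided
override of VMA_DEBUG_LOG_FORMAT. As a sketch, a hypothetical project could route VMA's debug messages
to stdout by defining the macro before this header is included:

\code
#include <cstdio>
#define VMA_DEBUG_LOG_FORMAT(format, ...) do { \
        printf((format), __VA_ARGS__); \
        printf("\n"); \
    } while(false)
\endcode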
 #if VMA_STATS_STRING_ENABLED
-    static inline void VmaUint32ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
-    {
-        snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
-    }
-    static inline void VmaUint64ToStr(char* VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
-    {
-        snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
-    }
-    static inline void VmaPtrToStr(char* VMA_NOT_NULL outStr, size_t strLen, const void* ptr)
-    {
-        snprintf(outStr, strLen, "%p", ptr);
-    }
+static inline void VmaUint32ToStr(char *VMA_NOT_NULL outStr, size_t strLen, uint32_t num)
+{
+    snprintf(outStr, strLen, "%u", static_cast<unsigned int>(num));
+}
+static inline void VmaUint64ToStr(char *VMA_NOT_NULL outStr, size_t strLen, uint64_t num)
+{
+    snprintf(outStr, strLen, "%llu", static_cast<unsigned long long>(num));
+}
+static inline void VmaPtrToStr(char *VMA_NOT_NULL outStr, size_t strLen, const void *ptr)
+{
+    snprintf(outStr, strLen, "%p", ptr);
+}
 #endif
 #ifndef VMA_MUTEX
-    class VmaMutex
-    {
+class VmaMutex
+{
     VMA_CLASS_NO_COPY_NO_MOVE(VmaMutex)
-    public:
-        VmaMutex() { }
-        void Lock() { m_Mutex.lock(); }
-        void Unlock() { m_Mutex.unlock(); }
-        bool TryLock() { return m_Mutex.try_lock(); }
-    private:
-        std::mutex m_Mutex;
-    };
-    #define VMA_MUTEX VmaMutex
+public:
+    VmaMutex() {}
+    void Lock() { m_Mutex.lock(); }
+    void Unlock() { m_Mutex.unlock(); }
+    bool TryLock() { return m_Mutex.try_lock(); }
+
+private:
+    std::mutex m_Mutex;
+};
+#define VMA_MUTEX VmaMutex
 #endif
 // Read-write mutex, where "read" is shared access, "write" is exclusive access.
 #ifndef VMA_RW_MUTEX
-    #if VMA_USE_STL_SHARED_MUTEX
-        // Use std::shared_mutex from C++17.
-        #include <shared_mutex>
-        class VmaRWMutex
-        {
-        public:
-            void LockRead() { m_Mutex.lock_shared(); }
-            void UnlockRead() { m_Mutex.unlock_shared(); }
-            bool TryLockRead() { return m_Mutex.try_lock_shared(); }
-            void LockWrite() { m_Mutex.lock(); }
-            void UnlockWrite() { m_Mutex.unlock(); }
-            bool TryLockWrite() { return m_Mutex.try_lock(); }
-        private:
-            std::shared_mutex m_Mutex;
-        };
-        #define VMA_RW_MUTEX VmaRWMutex
-    #elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
-        // Use SRWLOCK from WinAPI.
-        // Minimum supported client = Windows Vista, server = Windows Server 2008.
-        class VmaRWMutex
-        {
-        public:
-            VmaRWMutex() { InitializeSRWLock(&m_Lock); }
-            void LockRead() { AcquireSRWLockShared(&m_Lock); }
-            void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
-            bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
-            void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
-            void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
-            bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
-        private:
-            SRWLOCK m_Lock;
-        };
-        #define VMA_RW_MUTEX VmaRWMutex
-    #else
-        // Less efficient fallback: Use normal mutex.
-        class VmaRWMutex
-        {
-        public:
-            void LockRead() { m_Mutex.Lock(); }
-            void UnlockRead() { m_Mutex.Unlock(); }
-            bool TryLockRead() { return m_Mutex.TryLock(); }
-            void LockWrite() { m_Mutex.Lock(); }
-            void UnlockWrite() { m_Mutex.Unlock(); }
-            bool TryLockWrite() { return m_Mutex.TryLock(); }
-        private:
-            VMA_MUTEX m_Mutex;
-        };
-        #define VMA_RW_MUTEX VmaRWMutex
-    #endif // #if VMA_USE_STL_SHARED_MUTEX
+#if VMA_USE_STL_SHARED_MUTEX
+// Use std::shared_mutex from C++17.
+#include <shared_mutex>
+class VmaRWMutex
+{
+public:
+    void LockRead() { m_Mutex.lock_shared(); }
+    void UnlockRead() { m_Mutex.unlock_shared(); }
+    bool TryLockRead() { return m_Mutex.try_lock_shared(); }
+    void LockWrite() { m_Mutex.lock(); }
+    void UnlockWrite() { m_Mutex.unlock(); }
+    bool TryLockWrite() { return m_Mutex.try_lock(); }
+
+private:
+    std::shared_mutex m_Mutex;
+};
+#define VMA_RW_MUTEX VmaRWMutex
+#elif defined(_WIN32) && defined(WINVER) && WINVER >= 0x0600
+// Use SRWLOCK from WinAPI.
+// Minimum supported client = Windows Vista, server = Windows Server 2008.
+class VmaRWMutex
+{
+public:
+    VmaRWMutex() { InitializeSRWLock(&m_Lock); }
+    void LockRead() { AcquireSRWLockShared(&m_Lock); }
+    void UnlockRead() { ReleaseSRWLockShared(&m_Lock); }
+    bool TryLockRead() { return TryAcquireSRWLockShared(&m_Lock) != FALSE; }
+    void LockWrite() { AcquireSRWLockExclusive(&m_Lock); }
+    void UnlockWrite() { ReleaseSRWLockExclusive(&m_Lock); }
+    bool TryLockWrite() { return TryAcquireSRWLockExclusive(&m_Lock) != FALSE; }
+
+private:
+    SRWLOCK m_Lock;
+};
+#define VMA_RW_MUTEX VmaRWMutex
+#else
+// Less efficient fallback: Use normal mutex.
+class VmaRWMutex
+{
+public:
+    void LockRead() { m_Mutex.Lock(); }
+    void UnlockRead() { m_Mutex.Unlock(); }
+    bool TryLockRead() { return m_Mutex.TryLock(); }
+    void LockWrite() { m_Mutex.Lock(); }
+    void UnlockWrite() { m_Mutex.Unlock(); }
+    bool TryLockWrite() { return m_Mutex.TryLock(); }
+
+private:
+    VMA_MUTEX m_Mutex;
+};
+#define VMA_RW_MUTEX VmaRWMutex
+#endif // #if VMA_USE_STL_SHARED_MUTEX
 #endif // #ifndef VMA_RW_MUTEX
 /*
 If providing your own implementation, you need to implement a subset of std::atomic.
 */
 #ifndef VMA_ATOMIC_UINT32
-    #include <atomic>
-    #define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
+#include <atomic>
+#define VMA_ATOMIC_UINT32 std::atomic<uint32_t>
 #endif
 #ifndef VMA_ATOMIC_UINT64
-    #include <atomic>
-    #define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
+#include <atomic>
+#define VMA_ATOMIC_UINT64 std::atomic<uint64_t>
 #endif
 #ifndef VMA_DEBUG_ALWAYS_DEDICATED_MEMORY
-    /**
-    Every allocation will have its own memory block.
-    Define to 1 for debugging purposes only.
-    */
-    #define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
+/**
+Every allocation will have its own memory block.
+Define to 1 for debugging purposes only.
+*/
+#define VMA_DEBUG_ALWAYS_DEDICATED_MEMORY (0)
 #endif
 #ifndef VMA_MIN_ALIGNMENT
-    /**
-    Minimum alignment of all allocations, in bytes.
-    Set to more than 1 for debugging purposes. Must be power of two.
-    */
-    #ifdef VMA_DEBUG_ALIGNMENT // Old name
-        #define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
-    #else
-        #define VMA_MIN_ALIGNMENT (1)
-    #endif
+/**
+Minimum alignment of all allocations, in bytes.
+Set to more than 1 for debugging purposes. Must be power of two.
+*/
+#ifdef VMA_DEBUG_ALIGNMENT // Old name
+#define VMA_MIN_ALIGNMENT VMA_DEBUG_ALIGNMENT
+#else
+#define VMA_MIN_ALIGNMENT (1)
+#endif
 #endif
 #ifndef VMA_DEBUG_MARGIN
-    /**
-    Minimum margin after every allocation, in bytes.
-    Set nonzero for debugging purposes only.
-    */
-    #define VMA_DEBUG_MARGIN (0)
+/**
+Minimum margin after every allocation, in bytes.
+Set nonzero for debugging purposes only.
+*/
+#define VMA_DEBUG_MARGIN (0)
 #endif
 #ifndef VMA_DEBUG_INITIALIZE_ALLOCATIONS
-    /**
-    Define this macro to 1 to automatically fill new allocations and destroyed
-    allocations with some bit pattern.
-    */
-    #define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0)
+/**
+Define this macro to 1 to automatically fill new allocations and destroyed
+allocations with some bit pattern.
+*/ +#define VMA_DEBUG_INITIALIZE_ALLOCATIONS (0) #endif #ifndef VMA_DEBUG_DETECT_CORRUPTION - /** - Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to - enable writing magic value to the margin after every allocation and - validating it, so that memory corruptions (out-of-bounds writes) are detected. - */ - #define VMA_DEBUG_DETECT_CORRUPTION (0) +/** +Define this macro to 1 together with non-zero value of VMA_DEBUG_MARGIN to +enable writing magic value to the margin after every allocation and +validating it, so that memory corruptions (out-of-bounds writes) are detected. +*/ +#define VMA_DEBUG_DETECT_CORRUPTION (0) #endif #ifndef VMA_DEBUG_GLOBAL_MUTEX - /** - Set this to 1 for debugging purposes only, to enable single mutex protecting all - entry calls to the library. Can be useful for debugging multithreading issues. - */ - #define VMA_DEBUG_GLOBAL_MUTEX (0) +/** +Set this to 1 for debugging purposes only, to enable single mutex protecting all +entry calls to the library. Can be useful for debugging multithreading issues. +*/ +#define VMA_DEBUG_GLOBAL_MUTEX (0) #endif #ifndef VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY - /** - Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. - Set to more than 1 for debugging purposes only. Must be power of two. - */ - #define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1) +/** +Minimum value for VkPhysicalDeviceLimits::bufferImageGranularity. +Set to more than 1 for debugging purposes only. Must be power of two. +*/ +#define VMA_DEBUG_MIN_BUFFER_IMAGE_GRANULARITY (1) #endif #ifndef VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT - /* - Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount - and return error instead of leaving up to Vulkan implementation what to do in such cases. - */ - #define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0) +/* +Set this to 1 to make VMA never exceed VkPhysicalDeviceLimits::maxMemoryAllocationCount +and return error instead of leaving up to Vulkan implementation what to do in such cases. +*/ +#define VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT (0) #endif #ifndef VMA_SMALL_HEAP_MAX_SIZE - /// Maximum size of a memory heap in Vulkan to consider it "small". - #define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024) +/// Maximum size of a memory heap in Vulkan to consider it "small". +#define VMA_SMALL_HEAP_MAX_SIZE (1024ull * 1024 * 1024) #endif #ifndef VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE - /// Default size of a block allocated as single VkDeviceMemory from a "large" heap. - #define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024) +/// Default size of a block allocated as single VkDeviceMemory from a "large" heap. +#define VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE (256ull * 1024 * 1024) #endif /* @@ -3072,20 +3083,24 @@ vkMapMemory/vkUnmapMemory too many times, which may improve performance and help tools like RenderDoc. 
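The debugging macros above combine with vmaCheckCorruption() documented earlier. As a sketch, a
hypothetical translation unit could enable margins and corruption detection like this (the values are
illustrative; defining VMA_IMPLEMENTATION before the include is the usual single-translation-unit setup
for this library):

\code
#define VMA_DEBUG_MARGIN 16
#define VMA_DEBUG_DETECT_CORRUPTION 1
#define VMA_IMPLEMENTATION
#include "vk_mem_alloc.h"
\endcode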
 */
 #ifndef VMA_MAPPING_HYSTERESIS_ENABLED
-    #define VMA_MAPPING_HYSTERESIS_ENABLED 1
+#define VMA_MAPPING_HYSTERESIS_ENABLED 1
 #endif
-#define VMA_VALIDATE(cond) do { if(!(cond)) { \
-        VMA_ASSERT(0 && "Validation failed: " #cond); \
-        return false; \
-    } } while(false)
+#define VMA_VALIDATE(cond) \
+    do \
+    { \
+        if (!(cond)) \
+        { \
+            VMA_ASSERT(0 && "Validation failed: " #cond); \
+            return false; \
+        } \
+    } while (false)
 /*******************************************************************************
 END OF CONFIGURATION
 */
 #endif // _VMA_CONFIGURATION
-
 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_CREATED = 0xDC;
 static const uint8_t VMA_ALLOCATION_FILL_PATTERN_DESTROYED = 0xEF;
 // Decimal 2139416166, float NaN, little-endian binary 66 E6 84 7F.
@@ -3106,23 +3121,21 @@ static const uint32_t VMA_VENDOR_ID_AMD = 4098;
 // See pull request #207.
 #define VK_ERROR_UNKNOWN_COPY ((VkResult)-13)
-
 #if VMA_STATS_STRING_ENABLED
 // Correspond to values of enum VmaSuballocationType.
-static const char* VMA_SUBALLOCATION_TYPE_NAMES[] =
-{
-    "FREE",
-    "UNKNOWN",
-    "BUFFER",
-    "IMAGE_UNKNOWN",
-    "IMAGE_LINEAR",
-    "IMAGE_OPTIMAL",
+static const char *VMA_SUBALLOCATION_TYPE_NAMES[] =
+    {
+        "FREE",
+        "UNKNOWN",
+        "BUFFER",
+        "IMAGE_UNKNOWN",
+        "IMAGE_LINEAR",
+        "IMAGE_OPTIMAL",
 };
 #endif
 static VkAllocationCallbacks VmaEmptyAllocationCallbacks =
-    { VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL };
-
+    {VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL, VMA_NULL};
 #ifndef _VMA_ENUM_DECLARATIONS
@@ -3163,31 +3176,31 @@ struct VmaMutexLock;
 struct VmaMutexLockRead;
 struct VmaMutexLockWrite;
-template<typename T>
+template <typename T>
 struct AtomicTransactionalIncrement;
-template<typename T>
+template <typename T>
 struct VmaStlAllocator;
-template<typename T, typename AllocatorT>
+template <typename T, typename AllocatorT>
 class VmaVector;
-template<typename T, typename AllocatorT, size_t N>
+template <typename T, typename AllocatorT, size_t N>
 class VmaSmallVector;
-template<typename T>
+template <typename T>
 class VmaPoolAllocator;
-template<typename T>
+template <typename T>
 struct VmaListItem;
-template<typename T>
+template <typename T>
 class VmaRawList;
-template<typename T, typename AllocatorT>
+template <typename T, typename AllocatorT>
 class VmaList;
-template<typename ItemTypeTraits>
+template <typename ItemTypeTraits>
 class VmaIntrusiveLinkedList;
 // Unused in this version
@@ -3234,7 +3247,6 @@ class VmaAllocationObjectAllocator;
 #endif // _VMA_FORWARD_DECLARATIONS
-
 #ifndef _VMA_FUNCTIONS
 /*
@@ -3445,7 +3457,7 @@ static inline uint64_t VmaPrevPow2(uint64_t v)
     return v;
 }
-static inline bool VmaStrIsEmpty(const char* pStr)
+static inline bool VmaStrIsEmpty(const char *pStr)
 {
     return pStr == VMA_NULL || *pStr == '\0';
 }
@@ -3493,17 +3505,14 @@ static inline bool VmaIsBufferImageGranularityConflict(
     case VMA_SUBALLOCATION_TYPE_UNKNOWN:
         return true;
     case VMA_SUBALLOCATION_TYPE_BUFFER:
-        return
-            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
-            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
+        return suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+               suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
     case VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN:
-        return
-            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
-            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
-            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
+        return suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN ||
+               suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR ||
+               suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
     case VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR:
-        return
-            suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
+        return suballocType2 == VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL;
     case VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL:
         return false;
     default:
@@ -3512,10 +3521,10 @@ static inline bool VmaIsBufferImageGranularityConflict(
     }
 }
-static void
VmaWriteMagicValue(void* pData, VkDeviceSize offset)
+static void VmaWriteMagicValue(void *pData, VkDeviceSize offset)
 {
 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
-    uint32_t* pDst = (uint32_t*)((char*)pData + offset);
+    uint32_t *pDst = (uint32_t *)((char *)pData + offset);
     const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
     for (size_t i = 0; i < numberCount; ++i, ++pDst)
     {
@@ -3526,10 +3535,10 @@ static void VmaWriteMagicValue(void* pData, VkDeviceSize offset)
 #endif
 }
-static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
+static bool VmaValidateMagicValue(const void *pData, VkDeviceSize offset)
 {
 #if VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_DETECT_CORRUPTION
-    const uint32_t* pSrc = (const uint32_t*)((const char*)pData + offset);
+    const uint32_t *pSrc = (const uint32_t *)((const char *)pData + offset);
     const size_t numberCount = VMA_DEBUG_MARGIN / sizeof(uint32_t);
     for (size_t i = 0; i < numberCount; ++i, ++pSrc)
     {
@@ -3546,7 +3555,7 @@ static bool VmaValidateMagicValue(const void* pData, VkDeviceSize offset)
 Fills structure with parameters of an example buffer to be used for transfers
 during GPU memory defragmentation.
 */
-static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBufCreateInfo)
+static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo &outBufCreateInfo)
 {
     memset(&outBufCreateInfo, 0, sizeof(outBufCreateInfo));
     outBufCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
@@ -3554,7 +3563,6 @@ static void VmaFillGpuDefragmentationBufferCreateInfo(VkBufferCreateInfo& outBuf
     outBufCreateInfo.size = (VkDeviceSize)VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE; // Example size.
 }
-
 /*
 Performs binary search and returns iterator to first element that is greater or
 equal to (key), according to comparison (cmp).
@@ -3565,12 +3573,12 @@ Returned value is the found element, if present in the collection or place where
 new element with value (key) should be inserted.
 */
 template <typename CmpLess, typename IterT, typename KeyT>
-static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, const CmpLess& cmp)
+static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT &key, const CmpLess &cmp)
 {
     size_t down = 0, up = size_t(end - beg);
     while (down < up)
     {
-        const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation
+        const size_t mid = down + (up - down) / 2; // Overflow-safe midpoint calculation
         if (cmp(*(beg + mid), key))
        {
            down = mid + 1;
@@ -3583,8 +3591,8 @@ static IterT VmaBinaryFindFirstNotLess(IterT beg, IterT end, const KeyT& key, co
     return beg + down;
 }
-template<typename CmpLess, typename IterT, typename KeyT>
-IterT VmaBinaryFindSorted(const IterT& beg, const IterT& end, const KeyT& value, const CmpLess& cmp)
+template <typename CmpLess, typename IterT, typename KeyT>
+IterT VmaBinaryFindSorted(const IterT &beg, const IterT &end, const KeyT &value, const CmpLess &cmp)
 {
     IterT it = VmaBinaryFindFirstNotLess(
         beg, end, value, cmp);
@@ -3601,8 +3609,8 @@ Returns true if all pointers in the array are not-null and unique.
 Warning! O(n^2) complexity. Use only inside VMA_HEAVY_ASSERT.
 T must be pointer type, e.g. VmaAllocation, VmaPool.
*/ -template -static bool VmaValidatePointerArray(uint32_t count, const T* arr) +template +static bool VmaValidatePointerArray(uint32_t count, const T *arr) { for (uint32_t i = 0; i < count; ++i) { @@ -3622,8 +3630,8 @@ static bool VmaValidatePointerArray(uint32_t count, const T* arr) return true; } -template -static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct) +template +static inline void VmaPnextChainPushFront(MainT *mainStruct, NewT *newStruct) { newStruct->pNext = mainStruct->pNext; mainStruct->pNext = newStruct; @@ -3633,22 +3641,22 @@ static inline void VmaPnextChainPushFront(MainT* mainStruct, NewT* newStruct) // converts usage to required/preferred/not preferred flags. static bool FindMemoryPreferences( bool isIntegratedGPU, - const VmaAllocationCreateInfo& allocCreateInfo, + const VmaAllocationCreateInfo &allocCreateInfo, VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown. - VkMemoryPropertyFlags& outRequiredFlags, - VkMemoryPropertyFlags& outPreferredFlags, - VkMemoryPropertyFlags& outNotPreferredFlags) + VkMemoryPropertyFlags &outRequiredFlags, + VkMemoryPropertyFlags &outPreferredFlags, + VkMemoryPropertyFlags &outNotPreferredFlags) { outRequiredFlags = allocCreateInfo.requiredFlags; outPreferredFlags = allocCreateInfo.preferredFlags; outNotPreferredFlags = 0; - switch(allocCreateInfo.usage) + switch (allocCreateInfo.usage) { case VMA_MEMORY_USAGE_UNKNOWN: break; case VMA_MEMORY_USAGE_GPU_ONLY: - if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + if (!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; } @@ -3658,7 +3666,7 @@ static bool FindMemoryPreferences( break; case VMA_MEMORY_USAGE_CPU_TO_GPU: outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; - if(!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) + if (!isIntegratedGPU || (outPreferredFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; } @@ -3677,7 +3685,7 @@ static bool FindMemoryPreferences( case VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE: case VMA_MEMORY_USAGE_AUTO_PREFER_HOST: { - if(bufImgUsage == UINT32_MAX) + if (bufImgUsage == UINT32_MAX) { VMA_ASSERT(0 && "VMA_MEMORY_USAGE_AUTO* values can only be used with functions like vmaCreateBuffer, vmaCreateImage so that the details of the created resource are known."); return false; @@ -3691,9 +3699,9 @@ static bool FindMemoryPreferences( const bool preferHost = allocCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST; // CPU random access - e.g. a buffer written to or transferred from GPU to read back on CPU. - if(hostAccessRandom) + if (hostAccessRandom) { - if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost) + if (!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost) { // Nice if it will end up in HOST_VISIBLE, but more importantly prefer DEVICE_LOCAL. // Omitting HOST_VISIBLE here is intentional. @@ -3708,12 +3716,12 @@ static bool FindMemoryPreferences( } } // CPU sequential write - may be CPU or host-visible GPU memory, uncached and write-combined. - else if(hostAccessSequentialWrite) + else if (hostAccessSequentialWrite) { // Want uncached and write-combined. 
outNotPreferredFlags |= VK_MEMORY_PROPERTY_HOST_CACHED_BIT; - if(!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost) + if (!isIntegratedGPU && deviceAccess && hostAccessAllowTransferInstead && !preferHost) { outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; } @@ -3721,10 +3729,10 @@ static bool FindMemoryPreferences( { outRequiredFlags |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; // Direct GPU access, CPU sequential write (e.g. a dynamic uniform buffer updated every frame) - if(deviceAccess) + if (deviceAccess) { // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose GPU memory. - if(preferHost) + if (preferHost) outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; else outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; @@ -3733,7 +3741,7 @@ static bool FindMemoryPreferences( else { // Could go to CPU memory or GPU BAR/unified. Up to the user to decide. If no preference, choose CPU memory. - if(preferDevice) + if (preferDevice) outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; else outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; @@ -3744,10 +3752,10 @@ static bool FindMemoryPreferences( else { // GPU access, no CPU access (e.g. a color attachment image) - prefer GPU memory - if(deviceAccess) + if (deviceAccess) { // ...unless there is a clear preference from the user not to do so. - if(preferHost) + if (preferHost) outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; else outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; @@ -3756,7 +3764,7 @@ static bool FindMemoryPreferences( // It may be staging copy intended for e.g. preserving image for next frame (then better GPU memory) or // a "swap file" copy to free some GPU memory (then better CPU memory). // Up to the user to decide. If no preferece, assume the former and choose GPU memory. - if(preferHost) + if (preferHost) outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; else outPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT; @@ -3768,8 +3776,8 @@ static bool FindMemoryPreferences( } // Avoid DEVICE_COHERENT unless explicitly requested. 
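The FindMemoryPreferences logic above is what turns a VmaMemoryUsage value plus the host-access flags into required/preferred/not-preferred VkMemoryPropertyFlags. A minimal sketch of how that surfaces through the public VMA 3.x API — illustrative usage, not code from this patch; an already created VmaAllocator named allocator is assumed:

    #include "vk_mem_alloc.h"

    // Hypothetical helper: create a CPU-written staging buffer and let VMA
    // pick the memory type via the preference logic above.
    static VkResult CreateStagingBuffer(VmaAllocator allocator, VkDeviceSize size,
                                        VkBuffer *pOutBuffer, VmaAllocation *pOutAllocation)
    {
        VkBufferCreateInfo bufCreateInfo = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO};
        bufCreateInfo.size = size;
        bufCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

        VmaAllocationCreateInfo allocCreateInfo = {};
        // VMA_MEMORY_USAGE_AUTO routes through the VMA_MEMORY_USAGE_AUTO* branch
        // above; the sequential-write bit makes HOST_VISIBLE required and
        // HOST_CACHED not-preferred (uncached, write-combined memory).
        allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
        allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT;

        return vmaCreateBuffer(allocator, &bufCreateInfo, &allocCreateInfo,
                               pOutBuffer, pOutAllocation, VMA_NULL);
    }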
- if(((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) & - (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0) + if (((allocCreateInfo.requiredFlags | allocCreateInfo.preferredFlags) & + (VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY)) == 0) { outNotPreferredFlags |= VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY; } @@ -3780,9 +3788,9 @@ static bool FindMemoryPreferences( //////////////////////////////////////////////////////////////////////////////// // Memory allocation -static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t size, size_t alignment) +static void *VmaMalloc(const VkAllocationCallbacks *pAllocationCallbacks, size_t size, size_t alignment) { - void* result = VMA_NULL; + void *result = VMA_NULL; if ((pAllocationCallbacks != VMA_NULL) && (pAllocationCallbacks->pfnAllocation != VMA_NULL)) { @@ -3800,7 +3808,7 @@ static void* VmaMalloc(const VkAllocationCallbacks* pAllocationCallbacks, size_t return result; } -static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr) +static void VmaFree(const VkAllocationCallbacks *pAllocationCallbacks, void *ptr) { if ((pAllocationCallbacks != VMA_NULL) && (pAllocationCallbacks->pfnFree != VMA_NULL)) @@ -3813,35 +3821,35 @@ static void VmaFree(const VkAllocationCallbacks* pAllocationCallbacks, void* ptr } } -template -static T* VmaAllocate(const VkAllocationCallbacks* pAllocationCallbacks) +template +static T *VmaAllocate(const VkAllocationCallbacks *pAllocationCallbacks) { - return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T)); + return (T *)VmaMalloc(pAllocationCallbacks, sizeof(T), VMA_ALIGN_OF(T)); } -template -static T* VmaAllocateArray(const VkAllocationCallbacks* pAllocationCallbacks, size_t count) +template +static T *VmaAllocateArray(const VkAllocationCallbacks *pAllocationCallbacks, size_t count) { - return (T*)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T)); + return (T *)VmaMalloc(pAllocationCallbacks, sizeof(T) * count, VMA_ALIGN_OF(T)); } -#define vma_new(allocator, type) new(VmaAllocate(allocator))(type) +#define vma_new(allocator, type) new (VmaAllocate(allocator))(type) -#define vma_new_array(allocator, type, count) new(VmaAllocateArray((allocator), (count)))(type) +#define vma_new_array(allocator, type, count) new (VmaAllocateArray((allocator), (count)))(type) -template -static void vma_delete(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr) +template +static void vma_delete(const VkAllocationCallbacks *pAllocationCallbacks, T *ptr) { ptr->~T(); VmaFree(pAllocationCallbacks, ptr); } -template -static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, T* ptr, size_t count) +template +static void vma_delete_array(const VkAllocationCallbacks *pAllocationCallbacks, T *ptr, size_t count) { if (ptr != VMA_NULL) { - for (size_t i = count; i--; ) + for (size_t i = count; i--;) { ptr[i].~T(); } @@ -3849,12 +3857,12 @@ static void vma_delete_array(const VkAllocationCallbacks* pAllocationCallbacks, } } -static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr) +static char *VmaCreateStringCopy(const VkAllocationCallbacks *allocs, const char *srcStr) { if (srcStr != VMA_NULL) { const size_t len = strlen(srcStr); - char* const result = vma_new_array(allocs, char, len + 1); + char *const result = vma_new_array(allocs, char, len + 1); memcpy(result, srcStr, len + 1); 
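The vma_new / vma_delete macros above decouple construction from allocation so that every internal object can be routed through user-supplied VkAllocationCallbacks. A standalone sketch of the same placement-new pattern, with malloc/free standing in for the callback indirection (PoolNew and PoolDelete are illustrative names, not part of VMA; error handling beyond a null check is elided):

    #include <cstdlib>
    #include <new>
    #include <utility>

    template <typename T, typename... Args>
    T *PoolNew(Args &&...args)
    {
        void *raw = std::malloc(sizeof(T));              // stands in for VmaMalloc(callbacks, ...)
        if (raw == nullptr)
            return nullptr;
        return new (raw) T(std::forward<Args>(args)...); // placement new: construct, don't allocate
    }

    template <typename T>
    void PoolDelete(T *ptr)
    {
        if (ptr != nullptr)
        {
            ptr->~T();      // explicit destructor call, mirrors vma_delete
            std::free(ptr); // stands in for VmaFree(callbacks, ...)
        }
    }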
return result; } @@ -3862,11 +3870,11 @@ static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char } #if VMA_STATS_STRING_ENABLED -static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char* srcStr, size_t strLen) +static char *VmaCreateStringCopy(const VkAllocationCallbacks *allocs, const char *srcStr, size_t strLen) { if (srcStr != VMA_NULL) { - char* const result = vma_new_array(allocs, char, strLen + 1); + char *const result = vma_new_array(allocs, char, strLen + 1); memcpy(result, srcStr, strLen); result[strLen] = '\0'; return result; @@ -3875,7 +3883,7 @@ static char* VmaCreateStringCopy(const VkAllocationCallbacks* allocs, const char } #endif // VMA_STATS_STRING_ENABLED -static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str) +static void VmaFreeString(const VkAllocationCallbacks *allocs, char *str) { if (str != VMA_NULL) { @@ -3884,20 +3892,21 @@ static void VmaFreeString(const VkAllocationCallbacks* allocs, char* str) } } -template -size_t VmaVectorInsertSorted(VectorT& vector, const typename VectorT::value_type& value) +template +size_t VmaVectorInsertSorted(VectorT &vector, const typename VectorT::value_type &value) { const size_t indexToInsert = VmaBinaryFindFirstNotLess( - vector.data(), - vector.data() + vector.size(), - value, - CmpLess()) - vector.data(); + vector.data(), + vector.data() + vector.size(), + value, + CmpLess()) - + vector.data(); VmaVectorInsert(vector, indexToInsert, value); return indexToInsert; } -template -bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& value) +template +bool VmaVectorRemoveSorted(VectorT &vector, const typename VectorT::value_type &value) { CmpLess comparator; typename VectorT::iterator it = VmaBinaryFindFirstNotLess( @@ -3917,7 +3926,7 @@ bool VmaVectorRemoveSorted(VectorT& vector, const typename VectorT::value_type& #ifndef _VMA_STATISTICS_FUNCTIONS -static void VmaClearStatistics(VmaStatistics& outStats) +static void VmaClearStatistics(VmaStatistics &outStats) { outStats.blockCount = 0; outStats.allocationCount = 0; @@ -3925,7 +3934,7 @@ static void VmaClearStatistics(VmaStatistics& outStats) outStats.allocationBytes = 0; } -static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src) +static void VmaAddStatistics(VmaStatistics &inoutStats, const VmaStatistics &src) { inoutStats.blockCount += src.blockCount; inoutStats.allocationCount += src.allocationCount; @@ -3933,7 +3942,7 @@ static void VmaAddStatistics(VmaStatistics& inoutStats, const VmaStatistics& src inoutStats.allocationBytes += src.allocationBytes; } -static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats) +static void VmaClearDetailedStatistics(VmaDetailedStatistics &outStats) { VmaClearStatistics(outStats.statistics); outStats.unusedRangeCount = 0; @@ -3943,7 +3952,7 @@ static void VmaClearDetailedStatistics(VmaDetailedStatistics& outStats) outStats.unusedRangeSizeMax = 0; } -static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats, VkDeviceSize size) +static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics &inoutStats, VkDeviceSize size) { inoutStats.statistics.allocationCount++; inoutStats.statistics.allocationBytes += size; @@ -3951,14 +3960,14 @@ static void VmaAddDetailedStatisticsAllocation(VmaDetailedStatistics& inoutStats inoutStats.allocationSizeMax = VMA_MAX(inoutStats.allocationSizeMax, size); } -static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics& inoutStats, VkDeviceSize 
size) +static void VmaAddDetailedStatisticsUnusedRange(VmaDetailedStatistics &inoutStats, VkDeviceSize size) { inoutStats.unusedRangeCount++; inoutStats.unusedRangeSizeMin = VMA_MIN(inoutStats.unusedRangeSizeMin, size); inoutStats.unusedRangeSizeMax = VMA_MAX(inoutStats.unusedRangeSizeMax, size); } -static void VmaAddDetailedStatistics(VmaDetailedStatistics& inoutStats, const VmaDetailedStatistics& src) +static void VmaAddDetailedStatistics(VmaDetailedStatistics &inoutStats, const VmaDetailedStatistics &src) { VmaAddStatistics(inoutStats.statistics, src.statistics); inoutStats.unusedRangeCount += src.unusedRangeCount; @@ -3976,15 +3985,23 @@ struct VmaMutexLock { VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLock) public: - VmaMutexLock(VMA_MUTEX& mutex, bool useMutex = true) : - m_pMutex(useMutex ? &mutex : VMA_NULL) + VmaMutexLock(VMA_MUTEX &mutex, bool useMutex = true) : m_pMutex(useMutex ? &mutex : VMA_NULL) + { + if (m_pMutex) + { + m_pMutex->Lock(); + } + } + ~VmaMutexLock() { - if (m_pMutex) { m_pMutex->Lock(); } + if (m_pMutex) + { + m_pMutex->Unlock(); + } } - ~VmaMutexLock() { if (m_pMutex) { m_pMutex->Unlock(); } } private: - VMA_MUTEX* m_pMutex; + VMA_MUTEX *m_pMutex; }; // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for reading. @@ -3992,15 +4009,23 @@ struct VmaMutexLockRead { VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockRead) public: - VmaMutexLockRead(VMA_RW_MUTEX& mutex, bool useMutex) : - m_pMutex(useMutex ? &mutex : VMA_NULL) + VmaMutexLockRead(VMA_RW_MUTEX &mutex, bool useMutex) : m_pMutex(useMutex ? &mutex : VMA_NULL) + { + if (m_pMutex) + { + m_pMutex->LockRead(); + } + } + ~VmaMutexLockRead() { - if (m_pMutex) { m_pMutex->LockRead(); } + if (m_pMutex) + { + m_pMutex->UnlockRead(); + } } - ~VmaMutexLockRead() { if (m_pMutex) { m_pMutex->UnlockRead(); } } private: - VMA_RW_MUTEX* m_pMutex; + VMA_RW_MUTEX *m_pMutex; }; // Helper RAII class to lock a RW mutex in constructor and unlock it in destructor (at the end of scope), for writing. @@ -4008,28 +4033,37 @@ struct VmaMutexLockWrite { VMA_CLASS_NO_COPY_NO_MOVE(VmaMutexLockWrite) public: - VmaMutexLockWrite(VMA_RW_MUTEX& mutex, bool useMutex) + VmaMutexLockWrite(VMA_RW_MUTEX &mutex, bool useMutex) : m_pMutex(useMutex ? &mutex : VMA_NULL) { - if (m_pMutex) { m_pMutex->LockWrite(); } + if (m_pMutex) + { + m_pMutex->LockWrite(); + } + } + ~VmaMutexLockWrite() + { + if (m_pMutex) + { + m_pMutex->UnlockWrite(); + } } - ~VmaMutexLockWrite() { if (m_pMutex) { m_pMutex->UnlockWrite(); } } private: - VMA_RW_MUTEX* m_pMutex; + VMA_RW_MUTEX *m_pMutex; }; #if VMA_DEBUG_GLOBAL_MUTEX - static VMA_MUTEX gDebugGlobalMutex; - #define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true); +static VMA_MUTEX gDebugGlobalMutex; +#define VMA_DEBUG_GLOBAL_MUTEX_LOCK VmaMutexLock debugGlobalMutexLock(gDebugGlobalMutex, true); #else - #define VMA_DEBUG_GLOBAL_MUTEX_LOCK +#define VMA_DEBUG_GLOBAL_MUTEX_LOCK #endif #endif // _VMA_MUTEX_LOCK #ifndef _VMA_ATOMIC_TRANSACTIONAL_INCREMENT // An object that increments given atomic but decrements it back in the destructor unless Commit() is called. 
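The RAII lock helpers reformatted below all follow one conditional-locking idiom: the constructor stores the mutex pointer only when useMutex is true and locks it, the destructor unlocks, so single-threaded configurations pay nothing. A minimal standalone sketch of the idiom, assuming std::mutex in place of the VMA_MUTEX wrapper:

    #include <mutex>

    class ConditionalLock
    {
    public:
        ConditionalLock(std::mutex &mutex, bool useMutex)
            : m_pMutex(useMutex ? &mutex : nullptr)
        {
            if (m_pMutex)
                m_pMutex->lock(); // lock only when synchronization is enabled
        }
        ~ConditionalLock()
        {
            if (m_pMutex)
                m_pMutex->unlock(); // released at end of scope
        }
        ConditionalLock(const ConditionalLock &) = delete;
        ConditionalLock &operator=(const ConditionalLock &) = delete;

    private:
        std::mutex *m_pMutex;
    };

    // Usage: { ConditionalLock lock(gMutex, threadingEnabled); /* critical section */ }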
-template +template struct AtomicTransactionalIncrement { public: @@ -4037,46 +4071,46 @@ struct AtomicTransactionalIncrement ~AtomicTransactionalIncrement() { - if(m_Atomic) + if (m_Atomic) --(*m_Atomic); } void Commit() { m_Atomic = nullptr; } - T Increment(AtomicT* atomic) + T Increment(AtomicT *atomic) { m_Atomic = atomic; return m_Atomic->fetch_add(1); } private: - AtomicT* m_Atomic = nullptr; + AtomicT *m_Atomic = nullptr; }; #endif // _VMA_ATOMIC_TRANSACTIONAL_INCREMENT #ifndef _VMA_STL_ALLOCATOR // STL-compatible allocator. -template +template struct VmaStlAllocator { - const VkAllocationCallbacks* const m_pCallbacks; + const VkAllocationCallbacks *const m_pCallbacks; typedef T value_type; - VmaStlAllocator(const VkAllocationCallbacks* pCallbacks) : m_pCallbacks(pCallbacks) {} - template - VmaStlAllocator(const VmaStlAllocator& src) : m_pCallbacks(src.m_pCallbacks) {} - VmaStlAllocator(const VmaStlAllocator&) = default; - VmaStlAllocator& operator=(const VmaStlAllocator&) = delete; + VmaStlAllocator(const VkAllocationCallbacks *pCallbacks) : m_pCallbacks(pCallbacks) {} + template + VmaStlAllocator(const VmaStlAllocator &src) : m_pCallbacks(src.m_pCallbacks) {} + VmaStlAllocator(const VmaStlAllocator &) = default; + VmaStlAllocator &operator=(const VmaStlAllocator &) = delete; - T* allocate(size_t n) { return VmaAllocateArray(m_pCallbacks, n); } - void deallocate(T* p, size_t n) { VmaFree(m_pCallbacks, p); } + T *allocate(size_t n) { return VmaAllocateArray(m_pCallbacks, n); } + void deallocate(T *p, size_t n) { VmaFree(m_pCallbacks, p); } - template - bool operator==(const VmaStlAllocator& rhs) const + template + bool operator==(const VmaStlAllocator &rhs) const { return m_pCallbacks == rhs.m_pCallbacks; } - template - bool operator!=(const VmaStlAllocator& rhs) const + template + bool operator!=(const VmaStlAllocator &rhs) const { return m_pCallbacks != rhs.m_pCallbacks; } @@ -4087,31 +4121,47 @@ struct VmaStlAllocator /* Class with interface compatible with subset of std::vector. T must be POD because constructors and destructors are not called and memcpy is used for these objects. */ -template +template class VmaVector { public: typedef T value_type; - typedef T* iterator; - typedef const T* const_iterator; + typedef T *iterator; + typedef const T *const_iterator; - VmaVector(const AllocatorT& allocator); - VmaVector(size_t count, const AllocatorT& allocator); + VmaVector(const AllocatorT &allocator); + VmaVector(size_t count, const AllocatorT &allocator); // This version of the constructor is here for compatibility with pre-C++14 std::vector. // value is unused. 
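AtomicTransactionalIncrement above implements a tiny transaction: the counter is bumped optimistically and the destructor rolls it back on any early return, unless Commit() is called first. A sketch of the intended usage pattern, assuming the internal template as declared above; TryAllocate and wouldExceedLimit are hypothetical:

    #include <atomic>
    #include <cstdint>

    std::atomic<uint32_t> gOperationCount{0};

    bool TryAllocate(bool wouldExceedLimit) // hypothetical caller-side check
    {
        AtomicTransactionalIncrement<uint32_t> increment;
        increment.Increment(&gOperationCount); // optimistic bump

        if (wouldExceedLimit)
            return false; // destructor decrements the counter back

        // ... perform the actual allocation here ...

        increment.Commit(); // keep the increment on success
        return true;
    }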
- VmaVector(size_t count, const T& value, const AllocatorT& allocator) : VmaVector(count, allocator) {} - VmaVector(const VmaVector& src); - VmaVector& operator=(const VmaVector& rhs); + VmaVector(size_t count, const T &value, const AllocatorT &allocator) : VmaVector(count, allocator) {} + VmaVector(const VmaVector &src); + VmaVector &operator=(const VmaVector &rhs); ~VmaVector() { VmaFree(m_Allocator.m_pCallbacks, m_pArray); } bool empty() const { return m_Count == 0; } size_t size() const { return m_Count; } - T* data() { return m_pArray; } - T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; } - T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; } - const T* data() const { return m_pArray; } - const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[0]; } - const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return m_pArray[m_Count - 1]; } + T *data() { return m_pArray; } + T &front() + { + VMA_HEAVY_ASSERT(m_Count > 0); + return m_pArray[0]; + } + T &back() + { + VMA_HEAVY_ASSERT(m_Count > 0); + return m_pArray[m_Count - 1]; + } + const T *data() const { return m_pArray; } + const T &front() const + { + VMA_HEAVY_ASSERT(m_Count > 0); + return m_pArray[0]; + } + const T &back() const + { + VMA_HEAVY_ASSERT(m_Count > 0); + return m_pArray[m_Count - 1]; + } iterator begin() { return m_pArray; } iterator end() { return m_pArray + m_Count; } @@ -4120,49 +4170,67 @@ class VmaVector const_iterator begin() const { return cbegin(); } const_iterator end() const { return cend(); } - void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); } - void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); } - void push_front(const T& src) { insert(0, src); } + void pop_front() + { + VMA_HEAVY_ASSERT(m_Count > 0); + remove(0); + } + void pop_back() + { + VMA_HEAVY_ASSERT(m_Count > 0); + resize(size() - 1); + } + void push_front(const T &src) { insert(0, src); } - void push_back(const T& src); + void push_back(const T &src); void reserve(size_t newCapacity, bool freeMemory = false); void resize(size_t newCount); void clear() { resize(0); } void shrink_to_fit(); - void insert(size_t index, const T& src); + void insert(size_t index, const T &src); void remove(size_t index); - T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; } - const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return m_pArray[index]; } + T &operator[](size_t index) + { + VMA_HEAVY_ASSERT(index < m_Count); + return m_pArray[index]; + } + const T &operator[](size_t index) const + { + VMA_HEAVY_ASSERT(index < m_Count); + return m_pArray[index]; + } private: AllocatorT m_Allocator; - T* m_pArray; + T *m_pArray; size_t m_Count; size_t m_Capacity; }; #ifndef _VMA_VECTOR_FUNCTIONS -template -VmaVector::VmaVector(const AllocatorT& allocator) +template +VmaVector::VmaVector(const AllocatorT &allocator) : m_Allocator(allocator), - m_pArray(VMA_NULL), - m_Count(0), - m_Capacity(0) {} + m_pArray(VMA_NULL), + m_Count(0), + m_Capacity(0) +{ +} -template -VmaVector::VmaVector(size_t count, const AllocatorT& allocator) +template +VmaVector::VmaVector(size_t count, const AllocatorT &allocator) : m_Allocator(allocator), - m_pArray(count ? (T*)VmaAllocateArray(allocator.m_pCallbacks, count) : VMA_NULL), - m_Count(count), - m_Capacity(count) {} + m_pArray(count ? 
(T *)VmaAllocateArray(allocator.m_pCallbacks, count) : VMA_NULL), + m_Count(count), + m_Capacity(count) {} -template -VmaVector::VmaVector(const VmaVector& src) +template +VmaVector::VmaVector(const VmaVector &src) : m_Allocator(src.m_Allocator), - m_pArray(src.m_Count ? (T*)VmaAllocateArray(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL), - m_Count(src.m_Count), - m_Capacity(src.m_Count) + m_pArray(src.m_Count ? (T *)VmaAllocateArray(src.m_Allocator.m_pCallbacks, src.m_Count) : VMA_NULL), + m_Count(src.m_Count), + m_Capacity(src.m_Count) { if (m_Count != 0) { @@ -4170,8 +4238,8 @@ VmaVector::VmaVector(const VmaVector& src) } } -template -VmaVector& VmaVector::operator=(const VmaVector& rhs) +template +VmaVector &VmaVector::operator=(const VmaVector &rhs) { if (&rhs != this) { @@ -4184,15 +4252,15 @@ VmaVector& VmaVector::operator=(const VmaVector& r return *this; } -template -void VmaVector::push_back(const T& src) +template +void VmaVector::push_back(const T &src) { const size_t newIndex = size(); resize(newIndex + 1); m_pArray[newIndex] = src; } -template +template void VmaVector::reserve(size_t newCapacity, bool freeMemory) { newCapacity = VMA_MAX(newCapacity, m_Count); @@ -4204,7 +4272,7 @@ void VmaVector::reserve(size_t newCapacity, bool freeMemory) if (newCapacity != m_Capacity) { - T* const newArray = newCapacity ? VmaAllocateArray(m_Allocator, newCapacity) : VMA_NULL; + T *const newArray = newCapacity ? VmaAllocateArray(m_Allocator, newCapacity) : VMA_NULL; if (m_Count != 0) { memcpy(newArray, m_pArray, m_Count * sizeof(T)); @@ -4215,7 +4283,7 @@ void VmaVector::reserve(size_t newCapacity, bool freeMemory) } } -template +template void VmaVector::resize(size_t newCount) { size_t newCapacity = m_Capacity; @@ -4226,7 +4294,7 @@ void VmaVector::resize(size_t newCount) if (newCapacity != m_Capacity) { - T* const newArray = newCapacity ? VmaAllocateArray(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL; + T *const newArray = newCapacity ? VmaAllocateArray(m_Allocator.m_pCallbacks, newCapacity) : VMA_NULL; const size_t elementsToCopy = VMA_MIN(m_Count, newCount); if (elementsToCopy != 0) { @@ -4240,12 +4308,12 @@ void VmaVector::resize(size_t newCount) m_Count = newCount; } -template +template void VmaVector::shrink_to_fit() { if (m_Capacity > m_Count) { - T* newArray = VMA_NULL; + T *newArray = VMA_NULL; if (m_Count > 0) { newArray = VmaAllocateArray(m_Allocator.m_pCallbacks, m_Count); @@ -4257,8 +4325,8 @@ void VmaVector::shrink_to_fit() } } -template -void VmaVector::insert(size_t index, const T& src) +template +void VmaVector::insert(size_t index, const T &src) { VMA_HEAVY_ASSERT(index <= m_Count); const size_t oldCount = size(); @@ -4270,7 +4338,7 @@ void VmaVector::insert(size_t index, const T& src) m_pArray[index] = src; } -template +template void VmaVector::remove(size_t index) { VMA_HEAVY_ASSERT(index < m_Count); @@ -4283,14 +4351,14 @@ void VmaVector::remove(size_t index) } #endif // _VMA_VECTOR_FUNCTIONS -template -static void VmaVectorInsert(VmaVector& vec, size_t index, const T& item) +template +static void VmaVectorInsert(VmaVector &vec, size_t index, const T &item) { vec.insert(index, item); } -template -static void VmaVectorRemove(VmaVector& vec, size_t index) +template +static void VmaVectorRemove(VmaVector &vec, size_t index) { vec.remove(index); } @@ -4304,72 +4372,106 @@ It contains some number of elements in-place, which allows it to avoid heap allo when the actual number of elements is below that threshold. 
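VmaSmallVector's mechanism is a size-based switch between an in-place array and a heap-backed vector. A standalone sketch of that small-buffer layout (SmallVectorSketch is an illustrative name, not VMA code):

    #include <cstddef>
    #include <vector>

    template <typename T, size_t N>
    class SmallVectorSketch
    {
    public:
        // The only decision point: which storage currently holds the elements.
        T *data() { return m_Count > N ? m_Dynamic.data() : m_Static; }
        // push_back, resize, etc. would migrate elements between the two
        // arrays exactly as VmaSmallVector::resize does below.

    private:
        size_t m_Count = 0;
        T m_Static[N];            // used while m_Count <= N (no heap traffic)
        std::vector<T> m_Dynamic; // used once m_Count > N
    };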
This allows normal "small" cases to be fast without losing generality for large inputs. */ -template +template class VmaSmallVector { public: typedef T value_type; - typedef T* iterator; - - VmaSmallVector(const AllocatorT& allocator); - VmaSmallVector(size_t count, const AllocatorT& allocator); - template - VmaSmallVector(const VmaSmallVector&) = delete; - template - VmaSmallVector& operator=(const VmaSmallVector&) = delete; + typedef T *iterator; + + VmaSmallVector(const AllocatorT &allocator); + VmaSmallVector(size_t count, const AllocatorT &allocator); + template + VmaSmallVector(const VmaSmallVector &) = delete; + template + VmaSmallVector &operator=(const VmaSmallVector &) = delete; ~VmaSmallVector() = default; bool empty() const { return m_Count == 0; } size_t size() const { return m_Count; } - T* data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; } - T& front() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; } - T& back() { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; } - const T* data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; } - const T& front() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[0]; } - const T& back() const { VMA_HEAVY_ASSERT(m_Count > 0); return data()[m_Count - 1]; } + T *data() { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; } + T &front() + { + VMA_HEAVY_ASSERT(m_Count > 0); + return data()[0]; + } + T &back() + { + VMA_HEAVY_ASSERT(m_Count > 0); + return data()[m_Count - 1]; + } + const T *data() const { return m_Count > N ? m_DynamicArray.data() : m_StaticArray; } + const T &front() const + { + VMA_HEAVY_ASSERT(m_Count > 0); + return data()[0]; + } + const T &back() const + { + VMA_HEAVY_ASSERT(m_Count > 0); + return data()[m_Count - 1]; + } iterator begin() { return data(); } iterator end() { return data() + m_Count; } - void pop_front() { VMA_HEAVY_ASSERT(m_Count > 0); remove(0); } - void pop_back() { VMA_HEAVY_ASSERT(m_Count > 0); resize(size() - 1); } - void push_front(const T& src) { insert(0, src); } + void pop_front() + { + VMA_HEAVY_ASSERT(m_Count > 0); + remove(0); + } + void pop_back() + { + VMA_HEAVY_ASSERT(m_Count > 0); + resize(size() - 1); + } + void push_front(const T &src) { insert(0, src); } - void push_back(const T& src); + void push_back(const T &src); void resize(size_t newCount, bool freeMemory = false); void clear(bool freeMemory = false); - void insert(size_t index, const T& src); + void insert(size_t index, const T &src); void remove(size_t index); - T& operator[](size_t index) { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; } - const T& operator[](size_t index) const { VMA_HEAVY_ASSERT(index < m_Count); return data()[index]; } + T &operator[](size_t index) + { + VMA_HEAVY_ASSERT(index < m_Count); + return data()[index]; + } + const T &operator[](size_t index) const + { + VMA_HEAVY_ASSERT(index < m_Count); + return data()[index]; + } private: size_t m_Count; - T m_StaticArray[N]; // Used when m_Size <= N + T m_StaticArray[N]; // Used when m_Size <= N VmaVector m_DynamicArray; // Used when m_Size > N }; #ifndef _VMA_SMALL_VECTOR_FUNCTIONS -template -VmaSmallVector::VmaSmallVector(const AllocatorT& allocator) +template +VmaSmallVector::VmaSmallVector(const AllocatorT &allocator) : m_Count(0), - m_DynamicArray(allocator) {} + m_DynamicArray(allocator) +{ +} -template -VmaSmallVector::VmaSmallVector(size_t count, const AllocatorT& allocator) +template +VmaSmallVector::VmaSmallVector(size_t count, const AllocatorT &allocator) : m_Count(count), - 
m_DynamicArray(count > N ? count : 0, allocator) {} + m_DynamicArray(count > N ? count : 0, allocator) {} -template -void VmaSmallVector::push_back(const T& src) +template +void VmaSmallVector::push_back(const T &src) { const size_t newIndex = size(); resize(newIndex + 1); data()[newIndex] = src; } -template +template void VmaSmallVector::resize(size_t newCount, bool freeMemory) { if (newCount > N && m_Count > N) @@ -4410,7 +4512,7 @@ void VmaSmallVector::resize(size_t newCount, bool freeMemory) m_Count = newCount; } -template +template void VmaSmallVector::clear(bool freeMemory) { m_DynamicArray.clear(); @@ -4421,13 +4523,13 @@ void VmaSmallVector::clear(bool freeMemory) m_Count = 0; } -template -void VmaSmallVector::insert(size_t index, const T& src) +template +void VmaSmallVector::insert(size_t index, const T &src) { VMA_HEAVY_ASSERT(index <= m_Count); const size_t oldCount = size(); resize(oldCount + 1); - T* const dataPtr = data(); + T *const dataPtr = data(); if (index < oldCount) { // I know, this could be more optimal for case where memmove can be memcpy directly from m_StaticArray to m_DynamicArray. @@ -4436,7 +4538,7 @@ void VmaSmallVector::insert(size_t index, const T& src) dataPtr[index] = src; } -template +template void VmaSmallVector::remove(size_t index) { VMA_HEAVY_ASSERT(index < m_Count); @@ -4444,7 +4546,7 @@ void VmaSmallVector::remove(size_t index) if (index < oldCount - 1) { // I know, this could be more optimal for case where memmove can be memcpy directly from m_DynamicArray to m_StaticArray. - T* const dataPtr = data(); + T *const dataPtr = data(); memmove(dataPtr + index, dataPtr + (index + 1), (oldCount - index - 1) * sizeof(T)); } resize(oldCount - 1); @@ -4458,15 +4560,16 @@ Allocator for objects of type T using a list of arrays (pools) to speed up allocation. Number of elements that can be allocated is not bounded because allocator can create multiple blocks. */ -template +template class VmaPoolAllocator { VMA_CLASS_NO_COPY_NO_MOVE(VmaPoolAllocator) public: - VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity); + VmaPoolAllocator(const VkAllocationCallbacks *pAllocationCallbacks, uint32_t firstBlockCapacity); ~VmaPoolAllocator(); - template T* Alloc(Types&&... 
args); - void Free(T* ptr); + template + T *Alloc(Types &&...args); + void Free(T *ptr); private: union Item @@ -4476,29 +4579,29 @@ class VmaPoolAllocator }; struct ItemBlock { - Item* pItems; + Item *pItems; uint32_t Capacity; uint32_t FirstFreeIndex; }; - const VkAllocationCallbacks* m_pAllocationCallbacks; + const VkAllocationCallbacks *m_pAllocationCallbacks; const uint32_t m_FirstBlockCapacity; VmaVector> m_ItemBlocks; - ItemBlock& CreateNewBlock(); + ItemBlock &CreateNewBlock(); }; #ifndef _VMA_POOL_ALLOCATOR_FUNCTIONS -template -VmaPoolAllocator::VmaPoolAllocator(const VkAllocationCallbacks* pAllocationCallbacks, uint32_t firstBlockCapacity) +template +VmaPoolAllocator::VmaPoolAllocator(const VkAllocationCallbacks *pAllocationCallbacks, uint32_t firstBlockCapacity) : m_pAllocationCallbacks(pAllocationCallbacks), - m_FirstBlockCapacity(firstBlockCapacity), - m_ItemBlocks(VmaStlAllocator(pAllocationCallbacks)) + m_FirstBlockCapacity(firstBlockCapacity), + m_ItemBlocks(VmaStlAllocator(pAllocationCallbacks)) { VMA_ASSERT(m_FirstBlockCapacity > 1); } -template +template VmaPoolAllocator::~VmaPoolAllocator() { for (size_t i = m_ItemBlocks.size(); i--;) @@ -4506,42 +4609,43 @@ VmaPoolAllocator::~VmaPoolAllocator() m_ItemBlocks.clear(); } -template -template T* VmaPoolAllocator::Alloc(Types&&... args) +template +template +T *VmaPoolAllocator::Alloc(Types &&...args) { - for (size_t i = m_ItemBlocks.size(); i--; ) + for (size_t i = m_ItemBlocks.size(); i--;) { - ItemBlock& block = m_ItemBlocks[i]; + ItemBlock &block = m_ItemBlocks[i]; // This block has some free items: Use first one. if (block.FirstFreeIndex != UINT32_MAX) { - Item* const pItem = &block.pItems[block.FirstFreeIndex]; + Item *const pItem = &block.pItems[block.FirstFreeIndex]; block.FirstFreeIndex = pItem->NextFreeIndex; - T* result = (T*)&pItem->Value; - new(result)T(std::forward(args)...); // Explicit constructor call. + T *result = (T *)&pItem->Value; + new (result) T(std::forward(args)...); // Explicit constructor call. return result; } } // No block has free item: Create new one and use it. - ItemBlock& newBlock = CreateNewBlock(); - Item* const pItem = &newBlock.pItems[0]; + ItemBlock &newBlock = CreateNewBlock(); + Item *const pItem = &newBlock.pItems[0]; newBlock.FirstFreeIndex = pItem->NextFreeIndex; - T* result = (T*)&pItem->Value; - new(result) T(std::forward(args)...); // Explicit constructor call. + T *result = (T *)&pItem->Value; + new (result) T(std::forward(args)...); // Explicit constructor call. return result; } -template -void VmaPoolAllocator::Free(T* ptr) +template +void VmaPoolAllocator::Free(T *ptr) { // Search all memory blocks to find ptr. - for (size_t i = m_ItemBlocks.size(); i--; ) + for (size_t i = m_ItemBlocks.size(); i--;) { - ItemBlock& block = m_ItemBlocks[i]; + ItemBlock &block = m_ItemBlocks[i]; // Casting to union. - Item* pItemPtr; + Item *pItemPtr; memcpy(&pItemPtr, &ptr, sizeof(pItemPtr)); // Check if pItemPtr is in address range of this block. @@ -4557,18 +4661,16 @@ void VmaPoolAllocator::Free(T* ptr) VMA_ASSERT(0 && "Pointer doesn't belong to this memory pool."); } -template -typename VmaPoolAllocator::ItemBlock& VmaPoolAllocator::CreateNewBlock() +template +typename VmaPoolAllocator::ItemBlock &VmaPoolAllocator::CreateNewBlock() { - const uint32_t newBlockCapacity = m_ItemBlocks.empty() ? - m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2; + const uint32_t newBlockCapacity = m_ItemBlocks.empty() ? 
-        m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;
+    const uint32_t newBlockCapacity = m_ItemBlocks.empty() ? m_FirstBlockCapacity : m_ItemBlocks.back().Capacity * 3 / 2;

     const ItemBlock newBlock =
-    {
-        vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
-        newBlockCapacity,
-        0
-    };
+        {
+            vma_new_array(m_pAllocationCallbacks, Item, newBlockCapacity),
+            newBlockCapacity,
+            0};

     m_ItemBlocks.push_back(newBlock);
@@ -4582,23 +4684,23 @@ typename VmaPoolAllocator<T>::ItemBlock& VmaPoolAllocator<T>::CreateNewBlock()
 #endif // _VMA_POOL_ALLOCATOR

 #ifndef _VMA_RAW_LIST
-template<typename T>
+template <typename T>
 struct VmaListItem
 {
-    VmaListItem* pPrev;
-    VmaListItem* pNext;
+    VmaListItem *pPrev;
+    VmaListItem *pNext;
     T Value;
 };

 // Doubly linked list.
-template<typename T>
+template <typename T>
 class VmaRawList
 {
     VMA_CLASS_NO_COPY_NO_MOVE(VmaRawList)
 public:
     typedef VmaListItem<T> ItemType;

-    VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks);
+    VmaRawList(const VkAllocationCallbacks *pAllocationCallbacks);
     // Intentionally not calling Clear, because that would be unnecessary
     // computations to return all items to m_ItemAllocator as free.
     ~VmaRawList() = default;

     size_t GetCount() const { return m_Count; }
     bool IsEmpty() const { return m_Count == 0; }

-    ItemType* Front() { return m_pFront; }
-    ItemType* Back() { return m_pBack; }
-    const ItemType* Front() const { return m_pFront; }
-    const ItemType* Back() const { return m_pBack; }
+    ItemType *Front() { return m_pFront; }
+    ItemType *Back() { return m_pBack; }
+    const ItemType *Front() const { return m_pFront; }
+    const ItemType *Back() const { return m_pBack; }

-    ItemType* PushFront();
-    ItemType* PushBack();
-    ItemType* PushFront(const T& value);
-    ItemType* PushBack(const T& value);
+    ItemType *PushFront();
+    ItemType *PushBack();
+    ItemType *PushFront(const T &value);
+    ItemType *PushBack(const T &value);
     void PopFront();
     void PopBack();

     // Item can be null - it means PushBack.
-    ItemType* InsertBefore(ItemType* pItem);
+    ItemType *InsertBefore(ItemType *pItem);
     // Item can be null - it means PushFront.
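CreateNewBlock above grows each successive block by a factor of 3/2 from m_FirstBlockCapacity, so per-item allocation cost is amortized while every block remains a single contiguous allocation. A quick standalone illustration of the resulting capacity sequence, assuming the first-block capacity of 128 that VmaRawList passes to its item allocator:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint32_t firstBlockCapacity = 128; // VmaRawList's m_ItemAllocator argument
        uint32_t capacity = firstBlockCapacity;
        for (int block = 0; block < 5; ++block)
        {
            std::printf("block %d capacity: %u\n", block, capacity);
            capacity = capacity * 3 / 2; // 128, 192, 288, 432, 648
        }
        return 0;
    }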
- ItemType* InsertAfter(ItemType* pItem); - ItemType* InsertBefore(ItemType* pItem, const T& value); - ItemType* InsertAfter(ItemType* pItem, const T& value); + ItemType *InsertAfter(ItemType *pItem); + ItemType *InsertBefore(ItemType *pItem, const T &value); + ItemType *InsertAfter(ItemType *pItem, const T &value); void Clear(); - void Remove(ItemType* pItem); + void Remove(ItemType *pItem); private: - const VkAllocationCallbacks* const m_pAllocationCallbacks; + const VkAllocationCallbacks *const m_pAllocationCallbacks; VmaPoolAllocator m_ItemAllocator; - ItemType* m_pFront; - ItemType* m_pBack; + ItemType *m_pFront; + ItemType *m_pBack; size_t m_Count; }; #ifndef _VMA_RAW_LIST_FUNCTIONS -template -VmaRawList::VmaRawList(const VkAllocationCallbacks* pAllocationCallbacks) +template +VmaRawList::VmaRawList(const VkAllocationCallbacks *pAllocationCallbacks) : m_pAllocationCallbacks(pAllocationCallbacks), - m_ItemAllocator(pAllocationCallbacks, 128), - m_pFront(VMA_NULL), - m_pBack(VMA_NULL), - m_Count(0) {} + m_ItemAllocator(pAllocationCallbacks, 128), + m_pFront(VMA_NULL), + m_pBack(VMA_NULL), + m_Count(0) +{ +} -template -VmaListItem* VmaRawList::PushFront() +template +VmaListItem *VmaRawList::PushFront() { - ItemType* const pNewItem = m_ItemAllocator.Alloc(); + ItemType *const pNewItem = m_ItemAllocator.Alloc(); pNewItem->pPrev = VMA_NULL; if (IsEmpty()) { @@ -4667,12 +4771,12 @@ VmaListItem* VmaRawList::PushFront() return pNewItem; } -template -VmaListItem* VmaRawList::PushBack() +template +VmaListItem *VmaRawList::PushBack() { - ItemType* const pNewItem = m_ItemAllocator.Alloc(); + ItemType *const pNewItem = m_ItemAllocator.Alloc(); pNewItem->pNext = VMA_NULL; - if(IsEmpty()) + if (IsEmpty()) { pNewItem->pPrev = VMA_NULL; m_pFront = pNewItem; @@ -4689,28 +4793,28 @@ VmaListItem* VmaRawList::PushBack() return pNewItem; } -template -VmaListItem* VmaRawList::PushFront(const T& value) +template +VmaListItem *VmaRawList::PushFront(const T &value) { - ItemType* const pNewItem = PushFront(); + ItemType *const pNewItem = PushFront(); pNewItem->Value = value; return pNewItem; } -template -VmaListItem* VmaRawList::PushBack(const T& value) +template +VmaListItem *VmaRawList::PushBack(const T &value) { - ItemType* const pNewItem = PushBack(); + ItemType *const pNewItem = PushBack(); pNewItem->Value = value; return pNewItem; } -template +template void VmaRawList::PopFront() { VMA_HEAVY_ASSERT(m_Count > 0); - ItemType* const pFrontItem = m_pFront; - ItemType* const pNextItem = pFrontItem->pNext; + ItemType *const pFrontItem = m_pFront; + ItemType *const pNextItem = pFrontItem->pNext; if (pNextItem != VMA_NULL) { pNextItem->pPrev = VMA_NULL; @@ -4720,13 +4824,13 @@ void VmaRawList::PopFront() --m_Count; } -template +template void VmaRawList::PopBack() { VMA_HEAVY_ASSERT(m_Count > 0); - ItemType* const pBackItem = m_pBack; - ItemType* const pPrevItem = pBackItem->pPrev; - if(pPrevItem != VMA_NULL) + ItemType *const pBackItem = m_pBack; + ItemType *const pPrevItem = pBackItem->pPrev; + if (pPrevItem != VMA_NULL) { pPrevItem->pNext = VMA_NULL; } @@ -4735,15 +4839,15 @@ void VmaRawList::PopBack() --m_Count; } -template +template void VmaRawList::Clear() { if (IsEmpty() == false) { - ItemType* pItem = m_pBack; + ItemType *pItem = m_pBack; while (pItem != VMA_NULL) { - ItemType* const pPrevItem = pItem->pPrev; + ItemType *const pPrevItem = pItem->pPrev; m_ItemAllocator.Free(pItem); pItem = pPrevItem; } @@ -4753,13 +4857,13 @@ void VmaRawList::Clear() } } -template -void VmaRawList::Remove(ItemType* pItem) 
+template +void VmaRawList::Remove(ItemType *pItem) { VMA_HEAVY_ASSERT(pItem != VMA_NULL); VMA_HEAVY_ASSERT(m_Count > 0); - if(pItem->pPrev != VMA_NULL) + if (pItem->pPrev != VMA_NULL) { pItem->pPrev->pNext = pItem->pNext; } @@ -4769,7 +4873,7 @@ void VmaRawList::Remove(ItemType* pItem) m_pFront = pItem->pNext; } - if(pItem->pNext != VMA_NULL) + if (pItem->pNext != VMA_NULL) { pItem->pNext->pPrev = pItem->pPrev; } @@ -4783,17 +4887,17 @@ void VmaRawList::Remove(ItemType* pItem) --m_Count; } -template -VmaListItem* VmaRawList::InsertBefore(ItemType* pItem) +template +VmaListItem *VmaRawList::InsertBefore(ItemType *pItem) { - if(pItem != VMA_NULL) + if (pItem != VMA_NULL) { - ItemType* const prevItem = pItem->pPrev; - ItemType* const newItem = m_ItemAllocator.Alloc(); + ItemType *const prevItem = pItem->pPrev; + ItemType *const newItem = m_ItemAllocator.Alloc(); newItem->pPrev = prevItem; newItem->pNext = pItem; pItem->pPrev = newItem; - if(prevItem != VMA_NULL) + if (prevItem != VMA_NULL) { prevItem->pNext = newItem; } @@ -4809,17 +4913,17 @@ VmaListItem* VmaRawList::InsertBefore(ItemType* pItem) return PushBack(); } -template -VmaListItem* VmaRawList::InsertAfter(ItemType* pItem) +template +VmaListItem *VmaRawList::InsertAfter(ItemType *pItem) { - if(pItem != VMA_NULL) + if (pItem != VMA_NULL) { - ItemType* const nextItem = pItem->pNext; - ItemType* const newItem = m_ItemAllocator.Alloc(); + ItemType *const nextItem = pItem->pNext; + ItemType *const newItem = m_ItemAllocator.Alloc(); newItem->pNext = nextItem; newItem->pPrev = pItem; pItem->pNext = newItem; - if(nextItem != VMA_NULL) + if (nextItem != VMA_NULL) { nextItem->pPrev = newItem; } @@ -4835,18 +4939,18 @@ VmaListItem* VmaRawList::InsertAfter(ItemType* pItem) return PushFront(); } -template -VmaListItem* VmaRawList::InsertBefore(ItemType* pItem, const T& value) +template +VmaListItem *VmaRawList::InsertBefore(ItemType *pItem, const T &value) { - ItemType* const newItem = InsertBefore(pItem); + ItemType *const newItem = InsertBefore(pItem); newItem->Value = value; return newItem; } -template -VmaListItem* VmaRawList::InsertAfter(ItemType* pItem, const T& value) +template +VmaListItem *VmaRawList::InsertAfter(ItemType *pItem, const T &value) { - ItemType* const newItem = InsertAfter(pItem); + ItemType *const newItem = InsertAfter(pItem); newItem->Value = value; return newItem; } @@ -4854,7 +4958,7 @@ VmaListItem* VmaRawList::InsertAfter(ItemType* pItem, const T& value) #endif // _VMA_RAW_LIST #ifndef _VMA_LIST -template +template class VmaList { VMA_CLASS_NO_COPY_NO_MOVE(VmaList) @@ -4867,112 +4971,240 @@ class VmaList { friend class const_iterator; friend class VmaList; + public: - iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} - iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} + iterator(const reverse_iterator &src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} - T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } - T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + T &operator*() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return m_pItem->Value; + } + T *operator->() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return &m_pItem->Value; + } - bool operator==(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } - bool operator!=(const iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == 
rhs.m_pList); return m_pItem != rhs.m_pItem; } + bool operator==(const iterator &rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem == rhs.m_pItem; + } + bool operator!=(const iterator &rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem != rhs.m_pItem; + } - iterator operator++(int) { iterator result = *this; ++*this; return result; } - iterator operator--(int) { iterator result = *this; --*this; return result; } + iterator operator++(int) + { + iterator result = *this; + ++*this; + return result; + } + iterator operator--(int) + { + iterator result = *this; + --*this; + return result; + } - iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; } - iterator& operator--(); + iterator &operator++() + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + m_pItem = m_pItem->pNext; + return *this; + } + iterator &operator--(); private: - VmaRawList* m_pList; - VmaListItem* m_pItem; + VmaRawList *m_pList; + VmaListItem *m_pItem; - iterator(VmaRawList* pList, VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + iterator(VmaRawList *pList, VmaListItem *pItem) : m_pList(pList), m_pItem(pItem) {} }; class reverse_iterator { friend class const_reverse_iterator; friend class VmaList; + public: reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} - reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + reverse_iterator(const iterator &src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} - T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } - T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + T &operator*() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return m_pItem->Value; + } + T *operator->() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return &m_pItem->Value; + } - bool operator==(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } - bool operator!=(const reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + bool operator==(const reverse_iterator &rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem == rhs.m_pItem; + } + bool operator!=(const reverse_iterator &rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem != rhs.m_pItem; + } - reverse_iterator operator++(int) { reverse_iterator result = *this; ++* this; return result; } - reverse_iterator operator--(int) { reverse_iterator result = *this; --* this; return result; } + reverse_iterator operator++(int) + { + reverse_iterator result = *this; + ++*this; + return result; + } + reverse_iterator operator--(int) + { + reverse_iterator result = *this; + --*this; + return result; + } - reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; } - reverse_iterator& operator--(); + reverse_iterator &operator++() + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + m_pItem = m_pItem->pPrev; + return *this; + } + reverse_iterator &operator--(); private: - VmaRawList* m_pList; - VmaListItem* m_pItem; + VmaRawList *m_pList; + VmaListItem *m_pItem; - reverse_iterator(VmaRawList* pList, VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + reverse_iterator(VmaRawList *pList, VmaListItem *pItem) : m_pList(pList), m_pItem(pItem) {} }; class const_iterator { friend class VmaList; + public: const_iterator() : m_pList(VMA_NULL), 
m_pItem(VMA_NULL) {} - const_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} - const_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + const_iterator(const iterator &src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + const_iterator(const reverse_iterator &src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} - iterator drop_const() { return { const_cast*>(m_pList), const_cast*>(m_pItem) }; } + iterator drop_const() { return {const_cast *>(m_pList), const_cast *>(m_pItem)}; } - const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } - const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + const T &operator*() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return m_pItem->Value; + } + const T *operator->() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return &m_pItem->Value; + } - bool operator==(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } - bool operator!=(const const_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + bool operator==(const const_iterator &rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem == rhs.m_pItem; + } + bool operator!=(const const_iterator &rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem != rhs.m_pItem; + } - const_iterator operator++(int) { const_iterator result = *this; ++* this; return result; } - const_iterator operator--(int) { const_iterator result = *this; --* this; return result; } + const_iterator operator++(int) + { + const_iterator result = *this; + ++*this; + return result; + } + const_iterator operator--(int) + { + const_iterator result = *this; + --*this; + return result; + } - const_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pNext; return *this; } - const_iterator& operator--(); + const_iterator &operator++() + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + m_pItem = m_pItem->pNext; + return *this; + } + const_iterator &operator--(); private: - const VmaRawList* m_pList; - const VmaListItem* m_pItem; + const VmaRawList *m_pList; + const VmaListItem *m_pItem; - const_iterator(const VmaRawList* pList, const VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + const_iterator(const VmaRawList *pList, const VmaListItem *pItem) : m_pList(pList), m_pItem(pItem) {} }; class const_reverse_iterator { friend class VmaList; + public: const_reverse_iterator() : m_pList(VMA_NULL), m_pItem(VMA_NULL) {} - const_reverse_iterator(const reverse_iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} - const_reverse_iterator(const iterator& src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + const_reverse_iterator(const reverse_iterator &src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} + const_reverse_iterator(const iterator &src) : m_pList(src.m_pList), m_pItem(src.m_pItem) {} - reverse_iterator drop_const() { return { const_cast*>(m_pList), const_cast*>(m_pItem) }; } + reverse_iterator drop_const() { return {const_cast *>(m_pList), const_cast *>(m_pItem)}; } - const T& operator*() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return m_pItem->Value; } - const T* operator->() const { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); return &m_pItem->Value; } + const T &operator*() const + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return m_pItem->Value; + } + const T *operator->() const + { + 
VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + return &m_pItem->Value; + } - bool operator==(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem == rhs.m_pItem; } - bool operator!=(const const_reverse_iterator& rhs) const { VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); return m_pItem != rhs.m_pItem; } + bool operator==(const const_reverse_iterator &rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem == rhs.m_pItem; + } + bool operator!=(const const_reverse_iterator &rhs) const + { + VMA_HEAVY_ASSERT(m_pList == rhs.m_pList); + return m_pItem != rhs.m_pItem; + } - const_reverse_iterator operator++(int) { const_reverse_iterator result = *this; ++* this; return result; } - const_reverse_iterator operator--(int) { const_reverse_iterator result = *this; --* this; return result; } + const_reverse_iterator operator++(int) + { + const_reverse_iterator result = *this; + ++*this; + return result; + } + const_reverse_iterator operator--(int) + { + const_reverse_iterator result = *this; + --*this; + return result; + } - const_reverse_iterator& operator++() { VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); m_pItem = m_pItem->pPrev; return *this; } - const_reverse_iterator& operator--(); + const_reverse_iterator &operator++() + { + VMA_HEAVY_ASSERT(m_pItem != VMA_NULL); + m_pItem = m_pItem->pPrev; + return *this; + } + const_reverse_iterator &operator--(); private: - const VmaRawList* m_pList; - const VmaListItem* m_pItem; + const VmaRawList *m_pList; + const VmaListItem *m_pItem; - const_reverse_iterator(const VmaRawList* pList, const VmaListItem* pItem) : m_pList(pList), m_pItem(pItem) {} + const_reverse_iterator(const VmaRawList *pList, const VmaListItem *pItem) : m_pList(pList), m_pItem(pItem) {} }; - VmaList(const AllocatorT& allocator) : m_RawList(allocator.m_pCallbacks) {} + VmaList(const AllocatorT &allocator) : m_RawList(allocator.m_pCallbacks) {} bool empty() const { return m_RawList.IsEmpty(); } size_t size() const { return m_RawList.GetCount(); } @@ -4995,8 +5227,8 @@ class VmaList const_reverse_iterator rbegin() const { return crbegin(); } const_reverse_iterator rend() const { return crend(); } - void push_back(const T& value) { m_RawList.PushBack(value); } - iterator insert(iterator it, const T& value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); } + void push_back(const T &value) { m_RawList.PushBack(value); } + iterator insert(iterator it, const T &value) { return iterator(&m_RawList, m_RawList.InsertBefore(it.m_pItem, value)); } void clear() { m_RawList.Clear(); } void erase(iterator it) { m_RawList.Remove(it.m_pItem); } @@ -5006,8 +5238,8 @@ class VmaList }; #ifndef _VMA_LIST_FUNCTIONS -template -typename VmaList::iterator& VmaList::iterator::operator--() +template +typename VmaList::iterator &VmaList::iterator::operator--() { if (m_pItem != VMA_NULL) { @@ -5021,8 +5253,8 @@ typename VmaList::iterator& VmaList::iterator::ope return *this; } -template -typename VmaList::reverse_iterator& VmaList::reverse_iterator::operator--() +template +typename VmaList::reverse_iterator &VmaList::reverse_iterator::operator--() { if (m_pItem != VMA_NULL) { @@ -5036,8 +5268,8 @@ typename VmaList::reverse_iterator& VmaList::rever return *this; } -template -typename VmaList::const_iterator& VmaList::const_iterator::operator--() +template +typename VmaList::const_iterator &VmaList::const_iterator::operator--() { if (m_pItem != VMA_NULL) { @@ -5051,8 +5283,8 @@ typename VmaList::const_iterator& VmaList::const_i return *this; } 
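All four operator-- definitions in these hunks share one idiom: a null item pointer encodes end(), so decrementing end() must fetch the raw list's Back() node instead of following a pPrev link. A minimal standalone sketch, with Node and List as hypothetical stand-ins for VmaListItem and VmaRawList:

    struct Node
    {
        Node *pPrev;
        Node *pNext;
        int Value;
    };

    struct List
    {
        Node *pBack;
    };

    // Returns the predecessor of "item" within "list", where nullptr means end().
    Node *Decrement(List &list, Node *item)
    {
        if (item != nullptr)
            return item->pPrev; // ordinary step backwards
        return list.pBack;      // --end() yields the last element
        // (VMA additionally asserts the list is not empty in this branch.)
    }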
-template<typename T, typename AllocatorT>
-typename VmaList<T, AllocatorT>::const_reverse_iterator& VmaList<T, AllocatorT>::const_reverse_iterator::operator--()
+template <typename T, typename AllocatorT>
+typename VmaList<T, AllocatorT>::const_reverse_iterator &VmaList<T, AllocatorT>::const_reverse_iterator::operator--()
 {
     if (m_pItem != VMA_NULL)
     {
@@ -5080,58 +5312,58 @@ struct MyItemTypeTraits
     static ItemType*& AccessNext(ItemType* item) { return item->myNextPtr; }
 };
 */
-template<typename ItemTypeTraits>
+template <typename ItemTypeTraits>
 class VmaIntrusiveLinkedList
 {
 public:
     typedef typename ItemTypeTraits::ItemType ItemType;
-    static ItemType* GetPrev(const ItemType* item) { return ItemTypeTraits::GetPrev(item); }
-    static ItemType* GetNext(const ItemType* item) { return ItemTypeTraits::GetNext(item); }
+    static ItemType *GetPrev(const ItemType *item) { return ItemTypeTraits::GetPrev(item); }
+    static ItemType *GetNext(const ItemType *item) { return ItemTypeTraits::GetNext(item); }

     // Movable, not copyable.
     VmaIntrusiveLinkedList() = default;
-    VmaIntrusiveLinkedList(VmaIntrusiveLinkedList && src);
-    VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList&) = delete;
-    VmaIntrusiveLinkedList& operator=(VmaIntrusiveLinkedList&& src);
-    VmaIntrusiveLinkedList& operator=(const VmaIntrusiveLinkedList&) = delete;
+    VmaIntrusiveLinkedList(VmaIntrusiveLinkedList &&src);
+    VmaIntrusiveLinkedList(const VmaIntrusiveLinkedList &) = delete;
+    VmaIntrusiveLinkedList &operator=(VmaIntrusiveLinkedList &&src);
+    VmaIntrusiveLinkedList &operator=(const VmaIntrusiveLinkedList &) = delete;
     ~VmaIntrusiveLinkedList() { VMA_HEAVY_ASSERT(IsEmpty()); }

     size_t GetCount() const { return m_Count; }
     bool IsEmpty() const { return m_Count == 0; }

-    ItemType* Front() { return m_Front; }
-    ItemType* Back() { return m_Back; }
-    const ItemType* Front() const { return m_Front; }
-    const ItemType* Back() const { return m_Back; }
+    ItemType *Front() { return m_Front; }
+    ItemType *Back() { return m_Back; }
+    const ItemType *Front() const { return m_Front; }
+    const ItemType *Back() const { return m_Back; }

-    void PushBack(ItemType* item);
-    void PushFront(ItemType* item);
-    ItemType* PopBack();
-    ItemType* PopFront();
+    void PushBack(ItemType *item);
+    void PushFront(ItemType *item);
+    ItemType *PopBack();
+    ItemType *PopFront();

     // MyItem can be null - it means PushBack.
-    void InsertBefore(ItemType* existingItem, ItemType* newItem);
+    void InsertBefore(ItemType *existingItem, ItemType *newItem);
     // MyItem can be null - it means PushFront.
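The ItemTypeTraits contract quoted in the comment above lets VmaIntrusiveLinkedList link nodes through pointers the nodes themselves carry, so the list never allocates. A sketch of a conforming traits type, with MyItem as a hypothetical node following the shape of that comment:

    struct MyItem
    {
        MyItem *myPrevPtr = nullptr;
        MyItem *myNextPtr = nullptr;
        int payload = 0;
    };

    struct MyItemTypeTraits
    {
        typedef MyItem ItemType;
        static ItemType *GetPrev(const ItemType *item) { return item->myPrevPtr; }
        static ItemType *GetNext(const ItemType *item) { return item->myNextPtr; }
        static ItemType *&AccessPrev(ItemType *item) { return item->myPrevPtr; }
        static ItemType *&AccessNext(ItemType *item) { return item->myNextPtr; }
    };

    // Usage sketch: nodes are owned elsewhere; the list only links them.
    //   VmaIntrusiveLinkedList<MyItemTypeTraits> list;
    //   MyItem a, b;
    //   list.PushBack(&a);
    //   list.PushBack(&b);
    //   list.RemoveAll(); // must be emptied before destruction (asserted)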
- void InsertAfter(ItemType* existingItem, ItemType* newItem); - void Remove(ItemType* item); + void InsertAfter(ItemType *existingItem, ItemType *newItem); + void Remove(ItemType *item); void RemoveAll(); private: - ItemType* m_Front = VMA_NULL; - ItemType* m_Back = VMA_NULL; + ItemType *m_Front = VMA_NULL; + ItemType *m_Back = VMA_NULL; size_t m_Count = 0; }; #ifndef _VMA_INTRUSIVE_LINKED_LIST_FUNCTIONS -template -VmaIntrusiveLinkedList::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList&& src) +template +VmaIntrusiveLinkedList::VmaIntrusiveLinkedList(VmaIntrusiveLinkedList &&src) : m_Front(src.m_Front), m_Back(src.m_Back), m_Count(src.m_Count) { src.m_Front = src.m_Back = VMA_NULL; src.m_Count = 0; } -template -VmaIntrusiveLinkedList& VmaIntrusiveLinkedList::operator=(VmaIntrusiveLinkedList&& src) +template +VmaIntrusiveLinkedList &VmaIntrusiveLinkedList::operator=(VmaIntrusiveLinkedList &&src) { if (&src != this) { @@ -5145,8 +5377,8 @@ VmaIntrusiveLinkedList& VmaIntrusiveLinkedList:: return *this; } -template -void VmaIntrusiveLinkedList::PushBack(ItemType* item) +template +void VmaIntrusiveLinkedList::PushBack(ItemType *item) { VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL); if (IsEmpty()) @@ -5164,8 +5396,8 @@ void VmaIntrusiveLinkedList::PushBack(ItemType* item) } } -template -void VmaIntrusiveLinkedList::PushFront(ItemType* item) +template +void VmaIntrusiveLinkedList::PushFront(ItemType *item) { VMA_HEAVY_ASSERT(ItemTypeTraits::GetPrev(item) == VMA_NULL && ItemTypeTraits::GetNext(item) == VMA_NULL); if (IsEmpty()) @@ -5183,12 +5415,12 @@ void VmaIntrusiveLinkedList::PushFront(ItemType* item) } } -template -typename VmaIntrusiveLinkedList::ItemType* VmaIntrusiveLinkedList::PopBack() +template +typename VmaIntrusiveLinkedList::ItemType *VmaIntrusiveLinkedList::PopBack() { VMA_HEAVY_ASSERT(m_Count > 0); - ItemType* const backItem = m_Back; - ItemType* const prevItem = ItemTypeTraits::GetPrev(backItem); + ItemType *const backItem = m_Back; + ItemType *const prevItem = ItemTypeTraits::GetPrev(backItem); if (prevItem != VMA_NULL) { ItemTypeTraits::AccessNext(prevItem) = VMA_NULL; @@ -5200,12 +5432,12 @@ typename VmaIntrusiveLinkedList::ItemType* VmaIntrusiveLinkedLis return backItem; } -template -typename VmaIntrusiveLinkedList::ItemType* VmaIntrusiveLinkedList::PopFront() +template +typename VmaIntrusiveLinkedList::ItemType *VmaIntrusiveLinkedList::PopFront() { VMA_HEAVY_ASSERT(m_Count > 0); - ItemType* const frontItem = m_Front; - ItemType* const nextItem = ItemTypeTraits::GetNext(frontItem); + ItemType *const frontItem = m_Front; + ItemType *const nextItem = ItemTypeTraits::GetNext(frontItem); if (nextItem != VMA_NULL) { ItemTypeTraits::AccessPrev(nextItem) = VMA_NULL; @@ -5217,13 +5449,13 @@ typename VmaIntrusiveLinkedList::ItemType* VmaIntrusiveLinkedLis return frontItem; } -template -void VmaIntrusiveLinkedList::InsertBefore(ItemType* existingItem, ItemType* newItem) +template +void VmaIntrusiveLinkedList::InsertBefore(ItemType *existingItem, ItemType *newItem) { VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL); if (existingItem != VMA_NULL) { - ItemType* const prevItem = ItemTypeTraits::GetPrev(existingItem); + ItemType *const prevItem = ItemTypeTraits::GetPrev(existingItem); ItemTypeTraits::AccessPrev(newItem) = prevItem; ItemTypeTraits::AccessNext(newItem) = existingItem; ItemTypeTraits::AccessPrev(existingItem) = newItem; @@ -5242,13 
+5474,13 @@ void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertBefore(ItemType* existingItem PushBack(newItem); } -template<typename ItemTypeTraits> -void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertAfter(ItemType* existingItem, ItemType* newItem) +template <typename ItemTypeTraits> +void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertAfter(ItemType *existingItem, ItemType *newItem) { VMA_HEAVY_ASSERT(newItem != VMA_NULL && ItemTypeTraits::GetPrev(newItem) == VMA_NULL && ItemTypeTraits::GetNext(newItem) == VMA_NULL); if (existingItem != VMA_NULL) { - ItemType* const nextItem = ItemTypeTraits::GetNext(existingItem); + ItemType *const nextItem = ItemTypeTraits::GetNext(existingItem); ItemTypeTraits::AccessNext(newItem) = nextItem; ItemTypeTraits::AccessPrev(newItem) = existingItem; ItemTypeTraits::AccessNext(existingItem) = newItem; @@ -5267,8 +5499,8 @@ void VmaIntrusiveLinkedList<ItemTypeTraits>::InsertAfter(ItemType* existingItem, return PushFront(newItem); } -template<typename ItemTypeTraits> -void VmaIntrusiveLinkedList<ItemTypeTraits>::Remove(ItemType* item) +template <typename ItemTypeTraits> +void VmaIntrusiveLinkedList<ItemTypeTraits>::Remove(ItemType *item) { VMA_HEAVY_ASSERT(item != VMA_NULL && m_Count > 0); if (ItemTypeTraits::GetPrev(item) != VMA_NULL) @@ -5295,15 +5527,15 @@ void VmaIntrusiveLinkedList<ItemTypeTraits>::Remove(ItemType* item) --m_Count; } -template<typename ItemTypeTraits> +template <typename ItemTypeTraits> void VmaIntrusiveLinkedList<ItemTypeTraits>::RemoveAll() { if (!IsEmpty()) { - ItemType* item = m_Back; + ItemType *item = m_Back; while (item != VMA_NULL) { - ItemType* const prevItem = ItemTypeTraits::AccessPrev(item); + ItemType *const prevItem = ItemTypeTraits::AccessPrev(item); ItemTypeTraits::AccessPrev(item) = VMA_NULL; ItemTypeTraits::AccessNext(item) = VMA_NULL; item = prevItem; @@ -5413,25 +5645,25 @@ void VmaMap<KeyT, ValueT>::erase(iterator it) class VmaStringBuilder { public: - VmaStringBuilder(const VkAllocationCallbacks* allocationCallbacks) : m_Data(VmaStlAllocator<char>(allocationCallbacks)) {} + VmaStringBuilder(const VkAllocationCallbacks *allocationCallbacks) : m_Data(VmaStlAllocator<char>(allocationCallbacks)) {} ~VmaStringBuilder() = default; size_t GetLength() const { return m_Data.size(); } - const char* GetData() const { return m_Data.data(); } + const char *GetData() const { return m_Data.data(); } void AddNewLine() { Add('\n'); } void Add(char ch) { m_Data.push_back(ch); } - void Add(const char* pStr); + void Add(const char *pStr); void AddNumber(uint32_t num); void AddNumber(uint64_t num); - void AddPointer(const void* ptr); + void AddPointer(const void *ptr); private: - VmaVector< char, VmaStlAllocator<char> > m_Data; + VmaVector<char, VmaStlAllocator<char>> m_Data; }; #ifndef _VMA_STRING_BUILDER_FUNCTIONS -void VmaStringBuilder::Add(const char* pStr) +void VmaStringBuilder::Add(const char *pStr) { const size_t strLen = strlen(pStr); if (strLen > 0) @@ -5446,7 +5678,7 @@ void VmaStringBuilder::AddNumber(uint32_t num) { char buf[11]; buf[10] = '\0'; - char* p = &buf[10]; + char *p = &buf[10]; do { *--p = '0' + (char)(num % 10); @@ -5459,7 +5691,7 @@ void VmaStringBuilder::AddNumber(uint64_t num) { char buf[21]; buf[20] = '\0'; - char* p = &buf[20]; + char *p = &buf[20]; do { *--p = '0' + (char)(num % 10); @@ -5468,7 +5700,7 @@ void VmaStringBuilder::AddNumber(uint64_t num) Add(p); } -void VmaStringBuilder::AddPointer(const void* ptr) +void VmaStringBuilder::AddPointer(const void *ptr) { char buf[21]; VmaPtrToStr(buf, sizeof(buf), ptr); @@ -5487,7 +5719,7 @@ class VmaJsonWriter VMA_CLASS_NO_COPY_NO_MOVE(VmaJsonWriter) public: // sb - string builder to write the document to. Must remain alive for the whole lifetime of this object.
- VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb); + VmaJsonWriter(const VkAllocationCallbacks *pAllocationCallbacks, VmaStringBuilder &sb); ~VmaJsonWriter(); // Begins object by writing "{". @@ -5506,23 +5738,23 @@ class VmaJsonWriter // Writes a string value inside "". // pStr can contain any ANSI characters, including '"', new line etc. - they will be properly escaped. - void WriteString(const char* pStr); + void WriteString(const char *pStr); // Begins writing a string value. // Call BeginString, ContinueString, ContinueString, ..., EndString instead of // WriteString to conveniently build the string content incrementally, made of // parts including numbers. - void BeginString(const char* pStr = VMA_NULL); + void BeginString(const char *pStr = VMA_NULL); // Posts next part of an open string. - void ContinueString(const char* pStr); + void ContinueString(const char *pStr); // Posts next part of an open string. The number is converted to decimal characters. void ContinueString(uint32_t n); void ContinueString(uint64_t n); // Posts next part of an open string. Pointer value is converted to characters // using "%p" formatting - shown as hexadecimal number, e.g.: 000000081276Ad00 - void ContinueString_Pointer(const void* ptr); + void ContinueString_Pointer(const void *ptr); // Ends writing a string value by writing '"'. - void EndString(const char* pStr = VMA_NULL); + void EndString(const char *pStr = VMA_NULL); // Writes a number value. void WriteNumber(uint32_t n); @@ -5545,22 +5777,24 @@ class VmaJsonWriter bool singleLineMode; }; - static const char* const INDENT; + static const char *const INDENT; - VmaStringBuilder& m_SB; - VmaVector< StackItem, VmaStlAllocator<StackItem> > m_Stack; + VmaStringBuilder &m_SB; + VmaVector<StackItem, VmaStlAllocator<StackItem>> m_Stack; bool m_InsideString; void BeginValue(bool isString); void WriteIndent(bool oneLess = false); }; -const char* const VmaJsonWriter::INDENT = "  "; +const char *const VmaJsonWriter::INDENT = "  "; #ifndef _VMA_JSON_WRITER_FUNCTIONS -VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks* pAllocationCallbacks, VmaStringBuilder& sb) +VmaJsonWriter::VmaJsonWriter(const VkAllocationCallbacks *pAllocationCallbacks, VmaStringBuilder &sb) : m_SB(sb), - m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)), - m_InsideString(false) {} + m_Stack(VmaStlAllocator<StackItem>(pAllocationCallbacks)), + m_InsideString(false) +{ +} VmaJsonWriter::~VmaJsonWriter() { @@ -5618,13 +5852,13 @@ void VmaJsonWriter::EndArray() m_Stack.pop_back(); } -void VmaJsonWriter::WriteString(const char* pStr) +void VmaJsonWriter::WriteString(const char *pStr) { BeginString(pStr); EndString(); } -void VmaJsonWriter::BeginString(const char* pStr) +void VmaJsonWriter::BeginString(const char *pStr) { VMA_ASSERT(!m_InsideString); @@ -5637,7 +5871,7 @@ void VmaJsonWriter::BeginString(const char* pStr) } } -void VmaJsonWriter::ContinueString(const char* pStr) +void VmaJsonWriter::ContinueString(const char *pStr) { VMA_ASSERT(m_InsideString); @@ -5657,26 +5891,27 @@ void VmaJsonWriter::ContinueString(const char* pStr) { m_SB.Add(ch); } - else switch (ch) - { - case '\b': - m_SB.Add("\\b"); - break; - case '\f': - m_SB.Add("\\f"); - break; - case '\n': - m_SB.Add("\\n"); - break; - case '\r': - m_SB.Add("\\r"); - break; - case '\t': - m_SB.Add("\\t"); - break; - default: - VMA_ASSERT(0 && "Character not currently supported."); - } + else + switch (ch) + { + case '\b': + m_SB.Add("\\b"); + break; + case '\f': + m_SB.Add("\\f"); + break; + case '\n': + m_SB.Add("\\n"); + break; + case '\r':
m_SB.Add("\\r"); + break; + case '\t': + m_SB.Add("\\t"); + break; + default: + VMA_ASSERT(0 && "Character not currently supported."); + } } } @@ -5692,13 +5927,13 @@ void VmaJsonWriter::ContinueString(uint64_t n) m_SB.AddNumber(n); } -void VmaJsonWriter::ContinueString_Pointer(const void* ptr) +void VmaJsonWriter::ContinueString_Pointer(const void *ptr) { VMA_ASSERT(m_InsideString); m_SB.AddPointer(ptr); } -void VmaJsonWriter::EndString(const char* pStr) +void VmaJsonWriter::EndString(const char *pStr) { VMA_ASSERT(m_InsideString); if (pStr != VMA_NULL && pStr[0] != '\0') @@ -5741,7 +5976,7 @@ void VmaJsonWriter::BeginValue(bool isString) { if (!m_Stack.empty()) { - StackItem& currItem = m_Stack.back(); + StackItem &currItem = m_Stack.back(); if (currItem.type == COLLECTION_TYPE_OBJECT && currItem.valueCount % 2 == 0) { @@ -5785,7 +6020,7 @@ void VmaJsonWriter::WriteIndent(bool oneLess) } #endif // _VMA_JSON_WRITER_FUNCTIONS -static void VmaPrintDetailedStatistics(VmaJsonWriter& json, const VmaDetailedStatistics& stat) +static void VmaPrintDetailedStatistics(VmaJsonWriter &json, const VmaDetailedStatistics &stat) { json.BeginObject(); @@ -5833,10 +6068,10 @@ class VmaMappingHysteresis bool PostMap() { #if VMA_MAPPING_HYSTERESIS_ENABLED - if(m_ExtraMapping == 0) + if (m_ExtraMapping == 0) { ++m_MajorCounter; - if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING) + if (m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING) { m_ExtraMapping = 1; m_MajorCounter = 0; @@ -5854,7 +6089,7 @@ class VmaMappingHysteresis void PostUnmap() { #if VMA_MAPPING_HYSTERESIS_ENABLED - if(m_ExtraMapping == 0) + if (m_ExtraMapping == 0) ++m_MajorCounter; else // m_ExtraMapping == 1 PostMinorCounter(); @@ -5865,7 +6100,7 @@ class VmaMappingHysteresis void PostAlloc() { #if VMA_MAPPING_HYSTERESIS_ENABLED - if(m_ExtraMapping == 1) + if (m_ExtraMapping == 1) ++m_MajorCounter; else // m_ExtraMapping == 0 PostMinorCounter(); @@ -5877,10 +6112,10 @@ class VmaMappingHysteresis bool PostFree() { #if VMA_MAPPING_HYSTERESIS_ENABLED - if(m_ExtraMapping == 1) + if (m_ExtraMapping == 1) { ++m_MajorCounter; - if(m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING && + if (m_MajorCounter >= COUNTER_MIN_EXTRA_MAPPING && m_MajorCounter > m_MinorCounter + 1) { m_ExtraMapping = 0; @@ -5904,11 +6139,11 @@ class VmaMappingHysteresis void PostMinorCounter() { - if(m_MinorCounter < m_MajorCounter) + if (m_MinorCounter < m_MajorCounter) { ++m_MinorCounter; } - else if(m_MajorCounter > 0) + else if (m_MajorCounter > 0) { --m_MajorCounter; --m_MinorCounter; @@ -5931,7 +6166,7 @@ class VmaDeviceMemoryBlock { VMA_CLASS_NO_COPY_NO_MOVE(VmaDeviceMemoryBlock) public: - VmaBlockMetadata* m_pMetadata; + VmaBlockMetadata *m_pMetadata; VmaDeviceMemoryBlock(VmaAllocator hAllocator); ~VmaDeviceMemoryBlock(); @@ -5953,7 +6188,7 @@ class VmaDeviceMemoryBlock VkDeviceMemory GetDeviceMemory() const { return m_hMemory; } uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } uint32_t GetId() const { return m_Id; } - void* GetMappedData() const { return m_pMappedData; } + void *GetMappedData() const { return m_pMappedData; } uint32_t GetMapRefCount() const { return m_MapCount; } // Call when allocation/free was made from m_pMetadata. @@ -5966,7 +6201,7 @@ class VmaDeviceMemoryBlock VkResult CheckCorruption(VmaAllocator hAllocator); // ppData can be null. 
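The map/unmap reference counting introduced just above (m_MapCount, together with the VmaMappingHysteresis heuristics) is what the public mapping API builds on; as a caller-side sketch, assuming allocator, allocation, srcData, and srcSize were created elsewhere:

void *pData = VMA_NULL;
if (vmaMapMemory(allocator, allocation, &pData) == VK_SUCCESS)
{
    memcpy(pData, srcData, (size_t)srcSize);
    // Every successful map must be paired with an unmap; the hysteresis above
    // may keep the underlying VkDeviceMemory mapped to absorb frequent cycles.
    vmaUnmapMemory(allocator, allocation);
}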
- VkResult Map(VmaAllocator hAllocator, uint32_t count, void** ppData); + VkResult Map(VmaAllocator hAllocator, uint32_t count, void **ppData); void Unmap(VmaAllocator hAllocator, uint32_t count); VkResult WriteMagicValueAfterAllocation(VmaAllocator hAllocator, VkDeviceSize allocOffset, VkDeviceSize allocSize); @@ -5977,13 +6212,13 @@ class VmaDeviceMemoryBlock const VmaAllocation hAllocation, VkDeviceSize allocationLocalOffset, VkBuffer hBuffer, - const void* pNext); + const void *pNext); VkResult BindImageMemory( const VmaAllocator hAllocator, const VmaAllocation hAllocation, VkDeviceSize allocationLocalOffset, VkImage hImage, - const void* pNext); + const void *pNext); private: VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. @@ -5999,7 +6234,7 @@ class VmaDeviceMemoryBlock VMA_MUTEX m_MapAndBindMutex; VmaMappingHysteresis m_MappingHysteresis; uint32_t m_MapCount; - void* m_pMappedData; + void *m_pMappedData; }; #endif // _VMA_DEVICE_MEMORY_BLOCK @@ -6010,8 +6245,8 @@ struct VmaAllocation_T enum FLAGS { - FLAG_PERSISTENT_MAP = 0x01, - FLAG_MAPPING_ALLOWED = 0x02, + FLAG_PERSISTENT_MAP = 0x01, + FLAG_MAPPING_ALLOWED = 0x02, }; public: @@ -6027,7 +6262,7 @@ struct VmaAllocation_T ~VmaAllocation_T(); void InitBlockAllocation( - VmaDeviceMemoryBlock* block, + VmaDeviceMemoryBlock *block, VmaAllocHandle allocHandle, VkDeviceSize alignment, VkDeviceSize size, @@ -6040,48 +6275,55 @@ struct VmaAllocation_T uint32_t memoryTypeIndex, VkDeviceMemory hMemory, VmaSuballocationType suballocationType, - void* pMappedData, + void *pMappedData, VkDeviceSize size); ALLOCATION_TYPE GetType() const { return (ALLOCATION_TYPE)m_Type; } VkDeviceSize GetAlignment() const { return m_Alignment; } VkDeviceSize GetSize() const { return m_Size; } - void* GetUserData() const { return m_pUserData; } - const char* GetName() const { return m_pName; } + void *GetUserData() const { return m_pUserData; } + const char *GetName() const { return m_pName; } VmaSuballocationType GetSuballocationType() const { return (VmaSuballocationType)m_SuballocationType; } - VmaDeviceMemoryBlock* GetBlock() const { VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); return m_BlockAllocation.m_Block; } + VmaDeviceMemoryBlock *GetBlock() const + { + VMA_ASSERT(m_Type == ALLOCATION_TYPE_BLOCK); + return m_BlockAllocation.m_Block; + } uint32_t GetMemoryTypeIndex() const { return m_MemoryTypeIndex; } bool IsPersistentMap() const { return (m_Flags & FLAG_PERSISTENT_MAP) != 0; } bool IsMappingAllowed() const { return (m_Flags & FLAG_MAPPING_ALLOWED) != 0; } - void SetUserData(VmaAllocator hAllocator, void* pUserData) { m_pUserData = pUserData; } - void SetName(VmaAllocator hAllocator, const char* pName); + void SetUserData(VmaAllocator hAllocator, void *pUserData) { m_pUserData = pUserData; } + void SetName(VmaAllocator hAllocator, const char *pName); void FreeName(VmaAllocator hAllocator); uint8_t SwapBlockAllocation(VmaAllocator hAllocator, VmaAllocation allocation); VmaAllocHandle GetAllocHandle() const; VkDeviceSize GetOffset() const; VmaPool GetParentPool() const; VkDeviceMemory GetMemory() const; - void* GetMappedData() const; + void *GetMappedData() const; void BlockAllocMap(); void BlockAllocUnmap(); - VkResult DedicatedAllocMap(VmaAllocator hAllocator, void** ppData); + VkResult DedicatedAllocMap(VmaAllocator hAllocator, void **ppData); void DedicatedAllocUnmap(VmaAllocator hAllocator); #if VMA_STATS_STRING_ENABLED - uint32_t GetBufferImageUsage() const { return m_BufferImageUsage; } + uint32_t GetBufferImageUsage() 
const + { + return m_BufferImageUsage; + } void InitBufferImageUsage(uint32_t bufferImageUsage); - void PrintParameters(class VmaJsonWriter& json) const; + void PrintParameters(class VmaJsonWriter &json) const; #endif private: // Allocation out of VmaDeviceMemoryBlock. struct BlockAllocation { - VmaDeviceMemoryBlock* m_Block; + VmaDeviceMemoryBlock *m_Block; VmaAllocHandle m_AllocHandle; }; // Allocation for an object that has its own private VkDeviceMemory. @@ -6089,9 +6331,9 @@ struct VmaAllocation_T { VmaPool m_hParentPool; // VK_NULL_HANDLE if not belongs to custom pool. VkDeviceMemory m_hMemory; - void* m_pMappedData; // Not null means memory is mapped. - VmaAllocation_T* m_Prev; - VmaAllocation_T* m_Next; + void *m_pMappedData; // Not null means memory is mapped. + VmaAllocation_T *m_Prev; + VmaAllocation_T *m_Next; }; union { @@ -6103,10 +6345,10 @@ struct VmaAllocation_T VkDeviceSize m_Alignment; VkDeviceSize m_Size; - void* m_pUserData; - char* m_pName; + void *m_pUserData; + char *m_pName; uint32_t m_MemoryTypeIndex; - uint8_t m_Type; // ALLOCATION_TYPE + uint8_t m_Type; // ALLOCATION_TYPE uint8_t m_SuballocationType; // VmaSuballocationType // Reference counter for vmaMapMemory()/vmaUnmapMemory(). uint8_t m_MapCount; @@ -6122,22 +6364,22 @@ struct VmaDedicatedAllocationListItemTraits { typedef VmaAllocation_T ItemType; - static ItemType* GetPrev(const ItemType* item) + static ItemType *GetPrev(const ItemType *item) { VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); return item->m_DedicatedAllocation.m_Prev; } - static ItemType* GetNext(const ItemType* item) + static ItemType *GetNext(const ItemType *item) { VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); return item->m_DedicatedAllocation.m_Next; } - static ItemType*& AccessPrev(ItemType* item) + static ItemType *&AccessPrev(ItemType *item) { VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); return item->m_DedicatedAllocation.m_Prev; } - static ItemType*& AccessNext(ItemType* item) + static ItemType *&AccessNext(ItemType *item) { VMA_HEAVY_ASSERT(item->GetType() == VmaAllocation_T::ALLOCATION_TYPE_DEDICATED); return item->m_DedicatedAllocation.m_Next; @@ -6160,11 +6402,11 @@ class VmaDedicatedAllocationList void Init(bool useMutex) { m_UseMutex = useMutex; } bool Validate(); - void AddDetailedStatistics(VmaDetailedStatistics& inoutStats); - void AddStatistics(VmaStatistics& inoutStats); + void AddDetailedStatistics(VmaDetailedStatistics &inoutStats); + void AddStatistics(VmaStatistics &inoutStats); #if VMA_STATS_STRING_ENABLED // Writes JSON array with the list of allocations. 
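The JSON array mentioned in this comment (declaration continues below) is emitted with the incremental VmaJsonWriter API reformatted in the earlier hunks; a small usage sketch, where allocationCallbacks may be a valid VkAllocationCallbacks pointer or VMA_NULL:

VmaStringBuilder sb(allocationCallbacks);
VmaJsonWriter json(allocationCallbacks, sb);
json.BeginObject();
json.WriteString("Name");    // inside an object, every other string is a key
json.BeginString("block #"); // one string value assembled from several parts
json.ContinueString(7u);
json.EndString();
json.EndObject();
// sb.GetData()/sb.GetLength() now hold the serialized document.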
- void BuildStatsString(VmaJsonWriter& json); + void BuildStatsString(VmaJsonWriter &json); #endif bool IsEmpty(); @@ -6197,7 +6439,7 @@ bool VmaDedicatedAllocationList::Validate() size_t actualCount = 0; VmaMutexLockRead lock(m_Mutex, m_UseMutex); for (VmaAllocation alloc = m_AllocationList.Front(); - alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) + alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) { ++actualCount; } @@ -6206,9 +6448,9 @@ bool VmaDedicatedAllocationList::Validate() return true; } -void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) +void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics &inoutStats) { - for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item)) + for (auto *item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item)) { const VkDeviceSize size = item->GetSize(); inoutStats.statistics.blockCount++; @@ -6217,7 +6459,7 @@ void VmaDedicatedAllocationList::AddDetailedStatistics(VmaDetailedStatistics& in } } -void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats) +void VmaDedicatedAllocationList::AddStatistics(VmaStatistics &inoutStats) { VmaMutexLockRead lock(m_Mutex, m_UseMutex); @@ -6225,7 +6467,7 @@ void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats) inoutStats.blockCount += allocCount; inoutStats.allocationCount += allocCount; - for(auto* item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item)) + for (auto *item = m_AllocationList.Front(); item != nullptr; item = DedicatedAllocationLinkedList::GetNext(item)) { const VkDeviceSize size = item->GetSize(); inoutStats.blockBytes += size; @@ -6234,12 +6476,12 @@ void VmaDedicatedAllocationList::AddStatistics(VmaStatistics& inoutStats) } #if VMA_STATS_STRING_ENABLED -void VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter& json) +void VmaDedicatedAllocationList::BuildStatsString(VmaJsonWriter &json) { VmaMutexLockRead lock(m_Mutex, m_UseMutex); json.BeginArray(); for (VmaAllocation alloc = m_AllocationList.Front(); - alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) + alloc != VMA_NULL; alloc = m_AllocationList.GetNext(alloc)) { json.BeginObject(true); alloc->PrintParameters(json); @@ -6278,14 +6520,14 @@ struct VmaSuballocation { VkDeviceSize offset; VkDeviceSize size; - void* userData; + void *userData; VmaSuballocationType type; }; // Comparator for offsets. 
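The offset comparators declared next are what keep suballocation vectors binary-searchable; a sketch of the lookup pattern they enable, using the VmaBinaryFindSorted helper that appears later in this diff (vec and queryOffset are illustrative):

VmaSuballocation ref = {};
ref.offset = queryOffset; // only the key field participates in the comparison
SuballocationVectorType::const_iterator it =
    VmaBinaryFindSorted(vec.begin(), vec.end(), ref, VmaSuballocationOffsetLess());
if (it != vec.end())
{
    // *it is the suballocation whose offset equals queryOffset
}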
struct VmaSuballocationOffsetLess { - bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + bool operator()(const VmaSuballocation &lhs, const VmaSuballocation &rhs) const { return lhs.offset < rhs.offset; } @@ -6293,7 +6535,7 @@ struct VmaSuballocationOffsetLess struct VmaSuballocationOffsetGreater { - bool operator()(const VmaSuballocation& lhs, const VmaSuballocation& rhs) const + bool operator()(const VmaSuballocation &lhs, const VmaSuballocation &rhs) const { return lhs.offset > rhs.offset; } @@ -6302,13 +6544,13 @@ struct VmaSuballocationOffsetGreater struct VmaSuballocationItemSizeLess { bool operator()(const VmaSuballocationList::iterator lhs, - const VmaSuballocationList::iterator rhs) const + const VmaSuballocationList::iterator rhs) const { return lhs->size < rhs->size; } bool operator()(const VmaSuballocationList::iterator lhs, - VkDeviceSize rhsSize) const + VkDeviceSize rhsSize) const { return lhs->size < rhsSize; } @@ -6325,7 +6567,7 @@ struct VmaAllocationRequest VmaAllocHandle allocHandle; VkDeviceSize size; VmaSuballocationList::iterator item; - void* customData; + void *customData; uint64_t algorithmData; VmaAllocationRequestType type; }; @@ -6341,8 +6583,8 @@ class VmaBlockMetadata VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata) public: // pAllocationCallbacks, if not null, must be owned externally - alive and unchanged for the whole lifetime of this object. - VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, - VkDeviceSize bufferImageGranularity, bool isVirtual); + VmaBlockMetadata(const VkAllocationCallbacks *pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); virtual ~VmaBlockMetadata() = default; virtual void Init(VkDeviceSize size) { m_Size = size; } @@ -6356,20 +6598,20 @@ class VmaBlockMetadata virtual VkDeviceSize GetSumFreeSize() const = 0; // Returns true if this block is empty - contains only single free suballocation. virtual bool IsEmpty() const = 0; - virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) = 0; + virtual void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo &outInfo) = 0; virtual VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const = 0; - virtual void* GetAllocationUserData(VmaAllocHandle allocHandle) const = 0; + virtual void *GetAllocationUserData(VmaAllocHandle allocHandle) const = 0; virtual VmaAllocHandle GetAllocationListBegin() const = 0; virtual VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const = 0; virtual VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const = 0; // Shouldn't modify blockCount. - virtual void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const = 0; - virtual void AddStatistics(VmaStatistics& inoutStats) const = 0; + virtual void AddDetailedStatistics(VmaDetailedStatistics &inoutStats) const = 0; + virtual void AddStatistics(VmaStatistics &inoutStats) const = 0; #if VMA_STATS_STRING_ENABLED - virtual void PrintDetailedMap(class VmaJsonWriter& json) const = 0; + virtual void PrintDetailedMap(class VmaJsonWriter &json) const = 0; #endif // Tries to find a place for suballocation with given parameters inside this block. @@ -6382,15 +6624,15 @@ class VmaBlockMetadata VmaSuballocationType allocType, // Always one of VMA_ALLOCATION_CREATE_STRATEGY_* or VMA_ALLOCATION_INTERNAL_STRATEGY_* flags. 
uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) = 0; + VmaAllocationRequest *pAllocationRequest) = 0; - virtual VkResult CheckCorruption(const void* pBlockData) = 0; + virtual VkResult CheckCorruption(const void *pBlockData) = 0; // Makes actual allocation based on request. Request must already be checked and valid. virtual void Alloc( - const VmaAllocationRequest& request, + const VmaAllocationRequest &request, VmaSuballocationType type, - void* userData) = 0; + void *userData) = 0; // Frees suballocation assigned to given memory region. virtual void Free(VmaAllocHandle allocHandle) = 0; @@ -6399,45 +6641,47 @@ class VmaBlockMetadata // Careful! Don't call it if there are VmaAllocation objects owned by userData of cleared allocations! virtual void Clear() = 0; - virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) = 0; + virtual void SetAllocationUserData(VmaAllocHandle allocHandle, void *userData) = 0; virtual void DebugLogAllAllocations() const = 0; protected: - const VkAllocationCallbacks* GetAllocationCallbacks() const { return m_pAllocationCallbacks; } + const VkAllocationCallbacks *GetAllocationCallbacks() const { return m_pAllocationCallbacks; } VkDeviceSize GetBufferImageGranularity() const { return m_BufferImageGranularity; } VkDeviceSize GetDebugMargin() const { return VkDeviceSize(IsVirtual() ? 0 : VMA_DEBUG_MARGIN); } - void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const; + void DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void *userData) const; #if VMA_STATS_STRING_ENABLED // mapRefCount == UINT32_MAX means unspecified. - void PrintDetailedMap_Begin(class VmaJsonWriter& json, - VkDeviceSize unusedBytes, - size_t allocationCount, - size_t unusedRangeCount) const; - void PrintDetailedMap_Allocation(class VmaJsonWriter& json, - VkDeviceSize offset, VkDeviceSize size, void* userData) const; - void PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, - VkDeviceSize offset, - VkDeviceSize size) const; - void PrintDetailedMap_End(class VmaJsonWriter& json) const; + void PrintDetailedMap_Begin(class VmaJsonWriter &json, + VkDeviceSize unusedBytes, + size_t allocationCount, + size_t unusedRangeCount) const; + void PrintDetailedMap_Allocation(class VmaJsonWriter &json, + VkDeviceSize offset, VkDeviceSize size, void *userData) const; + void PrintDetailedMap_UnusedRange(class VmaJsonWriter &json, + VkDeviceSize offset, + VkDeviceSize size) const; + void PrintDetailedMap_End(class VmaJsonWriter &json) const; #endif private: VkDeviceSize m_Size; - const VkAllocationCallbacks* m_pAllocationCallbacks; + const VkAllocationCallbacks *m_pAllocationCallbacks; const VkDeviceSize m_BufferImageGranularity; const bool m_IsVirtual; }; #ifndef _VMA_BLOCK_METADATA_FUNCTIONS -VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks* pAllocationCallbacks, - VkDeviceSize bufferImageGranularity, bool isVirtual) +VmaBlockMetadata::VmaBlockMetadata(const VkAllocationCallbacks *pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) : m_Size(0), - m_pAllocationCallbacks(pAllocationCallbacks), - m_BufferImageGranularity(bufferImageGranularity), - m_IsVirtual(isVirtual) {} + m_pAllocationCallbacks(pAllocationCallbacks), + m_BufferImageGranularity(bufferImageGranularity), + m_IsVirtual(isVirtual) +{ +} -void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void* userData) const +void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size, void 
*userData) const { if (IsVirtual()) { @@ -6449,25 +6693,24 @@ void VmaBlockMetadata::DebugLogAllocation(VkDeviceSize offset, VkDeviceSize size VmaAllocation allocation = reinterpret_cast<VmaAllocation>(userData); userData = allocation->GetUserData(); - const char* name = allocation->GetName(); + const char *name = allocation->GetName(); #if VMA_STATS_STRING_ENABLED VMA_DEBUG_LOG_FORMAT("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %s; Usage: %u", - offset, size, userData, name ? name : "vma_empty", - VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()], - allocation->GetBufferImageUsage()); + offset, size, userData, name ? name : "vma_empty", + VMA_SUBALLOCATION_TYPE_NAMES[allocation->GetSuballocationType()], + allocation->GetBufferImageUsage()); #else VMA_DEBUG_LOG_FORMAT("UNFREED ALLOCATION; Offset: %llu; Size: %llu; UserData: %p; Name: %s; Type: %u", - offset, size, userData, name ? name : "vma_empty", - (uint32_t)allocation->GetSuballocationType()); + offset, size, userData, name ? name : "vma_empty", + (uint32_t)allocation->GetSuballocationType()); #endif // VMA_STATS_STRING_ENABLED } - } #if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json, - VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const +void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter &json, + VkDeviceSize unusedBytes, size_t allocationCount, size_t unusedRangeCount) const { json.WriteString("TotalBytes"); json.WriteNumber(GetSize()); @@ -6485,8 +6728,8 @@ void VmaBlockMetadata::PrintDetailedMap_Begin(class VmaJsonWriter& json, json.BeginArray(); } -void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json, - VkDeviceSize offset, VkDeviceSize size, void* userData) const +void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter &json, + VkDeviceSize offset, VkDeviceSize size, void *userData) const { json.BeginObject(true); @@ -6513,8 +6756,8 @@ void VmaBlockMetadata::PrintDetailedMap_Allocation(class VmaJsonWriter& json, json.EndObject(); } -void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, - VkDeviceSize offset, VkDeviceSize size) const +void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter &json, + VkDeviceSize offset, VkDeviceSize size) const { json.BeginObject(true); @@ -6530,7 +6773,7 @@ void VmaBlockMetadata::PrintDetailedMap_UnusedRange(class VmaJsonWriter& json, json.EndObject(); } -void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter& json) const +void VmaBlockMetadata::PrintDetailedMap_End(class VmaJsonWriter &json) const { json.EndArray(); } @@ -6545,8 +6788,8 @@ class VmaBlockBufferImageGranularity final public: struct ValidationContext { - const VkAllocationCallbacks* allocCallbacks; - uint16_t* pageAllocs; + const VkAllocationCallbacks *allocCallbacks; + uint16_t *pageAllocs; }; VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity); @@ -6554,28 +6797,28 @@ class VmaBlockBufferImageGranularity final bool IsEnabled() const { return m_BufferImageGranularity > MAX_LOW_BUFFER_IMAGE_GRANULARITY; } - void Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size); // Before destroying object you must call free it's memory - void Destroy(const VkAllocationCallbacks* pAllocationCallbacks); + void Init(const VkAllocationCallbacks *pAllocationCallbacks, VkDeviceSize size); + void Destroy(const VkAllocationCallbacks *pAllocationCallbacks); void RoundupAllocRequest(VmaSuballocationType
allocType, - VkDeviceSize& inOutAllocSize, - VkDeviceSize& inOutAllocAlignment) const; + VkDeviceSize &inOutAllocSize, + VkDeviceSize &inOutAllocAlignment) const; - bool CheckConflictAndAlignUp(VkDeviceSize& inOutAllocOffset, - VkDeviceSize allocSize, - VkDeviceSize blockOffset, - VkDeviceSize blockSize, - VmaSuballocationType allocType) const; + bool CheckConflictAndAlignUp(VkDeviceSize &inOutAllocOffset, + VkDeviceSize allocSize, + VkDeviceSize blockOffset, + VkDeviceSize blockSize, + VmaSuballocationType allocType) const; void AllocPages(uint8_t allocType, VkDeviceSize offset, VkDeviceSize size); void FreePages(VkDeviceSize offset, VkDeviceSize size); void Clear(); - ValidationContext StartValidation(const VkAllocationCallbacks* pAllocationCallbacks, - bool isVirutal) const; - bool Validate(ValidationContext& ctx, VkDeviceSize offset, VkDeviceSize size) const; - bool FinishValidation(ValidationContext& ctx) const; + ValidationContext StartValidation(const VkAllocationCallbacks *pAllocationCallbacks, + bool isVirutal) const; + bool Validate(ValidationContext &ctx, VkDeviceSize offset, VkDeviceSize size) const; + bool FinishValidation(ValidationContext &ctx) const; private: static const uint16_t MAX_LOW_BUFFER_IMAGE_GRANULARITY = 256; @@ -6588,27 +6831,29 @@ class VmaBlockBufferImageGranularity final VkDeviceSize m_BufferImageGranularity; uint32_t m_RegionCount; - RegionInfo* m_RegionInfo; + RegionInfo *m_RegionInfo; uint32_t GetStartPage(VkDeviceSize offset) const { return OffsetToPageIndex(offset & ~(m_BufferImageGranularity - 1)); } uint32_t GetEndPage(VkDeviceSize offset, VkDeviceSize size) const { return OffsetToPageIndex((offset + size - 1) & ~(m_BufferImageGranularity - 1)); } uint32_t OffsetToPageIndex(VkDeviceSize offset) const; - void AllocPage(RegionInfo& page, uint8_t allocType); + void AllocPage(RegionInfo &page, uint8_t allocType); }; #ifndef _VMA_BLOCK_BUFFER_IMAGE_GRANULARITY_FUNCTIONS VmaBlockBufferImageGranularity::VmaBlockBufferImageGranularity(VkDeviceSize bufferImageGranularity) : m_BufferImageGranularity(bufferImageGranularity), - m_RegionCount(0), - m_RegionInfo(VMA_NULL) {} + m_RegionCount(0), + m_RegionInfo(VMA_NULL) +{ +} VmaBlockBufferImageGranularity::~VmaBlockBufferImageGranularity() { VMA_ASSERT(m_RegionInfo == VMA_NULL && "Free not called before destroying object!"); } -void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocationCallbacks, VkDeviceSize size) +void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks *pAllocationCallbacks, VkDeviceSize size) { if (IsEnabled()) { @@ -6618,7 +6863,7 @@ void VmaBlockBufferImageGranularity::Init(const VkAllocationCallbacks* pAllocati } } -void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAllocationCallbacks) +void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks *pAllocationCallbacks) { if (m_RegionInfo) { @@ -6628,8 +6873,8 @@ void VmaBlockBufferImageGranularity::Destroy(const VkAllocationCallbacks* pAlloc } void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType allocType, - VkDeviceSize& inOutAllocSize, - VkDeviceSize& inOutAllocAlignment) const + VkDeviceSize &inOutAllocSize, + VkDeviceSize &inOutAllocAlignment) const { if (m_BufferImageGranularity > 1 && m_BufferImageGranularity <= MAX_LOW_BUFFER_IMAGE_GRANULARITY) @@ -6644,11 +6889,11 @@ void VmaBlockBufferImageGranularity::RoundupAllocRequest(VmaSuballocationType al } } -bool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize& 
inOutAllocOffset, - VkDeviceSize allocSize, - VkDeviceSize blockOffset, - VkDeviceSize blockSize, - VmaSuballocationType allocType) const +bool VmaBlockBufferImageGranularity::CheckConflictAndAlignUp(VkDeviceSize &inOutAllocOffset, + VkDeviceSize allocSize, + VkDeviceSize blockOffset, + VkDeviceSize blockSize, + VmaSuballocationType allocType) const { if (IsEnabled()) { @@ -6710,9 +6955,9 @@ void VmaBlockBufferImageGranularity::Clear() } VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity::StartValidation( - const VkAllocationCallbacks* pAllocationCallbacks, bool isVirutal) const + const VkAllocationCallbacks *pAllocationCallbacks, bool isVirutal) const { - ValidationContext ctx{ pAllocationCallbacks, VMA_NULL }; + ValidationContext ctx{pAllocationCallbacks, VMA_NULL}; if (!isVirutal && IsEnabled()) { ctx.pageAllocs = vma_new_array(pAllocationCallbacks, uint16_t, m_RegionCount); @@ -6721,8 +6966,8 @@ VmaBlockBufferImageGranularity::ValidationContext VmaBlockBufferImageGranularity return ctx; } -bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx, - VkDeviceSize offset, VkDeviceSize size) const +bool VmaBlockBufferImageGranularity::Validate(ValidationContext &ctx, + VkDeviceSize offset, VkDeviceSize size) const { if (IsEnabled()) { @@ -6740,7 +6985,7 @@ bool VmaBlockBufferImageGranularity::Validate(ValidationContext& ctx, return true; } -bool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext& ctx) const +bool VmaBlockBufferImageGranularity::FinishValidation(ValidationContext &ctx) const { // Check proper page structure if (IsEnabled()) @@ -6762,7 +7007,7 @@ uint32_t VmaBlockBufferImageGranularity::OffsetToPageIndex(VkDeviceSize offset) return static_cast<uint32_t>(offset >> VMA_BITSCAN_MSB(m_BufferImageGranularity)); } -void VmaBlockBufferImageGranularity::AllocPage(RegionInfo& page, uint8_t allocType) +void VmaBlockBufferImageGranularity::AllocPage(RegionInfo &page, uint8_t allocType) { // When current alloc type is free then it can be overridden by new type if (page.allocCount == 0 || (page.allocCount > 0 && page.allocType == VMA_SUBALLOCATION_TYPE_FREE)) @@ -7620,8 +7865,8 @@ class VmaBlockMetadata_Linear : public VmaBlockMetadata { VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_Linear) public: - VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks, - VkDeviceSize bufferImageGranularity, bool isVirtual); + VmaBlockMetadata_Linear(const VkAllocationCallbacks *pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); virtual ~VmaBlockMetadata_Linear() = default; VkDeviceSize GetSumFreeSize() const override { return m_SumFreeSize; } @@ -7633,11 +7878,11 @@ class VmaBlockMetadata_Linear : public VmaBlockMetadata size_t GetAllocationCount() const override; size_t GetFreeRegionsCount() const override; - void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; - void AddStatistics(VmaStatistics& inoutStats) const override; + void AddDetailedStatistics(VmaDetailedStatistics &inoutStats) const override; + void AddStatistics(VmaStatistics &inoutStats) const override; #if VMA_STATS_STRING_ENABLED - void PrintDetailedMap(class VmaJsonWriter& json) const override; + void PrintDetailedMap(class VmaJsonWriter &json) const override; #endif bool CreateAllocationRequest( @@ -7646,23 +7891,23 @@ class VmaBlockMetadata_Linear : public VmaBlockMetadata bool upperAddress, VmaSuballocationType allocType, uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) override; + VmaAllocationRequest
*pAllocationRequest) override; - VkResult CheckCorruption(const void* pBlockData) override; + VkResult CheckCorruption(const void *pBlockData) override; void Alloc( - const VmaAllocationRequest& request, + const VmaAllocationRequest &request, VmaSuballocationType type, - void* userData) override; + void *userData) override; void Free(VmaAllocHandle allocHandle) override; - void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override; - void* GetAllocationUserData(VmaAllocHandle allocHandle) const override; + void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo &outInfo) override; + void *GetAllocationUserData(VmaAllocHandle allocHandle) const override; VmaAllocHandle GetAllocationListBegin() const override; VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override; VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override; void Clear() override; - void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override; + void SetAllocationUserData(VmaAllocHandle allocHandle, void *userData) override; void DebugLogAllAllocations() const override; private: @@ -7702,12 +7947,12 @@ class VmaBlockMetadata_Linear : public VmaBlockMetadata // Number of items in 2nd vector with hAllocation = null. size_t m_2ndNullItemsCount; - SuballocationVectorType& AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } - SuballocationVectorType& AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } - const SuballocationVectorType& AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } - const SuballocationVectorType& AccessSuballocations2nd() const { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } + SuballocationVectorType &AccessSuballocations1st() { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } + SuballocationVectorType &AccessSuballocations2nd() { return m_1stVectorIndex ? m_Suballocations0 : m_Suballocations1; } + const SuballocationVectorType &AccessSuballocations1st() const { return m_1stVectorIndex ? m_Suballocations1 : m_Suballocations0; } + const SuballocationVectorType &AccessSuballocations2nd() const { return m_1stVectorIndex ? 
m_Suballocations0 : m_Suballocations1; } - VmaSuballocation& FindSuballocation(VkDeviceSize offset) const; + VmaSuballocation &FindSuballocation(VkDeviceSize offset) const; bool ShouldCompact1st() const; void CleanupAfterFree(); @@ -7716,27 +7961,29 @@ class VmaBlockMetadata_Linear : public VmaBlockMetadata VkDeviceSize allocAlignment, VmaSuballocationType allocType, uint32_t strategy, - VmaAllocationRequest* pAllocationRequest); + VmaAllocationRequest *pAllocationRequest); bool CreateAllocationRequest_UpperAddress( VkDeviceSize allocSize, VkDeviceSize allocAlignment, VmaSuballocationType allocType, uint32_t strategy, - VmaAllocationRequest* pAllocationRequest); + VmaAllocationRequest *pAllocationRequest); }; #ifndef _VMA_BLOCK_METADATA_LINEAR_FUNCTIONS -VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks* pAllocationCallbacks, - VkDeviceSize bufferImageGranularity, bool isVirtual) +VmaBlockMetadata_Linear::VmaBlockMetadata_Linear(const VkAllocationCallbacks *pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), - m_SumFreeSize(0), - m_Suballocations0(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)), - m_Suballocations1(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)), - m_1stVectorIndex(0), - m_2ndVectorMode(SECOND_VECTOR_EMPTY), - m_1stNullItemsBeginCount(0), - m_1stNullItemsMiddleCount(0), - m_2ndNullItemsCount(0) {} + m_SumFreeSize(0), + m_Suballocations0(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)), + m_Suballocations1(VmaStlAllocator<VmaSuballocation>(pAllocationCallbacks)), + m_1stVectorIndex(0), + m_2ndVectorMode(SECOND_VECTOR_EMPTY), + m_1stNullItemsBeginCount(0), + m_1stNullItemsMiddleCount(0), + m_2ndNullItemsCount(0) +{ +} void VmaBlockMetadata_Linear::Init(VkDeviceSize size) { @@ -7746,13 +7993,13 @@ void VmaBlockMetadata_Linear::Init(VkDeviceSize size) bool VmaBlockMetadata_Linear::Validate() const { - const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const SuballocationVectorType &suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); VMA_VALIDATE(suballocations2nd.empty() == (m_2ndVectorMode == SECOND_VECTOR_EMPTY)); VMA_VALIDATE(!suballocations1st.empty() || - suballocations2nd.empty() || - m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER); + suballocations2nd.empty() || + m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER); if (!suballocations1st.empty()) { @@ -7781,7 +8028,7 @@ bool VmaBlockMetadata_Linear::Validate() const size_t nullItem2ndCount = 0; for (size_t i = 0; i < suballoc2ndCount; ++i) { - const VmaSuballocation& suballoc = suballocations2nd[i]; + const VmaSuballocation &suballoc = suballocations2nd[i]; const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); VmaAllocation const alloc = (VmaAllocation)suballoc.userData; @@ -7813,16 +8060,16 @@ bool VmaBlockMetadata_Linear::Validate() const for (size_t i = 0; i < m_1stNullItemsBeginCount; ++i) { - const VmaSuballocation& suballoc = suballocations1st[i]; + const VmaSuballocation &suballoc = suballocations1st[i]; VMA_VALIDATE(suballoc.type == VMA_SUBALLOCATION_TYPE_FREE && - suballoc.userData == VMA_NULL); + suballoc.userData == VMA_NULL); } size_t nullItem1stCount = m_1stNullItemsBeginCount; for (size_t i = m_1stNullItemsBeginCount; i < suballoc1stCount; ++i) { - const VmaSuballocation
&suballoc = suballocations1st[i]; const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); VmaAllocation const alloc = (VmaAllocation)suballoc.userData; @@ -7855,9 +8102,9 @@ bool VmaBlockMetadata_Linear::Validate() const { const size_t suballoc2ndCount = suballocations2nd.size(); size_t nullItem2ndCount = 0; - for (size_t i = suballoc2ndCount; i--; ) + for (size_t i = suballoc2ndCount; i--;) { - const VmaSuballocation& suballoc = suballocations2nd[i]; + const VmaSuballocation &suballoc = suballocations2nd[i]; const bool currFree = (suballoc.type == VMA_SUBALLOCATION_TYPE_FREE); VmaAllocation const alloc = (VmaAllocation)suballoc.userData; @@ -7896,7 +8143,7 @@ bool VmaBlockMetadata_Linear::Validate() const size_t VmaBlockMetadata_Linear::GetAllocationCount() const { return AccessSuballocations1st().size() - m_1stNullItemsBeginCount - m_1stNullItemsMiddleCount + - AccessSuballocations2nd().size() - m_2ndNullItemsCount; + AccessSuballocations2nd().size() - m_2ndNullItemsCount; } size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const @@ -7906,11 +8153,11 @@ size_t VmaBlockMetadata_Linear::GetFreeRegionsCount() const return SIZE_MAX; } -void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const +void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics &inoutStats) const { const VkDeviceSize size = GetSize(); - const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const SuballocationVectorType &suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); const size_t suballoc1stCount = suballocations1st.size(); const size_t suballoc2ndCount = suballocations2nd.size(); @@ -7927,7 +8174,7 @@ void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inout { // Find next non-null allocation or move nextAllocIndex to the end. while (nextAlloc2ndIndex < suballoc2ndCount && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) { ++nextAlloc2ndIndex; } @@ -7935,7 +8182,7 @@ void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inout // Found non-null allocation. if (nextAlloc2ndIndex < suballoc2ndCount) { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex]; // 1. Process free space before this allocation. if (lastOffset < suballoc.offset) @@ -7976,7 +8223,7 @@ void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inout { // Find next non-null allocation or move nextAllocIndex to the end. while (nextAlloc1stIndex < suballoc1stCount && - suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) { ++nextAlloc1stIndex; } @@ -7984,7 +8231,7 @@ void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inout // Found non-null allocation. if (nextAlloc1stIndex < suballoc1stCount) { - const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + const VmaSuballocation &suballoc = suballocations1st[nextAlloc1stIndex]; // 1. Process free space before this allocation. 
if (lastOffset < suballoc.offset) @@ -8024,7 +8271,7 @@ void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inout { // Find next non-null allocation or move nextAllocIndex to the end. while (nextAlloc2ndIndex != SIZE_MAX && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) { --nextAlloc2ndIndex; } @@ -8032,7 +8279,7 @@ void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inout // Found non-null allocation. if (nextAlloc2ndIndex != SIZE_MAX) { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex]; // 1. Process free space before this allocation. if (lastOffset < suballoc.offset) @@ -8067,10 +8314,10 @@ void VmaBlockMetadata_Linear::AddDetailedStatistics(VmaDetailedStatistics& inout } } -void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const +void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics &inoutStats) const { - const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const SuballocationVectorType &suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); const VkDeviceSize size = GetSize(); const size_t suballoc1stCount = suballocations1st.size(); const size_t suballoc2ndCount = suballocations2nd.size(); @@ -8089,7 +8336,7 @@ void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const { // Find next non-null allocation or move nextAlloc2ndIndex to the end. while (nextAlloc2ndIndex < suballoc2ndCount && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) { ++nextAlloc2ndIndex; } @@ -8097,7 +8344,7 @@ void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const // Found non-null allocation. if (nextAlloc2ndIndex < suballoc2ndCount) { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex]; // Process this allocation. // There is allocation with suballoc.offset, suballoc.size. @@ -8123,7 +8370,7 @@ void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const { // Find next non-null allocation or move nextAllocIndex to the end. while (nextAlloc1stIndex < suballoc1stCount && - suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) { ++nextAlloc1stIndex; } @@ -8131,7 +8378,7 @@ void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const // Found non-null allocation. if (nextAlloc1stIndex < suballoc1stCount) { - const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + const VmaSuballocation &suballoc = suballocations1st[nextAlloc1stIndex]; // Process this allocation. // There is allocation with suballoc.offset, suballoc.size. @@ -8156,7 +8403,7 @@ void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const { // Find next non-null allocation or move nextAlloc2ndIndex to the end. 
while (nextAlloc2ndIndex != SIZE_MAX && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) { --nextAlloc2ndIndex; } @@ -8164,7 +8411,7 @@ void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const // Found non-null allocation. if (nextAlloc2ndIndex != SIZE_MAX) { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex]; // Process this allocation. // There is allocation with suballoc.offset, suballoc.size. @@ -8185,11 +8432,11 @@ void VmaBlockMetadata_Linear::AddStatistics(VmaStatistics& inoutStats) const } #if VMA_STATS_STRING_ENABLED -void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const +void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter &json) const { const VkDeviceSize size = GetSize(); - const SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + const SuballocationVectorType &suballocations1st = AccessSuballocations1st(); + const SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); const size_t suballoc1stCount = suballocations1st.size(); const size_t suballoc2ndCount = suballocations2nd.size(); @@ -8209,7 +8456,7 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const { // Find next non-null allocation or move nextAlloc2ndIndex to the end. while (nextAlloc2ndIndex < suballoc2ndCount && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) { ++nextAlloc2ndIndex; } @@ -8217,7 +8464,7 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const // Found non-null allocation. if (nextAlloc2ndIndex < suballoc2ndCount) { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex]; // 1. Process free space before this allocation. if (lastOffset < suballoc.offset) @@ -8258,7 +8505,7 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const { // Find next non-null allocation or move nextAllocIndex to the end. while (nextAlloc1stIndex < suballoc1stCount && - suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) { ++nextAlloc1stIndex; } @@ -8266,7 +8513,7 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const // Found non-null allocation. if (nextAlloc1stIndex < suballoc1stCount) { - const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + const VmaSuballocation &suballoc = suballocations1st[nextAlloc1stIndex]; // 1. Process free space before this allocation. if (lastOffset < suballoc.offset) @@ -8305,7 +8552,7 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const { // Find next non-null allocation or move nextAlloc2ndIndex to the end. while (nextAlloc2ndIndex != SIZE_MAX && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) { --nextAlloc2ndIndex; } @@ -8313,7 +8560,7 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const // Found non-null allocation. 
if (nextAlloc2ndIndex != SIZE_MAX) { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex]; // 1. Process free space before this allocation. if (lastOffset < suballoc.offset) @@ -8360,7 +8607,7 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const { // Find next non-null allocation or move nextAlloc2ndIndex to the end. while (nextAlloc2ndIndex < suballoc2ndCount && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) { ++nextAlloc2ndIndex; } @@ -8368,7 +8615,7 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const // Found non-null allocation. if (nextAlloc2ndIndex < suballoc2ndCount) { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex]; // 1. Process free space before this allocation. if (lastOffset < suballoc.offset) @@ -8407,7 +8654,7 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const { // Find next non-null allocation or move nextAllocIndex to the end. while (nextAlloc1stIndex < suballoc1stCount && - suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) + suballocations1st[nextAlloc1stIndex].userData == VMA_NULL) { ++nextAlloc1stIndex; } @@ -8415,7 +8662,7 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const // Found non-null allocation. if (nextAlloc1stIndex < suballoc1stCount) { - const VmaSuballocation& suballoc = suballocations1st[nextAlloc1stIndex]; + const VmaSuballocation &suballoc = suballocations1st[nextAlloc1stIndex]; // 1. Process free space before this allocation. if (lastOffset < suballoc.offset) @@ -8455,7 +8702,7 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const { // Find next non-null allocation or move nextAlloc2ndIndex to the end. while (nextAlloc2ndIndex != SIZE_MAX && - suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) + suballocations2nd[nextAlloc2ndIndex].userData == VMA_NULL) { --nextAlloc2ndIndex; } @@ -8463,7 +8710,7 @@ void VmaBlockMetadata_Linear::PrintDetailedMap(class VmaJsonWriter& json) const // Found non-null allocation. if (nextAlloc2ndIndex != SIZE_MAX) { - const VmaSuballocation& suballoc = suballocations2nd[nextAlloc2ndIndex]; + const VmaSuballocation &suballoc = suballocations2nd[nextAlloc2ndIndex]; // 1. Process free space before this allocation. if (lastOffset < suballoc.offset) @@ -8507,27 +8754,26 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest( bool upperAddress, VmaSuballocationType allocType, uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) + VmaAllocationRequest *pAllocationRequest) { VMA_ASSERT(allocSize > 0); VMA_ASSERT(allocType != VMA_SUBALLOCATION_TYPE_FREE); VMA_ASSERT(pAllocationRequest != VMA_NULL); VMA_HEAVY_ASSERT(Validate()); pAllocationRequest->size = allocSize; - return upperAddress ? - CreateAllocationRequest_UpperAddress( - allocSize, allocAlignment, allocType, strategy, pAllocationRequest) : - CreateAllocationRequest_LowerAddress( - allocSize, allocAlignment, allocType, strategy, pAllocationRequest); + return upperAddress ? 
CreateAllocationRequest_UpperAddress( + allocSize, allocAlignment, allocType, strategy, pAllocationRequest) + : CreateAllocationRequest_LowerAddress( + allocSize, allocAlignment, allocType, strategy, pAllocationRequest); } -VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData) +VkResult VmaBlockMetadata_Linear::CheckCorruption(const void *pBlockData) { VMA_ASSERT(!IsVirtual()); - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType &suballocations1st = AccessSuballocations1st(); for (size_t i = m_1stNullItemsBeginCount, count = suballocations1st.size(); i < count; ++i) { - const VmaSuballocation& suballoc = suballocations1st[i]; + const VmaSuballocation &suballoc = suballocations1st[i]; if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) { if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) @@ -8538,10 +8784,10 @@ VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData) } } - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); for (size_t i = 0, count = suballocations2nd.size(); i < count; ++i) { - const VmaSuballocation& suballoc = suballocations2nd[i]; + const VmaSuballocation &suballoc = suballocations2nd[i]; if (suballoc.type != VMA_SUBALLOCATION_TYPE_FREE) { if (!VmaValidateMagicValue(pBlockData, suballoc.offset + suballoc.size)) @@ -8556,30 +8802,30 @@ VkResult VmaBlockMetadata_Linear::CheckCorruption(const void* pBlockData) } void VmaBlockMetadata_Linear::Alloc( - const VmaAllocationRequest& request, + const VmaAllocationRequest &request, VmaSuballocationType type, - void* userData) + void *userData) { const VkDeviceSize offset = (VkDeviceSize)request.allocHandle - 1; - const VmaSuballocation newSuballoc = { offset, request.size, userData, type }; + const VmaSuballocation newSuballoc = {offset, request.size, userData, type}; switch (request.type) { case VmaAllocationRequestType::UpperAddress: { VMA_ASSERT(m_2ndVectorMode != SECOND_VECTOR_RING_BUFFER && - "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer."); - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + "CRITICAL ERROR: Trying to use linear allocator as double stack while it was already used as ring buffer."); + SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); suballocations2nd.push_back(newSuballoc); m_2ndVectorMode = SECOND_VECTOR_DOUBLE_STACK; } break; case VmaAllocationRequestType::EndOf1st: { - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType &suballocations1st = AccessSuballocations1st(); VMA_ASSERT(suballocations1st.empty() || - offset >= suballocations1st.back().offset + suballocations1st.back().size); + offset >= suballocations1st.back().offset + suballocations1st.back().size); // Check if it fits before the end of the block. VMA_ASSERT(offset + request.size <= GetSize()); @@ -8588,11 +8834,11 @@ void VmaBlockMetadata_Linear::Alloc( break; case VmaAllocationRequestType::EndOf2nd: { - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); + SuballocationVectorType &suballocations1st = AccessSuballocations1st(); // New allocation at the end of 2-part ring buffer, so before first allocation from 1st vector. 
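The UpperAddress branch above is what backs double-stack usage of the linear algorithm (the EndOf2nd case continues below); through VMA's public virtual-block API this looks roughly like the following sketch, which is illustrative and not code from this patch:

VmaVirtualBlockCreateInfo blockCI = {};
blockCI.size = 1024 * 1024;
blockCI.flags = VMA_VIRTUAL_BLOCK_CREATE_LINEAR_ALGORITHM_BIT; // selects VmaBlockMetadata_Linear
VmaVirtualBlock block;
vmaCreateVirtualBlock(&blockCI, &block);

VmaVirtualAllocationCreateInfo allocCI = {};
allocCI.size = 4096;
VmaVirtualAllocation lowerAlloc, upperAlloc;
vmaVirtualAllocate(block, &allocCI, &lowerAlloc, VMA_NULL);      // EndOf1st request

allocCI.flags = VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT; // UpperAddress request
vmaVirtualAllocate(block, &allocCI, &upperAlloc, VMA_NULL);

vmaVirtualFree(block, upperAlloc);
vmaVirtualFree(block, lowerAlloc);
vmaDestroyVirtualBlock(block);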
VMA_ASSERT(!suballocations1st.empty() && - offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset); - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + offset + request.size <= suballocations1st[m_1stNullItemsBeginCount].offset); + SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); switch (m_2ndVectorMode) { @@ -8624,14 +8870,14 @@ void VmaBlockMetadata_Linear::Alloc( void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle) { - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + SuballocationVectorType &suballocations1st = AccessSuballocations1st(); + SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); VkDeviceSize offset = (VkDeviceSize)allocHandle - 1; if (!suballocations1st.empty()) { // First allocation: Mark it as next empty at the beginning. - VmaSuballocation& firstSuballoc = suballocations1st[m_1stNullItemsBeginCount]; + VmaSuballocation &firstSuballoc = suballocations1st[m_1stNullItemsBeginCount]; if (firstSuballoc.offset == offset) { firstSuballoc.type = VMA_SUBALLOCATION_TYPE_FREE; @@ -8647,7 +8893,7 @@ void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle) if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { - VmaSuballocation& lastSuballoc = suballocations2nd.back(); + VmaSuballocation &lastSuballoc = suballocations2nd.back(); if (lastSuballoc.offset == offset) { m_SumFreeSize += lastSuballoc.size; @@ -8659,7 +8905,7 @@ void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle) // Last allocation in 1st vector. else if (m_2ndVectorMode == SECOND_VECTOR_EMPTY) { - VmaSuballocation& lastSuballoc = suballocations1st.back(); + VmaSuballocation &lastSuballoc = suballocations1st.back(); if (lastSuballoc.offset == offset) { m_SumFreeSize += lastSuballoc.size; @@ -8694,9 +8940,7 @@ void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle) if (m_2ndVectorMode != SECOND_VECTOR_EMPTY) { // Item from the middle of 2nd vector. - const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? - VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : - VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater()); + const SuballocationVectorType::iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? 
VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
 if (it != suballocations2nd.end())
 {
 it->type = VMA_SUBALLOCATION_TYPE_FREE;
@@ -8711,15 +8955,15 @@ void VmaBlockMetadata_Linear::Free(VmaAllocHandle allocHandle)
 VMA_ASSERT(0 && "Allocation to free not found in linear allocator!");
 }
-void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo)
+void VmaBlockMetadata_Linear::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo &outInfo)
 {
 outInfo.offset = (VkDeviceSize)allocHandle - 1;
- VmaSuballocation& suballoc = FindSuballocation(outInfo.offset);
+ VmaSuballocation &suballoc = FindSuballocation(outInfo.offset);
 outInfo.size = suballoc.size;
 outInfo.pUserData = suballoc.userData;
 }
-void* VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const
+void *VmaBlockMetadata_Linear::GetAllocationUserData(VmaAllocHandle allocHandle) const
 {
 return FindSuballocation((VkDeviceSize)allocHandle - 1).userData;
 }
@@ -8757,29 +9001,29 @@ void VmaBlockMetadata_Linear::Clear()
 m_2ndNullItemsCount = 0;
 }
-void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData)
+void VmaBlockMetadata_Linear::SetAllocationUserData(VmaAllocHandle allocHandle, void *userData)
 {
- VmaSuballocation& suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1);
+ VmaSuballocation &suballoc = FindSuballocation((VkDeviceSize)allocHandle - 1);
 suballoc.userData = userData;
 }
 void VmaBlockMetadata_Linear::DebugLogAllAllocations() const
 {
- const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
+ const SuballocationVectorType &suballocations1st = AccessSuballocations1st();
 for (auto it = suballocations1st.begin() + m_1stNullItemsBeginCount; it != suballocations1st.end(); ++it)
 if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
 DebugLogAllocation(it->offset, it->size, it->userData);
- const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+ const SuballocationVectorType &suballocations2nd = AccessSuballocations2nd();
 for (auto it = suballocations2nd.begin(); it != suballocations2nd.end(); ++it)
 if (it->type != VMA_SUBALLOCATION_TYPE_FREE)
 DebugLogAllocation(it->offset, it->size, it->userData);
 }
-VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const
+VmaSuballocation &VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset) const
 {
- const SuballocationVectorType& suballocations1st = AccessSuballocations1st();
- const SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+ const SuballocationVectorType &suballocations1st = AccessSuballocations1st();
+ const SuballocationVectorType &suballocations2nd = AccessSuballocations2nd();
 VmaSuballocation refSuballoc;
 refSuballoc.offset = offset;
@@ -8794,24 +9038,22 @@ VmaSuballocation& VmaBlockMetadata_Linear::FindSuballocation(VkDeviceSize offset
 VmaSuballocationOffsetLess());
 if (it != suballocations1st.end())
 {
- return const_cast<VmaSuballocation&>(*it);
+ return const_cast<VmaSuballocation &>(*it);
 }
 }
 if (m_2ndVectorMode != SECOND_VECTOR_EMPTY)
 {
 // Rest of members stays uninitialized intentionally for better performance.
- SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ?
- VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) :
- VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
+ SuballocationVectorType::const_iterator it = m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER ? VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetLess()) : VmaBinaryFindSorted(suballocations2nd.begin(), suballocations2nd.end(), refSuballoc, VmaSuballocationOffsetGreater());
 if (it != suballocations2nd.end())
 {
- return const_cast<VmaSuballocation&>(*it);
+ return const_cast<VmaSuballocation &>(*it);
 }
 }
 VMA_ASSERT(0 && "Allocation not found in linear allocator!");
- return const_cast<VmaSuballocation&>(suballocations1st.back()); // Should never occur.
+ return const_cast<VmaSuballocation &>(suballocations1st.back()); // Should never occur.
 }
 bool VmaBlockMetadata_Linear::ShouldCompact1st() const
@@ -8823,8 +9065,8 @@ bool VmaBlockMetadata_Linear::ShouldCompact1st() const
 void VmaBlockMetadata_Linear::CleanupAfterFree()
 {
- SuballocationVectorType& suballocations1st = AccessSuballocations1st();
- SuballocationVectorType& suballocations2nd = AccessSuballocations2nd();
+ SuballocationVectorType &suballocations1st = AccessSuballocations1st();
+ SuballocationVectorType &suballocations2nd = AccessSuballocations2nd();
 if (IsEmpty())
 {
@@ -8843,7 +9085,7 @@ void VmaBlockMetadata_Linear::CleanupAfterFree()
 // Find more null items at the beginning of 1st vector.
 while (m_1stNullItemsBeginCount < suballoc1stCount &&
- suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
+ suballocations1st[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE)
 {
 ++m_1stNullItemsBeginCount;
 --m_1stNullItemsMiddleCount;
 }
@@ -8851,7 +9093,7 @@ void VmaBlockMetadata_Linear::CleanupAfterFree()
 // Find more null items at the end of 1st vector.
 while (m_1stNullItemsMiddleCount > 0 &&
- suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE)
+ suballocations1st.back().type == VMA_SUBALLOCATION_TYPE_FREE)
 {
 --m_1stNullItemsMiddleCount;
 suballocations1st.pop_back();
 }
@@ -8859,7 +9101,7 @@ void VmaBlockMetadata_Linear::CleanupAfterFree()
 // Find more null items at the end of 2nd vector.
 while (m_2ndNullItemsCount > 0 &&
- suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE)
+ suballocations2nd.back().type == VMA_SUBALLOCATION_TYPE_FREE)
 {
 --m_2ndNullItemsCount;
 suballocations2nd.pop_back();
 }
@@ -8867,7 +9109,7 @@ void VmaBlockMetadata_Linear::CleanupAfterFree()
 // Find more null items at the beginning of 2nd vector.
while (m_2ndNullItemsCount > 0 && - suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE) + suballocations2nd[0].type == VMA_SUBALLOCATION_TYPE_FREE) { --m_2ndNullItemsCount; VmaVectorRemove(suballocations2nd, 0); @@ -8912,7 +9154,7 @@ void VmaBlockMetadata_Linear::CleanupAfterFree() m_2ndVectorMode = SECOND_VECTOR_EMPTY; m_1stNullItemsMiddleCount = m_2ndNullItemsCount; while (m_1stNullItemsBeginCount < suballocations2nd.size() && - suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE) + suballocations2nd[m_1stNullItemsBeginCount].type == VMA_SUBALLOCATION_TYPE_FREE) { ++m_1stNullItemsBeginCount; --m_1stNullItemsMiddleCount; @@ -8931,13 +9173,13 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( VkDeviceSize allocAlignment, VmaSuballocationType allocType, uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) + VmaAllocationRequest *pAllocationRequest) { const VkDeviceSize blockSize = GetSize(); const VkDeviceSize debugMargin = GetDebugMargin(); const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + SuballocationVectorType &suballocations1st = AccessSuballocations1st(); + SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); if (m_2ndVectorMode == SECOND_VECTOR_EMPTY || m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { @@ -8946,7 +9188,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( VkDeviceSize resultBaseOffset = 0; if (!suballocations1st.empty()) { - const VmaSuballocation& lastSuballoc = suballocations1st.back(); + const VmaSuballocation &lastSuballoc = suballocations1st.back(); resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin; } @@ -8961,9 +9203,9 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations1st.empty()) { bool bufferImageGranularityConflict = false; - for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--;) { - const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + const VmaSuballocation &prevSuballoc = suballocations1st[prevSuballocIndex]; if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) { if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) @@ -8982,8 +9224,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( } } - const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? - suballocations2nd.back().offset : blockSize; + const VkDeviceSize freeSpaceEnd = m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK ? suballocations2nd.back().offset : blockSize; // There is enough free space at the end after alignment. if (resultOffset + allocSize + debugMargin <= freeSpaceEnd) @@ -8992,9 +9233,9 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( // If conflict exists, allocation cannot be made here. 
if ((allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) && m_2ndVectorMode == SECOND_VECTOR_DOUBLE_STACK) { - for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--;) { - const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + const VmaSuballocation &nextSuballoc = suballocations2nd[nextSuballocIndex]; if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) { if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) @@ -9027,7 +9268,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( VkDeviceSize resultBaseOffset = 0; if (!suballocations2nd.empty()) { - const VmaSuballocation& lastSuballoc = suballocations2nd.back(); + const VmaSuballocation &lastSuballoc = suballocations2nd.back(); resultBaseOffset = lastSuballoc.offset + lastSuballoc.size + debugMargin; } @@ -9042,9 +9283,9 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty()) { bool bufferImageGranularityConflict = false; - for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--; ) + for (size_t prevSuballocIndex = suballocations2nd.size(); prevSuballocIndex--;) { - const VmaSuballocation& prevSuballoc = suballocations2nd[prevSuballocIndex]; + const VmaSuballocation &prevSuballoc = suballocations2nd[prevSuballocIndex]; if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) { if (VmaIsBufferImageGranularityConflict(prevSuballoc.type, allocType)) @@ -9074,10 +9315,10 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_LowerAddress( if (allocSize % bufferImageGranularity || resultOffset % bufferImageGranularity) { for (size_t nextSuballocIndex = index1st; - nextSuballocIndex < suballocations1st.size(); - nextSuballocIndex++) + nextSuballocIndex < suballocations1st.size(); + nextSuballocIndex++) { - const VmaSuballocation& nextSuballoc = suballocations1st[nextSuballocIndex]; + const VmaSuballocation &nextSuballoc = suballocations1st[nextSuballocIndex]; if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) { if (VmaIsBufferImageGranularityConflict(allocType, nextSuballoc.type)) @@ -9109,12 +9350,12 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( VkDeviceSize allocAlignment, VmaSuballocationType allocType, uint32_t strategy, - VmaAllocationRequest* pAllocationRequest) + VmaAllocationRequest *pAllocationRequest) { const VkDeviceSize blockSize = GetSize(); const VkDeviceSize bufferImageGranularity = GetBufferImageGranularity(); - SuballocationVectorType& suballocations1st = AccessSuballocations1st(); - SuballocationVectorType& suballocations2nd = AccessSuballocations2nd(); + SuballocationVectorType &suballocations1st = AccessSuballocations1st(); + SuballocationVectorType &suballocations2nd = AccessSuballocations2nd(); if (m_2ndVectorMode == SECOND_VECTOR_RING_BUFFER) { @@ -9130,7 +9371,7 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( VkDeviceSize resultBaseOffset = blockSize - allocSize; if (!suballocations2nd.empty()) { - const VmaSuballocation& lastSuballoc = suballocations2nd.back(); + const VmaSuballocation &lastSuballoc = suballocations2nd.back(); resultBaseOffset = lastSuballoc.offset - allocSize; if (allocSize > lastSuballoc.offset) 
{ @@ -9161,9 +9402,9 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( if (bufferImageGranularity > 1 && bufferImageGranularity != allocAlignment && !suballocations2nd.empty()) { bool bufferImageGranularityConflict = false; - for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--; ) + for (size_t nextSuballocIndex = suballocations2nd.size(); nextSuballocIndex--;) { - const VmaSuballocation& nextSuballoc = suballocations2nd[nextSuballocIndex]; + const VmaSuballocation &nextSuballoc = suballocations2nd[nextSuballocIndex]; if (VmaBlocksOnSamePage(resultOffset, allocSize, nextSuballoc.offset, bufferImageGranularity)) { if (VmaIsBufferImageGranularityConflict(nextSuballoc.type, allocType)) @@ -9183,18 +9424,16 @@ bool VmaBlockMetadata_Linear::CreateAllocationRequest_UpperAddress( } // There is enough free space. - const VkDeviceSize endOf1st = !suballocations1st.empty() ? - suballocations1st.back().offset + suballocations1st.back().size : - 0; + const VkDeviceSize endOf1st = !suballocations1st.empty() ? suballocations1st.back().offset + suballocations1st.back().size : 0; if (endOf1st + debugMargin <= resultOffset) { // Check previous suballocations for BufferImageGranularity conflicts. // If conflict exists, allocation cannot be made here. if (bufferImageGranularity > 1) { - for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--; ) + for (size_t prevSuballocIndex = suballocations1st.size(); prevSuballocIndex--;) { - const VmaSuballocation& prevSuballoc = suballocations1st[prevSuballocIndex]; + const VmaSuballocation &prevSuballoc = suballocations1st[prevSuballocIndex]; if (VmaBlocksOnSamePage(prevSuballoc.offset, prevSuballoc.size, resultOffset, bufferImageGranularity)) { if (VmaIsBufferImageGranularityConflict(allocType, prevSuballoc.type)) @@ -9938,24 +10177,24 @@ class VmaBlockMetadata_TLSF : public VmaBlockMetadata { VMA_CLASS_NO_COPY_NO_MOVE(VmaBlockMetadata_TLSF) public: - VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks, - VkDeviceSize bufferImageGranularity, bool isVirtual); + VmaBlockMetadata_TLSF(const VkAllocationCallbacks *pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual); virtual ~VmaBlockMetadata_TLSF(); size_t GetAllocationCount() const override { return m_AllocCount; } size_t GetFreeRegionsCount() const override { return m_BlocksFreeCount + 1; } VkDeviceSize GetSumFreeSize() const override { return m_BlocksFreeSize + m_NullBlock->size; } bool IsEmpty() const override { return m_NullBlock->offset == 0; } - VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block*)allocHandle)->offset; } + VkDeviceSize GetAllocationOffset(VmaAllocHandle allocHandle) const override { return ((Block *)allocHandle)->offset; } void Init(VkDeviceSize size) override; bool Validate() const override; - void AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const override; - void AddStatistics(VmaStatistics& inoutStats) const override; + void AddDetailedStatistics(VmaDetailedStatistics &inoutStats) const override; + void AddStatistics(VmaStatistics &inoutStats) const override; #if VMA_STATS_STRING_ENABLED - void PrintDetailedMap(class VmaJsonWriter& json) const override; + void PrintDetailedMap(class VmaJsonWriter &json) const override; #endif bool CreateAllocationRequest( @@ -9964,22 +10203,22 @@ class VmaBlockMetadata_TLSF : public VmaBlockMetadata bool upperAddress, VmaSuballocationType allocType, uint32_t strategy, - VmaAllocationRequest* 
pAllocationRequest) override;
+ VmaAllocationRequest *pAllocationRequest) override;
- VkResult CheckCorruption(const void* pBlockData) override;
+ VkResult CheckCorruption(const void *pBlockData) override;
 void Alloc(
- const VmaAllocationRequest& request,
+ const VmaAllocationRequest &request,
 VmaSuballocationType type,
- void* userData) override;
+ void *userData) override;
 void Free(VmaAllocHandle allocHandle) override;
- void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) override;
- void* GetAllocationUserData(VmaAllocHandle allocHandle) const override;
+ void GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo &outInfo) override;
+ void *GetAllocationUserData(VmaAllocHandle allocHandle) const override;
 VmaAllocHandle GetAllocationListBegin() const override;
 VmaAllocHandle GetNextAllocation(VmaAllocHandle prevAlloc) const override;
 VkDeviceSize GetNextFreeRegionSize(VmaAllocHandle alloc) const override;
 void Clear() override;
- void SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) override;
+ void SetAllocationUserData(VmaAllocHandle allocHandle, void *userData) override;
 void DebugLogAllAllocations() const override;
 private:
@@ -9997,22 +10236,30 @@ class VmaBlockMetadata_TLSF : public VmaBlockMetadata
 public:
 VkDeviceSize offset;
 VkDeviceSize size;
- Block* prevPhysical;
- Block* nextPhysical;
+ Block *prevPhysical;
+ Block *nextPhysical;
 void MarkFree() { prevFree = VMA_NULL; }
 void MarkTaken() { prevFree = this; }
 bool IsFree() const { return prevFree != this; }
- void*& UserData() { VMA_HEAVY_ASSERT(!IsFree()); return userData; }
- Block*& PrevFree() { return prevFree; }
- Block*& NextFree() { VMA_HEAVY_ASSERT(IsFree()); return nextFree; }
+ void *&UserData()
+ {
+ VMA_HEAVY_ASSERT(!IsFree());
+ return userData;
+ }
+ Block *&PrevFree() { return prevFree; }
+ Block *&NextFree()
+ {
+ VMA_HEAVY_ASSERT(IsFree());
+ return nextFree;
+ }
 private:
- Block* prevFree; // Address of the same block here indicates that block is taken
+ Block *prevFree; // Address of the same block here indicates that block is taken
 union
 {
- Block* nextFree;
- void* userData;
+ Block *nextFree;
+ void *userData;
 };
 };
@@ -10026,12 +10273,12 @@ class VmaBlockMetadata_TLSF : public VmaBlockMetadata
 uint32_t m_InnerIsFreeBitmap[MAX_MEMORY_CLASSES];
 uint32_t m_ListsCount;
 /*
- * 0: 0-3 lists for small buffers
- * 1+: 0-(2^SLI-1) lists for normal buffers
- */
- Block** m_FreeList;
+ * 0: 0-3 lists for small buffers
+ * 1+: 0-(2^SLI-1) lists for normal buffers
+ */
+ Block **m_FreeList;
 VmaPoolAllocator<Block> m_BlockAllocator;
- Block* m_NullBlock;
+ Block *m_NullBlock;
 VmaBlockBufferImageGranularity m_GranularityHandler;
 uint8_t SizeToMemoryClass(VkDeviceSize size) const;
@@ -10039,34 +10286,36 @@ class VmaBlockMetadata_TLSF : public VmaBlockMetadata
 uint32_t GetListIndex(uint8_t memoryClass, uint16_t secondIndex) const;
 uint32_t GetListIndex(VkDeviceSize size) const;
- void RemoveFreeBlock(Block* block);
- void InsertFreeBlock(Block* block);
- void MergeBlock(Block* block, Block* prev);
+ void RemoveFreeBlock(Block *block);
+ void InsertFreeBlock(Block *block);
+ void MergeBlock(Block *block, Block *prev);
- Block* FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const;
+ Block *FindFreeBlock(VkDeviceSize size, uint32_t &listIndex) const;
 bool CheckBlock(
- Block& block,
+ Block &block,
 uint32_t listIndex,
 VkDeviceSize allocSize,
 VkDeviceSize allocAlignment,
 VmaSuballocationType allocType,
- VmaAllocationRequest* pAllocationRequest);
+
VmaAllocationRequest *pAllocationRequest); }; #ifndef _VMA_BLOCK_METADATA_TLSF_FUNCTIONS -VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks* pAllocationCallbacks, - VkDeviceSize bufferImageGranularity, bool isVirtual) +VmaBlockMetadata_TLSF::VmaBlockMetadata_TLSF(const VkAllocationCallbacks *pAllocationCallbacks, + VkDeviceSize bufferImageGranularity, bool isVirtual) : VmaBlockMetadata(pAllocationCallbacks, bufferImageGranularity, isVirtual), - m_AllocCount(0), - m_BlocksFreeCount(0), - m_BlocksFreeSize(0), - m_IsFreeBitmap(0), - m_MemoryClasses(0), - m_ListsCount(0), - m_FreeList(VMA_NULL), - m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT), - m_NullBlock(VMA_NULL), - m_GranularityHandler(bufferImageGranularity) {} + m_AllocCount(0), + m_BlocksFreeCount(0), + m_BlocksFreeSize(0), + m_IsFreeBitmap(0), + m_MemoryClasses(0), + m_ListsCount(0), + m_FreeList(VMA_NULL), + m_BlockAllocator(pAllocationCallbacks, INITIAL_BLOCK_ALLOC_COUNT), + m_NullBlock(VMA_NULL), + m_GranularityHandler(bufferImageGranularity) +{ +} VmaBlockMetadata_TLSF::~VmaBlockMetadata_TLSF() { @@ -10101,8 +10350,8 @@ void VmaBlockMetadata_TLSF::Init(VkDeviceSize size) m_MemoryClasses = memoryClass + uint8_t(2); memset(m_InnerIsFreeBitmap, 0, MAX_MEMORY_CLASSES * sizeof(uint32_t)); - m_FreeList = vma_new_array(GetAllocationCallbacks(), Block*, m_ListsCount); - memset(m_FreeList, 0, m_ListsCount * sizeof(Block*)); + m_FreeList = vma_new_array(GetAllocationCallbacks(), Block *, m_ListsCount); + memset(m_FreeList, 0, m_ListsCount * sizeof(Block *)); } bool VmaBlockMetadata_TLSF::Validate() const @@ -10117,7 +10366,7 @@ bool VmaBlockMetadata_TLSF::Validate() const // Check integrity of free lists for (uint32_t list = 0; list < m_ListsCount; ++list) { - Block* block = m_FreeList[list]; + Block *block = m_FreeList[list]; if (block != VMA_NULL) { VMA_VALIDATE(block->IsFree()); @@ -10140,7 +10389,7 @@ bool VmaBlockMetadata_TLSF::Validate() const VMA_VALIDATE(m_NullBlock->prevPhysical->nextPhysical == m_NullBlock); } // Check all blocks - for (Block* prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical) + for (Block *prev = m_NullBlock->prevPhysical; prev != VMA_NULL; prev = prev->prevPhysical) { VMA_VALIDATE(prev->offset + prev->size == nextOffset); nextOffset = prev->offset; @@ -10151,7 +10400,7 @@ bool VmaBlockMetadata_TLSF::Validate() const { ++freeCount; // Check if free block belongs to free list - Block* freeBlock = m_FreeList[listIndex]; + Block *freeBlock = m_FreeList[listIndex]; VMA_VALIDATE(freeBlock != VMA_NULL); bool found = false; @@ -10170,7 +10419,7 @@ bool VmaBlockMetadata_TLSF::Validate() const { ++allocCount; // Check if taken block is not on a free list - Block* freeBlock = m_FreeList[listIndex]; + Block *freeBlock = m_FreeList[listIndex]; while (freeBlock) { VMA_VALIDATE(freeBlock != prev); @@ -10203,14 +10452,14 @@ bool VmaBlockMetadata_TLSF::Validate() const return true; } -void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) const +void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics &inoutStats) const { inoutStats.statistics.blockCount++; inoutStats.statistics.blockBytes += GetSize(); if (m_NullBlock->size > 0) VmaAddDetailedStatisticsUnusedRange(inoutStats, m_NullBlock->size); - for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + for (Block *block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) { if (block->IsFree()) 
VmaAddDetailedStatisticsUnusedRange(inoutStats, block->size);
@@ -10219,7 +10468,7 @@ void VmaBlockMetadata_TLSF::AddDetailedStatistics(VmaDetailedStatistics& inoutSt
 }
 }
-void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const
+void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics &inoutStats) const
 {
 inoutStats.blockCount++;
 inoutStats.allocationCount += (uint32_t)m_AllocCount;
@@ -10228,14 +10477,14 @@ void VmaBlockMetadata_TLSF::AddStatistics(VmaStatistics& inoutStats) const
 }
 #if VMA_STATS_STRING_ENABLED
-void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const
+void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter &json) const
 {
 size_t blockCount = m_AllocCount + m_BlocksFreeCount;
- VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
- VmaVector<Block*, VmaStlAllocator<Block*>> blockList(blockCount, allocator);
+ VmaStlAllocator<Block *> allocator(GetAllocationCallbacks());
+ VmaVector<Block *, VmaStlAllocator<Block *>> blockList(blockCount, allocator);
 size_t i = blockCount;
- for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
+ for (Block *block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
 {
 blockList[--i] = block;
 }
@@ -10246,13 +10495,13 @@ void VmaBlockMetadata_TLSF::PrintDetailedMap(class VmaJsonWriter& json) const
 AddDetailedStatistics(stats);
 PrintDetailedMap_Begin(json,
- stats.statistics.blockBytes - stats.statistics.allocationBytes,
- stats.statistics.allocationCount,
- stats.unusedRangeCount);
+ stats.statistics.blockBytes - stats.statistics.allocationBytes,
+ stats.statistics.allocationCount,
+ stats.unusedRangeCount);
 for (; i < blockCount; ++i)
 {
- Block* block = blockList[i];
+ Block *block = blockList[i];
 if (block->IsFree())
 PrintDetailedMap_UnusedRange(json, block->offset, block->size);
 else
@@ -10271,7 +10520,7 @@ bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
 bool upperAddress,
 VmaSuballocationType allocType,
 uint32_t strategy,
- VmaAllocationRequest* pAllocationRequest)
+ VmaAllocationRequest *pAllocationRequest)
 {
 VMA_ASSERT(allocSize > 0 && "Cannot allocate empty block!");
 VMA_ASSERT(!upperAddress && "VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT can be used only with linear algorithm.");
@@ -10303,8 +10552,8 @@ bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
 uint32_t nextListIndex = 0;
 uint32_t prevListIndex = 0;
- Block* nextListBlock = VMA_NULL;
- Block* prevListBlock = VMA_NULL;
+ Block *nextListBlock = VMA_NULL;
+ Block *prevListBlock = VMA_NULL;
 // Check blocks according to strategies
 if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT)
@@ -10359,14 +10608,14 @@ bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
 nextListBlock = nextListBlock->NextFree();
 }
 }
- else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT )
+ else if (strategy & VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT)
 {
 // Perform search from the start
- VmaStlAllocator<Block*> allocator(GetAllocationCallbacks());
- VmaVector<Block*, VmaStlAllocator<Block*>> blockList(m_BlocksFreeCount, allocator);
+ VmaStlAllocator<Block *> allocator(GetAllocationCallbacks());
+ VmaVector<Block *, VmaStlAllocator<Block *>> blockList(m_BlocksFreeCount, allocator);
 size_t i = m_BlocksFreeCount;
- for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
+ for (Block *block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical)
 {
 if (block->IsFree() && block->size >= allocSize)
 blockList[--i] = block;
 }
@@ -10374,7 +10623,7 @@ bool VmaBlockMetadata_TLSF::CreateAllocationRequest(
 for (; i < m_BlocksFreeCount; ++i)
 {
- Block& block = *blockList[i];
+
Block &block = *blockList[i]; if (CheckBlock(block, GetListIndex(block.size), allocSize, allocAlignment, allocType, pAllocationRequest)) return true; } @@ -10427,9 +10676,9 @@ bool VmaBlockMetadata_TLSF::CreateAllocationRequest( return false; } -VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData) +VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void *pBlockData) { - for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + for (Block *block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) { if (!block->IsFree()) { @@ -10445,14 +10694,14 @@ VkResult VmaBlockMetadata_TLSF::CheckCorruption(const void* pBlockData) } void VmaBlockMetadata_TLSF::Alloc( - const VmaAllocationRequest& request, + const VmaAllocationRequest &request, VmaSuballocationType type, - void* userData) + void *userData) { VMA_ASSERT(request.type == VmaAllocationRequestType::TLSF); // Get block and pop it from the free list - Block* currentBlock = (Block*)request.allocHandle; + Block *currentBlock = (Block *)request.allocHandle; VkDeviceSize offset = request.algorithmData; VMA_ASSERT(currentBlock != VMA_NULL); VMA_ASSERT(currentBlock->offset <= offset); @@ -10466,7 +10715,7 @@ void VmaBlockMetadata_TLSF::Alloc( // Append missing alignment to prev block or create new one if (misssingAlignment) { - Block* prevBlock = currentBlock->prevPhysical; + Block *prevBlock = currentBlock->prevPhysical; VMA_ASSERT(prevBlock != VMA_NULL && "There should be no missing alignment at offset 0!"); if (prevBlock->IsFree() && prevBlock->size != debugMargin) @@ -10486,7 +10735,7 @@ void VmaBlockMetadata_TLSF::Alloc( } else { - Block* newBlock = m_BlockAllocator.Alloc(); + Block *newBlock = m_BlockAllocator.Alloc(); currentBlock->prevPhysical = newBlock; prevBlock->nextPhysical = newBlock; newBlock->prevPhysical = prevBlock; @@ -10525,7 +10774,7 @@ void VmaBlockMetadata_TLSF::Alloc( VMA_ASSERT(currentBlock->size > size && "Proper block already found, shouldn't find smaller one!"); // Create new free block - Block* newBlock = m_BlockAllocator.Alloc(); + Block *newBlock = m_BlockAllocator.Alloc(); newBlock->size = currentBlock->size - size; newBlock->offset = currentBlock->offset + size; newBlock->prevPhysical = currentBlock; @@ -10553,7 +10802,7 @@ void VmaBlockMetadata_TLSF::Alloc( if (debugMargin > 0) { currentBlock->size -= debugMargin; - Block* newBlock = m_BlockAllocator.Alloc(); + Block *newBlock = m_BlockAllocator.Alloc(); newBlock->size = debugMargin; newBlock->offset = currentBlock->offset + currentBlock->size; newBlock->prevPhysical = currentBlock; @@ -10566,14 +10815,14 @@ void VmaBlockMetadata_TLSF::Alloc( if (!IsVirtual()) m_GranularityHandler.AllocPages((uint8_t)(uintptr_t)request.customData, - currentBlock->offset, currentBlock->size); + currentBlock->offset, currentBlock->size); ++m_AllocCount; } void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle) { - Block* block = (Block*)allocHandle; - Block* next = block->nextPhysical; + Block *block = (Block *)allocHandle; + Block *next = block->nextPhysical; VMA_ASSERT(!block->IsFree() && "Block is already free!"); if (!IsVirtual()) @@ -10590,7 +10839,7 @@ void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle) } // Try merging - Block* prev = block->prevPhysical; + Block *prev = block->prevPhysical; if (prev != VMA_NULL && prev->IsFree() && prev->size != debugMargin) { RemoveFreeBlock(prev); @@ -10609,18 +10858,18 @@ void VmaBlockMetadata_TLSF::Free(VmaAllocHandle allocHandle) } } -void 
VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo& outInfo) +void VmaBlockMetadata_TLSF::GetAllocationInfo(VmaAllocHandle allocHandle, VmaVirtualAllocationInfo &outInfo) { - Block* block = (Block*)allocHandle; + Block *block = (Block *)allocHandle; VMA_ASSERT(!block->IsFree() && "Cannot get allocation info for free block!"); outInfo.offset = block->offset; outInfo.size = block->size; outInfo.pUserData = block->UserData(); } -void* VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const +void *VmaBlockMetadata_TLSF::GetAllocationUserData(VmaAllocHandle allocHandle) const { - Block* block = (Block*)allocHandle; + Block *block = (Block *)allocHandle; VMA_ASSERT(!block->IsFree() && "Cannot get user data for free block!"); return block->UserData(); } @@ -10630,7 +10879,7 @@ VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const if (m_AllocCount == 0) return VK_NULL_HANDLE; - for (Block* block = m_NullBlock->prevPhysical; block; block = block->prevPhysical) + for (Block *block = m_NullBlock->prevPhysical; block; block = block->prevPhysical) { if (!block->IsFree()) return (VmaAllocHandle)block; @@ -10641,10 +10890,10 @@ VmaAllocHandle VmaBlockMetadata_TLSF::GetAllocationListBegin() const VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc) const { - Block* startBlock = (Block*)prevAlloc; + Block *startBlock = (Block *)prevAlloc; VMA_ASSERT(!startBlock->IsFree() && "Incorrect block!"); - for (Block* block = startBlock->prevPhysical; block; block = block->prevPhysical) + for (Block *block = startBlock->prevPhysical; block; block = block->prevPhysical) { if (!block->IsFree()) return (VmaAllocHandle)block; @@ -10654,7 +10903,7 @@ VmaAllocHandle VmaBlockMetadata_TLSF::GetNextAllocation(VmaAllocHandle prevAlloc VkDeviceSize VmaBlockMetadata_TLSF::GetNextFreeRegionSize(VmaAllocHandle alloc) const { - Block* block = (Block*)alloc; + Block *block = (Block *)alloc; VMA_ASSERT(!block->IsFree() && "Incorrect block!"); if (block->prevPhysical) @@ -10670,29 +10919,29 @@ void VmaBlockMetadata_TLSF::Clear() m_IsFreeBitmap = 0; m_NullBlock->offset = 0; m_NullBlock->size = GetSize(); - Block* block = m_NullBlock->prevPhysical; + Block *block = m_NullBlock->prevPhysical; m_NullBlock->prevPhysical = VMA_NULL; while (block) { - Block* prev = block->prevPhysical; + Block *prev = block->prevPhysical; m_BlockAllocator.Free(block); block = prev; } - memset(m_FreeList, 0, m_ListsCount * sizeof(Block*)); + memset(m_FreeList, 0, m_ListsCount * sizeof(Block *)); memset(m_InnerIsFreeBitmap, 0, m_MemoryClasses * sizeof(uint32_t)); m_GranularityHandler.Clear(); } -void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void* userData) +void VmaBlockMetadata_TLSF::SetAllocationUserData(VmaAllocHandle allocHandle, void *userData) { - Block* block = (Block*)allocHandle; + Block *block = (Block *)allocHandle; VMA_ASSERT(!block->IsFree() && "Trying to set user data for not allocated block!"); block->UserData() = userData; } void VmaBlockMetadata_TLSF::DebugLogAllAllocations() const { - for (Block* block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) + for (Block *block = m_NullBlock->prevPhysical; block != VMA_NULL; block = block->prevPhysical) if (!block->IsFree()) DebugLogAllocation(block->offset, block->size, block->UserData()); } @@ -10734,7 +10983,7 @@ uint32_t VmaBlockMetadata_TLSF::GetListIndex(VkDeviceSize size) const return GetListIndex(memoryClass, 
SizeToSecondIndex(size, memoryClass)); }
-void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block)
+void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block *block)
 {
 VMA_ASSERT(block != m_NullBlock);
 VMA_ASSERT(block->IsFree());
@@ -10763,7 +11012,7 @@ void VmaBlockMetadata_TLSF::RemoveFreeBlock(Block* block)
 m_BlocksFreeSize -= block->size;
 }
-void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block)
+void VmaBlockMetadata_TLSF::InsertFreeBlock(Block *block)
 {
 VMA_ASSERT(block != m_NullBlock);
 VMA_ASSERT(!block->IsFree() && "Cannot insert block twice!");
@@ -10786,7 +11035,7 @@ void VmaBlockMetadata_TLSF::InsertFreeBlock(Block* block)
 m_BlocksFreeSize += block->size;
 }
-void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev)
+void VmaBlockMetadata_TLSF::MergeBlock(Block *block, Block *prev)
 {
 VMA_ASSERT(block->prevPhysical == prev && "Cannot merge separate physical regions!");
 VMA_ASSERT(!prev->IsFree() && "Cannot merge block that belongs to free list!");
@@ -10799,7 +11048,7 @@ void VmaBlockMetadata_TLSF::MergeBlock(Block* block, Block* prev)
 m_BlockAllocator.Free(prev);
 }
-VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t& listIndex) const
+VmaBlockMetadata_TLSF::Block *VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize size, uint32_t &listIndex) const
 {
 uint8_t memoryClass = SizeToMemoryClass(size);
 uint32_t innerFreeMap = m_InnerIsFreeBitmap[memoryClass] & (~0U << SizeToSecondIndex(size, memoryClass));
@@ -10822,12 +11071,12 @@ VmaBlockMetadata_TLSF::Block* VmaBlockMetadata_TLSF::FindFreeBlock(VkDeviceSize
 }
 bool VmaBlockMetadata_TLSF::CheckBlock(
- Block& block,
+ Block &block,
 uint32_t listIndex,
 VkDeviceSize allocSize,
 VkDeviceSize allocAlignment,
 VmaSuballocationType allocType,
- VmaAllocationRequest* pAllocationRequest)
+ VmaAllocationRequest *pAllocationRequest)
 {
 VMA_ASSERT(block.IsFree() && "Block is already taken!");
@@ -10844,7 +11093,7 @@ bool VmaBlockMetadata_TLSF::CheckBlock(
 pAllocationRequest->type = VmaAllocationRequestType::TLSF;
 pAllocationRequest->allocHandle = (VmaAllocHandle)&block;
 pAllocationRequest->size = allocSize - GetDebugMargin();
- pAllocationRequest->customData = (void*)allocType;
+ pAllocationRequest->customData = (void *)allocType;
 pAllocationRequest->algorithmData = alignedOffset;
 // Place block at the start of list if it's normal block
@@ -10889,7 +11138,7 @@ class VmaBlockVector
 uint32_t algorithm,
 float priority,
 VkDeviceSize minAllocationAlignment,
- void* pMemoryAllocateNext);
+ void *pMemoryAllocateNext);
 ~VmaBlockVector();
 VmaAllocator GetAllocator() const { return m_hAllocator; }
@@ -10901,31 +11150,31 @@ class VmaBlockVector
 uint32_t GetAlgorithm() const { return m_Algorithm; }
 bool HasExplicitBlockSize() const { return m_ExplicitBlockSize; }
 float GetPriority() const { return m_Priority; }
- const void* GetAllocationNextPtr() const { return m_pMemoryAllocateNext; }
+ const void *GetAllocationNextPtr() const { return m_pMemoryAllocateNext; }
 // To be used only while the m_Mutex is locked. Used during defragmentation.
 size_t GetBlockCount() const { return m_Blocks.size(); }
 // To be used only while the m_Mutex is locked. Used during defragmentation.
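FindFreeBlock() above is the classic TLSF ("two-level segregated fit") lookup that gives this metadata class its O(1) behavior: m_IsFreeBitmap selects a memory class (roughly floor(log2(size))), m_InnerIsFreeBitmap subdivides each class into 2^SLI linear buckets, and two find-first-set scans locate the first non-empty free list that fits. A minimal sketch of that size-to-bucket mapping, with an illustrative SLI of 5 (VMA's actual constants and its small-buffer path differ in detail):

    #include <cstdint>

    constexpr uint32_t SLI = 5; // second-level index bits (illustrative)

    uint32_t SizeToFreeListIndex(uint64_t size)
    {
        // Assumes size >= (1 << SLI); smaller sizes would go to the separate
        // small-buffer lists that the m_FreeList comment above mentions.
        const uint32_t fl = 63u - (uint32_t)__builtin_clzll(size); // MSB index (GCC/Clang builtin)
        const uint32_t sl = (uint32_t)((size >> (fl - SLI)) & ((1u << SLI) - 1u));
        return fl * (1u << SLI) + sl; // flat index into the free-list array
    }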
- VmaDeviceMemoryBlock* GetBlock(size_t index) const { return m_Blocks[index]; }
+ VmaDeviceMemoryBlock *GetBlock(size_t index) const { return m_Blocks[index]; }
 VMA_RW_MUTEX &GetMutex() { return m_Mutex; }
 VkResult CreateMinBlocks();
- void AddStatistics(VmaStatistics& inoutStats);
- void AddDetailedStatistics(VmaDetailedStatistics& inoutStats);
+ void AddStatistics(VmaStatistics &inoutStats);
+ void AddDetailedStatistics(VmaDetailedStatistics &inoutStats);
 bool IsEmpty();
 bool IsCorruptionDetectionEnabled() const;
 VkResult Allocate(
 VkDeviceSize size,
 VkDeviceSize alignment,
- const VmaAllocationCreateInfo& createInfo,
+ const VmaAllocationCreateInfo &createInfo,
 VmaSuballocationType suballocType,
 size_t allocationCount,
- VmaAllocation* pAllocations);
+ VmaAllocation *pAllocations);
 void Free(const VmaAllocation hAllocation);
 #if VMA_STATS_STRING_ENABLED
- void PrintDetailedMap(class VmaJsonWriter& json);
+ void PrintDetailedMap(class VmaJsonWriter &json);
 #endif
 VkResult CheckCorruption();
@@ -10943,10 +11192,10 @@ class VmaBlockVector
 const float m_Priority;
 const VkDeviceSize m_MinAllocationAlignment;
- void* const m_pMemoryAllocateNext;
+ void *const m_pMemoryAllocateNext;
 VMA_RW_MUTEX m_Mutex;
 // Incrementally sorted by sumFreeSize, ascending.
- VmaVector<VmaDeviceMemoryBlock*, VmaStlAllocator<VmaDeviceMemoryBlock*>> m_Blocks;
+ VmaVector<VmaDeviceMemoryBlock *, VmaStlAllocator<VmaDeviceMemoryBlock *>> m_Blocks;
 uint32_t m_NextBlockId;
 bool m_IncrementalSort = true;
@@ -10954,7 +11203,7 @@ class VmaBlockVector
 VkDeviceSize CalcMaxBlockSize() const;
 // Finds and removes given block from vector.
- void Remove(VmaDeviceMemoryBlock* pBlock);
+ void Remove(VmaDeviceMemoryBlock *pBlock);
 // Performs single step in sorting m_Blocks. They may not be fully sorted
 // after this call.
 void IncrementallySortBlocks();
@@ -10963,30 +11212,30 @@ class VmaBlockVector
 VkResult AllocatePage(
 VkDeviceSize size,
 VkDeviceSize alignment,
- const VmaAllocationCreateInfo& createInfo,
+ const VmaAllocationCreateInfo &createInfo,
 VmaSuballocationType suballocType,
- VmaAllocation* pAllocation);
+ VmaAllocation *pAllocation);
 VkResult AllocateFromBlock(
- VmaDeviceMemoryBlock* pBlock,
+ VmaDeviceMemoryBlock *pBlock,
 VkDeviceSize size,
 VkDeviceSize alignment,
 VmaAllocationCreateFlags allocFlags,
- void* pUserData,
+ void *pUserData,
 VmaSuballocationType suballocType,
 uint32_t strategy,
- VmaAllocation* pAllocation);
+ VmaAllocation *pAllocation);
 VkResult CommitAllocationRequest(
- VmaAllocationRequest& allocRequest,
- VmaDeviceMemoryBlock* pBlock,
+ VmaAllocationRequest &allocRequest,
+ VmaDeviceMemoryBlock *pBlock,
 VkDeviceSize alignment,
 VmaAllocationCreateFlags allocFlags,
- void* pUserData,
+ void *pUserData,
 VmaSuballocationType suballocType,
- VmaAllocation* pAllocation);
+ VmaAllocation *pAllocation);
- VkResult CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex);
+ VkResult CreateBlock(VkDeviceSize blockSize, size_t *pNewBlockIndex);
 bool HasEmptyBlock();
 };
 #endif // _VMA_BLOCK_VECTOR
@@ -10998,23 +11247,28 @@ struct VmaDefragmentationContext_T
 public:
 VmaDefragmentationContext_T(
 VmaAllocator hAllocator,
- const VmaDefragmentationInfo& info);
+ const VmaDefragmentationInfo &info);
 ~VmaDefragmentationContext_T();
- void GetStats(VmaDefragmentationStats& outStats) { outStats = m_GlobalStats; }
+ void GetStats(VmaDefragmentationStats &outStats) { outStats = m_GlobalStats; }
- VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo);
- VkResult DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo);
+ VkResult DefragmentPassBegin(VmaDefragmentationPassMoveInfo &moveInfo);
+ VkResult
DefragmentPassEnd(VmaDefragmentationPassMoveInfo &moveInfo); private: // Max number of allocations to ignore due to size constraints before ending single pass static const uint8_t MAX_ALLOCS_TO_IGNORE = 16; - enum class CounterStatus { Pass, Ignore, End }; + enum class CounterStatus + { + Pass, + Ignore, + End + }; struct FragmentedBlock { uint32_t data; - VmaDeviceMemoryBlock* block; + VmaDeviceMemoryBlock *block; }; struct StateBalanced { @@ -11025,9 +11279,14 @@ struct VmaDefragmentationContext_T { enum class Operation : uint8_t { - FindFreeBlockBuffer, FindFreeBlockTexture, FindFreeBlockAll, - MoveBuffers, MoveTextures, MoveAll, - Cleanup, Done + FindFreeBlockBuffer, + FindFreeBlockTexture, + FindFreeBlockAll, + MoveBuffers, + MoveTextures, + MoveAll, + Cleanup, + Done }; Operation operation = Operation::FindFreeBlockTexture; @@ -11051,29 +11310,29 @@ struct VmaDefragmentationContext_T uint8_t m_IgnoredAllocs = 0; uint32_t m_Algorithm; uint32_t m_BlockVectorCount; - VmaBlockVector* m_PoolBlockVector; - VmaBlockVector** m_pBlockVectors; + VmaBlockVector *m_PoolBlockVector; + VmaBlockVector **m_pBlockVectors; size_t m_ImmovableBlockCount = 0; - VmaDefragmentationStats m_GlobalStats = { 0 }; - VmaDefragmentationStats m_PassStats = { 0 }; - void* m_AlgorithmState = VMA_NULL; + VmaDefragmentationStats m_GlobalStats = {0}; + VmaDefragmentationStats m_PassStats = {0}; + void *m_AlgorithmState = VMA_NULL; - static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata* metadata); + static MoveAllocationData GetMoveData(VmaAllocHandle handle, VmaBlockMetadata *metadata); CounterStatus CheckCounters(VkDeviceSize bytes); bool IncrementCounters(VkDeviceSize bytes); - bool ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block); - bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector); + bool ReallocWithinBlock(VmaBlockVector &vector, VmaDeviceMemoryBlock *block); + bool AllocInOtherBlock(size_t start, size_t end, MoveAllocationData &data, VmaBlockVector &vector); - bool ComputeDefragmentation(VmaBlockVector& vector, size_t index); - bool ComputeDefragmentation_Fast(VmaBlockVector& vector); - bool ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update); - bool ComputeDefragmentation_Full(VmaBlockVector& vector); - bool ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index); + bool ComputeDefragmentation(VmaBlockVector &vector, size_t index); + bool ComputeDefragmentation_Fast(VmaBlockVector &vector); + bool ComputeDefragmentation_Balanced(VmaBlockVector &vector, size_t index, bool update); + bool ComputeDefragmentation_Full(VmaBlockVector &vector); + bool ComputeDefragmentation_Extensive(VmaBlockVector &vector, size_t index); - void UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state); + void UpdateVectorStatistics(VmaBlockVector &vector, StateBalanced &state); bool MoveDataToFreeBlocks(VmaSuballocationType currentType, - VmaBlockVector& vector, size_t firstFreeBlock, - bool& texturePresent, bool& bufferPresent, bool& otherPresent); + VmaBlockVector &vector, size_t firstFreeBlock, + bool &texturePresent, bool &bufferPresent, bool &otherPresent); }; #endif // _VMA_DEFRAGMENTATION_CONTEXT @@ -11088,35 +11347,39 @@ struct VmaPool_T VmaPool_T( VmaAllocator hAllocator, - const VmaPoolCreateInfo& createInfo, + const VmaPoolCreateInfo &createInfo, VkDeviceSize preferredBlockSize); ~VmaPool_T(); uint32_t GetId() const { return m_Id; } - void SetId(uint32_t id) { 
VMA_ASSERT(m_Id == 0); m_Id = id; }
+ void SetId(uint32_t id)
+ {
+ VMA_ASSERT(m_Id == 0);
+ m_Id = id;
+ }
- const char* GetName() const { return m_Name; }
- void SetName(const char* pName);
+ const char *GetName() const { return m_Name; }
+ void SetName(const char *pName);
 #if VMA_STATS_STRING_ENABLED
- //void PrintDetailedMap(class VmaStringBuilder& sb);
+ // void PrintDetailedMap(class VmaStringBuilder& sb);
 #endif
 private:
 uint32_t m_Id;
- char* m_Name;
- VmaPool_T* m_PrevPool = VMA_NULL;
- VmaPool_T* m_NextPool = VMA_NULL;
+ char *m_Name;
+ VmaPool_T *m_PrevPool = VMA_NULL;
+ VmaPool_T *m_NextPool = VMA_NULL;
 };
 struct VmaPoolListItemTraits
 {
 typedef VmaPool_T ItemType;
- static ItemType* GetPrev(const ItemType* item) { return item->m_PrevPool; }
- static ItemType* GetNext(const ItemType* item) { return item->m_NextPool; }
- static ItemType*& AccessPrev(ItemType* item) { return item->m_PrevPool; }
- static ItemType*& AccessNext(ItemType* item) { return item->m_NextPool; }
+ static ItemType *GetPrev(const ItemType *item) { return item->m_PrevPool; }
+ static ItemType *GetNext(const ItemType *item) { return item->m_NextPool; }
+ static ItemType *&AccessPrev(ItemType *item) { return item->m_PrevPool; }
+ static ItemType *&AccessNext(ItemType *item) { return item->m_NextPool; }
 };
 #endif // _VMA_POOL_T
@@ -11125,7 +11388,6 @@ struct VmaCurrentBudgetData
 {
 VMA_CLASS_NO_COPY_NO_MOVE(VmaCurrentBudgetData)
 public:
-
 VMA_ATOMIC_UINT32 m_BlockCount[VK_MAX_MEMORY_HEAPS];
 VMA_ATOMIC_UINT32 m_AllocationCount[VK_MAX_MEMORY_HEAPS];
 VMA_ATOMIC_UINT64 m_BlockBytes[VK_MAX_MEMORY_HEAPS];
@@ -11196,10 +11458,11 @@ class VmaAllocationObjectAllocator
 {
 VMA_CLASS_NO_COPY_NO_MOVE(VmaAllocationObjectAllocator)
 public:
- VmaAllocationObjectAllocator(const VkAllocationCallbacks* pAllocationCallbacks)
+ VmaAllocationObjectAllocator(const VkAllocationCallbacks *pAllocationCallbacks)
 : m_Allocator(pAllocationCallbacks, 1024) {}
- template<typename... Types> VmaAllocation Allocate(Types&&... args);
+ template <typename... Types>
+ VmaAllocation Allocate(Types &&...args);
 void Free(VmaAllocation hAlloc);
 private:
@@ -11207,8 +11470,8 @@ class VmaAllocationObjectAllocator
 VmaPoolAllocator<VmaAllocation_T> m_Allocator;
 };
-template<typename... Types>
-VmaAllocation VmaAllocationObjectAllocator::Allocate(Types&&... args)
+template <typename... Types>
+VmaAllocation VmaAllocationObjectAllocator::Allocate(Types &&...args)
 {
 VmaMutexLock mutexLock(m_Mutex);
 return m_Allocator.Alloc(std::forward<Types>(args)...);
 }
@@ -11229,33 +11492,33 @@ struct VmaVirtualBlock_T
 const bool m_AllocationCallbacksSpecified;
 const VkAllocationCallbacks m_AllocationCallbacks;
- VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo);
+ VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo &createInfo);
 ~VmaVirtualBlock_T();
 VkResult Init() { return VK_SUCCESS; }
 bool IsEmpty() const { return m_Metadata->IsEmpty(); }
 void Free(VmaVirtualAllocation allocation) { m_Metadata->Free((VmaAllocHandle)allocation); }
- void SetAllocationUserData(VmaVirtualAllocation allocation, void* userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); }
+ void SetAllocationUserData(VmaVirtualAllocation allocation, void *userData) { m_Metadata->SetAllocationUserData((VmaAllocHandle)allocation, userData); }
 void Clear() { m_Metadata->Clear(); }
- const VkAllocationCallbacks* GetAllocationCallbacks() const;
- void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo);
- VkResult Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation,
- VkDeviceSize* outOffset);
- void GetStatistics(VmaStatistics& outStats) const;
- void CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const;
+ const VkAllocationCallbacks *GetAllocationCallbacks() const;
+ void GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo &outInfo);
+ VkResult Allocate(const VmaVirtualAllocationCreateInfo &createInfo, VmaVirtualAllocation &outAllocation,
+ VkDeviceSize *outOffset);
+ void GetStatistics(VmaStatistics &outStats) const;
+ void CalculateDetailedStatistics(VmaDetailedStatistics &outStats) const;
 #if VMA_STATS_STRING_ENABLED
- void BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const;
+ void BuildStatsString(bool detailedMap, VmaStringBuilder &sb) const;
 #endif
 private:
- VmaBlockMetadata* m_Metadata;
+ VmaBlockMetadata *m_Metadata;
 };
 #ifndef _VMA_VIRTUAL_BLOCK_T_FUNCTIONS
-VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo& createInfo)
+VmaVirtualBlock_T::VmaVirtualBlock_T(const VmaVirtualBlockCreateInfo &createInfo)
 : m_AllocationCallbacksSpecified(createInfo.pAllocationCallbacks != VMA_NULL),
- m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? *createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks)
+ m_AllocationCallbacks(createInfo.pAllocationCallbacks != VMA_NULL ? *createInfo.pAllocationCallbacks : VmaEmptyAllocationCallbacks)
 {
 const uint32_t algorithm = createInfo.flags & VMA_VIRTUAL_BLOCK_CREATE_ALGORITHM_MASK;
 switch (algorithm)
@@ -11286,33 +11549,33 @@ VmaVirtualBlock_T::~VmaVirtualBlock_T()
 vma_delete(GetAllocationCallbacks(), m_Metadata);
 }
-const VkAllocationCallbacks* VmaVirtualBlock_T::GetAllocationCallbacks() const
+const VkAllocationCallbacks *VmaVirtualBlock_T::GetAllocationCallbacks() const
 {
 return m_AllocationCallbacksSpecified ?
&m_AllocationCallbacks : VMA_NULL; } -void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo& outInfo) +void VmaVirtualBlock_T::GetAllocationInfo(VmaVirtualAllocation allocation, VmaVirtualAllocationInfo &outInfo) { m_Metadata->GetAllocationInfo((VmaAllocHandle)allocation, outInfo); } -VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& createInfo, VmaVirtualAllocation& outAllocation, - VkDeviceSize* outOffset) +VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo &createInfo, VmaVirtualAllocation &outAllocation, + VkDeviceSize *outOffset) { VmaAllocationRequest request = {}; if (m_Metadata->CreateAllocationRequest( - createInfo.size, // allocSize - VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment - (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress - VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant - createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy - &request)) + createInfo.size, // allocSize + VMA_MAX(createInfo.alignment, (VkDeviceSize)1), // allocAlignment + (createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0, // upperAddress + VMA_SUBALLOCATION_TYPE_UNKNOWN, // allocType - unimportant + createInfo.flags & VMA_VIRTUAL_ALLOCATION_CREATE_STRATEGY_MASK, // strategy + &request)) { m_Metadata->Alloc(request, - VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant - createInfo.pUserData); + VMA_SUBALLOCATION_TYPE_UNKNOWN, // type - unimportant + createInfo.pUserData); outAllocation = (VmaVirtualAllocation)request.allocHandle; - if(outOffset) + if (outOffset) *outOffset = m_Metadata->GetAllocationOffset(request.allocHandle); return VK_SUCCESS; } @@ -11322,20 +11585,20 @@ VkResult VmaVirtualBlock_T::Allocate(const VmaVirtualAllocationCreateInfo& creat return VK_ERROR_OUT_OF_DEVICE_MEMORY; } -void VmaVirtualBlock_T::GetStatistics(VmaStatistics& outStats) const +void VmaVirtualBlock_T::GetStatistics(VmaStatistics &outStats) const { VmaClearStatistics(outStats); m_Metadata->AddStatistics(outStats); } -void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics& outStats) const +void VmaVirtualBlock_T::CalculateDetailedStatistics(VmaDetailedStatistics &outStats) const { VmaClearDetailedStatistics(outStats); m_Metadata->AddDetailedStatistics(outStats); } #if VMA_STATS_STRING_ENABLED -void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) const +void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder &sb) const { VmaJsonWriter json(GetAllocationCallbacks(), sb); json.BeginObject(); @@ -11360,7 +11623,6 @@ void VmaVirtualBlock_T::BuildStatsString(bool detailedMap, VmaStringBuilder& sb) #endif // _VMA_VIRTUAL_BLOCK_T_FUNCTIONS #endif // _VMA_VIRTUAL_BLOCK_T - // Main allocator object. struct VmaAllocator_T { @@ -11369,7 +11631,7 @@ struct VmaAllocator_T bool m_UseMutex; uint32_t m_VulkanApiVersion; bool m_UseKhrDedicatedAllocation; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). - bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). + bool m_UseKhrBindMemory2; // Can be set only if m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0). bool m_UseExtMemoryBudget; bool m_UseAmdDeviceCoherentMemory; bool m_UseKhrBufferDeviceAddress; @@ -11388,21 +11650,21 @@ struct VmaAllocator_T VkPhysicalDeviceMemoryProperties m_MemProps; // Default pools. 
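For context, the VmaVirtualBlock_T plumbing above is what backs VMA's public "virtual allocation" API: the same linear/TLSF metadata algorithms run over a caller-defined numeric range with no VkDeviceMemory behind them, which is handy for sub-allocating one big buffer. A minimal usage sketch against the documented VMA 3.x entry points (sizes arbitrary, error handling elided):

    #include "vk_mem_alloc.h"

    VmaVirtualBlockCreateInfo blockCreateInfo = {};
    blockCreateInfo.size = 1048576; // 1 MiB of abstract space to sub-allocate
    VmaVirtualBlock block;
    VkResult res = vmaCreateVirtualBlock(&blockCreateInfo, &block);

    VmaVirtualAllocationCreateInfo allocCreateInfo = {};
    allocCreateInfo.size = 4096;
    VmaVirtualAllocation alloc;
    VkDeviceSize offset; // where the 4 KiB landed inside the block
    res = vmaVirtualAllocate(block, &allocCreateInfo, &alloc, &offset);

    vmaVirtualFree(block, alloc);
    vmaDestroyVirtualBlock(block);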
- VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES]; + VmaBlockVector *m_pBlockVectors[VK_MAX_MEMORY_TYPES]; VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES]; VmaCurrentBudgetData m_Budget; VMA_ATOMIC_UINT32 m_DeviceMemoryCount; // Total number of VkDeviceMemory objects. - VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo); - VkResult Init(const VmaAllocatorCreateInfo* pCreateInfo); + VmaAllocator_T(const VmaAllocatorCreateInfo *pCreateInfo); + VkResult Init(const VmaAllocatorCreateInfo *pCreateInfo); ~VmaAllocator_T(); - const VkAllocationCallbacks* GetAllocationCallbacks() const + const VkAllocationCallbacks *GetAllocationCallbacks() const { return m_AllocationCallbacksSpecified ? &m_AllocationCallbacks : VMA_NULL; } - const VmaVulkanFunctions& GetVulkanFunctions() const + const VmaVulkanFunctions &GetVulkanFunctions() const { return m_VulkanFunctions; } @@ -11428,14 +11690,12 @@ struct VmaAllocator_T bool IsMemoryTypeNonCoherent(uint32_t memTypeIndex) const { return (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) == - VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; + VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT; } // Minimum alignment for all allocations in specific memory type. VkDeviceSize GetMemoryTypeMinAlignment(uint32_t memTypeIndex) const { - return IsMemoryTypeNonCoherent(memTypeIndex) ? - VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) : - (VkDeviceSize)VMA_MIN_ALIGNMENT; + return IsMemoryTypeNonCoherent(memTypeIndex) ? VMA_MAX((VkDeviceSize)VMA_MIN_ALIGNMENT, m_PhysicalDeviceProperties.limits.nonCoherentAtomSize) : (VkDeviceSize)VMA_MIN_ALIGNMENT; } bool IsIntegratedGpu() const @@ -11447,53 +11707,53 @@ struct VmaAllocator_T void GetBufferMemoryRequirements( VkBuffer hBuffer, - VkMemoryRequirements& memReq, - bool& requiresDedicatedAllocation, - bool& prefersDedicatedAllocation) const; + VkMemoryRequirements &memReq, + bool &requiresDedicatedAllocation, + bool &prefersDedicatedAllocation) const; void GetImageMemoryRequirements( VkImage hImage, - VkMemoryRequirements& memReq, - bool& requiresDedicatedAllocation, - bool& prefersDedicatedAllocation) const; + VkMemoryRequirements &memReq, + bool &requiresDedicatedAllocation, + bool &prefersDedicatedAllocation) const; VkResult FindMemoryTypeIndex( uint32_t memoryTypeBits, - const VmaAllocationCreateInfo* pAllocationCreateInfo, + const VmaAllocationCreateInfo *pAllocationCreateInfo, VkFlags bufImgUsage, // VkBufferCreateInfo::usage or VkImageCreateInfo::usage. UINT32_MAX if unknown. - uint32_t* pMemoryTypeIndex) const; + uint32_t *pMemoryTypeIndex) const; // Main allocation function. VkResult AllocateMemory( - const VkMemoryRequirements& vkMemReq, + const VkMemoryRequirements &vkMemReq, bool requiresDedicatedAllocation, bool prefersDedicatedAllocation, VkBuffer dedicatedBuffer, VkImage dedicatedImage, VkFlags dedicatedBufferImageUsage, // UINT32_MAX if unknown. - const VmaAllocationCreateInfo& createInfo, + const VmaAllocationCreateInfo &createInfo, VmaSuballocationType suballocType, size_t allocationCount, - VmaAllocation* pAllocations); + VmaAllocation *pAllocations); // Main deallocation function. 
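FindMemoryTypeIndex() above generalizes the canonical Vulkan memory-type query: a resource's VkMemoryRequirements::memoryTypeBits masks the candidates, and a candidate qualifies when its propertyFlags contain every required flag. A simplified sketch of that core loop (illustrative helper; VMA's real version also scores preferred flags and usage):

    #include <vulkan/vulkan.h>
    #include <cstdint>

    uint32_t FindMemoryType(const VkPhysicalDeviceMemoryProperties &memProps,
                            uint32_t memoryTypeBits, VkMemoryPropertyFlags required)
    {
        for (uint32_t i = 0; i < memProps.memoryTypeCount; ++i)
        {
            const bool allowed = (memoryTypeBits & (1u << i)) != 0;
            const bool hasAllFlags =
                (memProps.memoryTypes[i].propertyFlags & required) == required;
            if (allowed && hasAllFlags)
                return i;
        }
        return UINT32_MAX; // no suitable memory type found
    }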
void FreeMemory( size_t allocationCount, - const VmaAllocation* pAllocations); + const VmaAllocation *pAllocations); - void CalculateStatistics(VmaTotalStatistics* pStats); + void CalculateStatistics(VmaTotalStatistics *pStats); void GetHeapBudgets( - VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount); + VmaBudget *outBudgets, uint32_t firstHeap, uint32_t heapCount); #if VMA_STATS_STRING_ENABLED - void PrintDetailedMap(class VmaJsonWriter& json); + void PrintDetailedMap(class VmaJsonWriter &json); #endif - void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo); + void GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo *pAllocationInfo); - VkResult CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool); + VkResult CreatePool(const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool); void DestroyPool(VmaPool pool); - void GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats); - void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats); + void GetPoolStatistics(VmaPool pool, VmaStatistics *pPoolStats); + void CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics *pPoolStats); void SetCurrentFrameIndex(uint32_t frameIndex); uint32_t GetCurrentFrameIndex() const { return m_CurrentFrameIndex.load(); } @@ -11502,7 +11762,7 @@ struct VmaAllocator_T VkResult CheckCorruption(uint32_t memoryTypeBits); // Call to Vulkan function vkAllocateMemory with accompanying bookkeeping. - VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory); + VkResult AllocateVulkanMemory(const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory); // Call to Vulkan function vkFreeMemory with accompanying bookkeeping. void FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory); // Call to Vulkan function vkBindBufferMemory or vkBindBufferMemory2KHR. @@ -11510,27 +11770,27 @@ struct VmaAllocator_T VkDeviceMemory memory, VkDeviceSize memoryOffset, VkBuffer buffer, - const void* pNext); + const void *pNext); // Call to Vulkan function vkBindImageMemory or vkBindImageMemory2KHR. VkResult BindVulkanImage( VkDeviceMemory memory, VkDeviceSize memoryOffset, VkImage image, - const void* pNext); + const void *pNext); - VkResult Map(VmaAllocation hAllocation, void** ppData); + VkResult Map(VmaAllocation hAllocation, void **ppData); void Unmap(VmaAllocation hAllocation); VkResult BindBufferMemory( VmaAllocation hAllocation, VkDeviceSize allocationLocalOffset, VkBuffer hBuffer, - const void* pNext); + const void *pNext); VkResult BindImageMemory( VmaAllocation hAllocation, VkDeviceSize allocationLocalOffset, VkImage hImage, - const void* pNext); + const void *pNext); VkResult FlushOrInvalidateAllocation( VmaAllocation hAllocation, @@ -11538,8 +11798,8 @@ struct VmaAllocator_T VMA_CACHE_OPERATION op); VkResult FlushOrInvalidateAllocations( uint32_t allocationCount, - const VmaAllocation* allocations, - const VkDeviceSize* offsets, const VkDeviceSize* sizes, + const VmaAllocation *allocations, + const VkDeviceSize *offsets, const VkDeviceSize *sizes, VMA_CACHE_OPERATION op); void FillAllocation(const VmaAllocation hAllocation, uint8_t pattern); @@ -11578,13 +11838,13 @@ struct VmaAllocator_T // Global bit mask AND-ed with any memoryTypeBits to disallow certain memory types. 
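The Map/Unmap and FlushOrInvalidateAllocation members declared above surface as vmaMapMemory, vmaUnmapMemory and vmaFlushAllocation in the public API. A typical host-write sequence, assuming the allocator and allocation were created earlier:

    #include <cstring>

    // 'allocator' and 'allocation' come from earlier vmaCreateAllocator/vmaCreateBuffer calls.
    static void UploadToAllocation(VmaAllocator allocator, VmaAllocation allocation, const void *srcData, size_t srcSize)
    {
        void *mapped = nullptr;
        if (vmaMapMemory(allocator, allocation, &mapped) == VK_SUCCESS)
        {
            std::memcpy(mapped, srcData, srcSize);
            // A no-op on HOST_COHERENT memory; required on non-coherent types.
            vmaFlushAllocation(allocator, allocation, 0, VK_WHOLE_SIZE);
            vmaUnmapMemory(allocator, allocation);
        }
    }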
uint32_t m_GlobalMemoryTypeBits; - void ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions); + void ImportVulkanFunctions(const VmaVulkanFunctions *pVulkanFunctions); #if VMA_STATIC_VULKAN_FUNCTIONS == 1 void ImportVulkanFunctions_Static(); #endif - void ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions); + void ImportVulkanFunctions_Custom(const VmaVulkanFunctions *pVulkanFunctions); #if VMA_DYNAMIC_VULKAN_FUNCTIONS == 1 void ImportVulkanFunctions_Dynamic(); @@ -11602,13 +11862,13 @@ struct VmaAllocator_T VkBuffer dedicatedBuffer, VkImage dedicatedImage, VkFlags dedicatedBufferImageUsage, - const VmaAllocationCreateInfo& createInfo, + const VmaAllocationCreateInfo &createInfo, uint32_t memTypeIndex, VmaSuballocationType suballocType, - VmaDedicatedAllocationList& dedicatedAllocations, - VmaBlockVector& blockVector, + VmaDedicatedAllocationList &dedicatedAllocations, + VmaBlockVector &blockVector, size_t allocationCount, - VmaAllocation* pAllocations); + VmaAllocation *pAllocations); // Helper function only to be used inside AllocateDedicatedMemory. VkResult AllocateDedicatedMemoryPage( @@ -11616,42 +11876,42 @@ struct VmaAllocator_T VkDeviceSize size, VmaSuballocationType suballocType, uint32_t memTypeIndex, - const VkMemoryAllocateInfo& allocInfo, + const VkMemoryAllocateInfo &allocInfo, bool map, bool isUserDataString, bool isMappingAllowed, - void* pUserData, - VmaAllocation* pAllocation); + void *pUserData, + VmaAllocation *pAllocation); // Allocates and registers new VkDeviceMemory specifically for dedicated allocations. VkResult AllocateDedicatedMemory( VmaPool pool, VkDeviceSize size, VmaSuballocationType suballocType, - VmaDedicatedAllocationList& dedicatedAllocations, + VmaDedicatedAllocationList &dedicatedAllocations, uint32_t memTypeIndex, bool map, bool isUserDataString, bool isMappingAllowed, bool canAliasMemory, - void* pUserData, + void *pUserData, float priority, VkBuffer dedicatedBuffer, VkImage dedicatedImage, VkFlags dedicatedBufferImageUsage, size_t allocationCount, - VmaAllocation* pAllocations, - const void* pNextChain = nullptr); + VmaAllocation *pAllocations, + const void *pNextChain = nullptr); void FreeDedicatedMemory(const VmaAllocation allocation); VkResult CalcMemTypeParams( - VmaAllocationCreateInfo& outCreateInfo, + VmaAllocationCreateInfo &outCreateInfo, uint32_t memTypeIndex, VkDeviceSize size, size_t allocationCount); VkResult CalcAllocationParams( - VmaAllocationCreateInfo& outCreateInfo, + VmaAllocationCreateInfo &outCreateInfo, bool dedicatedRequired, bool dedicatedPreferred); @@ -11665,53 +11925,52 @@ struct VmaAllocator_T bool GetFlushOrInvalidateRange( VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size, - VkMappedMemoryRange& outRange) const; + VkMappedMemoryRange &outRange) const; #if VMA_MEMORY_BUDGET void UpdateVulkanBudget(); #endif // #if VMA_MEMORY_BUDGET }; - #ifndef _VMA_MEMORY_FUNCTIONS -static void* VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment) +static void *VmaMalloc(VmaAllocator hAllocator, size_t size, size_t alignment) { return VmaMalloc(&hAllocator->m_AllocationCallbacks, size, alignment); } -static void VmaFree(VmaAllocator hAllocator, void* ptr) +static void VmaFree(VmaAllocator hAllocator, void *ptr) { VmaFree(&hAllocator->m_AllocationCallbacks, ptr); } -template<typename T> -static T* VmaAllocate(VmaAllocator hAllocator) +template <typename T> +static T *VmaAllocate(VmaAllocator hAllocator) { - return (T*)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T)); + return (T *)VmaMalloc(hAllocator, sizeof(T), VMA_ALIGN_OF(T)); } -template<typename T> -static T* VmaAllocateArray(VmaAllocator hAllocator, size_t count) +template <typename T> +static T *VmaAllocateArray(VmaAllocator hAllocator, size_t count) { - return (T*)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T)); + return (T *)VmaMalloc(hAllocator, sizeof(T) * count, VMA_ALIGN_OF(T)); } -template<typename T> -static void vma_delete(VmaAllocator hAllocator, T* ptr) +template <typename T> +static void vma_delete(VmaAllocator hAllocator, T *ptr) { - if(ptr != VMA_NULL) + if (ptr != VMA_NULL) { ptr->~T(); VmaFree(hAllocator, ptr); } } -template<typename T> -static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count) +template <typename T> +static void vma_delete_array(VmaAllocator hAllocator, T *ptr, size_t count) { - if(ptr != VMA_NULL) + if (ptr != VMA_NULL) { - for(size_t i = count; i--; ) + for (size_t i = count; i--;) ptr[i].~T(); VmaFree(hAllocator, ptr); } @@ -11721,11 +11980,13 @@ static void vma_delete_array(VmaAllocator hAllocator, T* ptr, size_t count) #ifndef _VMA_DEVICE_MEMORY_BLOCK_FUNCTIONS VmaDeviceMemoryBlock::VmaDeviceMemoryBlock(VmaAllocator hAllocator) : m_pMetadata(VMA_NULL), - m_MemoryTypeIndex(UINT32_MAX), - m_Id(0), - m_hMemory(VK_NULL_HANDLE), - m_MapCount(0), - m_pMappedData(VMA_NULL) {} + m_MemoryTypeIndex(UINT32_MAX), + m_Id(0), + m_hMemory(VK_NULL_HANDLE), + m_MapCount(0), + m_pMappedData(VMA_NULL) +{ +} VmaDeviceMemoryBlock::~VmaDeviceMemoryBlock() { @@ -11754,16 +12015,16 @@ void VmaDeviceMemoryBlock::Init( { case 0: m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(), - bufferImageGranularity, false); // isVirtual + bufferImageGranularity, false); // isVirtual break; case VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT: m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_Linear)(hAllocator->GetAllocationCallbacks(), - bufferImageGranularity, false); // isVirtual + bufferImageGranularity, false); // isVirtual break; default: VMA_ASSERT(0); m_pMetadata = vma_new(hAllocator, VmaBlockMetadata_TLSF)(hAllocator->GetAllocationCallbacks(), - bufferImageGranularity, false); // isVirtual + bufferImageGranularity, false); // isVirtual } m_pMetadata->Init(newSize); } @@ -11794,7 +12055,7 @@ void VmaDeviceMemoryBlock::PostAlloc(VmaAllocator hAllocator) void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator) { VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); - if(m_MappingHysteresis.PostFree()) + if (m_MappingHysteresis.PostFree()) { VMA_ASSERT(m_MappingHysteresis.GetExtraMapping() == 0); if (m_MapCount == 0) @@ -11808,14 +12069,14 @@ void VmaDeviceMemoryBlock::PostFree(VmaAllocator hAllocator) bool VmaDeviceMemoryBlock::Validate() const { VMA_VALIDATE((m_hMemory != VK_NULL_HANDLE) && - (m_pMetadata->GetSize() != 0)); + (m_pMetadata->GetSize() != 0)); return m_pMetadata->Validate(); } VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator) { - void* pData = nullptr; + void *pData = nullptr; VkResult res = Map(hAllocator, 1, &pData); if (res != VK_SUCCESS) { @@ -11829,7 +12090,7 @@ VkResult VmaDeviceMemoryBlock::CheckCorruption(VmaAllocator hAllocator) return res; } -VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void** ppData) +VkResult VmaDeviceMemoryBlock::Map(VmaAllocator hAllocator, uint32_t count, void **ppData) { if (count == 0) { @@ -11899,7 +12160,7 @@ VkResult VmaDeviceMemoryBlock::WriteMagicValueAfterAllocation(VmaAllocator hAllo { VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); - void* pData;
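The vma_new/vma_delete family above pairs raw bytes from the user's allocation callbacks with placement new and an explicit destructor call. The same idiom as a self-contained sketch (SketchNew/SketchDelete are illustrative names; std::malloc stands in for VmaMalloc):

    #include <cstdlib> // std::malloc, std::free
    #include <new>     // placement new
    #include <utility> // std::forward

    template <typename T, typename... Args>
    static T *SketchNew(Args &&...args)
    {
        void *bytes = std::malloc(sizeof(T));
        if (bytes == nullptr)
            return nullptr;
        return new (bytes) T(std::forward<Args>(args)...); // construct in place
    }

    template <typename T>
    static void SketchDelete(T *ptr)
    {
        if (ptr != nullptr)
        {
            ptr->~T();      // destroy in place
            std::free(ptr); // then release the raw bytes
        }
    }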
+ void *pData; VkResult res = Map(hAllocator, 1, &pData); if (res != VK_SUCCESS) { @@ -11916,7 +12177,7 @@ VkResult VmaDeviceMemoryBlock::ValidateMagicValueAfterAllocation(VmaAllocator hA { VMA_ASSERT(VMA_DEBUG_MARGIN > 0 && VMA_DEBUG_MARGIN % 4 == 0 && VMA_DEBUG_DETECT_CORRUPTION); - void* pData; + void *pData; VkResult res = Map(hAllocator, 1, &pData); if (res != VK_SUCCESS) { @@ -11937,12 +12198,12 @@ VkResult VmaDeviceMemoryBlock::BindBufferMemory( const VmaAllocation hAllocation, VkDeviceSize allocationLocalOffset, VkBuffer hBuffer, - const void* pNext) + const void *pNext) { VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && - hAllocation->GetBlock() == this); + hAllocation->GetBlock() == this); VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && - "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); @@ -11954,12 +12215,12 @@ VkResult VmaDeviceMemoryBlock::BindImageMemory( const VmaAllocation hAllocation, VkDeviceSize allocationLocalOffset, VkImage hImage, - const void* pNext) + const void *pNext) { VMA_ASSERT(hAllocation->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK && - hAllocation->GetBlock() == this); + hAllocation->GetBlock() == this); VMA_ASSERT(allocationLocalOffset < hAllocation->GetSize() && - "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); + "Invalid allocationLocalOffset. Did you forget that this offset is relative to the beginning of the allocation, not the whole memory block?"); const VkDeviceSize memoryOffset = hAllocation->GetOffset() + allocationLocalOffset; // This lock is important so that we don't call vkBind... and/or vkMap... simultaneously on the same VkDeviceMemory from multiple threads. 
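WriteMagicValueAfterAllocation/ValidateMagicValueAfterAllocation above implement VMA_DEBUG_DETECT_CORRUPTION: a known 32-bit pattern sits in the margin after each allocation and is re-read on free. The check in isolation (offset bookkeeping simplified; the value mirrors VMA_CORRUPTION_DETECTION_MAGIC_VALUE):

    #include <cstdint>
    #include <cstring>

    static const uint32_t kMagic = 0x7F84E666u;

    // Write the sentinel just past the allocation's payload.
    static void WriteMargin(char *mappedBlock, size_t allocEnd)
    {
        std::memcpy(mappedBlock + allocEnd, &kMagic, sizeof(kMagic));
    }

    // False means some write overran the allocation and hit the margin.
    static bool MarginIntact(const char *mappedBlock, size_t allocEnd)
    {
        uint32_t value = 0;
        std::memcpy(&value, mappedBlock + allocEnd, sizeof(value));
        return value == kMagic;
    }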
VmaMutexLock lock(m_MapAndBindMutex, hAllocator->m_UseMutex); @@ -11969,17 +12230,17 @@ VkResult VmaDeviceMemoryBlock::BindImageMemory( #ifndef _VMA_ALLOCATION_T_FUNCTIONS VmaAllocation_T::VmaAllocation_T(bool mappingAllowed) - : m_Alignment{ 1 }, - m_Size{ 0 }, - m_pUserData{ VMA_NULL }, - m_pName{ VMA_NULL }, - m_MemoryTypeIndex{ 0 }, - m_Type{ (uint8_t)ALLOCATION_TYPE_NONE }, - m_SuballocationType{ (uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN }, - m_MapCount{ 0 }, - m_Flags{ 0 } -{ - if(mappingAllowed) + : m_Alignment{1}, + m_Size{0}, + m_pUserData{VMA_NULL}, + m_pName{VMA_NULL}, + m_MemoryTypeIndex{0}, + m_Type{(uint8_t)ALLOCATION_TYPE_NONE}, + m_SuballocationType{(uint8_t)VMA_SUBALLOCATION_TYPE_UNKNOWN}, + m_MapCount{0}, + m_Flags{0} +{ + if (mappingAllowed) m_Flags |= (uint8_t)FLAG_MAPPING_ALLOWED; #if VMA_STATS_STRING_ENABLED @@ -11996,7 +12257,7 @@ VmaAllocation_T::~VmaAllocation_T() } void VmaAllocation_T::InitBlockAllocation( - VmaDeviceMemoryBlock* block, + VmaDeviceMemoryBlock *block, VmaAllocHandle allocHandle, VkDeviceSize alignment, VkDeviceSize size, @@ -12010,7 +12271,7 @@ void VmaAllocation_T::InitBlockAllocation( m_Alignment = alignment; m_Size = size; m_MemoryTypeIndex = memoryTypeIndex; - if(mapped) + if (mapped) { VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; @@ -12025,7 +12286,7 @@ void VmaAllocation_T::InitDedicatedAllocation( uint32_t memoryTypeIndex, VkDeviceMemory hMemory, VmaSuballocationType suballocationType, - void* pMappedData, + void *pMappedData, VkDeviceSize size) { VMA_ASSERT(m_Type == ALLOCATION_TYPE_NONE); @@ -12035,7 +12296,7 @@ void VmaAllocation_T::InitDedicatedAllocation( m_Size = size; m_MemoryTypeIndex = memoryTypeIndex; m_SuballocationType = (uint8_t)suballocationType; - if(pMappedData != VMA_NULL) + if (pMappedData != VMA_NULL) { VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); m_Flags |= (uint8_t)FLAG_PERSISTENT_MAP; @@ -12047,7 +12308,7 @@ void VmaAllocation_T::InitDedicatedAllocation( m_DedicatedAllocation.m_Next = VMA_NULL; } -void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char* pName) +void VmaAllocation_T::SetName(VmaAllocator hAllocator, const char *pName) { VMA_ASSERT(pName == VMA_NULL || pName != m_pName); @@ -12132,16 +12393,16 @@ VkDeviceMemory VmaAllocation_T::GetMemory() const } } -void* VmaAllocation_T::GetMappedData() const +void *VmaAllocation_T::GetMappedData() const { switch (m_Type) { case ALLOCATION_TYPE_BLOCK: if (m_MapCount != 0 || IsPersistentMap()) { - void* pBlockData = m_BlockAllocation.m_Block->GetMappedData(); + void *pBlockData = m_BlockAllocation.m_Block->GetMappedData(); VMA_ASSERT(pBlockData != VMA_NULL); - return (char*)pBlockData + GetOffset(); + return (char *)pBlockData + GetOffset(); } else { @@ -12186,7 +12447,7 @@ void VmaAllocation_T::BlockAllocUnmap() } } -VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void** ppData) +VkResult VmaAllocation_T::DedicatedAllocMap(VmaAllocator hAllocator, void **ppData) { VMA_ASSERT(GetType() == ALLOCATION_TYPE_DEDICATED); VMA_ASSERT(IsMappingAllowed() && "Mapping is not allowed on this allocation! 
Please use one of the new VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags when creating it."); @@ -12252,7 +12513,7 @@ void VmaAllocation_T::InitBufferImageUsage(uint32_t bufferImageUsage) m_BufferImageUsage = bufferImageUsage; } -void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const +void VmaAllocation_T::PrintParameters(class VmaJsonWriter &json) const { json.WriteString("Type"); json.WriteString(VMA_SUBALLOCATION_TYPE_NAMES[m_SuballocationType]); @@ -12279,7 +12540,7 @@ void VmaAllocation_T::PrintParameters(class VmaJsonWriter& json) const void VmaAllocation_T::FreeName(VmaAllocator hAllocator) { - if(m_pName) + if (m_pName) { VmaFreeString(hAllocator->GetAllocationCallbacks(), m_pName); m_pName = VMA_NULL; @@ -12300,25 +12561,27 @@ VmaBlockVector::VmaBlockVector( uint32_t algorithm, float priority, VkDeviceSize minAllocationAlignment, - void* pMemoryAllocateNext) + void *pMemoryAllocateNext) : m_hAllocator(hAllocator), - m_hParentPool(hParentPool), - m_MemoryTypeIndex(memoryTypeIndex), - m_PreferredBlockSize(preferredBlockSize), - m_MinBlockCount(minBlockCount), - m_MaxBlockCount(maxBlockCount), - m_BufferImageGranularity(bufferImageGranularity), - m_ExplicitBlockSize(explicitBlockSize), - m_Algorithm(algorithm), - m_Priority(priority), - m_MinAllocationAlignment(minAllocationAlignment), - m_pMemoryAllocateNext(pMemoryAllocateNext), - m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock*>(hAllocator->GetAllocationCallbacks())), - m_NextBlockId(0) {} + m_hParentPool(hParentPool), + m_MemoryTypeIndex(memoryTypeIndex), + m_PreferredBlockSize(preferredBlockSize), + m_MinBlockCount(minBlockCount), + m_MaxBlockCount(maxBlockCount), + m_BufferImageGranularity(bufferImageGranularity), + m_ExplicitBlockSize(explicitBlockSize), + m_Algorithm(algorithm), + m_Priority(priority), + m_MinAllocationAlignment(minAllocationAlignment), + m_pMemoryAllocateNext(pMemoryAllocateNext), + m_Blocks(VmaStlAllocator<VmaDeviceMemoryBlock *>(hAllocator->GetAllocationCallbacks())), + m_NextBlockId(0) +{ +} VmaBlockVector::~VmaBlockVector() { - for (size_t i = m_Blocks.size(); i--; ) + for (size_t i = m_Blocks.size(); i--;) { m_Blocks[i]->Destroy(m_hAllocator); vma_delete(m_hAllocator, m_Blocks[i]); @@ -12338,28 +12601,28 @@ VkResult VmaBlockVector::CreateMinBlocks() return VK_SUCCESS; } -void VmaBlockVector::AddStatistics(VmaStatistics& inoutStats) +void VmaBlockVector::AddStatistics(VmaStatistics &inoutStats) { VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); const size_t blockCount = m_Blocks.size(); for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) { - const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + const VmaDeviceMemoryBlock *const pBlock = m_Blocks[blockIndex]; VMA_ASSERT(pBlock); VMA_HEAVY_ASSERT(pBlock->Validate()); pBlock->m_pMetadata->AddStatistics(inoutStats); } } -void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics& inoutStats) +void VmaBlockVector::AddDetailedStatistics(VmaDetailedStatistics &inoutStats) { VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); const size_t blockCount = m_Blocks.size(); for (uint32_t blockIndex = 0; blockIndex < blockCount; ++blockIndex) { - const VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + const VmaDeviceMemoryBlock *const pBlock = m_Blocks[blockIndex]; VMA_ASSERT(pBlock); VMA_HEAVY_ASSERT(pBlock->Validate()); pBlock->m_pMetadata->AddDetailedStatistics(inoutStats); @@ -12376,18 +12639,18 @@ bool VmaBlockVector::IsCorruptionDetectionEnabled() const { const uint32_t requiredMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT; return (VMA_DEBUG_DETECT_CORRUPTION != 0) && - (VMA_DEBUG_MARGIN > 0) && - (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) && - (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags; + (VMA_DEBUG_MARGIN > 0) && + (m_Algorithm == 0 || m_Algorithm == VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT) && + (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & requiredMemFlags) == requiredMemFlags; } VkResult VmaBlockVector::Allocate( VkDeviceSize size, VkDeviceSize alignment, - const VmaAllocationCreateInfo& createInfo, + const VmaAllocationCreateInfo &createInfo, VmaSuballocationType suballocType, size_t allocationCount, - VmaAllocation* pAllocations) + VmaAllocation *pAllocations) { size_t allocIndex; VkResult res = VK_SUCCESS; @@ -12431,9 +12694,9 @@ VkResult VmaBlockVector::Allocate( VkResult VmaBlockVector::AllocatePage( VkDeviceSize size, VkDeviceSize alignment, - const VmaAllocationCreateInfo& createInfo, + const VmaAllocationCreateInfo &createInfo, VmaSuballocationType suballocType, - VmaAllocation* pAllocation) + VmaAllocation *pAllocation) { const bool isUpperAddress = (createInfo.flags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; @@ -12446,7 +12709,7 @@ VkResult VmaBlockVector::AllocatePage( } const bool canFallbackToDedicated = !HasExplicitBlockSize() && - (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0; + (createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0; const bool canCreateNewBlock = ((createInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0) && (m_Blocks.size() < m_MaxBlockCount) && @@ -12472,7 +12735,7 @@ VkResult VmaBlockVector::AllocatePage( // Use only last block. if (!m_Blocks.empty()) { - VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks.back(); + VmaDeviceMemoryBlock *const pCurrBlock = m_Blocks.back(); VMA_ASSERT(pCurrBlock); VkResult res = AllocateFromBlock( pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); @@ -12490,25 +12753,25 @@ VkResult VmaBlockVector::AllocatePage( { const bool isHostVisible = (m_hAllocator->m_MemProps.memoryTypes[m_MemoryTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0; - if(isHostVisible) + if (isHostVisible) { const bool isMappingAllowed = (createInfo.flags & - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; /* For non-mappable allocations, check blocks that are not mapped first. For mappable allocations, check blocks that are already mapped first. This way, having many blocks, we will separate mappable and non-mappable allocations, hopefully limiting the number of blocks that are mapped, which will help tools like RenderDoc. */ - for(size_t mappingI = 0; mappingI < 2; ++mappingI) + for (size_t mappingI = 0; mappingI < 2; ++mappingI) { // Forward order in m_Blocks - prefer blocks with smallest amount of free space. 
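The isHostVisible/isMappingAllowed branch above keys off the VMA_ALLOCATION_CREATE_HOST_ACCESS_* flags. On the caller side the pattern looks like this, assuming a valid VmaAllocator; the buffer size and usage are placeholders:

    static VkResult CreateMappedStagingBuffer(VmaAllocator allocator, VkBuffer *outBuffer, VmaAllocation *outAllocation, VmaAllocationInfo *outInfo)
    {
        VkBufferCreateInfo bufferInfo = {VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO};
        bufferInfo.size = 65536;
        bufferInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;

        VmaAllocationCreateInfo allocCreateInfo = {};
        allocCreateInfo.usage = VMA_MEMORY_USAGE_AUTO;
        allocCreateInfo.flags = VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_MAPPED_BIT;

        // On success, outInfo->pMappedData stays valid for the allocation's lifetime.
        return vmaCreateBuffer(allocator, &bufferInfo, &allocCreateInfo, outBuffer, outAllocation, outInfo);
    }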
for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) { - VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VmaDeviceMemoryBlock *const pCurrBlock = m_Blocks[blockIndex]; VMA_ASSERT(pCurrBlock); const bool isBlockMapped = pCurrBlock->GetMappedData() != VMA_NULL; - if((mappingI == 0) == (isMappingAllowed == isBlockMapped)) + if ((mappingI == 0) == (isMappingAllowed == isBlockMapped)) { VkResult res = AllocateFromBlock( pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); @@ -12527,7 +12790,7 @@ VkResult VmaBlockVector::AllocatePage( // Forward order in m_Blocks - prefer blocks with smallest amount of free space. for (size_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) { - VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VmaDeviceMemoryBlock *const pCurrBlock = m_Blocks[blockIndex]; VMA_ASSERT(pCurrBlock); VkResult res = AllocateFromBlock( pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); @@ -12543,9 +12806,9 @@ VkResult VmaBlockVector::AllocatePage( else // VMA_ALLOCATION_CREATE_STRATEGY_MIN_TIME_BIT { // Backward order in m_Blocks - prefer blocks with largest amount of free space. - for (size_t blockIndex = m_Blocks.size(); blockIndex--; ) + for (size_t blockIndex = m_Blocks.size(); blockIndex--;) { - VmaDeviceMemoryBlock* const pCurrBlock = m_Blocks[blockIndex]; + VmaDeviceMemoryBlock *const pCurrBlock = m_Blocks[blockIndex]; VMA_ASSERT(pCurrBlock); VkResult res = AllocateFromBlock(pCurrBlock, size, alignment, createInfo.flags, createInfo.pUserData, suballocType, strategy, pAllocation); if (res == VK_SUCCESS) @@ -12586,8 +12849,7 @@ VkResult VmaBlockVector::AllocatePage( } size_t newBlockIndex = 0; - VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? - CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; + VkResult res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; // Allocation of this size failed? Try 1/2, 1/4, 1/8 of m_PreferredBlockSize. if (!m_ExplicitBlockSize) { @@ -12598,8 +12860,7 @@ VkResult VmaBlockVector::AllocatePage( { newBlockSize = smallerNewBlockSize; ++newBlockSizeShift; - res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? - CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; + res = (newBlockSize <= freeMemory || !canFallbackToDedicated) ? 
CreateBlock(newBlockSize, &newBlockIndex) : VK_ERROR_OUT_OF_DEVICE_MEMORY; } else { @@ -12610,7 +12871,7 @@ VkResult VmaBlockVector::AllocatePage( if (res == VK_SUCCESS) { - VmaDeviceMemoryBlock* const pBlock = m_Blocks[newBlockIndex]; + VmaDeviceMemoryBlock *const pBlock = m_Blocks[newBlockIndex]; VMA_ASSERT(pBlock->m_pMetadata->GetSize() >= size); res = AllocateFromBlock( @@ -12634,7 +12895,7 @@ VkResult VmaBlockVector::AllocatePage( void VmaBlockVector::Free(const VmaAllocation hAllocation) { - VmaDeviceMemoryBlock* pBlockToDelete = VMA_NULL; + VmaDeviceMemoryBlock *pBlockToDelete = VMA_NULL; bool budgetExceeded = false; { @@ -12648,7 +12909,7 @@ void VmaBlockVector::Free(const VmaAllocation hAllocation) { VmaMutexLockWrite lock(m_Mutex, m_hAllocator->m_UseMutex); - VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock(); + VmaDeviceMemoryBlock *pBlock = hAllocation->GetBlock(); if (IsCorruptionDetectionEnabled()) { @@ -12684,7 +12945,7 @@ void VmaBlockVector::Free(const VmaAllocation hAllocation) // (This is optional, heuristics.) else if (hadEmptyBlockBeforeFree && canDeleteBlock) { - VmaDeviceMemoryBlock* pLastBlock = m_Blocks.back(); + VmaDeviceMemoryBlock *pLastBlock = m_Blocks.back(); if (pLastBlock->m_pMetadata->IsEmpty()) { pBlockToDelete = pLastBlock; @@ -12711,7 +12972,7 @@ void VmaBlockVector::Free(const VmaAllocation hAllocation) VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const { VkDeviceSize result = 0; - for (size_t i = m_Blocks.size(); i--; ) + for (size_t i = m_Blocks.size(); i--;) { result = VMA_MAX(result, m_Blocks[i]->m_pMetadata->GetSize()); if (result >= m_PreferredBlockSize) @@ -12722,7 +12983,7 @@ VkDeviceSize VmaBlockVector::CalcMaxBlockSize() const return result; } -void VmaBlockVector::Remove(VmaDeviceMemoryBlock* pBlock) +void VmaBlockVector::Remove(VmaDeviceMemoryBlock *pBlock) { for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) { @@ -12756,32 +13017,32 @@ void VmaBlockVector::IncrementallySortBlocks() void VmaBlockVector::SortByFreeSize() { VMA_SORT(m_Blocks.begin(), m_Blocks.end(), - [](VmaDeviceMemoryBlock* b1, VmaDeviceMemoryBlock* b2) -> bool - { - return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize(); - }); + [](VmaDeviceMemoryBlock *b1, VmaDeviceMemoryBlock *b2) -> bool + { + return b1->m_pMetadata->GetSumFreeSize() < b2->m_pMetadata->GetSumFreeSize(); + }); } VkResult VmaBlockVector::AllocateFromBlock( - VmaDeviceMemoryBlock* pBlock, + VmaDeviceMemoryBlock *pBlock, VkDeviceSize size, VkDeviceSize alignment, VmaAllocationCreateFlags allocFlags, - void* pUserData, + void *pUserData, VmaSuballocationType suballocType, uint32_t strategy, - VmaAllocation* pAllocation) + VmaAllocation *pAllocation) { const bool isUpperAddress = (allocFlags & VMA_ALLOCATION_CREATE_UPPER_ADDRESS_BIT) != 0; VmaAllocationRequest currRequest = {}; if (pBlock->m_pMetadata->CreateAllocationRequest( - size, - alignment, - isUpperAddress, - suballocType, - strategy, - &currRequest)) + size, + alignment, + isUpperAddress, + suballocType, + strategy, + &currRequest)) { return CommitAllocationRequest(currRequest, pBlock, alignment, allocFlags, pUserData, suballocType, pAllocation); } @@ -12789,18 +13050,18 @@ VkResult VmaBlockVector::AllocateFromBlock( } VkResult VmaBlockVector::CommitAllocationRequest( - VmaAllocationRequest& allocRequest, - VmaDeviceMemoryBlock* pBlock, + VmaAllocationRequest &allocRequest, + VmaDeviceMemoryBlock *pBlock, VkDeviceSize alignment, VmaAllocationCreateFlags allocFlags, - void* pUserData, + void 
*pUserData, VmaSuballocationType suballocType, - VmaAllocation* pAllocation) + VmaAllocation *pAllocation) { const bool mapped = (allocFlags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0; const bool isUserDataString = (allocFlags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0; const bool isMappingAllowed = (allocFlags & - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0; pBlock->PostAlloc(m_hAllocator); // Allocate from pCurrBlock. @@ -12815,17 +13076,12 @@ VkResult VmaBlockVector::CommitAllocationRequest( *pAllocation = m_hAllocator->m_AllocationObjectAllocator.Allocate(isMappingAllowed); pBlock->m_pMetadata->Alloc(allocRequest, suballocType, *pAllocation); - (*pAllocation)->InitBlockAllocation( - pBlock, - allocRequest.allocHandle, - alignment, - allocRequest.size, // Not size, as actual allocation size may be larger than requested! - m_MemoryTypeIndex, - suballocType, - mapped); + (*pAllocation)->InitBlockAllocation(pBlock, allocRequest.allocHandle, alignment, + allocRequest.size, // Not size, as actual allocation size may be larger than requested! + m_MemoryTypeIndex, suballocType, mapped); VMA_HEAVY_ASSERT(pBlock->Validate()); if (isUserDataString) - (*pAllocation)->SetName(m_hAllocator, (const char*)pUserData); + (*pAllocation)->SetName(m_hAllocator, (const char *)pUserData); else (*pAllocation)->SetUserData(m_hAllocator, pUserData); m_hAllocator->m_Budget.AddAllocation(m_hAllocator->MemoryTypeIndexToHeapIndex(m_MemoryTypeIndex), allocRequest.size); @@ -12841,16 +13097,16 @@ VkResult VmaBlockVector::CommitAllocationRequest( return VK_SUCCESS; } -VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIndex) +VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t *pNewBlockIndex) { - VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; + VkMemoryAllocateInfo allocInfo = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO}; allocInfo.pNext = m_pMemoryAllocateNext; allocInfo.memoryTypeIndex = m_MemoryTypeIndex; allocInfo.allocationSize = blockSize; #if VMA_BUFFER_DEVICE_ADDRESS // Every standalone block can potentially contain a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT - always enable the feature. - VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; + VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR}; if (m_hAllocator->m_UseKhrBufferDeviceAddress) { allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; @@ -12859,7 +13115,7 @@ VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIn #endif // VMA_BUFFER_DEVICE_ADDRESS #if VMA_MEMORY_PRIORITY - VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; + VkMemoryPriorityAllocateInfoEXT priorityInfo = {VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT}; if (m_hAllocator->m_UseExtMemoryPriority) { VMA_ASSERT(m_Priority >= 0.f && m_Priority <= 1.f); @@ -12870,7 +13126,7 @@ VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIn #if VMA_EXTERNAL_MEMORY // Attach VkExportMemoryAllocateInfoKHR if necessary. 
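VmaBlockVector::CreateBlock here conditionally chains VkMemoryAllocateFlagsInfo, VkMemoryPriorityAllocateInfoEXT and VkExportMemoryAllocateInfoKHR onto the VkMemoryAllocateInfo. The prepend pattern in isolation; the memory type index, size and priority values are placeholders:

    VkMemoryAllocateInfo allocInfo = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO};
    allocInfo.memoryTypeIndex = 0;
    allocInfo.allocationSize = 64ull << 20; // 64 MiB

    // Each optional struct is linked in ahead of the current chain head.
    VkMemoryAllocateFlagsInfo flagsInfo = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO};
    flagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT;
    flagsInfo.pNext = allocInfo.pNext;
    allocInfo.pNext = &flagsInfo;

    VkMemoryPriorityAllocateInfoEXT priorityInfo = {VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT};
    priorityInfo.priority = 0.5f; // VK_EXT_memory_priority, range 0.0..1.0
    priorityInfo.pNext = allocInfo.pNext;
    allocInfo.pNext = &priorityInfo;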
- VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR }; + VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = {VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR}; exportMemoryAllocInfo.handleTypes = m_hAllocator->GetExternalMemoryHandleTypeFlags(m_MemoryTypeIndex); if (exportMemoryAllocInfo.handleTypes != 0) { @@ -12888,7 +13144,7 @@ VkResult VmaBlockVector::CreateBlock(VkDeviceSize blockSize, size_t* pNewBlockIn // New VkDeviceMemory successfully created. // Create new Allocation for it. - VmaDeviceMemoryBlock* const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator); + VmaDeviceMemoryBlock *const pBlock = vma_new(m_hAllocator, VmaDeviceMemoryBlock)(m_hAllocator); pBlock->Init( m_hAllocator, m_hParentPool, @@ -12912,7 +13168,7 @@ bool VmaBlockVector::HasEmptyBlock() { for (size_t index = 0, count = m_Blocks.size(); index < count; ++index) { - VmaDeviceMemoryBlock* const pBlock = m_Blocks[index]; + VmaDeviceMemoryBlock *const pBlock = m_Blocks[index]; if (pBlock->m_pMetadata->IsEmpty()) { return true; @@ -12922,11 +13178,10 @@ bool VmaBlockVector::HasEmptyBlock() } #if VMA_STATS_STRING_ENABLED -void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter& json) +void VmaBlockVector::PrintDetailedMap(class VmaJsonWriter &json) { VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); - json.BeginObject(); for (size_t i = 0; i < m_Blocks.size(); ++i) { @@ -12955,7 +13210,7 @@ VkResult VmaBlockVector::CheckCorruption() VmaMutexLockRead lock(m_Mutex, m_hAllocator->m_UseMutex); for (uint32_t blockIndex = 0; blockIndex < m_Blocks.size(); ++blockIndex) { - VmaDeviceMemoryBlock* const pBlock = m_Blocks[blockIndex]; + VmaDeviceMemoryBlock *const pBlock = m_Blocks[blockIndex]; VMA_ASSERT(pBlock); VkResult res = pBlock->CheckCorruption(m_hAllocator); if (res != VK_SUCCESS) @@ -12971,11 +13226,11 @@ VkResult VmaBlockVector::CheckCorruption() #ifndef _VMA_DEFRAGMENTATION_CONTEXT_FUNCTIONS VmaDefragmentationContext_T::VmaDefragmentationContext_T( VmaAllocator hAllocator, - const VmaDefragmentationInfo& info) + const VmaDefragmentationInfo &info) : m_MaxPassBytes(info.maxBytesPerPass == 0 ? VK_WHOLE_SIZE : info.maxBytesPerPass), - m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? UINT32_MAX : info.maxAllocationsPerPass), - m_MoveAllocator(hAllocator->GetAllocationCallbacks()), - m_Moves(m_MoveAllocator) + m_MaxPassAllocations(info.maxAllocationsPerPass == 0 ? 
UINT32_MAX : info.maxAllocationsPerPass), + m_MoveAllocator(hAllocator->GetAllocationCallbacks()), + m_Moves(m_MoveAllocator) { m_Algorithm = info.flags & VMA_DEFRAGMENTATION_FLAG_ALGORITHM_MASK; @@ -12994,7 +13249,7 @@ VmaDefragmentationContext_T::VmaDefragmentationContext_T( m_pBlockVectors = hAllocator->m_pBlockVectors; for (uint32_t i = 0; i < m_BlockVectorCount; ++i) { - VmaBlockVector* vector = m_pBlockVectors[i]; + VmaBlockVector *vector = m_pBlockVectors[i]; if (vector != VMA_NULL) { vector->SetIncrementalSort(false); @@ -13031,7 +13286,7 @@ VmaDefragmentationContext_T::~VmaDefragmentationContext_T() { for (uint32_t i = 0; i < m_BlockVectorCount; ++i) { - VmaBlockVector* vector = m_pBlockVectors[i]; + VmaBlockVector *vector = m_pBlockVectors[i]; if (vector != VMA_NULL) vector->SetIncrementalSort(true); } @@ -13042,10 +13297,10 @@ VmaDefragmentationContext_T::~VmaDefragmentationContext_T() switch (m_Algorithm) { case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_BALANCED_BIT: - vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateBalanced*>(m_AlgorithmState), m_BlockVectorCount); + vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateBalanced *>(m_AlgorithmState), m_BlockVectorCount); break; case VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT: - vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateExtensive*>(m_AlgorithmState), m_BlockVectorCount); + vma_delete_array(m_MoveAllocator.m_pCallbacks, reinterpret_cast<StateExtensive *>(m_AlgorithmState), m_BlockVectorCount); break; default: VMA_ASSERT(0); @@ -13053,7 +13308,7 @@ VmaDefragmentationContext_T::~VmaDefragmentationContext_T() } } -VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo& moveInfo) +VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPassMoveInfo &moveInfo) { if (m_PoolBlockVector != VMA_NULL) { @@ -13097,7 +13352,7 @@ VkResult VmaDefragmentationContext_T::DefragmentPassBegin(VmaDefragmentationPass return VK_SUCCESS; } -VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo& moveInfo) +VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMoveInfo &moveInfo) { VMA_ASSERT(moveInfo.moveCount > 0 ?
moveInfo.pMoves != VMA_NULL : true); @@ -13109,12 +13364,12 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo VmaAllocator allocator = VMA_NULL; for (uint32_t i = 0; i < moveInfo.moveCount; ++i) { - VmaDefragmentationMove& move = moveInfo.pMoves[i]; + VmaDefragmentationMove &move = moveInfo.pMoves[i]; size_t prevCount = 0, currentCount = 0; VkDeviceSize freedBlockSize = 0; uint32_t vectorIndex; - VmaBlockVector* vector; + VmaBlockVector *vector; if (m_PoolBlockVector != VMA_NULL) { vectorIndex = 0; @@ -13135,9 +13390,9 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo if (mapCount > 0) { allocator = vector->m_hAllocator; - VmaDeviceMemoryBlock* newMapBlock = move.srcAllocation->GetBlock(); + VmaDeviceMemoryBlock *newMapBlock = move.srcAllocation->GetBlock(); bool notPresent = true; - for (FragmentedBlock& block : mappedBlocks) + for (FragmentedBlock &block : mappedBlocks) { if (block.block == newMapBlock) { @@ -13147,7 +13402,7 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo } } if (notPresent) - mappedBlocks.push_back({ mapCount, newMapBlock }); + mappedBlocks.push_back({mapCount, newMapBlock}); } // Scope for locks, Free have it's own lock @@ -13171,9 +13426,9 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo --m_PassStats.allocationsMoved; vector->Free(move.dstTmpAllocation); - VmaDeviceMemoryBlock* newBlock = move.srcAllocation->GetBlock(); + VmaDeviceMemoryBlock *newBlock = move.srcAllocation->GetBlock(); bool notPresent = true; - for (const FragmentedBlock& block : immovableBlocks) + for (const FragmentedBlock &block : immovableBlocks) { if (block.block == newBlock) { @@ -13182,7 +13437,7 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo } } if (notPresent) - immovableBlocks.push_back({ vectorIndex, newBlock }); + immovableBlocks.push_back({vectorIndex, newBlock}); break; } case VMA_DEFRAGMENTATION_MOVE_OPERATION_DESTROY: @@ -13228,11 +13483,11 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo m_PassStats.bytesFreed += freedBlockSize; } - if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT && + if (m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT && m_AlgorithmState != VMA_NULL) { // Avoid unnecessary tries to allocate when new free block is available - StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[vectorIndex]; + StateExtensive &state = reinterpret_cast<StateExtensive *>(m_AlgorithmState)[vectorIndex]; if (state.firstFreeBlock != SIZE_MAX) { const size_t diff = prevCount - currentCount; @@ -13256,25 +13511,25 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo m_GlobalStats.bytesFreed += m_PassStats.bytesFreed; m_GlobalStats.bytesMoved += m_PassStats.bytesMoved; m_GlobalStats.deviceMemoryBlocksFreed += m_PassStats.deviceMemoryBlocksFreed; - m_PassStats = { 0 }; + m_PassStats = {0}; // Move blocks with immovable allocations according to algorithm if (immovableBlocks.size() > 0) { do { - if(m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT) + if (m_Algorithm == VMA_DEFRAGMENTATION_FLAG_ALGORITHM_EXTENSIVE_BIT) { if (m_AlgorithmState != VMA_NULL) { bool swapped = false; // Move to the start of free blocks range - for (const FragmentedBlock& block : immovableBlocks) + for (const FragmentedBlock &block : immovableBlocks) { - StateExtensive& state = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[block.data]; + StateExtensive &state = reinterpret_cast<StateExtensive *>(m_AlgorithmState)[block.data]; if (state.operation != StateExtensive::Operation::Cleanup) { - VmaBlockVector* vector = m_pBlockVectors[block.data]; + VmaBlockVector *vector = m_pBlockVectors[block.data]; VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); for (size_t i = 0, count = vector->GetBlockCount() - m_ImmovableBlockCount; i < count; ++i) @@ -13305,9 +13560,9 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo } // Move to the beginning - for (const FragmentedBlock& block : immovableBlocks) + for (const FragmentedBlock &block : immovableBlocks) { - VmaBlockVector* vector = m_pBlockVectors[block.data]; + VmaBlockVector *vector = m_pBlockVectors[block.data]; VmaMutexLockWrite lock(vector->GetMutex(), vector->GetAllocator()->m_UseMutex); for (size_t i = m_ImmovableBlockCount; i < vector->GetBlockCount(); ++i) @@ -13323,7 +13578,7 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo } // Bulk-map destination blocks - for (const FragmentedBlock& block : mappedBlocks) + for (const FragmentedBlock &block : mappedBlocks) { VkResult res = block.block->Map(allocator, block.data, VMA_NULL); VMA_ASSERT(res == VK_SUCCESS); @@ -13331,7 +13586,7 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd(VmaDefragmentationPassMo return result; } -bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector& vector, size_t index) +bool VmaDefragmentationContext_T::ComputeDefragmentation(VmaBlockVector &vector, size_t index) { switch (m_Algorithm) { @@ -13350,7 +13605,7 @@ } VmaDefragmentationContext_T::MoveAllocationData VmaDefragmentationContext_T::GetMoveData( - VmaAllocHandle handle, VmaBlockMetadata* metadata) + VmaAllocHandle handle, VmaBlockMetadata *metadata) { MoveAllocationData moveData; moveData.move.srcAllocation = (VmaAllocation)metadata->GetAllocationUserData(handle); @@ -13389,19 +13644,20 @@ bool VmaDefragmentationContext_T::IncrementCounters(VkDeviceSize bytes) if (++m_PassStats.allocationsMoved >= m_MaxPassAllocations || m_PassStats.bytesMoved >= m_MaxPassBytes) { VMA_ASSERT((m_PassStats.allocationsMoved == m_MaxPassAllocations || - m_PassStats.bytesMoved == m_MaxPassBytes) && "Exceeded maximal pass threshold!"); + m_PassStats.bytesMoved == m_MaxPassBytes) && + "Exceeded maximal pass threshold!"); return true; } return false; } -bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, VmaDeviceMemoryBlock* block) +bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector &vector, VmaDeviceMemoryBlock *block) { - VmaBlockMetadata* metadata = block->m_pMetadata; + VmaBlockMetadata *metadata = block->m_pMetadata; for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); - handle != VK_NULL_HANDLE; - handle = metadata->GetNextAllocation(handle)) + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) { MoveAllocationData moveData = GetMoveData(handle, metadata); // Ignore newly created allocations by defragmentation algorithm @@ -13424,23 +13680,23 @@ bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, Vma { VmaAllocationRequest request = {}; if (metadata->CreateAllocationRequest( - moveData.size, - moveData.alignment, - false, - moveData.type, - VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, - &request)) + moveData.size, + moveData.alignment, + false, + moveData.type, +
VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) { if (metadata->GetAllocationOffset(request.allocHandle) < offset) { if (vector.CommitAllocationRequest( - request, - block, - moveData.alignment, - moveData.flags, - this, - moveData.type, - &moveData.move.dstTmpAllocation) == VK_SUCCESS) + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &moveData.move.dstTmpAllocation) == VK_SUCCESS) { m_Moves.push_back(moveData.move); if (IncrementCounters(moveData.size)) @@ -13453,21 +13709,21 @@ bool VmaDefragmentationContext_T::ReallocWithinBlock(VmaBlockVector& vector, Vma return false; } -bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData& data, VmaBlockVector& vector) +bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, MoveAllocationData &data, VmaBlockVector &vector) { for (; start < end; ++start) { - VmaDeviceMemoryBlock* dstBlock = vector.GetBlock(start); + VmaDeviceMemoryBlock *dstBlock = vector.GetBlock(start); if (dstBlock->m_pMetadata->GetSumFreeSize() >= data.size) { if (vector.AllocateFromBlock(dstBlock, - data.size, - data.alignment, - data.flags, - this, - data.type, - 0, - &data.move.dstTmpAllocation) == VK_SUCCESS) + data.size, + data.alignment, + data.flags, + this, + data.type, + 0, + &data.move.dstTmpAllocation) == VK_SUCCESS) { m_Moves.push_back(data.move); if (IncrementCounters(data.size)) @@ -13479,18 +13735,18 @@ bool VmaDefragmentationContext_T::AllocInOtherBlock(size_t start, size_t end, Mo return false; } -bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& vector) +bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector &vector) { // Move only between blocks // Go through allocations in last blocks and try to fit them inside first ones for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) { - VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata; + VmaBlockMetadata *metadata = vector.GetBlock(i)->m_pMetadata; for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); - handle != VK_NULL_HANDLE; - handle = metadata->GetNextAllocation(handle)) + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) { MoveAllocationData moveData = GetMoveData(handle, metadata); // Ignore newly created allocations by defragmentation algorithm @@ -13516,14 +13772,14 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Fast(VmaBlockVector& ve return false; } -bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector& vector, size_t index, bool update) +bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector &vector, size_t index, bool update) { // Go over every allocation and try to fit it in previous blocks at lowest offsets, // if not possible: realloc within single block to minimize offset (exclude offset == 0), // but only if there are noticeable gaps between them (some heuristic, ex. 
average size of allocation in block) VMA_ASSERT(m_AlgorithmState != VMA_NULL); - StateBalanced& vectorState = reinterpret_cast<StateBalanced*>(m_AlgorithmState)[index]; + StateBalanced &vectorState = reinterpret_cast<StateBalanced *>(m_AlgorithmState)[index]; if (update && vectorState.avgAllocSize == UINT64_MAX) UpdateVectorStatistics(vector, vectorState); @@ -13531,13 +13787,13 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector VkDeviceSize minimalFreeRegion = vectorState.avgFreeSize / 2; for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) { - VmaDeviceMemoryBlock* block = vector.GetBlock(i); - VmaBlockMetadata* metadata = block->m_pMetadata; + VmaDeviceMemoryBlock *block = vector.GetBlock(i); + VmaBlockMetadata *metadata = block->m_pMetadata; VkDeviceSize prevFreeRegionSize = 0; for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); - handle != VK_NULL_HANDLE; - handle = metadata->GetNextAllocation(handle)) + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) { MoveAllocationData moveData = GetMoveData(handle, metadata); // Ignore newly created allocations by defragmentation algorithm @@ -13573,23 +13829,23 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector { VmaAllocationRequest request = {}; if (metadata->CreateAllocationRequest( - moveData.size, - moveData.alignment, - false, - moveData.type, - VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, - &request)) + moveData.size, + moveData.alignment, + false, + moveData.type, + VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) { if (metadata->GetAllocationOffset(request.allocHandle) < offset) { if (vector.CommitAllocationRequest( - request, - block, - moveData.alignment, - moveData.flags, - this, - moveData.type, - &moveData.move.dstTmpAllocation) == VK_SUCCESS) + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &moveData.move.dstTmpAllocation) == VK_SUCCESS) { m_Moves.push_back(moveData.move); if (IncrementCounters(moveData.size)) @@ -13612,19 +13868,19 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Balanced(VmaBlockVector return false; } -bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& vector) +bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector &vector) { // Go over every allocation and try to fit it in previous blocks at lowest offsets, // if not possible: realloc within single block to minimize offset (exclude offset == 0) for (size_t i = vector.GetBlockCount() - 1; i > m_ImmovableBlockCount; --i) { - VmaDeviceMemoryBlock* block = vector.GetBlock(i); - VmaBlockMetadata* metadata = block->m_pMetadata; + VmaDeviceMemoryBlock *block = vector.GetBlock(i); + VmaBlockMetadata *metadata = block->m_pMetadata; for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); - handle != VK_NULL_HANDLE; - handle = metadata->GetNextAllocation(handle)) + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) { MoveAllocationData moveData = GetMoveData(handle, metadata); // Ignore newly created allocations by defragmentation algorithm @@ -13653,23 +13909,23 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& ve { VmaAllocationRequest request = {}; if (metadata->CreateAllocationRequest( - moveData.size, - moveData.alignment, - false, - moveData.type, - VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, - &request)) + moveData.size, + moveData.alignment, + false, + moveData.type, +
VMA_ALLOCATION_CREATE_STRATEGY_MIN_OFFSET_BIT, + &request)) { if (metadata->GetAllocationOffset(request.allocHandle) < offset) { if (vector.CommitAllocationRequest( - request, - block, - moveData.alignment, - moveData.flags, - this, - moveData.type, - &moveData.move.dstTmpAllocation) == VK_SUCCESS) + request, + block, + moveData.alignment, + moveData.flags, + this, + moveData.type, + &moveData.move.dstTmpAllocation) == VK_SUCCESS) { m_Moves.push_back(moveData.move); if (IncrementCounters(moveData.size)) @@ -13683,7 +13939,7 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Full(VmaBlockVector& ve return false; } -bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector& vector, size_t index) +bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVector &vector, size_t index) { // First free single block, then populate it to the brim, then free another block, and so on @@ -13693,7 +13949,7 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVecto VMA_ASSERT(m_AlgorithmState != VMA_NULL); - StateExtensive& vectorState = reinterpret_cast<StateExtensive*>(m_AlgorithmState)[index]; + StateExtensive &vectorState = reinterpret_cast<StateExtensive *>(m_AlgorithmState)[index]; bool texturePresent = false, bufferPresent = false, otherPresent = false; switch (vectorState.operation) @@ -13713,12 +13969,12 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVecto // No free blocks, have to clear last one size_t last = (vectorState.firstFreeBlock == SIZE_MAX ? vector.GetBlockCount() : vectorState.firstFreeBlock) - 1; - VmaBlockMetadata* freeMetadata = vector.GetBlock(last)->m_pMetadata; + VmaBlockMetadata *freeMetadata = vector.GetBlock(last)->m_pMetadata; const size_t prevMoveCount = m_Moves.size(); for (VmaAllocHandle handle = freeMetadata->GetAllocationListBegin(); - handle != VK_NULL_HANDLE; - handle = freeMetadata->GetNextAllocation(handle)) + handle != VK_NULL_HANDLE; + handle = freeMetadata->GetNextAllocation(handle)) { MoveAllocationData moveData = GetMoveData(handle, freeMetadata); switch (CheckCounters(moveData.move.srcAllocation->GetSize())) @@ -13787,7 +14043,7 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVecto case StateExtensive::Operation::MoveTextures: { if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL, vector, - vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) + vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) { if (texturePresent) { @@ -13813,7 +14069,7 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVecto case StateExtensive::Operation::MoveBuffers: { if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_BUFFER, vector, - vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) + vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) { if (bufferPresent) { @@ -13838,7 +14094,7 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVecto case StateExtensive::Operation::MoveAll: { if (MoveDataToFreeBlocks(VMA_SUBALLOCATION_TYPE_FREE, vector, - vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) + vectorState.firstFreeBlock, texturePresent, bufferPresent, otherPresent)) { if (otherPresent) { @@ -13871,7 +14127,7 @@ bool VmaDefragmentationContext_T::ComputeDefragmentation_Extensive(VmaBlockVecto return false; } -void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, StateBalanced& state) +void
VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector &vector, StateBalanced &state) { size_t allocCount = 0; size_t freeCount = 0; @@ -13880,7 +14136,7 @@ void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, for (size_t i = 0; i < vector.GetBlockCount(); ++i) { - VmaBlockMetadata* metadata = vector.GetBlock(i)->m_pMetadata; + VmaBlockMetadata *metadata = vector.GetBlock(i)->m_pMetadata; allocCount += metadata->GetAllocationCount(); freeCount += metadata->GetFreeRegionsCount(); @@ -13893,18 +14149,18 @@ void VmaDefragmentationContext_T::UpdateVectorStatistics(VmaBlockVector& vector, } bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType currentType, - VmaBlockVector& vector, size_t firstFreeBlock, - bool& texturePresent, bool& bufferPresent, bool& otherPresent) + VmaBlockVector &vector, size_t firstFreeBlock, + bool &texturePresent, bool &bufferPresent, bool &otherPresent) { const size_t prevMoveCount = m_Moves.size(); - for (size_t i = firstFreeBlock ; i;) + for (size_t i = firstFreeBlock; i;) { - VmaDeviceMemoryBlock* block = vector.GetBlock(--i); - VmaBlockMetadata* metadata = block->m_pMetadata; + VmaDeviceMemoryBlock *block = vector.GetBlock(--i); + VmaBlockMetadata *metadata = block->m_pMetadata; for (VmaAllocHandle handle = metadata->GetAllocationListBegin(); - handle != VK_NULL_HANDLE; - handle = metadata->GetNextAllocation(handle)) + handle != VK_NULL_HANDLE; + handle = metadata->GetNextAllocation(handle)) { MoveAllocationData moveData = GetMoveData(handle, metadata); // Ignore newly created allocations by defragmentation algorithm @@ -13945,32 +14201,34 @@ bool VmaDefragmentationContext_T::MoveDataToFreeBlocks(VmaSuballocationType curr #ifndef _VMA_POOL_T_FUNCTIONS VmaPool_T::VmaPool_T( VmaAllocator hAllocator, - const VmaPoolCreateInfo& createInfo, + const VmaPoolCreateInfo &createInfo, VkDeviceSize preferredBlockSize) : m_BlockVector( - hAllocator, - this, // hParentPool - createInfo.memoryTypeIndex, - createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize, - createInfo.minBlockCount, - createInfo.maxBlockCount, - (createInfo.flags& VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(), - createInfo.blockSize != 0, // explicitBlockSize - createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm - createInfo.priority, - VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment), - createInfo.pMemoryAllocateNext), - m_Id(0), - m_Name(VMA_NULL) {} + hAllocator, + this, // hParentPool + createInfo.memoryTypeIndex, + createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize, + createInfo.minBlockCount, + createInfo.maxBlockCount, + (createInfo.flags & VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 
1 : hAllocator->GetBufferImageGranularity(), + createInfo.blockSize != 0, // explicitBlockSize + createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm + createInfo.priority, + VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment), + createInfo.pMemoryAllocateNext), + m_Id(0), + m_Name(VMA_NULL) +{ +} VmaPool_T::~VmaPool_T() { VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL); } -void VmaPool_T::SetName(const char* pName) +void VmaPool_T::SetName(const char *pName) { - const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks(); + const VkAllocationCallbacks *allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks(); VmaFreeString(allocs, m_Name); if (pName != VMA_NULL) @@ -13985,36 +14243,34 @@ void VmaPool_T::SetName(const char* pName) #endif // _VMA_POOL_T_FUNCTIONS #ifndef _VMA_ALLOCATOR_T_FUNCTIONS -VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) : - m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0), - m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0), - m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0), - m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0), - m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0), - m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0), - m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0), - m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0), - m_hDevice(pCreateInfo->device), - m_hInstance(pCreateInfo->instance), - m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL), - m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ? - *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks), - m_AllocationObjectAllocator(&m_AllocationCallbacks), - m_HeapSizeLimitMask(0), - m_DeviceMemoryCount(0), - m_PreferredLargeHeapBlockSize(0), - m_PhysicalDevice(pCreateInfo->physicalDevice), - m_GpuDefragmentationMemoryTypeBits(UINT32_MAX), - m_NextPoolId(0), - m_GlobalMemoryTypeBits(UINT32_MAX) -{ - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) +VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo *pCreateInfo) : m_UseMutex((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXTERNALLY_SYNCHRONIZED_BIT) == 0), + m_VulkanApiVersion(pCreateInfo->vulkanApiVersion != 0 ? 
pCreateInfo->vulkanApiVersion : VK_API_VERSION_1_0), + m_UseKhrDedicatedAllocation((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0), + m_UseKhrBindMemory2((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0), + m_UseExtMemoryBudget((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0), + m_UseAmdDeviceCoherentMemory((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_AMD_DEVICE_COHERENT_MEMORY_BIT) != 0), + m_UseKhrBufferDeviceAddress((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT) != 0), + m_UseExtMemoryPriority((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT) != 0), + m_hDevice(pCreateInfo->device), + m_hInstance(pCreateInfo->instance), + m_AllocationCallbacksSpecified(pCreateInfo->pAllocationCallbacks != VMA_NULL), + m_AllocationCallbacks(pCreateInfo->pAllocationCallbacks ? *pCreateInfo->pAllocationCallbacks : VmaEmptyAllocationCallbacks), + m_AllocationObjectAllocator(&m_AllocationCallbacks), + m_HeapSizeLimitMask(0), + m_DeviceMemoryCount(0), + m_PreferredLargeHeapBlockSize(0), + m_PhysicalDevice(pCreateInfo->physicalDevice), + m_GpuDefragmentationMemoryTypeBits(UINT32_MAX), + m_NextPoolId(0), + m_GlobalMemoryTypeBits(UINT32_MAX) +{ + if (m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { m_UseKhrDedicatedAllocation = false; m_UseKhrBindMemory2 = false; } - if(VMA_DEBUG_DETECT_CORRUPTION) + if (VMA_DEBUG_DETECT_CORRUPTION) { // Needs to be multiply of uint32_t size because we are going to write VMA_CORRUPTION_DETECTION_MAGIC_VALUE to it. VMA_ASSERT(VMA_DEBUG_MARGIN % sizeof(uint32_t) == 0); @@ -14022,59 +14278,59 @@ VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) : VMA_ASSERT(pCreateInfo->physicalDevice && pCreateInfo->device && pCreateInfo->instance); - if(m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0)) + if (m_VulkanApiVersion < VK_MAKE_VERSION(1, 1, 0)) { #if !(VMA_DEDICATED_ALLOCATION) - if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0) + if ((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT) != 0) { VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT set but required extensions are disabled by preprocessor macros."); } #endif #if !(VMA_BIND_MEMORY2) - if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0) + if ((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT) != 0) { VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_KHR_BIND_MEMORY2_BIT set but required extension is disabled by preprocessor macros."); } #endif } #if !(VMA_MEMORY_BUDGET) - if((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0) + if ((pCreateInfo->flags & VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT) != 0) { VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_BUDGET_BIT set but required extension is disabled by preprocessor macros."); } #endif #if !(VMA_BUFFER_DEVICE_ADDRESS) - if(m_UseKhrBufferDeviceAddress) + if (m_UseKhrBufferDeviceAddress) { VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT is set but required extension or Vulkan 1.2 is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); } #endif #if VMA_VULKAN_VERSION < 1003000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + if (m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) { VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_3 but required Vulkan version is disabled by preprocessor macros."); } #endif #if VMA_VULKAN_VERSION < 1002000 - if(m_VulkanApiVersion 
>= VK_MAKE_VERSION(1, 2, 0)) + if (m_VulkanApiVersion >= VK_MAKE_VERSION(1, 2, 0)) { VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_2 but required Vulkan version is disabled by preprocessor macros."); } #endif #if VMA_VULKAN_VERSION < 1001000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + if (m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { VMA_ASSERT(0 && "vulkanApiVersion >= VK_API_VERSION_1_1 but required Vulkan version is disabled by preprocessor macros."); } #endif #if !(VMA_MEMORY_PRIORITY) - if(m_UseExtMemoryPriority) + if (m_UseExtMemoryPriority) { VMA_ASSERT(0 && "VMA_ALLOCATOR_CREATE_EXT_MEMORY_PRIORITY_BIT is set but required extension is not available in your Vulkan header or its support in VMA has been disabled by a preprocessor macro."); } #endif - memset(&m_DeviceMemoryCallbacks, 0 ,sizeof(m_DeviceMemoryCallbacks)); + memset(&m_DeviceMemoryCallbacks, 0, sizeof(m_DeviceMemoryCallbacks)); memset(&m_PhysicalDeviceProperties, 0, sizeof(m_PhysicalDeviceProperties)); memset(&m_MemProps, 0, sizeof(m_MemProps)); @@ -14085,7 +14341,7 @@ VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) : memset(&m_TypeExternalMemoryHandleTypes, 0, sizeof(m_TypeExternalMemoryHandleTypes)); #endif // #if VMA_EXTERNAL_MEMORY - if(pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL) + if (pCreateInfo->pDeviceMemoryCallbacks != VMA_NULL) { m_DeviceMemoryCallbacks.pUserData = pCreateInfo->pDeviceMemoryCallbacks->pUserData; m_DeviceMemoryCallbacks.pfnAllocate = pCreateInfo->pDeviceMemoryCallbacks->pfnAllocate; @@ -14102,28 +14358,27 @@ VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) : VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.bufferImageGranularity)); VMA_ASSERT(VmaIsPow2(m_PhysicalDeviceProperties.limits.nonCoherentAtomSize)); - m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ? - pCreateInfo->preferredLargeHeapBlockSize : static_cast(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE); + m_PreferredLargeHeapBlockSize = (pCreateInfo->preferredLargeHeapBlockSize != 0) ? 
pCreateInfo->preferredLargeHeapBlockSize : static_cast(VMA_DEFAULT_LARGE_HEAP_BLOCK_SIZE); m_GlobalMemoryTypeBits = CalculateGlobalMemoryTypeBits(); #if VMA_EXTERNAL_MEMORY - if(pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL) + if (pCreateInfo->pTypeExternalMemoryHandleTypes != VMA_NULL) { memcpy(m_TypeExternalMemoryHandleTypes, pCreateInfo->pTypeExternalMemoryHandleTypes, - sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount()); + sizeof(VkExternalMemoryHandleTypeFlagsKHR) * GetMemoryTypeCount()); } #endif // #if VMA_EXTERNAL_MEMORY - if(pCreateInfo->pHeapSizeLimit != VMA_NULL) + if (pCreateInfo->pHeapSizeLimit != VMA_NULL) { - for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) + for (uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) { const VkDeviceSize limit = pCreateInfo->pHeapSizeLimit[heapIndex]; - if(limit != VK_WHOLE_SIZE) + if (limit != VK_WHOLE_SIZE) { m_HeapSizeLimitMask |= 1u << heapIndex; - if(limit < m_MemProps.memoryHeaps[heapIndex].size) + if (limit < m_MemProps.memoryHeaps[heapIndex].size) { m_MemProps.memoryHeaps[heapIndex].size = limit; } @@ -14131,10 +14386,10 @@ VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) : } } - for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { // Create only supported types - if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0) + if ((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0) { const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(memTypeIndex); m_pBlockVectors[memTypeIndex] = vma_new(this, VmaBlockVector)( @@ -14145,23 +14400,23 @@ VmaAllocator_T::VmaAllocator_T(const VmaAllocatorCreateInfo* pCreateInfo) : 0, SIZE_MAX, GetBufferImageGranularity(), - false, // explicitBlockSize - 0, // algorithm - 0.5f, // priority (0.5 is the default per Vulkan spec) + false, // explicitBlockSize + 0, // algorithm + 0.5f, // priority (0.5 is the default per Vulkan spec) GetMemoryTypeMinAlignment(memTypeIndex), // minAllocationAlignment - VMA_NULL); // // pMemoryAllocateNext + VMA_NULL); // // pMemoryAllocateNext // No need to call m_pBlockVectors[memTypeIndex][blockVectorTypeIndex]->CreateMinBlocks here, // becase minBlockCount is 0. 
} } } -VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo* pCreateInfo) +VkResult VmaAllocator_T::Init(const VmaAllocatorCreateInfo *pCreateInfo) { VkResult res = VK_SUCCESS; #if VMA_MEMORY_BUDGET - if(m_UseExtMemoryBudget) + if (m_UseExtMemoryBudget) { UpdateVulkanBudget(); } @@ -14174,19 +14429,19 @@ VmaAllocator_T::~VmaAllocator_T() { VMA_ASSERT(m_Pools.IsEmpty()); - for(size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--; ) + for (size_t memTypeIndex = GetMemoryTypeCount(); memTypeIndex--;) { vma_delete(this, m_pBlockVectors[memTypeIndex]); } } -void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions* pVulkanFunctions) +void VmaAllocator_T::ImportVulkanFunctions(const VmaVulkanFunctions *pVulkanFunctions) { #if VMA_STATIC_VULKAN_FUNCTIONS == 1 ImportVulkanFunctions_Static(); #endif - if(pVulkanFunctions != VMA_NULL) + if (pVulkanFunctions != VMA_NULL) { ImportVulkanFunctions_Custom(pVulkanFunctions); } @@ -14225,7 +14480,7 @@ void VmaAllocator_T::ImportVulkanFunctions_Static() // Vulkan 1.1 #if VMA_VULKAN_VERSION >= 1001000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + if (m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR = (PFN_vkGetBufferMemoryRequirements2)vkGetBufferMemoryRequirements2; m_VulkanFunctions.vkGetImageMemoryRequirements2KHR = (PFN_vkGetImageMemoryRequirements2)vkGetImageMemoryRequirements2; @@ -14235,14 +14490,14 @@ void VmaAllocator_T::ImportVulkanFunctions_Static() #endif #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + if (m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR = (PFN_vkGetPhysicalDeviceMemoryProperties2)vkGetPhysicalDeviceMemoryProperties2; } #endif #if VMA_VULKAN_VERSION >= 1003000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + if (m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) { m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements = (PFN_vkGetDeviceBufferMemoryRequirements)vkGetDeviceBufferMemoryRequirements; m_VulkanFunctions.vkGetDeviceImageMemoryRequirements = (PFN_vkGetDeviceImageMemoryRequirements)vkGetDeviceImageMemoryRequirements; @@ -14252,12 +14507,13 @@ void VmaAllocator_T::ImportVulkanFunctions_Static() #endif // VMA_STATIC_VULKAN_FUNCTIONS == 1 -void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVulkanFunctions) +void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions *pVulkanFunctions) { VMA_ASSERT(pVulkanFunctions != VMA_NULL); -#define VMA_COPY_IF_NOT_NULL(funcName) \ - if(pVulkanFunctions->funcName != VMA_NULL) m_VulkanFunctions.funcName = pVulkanFunctions->funcName; +#define VMA_COPY_IF_NOT_NULL(funcName) \ + if (pVulkanFunctions->funcName != VMA_NULL) \ + m_VulkanFunctions.funcName = pVulkanFunctions->funcName; VMA_COPY_IF_NOT_NULL(vkGetInstanceProcAddr); VMA_COPY_IF_NOT_NULL(vkGetDeviceProcAddr); @@ -14306,17 +14562,17 @@ void VmaAllocator_T::ImportVulkanFunctions_Custom(const VmaVulkanFunctions* pVul void VmaAllocator_T::ImportVulkanFunctions_Dynamic() { VMA_ASSERT(m_VulkanFunctions.vkGetInstanceProcAddr && m_VulkanFunctions.vkGetDeviceProcAddr && - "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass " - "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. 
" - "Other members can be null."); + "To use VMA_DYNAMIC_VULKAN_FUNCTIONS in new versions of VMA you now have to pass " + "VmaVulkanFunctions::vkGetInstanceProcAddr and vkGetDeviceProcAddr as VmaAllocatorCreateInfo::pVulkanFunctions. " + "Other members can be null."); #define VMA_FETCH_INSTANCE_FUNC(memberName, functionPointerType, functionNameString) \ - if(m_VulkanFunctions.memberName == VMA_NULL) \ - m_VulkanFunctions.memberName = \ + if (m_VulkanFunctions.memberName == VMA_NULL) \ + m_VulkanFunctions.memberName = \ (functionPointerType)m_VulkanFunctions.vkGetInstanceProcAddr(m_hInstance, functionNameString); #define VMA_FETCH_DEVICE_FUNC(memberName, functionPointerType, functionNameString) \ - if(m_VulkanFunctions.memberName == VMA_NULL) \ - m_VulkanFunctions.memberName = \ + if (m_VulkanFunctions.memberName == VMA_NULL) \ + m_VulkanFunctions.memberName = \ (functionPointerType)m_VulkanFunctions.vkGetDeviceProcAddr(m_hDevice, functionNameString); VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceProperties, PFN_vkGetPhysicalDeviceProperties, "vkGetPhysicalDeviceProperties"); @@ -14338,7 +14594,7 @@ void VmaAllocator_T::ImportVulkanFunctions_Dynamic() VMA_FETCH_DEVICE_FUNC(vkCmdCopyBuffer, PFN_vkCmdCopyBuffer, "vkCmdCopyBuffer"); #if VMA_VULKAN_VERSION >= 1001000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + if (m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2, "vkGetBufferMemoryRequirements2"); VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2, "vkGetImageMemoryRequirements2"); @@ -14348,18 +14604,18 @@ void VmaAllocator_T::ImportVulkanFunctions_Dynamic() #endif #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + if (m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2"); } - else if(m_UseExtMemoryBudget) + else if (m_UseExtMemoryBudget) { VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2, "vkGetPhysicalDeviceMemoryProperties2KHR"); } #endif #if VMA_DEDICATED_ALLOCATION - if(m_UseKhrDedicatedAllocation) + if (m_UseKhrDedicatedAllocation) { VMA_FETCH_DEVICE_FUNC(vkGetBufferMemoryRequirements2KHR, PFN_vkGetBufferMemoryRequirements2KHR, "vkGetBufferMemoryRequirements2KHR"); VMA_FETCH_DEVICE_FUNC(vkGetImageMemoryRequirements2KHR, PFN_vkGetImageMemoryRequirements2KHR, "vkGetImageMemoryRequirements2KHR"); @@ -14367,7 +14623,7 @@ void VmaAllocator_T::ImportVulkanFunctions_Dynamic() #endif #if VMA_BIND_MEMORY2 - if(m_UseKhrBindMemory2) + if (m_UseKhrBindMemory2) { VMA_FETCH_DEVICE_FUNC(vkBindBufferMemory2KHR, PFN_vkBindBufferMemory2KHR, "vkBindBufferMemory2KHR"); VMA_FETCH_DEVICE_FUNC(vkBindImageMemory2KHR, PFN_vkBindImageMemory2KHR, "vkBindImageMemory2KHR"); @@ -14375,18 +14631,18 @@ void VmaAllocator_T::ImportVulkanFunctions_Dynamic() #endif // #if VMA_BIND_MEMORY2 #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + if (m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2"); } - else if(m_UseExtMemoryBudget) + else if (m_UseExtMemoryBudget) { VMA_FETCH_INSTANCE_FUNC(vkGetPhysicalDeviceMemoryProperties2KHR, 
PFN_vkGetPhysicalDeviceMemoryProperties2KHR, "vkGetPhysicalDeviceMemoryProperties2KHR"); } #endif // #if VMA_MEMORY_BUDGET #if VMA_VULKAN_VERSION >= 1003000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + if (m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) { VMA_FETCH_DEVICE_FUNC(vkGetDeviceBufferMemoryRequirements, PFN_vkGetDeviceBufferMemoryRequirements, "vkGetDeviceBufferMemoryRequirements"); VMA_FETCH_DEVICE_FUNC(vkGetDeviceImageMemoryRequirements, PFN_vkGetDeviceImageMemoryRequirements, "vkGetDeviceImageMemoryRequirements"); @@ -14420,7 +14676,7 @@ void VmaAllocator_T::ValidateVulkanFunctions() VMA_ASSERT(m_VulkanFunctions.vkCmdCopyBuffer != VMA_NULL); #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation) + if (m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrDedicatedAllocation) { VMA_ASSERT(m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkGetImageMemoryRequirements2KHR != VMA_NULL); @@ -14428,7 +14684,7 @@ void VmaAllocator_T::ValidateVulkanFunctions() #endif #if VMA_BIND_MEMORY2 || VMA_VULKAN_VERSION >= 1001000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2) + if (m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0) || m_UseKhrBindMemory2) { VMA_ASSERT(m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL); @@ -14436,14 +14692,14 @@ void VmaAllocator_T::ValidateVulkanFunctions() #endif #if VMA_MEMORY_BUDGET || VMA_VULKAN_VERSION >= 1001000 - if(m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + if (m_UseExtMemoryBudget || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { VMA_ASSERT(m_VulkanFunctions.vkGetPhysicalDeviceMemoryProperties2KHR != VMA_NULL); } #endif #if VMA_VULKAN_VERSION >= 1003000 - if(m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) + if (m_VulkanApiVersion >= VK_MAKE_VERSION(1, 3, 0)) { VMA_ASSERT(m_VulkanFunctions.vkGetDeviceBufferMemoryRequirements != VMA_NULL); VMA_ASSERT(m_VulkanFunctions.vkGetDeviceImageMemoryRequirements != VMA_NULL); @@ -14467,13 +14723,13 @@ VkResult VmaAllocator_T::AllocateMemoryOfType( VkBuffer dedicatedBuffer, VkImage dedicatedImage, VkFlags dedicatedBufferImageUsage, - const VmaAllocationCreateInfo& createInfo, + const VmaAllocationCreateInfo &createInfo, uint32_t memTypeIndex, VmaSuballocationType suballocType, - VmaDedicatedAllocationList& dedicatedAllocations, - VmaBlockVector& blockVector, + VmaDedicatedAllocationList &dedicatedAllocations, + VmaBlockVector &blockVector, size_t allocationCount, - VmaAllocation* pAllocations) + VmaAllocation *pAllocations) { VMA_ASSERT(pAllocations != VMA_NULL); VMA_DEBUG_LOG_FORMAT(" AllocateMemory: MemoryTypeIndex=%u, AllocationCount=%zu, Size=%llu", memTypeIndex, allocationCount, size); @@ -14484,10 +14740,10 @@ VkResult VmaAllocator_T::AllocateMemoryOfType( memTypeIndex, size, allocationCount); - if(res != VK_SUCCESS) + if (res != VK_SUCCESS) return res; - if((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) + if ((finalCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) { return AllocateDedicatedMemory( pool, @@ -14498,7 +14754,7 @@ VkResult VmaAllocator_T::AllocateMemoryOfType( (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, (finalCreateInfo.flags & - 
(VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, finalCreateInfo.pUserData, finalCreateInfo.priority, @@ -14515,23 +14771,23 @@ VkResult VmaAllocator_T::AllocateMemoryOfType( (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) == 0 && (pool == VK_NULL_HANDLE || !blockVector.HasExplicitBlockSize()); - if(canAllocateDedicated) + if (canAllocateDedicated) { // Heuristics: Allocate dedicated memory if requested size if greater than half of preferred block size. - if(size > blockVector.GetPreferredBlockSize() / 2) + if (size > blockVector.GetPreferredBlockSize() / 2) { dedicatedPreferred = true; } // Protection against creating each allocation as dedicated when we reach or exceed heap size/budget, // which can quickly deplete maxMemoryAllocationCount: Don't prefer dedicated allocations when above // 3/4 of the maximum allocation count. - if(m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount < UINT32_MAX / 4 && + if (m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount < UINT32_MAX / 4 && m_DeviceMemoryCount.load() > m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount * 3 / 4) { dedicatedPreferred = false; } - if(dedicatedPreferred) + if (dedicatedPreferred) { res = AllocateDedicatedMemory( pool, @@ -14542,7 +14798,7 @@ VkResult VmaAllocator_T::AllocateMemoryOfType( (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, (finalCreateInfo.flags & - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, finalCreateInfo.pUserData, finalCreateInfo.priority, @@ -14552,7 +14808,7 @@ VkResult VmaAllocator_T::AllocateMemoryOfType( allocationCount, pAllocations, blockVector.GetAllocationNextPtr()); - if(res == VK_SUCCESS) + if (res == VK_SUCCESS) { // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here. VMA_DEBUG_LOG(" Allocated as DedicatedMemory"); @@ -14568,11 +14824,11 @@ VkResult VmaAllocator_T::AllocateMemoryOfType( suballocType, allocationCount, pAllocations); - if(res == VK_SUCCESS) + if (res == VK_SUCCESS) return VK_SUCCESS; // Try dedicated memory. 
- if(canAllocateDedicated && !dedicatedPreferred) + if (canAllocateDedicated && !dedicatedPreferred) { res = AllocateDedicatedMemory( pool, @@ -14583,7 +14839,7 @@ VkResult VmaAllocator_T::AllocateMemoryOfType( (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0, (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_USER_DATA_COPY_STRING_BIT) != 0, (finalCreateInfo.flags & - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0, (finalCreateInfo.flags & VMA_ALLOCATION_CREATE_CAN_ALIAS_BIT) != 0, finalCreateInfo.pUserData, finalCreateInfo.priority, @@ -14593,7 +14849,7 @@ VkResult VmaAllocator_T::AllocateMemoryOfType( allocationCount, pAllocations, blockVector.GetAllocationNextPtr()); - if(res == VK_SUCCESS) + if (res == VK_SUCCESS) { // Succeeded: AllocateDedicatedMemory function already filled pMemory, nothing more to do here. VMA_DEBUG_LOG(" Allocated as DedicatedMemory"); @@ -14610,41 +14866,41 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory( VmaPool pool, VkDeviceSize size, VmaSuballocationType suballocType, - VmaDedicatedAllocationList& dedicatedAllocations, + VmaDedicatedAllocationList &dedicatedAllocations, uint32_t memTypeIndex, bool map, bool isUserDataString, bool isMappingAllowed, bool canAliasMemory, - void* pUserData, + void *pUserData, float priority, VkBuffer dedicatedBuffer, VkImage dedicatedImage, VkFlags dedicatedBufferImageUsage, size_t allocationCount, - VmaAllocation* pAllocations, - const void* pNextChain) + VmaAllocation *pAllocations, + const void *pNextChain) { VMA_ASSERT(allocationCount > 0 && pAllocations); - VkMemoryAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO }; + VkMemoryAllocateInfo allocInfo = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO}; allocInfo.memoryTypeIndex = memTypeIndex; allocInfo.allocationSize = size; allocInfo.pNext = pNextChain; #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR }; - if(!canAliasMemory) + VkMemoryDedicatedAllocateInfoKHR dedicatedAllocInfo = {VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR}; + if (!canAliasMemory) { - if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + if (m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { - if(dedicatedBuffer != VK_NULL_HANDLE) + if (dedicatedBuffer != VK_NULL_HANDLE) { VMA_ASSERT(dedicatedImage == VK_NULL_HANDLE); dedicatedAllocInfo.buffer = dedicatedBuffer; VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); } - else if(dedicatedImage != VK_NULL_HANDLE) + else if (dedicatedImage != VK_NULL_HANDLE) { dedicatedAllocInfo.image = dedicatedImage; VmaPnextChainPushFront(&allocInfo, &dedicatedAllocInfo); @@ -14654,20 +14910,20 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory( #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 #if VMA_BUFFER_DEVICE_ADDRESS - VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR }; - if(m_UseKhrBufferDeviceAddress) + VkMemoryAllocateFlagsInfoKHR allocFlagsInfo = {VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR}; + if (m_UseKhrBufferDeviceAddress) { bool canContainBufferWithDeviceAddress = true; - if(dedicatedBuffer != VK_NULL_HANDLE) + if (dedicatedBuffer != VK_NULL_HANDLE) { canContainBufferWithDeviceAddress = 
dedicatedBufferImageUsage == UINT32_MAX || // Usage flags unknown - (dedicatedBufferImageUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0; + (dedicatedBufferImageUsage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_EXT) != 0; } - else if(dedicatedImage != VK_NULL_HANDLE) + else if (dedicatedImage != VK_NULL_HANDLE) { canContainBufferWithDeviceAddress = false; } - if(canContainBufferWithDeviceAddress) + if (canContainBufferWithDeviceAddress) { allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT_KHR; VmaPnextChainPushFront(&allocInfo, &allocFlagsInfo); @@ -14676,8 +14932,8 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory( #endif // #if VMA_BUFFER_DEVICE_ADDRESS #if VMA_MEMORY_PRIORITY - VkMemoryPriorityAllocateInfoEXT priorityInfo = { VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT }; - if(m_UseExtMemoryPriority) + VkMemoryPriorityAllocateInfoEXT priorityInfo = {VK_STRUCTURE_TYPE_MEMORY_PRIORITY_ALLOCATE_INFO_EXT}; + if (m_UseExtMemoryPriority) { VMA_ASSERT(priority >= 0.f && priority <= 1.f); priorityInfo.priority = priority; @@ -14687,9 +14943,9 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory( #if VMA_EXTERNAL_MEMORY // Attach VkExportMemoryAllocateInfoKHR if necessary. - VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = { VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR }; + VkExportMemoryAllocateInfoKHR exportMemoryAllocInfo = {VK_STRUCTURE_TYPE_EXPORT_MEMORY_ALLOCATE_INFO_KHR}; exportMemoryAllocInfo.handleTypes = GetExternalMemoryHandleTypeFlags(memTypeIndex); - if(exportMemoryAllocInfo.handleTypes != 0) + if (exportMemoryAllocInfo.handleTypes != 0) { VmaPnextChainPushFront(&allocInfo, &exportMemoryAllocInfo); } @@ -14697,7 +14953,7 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory( size_t allocIndex; VkResult res = VK_SUCCESS; - for(allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) { res = AllocateDedicatedMemoryPage( pool, @@ -14710,13 +14966,13 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory( isMappingAllowed, pUserData, pAllocations + allocIndex); - if(res != VK_SUCCESS) + if (res != VK_SUCCESS) { break; } } - if(res == VK_SUCCESS) + if (res == VK_SUCCESS) { for (allocIndex = 0; allocIndex < allocationCount; ++allocIndex) { @@ -14727,7 +14983,7 @@ VkResult VmaAllocator_T::AllocateDedicatedMemory( else { // Free all already created allocations. 
- while(allocIndex--) + while (allocIndex--) { VmaAllocation currAlloc = pAllocations[allocIndex]; VkDeviceMemory hMemory = currAlloc->GetMemory(); @@ -14758,23 +15014,23 @@ VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( VkDeviceSize size, VmaSuballocationType suballocType, uint32_t memTypeIndex, - const VkMemoryAllocateInfo& allocInfo, + const VkMemoryAllocateInfo &allocInfo, bool map, bool isUserDataString, bool isMappingAllowed, - void* pUserData, - VmaAllocation* pAllocation) + void *pUserData, + VmaAllocation *pAllocation) { VkDeviceMemory hMemory = VK_NULL_HANDLE; VkResult res = AllocateVulkanMemory(&allocInfo, &hMemory); - if(res < 0) + if (res < 0) { VMA_DEBUG_LOG(" vkAllocateMemory FAILED"); return res; } - void* pMappedData = VMA_NULL; - if(map) + void *pMappedData = VMA_NULL; + if (map) { res = (*m_VulkanFunctions.vkMapMemory)( m_hDevice, @@ -14783,7 +15039,7 @@ VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( VK_WHOLE_SIZE, 0, &pMappedData); - if(res < 0) + if (res < 0) { VMA_DEBUG_LOG(" vkMapMemory FAILED"); FreeVulkanMemory(memTypeIndex, size, hMemory); @@ -14794,11 +15050,11 @@ VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( *pAllocation = m_AllocationObjectAllocator.Allocate(isMappingAllowed); (*pAllocation)->InitDedicatedAllocation(pool, memTypeIndex, hMemory, suballocType, pMappedData, size); if (isUserDataString) - (*pAllocation)->SetName(this, (const char*)pUserData); + (*pAllocation)->SetName(this, (const char *)pUserData); else (*pAllocation)->SetUserData(this, pUserData); m_Budget.AddAllocation(MemoryTypeIndexToHeapIndex(memTypeIndex), size); - if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) { FillAllocation(*pAllocation, VMA_ALLOCATION_FILL_PATTERN_CREATED); } @@ -14808,113 +15064,113 @@ VkResult VmaAllocator_T::AllocateDedicatedMemoryPage( void VmaAllocator_T::GetBufferMemoryRequirements( VkBuffer hBuffer, - VkMemoryRequirements& memReq, - bool& requiresDedicatedAllocation, - bool& prefersDedicatedAllocation) const + VkMemoryRequirements &memReq, + bool &requiresDedicatedAllocation, + bool &prefersDedicatedAllocation) const { #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + if (m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { - VkBufferMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR }; + VkBufferMemoryRequirementsInfo2KHR memReqInfo = {VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2_KHR}; memReqInfo.buffer = hBuffer; - VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; + VkMemoryDedicatedRequirementsKHR memDedicatedReq = {VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR}; - VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; + VkMemoryRequirements2KHR memReq2 = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR}; VmaPnextChainPushFront(&memReq2, &memDedicatedReq); (*m_VulkanFunctions.vkGetBufferMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); memReq = memReq2.memoryRequirements; requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); - prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); + prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); } else #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 { 
(*m_VulkanFunctions.vkGetBufferMemoryRequirements)(m_hDevice, hBuffer, &memReq); requiresDedicatedAllocation = false; - prefersDedicatedAllocation = false; + prefersDedicatedAllocation = false; } } void VmaAllocator_T::GetImageMemoryRequirements( VkImage hImage, - VkMemoryRequirements& memReq, - bool& requiresDedicatedAllocation, - bool& prefersDedicatedAllocation) const + VkMemoryRequirements &memReq, + bool &requiresDedicatedAllocation, + bool &prefersDedicatedAllocation) const { #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 - if(m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) + if (m_UseKhrDedicatedAllocation || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) { - VkImageMemoryRequirementsInfo2KHR memReqInfo = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR }; + VkImageMemoryRequirementsInfo2KHR memReqInfo = {VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR}; memReqInfo.image = hImage; - VkMemoryDedicatedRequirementsKHR memDedicatedReq = { VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR }; + VkMemoryDedicatedRequirementsKHR memDedicatedReq = {VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR}; - VkMemoryRequirements2KHR memReq2 = { VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR }; + VkMemoryRequirements2KHR memReq2 = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR}; VmaPnextChainPushFront(&memReq2, &memDedicatedReq); (*m_VulkanFunctions.vkGetImageMemoryRequirements2KHR)(m_hDevice, &memReqInfo, &memReq2); memReq = memReq2.memoryRequirements; requiresDedicatedAllocation = (memDedicatedReq.requiresDedicatedAllocation != VK_FALSE); - prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); + prefersDedicatedAllocation = (memDedicatedReq.prefersDedicatedAllocation != VK_FALSE); } else #endif // #if VMA_DEDICATED_ALLOCATION || VMA_VULKAN_VERSION >= 1001000 { (*m_VulkanFunctions.vkGetImageMemoryRequirements)(m_hDevice, hImage, &memReq); requiresDedicatedAllocation = false; - prefersDedicatedAllocation = false; + prefersDedicatedAllocation = false; } } VkResult VmaAllocator_T::FindMemoryTypeIndex( uint32_t memoryTypeBits, - const VmaAllocationCreateInfo* pAllocationCreateInfo, + const VmaAllocationCreateInfo *pAllocationCreateInfo, VkFlags bufImgUsage, - uint32_t* pMemoryTypeIndex) const + uint32_t *pMemoryTypeIndex) const { memoryTypeBits &= GetGlobalMemoryTypeBits(); - if(pAllocationCreateInfo->memoryTypeBits != 0) + if (pAllocationCreateInfo->memoryTypeBits != 0) { memoryTypeBits &= pAllocationCreateInfo->memoryTypeBits; } VkMemoryPropertyFlags requiredFlags = 0, preferredFlags = 0, notPreferredFlags = 0; - if(!FindMemoryPreferences( - IsIntegratedGpu(), - *pAllocationCreateInfo, - bufImgUsage, - requiredFlags, preferredFlags, notPreferredFlags)) + if (!FindMemoryPreferences( + IsIntegratedGpu(), + *pAllocationCreateInfo, + bufImgUsage, + requiredFlags, preferredFlags, notPreferredFlags)) { return VK_ERROR_FEATURE_NOT_PRESENT; } *pMemoryTypeIndex = UINT32_MAX; uint32_t minCost = UINT32_MAX; - for(uint32_t memTypeIndex = 0, memTypeBit = 1; - memTypeIndex < GetMemoryTypeCount(); - ++memTypeIndex, memTypeBit <<= 1) + for (uint32_t memTypeIndex = 0, memTypeBit = 1; + memTypeIndex < GetMemoryTypeCount(); + ++memTypeIndex, memTypeBit <<= 1) { // This memory type is acceptable according to memoryTypeBits bitmask. 
- if((memTypeBit & memoryTypeBits) != 0) + if ((memTypeBit & memoryTypeBits) != 0) { const VkMemoryPropertyFlags currFlags = m_MemProps.memoryTypes[memTypeIndex].propertyFlags; // This memory type contains requiredFlags. - if((requiredFlags & ~currFlags) == 0) + if ((requiredFlags & ~currFlags) == 0) { // Calculate cost as number of bits from preferredFlags not present in this memory type. uint32_t currCost = VMA_COUNT_BITS_SET(preferredFlags & ~currFlags) + - VMA_COUNT_BITS_SET(currFlags & notPreferredFlags); + VMA_COUNT_BITS_SET(currFlags & notPreferredFlags); // Remember memory type with lowest cost. - if(currCost < minCost) + if (currCost < minCost) { *pMemoryTypeIndex = memTypeIndex; - if(currCost == 0) + if (currCost == 0) { return VK_SUCCESS; } @@ -14927,25 +15183,25 @@ VkResult VmaAllocator_T::FindMemoryTypeIndex( } VkResult VmaAllocator_T::CalcMemTypeParams( - VmaAllocationCreateInfo& inoutCreateInfo, + VmaAllocationCreateInfo &inoutCreateInfo, uint32_t memTypeIndex, VkDeviceSize size, size_t allocationCount) { // If memory type is not HOST_VISIBLE, disable MAPPED. - if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && + if ((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0 && (m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0) { inoutCreateInfo.flags &= ~VMA_ALLOCATION_CREATE_MAPPED_BIT; } - if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && + if ((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_WITHIN_BUDGET_BIT) != 0) { const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex); VmaBudget heapBudget = {}; GetHeapBudgets(&heapBudget, heapIndex, 1); - if(heapBudget.usage + size * allocationCount > heapBudget.budget) + if (heapBudget.usage + size * allocationCount > heapBudget.budget) { return VK_ERROR_OUT_OF_DEVICE_MEMORY; } @@ -14954,36 +15210,36 @@ VkResult VmaAllocator_T::CalcMemTypeParams( } VkResult VmaAllocator_T::CalcAllocationParams( - VmaAllocationCreateInfo& inoutCreateInfo, + VmaAllocationCreateInfo &inoutCreateInfo, bool dedicatedRequired, bool dedicatedPreferred) { VMA_ASSERT((inoutCreateInfo.flags & - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != - (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) && - "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect."); + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != + (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT) && + "Specifying both flags VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT and VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT is incorrect."); VMA_ASSERT((((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT) == 0 || - (inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) && - "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); - if(inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || 
inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST) + (inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0)) && + "Specifying VMA_ALLOCATION_CREATE_HOST_ACCESS_ALLOW_TRANSFER_INSTEAD_BIT requires also VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); + if (inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE || inoutCreateInfo.usage == VMA_MEMORY_USAGE_AUTO_PREFER_HOST) { - if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0) + if ((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_MAPPED_BIT) != 0) { VMA_ASSERT((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) != 0 && - "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); + "When using VMA_ALLOCATION_CREATE_MAPPED_BIT and usage = VMA_MEMORY_USAGE_AUTO*, you must also specify VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT or VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT."); } } // If memory is lazily allocated, it should be always dedicated. - if(dedicatedRequired || + if (dedicatedRequired || inoutCreateInfo.usage == VMA_MEMORY_USAGE_GPU_LAZILY_ALLOCATED) { inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; } - if(inoutCreateInfo.pool != VK_NULL_HANDLE) + if (inoutCreateInfo.pool != VK_NULL_HANDLE) { - if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() && + if (inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() && (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0) { VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations."); @@ -14992,14 +15248,14 @@ VkResult VmaAllocator_T::CalcAllocationParams( inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority(); } - if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && + if ((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 && (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) { VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT together with VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT makes no sense."); return VK_ERROR_FEATURE_NOT_PRESENT; } - if(VMA_DEBUG_ALWAYS_DEDICATED_MEMORY && + if (VMA_DEBUG_ALWAYS_DEDICATED_MEMORY && (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_NEVER_ALLOCATE_BIT) != 0) { inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT; @@ -15009,11 +15265,11 @@ VkResult VmaAllocator_T::CalcAllocationParams( // And so does VMA_MEMORY_USAGE_UNKNOWN because it is used with custom pools. // Which specific flag is used doesn't matter. They change things only when used with VMA_MEMORY_USAGE_AUTO*. // Otherwise they just protect from assert on mapping. 
- if(inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO && + if (inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO && inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_DEVICE && inoutCreateInfo.usage != VMA_MEMORY_USAGE_AUTO_PREFER_HOST) { - if((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0) + if ((inoutCreateInfo.flags & (VMA_ALLOCATION_CREATE_HOST_ACCESS_SEQUENTIAL_WRITE_BIT | VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT)) == 0) { inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_HOST_ACCESS_RANDOM_BIT; } @@ -15023,34 +15279,34 @@ VkResult VmaAllocator_T::CalcAllocationParams( } VkResult VmaAllocator_T::AllocateMemory( - const VkMemoryRequirements& vkMemReq, + const VkMemoryRequirements &vkMemReq, bool requiresDedicatedAllocation, bool prefersDedicatedAllocation, VkBuffer dedicatedBuffer, VkImage dedicatedImage, VkFlags dedicatedBufferImageUsage, - const VmaAllocationCreateInfo& createInfo, + const VmaAllocationCreateInfo &createInfo, VmaSuballocationType suballocType, size_t allocationCount, - VmaAllocation* pAllocations) + VmaAllocation *pAllocations) { memset(pAllocations, 0, sizeof(VmaAllocation) * allocationCount); VMA_ASSERT(VmaIsPow2(vkMemReq.alignment)); - if(vkMemReq.size == 0) + if (vkMemReq.size == 0) { return VK_ERROR_INITIALIZATION_FAILED; } VmaAllocationCreateInfo createInfoFinal = createInfo; VkResult res = CalcAllocationParams(createInfoFinal, requiresDedicatedAllocation, prefersDedicatedAllocation); - if(res != VK_SUCCESS) + if (res != VK_SUCCESS) return res; - if(createInfoFinal.pool != VK_NULL_HANDLE) + if (createInfoFinal.pool != VK_NULL_HANDLE) { - VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector; + VmaBlockVector &blockVector = createInfoFinal.pool->m_BlockVector; return AllocateMemoryOfType( createInfoFinal.pool, vkMemReq.size, @@ -15074,11 +15330,11 @@ VkResult VmaAllocator_T::AllocateMemory( uint32_t memTypeIndex = UINT32_MAX; res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex); // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT. - if(res != VK_SUCCESS) + if (res != VK_SUCCESS) return res; do { - VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex]; + VmaBlockVector *blockVector = m_pBlockVectors[memTypeIndex]; VMA_ASSERT(blockVector && "Trying to use unsupported memory type!"); res = AllocateMemoryOfType( VK_NULL_HANDLE, @@ -15096,14 +15352,14 @@ VkResult VmaAllocator_T::AllocateMemory( allocationCount, pAllocations); // Allocation succeeded - if(res == VK_SUCCESS) + if (res == VK_SUCCESS) return VK_SUCCESS; // Remove old memTypeIndex from list of possibilities. memoryTypeBits &= ~(1u << memTypeIndex); // Find alternative memTypeIndex. res = FindMemoryTypeIndex(memoryTypeBits, &createInfoFinal, dedicatedBufferImageUsage, &memTypeIndex); - } while(res == VK_SUCCESS); + } while (res == VK_SUCCESS); // No other matching memory type index could be found. // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once. 
@@ -15113,42 +15369,42 @@ VkResult VmaAllocator_T::AllocateMemory( void VmaAllocator_T::FreeMemory( size_t allocationCount, - const VmaAllocation* pAllocations) + const VmaAllocation *pAllocations) { VMA_ASSERT(pAllocations); - for(size_t allocIndex = allocationCount; allocIndex--; ) + for (size_t allocIndex = allocationCount; allocIndex--;) { VmaAllocation allocation = pAllocations[allocIndex]; - if(allocation != VK_NULL_HANDLE) + if (allocation != VK_NULL_HANDLE) { - if(VMA_DEBUG_INITIALIZE_ALLOCATIONS) + if (VMA_DEBUG_INITIALIZE_ALLOCATIONS) { FillAllocation(allocation, VMA_ALLOCATION_FILL_PATTERN_DESTROYED); } allocation->FreeName(this); - switch(allocation->GetType()) + switch (allocation->GetType()) { case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaBlockVector *pBlockVector = VMA_NULL; + VmaPool hPool = allocation->GetParentPool(); + if (hPool != VK_NULL_HANDLE) { - VmaBlockVector* pBlockVector = VMA_NULL; - VmaPool hPool = allocation->GetParentPool(); - if(hPool != VK_NULL_HANDLE) - { - pBlockVector = &hPool->m_BlockVector; - } - else - { - const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); - pBlockVector = m_pBlockVectors[memTypeIndex]; - VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!"); - } - pBlockVector->Free(allocation); + pBlockVector = &hPool->m_BlockVector; } - break; + else + { + const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); + pBlockVector = m_pBlockVectors[memTypeIndex]; + VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!"); + } + pBlockVector->Free(allocation); + } + break; case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: FreeDedicatedMemory(allocation); break; @@ -15159,19 +15415,19 @@ void VmaAllocator_T::FreeMemory( } } -void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats) +void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics *pStats) { // Initialize. VmaClearDetailedStatistics(pStats->total); - for(uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) + for (uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) VmaClearDetailedStatistics(pStats->memoryType[i]); - for(uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i) + for (uint32_t i = 0; i < VK_MAX_MEMORY_HEAPS; ++i) VmaClearDetailedStatistics(pStats->memoryHeap[i]); // Process default pools. - for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { - VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; + VmaBlockVector *const pBlockVector = m_pBlockVectors[memTypeIndex]; if (pBlockVector != VMA_NULL) pBlockVector->AddDetailedStatistics(pStats->memoryType[memTypeIndex]); } @@ -15179,9 +15435,9 @@ void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats) // Process custom pools. { VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); - for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) + for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) { - VmaBlockVector& blockVector = pool->m_BlockVector; + VmaBlockVector &blockVector = pool->m_BlockVector; const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex(); blockVector.AddDetailedStatistics(pStats->memoryType[memTypeIndex]); pool->m_DedicatedAllocations.AddDetailedStatistics(pStats->memoryType[memTypeIndex]); @@ -15189,37 +15445,37 @@ void VmaAllocator_T::CalculateStatistics(VmaTotalStatistics* pStats) } // Process dedicated allocations. 
- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { m_DedicatedAllocations[memTypeIndex].AddDetailedStatistics(pStats->memoryType[memTypeIndex]); } // Sum from memory types to memory heaps. - for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { const uint32_t memHeapIndex = m_MemProps.memoryTypes[memTypeIndex].heapIndex; VmaAddDetailedStatistics(pStats->memoryHeap[memHeapIndex], pStats->memoryType[memTypeIndex]); } // Sum from memory heaps to total. - for(uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex) + for (uint32_t memHeapIndex = 0; memHeapIndex < GetMemoryHeapCount(); ++memHeapIndex) VmaAddDetailedStatistics(pStats->total, pStats->memoryHeap[memHeapIndex]); VMA_ASSERT(pStats->total.statistics.allocationCount == 0 || - pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin); + pStats->total.allocationSizeMax >= pStats->total.allocationSizeMin); VMA_ASSERT(pStats->total.unusedRangeCount == 0 || - pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin); + pStats->total.unusedRangeSizeMax >= pStats->total.unusedRangeSizeMin); } -void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, uint32_t heapCount) +void VmaAllocator_T::GetHeapBudgets(VmaBudget *outBudgets, uint32_t firstHeap, uint32_t heapCount) { #if VMA_MEMORY_BUDGET - if(m_UseExtMemoryBudget) + if (m_UseExtMemoryBudget) { - if(m_Budget.m_OperationsSinceBudgetFetch < 30) + if (m_Budget.m_OperationsSinceBudgetFetch < 30) { VmaMutexLockRead lockRead(m_Budget.m_BudgetMutex, m_UseMutex); - for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets) + for (uint32_t i = 0; i < heapCount; ++i, ++outBudgets) { const uint32_t heapIndex = firstHeap + i; @@ -15228,10 +15484,10 @@ void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, u outBudgets->statistics.blockBytes = m_Budget.m_BlockBytes[heapIndex]; outBudgets->statistics.allocationBytes = m_Budget.m_AllocationBytes[heapIndex]; - if(m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]) + if (m_Budget.m_VulkanUsage[heapIndex] + outBudgets->statistics.blockBytes > m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]) { outBudgets->usage = m_Budget.m_VulkanUsage[heapIndex] + - outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; + outBudgets->statistics.blockBytes - m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; } else { @@ -15245,14 +15501,14 @@ void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, u } else { - UpdateVulkanBudget(); // Outside of mutex lock + UpdateVulkanBudget(); // Outside of mutex lock GetHeapBudgets(outBudgets, firstHeap, heapCount); // Recursion } } else #endif { - for(uint32_t i = 0; i < heapCount; ++i, ++outBudgets) + for (uint32_t i = 0; i < heapCount; ++i, ++outBudgets) { const uint32_t heapIndex = firstHeap + i; @@ -15267,7 +15523,7 @@ void VmaAllocator_T::GetHeapBudgets(VmaBudget* outBudgets, uint32_t firstHeap, u } } -void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo* pAllocationInfo) +void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationInfo *pAllocationInfo) { pAllocationInfo->memoryType = hAllocation->GetMemoryTypeIndex(); pAllocationInfo->deviceMemory 
= hAllocation->GetMemory(); @@ -15278,33 +15534,33 @@ void VmaAllocator_T::GetAllocationInfo(VmaAllocation hAllocation, VmaAllocationI pAllocationInfo->pName = hAllocation->GetName(); } -VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPool* pPool) +VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo *pCreateInfo, VmaPool *pPool) { VMA_DEBUG_LOG_FORMAT(" CreatePool: MemoryTypeIndex=%u, flags=%u", pCreateInfo->memoryTypeIndex, pCreateInfo->flags); VmaPoolCreateInfo newCreateInfo = *pCreateInfo; // Protection against uninitialized new structure member. If garbage data are left there, this pointer dereference would crash. - if(pCreateInfo->pMemoryAllocateNext) + if (pCreateInfo->pMemoryAllocateNext) { - VMA_ASSERT(((const VkBaseInStructure*)pCreateInfo->pMemoryAllocateNext)->sType != 0); + VMA_ASSERT(((const VkBaseInStructure *)pCreateInfo->pMemoryAllocateNext)->sType != 0); } - if(newCreateInfo.maxBlockCount == 0) + if (newCreateInfo.maxBlockCount == 0) { newCreateInfo.maxBlockCount = SIZE_MAX; } - if(newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount) + if (newCreateInfo.minBlockCount > newCreateInfo.maxBlockCount) { return VK_ERROR_INITIALIZATION_FAILED; } // Memory type index out of range or forbidden. - if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() || + if (pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() || ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0) { return VK_ERROR_FEATURE_NOT_PRESENT; } - if(newCreateInfo.minAllocationAlignment > 0) + if (newCreateInfo.minAllocationAlignment > 0) { VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment)); } @@ -15314,7 +15570,7 @@ VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPoo *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize); VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks(); - if(res != VK_SUCCESS) + if (res != VK_SUCCESS) { vma_delete(this, *pPool); *pPool = VMA_NULL; @@ -15342,14 +15598,14 @@ void VmaAllocator_T::DestroyPool(VmaPool pool) vma_delete(this, pool); } -void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics* pPoolStats) +void VmaAllocator_T::GetPoolStatistics(VmaPool pool, VmaStatistics *pPoolStats) { VmaClearStatistics(*pPoolStats); pool->m_BlockVector.AddStatistics(*pPoolStats); pool->m_DedicatedAllocations.AddStatistics(*pPoolStats); } -void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics* pPoolStats) +void VmaAllocator_T::CalculatePoolStatistics(VmaPool pool, VmaDetailedStatistics *pPoolStats) { VmaClearDetailedStatistics(*pPoolStats); pool->m_BlockVector.AddDetailedStatistics(*pPoolStats); @@ -15361,7 +15617,7 @@ void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex) m_CurrentFrameIndex.store(frameIndex); #if VMA_MEMORY_BUDGET - if(m_UseExtMemoryBudget) + if (m_UseExtMemoryBudget) { UpdateVulkanBudget(); } @@ -15378,13 +15634,13 @@ VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) VkResult finalRes = VK_ERROR_FEATURE_NOT_PRESENT; // Process default pools. 
- for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { - VmaBlockVector* const pBlockVector = m_pBlockVectors[memTypeIndex]; - if(pBlockVector != VMA_NULL) + VmaBlockVector *const pBlockVector = m_pBlockVectors[memTypeIndex]; + if (pBlockVector != VMA_NULL) { VkResult localRes = pBlockVector->CheckCorruption(); - switch(localRes) + switch (localRes) { case VK_ERROR_FEATURE_NOT_PRESENT: break; @@ -15400,12 +15656,12 @@ VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) // Process custom pools. { VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex); - for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) + for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) { - if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0) + if (((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0) { VkResult localRes = pool->m_BlockVector.CheckCorruption(); - switch(localRes) + switch (localRes) { case VK_ERROR_FEATURE_NOT_PRESENT: break; @@ -15422,12 +15678,12 @@ VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits) return finalRes; } -VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAllocateInfo, VkDeviceMemory* pMemory) +VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo *pAllocateInfo, VkDeviceMemory *pMemory) { AtomicTransactionalIncrement deviceMemoryCountIncrement; const uint64_t prevDeviceMemoryCount = deviceMemoryCountIncrement.Increment(&m_DeviceMemoryCount); #if VMA_DEBUG_DONT_EXCEED_MAX_MEMORY_ALLOCATION_COUNT - if(prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount) + if (prevDeviceMemoryCount >= m_PhysicalDeviceProperties.limits.maxMemoryAllocationCount) { return VK_ERROR_TOO_MANY_OBJECTS; } @@ -15436,18 +15692,18 @@ VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAlloc const uint32_t heapIndex = MemoryTypeIndexToHeapIndex(pAllocateInfo->memoryTypeIndex); // HeapSizeLimit is in effect for this heap. - if((m_HeapSizeLimitMask & (1u << heapIndex)) != 0) + if ((m_HeapSizeLimitMask & (1u << heapIndex)) != 0) { const VkDeviceSize heapSize = m_MemProps.memoryHeaps[heapIndex].size; VkDeviceSize blockBytes = m_Budget.m_BlockBytes[heapIndex]; - for(;;) + for (;;) { const VkDeviceSize blockBytesAfterAllocation = blockBytes + pAllocateInfo->allocationSize; - if(blockBytesAfterAllocation > heapSize) + if (blockBytesAfterAllocation > heapSize) { return VK_ERROR_OUT_OF_DEVICE_MEMORY; } - if(m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation)) + if (m_Budget.m_BlockBytes[heapIndex].compare_exchange_strong(blockBytes, blockBytesAfterAllocation)) { break; } @@ -15462,14 +15718,14 @@ VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAlloc // VULKAN CALL vkAllocateMemory. VkResult res = (*m_VulkanFunctions.vkAllocateMemory)(m_hDevice, pAllocateInfo, GetAllocationCallbacks(), pMemory); - if(res == VK_SUCCESS) + if (res == VK_SUCCESS) { #if VMA_MEMORY_BUDGET ++m_Budget.m_OperationsSinceBudgetFetch; #endif // Informative callback. 
- if(m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL) + if (m_DeviceMemoryCallbacks.pfnAllocate != VMA_NULL) { (*m_DeviceMemoryCallbacks.pfnAllocate)(this, pAllocateInfo->memoryTypeIndex, *pMemory, pAllocateInfo->allocationSize, m_DeviceMemoryCallbacks.pUserData); } @@ -15488,7 +15744,7 @@ VkResult VmaAllocator_T::AllocateVulkanMemory(const VkMemoryAllocateInfo* pAlloc void VmaAllocator_T::FreeVulkanMemory(uint32_t memoryType, VkDeviceSize size, VkDeviceMemory hMemory) { // Informative callback. - if(m_DeviceMemoryCallbacks.pfnFree != VMA_NULL) + if (m_DeviceMemoryCallbacks.pfnFree != VMA_NULL) { (*m_DeviceMemoryCallbacks.pfnFree)(this, memoryType, hMemory, size, m_DeviceMemoryCallbacks.pUserData); } @@ -15507,15 +15763,15 @@ VkResult VmaAllocator_T::BindVulkanBuffer( VkDeviceMemory memory, VkDeviceSize memoryOffset, VkBuffer buffer, - const void* pNext) + const void *pNext) { - if(pNext != VMA_NULL) + if (pNext != VMA_NULL) { #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 - if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && + if ((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && m_VulkanFunctions.vkBindBufferMemory2KHR != VMA_NULL) { - VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR }; + VkBindBufferMemoryInfoKHR bindBufferMemoryInfo = {VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR}; bindBufferMemoryInfo.pNext = pNext; bindBufferMemoryInfo.buffer = buffer; bindBufferMemoryInfo.memory = memory; @@ -15538,15 +15794,15 @@ VkResult VmaAllocator_T::BindVulkanImage( VkDeviceMemory memory, VkDeviceSize memoryOffset, VkImage image, - const void* pNext) + const void *pNext) { - if(pNext != VMA_NULL) + if (pNext != VMA_NULL) { #if VMA_VULKAN_VERSION >= 1001000 || VMA_BIND_MEMORY2 - if((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && + if ((m_UseKhrBindMemory2 || m_VulkanApiVersion >= VK_MAKE_VERSION(1, 1, 0)) && m_VulkanFunctions.vkBindImageMemory2KHR != VMA_NULL) { - VkBindImageMemoryInfoKHR bindBufferMemoryInfo = { VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR }; + VkBindImageMemoryInfoKHR bindBufferMemoryInfo = {VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO_KHR}; bindBufferMemoryInfo.pNext = pNext; bindBufferMemoryInfo.image = image; bindBufferMemoryInfo.memory = memory; @@ -15565,22 +15821,22 @@ VkResult VmaAllocator_T::BindVulkanImage( } } -VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData) +VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void **ppData) { - switch(hAllocation->GetType()) + switch (hAllocation->GetType()) { case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: + { + VmaDeviceMemoryBlock *const pBlock = hAllocation->GetBlock(); + char *pBytes = VMA_NULL; + VkResult res = pBlock->Map(this, 1, (void **)&pBytes); + if (res == VK_SUCCESS) { - VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); - char *pBytes = VMA_NULL; - VkResult res = pBlock->Map(this, 1, (void**)&pBytes); - if(res == VK_SUCCESS) - { - *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset(); - hAllocation->BlockAllocMap(); - } - return res; + *ppData = pBytes + (ptrdiff_t)hAllocation->GetOffset(); + hAllocation->BlockAllocMap(); } + return res; + } VMA_FALLTHROUGH; // Fallthrough case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: return hAllocation->DedicatedAllocMap(this, ppData); @@ -15592,15 +15848,15 @@ VkResult VmaAllocator_T::Map(VmaAllocation hAllocation, void** ppData) void VmaAllocator_T::Unmap(VmaAllocation hAllocation) { - 
switch(hAllocation->GetType()) + switch (hAllocation->GetType()) { case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: - { - VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); - hAllocation->BlockAllocUnmap(); - pBlock->Unmap(this, 1); - } - break; + { + VmaDeviceMemoryBlock *const pBlock = hAllocation->GetBlock(); + hAllocation->BlockAllocUnmap(); + pBlock->Unmap(this, 1); + } + break; case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: hAllocation->DedicatedAllocUnmap(this); break; @@ -15613,17 +15869,17 @@ VkResult VmaAllocator_T::BindBufferMemory( VmaAllocation hAllocation, VkDeviceSize allocationLocalOffset, VkBuffer hBuffer, - const void* pNext) + const void *pNext) { VkResult res = VK_ERROR_UNKNOWN; - switch(hAllocation->GetType()) + switch (hAllocation->GetType()) { case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: res = BindVulkanBuffer(hAllocation->GetMemory(), allocationLocalOffset, hBuffer, pNext); break; case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: { - VmaDeviceMemoryBlock* const pBlock = hAllocation->GetBlock(); + VmaDeviceMemoryBlock *const pBlock = hAllocation->GetBlock(); VMA_ASSERT(pBlock && "Binding buffer to allocation that doesn't belong to any block."); res = pBlock->BindBufferMemory(this, hAllocation, allocationLocalOffset, hBuffer, pNext); break; @@ -15638,17 +15894,17 @@ VkResult VmaAllocator_T::BindImageMemory( VmaAllocation hAllocation, VkDeviceSize allocationLocalOffset, VkImage hImage, - const void* pNext) + const void *pNext) { VkResult res = VK_ERROR_UNKNOWN; - switch(hAllocation->GetType()) + switch (hAllocation->GetType()) { case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: res = BindVulkanImage(hAllocation->GetMemory(), allocationLocalOffset, hImage, pNext); break; case VmaAllocation_T::ALLOCATION_TYPE_BLOCK: { - VmaDeviceMemoryBlock* pBlock = hAllocation->GetBlock(); + VmaDeviceMemoryBlock *pBlock = hAllocation->GetBlock(); VMA_ASSERT(pBlock && "Binding image to allocation that doesn't belong to any block."); res = pBlock->BindImageMemory(this, hAllocation, allocationLocalOffset, hImage, pNext); break; @@ -15667,9 +15923,9 @@ VkResult VmaAllocator_T::FlushOrInvalidateAllocation( VkResult res = VK_SUCCESS; VkMappedMemoryRange memRange = {}; - if(GetFlushOrInvalidateRange(hAllocation, offset, size, memRange)) + if (GetFlushOrInvalidateRange(hAllocation, offset, size, memRange)) { - switch(op) + switch (op) { case VMA_CACHE_FLUSH: res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, 1, &memRange); @@ -15687,30 +15943,30 @@ VkResult VmaAllocator_T::FlushOrInvalidateAllocation( VkResult VmaAllocator_T::FlushOrInvalidateAllocations( uint32_t allocationCount, - const VmaAllocation* allocations, - const VkDeviceSize* offsets, const VkDeviceSize* sizes, + const VmaAllocation *allocations, + const VkDeviceSize *offsets, const VkDeviceSize *sizes, VMA_CACHE_OPERATION op) { typedef VmaStlAllocator RangeAllocator; typedef VmaSmallVector RangeVector; RangeVector ranges = RangeVector(RangeAllocator(GetAllocationCallbacks())); - for(uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex) + for (uint32_t allocIndex = 0; allocIndex < allocationCount; ++allocIndex) { const VmaAllocation alloc = allocations[allocIndex]; const VkDeviceSize offset = offsets != VMA_NULL ? offsets[allocIndex] : 0; const VkDeviceSize size = sizes != VMA_NULL ? 
sizes[allocIndex] : VK_WHOLE_SIZE; VkMappedMemoryRange newRange; - if(GetFlushOrInvalidateRange(alloc, offset, size, newRange)) + if (GetFlushOrInvalidateRange(alloc, offset, size, newRange)) { ranges.push_back(newRange); } } VkResult res = VK_SUCCESS; - if(!ranges.empty()) + if (!ranges.empty()) { - switch(op) + switch (op) { case VMA_CACHE_FLUSH: res = (*GetVulkanFunctions().vkFlushMappedMemoryRanges)(m_hDevice, (uint32_t)ranges.size(), ranges.data()); @@ -15732,7 +15988,7 @@ void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation) const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); VmaPool parentPool = allocation->GetParentPool(); - if(parentPool == VK_NULL_HANDLE) + if (parentPool == VK_NULL_HANDLE) { // Default pool m_DedicatedAllocations[memTypeIndex].Unregister(allocation); @@ -15774,7 +16030,7 @@ uint32_t VmaAllocator_T::CalculateGpuDefragmentationMemoryTypeBits() const VkBuffer buf = VK_NULL_HANDLE; VkResult res = (*GetVulkanFunctions().vkCreateBuffer)( m_hDevice, &dummyBufCreateInfo, GetAllocationCallbacks(), &buf); - if(res == VK_SUCCESS) + if (res == VK_SUCCESS) { // Query for supported memory types. VkMemoryRequirements memReq; @@ -15795,12 +16051,12 @@ uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const uint32_t memoryTypeBits = UINT32_MAX; - if(!m_UseAmdDeviceCoherentMemory) + if (!m_UseAmdDeviceCoherentMemory) { // Exclude memory types that have VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD. - for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) + for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { - if((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0) + if ((m_MemProps.memoryTypes[memTypeIndex].propertyFlags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) != 0) { memoryTypeBits &= ~(1u << memTypeIndex); } @@ -15813,10 +16069,10 @@ uint32_t VmaAllocator_T::CalculateGlobalMemoryTypeBits() const bool VmaAllocator_T::GetFlushOrInvalidateRange( VmaAllocation allocation, VkDeviceSize offset, VkDeviceSize size, - VkMappedMemoryRange& outRange) const + VkMappedMemoryRange &outRange) const { const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); - if(size > 0 && IsMemoryTypeNonCoherent(memTypeIndex)) + if (size > 0 && IsMemoryTypeNonCoherent(memTypeIndex)) { const VkDeviceSize nonCoherentAtomSize = m_PhysicalDeviceProperties.limits.nonCoherentAtomSize; const VkDeviceSize allocationSize = allocation->GetSize(); @@ -15826,11 +16082,11 @@ bool VmaAllocator_T::GetFlushOrInvalidateRange( outRange.pNext = VMA_NULL; outRange.memory = allocation->GetMemory(); - switch(allocation->GetType()) + switch (allocation->GetType()) { case VmaAllocation_T::ALLOCATION_TYPE_DEDICATED: outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); - if(size == VK_WHOLE_SIZE) + if (size == VK_WHOLE_SIZE) { outRange.size = allocationSize - outRange.offset; } @@ -15846,7 +16102,7 @@ bool VmaAllocator_T::GetFlushOrInvalidateRange( { // 1. Still within this allocation. 
outRange.offset = VmaAlignDown(offset, nonCoherentAtomSize); - if(size == VK_WHOLE_SIZE) + if (size == VK_WHOLE_SIZE) { size = allocationSize - offset; } @@ -15878,9 +16134,9 @@ void VmaAllocator_T::UpdateVulkanBudget() { VMA_ASSERT(m_UseExtMemoryBudget); - VkPhysicalDeviceMemoryProperties2KHR memProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR }; + VkPhysicalDeviceMemoryProperties2KHR memProps = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_PROPERTIES_2_KHR}; - VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT }; + VkPhysicalDeviceMemoryBudgetPropertiesEXT budgetProps = {VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MEMORY_BUDGET_PROPERTIES_EXT}; VmaPnextChainPushFront(&memProps, &budgetProps); GetVulkanFunctions().vkGetPhysicalDeviceMemoryProperties2KHR(m_PhysicalDevice, &memProps); @@ -15888,22 +16144,22 @@ void VmaAllocator_T::UpdateVulkanBudget() { VmaMutexLockWrite lockWrite(m_Budget.m_BudgetMutex, m_UseMutex); - for(uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) + for (uint32_t heapIndex = 0; heapIndex < GetMemoryHeapCount(); ++heapIndex) { m_Budget.m_VulkanUsage[heapIndex] = budgetProps.heapUsage[heapIndex]; m_Budget.m_VulkanBudget[heapIndex] = budgetProps.heapBudget[heapIndex]; m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] = m_Budget.m_BlockBytes[heapIndex].load(); // Some bugged drivers return the budget incorrectly, e.g. 0 or much bigger than heap size. - if(m_Budget.m_VulkanBudget[heapIndex] == 0) + if (m_Budget.m_VulkanBudget[heapIndex] == 0) { m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size * 8 / 10; // 80% heuristics. } - else if(m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size) + else if (m_Budget.m_VulkanBudget[heapIndex] > m_MemProps.memoryHeaps[heapIndex].size) { m_Budget.m_VulkanBudget[heapIndex] = m_MemProps.memoryHeaps[heapIndex].size; } - if(m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0) + if (m_Budget.m_VulkanUsage[heapIndex] == 0 && m_Budget.m_BlockBytesAtBudgetFetch[heapIndex] > 0) { m_Budget.m_VulkanUsage[heapIndex] = m_Budget.m_BlockBytesAtBudgetFetch[heapIndex]; } @@ -15915,13 +16171,13 @@ void VmaAllocator_T::UpdateVulkanBudget() void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pattern) { - if(VMA_DEBUG_INITIALIZE_ALLOCATIONS && + if (VMA_DEBUG_INITIALIZE_ALLOCATIONS && hAllocation->IsMappingAllowed() && (m_MemProps.memoryTypes[hAllocation->GetMemoryTypeIndex()].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) != 0) { - void* pData = VMA_NULL; + void *pData = VMA_NULL; VkResult res = Map(hAllocation, &pData); - if(res == VK_SUCCESS) + if (res == VK_SUCCESS) { memset(pData, (int)pattern, (size_t)hAllocation->GetSize()); FlushOrInvalidateAllocation(hAllocation, 0, VK_WHOLE_SIZE, VMA_CACHE_FLUSH); @@ -15937,7 +16193,7 @@ void VmaAllocator_T::FillAllocation(const VmaAllocation hAllocation, uint8_t pat uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits() { uint32_t memoryTypeBits = m_GpuDefragmentationMemoryTypeBits.load(); - if(memoryTypeBits == UINT32_MAX) + if (memoryTypeBits == UINT32_MAX) { memoryTypeBits = CalculateGpuDefragmentationMemoryTypeBits(); m_GpuDefragmentationMemoryTypeBits.store(memoryTypeBits); @@ -15946,15 +16202,15 @@ uint32_t VmaAllocator_T::GetGpuDefragmentationMemoryTypeBits() } #if VMA_STATS_STRING_ENABLED -void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) +void 
VmaAllocator_T::PrintDetailedMap(VmaJsonWriter &json) { json.WriteString("DefaultPools"); json.BeginObject(); { for (uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex) { - VmaBlockVector* pBlockVector = m_pBlockVectors[memTypeIndex]; - VmaDedicatedAllocationList& dedicatedAllocList = m_DedicatedAllocations[memTypeIndex]; + VmaBlockVector *pBlockVector = m_pBlockVectors[memTypeIndex]; + VmaDedicatedAllocationList &dedicatedAllocList = m_DedicatedAllocations[memTypeIndex]; if (pBlockVector != VMA_NULL) { json.BeginString("Type "); @@ -15989,7 +16245,7 @@ void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) size_t index = 0; for (VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool)) { - VmaBlockVector& blockVector = pool->m_BlockVector; + VmaBlockVector &blockVector = pool->m_BlockVector; if (blockVector.GetMemoryTypeIndex() == memTypeIndex) { if (displayType) @@ -16036,19 +16292,18 @@ void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json) #endif // VMA_STATS_STRING_ENABLED #endif // _VMA_ALLOCATOR_T_FUNCTIONS - #ifndef _VMA_PUBLIC_INTERFACE VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( - const VmaAllocatorCreateInfo* pCreateInfo, - VmaAllocator* pAllocator) + const VmaAllocatorCreateInfo *pCreateInfo, + VmaAllocator *pAllocator) { VMA_ASSERT(pCreateInfo && pAllocator); VMA_ASSERT(pCreateInfo->vulkanApiVersion == 0 || - (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 3)); + (VK_VERSION_MAJOR(pCreateInfo->vulkanApiVersion) == 1 && VK_VERSION_MINOR(pCreateInfo->vulkanApiVersion) <= 3)); VMA_DEBUG_LOG("vmaCreateAllocator"); *pAllocator = vma_new(pCreateInfo->pAllocationCallbacks, VmaAllocator_T)(pCreateInfo); VkResult result = (*pAllocator)->Init(pCreateInfo); - if(result < 0) + if (result < 0) { vma_delete(pCreateInfo->pAllocationCallbacks, *pAllocator); *pAllocator = VK_NULL_HANDLE; @@ -16059,7 +16314,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAllocator( VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( VmaAllocator allocator) { - if(allocator != VK_NULL_HANDLE) + if (allocator != VK_NULL_HANDLE) { VMA_DEBUG_LOG("vmaDestroyAllocator"); VkAllocationCallbacks allocationCallbacks = allocator->m_AllocationCallbacks; // Have to copy the callbacks when destroying. 
@@ -16067,7 +16322,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyAllocator( } } -VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo* pAllocatorInfo) +VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocatorInfo(VmaAllocator allocator, VmaAllocatorInfo *pAllocatorInfo) { VMA_ASSERT(allocator && pAllocatorInfo); pAllocatorInfo->instance = allocator->m_hInstance; @@ -16085,7 +16340,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaGetPhysicalDeviceProperties( VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( VmaAllocator allocator, - const VkPhysicalDeviceMemoryProperties** ppPhysicalDeviceMemoryProperties) + const VkPhysicalDeviceMemoryProperties **ppPhysicalDeviceMemoryProperties) { VMA_ASSERT(allocator && ppPhysicalDeviceMemoryProperties); *ppPhysicalDeviceMemoryProperties = &allocator->m_MemProps; @@ -16094,7 +16349,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryProperties( VMA_CALL_PRE void VMA_CALL_POST vmaGetMemoryTypeProperties( VmaAllocator allocator, uint32_t memoryTypeIndex, - VkMemoryPropertyFlags* pFlags) + VkMemoryPropertyFlags *pFlags) { VMA_ASSERT(allocator && pFlags); VMA_ASSERT(memoryTypeIndex < allocator->GetMemoryTypeCount()); @@ -16114,7 +16369,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaSetCurrentFrameIndex( VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( VmaAllocator allocator, - VmaTotalStatistics* pStats) + VmaTotalStatistics *pStats) { VMA_ASSERT(allocator && pStats); VMA_DEBUG_GLOBAL_MUTEX_LOCK @@ -16123,7 +16378,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaCalculateStatistics( VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( VmaAllocator allocator, - VmaBudget* pBudgets) + VmaBudget *pBudgets) { VMA_ASSERT(allocator && pBudgets); VMA_DEBUG_GLOBAL_MUTEX_LOCK @@ -16134,7 +16389,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaGetHeapBudgets( VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( VmaAllocator allocator, - char** ppStatsString, + char **ppStatsString, VkBool32 detailedMap) { VMA_ASSERT(allocator && ppStatsString); @@ -16154,8 +16409,8 @@ VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( json.WriteString("General"); json.BeginObject(); { - const VkPhysicalDeviceProperties& deviceProperties = allocator->m_PhysicalDeviceProperties; - const VkPhysicalDeviceMemoryProperties& memoryProperties = allocator->m_MemProps; + const VkPhysicalDeviceProperties &deviceProperties = allocator->m_PhysicalDeviceProperties; + const VkPhysicalDeviceMemoryProperties &memoryProperties = allocator->m_MemProps; json.WriteString("API"); json.WriteString("Vulkan"); @@ -16203,23 +16458,23 @@ VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( json.EndString(); json.BeginObject(); { - const VkMemoryHeap& heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex]; + const VkMemoryHeap &heapInfo = allocator->m_MemProps.memoryHeaps[heapIndex]; json.WriteString("Flags"); json.BeginArray(true); { if (heapInfo.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) json.WriteString("DEVICE_LOCAL"); - #if VMA_VULKAN_VERSION >= 1001000 +#if VMA_VULKAN_VERSION >= 1001000 if (heapInfo.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT) json.WriteString("MULTI_INSTANCE"); - #endif +#endif VkMemoryHeapFlags flags = heapInfo.flags & - ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT - #if VMA_VULKAN_VERSION >= 1001000 - | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT - #endif - ); + ~(VK_MEMORY_HEAP_DEVICE_LOCAL_BIT +#if VMA_VULKAN_VERSION >= 1001000 + | VK_MEMORY_HEAP_MULTI_INSTANCE_BIT +#endif + ); if (flags != 0) json.WriteNumber(flags); } @@ -16267,28 +16522,25 @@ VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( 
json.WriteString("HOST_CACHED"); if (flags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT) json.WriteString("LAZILY_ALLOCATED"); - #if VMA_VULKAN_VERSION >= 1001000 +#if VMA_VULKAN_VERSION >= 1001000 if (flags & VK_MEMORY_PROPERTY_PROTECTED_BIT) json.WriteString("PROTECTED"); - #endif - #if VK_AMD_device_coherent_memory +#endif +#if VK_AMD_device_coherent_memory if (flags & VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY) json.WriteString("DEVICE_COHERENT_AMD"); if (flags & VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY) json.WriteString("DEVICE_UNCACHED_AMD"); - #endif +#endif flags &= ~(VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT - #if VMA_VULKAN_VERSION >= 1001000 - | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT - #endif - #if VK_AMD_device_coherent_memory - | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY - | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY - #endif - | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT - | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT - | VK_MEMORY_PROPERTY_HOST_CACHED_BIT); +#if VMA_VULKAN_VERSION >= 1001000 + | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT +#endif +#if VK_AMD_device_coherent_memory + | VK_MEMORY_PROPERTY_DEVICE_COHERENT_BIT_AMD_COPY | VK_MEMORY_PROPERTY_DEVICE_UNCACHED_BIT_AMD_COPY +#endif + | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT); if (flags != 0) json.WriteNumber(flags); } @@ -16300,7 +16552,6 @@ VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( json.EndObject(); } } - } json.EndObject(); } @@ -16321,9 +16572,9 @@ VMA_CALL_PRE void VMA_CALL_POST vmaBuildStatsString( VMA_CALL_PRE void VMA_CALL_POST vmaFreeStatsString( VmaAllocator allocator, - char* pStatsString) + char *pStatsString) { - if(pStatsString != VMA_NULL) + if (pStatsString != VMA_NULL) { VMA_ASSERT(allocator); VmaFreeString(allocator->GetAllocationCallbacks(), pStatsString); @@ -16338,8 +16589,8 @@ This function is not protected by any mutex because it just reads immutable data VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( VmaAllocator allocator, uint32_t memoryTypeBits, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - uint32_t* pMemoryTypeIndex) + const VmaAllocationCreateInfo *pAllocationCreateInfo, + uint32_t *pMemoryTypeIndex) { VMA_ASSERT(allocator != VK_NULL_HANDLE); VMA_ASSERT(pAllocationCreateInfo != VMA_NULL); @@ -16350,9 +16601,9 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndex( VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( VmaAllocator allocator, - const VkBufferCreateInfo* pBufferCreateInfo, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - uint32_t* pMemoryTypeIndex) + const VkBufferCreateInfo *pBufferCreateInfo, + const VmaAllocationCreateInfo *pAllocationCreateInfo, + uint32_t *pMemoryTypeIndex) { VMA_ASSERT(allocator != VK_NULL_HANDLE); VMA_ASSERT(pBufferCreateInfo != VMA_NULL); @@ -16360,11 +16611,11 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); const VkDevice hDev = allocator->m_hDevice; - const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); + const VmaVulkanFunctions *funcs = &allocator->GetVulkanFunctions(); VkResult res; #if VMA_VULKAN_VERSION >= 1003000 - if(funcs->vkGetDeviceBufferMemoryRequirements) + if (funcs->vkGetDeviceBufferMemoryRequirements) { // Can query straight from VkBufferCreateInfo :) VkDeviceBufferMemoryRequirements devBufMemReq = {VK_STRUCTURE_TYPE_DEVICE_BUFFER_MEMORY_REQUIREMENTS}; @@ -16383,7 +16634,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST 
vmaFindMemoryTypeIndexForBufferInfo( VkBuffer hBuffer = VK_NULL_HANDLE; res = funcs->vkCreateBuffer( hDev, pBufferCreateInfo, allocator->GetAllocationCallbacks(), &hBuffer); - if(res == VK_SUCCESS) + if (res == VK_SUCCESS) { VkMemoryRequirements memReq = {}; funcs->vkGetBufferMemoryRequirements(hDev, hBuffer, &memReq); @@ -16400,9 +16651,9 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForBufferInfo( VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( VmaAllocator allocator, - const VkImageCreateInfo* pImageCreateInfo, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - uint32_t* pMemoryTypeIndex) + const VkImageCreateInfo *pImageCreateInfo, + const VmaAllocationCreateInfo *pAllocationCreateInfo, + uint32_t *pMemoryTypeIndex) { VMA_ASSERT(allocator != VK_NULL_HANDLE); VMA_ASSERT(pImageCreateInfo != VMA_NULL); @@ -16410,17 +16661,17 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( VMA_ASSERT(pMemoryTypeIndex != VMA_NULL); const VkDevice hDev = allocator->m_hDevice; - const VmaVulkanFunctions* funcs = &allocator->GetVulkanFunctions(); + const VmaVulkanFunctions *funcs = &allocator->GetVulkanFunctions(); VkResult res; #if VMA_VULKAN_VERSION >= 1003000 - if(funcs->vkGetDeviceImageMemoryRequirements) + if (funcs->vkGetDeviceImageMemoryRequirements) { // Can query straight from VkImageCreateInfo :) VkDeviceImageMemoryRequirements devImgMemReq = {VK_STRUCTURE_TYPE_DEVICE_IMAGE_MEMORY_REQUIREMENTS}; devImgMemReq.pCreateInfo = pImageCreateInfo; VMA_ASSERT(pImageCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT_COPY && (pImageCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT_COPY) == 0 && - "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect."); + "Cannot use this VkImageCreateInfo with vmaFindMemoryTypeIndexForImageInfo as I don't know what to pass as VkDeviceImageMemoryRequirements::planeAspect."); VkMemoryRequirements2 memReq = {VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2}; (*funcs->vkGetDeviceImageMemoryRequirements)(hDev, &devImgMemReq, &memReq); @@ -16435,7 +16686,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( VkImage hImage = VK_NULL_HANDLE; res = funcs->vkCreateImage( hDev, pImageCreateInfo, allocator->GetAllocationCallbacks(), &hImage); - if(res == VK_SUCCESS) + if (res == VK_SUCCESS) { VkMemoryRequirements memReq = {}; funcs->vkGetImageMemoryRequirements(hDev, hImage, &memReq); @@ -16452,8 +16703,8 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFindMemoryTypeIndexForImageInfo( VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreatePool( VmaAllocator allocator, - const VmaPoolCreateInfo* pCreateInfo, - VmaPool* pPool) + const VmaPoolCreateInfo *pCreateInfo, + VmaPool *pPool) { VMA_ASSERT(allocator && pCreateInfo && pPool); @@ -16470,7 +16721,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( { VMA_ASSERT(allocator); - if(pool == VK_NULL_HANDLE) + if (pool == VK_NULL_HANDLE) { return; } @@ -16485,7 +16736,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyPool( VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( VmaAllocator allocator, VmaPool pool, - VmaStatistics* pPoolStats) + VmaStatistics *pPoolStats) { VMA_ASSERT(allocator && pool && pPoolStats); @@ -16497,7 +16748,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolStatistics( VMA_CALL_PRE void VMA_CALL_POST vmaCalculatePoolStatistics( VmaAllocator allocator, VmaPool pool, - VmaDetailedStatistics* pPoolStats) + VmaDetailedStatistics *pPoolStats) { 
VMA_ASSERT(allocator && pool && pPoolStats); @@ -16520,7 +16771,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckPoolCorruption(VmaAllocator allocato VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( VmaAllocator allocator, VmaPool pool, - const char** ppName) + const char **ppName) { VMA_ASSERT(allocator && pool && ppName); @@ -16534,7 +16785,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaGetPoolName( VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( VmaAllocator allocator, VmaPool pool, - const char* pName) + const char *pName) { VMA_ASSERT(allocator && pool); @@ -16547,10 +16798,10 @@ VMA_CALL_PRE void VMA_CALL_POST vmaSetPoolName( VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( VmaAllocator allocator, - const VkMemoryRequirements* pVkMemoryRequirements, - const VmaAllocationCreateInfo* pCreateInfo, - VmaAllocation* pAllocation, - VmaAllocationInfo* pAllocationInfo) + const VkMemoryRequirements *pVkMemoryRequirements, + const VmaAllocationCreateInfo *pCreateInfo, + VmaAllocation *pAllocation, + VmaAllocationInfo *pAllocationInfo) { VMA_ASSERT(allocator && pVkMemoryRequirements && pCreateInfo && pAllocation); @@ -16560,17 +16811,17 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( VkResult result = allocator->AllocateMemory( *pVkMemoryRequirements, - false, // requiresDedicatedAllocation - false, // prefersDedicatedAllocation + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation VK_NULL_HANDLE, // dedicatedBuffer VK_NULL_HANDLE, // dedicatedImage - UINT32_MAX, // dedicatedBufferImageUsage + UINT32_MAX, // dedicatedBufferImageUsage *pCreateInfo, VMA_SUBALLOCATION_TYPE_UNKNOWN, 1, // allocationCount pAllocation); - if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + if (pAllocationInfo != VMA_NULL && result == VK_SUCCESS) { allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); } @@ -16580,13 +16831,13 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemory( VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( VmaAllocator allocator, - const VkMemoryRequirements* pVkMemoryRequirements, - const VmaAllocationCreateInfo* pCreateInfo, + const VkMemoryRequirements *pVkMemoryRequirements, + const VmaAllocationCreateInfo *pCreateInfo, size_t allocationCount, - VmaAllocation* pAllocations, - VmaAllocationInfo* pAllocationInfo) + VmaAllocation *pAllocations, + VmaAllocationInfo *pAllocationInfo) { - if(allocationCount == 0) + if (allocationCount == 0) { return VK_SUCCESS; } @@ -16599,19 +16850,19 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( VkResult result = allocator->AllocateMemory( *pVkMemoryRequirements, - false, // requiresDedicatedAllocation - false, // prefersDedicatedAllocation + false, // requiresDedicatedAllocation + false, // prefersDedicatedAllocation VK_NULL_HANDLE, // dedicatedBuffer VK_NULL_HANDLE, // dedicatedImage - UINT32_MAX, // dedicatedBufferImageUsage + UINT32_MAX, // dedicatedBufferImageUsage *pCreateInfo, VMA_SUBALLOCATION_TYPE_UNKNOWN, allocationCount, pAllocations); - if(pAllocationInfo != VMA_NULL && result == VK_SUCCESS) + if (pAllocationInfo != VMA_NULL && result == VK_SUCCESS) { - for(size_t i = 0; i < allocationCount; ++i) + for (size_t i = 0; i < allocationCount; ++i) { allocator->GetAllocationInfo(pAllocations[i], pAllocationInfo + i); } @@ -16623,9 +16874,9 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryPages( VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( VmaAllocator allocator, VkBuffer buffer, - const VmaAllocationCreateInfo* pCreateInfo, - VmaAllocation* pAllocation, - 
VmaAllocationInfo* pAllocationInfo) + const VmaAllocationCreateInfo *pCreateInfo, + VmaAllocation *pAllocation, + VmaAllocationInfo *pAllocationInfo) { VMA_ASSERT(allocator && buffer != VK_NULL_HANDLE && pCreateInfo && pAllocation); @@ -16637,22 +16888,22 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( bool requiresDedicatedAllocation = false; bool prefersDedicatedAllocation = false; allocator->GetBufferMemoryRequirements(buffer, vkMemReq, - requiresDedicatedAllocation, - prefersDedicatedAllocation); + requiresDedicatedAllocation, + prefersDedicatedAllocation); VkResult result = allocator->AllocateMemory( vkMemReq, requiresDedicatedAllocation, prefersDedicatedAllocation, - buffer, // dedicatedBuffer + buffer, // dedicatedBuffer VK_NULL_HANDLE, // dedicatedImage - UINT32_MAX, // dedicatedBufferImageUsage + UINT32_MAX, // dedicatedBufferImageUsage *pCreateInfo, VMA_SUBALLOCATION_TYPE_BUFFER, 1, // allocationCount pAllocation); - if(pAllocationInfo && result == VK_SUCCESS) + if (pAllocationInfo && result == VK_SUCCESS) { allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); } @@ -16663,9 +16914,9 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForBuffer( VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( VmaAllocator allocator, VkImage image, - const VmaAllocationCreateInfo* pCreateInfo, - VmaAllocation* pAllocation, - VmaAllocationInfo* pAllocationInfo) + const VmaAllocationCreateInfo *pCreateInfo, + VmaAllocation *pAllocation, + VmaAllocationInfo *pAllocationInfo) { VMA_ASSERT(allocator && image != VK_NULL_HANDLE && pCreateInfo && pAllocation); @@ -16675,23 +16926,23 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaAllocateMemoryForImage( VkMemoryRequirements vkMemReq = {}; bool requiresDedicatedAllocation = false; - bool prefersDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; allocator->GetImageMemoryRequirements(image, vkMemReq, - requiresDedicatedAllocation, prefersDedicatedAllocation); + requiresDedicatedAllocation, prefersDedicatedAllocation); VkResult result = allocator->AllocateMemory( vkMemReq, requiresDedicatedAllocation, prefersDedicatedAllocation, VK_NULL_HANDLE, // dedicatedBuffer - image, // dedicatedImage - UINT32_MAX, // dedicatedBufferImageUsage + image, // dedicatedImage + UINT32_MAX, // dedicatedBufferImageUsage *pCreateInfo, VMA_SUBALLOCATION_TYPE_IMAGE_UNKNOWN, 1, // allocationCount pAllocation); - if(pAllocationInfo && result == VK_SUCCESS) + if (pAllocationInfo && result == VK_SUCCESS) { allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); } @@ -16705,7 +16956,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( { VMA_ASSERT(allocator); - if(allocation == VK_NULL_HANDLE) + if (allocation == VK_NULL_HANDLE) { return; } @@ -16722,9 +16973,9 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemory( VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( VmaAllocator allocator, size_t allocationCount, - const VmaAllocation* pAllocations) + const VmaAllocation *pAllocations) { - if(allocationCount == 0) + if (allocationCount == 0) { return; } @@ -16741,7 +16992,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaFreeMemoryPages( VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( VmaAllocator allocator, VmaAllocation allocation, - VmaAllocationInfo* pAllocationInfo) + VmaAllocationInfo *pAllocationInfo) { VMA_ASSERT(allocator && allocation && pAllocationInfo); @@ -16753,7 +17004,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationInfo( VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( VmaAllocator allocator, 
VmaAllocation allocation, - void* pUserData) + void *pUserData) { VMA_ASSERT(allocator && allocation); @@ -16765,7 +17016,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationUserData( VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, - const char* VMA_NULLABLE pName) + const char *VMA_NULLABLE pName) { allocation->SetName(allocator, pName); } @@ -16773,7 +17024,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaSetAllocationName( VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, - VkMemoryPropertyFlags* VMA_NOT_NULL pFlags) + VkMemoryPropertyFlags *VMA_NOT_NULL pFlags) { VMA_ASSERT(allocator && allocation && pFlags); const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex(); @@ -16783,7 +17034,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaGetAllocationMemoryProperties( VMA_CALL_PRE VkResult VMA_CALL_POST vmaMapMemory( VmaAllocator allocator, VmaAllocation allocation, - void** ppData) + void **ppData) { VMA_ASSERT(allocator && allocation && ppData); @@ -16840,13 +17091,13 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocation( VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations( VmaAllocator allocator, uint32_t allocationCount, - const VmaAllocation* allocations, - const VkDeviceSize* offsets, - const VkDeviceSize* sizes) + const VmaAllocation *allocations, + const VkDeviceSize *offsets, + const VkDeviceSize *sizes) { VMA_ASSERT(allocator); - if(allocationCount == 0) + if (allocationCount == 0) { return VK_SUCCESS; } @@ -16865,13 +17116,13 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaFlushAllocations( VMA_CALL_PRE VkResult VMA_CALL_POST vmaInvalidateAllocations( VmaAllocator allocator, uint32_t allocationCount, - const VmaAllocation* allocations, - const VkDeviceSize* offsets, - const VkDeviceSize* sizes) + const VmaAllocation *allocations, + const VkDeviceSize *offsets, + const VkDeviceSize *sizes) { VMA_ASSERT(allocator); - if(allocationCount == 0) + if (allocationCount == 0) { return VK_SUCCESS; } @@ -16902,8 +17153,8 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCheckCorruption( VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( VmaAllocator allocator, - const VmaDefragmentationInfo* pInfo, - VmaDefragmentationContext* pContext) + const VmaDefragmentationInfo *pInfo, + VmaDefragmentationContext *pContext) { VMA_ASSERT(allocator && pInfo && pContext); @@ -16925,7 +17176,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentation( VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( VmaAllocator allocator, VmaDefragmentationContext context, - VmaDefragmentationStats* pStats) + VmaDefragmentationStats *pStats) { VMA_ASSERT(allocator && context); @@ -16941,7 +17192,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaEndDefragmentation( VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( VmaAllocator VMA_NOT_NULL allocator, VmaDefragmentationContext VMA_NOT_NULL context, - VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) + VmaDefragmentationPassMoveInfo *VMA_NOT_NULL pPassInfo) { VMA_ASSERT(context && pPassInfo); @@ -16955,7 +17206,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaBeginDefragmentationPass( VMA_CALL_PRE VkResult VMA_CALL_POST vmaEndDefragmentationPass( VmaAllocator VMA_NOT_NULL allocator, VmaDefragmentationContext VMA_NOT_NULL context, - VmaDefragmentationPassMoveInfo* VMA_NOT_NULL pPassInfo) + VmaDefragmentationPassMoveInfo *VMA_NOT_NULL pPassInfo) { VMA_ASSERT(context && pPassInfo); @@ 
-16985,7 +17236,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindBufferMemory2( VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkBuffer buffer, - const void* pNext) + const void *pNext) { VMA_ASSERT(allocator && allocation && buffer); @@ -17015,7 +17266,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( VmaAllocation allocation, VkDeviceSize allocationLocalOffset, VkImage image, - const void* pNext) + const void *pNext) { VMA_ASSERT(allocator && allocation && image); @@ -17023,24 +17274,24 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaBindImageMemory2( VMA_DEBUG_GLOBAL_MUTEX_LOCK - return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext); + return allocator->BindImageMemory(allocation, allocationLocalOffset, image, pNext); } VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( VmaAllocator allocator, - const VkBufferCreateInfo* pBufferCreateInfo, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - VkBuffer* pBuffer, - VmaAllocation* pAllocation, - VmaAllocationInfo* pAllocationInfo) + const VkBufferCreateInfo *pBufferCreateInfo, + const VmaAllocationCreateInfo *pAllocationCreateInfo, + VkBuffer *pBuffer, + VmaAllocation *pAllocation, + VmaAllocationInfo *pAllocationInfo) { VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && pBuffer && pAllocation); - if(pBufferCreateInfo->size == 0) + if (pBufferCreateInfo->size == 0) { return VK_ERROR_INITIALIZATION_FAILED; } - if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && !allocator->m_UseKhrBufferDeviceAddress) { VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); @@ -17060,42 +17311,42 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( pBufferCreateInfo, allocator->GetAllocationCallbacks(), pBuffer); - if(res >= 0) + if (res >= 0) { // 2. vkGetBufferMemoryRequirements. VkMemoryRequirements vkMemReq = {}; bool requiresDedicatedAllocation = false; - bool prefersDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, - requiresDedicatedAllocation, prefersDedicatedAllocation); + requiresDedicatedAllocation, prefersDedicatedAllocation); // 3. Allocate memory using allocator. res = allocator->AllocateMemory( vkMemReq, requiresDedicatedAllocation, prefersDedicatedAllocation, - *pBuffer, // dedicatedBuffer - VK_NULL_HANDLE, // dedicatedImage + *pBuffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage pBufferCreateInfo->usage, // dedicatedBufferImageUsage *pAllocationCreateInfo, VMA_SUBALLOCATION_TYPE_BUFFER, 1, // allocationCount pAllocation); - if(res >= 0) + if (res >= 0) { // 3. Bind buffer with memory. - if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + if ((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) { res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); } - if(res >= 0) + if (res >= 0) { - // All steps succeeded. - #if VMA_STATS_STRING_ENABLED - (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage); - #endif - if(pAllocationInfo != VMA_NULL) +// All steps succeeded. 
+#if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage); +#endif + if (pAllocationInfo != VMA_NULL) { allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); } @@ -17119,20 +17370,20 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBuffer( VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( VmaAllocator allocator, - const VkBufferCreateInfo* pBufferCreateInfo, - const VmaAllocationCreateInfo* pAllocationCreateInfo, + const VkBufferCreateInfo *pBufferCreateInfo, + const VmaAllocationCreateInfo *pAllocationCreateInfo, VkDeviceSize minAlignment, - VkBuffer* pBuffer, - VmaAllocation* pAllocation, - VmaAllocationInfo* pAllocationInfo) + VkBuffer *pBuffer, + VmaAllocation *pAllocation, + VmaAllocationInfo *pAllocationInfo) { VMA_ASSERT(allocator && pBufferCreateInfo && pAllocationCreateInfo && VmaIsPow2(minAlignment) && pBuffer && pAllocation); - if(pBufferCreateInfo->size == 0) + if (pBufferCreateInfo->size == 0) { return VK_ERROR_INITIALIZATION_FAILED; } - if((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && + if ((pBufferCreateInfo->usage & VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT_COPY) != 0 && !allocator->m_UseKhrBufferDeviceAddress) { VMA_ASSERT(0 && "Creating a buffer with VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT is not valid if VMA_ALLOCATOR_CREATE_BUFFER_DEVICE_ADDRESS_BIT was not used."); @@ -17152,14 +17403,14 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( pBufferCreateInfo, allocator->GetAllocationCallbacks(), pBuffer); - if(res >= 0) + if (res >= 0) { // 2. vkGetBufferMemoryRequirements. VkMemoryRequirements vkMemReq = {}; bool requiresDedicatedAllocation = false; - bool prefersDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; allocator->GetBufferMemoryRequirements(*pBuffer, vkMemReq, - requiresDedicatedAllocation, prefersDedicatedAllocation); + requiresDedicatedAllocation, prefersDedicatedAllocation); // 2a. Include minAlignment vkMemReq.alignment = VMA_MAX(vkMemReq.alignment, minAlignment); @@ -17169,28 +17420,28 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( vkMemReq, requiresDedicatedAllocation, prefersDedicatedAllocation, - *pBuffer, // dedicatedBuffer - VK_NULL_HANDLE, // dedicatedImage + *pBuffer, // dedicatedBuffer + VK_NULL_HANDLE, // dedicatedImage pBufferCreateInfo->usage, // dedicatedBufferImageUsage *pAllocationCreateInfo, VMA_SUBALLOCATION_TYPE_BUFFER, 1, // allocationCount pAllocation); - if(res >= 0) + if (res >= 0) { // 3. Bind buffer with memory. - if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + if ((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) { res = allocator->BindBufferMemory(*pAllocation, 0, *pBuffer, VMA_NULL); } - if(res >= 0) + if (res >= 0) { - // All steps succeeded. - #if VMA_STATS_STRING_ENABLED - (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage); - #endif - if(pAllocationInfo != VMA_NULL) +// All steps succeeded. 
+#if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferImageUsage(pBufferCreateInfo->usage); +#endif + if (pAllocationInfo != VMA_NULL) { allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); } @@ -17215,8 +17466,8 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateBufferWithAlignment( VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, - const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, - VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer) + const VkBufferCreateInfo *VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pBuffer) { return vmaCreateAliasingBuffer2(allocator, allocation, 0, pBufferCreateInfo, pBuffer); } @@ -17225,8 +17476,8 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingBuffer2( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkDeviceSize allocationLocalOffset, - const VkBufferCreateInfo* VMA_NOT_NULL pBufferCreateInfo, - VkBuffer VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pBuffer) + const VkBufferCreateInfo *VMA_NOT_NULL pBufferCreateInfo, + VkBuffer VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pBuffer) { VMA_ASSERT(allocator && pBufferCreateInfo && pBuffer && allocation); VMA_ASSERT(allocationLocalOffset + pBufferCreateInfo->size <= allocation->GetSize()); @@ -17274,7 +17525,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( { VMA_ASSERT(allocator); - if(buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + if (buffer == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) { return; } @@ -17283,12 +17534,12 @@ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( VMA_DEBUG_GLOBAL_MUTEX_LOCK - if(buffer != VK_NULL_HANDLE) + if (buffer != VK_NULL_HANDLE) { (*allocator->GetVulkanFunctions().vkDestroyBuffer)(allocator->m_hDevice, buffer, allocator->GetAllocationCallbacks()); } - if(allocation != VK_NULL_HANDLE) + if (allocation != VK_NULL_HANDLE) { allocator->FreeMemory( 1, // allocationCount @@ -17298,15 +17549,15 @@ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyBuffer( VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( VmaAllocator allocator, - const VkImageCreateInfo* pImageCreateInfo, - const VmaAllocationCreateInfo* pAllocationCreateInfo, - VkImage* pImage, - VmaAllocation* pAllocation, - VmaAllocationInfo* pAllocationInfo) + const VkImageCreateInfo *pImageCreateInfo, + const VmaAllocationCreateInfo *pAllocationCreateInfo, + VkImage *pImage, + VmaAllocation *pAllocation, + VmaAllocationInfo *pAllocationInfo) { VMA_ASSERT(allocator && pImageCreateInfo && pAllocationCreateInfo && pImage && pAllocation); - if(pImageCreateInfo->extent.width == 0 || + if (pImageCreateInfo->extent.width == 0 || pImageCreateInfo->extent.height == 0 || pImageCreateInfo->extent.depth == 0 || pImageCreateInfo->mipLevels == 0 || @@ -17328,45 +17579,43 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( pImageCreateInfo, allocator->GetAllocationCallbacks(), pImage); - if(res >= 0) + if (res >= 0) { - VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ? - VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL : - VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR; + VmaSuballocationType suballocType = pImageCreateInfo->tiling == VK_IMAGE_TILING_OPTIMAL ? VMA_SUBALLOCATION_TYPE_IMAGE_OPTIMAL : VMA_SUBALLOCATION_TYPE_IMAGE_LINEAR; // 2. Allocate memory using allocator. 
VkMemoryRequirements vkMemReq = {}; bool requiresDedicatedAllocation = false; - bool prefersDedicatedAllocation = false; + bool prefersDedicatedAllocation = false; allocator->GetImageMemoryRequirements(*pImage, vkMemReq, - requiresDedicatedAllocation, prefersDedicatedAllocation); + requiresDedicatedAllocation, prefersDedicatedAllocation); res = allocator->AllocateMemory( vkMemReq, requiresDedicatedAllocation, prefersDedicatedAllocation, - VK_NULL_HANDLE, // dedicatedBuffer - *pImage, // dedicatedImage + VK_NULL_HANDLE, // dedicatedBuffer + *pImage, // dedicatedImage pImageCreateInfo->usage, // dedicatedBufferImageUsage *pAllocationCreateInfo, suballocType, 1, // allocationCount pAllocation); - if(res >= 0) + if (res >= 0) { // 3. Bind image with memory. - if((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) + if ((pAllocationCreateInfo->flags & VMA_ALLOCATION_CREATE_DONT_BIND_BIT) == 0) { res = allocator->BindImageMemory(*pAllocation, 0, *pImage, VMA_NULL); } - if(res >= 0) + if (res >= 0) { - // All steps succeeded. - #if VMA_STATS_STRING_ENABLED - (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage); - #endif - if(pAllocationInfo != VMA_NULL) +// All steps succeeded. +#if VMA_STATS_STRING_ENABLED + (*pAllocation)->InitBufferImageUsage(pImageCreateInfo->usage); +#endif + if (pAllocationInfo != VMA_NULL) { allocator->GetAllocationInfo(*pAllocation, pAllocationInfo); } @@ -17391,8 +17640,8 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateImage( VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, - const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, - VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage) + const VkImageCreateInfo *VMA_NOT_NULL pImageCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pImage) { return vmaCreateAliasingImage2(allocator, allocation, 0, pImageCreateInfo, pImage); } @@ -17401,8 +17650,8 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateAliasingImage2( VmaAllocator VMA_NOT_NULL allocator, VmaAllocation VMA_NOT_NULL allocation, VkDeviceSize allocationLocalOffset, - const VkImageCreateInfo* VMA_NOT_NULL pImageCreateInfo, - VkImage VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pImage) + const VkImageCreateInfo *VMA_NOT_NULL pImageCreateInfo, + VkImage VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pImage) { VMA_ASSERT(allocator && pImageCreateInfo && pImage && allocation); @@ -17447,7 +17696,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( { VMA_ASSERT(allocator); - if(image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) + if (image == VK_NULL_HANDLE && allocation == VK_NULL_HANDLE) { return; } @@ -17456,11 +17705,11 @@ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( VMA_DEBUG_GLOBAL_MUTEX_LOCK - if(image != VK_NULL_HANDLE) + if (image != VK_NULL_HANDLE) { (*allocator->GetVulkanFunctions().vkDestroyImage)(allocator->m_hDevice, image, allocator->GetAllocationCallbacks()); } - if(allocation != VK_NULL_HANDLE) + if (allocation != VK_NULL_HANDLE) { allocator->FreeMemory( 1, // allocationCount @@ -17469,8 +17718,8 @@ VMA_CALL_PRE void VMA_CALL_POST vmaDestroyImage( } VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock( - const VmaVirtualBlockCreateInfo* VMA_NOT_NULL pCreateInfo, - VmaVirtualBlock VMA_NULLABLE * VMA_NOT_NULL pVirtualBlock) + const VmaVirtualBlockCreateInfo *VMA_NOT_NULL pCreateInfo, + VmaVirtualBlock VMA_NULLABLE *VMA_NOT_NULL pVirtualBlock) { VMA_ASSERT(pCreateInfo && pVirtualBlock); 
VMA_ASSERT(pCreateInfo->size > 0); @@ -17478,7 +17727,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock( VMA_DEBUG_GLOBAL_MUTEX_LOCK; *pVirtualBlock = vma_new(pCreateInfo->pAllocationCallbacks, VmaVirtualBlock_T)(*pCreateInfo); VkResult res = (*pVirtualBlock)->Init(); - if(res < 0) + if (res < 0) { vma_delete(pCreateInfo->pAllocationCallbacks, *pVirtualBlock); *pVirtualBlock = VK_NULL_HANDLE; @@ -17488,7 +17737,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaCreateVirtualBlock( VMA_CALL_PRE void VMA_CALL_POST vmaDestroyVirtualBlock(VmaVirtualBlock VMA_NULLABLE virtualBlock) { - if(virtualBlock != VK_NULL_HANDLE) + if (virtualBlock != VK_NULL_HANDLE) { VMA_DEBUG_LOG("vmaDestroyVirtualBlock"); VMA_DEBUG_GLOBAL_MUTEX_LOCK; @@ -17506,7 +17755,7 @@ VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_N } VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo) + VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo *VMA_NOT_NULL pVirtualAllocInfo) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL); VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo"); @@ -17515,8 +17764,8 @@ VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_ } VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation, - VkDeviceSize* VMA_NULLABLE pOffset) + const VmaVirtualAllocationCreateInfo *VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE *VMA_NOT_NULL pAllocation, + VkDeviceSize *VMA_NULLABLE pOffset) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL); VMA_DEBUG_LOG("vmaVirtualAllocate"); @@ -17526,7 +17775,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_N VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation) { - if(allocation != VK_NULL_HANDLE) + if (allocation != VK_NULL_HANDLE) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); VMA_DEBUG_LOG("vmaVirtualFree"); @@ -17544,7 +17793,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NUL } VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData) + VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void *VMA_NULLABLE pUserData) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData"); @@ -17553,7 +17802,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock } VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - VmaStatistics* VMA_NOT_NULL pStats) + VmaStatistics *VMA_NOT_NULL pStats) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); VMA_DEBUG_LOG("vmaGetVirtualBlockStatistics"); @@ -17562,7 +17811,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualBlockStatistics(VmaVirtualBlock VMA } VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlock VMA_NOT_NULL virtualBlock, 
- VmaDetailedStatistics* VMA_NOT_NULL pStats) + VmaDetailedStatistics *VMA_NOT_NULL pStats) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pStats != VMA_NULL); VMA_DEBUG_LOG("vmaCalculateVirtualBlockStatistics"); @@ -17573,20 +17822,20 @@ VMA_CALL_PRE void VMA_CALL_POST vmaCalculateVirtualBlockStatistics(VmaVirtualBlo #if VMA_STATS_STRING_ENABLED VMA_CALL_PRE void VMA_CALL_POST vmaBuildVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - char* VMA_NULLABLE * VMA_NOT_NULL ppStatsString, VkBool32 detailedMap) + char *VMA_NULLABLE *VMA_NOT_NULL ppStatsString, VkBool32 detailedMap) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && ppStatsString != VMA_NULL); VMA_DEBUG_GLOBAL_MUTEX_LOCK; - const VkAllocationCallbacks* allocationCallbacks = virtualBlock->GetAllocationCallbacks(); + const VkAllocationCallbacks *allocationCallbacks = virtualBlock->GetAllocationCallbacks(); VmaStringBuilder sb(allocationCallbacks); virtualBlock->BuildStatsString(detailedMap != VK_FALSE, sb); *ppStatsString = VmaCreateStringCopy(allocationCallbacks, sb.GetData(), sb.GetLength()); } VMA_CALL_PRE void VMA_CALL_POST vmaFreeVirtualBlockStatsString(VmaVirtualBlock VMA_NOT_NULL virtualBlock, - char* VMA_NULLABLE pStatsString) + char *VMA_NULLABLE pStatsString) { - if(pStatsString != VMA_NULL) + if (pStatsString != VMA_NULL) { VMA_ASSERT(virtualBlock != VK_NULL_HANDLE); VMA_DEBUG_GLOBAL_MUTEX_LOCK; diff --git a/dependencies/android_native_app_glue/CMakeLists.txt b/dependencies/android_native_app_glue/CMakeLists.txt new file mode 100644 index 00000000..ad37467b --- /dev/null +++ b/dependencies/android_native_app_glue/CMakeLists.txt @@ -0,0 +1,16 @@ +cmake_minimum_required(VERSION 3.20) + +project(android_native_app_glue) + +file(TO_CMAKE_PATH "$ENV{NDKROOT}" NDKROOT_PATH) +set(SOURCES ${NDKROOT_PATH}/sources/android/native_app_glue/android_native_app_glue.c) +set(HEADERS ${NDKROOT_PATH}/sources/android/native_app_glue/android_native_app_glue.h) + +set(${PROJECT_NAME}_INCLUDE_DIRS ${NDKROOT_PATH}/sources/android/native_app_glue CACHE INTERNAL "${PROJECT_NAME}: includes" FORCE) + +include_directories(${${PROJECT_NAME}_INCLUDE_DIRS}) + +source_group("includes" FILES ${HEADERS}) +source_group("src" FILES ${SOURCES}) + +add_library(${PROJECT_NAME} STATIC ${HEADERS} ${SOURCES}) diff --git a/dependencies/glew/CMakeLists.txt b/dependencies/glew/CMakeLists.txt index d7b096d0..2578526d 100644 --- a/dependencies/glew/CMakeLists.txt +++ b/dependencies/glew/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.8) +cmake_minimum_required(VERSION 3.20) project(glew) diff --git a/dependencies/glm/CMakeLists.txt b/dependencies/glm/CMakeLists.txt index 08e16d94..5ecc4792 100644 --- a/dependencies/glm/CMakeLists.txt +++ b/dependencies/glm/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.8) +cmake_minimum_required(VERSION 3.20) project(glm) diff --git a/dependencies/imgui/CMakeLists.txt b/dependencies/imgui/CMakeLists.txt index 5ea35561..96c81061 100644 --- a/dependencies/imgui/CMakeLists.txt +++ b/dependencies/imgui/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.8) +cmake_minimum_required(VERSION 3.20) project(imgui) diff --git a/dependencies/messagebox-x11/CMakeLists.txt b/dependencies/messagebox-x11/CMakeLists.txt index e495ab2a..9e8ed845 100644 --- a/dependencies/messagebox-x11/CMakeLists.txt +++ b/dependencies/messagebox-x11/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.8) +cmake_minimum_required(VERSION 3.20) project(messagebox_x11) diff --git 
a/dependencies/rapidjson/CMakeLists.txt b/dependencies/rapidjson/CMakeLists.txt index 776a89c3..04a36a6f 100644 --- a/dependencies/rapidjson/CMakeLists.txt +++ b/dependencies/rapidjson/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.8) +cmake_minimum_required(VERSION 3.20) project(rapidjson) diff --git a/dependencies/stb/CMakeLists.txt b/dependencies/stb/CMakeLists.txt index 3e0def37..7314cbd8 100644 --- a/dependencies/stb/CMakeLists.txt +++ b/dependencies/stb/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.8) +cmake_minimum_required(VERSION 3.20) project(stb) diff --git a/dependencies/tinyobj_loader_c/CMakeLists.txt b/dependencies/tinyobj_loader_c/CMakeLists.txt index fa4277eb..65f4bc53 100644 --- a/dependencies/tinyobj_loader_c/CMakeLists.txt +++ b/dependencies/tinyobj_loader_c/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.8) +cmake_minimum_required(VERSION 3.20) project(tinyobj_loader_c) diff --git a/disable_cross_compiling.bat b/disable_cross_compiling.bat new file mode 100644 index 00000000..923a3cc7 --- /dev/null +++ b/disable_cross_compiling.bat @@ -0,0 +1,3 @@ +@echo off + +del .vscode\settings.json \ No newline at end of file diff --git a/disable_cross_compiling.sh b/disable_cross_compiling.sh new file mode 100644 index 00000000..d5a4f0f1 --- /dev/null +++ b/disable_cross_compiling.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +rm .vscode/settings.json \ No newline at end of file diff --git a/enable_android_cross_compiling.bat b/enable_android_cross_compiling.bat new file mode 100644 index 00000000..98e522a8 --- /dev/null +++ b/enable_android_cross_compiling.bat @@ -0,0 +1,9 @@ +@echo off + +if "%NDKROOT%" == "" ( + echo Cannot find NDKROOT environment variable. + exit /b -1 +) + +mkdir .vscode +copy /Y resources\Android\.vscode\settings.json .vscode\settings.json \ No newline at end of file diff --git a/enable_android_cross_compiling.sh b/enable_android_cross_compiling.sh new file mode 100644 index 00000000..c5f11d5e --- /dev/null +++ b/enable_android_cross_compiling.sh @@ -0,0 +1,9 @@ +#!/bin/sh + +if [ -z "$NDKROOT" ]; then + echo "Cannot find NDKROOT environment variable." 
+ exit 1 +fi + +mkdir -p .vscode +cp -f resources/Android/.vscode/settings.json .vscode/settings.json \ No newline at end of file diff --git a/resources/Android/gradle/project/app/build.gradle.template b/resources/Android/gradle/project/app/build.gradle.template new file mode 100644 index 00000000..f685c63f --- /dev/null +++ b/resources/Android/gradle/project/app/build.gradle.template @@ -0,0 +1,39 @@ +apply plugin: 'com.android.application' + +android { + namespace "com.fastcg.${project_name}" + compileSdkVersion 29 + + defaultConfig { + applicationId "com.fastcg.${project_name}" + minSdkVersion 29 + targetSdkVersion 29 + versionCode 1 + versionName "1.0" + + ndk { + abiFilters 'arm64-v8a' + } + } + + buildTypes { + debug { + debuggable true + } + release { + minifyEnabled false + } + } + + sourceSets { + main { + jniLibs.srcDirs = ['src/main/jniLibs'] + } + } +} + +dependencies { + implementation fileTree(dir: 'libs', include: ['*.jar']) + implementation 'androidx.appcompat:appcompat:1.2.0' + implementation 'com.google.android.material:material:1.3.0' +} \ No newline at end of file diff --git a/resources/Android/gradle/project/app/src/main/AndroidManifest.xml.template b/resources/Android/gradle/project/app/src/main/AndroidManifest.xml.template new file mode 100644 index 00000000..c0c31cbd --- /dev/null +++ b/resources/Android/gradle/project/app/src/main/AndroidManifest.xml.template @@ -0,0 +1,18 @@ + + + + + + + + + + + \ No newline at end of file diff --git a/resources/Android/gradle/project/app/src/main/java/com/fastcg/MainActivity.java b/resources/Android/gradle/project/app/src/main/java/com/fastcg/MainActivity.java new file mode 100644 index 00000000..8a2cee40 --- /dev/null +++ b/resources/Android/gradle/project/app/src/main/java/com/fastcg/MainActivity.java @@ -0,0 +1,153 @@ +package com.fastcg; + +import android.app.NativeActivity; +import android.content.res.AssetManager; +import android.os.Bundle; +import android.util.Log; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +public class MainActivity extends NativeActivity +{ + @Override + protected void onCreate(Bundle savedInstanceState) + { + deleteAssetsInInternalStorage(); + copyAssetsToInternalStorage(""); + + super.onCreate(savedInstanceState); + } + + private boolean deleteAssetsInInternalStorage() + { + return deleteDirectory(new File(getFilesDir().getAbsolutePath() + "/assets")); + } + + private boolean deleteDirectory(File directory) + { + if (directory.isDirectory()) + { + File[] files = directory.listFiles(); + if (files != null) + { + for (File file : files) + { + if (file.isDirectory()) + { + deleteDirectory(file); + } + else + { + file.delete(); + } + } + } + } + return directory.delete(); + } + + private void copyAssetsToInternalStorage(String path) + { + AssetManager assetManager = getAssets(); + String[] assets = null; + try + { + assets = assetManager.list(path); + if (assets == null || assets.length == 0) + { + copyAssetToInternalStorage(path); + } + else + { + String fullPath = getFilesDir().getAbsolutePath() + "/assets/" + path; + File dir = new File(fullPath); + if (!dir.exists()) + { + dir.mkdir(); + } + for (String asset : assets) + { + copyAssetsToInternalStorage(path.equals("") ?
asset : path + "/" + asset); + } + } + } + catch (IOException e) + { + Log.e("FASTCG", "Failed to copy asset file: " + path, e); + } + } + + private void copyAssetToInternalStorage(String filename) + { + AssetManager assetManager = getAssets(); + InputStream in = null; + OutputStream out = null; + try + { + in = assetManager.open(filename); + File outFile = new File(getFilesDir() + "/assets/" + filename); + File dir = outFile.getParentFile(); + if (!dir.exists()) + { + dir.mkdirs(); + } + out = new FileOutputStream(outFile); + copyFile(in, out); + } + catch (IOException e) + { + Log.e("FASTCG", "Failed to copy asset file: " + filename, e); + } + finally + { + if (in != null) + { + try + { + in.close(); + } + catch (IOException e) + { + // Ignore errors when closing the input stream + } + } + if (out != null) + { + try + { + out.close(); + } + catch (IOException e) + { + // Ignore errors when closing the output stream + } + } + } + } + + private void copyFile(InputStream in, OutputStream out) throws IOException + { + byte[] buffer = new byte[1024]; + int read; + while ((read = in.read(buffer)) != -1) + { + out.write(buffer, 0, read); + } + } + + // FIXME: + /*public void safeFinish() + { + runOnUiThread(new Runnable() + { + @Override + public void run() + { + finish(); + } + }); + }*/ +} \ No newline at end of file diff --git a/resources/Android/gradle/project/app/src/main/res/values/strings.xml.template b/resources/Android/gradle/project/app/src/main/res/values/strings.xml.template new file mode 100644 index 00000000..969e9090 --- /dev/null +++ b/resources/Android/gradle/project/app/src/main/res/values/strings.xml.template @@ -0,0 +1,3 @@ + + ${project_name} + \ No newline at end of file diff --git a/resources/Android/gradle/project/build.gradle b/resources/Android/gradle/project/build.gradle new file mode 100644 index 00000000..a7b6c127 --- /dev/null +++ b/resources/Android/gradle/project/build.gradle @@ -0,0 +1,22 @@ +buildscript { + ext.kotlin_version = '1.5.20' + repositories { + google() + mavenCentral() + } + dependencies { + classpath 'com.android.tools.build:gradle:8.0.0' + classpath "org.jetbrains.kotlin:kotlin-gradle-plugin:$kotlin_version" + } +} + +allprojects { + repositories { + google() + mavenCentral() + } +} + +task clean(type: Delete) { + delete rootProject.buildDir +} \ No newline at end of file diff --git a/resources/Android/gradle/project/gradle.properties b/resources/Android/gradle/project/gradle.properties new file mode 100644 index 00000000..6cddd62e --- /dev/null +++ b/resources/Android/gradle/project/gradle.properties @@ -0,0 +1,2 @@ +android.useAndroidX=true +# org.gradle.jvmargs=-Xmx1536m \ No newline at end of file diff --git a/resources/Android/gradle/project/gradle/wrapper/gradle-wrapper.jar b/resources/Android/gradle/project/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000000000000000000000000000000000000..7f93135c49b765f8051ef9d0a6055ff8e46073d8 GIT binary patch literal 63721 [63,721 bytes of base85-encoded binary data omitted]
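The MainActivity above mirrors every APK asset into getFilesDir()/assets, so the native side can read assets with ordinary file I/O instead of the AAssetManager API. Below is a minimal sketch of what a native consumer of those copied files could look like, assuming a native_app_glue entry point; the helper name and path layout are illustrative only and are not code from this patch:

#include <android_native_app_glue.h>
#include <string>

// Illustrative helper, not part of this patch: resolves a file that
// MainActivity copied into internal storage. ANativeActivity's
// internalDataPath field is the native-side equivalent of
// Context.getFilesDir() on the Java side.
std::string GetCopiedAssetPath(const android_app *pApp, const std::string &rRelativePath)
{
    return std::string(pApp->activity->internalDataPath) + "/assets/" + rRelativePath;
}

This is only the idea behind the copy step; the engine's own asset resolution is handled elsewhere and is not shown in this hunk.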
diff --git a/resources/Android/gradle/project/gradle/wrapper/gradle-wrapper.properties b/resources/Android/gradle/project/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 00000000..3fa8f862 --- /dev/null +++ b/resources/Android/gradle/project/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,7 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-8.4-bin.zip +networkTimeout=10000 +validateDistributionUrl=true +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git
a/resources/Android/gradle/project/gradlew b/resources/Android/gradle/project/gradlew new file mode 100644 index 00000000..1aa94a42 --- /dev/null +++ b/resources/Android/gradle/project/gradlew @@ -0,0 +1,249 @@ +#!/bin/sh + +# +# Copyright © 2015-2021 the original authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +############################################################################## +# +# Gradle start up script for POSIX generated by Gradle. +# +# Important for running: +# +# (1) You need a POSIX-compliant shell to run this script. If your /bin/sh is +# noncompliant, but you have some other compliant shell such as ksh or +# bash, then to run this script, type that shell name before the whole +# command line, like: +# +# ksh Gradle +# +# Busybox and similar reduced shells will NOT work, because this script +# requires all of these POSIX shell features: +# * functions; +# * expansions «$var», «${var}», «${var:-default}», «${var+SET}», +# «${var#prefix}», «${var%suffix}», and «$( cmd )»; +# * compound commands having a testable exit status, especially «case»; +# * various built-in commands including «command», «set», and «ulimit». +# +# Important for patching: +# +# (2) This script targets any POSIX shell, so it avoids extensions provided +# by Bash, Ksh, etc; in particular arrays are avoided. +# +# The "traditional" practice of packing multiple parameters into a +# space-separated string is a well documented source of bugs and security +# problems, so this is (mostly) avoided, by progressively accumulating +# options in "$@", and eventually passing that to Java. +# +# Where the inherited environment variables (DEFAULT_JVM_OPTS, JAVA_OPTS, +# and GRADLE_OPTS) rely on word-splitting, this is performed explicitly; +# see the in-line comments for details. +# +# There are tweaks for specific operating systems such as AIX, CygWin, +# Darwin, MinGW, and NonStop. +# +# (3) This script is generated from the Groovy template +# https://github.com/gradle/gradle/blob/HEAD/subprojects/plugins/src/main/resources/org/gradle/api/internal/plugins/unixStartScript.txt +# within the Gradle project. +# +# You can find Gradle at https://github.com/gradle/gradle/. +# +############################################################################## + +# Attempt to set APP_HOME + +# Resolve links: $0 may be a link +app_path=$0 + +# Need this for daisy-chained symlinks. +while + APP_HOME=${app_path%"${app_path##*/}"} # leaves a trailing /; empty if no leading path + [ -h "$app_path" ] +do + ls=$( ls -ld "$app_path" ) + link=${ls#*' -> '} + case $link in #( + /*) app_path=$link ;; #( + *) app_path=$APP_HOME$link ;; + esac +done + +# This is normally unused +# shellcheck disable=SC2034 +APP_BASE_NAME=${0##*/} +# Discard cd standard output in case $CDPATH is set (https://github.com/gradle/gradle/issues/25036) +APP_HOME=$( cd "${APP_HOME:-./}" > /dev/null && pwd -P ) || exit + +# Use the maximum available, or set MAX_FD != -1 to use that value. 
+MAX_FD=maximum + +warn () { + echo "$*" +} >&2 + +die () { + echo + echo "$*" + echo + exit 1 +} >&2 + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "$( uname )" in #( + CYGWIN* ) cygwin=true ;; #( + Darwin* ) darwin=true ;; #( + MSYS* | MINGW* ) msys=true ;; #( + NONSTOP* ) nonstop=true ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD=$JAVA_HOME/jre/sh/java + else + JAVACMD=$JAVA_HOME/bin/java + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD=java + if ! command -v java >/dev/null 2>&1 + then + die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +fi + +# Increase the maximum file descriptors if we can. +if ! "$cygwin" && ! "$darwin" && ! "$nonstop" ; then + case $MAX_FD in #( + max*) + # In POSIX sh, ulimit -H is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + MAX_FD=$( ulimit -H -n ) || + warn "Could not query maximum file descriptor limit" + esac + case $MAX_FD in #( + '' | soft) :;; #( + *) + # In POSIX sh, ulimit -n is undefined. That's why the result is checked to see if it worked. + # shellcheck disable=SC2039,SC3045 + ulimit -n "$MAX_FD" || + warn "Could not set maximum file descriptor limit to $MAX_FD" + esac +fi + +# Collect all arguments for the java command, stacking in reverse order: +# * args from the command line +# * the main class name +# * -classpath +# * -D...appname settings +# * --module-path (only if needed) +# * DEFAULT_JVM_OPTS, JAVA_OPTS, and GRADLE_OPTS environment variables. + +# For Cygwin or MSYS, switch paths to Windows format before running java +if "$cygwin" || "$msys" ; then + APP_HOME=$( cygpath --path --mixed "$APP_HOME" ) + CLASSPATH=$( cygpath --path --mixed "$CLASSPATH" ) + + JAVACMD=$( cygpath --unix "$JAVACMD" ) + + # Now convert the arguments - kludge to limit ourselves to /bin/sh + for arg do + if + case $arg in #( + -*) false ;; # don't mess with options #( + /?*) t=${arg#/} t=/${t%%/*} # looks like a POSIX filepath + [ -e "$t" ] ;; #( + *) false ;; + esac + then + arg=$( cygpath --path --ignore --mixed "$arg" ) + fi + # Roll the args list around exactly as many times as the number of + # args, so each arg winds up back in the position where it started, but + # possibly modified. + # + # NB: a `for` loop captures its iteration list before it begins, so + # changing the positional parameters here affects neither the number of + # iterations, nor the values presented in `arg`. + shift # remove old arg + set -- "$@" "$arg" # push replacement arg + done +fi + + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS='"-Xmx64m" "-Xms64m"' + +# Collect all arguments for the java command: +# * DEFAULT_JVM_OPTS, JAVA_OPTS, JAVA_OPTS, and optsEnvironmentVar are not allowed to contain shell fragments, +# and any embedded shellness will be escaped. 
+#   * For example: A user cannot expect ${Hostname} to be expanded, as it is an environment variable and will be
+#     treated as '${Hostname}' itself on the command line.
+
+set -- \
+        "-Dorg.gradle.appname=$APP_BASE_NAME" \
+        -classpath "$CLASSPATH" \
+        org.gradle.wrapper.GradleWrapperMain \
+        "$@"
+
+# Stop when "xargs" is not available.
+if ! command -v xargs >/dev/null 2>&1
+then
+    die "xargs is not available"
+fi
+
+# Use "xargs" to parse quoted args.
+#
+# With -n1 it outputs one arg per line, with the quotes and backslashes removed.
+#
+# In Bash we could simply go:
+#
+#   readarray ARGS < <( xargs -n1 <<<"$var" ) &&
+#   set -- "${ARGS[@]}" "$@"
+#
+# but POSIX shell has neither arrays nor process substitution, so instead we
+# post-process each arg (as a line of input to sed) to backslash-escape any
+# character that might be a shell metacharacter, then use eval to reverse
+# that process (while maintaining the separation between arguments), and wrap
+# the whole thing up as a single "set" statement.
+#
+# This will of course break if any of these variables contains a newline or
+# an unmatched quote.
+#
+
+eval "set -- $(
+        printf '%s\n' "$DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS" |
+        xargs -n1 |
+        sed ' s~[^-[:alnum:]+,./:=@_]~\\&~g; ' |
+        tr '\n' ' '
+    )" '"$@"'
+
+exec "$JAVACMD" "$@" diff --git a/resources/Android/gradle/project/gradlew.bat b/resources/Android/gradle/project/gradlew.bat new file mode 100644 index 00000000..93e3f59f --- /dev/null +++ b/resources/Android/gradle/project/gradlew.bat @@ -0,0 +1,92 @@
+@rem
+@rem Copyright 2015 the original author or authors.
+@rem
+@rem Licensed under the Apache License, Version 2.0 (the "License");
+@rem you may not use this file except in compliance with the License.
+@rem You may obtain a copy of the License at
+@rem
+@rem      https://www.apache.org/licenses/LICENSE-2.0
+@rem
+@rem Unless required by applicable law or agreed to in writing, software
+@rem distributed under the License is distributed on an "AS IS" BASIS,
+@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+@rem See the License for the specific language governing permissions and
+@rem limitations under the License.
+@rem
+
+@if "%DEBUG%"=="" @echo off
+@rem ##########################################################################
+@rem
+@rem  Gradle startup script for Windows
+@rem
+@rem ##########################################################################
+
+@rem Set local scope for the variables with windows NT shell
+if "%OS%"=="Windows_NT" setlocal
+
+set DIRNAME=%~dp0
+if "%DIRNAME%"=="" set DIRNAME=.
+@rem This is normally unused
+set APP_BASE_NAME=%~n0
+set APP_HOME=%DIRNAME%
+
+@rem Resolve any "." and ".." in APP_HOME to make it shorter.
+for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
+
+@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script.
+set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
+
+@rem Find java.exe
+if defined JAVA_HOME goto findJavaFromJavaHome
+
+set JAVA_EXE=java.exe
+%JAVA_EXE% -version >NUL 2>&1
+if %ERRORLEVEL% equ 0 goto execute
+
+echo.
+echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
+echo.
+echo Please set the JAVA_HOME variable in your environment to match the
+echo location of your Java installation.
+
+goto fail
+
+:findJavaFromJavaHome
+set JAVA_HOME=%JAVA_HOME:"=%
+set JAVA_EXE=%JAVA_HOME%/bin/java.exe
+
+if exist "%JAVA_EXE%" goto execute
+
+echo.
+echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %* + +:end +@rem End local scope for the variables with windows NT shell +if %ERRORLEVEL% equ 0 goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +set EXIT_CODE=%ERRORLEVEL% +if %EXIT_CODE% equ 0 set EXIT_CODE=1 +if not ""=="%GRADLE_EXIT_CONSOLE%" exit %EXIT_CODE% +exit /b %EXIT_CODE% + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/resources/Android/gradle/project/settings.gradle.template b/resources/Android/gradle/project/settings.gradle.template new file mode 100644 index 00000000..9344b730 --- /dev/null +++ b/resources/Android/gradle/project/settings.gradle.template @@ -0,0 +1,2 @@ +rootProject.name = '${project_name}' +include ':app' \ No newline at end of file diff --git a/samples/CMakeLists.txt b/samples/CMakeLists.txt index d3e099a9..5d8dcecc 100644 --- a/samples/CMakeLists.txt +++ b/samples/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.8) +cmake_minimum_required(VERSION 3.20) add_subdirectory(bump_mapping) add_subdirectory(deferred_rendering) diff --git a/samples/bump_mapping/CMakeLists.txt b/samples/bump_mapping/CMakeLists.txt index ccd4d8ad..a59b86be 100644 --- a/samples/bump_mapping/CMakeLists.txt +++ b/samples/bump_mapping/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.8) +cmake_minimum_required(VERSION 3.20) project(bump_mapping) diff --git a/samples/bump_mapping/src/BumpMappingApplication.cpp b/samples/bump_mapping/src/BumpMappingApplication.cpp index 685b0ecd..dde3b572 100644 --- a/samples/bump_mapping/src/BumpMappingApplication.cpp +++ b/samples/bump_mapping/src/BumpMappingApplication.cpp @@ -80,7 +80,7 @@ namespace auto *pModelGameObject = ModelLoader::Load(AssetSystem::GetInstance()->Resolve("objs/doomsday.obj"), pMissingMaterial); if (pModelGameObject == nullptr) { - FASTCG_THROW_EXCEPTION(Exception, "Missing doomsday model"); + FASTCG_THROW_EXCEPTION(Exception, "Couldn't find doomsday model"); } const auto &bounds = pModelGameObject->GetBounds(); @@ -131,3 +131,5 @@ void BumpMappingApplication::OnStart() Controls::Instantiate(pGeneralBehavioursGameObject); LightsAnimator::Instantiate(pGeneralBehavioursGameObject); } + +FASTCG_MAIN(BumpMappingApplication) \ No newline at end of file diff --git a/samples/bump_mapping/src/main.cpp b/samples/bump_mapping/src/main.cpp deleted file mode 100644 index 9b158175..00000000 --- a/samples/bump_mapping/src/main.cpp +++ /dev/null @@ -1,7 +0,0 @@ -#include "BumpMappingApplication.h" - -int main(int argc, char **argv) -{ - BumpMappingApplication app; - return app.Run(argc, argv); -} diff --git a/samples/deferred_rendering/CMakeLists.txt b/samples/deferred_rendering/CMakeLists.txt index 43a998aa..2f49f3bd 100644 --- a/samples/deferred_rendering/CMakeLists.txt +++ b/samples/deferred_rendering/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.8) +cmake_minimum_required(VERSION 3.20) project(deferred_rendering) diff --git a/samples/deferred_rendering/src/DeferredRenderingApplication.cpp 
b/samples/deferred_rendering/src/DeferredRenderingApplication.cpp index 9c8cf2d3..bde7f3d1 100644 --- a/samples/deferred_rendering/src/DeferredRenderingApplication.cpp +++ b/samples/deferred_rendering/src/DeferredRenderingApplication.cpp @@ -21,3 +21,5 @@ void DeferredRenderingApplication::OnStart() { GameObjectLoader::Load(AssetSystem::GetInstance()->Resolve("scenes/deferred_rendering.scene")); } + +FASTCG_MAIN(DeferredRenderingApplication) \ No newline at end of file diff --git a/samples/deferred_rendering/src/main.cpp b/samples/deferred_rendering/src/main.cpp deleted file mode 100644 index 04e1d4aa..00000000 --- a/samples/deferred_rendering/src/main.cpp +++ /dev/null @@ -1,7 +0,0 @@ -#include "DeferredRenderingApplication.h" - -int main(int argc, char **argv) -{ - DeferredRenderingApplication app; - return app.Run(argc, argv); -} diff --git a/samples/pcss/CMakeLists.txt b/samples/pcss/CMakeLists.txt index 0ed77ee5..dfc208de 100644 --- a/samples/pcss/CMakeLists.txt +++ b/samples/pcss/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.8) +cmake_minimum_required(VERSION 3.20) project(pcss) diff --git a/samples/pcss/src/PCSSApplication.cpp b/samples/pcss/src/PCSSApplication.cpp index 43277291..a26d6856 100644 --- a/samples/pcss/src/PCSSApplication.cpp +++ b/samples/pcss/src/PCSSApplication.cpp @@ -29,7 +29,7 @@ namespace auto *pModel = ModelLoader::Load(AssetSystem::GetInstance()->Resolve("objs/bunny.obj"), pDefaultMaterial, (ModelLoaderOptionMaskType)ModelLoaderOption::IS_SHADOW_CASTER); if (pModel == nullptr) { - FASTCG_THROW_EXCEPTION(Exception, "Missing bunny model"); + FASTCG_THROW_EXCEPTION(Exception, "Couldn't find bunny model"); } const auto &bounds = pModel->GetBounds(); @@ -89,3 +89,5 @@ void PCSSApplication::OnStart() auto *pGeneralBehavioursGameObject = GameObject::Instantiate("General Behaviours"); Controls::Instantiate(pGeneralBehavioursGameObject); } + +FASTCG_MAIN(PCSSApplication) \ No newline at end of file diff --git a/samples/pcss/src/main.cpp b/samples/pcss/src/main.cpp deleted file mode 100644 index e83ea4be..00000000 --- a/samples/pcss/src/main.cpp +++ /dev/null @@ -1,7 +0,0 @@ -#include "PCSSApplication.h" - -int main(int argc, char **argv) -{ - PCSSApplication app; - return app.Run(argc, argv); -} diff --git a/samples/ssao/CMakeLists.txt b/samples/ssao/CMakeLists.txt index 20ee16c2..b59a8172 100644 --- a/samples/ssao/CMakeLists.txt +++ b/samples/ssao/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.8) +cmake_minimum_required(VERSION 3.20) project(ssao) diff --git a/samples/ssao/src/SSAOApplication.cpp b/samples/ssao/src/SSAOApplication.cpp index 220f927e..ba1e0e7c 100644 --- a/samples/ssao/src/SSAOApplication.cpp +++ b/samples/ssao/src/SSAOApplication.cpp @@ -25,7 +25,7 @@ namespace auto *pModel = ModelLoader::Load(AssetSystem::GetInstance()->Resolve("objs/armadillo.obj"), pDefaultMaterial); if (pModel == nullptr) { - FASTCG_THROW_EXCEPTION(Exception, "Missing armadillo model"); + FASTCG_THROW_EXCEPTION(Exception, "Couldn't find armadillo model"); } const auto &bounds = pModel->GetBounds(); @@ -75,3 +75,5 @@ void SSAOApplication::OnStart() auto *pGeneralBehavioursGameObject = GameObject::Instantiate("General Behaviours"); Controls::Instantiate(pGeneralBehavioursGameObject); } + +FASTCG_MAIN(SSAOApplication) \ No newline at end of file diff --git a/samples/ssao/src/main.cpp b/samples/ssao/src/main.cpp deleted file mode 100644 index 68427429..00000000 --- a/samples/ssao/src/main.cpp +++ /dev/null @@ -1,7 +0,0 @@ -#include 
"SSAOApplication.h" - -int main(int argc, char **argv) -{ - SSAOApplication app; - return app.Run(argc, argv); -}