Fix various issues in WebGPU backend

This commit is contained in:
Wojtek Figat
2026-03-16 16:39:18 +01:00
parent a5bbf0dbde
commit 427f4647fc
9 changed files with 53 additions and 5 deletions

View File

@@ -256,8 +256,6 @@ uint64 GPUResource::GetMemoryUsage() const
return _memoryUsage;
}
static_assert((GPU_ENABLE_RESOURCE_NAMING) == (!BUILD_RELEASE), "Update build condition on around GPUResource Name property getter/setter.");
#if GPU_ENABLE_RESOURCE_NAMING
StringView GPUResource::GetName() const
@@ -282,6 +280,17 @@ void GPUResource::SetName(const StringView& name)
}
}
#elif !BUILD_RELEASE
// Fallback getter used when GPU_ENABLE_RESOURCE_NAMING is compiled out but the
// build is not a release build: no name storage exists, so always report empty.
StringView GPUResource::GetName() const
{
    return StringView::Empty;
}
// Fallback setter used when GPU_ENABLE_RESOURCE_NAMING is compiled out but the
// build is not a release build: resource names are debug-only metadata, so the
// given name is deliberately ignored and nothing is stored.
void GPUResource::SetName(const StringView& name)
{
}
#endif
void GPUResource::ReleaseGPU()

View File

@@ -244,3 +244,10 @@ void GraphicsService::Dispose()
{
// Device is disposed AFTER Content (faster and safer because there are no assets left, so there are fewer GPU resources to clean up)
}
#if PLATFORM_WEB && !GRAPHICS_API_WEBGPU
// Fix missing method when using Null backend on Web
// Empty stub provided when building for Web without the WebGPU backend (Null
// backend in use), so code that references this symbol still links.
// @param view Texture view to assign the sampler to (unused by the Null backend).
// @param samplerType Sampler type identifier (unused by the Null backend).
void SetWebGPUTextureViewSampler(GPUTextureView* view, uint32 samplerType)
{
    // Intentionally a no-op: the Null backend has no GPU texture views to configure.
}
#endif

View File

@@ -362,7 +362,13 @@ Viewport SceneRenderTask::GetOutputViewport() const
if (Output && Output->IsAllocated())
return Viewport(0, 0, static_cast<float>(Output->Width()), static_cast<float>(Output->Height()));
if (SwapChain)
{
#if PLATFORM_WEB
// Hack fix for Web where the swapchain texture might have a different size than the backbuffer's current size; precache it here (GetBackBufferView might resize internally)
SwapChain->GetBackBufferView();
#endif
return Viewport(0, 0, static_cast<float>(SwapChain->GetWidth()), static_cast<float>(SwapChain->GetHeight()));
}
return GetViewport();
}

View File

@@ -128,6 +128,7 @@ void GPUBufferWebGPU::OnReleaseGPU()
#if GPU_ENABLE_RESOURCE_NAMING
_name.Clear();
#endif
_view.Ptr.Version++;
// Base
GPUBuffer::OnReleaseGPU();

View File

@@ -554,6 +554,7 @@ WGPURenderPipeline GPUPipelineStateWebGPU::GetPipeline(const PipelineKey& key, c
_colorTargets[i].format = (WGPUTextureFormat)key.RenderTargetFormats[i];
}
WGPUVertexBufferLayout buffers[GPU_MAX_VB_BINDED];
WGPUVertexAttribute attributes[GPU_MAX_VS_ELEMENTS];
if (key.VertexLayout)
{
// Combine input layout of Vertex Buffers with the destination layout used by the Vertex Shader
@@ -564,7 +565,6 @@ WGPURenderPipeline GPUPipelineStateWebGPU::GetPipeline(const PipelineKey& key, c
mergedVertexLayout = (GPUVertexLayoutWebGPU*)GPUVertexLayout::Merge(mergedVertexLayout, VS->InputLayout, true, true, -1, true);
// Build attributes list
WGPUVertexAttribute attributes[GPU_MAX_VS_ELEMENTS];
desc.vertex.bufferCount = 0;
desc.vertex.buffers = buffers;
int32 attributeIndex = 0;

View File

@@ -20,6 +20,16 @@ GPUConstantBufferWebGPU::GPUConstantBufferWebGPU(GPUDeviceWebGPU* device, uint32
GPUShaderProgram* GPUShaderWebGPU::CreateGPUShaderProgram(ShaderStage type, const GPUShaderProgramInitializer& initializer, Span<byte> bytecode, MemoryReadStream& stream)
{
// Fix issue with unaligned loads of the shader bytecode
// TODO: fix issue at cook time by adding padding before shader bytecode to ensure it's aligned (eg. to 8 bytes)
BytesContainer bytecoddAligned;
uintptr align = (uintptr)bytecode.Get() % sizeof(uintptr);
if (align != 0)
{
bytecoddAligned.Copy(bytecode);
bytecode = bytecoddAligned;
}
// Extract the SPIR-V shader header from the cache
SpirvShaderHeader* header = (SpirvShaderHeader*)bytecode.Get();
bytecode = bytecode.Slice(sizeof(SpirvShaderHeader));

View File

@@ -11,7 +11,6 @@
#include "Engine/Graphics/RenderTools.h"
#include "Engine/Profiler/ProfilerCPU.h"
#include "Engine/Profiler/ProfilerMemory.h"
#include "Engine/Scripting/Enums.h"
GPUSwapChainWebGPU::GPUSwapChainWebGPU(GPUDeviceWebGPU* device, Window* window)
: GPUResourceWebGPU(device, StringView::Empty)
@@ -59,6 +58,18 @@ GPUTextureView* GPUSwapChainWebGPU::GetBackBufferView()
ASSERT(hasSurfaceTexture);
_surfaceView.Texture = surfaceTexture.texture;
// Fix up the size (in case the underlying texture size differs from what the engine resized it to)
const uint32 width = wgpuTextureGetWidth(surfaceTexture.texture);
const uint32 height = wgpuTextureGetHeight(surfaceTexture.texture);
if (_width != width || _height != height)
{
PROFILE_MEM_DEC(Graphics, _memoryUsage);
_width = width;
_height = height;
_memoryUsage = RenderTools::CalculateTextureMemoryUsage(_format, _width, _height, 1);
PROFILE_MEM_INC(Graphics, _memoryUsage);
}
// Create view
WGPUTextureViewDescriptor viewDesc = WGPU_TEXTURE_VIEW_DESCRIPTOR_INIT;
#if GPU_ENABLE_RESOURCE_NAMING

View File

@@ -115,6 +115,10 @@ void GPUTextureViewWebGPU::Release()
View = nullptr;
}
Texture = nullptr;
HasStencil = false;
ReadOnly = false;
DepthSlice = WGPU_DEPTH_SLICE_UNDEFINED;
Ptr.Version++;
}
bool GPUTextureWebGPU::OnInit()

View File

@@ -142,7 +142,7 @@ void RenderAntiAliasingPass(RenderContext& renderContext, GPUTexture* input, GPU
// AA -> CAS -> Output
auto tmpImage = RenderTargetPool::Get(input->GetDescription());
RENDER_TARGET_POOL_SET_NAME(tmpImage, "TmpImage");
context->SetViewportAndScissors((float)input->Width(), (float)input->Height());
context->SetViewportAndScissors((float)tmpImage->Width(), (float)tmpImage->Height());
if (aaMode == AntialiasingMode::FastApproximateAntialiasing)
FXAA::Instance()->Render(renderContext, input, tmpImage->View());
else