Refactor Vertex Shader input vertex layout to use GPUVertexLayout defined on Vertex Buffer rather than Vertex Shader

#3044 #2667
This commit is contained in:
Wojtek Figat
2024-12-15 22:10:45 +01:00
parent 666efb7675
commit b3f37ca041
66 changed files with 786 additions and 579 deletions

View File

@@ -8,81 +8,19 @@
 #include "Types.h"
 #include "../RenderToolsDX.h"
-GPUShaderProgram* GPUShaderDX12::CreateGPUShaderProgram(ShaderStage type, const GPUShaderProgramInitializer& initializer, byte* cacheBytes, uint32 cacheSize, MemoryReadStream& stream)
+GPUShaderProgram* GPUShaderDX12::CreateGPUShaderProgram(ShaderStage type, const GPUShaderProgramInitializer& initializer, Span<byte> bytecode, MemoryReadStream& stream)
 {
     // Extract the DX shader header from the cache
-    DxShaderHeader* header = (DxShaderHeader*)cacheBytes;
-    cacheBytes += sizeof(DxShaderHeader);
-    cacheSize -= sizeof(DxShaderHeader);
+    DxShaderHeader* header = (DxShaderHeader*)bytecode.Get();
+    bytecode = Span<byte>(bytecode.Get() + sizeof(DxShaderHeader), bytecode.Length() - sizeof(DxShaderHeader));
     GPUShaderProgram* shader = nullptr;
     switch (type)
     {
     case ShaderStage::Vertex:
     {
-        // Load Input Layout (it may be empty)
-        byte inputLayoutSize;
-        stream.ReadByte(&inputLayoutSize);
-        ASSERT(inputLayoutSize <= VERTEX_SHADER_MAX_INPUT_ELEMENTS);
-        D3D12_INPUT_ELEMENT_DESC inputLayout[VERTEX_SHADER_MAX_INPUT_ELEMENTS];
-        for (int32 a = 0; a < inputLayoutSize; a++)
-        {
-            // Read description
-            GPUShaderProgramVS::InputElement inputElement;
-            stream.Read(inputElement);
-            // Get semantic name
-            const char* semanticName = nullptr;
-            // TODO: maybe use enum+mapping ?
-            switch (inputElement.Type)
-            {
-            case 1:
-                semanticName = "POSITION";
-                break;
-            case 2:
-                semanticName = "COLOR";
-                break;
-            case 3:
-                semanticName = "TEXCOORD";
-                break;
-            case 4:
-                semanticName = "NORMAL";
-                break;
-            case 5:
-                semanticName = "TANGENT";
-                break;
-            case 6:
-                semanticName = "BITANGENT";
-                break;
-            case 7:
-                semanticName = "ATTRIBUTE";
-                break;
-            case 8:
-                semanticName = "BLENDINDICES";
-                break;
-            case 9:
-                semanticName = "BLENDWEIGHT";
-                break;
-            default:
-                LOG(Fatal, "Invalid vertex shader element semantic type: {0}", inputElement.Type);
-                break;
-            }
-            // Set data
-            inputLayout[a] =
-            {
-                semanticName,
-                static_cast<UINT>(inputElement.Index),
-                static_cast<DXGI_FORMAT>(inputElement.Format),
-                static_cast<UINT>(inputElement.InputSlot),
-                static_cast<UINT>(inputElement.AlignedByteOffset),
-                static_cast<D3D12_INPUT_CLASSIFICATION>(inputElement.InputSlotClass),
-                static_cast<UINT>(inputElement.InstanceDataStepRate)
-            };
-        }
-        // Create object
-        shader = New<GPUShaderProgramVSDX12>(initializer, header, cacheBytes, cacheSize, inputLayout, inputLayoutSize);
+        GPUVertexLayout* vertexLayout = ReadVertexLayout(stream);
+        shader = New<GPUShaderProgramVSDX12>(initializer, header, bytecode, vertexLayout);
         break;
     }
 #if GPU_ALLOW_TESSELLATION_SHADERS
#if GPU_ALLOW_TESSELLATION_SHADERS
@@ -90,12 +28,12 @@ GPUShaderProgram* GPUShaderDX12::CreateGPUShaderProgram(ShaderStage type, const
     {
         int32 controlPointsCount;
         stream.ReadInt32(&controlPointsCount);
-        shader = New<GPUShaderProgramHSDX12>(initializer, header, cacheBytes, cacheSize, controlPointsCount);
+        shader = New<GPUShaderProgramHSDX12>(initializer, header, bytecode, controlPointsCount);
         break;
     }
     case ShaderStage::Domain:
     {
-        shader = New<GPUShaderProgramDSDX12>(initializer, header, cacheBytes, cacheSize);
+        shader = New<GPUShaderProgramDSDX12>(initializer, header, bytecode);
         break;
     }
 #else
#else
@@ -109,18 +47,18 @@ GPUShaderProgram* GPUShaderDX12::CreateGPUShaderProgram(ShaderStage type, const
 #if GPU_ALLOW_GEOMETRY_SHADERS
     case ShaderStage::Geometry:
     {
-        shader = New<GPUShaderProgramGSDX12>(initializer, header, cacheBytes, cacheSize);
+        shader = New<GPUShaderProgramGSDX12>(initializer, header, bytecode);
         break;
     }
 #endif
     case ShaderStage::Pixel:
     {
-        shader = New<GPUShaderProgramPSDX12>(initializer, header, cacheBytes, cacheSize);
+        shader = New<GPUShaderProgramPSDX12>(initializer, header, bytecode);
         break;
     }
     case ShaderStage::Compute:
     {
-        shader = New<GPUShaderProgramCSDX12>(_device, initializer, header, cacheBytes, cacheSize);
+        shader = New<GPUShaderProgramCSDX12>(_device, initializer, header, bytecode);
         break;
     }
     }