initial commit

Jo 2025-01-07 02:06:59 +01:00
parent 6715289efe
commit 788c3389af
37645 changed files with 2526849 additions and 80 deletions

@@ -0,0 +1,23 @@
#pragma kernel BlitBucket
Texture3D<float4> BucketBuffer;
SamplerState sampler_point_clamp;
float3 BucketOffset;
int BucketSize;
RWTexture3D<float4> Result;
[numthreads(4,4,4)]
void BlitBucket(uint3 id : SV_DispatchThreadID)
{
float3 UV = float3(id - BucketOffset) / BucketSize;
float clip = step(UV.x,1) * step(UV.y, 1) * step(UV.z, 1) *
step(0,UV.x) * step(0, UV.y) * step(0, UV.z);
uint3 bucketID = id.xyz - BucketOffset.xyz;
Result[id.xyz] += BucketBuffer[bucketID] * clip;
}
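// Reference sketch (added, not used by the kernel above): the step() products
// implement a branch-free bounds test; this is the equivalent readable form.
bool InsideUnitBox(float3 uv)
{
    return all(uv >= 0.0) && all(uv <= 1.0);
}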

@@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: 0893d2f52a56cc5468ef6576d598e4f8
ComputeShaderImporter:
externalObjects: {}
currentAPIMask: 2052
userData:
assetBundleName:
assetBundleVariant:

@@ -0,0 +1,17 @@
// Each #kernel tells which function to compile; you can have many kernels
#pragma kernel Clear3D
// Create a RenderTexture with enableRandomWrite flag and set it
// with cs.SetTexture
RWTexture3D<half4> _Source;
uint4 _SourceDim;
half4 _ClearColor;
[numthreads(4,4,4)]
void Clear3D (uint3 id : SV_DispatchThreadID)
{
if (all(id.xyz < _SourceDim.xyz))
{
_Source[id.xyz] = _ClearColor;
}
}

@@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: e78dcf06706ad9840b7c3b0ec9f489b7
ComputeShaderImporter:
externalObjects: {}
preprocessorOverride: 0
userData:
assetBundleName:
assetBundleVariant:

@@ -0,0 +1,99 @@

////This generates a combined clipmap so we don't have to sample every volume map every frame////
//TODO: Add cascades or taper
#pragma kernel ClipMapGen
#pragma kernel ClipMapClear
#pragma kernel ClipMapHeight
Texture3D<float4> PreResult;
RWTexture3D<float4> Result;
SamplerState my_point_clamp_sampler;
float3 ClipmapWorldPosition;
float ClipmapScale;
float3 VolumeWorldSize;
float3 VolumeWorldPosition;
Texture3D<float4> VolumeMap;
SamplerState trilinear_clamp_sampler;
float VolumeDensity;
shared float _GlobalExtinction;
float4 clearColor;
[numthreads(4, 4, 4)]
void ClipMapClear(uint3 id : SV_DispatchThreadID)
{
Result[id.xyz] = float4(0, 0, 0, 0);
//Result[id.xyz] = clearColor;
}
[numthreads(4,4,4)]
void ClipMapGen (uint3 id : SV_DispatchThreadID)
{
//uint count, stride;
float3 ClipRes = float3(0,0,0);
Result.GetDimensions(ClipRes.x, ClipRes.y, ClipRes.z);
float4 UVW;
UVW.xyz = (id + 0.5) / ClipRes.xyz; //0-1 scaling
UVW.w = 1;
float4 WorldCoord = UVW * ClipmapScale + ClipmapWorldPosition.xyzz;
float3 MapUVW = (WorldCoord.xyz - VolumeWorldPosition) / VolumeWorldSize;
//TODO: Skip the sampling below for voxels outside the clipped area
float ClipUVW =
step(MapUVW.x, 1) * step(0, MapUVW.x) *
step(MapUVW.y, 1) * step(0, MapUVW.y) *
step(MapUVW.z, 1) * step(0, MapUVW.z);
//float tempden = distance(WorldCoord.xyz,0) > 3 ? 0:10 ;
float3 VolRes = float3(0, 0, 0);
VolumeMap.GetDimensions(VolRes.x, VolRes.y, VolRes.z);
//float3 VolPixSize = VolumeWorldSize / VolRes;
//float3 ClipPixSize = ClipmapScale / ClipRes;
// Move this math into the C# side, no reason to do this per pixel!
float3 VolPixPerClipPix = (ClipmapScale * VolRes) / (VolumeWorldSize * ClipRes); // ClipPixSize / VolPixSize;
float maxScale = max(VolPixPerClipPix.x, max(VolPixPerClipPix.y, VolPixPerClipPix.z));
float mip = log2(maxScale);
//Sample baked volume and clip it off to its area
//Doing max of the two results to account for overlaps
float4 colors = max(PreResult[id.xyz], VolumeMap.SampleLevel(trilinear_clamp_sampler, MapUVW, mip) * ClipUVW);
colors.a = 0;
Result[id.xyz] = colors;
}
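// Reference sketch (added, not called above): the mip selection is constant per
// dispatch, so it can be hoisted to the C# side as the TODO suggests; factored
// out here to make that hoist explicit.
float ComputeClipmapMip(float3 clipRes, float3 volRes)
{
    float3 volPixPerClipPix = (ClipmapScale * volRes) / (VolumeWorldSize * clipRes);
    float maxScale = max(volPixPerClipPix.x, max(volPixPerClipPix.y, volPixPerClipPix.z));
    return log2(maxScale);
}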
//Stub kernel, currently empty and not declared with a #pragma kernel
[numthreads(4, 4, 4)]
void MediapGen(uint3 id : SV_DispatchThreadID)
{
}
[numthreads(4, 4, 4)]
void ClipMapHeight(uint3 id : SV_DispatchThreadID)
{
//uint count, stride;
float4 UVW = float4(0,0,0,0);
Result.GetDimensions(UVW.x, UVW.y, UVW.z);
UVW.xyz = (id + 0.5) / UVW.xyz; //0-1 scaling
UVW.w = 1;
float4 WorldCoord = UVW * ClipmapScale + ClipmapWorldPosition.xyzz;
//float heightDens = WorldCoord.y > 0 ? 0:1;
float heightDens = _GlobalExtinction;
Result[id.xyz] = max(PreResult[id.xyz].rgba, float4(0, 0, 0, heightDens));
}

@@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: a273e9d897989a74fbff10054c9b4fed
ComputeShaderImporter:
externalObjects: {}
currentAPIMask: 2052
userData:
assetBundleName:
assetBundleVariant:

@@ -0,0 +1,394 @@
//#include "UnityShaderVariables.cginc"
//#include "Noise.cginc"
#pragma only_renderers d3d12
//#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Macros.hlsl"
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Common.hlsl"
//#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Packing.hlsl"
//#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Sampling/Sampling.hlsl"
#pragma max_recursion_depth 1
#define M_PI 3.1415926535897932384626433832795
#define M_PHI 1.618033988749895
// Input
RaytracingAccelerationStructure g_SceneAccelStruct;
//Global inputs
uint PointLightCount;
uint ConeLightCount;
uint DirLightCount;
uint AreaLightCount;
uint AreaLightSamples;
uint EnvLightSamples;
int PerDispatchRayCount;
int StartRayIdx;
float HalfVoxelSize;
struct PointLightData{
float3 PointLightsWS;
float4 PointLightsColor;
};
struct ConeLightData{
float3 ConeLightsWS;
float4 ConeLightsColor;
float3 ConeLightsDir;
float2 ConeLightsPram; //Outer, inner
};
struct DirLightData{
float3 DirLightsDir;
float4 DirLightsColor;
};
struct AreaLightData{
float4x4 AreaLightsMatrix;
float4x4 AreaLightsMatrixInv;
float3 AreaLightsWS;
float4 AreaLightsColor;
float3 AreaLightsSize;
};
StructuredBuffer<PointLightData> PLD;
StructuredBuffer<ConeLightData> CLD;
StructuredBuffer<DirLightData> DLD;
StructuredBuffer<AreaLightData> ALD;
float3 WPosition;
float3 Size = float3(1,1,1);
// Output
RWTexture3D<float4> g_Output;
//Environment
TextureCube _SkyTexture;
SamplerState sampler_SkyTexture;
struct RayPayload
{
float4 color;
float3 dir;
};
struct EnvRayPayload
{
float4 color;
float3 dir;
};
[shader("miss")]
void MainMissShader(inout RayPayload payload : SV_RayPayload)
{
payload.color = float4(1, 1, 1, 0);
}
[shader("miss")]
void zEnvMissShader(inout EnvRayPayload payload : SV_RayPayload)
{
//Environmental
payload.color = _SkyTexture.SampleLevel(sampler_SkyTexture, payload.dir, 0);
}
float InverseSquare(float distance){
return 1 / (4 * M_PI * distance * distance);
}
//float _Seed;
float rand(float2 Pixel)
{
float _Seed = Pixel.x + Pixel.y ;
float result = frac(sin(_Seed / 100.0f * dot(Pixel, float2(12.9898f, 78.233f) ) ) * 43758.5453f);
// _Seed = _Seed + 1.0f;
return result;
}
float rand(float3 Pixel)
{
float _Seed = Pixel.x + Pixel.y + Pixel.z ;
float result = frac(sin(_Seed / 100.0f * dot(Pixel, float3(12.9898f, 49.1165f, 29.1165f))) * 43758.5453f);
// _Seed += 1.0f;
return result;
}
//RTgems 3.11 FIBONACCI SPHERE
//current index, number of samples
float3 sphericalFibonacci(float i, float n)
{
const float PHI = sqrt(5.) * 0.5f + 0.5f;
float fraction = (i * (PHI - 1)) - floor(i * (PHI - 1));
float phi = 2.f * M_PI * fraction;
float cosTheta = 1.f - (2.f * i + 1.f) * (1.f / n);
float sinTheta = sqrt(saturate(1.f - cosTheta*cosTheta));
return float3(cos(phi) * sinTheta , sin(phi) * sinTheta , cosTheta);
}
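// Usage sketch (added, not called by the shaders below): EnvCast combines the
// Fibonacci direction with a random offset scaled by the per-sample solid
// angle; this helper mirrors that construction for the k-th of n samples.
float3 JitteredEnvDirection(uint3 id, float k, float n)
{
    float3 jitter = normalize(float3(rand(id.xyz + k), rand(id.xyz + k + 33), rand(id.xyz + k + 120)) - 0.5);
    return sphericalFibonacci(k, n) + jitter * 2 * (4 * M_PI / n);
}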
///
///Point light
///
float4 PointLightCast(float3 VoxelWS, uint Num){
float LightRadius = distance(VoxelWS, PLD[Num].PointLightsWS );
float3 rayDirection = -normalize(VoxelWS - PLD[Num].PointLightsWS) ;
RayDesc ray;
ray.Origin = VoxelWS;
ray.Direction = rayDirection;
ray.TMin = 0.0f;
ray.TMax = LightRadius;
RayPayload payload;
payload.color = float4(0, 0, 0, 1);
uint missShaderIndex = 0;
TraceRay(g_SceneAccelStruct, 0, 0xFF, 0, 1, missShaderIndex, ray, payload); //Add an anyhit shader to support transparencies
return (1-payload.color.a) * InverseSquare(max(LightRadius, HalfVoxelSize)) * PLD[Num].PointLightsColor;
}
///
/// Cone light
///
float4 ConeLightCast(float3 VoxelWS, uint Num){
float3 rayDirection = -normalize(VoxelWS - CLD[Num].ConeLightsWS) ;
//Currently taking a point light and adding attenuation
float attenuation = (dot(CLD[Num].ConeLightsDir, -rayDirection));
if (attenuation <= 0) return float4(0, 0, 0, 0); //early out
float LightRadius = (distance(VoxelWS, CLD[Num].ConeLightsWS ));
/////
float flOuterConeCos = CLD[Num].ConeLightsPram.x;
float flTemp = dot(CLD[Num].ConeLightsDir, -rayDirection) - flOuterConeCos;
float vSpotAtten = saturate(flTemp * CLD[Num].ConeLightsPram.y);
///
RayDesc ray;
ray.Origin = VoxelWS;
ray.Direction = rayDirection;
ray.TMin = 0.0f;
ray.TMax = LightRadius;
RayPayload payload;
payload.color = float4(0, 0, 0, 1);
uint missShaderIndex = 0;
TraceRay(g_SceneAccelStruct, 0, 0xFF, 0, 1, missShaderIndex, ray, payload); //Add an anyhit shader to support transparencies
return InverseSquare(max(LightRadius, HalfVoxelSize) ) * vSpotAtten * (1-payload.color.a) * CLD[Num].ConeLightsColor;
}
//
//Directional Light
//
float4 DirLightCast(float3 VoxelWS, uint Num){
float3 rayDirection = -DLD[Num].DirLightsDir;
RayDesc ray;
ray.Origin = VoxelWS;
ray.Direction = rayDirection;
ray.TMin = 0.0f;
ray.TMax = 1.#INF;
RayPayload payload;
payload.color = float4(0, 0, 0, 1);
uint missShaderIndex = 0;
TraceRay(g_SceneAccelStruct, 0, 0xFF, 0, 1, missShaderIndex, ray, payload); //Add an anyhit shader to support transparencies
return (1-payload.color.a) * DLD[Num].DirLightsColor;
}
//
//Area Light
//
float4 AreaLightCast(float3 VoxelWS, uint Num, int startIdx, int endIdx){
//
float3 lsPos = float3(ALD[Num].AreaLightsMatrix[0][3], ALD[Num].AreaLightsMatrix[1][3],ALD[Num].AreaLightsMatrix[2][3]);
float3 VoxelLS = mul( float4(VoxelWS.xyz,1)-lsPos, ALD[Num].AreaLightsMatrix);
float4 areaLightAccumulation = float4(0,0,0,0);
if (VoxelLS.z <= 0) return areaLightAccumulation; //Early out
uint3 id = DispatchRaysIndex().xyz; //redundant, oh well
for (int j = startIdx; j < endIdx; j++)
{
//int loop64 = fmod(j + AreaLightSamples + id.x+id.y+id.z,64);
float3 LocalPos = mul(ALD[Num].AreaLightsWS.xyz -lsPos , ALD[Num].AreaLightsMatrix).xyz ;
float3 LightPosSample = LocalPos + float3( (rand(id.xyz + j)-0.5) * ALD[Num].AreaLightsSize.x , (rand(id.xyz+j+20)-0.5) * ALD[Num].AreaLightsSize.y,0);
//float3 LightPosSample = LocalPos + float3( BlueNoiseInDisk[loop64].x * AreaLightsSize[Num].x * 0.5 , BlueNoiseInDisk[loop64].y * AreaLightsSize[Num].y *0.5, 0 );
float LightRadius = distance(VoxelLS, LightPosSample );
float3 rayDirection = -normalize(VoxelLS - LightPosSample);
float attenuation = saturate(dot(float3(0,0,1), -rayDirection));
RayDesc ray;
ray.Origin = VoxelWS;
ray.Direction = mul(rayDirection, ALD[Num].AreaLightsMatrixInv);
ray.TMin = 0.0f;
ray.TMax = LightRadius;
RayPayload payload;
payload.color = float4(0, 0, 0, 1);
uint missShaderIndex = 0;
TraceRay(g_SceneAccelStruct, 0, 0xFF, 0, 1, missShaderIndex, ray, payload); //Add an anyhit shader to support transparencies
areaLightAccumulation += saturate(InverseSquare(LightRadius) * (1-payload.color.a) * ALD[Num].AreaLightsColor * attenuation) / AreaLightSamples;
}
return areaLightAccumulation;
}
//
//Disk light
//
float4 DiscLightCast(float3 VoxelWS, uint Num, int startIdx, int endIdx){
//
float3 lsPos = float3(ALD[Num].AreaLightsMatrix[0][3], ALD[Num].AreaLightsMatrix[1][3],ALD[Num].AreaLightsMatrix[2][3]);
float3 VoxelLS = mul( float4(VoxelWS.xyz,1)-lsPos, ALD[Num].AreaLightsMatrix);
uint3 id = DispatchRaysIndex().xyz;
float4 areaLightAccumulation = float4(0,0,0,0);
if (VoxelLS.z <= 0) return areaLightAccumulation;
for (int j = startIdx; j < endIdx; j++)
{
float3 LocalPos = mul(ALD[Num].AreaLightsWS.xyz -lsPos , ALD[Num].AreaLightsMatrix).xyz ;
//https://stackoverflow.com/questions/5837572/generate-a-random-point-within-a-circle-uniformly
float t = 2 * M_PI * rand(id.xyz + j + 30);
float u = rand(id.xyz + j) + rand(id.xyz + j + 20);
float r;
if (u > 1) r = (2 - u);
else r = u;
// [r * cos(t), r * sin(t)]
float3 LightPosSample = LocalPos +
float3( ( r * cos(t)) * ALD[Num].AreaLightsSize.x,
( r * sin(t)) * ALD[Num].AreaLightsSize.x,
0);
float LightRadius = distance(VoxelLS, LightPosSample );
float3 rayDirection = -normalize(VoxelLS - LightPosSample);
float attenuation = saturate(dot(float3(0,0,1), -rayDirection));
RayDesc ray;
ray.Origin = VoxelWS;
ray.Direction = mul(rayDirection, ALD[Num].AreaLightsMatrixInv);
ray.TMin = 0.0f;
ray.TMax = LightRadius;
RayPayload payload;
payload.color = float4(0, 0, 0, 1);
uint missShaderIndex = 0;
TraceRay(g_SceneAccelStruct, 0, 0xFF, 0, 1, missShaderIndex, ray, payload); //Add an anyhit shader to support transparencies
areaLightAccumulation += saturate(InverseSquare(LightRadius) * (1-payload.color.a) * ALD[Num].AreaLightsColor * attenuation) / AreaLightSamples;
}
return areaLightAccumulation;
}
//Cast Ray from vox to env
float4 EnvCast(float3 VoxelWS, int startIdx, int endIdx){
float4 Accumulation = float4(0,0,0,0);
for (int j = startIdx; j < endIdx; j++)
{
uint3 id = DispatchRaysIndex().xyz;
float3 rayDirection = sphericalFibonacci(j, EnvLightSamples) + ( normalize(float3(rand(id.xyz+j),rand(id.xyz+j+33) , rand(id.xyz+j+120)) -.5) * 2 * (4*M_PI / EnvLightSamples) ) ;
RayDesc ray;
ray.Origin = VoxelWS;
ray.Direction = rayDirection;
ray.TMin = 0.0f;
ray.TMax = 100000;
RayPayload payload;
payload.color = float4(0, 0, 0, 0);
uint missShaderIndex = 1;
TraceRay(g_SceneAccelStruct, 0, 0xFF, 0, 1, missShaderIndex, ray, payload);
Accumulation += payload.color / EnvLightSamples ;
}
return Accumulation;
}
// [shader("closesthit")]
// void MyClosestHit(inout RayPayload data,
// BuiltInTriangleIntersectionAttributes attribs) {
// data.color = float4( 1, 0, 0, 1 );
// }
//
// Main shader
//
[shader("raygeneration")]
void MainRayGenShader()
{
float3 launchIndex = DispatchRaysIndex().xyz ; //id
float3 launchDim = DispatchRaysDimensions().xyz; //whd
float3 VoxelWorldPosition = WPosition + Size * ( (launchIndex + 0.5 ) / launchDim );
float4 LightAccumulation = g_Output[launchIndex];
int start = StartRayIdx;
int end = StartRayIdx + PerDispatchRayCount;
int maxIter = min((int)PointLightCount, end); //cast so a spent (negative) budget is not promoted to a large uint
for (int i = start; i < maxIter; i++) LightAccumulation += PointLightCast(VoxelWorldPosition, i);
start = max(0, start - (int)PointLightCount);
end -= PointLightCount;
maxIter = min((int)ConeLightCount, end);
for (int i = start; i < maxIter; i++) LightAccumulation += ConeLightCast(VoxelWorldPosition, i);
start = max(0, start - (int)ConeLightCount);
end -= ConeLightCount;
maxIter = min((int)DirLightCount, end);
for (int i = start; i < maxIter; i++) LightAccumulation += DirLightCast(VoxelWorldPosition, i);
start = max(0, start - (int)DirLightCount);
end -= (int)DirLightCount;
maxIter = min((int)EnvLightSamples, end);
LightAccumulation += EnvCast(VoxelWorldPosition, start, maxIter);
start = max(0, start - (int)EnvLightSamples);
end -= (int)EnvLightSamples;
maxIter = min((int)AreaLightSamples, end);
int light = 0;
while (end > 0 && light < (int)AreaLightCount)
{
if (ALD[light].AreaLightsSize.z == 0) LightAccumulation += AreaLightCast(VoxelWorldPosition, light, start, maxIter);
else LightAccumulation += DiscLightCast(VoxelWorldPosition, light, start, maxIter);
start = max(0, start - (int)AreaLightSamples);
end -= (int)AreaLightSamples;
maxIter = min((int)AreaLightSamples, end);
light++;
}
// LightAccumulation.a = 1; //filling alpha
g_Output[launchIndex] = LightAccumulation;
}
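// Worked example (added note): the start/end bookkeeping above windows one
// contiguous ray budget across all light lists. With StartRayIdx = 4 and
// PerDispatchRayCount = 8, rays 4..11 are spent in order: if PointLightCount
// is 6, point lights 4 and 5 are traced, then `start` rewinds to 0 and `end`
// drops to 6, so up to six cone lights (0..5) consume the remaining budget,
// and so on down the lists on later dispatches.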

@@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: c7c13d3174f650f4fa0202ce218970eb
RayTracingShaderImporter:
externalObjects: {}
currentAPIMask: 262144
userData:
assetBundleName:
assetBundleVariant:

@@ -0,0 +1,90 @@
#ifndef RAYTRACING_META_PASS
#define RAYTRACING_META_PASS
#include "UnityRayTracingMeshUtils.cginc"
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Color.hlsl"
//#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/SurfaceInput.hlsl"
#include "Packages/com.unity.render-pipelines.universal/Shaders/LitInput.hlsl"
#if !defined(DYNAMIC_EMISSION)
#if !defined(_EMISSION)
#define _EMISSION false
#else
#undef _EMISSION
#define _EMISSION true
#endif
#endif
struct RayPayload
{
float4 color;
float3 dir;
};
struct AttributeData
{
float2 barycentrics;
};
struct Vertex
{
float2 texcoord;
float3 normal;
};
// Texture2D<float4> _MainTex;
// SamplerState sampler_MainTex;
Vertex FetchVertex(uint vertexIndex)
{
    Vertex v;
    v.texcoord = UnityRayTracingFetchVertexAttribute2(vertexIndex, kVertexAttributeTexCoord0);
    v.normal = UnityRayTracingFetchVertexAttribute3(vertexIndex, kVertexAttributeNormal);
    return v;
}
Vertex InterpolateVertices(Vertex v0, Vertex v1, Vertex v2, float3 barycentrics)
{
    Vertex v;
    #define INTERPOLATE_ATTRIBUTE(attr) v.attr = v0.attr * barycentrics.x + v1.attr * barycentrics.y + v2.attr * barycentrics.z
    INTERPOLATE_ATTRIBUTE(texcoord);
    INTERPOLATE_ATTRIBUTE(normal);
    return v;
}
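// Usage sketch (added, not called below; BakedClosestHit fetches texcoords
// directly): reconstructing a full hit vertex from the fetched triangle.
Vertex InterpolateHit(uint3 triangleIndices, float3 barycentrics)
{
    return InterpolateVertices(FetchVertex(triangleIndices.x),
                               FetchVertex(triangleIndices.y),
                               FetchVertex(triangleIndices.z),
                               barycentrics);
}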
//https://coty.tips/raytracing-in-unity/
[shader("closesthit")]
void BakedClosestHit(inout RayPayload payload,
AttributeData attributes : SV_IntersectionAttributes)
{
if (_EMISSION)
{
uint2 launchIdx = DispatchRaysIndex();
// ShadingData shade = getShadingData( PrimitiveIndex(), attribs );
uint primitiveIndex = PrimitiveIndex();
uint3 triangleIndices = UnityRayTracingFetchTriangleIndices(primitiveIndex);
Vertex v0, v1, v2;
v0.texcoord = UnityRayTracingFetchVertexAttribute2(triangleIndices.x, kVertexAttributeTexCoord0);
v1.texcoord = UnityRayTracingFetchVertexAttribute2(triangleIndices.y, kVertexAttributeTexCoord0);
v2.texcoord = UnityRayTracingFetchVertexAttribute2(triangleIndices.z, kVertexAttributeTexCoord0);
float3 barycentrics = float3(1.0 - attributes.barycentrics.x - attributes.barycentrics.y, attributes.barycentrics.x, attributes.barycentrics.y);
Vertex vInterpolated;
vInterpolated.texcoord = v0.texcoord * barycentrics.x + v1.texcoord * barycentrics.y + v2.texcoord * barycentrics.z;
payload.color = float4(_EmissionMap.SampleLevel(sampler_EmissionMap, vInterpolated.texcoord, 0).rgb * _EmissionColor.rgb, 1);
payload.dir = float3(1, 0, 0);
}
else
{
payload.color = float4(0, 0, 0, 1);
payload.dir = float3(1, 0, 0);
}
}
#endif

@@ -0,0 +1,9 @@
fileFormatVersion: 2
guid: 5797c868392f1aa4ca743060406430a7
ShaderImporter:
externalObjects: {}
defaultTextures: []
nonModifiableTextures: []
userData:
assetBundleName:
assetBundleVariant:

@@ -0,0 +1,119 @@
// Each #kernel tells which function to compile; you can have many kernels
#pragma kernel CalculateMip
#pragma kernel CalculateMipBuffer
#pragma kernel CopyTexToBuffer
// Create a RenderTexture with enableRandomWrite flag and set it
// with cs.SetTexture
uint4 _OutputDim;
Texture3D<float4> _Input;
RWTexture3D<float4> _Output;
SamplerState sampler_LinearClamp;
[numthreads(4,4,4)]
void CalculateMip(uint3 id : SV_DispatchThreadID)
{
//id.xyz = min(id.xyz, _OutputDim.xyz - (1).xxx);
const float3 samplePoints[8] = {
float3(0.25, 0.25, 0.25),
float3(0.75, 0.25, 0.25),
float3(0.25, 0.75, 0.25),
float3(0.75, 0.75, 0.25),
float3(0.25, 0.25, 0.75),
float3(0.75, 0.25, 0.75),
float3(0.25, 0.75, 0.75),
float3(0.75, 0.75, 0.75),
};
float4 output = float4(0,0,0,0);
float3 coordsFloat = (float3)min(id.xyz, _OutputDim.xyz - (1).xxx);
float3 dimFloat = (float3)_OutputDim.xyz;
[unroll] for (int i = 0; i < 8; i++)
{
float3 voxelCenter = (coordsFloat + samplePoints[i]) / dimFloat;
output += 0.125 * _Input.SampleLevel(sampler_LinearClamp, voxelCenter, 0);
}
_Output[id.xyz] = output;
}
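// Reference sketch (added, not dispatched): each tap above lands exactly on a
// parent-texel center ((2c + 0.5) / dim), so the loop averages a 2x2x2 block;
// this is the equivalent sampler-free form.
float4 BoxDownsampleLoad(int3 dstCoord)
{
    float4 sum = float4(0, 0, 0, 0);
    [unroll] for (int i = 0; i < 8; i++)
        sum += _Input.Load(int4(2 * dstCoord + int3(i & 1, (i >> 1) & 1, (i >> 2) & 1), 0));
    return sum * 0.125;
}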
RWStructuredBuffer<uint2> _Buffer;
int4 _PrevMipDimOffset;
int4 _MipDimOffset;
[numthreads(4,4,4)]
void CalculateMipBuffer(uint3 id : SV_DispatchThreadID)
{
if (id.x < (uint)_MipDimOffset.x && id.y < (uint)_MipDimOffset.y && id.z < (uint)_MipDimOffset.z)
{
//id.xyz = min(id.xyz, _OutputDim.xyz - (1).xxx);
const int3 samplePoints[8] =
{
int3(0, 0, 0),
int3(1, 0, 0),
int3(0, 1, 0),
int3(1, 1, 0),
int3(0, 0, 1),
int3(1, 0, 1),
int3(0, 1, 1),
int3(1, 1, 1),
};
float4 output = float4(0, 0, 0, 0);
int3 coords = (int3)id.xyz;
int3 prevCoords = 2 * coords;
int prevMipAdr = prevCoords.x + prevCoords.y * _PrevMipDimOffset.x + (prevCoords.z * _PrevMipDimOffset.x * _PrevMipDimOffset.y) + _PrevMipDimOffset.w;
int sliceSize = _PrevMipDimOffset.x * _PrevMipDimOffset.y;
//int sampleAddress[8] =
//{
// prevMipAdr,
// prevMipAdr + 1,
// prevMipAdr + _PrevMipDimOffset.x,
// prevMipAdr + 1 + _PrevMipDimOffset.x,
// prevMipAdr + sliceSize,
// prevMipAdr + 1 + sliceSize,
// prevMipAdr + _PrevMipDimOffset.x + sliceSize,
// prevMipAdr + 1 + _PrevMipDimOffset.x + sliceSize,
//};
int3 maxCoord = _PrevMipDimOffset.xyz - (1).xxx;
[unroll]
for (int i = 0; i < 8; i++)
{
int sampleAddress =
min(prevCoords.x + samplePoints[i].x, maxCoord.x) +
min(prevCoords.y + samplePoints[i].y, maxCoord.y) * _PrevMipDimOffset.x +
min(prevCoords.z + samplePoints[i].z, maxCoord.z) * _PrevMipDimOffset.x * _PrevMipDimOffset.y +
_PrevMipDimOffset.w;
uint2 dataHalf4 = _Buffer.Load(sampleAddress);
float4 dataFloat = float4(0, 0, 0, 0);
dataFloat.x = f16tof32(dataHalf4.x & 0xFFFF);
dataFloat.y = f16tof32(dataHalf4.x >> 16);
dataFloat.z = f16tof32(dataHalf4.y & 0xFFFF);
dataFloat.w = f16tof32(dataHalf4.y >> 16);
output += 0.125 * dataFloat;
}
//output = float4(1,0,1,1);
uint2 outputHalf = uint2(0,0);
outputHalf.x = (f32tof16(output.x) & 0xFFFF) | (f32tof16(output.y) << 16);
outputHalf.y = (f32tof16(output.z) & 0xFFFF) | (f32tof16(output.w) << 16);
int mipAdr = coords.x + coords.y * _MipDimOffset.x + (coords.z * _MipDimOffset.x * _MipDimOffset.y) + _MipDimOffset.w;
_Buffer[mipAdr] = outputHalf;
}
}
[numthreads(4,4,4)]
void CopyTexToBuffer(uint3 id : SV_DispatchThreadID)
{
if (id.x < (uint)_MipDimOffset.x && id.y < (uint)_MipDimOffset.y && id.z < (uint)_MipDimOffset.z)
{
int3 coords = (int3)id.xyz;
float4 output = _Input.Load(int4(coords, 0));
uint2 outputHalf = uint2(0,0);
outputHalf.x = (f32tof16(output.x) & 0xFFFF) | (f32tof16(output.y) << 16);
outputHalf.y = (f32tof16(output.z) & 0xFFFF) | (f32tof16(output.w) << 16);
uint mipAdr = id.x + (id.y * _MipDimOffset.x) + (id.z * _MipDimOffset.x * _MipDimOffset.y); // + _MipDimOffset.w; // offset is 0 for first mip
_Buffer[mipAdr] = outputHalf;
}
}
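// Reference sketch (added, not called above): the f32tof16/f16tof32 packing
// used by both kernels, factored out. Two halves fit per uint, so a float4
// voxel packs into the buffer's uint2 element.
uint2 PackHalf4(float4 v)
{
    return uint2((f32tof16(v.x) & 0xFFFF) | (f32tof16(v.y) << 16),
                 (f32tof16(v.z) & 0xFFFF) | (f32tof16(v.w) << 16));
}
float4 UnpackHalf4(uint2 p)
{
    return float4(f16tof32(p.x & 0xFFFF), f16tof32(p.x >> 16),
                  f16tof32(p.y & 0xFFFF), f16tof32(p.y >> 16));
}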

@@ -0,0 +1,7 @@
fileFormatVersion: 2
guid: a7b6f45f3454c3345a78c989cedd7229
ComputeShaderImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

@@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: b94482a9af587f8448a2554ea12e232e
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

@@ -0,0 +1,211 @@
Shader "Hidden/DebugVolumeRendering"
{
Properties
{
_Color ("Color", Color) = (1, 1, 1, 1)
_Volume ("Volume", 3D) = "" {}
_Intensity ("Intensity", Range(1.0, 5.0)) = 1.2
_Threshold ("Threshold", Range(0.0, 1.0)) = 0.95
_SliceMin ("Slice min", Vector) = (0.0, 0.0, 0.0, -1.0)
_SliceMax ("Slice max", Vector) = (1.0, 1.0, 1.0, -1.0)
}
SubShader {
Blend One One
ZTest Always
Tags {"RenderPipeline" = "UniversalPipeline" "RenderType" = "Transparent" "Queue" = "Transparent" }
Cull front
HLSLINCLUDE
#pragma target 3.0
#pragma exclude_renderers vulkan
ENDHLSL
Pass
{
HLSLPROGRAM
#define REQUIRE_DEPTH_TEXTURE 1
#define ITERATIONS 100
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Core.hlsl"
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Lighting.hlsl"
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Color.hlsl"
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/UnityInstancing.hlsl"
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/ShaderGraphFunctions.hlsl"
#ifndef ITERATIONS
#define ITERATIONS 100
#endif
half4 _Color;
sampler3D _Volume;
half _Intensity, _Threshold;
half3 _SliceMin, _SliceMax;
float4x4 _AxisRotationMatrix;
uniform float4 _CameraDepthTexture_TexelSize;
float _VolExposure;
struct Ray {
float3 origin;
float3 dir;
};
struct AABB {
float3 min;
float3 max;
};
bool intersect(Ray r, AABB aabb, out float t0, out float t1)
{
float3 invR = 1.0 / r.dir;
float3 tbot = invR * (aabb.min - r.origin);
float3 ttop = invR * (aabb.max - r.origin);
float3 tmin = min(ttop, tbot);
float3 tmax = max(ttop, tbot);
float2 t = max(tmin.xx, tmin.yz);
t0 = max(t.x, t.y);
t = min(tmax.xx, tmax.yz);
t1 = min(t.x, t.y);
return t0 <= t1;
}
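// Usage sketch (added, not called by frag below): with the slab test above, a
// ray misses the box when intersect() returns false or the exit distance is
// behind the origin.
bool HitsUnitBox(Ray r)
{
    AABB box;
    box.min = float3(-0.5, -0.5, -0.5);
    box.max = float3(0.5, 0.5, 0.5);
    float t0, t1;
    return intersect(r, box, t0, t1) && t1 >= 0;
}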
float3 localize(float3 p) {
return mul(unity_WorldToObject, float4(p, 1)).xyz;
}
float3 get_uv(float3 p) {
// float3 local = localize(p);
return (p + 0.5);
}
float4 sample_volume(float3 uv, float3 p)
{
float4 v = tex3D(_Volume, uv) * _Intensity;
float3 axis = mul(_AxisRotationMatrix, float4(p, 0)).xyz;
axis = get_uv(axis);
float min = step(_SliceMin.x, axis.x) * step(_SliceMin.y, axis.y) * step(_SliceMin.z, axis.z);
float max = step(axis.x, _SliceMax.x) * step(axis.y, _SliceMax.y) * step(axis.z, _SliceMax.z);
return min * max * v;
}
bool outside(float3 uv)
{
const float EPSILON = 0.01;
float lower = -EPSILON;
float upper = 1 + EPSILON;
return (
uv.x < lower || uv.y < lower || uv.z < lower ||
uv.x > upper || uv.y > upper || uv.z > upper
);
}
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float4 vertex : SV_POSITION;
float2 uv : TEXCOORD0;
float3 world : TEXCOORD1;
float3 local : TEXCOORD2;
float4 ase_texcoord1 : TEXCOORD3;
};
v2f vert(appdata v)
{
v2f o;
//o.vertex = UnityObjectToClipPos(v.vertex);
o.vertex = TransformObjectToHClip(v.vertex);
o.uv = v.uv;
o.world = mul(unity_ObjectToWorld, v.vertex).xyz;
o.local = v.vertex.xyz;
//Get screen pos For depth
float4 ase_clipPos = TransformObjectToHClip((v.vertex).xyz);
float4 screenPos = ComputeScreenPos(ase_clipPos);
o.ase_texcoord1 = screenPos;
return o;
}
float4 frag(v2f i) : SV_Target
{
//For depth
float4 screenPos = i.ase_texcoord1;
float4 ase_screenPosNorm = screenPos / screenPos.w;
float clampDepth22 = Linear01Depth(SHADERGRAPH_SAMPLE_SCENE_DEPTH(ase_screenPosNorm.xy),_ZBufferParams);
Ray ray;
// ray.origin = localize(i.world);
ray.origin = i.local;
// world space direction to object space
float3 dir = -(i.world - _WorldSpaceCameraPos);
ray.dir = normalize(mul(unity_WorldToObject, dir));
AABB aabb;
aabb.min = float3(-0.5, -0.5, -0.5);
aabb.max = float3(0.5, 0.5, 0.5);
float tnear;
float tfar;
intersect(ray, aabb, tnear, tfar);
tnear = max(0.0, tnear);
// float3 start = ray.origin + ray.dir * tnear;
float3 start = ray.origin;
float3 end = ray.origin + ray.dir * tfar;
float dist = abs(tfar - tnear); // float dist = distance(start, end);
float step_size = dist / float(ITERATIONS);
float3 ds = normalize(end - start) * step_size;
float4 dst = float4(0, 0, 0, 0);
float3 p = start;
[unroll]
for (int iter = 0; iter < ITERATIONS; iter++)
{
float3 uv = get_uv(p);
float4 v = sample_volume(uv, p);
float4 src = v;
src.a *= 0.5;
src.rgb *= src.a;
// blend
dst = _VolExposure * src + dst;
p += ds;
//if (dst.a > _Threshold) break;
}
return saturate(dst) * _Color;
}
#pragma vertex vert
#pragma fragment frag
ENDHLSL
}
}
}

@@ -0,0 +1,10 @@
fileFormatVersion: 2
guid: 2cd044507c209714393a12c7018c0e3f
timeCreated: 1511925442
licenseType: Pro
ShaderImporter:
externalObjects: {}
defaultTextures: []
userData:
assetBundleName:
assetBundleVariant:

@@ -0,0 +1,165 @@
#ifndef __VOLUME_RENDERING_INCLUDED__
#define __VOLUME_RENDERING_INCLUDED__
#ifndef ITERATIONS
#define ITERATIONS 100
#endif
half4 _Color;
sampler3D _Volume;
half _Intensity, _Threshold;
half3 _SliceMin, _SliceMax;
float4x4 _AxisRotationMatrix;
uniform float4 _CameraDepthTexture_TexelSize;
struct Ray {
float3 origin;
float3 dir;
};
struct AABB {
float3 min;
float3 max;
};
bool intersect(Ray r, AABB aabb, out float t0, out float t1)
{
float3 invR = 1.0 / r.dir;
float3 tbot = invR * (aabb.min - r.origin);
float3 ttop = invR * (aabb.max - r.origin);
float3 tmin = min(ttop, tbot);
float3 tmax = max(ttop, tbot);
float2 t = max(tmin.xx, tmin.yz);
t0 = max(t.x, t.y);
t = min(tmax.xx, tmax.yz);
t1 = min(t.x, t.y);
return t0 <= t1;
}
float3 localize(float3 p) {
return mul(unity_WorldToObject, float4(p, 1)).xyz;
}
float3 get_uv(float3 p) {
// float3 local = localize(p);
return (p + 0.5);
}
float4 sample_volume(float3 uv, float3 p)
{
float4 v = tex3D(_Volume, uv) * _Intensity;
float3 axis = mul(_AxisRotationMatrix, float4(p, 0)).xyz;
axis = get_uv(axis);
float min = step(_SliceMin.x, axis.x) * step(_SliceMin.y, axis.y) * step(_SliceMin.z, axis.z);
float max = step(axis.x, _SliceMax.x) * step(axis.y, _SliceMax.y) * step(axis.z, _SliceMax.z);
return min * max * v;
}
bool outside(float3 uv)
{
const float EPSILON = 0.01;
float lower = -EPSILON;
float upper = 1 + EPSILON;
return (
uv.x < lower || uv.y < lower || uv.z < lower ||
uv.x > upper || uv.y > upper || uv.z > upper
);
}
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
};
struct v2f
{
float4 vertex : SV_POSITION;
float2 uv : TEXCOORD0;
float3 world : TEXCOORD1;
float3 local : TEXCOORD2;
float4 ase_texcoord1 : TEXCOORD3;
};
v2f vert(appdata v)
{
v2f o;
//o.vertex = UnityObjectToClipPos(v.vertex);
o.vertex = TransformObjectToHClip(v.vertex);
o.uv = v.uv;
o.world = mul(unity_ObjectToWorld, v.vertex).xyz;
o.local = v.vertex.xyz;
//Get screen pos For depth
float4 ase_clipPos = TransformObjectToHClip((v.vertex).xyz);
float4 screenPos = ComputeScreenPos(ase_clipPos);
o.ase_texcoord1 = screenPos;
return o;
}
float4 frag(v2f i) : SV_Target
{
//For depth
float4 screenPos = i.ase_texcoord1;
float4 ase_screenPosNorm = screenPos / screenPos.w;
float clampDepth22 = Linear01Depth(SHADERGRAPH_SAMPLE_SCENE_DEPTH( ase_screenPosNorm.xy ),_ZBufferParams);
Ray ray;
// ray.origin = localize(i.world);
ray.origin = i.local;
// world space direction to object space
float3 dir = -(i.world - _WorldSpaceCameraPos);
ray.dir = normalize(mul(unity_WorldToObject, dir));
AABB aabb;
aabb.min = float3(-0.5, -0.5, -0.5);
aabb.max = float3(0.5, 0.5, 0.5);
float tnear;
float tfar;
intersect(ray, aabb, tnear, tfar);
tnear = max(0.0, tnear);
// float3 start = ray.origin + ray.dir * tnear;
float3 start = ray.origin;
float3 end = ray.origin + ray.dir * tfar;
float dist = abs(tfar - tnear); // float dist = distance(start, end);
float step_size = dist / float(ITERATIONS);
float3 ds = normalize(end - start) * step_size;
float4 dst = float4(0, 0, 0, 0);
float3 p = start;
[unroll]
for (int iter = 0; iter < ITERATIONS; iter++)
{
float3 uv = get_uv(p);
float4 v = sample_volume(uv, p);
float4 src = v;
src.a *= 0.5;
src.rgb *= src.a;
// blend
dst = (1.0 - dst.a) * src + dst;
p += ds;
if (dst.a > _Threshold) break;
}
return saturate(dst) * _Color;
}
#endif

@@ -0,0 +1,10 @@
fileFormatVersion: 2
guid: ecd4a9f42486b0045b64673db49ddfdc
timeCreated: 1530315460
licenseType: Pro
ShaderImporter:
externalObjects: {}
defaultTextures: []
userData:
assetBundleName:
assetBundleVariant:

@@ -0,0 +1,189 @@
Shader "Hidden/VolumetricPreview"
{
Properties
{
_Volume ("Volume", 3D) = "" {}
//_GlobalExtinction("Extinction", float) = 0.05
//_GlobalScattering("Scattering", color) = (0.05, 0.05, 0.05, 0.05)
_Intensity ("Intensity", Range(0.0, 5.0)) = 1.2
_Threshold ("Threshold", Range(0.0, 1.0)) = 0.95
_StepDist ("Step Distance", float) = 0.5
//_VolExposure2 ("Volume Exposure", float) = 0.05
}
SubShader
{
Blend One One
ZTest Always
Tags {"RenderPipeline" = "UniversalPipeline" "RenderType" = "Transparent" "Queue" = "Transparent" }
Cull front
ZWrite Off
Pass
{
HLSLPROGRAM
#pragma vertex vert
#pragma fragment frag
#pragma multi_compile_instancing
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Core.hlsl"
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/Lighting.hlsl"
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Color.hlsl"
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/UnityInstancing.hlsl"
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/ShaderGraphFunctions.hlsl"
#include "Packages/com.unity.render-pipelines.universal/ShaderLibrary/DeclareDepthTexture.hlsl"
#ifndef MAX_ITERATIONS
#define MAX_ITERATIONS 100
#define INV_MAX_ITERATIONS 0.01
#endif
struct appdata
{
float4 vertex : POSITION;
float2 uv : TEXCOORD0;
UNITY_VERTEX_INPUT_INSTANCE_ID
};
struct v2f
{
float2 uv : TEXCOORD0;
float4 vertex : SV_POSITION;
float4 screenPos : TEXCOORD1;
float4 wPos : TEXCOORD2;
UNITY_VERTEX_INPUT_INSTANCE_ID
UNITY_VERTEX_OUTPUT_STEREO
};
TEXTURE3D(_Volume);
SamplerState sampler_Volume;
float _GlobalExtinction;
float4 _GlobalScattering;
CBUFFER_START(UnityPerMaterial)
float _Threshold;
float _Intensity;
float _StepDist;
float _VolExposure2;
CBUFFER_END
/** Moves the given ray to the surface of the bounding box via AABB ray intersection
*/
void MoveRayToBoxSurface(inout float3 rayPos, float3 rayDir, float3 boxCenter, float3 boxSize, out float2 intersectLen)
{
float3 rayPosCentered = rayPos - boxCenter;
float3 boxBottom = -boxSize;
float3 boxTop = boxSize;
float3 rcpRayDir = rcp(rayDir);
float3 tBottom = rcpRayDir * (boxBottom - rayPosCentered);
float3 tTop = rcpRayDir * (boxTop - rayPosCentered);
float3 tMin = min(tBottom, tTop);
float3 tMax = max(tBottom, tTop);
float2 temp = max(tMin.xx, tMin.yz);
intersectLen.x = max(temp.x, temp.y);
temp = min(tMax.xx, tMax.yz);
intersectLen.y = min(temp.x, temp.y);
if (intersectLen.x > 0)
{
rayPos += intersectLen.x * rayDir;
}
}
v2f vert (appdata v)
{
v2f o;
UNITY_SETUP_INSTANCE_ID(v);
UNITY_TRANSFER_INSTANCE_ID(v, o);
UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(o);
o.vertex = TransformObjectToHClip(v.vertex);
o.uv = v.uv;
o.screenPos = ComputeScreenPos(o.vertex);
o.wPos = float4(TransformObjectToWorld(v.vertex),1);
return o;
}
half4 frag (v2f i) : SV_Target
{
UNITY_SETUP_INSTANCE_ID(i);
UNITY_SETUP_STEREO_EYE_INDEX_POST_VERTEX(i);
// Get the screen depth
float2 screenUV = i.screenPos.xy/i.screenPos.w;
float depth = SampleSceneDepth(screenUV);
depth = Linear01Depth(depth, _ZBufferParams);
// calculate the length of the ray from the camera to the depth value sampled at the pixel
float3 camToPix = i.wPos.xyz - _WorldSpaceCameraPos;
float pixDepth = dot(camToPix, -UNITY_MATRIX_I_V._m02_m12_m22); // distance from the camera along the camera's -z
float3 depthPos = camToPix * (_ProjectionParams.z / pixDepth) * depth; //normalize camToPix such that the vector's length along the camera's Z is 1, then multiply by the depth times the far plane distance to get the ray from the camera to the pixel in the depth texture
float rayLenDepth = length(depthPos);
float3 rayDir = normalize(camToPix);
float3 rayPos = _WorldSpaceCameraPos;
//Axes of the box, equal to half the length of the box on each axis. Assumes that the object is axis aligned!
float3 boxSize = UNITY_MATRIX_M._m00_m11_m22 * 0.5;
float3 boxCenter = UNITY_MATRIX_M._m03_m13_m23;
float3 boxMin = boxCenter-boxSize;
// move the ray to the surface of the bounding box if the camera is outside the box
float2 intersectLen;
MoveRayToBoxSurface(rayPos, rayDir, boxCenter, boxSize, intersectLen);
//Max distance the ray can travel is either to the far side of the bounding box or to the position specified by the depth in the camera depth texture
float rayMaxDist = min(intersectLen.y, rayLenDepth);
//Ensures the rays will always be able to reach the farthest corners of the bounding box
float rayStepSize = 2*sqrt(boxSize.x*boxSize.x + boxSize.y*boxSize.y + boxSize.z*boxSize.z) * INV_MAX_ITERATIONS;
float3 rayUVW = (rayPos - boxMin) / (2.0 * boxSize);
float4 totalColor = 0;
float4 volumeColor = float4(0,0,0,1);
float totalDist = length(rayPos - _WorldSpaceCameraPos);
float transmittance = exp(-_GlobalExtinction * rayStepSize);
float exposure = _VolExposure2*0.01;
[branch] if (totalDist < rayMaxDist)
{
[loop] for (int iter = 0; iter < MAX_ITERATIONS; iter++)
{
rayUVW = (rayPos - boxMin) / (2.0 * boxSize);
volumeColor = SAMPLE_TEXTURE3D_LOD(_Volume, sampler_Volume, rayUVW, 0) * _Intensity;
float3 stepColor = exposure * volumeColor.rgb * volumeColor.a * ((1-transmittance)/_GlobalExtinction) * _GlobalScattering.rgb;
rayPos += rayStepSize * rayDir;
totalDist += rayStepSize;
[branch] if (totalDist > rayMaxDist)
{
float stepFrac = max(0, (rayStepSize - (totalDist - rayMaxDist)) / rayStepSize);
totalColor.rgb += stepFrac * stepColor;
break;
}
totalColor.rgb += stepColor;
totalColor.a *= transmittance;
}
//totalColor.rgb = float3(0,0,1);
}
float4 col = float4( totalColor.rgb, 1);
return col;
}
ENDHLSL
}
}
}

@@ -0,0 +1,10 @@
fileFormatVersion: 2
guid: eb2e9bbdd80a9d847b4d2c260bc40922
ShaderImporter:
externalObjects: {}
defaultTextures: []
nonModifiableTextures: []
preprocessorOverride: 0
userData:
assetBundleName:
assetBundleVariant:

@@ -0,0 +1,13 @@
//Slices a 3D texture into 2D slices
#pragma kernel CSMain
Texture3D<float4> voxels;
RWTexture2D<float4> Result;
int layer;
[numthreads(32, 32, 1)]
void CSMain(uint3 id : SV_DispatchThreadID)
{
uint3 pos = uint3(id.x, id.y, layer);
Result[id.xy] = voxels[pos];
}

@@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: 82879ba59713dd34c8bec1716751215e
ComputeShaderImporter:
externalObjects: {}
currentAPIMask: 4
userData:
assetBundleName:
assetBundleVariant:

@@ -0,0 +1,156 @@
//Second froxel pass
// Sebastien Hillaire https://www.slideshare.net/DICEStudio/physically-based-and-unified-volumetric-rendering-in-frostbite/26
#pragma kernel StepAdd
#define EPSILON 1e-8
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Common.hlsl"
//#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/GeometricTools.hlsl"
//#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Color.hlsl"
//#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Filtering.hlsl"
//#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/VolumeRendering.hlsl"
//#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/EntityLighting.hlsl"
RWTexture3D<float4> Result;
Texture3D<float4> InLightingTexture;
Texture3D<float4> HistoryBuffer;
SamplerState Custom_trilinear_clamp_sampler;
//shared float4 _GlobalExtinctionTint;
//#define extinction _GlobalExtinction
float4x4 LeftEyeMatrix;
float4x4 RightEyeMatrix;
//shared float4 _VolumePlaneSettings;
//shared float _GlobalExtinction;
shared float4 _GlobalScattering;
float4 _VolZBufferParams;
//shared float4 _VBufferDistanceEncodingParams;
CBUFFER_START(PerFrameCB)
float4 _VBufferDistanceDecodingParams;
float3 SeqOffset; //z offseter
CBUFFER_END
//float4 StereoOffset() {
//
//
// float3 prevPos = CameraPosition - CameraMotionVector; //just cache this and send it rather than calculating here
// float3 ws_repro = WS_coordinate - CameraMotionVector;
//
// //float2 positionNDC = ComputeNormalizedDeviceCoordinates(ws_repro, PrevViewProjMatrix );
// float vdistance = distance(ws_repro, prevPos);
// float W = EncodeLogarithmicDepthGeneralized(vdistance, _VBufferDistanceEncodingParams);
//
//
// half4 ls = half4(WS_coordinate - prevPos, -1); //_WorldSpaceCameraPos
//
// ls = mul(ls, transpose(PreviousFrameMatrix));
// ls.xyz = ls.xyz / ls.w;
//
// float3 reprojection = float3(ls.xy, W);
//
//
//}
[numthreads(8, 8, 1)]
void StepAdd(uint3 id : SV_DispatchThreadID)
{
//Use this to figure out distance between layers
//float near = _VolumePlaneSettings.x;
//float far = _VolumePlaneSettings.y;
// Transmittance: T(A→B) = exp( -∫[A..B] βe(x) dx )
//float Absorption = 0.01f; //temp Absorption rate
float perPixelRandomOffset = GenerateHashedRandomFloat(id); //posInput.positionSS
// This is a time-based sequence of 7 equidistant numbers from 1/14 to 13/14.
// Each of them is the centroid of the interval of length 2/14.
float3 rndVal = frac(perPixelRandomOffset + 1-SeqOffset);
float3 whd; //keep as float to avoid integer calculations later
Result.GetDimensions(whd.x, whd.y, whd.z); //TODO: Send this from script instead of getting it here
float4 accumScatteringTransmittance = float4(0, 0, 0, 1); //used for current slice output
//Make stereoscopic.
float4 Stereo_UVWs = float4(id / whd, 1);
//Shift space to linear. TODO: link to a shared file and add bias
//Stereo_UVWs.z = Stereo_UVWs.z * _VolumePlaneSettings.y / (1 + Stereo_UVWs.z * _VolumePlaneSettings.z); // LinearToVaporDeviceDepth
int EyeID = floor(Stereo_UVWs.x * 2); //Assume left and right eyes
Stereo_UVWs.x = frac(Stereo_UVWs.x * 2); //Make both eyes have proper UV.x
///Doesn't support android.
//if (id.x < whd.x / 2) {
// Stereo_UVWs = mul(LeftEyeMatrix, Stereo_UVWs);
//}
//else {
// Stereo_UVWs = mul(RightEyeMatrix, Stereo_UVWs);
//}
//Mask matrix transforms per eye
// Stereo_UVWs = (mul(LeftEyeMatrix, Stereo_UVWs) * (1-EyeID)) + (mul(RightEyeMatrix, Stereo_UVWs) * EyeID);
//Disabled until it's done correctly for log space
Stereo_UVWs.z = DecodeLogarithmicDepthGeneralized(Stereo_UVWs.z, _VBufferDistanceDecodingParams);
float ipd = 0.0001;
Stereo_UVWs.x = (((Stereo_UVWs.x + (ipd * (1- Stereo_UVWs.z)) )* (1-EyeID)) + (Stereo_UVWs.x - (ipd * (1 - Stereo_UVWs.z)) )*EyeID);
float invDepthNum = 1.0 / whd.z; // used to avoid division per slice TODO: move to script
float previousDepth = DecodeLogarithmicDepthGeneralized(0, _VBufferDistanceDecodingParams);
float4 previousResult = HistoryBuffer[id];
//Max unroll. Maybe make this a precompute to optimize further. TODO: Add clamp in script
//#if defined(SHADER_API_MOBILE)
// [unroll(32)]
//#else
// [unroll(128)]
//#endif
for (float depth = 0.0; depth < whd.z; depth++) {
float UVSlice = depth * invDepthNum; // slice / slicecount
//Sample the input texture here instead of doing an ID lookup, because the UVs are offset for stereo here rather than in the previous step
float4 scatteringExtinction =
InLightingTexture.SampleLevel(Custom_trilinear_clamp_sampler,float3(Stereo_UVWs.xy, depth * invDepthNum), 0) ; //Sample current Layer
//float zDepth = GetWDepth(UVSlice);
float zDepth = DecodeLogarithmicDepthGeneralized(UVSlice, _VBufferDistanceDecodingParams);
float travelDis = zDepth - previousDepth; //Distance between layers. TODO: jitter and do temporal blending
//* (.5 + rndVal.z *.5)
float extinction = scatteringExtinction.a;
float transmittance = exp(-extinction * travelDis );
// accumScatteringTransmittance.rgb += scatteringExtinction.rgb * accumScatteringTransmittance.a * _GlobalScattering; //Non-energy conserve
accumScatteringTransmittance.rgb += scatteringExtinction.rgb * accumScatteringTransmittance.a * ((1.0 - transmittance) / extinction) * _GlobalScattering.rgb; //Corrected version
accumScatteringTransmittance.a *= transmittance ;
// float4 normalizedVoxelValue = accumScatteringTransmittance * rcp(travelDis);
Result[int3(id.x, id.y, depth)] = accumScatteringTransmittance;
previousDepth = zDepth;
}
}
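// Derivation sketch (added, not called above): over a slice of length d with
// extinction sigma_t, transmittance is exp(-sigma_t * d); integrating constant
// in-scattering S across the slice gives S * (1 - exp(-sigma_t * d)) / sigma_t,
// the energy-conserving factor used in the loop (see the Hillaire slides
// linked at the top of this file).
float3 IntegrateSliceScattering(float3 inscattered, float extinction, float dist)
{
    float transmittance = exp(-extinction * dist);
    return inscattered * ((1.0 - transmittance) / max(extinction, 1e-6));
}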

@@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: c6ab875809b37f5489628e8a3ed0ae94
ComputeShaderImporter:
externalObjects: {}
currentAPIMask: 4
userData:
assetBundleName:
assetBundleVariant:

@@ -0,0 +1,668 @@
///This bakes lights into a 3d texture
//#pragma kernel VolumetricAreaBake
#pragma use_dxc vulkan
#pragma kernel PointLight
#pragma kernel SpotLight
#pragma kernel RectangleLight
#pragma kernel DiscLight
#pragma kernel DirectionalLight
#pragma kernel ClearBuffer
//#pragma editor_sync_compilation
#define M_PI 3.1415926535897932384626433832795
RWTexture3D<float4> AccumulatedLights;
//Obsolete
//float3 DirectionalLightDirection;
//float4 DirectionalLightColor;
//Convert to a structured buffer?
//Setting single light variable because we are looping through lights on the CPU.
//TODO: batch groups of lights together?
float3 LightPosition;
float3 LightDirection;
float4 LightColor;
int LightCount; //not used ATM
float SpotPram;
float InnerSpotPram;
float2 AreaSize;
float AreaLightSamples;
float4x4 AreaMatrix;
///Volume Variables
float3 Size;
float3 Position;
///
///TODO: Deprecate this
float4 OpaqueSphere[128];
int SphereCount;
float AmbientMedia;
float3 MediaSphere;
float4 MediaSphereAbsorption[128];
int MediaSphereCount;
Texture3D<float4> ParticipatingMediaTexture;
struct MeshObject
{
float4x4 localToWorldMatrix;
int indices_offset;
int indices_count;
};
StructuredBuffer<MeshObject> _MeshObjects;
StructuredBuffer<float3> _Vertices;
StructuredBuffer<int> _Indices;
//Bakes lights into a volumetric texture to use later
//Raytracer using some code from http://three-eyed-games.com/2018/05/03/gpu-ray-tracing-in-unity-part-1/
struct Ray {
float3 origin;
float3 direction;
};
void SaveLightTexture(int3 id, float4 colorData) {
//DebugBuffer[0].debugCount = 1;
AccumulatedLights[id] += colorData;
}
static const float EPSILON = 1e-8;
bool IntersectTriangle_MT97(Ray ray, float3 vert0, float3 vert1, float3 vert2,
inout float t, inout float u, inout float v)
{
// find vectors for two edges sharing vert0
float3 edge1 = vert1 - vert0;
float3 edge2 = vert2 - vert0;
// begin calculating determinant - also used to calculate U parameter
float3 pvec = cross(ray.direction, edge2);
// if determinant is near zero, ray lies in plane of triangle
float det = dot(edge1, pvec);
// use backface culling
//if (det < EPSILON)
// return false;
float inv_det = 1.0f / det;
// calculate distance from vert0 to ray origin
float3 tvec = ray.origin - vert0;
// calculate U parameter and test bounds
u = dot(tvec, pvec) * inv_det;
if (u < 0.0 || u > 1.0f)
return false;
// prepare to test V parameter
float3 qvec = cross(tvec, edge1);
// calculate V parameter and test bounds
v = dot(ray.direction, qvec) * inv_det;
if (v < 0.0 || u + v > 1.0f)
return false;
// calculate t, ray intersects triangle
t = dot(edge2, qvec) * inv_det;
return true;
}
Ray CreateRay(float3 origin, float3 direction) {
Ray ray;
ray.origin = origin;
ray.direction = direction;
return ray;
}
Ray DirectionToPoint(float3 LightPosition, float3 UVW) {
float3 direction = UVW - LightPosition;
return CreateRay(LightPosition,direction);
}
struct RayHit
{
float3 position;
float distance;
float3 normal;
};
RayHit CreateRayHit()
{
RayHit hit;
hit.position = float3(0.0f, 0.0f, 0.0f);
hit.distance = 1.#INF;
hit.normal = float3(0.0f, 0.0f, 0.0f);
return hit;
}
///////////////
//
//Intersection types
//
///////////////
void IntersectGroundPlane(Ray ray, inout RayHit bestHit)
{
// Calculate distance along the ray where the ground plane is intersected
float t = -ray.origin.y / ray.direction.y;
if (t > 0 && t < bestHit.distance)
{
bestHit.distance = t;
bestHit.position = ray.origin + t * ray.direction;
bestHit.normal = float3(0.0f, 1.0f, 0.0f);
}
}
void IntersectSphere(Ray ray, inout RayHit bestHit, float4 sphere)
{
// Calculate distance along the ray where the sphere is intersected
float3 d = ray.origin - sphere.xyz;
float p1 = -dot(ray.direction, d);
float p2sqr = p1 * p1 - dot(d, d) + sphere.w * sphere.w;
if (p2sqr < 0)
return;
float p2 = sqrt(p2sqr);
float t = p1 - p2 > 0 ? p1 - p2 : p1 + p2;
if (t > 0 && t < bestHit.distance)
{
bestHit.distance = t;
bestHit.position = ray.origin + t * ray.direction;
bestHit.normal = normalize(bestHit.position - sphere.xyz);
}
}
//A debugging triangle
void IntersectTriangle(Ray ray, inout RayHit bestHit ){
// Trace single triangle
float3 v0 = float3(10, 2, 0);
float3 v1 = float3(20, 2, 0);
float3 v2 = float3(15, 8, 15);
float t, u, v;
if (IntersectTriangle_MT97(ray, v0, v1, v2, t, u, v))
{
if (t > 0 && t < bestHit.distance)
{
bestHit.distance = t;
bestHit.position = ray.origin + t * ray.direction;
bestHit.normal = normalize(cross(v1 - v0, v2 - v0));
// bestHit.albedo = 0.00f;
// bestHit.specular = 0.65f * float3(1, 0.4f, 0.2f);
// bestHit.smoothness = 0.9f;
// bestHit.emission = 0.0f;
}
}
}
void IntersectMeshObject(Ray ray, inout RayHit bestHit, MeshObject meshObject)
{
uint offset = meshObject.indices_offset;
uint count = offset + meshObject.indices_count;
for (uint i = offset; i < count; i += 3)
{
float3 v2 = (mul(meshObject.localToWorldMatrix, float4(_Vertices[_Indices[i]], 1))).xyz;
float3 v1 = (mul(meshObject.localToWorldMatrix, float4(_Vertices[_Indices[i + 1]], 1))).xyz;
float3 v0 = (mul(meshObject.localToWorldMatrix, float4(_Vertices[_Indices[i + 2]], 1))).xyz;
float t, u, v;
if (IntersectTriangle_MT97(ray, v0, v1, v2, t, u, v))
{
if (t > 0 && t < bestHit.distance)
{
bestHit.distance = t;
bestHit.position = ray.origin + t * ray.direction;
bestHit.normal = normalize(cross(v1 - v0, v2 - v0));
// bestHit.albedo = 0.0f;
// bestHit.specular = 0.65f;
// bestHit.smoothness = 0.99f;
// bestHit.emission = 0.0f;
}
}
}
}
//Set up interactions here
RayHit Trace(Ray ray)
{
RayHit bestHit = CreateRayHit();
uint count, stride;
//IntersectGroundPlane(ray, bestHit);
//Sphere tracing
for (int i = 0; i < SphereCount; i++) {
IntersectSphere( ray, bestHit, OpaqueSphere[i] );
}
// IntersectTriangle(ray, bestHit);
//// Trace mesh objects
_MeshObjects.GetDimensions(count, stride);
for (uint r = 0; r < count; r++)
{
IntersectMeshObject(ray, bestHit, _MeshObjects[r]);
}
return bestHit;
}
float3 Shade(inout Ray ray, RayHit hit)
{
if (hit.distance < 1.#INF)
{
// Return the normal
//return hit.normal * 0.5f + 0.5f;
return hit.distance;
//Inverse square law
//float LightRadius = (hit.distance( (id + 0.5 ) / (w / Size), LightPosition[i]) ); //Distance from center of voxel
//return 1 / (4 * M_PI * LightRadius*LightRadius);
}
else
{
// Sample the skybox and write it
float theta = acos(ray.direction.y) / -M_PI;
float phi = atan2(ray.direction.x, -ray.direction.z) / -M_PI * 0.5f;
return float3(1, 0, 1);
}
}
float4 SimpleGround(float3 pos) {
if (pos.y > 1) {
return float4(0, 1, 1, 0);
}
else {
return float4(.25, .125, 0, 1);
}
}
uniform float _Seed;
float rand(float2 Pixel, inout float seed)
{
float result = frac(sin(seed / 100.0f * dot(Pixel, float2(12.9898f, 78.233f) ) ) * 43758.5453f);
seed += 1.0f;
return result;
}
float rand(float3 Pixel, inout float seed)
{
float result = frac(sin(seed / 100.0f * dot(Pixel, float3(12.9898f, 49.1165f, 29.1165f))) * 43758.5453f);
seed += 1.0f;
return result;
}
///////////////////
///
///Media passes
///
///////////////////
SamplerState _LinearClamp;
[numthreads(4, 4, 4)]
void ClearBuffer(uint3 id : SV_DispatchThreadID) {
AccumulatedLights[id] = float4(0,0,0,0);
}
[numthreads(4, 4, 4)]
void ParticipatingMedia(uint3 id : SV_DispatchThreadID) {
// ParticipatingMediaTexture.SampleLevel(_LinearClamp, POS ,0,0)
// SaveLightTexture(id, inverseSquareColor * ShadowColor);
}
[numthreads(4, 4, 4)]
void ParticipatingSphere(uint3 id : SV_DispatchThreadID) {
// ParticipatingMediaTexture.SampleLevel(_LinearClamp, POS ,0,0)
// SaveLightTexture(id, inverseSquareColor * ShadowColor);
}
///////////////////
///
///Light Passes
///
///////////////////
[numthreads(4, 4, 4)]
void PointLight(uint3 id : SV_DispatchThreadID) {
float3 whd;
AccumulatedLights.GetDimensions(whd.x, whd.y, whd.z);
float3 VoxelWorldPosition = Position + ((id + 0.5) / (whd / Size));
//Distance from light
float LightRadius = (distance(VoxelWorldPosition, LightPosition)); //Distance from center of voxel
float4 ShadowColor = float4(1, 1, 1, 1);
Ray PointShadowRay = CreateRay(VoxelWorldPosition, -normalize(VoxelWorldPosition - LightPosition));
RayHit PointShadowHit = Trace(PointShadowRay);
if (PointShadowHit.distance < LightRadius)
{
//todo: Account for tinted materials to make colored shadows
ShadowColor *= 0;
}
//Currently just doing the inverse square law for falloff. Figure out physical scattering and absorption
float4 inverseSquareColor = LightColor / (4 * M_PI * LightRadius * LightRadius);
SaveLightTexture(id, inverseSquareColor * ShadowColor);
//AccumulatedLights[id] += inverseSquareColor * ShadowColor;
}
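// Reference sketch (added, not called by the kernels here): the falloff used
// throughout this file is the point source's power spread over a sphere of
// radius d, i.e. L / (4 * pi * d^2).
float4 InverseSquareFalloff(float4 lightColor, float dist)
{
    return lightColor / (4 * M_PI * dist * dist);
}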
[numthreads(4, 4, 4)]
void SpotLight(uint3 id : SV_DispatchThreadID) {
float3 whd;
AccumulatedLights.GetDimensions(whd.x, whd.y, whd.z);
float3 VoxelWorldPosition = Position + ((id + 0.5) / (whd / Size));
float3 VoxelDirectionToLight = -normalize(VoxelWorldPosition - LightPosition);
//LightDirection = float3(0, -1, 0);
//Currently taking a point light and adding attenuation
float attenuation = clamp(dot(LightDirection, -VoxelDirectionToLight), 0, 1);
///
float flOuterConeCos = SpotPram;
float flTemp = dot(LightDirection, -VoxelDirectionToLight) - flOuterConeCos;
float vSpotAtten = saturate(flTemp * InnerSpotPram);
///
//Distance from light
float LightRadius = (distance(VoxelWorldPosition, LightPosition)); //Distance from center of voxel
float4 ShadowColor = float4(1, 1, 1, 1);
//Only casting rays in lit areas
if (attenuation > 0) {
Ray PointShadowRay = CreateRay(VoxelWorldPosition, VoxelDirectionToLight);
RayHit PointShadowHit = Trace(PointShadowRay);
if (PointShadowHit.distance < LightRadius)
{
//todo: Account for tinted materials to make colored shadows
ShadowColor *= 0;
}
}
//Currently just doing the inverse square law for falloff.
//TODO: Figure out physical scattering and absorption for more accurate color and falloff
float4 inverseSquareColor = LightColor / (4 * M_PI * LightRadius * LightRadius);
//AccumulatedLights[id] += inverseSquareColor * ShadowColor * vSpotAtten;
SaveLightTexture(id, inverseSquareColor * ShadowColor * vSpotAtten);
}
//Add a skylight portal to better control light leaking
[numthreads(4, 4, 4)]
void DirectionalLight(uint3 id : SV_DispatchThreadID) {
float3 whd;
AccumulatedLights.GetDimensions(whd.x, whd.y, whd.z);
float3 VoxelWorldPosition = Position + ((id + 0.5) / (whd / Size));
//Directional Light // Make an array
// Shadow test ray
float4 DirectionalShadow = float4(1, 1, 1, 1);
Ray shadowRay = CreateRay(VoxelWorldPosition, -LightDirection.xyz);
RayHit shadowHit = Trace(shadowRay);
if (shadowHit.distance != 1.#INF && shadowHit.distance != -1.#INF)
{
DirectionalShadow *= 0;
}
// AccumulatedLights[id.xyz] += DirectionalColor;
SaveLightTexture(id, LightColor * 0.01 * DirectionalShadow); //TODO: remove temp multiplier
}
//
[numthreads(4, 4, 4)]
void RectangleLight(uint3 id : SV_DispatchThreadID) {
float3 whd;
AccumulatedLights.GetDimensions(whd.x, whd.y, whd.z);
float3 VoxelWorldPosition = Position + ((id + 0.5) / (whd / Size));
float4 MonteCarlointegration = float4(0, 0, 0, 0);
float seed = _Seed;
for (int i = 0; i < AreaLightSamples; i++) {
float4 ShadowColor = float4(1, 1, 1, 1);
float3 VoxelDirectionToLight = -normalize(VoxelWorldPosition - LightPosition);
float Facing = saturate(dot(LightDirection, -VoxelDirectionToLight)); //Initial direction check
//Only cast rays from one side of the light
if (Facing > 0){
float3 LightPosSample = LightPosition + mul((float3x3)AreaMatrix, float3( (rand(id.xyz, seed)-0.5) * AreaSize.x , (rand(id.xyz, seed) - 0.5) * AreaSize.y, 0) );
VoxelDirectionToLight = -normalize(VoxelWorldPosition - LightPosSample);
float LightRadius = (distance(VoxelWorldPosition, LightPosSample)); //Distance from center of voxel
float attenuation = saturate(dot(LightDirection, -VoxelDirectionToLight));
Ray PointShadowRay = CreateRay(VoxelWorldPosition, -normalize(VoxelWorldPosition - LightPosSample) );
RayHit PointShadowHit = Trace(PointShadowRay);
if (PointShadowHit.distance < LightRadius)
{
//todo: Account for tinted materials to make colored shadows
ShadowColor *= 0;
}
//TODO: figure out what Unity's area lighting model is and match it
float4 inverseSquareColor = (LightColor ) / (4 * M_PI * LightRadius * LightRadius);
MonteCarlointegration += (inverseSquareColor * ShadowColor * attenuation) / AreaLightSamples;
}
//Currently just doing the inverse square law for falloff. Figure out physical scattering and absorption
}
// AccumulatedLights[id] += MonteCarlointegration;
SaveLightTexture(id, MonteCarlointegration);
}
[numthreads(4, 4, 4)]
void DiscLight(uint3 id : SV_DispatchThreadID) {
float3 whd;
AccumulatedLights.GetDimensions(whd.x, whd.y, whd.z);
float3 VoxelWorldPosition = Position + ((id + 0.5) / (whd / Size));
float4 MonteCarlointegration = float4(0, 0, 0, 0);
float seed = _Seed;
for (int i = 0; i < AreaLightSamples; i++) {
float4 ShadowColor = float4(1, 1, 1, 1);
float3 VoxelDirectionToLight = -normalize(VoxelWorldPosition - LightPosition);
float Facing = saturate(dot(LightDirection, -VoxelDirectionToLight)); //Initial direction check
//Only cast rays from one side of the light
if (Facing > 0) {
//https://stackoverflow.com/questions/5837572/generate-a-random-point-within-a-circle-uniformly
float t = 2 * M_PI * rand(id.xyz, seed);
float u = rand(id.xyz, seed) + rand(id.xyz, seed);
float r;
if (u > 1)
{
r = (2 - u);
}
else { r = u; }
// [r * cos(t), r * sin(t)]
float3 LightPosSample = LightPosition + mul((float3x3)AreaMatrix,
float3( ( r * cos(t)) * AreaSize.x,
( r * sin(t)) * AreaSize.x,
0));
//TEMP
//float3 LightPosSample = LightPosition + mul(AreaMatrix, float3((rand(id.xyz) - 0.5) * AreaSize.x, (rand(id.xyz) - 0.5) * AreaSize.x, 0));
VoxelDirectionToLight = -normalize(VoxelWorldPosition - LightPosSample);
float LightRadius = (distance(VoxelWorldPosition, LightPosSample)); //Distance from center of voxel
float attenuation = saturate(dot(LightDirection, -VoxelDirectionToLight));
Ray PointShadowRay = CreateRay(VoxelWorldPosition, -normalize(VoxelWorldPosition - LightPosSample));
RayHit PointShadowHit = Trace(PointShadowRay);
if (PointShadowHit.distance < LightRadius)
{
//todo: Account for tinted materials to make colored shadows
ShadowColor *= 0;
}
//TODO: figure out what Unity's area lighting model is and match it
float4 inverseSquareColor = (LightColor) / (4 * M_PI * LightRadius * LightRadius);
MonteCarlointegration += (inverseSquareColor * ShadowColor * attenuation) / AreaLightSamples;
}
//Currently just doing the inverse square law for falloff. Figure out physical scattering and absorption
}
// AccumulatedLights[id] += MonteCarlointegration;
SaveLightTexture(id, MonteCarlointegration);
}
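/////SKETCH (not in the original file): the two area-light kernels above duplicate the same
/////shadow-ray + inverse-square evaluation and differ only in how LightPosSample is drawn.
/////A helper along these lines could fold that into one place; it assumes the Ray/RayHit,
/////CreateRay, and Trace declarations used by the kernels above. Left commented out, like the
/////unused block below, since it is illustrative only.
//float4 EvaluateAreaLightSample(float3 voxelWorldPosition, float3 lightPosSample)
//{
//	float3 directionToLight = -normalize(voxelWorldPosition - lightPosSample);
//	float lightRadius = distance(voxelWorldPosition, lightPosSample); //distance from the voxel center
//	float attenuation = saturate(dot(LightDirection, -directionToLight));
//	Ray shadowRay = CreateRay(voxelWorldPosition, directionToLight);
//	RayHit shadowHit = Trace(shadowRay);
//	float shadow = (shadowHit.distance < lightRadius) ? 0 : 1; //binary occlusion, as in the kernels above
//	return (LightColor / (4 * M_PI * lightRadius * lightRadius)) * shadow * attenuation;
//}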
/////UNUSED
//
//[numthreads(4, 4, 4)]
//void VolumetricAreaBake(uint3 id : SV_DispatchThreadID)
//{
// float3 whd;
// AccumulatedLights.GetDimensions(whd.x, whd.y, whd.z);
//
//
// Debug debug;
// debug.debugCount = 0;
//
// //w -= 1;
//
// float4 ColoredLights = float4(0,0,0,0);
// float ParticipatingMedia = 0;
//
// float3 VoxelWorldPosition = Position + ((id + 0.5) / (whd / Size));
//
// //Directional Light // Make an array
// // Shadow test ray
// bool shadow = false;
// Ray shadowRay = CreateRay(VoxelWorldPosition, LightDirection.xyz);
// RayHit shadowHit = Trace(shadowRay);
// float4 DirectionalColor = DirectionalLightColor;
//
// if (shadowHit.distance != 1.#INF)
// {
// DirectionalColor *= 0;
// }
// ///
//
//
// ColoredLights += DirectionalColor;
//
// ////Point lights without shadows
// //for (int i = 0; i < LightCount; i++) {
//
// // //Distance from light
// // float LightRadius = (distance(VoxelWorldPosition, LightPosition[i]) ); //Distance from center of voxel
//
// // float4 ShadowColor = float4(1,1,1,1);
//
// // Ray PointShadowRay = CreateRay(VoxelWorldPosition, -normalize(VoxelWorldPosition - LightPosition[i]) );
//
// // RayHit PointShadowHit = Trace(PointShadowRay);
// // if (PointShadowHit.distance < LightRadius)
// // {
// // //todo: Account for tinted materials to make colored shadows
// // ShadowColor *=0;
// // }
// // //Currently just doing the inverse square law for falloff. Figure out physical scattering and absorption
// // float4 inverseSquareColor = LightColor[i] / (4 * M_PI * LightRadius*LightRadius);
// //
// // ColoredLights += inverseSquareColor * ShadowColor;
//
// //}
//
//
//	//Bake in fog density
// for (int s = 0; s < MediaSphereCount; s++) {
//
// float mediaStep = distance ( MediaSphere[s], VoxelWorldPosition) ;
//
//
// if (mediaStep > 0.5)
// {
// mediaStep = 0;
// }
// else {
// mediaStep = MediaSphereAbsorption[s].x;
// }
//
// ParticipatingMedia += mediaStep;
//
// }
//
// ParticipatingMedia += AmbientMedia;
//
//
// //float4 LightColored = (1 - saturate(distance(id / (w / Size), LightPosition))) * LightColor;
//
//
// //RayHit hit = Trace(ray);
// //float3 result = Shade(ray, hit);
// //Result[id.xyz] = float4(result, 1);
//
// AccumulatedLights[id.xyz] = float4(ColoredLights.xyz, ParticipatingMedia) ; //Store Light Colors and
// //Result[id.xyz] = SimpleGround(LightPosition[0]-ray.direction);
//
//// Result[id.xyz] = float4(id / (w / Size),1); //DEBUG
//// DebugBuffer[id.x].debugCount = debug.debugCount;
//}

View file

@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: c9f3c60ba4ab14e4aacea6114fe7609c
ComputeShaderImporter:
externalObjects: {}
currentAPIMask: 4
userData:
assetBundleName:
assetBundleVariant:

View file

@ -0,0 +1,46 @@
#pragma kernel VolBlurX VolBlur=VolBlurX
#pragma kernel VolBlurY VolBlur=VolBlurY VERTICAL_PASS
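//One blur body compiled twice: each #pragma above #defines VolBlur to a unique entry-point name,
//and VERTICAL_PASS switches the sampling axis inside the loop below.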
#define TapSamples 2
//#define SIGMA_FILTER 1
RWTexture3D<float4> Result;
Texture3D<float4> InTex;
static float KernelLUT[3] = {0.38774, 0.24477, 0.06136};
//Gaussian distribution baked as a LUT: http://dev.theomader.com/gaussian-kernel-calculator/
//(the active 5-tap kernel matches sigma = 1; the commented 9-tap alternative below matches sigma = 2)
//static float KernelLUT[5] = { 0.20236, 0.179044, 0.124009, 0.067234, 0.028532 };
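//Sanity check: 0.38774 + 2 * (0.24477 + 0.06136) = 1.0, so all five taps (i = -TapSamples..+TapSamples)
//are needed for the blur to preserve energy.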
//float Gaussian(float radius, float sigma)
//{
// float v = radius / sigma;
// return exp(-(v * v));
//}
//float GaussianLUT(int currentKernel)
//{
//	return KernelLUT[currentKernel];
//}
[numthreads(4,4,4)]
void VolBlur (uint3 id : SV_DispatchThreadID)
{
float4 sum = float4(0, 0, 0, 0);
// [unroll(TapSamples*2 + 1)] //Unroll all potential samples
for (int i = -TapSamples; i <= TapSamples; i++) { //inclusive bound: all 2*TapSamples+1 taps, so the weights sum to 1
#if VERTICAL_PASS
int3 ids = int3(id.x, id.y + i, id.z);
#else
int3 ids = int3(id.x + i, id.y, id.z);
#endif
// float weight = GaussianLUT( abs(i) );
sum += InTex[ids].rgba * KernelLUT[abs(i)];
}
Result[id.xyz] = sum;
}

View file

@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: be2be01bbace8854f9bf0a1335db0883
ComputeShaderImporter:
externalObjects: {}
currentAPIMask: 2052
userData:
assetBundleName:
assetBundleVariant:

View file

@ -0,0 +1,454 @@
///This is the realtime volumetric shader. It's the first froxel pass.
#pragma use_dxc vulkan
#pragma kernel Scatter
#define M_PI 3.1415926535897932384626433832795 //Standard stored Pi.
#define PI_x4 12.566370614359172953850573533118 //For inverse square.
//#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/UnityInstancing.hlsl"
#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/Common.hlsl"
//#include "Packages/com.unity.render-pipelines.universal/Shaders/Volumetrics/VolumetricCookie.hlsl"
//#include "Packages/com.unity.render-pipelines.core/ShaderLibrary/SpaceTransforms.hlsl"
Texture3D<float4> PreviousFrameLighting;
RWTexture3D<float4> Result;
//Texture2D<float4> BlueNoise;
//Texture2DArray<float4> LightProjectionTextureArray;
//Currently only Supports spot lights
// struct LightObject
// {
// float4x4 LightProjectionMatrix;
// float3 LightPosition;
// float4 LightColor;
// int LightCookie;
// };
//StructuredBuffer<LightObject> LightObjects;
/*
struct ParticipatingMediaSphere
{
float3 CenterPosition;
float LocalExtinction;
float LocalFalloff;
float LocalRange;
};
StructuredBuffer<ParticipatingMediaSphere> media_sphere_buffer;
int media_sphere_buffer_length;
*/
//shared float4 _VolumePlaneSettings;
shared float _GlobalExtinction;
shared float _FogBaseHeight;
shared float _FogMaxHeight;
shared float _StaticLightMultiplier;
float _ClipmapScale;
float _ClipmapScale2;
float3 _ClipmapPosition;
CBUFFER_START(PerFrameCB)
float4x4 _VBufferCoordToViewDirWS; // >_>
float4x4 _PrevViewProjMatrix;
float4x4 _ViewMatrix;
float4x4 TransposedCameraProjectionMatrix;
float4x4 CameraProjectionMatrix;
float4 _VBufferDistanceEncodingParams;
float4 _VBufferDistanceDecodingParams;
float4 SeqOffset; //z offseter
float4 CameraPosition; //current camera position
float4 CameraMotionVector; //camera's motion per frame
CBUFFER_END
//shared float SeqOffsetPrv; //z offseter
float _VBufferUnitDepthTexelSpacing;
//shared float4 _VBufferSharedUvScaleAndLimit;
//float4x4 PreviousFrameMatrix;
SamplerState _LinearClamp;
SamplerState _point_repeat;
SamplerState Custom_trilinear_clamp_sampler;
SamplerState s_linear_clamp_sampler;
Texture3D<float4> _VolumetricClipmapTexture;
Texture3D<float4> _VolumetricClipmapTexture2;
shared TextureCube<float4> _SkyTexture;
//float reprojectionAmount;
shared float4 _MipFogParameters; //near, far, mix
shared int _SkyMipCount;
// Returns the forward (central) direction of the current view in the world space.
float3 GetViewForwardDir()
{
float4x4 viewMat = _ViewMatrix;
return -viewMat[2].xyz;
}
// Returns the forward (up) direction of the current view in the world space.
float3 GetViewUpDir()
{
float4x4 viewMat = _ViewMatrix;
return viewMat[1].xyz;
}
float3 GetFogColor(float3 V, float fragDist)
{
float3 color = float3(1,1,1);
// Based on Uncharted 4 "Mip Sky Fog" trick: http://advances.realtimerendering.com/other/2016/naughty_dog/NaughtyDog_TechArt_Final.pdf
float mipLevel = (1.0 - _MipFogParameters.z * saturate((fragDist - _MipFogParameters.x) / (_MipFogParameters.y - _MipFogParameters.x))) * (_SkyMipCount - 1);
// For the atmospheric scattering, we use the GGX-convolved version of the cubemap. That matches the one at index 0
//color *= SampleSkyTexture(-V, mipLevel, 0).rgb; // '_FogColor' is the tint
color *= _SkyTexture.SampleLevel(Custom_trilinear_clamp_sampler,V, mipLevel).rgb; // '_FogColor' is the tint
return color;
}
//Custom trilinear interpolation
//Apparently you can't use a sampler with an RWTexture, but you can read the data directly.
//Do we need this? Can we do this with a sampler instead, without requiring a CPU memory copy? Is it any faster that way? //DOES IT WORK ON ANDROID???
float4 TrilinearInterpolation(float3 UVW) {
float3 UVW_0 = floor(UVW); //Lowest corner
float3 UVW_1 = ceil(UVW); //Highest Corner
float3 PixelDifference = UVW - UVW_0; //vec3 distance
float3 PixelDifference_1minus = 1 - PixelDifference;
//Sample ALL the points!
float4 value_000 = Result[UVW_0.xyz];
float4 value_100 = Result[int3(UVW_1.x, UVW_0.y, UVW_0.z)];
float4 value_010 = Result[int3(UVW_0.x, UVW_1.y, UVW_0.z)];
float4 value_110 = Result[int3(UVW_1.x, UVW_1.y, UVW_0.z)];
float4 value_001 = Result[int3(UVW_0.x, UVW_0.y, UVW_1.z)];
float4 value_101 = Result[int3(UVW_1.x, UVW_0.y, UVW_1.z)];
float4 value_011 = Result[int3(UVW_0.x, UVW_1.y, UVW_1.z)];
float4 value_111 = Result[UVW_1.xyz];
// Interpolate in 3 dimensions
float4 c00 = (value_000 * (PixelDifference_1minus.x)) + (value_100 * (PixelDifference.x));
float4 c01 = (value_001 * (PixelDifference_1minus.x)) + (value_101 * (PixelDifference.x));
float4 c10 = (value_010 * (PixelDifference_1minus.x)) + (value_110 * (PixelDifference.x));
float4 c11 = (value_011 * (PixelDifference_1minus.x)) + (value_111 * (PixelDifference.x));
float4 c0 = (c00 * (PixelDifference_1minus.y)) + (c10 * (PixelDifference.y));
float4 c1 = (c01 * (PixelDifference_1minus.y)) + (c11 * (PixelDifference.y));
return (c0 * PixelDifference_1minus.z) + (c1 * PixelDifference.z);
};
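//Usage note: UVW here is in texel space rather than 0-1 UV space; callers convert first,
//e.g. TrilinearInterpolation(uvw01 * whd - 0.5) as in the commented-out reprojection below.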
float ComputeHistoryWeight(float frameMulti)
{
// Compute the exponential moving average over 'n' frames:
// X = (1 - a) * ValueAtFrame[n] + a * AverageOverPreviousFrames.
// We want each sample to be uniformly weighted by (1 / n):
// X = (1 / n) * Sum{i from 1 to n}{ValueAtFrame[i]}.
// Therefore, we get:
// (1 - a) = (1 / n) => a = (1 - 1 / n) = (n - 1) / n,
// X = (1 / n) * ValueAtFrame[n] + (1 - 1 / n) * AverageOverPreviousFrames.
// Why does it work? We need to make the following assumption:
// AverageOverPreviousFrames ≈ AverageOverFrames[n - 1].
// AverageOverFrames[n - 1] = (1 / (n - 1)) * Sum{i from 1 to n - 1}{ValueAtFrame[i]}.
// This implies that the reprojected (accumulated) value has mostly converged.
// X = (1 / n) * ValueAtFrame[n] + ((n - 1) / n) * (1 / (n - 1)) * Sum{i from 1 to n - 1}{ValueAtFrame[i]}.
// X = (1 / n) * ValueAtFrame[n] + (1 / n) * Sum{i from 1 to n - 1}{ValueAtFrame[i]}.
// X = Sum{i from 1 to n}{ValueAtFrame[i] / n}.
float numFrames = 7 * frameMulti;
float frameWeight = 1 / numFrames;
float historyWeight = 1 - frameWeight;
return historyWeight;
}
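//e.g. with frameMulti = 1: numFrames = 7, so each new frame contributes 1/7 and the history keeps 6/7 (~0.857).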
//Realtime spot evaluation
// float4 SpotLightLoop(float4 WS_coordinate) {
//
// float4 accumLighting = float4(0,0,0,0);
//
// uint count, stride;
// LightObjects.GetDimensions(count, stride);
//
// for (int i = 0; i < count; i++) {
//
// ///Realtime Spotlight projection matrix and cookie sampler
// float4 lightWSPos = WS_coordinate - float4(LightObjects[i].LightPosition, 1); //world pos
//
// lightWSPos = mul(LightObjects[i].LightProjectionMatrix, lightWSPos);
//
// lightWSPos.xy = lightWSPos.xy / lightWSPos.w;
//
// float lightDirection = lightWSPos.z;
//
// lightWSPos.z = 1; //Setting which slice to sample https://docs.microsoft.com/en-us/windows/win32/direct3dhlsl/dx-graphics-hlsl-to-samplelevel
//
// float4 spotlightTex = LightProjectionTextureArray.SampleLevel(Custom_trilinear_clamp_sampler, lightWSPos.xyz, 0, 0);
//
// float LightRadius = distance(WS_coordinate.xyz, LightObjects[i].LightPosition.xyz); // Distance from light source
//
// spotlightTex *= LightObjects[i].LightColor / (PI_x4 * LightRadius*LightRadius); // inverse square
//
// accumLighting += spotlightTex * step(0,-lightDirection); // clip only to the front and add to the lightinbg
// }
//
// return accumLighting;
// }
//Realtime Additional evaluation
// float4 AdditionalLightLoop(float4 positionWS)
// {
// // #ifdef _ADDITIONAL_LIGHTS
// uint pixelLightCount = GetAdditionalLightsCount();
// for (int i=0 ; i < pixelLightCount ; i++)
// {
// Light light = GetAdditionalLight(lightIndex, inputData.positionWS, shadowMask);
// }
// // #endif
// }
//Real-time media
/*
float RealtimeMediaSpheres(float3 worldspace){
// uint2 count;
// media_sphere_buffer.GetDimensions(count.x,count.y);
float accumulation = 0;
for (uint i = 0; i < media_sphere_buffer_length; i++)
{
//Get the distance, scale it to the range, and multiple the linear ramp with the Extinction
accumulation += saturate(((1 - ( distance(media_sphere_buffer[i].CenterPosition,worldspace) + 1
- media_sphere_buffer[i].LocalRange) )/ media_sphere_buffer[i].LocalRange)) * media_sphere_buffer[i].LocalExtinction;
}
return accumulation;
}
*/
///Main scattering pass. Integration is done separately so we can reproject this result for XR rather than do it twice.
///We would have to store a pre-integration version anyway for temporal reprojection
[numthreads(4,4,4)]
void Scatter(uint3 id : SV_DispatchThreadID)
{
///Get current RT resolution and convert it to 0 - 1 UVW space
float3 whd;
Result.GetDimensions(whd.x, whd.y, whd.z);
// float3 TemporalOffset = float3(0.5, 0.5, 0.5 + Jittery * 0.5); //offsetting
// float3 TemporalOffset = float3(0.5, 0.5, 0.5 ); //offsetting
float4 UVW = float4( (id) / whd, 1); //Make uvs and sample from center of froxel //ID TO UVW
float e1 = (id.z + 1) / whd.z; // (slice + 1) / sliceCount //Todo: Bake Rcp
// float t1 = DecodeLogarithmicDepthGeneralized(e1, _VBufferDistanceDecodingParams);
// UVW.z = t1;
//TODO: replace this with the global baked bluenoise texture used elsewhere
// Perform per-pixel randomization by adding an offset and then sampling uniformly
// (in the log space) in a vein similar to Stochastic Universal Sampling:
// https://en.wikipedia.org/wiki/Stochastic_universal_sampling
float perPixelRandomOffset = GenerateHashedRandomFloat(id); //posInput.positionSS
// This is a time-based sequence of 7 equidistant numbers from 1/14 to 13/14.
// Each of them is the centroid of the interval of length 2/14.
float rndVal = frac(perPixelRandomOffset + SeqOffset.z);
/// Invert the assumed perspective projection from the UVW to World Space
//float4 WS_coordinate = mul(CameraProjectionMatrix, UVW); //inverse camera matrix
//WS_coordinate.xyz = WS_coordinate.xyz / WS_coordinate.w; //tapper coord and flip around
//WS_coordinate += CameraPosition;
float2 centerCoord = id.xy + float2(0.5, 0.5);
// Compute a ray direction s.t. ViewSpace(rayDirWS).z = 1.
//float3 rayDirWS = mul(-float4(centerCoord, 1, 1), _VBufferCoordToViewDirWS[unity_StereoEyeIndex]).xyz;
float3 rayDirWS = normalize(mul(-float4(centerCoord , 1, 1), _VBufferCoordToViewDirWS)).xyz ;
float rcpLenRayDir = rsqrt(dot(rayDirWS, rayDirWS));
//rayDirWS *= rcpLenRayDir; // Normalize
//JitteredRay ray;
//ray.originWS = GetCurrentViewPosition(); //WS pos of camera
//ray.centerDirWS = rayDirWS * rcpLenRayDir; // Normalize
//float originWS = GetCurrentViewPosition(); //WS pos of camera
// float3 originWS = CameraPosition.xyz; //WS pos of camera
float3 F = GetViewForwardDir();
float3 U = GetViewUpDir();
float3 rightDirWS = cross(rayDirWS, U);
float rcpLenRightDir = rsqrt(dot(rightDirWS, rightDirWS));
// ray.jitterDirWS = ray.centerDirWS;
// float de = 1 / whd.z;
// float e1 = (UVW.z + 1) / whd.z; //linearZ
//float t = DecodeLogarithmicDepthGeneralized(e1 - 0.5 * de, _VBufferDistanceDecodingParams);
rndVal = SeqOffset.z; //NOTE: overrides the per-pixel hashed offset above with the frame-global sequence value
float t = DecodeLogarithmicDepthGeneralized( ( e1 - (rndVal * (1/whd.z)) ) , _VBufferDistanceDecodingParams); //Get log encoded distance based on linear UVWs
float pt = DecodeLogarithmicDepthGeneralized( ( e1 - (.5 * (1/whd.z)) ) , _VBufferDistanceDecodingParams); //Get log encoded distance based on linear UVWs
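//t jitters the sample within its depth slice using the per-frame sequence offset, while pt sits at the
//slice center so the reprojection position below stays stable from frame to frame.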
float3 centerDirWS = rayDirWS; // //jittered ray
float FdotD = dot(F, centerDirWS);
//float _VBufferUnitDepthTexelSpacing = .01; //TODO: make this smart
float unitDistFaceSize = _VBufferUnitDepthTexelSpacing * FdotD * rcpLenRayDir;
float3 xDirDerivWS = rightDirWS * (rcpLenRightDir * unitDistFaceSize); // Normalize & rescale
float3 yDirDerivWS = cross(xDirDerivWS, centerDirWS); // Will have the length of 'unitDistFaceSize' by construction
//SeqOffset.xy = 0;
//float3 jitterDirWS = normalize(centerDirWS + (SeqOffset.x * xDirDerivWS) + (SeqOffset.y * yDirDerivWS));
float3 jitterDirWS = normalize(centerDirWS);
//TODO: add the left right jitter too. Helps smooth out rough edges.
float3 centerWS = CameraPosition.xyz + ( t * jitterDirWS); //Cast ray along direction and add cam pos to get WS pos
float3 centWSNJIT = CameraPosition.xyz + pt * centerDirWS ; //non-jittered version for reprojection
//RGB is total lighting, A is the total extinction
float4 accumLighting = float4(0,0,0,0);
//
//centerWS *= .99;
//WS_coordinate = -WS_coordinate + CameraPosition; //move to postion.
float4 WS_coordinate = float4(centerWS,1);
///
// WS_coordinate.z += Noised;
/// Previous frame space
////
//REPROJECTION
///
//float4 PreviousFrameProjection = WS_coordinate; // Cache WS relative to the camera
//PreviousFrameProjection.xyz -= CameraMotionVector.xyz; //Move back previous frame's pos TODO: Add to matrix instead
//PreviousFrameProjection.w = 1;
//PreviousFrameProjection = mul(PreviousFrameMatrix, PreviousFrameProjection); // convert using previous matrix
//PreviousFrameProjection = PreviousFrameProjection / PreviousFrameProjection.w; //Untapper coord
/////
// PreviousFrameProjection.z = PreviousFrameProjection.z / (_VolumePlaneSettings.y - PreviousFrameProjection.z * _VolumePlaneSettings.z);
// PreviousFrameProjection.xy = (PreviousFrameProjection.xy + 1.0f) * 0.5f;
//float t0 = DecodeLogarithmicDepthGeneralized(0, _VBufferDistanceDecodingParams);
// float de = _VBufferRcpSliceCount; // Log-encoded distance between slices
//float4 previousResult = TrilinearInterpolation( PreviousFrameProjection.xyz * whd.xyz - 0.5); //Custom trilinear interpolation of RW texture. can't use this on mobile because can't read a RW tex.
//float4 previousResult = PreviousFrameLighting.SampleLevel(Custom_trilinear_clamp_sampler, id / UVW, 0 );
//half4 ls = half4(WS_coordinate - prevPos, -1); //_WorldSpaceCameraPos
//ls = mul(ls, transpose(PreviousFrameMatrix));
//ls.xyz = ls.xyz / ls.w;
float3 prevPos = CameraPosition.xyz - CameraMotionVector.xyz; //just cache this and send it rather than calculating it here
float3 ws_repro = centWSNJIT.xyz ;
float2 positionNDC = ComputeNormalizedDeviceCoordinates(ws_repro, _PrevViewProjMatrix);
float vdistance = distance(ws_repro, prevPos);
float W = EncodeLogarithmicDepthGeneralized(vdistance, _VBufferDistanceEncodingParams);
// float W = vdistance;
//half4 ls = half4(WS_coordinate - prevPos, -1); //_WorldSpaceCameraPos
//ls = mul(ls, transpose(PreviousFrameMatrix));
//ls.xyz = ls.xyz / ls.w;
//float3 reprojection = float3(ls.xy, W);
float3 reprojection = float3(positionNDC, W);
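//reprojection.xy is the previous-frame NDC position and .z the log-encoded view distance (the same
//mapping used to address the froxel volume), so the history texture can be sampled directly.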
//float clamper = step(reprojection.x,1) * step(0, reprojection.x) *
// step(reprojection.y, 1)* step(0, reprojection.y) *
// step(reprojection.z, 1) * step(0, reprojection.z);
//clamper = saturate(clamper);
float4 previousResult = PreviousFrameLighting.SampleLevel(s_linear_clamp_sampler, reprojection, 0 ) ;
//float4 previousResult = PreviousFrameLighting[id];
/////
//Light loops
/////
// accumLighting += SpotLightLoop(WS_coordinate);
/////
//Baked light volumes
/////
//Using combined clipmap instead of direct baked maps. Reduces the amount of 3d texture look-ups per frame.
//Sample based on worldspace divided by texture's world size. (apply to 0-1 Tex Sampler, no need to know the resolution)
float3 ClipmapUVW = (WS_coordinate.xyz - (_ClipmapPosition.xyz - (_ClipmapScale *.5) ) ) / _ClipmapScale.xxx;
float3 LargeClipmapUVW = (WS_coordinate.xyz - (_ClipmapPosition.xyz - (_ClipmapScale2 *.5) ) ) / (_ClipmapScale2.xxx);
//For clipping the clipmap. Isn't needed if the view volume is smaller than the clipmap
float Clipped =
step(ClipmapUVW.x, 1) * step(0, ClipmapUVW.x) *
step(ClipmapUVW.y, 1) * step(0, ClipmapUVW.y) *
step(ClipmapUVW.z, 1) * step(0, ClipmapUVW.z);
Clipped = saturate(Clipped);
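//The step() product above is a branchless inside-[0,1]^3 test: Clipped is 1 when this froxel falls inside
//the near clipmap and 0 otherwise, and is used below to blend between the two clipmap cascades.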
//if (Clipped > 0) { //Android doesn't like ifs :/
// accumLighting += float4(_VolumetricClipmapTexture.SampleLevel(Custom_trilinear_clamp_sampler, ClipmapUVW, 0).xyz, 0);
//}
//else
//{
// accumLighting += float4(_VolumetricClipmapTexture2.SampleLevel(Custom_trilinear_clamp_sampler, LargeClipmapUVW, 0).xyz, 0);
//}
accumLighting += _VolumetricClipmapTexture.SampleLevel(Custom_trilinear_clamp_sampler, ClipmapUVW, 0) * Clipped;
accumLighting += _VolumetricClipmapTexture2.SampleLevel(Custom_trilinear_clamp_sampler, LargeClipmapUVW, 0) * (1- Clipped);
accumLighting.rgb *= _StaticLightMultiplier;
accumLighting.rgb += GetFogColor(centerDirWS,W)*W; //Base fog color from cubemap;
// float3 lerpy = abs(dot(float3(0,0,1),rayDirWS)); //Directional tester
// accumLighting.rgb += lerpy*lerpy * float3(1,0.8,0.5)*W;
//Take any precomputed extinction stored in the clipmap and add global variables
//Height density. Doing this here instead of the clipmap to smoothly interpolate between volumes
// float maxheight = 1;
// float baseheight = 0;
//////////
/// Fog Density
//////////
//float heightDensity = lerp(_GlobalExtinction, REAL_EPS,saturate( (WS_coordinate.y / maxheight - baseheight) ));
//float NewValue = (((WS_coordinate.y) * (maxheight - baseheight)) / (1 - 0)) + baseheight;
float HeightRemap = (WS_coordinate.y - _FogBaseHeight) / (_FogMaxHeight - _FogBaseHeight);
float heightDensity = lerp(_GlobalExtinction, .001,sqrt(saturate( HeightRemap)) );
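//HeightRemap is 0 at _FogBaseHeight and 1 at _FogMaxHeight; the sqrt gives a softer knee near the base,
//fading extinction from _GlobalExtinction down to ~0.001 with height.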
//accumLighting.a += _GlobalExtinction;
// accumLighting.a += RealtimeMediaSpheres(WS_coordinate.xyz); //Simple density spheres
accumLighting.a += heightDensity;
//accumLighting.a += distance(WS_coordinate.xyz,0) > 3 ? 0:10 ;
accumLighting.rgb *= accumLighting.a;
// accumLighting.rgb += max((float3(rndVal, frac(rndVal+.2), frac(rndVal+.5)) - 0.5) * 0.03,0); //dithering
//Result[id.xyz] = lerp(accumLighting , previousResult, reprojectionAmount); // Temporal sampling ComputeHistoryWeight()
//Doing a simple curve based on depth so nearer slices take longer to converge. This gets rid of the near-camera flickering. There is likely something else to fix; this is just a band-aid solution.
Result[id.xyz] = lerp(accumLighting , previousResult, ComputeHistoryWeight((1 / (id.z * id.z + .4) + 1)) ); // Temporal sampling
}
///Initially this did plain matrix calculations, which worked but made the light scattering less accurate at the edges, so HDRP's method of casting rays was implemented instead.

View file

@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: 62ecf5c506e2b9d4db18e8e96a92107f
ComputeShaderImporter:
externalObjects: {}
currentAPIMask: 4
userData:
assetBundleName:
assetBundleVariant: