本文整理汇总了C#中RenderContext.ThrowIfGBuffer0Missing方法的典型用法代码示例。如果您正苦于以下问题:C# RenderContext.ThrowIfGBuffer0Missing方法的具体用法?C# RenderContext.ThrowIfGBuffer0Missing怎么用?C# RenderContext.ThrowIfGBuffer0Missing使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类RenderContext
的用法示例。
在下文中一共展示了RenderContext.ThrowIfGBuffer0Missing方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: OnProcess
protected override void OnProcess(RenderContext context)
{
context.ThrowIfCameraMissing();
context.ThrowIfGBuffer0Missing();
var graphicsDevice = GraphicsService.GraphicsDevice;
var renderTargetPool = GraphicsService.RenderTargetPool;
var source = context.SourceTexture;
var target = context.RenderTarget;
var viewport = context.Viewport;
// Get temporary render targets.
var sourceSize = new Vector2F(source.Width, source.Height);
var isFloatingPointFormat = TextureHelper.IsFloatingPointFormat(source.Format);
var sceneFormat = new RenderTargetFormat(source.Width, source.Height, false, source.Format, DepthFormat.None);
var maskedScene = renderTargetPool.Obtain2D(sceneFormat);
var rayFormat = new RenderTargetFormat(
Math.Max(1, (int)(sourceSize.X / DownsampleFactor)),
Math.Max(1, (int)(sourceSize.Y / DownsampleFactor)),
false,
source.Format,
DepthFormat.None);
var rayImage0 = renderTargetPool.Obtain2D(rayFormat);
var rayImage1 = renderTargetPool.Obtain2D(rayFormat);
// Get view and view-projection transforms.
var cameraNode = context.CameraNode;
Matrix44F projection = cameraNode.Camera.Projection.ToMatrix44F();
Matrix44F view = cameraNode.View;
Matrix44F viewProjection = projection * view;
// We simply place the light source "far away" in opposite light ray direction.
Vector4F lightPositionWorld = new Vector4F(-LightDirection * 10000, 1);
// Convert to clip space.
Vector4F lightPositionProj = viewProjection * lightPositionWorld;
Vector3F lightPositionClip = Vector4F.HomogeneousDivide(lightPositionProj);
// Convert from clip space [-1, 1] to texture space [0, 1].
Vector2 lightPosition = new Vector2(lightPositionClip.X * 0.5f + 0.5f, -lightPositionClip.Y * 0.5f + 0.5f);
// We use dot²(forward, -LightDirection) as a smooth S-shaped attenuation
// curve to reduce the god ray effect when we look orthogonal or away from the sun.
var lightDirectionView = view.TransformDirection(LightDirection);
float z = Math.Max(0, lightDirectionView.Z);
float attenuation = z * z;
// Common effect parameters.
_parameters0Parameter.SetValue(new Vector4(lightPosition.X, lightPosition.Y, LightRadius * LightRadius, Scale));
_parameters1Parameter.SetValue(new Vector2(Softness, graphicsDevice.Viewport.AspectRatio));
_intensityParameter.SetValue((Vector3)Intensity * attenuation);
_numberOfSamplesParameter.SetValue(NumberOfSamples);
_gBuffer0Parameter.SetValue(context.GBuffer0);
// First, create a scene image where occluders are black.
graphicsDevice.SetRenderTarget(maskedScene);
_viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
_sourceTextureParameter.SetValue(source);
graphicsDevice.SamplerStates[0] = isFloatingPointFormat ? SamplerState.PointClamp : SamplerState.LinearClamp;
graphicsDevice.SamplerStates[1] = SamplerState.PointClamp; // G-Buffer 0.
_createMaskPass.Apply();
graphicsDevice.DrawFullScreenQuad();
// Downsample image.
context.SourceTexture = maskedScene;
context.RenderTarget = rayImage0;
context.Viewport = new Viewport(0, 0, rayImage0.Width, rayImage0.Height);
_downsampleFilter.Process(context);
// Compute light shafts.
_viewportSizeParameter.SetValue(new Vector2(context.Viewport.Width, context.Viewport.Height));
graphicsDevice.SamplerStates[0] = isFloatingPointFormat ? SamplerState.PointClamp : SamplerState.LinearClamp;
for (int i = 0; i < NumberOfPasses; i++)
{
graphicsDevice.SetRenderTarget(rayImage1);
_sourceTextureParameter.SetValue(rayImage0);
_blurPass.Apply();
graphicsDevice.DrawFullScreenQuad();
// Put the current result in variable rayImage0.
MathHelper.Swap(ref rayImage0, ref rayImage1);
}
// Combine light shaft image with scene.
graphicsDevice.SetRenderTarget(target);
graphicsDevice.Viewport = viewport;
_viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
_sourceTextureParameter.SetValue(source);
_rayTextureParameter.SetValue(rayImage0);
graphicsDevice.SamplerStates[0] = isFloatingPointFormat ? SamplerState.PointClamp : SamplerState.LinearClamp;
graphicsDevice.SamplerStates[1] = isFloatingPointFormat ? SamplerState.PointClamp : SamplerState.LinearClamp;
_combinePass.Apply();
graphicsDevice.DrawFullScreenQuad();
// Clean-up
_sourceTextureParameter.SetValue((Texture2D)null);
_gBuffer0Parameter.SetValue((Texture2D)null);
//.........这里部分代码省略.........
示例2: Render
private void Render(RenderContext context, Vector4F color, Texture2D colorTexture, bool preserveColor)
{
  if (context == null)
    throw new ArgumentNullException("context");

  context.Validate(_effect);
  context.ThrowIfCameraMissing();
  context.ThrowIfGBuffer0Missing();

  var device = _effect.GraphicsDevice;

  // Remember the current render state so it can be restored at the end.
  var originalRenderState = new RenderStateSnapshot(device);

  device.DepthStencilState = GraphicsHelper.DepthStencilStateAlways;
  device.RasterizerState = RasterizerState.CullNone;

  // When the caller wants to keep the existing color buffer content, disable
  // color writes entirely; otherwise render opaque.
  device.BlendState = preserveColor
                        ? GraphicsHelper.BlendStateNoColorWrite
                        : BlendState.Opaque;

  if (colorTexture != null)
  {
    // Floating-point textures cannot use hardware filtering.
    device.SamplerStates[1] = TextureHelper.IsFloatingPointFormat(colorTexture.Format)
                                ? SamplerState.PointClamp
                                : SamplerState.LinearClamp;
  }

  var cameraProjection = context.CameraNode.Camera.Projection;
  bool usePerspective = cameraProjection is PerspectiveProjection;

  // Push near/far planes by the configured bias factors and rebuild the
  // projection matrix with the adjusted clip planes.
  float biasedNear = cameraProjection.Near * NearBias;
  float biasedFar = cameraProjection.Far * FarBias;
  Matrix44F biasedProjection;
  if (usePerspective)
  {
    biasedProjection = Matrix44F.CreatePerspectiveOffCenter(
      cameraProjection.Left, cameraProjection.Right,
      cameraProjection.Bottom, cameraProjection.Top,
      biasedNear, biasedFar);
  }
  else
  {
    biasedProjection = Matrix44F.CreateOrthographicOffCenter(
      cameraProjection.Left, cameraProjection.Right,
      cameraProjection.Bottom, cameraProjection.Top,
      biasedNear, biasedFar);
  }

  var currentViewport = device.Viewport;
  _parameterViewportSize.SetValue(new Vector2(currentViewport.Width, currentViewport.Height));
  _parameterProjection.SetValue((Matrix)biasedProjection);
  _parameterCameraFar.SetValue(cameraProjection.Far);
  _parameterGBuffer0.SetValue(context.GBuffer0);
  _parameterColor.SetValue((Vector4)color);
  _parameterSourceTexture.SetValue(colorTexture);

  // Technique depends on the projection type; pass 1 samples the color
  // texture, pass 0 uses the uniform color only.
  _effect.CurrentTechnique = usePerspective ? _techniquePerspective : _techniqueOrthographic;
  int passIndex = (colorTexture != null) ? 1 : 0;
  _effect.CurrentTechnique.Passes[passIndex].Apply();

  device.DrawFullScreenQuad();

  device.ResetTextures();
  originalRenderState.Restore();
}
示例3: OnProcess
protected override void OnProcess(RenderContext context)
{
context.ThrowIfCameraMissing();
var graphicsDevice = GraphicsService.GraphicsDevice;
var renderTargetPool = GraphicsService.RenderTargetPool;
var cameraNode = context.CameraNode;
var source = context.SourceTexture;
var target = context.RenderTarget;
var viewport = context.Viewport;
if (Quality == 0)
{
// No ambient occlusion.
if (!CombineWithSource)
{
// CombineWithSource is not set. --> Simply clear the render target to white.
graphicsDevice.SetRenderTarget(target);
graphicsDevice.Viewport = viewport;
graphicsDevice.Clear(Color.White);
}
else
{
// Copy source image to target.
_copyFilter.Process(context);
}
return;
}
// Try to get downsampled depth buffer from render context.
// If we cannot find it in the render context, we downsample it manually.
Texture2D downsampledDepthTexture = null;
RenderTarget2D downsampledDepthTarget = null;
if (DownsampleFactor == 2)
{
object dummy;
if (context.Data.TryGetValue(RenderContextKeys.DepthBufferHalf, out dummy))
downsampledDepthTexture = dummy as Texture2D;
}
if (downsampledDepthTexture == null)
{
context.ThrowIfGBuffer0Missing();
if (DownsampleFactor == 1)
{
downsampledDepthTexture = context.GBuffer0;
}
else
{
// Downsample manually.
// If we do not downsample the depth target, we get artifacts (strange horizontal and vertical
// lines). TODO: Check what causes the artifacts and try to remove the downsampling.
downsampledDepthTarget = renderTargetPool.Obtain2D(new RenderTargetFormat(
context.GBuffer0.Width / DownsampleFactor,
context.GBuffer0.Height / DownsampleFactor,
false,
context.GBuffer0.Format,
DepthFormat.None));
context.SourceTexture = context.GBuffer0;
context.RenderTarget = downsampledDepthTarget;
context.Viewport = new Viewport(0, 0, downsampledDepthTarget.Width, downsampledDepthTarget.Height);
_downsampleFilter.Process(context);
downsampledDepthTexture = downsampledDepthTarget;
}
}
// We use two temporary render targets.
// We do not use a floating point format because float textures cannot use hardware filtering.
RenderTarget2D temp0;
if (!CombineWithSource && target != null
&& target.Width == context.GBuffer0.Width / DownsampleFactor
&& target.Height == context.GBuffer0.Height / DownsampleFactor
&& Strength < 1)
{
// If we do not have to combine the AO result with the source image, and if the target
// image has the half resolution, then we can use the target image directly and do not
// need a temporary render target.
// Also, a Strength > 1 is always applied in a separate pass because applying a Strength
// > 1 before the blur has no effect.
temp0 = target;
}
else
{
temp0 = renderTargetPool.Obtain2D(new RenderTargetFormat(
context.GBuffer0.Width / DownsampleFactor,
context.GBuffer0.Height / DownsampleFactor,
false,
SurfaceFormat.Color,
DepthFormat.None));
}
// Create SSAO.
graphicsDevice.SetRenderTarget(temp0);
_viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
_farParameter.SetValue(cameraNode.Camera.Projection.Far);
_radiusParameter.SetValue((Vector2)Radii);
//.........这里部分代码省略.........
示例4: OnProcess
protected override void OnProcess(RenderContext context)
{
context.ThrowIfCameraMissing();
var graphicsDevice = GraphicsService.GraphicsDevice;
var renderTargetPool = GraphicsService.RenderTargetPool;
var cameraNode = context.CameraNode;
var source = context.SourceTexture;
var target = context.RenderTarget;
var viewport = context.Viewport;
Projection projection = cameraNode.Camera.Projection;
Matrix44F projMatrix = projection;
float near = projection.Near;
float far = projection.Far;
_frustumInfoParameter.SetValue(new Vector4(
projection.Left / near,
projection.Top / near,
(projection.Right - projection.Left) / near,
(projection.Bottom - projection.Top) / near));
_numberOfAOSamplesParameter.SetValue(NumberOfSamples);
// The height of a 1 unit object 1 unit in front of the camera.
// (Compute 0.5 unit multiply by 2 and divide by 2 to convert from [-1, 1] to [0, 1] range.)
float projectionScale =
projMatrix.TransformPosition(new Vector3F(0, 0.5f, -1)).Y
- projMatrix.TransformPosition(new Vector3F(0, 0, -1)).Y;
_aoParameters0.SetValue(new Vector4(
projectionScale,
Radius,
Strength / (float)Math.Pow(Radius, 6),
Bias));
_aoParameters1.SetValue(new Vector4(
viewport.Width,
viewport.Height,
far,
MaxOcclusion));
_aoParameters2.SetValue(new Vector4(
SampleDistribution,
1.0f / (EdgeSoftness + 0.001f) * far,
BlurScale,
MinBias));
context.ThrowIfGBuffer0Missing();
_gBuffer0Parameter.SetValue(context.GBuffer0);
//var view = cameraNode.View;
//_viewParameter.SetValue((Matrix)view);
//_gBuffer1Parameter.SetValue(context.GBuffer1);
// We use two temporary render targets.
var format = new RenderTargetFormat(
context.Viewport.Width,
context.Viewport.Height,
false,
SurfaceFormat.Color,
DepthFormat.None);
var tempTarget0 = renderTargetPool.Obtain2D(format);
var tempTarget1 = renderTargetPool.Obtain2D(format);
// Create SSAO.
graphicsDevice.SetRenderTarget(tempTarget0);
_createAOPass.Apply();
graphicsDevice.Clear(new Color(1.0f, 1.0f, 1.0f, 1.0f));
graphicsDevice.DrawFullScreenQuad();
// Horizontal blur.
graphicsDevice.SetRenderTarget(tempTarget1);
_occlusionTextureParameter.SetValue(tempTarget0);
_blurHorizontalPass.Apply();
graphicsDevice.DrawFullScreenQuad();
// Vertical blur
graphicsDevice.SetRenderTarget(target);
graphicsDevice.Viewport = viewport;
_occlusionTextureParameter.SetValue(tempTarget1);
if (!CombineWithSource)
{
_blurVerticalPass.Apply();
}
else
{
if (_sourceTextureParameter != null)
_sourceTextureParameter.SetValue(source);
if (TextureHelper.IsFloatingPointFormat(source.Format))
graphicsDevice.SamplerStates[0] = SamplerState.PointClamp;
else
graphicsDevice.SamplerStates[0] = SamplerState.LinearClamp;
_blurVerticalAndCombinePass.Apply();
}
//.........这里部分代码省略.........
示例5: OnProcess
protected override void OnProcess(RenderContext context)
{
  context.ThrowIfCameraMissing();
  context.ThrowIfGBuffer0Missing();

  var device = GraphicsService.GraphicsDevice;
  var camNode = context.CameraNode;
  var cam = camNode.Camera;
  var currentProjection = cam.Projection;

  // Build the matrices for the current and the previous frame. The previous
  // view-projection is needed to compute per-pixel motion vectors.
  Matrix44F currentView = camNode.View;
  Matrix44F inverseView = currentView.Inverse;
  Pose previousPose = camNode.LastPoseWorld ?? camNode.PoseWorld;
  Matrix44F previousView = previousPose.Inverse;
  Matrix44F previousProjection = cam.LastProjection ?? currentProjection;
  Matrix44F previousViewProjection = previousProjection * previousView;

  // Floating-point textures cannot use hardware filtering.
  device.SamplerStates[0] = TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format)
                              ? SamplerState.PointClamp
                              : SamplerState.LinearClamp;

  device.SetRenderTarget(context.RenderTarget);
  device.Viewport = context.Viewport;

  _viewportSizeParameter.SetValue(new Vector2(device.Viewport.Width, device.Viewport.Height));

  // Frustum far corners let the shader reconstruct view-space positions
  // from the depth buffer.
  GraphicsHelper.GetFrustumFarCorners(currentProjection, _cameraFrustumFarCorners);
  _parameterFrustumCorners.SetValue(_cameraFrustumFarCorners);

  _sourceTextureParameter.SetValue(context.SourceTexture);
  _gBuffer0Parameter.SetValue(context.GBuffer0);
  _viewInverseParameter.SetValue((Matrix)inverseView);
  _viewProjOldParameter.SetValue((Matrix)previousViewProjection);
  _numberOfSamplesParameter.SetValue((int)NumberOfSamples);
  _strengthParameter.SetValue(Strength);

  _effect.CurrentTechnique.Passes[0].Apply();
  device.DrawFullScreenQuad();

  // Release texture references so the render targets are not locked.
  _sourceTextureParameter.SetValue((Texture2D)null);
  _gBuffer0Parameter.SetValue((Texture2D)null);
}
示例6: OnProcess
protected override void OnProcess(RenderContext context)
{
var graphicsDevice = GraphicsService.GraphicsDevice;
// Set the render target - but only if no kind of alpha blending is currently set.
// If alpha-blending is set, then we have to assume that the render target is already
// set - everything else does not make sense.
if (graphicsDevice.BlendState.ColorDestinationBlend == Blend.Zero
&& graphicsDevice.BlendState.AlphaDestinationBlend == Blend.Zero)
{
graphicsDevice.SetRenderTarget(context.RenderTarget);
graphicsDevice.Viewport = context.Viewport;
}
Projection projection = null;
if (RebuildZBuffer || Mode == UpsamplingMode.NearestDepth)
{
context.ThrowIfCameraMissing();
projection = context.CameraNode.Camera.Projection;
}
var sourceTexture = context.SourceTexture;
_parameterSourceTexture.SetValue(sourceTexture);
_parameterSourceSize.SetValue(new Vector2(sourceTexture.Width, sourceTexture.Height));
var viewport = context.Viewport;
_parameterTargetSize.SetValue(new Vector2(viewport.Width, viewport.Height));
if (Mode == UpsamplingMode.Bilateral)
_parameterDepthSensitivity.SetValue(DepthSensitivity);
else if (Mode == UpsamplingMode.NearestDepth)
_parameterDepthThreshold.SetValue(DepthThreshold / projection.Far);
int techniqueIndex = (int)Mode;
int passIndex = 0;
if (context.SceneTexture != null)
{
_parameterSceneTexture.SetValue(context.SceneTexture);
passIndex |= 1;
}
if (RebuildZBuffer)
{
passIndex |= 2;
float nearBias = 1;
float farBias = 0.995f;
object obj;
context.Data.TryGetValue(RenderContextKeys.RebuildZBufferRenderer, out obj);
var rebuildZBufferRenderer = obj as RebuildZBufferRenderer;
if (rebuildZBufferRenderer != null)
{
nearBias = rebuildZBufferRenderer.NearBias;
farBias = rebuildZBufferRenderer.FarBias;
}
// Compute biased projection for restoring the z-buffer.
var biasedProjection = Matrix44F.CreatePerspectiveOffCenter(
projection.Left,
projection.Right,
projection.Bottom,
projection.Top,
projection.Near * nearBias,
projection.Far * farBias);
_parameterProjection.SetValue((Matrix)biasedProjection);
_parameterCameraFar.SetValue(projection.Far);
// PostProcessor.ProcessInternal sets the DepthStencilState to None.
// --> Enable depth writes.
graphicsDevice.DepthStencilState = GraphicsHelper.DepthStencilStateAlways;
}
if (RebuildZBuffer || Mode >= UpsamplingMode.Bilateral)
{
context.ThrowIfGBuffer0Missing();
_parameterDepthBuffer.SetValue(context.GBuffer0);
}
if (Mode >= UpsamplingMode.Bilateral)
{
// Render at half resolution into off-screen buffer.
object dummy;
context.Data.TryGetValue(RenderContextKeys.DepthBufferHalf, out dummy);
var depthBufferHalf = dummy as Texture2D;
if (depthBufferHalf == null)
{
string message = "Downsampled depth buffer is not set in render context. (The downsampled "
+ "depth buffer (half width and height) is required by the UpsampleFilter."
+ "It needs to be stored in RenderContext.Data[RenderContextKeys.DepthBufferHalf].)";
throw new GraphicsException(message);
}
_parameterDepthBufferLow.SetValue(depthBufferHalf);
}
_effect.CurrentTechnique = _effect.Techniques[techniqueIndex];
_effect.CurrentTechnique.Passes[passIndex].Apply();
graphicsDevice.DrawFullScreenQuad();
//.........这里部分代码省略.........
示例7: OnProcess
protected override void OnProcess(RenderContext context)
{
var graphicsDevice = GraphicsService.GraphicsDevice;
var renderTargetPool = GraphicsService.RenderTargetPool;
var viewport = context.Viewport;
Vector2 size = new Vector2(viewport.Width, viewport.Height);
// Choose suitable technique.
// We do not have shader for each sample count.
int numberOfSamples = NumberOfSamples;
SetCurrentTechnique(ref numberOfSamples);
// Apply current scale and texture size to offsets.
for (int i = 0; i < NumberOfSamples; i++)
{
_horizontalOffsets[i].X = Offsets[i].X * Scale / size.X;
_horizontalOffsets[i].Y = Offsets[i].Y * Scale / size.Y;
}
// Make sure the other samples are 0 (e.g. if we want 11 samples but the
// next best shader supports only 15 samples).
for (int i = NumberOfSamples; i < numberOfSamples; i++)
{
_horizontalOffsets[i].X = 0;
_horizontalOffsets[i].Y = 0;
Weights[i] = 0;
}
// If we have a separable filter, we initialize _verticalOffsets too.
if (IsSeparable)
{
if (_verticalOffsets == null)
_verticalOffsets = new Vector2[MaxNumberOfSamples];
float aspectRatio = size.X / size.Y;
for (int i = 0; i < NumberOfSamples; i++)
{
_verticalOffsets[i].X = _horizontalOffsets[i].Y * aspectRatio;
_verticalOffsets[i].Y = _horizontalOffsets[i].X * aspectRatio;
}
for (int i = NumberOfSamples; i < numberOfSamples; i++)
{
_verticalOffsets[i].X = 0;
_verticalOffsets[i].Y = 0;
}
}
// Use hardware filtering if possible.
if (TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format))
graphicsDevice.SamplerStates[0] = SamplerState.PointClamp;
else
graphicsDevice.SamplerStates[0] = SamplerState.LinearClamp;
bool isAnisotropic = IsAnisotropic;
bool isBilateral = IsBilateral;
if (FilterInLogSpace)
{
// Anisotropic and bilateral filtering in log-space is not implemented.
isAnisotropic = false;
isBilateral = false;
}
else
{
if (isAnisotropic || isBilateral)
{
context.ThrowIfCameraMissing();
var cameraNode = context.CameraNode;
var projection = cameraNode.Camera.Projection;
float far = projection.Far;
GraphicsHelper.GetFrustumFarCorners(cameraNode.Camera.Projection, _frustumFarCorners);
_parameterFrustumCorners.SetValue(_frustumFarCorners);
_parameterBlurParameters0.SetValue(new Vector4(
far,
viewport.AspectRatio,
1.0f / (EdgeSoftness + 0.001f) * far,
DepthScaling));
context.ThrowIfGBuffer0Missing();
Texture2D depthBuffer = context.GBuffer0;
if (viewport.Width < depthBuffer.Width && viewport.Height < depthBuffer.Height)
{
// Use half-resolution depth buffer.
object obj;
if (context.Data.TryGetValue(RenderContextKeys.DepthBufferHalf, out obj))
{
var depthBufferHalf = obj as Texture2D;
if (depthBufferHalf != null)
depthBuffer = depthBufferHalf;
}
}
_parameterGBuffer0.SetValue(depthBuffer);
}
}
_parameterViewportSize.SetValue(size);
//.........这里部分代码省略.........
示例8: OnProcess
protected override void OnProcess(RenderContext context)
{
context.ThrowIfCameraMissing();
context.ThrowIfGBuffer0Missing();
var graphicsDevice = GraphicsService.GraphicsDevice;
var cameraNode = context.CameraNode;
var renderTargetPool = GraphicsService.RenderTargetPool;
var source = context.SourceTexture;
var target = context.RenderTarget;
var viewport = context.Viewport;
var sourceSize = new Vector2F(source.Width, source.Height);
int width = (int)sourceSize.X;
int height = (int)sourceSize.Y;
int downsampledWidth = Math.Max(1, width / DownsampleFactor);
int downsampledHeight = Math.Max(1, height / DownsampleFactor);
if (TextureHelper.IsFloatingPointFormat(source.Format))
{
graphicsDevice.SamplerStates[0] = SamplerState.PointClamp;
InitializeGaussianBlur(new Vector2F(downsampledWidth, downsampledHeight), false);
}
else
{
graphicsDevice.SamplerStates[0] = SamplerState.LinearClamp;
InitializeGaussianBlur(new Vector2F(downsampledWidth, downsampledHeight), true);
}
// Get temporary render targets.
var downsampleFormat = new RenderTargetFormat(downsampledWidth, downsampledHeight, false, source.Format, DepthFormat.None);
RenderTarget2D blurredScene0 = renderTargetPool.Obtain2D(downsampleFormat);
RenderTarget2D blurredScene1 = renderTargetPool.Obtain2D(downsampleFormat);
var blurredDepthFormat = new RenderTargetFormat(downsampledWidth, downsampledHeight, false, context.GBuffer0.Format, DepthFormat.None);
RenderTarget2D blurredDepth0 = renderTargetPool.Obtain2D(blurredDepthFormat);
var cocFormat = new RenderTargetFormat(width, height, false, SurfaceFormat.Single, DepthFormat.None);
RenderTarget2D cocImage = renderTargetPool.Obtain2D(cocFormat);
var downSampledCocFormat = new RenderTargetFormat(downsampledWidth, downsampledHeight, false, cocFormat.SurfaceFormat, DepthFormat.None);
RenderTarget2D cocImageBlurred = renderTargetPool.Obtain2D(downSampledCocFormat);
// ----- Create CoC map.
_effect.CurrentTechnique = _effect.Techniques[0];
graphicsDevice.SetRenderTarget(cocImage);
_screenSizeParameter.SetValue(new Vector2(cocImage.Width, cocImage.Height));
_depthTextureParameter.SetValue(context.GBuffer0);
_nearBlurDistanceParameter.SetValue(NearBlurDistance);
_nearFocusDistanceParameter.SetValue(NearFocusDistance);
_farFocusDistanceParameter.SetValue(FarFocusDistance);
_farBlurDistanceParameter.SetValue(FarBlurDistance);
_farParameter.SetValue(cameraNode.Camera.Projection.Far);
_circleOfConfusionPass.Apply();
graphicsDevice.DrawFullScreenQuad();
// ----- Downsample cocImage to cocImageBlurred.
context.SourceTexture = cocImage;
context.RenderTarget = cocImageBlurred;
context.Viewport = new Viewport(0, 0, cocImageBlurred.Width, cocImageBlurred.Height);
_downsampleFilter.Process(context);
renderTargetPool.Recycle(cocImage);
// ----- Downsample source to blurredScene0.
context.SourceTexture = source;
context.RenderTarget = blurredScene0;
context.Viewport = new Viewport(0, 0, blurredScene0.Width, blurredScene0.Height);
_downsampleFilter.Process(context);
// ----- Downsample depth texture to blurredDepth0.
context.SourceTexture = context.GBuffer0;
context.RenderTarget = blurredDepth0;
context.Viewport = new Viewport(0, 0, blurredDepth0.Width, blurredDepth0.Height);
_downsampleFilter.Process(context);
// ----- Blur scene.
// Horizontal blur
graphicsDevice.SetRenderTarget(blurredScene1);
_screenSizeParameter.SetValue(new Vector2(blurredScene0.Width, blurredScene0.Height));
_blurTextureParameter.SetValue(blurredScene0);
_downsampledDepthTextureParameter.SetValue(blurredDepth0);
_downsampledCocTextureParameter.SetValue(cocImageBlurred);
_offsetsParameter.SetValue(_horizontalOffsets);
_weightsParameter.SetValue(_weights);
_blurPass.Apply();
graphicsDevice.DrawFullScreenQuad();
// Vertical blur.
graphicsDevice.SetRenderTarget(blurredScene0);
_blurTextureParameter.SetValue(blurredScene1);
_offsetsParameter.SetValue(_verticalOffsets);
_blurPass.Apply();
graphicsDevice.DrawFullScreenQuad();
renderTargetPool.Recycle(blurredScene1);
// ----- Blur cocImageBlurred.
context.SourceTexture = cocImageBlurred;
//.........这里部分代码省略.........
示例9: OnProcess
protected override void OnProcess(RenderContext context)
{
  context.ThrowIfCameraMissing();
  context.ThrowIfGBuffer0Missing();
  context.ThrowIfGBuffer1Missing();

  var device = GraphicsService.GraphicsDevice;
  var input = context.SourceTexture;
  var output = context.RenderTarget;
  var outputViewport = context.Viewport;

  // Floating-point textures cannot use hardware filtering.
  device.SamplerStates[0] = TextureHelper.IsFloatingPointFormat(input.Format)
                              ? SamplerState.PointClamp
                              : SamplerState.LinearClamp;

  device.SetRenderTarget(output);
  device.Viewport = outputViewport;

  _parameterViewportSize.SetValue(new Vector2(outputViewport.Width, outputViewport.Height));
  _parameterHalfEdgeWidth.SetValue(_halfEdgeWidth);
  _parameterDepthThreshold.SetValue(DepthThreshold);
  _parameterDepthSensitivity.SetValue(DepthSensitivity);
  _parameterNormalThreshold.SetValue(NormalThreshold);
  _parameterNormalSensitivity.SetValue(NormalSensitivity);
  _parameterCameraBackward.SetValue((Vector3)(context.CameraNode.ViewInverse.GetColumn(2).XYZ));
  _parameterSilhouetteColor.SetValue((Vector4)SilhouetteColor);
  _parameterCreaseColor.SetValue((Vector4)CreaseColor);

  if (_parameterSourceTexture != null)
    _parameterSourceTexture.SetValue(input);
  if (_parameterGBuffer0 != null)
    _parameterGBuffer0.SetValue(context.GBuffer0);
  if (_parameterGBuffer1 != null)
    _parameterGBuffer1.SetValue(context.GBuffer1);

  // Thin edges (<= 0.5 px half-width) use the cheaper one-pixel edge pass.
  EffectPass selectedPass;
  if (Numeric.IsLessOrEqual(_halfEdgeWidth, 0.5f))
    selectedPass = _passOnePixelEdge;
  else
    selectedPass = _passEdge;
  selectedPass.Apply();

  device.DrawFullScreenQuad();

  // Release texture references so the render targets are not locked.
  if (_parameterSourceTexture != null)
    _parameterSourceTexture.SetValue((Texture2D)null);
  if (_parameterGBuffer0 != null)
    _parameterGBuffer0.SetValue((Texture2D)null);
  if (_parameterGBuffer1 != null)
    _parameterGBuffer1.SetValue((Texture2D)null);

  context.SourceTexture = input;
  context.RenderTarget = output;
  context.Viewport = outputViewport;
}
示例10: OnProcess
protected override void OnProcess(RenderContext context)
{
var graphicsDevice = GraphicsService.GraphicsDevice;
var renderTargetPool = GraphicsService.RenderTargetPool;
// Get velocity buffers from render context.
object value0, value1;
context.Data.TryGetValue(RenderContextKeys.VelocityBuffer, out value0);
context.Data.TryGetValue(RenderContextKeys.LastVelocityBuffer, out value1);
var velocityBuffer0 = value0 as Texture2D;
var velocityBuffer1 = value1 as Texture2D;
if (velocityBuffer0 == null)
throw new GraphicsException("VelocityBuffer needs to be set in the render context (RenderContext.Data[\"VelocityBuffer\"]).");
if (TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format))
graphicsDevice.SamplerStates[0] = SamplerState.PointClamp;
else
graphicsDevice.SamplerStates[0] = SamplerState.LinearClamp;
if (!SoftenEdges)
{
// ----- Motion blur using one or two velocity buffers
_effect.CurrentTechnique = _effect.Techniques[0];
graphicsDevice.SetRenderTarget(context.RenderTarget);
graphicsDevice.Viewport = context.Viewport;
_viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
_sourceTextureParameter.SetValue(context.SourceTexture);
_numberOfSamplesParameter.SetValue((int)NumberOfSamples);
_velocityTextureParameter.SetValue(velocityBuffer0);
if (TextureHelper.IsFloatingPointFormat(velocityBuffer0.Format))
graphicsDevice.SamplerStates[1] = SamplerState.PointClamp;
else
graphicsDevice.SamplerStates[1] = SamplerState.LinearClamp;
if (velocityBuffer1 == null || !UseLastVelocityBuffer)
{
_singlePass.Apply();
}
else
{
_velocityTexture2Parameter.SetValue(velocityBuffer1);
if (TextureHelper.IsFloatingPointFormat(velocityBuffer1.Format))
graphicsDevice.SamplerStates[2] = SamplerState.PointClamp;
else
graphicsDevice.SamplerStates[2] = SamplerState.LinearClamp;
_dualPass.Apply();
}
graphicsDevice.DrawFullScreenQuad();
}
else
{
// ----- Advanced motion blur (based on paper "A Reconstruction Filter for Plausible Motion Blur")
context.ThrowIfCameraMissing();
context.ThrowIfGBuffer0Missing();
// The width/height of the current velocity input.
int sourceWidth;
int sourceHeight;
if (context.RenderTarget != null)
{
sourceWidth = velocityBuffer0.Width;
sourceHeight = velocityBuffer0.Height;
}
else
{
sourceWidth = context.Viewport.Width;
sourceHeight = context.Viewport.Height;
}
// The downsampled target width/height.
int targetWidth = Math.Max(1, (int)(sourceWidth / MaxBlurRadius));
int targetHeight = Math.Max(1, (int)(sourceHeight / MaxBlurRadius));
var tempFormat = new RenderTargetFormat(targetWidth, targetHeight, false, SurfaceFormat.Color, DepthFormat.None);
RenderTarget2D temp0 = renderTargetPool.Obtain2D(tempFormat);
RenderTarget2D temp1 = renderTargetPool.Obtain2D(tempFormat);
// ----- Downsample max velocity buffer
_effect.CurrentTechnique = _effect.Techniques[0];
_maxBlurRadiusParameter.SetValue(new Vector2(MaxBlurRadius / sourceWidth, MaxBlurRadius / sourceHeight));
Texture2D currentVelocityBuffer = velocityBuffer0;
bool isFinalPass;
do
{
// Downsample to this target size.
sourceWidth = Math.Max(targetWidth, sourceWidth / 2);
sourceHeight = Math.Max(targetHeight, sourceHeight / 2);
// Is this the final downsample pass?
isFinalPass = (sourceWidth <= targetWidth && sourceHeight <= targetHeight);
// Get temporary render target for intermediate steps.
RenderTarget2D temp = null;
if (!isFinalPass)
{
tempFormat.Width = sourceWidth;
tempFormat.Height = sourceHeight;
//.........这里部分代码省略.........
示例11: Render
public override void Render(IList<SceneNode> nodes, RenderContext context, RenderOrder order)
{
ThrowIfDisposed();
if (nodes == null)
throw new ArgumentNullException("nodes");
if (context == null)
throw new ArgumentNullException("context");
context.Validate(_effect);
context.ThrowIfCameraMissing();
context.ThrowIfGBuffer0Missing();
// Fog is not used in all games. --> Early out, if possible.
int numberOfNodes = nodes.Count;
if (nodes.Count == 0)
return;
if (nodes.Count > 1)
{
// Get a sorted list of all fog nodes.
if (_fogNodes == null)
_fogNodes = new List<SceneNode>();
_fogNodes.Clear();
for (int i = 0; i < numberOfNodes; i++)
{
var node = nodes[i] as FogNode;
if (node != null)
{
_fogNodes.Add(node);
node.SortTag = node.Priority;
}
}
// Sort ascending. (Fog with lower priority is rendered first.)
// Note: Since this list is a list of SceneNodes, we use the AscendingNodeComparer
// instead of the AscendingFogNodeComparer. The Priority was written to the SortTag,
// so this will work.
_fogNodes.Sort(AscendingNodeComparer.Instance);
nodes = _fogNodes;
numberOfNodes = _fogNodes.Count;
}
var graphicsDevice = _effect.GraphicsDevice;
var savedRenderState = new RenderStateSnapshot(graphicsDevice);
graphicsDevice.RasterizerState = RasterizerState.CullNone;
graphicsDevice.DepthStencilState = DepthStencilState.None;
graphicsDevice.BlendState = BlendState.AlphaBlend;
var viewport = graphicsDevice.Viewport;
_parameterViewportSize.SetValue(new Vector2(viewport.Width, viewport.Height));
var cameraNode = context.CameraNode;
var cameraPose = cameraNode.PoseWorld;
GraphicsHelper.GetFrustumFarCorners(cameraNode.Camera.Projection, _cameraFrustumFarCorners);
// Convert frustum far corners from view space to world space.
for (int i = 0; i < _cameraFrustumFarCorners.Length; i++)
_cameraFrustumFarCorners[i] = (Vector3)cameraPose.ToWorldDirection((Vector3F)_cameraFrustumFarCorners[i]);
_parameterFrustumCorners.SetValue(_cameraFrustumFarCorners);
_parameterGBuffer0.SetValue(context.GBuffer0);
// Update SceneNode.LastFrame for all visible nodes.
int frame = context.Frame;
cameraNode.LastFrame = frame;
bool directionalLightIsSet = false;
float scatteringSymmetryStrength = 1;
for (int i = 0; i < numberOfNodes; i++)
{
var node = nodes[i] as FogNode;
if (node == null)
continue;
// FogNode is visible in current frame.
node.LastFrame = frame;
var fog = node.Fog;
if (fog.Density <= Numeric.EpsilonF)
continue;
// Compute actual density and falloff.
float fogDensity = fog.Density;
float heightFalloff = fog.HeightFalloff;
// In previous versions, we gave FogDensity * 2^(-h*y) to the effect. Following code
// avoids numerical problems where this value is numerically 0. This is now handled
// in the shader.
//if (!Numeric.IsZero(heightFalloff))
//{
// float cameraDensity = (float)Math.Pow(2, -heightFalloff * cameraPose.Position.Y);
// // Trick: If the heightFalloff is very large, the e^x function can quickly reach
// // the float limit! If this happens, the shader will not compute any fog and this
// // looks like the fog disappears. To avoid this problem we reduce the heightFalloff
// // to keep the result of e^x always within floating point range.
// const float Limit = 1e-37f;
// if (cameraDensity < Limit)
//.........这里部分代码省略.........