

C# RenderContext.ThrowIfCameraMissing Method Code Examples

This article collects typical usage examples of the C# method RenderContext.ThrowIfCameraMissing. If you are wondering what this method does, how to call it, or where to find working examples, the curated snippets below should help. You can also browse further usage examples for the enclosing RenderContext class.


The following shows 15 code examples of the RenderContext.ThrowIfCameraMissing method, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better C# code examples.
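
All of the renderers below follow the same guard pattern: they validate their effect, call ThrowIfCameraMissing, and only then read context.CameraNode.View and context.CameraNode.Camera.Projection. For orientation, here is a minimal sketch of what such a guard looks like. It is an illustration only, written as an extension method; in DigitalRune the real method is a member of RenderContext, and its exact exception type and message may differ.

    using System;
    using DigitalRune.Graphics;

    // Minimal sketch of the camera guard used throughout the examples below.
    // Illustration only: in DigitalRune, ThrowIfCameraMissing is a member of
    // RenderContext, and the exact exception type/message may differ.
    internal static class RenderContextGuards
    {
        public static void ThrowIfCameraMissing(this RenderContext context)
        {
            if (context == null)
                throw new ArgumentNullException("context");

            // The renderers read context.CameraNode.View and
            // context.CameraNode.Camera.Projection, so a missing camera node
            // must be reported before any drawing starts.
            if (context.CameraNode == null)
                throw new GraphicsException("CameraNode needs to be set in the render context.");
        }
    }

A renderer then simply calls context.ThrowIfCameraMissing() at the top of its Render or OnProcess method, as every example below does.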

Example 1: Render

        /// <summary>
        /// Draws the lines.
        /// </summary>
        /// <param name="context">The render context.</param>
        /// <remarks>
        /// If <see cref="Effect"/> is <see langword="null"/>, then <see cref="Render"/> does nothing.
        /// </remarks>
        /// <exception cref="ArgumentNullException">
        /// <paramref name="context"/> is <see langword="null"/>.
        /// </exception>
        public void Render(RenderContext context)
        {
            if (context == null)
                throw new ArgumentNullException("context");

            if (Effect == null)
                return;

            if (_numberOfLines <= 0)
                return;

            context.Validate(Effect);
            context.ThrowIfCameraMissing();

            // Reset the texture stages. If a floating point texture is set, we get exceptions
            // when a sampler with bilinear filtering is set.
            var graphicsDevice = context.GraphicsService.GraphicsDevice;
            graphicsDevice.ResetTextures();

            // Effect parameters.
            Effect.Alpha = 1;
            Effect.DiffuseColor = Color.White.ToVector3();
            Effect.LightingEnabled = false;
            Effect.TextureEnabled = false;
            Effect.VertexColorEnabled = true;
            Effect.World = Matrix.Identity;
            Effect.View = (Matrix)context.CameraNode.View;
            Effect.Projection = context.CameraNode.Camera.Projection;
            Effect.CurrentTechnique.Passes[0].Apply();

            // Submit lines. The loop is only needed if we have more lines than can be
            // submitted with one draw call.
            var startLineIndex = 0;
            var maxPrimitivesPerCall = graphicsDevice.GetMaxPrimitivesPerCall();
            while (startLineIndex < _numberOfLines)
            {
                // Number of lines in this batch.
                int linesPerBatch = Math.Min(_numberOfLines - startLineIndex, maxPrimitivesPerCall);

                // Draw lines.
                graphicsDevice.DrawUserPrimitives(PrimitiveType.LineList, _buffer, startLineIndex * 2, linesPerBatch);

                startLineIndex += linesPerBatch;
            }
        }
Developer: Zolniu, Project: DigitalRune, Lines: 55, Source: LineBatch.cs

Example 2: Render

        public override void Render(IList<SceneNode> nodes, RenderContext context, RenderOrder order)
        {
            if (nodes == null)
            throw new ArgumentNullException("nodes");
              if (context == null)
            throw new ArgumentNullException("context");

              int numberOfNodes = nodes.Count;
              if (numberOfNodes == 0)
            return;

              context.Validate(_effect);
              context.ThrowIfCameraMissing();

              var graphicsDevice = _effect.GraphicsDevice;
              var savedRenderState = new RenderStateSnapshot(graphicsDevice);
              graphicsDevice.DepthStencilState = DepthStencilState.None;
              graphicsDevice.RasterizerState = RasterizerState.CullNone;

              // Set camera properties.
              var cameraNode = context.CameraNode;
              var cameraPose = cameraNode.PoseWorld;
              Matrix viewInverse = cameraPose;
              _parameterViewInverse.SetValue(viewInverse);
              _parameterGBuffer0.SetValue(context.GBuffer0);

              Viewport viewport = context.Viewport;
              _parameterParameters0.SetValue(new Vector2(viewport.Width, viewport.Height));

              // Set jitter map.
              if (_jitterMap == null)
            _jitterMap = NoiseHelper.GetGrainTexture(context.GraphicsService, NoiseHelper.DefaultJitterMapWidth);

              _parameterJitterMap.SetValue(_jitterMap);

              float cameraFar = context.CameraNode.Camera.Projection.Far;

              for (int i = 0; i < numberOfNodes; i++)
              {
            var lightNode = nodes[i] as LightNode;
            if (lightNode == null)
              continue;

            var shadow = lightNode.Shadow as CascadedShadow;
            if (shadow == null)
              continue;

            if (shadow.ShadowMap == null || shadow.ShadowMask == null)
              continue;

            // The effect must only render in a specific channel.
            // Do not change the blend state if the correct write channel is already set, e.g. if this
            // shadow is part of a CompositeShadow, the correct blend state is already set.
            if ((int)graphicsDevice.BlendState.ColorWriteChannels != (1 << shadow.ShadowMaskChannel))
              graphicsDevice.BlendState = GraphicsHelper.BlendStateWriteSingleChannel[shadow.ShadowMaskChannel];

            _parameterParameters1.SetValue(new Vector4(
              shadow.FadeOutRange,
              shadow.Distances[shadow.NumberOfCascades - 1],
              shadow.VisualizeCascades ? 1 : 0,
              shadow.ShadowFog));

            float filterRadius = shadow.FilterRadius;

            // If we use a subset of the Poisson kernel, we have to normalize the scale.
            int numberOfSamples = Math.Min(shadow.NumberOfSamples, StandardShadowMaskRenderer.PoissonKernel.Length);

            // Not all shader passes support cascade visualization. Use a similar pass instead.
            if (shadow.VisualizeCascades)
            {
              if (numberOfSamples < 0)
              {
            numberOfSamples = 4;
              }
              else if (numberOfSamples == 0)
              {
            numberOfSamples = 1;
            filterRadius = 0;
              }
            }

            // The best dithered CSM supports max 22 samples.
            if (shadow.CascadeSelection == ShadowCascadeSelection.BestDithered && numberOfSamples > 22)
              numberOfSamples = 22;

            if (numberOfSamples > 0)
              filterRadius /= StandardShadowMaskRenderer.PoissonKernel[numberOfSamples - 1].Length();

            _parameterParameters2.SetValue(new Vector4(
              shadow.ShadowMap.Width,
              shadow.ShadowMap.Height,
              filterRadius,
              // The StandardShadow.JitterResolution is the number of texels per world unit.
              // In the shader the parameter JitterResolution contains the division by the jitter map size.
              shadow.JitterResolution / _jitterMap.Width));

            // Split distances.
            if (_parameterDistances != null)
            {
              // Set not used entries to large values.
//......... (remainder of the code omitted) .........
Developer: Zolniu, Project: DigitalRune, Lines: 101, Source: CascadedShadowMaskRenderer.cs

Example 3: Render

        private void Render(RenderContext context, Vector4F color, Texture2D colorTexture, bool preserveColor)
        {
            if (context == null)
                throw new ArgumentNullException("context");

            context.Validate(_effect);
            context.ThrowIfCameraMissing();
            context.ThrowIfGBuffer0Missing();

            var graphicsDevice = _effect.GraphicsDevice;
            var savedRenderState = new RenderStateSnapshot(graphicsDevice);
            graphicsDevice.DepthStencilState = GraphicsHelper.DepthStencilStateAlways;
            graphicsDevice.RasterizerState = RasterizerState.CullNone;

            if (preserveColor)
                graphicsDevice.BlendState = GraphicsHelper.BlendStateNoColorWrite;
            else
                graphicsDevice.BlendState = BlendState.Opaque;

            if (colorTexture != null)
            {
                if (TextureHelper.IsFloatingPointFormat(colorTexture.Format))
                    graphicsDevice.SamplerStates[1] = SamplerState.PointClamp;
                else
                    graphicsDevice.SamplerStates[1] = SamplerState.LinearClamp;
            }

            var projection = context.CameraNode.Camera.Projection;
            bool isPerspective = projection is PerspectiveProjection;
            float near = projection.Near * NearBias;
            float far = projection.Far * FarBias;
            var biasedProjection = isPerspective
                                   ? Matrix44F.CreatePerspectiveOffCenter(
                                       projection.Left, projection.Right,
                                       projection.Bottom, projection.Top,
                                       near, far)
                                   : Matrix44F.CreateOrthographicOffCenter(
                                       projection.Left, projection.Right,
                                       projection.Bottom, projection.Top,
                                       near, far);

            var viewport = graphicsDevice.Viewport;
            _parameterViewportSize.SetValue(new Vector2(viewport.Width, viewport.Height));
            _parameterProjection.SetValue((Matrix)biasedProjection);
            _parameterCameraFar.SetValue(projection.Far);
            _parameterGBuffer0.SetValue(context.GBuffer0);
            _parameterColor.SetValue((Vector4)color);
            _parameterSourceTexture.SetValue(colorTexture);

            _effect.CurrentTechnique = isPerspective ? _techniquePerspective : _techniqueOrthographic;
            _effect.CurrentTechnique.Passes[(colorTexture == null) ? 0 : 1].Apply();

            graphicsDevice.DrawFullScreenQuad();

            graphicsDevice.ResetTextures();

            savedRenderState.Restore();
        }
Developer: Zolniu, Project: DigitalRune, Lines: 58, Source: RebuildZBufferRenderer.cs

Example 4: Render

        public override void Render(IList<SceneNode> nodes, RenderContext context, RenderOrder order)
        {
            if (nodes == null)
            throw new ArgumentNullException("nodes");
              if (context == null)
            throw new ArgumentNullException("context");

              int numberOfNodes = nodes.Count;
              if (numberOfNodes == 0)
            return;

              context.Validate(_effect);
              context.ThrowIfCameraMissing();

              var graphicsDevice = _effect.GraphicsDevice;
              var savedRenderState = new RenderStateSnapshot(graphicsDevice);
              graphicsDevice.DepthStencilState = DepthStencilState.None;
              graphicsDevice.RasterizerState = RasterizerState.CullNone;
              graphicsDevice.BlendState = GraphicsHelper.BlendStateAdd;

              var viewport = graphicsDevice.Viewport;
              _parameterViewportSize.SetValue(new Vector2(viewport.Width, viewport.Height));
              _parameterGBuffer0.SetValue(context.GBuffer0);
              _parameterGBuffer1.SetValue(context.GBuffer1);

              var cameraNode = context.CameraNode;
              var cameraPose = cameraNode.PoseWorld;
              Matrix viewProjection = (Matrix)cameraNode.View * cameraNode.Camera.Projection;

              // Update SceneNode.LastFrame for all visible nodes.
              int frame = context.Frame;
              cameraNode.LastFrame = frame;

              var isHdrEnabled = context.IsHdrEnabled();
              for (int i = 0; i < numberOfNodes; i++)
              {
            var lightNode = nodes[i] as LightNode;
            if (lightNode == null)
              continue;

            var light = lightNode.Light as ProjectorLight;
            if (light == null)
              continue;

            // LightNode is visible in current frame.
            lightNode.LastFrame = frame;

            float hdrScale = isHdrEnabled ? light.HdrScale : 1;
            _parameterDiffuseColor.SetValue((Vector3)light.Color * light.DiffuseIntensity * hdrScale);
            _parameterSpecularColor.SetValue((Vector3)light.Color * light.SpecularIntensity * hdrScale);
            _parameterTexture.SetValue(light.Texture);

            var lightPose = lightNode.PoseWorld;
            _parameterPosition.SetValue((Vector3)(lightPose.Position - cameraPose.Position));

            _parameterRange.SetValue(light.Projection.Far);
            _parameterAttenuation.SetValue(light.Attenuation);
            _parameterTextureMatrix.SetValue((Matrix)(GraphicsHelper.ProjectorBiasMatrix * light.Projection * (lightPose.Inverse * new Pose(cameraPose.Position))));

            var rectangle = GraphicsHelper.GetViewportRectangle(cameraNode, viewport, lightNode);
            var texCoordTopLeft = new Vector2F(rectangle.Left / (float)viewport.Width, rectangle.Top / (float)viewport.Height);
            var texCoordBottomRight = new Vector2F(rectangle.Right / (float)viewport.Width, rectangle.Bottom / (float)viewport.Height);
            GraphicsHelper.GetFrustumFarCorners(cameraNode.Camera.Projection, texCoordTopLeft, texCoordBottomRight, _frustumFarCorners);

            // Convert frustum far corners from view space to world space.
            for (int j = 0; j < _frustumFarCorners.Length; j++)
              _frustumFarCorners[j] = (Vector3)cameraPose.ToWorldDirection((Vector3F)_frustumFarCorners[j]);

            _parameterFrustumCorners.SetValue(_frustumFarCorners);

            bool hasShadow = (lightNode.Shadow != null && lightNode.Shadow.ShadowMask != null);
            if (hasShadow)
            {
              switch (lightNode.Shadow.ShadowMaskChannel)
              {
            case 0: _parameterShadowMaskChannel.SetValue(new Vector4(1, 0, 0, 0)); break;
            case 1: _parameterShadowMaskChannel.SetValue(new Vector4(0, 1, 0, 0)); break;
            case 2: _parameterShadowMaskChannel.SetValue(new Vector4(0, 0, 1, 0)); break;
            default: _parameterShadowMaskChannel.SetValue(new Vector4(0, 0, 0, 1)); break;
              }

              _parameterShadowMask.SetValue(lightNode.Shadow.ShadowMask);
            }

            if (lightNode.Clip != null)
            {
              var data = lightNode.RenderData as LightRenderData;
              if (data == null)
              {
            data = new LightRenderData();
            lightNode.RenderData = data;
              }

              data.UpdateClipSubmesh(context.GraphicsService, lightNode);

              graphicsDevice.DepthStencilState = GraphicsHelper.DepthStencilStateOnePassStencilFail;
              graphicsDevice.BlendState = GraphicsHelper.BlendStateNoColorWrite;

              _parameterWorldViewProjection.SetValue((Matrix)data.ClipMatrix * viewProjection);
              _passClip.Apply();
//......... (remainder of the code omitted) .........
Developer: Zolniu, Project: DigitalRune, Lines: 101, Source: ProjectorLightRenderer.cs

Example 5: OnProcess

        protected override void OnProcess(RenderContext context)
        {
            context.ThrowIfCameraMissing();

              var graphicsDevice = GraphicsService.GraphicsDevice;
              var renderTargetPool = GraphicsService.RenderTargetPool;
              var cameraNode = context.CameraNode;

              var source = context.SourceTexture;
              var target = context.RenderTarget;
              var viewport = context.Viewport;

              if (Quality == 0)
              {
            // No ambient occlusion.
            if (!CombineWithSource)
            {
              // CombineWithSource is not set. --> Simply clear the render target to white.
              graphicsDevice.SetRenderTarget(target);
              graphicsDevice.Viewport = viewport;
              graphicsDevice.Clear(Color.White);
            }
            else
            {
              // Copy source image to target.
              _copyFilter.Process(context);
            }
            return;
              }

              // Try to get downsampled depth buffer from render context.
              // If we cannot find it in the render context, we downsample it manually.
              Texture2D downsampledDepthTexture = null;
              RenderTarget2D downsampledDepthTarget = null;
              if (DownsampleFactor == 2)
              {
            object dummy;
            if (context.Data.TryGetValue(RenderContextKeys.DepthBufferHalf, out dummy))
              downsampledDepthTexture = dummy as Texture2D;
              }

              if (downsampledDepthTexture == null)
              {
            context.ThrowIfGBuffer0Missing();

            if (DownsampleFactor == 1)
            {
              downsampledDepthTexture = context.GBuffer0;
            }
            else
            {
              // Downsample manually.
              // If we do not downsample the depth target, we get artifacts (strange horizontal and vertical
              // lines). TODO: Check what causes the artifacts and try to remove the downsampling.
              downsampledDepthTarget = renderTargetPool.Obtain2D(new RenderTargetFormat(
            context.GBuffer0.Width / DownsampleFactor,
            context.GBuffer0.Height / DownsampleFactor,
            false,
            context.GBuffer0.Format,
            DepthFormat.None));
              context.SourceTexture = context.GBuffer0;
              context.RenderTarget = downsampledDepthTarget;
              context.Viewport = new Viewport(0, 0, downsampledDepthTarget.Width, downsampledDepthTarget.Height);
              _downsampleFilter.Process(context);
              downsampledDepthTexture = downsampledDepthTarget;
            }
              }

              // We use two temporary render targets.
              // We do not use a floating point format because float textures cannot use hardware filtering.

              RenderTarget2D temp0;
              if (!CombineWithSource && target != null
              && target.Width == context.GBuffer0.Width / DownsampleFactor
              && target.Height == context.GBuffer0.Height / DownsampleFactor
              && Strength < 1)
              {
            // If we do not have to combine the AO result with the source image, and if the target
            // image has the half resolution, then we can use the target image directly and do not
            // need a temporary render target.
            // Also, a Strength > 1 is always applied in a separate pass because applying a Strength
            // > 1 before the blur has no effect.

            temp0 = target;
              }
              else
              {
            temp0 = renderTargetPool.Obtain2D(new RenderTargetFormat(
              context.GBuffer0.Width / DownsampleFactor,
              context.GBuffer0.Height / DownsampleFactor,
              false,
              SurfaceFormat.Color,
              DepthFormat.None));
              }

              // Create SSAO.
              graphicsDevice.SetRenderTarget(temp0);
              _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
              _farParameter.SetValue(cameraNode.Camera.Projection.Far);
              _radiusParameter.SetValue((Vector2)Radii);
//......... (remainder of the code omitted) .........
Developer: Zolniu, Project: DigitalRune, Lines: 101, Source: SsaoFilter.cs

Example 6: OnProcess

        protected override void OnProcess(RenderContext context)
        {
            var graphicsDevice = GraphicsService.GraphicsDevice;
              var renderTargetPool = GraphicsService.RenderTargetPool;

              var viewport = context.Viewport;
              Vector2 size = new Vector2(viewport.Width, viewport.Height);

              // Choose suitable technique.
              // We do not have shader for each sample count.
              int numberOfSamples = NumberOfSamples;
              SetCurrentTechnique(ref numberOfSamples);

              // Apply current scale and texture size to offsets.
              for (int i = 0; i < NumberOfSamples; i++)
              {
            _horizontalOffsets[i].X = Offsets[i].X * Scale / size.X;
            _horizontalOffsets[i].Y = Offsets[i].Y * Scale / size.Y;
              }

              // Make sure the other samples are 0 (e.g. if we want 11 samples but the
              // next best shader supports only 15 samples).
              for (int i = NumberOfSamples; i < numberOfSamples; i++)
              {
            _horizontalOffsets[i].X = 0;
            _horizontalOffsets[i].Y = 0;
            Weights[i] = 0;
              }

              // If we have a separable filter, we initialize _verticalOffsets too.
              if (IsSeparable)
              {
            if (_verticalOffsets == null)
              _verticalOffsets = new Vector2[MaxNumberOfSamples];

            float aspectRatio = size.X / size.Y;
            for (int i = 0; i < NumberOfSamples; i++)
            {
              _verticalOffsets[i].X = _horizontalOffsets[i].Y * aspectRatio;
              _verticalOffsets[i].Y = _horizontalOffsets[i].X * aspectRatio;
            }
            for (int i = NumberOfSamples; i < numberOfSamples; i++)
            {
              _verticalOffsets[i].X = 0;
              _verticalOffsets[i].Y = 0;
            }
              }

              // Use hardware filtering if possible.
              if (TextureHelper.IsFloatingPointFormat(context.SourceTexture.Format))
            graphicsDevice.SamplerStates[0] = SamplerState.PointClamp;
              else
            graphicsDevice.SamplerStates[0] = SamplerState.LinearClamp;

              bool isAnisotropic = IsAnisotropic;
              bool isBilateral = IsBilateral;
              if (FilterInLogSpace)
              {
            // Anisotropic and bilateral filtering in log-space is not implemented.
            isAnisotropic = false;
            isBilateral = false;
              }
              else
              {
            if (isAnisotropic || isBilateral)
            {
              context.ThrowIfCameraMissing();

              var cameraNode = context.CameraNode;
              var projection = cameraNode.Camera.Projection;
              float far = projection.Far;

              GraphicsHelper.GetFrustumFarCorners(cameraNode.Camera.Projection, _frustumFarCorners);
              _parameterFrustumCorners.SetValue(_frustumFarCorners);

              _parameterBlurParameters0.SetValue(new Vector4(
            far,
            viewport.AspectRatio,
            1.0f / (EdgeSoftness + 0.001f) * far,
            DepthScaling));

              context.ThrowIfGBuffer0Missing();
              Texture2D depthBuffer = context.GBuffer0;
              if (viewport.Width < depthBuffer.Width && viewport.Height < depthBuffer.Height)
              {
            // Use half-resolution depth buffer.
            object obj;
            if (context.Data.TryGetValue(RenderContextKeys.DepthBufferHalf, out obj))
            {
              var depthBufferHalf = obj as Texture2D;
              if (depthBufferHalf != null)
                depthBuffer = depthBufferHalf;
            }
              }

              _parameterGBuffer0.SetValue(depthBuffer);
            }
              }

              _parameterViewportSize.SetValue(size);
//......... (remainder of the code omitted) .........
Developer: Zolniu, Project: DigitalRune, Lines: 101, Source: Blur.cs

Example 7: Render

        public override void Render(IList<SceneNode> nodes, RenderContext context, RenderOrder order)
        {
            ThrowIfDisposed();

            if (nodes == null)
                throw new ArgumentNullException("nodes");
            if (context == null)
                throw new ArgumentNullException("context");

            int numberOfNodes = nodes.Count;
            if (nodes.Count == 0)
                return;

            context.Validate(_effect);
            context.ThrowIfCameraMissing();

            var graphicsDevice = context.GraphicsService.GraphicsDevice;
            var savedRenderState = new RenderStateSnapshot(graphicsDevice);

            // Camera properties
            var cameraNode = context.CameraNode;
            Matrix view = (Matrix)cameraNode.View;
            Matrix projection = cameraNode.Camera.Projection;
            Matrix viewProjection = view * projection;

            // Update SceneNode.LastFrame for all visible nodes.
            int frame = context.Frame;
            cameraNode.LastFrame = frame;

            // Blend additively over any cosmos textures.
            graphicsDevice.RasterizerState = RasterizerState.CullNone;
            graphicsDevice.DepthStencilState = DepthStencilState.DepthRead;
            graphicsDevice.BlendState = BlendState.Additive;

            _effectParameterViewportSize.SetValue(new Vector2(context.Viewport.Width, context.Viewport.Height));

            for (int i = 0; i < numberOfNodes; i++)
            {
                var node = nodes[i] as StarfieldNode;
                if (node == null)
                    continue;

                // StarfieldNode is visible in current frame.
                node.LastFrame = frame;

                if (node.Stars != null && node.Stars.Count > 0)
                {
                    Matrix world = (Matrix)new Matrix44F(node.PoseWorld.Orientation, Vector3F.Zero);
                    _effectParameterWorldViewProjection.SetValue(world * viewProjection);

                    // In [ZFX] the star luminance of the precomputed star data is scaled with
                    //   float const viewFactor = tan(fov);
                    //   float const resolutionFactor = resolution / 1920.0f;
                    //   float const luminanceScale = 1.0f / (viewFactor * viewFactor) * (resolutionFactor * resolutionFactor);
                    // We ignore this here, but we could add this factor to the Intensity parameter.
                    _effectParameterIntensity.SetValue((Vector3)node.Color);

                    if (context.IsHdrEnabled())
                        _effectPassLinear.Apply();
                    else
                        _effectPassGamma.Apply();

                    var mesh = GetStarfieldMesh(node, context);
                    mesh.Draw();
                }
            }

            savedRenderState.Restore();
        }
Developer: Zolniu, Project: DigitalRune, Lines: 69, Source: StarfieldRenderer.cs

Example 8: OnProcess

        protected override void OnProcess(RenderContext context)
        {
            context.ThrowIfCameraMissing();
            context.ThrowIfGBuffer0Missing();
            context.ThrowIfGBuffer1Missing();

            var graphicsDevice = GraphicsService.GraphicsDevice;
            var source = context.SourceTexture;
            var target = context.RenderTarget;
            var viewport = context.Viewport;

            if (TextureHelper.IsFloatingPointFormat(source.Format))
                graphicsDevice.SamplerStates[0] = SamplerState.PointClamp;
            else
                graphicsDevice.SamplerStates[0] = SamplerState.LinearClamp;

            graphicsDevice.SetRenderTarget(target);
            graphicsDevice.Viewport = viewport;

            _parameterViewportSize.SetValue(new Vector2(viewport.Width, viewport.Height));
            _parameterHalfEdgeWidth.SetValue(_halfEdgeWidth);
            _parameterDepthThreshold.SetValue(DepthThreshold);
            _parameterDepthSensitivity.SetValue(DepthSensitivity);
            _parameterNormalThreshold.SetValue(NormalThreshold);
            _parameterNormalSensitivity.SetValue(NormalSensitivity);
            _parameterCameraBackward.SetValue((Vector3)(context.CameraNode.ViewInverse.GetColumn(2).XYZ));
            _parameterSilhouetteColor.SetValue((Vector4)SilhouetteColor);
            _parameterCreaseColor.SetValue((Vector4)CreaseColor);
            if (_parameterSourceTexture != null)
                _parameterSourceTexture.SetValue(source);
            if (_parameterGBuffer0 != null)
                _parameterGBuffer0.SetValue(context.GBuffer0);
            if (_parameterGBuffer1 != null)
                _parameterGBuffer1.SetValue(context.GBuffer1);

            var pass = Numeric.IsLessOrEqual(_halfEdgeWidth, 0.5f) ? _passOnePixelEdge : _passEdge;
            pass.Apply();

            graphicsDevice.DrawFullScreenQuad();

            if (_parameterSourceTexture != null)
                _parameterSourceTexture.SetValue((Texture2D)null);
            if (_parameterGBuffer0 != null)
                _parameterGBuffer0.SetValue((Texture2D)null);
            if (_parameterGBuffer1 != null)
                _parameterGBuffer1.SetValue((Texture2D)null);

            context.SourceTexture = source;
            context.RenderTarget = target;
            context.Viewport = viewport;
        }
Developer: Zolniu, Project: DigitalRune, Lines: 51, Source: EdgeFilter.cs

Example 9: Render

        /// <inheritdoc/>
        public override void Render(IList<SceneNode> nodes, RenderContext context, RenderOrder order)
        {
            ThrowIfDisposed();

            if (nodes == null)
                throw new ArgumentNullException("nodes");
            if (context == null)
                throw new ArgumentNullException("context");

            int numberOfNodes = nodes.Count;
            if (nodes.Count == 0)
                return;

            context.Validate(_effect);
            context.ThrowIfCameraMissing();

            // Update SceneNode.LastFrame for all visible nodes.
            int frame = context.Frame;
            var cameraNode = context.CameraNode;
            cameraNode.LastFrame = frame;

            bool reach = (context.GraphicsService.GraphicsDevice.GraphicsProfile == GraphicsProfile.Reach);
            for (int i = 0; i < numberOfNodes; i++)
            {
                var node = nodes[i] as SkyboxNode;
                if (node == null)
                    continue;

                // SkyboxNode is visible in current frame.
                node.LastFrame = frame;

                if (node.Texture != null)
                {
                    if (reach)
                        RenderReach(node, context);
                    else
                        RenderHiDef(node, context);
                }
            }
        }
Developer: Zolniu, Project: DigitalRune, Lines: 41, Source: SkyboxRenderer.cs

Example 10: Render

        /// <summary>
        /// Draws the texts.
        /// </summary>
        /// <param name="context">The render context.</param>
        /// <remarks>
        /// If <see cref="SpriteBatch"/> or <see cref="SpriteFont"/> are <see langword="null"/>, then 
        /// <see cref="Render"/> does nothing.
        /// </remarks>
        /// <exception cref="ArgumentNullException">
        /// <paramref name="context"/> is <see langword="null"/>.
        /// </exception>
        public void Render(RenderContext context)
        {
            if (context == null)
            throw new ArgumentNullException("context");

              if (SpriteBatch == null || SpriteFont == null)
            return;

              context.Validate(SpriteBatch);

              if (_texts2D.Count == 0 && _texts3D.Count == 0)
            return;

              if (_texts3D.Count > 0)
            context.ThrowIfCameraMissing();

              var savedRenderState = new RenderStateSnapshot(SpriteBatch.GraphicsDevice);

              if (EnableDepthTest)
              {
            SpriteBatch.Begin(SpriteSortMode.Immediate, BlendState.AlphaBlend, SamplerState.LinearClamp, DepthStencilState.Default, RasterizerState.CullNone);
              }
              else
              {
            SpriteBatch.Begin();
              }

              // ----- Draw world space text.
              if (_texts3D.Count > 0)
              {
            CameraNode cameraNode = context.CameraNode;
            Matrix44F viewProjection = cameraNode.Camera.Projection * cameraNode.View;
            Viewport viewport = SpriteBatch.GraphicsDevice.Viewport;

            foreach (var textInfo in _texts3D)
            {
              // Transform position from world space to the viewport.
              Vector3F pos = viewport.ProjectToViewport(textInfo.Position, viewProjection);
              if (pos.Z < 0 || pos.Z > 1)
            continue;

              // Snap to pixels. Also add a small bias in one direction because when we draw text at
              // certain positions (e.g. view space origin) and the presentation target width is an
              // odd number, the pos will be exactly at pixel centers and due to numerical errors it
              // would jitter between pixels if the camera moves slightly.
              pos.X = (float)Math.Round(pos.X + 0.01f);
              pos.Y = (float)Math.Round(pos.Y + 0.01f);

              var textAsString = textInfo.Text as string;
              if (!string.IsNullOrEmpty(textAsString))
              {
            var textOrigin = GetOrigin(textAsString, textInfo.RelativeOrigin);
            SpriteBatch.DrawString(SpriteFont, textAsString, new Vector2(pos.X, pos.Y), textInfo.Color, 0, textOrigin, 1.0f, SpriteEffects.None, pos.Z);
              }
              else
              {
            var textAsStringBuilder = textInfo.Text as StringBuilder;
            if (textAsStringBuilder != null && textAsStringBuilder.Length > 0)
            {
              var textOrigin = GetOrigin(textAsStringBuilder, textInfo.RelativeOrigin);
              SpriteBatch.DrawString(SpriteFont, textAsStringBuilder, new Vector2(pos.X, pos.Y), textInfo.Color, 0, textOrigin, 1, SpriteEffects.None, pos.Z);
            }
              }
            }
              }

              // ----- Draw screen space text.
              foreach (var textInfo in _texts2D)
              {
            var textAsString = textInfo.Text as string;
            if (!string.IsNullOrEmpty(textAsString))
            {
              var textOrigin = GetOrigin(textAsString, textInfo.RelativeOrigin);
              SpriteBatch.DrawString(SpriteFont, textAsString, (Vector2)textInfo.Position, textInfo.Color, 0, textOrigin, 1, SpriteEffects.None, 0);
            }
            else
            {
              var textAsStringBuilder = textInfo.Text as StringBuilder;
              if (textAsStringBuilder != null && textAsStringBuilder.Length > 0)
              {
            var textOrigin = GetOrigin(textAsStringBuilder, textInfo.RelativeOrigin);
            SpriteBatch.DrawString(SpriteFont, textAsStringBuilder, (Vector2)textInfo.Position, textInfo.Color, 0, textOrigin, 1, SpriteEffects.None, 0);
              }
            }
              }

              SpriteBatch.End();

              savedRenderState.Restore();
//......... (remainder of the code omitted) .........
Developer: Zolniu, Project: DigitalRune, Lines: 101, Source: TextBatch.cs

Example 11: Render

        public override void Render(IList<SceneNode> nodes, RenderContext context, RenderOrder order)
        {
            if (nodes == null)
            throw new ArgumentNullException("nodes");
              if (context == null)
            throw new ArgumentNullException("context");

              int numberOfNodes = nodes.Count;
              if (numberOfNodes == 0)
            return;

              context.ThrowIfCameraMissing();
              context.ThrowIfSceneMissing();

              var originalRenderTarget = context.RenderTarget;
              var originalViewport = context.Viewport;
              var originalReferenceNode = context.ReferenceNode;

              var cameraNode = context.CameraNode;

              // Update SceneNode.LastFrame for all visible nodes.
              int frame = context.Frame;
              cameraNode.LastFrame = frame;

              // The scene node renderer should use the light camera instead of the player camera.
              context.CameraNode = _perspectiveCameraNode;
              context.Technique = "Omnidirectional";

              var graphicsService = context.GraphicsService;
              var graphicsDevice = graphicsService.GraphicsDevice;
              var renderTargetPool = graphicsService.RenderTargetPool;
              var savedRenderState = new RenderStateSnapshot(graphicsDevice);

              for (int i = 0; i < numberOfNodes; i++)
              {
            var lightNode = nodes[i] as LightNode;
            if (lightNode == null)
              continue;

            var shadow = lightNode.Shadow as CubeMapShadow;
            if (shadow == null)
              continue;

            var light = lightNode.Light as PointLight;
            if (light == null)
              throw new GraphicsException("CubeMapShadow can only be used with a PointLight.");

            // LightNode is visible in current frame.
            lightNode.LastFrame = frame;

            if (shadow.ShadowMap == null)
            {
              shadow.ShadowMap = renderTargetPool.ObtainCube(
            new RenderTargetFormat(
              shadow.PreferredSize,
              null,
              false,
              shadow.Prefer16Bit ? SurfaceFormat.HalfSingle : SurfaceFormat.Single,
              DepthFormat.Depth24));
            }

            ((PerspectiveProjection)_perspectiveCameraNode.Camera.Projection).SetFieldOfView(
              ConstantsF.PiOver2, 1, shadow.Near, light.Range);

            // World units per texel at a planar distance of 1 world unit.
            float unitsPerTexel = _perspectiveCameraNode.Camera.Projection.Width / (shadow.ShadowMap.Size * shadow.Near);

            // Convert depth bias from "texel" to  world space.
            // Minus to move receiver closer to light.
            shadow.EffectiveDepthBias = -shadow.DepthBias * unitsPerTexel;

            // Convert normal offset from "texel" to world space.
            shadow.EffectiveNormalOffset = shadow.NormalOffset * unitsPerTexel;

            var pose = lightNode.PoseWorld;

            context.ReferenceNode = lightNode;
            context.Object = shadow;

            bool shadowMapContainsSomething = false;
            for (int side = 0; side < 6; side++)
            {
              context.Data[RenderContextKeys.ShadowTileIndex] = BoxedIntegers[side];

              graphicsDevice.SetRenderTarget(shadow.ShadowMap, CubeMapFaces[side]);
              // context.RenderTarget = shadow.ShadowMap;   // TODO: Support cube maps targets in the render context.
              context.Viewport = graphicsDevice.Viewport;

              graphicsDevice.Clear(Color.White);

              _perspectiveCameraNode.View = Matrix44F.CreateLookAt(
            pose.Position,
            pose.ToWorldPosition(CubeMapForwardVectors[side]),
            pose.ToWorldDirection(CubeMapUpVectors[side]));

              // Abort if this cube map frustum does not touch the camera frustum.
              if (!context.Scene.HaveContact(cameraNode, _perspectiveCameraNode))
            continue;

              graphicsDevice.DepthStencilState = DepthStencilState.Default;
//......... (remainder of the code omitted) .........
Developer: Zolniu, Project: DigitalRune, Lines: 101, Source: CubeMapShadowMapRenderer.cs

Example 12: Render

        public override void Render(IList<SceneNode> nodes, RenderContext context, RenderOrder order)
        {
            ThrowIfDisposed();

              if (nodes == null)
            throw new ArgumentNullException("nodes");
              if (context == null)
            throw new ArgumentNullException("context");

              int numberOfNodes = nodes.Count;
              if (nodes.Count == 0)
            return;

              context.Validate(_spriteBatch);
              context.ThrowIfCameraMissing();

              var graphicsDevice = context.GraphicsService.GraphicsDevice;
              var savedRenderState = new RenderStateSnapshot(graphicsDevice);

              // Camera properties
              var cameraNode = context.CameraNode;
              Matrix44F viewProjection = cameraNode.Camera.Projection * cameraNode.View;
              var viewport = graphicsDevice.Viewport;

              // Update SceneNode.LastFrame for all visible nodes.
              int frame = context.Frame;
              cameraNode.LastFrame = frame;

              SpriteSortMode sortMode;
              switch (order)
              {
            case RenderOrder.Default:
              sortMode = SpriteSortMode.Texture;
              break;
            case RenderOrder.FrontToBack:
              sortMode = SpriteSortMode.FrontToBack;
              break;
            case RenderOrder.BackToFront:
              sortMode = SpriteSortMode.BackToFront;
              break;
            case RenderOrder.UserDefined:
            default:
              sortMode = SpriteSortMode.Deferred;
              break;
              }

              _spriteBatch.Begin(sortMode, graphicsDevice.BlendState, null, graphicsDevice.DepthStencilState, null);

              for (int i = 0; i < numberOfNodes; i++)
              {
            var node = nodes[i] as SpriteNode;
            if (node == null)
              continue;

            // SpriteNode is visible in current frame.
            node.LastFrame = frame;

            // Position, size, and origin in pixels.
            Vector3F position = new Vector3F();
            Vector2 size = new Vector2();
            Vector2 origin = new Vector2();

            var bitmapSprite = node.Sprite as ImageSprite;
            if (bitmapSprite != null)
            {
              var packedTexture = bitmapSprite.Texture;
              if (packedTexture != null)
              {
            // Project into viewport and snap to pixels.
            position = viewport.ProjectToViewport(node.PoseWorld.Position, viewProjection);
            position.X = (int)(position.X + 0.5f);
            position.Y = (int)(position.Y + 0.5f);

            // Get source rectangle (pixel bounds).
            var sourceRectangle = packedTexture.GetBounds(node.AnimationTime);
            size = new Vector2(sourceRectangle.Width, sourceRectangle.Height);

            // Premultiply color.
            Vector3F color3F = node.Color;
            float alpha = node.Alpha;
            Color color = new Color(color3F.X * alpha, color3F.Y * alpha, color3F.Z * alpha, alpha);

            // Get absolute origin (relative to pixel bounds).
            origin = (Vector2)node.Origin * size;

            // Draw using SpriteBatch.
            _spriteBatch.Draw(
              packedTexture.TextureAtlas, new Vector2(position.X, position.Y), sourceRectangle,
              color, node.Rotation, origin, (Vector2)node.Scale, SpriteEffects.None, position.Z);
              }
            }
            else
            {
              var textSprite = node.Sprite as TextSprite;
              if (textSprite != null)
              {
            var font = textSprite.Font ?? _spriteFont;
            if (font != null)
            {
              // Text can be a string or StringBuilder.
//......... (remainder of the code omitted) .........
Developer: Zolniu, Project: DigitalRune, Lines: 101, Source: SpriteRenderer.cs

Example 13: OnProcess

        protected override void OnProcess(RenderContext context)
        {
            context.ThrowIfCameraMissing();

            var graphicsDevice = GraphicsService.GraphicsDevice;
            var source = context.SourceTexture;
            var target = context.RenderTarget;
            var viewport = context.Viewport;

            var tempFormat = new RenderTargetFormat(source.Width, source.Height, false, source.Format, DepthFormat.None);
            RenderTarget2D blurredScene = GraphicsService.RenderTargetPool.Obtain2D(tempFormat);

            if (TextureHelper.IsFloatingPointFormat(source.Format))
            {
                graphicsDevice.SamplerStates[0] = SamplerState.PointClamp;
                graphicsDevice.SamplerStates[1] = SamplerState.PointClamp;
            }
            else
            {
                graphicsDevice.SamplerStates[0] = SamplerState.LinearClamp;
                graphicsDevice.SamplerStates[1] = SamplerState.LinearClamp;
            }

            context.RenderTarget = blurredScene;
            context.Viewport = new Viewport(0, 0, blurredScene.Width, blurredScene.Height);

            // Get view-dependent information stored in camera node.
            var cameraNode = context.CameraNode;
            object dummy;
            cameraNode.ViewDependentData.TryGetValue(this, out dummy);
            var data = dummy as ViewDependentData;
            if (data == null)
            {
                data = new ViewDependentData(GraphicsService);
                cameraNode.ViewDependentData[this] = data;
            }

            if (data.LastBlurredScene == null)
            {
                // This is the first frame. Simply remember the current source for the next frame.
                _copyFilter.Process(context);
            }
            else
            {
                // Create new blurred scene.
                graphicsDevice.SetRenderTarget(blurredScene);

                _viewportSizeParameter.SetValue(new Vector2(graphicsDevice.Viewport.Width, graphicsDevice.Viewport.Height));
                _strengthParameter.SetValue(Strength);
                _sourceTextureParameter.SetValue(source);
                _lastSourceTextureParameter.SetValue(data.LastBlurredScene);
                _effect.CurrentTechnique.Passes[0].Apply();
                graphicsDevice.DrawFullScreenQuad();
            }

            // Copy blurredScene to target.
            context.SourceTexture = blurredScene;
            context.RenderTarget = target;
            context.Viewport = viewport;
            _copyFilter.Process(context);

            // Recycle old blurred scene and store new scene (switch render targets).
            GraphicsService.RenderTargetPool.Recycle(data.LastBlurredScene);
            data.LastBlurredScene = blurredScene;

            _sourceTextureParameter.SetValue((Texture2D)null);
            _lastSourceTextureParameter.SetValue((Texture2D)null);

            // Restore original context.
            context.SourceTexture = source;
        }
Developer: Zolniu, Project: DigitalRune, Lines: 71, Source: SimpleMotionBlur.cs

Example 14: Render

        public override void Render(IList<SceneNode> nodes, RenderContext context, RenderOrder order)
        {
            ThrowIfDisposed();

              if (nodes == null)
            throw new ArgumentNullException("nodes");
              if (context == null)
            throw new ArgumentNullException("context");

              int numberOfNodes = nodes.Count;
              if (numberOfNodes == 0)
            return;

              context.Validate(_effect);
              context.ThrowIfCameraMissing();

              float deltaTime = (float)context.DeltaTime.TotalSeconds;

              var graphicsService = context.GraphicsService;
              var graphicsDevice = graphicsService.GraphicsDevice;
              var renderTargetPool = graphicsService.RenderTargetPool;

              var cameraNode = context.CameraNode;
              Projection projection = cameraNode.Camera.Projection;
              Pose view = cameraNode.PoseWorld.Inverse;

              // Around the camera we push the waves down to avoid that the camera cuts the near plane.
              // Get largest vector from camera to near plane corners.
              float nearPlaneRadius =
            new Vector3F(Math.Max(Math.Abs(projection.Right), Math.Abs(projection.Left)),
                     Math.Max(Math.Abs(projection.Top), Math.Abs(projection.Bottom)),
                     projection.Near
                    ).Length;

              var originalSourceTexture = context.SourceTexture;

              // Update SceneNode.LastFrame for all visible nodes.
              int frame = context.Frame;
              cameraNode.LastFrame = frame;

              var savedRenderState = new RenderStateSnapshot(graphicsDevice);

              // Water surface is opaque.
              graphicsDevice.BlendState = BlendState.Opaque;

              #region ----- Common Effect Parameters -----

              _parameterView.SetValue(view);
              _parameterProjection.SetValue(projection);
              _parameterCameraParameters.SetValue(new Vector4(
            (Vector3)cameraNode.PoseWorld.Position,
            cameraNode.Camera.Projection.Far));

              var viewport = graphicsDevice.Viewport;
              _parameterViewportSize.SetValue(new Vector2(viewport.Width, viewport.Height));

              _parameterTime.SetValue((float)context.Time.TotalSeconds);

              // Query ambient and directional lights.
              var lightQuery = context.Scene.Query<GlobalLightQuery>(cameraNode, context);
              Vector3F ambientLight = Vector3F.Zero;
              if (lightQuery.AmbientLights.Count > 0)
              {
            var light = (AmbientLight)lightQuery.AmbientLights[0].Light;
            ambientLight = light.Color * light.Intensity * light.HdrScale;
              }

              _parameterAmbientLight.SetValue((Vector3)ambientLight);

              Vector3F directionalLightDirection = new Vector3F(0, -1, 0);
              Vector3F directionalLightIntensity = Vector3F.Zero;
              if (lightQuery.DirectionalLights.Count > 0)
              {
            var lightNode = lightQuery.DirectionalLights[0];
            var light = (DirectionalLight)lightNode.Light;
            directionalLightDirection = -lightNode.PoseWorld.Orientation.GetColumn(2);
            directionalLightIntensity = light.Color * light.SpecularIntensity * light.HdrScale;
              }

              _parameterDirectionalLightDirection.SetValue((Vector3)directionalLightDirection);
              _parameterDirectionalLightIntensity.SetValue((Vector3)directionalLightIntensity);

              _parameterGBuffer0.SetValue(context.GBuffer0);

              if (_parameterNoiseMap != null)
            _parameterNoiseMap.SetValue(_noiseMap);
              #endregion

              #region ----- Fog Parameters -----

              var fogNodes = context.Scene.Query<FogQuery>(cameraNode, context).FogNodes;
              SetFogParameters(fogNodes, cameraNode, directionalLightDirection);
              #endregion

              _parameterProjectedGridParameters.SetValue(new Vector3(
              ProjectedGridParameters.EdgeAttenuation,
              ProjectedGridParameters.DistanceAttenuationStart,
              ProjectedGridParameters.DistanceAttenuationEnd));

              for (int i = 0; i < numberOfNodes; i++)
//......... (remainder of the code omitted) .........
Developer: Zolniu, Project: DigitalRune, Lines: 101, Source: WaterRenderer.cs

Example 15: Render

        public override void Render(IList<SceneNode> nodes, RenderContext context, RenderOrder order)
        {
            if (nodes == null)
            throw new ArgumentNullException("nodes");
              if (context == null)
            throw new ArgumentNullException("context");

              int numberOfNodes = nodes.Count;
              if (numberOfNodes == 0)
            return;

              // Note: The camera node is not used by the StandardShadowMapRenderer.
              // Still throw an exception if null for consistency. (All other shadow map
              // renderers need a camera node.)
              context.ThrowIfCameraMissing();
              context.ThrowIfSceneMissing();

              var originalRenderTarget = context.RenderTarget;
              var originalViewport = context.Viewport;
              var originalReferenceNode = context.ReferenceNode;

              var cameraNode = context.CameraNode;

              // Update SceneNode.LastFrame for all visible nodes.
              int frame = context.Frame;
              cameraNode.LastFrame = frame;

              context.Technique = "Default";

              var graphicsService = context.GraphicsService;
              var graphicsDevice = graphicsService.GraphicsDevice;
              var savedRenderState = new RenderStateSnapshot(graphicsDevice);

              for (int i = 0; i < numberOfNodes; i++)
              {
            var lightNode = nodes[i] as LightNode;
            if (lightNode == null)
              continue;

            var shadow = lightNode.Shadow as StandardShadow;
            if (shadow == null)
              continue;

            // LightNode is visible in current frame.
            lightNode.LastFrame = frame;

            // Get a new shadow map if necessary.
            if (shadow.ShadowMap == null)
            {
              shadow.ShadowMap = graphicsService.RenderTargetPool.Obtain2D(
            new RenderTargetFormat(
              shadow.PreferredSize,
              shadow.PreferredSize,
              false,
              shadow.Prefer16Bit ? SurfaceFormat.HalfSingle : SurfaceFormat.Single,
              DepthFormat.Depth24));
            }

            // Create a suitable shadow camera.
            CameraNode lightCameraNode;
            if (lightNode.Light is ProjectorLight)
            {
              var light = (ProjectorLight)lightNode.Light;
              if (light.Projection is PerspectiveProjection)
              {
            var lp = (PerspectiveProjection)light.Projection;
            var cp = (PerspectiveProjection)_perspectiveCameraNode.Camera.Projection;
            cp.SetOffCenter(lp.Left, lp.Right, lp.Bottom, lp.Top, lp.Near, lp.Far);

            lightCameraNode = _perspectiveCameraNode;
              }
              else //if (light.Projection is OrthographicProjection)
              {
            var lp = (OrthographicProjection)light.Projection;
            var cp = (OrthographicProjection)_orthographicCameraNode.Camera.Projection;
            cp.SetOffCenter(lp.Left, lp.Right, lp.Bottom, lp.Top, lp.Near, lp.Far);

            lightCameraNode = _orthographicCameraNode;
              }
            }
            else if (lightNode.Light is Spotlight)
            {
              var light = (Spotlight)lightNode.Light;
              var cp = (PerspectiveProjection)_perspectiveCameraNode.Camera.Projection;
              cp.SetFieldOfView(2 * light.CutoffAngle, 1, shadow.DefaultNear, light.Range);

              lightCameraNode = _perspectiveCameraNode;
            }
            else
            {
              throw new GraphicsException("StandardShadow can only be used with a Spotlight or a ProjectorLight.");
            }

            lightCameraNode.PoseWorld = lightNode.PoseWorld;

            // Store data for use in StandardShadowMaskRenderer.
            shadow.Near = lightCameraNode.Camera.Projection.Near;
            shadow.Far = lightCameraNode.Camera.Projection.Far;
            shadow.View = lightCameraNode.PoseWorld.Inverse;
            shadow.Projection = lightCameraNode.Camera.Projection;
//......... (remainder of the code omitted) .........
Developer: Zolniu, Project: DigitalRune, Lines: 101, Source: StandardShadowMapRenderer.cs


Note: The RenderContext.ThrowIfCameraMissing examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For distribution and use, please follow the license of the corresponding project; do not republish without permission.