This article collects typical usage examples of the C++ method Sampler::GetSample. If you are wondering how Sampler::GetSample is used in practice, or what real calls to it look like, the hand-picked code examples here may help. You can also explore further usage examples of the enclosing Sampler class.
Shown below are 5 code examples of Sampler::GetSample, sorted by popularity by default.
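All five examples share the same calling convention: the render thread allocates a Sampler, declares up front how many floating-point values one iteration will consume via RequestSamples(), and then reads each value through GetSample() at a fixed, pre-agreed offset. The class below is a hypothetical minimal stand-in (plain uniform random numbers), not the SLG/LuxRays Sampler used in Examples 1-4; it is only a sketch of that calling convention, with the sampleBootSize/sampleStepSize values borrowed from Example 3 and maxPathDepth assumed.

#include <cstdio>
#include <random>
#include <vector>

// Hypothetical minimal sampler: RequestSamples() fixes the size of the sample
// vector and GetSample(index) returns one float in [0, 1) from it, which is
// the contract the examples below rely on.
class Sampler {
public:
    explicit Sampler(unsigned int seed) : rng(seed), dist(0.f, 1.f) {}

    // Declare how many floats one iteration will read.
    void RequestSamples(std::size_t size) {
        samples.resize(size);
        NextSample();
    }

    // Read the value stored at a fixed, pre-agreed offset.
    float GetSample(std::size_t index) const { return samples[index]; }

    // Produce a fresh sample vector for the next iteration.
    void NextSample() {
        for (float &s : samples)
            s = dist(rng);
    }

private:
    std::mt19937 rng;
    std::uniform_real_distribution<float> dist;
    std::vector<float> samples;
};

int main() {
    // Offsets borrowed from Example 3; maxPathDepth is an assumed setting.
    const unsigned int maxPathDepth = 5;
    const unsigned int sampleBootSize = 4;  // screen x/y + 2 lens/camera samples
    const unsigned int sampleStepSize = 9;  // floats consumed per path vertex
    const unsigned int sampleSize = sampleBootSize + maxPathDepth * sampleStepSize;

    Sampler sampler(42);
    sampler.RequestSamples(sampleSize);

    const float screenX = sampler.GetSample(0);
    const float screenY = sampler.GetSample(1);
    std::printf("first path starts at normalized screen position (%g, %g)\n", screenX, screenY);
    // ... a real thread would now trace the path, reading
    // sampler.GetSample(sampleBootSize + (depth - 1) * sampleStepSize + k) per vertex ...
    sampler.NextSample();
    return 0;
}

Real samplers (for instance the Metropolis sampler mentioned in Example 1) differ in how the sample vector is produced, but the GetSample(offset) contract stays the same.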
Example 1: RenderFuncVM
void BiDirVMCPURenderThread::RenderFuncVM() {
    //SLG_LOG("[BiDirVMCPURenderThread::" << threadIndex << "] Rendering thread started");

    //--------------------------------------------------------------------------
    // Initialization
    //--------------------------------------------------------------------------

    BiDirVMCPURenderEngine *engine = (BiDirVMCPURenderEngine *)renderEngine;
    RandomGenerator *rndGen = new RandomGenerator(engine->seedBase + threadIndex);
    Scene *scene = engine->renderConfig->scene;
    Camera *camera = scene->camera;
    Film *film = threadFilm;
    const u_int filmWidth = film->GetWidth();
    const u_int filmHeight = film->GetHeight();
    pixelCount = filmWidth * filmHeight;

    // Setup the samplers
    vector<Sampler *> samplers(engine->lightPathsCount, NULL);
    const u_int sampleSize =
        sampleBootSizeVM + // To generate the initial light vertex and trace eye ray
        engine->maxLightPathDepth * sampleLightStepSize + // For each light vertex
        engine->maxEyePathDepth * sampleEyeStepSize; // For each eye vertex
    // metropolisSharedTotalLuminance and metropolisSharedSampleCount are
    // initialized inside MetropolisSampler::RequestSamples()
    double metropolisSharedTotalLuminance, metropolisSharedSampleCount;
    for (u_int i = 0; i < samplers.size(); ++i) {
        Sampler *sampler = engine->renderConfig->AllocSampler(rndGen, film,
            &metropolisSharedTotalLuminance, &metropolisSharedSampleCount);
        sampler->RequestSamples(sampleSize);

        samplers[i] = sampler;
    }

    u_int iteration = 0;
    vector<vector<SampleResult> > samplesResults(samplers.size());
    vector<vector<PathVertexVM> > lightPathsVertices(samplers.size());
    vector<Point> lensPoints(samplers.size());
    HashGrid hashGrid;
    const u_int haltDebug = engine->renderConfig->GetProperty("batch.haltdebug").Get<u_int>();

    for (u_int steps = 0; !boost::this_thread::interruption_requested(); ++steps) {
        // Clear the arrays
        for (u_int samplerIndex = 0; samplerIndex < samplers.size(); ++samplerIndex) {
            samplesResults[samplerIndex].clear();
            lightPathsVertices[samplerIndex].clear();
        }

        // Setup vertex merging
        float radius = engine->baseRadius;
        radius /= powf(float(iteration + 1), .5f * (1.f - engine->radiusAlpha));
        radius = Max(radius, DEFAULT_EPSILON_STATIC);
        const float radius2 = radius * radius;

        const float vmFactor = M_PI * radius2 * engine->lightPathsCount;
        vmNormalization = 1.f / vmFactor;

        const float etaVCM = vmFactor;
        misVmWeightFactor = MIS(etaVCM);
        misVcWeightFactor = MIS(1.f / etaVCM);

        // Using the same time for all rays in the same pass is required by the
        // current implementation (i.e. I can not mix paths with different
        // times). However this is detrimental for the Metropolis sampler.
        const float time = rndGen->floatValue();

        //----------------------------------------------------------------------
        // Trace all light paths
        //----------------------------------------------------------------------

        for (u_int samplerIndex = 0; samplerIndex < samplers.size(); ++samplerIndex) {
            Sampler *sampler = samplers[samplerIndex];

            // Sample a point on the camera lens
            if (!camera->SampleLens(time, sampler->GetSample(3), sampler->GetSample(4),
                    &lensPoints[samplerIndex]))
                continue;

            TraceLightPath(time, sampler, lensPoints[samplerIndex],
                lightPathsVertices[samplerIndex], samplesResults[samplerIndex]);
        }

        //----------------------------------------------------------------------
        // Store all light path vertices in the k-NN accelerator
        //----------------------------------------------------------------------

        hashGrid.Build(lightPathsVertices, radius);

        //cout << "==========================================\n";
        //cout << "Iteration: " << iteration << " Paths: " << engine->lightPathsCount << " Light path vertices: "<< hashGrid.GetVertexCount() <<"\n";

        //----------------------------------------------------------------------
        // Trace all eye paths
        //----------------------------------------------------------------------

        for (u_int samplerIndex = 0; samplerIndex < samplers.size(); ++samplerIndex) {
            Sampler *sampler = samplers[samplerIndex];

            PathVertexVM eyeVertex;
            SampleResult eyeSampleResult(Film::RADIANCE_PER_PIXEL_NORMALIZED | Film::ALPHA, 1);

            eyeSampleResult.alpha = 1.f;
//......... the remaining part of this example is omitted .........
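Example 1 recomputes the vertex-merging radius at the start of every pass: radius_i = baseRadius / (i + 1)^(0.5 * (1 - radiusAlpha)), clamped to a small epsilon, so the merge kernel keeps shrinking as iterations accumulate. The sketch below only prints that schedule, with assumed values for baseRadius, radiusAlpha, and the epsilon; it is an illustration of the formula, not part of the renderer.

#include <algorithm>
#include <cmath>
#include <cstdio>

int main() {
    // Assumed values; in Example 1 they come from the render engine settings.
    const float baseRadius = 1.f;
    const float radiusAlpha = .95f;
    const float minRadius = 1e-5f;       // stand-in for DEFAULT_EPSILON_STATIC
    const float kPi = 3.14159265f;

    for (unsigned int iteration = 0; iteration < 8; ++iteration) {
        // Same schedule as in the example: shrink the radius a little each pass.
        float radius = baseRadius;
        radius /= std::pow(float(iteration + 1), .5f * (1.f - radiusAlpha));
        radius = std::max(radius, minRadius);

        // kPi * radius^2 is the kernel area that enters vmFactor in the example.
        std::printf("iteration %u: radius = %g, kernel area = %g\n",
                iteration, radius, kPi * radius * radius);
    }
    return 0;
}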
Example 2: RenderFunc
void LightCPURenderThread::RenderFunc() {
    //SLG_LOG("[LightCPURenderThread::" << threadIndex << "] Rendering thread started");

    //--------------------------------------------------------------------------
    // Initialization
    //--------------------------------------------------------------------------

    LightCPURenderEngine *engine = (LightCPURenderEngine *)renderEngine;
    RandomGenerator *rndGen = new RandomGenerator(engine->seedBase + threadIndex);
    Scene *scene = engine->renderConfig->scene;
    PerspectiveCamera *camera = scene->camera;
    Film *film = threadFilm;

    // Setup the sampler
    double metropolisSharedTotalLuminance, metropolisSharedSampleCount;
    Sampler *sampler = engine->renderConfig->AllocSampler(rndGen, film,
        &metropolisSharedTotalLuminance, &metropolisSharedSampleCount);
    const u_int sampleBootSize = 11;
    const u_int sampleEyeStepSize = 4;
    const u_int sampleLightStepSize = 5;
    const u_int sampleSize =
        sampleBootSize + // To generate the initial setup
        engine->maxPathDepth * sampleEyeStepSize + // For each eye vertex
        engine->maxPathDepth * sampleLightStepSize; // For each light vertex
    sampler->RequestSamples(sampleSize);

    //--------------------------------------------------------------------------
    // Trace light paths
    //--------------------------------------------------------------------------

    vector<SampleResult> sampleResults;
    while (!boost::this_thread::interruption_requested()) {
        sampleResults.clear();

        // Select one light source
        float lightPickPdf;
        const LightSource *light = scene->SampleAllLights(sampler->GetSample(2), &lightPickPdf);

        // Initialize the light path
        float lightEmitPdfW;
        Ray nextEventRay;
        Spectrum lightPathFlux = light->Emit(scene,
            sampler->GetSample(3), sampler->GetSample(4), sampler->GetSample(5), sampler->GetSample(6),
            &nextEventRay.o, &nextEventRay.d, &lightEmitPdfW);
        if (lightPathFlux.Black()) {
            sampler->NextSample(sampleResults);
            continue;
        }
        lightPathFlux /= lightEmitPdfW * lightPickPdf;
        assert (!lightPathFlux.IsNaN() && !lightPathFlux.IsInf());

        // Sample a point on the camera lens
        Point lensPoint;
        if (!camera->SampleLens(sampler->GetSample(7), sampler->GetSample(8),
                &lensPoint)) {
            sampler->NextSample(sampleResults);
            continue;
        }

        //----------------------------------------------------------------------
        // I don't try to connect the light vertex directly with the eye
        // because InfiniteLight::Emit() returns a point on the scene bounding
        // sphere. Instead, I trace a ray from the camera like in BiDir.
        // This is also a good way to test the Film Per-Pixel-Normalization and
        // the Per-Screen-Normalization Buffers used by BiDir.
        //----------------------------------------------------------------------
        TraceEyePath(sampler, &sampleResults);

        //----------------------------------------------------------------------
        // Trace the light path
        //----------------------------------------------------------------------
        int depth = 1;
        while (depth <= engine->maxPathDepth) {
            const u_int sampleOffset = sampleBootSize + sampleEyeStepSize * engine->maxPathDepth +
                (depth - 1) * sampleLightStepSize;

            RayHit nextEventRayHit;
            BSDF bsdf;
            Spectrum connectionThroughput;
            if (scene->Intersect(device, true, sampler->GetSample(sampleOffset),
                    &nextEventRay, &nextEventRayHit, &bsdf, &connectionThroughput)) {
                // Something was hit
                lightPathFlux *= connectionThroughput;

                //--------------------------------------------------------------
                // Try to connect the light path vertex with the eye
                //--------------------------------------------------------------
                ConnectToEye(sampler->GetSample(sampleOffset + 1),
                    bsdf, lensPoint, lightPathFlux, sampleResults);

                if (depth >= engine->maxPathDepth)
                    break;

                //--------------------------------------------------------------
                // Build the next vertex path ray
                //--------------------------------------------------------------
//......... the remaining part of this example is omitted .........
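A point worth noting in Example 2 is how the sample offsets are laid out: the first 11 floats are the bootstrap block (light pick at offset 2, light emission at 3..6, lens sample at 7..8), the next maxPathDepth * 4 floats belong to the eye path traced by TraceEyePath(), and each light-path vertex then consumes its own 5-float stride. The sketch below reproduces exactly that offset arithmetic with an assumed maxPathDepth, so the mapping from depth to GetSample() indices is easy to check.

#include <cstdio>

// Reproduces the sample-offset arithmetic of Example 2 so the layout of one
// sample vector is easy to inspect. maxPathDepth is an assumed engine setting.
int main() {
    const unsigned int maxPathDepth = 5;
    const unsigned int sampleBootSize = 11;      // initial setup block
    const unsigned int sampleEyeStepSize = 4;    // floats per eye vertex
    const unsigned int sampleLightStepSize = 5;  // floats per light vertex

    const unsigned int sampleSize = sampleBootSize +
            maxPathDepth * sampleEyeStepSize +
            maxPathDepth * sampleLightStepSize;
    std::printf("floats requested per sample vector: %u\n", sampleSize);

    // The light-path loop skips the bootstrap block and the whole eye block,
    // then advances by one 5-float stride per light vertex.
    for (unsigned int depth = 1; depth <= maxPathDepth; ++depth) {
        const unsigned int sampleOffset = sampleBootSize +
                sampleEyeStepSize * maxPathDepth +
                (depth - 1) * sampleLightStepSize;
        std::printf("light vertex %u uses GetSample(%u) .. GetSample(%u)\n",
                depth, sampleOffset, sampleOffset + sampleLightStepSize - 1);
    }
    return 0;
}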
Example 3: RenderFunc
void PathCPURenderThread::RenderFunc() {
    //SLG_LOG("[PathCPURenderEngine::" << threadIndex << "] Rendering thread started");

    //--------------------------------------------------------------------------
    // Initialization
    //--------------------------------------------------------------------------

    PathCPURenderEngine *engine = (PathCPURenderEngine *)renderEngine;
    RandomGenerator *rndGen = new RandomGenerator(engine->seedBase + threadIndex);
    Scene *scene = engine->renderConfig->scene;
    PerspectiveCamera *camera = scene->camera;
    Film *film = threadFilm;
    const unsigned int filmWidth = film->GetWidth();
    const unsigned int filmHeight = film->GetHeight();

    // Setup the sampler
    double metropolisSharedTotalLuminance, metropolisSharedSampleCount;
    Sampler *sampler = engine->renderConfig->AllocSampler(rndGen, film,
        &metropolisSharedTotalLuminance, &metropolisSharedSampleCount);
    const unsigned int sampleBootSize = 4;
    const unsigned int sampleStepSize = 9;
    const unsigned int sampleSize =
        sampleBootSize + // To generate eye ray
        engine->maxPathDepth * sampleStepSize; // For each path vertex
    sampler->RequestSamples(sampleSize);

    //--------------------------------------------------------------------------
    // Trace paths
    //--------------------------------------------------------------------------

    vector<SampleResult> sampleResults(1);
    sampleResults[0].type = PER_PIXEL_NORMALIZED;
    while (!boost::this_thread::interruption_requested()) {
        float alpha = 1.f;

        Ray eyeRay;
        const float screenX = min(sampler->GetSample(0) * filmWidth, (float)(filmWidth - 1));
        const float screenY = min(sampler->GetSample(1) * filmHeight, (float)(filmHeight - 1));
        camera->GenerateRay(screenX, screenY, &eyeRay,
            sampler->GetSample(2), sampler->GetSample(3));

        int depth = 1;
        bool lastSpecular = true;
        float lastPdfW = 1.f;
        Spectrum radiance;
        Spectrum pathThrouput(1.f, 1.f, 1.f);
        BSDF bsdf;
        while (depth <= engine->maxPathDepth) {
            const unsigned int sampleOffset = sampleBootSize + (depth - 1) * sampleStepSize;

            RayHit eyeRayHit;
            Spectrum connectionThroughput;
            if (!scene->Intersect(device, false, sampler->GetSample(sampleOffset),
                    &eyeRay, &eyeRayHit, &bsdf, &connectionThroughput)) {
                // Nothing was hit, look for infinitelight
                DirectHitInfiniteLight(lastSpecular, pathThrouput * connectionThroughput, eyeRay.d,
                    lastPdfW, &radiance);

                if (depth == 1)
                    alpha = 0.f;
                break;
            }
            pathThrouput *= connectionThroughput;

            // Something was hit
            // Check if it is a light source
            if (bsdf.IsLightSource()) {
                DirectHitFiniteLight(lastSpecular, pathThrouput,
                    eyeRayHit.t, bsdf, lastPdfW, &radiance);
            }

            // Note: pass-through check is done inside SceneIntersect()

            //------------------------------------------------------------------
            // Direct light sampling
            //------------------------------------------------------------------

            DirectLightSampling(sampler->GetSample(sampleOffset + 1),
                sampler->GetSample(sampleOffset + 2),
                sampler->GetSample(sampleOffset + 3),
                sampler->GetSample(sampleOffset + 4),
                sampler->GetSample(sampleOffset + 5),
                pathThrouput, bsdf, depth, &radiance);

            //------------------------------------------------------------------
            // Build the next vertex path ray
            //------------------------------------------------------------------

            Vector sampledDir;
            BSDFEvent event;
            float cosSampledDir;
            const Spectrum bsdfSample = bsdf.Sample(&sampledDir,
                sampler->GetSample(sampleOffset + 6),
                sampler->GetSample(sampleOffset + 7),
                &lastPdfW, &cosSampledDir, &event);
            if (bsdfSample.Black())
                break;

            lastSpecular = ((event & SPECULAR) != 0);
//......... the remaining part of this example is omitted .........
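Example 3 requests 4 bootstrap floats plus 9 per path vertex: offset + 0 feeds the intersection/pass-through test, + 1 .. + 5 feed DirectLightSampling(), and + 6 / + 7 feed the BSDF sample (the remaining float of the stride is presumably consumed in the omitted tail, e.g. for Russian roulette). The first two bootstrap floats are turned into pixel coordinates with a clamp so the sample can never fall outside the film; the sketch below mirrors just that mapping, with an assumed film resolution and made-up sample values.

#include <algorithm>
#include <cstdio>

// Mirrors the screen-coordinate mapping at the top of Example 3: a sample in
// [0, 1) is scaled to the film resolution and clamped so it can never land on
// the pixel just past the right/bottom border.
int main() {
    const unsigned int filmWidth = 640, filmHeight = 480;  // assumed resolution

    const float u0 = 0.999999f;  // pretend this came from sampler->GetSample(0)
    const float u1 = 0.25f;      // pretend this came from sampler->GetSample(1)

    const float screenX = std::min(u0 * filmWidth, (float)(filmWidth - 1));
    const float screenY = std::min(u1 * filmHeight, (float)(filmHeight - 1));

    std::printf("screenX = %g (clamped to at most %u), screenY = %g\n",
            screenX, filmWidth - 1, screenY);
    return 0;
}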
Example 4: RenderFuncVM
void BiDirVMCPURenderThread::RenderFuncVM() {
    //SLG_LOG("[BiDirVMCPURenderThread::" << threadIndex << "] Rendering thread started");

    //--------------------------------------------------------------------------
    // Initialization
    //--------------------------------------------------------------------------

    BiDirVMCPURenderEngine *engine = (BiDirVMCPURenderEngine *)renderEngine;
    RandomGenerator *rndGen = new RandomGenerator(engine->seedBase + threadIndex);
    Scene *scene = engine->renderConfig->scene;
    PerspectiveCamera *camera = scene->camera;
    Film *film = threadFilm;
    const unsigned int filmWidth = film->GetWidth();
    const unsigned int filmHeight = film->GetHeight();
    pixelCount = filmWidth * filmHeight;

    // Setup the samplers
    vector<Sampler *> samplers(engine->lightPathsCount, NULL);
    const unsigned int sampleSize =
        sampleBootSize + // To generate the initial light vertex and trace eye ray
        engine->maxLightPathDepth * sampleLightStepSize + // For each light vertex
        engine->maxEyePathDepth * sampleEyeStepSize; // For each eye vertex
    double metropolisSharedTotalLuminance, metropolisSharedSampleCount;
    for (u_int i = 0; i < samplers.size(); ++i) {
        Sampler *sampler = engine->renderConfig->AllocSampler(rndGen, film,
            &metropolisSharedTotalLuminance, &metropolisSharedSampleCount);
        sampler->RequestSamples(sampleSize);

        samplers[i] = sampler;
    }

    u_int iteration = 0;
    vector<vector<SampleResult> > samplesResults(samplers.size());
    vector<vector<PathVertexVM> > lightPathsVertices(samplers.size());
    vector<Point> lensPoints(samplers.size());
    HashGrid hashGrid;
    while (!boost::this_thread::interruption_requested()) {
        // Clear the arrays
        for (u_int samplerIndex = 0; samplerIndex < samplers.size(); ++samplerIndex) {
            samplesResults[samplerIndex].clear();
            lightPathsVertices[samplerIndex].clear();
        }

        // Setup vertex merging
        float radius = engine->baseRadius;
        radius /= powf(float(iteration + 1), .5f * (1.f - engine->radiusAlpha));
        radius = Max(radius, DEFAULT_EPSILON_STATIC);
        const float radius2 = radius * radius;

        const float vmFactor = M_PI * radius2 * engine->lightPathsCount;
        vmNormalization = 1.f / vmFactor;

        const float etaVCM = vmFactor;
        misVmWeightFactor = MIS(etaVCM);
        misVcWeightFactor = MIS(1.f / etaVCM);

        //----------------------------------------------------------------------
        // Trace all light paths
        //----------------------------------------------------------------------

        for (u_int samplerIndex = 0; samplerIndex < samplers.size(); ++samplerIndex) {
            Sampler *sampler = samplers[samplerIndex];

            // Sample a point on the camera lens
            if (!camera->SampleLens(sampler->GetSample(3), sampler->GetSample(4),
                    &lensPoints[samplerIndex]))
                continue;

            TraceLightPath(sampler, lensPoints[samplerIndex],
                lightPathsVertices[samplerIndex], samplesResults[samplerIndex]);
        }

        //----------------------------------------------------------------------
        // Store all light path vertices in the k-NN accelerator
        //----------------------------------------------------------------------

        hashGrid.Build(lightPathsVertices, radius);

        //----------------------------------------------------------------------
        // Trace all eye paths
        //----------------------------------------------------------------------

        for (u_int samplerIndex = 0; samplerIndex < samplers.size(); ++samplerIndex) {
            Sampler *sampler = samplers[samplerIndex];
            const vector<PathVertexVM> &lightPathVertices = lightPathsVertices[samplerIndex];

            PathVertexVM eyeVertex;
            SampleResult eyeSampleResult;
            eyeSampleResult.type = PER_PIXEL_NORMALIZED;
            eyeSampleResult.alpha = 1.f;

            Ray eyeRay;
            eyeSampleResult.screenX = min(sampler->GetSample(0) * filmWidth, (float)(filmWidth - 1));
            eyeSampleResult.screenY = min(sampler->GetSample(1) * filmHeight, (float)(filmHeight - 1));
            camera->GenerateRay(eyeSampleResult.screenX, eyeSampleResult.screenY, &eyeRay,
                sampler->GetSample(9), sampler->GetSample(10));

            eyeVertex.bsdf.hitPoint.fixedDir = -eyeRay.d;
            eyeVertex.throughput = Spectrum(1.f, 1.f, 1.f);
            const float cosAtCamera = Dot(scene->camera->GetDir(), eyeRay.d);
//......... the remaining part of this example is omitted .........
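Examples 1 and 4 derive their vertex-merging constants from the current radius: vmFactor = pi * r^2 * lightPathsCount is both the normalization applied to merged contributions (as 1 / vmFactor) and the etaVCM ratio that weights vertex merging against vertex connection in the MIS terms. MIS() itself is defined elsewhere in the renderer; the sketch below assumes the balance heuristic (identity), while a power-heuristic variant would square its argument, and the engine settings used are made-up values.

#include <cstdio>

// Illustrates the vertex-merging constants computed at the top of every pass
// in Examples 1 and 4. MIS() is assumed here to be the balance heuristic
// (identity); a power-heuristic renderer would square its argument instead.
static inline float MIS(const float a) { return a; }

int main() {
    const float kPi = 3.14159265f;
    const unsigned int lightPathsCount = 65536;  // assumed engine setting
    const float radius = .01f;                   // current merge radius

    const float radius2 = radius * radius;
    const float vmFactor = kPi * radius2 * lightPathsCount;

    const float vmNormalization = 1.f / vmFactor;     // divides merged contributions
    const float etaVCM = vmFactor;                    // merging vs. connection ratio
    const float misVmWeightFactor = MIS(etaVCM);
    const float misVcWeightFactor = MIS(1.f / etaVCM);

    std::printf("vmNormalization = %g\nmisVmWeightFactor = %g\nmisVcWeightFactor = %g\n",
            vmNormalization, misVmWeightFactor, misVcWeightFactor);
    return 0;
}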
Example 5: main
int main() {
    outPut[0]="scene1.test";
    outPut[1]="scene1-camera1.test";
    outPut[2]="scene1-camera2.test";
    outPut[3]="scene1-camera3.test";
    outPut[4]="scene1-camera4.test";
    outPut[5]="scene2-camera1.test";
    outPut[6]="scene2-camera2.test";
    outPut[7]="scene2-camera3.test";
    outPut[8]="scene3.test";
    outPut[9]="self.test";
    outPut[10]="spheres.test";
    outPut[11]="self1.test";
    outPut[12]="spheres2.test";
    outPut[13]="spheres3.test";
    outPut[14]="scene3-2.test";
    outPut[15]="self2.test";
    outPut[16]="self3.test";
    outPut[17]="self4.test";
    outPut[18]="self2-1.test";
    outPut[19]="self2-2.test";
    outPut[20]="self4-2.test";
    outPut[21]="self4-3.test";
    outPut[22]="spheres4.test";
    outPut[23]="self4-4.test";
    outPut[24]="self4-5.test";
    outPut[25]="self4-6.test";
    outPut[26]="self2-3.test";
    outPut[27]="self3-1.test";
    outPut[28]="scene1-4.test";
    outPut[29]="spheres4-1.test";
    outPut[30]="spheres4-2.test";
    outPut[31]="spheres4-3.test";
    outPut[32]="spheres4-4.test";
    outPut[33]="spheres4-5.test";

    for (int names = 0; names < TESTS; names++) {
        Film myImage;
        Camera myCamera;
        vector<Primitive> myPrimitives(1, Primitive());
        vector<Triangle> myTriangles(1, Triangle());
        Parser myParser;
        Sample currSample(0, 0);
        Sampler mySampler;
        RayTracer myTracer;
        Ray currRay(vec3(0, 0, 0), vec3(0, 0, 0), vec3(0, 0, 0));
        Color currColor;

        inputfile.open(outPut[names].c_str());

        // Read the image size, then set up the camera, film and sampler
        int *x, *y;
        x = (int*) malloc(sizeof(int));
        y = (int*) malloc(sizeof(int));
        myParser.initialparse(inputfile, x, y);
        myCamera.SetAspect(x, y);
        myImage.SetFilm(*x, *y);
        myImage.InitializeFilm();
        mySampler.SetSamplerSize(*x, *y);

        myParser.parsefile(inputfile, &myCamera, &myTracer, &maxDepth);
        myTracer.SetDepth(maxDepth);
        cout << "maxDepth: " << maxDepth << endl;
        assert(maxDepth >= 2);

        // Pull samples from the sampler until it reports that it is done
        while (mySampler.GetSample(&currSample)) {
            currColor.SetColor(0.0, 0.0, 0.0); // reset currColor to 0 every time
            myCamera.GenerateRay(currSample, &currRay);
            myTracer.traceRay(&currRay, 0, &currColor);
            myImage.Commit(currSample, currColor);
        }
        myImage.WriteImage(outPut[names]);

        inputfile.close();
        // x and y were allocated with malloc(), so release them with free()
        // rather than delete
        free(x);
        free(y);
        cout << "finished " << outPut[names] << endl;
    }
    cout << "finished everything" << endl;
    return 0;
}
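Example 5 uses a different, homework-style Sampler: GetSample(&currSample) fills in the next sample position and returns false once the whole film has been covered, which is what terminates the while loop in main(). The class below is a hypothetical minimal version of such a sampler (one sample at each pixel center, in row-major order); the actual class in that project may differ, but the loop terminates for the same reason.

#include <cstdio>

struct Sample {
    Sample(float x_ = 0.f, float y_ = 0.f) : x(x_), y(y_) {}
    float x, y;
};

// Hypothetical pixel-by-pixel sampler matching the loop in Example 5:
// each call yields the center of the next pixel until the film is exhausted.
class Sampler {
public:
    void SetSamplerSize(int width, int height) {
        w = width;
        h = height;
        i = 0;
        j = 0;
    }

    bool GetSample(Sample *sample) {
        if (j >= h)
            return false;            // every pixel has already been produced
        *sample = Sample(i + .5f, j + .5f);
        if (++i == w) {              // advance in row-major order
            i = 0;
            ++j;
        }
        return true;
    }

private:
    int w = 0, h = 0, i = 0, j = 0;
};

int main() {
    Sampler mySampler;
    mySampler.SetSamplerSize(4, 2);

    Sample currSample;
    while (mySampler.GetSample(&currSample))
        std::printf("sample at (%.1f, %.1f)\n", currSample.x, currSample.y);
    return 0;
}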