This article collects typical usage examples of the TypeScript class neuroglancer/webgl/one_dimensional_texture_access.OneDimensionalTextureAccessHelper. If you are unsure how to use the OneDimensionalTextureAccessHelper class in TypeScript, or are looking for concrete examples of it in practice, the curated class code examples below may help.
Six code examples of the OneDimensionalTextureAccessHelper class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better TypeScript code examples.
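Before the individual examples, here is a minimal outline of the typical call sequence, condensed purely from the calls that appear in the examples below; the builder, gl, shader, layout, dataType, and numComponents variables are assumed to be set up by the surrounding code, as in Example 1. Treat this as an illustrative sketch rather than library documentation.

// Sketch only: condensed from the examples below.
const accessHelper = new OneDimensionalTextureAccessHelper('textureAccess');
accessHelper.defineShader(builder);  // emit the shared GLSL access helpers
builder.addFragmentCode(
    accessHelper.getAccessor('readValue', 'uSampler', dataType, numComponents));
// ... after the shader is built and bound ...
accessHelper.setupTextureLayout(gl, shader, layout);  // upload per-layout uniforms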
Example 1: fragmentShaderTest
fragmentShaderTest(6, tester => {
  let {gl, builder} = tester;
  const dataType = DataType.UINT32;
  const numComponents = 1;
  const format = new OneDimensionalTextureFormat();
  const layout = new OneDimensionalTextureLayout();
  compute1dTextureFormat(format, dataType, numComponents);
  // `dataLength` is defined earlier in the original spec file (omitted from this excerpt).
  const data = new Uint32Array(dataLength);
  for (let i = 0; i < data.length; ++i) {
    data[i] = i;
  }
  setLayout(layout, gl, format.texelsPerElement);
  const accessHelper = new OneDimensionalTextureAccessHelper('textureAccess');
  const textureUnitSymbol = Symbol('textureUnit');
  accessHelper.defineShader(builder);
  builder.addUniform('highp float', 'uOffset');
  builder.addUniform('highp vec4', 'uExpected');
  builder.addTextureSampler2D('uSampler', textureUnitSymbol);
  builder.addFragmentCode(
      accessHelper.getAccessor('readValue', 'uSampler', dataType, numComponents));
  builder.addFragmentCode(glsl_unnormalizeUint8);
  builder.addFragmentCode(glsl_uintleToFloat);
  builder.addOutputBuffer('vec4', 'v4f_fragData0', 0);
  builder.addOutputBuffer('vec4', 'v4f_fragData1', 1);
  builder.addOutputBuffer('vec4', 'v4f_fragData2', 2);
  builder.addOutputBuffer('vec4', 'v4f_fragData3', 3);
  builder.addOutputBuffer('vec4', 'v4f_fragData4', 4);
  builder.addOutputBuffer('vec4', 'v4f_fragData5', 5);
  builder.setFragmentMain(`
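// readValue, generated by getAccessor above, returns a uint32_t whose `value`
// member is a vec4 holding the element's four bytes as normalized [0,1]
// components, least-significant byte in x (as the byte-wise checks below rely on).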
uint32_t value = readValue(uOffset);
v4f_fragData4 = packFloatIntoVec4(uintleToFloat(value.value.xyz));
v4f_fragData5 = packFloatIntoVec4(all(equal(value.value, uExpected)) ? 1.0 : 0.0);
value.value = unnormalizeUint8(value.value);
v4f_fragData0 = packFloatIntoVec4(value.value.x);
v4f_fragData1 = packFloatIntoVec4(value.value.y);
v4f_fragData2 = packFloatIntoVec4(value.value.z);
v4f_fragData3 = packFloatIntoVec4(value.value.w);
`);
  tester.build();
  let {shader} = tester;
  shader.bind();
  accessHelper.setupTextureLayout(gl, shader, layout);
  const textureUnit = shader.textureUnit(textureUnitSymbol);
  let texture = gl.createTexture();
  tester.registerDisposer(() => {
    gl.deleteTexture(texture);
  });
  gl.bindTexture(gl.TEXTURE_2D, texture);
  setOneDimensionalTextureData(gl, layout, format, data);
  gl.bindTexture(gl.TEXTURE_2D, null);
  function testOffset(x: number) {
    let value = data[x];
    gl.uniform1f(shader.uniform('uOffset'), x);
    gl.uniform4fv(shader.uniform('uExpected'), setVec4FromUint32(new Float32Array(4), value));
    gl.activeTexture(gl.TEXTURE0 + textureUnit);
    gl.bindTexture(gl.TEXTURE_2D, texture);
    tester.execute();
    gl.bindTexture(gl.TEXTURE_2D, null);
    let actual = new Float32Array(4);
    let expected = new Float32Array(4);
    for (let i = 0; i < 4; ++i) {
      actual[i] = tester.readFloat(i);
      expected[i] = (value >>> (8 * i)) & 0xFF;
    }
    for (let i = 0; i < 4; ++i) {
      expect(actual[i]).toBe(
          expected[i],
          `offset = ${x}, value = ${x}, actual = ${Array.from(actual)}, expected = ${
              Array.from(expected)}`);
    }
    expect(tester.readFloat(4))
        .toBe(value, `uint24le value != expected, offset = ${x}, value = ${x}`);
    expect(tester.readFloat(5))
        .toBe(1.0, `uExpected != value in shader, offset = ${x}, value = ${x}`);
  }
  testOffset(255 /*+ 256 * 256 * 9*/);
  for (let i = 0; i < 100; ++i) {
    testOffset(i);
  }
  const COUNT = 100;
  for (let i = 0; i < COUNT; ++i) {
    let offset = Math.floor(Math.random() * data.length);
    testOffset(offset);
  }
});
Example 2: getChannelOffset
defineShader(builder: ShaderBuilder) {
  super.defineShader(builder);
  let {textureAccessHelper} = this;
  textureAccessHelper.defineShader(builder);
  builder.addFragmentCode(
      textureAccessHelper.getAccessor('readVolumeData', 'uVolumeChunkSampler', this.dataType));
  let {numChannels} = this;
  if (numChannels > 1) {
    builder.addUniform('highp float', 'uChannelStride');
    builder.addFragmentCode(`
float getChannelOffset(int channelIndex) {
  return float(channelIndex) * uChannelStride;
}
`);
  } else {
    builder.addFragmentCode(`float getChannelOffset(int channelIndex) { return 0.0; }`);
  }
  builder.addFragmentCode(`
float getIndexIntoChunk (int channelIndex) {
  vec3 chunkDataPosition = getPositionWithinChunk();
  return chunkDataPosition.x + uChunkDataSize.x * (chunkDataPosition.y + uChunkDataSize.y * chunkDataPosition.z) + getChannelOffset(channelIndex);
}
`);
  const shaderType = getShaderType(this.dataType);
  builder.addFragmentCode(`
${shaderType} getDataValue (int channelIndex) {
  return readVolumeData(getIndexIntoChunk(channelIndex));
}
`);
}
Example 3: defineShader
defineShader(builder: ShaderBuilder) {
  super.defineShader(builder);
  this.textureAccessHelper.defineShader(builder);
  let local = (x: string) => 'compressedSegmentationChunkFormat_' + x;
  builder.addUniform('highp vec3', 'uSubchunkGridSize');
  builder.addUniform('highp vec3', 'uSubchunkSize');
  builder.addFragmentCode(glsl_getFortranOrderIndexFromNormalized);
  const {dataType} = this;
  const glslType = GLSL_TYPE_FOR_DATA_TYPE.get(dataType);
  if (dataType === DataType.UINT64) {
    builder.addFragmentCode(glsl_uint64);
  } else {
    builder.addFragmentCode(glsl_uint32);
  }
  let fragmentCode = `
vec4 ${local('readTextureValue')}(float offset) {
  vec4 result;
  ${this.textureAccessHelper.readTextureValue}(uVolumeChunkSampler, offset, result);
  return result;
}
float ${local('getChannelOffset')}(int channelIndex) {
  if (channelIndex == 0) {
    return ${this.numChannels}.0;
  }
  vec4 v = ${local('readTextureValue')}(float(channelIndex));
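  // Reassemble the little-endian uint24 channel offset from the normalized texel bytes.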
  return v.x * 255.0 + v.y * 255.0 * 256.0 + v.z * 255.0 * 256.0 * 256.0;
}
${glslType} getDataValue (int channelIndex) {
  vec3 chunkPosition = getPositionWithinChunk();
  // TODO: maybe premultiply this and store as uniform.
  vec3 subchunkGridPosition = floor(chunkPosition / uSubchunkSize);
  float subchunkGridOffset = getFortranOrderIndex(subchunkGridPosition, uSubchunkGridSize);
  float channelOffset = ${local('getChannelOffset')}(channelIndex);
  // TODO: Maybe just combine this offset into subchunkGridStrides.
  float subchunkHeaderOffset = subchunkGridOffset * 2.0 + channelOffset;
  vec4 subchunkHeader0 = ${local('readTextureValue')}(subchunkHeaderOffset);
  vec4 subchunkHeader1 = ${local('readTextureValue')}(subchunkHeaderOffset + 1.0);
  float outputValueOffset = dot(subchunkHeader0.xyz, vec3(255, 256 * 255, 256 * 256 * 255)) + channelOffset;
  float encodingBits = subchunkHeader0[3] * 255.0;
  if (encodingBits > 0.0) {
    vec3 subchunkPosition = floor(min(chunkPosition - subchunkGridPosition * uSubchunkSize, uSubchunkSize - 1.0));
    float subchunkOffset = getFortranOrderIndex(subchunkPosition, uSubchunkSize);
    highp float encodedValueBaseOffset = dot(subchunkHeader1.xyz, vec3(255.0, 256.0 * 255.0, 256.0 * 256.0 * 255.0)) + channelOffset;
    highp float encodedValueOffset = floor(encodedValueBaseOffset + subchunkOffset * encodingBits / 32.0);
    vec4 encodedValue = ${local('readTextureValue')}(encodedValueOffset);
    float wordOffset = mod(subchunkOffset * encodingBits, 32.0);
    // If the value is in the first byte, then 0 <= wordOffset < 8.
    // We need to mod by 2**encodingBits.
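    // Illustrative worked example (not from the original source): with
    // encodingBits = 8.0 and subchunkOffset = 5.0, the encoded value starts at
    // bit 40, so encodedValueOffset = encodedValueBaseOffset + 1.0 and
    // wordOffset = 8.0; since wordOffset < 16.0, the xy branch below rebuilds
    // the low 16 bits of that word, multiplying by 2^-8 and flooring shifts
    // away the low byte, and mod 2^8 keeps exactly the decoded 8-bit value.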
    float wordShifter = pow(2.0, -wordOffset);
    float encodedValueMod = pow(2.0, encodingBits);
    float encodedValueShifted;
    if (wordOffset < 16.0) {
      encodedValueShifted = dot(encodedValue.xy, vec2(255.0, 255.0 * 256.0));
    } else {
      encodedValueShifted = dot(encodedValue.zw, vec2(255.0 * 256.0 * 256.0, 255.0 * 256.0 * 256.0 * 256.0));
    }
    encodedValueShifted = floor(encodedValueShifted * wordShifter);
    float decodedValue = mod(encodedValueShifted, encodedValueMod);
    outputValueOffset += decodedValue * ${this.dataType === DataType.UINT64 ? '2.0' : '1.0'};
  }
  ${glslType} result;
`;
  if (dataType === DataType.UINT64) {
    fragmentCode += `
  result.low = ${local('readTextureValue')}(outputValueOffset);
  result.high = ${local('readTextureValue')}(outputValueOffset+1.0);
`;
  } else {
    fragmentCode += `
  result.value = ${local('readTextureValue')}(outputValueOffset);
`;
  }
  fragmentCode += `
  return result;
}
`;
  builder.addFragmentCode(fragmentCode);
}
Example 4: setupTextureLayout
/**
 * Called each time textureLayout changes while drawing chunks.
 */
setupTextureLayout(gl: GL, shader: ShaderProgram, textureLayout: TextureLayout) {
  gl.uniform3fv(shader.uniform('uSubchunkGridSize'), textureLayout.subchunkGridSize);
  this.textureAccessHelper.setupTextureLayout(gl, shader, textureLayout);
}
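For context, here is a rough sketch of where a call like this sits in a chunk-drawing loop. The loop itself, visibleChunks, chunkFormat, and the per-chunk textureLayout field are illustrative assumptions, not the actual neuroglancer draw code.

// Hypothetical sketch: re-upload layout uniforms only when the layout changes.
let previousLayout: TextureLayout|undefined;
for (const chunk of visibleChunks) {   // `visibleChunks` is assumed for illustration
  const {textureLayout} = chunk;       // assumed per-chunk layout handle
  if (textureLayout !== previousLayout) {
    chunkFormat.setupTextureLayout(gl, shader, textureLayout);
    previousLayout = textureLayout;
  }
  // ... bind the chunk's texture and issue the draw call ...
}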
Example 5: setupTextureLayout
/**
 * Called each time textureLayout changes while drawing chunks.
 */
setupTextureLayout(gl: GL, shader: ShaderProgram, textureLayout: TextureLayout) {
  if (this.numChannels > 1) {
    gl.uniform1f(shader.uniform('uChannelStride'), textureLayout.channelStride);
  }
  this.textureAccessHelper.setupTextureLayout(gl, shader, textureLayout);
}
Example 6: getChannelOffset
defineShader(builder: ShaderBuilder) {
  super.defineShader(builder);
  this.textureAccessHelper.defineShader(builder);
  let {numChannels} = this;
  if (numChannels > 1) {
    builder.addUniform('highp float', 'uChannelStride');
    builder.addFragmentCode(`
float getChannelOffset(int channelIndex) {
  return float(channelIndex) * uChannelStride;
}
`);
  } else {
    builder.addFragmentCode(`float getChannelOffset(int channelIndex) { return 0.0; }`);
  }
  builder.addFragmentCode(`
float getIndexIntoChunk (int channelIndex) {
  vec3 chunkDataPosition = getPositionWithinChunk();
  return chunkDataPosition.x + uChunkDataSize.x * (chunkDataPosition.y + uChunkDataSize.y * chunkDataPosition.z) + getChannelOffset(channelIndex);
}
`);
  switch (this.dataType) {
    case DataType.UINT8:
      builder.addFragmentCode(glsl_uint8);
      builder.addFragmentCode(`
uint8_t getDataValue (int channelIndex) {
  uint8_t result;
  vec4 temp;
  ${this.textureAccessHelper.readTextureValue}(uVolumeChunkSampler, getIndexIntoChunk(channelIndex), temp);
  result.value = temp.x;
  return result;
}
`);
      break;
    case DataType.FLOAT32:
      builder.addFragmentCode(glsl_float);
      builder.addFragmentCode(`
float getDataValue (int channelIndex) {
  vec4 temp;
  ${this.textureAccessHelper.readTextureValue}(uVolumeChunkSampler, getIndexIntoChunk(channelIndex), temp);
  return temp.x;
}
`);
      break;
    case DataType.UINT16:
      builder.addFragmentCode(glsl_uint16);
      builder.addFragmentCode(`
uint16_t getDataValue (int channelIndex) {
  uint16_t result;
  vec4 temp;
  ${this.textureAccessHelper.readTextureValue}(uVolumeChunkSampler, getIndexIntoChunk(channelIndex), temp);
  result.value = temp.xw;
  return result;
}
`);
      break;
    case DataType.UINT32:
      builder.addFragmentCode(glsl_uint32);
      builder.addFragmentCode(`
uint32_t getDataValue (int channelIndex) {
  uint32_t result;
  ${this.textureAccessHelper.readTextureValue}(uVolumeChunkSampler, getIndexIntoChunk(channelIndex), result.value);
  return result;
}
`);
      break;
    case DataType.UINT64:
      builder.addFragmentCode(glsl_uint64);
      builder.addFragmentCode(`
uint64_t getDataValue (int channelIndex) {
  uint64_t result;
  ${this.textureAccessHelper.readTextureValue}(uVolumeChunkSampler, getIndexIntoChunk(channelIndex), result.low, result.high);
  return result;
}
`);
      break;
  }
}