TypeScript vec3.fromValues Method Code Examples

This article collects typical code examples of the vec3.fromValues method from neuroglancer/util/geom in TypeScript. If you are asking yourself what exactly vec3.fromValues does, how to call it, or what real uses of it look like, the hand-picked examples below should help. You can also explore further usage examples of the containing module, neuroglancer/util/geom.vec3.


A total of 10 code examples of the vec3.fromValues method are shown below, sorted by popularity by default.
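
Before looking at the project code, here is a minimal usage sketch (written for this article, not taken from any of the projects below, and assuming the standard gl-matrix API that neuroglancer/util/geom re-exports). vec3.fromValues allocates a new three-component vector (a Float32Array of length 3) from three scalar arguments:

  import {vec3} from 'neuroglancer/util/geom';

  // Allocate a new vec3 (a Float32Array of length 3) from three scalars.
  const position = vec3.fromValues(10, 20, 30);

  // The result can be passed to any other gl-matrix vec3 operation,
  // for example scaling it in place:
  vec3.scale(position, position, 2);  // position is now [20, 40, 60]

Note that fromValues always allocates a fresh vector; several of the examples below instead reuse a vector created with vec3.create() and fill it with in-place operations such as vec3.subtract.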

Example 1: getSources

  getSources(vectorGraphicsSourceOptions: VectorGraphicsSourceOptions) {
    const voxelSize = this.stackInfo.voxelResolution;
    const chunkSize = vec3.subtract(
        vec3.create(), this.stackInfo.upperVoxelBound, this.stackInfo.lowerVoxelBound);
    vec3.multiply(chunkSize, chunkSize, voxelSize);
    chunkSize[2] = voxelSize[2];

    const spec = VectorGraphicsChunkSpecification.make({
      voxelSize,
      chunkSize,
      lowerChunkBound: vec3.fromValues(0, 0, this.stackInfo.lowerVoxelBound[2]),
      upperChunkBound: vec3.fromValues(1, 1, this.stackInfo.upperVoxelBound[2]),
      vectorGraphicsSourceOptions
    });
    const source = this.chunkManager.getChunkSource(PointMatchSource, {
      spec,
      parameters: {
        'baseUrls': this.baseUrls,
        'owner': this.ownerInfo.owner,
        'project': this.stackInfo.project,
        'stack': this.stack,
        'encoding': 'points',
        'matchCollection': this.matchCollection,
        'zoffset': this.zoffset
      }
    });

    return [[source]];
  }
Developer ID: google, Project: neuroglancer, Lines of code: 29, Source file: frontend.ts

Example 2: it

 it('parseRGBColorSpecification works', () => {
   expect(parseRGBColorSpecification('white')).toEqual(vec3.fromValues(1, 1, 1));
   expect(parseRGBColorSpecification('black')).toEqual(vec3.fromValues(0, 0, 0));
   expect(parseRGBColorSpecification('red')).toEqual(vec3.fromValues(1, 0, 0));
   expect(parseRGBColorSpecification('lime')).toEqual(vec3.fromValues(0, 1, 0));
   expect(parseRGBColorSpecification('blue')).toEqual(vec3.fromValues(0, 0, 1));
 });
Developer ID: google, Project: neuroglancer, Lines of code: 7, Source file: color.spec.ts

Example 3: Error

 .then(header => {
   let dataTypeInfo = DATA_TYPE_CONVERSIONS.get(header.datatypeCode);
   if (dataTypeInfo === undefined) {
     throw new Error(
         `Unsupported data type: ${NiftiDataType[header.datatypeCode] ||
         header.datatypeCode}.`);
   }
   if (header.dims[4] !== 1) {
     throw new Error(`Time series data not supported.`);
   }
   const spatialUnits = header.xyzt_units & NIFTI1.SPATIAL_UNITS_MASK;
   let unitsPerNm = 1;
   switch (spatialUnits) {
     case NIFTI1.UNITS_METER:
       unitsPerNm = 1e9;
       break;
     case NIFTI1.UNITS_MM:
       unitsPerNm = 1e6;
       break;
     case NIFTI1.UNITS_MICRON:
       unitsPerNm = 1e3;
       break;
   }
   const {quatern_b, quatern_c, quatern_d} = header;
   const quatern_a = Math.sqrt(
       1.0 - quatern_b * quatern_b - quatern_c * quatern_c - quatern_d * quatern_d);
   const qfac = header.pixDims[0] === -1 ? -1 : 1;
   let info: NiftiVolumeInfo = {
     description: header.description,
     affine: convertAffine(header.affine),
     dataType: dataTypeInfo.dataType,
     numChannels: header.dims[5],
     volumeType: dataTypeInfo.volumeType,
     voxelSize: vec3.fromValues(
         unitsPerNm * header.pixDims[1], unitsPerNm * header.pixDims[2],
         unitsPerNm * header.pixDims[3]),
     volumeSize: vec3.fromValues(header.dims[1], header.dims[2], header.dims[3]),
     qoffset: vec3.fromValues(
         unitsPerNm * header.qoffset_x, unitsPerNm * header.qoffset_y,
         unitsPerNm * header.qoffset_z),
     qform_code: header.qform_code,
     sform_code: header.sform_code,
     qfac: qfac,
     quatern: quat.fromValues(quatern_b, quatern_c, quatern_d, quatern_a),
   };
   return {value: info};
 });
Developer ID: janelia-flyem, Project: neuroglancer, Lines of code: 47, Source file: backend.ts

Example 4: parsePositionString

export function parsePositionString(s: string): vec3|undefined {
  const match = s.match(
      /^[\[\]{}()\s,]*(\d+(?:\.\d+)?)[,\s]+(\d+(?:\.\d+)?)[,\s]+(\d+(?:\.\d+)?)[\[\]{}()\s,]*$/);
  if (match !== null) {
    return vec3.fromValues(parseFloat(match[1]), parseFloat(match[2]), parseFloat(match[3]));
  }
  return undefined;
}
Developer ID: google, Project: neuroglancer, Lines of code: 8, Source file: default_clipboard_handling.ts

Example 5: it

  it('getMultiscaleChunksToDraw simple', () => {
    const manifest: MultiscaleMeshManifest = {
      chunkShape: vec3.fromValues(10, 20, 30),
      chunkGridSpatialOrigin: vec3.fromValues(5, 6, -50),
      clipLowerBound: vec3.fromValues(20, 23, -50),
      clipUpperBound: vec3.fromValues(40, 45, -20),
      lodScales: [20, 40],
      chunkCoordinates: Uint32Array.from([
        0, 0, 0,  //
      ]),
    };
    const viewportWidth = 640;
    const viewportHeight = 480;
    const modelViewProjection =
        mat4.perspective(mat4.create(), Math.PI / 2, viewportWidth / viewportHeight, 5, 100);
    expect(getChunkList(
               manifest, modelViewProjection, /*detailCutoff=*/ 1000, viewportWidth, viewportHeight))
        .toEqual([{
          lod: 1,
          renderScale: 960,
          beginIndex: 0,
          endIndex: 1,
        }]);

    expect(getChunkList(
               manifest, modelViewProjection, /*detailCutoff=*/ 800, viewportWidth, viewportHeight))
        .toEqual([
          {
            lod: 1,
            renderScale: 960,
            beginIndex: 0,
            endIndex: 1,
          },
          {
            lod: 0,
            renderScale: 480,
            beginIndex: 0,
            endIndex: 1,
          }
        ]);
  });
Developer ID: google, Project: neuroglancer, Lines of code: 41, Source file: multiscale.spec.ts

Example 6: it

 it('works on basic cases', () => {
   expect(parsePositionString('10 2 3')).toEqual(vec3.fromValues(10, 2, 3));
   expect(parsePositionString('[1 2 3')).toEqual(vec3.fromValues(1, 2, 3));
   expect(parsePositionString('[1, 2, 3,')).toEqual(vec3.fromValues(1, 2, 3));
   expect(parsePositionString('[1, 2, 3]')).toEqual(vec3.fromValues(1, 2, 3));
   expect(parsePositionString('1.2 2.4 3')).toEqual(vec3.fromValues(1.2, 2.4, 3));
   expect(parsePositionString('{200, 400, 500}')).toEqual(vec3.fromValues(200, 400, 500));
 });
Developer ID: google, Project: neuroglancer, Lines of code: 8, Source file: default_clipboard_handling.spec.ts

Example 7: it

  it('getNearIsotropicBlockSize', () => {
    expect(
        getNearIsotropicBlockSize({voxelSize: vec3.fromValues(1, 1, 1), maxVoxelsPerChunkLog2: 18}))
        .toEqual(vec3.fromValues(64, 64, 64));

    expect(
        getNearIsotropicBlockSize({voxelSize: vec3.fromValues(2, 1, 1), maxVoxelsPerChunkLog2: 17}))
        .toEqual(vec3.fromValues(32, 64, 64));

    expect(
        getNearIsotropicBlockSize({voxelSize: vec3.fromValues(3, 3, 30), maxVoxelsPerChunkLog2: 9}))
        .toEqual(vec3.fromValues(16, 16, 2));

    expect(getNearIsotropicBlockSize({
      voxelSize: vec3.fromValues(3, 3, 30),
      upperVoxelBound: vec3.fromValues(1, 128, 128),
      maxVoxelsPerChunkLog2: 8
    })).toEqual(vec3.fromValues(1, 64, 4));
  });
Developer ID: google, Project: neuroglancer, Lines of code: 19, Source file: base.spec.ts

Example 8: describe

  describe('encodeChannels', () => {

    it('basic 1-channel 1-block', () => {
      const blockSize = [2, 2, 1];
      const input = Uint32Array.of(
          4, 4, 4, 4  //
      );
      const volumeSize = [2, 2, 1, 1];
      const output = new Uint32ArrayBuilder();
      encodeChannels(output, blockSize, input, volumeSize);
      expect(output.view)
          .toEqual(Uint32Array.of(
              1,       //
              2, 2, 4  //
          ));
    });

    for (let blockSize of [vec3.fromValues(2, 2, 2), vec3.fromValues(8, 4, 1), ]) {
      for (let volumeSize of [  //
               [1, 2, 1, 1],    //
               [1, 2, 1, 3],    //
               [2, 2, 2, 1],    //
               [2, 2, 2, 3],    //
               [4, 4, 5, 3],    //
      ]) {
        it(`round trip ${volumeSize.join(',')} with blockSize ${vec3Key(blockSize)}`, () => {
          const numPossibleValues = 15;
          const input = makeRandomUint32Array(prod4(volumeSize), numPossibleValues);
          const output = new Uint32ArrayBuilder();
          encodeChannels(output, blockSize, input, volumeSize);
          const decoded = new Uint32Array(input.length);
          decodeChannels(decoded, output.view, 0, volumeSize, blockSize);
          expect(decoded).toEqual(input);
        });
      }
    }
  });
Developer ID: google, Project: neuroglancer, Lines of code: 37, Source file: encode_uint32.spec.ts

Example 9: getSources

  getSources(volumeSourceOptions: VolumeSourceOptions) {
    let sources: VolumeChunkSource[][] = [];

    let numLevels = this.numLevels; 
    if (numLevels === undefined) {
      numLevels = computeStackHierarchy(this.stackInfo, this.dims[0]);
    }

    for (let level = 0; level < numLevels; level++) {
      let voxelSize = vec3.clone(this.stackInfo.voxelResolution);
      let chunkDataSize = vec3.fromValues(1, 1, 1);
      // tiles are NxMx1
      for (let i = 0; i < 2; ++i) {
        voxelSize[i] = voxelSize[i] * Math.pow(2, level);
        chunkDataSize[i] = this.dims[i];
      }

      let lowerVoxelBound = vec3.create(), upperVoxelBound = vec3.create();

      for (let i = 0; i < 3; i++) {
        lowerVoxelBound[i] = Math.floor(
            this.stackInfo.lowerVoxelBound[i] * (this.stackInfo.voxelResolution[i] / voxelSize[i]));
        upperVoxelBound[i] = Math.ceil(
            this.stackInfo.upperVoxelBound[i] * (this.stackInfo.voxelResolution[i] / voxelSize[i]));
      }

      let spec = VolumeChunkSpecification.make({
        voxelSize,
        chunkDataSize,
        numChannels: this.numChannels,
        dataType: this.dataType, lowerVoxelBound, upperVoxelBound, volumeSourceOptions,
      });

      let source = TileChunkSource.get(this.chunkManager, spec, {
        'baseUrls': this.baseUrls,
        'owner': this.ownerInfo.owner,
        'project': this.stackInfo.project,
        'stack': this.stack,
        'encoding': this.encoding,
        'level': level,
        'dims': `${this.dims[0]}_${this.dims[1]}`,
      });

      sources.push([source]);
    }
    return sources;
  }
Developer ID: janelia-flyem, Project: neuroglancer, Lines of code: 47, Source file: frontend.ts

Example 10: decodeChunk

function decodeChunk(chunk: VolumeChunk, response: ArrayBuffer, encoding: VolumeChunkEncoding) {
  const dv = new DataView(response);
  const mode = dv.getUint16(0, /*littleEndian=*/ false);
  if (mode !== 0) {
    throw new Error(`Unsupported mode: ${mode}.`);
  }
  const numDimensions = dv.getUint16(2, /*littleEndian=*/ false);
  if (numDimensions !== 3) {
    throw new Error(`Number of dimensions must be 3.`);
  }
  let offset = 4;
  const shape = new Uint32Array(numDimensions);
  for (let i = 0; i < numDimensions; ++i) {
    shape[i] = dv.getUint32(offset, /*littleEndian=*/ false);
    offset += 4;
  }
  chunk.chunkDataSize = vec3.fromValues(shape[0], shape[1], shape[2]);
  let buffer = new Uint8Array(response, offset);
  if (encoding === VolumeChunkEncoding.GZIP) {
    buffer = inflate(buffer);
  }
  decodeRawChunk(chunk, buffer.buffer, Endianness.BIG, buffer.byteOffset, buffer.byteLength);
}
Developer ID: google, Project: neuroglancer, Lines of code: 23, Source file: backend.ts


Note: The neuroglancer/util/geom.vec3.fromValues examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.