This article collects typical usage examples of the tensor1d function from @tensorflow/tfjs-core in TypeScript. If you are wondering how exactly to use tensor1d in TypeScript, or what it is typically used for, the curated examples below may help.
Eight code examples of the tensor1d function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better TypeScript code samples.
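Before the project-specific examples, here is a minimal, self-contained sketch of the tensor1d API itself, using only standard @tensorflow/tfjs-core calls; the variable names and values are illustrative:

import * as tf from '@tensorflow/tfjs-core';

// Create a rank-1 tensor from a flat array; dtype defaults to 'float32'.
const probs = tf.tensor1d([0.1, 0.7, 0.2]);

// The dtype can be set explicitly, e.g. 'int32' as in several examples below.
const labels = tf.tensor1d([0, 2, 1], 'int32');

async function main() {
  // Read the values back asynchronously as a TypedArray.
  console.log(await labels.data());  // Int32Array [0, 2, 1]
  console.log(probs.shape);          // [3]

  // Free backend memory once the tensors are no longer needed.
  probs.dispose();
  labels.dispose();
}

main();

tensor1d accepts a flat array (or TypedArray) plus an optional dtype, and it is the building block most of the examples below rely on.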
Example 1: extractScaleLayerParams
function extractScaleLayerParams(numWeights: number): ScaleLayerParams {
  const weights = tf.tensor1d(extractWeights(numWeights))
  const biases = tf.tensor1d(extractWeights(numWeights))
  return {
    weights,
    biases
  }
}
Example 2: it
it('should close the tensorArray', async () => {
  const tensorArray =
      new TensorArray('', 'int32', 5, [3], true, false, true);
  const input4 = tensor1d([0, 0, 0], 'int32');
  const input5 = tensor1d([1, 1, 1], 'int32');
  tensorArray.writeMany([0, 1], [input4, input5]);
  context.addTensorArray(tensorArray);
  node.op = 'tensorArrayClose';
  node.params['tensorArrayId'] = createNumberAttrFromIndex(0);
  node.inputNames = ['input2'];
  const input2 = [scalar(tensorArray.id)];
  await executeOp(node, {input2}, context);
  expect(tensorArray.closed).toBeTruthy();
});
Example 3: extractDepthwiseConvParams
function extractDepthwiseConvParams(numChannels: number): MobileNetV1.DepthwiseConvParams {
  const filters = tf.tensor4d(extractWeights(3 * 3 * numChannels), [3, 3, numChannels, 1])
  const batch_norm_scale = tf.tensor1d(extractWeights(numChannels))
  const batch_norm_offset = tf.tensor1d(extractWeights(numChannels))
  const batch_norm_mean = tf.tensor1d(extractWeights(numChannels))
  const batch_norm_variance = tf.tensor1d(extractWeights(numChannels))
  return {
    filters,
    batch_norm_scale,
    batch_norm_offset,
    batch_norm_mean,
    batch_norm_variance
  }
}
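For context, here is a hedged sketch of how the depthwise filter and the four tensor1d batch-norm vectors extracted above are typically consumed: a depthwise convolution followed by batch normalization and ReLU. The stride, padding and epsilon values are illustrative assumptions, not taken from face-api.js.

import * as tf from '@tensorflow/tfjs-core';

// x: activation of shape [batch, height, width, numChannels].
// The tensor1d vectors broadcast over the channel dimension.
function depthwiseConvBlock(
    x: tf.Tensor4D,
    filters: tf.Tensor4D,
    scale: tf.Tensor1D,
    offset: tf.Tensor1D,
    mean: tf.Tensor1D,
    variance: tf.Tensor1D): tf.Tensor4D {
  const conv = tf.depthwiseConv2d(x, filters, 1, 'same');
  const normed = tf.batchNorm(conv, mean, variance, offset, scale, 1e-3);
  return tf.relu(normed);
}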
Example 4: describe
describe('evaluation', () => {
  let node: Node;
  const input1 = [tfc.tensor1d([1])];
  const input2 = [tfc.scalar(1)];
  const context = new ExecutionContext({}, {});
  beforeEach(() => {
    node = {
      name: 'input1',
      op: '',
      category: 'evaluation',
      inputNames: ['input1', 'input2'],
      inputs: [],
      params: {},
      children: []
    };
  });
  describe('executeOp', () => {
    describe('topK', () => {
      it('should return input', () => {
        node.op = 'topK';
        node.params['x'] = createTensorAttr(0);
        node.params['k'] = createNumberAttrFromIndex(1);
        node.params['sorted'] = createBoolAttr(true);
        spyOn(tfc, 'topk').and.callThrough();
        executeOp(node, {input1, input2}, context);
        expect(tfc.topk).toHaveBeenCalledWith(input1[0], 1, true);
      });
    });
  });
});
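Setting aside the node-executor plumbing, the call that the test spies on can be reproduced directly with a tensor1d input; a short sketch with illustrative values:

import * as tf from '@tensorflow/tfjs-core';

async function topkDemo() {
  const x = tf.tensor1d([1, 9, 3, 7]);
  // k = 2, sorted = true: largest entries first, plus their indices.
  const {values, indices} = tf.topk(x, 2, true);
  console.log(await values.data());   // Float32Array [9, 7]
  console.log(await indices.data());  // Int32Array [1, 3]
}

topkDemo();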
Example 5: extractFcParams
function extractFcParams(channelsIn: number, channelsOut: number): FCParams {
  const fc_weights = tf.tensor2d(extractWeights(channelsIn * channelsOut), [channelsIn, channelsOut])
  const fc_bias = tf.tensor1d(extractWeights(channelsOut))
  return {
    weights: fc_weights,
    bias: fc_bias
  }
}
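A fully connected layer built from these parameters is usually applied as a matrix multiply plus a bias broadcast; a minimal sketch, assuming the conventional [channelsIn, channelsOut] weight layout created above:

import * as tf from '@tensorflow/tfjs-core';

// x: [batchSize, channelsIn], weights: [channelsIn, channelsOut],
// bias: [channelsOut] (the tensor1d created by extractFcParams).
function fullyConnected(
    x: tf.Tensor2D, weights: tf.Tensor2D, bias: tf.Tensor1D): tf.Tensor2D {
  return tf.add(tf.matMul(x, weights), bias);
}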
Example 6: it
it('should throw exception if inputs dtype do not match graph', () => {
  inputNode.params['dtype'] = {value: 'int32', type: 'dtype'};
  const inputTensor = tfc.tensor1d([1], 'float32');
  expect(() => executor.execute({input: [inputTensor]}))
      .toThrow(new Error(
          'The dtype of dict[\'input\'] provided' +
          ' in model.execute(dict) must be int32, but was float32'));
});
Example 7: it
it('should return input', () => {
  node.op = 'nonMaxSuppression';
  node.params['boxes'] = createTensorAttr(0);
  node.params['scores'] = createTensorAttr(1);
  node.params['maxOutputSize'] = createTensorAttr(2);
  node.params['iouThreshold'] = createTensorAttr(3);
  node.params['scoreThreshold'] = createNumberAttr(1);
  node.inputNames = ['input1', 'input2', 'input3', 'input4'];
  const input2 = [tfc.tensor1d([1])];
  const input3 = [tfc.tensor1d([1])];
  const input4 = [tfc.tensor1d([1])];
  spyOn(tfc.image, 'nonMaxSuppressionAsync').and.callThrough();
  const result =
      executeOp(node, {input1, input2, input3, input4}, context);
  expect(tfc.image.nonMaxSuppressionAsync)
      .toHaveBeenCalledWith(
          input1[0], input2[0], input3[0], input4[0], 1);
  expect(result instanceof Promise).toBeTruthy();
});
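Outside the graph executor, the spied-on op can also be called directly. Note that boxes is a rank-2 tensor of [y1, x1, y2, x2] rows while scores is a tensor1d; the boxes and thresholds below are made up for illustration:

import * as tf from '@tensorflow/tfjs-core';

async function nmsDemo() {
  // Two heavily overlapping boxes and one distinct box.
  const boxes = tf.tensor2d(
      [[0, 0, 1, 1], [0, 0.1, 1, 1.1], [2, 2, 3, 3]], [3, 4]);
  const scores = tf.tensor1d([0.9, 0.8, 0.7]);
  const indices = await tf.image.nonMaxSuppressionAsync(
      boxes, scores, /* maxOutputSize */ 2, /* iouThreshold */ 0.5);
  console.log(await indices.data());  // e.g. Int32Array [0, 2]
}

nmsDemo();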
Example 8: switch
export let executeOp: OpExecutor = (node: Node, tensorMap: NamedTensorsMap,
                                    context: ExecutionContext): tfc.Tensor[] => {
  switch (node.op) {
    case 'const': {
      return tensorMap[node.name];
    }
    case 'placeholder':
      const def =
          getParamValue('default', node, tensorMap, context) as tfc.Tensor;
      return [getTensor(node.name, tensorMap, context) || def];
    case 'identity':
    case 'stopGradient':
    case 'fakeQuantWithMinMaxVars':  // This op is currently ignored.
      return [getParamValue('x', node, tensorMap, context) as tfc.Tensor];
    case 'snapshot':
      const snapshot =
          (getParamValue('x', node, tensorMap, context) as tfc.Tensor);
      return [snapshot.clone()];
    case 'shape':
      return [tfc.tensor1d(
          (getParamValue('x', node, tensorMap, context) as tfc.Tensor).shape,
          'int32')];
    case 'size':
      return [tfc.scalar(
          (getParamValue('x', node, tensorMap, context) as tfc.Tensor).size,
          'int32')];
    case 'rank':
      return [tfc.scalar(
          (getParamValue('x', node, tensorMap, context) as tfc.Tensor).rank,
          'int32')];
    case 'noop':
      return [];
    case 'print':
      const input = getParamValue('x', node, tensorMap, context) as tfc.Tensor;
      const data =
          getParamValue('data', node, tensorMap, context) as tfc.Tensor[];
      const message =
          getParamValue('message', node, tensorMap, context) as string;
      const summarize =
          getParamValue('summarize', node, tensorMap, context) as number;
      console.warn(
          'The graph has a tf.print() operation, ' +
          'usually used for debugging, which slows down performance.');
      console.log(message);
      for (let i = 0; i < data.length; i++) {
        // Print up to `summarize` values from each data tensor.
        console.log(
            Array.prototype.slice.call(data[i].dataSync()).slice(0, summarize));
      }
      return [input];
    default:
      throw new TypeError(`Node type ${node.op} is not implemented`);
  }
};
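Stripped of the node and tensorMap plumbing, the 'shape', 'size' and 'rank' branches above boil down to the following plain tfjs-core calls (the input tensor is illustrative):

import * as tfc from '@tensorflow/tfjs-core';

const x = tfc.zeros([2, 3, 4]);

const shape = tfc.tensor1d(x.shape, 'int32');  // values [2, 3, 4]
const size = tfc.scalar(x.size, 'int32');      // 24
const rank = tfc.scalar(x.rank, 'int32');      // 3

shape.print();
size.print();
rank.print();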