This article collects typical usage examples of the deeplearn.tidy function in TypeScript. If you are wondering what dl.tidy does, how to call it, or what real-world uses of tidy look like, the curated examples below should help.
The following shows 10 code examples of the tidy function, ordered by popularity.
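As a quick orientation before the examples: dl.tidy runs the function you pass it, automatically disposes any intermediate tensors created inside that function, and keeps only the value it returns. Below is a minimal sketch of that behavior (the tensor values are arbitrary and purely illustrative), assuming the deeplearn 0.x-style API used throughout the examples on this page:
import * as dl from 'deeplearn';

const result = dl.tidy(() => {
  const a = dl.tensor1d([1, 2, 3]);  // intermediate: disposed when tidy() returns
  const b = a.square();              // intermediate: disposed when tidy() returns
  return b.sum();                    // returned, so it is kept alive
});
console.log(result.dataSync());      // Float32Array [14]
This is the pattern every example below relies on: allocate freely inside the callback, and return (or dl.keep) whatever must outlive it.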
Example 1: predict
export function predict(x: dl.Tensor2D): number[] {
  const pred = dl.tidy(() => {
    const axis = 1;
    // Take the argMax over axis 1 to get one predicted label per input row.
    return model(x).argMax(axis);
  });
  return Array.from(pred.dataSync());
}
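A hypothetical call site for the predict function above (the [2, 784] shape and random values are placeholder assumptions; the shape must match whatever `model` expects):
const batch: dl.Tensor2D = dl.randomUniform([2, 784], 0, 1);  // hypothetical input batch
const labels = predict(batch);  // one predicted class index per row, e.g. [7, 2]
batch.dispose();                // created outside predict's tidy, so dispose it manually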
Example 2: async
xhr.onload = async () => {
  const data = JSON.parse(xhr.responseText) as SampleData;
  // Wrap everything in a dl.tidy so we clean up intermediate Tensors.
  dl.tidy(async () => {
    console.log(`Evaluation set: n=${data.images.length}.`);
    let numCorrect = 0;
    for (let i = 0; i < data.images.length; i++) {
      const x = dl.tensor1d(data.images[i]);
      // Infer through the model to get a prediction.
      const predictedLabel = Math.round(await infer(x, vars).val());
      console.log(`Item ${i}, predicted label ${predictedLabel}.`);
      // Aggregate correctness to show accuracy.
      const label = data.labels[i];
      if (label === predictedLabel) {
        numCorrect++;
      }
      // Show the image.
      const result =
          renderResults(dl.tensor1d(data.images[i]), label, predictedLabel);
      document.body.appendChild(result);
    }
    // Compute final accuracy.
    const accuracy = numCorrect * 100 / data.images.length;
    document.getElementById('accuracy').innerHTML = `${accuracy}%`;
  });
};
Example 3: runInference
async runInference() {
  await dl.tidy(async () => {
    // Read the content image into a Tensor3D of pixels.
    const preprocessed = dl.fromPixels(this.contentImgElement);
    // Run the style-transfer network and draw the result to the canvas.
    const inferenceResult = await this.transformNet.predict(preprocessed);
    this.setCanvasShape(inferenceResult.shape);
    renderToCanvas(inferenceResult, this.canvas);
  });
}
Example 4: run
async run(size: number, option: string): Promise<number> {
  dl.setBackend('cpu');
  const input: dl.Tensor2D = dl.randomUniform([size, size], -1, 1);
  const op = getReductionOp(option);
  const start = performance.now();
  dl.tidy(() => {
    // .get() forces the reduction to be computed and read back synchronously,
    // so the elapsed time includes the actual work.
    op(input).get();
  });
  const end = performance.now();
  return end - start;
}
Example 5: run
async run(size: number, option: string): Promise<number> {
  const safeMode = false;
  const math = new dl.NDArrayMath('cpu', safeMode);
  dl.ENV.setMath(math);
  const input: dl.Tensor2D = dl.randomUniform([size, size], -1, 1);
  const op = getReductionOp(option);
  const start = performance.now();
  dl.tidy(() => {
    op(input).get();
  });
  math.dispose();
  const end = performance.now();
  return end - start;
}
Example 6: getConditioning
function getConditioning(): dl.Tensor1D {
  return dl.tidy(() => {
    if (!conditioned) {
      // TODO(nsthorat): figure out why we have to cast these shapes to numbers.
      // The linter is complaining, though VSCode can infer the types.
      const size = 1 + (noteDensityEncoding.shape[0] as number) +
          (pitchHistogramEncoding.shape[0] as number);
      const conditioning: dl.Tensor1D =
          dl.oneHot(dl.tensor1d([0]), size).as1D();
      return conditioning;
    } else {
      const axis = 0;
      const conditioningValues =
          noteDensityEncoding.concat(pitchHistogramEncoding, axis);
      return dl.tensor1d([0]).concat(conditioningValues, axis);
    }
  });
}
Example 7: predict
/**
 * Runs inference through TransformNet; assumes the variables have already
 * been loaded. The original TensorFlow version of the model can be found at
 * https://github.com/lengstrom/fast-style-transfer
 *
 * @param preprocessedInput preprocessed input image as a dl.Tensor3D.
 * @return dl.Tensor3D containing the pixels of the output image.
 */
predict(preprocessedInput: dl.Tensor3D): dl.Tensor3D {
  const img = dl.tidy(() => {
    const conv1 = this.convLayer(preprocessedInput.toFloat(), 1, true, 0);
    const conv2 = this.convLayer(conv1, 2, true, 3);
    const conv3 = this.convLayer(conv2, 2, true, 6);
    const resid1 = this.residualBlock(conv3, 9);
    const resid2 = this.residualBlock(resid1, 15);
    const resid3 = this.residualBlock(resid2, 21);
    const resid4 = this.residualBlock(resid3, 27);
    const resid5 = this.residualBlock(resid4, 33);
    const convT1 = this.convTransposeLayer(resid5, 64, 2, 39);
    const convT2 = this.convTransposeLayer(convT1, 32, 2, 42);
    const convT3 = this.convLayer(convT2, 1, false, 45);
    return convT3.tanh()
        .mul(this.timesScalar)
        .add(this.plusScalar)
        .clip(0, 255)
        .div(dl.scalar(255)) as dl.Tensor3D;
  });
  return img;
}
Example 8: intro
// This file parallels (some of) the code in the introduction tutorial.
/**
 * 'Math with WebGL backend' section of tutorial
 */
async function intro() {
  const a = dl.tensor2d([1.0, 2.0, 3.0, 4.0], [2, 2]);
  const b = dl.tensor2d([0.0, 2.0, 4.0, 6.0], [2, 2]);
  const size = dl.scalar(a.size);

  // Non-blocking math calls.
  const average = a.sub(b).square().sum().div(size);
  console.log(`mean squared difference: ${await average.val()}`);

  /**
   * 'Graphs and Tensors' section of tutorial
   */
  const g = new dl.Graph();

  // Placeholders are input containers. This is the container for where we
  // will feed an input Tensor when we execute the graph.
  const inputShape = [3];
  const inputTensor = g.placeholder('input', inputShape);

  const labelShape = [1];
  const labelTensor = g.placeholder('label', labelShape);

  // Variables are containers that hold a value that can be updated from
  // training. Here we initialize the multiplier variable randomly.
  const multiplier = g.variable('multiplier', dl.randomNormal([1, 3]));

  // Top-level graph methods take Tensors and return Tensors.
  const outputTensor = g.matmul(multiplier, inputTensor);
  const costTensor = g.meanSquaredCost(labelTensor, outputTensor);

  // Graph tensors, like concrete Tensors, have a shape attribute.
  console.log(outputTensor.shape);

  /**
   * 'dl.Session and dl.FeedEntry' section of the tutorial.
   */
  const learningRate = .00001;
  const batchSize = 3;

  const session = new dl.Session(g, dl.ENV.math);
  const optimizer = dl.train.sgd(learningRate);

  const inputs: dl.Tensor1D[] = [
    dl.tensor1d([1.0, 2.0, 3.0]), dl.tensor1d([10.0, 20.0, 30.0]),
    dl.tensor1d([100.0, 200.0, 300.0])
  ];
  const labels: dl.Tensor1D[] =
      [dl.tensor1d([4.0]), dl.tensor1d([40.0]), dl.tensor1d([400.0])];

  // Shuffles inputs and labels and keeps them mutually in sync.
  const shuffledInputProviderBuilder =
      new dl.InCPUMemoryShuffledInputProviderBuilder([inputs, labels]);
  const [inputProvider, labelProvider] =
      shuffledInputProviderBuilder.getInputProviders();

  // Maps tensors to InputProviders.
  const feedEntries: dl.FeedEntry[] = [
    {tensor: inputTensor, data: inputProvider},
    {tensor: labelTensor, data: labelProvider}
  ];

  const NUM_BATCHES = 10;
  for (let i = 0; i < NUM_BATCHES; i++) {
    // Wrap session.train in a scope so the cost gets cleaned up
    // automatically.
    await dl.tidy(async () => {
      // Train takes a cost tensor to minimize. Trains one batch. Returns the
      // average cost as a dl.Scalar.
      const cost = session.train(
          costTensor, feedEntries, batchSize, optimizer, dl.CostReduction.MEAN);
      console.log(`last average cost (${i}): ${await cost.val()}`);
    });
  }

  const testInput = dl.tensor1d([0.1, 0.2, 0.3]);

  // session.eval can take Tensors as input data.
  const testFeedEntries: dl.FeedEntry[] =
      [{tensor: inputTensor, data: testInput}];
  const testOutput = session.eval(outputTensor, testFeedEntries);

  console.log('---inference output---');
  console.log(`shape: ${testOutput.shape}`);
  console.log(`value: ${await testOutput.val(0)}`);
}
Example 9: async
export const learnXOR = async () => {
  const iterations = getRandomIntegerInRange(800, 1000);
  const timeStart: number = performance.now();
  let loss: number;
  let cost: dl.Scalar;

  const graph = new dl.Graph();
  const input = graph.placeholder('input', [2]);
  const y = graph.placeholder('y', [1]);
  const hiddenLayer = graph.layers.dense(
      'hiddenLayer', input, 10, (x: dl.SymbolicTensor) => graph.relu(x), true);
  const output = graph.layers.dense(
      'outputLayer', hiddenLayer, 1, (x: dl.SymbolicTensor) => graph.sigmoid(x),
      true);

  // Binary cross-entropy cost:
  //   -y * log(output + eps) - (1 - y) * log(1 - output + eps), summed.
  const costTensor = graph.reduceSum(graph.add(
      graph.multiply(
          graph.constant([-1]),
          graph.multiply(
              y, graph.log(graph.add(output, graph.constant([EPSILON]))))),
      graph.multiply(
          graph.constant([-1]),
          graph.multiply(
              graph.subtract(graph.constant([1]), y),
              graph.log(graph.add(
                  graph.subtract(graph.constant([1]), output),
                  graph.constant([EPSILON])))))));

  const session = new dl.Session(graph, dl.ENV.math);
  const optimizer = new dl.SGDOptimizer(0.2);

  const inputArray = [
    dl.tensor1d([0, 0]), dl.tensor1d([0, 1]), dl.tensor1d([1, 0]),
    dl.tensor1d([1, 1])
  ];
  const targetArray =
      [dl.tensor1d([0]), dl.tensor1d([1]), dl.tensor1d([1]), dl.tensor1d([0])];
  const shuffledInputProviderBuilder =
      new dl.InCPUMemoryShuffledInputProviderBuilder([inputArray, targetArray]);
  const [inputProvider, targetProvider] =
      shuffledInputProviderBuilder.getInputProviders();
  const feedEntries =
      [{tensor: input, data: inputProvider}, {tensor: y, data: targetProvider}];

  /**
   * Train the model
   */
  await dl.tidy(async () => {
    for (let i = 0; i < iterations; i += 1) {
      cost = session.train(
          costTensor, feedEntries, 4, optimizer, dl.CostReduction.MEAN);
    }
    loss = await cost.val();
  });

  const result = [];
  /**
   * Test the model
   */
  for (let i = 0; i < 4; i += 1) {
    const inputData = inputArray[i];
    const expectedOutput = targetArray[i];
    const val = session.eval(output, [{tensor: input, data: inputData}]);
    result.push({
      input: await inputData.data(),
      expected: await expectedOutput.data(),
      output: await val.data()
    });
  }

  const timeEnd: number = performance.now();
  const time = timeEnd - timeStart;
  return {iterations, loss, time, result};
};
Example 10: generateStep
async function generateStep(loopId: number) {
  if (loopId < currentLoopId) {
    // Was part of an outdated generateStep() scheduled via setTimeout.
    return;
  }
  await dl.tidy(async () => {
    const lstm1 = (data: dl.Tensor2D, c: dl.Tensor2D, h: dl.Tensor2D) =>
        dl.basicLSTMCell(forgetBias, lstmKernel1, lstmBias1, data, c, h);
    const lstm2 = (data: dl.Tensor2D, c: dl.Tensor2D, h: dl.Tensor2D) =>
        dl.basicLSTMCell(forgetBias, lstmKernel2, lstmBias2, data, c, h);
    const lstm3 = (data: dl.Tensor2D, c: dl.Tensor2D, h: dl.Tensor2D) =>
        dl.basicLSTMCell(forgetBias, lstmKernel3, lstmBias3, data, c, h);

    const outputs: dl.Scalar[] = [];
    // Generate some notes.
    for (let i = 0; i < STEPS_PER_GENERATE_CALL; i++) {
      // Use last sampled output as the next input.
      const eventInput = dl.oneHot(lastSample.as1D(), EVENT_SIZE).as1D();
      // Dispose the last sample from the previous generate call, since we
      // kept it.
      if (i === 0) {
        lastSample.dispose();
      }
      const conditioning = getConditioning();
      const axis = 0;
      const input = conditioning.concat(eventInput, axis);
      const output =
          dl.multiRNNCell([lstm1, lstm2, lstm3], input.as2D(1, -1), c, h);
      c = output[0];
      h = output[1];

      const outputH = h[2];
      const logits = outputH.matMul(fcW).add(fcB);
      const softmax = logits.as1D().softmax();

      const sampledOutput = dl.multinomial(softmax, 1).asScalar();
      outputs.push(sampledOutput);
      dl.keep(sampledOutput);
      lastSample = sampledOutput;
    }

    c.forEach(val => dl.keep(val));
    h.forEach(val => dl.keep(val));

    await outputs[outputs.length - 1].data();
    for (let i = 0; i < outputs.length; i++) {
      playOutput(await outputs[i].val());
    }

    if (piano.now() - currentPianoTimeSec > MAX_GENERATION_LAG_SECONDS) {
      console.warn(
          `Generation is ${
              piano.now() - currentPianoTimeSec} seconds behind, ` +
          `which is over ${MAX_GENERATION_LAG_SECONDS}. Resetting time!`);
      currentPianoTimeSec = piano.now();
    }
    const delta = Math.max(
        0, currentPianoTimeSec - piano.now() - GENERATION_BUFFER_SECONDS);
    setTimeout(() => generateStep(loopId), delta * 1000);
  });
}