This article collects typical usage examples of the deeplearn.InCPUMemoryShuffledInputProviderBuilder class in TypeScript. If you have been wondering what exactly the InCPUMemoryShuffledInputProviderBuilder class does, how to use it, or what real usage of it looks like, the curated class code examples below should help.
Five code examples of the InCPUMemoryShuffledInputProviderBuilder class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better TypeScript code examples.
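All five examples follow the same basic pattern: build the provider from parallel arrays of tensors, call getInputProviders(), and feed the resulting providers to a dl.Graph session. Here is a minimal sketch of that pattern, assuming deeplearn is imported as dl; the tensor values are made up for illustration:

import * as dl from 'deeplearn';

// One array per input stream; the arrays must all have the same length, and
// element i of each array belongs to the same training example.
const xs = [dl.scalar(0), dl.scalar(1), dl.scalar(2)];
const ys = [dl.scalar(0), dl.scalar(2), dl.scalar(4)];

// The builder shuffles every stream with one shared permutation, so inputs
// and labels stay paired when batches are drawn during training.
const builder = new dl.InCPUMemoryShuffledInputProviderBuilder([xs, ys]);
const [xProvider, yProvider] = builder.getInputProviders();

// Each provider is then attached to a graph placeholder in a dl.FeedEntry,
// e.g. {tensor: x, data: xProvider}, as the examples below show.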
Example 1:
await dl.tidy(async () => {
  /**
   * Inference
   */
  // Now we ask the dl.Graph to evaluate (infer) and give us the result when
  // providing a value 4 for "x".
  // NOTE: "a", "b", and "c" are randomly initialized, so this will give us
  // something random.
  let result = session.eval(y, [{tensor: x, data: dl.scalar(4)}]);
  console.log(await result.data());

  /**
   * Training
   */
  // Now let's learn the coefficients of this quadratic given some data.
  // To do this, we need to provide examples of x and y.
  // The values given here are for values a = 3, b = 2, c = 1, with random
  // noise added to the output so it's not a perfect fit.
  const xs = [dl.scalar(0), dl.scalar(1), dl.scalar(2), dl.scalar(3)];
  const ys =
      [dl.scalar(1.1), dl.scalar(5.9), dl.scalar(16.8), dl.scalar(33.9)];

  // When training, it's important to shuffle your data!
  const shuffledInputProviderBuilder =
      new dl.InCPUMemoryShuffledInputProviderBuilder([xs, ys]);
  const [xProvider, yProvider] =
      shuffledInputProviderBuilder.getInputProviders();

  // Training is broken up into batches.
  const NUM_BATCHES = 20;
  const BATCH_SIZE = xs.length;

  // Before we start training, we need to provide an optimizer. This is the
  // object that is responsible for updating weights. The learning rate
  // param is a value that represents how large of a step to make when
  // updating weights. If this is too big, you may overstep and oscillate.
  // If it is too small, the model may take a long time to train.
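  // Plain SGD nudges each weight in the direction that lowers the cost:
  // w = w - LEARNING_RATE * (d cost / d w).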
  const LEARNING_RATE = .01;
  const optimizer = dl.train.sgd(LEARNING_RATE);

  for (let i = 0; i < NUM_BATCHES; i++) {
    // Train takes a cost dl.Tensor to minimize; this call trains one batch
    // and returns the average cost of the batch as a dl.Scalar.
    const costValue = session.train(
        cost,
        // Map input providers to Tensors on the dl.Graph.
        [{tensor: x, data: xProvider}, {tensor: yLabel, data: yProvider}],
        BATCH_SIZE, optimizer, dl.CostReduction.MEAN);

    console.log(`average cost: ${await costValue.data()}`);
  }

  // Now print the value from the trained model for x = 4, should be ~57.0.
  result = session.eval(y, [{tensor: x, data: dl.scalar(4)}]);
  console.log('result should be ~57.0:');
  console.log(await result.data());
});
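Example 1 is an excerpt: session, x, y, yLabel, and cost are built outside the snippet. A plausible setup for the quadratic y = a*x^2 + b*x + c, sketched here as an assumption using only the dl.Graph calls that also appear in Examples 4 and 5, would look like this:

// Assumed setup for Example 1 (not part of the original snippet).
const graph = new dl.Graph();
const x = graph.placeholder('x', []);                     // scalar input
const a = graph.variable('a', dl.scalar(Math.random()));  // trainable coefficients
const b = graph.variable('b', dl.scalar(Math.random()));
const c = graph.variable('c', dl.scalar(Math.random()));
// y = a * x^2 + b * x + c
const y = graph.add(
    graph.add(graph.multiply(a, graph.multiply(x, x)), graph.multiply(b, x)),
    c);
const yLabel = graph.placeholder('yLabel', []);           // training target
const cost = graph.meanSquaredCost(y, yLabel);            // minimized by session.train
const session = new dl.Session(graph, dl.ENV.math);

With the target coefficients a = 3, b = 2, c = 1 mentioned in the comments, the prediction at x = 4 is 3*16 + 2*4 + 1 = 57, which is why the snippet expects a result of about 57.0.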
Example 2: startTraining
private startTraining() {
  const trainingData = this.getTrainingData();
  const testData = this.getTestData();
  // Recreate optimizer with the selected optimizer and hyperparameters.
  this.optimizer = this.createOptimizer();
  if (this.isValid && (trainingData != null) && (testData != null)) {
    this.recreateCharts();
    this.graphRunner.resetStatistics();

    const trainingShuffledInputProviderGenerator =
        new dl.InCPUMemoryShuffledInputProviderBuilder(trainingData);
    const [trainInputProvider, trainLabelProvider] =
        trainingShuffledInputProviderGenerator.getInputProviders();

    const trainFeeds = [
      {tensor: this.xTensor, data: trainInputProvider},
      {tensor: this.labelTensor, data: trainLabelProvider}
    ];

    const accuracyShuffledInputProviderGenerator =
        new dl.InCPUMemoryShuffledInputProviderBuilder(testData);
    const [accuracyInputProvider, accuracyLabelProvider] =
        accuracyShuffledInputProviderGenerator.getInputProviders();

    const accuracyFeeds = [
      {tensor: this.xTensor, data: accuracyInputProvider},
      {tensor: this.labelTensor, data: accuracyLabelProvider}
    ];

    this.graphRunner.train(
        this.costTensor, trainFeeds, this.batchSize, this.optimizer,
        undefined /** numBatches */, this.accuracyTensor, accuracyFeeds,
        this.batchSize, dl.MetricReduction.MEAN, EVAL_INTERVAL_MS,
        COST_INTERVAL_MS);

    this.showTrainStats = true;
    this.applicationState = ApplicationState.TRAINING;
  }
}
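In Examples 2 and 3 (methods of a graph-runner style demo component), trainingData and testData are each expected to be a two-element array of parallel tensor arrays, [inputs, labels]; that is why destructuring getInputProviders() yields exactly an input provider and a label provider. A hypothetical illustration of that shape, with names and values invented (the real arrays come from the component's dataset loader):

// Hypothetical data: two examples with 3 input features and a one-hot label.
const inputs = [dl.tensor1d([0.1, 0.2, 0.3]), dl.tensor1d([0.4, 0.5, 0.6])];
const labels = [dl.tensor1d([1, 0]), dl.tensor1d([0, 1])];
const trainingData = [inputs, labels];  // testData follows the same layout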
Example 3: startInference
private startInference() {
  const testData = this.getTestData();
  if (testData == null) {
    // Dataset not ready yet.
    return;
  }
  if (this.isValid && (testData != null)) {
    const inferenceShuffledInputProviderGenerator =
        new dl.InCPUMemoryShuffledInputProviderBuilder(testData);
    const [inferenceInputProvider, inferenceLabelProvider] =
        inferenceShuffledInputProviderGenerator.getInputProviders();

    const inferenceFeeds = [
      {tensor: this.xTensor, data: inferenceInputProvider},
      {tensor: this.labelTensor, data: inferenceLabelProvider}
    ];

    this.graphRunner.infer(
        this.predictionTensor, inferenceFeeds, INFERENCE_EXAMPLE_INTERVAL_MS,
        INFERENCE_EXAMPLE_COUNT);
  }
}
Example 4: intro
// This file parallels (some of) the code in the introduction tutorial.

/**
 * 'Math with WebGL backend' section of tutorial
 */
async function intro() {
  const a = dl.tensor2d([1.0, 2.0, 3.0, 4.0], [2, 2]);
  const b = dl.tensor2d([0.0, 2.0, 4.0, 6.0], [2, 2]);
  const size = dl.scalar(a.size);

  // Non-blocking math calls.
  const average = a.sub(b).square().sum().div(size);
  console.log(`mean squared difference: ${await average.val()}`);
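  // For these values, a - b = [1, 0, -1, -2], the element-wise squares are
  // [1, 0, 1, 4], their sum is 6, and 6 / size = 6 / 4 = 1.5, so the line
  // above should log 1.5.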
  /**
   * 'Graphs and Tensors' section of tutorial
   */
  const g = new dl.Graph();

  // Placeholders are input containers. This is the container for where we
  // will feed an input Tensor when we execute the graph.
  const inputShape = [3];
  const inputTensor = g.placeholder('input', inputShape);
  const labelShape = [1];
  const labelTensor = g.placeholder('label', labelShape);

  // Variables are containers that hold a value that can be updated from
  // training.
  // Here we initialize the multiplier variable randomly.
  const multiplier = g.variable('multiplier', dl.randomNormal([1, 3]));

  // Top level graph methods take Tensors and return Tensors.
  const outputTensor = g.matmul(multiplier, inputTensor);
  const costTensor = g.meanSquaredCost(labelTensor, outputTensor);

  // Graph Tensors, like concrete tensors, have a shape attribute.
  console.log(outputTensor.shape);

  /**
   * 'dl.Session and dl.FeedEntry' section of the tutorial.
   */
  const learningRate = .00001;
  const batchSize = 3;

  const session = new dl.Session(g, dl.ENV.math);
  const optimizer = dl.train.sgd(learningRate);

  const inputs: dl.Tensor1D[] = [
    dl.tensor1d([1.0, 2.0, 3.0]), dl.tensor1d([10.0, 20.0, 30.0]),
    dl.tensor1d([100.0, 200.0, 300.0])
  ];
  const labels: dl.Tensor1D[] =
      [dl.tensor1d([4.0]), dl.tensor1d([40.0]), dl.tensor1d([400.0])];

  // Shuffles inputs and labels and keeps them mutually in sync.
  const shuffledInputProviderBuilder =
      new dl.InCPUMemoryShuffledInputProviderBuilder([inputs, labels]);
  const [inputProvider, labelProvider] =
      shuffledInputProviderBuilder.getInputProviders();

  // Maps tensors to InputProviders.
  const feedEntries: dl.FeedEntry[] = [
    {tensor: inputTensor, data: inputProvider},
    {tensor: labelTensor, data: labelProvider}
  ];

  const NUM_BATCHES = 10;
  for (let i = 0; i < NUM_BATCHES; i++) {
    // Wrap session.train in a scope so the cost gets cleaned up
    // automatically.
    await dl.tidy(async () => {
      // Train takes a cost tensor to minimize. Trains one batch. Returns the
      // average cost as a dl.Scalar.
      const cost = session.train(
          costTensor, feedEntries, batchSize, optimizer, dl.CostReduction.MEAN);

      console.log(`last average cost (${i}): ${await cost.val()}`);
    });
  }

  const testInput = dl.tensor1d([0.1, 0.2, 0.3]);

  // session.eval can take Tensors as input data.
  const testFeedEntries: dl.FeedEntry[] =
      [{tensor: inputTensor, data: testInput}];

  const testOutput = session.eval(outputTensor, testFeedEntries);
  console.log('---inference output---');
  console.log(`shape: ${testOutput.shape}`);
  console.log(`value: ${await testOutput.val(0)}`);
}
Example 5: learnXOR
export const learnXOR = async () => {
  const iterations = getRandomIntegerInRange(800, 1000);
  const timeStart: number = performance.now();
  let loss: number;
  let cost: dl.Scalar;

  const graph = new dl.Graph();
  const input = graph.placeholder('input', [2]);
  const y = graph.placeholder('y', [1]);

  const hiddenLayer = graph.layers.dense(
      'hiddenLayer', input, 10, (x: dl.SymbolicTensor) => graph.relu(x), true);
  const output = graph.layers.dense(
      'outputLayer', hiddenLayer, 1, (x: dl.SymbolicTensor) => graph.sigmoid(x),
      true);

  const costTensor = graph.reduceSum(graph.add(
      graph.multiply(
          graph.constant([-1]),
          graph.multiply(
              y, graph.log(graph.add(output, graph.constant([EPSILON]))))),
      graph.multiply(
          graph.constant([-1]),
          graph.multiply(
              graph.subtract(graph.constant([1]), y),
              graph.log(graph.add(
                  graph.subtract(graph.constant([1]), output),
                  graph.constant([EPSILON])))))));

  const session = new dl.Session(graph, dl.ENV.math);
  const optimizer = new dl.SGDOptimizer(0.2);

  const inputArray = [
    dl.tensor1d([0, 0]), dl.tensor1d([0, 1]), dl.tensor1d([1, 0]),
    dl.tensor1d([1, 1])
  ];
  const targetArray =
      [dl.tensor1d([0]), dl.tensor1d([1]), dl.tensor1d([1]), dl.tensor1d([0])];

  const shuffledInputProviderBuilder =
      new dl.InCPUMemoryShuffledInputProviderBuilder([inputArray, targetArray]);
  const [inputProvider, targetProvider] =
      shuffledInputProviderBuilder.getInputProviders();

  const feedEntries =
      [{tensor: input, data: inputProvider}, {tensor: y, data: targetProvider}];

  /**
   * Train the model
   */
  await dl.tidy(async () => {
    for (let i = 0; i < iterations; i += 1) {
      cost = session.train(
          costTensor, feedEntries, 4, optimizer, dl.CostReduction.MEAN);
    }
    loss = await cost.val();
  });

  const result = [];
  /**
   * Test the model
   */
  for (let i = 0; i < 4; i += 1) {
    const inputData = inputArray[i];
    const expectedOutput = targetArray[i];
    const val = session.eval(output, [{tensor: input, data: inputData}]);

    result.push({
      input: await inputData.data(),
      expected: await expectedOutput.data(),
      output: await val.data()
    });
  }

  const timeEnd: number = performance.now();
  const time = timeEnd - timeStart;
  return {iterations, loss, time, result};
};
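Two identifiers in Example 5, EPSILON and getRandomIntegerInRange, are defined elsewhere in the original module. The nested costTensor expression is the per-example binary cross-entropy, -y * log(output + EPSILON) - (1 - y) * log(1 - output + EPSILON), with EPSILON keeping the logarithms finite. Plausible stand-in definitions, assumed here purely so the snippet is self-contained:

// Assumed values, not taken from the original source.
const EPSILON = 1e-7;  // keeps log() away from log(0)

// Random integer in [min, max]; used above to vary the training length per run.
const getRandomIntegerInRange = (min: number, max: number): number =>
    Math.floor(Math.random() * (max - min + 1)) + min;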