TypeScript InCPUMemoryShuffledInputProviderBuilder.getInputProviders Method Code Examples

This article collects typical usage examples of the deeplearn.InCPUMemoryShuffledInputProviderBuilder.getInputProviders method in TypeScript. If you are wondering what this method does, how to call it, or where to find working examples, the curated code samples below should help. You can also explore further usage examples of the enclosing deeplearn.InCPUMemoryShuffledInputProviderBuilder class.


Five code examples of the InCPUMemoryShuffledInputProviderBuilder.getInputProviders method are shown below, sorted by popularity by default.
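
Before the full examples, here is a minimal sketch of the basic pattern. It is not taken from any one project: the variable names and tensor values are illustrative, and it assumes the deeplearn package imported as dl, as in the examples below.

import * as dl from 'deeplearn';

// Two parallel datasets; inputs[i] must correspond to labels[i].
const inputs = [dl.scalar(0), dl.scalar(1), dl.scalar(2)];
const labels = [dl.scalar(0), dl.scalar(2), dl.scalar(4)];

// The builder shuffles every dataset with the same permutation, so
// each input stays paired with its label from batch to batch.
const builder =
    new dl.InCPUMemoryShuffledInputProviderBuilder([inputs, labels]);

// getInputProviders() returns one InputProvider per dataset, in the
// order the datasets were passed to the constructor.
const [inputProvider, labelProvider] = builder.getInputProviders();

// Each provider is then mapped to a graph placeholder as a FeedEntry
// for session.train(), e.g.
//   [{tensor: inputPlaceholder, data: inputProvider},
//    {tensor: labelPlaceholder, data: labelProvider}]

Examples 1, 4, and 5 below follow exactly this shape; examples 2 and 3 apply the same pattern to training, accuracy, and inference feeds in the deeplearn.js model-builder demo.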

Example 1:

    await dl.tidy(async () => {
      /**
       * Inference
       */
      // Now we ask the dl.Graph to evaluate (infer) and give us the result when
      // providing a value 4 for "x".
      // NOTE: "a", "b", and "c" are randomly initialized, so this will give us
      // something random.
      let result = session.eval(y, [{tensor: x, data: dl.scalar(4)}]);
      console.log(await result.data());

      /**
       * Training
       */
      // Now let's learn the coefficients of this quadratic given some data.
      // To do this, we need to provide examples of x and y.
      // The values given here are for values a = 3, b = 2, c = 1, with random
      // noise added to the output so it's not a perfect fit.
      const xs = [dl.scalar(0), dl.scalar(1), dl.scalar(2), dl.scalar(3)];
      const ys =
          [dl.scalar(1.1), dl.scalar(5.9), dl.scalar(16.8), dl.scalar(33.9)];
      // When training, it's important to shuffle your data!
      const shuffledInputProviderBuilder =
          new dl.InCPUMemoryShuffledInputProviderBuilder([xs, ys]);
      const [xProvider, yProvider] =
          shuffledInputProviderBuilder.getInputProviders();

      // Training is broken up into batches.
      const NUM_BATCHES = 20;
      const BATCH_SIZE = xs.length;
      // Before we start training, we need to provide an optimizer. This is the
      // object that is responsible for updating weights. The learning rate
      // param is a value that represents how large of a step to make when
      // updating weights. If this is too big, you may overstep and oscillate.
      // If it is too small, the model may take a long time to train.
      const LEARNING_RATE = .01;
      const optimizer = dl.train.sgd(LEARNING_RATE);
      for (let i = 0; i < NUM_BATCHES; i++) {
        // Train takes a cost dl.Tensor to minimize; this call trains one batch
        // and returns the average cost of the batch as a dl.Scalar.
        const costValue = session.train(
            cost,
            // Map input providers to Tensors on the dl.Graph.
            [{tensor: x, data: xProvider}, {tensor: yLabel, data: yProvider}],
            BATCH_SIZE, optimizer, dl.CostReduction.MEAN);

        console.log(`average cost: ${await costValue.data()}`);
      }

      // Now print the value from the trained model for x = 4, should be ~57.0.
      result = session.eval(y, [{tensor: x, data: dl.scalar(4)}]);
      console.log('result should be ~57.0:');
      console.log(await result.data());
    });
Developer ID: ScapeQin; Project: deeplearnjs; Lines: 54; Source: ml_beginners.ts

Example 2: startTraining

  private startTraining() {
    const trainingData = this.getTrainingData();
    const testData = this.getTestData();

    // Recreate optimizer with the selected optimizer and hyperparameters.
    this.optimizer = this.createOptimizer();

    if (this.isValid && (trainingData != null) && (testData != null)) {
      this.recreateCharts();
      this.graphRunner.resetStatistics();

      const trainingShuffledInputProviderGenerator =
          new dl.InCPUMemoryShuffledInputProviderBuilder(trainingData);
      const [trainInputProvider, trainLabelProvider] =
          trainingShuffledInputProviderGenerator.getInputProviders();

      const trainFeeds = [
        {tensor: this.xTensor, data: trainInputProvider},
        {tensor: this.labelTensor, data: trainLabelProvider}
      ];

      const accuracyShuffledInputProviderGenerator =
          new dl.InCPUMemoryShuffledInputProviderBuilder(testData);
      const [accuracyInputProvider, accuracyLabelProvider] =
          accuracyShuffledInputProviderGenerator.getInputProviders();

      const accuracyFeeds = [
        {tensor: this.xTensor, data: accuracyInputProvider},
        {tensor: this.labelTensor, data: accuracyLabelProvider}
      ];

      this.graphRunner.train(
          this.costTensor, trainFeeds, this.batchSize, this.optimizer,
          undefined /** numBatches */, this.accuracyTensor, accuracyFeeds,
          this.batchSize, dl.MetricReduction.MEAN, EVAL_INTERVAL_MS,
          COST_INTERVAL_MS);

      this.showTrainStats = true;
      this.applicationState = ApplicationState.TRAINING;
    }
  }
Developer ID: ScapeQin; Project: deeplearnjs; Lines: 41; Source: model-builder.ts

Example 3: startInference

  private startInference() {
    const testData = this.getTestData();
    if (testData == null) {
      // Dataset not ready yet.
      return;
    }
    if (this.isValid) {
      const inferenceShuffledInputProviderGenerator =
          new dl.InCPUMemoryShuffledInputProviderBuilder(testData);
      const [inferenceInputProvider, inferenceLabelProvider] =
          inferenceShuffledInputProviderGenerator.getInputProviders();

      const inferenceFeeds = [
        {tensor: this.xTensor, data: inferenceInputProvider},
        {tensor: this.labelTensor, data: inferenceLabelProvider}
      ];

      this.graphRunner.infer(
          this.predictionTensor, inferenceFeeds, INFERENCE_EXAMPLE_INTERVAL_MS,
          INFERENCE_EXAMPLE_COUNT);
    }
  }
Developer ID: ScapeQin; Project: deeplearnjs; Lines: 22; Source: model-builder.ts

Example 4: intro

// This file parallels (some of) the code in the introduction tutorial.

/**
 * 'Math with WebGL backend' section of tutorial
 */
async function intro() {
  const a = dl.tensor2d([1.0, 2.0, 3.0, 4.0], [2, 2]);
  const b = dl.tensor2d([0.0, 2.0, 4.0, 6.0], [2, 2]);

  const size = dl.scalar(a.size);

  // Non-blocking math calls.
  const average = a.sub(b).square().sum().div(size);

  console.log(`mean squared difference: ${await average.val()}`);

  /**
   * 'Graphs and Tensors' section of tutorial
   */

  const g = new dl.Graph();

  // Placeholders are input containers. This is the container for where we
  // will feed an input Tensor when we execute the graph.
  const inputShape = [3];
  const inputTensor = g.placeholder('input', inputShape);

  const labelShape = [1];
  const labelTensor = g.placeholder('label', labelShape);

  // Variables are containers that hold a value that can be updated from
  // training.
  // Here we initialize the multiplier variable randomly.
  const multiplier = g.variable('multiplier', dl.randomNormal([1, 3]));

  // Top level graph methods take Tensors and return Tensors.
  const outputTensor = g.matmul(multiplier, inputTensor);
  const costTensor = g.meanSquaredCost(labelTensor, outputTensor);

  // Graph Tensors, like concrete data Tensors, have a shape attribute.
  console.log(outputTensor.shape);

  /**
   * 'dl.Session and dl.FeedEntry' section of the tutorial.
   */

  const learningRate = .00001;
  const batchSize = 3;

  const session = new dl.Session(g, dl.ENV.math);
  const optimizer = dl.train.sgd(learningRate);

  const inputs: dl.Tensor1D[] = [
    dl.tensor1d([1.0, 2.0, 3.0]), dl.tensor1d([10.0, 20.0, 30.0]),
    dl.tensor1d([100.0, 200.0, 300.0])
  ];

  const labels: dl.Tensor1D[] =
      [dl.tensor1d([4.0]), dl.tensor1d([40.0]), dl.tensor1d([400.0])];

  // Shuffles inputs and labels and keeps them mutually in sync.
  const shuffledInputProviderBuilder =
      new dl.InCPUMemoryShuffledInputProviderBuilder([inputs, labels]);
  const [inputProvider, labelProvider] =
      shuffledInputProviderBuilder.getInputProviders();

  // Maps tensors to InputProviders.
  const feedEntries: dl.FeedEntry[] = [
    {tensor: inputTensor, data: inputProvider},
    {tensor: labelTensor, data: labelProvider}
  ];

  const NUM_BATCHES = 10;
  for (let i = 0; i < NUM_BATCHES; i++) {
    // Wrap session.train in a scope so the cost gets cleaned up
    // automatically.
    await dl.tidy(async () => {
      // Train takes a cost tensor to minimize. Trains one batch. Returns the
      // average cost as a dl.Scalar.
      const cost = session.train(
          costTensor, feedEntries, batchSize, optimizer, dl.CostReduction.MEAN);

      console.log(`last average cost (${i}): ${await cost.val()}`);
    });
  }

  const testInput = dl.tensor1d([0.1, 0.2, 0.3]);

  // session.eval can take Tensors as input data.
  const testFeedEntries: dl.FeedEntry[] =
      [{tensor: inputTensor, data: testInput}];

  const testOutput = session.eval(outputTensor, testFeedEntries);

  console.log('---inference output---');
  console.log(`shape: ${testOutput.shape}`);
  console.log(`value: ${await testOutput.val(0)}`);
}
Developer ID: ScapeQin; Project: deeplearnjs; Lines: 98; Source: intro.ts

Example 5: learnXOR

export const learnXOR = async () => {
  const iterations = getRandomIntegerInRange(800, 1000);
  const timeStart: number = performance.now();
  let loss: number;
  let cost: dl.Scalar;

  const graph = new dl.Graph();

  const input = graph.placeholder('input', [2]);
  const y = graph.placeholder('y', [1]);

  const hiddenLayer = graph.layers.dense(
      'hiddenLayer', input, 10, (x: dl.SymbolicTensor) => graph.relu(x), true);
  const output = graph.layers.dense(
      'outputLayer', hiddenLayer, 1, (x: dl.SymbolicTensor) => graph.sigmoid(x),
      true);

  const costTensor = graph.reduceSum(graph.add(
      graph.multiply(
          graph.constant([-1]),
          graph.multiply(
              y, graph.log(graph.add(output, graph.constant([EPSILON]))))),
      graph.multiply(
          graph.constant([-1]),
          graph.multiply(
              graph.subtract(graph.constant([1]), y),
              graph.log(graph.add(
                  graph.subtract(graph.constant([1]), output),
                  graph.constant([EPSILON])))))));

  const session = new dl.Session(graph, dl.ENV.math);
  const optimizer = new dl.SGDOptimizer(0.2);

  const inputArray = [
    dl.tensor1d([0, 0]), dl.tensor1d([0, 1]), dl.tensor1d([1, 0]),
    dl.tensor1d([1, 1])
  ];

  const targetArray =
      [dl.tensor1d([0]), dl.tensor1d([1]), dl.tensor1d([1]), dl.tensor1d([0])];

  const shuffledInputProviderBuilder =
      new dl.InCPUMemoryShuffledInputProviderBuilder([inputArray, targetArray]);

  const [inputProvider, targetProvider] =
      shuffledInputProviderBuilder.getInputProviders();

  const feedEntries =
      [{tensor: input, data: inputProvider}, {tensor: y, data: targetProvider}];

  /**
   * Train the model
   */
  await dl.tidy(async () => {
    for (let i = 0; i < iterations; i += 1) {
      cost = session.train(
          costTensor, feedEntries, 4, optimizer, dl.CostReduction.MEAN);
    }
    loss = await cost.val();
  });

  const result = [];

  /**
   * Test the model
   */
  for (let i = 0; i < 4; i += 1) {
    const inputData = inputArray[i];
    const expectedOutput = targetArray[i];

    const val = session.eval(output, [{tensor: input, data: inputData}]);

    result.push({
      input: await inputData.data(),
      expected: await expectedOutput.data(),
      output: await val.data()
    });
  }

  const timeEnd: number = performance.now();
  const time = timeEnd - timeStart;

  return {iterations, loss, time, result};
};
Developer ID: ScapeQin; Project: deeplearnjs; Lines: 84; Source: learn-xor.ts


Note: The deeplearn.InCPUMemoryShuffledInputProviderBuilder.getInputProviders method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their authors; copyright of the source code belongs to the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.