

Python testing_utils.get_test_data Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.keras.testing_utils.get_test_data. If you have been wondering what get_test_data does, how to call it, or what real-world uses look like, the curated code examples below should help.


The 15 code examples of the get_test_data function shown below are sorted by popularity by default.
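Before working through the examples, here is a minimal sketch of the call pattern they all share: get_test_data builds a small random classification dataset and returns it as ((x_train, y_train), (x_test, y_test)). The return structure and keyword arguments mirror the examples below; the import paths assume a TensorFlow 1.x source tree where these internal test modules are importable, and the concrete sample counts, input shape, and class count are illustrative assumptions only.

# Minimal usage sketch (illustrative values only).
from tensorflow.python import keras
from tensorflow.python.keras import testing_utils

# Generate a small random classification dataset: feature arrays plus
# integer class labels, split into train and test partitions.
(x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
    train_samples=100,   # number of training samples (illustrative)
    test_samples=20,     # number of test samples (illustrative)
    input_shape=(10,),   # shape of a single sample (illustrative)
    num_classes=3)       # number of label classes (illustrative)

# The labels come back as integer class ids; the examples below typically
# one-hot encode them before training.
y_train = keras.utils.to_categorical(y_train, 3)

Each example below embeds this call inside a larger test case taken from the TensorFlow code base.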

Example 1: test_keras_model_with_lstm

  def test_keras_model_with_lstm(self):
    input_shape = 10
    rnn_state_size = 8
    output_shape = 8
    timestep = 4
    batch = 100
    epoch = 10

    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    y_train = keras.utils.to_categorical(y_train, output_shape)

    K.set_session(session.Session(config=self.config))
    layer = UnifiedLSTM(rnn_state_size)

    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=dtypes.float32)

    outputs, unused_runtime = layer(inputs)
    model = keras.models.Model(inputs, outputs)
    model.compile('rmsprop', loss='mse')
    model.fit(x_train, y_train, epochs=epoch)
Developer: JonathanRaiman, Project: tensorflow, Lines of code: 25, Source: unified_rnn_test.py

Example 2: _test_optimizer

def _test_optimizer(optimizer, target=0.75):
  np.random.seed(1337)
  (x_train, y_train), _ = testing_utils.get_test_data(train_samples=1000,
                                                      test_samples=200,
                                                      input_shape=(10,),
                                                      num_classes=2)
  y_train = keras.utils.to_categorical(y_train)
  model = _get_model(x_train.shape[1], 20, y_train.shape[1])
  model.compile(loss='categorical_crossentropy',
                optimizer=optimizer,
                metrics=['accuracy'])
  history = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0)
  assert history.history['acc'][-1] >= target
  config = keras.optimizers.serialize(optimizer)
  optim = keras.optimizers.deserialize(config)
  new_config = keras.optimizers.serialize(optim)
  new_config['class_name'] = new_config['class_name'].lower()
  assert config == new_config

  # Test constraints.
  model = keras.models.Sequential()
  dense = keras.layers.Dense(10,
                             input_shape=(x_train.shape[1],),
                             kernel_constraint=lambda x: 0. * x + 1.,
                             bias_constraint=lambda x: 0. * x + 2.,
                             activation='relu')
  model.add(dense)
  model.add(keras.layers.Dense(y_train.shape[1], activation='softmax'))
  model.compile(loss='categorical_crossentropy',
                optimizer=optimizer,
                metrics=['accuracy'])
  model.train_on_batch(x_train[:10], y_train[:10])
  kernel, bias = dense.get_weights()
  np.testing.assert_allclose(kernel, 1., atol=1e-3)
  np.testing.assert_allclose(bias, 2., atol=1e-3)
Developer: LongJun123456, Project: tensorflow, Lines of code: 35, Source: optimizers_test.py

Example 3: test_TensorBoard_with_ReduceLROnPlateau

  def test_TensorBoard_with_ReduceLROnPlateau(self):
    with self.cached_session():
      temp_dir = self.get_temp_dir()
      self.addCleanup(shutil.rmtree, temp_dir, ignore_errors=True)

      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)
      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)

      model = testing_utils.get_small_sequential_mlp(
          num_hidden=NUM_HIDDEN, num_classes=NUM_CLASSES, input_dim=INPUT_DIM)
      model.compile(
          loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])

      cbks = [
          keras.callbacks.ReduceLROnPlateau(
              monitor='val_loss', factor=0.5, patience=4, verbose=1),
          keras.callbacks.TensorBoard(log_dir=temp_dir)
      ]

      model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=2,
          verbose=0)

      assert os.path.exists(temp_dir)
Developer: rmlarsen, Project: tensorflow, Lines of code: 34, Source: callbacks_test.py

Example 4: test_keras_model_with_gru

  def test_keras_model_with_gru(self):
    input_shape = 10
    rnn_state_size = 8
    output_shape = 8
    timestep = 4
    batch = 100
    epoch = 10

    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    y_train = keras.utils.to_categorical(y_train, output_shape)

    layer = keras.layers.UnifiedGRU(rnn_state_size)

    inputs = keras.layers.Input(
        shape=[timestep, input_shape], dtype=dtypes.float32)

    outputs = layer(inputs)
    model = keras.models.Model(inputs, outputs)
    model.compile('rmsprop', loss='mse')
    model.fit(x_train, y_train, epochs=epoch)
    model.evaluate(x_train, y_train)
    model.predict(x_train)
Developer: Wajih-O, Project: tensorflow, Lines of code: 26, Source: unified_gru_test.py

Example 5: test_vector_classification_shared_model

  def test_vector_classification_shared_model(self):
    # Test that functional models that feature internal updates
    # and internal losses can be shared.
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), _ = testing_utils.get_test_data(
          train_samples=100,
          test_samples=0,
          input_shape=(10,),
          num_classes=2)
      y_train = keras.utils.to_categorical(y_train)

      inputs = keras.layers.Input(x_train.shape[1:])
      x = keras.layers.Dense(16,
                             activation='relu',
                             kernel_regularizer=keras.regularizers.l2(1e-5),
                             bias_regularizer=keras.regularizers.l2(1e-5),
                             input_shape=x_train.shape[1:])(inputs)
      x = keras.layers.BatchNormalization()(x)
      base_model = keras.models.Model(inputs, x)

      x = keras.layers.Input(x_train.shape[1:])
      y = base_model(x)
      y = keras.layers.Dense(y_train.shape[-1], activation='softmax')(y)
      model = keras.models.Model(x, y)
      model.compile(loss='categorical_crossentropy',
                    optimizer=keras.optimizers.Adam(lr=0.1),
                    metrics=['accuracy'])
      history = model.fit(x_train, y_train, epochs=10, batch_size=16,
                          validation_data=(x_train, y_train),
                          verbose=2)
      self.assertGreater(history.history['val_acc'][-1], 0.7)
Developer: AnishShah, Project: tensorflow, Lines of code: 32, Source: integration_test.py

Example 6: test_EarlyStopping_with_baseline

  def test_EarlyStopping_with_baseline(self):
    with self.cached_session():
      np.random.seed(1337)
      baseline = 0.5
      (data, labels), _ = testing_utils.get_test_data(
          train_samples=100,
          test_samples=50,
          input_shape=(1,),
          num_classes=NUM_CLASSES)
      model = testing_utils.get_small_sequential_mlp(
          num_hidden=1, num_classes=1, input_dim=1)
      model.compile(
          optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])

      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) == 1

      patience = 3
      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              patience=patience,
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
Developer: kylin9872, Project: tensorflow, Lines of code: 25, Source: callbacks_test.py

Example 7: test_image_classification_sequential

  def test_image_classification_sequential(self):
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), _ = testing_utils.get_test_data(
          train_samples=100,
          test_samples=0,
          input_shape=(12, 12, 3),
          num_classes=2)
      y_train = keras.utils.to_categorical(y_train)

      model = keras.models.Sequential()
      model.add(keras.layers.Conv2D(
          4, 3,
          padding='same',
          activation='relu',
          input_shape=x_train.shape[1:]))
      model.add(keras.layers.Conv2D(
          8, 3,
          padding='same',
          activation='relu'))
      model.add(keras.layers.Conv2D(
          16, 3,
          padding='same',
          activation='relu'))
      model.add(keras.layers.Flatten())
      model.add(keras.layers.Dense(y_train.shape[-1], activation='softmax'))
      model.compile(loss='categorical_crossentropy',
                    optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.8),
                    metrics=['accuracy'])
      history = model.fit(x_train, y_train, epochs=10, batch_size=16,
                          validation_data=(x_train, y_train),
                          verbose=2)
      self.assertGreater(history.history['val_acc'][-1], 0.7)
Developer: AnishShah, Project: tensorflow, Lines of code: 33, Source: integration_test.py

Example 8: test_temporal_sample_weights

  def test_temporal_sample_weights(self):
    num_classes = 5
    weighted_class = 3
    train_samples = 1000
    test_samples = 1000
    input_dim = 5
    timesteps = 3

    model = keras.models.Sequential()
    model.add(
        keras.layers.TimeDistributed(
            keras.layers.Dense(num_classes),
            input_shape=(timesteps, input_dim)))
    model.add(keras.layers.Activation('softmax'))

    np.random.seed(1337)
    (_, y_train), _ = testing_utils.get_test_data(
        train_samples=train_samples,
        test_samples=test_samples,
        input_shape=(input_dim,),
        num_classes=num_classes)
    int_y_train = y_train.copy()
    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)

    class_weight = dict([(i, 1.) for i in range(num_classes)])
    class_weight[weighted_class] = 2.

    sample_weight = np.ones((y_train.shape[0]))
    sample_weight[int_y_train == weighted_class] = 2.
    with self.assertRaises(ValueError):
      model.compile(
          loss='binary_crossentropy',
          optimizer=RMSPropOptimizer(learning_rate=0.001),
          sample_weight_mode='temporal')
Developer: LiuCKind, Project: tensorflow, Lines of code: 35, Source: training_eager_test.py

Example 9: test_video_classification_functional

  def test_video_classification_functional(self):
    with self.cached_session():
      np.random.seed(1337)
      (x_train, y_train), _ = testing_utils.get_test_data(
          train_samples=100,
          test_samples=0,
          input_shape=(4, 8, 8, 3),
          num_classes=3)
      y_train = keras.utils.to_categorical(y_train)

      inputs = keras.layers.Input(shape=x_train.shape[1:])
      x = keras.layers.TimeDistributed(
          keras.layers.Conv2D(4, 3, activation='relu'))(inputs)
      x = keras.layers.BatchNormalization()(x)
      x = keras.layers.TimeDistributed(keras.layers.GlobalMaxPooling2D())(x)
      x = keras.layers.Conv1D(8, 3, activation='relu')(x)
      x = keras.layers.Flatten()(x)
      outputs = keras.layers.Dense(y_train.shape[-1], activation='softmax')(x)

      model = keras.models.Model(inputs, outputs)
      model.compile(loss='categorical_crossentropy',
                    optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.8),
                    metrics=['accuracy'])
      history = model.fit(x_train, y_train, epochs=10, batch_size=16,
                          validation_data=(x_train, y_train),
                          verbose=2)
      self.assertGreater(history.history['val_acc'][-1], 0.7)
Developer: AnishShah, Project: tensorflow, Lines of code: 27, Source: integration_test.py

Example 10: test_invalid_loss_or_metrics

  def test_invalid_loss_or_metrics(self):
    num_classes = 5
    train_samples = 1000
    test_samples = 1000
    input_dim = 5

    model = keras.models.Sequential()
    model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
    model.add(keras.layers.Activation('relu'))
    model.add(keras.layers.Dense(num_classes))
    model.add(keras.layers.Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSPropOptimizer(learning_rate=0.001))
    np.random.seed(1337)

    (x_train, y_train), (_, _) = testing_utils.get_test_data(
        train_samples=train_samples,
        test_samples=test_samples,
        input_shape=(input_dim,),
        num_classes=num_classes)

    with self.assertRaises(ValueError):
      model.fit(x_train, np.concatenate([y_train, y_train], axis=-1))

    with self.assertRaises(TypeError):
      model.compile(loss='categorical_crossentropy',
                    optimizer=RMSPropOptimizer(learning_rate=0.001),
                    metrics=set(0))

    with self.assertRaises(ValueError):
      model.compile(loss=None,
                    optimizer='rms')
Developer: LiuCKind, Project: tensorflow, Lines of code: 32, Source: training_eager_test.py

Example 11: testOptimizerWithCallableVarList

  def testOptimizerWithCallableVarList(self):
    train_samples = 20
    input_dim = 1
    num_classes = 2
    (x, y), _ = testing_utils.get_test_data(
        train_samples=train_samples,
        test_samples=10,
        input_shape=(input_dim,),
        num_classes=num_classes)
    y = keras.utils.to_categorical(y)

    num_hidden = 1
    model = testing_utils.get_small_sequential_mlp(
        num_hidden=num_hidden, num_classes=num_classes)
    opt = adam.Adam()

    loss = lambda: losses.mean_squared_error(model(x), y)
    var_list = lambda: model.trainable_weights

    with self.assertRaisesRegexp(
        ValueError, 'Weights for model .* have not yet been created'):
      var_list()
    train_op = opt.minimize(loss, var_list)
    if not context.executing_eagerly():
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(
          [[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm')))
      self.evaluate(train_op)
    self.assertNotEqual(
        [[0.]], self.evaluate(opt.get_slot(var_list()[0], 'm')))
    self.assertLen(var_list(), 4)
Developer: aritratony, Project: tensorflow, Lines of code: 31, Source: optimizer_v2_test.py

Example 12: test_EarlyStopping_with_baseline

  def test_EarlyStopping_with_baseline(self):
    with self.test_session():
      np.random.seed(1337)
      baseline = 0.5
      (data, labels), _ = testing_utils.get_test_data(
          train_samples=100,
          test_samples=50,
          input_shape=(1,),
          num_classes=NUM_CLASSES)
      model = keras.models.Sequential((keras.layers.Dense(
          1, input_dim=1, activation='relu'), keras.layers.Dense(
              1, activation='sigmoid'),))
      model.compile(
          optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])

      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) == 1

      patience = 3
      stopper = keras.callbacks.EarlyStopping(monitor='acc',
                                              patience=patience,
                                              baseline=baseline)
      hist = model.fit(data, labels, callbacks=[stopper], verbose=0, epochs=20)
      assert len(hist.epoch) >= patience
Developer: ZhangXinNan, Project: tensorflow, Lines of code: 26, Source: callbacks_test.py

Example 13: test_TerminateOnNaN

  def test_TerminateOnNaN(self):
    with self.test_session():
      np.random.seed(1337)
      (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
          train_samples=TRAIN_SAMPLES,
          test_samples=TEST_SAMPLES,
          input_shape=(INPUT_DIM,),
          num_classes=NUM_CLASSES)

      y_test = keras.utils.to_categorical(y_test)
      y_train = keras.utils.to_categorical(y_train)
      cbks = [keras.callbacks.TerminateOnNaN()]
      model = keras.models.Sequential()
      initializer = keras.initializers.Constant(value=1e5)
      for _ in range(5):
        model.add(
            keras.layers.Dense(
                2,
                input_dim=INPUT_DIM,
                activation='relu',
                kernel_initializer=initializer))
      model.add(keras.layers.Dense(NUM_CLASSES))
      model.compile(loss='mean_squared_error', optimizer='rmsprop')

      history = model.fit(
          x_train,
          y_train,
          batch_size=BATCH_SIZE,
          validation_data=(x_test, y_test),
          callbacks=cbks,
          epochs=20)
      loss = history.history['loss']
      assert len(loss) == 1
      assert loss[0] == np.inf
Developer: ZhangXinNan, Project: tensorflow, Lines of code: 34, Source: callbacks_test.py

Example 14: testRNNWithKerasGRUCell

  def testRNNWithKerasGRUCell(self):
    with self.cached_session() as sess:
      input_shape = 10
      output_shape = 5
      timestep = 4
      batch = 100
      (x_train, y_train), _ = testing_utils.get_test_data(
          train_samples=batch,
          test_samples=0,
          input_shape=(timestep, input_shape),
          num_classes=output_shape)
      y_train = keras.utils.to_categorical(y_train)
      cell = keras.layers.GRUCell(output_shape)

      inputs = array_ops.placeholder(
          dtypes.float32, shape=(None, timestep, input_shape))
      predict = array_ops.placeholder(
          dtypes.float32, shape=(None, output_shape))

      outputs, state = rnn.dynamic_rnn(
          cell, inputs, dtype=dtypes.float32)
      self.assertEqual(outputs.shape.as_list(), [None, timestep, output_shape])
      self.assertEqual(state.shape.as_list(), [None, output_shape])
      loss = losses.softmax_cross_entropy(predict, state)
      train_op = training.GradientDescentOptimizer(0.001).minimize(loss)

      sess.run([variables_lib.global_variables_initializer()])
      _, outputs, state = sess.run(
          [train_op, outputs, state], {inputs: x_train, predict: y_train})

      self.assertEqual(len(outputs), batch)
      self.assertEqual(len(state), batch)
Developer: gunan, Project: tensorflow, Lines of code: 32, Source: rnn_test.py

Example 15: testKerasAndTFRNNLayerOutputComparison

  def testKerasAndTFRNNLayerOutputComparison(self):
    input_shape = 10
    output_shape = 5
    timestep = 4
    batch = 20
    (x_train, _), _ = testing_utils.get_test_data(
        train_samples=batch,
        test_samples=0,
        input_shape=(timestep, input_shape),
        num_classes=output_shape)
    fix_weights_generator = keras.layers.SimpleRNNCell(output_shape)
    fix_weights_generator.build((None, input_shape))
    weights = fix_weights_generator.get_weights()

    with self.session(graph=ops_lib.Graph()) as sess:
      inputs = array_ops.placeholder(
          dtypes.float32, shape=(None, timestep, input_shape))
      cell = keras.layers.SimpleRNNCell(output_shape)
      tf_out, tf_state = rnn.dynamic_rnn(
          cell, inputs, dtype=dtypes.float32)
      cell.set_weights(weights)
      [tf_out, tf_state] = sess.run([tf_out, tf_state], {inputs: x_train})
    with self.session(graph=ops_lib.Graph()) as sess:
      k_input = keras.Input(shape=(timestep, input_shape),
                            dtype=dtypes.float32)
      cell = keras.layers.SimpleRNNCell(output_shape)
      layer = keras.layers.RNN(cell, return_sequences=True, return_state=True)
      keras_out = layer(k_input)
      cell.set_weights(weights)
      k_out, k_state = sess.run(keras_out, {k_input: x_train})
    self.assertAllClose(tf_out, k_out)
    self.assertAllClose(tf_state, k_state)
Developer: gunan, Project: tensorflow, Lines of code: 32, Source: rnn_test.py


Note: The tensorflow.python.keras.testing_utils.get_test_data examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors; please consult each project's License before redistributing or using the code. Do not reproduce this article without permission.