本文整理汇总了Python中tensorflow.python.keras._impl.keras.testing_utils.get_test_data函数的典型用法代码示例。如果您正苦于以下问题:Python get_test_data函数的具体用法?Python get_test_data怎么用?Python get_test_data使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get_test_data函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_multi_inputs_multi_outputs
def test_multi_inputs_multi_outputs(self):
  """End-to-end test of a two-input / two-output Keras model as an Estimator.

  Generates two independent synthetic classification datasets, trains the
  converted estimator on both heads, and asserts better-than-chance
  accuracy on each output.
  """
  np.random.seed(1337)
  (a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(
      train_samples=200, test_samples=100, input_shape=(32,), num_classes=3)
  (b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(
      train_samples=200, test_samples=100, input_shape=(32,), num_classes=2)
  c_train = keras.utils.to_categorical(c_train)
  c_test = keras.utils.to_categorical(c_test)
  d_train = keras.utils.to_categorical(d_train)
  d_test = keras.utils.to_categorical(d_test)

  def train_input_fn():
    # Dict keys must match the Keras model's input/output layer names.
    input_dict = {
        'input_a':
            ops.convert_to_tensor(
                np.array(a_train, dtype=np.float32), dtype=dtypes.float32),
        'input_b':
            ops.convert_to_tensor(
                np.array(b_train, dtype=np.float32), dtype=dtypes.float32)
    }
    output_dict = {
        'dense_2':
            ops.convert_to_tensor(
                np.array(c_train, dtype=np.float32), dtype=dtypes.float32),
        'dense_3':
            ops.convert_to_tensor(
                np.array(d_train, dtype=np.float32), dtype=dtypes.float32)
    }
    return input_dict, output_dict

  def evaluate_input_fn():
    input_dict = {
        'input_a':
            ops.convert_to_tensor(
                np.array(a_test, dtype=np.float32), dtype=dtypes.float32),
        'input_b':
            ops.convert_to_tensor(
                np.array(b_test, dtype=np.float32), dtype=dtypes.float32)
    }
    output_dict = {
        'dense_2':
            ops.convert_to_tensor(
                np.array(c_test, dtype=np.float32), dtype=dtypes.float32),
        'dense_3':
            ops.convert_to_tensor(
                np.array(d_test, dtype=np.float32), dtype=dtypes.float32)
    }
    return input_dict, output_dict

  with self.test_session():
    model = multi_inputs_multi_outputs_model()
    est_keras = keras.estimator.model_to_estimator(
        keras_model=model, model_dir=tempfile.mkdtemp(dir=self._base_dir))
    # `//` (not `/`): Estimator.train expects an integer step count; true
    # division would pass a float (125.0) on Python 3.
    est_keras.train(input_fn=train_input_fn, steps=200 * 10 // 16)
    eval_results = est_keras.evaluate(input_fn=evaluate_input_fn, steps=1)
    self.assertGreater(eval_results['accuracy_dense_2'], 0.5)
    self.assertGreater(eval_results['accuracy_dense_3'], 0.5)
示例2: get_resource_for_simple_model
def get_resource_for_simple_model(is_sequential, is_evaluate):
  """Build a simple model plus synthetic data and Estimator input functions.

  Returns the model, the (train, test) numpy data, a repeating training
  input_fn, and either an evaluation or a prediction input_fn depending on
  `is_evaluate`.
  """
  if is_sequential:
    model = simple_sequential_model()
    # A Sequential model must be built before its input names are available.
    model.build()
  else:
    model = simple_functional_model()
  feature_key = model.input_names[0]

  np.random.seed(_RANDOM_SEED)
  (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
      train_samples=_TRAIN_SIZE,
      test_samples=50,
      input_shape=_INPUT_SIZE,
      num_classes=_NUM_CLASS)
  y_train = keras.utils.to_categorical(y_train)
  y_test = keras.utils.to_categorical(y_test)

  train_input_fn = numpy_io.numpy_input_fn(
      x={feature_key: x_train},
      y=y_train,
      shuffle=False,
      num_epochs=None,  # repeat indefinitely; the caller bounds training by steps
      batch_size=16)
  evaluate_input_fn = numpy_io.numpy_input_fn(
      x={feature_key: x_test}, y=y_test, num_epochs=1, shuffle=False)
  predict_input_fn = numpy_io.numpy_input_fn(
      x={feature_key: x_test}, num_epochs=1, shuffle=False)
  if is_evaluate:
    inference_input_fn = evaluate_input_fn
  else:
    inference_input_fn = predict_input_fn
  return model, (x_train, y_train), (x_test,
                                     y_test), train_input_fn, inference_input_fn
示例3: test_invalid_loss_or_metrics
def test_invalid_loss_or_metrics(self):
  """Invalid losses, metrics, or target shapes should raise at compile/fit."""
  num_classes = 5
  train_samples = 1000
  test_samples = 1000
  input_dim = 5
  model = keras.models.Sequential()
  model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
  model.add(keras.layers.Activation('relu'))
  model.add(keras.layers.Dense(num_classes))
  model.add(keras.layers.Activation('softmax'))
  model.compile(loss='categorical_crossentropy',
                optimizer=RMSPropOptimizer(learning_rate=0.001))
  np.random.seed(1337)
  (x_train, y_train), (_, _) = testing_utils.get_test_data(
      train_samples=train_samples,
      test_samples=test_samples,
      input_shape=(input_dim,),
      num_classes=num_classes)
  # Targets twice as wide as the model output must be rejected by fit().
  with self.assertRaises(ValueError):
    model.fit(x_train, np.concatenate([y_train, y_train], axis=-1))
  # NOTE(review): `set(0)` itself raises TypeError ('int' is not iterable)
  # while the arguments are being evaluated, so compile() is never actually
  # called here — the assertion passes, but possibly for the wrong reason.
  # Confirm whether an iterable-but-invalid metrics value was intended.
  with self.assertRaises(TypeError):
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSPropOptimizer(learning_rate=0.001),
                  metrics=set(0))
  # A model cannot be compiled without any loss.
  with self.assertRaises(ValueError):
    model.compile(loss=None,
                  optimizer='rms')
示例4: test_LearningRateScheduler
def test_LearningRateScheduler(self):
  """LearningRateScheduler should set lr to schedule(epoch) every epoch."""
  with self.test_session():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=TRAIN_SAMPLES,
        test_samples=TEST_SAMPLES,
        input_shape=(INPUT_DIM,),
        num_classes=NUM_CLASSES)
    y_test = keras.utils.to_categorical(y_test)
    y_train = keras.utils.to_categorical(y_train)
    model = keras.models.Sequential()
    model.add(
        keras.layers.Dense(
            NUM_HIDDEN, input_dim=INPUT_DIM, activation='relu'))
    model.add(keras.layers.Dense(NUM_CLASSES, activation='softmax'))
    model.compile(
        loss='categorical_crossentropy',
        optimizer='sgd',
        metrics=['accuracy'])
    cbks = [keras.callbacks.LearningRateScheduler(lambda x: 1. / (1. + x))]
    model.fit(
        x_train,
        y_train,
        batch_size=BATCH_SIZE,
        validation_data=(x_test, y_test),
        callbacks=cbks,
        epochs=5,
        verbose=0)
    # The last scheduled epoch index is 4, so the final lr must be
    # 1 / (1 + 4) = 0.2. Use abs(): without it the assertion was vacuously
    # true for any lr smaller than 0.2.
    assert abs(float(keras.backend.get_value(model.optimizer.lr)) - 0.2
              ) < keras.backend.epsilon()
示例5: _test_optimizer
def _test_optimizer(optimizer, target=0.75):
  """Exercise `optimizer` end to end.

  Checks that (1) a small model trains to at least `target` accuracy,
  (2) serialize -> deserialize -> serialize round-trips the optimizer
  config (modulo class-name casing), and (3) kernel/bias constraints are
  applied after a training step.
  """
  np.random.seed(1337)
  (x_train, y_train), _ = testing_utils.get_test_data(train_samples=1000,
                                                      test_samples=200,
                                                      input_shape=(10,),
                                                      num_classes=2)
  y_train = keras.utils.to_categorical(y_train)
  input_dim = x_train.shape[1]
  num_labels = y_train.shape[1]

  model = _get_model(input_dim, 20, num_labels)
  model.compile(loss='categorical_crossentropy',
                optimizer=optimizer,
                metrics=['accuracy'])
  hist = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0)
  assert hist.history['acc'][-1] >= target

  # Config round-trip; class names are compared case-insensitively.
  config = keras.optimizers.serialize(optimizer)
  restored = keras.optimizers.deserialize(config)
  new_config = keras.optimizers.serialize(restored)
  new_config['class_name'] = new_config['class_name'].lower()
  assert config == new_config

  # Constraint test: after one batch, weights must equal the constants
  # produced by the degenerate constraint functions below.
  model = keras.models.Sequential()
  dense = keras.layers.Dense(10,
                             input_shape=(input_dim,),
                             kernel_constraint=lambda x: 0. * x + 1.,
                             bias_constraint=lambda x: 0. * x + 2.,
                             activation='relu')
  model.add(dense)
  model.add(keras.layers.Dense(num_labels, activation='softmax'))
  model.compile(loss='categorical_crossentropy',
                optimizer=optimizer,
                metrics=['accuracy'])
  model.train_on_batch(x_train[:10], y_train[:10])
  kernel, bias = dense.get_weights()
  np.testing.assert_allclose(kernel, 1., atol=1e-3)
  np.testing.assert_allclose(bias, 2., atol=1e-3)
示例6: test_video_classification_functional
def test_video_classification_functional(self):
  """Functional model with time-distributed convs should fit video-shaped data."""
  with self.test_session():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=200,
        test_samples=100,
        input_shape=(4, 8, 8, 3),
        num_classes=3)
    y_train = keras.utils.to_categorical(y_train)
    y_test = keras.utils.to_categorical(y_test)

    # Per-frame 2D convolution, then temporal (1D) convolution over frames.
    inputs = keras.layers.Input(shape=x_train.shape[1:])
    net = keras.layers.TimeDistributed(
        keras.layers.Conv2D(4, 3, activation='relu'))(inputs)
    net = keras.layers.BatchNormalization()(net)
    net = keras.layers.TimeDistributed(keras.layers.GlobalMaxPooling2D())(net)
    net = keras.layers.Conv1D(8, 3, activation='relu')(net)
    net = keras.layers.Flatten()(net)
    outputs = keras.layers.Dense(y_train.shape[-1], activation='softmax')(net)

    model = keras.models.Model(inputs, outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.8),
                  metrics=['accuracy'])
    history = model.fit(x_train, y_train, epochs=10, batch_size=16,
                        validation_data=(x_test, y_test),
                        verbose=2)
    self.assertGreater(history.history['val_acc'][-1], 0.70)
示例7: test_vector_classification_declarative
def test_vector_classification_declarative(self):
  """A small Sequential MLP should fit a synthetic binary-classification task."""
  with self.test_session():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=200,
        test_samples=100,
        input_shape=(10,),
        num_classes=2)
    y_train = keras.utils.to_categorical(y_train)
    y_test = keras.utils.to_categorical(y_test)

    model = keras.models.Sequential()
    model.add(keras.layers.Dense(16,
                                 activation='relu',
                                 input_shape=x_train.shape[1:]))
    model.add(keras.layers.Dropout(0.1))
    model.add(keras.layers.Dense(y_train.shape[-1], activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    history = model.fit(x_train, y_train, epochs=10, batch_size=16,
                        validation_data=(x_test, y_test),
                        verbose=2)
    self.assertGreater(history.history['val_acc'][-1], 0.85)
示例8: test_vector_classification_shared_model
def test_vector_classification_shared_model(self):
  # A functional model carrying internal updates (BatchNorm) and internal
  # losses (weight regularizers) must remain trainable when reused inside
  # another model.
  with self.test_session():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=200,
        test_samples=100,
        input_shape=(10,),
        num_classes=2)
    y_train = keras.utils.to_categorical(y_train)
    y_test = keras.utils.to_categorical(y_test)

    # Shared sub-model: regularized dense layer followed by batch-norm.
    base_inputs = keras.layers.Input(x_train.shape[1:])
    features = keras.layers.Dense(
        16,
        activation='relu',
        kernel_regularizer=keras.regularizers.l2(1e-5),
        bias_regularizer=keras.regularizers.l2(1e-5),
        input_shape=x_train.shape[1:])(base_inputs)
    features = keras.layers.BatchNormalization()(features)
    base_model = keras.models.Model(base_inputs, features)

    # Outer model that wraps the shared sub-model.
    outer_inputs = keras.layers.Input(x_train.shape[1:])
    outputs = base_model(outer_inputs)
    outputs = keras.layers.Dense(
        y_train.shape[-1], activation='softmax')(outputs)
    model = keras.models.Model(outer_inputs, outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    history = model.fit(x_train, y_train, epochs=10, batch_size=16,
                        validation_data=(x_test, y_test),
                        verbose=2)
    self.assertGreater(history.history['val_acc'][-1], 0.85)
示例9: test_TerminateOnNaN
def test_TerminateOnNaN(self):
  """TerminateOnNaN must stop training after the first inf/NaN loss."""
  np.random.seed(1337)
  (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
      train_samples=TRAIN_SAMPLES,
      test_samples=TEST_SAMPLES,
      input_shape=(INPUT_DIM,),
      num_classes=NUM_CLASSES)
  y_test = keras.utils.to_categorical(y_test)
  y_train = keras.utils.to_categorical(y_train)
  callbacks = [keras.callbacks.TerminateOnNaN()]

  # Huge constant initial weights make the loss blow up immediately.
  model = keras.models.Sequential()
  big_init = keras.initializers.Constant(value=1e5)
  for _ in range(5):
    model.add(keras.layers.Dense(2,
                                 input_dim=INPUT_DIM,
                                 activation='relu',
                                 kernel_initializer=big_init))
  model.add(keras.layers.Dense(NUM_CLASSES))
  model.compile(loss='mean_squared_error',
                optimizer='rmsprop')
  history = model.fit(x_train, y_train, batch_size=BATCH_SIZE,
                      validation_data=(x_test, y_test),
                      callbacks=callbacks, epochs=20)
  # Despite epochs=20, only the first epoch should have run, with inf loss.
  losses = history.history['loss']
  assert len(losses) == 1
  assert losses[0] == np.inf
示例10: test_image_classification_declarative
def test_image_classification_declarative(self):
  """A small convnet should fit a synthetic image-classification task."""
  with self.test_session():
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = testing_utils.get_test_data(
        train_samples=200,
        test_samples=100,
        input_shape=(8, 8, 3),
        num_classes=2)
    y_train = keras.utils.to_categorical(y_train)
    y_test = keras.utils.to_categorical(y_test)

    model = keras.models.Sequential([
        keras.layers.Conv2D(8, 3,
                            activation='relu',
                            input_shape=x_train.shape[1:]),
        keras.layers.BatchNormalization(),
        keras.layers.Conv2D(8, 3,
                            padding='same',
                            activation='relu'),
        keras.layers.GlobalMaxPooling2D(),
        keras.layers.Dense(y_train.shape[-1], activation='softmax'),
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    history = model.fit(x_train, y_train, epochs=10, batch_size=16,
                        validation_data=(x_test, y_test),
                        verbose=2)
    self.assertGreater(history.history['val_acc'][-1], 0.85)
示例11: test_invalid_ionames_error
def test_invalid_ionames_error(self):
  """Estimator training must reject input_fns with unknown feature/label keys."""
  (x_train, y_train), (_, _) = testing_utils.get_test_data(
      train_samples=_TRAIN_SIZE,
      test_samples=100,
      input_shape=(10,),
      num_classes=2)
  y_train = keras.utils.to_categorical(y_train)

  def invalid_input_name_input_fn():
    # Feature key does not match any model input name.
    return {'invalid_input_name': x_train}, y_train

  def invalid_output_name_input_fn():
    # Label key does not match any model output name.
    return {'input_1': x_train}, {'invalid_output_name': y_train}

  model = simple_functional_model()
  model.compile(
      loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
  est_keras = keras.estimator.model_to_estimator(
      keras_model=model, config=self._config)
  with self.test_session():
    with self.assertRaises(ValueError):
      est_keras.train(input_fn=invalid_input_name_input_fn, steps=100)
    with self.assertRaises(ValueError):
      est_keras.train(input_fn=invalid_output_name_input_fn, steps=100)
示例12: test_image_classification_sequential
def test_image_classification_sequential(self):
  """A stacked-conv Sequential model should (over)fit a tiny image dataset."""
  with self.test_session():
    np.random.seed(1337)
    (x_train, y_train), _ = testing_utils.get_test_data(
        train_samples=100,
        test_samples=0,
        input_shape=(12, 12, 3),
        num_classes=2)
    y_train = keras.utils.to_categorical(y_train)

    model = keras.models.Sequential()
    model.add(keras.layers.Conv2D(4, 3,
                                  padding='same',
                                  activation='relu',
                                  input_shape=x_train.shape[1:]))
    # Widen the network with each additional conv layer.
    for filters in (8, 16):
      model.add(keras.layers.Conv2D(filters, 3,
                                    padding='same',
                                    activation='relu'))
    model.add(keras.layers.Flatten())
    model.add(keras.layers.Dense(y_train.shape[-1], activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.SGD(lr=0.01, momentum=0.8),
                  metrics=['accuracy'])
    # Validation on the training set: this only checks capacity to fit.
    history = model.fit(x_train, y_train, epochs=10, batch_size=16,
                        validation_data=(x_train, y_train),
                        verbose=2)
    self.assertGreater(history.history['val_acc'][-1], 0.7)
示例13: test_temporal_sample_weights
def test_temporal_sample_weights(self):
  """Compiling this model with sample_weight_mode='temporal' must raise.

  The block only asserts that compile() raises ValueError; the original
  version also generated training data, class weights, and per-sample
  weights, none of which was ever used (compile() raises before any fit
  could run), so that dead setup has been removed.
  """
  num_classes = 5
  input_dim = 5
  timesteps = 3
  model = keras.models.Sequential()
  model.add(
      keras.layers.TimeDistributed(
          keras.layers.Dense(num_classes),
          input_shape=(timesteps, input_dim)))
  model.add(keras.layers.Activation('softmax'))
  with self.assertRaises(ValueError):
    model.compile(
        loss='binary_crossentropy',
        optimizer=RMSPropOptimizer(learning_rate=0.001),
        sample_weight_mode='temporal')
示例14: test_sample_weights
def test_sample_weights(self):
  """fit/train_on_batch/test_on_batch should accept per-sample weights.

  Removed unused locals from the original (`int_y_test`, `test_ids`,
  `class_weight`, and the one-hot test labels) — nothing below consumed
  them, so they only obscured what the test actually exercises.
  """
  num_classes = 5
  batch_size = 5
  epochs = 5
  weighted_class = 3
  train_samples = 3000
  test_samples = 3000
  input_dim = 5

  model = keras.models.Sequential()
  model.add(keras.layers.Dense(10, input_shape=(input_dim,)))
  model.add(keras.layers.Activation('relu'))
  model.add(keras.layers.Dense(num_classes))
  model.add(keras.layers.Activation('softmax'))
  model.compile(loss='categorical_crossentropy',
                optimizer=RMSPropOptimizer(learning_rate=0.001))

  np.random.seed(43)
  (x_train, y_train), _ = testing_utils.get_test_data(
      train_samples=train_samples,
      test_samples=test_samples,
      input_shape=(input_dim,),
      num_classes=num_classes)
  int_y_train = y_train.copy()
  # Convert class vectors to binary class matrices.
  y_train = keras.utils.to_categorical(y_train, num_classes)

  # Double the weight of every sample belonging to `weighted_class`.
  sample_weight = np.ones((y_train.shape[0]))
  sample_weight[int_y_train == weighted_class] = 2.

  model.fit(
      x_train,
      y_train,
      batch_size=batch_size,
      epochs=epochs // 3,
      verbose=0,
      sample_weight=sample_weight)
  model.fit(
      x_train,
      y_train,
      batch_size=batch_size,
      epochs=epochs // 3,
      verbose=0,
      sample_weight=sample_weight,
      validation_split=0.1)
  model.train_on_batch(
      x_train[:batch_size],
      y_train[:batch_size],
      sample_weight=sample_weight[:batch_size])
  model.test_on_batch(
      x_train[:batch_size],
      y_train[:batch_size],
      sample_weight=sample_weight[:batch_size])
示例15: test_multi_inputs_multi_outputs
def test_multi_inputs_multi_outputs(self):
  """Training should reduce the loss of a three-input, two-output estimator."""
  np.random.seed(_RANDOM_SEED)
  (a_train, c_train), (a_test, c_test) = testing_utils.get_test_data(
      train_samples=_TRAIN_SIZE,
      test_samples=50,
      input_shape=(16,),
      num_classes=3)
  np.random.seed(_RANDOM_SEED)
  (b_train, d_train), (b_test, d_test) = testing_utils.get_test_data(
      train_samples=_TRAIN_SIZE,
      test_samples=50,
      input_shape=(16,),
      num_classes=2)
  np.random.seed(_RANDOM_SEED)
  (input_m_train, _), (input_m_test, _) = testing_utils.get_test_data(
      train_samples=_TRAIN_SIZE,
      test_samples=50,
      input_shape=(8,),
      num_classes=2)
  c_train = keras.utils.to_categorical(c_train)
  c_test = keras.utils.to_categorical(c_test)
  d_train = keras.utils.to_categorical(d_train)
  d_test = keras.utils.to_categorical(d_test)

  def train_input_fn():
    # 'input_m' is fed as a boolean mask derived from the raw features.
    input_dict = {'input_a': a_train, 'input_b': b_train,
                  'input_m': input_m_train > 0}
    output_dict = {'dense_2': c_train, 'dense_3': d_train}
    return input_dict, output_dict

  def eval_input_fn():
    input_dict = {'input_a': a_test, 'input_b': b_test,
                  'input_m': input_m_test > 0}
    output_dict = {'dense_2': c_test, 'dense_3': d_test}
    return input_dict, output_dict

  with self.test_session():
    model = multi_inputs_multi_outputs_model()
    est_keras = keras.estimator.model_to_estimator(
        keras_model=model, config=self._config)
    before_eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)
    # `//` (not `/`): Estimator.train expects an integer step count; true
    # division would pass a float on Python 3.
    est_keras.train(input_fn=train_input_fn, steps=_TRAIN_SIZE // 16)
    after_eval_results = est_keras.evaluate(input_fn=eval_input_fn, steps=1)
    self.assertLess(after_eval_results['loss'], before_eval_results['loss'])