This article collects typical usage examples of tensorflow.keras in Python. If you are wondering how tensorflow.keras is used in practice, what concrete calls look like, or where to find real-world examples, the curated code samples below may help. You can also explore further usage examples from the enclosing tensorflow package.
The following shows 15 code examples that use tensorflow.keras, sorted by popularity by default.
Example 1: testUnconstrainedNoMissingValue
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import keras [as alias]
def testUnconstrainedNoMissingValue(self, units, one_d_input, expected_loss):
  if self._disable_all:
    return
  config = {
      "units": units,
      "one_d_input": one_d_input,
      "num_training_records": 100,
      "num_training_epoch": 2000,
      "optimizer": tf.keras.optimizers.Adagrad,
      "learning_rate": 0.15,
      "x_generator": self._ScatterXUniformly,
      "y_function": self._SmallWaves,
      "monotonicity": 0,
      "num_keypoints": 21,
      "input_min": -1.0,
      "input_max": 1.0,
      "output_min": None,
      "output_max": None,
  }
  loss = self._TrainModel(config)
  self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps)

  if units > 1 and not one_d_input:
    config["use_multi_calibration_layer"] = True
    loss = self._TrainModel(config)
    self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps)
Example 2: testConvexityNonUniformKeypoints
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import keras [as alias]
def testConvexityNonUniformKeypoints(self, units, convexity, expected_loss):
  # No constraints other than convexity.
  if self._disable_all:
    return
  config = {
      "units": units,
      "num_training_records": 100,
      "num_training_epoch": 200,
      "optimizer": tf.keras.optimizers.Adagrad,
      "learning_rate": 1.0,
      "x_generator": self._ScatterXUniformly,
      "y_function": self._WavyParabola,
      "monotonicity": 0,
      "convexity": convexity,
      "input_keypoints": [-1.0, -0.9, -0.3, -0.2, 0.0, 0.3, 0.31, 0.35, 1.0],
      "output_min": None,
      "output_max": None,
  }
  loss = self._TrainModel(config)
  self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps)

  if units > 1:
    config["use_multi_calibration_layer"] = True
    loss = self._TrainModel(config)
    self.assertAlmostEqual(loss, expected_loss, delta=self._loss_eps)
Example 3: testInputKeypoints
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import keras [as alias]
def testInputKeypoints(self, keypoints):
  if self._disable_all:
    return
  config = {
      "num_training_records": 100,
      "num_training_epoch": 200,
      "optimizer": tf.keras.optimizers.Adagrad,
      "learning_rate": 0.15,
      "x_generator": self._ScatterXUniformly,
      "y_function": self._SmallWaves,
      "monotonicity": 0,
      "input_keypoints": keypoints,
      "output_min": None,
      "output_max": None,
  }
  loss = self._TrainModel(config)
  self.assertAlmostEqual(loss, 0.009650, delta=self._loss_eps)
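
The config dictionaries in Examples 1-3 come from test code for piecewise-linear calibration; the _TrainModel helper that consumes them is not shown here. As a rough orientation, the sketch below shows how the same knobs (input keypoints, monotonicity, convexity, output bounds) appear on the public tfl.layers.PWLCalibration layer. The layer choice and argument values are illustrative assumptions, not taken from the test harness above.

# A minimal sketch, assuming tensorflow_lattice is installed: a calibrated
# regression model using the same kinds of parameters as the configs above.
import numpy as np
import tensorflow as tf
import tensorflow_lattice as tfl

model = tf.keras.Sequential([
    tfl.layers.PWLCalibration(
        input_keypoints=np.linspace(-1.0, 1.0, num=21),  # mirrors "num_keypoints": 21
        monotonicity=0,   # 0 / 'none' means unconstrained, as in the configs above
        convexity=0,      # set to 1 / -1 for convex / concave constraints
        output_min=None,
        output_max=None,
        units=1),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer=tf.keras.optimizers.Adagrad(learning_rate=0.15), loss='mse')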
Example 4: __call__
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import keras [as alias]
def __call__(self, model):
    """
    :param model: Keras model to be accelerated
    :type model: Union[keras.Model, keras.Sequential]
    :return: Accelerated Keras model
    :rtype: Union[keras.Model, keras.Sequential]
    """
    if isinstance(model, (tfk.Model, tfk.Sequential)):
        self.model = model
    else:
        raise TypeError(f'FastMCInference expects a tensorflow.keras Model, you gave {type(model)}')
    new_input = tfk.layers.Input(shape=self.model.input_shape[1:], name='input')
    mc_model = tfk.models.Model(inputs=self.model.inputs, outputs=self.model.outputs)
    # Repeat the input n times, run the wrapped model on each copy via
    # TimeDistributed, then reduce the stacked outputs to mean and variance.
    mc = FastMCInferenceMeanVar()(tfk.layers.TimeDistributed(mc_model)(FastMCRepeat(self.n)(new_input)))
    new_mc_model = tfk.models.Model(inputs=new_input, outputs=mc)
    return new_mc_model
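
Example 4's __call__ belongs to a wrapper class (astroNN's FastMCInference-style accelerator) whose constructor and helper layers (FastMCRepeat, FastMCInferenceMeanVar) are not shown above. A hedged usage sketch, assuming the class is importable from astroNN and that its constructor takes the number of Monte Carlo forward passes:

# Usage sketch; the import path and constructor signature are assumptions based
# on astroNN's documentation, not taken from the excerpt above.
import tensorflow as tf
from astroNN.nn.layers import FastMCInference

base = tf.keras.Sequential([
    tf.keras.layers.Dense(32, activation='relu', input_shape=(8,)),
    # For the MC passes to differ, a dropout variant that stays active at
    # inference (e.g. astroNN's MCDropout) would be used in practice.
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(1),
])

# Wrap the trained model: inputs are repeated n times, pushed through the model,
# and reduced to a (mean, variance) pair per output.
mc_model = FastMCInference(100)(base)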
Example 5: _update_t_cur_eta_t_v2
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import keras [as alias]
def _update_t_cur_eta_t_v2(self, lr_t=None, var=None):  # tf.keras
    t_cur_update, eta_t_update = None, None  # in case not assigned

    # update `t_cur` if iterating last `(grad, var)`
    iteration_done = self._updates_processed == (self._updates_per_iter - 1)
    if iteration_done:
        t_cur_update = state_ops.assign_add(self.t_cur, 1,
                                            use_locking=self._use_locking)
        self._updates_processed = 0  # reset
    else:
        self._updates_processed += 1

    # Cosine annealing
    if self.use_cosine_annealing and iteration_done:
        # ensure eta_t is updated AFTER t_cur
        with ops.control_dependencies([t_cur_update]):
            eta_t_update = state_ops.assign(self.eta_t, _compute_eta_t(self),
                                            use_locking=self._use_locking)
        self.lr_t = lr_t * self.eta_t  # for external tracking

    return iteration_done, t_cur_update, eta_t_update
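
Example 5 comes from an optimizer add-on (keras-adamw style) that scales the learning rate by a factor eta_t recomputed once per training iteration. The _compute_eta_t helper is not shown above; the standalone sketch below shows the standard warm-restart cosine-annealing multiplier such a helper typically implements. The function name and default range are assumptions for illustration, not the library's code.

# Illustrative only:
# eta_t = eta_min + 0.5 * (eta_max - eta_min) * (1 + cos(pi * t_cur / T)).
import math
import tensorflow as tf

def cosine_annealed_eta(t_cur, total_iterations, eta_min=0.0, eta_max=1.0):
    """Cosine factor that decays from eta_max to eta_min over one restart period."""
    frac = tf.cast(t_cur, tf.float32) / float(total_iterations)
    return eta_min + 0.5 * (eta_max - eta_min) * (1.0 + tf.cos(math.pi * frac))

# The resulting eta_t then multiplies the base learning rate,
# as in `self.lr_t = lr_t * self.eta_t` above.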
Example 6: build_layerwise_model
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import keras [as alias]
def build_layerwise_model(input_shape, **pruning_params):
  return tf.keras.Sequential([
      prune.prune_low_magnitude(
          l.Conv2D(32, 5, padding='same', activation='relu'),
          input_shape=input_shape,
          **pruning_params),
      l.MaxPooling2D((2, 2), (2, 2), padding='same'),
      l.BatchNormalization(),
      prune.prune_low_magnitude(
          l.Conv2D(64, 5, padding='same', activation='relu'), **pruning_params),
      l.MaxPooling2D((2, 2), (2, 2), padding='same'),
      l.Flatten(),
      prune.prune_low_magnitude(
          l.Dense(1024, activation='relu'), **pruning_params),
      l.Dropout(0.4),
      prune.prune_low_magnitude(
          l.Dense(num_classes, activation='softmax'), **pruning_params)
  ])
Example 7: build_layerwise_model
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import keras [as alias]
def build_layerwise_model(input_shape, **pruning_params):
  return tf.keras.Sequential([
      l.Conv2D(
          32, 5, padding='same', activation='relu', input_shape=input_shape),
      l.MaxPooling2D((2, 2), (2, 2), padding='same'),
      l.Conv2D(64, 5, padding='same'),
      l.BatchNormalization(),
      l.ReLU(),
      l.MaxPooling2D((2, 2), (2, 2), padding='same'),
      l.Flatten(),
      prune.prune_low_magnitude(
          l.Dense(1024, activation='relu'), **pruning_params),
      l.Dropout(0.4),
      prune.prune_low_magnitude(
          l.Dense(num_classes, activation='softmax'), **pruning_params)
  ])
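
Examples 6 and 7 build MNIST-sized models with the tensorflow_model_optimization pruning API; the `prune` module, the `l = tf.keras.layers` alias, and `num_classes` are defined elsewhere in the original scripts. Below is a hedged sketch of how such a model might be wired up and trained. The schedule values are illustrative assumptions; UpdatePruningStep is the callback the pruning wrappers need in order to advance their step counter.

# Sketch under stated assumptions: training one of the layerwise models above.
import tensorflow as tf
from tensorflow_model_optimization.python.core.sparsity.keras import (
    prune, pruning_callbacks, pruning_schedule)

l = tf.keras.layers
num_classes = 10

pruning_params = {
    'pruning_schedule': pruning_schedule.PolynomialDecay(
        initial_sparsity=0.0, final_sparsity=0.75,
        begin_step=1000, end_step=5000),
}

model = build_layerwise_model((28, 28, 1), **pruning_params)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

# UpdatePruningStep must be passed to fit() so sparsity is actually applied.
callbacks = [pruning_callbacks.UpdatePruningStep()]
# model.fit(x_train, y_train, epochs=4, callbacks=callbacks)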
Example 8: testValuesRemainClusteredAfterTraining
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import keras [as alias]
def testValuesRemainClusteredAfterTraining(self):
  """Verifies that training a clustered model does not destroy the clusters."""
  original_model = keras.Sequential([
      layers.Dense(2, input_shape=(2,)),
      layers.Dense(2),
  ])
  clustered_model = cluster.cluster_weights(original_model, **self.params)
  clustered_model.compile(
      loss=keras.losses.categorical_crossentropy,
      optimizer="adam",
      metrics=["accuracy"],
  )
  clustered_model.fit(x=self.dataset_generator(), steps_per_epoch=1)
  stripped_model = cluster.strip_clustering(clustered_model)
  weights_as_list = stripped_model.get_weights()[0].reshape(-1,).tolist()
  unique_weights = set(weights_as_list)
  self.assertLessEqual(len(unique_weights), self.params["number_of_clusters"])
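
Example 8 and the clustering examples that follow rely on a shared self.params dictionary that is not shown in the excerpts. A hedged sketch of what such a parameter dict typically looks like for cluster_weights; the specific values here are illustrative assumptions:

# Illustrative clustering parameters; the original tests' values are not shown above.
from tensorflow_model_optimization.python.core.clustering.keras import cluster, cluster_config

params = {
    'number_of_clusters': 8,
    'cluster_centroids_init': cluster_config.CentroidInitialization.LINEAR,
}

# clustered_model = cluster.cluster_weights(model, **params)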
Example 9: testClusterModelValidLayersSuccessful
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import keras [as alias]
def testClusterModelValidLayersSuccessful(self):
  """
  Verifies that clustering a sequential model results in all clusterable
  layers within the model being clustered.
  """
  model = keras.Sequential([
      self.keras_clusterable_layer,
      self.keras_non_clusterable_layer,
      self.custom_clusterable_layer
  ])
  clustered_model = cluster.cluster_weights(model, **self.params)
  clustered_model.build(input_shape=(1, 28, 28, 1))

  self.assertEqual(len(model.layers), len(clustered_model.layers))
  for layer, clustered_layer in zip(model.layers, clustered_model.layers):
    self._validate_clustered_layer(layer, clustered_layer)
Example 10: testClusterModelDoesNotWrapAlreadyWrappedLayer
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import keras [as alias]
def testClusterModelDoesNotWrapAlreadyWrappedLayer(self):
  """
  Verifies that clustering a model that contains an already clustered layer
  does not result in wrapping the clustered layer into another
  cluster_wrapper.
  """
  model = keras.Sequential([
      layers.Flatten(),
      cluster.cluster_weights(layers.Dense(10), **self.params),
  ])
  clustered_model = cluster.cluster_weights(model, **self.params)
  clustered_model.build(input_shape=(10, 10, 1))

  self.assertEqual(len(model.layers), len(clustered_model.layers))
  self._validate_clustered_layer(model.layers[0], clustered_model.layers[0])
  # Second layer is used as-is since it's already a clustered layer.
  self.assertEqual(model.layers[1], clustered_model.layers[1])
  self._validate_clustered_layer(model.layers[1].layer,
                                 clustered_model.layers[1])
Example 11: testClusterSequentialModelPreservesBuiltStateNoInput
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import keras [as alias]
def testClusterSequentialModelPreservesBuiltStateNoInput(self):
  """
  Verifies that clustering a sequential model without an input layer
  preserves the built state of the model.
  """
  # No InputLayer
  model = keras.Sequential([
      layers.Dense(10),
      layers.Dense(10),
  ])
  self.assertEqual(model.built, False)
  clustered_model = cluster.cluster_weights(model, **self.params)
  self.assertEqual(model.built, False)

  # Test that the built state is preserved across serialization
  with cluster.cluster_scope():
    loaded_model = keras.models.model_from_config(
        json.loads(clustered_model.to_json()))
  self.assertEqual(loaded_model.built, False)
Example 12: testClusterFunctionalModelPreservesBuiltState
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import keras [as alias]
def testClusterFunctionalModelPreservesBuiltState(self):
  """
  Verifies that clustering a functional model preserves the built state of
  the model.
  """
  i1 = keras.Input(shape=(10,))
  i2 = keras.Input(shape=(10,))
  x1 = layers.Dense(10)(i1)
  x2 = layers.Dense(10)(i2)
  outputs = layers.Add()([x1, x2])
  model = keras.Model(inputs=[i1, i2], outputs=outputs)
  self.assertEqual(model.built, True)
  clustered_model = cluster.cluster_weights(model, **self.params)
  self.assertEqual(model.built, True)

  # Test that the built state is preserved across serialization
  with cluster.cluster_scope():
    loaded_model = keras.models.model_from_config(
        json.loads(clustered_model.to_json()))
  self.assertEqual(loaded_model.built, True)
Example 13: testClusterWeightsStrippedWeights
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import keras [as alias]
def testClusterWeightsStrippedWeights(self):
  """
  Verifies that stripping the clustering wrappers from a functional model
  preserves the clustered weights.
  """
  i1 = keras.Input(shape=(10,))
  x1 = layers.BatchNormalization()(i1)
  outputs = x1
  model = keras.Model(inputs=[i1], outputs=outputs)
  clustered_model = cluster.cluster_weights(model, **self.params)
  cluster_weight_length = len(clustered_model.get_weights())
  stripped_model = cluster.strip_clustering(clustered_model)

  self.assertEqual(self._count_clustered_layers(stripped_model), 0)
  self.assertEqual(len(stripped_model.get_weights()), cluster_weight_length)
Example 14: testStrippedKernel
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import keras [as alias]
def testStrippedKernel(self):
  """
  Verifies that stripping the clustering wrappers from a functional model
  restores the layer's kernel and the layer's weights array to the new
  clustered weight values.
  """
  i1 = keras.Input(shape=(1, 1, 1))
  x1 = layers.Conv2D(1, 1)(i1)
  outputs = x1
  model = keras.Model(inputs=[i1], outputs=outputs)
  clustered_model = cluster.cluster_weights(model, **self.params)
  clustered_conv2d_layer = clustered_model.layers[1]
  clustered_kernel = clustered_conv2d_layer.layer.kernel
  stripped_model = cluster.strip_clustering(clustered_model)
  stripped_conv2d_layer = stripped_model.layers[1]

  self.assertEqual(self._count_clustered_layers(stripped_model), 0)
  self.assertIsNot(stripped_conv2d_layer.kernel, clustered_kernel)
  self.assertEqual(stripped_conv2d_layer.kernel,
                   stripped_conv2d_layer.weights[0])
Example 15: testStripSelectivelyClusteredFunctionalModel
# Required imports: import tensorflow [as alias]
# Or: from tensorflow import keras [as alias]
def testStripSelectivelyClusteredFunctionalModel(self):
  """
  Verifies that invoking strip_clustering() on a selectively clustered
  functional model strips the clustering wrappers from the clustered layers.
  """
  i1 = keras.Input(shape=(10,))
  i2 = keras.Input(shape=(10,))
  x1 = cluster.cluster_weights(layers.Dense(10), **self.params)(i1)
  x2 = layers.Dense(10)(i2)
  outputs = layers.Add()([x1, x2])
  clustered_model = keras.Model(inputs=[i1, i2], outputs=outputs)

  stripped_model = cluster.strip_clustering(clustered_model)

  self.assertEqual(self._count_clustered_layers(stripped_model), 0)
  self.assertIsInstance(stripped_model.layers[2], layers.Dense)
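
Taken together, the clustering examples follow a cluster, fine-tune, strip workflow. A hedged end-to-end sketch; the layer sizes, parameter values, and random training data are illustrative assumptions, not taken from the tests above:

import numpy as np
import tensorflow as tf
from tensorflow_model_optimization.python.core.clustering.keras import cluster, cluster_config

params = {
    'number_of_clusters': 4,
    'cluster_centroids_init': cluster_config.CentroidInitialization.LINEAR,
}

model = tf.keras.Sequential([
    tf.keras.layers.Dense(16, activation='relu', input_shape=(10,)),
    tf.keras.layers.Dense(1),
])

# 1. Wrap clusterable layers, 2. fine-tune briefly, 3. strip the wrappers for export.
clustered = cluster.cluster_weights(model, **params)
clustered.compile(optimizer='adam', loss='mse')
x, y = np.random.rand(32, 10), np.random.rand(32, 1)
clustered.fit(x, y, epochs=1, verbose=0)
stripped = cluster.strip_clustering(clustered)

# Each kernel now holds at most `number_of_clusters` distinct values.
print(len(set(stripped.get_weights()[0].reshape(-1).tolist())))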