This article collects typical usage examples of the Python method tensorflow.compat.v1.keras. If you are wondering what v1.keras does or how to use it, the curated code examples below may help. You can also explore the enclosing module, tensorflow.compat.v1, for related usage.
The following presents 10 code examples of v1.keras, sorted by popularity by default.
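As a quick orientation before the examples: all of them reach Keras through the v1 compatibility layer, either via the tf.keras attribute of the compat module or via a module-level alias. A minimal sketch of both import styles (illustrative only):

import tensorflow.compat.v1 as tf        # v1-compatible TensorFlow API
from tensorflow.compat.v1 import keras   # module-level alias used in some examples below

# Both spellings resolve to the same Keras namespace, e.g. the same Dense layer:
dense_a = tf.keras.layers.Dense(16)
dense_b = keras.layers.Dense(16)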
Example 1: ctc_symbol_loss
# Required module: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import keras [as alias]
def ctc_symbol_loss(top_out, targets, model_hparams, vocab_size, weight_fn):
  """Compute the CTC loss."""
  del model_hparams, vocab_size  # unused arg
  logits = top_out
  with tf.name_scope("ctc_loss", values=[logits, targets]):
    # For CTC we assume targets are 1d, [batch, length, 1, 1] here.
    targets_shape = targets.get_shape().as_list()
    assert len(targets_shape) == 4
    assert targets_shape[2] == 1
    assert targets_shape[3] == 1
    targets = tf.squeeze(targets, axis=[2, 3])
    logits = tf.squeeze(logits, axis=[2, 3])
    targets_mask = 1 - tf.to_int32(tf.equal(targets, 0))
    targets_lengths = tf.reduce_sum(targets_mask, axis=1)
    sparse_targets = tf.keras.backend.ctc_label_dense_to_sparse(
        targets, targets_lengths)
    xent = tf.nn.ctc_loss(
        sparse_targets,
        logits,
        targets_lengths,
        time_major=False,
        preprocess_collapse_repeated=False,
        ctc_merge_repeated=False)
    weights = weight_fn(targets)
    return tf.reduce_sum(xent), tf.reduce_sum(weights)
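The weight_fn argument is supplied by the caller and is not part of this excerpt. A minimal sketch of a typical padding-aware weight function (an assumption, not the project's actual helper) would weight only the non-zero, i.e. non-padding, target positions:

import tensorflow.compat.v1 as tf

def weights_nonzero(targets):
  # Hypothetical weight_fn: 1.0 for non-padding (non-zero) targets, 0.0 for padding.
  return tf.to_float(tf.not_equal(targets, 0))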
Example 2: call
# Required module: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import keras [as alias]
def call(self, state):
  """Creates the output tensor/op given the input state tensor.

  See https://www.tensorflow.org/api_docs/python/tf/keras/Model for more
  information. Note that subclasses implement `call`, which tf.keras.Model
  wraps with its `__call__` method.

  Args:
    state: Tensor, input tensor.

  Returns:
    collections.namedtuple, output ops (graph mode) or output tensors (eager).
  """
  net = tf.cast(state, tf.float32)
  net = tf.div(net, 255.)
  net = self.conv1(net)
  net = self.conv2(net)
  net = self.conv3(net)
  net = self.flatten(net)
  net = self.dense1(net)
  net = self.dense2(net)
  unordered_q_heads = tf.reshape(net, [-1, self.num_actions, self.num_heads])
  q_heads, q_values = combine_q_functions(
      unordered_q_heads, self._transform_strategy, **self._kwargs)
  return MultiHeadNetworkType(q_heads, unordered_q_heads, q_values)
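MultiHeadNetworkType and combine_q_functions are defined elsewhere in the same model file. A plausible sketch of the return container, assuming it is a plain namedtuple over the three values produced above (the type and field names are assumptions):

import collections

# Assumed output container; fields mirror the values returned by call().
MultiHeadNetworkType = collections.namedtuple(
    'multi_head_dqn_network', ['q_heads', 'unordered_q_heads', 'q_values'])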
Example 3: video_raw_top
# Required module: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import keras [as alias]
def video_raw_top(body_output, targets, model_hparams, vocab_size):
  """Converts real-valued body output to RGB frames and logs a GIF summary."""
  del targets, model_hparams, vocab_size  # unused arg
  frames = body_output
  if isinstance(body_output, list):
    frames = tf.stack(body_output, axis=1)
  rgb_frames = common_layers.convert_real_to_rgb(frames)
  common_video.gif_summary("body_output", rgb_frames)
  return tf.expand_dims(rgb_frames, axis=-1)
Example 4: __init__
# Required module: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import keras [as alias]
def __init__(self, dims):
  """Builds a fully connected decoder that projects back to `dims` outputs."""
  super(Decoder, self).__init__()
  self._decoder = keras.Sequential()
  layer_sizes = [1024, 1024, 2048]
  for layer_size in layer_sizes:
    self._decoder.add(
        keras.layers.Dense(layer_size, activation=tf.nn.leaky_relu))
  self._decoder.add(keras.layers.Dense(dims, activation=None))
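Assuming Decoder subclasses keras.Model and its call method simply forwards inputs to self._decoder, a hypothetical usage could look like this (shapes are made up):

decoder = Decoder(dims=784)
# Assumes something like `def call(self, inputs): return self._decoder(inputs)` exists.
reconstruction = decoder(tf.zeros([8, 128]))  # -> Tensor of shape [8, 784]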
Example 5: __init__
# Required module: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import keras [as alias]
def __init__(self, feature_dims):
  """Builds ResNet-18-style stages ending in a `feature_dims` embedding."""
  super(Resnet18, self).__init__()
  self.conv1 = keras.layers.Conv2D(
      64, 7, strides=2, padding='same', use_bias=False)
  self.bn1 = keras.layers.BatchNormalization()
  self.relu1 = keras.layers.ReLU()
  self.maxpool = keras.layers.MaxPooling2D(3, strides=2, padding='same')
  layers = [2, 2, 2, 2]  # two residual blocks per stage, as in ResNet-18
  self.layer1 = ResLayer(BasicBlock, 64, 64, layers[0])
  self.layer2 = ResLayer(BasicBlock, 64, 128, layers[1], stride=2)
  self.layer3 = ResLayer(BasicBlock, 128, 256, layers[2], stride=2)
  self.layer4 = ResLayer(BasicBlock, 256, 512, layers[3], stride=2)
  self.fc = keras.layers.Dense(feature_dims, activation=None)
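ResLayer and BasicBlock come from the surrounding project and are not shown in this excerpt. As a hedged sketch, a BasicBlock in the usual ResNet-18 layout might look roughly like the following; the constructor arguments mirror the (in_dims, out_dims, stride) values passed to ResLayer above, and everything else is an assumption:

class BasicBlock(keras.Model):
  """Sketch of a standard residual block: two 3x3 convs plus an optional projection shortcut."""

  def __init__(self, in_dims, out_dims, stride=1):
    super(BasicBlock, self).__init__()
    self.conv1 = keras.layers.Conv2D(
        out_dims, 3, strides=stride, padding='same', use_bias=False)
    self.bn1 = keras.layers.BatchNormalization()
    self.conv2 = keras.layers.Conv2D(
        out_dims, 3, strides=1, padding='same', use_bias=False)
    self.bn2 = keras.layers.BatchNormalization()
    self.shortcut = None
    if stride != 1 or in_dims != out_dims:
      # Project the input when the spatial size or channel count changes.
      self.shortcut = keras.layers.Conv2D(
          out_dims, 1, strides=stride, use_bias=False)

  def call(self, inputs, training=False):
    residual = inputs if self.shortcut is None else self.shortcut(inputs)
    net = tf.nn.relu(self.bn1(self.conv1(inputs), training=training))
    net = self.bn2(self.conv2(net), training=training)
    return tf.nn.relu(net + residual)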
Example 6: __init__
# Required module: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import keras [as alias]
def __init__(self, num_actions: int, name: str = None):
  """Creates the layers used for calculating Q-values.

  Args:
    num_actions: number of actions.
    name: used to create scope for network parameters.
  """
  super(NatureDQNNetwork, self).__init__(name=name)
  self.num_actions = num_actions
  # Defining layers.
  activation_fn = tf.keras.activations.relu
  # Setting names of the layers manually to make variable names more similar
  # to tf.slim variable names/checkpoints.
  self.conv1 = tf.keras.layers.Conv2D(
      32, [8, 8],
      strides=4,
      padding='same',
      activation=activation_fn,
      name='Conv')
  self.conv2 = tf.keras.layers.Conv2D(
      64, [4, 4],
      strides=2,
      padding='same',
      activation=activation_fn,
      name='Conv')
  self.conv3 = tf.keras.layers.Conv2D(
      64, [3, 3],
      strides=1,
      padding='same',
      activation=activation_fn,
      name='Conv')
  self.flatten = tf.keras.layers.Flatten()
  self.dense1 = tf.keras.layers.Dense(
      512, activation=activation_fn, name='fully_connected')
  self.dense2 = tf.keras.layers.Dense(num_actions, name='fully_connected')
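Assuming the class also defines a call method that chains these layers (similar to Example 2), a hypothetical usage with Atari-style inputs could look like:

network = NatureDQNNetwork(num_actions=4, name='online')
# Illustrative shape only: batch of 1, 84x84 observations with 4 stacked frames.
q_values = network(tf.zeros([1, 84, 84, 4], dtype=tf.uint8))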
Example 7: inference_network_fn
# Required module: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import keras [as alias]
def inference_network_fn(self,
                         features,
                         labels,
                         mode,
                         config=None,
                         params=None):
  """See base class documentation."""
  del mode, config, params
  if self._multi_dataset:
    net = features.x1 + features.x2
  else:
    net = features.x
  for pos, activations in enumerate([32, 16, 8]):
    # tf.keras does not support variable_scope and custom_getter.
    # Therefore, we cannot use this api yet for meta learning models.
    # Note, we have to add the MockTFModel name in order to support legacy
    # model loading.
    net = tf.layers.dense(
        net,
        units=activations,
        activation=tf.nn.elu,
        name='MockT2RModel.dense.{}'.format(pos))
    net = tf.layers.batch_normalization(
        net, name='MockT2RModel.batch_norm.{}'.format(pos))
  net = tf.layers.dense(net, units=1, name='MockT2RModel.dense.4')
  inference_outputs = {}
  inference_outputs['logit'] = net
  return inference_outputs
Example 8: model_train_fn
# Required module: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import keras [as alias]
def model_train_fn(self,
                   features,
                   labels,
                   inference_outputs,
                   mode,
                   config=None,
                   params=None):
  """See base class documentation."""
  loss = tf.keras.losses.categorical_hinge(
      y_true=labels.y, y_pred=inference_outputs['logit'])
  return tf.reduce_mean(loss)
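tf.keras.losses.categorical_hinge returns a per-example hinge loss computed from one-hot (or soft) labels and predictions, which is why the function above reduces it with tf.reduce_mean. A small standalone sketch with made-up values:

import tensorflow.compat.v1 as tf

y_true = tf.constant([[0., 1.], [1., 0.]])   # one-hot labels
y_pred = tf.constant([[0.3, 0.7], [0.2, 0.8]])
per_example = tf.keras.losses.categorical_hinge(y_true=y_true, y_pred=y_pred)
loss = tf.reduce_mean(per_example)  # scalar, as returned by model_train_fn above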
Example 9: _build_model
# Required module: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import keras [as alias]
def _build_model(self):
  """Builds the tf.keras Sequential model used by this mock model."""
  self._model = tf.keras.Sequential()
  if not self._multi_dataset:
    for pos, activations in enumerate([32, 16, 8]):
      self._model.add(
          tf.keras.layers.Dense(
              units=activations,
              activation=tf.keras.activations.elu,
              name='MockTF2T2RModel.dense.{}'.format(pos)))
      self._model.add(
          tf.keras.layers.BatchNormalization(
              name='MockTF2T2RModel.batch_norm.{}'.format(pos)))
  self._model.add(
      tf.keras.layers.Dense(units=1, name='MockTF2T2RModel.dense.4'))
Example 10: main
# Required module: from tensorflow.compat import v1 [as alias]
# Alternatively: from tensorflow.compat.v1 import keras [as alias]
def main():
  parser = argparse.ArgumentParser(description='tf.keras model FLOPs & PARAMs checking tool')
  parser.add_argument('--model_path', help='model file to evaluate', type=str, required=True)
  args = parser.parse_args()

  # Load the saved Keras model, injecting any project-specific custom layers/objects.
  custom_object_dict = get_custom_objects()
  model = load_model(args.model_path, compile=False, custom_objects=custom_object_dict)
  get_flops(model)
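get_custom_objects and get_flops are project-specific helpers that are not shown in this excerpt. A common way to implement the FLOPs count with the v1 profiler looks roughly like the sketch below, under the assumption that the model was loaded into the default v1 graph (e.g. with eager execution disabled):

import tensorflow as tf

def get_flops(model):
  # Sketch: profile the graph that load_model built and count float operations.
  # `model` itself is only needed insofar as it has been built into the session graph.
  run_meta = tf.compat.v1.RunMetadata()
  opts = tf.compat.v1.profiler.ProfileOptionBuilder.float_operation()
  graph = tf.compat.v1.keras.backend.get_session().graph
  flops = tf.compat.v1.profiler.profile(
      graph=graph, run_meta=run_meta, cmd='op', options=opts)
  print('Total FLOPs: {:,}'.format(flops.total_float_ops))
  return flops.total_float_ops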