本文整理汇总了Python中sonnet.Sequential方法的典型用法代码示例。如果您正苦于以下问题:Python sonnet.Sequential方法的具体用法?Python sonnet.Sequential怎么用?Python sonnet.Sequential使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sonnet
的用法示例。
在下文中一共展示了sonnet.Sequential方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: make_ensemble
# 需要导入模块: import sonnet [as 别名]
# 或者: from sonnet import Sequential [as 别名]
def make_ensemble(num_actions: int,
                  num_ensemble: int = 20,
                  num_hidden_layers: int = 2,
                  num_units: int = 50,
                  prior_scale: float = 3.) -> Sequence[snt.Module]:
  """Convenience function to make an ensemble from flags.

  Args:
    num_actions: Size of the output layer of every member network.
    num_ensemble: Number of ensemble members to build.
    num_hidden_layers: Number of hidden layers in each MLP.
    num_units: Width of each hidden layer.
    prior_scale: Scale applied to the prior network's output inside
      `NetworkWithPrior`.

  Returns:
    A list of `num_ensemble` NetworkWithPrior modules.
  """
  output_sizes = [num_units] * num_hidden_layers + [num_actions]
  ensemble = []
  for _ in range(num_ensemble):
    # The trainable network and its fixed random prior share one
    # architecture but are separate modules (separate parameters).
    network = snt.Sequential([
        snt.Flatten(),
        snt.nets.MLP(output_sizes),
    ])
    prior_network = snt.Sequential([
        snt.Flatten(),
        snt.nets.MLP(output_sizes),
    ])
    ensemble.append(NetworkWithPrior(network, prior_network, prior_scale))
  return ensemble
示例2: default_agent
# 需要导入模块: import sonnet [as 别名]
# 或者: from sonnet import Sequential [as 别名]
def default_agent(obs_spec: specs.Array,
                  action_spec: specs.DiscreteArray):
  """Initialize a DQN agent with default parameters.

  Args:
    obs_spec: Observation spec (unused; the network flattens any input).
    action_spec: Discrete action spec; sizes the network's output layer.

  Returns:
    A DQN agent wired to a 50x50 MLP and an Adam optimizer.
  """
  del obs_spec  # Unused.
  network = snt.Sequential([
      snt.Flatten(),
      snt.nets.MLP([50, 50, action_spec.num_values]),
  ])
  optimizer = snt.optimizers.Adam(learning_rate=1e-3)
  return DQN(
      action_spec=action_spec,
      network=network,
      batch_size=32,
      discount=0.99,
      replay_capacity=10000,
      min_replay_size=100,
      sgd_period=1,
      target_update_period=4,
      optimizer=optimizer,
      epsilon=0.05,
      seed=42)
示例3: test_train
# 需要导入模块: import sonnet [as 别名]
# 或者: from sonnet import Sequential [as 别名]
def test_train(self):
  """Smoke-tests KFAC training on a toy MLP.

  Builds a softmax-cross-entropy loss over random 'MNIST-like' data,
  minimizes it with PeriodicInvCovUpdateKfacOpt, and checks that the
  optimizer's step counter advances by exactly one per training step.
  """
  image = tf.random_uniform(shape=(_BATCH_SIZE, 784), maxval=1.)
  labels = tf.random_uniform(shape=(_BATCH_SIZE,), maxval=10, dtype=tf.int32)
  labels_one_hot = tf.one_hot(labels, 10)
  model = snt.Sequential([snt.BatchFlatten(), snt.nets.MLP([128, 128, 10])])
  logits = model(image)
  all_losses = tf.nn.softmax_cross_entropy_with_logits_v2(
      logits=logits, labels=labels_one_hot)
  loss = tf.reduce_mean(all_losses)
  layers = layer_collection.LayerCollection()
  optimizer = periodic_inv_cov_update_kfac_opt.PeriodicInvCovUpdateKfacOpt(
      invert_every=10,
      cov_update_every=1,
      learning_rate=0.03,
      cov_ema_decay=0.95,
      damping=100.,
      layer_collection=layers,
      momentum=0.9,
      num_burnin_steps=0,
      placement_strategy="round_robin")
  _construct_layer_collection(layers, [logits], tf.trainable_variables())

  train_step = optimizer.minimize(loss)
  counter = optimizer.counter
  max_iterations = 50

  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    tf.train.start_queue_runners(sess=sess, coord=coord)
    for iteration in range(max_iterations):
      sess.run([loss, train_step])
      counter_ = sess.run(counter)
      # The counter is float-valued and incremented once per step.
      self.assertEqual(counter_, iteration + 1.0)
示例4: add_train_ops
# 需要导入模块: import sonnet [as 别名]
# 或者: from sonnet import Sequential [as 别名]
def add_train_ops(self,
                  num_classes,
                  joint_rep,
                  minibatch):
  """Add ops for training in the computation graph.

  Args:
    num_classes: number of classes to predict in the task.
    joint_rep: the joint sentence representation if the input is sentence
      pairs or the representation for the sentence if the input is a single
      sentence.
    minibatch: a minibatch of sequences of embeddings.

  Returns:
    train_accuracy: the accuracy on the training dataset
    loss: training loss.
    opt_step: training op.
  """
  if self.linear_classifier is None:
    # Build the classifier head lazily so it is created once and reused
    # on subsequent calls.
    classifier_layers = []
    classifier_layers.append(snt.Linear(num_classes))
    self.linear_classifier = snt.Sequential(classifier_layers)
  logits = self.linear_classifier(joint_rep)

  # Losses and optimizer.
  def get_loss(logits, labels):
    return tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=labels, logits=logits))

  loss = get_loss(logits, minibatch.sentiment)
  train_accuracy = utils.get_accuracy(logits, minibatch.sentiment)
  opt_step = self._add_optimize_op(loss)
  return train_accuracy, loss, opt_step
示例5: _build
# 需要导入模块: import sonnet [as 别名]
# 或者: from sonnet import Sequential [as 别名]
def _build(self, inputs):
  """Decodes `inputs` into game-output, score, and game-over predictions.

  Args:
    inputs: Feature tensors; concatenated by `util.concat_features` and
      fed through an MLP sized by the hparams.

  Returns:
    A 3-tuple `(game_output, score, game_over)`, each head computed from
    the shared hidden representation.
  """
  hparams = self._hparams
  hidden = snt.Sequential([
      util.concat_features,
      util.make_mlp(
          hparams,
          hparams.obs_decoder_fc_hidden_layers,
          activate_final=True),
  ])(inputs)
  return (self._build_game_output(hidden),
          self._build_score(hidden),
          self._build_game_over(hidden))
示例6: _build
# 需要导入模块: import sonnet [as 别名]
# 或者: from sonnet import Sequential [as 别名]
def _build(self, inputs):
  """Applies the optional input encoders, then the wrapped decoder.

  Args:
    inputs: Input tensor(s); passed through `self._input_encoders` in
      sequence when any are configured.

  Returns:
    The decoder's output on the (possibly encoded) inputs.
  """
  if self._input_encoders:
    inputs = snt.Sequential(self._input_encoders)(inputs)
  return self._decoder(inputs)
示例7: run
# 需要导入模块: import sonnet [as 别名]
# 或者: from sonnet import Sequential [as 别名]
def run(bsuite_id: str) -> str:
  """Runs a DQN agent on a given bsuite environment, logging to CSV.

  Args:
    bsuite_id: bsuite environment id (e.g. 'catch/0').

  Returns:
    The same `bsuite_id`, so callers can map results back to environments.
  """
  env = bsuite.load_and_record(
      bsuite_id=bsuite_id,
      save_path=FLAGS.save_path,
      logging_mode=FLAGS.logging_mode,
      overwrite=FLAGS.overwrite,
  )

  # Making the networks.
  hidden_units = [FLAGS.num_units] * FLAGS.num_hidden_layers
  network = snt.Sequential([
      snt.Flatten(),
      snt.nets.MLP(hidden_units + [env.action_spec().num_values]),
  ])
  optimizer = snt.optimizers.Adam(learning_rate=FLAGS.learning_rate)

  agent = dqn.DQN(
      action_spec=env.action_spec(),
      network=network,
      batch_size=FLAGS.batch_size,
      discount=FLAGS.discount,
      replay_capacity=FLAGS.replay_capacity,
      min_replay_size=FLAGS.min_replay_size,
      sgd_period=FLAGS.sgd_period,
      target_update_period=FLAGS.target_update_period,
      optimizer=optimizer,
      epsilon=FLAGS.epsilon,
      seed=FLAGS.seed,
  )

  # Fall back to the environment's recommended episode count when the
  # flag is unset (falsy).
  num_episodes = FLAGS.num_episodes or getattr(env, 'bsuite_num_episodes')
  experiment.run(
      agent=agent,
      environment=env,
      num_episodes=num_episodes,
      verbose=FLAGS.verbose)

  return bsuite_id
示例8: __init__
# 需要导入模块: import sonnet [as 别名]
# 或者: from sonnet import Sequential [as 别名]
def __init__(self, hidden_sizes: Sequence[int],
             action_spec: specs.DiscreteArray):
  """Builds a shared torso with separate policy and value heads.

  Args:
    hidden_sizes: Widths of the torso MLP's hidden layers.
    action_spec: Discrete action spec; sizes the policy head and supplies
      the action dtype.
  """
  super().__init__(name='policy_value_net')
  self._torso = snt.Sequential([
      snt.Flatten(),
      snt.nets.MLP(hidden_sizes, activate_final=True),
  ])
  self._policy_head = snt.Linear(action_spec.num_values)
  self._value_head = snt.Linear(1)
  self._action_dtype = action_spec.dtype
示例9: mnist
# 需要导入模块: import sonnet [as 别名]
# 或者: from sonnet import Sequential [as 别名]
def mnist(layers,  # pylint: disable=invalid-name
          activation="sigmoid",
          batch_size=128,
          mode="train"):
  """Mnist classification with a multi-layer perceptron.

  Args:
    layers: Sequence of hidden-layer sizes; a final 10-way layer is appended.
    activation: Activation name, either "sigmoid" or "relu".
    batch_size: Number of examples sampled per loss evaluation.
    mode: Which split of the MNIST data to use (attribute of the loaded
      dataset, e.g. "train").

  Returns:
    A zero-argument `build` function that samples a random minibatch and
    returns the cross-entropy loss on it.

  Raises:
    ValueError: If `activation` is not a supported name.
  """
  if activation == "sigmoid":
    activation_op = tf.sigmoid
  elif activation == "relu":
    activation_op = tf.nn.relu
  else:
    raise ValueError("{} activation not supported".format(activation))

  # Data.
  data = mnist_dataset.load_mnist()
  data = getattr(data, mode)
  images = tf.constant(data.images, dtype=tf.float32, name="MNIST_images")
  images = tf.reshape(images, [-1, 28, 28, 1])
  labels = tf.constant(data.labels, dtype=tf.int64, name="MNIST_labels")

  # Network.
  mlp = snt.nets.MLP(list(layers) + [10],
                     activation=activation_op,
                     initializers=_nn_initializers)
  network = snt.Sequential([snt.BatchFlatten(), mlp])

  def build():
    # Sample a fresh random minibatch each time the loss is built/run.
    indices = tf.random_uniform([batch_size], 0, data.num_examples, tf.int64)
    batch_images = tf.gather(images, indices)
    batch_labels = tf.gather(labels, indices)
    output = network(batch_images)
    return _xent_loss(output, batch_labels)

  return build
示例10: __init__
# 需要导入模块: import sonnet [as 别名]
# 或者: from sonnet import Sequential [as 别名]
def __init__(self, init_with_true_state=False, model='2lstm', **unused_kwargs):
  """Builds placeholders and sub-networks for the state-estimation model.

  Args:
    init_with_true_state: Stored flag; presumably controls whether the
      filter starts from the true state — not read in this constructor.
    model: Core selector, one of '2lstm', '2gru' or 'ff'.
    **unused_kwargs: Ignored.
  """
  self.placeholders = {'o': tf.placeholder('float32', [None, None, 24, 24, 3], 'observations'),
                       'a': tf.placeholder('float32', [None, None, 3], 'actions'),
                       's': tf.placeholder('float32', [None, None, 3], 'states'),
                       'keep_prob': tf.placeholder('float32')}
  self.pred_states = None
  self.init_with_true_state = init_with_true_state
  self.model = model

  # build models
  # <-- observation
  self.encoder = snt.Sequential([
      snt.nets.ConvNet2D([16, 32, 64], [[3, 3]], [2], [snt.SAME], activate_final=True, name='encoder/convnet'),
      snt.BatchFlatten(),
      lambda x: tf.nn.dropout(x, self.placeholders['keep_prob']),
      snt.Linear(128, name='encoder/Linear'),
      tf.nn.relu,
  ])

  # <-- action
  # NOTE(review): the '2gru' branch starts a new `if` rather than `elif`,
  # so the 'ff' test only runs when model != '2gru'. The three values are
  # mutually exclusive, so behavior matches an if/elif chain; kept as-is.
  if self.model == '2lstm':
    self.rnn1 = snt.LSTM(512)
    self.rnn2 = snt.LSTM(512)
  if self.model == '2gru':
    self.rnn1 = snt.GRU(512)
    self.rnn2 = snt.GRU(512)
  elif self.model == 'ff':
    self.ff_lstm_replacement = snt.Sequential([
        snt.Linear(512),
        tf.nn.relu,
        snt.Linear(512),
        tf.nn.relu])

  self.belief_decoder = snt.Sequential([
      snt.Linear(256),
      tf.nn.relu,
      snt.Linear(256),
      tf.nn.relu,
      snt.Linear(3)
  ])
示例11: _build
# 需要导入模块: import sonnet [as 别名]
# 或者: from sonnet import Sequential [as 别名]
def _build(self, attribute_value):
  """Embeds a continuous attribute value via an MLP plus LayerNorm.

  Args:
    attribute_value: Tensor of continuous attribute values; cast to
      float32 before embedding.

  Returns:
    The embedding tensor. Input and output histograms are logged to
    TF summaries as a side effect.
  """
  tf.summary.histogram('cont_attribute_value_histogram', attribute_value)
  embedding = snt.Sequential([
      snt.nets.MLP([self._attr_embedding_dim] * 3, activate_final=True, use_dropout=True),
      snt.LayerNorm(),
  ])(tf.cast(attribute_value, dtype=tf.float32))
  tf.summary.histogram('cont_embedding_histogram', embedding)
  return embedding
示例12: make_mlp_model
# 需要导入模块: import sonnet [as 别名]
# 或者: from sonnet import Sequential [as 别名]
def make_mlp_model(latent_size=16, num_layers=2):
  """Instantiates a new MLP, followed by LayerNorm.

  The parameters of each new MLP are not shared with others generated by
  this function.

  Args:
    latent_size: Width of every MLP layer.
    num_layers: Number of MLP layers.

  Returns:
    A Sonnet module which contains the MLP and LayerNorm.
  """
  return snt.Sequential([
      snt.nets.MLP([latent_size] * num_layers, activate_final=True),
      snt.LayerNorm()
  ])
示例13: _edge_model
# 需要导入模块: import sonnet [as 别名]
# 或者: from sonnet import Sequential [as 别名]
def _edge_model(self):
  """Returns the edge-update module: role embedder -> MLP -> LayerNorm."""
  return snt.Sequential([self._role_embedder,
                         snt.nets.MLP([self._latent_size] * self._num_layers, activate_final=True),
                         snt.LayerNorm()])
示例14: _node_model
# 需要导入模块: import sonnet [as 别名]
# 或者: from sonnet import Sequential [as 别名]
def _node_model(self):
  """Returns the node-update module: thing embedder -> MLP -> LayerNorm."""
  return snt.Sequential([self._thing_embedder,
                         snt.nets.MLP([self._latent_size] * self._num_layers, activate_final=True),
                         snt.LayerNorm()])
示例15: _embed
# 需要导入模块: import sonnet [as 别名]
# 或者: from sonnet import Sequential [as 别名]
def _embed(self, inpt):
  """Flattens `inpt` and maps it to `self._n_param` outputs with an MLP."""
  flatten = snt.BatchFlatten()
  mlp = MLP(self._n_hidden, n_out=self._n_param)
  seq = snt.Sequential([flatten, mlp])
  return seq(inpt)