This article collects typical usage examples of the sonnet.SAME attribute in Python. If you are wondering what sonnet.SAME does, how to use it, or what working code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the sonnet module, where this attribute is defined.
The sections below show 7 code examples of the sonnet.SAME attribute, sorted by popularity by default.
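For orientation: in Sonnet 1, snt.SAME is the padding constant (equivalent to the string 'SAME') accepted by Sonnet's convolutional modules, mirroring TensorFlow's own padding argument. A minimal sketch, assuming Sonnet 1.x on top of TensorFlow 1.x:

import tensorflow as tf
import sonnet as snt

# SAME padding keeps the spatial output size equal to the input size at stride 1.
conv = snt.Conv2D(output_channels=8, kernel_shape=3, stride=1, padding=snt.SAME)
images = tf.placeholder(tf.float32, [None, 32, 32, 3])
features = conv(images)  # shape: [None, 32, 32, 8]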
Example 1: _build
# Required import: import sonnet [as alias]
# Alternative: from sonnet import SAME [as alias]
def _build(self, inputs, is_training):
    """Connects the module to inputs.

    Args:
        inputs: Inputs to the Unit3D component.
        is_training: whether to use training mode for snt.BatchNorm (boolean).

    Returns:
        Outputs from the module.
    """
    net = snt.Conv3D(output_channels=self._output_channels,
                     kernel_shape=self._kernel_shape,
                     stride=self._stride,
                     padding=snt.SAME,
                     use_bias=self._use_bias)(inputs)
    if self._use_batch_norm:
        bn = snt.BatchNorm()
        net = bn(net, is_training=is_training, test_local_stats=False)
    if self._activation_fn is not None:
        net = self._activation_fn(net)
    return net
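This _build comes from an I3D-style Unit3D module; the enclosing class is not shown above. Below is a minimal, hypothetical sketch of such a module together with a usage line, so the self._... attributes have a visible origin. The constructor arguments and defaults are assumptions based on the snippet, not taken verbatim from the original file.

import tensorflow as tf
import sonnet as snt

class Unit3D(snt.AbstractModule):
    """Sketch of a Conv3D + BatchNorm + activation unit (assumed layout)."""

    def __init__(self, output_channels, kernel_shape=(1, 1, 1), stride=(1, 1, 1),
                 activation_fn=tf.nn.relu, use_batch_norm=True, use_bias=False,
                 name='unit_3d'):
        super(Unit3D, self).__init__(name=name)
        self._output_channels = output_channels
        self._kernel_shape = kernel_shape
        self._stride = stride
        self._activation_fn = activation_fn
        self._use_batch_norm = use_batch_norm
        self._use_bias = use_bias

    def _build(self, inputs, is_training):
        net = snt.Conv3D(output_channels=self._output_channels,
                         kernel_shape=self._kernel_shape,
                         stride=self._stride,
                         padding=snt.SAME,
                         use_bias=self._use_bias)(inputs)
        if self._use_batch_norm:
            net = snt.BatchNorm()(net, is_training=is_training, test_local_stats=False)
        if self._activation_fn is not None:
            net = self._activation_fn(net)
        return net

# Usage: a batch of videos shaped [batch, frames, height, width, channels].
video = tf.placeholder(tf.float32, [None, 16, 224, 224, 3])
features = Unit3D(output_channels=64, kernel_shape=(7, 7, 7), stride=(2, 2, 2))(
    video, is_training=True)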
Example 2: _build
# Required import: import sonnet [as alias]
# Alternative: from sonnet import SAME [as alias]
def _build(self, inputs, is_training):
    """Connects the module to inputs.

    Args:
        inputs: Inputs to the Unit3D component.
        is_training: whether to use training mode for snt.BatchNorm (boolean).

    Returns:
        Outputs from the module.
    """
    net = snt.Conv3D(output_channels=self._output_channels,
                     kernel_shape=self._kernel_shape,
                     stride=self._stride,
                     padding=snt.SAME,
                     use_bias=self._use_bias)(inputs)
    if self._use_batch_norm:
        bn = snt.BatchNorm()
        #################### Warning: batchnorm is hard coded to is_training=False ####################
        # net = bn(net, is_training=is_training, test_local_stats=False)
        net = bn(net, is_training=False, test_local_stats=False)
    if self._activation_fn is not None:
        net = self._activation_fn(net)
    return net
Example 3: _build
# Required import: import sonnet [as alias]
# Alternative: from sonnet import SAME [as alias]
def _build(self, inputs, is_training):
    """Connects the module to inputs.

    Args:
        inputs: Inputs to the Unit3Dtf component.
        is_training: whether to use training mode for snt.BatchNorm (boolean).

    Returns:
        Outputs from the module.
    """
    net = snt.Conv3D(
        output_channels=self._output_channels,
        kernel_shape=self._kernel_shape,
        stride=self._stride,
        padding=snt.SAME,
        use_bias=self._use_bias)(inputs)
    if self._use_batch_norm:
        bn = snt.BatchNorm()
        net = bn(net, is_training=is_training, test_local_stats=False)
    if self._activation_fn is not None:
        net = self._activation_fn(net)
    return net
Example 4: _build
# Required import: import sonnet [as alias]
# Alternative: from sonnet import SAME [as alias]
def _build(self, inputs):
    if FLAGS.l2_reg:
        regularizers = {'w': lambda w: FLAGS.l2_reg * tf.nn.l2_loss(w),
                        'b': lambda w: FLAGS.l2_reg * tf.nn.l2_loss(w)}
    else:
        regularizers = None
    reshape = snt.BatchReshape([28, 28, 1])
    conv = snt.Conv2D(2, 5, padding=snt.SAME, regularizers=regularizers)
    act = _NONLINEARITY(conv(reshape(inputs)))
    pool = tf.nn.pool(act, window_shape=(2, 2), pooling_type=_POOL,
                      padding=snt.SAME, strides=(2, 2))
    conv = snt.Conv2D(4, 5, padding=snt.SAME, regularizers=regularizers)
    act = _NONLINEARITY(conv(pool))
    pool = tf.nn.pool(act, window_shape=(2, 2), pooling_type=_POOL,
                      padding=snt.SAME, strides=(2, 2))
    flatten = snt.BatchFlatten()(pool)
    linear = snt.Linear(32, regularizers=regularizers)(flatten)
    return snt.Linear(10, regularizers=regularizers)(linear)
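The snippet above leans on module-level names that are not shown here: _NONLINEARITY, _POOL, and a FLAGS object. The definitions below are a hedged guess at what they might look like, purely for illustration; the original values are not given in this example.

import tensorflow as tf

# Assumed module-level constants used by the _build above (illustrative only).
_NONLINEARITY = tf.nn.relu   # activation applied after each convolution
_POOL = 'MAX'                # pooling_type accepted by tf.nn.pool ('MAX' or 'AVG')

flags = tf.flags
flags.DEFINE_float('l2_reg', 0.0, 'L2 regularization scale; 0 disables the regularizers.')
FLAGS = flags.FLAGS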
Example 5: __init__
# Required import: import sonnet [as alias]
# Alternative: from sonnet import SAME [as alias]
def __init__(self, init_with_true_state=False, model='2lstm', **unused_kwargs):
    self.placeholders = {'o': tf.placeholder('float32', [None, None, 24, 24, 3], 'observations'),
                         'a': tf.placeholder('float32', [None, None, 3], 'actions'),
                         's': tf.placeholder('float32', [None, None, 3], 'states'),
                         'keep_prob': tf.placeholder('float32')}
    self.pred_states = None
    self.init_with_true_state = init_with_true_state
    self.model = model

    # build models
    # <-- observation
    self.encoder = snt.Sequential([
        snt.nets.ConvNet2D([16, 32, 64], [[3, 3]], [2], [snt.SAME],
                           activate_final=True, name='encoder/convnet'),
        snt.BatchFlatten(),
        lambda x: tf.nn.dropout(x, self.placeholders['keep_prob']),
        snt.Linear(128, name='encoder/Linear'),
        tf.nn.relu,
    ])

    # <-- action
    if self.model == '2lstm':
        self.rnn1 = snt.LSTM(512)
        self.rnn2 = snt.LSTM(512)
    if self.model == '2gru':
        self.rnn1 = snt.GRU(512)
        self.rnn2 = snt.GRU(512)
    elif self.model == 'ff':
        self.ff_lstm_replacement = snt.Sequential([
            snt.Linear(512),
            tf.nn.relu,
            snt.Linear(512),
            tf.nn.relu])

    self.belief_decoder = snt.Sequential([
        snt.Linear(256),
        tf.nn.relu,
        snt.Linear(256),
        tf.nn.relu,
        snt.Linear(3)
    ])
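One detail worth noting in the encoder above: Sonnet 1's snt.nets.ConvNet2D accepts either one entry per layer or a single-element list that is reused for every layer, so [snt.SAME] applies SAME padding to all three conv layers. The lines below are a hypothetical usage sketch (the variable names are illustrative, not from the original class): because the encoder expects [batch, height, width, channels] inputs, the time axis of the observation placeholder would typically be folded into the batch axis before encoding.

# Hypothetical usage sketch; 'encoder' stands for the self.encoder built above.
observations = tf.placeholder('float32', [None, None, 24, 24, 3])
flat_obs = tf.reshape(observations, [-1, 24, 24, 3])  # merge batch and time axes
encodings = encoder(flat_obs)                          # shape: [batch * time, 128]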
Example 6: build_modules
# Required import: import sonnet [as alias]
# Alternative: from sonnet import SAME [as alias]
def build_modules(self, min_obs_likelihood, proposer_keep_ratio, learn_gaussian_mle):
    """
    :param min_obs_likelihood:
    :param proposer_keep_ratio:
    :param learn_gaussian_mle:
    :return: None
    """
    # MEASUREMENT MODEL

    # conv net for encoding the image
    self.encoder = snt.Sequential([
        snt.nets.ConvNet2D([16, 16, 16, 16], [[7, 7], [5, 5], [5, 5], [5, 5]],
                           [[1, 1], [1, 2], [1, 2], [2, 2]], [snt.SAME],
                           activate_final=True, name='encoder/convnet'),
        snt.BatchFlatten(),
        lambda x: tf.nn.dropout(x, self.placeholders['keep_prob']),
        snt.Linear(128, name='encoder/linear'),
        tf.nn.relu
    ])

    # observation likelihood estimator that maps states and image encodings to probabilities
    self.obs_like_estimator = snt.Sequential([
        snt.Linear(128, name='obs_like_estimator/linear'),
        tf.nn.relu,
        snt.Linear(128, name='obs_like_estimator/linear'),
        tf.nn.relu,
        snt.Linear(1, name='obs_like_estimator/linear'),
        tf.nn.sigmoid,
        lambda x: x * (1 - min_obs_likelihood) + min_obs_likelihood
    ], name='obs_like_estimator')

    # motion noise generator used for motion sampling
    if learn_gaussian_mle:
        self.mo_noise_generator = snt.nets.MLP([32, 32, 4], activate_final=False,
                                               name='mo_noise_generator')
    else:
        self.mo_noise_generator = snt.nets.MLP([32, 32, 2], activate_final=False,
                                               name='mo_noise_generator')

    # odometry model (if we want to learn it)
    if self.learn_odom:
        self.mo_transition_model = snt.nets.MLP([128, 128, 128, self.state_dim],
                                                activate_final=False,
                                                name='mo_transition_model')

    # particle proposer that maps encodings to particles (if we want to use it)
    if self.use_proposer:
        self.particle_proposer = snt.Sequential([
            snt.Linear(128, name='particle_proposer/linear'),
            tf.nn.relu,
            lambda x: tf.nn.dropout(x, proposer_keep_ratio),
            snt.Linear(128, name='particle_proposer/linear'),
            tf.nn.relu,
            snt.Linear(128, name='particle_proposer/linear'),
            tf.nn.relu,
            snt.Linear(128, name='particle_proposer/linear'),
            tf.nn.relu,
            snt.Linear(4, name='particle_proposer/linear'),
            tf.nn.tanh,
        ])

    self.noise_scaler1 = snt.Module(lambda x: x * tf.exp(
        10 * tf.get_variable('motion_sampler/noise_scaler1',
                             initializer=np.array(0.0, dtype='float32'))))
    self.noise_scaler2 = snt.Module(lambda x: x * tf.exp(
        10 * tf.get_variable('motion_sampler/noise_scaler2',
                             initializer=np.array(0.0, dtype='float32'))))
Example 7: build_modules
# Required import: import sonnet [as alias]
# Alternative: from sonnet import SAME [as alias]
def build_modules(self, min_obs_likelihood, proposer_keep_ratio):
    """
    :param min_obs_likelihood:
    :param proposer_keep_ratio:
    :return: None
    """
    # MEASUREMENT MODEL

    # conv net for encoding the image
    self.encoder = snt.Sequential([
        snt.nets.ConvNet2D([16, 32, 64], [[3, 3]], [2], [snt.SAME],
                           activate_final=True, name='encoder/convnet'),
        snt.BatchFlatten(),
        lambda x: tf.nn.dropout(x, self.placeholders['keep_prob']),
        snt.Linear(128, name='encoder/linear'),
        tf.nn.relu
    ])

    # observation likelihood estimator that maps states and image encodings to probabilities
    self.obs_like_estimator = snt.Sequential([
        snt.Linear(128, name='obs_like_estimator/linear'),
        tf.nn.relu,
        snt.Linear(128, name='obs_like_estimator/linear'),
        tf.nn.relu,
        snt.Linear(1, name='obs_like_estimator/linear'),
        tf.nn.sigmoid,
        lambda x: x * (1 - min_obs_likelihood) + min_obs_likelihood
    ], name='obs_like_estimator')

    # motion noise generator used for motion sampling
    self.mo_noise_generator = snt.nets.MLP([32, 32, self.state_dim], activate_final=False,
                                           name='mo_noise_generator')

    # odometry model (if we want to learn it)
    if self.learn_odom:
        self.mo_transition_model = snt.nets.MLP([128, 128, 128, self.state_dim],
                                                activate_final=False,
                                                name='mo_transition_model')

    # particle proposer that maps encodings to particles (if we want to use it)
    if self.use_proposer:
        self.particle_proposer = snt.Sequential([
            snt.Linear(128, name='particle_proposer/linear'),
            tf.nn.relu,
            lambda x: tf.nn.dropout(x, proposer_keep_ratio),
            snt.Linear(128, name='particle_proposer/linear'),
            tf.nn.relu,
            snt.Linear(128, name='particle_proposer/linear'),
            tf.nn.relu,
            snt.Linear(128, name='particle_proposer/linear'),
            tf.nn.relu,
            snt.Linear(4, name='particle_proposer/linear'),
            tf.nn.tanh,
        ])