This article collects typical usage examples of the Python method sonnet.Conv1D. If you are wondering what exactly sonnet.Conv1D does and how to use it, the curated code examples below may help. You can also explore further usage examples from the containing module, sonnet.
Seven code examples of sonnet.Conv1D are shown below, ordered by popularity by default.
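Before the examples, here is a minimal sketch of how snt.Conv1D is typically constructed and applied. It assumes TensorFlow 1.x with Sonnet 1.x (as all examples below do); the shapes and hyperparameters are illustrative only, not taken from any example.

import tensorflow as tf
import sonnet as snt

# snt.Conv1D expects inputs of shape [batch, length, channels].
inputs = tf.placeholder(tf.float32, [None, 128, 16])
conv = snt.Conv1D(output_channels=32, kernel_shape=5, padding='SAME')
outputs = conv(inputs)  # [batch, 128, 32]; variables are created on first call.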
Example 1: _build
# Required import: import sonnet as snt
# Or: from sonnet import Conv1D
def _build(self, x):
  # x is [units, bs, 1].
  net = tf.transpose(x, [1, 0, 2])  # Now [bs, units, 1].
  channels = x.shape.as_list()[2]
  mod = snt.Conv1D(output_channels=channels, kernel_shape=[3])
  net = mod(net)
  net = snt.BatchNorm(axis=[0, 1])(net, is_training=False)
  net = tf.nn.relu(net)
  mod = snt.Conv1D(output_channels=channels, kernel_shape=[3])
  net = mod(net)
  net = snt.BatchNorm(axis=[0, 1])(net, is_training=False)
  net = tf.nn.relu(net)
  to_concat = tf.transpose(net, [1, 0, 2])  # Back to [units, bs, 1].
  if self.add:
    return x + to_concat
  else:
    return tf.concat([x, to_concat], 2)
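For context, _build above is the call method of a Sonnet module. A hypothetical minimal enclosure might look like the following sketch; the class name and the add flag's default are assumptions, not from the source.

class ResidualConv1DBlock(snt.AbstractModule):  # Hypothetical name.
  """Two Conv1D+BatchNorm+ReLU stages applied across the unit dimension."""

  def __init__(self, add=True, name='residual_conv1d_block'):
    super(ResidualConv1DBlock, self).__init__(name=name)
    self.add = add  # If True, add the result to x; otherwise concatenate.

  def _build(self, x):
    ...  # Body as in Example 1.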
Example 2: testConv1dIntervalBounds
# Required import: import sonnet as snt
# Or: from sonnet import Conv1D
def testConv1dIntervalBounds(self):
  m = snt.Conv1D(
      output_channels=1,
      kernel_shape=2,
      padding='VALID',
      stride=1,
      use_bias=True,
      initializers={
          'w': tf.constant_initializer(1.),
          'b': tf.constant_initializer(2.),
      })
  z = tf.constant([3, 4], dtype=tf.float32)
  z = tf.reshape(z, [1, 2, 1])
  m(z)  # Connect to create weights.
  m = ibp.LinearConv1dWrapper(m)
  input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
  output_bounds = m.propagate_bounds(input_bounds)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    l, u = sess.run([output_bounds.lower, output_bounds.upper])
    l = l.item()
    u = u.item()
    # All-ones kernel, bias 2: lower = (2 + 3) + 2 = 7, upper = (4 + 5) + 2 = 11.
    self.assertAlmostEqual(7., l)
    self.assertAlmostEqual(11., u)
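As a sanity check on the expected values, the bound arithmetic can be reproduced by hand. This is a plain numpy sketch, not part of the ibp API:

import numpy as np

lo, hi = np.array([2., 3.]), np.array([4., 5.])  # z -/+ 1, i.e. [3, 4] +/- 1
b = 2.
# With an all-ones kernel (all weights positive), the lower output bound
# uses the lower inputs and the upper bound uses the upper inputs.
lower = lo.sum() + b  # (2 + 3) + 2 = 7
upper = hi.sum() + b  # (4 + 5) + 2 = 11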
Example 3: _inputs_for_observed_module
# Required import: import sonnet as snt
# Or: from sonnet import Conv1D
def _inputs_for_observed_module(self, subgraph):
  """Extracts input tensors from a connected Sonnet module.

  This default implementation supports common layer types, but should be
  overridden if custom layer types are to be supported.

  Args:
    subgraph: `snt.ConnectedSubGraph` specifying the Sonnet module being
      connected, and its inputs and outputs.

  Returns:
    Tuple of input tensors, or None if not a supported Sonnet module.
  """
  m = subgraph.module
  # Only support a few operations for now.
  if not isinstance(m, (snt.BatchReshape, snt.Linear, snt.Conv1D,
                        snt.Conv2D, snt.BatchNorm, layers.ImageNorm)):
    return None
  if isinstance(m, snt.BatchNorm):
    return subgraph.inputs['input_batch'],  # Trailing comma: a 1-tuple.
  else:
    return subgraph.inputs['inputs'],
Example 4: _wrapper_for_observed_module
# Required import: import sonnet as snt
# Or: from sonnet import Conv1D
def _wrapper_for_observed_module(self, subgraph):
  """Creates a wrapper for a connected Sonnet module.

  This default implementation supports common layer types, but should be
  overridden if custom layer types are to be supported.

  Args:
    subgraph: `snt.ConnectedSubGraph` specifying the Sonnet module being
      connected, and its inputs and outputs.

  Returns:
    `ibp.VerifiableWrapper` for the Sonnet module.
  """
  m = subgraph.module
  if isinstance(m, snt.BatchReshape):
    shape = subgraph.outputs.get_shape()[1:].as_list()
    return verifiable_wrapper.BatchReshapeWrapper(m, shape)
  elif isinstance(m, snt.Linear):
    return verifiable_wrapper.LinearFCWrapper(m)
  elif isinstance(m, snt.Conv1D):
    return verifiable_wrapper.LinearConv1dWrapper(m)
  elif isinstance(m, snt.Conv2D):
    return verifiable_wrapper.LinearConv2dWrapper(m)
  elif isinstance(m, layers.ImageNorm):
    return verifiable_wrapper.ImageNormWrapper(m)
  else:
    assert isinstance(m, snt.BatchNorm)
    return verifiable_wrapper.BatchNormWrapper(m)
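The dispatch above can be read as a type-to-wrapper table. The following is a sketch only (the helper name is hypothetical, and the real method also passes BatchReshape its target shape):

_WRAPPER_FOR_TYPE = {  # Hypothetical summary of the isinstance chain.
    snt.Linear: verifiable_wrapper.LinearFCWrapper,
    snt.Conv1D: verifiable_wrapper.LinearConv1dWrapper,
    snt.Conv2D: verifiable_wrapper.LinearConv2dWrapper,
    layers.ImageNorm: verifiable_wrapper.ImageNormWrapper,
    snt.BatchNorm: verifiable_wrapper.BatchNormWrapper,
}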
Example 5: __init__
# Required import: import sonnet as snt
# Or: from sonnet import Conv1D
def __init__(self, module):
  if not isinstance(module, snt.Conv1D):
    raise ValueError('Cannot wrap {} with a LinearConv1dWrapper.'.format(
        module))
  super(LinearConv1dWrapper, self).__init__(module)
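A minimal usage sketch for this wrapper, following the pattern of the tests above (TF1 graph mode assumed):

conv = snt.Conv1D(output_channels=1, kernel_shape=2)
conv(tf.zeros([1, 2, 1]))  # Connect once so the variables exist.
wrapper = ibp.LinearConv1dWrapper(conv)  # OK: conv is a snt.Conv1D.
# Any other module type raises the ValueError above, e.g.:
# ibp.LinearConv1dWrapper(snt.Linear(4))  -> ValueError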
Example 6: testConv1dSymbolicBounds
# Required import: import sonnet as snt
# Or: from sonnet import Conv1D
def testConv1dSymbolicBounds(self):
  m = snt.Conv1D(
      output_channels=1,
      kernel_shape=2,
      padding='VALID',
      stride=1,
      use_bias=True,
      initializers={
          'w': tf.constant_initializer(1.),
          'b': tf.constant_initializer(3.),
      })
  z = tf.constant([3, 4], dtype=tf.float32)
  z = tf.reshape(z, [1, 2, 1])
  m(z)  # Connect to create weights.
  m = ibp.LinearConv1dWrapper(m)
  input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
  input_bounds = ibp.SymbolicBounds.convert(input_bounds)
  output_bounds = m.propagate_bounds(input_bounds)
  output_bounds = ibp.IntervalBounds.convert(output_bounds)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    l, u = sess.run([output_bounds.lower, output_bounds.upper])
    l = l.item()
    u = u.item()
    # For a single affine layer, symbolic bounds collapse to the same
    # interval: lower = (2 + 3) + 3 = 8, upper = (4 + 5) + 3 = 12.
    self.assertAlmostEqual(8., l)
    self.assertAlmostEqual(12., u)
Example 7: compute_top_delta
# Required import: import sonnet as snt
# Or: from sonnet import Conv1D
def compute_top_delta(self, z):
  """Parameterization of the top-level delta: converts the top-layer
  activation into an error signal.

  Args:
    z: tf.Tensor
      batch of final layer post activations

  Returns:
    delta: tf.Tensor
      the error signal
  """
  s_idx = 0
  with tf.variable_scope('compute_top_delta'), tf.device(self.remote_device):
    # Conv1D typically takes [BS, length, input_channels]; here we apply it
    # so that the convolution runs over the batch dimension.
    act = tf.expand_dims(tf.transpose(z, [1, 0]), 2)  # [channels, BS, 1]

    mod = snt.Conv1D(output_channels=self.top_delta_size, kernel_shape=[5])
    act = mod(act)
    act = snt.BatchNorm(axis=[0, 1])(act, is_training=False)
    act = tf.nn.relu(act)

    bs = act.shape.as_list()[0]
    act = tf.transpose(act, [2, 1, 0])
    act = snt.Conv1D(output_channels=bs, kernel_shape=[3])(act)
    act = snt.BatchNorm(axis=[0, 1])(act, is_training=False)
    act = tf.nn.relu(act)
    act = snt.Conv1D(output_channels=bs, kernel_shape=[3])(act)
    act = snt.BatchNorm(axis=[0, 1])(act, is_training=False)
    act = tf.nn.relu(act)
    act = tf.transpose(act, [2, 1, 0])

    prev_act = act
    for _ in range(self.top_delta_layers):
      mod = snt.Conv1D(output_channels=self.top_delta_size, kernel_shape=[3])
      act = mod(act)
      act = snt.BatchNorm(axis=[0, 1])(act, is_training=False)
      act = tf.nn.relu(act)
      prev_act = act

    mod = snt.Conv1D(output_channels=self.delta_dim, kernel_shape=[3])
    act = mod(act)

    # [bs, feature_channels, delta_channels]
    act = tf.transpose(act, [1, 0, 2])
    return act
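The transpose trick at the top of compute_top_delta is worth isolating. The following standalone sketch (illustrative sizes, TF1 assumed) shows how the convolution is made to slide over the batch dimension:

z = tf.random_normal([8, 32])                     # [BS, units]
act = tf.expand_dims(tf.transpose(z, [1, 0]), 2)  # [units, BS, 1]
# snt.Conv1D sees [batch=units, length=BS, channels=1], so the kernel
# now slides across the batch of examples rather than across features.
out = snt.Conv1D(output_channels=4, kernel_shape=[5])(act)  # [units, BS, 4]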