This article collects typical usage examples of the Python method sonnet.Conv2D. If you have been wondering how exactly to use sonnet.Conv2D in Python, or what working code looks like, the hand-picked examples below may help. You can also explore further usage examples of the sonnet package that this method belongs to.
The following presents 15 code examples of sonnet.Conv2D, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
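Before the examples, here is a minimal sketch of the construct-then-call pattern that all of them rely on, assuming Sonnet 1.x on TensorFlow 1.x; the input shape is only an illustrative assumption:

import sonnet as snt
import tensorflow as tf

# NHWC input; the 28x28x3 shape is purely illustrative.
images = tf.placeholder(tf.float32, [None, 28, 28, 3])

# Construct the module first, then call it on a tensor to connect it to the graph.
conv = snt.Conv2D(output_channels=16, kernel_shape=3, stride=1, padding='SAME')
features = conv(images)  # shape [None, 28, 28, 16]

Calling the same conv object on another tensor reuses the same variables, which is the main reason Sonnet separates module construction from graph connection.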
Example 1: testConv2dIntervalBounds
# Required import: import sonnet as snt
# Or: from sonnet import Conv2D
def testConv2dIntervalBounds(self):
  m = snt.Conv2D(
      output_channels=1,
      kernel_shape=(2, 2),
      padding='VALID',
      stride=1,
      use_bias=True,
      initializers={
          'w': tf.constant_initializer(1.),
          'b': tf.constant_initializer(2.),
      })
  z = tf.constant([1, 2, 3, 4], dtype=tf.float32)
  z = tf.reshape(z, [1, 2, 2, 1])
  m(z)  # Connect to create weights.
  m = ibp.LinearConv2dWrapper(m)
  input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
  output_bounds = m.propagate_bounds(input_bounds)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    l, u = sess.run([output_bounds.lower, output_bounds.upper])
    l = l.item()
    u = u.item()
    self.assertAlmostEqual(8., l)
    self.assertAlmostEqual(16., u)
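The expected bounds of 8 and 16 can be verified by hand: with an all-ones 2x2 kernel and a bias of 2, the single VALID output is the sum of the four inputs plus the bias, and because all weights are positive the interval bounds are attained at the elementwise extremes. A small NumPy check, independent of the test above:

import numpy as np

z = np.array([1., 2., 3., 4.])
bias = 2.
lower = np.sum(z - 1.) + bias  # (0 + 1 + 2 + 3) + 2 = 8
upper = np.sum(z + 1.) + bias  # (2 + 3 + 4 + 5) + 2 = 16
print(lower, upper)            # 8.0 16.0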
Example 2: custom_build
# Required import: import sonnet as snt
# Or: from sonnet import Conv2D
def custom_build(inputs, is_training, keep_prob):
  """A custom build method to wrap into a sonnet Module."""
  x_inputs = tf.reshape(inputs, [-1, 28, 28, 1])
  outputs = snt.Conv2D(output_channels=32, kernel_shape=4, stride=2)(x_inputs)
  outputs = snt.BatchNorm()(outputs, is_training=is_training)
  outputs = tf.nn.relu(outputs)
  outputs = tf.nn.max_pool(outputs, ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1], padding='SAME')
  outputs = snt.Conv2D(output_channels=64, kernel_shape=4, stride=2)(outputs)
  outputs = snt.BatchNorm()(outputs, is_training=is_training)
  outputs = tf.nn.relu(outputs)
  outputs = tf.nn.max_pool(outputs, ksize=[1, 2, 2, 1],
                           strides=[1, 2, 2, 1], padding='SAME')
  outputs = snt.Conv2D(output_channels=1024, kernel_shape=1, stride=1)(outputs)
  outputs = snt.BatchNorm()(outputs, is_training=is_training)
  outputs = tf.nn.relu(outputs)
  outputs = snt.BatchFlatten()(outputs)
  outputs = tf.nn.dropout(outputs, keep_prob=keep_prob)
  outputs = snt.Linear(output_size=10)(outputs)
  # _activation_summary(outputs)
  return outputs
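To connect a build function like this to a graph it is typically wrapped in snt.Module. A minimal sketch, assuming Sonnet 1.x; the placeholder names below are illustrative assumptions and not part of the example:

import functools
import sonnet as snt
import tensorflow as tf

# Hypothetical inputs for a flattened-MNIST setting.
images = tf.placeholder(tf.float32, [None, 784])
is_training = tf.placeholder(tf.bool, [])
keep_prob = tf.placeholder(tf.float32, [])

# snt.Module turns an arbitrary build callable into a reusable Sonnet module.
model = snt.Module(functools.partial(
    custom_build, is_training=is_training, keep_prob=keep_prob))
logits = model(images)  # calls custom_build(images, is_training=..., keep_prob=...)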
Example 3: custom_build
# Required import: import sonnet as snt
# Or: from sonnet import Conv2D
def custom_build(self, inputs):
  """A custom build method to wrap into a sonnet Module."""
  outputs = snt.Conv2D(output_channels=16, kernel_shape=[7, 7], stride=[1, 1])(inputs)
  outputs = tf.nn.relu(outputs)
  outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[1, 2])(outputs)
  outputs = tf.nn.relu(outputs)
  outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[1, 2])(outputs)
  outputs = tf.nn.relu(outputs)
  outputs = snt.Conv2D(output_channels=16, kernel_shape=[5, 5], stride=[2, 2])(outputs)
  outputs = tf.nn.relu(outputs)
  outputs = tf.nn.dropout(outputs, self.placeholders['keep_prob'])
  outputs = snt.BatchFlatten()(outputs)
  outputs = snt.Linear(128)(outputs)
  outputs = tf.nn.relu(outputs)
  return outputs
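All of the convolutions above use snt.Conv2D's default 'SAME' padding, so each layer shrinks the spatial dimensions only by its stride (output size = ceil(input size / stride)), independent of the kernel shape. A small sketch tracing the shapes for a hypothetical 64x256 input:

def same_padding_output(size, stride):
  # Integer ceiling division: output size under 'SAME' padding.
  return -(-size // stride)

height, width = 64, 256  # hypothetical input resolution
for stride_h, stride_w in [(1, 1), (1, 2), (1, 2), (2, 2)]:
  height = same_padding_output(height, stride_h)
  width = same_padding_output(width, stride_w)
  print(height, width)  # prints 64 256, then 64 128, 64 64, 32 32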
Example 4: test_incompatible_higher_rank_inputs_raises
# Required import: import sonnet as snt
# Or: from sonnet import Conv2D
def test_incompatible_higher_rank_inputs_raises(self,
                                                use_edges,
                                                use_receiver_nodes,
                                                use_sender_nodes,
                                                use_globals,
                                                field):
  """An exception should be raised if the inputs have incompatible shapes."""
  input_graph = self._get_shaped_input_graph()
  input_graph = input_graph.replace(
      **{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])})
  network = blocks.EdgeBlock(
      functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]),
      use_edges=use_edges,
      use_receiver_nodes=use_receiver_nodes,
      use_sender_nodes=use_sender_nodes,
      use_globals=use_globals
  )
  with self.assertRaisesRegexp(
      tf.errors.InvalidArgumentError, "Dimensions of inputs should match"):
    network(input_graph)
Example 5: test_incompatible_higher_rank_inputs_no_raise
# Required import: import sonnet as snt
# Or: from sonnet import Conv2D
def test_incompatible_higher_rank_inputs_no_raise(self,
                                                  use_edges,
                                                  use_receiver_nodes,
                                                  use_sender_nodes,
                                                  use_globals,
                                                  field):
  """No exception should occur if a differently shaped field is not used."""
  input_graph = self._get_shaped_input_graph()
  input_graph = input_graph.replace(
      **{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])})
  network = blocks.EdgeBlock(
      functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]),
      use_edges=use_edges,
      use_receiver_nodes=use_receiver_nodes,
      use_sender_nodes=use_sender_nodes,
      use_globals=use_globals
  )
  self._assert_build_and_run(network, input_graph)
Example 6: test_incompatible_higher_rank_partial_outputs_raises
# Required import: import sonnet as snt
# Or: from sonnet import Conv2D
def test_incompatible_higher_rank_partial_outputs_raises(self):
  """An error should be raised if partial outputs have incompatible shapes."""
  input_graph = self._get_shaped_input_graph()
  edge_model_fn, node_model_fn, global_model_fn = self._get_shaped_model_fns()
  edge_model_fn_2 = functools.partial(
      snt.Conv2D, output_channels=10, kernel_shape=[3, 3], stride=[1, 2])
  graph_network = modules.GraphNetwork(
      edge_model_fn_2, node_model_fn, global_model_fn)
  with self.assertRaisesRegexp(
      tf.errors.InvalidArgumentError, "Dimensions of inputs should match"):
    graph_network(input_graph)
  node_model_fn_2 = functools.partial(
      snt.Conv2D, output_channels=10, kernel_shape=[3, 3], stride=[1, 2])
  graph_network = modules.GraphNetwork(
      edge_model_fn, node_model_fn_2, global_model_fn)
  with self.assertRaisesRegexp(
      tf.errors.InvalidArgumentError, "Dimensions of inputs should match"):
    graph_network(input_graph)
Example 7: test_incompatible_higher_rank_inputs_raises
# Required import: import sonnet as snt
# Or: from sonnet import Conv2D
def test_incompatible_higher_rank_inputs_raises(self,
                                                use_edges,
                                                use_receiver_nodes,
                                                use_sender_nodes,
                                                use_globals,
                                                field):
  """An exception should be raised if the inputs have incompatible shapes."""
  input_graph = self._get_shaped_input_graph()
  input_graph = input_graph.replace(
      **{field: tf.transpose(getattr(input_graph, field), [0, 2, 1, 3])})
  network = blocks.EdgeBlock(
      functools.partial(snt.Conv2D, output_channels=10, kernel_shape=[3, 3]),
      use_edges=use_edges,
      use_receiver_nodes=use_receiver_nodes,
      use_sender_nodes=use_sender_nodes,
      use_globals=use_globals
  )
  with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"):
    network(input_graph)
Example 8: test_incompatible_higher_rank_partial_outputs_raises
# Required import: import sonnet as snt
# Or: from sonnet import Conv2D
def test_incompatible_higher_rank_partial_outputs_raises(self):
  """An error should be raised if partial outputs have incompatible shapes."""
  input_graph = self._get_shaped_input_graph()
  edge_model_fn, node_model_fn, global_model_fn = self._get_shaped_model_fns()
  edge_model_fn_2 = functools.partial(
      snt.Conv2D, output_channels=10, kernel_shape=[3, 3], stride=[1, 2])
  graph_network = modules.GraphNetwork(
      edge_model_fn_2, node_model_fn, global_model_fn)
  with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"):
    graph_network(input_graph)
  node_model_fn_2 = functools.partial(
      snt.Conv2D, output_channels=10, kernel_shape=[3, 3], stride=[1, 2])
  graph_network = modules.GraphNetwork(
      edge_model_fn, node_model_fn_2, global_model_fn)
  with self.assertRaisesRegexp(ValueError, "in both shapes must be equal"):
    graph_network(input_graph)
Example 9: _build
# Required import: import sonnet as snt
# Or: from sonnet import Conv2D
def _build(self, inputs):
  if FLAGS.l2_reg:
    regularizers = {'w': lambda w: FLAGS.l2_reg * tf.nn.l2_loss(w),
                    'b': lambda w: FLAGS.l2_reg * tf.nn.l2_loss(w)}
  else:
    regularizers = None
  reshape = snt.BatchReshape([28, 28, 1])
  conv = snt.Conv2D(2, 5, padding=snt.SAME, regularizers=regularizers)
  act = _NONLINEARITY(conv(reshape(inputs)))
  pool = tf.nn.pool(act, window_shape=(2, 2), pooling_type=_POOL,
                    padding=snt.SAME, strides=(2, 2))
  conv = snt.Conv2D(4, 5, padding=snt.SAME, regularizers=regularizers)
  act = _NONLINEARITY(conv(pool))
  pool = tf.nn.pool(act, window_shape=(2, 2), pooling_type=_POOL,
                    padding=snt.SAME, strides=(2, 2))
  flatten = snt.BatchFlatten()(pool)
  linear = snt.Linear(32, regularizers=regularizers)(flatten)
  return snt.Linear(10, regularizers=regularizers)(linear)
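In Sonnet 1.x, regularizers passed via the regularizers argument are attached to the module's variables and (assuming they are routed through TensorFlow's standard mechanism, which is worth verifying for your version) collected into tf.GraphKeys.REGULARIZATION_LOSSES. A sketch of how a training loss would then typically be assembled; logits and labels are hypothetical names:

# Hypothetical loss assembly; assumes the 'w'/'b' penalties above land in
# tf.GraphKeys.REGULARIZATION_LOSSES.
data_loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
reg_loss = tf.losses.get_regularization_loss()
total_loss = data_loss + reg_loss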
Example 10: _build
# Required import: import sonnet as snt
# Or: from sonnet import Conv2D
def _build(self, x):
  h = x
  for unused_i, l in enumerate(self.layers):
    h = tf.nn.relu(snt.Conv2D(l[0], l[1], l[2])(h))
  h_shape = h.get_shape().as_list()
  h = tf.reshape(h, [-1, h_shape[1] * h_shape[2] * h_shape[3]])
  for _, l in enumerate(self.padding_linear_layers):
    h = snt.Linear(l)(h)
  pre_z = snt.Linear(2 * self.n_latent)(h)
  mu = pre_z[:, :self.n_latent]
  sigma = tf.nn.softplus(pre_z[:, self.n_latent:])
  return mu, sigma
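This _build has the shape of a VAE-style encoder: it returns the mean and a softplus-positive standard deviation of a diagonal Gaussian over self.n_latent dimensions. A minimal sketch of how such an output is commonly sampled with the reparameterization trick; the sampling code is an illustration, not part of the example:

mu, sigma = encoder(x)                    # `encoder` is a connected instance of the module above
epsilon = tf.random_normal(tf.shape(mu))  # standard normal noise
z = mu + sigma * epsilon                  # differentiable sample of the latent code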
Example 11: _inputs_for_observed_module
# Required import: import sonnet as snt
# Or: from sonnet import Conv2D
def _inputs_for_observed_module(self, subgraph):
  """Extracts input tensors from a connected Sonnet module.

  This default implementation supports common layer types, but should be
  overridden if custom layer types are to be supported.

  Args:
    subgraph: `snt.ConnectedSubGraph` specifying the Sonnet module being
      connected, and its inputs and outputs.

  Returns:
    List of input tensors, or None if not a supported Sonnet module.
  """
  m = subgraph.module
  # Only support a few operations for now.
  if not (isinstance(m, snt.BatchReshape) or
          isinstance(m, snt.Linear) or
          isinstance(m, snt.Conv1D) or
          isinstance(m, snt.Conv2D) or
          isinstance(m, snt.BatchNorm) or
          isinstance(m, layers.ImageNorm)):
    return None
  if isinstance(m, snt.BatchNorm):
    return subgraph.inputs['input_batch'],
  else:
    return subgraph.inputs['inputs'],
Example 12: _wrapper_for_observed_module
# Required import: import sonnet as snt
# Or: from sonnet import Conv2D
def _wrapper_for_observed_module(self, subgraph):
  """Creates a wrapper for a connected Sonnet module.

  This default implementation supports common layer types, but should be
  overridden if custom layer types are to be supported.

  Args:
    subgraph: `snt.ConnectedSubGraph` specifying the Sonnet module being
      connected, and its inputs and outputs.

  Returns:
    `ibp.VerifiableWrapper` for the Sonnet module.
  """
  m = subgraph.module
  if isinstance(m, snt.BatchReshape):
    shape = subgraph.outputs.get_shape()[1:].as_list()
    return verifiable_wrapper.BatchReshapeWrapper(m, shape)
  elif isinstance(m, snt.Linear):
    return verifiable_wrapper.LinearFCWrapper(m)
  elif isinstance(m, snt.Conv1D):
    return verifiable_wrapper.LinearConv1dWrapper(m)
  elif isinstance(m, snt.Conv2D):
    return verifiable_wrapper.LinearConv2dWrapper(m)
  elif isinstance(m, layers.ImageNorm):
    return verifiable_wrapper.ImageNormWrapper(m)
  else:
    assert isinstance(m, snt.BatchNorm)
    return verifiable_wrapper.BatchNormWrapper(m)
Example 13: __init__
# Required import: import sonnet as snt
# Or: from sonnet import Conv2D
def __init__(self, module):
  if not isinstance(module, snt.Conv2D):
    raise ValueError('Cannot wrap {} with a LinearConv2dWrapper.'.format(
        module))
  super(LinearConv2dWrapper, self).__init__(module)
Example 14: testConv2dSymbolicBounds
# Required import: import sonnet as snt
# Or: from sonnet import Conv2D
def testConv2dSymbolicBounds(self):
  m = snt.Conv2D(
      output_channels=1,
      kernel_shape=(2, 2),
      padding='VALID',
      stride=1,
      use_bias=True,
      initializers={
          'w': tf.constant_initializer(1.),
          'b': tf.constant_initializer(2.),
      })
  z = tf.constant([1, 2, 3, 4], dtype=tf.float32)
  z = tf.reshape(z, [1, 2, 2, 1])
  m(z)  # Connect to create weights.
  m = ibp.LinearConv2dWrapper(m)
  input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
  input_bounds = ibp.SymbolicBounds.convert(input_bounds)
  output_bounds = m.propagate_bounds(input_bounds)
  output_bounds = ibp.IntervalBounds.convert(output_bounds)
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    l, u = sess.run([output_bounds.lower, output_bounds.upper])
    l = l.item()
    u = u.item()
    self.assertAlmostEqual(8., l)
    self.assertAlmostEqual(16., u)
Example 15: testConv2dBackwardBounds
# Required import: import sonnet as snt
# Or: from sonnet import Conv2D
def testConv2dBackwardBounds(self):
  m = snt.Conv2D(
      output_channels=1,
      kernel_shape=(2, 2),
      padding='VALID',
      stride=1,
      use_bias=True,
      initializers={
          'w': tf.constant_initializer(1.),
          'b': tf.constant_initializer(2.),
      })
  z = tf.constant([1, 2, 3, 4], dtype=tf.float32)
  z = tf.reshape(z, [1, 2, 2, 1])
  m(z)  # Connect to create weights.
  m = ibp.LinearConv2dWrapper(m)
  input_bounds = ibp.IntervalBounds(z - 1., z + 1.)
  m.propagate_bounds(input_bounds)  # Create IBP bounds.
  crown_init_bounds = _generate_identity_spec([m], shape=(1, 1, 1, 1, 1))
  output_bounds = m.propagate_bounds(crown_init_bounds)
  concrete_bounds = output_bounds.concretize()
  with self.test_session() as sess:
    sess.run(tf.global_variables_initializer())
    l, u = sess.run([concrete_bounds.lower, concrete_bounds.upper])
    l = l.item()
    u = u.item()
    self.assertAlmostEqual(8., l)
    self.assertAlmostEqual(16., u)