This page collects typical usage examples of the Python method tensorflow.contrib.slim.layers.conv2d. If you are unsure what layers.conv2d does, how to call it, or where it is useful, the curated examples below may help. You can also explore the other members of the tensorflow.contrib.slim.layers module.
Three code examples of layers.conv2d are shown below, ordered by popularity.
Example 1: basic_conv_lstm_cell

# Required imports:
import tensorflow as tf
from tensorflow.contrib.slim import layers  # provides layers.conv2d

def basic_conv_lstm_cell(inputs,
                         state,
                         num_channels,
                         filter_size=5,
                         forget_bias=1.0,
                         scope=None,
                         reuse=None):
  """Basic LSTM recurrent network cell, with 2D convolution connections.

  We add forget_bias (default: 1) to the biases of the forget gate in order
  to reduce the scale of forgetting at the beginning of training. It does not
  allow cell clipping or a projection layer, and it does not use peephole
  connections: it is the basic baseline.

  Args:
    inputs: input Tensor, 4D, batch x height x width x channels.
    state: state Tensor, 4D, batch x height x width x channels.
    num_channels: the number of output channels in the layer.
    filter_size: the shape of each convolution filter.
    forget_bias: the initial value of the forget biases.
    scope: Optional scope for variable_scope.
    reuse: whether or not the layer and the variables should be reused.

  Returns:
    a tuple of tensors representing output and the new state.
  """
  spatial_size = inputs.get_shape()[1:3]
  if state is None:
    # init_state is a helper from the surrounding project that builds a
    # zero-filled initial state of the given shape (see the sketch below).
    state = init_state(inputs, list(spatial_size) + [2 * num_channels])
  with tf.variable_scope(scope,
                         'BasicConvLstmCell',
                         [inputs, state],
                         reuse=reuse):
    inputs.get_shape().assert_has_rank(4)
    state.get_shape().assert_has_rank(4)
    c, h = tf.split(axis=3, num_or_size_splits=2, value=state)
    inputs_h = tf.concat(axis=3, values=[inputs, h])
    # Parameters of gates are concatenated into one conv for efficiency.
    i_j_f_o = layers.conv2d(inputs_h,
                            4 * num_channels, [filter_size, filter_size],
                            stride=1,
                            activation_fn=None,
                            scope='Gates')
    # i = input_gate, j = new_input, f = forget_gate, o = output_gate
    i, j, f, o = tf.split(axis=3, num_or_size_splits=4, value=i_j_f_o)
    new_c = c * tf.sigmoid(f + forget_bias) + tf.sigmoid(i) * tf.tanh(j)
    new_h = tf.tanh(new_c) * tf.sigmoid(o)
    return new_h, tf.concat(axis=3, values=[new_c, new_h])
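The function above calls init_state without defining it; that helper lives in the surrounding project. As a minimal stand-in consistent with how it is used here (zero-initializing a state whose batch dimension matches the inputs), something like the following would work; the exact signature in the original project may differ:

import tensorflow as tf

def init_state(inputs, state_shape, dtype=tf.float32):
  # Zero-filled initial state matching the batch size of `inputs`.
  # `state_shape` is the per-example shape [height, width, channels];
  # its entries may be Dimension objects, so normalize them to ints.
  batch_size = int(inputs.get_shape()[0])
  shape = [batch_size] + [int(dim) for dim in state_shape]
  return tf.zeros(shape, dtype=dtype)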
Example 2: basic_conv_lstm_cell

# Required imports:
import tensorflow as tf
from tensorflow.contrib.slim import layers  # provides layers.conv2d

def basic_conv_lstm_cell(inputs,
                         state,
                         num_channels,
                         filter_size=5,
                         forget_bias=1.0,
                         scope=None,
                         reuse=None):
  """Basic LSTM recurrent network cell, with 2D convolution connections.

  We add forget_bias (default: 1) to the biases of the forget gate in order
  to reduce the scale of forgetting at the beginning of training. It does not
  allow cell clipping or a projection layer, and it does not use peephole
  connections: it is the basic baseline.

  Args:
    inputs: input Tensor, 4D, batch x height x width x channels.
    state: state Tensor, 4D, batch x height x width x channels.
    num_channels: the number of output channels in the layer.
    filter_size: the shape of each convolution filter.
    forget_bias: the initial value of the forget biases.
    scope: Optional scope for variable_scope.
    reuse: whether or not the layer and the variables should be reused.

  Returns:
    a tuple of tensors representing output and the new state.
  """
  if state is None:
    # Unlike Example 1, this variant builds the zero initial state inline;
    # it assumes the batch and spatial dimensions are statically known.
    state = tf.zeros(inputs.get_shape().as_list()[:3] + [2 * num_channels],
                     name='init_state')
  with tf.variable_scope(scope,
                         'BasicConvLstmCell',
                         [inputs, state],
                         reuse=reuse):
    inputs.get_shape().assert_has_rank(4)
    state.get_shape().assert_has_rank(4)
    c, h = tf.split(axis=3, num_or_size_splits=2, value=state)
    inputs_h = tf.concat(values=[inputs, h], axis=3)
    # Parameters of gates are concatenated into one conv for efficiency.
    i_j_f_o = layers.conv2d(inputs_h,
                            4 * num_channels, [filter_size, filter_size],
                            stride=1,
                            activation_fn=None,
                            scope='Gates')
    # i = input_gate, j = new_input, f = forget_gate, o = output_gate
    i, j, f, o = tf.split(value=i_j_f_o, num_or_size_splits=4, axis=3)
    new_c = c * tf.sigmoid(f + forget_bias) + tf.sigmoid(i) * tf.tanh(j)
    new_h = tf.tanh(new_c) * tf.sigmoid(o)
    return new_h, tf.concat(values=[new_c, new_h], axis=3)
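For context, here is a minimal sketch of how such a cell is typically unrolled over a short frame sequence. The shapes, the placeholder, and the scope name are illustrative assumptions, not details taken from the original projects:

import tensorflow as tf

# Illustrative shapes: batch of 8, 10 time steps, 32x32 RGB frames.
frames = tf.placeholder(tf.float32, [8, 10, 32, 32, 3])
num_channels = 16

state = None  # the cell builds a zero initial state on the first step
outputs = []
for t in range(10):
  # Reuse the cell's variables after the first time step.
  output, state = basic_conv_lstm_cell(frames[:, t], state, num_channels,
                                       scope='conv_lstm', reuse=(t > 0))
  outputs.append(output)
# Each output has shape [8, 32, 32, 16]; stack along time if needed.
outputs = tf.stack(outputs, axis=1)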
Example 3: basic_conv_lstm_cell

# Required imports:
import tensorflow as tf
from tensorflow.contrib.slim import layers  # provides layers.conv2d

def basic_conv_lstm_cell(inputs,
                         state,
                         num_channels,
                         filter_size=5,
                         forget_bias=1.0,
                         scope=None,
                         reuse=None):
  """Basic LSTM recurrent network cell, with 2D convolution connections.

  We add forget_bias (default: 1) to the biases of the forget gate in order
  to reduce the scale of forgetting at the beginning of training. It does not
  allow cell clipping or a projection layer, and it does not use peephole
  connections: it is the basic baseline.

  Args:
    inputs: input Tensor, 4D, batch x height x width x channels.
    state: state Tensor, 4D, batch x height x width x channels.
    num_channels: the number of output channels in the layer.
    filter_size: the shape of each convolution filter.
    forget_bias: the initial value of the forget biases.
    scope: Optional scope for variable_scope.
    reuse: whether or not the layer and the variables should be reused.

  Returns:
    a tuple of tensors representing output and the new state.
  """
  spatial_size = inputs.get_shape()[1:3]
  if state is None:
    # init_state is a project helper that zero-fills the initial state
    # (see the sketch after Example 1).
    state = init_state(inputs, list(spatial_size) + [2 * num_channels])
  with tf.variable_scope(scope,
                         'BasicConvLstmCell',
                         [inputs, state],
                         reuse=reuse):
    inputs.get_shape().assert_has_rank(4)
    state.get_shape().assert_has_rank(4)
    c, h = tf.split(axis=3, num_or_size_splits=2, value=state)
    inputs_h = tf.concat(axis=3, values=[inputs, h])
    # Parameters of gates are concatenated into one conv for efficiency.
    i_j_f_o = layers.conv2d(inputs_h,
                            4 * num_channels, [filter_size, filter_size],
                            stride=1,
                            activation_fn=None,
                            scope='Gates')
    # i = input_gate, j = new_input, f = forget_gate, o = output_gate
    i, j, f, o = tf.split(axis=3, num_or_size_splits=4, value=i_j_f_o)
    new_c = c * tf.sigmoid(f + forget_bias) + tf.sigmoid(i) * tf.tanh(j)
    new_h = tf.tanh(new_c) * tf.sigmoid(o)
    return new_h, tf.concat(axis=3, values=[new_c, new_h])
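As a quick sanity check of the shapes flowing through these cells, using the illustrative sizes from the usage sketch above (inputs of shape [8, 32, 32, 3], num_channels=16):

# inputs_h  : [8, 32, 32, 3 + 16]   (input concatenated with h)
# i_j_f_o   : [8, 32, 32, 4 * 16]   (all four gates in one conv)
# new_h     : [8, 32, 32, 16]       (the cell output)
# new state : [8, 32, 32, 32]       (new_c and new_h concatenated)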