This article collects typical usage examples of the Python method tensorflow.graph. If you are wondering what tensorflow.graph does and how to use it, the curated method examples below may help. You can also explore further usage examples from the enclosing tensorflow module.
The following presents 12 code examples of the tensorflow.graph method, sorted by popularity by default.
Example 1: conv2d
# Required module: import tensorflow [as alias]
# Or: from tensorflow import graph [as alias]
def conv2d(name, x, w=None, num_filters=16, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
           initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0,
           activation=None, batchnorm_enabled=False, max_pool_enabled=False, dropout_keep_prob=-1,
           is_training=True):
    """
    This block is responsible for a convolution 2D layer followed by optional (non-linearity, dropout, max-pooling).
    Note that: "is_training" should be passed by a correct value based on being in either training or testing.
    :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
    :param x: (tf.tensor) The input to the layer (N, H, W, C).
    :param num_filters: (integer) No. of filters (This is the output depth)
    :param kernel_size: (integer tuple) The size of the convolving kernel.
    :param padding: (string) The amount of padding required.
    :param stride: (integer tuple) The stride required.
    :param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
    :param l2_strength:(weight decay) (float) L2 regularization parameter.
    :param bias: (float) Amount of bias.
    :param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied.
    :param batchnorm_enabled: (boolean) for enabling batch normalization.
    :param max_pool_enabled: (boolean) for enabling max-pooling 2x2 to decrease width and height by a factor of 2.
    :param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout
    :param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout)
    :return: The output tensor of the layer (N, H', W', C').
    """
    with tf.variable_scope(name) as scope:
        conv_o_b = __conv2d_p(scope, x=x, w=w, num_filters=num_filters, kernel_size=kernel_size, stride=stride,
                              padding=padding,
                              initializer=initializer, l2_strength=l2_strength, bias=bias)

        if batchnorm_enabled:
            conv_o_bn = tf.layers.batch_normalization(conv_o_b, training=is_training)
            if not activation:
                conv_a = conv_o_bn
            else:
                conv_a = activation(conv_o_bn)
        else:
            if not activation:
                conv_a = conv_o_b
            else:
                conv_a = activation(conv_o_b)

        def dropout_with_keep():
            return tf.nn.dropout(conv_a, dropout_keep_prob)

        def dropout_no_keep():
            return tf.nn.dropout(conv_a, 1.0)

        if dropout_keep_prob != -1:
            conv_o_dr = tf.cond(is_training, dropout_with_keep, dropout_no_keep)
        else:
            conv_o_dr = conv_a

        conv_o = conv_o_dr
        if max_pool_enabled:
            conv_o = max_pool_2d(conv_o_dr)

    return conv_o
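A minimal usage sketch for the block above (added for illustration, not part of the original example). It assumes the private helpers `__conv2d_p` and `max_pool_2d` from the same module are in scope, and it feeds `is_training` as a boolean placeholder because the dropout branch is built with `tf.cond`, which expects a tensor predicate; the input shape and hyperparameters are illustrative assumptions.

```python
# Hypothetical usage sketch (TF 1.x); shapes and hyperparameters are assumed, not from the source repo.
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 32, 32, 3], name='x')
is_training = tf.placeholder(tf.bool, name='is_training')  # tensor predicate for tf.cond

conv1 = conv2d('conv1_block', x, num_filters=32, kernel_size=(3, 3), stride=(1, 1),
               activation=tf.nn.relu, batchnorm_enabled=True, max_pool_enabled=True,
               dropout_keep_prob=0.8, is_training=is_training)
# conv1 has shape (N, 16, 16, 32): same-padded stride-1 convolution, then 2x2 max-pooling.
```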
Example 2: dense
# Required module: import tensorflow [as alias]
# Or: from tensorflow import graph [as alias]
def dense(name, x, w=None, output_dim=128, initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0,
          bias=0.0,
          activation=None, batchnorm_enabled=False, dropout_keep_prob=-1,
          is_training=True
          ):
    """
    This block is responsible for a fully connected followed by optional (non-linearity, dropout, max-pooling).
    Note that: "is_training" should be passed by a correct value based on being in either training or testing.
    :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
    :param x: (tf.tensor) The input to the layer (N, D).
    :param output_dim: (integer) It specifies H, the output second dimension of the fully connected layer [ie:(N, H)]
    :param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
    :param l2_strength:(weight decay) (float) L2 regularization parameter.
    :param bias: (float) Amount of bias.
    :param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied.
    :param batchnorm_enabled: (boolean) for enabling batch normalization.
    :param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout
    :param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout)
    :return out: The output of the layer. (N, H)
    """
    with tf.variable_scope(name) as scope:
        dense_o_b = __dense_p(name=scope, x=x, w=w, output_dim=output_dim, initializer=initializer,
                              l2_strength=l2_strength,
                              bias=bias)

        if batchnorm_enabled:
            dense_o_bn = tf.layers.batch_normalization(dense_o_b, training=is_training)
            if not activation:
                dense_a = dense_o_bn
            else:
                dense_a = activation(dense_o_bn)
        else:
            if not activation:
                dense_a = dense_o_b
            else:
                dense_a = activation(dense_o_b)

        def dropout_with_keep():
            return tf.nn.dropout(dense_a, dropout_keep_prob)

        def dropout_no_keep():
            return tf.nn.dropout(dense_a, 1.0)

        if dropout_keep_prob != -1:
            dense_o_dr = tf.cond(is_training, dropout_with_keep, dropout_no_keep)
        else:
            dense_o_dr = dense_a

        dense_o = dense_o_dr

    return dense_o
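A short, hypothetical call to the dense block above, continuing the tensors `conv1` and `is_training` from the previous sketch; the flattened dimension is an illustrative assumption.

```python
# Hypothetical continuation of the previous sketch: flatten, then a dense block.
flat = tf.reshape(conv1, [-1, 16 * 16 * 32])   # (N, 8192), assumed spatial size after pooling
fc1 = dense('fc1_block', flat, output_dim=128, activation=tf.nn.relu,
            batchnorm_enabled=False, dropout_keep_prob=0.5, is_training=is_training)
# fc1 has shape (N, 128); dropout is only active when is_training evaluates to True.
```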
Example 3: conv2d
# Required module: import tensorflow [as alias]
# Or: from tensorflow import graph [as alias]
def conv2d(name, x, w=None, num_filters=16, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
           initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0,
           activation=None, batchnorm_enabled=False, max_pool_enabled=False, dropout_keep_prob=-1,
           is_training=True):
    """
    This block is responsible for a convolution 2D layer followed by optional (non-linearity, dropout, max-pooling).
    Note that: "is_training" should be passed by a correct value based on being in either training or testing.
    :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
    :param x: (tf.tensor) The input to the layer (N, H, W, C).
    :param num_filters: (integer) No. of filters (This is the output depth)
    :param kernel_size: (integer tuple) The size of the convolving kernel.
    :param padding: (string) The amount of padding required.
    :param stride: (integer tuple) The stride required.
    :param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
    :param l2_strength:(weight decay) (float) L2 regularization parameter.
    :param bias: (float) Amount of bias.
    :param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied.
    :param batchnorm_enabled: (boolean) for enabling batch normalization.
    :param max_pool_enabled: (boolean) for enabling max-pooling 2x2 to decrease width and height by a factor of 2.
    :param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout
    :param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout)
    :return: The output tensor of the layer (N, H', W', C').
    """
    with tf.variable_scope(name) as scope:
        conv_o_b = conv2d_p(scope, x=x, w=w, num_filters=num_filters, kernel_size=kernel_size, stride=stride,
                            padding=padding,
                            initializer=initializer, l2_strength=l2_strength, bias=bias)

        if batchnorm_enabled:
            conv_o_bn = tf.layers.batch_normalization(conv_o_b, training=is_training)
            if not activation:
                conv_a = conv_o_bn
            else:
                conv_a = activation(conv_o_bn)
        else:
            if not activation:
                conv_a = conv_o_b
            else:
                conv_a = activation(conv_o_b)

        if dropout_keep_prob != -1:
            conv_o_dr = tf.nn.dropout(conv_a, dropout_keep_prob)
        else:
            conv_o_dr = conv_a

        conv_o = conv_o_dr
        if max_pool_enabled:
            conv_o = max_pool_2d(scope, conv_o_dr)

    return conv_o
Example 4: atrous_conv2d
# Required module: import tensorflow [as alias]
# Or: from tensorflow import graph [as alias]
def atrous_conv2d(name, x, w=None, num_filters=16, kernel_size=(3, 3), padding='SAME', dilation_rate=1,
                  initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0,
                  activation=None, batchnorm_enabled=False, max_pool_enabled=False, dropout_keep_prob=-1,
                  is_training=True):
    """
    This block is responsible for a Dilated convolution 2D layer followed by optional (non-linearity, dropout, max-pooling).
    Note that: "is_training" should be passed by a correct value based on being in either training or testing.
    :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
    :param x: (tf.tensor) The input to the layer (N, H, W, C).
    :param num_filters: (integer) No. of filters (This is the output depth)
    :param kernel_size: (integer tuple) The size of the convolving kernel.
    :param padding: (string) The amount of padding required.
    :param dilation_rate: (integer) The amount of dilation required. If equals 1, it means normal convolution.
    :param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
    :param l2_strength:(weight decay) (float) L2 regularization parameter.
    :param bias: (float) Amount of bias.
    :param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied.
    :param batchnorm_enabled: (boolean) for enabling batch normalization.
    :param max_pool_enabled: (boolean) for enabling max-pooling 2x2 to decrease width and height by a factor of 2.
    :param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout
    :param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout)
    :return: The output tensor of the layer (N, H', W', C').
    """
    with tf.variable_scope(name) as scope:
        conv_o_b = atrous_conv2d_p(scope, x=x, w=w, num_filters=num_filters, kernel_size=kernel_size,
                                   padding=padding, dilation_rate=dilation_rate,
                                   initializer=initializer, l2_strength=l2_strength, bias=bias)

        if batchnorm_enabled:
            conv_o_bn = tf.layers.batch_normalization(conv_o_b, training=is_training)
            if not activation:
                conv_a = conv_o_bn
            else:
                conv_a = activation(conv_o_bn)
        else:
            if not activation:
                conv_a = conv_o_b
            else:
                conv_a = activation(conv_o_b)

        if dropout_keep_prob != -1:
            conv_o_dr = tf.nn.dropout(conv_a, dropout_keep_prob)
        else:
            conv_o_dr = conv_a

        conv_o = conv_o_dr
        if max_pool_enabled:
            conv_o = max_pool_2d(scope, conv_o_dr)

    return conv_o
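A hedged sketch of calling this dilated-convolution block, assuming the helper `atrous_conv2d_p` is in scope and an NHWC placeholder input (both assumptions for illustration); in this variant dropout is applied with `tf.nn.dropout` regardless of mode, so `is_training` only affects batch normalization.

```python
# Hypothetical usage sketch: dilation_rate=2 enlarges the receptive field without
# reducing spatial resolution, so the output stays (N, 64, 64, 64) with SAME padding.
x_img = tf.placeholder(tf.float32, [None, 64, 64, 3], name='x_img')
dil1 = atrous_conv2d('dilated1', x_img, num_filters=64, kernel_size=(3, 3),
                     dilation_rate=2, activation=tf.nn.relu,
                     batchnorm_enabled=True, dropout_keep_prob=-1, is_training=True)
```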
Example 5: conv2d_transpose
# Required module: import tensorflow [as alias]
# Or: from tensorflow import graph [as alias]
def conv2d_transpose(name, x, w=None, output_shape=None, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
                     l2_strength=0.0,
                     bias=0.0, activation=None, batchnorm_enabled=False, dropout_keep_prob=-1,
                     is_training=True):
    """
    This block is responsible for a convolution transpose 2D followed by optional (non-linearity, dropout, max-pooling).
    Note that: "is_training" should be passed by a correct value based on being in either training or testing.
    :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
    :param x: (tf.tensor) The input to the layer (N, H, W, C).
    :param output_shape: (Array) [N, H', W', C'] The shape of the corresponding output.
    :param kernel_size: (integer tuple) The size of the convolving kernel.
    :param padding: (string) The amount of padding required.
    :param stride: (integer tuple) The stride required.
    :param l2_strength:(weight decay) (float) L2 regularization parameter.
    :param bias: (float) Amount of bias.
    :param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied.
    :param batchnorm_enabled: (boolean) for enabling batch normalization.
    :param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout
    :param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout)
    :return out: The output of the layer. (output_shape[0], output_shape[1], output_shape[2], output_shape[3])
    """
    with tf.variable_scope(name) as scope:
        conv_o_b = conv2d_transpose_p(name=scope, x=x, w=w, output_shape=output_shape, kernel_size=kernel_size,
                                      padding=padding, stride=stride,
                                      l2_strength=l2_strength,
                                      bias=bias)

        if batchnorm_enabled:
            conv_o_bn = tf.layers.batch_normalization(conv_o_b, training=is_training)
            if not activation:
                conv_a = conv_o_bn
            else:
                conv_a = activation(conv_o_bn)
        else:
            if not activation:
                conv_a = conv_o_b
            else:
                conv_a = activation(conv_o_b)

        if dropout_keep_prob != -1:
            conv_o_dr = tf.nn.dropout(conv_a, dropout_keep_prob)
        else:
            conv_o_dr = conv_a

        conv_o = conv_o_dr

    return conv_o
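A hedged sketch of the transpose-convolution block above; the static `output_shape` (including the batch dimension), input shape, and layer name are illustrative assumptions, chosen because TF 1.x transpose convolutions need the output shape supplied explicitly.

```python
# Hypothetical upsampling step: double the spatial size of a (4, 16, 16, 64) feature map.
feat = tf.placeholder(tf.float32, [4, 16, 16, 64], name='feat')
up1 = conv2d_transpose('upsample1', feat, output_shape=[4, 32, 32, 32],
                       kernel_size=(3, 3), stride=(2, 2),
                       activation=tf.nn.relu, batchnorm_enabled=False,
                       dropout_keep_prob=-1, is_training=False)
# up1 has shape (4, 32, 32, 32), matching output_shape.
```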
#############################################################################################################
# Dense Layer methods
Example 6: dense
# Required module: import tensorflow [as alias]
# Or: from tensorflow import graph [as alias]
def dense(name, x, w=None, output_dim=128, initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0,
          bias=0.0,
          activation=None, batchnorm_enabled=False, dropout_keep_prob=-1,
          is_training=True
          ):
    """
    This block is responsible for a fully connected followed by optional (non-linearity, dropout, max-pooling).
    Note that: "is_training" should be passed by a correct value based on being in either training or testing.
    :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
    :param x: (tf.tensor) The input to the layer (N, D).
    :param output_dim: (integer) It specifies H, the output second dimension of the fully connected layer [ie:(N, H)]
    :param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
    :param l2_strength:(weight decay) (float) L2 regularization parameter.
    :param bias: (float) Amount of bias.
    :param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied.
    :param batchnorm_enabled: (boolean) for enabling batch normalization.
    :param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout
    :param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout)
    :return out: The output of the layer. (N, H)
    """
    with tf.variable_scope(name) as scope:
        dense_o_b = dense_p(name=scope, x=x, w=w, output_dim=output_dim, initializer=initializer,
                            l2_strength=l2_strength,
                            bias=bias)

        if batchnorm_enabled:
            dense_o_bn = tf.layers.batch_normalization(dense_o_b, training=is_training)
            if not activation:
                dense_a = dense_o_bn
            else:
                dense_a = activation(dense_o_bn)
        else:
            if not activation:
                dense_a = dense_o_b
            else:
                dense_a = activation(dense_o_b)

        if dropout_keep_prob != -1:
            dense_o_dr = tf.nn.dropout(dense_a, dropout_keep_prob)
        else:
            dense_o_dr = dense_a

        dense_o = dense_o_dr

    return dense_o
Example 7: dense
# Required module: import tensorflow [as alias]
# Or: from tensorflow import graph [as alias]
def dense(name, x, w=None, output_dim=128, initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0,
          bias=0.0,
          activation=None, batchnorm_enabled=False, dropout_keep_prob=-1,
          is_training=True
          ):
    """
    This block is responsible for a fully connected followed by optional (non-linearity, dropout, max-pooling).
    Note that: "is_training" should be passed by a correct value based on being in either training or testing.
    :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
    :param x: (tf.tensor) The input to the layer (N, D).
    :param output_dim: (integer) It specifies H, the output second dimension of the fully connected layer [ie:(N, H)]
    :param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
    :param l2_strength:(weight decay) (float) L2 regularization parameter.
    :param bias: (float) Amount of bias.
    :param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied.
    :param batchnorm_enabled: (boolean) for enabling batch normalization.
    :param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout
    :param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout)
    :return out: The output of the layer. (N, H)
    """
    with tf.variable_scope(name) as scope:
        dense_o_b = dense_p(name=scope, x=x, w=w, output_dim=output_dim, initializer=initializer,
                            l2_strength=l2_strength,
                            bias=bias)

        if batchnorm_enabled:
            dense_o_bn = tf.layers.batch_normalization(dense_o_b, training=is_training)
            if not activation:
                dense_a = dense_o_bn
            else:
                dense_a = activation(dense_o_bn)
        else:
            if not activation:
                dense_a = dense_o_b
            else:
                dense_a = activation(dense_o_b)

        def dropout_with_keep():
            return tf.nn.dropout(dense_a, dropout_keep_prob)

        def dropout_no_keep():
            return tf.nn.dropout(dense_a, 1.0)

        if dropout_keep_prob != -1:
            dense_o_dr = tf.cond(is_training, dropout_with_keep, dropout_no_keep)
        else:
            dense_o_dr = dense_a

        dense_o = dense_o_dr

    return dense_o
Example 8: atrous_conv2d
# Required module: import tensorflow [as alias]
# Or: from tensorflow import graph [as alias]
def atrous_conv2d(name, x, w=None, num_filters=16, kernel_size=(3, 3), padding='SAME', dilation_rate=1,
                  initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0,
                  activation=None, batchnorm_enabled=False, max_pool_enabled=False, dropout_keep_prob=-1,
                  is_training=True):
    """
    This block is responsible for a Dilated convolution 2D layer followed by optional (non-linearity, dropout, max-pooling).
    Note that: "is_training" should be passed by a correct value based on being in either training or testing.
    :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
    :param x: (tf.tensor) The input to the layer (N, H, W, C).
    :param num_filters: (integer) No. of filters (This is the output depth)
    :param kernel_size: (integer tuple) The size of the convolving kernel.
    :param padding: (string) The amount of padding required.
    :param dilation_rate: (integer) The amount of dilation required. If equals 1, it means normal convolution.
    :param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
    :param l2_strength:(weight decay) (float) L2 regularization parameter.
    :param bias: (float) Amount of bias.
    :param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied.
    :param batchnorm_enabled: (boolean) for enabling batch normalization.
    :param max_pool_enabled: (boolean) for enabling max-pooling 2x2 to decrease width and height by a factor of 2.
    :param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout
    :param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout)
    :return: The output tensor of the layer (N, H', W', C').
    """
    with tf.variable_scope(name) as scope:
        conv_o_b = __atrous_conv2d_p(scope, x=x, w=w, num_filters=num_filters, kernel_size=kernel_size,
                                     padding=padding, dilation_rate=dilation_rate,
                                     initializer=initializer, l2_strength=l2_strength, bias=bias)

        if batchnorm_enabled:
            conv_o_bn = tf.layers.batch_normalization(conv_o_b, training=is_training)
            if not activation:
                conv_a = conv_o_bn
            else:
                conv_a = activation(conv_o_bn)
        else:
            if not activation:
                conv_a = conv_o_b
            else:
                conv_a = activation(conv_o_b)

        def dropout_with_keep():
            return tf.nn.dropout(conv_a, dropout_keep_prob)

        def dropout_no_keep():
            return tf.nn.dropout(conv_a, 1.0)

        if dropout_keep_prob != -1:
            conv_o_dr = tf.cond(is_training, dropout_with_keep, dropout_no_keep)
        else:
            conv_o_dr = conv_a

        conv_o = conv_o_dr
        if max_pool_enabled:
            conv_o = max_pool_2d(conv_o_dr)

    return conv_o
Example 9: conv2d_transpose
# Required module: import tensorflow [as alias]
# Or: from tensorflow import graph [as alias]
def conv2d_transpose(name, x, w=None, output_shape=None, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
                     l2_strength=0.0,
                     bias=0.0, activation=None, batchnorm_enabled=False, dropout_keep_prob=-1,
                     is_training=True):
    """
    This block is responsible for a convolution transpose 2D followed by optional (non-linearity, dropout, max-pooling).
    Note that: "is_training" should be passed by a correct value based on being in either training or testing.
    :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
    :param x: (tf.tensor) The input to the layer (N, H, W, C).
    :param output_shape: (Array) [N, H', W', C'] The shape of the corresponding output.
    :param kernel_size: (integer tuple) The size of the convolving kernel.
    :param padding: (string) The amount of padding required.
    :param stride: (integer tuple) The stride required.
    :param l2_strength:(weight decay) (float) L2 regularization parameter.
    :param bias: (float) Amount of bias.
    :param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied.
    :param batchnorm_enabled: (boolean) for enabling batch normalization.
    :param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout
    :param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout)
    :return out: The output of the layer. (output_shape[0], output_shape[1], output_shape[2], output_shape[3])
    """
    with tf.variable_scope(name) as scope:
        conv_o_b = __conv2d_transpose_p(name=scope, x=x, w=w, output_shape=output_shape, kernel_size=kernel_size,
                                        padding=padding, stride=stride,
                                        l2_strength=l2_strength,
                                        bias=bias)

        if batchnorm_enabled:
            conv_o_bn = tf.layers.batch_normalization(conv_o_b, training=is_training)
            if not activation:
                conv_a = conv_o_bn
            else:
                conv_a = activation(conv_o_bn)
        else:
            if not activation:
                conv_a = conv_o_b
            else:
                conv_a = activation(conv_o_b)

        def dropout_with_keep():
            return tf.nn.dropout(conv_a, dropout_keep_prob)

        def dropout_no_keep():
            return tf.nn.dropout(conv_a, 1.0)

        if dropout_keep_prob != -1:
            conv_o_dr = tf.cond(is_training, dropout_with_keep, dropout_no_keep)
        else:
            conv_o_dr = conv_a

        conv_o = conv_o_dr

    return conv_o
Example 10: conv2d
# Required module: import tensorflow [as alias]
# Or: from tensorflow import graph [as alias]
def conv2d(name, x, w=None, num_filters=16, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
           initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0, bias=0.0,
           activation=None, batchnorm_enabled=False, max_pool_enabled=False, dropout_keep_prob=-1,
           is_training=True):
    """
    This block is responsible for a convolution 2D layer followed by optional (non-linearity, dropout, max-pooling).
    Note that: "is_training" should be passed by a correct value based on being in either training or testing.
    :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
    :param x: (tf.tensor) The input to the layer (N, H, W, C).
    :param num_filters: (integer) No. of filters (This is the output depth)
    :param kernel_size: (integer tuple) The size of the convolving kernel.
    :param padding: (string) The amount of padding required.
    :param stride: (integer tuple) The stride required.
    :param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
    :param l2_strength:(weight decay) (float) L2 regularization parameter.
    :param bias: (float) Amount of bias.
    :param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied.
    :param batchnorm_enabled: (boolean) for enabling batch normalization.
    :param max_pool_enabled: (boolean) for enabling max-pooling 2x2 to decrease width and height by a factor of 2.
    :param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout
    :param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout)
    :return: The output tensor of the layer (N, H', W', C').
    """
    with tf.variable_scope(name) as scope:
        conv_o_b = __conv2d_p('conv', x=x, w=w, num_filters=num_filters, kernel_size=kernel_size, stride=stride,
                              padding=padding,
                              initializer=initializer, l2_strength=l2_strength, bias=bias)

        if batchnorm_enabled:
            conv_o_bn = tf.layers.batch_normalization(conv_o_b, training=is_training, epsilon=1e-5)
            if not activation:
                conv_a = conv_o_bn
            else:
                conv_a = activation(conv_o_bn)
        else:
            if not activation:
                conv_a = conv_o_b
            else:
                conv_a = activation(conv_o_b)

        def dropout_with_keep():
            return tf.nn.dropout(conv_a, dropout_keep_prob)

        def dropout_no_keep():
            return tf.nn.dropout(conv_a, 1.0)

        if dropout_keep_prob != -1:
            conv_o_dr = tf.cond(is_training, dropout_with_keep, dropout_no_keep)
        else:
            conv_o_dr = conv_a

        conv_o = conv_o_dr
        if max_pool_enabled:
            conv_o = max_pool_2d(conv_o_dr)

    return conv_o
Example 11: dense
# Required module: import tensorflow [as alias]
# Or: from tensorflow import graph [as alias]
def dense(name, x, w=None, output_dim=128, initializer=tf.contrib.layers.xavier_initializer(), l2_strength=0.0,
          bias=0.0,
          activation=None, batchnorm_enabled=False, dropout_keep_prob=-1,
          is_training=True
          ):
    """
    This block is responsible for a fully connected followed by optional (non-linearity, dropout, max-pooling).
    Note that: "is_training" should be passed by a correct value based on being in either training or testing.
    :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
    :param x: (tf.tensor) The input to the layer (N, D).
    :param output_dim: (integer) It specifies H, the output second dimension of the fully connected layer [ie:(N, H)]
    :param initializer: (tf.contrib initializer) The initialization scheme, He et al. normal or Xavier normal are recommended.
    :param l2_strength:(weight decay) (float) L2 regularization parameter.
    :param bias: (float) Amount of bias.
    :param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied.
    :param batchnorm_enabled: (boolean) for enabling batch normalization.
    :param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout
    :param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout)
    :return out: The output of the layer. (N, H)
    """
    with tf.variable_scope(name) as scope:
        dense_o_b = __dense_p(name='dense', x=x, w=w, output_dim=output_dim, initializer=initializer,
                              l2_strength=l2_strength,
                              bias=bias)

        if batchnorm_enabled:
            dense_o_bn = tf.layers.batch_normalization(dense_o_b, training=is_training, epsilon=1e-5)
            if not activation:
                dense_a = dense_o_bn
            else:
                dense_a = activation(dense_o_bn)
        else:
            if not activation:
                dense_a = dense_o_b
            else:
                dense_a = activation(dense_o_b)

        def dropout_with_keep():
            return tf.nn.dropout(dense_a, dropout_keep_prob)

        def dropout_no_keep():
            return tf.nn.dropout(dense_a, 1.0)

        if dropout_keep_prob != -1:
            dense_o_dr = tf.cond(is_training, dropout_with_keep, dropout_no_keep)
        else:
            dense_o_dr = dense_a

        dense_o = dense_o_dr

    return dense_o
Example 12: conv2d_transpose
# Required module: import tensorflow [as alias]
# Or: from tensorflow import graph [as alias]
def conv2d_transpose(name, x, w=None, output_shape=None, kernel_size=(3, 3), padding='SAME', stride=(1, 1),
                     l2_strength=0.0,
                     bias=0.0, activation=None, batchnorm_enabled=False, dropout_keep_prob=-1,
                     is_training=True):
    """
    This block is responsible for a convolution transpose 2D followed by optional (non-linearity, dropout, max-pooling).
    Note that: "is_training" should be passed by a correct value based on being in either training or testing.
    :param name: (string) The name scope provided by the upper tf.name_scope('name') as scope.
    :param x: (tf.tensor) The input to the layer (N, H, W, C).
    :param output_shape: (Array) [N, H', W', C'] The shape of the corresponding output.
    :param kernel_size: (integer tuple) The size of the convolving kernel.
    :param padding: (string) The amount of padding required.
    :param stride: (integer tuple) The stride required.
    :param l2_strength:(weight decay) (float) L2 regularization parameter.
    :param bias: (float) Amount of bias.
    :param activation: (tf.graph operator) The activation function applied after the convolution operation. If None, linear is applied.
    :param batchnorm_enabled: (boolean) for enabling batch normalization.
    :param dropout_keep_prob: (float) for the probability of keeping neurons. If equals -1, it means no dropout
    :param is_training: (boolean) to diff. between training and testing (important for batch normalization and dropout)
    :return out: The output of the layer. (output_shape[0], output_shape[1], output_shape[2], output_shape[3])
    """
    with tf.variable_scope(name) as scope:
        conv_o_b = conv2d_transpose_p(name=scope, x=x, w=w, output_shape=output_shape, kernel_size=kernel_size,
                                      padding=padding, stride=stride,
                                      l2_strength=l2_strength,
                                      bias=bias)

        if batchnorm_enabled:
            conv_o_bn = tf.layers.batch_normalization(conv_o_b, training=is_training)
            if not activation:
                conv_a = conv_o_bn
            else:
                conv_a = activation(conv_o_bn)
        else:
            if not activation:
                conv_a = conv_o_b
            else:
                conv_a = activation(conv_o_b)

        if dropout_keep_prob != -1:
            conv_o_dr = tf.nn.dropout(conv_a, dropout_keep_prob)
        else:
            conv_o_dr = conv_a

        conv_o = conv_o_dr

    return conv_o
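To close, a compact end-to-end sketch showing how blocks of this kind assemble into a graph and run in a TF 1.x session. It is a minimal, assumption-laden illustration: the shapes, layer names, and the availability of `conv2d` and `dense` helpers as defined in the examples above are assumptions, not code from the original repository.

```python
# Hypothetical end-to-end graph construction and execution (TF 1.x style).
import numpy as np
import tensorflow as tf

tf.reset_default_graph()
images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='images')
is_training = tf.placeholder(tf.bool, name='is_training')

h = conv2d('block1', images, num_filters=16, activation=tf.nn.relu,
           batchnorm_enabled=True, max_pool_enabled=True,
           dropout_keep_prob=0.9, is_training=is_training)
h = tf.reshape(h, [-1, 16 * 16 * 16])            # flatten; spatial size assumed after 2x2 pooling
logits = dense('logits', h, output_dim=10, is_training=is_training)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(logits, feed_dict={images: np.zeros((2, 32, 32, 3), np.float32),
                                      is_training: False})
    print(out.shape)  # (2, 10)
```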