This article collects typical usage examples of the Python method tensorflow.keras.initializers.RandomNormal. If you are wondering what initializers.RandomNormal does, how to use it, or are looking for concrete examples of it, the curated code samples below may help. You can also explore further usage examples of the containing module, tensorflow.keras.initializers.
The following shows 9 code examples of the initializers.RandomNormal method, sorted by popularity by default.
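Before the examples, here is a minimal, self-contained sketch of the RandomNormal API itself: the initializer draws values from a normal distribution with the given mean and standard deviation, and can either be called directly on a shape or passed to a layer's kernel_initializer argument. The shapes and values below are purely illustrative.

from tensorflow.keras import initializers, layers

# Draw values from N(mean=0.0, stddev=0.02); seed makes the draw reproducible
init = initializers.RandomNormal(mean=0.0, stddev=0.02, seed=42)

# Option 1: call the initializer directly to sample a tensor of a given shape
kernel = init(shape=(3, 3, 64, 128))
print(kernel.shape)  # (3, 3, 64, 128)

# Option 2: pass it to a layer, which is the pattern used in the examples below
conv = layers.Conv2D(64, kernel_size=3, padding="same", kernel_initializer=init)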
Example 1: conv_lb
# Required import: from tensorflow.keras import initializers [as alias]
# Or alternatively: from tensorflow.keras.initializers import RandomNormal [as alias]
def conv_lb(prev_layer, num_filters, layer_name, pad="same", batch_norm=True):
    """
    conv_lb
    Wraps several operations in one function for readability:
    applies a convolution, optional batch normalization, then LeakyReLU.
    Input: a single layer (prev_layer) plus module-level constants
    Output: a single layer
    """
    weight_init = RandomNormal(stddev=0.02)
    new_layer = Conv2D(
        num_filters, FILTER, strides=STRIDE, padding=pad, kernel_initializer=weight_init
    )(prev_layer)
    if batch_norm:
        new_layer = BatchNormalization()(new_layer, training=True)
    new_layer = LeakyReLU(alpha=LEAKY_RELU_ALPHA, name=layer_name)(new_layer)
    return new_layer
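conv_lb depends on module-level constants (FILTER, STRIDE, LEAKY_RELU_ALPHA) and on layer imports that are not shown in the snippet. The sketch below fills those in with plausible pix2pix-style values, so treat them as assumptions rather than the original project's settings, and wires two blocks into a small functional-API model (assuming conv_lb above is defined in the same script).

from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, LeakyReLU
from tensorflow.keras.initializers import RandomNormal
from tensorflow.keras.models import Model

# Assumed constants (not shown in the original snippet)
FILTER = 4              # 4x4 kernels
STRIDE = 2              # halve the spatial resolution at each block
LEAKY_RELU_ALPHA = 0.2

inputs = Input(shape=(256, 256, 3))
x = conv_lb(inputs, 64, layer_name="layer_1", batch_norm=False)
x = conv_lb(x, 128, layer_name="layer_2")
encoder_stub = Model(inputs, x, name="encoder_stub")
encoder_stub.summary()  # 256x256x3 -> 128x128x64 -> 64x64x128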
Example 2: deconv_b
# Required import: from tensorflow.keras import initializers [as alias]
# Or alternatively: from tensorflow.keras.initializers import RandomNormal [as alias]
def deconv_b(prev_layer, num_filters, batch_norm=True):
    """
    deconv_b
    Wraps several operations in one function for readability:
    applies a ReLU-activated transposed convolution, then optional batch normalization.
    Input: a single layer (prev_layer) plus module-level constants
    Output: a single layer
    """
    weight_init = RandomNormal(stddev=0.02)
    new_layer = Conv2DTranspose(
        num_filters,
        FILTER,
        strides=STRIDE,
        padding="same",
        activation="relu",
        kernel_initializer=weight_init,
    )(prev_layer)
    if batch_norm:
        new_layer = BatchNormalization()(new_layer, training=True)
    return new_layer
Example 3: build
# Required import: from tensorflow.keras import initializers [as alias]
# Or alternatively: from tensorflow.keras.initializers import RandomNormal [as alias]
def build(self, input_shape):
    self._shape = input_shape
    # normalize the format of depth_v and depth_k
    self.depth_k, self.depth_v = _normalize_depth_vars(self.depth_k, self.depth_v,
                                                       input_shape)
    if self.axis == 1:
        _, channels, height, width = input_shape
    else:
        _, height, width, channels = input_shape
    if self.relative:
        dk_per_head = self.depth_k // self.num_heads
        # Warn when depth_k // num_heads rounds to zero: the relative
        # embeddings below would then have zero width.
        if dk_per_head == 0:
            print('dk per head', dk_per_head)
        self.key_relative_w = self.add_weight('key_rel_w',
                                              shape=[2 * width - 1, dk_per_head],
                                              initializer=initializers.RandomNormal(
                                                  stddev=dk_per_head ** -0.5))
        self.key_relative_h = self.add_weight('key_rel_h',
                                              shape=[2 * height - 1, dk_per_head],
                                              initializer=initializers.RandomNormal(
                                                  stddev=dk_per_head ** -0.5))
    else:
        self.key_relative_w = None
        self.key_relative_h = None
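In this example the relative-position embeddings are initialized with stddev=dk_per_head ** -0.5, i.e. one over the square root of the per-head key depth, mirroring the scaling of dot-product attention logits, and there is one row per possible relative offset along each spatial axis (hence 2 * width - 1 and 2 * height - 1 rows). A standalone sketch of those shapes, with illustrative sizes:

import tensorflow as tf
from tensorflow.keras import initializers

height, width = 16, 16        # spatial size of the feature map (illustrative)
depth_k, num_heads = 64, 8    # total key depth and number of heads (illustrative)
dk_per_head = depth_k // num_heads

init = initializers.RandomNormal(stddev=dk_per_head ** -0.5)

# One learnable embedding per relative offset in [-(W-1), W-1] and [-(H-1), H-1]
key_rel_w = tf.Variable(init(shape=(2 * width - 1, dk_per_head)), name="key_rel_w")
key_rel_h = tf.Variable(init(shape=(2 * height - 1, dk_per_head)), name="key_rel_h")
print(key_rel_w.shape, key_rel_h.shape)  # (31, 8) (31, 8)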
Example 4: concat_deconv
# Required import: from tensorflow.keras import initializers [as alias]
# Or alternatively: from tensorflow.keras.initializers import RandomNormal [as alias]
def concat_deconv(prev_layer, skip_layer, num_filters, batch_norm=True, dropout=True):
    """
    concat_deconv
    Wraps several operations in one function for readability:
    applies a ReLU-activated transposed convolution, concatenates the result
    with skip_layer, then optional batch normalization and dropout.
    Input: two layers (prev_layer, skip_layer) plus module-level constants
    Output: a single layer
    """
    weight_init = RandomNormal(stddev=0.02)
    new_layer = Conv2DTranspose(
        num_filters,
        FILTER,
        strides=STRIDE,
        padding="same",
        activation="relu",
        kernel_initializer=weight_init,
    )(prev_layer)
    new_layer = Concatenate()([skip_layer, new_layer])
    if batch_norm:
        new_layer = BatchNormalization()(new_layer, training=True)
    if dropout:
        new_layer = Dropout(rate=DROPOUT_RATE)(new_layer, training=True)
    return new_layer
###########################################################
# Generator, U-net
###########################################################
Example 5: discriminator
# Required import: from tensorflow.keras import initializers [as alias]
# Or alternatively: from tensorflow.keras.initializers import RandomNormal [as alias]
def discriminator(summary=False):
    """
    Decides whether an image is real or generated. Used in
    training the generator.
    """
    input_img = Input(shape=IMAGE_SIZE)    # image put into generator
    unknown_img = Input(shape=IMAGE_SIZE)  # either real image or generated image
    weight_init = RandomNormal(stddev=0.02)
    input_tensor = Concatenate()([input_img, unknown_img])
    d = conv_lb(input_tensor, 64, layer_name="layer_1", batch_norm=False)
    d = conv_lb(d, 128, layer_name="layer_2")
    d = conv_lb(d, 256, layer_name="layer_3")
    d = conv_lb(d, 512, layer_name="layer_4")
    d = Conv2D(
        1,
        FILTER,
        padding="same",
        kernel_initializer=weight_init,
        activation="sigmoid",
        name="layer_6",
    )(d)
    # Define discriminator model
    dis_model = Model(inputs=[input_img, unknown_img], outputs=d, name="Discriminator")
    if summary:
        dis_model.summary()
    return dis_model
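A usage sketch for the discriminator, assuming the conv_lb helper, the layer imports, and the constants from the earlier sketches are in scope, and that IMAGE_SIZE is the usual 256x256 RGB shape; the optimizer and loss are typical pix2pix choices, not necessarily what the original project uses.

import tensorflow as tf

# Assumed constant (not shown in the snippet)
IMAGE_SIZE = (256, 256, 3)

dis_model = discriminator(summary=True)
dis_model.compile(
    loss="binary_crossentropy",
    optimizer=tf.keras.optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
)
# With a sigmoid Conv2D head, the output is a grid of per-patch real/fake
# scores (a PatchGAN) rather than a single scalar per image.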
###########################################################
# General Utility Functions
###########################################################
Example 6: __init__
# Required import: from tensorflow.keras import initializers [as alias]
# Or alternatively: from tensorflow.keras.initializers import RandomNormal [as alias]
def __init__(self, width, depth, num_anchors=9, separable_conv=True, freeze_bn=False, detect_quadrangle=False, **kwargs):
    super(BoxNet, self).__init__(**kwargs)
    self.width = width
    self.depth = depth
    self.num_anchors = num_anchors
    self.separable_conv = separable_conv
    self.detect_quadrangle = detect_quadrangle
    num_values = 9 if detect_quadrangle else 4
    options = {
        'kernel_size': 3,
        'strides': 1,
        'padding': 'same',
        'bias_initializer': 'zeros',
    }
    if separable_conv:
        kernel_initializer = {
            'depthwise_initializer': initializers.VarianceScaling(),
            'pointwise_initializer': initializers.VarianceScaling(),
        }
        options.update(kernel_initializer)
        self.convs = [layers.SeparableConv2D(filters=width, name=f'{self.name}/box-{i}', **options)
                      for i in range(depth)]
        self.head = layers.SeparableConv2D(filters=num_anchors * num_values,
                                           name=f'{self.name}/box-predict', **options)
    else:
        kernel_initializer = {
            'kernel_initializer': initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None)
        }
        options.update(kernel_initializer)
        self.convs = [layers.Conv2D(filters=width, name=f'{self.name}/box-{i}', **options) for i in range(depth)]
        self.head = layers.Conv2D(filters=num_anchors * num_values, name=f'{self.name}/box-predict', **options)
    self.bns = [
        [layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name=f'{self.name}/box-{i}-bn-{j}')
         for j in range(3, 8)]
        for i in range(depth)]
    # self.bns = [[BatchNormalization(freeze=freeze_bn, name=f'{self.name}/box-{i}-bn-{j}') for j in range(3, 8)]
    #             for i in range(depth)]
    self.relu = layers.Lambda(lambda x: tf.nn.swish(x))
    self.reshape = layers.Reshape((-1, num_values))
    self.level = 0
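BoxNet's plain-convolution branch uses RandomNormal(mean=0.0, stddev=0.01), the small-variance initialization commonly used for detection heads. The sketch below only instantiates the head to show which names it expects; MOMENTUM and EPSILON are assumed values, BoxNet is assumed to subclass a Keras layer or model, and its call method is not part of this example.

import tensorflow as tf

# Assumed batch-normalization constants (not shown in the snippet)
MOMENTUM = 0.997
EPSILON = 1e-4

box_net = BoxNet(width=64, depth=3, num_anchors=9,
                 separable_conv=False, name="box_net")
print(len(box_net.convs))    # 3 intermediate conv layers
print(box_net.head.filters)  # 9 anchors * 4 box values = 36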
Example 7: cvpr2018_net
# Required import: from tensorflow.keras import initializers [as alias]
# Or alternatively: from tensorflow.keras.initializers import RandomNormal [as alias]
def cvpr2018_net(vol_size, enc_nf, dec_nf, indexing='ij', name="voxelmorph"):
    """
    From https://github.com/voxelmorph/voxelmorph.
    U-net architecture for the VoxelMorph models presented in the CVPR 2018 paper.
    You may need to modify this code (e.g., the number of layers) to suit your project's needs.
    :param vol_size: volume size, e.g. (256, 256, 256)
    :param enc_nf: list of encoder filters; right now it needs to be 1x4,
           e.g. [16, 32, 32, 32]
    :param dec_nf: list of decoder filters; right now it must be 1x6 (like voxelmorph-1) or 1x7 (voxelmorph-2)
    :return: the keras model
    """
    import tensorflow.keras.layers as KL
    ndims = len(vol_size)
    assert ndims == 3, "ndims should be 3. found: %d" % ndims
    src = Input(vol_size + (1,), name='input_src')
    tgt = Input(vol_size + (1,), name='input_tgt')
    input_stack = Concatenate(name='concat_inputs')([src, tgt])
    # get the core model
    x = unet3D(input_stack, img_shape=vol_size, out_im_chans=ndims, nf_enc=enc_nf, nf_dec=dec_nf)
    # transform the results into a flow field.
    Conv = getattr(KL, 'Conv%dD' % ndims)
    flow = Conv(ndims, kernel_size=3, padding='same', name='flow',
                kernel_initializer=RandomNormal(mean=0.0, stddev=1e-5))(x)
    # warp the source with the flow
    y = SpatialTransformer(interp_method='linear', indexing=indexing)([src, flow])
    # prepare model
    model = Model(inputs=[src, tgt], outputs=[y, flow], name=name)
    return model
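Note that the flow layer above is initialized with RandomNormal(mean=0.0, stddev=1e-5), so the predicted deformation starts out near zero, i.e. close to an identity warp. A usage sketch with a typical VoxelMorph-2 filter configuration; it assumes unet3D and SpatialTransformer from the voxelmorph codebase are importable alongside this function, and the volume size is illustrative.

# Typical voxelmorph-2 filter configuration (encoder 1x4, decoder 1x7)
vol_size = (160, 192, 224)
enc_nf = [16, 32, 32, 32]
dec_nf = [32, 32, 32, 32, 32, 16, 16]

model = cvpr2018_net(vol_size, enc_nf, dec_nf, indexing='ij')
model.summary()
# Outputs: [warped source volume, dense flow field of shape vol_size + (3,)]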
##############################################################################
# Appearance transform model
##############################################################################
Example 8: conv
# Required import: from tensorflow.keras import initializers [as alias]
# Or alternatively: from tensorflow.keras.initializers import RandomNormal [as alias]
def conv(x, outsize, kernel_size, strides_=1, padding_='same', activation=None):
    return Conv2D(outsize, kernel_size, strides=strides_, padding=padding_,
                  kernel_initializer=RandomNormal(stddev=0.001),
                  use_bias=False, activation=activation)(x)
Example 9: generator
# Required import: from tensorflow.keras import initializers [as alias]
# Or alternatively: from tensorflow.keras.initializers import RandomNormal [as alias]
def generator(summary=False):
    """
    Generates an image from the input image using a U-net.
    Training focuses on making the generator as good as
    possible, because only the generator is used at inference.
    Variable naming legend:
    e = encoder
    s = center layer
    d = decoder
    number (1, 2, 3, etc.) = layer number
    a = activation
    b = batch normalization
    c = a concatenated layer
    So d3ab is the layer-3 decoder output after
    activation and batch normalization.
    """
    # -----------------------------------------------------------
    # Encoder
    input_tensor = Input(shape=IMAGE_SIZE)
    e1a = conv_lb(input_tensor, 64, layer_name="layer_1", batch_norm=False)
    e2ba = conv_lb(e1a, 128, layer_name="layer_2")
    e3ba = conv_lb(e2ba, 256, layer_name="layer_3")
    e4ba = conv_lb(e3ba, 512, layer_name="layer_4")
    e5ba = conv_lb(e4ba, 512, layer_name="layer_5")
    e6ba = conv_lb(e5ba, 512, layer_name="layer_6")
    e7ba = conv_lb(e6ba, 512, layer_name="layer_7")
    # -----------------------------------------------------------
    # Center layer
    s8ba = conv_lb(e7ba, 512, layer_name="middle_layer", batch_norm=False)
    # -----------------------------------------------------------
    # Decoder
    d9cba = concat_deconv(s8ba, e7ba, 512)
    d10cba = concat_deconv(d9cba, e6ba, 512)
    d11cba = concat_deconv(d10cba, e5ba, 512)
    d12cba = concat_deconv(d11cba, e4ba, 512, dropout=False)
    d13cba = concat_deconv(d12cba, e3ba, 256, dropout=False)
    d14cba = concat_deconv(d13cba, e2ba, 128, dropout=False)
    d15cba = concat_deconv(d14cba, e1a, 64, dropout=False)
    d16ba = Conv2DTranspose(
        3,
        FILTER,
        strides=STRIDE,
        padding="same",
        activation="tanh",
        kernel_initializer=RandomNormal(stddev=0.02),
    )(d15cba)
    # Define generator model
    gen_model = Model(input_tensor, d16ba, name="Generator")
    if summary:
        gen_model.summary()
    return gen_model
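A quick smoke test of the generator, assuming the helper functions above and the constants assumed in the earlier sketches (IMAGE_SIZE, FILTER, STRIDE, LEAKY_RELU_ALPHA) are in scope; DROPOUT_RATE is an additional assumed constant.

import numpy as np

# Assumed constant (not shown in the snippet)
DROPOUT_RATE = 0.5

gen_model = generator(summary=False)
dummy_batch = np.random.rand(1, 256, 256, 3).astype("float32")
fake_image = gen_model.predict(dummy_batch)
print(fake_image.shape)                     # (1, 256, 256, 3)
print(fake_image.min(), fake_image.max())   # within (-1, 1) because of the tanh output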
###########################################################
# Discriminator
###########################################################