This article collects typical usage examples of keras.initializers.RandomUniform in Python. If you are wondering what initializers.RandomUniform does, how to call it, or what real code using it looks like, the curated examples below may help. You can also read more about the module it lives in, keras.initializers.
The following 15 code examples of initializers.RandomUniform are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
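Before the repository examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic API: pass minval, maxval, and optionally seed, then hand the instance to any layer argument that accepts an initializer.

# Minimal sketch of the RandomUniform API (illustrative only).
from keras import initializers
from keras.layers import Dense

# Sample weights uniformly from [-0.05, 0.05]; seed makes the draw reproducible.
init = initializers.RandomUniform(minval=-0.05, maxval=0.05, seed=42)
layer = Dense(64, kernel_initializer=init, bias_initializer='zeros')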
Example 1: __init__
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import RandomUniform [as alias]
def __init__(self, t_left_initializer='zeros',
             a_left_initializer=initializers.RandomUniform(minval=0, maxval=1),
             t_right_initializer=initializers.RandomUniform(minval=0, maxval=5),
             a_right_initializer='ones',
             shared_axes=None,
             **kwargs):
    super(SReLU, self).__init__(**kwargs)
    self.supports_masking = True
    # initializers.get accepts either a string name or an Initializer instance.
    self.t_left_initializer = initializers.get(t_left_initializer)
    self.a_left_initializer = initializers.get(a_left_initializer)
    self.t_right_initializer = initializers.get(t_right_initializer)
    self.a_right_initializer = initializers.get(a_right_initializer)
    if shared_axes is None:
        self.shared_axes = None
    elif not isinstance(shared_axes, (list, tuple)):
        self.shared_axes = [shared_axes]
    else:
        self.shared_axes = list(shared_axes)
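The constructor above mixes string names ('zeros', 'ones') with RandomUniform instances and normalizes both through initializers.get. A small standalone sketch of that behaviour, based on the general Keras initializers API rather than the SReLU code itself:

from keras import initializers

# Both forms return an Initializer object.
by_name = initializers.get('zeros')
by_instance = initializers.get(initializers.RandomUniform(minval=0, maxval=1))
print(type(by_name).__name__, type(by_instance).__name__)  # Zeros RandomUniform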
Example 2: build
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import RandomUniform [as alias]
def build(self, input_shape):
    # Hadamard matrices exist for powers of two, so round the size up accordingly.
    hadamard_size = 2 ** int(math.ceil(math.log(max(input_shape[1], self.output_dim), 2)))
    self.hadamard = K.constant(
        value=hadamard(hadamard_size, dtype=np.int8)[:input_shape[1], :self.output_dim])
    init_scale = 1. / math.sqrt(self.output_dim)
    self.scale = self.add_weight(name='scale',
                                 shape=(1,),
                                 initializer=Constant(init_scale),
                                 trainable=True)
    if self.use_bias:
        # Bias drawn uniformly from the symmetric range [-init_scale, init_scale].
        self.bias = self.add_weight(name='bias',
                                    shape=(self.output_dim,),
                                    initializer=RandomUniform(-init_scale, init_scale),
                                    trainable=True)
    super(HadamardClassifier, self).build(input_shape)
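Here RandomUniform is called with positional arguments, which map to minval and maxval, giving a bias range of plus or minus 1/sqrt(output_dim). A quick sketch with a hypothetical output_dim of 256, just to make the numbers concrete:

import math
from keras.initializers import RandomUniform

output_dim = 256                         # hypothetical class count
init_scale = 1. / math.sqrt(output_dim)  # 0.0625
# Positional arguments correspond to minval and maxval.
bias_init = RandomUniform(-init_scale, init_scale)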
Example 3: build
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import RandomUniform [as alias]
def build(self):
    """
    Build the model structure.

    aNMM model based on bin weighting and query term attention.
    """
    # query is [batch_size, left_text_len]
    # doc is [batch_size, right_text_len, bin_num]
    query, doc = self._make_inputs()

    embedding = self._make_embedding_layer()
    q_embed = embedding(query)

    # Per-query-term attention score, softmax-normalized over the query length.
    q_attention = keras.layers.Dense(
        1, kernel_initializer=RandomUniform(), use_bias=False)(q_embed)
    q_text_len = self._params['input_shapes'][0][0]
    q_attention = keras.layers.Lambda(
        lambda x: softmax(x, axis=1),
        output_shape=(q_text_len,)
    )(q_attention)

    # Bin-weighting network over the document side.
    d_bin = keras.layers.Dropout(
        rate=self._params['dropout_rate'])(doc)
    for layer_id in range(self._params['num_layers'] - 1):
        d_bin = keras.layers.Dense(
            self._params['hidden_sizes'][layer_id],
            kernel_initializer=RandomUniform())(d_bin)
        d_bin = keras.layers.Activation('tanh')(d_bin)
    d_bin = keras.layers.Dense(
        self._params['hidden_sizes'][self._params['num_layers'] - 1])(
        d_bin)
    d_bin = keras.layers.Reshape((q_text_len,))(d_bin)

    q_attention = keras.layers.Reshape((q_text_len,))(q_attention)
    score = keras.layers.Dot(axes=[1, 1])([d_bin, q_attention])
    x_out = self._make_output_layer()(score)
    self._backend = keras.Model(inputs=[query, doc], outputs=x_out)
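RandomUniform() with no arguments uses the Keras defaults minval=-0.05 and maxval=0.05, so the attention and bin-weighting Dense layers above start with small symmetric weights. A tiny sketch of the equivalent explicit call (the single-unit layer simply mirrors the attention head above):

import keras
from keras.initializers import RandomUniform

# Equivalent to kernel_initializer=RandomUniform() in the model above.
attention_dense = keras.layers.Dense(
    1, kernel_initializer=RandomUniform(minval=-0.05, maxval=0.05), use_bias=False)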
Example 4: test_uniform
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import RandomUniform [as alias]
def test_uniform(tensor_shape):
    _runner(initializers.RandomUniform(minval=-1, maxval=1), tensor_shape,
            target_mean=0., target_max=1, target_min=-1)
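The test relies on a shared _runner helper from the Keras test suite, which draws a tensor with the initializer and checks simple statistics against the targets. A self-contained sketch of what such a check amounts to (the helper name, shape, and tolerance below are assumptions, not the actual Keras test code):

import numpy as np
from keras import backend as K
from keras import initializers

def check_uniform(shape=(200, 100), lim=1.0, tol=0.05):
    # Draw one sample tensor and verify mean ~ 0 and the bounds [-lim, lim].
    init = initializers.RandomUniform(minval=-lim, maxval=lim)
    values = K.eval(init(shape))
    assert abs(values.mean()) < tol
    assert values.max() <= lim and values.min() >= -lim

check_uniform()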
Example 5: __init__
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import RandomUniform [as alias]
def __init__(self, config, ntags=None):
    # Word input is fed directly with word embeddings by the data generator.
    word_input = Input(shape=(None, config.word_embedding_size), name='word_input')

    # Character-based embedding.
    char_input = Input(shape=(None, config.max_char_length), dtype='int32', name='char_input')
    char_embeddings = TimeDistributed(Embedding(input_dim=config.char_vocab_size,
                                                output_dim=config.char_embedding_size,
                                                #mask_zero=True,
                                                #embeddings_initializer=RandomUniform(minval=-0.5, maxval=0.5),
                                                name='char_embeddings'
                                                ))(char_input)
    chars = TimeDistributed(Bidirectional(LSTM(config.num_char_lstm_units, return_sequences=False)))(char_embeddings)

    # Sequence length is not used for the moment (but is passed along for f1 computation).
    length_input = Input(batch_shape=(None, 1), dtype='int32', name='length_input')

    # Combine character and word embeddings.
    x = Concatenate()([word_input, chars])
    x = Dropout(config.dropout)(x)

    x = Bidirectional(LSTM(units=config.num_word_lstm_units,
                           return_sequences=True,
                           recurrent_dropout=config.recurrent_dropout))(x)
    x = Dropout(config.dropout)(x)
    x = Dense(config.num_word_lstm_units, activation='tanh')(x)
    x = Dense(ntags)(x)
    self.crf = ChainCRF()
    pred = self.crf(x)

    self.model = Model(inputs=[word_input, char_input, length_input], outputs=[pred])
    self.config = config
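The RandomUniform call in this example is commented out, but it shows the typical pattern for character embeddings: initialize the embedding table uniformly in a small symmetric range. A minimal sketch of that pattern in isolation (vocabulary and dimension values are illustrative only):

from keras.layers import Embedding
from keras.initializers import RandomUniform

# Hypothetical sizes; the commented-out line above uses the range [-0.5, 0.5].
char_emb = Embedding(input_dim=120, output_dim=25,
                     embeddings_initializer=RandomUniform(minval=-0.5, maxval=0.5),
                     name='char_embeddings')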
Example 6: build
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import RandomUniform [as alias]
def build(self, input_shape):
    # The trainable "image" itself is the layer's only weight, initialized with large uniform noise.
    init = initializers.RandomUniform(minval=-50, maxval=50, seed=None)
    self.kernel = self.add_weight(name='kernel', shape=(self.height, self.width, 3),
                                  initializer=init, trainable=True)
    super(InputReflect, self).build(input_shape)
Example 7: add_embed_layer
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import RandomUniform [as alias]
def add_embed_layer(vocab_emb, vocab_size, embed_size, train_embed, dropout_rate):
    emb_layer = Sequential()
    if vocab_emb is not None:
        print("Embedding with initialized weights")
        print(vocab_size, embed_size)
        emb_layer.add(Embedding(input_dim=vocab_size, output_dim=embed_size, weights=[vocab_emb],
                                trainable=train_embed, mask_zero=False))
    else:
        print("Embedding with random weights")
        emb_layer.add(Embedding(input_dim=vocab_size, output_dim=embed_size, trainable=True, mask_zero=False,
                                embeddings_initializer=RandomUniform(-0.05, 0.05)))
    emb_layer.add(SpatialDropout1D(dropout_rate))
    return emb_layer
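A possible call of the helper above, with made-up argument values, just to show the two branches: pass a pretrained matrix to reuse it, or None to fall back to the RandomUniform(-0.05, 0.05) initialization.

import numpy as np

# Hypothetical sizes; the random matrix merely stands in for real pretrained vectors.
vocab_size, embed_size = 10000, 300
pretrained = np.random.rand(vocab_size, embed_size).astype('float32')

emb_pretrained = add_embed_layer(pretrained, vocab_size, embed_size, train_embed=False, dropout_rate=0.2)
emb_random = add_embed_layer(None, vocab_size, embed_size, train_embed=True, dropout_rate=0.2)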
Example 8: build
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import RandomUniform [as alias]
def build(self, input_shape):
    assert len(input_shape) >= 2
    input_dim = input_shape[1]

    if self.H == 'Glorot':
        # Glorot-style limit: H = sqrt(1.5 / (fan_in + fan_out)).
        self.H = np.float32(np.sqrt(1.5 / (input_dim + self.units)))
        #print('Glorot H: {}'.format(self.H))
    if self.kernel_lr_multiplier == 'Glorot':
        self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (input_dim + self.units)))
        #print('Glorot learning rate multiplier: {}'.format(self.kernel_lr_multiplier))

    self.kernel_constraint = Clip(-self.H, self.H)
    self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
    self.kernel = self.add_weight(shape=(input_dim, self.units),
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)

    if self.use_bias:
        self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
        self.bias = self.add_weight(shape=(self.units,),  # one bias per output unit
                                    initializer=self.bias_initializer,
                                    name='bias',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        self.lr_multipliers = [self.kernel_lr_multiplier]
        self.bias = None

    self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
    self.built = True
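A quick worked example of the Glorot-style limit used above, with hypothetical layer sizes (input_dim=784, units=512): H = sqrt(1.5 / (784 + 512)) is about 0.034, and the matching learning-rate multiplier is simply 1/H, about 29.4.

import numpy as np

input_dim, units = 784, 512             # hypothetical fan-in / fan-out
H = np.sqrt(1.5 / (input_dim + units))  # ~0.0340: uniform weights drawn from [-H, H]
lr_multiplier = 1. / H                  # ~29.4: compensates for the small weight scale
print(H, lr_multiplier)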
Example 9: build
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import RandomUniform [as alias]
def build(self, input_shape):
    assert len(input_shape) >= 2
    input_dim = input_shape[1]

    if self.H == 'Glorot':
        self.H = np.float32(np.sqrt(1.5 / (input_dim + self.units)))
        #print('Glorot H: {}'.format(self.H))
    if self.kernel_lr_multiplier == 'Glorot':
        self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (input_dim + self.units)))
        #print('Glorot learning rate multiplier: {}'.format(self.lr_multiplier))

    self.kernel_constraint = Clip(-self.H, self.H)
    self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
    self.kernel = self.add_weight(shape=(input_dim, self.units),
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)

    if self.use_bias:
        self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
        self.bias = self.add_weight(shape=(self.units,),
                                    initializer=self.bias_initializer,
                                    name='bias',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        self.lr_multipliers = [self.kernel_lr_multiplier]
        self.bias = None

    self.built = True
Example 10: __init__
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import RandomUniform [as alias]
def __init__(self,
             input_shape,
             n_classes=None,
             init=RandomUniform(minval=-0.01, maxval=0.01),
             y=None,
             model='cnn',
             vocab_sz=None,
             word_embedding_dim=100,
             embedding_matrix=None
             ):

    super(WSTC, self).__init__()

    self.input_shape = input_shape
    self.y = y
    self.n_classes = n_classes
    if model == 'cnn':
        self.classifier = ConvolutionLayer(self.input_shape[1], n_classes=n_classes,
                                           vocab_sz=vocab_sz, embedding_matrix=embedding_matrix,
                                           word_embedding_dim=word_embedding_dim, init=init)
    elif model == 'rnn':
        self.classifier = HierAttLayer(self.input_shape, n_classes=n_classes,
                                       vocab_sz=vocab_sz, embedding_matrix=embedding_matrix,
                                       word_embedding_dim=word_embedding_dim)
    self.model = self.classifier
    self.sup_list = {}
Example 11: __init__
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import RandomUniform [as alias]
def __init__(self,
             input_shape,
             class_tree,
             max_level,
             sup_source,
             init=RandomUniform(minval=-0.01, maxval=0.01),
             y=None,
             vocab_sz=None,
             word_embedding_dim=100,
             blocking_perc=0,
             block_thre=1.0,
             block_level=1,
             ):

    super(WSTC, self).__init__()

    self.input_shape = input_shape
    self.class_tree = class_tree
    self.y = y
    if type(y) == dict:
        self.eval_set = np.array([ele for ele in y])
    else:
        self.eval_set = None
    self.vocab_sz = vocab_sz
    self.block_level = block_level
    self.block_thre = block_thre
    self.block_label = {}
    self.siblings_map = {}
    self.x = Input(shape=(input_shape[1],), name='input')
    self.model = []
    self.sup_dict = {}
    if sup_source == 'docs':
        n_classes = class_tree.get_size() - 1
        leaves = class_tree.find_leaves()
        for leaf in leaves:
            # Build a multi-hot supervision vector for each labeled document index.
            current = np.zeros(n_classes)
            for i in class_tree.name2label(leaf.name):
                current[i] = 1.0
            for idx in leaf.sup_idx:
                self.sup_dict[idx] = current
# 需要导入模块: from keras import initializers [as 别名]
# 或者: from keras.initializers import RandomUniform [as 别名]
def instantiate(self, class_tree, filter_sizes=[2, 3, 4, 5], num_filters=20, word_trainable=False,
word_embedding_dim=100, hidden_dim=20, act='relu', init=RandomUniform(minval=-0.01, maxval=0.01)):
num_children = len(class_tree.children)
if num_children <= 1:
class_tree.model = None
else:
class_tree.model = ConvolutionLayer(self.x, self.input_shape[1], filter_sizes=filter_sizes,
n_classes=num_children,
vocab_sz=self.vocab_sz, embedding_matrix=class_tree.embedding,
hidden_dim=hidden_dim,
word_embedding_dim=word_embedding_dim, num_filters=num_filters,
init=init,
word_trainable=word_trainable, act=act)
Example 13: build
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import RandomUniform [as alias]
def build(self, input_shape):
    assert len(input_shape) >= 2
    input_dim = input_shape[1]

    if self.H == 'Glorot':
        self.H = np.float32(np.sqrt(1.5 / (input_dim + self.units)))
        #print('Glorot H: {}'.format(self.H))
    if self.kernel_lr_multiplier == 'Glorot':
        self.kernel_lr_multiplier = np.float32(1. / np.sqrt(1.5 / (input_dim + self.units)))
        #print('Glorot learning rate multiplier: {}'.format(self.kernel_lr_multiplier))

    self.kernel_constraint = Clip(-self.H, self.H)
    self.kernel_initializer = initializers.RandomUniform(-self.H, self.H)
    self.kernel = self.add_weight(shape=(input_dim, self.units),
                                  initializer=self.kernel_initializer,
                                  name='kernel',
                                  regularizer=self.kernel_regularizer,
                                  constraint=self.kernel_constraint)

    if self.use_bias:
        self.lr_multipliers = [self.kernel_lr_multiplier, self.bias_lr_multiplier]
        self.bias = self.add_weight(shape=(self.units,),
                                    initializer=self.bias_initializer,
                                    name='bias',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint)
    else:
        self.lr_multipliers = [self.kernel_lr_multiplier]
        self.bias = None

    self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
    self.built = True
Example 14: build
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import RandomUniform [as alias]
def build(self, input_shape):
    assert len(input_shape) >= 2
    input_dim = input_shape[-1]

    # Glorot-uniform-style limits, scaled by the configured initializer scales.
    expert_init_lim = np.sqrt(3.0 * self.expert_kernel_initializer_scale / (max(1., float(input_dim + self.units) / 2)))
    gating_init_lim = np.sqrt(3.0 * self.gating_kernel_initializer_scale / (max(1., float(input_dim + 1) / 2)))

    self.expert_kernel = self.add_weight(shape=(input_dim, self.units, self.n_experts),
                                         initializer=RandomUniform(minval=-expert_init_lim, maxval=expert_init_lim),
                                         name='expert_kernel',
                                         regularizer=self.expert_kernel_regularizer,
                                         constraint=self.expert_kernel_constraint)

    self.gating_kernel = self.add_weight(shape=(input_dim, self.n_experts),
                                         initializer=RandomUniform(minval=-gating_init_lim, maxval=gating_init_lim),
                                         name='gating_kernel',
                                         regularizer=self.gating_kernel_regularizer,
                                         constraint=self.gating_kernel_constraint)

    if self.use_expert_bias:
        self.expert_bias = self.add_weight(shape=(self.units, self.n_experts),
                                           initializer=self.expert_bias_initializer,
                                           name='expert_bias',
                                           regularizer=self.expert_bias_regularizer,
                                           constraint=self.expert_bias_constraint)
    else:
        self.expert_bias = None

    if self.use_gating_bias:
        self.gating_bias = self.add_weight(shape=(self.n_experts,),
                                           initializer=self.gating_bias_initializer,
                                           name='gating_bias',
                                           regularizer=self.gating_bias_regularizer,
                                           constraint=self.gating_bias_constraint)
    else:
        self.gating_bias = None

    self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
    self.built = True
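When the initializer scale is 1.0, the limit sqrt(3 * scale / ((fan_in + fan_out) / 2)) is exactly the Glorot (Xavier) uniform limit, so the code above reproduces glorot_uniform behaviour while still allowing the scale to be tuned. A small numeric sketch with hypothetical sizes:

import numpy as np

input_dim, units, scale = 128, 64, 1.0   # hypothetical sizes
lim = np.sqrt(3.0 * scale / (max(1., float(input_dim + units) / 2)))
glorot_lim = np.sqrt(6.0 / (input_dim + units))  # standard Glorot uniform limit
print(lim, glorot_lim)                           # both ~0.1768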
Example 15: __init__
# Required import: from keras import initializers [as alias]
# Or: from keras.initializers import RandomUniform [as alias]
def __init__(self, output_dim, initializer=None, betas=1.0, **kwargs):
    self.output_dim = output_dim
    self.init_betas = betas
    if not initializer:
        # Default: RBF centers drawn uniformly from [0, 1].
        self.initializer = RandomUniform(0.0, 1.0)
    else:
        self.initializer = initializer
    super(RBFLayer, self).__init__(**kwargs)
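As in Example 2, the positional arguments map to minval and maxval, so RandomUniform(0.0, 1.0) draws from [0, 1]. A short usage sketch, assuming the RBFLayer class from the example above is available; the custom range is purely illustrative:

from keras.initializers import RandomUniform

# Default behaviour: centers initialized uniformly in [0, 1].
rbf_default = RBFLayer(10)
# Override with a custom symmetric range, e.g. when inputs are standardized.
rbf_custom = RBFLayer(10, initializer=RandomUniform(minval=-1.0, maxval=1.0))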