This article collects typical usage examples of the Python method tensorflow.python.keras.initializers.RandomNormal. If you are wondering how to use initializers.RandomNormal in Python, what it is for, or what real code using it looks like, the curated examples below should help. You can also explore further usage examples from the module it belongs to, tensorflow.python.keras.initializers.
Two code examples of the initializers.RandomNormal method are shown below, ordered by popularity by default.
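Before the examples, here is a minimal sketch of how RandomNormal is typically passed to an embedding layer; the layer and the parameter values are illustrative, not taken from the examples themselves:

from tensorflow.python.keras.initializers import RandomNormal
from tensorflow.python.keras.layers import Embedding

# Draw initial embedding weights from a normal distribution with a small stddev;
# fixing the seed makes the initialization reproducible across runs.
init = RandomNormal(mean=0.0, stddev=0.0001, seed=2020)
emb_layer = Embedding(input_dim=1000, output_dim=8, embeddings_initializer=init)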
Example 1: __new__
# Required import: from tensorflow.python.keras import initializers [as alias]
# Or alternatively: from tensorflow.python.keras.initializers import RandomNormal [as alias]
def __new__(cls, name, vocabulary_size, embedding_dim=4, use_hash=False, dtype="int32", embeddings_initializer=None,
            embedding_name=None,
            group_name=DEFAULT_GROUP_NAME, trainable=True):
    # "auto" derives the embedding width from the vocabulary size: 6 * int(vocabulary_size ** 0.25).
    if embedding_dim == "auto":
        embedding_dim = 6 * int(pow(vocabulary_size, 0.25))
    # Default to a small, reproducible RandomNormal initializer for the embedding table.
    if embeddings_initializer is None:
        embeddings_initializer = RandomNormal(mean=0.0, stddev=0.0001, seed=2020)
    if embedding_name is None:
        embedding_name = name
    return super(SparseFeat, cls).__new__(cls, name, vocabulary_size, embedding_dim, use_hash, dtype,
                                          embeddings_initializer,
                                          embedding_name, group_name, trainable)
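A minimal usage sketch for Example 1, assuming SparseFeat is the feature-column namedtuple that defines this __new__ and that DEFAULT_GROUP_NAME comes from the same library; the feature name and vocabulary size below are illustrative:

feat = SparseFeat("user_id", vocabulary_size=1000, embedding_dim="auto")
# embedding_dim resolves to 6 * int(1000 ** 0.25) == 30
# embeddings_initializer defaults to RandomNormal(mean=0.0, stddev=0.0001, seed=2020)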
Example 2: create_embedding_dict
# Required import: from tensorflow.python.keras import initializers [as alias]
# Or alternatively: from tensorflow.python.keras.initializers import RandomNormal [as alias]
def create_embedding_dict(feature_dim_dict, embedding_size, init_std, seed, l2_reg, prefix='sparse', seq_mask_zero=True):
    # Build one Embedding layer per sparse feature; "auto" derives the width from the feature cardinality.
    if embedding_size == 'auto':
        sparse_embedding = {feat.name: Embedding(feat.dimension, 6 * int(pow(feat.dimension, 0.25)),
                                                 embeddings_initializer=RandomNormal(
                                                     mean=0.0, stddev=init_std, seed=seed),
                                                 embeddings_regularizer=l2(l2_reg),
                                                 name=prefix + '_emb_' + str(i) + '-' + feat.name)
                            for i, feat in enumerate(feature_dim_dict["sparse"])}
    else:
        sparse_embedding = {feat.name: Embedding(feat.dimension, embedding_size,
                                                 embeddings_initializer=RandomNormal(
                                                     mean=0.0, stddev=init_std, seed=seed),
                                                 embeddings_regularizer=l2(l2_reg),
                                                 name=prefix + '_emb_' + str(i) + '-' + feat.name)
                            for i, feat in enumerate(feature_dim_dict["sparse"])}

    # Sequence (multi-valued) features get their own Embedding layers, built with mask_zero for padding.
    if 'sequence' in feature_dim_dict:
        count = len(sparse_embedding)
        sequence_dim_list = feature_dim_dict['sequence']
        for feat in sequence_dim_list:
            # if feat.name not in sparse_embedding:
            if embedding_size == "auto":
                sparse_embedding[feat.name] = Embedding(feat.dimension, 6 * int(pow(feat.dimension, 0.25)),
                                                        embeddings_initializer=RandomNormal(
                                                            mean=0.0, stddev=init_std, seed=seed),
                                                        embeddings_regularizer=l2(l2_reg),
                                                        name=prefix + '_emb_' + str(count) + '-' + feat.name,
                                                        mask_zero=seq_mask_zero)
            else:
                sparse_embedding[feat.name] = Embedding(feat.dimension, embedding_size,
                                                        embeddings_initializer=RandomNormal(
                                                            mean=0.0, stddev=init_std, seed=seed),
                                                        embeddings_regularizer=l2(l2_reg),
                                                        name=prefix + '_emb_' + str(count) + '-' + feat.name,
                                                        mask_zero=seq_mask_zero)
            count += 1
    return sparse_embedding
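A minimal calling sketch for Example 2, assuming the feature descriptors only need .name and .dimension attributes as used in the function body; the Feat namedtuple and all values below are illustrative, not part of the library:

from collections import namedtuple

Feat = namedtuple("Feat", ["name", "dimension"])
feature_dim_dict = {"sparse": [Feat("user_id", 1000), Feat("item_id", 5000)],
                    "sequence": [Feat("hist_item_id", 5000)]}

embedding_dict = create_embedding_dict(feature_dim_dict, embedding_size=8,
                                       init_std=0.0001, seed=1024, l2_reg=1e-5)
# embedding_dict maps each feature name to a Keras Embedding layer;
# the sequence feature's layer is created with mask_zero=seq_mask_zero.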