This article collects typical usage examples of the Python method tensorflow.get_seed. If you have been wondering what tensorflow.get_seed does and how to use it, the curated code examples below should help. You can also explore further usage examples from the tensorflow module that this method belongs to.
The following presents 6 code examples of tensorflow.get_seed, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
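For orientation before the examples: in the TF 1.x API, tf.get_seed combines the graph-level seed (set with tf.set_random_seed) with an op-level seed into the (seed, seed2) pair that stateful random op kernels expect, which is how the initializer ops below consume it. A minimal sketch of that behavior (the seed values here are illustrative, not taken from the examples):

# Minimal sketch of tf.get_seed usage (TF 1.x graph mode; illustrative values)
import tensorflow as tf

tf.set_random_seed(42)            # graph-level seed
seed1, seed2 = tf.get_seed(7)     # combine graph-level and op-level seeds
# seed1/seed2 can now be passed as the seed/seed2 attributes of a
# stateful random op, as the embedding initializers below do.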
Example 1: AddPretrainedEmbeddings
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_seed [as alias]
def AddPretrainedEmbeddings(self, index, embeddings_path, task_context):
  """Embeddings at the given index will be set to pretrained values."""

  def _Initializer(shape, dtype=tf.float32, partition_info=None):
    """Variable initializer that loads pretrained embeddings."""
    unused_dtype = dtype
    seed1, seed2 = tf.get_seed(self._seed)
    t = gen_parser_ops.word_embedding_initializer(
        vectors=embeddings_path,
        task_context=task_context,
        embedding_init=self._embedding_init,
        seed=seed1,
        seed2=seed2)
    t.set_shape(shape)
    return t

  self._pretrained_embeddings[index] = _Initializer
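The stored _Initializer follows the standard TF 1.x initializer signature (shape, dtype, partition_info), so the surrounding builder can later hand it to tf.get_variable. The snippet below is a hypothetical illustration of that consumption, not part of the original example; the variable name and shape values are assumptions:

# Hypothetical use of the stored initializer (name and shape are illustrative)
embedding_matrix = tf.get_variable(
    'embedding_matrix_%d' % index,
    shape=[vocab_size + 1, embedding_dim],
    initializer=self._pretrained_embeddings[index])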
Example 2: AddPretrainedEmbeddings
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_seed [as alias]
def AddPretrainedEmbeddings(self, index, embeddings_path, task_context):
  """Embeddings at the given index will be set to pretrained values."""

  def _Initializer(shape, dtype=tf.float32, partition_info=None):
    """Variable initializer that loads pretrained embeddings."""
    unused_dtype = dtype
    seed1, seed2 = tf.get_seed(self._seed)
    t = gen_parser_ops.word_embedding_initializer(
        vectors=embeddings_path,
        task_context=task_context,
        embedding_init=self._embedding_init,
        cache_vectors_locally=False,
        seed=seed1,
        seed2=seed2)
    t.set_shape(shape)
    return t

  self._pretrained_embeddings[index] = _Initializer
Example 3: add_embeddings
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_seed [as alias]
def add_embeddings(channel_id, feature_spec, seed=None):
  """Adds a variable for the embedding of a given fixed feature.

  Supports pre-trained or randomly initialized embeddings. In both cases, an
  extra vector is reserved for out-of-vocabulary words, so the embedding matrix
  has the size [feature_spec.vocabulary_size + 1, feature_spec.embedding_dim].

  Args:
    channel_id: Numeric id of the fixed feature channel.
    feature_spec: Feature spec protobuf of type FixedFeatureChannel.
    seed: Seed used for the random initializer.

  Returns:
    tf.Variable object corresponding to the embedding for that feature.

  Raises:
    RuntimeError: if the pretrained embeddings are specified in resources
        containing more than one part.
  """
  check.Gt(feature_spec.embedding_dim, 0,
           'Embeddings requested for non-embedded feature: %s' % feature_spec)
  name = fixed_embeddings_name(channel_id)
  shape = [feature_spec.vocabulary_size + 1, feature_spec.embedding_dim]
  if feature_spec.HasField('pretrained_embedding_matrix'):
    if len(feature_spec.pretrained_embedding_matrix.part) > 1:
      raise RuntimeError('pretrained_embedding_matrix resource contains '
                         'more than one part:\n%s' %
                         str(feature_spec.pretrained_embedding_matrix))
    if len(feature_spec.vocab.part) > 1:
      raise RuntimeError('vocab resource contains more than one part:\n%s' %
                         str(feature_spec.vocab))
    seed1, seed2 = tf.get_seed(seed)
    embeddings = dragnn_ops.dragnn_embedding_initializer(
        embedding_input=feature_spec.pretrained_embedding_matrix.part[0]
        .file_pattern,
        vocab=feature_spec.vocab.part[0].file_pattern,
        scaling_coefficient=1.0,
        seed=seed1,
        seed2=seed2)
    return tf.get_variable(name, initializer=tf.reshape(embeddings, shape))
  else:
    return tf.get_variable(
        name,
        shape,
        initializer=tf.random_normal_initializer(
            stddev=1.0 / feature_spec.embedding_dim**.5, seed=seed))
Example 4: add_embeddings
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_seed [as alias]
def add_embeddings(channel_id, feature_spec, seed=None):
  """Adds a variable for the embedding of a given fixed feature.

  Supports pre-trained or randomly initialized embeddings. In both cases, an
  extra vector is reserved for out-of-vocabulary words, so the embedding matrix
  has the size [feature_spec.vocabulary_size + 1, feature_spec.embedding_dim].

  Args:
    channel_id: Numeric id of the fixed feature channel.
    feature_spec: Feature spec protobuf of type FixedFeatureChannel.
    seed: Seed used for the random initializer.

  Returns:
    tf.Variable object corresponding to the embedding for that feature.

  Raises:
    RuntimeError: if the pretrained embeddings are specified in resources
        containing more than one part.
  """
  check.Gt(feature_spec.embedding_dim, 0,
           'Embeddings requested for non-embedded feature: %s' % feature_spec)
  name = fixed_embeddings_name(channel_id)
  shape = [feature_spec.vocabulary_size + 1, feature_spec.embedding_dim]
  if feature_spec.HasField('pretrained_embedding_matrix'):
    if len(feature_spec.pretrained_embedding_matrix.part) > 1:
      raise RuntimeError('pretrained_embedding_matrix resource contains '
                         'more than one part:\n%s' %
                         str(feature_spec.pretrained_embedding_matrix))
    if len(feature_spec.vocab.part) > 1:
      raise RuntimeError('vocab resource contains more than one part:\n%s' %
                         str(feature_spec.vocab))
    seed1, seed2 = tf.get_seed(seed)
    embeddings = syntaxnet_ops.word_embedding_initializer(
        vectors=feature_spec.pretrained_embedding_matrix.part[0].file_pattern,
        vocabulary=feature_spec.vocab.part[0].file_pattern,
        num_special_embeddings=1,
        embedding_init=1.0,
        seed=seed1,
        seed2=seed2)
    return tf.get_variable(
        name,
        initializer=tf.reshape(embeddings, shape),
        trainable=not feature_spec.is_constant)
  else:
    return tf.get_variable(
        name,
        shape,
        initializer=tf.random_normal_initializer(
            stddev=1.0 / feature_spec.embedding_dim**.5, seed=seed),
        trainable=not feature_spec.is_constant)
Example 5: add_embeddings
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_seed [as alias]
def add_embeddings(channel_id, feature_spec, seed=None):
  """Adds a variable for the embedding of a given fixed feature.

  Supports pre-trained or randomly initialized embeddings. In both cases, an
  extra vector is reserved for out-of-vocabulary words, so the embedding matrix
  has the size [feature_spec.vocabulary_size + 1, feature_spec.embedding_dim].

  Args:
    channel_id: Numeric id of the fixed feature channel.
    feature_spec: Feature spec protobuf of type FixedFeatureChannel.
    seed: Seed used for the random initializer.

  Returns:
    tf.Variable object corresponding to the embedding for that feature.

  Raises:
    RuntimeError: if the pretrained embeddings are specified in resources
        containing more than one part.
  """
  check.Gt(feature_spec.embedding_dim, 0,
           'Embeddings requested for non-embedded feature: %s' % feature_spec)
  name = fixed_embeddings_name(channel_id)
  shape = [feature_spec.vocabulary_size + 1, feature_spec.embedding_dim]
  if feature_spec.HasField('pretrained_embedding_matrix'):
    if len(feature_spec.pretrained_embedding_matrix.part) > 1:
      raise RuntimeError('pretrained_embedding_matrix resource contains '
                         'more than one part:\n%s' %
                         str(feature_spec.pretrained_embedding_matrix))
    if len(feature_spec.vocab.part) > 1:
      raise RuntimeError('vocab resource contains more than one part:\n%s' %
                         str(feature_spec.vocab))
    seed1, seed2 = tf.get_seed(seed)
    embeddings = syntaxnet_ops.word_embedding_initializer(
        vectors=feature_spec.pretrained_embedding_matrix.part[0].file_pattern,
        vocabulary=feature_spec.vocab.part[0].file_pattern,
        num_special_embeddings=1,
        embedding_init=1.0,
        seed=seed1,
        seed2=seed2)
    return tf.get_variable(name, initializer=tf.reshape(embeddings, shape))
  else:
    return tf.get_variable(
        name,
        shape,
        initializer=tf.random_normal_initializer(
            stddev=1.0 / feature_spec.embedding_dim**.5, seed=seed))
Example 6: add_embeddings
# Required module: import tensorflow [as alias]
# Or: from tensorflow import get_seed [as alias]
def add_embeddings(channel_id, feature_spec, seed=None):
  """Adds a variable for the embedding of a given fixed feature.

  Supports pre-trained or randomly initialized embeddings. In both cases, an
  extra vector is reserved for out-of-vocabulary words, so the embedding matrix
  has the size [feature_spec.vocabulary_size + 1, feature_spec.embedding_dim].

  Args:
    channel_id: Numeric id of the fixed feature channel.
    feature_spec: Feature spec protobuf of type FixedFeatureChannel.
    seed: Seed used for the random initializer.

  Returns:
    tf.Variable object corresponding to the embedding for that feature.

  Raises:
    RuntimeError: if the pretrained embeddings are specified in resources
        containing more than one part.
  """
  check.Gt(feature_spec.embedding_dim, 0,
           'Embeddings requested for non-embedded feature: %s' % feature_spec)
  name = fixed_embeddings_name(channel_id)
  row_num = feature_spec.vocabulary_size + 1
  shape = [row_num, feature_spec.embedding_dim]
  if feature_spec.HasField('pretrained_embedding_matrix'):
    if len(feature_spec.pretrained_embedding_matrix.part) > 1:
      raise RuntimeError('pretrained_embedding_matrix resource contains '
                         'more than one part:\n%s' %
                         str(feature_spec.pretrained_embedding_matrix))
    if len(feature_spec.vocab.part) > 1:
      raise RuntimeError('vocab resource contains more than one part:\n%s' %
                         str(feature_spec.vocab))
    seed1, seed2 = tf.get_seed(seed)
    embeddings = syntaxnet_ops.word_embedding_initializer(
        vectors=feature_spec.pretrained_embedding_matrix.part[0].file_pattern,
        vocabulary=feature_spec.vocab.part[0].file_pattern,
        override_num_embeddings=row_num,
        embedding_init=0.0,  # zero out rows with no pretrained values
        seed=seed1,
        seed2=seed2)
    return tf.get_variable(
        name,
        initializer=tf.reshape(embeddings, shape),
        trainable=not feature_spec.is_constant)
  else:
    return tf.get_variable(
        name,
        shape,
        initializer=tf.random_normal_initializer(
            stddev=1.0 / feature_spec.embedding_dim**.5, seed=seed),
        trainable=not feature_spec.is_constant)