

Python embedding_ops.embedding_lookup Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.embedding_ops.embedding_lookup. If you are unsure what embedding_ops.embedding_lookup does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore other usage examples from the module it belongs to, tensorflow.python.ops.embedding_ops.


The following lists 15 code examples of the embedding_ops.embedding_lookup method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
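Before the project-specific examples, here is a minimal standalone sketch of the basic call, assuming a TensorFlow 1.x graph-mode environment; the vocabulary size and embedding dimension below are made up for illustration. embedding_lookup(params, ids) gathers the rows of params selected by ids, so the output shape is the shape of ids with a trailing embedding dimension appended:

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import embedding_ops

# Toy parameters: a vocabulary of 10 tokens with 4-dimensional embeddings.
params = tf.constant(np.random.randn(10, 4), dtype=tf.float32)
ids = tf.constant([[1, 3, 5], [2, 2, 0]])  # shape (batch_size=2, seq_len=3)

embedded = embedding_ops.embedding_lookup(params, ids)  # shape (2, 3, 4)

with tf.Session() as sess:
    print(sess.run(embedded).shape)  # (2, 3, 4)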

Example 1: add_embedding_layer

# Required import: from tensorflow.python.ops import embedding_ops [as alias]
# Or: from tensorflow.python.ops.embedding_ops import embedding_lookup [as alias]
def add_embedding_layer(self, emb_matrix):
        """
        Adds word embedding layer to the graph.

        Inputs:
          emb_matrix: shape (400002, embedding_size).
            The GloVe vectors, plus vectors for PAD and UNK.
        """
        with vs.variable_scope("embeddings"):

            # Note: the embedding matrix is a tf.constant which means it's not a trainable parameter
            embedding_matrix = tf.constant(emb_matrix, dtype=tf.float32, name="emb_matrix") # shape (400002, embedding_size)

            # Get the word embeddings for the context and question,
            # using the placeholders self.context_ids and self.qn_ids
            self.context_embs = embedding_ops.embedding_lookup(embedding_matrix, self.context_ids) # shape (batch_size, context_len, embedding_size)
            self.qn_embs = embedding_ops.embedding_lookup(embedding_matrix, self.qn_ids) # shape (batch_size, question_len, embedding_size) 
Author: abisee, Project: cs224n-win18-squad, Lines: 19, Source: qa_model.py

Example 2: call

# Required import: from tensorflow.python.ops import embedding_ops [as alias]
# Or: from tensorflow.python.ops.embedding_ops import embedding_lookup [as alias]
def call(self, inputs, state):
    """Run the cell on embedded inputs."""
    with ops.device("/cpu:0"):
      if self._initializer:
        initializer = self._initializer
      elif vs.get_variable_scope().initializer:
        initializer = vs.get_variable_scope().initializer
      else:
        # Default initializer for embeddings should have variance=1.
        sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
        initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

      if isinstance(state, tuple):
        data_type = state[0].dtype
      else:
        data_type = state.dtype

      embedding = vs.get_variable(
          "embedding", [self._embedding_classes, self._embedding_size],
          initializer=initializer,
          dtype=data_type)
      embedded = embedding_ops.embedding_lookup(embedding,
                                                array_ops.reshape(inputs, [-1]))

      return self._cell(embedded, state) 
Author: ryfeus, Project: lambda-packs, Lines: 27, Source: core_rnn_cell.py
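The flatten-then-lookup pattern in Example 2 recurs in Examples 5, 9, 12, and 13: the incoming id tensor is flattened with array_ops.reshape(inputs, [-1]) before the lookup, so whatever shape the ids arrive in, the wrapped cell receives a [batch_size, embedding_size] matrix. A minimal sketch of just that step (TF 1.x; the sizes below are made up):

import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import variable_scope as vs

batch_size, embedding_classes, embedding_size = 4, 100, 8
inputs = tf.placeholder(tf.int32, [batch_size, 1])  # one token id per example

embedding = vs.get_variable("embedding", [embedding_classes, embedding_size])
# Flatten to a rank-1 id vector, then look up one embedding row per id.
embedded = embedding_ops.embedding_lookup(embedding, array_ops.reshape(inputs, [-1]))
# embedded has shape (batch_size, embedding_size) and is what gets fed to the wrapped cell.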

Example 3: _init_clusters_random

# Required import: from tensorflow.python.ops import embedding_ops [as alias]
# Or: from tensorflow.python.ops.embedding_ops import embedding_lookup [as alias]
def _init_clusters_random(self):
    """Does random initialization of clusters.

    Returns:
      Tensor of randomly initialized clusters.
    """
    num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in self._inputs])
    # Note that for mini-batch k-means, we should ensure that the batch size of
    # data used during initialization is sufficiently large to avoid duplicated
    # clusters.
    with ops.control_dependencies(
        [check_ops.assert_less_equal(self._num_clusters, num_data)]):
      indices = random_ops.random_uniform(
          array_ops.reshape(self._num_clusters, [-1]),
          minval=0,
          maxval=math_ops.cast(num_data, dtypes.int64),
          seed=self._random_seed,
          dtype=dtypes.int64)
      clusters_init = embedding_lookup(
          self._inputs, indices, partition_strategy='div')
      return clusters_init 
Author: ryfeus, Project: lambda-packs, Lines: 23, Source: clustering_ops.py

Example 4: _init_clusters_random

# Required import: from tensorflow.python.ops import embedding_ops [as alias]
# Or: from tensorflow.python.ops.embedding_ops import embedding_lookup [as alias]
def _init_clusters_random(data, num_clusters, random_seed):
  """Does random initialization of clusters.

  Args:
    data: a list of Tensors with a matrix of data, each row is an example.
    num_clusters: an integer with the number of clusters.
    random_seed: Seed for PRNG used to initialize seeds.

  Returns:
    A Tensor with num_clusters random rows of data.
  """
  assert isinstance(data, list)
  num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in data])
  with ops.control_dependencies(
      [check_ops.assert_less_equal(num_clusters, num_data)]):
    indices = random_ops.random_uniform(
        [num_clusters],
        minval=0,
        maxval=math_ops.cast(num_data, dtypes.int64),
        seed=random_seed,
        dtype=dtypes.int64)
  indices %= math_ops.cast(num_data, dtypes.int64)
  clusters_init = embedding_lookup(data, indices, partition_strategy='div')
  return clusters_init 
Author: ryfeus, Project: lambda-packs, Lines: 26, Source: gmm_ops.py
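In Examples 3, 4, 6, and 11 the first argument to embedding_lookup is a list of tensors rather than a single matrix. embedding_lookup then treats the list as a row-wise partitioned parameter, and partition_strategy='div' assigns contiguous index ranges to successive shards. A hedged sketch of that behavior with toy data (TF 1.x):

import tensorflow as tf
from tensorflow.python.ops.embedding_ops import embedding_lookup

# Two shards of a logical 6x2 data matrix, split row-wise.
shard_a = tf.constant([[0., 0.], [1., 1.], [2., 2.]])
shard_b = tf.constant([[3., 3.], [4., 4.], [5., 5.]])

# With partition_strategy='div', ids 0..2 map to shard_a and ids 3..5 to shard_b.
rows = embedding_lookup([shard_a, shard_b], tf.constant([0, 4], dtype=tf.int64),
                        partition_strategy='div')

with tf.Session() as sess:
    print(sess.run(rows))  # rows 0 and 4 of the logical matrix: [[0. 0.], [4. 4.]]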

Example 5: __call__

# Required import: from tensorflow.python.ops import embedding_ops [as alias]
# Or: from tensorflow.python.ops.embedding_ops import embedding_lookup [as alias]
def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or "embedding_wrapper"):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

        if type(state) is tuple:
          data_type = state[0].dtype
        else:
          data_type = state.dtype

        embedding = vs.get_variable(
            "embedding", [self._embedding_classes, self._embedding_size],
            initializer=initializer,
            dtype=data_type)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state) 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 27, Source: core_rnn_cell_impl.py

Example 6: _init_clusters_random

# Required import: from tensorflow.python.ops import embedding_ops [as alias]
# Or: from tensorflow.python.ops.embedding_ops import embedding_lookup [as alias]
def _init_clusters_random(data, num_clusters, random_seed):
  """Does random initialization of clusters.

  Args:
    data: a list of Tensors with a matrix of data, each row is an example.
    num_clusters: an integer with the number of clusters.
    random_seed: Seed for PRNG used to initialize seeds.

  Returns:
    A Tensor with num_clusters random rows of data.
  """
  assert isinstance(data, list)
  num_data = math_ops.add_n([array_ops.shape(inp)[0] for inp in data])
  with ops.control_dependencies(
      [check_ops.assert_less_equal(num_clusters, num_data)]):
    indices = random_ops.random_uniform(
        [num_clusters],
        minval=0,
        maxval=math_ops.cast(num_data, dtypes.int64),
        seed=random_seed,
        dtype=dtypes.int64)
  indices = math_ops.cast(indices, dtypes.int32) % num_data
  clusters_init = embedding_lookup(data, indices, partition_strategy='div')
  return clusters_init 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 26, Source: gmm_ops.py

Example 7: embedding_rnn_decoder

# Required import: from tensorflow.python.ops import embedding_ops [as alias]
# Or: from tensorflow.python.ops.embedding_ops import embedding_lookup [as alias]
def embedding_rnn_decoder(self,initial_state, cell,
							attention_states, encode_embed, num_heads=1,
							 output_projection=None,
							 feed_previous=False,
							 update_embedding_for_previous=True, scope=None):
		"""RNN decoder with embedding and a pure-decoding option.

		"""
		if output_projection is not None:
			proj_weights = ops.convert_to_tensor(output_projection[0],
												 dtype=dtypes.float32)
			proj_weights.get_shape().assert_is_compatible_with([None, num_symbols])
			proj_biases = ops.convert_to_tensor(
				output_projection[1], dtype=dtypes.float32)
			proj_biases.get_shape().assert_is_compatible_with([num_symbols])

		with variable_scope.variable_scope(scope or "embedding_rnn_decoder"):
			loop_function = self._extract_argmax_and_embed(
				encode_embed, output_projection,
				update_embedding_for_previous) if feed_previous else None
			# emb_inp = (
			#	embedding_ops.embedding_lookup(embeddings, i) for i in decoder_inputs)
			#emb_inp = decoder_embed
			return self.rnn_decoder(encode_embed, attention_states, initial_state, cell,
								num_heads=num_heads, loop_function=loop_function) 
Author: QingyaoAi, Project: Deep-Listwise-Context-Model-for-Ranking-Refinement, Lines: 27, Source: RankLSTM_model.py

Example 8: listMLE

# Required import: from tensorflow.python.ops import embedding_ops [as alias]
# Or: from tensorflow.python.ops.embedding_ops import embedding_lookup [as alias]
def listMLE(self, output, target_indexs, target_rels, name=None):
		loss = None
		with ops.name_scope(name, "listMLE",[output] + target_indexs + target_rels):
			output = tf.nn.l2_normalize(output, 1)
			loss = -1.0 * math_ops.reduce_sum(output,1)
			print(loss.get_shape())
			exp_output = tf.exp(output)
			exp_output_table = tf.reshape(exp_output,[-1])
			print(exp_output.get_shape())
			print(exp_output_table.get_shape())
			sum_exp_output = math_ops.reduce_sum(exp_output,1)
			loss = tf.add(loss, tf.log(sum_exp_output))
			#compute MLE
			for i in xrange(self.rank_list_size-1):
				idx = target_indexs[i] + tf.to_int64(self.batch_index_bias)
				y_i = embedding_ops.embedding_lookup(exp_output_table, idx)
				#y_i = tf.gather_nd(exp_output, idx)
				sum_exp_output = tf.subtract(sum_exp_output, y_i)
				loss = tf.add(loss, tf.log(sum_exp_output))
		batch_size = tf.shape(target_rels[0])[0]
		return math_ops.reduce_sum(loss) / math_ops.cast(batch_size, dtypes.float32) 
Author: QingyaoAi, Project: Deep-Listwise-Context-Model-for-Ranking-Refinement, Lines: 23, Source: RankLSTM_model.py
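In Example 8, embedding_lookup is used purely as a gather: exp_output is flattened into a 1-D table, and batch_index_bias offsets each example's index into its own row of the flattened tensor. A hedged sketch of that indexing trick in isolation (toy shapes; batch_index_bias is assumed here to be [0, list_size, 2*list_size, ...]):

import tensorflow as tf
from tensorflow.python.ops import embedding_ops

batch_size, list_size = 2, 3
scores = tf.constant([[0.1, 0.2, 0.3],
                      [0.4, 0.5, 0.6]])
flat = tf.reshape(scores, [-1])                      # 1-D table of length 6

# Offset so that index i of example b points at position b*list_size + i.
batch_index_bias = tf.range(batch_size) * list_size  # [0, 3]
target_index = tf.constant([2, 0])                   # per-example column to pick
picked = embedding_ops.embedding_lookup(flat, target_index + batch_index_bias)

with tf.Session() as sess:
    print(sess.run(picked))  # [0.3, 0.4]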

Example 9: __call__

# Required import: from tensorflow.python.ops import embedding_ops [as alias]
# Or: from tensorflow.python.ops.embedding_ops import embedding_lookup [as alias]
def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

        if type(state) is tuple:
          data_type = state[0].dtype
        else:
          data_type = state.dtype

        embedding = vs.get_variable(
            "embedding", [self._embedding_classes, self._embedding_size],
            initializer=initializer,
            dtype=data_type)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state) 
Author: Guanghan, Project: ROLO, Lines: 27, Source: rnn_cell.py

Example 10: testIndexedSlicesGradient

# Required import: from tensorflow.python.ops import embedding_ops [as alias]
# Or: from tensorflow.python.ops.embedding_ops import embedding_lookup [as alias]
def testIndexedSlicesGradient(self):
    with ops.Graph().as_default():
      embedding_matrix = tf.get_variable(
          "embedding_matrix", [5, 5],
          initializer=tf.random_normal_initializer())
      def Cond(it, _):
        return it < 5
      def Body(it, cost):
        embedding = embedding_ops.embedding_lookup(embedding_matrix + 0.0, [0])
        cost += tf.reduce_sum(embedding)
        return it + 1, cost
      _, cost = control_flow_ops.while_loop(
          Cond, Body, [tf.constant(0), tf.constant(0.0)])
      optimizer = momentum.MomentumOptimizer(0.1, 0.9)
      train_op = optimizer.minimize(cost)
      with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(10):
          sess.run([train_op]) 
Author: tobegit3hub, Project: deep_image_model, Lines: 21, Source: control_flow_ops_test.py
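For context on what Example 10 exercises: the gradient of embedding_lookup with respect to the parameter matrix is returned as a tf.IndexedSlices (a sparse, row-wise gradient) rather than a dense tensor, and the test checks that this still propagates correctly through a while_loop. A minimal sketch of the IndexedSlices gradient outside a loop (assuming TF 1.x):

import tensorflow as tf
from tensorflow.python.ops import embedding_ops

# A small trainable embedding matrix and a lookup of two of its rows.
emb = tf.get_variable("emb", [5, 3], initializer=tf.random_normal_initializer())
loss = tf.reduce_sum(embedding_ops.embedding_lookup(emb, [0, 2]))

# The gradient w.r.t. emb only touches rows 0 and 2, so it comes back sparse.
grad = tf.gradients(loss, [emb])[0]
print(type(grad))  # IndexedSlices, not a dense Tensor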

Example 11: _init_clusters_random

# Required import: from tensorflow.python.ops import embedding_ops [as alias]
# Or: from tensorflow.python.ops.embedding_ops import embedding_lookup [as alias]
def _init_clusters_random(self):
    """Does random initialization of clusters.

    Returns:
      Tensor of randomly initialized clusters.
    """
    num_data = tf.add_n([tf.shape(inp)[0] for inp in self._inputs])
    # Note that for mini-batch k-means, we should ensure that the batch size of
    # data used during initialization is sufficiently large to avoid duplicated
    # clusters.
    with tf.control_dependencies(
        [tf.assert_less_equal(self._num_clusters, num_data)]):
      indices = tf.random_uniform(tf.reshape(self._num_clusters, [-1]),
                                  minval=0,
                                  maxval=tf.cast(num_data, tf.int64),
                                  seed=self._random_seed,
                                  dtype=tf.int64)
      clusters_init = embedding_lookup(self._inputs, indices,
                                       partition_strategy='div')
      return clusters_init 
Author: tobegit3hub, Project: deep_image_model, Lines: 22, Source: clustering_ops.py

Example 12: __call__

# Required import: from tensorflow.python.ops import embedding_ops [as alias]
# Or: from tensorflow.python.ops.embedding_ops import embedding_lookup [as alias]
def __call__(self, inputs, state, scope=None):
        """Run the cell on embedded inputs."""
        with vs.variable_scope(scope or type(self).__name__):    # "EmbeddingWrapper"
            with ops.device("/cpu:0"):
                if self._initializer:
                    initializer = self._initializer
                elif vs.get_variable_scope().initializer:
                    initializer = vs.get_variable_scope().initializer
                else:
                    # Default initializer for embeddings should have variance=1.
                    sqrt3 = math.sqrt(3)    # Uniform(-sqrt(3), sqrt(3)) has variance=1.
                    initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

                if type(state) is tuple:
                    data_type = state[0].dtype
                else:
                    data_type = state.dtype

                embedding = vs.get_variable(
                        "embedding", [self._embedding_classes, self._embedding_size],
                        initializer=initializer,
                        dtype=data_type)
                embedded = embedding_ops.embedding_lookup(
                        embedding, array_ops.reshape(inputs, [-1]))
        return self._cell(embedded, state) 
Author: thu-coai, Project: ecm, Lines: 27, Source: rnn_cell.py

Example 13: __call__

# Required import: from tensorflow.python.ops import embedding_ops [as alias]
# Or: from tensorflow.python.ops.embedding_ops import embedding_lookup [as alias]
def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with _checked_scope(self, scope or "embedding_wrapper", reuse=self._reuse):
      with ops.device("/cpu:0"):
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

        if type(state) is tuple:
          data_type = state[0].dtype
        else:
          data_type = state.dtype

        embedding = vs.get_variable(
            "embedding", [self._embedding_classes, self._embedding_size],
            initializer=initializer,
            dtype=data_type)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state) 
Author: ratschlab, Project: RGAN, Lines: 27, Source: mod_core_rnn_cell_impl.py

Example 14: _add_emb_signal

# Required import: from tensorflow.python.ops import embedding_ops [as alias]
# Or: from tensorflow.python.ops.embedding_ops import embedding_lookup [as alias]
def _add_emb_signal(self, x, time):
        """ add position embedding

        :param x:
        :param time:
        :return:
        """
        x_ndims = x.get_shape().ndims
        if x_ndims == 2:
            position = ops.convert_to_tensor(time, dtype=dtypes.int32)
        elif x_ndims == 3:
            position = math_ops.range(array_ops.shape(x)[1])
        else:
            raise ValueError("need a Tensor with rank 2 or 3")
        position_emb = embedding_ops.embedding_lookup(
            self._position_embedding, position)
        return x + array_ops.expand_dims(position_emb, 0) 
Author: zhaocq-nlp, Project: NJUNMT-tf, Lines: 19, Source: embedding.py

Example 15: __init__

# Required import: from tensorflow.python.ops import embedding_ops [as alias]
# Or: from tensorflow.python.ops.embedding_ops import embedding_lookup [as alias]
def __init__(self, dsl_syntax, max_program_len, embedding, start_tokens, end_token, seed=None):
        """Initializer.

        Args:
            dsl_syntax: Syntax checker for generating next possible tokens.
            max_program_len: maximum program length
            embedding: A callable that takes a vector tensor of `ids` (argmax ids),
                or the `params` argument for `embedding_lookup`. The returned tensor
                will be passed to the decoder input.
            start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
            end_token: `int32` scalar, the token that marks end of decoding.
            seed: The sampling seed.

        Raises:
            ValueError: if `start_tokens` is not a 1D tensor or `end_token` is not a
                scalar.
        """
        super(SyntacticSampleEmbeddingHelper, self).__init__(
            dsl_syntax, max_program_len, embedding, start_tokens, end_token)
        self._seed = seed 
Author: shaohua0116, Project: demo2program, Lines: 22, Source: seq2seq_helper.py


Note: The tensorflow.python.ops.embedding_ops.embedding_lookup examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors. For distribution and use, please follow the license of the corresponding project. Do not reproduce without permission.