

Python embedding_ops.embedding_lookup Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.ops.embedding_ops.embedding_lookup. If you are wondering what embedding_lookup does, how to call it, or what real-world usage looks like, the curated examples below should help.


The sections below present 15 code examples of embedding_lookup, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better Python code examples.
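
embedding_lookup gathers rows of an embedding table (a single tensor, or a list of sharded tensors) by integer ids. Before the collected examples, here is a minimal usage sketch via the equivalent public tf.nn.embedding_lookup API; the table and id values are purely illustrative:

import tensorflow as tf

# A 5-row embedding table with 3-dimensional rows (illustrative values).
params = tf.constant([[0.0, 0.1, 0.2],
                      [1.0, 1.1, 1.2],
                      [2.0, 2.1, 2.2],
                      [3.0, 3.1, 3.2],
                      [4.0, 4.1, 4.2]])
ids = tf.constant([1, 3, 1])

# Gathers rows 1, 3, and 1; result shape is ids.shape + params.shape[1:] = (3, 3).
result = tf.nn.embedding_lookup(params, ids)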

Example 1: testAggregateGradients

  def testAggregateGradients(self):

    def fn(x):
      ind1 = constant_op.constant(np.array([0, 1]))
      ind2 = constant_op.constant(np.array([2, 3]))
      ind3 = constant_op.constant(np.array([1, 3]))
      # A mixture of IndexedSlices and dense tensor to aggregate.
      g1 = embedding_ops.embedding_lookup(x, ind1)
      g2 = embedding_ops.embedding_lookup(x, ind2)
      g3 = embedding_ops.embedding_lookup(x, ind3)
      g4 = math_ops.reduce_sum(x * constant_op.constant(2.0))
      return g1 * g2 * g3 * g4

    var_np = np.random.rand(4, 2).astype(np.float32)
    var = constant_op.constant(var_np)
    grad = backprop.gradients_function(fn, [0])(var)[0]
    grad = self.evaluate(ops.convert_to_tensor(grad))

    if not context.executing_eagerly():
      tf_var = array_ops.constant(var_np, dtypes.float32)
      tf_ind1 = array_ops.constant([0, 1])
      tf_ind2 = array_ops.constant([2, 3])
      tf_ind3 = array_ops.constant([1, 3])
      tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
      tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
      tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
      tf_g4 = math_ops.reduce_sum(tf_var * 2.0, axis=(0, 1))
      tf_y = tf_g1 * tf_g2 * tf_g3 * tf_g4
      tf_grad = gradients.gradients(tf_y, [tf_var])[0]

      tf_dense_grad = math_ops.unsorted_segment_sum(
          tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0])

      self.assertAllClose(grad, self.evaluate(tf_dense_grad))
Author: Wajih-O | Project: tensorflow | Lines: 34 | Source: backprop_test.py
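
A note on Example 1: the gradient of embedding_lookup with respect to the table arrives as an IndexedSlices, and ids that were looked up more than once (here 1 and 3) contribute separate slices, so unsorted_segment_sum densifies the gradient by summing rows with duplicate indices. A NumPy sketch of that aggregation step, with illustrative values:

import numpy as np

indices = np.array([0, 1, 2, 3, 1, 3])      # rows 1 and 3 appear twice
values = np.ones((6, 2), dtype=np.float32)  # one slice per looked-up id
dense = np.zeros((4, 2), dtype=np.float32)
np.add.at(dense, indices, values)           # duplicate indices are summed
# dense rows 1 and 3 now hold 2.0; rows 0 and 2 hold 1.0.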

Example 2: testHigherRankMaxNorm

 def testHigherRankMaxNorm(self):
   np.random.seed(8)
   with self.cached_session():
     for params_shape in (12,), (6, 3), (6, 2, 3):
       # Test embedding rank 0, 1, 2.
       # Note: the first dimension must be a common multiple of procs below.
       params = 2 * np.ones(params_shape)
       params_norm = params / np.sqrt(
           np.sum(
               params * params, tuple(range(params.ndim)[1:]), keepdims=True))
       for ids_shape in (), (3), (4, 3), (2, 3, 4):
         ids = np.random.randint(
             params.shape[0], size=np.prod(ids_shape,
                                           dtype=np.int64)).reshape(ids_shape)
         # Compare nonsharded to gather
         simple = embedding_ops.embedding_lookup(
             params, ids, max_norm=1.0).eval()
         # assertAllClose is used here as different implementations of sqrt may
         # be used to compute each of the values being compared.  For example,
         # on AVX512 builds the embedding operation makes use of Eigen's fast
         # vectorized square root algorithm for doubles.  These different
         # implementations of sqrt are not guaranteed to produce exactly the
         # same results. Therefore, an exact comparison cannot be made.
         self.assertAllClose(simple, array_ops.gather(params_norm, ids).eval())
         # Run a few different sharded versions.
         for procs in 1, 2, 3:
           stride = procs * math_ops.range(params.shape[0] // procs)
           split_params = [
               array_ops.gather(params, stride + p) for p in xrange(procs)
           ]
           sharded = embedding_ops.embedding_lookup(
               split_params, ids, max_norm=1.0).eval()
           self.assertAllEqual(simple, sharded)
Author: adit-chandra | Project: tensorflow | Lines: 33 | Source: embedding_ops_test.py
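
The max_norm argument exercised above clips each looked-up row to an L2 norm of at most max_norm before returning it, which is why the test can compare against a plain gather from a pre-normalized table. A rough NumPy sketch of the per-row behavior (my reading of the documented semantics, not the actual kernel):

import numpy as np

def lookup_row_with_max_norm(params, idx, max_norm):
    row = np.asarray(params[idx], dtype=np.float64)
    norm = np.linalg.norm(row)
    # Rescale only when the row's norm exceeds the cap.
    return row * (max_norm / norm) if norm > max_norm else row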

Example 3: testConstructionNonSharded

 def testConstructionNonSharded(self):
   with ops.Graph().as_default():
     p = variables.Variable(
         array_ops.zeros(
             shape=[100, 100], dtype=dtypes.float32))
     ids = constant_op.constant([0, 1, 1, 7], dtype=dtypes.int32)
     embedding_ops.embedding_lookup([p], ids)
Author: AlbertXiebnu | Project: tensorflow | Lines: 7 | Source: embedding_ops_test.py

Example 4: testAggregateGradients

  def testAggregateGradients(self):

    def fn(x):
      ind1 = tensor.Tensor(np.array([0, 1]))
      ind2 = tensor.Tensor(np.array([2, 3]))
      ind3 = tensor.Tensor(np.array([1, 3]))
      # A mixture of IndexedSlices and dense tensor to aggregate.
      g1 = embedding_ops.embedding_lookup(x, ind1)
      g2 = embedding_ops.embedding_lookup(x, ind2)
      g3 = embedding_ops.embedding_lookup(x, ind3)
      g4 = math_ops.reduce_sum(x * tensor.Tensor(2.0))
      return g1 * g2 * g3 * g4

    var_np = np.random.rand(4, 2).astype(np.float32)
    var = tensor.Tensor(var_np)
    grad = backprop.gradients_function(fn, [0])(var)[0]

    with context.graph_mode(), self.test_session():
      tf_var = array_ops.constant(var_np, dtypes.float32)
      tf_ind1 = array_ops.constant([0, 1])
      tf_ind2 = array_ops.constant([2, 3])
      tf_ind3 = array_ops.constant([1, 3])
      tf_g1 = embedding_ops.embedding_lookup(tf_var, tf_ind1)
      tf_g2 = embedding_ops.embedding_lookup(tf_var, tf_ind2)
      tf_g3 = embedding_ops.embedding_lookup(tf_var, tf_ind3)
      tf_g4 = math_ops.reduce_sum(tf_var * 2.0, reduction_indices=(0, 1))
      tf_y = tf_g1 * tf_g2 * tf_g3 * tf_g4
      tf_grad = gradients.gradients(tf_y, [tf_var])[0]

      tf_dense_grad = math_ops.unsorted_segment_sum(
          tf_grad.values, tf_grad.indices, tf_grad.dense_shape[0])

      self.assertAllClose(grad.numpy(), tf_dense_grad.eval())
Author: chdinh | Project: tensorflow | Lines: 33 | Source: backprop_test.py

Example 5: testHigherRankMaxNorm

 def testHigherRankMaxNorm(self):
   np.random.seed(8)
   with self.test_session():
     for params_shape in (12,), (6, 3), (6, 2, 3):
       # Test embedding rank 0, 1, 2.
       # Note: the first dimension must be a common multiple of procs below.
       params = 2 * np.ones(params_shape)
       params_norm = params / np.sqrt(
           np.sum(
               params * params, tuple(range(params.ndim)[1:]), keepdims=True))
       for ids_shape in (), (3), (4, 3), (2, 3, 4):
         ids = np.random.randint(
             params.shape[0], size=np.prod(ids_shape,
                                           dtype=np.int64)).reshape(ids_shape)
         # Compare nonsharded to gather
         simple = embedding_ops.embedding_lookup(
             params, ids, max_norm=1.0).eval()
         self.assertAllEqual(simple, array_ops.gather(params_norm, ids).eval())
         # Run a few different sharded versions.
         for procs in 1, 2, 3:
           stride = procs * math_ops.range(params.shape[0] // procs)
           split_params = [
               array_ops.gather(params, stride + p) for p in xrange(procs)
           ]
           sharded = embedding_ops.embedding_lookup(
               split_params, ids, max_norm=1.0).eval()
           self.assertAllEqual(simple, sharded)
Author: 1000sprites | Project: tensorflow | Lines: 27 | Source: embedding_ops_test.py

Example 6: body

    def body(it, cost):
      embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
      cost = control_flow_ops.cond(
          math_ops.equal(it, 3), lambda: math_ops.square(cost),
          (lambda: cost + math_ops.reduce_sum(embedding)))
      return it + 1, cost

    # The statements below run in the enclosing test method, at the same
    # level as `body`; `cond` and `embedding_matrix` are defined there.
    _, cost = control_flow_ops.while_loop(
        cond, body, [constant_op.constant(0),
                     constant_op.constant(0.0)])

    dynamic_grads = gradients_impl.gradients(cost, [embedding_matrix])[0]
    dynamic_grads = math_ops.segment_sum(dynamic_grads.values,
                                         dynamic_grads.indices)

    embedding = embedding_ops.embedding_lookup(embedding_matrix, [0])
    static = math_ops.square(
        math_ops.reduce_sum(embedding) + math_ops.reduce_sum(embedding) +
        math_ops.reduce_sum(embedding)) + math_ops.reduce_sum(embedding)
    static_grads = gradients_impl.gradients(static, [embedding_matrix])[0]
    static_grads = math_ops.segment_sum(static_grads.values,
                                        static_grads.indices)

    with self.cached_session():
      self.evaluate(variables.global_variables_initializer())
      self.assertAllEqual(*self.evaluate([static_grads, dynamic_grads]))
Author: Wajih-O | Project: tensorflow | Lines: 26 | Source: control_flow_ops_test.py

Example 7: fn

 def fn(x):
   ind1 = constant_op.constant(np.array([0, 1]))
   ind2 = constant_op.constant(np.array([2, 3]))
   ind3 = constant_op.constant(np.array([1, 3]))
   # A mixture of IndexedSlices and dense tensor to aggregate.
   g1 = embedding_ops.embedding_lookup(x, ind1)
   g2 = embedding_ops.embedding_lookup(x, ind2)
   g3 = embedding_ops.embedding_lookup(x, ind3)
   g4 = math_ops.reduce_sum(x * constant_op.constant(2.0))
   return g1 * g2 * g3 * g4
Author: Wajih-O | Project: tensorflow | Lines: 10 | Source: backprop_test.py

Example 8: testConstructionSharded

 def testConstructionSharded(self):
   with ops.Graph().as_default():
     p = []
     for _ in range(2):
       p += [
           variables.Variable(
               array_ops.zeros(shape=[100, 100], dtype=dtypes.float32))
       ]
     ids = constant_op.constant([0, 1, 1, 17], dtype=dtypes.int32)
     embedding_ops.embedding_lookup(p, ids)
Author: 1000sprites | Project: tensorflow | Lines: 10 | Source: embedding_ops_test.py

Example 9: create_decoder

  def create_decoder(self):
    start_time = time.time()

    with vs.variable_scope("embedding"):  # the original `"embedding" or scope` always evaluated to "embedding"
      tokens = self.tokens[:-1]
      embeddings = []
      with tf.device("/cpu:0"):
        sqrt3 = np.sqrt(3)
        embedding = vs.get_variable(
            "embedding", [self.vocab_size, self.embedding_size],
            initializer=tf.random_uniform_initializer(-sqrt3, sqrt3))

        for token in tokens:
          # Create the embedding layer.
          emb = embedding_ops.embedding_lookup(embedding, token)
          emb.set_shape([self.batch_size, self.embedding_size])
          embeddings.append(emb)

    cell = rnn_cell.GRUCell(self.decoder_cell_size)
    cell = rnn_cell.OutputProjectionWrapper(cell, self.vocab_size)
    self.decoder_states = rnn.rnn(
        cell, embeddings, dtype=tf.float32, sequence_length=self.tokens_len)[0]
    self.logits = self.decoder_states

    print('create_decoder graph time %f' % (time.time() - start_time))
Author: suriyadeepan | Project: tensorflow | Lines: 25 | Source: lm.py

Example 10: testAdamSparse

  def testAdamSparse(self):
    with ops.device('/cpu:0'):
      # Create 2-D embedding for 3 objects on CPU because sparse/sliced updates
      # are not implemented on TPU.
      embedding_matrix = resource_variable_ops.ResourceVariable(
          array_ops.ones([3, 2]))

    with self.test_scope():
      with backprop.GradientTape() as tape:
        embedding = embedding_ops.embedding_lookup(embedding_matrix, [1])
        y = math_ops.reduce_sum(embedding)
      dy_dx = tape.gradient(y, embedding_matrix)
      self.assertIsInstance(dy_dx, ops.IndexedSlices)
      optimizer = adam.AdamOptimizer(0.1)
      # The gradient application operations will run on CPU because optimizer
      # updates are always collocated with the variable.
      optimizer.apply_gradients([(dy_dx, embedding_matrix)])

      # This assign_add will run on CPU because when an input to an
      # operation is a resource, this operation is placed on the resource's
      # device by the eager runtime.
      embedding_matrix.assign_add(array_ops.ones([3, 2]))

    self.assertAllClose([[2.0, 2.0],
                         [1.9, 1.9],
                         [2.0, 2.0]], embedding_matrix.numpy())
Author: JonathanRaiman | Project: tensorflow | Lines: 26 | Source: eager_test.py
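
For reference, the same sparse-gradient behavior can be reproduced with the public TF 2.x API. This is a sketch under that assumption, not part of the original test:

import tensorflow as tf

embedding_matrix = tf.Variable(tf.ones([3, 2]))
with tf.GradientTape() as tape:
    y = tf.reduce_sum(tf.nn.embedding_lookup(embedding_matrix, [1]))
grad = tape.gradient(y, embedding_matrix)
# grad is a tf.IndexedSlices that touches only row 1 of the table.
tf.keras.optimizers.Adam(0.1).apply_gradients([(grad, embedding_matrix)])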

Example 11: calculate_loss_from_wals_model

 def calculate_loss_from_wals_model(self, wals_model, sp_inputs):
   current_rows = embedding_ops.embedding_lookup(
       wals_model.row_factors, math_ops.range(wals_model._input_rows),
       partition_strategy="div")
   current_cols = embedding_ops.embedding_lookup(
       wals_model.col_factors, math_ops.range(wals_model._input_cols),
       partition_strategy="div")
   row_wts = embedding_ops.embedding_lookup(
       wals_model._row_weights, math_ops.range(wals_model._input_rows),
       partition_strategy="div")
   col_wts = embedding_ops.embedding_lookup(
       wals_model._col_weights, math_ops.range(wals_model._input_cols),
       partition_strategy="div")
   return factorization_ops_test_utils.calculate_loss(
       sp_inputs, current_rows, current_cols, wals_model._regularization,
       wals_model._unobserved_weight, row_wts, col_wts)
Author: AlbertXiebnu | Project: tensorflow | Lines: 16 | Source: factorization_ops_test.py
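
This test passes partition_strategy="div" because the factor matrices are sharded across multiple variables. As a sketch of how the two strategies map an id to a shard (this follows my reading of the documented behavior: with "mod", id i goes to shard i % n, while "div" assigns contiguous blocks, giving the first num_ids % n shards one extra id):

def shard_for_id(i, num_shards, num_ids, strategy="div"):
    if strategy == "mod":
        return i % num_shards
    # "div": contiguous blocks; the first (num_ids % num_shards) shards
    # each hold one more id than the rest.
    small, extras = divmod(num_ids, num_shards)
    threshold = extras * (small + 1)
    if i < threshold:
        return i // (small + 1)
    return extras + (i - threshold) // small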

Example 12: loop_function

  def loop_function(prev, i, log_beam_probs, beam_path, beam_symbols):
    if output_projection is not None:
      prev = nn_ops.xw_plus_b(
          prev, output_projection[0], output_projection[1])

    probs = tf.log(tf.nn.softmax(prev))

    if i > 1:
      # Combine the running log-probability of each beam with the new scores.
      probs = tf.reshape(probs + log_beam_probs[-1],
                         [-1, beam_size * num_symbols])

    best_probs, indices = tf.nn.top_k(probs, beam_size)
    indices = tf.stop_gradient(tf.squeeze(tf.reshape(indices, [-1, 1])))
    best_probs = tf.stop_gradient(tf.reshape(best_probs, [-1, 1]))

    symbols = indices % num_symbols  # Which word in vocabulary.
    beam_parent = indices // num_symbols  # Which hypothesis it came from.

    beam_symbols.append(symbols)
    beam_path.append(beam_parent)
    log_beam_probs.append(best_probs)

    # Note that gradients will not propagate through the second parameter of
    # embedding_lookup.
    emb_prev = embedding_ops.embedding_lookup(embedding, symbols)
    emb_prev = tf.reshape(emb_prev, [beam_size, embedding_size])
    if not update_embedding:
      emb_prev = array_ops.stop_gradient(emb_prev)
    return emb_prev
Author: Vunb | Project: Neural_Conversation_Models | Lines: 34 | Source: my_seq2seq.py

Example 13: attention_decoder_with_embedding

def attention_decoder_with_embedding(decoder_inputs, initial_state, attention_states,
                                     cell, embedding, num_heads=1,
                                     output_size=None, dtype=dtypes.float32, scope=None,
                                     initial_state_attention=False):
    """
    We are not using output_projection because we are NOT using a sampled softmax

    Parameters
    ----------
    decoder_inputs
    initial_state
    attention_states
    cell
    embedding: outside embedding passed in
    num_heads
    output_size
    dtype
    scope
    initial_state_attention

    Returns
    -------

    """
    if output_size is None:
        output_size = cell.output_size

    with vs.variable_scope(scope or "attention_decoder_with_embedding"):
        emb_inp = [
            embedding_ops.embedding_lookup(embedding, i) for i in decoder_inputs]
        return attention_decoder(
            emb_inp, initial_state, attention_states, cell, output_size=output_size,
            num_heads=num_heads, loop_function=None,
            initial_state_attention=initial_state_attention)
Author: windweller | Project: Trident | Lines: 34 | Source: seq2seq.py

Example 14: _tf_dec_embedding_attention_decoder

    def _tf_dec_embedding_attention_decoder(self, enc_out, decoder_input, last_state,
                                    cell, num_symbols, embedding_size, num_heads=1,
                                    output_size=None, output_projection=None,
                                    dtype=dtypes.float32,
                                    scope=None, src_mask=None, maxout_layer=False, encoder="reverse",
                                    start=None, init_const=False, bow_mask=None):
        """Decode single step version of tensorflow.models.rnn.seq2seq.embedding_attention_decoder
            """
        if output_size is None:
          output_size = cell.output_size
        if output_projection is not None:
          proj_weights = ops.convert_to_tensor(output_projection[0], dtype=dtype)
          proj_weights.get_shape().assert_is_compatible_with([cell.output_size,
                                                                num_symbols])   
          proj_biases = ops.convert_to_tensor(output_projection[1], dtype=dtype)
          proj_biases.get_shape().assert_is_compatible_with([num_symbols])

        with variable_scope.variable_scope(scope or "embedding_attention_decoder"):
          with ops.device("/cpu:0"):
            embedding = variable_scope.get_variable("embedding",
                                                    [num_symbols, embedding_size])
          emb_inp = embedding_ops.embedding_lookup(embedding, decoder_input)
          return self._tf_dec_attention_decoder(
              enc_out, emb_inp, last_state, cell, output_size=output_size,
              num_heads=num_heads, src_mask=src_mask, maxout_layer=maxout_layer, embedding_size=embedding_size,
              encoder=encoder, start=start, init_const=init_const, bow_mask=bow_mask)
Author: ehasler | Project: tensorflow | Lines: 26 | Source: tf_seq2seq.py

Example 15: __call__

  def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or type(self).__name__):  # "EmbeddingWrapper2"
      with ops.device("/cpu:0"):
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)
        embeddings = []
        for i in xrange(len(self._embedding_classes)):
          embeddings.append(
              vs.get_variable("embedding" + str(i),
                              [self._embedding_classes[i],
                               self._embedding_sizes[i]],
                              initializer=initializer))
        embedded = []
        for i in xrange(len(self._embedding_classes)):
          embedded.append(embedding_ops.embedding_lookup(
              embeddings[i], array_ops.reshape(inputs[i], [-1])))

        finalEmbedded = tf.concat(1, embedded)

    return self._cell(finalEmbedded, state)
Author: KentonMurray | Project: DeepDomers | Lines: 25 | Source: MultipleInputEmbeddingWrapper.py
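
One caveat when running Example 15 on modern TensorFlow: tf.concat(1, embedded) uses the pre-1.0 argument order (axis first). Since TensorFlow 1.0 the values come first, so the equivalent call would be:

finalEmbedded = tf.concat(embedded, axis=1)  # TF >= 1.0 argument order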


Note: The tensorflow.python.ops.embedding_ops.embedding_lookup examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright remains with those authors; consult each project's license before distributing or using the code. Do not reproduce this compilation without permission.