

Python tensorflow.segment_sum Function Code Examples

This article collects typical usage examples of the tensorflow.segment_sum function in Python. If you are unsure how to call segment_sum, what its arguments look like, or how it is used in real projects, the curated examples below should help.


The following presents 15 code examples of the segment_sum function, sorted by popularity by default.
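Before the examples, here is a minimal sketch of what tf.segment_sum does, assuming the TensorFlow 1.x API that all of the examples below use: it sums the rows of `data` that share the same segment id, and the ids must be sorted, start at 0, and increase by steps of at most 1 (the error cases are exercised in the test examples further down).

import tensorflow as tf

data = tf.constant([[1., 2.], [3., 4.], [5., 6.]])
segment_ids = tf.constant([0, 0, 1])        # sorted, starts at 0
summed = tf.segment_sum(data=data, segment_ids=segment_ids)

with tf.Session() as sess:
    print(sess.run(summed))                 # [[4. 6.]
                                            #  [5. 6.]]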

Example 1: testGradientMatchesSegmentSum

 def testGradientMatchesSegmentSum(self):
   # Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum
   # and compare the outputs, which should be identical.
   # NB: for this test to work, indices must be valid for SegmentSum, namely
   # it must be sorted, the indices must be contiguous, and num_segments
   # must be max(indices) + 1.
   indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]
   n = len(indices)
   num_cols = 2
   shape = [n, num_cols]
   num_segments = max(indices) + 1
   with self.test_session():
     tf_x, np_x = self._input(shape, dtype=tf.float64)
     # Results from UnsortedSegmentSum
     unsorted_s = tf.unsorted_segment_sum(data=tf_x,
                                          segment_ids=indices,
                                          num_segments=num_segments)
     unsorted_jacob_t, unsorted_jacob_n = gradient_checker.ComputeGradient(
         tf_x, shape, unsorted_s, [num_segments, num_cols],
         x_init_value=np_x.astype(np.double),
         delta=1)
     # Results from SegmentSum
     sorted_s = tf.segment_sum(data=tf_x, segment_ids=indices)
     sorted_jacob_t, sorted_jacob_n = gradient_checker.ComputeGradient(
         tf_x, shape, sorted_s, [num_segments, num_cols],
         x_init_value=np_x.astype(np.double),
         delta=1)
   self.assertAllClose(unsorted_jacob_t, sorted_jacob_t, rtol=1e-3, atol=1e-3)
   self.assertAllClose(unsorted_jacob_n, sorted_jacob_n, rtol=1e-3, atol=1e-3)
Developer ID: adeelzaman, Project: tensorflow, Lines of code: 29, Source file: segment_reduction_ops_test.py

Example 2: create_tensor

  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, atom_split
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    outputs = in_layers[0].out_tensor
    atom_split = in_layers[1].out_tensor

    if self.gaussian_expand:
      outputs = self.gaussian_histogram(outputs)

    output_molecules = tf.segment_sum(outputs, atom_split)

    if self.gaussian_expand:
      output_molecules = tf.matmul(output_molecules, self.W) + self.b
      output_molecules = self.activation(output_molecules)

    out_tensor = output_molecules
    if set_tensors:
      self.variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
Developer ID: AhlamMD, Project: deepchem, Lines of code: 26, Source file: graph_layers.py
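In this layer, atom_split plays the role of segment_ids: each entry maps one atom row to the index of the molecule it belongs to, so tf.segment_sum pools atom-level features into molecule-level features. A hypothetical toy illustration (the tensor names and sizes here are made up for the sketch, not taken from the DeepChem layer):

# Five atoms in two molecules; ids must be sorted for tf.segment_sum.
atom_features = tf.ones([5, 8])               # 5 atoms, 8 features each
atom_split = tf.constant([0, 0, 0, 1, 1])     # molecule index per atom
molecule_features = tf.segment_sum(atom_features, atom_split)  # shape [2, 8]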

Example 3: create_tensor

  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """
    parent layers: atom_features, distance, distance_membership_i, distance_membership_j
    """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    atom_features = in_layers[0].out_tensor
    distance = in_layers[1].out_tensor
    distance_membership_i = in_layers[2].out_tensor
    distance_membership_j = in_layers[3].out_tensor
    distance_hidden = tf.matmul(distance, self.W_df) + self.b_df
    atom_features_hidden = tf.matmul(atom_features, self.W_cf) + self.b_cf
    outputs = tf.multiply(
        distance_hidden, tf.gather(atom_features_hidden, distance_membership_j))

    # for atom i in a molecule m, this step multiplies together the distance info of atom pair (i, j)
    # and the embedding of atom j (both passed through a hidden layer)
    outputs = tf.matmul(outputs, self.W_fc)
    outputs = self.activation(outputs)

    output_ii = tf.multiply(self.b_df, atom_features_hidden)
    output_ii = tf.matmul(output_ii, self.W_fc)
    output_ii = self.activation(output_ii)

    # for atom i, sum the influence from all other atoms j in the molecule
    outputs = tf.segment_sum(outputs,
                             distance_membership_i) - output_ii + atom_features
    out_tensor = outputs
    if set_tensors:
      self.trainable_variables = self.trainable_weights
      self.out_tensor = out_tensor
    return out_tensor
Developer ID: ktaneishi, Project: deepchem, Lines of code: 35, Source file: graph_layers.py

Example 4: model

    def model(self, dataset, labels, isEval=None):

        with tf.variable_scope('softmax_linear', reuse=isEval):
            embeddings = tf.get_variable("embeddings", [self.vocabulary_size, self.embedding_size],
                initializer=tf.random_uniform_initializer(minval=-1.0, maxval=1.0, seed=self.SEED))
            segments = tf.constant([x // self.context_window for x in
                range(self.cbowbatch_size)])
            weights = tf.get_variable("weights", [self.vocabulary_size, self.embedding_size],
                initializer=tf.truncated_normal_initializer(0.0, 1.0 / math.sqrt(float(self.embedding_size)),
                          seed=self.SEED))
            biases = tf.get_variable("biases", [self.vocabulary_size],
                initializer=tf.constant_initializer(0.0))

            # Look up embeddings for inputs.
            embed = tf.nn.embedding_lookup(embeddings, dataset)
            compressed_embeddings = tf.segment_sum(embed, segments) # merge each group of embedded context words into one input row
            # Compute the softmax loss, using a sample of the negative labels each time.
            loss = tf.reduce_mean(
                tf.nn.sampled_softmax_loss(
                    weights, biases, compressed_embeddings, labels,
                    self.num_sampled, self.vocabulary_size))

            similarity, normalized_embeddings, embeddings = self.similarity(embeddings, dataset)

            if isEval == None:
                return loss
            if isEval == True:
                return similarity, normalized_embeddings, embeddings
Developer ID: andreslechuga, Project: DeepLearning, Lines of code: 28, Source file: Assignment5.py
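The `segments` constant in this CBOW model maps every context word in the batch to the target word it belongs to, so tf.segment_sum collapses each group of context_window embeddings into one input row for the sampled softmax. A small illustration of the id pattern, assuming context_window = 2 and cbowbatch_size = 6 (values chosen only for this sketch):

context_window = 2
cbowbatch_size = 6
segments = [x // context_window for x in range(cbowbatch_size)]
print(segments)   # [0, 0, 1, 1, 2, 2] -> 6 context embeddings become 3 summed rows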

Example 5: testSegmentIdsInvalid2

 def testSegmentIdsInvalid2(self):
   shape = [4, 4]
   with self.test_session():
     tf_x, _ = self._input(shape)
     indices = [1, 1, 2, 2]
     s = tf.segment_sum(data=tf_x, segment_ids=indices)
     with self.assertRaisesOpError("segment ids do not start at 0"):
       s.eval()
Developer ID: 0-T-0, Project: tensorflow, Lines of code: 8, Source file: segment_reduction_ops_test.py

Example 6: testSegmentIdsInvalid4

 def testSegmentIdsInvalid4(self):
   shape = [4, 4]
   with self.test_session():
     tf_x, _ = self._input(shape)
     indices = [0, 1, 0, 1]
     s = tf.segment_sum(data=tf_x, segment_ids=indices)
     with self.assertRaisesOpError("segment ids are not increasing by 1"):
       s.eval()
Developer ID: 0-T-0, Project: tensorflow, Lines of code: 8, Source file: segment_reduction_ops_test.py

Example 7: testSegmentIdsInvalid7

 def testSegmentIdsInvalid7(self):
   shape = [4, 4]
   with self.test_session():
     tf_x, _ = self._input(shape)
     indices = [0, 0, 0, -2]
     s = tf.segment_sum(data=tf_x, segment_ids=indices)
     with self.assertRaisesOpError("segment ids must be >= 0"):
       s.eval()
Developer ID: 0-T-0, Project: tensorflow, Lines of code: 8, Source file: segment_reduction_ops_test.py

Example 8: testSegmentIdsSize

 def testSegmentIdsSize(self):
   shape = [4, 4]
   with self.test_session():
     tf_x, _ = self._input(shape)
     indices = [0, 1]
     s = tf.segment_sum(data=tf_x, segment_ids=indices)
     with self.assertRaisesOpError("segment_ids should be the same size"):
       s.eval()
Developer ID: adeelzaman, Project: tensorflow, Lines of code: 8, Source file: segment_reduction_ops_test.py

Example 9: testSegmentIdsValid

 def testSegmentIdsValid(self):
   # This is a baseline for the following SegmentIdsInvalid* tests.
   shape = [4, 4]
   with self.test_session():
     tf_x, _ = self._input(shape)
     indices = [0, 0, 0, 1]
     result = tf.segment_sum(data=tf_x, segment_ids=indices).eval()
     self.assertAllEqual([[15, 18, 21, 24], [13, 14, 15, 16]], result)
Developer ID: 0-T-0, Project: tensorflow, Lines of code: 8, Source file: segment_reduction_ops_test.py
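The asserted result is consistent with `self._input([4, 4])` filling the tensor with the values 1 through 16 row by row (an assumption based on the expected output, since the helper is not shown here): rows 0-2 form segment 0 and row 3 forms segment 1.

import numpy as np

x = np.arange(1, 17).reshape(4, 4)   # assumed fixture contents
print(x[:3].sum(axis=0))             # [15 18 21 24] -> segment 0
print(x[3])                          # [13 14 15 16] -> segment 1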

Example 10: remap_keys

  def remap_keys(sparse_tensor):
    # Current indices of our SparseTensor that we need to fix
    bad_indices = sparse_tensor.indices
    # Current values of our SparseTensor that we need to fix
    bad_values = sparse_tensor.values 
  
    # Group by the batch_indices and get the count for each  
    size = tf.segment_sum(data = tf.ones_like(bad_indices[:,0], dtype = tf.int64), segment_ids = bad_indices[:,0]) - 1
    # The number of batch_indices (this should be batch_size unless it is a partially full batch)
    length = tf.shape(size, out_type = tf.int64)[0]
    # Finds the cumulative sum which we can use for indexing later
    cum = tf.cumsum(size)
    # The offsets between each example in the batch due to our concatenation of the keys in the decode_example method
    length_range = tf.range(start = 0, limit = length, delta = 1, dtype = tf.int64)
    # Indices of the SparseTensor's indices member of the rows we added by the concatenation of our keys in the decode_example method
    cum_range = cum + length_range

    # The keys that we have extracted back out of our concatenated SparseTensor
    gathered_indices = tf.squeeze(tf.gather(bad_indices, cum_range)[:,1])

    # The enumerated row indices of the SparseTensor's indices member
    sparse_indices_range = tf.range(tf.shape(bad_indices, out_type = tf.int64)[0], dtype = tf.int64)

    # Here we want to find the row indices of the SparseTensor's indices member that are of our actual data and not the concatenated rows
    # So we want to find the intersection of the two sets and then take the opposite of that
    x = sparse_indices_range
    s = cum_range

    # Number of multiples we are going to tile x, which is our sparse_indices_range
    tile_multiples = tf.concat([tf.ones(tf.shape(tf.shape(x)), dtype=tf.int64), tf.shape(s, out_type = tf.int64)], axis = 0)
    # Expands x, our sparse_indices_range, into a rank 2 tensor and then multiplies the rows by 1 (no copying) and the columns by the number of examples in the batch
    x_tile = tf.tile(tf.expand_dims(x, -1), tile_multiples)
    # Essentially a vectorized logical or, that we then negate
    x_not_in_s = ~tf.reduce_any(tf.equal(x_tile, s), -1)

    # The SparseTensor's indices that are our actual data by using the boolean_mask we just made above applied to the entire indices member of our SparseTensor
    selected_indices = tf.boolean_mask(tensor = bad_indices, mask = x_not_in_s, axis = 0)
    # Apply the same boolean_mask to the entire values member of our SparseTensor to get the actual values data
    selected_values = tf.boolean_mask(tensor = bad_values, mask = x_not_in_s, axis = 0)

    # Need to replace the first column of our selected_indices with keys, so we first need to tile our gathered_indices
    tiling = tf.tile(input = tf.expand_dims(gathered_indices[0], -1), multiples = tf.expand_dims(size[0] , -1))
    
    # We have to repeatedly apply the tiling to each example in the batch
    # Since it is jagged we cannot use tf.map_fn due to the stacking of the TensorArray, so we have to create our own custom version
    def loop_body(i, tensor_grow):
      return i + 1, tf.concat(values = [tensor_grow, tf.tile(input = tf.expand_dims(gathered_indices[i], -1), multiples = tf.expand_dims(size[i] , -1))], axis = 0)

    _, result = tf.while_loop(lambda i, tensor_grow: i < length, loop_body, [tf.constant(1, dtype = tf.int64), tiling])
    
    # Concatenate tiled keys with the 2nd column of selected_indices
    selected_indices_fixed = tf.concat([tf.expand_dims(result, -1), tf.expand_dims(selected_indices[:, 1], -1)], axis = 1)
    
    # Combine everything together back into a SparseTensor
    remapped_sparse_tensor = tf.SparseTensor(indices = selected_indices_fixed, values = selected_values, dense_shape = sparse_tensor.dense_shape)
    return remapped_sparse_tensor
Developer ID: TarunBattula, Project: training-data-analyst, Lines of code: 56, Source file: model.py
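Note that tf.segment_sum is used here only as a group-by counter: summing a vector of ones segmented by the batch index of each sparse entry gives the number of entries per example, and the trailing `- 1` presumably removes the extra key row that the concatenation in decode_example added. A hypothetical check with made-up indices:

# Suppose bad_indices[:, 0] were [0, 0, 0, 1, 1]: example 0 has 3 entries,
# example 1 has 2, and subtracting 1 leaves the counts of real data rows.
batch_ids = tf.constant([0, 0, 0, 1, 1], dtype=tf.int64)
size = tf.segment_sum(tf.ones_like(batch_ids), batch_ids) - 1   # -> [2, 1]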

Example 11: testSegmentIdsInvalid5

 def testSegmentIdsInvalid5(self):
   shape = [4, 4]
   with self.test_session():
     tf_x, _ = self._input(shape)
     indices = [0, 1, 2, 0]
     s = tf.segment_sum(data=tf_x, segment_ids=indices)
     with self.assertRaisesOpError(
         r"Segment id 1 out of range \[0, 1\), probably "
         "because 'segment_ids' input is not sorted."):
       s.eval()
Developer ID: 0-T-0, Project: tensorflow, Lines of code: 10, Source file: segment_reduction_ops_test.py

Example 12: range

  
  # Variables.
  embeddings = tf.Variable(
    tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
  softmax_weights = tf.Variable(
    tf.truncated_normal([vocabulary_size, embedding_size],
                         stddev=1.0 / math.sqrt(embedding_size)))
  softmax_biases = tf.Variable(tf.zeros([vocabulary_size]))
  
  # Model.
  # Look up embeddings for inputs.
  embed = tf.nn.embedding_lookup(embeddings, train_dataset)
  # sum every `context_window` word embedding into one
  segment_ids = tf.constant([i // context_window for i in range(batch_size)])
  
  embed = tf.segment_sum(embed, segment_ids)
  
  # Compute the softmax loss, using a sample of the negative labels each time.
  loss = tf.reduce_mean(
    tf.nn.sampled_softmax_loss(softmax_weights, softmax_biases, embed,
                               train_labels, num_sampled, vocabulary_size))

  # Optimizer.
  optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
  
  # Compute the similarity between minibatch examples and all embeddings.
  # We use the cosine distance:
  norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
  normalized_embeddings = embeddings / norm
  valid_embeddings = tf.nn.embedding_lookup(
    normalized_embeddings, valid_dataset)
Developer ID: vincentqb, Project: udacity-deeplearning, Lines of code: 30, Source file: 5_word2vec_maxc01.py

Example 13: test_SegmentSum

 def test_SegmentSum(self):
     t = tf.segment_sum(self.random(4, 2, 3), np.array([0, 1, 1, 2]))
     self.check(t)
Developer ID: kestrelm, Project: tfdeploy, Lines of code: 3, Source file: ops.py

Example 14: range

  
  # Model.
  # Look up embeddings for inputs.
  embed = tf.nn.embedding_lookup(embeddings, train_dataset)

  # seq_ids only needs to be generated once so do this as a numpy array rather than a tensor.
  seq_ids = np.zeros(batch_size, dtype=np.int32)
  cur_id = -1
  for i in range(batch_size):
    if i % context_window == 0:
      cur_id = cur_id + 1
    seq_ids[i] = cur_id
  print (seq_ids)
  
  # use segment_sum to add together the related words and reduce the output to be num_labels in size.
  final_embed = tf.segment_sum(embed, seq_ids)
  
  # Compute the softmax loss, using a sample of the negative labels each time.
  loss = tf.reduce_mean(
    tf.nn.sampled_softmax_loss(softmax_weights, softmax_biases, final_embed,
                               train_labels, num_sampled, vocabulary_size))

  # Optimizer.
  optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
  
  # Compute the similarity between minibatch examples and all embeddings.
  # We use the cosine distance:
  norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
  normalized_embeddings = embeddings / norm
  valid_embeddings = tf.nn.embedding_lookup(
    normalized_embeddings, valid_dataset)
Developer ID: qpham01, Project: GitHub, Lines of code: 30, Source file: word2vect_cbow.py

Example 15: testSegmentIdsShape

 def testSegmentIdsShape(self):
   shape = [4, 4]
   tf_x, _ = self._input(shape)
   indices = tf.constant([0, 1, 2, 2], shape=[2, 2])
   with self.assertRaises(ValueError):
     tf.segment_sum(data=tf_x, segment_ids=indices)
Developer ID: adeelzaman, Project: tensorflow, Lines of code: 6, Source file: segment_reduction_ops_test.py


Note: The tensorflow.segment_sum function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please follow the corresponding project's license when distributing or using the code; do not reproduce this article without permission.