

Python tensorflow.dynamic_partition Method Code Examples

This article collects typical usage examples of the Python method tensorflow.dynamic_partition from open-source projects. If you are wondering what tensorflow.dynamic_partition does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the tensorflow module that this method belongs to.


The following presents 15 code examples of tensorflow.dynamic_partition, sorted by popularity by default.
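
Before the examples, here is a minimal sketch of the basic semantics (not taken from any of the projects below; TensorFlow 1.x assumed): each element of `data` is routed to the output partition selected by the matching entry of `partitions`.

import tensorflow as tf

data = tf.constant([10, 20, 30, 40, 50])
partitions = tf.constant([0, 1, 0, 1, 1])   # one partition index per element
parts = tf.dynamic_partition(data, partitions, num_partitions=2)

with tf.Session() as sess:
    part_vals = sess.run(parts)
# part_vals[0] == [10, 30], part_vals[1] == [20, 40, 50]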

Example 1: scatter_update

# Required import: import tensorflow [as alias]
# Or: from tensorflow import dynamic_partition [as alias]
# Note: this excerpt also assumes `from tensorflow.python.framework import ops` and `from six.moves import xrange`.
def scatter_update(cls, factor, indices, values, sharding_func):
    """Helper function for doing sharded scatter update."""
    assert isinstance(factor, list)
    if len(factor) == 1:
      with ops.colocate_with(factor[0]):
        # TODO(agarwal): assign instead of scatter update for full batch update.
        return tf.scatter_update(factor[0], indices, values).op
    else:
      num_shards = len(factor)
      assignments, new_ids = sharding_func(indices)
      assert assignments is not None
      assignments = tf.cast(assignments, tf.int32)
      sharded_ids = tf.dynamic_partition(new_ids, assignments, num_shards)
      sharded_values = tf.dynamic_partition(values, assignments, num_shards)
      updates = []
      for i in xrange(num_shards):
        updates.append(tf.scatter_update(factor[i],
                                         sharded_ids[i],
                                         sharded_values[i]))
      return tf.group(*updates) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 22, Source: factorization_ops.py

Example 2: call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import dynamic_partition [as alias]
def call(self, inputs):
    """Perform M steps of set2set gather,

    Detailed descriptions in: https://arxiv.org/abs/1511.06391
    """
    atom_features, atom_split = inputs
    c = tf.zeros((self.batch_size, self.n_hidden))
    h = tf.zeros((self.batch_size, self.n_hidden))

    for i in range(self.M):
      q_expanded = tf.gather(h, atom_split)
      e = tf.reduce_sum(atom_features * q_expanded, 1)
      e_mols = tf.dynamic_partition(e, atom_split, self.batch_size)
      # Add another value (~ -Inf) to each partition to prevent errors in softmax
      e_mols = [
          tf.concat([e_mol, tf.constant([-1000.])], 0) for e_mol in e_mols
      ]
      a = tf.concat([tf.nn.softmax(e_mol)[:-1] for e_mol in e_mols], 0)
      r = tf.math.segment_sum(
          tf.reshape(a, [-1, 1]) * atom_features, atom_split)
      # Model using this layer must set pad_batches=True
      q_star = tf.concat([h, r], axis=1)
      h, c = self.LSTMStep(q_star, c)
    return q_star 
Developer: deepchem, Project: deepchem, Lines: 26, Source: layers.py
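
The per-molecule softmax above relies on a small trick: a large negative sentinel is appended to every partition so softmax never operates on an empty slice. A minimal standalone sketch of just that trick (TensorFlow 1.x assumed, made-up values):

import tensorflow as tf

e = tf.constant([1.0, 2.0, 0.5, 3.0])
atom_split = tf.constant([0, 0, 1, 1])          # molecule id per atom
e_mols = tf.dynamic_partition(e, atom_split, 2)
# the sentinel (~ -Inf) guards against empty partitions and is dropped again by [:-1]
e_mols = [tf.concat([e_mol, tf.constant([-1000.])], 0) for e_mol in e_mols]
a = tf.concat([tf.nn.softmax(e_mol)[:-1] for e_mol in e_mols], 0)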

Example 3: _last_relevant

# Required import: import tensorflow [as alias]
# Or: from tensorflow import dynamic_partition [as alias]
def _last_relevant(outputs, sequence_length):
        """Deprecated"""
        batch_size = tf.shape(outputs)[0]
        max_length = outputs.get_shape()[1]
        output_size = outputs.get_shape()[2]
        index = tf.range(0, batch_size) * max_length + (sequence_length - 1)
        flat = tf.reshape(outputs, [-1, output_size])
        last_timesteps = tf.gather(flat, index)  # very slow
        # mask = tf.sign(index)
        # last_timesteps = tf.boolean_mask(flat, mask)
        # # Creating a vector of 0s and 1s that will specify what timesteps to choose.
        # partitions = tf.reduce_sum(tf.one_hot(index, tf.shape(flat)[0], dtype='int32'), 0)
        # # Selecting the elements we want to choose.
        # _, last_timesteps = tf.dynamic_partition(flat, partitions, 2)  # (batch_size, n_dim)
        # https://stackoverflow.com/questions/35892412/tensorflow-dense-gradient-explanation
        return last_timesteps 
Developer: Lapis-Hong, Project: atec-nlp, Lines: 18, Source: encoder.py
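
The commented-out alternative in Example 3 selects rows with dynamic_partition instead of tf.gather; a minimal sketch of that pattern on toy data (TensorFlow 1.x assumed):

import tensorflow as tf

flat = tf.constant([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
index = tf.constant([1, 3])
# 0/1 vector marking the rows to keep
partitions = tf.reduce_sum(tf.one_hot(index, tf.shape(flat)[0], dtype='int32'), 0)
_, last_timesteps = tf.dynamic_partition(flat, partitions, 2)   # rows 1 and 3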

Example 4: select_present

# Required import: import tensorflow [as alias]
# Or: from tensorflow import dynamic_partition [as alias]
def select_present(x, presence, batch_size=1, name='select_present'):
    with tf.variable_scope(name):
        presence = 1 - tf.to_int32(presence)  # invert mask

        bs = x.get_shape()[0]
        if bs != None:  # here type(bs) is tf.Dimension and == is ok
            batch_size = int(bs)

        num_partitions = 2 * batch_size
        r = tf.range(0, num_partitions,  2)
        r.set_shape(tf.TensorShape(batch_size))
        r = broadcast_against(r, presence)

        presence += r

        selected = tf.dynamic_partition(x, presence, num_partitions)
        selected = tf.concat(axis=0, values=selected)
        selected = tf.reshape(selected, tf.shape(x))

    return selected 
Developer: akosiorek, Project: hart, Lines: 22, Source: tensor_ops.py

Example 5: filter_valids

# Required import: import tensorflow [as alias]
# Or: from tensorflow import dynamic_partition [as alias]
def filter_valids(t, valid, name='filter_valids'):
    """Filter out tensor using valid array.

    Args:
        t (tf.Tensor): The tensor to filter.
        valid (list[float]): Array of 0/1 values indicating which entries
            of `t` are valid.
        name (string): Name of the operation.

    Returns:
        tf.Tensor: Filtered Tensor.
    """
    # Round before casting to avoid floating-point error
    return tf.dynamic_partition(t,
                                tf.cast(tf.round(valid), tf.int32),
                                2,
                                name=name)[1] 
Developer: rlworkgroup, Project: garage, Lines: 19, Source: tensor_utils.py
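
A hypothetical usage sketch of filter_valids, assuming TensorFlow 1.x graph mode; only entries whose valid flag rounds to 1 survive:

import tensorflow as tf

t = tf.constant([1.0, 2.0, 3.0, 4.0])
valid = tf.constant([1.0, 0.0, 1.0, 1.0])
kept = filter_valids(t, valid)          # partition 1 of the dynamic_partition
with tf.Session() as sess:
    print(sess.run(kept))               # [1. 3. 4.]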

Example 6: testSimpleOneDimensional

# Required import: import tensorflow [as alias]
# Or: from tensorflow import dynamic_partition [as alias]
def testSimpleOneDimensional(self):
    with self.test_session() as sess:
      data = tf.constant([0, 13, 2, 39, 4, 17])
      indices = tf.constant([0, 0, 2, 3, 2, 1])
      partitions = tf.dynamic_partition(data, indices, num_partitions=4)
      partition_vals = sess.run(partitions)

    self.assertAllEqual([0, 13], partition_vals[0])
    self.assertAllEqual([17], partition_vals[1])
    self.assertAllEqual([2, 4], partition_vals[2])
    self.assertAllEqual([39], partition_vals[3])
    # Vector data input to DynamicPartition results in
    # `num_partitions` vectors of unknown length.
    self.assertEqual([None], partitions[0].get_shape().as_list())
    self.assertEqual([None], partitions[1].get_shape().as_list())
    self.assertEqual([None], partitions[2].get_shape().as_list())
    self.assertEqual([None], partitions[3].get_shape().as_list()) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 19, Source: dynamic_partition_op_test.py

Example 7: testSimpleTwoDimensional

# Required import: import tensorflow [as alias]
# Or: from tensorflow import dynamic_partition [as alias]
def testSimpleTwoDimensional(self):
    with self.test_session() as sess:
      data = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8],
                          [9, 10, 11], [12, 13, 14], [15, 16, 17]])
      indices = tf.constant([0, 0, 2, 3, 2, 1])
      partitions = tf.dynamic_partition(data, indices, num_partitions=4)
      partition_vals = sess.run(partitions)

    self.assertAllEqual([[0, 1, 2], [3, 4, 5]], partition_vals[0])
    self.assertAllEqual([[15, 16, 17]], partition_vals[1])
    self.assertAllEqual([[6, 7, 8], [12, 13, 14]], partition_vals[2])
    self.assertAllEqual([[9, 10, 11]], partition_vals[3])
      # Matrix data input to DynamicPartition results in
      # `num_partitions` matrices with an unknown number of rows and 3 columns.
    self.assertEqual([None, 3], partitions[0].get_shape().as_list())
    self.assertEqual([None, 3], partitions[1].get_shape().as_list())
    self.assertEqual([None, 3], partitions[2].get_shape().as_list())
    self.assertEqual([None, 3], partitions[3].get_shape().as_list()) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 20, Source: dynamic_partition_op_test.py

Example 8: testHigherRank

# Required import: import tensorflow [as alias]
# Or: from tensorflow import dynamic_partition [as alias]
def testHigherRank(self):
    np.random.seed(7)
    with self.test_session() as sess:
      for n in 2, 3:
        for shape in (4,), (4, 5), (4, 5, 2):
          partitions = np.random.randint(n, size=np.prod(shape)).reshape(shape)
          for extra_shape in (), (6,), (6, 7):
            data = np.random.randn(*(shape + extra_shape))
            partitions_t = tf.constant(partitions, dtype=tf.int32)
            data_t = tf.constant(data)
            outputs = tf.dynamic_partition(
                data_t, partitions_t, num_partitions=n)
            self.assertEqual(n, len(outputs))
            outputs_val = sess.run(outputs)
            for i, output in enumerate(outputs_val):
              self.assertAllEqual(output, data[partitions == i])

            # Test gradients
            outputs_grad = [7 * output for output in outputs_val]
            grads = tf.gradients(outputs, [data_t, partitions_t], outputs_grad)
            self.assertEqual(grads[1], None)  # The partitions input has no gradient
            self.assertAllEqual(7 * data, sess.run(grads[0])) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 24, Source: dynamic_partition_op_test.py

Example 9: split_apply_merge

# Required import: import tensorflow [as alias]
# Or: from tensorflow import dynamic_partition [as alias]
def split_apply_merge(inp, partitions, fns):
  """Split input according to partitions.  Pass results through fns and merge.

  Args:
    inp: the input vector
    partitions: tensor of the same length as the input vector, with values 0 or 1
    fns: a list of two functions, one per partition value.

  Returns:
    the vector routed, where routed[i] = fns[partitions[i]](inp[i])
  """
  new_inputs = tf.dynamic_partition(inp, partitions, len(fns))
  new_outputs = [fns[i](x) for i, x in enumerate(new_inputs)]
  new_indices = tf.dynamic_partition(
      tf.range(0, inp.get_shape()[0]), partitions, len(fns))
  return tf.dynamic_stitch(new_indices, new_outputs) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 18, Source: reinforce_simple_example.py
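
A hypothetical usage sketch of split_apply_merge, assuming TensorFlow 1.x: elements in partition 0 go through the first function, elements in partition 1 through the second, and dynamic_stitch restores the original order.

import tensorflow as tf

inp = tf.constant([1.0, 2.0, 3.0, 4.0])
partitions = tf.constant([0, 1, 0, 1])
routed = split_apply_merge(inp, partitions,
                           [lambda x: x * 10.0, lambda x: x + 100.0])
with tf.Session() as sess:
    print(sess.run(routed))             # [ 10. 102.  30. 104.]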

Example 10: _arrange_back_fn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import dynamic_partition [as alias]
def _arrange_back_fn(list_tensor_1d_mask_1d):
        """Arranges back tensor_1d to restore original order
            modified by `_rearrange_fn` according to mask_1d:
            - number of 0s in mask_1d values on the left are set to
              their corresponding places where mask_1d=0,
            - number of 1s in mask_1d values on the right are set to
              their corresponding places where mask_1d=1"""
        tensor_1d, mask_1d = list_tensor_1d_mask_1d

        mask_indices = tf.dynamic_partition(tf.range(tf.shape(tensor_1d)[0]),
                                            mask_1d, 2)

        mask_sum = tf.reduce_sum(mask_1d, axis=0)
        partitioned_tensor = [tf.zeros_like(tensor_1d[:-mask_sum]),
                              tensor_1d[-mask_sum:]]

        return tf.dynamic_stitch(mask_indices, partitioned_tensor) 
Developer: RasaHQ, Project: rasa_core, Lines: 19, Source: tf_utils.py
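
A small sketch of what _arrange_back_fn computes, assuming TensorFlow 1.x and treating the helper as a plain function: the last sum(mask_1d) entries of tensor_1d are scattered back to the positions where mask_1d == 1, and the remaining positions are filled with zeros.

import tensorflow as tf

tensor_1d = tf.constant([0, 0, 7, 6])
mask_1d = tf.constant([1, 0, 1, 0])
restored = _arrange_back_fn([tensor_1d, mask_1d])
with tf.Session() as sess:
    print(sess.run(restored))           # [7 0 6 0]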

Example 11: Dispatch

# Required import: import tensorflow [as alias]
# Or: from tensorflow import dynamic_partition [as alias]
def Dispatch(self, d_tensors):
    """Reshuffles input `Tensor`s to produce output `Tensor`s.

    The dimensions of all input and output `Tensor`s match, except for
    dimension 0.  In dimension 0, the input `Tensor`s match the corresponding
    `gates` `Tensor`s which were passed to the constructor.

    Args:
      d_tensors: a list of `Tensor`s, one per datashard.

    Returns:
      a list of `Tensor`s, one per expert.

    """
    parts = self._data_parallelism(tf.dynamic_partition, d_tensors, self._gates,
                                   self._model_parallelism.n)
    parts_by_expert = TransposeListOfLists(parts)
    x_tensors = self._model_parallelism(tf.concat, parts_by_expert, 0)
    return x_tensors 
Developer: ZhenYangIACAS, Project: NMT_GAN, Lines: 21, Source: expert_utils.py

Example 12: create_inverse_model

# Required import: import tensorflow [as alias]
# Or: from tensorflow import dynamic_partition [as alias]
def create_inverse_model(self, encoded_state, encoded_next_state):
        """
        Creates inverse model TensorFlow ops for Curiosity module.
        Predicts action taken given current and future encoded states.
        :param encoded_state: Tensor corresponding to encoded current state.
        :param encoded_next_state: Tensor corresponding to encoded next state.
        """
        combined_input = tf.concat([encoded_state, encoded_next_state], axis=1)
        hidden = tf.layers.dense(combined_input, 256, activation=self.swish)
        if self.brain.vector_action_space_type == "continuous":
            pred_action = tf.layers.dense(hidden, self.a_size, activation=None)
            squared_difference = tf.reduce_sum(tf.squared_difference(pred_action, self.selected_actions), axis=1)
            self.inverse_loss = tf.reduce_mean(tf.dynamic_partition(squared_difference, self.mask, 2)[1])
        else:
            pred_action = tf.layers.dense(hidden, self.a_size, activation=tf.nn.softmax)
            cross_entropy = tf.reduce_sum(-tf.log(pred_action + 1e-10) * self.selected_actions, axis=1)
            self.inverse_loss = tf.reduce_mean(tf.dynamic_partition(cross_entropy, self.mask, 2)[1]) 
Developer: xkiwilabs, Project: DQN-using-PyTorch-and-ML-Agents, Lines: 19, Source: models.py

Example 13: create_forward_model

# Required import: import tensorflow [as alias]
# Or: from tensorflow import dynamic_partition [as alias]
def create_forward_model(self, encoded_state, encoded_next_state):
        """
        Creates forward model TensorFlow ops for Curiosity module.
        Predicts encoded future state based on encoded current state and given action.
        :param encoded_state: Tensor corresponding to encoded current state.
        :param encoded_next_state: Tensor corresponding to encoded next state.
        """
        combined_input = tf.concat([encoded_state, self.selected_actions], axis=1)
        hidden = tf.layers.dense(combined_input, 256, activation=self.swish)
        # We compare against the concatenation of all observation streams, hence `self.v_size + int(self.o_size > 0)`.
        pred_next_state = tf.layers.dense(hidden, self.curiosity_enc_size * (self.v_size + int(self.o_size > 0)),
                                          activation=None)

        squared_difference = 0.5 * tf.reduce_sum(tf.squared_difference(pred_next_state, encoded_next_state), axis=1)
        self.intrinsic_reward = tf.clip_by_value(self.curiosity_strength * squared_difference, 0, 1)
        self.forward_loss = tf.reduce_mean(tf.dynamic_partition(squared_difference, self.mask, 2)[1]) 
Developer: xkiwilabs, Project: DQN-using-PyTorch-and-ML-Agents, Lines: 18, Source: models.py
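
Examples 12 and 13 share the same masking idiom: tf.dynamic_partition(per_example_loss, mask, 2)[1] keeps only the loss terms whose mask is 1 before averaging. A standalone sketch of that idiom (TensorFlow 1.x assumed, made-up values):

import tensorflow as tf

per_example_loss = tf.constant([0.5, 1.5, 2.0, 4.0])
mask = tf.constant([1, 0, 1, 1])        # 1 = real step, 0 = padding
masked_loss = tf.reduce_mean(tf.dynamic_partition(per_example_loss, mask, 2)[1])
with tf.Session() as sess:
    print(sess.run(masked_loss))        # (0.5 + 2.0 + 4.0) / 3 ≈ 2.1666667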

Example 14: find_obj

# Required import: import tensorflow [as alias]
# Or: from tensorflow import dynamic_partition [as alias]
def find_obj(sentence, s_mask, classes, scores, num):
  """Computes the object reward for one sentence."""
  shape = tf.shape(sentence)
  sentence = tf.boolean_mask(sentence, s_mask)

  def body(x):
    idx = tf.to_int32(tf.where(tf.equal(sentence, x)))
    idx = tf.cond(tf.shape(idx)[0] > 0, lambda: idx[0, 0],
                  lambda: tf.constant(999, tf.int32))
    return idx

  classes = classes[:num]
  scores = scores[:num]
  ind = tf.map_fn(body, classes, tf.int32)
  mask = tf.not_equal(ind, 999)
  miss, detected = tf.dynamic_partition(scores, tf.to_int32(mask), 2)
  ind = tf.boolean_mask(ind, mask)
  ret = tf.scatter_nd(tf.expand_dims(ind, 1), detected, shape)
  return ret 
Developer: fengyang0317, Project: unsupervised_captioning, Lines: 21, Source: misc_fn.py
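
A hypothetical usage sketch of find_obj, assuming TensorFlow 1.x and toy inputs: detected classes that appear in the (masked) sentence get their detection score scattered to the word's position, and missed classes are dropped.

import tensorflow as tf

sentence = tf.constant([3, 7, 2, 5, 0, 0])
s_mask = tf.constant([True, True, True, True, False, False])
classes = tf.constant([7, 9, 2])        # detected class ids
scores = tf.constant([0.9, 0.8, 0.7])   # matching detection scores
reward = find_obj(sentence, s_mask, classes, scores, num=3)
with tf.Session() as sess:
    print(sess.run(reward))             # [0.  0.9 0.7 0.  0.  0. ]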

Example 15: triplet_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import dynamic_partition [as alias]
def triplet_loss(infer, labels, radius = 2.0):
    """
    Args:
        infer: inference concatenate together with 2 * batch_size
        labels: 0 or 1 with batch_size
        radius:
    Return:
        loss: triplet loss
    """
            
    feature_1, feature_2 = tf.split(0,2,infer)

    feature_diff = tf.reduce_sum(tf.square(feature_1 - feature_2), 1)
    feature_list = tf.dynamic_partition(feature_diff, labels, 2)

    pos_list = feature_list[1]
    neg_list = tf.maximum(0.0, radius * radius - feature_list[0])
    full_list = tf.concat([pos_list, neg_list], 0)
    loss = tf.reduce_mean(full_list)

    return loss 
Developer: polltooh, Project: traffic_video_analysis, Lines: 23, Source: model_func.py
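
A hypothetical usage sketch of triplet_loss, assuming TensorFlow 1.x: the first and second halves of `features` are the two branches of a siamese network, and `labels` marks which pairs match.

import tensorflow as tf

features = tf.random_normal([8, 16])    # 2 * batch_size of 4, concatenated on axis 0
labels = tf.constant([1, 0, 1, 0])      # 1 = matching pair, 0 = non-matching pair
loss = triplet_loss(features, labels, radius=2.0)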


Note: The tensorflow.dynamic_partition examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.