

Python tensorflow.scatter_nd Function Code Examples

This article collects typical usage examples of the tensorflow.scatter_nd function in Python. If you have been wondering what exactly scatter_nd does, how to call it, or where to find examples of it in use, the curated code samples below should help.


The following shows 15 code examples of scatter_nd, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
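
Before the examples, here is a minimal sketch of what tf.scatter_nd does, written in the same TF 1.x style as the snippets below: it writes updates into a zero-initialized tensor of the given shape at the positions named by indices.

import tensorflow as tf

# Scatter four values into an 8-element zero vector.
indices = tf.constant([[4], [3], [1], [7]])  # target positions, shape [4, 1]
updates = tf.constant([9, 10, 11, 12])       # values to write, shape [4]
shape = tf.constant([8])                     # shape of the output tensor

scatter = tf.scatter_nd(indices, updates, shape)

with tf.Session() as sess:
    print(sess.run(scatter))  # [ 0 11  0 10  9  0  0 12]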

Example 1: testEmptyOutputShape1

  def testEmptyOutputShape1(self):
    indices = tf.zeros([2, 2, 2], tf.int32)
    updates = tf.zeros([2, 2, 2], tf.int32)
    shape = tf.constant([0, 3, 2], tf.int32)

    with self.assertRaisesWithPredicateMatch(
        ValueError, "Indices and updates specified for empty output shape"):
      tf.scatter_nd(indices, updates, shape)
Developer: chinnadhurai, Project: block_rnn, Lines: 8, Source: scatter_nd_ops_test.py

Example 2: testRank3InvalidShape2

  def testRank3InvalidShape2(self):
    indices = tf.zeros([2, 2, 1], tf.int32)
    updates = tf.zeros([2, 2], tf.int32)
    shape = np.array([2, 2, 2])
    with self.assertRaisesWithPredicateMatch(
        ValueError, "The inner \\d+ dimensions of output\\.shape="):
      tf.scatter_nd(indices, updates, shape)

    ref = tf.Variable(tf.zeros(shape, tf.int32))
    with self.assertRaisesWithPredicateMatch(
        ValueError, "The inner \\d+ dimensions of ref\\.shape="):
      tf.scatter_nd_update(ref, indices, updates)
Developer: chinnadhurai, Project: block_rnn, Lines: 12, Source: scatter_nd_ops_test.py

Example 3: testEmptyOutputShape2

  def testEmptyOutputShape2(self):
    indices = tf.placeholder(tf.int32, shape=None)
    updates = tf.placeholder(tf.int32, shape=None)
    shape = tf.constant([0, 3, 2], tf.int32)

    with self.test_session():
      tf.scatter_nd(indices, updates, shape).eval(feed_dict={
          indices: np.zeros(
              [2, 2, 2], dtype=np.int32),
          updates: np.zeros(
              [2, 2, 2], dtype=np.int32)
      })
Developer: chinnadhurai, Project: block_rnn, Lines: 12, Source: scatter_nd_ops_test.py

Example 4: stack_tensor

def stack_tensor(slices, indices, dense_tensor, head_dims):
  """Reconsititutes a tensor from slices and corresponding indices.

  This is an inverse operation to slice_tensor. Missing slices are set to 0.

  Args:
    slices: a tensor. Shape [K, D_1, ...]
    indices: a 1-D integer tensor. Shape: [K]
    dense_tensor: the original tensor the slices were taken
      from. Shape: [D_0, D_1, ...]
    head_dims: True dimensions of the dense_tensor's first dimension.

  Returns:
    Reconstituted tensor. Shape: [D_0, D_1, ...]
  """
  # NOTE(siege): This cast shouldn't be necessary.
  indices = tf.cast(indices, tf.int32)

  tail_dims = tf.shape(dense_tensor)[1:]
  dense_shape = tf.concat([head_dims, tail_dims], 0)

  slices = tf.reshape(slices, tf.concat([[-1], dense_shape[1:]], 0))
  indices = tf.expand_dims(indices, -1)

  return tf.reshape(tf.scatter_nd(indices, slices, dense_shape),
                    tf.shape(dense_tensor))
Developer: ALISCIFP, Project: models, Lines: 26, Source: utils.py
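
To make the intent concrete, here is a hypothetical round trip; the tf.gather call stands in for the slice_tensor mentioned in the docstring, which is not shown in this article:

dense = tf.reshape(tf.range(12, dtype=tf.float32), [4, 3])
idx = tf.constant([0, 2])
slices = tf.gather(dense, idx)  # take rows 0 and 2, shape [2, 3]
restored = stack_tensor(slices, idx, dense, head_dims=tf.constant([4]))
# Rows 0 and 2 of `restored` match `dense`; rows 1 and 3 are zero.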

Example 5: compute_module

 def compute_module(accum, module):
     # `selection`, `inputs`, `module_fnc` and `output_shape` are closed over
     # from the enclosing scope: rows of `inputs` routed to this module are
     # processed, scattered back into a zero tensor of `output_shape`, and
     # accumulated into `accum`.
     mask = tf.equal(module, selection)
     reduced_mask = tf.reduce_any(mask, axis=-1)
     indices = tf.where(reduced_mask)
     affected_inp = tf.boolean_mask(inputs, reduced_mask)
     output = module_fnc(affected_inp, module)
     return accum + tf.scatter_nd(indices, output, tf.cast(output_shape, tf.int64))
Developer: timediv, Project: libmodular, Lines: 7, Source: modular.py

Example 6: hnet_transformation

def hnet_transformation(gt_pts, transformation_coeffcient, name):
    """

    :param gt_pts:
    :param transformation_coeffcient:
    :param name:
    :return:
    """
    with tf.variable_scope(name):
        # First, project the original ground-truth point pairs
        transformation_coeffcient = tf.concat([transformation_coeffcient, [1.0]], axis=-1)
        H_indices = tf.constant([[0], [1], [2], [4], [5], [7], [8]])
        H_shape = tf.constant([9])
        H = tf.scatter_nd(H_indices, transformation_coeffcient, H_shape)
        H = tf.reshape(H, shape=[3, 3])

        gt_pts = tf.transpose(gt_pts)
        pts_projects = tf.matmul(H, gt_pts)

        # Solve the least-squares polynomial-fit parameter matrix (third-order in Y)
        Y = tf.transpose(pts_projects[1, :])
        X = tf.transpose(pts_projects[0, :])
        Y_One = tf.add(tf.subtract(Y, Y), tf.constant(1.0, tf.float32))
        Y_stack = tf.stack([tf.pow(Y, 3), tf.pow(Y, 2), Y, Y_One], axis=1)
        w = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(tf.transpose(Y_stack), Y_stack)),
                                tf.transpose(Y_stack)),
                      tf.expand_dims(X, -1))

        # Use the fitted polynomial parameters to compute the fitted x positions
        x_preds = tf.matmul(Y_stack, w)
        preds = tf.transpose(tf.stack([tf.squeeze(x_preds, -1), Y, Y_One], axis=1))
        preds_fit = tf.stack([tf.squeeze(x_preds, -1), Y], axis=1)
        x_transformation_back = tf.matmul(tf.matrix_inverse(H), preds)

    return x_transformation_back
Developer: dandancat123, Project: bilibli_notes2, Lines: 35, Source: lanenet_hnet_loss.py
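
The scatter_nd call above is a compact way to build the homography H = [[a, b, c], [0, d, e], [0, f, 1]] from six free coefficients plus a fixed 1: positions 3 and 6 of the flattened 3x3 matrix are simply never written, so they stay zero. Isolated, with illustrative coefficient values:

coeffs = tf.constant([0.9, 0.1, 5.0, 1.1, -3.0, 0.05], tf.float32)  # [a, b, c, d, e, f]
coeffs = tf.concat([coeffs, [1.0]], axis=-1)
H_indices = tf.constant([[0], [1], [2], [4], [5], [7], [8]])
H = tf.reshape(tf.scatter_nd(H_indices, coeffs, tf.constant([9])), [3, 3])
# H == [[0.9, 0.1,  5.0],
#       [0.0, 1.1, -3.0],
#       [0.0, 0.05, 1.0]]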

Example 7: max_unpool

def max_unpool(inputs, pooling_indices, output_shape=None, k_size=[1, 2, 2, 1]):
    # NOTE! this function is based on the implementation by kwotsin in
    # https://github.com/kwotsin/TensorFlow-ENet

    # inputs has shape [batch_size, height, width, channels]

    # pooling_indices: pooling indices of the previously max_pooled layer

    # output_shape: what shape the returned tensor should have; despite the
    # None default it must be provided, and k_size is currently unused

    pooling_indices = tf.cast(pooling_indices, tf.int32)
    input_shape = tf.shape(inputs, out_type=tf.int32)

    one_like_pooling_indices = tf.ones_like(pooling_indices, dtype=tf.int32)
    batch_shape = tf.concat([[input_shape[0]], [1], [1], [1]], 0)
    batch_range = tf.reshape(tf.range(input_shape[0], dtype=tf.int32), shape=batch_shape)
    b = one_like_pooling_indices*batch_range
    y = pooling_indices//(output_shape[2]*output_shape[3])
    x = (pooling_indices//output_shape[3]) % output_shape[2]
    feature_range = tf.range(output_shape[3], dtype=tf.int32)
    f = one_like_pooling_indices*feature_range

    inputs_size = tf.size(inputs)
    indices = tf.transpose(tf.reshape(tf.stack([b, y, x, f]), [4, inputs_size]))
    values = tf.reshape(inputs, [inputs_size])

    ret = tf.scatter_nd(indices, values, output_shape)

    return ret
Developer: ascenoputing, Project: SemanticSegmentation_DL, Lines: 29, Source: network.py
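
A minimal pairing sketch, under the assumption that output_shape is passed as concrete Python ints (the integer arithmetic on output_shape[2] and output_shape[3] requires it) and that tf.nn.max_pool_with_argmax is available on the target device:

x = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
pooled, argmax = tf.nn.max_pool_with_argmax(
    x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")
unpooled = max_unpool(pooled, argmax, output_shape=[1, 4, 4, 1])
# The max values return to their original positions; everything else is zero.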

Example 8: call

  def call(self, x, padding=None):
    # Retrieve dynamically known shapes
    batch_size = tf.shape(x)[0]
    length = tf.shape(x)[1]

    if padding is not None:
      with tf.name_scope("remove_padding"):
        # Flatten padding to [batch_size*length]
        pad_mask = tf.reshape(padding, [-1])

        nonpad_ids = tf.to_int32(tf.where(pad_mask < 1e-9))

        # Reshape x to [batch_size*length, hidden_size] to remove padding
        x = tf.reshape(x, [-1, self.hidden_size])
        x = tf.gather_nd(x, indices=nonpad_ids)

        # Reshape x from 2 dimensions to 3 dimensions.
        x.set_shape([None, self.hidden_size])
        x = tf.expand_dims(x, axis=0)

    output = self.filter_dense_layer(x)
    if self.train:
      output = tf.nn.dropout(output, 1.0 - self.relu_dropout)
    output = self.output_dense_layer(output)

    if padding is not None:
      with tf.name_scope("re_add_padding"):
        output = tf.squeeze(output, axis=0)
        output = tf.scatter_nd(
            indices=nonpad_ids,
            updates=output,
            shape=[batch_size * length, self.hidden_size]
        )
        output = tf.reshape(output, [batch_size, length, self.hidden_size])
    return output
Developer: cybermaster, Project: reference, Lines: 35, Source: ffn_layer.py
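
The remove/re-add padding pattern in isolation, with hypothetical small shapes: tf.gather_nd drops the padded rows before the expensive dense layers, and tf.scatter_nd puts the results back, leaving zeros at the padded positions.

x = tf.random_normal([2, 3, 4])                      # [batch, length, hidden]
padding = tf.constant([[0., 0., 1.], [0., 1., 1.]])  # 1.0 marks padded positions
pad_mask = tf.reshape(padding, [-1])
nonpad_ids = tf.to_int32(tf.where(pad_mask < 1e-9))  # indices of real tokens
flat = tf.gather_nd(tf.reshape(x, [-1, 4]), nonpad_ids)
restored = tf.scatter_nd(nonpad_ids, flat, shape=[2 * 3, 4])  # padded rows are zero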

Example 9: update_slices

def update_slices(slices, indices, dense_tensor, head_dims):
  """Reconstitutes a tensor from slices and corresponding indices.

  Like _stack_tensor, but instead of setting missing slices to 0, sets them to
  what they were in the original tensor. The return value is reshaped to be
  the same as dense_tensor.

  Args:
    slices: a tensor. Shape [K, D_1, ...]
    indices: a 1-D integer tensor. Shape: [K]
    dense_tensor: the original tensor the slices were taken
      from. Shape: [D_0, D_1, ...]
    head_dims: True dimensions of the dense_tensor's first dimension.

  Returns:
    Reconstituted tensor. Shape: [D_0, D_1, ...]
  """
  # NOTE(siege): This cast shouldn't be necessary.
  indices = tf.cast(indices, tf.int32)

  tail_dims = tf.shape(dense_tensor)[1:]
  dense_shape = tf.concat([head_dims, tail_dims], 0)

  update_mask_vals = tf.fill(tf.shape(indices), 1)
  reshaped_indices = tf.expand_dims(indices, -1)
  update_mask = tf.equal(
      tf.scatter_nd(reshaped_indices, update_mask_vals, head_dims[:1]), 1)

  reshaped_dense_slices = tf.reshape(
      stack_tensor(slices, indices, dense_tensor, head_dims), dense_shape)
  reshaped_dense_tensor = tf.reshape(dense_tensor, dense_shape)

  return tf.reshape(
      tf.where(update_mask, reshaped_dense_slices, reshaped_dense_tensor),
      tf.shape(dense_tensor))
Developer: ALISCIFP, Project: models, Lines: 35, Source: utils.py
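
Continuing the hypothetical round trip from the stack_tensor example above: update_slices overwrites only the indexed rows and leaves the rest of dense_tensor intact.

dense = tf.reshape(tf.range(12, dtype=tf.float32), [4, 3])
idx = tf.constant([0, 2])
new_rows = tf.zeros([2, 3])
merged = update_slices(new_rows, idx, dense, head_dims=tf.constant([4]))
# Rows 0 and 2 of `merged` are zeroed; rows 1 and 3 keep their values from `dense`.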

Example 10: hard_negative_mining

      def hard_negative_mining():
        bboxes_per_batch = tf.unstack(bboxes)
        classification_loss_per_batch = tf.unstack(classification_loss)
        num_positives_per_batch = tf.unstack(tf.reduce_sum(positives, axis=-1))
        neg_class_loss_per_batch = tf.unstack(neg_class_loss_all)

        neg_class_losses = []
        total_negatives = []

        for bboxes_per_image, classification_loss_per_image, num_positives_per_image, neg_class_loss_per_image in \
            zip(bboxes_per_batch, classification_loss_per_batch, num_positives_per_batch, neg_class_loss_per_batch):
          min_negatives_keep = tf.maximum(self.neg_pos_ratio * num_positives_per_image, 3)
          num_negatives_keep = tf.minimum(min_negatives_keep,
                                          tf.count_nonzero(neg_class_loss_per_image, dtype=tf.float32))

          indices = tf.image.non_max_suppression(bboxes_per_image, classification_loss_per_image,
                                                 tf.to_int32(num_negatives_keep), iou_threshold=0.99)
          num_negatives = tf.size(indices)
          total_negatives.append(num_negatives)
          expanded_indexes = tf.expand_dims(indices, axis=1)  # shape: (num_negatives, 1)
          negatives_keep = tf.scatter_nd(expanded_indexes, updates=tf.ones_like(indices, dtype=tf.int32),
                                         shape=tf.shape(classification_loss_per_image))  # shape: (num_priors,)
          negatives_keep = tf.to_float(tf.reshape(negatives_keep, [num_priors]))  # shape: (num_priors,)
          neg_class_losses.append(tf.reduce_sum(classification_loss_per_image * negatives_keep, axis=-1))  # scalar loss for this image

        return tf.stack(neg_class_losses), tf.reduce_sum(tf.stack(total_negatives))
Developer: undeadinu, Project: training_toolbox_tensorflow, Lines: 26, Source: loss.py
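
The core trick above, isolated: tf.scatter_nd turns the index list returned by non-max suppression into a dense 0/1 keep mask (illustrative sizes):

kept = tf.constant([1, 4])  # indices selected by NMS
mask = tf.scatter_nd(tf.expand_dims(kept, axis=1), tf.ones_like(kept), shape=[6])
# mask == [0, 1, 0, 0, 1, 0]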

Example 11: testScatterNdRepatedIndicesAdd

 def testScatterNdRepatedIndicesAdd(self):
   indices = tf.zeros([100000, 1], tf.int32)
   values = np.random.randn(100000)
   shape = [1]
   with self.test_session():
     val = tf.scatter_nd(indices, values, shape).eval()
   self.assertAllClose([np.sum(values)], val)
Developer: chinnadhurai, Project: block_rnn, Lines: 7, Source: scatter_nd_ops_test.py

Example 12: hnet_loss

def hnet_loss(gt_pts, transformation_coeffcient, name):
    """
    
    :param gt_pts: 原始的标签点对 [x, y, 1] 
    :param transformation_coeffcient: 映射矩阵参数(6参数矩阵) [[a, b, c], [0, d, e], [0, f, 1]]
    :param name:
    :return: 
    """
    with tf.variable_scope(name):
        # First, project the original ground-truth point pairs
        transformation_coeffcient = tf.concat([transformation_coeffcient, [1.0]], axis=-1)
        H_indices = tf.constant([[0], [1], [2], [4], [5], [7], [8]])
        H_shape = tf.constant([9])
        H = tf.scatter_nd(H_indices, transformation_coeffcient, H_shape)
        H = tf.reshape(H, shape=[3, 3])

        gt_pts = tf.transpose(gt_pts)
        pts_projects = tf.matmul(H, gt_pts)

        # Solve the least-squares polynomial-fit parameter matrix (third-order in Y)
        Y = tf.transpose(pts_projects[1, :])
        X = tf.transpose(pts_projects[0, :])
        Y_One = tf.add(tf.subtract(Y, Y), tf.constant(1.0, tf.float32))
        Y_stack = tf.stack([tf.pow(Y, 3), tf.pow(Y, 2), Y, Y_One], axis=1)
        w = tf.matmul(tf.matmul(tf.matrix_inverse(tf.matmul(tf.transpose(Y_stack), Y_stack)),
                                tf.transpose(Y_stack)),
                      tf.expand_dims(X, -1))
        # Compute the fitted positions from the polynomial parameters and
        # back-project them into the original space to compute the loss
        x_preds = tf.matmul(Y_stack, w)
        preds = tf.transpose(tf.stack([tf.squeeze(x_preds, -1), Y, Y_One], axis=1))
        x_transformation_back = tf.matmul(tf.matrix_inverse(H), preds)

        loss = tf.reduce_mean(tf.pow(gt_pts[0, :] - x_transformation_back[0, :], 2))

    return loss
Developer: dandancat123, Project: bilibli_notes2, Lines: 35, Source: lanenet_hnet_loss.py

Example 13: unpool_layer2x2_batch

    def unpool_layer2x2_batch(self, bottom, argmax):
        bottom_shape = tf.shape(bottom)
        top_shape = [bottom_shape[0], bottom_shape[1] * 2, bottom_shape[2] * 2, bottom_shape[3]]

        batch_size = top_shape[0]
        height = top_shape[1]
        width = top_shape[2]
        channels = top_shape[3]

        argmax_shape = tf.to_int64([batch_size, height, width, channels])
        argmax = self.unravel_argmax(argmax, argmax_shape)

        t1 = tf.to_int64(tf.range(channels))
        t1 = tf.tile(t1, [batch_size * (width // 2) * (height // 2)])
        t1 = tf.reshape(t1, [-1, channels])
        t1 = tf.transpose(t1, perm=[1, 0])
        t1 = tf.reshape(t1, [channels, batch_size, height // 2, width // 2, 1])
        t1 = tf.transpose(t1, perm=[1, 0, 2, 3, 4])

        t2 = tf.to_int64(tf.range(batch_size))
        t2 = tf.tile(t2, [channels * (width // 2) * (height // 2)])
        t2 = tf.reshape(t2, [-1, batch_size])
        t2 = tf.transpose(t2, perm=[1, 0])
        t2 = tf.reshape(t2, [batch_size, channels, height // 2, width // 2, 1])

        t3 = tf.transpose(argmax, perm=[1, 4, 2, 3, 0])

        t = tf.concat([t2, t3, t1], 4)  # TF 1.x argument order (values first, then axis)
        indices = tf.reshape(t, [(height // 2) * (width // 2) * channels * batch_size, 4])

        x1 = tf.transpose(bottom, perm=[0, 3, 1, 2])
        values = tf.reshape(x1, [-1])
        return tf.scatter_nd(indices, values, tf.to_int64(top_shape))
Developer: BenJamesbabala, Project: Tensorflow-DeconvNet-Segmentation, Lines: 33, Source: DeconvNetPipeline.py

Example 14: testEmptyOutputShape3

  def testEmptyOutputShape3(self):
    indices = tf.zeros([0], tf.int32)
    updates = tf.zeros([0], tf.int32)
    shape = tf.constant([0], tf.int32)
    scatter = tf.scatter_nd(indices, updates, shape)

    with self.test_session():
      self.assertEqual(scatter.eval().size, 0)
Developer: chinnadhurai, Project: block_rnn, Lines: 8, Source: scatter_nd_ops_test.py

Example 15: _unsparsify

 def _unsparsify(var_x):
     if not isinstance(var_x, tf.IndexedSlices):
         return var_x
     assert var_x.dense_shape is not None, \
         "memory_saving_gradients encountered sparse gradients of unknown shape"
     indices = var_x.indices
     while indices.shape.ndims < var_x.values.shape.ndims:
         indices = tf.expand_dims(indices, -1)
     return tf.scatter_nd(indices, var_x.values, var_x.dense_shape)
Developer: stonezuohui, Project: faceswap, Lines: 9, Source: memory_saving_gradients.py
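
A typical trigger, sketched with hypothetical shapes: the gradient of tf.gather arrives as an IndexedSlices, which _unsparsify densifies so downstream graph-rewriting code can treat it like any other tensor.

params = tf.Variable(tf.ones([5, 2]))
loss = tf.reduce_sum(tf.gather(params, [0, 3]))
grad = tf.gradients(loss, params)[0]  # an IndexedSlices, not a dense Tensor
dense_grad = _unsparsify(grad)        # shape [5, 2]; rows 1, 2 and 4 are zero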


Note: The tensorflow.scatter_nd examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors, and the source code copyright belongs to those original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.