

Python v1.broadcast_to Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.broadcast_to method in Python. If you are unsure what v1.broadcast_to does or how to use it, the curated examples below should help. You can also explore other usage examples from the tensorflow.compat.v1 module.


The following presents 8 code examples of v1.broadcast_to, ordered by popularity by default.
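Before the examples, here is a minimal sketch of what tf.broadcast_to does (the shapes are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant([[1.0], [2.0], [3.0]])  # shape [3, 1]
y = tf.broadcast_to(x, [3, 4])          # shape [3, 4]: the single column tiled 4 times

with tf.Session() as sess:
    print(sess.run(y))
    # [[1. 1. 1. 1.]
    #  [2. 2. 2. 2.]
    #  [3. 3. 3. 3.]]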

Example 1: _init_graph

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import broadcast_to [as alias]
def _init_graph(self):
    """Initialize computation graph for tensorflow.
    """
    with self.graph.as_default():
      self.refiner = im.ImNet(dim=self.dim,
                              in_features=self.codelen,
                              out_features=self.out_features,
                              num_filters=self.num_filters)
      self.global_step = tf.get_variable('global_step', shape=[],
                                         dtype=tf.int64)

      self.pts_ph = tf.placeholder(tf.float32, shape=[self.point_batch, 3])
      self.lat_ph = tf.placeholder(tf.float32, shape=[self.codelen])

      # Tile the single latent code across every query point.
      lat = tf.broadcast_to(self.lat_ph[tf.newaxis],
                            [self.point_batch, self.codelen])
      code = tf.concat((self.pts_ph, lat), axis=-1)  # [pb, 3+c]

      vals = self.refiner(code, training=False)  # [pb, 1]
      self.vals = tf.squeeze(vals, axis=1)  # [pb]
      self.saver = tf.train.Saver()
      self.sess = tf.Session()
      self.saver.restore(self.sess, self.ckpt) 
Developer ID: tensorflow, Project: graphics, Code lines: 25, Source file: evaluator.py
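The broadcast in this graph simply tiles one latent code across every query point; a standalone sketch of just that step (shapes are illustrative, not the project's):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

point_batch, codelen = 4, 3
lat_ph = tf.placeholder(tf.float32, shape=[codelen])
pts_ph = tf.placeholder(tf.float32, shape=[point_batch, 3])

lat = tf.broadcast_to(lat_ph[tf.newaxis], [point_batch, codelen])  # [4, 3]
code = tf.concat((pts_ph, lat), axis=-1)                           # [4, 6]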

Example 2: extract_relation_representations

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import broadcast_to [as alias]
def extract_relation_representations(input_layer, input_ids, tokenizer):
  """Extracts relation representation from sentence sequence layer."""
  entity_representations = []
  entity_marker_ids = tokenizer.convert_tokens_to_ids(["[E1]", "[E2]"])
  for entity_marker_id in entity_marker_ids:
    mask = tf.to_float(tf.equal(input_ids, entity_marker_id))
    mask = tf.broadcast_to(tf.expand_dims(mask, -1), tf.shape(input_layer))
    entity_representation = tf.reduce_max(
        mask * input_layer, axis=1, keepdims=True)
    entity_representations.append(entity_representation)

  output_layer = tf.concat(entity_representations, axis=2)
  output_layer = tf.squeeze(output_layer, [1])
  tf.logging.info("entity marker pooling AFTER output shape %s",
                  output_layer.shape)

  return output_layer 
Developer ID: google-research, Project: language, Code lines: 19, Source file: bert_fewshot_classifier.py
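For intuition, here is a self-contained toy version of the masking trick above (token id 7 stands in for an entity marker; all names and sizes are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

input_ids = tf.constant([[5, 7, 9, 7]])                             # [batch=1, seq=4]
input_layer = tf.reshape(tf.range(8, dtype=tf.float32), [1, 4, 2])  # [1, 4, 2]

mask = tf.to_float(tf.equal(input_ids, 7))                          # [1, 4]
mask = tf.broadcast_to(tf.expand_dims(mask, -1), tf.shape(input_layer))
pooled = tf.reduce_max(mask * input_layer, axis=1, keepdims=True)

with tf.Session() as sess:
    print(sess.run(pooled))  # [[[6. 7.]]] -- the later marker position wins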

Example 3: _batch_slice

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import broadcast_to [as alias]
def _batch_slice(self, ary, start_ijk, w, batch_size):
    """Batched slicing of original grid.

    Args:
      ary: tensor, rank = 3.
      start_ijk: [batch_size, 3] tensor, starting index.
      w: width of cube to extract.
      batch_size: int, batch size.

    Returns:
      batched_slices: [batch_size, w, w, w] tensor, batched slices of ary.
    """
    batch_size = start_ijk.shape[0]  # recomputed here; supersedes the argument
    ijk = tf.range(w, dtype=tf.int32)
    slice_idx = tf.meshgrid(ijk, ijk, ijk, indexing='ij')
    slice_idx = tf.stack(slice_idx, axis=-1)  # [w, w, w, 3]
    slice_idx = tf.broadcast_to(slice_idx[tf.newaxis], [batch_size, w, w, w, 3])
    offset = tf.broadcast_to(
        start_ijk[:, tf.newaxis, tf.newaxis, tf.newaxis, :],
        [batch_size, w, w, w, 3])
    slice_idx += offset  # [batch_size, w, w, w, 3]
    batched_slices = tf.gather_nd(ary, slice_idx)
    # [batch_size, w, w, w]
    return batched_slices 
Developer ID: tensorflow, Project: graphics, Code lines: 28, Source file: evaluator.py
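The same meshgrid-plus-offset indexing can be exercised on its own; a sketch using a tiny 4x4x4 grid (all sizes are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

grid = tf.reshape(tf.range(4 * 4 * 4, dtype=tf.float32), [4, 4, 4])
start_ijk = tf.constant([[0, 0, 0], [2, 2, 2]], dtype=tf.int32)  # two corners
w, batch_size = 2, 2

ijk = tf.range(w, dtype=tf.int32)
idx = tf.stack(tf.meshgrid(ijk, ijk, ijk, indexing='ij'), axis=-1)  # [2, 2, 2, 3]
idx = tf.broadcast_to(idx[tf.newaxis], [batch_size, w, w, w, 3])
idx += tf.broadcast_to(start_ijk[:, tf.newaxis, tf.newaxis, tf.newaxis, :],
                       [batch_size, w, w, w, 3])
cubes = tf.gather_nd(grid, idx)  # [2, 2, 2, 2]: one w-cube per batch entry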

Example 4: get_global_step

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import broadcast_to [as alias]
def get_global_step(self):
    # tf.train.get_global_step() does not work well under model_fn for TPU.
    with tf.variable_scope('', reuse=tf.AUTO_REUSE):
      return tf.broadcast_to(
          tf.get_variable('global_step', shape=[], dtype=tf.int64),
          shape=(self._export_batch_size,)) 
Developer ID: google-research, Project: tensor2robot, Code lines: 8, Source file: t2r_models.py

Example 5: padded_where

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import broadcast_to [as alias]
def padded_where(condition, length):
  """TPU friendly version of tf.where(cond) with fixed length and padding.

  This is a wrapper around tf.where(cond) that returns the coordinates of the
  True elements of cond (case where x and y are None). This version, however,
  returns a fixed length tensor of coordinates, determined by `length`.  If the
  number of True elements in `condition` is less than `length`, then the
  returned tensor is right-padded with zeros. Otherwise, the returned tensor is
  truncated to `length` size.

  Args:
    condition: tf.Tensor of type boolean; any shape.
    length: Length of (last dimension of) the returned tensor.

  Returns:
    Two tensors:
    - a tensor of type int32, with same shape as `condition`, representing
      coordinates of the last dimension of `condition` tensor where values are
      True.
    - a mask tensor of type int32 with 1s in valid indices of the first tensor,
      and 0s for padded indices.
  """
  # `shape` is a helper from the surrounding tensor_utils module that returns
  # the tensor's shape as a list (static entries where known).
  condition_shape = shape(condition)
  n = condition_shape[-1]

  # Build a tensor that counts indices from 0 to length of condition.
  ixs = tf.broadcast_to(tf.range(n, dtype=tf.int32), condition_shape)

  # Build tensor where True condition values get their index value or
  # n (== len(condition)) otherwise.
  ixs = tf.where(condition, ixs, tf.ones_like(condition, dtype=tf.int32) * n)

  # Sort indices (so that indices for False values == n, will be placed last),
  # and get the desired number of entries, truncating by `length`.
  ixs = tf.sort(ixs)[Ellipsis, 0:length]

  # For the first tensor, zero out values == n. For the second tensor, put 1s
  # where values are < n, and 0s where values are == n.
  return tf.mod(ixs, n), (1 - tf.div(ixs, n))
Developer ID: google-research, Project: language, Code lines: 41, Source file: tensor_utils.py
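A worked example of the padding and truncation behavior (a sketch; it assumes padded_where and the module's shape helper are in scope):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

cond = tf.constant([True, False, True, True, False])
idx, mask = padded_where(cond, length=4)

with tf.Session() as sess:
    print(sess.run([idx, mask]))
    # [array([0, 2, 3, 0], dtype=int32), array([1, 1, 1, 0], dtype=int32)]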

Example 6: _test_broadcast_to

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import broadcast_to [as alias]
def _test_broadcast_to(in_shape, to_shape):
    """One iteration of broadcast_to."""
    # array_ops, constant_op, and compare_tf_with_tvm come from the
    # surrounding TVM test_forward.py module.

    data = np.random.uniform(size=in_shape).astype('float32')
    shape_data = np.array(to_shape).astype('int32')

    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        shape_data = constant_op.constant(
            shape_data, shape=shape_data.shape, dtype=shape_data.dtype)
        tf.broadcast_to(in_data, shape_data)

        compare_tf_with_tvm(data, 'Placeholder:0',
                            'BroadcastTo:0', opt_level=0) 
Developer ID: apache, Project: incubator-tvm, Code lines: 16, Source file: test_forward.py

Example 7: _test_broadcast_to_from_tensor

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import broadcast_to [as alias]
def _test_broadcast_to_from_tensor(in_shape):
    """ One iteration of broadcast_to with unknown shape at graph build"""

    data = np.random.uniform(size=in_shape).astype('float32')

    with tf.Graph().as_default():
        in_data = array_ops.placeholder(
            shape=[None], dtype=data.dtype)

        shape_data = tf.multiply(tf.shape(in_data), 32)
        tf.broadcast_to(in_data, shape_data)

        compare_tf_with_tvm(data, 'Placeholder:0', 'BroadcastTo:0') 
Developer ID: apache, Project: incubator-tvm, Code lines: 15, Source file: test_forward.py
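In the TVM test suite, helpers like these are driven from a test entry point; a sketch of such a driver (the shapes are illustrative, not the project's exact cases):

def test_forward_broadcast_to():
    """Exercise the broadcast_to conversion helpers above."""
    _test_broadcast_to((4, 1, 32), (4, 8, 32))
    _test_broadcast_to_from_tensor((1,))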

Example 8: _eval_net

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import broadcast_to [as alias]
def _eval_net(self, lat, weights, xloc, training=False):
    """Evaluate function values by querying shared dense network.

    Args:
      lat: `[batch_size, num_points, 2**dim, in_features]` tensor, neighbor
      latent codes for each input point.
      weights: `[batch_size, num_points, 2**dim]` tensor, bi/tri-linear
      interpolation weights for each neighbor.
      xloc: `[batch_size, num_points, 2**dim, dim]`tensor, relative coordinates.
      training: bool, flag indicating training phase.
    Returns:
      values: `[batch_size, num_point, out_features]` tensor, query values.
    """
    # nb=batch_size, np=num_points, nn=2**dim neighbors, nc=in_features.
    nb, np, nn, nc = lat.get_shape().as_list()
    nd = self.dim
    if self.method == "linear":
      inputs = tf.concat([xloc, lat], axis=-1)
      # `[batch_size, num_points, 2**dim, dim+in_features]`
      inputs = tf.reshape(inputs, [-1, nc+nd])
      values = self.net(inputs, training=training)
      values = tf.reshape(values, [nb, np, nn, self.cout])
      # `[batch_size, num_points, 2**dim, out_features]`
      if self.interp:
        values = tf.reduce_sum(tf.expand_dims(weights, axis=-1)*values, axis=2)
        # `[batch_size, num_points, out_features]`
      else:
        values = (values, weights)
    else:  # nearest neighbor
      nid = tf.cast(tf.argmax(weights, axis=-1), tf.int32)
      # [batch_size, num_points]
      bid = tf.broadcast_to(tf.range(nb, dtype=tf.int32)[:, tf.newaxis],
                            [nb, np])
      pid = tf.broadcast_to(tf.range(np, dtype=tf.int32)[tf.newaxis, :],
                            [nb, np])
      gather_id = tf.stack((bid, pid, nid), axis=-1)
      lat_ = tf.gather_nd(lat, gather_id)  # [batch_size, num_points, in_feat]
      xloc_ = tf.gather_nd(xloc, gather_id)  # [batch_size, num_points, dim]
      inputs = tf.concat([xloc_, lat_], axis=-1)
      inputs = tf.reshape(inputs, [-1, nc+nd])
      values = self.net(inputs, training=training)
      values = tf.reshape(values, [nb, np, self.cout])
      # `[batch_size, num_points, out_features]`

    return values 
Developer ID: tensorflow, Project: graphics, Code lines: 46, Source file: local_implicit_grid_layer.py
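The nearest-neighbor branch builds gather_nd indices by broadcasting per-batch and per-point ranges; that pattern also works standalone (a sketch, sizes illustrative):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

nb, npts, nn, nc = 2, 3, 4, 5
lat = tf.random_uniform([nb, npts, nn, nc])
weights = tf.random_uniform([nb, npts, nn])

nid = tf.cast(tf.argmax(weights, axis=-1), tf.int32)                   # [nb, npts]
bid = tf.broadcast_to(tf.range(nb, dtype=tf.int32)[:, tf.newaxis], [nb, npts])
pid = tf.broadcast_to(tf.range(npts, dtype=tf.int32)[tf.newaxis, :], [nb, npts])
gather_id = tf.stack((bid, pid, nid), axis=-1)                         # [nb, npts, 3]
picked = tf.gather_nd(lat, gather_id)                                  # [nb, npts, nc]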


Note: The tensorflow.compat.v1.broadcast_to examples in this article were compiled from open-source code hosted on GitHub, MSDocs, and similar platforms, with snippets drawn from open-source projects contributed by many developers. Copyright of the source code remains with the original authors; consult the corresponding project's License before redistributing or using it.