

Python v1.broadcast_to Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.broadcast_to method in Python. If you are wondering how exactly to use v1.broadcast_to, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the tensorflow.compat.v1 module that contains this method.


Eight code examples of the v1.broadcast_to method are shown below, sorted by popularity by default.
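Before turning to the excerpts, a minimal self-contained sketch of what tf.compat.v1.broadcast_to does may be useful: it replicates a tensor along size-1 or missing leading dimensions until it matches a larger, broadcast-compatible target shape, following the usual NumPy broadcasting rules. The values and shapes below are illustrative only.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # run the sketch in graph mode, like the examples below

x = tf.constant([1., 2., 3.])    # shape [3]
y = tf.broadcast_to(x, [2, 3])   # row repeated twice, shape [2, 3]

with tf.Session() as sess:
    print(sess.run(y))  # [[1. 2. 3.]
                        #  [1. 2. 3.]]

The examples that follow use exactly this pattern to expand a per-example latent code, an index range, or a scalar step counter up to a per-point or per-batch shape.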

Example 1: _init_graph

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import broadcast_to [as alias]
def _init_graph(self):
    """Initialize computation graph for tensorflow.
    """
    with self.graph.as_default():
      self.refiner = im.ImNet(dim=self.dim,
                              in_features=self.codelen,
                              out_features=self.out_features,
                              num_filters=self.num_filters)
      self.global_step = tf.get_variable('global_step', shape=[],
                                         dtype=tf.int64)

      self.pts_ph = tf.placeholder(tf.float32, shape=[self.point_batch, 3])
      self.lat_ph = tf.placeholder(tf.float32, shape=[self.codelen])

      lat = tf.broadcast_to(self.lat_ph[tf.newaxis],
                            [self.point_batch, self.codelen])
      code = tf.concat((self.pts_ph, lat), axis=-1)  # [pb, 3+c]

      vals = self.refiner(code, training=False)  # [pb, 1]
      self.vals = tf.squeeze(vals, axis=1)  # [pb]
      self.saver = tf.train.Saver()
      self.sess = tf.Session()
      self.saver.restore(self.sess, self.ckpt) 
Developer: tensorflow, Project: graphics, Lines: 25, Source: evaluator.py

Example 2: extract_relation_representations

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import broadcast_to [as alias]
def extract_relation_representations(input_layer, input_ids, tokenizer):
  """Extracts relation representation from sentence sequence layer."""
  entity_representations = []
  entity_marker_ids = tokenizer.convert_tokens_to_ids(["[E1]", "[E2]"])
  for entity_marker_id in entity_marker_ids:
    mask = tf.to_float(tf.equal(input_ids, entity_marker_id))
    mask = tf.broadcast_to(tf.expand_dims(mask, -1), tf.shape(input_layer))
    entity_representation = tf.reduce_max(
        mask * input_layer, axis=1, keepdims=True)
    entity_representations.append(entity_representation)

  output_layer = tf.concat(entity_representations, axis=2)
  output_layer = tf.squeeze(output_layer, [1])
  tf.logging.info("entity marker pooling AFTER output shape %s",
                  output_layer.shape)

  return output_layer 
Developer: google-research, Project: language, Lines: 19, Source: bert_fewshot_classifier.py

Example 3: _batch_slice

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import broadcast_to [as alias]
def _batch_slice(self, ary, start_ijk, w, batch_size):
    """Batched slicing of original grid.

    Args:
      ary: tensor, rank = 3.
      start_ijk: [batch_size, 3] tensor, starting index.
      w: width of cube to extract.
      batch_size: int, batch size.

    Returns:
      batched_slices: [batch_size, w, w, w] tensor, batched slices of ary.
    """
    batch_size = start_ijk.shape[0]
    ijk = tf.range(w, dtype=tf.int32)
    slice_idx = tf.meshgrid(ijk, ijk, ijk, indexing='ij')
    slice_idx = tf.stack(
        slice_idx, axis=-1)  # [in_grid_res, in_grid_res, in_grid_res, 3]
    slice_idx = tf.broadcast_to(slice_idx[tf.newaxis], [batch_size, w, w, w, 3])
    offset = tf.broadcast_to(
        start_ijk[:, tf.newaxis, tf.newaxis, tf.newaxis, :],
        [batch_size, w, w, w, 3])
    slice_idx += offset
    # [batch_size, in_grid_res, in_grid_res, in_grid_res, 3]
    batched_slices = tf.gather_nd(ary, slice_idx)
    # [batch_size, in_grid_res, in_grid_res, in_grid_res]
    return batched_slices 
Developer: tensorflow, Project: graphics, Lines: 28, Source: evaluator.py
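The core trick in _batch_slice is to build a [batch_size, w, w, w, 3] cube of absolute indices with tf.meshgrid plus tf.broadcast_to, shift it by a per-batch offset, and hand it to tf.gather_nd. Below is a hypothetical standalone sketch of that indexing pattern with toy sizes; it is not part of evaluator.py.

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

ary = tf.constant(np.arange(4 * 4 * 4).reshape(4, 4, 4), dtype=tf.int32)  # rank-3 grid
start_ijk = tf.constant([[0, 0, 0], [1, 2, 1]], dtype=tf.int32)           # batch_size = 2
batch_size, w = 2, 2

ijk = tf.range(w, dtype=tf.int32)
slice_idx = tf.stack(tf.meshgrid(ijk, ijk, ijk, indexing='ij'), axis=-1)  # [w, w, w, 3]
slice_idx = tf.broadcast_to(slice_idx[tf.newaxis], [batch_size, w, w, w, 3])
offset = tf.broadcast_to(start_ijk[:, tf.newaxis, tf.newaxis, tf.newaxis, :],
                         [batch_size, w, w, w, 3])
cubes = tf.gather_nd(ary, slice_idx + offset)  # [batch_size, w, w, w]

with tf.Session() as sess:
    print(sess.run(cubes).shape)  # (2, 2, 2, 2)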

Example 4: get_global_step

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import broadcast_to [as alias]
def get_global_step(self):
    # tf.train.get_global_step() does not work well under model_fn for TPU.
    with tf.variable_scope('', reuse=tf.AUTO_REUSE):
      return tf.broadcast_to(
          tf.get_variable('global_step', shape=[], dtype=tf.int64),
          shape=(self._export_batch_size,)) 
Developer: google-research, Project: tensor2robot, Lines: 8, Source: t2r_models.py

Example 5: padded_where

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import broadcast_to [as alias]
def padded_where(condition, length):
  """TPU friendly version of tf.where(cond) with fixed length and padding.

  This is a wrapper around tf.where(cond) that returns the coordinates of the
  True elements of cond (case where x and y are None). This version, however,
  returns a fixed length tensor of coordinates, determined by `length`.  If the
  number of True elements in `condition` is less than `length`, then the
  returned tensor is right-padded with zeros. Otherwise, the returned tensor is
  truncated to `length` size.

  Args:
    condition: tf.Tensor of type boolean; any shape.
    length: Length of (last dimension of) the returned tensor.

  Returns:
    Two tensors:
    - a tensor of type int32, with same shape as `condition`, representing
      coordinates of the last dimension of `condition` tensor where values are
      True.
    - a mask tensor of type int32 with 1s in valid indices of the first tensor,
      and 0s for padded indices.
  """
  condition_shape = shape(condition)
  n = condition_shape[-1]

  # Build a tensor that counts indices from 0 to length of condition.
  ixs = tf.broadcast_to(tf.range(n, dtype=tf.int32), condition_shape)

  # Build tensor where True condition values get their index value or
  # n (== len(condition)) otherwise.
  ixs = tf.where(condition, ixs, tf.ones_like(condition, dtype=tf.int32) * n)

  # Sort indices (so that indices for False values == n, will be placed last),
  # and get the desired number of entries, truncating by `length`.
  ixs = tf.sort(ixs)[Ellipsis, 0:length]

  # For first tensor, zero-out values == n. For second tensor, put 1s where
  # values are < n, and 0s where values are == n.
  return tf.mod(ixs, n), (1 - tf.div(ixs, n)) 
Developer: google-research, Project: language, Lines: 41, Source: tensor_utils.py
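As a hypothetical usage sketch (not part of tensor_utils.py): the shape helper called in padded_where is assumed here to behave like tf.shape, and the condition tensor and length are made up for illustration.

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()

def shape(t):
    # Stand-in for the tensor_utils shape helper; assumed to be tf.shape-like.
    return tf.shape(t)

cond = tf.constant([[False, True, False, True, False]])  # shape [1, 5]
ixs, mask = padded_where(cond, length=3)

with tf.Session() as sess:
    print(sess.run([ixs, mask]))
    # ixs  -> [[1, 3, 0]]  indices of True entries, right-padded with zeros
    # mask -> [[1, 1, 0]]  1 marks a real index, 0 marks padding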

Example 6: _test_broadcast_to

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import broadcast_to [as alias]
def _test_broadcast_to(in_shape, to_shape):
    """ One iteration of broadcast_to"""

    data = np.random.uniform(size=in_shape).astype('float32')
    shape_data = np.array(to_shape).astype('int32')

    with tf.Graph().as_default():
        in_data = array_ops.placeholder(shape=data.shape, dtype=data.dtype)
        shape_data = constant_op.constant(
            shape_data, shape=shape_data.shape, dtype=shape_data.dtype)
        tf.broadcast_to(in_data, shape_data)

        compare_tf_with_tvm(data, 'Placeholder:0',
                            'BroadcastTo:0', opt_level=0) 
Developer: apache, Project: incubator-tvm, Lines: 16, Source: test_forward.py

Example 7: _test_broadcast_to_from_tensor

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import broadcast_to [as alias]
def _test_broadcast_to_from_tensor(in_shape):
    """ One iteration of broadcast_to with unknown shape at graph build"""

    data = np.random.uniform(size=in_shape).astype('float32')

    with tf.Graph().as_default():
        in_data = array_ops.placeholder(
            shape=[None], dtype=data.dtype)

        shape_data = tf.multiply(tf.shape(in_data), 32)
        tf.broadcast_to(in_data, shape_data)

        compare_tf_with_tvm(data, 'Placeholder:0', 'BroadcastTo:0') 
Developer: apache, Project: incubator-tvm, Lines: 15, Source: test_forward.py
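In the TVM test suite, helpers like the two above are driven by a top-level test wrapper. A hedged sketch of such a wrapper follows; the exact shapes are illustrative, not copied from test_forward.py.

def test_forward_broadcast_to():
    """Exercise broadcast_to with a static and a dynamically computed target shape."""
    _test_broadcast_to((4, 1, 32, 32), (4, 8, 32, 32))
    _test_broadcast_to_from_tensor((1,))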

Example 8: _eval_net

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import broadcast_to [as alias]
def _eval_net(self, lat, weights, xloc, training=False):
    """Evaluate function values by querying shared dense network.

    Args:
      lat: `[batch_size, num_points, 2**dim, in_features]` tensor, neighbor
        latent codes for each input point.
      weights: `[batch_size, num_points, 2**dim]` tensor, bi/tri-linear
        interpolation weights for each neighbor.
      xloc: `[batch_size, num_points, 2**dim, dim]` tensor, relative coordinates.
      training: bool, flag indicating training phase.

    Returns:
      values: `[batch_size, num_points, out_features]` tensor, query values.
    """
    nb, np, nn, nc = lat.get_shape().as_list()
    nd = self.dim
    if self.method == "linear":
      inputs = tf.concat([xloc, lat], axis=-1)
      # `[batch_size, num_points, 2**dim, dim+in_features]`
      inputs = tf.reshape(inputs, [-1, nc+nd])
      values = self.net(inputs, training=training)
      values = tf.reshape(values, [nb, np, nn, self.cout])
      # `[batch_size, num_points, 2**dim, out_features]`
      if self.interp:
        values = tf.reduce_sum(tf.expand_dims(weights, axis=-1)*values, axis=2)
        # `[batch_size, num_points, out_features]`
      else:
        values = (values, weights)
    else:  # nearest neighbor
      nid = tf.cast(tf.argmax(weights, axis=-1), tf.int32)
      # [batch_size, num_points]
      bid = tf.broadcast_to(tf.range(nb, dtype=tf.int32)[:, tf.newaxis],
                            [nb, np])
      pid = tf.broadcast_to(tf.range(np, dtype=tf.int32)[tf.newaxis, :],
                            [nb, np])
      gather_id = tf.stack((bid, pid, nid), axis=-1)
      lat_ = tf.gather_nd(lat, gather_id)  # [batch_size, num_points, in_feat]
      xloc_ = tf.gather_nd(xloc, gather_id)  # [batch_size, num_points, dim]
      inputs = tf.concat([xloc_, lat_], axis=-1)
      inputs = tf.reshape(inputs, [-1, nc+nd])
      values = self.net(inputs, training=training)
      values = tf.reshape(values, [nb, np, self.cout])
      # `[batch_size, num_points, out_features]`

    return values 
Developer: tensorflow, Project: graphics, Lines: 46, Source: local_implicit_grid_layer.py


Note: The tensorflow.compat.v1.broadcast_to examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.