

Python array_ops.concat_v2 Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.ops.array_ops.concat_v2. If you have been wondering what concat_v2 does, how to call it, or what it looks like in real code, the hand-picked examples below should help.


The following shows 15 code examples of the concat_v2 function, sorted by popularity by default.
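Before the examples, a quick orientation on the API itself: concat_v2(values, axis) takes the list of tensors first and the axis second, reversing the argument order of the older tf.concat(concat_dim, values). The sketch below is illustrative only and assumes a TensorFlow 0.12-era environment where concat_v2 is still exposed; from TensorFlow 1.0 onward the same op is available as tf.concat.

import tensorflow as tf
from tensorflow.python.ops import array_ops

a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([[5, 6], [7, 8]])

rows = array_ops.concat_v2([a, b], 0)  # stack along rows -> shape [4, 2]
cols = array_ops.concat_v2([a, b], 1)  # stack along columns -> shape [2, 4]

with tf.Session() as sess:
  print(sess.run(rows))  # [[1 2] [3 4] [5 6] [7 8]]
  print(sess.run(cols))  # [[1 2 5 6] [3 4 7 8]]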

Example 1: _SparseDenseCwiseMulOrDivGrad

def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
  """Common code for SparseDenseCwise{Mul,Div} gradients."""
  x_indices = op.inputs[0]
  x_shape = op.inputs[2]
  y = op.inputs[3]

  y_shape = math_ops.to_int64(array_ops.shape(y))
  num_added_dims = array_ops.expand_dims(
      array_ops.size(x_shape) - array_ops.size(y_shape), 0)
  augmented_y_shape = array_ops.concat_v2(
      [array_ops.ones(num_added_dims, ops.dtypes.int64), y_shape], 0)

  scaling = x_shape // augmented_y_shape
  scaled_indices = x_indices // scaling
  scaled_indices = array_ops.slice(
      scaled_indices, array_ops.concat_v2([[0], num_added_dims], 0), [-1, -1])
  dense_vals = array_ops.gather_nd(y, scaled_indices)

  if is_mul:
    dx = grad * dense_vals
    dy_val = grad * op.inputs[1]
  else:
    dx = grad / dense_vals
    dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals))
  # indices can repeat after scaling, so we can't use sparse_to_dense().
  dy = sparse_ops.sparse_add(
      array_ops.zeros_like(y),
      sparse_tensor.SparseTensor(scaled_indices, dy_val, y_shape))

  # (sp_indices, sp_vals, sp_shape, dense)
  return (None, dx, None, dy)
Developer: curtiszimmerman, Project: tensorflow, Lines: 31, Source: sparse_grad.py
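The heart of this gradient is rank alignment: ones are prepended to y's shape so that integer division against x's dense shape yields per-dimension broadcast factors. Below is a minimal sketch of just that step, with the shapes [2, 3, 4] and [4] invented for illustration rather than taken from the TensorFlow test suite:

import tensorflow as tf
from tensorflow.python.ops import array_ops

x_shape = tf.constant([2, 3, 4], dtype=tf.int64)  # dense shape of sparse x
y_shape = tf.constant([4], dtype=tf.int64)        # shape of dense y

num_added_dims = array_ops.expand_dims(
    array_ops.size(x_shape) - array_ops.size(y_shape), 0)
augmented_y_shape = array_ops.concat_v2(
    [array_ops.ones(num_added_dims, tf.int64), y_shape], 0)  # [1, 1, 4]
scaling = x_shape // augmented_y_shape  # [2, 3, 1]

with tf.Session() as sess:
  # Dividing each sparse index of x by [2, 3, 1] maps it onto the
  # matching broadcast position in y.
  print(sess.run(scaling))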

Example 2: _entropy

 def _entropy(self):
   if (not self.distribution.is_continuous or
       not self.bijector.is_constant_jacobian):
     raise NotImplementedError("entropy is not implemented")
   # Suppose Y = g(X) where g is a diffeomorphism and X is a continuous rv. It
   # can be shown that:
   #   H[Y] = H[X] + E_X[(log o abs o det o J o g)(X)].
   # If is_constant_jacobian then:
   #   E_X[(log o abs o det o J o g)(X)] = (log o abs o det o J o g)(c)
   # where c can be anything.
   entropy = self.distribution.entropy()
   if self._is_maybe_event_override:
     # H[X] = sum_i H[X_i] if X_i are mutually independent.
     # This means that a reduce_sum is a simple rescaling.
     entropy *= math_ops.cast(math_ops.reduce_prod(self._override_event_shape),
                              dtype=entropy.dtype.base_dtype)
   if self._is_maybe_batch_override:
     new_shape = array_ops.concat_v2([
         _ones_like(self._override_batch_shape),
         self.distribution.batch_shape()], 0)
     entropy = array_ops.reshape(entropy, new_shape)
     multiples = array_ops.concat_v2([
         self._override_batch_shape,
         _ones_like(self.distribution.batch_shape())], 0)
     entropy = array_ops.tile(entropy, multiples)
   dummy = 0.
   return entropy - self.bijector.inverse_log_det_jacobian(dummy)
Developer: kadeng, Project: tensorflow, Lines: 27, Source: transformed_distribution.py

Example 3: _BiasAddGradGrad

def _BiasAddGradGrad(op, received_grad):
  """Gradient for the BiasAddGrad op.

  Args:
    op: BiasAddGrad op for which we are calculating gradients.
    received_grad: The gradients passed to the BiasAddGrad op.

  Returns:
    A single gradient Tensor for the input to BiasAddGrad (which
    is the gradient of the bias term in BiasAdd)
  """

  try:
    data_format = op.get_attr("data_format")
  except ValueError:
    data_format = None

  shape = array_ops.shape(op.inputs[0])
  rank = array_ops.rank(op.inputs[0])
  bias_shape = array_ops.shape(received_grad)

  if data_format == b"NCHW":
    expanded_shape = array_ops.concat_v2([
        array_ops.ones_like(shape[:-3]), bias_shape,
        array_ops.ones_like(shape[-2:])
    ], 0)
    tile_mults = array_ops.concat_v2([shape[:-3], [1], shape[-2:]], 0)
  else:
    expanded_shape = array_ops.concat_v2(
        [array_ops.ones_like(shape[:-1]), bias_shape], 0)
    tile_mults = array_ops.concat_v2([shape[:-1], [1]], 0)

  expanded_grad = array_ops.reshape(received_grad, expanded_shape)
  return array_ops.tile(expanded_grad, tile_mults)
Developer: zuowang, Project: tensorflow, Lines: 34, Source: nn_grad.py
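The reshape-then-tile pattern above is how the bias gradient is broadcast back over the full input shape. Here is a minimal sketch of the default (non-NCHW) branch, with an input shape invented for this note:

import tensorflow as tf
from tensorflow.python.ops import array_ops

received_grad = tf.ones([5])   # incoming gradient w.r.t. a 5-channel bias
inp = tf.zeros([2, 3, 4, 5])   # hypothetical BiasAdd input, NHWC layout
shape = array_ops.shape(inp)                 # [2, 3, 4, 5]
bias_shape = array_ops.shape(received_grad)  # [5]

expanded_shape = array_ops.concat_v2(
    [array_ops.ones_like(shape[:-1]), bias_shape], 0)   # [1, 1, 1, 5]
tile_mults = array_ops.concat_v2([shape[:-1], [1]], 0)  # [2, 3, 4, 1]
out = array_ops.tile(
    array_ops.reshape(received_grad, expanded_shape), tile_mults)

with tf.Session() as sess:
  print(sess.run(tf.shape(out)))  # [2 3 4 5]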

Example 4: same_dynamic_shape

def same_dynamic_shape(a, b):
  """Returns whether a and b have the same dynamic shape.

  Args:
    a: `Tensor`
    b: `Tensor`

  Returns:
    `Boolean` `Tensor` representing if both tensors have the same shape.
  """
  a = ops.convert_to_tensor(a, name="a")
  b = ops.convert_to_tensor(b, name="b")

  # One of the shapes isn't fully defined, so we need to use the dynamic
  # shape.
  return control_flow_ops.cond(
      math_ops.equal(array_ops.rank(a), array_ops.rank(b)),
      # Here we can't just do math_ops.equal(a.shape, b.shape), since
      # static shape inference may break the equality comparison between
      # shape(a) and shape(b) in math_ops.equal.
      lambda: math_ops.reduce_all(math_ops.equal(
          array_ops.concat_v2((
              array_ops.shape(a),
              array_ops.shape(b)), 0),
          array_ops.concat_v2((
              array_ops.shape(b),
              array_ops.shape(a)), 0))),
      lambda: constant_op.constant(False))
Developer: kadeng, Project: tensorflow, Lines: 28, Source: distribution_util.py
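The double concat_v2 comparison deserves a word: once the ranks are known to match, comparing the concatenation of shape(a) and shape(b) against the concatenation of shape(b) and shape(a) is equivalent to comparing shape(a) with shape(b) elementwise, while guaranteeing that math_ops.equal always sees two equal-length vectors. A minimal sketch of that inner comparison, with shapes invented for illustration:

import tensorflow as tf
from tensorflow.python.ops import array_ops, math_ops

a = tf.zeros([2, 3])
b = tf.zeros([2, 3])  # try [2, 4] to see the comparison fail

same = math_ops.reduce_all(math_ops.equal(
    array_ops.concat_v2((array_ops.shape(a), array_ops.shape(b)), 0),
    array_ops.concat_v2((array_ops.shape(b), array_ops.shape(a)), 0)))

with tf.Session() as sess:
  print(sess.run(same))  # True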

Example 5: _GRUBlockCellGrad

def _GRUBlockCellGrad(op, *grad):
  r"""Gradient for GRUBlockCell.

  Args:
    op: Op for which the gradient is defined.
    *grad: Gradients of the optimization function wrt output
      for the Op.

  Returns:
    d_x: Gradients wrt to x
    d_h: Gradients wrt to h
    d_w_ru: Gradients wrt to w_ru
    d_w_c: Gradients wrt to w_c
    d_b_ru: Gradients wrt to b_ru
    d_b_c: Gradients wrt to b_c

  Mathematics behind the Gradients below:
  ```
  d_c_bar = d_h \circ (1-u) \circ (1-c \circ c)
  d_u_bar = d_h \circ (h-c) \circ u \circ (1-u)

  d_r_bar_u_bar = [d_r_bar d_u_bar]

  [d_x_component_1 d_h_prev_component_1] = d_r_bar_u_bar * w_ru^T

  [d_x_component_2 d_h_prevr] = d_c_bar * w_c^T

  d_x = d_x_component_1 + d_x_component_2

  d_h_prev = d_h_prev_component_1 + d_h_prevr \circ r + d_h \circ u
  ```
  Below calculation is performed in the python wrapper for the Gradients
  (not in the gradient kernel.)
  ```
  d_w_ru = x_h_prev^T * d_r_bar_u_bar

  d_w_c = x_h_prevr^T * d_c_bar

  d_b_ru = sum of d_r_bar_u_bar along axis = 0

  d_b_c = sum of d_c_bar along axis = 0
  ```
  """
  x, h_prev, w_ru, w_c, b_ru, b_c = op.inputs
  r, u, c, _ = op.outputs
  _, _, _, d_h = grad

  d_x, d_h_prev, d_c_bar, d_r_bar_u_bar = _gru_ops_so.gru_block_cell_grad(
      x, h_prev, w_ru, w_c, b_ru, b_c, r, u, c, d_h)

  x_h_prev = array_ops.concat_v2([x, h_prev], 1)
  d_w_ru = math_ops.matmul(x_h_prev, d_r_bar_u_bar, transpose_a=True)
  d_b_ru = nn_ops.bias_add_grad(d_r_bar_u_bar)

  x_h_prevr = array_ops.concat_v2([x, h_prev * r], 1)
  d_w_c = math_ops.matmul(x_h_prevr, d_c_bar, transpose_a=True)
  d_b_c = nn_ops.bias_add_grad(d_c_bar)

  return d_x, d_h_prev, d_w_ru, d_w_c, d_b_ru, d_b_c
Developer: curtiszimmerman, Project: tensorflow, Lines: 59, Source: gru_ops.py

Example 6: testAttentionCellWrapperCorrectResult

 def testAttentionCellWrapperCorrectResult(self):
   num_units = 4
   attn_length = 6
   batch_size = 2
   expected_output = np.array(
       [[0.955392, 0.408507, -0.60122, 0.270718],
        [0.903681, 0.331165, -0.500238, 0.224052]],
       dtype=np.float32)
   expected_state = np.array(
       [[0.81331915, 0.32036272, 0.28079176, 1.08888793, 0.41264394,
         0.1062041, 0.10444493, 0.32050529, 0.64655536, 0.70794445,
         0.51896095, 0.31809306, 0.58086717, 0.49446869, 0.7641536,
         0.12814975, 0.92231739, 0.89857256, 0.21889746, 0.38442063,
         0.53481543, 0.8876909, 0.45823169, 0.5905602, 0.78038228,
         0.56501579, 0.03971386, 0.09870267, 0.8074435, 0.66821432,
         0.99211812, 0.12295902, 1.01412082, 0.33123279, -0.71114945,
         0.40583119],
        [0.59962207, 0.42597458, -0.22491696, 0.98063421, 0.32548007,
         0.11623692, -0.10100613, 0.27708149, 0.76956916, 0.6360054,
         0.51719815, 0.50458527, 0.73000264, 0.66986895, 0.73576689,
         0.86301267, 0.87887371, 0.35185754, 0.93417215, 0.64732957,
         0.63173044, 0.66627824, 0.53644657, 0.20477486, 0.98458421,
         0.38277245, 0.03746676, 0.92510188, 0.57714164, 0.84932971,
         0.36127412, 0.12125921, 0.99780077, 0.31886846, -0.67595094,
         0.56531656]],
       dtype=np.float32)
   seed = 12345
   random_seed.set_random_seed(seed)
   for state_is_tuple in [False, True]:
     with session.Session() as sess:
       with variable_scope.variable_scope(
           "state_is_tuple", reuse=state_is_tuple):
         lstm_cell = core_rnn_cell_impl.BasicLSTMCell(
             num_units, state_is_tuple=state_is_tuple)
         cell = rnn_cell.AttentionCellWrapper(
             lstm_cell, attn_length, state_is_tuple=state_is_tuple)
         zeros1 = random_ops.random_uniform(
             (batch_size, num_units), 0.0, 1.0, seed=seed + 1)
         zeros2 = random_ops.random_uniform(
             (batch_size, num_units), 0.0, 1.0, seed=seed + 2)
         zeros3 = random_ops.random_uniform(
             (batch_size, num_units), 0.0, 1.0, seed=seed + 3)
         attn_state_zeros = random_ops.random_uniform(
             (batch_size, attn_length * num_units), 0.0, 1.0, seed=seed + 4)
         zero_state = ((zeros1, zeros2), zeros3, attn_state_zeros)
         if not state_is_tuple:
           zero_state = array_ops.concat_v2([
               zero_state[0][0], zero_state[0][1], zero_state[1], zero_state[2]
           ], 1)
         inputs = random_ops.random_uniform(
             (batch_size, num_units), 0.0, 1.0, seed=seed + 5)
         output, state = cell(inputs, zero_state)
         if state_is_tuple:
           state = array_ops.concat_v2(
               [state[0][0], state[0][1], state[1], state[2]], 1)
         sess.run(variables.global_variables_initializer())
         self.assertAllClose(sess.run(output), expected_output)
         self.assertAllClose(sess.run(state), expected_state)
Developer: kadeng, Project: tensorflow, Lines: 58, Source: rnn_cell_test.py

Example 7: eye

def eye(
    num_rows,
    num_columns=None,
    batch_shape=None,
    dtype=dtypes.float32,
    name=None):
  """Construct an identity matrix, or a batch of matrices.

  ```python
  # Construct one identity matrix.
  tf.eye(2)
  ==> [[1., 0.],
       [0., 1.]]

  # Construct a batch of 3 identity matrices, each 2 x 2.
  # batch_identity[i, :, :] is a 2 x 2 identity matrix, i = 0, 1, 2.
  batch_identity = tf.eye(2, batch_shape=[3])

  # Construct one 2 x 3 "identity" matrix
  tf.eye(2, num_columns=3)
  ==> [[ 1.,  0.,  0.],
       [ 0.,  1.,  0.]]
  ```

  Args:
    num_rows: Non-negative `int32` scalar `Tensor` giving the number of rows
      in each batch matrix.
    num_columns: Optional non-negative `int32` scalar `Tensor` giving the number
      of columns in each batch matrix.  Defaults to `num_rows`.
    batch_shape:  `int32` `Tensor`.  If provided, returned `Tensor` will have
      leading batch dimensions of this shape.
    dtype:  The type of an element in the resulting `Tensor`
    name:  A name for this `Op`.  Defaults to "eye".

  Returns:
    A `Tensor` of shape `batch_shape + [num_rows, num_columns]`
  """
  with ops.name_scope(
      name, default_name="eye", values=[num_rows, num_columns, batch_shape]):

    batch_shape = [] if batch_shape is None else batch_shape
    batch_shape = ops.convert_to_tensor(
        batch_shape, name="shape", dtype=dtypes.int32)

    if num_columns is None:
      diag_size = num_rows
    else:
      diag_size = math_ops.minimum(num_rows, num_columns)
    diag_shape = array_ops.concat_v2((batch_shape, [diag_size]), 0)
    diag_ones = array_ops.ones(diag_shape, dtype=dtype)

    if num_columns is None:
      return array_ops.matrix_diag(diag_ones)
    else:
      shape = array_ops.concat_v2((batch_shape, [num_rows, num_columns]), 0)
      zero_matrix = array_ops.zeros(shape, dtype=dtype)
      return array_ops.matrix_set_diag(zero_matrix, diag_ones)
Developer: curtiszimmerman, Project: tensorflow, Lines: 57, Source: linalg_ops.py

Example 8: boston_eval_fn

def boston_eval_fn():
  boston = base.load_boston()
  n_examples = len(boston.target)
  features = array_ops.reshape(
      constant_op.constant(boston.data), [n_examples, _BOSTON_INPUT_DIM])
  labels = array_ops.reshape(
      constant_op.constant(boston.target), [n_examples, 1])
  return array_ops.concat_v2([features, features], 0), array_ops.concat_v2(
      [labels, labels], 0)
Developer: kadeng, Project: tensorflow, Lines: 9, Source: estimator_test.py

Example 9: testOpsBetweenCut

 def testOpsBetweenCut(self):
   with ops.Graph().as_default() as g:
     t1 = constant(1.0)
     t2 = constant(2.0)
     t3 = array_ops.stack([t1, t2])
     t4 = constant([1.0])
     t5 = array_ops.concat_v2([t4, t3], 0)
     t6 = constant([2.0])
     t7 = array_ops.concat_v2([t5, t6], 0)
   self._assertOpListEqual([t7.op, t5.op, t4.op],
                           _OpsBetween(g, [t7.op], [t4.op]))
Developer: moolighty, Project: tensorflow, Lines: 11, Source: gradients_test.py

Example 10: testConcat

  def testConcat(self):
    tf_val = array_ops.concat_v2(
        [[16, 37], array_ops.placeholder(
            dtypes.int32, shape=(2,))], 0)
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([16, 37, None, None], c_val.as_list())

    tf_val = array_ops.concat_v2(
        [[16, 37], array_ops.placeholder(
            dtypes.int32, shape=(1,)), [48]], 0)
    c_val = tensor_util.constant_value_as_shape(tf_val)
    self.assertEqual([16, 37, None, 48], c_val.as_list())
Developer: kadeng, Project: tensorflow, Lines: 12, Source: tensor_util_test.py

Example 11: refresh_shortlist

 def refresh_shortlist():
   """Update the shortlist with the highest scores in id_to_score."""
   new_scores, new_ids = nn_ops.top_k(self.id_to_score, self.shortlist_size)
   smallest_new_score = math_ops.reduce_min(new_scores)
   new_length = math_ops.reduce_sum(
       math_ops.to_int32(math_ops.greater(new_scores, dtypes.float32.min)))
   u1 = self.sl_ids.assign(
       math_ops.to_int64(array_ops.concat_v2([[new_length], new_ids], 0)))
   u2 = self.sl_scores.assign(
       array_ops.concat_v2([[smallest_new_score], new_scores], 0))
   self.last_ops = [u1, u2]
   return control_flow_ops.group(u1, u2)
Developer: kadeng, Project: tensorflow, Lines: 12, Source: topn.py

Example 12: _sample_n

 def _sample_n(self, n, seed):
   batch_shape = self.batch_shape()
   event_shape = self.event_shape()
   batch_ndims = array_ops.shape(batch_shape)[0]

   ndims = batch_ndims + 3  # sample_ndims=1, event_ndims=2
   shape = array_ops.concat_v2(((n,), batch_shape, event_shape), 0)

   # Complexity: O(nbk^2)
   x = random_ops.random_normal(shape=shape, mean=0.0, stddev=1.0,
                                dtype=self.dtype, seed=seed)

   # Complexity: O(nbk)
   # This parametrization is equivalent to Chi2, i.e.,
   # ChiSquared(k) == Gamma(alpha=k/2, beta=1/2)
   g = random_ops.random_gamma(
       shape=(n,),
       alpha=self._multi_gamma_sequence(0.5 * self.df, self.dimension),
       beta=0.5,
       dtype=self.dtype,
       seed=distribution_util.gen_new_seed(seed, "wishart"))

   # Complexity: O(nbk^2)
   x = array_ops.matrix_band_part(x, -1, 0)  # Tri-lower.

   # Complexity: O(nbk)
   x = array_ops.matrix_set_diag(x, math_ops.sqrt(g))

   # Make batch-op ready.
   # Complexity: O(nbk^2)
   perm = array_ops.concat_v2((math_ops.range(1, ndims), (0,)), 0)
   x = array_ops.transpose(x, perm)
   shape = array_ops.concat_v2((batch_shape, (event_shape[0], -1)), 0)
   x = array_ops.reshape(x, shape)

   # Complexity: O(nbM) where M is the complexity of the operator solving a
   # vector system.  E.g., for OperatorPDDiag, each matmul is O(k^2), so
   # this complexity is O(nbk^2). For OperatorPDCholesky, each matmul is
   # O(k^3) so this step has complexity O(nbk^3).
   x = self.scale_operator_pd.sqrt_matmul(x)

   # Undo make batch-op ready.
   # Complexity: O(nbk^2)
   shape = array_ops.concat_v2((batch_shape, event_shape, (n,)), 0)
   x = array_ops.reshape(x, shape)
   perm = array_ops.concat_v2(((ndims - 1,), math_ops.range(0, ndims - 1)), 0)
   x = array_ops.transpose(x, perm)

   if not self.cholesky_input_output_matrices:
     # Complexity: O(nbk^3)
     x = math_ops.matmul(x, x, adjoint_b=True)

   return x
Developer: kdavis-mozilla, Project: tensorflow, Lines: 53, Source: wishart.py

Example 13: _flip_matrix_to_vector_dynamic

def _flip_matrix_to_vector_dynamic(mat, batch_shape):
  """Flip matrix to vector with dynamic shapes."""
  mat_rank = array_ops.rank(mat)
  k = array_ops.gather(array_ops.shape(mat), mat_rank - 2)
  final_shape = array_ops.concat_v2((batch_shape, [k]), 0)

  # mat.shape = matrix_batch_shape + [k, M]
  # Permutation corresponding to [M] + matrix_batch_shape + [k]
  perm = array_ops.concat_v2(
      ([mat_rank - 1], math_ops.range(0, mat_rank - 1)), 0)
  mat_with_end_at_beginning = array_ops.transpose(mat, perm=perm)
  vector = array_ops.reshape(mat_with_end_at_beginning, final_shape)
  return vector
Developer: curtiszimmerman, Project: tensorflow, Lines: 13, Source: operator_pd.py

Example 14: _propagate

def _propagate(dim_indices, conf, cell, c_prev, m_prev, new_output, new_state,
               first_call):
  """Propagates through all the cells in dim_indices dimensions.
  """
  if len(dim_indices) == 0:
    return

  # Because of the way RNNCells are implemented, we take the last dimension
  # (H_{N-1}) out and feed it as the state of the RNN cell
  # (in `last_dim_output`).
  # The input of the cell (H_0 to H_{N-2}) are concatenated into `cell_inputs`
  if conf.num_dims > 1:
    ls_cell_inputs = [None] * (conf.num_dims - 1)
    for d in conf.dims[:-1]:
      ls_cell_inputs[d.idx] = new_output[d.idx] if new_output[
          d.idx] is not None else m_prev[d.idx]
    cell_inputs = array_ops.concat_v2(ls_cell_inputs, 1)
  else:
    cell_inputs = array_ops.zeros([m_prev[0].get_shape().as_list()[0], 0],
                                  m_prev[0].dtype)

  last_dim_output = new_output[-1] if new_output[-1] is not None else m_prev[-1]

  for i in dim_indices:
    d = conf.dims[i]
    if d.non_recurrent_fn:
      linear_args = array_ops.concat_v2(
          [cell_inputs, last_dim_output],
          1) if conf.num_dims > 1 else last_dim_output
      with vs.variable_scope('non_recurrent' if conf.tied else
                             'non_recurrent/cell_{}'.format(i)):
        if conf.tied and not (first_call and i == dim_indices[0]):
          vs.get_variable_scope().reuse_variables()
        new_output[d.idx] = layers.legacy_fully_connected(
            linear_args,
            num_output_units=conf.num_units,
            activation_fn=d.non_recurrent_fn,
            weight_init=vs.get_variable_scope().initializer or
            layers.initializers.xavier_initializer)
    else:
      if c_prev[i] is not None:
        cell_state = array_ops.concat_v2([c_prev[i], last_dim_output], 1)
      else:
        # for GRU/RNN, the state is just the previous output
        cell_state = last_dim_output

      with vs.variable_scope('recurrent' if conf.tied else
                             'recurrent/cell_{}'.format(i)):
        if conf.tied and not (first_call and i == dim_indices[0]):
          vs.get_variable_scope().reuse_variables()
        new_output[d.idx], new_state[d.idx] = cell(cell_inputs, cell_state)
Developer: kadeng, Project: tensorflow, Lines: 51, Source: grid_rnn_cell.py

Example 15: testOpsBetweenCycle

 def testOpsBetweenCycle(self):
   with ops.Graph().as_default() as g:
     t1 = constant(1.0)
     t2 = constant(2.0)
     t3 = array_ops.pack([t1, t2])
     t4 = array_ops.concat_v2([t3, t3, t3], 0)
     t5 = constant([1.0])
     t6 = array_ops.concat_v2([t4, t5], 0)
     t7 = array_ops.concat_v2([t6, t3], 0)
   self._assertOpListEqual([t6.op, t4.op, t3.op],
                           _OpsBetween(g, [t6.op], [t3.op]))
   self._assertOpListEqual([t7.op, t6.op, t5.op, t4.op, t3.op, t1.op],
                           _OpsBetween(g, [t7.op], [t1.op, t5.op]))
   self._assertOpListEqual([t6.op, t5.op, t4.op, t3.op, t2.op],
                           _OpsBetween(g, [t6.op], [t2.op, t5.op]))
Developer: BloodD, Project: tensorflow, Lines: 15, Source: gradients_test.py


Note: the tensorflow.python.ops.array_ops.concat_v2 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects and remain the copyright of their original authors; consult each project's License before distributing or reusing them, and do not repost without permission.