

Python v1.einsum Method Code Examples

This article collects and summarizes typical code examples of the tensorflow.compat.v1.einsum method in Python. If you are wondering what exactly v1.einsum does, how to call it, or what it looks like in real code, the hand-picked examples below may help. You can also explore other usage examples from the module it belongs to, tensorflow.compat.v1.


The following shows 15 code examples of the v1.einsum method, sorted by popularity by default.
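
Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; all names are illustrative) of the two einsum patterns that recur throughout them: a plain matrix multiplication and a per-row inner product.

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # run in classic TF1 graph mode

a = tf.constant(np.random.rand(2, 3), dtype=tf.float32)
b = tf.constant(np.random.rand(3, 4), dtype=tf.float32)
matmul = tf.einsum("ij,jk->ik", a, b)       # same result as tf.matmul(a, b)

x = tf.constant(np.random.rand(5, 7), dtype=tf.float32)
y = tf.constant(np.random.rand(5, 7), dtype=tf.float32)
row_dot = tf.einsum("ij,ij->i", x, y)       # per-row inner product, shape [5]

with tf.Session() as sess:
  print(sess.run(matmul).shape)   # (2, 4)
  print(sess.run(row_dot).shape)  # (5,)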

Example 1: two_class_log_likelihood

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import einsum [as alias]
def two_class_log_likelihood(predictions, labels, weights_fn=None):
  """Log-likelihood for two class classification with 0/1 labels.

  Args:
    predictions: A float valued tensor of shape [`batch_size`].  Each
      component should be between 0 and 1.
    labels: An int valued tensor of shape [`batch_size`].  Each component
      should either be 0 or 1.
    weights_fn: unused.

  Returns:
    A pair, with the average log likelihood in the first component.
  """
  del weights_fn
  float_predictions = tf.cast(tf.squeeze(predictions), dtype=tf.float64)
  batch_probs = tf.stack([1. - float_predictions, float_predictions], axis=-1)
  int_labels = tf.cast(tf.squeeze(labels), dtype=tf.int32)
  onehot_targets = tf.cast(tf.one_hot(int_labels, 2), dtype=tf.float64)
  chosen_probs = tf.einsum(
      "ij,ij->i", batch_probs, onehot_targets, name="chosen_probs")
  avg_log_likelihood = tf.reduce_mean(tf.log(chosen_probs))
  return avg_log_likelihood, tf.constant(1.0) 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 24, Source file: metrics.py
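
As a rough usage sketch, assuming the two_class_log_likelihood function above is in scope and tf is tensorflow.compat.v1 (the toy values are illustrative, not from tensor2tensor):

preds = tf.constant([0.9, 0.2, 0.7])   # predicted P(label == 1) per example
labels = tf.constant([1, 0, 1])
log_lik, _ = two_class_log_likelihood(preds, labels)

with tf.Session() as sess:
  # Mean of log(0.9), log(0.8) and log(0.7); roughly -0.228.
  print(sess.run(log_lik))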

Example 2: test_einsum_via_matmul

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import einsum [as alias]
def test_einsum_via_matmul(self):
    batch_size = 8
    seq_length = 12
    num_attention_heads = 3
    head_size = 6
    hidden_size = 10

    input_tensor = np.random.uniform(0, 1,
                                     [batch_size, seq_length, hidden_size])
    input_tensor = tf.constant(input_tensor, dtype=tf.float32)
    w = np.random.uniform(0, 1, [hidden_size, num_attention_heads, head_size])
    w = tf.constant(w, dtype=tf.float32)
    ret1 = tf.einsum("BFH,HND->BFND", input_tensor, w)
    ret2 = modeling.einsum_via_matmul(input_tensor, w, 1)
    self.assertAllClose(ret1, ret2)

    input_tensor = np.random.uniform(0, 1,
                                     [batch_size, seq_length,
                                      num_attention_heads, head_size])
    input_tensor = tf.constant(input_tensor, dtype=tf.float32)
    w = np.random.uniform(0, 1, [num_attention_heads, head_size, hidden_size])
    w = tf.constant(w, dtype=tf.float32)
    ret1 = tf.einsum("BFND,NDH->BFH", input_tensor, w)
    ret2 = modeling.einsum_via_matmul(input_tensor, w, 2)
    self.assertAllClose(ret1, ret2) 
Developer ID: google-research, Project: albert, Lines of code: 27, Source file: modeling_test.py
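
The two equations exercised by this test, "BFH,HND->BFND" and "BFND,NDH->BFH", are the head-projection patterns used throughout the ALBERT attention code. A quick NumPy sanity check of the first one (purely illustrative) shows why a single reshape+matmul reproduces it:

import numpy as np

B, F, H, N, D = 2, 4, 10, 3, 6
x = np.random.rand(B, F, H)
w = np.random.rand(H, N, D)

ref = np.einsum("BFH,HND->BFND", x, w)   # contract the hidden axis H
via_matmul = (x.reshape(B * F, H) @ w.reshape(H, N * D)).reshape(B, F, N, D)
assert np.allclose(ref, via_matmul)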

Example 3: concretize

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import einsum [as alias]
def concretize(self):
    """Returns lower and upper interval bounds."""
    lb = ub = None
    if self.lower is not None:
      lb = (
          tf.einsum('nsi,ni->ns',
                    self._reshape_to_rank(tf.maximum(self.lower.w, 0), 3),
                    self._reshape_to_rank(self.lower.lower, 2)) +
          tf.einsum('nsi,ni->ns',
                    self._reshape_to_rank(tf.minimum(self.lower.w, 0), 3),
                    self._reshape_to_rank(self.lower.upper, 2)))
      lb += self.lower.b
    if self.upper is not None:
      ub = (
          tf.einsum('nsi,ni->ns',
                    self._reshape_to_rank(tf.maximum(self.upper.w, 0), 3),
                    self._reshape_to_rank(self.upper.upper, 2)) +
          tf.einsum('nsi,ni->ns',
                    self._reshape_to_rank(tf.minimum(self.upper.w, 0), 3),
                    self._reshape_to_rank(self.upper.lower, 2)))
      ub += self.upper.b
    return bounds.IntervalBounds(lb, ub) 
Developer ID: deepmind, Project: interval-bound-propagation, Lines of code: 24, Source file: crown.py
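
The paired einsum terms implement the usual interval-arithmetic rule for a linear map: positive coefficients pick up the lower input bound and negative coefficients the upper one (the roles swap for the upper output bound). A small NumPy illustration of the lower-bound formula, outside the library and with made-up shapes:

import numpy as np

n, s, i = 2, 3, 4                  # batch, specifications, flattened inputs
W = np.random.randn(n, s, i)       # lower-bound coefficients (flattened lower.w)
l = np.random.rand(n, i)           # input lower bounds
u = l + np.random.rand(n, i)       # input upper bounds (>= l)

lb = (np.einsum("nsi,ni->ns", np.maximum(W, 0), l) +
      np.einsum("nsi,ni->ns", np.minimum(W, 0), u))
# For any z with l <= z <= u, np.einsum("nsi,ni->ns", W, z) >= lb element-wise.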

Example 4: _concretize_bounds

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import einsum [as alias]
def _concretize_bounds(lower, upper):
    """Returns lower and upper interval bounds."""
    if len(lower.b.shape) == 2:
      equation = 'ijk,ij->ik'
    elif len(lower.b.shape) == 3:
      equation = 'ijnc,ij->inc'
    elif len(lower.b.shape) == 4:
      equation = 'ijhwc,ij->ihwc'
    else:
      raise NotImplementedError('Shape unsupported: {}'.format(lower.b.shape))

    lb = (tf.einsum(equation, tf.maximum(lower.w, 0), lower.lower) +
          tf.einsum(equation, tf.minimum(lower.w, 0), lower.upper) +
          lower.b)
    ub = (tf.einsum(equation, tf.maximum(upper.w, 0), upper.upper) +
          tf.einsum(equation, tf.minimum(upper.w, 0), upper.lower) +
          upper.b)
    return lb, ub 
Developer ID: deepmind, Project: interval-bound-propagation, Lines of code: 20, Source file: fastlin.py

Example 5: call

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import einsum [as alias]
def call(self, inputs):
    inputs = tf.convert_to_tensor(inputs)
    # Use the statically known rank: tf.rank() returns a Tensor, which cannot
    # drive a Python `if` when building a graph.
    rank = inputs.shape.ndims
    if rank > 2:
      outputs = tf.einsum("aki,aij->akj", inputs, self.kernel)

      # Reshape the output back to the original ndim of the input.
      if not tf.executing_eagerly():
        shape = inputs.get_shape().as_list()
        output_shape = shape[:-1] + [self.units]
        outputs.set_shape(output_shape)
    else:
      assert False  # rank-2 inputs are not expected by this layer
      # outputs = tf.matmul(inputs, self.kernel)
    if self.use_bias:
      outputs = tf.nn.bias_add(outputs, self.bias)
    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs 
Developer ID: google-research, Project: language, Lines of code: 21, Source file: linear.py
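
The "aki,aij->akj" contraction is just a batched matrix multiplication, so (as an illustrative equivalence, not part of the library) it matches tf.matmul on rank-3 operands:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
x = tf.constant(np.random.rand(2, 5, 3), dtype=tf.float32)   # [a, k, i]
k = tf.constant(np.random.rand(2, 3, 4), dtype=tf.float32)   # [a, i, j]
via_einsum = tf.einsum("aki,aij->akj", x, k)
via_matmul = tf.matmul(x, k)

with tf.Session() as sess:
  e, m = sess.run([via_einsum, via_matmul])
  assert np.allclose(e, m, atol=1e-5)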

Example 6: einsum_via_matmul

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import einsum [as alias]
def einsum_via_matmul(input_tensor, w, num_inner_dims):
  """Implements einsum via matmul and reshape ops.

  Args:
    input_tensor: float Tensor of shape [<batch_dims>, <inner_dims>].
    w: float Tensor of shape [<inner_dims>, <outer_dims>].
    num_inner_dims: int. number of dimensions to use for inner products.

  Returns:
    float Tensor of shape [<batch_dims>, <outer_dims>].
  """
  input_shape = get_shape_list(input_tensor)
  w_shape = get_shape_list(w)
  batch_dims = input_shape[: -num_inner_dims]
  inner_dims = input_shape[-num_inner_dims:]
  outer_dims = w_shape[num_inner_dims:]
  inner_dim = np.prod(inner_dims)
  outer_dim = np.prod(outer_dims)
  if num_inner_dims > 1:
    input_tensor = tf.reshape(input_tensor, batch_dims + [inner_dim])
  if len(w_shape) > 2:
    w = tf.reshape(w, [inner_dim, outer_dim])
  ret = tf.matmul(input_tensor, w)
  if len(outer_dims) > 1:
    ret = tf.reshape(ret, batch_dims + outer_dims)
  return ret 
Developer ID: google-research, Project: albert, Lines of code: 28, Source file: modeling.py

Example 7: dense_layer_3d_proj

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import einsum [as alias]
def dense_layer_3d_proj(input_tensor,
                        hidden_size,
                        head_size,
                        initializer,
                        activation,
                        use_einsum,
                        name=None):
  """A dense layer with 3D kernel for projection.

  Args:
    input_tensor: float Tensor of shape [batch, from_seq_length,
      num_attention_heads, size_per_head].
    hidden_size: The size of hidden layer.
    head_size: The size of head.
    initializer: Kernel initializer.
    activation: Activation function.
    use_einsum: bool. Whether to use einsum or reshape+matmul for dense layers.
    name: The name scope of this layer.

  Returns:
    float logits Tensor.
  """
  input_shape = get_shape_list(input_tensor)
  num_attention_heads = input_shape[2]
  with tf.variable_scope(name):
    w = tf.get_variable(
        name="kernel",
        shape=[num_attention_heads * head_size, hidden_size],
        initializer=initializer)
    w = tf.reshape(w, [num_attention_heads, head_size, hidden_size])
    b = tf.get_variable(
        name="bias", shape=[hidden_size], initializer=tf.zeros_initializer)
    if use_einsum:
      ret = tf.einsum("BFND,NDH->BFH", input_tensor, w)
    else:
      ret = einsum_via_matmul(input_tensor, w, 2)
    ret += b
  if activation is not None:
    return activation(ret)
  else:
    return ret 
Developer ID: google-research, Project: albert, Lines of code: 43, Source file: modeling.py
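
A hedged usage sketch for this projection layer; the shapes, the initializer, and the assumption that the ALBERT modeling.py is importable as modeling are all illustrative:

import numpy as np
import tensorflow.compat.v1 as tf
import modeling  # assumed: google-research/albert modeling.py on the path

tf.disable_eager_execution()
attn_output = tf.constant(
    np.random.rand(8, 12, 3, 6), dtype=tf.float32)  # [batch, seq, heads, head_size]
projected = modeling.dense_layer_3d_proj(
    attn_output,
    hidden_size=10,
    head_size=6,
    initializer=tf.truncated_normal_initializer(stddev=0.02),
    activation=None,
    use_einsum=True,
    name="attention_output")
# projected has shape [8, 12, 10]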

Example 8: dense_layer_2d

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import einsum [as alias]
def dense_layer_2d(input_tensor,
                   output_size,
                   initializer,
                   activation,
                   use_einsum,
                   num_attention_heads=1,
                   name=None):
  """A dense layer with 2D kernel.

  Args:
    input_tensor: Float tensor with rank 3.
    output_size: The size of output dimension.
    initializer: Kernel initializer.
    activation: Activation function.
    use_einsum: bool. Whether to use einsum or reshape+matmul for dense layers.
    num_attention_heads: number of attention head in attention layer.
    name: The name scope of this layer.

  Returns:
    float logits Tensor.
  """
  del num_attention_heads  # unused
  input_shape = get_shape_list(input_tensor)
  hidden_size = input_shape[2]
  with tf.variable_scope(name):
    w = tf.get_variable(
        name="kernel",
        shape=[hidden_size, output_size],
        initializer=initializer)
    b = tf.get_variable(
        name="bias", shape=[output_size], initializer=tf.zeros_initializer)
    if use_einsum:
      ret = tf.einsum("BFH,HO->BFO", input_tensor, w)
    else:
      ret = tf.matmul(input_tensor, w)
    ret += b
  if activation is not None:
    return activation(ret)
  else:
    return ret 
Developer ID: google-research, Project: albert, Lines of code: 42, Source file: modeling.py

Example 9: _build

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import einsum [as alias]
def _build(self, modules):
    """Outputs specification value."""
    # inputs have shape [batch_size, num_outputs].
    if not (self.collapse and
            isinstance(modules[-1], verifiable_wrapper.LinearFCWrapper)):
      logging.info('Elision of last layer disabled.')
      bounds = modules[-1].output_bounds
      w = self._c
      b = self._d
    else:
      logging.info('Elision of last layer active.')
      # Collapse the last layer.
      bounds = modules[-1].input_bounds
      w = modules[-1].module.w
      b = modules[-1].module.b
      w = tf.einsum('ijk,lk->ijl', self._c, w)
      b = tf.einsum('ijk,k->ij', self._c, b)
      if self._d is not None:
        b += self._d

    # Maximize z * w + b s.t. lower <= z <= upper.
    bounds = bounds_lib.IntervalBounds.convert(bounds)
    c = (bounds.lower + bounds.upper) / 2.
    r = (bounds.upper - bounds.lower) / 2.
    c = tf.einsum('ij,ikj->ik', c, w)
    if b is not None:
      c += b
    r = tf.einsum('ij,ikj->ik', r, tf.abs(w))

    # output has shape [batch_size, num_specifications].
    return c + r 
Developer ID: deepmind, Project: interval-bound-propagation, Lines of code: 33, Source file: specification.py
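
The final three einsums are the closed-form maximum of a linear function over a box: writing z = c + r*t with t in [-1, 1], the maximum of z @ w.T + b over the box is c @ w.T + r @ abs(w).T + b, evaluated per specification. A batch-free NumPy check of that identity (illustrative values only):

import itertools
import numpy as np

lower = np.array([0.0, -1.0])
upper = np.array([2.0, 1.0])
w = np.array([[1.0, -3.0],
              [-2.0, 0.5]])          # [num_specifications, num_outputs]
b = np.array([0.1, -0.2])

c = (lower + upper) / 2.0
r = (upper - lower) / 2.0
closed_form = c @ w.T + r @ np.abs(w).T + b

# The maximum of a linear function over a box is attained at a corner.
corners = np.array(list(itertools.product(*zip(lower, upper))))
brute_force = (corners @ w.T + b).max(axis=0)
assert np.allclose(closed_form, brute_force)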

Example 10: evaluate

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import einsum [as alias]
def evaluate(self, logits):
    if len(logits.shape) == 2:
      output = tf.einsum('ij,ikj->ik', logits, self._c)
    elif len(logits.shape) == 3:
      output = tf.einsum('rij,ikj->rik', logits, self._c)
    else:
      assert len(logits.shape) == 4
      output = tf.einsum('rsbo,bso->rbs', logits, self._c)
    if self._d is not None:
      output += self._d
    return output 
Developer ID: deepmind, Project: interval-bound-propagation, Lines of code: 13, Source file: specification.py

Example 11: apply_linear

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import einsum [as alias]
def apply_linear(self, wrapper, w, b):
    """Propagate CROWN bounds backward through a linear layer."""
    def _linear_propagate(bound):
      """Propagate one side of the bound."""
      new_bound_w = tf.einsum('nsk,lk->nsl', bound.w, w)
      if b is not None:
        bias = tf.tensordot(bound.w, b, axes=1)
      else:
        bias = 0.  # no layer bias to fold into the expression
      return fastlin.LinearExpression(w=new_bound_w, b=bias + bound.b,
                                      lower=wrapper.input_bounds.lower,
                                      upper=wrapper.input_bounds.upper)
    ub_expr = _linear_propagate(self.upper) if self.upper else None
    lb_expr = _linear_propagate(self.lower) if self.lower else None
    return BackwardBounds(lb_expr, ub_expr) 
Developer ID: deepmind, Project: interval-bound-propagation, Lines of code: 15, Source file: crown.py

Example 12: einsum

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import einsum [as alias]
def einsum(self, equation, *slices):
    """Override this for custom einsum implementation.

    Args:
      equation: a string
      *slices: a list of tf.Tensor
    Returns:
      a tf.Tensor
    """
    return tf.einsum(equation, *slices) 
Developer ID: tensorflow, Project: mesh, Lines of code: 12, Source file: simd_mesh_impl.py
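
Because this method is explicitly an override point, a downstream mesh implementation can subclass it and wrap the call. The sketch below is purely hypothetical (the class name LoggingMeshImpl is made up; it assumes the enclosing class is Mesh TensorFlow's SimdMeshImpl, as the source file suggests) and only adds logging:

class LoggingMeshImpl(SimdMeshImpl):  # hypothetical subclass
  """Logs every einsum equation before dispatching to the default implementation."""

  def einsum(self, equation, *slices):
    tf.logging.info("einsum equation: %s", equation)
    return super(LoggingMeshImpl, self).einsum(equation, *slices)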

Example 13: create_model

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import einsum [as alias]
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
                 image_vector, use_one_hot_embeddings, scope):
  """Creates a model."""
  model = modeling.BertModel(
      config=bert_config,
      is_training=is_training,
      input_ids=input_ids,
      input_mask=input_mask,
      token_type_ids=segment_ids,
      use_one_hot_embeddings=use_one_hot_embeddings,
      scope=scope)

  if FLAGS.ignore_image:
    logit = tf.layers.dense(
        model.get_pooled_output(), 1, activation=tf.tanh,
        kernel_initializer=modeling.create_initializer(
            bert_config.initializer_range))
    logit = tf.squeeze(logit, axis=1)
  else:
    image_projection = tf.layers.dense(
        image_vector,
        bert_config.hidden_size,
        activation=tf.tanh,
        kernel_initializer=modeling.create_initializer(
            bert_config.initializer_range))
    logit = tf.einsum("ij,ij->i", image_projection,
                      model.get_pooled_output(), name="inner")

  return tf.stack([-logit, logit], axis=1) 
Developer ID: google-research, Project: language, Lines of code: 31, Source file: run_dual_encoder.py

Example 14: dense_layer_3d

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import einsum [as alias]
def dense_layer_3d(input_tensor,
                   num_attention_heads,
                   size_per_head,
                   initializer,
                   activation,
                   name=None):
  """A dense layer with 3D kernel.

  Args:
    input_tensor: float Tensor of shape [batch, seq_length, hidden_size].
    num_attention_heads: Number of attention heads.
    size_per_head: The size per attention head.
    initializer: Kernel initializer.
    activation: Activation function.
    name: The name scope of this layer.

  Returns:
    float logits Tensor.
  """

  last_dim = get_shape_list(input_tensor)[-1]

  with tf.variable_scope(name):
    w = tf.get_variable(
        name="kernel",
        shape=[last_dim, num_attention_heads * size_per_head],
        initializer=initializer)
    w = tf.reshape(w, [last_dim, num_attention_heads, size_per_head])
    b = tf.get_variable(
        name="bias",
        shape=[num_attention_heads * size_per_head],
        initializer=tf.zeros_initializer)
    b = tf.reshape(b, [num_attention_heads, size_per_head])
    ret = tf.einsum("abc,cde->abde", input_tensor, w)
    ret += b
    if activation is not None:
      return activation(ret)
    else:
      return ret 
Developer ID: mlperf, Project: training, Lines of code: 41, Source file: modeling.py
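
A minimal, illustrative call, assuming dense_layer_3d as defined above is in scope (the shapes and the initializer are assumptions, not taken from the mlperf code):

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()
hidden = tf.constant(np.random.rand(8, 12, 10), dtype=tf.float32)  # [batch, seq, hidden]
query = dense_layer_3d(
    hidden,
    num_attention_heads=3,
    size_per_head=6,
    initializer=tf.truncated_normal_initializer(stddev=0.02),
    activation=None,
    name="query")
# query has shape [8, 12, 3, 6]: one size_per_head vector per head and position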

Example 15: dense_layer_3d_proj

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import einsum [as alias]
def dense_layer_3d_proj(input_tensor,
                        hidden_size,
                        num_attention_heads,
                        head_size,
                        initializer,
                        activation,
                        name=None):
  """A dense layer with 3D kernel for projection.

  Args:
    input_tensor: float Tensor of shape [batch, from_seq_length,
      num_attention_heads, size_per_head].
    hidden_size: The size of hidden layer.
    num_attention_heads: Number of attention heads.
    head_size: The size of head.
    initializer: Kernel initializer.
    activation: Activation function.
    name: The name scope of this layer.

  Returns:
    float logits Tensor.
  """
  head_size = hidden_size // num_attention_heads
  with tf.variable_scope(name):
    w = tf.get_variable(
        name="kernel",
        shape=[hidden_size, hidden_size],
        initializer=initializer)
    w = tf.reshape(w, [num_attention_heads, head_size, hidden_size])
    b = tf.get_variable(
        name="bias", shape=[hidden_size], initializer=tf.zeros_initializer)

  ret = tf.einsum("BFNH,NHD->BFD", input_tensor, w)
  ret += b
  if activation is not None:
    return activation(ret)
  else:
    return ret 
Developer ID: mlperf, Project: training, Lines of code: 40, Source file: modeling.py


Note: The tensorflow.compat.v1.einsum examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their original authors, and copyright of the source code remains with those authors. Please consult the corresponding project's License before distributing or reusing the code; do not reproduce this article without permission.