

Python tensorflow.einsum Method Code Examples

This article collects typical usage examples of the tensorflow.einsum method in Python. If you are wondering what tensorflow.einsum does, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from tensorflow, the module this method belongs to.


The following presents 15 code examples of tensorflow.einsum, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
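Before the examples, here is a minimal sketch of the equation syntax tf.einsum expects. It assumes TensorFlow 2.x with eager execution (the examples below are TF 1.x graph code, but the equation strings behave the same way); the tensor names and shapes are illustrative only.

import tensorflow as tf

a = tf.random.normal([2, 3])      # matrix
b = tf.random.normal([3, 4])      # matrix
m = tf.random.normal([5, 2, 3])   # batch of matrices
n = tf.random.normal([5, 3, 4])

mat = tf.einsum('ij,jk->ik', a, b)     # plain matrix multiply, shape [2, 4]
bat = tf.einsum('bij,bjk->bik', m, n)  # batched matrix multiply, shape [5, 2, 4]
tr = tf.einsum('ij->ji', a)            # transpose, shape [3, 2]
rows = tf.einsum('ij->i', a)           # sum over the column axis, shape [2]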

Example 1: build_bnn

# Required module: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def build_bnn(x, layer_sizes, n_particles):
    bn = zs.BayesianNet()
    h = tf.tile(x[None, ...], [n_particles, 1, 1])
    for i, (n_in, n_out) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
        w = bn.normal("w" + str(i), tf.zeros([n_out, n_in + 1]), std=1.,
                      group_ndims=2, n_samples=n_particles)
        h = tf.concat([h, tf.ones(tf.shape(h)[:-1])[..., None]], -1)
        h = tf.einsum("imk,ijk->ijm", w, h) / tf.sqrt(
            tf.cast(tf.shape(h)[2], tf.float32))
        if i < len(layer_sizes) - 2:
            h = tf.nn.relu(h)

    y_mean = bn.deterministic("y_mean", tf.squeeze(h, 2))
    y_logstd = tf.get_variable("y_logstd", shape=[],
                               initializer=tf.constant_initializer(0.))
    bn.normal("y", y_mean, logstd=y_logstd)
    return bn 
Developer ID: thu-ml, Project: zhusuan, Lines: 19, Source file: bnn_vi.py
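The "imk,ijk->ijm" contraction above performs, per particle i, a matrix product of the activations h[i] ([batch, n_in + 1]) with the transposed weights w[i] ([n_out, n_in + 1]). The sketch below is not part of the zhusuan example; it assumes TF 2.x eager execution and illustrative shapes, and just checks that equivalence.

import tensorflow as tf

n_particles, batch, n_in, n_out = 4, 8, 5, 3
w = tf.random.normal([n_particles, n_out, n_in + 1])
h = tf.random.normal([n_particles, batch, n_in + 1])

out_einsum = tf.einsum('imk,ijk->ijm', w, h)    # [n_particles, batch, n_out]
out_matmul = tf.matmul(h, w, transpose_b=True)  # same contraction via matmul
print(tf.reduce_max(tf.abs(out_einsum - out_matmul)).numpy())  # ~0.0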

Example 2: build_bnn

# Required module: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def build_bnn(x, layer_sizes, logstds, n_particles):
    bn = zs.BayesianNet()
    h = tf.tile(x[None, ...], [n_particles, 1, 1])
    for i, (n_in, n_out) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
        w = bn.normal("w" + str(i), tf.zeros([n_out, n_in + 1]),
                      logstd=logstds[i], group_ndims=2, n_samples=n_particles)
        h = tf.concat([h, tf.ones(tf.shape(h)[:-1])[..., None]], -1)
        h = tf.einsum("imk,ijk->ijm", w, h) / tf.sqrt(
            tf.cast(tf.shape(h)[2], tf.float32))
        if i < len(layer_sizes) - 2:
            h = tf.nn.relu(h)

    y_mean = bn.deterministic("y_mean", tf.squeeze(h, 2))
    y_logstd = -0.95
    bn.normal("y", y_mean, logstd=y_logstd)
    return bn 
Developer ID: thu-ml, Project: zhusuan, Lines: 18, Source file: bnn_sgmcmc.py

Example 3: _call

# Required module: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def _call(self, inputs):
        x = inputs  # N, S, VF
        # dropout
        x = tf.nn.dropout(x, 1 - self.dropout)
        # convolve
        supports = list()
        for i in range(len(self.support)):
            pre_sup = tf.einsum('ijk,kl->ijl', x, self.vars['weights_' + str(i)])
            support = tf.einsum('ij,kjl->kil', self.support[i], pre_sup)
            supports.append(support)
        output = tf.add_n(supports)
        # bias
        if self.bias:
            output += self.vars['bias']

        return self.act(output) 
Developer ID: walsvid, Project: Pixel2MeshPlusPlus, Lines: 18, Source file: layers.py
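The layer above uses two einsum calls: 'ijk,kl->ijl' applies one weight matrix to every graph in the batch, and 'ij,kjl->kil' left-multiplies every batch element by the shared support matrix. A hedged sketch with illustrative shapes (TF 2.x eager assumed; not taken from Pixel2MeshPlusPlus):

import tensorflow as tf

batch, n_nodes, f_in, f_out = 2, 6, 4, 3
x = tf.random.normal([batch, n_nodes, f_in])
weights = tf.random.normal([f_in, f_out])
support = tf.random.normal([n_nodes, n_nodes])    # e.g. a normalized adjacency

pre_sup = tf.einsum('ijk,kl->ijl', x, weights)    # [batch, n_nodes, f_out]
out = tf.einsum('ij,kjl->kil', support, pre_sup)  # [batch, n_nodes, f_out]
# Reference: broadcast the support matrix over the batch and use matmul.
out_ref = tf.matmul(tf.broadcast_to(support, [batch, n_nodes, n_nodes]), pre_sup)
print(tf.reduce_max(tf.abs(out - out_ref)).numpy())  # ~0.0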

Example 4: mixed_mode_dot

# Required module: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def mixed_mode_dot(a, b):
    """
    Computes the equivalent of `tf.einsum('ij,bjk->bik', a, b)`, but
    works for both dense and sparse inputs.
    :param a: Tensor or SparseTensor with rank 2.
    :param b: Tensor or SparseTensor with rank 3.
    :return: Tensor or SparseTensor with rank 3.
    """
    s_0_, s_1_, s_2_ = K.int_shape(b)
    B_T = ops.transpose(b, (1, 2, 0))
    B_T = ops.reshape(B_T, (s_1_, -1))
    output = dot(a, B_T)
    output = ops.reshape(output, (s_1_, s_2_, -1))
    output = ops.transpose(output, (2, 0, 1))

    return output 
Developer ID: danielegrattarola, Project: spektral, Lines: 18, Source file: matmul.py

Example 5: _call_dense

# Required module: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def _call_dense(self, X, A):
        shape = tf.shape(A)[:-1]
        A = tf.linalg.set_diag(A, tf.zeros(shape, A.dtype))
        A = tf.linalg.set_diag(A, tf.ones(shape, A.dtype))
        X = tf.einsum("...NI , IHO -> ...NHO", X, self.kernel)
        attn_for_self = tf.einsum("...NHI , IHO -> ...NHO", X, self.attn_kernel_self)
        attn_for_neighs = tf.einsum("...NHI , IHO -> ...NHO", X, self.attn_kernel_neighs)
        attn_for_neighs = tf.einsum("...ABC -> ...CBA", attn_for_neighs)

        attn_coef = attn_for_self + attn_for_neighs
        attn_coef = tf.nn.leaky_relu(attn_coef, alpha=0.2)

        mask = -10e9 * (1.0 - A)
        attn_coef += mask[..., None, :]
        attn_coef = tf.nn.softmax(attn_coef, axis=-1)
        attn_coef_drop = self.dropout(attn_coef)

        output = tf.einsum("...NHM , ...MHI -> ...NHI", attn_coef_drop, X)

        return output, attn_coef 
Developer ID: danielegrattarola, Project: spektral, Lines: 22, Source file: graph_attention.py
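_call_dense relies on the ellipsis ("...") form of the equation, which stands for any number of leading batch axes. A short sketch of the two patterns (TF 2.x eager assumed; shapes are illustrative, not spektral's):

import tensorflow as tf

batch, n_nodes, f_in, heads, f_out = 2, 5, 4, 3, 6
X = tf.random.normal([batch, n_nodes, f_in])
kernel = tf.random.normal([f_in, heads, f_out])

proj = tf.einsum('...NI,IHO->...NHO', X, kernel)  # [batch, n_nodes, heads, f_out]
rev = tf.einsum('...ABC->...CBA', proj)           # reverse the last three axes
print(proj.shape, rev.shape)  # (2, 5, 3, 6) (2, 6, 3, 5)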

Example 6: post_attention

# Required module: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def post_attention(h, attn_vec, d_model, n_head, d_head, dropout, is_training,
                   kernel_initializer, residual=True):
  """Post-attention processing."""
  # post-attention projection (back to `d_model`)
  proj_o = tf.get_variable('o/kernel', [d_model, n_head, d_head],
                           dtype=h.dtype, initializer=kernel_initializer)
  attn_out = tf.einsum('ibnd,hnd->ibh', attn_vec, proj_o)

  attn_out = tf.layers.dropout(attn_out, dropout, training=is_training)
  if residual:
    output = tf.contrib.layers.layer_norm(attn_out + h, begin_norm_axis=-1,
                                          scope='LayerNorm')
  else:
    output = tf.contrib.layers.layer_norm(attn_out, begin_norm_axis=-1,
                                          scope='LayerNorm')

  return output 
Developer ID: rusiaaman, Project: XLnet-gen, Lines: 19, Source file: modeling.py
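Here 'ibnd,hnd->ibh' contracts over the head axis n and the per-head depth d at the same time, folding the multi-head attention output back to d_model. A hedged sketch (TF 2.x eager assumed; illustrative shapes, not the XLNet defaults) checking it against tf.tensordot:

import tensorflow as tf

seq, batch, n_head, d_head, d_model = 7, 2, 4, 16, 64
attn_vec = tf.random.normal([seq, batch, n_head, d_head])
proj_o = tf.random.normal([d_model, n_head, d_head])

attn_out = tf.einsum('ibnd,hnd->ibh', attn_vec, proj_o)      # [seq, batch, d_model]
ref = tf.tensordot(attn_vec, proj_o, axes=[[2, 3], [1, 2]])  # contract n and d
print(tf.reduce_max(tf.abs(attn_out - ref)).numpy())         # ~0.0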

Example 7: bert_layer_aggerate

# Required module: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def bert_layer_aggerate(encoding_lst, max_len, scope, reuse):
	with tf.variable_scope(scope, reuse=reuse):
		valid_tensor = tf.stack(encoding_lst, axis=1) # batch x num_layer x seq x dim
		attn = tf.get_variable(scope+"/layer_attention",
										dtype=tf.float32,
										shape=[len(encoding_lst),],
										initializer=tf.initializers.random_uniform(0,1))

		prob = tf.exp(tf.nn.log_softmax(attn))

		layer_repres = tf.einsum("abcd,b->acd", valid_tensor, prob)
		# layer_repres = encoding_lst[-1]
		# since input_target_a means b->a 
		# and input_target_b means a->b
		
		layer_repres = layer_repres[:,0:max_len,:]
		
		# print(" bert layer output shape w{}".format(layer_repres.get_shape()))
		return layer_repres 
Developer ID: yyht, Project: BERT, Lines: 21, Source file: bert_esim.py

Example 8: bert_layer_aggerate

# Required module: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def bert_layer_aggerate(encoding_lst, 
						scope, reuse):
	with tf.variable_scope(scope, reuse=reuse):
		valid_tensor = tf.stack(encoding_lst, axis=1) # batch x num_layer x seq x dim
		attn = tf.get_variable(scope+"/layer_attention",
										dtype=tf.float32,
										shape=[len(encoding_lst),],
										initializer=tf.initializers.random_uniform(-0.01,0.01))

		prob = tf.exp(tf.nn.log_softmax(attn))

		layer_repres = tf.einsum("abcd,b->acd", valid_tensor, prob)
		# since input_target_a means b->a 
		# and input_target_b means a->b
		
		# print(" bert layer output shape w{}".format(layer_repres.get_shape()))
		return layer_repres 
Developer ID: yyht, Project: BERT, Lines: 19, Source file: bert_esim_v1.py
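In both bert_layer_aggerate variants, "abcd,b->acd" mixes the stacked encoder layers with one softmax weight per layer. A sketch of that contraction (TF 2.x eager assumed; names and shapes are illustrative, not taken from the BERT repo):

import tensorflow as tf

batch, num_layers, seq_len, dim = 2, 4, 7, 8
stacked = tf.random.normal([batch, num_layers, seq_len, dim])  # batch x num_layer x seq x dim
prob = tf.nn.softmax(tf.random.normal([num_layers]))           # one weight per layer

mixed = tf.einsum('abcd,b->acd', stacked, prob)                # [batch, seq_len, dim]
# Same result as a broadcasted weighted sum over the layer axis:
mixed_ref = tf.reduce_sum(stacked * prob[None, :, None, None], axis=1)
print(tf.reduce_max(tf.abs(mixed - mixed_ref)).numpy())        # ~0.0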

Example 9: call

# Required module: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def call(self, inputs):
    if self.coeffs_mean is None and self.coeffs_precision_tril_op is None:
      # p(mean(ynew) | xnew) = Normal(ynew | mean = 0, variance = xnew xnew^T)
      predictive_mean = 0.
      predictive_variance = tf.reduce_sum(tf.square(inputs), -1)
    else:
      # p(mean(ynew) | xnew, x, y) = Normal(ynew |
      #   mean = xnew (1/noise_variance) (1/noise_variance x^T x + I)^{-1}x^T y,
      #   variance = xnew (1/noise_variance x^T x + I)^{-1} xnew^T)
      predictive_mean = tf.einsum('nm,m->n', inputs, self.coeffs_mean)
      predictive_covariance = tf.matmul(
          inputs,
          self.coeffs_precision_tril_op.solve(
              self.coeffs_precision_tril_op.solve(inputs, adjoint_arg=True),
              adjoint=True))
      predictive_variance = tf.diag_part(predictive_covariance)
    return ed.Normal(loc=predictive_mean, scale=tf.sqrt(predictive_variance)) 
Developer ID: yyht, Project: BERT, Lines: 19, Source file: gaussian_process.py

Example 10: fit

# Required module: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def fit(self, x=None, y=None):
    # p(coeffs | x, y) = Normal(coeffs |
    #   mean = (1/noise_variance) (1/noise_variance x^T x + I)^{-1} x^T y,
    #   covariance = (1/noise_variance x^T x + I)^{-1})
    # TODO(trandustin): We newly fit the data at each call. Extend to do
    # Bayesian updating.
    kernel_matrix = tf.matmul(x, x, transpose_a=True) / self.noise_variance
    coeffs_precision = tf.matrix_set_diag(
        kernel_matrix, tf.matrix_diag_part(kernel_matrix) + 1.)
    coeffs_precision_tril = tf.linalg.cholesky(coeffs_precision)
    self.coeffs_precision_tril_op = tf.linalg.LinearOperatorLowerTriangular(
        coeffs_precision_tril)
    self.coeffs_mean = self.coeffs_precision_tril_op.solvevec(
        self.coeffs_precision_tril_op.solvevec(tf.einsum('nm,n->m', x, y)),
        adjoint=True) / self.noise_variance
    # TODO(trandustin): To be fully Keras-compatible, return History object.
    return 
Developer ID: yyht, Project: BERT, Lines: 19, Source file: gaussian_process.py
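Examples 9 and 10 use the two matrix-vector forms: 'nm,m->n' is x @ v and 'nm,n->m' is x^T @ v. A quick sketch (TF 2.x eager assumed; illustrative sizes):

import tensorflow as tf

n, m = 5, 3
x = tf.random.normal([n, m])
v_m = tf.random.normal([m])
v_n = tf.random.normal([n])

a = tf.einsum('nm,m->n', x, v_m)   # x @ v_m,   shape [n]
b = tf.einsum('nm,n->m', x, v_n)   # x^T @ v_n, shape [m]
print(tf.reduce_max(tf.abs(a - tf.linalg.matvec(x, v_m))).numpy())
print(tf.reduce_max(tf.abs(b - tf.linalg.matvec(x, v_n, transpose_a=True))).numpy())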

Example 11: laplace_attention

# Required module: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def laplace_attention(q, k, v, scale, normalise):
  """Computes laplace exponential attention.

  Args:
    q: queries. Tensor of shape [batch_size, m, d_k].
    k: keys. Tensor of shape [batch_size, n, d_k].
    v: values. Tensor of shape [batch_size, n, d_v].
    scale: float that scales the L1 distance.
    normalise: Boolean that determines whether weights sum to 1.

  Returns:
    Tensor of shape [batch_size, m, d_v].
  """
  k = tf.expand_dims(k, axis=1)  # [batch_size, 1, n, d_k]
  q = tf.expand_dims(q, axis=2)  # [batch_size, m, 1, d_k]
  unnorm_weights = - tf.abs((k - q) / scale)  # [batch_size, m, n, d_k]
  unnorm_weights = tf.reduce_sum(unnorm_weights, axis=-1)  # [batch_size, m, n]
  if normalise:
    weight_fn = tf.nn.softmax
  else:
    weight_fn = lambda x: 1 + tf.tanh(x)
  weights = weight_fn(unnorm_weights)  # [batch_size, m, n]
  rep = tf.einsum('bik,bkj->bij', weights, v)  # [batch_size, m, d_v]
  return rep 
Developer ID: yyht, Project: BERT, Lines: 26, Source file: gaussian_process.py

Example 12: calculate_loss

# Required module: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def calculate_loss(self, predictions, labels, weights=None, **unused_params):
    with tf.name_scope("loss_xent"):
      epsilon = 10e-6
      if FLAGS.label_smoothing:
        float_labels = smoothing(labels)
      else:
        float_labels = tf.cast(labels, tf.float32)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)
      if weights is not None:
        print(cross_entropy_loss, weights)
        weighted_loss = tf.einsum("ij,i->ij", cross_entropy_loss, weights)
        print("create weighted_loss", weighted_loss)
        return tf.reduce_mean(tf.reduce_sum(weighted_loss, 1))
      else:
        return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1)) 
Developer ID: wangheda, Project: youtube-8m, Lines: 19, Source file: losses.py
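The "ij,i->ij" pattern above scales every row of the per-class cross-entropy matrix by that example's weight, i.e. it broadcasts the weight vector across the class axis. A small sketch (TF 2.x eager assumed; illustrative sizes):

import tensorflow as tf

batch, n_classes = 4, 10
loss = tf.random.uniform([batch, n_classes])
weights = tf.random.uniform([batch])

weighted = tf.einsum('ij,i->ij', loss, weights)
print(tf.reduce_max(tf.abs(weighted - loss * weights[:, None])).numpy())  # ~0.0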

Example 13: create_model

# Required module: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def create_model(self, model_input, vocab_size, l2_penalty=1e-8, original_input=None, **unused_params):
    """Creates a linear regression model.

    Args:
      model_input: 'batch' x 'num_features' x 'num_methods' matrix of input features.
      vocab_size: The number of classes in the dataset.

    Returns:
      A dictionary with a tensor containing the probability predictions of the
      model in the 'predictions' key. The dimensions of the tensor are
      batch_size x num_classes."""
    num_methods = model_input.get_shape().as_list()[-1]
    weight = tf.get_variable("ensemble_weight", 
        shape=[num_methods],
        regularizer=slim.l2_regularizer(l2_penalty))
    weight = tf.nn.softmax(weight)
    output = tf.einsum("ijk,k->ij", model_input, weight)
    return {"predictions": output} 
Developer ID: wangheda, Project: youtube-8m, Lines: 20, Source file: linear_regression_model.py

Example 14: create_model

# Required module: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def create_model(self,
                   model_input,
                   vocab_size,
                   num_mixtures=None,
                   l2_penalty=1e-8,
                   sub_scope="",
                   original_input=None, 
                   **unused_params):

    num_methods = model_input.get_shape().as_list()[-1]
    num_features = model_input.get_shape().as_list()[-2]

    original_input = tf.nn.l2_normalize(original_input, dim=1)
    gate_activations = slim.fully_connected(
        original_input,
        num_methods,
        activation_fn=tf.nn.softmax,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="gates"+sub_scope)

    output = tf.einsum("ijk,ik->ij", model_input, gate_activations)
    return {"predictions": output} 
Developer ID: wangheda, Project: youtube-8m, Lines: 24, Source file: input_moe_model.py
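Examples 13 and 14 differ only in how the ensemble weights are indexed: "ijk,k->ij" applies one global weight per method, while "ijk,ik->ij" lets every example choose its own mixture through the gate activations. A sketch contrasting the two (TF 2.x eager assumed; illustrative shapes):

import tensorflow as tf

batch, n_classes, n_methods = 4, 6, 3
preds = tf.random.uniform([batch, n_classes, n_methods])
global_w = tf.nn.softmax(tf.random.normal([n_methods]))
gates = tf.nn.softmax(tf.random.normal([batch, n_methods]), axis=-1)

out_global = tf.einsum('ijk,k->ij', preds, global_w)  # shared ensemble weights
out_gated = tf.einsum('ijk,ik->ij', preds, gates)     # per-example gating
print(out_global.shape, out_gated.shape)              # (4, 6) (4, 6)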

Example 15: lstm

# Required module: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def lstm(self, prev_y, prev_h, prev_c, z):
		hs = self.hidden_size

		preact = tf.einsum('ijk,ka->ija', prev_h, self.h2h_W) + \
				 tf.einsum('ijk,ka->ija', prev_y, self.i2h_W) + \
				 tf.matmul(z, self.z2h_W) + \
				 self.b # preactivation
		# [1, batch_size, hidden_size * 4]
		i = tf.sigmoid(preact[:, :, 0*hs: 1*hs])
		f = tf.sigmoid(preact[:, :, 1*hs: 2*hs])
		o = tf.sigmoid(preact[:, :, 2*hs: 3*hs])
		c = tf.tanh(preact[:, :, 3*hs: 4*hs])
		c = f * prev_c + i * c # [1, batch_size, hidden_size] (element-wise multiply)
		h = o * tf.tanh(c) # [1, batch_size, hidden_size]
		y = tf.einsum('ijk,ka->ija', h, self.Vhid) + self.bhid # [1, batch_size, vocab_size]

		# Author doesn't mention this part in his paper, but it appears in his code
		# So I assume this is part of his soft-max approx. strategy ---|
		max_y = tf.reduce_max(y, axis=1, keep_dims=True) # [1, 1, vocab_size]
		e = tf.exp((y - max_y) * self.L)  # [1, batch_size, vocab_size]
		w = e / tf.reduce_sum(e, axis=1, keep_dims=True) # [1, batch_size, vocab_size]
		# Assumption ends here ----------------------------------------|

		y = tf.einsum('ijk,ka->ija', w, self.Wemb) # [1, batch_size, input_dim]
		
		return y, h, c 
Developer ID: Jeff-HOU, Project: UROP-Adversarial-Feature-Matching-for-Text-Generation, Lines: 28, Source file: generator.py
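The repeated 'ijk,ka->ija' contraction in lstm applies a dense projection to the last axis of a [1, batch, k] tensor; it is equivalent to flattening the leading axes, doing one matmul, and reshaping back. A sketch under those assumptions (TF 2.x eager; illustrative sizes, not the generator's actual dimensions):

import tensorflow as tf

batch, hidden, out_dim = 5, 8, 12
h = tf.random.normal([1, batch, hidden])
W = tf.random.normal([hidden, out_dim])

proj = tf.einsum('ijk,ka->ija', h, W)                     # [1, batch, out_dim]
proj_ref = tf.reshape(tf.matmul(tf.reshape(h, [-1, hidden]), W),
                      [1, batch, out_dim])
print(tf.reduce_max(tf.abs(proj - proj_ref)).numpy())     # ~0.0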


Note: The tensorflow.einsum method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For redistribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.