

Python tensorflow.einsum Method Code Examples

This article collects typical usage examples of the tensorflow.einsum method in Python. If you are wondering how tensorflow.einsum is used in practice, what it is for, or what real code that calls it looks like, the curated examples below may help. You can also explore further usage examples of the tensorflow module to which this method belongs.


Below are 15 code examples of the tensorflow.einsum method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
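
Before the examples, a brief refresher: tf.einsum evaluates an Einstein-summation expression over its input tensors, so many matmul/reduce patterns can be written as a single equation string. Below is a minimal standalone sketch (assuming TensorFlow 2.x eager execution; the tensors and shapes are made up purely for illustration):

import tensorflow as tf

a = tf.random.normal([3, 4])
b = tf.random.normal([4, 5])
x = tf.random.normal([2, 3, 4])

# "ij,jk->ik": ordinary matrix multiplication, same result as tf.matmul(a, b)
mm = tf.einsum("ij,jk->ik", a, b)

# "bij,jk->bik": the same contraction broadcast over a leading batch axis
bmm = tf.einsum("bij,jk->bik", x, b)

# "ij->j": summation over an axis, same result as tf.reduce_sum(a, axis=0)
col_sum = tf.einsum("ij->j", a)

print(mm.shape, bmm.shape, col_sum.shape)  # (3, 5) (2, 3, 5) (4,)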

Example 1: build_bnn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def build_bnn(x, layer_sizes, n_particles):
    bn = zs.BayesianNet()
    h = tf.tile(x[None, ...], [n_particles, 1, 1])
    for i, (n_in, n_out) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
        w = bn.normal("w" + str(i), tf.zeros([n_out, n_in + 1]), std=1.,
                      group_ndims=2, n_samples=n_particles)
        h = tf.concat([h, tf.ones(tf.shape(h)[:-1])[..., None]], -1)
        h = tf.einsum("imk,ijk->ijm", w, h) / tf.sqrt(
            tf.cast(tf.shape(h)[2], tf.float32))
        if i < len(layer_sizes) - 2:
            h = tf.nn.relu(h)

    y_mean = bn.deterministic("y_mean", tf.squeeze(h, 2))
    y_logstd = tf.get_variable("y_logstd", shape=[],
                               initializer=tf.constant_initializer(0.))
    bn.normal("y", y_mean, logstd=y_logstd)
    return bn 
Developer: thu-ml, Project: zhusuan, Lines of code: 19, Source file: bnn_vi.py
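
In this example the equation "imk,ijk->ijm" contracts the shared feature axis k separately for each particle i, which is the same as a per-particle matrix product with the transposed weight matrix. A minimal standalone check of that equivalence (not part of the zhusuan code; assumes TensorFlow 2.x and made-up shapes):

import tensorflow as tf

n_particles, batch, n_in, n_out = 5, 7, 3, 2
w = tf.random.normal([n_particles, n_out, n_in])   # [i, m, k]
h = tf.random.normal([n_particles, batch, n_in])   # [i, j, k]

out_einsum = tf.einsum("imk,ijk->ijm", w, h)
out_matmul = tf.matmul(h, w, transpose_b=True)     # batched h @ w^T, shape [i, j, m]

tf.debugging.assert_near(out_einsum, out_matmul)
print(out_einsum.shape)  # (5, 7, 2)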

Example 2: build_bnn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def build_bnn(x, layer_sizes, logstds, n_particles):
    bn = zs.BayesianNet()
    h = tf.tile(x[None, ...], [n_particles, 1, 1])
    for i, (n_in, n_out) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
        w = bn.normal("w" + str(i), tf.zeros([n_out, n_in + 1]),
                      logstd=logstds[i], group_ndims=2, n_samples=n_particles)
        h = tf.concat([h, tf.ones(tf.shape(h)[:-1])[..., None]], -1)
        h = tf.einsum("imk,ijk->ijm", w, h) / tf.sqrt(
            tf.cast(tf.shape(h)[2], tf.float32))
        if i < len(layer_sizes) - 2:
            h = tf.nn.relu(h)

    y_mean = bn.deterministic("y_mean", tf.squeeze(h, 2))
    y_logstd = -0.95
    bn.normal("y", y_mean, logstd=y_logstd)
    return bn 
Developer: thu-ml, Project: zhusuan, Lines of code: 18, Source file: bnn_sgmcmc.py

Example 3: _call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def _call(self, inputs):
        x = inputs  # N, S, VF
        # dropout
        x = tf.nn.dropout(x, 1 - self.dropout)
        # convolve
        supports = list()
        for i in range(len(self.support)):
            pre_sup = tf.einsum('ijk,kl->ijl', x, self.vars['weights_' + str(i)])
            support = tf.einsum('ij,kjl->kil', self.support[i], pre_sup)
            supports.append(support)
        output = tf.add_n(supports)
        # bias
        if self.bias:
            output += self.vars['bias']

        return self.act(output) 
Developer: walsvid, Project: Pixel2MeshPlusPlus, Lines of code: 18, Source file: layers.py
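
The two contractions in this layer are a feature projection ("ijk,kl->ijl", a dense transform of the last axis) followed by graph propagation ("ij,kjl->kil", which multiplies every batch element by the same support matrix). A minimal standalone sketch of both patterns (assumes TensorFlow 2.x; the shapes are invented and this is not the Pixel2MeshPlusPlus code):

import tensorflow as tf

batch, num_nodes, in_dim, out_dim = 2, 5, 8, 4
x = tf.random.normal([batch, num_nodes, in_dim])
weights = tf.random.normal([in_dim, out_dim])
support = tf.random.normal([num_nodes, num_nodes])

# feature projection: same result as tf.tensordot(x, weights, axes=[[2], [0]])
pre_sup = tf.einsum("ijk,kl->ijl", x, weights)
tf.debugging.assert_near(pre_sup, tf.tensordot(x, weights, axes=[[2], [0]]))

# graph propagation: apply the same support matrix to every element of the batch
out = tf.einsum("ij,kjl->kil", support, pre_sup)
print(out.shape)  # (2, 5, 4)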

Example 4: mixed_mode_dot

# Required import: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def mixed_mode_dot(a, b):
    """
    Computes the equivalent of `tf.einsum('ij,bjk->bik', a, b)`, but
    works for both dense and sparse inputs.
    :param a: Tensor or SparseTensor with rank 2.
    :param b: Tensor or SparseTensor with rank 3.
    :return: Tensor or SparseTensor with rank 3.
    """
    s_0_, s_1_, s_2_ = K.int_shape(b)
    B_T = ops.transpose(b, (1, 2, 0))
    B_T = ops.reshape(B_T, (s_1_, -1))
    output = dot(a, B_T)
    output = ops.reshape(output, (s_1_, s_2_, -1))
    output = ops.transpose(output, (2, 0, 1))

    return output 
Developer: danielegrattarola, Project: spektral, Lines of code: 18, Source file: matmul.py
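
For dense inputs the transpose/reshape/dot sequence above reproduces tf.einsum('ij,bjk->bik', a, b); the point of writing it this way is that it also works when a is sparse. A minimal dense-only sketch of the same trick using plain TensorFlow ops (the real function goes through spektral's ops/dot helpers; this is only an illustration with made-up shapes, assuming TensorFlow 2.x):

import tensorflow as tf

a = tf.random.normal([4, 4])        # rank 2: [i, j]
b = tf.random.normal([2, 4, 3])     # rank 3: [batch, j, k]

reference = tf.einsum("ij,bjk->bik", a, b)

# the same computation via transpose + reshape + matmul
b_t = tf.transpose(b, [1, 2, 0])    # [j, k, batch]
b_t = tf.reshape(b_t, [4, -1])      # [j, k * batch]
out = tf.matmul(a, b_t)             # [i, k * batch]
out = tf.reshape(out, [4, 3, -1])   # [i, k, batch]
out = tf.transpose(out, [2, 0, 1])  # [batch, i, k]

tf.debugging.assert_near(reference, out)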

Example 5: _call_dense

# Required import: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def _call_dense(self, X, A):
        shape = tf.shape(A)[:-1]
        A = tf.linalg.set_diag(A, tf.zeros(shape, A.dtype))
        A = tf.linalg.set_diag(A, tf.ones(shape, A.dtype))
        X = tf.einsum("...NI , IHO -> ...NHO", X, self.kernel)
        attn_for_self = tf.einsum("...NHI , IHO -> ...NHO", X, self.attn_kernel_self)
        attn_for_neighs = tf.einsum("...NHI , IHO -> ...NHO", X, self.attn_kernel_neighs)
        attn_for_neighs = tf.einsum("...ABC -> ...CBA", attn_for_neighs)

        attn_coef = attn_for_self + attn_for_neighs
        attn_coef = tf.nn.leaky_relu(attn_coef, alpha=0.2)

        mask = -10e9 * (1.0 - A)
        attn_coef += mask[..., None, :]
        attn_coef = tf.nn.softmax(attn_coef, axis=-1)
        attn_coef_drop = self.dropout(attn_coef)

        output = tf.einsum("...NHM , ...MHI -> ...NHI", attn_coef_drop, X)

        return output, attn_coef 
Developer: danielegrattarola, Project: spektral, Lines of code: 22, Source file: graph_attention.py
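
The equations in this layer all follow one multi-head pattern: "...NI,IHO->...NHO" projects each node's features into H heads at once, and "...NHM,...MHI->...NHI" then mixes node features with the per-head attention coefficients (the ellipsis absorbs any leading batch dimensions). A minimal standalone shape walk-through (assumes TensorFlow 2.x; N, H and the feature sizes are made up):

import tensorflow as tf

N, I, H, O = 6, 8, 4, 16           # nodes, input features, heads, features per head
X = tf.random.normal([N, I])
kernel = tf.random.normal([I, H, O])

# project node features into H heads in a single contraction
X_heads = tf.einsum("...NI,IHO->...NHO", X, kernel)
print(X_heads.shape)               # (6, 4, 16)

# mix node features with attention coefficients, independently per head
attn_coef = tf.nn.softmax(tf.random.normal([N, H, N]), axis=-1)
out = tf.einsum("...NHM,...MHI->...NHI", attn_coef, X_heads)
print(out.shape)                   # (6, 4, 16)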

Example 6: post_attention

# Required import: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def post_attention(h, attn_vec, d_model, n_head, d_head, dropout, is_training,
                   kernel_initializer, residual=True):
  """Post-attention processing."""
  # post-attention projection (back to `d_model`)
  proj_o = tf.get_variable('o/kernel', [d_model, n_head, d_head],
                           dtype=h.dtype, initializer=kernel_initializer)
  attn_out = tf.einsum('ibnd,hnd->ibh', attn_vec, proj_o)

  attn_out = tf.layers.dropout(attn_out, dropout, training=is_training)
  if residual:
    output = tf.contrib.layers.layer_norm(attn_out + h, begin_norm_axis=-1,
                                          scope='LayerNorm')
  else:
    output = tf.contrib.layers.layer_norm(attn_out, begin_norm_axis=-1,
                                          scope='LayerNorm')

  return output 
Developer: rusiaaman, Project: XLnet-gen, Lines of code: 19, Source file: modeling.py
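
The projection 'ibnd,hnd->ibh' contracts the head axis n and the per-head dimension d in one step, so it is equivalent to flattening the heads and applying a single dense projection back to d_model. A minimal standalone check (assumes TensorFlow 2.x; the shapes are illustrative, not taken from the original model):

import tensorflow as tf

seq, batch, n_head, d_head, d_model = 5, 2, 4, 8, 32
attn_vec = tf.random.normal([seq, batch, n_head, d_head])   # [i, b, n, d]
proj_o = tf.random.normal([d_model, n_head, d_head])        # [h, n, d]

out_einsum = tf.einsum("ibnd,hnd->ibh", attn_vec, proj_o)

# the same result by flattening the head axes and contracting once
flat_vec = tf.reshape(attn_vec, [seq, batch, n_head * d_head])
flat_proj = tf.reshape(proj_o, [d_model, n_head * d_head])
out_flat = tf.tensordot(flat_vec, flat_proj, axes=[[2], [1]])

tf.debugging.assert_near(out_einsum, out_flat)
print(out_einsum.shape)  # (5, 2, 32)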

Example 7: bert_layer_aggerate

# Required import: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def bert_layer_aggerate(encoding_lst, max_len, scope, reuse):
	with tf.variable_scope(scope, reuse=reuse):
		valid_tensor = tf.stack(encoding_lst, axis=1) # batch x num_layer x seq x dim
		attn = tf.get_variable(scope+"/layer_attention",
										dtype=tf.float32,
										shape=[len(encoding_lst),],
										initializer=tf.initializers.random_uniform(0,1))

		prob = tf.exp(tf.nn.log_softmax(attn))

		layer_repres = tf.einsum("abcd,b->acd", valid_tensor, prob)
		# layer_repres = encoding_lst[-1]
		# since input_target_a means b->a 
		# and input_target_b means a->b
		
		layer_repres = layer_repres[:,0:max_len,:]
		
		# print(" bert layer output shape w{}".format(layer_repres.get_shape()))
		return layer_repres 
Developer: yyht, Project: BERT, Lines of code: 21, Source file: bert_esim.py
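
Here "abcd,b->acd" forms a weighted sum of the stacked layer outputs with one learned weight per layer (note that tf.exp(tf.nn.log_softmax(attn)) is numerically just tf.nn.softmax(attn)). The same aggregation can be written as a broadcast multiply plus reduce_sum; a minimal standalone sketch of the equivalence (assumes TensorFlow 2.x, made-up shapes):

import tensorflow as tf

batch, num_layer, seq, dim = 2, 4, 7, 16
valid_tensor = tf.random.normal([batch, num_layer, seq, dim])
prob = tf.nn.softmax(tf.random.normal([num_layer]))

out_einsum = tf.einsum("abcd,b->acd", valid_tensor, prob)
out_manual = tf.reduce_sum(valid_tensor * prob[None, :, None, None], axis=1)

tf.debugging.assert_near(out_einsum, out_manual)
print(out_einsum.shape)  # (2, 7, 16)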

Example 8: bert_layer_aggerate

# Required import: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def bert_layer_aggerate(encoding_lst, 
						scope, reuse):
	with tf.variable_scope(scope, reuse=reuse):
		valid_tensor = tf.stack(encoding_lst, axis=1) # batch x num_layer x seq x dim
		attn = tf.get_variable(scope+"/layer_attention",
										dtype=tf.float32,
										shape=[len(encoding_lst),],
										initializer=tf.initializers.random_uniform(-0.01,0.01))

		prob = tf.exp(tf.nn.log_softmax(attn))

		layer_repres = tf.einsum("abcd,b->acd", valid_tensor, prob)
		# since input_target_a means b->a 
		# and input_target_b means a->b
		
		# print(" bert layer output shape w{}".format(layer_repres.get_shape()))
		return layer_repres 
Developer: yyht, Project: BERT, Lines of code: 19, Source file: bert_esim_v1.py

Example 9: call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def call(self, inputs):
    if self.coeffs_mean is None and self.coeffs_precision_tril_op is None:
      # p(mean(ynew) | xnew) = Normal(ynew | mean = 0, variance = xnew xnew^T)
      predictive_mean = 0.
      predictive_variance = tf.reduce_sum(tf.square(inputs), -1)
    else:
      # p(mean(ynew) | xnew, x, y) = Normal(ynew |
      #   mean = xnew (1/noise_variance) (1/noise_variance x^T x + I)^{-1}x^T y,
      #   variance = xnew (1/noise_variance x^T x + I)^{-1} xnew^T)
      predictive_mean = tf.einsum('nm,m->n', inputs, self.coeffs_mean)
      predictive_covariance = tf.matmul(
          inputs,
          self.coeffs_precision_tril_op.solve(
              self.coeffs_precision_tril_op.solve(inputs, adjoint_arg=True),
              adjoint=True))
      predictive_variance = tf.diag_part(predictive_covariance)
    return ed.Normal(loc=predictive_mean, scale=tf.sqrt(predictive_variance)) 
Developer: yyht, Project: BERT, Lines of code: 19, Source file: gaussian_process.py

Example 10: fit

# Required import: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def fit(self, x=None, y=None):
    # p(coeffs | x, y) = Normal(coeffs |
    #   mean = (1/noise_variance) (1/noise_variance x^T x + I)^{-1} x^T y,
    #   covariance = (1/noise_variance x^T x + I)^{-1})
    # TODO(trandustin): We newly fit the data at each call. Extend to do
    # Bayesian updating.
    kernel_matrix = tf.matmul(x, x, transpose_a=True) / self.noise_variance
    coeffs_precision = tf.matrix_set_diag(
        kernel_matrix, tf.matrix_diag_part(kernel_matrix) + 1.)
    coeffs_precision_tril = tf.linalg.cholesky(coeffs_precision)
    self.coeffs_precision_tril_op = tf.linalg.LinearOperatorLowerTriangular(
        coeffs_precision_tril)
    self.coeffs_mean = self.coeffs_precision_tril_op.solvevec(
        self.coeffs_precision_tril_op.solvevec(tf.einsum('nm,n->m', x, y)),
        adjoint=True) / self.noise_variance
    # TODO(trandustin): To be fully Keras-compatible, return History object.
    return 
Developer: yyht, Project: BERT, Lines of code: 19, Source file: gaussian_process.py
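
The einsum calls in Examples 9 and 10 are both plain matrix-vector products: 'nm,m->n' multiplies the design matrix by the coefficient vector, and 'nm,n->m' is the transposed product x^T y. A minimal standalone sketch (assumes TensorFlow 2.x; x, y and the sizes are made up):

import tensorflow as tf

x = tf.random.normal([10, 3])    # [n, m]
coeffs = tf.random.normal([3])   # [m]
y = tf.random.normal([10])       # [n]

pred = tf.einsum("nm,m->n", x, coeffs)   # x @ coeffs
xty = tf.einsum("nm,n->m", x, y)         # x^T @ y

tf.debugging.assert_near(pred, tf.linalg.matvec(x, coeffs))
tf.debugging.assert_near(xty, tf.linalg.matvec(x, y, transpose_a=True))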

Example 11: laplace_attention

# Required import: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def laplace_attention(q, k, v, scale, normalise):
  """Computes laplace exponential attention.

  Args:
    q: queries. Tensor of shape [batch_size, m, d_k].
    k: keys. Tensor of shape [batch_size, n, d_k].
    v: values. Tensor of shape [batch_size, n, d_v].
    scale: float that scales the L1 distance.
    normalise: Boolean that determines whether weights sum to 1.

  Returns:
    Tensor of shape [batch_size, m, d_v].
  """
  k = tf.expand_dims(k, axis=1)  # [batch_size, 1, n, d_k]
  q = tf.expand_dims(q, axis=2)  # [batch_size, m, 1, d_k]
  unnorm_weights = - tf.abs((k - q) / scale)  # [batch_size, m, n, d_k]
  unnorm_weights = tf.reduce_sum(unnorm_weights, axis=-1)  # [batch_size, m, n]
  if normalise:
    weight_fn = tf.nn.softmax
  else:
    weight_fn = lambda x: 1 + tf.tanh(x)
  weights = weight_fn(unnorm_weights)  # [batch_size, m, n]
  rep = tf.einsum('bik,bkj->bij', weights, v)  # [batch_size, m, d_v]
  return rep 
Developer: yyht, Project: BERT, Lines of code: 26, Source file: gaussian_process.py
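
The final contraction 'bik,bkj->bij' is an ordinary batched matrix multiplication of the attention weights with the values, i.e. the same as tf.matmul(weights, v). A minimal standalone sketch (assumes TensorFlow 2.x, small made-up shapes):

import tensorflow as tf

batch, m, n, d_v = 2, 3, 5, 4
weights = tf.nn.softmax(tf.random.normal([batch, m, n]), axis=-1)
v = tf.random.normal([batch, n, d_v])

rep_einsum = tf.einsum("bik,bkj->bij", weights, v)
rep_matmul = tf.matmul(weights, v)

tf.debugging.assert_near(rep_einsum, rep_matmul)
print(rep_einsum.shape)  # (2, 3, 4)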

Example 12: calculate_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def calculate_loss(self, predictions, labels, weights=None, **unused_params):
    with tf.name_scope("loss_xent"):
      epsilon = 10e-6
      if FLAGS.label_smoothing:
        float_labels = smoothing(labels)
      else:
        float_labels = tf.cast(labels, tf.float32)
      cross_entropy_loss = float_labels * tf.log(predictions + epsilon) + (
          1 - float_labels) * tf.log(1 - predictions + epsilon)
      cross_entropy_loss = tf.negative(cross_entropy_loss)
      if weights is not None:
        print(cross_entropy_loss, weights)
        weighted_loss = tf.einsum("ij,i->ij", cross_entropy_loss, weights)
        print("create weighted_loss", weighted_loss)
        return tf.reduce_mean(tf.reduce_sum(weighted_loss, 1))
      else:
        return tf.reduce_mean(tf.reduce_sum(cross_entropy_loss, 1)) 
Developer: wangheda, Project: youtube-8m, Lines of code: 19, Source file: losses.py
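
The equation "ij,i->ij" simply rescales each row of the per-class cross-entropy matrix by its example weight, the same as multiplying by weights[:, None]. A minimal standalone sketch (assumes TensorFlow 2.x; the tensors are made up):

import tensorflow as tf

cross_entropy_loss = tf.random.uniform([4, 6])   # [batch, num_classes]
weights = tf.random.uniform([4])                 # one weight per example

weighted_einsum = tf.einsum("ij,i->ij", cross_entropy_loss, weights)
weighted_manual = cross_entropy_loss * weights[:, None]

tf.debugging.assert_near(weighted_einsum, weighted_manual)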

Example 13: create_model

# Required import: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def create_model(self, model_input, vocab_size, l2_penalty=1e-8, original_input=None, **unused_params):
    """Creates a linear regression model.

    Args:
      model_input: 'batch' x 'num_features' x 'num_methods' matrix of input features.
      vocab_size: The number of classes in the dataset.

    Returns:
      A dictionary with a tensor containing the probability predictions of the
      model in the 'predictions' key. The dimensions of the tensor are
      batch_size x num_classes."""
    num_methods = model_input.get_shape().as_list()[-1]
    weight = tf.get_variable("ensemble_weight", 
        shape=[num_methods],
        regularizer=slim.l2_regularizer(l2_penalty))
    weight = tf.nn.softmax(weight)
    output = tf.einsum("ijk,k->ij", model_input, weight)
    return {"predictions": output} 
Developer: wangheda, Project: youtube-8m, Lines of code: 20, Source file: linear_regression_model.py

Example 14: create_model

# Required import: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def create_model(self,
                   model_input,
                   vocab_size,
                   num_mixtures=None,
                   l2_penalty=1e-8,
                   sub_scope="",
                   original_input=None, 
                   **unused_params):

    num_methods = model_input.get_shape().as_list()[-1]
    num_features = model_input.get_shape().as_list()[-2]

    original_input = tf.nn.l2_normalize(original_input, dim=1)
    gate_activations = slim.fully_connected(
        original_input,
        num_methods,
        activation_fn=tf.nn.softmax,
        weights_regularizer=slim.l2_regularizer(l2_penalty),
        scope="gates"+sub_scope)

    output = tf.einsum("ijk,ik->ij", model_input, gate_activations)
    return {"predictions": output} 
Developer: wangheda, Project: youtube-8m, Lines of code: 24, Source file: input_moe_model.py
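
The two gating contractions in Examples 13 and 14 differ only in whether the mixture weights are shared across examples: "ijk,k->ij" applies one global weight per method, while "ijk,ik->ij" uses a per-example gate. A minimal standalone sketch of both (assumes TensorFlow 2.x; the shapes are made up):

import tensorflow as tf

batch, num_features, num_methods = 4, 10, 3
model_input = tf.random.normal([batch, num_features, num_methods])

# Example 13: one softmax weight per method, shared by every example
weight = tf.nn.softmax(tf.random.normal([num_methods]))
global_mix = tf.einsum("ijk,k->ij", model_input, weight)
tf.debugging.assert_near(global_mix, tf.reduce_sum(model_input * weight, axis=-1))

# Example 14: a per-example softmax gate over the methods
gate = tf.nn.softmax(tf.random.normal([batch, num_methods]), axis=-1)
gated_mix = tf.einsum("ijk,ik->ij", model_input, gate)
tf.debugging.assert_near(gated_mix, tf.reduce_sum(model_input * gate[:, None, :], axis=-1))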

Example 15: lstm

# Required import: import tensorflow [as alias]
# Or: from tensorflow import einsum [as alias]
def lstm(self, prev_y, prev_h, prev_c, z):
		hs = self.hidden_size

		preact = tf.einsum('ijk,ka->ija', prev_h, self.h2h_W) + \
				 tf.einsum('ijk,ka->ija', prev_y, self.i2h_W) + \
				 tf.matmul(z, self.z2h_W) + \
				 self.b # preactivation
		# [1, batch_size, hidden_size * 4]
		i = tf.sigmoid(preact[:, :, 0*hs: 1*hs])
		f = tf.sigmoid(preact[:, :, 1*hs: 2*hs])
		o = tf.sigmoid(preact[:, :, 2*hs: 3*hs])
		c = tf.tanh(preact[:, :, 3*hs: 4*hs])
		c = f * prev_c + i * c # [1, batch_size, hidden_size] (element-wise multiply)
		h = o * tf.tanh(c) # [1, batch_size, hidden_size]
		y = tf.einsum('ijk,ka->ija', h, self.Vhid) + self.bhid # [1, batch_size, vocab_size]

		# The author doesn't mention this part in his paper, but it appears in his code
		# So I assume this is part of his soft-max approx. strategy ---|
		max_y = tf.reduce_max(y, axis=1, keep_dims=True) # [1, 1, vocab_size]
		e = tf.exp((y - max_y) * self.L)  # [1, batch_size, vocab_size]
		w = e / tf.reduce_sum(e, axis=1, keep_dims=True) # [1, batch_size, vocab_size]
		# Assumption ends here ----------------------------------------|

		y = tf.einsum('ijk,ka->ija', w, self.Wemb) # [1, batch_size, input_dim]
		
		return y, h, c 
Developer: Jeff-HOU, Project: UROP-Adversarial-Feature-Matching-for-Text-Generation, Lines of code: 28, Source file: generator.py
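
Each 'ijk,ka->ija' in this generator contracts the last axis against a weight matrix, i.e. the time-major tensors are pushed through a dense layer; it is equivalent to tf.tensordot(x, W, axes=[[2], [0]]). A minimal standalone sketch (assumes TensorFlow 2.x; the sizes are made up):

import tensorflow as tf

time_steps, batch_size, hidden_size, out_dim = 1, 8, 16, 32
h = tf.random.normal([time_steps, batch_size, hidden_size])   # [i, j, k]
W = tf.random.normal([hidden_size, out_dim])                  # [k, a]

proj_einsum = tf.einsum("ijk,ka->ija", h, W)
proj_tensordot = tf.tensordot(h, W, axes=[[2], [0]])

tf.debugging.assert_near(proj_einsum, proj_tensordot)
print(proj_einsum.shape)  # (1, 8, 32)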


Note: The tensorflow.einsum method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. Please refer to the corresponding project's License before distributing or using the code; do not repost without permission.