Python tensor.batched_dot Method Code Examples

This article collects typical usage examples of Python's theano.tensor.batched_dot method. If you are wondering what tensor.batched_dot does, how to call it, or what real code using it looks like, the curated examples below may help. You can also explore further usage examples from its home module, theano.tensor.


The following presents 15 code examples of tensor.batched_dot, drawn from open-source projects and sorted by popularity by default.
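For orientation, here is a minimal, self-contained sketch of what batched_dot computes (shapes chosen purely for illustration): given two 3-D tensors, it performs one matrix product per batch element.

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')          # (batch, n, m)
y = T.tensor3('y')          # (batch, m, k)
z = T.batched_dot(x, y)     # (batch, n, k): one matrix product per batch element

f = theano.function([x, y], z)
floatX = theano.config.floatX
a = np.random.randn(4, 2, 3).astype(floatX)
b = np.random.randn(4, 3, 5).astype(floatX)
assert f(a, b).shape == (4, 2, 5)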

Example 1: quadratic_loss

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import batched_dot [as alias]
def quadratic_loss(mx, Sx, target, Q, *args, **kwargs):
    '''
        Quadratic penalty function c(x) = (||x-target||_Q)^2
    '''
    if Sx is None:
        # deterministic case
        if mx.ndim == 1:
            mx = mx[None, :]
        delta = mx-target
        deltaQ = delta.dot(Q)
        cost = tt.batched_dot(deltaQ, delta)
        return cost
    else:
        # stochastic case (moment matching)
        delta = mx-target
        deltaQ = delta.T.dot(Q)
        SxQ = Sx.dot(Q)
        m_cost = tt.sum(Sx*Q) + deltaQ.dot(delta)
        s_cost = 2*tt.sum(SxQ.dot(SxQ)) + 4*deltaQ.dot(Sx).dot(deltaQ.T)
        return m_cost, s_cost 
Author: mcgillmrl | Project: kusanagi | Lines: 22 | Source: cost.py
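A hypothetical invocation of the deterministic branch (the variable names and the compiled function below are ours, assuming the module imports theano.tensor as tt):

import numpy as np
import theano
import theano.tensor as tt

mx = tt.matrix('mx')            # (batch, D) state means
target = tt.vector('target')    # (D,) goal state
Q = tt.matrix('Q')              # (D, D) cost weight matrix
cost_fn = theano.function([mx, target, Q],
                          quadratic_loss(mx, None, target, Q))

floatX = theano.config.floatX
D = 3
# each all-zeros row differs from the all-ones target by -1 in every
# dimension, so with Q = I the squared Q-norm is 3.0 per row
print(cost_fn(np.zeros((5, D), dtype=floatX),
              np.ones(D, dtype=floatX),
              np.eye(D, dtype=floatX)))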

Example 2: get_output_for

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import batched_dot [as alias]
# (This snippet also assumes numpy is imported as np and theano.tensor as T.)
def get_output_for(self, inputs, attention_only=False, **kwargs):

        # inputs[0]: B x N x D
        # inputs[1]: B x Q x D
        # inputs[2]: B x N x Q / B x Q x N
        # self.mask: B x Q

        if self.transpose: M = inputs[2].dimshuffle((0,2,1))
        else: M = inputs[2]
        alphas = T.nnet.softmax(T.reshape(M, (M.shape[0]*M.shape[1],M.shape[2])))
        alphas_r = T.reshape(alphas, (M.shape[0],M.shape[1],M.shape[2]))* \
                self.mask[:,np.newaxis,:] # B x N x Q
        alphas_r = alphas_r/alphas_r.sum(axis=2)[:,:,np.newaxis] # B x N x Q
        q_rep = T.batched_dot(alphas_r, inputs[1]) # B x N x D
    
        return eval(self.gating_fn)(inputs[0],q_rep) 
Author: bdhingra | Project: ga-reader | Lines: 18 | Source: layers.py
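The heart of this layer is the attention-weighted sum over query tokens. A standalone sketch of just that step, with illustrative names that are not part of the original project:

import theano.tensor as T

M = T.tensor3('M')    # (B, N, Q) pairwise scores between document and query tokens
q = T.tensor3('q')    # (B, Q, D) query token representations

# softmax over the query axis, computed row by row via a 2-D reshape
alphas = T.nnet.softmax(M.reshape((-1, M.shape[2]))).reshape(M.shape)
q_rep = T.batched_dot(alphas, q)   # (B, N, D): one attended query vector per token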

Example 3: cosine_similarity

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import batched_dot [as alias]
def cosine_similarity(x, y, eps=1e-6):
    r"""
    Cosine similarity between a vector and each row of a base matrix.

    Parameters
    ----------
    x: a 3D Theano variable
        Batch of vectors to compare to each row of the matrices in y.
    y: a 3D Theano variable
        Batch of matrices to compare against.
    eps: float
        Small constant for numerical stability (keeps the operation
        differentiable when a norm is zero).

    Returns
    -------
    z: a 3D Theano variable
        The cosine similarities between x and each row of y.
    """
    z = T.batched_dot(x, y.dimshuffle(0, 2, 1))
    z /= T.sqrt(T.sum(x * x, axis=2).dimshuffle(0, 1, 'x') * T.sum(y * y, axis=2).dimshuffle(0, 'x', 1) + eps)

    return z 
Author: snipsco | Project: ntm-lasagne | Lines: 25 | Source: similarities.py
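A possible usage sketch (shapes are illustrative: one read key per batch element, compared against a five-row memory):

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')   # (batch, 1, dim) read keys
y = T.tensor3('y')   # (batch, rows, dim) memory matrices
sim_fn = theano.function([x, y], cosine_similarity(x, y))

floatX = theano.config.floatX
k = np.random.randn(2, 1, 4).astype(floatX)
M = np.random.randn(2, 5, 4).astype(floatX)
print(sim_fn(k, M).shape)   # (2, 1, 5): similarity of each key to each memory row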

Example 4: process

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import batched_dot [as alias]
def process(self, gstate, ref_matrix, dropout_masks=Ellipsis):
        """
        Process a direct ref matrix and update the state accordingly. Each node runs a GRU step
        with previous state from the node state and input from the matrix.

        Params:
            gstate: A GraphState giving the current state
            ref_matrix: A tensor of the form (n_batch, num_node_ids, input_width)
        """
        if dropout_masks is Ellipsis:
            dropout_masks = None
            append_masks = False
        else:
            append_masks = True

        # To process the input, we need to map from node id to node index
        # We can do this using the gstate.node_ids, of shape (n_batch, n_nodes, num_node_ids)
        prepped_input_vector = T.batched_dot(gstate.node_ids, ref_matrix)

        # prepped_input_vector is of shape (n_batch, n_nodes, input_width)
        # gstate.node_states is of shape (n_batch, n_nodes, node_state_width)
        # so they match nicely
        full_input = T.concatenate([gstate.node_ids, prepped_input_vector], 2)

        # we flatten to apply GRU
        flat_input = full_input.reshape([-1, self._input_width + self._graph_spec.num_node_ids])
        flat_state = gstate.node_states.reshape([-1, self._graph_spec.node_state_size])
        new_flat_state, dropout_masks = self._update_gru.step(flat_input, flat_state, dropout_masks)

        new_node_states = new_flat_state.reshape(gstate.node_states.shape)

        new_gstate = gstate.with_updates(node_states=new_node_states)
        if append_masks:
            return new_gstate, dropout_masks
        else:
            return new_gstate 
Author: hexahedria | Project: gated-graph-transformer-network | Lines: 38 | Source: direct_reference_update.py
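The key idiom here is that batched_dot with one-hot node_ids acts as a per-batch row lookup into ref_matrix. A toy illustration (all names below are ours):

import numpy as np
import theano
import theano.tensor as T

node_ids = T.tensor3('node_ids')    # (n_batch, n_nodes, num_node_ids), one-hot rows
ref_matrix = T.tensor3('ref')       # (n_batch, num_node_ids, input_width)
lookup = theano.function([node_ids, ref_matrix],
                         T.batched_dot(node_ids, ref_matrix))

floatX = theano.config.floatX
ids = np.zeros((1, 2, 3), dtype=floatX)
ids[0, 0, 2] = ids[0, 1, 0] = 1.0   # node 0 carries id 2, node 1 carries id 0
ref = np.arange(12, dtype=floatX).reshape(1, 3, 4)
print(lookup(ids, ref))             # rows 2 and 0 of ref[0], in that order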

Example 5: cosine_similarity

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import batched_dot [as alias]
def cosine_similarity(x, y, eps=1e-6):
    z = T.batched_dot(x, y.dimshuffle(0, 2, 1))
    z /= T.sqrt(T.sum(x * x, axis=2).dimshuffle(0, 1, 'x') * T.sum(y * y, axis=2).dimshuffle(0, 'x', 1) + eps)

    return z 
Author: tristandeleu | Project: ntm-one-shot | Lines: 7 | Source: similarities.py

Example 6: compute_psi2

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import batched_dot [as alias]
# (This snippet also assumes numpy as np; `casting` is a helper defined
# elsewhere in gauss.py that casts Python constants to the float dtype.)
def compute_psi2(lls, lsf, z, input_means, input_vars):

    ls = T.exp(lls)
    sf = T.exp(lsf)
    b = ls / casting(2.0)
    term_1 = T.prod(T.sqrt(b / (b + input_vars)), 1)

    scale = T.sqrt(4 * (2 * b[ None, : ] + 0 * input_vars))
    scaled_z = z[ None, : , : ] / scale[ : , None , : ]
    scaled_z_minus_m = scaled_z
    r2b = T.sum(scaled_z_minus_m**2, 2)[ :, None, : ] + T.sum(scaled_z_minus_m**2, 2)[ :, : , None ] - \
        2 * T.batched_dot(scaled_z_minus_m, np.transpose(scaled_z_minus_m, [ 0, 2, 1 ]))
    term_2 = T.exp(-r2b)

    scale = T.sqrt(4 * (2 * b[ None, : ] + 2 * input_vars))
    scaled_z = z[ None, : , : ] / scale[ : , None , : ]
    scaled_m = input_means / scale
    scaled_m = T.tile(scaled_m[ : , None, : ], [ 1, z.shape[ 0 ], 1])
    scaled_z_minus_m = scaled_z - scaled_m
    r2b = T.sum(scaled_z_minus_m**2, 2)[ :, None, : ] + T.sum(scaled_z_minus_m**2, 2)[ :, : , None ] + \
        2 * T.batched_dot(scaled_z_minus_m, np.transpose(scaled_z_minus_m, [ 0, 2, 1 ]))
    term_3 = T.exp(-r2b)
    
    psi2_computed = sf**casting(2.0) * term_1[ :, None, None ] * term_2 * term_3

    return T.transpose(psi2_computed, [ 1, 2, 0 ]) 
Author: muhanzhang | Project: D-VAE | Lines: 28 | Source: gauss.py

Example 7: batched_gram5d

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import batched_dot [as alias]
def batched_gram5d(self, fmap):
		# (layer, batch, featuremaps, height*width)
		fmap=fmap.flatten(ndim=4)

		# (layer*batch, featuremaps, height*width)
		fmap2=fmap.reshape((-1, fmap.shape[-2], fmap.shape[-1]))

		# The T.prod term can't be taken outside as a T.mean in style_loss(), since the width and height of the image might vary
		return T.batched_dot(fmap2, fmap2.dimshuffle(0,2,1)).reshape(fmap.shape)/T.prod(fmap.shape[-2:]) 
Author: joelmoniz | Project: gogh-figure | Lines: 11 | Source: model.py

Example 8: batched_gram

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import batched_dot [as alias]
def batched_gram(self, fmap):
		# (batch, featuremaps, height*width)
		fmap=fmap.flatten(ndim=3)

		# The T.prod term can't be taken outside as a T.mean in style_loss(), since the width and height of the image might vary
		if self.net_type == 0:
			return T.batched_dot(fmap, fmap.dimshuffle(0,2,1))/T.prod(fmap.shape[-2:])
		elif self.net_type == 1:
			return T.batched_dot(fmap, fmap.dimshuffle(0,2,1))/T.prod(fmap.shape[-1]) 
Author: joelmoniz | Project: gogh-figure | Lines: 11 | Source: model.py
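Outside the class, the same Gram-matrix computation can be reproduced in a few lines. A minimal sketch, assuming standard (batch, channels, height, width) feature maps:

import numpy as np
import theano
import theano.tensor as T

fmap = T.tensor4('fmap')                 # (batch, channels, height, width)
flat = fmap.flatten(ndim=3)              # (batch, channels, height*width)
gram = T.batched_dot(flat, flat.dimshuffle(0, 2, 1)) / T.prod(fmap.shape[-2:])
gram_fn = theano.function([fmap], gram)

x = np.random.randn(2, 8, 4, 4).astype(theano.config.floatX)
print(gram_fn(x).shape)                  # (2, 8, 8): channel correlations per image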

Example 9: LayerNormalization

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import batched_dot [as alias]
def LayerNormalization(x, gamma, mask, estimated_mean=0.0, estimated_var=1.0):
    assert x.ndim == 3 or x.ndim == 2
    if x.ndim == 3:
        x_mean = T.mean(x, axis=2).dimshuffle(0, 1, 'x')
        x_var = T.var(x, axis=2).dimshuffle(0, 1, 'x')
        return gamma*((x - x_mean) / T.sqrt(x_var+1e-7)), x_mean[0, 0], x_var[0, 0]

    elif x.ndim == 2:
        x_mean = T.mean(x, axis=1).dimshuffle(0, 'x')
        x_var = T.var(x, axis=1).dimshuffle(0, 'x')
        return gamma*((x - x_mean) / T.sqrt(x_var+1e-7)), x_mean[0], x_var[0]



# Wraps theano.batched_dot. If last_axis is True it loops over the last axis;
# otherwise it loops over the first axis (batched_dot's default behaviour).
Author: julianser | Project: hred-latent-piecewise | Lines: 17 | Source: utils.py

Example 10: BatchedDot

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import batched_dot [as alias]
def BatchedDot(x, y, last_axis=False):
    if not last_axis:
        return T.batched_dot(x, y)
    elif last_axis:
        if x.ndim == 2:
            shuffled_x = x.dimshuffle(1,0)
        elif x.ndim == 3:
            shuffled_x = x.dimshuffle(2,0,1)
        elif x.ndim == 4:
            shuffled_x = x.dimshuffle(3,0,1,2)
        else:
            raise ValueError('BatchedDot inputs must have between 2-4 dimensions, but x has ' + str(x.ndim) + ' dimensions')

        if y.ndim == 2:
            shuffled_y = y.dimshuffle(1,0)
        elif y.ndim == 3:
            shuffled_y = y.dimshuffle(2,0,1)
        elif y.ndim == 4:
            shuffled_y = y.dimshuffle(3,0,1,2)
        else:
            raise ValueError('BatchedDot inputs must have between 2-4 dimensions, but y has ' + str(y.ndim) + ' dimensions')

        dot = T.batched_dot(shuffled_x, shuffled_y)
        if dot.ndim == 2:
            return dot.dimshuffle(1,0)
        elif dot.ndim == 3:
            return dot.dimshuffle(1,2,0)
        elif dot.ndim == 4:
            return dot.dimshuffle(1,2,3,0) 
Author: julianser | Project: hred-latent-piecewise | Lines: 31 | Source: utils.py
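A quick check of the last_axis=True path (shapes illustrative): the batch axis sits last on the way in and comes back last on the way out.

import numpy as np
import theano
import theano.tensor as T

x = T.tensor3('x')   # (n, m, batch): batch stored on the LAST axis
y = T.tensor3('y')   # (m, k, batch)
f = theano.function([x, y], BatchedDot(x, y, last_axis=True))

floatX = theano.config.floatX
a = np.random.randn(2, 3, 5).astype(floatX)
b = np.random.randn(3, 4, 5).astype(floatX)
print(f(a, b).shape)   # (2, 4, 5): same result as batched_dot with batch first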

Example 11: gaussian_chol

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import batched_dot [as alias]
def gaussian_chol(mean, logvar, chol, sample=None):
    if sample is not None:
        raise Exception('Not implemented')
    diag = gaussian_diag(mean, logvar)
    mask = T.shape_padleft(T.triu(T.ones_like(chol[0]), 1))
    sample = diag.sample + T.batched_dot(diag.sample, chol * mask)
    return RandomVariable(sample, diag.logp, diag.entr, mean=mean, logvar=logvar) 
Author: openai | Project: iaf | Lines: 9 | Source: rand.py
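The chol * mask product keeps only the strictly upper triangle, so each sampled dimension mixes in noise from earlier dimensions only. A toy check of the mask alone (plain Theano, nothing project-specific):

import numpy as np
import theano
import theano.tensor as T

chol = T.tensor3('chol')   # (batch, D, D) per-sample mixing matrices
mask = T.shape_padleft(T.triu(T.ones_like(chol[0]), 1))   # (1, D, D), strict upper
f = theano.function([chol], chol * mask)
print(f(np.ones((1, 3, 3), dtype=theano.config.floatX))[0])
# [[0. 1. 1.]
#  [0. 0. 1.]
#  [0. 0. 0.]]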

Example 12: quadratic_saturating_loss

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import batched_dot [as alias]
# (This snippet also uses matrix_inverse and det, imported in the original
# project from theano.tensor.nlinalg.)
def quadratic_saturating_loss(mx, Sx, target, Q, *args, **kwargs):
    '''
        Squashing loss penalty function
        c(x) = ( 1 - e^(-0.5*quadratic_loss(x, target)) )
    '''
    if Sx is None:
        if mx.ndim == 1:
            mx = mx[None, :]
        delta = mx - target[None, :]
        deltaQ = delta.dot(Q)
        cost = 1.0 - tt.exp(-0.5*tt.batched_dot(deltaQ, delta))
        return cost
    else:
        # stochastic case (moment matching)
        delta = mx - target
        SxQ = Sx.dot(Q)
        EyeM = tt.eye(mx.shape[0])
        IpSxQ = EyeM + SxQ
        Ip2SxQ = EyeM + 2*SxQ
        S1 = tt.dot(Q, matrix_inverse(IpSxQ))
        S2 = tt.dot(Q, matrix_inverse(Ip2SxQ))
        # S1 = solve(IpSxQ.T, Q.T).T
        # S2 = solve(Ip2SxQ.T, Q.T).T
        # mean
        m_cost = -tt.exp(-0.5*delta.dot(S1).dot(delta))/tt.sqrt(det(IpSxQ))
        # var
        s_cost = tt.exp(
            -delta.dot(S2).dot(delta))/tt.sqrt(det(Ip2SxQ)) - m_cost**2

        return 1.0 + m_cost, s_cost 
Author: mcgillmrl | Project: kusanagi | Lines: 32 | Source: cost.py

Example 13: get_output_for

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import batched_dot [as alias]
def get_output_for(self, input, **kwargs):
        act = T.batched_dot(T.tensordot(input, self.V, axes = [1, 2]), input) + T.dot(input, self.W) + self.b.dimshuffle('x', 0)
        return self.nonlinearity(act) 
Author: kimiyoung | Project: planetoid | Lines: 5 | Source: layers.py
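This layer evaluates a bilinear form x^T V_k x plus a linear term for every output unit k. A shape walk-through of the bilinear part, under assumed parameter shapes (V: (K, D, D)):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')    # (batch, D) inputs
V = T.tensor3('V')   # (K, D, D): one bilinear matrix per output unit
xV = T.tensordot(x, V, axes=[1, 2])   # (batch, K, D)
act = T.batched_dot(xV, x)            # (batch, K): x^T V_k x for each unit k
f = theano.function([x, V], act)

floatX = theano.config.floatX
print(f(np.ones((2, 3), dtype=floatX), np.ones((4, 3, 3), dtype=floatX)))
# every entry is 1^T (3x3 ones) 1 = 9.0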

Example 14: get_output_for

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import batched_dot [as alias]
def get_output_for(self, inputs, **kwargs):
        M = inputs[0]
        u = inputs[1]
        output = T.batched_dot(M, u)
        if self.nonlinearity is not None:
            output = self.nonlinearity(output)
        return output 
Author: npow | Project: MemN2N | Lines: 9 | Source: main.py

Example 15: read

# Required import: from theano import tensor [as alias]
# Alternatively: from theano.tensor import batched_dot [as alias]
def read(self, w_tm1, M_t, **kwargs):
        r_t = T.batched_dot(w_tm1, M_t)

        return r_t.flatten(ndim=2) 
Author: snipsco | Project: ntm-lasagne | Lines: 6 | Source: heads.py
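Here batched_dot contracts a (batch, N) weight vector against a (batch, N, D) memory, i.e. a weighted sum of memory rows per batch element. A minimal sketch:

import numpy as np
import theano
import theano.tensor as T

w = T.matrix('w')        # (batch, N) normalized read weights
M = T.tensor3('M')       # (batch, N, D) memory
read = theano.function([w, M], T.batched_dot(w, M))   # (batch, D)

floatX = theano.config.floatX
print(read(np.array([[1.0, 0.0]], dtype=floatX),
           np.arange(6, dtype=floatX).reshape(1, 2, 3)))   # -> [[0. 1. 2.]]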


Note: The theano.tensor.batched_dot examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their developers; copyright in the source code remains with the original authors. Please consult each project's License before distributing or using the code, and do not repost without permission.