

Python tensor.abs_ Function Code Examples

This article collects typical usage examples of the Python function theano.tensor.abs_. If you are wondering what abs_ does, how to call it, or want concrete examples, the curated code samples below should help.


The following 15 code examples of the abs_ function are shown, sorted by popularity by default.
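
For reference, T.abs_ computes the elementwise absolute value of a tensor. A minimal standalone sketch:

import theano
import theano.tensor as T

x = T.dvector('x')
f = theano.function([x], T.abs_(x))
print(f([-1.5, 0.0, 2.0]))   # [ 1.5  0.   2. ]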

Example 1: smoothL1

import theano.tensor as T


def smoothL1(x):
    # x is a vector of scalars
    lto = T.abs_(x) < 1
    gteo = T.abs_(x) >= 1
    # quadratic branch (0.5*x^2) where |x| < 1, linear branch (|x| - 0.5) elsewhere
    new_x = T.set_subtensor(x[lto.nonzero()], 0.5 * T.square(x[lto.nonzero()]))
    new_x = T.set_subtensor(new_x[gteo.nonzero()], T.abs_(new_x[gteo.nonzero()]) - 0.5)
    return new_x
Author: eracah, Project: hur-detect, Lines: 7, Source: helper_fxns.py
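
As a quick usage sketch (assuming the smoothL1 function above is in scope), the graph can be compiled and evaluated like this:

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')
f = theano.function([x], smoothL1(x))
print(f(np.array([-2.0, 0.3, 0.0])))   # [ 1.5    0.045  0.   ]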

Example 2: theano_setup

    def theano_setup(self):
        # note: `function` below is theano.function (assumes `from theano import function`)

        W = T.dmatrix('W')
        b = T.dvector('b')
        c = T.dvector('c')
        x = T.dmatrix('x')
    
        s = T.dot(x, W) + c
        # h = 1 / (1 + T.exp(-s))
        # h = T.nnet.sigmoid(s)
        h = T.tanh(s)
        # r = T.dot(h,W.T) + b
        # r = theano.printing.Print("r=")(2*T.tanh(T.dot(h,W.T) + b))
        ract = T.dot(h,W.T) + b
        r = self.output_scaling_factor * T.tanh(ract)
    
        #g  = function([W,b,c,x], h)
        #f  = function([W,b,c,h], r)
        #fg = function([W,b,c,x], r)
    
        # Another variable to be able to call a function
        # with a noisy x and compare it to a reference x.
        y = T.dmatrix('y')

        all_losses = ((r - y)**2)
        loss = T.sum(all_losses)
        #loss = ((r - y)**2).sum()
        
        self.theano_encode_decode = function([W,b,c,x], r)
        self.theano_all_losses = function([W,b,c,x,y], [all_losses, T.abs_(s), T.abs_(ract)])
        self.theano_gradients = function([W,b,c,x,y], [T.grad(loss, W), T.grad(loss, b), T.grad(loss, c)])
Author: gyom, Project: cae.py, Lines: 31, Source: dae_theano.py

Example 3: relevance_conv_a_b_abs

import theano.tensor as T
# conv2d below is assumed to be theano.tensor.nnet.conv2d
from theano.tensor.nnet import conv2d


def relevance_conv_a_b_abs(inputs, weights, out_relevances, a, b, bias=None):
    # alpha-beta relevance propagation on absolute-valued inputs; requires a - b == 1
    assert a is not None
    assert b is not None
    assert a - b == 1
    weights_plus = weights * T.gt(weights, 0)
    weights_neg = weights * T.lt(weights, 0)

    plus_norm = conv2d(T.abs_(inputs), weights_plus)
    # stabilize, prevent division by 0
    eps = 1e-4
    plus_norm += T.eq(plus_norm, 0) * eps
    plus_rel_normed = out_relevances / plus_norm
    in_rel_plus = conv2d(plus_rel_normed, weights_plus.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1], border_mode="full")
    in_rel_plus *= T.abs_(inputs)

    # minuses to get positive outputs, since will be subtracted
    # at end of function
    neg_norm = -conv2d(T.abs_(inputs), weights_neg)
    neg_norm += T.eq(neg_norm, 0) * eps
    neg_rel_normed = out_relevances / neg_norm
    in_rel_neg = -conv2d(neg_rel_normed, weights_neg.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1], border_mode="full")
    in_rel_neg *= T.abs_(inputs)

    in_relevance = a * in_rel_plus - b * in_rel_neg
    return in_relevance
Author: robintibor, Project: braindecode, Lines: 25, Source: heatmap.py

Example 4: power_pool_2d

import theano.tensor as tt


def power_pool_2d(x, ds, p=3, b=0):
    """Lp ('power') pooling: y = (mean(|x|^p) + b^p)^(1/p) over ds-sized regions."""
    n_batch, n_ch, s0, s1 = x.shape
    d0, d1 = ds
    c = tt.ones((s0, s1))

    # sum elements in regions
    y = tt.abs_(x[:, :, 0::d0, 0::d1])**p
    d = c[0::d0, 0::d1].copy()
    for i in range(0, d0):
        for j in range(0, d1):
            if i != 0 or j != 0:
                # floor division keeps the index bounds integral
                ni = (s0 - i - 1) // d0 + 1
                nj = (s1 - j - 1) // d1 + 1
                xij = tt.abs_(x[:, :, i::d0, j::d1])**p
                y = tt.inc_subtensor(y[:, :, :ni, :nj], xij)
                d = tt.inc_subtensor(d[:ni, :nj], c[i::d0, j::d1])

    # divide by number of elements
    y /= d
    y += b**p

    # take root
    y = y**(1. / p)

    return y
Author: valeiras, Project: spiking_hand_detector, Lines: 25, Source: pooling.py
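
A quick shape sanity check of the pooling function above (a sketch, assuming power_pool_2d is in scope):

import numpy as np
import theano
import theano.tensor as tt

x = tt.tensor4('x')
pool = theano.function([x], power_pool_2d(x, (2, 2), p=3))

img = np.random.randn(1, 1, 4, 4).astype(theano.config.floatX)
print(pool(img).shape)   # (1, 1, 2, 2): each 2x2 region reduced to one value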

Example 5: attention_gate

    def attention_gate(self, facts, memory, question):
        # TODO: for the first iteration question and memory are the same so
        # we can speedup the computation

        # facts is (num_batch * fact_length * memory_dim)
        # question is (num_batch * memory_dim)
        # memory is (num_batch * memory_dim)
        # attention_gates must be (fact_length * num_batch * 1)

        # Compute z (num_batch * fact_length * (7*memory_dim + 2))

        # Dimshuffle facts to get a shape of
        # (fact_length * num_batch * memory_dim)
        facts = facts.dimshuffle(1, 0, 2)

        # Pad questions and memory to be of shape
        # (_ * num_batch * memory_dim)
        memory = T.shape_padleft(memory)
        question = T.shape_padleft(question)

        to_concatenate = list()
        to_concatenate.extend([facts, memory, question])
        to_concatenate.extend([facts * question, facts * memory])
        to_concatenate.extend([T.abs_(facts - question),
                               T.abs_(facts - memory)])

        # z = concatenate(to_concatenate, axis=2)

        # TODO: to be continued for the moment just return ones
        return T.ones((facts.shape[1], facts.shape[0], 1))
Author: clementdoumouro, Project: dmn, Lines: 30, Source: episodic_memory.py

Example 6: criteria

    def criteria(self):
        
        F = T.dot(self.w, self.X)
        Fs = T.sqrt(F**2 + 1e-8)
        L2Fs = (Fs**2).sum(axis=[1])
        L2Fs = T.sqrt(L2Fs)
        NFs = Fs/L2Fs.dimshuffle(0, 'x')
        L2Fn = (NFs**2).sum(axis=[0])
        L2Fn = T.sqrt(L2Fn)
        self.Fhat = NFs/L2Fn.dimshuffle('x', 0)
        
#        self.Fhat = self.feedForward(self.dot())   
        
        F = T.sqrt(T.dot(self.gMat, T.sqr(self.Fhat))) # self.Fhat1)) # self.feedForward(self.dot()
        Fs = T.sqrt(F**2 + 1e-8)
        L2Fs = (Fs**2).sum(axis=[1])
        L2Fs = T.sqrt(L2Fs)
        NFs = Fs/L2Fs.dimshuffle(0, 'x')
        L2Fn = (NFs**2).sum(axis=[0])
        L2Fn = T.sqrt(L2Fn)
        self.gFhat = NFs/L2Fn.dimshuffle('x', 0)
        
#        from connections import distMat
#        x = distMat(self.w.shape[0].eval(), 20)
#        inhibition = T.dot(T.sqr(self.Fhat1.T), x)
#        inhibition = self.Fhat1 * inhibition.T
        
        return T.abs_(self.Fhat) + T.abs_(self.gFhat) #+ T.abs_(inhibition)
Author: fengjiran, Project: sparse_filtering, Lines: 28, Source: sf_archive.py

Example 7: pass_fn

def pass_fn(*inputs):
    '''
    Function for the scan op. Has to work with a variable number of arguments.
    Input layout: diff, message[message_order[0]], ..., message[message_order[N]],
    initial_potential[0], ..., initial_potential[M]
    '''
    # note: message_order, first_messages, mpstate, convergence_threshold and
    # convergence_criterion are closed over from the enclosing scope, and
    # `theano` / `T` (theano.tensor) are module-level imports

    input_messages = {}
    # quick creation of message potential tables via a shallow copy of existing tables
    for i, midx in enumerate(message_order):
        input_messages[midx] = first_messages[midx].replace_tensor(inputs[i+1])

    off = 1 + len(message_order)  # offset into inputs for the initial potentials
    ipotentials = []
    # create initial potentials from the passed inputs
    for i, pot in enumerate(mpstate.initial_potentials):
        ipotentials.append(pot.replace_tensor(inputs[off+i]))

    # pass messages and calculate the next set of messages
    (used_message_order, next_messages) = mpstate.pass_messages(input_messages=input_messages, initial_potentials=ipotentials)
    if convergence_threshold >= 0.0:
        # absolute difference between the previous and current message sets,
        # used as a convergence diagnostic
        diff = T.sum(T.abs_(next_messages[used_message_order[0]].pt_tensor.flatten() - input_messages[used_message_order[0]].pt_tensor.flatten()))
        for i in range(1, len(used_message_order)):
            diff += T.sum(T.abs_(next_messages[used_message_order[i]].pt_tensor.flatten() - input_messages[used_message_order[i]].pt_tensor.flatten()))

        # result conforms to the start of the input layout; stop scanning once converged
        resvalues = [diff] + [next_messages[midx].pt_tensor for midx in message_order]
        return resvalues, theano.scan_module.until(diff <= convergence_threshold)
    else:
        diff = convergence_criterion
        resvalues = [diff] + [next_messages[midx].pt_tensor for midx in message_order]
        return resvalues
Author: kadeng, Project: pypgmc, Lines: 34, Source: message_passing.py

Example 8: get_cost_updates

    def get_cost_updates(self, x, W, W_prime, b, b_prime, corruption_level, learning_rate, l2reg=0., l1reg=0.):
        """ This function computes the cost and the updates for one training
        step of the dA """
        self.x = x
        self.W = W
        self.W_prime = W_prime
        self.b = b
        self.b_prime = b_prime
        self.params = [self.W, self.W_prime, self.b, self.b_prime]
        if corruption_level is None:
            tilde_x = self.x
        else:
            tilde_x = self.get_corrupted_input(self.x, corruption_level)
        y       = self.get_hidden_values( tilde_x)
        z       = self.get_reconstructed_input(y)
        # note : we sum over the size of a datapoint; if we are using minibatches,
        #        L will  be a vector, with one entry per example in minibatch
        
        XE = self.x * T.log(z) + (1 - self.x) *  T.log(1-z)
        cost = -T.mean(T.sum(XE, axis=1),axis=0)
        
        if l2reg != 0.:
            cost += l2reg * (T.mean(T.sum(self.W*self.W,1),0) + T.mean(T.sum(self.W_prime*self.W_prime,1),0))
        if l1reg != 0.:
            # note: the same T.abs_(y) term appears twice (as in the original source),
            # which doubles the effective L1 penalty on the hidden activations
            cost += l1reg * (T.mean(T.sum(T.abs_(y), 1), 0) + T.mean(T.sum(T.abs_(y), 1), 0))
        # compute the gradients of the cost of the `dA` with respect
        # to its parameters 
        gparams = T.grad(cost, self.params)
#        # generate the list of updates
#        updates = {}
#        for param, gparam in zip(self.params, gparams):
#            updates[param] = param -  learning_rate*gparam
        updates = [-learning_rate*gparam for gparam in gparams]

        return (cost, updates)
Author: pombredanne, Project: DeepANN-sparse, Lines: 35, Source: SimpledAclass.py
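
For intuition, the cross-entropy part of the cost is easy to mirror in plain NumPy (made-up numbers):

import numpy as np

x = np.array([[1.0, 0.0, 1.0]])   # hypothetical binary input
z = np.array([[0.9, 0.2, 0.7]])   # hypothetical reconstruction
XE = x * np.log(z) + (1 - x) * np.log(1 - z)
print(-np.mean(np.sum(XE, axis=1)))   # ~0.685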

Example 9: init_param_updates

    def init_param_updates(self, layer, parameter):
        step = self.variables.step

        parameter_shape = T.shape(parameter).eval()
        prev_delta = theano.shared(
            name="{}/prev-delta".format(parameter.name),
            value=asfloat(np.zeros(parameter_shape)),
        )
        prev_gradient = theano.shared(
            name="{}/prev-grad".format(parameter.name),
            value=asfloat(np.zeros(parameter_shape)),
        )

        gradient = T.grad(self.variables.error_func, wrt=parameter)
        grad_delta = T.abs_(prev_gradient - gradient)

        parameter_delta = ifelse(
            T.eq(self.variables.epoch, 1),
            gradient,
            T.clip(
                T.abs_(prev_delta) * gradient / grad_delta,
                -self.upper_bound,
                self.upper_bound
            )
        )
        return [
            (parameter, parameter - step * parameter_delta),
            (prev_gradient, gradient),
            (prev_delta, parameter_delta),
        ]
Author: itdxer, Project: neupy, Lines: 30, Source: quickprop.py
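
For intuition, a plain-NumPy sketch of the clipped quickprop step computed above (all values made up):

import numpy as np

g_prev, g = 0.8, 0.5      # previous and current gradient of one weight
delta_prev = -0.1         # previous weight change
upper_bound = 1.0

# mirrors T.clip(T.abs_(prev_delta) * gradient / grad_delta, ...) from the snippet
step = np.clip(np.abs(delta_prev) * g / np.abs(g_prev - g),
               -upper_bound, upper_bound)
print(step)   # 0.1 * 0.5 / 0.3 ~= 0.1667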

Example 10: prepareTraining

    def prepareTraining(self):
        '''
        Prepares the relevant functions
        (details on neural_net_creator's prepareTraining)
        '''
        #loss objective to minimize
        self.prediction = lasagne.layers.get_output(self.network)
        self.prediction=self.prediction[:,0]
        #self.loss = lasagne.objectives.categorical_crossentropy(self.prediction, self.target_var)
        #the loss is now the squared error in the output
        self.loss =  lasagne.objectives.squared_error(self.prediction, self.target_var)
        self.loss = self.loss.mean()

        self.params = lasagne.layers.get_all_params(self.network, trainable=True)
        self.updates = lasagne.updates.nesterov_momentum(
                self.loss, self.params, learning_rate=0.01, momentum=0.9)

        self.test_prediction = lasagne.layers.get_output(self.network, deterministic=True)
        self.test_prediction=self.test_prediction[:,0]
        self.test_loss = lasagne.objectives.squared_error(self.test_prediction, self.target_var)
        self.test_loss = self.test_loss.mean()
        #accuracy = fraction of samples whose prediction lies within a tolerance of the target (0.01, 0.05, 0.1 below; can be changed)
        self.test_acc = T.mean(T.le(T.abs_(T.sub(self.test_prediction,self.target_var)),0.01)
                            , dtype=theano.config.floatX)
        self.test_acc2 = T.mean(T.le(T.abs_(T.sub(self.test_prediction,self.target_var)),0.05)
                            , dtype=theano.config.floatX)
        self.test_acc3 = T.mean(T.le(T.abs_(T.sub(self.test_prediction,self.target_var)),0.1)
                            , dtype=theano.config.floatX)

        self.train_fn = theano.function([self.input_var, self.target_var], self.loss, updates=self.updates)

        self.val_fn = theano.function([self.input_var, self.target_var], [self.test_loss,self.test_acc,self.test_acc2,self.test_acc3])

        self.use = theano.function([self.input_var],[self.test_prediction])
Author: vr367305, Project: s4r_metal, Lines: 34, Source: regression_test.py
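
The tolerance-based accuracy used above is easy to check in plain NumPy (made-up numbers):

import numpy as np

pred   = np.array([0.50, 0.32, 0.70])
target = np.array([0.505, 0.36, 0.59])

# same idea as T.mean(T.le(T.abs_(pred - target), tol)) in the snippet
for tol in (0.01, 0.05, 0.1):
    print(tol, np.mean(np.abs(pred - target) <= tol))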

Example 11: forward_jacobian_log_det

    def forward_jacobian_log_det(self, x):
        if x.ndim == 1:
            return tt.log(tt.abs_(self.diag_weights)).sum()
        elif x.ndim == 2:
            return x.shape[0] * tt.log(tt.abs_(self.diag_weights)).sum()
        else:
            raise ValueError('x must be one or two dimensional.')
Author: matt-graham, Project: differentiable-generator-networks, Lines: 7, Source: invertible_layers.py
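
The formula is the standard identity for an elementwise linear map: for y = d * x the Jacobian is diag(d), so log|det J| = sum(log|d_i|). A quick NumPy check with made-up weights:

import numpy as np

d = np.array([0.5, -2.0, 3.0])             # hypothetical diagonal weights
J = np.diag(d)                             # Jacobian of x -> d * x

print(np.log(np.abs(np.linalg.det(J))))    # 1.0986...
print(np.log(np.abs(d)).sum())             # same value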

Example 12: _calc_regularization_cost

    def _calc_regularization_cost(self):
        """Calculate the regularization cost given the weight decay parameters.

        Only the parameters will be considered that are stored in the set
        self.regularize. We need to handle it manually in this class, because
        the weight matrices contain bias columns, which should not be considered
        in regularization computation. Therefore, do not!!! add W1 and W2 to
        self.regularize

        Returns
        -------
        theano variable
            regularization cost depending on the parameters to be regularized
            and the weight decay parameters for L1 and L2 regularization.
        """
        cost = super(SLmNce, self)._calc_regularization_cost()
        l1_cost = T.sum(T.abs_(self.W1[:, :-1]))
        l1_cost += T.sum(T.abs_(self.W2[:, :-1]))
        l2_cost = T.sum(T.sqr(self.W1[:, :-1]))
        l2_cost += T.sum(T.sqr(self.W2[:, :-1]))

        if self.l1_weight != 0:
            cost += self.l1_weight * l1_cost

        if self.l2_weight != 0:
            cost += self.l2_weight * l2_cost

        return cost
Author: herbertchen1, Project: SciTail, Lines: 28, Source: networks.py
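
The bias-column exclusion is the key detail; a NumPy sketch with a made-up weight matrix:

import numpy as np

W1 = np.array([[0.5, -1.0, 0.1],    # last column is the bias column
               [2.0,  0.0, -0.3]])
l1 = np.abs(W1[:, :-1]).sum()       # bias column excluded, as in the snippet
l2 = np.square(W1[:, :-1]).sum()
print(l1, l2)                       # 3.5 5.25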

Example 13: _recurrence

        def _recurrence(v_h_, x_h_, v_t_, x_t_, a_t_, is_aggressive):

            state = tt.concatenate([v_h_, x_h_, tt.flatten(v_t_), tt.flatten(x_t_), tt.flatten(a_t_)])

            h0 = tt.dot(state, self.W_a_0) + self.b_a_0
            relu0 = tt.nnet.relu(h0)

            h1 = tt.dot(relu0, self.W_a_1) + self.b_a_1
            relu1 = tt.nnet.relu(h1)

            h2 = tt.dot(relu1, self.W_a_2) + self.b_a_2
            relu2 = tt.nnet.relu(h2)

            a = tt.dot(relu2, self.W_a_c)

            v_h, x_h, v_t, x_t, a_t, cost_transition = _step_state(v_h_, x_h_, v_t_, x_t_, a_t_, a, is_aggressive)

            # cost:

            # 0. smooth acceleration policy
            cost_accel = tt.abs_(a)

            # 1. forcing the host to move forward (until the top point of the roundabout)
            cost_progress = tt.nnet.relu(0.5*self.two_pi_r-x_h)

            # 2. keeping distance from close vehicles
            x_abs_diffs = tt.abs_(x_h - x_t)

            cost_accident = tt.mean(3*tt.nnet.relu(self.require_distance - x_abs_diffs)) * (x_h > -0.5*self.host_length)  # tt.nnet.sigmoid(x_h + 0.5*self.host_length)

            cost = self.alpha_accel * cost_accel + self.alpha_progress * cost_progress + self.alpha_accident * cost_accident

            return (v_h, x_h, v_t, x_t, a_t, cost, cost_transition), t.scan_module.until(x_h[0]>=0.45*self.two_pi_r)
Author: bentzinir, Project: Buffe, Lines: 33, Source: controller.py

Example 14: batch_multicrop

def batch_multicrop(bboxes, frame):
	# assumes module-level `import theano as T` and `import theano.tensor as TT`,
	# plus globals img_col / img_row giving the frame size
	att_col = img_col
	att_row = img_row

	_cx = (bboxes[:, :, 1] + bboxes[:, :, 3]) / 2; cx = (_cx + 1) / 2. * img_col
	_cy = (bboxes[:, :, 0] + bboxes[:, :, 2]) / 2; cy = (_cy + 1) / 2. * img_row
	_w = TT.abs_(bboxes[:, :, 3] - bboxes[:, :, 1]) / 2; w = _w * img_col
	_h = TT.abs_(bboxes[:, :, 2] - bboxes[:, :, 0]) / 2; h = _h * img_row

	dx = w / (img_col - 1)
	dy = h / (img_row - 1)

	mx = cx.dimshuffle(0, 1, 'x') + dx.dimshuffle(0, 1, 'x') * (TT.arange(att_col, dtype=T.config.floatX).dimshuffle('x', 'x', 0) - (att_col - 1) / 2.)
	my = cy.dimshuffle(0, 1, 'x') + dy.dimshuffle(0, 1, 'x') * (TT.arange(att_row, dtype=T.config.floatX).dimshuffle('x', 'x', 0) - (att_row - 1) / 2.)

	a = TT.arange(img_col, dtype=T.config.floatX)
	b = TT.arange(img_row, dtype=T.config.floatX)

	# (batch_size, nr_samples, channels, frame_size, att_size)
	ax = TT.maximum(0, 1 - TT.abs_(a.dimshuffle('x', 'x', 'x', 0, 'x') - mx.dimshuffle(0, 1, 'x', 'x', 2)))
	by = TT.maximum(0, 1 - TT.abs_(b.dimshuffle('x', 'x', 'x', 0, 'x') - my.dimshuffle(0, 1, 'x', 'x', 2)))

	def __batch_multicrop_dot(a, b):
		return (a.dimshuffle(0, 1, 2, 3, 4, 'x') * b.dimshuffle(0, 1, 2, 'x', 3, 4)).sum(axis=4)

	crop = __batch_multicrop_dot(by.dimshuffle(0, 1, 2, 4, 3), __batch_multicrop_dot(frame.dimshuffle(0, 'x', 1, 2, 3), ax))
	return crop
Author: olivernina, Project: tracking-with-rnn, Lines: 27, Source: recurrent_mlp_gpu.py

Example 15: crop_attention_bilinear

def crop_attention_bilinear(bbox, frame):
	# assumes the same module-level aliases (`import theano as T`, `import theano.tensor as TT`)
	# and globals img_col / img_row / att_col / att_row as the previous example
	att = bbox
	frame_col = img_col
	frame_row = img_row

	_cx = (att[1] + att[3]) / 2; cx = (_cx + 1) / 2. * frame_col
	_cy = (att[0] + att[2]) / 2; cy = (_cy + 1) / 2. * frame_row
	_w = TT.abs_(att[3] - att[1]) / 2; w = _w * frame_col
	_h = TT.abs_(att[2] - att[0]) / 2; h = _h * frame_row

	dx = w / (att_col - 1)
	dy = h / (att_row - 1)

	mx = cx + dx * (TT.arange(att_col, dtype=T.config.floatX) - (att_col - 1) / 2.)
	my = cy + dy * (TT.arange(att_row, dtype=T.config.floatX) - (att_row - 1) / 2.)

	a = TT.arange(frame_col, dtype=T.config.floatX)
	b = TT.arange(frame_row, dtype=T.config.floatX)

	ax = TT.maximum(0, 1 - TT.abs_(a.dimshuffle(0, 'x') - mx.dimshuffle('x', 0)))
	by = TT.maximum(0, 1 - TT.abs_(b.dimshuffle(0, 'x') - my.dimshuffle('x', 0)))

	bilin = TT.dot(by.T, TT.dot(frame, ax))

	return bilin
Author: BarclayII, Project: tracking-with-rnn, Lines: 25, Source: recurrent_local_online.py
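
Both crop examples build their sampling matrices from the same kernel, max(0, 1 - |a - m|), which is exactly linear interpolation between the two pixels neighbouring each sample coordinate. A tiny NumPy illustration with a made-up coordinate:

import numpy as np

a = np.arange(5, dtype=float)          # pixel grid 0..4
m = 2.3                                # sample coordinate
w = np.maximum(0, 1 - np.abs(a - m))
print(w)          # [0.  0.  0.7 0.3 0. ] -> 70% pixel 2, 30% pixel 3
print(w.sum())    # 1.0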


Note: The theano.tensor.abs_ examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Refer to each project's license before distributing or reusing the code. Do not reproduce this article without permission.