

Python tensor.sgn Method Code Examples

This article collects typical usage examples of the Python method theano.tensor.sgn. If you are wondering what tensor.sgn does, how to call it, or how it is used in practice, the curated method examples below should help. You can also explore further usage examples from the theano.tensor module.


The following presents 11 code examples of the tensor.sgn method, drawn from open-source projects and ordered by popularity.
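
For orientation, T.sgn computes the elementwise sign of a tensor, returning -1, 0, or 1 per element. A minimal standalone sketch (not from any of the projects below):

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
sign_fn = theano.function([x], T.sgn(x))
print(sign_fn(np.array([-2.5, 0., 3.], dtype=theano.config.floatX)))  # -> [-1.  0.  1.]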

Example 1: discretized_laplace

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def discretized_laplace(mean, logscale, binsize, sample=None):
    scale = .5*T.exp(logscale)
    if sample is None:
        u = G.rng_curand.uniform(size=mean.shape) - .5
        sample = mean - scale * T.sgn(u) * T.log(1-2*abs(u))
        sample = T.floor(sample/binsize)*binsize #discretize the sample
    
    d = .5*binsize
    def cdf(x):
        z = x-mean
        return .5 + .5 * T.sgn(z) * (1.-T.exp(-abs(z)/scale))
    def logmass1(x):
        # General method for probability mass, but numerically unstable for large |x-mean|/scale
        return T.log(cdf(x+d) - cdf(x-d) + 1e-7)
    def logmass2(x):
        # Only valid for |x-mean| >= d
        return -abs(x-mean)/scale + T.log(T.exp(d/scale)-T.exp(-d/scale)) - np.log(2.).astype(G.floatX) 
    def logmass_stable(x):
        switch = (abs(x-mean) < d)
        return switch * logmass1(x) + (1-switch) * logmass2(x)
    
    logp = logmass_stable(sample).flatten(2).sum(axis=1)
    entr = None #(1 + logscale).flatten(2).sum(axis=1)
    return RandomVariable(sample, logp, entr, mean=mean, scale=scale) 
Developer: openai, Project: iaf, Lines: 26, Source: rand.py
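
The sampling line above uses the inverse-CDF trick for the Laplace distribution: if u ~ Uniform(-0.5, 0.5), then mean - scale * sgn(u) * log(1 - 2|u|) is Laplace(mean, scale)-distributed. A standalone sketch of just that step, substituting Theano's built-in RandomStreams for the project's G.rng_curand (names here are illustrative):

import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=42)
mean = T.matrix('mean')
scale = T.matrix('scale')
u = srng.uniform(size=mean.shape) - .5               # u ~ Uniform(-0.5, 0.5)
sample = mean - scale * T.sgn(u) * T.log(1 - 2 * abs(u))
draw = theano.function([mean, scale], sample)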

Example 2: rprop_core

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def rprop_core(params, gradients, rprop_increase=1.01, rprop_decrease=0.99, rprop_min_step=0, rprop_max_step=100,
               learning_rate=0.01):
    """
    Rprop optimizer.
    See http://sci2s.ugr.es/keel/pdf/algorithm/articulo/2003-Neuro-Igel-IRprop+.pdf.
    """
    for param, grad in zip(params, gradients):
        grad_tm1 = theano.shared(np.zeros_like(param.get_value()), name=param.name + '_grad')
        step_tm1 = theano.shared(np.zeros_like(param.get_value()) + learning_rate, name=param.name + '_step')

        test = grad * grad_tm1
        same = T.gt(test, 0)
        diff = T.lt(test, 0)
        step = T.minimum(rprop_max_step, T.maximum(rprop_min_step, step_tm1 * (
            T.eq(test, 0) +
            same * rprop_increase +
            diff * rprop_decrease)))
        grad = grad - diff * grad
        yield param, param - T.sgn(grad) * step
        yield grad_tm1, grad
        yield step_tm1, step 
Developer: zomux, Project: deepy, Lines: 23, Source: rprop.py
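
Because rprop_core yields (shared_variable, new_value) pairs, its output can be passed straight to theano.function as updates. A hypothetical usage sketch (the toy quadratic cost is illustrative):

import numpy as np
import theano
import theano.tensor as T

x = theano.shared(np.ones(3, dtype=theano.config.floatX), name='x')
cost = T.sum(x ** 2)
updates = list(rprop_core([x], T.grad(cost, [x])))
train = theano.function([], cost, updates=updates)
for _ in range(20):
    train()  # cost shrinks as the sign-based step sizes adapt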

Example 3: sign

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def sign(x):
    return T.sgn(x) 
Developer: lingluodlut, Project: Att-ChemdNER, Lines: 4, Source: theano_backend.py

Example 4: gradient_regularize

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def gradient_regularize(self, p, g):
        g += p * self.l2
        g += T.sgn(p) * self.l1
        return g 
Developer: junyanz, Project: iGAN, Lines: 6, Source: updates.py
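
Here T.sgn(p) * self.l1 is the (sub)gradient of an L1 penalty l1 * sum(|p|), and p * self.l2 is the gradient of an L2 penalty (l2 / 2) * sum(p ** 2), so the method adds both regularizers directly to the raw gradient. A small check of that correspondence with T.grad (the constants are illustrative):

import theano.tensor as T

p = T.vector('p')
l1, l2 = 0.01, 0.001
penalty = l1 * T.sum(abs(p)) + (l2 / 2.) * T.sum(p ** 2)
g_penalty = T.grad(penalty, p)  # equals T.sgn(p) * l1 + p * l2 away from p == 0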

Example 5: sgn

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def sgn(x):
    """
    Elemwise sign of `x`.

    """
    # see decorator for function body 
Developer: muhanzhang, Project: D-VAE, Lines: 8, Source: basic.py

Example 6: __abs__

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def __abs__(self):
        assert hasattr(self, 'out'), 'all layers need a default output'
        new_obj = utils.copy(self)
        new_obj.out = abs(new_obj.out)
        if hasattr(new_obj, 'grads'):
            new_obj.grads = [TT.sgn(new_obj.out) * x for x in new_obj.grads]
        return new_obj 
Developer: pascanur, Project: GroundHog, Lines: 9, Source: basic.py
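
The grads rescaling applies the chain rule for the absolute value: d|y|/dtheta = sgn(y) * dy/dtheta almost everywhere. A quick standalone check of that identity via Theano's own gradient:

import numpy as np
import theano
import theano.tensor as T

x = T.vector('x')
g = T.grad(abs(x).sum(), x)
check = theano.function([x], g)
print(check(np.array([-3., 2.], dtype=theano.config.floatX)))  # -> [-1.  1.]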

Example 7: laplace_diag

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def laplace_diag(mean, logscale, sample=None):
    scale = .5*T.exp(logscale)
    if sample is None:
        u = G.rng_curand.uniform(size=mean.shape) - .5
        sample = mean - scale * T.sgn(u) * T.log(1-2*abs(u))
    logp = (- logscale - abs(sample-mean) / scale).flatten(2).sum(axis=1)
    entr = (1 + logscale).flatten(2).sum(axis=1)
    return RandomVariable(sample, logp, entr, mean=mean, scale=scale) 
Developer: openai, Project: iaf, Lines: 10, Source: rand.py

Example 8: _get_updates_for

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def _get_updates_for(self, param, grad):
        grad_tm1 = util.shared_like(param, 'grad')
        step_tm1 = util.shared_like(param, 'step', self.learning_rate.eval())
        test = grad * grad_tm1
        diff = TT.lt(test, 0)
        steps = step_tm1 * (TT.eq(test, 0) +
                            TT.gt(test, 0) * self.step_increase +
                            diff * self.step_decrease)
        step = TT.minimum(self.max_step, TT.maximum(self.min_step, steps))
        grad = grad - diff * grad
        yield param, TT.sgn(grad) * step
        yield grad_tm1, grad
        yield step_tm1, step 
Developer: lmjohns3, Project: downhill, Lines: 15, Source: adaptive.py

Example 9: fd3

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def fd3(mlp, fdm, params, globalLR1, globalLR2, momentParam1, momentParam2):

    cost1 = mlp.classError1 + mlp.penalty
    gradT1reg = T.grad(cost1, mlp.paramsT2)        

    updateT1 = []; updateT2 = []; onlyT2param = []    
    # take opt from Adam?
    if params.opt2 in ['adam']: opt2 = adam()
    else: opt2 = None    

    # update W - (1) + (3)            
    for param, uC1, uC2 in zip(mlp.paramsT1, fdm.updateC1T1, fdm.updateC2T1):                               
        updateT1 += [(param, param + uC1 - uC2)]

    # compute grad T2 of C1,  update T2 - [(4) - (2) ] / lr1
    for param, grad, gT2 in zip(mlp.paramsT2, gradT1reg, fdm.gradC1T2):   
        if params.T2onlySGN:
            grad_proxi = T.sgn((grad - gT2) / step * globalLR1)
        else:
            grad_proxi = (grad - gT2) / step * globalLR1
            
        tempUp, tempPair, _ = update_fun(param, T.reshape(grad_proxi, param.shape), None,
                              'T2', {}, opt2, params,
                              globalLR1, globalLR2, momentParam1, momentParam2)
        updateT2 += tempUp
        onlyT2param += tempPair        
     
     
    debugs = [check for (_, check) in onlyT2param]  
    return updateT1 + updateT2, debugs 
Developer: bigaidream-projects, Project: drmad, Lines: 32, Source: finite_difference.py

Example 10: get_loss

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def get_loss(self, target=None, *args, **kwargs):
        if target is None:
            target = self.target_var

        network_output = self.input_layer.get_output(self.input_map, *args, **kwargs)
        loss = log_loss(network_output, target)
        input_grad_map = { layer: T.grad(loss, input_var) for layer, input_var in self.input_map.iteritems() }
        perturbed_input_map = { layer: input_var + self.epsilon * T.sgn(input_grad_map[layer]) for layer, input_var in self.input_map.iteritems() }
        perturbed_network_output = self.input_layer.get_output(perturbed_input_map, *args, **kwargs)        
        perturbed_loss = log_loss(perturbed_network_output, target)

        adv_loss = self.alpha * loss + (1 - self.alpha) * perturbed_loss
        return adv_loss 
Developer: benanne, Project: kaggle-ndsb, Lines: 15, Source: nn_plankton.py
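
The perturbed_input_map line implements the fast gradient sign method (FGSM): each input is shifted by epsilon along the sign of the loss gradient, and the final loss blends the clean and adversarial terms. A minimal standalone sketch of the perturbation itself (the toy softmax model and epsilon value are illustrative):

import numpy as np
import theano
import theano.tensor as T

X = T.matrix('X')
y = T.ivector('y')
W = theano.shared(np.zeros((784, 10), dtype=theano.config.floatX), name='W')

loss = T.nnet.categorical_crossentropy(T.nnet.softmax(T.dot(X, W)), y).mean()
epsilon = 0.1
X_adv = X + epsilon * T.sgn(T.grad(loss, X))  # perturb along the input-gradient sign
make_adversarial = theano.function([X, y], X_adv)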

Example 11: __init__

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def __init__(self, model, e, a=0.5, verbose=2, iterator='linear'):

        self.verbose = verbose
        self.model = init(model)
        try:
            self.iterator = instantiate(iterators, iterator)
        except:
            self.iterator = instantiate(async_iterators, iterator)

        y_tr = self.model[-1].op({'dropout':True, 'bn_active':True, 'infer':False})
        y_te = self.model[-1].op({'dropout':False, 'bn_active':False, 'infer':False})
        y_inf = self.model[-1].op({'dropout':False, 'bn_active':True, 'infer':True})
        self.X = self.model[0].X
        self.Y = T.TensorType(theano.config.floatX, (False,)*(len(model[-1].out_shape)))()

        cost = T.nnet.categorical_crossentropy(y_tr, self.Y).mean()

        X_adv = self.X + e*T.sgn(T.grad(cost, self.X))

        self.model[0].X = X_adv
        y_tr_adv = self.model[-1].op({'dropout':True, 'bn_active':True, 'infer':False})

        cost_adv = a*cost + (1.-a)*T.nnet.categorical_crossentropy(y_tr_adv, self.Y).mean()

        te_cost = T.nnet.categorical_crossentropy(y_te, self.Y).mean()

        X_te_adv = self.X + e*T.sgn(T.grad(te_cost, self.X))

        self.updates = collect_updates(self.model, cost_adv)
        self.infer_updates = collect_infer_updates(self.model)
        self.reset_updates = collect_reset_updates(self.model)
        self._train = theano.function([self.X, self.Y], cost_adv, updates=self.updates)
        self._predict = theano.function([self.X], y_te)
        self._fast_sign = theano.function([self.X, self.Y], X_te_adv)
        self._infer = theano.function([self.X], y_inf, updates=self.infer_updates)
        self._reset = theano.function([], updates=self.reset_updates) 
Developer: IndicoDataSolutions, Project: Foxhound, Lines: 38, Source: models.py


Note: The theano.tensor.sgn method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by the community, and copyright of the source code remains with the original authors. Consult each project's license before redistributing or reusing the code; do not reproduce this page without permission.