This article collects typical usage examples of the Python method theano.tensor.sgn. If you are wondering what tensor.sgn does, how to call it, and what it looks like in practice, the curated code samples below may help. You can also explore further usage examples from the module theano.tensor, where this method lives.
The following presents 11 code examples of tensor.sgn, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: discretized_laplace
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def discretized_laplace(mean, logscale, binsize, sample=None):
    scale = .5*T.exp(logscale)
    if sample is None:
        u = G.rng_curand.uniform(size=mean.shape) - .5
        sample = mean - scale * T.sgn(u) * T.log(1-2*abs(u))
        sample = T.floor(sample/binsize)*binsize  # discretize the sample
    d = .5*binsize
    def cdf(x):
        z = x-mean
        return .5 + .5 * T.sgn(z) * (1.-T.exp(-abs(z)/scale))
    def logmass1(x):
        # General method for probability mass, but numerically unstable for large |x-mean|/scale
        return T.log(cdf(x+d) - cdf(x-d) + 1e-7)
    def logmass2(x):
        # Only valid for |x-mean| >= d
        return -abs(x-mean)/scale + T.log(T.exp(d/scale)-T.exp(-d/scale)) - np.log(2.).astype(G.floatX)
    def logmass_stable(x):
        switch = (abs(x-mean) < d)
        return switch * logmass1(x) + (1-switch) * logmass2(x)
    logp = logmass_stable(sample).flatten(2).sum(axis=1)
    entr = None  # (1 + logscale).flatten(2).sum(axis=1)
    return RandomVariable(sample, logp, entr, mean=mean, scale=scale)
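The stable log-mass above switches between a direct CDF difference near the mode and a closed-form tail expression; for a Laplace density the two agree wherever |x - mean| >= d. A minimal NumPy sketch of the same arithmetic, with toy values assumed for mean, scale, and the half-bin width d:

import numpy as np

mean, scale, d = 0.0, 1.0, 0.5  # toy values; d is half the bin size
def cdf(x):
    return .5 + .5 * np.sign(x - mean) * (1. - np.exp(-np.abs(x - mean) / scale))
def logmass1(x):  # direct CDF difference; unstable far from the mean
    return np.log(cdf(x + d) - cdf(x - d) + 1e-7)
def logmass2(x):  # tail form; valid only for |x - mean| >= d
    return -np.abs(x - mean) / scale + np.log(np.exp(d / scale) - np.exp(-d / scale)) - np.log(2.)
print(logmass1(0.6), logmass2(0.6))  # nearly identical once |x - mean| >= d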
Example 2: rprop_core
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def rprop_core(params, gradients, rprop_increase=1.01, rprop_decrease=0.99, rprop_min_step=0, rprop_max_step=100,
               learning_rate=0.01):
    """
    Rprop optimizer.
    See http://sci2s.ugr.es/keel/pdf/algorithm/articulo/2003-Neuro-Igel-IRprop+.pdf.
    """
    for param, grad in zip(params, gradients):
        grad_tm1 = theano.shared(np.zeros_like(param.get_value()), name=param.name + '_grad')
        step_tm1 = theano.shared(np.zeros_like(param.get_value()) + learning_rate, name=param.name + '_step')
        test = grad * grad_tm1
        same = T.gt(test, 0)
        diff = T.lt(test, 0)
        step = T.minimum(rprop_max_step, T.maximum(rprop_min_step, step_tm1 * (
            T.eq(test, 0) +
            same * rprop_increase +
            diff * rprop_decrease)))
        grad = grad - diff * grad
        yield param, param - T.sgn(grad) * step
        yield grad_tm1, grad
        yield step_tm1, step
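Since rprop_core is a generator of (variable, new_value) pairs, it can be passed straight to theano.function as an update list. A hedged sketch with a toy quadratic loss (the shared variable and loss below are assumptions, not part of the original source):

import numpy as np
import theano
import theano.tensor as T

w = theano.shared(np.array([3.0, -2.0]), name='w')
loss = ((w - 1.0) ** 2).sum()            # toy quadratic, minimized at w = 1
updates = list(rprop_core([w], T.grad(loss, [w])))
train = theano.function([], loss, updates=updates)
for _ in range(20):
    train()                              # each step moves w by sign of the gradient
print(w.get_value())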
Example 3: sign
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def sign(x):
    return T.sgn(x)
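A quick hedged check of the wrapper (the input values are illustrative only):

import numpy as np
import theano.tensor as T
x = T.vector('x')
print(sign(x).eval({x: np.array([-2.0, 0.0, 5.0])}))  # -> [-1.  0.  1.]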
Example 4: gradient_regularize
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def gradient_regularize(self, p, g):
    g += p * self.l2
    g += T.sgn(p) * self.l1
    return g
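The method adds the analytic penalty gradients to g: the derivative of an L2 term (l2/2)*||p||^2 is l2*p, and a subgradient of an L1 term l1*||p||_1 is l1*sign(p). The same arithmetic in a hedged NumPy sketch (all values illustrative):

import numpy as np
p = np.array([0.5, -1.5, 0.0])   # parameters
g = np.array([0.1, 0.2, 0.3])    # raw gradient
l1, l2 = 0.01, 0.001
g_reg = g + p * l2 + np.sign(p) * l1  # mirrors gradient_regularize
print(g_reg)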
Example 5: sgn
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def sgn(x):
    """
    Elemwise sign of `x`.
    """
    # see decorator for function body
Example 6: __abs__
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def __abs__(self):
    assert hasattr(self, 'out'), 'all layers need a default output'
    new_obj = utils.copy(self)
    if hasattr(new_obj, 'grads'):
        # chain rule: d|out|/dx = sgn(out) * d(out)/dx, using the pre-abs output
        new_obj.grads = [TT.sgn(new_obj.out) * x for x in new_obj.grads]
    new_obj.out = abs(new_obj.out)
    return new_obj
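For reference, Theano's own autodiff applies the same rule, d|x|/dx = sgn(x); a minimal hedged check:

import numpy as np
import theano.tensor as T
x = T.vector('x')
g = T.grad(abs(x).sum(), x)
print(g.eval({x: np.array([-3.0, 2.0])}))  # -> [-1.  1.]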
Example 7: laplace_diag
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def laplace_diag(mean, logscale, sample=None):
    scale = .5*T.exp(logscale)
    if sample is None:
        u = G.rng_curand.uniform(size=mean.shape) - .5
        sample = mean - scale * T.sgn(u) * T.log(1-2*abs(u))
    logp = (- logscale - abs(sample-mean) / scale).flatten(2).sum(axis=1)
    entr = (1 + logscale).flatten(2).sum(axis=1)
    return RandomVariable(sample, logp, entr, mean=mean, scale=scale)
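Both Laplace examples sample by the inverse-CDF transform: if u ~ Uniform(-1/2, 1/2), then mean - scale * sign(u) * log(1 - 2|u|) follows Laplace(mean, scale). A hedged NumPy sketch of the same transform (seed and sample size are arbitrary):

import numpy as np
rng = np.random.default_rng(0)
mean, scale = 0.0, 1.0
u = rng.uniform(size=100000) - .5
sample = mean - scale * np.sign(u) * np.log(1 - 2 * np.abs(u))
print(sample.mean(), np.abs(sample - mean).mean())  # approx. 0 and approx. scale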
Example 8: _get_updates_for
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def _get_updates_for(self, param, grad):
    grad_tm1 = util.shared_like(param, 'grad')
    step_tm1 = util.shared_like(param, 'step', self.learning_rate.eval())
    test = grad * grad_tm1
    diff = TT.lt(test, 0)
    steps = step_tm1 * (TT.eq(test, 0) +
                        TT.gt(test, 0) * self.step_increase +
                        diff * self.step_decrease)
    step = TT.minimum(self.max_step, TT.maximum(self.min_step, steps))
    grad = grad - diff * grad
    yield param, TT.sgn(grad) * step
    yield grad_tm1, grad
    yield step_tm1, step
Example 9: fd3
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def fd3(mlp, fdm, params, globalLR1, globalLR2, momentParam1, momentParam2):
    cost1 = mlp.classError1 + mlp.penalty
    gradT1reg = T.grad(cost1, mlp.paramsT2)
    updateT1 = []
    updateT2 = []
    onlyT2param = []
    # take opt from Adam?
    if params.opt2 in ['adam']:
        opt2 = adam()
    else:
        opt2 = None
    # update W - (1) + (3)
    for param, uC1, uC2 in zip(mlp.paramsT1, fdm.updateC1T1, fdm.updateC2T1):
        updateT1 += [(param, param + uC1 - uC2)]
    # compute grad T2 of C1, update T2 - [(4) - (2)] / lr1
    for param, grad, gT2 in zip(mlp.paramsT2, gradT1reg, fdm.gradC1T2):
        # NOTE: `step` is not defined in this snippet; it presumably comes from the enclosing scope
        if params.T2onlySGN:
            grad_proxi = T.sgn((grad - gT2) / step * globalLR1)
        else:
            grad_proxi = (grad - gT2) / step * globalLR1
        tempUp, tempPair, _ = update_fun(param, T.reshape(grad_proxi, param.shape), None,
                                         'T2', {}, opt2, params,
                                         globalLR1, globalLR2, momentParam1, momentParam2)
        updateT2 += tempUp
        onlyT2param += tempPair
    debugs = [check for (_, check) in onlyT2param]
    return updateT1 + updateT2, debugs
Example 10: get_loss
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def get_loss(self, target=None, *args, **kwargs):
    if target is None:
        target = self.target_var
    network_output = self.input_layer.get_output(self.input_map, *args, **kwargs)
    loss = log_loss(network_output, target)
    input_grad_map = {layer: T.grad(loss, input_var)
                      for layer, input_var in self.input_map.iteritems()}
    perturbed_input_map = {layer: input_var + self.epsilon * T.sgn(input_grad_map[layer])
                           for layer, input_var in self.input_map.iteritems()}
    perturbed_network_output = self.input_layer.get_output(perturbed_input_map, *args, **kwargs)
    perturbed_loss = log_loss(perturbed_network_output, target)
    adv_loss = self.alpha * loss + (1 - self.alpha) * perturbed_loss
    return adv_loss
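This is adversarial training in the fast-gradient-sign style of Goodfellow et al. (2014): each input is shifted by epsilon times the sign of the loss gradient with respect to that input, and the clean and perturbed losses are blended by alpha. A hedged, self-contained Theano sketch of just the perturbation step (the toy logistic model and epsilon = 0.1 are assumptions, not the original network):

import numpy as np
import theano
import theano.tensor as T

X = T.matrix('X')
y = T.vector('y')
w = theano.shared(np.array([1.0, -1.0, 0.5]), name='w')
p = T.nnet.sigmoid(T.dot(X, w))
loss = T.nnet.binary_crossentropy(p, y).mean()
X_adv = X + 0.1 * T.sgn(T.grad(loss, X))  # epsilon = 0.1, assumed
perturb = theano.function([X, y], X_adv)
print(perturb(np.ones((2, 3)), np.array([0.0, 1.0])))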
Example 11: __init__
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import sgn [as alias]
def __init__(self, model, e, a=0.5, verbose=2, iterator='linear'):
    self.verbose = verbose
    self.model = init(model)
    try:
        self.iterator = instantiate(iterators, iterator)
    except:
        self.iterator = instantiate(async_iterators, iterator)
    y_tr = self.model[-1].op({'dropout': True, 'bn_active': True, 'infer': False})
    y_te = self.model[-1].op({'dropout': False, 'bn_active': False, 'infer': False})
    y_inf = self.model[-1].op({'dropout': False, 'bn_active': True, 'infer': True})
    self.X = self.model[0].X
    self.Y = T.TensorType(theano.config.floatX, (False,) * (len(model[-1].out_shape)))()
    cost = T.nnet.categorical_crossentropy(y_tr, self.Y).mean()
    X_adv = self.X + e * T.sgn(T.grad(cost, self.X))
    self.model[0].X = X_adv
    y_tr_adv = self.model[-1].op({'dropout': True, 'bn_active': True, 'infer': False})
    cost_adv = a * cost + (1. - a) * T.nnet.categorical_crossentropy(y_tr_adv, self.Y).mean()
    te_cost = T.nnet.categorical_crossentropy(y_te, self.Y).mean()
    X_te_adv = self.X + e * T.sgn(T.grad(te_cost, self.X))
    self.updates = collect_updates(self.model, cost_adv)
    self.infer_updates = collect_infer_updates(self.model)
    self.reset_updates = collect_reset_updates(self.model)
    self._train = theano.function([self.X, self.Y], cost_adv, updates=self.updates)
    self._predict = theano.function([self.X], y_te)
    self._fast_sign = theano.function([self.X, self.Y], X_te_adv)
    self._infer = theano.function([self.X], y_inf, updates=self.infer_updates)
    self._reset = theano.function([], updates=self.reset_updates)