This article collects typical usage examples of the Python function theano.tensor.gt. If you are wondering what the gt function does, how to call it, or where to find concrete examples of it, the hand-picked code samples below may help.
The following shows 15 code examples of the gt function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
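Before the examples, here is a minimal standalone sketch of what T.gt itself does (the variable names are illustrative, not taken from the examples below): it builds an elementwise symbolic greater-than whose result is an int8 tensor of zeros and ones, typically used as a mask or routed through T.switch / theano.ifelse rather than a Python if.
import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
positive_mask = T.gt(x, 0)                # 1 where x > 0, else 0 (int8)
relu = x * positive_mask                  # the comparison result used as a mask
clipped = T.switch(T.gt(x, 1.0), 1.0, x)  # elementwise branch on the comparison

f = theano.function([x], [positive_mask, relu, clipped])
print(f(np.array([[-2.0, 0.5], [1.5, 0.0]], dtype=theano.config.floatX)))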
Example 1: _backward_negative_z
def _backward_negative_z(inputs, weights, normed_relevances, bias=None):
inputs_plus = inputs * T.gt(inputs, 0)
weights_plus = weights * T.gt(weights, 0)
inputs_minus = inputs * T.lt(inputs, 0)
weights_minus = weights * T.lt(weights, 0)
# Compute weights+ * inputs- and weights- * inputs+
negative_part_a = conv2d(
normed_relevances, weights_plus.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1], border_mode="full"
)
negative_part_a *= inputs_minus
negative_part_b = conv2d(
normed_relevances, weights_minus.dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1], border_mode="full"
)
negative_part_b *= inputs_plus
together = negative_part_a + negative_part_b
if bias is not None:
bias_negative = bias * T.lt(bias, 0)
bias_relevance = bias_negative.dimshuffle("x", 0, "x", "x") * normed_relevances
# Divide bias by weight size before convolving back
# mean across channel, 0, 1 dims (hope this is correct?)
fraction_bias = bias_relevance / T.prod(weights.shape[1:]).astype(theano.config.floatX)
bias_rel_in = conv2d(
fraction_bias, T.ones_like(weights).dimshuffle(1, 0, 2, 3)[:, :, ::-1, ::-1], border_mode="full"
)
together += bias_rel_in
return together
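A possible way to wire this up (the shapes are illustrative, and conv2d is assumed here to be theano.tensor.nnet.conv2d, which supports the "full" border mode used above):
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.nnet import conv2d  # assumption about the conv2d used above

inputs = T.tensor4('inputs')          # (batch, in_channels, h, w)
weights = T.tensor4('weights')        # (out_channels, in_channels, kh, kw)
normed_rel = T.tensor4('normed_rel')  # (batch, out_channels, h - kh + 1, w - kw + 1)
neg_rel = _backward_negative_z(inputs, weights, normed_rel)
back = theano.function([inputs, weights, normed_rel], neg_rel)

x = np.random.randn(1, 2, 5, 5).astype(theano.config.floatX)
w = np.random.randn(3, 2, 3, 3).astype(theano.config.floatX)
r = np.random.randn(1, 3, 3, 3).astype(theano.config.floatX)
print(back(x, w, r).shape)            # (1, 2, 5, 5): relevance mapped back to the input shape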
Example 2: calcColNormalizer
def calcColNormalizer(inMatrix):
    # Theano helper for logSum, i.e., computing ln(X + Y) from ln(X) and ln(Y).
    maxExp = -4950.0
    x, y = T.fscalars(2)
    yMinusx = y - x  # used when x > y
    xMinusy = x - y  # used when x <= y
bigger = T.switch(T.gt(x, y), x, y)
YSubtractX = T.switch(T.gt(x,y), yMinusx, xMinusy)
x_prime = T.log(1 + T.exp(YSubtractX)) + bigger
calcSum = T.switch(T.lt(YSubtractX, maxExp), bigger, x_prime)
logSum = function([x, y], calcSum, allow_input_downcast=True)
####### end of logSum ###############
    # now we calculate the sum of the log joints in each column as the normalizer
if len(inMatrix.shape) < 2:
        raise Exception("calcColNormalizer expects a 2D matrix")
nRows, nCols = inMatrix.shape
columnAccumLogSum = np.zeros(nCols)
for col in range(nCols):
currLogSum = np.NINF
for j in range(nRows):
if inMatrix[j,col] == np.NINF:
continue
currLogSum = logSum(currLogSum, inMatrix[j,col])
columnAccumLogSum[col] = currLogSum
return columnAccumLogSum
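A quick check of the column normalizer (assuming numpy is imported as np and function comes from theano, as the code above implies). Each column below holds log-probabilities that sum to 1 in linear space, so the column-wise log-sum should come out as approximately 0:
logJoint = np.log(np.array([[0.2, 0.5],
                            [0.3, 0.25],
                            [0.5, 0.25]]))
print(calcColNormalizer(logJoint))  # approximately [0.0, 0.0]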
Example 3: T_subspacel1_slow_shrinkage
def T_subspacel1_slow_shrinkage(a,L,lam_sparse,lam_slow,small_value=.001):
amp = T.sqrt(a[::2,:]**2 + a[1::2,:]**2 + small_value)
#damp = amp[:,1:] - amp[:,:-1]
# compose slow shrinkage with subspace l1 shrinkage
# slow shrinkage
div = T.zeros_like(amp)
d1 = amp[:,1:] - amp[:,:-1]
d2 = d1[:,1:] - d1[:,:-1]
div = T.set_subtensor(div[:,1:-1],-d2)
div = T.set_subtensor(div[:,0], -d1[:,0])
div = T.set_subtensor(div[:,-1], d1[:,-1])
slow_amp_shrinkage = 1 - (lam_slow/L)*(div/amp)
slow_amp_value = T.switch(T.gt(slow_amp_shrinkage,0),slow_amp_shrinkage,0)
slow_shrinkage_prox_a = slow_amp_value*a[::2,:]
slow_shrinkage_prox_b = slow_amp_value*a[1::2,:]
# subspace l1 shrinkage
amp_slow_shrinkage_prox = T.sqrt(slow_shrinkage_prox_a**2 + slow_shrinkage_prox_b**2)
#amp_shrinkage = 1. - (lam_slow*lam_sparse/L)*amp_slow_shrinkage_prox
amp_shrinkage = 1. - (lam_sparse/L)/amp_slow_shrinkage_prox
amp_value = T.switch(T.gt(amp_shrinkage,0.),amp_shrinkage,0.)
subspacel1_prox = T.zeros_like(a)
subspacel1_prox = T.set_subtensor(subspacel1_prox[ ::2,:],amp_value*slow_shrinkage_prox_a)
subspacel1_prox = T.set_subtensor(subspacel1_prox[1::2,:],amp_value*slow_shrinkage_prox_b)
return subspacel1_prox
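A minimal usage sketch (assuming theano, theano.tensor as T and numpy as np are imported); rows come in even/odd pairs, one pair per subspace:
a = T.matrix('a')
prox = T_subspacel1_slow_shrinkage(a, L=1.0, lam_sparse=0.1, lam_slow=0.1)
shrink = theano.function([a], prox)
coeffs = np.random.randn(4, 8).astype(theano.config.floatX)  # 2 subspace pairs, 8 time steps
print(shrink(coeffs).shape)  # (4, 8): shrunk coefficients, same layout as the input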
Example 4: momentum_normscaled
def momentum_normscaled(loss, all_params, lr, mom, batch_size, max_norm=np.inf, weight_decay=0.0,verbose=False):
updates = []
#all_grads = [theano.grad(loss, param) for param in all_params]
all_grads = theano.grad(gradient_clipper(loss),all_params)
grad_lst = [ T.sum( ( grad / float(batch_size) )**2 ) for grad in all_grads ]
grad_norm = T.sqrt( T.sum( grad_lst ))
if verbose:
grad_norm = theano.printing.Print('MOMENTUM GRAD NORM1:')(grad_norm)
all_grads = ifelse(T.gt(grad_norm, max_norm),
[grads*(max_norm / grad_norm) for grads in all_grads],
all_grads)
if verbose:
grad_lst = [ T.sum( ( grad / float(batch_size) )**2 ) for grad in all_grads ]
grad_norm = T.sqrt( T.sum( grad_lst ))
grad_norm = theano.printing.Print('MOMENTUM GRAD NORM2:')(grad_norm)
all_grads = ifelse(T.gt(grad_norm, np.inf),
[grads*(max_norm / grad_norm) for grads in all_grads],
all_grads)
for param_i, grad_i in zip(all_params, all_grads):
mparam_i = theano.shared(np.zeros(param_i.get_value().shape, dtype=theano.config.floatX))
v = mom * mparam_i - lr*(weight_decay*param_i + grad_i)
updates.append( (mparam_i, v) )
updates.append( (param_i, param_i + v) )
return updates
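The T.gt(grad_norm, max_norm) / ifelse pair above is the usual global gradient-norm clipping idiom. A standalone sketch of just that part (leaving out gradient_clipper and the verbose printing, which belong to the original project):
import numpy as np
import theano
import theano.tensor as T
from theano.ifelse import ifelse

w = theano.shared(np.ones(3, dtype=theano.config.floatX))
loss = T.sum(w ** 2)
grads = T.grad(loss, [w])
grad_norm = T.sqrt(sum(T.sum(g ** 2) for g in grads))
max_norm = 5.0
# rescale every gradient when the global norm exceeds max_norm, otherwise pass it through
clipped = ifelse(T.gt(grad_norm, max_norm),
                 [g * (max_norm / grad_norm) for g in grads],
                 grads)
print(theano.function([], clipped)())  # the norm is ~3.46 < 5 here, so the gradient [2, 2, 2] is unchanged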
Example 5: __init__
def __init__(self, x, lower, upper, *args, **kwargs):
super(Uniform, self).__init__(*args, **kwargs)
self._logp = T.log(T.switch(T.gt(x, upper), 0, T.switch(T.lt(x, lower), 0, 1/(upper - lower))))
    self._cdf = T.switch(T.gt(x, upper), 1, T.switch(T.lt(x, lower), 0, (x - lower)/(upper - lower)))
self._add_expr('x', x)
self._add_expr('lower', lower)
self._add_expr('upper', upper)
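The same piecewise CDF, detached from the class above (whose base class and _add_expr helper are not shown), as a runnable sketch assuming the usual theano / theano.tensor as T imports:
x, lower, upper = T.scalars('x', 'lower', 'upper')
cdf = T.switch(T.gt(x, upper), 1.0,
      T.switch(T.lt(x, lower), 0.0, (x - lower) / (upper - lower)))
cdf_fn = theano.function([x, lower, upper], cdf, allow_input_downcast=True)
print(cdf_fn(0.25, 0.0, 1.0))  # 0.25
print(cdf_fn(2.0, 0.0, 1.0))   # 1.0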
Example 6: call
def call(self, X):
if type(X) is not list or len(X) != 2:
raise Exception("SquareAttention must be called on a list of two tensors. Got: " + str(X))
frame, position = X[0], X[1]
# Reshaping the input to exclude the time dimension
frameShape = K.shape(frame)
positionShape = K.shape(position)
(chans, height, width) = frameShape[-3:]
targetDim = positionShape[-1]
frame = K.reshape(frame, (-1, chans, height, width))
position = K.reshape(position, (-1, ) + (targetDim, ))
# Applying the attention
hw = THT.abs_(position[:, 2] - position[:, 0]) * self.scale / 2.0
hh = THT.abs_(position[:, 3] - position[:, 1]) * self.scale / 2.0
position = THT.maximum(THT.set_subtensor(position[:, 0], position[:, 0] - hw), -1.0)
position = THT.minimum(THT.set_subtensor(position[:, 2], position[:, 2] + hw), 1.0)
position = THT.maximum(THT.set_subtensor(position[:, 1], position[:, 1] - hh), -1.0)
position = THT.minimum(THT.set_subtensor(position[:, 3], position[:, 3] + hh), 1.0)
rX = Data.linspace(-1.0, 1.0, width)
rY = Data.linspace(-1.0, 1.0, height)
FX = THT.gt(rX, position[:,0].dimshuffle(0,'x')) * THT.le(rX, position[:,2].dimshuffle(0,'x'))
FY = THT.gt(rY, position[:,1].dimshuffle(0,'x')) * THT.le(rY, position[:,3].dimshuffle(0,'x'))
m = FY.dimshuffle(0, 1, 'x') * FX.dimshuffle(0, 'x', 1)
m = m + self.alpha - THT.gt(m, 0.) * self.alpha
frame = frame * m.dimshuffle(0, 'x', 1, 2)
# Reshaping the frame to include time dimension
output = K.reshape(frame, frameShape)
return output
Example 7: T_subspacel1_slow_shrinkage_conv
def T_subspacel1_slow_shrinkage_conv(a, L, lam_sparse, lam_slow, imshp,kshp,featshp,stride=(1,1),small_value=.001):
featshp = (imshp[0],kshp[0],featshp[2],featshp[3]) # num images, features, szy, szx
features = T.reshape(T.transpose(a),featshp,ndim=4)
amp = T.sqrt(features[:,::2,:,:]**2 + features[:,1::2,:,:]**2 + small_value)
#damp = amp[:,1:] - amp[:,:-1]
# compose slow shrinkage with subspace l1 shrinkage
# slow shrinkage
div = T.zeros_like(amp)
d1 = amp[1:,:,:,:] - amp[:-1,:,:,:]
d2 = d1[1:,:,:,:] - d1[:-1,:,:,:]
div = T.set_subtensor(div[1:-1,:,:,:], -d2)
div = T.set_subtensor(div[0,:,:,:], -d1[0,:,:,:])
div = T.set_subtensor(div[-1,:,:,:], d1[-1,:,:,:])
slow_amp_shrinkage = 1 - (lam_slow / L) * (div / amp)
slow_amp_value = T.switch(T.gt(slow_amp_shrinkage, 0), slow_amp_shrinkage, 0)
slow_shrinkage_prox_a = slow_amp_value * features[:, ::2, :,:]
slow_shrinkage_prox_b = slow_amp_value * features[:,1::2, :,:]
# subspace l1 shrinkage
amp_slow_shrinkage_prox = T.sqrt(slow_shrinkage_prox_a ** 2 + slow_shrinkage_prox_b ** 2)
#amp_shrinkage = 1. - (lam_slow*lam_sparse/L)*amp_slow_shrinkage_prox
amp_shrinkage = 1. - (lam_sparse / L) / amp_slow_shrinkage_prox
amp_value = T.switch(T.gt(amp_shrinkage, 0.), amp_shrinkage, 0.)
subspacel1_prox = T.zeros_like(features)
subspacel1_prox = T.set_subtensor(subspacel1_prox[:, ::2, :,:], amp_value * slow_shrinkage_prox_a)
subspacel1_prox = T.set_subtensor(subspacel1_prox[:,1::2, :,:], amp_value * slow_shrinkage_prox_b)
reshape_subspacel1_prox = T.transpose(T.reshape(subspacel1_prox,(featshp[0],featshp[1]*featshp[2]*featshp[3]),ndim=2))
return reshape_subspacel1_prox
Example 8: norm_col
def norm_col(w, h):
"""normalize the column vector w (Theano function).
    Apply the inverse normalization to h so that the product w.h does not change
Parameters
----------
w: Theano vector
vector to be normalised
    h: Theano vector
        vector to be rescaled by the inverse normalization
Returns
-------
w : Theano vector with the same shape as w
normalised vector (w/norm)
h : Theano vector with the same shape as h
h*norm
"""
norm = w.norm(2, 0)
eps = 1e-12
size_norm = (T.ones_like(w)).norm(2, 0)
w = ifelse(T.gt(norm, eps),
w/norm,
(w+eps)/(eps*size_norm).astype(theano.config.floatX))
h = ifelse(T.gt(norm, eps),
h*norm,
(h*eps*size_norm).astype(theano.config.floatX))
return w, h
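A usage sketch of norm_col (assuming theano, theano.tensor as T and theano.ifelse.ifelse are imported, which the function body requires):
w, h = T.vector('w'), T.vector('h')
w_normed, h_scaled = norm_col(w, h)
normalize = theano.function([w, h], [w_normed, h_scaled], allow_input_downcast=True)
print(normalize([3.0, 4.0], [1.0, 2.0]))  # w -> [0.6, 0.8], h -> [5.0, 10.0]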
Example 9: decay
def decay(self):
updates = []
new_batch = ifelse(T.gt(self.batch, self.decay_batch), sharedX(0), self.batch+1)
new_lr = ifelse(T.gt(self.batch, self.decay_batch), self.lr*self.lr_decay_factor, self.lr)
updates.append((self.batch, new_batch))
updates.append((self.lr, new_lr))
return updates
Example 10: irprop_minus_updates
def irprop_minus_updates(params, grads):
    # iRPROP- hyper-parameters
    positiveStep = 1.2
    negativeStep = 0.5
    maxStep = 50.
    minStep = math.exp(-6)
    updates = []
    for param, gparam in zip(params, grads):
        # per-parameter step sizes and previous gradients are kept in shared variables
        delta = theano.shared(0.1 * numpy.ones(param.get_value().shape, dtype=theano.config.floatX))
        last_gparam = theano.shared(numpy.zeros(param.get_value().shape, dtype=theano.config.floatX))
        # sign of the gradient change: > 0 same direction, < 0 flipped, 0 otherwise
        change = T.sgn(gparam * last_gparam)
        # grow the step where the sign is unchanged, shrink it where it flipped,
        # and keep it inside [minStep, maxStep]
        new_delta = T.clip(T.switch(T.gt(change, 0),
                                    delta * positiveStep,
                                    T.switch(T.lt(change, 0),
                                             delta * negativeStep,
                                             delta)),
                           minStep, maxStep)
        # iRPROP-: where the sign flipped, forget the old gradient so the next
        # iteration treats it as "no change"
        new_last_gparam = T.switch(T.lt(change, 0), T.zeros_like(gparam), gparam)
        # update the weights
        updates.append((param, param - T.sgn(gparam) * new_delta))
        updates.append((delta, new_delta))
        updates.append((last_gparam, new_last_gparam))
    return updates
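A toy usage sketch on a quadratic loss (the names here are illustrative), showing how the updates are compiled and applied:
w = theano.shared(numpy.asarray([5.0, -3.0], dtype=theano.config.floatX))
loss = T.sum(w ** 2)
step = theano.function([], loss, updates=irprop_minus_updates([w], T.grad(loss, [w])))
for _ in range(200):
    step()
print(w.get_value())  # approximately [0, 0]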
Example 11: __init__
def __init__(self, alpha, m, *args, **kwargs):
super(Pareto, self).__init__(*args, **kwargs)
self.alpha = alpha
self.m = m
self.mean = tt.switch(tt.gt(alpha, 1), alpha * m / (alpha - 1.0), np.inf)
self.median = m * 2.0 ** (1.0 / alpha)
self.variance = tt.switch(tt.gt(alpha, 2), (alpha * m ** 2) / ((alpha - 2.0) * (alpha - 1.0) ** 2), np.inf)
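A standalone check of the guarded moments above (tt is assumed to be theano.tensor and np numpy, as the snippet suggests):
import numpy as np
import theano
import theano.tensor as tt

alpha, m = tt.dscalars('alpha', 'm')
mean = tt.switch(tt.gt(alpha, 1), alpha * m / (alpha - 1.0), np.inf)
mean_fn = theano.function([alpha, m], mean)
print(mean_fn(3.0, 1.0))  # 1.5 (finite mean for alpha > 1)
print(mean_fn(0.5, 1.0))  # inf (the mean is undefined for alpha <= 1)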
Example 12: errors
def errors(self, y, print_output=False):
    # check if y has the same dimension as y_pred
    if y.ndim != self.y_pred.ndim:
        raise TypeError('y should have the same shape as self.y_pred', ('y', y.type, 'y_pred', self.y_pred.type))
    # check if y is of the correct datatype
    if y.dtype.startswith('int'):
        num_positive = T.cast(T.sum(T.eq(y, 1)), 'float64')
        num_predicted_positive = T.cast(T.sum(T.eq(self.y_pred, 1)), 'float64')
        num_correctly_predicted = T.cast(T.sum(T.eq(self.y_pred * y, 1)), 'float64')
        # precision = true positives / (true positives + false positives),
        # guarded symbolically so an empty prediction set gives 0 rather than NaN
        P = T.switch(T.gt(num_predicted_positive, 0.0),
                     num_correctly_predicted / num_predicted_positive,
                     T.cast(0.0, 'float64'))
        # recall = true positives / (true positives + false negatives)
        R = T.switch(T.gt(num_positive, 0.0),
                     num_correctly_predicted / num_positive,
                     T.cast(0.0, 'float64'))
        # F1 score (0 when both precision and recall are 0)
        F1 = T.switch(T.gt(P + R, 0.0), 2.0 * P * R / (P + R), T.cast(0.0, 'float64'))
        if print_output:
            # note: these print the symbolic expressions; values only exist after compilation
            print(" num positive = {0}".format(num_positive))
            print(" num predicted positive = {0}".format(num_predicted_positive))
            print(" num correctly predicted = {0}".format(num_correctly_predicted))
            print(" precision = {0}".format(P))
            print(" recall = {0}".format(R))
            print(" F1 score = {0}".format(F1))
        return [T.mean(T.neq(self.y_pred, y)), P, R, F1]
    else:
        raise NotImplementedError()
Example 13: multiclassRealPosAndNegAndTruePredPosNegTraining0OrValidation1
def multiclassRealPosAndNegAndTruePredPosNegTraining0OrValidation1(self, y, training0OrValidation1):
"""
    The returned list has numberOfClasses x 4 integers: numberOfRealPositives, numberOfRealNegatives, numberOfTruePredictedPositives, numberOfTruePredictedNegatives for each class (incl. background).
    For class_i == 0 (background), what is reported is the WHOLE rp, rn, tpp, tpn, i.e., calculated considering background VS all other classes.
    Order in the list is the natural order of the classes (i.e., class-0-WHOLE RP,RN,TPP,TPN, class-1 RP,RN,TPP,TPN, class-2 RP,RN,TPP,TPN, ...).
"""
returnedListWithNumberOfRpRnPpPnForEachClass = []
for class_i in xrange(0, self.numberOfOutputClasses) :
#Number of Real Positive, Real Negatives, True Predicted Positives and True Predicted Negatives are reported PER CLASS (first for WHOLE).
vectorOneAtRealPositives = T.gt(y, 0) if class_i == 0 else T.eq(y, class_i)
vectorOneAtRealNegatives = T.eq(y, 0) if class_i == 0 else T.neq(y, class_i)
if training0OrValidation1 == 0 : #training:
yPredToUse = self.y_pred
else: #validation
yPredToUse = self.y_pred_inference
vectorOneAtPredictedPositives = T.gt(yPredToUse, 0) if class_i == 0 else T.eq(yPredToUse, class_i)
vectorOneAtPredictedNegatives = T.eq(yPredToUse, 0) if class_i == 0 else T.neq(yPredToUse, class_i)
vectorOneAtTruePredictedPositives = T.and_(vectorOneAtRealPositives,vectorOneAtPredictedPositives)
vectorOneAtTruePredictedNegatives = T.and_(vectorOneAtRealNegatives,vectorOneAtPredictedNegatives)
returnedListWithNumberOfRpRnPpPnForEachClass.append( T.sum(vectorOneAtRealPositives) )
returnedListWithNumberOfRpRnPpPnForEachClass.append( T.sum(vectorOneAtRealNegatives) )
returnedListWithNumberOfRpRnPpPnForEachClass.append( T.sum(vectorOneAtTruePredictedPositives) )
returnedListWithNumberOfRpRnPpPnForEachClass.append( T.sum(vectorOneAtTruePredictedNegatives) )
return returnedListWithNumberOfRpRnPpPnForEachClass
Example 14: group_div
def group_div(X, W, H, beta, params):
"""Compute beta divergence D(X|WH), intra-class distance
and intra-session distance for a particular
(class, session) couple [1]_.
Parameters
----------
X : Theano tensor
data
W : Theano tensor
Bases
H : Theano tensor
activation matrix
beta : Theano scalar
params : Theano tensor
Matrix of parameter related to class/session.
:params[0][0]: index for the (class, session) couple
:params[1][0]: number of vector basis related to class
:params[1][1]: number of vector basis related to session
:params[2]: weight on the class/session similarity constraints
:params[3]: sessions in which class c appears
:params[4]: classes present in session s
Returns
-------
cost : Theano scalar
total cost
div : Theano scalar
beta divergence D(X|WH)
sum_cls : Theano scalar
intra-class distance
sum_ses : Theano scalar
intra-session distance"""
ind = params[0][0]
k_cls = params[1][0]
k_ses = params[1][1]
lambdas = params[2]
Sc = params[3]
Cs = params[4]
res_ses, up = theano.scan(
fn=lambda Cs, prior_result: prior_result
+ eucl_dist(W[ind, :, k_cls : k_cls + k_ses], W[Cs, :, k_cls : k_cls + k_ses]),
outputs_info=T.zeros_like(beta),
sequences=Cs,
)
sum_ses = ifelse(T.gt(Cs[0], 0), res_ses[-1], T.zeros_like(beta))
res_cls, up = theano.scan(
fn=lambda Sc, prior_result: prior_result + eucl_dist(W[ind, :, 0:k_cls], W[Sc, :, 0:k_cls]),
outputs_info=T.zeros_like(beta),
sequences=Sc,
)
sum_cls = ifelse(T.gt(Sc[0], 0), res_cls[-1], T.zeros_like(beta))
betaDiv = beta_div(X, W[ind].T, H, beta)
cost = lambdas[0] * sum_cls + lambdas[1] * sum_ses + betaDiv
return cost, betaDiv, sum_cls, sum_ses
Example 15: symGivens2
def symGivens2(a, b):
"""
Stable Symmetric Givens rotation plus reflection
Parameters
a: (theano scalar) first element of a two-vector [a; b]
b: (theano scalar) second element of a two-vector [a; b]
Returns
c cosine(theta), where theta is the implicit angle of
rotation (counter-clockwise) in a plane-rotation
s sine(theta)
d two-norm of [a; b]
Description:
This method gives c and s such that
[ c s ][a] = [d],
[ s -c ][b] [0]
where d = two norm of vector [a, b],
c = a / sqrt(a^2 + b^2) = a / d,
s = b / sqrt(a^2 + b^2) = b / d.
The implementation guards against overflow in computing
sqrt(a^2 + b^2).
SEE ALSO:
(1) Algorithm 4.9, stable *unsymmetric* Givens
rotations in Golub and van Loan's book Matrix
Computations, 3rd edition.
(2) MATLAB's function PLANEROT.
Observations:
        Implementing this function as a single op in C might improve speed considerably.
"""
c_branch1 = T.switch(T.eq(a, constantX(0)), constantX(1), T.sgn(a))
c_branch21 = (a / b) * T.sgn(b) / T.sqrt(constantX(1) + (a / b) ** 2)
c_branch22 = T.sgn(a) / T.sqrt(constantX(1) + (b / a) ** 2)
c_branch2 = T.switch(T.eq(a, constantX(0)), constantX(0), T.switch(T.gt(abs(b), abs(a)), c_branch21, c_branch22))
c = T.switch(T.eq(b, constantX(0)), c_branch1, c_branch2)
s_branch1 = T.sgn(b) / T.sqrt(constantX(1) + (a / b) ** 2)
s_branch2 = (b / a) * T.sgn(a) / T.sqrt(constantX(1) + (b / a) ** 2)
s = T.switch(
T.eq(b, constantX(0)),
constantX(0),
T.switch(T.eq(a, constantX(0)), T.sgn(b), T.switch(T.gt(abs(b), abs(a)), s_branch1, s_branch2)),
)
d_branch1 = b / (T.sgn(b) / T.sqrt(constantX(1) + (a / b) ** 2))
d_branch2 = a / (T.sgn(a) / T.sqrt(constantX(1) + (b / a) ** 2))
d = T.switch(
T.eq(b, constantX(0)),
abs(a),
T.switch(T.eq(a, constantX(0)), abs(b), T.switch(T.gt(abs(b), abs(a)), d_branch1, d_branch2)),
)
return c, s, d
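constantX is not defined in the snippet above; in the sketch below it is assumed to wrap a Python number into a floatX Theano constant, which lets the rotation be checked numerically:
import numpy as np
import theano
import theano.tensor as T

def constantX(value):
    # assumed helper: a floatX constant (the original project defines its own constantX)
    return T.constant(np.asarray(value, dtype=theano.config.floatX))

a, b = T.scalars('a', 'b')
c, s, d = symGivens2(a, b)
givens = theano.function([a, b], [c, s, d], allow_input_downcast=True)
print(givens(3.0, 4.0))  # c = 0.6, s = 0.8, d = 5.0, so [c s; s -c] applied to [a; b] gives [d; 0]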