This article collects typical usage examples of the Python method theano.tensor.gt. If you are unsure what tensor.gt does or how to use it, the curated code examples below may help. You can also explore further usage examples from the containing module, theano.tensor.
The following presents 15 code examples of tensor.gt, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
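Before the examples, here is a minimal, self-contained sketch of tensor.gt itself (not taken from any example below; the variable names are illustrative). The call builds a symbolic elementwise "greater than" comparison whose compiled result is an int8 array of zeros and ones.

import numpy as np
import theano
import theano.tensor as T

a = T.fmatrix('a')
b = T.fmatrix('b')
f = theano.function([a, b], T.gt(a, b))  # elementwise a > b

av = np.array([[1., 2.], [3., 4.]], dtype='float32')
bv = np.full((2, 2), 2., dtype='float32')
print(f(av, bv))  # [[0 0] [1 1]], dtype int8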
Example 1: test_pdbbreakpoint_op
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import gt [as alias]
def test_pdbbreakpoint_op():
    """ Test that PdbBreakpoint ops don't block gpu optimization"""
    b = tensor.fmatrix()

    # Create a function composed of a breakpoint followed by
    # some computation
    condition = tensor.gt(b.sum(), 0)
    b_monitored = PdbBreakpoint(name='TestBreakpoint')(condition, b)
    output = b_monitored ** 2

    f = theano.function([b], output, mode=mode_with_gpu)

    # Ensure that, in the compiled function, the computation following the
    # breakpoint has been moved to the gpu.
    topo = f.maker.fgraph.toposort()
    assert isinstance(topo[-2].op, cuda.GpuElemwise)
    assert topo[-1].op == cuda.host_from_gpu
Example 2: test_elemwise_comparaison_cast
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import gt [as alias]
def test_elemwise_comparaison_cast():
    """
    Test that an elemwise comparison followed by a cast to float32 is
    pushed to the gpu.
    """
    a = tensor.fmatrix()
    b = tensor.fmatrix()
    av = theano._asarray(numpy.random.rand(4, 4), dtype='float32')
    bv = numpy.ones((4, 4), dtype='float32')

    for g, ans in [(tensor.lt, av < bv), (tensor.gt, av > bv),
                   (tensor.le, av <= bv), (tensor.ge, av >= bv)]:
        f = pfunc([a, b], tensor.cast(g(a, b), 'float32'), mode=mode_with_gpu)
        out = f(av, bv)
        assert numpy.all(out == ans)
        assert any([isinstance(node.op, cuda.GpuElemwise)
                    for node in f.maker.fgraph.toposort()])
Example 3: test_pdbbreakpoint_op
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import gt [as alias]
def test_pdbbreakpoint_op():
    """ Test that PdbBreakpoint ops don't block gpu optimization"""
    b = tensor.fmatrix()

    # Create a function composed of a breakpoint followed by
    # some computation
    condition = tensor.gt(b.sum(), 0)
    b_monitored = PdbBreakpoint(name='TestBreakpoint')(condition, b)
    output = b_monitored ** 2

    f = theano.function([b], output, mode=mode_with_gpu)

    # Ensure that, in the compiled function, the computation following the
    # breakpoint has been moved to the gpu.
    topo = f.maker.fgraph.toposort()
    assert isinstance(topo[-2].op, GpuElemwise)
    assert topo[-1].op == host_from_gpu
Example 4: setUp
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import gt [as alias]
def setUp(self):
    super(TestPdbBreakpoint, self).setUp()

    # Sample computation that involves tensors with different numbers
    # of dimensions
    self.input1 = T.fmatrix()
    self.input2 = T.fscalar()
    self.output = T.dot((self.input1 - self.input2),
                        (self.input1 - self.input2).transpose())

    # Declare the conditional breakpoint
    self.breakpointOp = PdbBreakpoint("Sum of output too high")
    self.condition = T.gt(self.output.sum(), 1000)
    (self.monitored_input1,
     self.monitored_input2,
     self.monitored_output) = self.breakpointOp(self.condition,
                                                self.input1,
                                                self.input2, self.output)
Example 5: relu
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import gt [as alias]
def relu(x, alpha=0., max_value=None, threshold=0.):
    _assert_has_capability(T.nnet, 'relu')

    if alpha != 0.:
        if threshold != 0.:
            negative_part = T.nnet.relu(-x + threshold)
        else:
            negative_part = T.nnet.relu(-x)

    if threshold != 0.:
        x = x * T.cast(T.gt(x, threshold), floatx())
    else:
        x = T.nnet.relu(x)

    if max_value is not None:
        x = T.clip(x, 0.0, max_value)

    if alpha != 0.:
        x -= alpha * negative_part

    return x
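A minimal usage sketch of the threshold branch above (assumed, not part of the original snippet): multiplying x by T.cast(T.gt(x, threshold), 'float32') zeroes every activation that is not strictly greater than the threshold.

import numpy as np
import theano
import theano.tensor as T

x = T.fvector('x')
threshold = 1.0
# keep only values strictly greater than the threshold
masked = x * T.cast(T.gt(x, threshold), 'float32')
f = theano.function([x], masked)
print(f(np.array([0.5, 1.0, 2.0], dtype='float32')))  # [0. 0. 2.]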
Example 6: __init__
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import gt [as alias]
def __init__(self, input, centerbias=None, alpha=1.0):
    self.input = input
    if centerbias is None:
        centerbias = np.ones(12)

    self.alpha = theano.shared(value=np.array(alpha).astype(theano.config.floatX), name='alpha')
    self.centerbias_ys = theano.shared(value=np.array(centerbias, dtype=theano.config.floatX), name='centerbias_ys')
    self.centerbias_xs = theano.shared(value=np.linspace(0, 1, len(centerbias), dtype=theano.config.floatX), name='centerbias_xs')

    height = T.cast(input.shape[0], theano.config.floatX)
    width = T.cast(input.shape[1], theano.config.floatX)
    x_coords = (T.arange(width) - 0.5*width) / (0.5*width)
    y_coords = (T.arange(height) - 0.5*height) / (0.5*height) + 0.0001  # We cannot have zeros in there because of grad

    x_coords = x_coords.dimshuffle('x', 0)
    y_coords = y_coords.dimshuffle(0, 'x')

    dists = T.sqrt(T.square(x_coords) + self.alpha*T.square(y_coords))
    self.max_dist = T.sqrt(1 + self.alpha)
    self.dists = dists/self.max_dist

    self.factors = nonlinearity(self.dists, self.centerbias_xs, self.centerbias_ys, len(centerbias))

    apply_centerbias = T.gt(self.centerbias_ys.shape[0], 2)
    self.output = ifelse(apply_centerbias, self.input*self.factors, self.input)
    self.params = [self.centerbias_ys, self.alpha]
Example 7: greater
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import gt [as alias]
def greater(x, y):
    return T.gt(x, y)
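A hedged usage sketch of this backend wrapper (the compilation step below is illustrative and not part of the original backend code); since greater simply forwards to T.gt, it behaves like any other elementwise comparison.

import numpy as np
import theano
import theano.tensor as T

x = T.fvector('x')
y = T.fvector('y')
f = theano.function([x, y], T.gt(x, y))  # same graph that greater(x, y) would build
print(f(np.array([1., 3.], dtype='float32'),
        np.array([2., 2.], dtype='float32')))  # [0 1]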
Example 8: build_transition_cost
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import gt [as alias]
def build_transition_cost(logits, targets, num_transitions):
    """
    Build a parse action prediction cost function.
    """
    # swap seq_length dimension to front so that we can scan per timestep
    logits = T.swapaxes(logits, 0, 1)
    targets = targets.T

    def cost_t(logits, tgt, num_transitions):
        # TODO(jongauthier): Taper down xent cost as we proceed through
        # sequence?
        predicted_dist = T.nnet.softmax(logits)
        cost = T.nnet.categorical_crossentropy(predicted_dist, tgt)

        pred = T.argmax(logits, axis=1)
        error = T.neq(pred, tgt)
        return cost, error

    results, _ = theano.scan(cost_t, [logits, targets], non_sequences=[num_transitions])
    costs, errors = results

    # Create a mask that selects only transitions that involve real data.
    unrolling_length = T.shape(costs)[0]
    padding = unrolling_length - num_transitions
    padding = T.reshape(padding, (1, -1))
    rng = T.arange(unrolling_length) + 1
    rng = T.reshape(rng, (-1, 1))
    mask = T.gt(rng, padding)

    # Compute acc using the mask
    acc = 1.0 - (T.sum(errors * mask, dtype=theano.config.floatX)
                 / T.sum(num_transitions, dtype=theano.config.floatX))

    # Compute cost directly, since we *do* want a cost incentive to get the padding
    # transitions right.
    cost = T.mean(costs)
    return cost, acc
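To make the masking step concrete, here is a small standalone sketch with made-up shapes (not part of the original function): T.gt(rng, padding) broadcasts a (time, 1) column of step indices against a (1, batch) row of padding lengths, producing a (time, batch) mask that is 1 exactly where a timestep corresponds to a real transition.

import numpy as np
import theano
import theano.tensor as T

num_transitions = T.ivector('num_transitions')   # per-example number of real steps
unrolling_length = 5                              # assumed fixed for this sketch
padding = T.reshape(unrolling_length - num_transitions, (1, -1))
rng = T.reshape(T.arange(unrolling_length) + 1, (-1, 1))
mask = T.gt(rng, padding)                         # (time, batch) 0/1 mask

f = theano.function([num_transitions], mask)
print(f(np.array([5, 3], dtype='int32')))
# column 0: all ones (no padding); column 1: the first two rows are zero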
Example 9: __init__
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import gt [as alias]
def __init__(self, x, lower, upper, *args, **kwargs):
    super(Uniform, self).__init__(*args, **kwargs)
    self._logp = T.log(T.switch(
        T.gt(x, upper), 0,
        T.switch(T.lt(x, lower), 0, 1/(upper-lower))
    ))
    self._cdf = T.switch(
        T.gt(x, upper), 1,
        T.switch(T.lt(x, lower), 0, (x-lower)/(upper-lower))
    )
    self._add_expr('x', x)
    self._add_expr('lower', lower)
    self._add_expr('upper', upper)
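A short standalone sketch of the piecewise pattern used above (the compiled function and constants are illustrative, not from the original class): nesting T.switch with T.gt and T.lt evaluates the CDF as 0 below lower, 1 above upper, and linear in between.

import numpy as np
import theano
import theano.tensor as T

x = T.fvector('x')
lower, upper = 0.0, 2.0
cdf = T.switch(T.gt(x, upper), 1,
               T.switch(T.lt(x, lower), 0, (x - lower) / (upper - lower)))
f = theano.function([x], cdf)
print(f(np.array([-1.0, 1.0, 3.0], dtype='float32')))  # [0.  0.5 1. ]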
Example 10: errorsBreakdown
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import gt [as alias]
def errorsBreakdown(self, y):
    ## truth shall be cast to at least int32
    def breakDown3C(pred=None, truth=None):
        labelcount = T.bincount(truth, minlength=3)
        err = T.neq(pred, truth)
        truth_with_wrong_pred = truth[err.nonzero()]
        errcount = T.bincount(truth_with_wrong_pred, minlength=3)
        ## use 0.0001 to avoid division by 0
        return T.mul(errcount, 1./(labelcount + 0.0001))

    if self.n_out == 3:
        truth = T.cast(y, 'int32')
        return breakDown3C(self.y_pred, truth)

    if self.n_out == 12:
        ## convert the 12-label system to the 3-label system
        ## 0, 1, 2, 3 to 0; 4,5,6,7,8,9,10 to 1; and 11 to 2
        y1 = T.zeros_like(y)
        y2 = T.gt(y, 3)
        y3 = T.gt(y, 10)
        truth = T.cast(y1 + y2 + y3, 'int32')

        pred1 = T.zeros_like(self.y_pred)
        pred2 = T.gt(self.y_pred, 3)
        pred3 = T.gt(self.y_pred, 10)
        ## apply the same thresholds to the predictions
        pred = T.cast(pred1 + pred2 + pred3, 'int32')

        return breakDown3C(pred, truth)
    else:
        print('this function only works when n_out is either 3 or 12')
        sys.exit(-1)

## calculate the confusion matrix of the prediction
Example 11: confusionMatrix
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import gt [as alias]
def confusionMatrix(self, y):
    def confusionMatrix3C(pred=None, truth=None):
        labelcount = T.bincount(truth, minlength=3)
        truth_pred = truth * 3 + pred
        count = T.bincount(truth_pred, minlength=9).reshape((3, 3))
        count_norm = count / (1. * truth.shape[0])
        return count_norm

    if self.n_out == 3:
        ## truth shall be cast to at least int32
        truth = T.cast(y, 'int32')
        return confusionMatrix3C(self.y_pred, truth)

    if self.n_out == 12:
        ## convert the 12-label system to the 3-label system
        ## 0, 1, 2, 3 to 0; 4,5,6,7,8,9,10 to 1; and 11 to 2
        y1 = T.zeros_like(y)
        y2 = T.gt(y, 3)
        y3 = T.gt(y, 10)
        truth = T.cast(y1 + y2 + y3, 'int32')

        pred1 = T.zeros_like(self.y_pred)
        pred2 = T.gt(self.y_pred, 3)
        pred3 = T.gt(self.y_pred, 10)
        ## apply the same thresholds to the predictions
        pred = T.cast(pred1 + pred2 + pred3, 'int32')

        return confusionMatrix3C(pred, truth)
    else:
        print('this function only works when n_out is either 3 or 12')
        sys.exit(-1)
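To make the 12-to-3 label conversion in Examples 10 and 11 concrete, a small standalone sketch (variable names are illustrative): summing the two T.gt masks maps labels 0-3 to 0, 4-10 to 1, and 11 to 2.

import numpy as np
import theano
import theano.tensor as T

y = T.ivector('y')
coarse = T.cast(T.zeros_like(y) + T.gt(y, 3) + T.gt(y, 10), 'int32')
f = theano.function([y], coarse)
print(f(np.array([0, 3, 4, 10, 11], dtype='int32')))  # [0 0 1 1 2]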
Example 12: _get_updates_for
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import gt [as alias]
def _get_updates_for(self, param, grad):
    grad_tm1 = util.shared_like(param, 'grad')
    step_tm1 = util.shared_like(param, 'step', self.learning_rate.eval())

    test = grad * grad_tm1
    diff = TT.lt(test, 0)
    steps = step_tm1 * (TT.eq(test, 0) +
                        TT.gt(test, 0) * self.step_increase +
                        diff * self.step_decrease)
    step = TT.minimum(self.max_step, TT.maximum(self.min_step, steps))
    grad = grad - diff * grad

    yield param, TT.sgn(grad) * step
    yield grad_tm1, grad
    yield step_tm1, step
Example 13: decay
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import gt [as alias]
def decay(self):
    updates = []
    new_batch = ifelse(T.gt(self.batch, self.decay_batch), sharedX(0), self.batch + 1)
    new_lr = ifelse(T.gt(self.batch, self.decay_batch), self.lr * self.lr_decay_factor, self.lr)
    updates.append((self.batch, new_batch))
    updates.append((self.lr, new_lr))
    return updates
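The same pattern can be exercised in isolation. This is a hedged sketch with made-up shared variables and constants (sharedX and the decay attributes from the original class are not used here): ifelse takes the scalar boolean produced by T.gt and selects one of the two update expressions.

import numpy as np
import theano
import theano.tensor as T
from theano.ifelse import ifelse

batch = theano.shared(np.asarray(0, dtype='int64'), name='batch')
lr = theano.shared(np.asarray(0.1, dtype='float64'), name='lr')
decay_batch, lr_decay_factor = 3, 0.5

new_batch = ifelse(T.gt(batch, decay_batch), T.zeros_like(batch), batch + 1)
new_lr = ifelse(T.gt(batch, decay_batch), lr * lr_decay_factor, lr)
step = theano.function([], [new_batch, new_lr],
                       updates=[(batch, new_batch), (lr, new_lr)])
for _ in range(6):
    print(step())  # lr is halved once batch exceeds decay_batch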
Example 14: pos_ct
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import gt [as alias]
def pos_ct(y_true, y_pred):
    pos_pred = K.sum(gt((K.clip(y_pred, 0, 1)), 0.5))
    return pos_pred
Example 15: true_pos
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import gt [as alias]
def true_pos(y_true, y_pred):
    true_pos_ct = K.sum(gt((K.clip(y_pred * y_true, 0, 1)), 0.5))
    return true_pos_ct
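Both metric helpers follow the same counting pattern: threshold with gt, then sum the resulting 0/1 mask. A standalone sketch in plain Theano (K is the Keras backend in the originals; the direct T.gt/T.sum/T.clip calls below are an assumption made for illustration):

import numpy as np
import theano
import theano.tensor as T

y_true = T.fvector('y_true')
y_pred = T.fvector('y_pred')
# count predictions above 0.5 that coincide with a positive label
true_pos_ct = T.sum(T.gt(T.clip(y_pred * y_true, 0, 1), 0.5))
f = theano.function([y_true, y_pred], true_pos_ct)
print(f(np.array([1., 0., 1.], dtype='float32'),
        np.array([0.9, 0.8, 0.2], dtype='float32')))  # 1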