This article collects typical usage examples of the theano.tensor.shape function in Python. If you have been wondering what the shape function does, how to call it, or where to find it used in real code, the hand-picked examples below may help.
A total of 15 code examples of the shape function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
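Before the examples, here is a minimal self-contained sketch (not taken from the examples below) of what theano.tensor.shape does: it returns a symbolic integer vector holding the dimensions of a tensor variable, which can be used inside a compiled function.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
get_shape = theano.function([x], T.shape(x))  # returns the runtime shape as an int vector
print(get_shape(np.zeros((3, 5), dtype=theano.config.floatX)))  # -> [3 5]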
Example 1: get_sensi_speci
def get_sensi_speci(y_hat, y):
# y_hat = T.concatenate(T.sum(input=y_hat[:, 0:2], axis=1), T.sum(input=y_hat[:, 2:], axis=1))
y_hat = T.stacklists([y_hat[:, 0] + y_hat[:, 1], y_hat[:, 2] + y_hat[:, 3] + y_hat[:, 4]]).T
y_hat = T.argmax(y_hat, axis=1)  # per-sample predicted class (0 or 1); without axis the argmax collapses to a scalar
tag = 10 * y_hat + y
tneg = T.cast((T.shape(tag[(T.eq(tag, 0.)).nonzero()]))[0], config.floatX)
fneg = T.cast((T.shape(tag[(T.eq(tag, 1.)).nonzero()]))[0], config.floatX)
fpos = T.cast((T.shape(tag[(T.eq(tag, 10.)).nonzero()]))[0], config.floatX)
tpos = T.cast((T.shape(tag[(T.eq(tag, 11.)).nonzero()]))[0], config.floatX)
# assert fneg + fneg + fpos + tpos == 1380
# tneg.astype(config.floatX)
# fneg.astype(config.floatX)
# fpos.astype(config.floatX)
# tpos.astype(config.floatX)
speci = ifelse(T.eq((tneg + fpos), 0), np.float64(float('inf')), tneg / (tneg + fpos))
sensi = ifelse(T.eq((tpos + fneg), 0), np.float64(float('inf')), tpos / (tpos + fneg))
# gotcha!!! a plain Python if/else on a symbolic condition (as commented below) is
# evaluated at graph-construction time and does not work; the lazy ifelse above is required
# if T.eq((tneg + fpos), 0):
# speci = float('inf')
# else:
# speci = tneg // (tneg + fpos)
# if T.eq((tpos + fneg), 0.):
# sensi = float('inf')
# else:
# sensi = tpos // (tpos + fneg)
# speci.astype(config.floatX)
# sensi.astype(config.floatX)
return [sensi, speci]
Example 2: compileActivation
def compileActivation(self, net, layerNum):
variable = net.x if layerNum == 0 else net.varArrayA[layerNum - 1]
#Calc shapes for reshape function on-the-fly. Assume we have square images as input.
sX = T.cast(T.sqrt(T.shape(variable)[0] / self.kernel_shape[1]), 'int16')
#Converts input from 2 to 4 dimensions
Xr = T.reshape(variable.T, (T.shape(variable)[1], self.kernel_shape[1], sX, sX))
if self.optimized:
out_size = T.cast(
T.ceil((T.shape(Xr)[-1] - T.shape(net.varWeights[layerNum]['w'])[-1] + 1) / np.float32(self.stride)),
'int32')
conv_op = FilterActs(stride=self.stride)
input_shuffled = Xr.dimshuffle(1, 2, 3, 0) # bc01 to c01b
filters_shuffled = net.varWeights[layerNum]['w'].dimshuffle(1, 2, 3, 0) # bc01 to c01b
filters_flipped = filters_shuffled[:, ::-1, ::-1, :] # flip rows and columns
contiguous_input = gpu_contiguous(input_shuffled)
contiguous_filters = gpu_contiguous(filters_flipped *
(net.dropOutVectors[layerNum].dimshuffle('x', 0, 1, 'x') if self.dropout else 1.0))
a = conv_op(contiguous_input, contiguous_filters)
a = a[:, :out_size, :out_size, :]
#Add bias
a = a + net.varWeights[layerNum]['b'].dimshuffle(0, 'x', 'x', 'x')
else:
a = T.nnet.conv2d(Xr, net.varWeights[layerNum]['w'] *
(net.dropOutVectors[layerNum].dimshuffle('x', 'x', 0, 1) if self.dropout else 1.0),
border_mode='valid',
subsample=(self.stride, self.stride))
#Add bias
a = a + net.varWeights[layerNum]['b'].dimshuffle('x', 0, 'x', 'x')
if self.pooling:
if self.optimized:
#Pooling
# ds - side of square pool window
# stride - Defines the stride size between successive pooling squares.
# Setting this parameter smaller than sizeX produces overlapping pools.
# Setting it equal to sizeX gives the usual, non-overlapping pools. Values greater than sizeX are not allowed.
pool_op = MaxPool(ds=self.pooling_shape, stride=self.pooling_shape)
contiguous_input = gpu_contiguous(a)
a = pool_op(contiguous_input)
a = a.dimshuffle(3, 0, 1, 2) # c01b to bc01
else:
#a = downsample.max_pool_2d(a, (self.pooling_shape, self.pooling_shape), ignore_border=False)
a = pool.max_pool2D(a, (self.pooling_shape, self.pooling_shape), ignore_border=False)
else:
if self.optimized:
a = a.dimshuffle(3, 0, 1, 2) # c01b to bc01
a = T.flatten(a, outdim=2).T
#Sigmoid
a = self.activation(a, self.pool_size)
net.varArrayA.append(a)
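To make the optimized branch's cropping concrete (the numbers here are illustrative, not from the source): with a 32×32 input map, a 5×5 filter and stride 2, out_size = ceil((32 − 5 + 1) / 2) = 14, so the slice a[:, :out_size, :out_size, :] trims the FilterActs output (which is in c01b layout at that point) to 14×14.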
Example 3: infer_shape
def infer_shape(self, node, in_shapes):
data_shape = T.shape(node.inputs[0])
rois_shape = T.shape(node.inputs[1])
batch_size = rois_shape[0]
num_maps = data_shape[1]
h = self.pooled_h
w = self.pooled_w
out_shape = [batch_size, num_maps, h, w]
return [out_shape, out_shape]
Example 4: __init__
def __init__(self, p, *args, **kwargs):
super(Categorical, self).__init__(*args, **kwargs)
try:
self.k = tt.shape(p)[-1].tag.test_value
except AttributeError:
self.k = tt.shape(p)[-1]
self.p = p = tt.as_tensor_variable(p)
self.p = (p.T / tt.sum(p, -1)).T
self.mode = tt.argmax(p)
Example 5: activation
def activation(self,z):
y = T.reshape(z,(T.shape(z)[0], self.n_units//self.n_pieces, self.n_pieces))
y = T.max(y,axis=2)
y = T.reshape(y,(T.shape(z)[0],self.n_units//self.n_pieces))
return y
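As a companion to the example above, here is a minimal self-contained sketch of the same maxout-style reduction written as a plain function (the sizes n_units=6, n_pieces=2 are illustrative assumptions, not from the source):

import theano
import theano.tensor as T

def maxout(z, n_units, n_pieces):
    # group the units into n_units // n_pieces blocks of n_pieces, keep the max of each block
    y = T.reshape(z, (T.shape(z)[0], n_units // n_pieces, n_pieces))
    return T.max(y, axis=2)

z = T.matrix('z')
f = theano.function([z], maxout(z, n_units=6, n_pieces=2))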
Example 6: get_train
def get_train(U_Ot, U_R, lenW, n_facts):
def phi_x1(x_t, L):
return T.concatenate([L[x_t].reshape((-1,)), zeros((2*lenW,)), zeros((3,))], axis=0)
def phi_x2(x_t, L):
return T.concatenate([zeros((lenW,)), L[x_t].reshape((-1,)), zeros((lenW,)), zeros((3,))], axis=0)
def phi_y(x_t, L):
return T.concatenate([zeros((2*lenW,)), L[x_t].reshape((-1,)), zeros((3,))], axis=0)
def phi_t(x_t, y_t, yp_t, L):
return T.concatenate([zeros(3*lenW,), T.stack(T.switch(T.lt(x_t,y_t), 1, 0), T.switch(T.lt(x_t,yp_t), 1, 0), T.switch(T.lt(y_t,yp_t), 1, 0))], axis=0)
def s_Ot(xs, y_t, yp_t, L):
result, updates = theano.scan(
lambda x_t, t: T.dot(T.dot(T.switch(T.eq(t, 0), phi_x1(x_t, L).reshape((1,-1)), phi_x2(x_t, L).reshape((1,-1))), U_Ot.T),
T.dot(U_Ot, (phi_y(y_t, L) - phi_y(yp_t, L) + phi_t(x_t, y_t, yp_t, L)))),
sequences=[xs, T.arange(T.shape(xs)[0])])
return result.sum()
def sR(xs, y_t, L, V):
result, updates = theano.scan(
lambda x_t, t: T.dot(T.dot(T.switch(T.eq(t, 0), phi_x1(x_t, L).reshape((1,-1)), phi_x2(x_t, L).reshape((1,-1))), U_R.T),
T.dot(U_R, phi_y(y_t, V))),
sequences=[xs, T.arange(T.shape(xs)[0])])
return result.sum()
x_t = T.iscalar('x_t')
m = [x_t] + [T.iscalar('m_o%d' % i) for i in xrange(n_facts)]
f = [T.iscalar('f%d_t' % i) for i in xrange(n_facts)]
r_t = T.iscalar('r_t')
gamma = T.scalar('gamma')
L = T.fmatrix('L') # list of messages
V = T.fmatrix('V') # vocab
r_args = T.stack(*m)
cost_arr = [0] * 2 * (len(m)-1)
updates_arr = [0] * 2 * (len(m)-1)
for i in xrange(len(m)-1):
cost_arr[2*i], updates_arr[2*i] = theano.scan(
lambda f_bar, t: T.switch(T.or_(T.eq(t, f[i]), T.eq(t, T.shape(L)-1)), 0, T.largest(gamma - s_Ot(T.stack(*m[:i+1]), f[i], t, L), 0)),
sequences=[L, T.arange(T.shape(L)[0])])
cost_arr[2*i+1], updates_arr[2*i+1] = theano.scan(
lambda f_bar, t: T.switch(T.or_(T.eq(t, f[i]), T.eq(t, T.shape(L)-1)), 0, T.largest(gamma + s_Ot(T.stack(*m[:i+1]), t, f[i], L), 0)),
sequences=[L, T.arange(T.shape(L)[0])])
cost1, u1 = theano.scan(
lambda r_bar, t: T.switch(T.eq(r_t, t), 0, T.largest(gamma - sR(r_args, r_t, L, V) + sR(r_args, t, L, V), 0)),
sequences=[V, T.arange(T.shape(V)[0])])
cost = cost1.sum()
for c in cost_arr:
cost += c.sum()
g_uo, g_ur = T.grad(cost, [U_Ot, U_R])
train = theano.function(
inputs=[r_t, gamma, L, V] + m + f,
outputs=[cost],
updates=[(U_Ot, U_Ot-alpha*g_uo), (U_R, U_R-alpha*g_ur)])
return train
Example 7: __init__
def __init__(self, input1, input2):
x1_sub = input1[:, :, 2:-2, 2:-2]
x1_flatten = T.flatten(x1_sub)
x1 = T.extra_ops.repeat(x1_flatten, 25)
x1 = T.reshape(x1, [T.shape(x1_flatten)[0], 25])
x2 = neighbours.images2neibs(input2, neib_shape=(5, 5), neib_step=(1, 1))
diff = x1 - x2
new_shape = T.shape(x1_sub)*[1, 1, 5, 5]
diff_img = neighbours.neibs2images(diff, neib_shape=(5, 5), original_shape=[1, 25, 25*5, 5*5])
self.output = T.nnet.relu(diff_img)
Example 8: conv2D_keep_shape
def conv2D_keep_shape(x, w, image_shape, filter_shape, subsample=(1, 1)):
# crop output to same size as input
fs = T.shape(w)[2] - 1 # this is the filter size minus 1
ims = T.shape(x)[2] # this is the image size
# return theano.sandbox.cuda.dnn.dnn_conv(img=x, kerns=w,
return theano.tensor.nnet.conv2d(x,w,
image_shape=image_shape, filter_shape=filter_shape,
border_mode='full',
subsample=subsample,
)[:, :, fs/2:ims+fs/2, fs/2:ims+fs/2]
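As a worked instance of the cropping above (numbers are illustrative): with a 5×5 filter, fs = 5 − 1 = 4 and fs/2 = 2; for a 28-pixel-wide image (ims = 28) the 'full' convolution yields 28 + 4 = 32 columns, and the slice [2 : 30] keeps the central 28 columns, so the output has the same spatial size as the input.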
Example 9: down_sampleT
def down_sampleT(self, x, y, _sample_rate):
length = tensor.cast(tensor.shape(y)[0] * _sample_rate, 'int32')
id_max = tensor.cast(tensor.shape(y)[0] - 1, 'int32')
def get_sub(i,x,y):
idd = self.srng.random_integers(low = 0, high = id_max)
return [x[idd], y[idd]]
([dx, dy], updates) = theano.scan(fn = get_sub,
outputs_info=None,
sequences=tensor.arange(length),
non_sequences=[x,y])
return dx, dy, length
Example 10: get_output_for
def get_output_for( self, inputs ,**kwargs ):
# For each ROI R = [batch_index x1 y1 x2 y2]: max pool over R
input = inputs[0]
boxes = inputs[1]
batch = T.shape(input)[0]
channels = T.shape(input)[1]
height = T.shape(input)[2]
width = T.shape(input)[3]
num_boxes = T.shape(boxes)[0]
#output = T.zeros((batch * num_boxes , channels, self.num_features))
op = ROIPoolingOp(pooled_h=self.pool_dims, pooled_w=self.pool_dims, spatial_scale=self.sp_scale)
output = op(input, boxes)
return output[0]
Example 11: __init__
def __init__(self, p, *args, **kwargs):
super().__init__(*args, **kwargs)
try:
self.k = tt.shape(p)[-1].tag.test_value
except AttributeError:
self.k = tt.shape(p)[-1]
p = tt.as_tensor_variable(floatX(p))
# From #2082, it may be dangerous to automatically rescale p at this
# point without checking for positiveness
self.p = p
self.mode = tt.argmax(p, axis=-1)
if self.mode.ndim == 1:
self.mode = tt.squeeze(self.mode)
Example 12: grad
def grad(self, inputs, cost_grad):
"""
Notes:
1. The gradient is computed under the assumption that perturbations
of the input array respect triangularity, i.e. partial derivatives wrt
triangular region are zero.
2. In contrast with the usual mathematical presentation, in order to
apply theano's 'reshape' function which implements row-order (i.e. C
order), the differential expressions below have been derived based on
the row-vectorizations of inputs 'a' and 'b'.
See The Matrix Reference Manual,
Copyright 1998-2011 Mike Brookes, Imperial College, London, UK
"""
a, b = inputs
ingrad = cost_grad
ingrad = tensor.as_tensor_variable(ingrad)
shp_a = (tensor.shape(inputs[0])[1],
tensor.shape(inputs[0])[1])
I_M = tensor.eye(*shp_a)
if self.lower:
inv_a = solve_triangular(a, I_M, lower=True)
tri_M = tril(tensor.ones(shp_a))
else:
inv_a = solve_triangular(a, I_M, lower=False)
tri_M = triu(tensor.ones(shp_a))
if b.ndim == 1:
prod_a_b = tensor.tensordot(-b.T, inv_a.T, axes=1)
prod_a_b = tensor.shape_padleft(prod_a_b)
jac_veca = kron(inv_a, prod_a_b)
jac_b = inv_a
outgrad_veca = tensor.tensordot(ingrad, jac_veca, axes=1)
outgrad_a = tensor.reshape(outgrad_veca,
(inputs[0].shape[0], inputs[0].shape[0])) * tri_M
outgrad_b = tensor.tensordot(ingrad, jac_b, axes=1).flatten(ndim=1)
else:
ingrad_vec = ingrad.flatten(ndim=1)
prod_a_b = tensor.tensordot(-b.T, inv_a.T, axes=1)
jac_veca = kron(inv_a, prod_a_b)
I_N = tensor.eye(tensor.shape(inputs[1])[1],
tensor.shape(inputs[1])[1])
jac_vecb = kron(inv_a, I_N)
outgrad_veca = tensor.tensordot(ingrad_vec, jac_veca, axes=1)
outgrad_a = tensor.reshape(outgrad_veca,
(inputs[0].shape[0], inputs[0].shape[0])) * tri_M
outgrad_vecb = tensor.tensordot(ingrad_vec, jac_vecb, axes=1)
outgrad_b = tensor.reshape(outgrad_vecb,
(inputs[1].shape[0], inputs[1].shape[1]))
return [outgrad_a, outgrad_b]
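A compact way to read the two Kronecker products above (a restatement using the row-vectorization vec_r that the docstring refers to, writing C = A⁻¹B for the op's output): prod_a_b computes −Cᵀ, so jac_veca = kron(inv_a, prod_a_b) is A⁻¹ ⊗ (−Cᵀ) and jac_vecb = kron(inv_a, I_N) is A⁻¹ ⊗ I_N. These are exactly the factors in d vec_r(A⁻¹B) = (A⁻¹ ⊗ (−Cᵀ)) d vec_r(A) + (A⁻¹ ⊗ I_N) d vec_r(B), and the mask tri_M then zeroes the entries of the A-gradient that assumption 1 in the docstring treats as fixed.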
Example 13: dropout_fprop
def dropout_fprop(self, input):
# we reduce the precision of parameters for the computations
self.fixed_W = apply_format(self.format, self.W, self.comp_precision, self.w_range)
self.fixed_b = apply_format(self.format, self.b, self.comp_precision, self.b_range)
# create the dropout mask
# The cast is important because
# int * float32 = float64 which pulls things off the gpu
srng = T.shared_randomstreams.RandomStreams(self.rng.randint(999999))
self.mask = T.cast(srng.binomial(n=1, p=self.p, size=T.shape(input)), theano.config.floatX)
# apply the mask
self.fixed_x = input * self.mask
# weighted sum
self.z = T.dot(self.fixed_x, self.fixed_W) + self.fixed_b
self.fixed_z = apply_format(self.format, self.z, self.comp_precision, self.z_range)
# activation
self.y = self.activation(self.fixed_z)
self.fixed_y = apply_format(self.format, self.y, self.comp_precision, self.y_range)
# return the output
return self.fixed_y
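The mask construction is where T.shape appears: size=T.shape(input) lets the binomial mask follow whatever batch size arrives at run time. A minimal sketch of just that pattern (the names make_dropout_mask and keep_prob are illustrative, not from the source):

import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

def make_dropout_mask(srng, input, keep_prob):
    # size=T.shape(input) sizes the mask symbolically;
    # the cast avoids the int * float32 -> float64 upcast mentioned above
    return T.cast(srng.binomial(n=1, p=keep_prob, size=T.shape(input)),
                  theano.config.floatX)

srng = RandomStreams(seed=1234)
x = T.matrix('x')
dropped = x * make_dropout_mask(srng, x, keep_prob=0.5)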
Example 14: bbprop
def bbprop(self):
self.lin_bbprop = self.p_y_given_x - self.p_y_given_x * self.p_y_given_x
self.lin_bbprop /= T.shape(self.p_y_given_x)[0]
self.dict_bbprop = {}
self.dict_bbprop.update({self.b_upmask: T.sum(self.lin_bbprop, 0)})
self.dict_bbprop.update({self.W_upmask: T.dot(T.transpose(self.inp * self.inp), self.lin_bbprop)})
return T.dot(self.lin_bbprop, T.transpose(self.W * self.W)), self.dict_bbprop
Example 15: timestep
def timestep(predictions, label, len_example, total_len_example):
label_binary = T.gt(label[0:len_example-1], 0)
oov_count = T.shape(label_binary)[0] - T.sum(label_binary)
a = total_len_example
return T.sum(T.log( 1./ predictions[T.arange(len_example-1), label[0:len_example-1]]) * label_binary ), oov_count