This article collects typical usage examples of the Python function theano.tensor.sort. If you have been wondering what sort does, how to call it, or where to find working examples, the hand-picked code samples below should help.
The following shows 15 code examples of the sort function, ordered by popularity by default.
Example 1: _inferred_labels
def _inferred_labels(self, s, M, threshold):
    # tensordot of s with M normalized over axis 0 (all-zero columns stay zero)
    inference = T.tensordot(
        s,
        T.switch(T.eq(M, 0), 0, M / T.sum(M, axis=0)),
        axes=[1, 1])
    # best-versus-second-best margin: difference between the two largest scores per row
    BvSB = T.sort(inference, axis=1)[:, -1] - T.sort(inference, axis=1)[:, -2]
    # keep the argmax label only where the margin exceeds the threshold, else -1
    L_inf = T.switch(
        T.gt(BvSB, threshold),
        T.cast(T.argmax(inference, axis=1), 'int32'),
        -1
    )
    return L_inf
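As a standalone illustration of the best-versus-second-best margin computed above, here is a minimal sketch that compiles just the T.sort part on toy scores (the variable names and data below are made up for illustration):

import numpy as np
import theano
import theano.tensor as T

inference = T.matrix('inference')                   # rows: samples, columns: class scores
sorted_scores = T.sort(inference, axis=1)           # ascending sort along the class axis
bvsb = sorted_scores[:, -1] - sorted_scores[:, -2]  # largest minus second-largest score per row

margin_fn = theano.function([inference], bvsb)
scores = np.array([[0.1, 0.7, 0.2],
                   [0.4, 0.35, 0.25]], dtype='float32')
print(margin_fn(scores))   # approximately [0.5, 0.05]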
Example 2: median
def median(tensor):
    """
    MAD tensor from https://groups.google.com/forum/#!topic/theano-users/I4eHjbAetEQ
    :param tensor: Input tensor
    :return: Median expression
    """
    tensor = tensor.flatten(1)
    return T.switch(T.eq((tensor.shape[0] % 2), 0),
                    # if even vector: mean of the two middle elements
                    T.mean(T.sort(tensor)[((tensor.shape[0] // 2) - 1):((tensor.shape[0] // 2) + 1)]),
                    # if odd vector: the middle element
                    T.sort(tensor)[tensor.shape[0] // 2])
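A quick way to sanity-check the even/odd switch is to compile the same expression and compare it against numpy.median; the sketch below assumes nothing beyond Theano and numpy:

import numpy as np
import theano
import theano.tensor as T

v = T.vector('v')
s = T.sort(v)
n = v.shape[0]
median_expr = T.switch(T.eq(n % 2, 0),
                       T.mean(s[(n // 2) - 1:(n // 2) + 1]),  # even length: mean of the two middle values
                       s[n // 2])                             # odd length: the middle value
median_fn = theano.function([v], median_expr)

data = np.random.rand(7).astype('float32')
assert np.isclose(median_fn(data), np.median(data))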
Example 3: softmax
def softmax(self, D, I):
    D = D * T.constant(self.attrs['sharpening'], 'float32')
    if self.attrs['norm'] == 'exp':
        E = T.exp(-D) * I
        E = E / T.maximum(T.sum(E, axis=0, keepdims=True), T.constant(1e-20, 'float32'))
    elif self.attrs['norm'] == 'sigmoid':
        E = (numpy.float32(1) - T.tanh(D) ** 2) * I
    elif self.attrs['norm'] == 'lstm':
        n_out = self.attrs['template']
        def lstm(z, i_t, s_p, h_p):
            z += T.dot(h_p, self.N_re)
            i = T.outer(i_t, T.alloc(numpy.cast['int8'](1), n_out))
            ingate = T.nnet.sigmoid(z[:, n_out:2 * n_out])
            forgetgate = T.nnet.sigmoid(z[:, 2 * n_out:3 * n_out])
            outgate = T.nnet.sigmoid(z[:, 3 * n_out:])
            input = T.tanh(z[:, :n_out])
            s_t = input * ingate + s_p * forgetgate
            h_t = T.tanh(s_t) * outgate
            return theano.gradient.grad_clip(s_t * i, -50, 50), h_t * i
        E, _ = theano.scan(lstm, sequences=[D, I],
                           outputs_info=[T.zeros((n_out,), 'float32'),
                                         T.zeros((n_out,), 'int32')])
        E = T.nnet.sigmoid(T.dot(E, self.N_out))
    else:
        raise NotImplementedError()
    if self.attrs['nbest'] > 1:
        # n-best pruning: zero out every weight below the nbest-th largest value per column
        opt = T.minimum(self.attrs['nbest'], E.shape[0])
        score = (T.sort(E, axis=0)[-opt]).dimshuffle('x', 0).repeat(E.shape[0], axis=0)
        E = T.switch(T.lt(E, score), T.zeros_like(E), E)
    return E
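The n-best pruning at the end of this method can be shown in isolation: T.sort along axis 0 gives, per column, the value of the nbest-th largest entry, and everything below it is zeroed. A minimal sketch with a hard-coded nbest (in the original it comes from self.attrs['nbest']):

import numpy as np
import theano
import theano.tensor as T

E = T.matrix('E')                        # rows: positions, columns: batch entries
nbest = 2                                # hypothetical value
opt = T.minimum(nbest, E.shape[0])
threshold = T.sort(E, axis=0)[-opt]      # the opt-th largest value in every column
pruned = T.switch(T.lt(E, threshold.dimshuffle('x', 0)), T.zeros_like(E), E)

prune_fn = theano.function([E], pruned)
weights = np.array([[0.1, 0.5],
                    [0.6, 0.2],
                    [0.3, 0.3]], dtype='float32')
print(prune_fn(weights))   # per column, only the two largest weights survive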
Example 4: _step
def _step(x, k, max_seq_len):
    # keep the k largest values along the time axis, in their original order,
    # then zero-pad the result back to max_seq_len
    tmp = x[
        T.arange(x.shape[0])[:, np.newaxis, np.newaxis],
        T.sort(T.argsort(x, axis=1)[:, -k:, :], axis=1),
        T.arange(x.shape[2])[np.newaxis, np.newaxis, :],
    ]
    return T.concatenate([tmp, T.zeros([x.shape[0], max_seq_len - k, x.shape[2]])], axis=1)
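The three broadcast index arrays above implement a per-batch, per-feature gather; the sketch below checks the same trick against numpy.take_along_axis on random data (shapes are made up):

import numpy as np
import theano
import theano.tensor as T

k = 3
x = T.tensor3('x')                                          # (batch, time, features)
rows = T.arange(x.shape[0])[:, np.newaxis, np.newaxis]
cols = T.arange(x.shape[2])[np.newaxis, np.newaxis, :]
keep = T.sort(T.argsort(x, axis=1)[:, -k:, :], axis=1)      # time positions of the k largest values, in order
kmax = x[rows, keep, cols]

f = theano.function([x], kmax)
data = np.random.randn(2, 5, 3).astype('float32')
idx = np.sort(np.argsort(data, axis=1)[:, -k:, :], axis=1)
assert np.allclose(f(data), np.take_along_axis(data, idx, axis=1))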
Example 5: _pooling_function
def _pooling_function(self, inputs, pool_size, strides, border_mode, dim_ordering):
    if pool_size[0] < -1:
        # k-max pooling
        input_layer = T.transpose(inputs, axes=(0, 1, 3, 2))
        sorted_values = T.argsort(input_layer, axis=3)
        topmax_indexes = sorted_values[:, :, :, -self.k:]
        # sort indexes so that we keep the correct order within the sentence
        topmax_indexes_sorted = T.sort(topmax_indexes)
        # given that topmax only gives the index of the third dimension,
        # we need to generate the other 3 dimensions
        dim0 = T.arange(0, input_layer.shape[0]).repeat(input_layer.shape[1] * input_layer.shape[2] * self.k)
        dim1 = T.arange(0, input_layer.shape[1]).repeat(self.k * input_layer.shape[2]).reshape((1, -1)).repeat(
            input_layer.shape[0],
            axis=0).flatten()
        dim2 = T.arange(0, input_layer.shape[2]).repeat(self.k).reshape((1, -1)).repeat(
            input_layer.shape[0] * input_layer.shape[1],
            axis=0).flatten()
        dim3 = topmax_indexes_sorted.flatten()
        x = T.transpose(
            input_layer[dim0, dim1, dim2, dim3].reshape(
                (input_layer.shape[0], input_layer.shape[1], input_layer.shape[2], self.k)),
            axes=(0, 1, 3, 2))
        return x
    else:
        return super(MaxPooling2DWrapper, self)._pooling_function(inputs, pool_size, strides,
                                                                   border_mode, dim_ordering)
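The dim0/dim1/dim2 vectors simply enumerate every (batch, channel, row) triple k times, in row-major order; the sketch below verifies that construction against numpy.meshgrid for small, made-up sizes:

import numpy as np
import theano
import theano.tensor as T

b, c, r, k = 2, 3, 4, 2
dim0 = T.arange(b).repeat(c * r * k)
dim1 = T.arange(c).repeat(k * r).reshape((1, -1)).repeat(b, axis=0).flatten()
dim2 = T.arange(r).repeat(k).reshape((1, -1)).repeat(b * c, axis=0).flatten()
f = theano.function([], [dim0, dim1, dim2])

d0, d1, d2 = f()
e0, e1, e2, _ = np.meshgrid(np.arange(b), np.arange(c), np.arange(r), np.arange(k), indexing='ij')
assert (d0 == e0.ravel()).all() and (d1 == e1.ravel()).all() and (d2 == e2.ravel()).all()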
Example 6: kmaxpooling_output
def kmaxpooling_output(input):
    '''
    Perform k-max pooling:
    1. sort along the sentence axis
    2. take the top k values, keeping their original order
    :param k: the number of top (highest) values to keep
    :type k: int
    :return:
    '''
    input = T.transpose(input, axes=(0, 1, 3, 2))
    sorted_values = T.argsort(input, axis=3)
    topmax_indexes = sorted_values[:, :, :, -k:]
    # sort indexes so that we keep the correct order within the sentence
    topmax_indexes_sorted = T.sort(topmax_indexes)
    # given that topmax only gives the index of the third dimension,
    # we need to generate the other 3 dimensions
    dim0 = T.arange(0, input.shape[0]).repeat(input.shape[1] * input.shape[2] * k)
    dim1 = T.arange(0, input.shape[1]).repeat(k * input.shape[2]).reshape((1, -1)).repeat(input.shape[0],
                                                                                          axis=0).flatten()
    dim2 = T.arange(0, input.shape[2]).repeat(k).reshape((1, -1)).repeat(input.shape[0] * input.shape[1],
                                                                         axis=0).flatten()
    dim3 = topmax_indexes_sorted.flatten()
    return T.transpose(
        input[dim0, dim1, dim2, dim3].reshape((input.shape[0], input.shape[1], input.shape[2], k)),
        axes=(0, 1, 3, 2))
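The reason for calling T.sort on the argsort result is to keep the selected values in sentence order rather than in order of magnitude; a one-dimensional sketch makes the difference visible:

import numpy as np
import theano
import theano.tensor as T

k = 3
v = T.vector('v')
top_idx = T.argsort(v)[-k:]        # indices of the k largest values, ordered by value
ordered_idx = T.sort(top_idx)      # the same indices, restored to sentence order
f = theano.function([v], [v[top_idx], v[ordered_idx]])

sentence = np.array([0.1, 0.9, 0.3, 0.7, 0.2], dtype='float32')
by_value, by_position = f(sentence)
print(by_value)      # [0.3, 0.7, 0.9]   sorted by magnitude
print(by_position)   # [0.9, 0.3, 0.7]   original word order preserved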
Example 7: __call__
def __call__(self, X):
    XY = X.dot(X.T)
    x2 = tt.sum(X ** 2, axis=1).dimshuffle(0, 'x')
    X2e = tt.repeat(x2, X.shape[0], axis=1)
    H = X2e + X2e.T - 2. * XY
    V = tt.sort(H.flatten())
    length = V.shape[0]
    # median distance
    m = tt.switch(tt.eq((length % 2), 0),
                  # if even vector
                  tt.mean(V[((length // 2) - 1):((length // 2) + 1)]),
                  # if odd vector
                  V[length // 2])
    h = .5 * m / tt.log(floatX(H.shape[0]) + floatX(1))
    # RBF
    Kxy = tt.exp(-H / h / 2.0)
    # Derivative
    dxkxy = -tt.dot(Kxy, X)
    sumkxy = tt.sum(Kxy, axis=1).dimshuffle(0, 'x')
    dxkxy = tt.add(dxkxy, tt.mul(X, sumkxy)) / h
    return Kxy, dxkxy
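The bandwidth here is driven by the median of all pairwise squared distances, obtained purely with T.sort; the sketch below isolates that step and checks it against numpy (floatX and the kernel derivative are left out):

import numpy as np
import theano
import theano.tensor as tt

X = tt.matrix('X')                           # (n_points, n_dims)
XY = X.dot(X.T)
x2 = tt.sum(X ** 2, axis=1).dimshuffle(0, 'x')
H = x2 + x2.T - 2.0 * XY                     # pairwise squared distances
V = tt.sort(H.flatten())
n = V.shape[0]
median = tt.switch(tt.eq(n % 2, 0),
                   tt.mean(V[(n // 2) - 1:(n // 2) + 1]),
                   V[n // 2])
median_fn = theano.function([X], median)

pts = np.random.randn(10, 3).astype('float32')
dists = ((pts[:, None, :] - pts[None, :, :]) ** 2).sum(-1)
assert np.isclose(median_fn(pts), np.median(dists), atol=1e-3)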
Example 8: output
def output(self, x, index_selection_func=None):
    if self.n_out > 1:
        iWin = self.k
        if self.n_in == 1:
            iWin = 1
        rnd_proj = T.dot(
            x.reshape((x.shape[0], x.shape[1] * x.shape[2])),
            self.rand_proj_mat
        )
        if index_selection_func is not None:
            self.out_idxs = index_selection_func(rnd_proj)
        else:
            self.out_idxs = T.argsort(rnd_proj)
        # keep the k highest-scoring blocks, with their indices in increasing order
        self.out_idxs = T.sort(self.out_idxs[:, -self.k:])
        # self.out_idxs.set_value(
        #     np.random.randint(0, self.n_out, (self.batch_size, self.k))
        # )
    sparse = sparse_block_dot_SS(
        self.W,
        x,
        self.in_idxs,
        self.b,
        self.out_idxs
    )
    return (sparse if self.activation is None
            else self.activation(sparse))
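The default index selection above reduces to: take the k highest-scoring output blocks per example with T.argsort, then T.sort those indices into increasing order. A small standalone sketch with toy scores:

import numpy as np
import theano
import theano.tensor as T

k = 2
proj = T.matrix('proj')                  # (batch, n_out) block activations
top = T.argsort(proj, axis=1)[:, -k:]    # k highest-scoring blocks per example
out_idxs = T.sort(top, axis=1)           # block ids in increasing order

f = theano.function([proj], out_idxs)
scores = np.array([[0.2, 0.9, 0.1, 0.5],
                   [0.8, 0.1, 0.7, 0.3]], dtype='float32')
print(f(scores))   # [[1 3]
                   #  [0 2]]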
Example 9: link
def link(self, input):
    self.input = input.dimshuffle(0, 1, 3, 2)
    # get the indexes that give the max on every line and sort them
    ind = T.argsort(self.input, axis=3)
    sorted_ind = T.sort(ind[:, :, :, -self.k_max:], axis=3)
    dim0, dim1, dim2, dim3 = sorted_ind.shape
    # prepare indices for selection
    indices_dim0 = T.arange(dim0)\
        .repeat(dim1 * dim2 * dim3)
    indices_dim1 = T.arange(dim1)\
        .repeat(dim2 * dim3)\
        .reshape((dim1 * dim2 * dim3, 1))\
        .repeat(dim0, axis=1)\
        .T\
        .flatten()
    indices_dim2 = T.arange(dim2)\
        .repeat(dim3)\
        .reshape((dim2 * dim3, 1))\
        .repeat(dim0 * dim1, axis=1)\
        .T\
        .flatten()
    # output
    self.output = self.input[
        indices_dim0,
        indices_dim1,
        indices_dim2,
        sorted_ind.flatten()
    ].reshape(sorted_ind.shape).dimshuffle(0, 1, 3, 2)
    return self.output
Example 10: apply
def apply(self, x, y, prefix):
    W, = self.parameters
    # one-hot mask marking the correct class of every example
    mask = tensor.alloc(0, y.shape[0] * self.n_classes)
    ind = tensor.arange(y.shape[0]) * self.n_classes + y
    mask = tensor.set_subtensor(mask[ind], 1.0)
    mask = tensor.reshape(mask, (y.shape[0], self.n_classes))
    # compute distance matrix
    D = ((W ** 2).sum(axis=1, keepdims=True).T + (x ** 2).sum(axis=1, keepdims=True)
         - 2 * tensor.dot(x, W.T))
    self.add_auxiliary_variable(D, name=prefix + '_D')
    d_correct = tensor.reshape(D[mask.nonzero()], (y.shape[0], 1))
    d_incorrect = tensor.reshape(D[(1.0 - mask).nonzero()], (y.shape[0], self.n_classes - 1))
    c = (d_correct - d_incorrect) / (d_correct + d_incorrect)
    # sort the per-class costs in descending order before weighting them
    c_sorted = tensor.sort(c, axis=1)[:, ::-1]
    c = (self.weighting * c_sorted).sum(axis=1, keepdims=True)
    self.add_auxiliary_variable(c, name=prefix + '_cost')
    if self.nonlin:
        c = tensor.exp(self.gamma * c)
    cost = c.mean()
    misclass = (tensor.switch(c_sorted[:, 0] < 0, 0.0, 1.0)).mean()
    return cost, misclass
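T.sort only sorts in ascending order, so the descending order needed before applying self.weighting is obtained by reversing the sorted axis; a tiny sketch:

import numpy as np
import theano
import theano.tensor as T

c = T.matrix('c')
desc = T.sort(c, axis=1)[:, ::-1]    # ascending sort, then reverse the axis for descending order
f = theano.function([c], desc)
print(f(np.array([[0.3, -0.1, 0.7]], dtype='float32')))   # [[ 0.7  0.3 -0.1]]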
Example 11: dynamic_kmaxPooling
def dynamic_kmaxPooling(self, curConv_out, k):
    neighborsForPooling = TSN.images2neibs(ten4=curConv_out, neib_shape=(1, curConv_out.shape[3]),
                                           mode='ignore_borders')
    self.neighbors = neighborsForPooling
    neighborsArgSorted = T.argsort(neighborsForPooling, axis=1)
    kNeighborsArg = neighborsArgSorted[:, -k:]
    # self.bestK = kNeighborsArg
    # sort the chosen column indices so the original order is preserved
    kNeighborsArgSorted = T.sort(kNeighborsArg, axis=1)
    ii = T.repeat(T.arange(neighborsForPooling.shape[0]), k)
    jj = kNeighborsArgSorted.flatten()
    pooledkmaxTmp = neighborsForPooling[ii, jj]
    new_shape = T.cast(T.join(0,
                              T.as_tensor([neighborsForPooling.shape[0]]),
                              T.as_tensor([k])),
                       'int64')
    pooledkmax_matrix = T.reshape(pooledkmaxTmp, new_shape, ndim=2)
    # zero-pad every row on the right up to the unified width
    rightWidth = self.unifiedWidth - k
    right_padding = T.zeros((neighborsForPooling.shape[0], rightWidth), dtype=theano.config.floatX)
    matrix_padded = T.concatenate([pooledkmax_matrix, right_padding], axis=1)
    # recover tensor form
    new_shape = T.cast(T.join(0, curConv_out.shape[:-2],
                              T.as_tensor([curConv_out.shape[2]]),
                              T.as_tensor([self.unifiedWidth])),
                       'int64')
    curPooled_out = T.reshape(matrix_padded, new_shape, ndim=4)
    return curPooled_out
Example 12: k_max_pool
def k_max_pool(self, x, k):
    """
    perform k-max pool on the input along the rows

    input: theano.tensor.tensor4
    k: theano.tensor.iscalar
        the k parameter

    Returns:
    4D tensor
    """
    x = T.reshape(x, (x.shape[0], x.shape[1], 1, x.shape[2] * x.shape[3]))
    ind = T.argsort(x, axis=3)
    sorted_ind = T.sort(ind[:, :, :, -k:], axis=3)
    dim0, dim1, dim2, dim3 = sorted_ind.shape
    indices_dim0 = T.arange(dim0).repeat(dim1 * dim2 * dim3)
    indices_dim1 = (
        T.arange(dim1).repeat(dim2 * dim3).reshape((dim1 * dim2 * dim3, 1)).repeat(dim0, axis=1).T.flatten()
    )
    indices_dim2 = T.arange(dim2).repeat(dim3).reshape((dim2 * dim3, 1)).repeat(dim0 * dim1, axis=1).T.flatten()
    result = x[indices_dim0, indices_dim1, indices_dim2, sorted_ind.flatten()].reshape(sorted_ind.shape)
    shape = (result.shape[0], result.shape[1], result.shape[2] * result.shape[3], 1)
    result = T.reshape(result, shape)
    return result
Example 13: fix_k_max
def fix_k_max(self, k, masked_data):
    # @ref: https://github.com/fchollet/keras/issues/373
    # advanced indexing: batch index x top-k time indices (kept in order) x feature index
    result = masked_data[
        T.arange(masked_data.shape[0]).dimshuffle(0, "x", "x"),
        T.sort(T.argsort(masked_data, axis=1)[:, -k:, :], axis=1),
        T.arange(masked_data.shape[2]).dimshuffle("x", "x", 0)
    ]
    return result
Example 14: __call__
def __call__(self, X):
    ind = T.argsort(X, axis=3)
    sorted_ind = T.sort(ind[:, :, :, -self.poolsize:], axis=3)
    dim0, dim1, dim2, dim3 = sorted_ind.shape
    indices_dim0 = T.arange(dim0).repeat(dim1 * dim2 * dim3)
    indices_dim1 = T.arange(dim1).repeat(dim2 * dim3).reshape((dim1 * dim2 * dim3, 1)).repeat(dim0, axis=1).T.flatten()
    indices_dim2 = T.arange(dim2).repeat(dim3).reshape((dim2 * dim3, 1)).repeat(dim0 * dim1, axis=1).T.flatten()
    return X[indices_dim0, indices_dim1, indices_dim2, sorted_ind.flatten()].reshape(sorted_ind.shape)
Example 15: median
def median(grid_split_image, grid_counts):
    shp = grid_split_image.shape
    reshape = grid_split_image.reshape((shp[0], shp[1], shp[2], -1), ndim=4)
    img_sort = T.sort(reshape)                  # sort each grid cell's pixels along the last axis
    pixels = img_sort.shape[-1]
    indices = (pixels + grid_counts) // 2
    indices = T.cast(indices, 'int32')
    medians = img_sort.take(indices)
    return medians