This page collects typical usage examples of the Python function theano.sandbox.neighbours.images2neibs. If you are unsure how to call images2neibs, what it returns, or how it is used in practice, the hand-picked code samples below should help.
15 code examples of images2neibs are shown below, sorted by popularity by default.
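Before the per-project examples, here is a minimal usage sketch of images2neibs itself (following the pattern from the Theano documentation; it assumes an old Theano installation where theano.sandbox.neighbours is still available). It slides a non-overlapping (2, 2) window over a 4x4 image and returns one flattened patch per row. The excerpts that follow are fragments from larger projects and generally assume imports along the lines of import numpy as np, import theano, import theano.tensor as T, and from theano.sandbox import neighbours as TSN; the short sketches added after some examples reuse the names defined here.

import numpy as np
import theano
import theano.tensor as T
from theano.sandbox.neighbours import images2neibs

images = T.tensor4('images')                      # (batch, channels, rows, cols)
neibs = images2neibs(images, neib_shape=(2, 2))   # non-overlapping 2x2 patches
window_function = theano.function([images], neibs)

x = np.arange(16, dtype=theano.config.floatX).reshape(1, 1, 4, 4)
print(window_function(x))
# Each row of the output is one flattened 2x2 patch:
# [[ 0.  1.  4.  5.]
#  [ 2.  3.  6.  7.]
#  [ 8.  9. 12. 13.]
#  [10. 11. 14. 15.]]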
Example 1: unpooling
def unpooling(self, Y_4D, Z, X_4D):
""" This method reverses pooling operation.
"""
Y = images2neibs(Y_4D, T.as_tensor_variable((1, Y_4D.shape[3])))
X = images2neibs(X_4D, T.as_tensor_variable((1, X_4D.shape[3])))
X_z = T.zeros_like(X)
X_ = T.set_subtensor(X_z[T.arange(X.shape[0]).reshape((X.shape[0], 1)), Z], Y)
return X_.reshape(X_4D.shape)
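For reference, the plain (non-pooled) case can be inverted with Theano's own neibs2images, which undoes images2neibs when no values were discarded. A minimal round-trip sketch, assuming the imports from the snippet at the top of this page; the 4x4 shape is hard-coded purely for illustration.

from theano.sandbox.neighbours import neibs2images

im = T.tensor4('im')
patches = images2neibs(im, neib_shape=(2, 2))
rebuilt = neibs2images(patches, (2, 2), (1, 1, 4, 4))  # known original shape
roundtrip = theano.function([im], rebuilt)

x = np.arange(16, dtype=theano.config.floatX).reshape(1, 1, 4, 4)
assert np.allclose(roundtrip(x), x)  # patches reassemble into the original image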
Example 2: dynamic_kmaxPooling
def dynamic_kmaxPooling(self, curConv_out, k):
neighborsForPooling = TSN.images2neibs(ten4=curConv_out, neib_shape=(1,curConv_out.shape[3]), mode='ignore_borders')
self.neighbors = neighborsForPooling
neighborsArgSorted = T.argsort(neighborsForPooling, axis=1)
kNeighborsArg = neighborsArgSorted[:,-k:]
#self.bestK = kNeighborsArg
kNeighborsArgSorted = T.sort(kNeighborsArg, axis=1)
ii = T.repeat(T.arange(neighborsForPooling.shape[0]), k)
jj = kNeighborsArgSorted.flatten()
pooledkmaxTmp = neighborsForPooling[ii, jj]
new_shape = T.cast(T.join(0,
T.as_tensor([neighborsForPooling.shape[0]]),
T.as_tensor([k])),
'int64')
pooledkmax_matrix = T.reshape(pooledkmaxTmp, new_shape, ndim=2)
rightWidth=self.unifiedWidth-k
right_padding = T.zeros((neighborsForPooling.shape[0], rightWidth), dtype=theano.config.floatX)
matrix_padded = T.concatenate([pooledkmax_matrix, right_padding], axis=1)
#recover tensor form
new_shape = T.cast(T.join(0, curConv_out.shape[:-2],
T.as_tensor([curConv_out.shape[2]]),
T.as_tensor([self.unifiedWidth])),
'int64')
curPooled_out = T.reshape(matrix_padded, new_shape, ndim=4)
return curPooled_out
Example 3: link
def link(self, input):
self.input = input
# select the lines where we apply k-max pooling
neighbors_for_pooling = TSN.images2neibs(
ten4=self.input,
            neib_shape=(self.input.shape[2], 1),  # each patch is one full column, so k-max is taken along the height
mode='valid' # 'ignore_borders'
)
neighbors_arg_sorted = T.argsort(neighbors_for_pooling, axis=1)
k_neighbors_arg = neighbors_arg_sorted[:, -self.k_max:]
k_neighbors_arg_sorted = T.sort(k_neighbors_arg, axis=1)
ii = T.repeat(T.arange(neighbors_for_pooling.shape[0]), self.k_max)
jj = k_neighbors_arg_sorted.flatten()
flattened_pooled_out = neighbors_for_pooling[ii, jj]
pooled_out_pre_shape = T.join(
0,
self.input.shape[:-2],
[self.input.shape[3]],
[self.k_max]
)
self.output = flattened_pooled_out.reshape(
pooled_out_pre_shape,
ndim=self.input.ndim
).dimshuffle(0, 1, 3, 2)
return self.output
Example 4: Fold
def Fold(self, conv_out, ds=(2,1)):
'''Fold into two. (Sum up vertical neighbours)'''
imgs = images2neibs(conv_out, T.as_tensor_variable(ds), mode='ignore_borders')  # each row of imgs holds one (2, 1) vertical pair
orig = conv_out.shape
shp = (orig[0], orig[1], T.cast(orig[2]/2, 'int32'), orig[3])
res = T.reshape(T.sum(imgs, axis=-1), shp)
return res
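The same folding idea can be written as a standalone sketch (imports as in the snippet at the top of this page; x4 and fold_fn are illustrative names): a (2, 1) window groups each pair of vertically adjacent values, and summing over the patch axis halves the height.

x4 = T.tensor4('x4')
pairs = images2neibs(x4, T.as_tensor_variable((2, 1)), mode='ignore_borders')
folded = T.sum(pairs, axis=-1).reshape((x4.shape[0], x4.shape[1],
                                        x4.shape[2] // 2, x4.shape[3]))
fold_fn = theano.function([x4], folded)

x = np.arange(8, dtype=theano.config.floatX).reshape(1, 1, 4, 2)
print(fold_fn(x))
# [[[[ 2.  4.]
#    [10. 12.]]]]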
Example 5: pool_2d_i2n
def pool_2d_i2n(input, ds=(2, 2), strides=None,
pad=(0, 0),
pool_function=T.max, mode='ignore_borders'):
if strides is None:
strides = ds
if strides[0] > ds[0] or strides[1] > ds[1]:
raise RuntimeError(
"strides should be smaller than or equal to ds,"
" strides=(%d, %d) and ds=(%d, %d)" %
(strides + ds))
shape = input.shape
if pad != (0, 0):
assert pool_function is T.max
pad_x = pad[0]
pad_y = pad[1]
a = T.alloc(-numpy.inf, shape[0], shape[1], shape[2] + pad_x * 2,
shape[3] + pad_y * 2)
input = T.set_subtensor(a[:, :,
pad_x:pad_x + shape[2],
pad_y:pad_y + shape[3]],
input)
shape = input.shape
neibs = images2neibs(input, ds, strides, mode=mode)
pooled_neibs = pool_function(neibs, axis=1)
output_width = (shape[2] - ds[0]) // strides[0] + 1
output_height = (shape[3] - ds[1]) // strides[1] + 1
pooled_output = pooled_neibs.reshape((shape[0], shape[1],
output_width, output_height))
return pooled_output
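A quick usage sketch for pool_2d_i2n above (imports as in the top snippet; the padding branch additionally needs import numpy): 2x2 max pooling of a 4x4 image keeps the maximum of each non-overlapping block.

inp = T.tensor4('inp')
max_pool_2x2 = theano.function([inp], pool_2d_i2n(inp, ds=(2, 2)))

x = np.arange(16, dtype=theano.config.floatX).reshape(1, 1, 4, 4)
print(max_pool_2x2(x))
# [[[[ 5.  7.]
#    [13. 15.]]]]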
Example 6: kmaxPool
def kmaxPool(self, conv_out, pool_shape, k):
'''
Perform k-max Pooling.
'''
n0, n1, d, size = pool_shape
imgs = images2neibs(conv_out, T.as_tensor_variable((1, size)))
indices = T.argsort(T.mul(imgs, -1))
k_max_indices = T.sort(indices[:, :k])
S = T.arange(d*n1*n0).reshape((d*n1*n0, 1))
return imgs[S, k_max_indices].reshape((n0, n1, d, k))
Example 7: fold
def fold(conv):
c_shape = conv.shape
pool_size = (1, conv.shape[-1])
neighbors_to_pool = TSN.images2neibs(ten4=conv,
neib_shape=pool_size,
mode='ignore_borders')
n_shape = neighbors_to_pool.shape
paired = T.reshape(neighbors_to_pool, (n_shape[0] // 2, 2, n_shape[-1]))
summed = T.sum(paired, axis=1)
folded_out = T.reshape(summed, (c_shape[0], c_shape[1], c_shape[2] // 2, c_shape[3]),
ndim=4)
return folded_out
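A usage sketch for fold above (imports as in the top snippet): each pair of adjacent rows is summed, so the height is halved while the width is preserved.

conv = T.tensor4('conv')
fold_rows = theano.function([conv], fold(conv))

x = np.arange(6, dtype=theano.config.floatX).reshape(1, 1, 2, 3)
print(fold_rows(x))   # rows [0, 1, 2] and [3, 4, 5] are summed elementwise
# [[[[3. 5. 7.]]]]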
Example 8: k_max_pool
def k_max_pool(conv, k):
c_shape = conv.shape
# c_shape = tPrint('conv_shape')(c_shape)
pool_size = (1, conv.shape[-1])
neighbors_to_pool = TSN.images2neibs(ten4=conv,
neib_shape=pool_size,
mode='ignore_borders')
arg_sorted = T.argsort(neighbors_to_pool, axis=1)
top_k = arg_sorted[:, -k:]
top_k_sorted = T.sort(top_k, axis=1)
ii = T.repeat(T.arange(neighbors_to_pool.shape[0], dtype='int32'), k)
jj = top_k_sorted.flatten()
values = neighbors_to_pool[ii, jj]
pooled_out = T.reshape(values, (c_shape[0], c_shape[1], c_shape[2], k), ndim=4)
return pooled_out
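A usage sketch for k_max_pool above (imports as in the top snippet): with k=2 the two largest values in each row are kept, in their original left-to-right order.

conv = T.tensor4('conv')
kmax2 = theano.function([conv], k_max_pool(conv, 2))

x = np.array([[[[3., 1., 4., 1., 5.],
                [9., 2., 6., 5., 3.]]]], dtype=theano.config.floatX)
print(kmax2(x))
# [[[[4. 5.]
#    [9. 6.]]]]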
Example 9: cifar10neighbs
def cifar10neighbs(topo, patch_shape):
assert topo.ndim == 4
r, c = patch_shape
topo = as_tensor_variable(topo)
flat = images2neibs(ten4 = topo.dimshuffle(3,0,1,2),
neib_shape = (r,c),
neib_step = (1,1))
m = flat.shape[0] // 3
n = flat.shape[1] * 3
red = flat[0:m,:]
green = flat[m:2*m,:]
blue = flat[2*m:,:]
rval = T.concatenate((red,green,blue),axis=1)
return rval
Example 10: kmaxPooling
def kmaxPooling(self, fold_out, k):
neighborsForPooling = TSN.images2neibs(ten4=fold_out, neib_shape=(1,fold_out.shape[3]), mode='ignore_borders')
self.neighbors = neighborsForPooling
neighborsArgSorted = T.argsort(neighborsForPooling, axis=1)
kNeighborsArg = neighborsArgSorted[:,-k:]
#self.bestK = kNeighborsArg
kNeighborsArgSorted = T.sort(kNeighborsArg, axis=1)
ii = T.repeat(T.arange(neighborsForPooling.shape[0]), k)
jj = kNeighborsArgSorted.flatten()
pooledkmaxTmp = neighborsForPooling[ii, jj]
new_shape = T.cast(T.join(0, fold_out.shape[:-2],
T.as_tensor([fold_out.shape[2]]),
T.as_tensor([k])),
'int64')
pooled_out = T.reshape(pooledkmaxTmp, new_shape, ndim=4)
return pooled_out
Example 11: pool_2d_i2n
def pool_2d_i2n(input, ds=(2, 2), strides=None,
pool_function=T.max, mode='ignore_borders'):
if strides is None:
strides = ds
if strides[0] > ds[0] or strides[1] > ds[1]:
raise RuntimeError(
"strides should be smaller than or equal to ds,"
" strides=(%d, %d) and ds=(%d, %d)" %
(strides + ds))
shape = input.shape
neibs = images2neibs(input, ds, strides, mode=mode)
pooled_neibs = pool_function(neibs, axis=1)
output_width = (shape[2] - ds[0]) // strides[0] + 1
output_height = (shape[3] - ds[1]) // strides[1] + 1
pooled_output = pooled_neibs.reshape((shape[0], shape[1],
output_width, output_height))
return pooled_output
Example 12: __init__
def __init__(self,input,input_shape = None):
if isinstance(input, Layer):
self.input = input.output
Layer.linkstruct[input].append(self)
if input_shape is None:
input_shape = input.output_shape
else:
self.input = input
self.input_shape = input_shape
#Only square image allowed
assert input_shape[2]==input_shape[3]
#Extend one pixel at each direction
shapeext = input_shape[0], input_shape[1], input_shape[2]+2, input_shape[3]+2
inputext = CachedAlloc(dtypeX(-INF), *shapeext)
inputext = T.set_subtensor(inputext[:,:,1:input_shape[2]+1,1:input_shape[3]+1], self.input)
self.output_shape = input_shape[0], input_shape[1], (input_shape[2]+1)//2, (input_shape[3]+1)//2
self.output = images2neibs(inputext, (3,3), (2,2), 'ignore_borders').mean(axis=-1)
self.output = T.patternbroadcast(self.output.reshape(self.output_shape),(False,)*4)
Example 13: __init__
def __init__(self,input,input_shape = None):
if isinstance(input, Layer):
self.input = input.output
if input_shape is None:
input_shape = input.output_shape
else:
self.input = input
self.input_shape = input_shape
##Only square image allowed
#assert input_shape[2]==input_shape[3]
#Extend one pixel at each direction
shapeext = input_shape[0], input_shape[1], input_shape[2]+2, input_shape[3]+2
inputext = T.alloc(dtypeX(-INF), *shapeext)
inputext = T.set_subtensor(inputext[:,:,1:input_shape[2]+1,1:input_shape[3]+1], self.input)
self.output_shape = input_shape[0], input_shape[1], (input_shape[2]+1)//2, (input_shape[3]+1)//2
self.output = images2neibs(inputext, (3,3), (2,2), 'ignore_borders').mean(axis=-1)
self.output = self.output.reshape(self.output_shape)
Example 14: __init__
def __init__(self, potentialWidth, potentialHeight,
columnsWidth, columnsHeight,
inputWidth, inputHeight,
centerPotSynapses, connectedPerm,
minOverlap, wrapInput):
# Overlap Parameters
###########################################
# Specifies if the potential synapses are centered
# over the columns
self.centerPotSynapses = centerPotSynapses
# Use a wrap input function instead of padding the input
# to calculate the overlap scores.
self.wrapInput = wrapInput
self.potentialWidth = potentialWidth
self.potentialHeight = potentialHeight
self.connectedPermParam = connectedPerm
self.minOverlap = minOverlap
self.inputWidth = inputWidth
self.inputHeight = inputHeight
# Calculate how many columns are expected from these
# parameters.
self.columnsWidth = columnsWidth
self.columnsHeight = columnsHeight
self.numColumns = columnsWidth * columnsHeight
# Store the potential inputs to every column.
# Each row represents the inputs a column's potential synapses cover.
self.colInputPotSyn = None
# Store the potential overlap values for every column
self.colPotOverlaps = None
# stepX and stepY describe how far each column's potential
# synapses are offset from those of the adjacent columns in the
# X and Y directions. These parameters can't change, as Theano
# uses them to set up functions.
self.stepX, self.stepY = self.getStepSizes(inputWidth, inputHeight,
self.columnsWidth, self.columnsHeight,
self.potentialWidth, self.potentialHeight)
# Construct a tie-breaker matrix for the columns' potential synapses.
# It contains small values that help resolve any ties in potential
# overlap scores for columns.
self.potSynTieBreaker = np.array([[0.0 for i in range(self.potentialHeight*self.potentialWidth)]
for j in range(self.numColumns)])
#import ipdb; ipdb.set_trace()
self.makePotSynTieBreaker(self.potSynTieBreaker)
# Store the potential inputs to every column plus the tie breaker value.
# Each row represents the inputs a column's potential synapses cover.
self.colInputPotSynTie = np.array([[0.0 for i in range(self.potentialHeight*self.potentialWidth)]
for j in range(self.numColumns)])
self.colTieBreaker = np.array([0.0 for i in range(self.numColumns)])
self.makeColTieBreaker(self.colTieBreaker)
# Create theano variables and functions
############################################
# Create the theano function for calculating
# the elementwise multiplication of 2 matrices.
self.i_grid = T.matrix(dtype='float32')
self.j_grid = T.matrix(dtype='float32')
self.multi_vals = self.i_grid * self.j_grid
self.multi_grids = function([self.i_grid, self.j_grid],
self.multi_vals,
on_unused_input='warn',
allow_input_downcast=True)
# Create the theano function for calculating
# the addition of a small tie breaker value to each matrix input.
self.o_grid = T.matrix(dtype='float32')
self.tie_grid = T.matrix(dtype='float32')
self.add_vals = T.add(self.o_grid, self.tie_grid)
self.add_tieBreaker = function([self.o_grid, self.tie_grid],
self.add_vals,
on_unused_input='warn',
allow_input_downcast=True)
# Create the theano function for calculating
# the addition of a small tie breaker value to each vector input.
self.o_vect = T.vector(dtype='float32')
self.tie_vect = T.vector(dtype='float32')
self.add_vectVals = T.add(self.o_vect, self.tie_vect)
self.add_vectTieBreaker = function([self.o_vect, self.tie_vect],
self.add_vectVals,
on_unused_input='warn',
allow_input_downcast=True)
# Create the theano function for calculating
# the inputs to a column from an input grid.
self.kernalSize = (potentialHeight, potentialWidth)
# poolstep is how far to move the kernel in each direction.
self.poolstep = (self.stepY, self.stepX)
# Create the theano function for calculating the input to each column
self.neib_shape = T.as_tensor_variable(self.kernalSize)
self.neib_step = T.as_tensor_variable(self.poolstep)
self.pool_inp = T.tensor4('pool_input', dtype='float32')
self.pool_convole = images2neibs(self.pool_inp, self.neib_shape, self.neib_step, mode='valid')
self.pool_inputs = function([self.pool_inp],
self.pool_convole,
on_unused_input='warn',
allow_input_downcast=True)
# Create the theano function for calculating
# the inputs to a column from an input grid.
#.........rest of the code omitted.........
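Since the class above is truncated, here is a standalone sketch of what the compiled self.pool_inputs function computes, under hypothetical values (a 2x2 potential window stepped by (1, 1) over a 3x3 input grid; imports as in the top snippet, and inp_grid/get_col_inputs are illustrative names). Each output row lists the inputs covered by one column's potential synapses.

inp_grid = T.tensor4('inp_grid', dtype='float32')
col_inputs = images2neibs(inp_grid,
                          T.as_tensor_variable((2, 2)),   # potential window size
                          T.as_tensor_variable((1, 1)),   # step between columns
                          mode='valid')
get_col_inputs = theano.function([inp_grid], col_inputs,
                                 allow_input_downcast=True)

grid = np.arange(9, dtype='float32').reshape(1, 1, 3, 3)
print(get_col_inputs(grid))
# [[0. 1. 3. 4.]
#  [1. 2. 4. 5.]
#  [3. 4. 6. 7.]
#  [4. 5. 7. 8.]]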
Example 15: fc_fun
Ishape = intercept.shape
intercept.shape = (1, Ishape[0], 1, 1)
Ashape = A.shape
A.shape = (Ashape[0], 1, Ashape[1], Ashape[2])
Bshape = filter.shape
filter.shape = (Bshape[0], 1, Bshape[1], Bshape[2])
R = fc_fun(A.astype(floatX1), rot180_T4(filter).astype(floatX1),
intercept.astype(floatX1))
A.shape = Ashape
filter.shape = Bshape
intercept.shape = Ishape
return R
pdim = T.scalar('pool dim', dtype = floatX1)
pool_inp = T.tensor4('pool input', dtype = floatX1)
pool_sum = TSN.images2neibs(pool_inp, (pdim, pdim))
pool_out = pool_sum.mean(axis=-1)
pool_fun = theano.function([pool_inp, pdim], pool_out, name = 'pool_fun')
def average_pool_T4(A, pool_dim):
""" Compute average pooling for a 4-dimensional tensor - this is equivalent
to pooling over all the matrices stored in the 4-dim tensor
"""
# Warning: pool_fun returns a 1-D vector, we need to reshape it into a 4-D
# tensor
temp = pool_fun(A, pool_dim)
temp.shape = (A.shape[0], A.shape[1], A.shape[2] // pool_dim,
              A.shape[3] // pool_dim)
return temp