This article collects typical usage examples of the Python function theano.tensor.floor. If you are unsure how the floor function works, how to call it, or what it looks like in real code, the curated examples below may help.
The following presents 15 code examples of the floor function, sorted by popularity by default.
Example 1: quantized_bprop
def quantized_bprop(self, cost):
    """
    bprop equals:
    (active_prime) *elem_multiply* error_signal_in * (rep of previous layer)
    (rep of previous layer) is recorded as self.x during the fprop() pass.
    Here we quantize (rep of previous layer) and leave the rest as it is.
    """
    # the lower 2**(integer power)
    index_low = T.switch(self.x > 0., T.floor(T.log2(self.x)), T.floor(T.log2(-self.x)))
    index_low = T.clip(index_low, -4, 3)
    sign = T.switch(self.x > 0., 1., -1.)
    #index_up = index_low + 1  # the upper 2**(integer power), though not used explicitly
    p_up = sign * self.x / 2**(index_low) - 1  # percentage of upper index
    srng = theano.sandbox.rng_mrg.MRG_RandomStreams(self.rng.randint(999999))
    index_random = index_low + srng.binomial(n=1, p=p_up, size=T.shape(self.x), dtype=theano.config.floatX)
    quantized_rep = sign * 2**index_random
    # Note: there is something wrong with this hand-rolled backprop:
    # the code uses BN, but this explicit computation does not account for
    # the gradients introduced by BN.
    # error = self.activation_prime(self.z) * error_signal_in
    error = T.grad(cost=cost, wrt=self.z)
    self.dEdW = T.dot(quantized_rep.T, error)
    #self.dEdW = T.dot(self.x.T, error)
    self.dEdb = T.grad(cost=cost, wrt=self.b)
    if self.BN == True:
        self.dEda = T.grad(cost=cost, wrt=self.a)
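To see in isolation how T.floor produces the power-of-two exponent used above, here is a minimal, self-contained sketch (not part of the original layer; the input values and the [-4, 3] clipping range are only illustrative):

import numpy as np
import theano
import theano.tensor as T

# floor(log2(|x|)) gives the exponent of the largest power of two not exceeding |x|,
# clipped here to the same [-4, 3] range used in quantized_bprop.
x = T.vector('x')
index_low = T.clip(T.floor(T.log2(T.abs_(x))), -4, 3)
f = theano.function([x], index_low)
print(f(np.array([0.3, 1.7, 6.0], dtype=theano.config.floatX)))  # -> [-2.  0.  2.]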
Example 2: ShiftConv
def ShiftConv(w_t_g, s_t, N):
    shift = 2.*s_t-1.
    Z = T.mod(shift+N, N)
    simj = 1 - (Z - T.floor(Z))
    imj = T.mod(T.arange(N) + T.iround(T.floor(Z)), N)
    w_t_g_roll_1 = T.roll(w_t_g, -T.iround(T.floor(Z)))
    w_t_g_roll_2 = T.roll(w_t_g, -(T.iround(T.floor(Z))+1))
    w_t_s = w_t_g_roll_1*simj + w_t_g_roll_2*(1-simj)
    return w_t_s
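Here T.floor splits the fractional shift amount into an integer roll plus a linear-interpolation weight. A small numeric sketch of just that split (the memory size N and the shift value are illustrative, not from the original code):

import numpy as np
import theano
import theano.tensor as T

N = 5                              # assumed number of memory slots
s_t = T.scalar('s_t')              # shift control signal in [0, 1]
shift = 2. * s_t - 1.              # rescale to [-1, 1]
Z = T.mod(shift + N, N)            # wrap the shift into [0, N)
int_shift = T.iround(T.floor(Z))   # whole-step part of the shift
simj = 1 - (Z - T.floor(Z))        # weight given to the lower integer shift
f = theano.function([s_t], [int_shift, simj])
print(f(np.asarray(0.8, dtype=theano.config.floatX)))  # shift 0.6 -> roll 0, weight 0.4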
Example 3: _interpolate
def _interpolate(im, x, y, out_height, out_width):
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, 'float32')
    width_f = T.cast(width, 'float32')
    zero = T.zeros([], dtype='int64')
    max_y = im.shape[1] - 1
    max_x = im.shape[2] - 1
    # scale indices from [-1, 1] to [0, width/height].
    x = (x + 1.0)*(width_f) / 2.0
    y = (y + 1.0)*(height_f) / 2.0
    x0 = T.cast(T.floor(x), 'int64')
    x1 = x0 + 1
    y0 = T.cast(T.floor(y), 'int64')
    y1 = y0 + 1
    # Clip indices to ensure they are not out of bounds.
    x0 = T.clip(x0, zero, max_x)
    x1 = T.clip(x1, zero, max_x)
    y0 = T.clip(y0, zero, max_y)
    y1 = T.clip(y1, zero, max_y)
    # The input is [num_batch, height, width, channels]. We do the lookup in
    # the flattened input, i.e. [num_batch*height*width, channels]. We need
    # to offset all indices to match the flat version.
    dim2 = width
    dim1 = width*height
    base = _repeat(
        T.arange(num_batch, dtype='int32')*dim1, out_height*out_width)
    base_y0 = base + y0*dim2
    base_y1 = base + y1*dim2
    idx_a = base_y0 + x0
    idx_b = base_y1 + x0
    idx_c = base_y0 + x1
    idx_d = base_y1 + x1
    # use indices to look up pixels for all samples
    im_flat = im.reshape((-1, channels))
    Ia = im_flat[idx_a]
    Ib = im_flat[idx_b]
    Ic = im_flat[idx_c]
    Id = im_flat[idx_d]
    # calculate interpolated values
    x0_f = T.cast(x0, 'float32')
    x1_f = T.cast(x1, 'float32')
    y0_f = T.cast(y0, 'float32')
    y1_f = T.cast(y1, 'float32')
    wa = ((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x')
    wb = ((x1_f-x) * (y-y0_f)).dimshuffle(0, 'x')
    wc = ((x-x0_f) * (y1_f-y)).dimshuffle(0, 'x')
    wd = ((x-x0_f) * (y-y0_f)).dimshuffle(0, 'x')
    output = T.sum([wa*Ia, wb*Ib, wc*Ic, wd*Id], axis=0)
    return output
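In this and the following bilinear samplers (Examples 4, 5, 7 and 8), the role of T.floor is to split each real-valued sampling coordinate into an integer neighbor index and a fractional weight. A minimal standalone sketch of that step (names and values are illustrative):

import numpy as np
import theano
import theano.tensor as T

coord = T.vector('coord')            # real-valued pixel coordinates
c0_f = T.floor(coord)                # coordinate of the lower neighbor
frac = coord - c0_f                  # fractional part, i.e. the weight toward the upper neighbor
c0 = T.cast(c0_f, 'int64')           # integer index used for the flat lookup
split = theano.function([coord], [c0, frac])
idx, w = split(np.array([2.25, 7.9], dtype=theano.config.floatX))
print(idx, w)                        # -> [2 7] and approximately [0.25 0.9]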
Example 4: _interpolate
def _interpolate(im, x, y, out_height, out_width):
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, theano.config.floatX)
    width_f = T.cast(width, theano.config.floatX)
    # clip coordinates to [-1, 1]
    x = T.clip(x, -1, 1)
    y = T.clip(y, -1, 1)
    # scale coordinates from [-1, 1] to [0, width/height - 1]
    x = (x + 1) / 2 * (width_f - 1)
    y = (y + 1) / 2 * (height_f - 1)
    # obtain indices of the 2x2 pixel neighborhood surrounding the coordinates;
    # we need those in floatX for interpolation and in int64 for indexing. for
    # indexing, we need to take care they do not extend past the image.
    x0_f = T.floor(x)
    y0_f = T.floor(y)
    x1_f = x0_f + 1
    y1_f = y0_f + 1
    x0 = T.cast(x0_f, 'int64')
    y0 = T.cast(y0_f, 'int64')
    x1 = T.cast(T.minimum(x1_f, width_f - 1), 'int64')
    y1 = T.cast(T.minimum(y1_f, height_f - 1), 'int64')
    # The input is [num_batch, height, width, channels]. We do the lookup in
    # the flattened input, i.e [num_batch*height*width, channels]. We need
    # to offset all indices to match the flat version
    dim2 = width
    dim1 = width*height
    base = T.repeat(
        T.arange(num_batch, dtype='int64')*dim1, out_height*out_width)
    base_y0 = base + y0*dim2
    base_y1 = base + y1*dim2
    idx_a = base_y0 + x0
    idx_b = base_y1 + x0
    idx_c = base_y0 + x1
    idx_d = base_y1 + x1
    # use indices to lookup pixels for all samples
    im_flat = im.reshape((-1, channels))
    Ia = im_flat[idx_a]
    Ib = im_flat[idx_b]
    Ic = im_flat[idx_c]
    Id = im_flat[idx_d]
    # calculate interpolated values
    wa = ((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x')
    wb = ((x1_f-x) * (y-y0_f)).dimshuffle(0, 'x')
    wc = ((x-x0_f) * (y1_f-y)).dimshuffle(0, 'x')
    wd = ((x-x0_f) * (y-y0_f)).dimshuffle(0, 'x')
    output = T.sum([wa*Ia, wb*Ib, wc*Ic, wd*Id], axis=0)
    assert str(output.dtype) == theano.config.floatX, str(output.dtype)
    return output
Example 5: _interpolate
def _interpolate(im, x, y, out_height, out_width):
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, theano.config.floatX)
    width_f = T.cast(width, theano.config.floatX)
    # scale indices from [-1, 1] to [0, width/height].
    x = (x + 1) / 2 * width_f
    y = (y + 1) / 2 * height_f
    # Clip indices to ensure they are not out of bounds.
    max_x = width_f - 1
    max_y = height_f - 1
    x0 = T.clip(x, 0, max_x)
    x1 = T.clip(x + 1, 0, max_x)
    y0 = T.clip(y, 0, max_y)
    y1 = T.clip(y + 1, 0, max_y)
    # We need floatX for interpolation and int64 for indexing.
    x0_f = T.floor(x0)
    x1_f = T.floor(x1)
    y0_f = T.floor(y0)
    y1_f = T.floor(y1)
    x0 = T.cast(x0, 'int64')
    x1 = T.cast(x1, 'int64')
    y0 = T.cast(y0, 'int64')
    y1 = T.cast(y1, 'int64')
    # The input is [num_batch, height, width, channels]. We do the lookup in
    # the flattened input, i.e [num_batch*height*width, channels]. We need
    # to offset all indices to match the flat version
    dim2 = width
    dim1 = width*height
    base = T.repeat(
        T.arange(num_batch, dtype='int64')*dim1, out_height*out_width)
    base_y0 = base + y0*dim2
    base_y1 = base + y1*dim2
    idx_a = base_y0 + x0
    idx_b = base_y1 + x0
    idx_c = base_y0 + x1
    idx_d = base_y1 + x1
    # use indices to lookup pixels for all samples
    im_flat = im.reshape((-1, channels))
    Ia = im_flat[idx_a]
    Ib = im_flat[idx_b]
    Ic = im_flat[idx_c]
    Id = im_flat[idx_d]
    # calculate interpolated values
    wa = ((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x')
    wb = ((x1_f-x) * (y-y0_f)).dimshuffle(0, 'x')
    wc = ((x-x0_f) * (y1_f-y)).dimshuffle(0, 'x')
    wd = ((x-x0_f) * (y-y0_f)).dimshuffle(0, 'x')
    output = T.sum([wa*Ia, wb*Ib, wc*Ic, wd*Id], axis=0)
    return output
Example 6: MASK_blanking
def MASK_blanking(x_i):
    # Find indices of the first and last non-zero value in x_i
    idxs = T.nonzero(x_i)[0][[1, -1]]
    # Diff = number of non-zero values
    no_values = idxs[1] - idxs[0]
    # Move each index inward by a proportion of the number of values
    idxs0 = T.cast(T.floor(idxs[0] + no_values * blank_proportion), 'int32')
    idxs1 = T.cast(T.floor(idxs[1] - no_values * blank_proportion), 'int32')
    # Return a vector that has a tighter mask than x_i
    return T.set_subtensor(T.zeros_like(x_i)[idxs0:idxs1], T.alloc(1., idxs1-idxs0))
Example 7: _interpolate
def _interpolate(im, x, y, out_height, out_width, dtype='float32'):
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, dtype=dtype)
    width_f = T.cast(width, dtype=dtype)
    # scale coordinates from [-1, 1] to [0, width/height - 1]
    idx = ((x >= 0) & (x <= 1) & (y >= 0) & (y <= 1)).nonzero()[0]
    # x = (x + 1) / 2 * (width_f - 1)
    # y = (y + 1) / 2 * (height_f - 1)
    x = x * (width_f - 1)
    y = y * (height_f - 1)
    # obtain indices of the 2x2 pixel neighborhood surrounding the coordinates;
    # we need those in floatX for interpolation and in int64 for indexing. for
    # indexing, we need to take care they do not extend past the image.
    x0_f = T.floor(x)
    y0_f = T.floor(y)
    x1_f = x0_f + 1
    y1_f = y0_f + 1
    x0 = T.cast(x0_f, 'int64')
    y0 = T.cast(y0_f, 'int64')
    x1 = T.cast(T.minimum(x1_f, width_f - 1), 'int64')
    y1 = T.cast(T.minimum(y1_f, height_f - 1), 'int64')
    # The input is [num_batch, height, width, channels]. We do the lookup in
    # the flattened input, i.e [num_batch*height*width, channels]. We need
    # to offset all indices to match the flat version
    dim2 = width
    dim1 = width*height
    base = T.repeat(
        T.arange(num_batch, dtype='int64')*dim1, out_height*out_width)
    base_y0 = base + y0*dim2
    base_y1 = base + y1*dim2
    idx_a = base_y0 + x0
    idx_b = base_y1 + x0
    idx_c = base_y0 + x1
    idx_d = base_y1 + x1
    # use indices to lookup pixels for all samples
    im_flat = im.reshape((-1, channels))
    Ia = im_flat[idx_a[idx]]
    Ib = im_flat[idx_b[idx]]
    Ic = im_flat[idx_c[idx]]
    Id = im_flat[idx_d[idx]]
    # calculate interpolated values
    wa = ((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x')[idx, :]
    wb = ((x1_f-x) * (y-y0_f)).dimshuffle(0, 'x')[idx, :]
    wc = ((x-x0_f) * (y1_f-y)).dimshuffle(0, 'x')[idx, :]
    wd = ((x-x0_f) * (y-y0_f)).dimshuffle(0, 'x')[idx, :]
    output = T.sum([wa*Ia, wb*Ib, wc*Ic, wd*Id], axis=0)
    # out = T.zeros_like(((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x'))
    out = T.zeros_like(im_flat)
    return T.set_subtensor(out[idx, :], output)
Example 8: _interpolate
def _interpolate(self, im, x, y, downsample_factor):
    # constants
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, floatX)
    width_f = T.cast(width, floatX)
    out_height = T.cast(height_f // downsample_factor, 'int64')
    out_width = T.cast(width_f // downsample_factor, 'int64')
    zero = T.zeros([], dtype='int64')
    max_y = T.cast(im.shape[1] - 1, 'int64')
    max_x = T.cast(im.shape[2] - 1, 'int64')
    # scale indices from [-1, 1] to [0, width/height]
    x = (x + 1.0)*(width_f) / 2.0
    y = (y + 1.0)*(height_f) / 2.0
    # do sampling
    x0 = T.cast(T.floor(x), 'int64')
    x1 = x0 + 1
    y0 = T.cast(T.floor(y), 'int64')
    y1 = y0 + 1
    x0 = T.clip(x0, zero, max_x)
    x1 = T.clip(x1, zero, max_x)
    y0 = T.clip(y0, zero, max_y)
    y1 = T.clip(y1, zero, max_y)
    dim2 = width
    dim1 = width*height
    base = self._repeat(
        T.arange(num_batch, dtype='int32')*dim1, out_height*out_width)
    base_y0 = base + y0*dim2
    base_y1 = base + y1*dim2
    idx_a = base_y0 + x0
    idx_b = base_y1 + x0
    idx_c = base_y0 + x1
    idx_d = base_y1 + x1
    # use indices to lookup pixels in the flat
    # image and restore channels dim
    im_flat = im.reshape((-1, channels))
    Ia = im_flat[idx_a]
    Ib = im_flat[idx_b]
    Ic = im_flat[idx_c]
    Id = im_flat[idx_d]
    # and finally calculate interpolated values
    x0_f = T.cast(x0, floatX)
    x1_f = T.cast(x1, floatX)
    y0_f = T.cast(y0, floatX)
    y1_f = T.cast(y1, floatX)
    wa = ((x1_f-x) * (y1_f-y)).dimshuffle(0, 'x')
    wb = ((x1_f-x) * (y-y0_f)).dimshuffle(0, 'x')
    wc = ((x-x0_f) * (y1_f-y)).dimshuffle(0, 'x')
    wd = ((x-x0_f) * (y-y0_f)).dimshuffle(0, 'x')
    output = T.sum([wa*Ia, wb*Ib, wc*Ic, wd*Id], axis=0)
    return output
Example 9: _interpolate_bicubic
def _interpolate_bicubic(im, x, y, out_height, out_width):
    # *_f are floats
    num_batch, height, width, channels = im.shape
    height_f = T.cast(height, theano.config.floatX)
    width_f = T.cast(width, theano.config.floatX)
    grid = _meshgrid(out_height, out_width)
    x_grid_flat = grid[0].flatten()
    y_grid_flat = grid[1].flatten()
    # clip coordinates to [-1, 1]
    x = T.clip(x, -1, 1)
    y = T.clip(y, -1, 1)
    # scale coordinates from [-1, 1] to [0, width/height - 1]
    x = (x + 1) / 2 * (width_f - 1)
    y = (y + 1) / 2 * (height_f - 1)
    x0_f = T.floor(x)
    y0_f = T.floor(y)
    x0 = T.cast(x0_f, "int64")
    y0 = T.cast(y0_f, "int64")
    # return T.concatenate(((x0-x).dimshuffle(0, 'x')**2, 0.0*dg2(x.dimshuffle(0, 'x')), 0.0*dg2(x0.dimshuffle(0, 'x'))), 1)
    offsets = np.arange(-1, 3).astype(int)
    dim2 = width
    dim1 = width * height
    base = T.repeat(T.arange(num_batch, dtype="int64") * dim1, out_height * out_width)

    # Need to convert (x, y) to linear
    def _flat_idx(xx, yy, dim2=dim2):
        return base + yy * dim2 + xx

    y_locs = [y0 + offset for offset in offsets]
    ys = [T.clip(loc, 0, height - 1) for loc in y_locs]

    def _cubic_interp_dim(im_flat, other_idx):
        """Cubic interpolation along a dimension
        """
        neighbor_locs = [x0 + offset for offset in offsets]
        neighbor_idx = [T.clip(nloc, 0, width - 1) for nloc in neighbor_locs]
        xidxs = neighbor_idx
        yidxs = [other_idx] * len(neighbor_idx)
        neighbor_idxs = [_flat_idx(xidx, yidx) for xidx, yidx in zip(xidxs, yidxs)]
        values = [im_flat[idx] for idx in neighbor_idxs]
        weights = [_cubic_conv_weights(dg2(nloc) - x).dimshuffle(0, "x") for nloc in neighbor_locs]
        # Interpolate along x direction
        out = T.sum([dg2(v) * w for w, v in zip(weights, values)], axis=0) / T.sum(weights, axis=0)
        return out

    im_flat = im.reshape((-1, channels))
    ims = [_cubic_interp_dim(im_flat, yidx) for yidx in ys]
    yweights = [_cubic_conv_weights(dg2(yloc) - y).dimshuffle(0, "x") for yloc in y_locs]
    out = T.sum(
        [v * _cubic_conv_weights(dg2(yloc) - y).dimshuffle(0, "x") for v, yloc in zip(ims, y_locs)], axis=0
    ) / T.sum(yweights, axis=0)
    return out
Example 10: process
def process(self, input, tparams, BNparams):
    b, f, h0, w0 = input.shape
    result = []
    for h, w in self.pymamid:
        win_h = T.ceil(h0 / h).astype('int32')
        win_w = T.ceil(w0 / w).astype('int32')
        str_h = T.floor(h0 / h).astype('int32')
        str_w = T.floor(w0 / w).astype('int32')
        result.append(dnn_pool(
            img=input, ws=(win_h, win_w), mode=self.mode,
            stride=(str_h, str_w), pad=(0, 0)).reshape([b, -1]))
    return T.concatenate(result, axis=1)
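The window and stride arithmetic above is just T.ceil and T.floor applied to the ratio of input size to pyramid level. A tiny numeric sketch of that computation alone, without the cuDNN pooling call (the input height and the number of bins are illustrative):

import numpy as np
import theano
import theano.tensor as T

h0 = T.scalar('h0')                        # input feature-map height
h = 3.                                     # assumed number of bins along this axis
win_h = T.ceil(h0 / h).astype('int32')     # pooling window size
str_h = T.floor(h0 / h).astype('int32')    # pooling stride
f = theano.function([h0], [win_h, str_h])
print(f(np.asarray(13., dtype=theano.config.floatX)))  # -> window 5, stride 4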
Example 11: pool_2d_nxn_regions
def pool_2d_nxn_regions(inputs, output_size, mode='max'):
    """
    Performs a pooling operation that results in a fixed size:
    output_size x output_size.

    Used by SpatialPyramidPoolingLayer. Refer to appendix A in [1].

    Parameters
    ----------
    inputs : a tensor with 4 dimensions (N x C x H x W)
    output_size : integer
        The output size of the pooling operation.
    mode : string
        Pooling mode, one of 'max', 'average_inc_pad', 'average_exc_pad'.
        Defaults to 'max'.

    Returns a list of tensors, for each output bin.
    The list contains output_size*output_size elements, where
    each element is a 3D tensor (N x C x 1).

    References
    ----------
    .. [1] He, Kaiming et al (2015):
           Spatial Pyramid Pooling in Deep Convolutional Networks
           for Visual Recognition.
           http://arxiv.org/pdf/1406.4729.pdf
    """
    if mode == 'max':
        pooling_op = T.max
    elif mode in ['average_inc_pad', 'average_exc_pad']:
        pooling_op = T.mean
    else:
        msg = "Mode must be either 'max', 'average_inc_pad' or "
        msg += "'average_exc_pad'. Got '{0}'"
        raise ValueError(msg.format(mode))
    h, w = inputs.shape[2:]
    result = []
    n = float(output_size)
    for row in range(output_size):
        for col in range(output_size):
            start_h = T.floor(row / n * h).astype('int32')
            end_h = T.ceil((row + 1) / n * h).astype('int32')
            start_w = T.floor(col / n * w).astype('int32')
            end_w = T.ceil((col + 1) / n * w).astype('int32')
            pooling_region = inputs[:, :, start_h:end_h, start_w:end_w]
            this_result = pooling_op(pooling_region, axis=(2, 3))
            result.append(this_result.dimshuffle(0, 1, 'x'))
    return result
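A possible usage sketch for pool_2d_nxn_regions, concatenating the returned bins into the fixed-length descriptor the docstring describes (the toy input shape is illustrative, not from the original code):

import numpy as np
import theano
import theano.tensor as T

x = T.tensor4('x')                                    # (N, C, H, W)
bins = pool_2d_nxn_regions(x, output_size=2, mode='max')
descriptor = T.concatenate(bins, axis=2)              # (N, C, output_size*output_size)
f = theano.function([x], descriptor)
out = f(np.random.rand(1, 3, 7, 9).astype(theano.config.floatX))
print(out.shape)                                      # -> (1, 3, 4)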
Example 12: discretized_logistic
def discretized_logistic(mean, logscale, binsize, sample=None):
    scale = T.exp(logscale)
    if sample is None:
        u = G.rng_curand.uniform(size=mean.shape)
        _y = T.log(-u/(u-1))  # inverse CDF of the logistic
        sample = mean + scale * _y  # sample from the actual logistic
        sample = T.floor(sample/binsize)*binsize  # discretize the sample
    _sample = (T.floor(sample/binsize)*binsize - mean)/scale
    logps = T.log(T.nnet.sigmoid(_sample + binsize/scale) - T.nnet.sigmoid(_sample) + 1e-7)
    logp = logps.flatten(2).sum(axis=1)
    #raise Exception()
    entr = logscale.flatten(2)
    entr = entr.sum(axis=1) + 2. * entr.shape[1].astype(G.floatX)
    return RandomVariable(sample, logp, entr, mean=mean, logscale=logscale, logps=logps)
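The discretization step in this and the next example is simply T.floor applied to the scaled sample, snapping each value down to the edge of its bin. A standalone sketch of that step (the bin width is illustrative, e.g. 1/256 for 8-bit intensities):

import numpy as np
import theano
import theano.tensor as T

binsize = 1. / 256                              # assumed bin width
s = T.vector('s')
snapped = T.floor(s / binsize) * binsize        # discretize each sample to its bin edge
f = theano.function([s], snapped)
print(f(np.array([0.013, 0.499], dtype=theano.config.floatX)))  # -> [0.01171875  0.49609375]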
Example 13: discretized_gaussian
def discretized_gaussian(mean, logvar, binsize, sample=None):
    scale = T.exp(.5*logvar)
    if sample is None:
        _y = G.rng_curand.normal(size=mean.shape)
        sample = mean + scale * _y  # sample from the actual Gaussian
        sample = T.floor(sample/binsize)*binsize  # discretize the sample
    _sample = (T.floor(sample/binsize)*binsize - mean)/scale

    def _erf(x):
        return T.erf(x/T.sqrt(2.))

    logp = T.log(_erf(_sample + binsize/scale) - _erf(_sample) + 1e-7) + T.log(.5)
    logp = logp.flatten(2).sum(axis=1)
    #raise Exception()
    entr = (.5 * (T.log(2 * math.pi) + 1 + logvar)).flatten(2).sum(axis=1)
    return RandomVariable(sample, logp, entr, mean=mean, logvar=logvar)
Example 14: create_learning_rate_func
def create_learning_rate_func(solver_params):
    base = tt.fscalar('base')
    gamma = tt.fscalar('gamma')
    power = tt.fscalar('power')
    itrvl = tt.fscalar('itrvl')
    iter = tt.scalar('iter')
    if solver_params['lr_type'] == 'inv':
        lr_ = base * tt.pow(1 + gamma * iter, -power)
        lr = t.function(
            inputs=[iter,
                    t.Param(base, default=solver_params['base']),
                    t.Param(gamma, default=solver_params['gamma']),
                    t.Param(power, default=solver_params['power'])],
            outputs=lr_)
    elif solver_params['lr_type'] == 'fixed':
        lr_ = base
        lr = t.function(
            inputs=[iter, t.Param(base, default=solver_params['base'])],
            outputs=lr_,
            on_unused_input='ignore')
    elif solver_params['lr_type'] == 'episodic':
        lr_ = base / (tt.floor(iter/itrvl) + 1)
        lr = t.function(
            inputs=[iter,
                    t.Param(base, default=solver_params['base']),
                    t.Param(itrvl, default=solver_params['interval'])],
            outputs=lr_,
            on_unused_input='ignore')
    return lr
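For the 'episodic' schedule, tt.floor implements the step decay base / (floor(iter / interval) + 1). A tiny numeric sketch of that formula alone (the base rate and episode length are illustrative):

import numpy as np
import theano
import theano.tensor as tt

base, itrvl = 0.1, 100.                            # assumed base learning rate and episode length
iters = tt.vector('iters')
lr = base / (tt.floor(iters / itrvl) + 1)          # constant within an episode, then decays stepwise
f = theano.function([iters], lr)
print(f(np.array([50., 150., 250.], dtype=theano.config.floatX)))  # -> [0.1  0.05  0.0333...]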
Example 15: get_output_for
def get_output_for(self, input, **kwargs):
    p = self.p
    k = self.k
    nbatches = input.shape[0]
    x_len = self.x_len
    # x_len = 30
    # x = input.reshape((nbatches, x_len))
    x = input.reshape((nbatches, x_len))
    p_floor = T.floor(p)
    p_ceil = T.ceil(p)
    # Deltas
    p_delta = p - p_floor
    ep_delta = T.exp(k*-p_delta)
    p2_delta = 1 - p_delta
    ep2_delta = T.exp(k*-p2_delta)
    p0_delta = 1 + p_delta
    ep0_delta = T.exp(k*-p0_delta)
    ep_sum = ep_delta + ep2_delta + ep0_delta
    perm1 = x[:, (T.cast(p_floor, 'int32')) % x_len]
    perm2 = x[:, (T.cast(p_ceil, 'int32') + 1) % x_len]
    perm0 = x[:, (T.cast(p_floor, 'int32') - 1) % x_len]
    perm1_factor = ep_delta * perm1
    perm2_factor = ep2_delta * perm2
    perm3_factor = ep0_delta * perm0
    res = (perm1_factor + perm2_factor + perm3_factor) / ep_sum
    return res.reshape(input.shape)
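The core trick here is to turn the real-valued position p into neighboring (modular) column indices via T.floor/T.ceil and to blend them with exponential weights. A minimal numeric sketch of the weight computation only (the sharpness k and the position value are illustrative):

import numpy as np
import theano
import theano.tensor as T

k = 5.                                           # assumed sharpness constant
p = T.scalar('p')
p_delta = p - T.floor(p)                         # distance to the lower integer position
w = T.stack([T.exp(-k * (1 + p_delta)),          # weight for index floor(p) - 1
             T.exp(-k * p_delta),                # weight for index floor(p)
             T.exp(-k * (1 - p_delta))])         # weight for index ceil(p) + 1
w = w / w.sum()
f = theano.function([p], w)
print(f(np.asarray(2.25, dtype=theano.config.floatX)))  # most weight on floor(p), since p_delta = 0.25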