This article collects typical usage examples of the convolve function from Python's scipy.ndimage.filters module. If you have been wondering what convolve does, how to call it, or what real code that uses it looks like, the curated examples below should help.

The following 15 code examples of convolve are shown, ordered by popularity by default.
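Before diving into the examples, here is a minimal, self-contained sketch (written for this article, not taken from any of the projects below) of a typical convolve call; the array, kernel, and variable names are purely illustrative:

import numpy as np
from scipy.ndimage import convolve  # scipy.ndimage.filters.convolve is the older import path

# Smooth a small image with a normalized 3x3 box kernel.
image = np.arange(25, dtype=float).reshape(5, 5)
kernel = np.ones((3, 3)) / 9.0

# The mode argument ('reflect' by default; also 'constant', 'nearest',
# 'mirror', 'wrap') controls how values past the array edges are filled in.
smoothed = convolve(image, kernel, mode='nearest')
print(smoothed.shape)  # (5, 5) -- same shape as the input
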
Example 1: rl_damped

def rl_damped(raw, psf, niter=2, damped=True, N=3, T=None, multiplier=1):
    """ working on it"""
    #psf /= psf.sum()
    conversion = raw.mean() / 10
    raw /= conversion
    lucy = np.ones(raw.shape) * raw.mean()
    #plt.ion()
    #plt.figure()
    #plt.plot(raw)
    #plt.axhline(y=0, lw=2, color='black')
    for i in range(niter):
        if damped:
            print("dampening")
            lucy_temp = convolve(lucy, psf, mode='mirror')
            ratio = dampen(lucy_temp, raw, N, T, multiplier)
        else:
            ratio = raw / convolve(lucy, psf, mode='mirror')
        ratio[np.isnan(ratio)] = 0
        top = convolve(ratio, psf, mode='mirror')
        top[np.isnan(top)] = 0
        lucy = lucy * (top / psf.sum())
        #plt.plot( lucy )
        print('iteration', i, lucy.mean(), raw.mean())
        print()
        #raw_input('Done')
    return lucy * conversion

Example 2: convolve1d

def convolve1d(Z, K, toric=False):
    r""" Discrete, clamped, linear convolution of two one-dimensional sequences.

    The convolution operator is often seen in signal processing, where it
    models the effect of a linear time-invariant system on a signal [1]_.
    In probability theory, the sum of two independent random variables is
    distributed according to the convolution of their individual
    distributions.

    :param array Z:
        One-dimensional array.
    :param array K:
        One-dimensional array.
    :param bool toric:
        Indicates whether the convolution should be treated as toric
        (periodic, i.e. the sequence wraps around at the edges).
    :return:
        Discrete, clamped, linear convolution of `Z` and `K`.

    **Note**

    The discrete convolution operation is defined as

    .. math:: (f * g)[n] = \sum_{m = -\infty}^{\infty} f[m] g[n-m]

    **References**

    .. [1] Wikipedia, "Convolution",
           http://en.wikipedia.org/wiki/Convolution.
    """
    if toric:
        return convolve(Z, K, mode='wrap')
    else:
        return convolve(Z, K, mode='constant')

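As a quick illustration (not part of the original project), convolve1d can be called like this, assuming numpy and scipy.ndimage.convolve are imported as the function requires:

import numpy as np
from scipy.ndimage import convolve  # needed by convolve1d above

Z = np.array([0.0, 0.0, 1.0, 0.0, 0.0])
K = np.array([1.0, 2.0, 1.0])
print(convolve1d(Z, K))              # clamped: values beyond the edges are treated as 0
print(convolve1d(Z, K, toric=True))  # toric: the sequence wraps around at the edges
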
Example 3: process

def process(self, image):
    image = image.astype(np.double)
    if image.max() > 1:
        # The image is between 0 and 255 - we need to convert it to [0, 1]
        image /= 255
    if image.ndim == 3:
        # we do not deal with color images.
        image = np.mean(image, axis=2)
    H, W = image.shape
    IH = filters.convolve(image, self._GH, mode='nearest')
    IW = filters.convolve(image, self._GW, mode='nearest')
    I_mag = np.sqrt(IH ** 2 + IW ** 2)
    I_theta = np.arctan2(IH, IW)
    alpha = self.specs.get('alpha', _ALPHA)
    num_angles = self.specs.get('num_angles', _NUM_ANGLES)
    I_orient = np.empty((H, W, num_angles))
    if self.specs.get('twoside', True):
        for i in range(num_angles):
            I_orient[:, :, i] = I_mag * np.maximum(
                np.cos(I_theta - self._ANGLES[i]) ** alpha, 0)
    else:
        for i in range(num_angles):
            I_orient[:, :, i] = I_mag * np.abs(
                np.cos(I_theta - self._ANGLES[i]) ** alpha)
    return I_orient

Example 4: step

def step(self, dt):
    if dt != self.dt:
        print("I can only integrate at fixed dt!")
        return
    self.nCells = len(self.cellStates)
    # Check we have enough space allocated
    try:
        s = self.specLevel[self.nCells-1]
    except IndexError:
        # Could resize here, then would have to rebuild views
        print("Number of cells exceeded "
              + self.__class__.__name__
              + "::maxCells (" + str(self.maxCells) + ")")
    self.dataLen = self.signalDataLen + self.nCells*self.nSpecies
    # Do u += h(T(u_t)/2 + hf(u_t)) where T = transport operator, f(u_t) is
    # our regulation function dydt
    self.signalling.transportRates(self.signalRate, self.signalLevel)
    self.signalRate *= 0.5
    self.dydt()
    self.rates[0:self.dataLen] *= self.dt
    self.levels[0:self.dataLen] += self.rates[0:self.dataLen]
    # Convolve (I+hT/2)u_t + f(u_t) with the Greens func to get u_{t+1}
    sigLvl = self.signalLevel.reshape(self.gridDim)
    convolve(sigLvl, self.greensFunc, mode='nearest')
    # Put the final signal levels into the cell states
    states = self.cellStates
    for (id, c) in states.items():
        if self.signalling:
            c.signals = self.signalling.signals(c, self.signalLevel)

Example 5: stitch

def stitch(targets, images):
    mask = rois_mask(targets)  # True where image data is
    gaps_mask = mask == False  # True where infill needs to go
    # compute bounds relative to the camera field
    (x, y, w, h) = stitched_box(targets)
    uroi = img_as_float(stitch_raw(targets, images, (x, y, w, h)))  # stitch with black infill
    # step 1: sparsely sample background mostly ignoring blob
    # compute gradient on both axes
    k = [[-3, -1, 0, 1, 3],
         [-3, -1, 0, 1, 3],
         [-3, -1, 0, 1, 3],
         [-3, -1, 0, 1, 3]]
    gy = convolve(uroi, k)
    gx = convolve(uroi, np.rot90(k))
    # ignore all but low-gradient areas
    bg = (abs(gy + gx) < 0.2) & mask
    # step 2: remove less contiguous areas
    filter_size = max(2, int(max(h, w) / 200))
    mf = minimum_filter(bg * 1, filter_size)
    # step 3: interpolate between samples
    z = inpaint(uroi * mf, mf == False)
    # step 4: subsample and re-interpolate to degrade artifacts in fill region
    random = RandomState(0)
    (h, w) = z.shape
    ng = random.rand(h, w) < 0.01
    z2 = inpaint(z * ng, ng == False)
    # step 5: final composite
    roi = (z2 * gaps_mask) + uroi
    return (roi * 255).astype(np.uint8), mask

Example 6: add_pointsources

def add_pointsources(map_shape, freq, alpha0=4.5, sigma=0.5, A=1, number=1):
    map = np.zeros(map_shape)
    spec_list = []
    for i in range(number):
        ra = np.random.randint(0, map_shape[1])
        dec = np.random.randint(0, map_shape[2])
        alpha = np.random.normal(alpha0, sigma, 1)
        spec = A * (freq/150.)**alpha
        spec_list.append(spec)
        map[:, ra, dec] += spec
    out = np.zeros(map_shape)
    for i in range(map_shape[0]):
        kernel = np.arange(41) - 20.  # GBT
        #kernel = np.arange(21) - 10.
        kernel = sp.exp(-kernel**2 / (2. * 3 ** 2.))
        kernel *= 1. / (2. * sp.pi * 3 ** 2.)
        kernel = kernel[:, None] * kernel[None, :]
        convolve(map[i], kernel, output=out[i])
    map = out
    return map, spec_list

Example 7: rl_standard

def rl_standard(raw_image, psf, niter):
    """ Standard Lucy-Richardson deconvolution.

    arXiv 2002 Lauer
    """
    psf /= psf.sum()
    psf_inverse = psf[::-1]
    lucy = np.ones(raw_image.shape) * raw_image.mean()
    for i in range(niter):
        estimate = convolve(lucy, psf, mode='mirror')
        estimate[np.isnan(estimate)] = 0
        correction = convolve(raw_image/estimate, psf_inverse, mode='mirror')
        correction[np.isnan(correction)] = 0
        print('Correction:', correction.mean())
        lucy *= correction
        print('Means:', raw_image.mean(), lucy.mean())
        chisq = scipy.nansum((lucy - raw_image)**2 / lucy) / (raw_image.size - 1)
        print(chisq)
    return lucy

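A hypothetical call to rl_standard on a toy 1-D signal might look like the sketch below; the data and PSF are made up, and it assumes the imports the function relies on (numpy as np, scipy, and convolve from scipy.ndimage.filters) are already in scope:

import numpy as np

x = np.linspace(-10, 10, 201)
psf = np.exp(-x**2 / 2.0)                    # Gaussian PSF; rl_standard normalizes it in place
truth = np.where(np.abs(x) < 1, 1.0, 0.1)    # toy "sharp" signal
blurred = np.convolve(truth, psf / psf.sum(), mode='same')
restored = rl_standard(blurred, psf, niter=20)
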
Example 8: preprocess

def preprocess(pattern, img):
    # bilinear interpolation for bayer_rggb images
    if pattern == 'bayer_rggb':
        (z, q, h) = (0.0, 0.25, 0.5)
        sparse = np.array([[q, h, q],
                           [h, z, h],
                           [q, h, q]])
        dense = np.array([[z, q, z],
                          [q, z, q],
                          [z, q, z]])
        img[0,:,:] = \
            np.where(img[0,:,:] > 0.0,
                     img[0,:,:],
                     convolve(img[0,:,:], sparse, mode='mirror'))
        img[1,:,:] = \
            np.where(img[1,:,:] > 0.0,
                     img[1,:,:],
                     convolve(img[1,:,:], dense, mode='mirror'))
        img[2,:,:] = \
            np.where(img[2,:,:] > 0.0,
                     img[2,:,:],
                     convolve(img[2,:,:], sparse, mode='mirror'))
        img = np.dstack((img[2,:,:],
                         img[1,:,:],
                         img[0,:,:]))
        return np.swapaxes(np.swapaxes(img, 2, 0), 1, 2)
    else:
        raise NotImplementedError('Preprocessing is implemented only for bayer_rggb')

Example 9: _postprocess_nodes

def _postprocess_nodes(self):
    fluid_map = self._fluid_map(wet=False, base=True).astype(np.uint8)
    wet_map_for_unused = self._fluid_map(wet=True, allow_unused=True, base=True).astype(np.uint8)
    wet_map = self._fluid_map(wet=True, base=True).astype(np.uint8)
    neighbors = self._lattice_kernel()

    # Any *wet* node not connected to at least one *fluid* node is marked unused.
    # Note that dry nodes connecting to wet nodes need to be retained.
    # For instance:
    #   W W
    #   W V
    # where W is a HBB wall and V is a velocity BC.
    where = filters.convolve(fluid_map, neighbors, mode="constant", cval=1) == 0
    self._type_map_base[where & wet_map_for_unused.astype(np.bool)] = nt._NTUnused.id

    # Any dry node, not connected to at least one wet node is marked unused.
    # For instance, for HBB walls: .. W W W F -> .. U U W F.
    where = filters.convolve(wet_map, neighbors, mode="constant", cval=0) == 0
    self._type_map_base[where & np.logical_not(wet_map.astype(np.bool))] = nt._NTUnused.id

    # If an unused node touches a wet node, mark it as propagation only.
    # For instance, for HBB walls: .. U U W F -> .. U P W F.
    used_map = (self._type_map_base != nt._NTUnused.id).astype(np.uint8)
    where = filters.convolve(used_map, neighbors, mode="constant", cval=0) > 0
    self._type_map_base[where & (self._type_map_base == nt._NTUnused.id)] = nt._NTPropagationOnly.id

Example 10: __init__

def __init__(self, image, fit_par=None, dt=0, fw=None, win_size=None,
             kernel=None, xkernel=None, bkg_image=None):
    self.image = image
    self.bkg_image = bkg_image
    # Noise removal by convolving with a null sum gaussian. Its FWHM
    # has to match the one of the objects we want to detect.
    try:
        self.fwhm = fw
        self.win_size = win_size
        self.kernel = kernel
        self.xkernel = xkernel
        self.image_conv = convolve(self.image.astype(float), self.kernel)
    except RuntimeError:
        # If the kernel is None, I assume all the args must be calculated
        self.fwhm = tools.get_fwhm(670, 1.42) / 120
        self.win_size = int(np.ceil(self.fwhm))
        self.kernel = tools.kernel(self.fwhm)
        self.xkernel = tools.xkernel(self.fwhm)
        self.image_conv = convolve(self.image.astype(float), self.kernel)

    # TODO: FIXME
    if self.bkg_image is None:
        self.bkg_image = self.image_conv

    self.fit_par = fit_par
    self.dt = dt

Example 11: mvd_lr

def mvd_lr(initImg, imgList, psfList, iterNum):
    EPS = np.finfo(float).eps
    viewNum = len(imgList)
    initImg = initImg - np.amin(initImg)
    initImg = initImg / np.sum(np.abs(initImg))
    reconImg = initImg
    for i in range(iterNum):
        updateAll = np.ones(initImg.shape, dtype=float)
        for j in range(viewNum):
            img = imgList[j]
            psf = psfList[j]
            psf_prime = np.flipud(np.fliplr(psf))
            update = convolve(img/(convolve(reconImg, psf)+EPS), psf_prime)
            updateAll = updateAll * update
            # display progress
            progress = float(i*viewNum+j+1)/(viewNum*iterNum)
            timeElapsed = time.time() - startTime
            timeRemaining = timeElapsed/progress*(1-progress)
            sys.stdout.write('\r%.2f%%, %.2f s elapsed, %.2f s remaining' %
                             (progress*100.0, timeElapsed, timeRemaining))
            sys.stdout.flush()
        reconImg = reconImg * updateAll
        reconImg = np.abs(reconImg)
        reconImg = reconImg / np.sum(reconImg)
    sys.stdout.write('\n')
    return reconImg

Example 12: _postprocess_nodes

def _postprocess_nodes(self):
    fluid_map = self._fluid_map_base(wet=False).astype(np.uint8)
    wet_map = self._fluid_map_base(wet=True).astype(np.uint8)

    neighbors = np.zeros((3, 3, 3), dtype=np.uint8)
    neighbors[1,1,1] = 1
    for ei in self.grid.basis:
        neighbors[1 + ei[2], 1 + ei[1], 1 + ei[0]] = 1

    # Any wet node not connected to at least one fluid node is marked unused.
    # Note that dry nodes connecting to wet nodes need to be retained.
    # For instance:
    #   W W
    #   W V
    # where W is a HBB wall and V is a velocity BC.
    where = (filters.convolve(fluid_map, neighbors, mode='wrap') == 0)
    self._type_map_base[where & wet_map.astype(np.bool)] = nt._NTUnused.id

    # Any dry node, not connected to at least one wet node is marked unused.
    # For instance, for HBB walls: .. W W W F -> .. U U W F.
    where = (filters.convolve(wet_map, neighbors, mode='wrap') == 0)
    self._type_map_base[where & np.logical_not(wet_map)] = nt._NTUnused.id

    # If an unused node touches a wet node, mark it as propagation only.
    # For instance, for HBB walls: .. U U W F -> .. U P W F.
    used_map = (self._type_map_base != nt._NTUnused.id).astype(np.uint8)
    where = (filters.convolve(used_map, neighbors, mode='wrap') > 0)
    self._type_map_base[where & (self._type_map_base == nt._NTUnused.id)] = nt._NTPropagationOnly.id

Example 13: transportRates

def transportRates(self, signalRates, signalLevels, boundcond='constant', mode='normal'):
    # Compute diffusion term, laplacian of grid levels in signalLevels,
    # write into signalRates
    #
    # mode='greens' - do not use initLevels as these don't apply!
    signalRatesView = signalRates.reshape(self.gridDim)
    signalLevelsView = signalLevels.reshape(self.gridDim)
    advKernel = numpy.zeros((3, 3, 3))
    advKernel[:, 1, 1] = [-0.5, 0, 0.5]
    for s in range(self.nSignals):
        if boundcond == 'constant' and self.initLevels and mode != 'greens':
            boundval = self.initLevels[s]
        else:
            boundval = 0.0
        if self.advRates:
            # Advection term = du/dx
            # Note: always use 'nearest' edge case, this gives central
            # differences in the middle, and forward/backward differences on edges
            convolve(signalLevelsView[s], advKernel*self.advRates[s],
                     output=signalRatesView[s], mode='nearest')
            # Diffusion term = \del^2 u
            # Use edge case from boundary conditions for diffusion
            signalRatesView[s] += laplace(signalLevelsView[s], None, mode=boundcond, cval=boundval) * \
                self.diffRates[s] / 6.0
        else:
            signalRatesView[s] = laplace(signalLevelsView[s], None, mode=boundcond, cval=boundval) \
                * self.diffRates[s] / 6.0

Example 14: step

def step(self, dt):
    if dt != self.dt:
        print("I can only integrate at fixed dt!")
        return
    self.nCells = len(self.cellStates)
    # Check we have enough space allocated
    try:
        s = self.specLevel[self.nCells-1]
    except IndexError:
        # Could resize here, then would have to rebuild views
        print("Number of cells exceeded "
              + self.__class__.__name__
              + "::maxCells (" + str(self.maxCells) + ")")
    self.dataLen = self.signalDataLen + self.nCells*self.nSpecies

    # growth dilution of species
    self.diluteSpecies()

    # Do u += h(T(u_t)/2 + hf(u_t)) where T = transport operator, f(u_t) is
    # our regulation function dydt
    self.signalling.transportRates(self.signalRate, self.signalLevel, self.boundcond)
    self.signalRate *= 0.5
    self.dydt()
    self.rates[0:self.dataLen] *= self.dt
    self.levels[0:self.dataLen] += self.rates[0:self.dataLen]

    # Convolve (I+hT/2)u_t + f(u_t) with the Greens func to get u_{t+1}
    sigLvl = self.signalLevel.reshape(self.gridDim)
    convolve(sigLvl, self.greensFunc, mode=self.boundcond)

    # put local cell signal levels in array
    self.signalLevel_dev.set(self.signalLevel)
    self.program.setCellSignals(self.queue, (self.nCells,), None,
                                numpy.int32(self.nSignals),
                                numpy.int32(self.gridTotalSize),
                                numpy.int32(self.signalling.gridDim[1]),
                                numpy.int32(self.signalling.gridDim[2]),
                                numpy.int32(self.signalling.gridDim[3]),
                                self.gridIdxs_dev.data,
                                self.triWts_dev.data,
                                self.signalLevel_dev.data,
                                self.cellSigLevels_dev.data).wait()
    self.cellSigLevels[:] = self.cellSigLevels_dev.get()

    # Put the final signal levels into the cell states
    # states = self.cellStates
    # for (id,c) in states.items():
    #     if self.signalling:
    #         c.signals = self.signalling.signals(c, self.signalLevel)

    # Update cellType array
    for (id, c) in self.cellStates.items():
        self.celltype[c.idx] = numpy.int32(c.cellType)
    self.celltype_dev.set(self.celltype)

Example 15: np_hs_jacobi

def np_hs_jacobi(im0, im1, u, v):
    It = im1 - im0
    Iy = convolve(im1, dy)
    Ix = convolve(im1, dx)
    denom = np.square(Ix) + np.square(Iy) + alpha ** 2
    for _ in range(100):
        ubar = convolve(u, jacobi)
        vbar = convolve(v, jacobi)
        t = (Ix * ubar + Iy * vbar + It) / denom
        u = ubar - Ix * t
        v = vbar - Iy * t
    return u, v
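
Example 15 relies on module-level names (dx, dy, jacobi, alpha) that are not shown in the excerpt. The sketch below supplies one plausible set of definitions purely for illustration; the original module may define them differently:

import numpy as np
from scipy.ndimage import convolve

# Illustrative guesses at the globals used by np_hs_jacobi above.
dx = np.array([[-0.5, 0.0, 0.5]])           # horizontal derivative kernel
dy = dx.T                                   # vertical derivative kernel
jacobi = np.array([[0.0,  0.25, 0.0],
                   [0.25, 0.0,  0.25],
                   [0.0,  0.25, 0.0]])      # 4-neighbour averaging kernel
alpha = 1.0                                 # Horn-Schunck regularization weight

im0 = np.random.rand(64, 64)
im1 = np.roll(im0, 1, axis=1)               # second frame: shifted by one pixel
u = np.zeros_like(im0)
v = np.zeros_like(im0)
u, v = np_hs_jacobi(im0, im1, u, v)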