本文整理汇总了Python中pywt.wavedec2函数的典型用法代码示例。如果您正苦于以下问题:Python wavedec2函数的具体用法?Python wavedec2怎么用?Python wavedec2使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了wavedec2函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: blend_images
def blend_images(base, texture, level=4, mode='sp1', base_gain=None, texture_gain=None):
    """Blend two images band-by-band in the wavelet domain.

    Each channel of *base* and *texture* is decomposed with a 'db2'
    wavelet; the approximation of *base* is kept, and at every detail
    level the coefficient with the larger magnitude wins.  Optional
    gains scale the detail coefficients before comparison.
    """
    src_bands = image2array(base)
    tex_bands = image2array(texture)
    blended_bands = []
    for src_band, tex_band in zip(src_bands, tex_bands):
        src_coeffs = pywt.wavedec2(src_band, 'db2', mode, level)
        tex_coeffs = pywt.wavedec2(tex_band, 'db2', mode, level)
        # Keep the base image's approximation band untouched.
        merged = [src_coeffs[0]]
        del src_coeffs[0], tex_coeffs[0]
        for idx, (src_details, tex_details) in enumerate(
                zip(src_coeffs, tex_coeffs)):
            level_details = []
            for src_d, tex_d in zip(src_details, tex_details):
                if base_gain is not None:
                    src_d *= base_gain
                if texture_gain is not None:
                    tex_d *= texture_gain
                # Per-pixel max-magnitude selection between the two images.
                level_details.append(
                    numpy.where(abs(src_d) > abs(tex_d), src_d, tex_d))
            # Release the consumed coefficient arrays.
            src_coeffs[idx] = tex_coeffs[idx] = None
            merged.append(level_details)
        blended_bands.append(pywt.waverec2(merged, 'db2', mode))
    return array2image(numpy.array(blended_bands), base.mode)
示例2: imageWT
def imageWT(image, scaleLevel):
    """Return the scaled approximation bands of an RGB image.

    Decomposes each channel of *image* (shape (3, H, W)) to
    *scaleLevel* levels and returns the three approximation bands,
    normalized by the subsampling factor and cast to int.
    """
    # NOTE(review): pywt wavelet names are lowercase ('haar'); recent
    # pywt versions may reject 'Haar' — confirm against the installed
    # pywt before changing the string.
    r = pywt.wavedec2(image[0], 'Haar', level=scaleLevel)[0]
    g = pywt.wavedec2(image[1], 'Haar', level=scaleLevel)[0]
    b = pywt.wavedec2(image[2], 'Haar', level=scaleLevel)[0]
    # Normalization factor: ratio of original to approximation pixel count.
    x = np.sqrt(image.shape[1] * image.shape[2] / (r.shape[0] * r.shape[1]))
    # BUG FIX: the original wrote np.array([r, g, b] / x), which divides
    # a plain Python list by a float and raises TypeError.  Stack the
    # channels into an array first, then divide.
    waveImg = (np.array([r, g, b]) / x).astype(int)
    return waveImg
示例3: dwt2
def dwt2(image, wavelet, mode, level):
    """2-D wavelet-decompose a grayscale or RGB image.

    Returns packed coefficients (via ``pack_wave_coeff``): a single
    packed result for a 2-D input, or an (r, g, b) tuple of packed
    results for a 3-D input.

    Raises
    ------
    ValueError
        If the input is neither 2- nor 3-dimensional (the original
        fell through and silently returned ``None``).
    """
    signal = np.asarray(image)
    if signal.ndim == 2:
        return pack_wave_coeff(pywt.wavedec2(signal, wavelet, mode, level))
    elif signal.ndim == 3:
        r, g, b = wv.splitRGB(image)
        rw = pack_wave_coeff(pywt.wavedec2(r, wavelet, mode, level))
        gw = pack_wave_coeff(pywt.wavedec2(g, wavelet, mode, level))
        bw = pack_wave_coeff(pywt.wavedec2(b, wavelet, mode, level))
        return (rw, gw, bw)
    else:
        raise ValueError(
            "dwt2 expects a 2-D or 3-D image, got ndim=%d" % signal.ndim)
示例4: multiwavelet_from_rgb
def multiwavelet_from_rgb(rgb):
    """Build a feature vector from the R and G channels of *rgb*.

    Concatenates the flattened DCT and the unpacked 'db4' wavelet
    decomposition of both channels into a single 1-D array.
    """
    from scipy.fftpack import dct
    from pywt import wavedec2
    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in
    # 1.24; use the builtin float (same dtype: float64).
    r = rgb[:, :, 0].astype(float)
    g = rgb[:, :, 1].astype(float)
    dctr = dct(r, norm='ortho').ravel()
    dctg = dct(g, norm='ortho').ravel()
    daubr = _unpack(wavedec2(r, 'db4'))
    daubg = _unpack(wavedec2(g, 'db4'))
    return np.hstack([dctr, dctg, daubr, daubg])
示例5: ReduceDimension
def ReduceDimension(X=np.zeros([2, 2]), image_shape=(385, 576)):
    """Reduce each row of *X* to its level-4 vertical wavelet band.

    Each row of *X* is reshaped to *image_shape*, decomposed with a
    'db1' wavelet to 4 levels, and replaced by the flattened level-4
    vertical detail band (cV4).

    Parameters
    ----------
    X : 2-D array, one flattened image per row.
    image_shape : (rows, cols) each row reshapes to; defaults to the
        original hard-coded 385x576 (kept for backward compatibility).

    Returns
    -------
    float32 array of shape (n_rows, cV4.size).
    """
    r, c = X.shape
    rows, cols = image_shape
    # Decompose the first image once, only to size the output; the
    # original recomputed this decomposition again at i == 0.
    coeffs = pywt.wavedec2(X[0, :].reshape([rows, cols]), 'db1', level=4)
    nr, nc = coeffs[0].shape
    rX = np.zeros([r, nr * nc], dtype=np.float32)
    for i in range(r):
        coeffs = pywt.wavedec2(X[i, :].reshape([rows, cols]), 'db1', level=4)
        # coeffs[1] is (cH4, cV4, cD4); keep the vertical band.
        rX[i, :] = coeffs[1][1].flatten()
    return rX
示例6: func
def func(dframe):
    """Fuse a pair of frames in the 'db4' wavelet domain.

    Averages the approximation bands, merges the detail bands with the
    enclosing scope's ``fuse`` helper, reconstructs, and clips back to
    the input dtype when it is uint8/uint16.
    """
    a = np.array(dframe[0])
    b = np.array(dframe[1])
    coeffs_a = pywt.wavedec2(a, 'db4', level=level)
    coeffs_b = pywt.wavedec2(b, 'db4', level=level)
    # Approximation: plain average of the two frames.
    approx = (coeffs_a[0] + coeffs_b[0]) / 2
    fused = pywt.waverec2(fuse(approx, coeffs_a[1:], coeffs_b[1:]), 'db4')
    if a.dtype == np.uint16:
        fused = fused.clip(0, 65535).astype(np.uint16)
    elif a.dtype == np.uint8:
        fused = fused.clip(0, 255).astype(np.uint8)
    return np.squeeze(fused)
示例7: matvec
def matvec(x):
    """Forward 2-D wavelet transform, flattened into one vector.

    Reshapes *x* to the enclosing scope's ``shapein``, decomposes it,
    and concatenates the approximation followed by every detail band.
    """
    coeffs = pywt.wavedec2(x.reshape(shapein), wavelet, mode=mode, level=level)
    parts = [coeffs[0].flatten()]
    for detail in coeffs[1:]:
        parts.append(np.concatenate(detail).flatten())
    return np.concatenate(parts)
示例8: denoise
def denoise():
    """Wavelet-denoise a noisy Lena image and save a comparison figure.

    Adds Gaussian noise, applies hard and soft coefficient
    thresholding at 3*sigma and 1.5*sigma respectively, and writes the
    three images side by side to 'denoise.pdf'.
    """
    wave = 'db4'
    sig = 20
    noisyLena = lena + np.random.normal(scale=sig, size=lena.shape)
    coeffs = pywt.wavedec2(noisyLena, wave, level=4)
    recon_hard = pywt.waverec2(hardThresh(coeffs, 3 * sig), wave)
    recon_soft = pywt.waverec2(softThresh(coeffs, 3 * sig / 2), wave)
    # Plot: noisy | hard-thresholded | soft-thresholded.
    for pos, img in zip((131, 132, 133), (noisyLena, recon_hard, recon_soft)):
        plt.subplot(pos)
        plt.imshow(img, cmap=plt.cm.Greys_r)
        plt.axis('off')
    plt.savefig('denoise.pdf')
    plt.clf()
示例9: _get_haar_feature
def _get_haar_feature(filename):
    """Extract a Haar-wavelet feature vector from an image file.

    Resizes the image to 64x64, takes the level-1 Haar approximation
    of each color channel (32x32 each), and appends four extras: the
    three channel means and the width/(width+height) aspect ratio.
    """
    data = misc.imread(filename)
    data = misc.imresize(data, (64, 64))
    feature_layers = np.zeros((32, 32, 3), dtype=np.float32)
    additional = np.empty(4)
    for channel in range(3):
        layer = np.float32(data[:, :, channel])
        additional[channel] = layer.mean()
        # Level-1 decomposition; keep only the approximation band.
        approx = pywt.wavedec2(data=layer, wavelet='haar', level=1)[0]
        feature_layers[:, :, channel] = approx
    height, width, _ = data.shape
    # Aspect ratio in (0, 1): width relative to the half-perimeter.
    additional[-1] = float(width) / (width + height)
    return np.concatenate((feature_layers.reshape(32 * 32 * 3), additional))
示例10: _call
def _call(self, x):
    """Compute the discrete wavelet transform.

    Parameters
    ----------
    x : `DiscreteLpVector`
        Input to transform; must be 1-, 2- or 3-dimensional.

    Returns
    -------
    arr : element of ``self.range``
        Flattened and concatenated coefficient array.
        The length of the array depends on the size of input image to
        be transformed and on the chosen wavelet basis.

    Raises
    ------
    NotImplementedError
        For any other dimensionality (the original silently fell
        through and returned ``None``).
    """
    ndim = x.space.ndim
    if ndim == 1:
        coeffs = pywt.wavedec(x, self.wbasis, self.mode, self.nscales)
    elif ndim == 2:
        coeffs = pywt.wavedec2(x, self.wbasis, self.mode, self.nscales)
    elif ndim == 3:
        coeffs = wavelet_decomposition3d(x, self.wbasis, self.mode,
                                         self.nscales)
    else:
        raise NotImplementedError(
            'only 1-, 2- and 3-dimensional transforms are supported, '
            'got ndim = {}'.format(ndim))
    # The packing step is identical for all dimensionalities.
    coeff_arr = pywt_coeff_to_array(coeffs, self.size_list)
    return self.range.element(coeff_arr)
示例11: munchetal_filter
def munchetal_filter(im, wlevel, sigma, wname='db15'):
    """Suppress vertical stripes with a wavelet-FFT filter (Munch et al.).

    The vertical detail band of every decomposition level is FFT'd
    along axis 0 and the low vertical frequencies are damped by an
    inverted Gaussian of width *sigma*, then the image is
    reconstructed.  uint16 inputs are clipped back to uint16 range.
    """
    # Wavelet decomposition.
    coeffs = pywt.wavedec2(im.astype(np.float32), wname, level=wlevel)
    filtered = [coeffs[0]]
    for lvl in range(1, wlevel + 1):
        ch, cv, cd = coeffs[lvl]
        # FFT of the vertical band along the rows.
        fcV = np.fft.fftshift(np.fft.fft(cv, axis=0))
        my, mx = fcV.shape
        # Inverted-Gaussian damping of the stripe frequencies.
        freq = np.arange(-np.floor(my / 2.), -np.floor(my / 2.) + my)
        damp = 1 - np.exp(-(freq ** 2) / (2 * (sigma ** 2)))
        fcV = fcV * np.kron(np.ones((1, mx)),
                            damp.reshape((damp.shape[0], 1)))
        # Back to the spatial domain.
        cv_flt = np.real(np.fft.ifft(np.fft.ifftshift(fcV), axis=0))
        filtered.append((ch, cv_flt, cd))
    im_f = np.real(pywt.waverec2(filtered, wname))
    # An additional row and/or column might be present after waverec2,
    # so crop back to the input size.
    if (im.dtype == 'uint16'):
        lo = np.iinfo(np.uint16).min
        hi = np.iinfo(np.uint16).max
        im_f[im_f < lo] = lo
        im_f[im_f > hi] = hi
        return im_f[0:im.shape[0], 0:im.shape[1]].astype(np.uint16)
    else:
        return im_f[0:im.shape[0], 0:im.shape[1]]
示例12: blur_feature_tong_etal
def blur_feature_tong_etal(img, thresh=35, MinZero=0.05):
    """Estimate blur extent of *img* (Tong et al. wavelet method).

    Builds edge maps from a 3-level Haar decomposition, takes local
    maxima over growing windows, classifies edge points, and returns
    BlurExtent in [0, 1] (1 = very blurry).
    """
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = resize_borders_to_multiple_of(img, 8)
    w = pywt.wavedec2(img, 'haar', level=3)
    # Edge magnitude per level from the three detail orientations.
    emap = [np.sqrt(w[i][0] ** 2 + w[i][1] ** 2 + w[i][2] ** 2)
            for i in range(1, len(w))]
    window_size_map = [2, 4, 8]
    # BUG FIX: use floor division ('//').  Under Python 3, '/' yields
    # floats and both np.zeros and range() reject non-integer sizes.
    emax = [np.zeros((e.shape[0] // s, e.shape[1] // s))
            for e, s in zip(emap, window_size_map)]
    for e, s, m in zip(emap, window_size_map, emax):
        for y in range(0, e.shape[0] // s):
            for x in range(0, e.shape[1] // s):
                # Local maximum of the edge map over an s x s window.
                m[y, x] = np.amax(e[y * s:y * s + s, x * s:x * s + s])
    r1 = edge_point = np.logical_or(emax[0] > thresh,
                                    np.logical_or(emax[1] > thresh,
                                                  emax[2] > thresh))
    r2 = ds_or_as = np.logical_and(edge_point,
                                   np.logical_and(emax[0] > emax[1],
                                                  emax[1] > emax[2]))
    r3 = rs_or_gs = np.logical_and(edge_point,
                                   np.logical_and(emax[0] < emax[1],
                                                  emax[1] < emax[2]))
    r4 = rs = np.logical_and(edge_point,
                             np.logical_and(emax[1] > emax[0],
                                            emax[1] > emax[2]))
    r5 = more_likely = np.logical_and(np.logical_or(rs_or_gs, rs),
                                      emax[0] < thresh)
    N_edge = np.count_nonzero(r1)
    N_da = np.count_nonzero(r2)
    N_rg = np.count_nonzero(np.logical_or(r3, r4))
    N_brg = np.count_nonzero(r5)
    # Guard: an image with no detected edges would otherwise divide by
    # zero here.  Per is informational (blurred/unblurred decision).
    Per = float(N_da) / float(N_edge) if N_edge else 0.0
    unblured = Per > MinZero
    # if N_rg is 0 then the image must be really blurry
    if N_rg == 0:
        BlurExtent = 1
    else:
        BlurExtent = float(N_brg) / float(N_rg)
    return BlurExtent
示例13: w2d
def w2d(img, mode='haar', level=1):
    """Show *img* reconstructed with its horizontal detail band zeroed.

    Loads the image, converts to grayscale float in [0, 1], decomposes
    it with the wavelet named by *mode*, zeroes the finest horizontal
    detail band, reconstructs, and displays the uint8 result.
    """
    imArray = cv2.imread(img)
    # Datatype conversions: grayscale, then float scaled to [0, 1].
    imArray = cv2.cvtColor(imArray, cv2.COLOR_RGB2GRAY)
    imArray = np.float32(imArray)
    imArray /= 255.
    # Compute coefficients.
    coeffs = pywt.wavedec2(imArray, mode, level=level)
    # BUG FIX: the original did ``coeffs_H = list(coeffs[1][0]);
    # coeffs_H *= 0`` — multiplying a Python list by 0 EMPTIES it
    # instead of zeroing the array — and then assigned into
    # ``coeffs[1][0]``, which raises TypeError because the detail
    # bands are an immutable tuple.  Rebuild the tuple with the
    # horizontal band replaced by zeros.
    cH, cV, cD = coeffs[1]
    coeffs[1] = (np.zeros_like(cH), cV, cD)
    # Reconstruction.
    imArray_H = pywt.waverec2(coeffs, mode)
    imArray_H *= 255.
    imArray_H = np.uint8(imArray_H)
    # Display result.
    cv2.imshow('image', imArray_H)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
示例14: create_haar_dictionary
def create_haar_dictionary(p=8):
    """Build a dictionary of translated 2-D Haar atoms of size p x p.

    Each atom is obtained by setting a single wavelet coefficient to 1,
    reconstructing, and resetting it.  The first 15 atoms are then
    expanded with ``_translate`` and all-zero rows are dropped.
    """
    import pywt
    coeffs = pywt.wavedec2(np.zeros((p, p)), 'haar')
    atoms = []

    def _emit_atoms(band):
        # Activate each coefficient in turn and reconstruct the atom.
        size = band.shape[0]
        for row in range(size):
            for col in range(size):
                band[row, col] = 1
                atoms.append(pywt.waverec2(coeffs, 'haar'))
                band[row, col] = 0

    # Detail bands of every level (3 orientations each), then the
    # approximation band last — same order as the original.
    for lvl in range(1, len(coeffs)):
        for orientation in range(3):
            _emit_atoms(coeffs[lvl][orientation])
    _emit_atoms(coeffs[0])

    D = np.array(atoms).reshape(-1, p * p)
    translated = []
    for i in range(15):
        translated += _translate(D[i].reshape((p, p)))
    Dn = np.array(translated).reshape((-1, p * p))
    # Drop atoms that translated to all zeros.
    nonzero = np.sum(abs(Dn), axis=1) != 0
    return Dn[nonzero]
示例15: idwt2
def idwt2(self):
    """
    Test pypwt for DWT reconstruction (waverec2).

    Runs the forward+inverse transform on the GPU implementation,
    optionally times the equivalent pywt round-trip, and asserts that
    the reconstruction error stays below ``self.tol``.
    """
    W = self.W
    levels = self.levels
    # Inverse DWT with pypwt.
    W.forward()
    logging.info("computing Wavelets.inverse from pypwt")
    t0 = time()
    W.inverse()
    logging.info("Wavelets.inverse took %.3f ms" % elapsed_ms(t0))
    if self.do_pywt:
        # Inverse DWT with pywt.
        # BUG FIX: restart the timer here — the original reused t0 from
        # the pypwt measurement above, so the logged pywt duration also
        # included the pypwt inverse run.
        t0 = time()
        Wpy = pywt.wavedec2(self.data, self.wname, mode=per_kw, level=levels)
        logging.info("computing waverec2 from pywt")
        _ = pywt.waverec2(Wpy, self.wname, mode=per_kw)
        logging.info("pywt took %.3f ms" % elapsed_ms(t0))
    # Check reconstruction.
    W_image = W.image
    maxerr = _calc_errors(self.data, W_image, "[rec]")
    self.assertTrue(
        maxerr < self.tol,
        msg="[%s] something wrong with the reconstruction (errmax = %e)"
            % (self.wname, maxerr))