This article collects typical usage examples of the scipy.fft function in Python. If you have been wondering exactly how to use scipy.fft, what it does, and what calling it looks like in practice, the curated examples below may help.
The following 15 code examples of the fft function are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
Example 1: getMSD
def getMSD(d1, d2):
    # Mean squared difference between the per-second spectral peaks of two
    # signals sampled at 22050 Hz; assumes scipy was imported as cp.
    diff = 0
    d11 = []
    d22 = []
    for index in range(9):
        chunk = d1[index * 22050: index * 22050 + 22050]
        x = cp.fft(chunk)
        d11.append(max(abs(x)))  # peak FFT magnitude of this one-second chunk
    for index in range(9):
        chunk = d2[index * 22050: index * 22050 + 22050]
        x = cp.fft(chunk)
        d22.append(max(abs(x)))
    currentMax1 = max(d11)
    currentMax2 = max(d22)
    for x in range(len(d11)):
        # normalized variant:
        # diff += abs(d11[x]/currentMax1 - d22[x]/currentMax2) ** 2
        diff += abs(d11[x] - d22[x]) ** 2
    diff /= len(d11)
    return diff
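A minimal driver for getMSD, assuming a legacy SciPy where scipy.fft is still a function (imported as cp) and two 22050 Hz mono clips at least nine seconds long; the file names are placeholders:

import scipy as cp
from scipy.io import wavfile

rate1, d1 = wavfile.read('clip_a.wav')  # hypothetical 22050 Hz mono files,
rate2, d2 = wavfile.read('clip_b.wav')  # each at least nine seconds long
print(getMSD(d1, d2))                   # smaller value = more similar spectral peaks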
Example 2: problem4
def problem4():
    # read in tada.wav
    rate, tada = wavfile.read('tada.wav')
    # upon inspection, we find that tada.wav is a stereo audio file.
    # we create stereo white noise that lasts 10 seconds
    L_white = sp.int16(sp.random.randint(-32767, 32767, rate * 10))
    R_white = sp.int16(sp.random.randint(-32767, 32767, rate * 10))
    white = sp.zeros((len(L_white), 2))
    white[:, 0] = L_white
    white[:, 1] = R_white
    # pad tada signal with zeros to match the noise length
    padded_tada = sp.zeros_like(white)
    padded_tada[:len(tada)] = tada
    # Fourier transforms along the time axis, one per channel
    ftada = sp.fft(padded_tada, axis=0)
    fwhite = sp.fft(white, axis=0)
    # pointwise product in frequency = circular convolution in time
    out = sp.ifft(ftada * fwhite, axis=0)
    # prepping output and writing file
    out = sp.real(out)
    scaled = sp.int16(out / sp.absolute(out).max() * 32767)
    wavfile.write('my_tada_conv.wav', rate, scaled)
Example 3: get_envelope
def get_envelope(R, dim=1):
    """
    Returns the analytic (complex) signal of the input R; its magnitude is
    the amplitude envelope.
    @param R: The input data matrix.
    @param dim: The dimension along which the envelope is to be taken. default: dim=1
    """
    if dim == 0:
        R = R.T
    if len(R.shape) == 1:
        freqs = scipy.fft(R)
        length = len(R) // 2
        # zero the negative frequencies and double the positive ones
        freqs[length:] = 0
        freqs[1:length] = 2 * freqs[1:length]
        env = scipy.ifft(freqs)
    else:
        freqs = scipy.fft(R)
        length = R.shape[1] // 2
        freqs[:, length:] = 0
        freqs[:, 1:length] = 2 * freqs[:, 1:length]
        env = scipy.ifft(freqs)
    if dim == 0:
        return env.T
    return env
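This is the standard FFT construction of the analytic signal, so it can be sanity-checked against scipy.signal.hilbert; a quick sketch for a 1-D input (the two differ only in their treatment of the Nyquist bin):

import numpy as np
from scipy.signal import hilbert

t = np.linspace(0, 1, 2048, endpoint=False)
x = np.sin(2 * np.pi * 5 * t) * np.exp(-t)       # decaying test tone
env = np.abs(get_envelope(x))                    # envelope via the example above
print(np.max(np.abs(env - np.abs(hilbert(x)))))  # ~0, up to the Nyquist bin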
Example 4: itakura_saito_spectrum_distance
def itakura_saito_spectrum_distance(s, shat, winfunc):
    """Itakura-Saito distance between the windowed amplitude spectra of s and shat."""
    size = min(len(s), len(shat))
    window = winfunc(size)
    s = s[0:size]
    shat = shat[0:size]
    s_amp = sp.absolute(sp.fft(s * window))
    shat_amp = sp.absolute(sp.fft(shat * window))
    return sp.mean(sp.log(s_amp / shat_amp) + (shat_amp / s_amp) - 1.0)
Example 5: log_spectrum_distance
def log_spectrum_distance(s, shat, winfunc):
    """RMS log-spectral distance (in dB) between the windowed amplitude spectra of s and shat."""
    size = min(len(s), len(shat))
    window = winfunc(size)
    s = s[0:size]
    shat = shat[0:size]
    s_amp = sp.absolute(sp.fft(s * window))
    shat_amp = sp.absolute(sp.fft(shat * window))
    return sp.sqrt(sp.mean((sp.log10(s_amp / shat_amp) * 10.0) ** 2.0))
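Both distance measures take a window constructor as winfunc; a minimal call under legacy SciPy (sp), using a random stand-in for a reference signal and a noisy stand-in for its estimate:

import numpy as np
import scipy as sp

clean = np.random.randn(4096)                   # stand-in reference signal
estimate = clean + 0.1 * np.random.randn(4096)  # stand-in estimate
print(itakura_saito_spectrum_distance(clean, estimate, np.hanning))
print(log_spectrum_distance(clean, estimate, np.hanning))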
Example 6: prob5
def prob5():
    rate, sig = wavfile.read('tada.wav')
    sig = sp.float32(sig)
    noise = sp.float32(sp.random.randint(-32767, 32767, sig.shape))
    # circular convolution with white noise via the convolution theorem,
    # transforming along the time axis so stereo channels stay separate
    out = sp.ifft(sp.fft(sig, axis=0) * sp.fft(noise, axis=0), axis=0)
    out = sp.real(out)
    out = sp.int16(out / sp.absolute(out).max() * 32767)
    wavfile.write('white-conv.wav', rate, out)
Example 7: fourierTransform
def fourierTransform(self, fromPos, toPos, only=[]):
    """FFT (with fftshift) each FID in self.allFid[fromPos] into self.allFid[toPos],
    optionally restricted to the indices listed in only."""
    self.checkToPos(toPos)
    if len(only) > 0:
        self.allFid[toPos] = np.array([fftshift(fft(self.allFid[fromPos][fidIndex])) for fidIndex in only])
    else:
        self.allFid[toPos] = np.array([fftshift(fft(fid)) for fid in self.allFid[fromPos]])
    self.frequency = np.linspace(-self.sweepWidthTD2/2, self.sweepWidthTD2/2, len(self.allFid[fromPos][0]))
Example 8: fftconv
def fftconv(x, y):
    """ Convolution of x and y using the FFT convolution theorem. """
    # assumes fft and ifft are imported (e.g. from numpy.fft or legacy scipy)
    # pad length: next power of two above len(x), plus one
    n = int(np.round(2 ** np.ceil(np.log2(len(x))))) + 1
    X, Y = fft(x, n), fft(y, n)
    x_y = X * Y  # pointwise product in frequency = convolution in time
    # returns the inverse Fourier transform with padding correction
    return ifft(x_y)[4:len(x) + 4]
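The identity fftconv leans on can be checked directly; a short NumPy sketch, independent of the example's pad-and-offset choices:

import numpy as np
from numpy.fft import fft, ifft

x = np.random.randn(64)
y = np.random.randn(64)
n = len(x) + len(y) - 1                     # full linear-convolution length
direct = np.convolve(x, y)
via_fft = np.real(ifft(fft(x, n) * fft(y, n)))
print(np.max(np.abs(direct - via_fft)))     # ~1e-14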
Example 9: algoChannelSelection
def algoChannelSelection(left, right):
    ''' Algorithm which automatically selects the channel with dominant vocals from a stereo flamenco recording
    based on spectral band energies as described in section 2-A-I of
    Kroher, N. & Gomez, E. (2016). Automatic Transcription of Flamenco Singing from Polyphonic Music Recordings.
    ACM / IEEE Transactions on Audio, Speech and Language Processing, 24(5), pp. 901-913.
    :param left: samples of the left audio channel in 44.1kHz
    :param right: samples of the right audio channel in 44.1kHz
    :return: index of the dominant vocal channel (0 = left, 1 = right)
    '''
    # PARAMETERS
    fs = 44100  # sample rate
    wSize = 2048  # window size in samples
    hSize = 2048  # hop size in samples
    fftSize = 2048  # FFT size
    freqGuitLow = 80.0  # lower bound for guitar band
    freqGuitHigh = 400.0  # upper bound for guitar band
    freqVocLow = 500.0  # lower bound for vocal band
    freqVocHigh = 6000.0  # upper bound for vocal band
    # INIT
    window = hanning(wSize)
    numFrames = int(math.floor(float(len(left)) / float(wSize)))
    # bin indices corresponding to frequency band limits
    indGuitLow = int(round((freqGuitLow / fs) * fftSize))
    indGuitHigh = int(round((freqGuitHigh / fs) * fftSize))
    indVocLow = int(round((freqVocLow / fs) * fftSize))
    indVocHigh = int(round((freqVocHigh / fs) * fftSize))
    # frame-wise computation of the spectral band ratio
    sbrL = []
    sbrR = []
    for i in range(0, numFrames - 100):
        frameL = left[i * hSize:i * hSize + wSize]
        specL = fft(frameL * window) / fftSize
        specL = abs(specL * conj(specL))  # power spectrum
        guitMag = sum(specL[indGuitLow:indGuitHigh], 0)
        vocMag = sum(specL[indVocLow:indVocHigh], 0)
        sbrL.append(20 * math.log10(vocMag / guitMag))
        frameR = right[i * hSize:i * hSize + wSize]
        specR = fft(frameR * window) / fftSize
        specR = abs(specR * conj(specR))
        guitMag = sum(specR[indGuitLow:indGuitHigh], 0)
        vocMag = sum(specR[indVocLow:indVocHigh], 0)
        sbrR.append(20 * math.log10(vocMag / guitMag))
    # select channel based on mean SBR
    if mean(sbrL) >= mean(sbrR):
        ind = 0
    else:
        ind = 1
    return ind
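A minimal driver, assuming a 44.1 kHz stereo file read with scipy.io.wavfile (the file name is a placeholder):

from scipy.io import wavfile

rate, stereo = wavfile.read('flamenco_take.wav')  # hypothetical 44.1 kHz stereo recording
ind = algoChannelSelection(stereo[:, 0], stereo[:, 1])
print('dominant vocal channel:', 'left' if ind == 0 else 'right')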
Example 10: calculation
def calculation(self, data):
    # handle both a list of signals and a single signal;
    # returns the normalized one-sided magnitude spectrum (DC bin dropped)
    if isinstance(data[0], list):
        freq = [np.abs(fft(l) / len(l))[1:len(l) // 2] for l in data]
        freq = [n.tolist() for n in freq]
    else:
        l = data
        freq = np.abs(fft(l) / len(l))[1:len(l) // 2]
    return freq
Example 11: prob3
def prob3():
    rate1, sig1 = wavfile.read('chopinw.wav')
    n = sig1.shape[0]
    rate2, sig2 = wavfile.read('balloon.wav')
    m = sig2.shape[0]
    # zero-pad both stereo signals to a common length of n + m samples
    sig1 = sp.append(sig1, sp.zeros((m, 2)), axis=0)
    sig2 = sp.append(sig2, sp.zeros((n, 2)), axis=0)
    f1 = sp.fft(sig1, axis=0)
    f2 = sp.fft(sig2, axis=0)
    out = sp.ifft(f1 * f2, axis=0)
    out = sp.real(out)
    scaled = sp.int16(out / sp.absolute(out).max() * 32767)
    wavfile.write('test.wav', rate1, scaled)
Example 12: makeIR
def makeIR(wav_in, wav_out, fs, duration, noise=0.025):
    """ measures the response of a speaker (+amp+mic) and builds an IR """
    # step 1: full duplex playback and recording. Input: provided sweep wav file
    # output: recorded time response
    ecasound_cmd = "ecasound -f:16,1,%i -a:1 -i jack,system,capture " + \
                   " -o /tmp/capture.wav -a:2 -i %s -o jack,system -t %i"
    ecasound_cmd = ecasound_cmd % (int(fs), wav_in, int(duration))
    # run capture
    os.system(ecasound_cmd)
    # load input and capture wave files
    time.sleep(3)
    f = wave.open(wav_in, 'rb')
    len1 = f.getnframes()
    data = f.readframes(len1)
    f.close()
    Y1 = scipy.float32(scipy.frombuffer(data, dtype='int16'))
    f = wave.open('/tmp/capture.wav', 'rb')
    len2 = f.getnframes()
    data = f.readframes(len2)
    f.close()
    Y2 = scipy.float32(scipy.frombuffer(data, dtype='int16'))
    # truncate and normalize wave files
    # (or we could pad the shortest to the longest... TODO!)
    minlen = min([len1, len2])
    Y2 = Y2[0:minlen]
    Y2 = Y2 / max(abs(Y2))
    Y1 = Y1[0:minlen]
    Y1 = Y1 / max(abs(Y1))
    # compute frequency response function as ratio of both spectra
    FRF = scipy.fft(Y2) / scipy.fft(Y1)
    # compute impulse response as inverse FFT of FRF
    IRraw = scipy.real(scipy.ifft(FRF))
    # get rid of initial lag in IR
    thr = max(abs(IRraw)) * noise
    offset = max([0, min(min(scipy.where(abs(IRraw) > thr))) - 5])
    IR = IRraw[offset:-1]
    IRnorm = IR / max(abs(IR))
    # TODO: add post pro options such as low/high pass and decay
    # write output IR as 16-bit mono
    f = wave.open(wav_out, 'w')
    f.setparams((1, 2, fs, 0, 'NONE', 'not compressed'))
    maxVol = 2 ** 15 - 1.0  # maximum amplitude
    wvData = b""
    for i in range(len(IRnorm)):
        wvData += pack('h', int(maxVol * IRnorm[i]))
    f.writeframes(wvData)
    f.close()
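A typical invocation, assuming a pre-generated sine-sweep file and a JACK-connected ecasound installation (the file names are placeholders):

# play a 15 s sweep through the speaker/amp/mic chain and derive its impulse response
makeIR('sweep_20_20k.wav', 'speaker_ir.wav', fs=44100, duration=15)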
Example 13: load_validation_set
def load_validation_set():
    """
    Output:
        a tuple of features: (fft features, mfcc features, mean-std features)
    Description:
        extracts three types of features from the validation set.
    """
    ffts = dict()
    mfccs = dict()
    mean_stds = dict()
    for i in validation_ids:
        path = './validation/validation.{i}.wav'.format(i=i)
        _, X = read_wav(path)
        # FFT: magnitudes of the first 1000 bins
        fft_features = np.array(abs(sp.fft(X)[:1000]))
        ffts.update({i: fft_features})
        # MFCC: mean over the middle 80% of frames
        ceps, mspec, spec = mfcc(X)
        num_ceps = len(ceps)
        x = np.mean(ceps[int(num_ceps * 1 / 10):int(num_ceps * 9 / 10)], axis=0)
        mfccs.update({i: x})
        # Mean-Std of the pyAudioAnalysis short-term features
        [Fs, x] = audioBasicIO.readAudioFile(path)
        F = audioFeatureExtraction.stFeatureExtraction(x, Fs, 0.050 * Fs, 0.025 * Fs)
        mean_std = []
        for f in F:
            mean_std.extend([f.mean(), f.std()])
        mean_stds.update({i: np.array(mean_std)})
    return (ffts, mfccs, mean_stds)
Example 14: _fft
def _fft(self, data):
    data_length = len(data)
    frequencies = scipy.fft(data) / data_length   # normalized spectrum
    frequencies = frequencies[:data_length // 2]  # keep the one-sided half
    frequencies[0] = 0                            # drop the DC component
    frequencies = np.abs(frequencies)
    return frequencies
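To put physical frequencies on the one-sided spectrum returned here, pair it with np.fft.fftfreq; a sketch, assuming a known sample rate and obj as a hypothetical instance of the class above:

import numpy as np

rate = 8000                                   # assumed sample rate in Hz
t = np.arange(rate) / rate
tone = np.sin(2 * np.pi * 440 * t)            # one second of a 440 Hz tone
spectrum = obj._fft(tone)                     # obj: hypothetical instance
freqs = np.fft.fftfreq(len(tone), d=1 / rate)[:len(tone) // 2]
print(freqs[np.argmax(spectrum)])             # ~440.0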
Example 15: printer
def printer(function):
    # window, fft and sinus are module-level dicts; y, size and speed are globals
    window[function] = getattr(scipy.signal, function)(size)
    pylab.plot(numpy.abs(window[function]))
    pylab.xlim(0, size - 1)
    pylab.savefig("%s-%03d.png" % (function, speed))
    pylab.close()
    fft[function] = scipy.fft(window[function])
    pylab.loglog(numpy.abs(fft[function])[0:size // 2 + 1])
    pylab.savefig("%s-fft-%03d.png" % (function, speed))
    pylab.close()
    sinus[function] = scipy.fft(y * window[function])
    pylab.loglog(numpy.abs(sinus[function])[0:size // 2 + 1])
    pylab.savefig("%s-sinus-%03d.png" % (function, speed))
    pylab.close()
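The function leans on module-level state; a minimal setup that makes it runnable under a legacy SciPy, with every name below being one of the assumed globals:

import numpy
import pylab
import scipy
import scipy.signal

size, speed = 1024, 1
y = numpy.sin(numpy.linspace(0, 32 * numpy.pi, size))  # test sinusoid
window, fft, sinus = {}, {}, {}

printer("hamming")  # plots the window, its spectrum, and the windowed sinusoid's spectrum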