This article collects typical usage examples of the matplotlib.mlab.psd function in Python. If you are wondering how the psd function works, how to call it, or what real-world code that uses it looks like, the curated examples below should help.
The following section shows 15 code examples of the psd function, sorted by popularity by default.
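As a quick orientation before the examples, here is a minimal, illustrative sketch of calling mlab.psd on a synthetic signal. The sample rate, tone frequency, and segment parameters below are arbitrary choices and not taken from any of the examples:

import numpy as np
from matplotlib import mlab

fs = 1000.0                                  # assumed sample rate in Hz
t = np.arange(0, 2.0, 1.0 / fs)              # two seconds of data
x = np.sin(2 * np.pi * 50.0 * t) + 0.5 * np.random.randn(t.size)

# Welch-style estimate: 256-sample Hann-windowed segments with 50% overlap
Pxx, freqs = mlab.psd(x, NFFT=256, Fs=fs, noverlap=128)
print(freqs[np.argmax(Pxx)])                 # the peak should land near 50 Hz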
Example 1: compute_psd
def compute_psd(x, t_start=0, t_end=-1, NFFT=128, Fs=200, noverlap=None, f_min=0, f_max=-1):
    """Compute power spectral densities of a dataset 'x' which is
    organized as: trials X sensors X time.
    t_start, t_end = define a time window. Default: all the trial.
    NFFT = size of the FFT window.
    noverlap = amount of overlap between windows.
    f_min, f_max = return a specific frequency window of the PSD. Default: full range.
    Returns:
    x_psd = dataset organized as: trials X channels X PSD.
    freq = the actual frequencies of the PSD.
    """
    print("Computing PSD of each trial (%s) and for each channel (%s):" % (x.shape[0], x.shape[1]))
    if noverlap is None:
        noverlap = int(NFFT - Fs * 0.100)  # see van Gerven and Jensen 2009
    size = NFFT // 2 + 1
    f_idx = np.arange(size)
    if f_min != 0 and f_max != -1:  # compute the actual frequency interval size
        tmp, freq = mlab.psd(x[0, 0, t_start:t_end], NFFT=NFFT, Fs=Fs, noverlap=noverlap)
        f_idx = np.where((freq >= f_min) & (freq < f_max))[0]
        size = f_idx.size
    shape = (x.shape[0], x.shape[1], size)
    x_psd = np.zeros(shape)
    for trial in range(x.shape[0]):
        print("T", trial, end=" ")
        sys.stdout.flush()
        for sensor in range(x.shape[1]):
            tmp, freq = mlab.psd(x[trial, sensor, t_start:t_end], NFFT=NFFT, Fs=Fs, noverlap=noverlap)
            x_psd[trial, sensor, :] = tmp.squeeze()[f_idx]
    print()
    return x_psd, freq[f_idx]  # restrict freq to the selected band so it matches x_psd's last axis
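A hedged usage sketch for compute_psd as defined above, on random data with made-up dimensions (10 trials, 4 sensors, 600 samples at 200 Hz). It assumes numpy, matplotlib.mlab, and sys are imported at module level, as the function itself requires:

import sys
import numpy as np
from matplotlib import mlab

x = np.random.randn(10, 4, 600)                    # trials x sensors x time (fake data)
x_psd, freq = compute_psd(x, NFFT=128, Fs=200, f_min=8, f_max=30)
print(x_psd.shape)                                 # (10, 4, number of bins between 8 and 30 Hz)
print(freq.min(), freq.max())                      # frequencies restricted to the requested band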
Example 2: compute_spectrum
def compute_spectrum(self):
    #print("Signal length = " + str(self.signal_1.shape))
    (Pxx_1, freq) = mlab.psd(self.signal_1, NFFT=self.NSAMPLES, Fs=self.RATE, detrend=mlab.detrend_mean,
                             window=mlab.window_hanning, noverlap=self.N_OVERLAP, sides='onesided')
    (Pxx_2, freq) = mlab.psd(self.signal_2, NFFT=self.NSAMPLES, Fs=self.RATE, detrend=mlab.detrend_mean,
                             window=mlab.window_hanning, noverlap=self.N_OVERLAP, sides='onesided')
    # Take 10*log10() to convert to dB and compute the total energy
    #self.amp_sum_in = 0.0
    #self.amp_dum_out = 0.0
    Pxx_1 = np.array([10 * math.log(p, 10) for p in Pxx_1])
    Pxx_2 = np.array([10 * math.log(p, 10) for p in Pxx_2])
    #amp_sum_sub = abs(amp_sum_out - amp_sum_in)
    #energy = amp_sum_sub  # which energy source to use?
    #self.energy = self.amp_sum_1
    #print('Pxx_out shape=' + str(Pxx_1.shape))
    # Smooth in the frequency domain (moving average in the time domain)
    temp = np.reshape(Pxx_2 - Pxx_1, (self.NSAMPLES // 2 + 1,))
    sub_smoothed = cookb_signalsmooth.smooth(temp, window_len=61, window='flat')  # 61 or 51
    # compute the SNR
    self.snr_list.append(self.SNR(sub_smoothed))
    if self.PLOT == 1:
        self.plot_graph(freq, Pxx_1, Pxx_2, sub_smoothed)
    return sub_smoothed
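As a side note, the element-wise dB conversion done with list comprehensions above can be expressed with a single vectorized call; a small equivalent sketch (assuming Pxx_1 is a NumPy array of strictly positive densities):

import numpy as np
Pxx_1_db = 10 * np.log10(Pxx_1)   # same values as np.array([10*math.log(p, 10) for p in Pxx_1])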
Example 3: test_psd
def test_psd(self):
    for y, fstims in zip(self.y, self.fstimsall):
        Pxx1, freqs1 = mlab.psd(y, NFFT=self.NFFT,
                                Fs=self.Fs,
                                noverlap=self.noverlap,
                                pad_to=self.pad_to,
                                sides='default')
        np.testing.assert_array_equal(freqs1, self.freqss)
        for fstim in fstims:
            i = np.abs(freqs1 - fstim).argmin()
            self.assertTrue(Pxx1[i] > Pxx1[i+1])
            self.assertTrue(Pxx1[i] > Pxx1[i-1])
        Pxx2, freqs2 = mlab.psd(y, NFFT=self.NFFT,
                                Fs=self.Fs,
                                noverlap=self.noverlap,
                                pad_to=self.pad_to,
                                sides='onesided')
        np.testing.assert_array_equal(freqs2, self.freqss)
        for fstim in fstims:
            i = np.abs(freqs2 - fstim).argmin()
            self.assertTrue(Pxx2[i] > Pxx2[i+1])
            self.assertTrue(Pxx2[i] > Pxx2[i-1])
        Pxx3, freqs3 = mlab.psd(y, NFFT=self.NFFT,
                                Fs=self.Fs,
                                noverlap=self.noverlap,
                                pad_to=self.pad_to,
                                sides='twosided')
        np.testing.assert_array_equal(freqs3, self.freqsd)
        for fstim in fstims:
            i = np.abs(freqs3 - fstim).argmin()
            self.assertTrue(Pxx3[i] > Pxx3[i+1])
            self.assertTrue(Pxx3[i] > Pxx3[i-1])
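For reference, with a real input and pad_to equal to NFFT, the one-sided frequency grid returned by mlab.psd runs from 0 to Fs/2 in NFFT//2 + 1 steps; a quick self-contained check (parameters are arbitrary):

import numpy as np
from matplotlib import mlab

NFFT, Fs = 256, 100.0
Pxx, freqs = mlab.psd(np.random.randn(4096), NFFT=NFFT, Fs=Fs, sides='onesided')
assert np.allclose(freqs, np.linspace(0, Fs / 2, NFFT // 2 + 1))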
Example 4: tb_stimulus
def tb_stimulus():
    # pulse the reset
    yield reset.pulse(100)
    for ii in range(2):
        yield clock.posedge
    # chirp 1 (time-response pictorial)
    print(" chirp 1 ...")
    samp_in = signal.chirp(np.arange(args.Nsamps // 2) * 1 / args.Fs,
                           8, .64, 480,
                           method='logarithmic') * .94
    samp_in = np.concatenate(
        (samp_in,
         np.array([ss for ss in reversed(samp_in[:-1])])))
    samp_out = []
    fsamp_out = []
    # feed in the samples, save the output
    for ii in range(args.Nsamps - 1):
        sig_in.next = int(np.floor(samp_in[ii] * sMax))
        yield clock.posedge
        samp_out.append(sig_out / float(sMax))
        fsamp_out.append(int(np.floor(samp_in[ii] * sMax)))
    samp_out = np.array(samp_out)
    #fsamp_out = np.array(fsamp_out)
    #fsamp_out.save('fsamp_out.dat')
    fh = open('fsamp_out.dat', 'wb')
    pickle.dump(fsamp_out, fh)
    fh.close()
    c = signal.lfilter(coef, 1, samp_in)
    sdiff = np.abs(c[:-2] - samp_out[2:])
    plt.figure(3); plt.plot(sdiff)
    #print(np.max(sdiff), np.mean(sdiff**2))
    assert np.max(sdiff) < 1e-3, "error too large"
    ia = np.concatenate((np.ones(args.Nflt // 2) * .98, samp_in))
    fig, ax = plt.subplots(1)
    ax.plot(ia, 'b'); ax.plot(samp_out[1:], 'r'); ax.plot(c, 'y--')
    fig.savefig('__plot2.png')
    # chirp 2 (frequency response, more points)
    print(" chirp 2 ...")
    Nfft = 8 * args.Nsamps
    samp_in = signal.chirp(np.arange(Nfft) * 1 / args.Fs,
                           0.1, 1, 500) * .98
    samp_out = []
    for ii in range(Nfft):
        sig_in.next = int(np.floor(samp_in[ii] * sMax))
        yield clock.posedge
        samp_out.append(sig_out / float(sMax))
    samp_out = np.array(samp_out)
    Pi, fi = mlab.psd(samp_in)
    Po, fo = mlab.psd(samp_out)
    ax1.plot(np.pi * fi, 10 * np.log10(np.abs(Po / Pi)), 'r')
    ax1.grid(True)
    fig1.savefig('__plot1.png')
    raise StopSimulation
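The Po/Pi ratio at the end of the testbench estimates the filter's magnitude response from its input and output PSDs. The same idea can be sketched without the HDL machinery, using an illustrative FIR filter and white-noise excitation (filter length and cutoff below are arbitrary assumptions, not the coefficients used in the testbench):

import numpy as np
from matplotlib import mlab
from scipy import signal

fs = 1000.0
taps = signal.firwin(65, 0.2)                # illustrative low-pass FIR, cutoff = 0.2 x Nyquist
x = np.random.randn(2 ** 16)                 # white-noise input
y = signal.lfilter(taps, 1.0, x)             # filtered output

Pi, fi = mlab.psd(x, NFFT=1024, Fs=fs)
Po, fo = mlab.psd(y, NFFT=1024, Fs=fs)
gain_db = 10 * np.log10(Po / Pi)             # estimated magnitude response in dB

The estimated curve can then be compared against scipy.signal.freqz evaluated for the same taps.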
Example 5: auto_auto_cross
def auto_auto_cross(a, b, sample_rate, NFFT=None, detrend=mlab.detrend_none, window=mlab.window_none, noverlap=None,
                    binned=True, bins_per_decade=30, **kwds):
    """
    Return estimates of the auto-spectral density of both real time series a and b, of their cross-spectral density,
    and the frequencies corresponding to these estimates.

    Parameters
    ----------
    a : ndarray(real)
        A real time series.
    b : ndarray(real)
        A real time series.
    sample_rate : float
        The sample rate of both time series.
    NFFT : int
        The number of samples to use for each FFT chunk; should be a power of two for speed; if None, a reasonable
        default is used.
    window : callable
        A function that takes a complex time series as argument and returns a windowed time series.
    noverlap : int
        The number of samples to overlap in each chunk; if None, a value equal to half the NFFT value is used.
    detrend : callable
        A function that takes a complex time series as argument and returns a detrended time series.
    binned : bool
        If True, the result is binned using bin sizes that increase with frequency, and the bins at zero frequency
        and the Nyquist frequency are dropped.
    bins_per_decade : int
        The number of bins per decade; used only if binned is True.
    kwds : dict
        Additional keywords to pass to mlab.psd and mlab.csd.

    Returns
    -------
    f : ndarray(float)
        The frequencies corresponding to the data.
    S_aa : ndarray(float)
        The spectral density of a.
    S_bb : ndarray(float)
        The spectral density of b.
    S_ab : ndarray(complex)
        The cross-spectral density of a and b.
    """
    if NFFT is None:
        NFFT = int(2 ** (np.floor(np.log2(a.size)) - 3))
    if noverlap is None:
        noverlap = NFFT // 2
    S_aa, f = mlab.psd(a, Fs=sample_rate, NFFT=NFFT, detrend=detrend, window=window, noverlap=noverlap, **kwds)
    S_bb, f = mlab.psd(b, Fs=sample_rate, NFFT=NFFT, detrend=detrend, window=window, noverlap=noverlap, **kwds)
    S_ab, f = mlab.csd(a, b, Fs=sample_rate, NFFT=NFFT, window=window, detrend=detrend, noverlap=noverlap, **kwds)
    if binned:
        f = f[1:-1]
        S_aa = S_aa[1:-1]
        S_bb = S_bb[1:-1]
        S_ab = S_ab[1:-1]
        edges, counts, f, (S_aa, S_bb, S_ab) = binning.log_bin(f, bins_per_decade, S_aa, S_bb, S_ab)
    return AutoAutoCross(f, S_aa, S_bb, S_ab)
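The three densities returned above can be combined into the magnitude-squared coherence, which is bounded by 1. A hedged sketch using mlab.psd and mlab.csd directly on synthetic signals (all parameters are arbitrary):

import numpy as np
from matplotlib import mlab

fs = 256.0
n = np.random.randn(2 ** 14)
a = n + 0.1 * np.random.randn(n.size)       # two noisy copies of a common signal
b = n + 0.1 * np.random.randn(n.size)

S_aa, f = mlab.psd(a, Fs=fs, NFFT=1024, noverlap=512)
S_bb, f = mlab.psd(b, Fs=fs, NFFT=1024, noverlap=512)
S_ab, f = mlab.csd(a, b, Fs=fs, NFFT=1024, noverlap=512)
coherence = np.abs(S_ab) ** 2 / (S_aa * S_bb)
assert coherence.max() <= 1.0 + 1e-10

matplotlib also provides mlab.cohere, which computes this ratio in one call.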
Example 6: plot_example_psds
def plot_example_psds(example, rate):
    """
    This function creates a figure with 4 lines to show the overall PSD for
    the four sleep examples. (Recall row 0 is REM, rows 1-3 are NREM stages 1,
    2 and 3/4.)
    """
    plt.figure()
    ##YOUR CODE HERE
    for i in range(0, len(example)):
        Pxx, freqs = m.psd(example[i], NFFT=512, Fs=rate)
        normalizedPxx = Pxx / sum(Pxx)
        plt.plot(freqs, normalizedPxx, label=rowname[i])
        plt.xlabel('Frequency (Hz)')
        plt.ylabel('Normalized Power Spectral Density')
        ## Inserted into plotting loop
        ## Possible classifier - calculate the average power-weighted frequency
        sumPower = 0
        for j in range(0, len(normalizedPxx)):
            sumPower = sumPower + (normalizedPxx[j] * freqs[j])
        avgFreq = sumPower / len(freqs)
        print(rowname[i] + " average frequency = " + str(avgFreq))
        ##
    ##
    plt.xlim(0, 20)
    plt.legend(loc=0)
    ## Better classifier - cumulative power distribution:
    ## find where the cumulative power exceeds a threshold %
    threshold = 0.95
    print(' ')
    print('Threshold set to ' + str(threshold * 100) + '%')
    # iterate over each example data set
    for i in range(0, len(example)):
        Pxx, freqs = m.psd(example[i], NFFT=512, Fs=rate)
        # normalize the PSD
        normalizedPxx = Pxx / sum(Pxx)
        # determine the cumulative power distribution
        sumPxx = 0
        j = 0
        while sumPxx <= threshold:
            sumPxx = sumPxx + normalizedPxx[j]
            j = j + 1
        # stop when the cumulative power exceeds the threshold %
        print(rowname[i] + ' - Threshold Met at Frequency = ' + str(freqs[j]) + ' Hz')
    return
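The while loop that finds where the cumulative power crosses the threshold can also be written with np.cumsum and np.searchsorted. A small sketch, assuming normalizedPxx and freqs come from one m.psd call as above; note that this indexes the bin at which the crossing happens, whereas the loop above prints the bin just after it:

import numpy as np
cumulative = np.cumsum(normalizedPxx)
crossing = np.searchsorted(cumulative, threshold, side='right')   # first bin where the running sum exceeds threshold
print('Threshold met at frequency = ' + str(freqs[crossing]) + ' Hz')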
Example 7: plot_psds
def plot_psds(data, rate, subject, condition, label_set, title):
    """
    Plots the frequency response for all 9 channels
    using the entire recording.
    """
    fig = plt.figure()
    # common title
    fig.suptitle('Frequency Response (' + title
                 + ') ' + subject + ' ' + condition,
                 fontsize=14, fontweight='bold')
    # common ylabel
    fig.text(0.06, 0.5, 'Normalized Power Spectral Density',
             ha='center', va='center', rotation='vertical',
             fontsize=14, fontweight='bold')
    # common xlabel
    fig.text(0.5, 0.05, 'Frequency (Hz)',
             ha='center', va='center', fontsize=14, fontweight='bold')
    # use this to stack EEG, EOG, EMG on top of each other
    sub_order = [1, 4, 7, 10, 2, 5, 3, 6, 9]
    # determine the maximum response to scale the y-axis
    maxY = 0
    for i in range(0, len(data)):
        Pxx, freqs = m.psd(data[i], NFFT=512, Fs=rate)
        normalizedPxx = Pxx / sum(Pxx)
        if normalizedPxx.max() > maxY:
            maxY = normalizedPxx.max()
    # plot all subplots
    for i in range(0, len(data)):
        plt.subplot(4, 3, sub_order[i])  # 4 x 3 layout
        #plt.subplot(9, 1, i + 1)  # vertical 9 x 1 layout
        plt.subplots_adjust(hspace=.6)  # adds space between subplots
        Pxx, freqs = m.psd(data[i], NFFT=512, Fs=rate)
        normalizedPxx = Pxx / sum(Pxx)
        #plt.plot(freqs, normalizedPxx, label=label_set[ch])
        plt.bar(freqs, normalizedPxx, label=label_set[i], width=0.2)
        plt.axis([0, 70, 0, maxY])
        plt.title(label_set[i])
        #plt.xlabel('Frequency (Hz)')
        #plt.ylabel('Normalized Power Spectral Density')
        ## Inserted into plotting loop
        ## Possible classifier - calculate the average power-weighted frequency
        # sumPower = 0
        # for j in range(0, len(normalizedPxx)):
        #     sumPower = sumPower + (normalizedPxx[j] * freqs[j])
        #
        # avgFreq = sumPower/len(freqs)
        # print(channel_name[i] + " average frequency = " + str(avgFreq))
    return
Example 8: test_full_spectral_helper
def test_full_spectral_helper():
    x = np.random.randn(2 ** 20)
    y = np.random.randn(2 ** 20)
    mlabPxx, fr = mlab.psd(x, NFFT=2 ** 16, Fs=512e6 / 2 ** 14)
    mlabPyy, fr = mlab.psd(y, NFFT=2 ** 16, Fs=512e6 / 2 ** 14)
    mlabPxy, fr = mlab.csd(x, y, NFFT=2 ** 16, Fs=512e6 / 2 ** 14)
    with warnings.catch_warnings():
        warnings.simplefilter('ignore', np.ComplexWarning)
        fullPxx, fullPyy, fullPxy, freqs, t = full_spectral_helper(x, y, NFFT=2 ** 16, Fs=512e6 / 2 ** 14)
    assert np.allclose(mlabPxx, fullPxx.mean(1))
    assert np.allclose(mlabPyy, fullPyy.mean(1))
    assert np.allclose(mlabPxy, fullPxy.mean(1))
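The test above checks a custom helper against mlab.psd by averaging per-segment spectra. The same relationship should hold for matplotlib's own mlab.specgram, whose columns are per-segment PSD estimates, provided the parameters match on both calls; a hedged sketch:

import numpy as np
from matplotlib import mlab

x = np.random.randn(2 ** 14)
kw = dict(NFFT=256, Fs=1000.0, noverlap=0)
Pxx, freqs = mlab.psd(x, **kw)
Sxx, f2, t = mlab.specgram(x, **kw)          # shape: (n_freqs, n_segments)
assert np.allclose(Pxx, Sxx.mean(axis=1))    # psd is the segment-averaged spectrogram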
Example 9: work
def work(self, data):
    nfft = self.nfft
    length = self.length
    slice_length = self.slice_length
    samp_rate = self.samp_rate
    offset = self.offset
    index = data[0]
    center_freq = data[1]
    samples = data[2]
    if len(samples) > 2 * length:
        samples = samples[:2 * length]
    trash = length - slice_length
    #print(len(samples))
    samples = samples - offset
    power, freqs = psd(samples, NFFT=nfft, pad_to=length, noverlap=self.nfft // 2, Fs=samp_rate / 1e6, detrend=mlab.detrend_none, window=mlab.window_hanning, sides='twosided')
    power = np.reshape(power, len(power))
    freqs = freqs + center_freq / 1e6
    power = power[trash // 2:-(trash // 2)]
    freqs = freqs[trash // 2:-(trash // 2)]
    power = 10 * np.log10(power)
    power = power - self.correction
    out = [index, power, freqs]
    return out
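The pad_to argument used above zero-pads each segment before the FFT, which interpolates the spectrum onto a finer frequency grid without adding true resolution; a brief illustrative sketch:

import numpy as np
from matplotlib import mlab

x = np.random.randn(8192)
p_coarse, f_coarse = mlab.psd(x, NFFT=256, Fs=1e6)
p_fine, f_fine = mlab.psd(x, NFFT=256, pad_to=1024, Fs=1e6)
print(len(f_coarse), len(f_fine))     # 129 vs 513 frequency bins for the same 256-sample segments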
Example 10: plot_example_psds
def plot_example_psds(example, rate):
    """
    This function creates a figure with 4 lines to show the overall PSD for
    the four sleep examples. (Recall row 0 is REM, rows 1-3 are NREM stages 1,
    2 and 3/4.)
    """
    sleep_stages = ['REM sleep', 'Stage 1 NREM sleep', 'Stage 2 NREM sleep', 'Stage 3 and 4 NREM sleep']
    plt.figure()
    ##YOUR CODE HERE
    for i in range(len(example[:, 0])):
        # Apply power spectral density using a Fast Fourier Transform
        # to generate blocks of data
        psd, frequency = m.psd(example[i, :], NFFT=512, Fs=rate)
        # normalize the PSD
        psd = psd / np.sum(psd)
        # plot sleep stages
        plt.plot(frequency, psd, label=sleep_stages[i])
    # add legend and labels
    plt.ylabel('Normalized Power Spectral Density')
    plt.xlabel('Frequency (Hz)')
    plt.xlim(0, 20)
    plt.legend(loc=0)
    plt.title('Overall Power Spectral Density for Sleep Stages')
    return
Example 11: fitgrid
def fitgrid(z, channel='b'):
    Fs = z.fs
    tp = z.__dict__[channel]
    # 512 is a balance between frequency resolution and averaging, good for 50 Hz
    (p, f) = psd(tp, NFFT=512, pad_to=4096, Fs=Fs)  # unit variance -> PSD = 1.
    p = p / z.fillfrac  # account for zeros in the stitched timeseries
    N = len(z.t)  # original sequence length
    pad = 2 ** int(np.ceil(np.log2(N)))  # pad length for efficient FFTs
    fac = np.zeros(pad)
    mpad = np.zeros(pad)
    bpad = np.zeros(pad)
    bpad[:N] = tp
    B = np.fft.rfft(bpad).conj()  # N factor goes into fft, ifft = 1/N * ..
    fm = np.abs(np.fft.fftfreq(pad, d=1. / Fs)[:1 + pad // 2])
    fac = 1. / interp1d(f, p)(fm)  # 1/PSD for matched filter (double whiten)
    fac[fm < 0.1] = 0.  # turn off low freqs below 0.1 Hz - just a guess
    def makesnr(*args):
        (xtest, ytest, s1test, s2test, thtest) = args
        mpad[:N] = ezmodel(z.x, z.y, xtest, ytest, s1test, s2test, thtest)  # model signal
        # mpad[:N] = model(z.x, z.y, xtest, ytest, fwhm=rad2asec(s1test)*2.355)
        M = np.fft.rfft(mpad)
        norm = np.sqrt(np.sum(np.abs(M)**2 * fac))
        snr = np.sum((M * B * fac).real) / norm
        return snr
    (xx, yy, ss1, ss2, tt) = np.mgrid[-2:2, 12:16, 10:30, 10:20, 20:90:15]
    snrs = []
    pars = zip(xx.ravel(), yy.ravel(), ss1.ravel(), ss2.ravel(), tt.ravel())
    for (x, y, s1, s2, th) in pars:
        snrs.append(makesnr(asec2rad(x) / 2, asec2rad(y) / 2, asec2rad(s1 / 2.355), asec2rad(s2 / 2.355),
                            th * np.pi / 180.))
    snrs = np.array(snrs)
    ss = snrs.reshape(xx.shape)
    return ss
Example 12: spectrum
def spectrum(data, sampling, NFFT=256, overlap=0.5,
             window='hanning', detrender=mlab.detrend_linear,
             sides='onesided', scale='PSD'):
    numpoints = len(data)
    numoverlap = int(sampling * (1.0 - overlap))
    if isinstance(window, str):
        window = window.lower()
    win = signal.get_window(window, NFFT)
    # calculate the PSD with the given parameters
    spec, freq = mlab.psd(data, NFFT=NFFT, Fs=sampling, noverlap=numoverlap,
                          window=win, sides=sides, detrend=detrender)
    # rescale the data to meet the user's request
    scale = scale.lower()
    if scale == 'asd':
        spec = numpy.sqrt(spec) * numpy.sqrt(2 / (sampling * sum(win**2)))
    elif scale == 'psd':
        spec *= 2 / (sampling * sum(win**2))
    elif scale == 'as':
        spec = numpy.sqrt(spec) * numpy.sqrt(2) / sum(win)
    elif scale == 'ps':
        spec = spec * 2 / (sum(win)**2)
    return freq, spec.flatten()
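A useful cross-check when reasoning about the scaling conventions above: with mlab.psd's default density scaling, integrating the one-sided PSD over frequency approximately recovers the mean square of a zero-mean signal. A hedged sketch on white noise (parameters are arbitrary):

import numpy as np
from matplotlib import mlab

fs = 2048.0
x = np.random.randn(2 ** 16)                 # zero-mean white noise
Pxx, freqs = mlab.psd(x, NFFT=1024, Fs=fs, noverlap=512)
df = freqs[1] - freqs[0]
print(np.sum(Pxx) * df, np.mean(x ** 2))     # the two numbers should agree to within a few percent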
Example 13: spectral_analysis
def spectral_analysis(filtered_signal, sample_rate, signal_name="unknown"):
    """Runs a quick spectral analysis of a filtered signal and returns a bunch of
    information about it."""
    x = int((log(sample_rate) - log(0.01)) / log(2))
    NFFT = int(2**x)
    nooverlap = NFFT // 2
    pxx, freqs = psd(filtered_signal,
                     NFFT=NFFT, Fs=sample_rate, noverlap=nooverlap,
                     detrend=detrend_linear, window=window_hanning)
    i = pxx.argmax()  # index of the maximum power frequency
    max_frequency = freqs[i]
    max_power = pxx[i]
    if max_frequency == 0:
        logger.warning("%s power peak at zero. Threshold at %f Hz" % (signal_name, freqs[4]))
        i = pxx[5:].argmax()
        max_frequency = freqs[i + 5]
        max_power = pxx[i + 5]
    period = 1.0 / max_frequency  # computed after the zero-frequency peak has been skipped
    print("Butterworth Filter: %s signal" % signal_name)
    report_value("Spectral peak (Hz)", max_frequency)
    report_value("Peak power density", max_power)
    report_value("Spectral period (s)", period)
    return (pxx, freqs, max_frequency, max_power, period)
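A minimal illustration of the peak-picking idea used above, outside the full function (synthetic 5 Hz sine, arbitrary parameters):

import numpy as np
from matplotlib.mlab import psd, window_hanning, detrend_linear

fs = 200.0
t = np.arange(0, 20, 1 / fs)
sig = np.sin(2 * np.pi * 5.0 * t) + 0.1 * np.random.randn(t.size)
pxx, freqs = psd(sig, NFFT=1024, Fs=fs, noverlap=512,
                 detrend=detrend_linear, window=window_hanning)
print(freqs[pxx.argmax()])                   # dominant frequency, close to 5 Hz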
Example 14: compute_rayleigh
def compute_rayleigh(time, stride=0.1, num_strides=10):
    duration = stride * num_strides
    data = get_cache(STRAIN_FRAMETYPE)\
        .fetch(STRAIN_CHANNEL, time, time + duration)
    rate = 1.0 / data.metadata.dt
    chunk_size = int(len(data) / num_strides)
    shared_freq = None
    powers = np.empty((num_strides, chunk_size // 2 + 1), np.float64)
    for i in range(num_strides):
        subset = data[i * chunk_size:(i + 1) * chunk_size]
        power, freq = mlab.psd(subset, Fs=rate, NFFT=int(rate * stride))
        if shared_freq is None:
            shared_freq = freq
        else:
            assert (shared_freq == freq).all()
        powers[i, :] = power.T
    rs = powers.std(axis=0) / powers.mean(axis=0)
    return Frame(time=time,
                 stride=stride,
                 num_strides=num_strides,
                 frequencies=shared_freq,
                 rs=rs)
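The std/mean ratio computed above (a Rayleigh-statistic style monitor) can be reproduced on synthetic data without the frame-reading infrastructure; a hedged sketch with arbitrary stride settings and white noise standing in for the strain channel:

import numpy as np
from matplotlib import mlab

rate, stride, num_strides = 4096.0, 0.5, 16
data = np.random.randn(int(rate * stride * num_strides))   # stand-in for the strain data
chunk = int(rate * stride)
powers = np.array([mlab.psd(data[i * chunk:(i + 1) * chunk], Fs=rate, NFFT=chunk)[0]
                   for i in range(num_strides)])
rs = powers.std(axis=0) / powers.mean(axis=0)              # roughly constant across frequency for stationary noise
print(rs.mean())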
Example 15: __setup_bins
def __setup_bins(self):
    """
    Makes an initial dummy psd and thus sets up the bins and all the rest.
    Should be able to do it without a dummy psd..
    """
    dummy = np.ones(self.len)
    _spec, freq = mlab.psd(dummy, self.nfft, self.sampling_rate,
                           noverlap=self.nlap)
    # leave out the first entry (offset)
    freq = freq[1:]
    per = 1.0 / freq[::-1]
    self.freq = freq
    self.per = per
    # calculate left/right edge of the first period bin,
    # width of the bin is one octave
    per_left = per[0] / 2
    per_right = 2 * per_left
    # calculate the center period of the first period bin
    per_center = math.sqrt(per_left * per_right)
    # calculate the mean of all spectral values in the first bin
    per_octaves_left = [per_left]
    per_octaves_right = [per_right]
    per_octaves = [per_center]
    # we move through the period range at 1/8 octave steps
    factor_eighth_octave = 2 ** 0.125
    # do this for the whole period range and append the values to our lists
    while per_right < per[-1]:
        per_left *= factor_eighth_octave
        per_right = 2 * per_left
        per_center = math.sqrt(per_left * per_right)
        per_octaves_left.append(per_left)
        per_octaves_right.append(per_right)
        per_octaves.append(per_center)
    self.per_octaves_left = np.array(per_octaves_left)
    self.per_octaves_right = np.array(per_octaves_right)
    self.per_octaves = np.array(per_octaves)
    self.period_bins = per_octaves
    # mid-points of all the period bins
    self.period_bin_centers = np.mean((self.period_bins[:-1],
                                       self.period_bins[1:]), axis=0)
    self.store_freqi = []
    self.store_freqf = []
    for f in self.store_freqs:
        ind = np.abs(np.array(freq) - f).argmin()
        self.store_freqi.append(ind)
        self.store_freqf.append(freq[ind])
    # write the array of frequencies
    if self.write_psd:
        f = open('o-PSDtxt/freq.txt', 'w')
        # for item in self.freq:
        #     f.write("%7.5e\n" % item)
        freq_octaves = 1.0 / np.array(self.period_bins[::-1])
        for i in range(len(self.period_bins)):
            f.write("%7.5e\n" % freq_octaves[i])
        f.close()