This article collects typical usage examples of the Python method scipy.signal.find_peaks. If you are wondering what signal.find_peaks does, how to call it, or what it looks like in real code, the hand-picked examples below may help. You can also explore further usage examples from the enclosing module, scipy.signal.
The following shows 15 code examples of signal.find_peaks, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
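Before diving into the project examples, here is a minimal, self-contained sketch of the basic call (not taken from any of the projects below); find_peaks returns the indices of qualifying local maxima together with a dictionary of per-peak properties:

import numpy as np
from scipy.signal import find_peaks

x = np.sin(np.linspace(0, 10 * np.pi, 500)) + np.random.normal(0, 0.1, 500)
peaks, properties = find_peaks(x, height=0.5, distance=20)
print(peaks)                         # indices of detected peaks
print(properties["peak_heights"])    # heights at those indices (present because height= was given)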
Example 1: detect_impulses
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import find_peaks [as alias]
def detect_impulses(x, fs, k=10, tdist=1e-3):
    """Detect impulses in `x`

    The minimum height of impulses is defined by `a+k*b`
    where `a` is median of the envelope of `x` and `b` is median
    absolute deviation (MAD) of the envelope of `x`.

    :param x: real signal
    :param fs: sampling frequency in Hz
    :param k: multiple of MAD for the impulse minimum height (default: 10)
    :param tdist: minimum time difference between neighbouring impulses in sec (default: 1e-3)
    :returns: indices and heights of detected impulses

    >>> nsamp = 1000
    >>> ind_impulses = np.array([10, 115, 641, 888])
    >>> x = np.zeros((nsamp))
    >>> x[ind_impulses] = 1
    >>> x += np.random.normal(0, 0.1, nsamp)
    >>> ind_pks, h_pks = signal.detect_impulses(x, fs=100000, k=10, tdist=1e-3)
    """
    env = envelope(x)
    height = _np.median(env) + k*_np.median(_np.abs(env - _np.median(env)))
    distance = int(tdist*fs)
    ind_imp, properties = _sig.find_peaks(env, height=height, distance=distance)
    return ind_imp, properties["peak_heights"]
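The snippet relies on module-level aliases and an envelope() helper that are not shown here. In the library this example comes from, numpy and scipy.signal appear to be imported under private aliases, and the envelope is presumably the magnitude of the analytic signal; a minimal sketch of the missing pieces under that assumption:

import numpy as _np
import scipy.signal as _sig

def envelope(x):
    # assumed implementation: magnitude of the analytic (Hilbert) signal
    return _np.abs(_sig.hilbert(x))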
Example 2: FeatureSpectralTonalPowerRatio
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import find_peaks [as alias]
def FeatureSpectralTonalPowerRatio(X, f_s, G_T=5e-4):
    X = X**2
    fSum = X.sum(axis=0)
    vtpr = np.zeros(fSum.shape)

    for n in range(0, X.shape[1]):
        if fSum[n] < G_T:
            continue

        # find local maxima above the threshold
        afPeaks = find_peaks(X[:, n], height=G_T)
        if not afPeaks[0].size:
            continue

        # calculate ratio
        vtpr[n] = X[afPeaks[0], n].sum() / fSum[n]

    return (vtpr)
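A hypothetical usage sketch, assuming the function above has been pasted into the same file (the surrounding pyACA helpers are not shown): feed it a magnitude spectrogram of shape (bins, frames) and it returns one tonal-power ratio per frame. The signal, sample rate, and spectrogram settings below are illustrative only.

import numpy as np
from scipy.signal import find_peaks, spectrogram

fs = 16000
t = np.arange(fs) / fs
tone = np.sin(2 * np.pi * 440 * t)                          # a tonal test signal
freqs, times, X = spectrogram(tone, fs, nperseg=1024, mode='magnitude')
vtpr = FeatureSpectralTonalPowerRatio(X, fs)                 # one ratio per frame
print(vtpr.mean())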
Example 3: _calculate_results
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import find_peaks [as alias]
def _calculate_results(self, s, interval, df, loader):
    if not df.empty:
        hist = pd.cut(df[Names.HEART_RATE], np.arange(30, 90), right=False).value_counts(sort=False)
        peaks, _ = find_peaks(hist)
        for peak in peaks:
            rest_hr = hist.index[peak].left
            measurements = hist.loc[rest_hr]
            if measurements > len(df) * 0.01:
                log.debug(f'Rest HR is {rest_hr} with {measurements} values')
                # conversion to int as value above is numpy int64
                loader.add(Titles.REST_HR, Units.BPM, S.join(S.MIN, S.MSR), interval,
                           int(rest_hr), local_date_to_time(interval.start), StatisticJournalInteger,
                           'The rest heart rate')
                return
            else:
                log.debug(f'Skipping rest HR at {rest_hr} because too few measurements ({measurements}/{len(df)})')
    log.warning(f'Unable to calculate rest HR at {format_date(interval.start)}')
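The loader, Names, Titles, and related classes belong to the host project, so this method is not runnable on its own. The core idea, however, is compact: bin heart-rate samples into a histogram and let find_peaks pick the resting-HR mode. A standalone sketch of just that step, using hypothetical data:

import numpy as np
import pandas as pd
from scipy.signal import find_peaks

hr = pd.Series(np.random.normal(55, 8, 10000).clip(35, 85), name='heart_rate')   # fake HR samples
hist = pd.cut(hr, np.arange(30, 90), right=False).value_counts(sort=False)
peaks, _ = find_peaks(hist)
for peak in peaks:
    rest_hr = hist.index[peak].left
    if hist.iloc[peak] > len(hr) * 0.01:
        print(f'Candidate rest HR: {rest_hr} bpm ({hist.iloc[peak]} samples)')
        break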
Example 4: preprocess
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import find_peaks [as alias]
def preprocess(data, config):
    sr = config.sample_rate
    if sr is None:
        sr = 300
    data = np.nan_to_num(data)  # removing NaNs and Infs
    from scipy.signal import resample
    data = resample(data, int(len(data) * 360 / sr))  # resample to match the data sampling rate 360(mit), 300(cinc)
    from sklearn import preprocessing
    data = preprocessing.scale(data)
    from scipy.signal import find_peaks
    peaks, _ = find_peaks(data, distance=150)
    data = data.reshape(1, len(data))
    data = np.expand_dims(data, axis=2)  # required by Keras
    return data, peaks
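A hypothetical invocation, using a SimpleNamespace as a stand-in for the project's real config object (only the sample_rate attribute is needed here); the input array is made-up data, not a real ECG recording:

import numpy as np
from types import SimpleNamespace

config = SimpleNamespace(sample_rate=300)      # stand-in for the project's config object
raw = np.random.normal(0, 1, 9000)             # pretend 30 s of 300 Hz ECG
data, peaks = preprocess(raw, config)
print(data.shape, len(peaks))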
Example 5: PitchSpectralAcf
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import find_peaks [as alias]
def PitchSpectralAcf(X, f_s):
    # initialize
    f_min = 300
    f = np.zeros(X.shape[1])

    # use spectral symmetry for robustness
    X[0, :] = np.max(X)
    X = np.concatenate((np.flipud(X), X), axis=0)

    # compute the ACF
    for n in range(0, X.shape[1]):
        if X[:, n].sum() < 1e-20:
            continue

        eta_min = int(round(f_min / f_s * (X.shape[0] - 2))) - 1

        afCorr = np.correlate(X[:, n], X[:, n], "full") / np.dot(X[:, n], X[:, n])
        afCorr = afCorr[np.arange(X.shape[0], afCorr.size)]

        # find the highest local maximum
        iPeaks = find_peaks(afCorr, height=0)
        if iPeaks[0].size:
            eta_min = np.max([eta_min, iPeaks[0][0] - 1])
        f[n] = np.argmax(afCorr[np.arange(eta_min, afCorr.size)]) + 1

        # find max index and convert to Hz (note: X has double length)
        f[n] = (f[n] + eta_min) / (X.shape[0] - 2) * f_s

    return (f)
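A hypothetical usage sketch, assuming the function above and the imports below live in the same file: build a magnitude spectrogram of a harmonic-rich test tone and read off one pitch estimate per frame. The test signal and spectrogram settings are illustrative assumptions, not part of the original library.

import numpy as np
from scipy.signal import find_peaks, spectrogram

fs = 44100
t = np.arange(fs) / fs
x = sum(np.sin(2 * np.pi * 220 * k * t) / k for k in range(1, 6))   # harmonic-rich test tone
freqs, times, X = spectrogram(x, fs, nperseg=2048, mode='magnitude')
f = PitchSpectralAcf(X, fs)
print(f[:5])    # one pitch estimate (Hz) per spectrogram frame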
Example 6: find_peaks_simple
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import find_peaks [as alias]
def find_peaks_simple(x):
    if x.ndim == 1:
        # Delegate to scipy's peak finder.
        return find_peaks(x)[0],
    else:
        # Use maximum filter for peak finding.
        y = maximum_filter(x, 3)
        return np.where(x == y)
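The helper needs numpy, scipy.signal.find_peaks, and scipy.ndimage.maximum_filter in scope; both branches return a tuple of index arrays (note the trailing comma in the 1-D case). A small usage sketch with made-up inputs:

import numpy as np
from scipy.signal import find_peaks
from scipy.ndimage import maximum_filter

x1 = np.array([0, 2, 0, 3, 0])
print(find_peaks_simple(x1))        # 1-D: tuple containing the peak indices from find_peaks

x2 = np.array([[0, 1, 0],
               [1, 5, 1],
               [0, 1, 0]])
print(find_peaks_simple(x2))        # n-D: coordinates where x equals its 3x3 local maximum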
Example 7: get_picks_from_pdf
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import find_peaks [as alias]
def get_picks_from_pdf(feature, phase, pick_set, height=0.5, distance=100):
    # select the probability trace for the requested phase
    i = feature.phase.index(phase)
    peaks, properties = find_peaks(feature.pdf[-1, :, i], height=height, distance=distance)

    # convert each qualifying peak index into an absolute pick time
    for p in peaks:
        if p:
            pick_time = UTCDateTime(feature.starttime) + p * feature.delta
            feature.pick_time.append(pick_time.isoformat())
            feature.pick_phase.append(feature.phase[i])
            feature.pick_set.append(pick_set)
Example 8: infer_pitch_v2
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import find_peaks [as alias]
def infer_pitch_v2(pitch, shortest=5, offset_interval=6):
    w_on = pitch[:, 2]
    w_dura = pitch[:, 1]

    # note onsets are local maxima of the onset channel
    peaks, properties = find_peaks(w_on, distance=shortest, width=5)
    if len(peaks) == 0:
        return []

    notes = []
    adjust = 5 if shortest == 10 else 2
    for i in range(len(peaks) - 1):
        notes.append({"start": peaks[i]-adjust, "end": peaks[i+1]-adjust, "stren": pitch[peaks[i], 2]})
    notes.append({"start": peaks[-1]-adjust, "end": len(w_on), "stren": pitch[peaks[-1], 2]})

    # trim or drop notes whose duration channel goes silent too early
    del_idx = []
    for idx, p in enumerate(peaks):
        upper = int(peaks[idx+1]) if idx < len(peaks)-2 else len(w_dura)
        for i in range(p, upper):
            if np.sum(w_dura[i:i+offset_interval]) == 0:
                if i - notes[idx]["start"] < shortest:
                    del_idx.append(idx)
                else:
                    notes[idx]["end"] = i
                break

    for ii, i in enumerate(del_idx):
        del notes[i - ii]

    return notes
Example 9: add_noise
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import find_peaks [as alias]
def add_noise(config):
    noises = dict()
    noises["trainset"] = list()
    noises["testset"] = list()
    import csv
    try:
        testlabel = list(csv.reader(open('training2017/REFERENCE.csv')))
    except:
        cmd = "curl -O https://archive.physionet.org/challenge/2017/training2017.zip"
        os.system(cmd)
        os.system("unzip training2017.zip")
        testlabel = list(csv.reader(open('training2017/REFERENCE.csv')))
    for i, label in enumerate(testlabel):
        if label[1] == '~':
            filename = 'training2017/' + label[0] + '.mat'
            from scipy.io import loadmat
            noise = loadmat(filename)
            noise = noise['val']
            _, size = noise.shape
            noise = noise.reshape(size,)
            noise = np.nan_to_num(noise)  # removing NaNs and Infs
            from scipy.signal import resample
            noise = resample(noise, int(len(noise) * 360 / 300))  # resample to match the data sampling rate 360(mit), 300(cinc)
            from sklearn import preprocessing
            noise = preprocessing.scale(noise)
            noise = noise / 1000 * 6  # rough normalize, to be improved
            from scipy.signal import find_peaks
            peaks, _ = find_peaks(noise, distance=150)
            choices = 10  # 256*10 from 9000
            picked_peaks = np.random.choice(peaks, choices, replace=False)
            for j, peak in enumerate(picked_peaks):
                if peak > config.input_size // 2 and peak < len(noise) - config.input_size // 2:
                    start, end = peak - config.input_size // 2, peak + config.input_size // 2
                    if i > len(testlabel) / 6:
                        noises["trainset"].append(noise[start:end].tolist())
                    else:
                        noises["testset"].append(noise[start:end].tolist())
    return noises
Example 10: test_k2_peaks_finite
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import find_peaks [as alias]
def test_k2_peaks_finite(self):
    """Tests the correct peak locations and intensities are found for the
    k=2 term in finite systems.
    """
    desc = LMBTR(
        species=["H", "O"],
        k2={
            "geometry": {"function": "distance"},
            "grid": {"min": -1, "max": 3, "sigma": 0.5, "n": 1000},
            "weighting": {"function": "unity"},
        },
        normalize_gaussians=False,
        periodic=False,
        flatten=True,
        sparse=False
    )
    features = desc.create(H2O, [0])[0, :]
    pos = H2O.get_positions()
    x = desc.get_k2_axis()

    # Check the X-H peaks
    xh_feat = features[desc.get_location(("X", "H"))]
    xh_peak_indices = find_peaks(xh_feat, prominence=0.5)[0]
    xh_peak_locs = x[xh_peak_indices]
    xh_peak_ints = xh_feat[xh_peak_indices]
    self.assertTrue(np.allclose(xh_peak_locs, [np.linalg.norm(pos[0] - pos[2])], rtol=0, atol=1e-2))
    self.assertTrue(np.allclose(xh_peak_ints, [1], rtol=0, atol=1e-2))

    # Check the X-O peaks
    xo_feat = features[desc.get_location(("X", "O"))]
    xo_peak_indices = find_peaks(xo_feat, prominence=0.5)[0]
    xo_peak_locs = x[xo_peak_indices]
    xo_peak_ints = xo_feat[xo_peak_indices]
    self.assertTrue(np.allclose(xo_peak_locs, np.linalg.norm(pos[0] - pos[1]), rtol=0, atol=1e-2))
    self.assertTrue(np.allclose(xo_peak_ints, [1], rtol=0, atol=1e-2))

    # Check that everything else is zero
    features[desc.get_location(("X", "H"))] = 0
    features[desc.get_location(("X", "O"))] = 0
    self.assertEqual(features.sum(), 0)
Example 11: test_k1_peaks_finite
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import find_peaks [as alias]
def test_k1_peaks_finite(self):
    """Tests the correct peak locations and intensities are found for the
    k=1 term.
    """
    desc = MBTR(
        species=[1, 8],
        k1={
            "geometry": {"function": "atomic_number"},
            "grid": {"min": 0, "max": 9, "sigma": 0.5, "n": 1000}
        },
        normalize_gaussians=False,
        periodic=False,
        flatten=True,
        sparse=False
    )
    features = desc.create(H2O)[0, :]
    x = desc.get_k1_axis()

    # Check the H peaks
    h_feat = features[desc.get_location(("H"))]
    h_peak_indices = find_peaks(h_feat, prominence=1)[0]
    h_peak_locs = x[h_peak_indices]
    h_peak_ints = h_feat[h_peak_indices]
    self.assertTrue(np.allclose(h_peak_locs, [1], rtol=0, atol=1e-2))
    self.assertTrue(np.allclose(h_peak_ints, [2], rtol=0, atol=1e-2))

    # Check the O peaks
    o_feat = features[desc.get_location(("O"))]
    o_peak_indices = find_peaks(o_feat, prominence=1)[0]
    o_peak_locs = x[o_peak_indices]
    o_peak_ints = o_feat[o_peak_indices]
    self.assertTrue(np.allclose(o_peak_locs, [8], rtol=0, atol=1e-2))
    self.assertTrue(np.allclose(o_peak_ints, [1], rtol=0, atol=1e-2))

    # Check that everything else is zero
    features[desc.get_location(("H"))] = 0
    features[desc.get_location(("O"))] = 0
    self.assertEqual(features.sum(), 0)
Example 12: test_k2_peaks_finite
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import find_peaks [as alias]
def test_k2_peaks_finite(self):
    """Tests the correct peak locations and intensities are found for the
    k=2 term in finite systems.
    """
    desc = MBTR(
        species=[1, 8],
        k2={
            "geometry": {"function": "distance"},
            "grid": {"min": -1, "max": 3, "sigma": 0.5, "n": 1000},
            "weighting": {"function": "unity"},
        },
        normalize_gaussians=False,
        periodic=False,
        flatten=True,
        sparse=False
    )
    features = desc.create(H2O)[0, :]
    pos = H2O.get_positions()
    x = desc.get_k2_axis()

    # Check the H-H peaks
    hh_feat = features[desc.get_location(("H", "H"))]
    hh_peak_indices = find_peaks(hh_feat, prominence=0.5)[0]
    hh_peak_locs = x[hh_peak_indices]
    hh_peak_ints = hh_feat[hh_peak_indices]
    self.assertTrue(np.allclose(hh_peak_locs, [np.linalg.norm(pos[0] - pos[2])], rtol=0, atol=1e-2))
    self.assertTrue(np.allclose(hh_peak_ints, [1], rtol=0, atol=1e-2))

    # Check the O-H peaks
    ho_feat = features[desc.get_location(("H", "O"))]
    ho_peak_indices = find_peaks(ho_feat, prominence=0.5)[0]
    ho_peak_locs = x[ho_peak_indices]
    ho_peak_ints = ho_feat[ho_peak_indices]
    self.assertTrue(np.allclose(ho_peak_locs, np.linalg.norm(pos[0] - pos[1]), rtol=0, atol=1e-2))
    self.assertTrue(np.allclose(ho_peak_ints, [2], rtol=0, atol=1e-2))

    # Check that everything else is zero
    features[desc.get_location(("H", "H"))] = 0
    features[desc.get_location(("H", "O"))] = 0
    self.assertEqual(features.sum(), 0)
Example 13: panPeakDetect
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import find_peaks [as alias]
def panPeakDetect(detection, fs):
    min_distance = int(0.25*fs)

    peaks, _ = signal.find_peaks(detection, distance=min_distance)

    signal_peaks = []
    noise_peaks = []

    SPKI = 0.0
    NPKI = 0.0

    threshold_I1 = 0.0
    threshold_I2 = 0.0

    RR_missed = 0
    index = 0
    indexes = []

    missed_peaks = []

    for peak in peaks:
        if detection[peak] > threshold_I1:
            # peak accepted as a QRS candidate; update the running signal level
            signal_peaks.append(peak)
            indexes.append(index)
            SPKI = 0.125*detection[signal_peaks[-1]] + 0.875*SPKI
            if RR_missed != 0:
                # search back for a missed beat if the RR interval grew too long
                if signal_peaks[-1] - signal_peaks[-2] > RR_missed:
                    missed_section_peaks = peaks[indexes[-2]+1:indexes[-1]]
                    missed_section_peaks2 = []
                    for missed_peak in missed_section_peaks:
                        if missed_peak - signal_peaks[-2] > min_distance and signal_peaks[-1] - missed_peak > min_distance and detection[missed_peak] > threshold_I2:
                            missed_section_peaks2.append(missed_peak)
                    if len(missed_section_peaks2) > 0:
                        missed_peak = missed_section_peaks2[np.argmax(detection[missed_section_peaks2])]
                        missed_peaks.append(missed_peak)
                        signal_peaks.append(signal_peaks[-1])
                        signal_peaks[-2] = missed_peak
        else:
            # peak classified as noise; update the running noise level
            noise_peaks.append(peak)
            NPKI = 0.125*detection[noise_peaks[-1]] + 0.875*NPKI

        # adaptive thresholds derived from the signal and noise levels
        threshold_I1 = NPKI + 0.25*(SPKI-NPKI)
        threshold_I2 = 0.5*threshold_I1

        if len(signal_peaks) > 8:
            RR = np.diff(signal_peaks[-9:])
            RR_ave = int(np.mean(RR))
            RR_missed = int(1.66*RR_ave)

        index = index + 1

    signal_peaks.pop(0)

    return signal_peaks
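In the detector this comes from, `detection` is the moving-window-integrated signal of the Pan-Tompkins pipeline; that preprocessing is not shown here. A rough standalone sketch that feeds the function a synthetic train of QRS-like bumps instead (the signal below is an assumption for illustration, not real ECG):

import numpy as np
from scipy import signal

fs = 250
t = np.arange(10 * fs) / fs
detection = np.zeros_like(t)
for beat in np.arange(0.5, 10, 0.8):              # one synthetic "QRS" bump every 0.8 s
    detection += np.exp(-((t - beat) ** 2) / (2 * 0.02 ** 2))
qrs = panPeakDetect(detection, fs)
print(len(qrs), qrs[:3])                          # detected beat indices in samples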
Example 14: PitchTimeAuditory
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import find_peaks [as alias]
def PitchTimeAuditory(x, iBlockLength, iHopLength, f_s):
    # initialize
    iNumOfBlocks = math.ceil(x.size / iHopLength)
    f = np.zeros(iNumOfBlocks)
    f_max = 2000
    iNumBands = 20
    fLengthLpInS = 0.001
    iLengthLp = math.ceil(fLengthLpInS * f_s)

    # compute time stamps
    t = (np.arange(0, iNumOfBlocks) * iHopLength + (iBlockLength / 2)) / f_s

    # apply filterbank
    X = ToolGammatoneFb(x, f_s, iNumBands)

    # half wave rectification
    X[X < 0] = 0

    # smooth the results with a moving average filter
    b = np.ones(iLengthLp) / iLengthLp
    X = filtfilt(b, 1, X)

    for n in range(0, iNumOfBlocks):
        eta_min = int(round(f_s / f_max))
        afSumCorr = np.zeros(iBlockLength - 1)
        x_tmp = np.zeros(iBlockLength)
        i_start = n * iHopLength
        i_stop = np.min([x.size - 1, i_start + iBlockLength - 1])

        # compute ACF per band and summarize
        for k in range(0, iNumBands):
            # get current block
            if X[k, np.arange(i_start, i_stop + 1)].sum() < 1e-20:
                continue
            else:
                x_tmp[np.arange(0, i_stop - i_start + 1)] = X[k, np.arange(i_start, i_stop + 1)]

            afCorr = np.correlate(x_tmp, x_tmp, "full") / np.dot(x_tmp, x_tmp)

            # aggregate bands with simple sum before peak picking
            afSumCorr += afCorr[np.arange(iBlockLength, afCorr.size)]

        if afSumCorr.sum() < 1e-20:
            continue

        # find the highest local maximum
        iPeaks = find_peaks(afSumCorr, height=0)
        if iPeaks[0].size:
            eta_min = np.max([eta_min, iPeaks[0][0] - 1])
        f[n] = np.argmax(afSumCorr[np.arange(eta_min + 1, afSumCorr.size)]) + 1

        # convert to Hz
        f[n] = f_s / (f[n] + eta_min + 1)

    return (f, t)
Example 15: computeNoveltyFunction
# Required import: from scipy import signal [as alias]
# Or: from scipy.signal import find_peaks [as alias]
def computeNoveltyFunction(cNoveltyName, afAudioData, f_s, afWindow=None, iBlockLength=4096, iHopLength=512):
    # compute window function for FFT
    if afWindow is None:
        afWindow = ToolComputeHann(iBlockLength)

    assert(afWindow.shape[0] == iBlockLength), "parameter error: invalid window dimension"

    # mypackage = __import__(".Novelty" + cNoveltyName, package="pyACA")
    hNoveltyFunc = getattr(pyACA, "Novelty" + cNoveltyName)

    # initialization
    fLengthLpInS = 0.3
    iLengthLp = np.max([2, math.ceil(fLengthLpInS * f_s / iHopLength)])

    # pre-processing
    afAudioData = ToolPreprocAudio(afAudioData, iBlockLength)

    # in the real world, we would do this block by block...
    [f, t, X] = spectrogram(afAudioData,
                            f_s,
                            afWindow,
                            iBlockLength,
                            iBlockLength - iHopLength,
                            iBlockLength,
                            False,
                            True,
                            'spectrum')

    # scale the same as for matlab
    X = np.sqrt(X / 2)

    # novelty function
    d = hNoveltyFunc(X, f_s)

    # smooth novelty function
    b = np.ones(10) / 10
    d = filtfilt(b, 1, d)
    d[d < 0] = 0

    # compute threshold
    b = np.ones(iLengthLp) / iLengthLp
    G_T = .5 * np.mean(d[np.arange(1, d.shape[0])]) + filtfilt(b, 1, d)

    # find local maxima above the threshold
    iPeaks = find_peaks(d - G_T, height=0)

    return (d, t, iPeaks[0])
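The pyACA helpers (ToolComputeHann, ToolPreprocAudio, the Novelty* functions) are not shown above, so the full pipeline is not reproducible from this snippet alone. The final peak-picking step, however, is generic; a small standalone sketch of the same adaptive-threshold idea on a synthetic novelty curve (the impulse positions and filter length are arbitrary assumptions):

import numpy as np
from scipy.signal import find_peaks, filtfilt

d = np.zeros(500)
d[[50, 180, 320, 430]] = 1.0                      # synthetic onset impulses
d = filtfilt(np.ones(10) / 10, 1, d)              # smoothed novelty function
d[d < 0] = 0
iLengthLp = 20
G_T = 0.5 * np.mean(d[1:]) + filtfilt(np.ones(iLengthLp) / iLengthLp, 1, d)
onsets = find_peaks(d - G_T, height=0)[0]
print(onsets)                                     # local maxima that exceed the adaptive threshold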