本文整理汇总了Python中obspy.core.Stream.filter方法的典型用法代码示例。如果您正苦于以下问题:Python Stream.filter方法的具体用法?Python Stream.filter怎么用?Python Stream.filter使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类obspy.core.Stream
的用法示例。
在下文中一共展示了Stream.filter方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: axisem2mseed
# 需要导入模块: from obspy.core import Stream [as 别名]
# 或者: from obspy.core.Stream import filter [as 别名]
def axisem2mseed(path):
    """
    Convert AXISEM ``.dat`` ASCII seismograms found in *path* to MSEED.

    Each ``*.dat`` file is expected to be named ``<station>_<network>_..._<comp>.dat``
    and to contain two columns: time and displacement.  Output files are
    written to a new ``<path>/MSEED`` directory as ``dis.<station>..<chan>``.
    Optional source-time-function convolution and band filtering are
    controlled by the module-level ``test_param`` dict
    (keys: 'convSTF', 'halfduration', 'filter', 'fmax', 'fmin').

    :param path: directory containing the ``.dat`` files.
    """
    global test_param
    mseed_dir = os.path.join(path, 'MSEED')
    if not os.path.isdir(mseed_dir):
        os.mkdir(mseed_dir)
    else:
        # refuse to overwrite an existing conversion
        print('Following directory already exists:')
        print(mseed_dir)
        sys.exit()
    # all traces start at the epoch plus the first sample time in the file
    t = UTCDateTime(0)
    for dat_file in glob.iglob(os.path.join(path, '*.dat')):
        # filename encodes station, network and component
        base = os.path.basename(dat_file)
        stationID = base.split('_')[0]
        networkID = base.split('_')[1]
        chan = base.split('_')[-1].split('.')[0]
        # map raw component letters to broadband channel codes
        chan = {'E': 'BHE', 'N': 'BHN', 'Z': 'BHZ'}.get(chan, chan)
        try:
            dat = np.loadtxt(dat_file)
            npts = len(dat[:, 0])
            stats = {'network': networkID,
                     'station': stationID,
                     'location': '',
                     'channel': chan,
                     'npts': npts,
                     # sampling rate derived from the time column span
                     'sampling_rate': (npts - 1.) / (dat[-1, 0] - dat[0, 0]),
                     'starttime': t + dat[0, 0],
                     'mseed': {'dataquality': 'D'}}
            st = Stream(Trace(data=dat[:, 1], header=stats))
            if test_param['convSTF'] == 'Y':
                # Gaussian STF width from the half duration
                sigma = test_param['halfduration'] / np.sqrt(2.) / 3.5
                convSTF(st, sigma=sigma)
            if test_param['filter'] == 'Y':
                # cascade the 2-corner filters to sharpen the effective
                # roll-off: 4x lowpass at fmax, 2x highpass at fmin
                for _ in range(4):
                    st.filter('lowpass', freq=test_param['fmax'], corners=2)
                for _ in range(2):
                    st.filter('highpass', freq=test_param['fmin'], corners=2)
            fname = os.path.join(mseed_dir, 'dis.' + stationID + '..' + chan)
            st.write(fname, format='MSEED')
        except Exception as e:
            # best-effort conversion: report the failing trace and continue
            print(e)
            print(networkID + '.' + stationID + '.' + chan + '.mseed')
            print('-------------------------------------------------')
示例2: cosTaper
# 需要导入模块: from obspy.core import Stream [as 别名]
# 或者: from obspy.core.Stream import filter [as 别名]
# Fragment of an alert/trigger script: append collected exceptions to the
# report, then preprocess the stream and run a network coincidence trigger.
# NOTE(review): `exceptions`, `summary`, `st`, `PAR`, `T1`, `T2`, `PLOTDIR`
# and `num_stations` are defined outside this excerpt.
if exceptions:
    # separate the exception section visually in the text report
    summary.append("#" * 33 + " Exceptions " + "#" * 33)
    summary += exceptions
    summary.append("#" * 79)
trig = []
mutt = []
if st:
    # preprocessing, backup original data for plotting at end
    st.merge(0)
    st.detrend("linear")
    for tr in st:
        # cosine taper (1% at each end) to suppress filter edge effects
        tr.data = tr.data * cosTaper(len(tr), 0.01)
    #st.simulate(paz_remove="self", paz_simulate=cornFreq2Paz(1.0), remove_sensitivity=False)
    st.sort()
    st.filter("bandpass", freqmin=PAR.LOW, freqmax=PAR.HIGH, corners=1, zerophase=True)
    st.trim(T1, T2)
    # keep an unnormalized copy for the triggering step
    st_trigger = st.copy()
    st.normalize(global_max=False)
    # do the triggering
    trig = coincidenceTrigger("recstalta", PAR.ON, PAR.OFF, st_trigger,
            thr_coincidence_sum=PAR.MIN_STATIONS,
            max_trigger_length=PAR.MAXLEN, trigger_off_extension=PAR.ALLOWANCE,
            details=True, sta=PAR.STA, lta=PAR.LTA)
    for t in trig:
        # one summary line per trigger: time, duration, peak CFT, stations
        info = "%s %ss %s %s" % (t['time'].strftime("%Y-%m-%dT%H:%M:%S"), ("%.1f" % t['duration']).rjust(4), ("%i" % t['cft_peak_wmean']).rjust(3), "-".join(t['stations']))
        summary.append(info)
        # plot a window around each detected event
        tmp = st.slice(t['time'] - 1, t['time'] + t['duration'])
        outfilename = "%s/%s_%.1f_%i_%s-%s_%s.png" % (PLOTDIR, t['time'].strftime("%Y-%m-%dT%H:%M:%S"), t['duration'], t['cft_peak_wmean'], len(t['stations']), num_stations, "-".join(t['stations']))
        tmp.plot(outfile=outfilename)
示例3: test_coincidenceTrigger
# 需要导入模块: from obspy.core import Stream [as 别名]
# 或者: from obspy.core.Stream import filter [as 别名]
def test_coincidenceTrigger(self):
    """
    Test network coincidence trigger.

    Runs coincidenceTrigger on four cut BW.UH* traces under several
    configurations (plain, station selection, weighting, max length)
    and checks trigger times, durations, station lists and coincidence
    sums.  (This excerpt is truncated; further sub-tests follow in the
    full source.)
    """
    st = Stream()
    files = ["BW.UH1._.SHZ.D.2010.147.cut.slist.gz",
             "BW.UH2._.SHZ.D.2010.147.cut.slist.gz",
             "BW.UH3._.SHZ.D.2010.147.cut.slist.gz",
             "BW.UH4._.EHZ.D.2010.147.cut.slist.gz"]
    for filename in files:
        filename = os.path.join(self.path, filename)
        st += read(filename)
    # some prefiltering used for UH network
    st.filter('bandpass', freqmin=10, freqmax=20)
    # 1. no weighting, no stations specified, good settings
    # => 3 events, no false triggers
    # for the first test we make some additional tests regarding types
    res = coincidenceTrigger("recstalta", 3.5, 1, st.copy(), 3, sta=0.5,
                             lta=10)
    self.assertTrue(isinstance(res, list))
    self.assertTrue(len(res) == 3)
    # every trigger dict must carry these keys with these value types
    expected_keys = ['time', 'coincidence_sum', 'duration', 'stations',
                     'trace_ids']
    expected_types = [UTCDateTime, float, float, list, list]
    for item in res:
        self.assertTrue(isinstance(item, dict))
        for key, _type in zip(expected_keys, expected_types):
            self.assertTrue(key in item)
            self.assertTrue(isinstance(item[key], _type))
    self.assertTrue(res[0]['time'] > UTCDateTime("2010-05-27T16:24:31"))
    self.assertTrue(res[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
    self.assertTrue(4.2 < res[0]['duration'] < 4.8)
    self.assertTrue(res[0]['stations'] == ['UH3', 'UH2', 'UH1', 'UH4'])
    self.assertTrue(res[0]['coincidence_sum'] == 4)
    self.assertTrue(res[1]['time'] > UTCDateTime("2010-05-27T16:26:59"))
    self.assertTrue(res[1]['time'] < UTCDateTime("2010-05-27T16:27:03"))
    self.assertTrue(3.2 < res[1]['duration'] < 3.7)
    self.assertTrue(res[1]['stations'] == ['UH2', 'UH3', 'UH1'])
    self.assertTrue(res[1]['coincidence_sum'] == 3)
    self.assertTrue(res[2]['time'] > UTCDateTime("2010-05-27T16:27:27"))
    self.assertTrue(res[2]['time'] < UTCDateTime("2010-05-27T16:27:33"))
    self.assertTrue(4.2 < res[2]['duration'] < 4.4)
    self.assertTrue(res[2]['stations'] == ['UH3', 'UH2', 'UH1', 'UH4'])
    self.assertTrue(res[2]['coincidence_sum'] == 4)
    # 2. no weighting, station selection
    # => 2 events, no false triggers
    trace_ids = ['BW.UH1..SHZ', 'BW.UH3..SHZ', 'BW.UH4..EHZ']
    # ignore UserWarnings
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('ignore', UserWarning)
        re = coincidenceTrigger("recstalta", 3.5, 1, st.copy(), 3,
                                trace_ids=trace_ids, sta=0.5, lta=10)
        self.assertTrue(len(re) == 2)
        self.assertTrue(re[0]['time'] > UTCDateTime("2010-05-27T16:24:31"))
        self.assertTrue(re[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
        self.assertTrue(4.2 < re[0]['duration'] < 4.8)
        self.assertTrue(re[0]['stations'] == ['UH3', 'UH1', 'UH4'])
        self.assertTrue(re[0]['coincidence_sum'] == 3)
        self.assertTrue(re[1]['time'] > UTCDateTime("2010-05-27T16:27:27"))
        self.assertTrue(re[1]['time'] < UTCDateTime("2010-05-27T16:27:33"))
        self.assertTrue(4.2 < re[1]['duration'] < 4.4)
        self.assertTrue(re[1]['stations'] == ['UH3', 'UH1', 'UH4'])
        self.assertTrue(re[1]['coincidence_sum'] == 3)
    # 3. weighting, station selection
    # => 3 events, no false triggers
    trace_ids = {'BW.UH1..SHZ': 0.4, 'BW.UH2..SHZ': 0.35,
                 'BW.UH3..SHZ': 0.4, 'BW.UH4..EHZ': 0.25}
    res = coincidenceTrigger("recstalta", 3.5, 1, st.copy(), 1.0,
                             trace_ids=trace_ids, sta=0.5, lta=10)
    self.assertTrue(len(res) == 3)
    self.assertTrue(res[0]['time'] > UTCDateTime("2010-05-27T16:24:31"))
    self.assertTrue(res[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
    self.assertTrue(4.2 < res[0]['duration'] < 4.8)
    self.assertTrue(res[0]['stations'] == ['UH3', 'UH2', 'UH1', 'UH4'])
    self.assertTrue(res[0]['coincidence_sum'] == 1.4)
    self.assertTrue(res[1]['time'] > UTCDateTime("2010-05-27T16:26:59"))
    self.assertTrue(res[1]['time'] < UTCDateTime("2010-05-27T16:27:03"))
    self.assertTrue(3.2 < res[1]['duration'] < 3.7)
    self.assertTrue(res[1]['stations'] == ['UH2', 'UH3', 'UH1'])
    self.assertTrue(res[1]['coincidence_sum'] == 1.15)
    self.assertTrue(res[2]['time'] > UTCDateTime("2010-05-27T16:27:27"))
    self.assertTrue(res[2]['time'] < UTCDateTime("2010-05-27T16:27:33"))
    self.assertTrue(4.2 < res[2]['duration'] < 4.4)
    self.assertTrue(res[2]['stations'] == ['UH3', 'UH2', 'UH1', 'UH4'])
    self.assertTrue(res[2]['coincidence_sum'] == 1.4)
    # 4. weighting, station selection, max_len
    # => 2 events, no false triggers, small event does not overlap anymore
    trace_ids = {'BW.UH1..SHZ': 0.6, 'BW.UH2..SHZ': 0.6}
    # ignore UserWarnings
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('ignore', UserWarning)
        re = coincidenceTrigger("recstalta", 3.5, 1, st.copy(), 1.2,
                                trace_ids=trace_ids,
                                max_trigger_length=0.13, sta=0.5, lta=10)
        self.assertTrue(len(re) == 2)
        self.assertTrue(re[0]['time'] > UTCDateTime("2010-05-27T16:24:31"))
        self.assertTrue(re[0]['time'] < UTCDateTime("2010-05-27T16:24:35"))
        self.assertTrue(0.2 < re[0]['duration'] < 0.3)
        self.assertTrue(re[0]['stations'] == ['UH2', 'UH1'])
        self.assertTrue(re[0]['coincidence_sum'] == 1.2)
#.........这里部分代码省略.........
示例4: Stream
# 需要导入模块: from obspy.core import Stream [as 别名]
# 或者: from obspy.core.Stream import filter [as 别名]
# Fragment of a Python 2 script: fetch 4 hours of vertical-component data for
# a list of Swiss stations, band-filter, run a coincidence trigger and attach
# station coordinates to each trigger.
# NOTE(review): `t`, `client` and `coincidenceTrigger` are defined outside
# this excerpt; the loop at the bottom is truncated here.
t2 = t + 4 * 3600
stations = ["AIGLE", "SENIN", "DIX", "LAUCH", "MMK", "SIMPL"]
st = Stream()
for station in stations:
    try:
        # request [EH]HZ channels with station metadata attached
        tmp = client.getWaveform("CH", station, "", "[EH]HZ", t, t2,
                                 metadata=True)
    except:
        # best-effort: skip stations whose data cannot be fetched
        print station, "---"
        continue
    st += tmp
st.taper()
st.filter("bandpass", freqmin=1, freqmax=20)
triglist = coincidenceTrigger("recstalta", 10, 2, st, 4, sta=0.5, lta=10)
print len(triglist), "events triggered."
for trig in triglist:
    # take coordinates from the first (closest) triggering station
    closest_sta = trig['stations'][0]
    tr = st.select(station=closest_sta)[0]
    trig['latitude'] = tr.stats.coordinates.latitude
    trig['longitude'] = tr.stats.coordinates.longitude
# Wood-Anderson instrument response used for magnitude estimation
paz_wa = {'sensitivity': 2800, 'zeros': [0j], 'gain': 1,
          'poles': [-6.2832-4.7124j, -6.2832+4.7124j]}
for trig in triglist:
    t = trig['time']
    print "#" * 80
示例5: triggerOnset
# 需要导入模块: from obspy.core import Stream [as 别名]
# 或者: from obspy.core.Stream import filter [as 别名]
# Fragment of an alert script: run single-trace triggerOnset on each trace,
# collect (begin, end, station) tuples, then merge waveform and trigger
# streams for plotting and start the coincidence evaluation.
# NOTE(review): `st`, `st_trigger`, `PAR`, `T1` and `T2` come from outside
# this excerpt; the while-loop at the bottom is cut off mid-body.
# do the triggering
trigger_list = []
for tr in st_trigger:
    # label the characteristic-function traces for the combined plot
    tr.stats.channel = "recstalta"
    max_len = PAR.MAXLEN * tr.stats.sampling_rate
    trigger_sample_list = triggerOnset(tr.data, PAR.ON, PAR.OFF, max_len=max_len)
    for on, off in trigger_sample_list:
        # convert sample offsets to absolute times
        begin = tr.stats.starttime + float(on) / tr.stats.sampling_rate
        end = tr.stats.starttime + float(off) / tr.stats.sampling_rate
        trigger_list.append((begin.timestamp, end.timestamp, tr.stats.station))
# sort chronologically by trigger-on time
trigger_list.sort()
# merge waveform and trigger stream for plotting
# the normalizations are done because the triggers have a completely different
# scale and would not be visible in the plot otherwise...
st.filter("bandpass", freqmin=1.0, freqmax=20.0, corners=1, zerophase=True)
st.normalize(global_max=False)
st_trigger.normalize(global_max=True)
st.extend(st_trigger)
# coincidence part, work through sorted trigger list...
mutt = ["mutt", "-s", "UH Alert %s -- %s" % (T1, T2)]
while len(trigger_list) > 1:
    on, off, sta = trigger_list[0]
    stations = set()
    stations.add(sta)
    for i in xrange(1, len(trigger_list)):
        tmp_on, tmp_off, tmp_sta = trigger_list[i]
        if tmp_on < off + PAR.ALLOWANCE:
            stations.add(tmp_sta)
            # allow sets of triggers that overlap only on subsets of all