This article collects typical usage examples of the Python method obspy.core.Stream.extend. If you are wondering exactly what Stream.extend does, how to use it, or what calling code looks like in practice, the curated examples below should help. You may also want to look further into the containing class, obspy.core.Stream.
A total of 7 code examples of Stream.extend are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your rating helps the system recommend better Python code samples.
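Before the extracted examples, here is a minimal self-contained sketch of what Stream.extend does. It is our own illustration (it uses ObsPy's bundled example data via read()) and is not taken from the examples below.
# Minimal illustration of Stream.extend (our sketch, not from the examples below).
from obspy import Stream, read

st = Stream()            # start with an empty Stream
st.extend(read())        # read() without arguments returns ObsPy's three-trace example Stream
st.extend([read()[0]])   # a plain list of Trace objects is accepted as well
print(len(st))           # -> 4; extend() only appends, it never merges overlapping data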
Example 1: get_waveforms
# Required import: from obspy.core import Stream [as alias]
# Or alternatively: from obspy.core.Stream import extend [as alias]
def get_waveforms():
    events = get_events()
    client = ArcClient()
    wforms = Stream()
    for event in events:
        t = event.preferred_origin().time
        args = seed_id.split('.') + [t + 5 * 60, t + 14 * 60]
        wforms.extend(client.getWaveform(*args))
    wforms.decimate(int(round(wforms[0].stats.sampling_rate)) // 5,
                    no_filter=True)
    wforms.write(wavname, wavformat)
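In the excerpt above the decimation factor is derived from the sampling rate so that the output ends up at roughly 5 Hz, and no_filter=True is needed because ObsPy's automatic anti-alias filter is only designed for decimation factors up to 16. The short sketch below reproduces that arithmetic on ObsPy's 100 Hz example data; it is our own illustration, not part of the original script.
# Our illustration of the decimation arithmetic used in get_waveforms above.
from obspy import read

st = read()                                          # example data, 100 Hz
factor = int(round(st[0].stats.sampling_rate)) // 5  # -> 20, i.e. decimate to 5 Hz
st.decimate(factor, no_filter=True)                  # no_filter=True skips the automatic anti-alias lowpass
print(st[0].stats.sampling_rate)                     # -> 5.0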
Example 2: get_waveforms
# Required import: from obspy.core import Stream [as alias]
# Or alternatively: from obspy.core.Stream import extend [as alias]
def get_waveforms():
    events = get_events()
    client = ArcClient(**client_kwargs)
    wforms = Stream()
    for i, event in enumerate(events):
        print('Fetch data for event no. %d' % (i + 1))
        t = event.preferred_origin().time
        for sta in stations:
            args = (net, sta, loc, cha, t - 10, t + 220)
            try:
                stream = client.getWaveform(*args)
            except Exception:
                print('no data for %s' % (args,))
                continue
            sr = stream[0].stats.sampling_rate
            stream.decimate(int(sr) // 20, no_filter=True)
            for tr in stream:
                del tr.stats.mseed
            stream.merge()
            wforms.extend(stream)
    wforms.write(wavname, wavformat)
    return wforms
Example 3: Client
# Required import: from obspy.core import Stream [as alias]
# Or alternatively: from obspy.core.Stream import extend [as alias]
client = Client("http://10.153.82.3:8080", timeout=60)

st = Stream()
num_stations = 0
exceptions = []
for station in STATIONS:
    try:
        # we request 180 s more at start and end and cut them off later to avoid
        # a false trigger due to the tapering during instrument correction
        tmp = client.waveform.getWaveform(NET, station, "", CHANNEL, T1 - 180,
                                          T2 + 180, getPAZ=True,
                                          getCoordinates=True)
    except Exception, e:
        exceptions.append("%s: %s" % (e.__class__.__name__, e))
        continue
    st.extend(tmp)
    num_stations += 1
st.merge(-1)
st.sort()

summary = []
summary.append("#" * 79)
summary.append("######## %s --- %s ########" % (T1, T2))
summary.append("#" * 79)
summary.append(st.__str__(extended=True))
if exceptions:
    summary.append("#" * 33 + " Exceptions " + "#" * 33)
    summary += exceptions
    summary.append("#" * 79)
trig = []
Example 4: corr_trace_fun
# Required import: from obspy.core import Stream [as alias]
# Or alternatively: from obspy.core.Stream import extend [as alias]
def corr_trace_fun(signals, comb=[], normal=True,
                   parallel=True, processes=None):
    """ Correlate Traces according to the given combinations

    The `corr_trace_fun` correlates the Traces contained in the passed
    :class:`~obspy.core.trace.Stream` object according to the list of
    combination tuples given in input. It does the job asynchronously,
    instantiating as many processes as there are cores available on the
    hosting machine.
    If traces do not share the same starttime, the correlation trace is
    shifted by fractions of a sample such that time alignment is obtained
    precisely at the sample 1971-01-01T00:00:00Z. If there is no overlap
    between the traces, this time might not be in the stream.

    :type signals: :class:`~obspy.core.stream.Stream`
    :param signals: The container for the Traces that we want to correlate
    :type comb: list, optional
    :param comb: List of combinations that must be calculated
    :type normal: bool, optional
    :param normal: Normalization flag (see
        :func:`~miic.core.corr_fun.conv_traces` for details)
    :type parallel: bool (Default: True)
    :param parallel: Whether the correlation is run in parallel or not
    :type processes: int
    :param processes: Number of processes to start (if None it will be equal
        to the number of cores available on the hosting machine)

    :rtype: :class:`~obspy.core.stream.Stream`
    :return: **corrData**: The resulting object containing the correlation
        data and their meta-information, obtained as described in the
        function :func:`~miic.core.corr_fun.conv_traces`
    """
    if not isinstance(signals, Stream):
        raise TypeError("signal must be an obspy Stream object.")

    corrData = Stream()
    nSignal = signals.count()

    if nSignal == 0:
        print "Empty stream!!"
        return corrData

    if (nSignal == 1) and not (comb == [(1, 1)]):
        print "Single trace. No cross correlation"
        return corrData

    if comb == []:
        comb = [(k, i) for k in range(nSignal) for i in range(k + 1, nSignal)]

    if not parallel:
        dc = _doCorr(signals, normal)
        corrData.extend(map(dc, comb))
    else:
        if processes == 0:
            processes = None
        p = Pool(processes=processes)
        p.map_async(_doCorr(signals, normal),
                    comb,
                    callback=_AppendST(corrData))
        p.close()
        p.join()

    return corrData
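A hypothetical way to call corr_trace_fun on ObsPy's example data is sketched below. The import path miic.core.corr_fun is an assumption based on the docstring's cross-references, the function additionally relies on the private helpers _doCorr and _AppendST from the same module, and the serial call with parallel=False is our choice for the illustration.
# Hypothetical usage sketch; the import path is assumed from the docstring
# references and is not confirmed by this excerpt.
from obspy import read
from miic.core.corr_fun import corr_trace_fun  # assumed module location

signals = read()   # ObsPy's three-trace example Stream
# With comb=[], all unique trace pairs (0, 1), (0, 2) and (1, 2) are correlated.
corr_stream = corr_trace_fun(signals, comb=[], normal=True, parallel=False)
print(corr_stream)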
Example 5: usarray_read
# Required import: from obspy.core import Stream [as alias]
# Or alternatively: from obspy.core.Stream import extend [as alias]
def usarray_read(fname):
    """ Read the BAM US-Array lbv data format used on Mike-2 test specimen.

    Read the BAM US-Array lbv data format used on Mike-2 test specimen into a
    stream object.
    As there is no obvious station (or any other) information in the data file
    and the parameters are not supposed to change, they are hardcoded here.

    :type fname: string
    :param fname: Path to the file containing the data to be read
        (WITHOUT EXTENSION); the extensions .lbv and .hdr will be added
        automatically
    :rtype: :class:`~obspy.core.Stream` object
    :return: **st**: obspy.core.Stream object
        Obspy stream object containing the data
    """
    # filenames
    lbvfilename = fname + '.lbv'
    hdrfilename = fname + '.hdr'

    # initialise
    st = Stream()
    tr = Trace()
    # tr = SacIO()

    # static parameters
    t = os.path.getmtime(hdrfilename)
    tt = datetime.datetime.fromtimestamp(t)
    tr.stats['starttime'] = UTCDateTime(tt.year, tt.month, tt.day, tt.hour,
                                        tt.minute, tt.second, tt.microsecond)
    tr.stats['network'] = 'BAM-USArray'
    tr.stats['channel'] = 'z'

    # reading header from file
    fh = open(hdrfilename, 'r')
    while True:
        line = fh.readline()
        if line.__len__() < 1:
            break
        line = line.rstrip()
        if line.find('PK') > -1:
            parts = re.split(':', line)
            tr.stats['location'] = parts[1].lstrip()
        if line.find('transceivers') > -1:
            parts = re.split(':', line)
            ntra = int(parts[1].lstrip())
            traco = np.zeros((ntra, 3), float)
            for i in range(ntra):
                coordstr = fh.readline().split()
                for j in range(3):
                    traco[i, j] = float(coordstr[j])
        if line.find('measurements') > -1:
            parts = re.split(':', line)
            nmeas = int(parts[1].lstrip())
            measco = np.zeros((nmeas, 2), int)
            for i in range(nmeas):
                configstr = fh.readline().split()
                for j in range(2):
                    measco[i, j] = float(configstr[j])
        if line.find('samples') > -1:
            parts = re.split(':', line)
            tr.stats['npts'] = int(parts[1].lstrip())
        if line.find('samplefreq') > -1:
            parts = re.split(':', line)
            tr.stats['sampling_rate'] = int(parts[1].lstrip())
    fh.close()

    # reading data from file
    fd = open(lbvfilename, 'rb')
    datatype = '>i2'
    read_data = np.fromfile(file=fd, dtype=datatype)
    fd.close()

    # sort and store traces (coordinates are stored as x, y, z in columns 0-2
    # of traco)
    for i in range(nmeas):
        # receiver number stored as station name
        tr.stats['station'] = str(measco[i, 1])
        # receiver coords (storing not yet implemented)
        stla = traco[measco[i, 1] - 1, 0]  # x
        stlo = traco[measco[i, 1] - 1, 1]  # y
        stel = traco[measco[i, 1] - 1, 2]  # z
        # transmitter number stored as event name (storing not yet implemented)
        kevnm = str(measco[i, 0])
        # transmitter coords (storing not yet implemented)
        evla = traco[measco[i, 0] - 1, 0]  # x
        evlo = traco[measco[i, 0] - 1, 1]  # y
        evdp = traco[measco[i, 0] - 1, 2]  # z
        tr.data = read_data[i * tr.stats.npts:(i + 1) * tr.stats.npts]
        # append a copy so that later modifications of tr do not change the
        # traces already stored in the stream
        st.extend([tr.copy()])
        # plot 1 trace for test purposes
        # if i == 20:
        #     tr.plot()
        #     print('plot done')
    return st
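A hypothetical call of the reader above; the base file name is invented and only illustrates that the '.lbv' and '.hdr' extensions are appended by the function itself.
# Hypothetical usage; 'mike2_shot001' is an invented base name (no extension).
st = usarray_read('mike2_shot001')   # reads mike2_shot001.lbv and mike2_shot001.hdr
print(st)
st.plot()  # quick visual check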
Example 6: kutec_read
# Required import: from obspy.core import Stream [as alias]
# Or alternatively: from obspy.core.Stream import extend [as alias]
#......... part of the code is omitted here .........
            keys[keyval[0]]['Stunden'] = int(keyval[6])
            keys[keyval[0]]['Minuten'] = int(keyval[7])
            keys[keyval[0]]['Sekunden'] = float(keyval[8])
            tr.stats['starttime'] = UTCDateTime(keys[keyval[0]]['Jahr'], \
                                                keys[keyval[0]]['Monat'], \
                                                keys[keyval[0]]['Tag'], \
                                                keys[keyval[0]]['Stunden'], \
                                                keys[keyval[0]]['Minuten'], \
                                                keys[keyval[0]]['Sekunden'])
        elif keyval[0] == 'CD':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['dx'] = float(keyval[3])
            tr.stats['delta'] = keys[keyval[0]]['dx']
            keys[keyval[0]]['kalibiert'] = int(keyval[4])
            if keys[keyval[0]]['kalibiert'] != 1:
                print "%s %s = %d not implemented." % \
                    (keyval[0], 'kalibiert',
                     keys[keyval[0]]['kalibiert'])
                break
            keys[keyval[0]]['EinheitLang'] = int(keyval[5])
            keys[keyval[0]]['Einheit'] = keyval[6]
            if keys[keyval[0]]['Version'] == 2:
                keys[keyval[0]]['Reduktion'] = int(keyval[7])
                keys[keyval[0]]['InMultiEvents'] = int(keyval[8])
                keys[keyval[0]]['SortiereBuffer'] = int(keyval[9])
                keys[keyval[0]]['x0'] = float(keyval[10])
                keys[keyval[0]]['PretriggerVerwendung'] = int(keyval[11])
            if keys[keyval[0]]['Version'] == 1:
                keys[keyval[0]]['Reduktion'] = ''
                keys[keyval[0]]['InMultiEvents'] = ''
                keys[keyval[0]]['SortiereBuffer'] = ''
                keys[keyval[0]]['x0'] = ''
                keys[keyval[0]]['PretriggerVerwendung'] = 0
        elif keyval[0] == 'CR':
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['Transformieren'] = int(keyval[3])
            keys[keyval[0]]['Faktor'] = float(keyval[4])
            keys[keyval[0]]['Offset'] = float(keyval[5])
            keys[keyval[0]]['Kalibriert'] = int(keyval[6])
            keys[keyval[0]]['EinheitLang'] = int(keyval[7])
            keys[keyval[0]]['Einheit'] = keyval[8]
        elif keyval[0] == 'CN':  # station names
            keys[keyval[0]] = {}
            keys[keyval[0]]['Version'] = int(keyval[1])
            keys[keyval[0]]['Lang'] = int(keyval[2])
            keys[keyval[0]]['IndexGruppe'] = int(keyval[3])
            keys[keyval[0]]['IndexBit'] = int(keyval[5])
            keys[keyval[0]]['NameLang'] = int(keyval[6])
            keys[keyval[0]]['Name'] = keyval[7]
            keys[keyval[0]]['KommLang'] = int(keyval[8])
            keys[keyval[0]]['Kommentar'] = keyval[9]
        else:
            keys[keyval[0]] = {}
            keys[keyval[0]]['KeyString'] = keyval[1:]

    # NT key is beginning of measurement (starting of measurement unit)
    # keys['Cb']['Addzeit'] needs to be added to obtain the absolute trigger
    # time
    tr.stats['starttime'] += keys['Cb']['Addzeit']

    # Adjust starttime according to pretrigger (There is some uncertainty
    # about the CD key) to get relative trigger time
    # for CD:Version == 1 always use Cb:x0
    # for CD:Version == 2 only use Cb:x0 if CD:PretriggerVerwendung == 1
    if keys['CD']['Version'] == 1 or \
       (keys['CD']['Version'] == 2 and
            keys['CD']['PretriggerVerwendung'] == 1):
        tr.stats['starttime'] += keys['Cb']['x0']

    if 'CR' in keys:
        if keys['CR']['Transformieren']:
            tr.data = tr.data * keys['CR']['Faktor'] + keys['CR']['Offset']

    f.close()
    # ### Channel naming
    tr.stats['network'] = 'KU'
    tr.stats['location'] = ''
    # ### Pre-20120619 naming convention to extract the station name from the
    # filename
    # tr.stats['station'] = fname[-12:-7]
    # ### Now take the station name from the CN key
    tr.stats['station'] = keys['CN']['Name'].replace('_', '')
    # ### or construct a name from the key that is consistent with the old
    # filename-generated one
    # ### This is very likely to cause a problem sooner or later.
    # tr.stats['station'] = 'MK%03d' % int(keys['CN']['Name'].split('_')[-1])
    # tr.stats['station'] = keys['CN']['Name'].replace('_','')

    st = Stream()
    st.extend([tr])
    return st
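Since the beginning of kutec_read is omitted above, its exact signature is not visible; assuming it takes the path of a single KUT-EC measurement file, a hypothetical call could look like this.
# Hypothetical usage; the single-path signature and the file name are assumptions,
# because the beginning of kutec_read is not shown in the excerpt.
st = kutec_read('/path/to/kutec_measurement_file')
print(st)  # one-trace Stream with network 'KU' and the station taken from the CN key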
Example 7: Stream()
# Required import: from obspy.core import Stream [as alias]
# Or alternatively: from obspy.core.Stream import extend [as alias]
# search given timespan one hour at a time, set initial T1 one hour earlier
T1 = START - (60 * 60 * 1)
while T1 < END:
    T1 += (60 * 60 * 1)
    T2 = T1 + (60 * 60 * 1)

    st = Stream()
    num_stations = 0
    for station in STATIONS:
        try:
            # we request 60s more at start and end and cut them off later to avoid
            # a false trigger due to the tapering during instrument correction
            tmp = client.waveform.getWaveform(NET, station, "", CHANNEL, T1 - 60,
                                              T2 + 60, getPAZ=True,
                                              getCoordinates=True)
            st.extend(tmp)
            num_stations += 1
        except Exception, e:
            if "No waveform data available" in str(e):
                continue
            raise
    st.merge(-1)
    st.sort()

    summary = []
    summary.append("#" * 79)
    summary.append("######## %s --- %s ########" % (T1, T2))
    summary.append("#" * 79)
    summary.append(str(st))
    if not st: