This article collects and summarizes typical usage examples of the Python class obspy.core.event.Pick. If you are wondering what the Pick class is for, how to use it, or what real-world usage looks like, the curated class examples below should help.
The following shows 15 code examples of the Pick class, sorted roughly by popularity.
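Before diving into the examples, here is a minimal, self-contained sketch of building a Pick by hand; the network/station codes and times are made up for illustration:

from obspy import UTCDateTime
from obspy.core.event import Pick, WaveformStreamID

# A hypothetical manual P pick on channel XX.ABC..HHZ.
pick = Pick()
pick.time = UTCDateTime("2023-01-02T03:04:05.670000Z")
pick.time_errors.uncertainty = 0.02  # seconds
pick.waveform_id = WaveformStreamID(network_code="XX", station_code="ABC",
                                    location_code="", channel_code="HHZ")
pick.phase_hint = "P"
pick.onset = "impulsive"         # one of: emergent, impulsive, questionable
pick.polarity = "positive"       # one of: positive, negative, undecidable
pick.evaluation_mode = "manual"  # or "automatic"
print(pick)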
Example 1: __toPick
def __toPick(parser, pick_el, evaluation_mode):
"""
"""
pick = Pick()
waveform = pick_el.xpath("waveform")[0]
    pick.waveform_id = WaveformStreamID(
network_code=waveform.get("networkCode"),
station_code=waveform.get("stationCode"),
channel_code=waveform.get("channelCode"),
location_code=waveform.get("locationCode"))
pick.time, pick.time_errors = __toTimeQuantity(parser, pick_el, "time")
pick.phase_hint = parser.xpath2obj('phaseHint', pick_el)
onset = parser.xpath2obj('onset', pick_el)
if onset and onset.lower() in ["emergent", "impulsive", "questionable"]:
pick.onset = onset.lower()
# Evaluation mode of a pick is global in the SeisHub Event file format.
pick.evaluation_mode = evaluation_mode
# The polarity needs to be mapped.
polarity = parser.xpath2obj('polarity', pick_el)
pol_map_dict = {'up': 'positive', 'positive': 'positive',
'down': 'negative', 'negative': 'negative',
'undecidable': 'undecidable'}
if polarity and polarity.lower() in pol_map_dict:
pick.polarity = pol_map_dict[polarity.lower()]
    # Convert azimuth to backazimuth
azimuth = __toFloatQuantity(parser, pick_el, "azimuth")
if len(azimuth) == 2 and azimuth[0] and azimuth[1]:
# Convert to backazimuth
pick.backazimuth = (azimuth[0] + 180.0) % 360.0
        pick.backazimuth_errors = azimuth[1]
return pick
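The azimuth handling at the end of Example 1 is plain modular arithmetic; pulled out on its own (a sketch, independent of the SeisHub parser):

def to_backazimuth(azimuth):
    """Flip an azimuth by 180 degrees, wrapping into [0, 360)."""
    return (azimuth + 180.0) % 360.0

assert to_backazimuth(10.0) == 190.0
assert to_backazimuth(350.0) == 170.0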
Example 2: test_clear_method_resets_objects
def test_clear_method_resets_objects(self):
"""
Tests that the clear() method properly resets all objects. Test for
#449.
"""
# Test with basic event object.
e = Event(force_resource_id=False)
e.comments.append(Comment(text="test"))
e.event_type = "explosion"
self.assertEqual(len(e.comments), 1)
self.assertEqual(e.event_type, "explosion")
e.clear()
self.assertEqual(e, Event(force_resource_id=False))
self.assertEqual(len(e.comments), 0)
self.assertEqual(e.event_type, None)
# Test with pick object. Does not really fit in the event test case but
# it tests the same thing...
p = Pick()
p.comments.append(Comment(text="test"))
p.phase_hint = "p"
self.assertEqual(len(p.comments), 1)
self.assertEqual(p.phase_hint, "p")
# Add some more random attributes. These should disappear upon
# cleaning.
p.test_1 = "a"
p.test_2 = "b"
self.assertEqual(p.test_1, "a")
self.assertEqual(p.test_2, "b")
p.clear()
self.assertEqual(len(p.comments), 0)
self.assertEqual(p.phase_hint, None)
self.assertFalse(hasattr(p, "test_1"))
self.assertFalse(hasattr(p, "test_2"))
Example 3: test_clear_method_resets_objects
def test_clear_method_resets_objects(self):
"""
Tests that the clear() method properly resets all objects. Test for
#449.
"""
# Test with basic event object.
e = Event(force_resource_id=False)
e.comments.append(Comment(text="test"))
e.event_type = "explosion"
self.assertEqual(len(e.comments), 1)
self.assertEqual(e.event_type, "explosion")
e.clear()
self.assertEqual(e, Event(force_resource_id=False))
self.assertEqual(len(e.comments), 0)
self.assertEqual(e.event_type, None)
# Test with pick object. Does not really fit in the event test case but
# it tests the same thing...
p = Pick()
p.comments.append(Comment(text="test"))
p.phase_hint = "p"
self.assertEqual(len(p.comments), 1)
self.assertEqual(p.phase_hint, "p")
# Add some more random attributes. These should disappear upon
# cleaning.
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
p.test_1 = "a"
p.test_2 = "b"
# two warnings should have been issued by setting non-default keys
self.assertEqual(len(w), 2)
self.assertEqual(p.test_1, "a")
self.assertEqual(p.test_2, "b")
p.clear()
self.assertEqual(len(p.comments), 0)
self.assertEqual(p.phase_hint, None)
self.assertFalse(hasattr(p, "test_1"))
self.assertFalse(hasattr(p, "test_2"))
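Examples 2 and 3 boil down to a short interactive session; a sketch (the warning count assumes an ObsPy version that, as the test above asserts, warns when a non-default attribute is set on an event object):

import warnings
from obspy.core.event import Comment, Pick

p = Pick(phase_hint="P")
p.comments.append(Comment(text="test"))
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    p.custom_field = "x"  # not a default Pick key, so a warning is issued
print(len(caught))  # 1
p.clear()  # resets comments and phase_hint, drops custom_field entirely
print(p.phase_hint, hasattr(p, "custom_field"))  # None False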
Example 4: test_write_with_extra_tags_and_read
def test_write_with_extra_tags_and_read(self):
"""
Tests that a QuakeML file with additional custom "extra" tags gets
written correctly and that when reading it again the extra tags are
parsed correctly.
"""
filename = os.path.join(self.path, "quakeml_1.2_origin.xml")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
cat = _read_quakeml(filename)
self.assertEqual(len(w), 0)
# add some custom tags to first event:
# - tag with explicit namespace but no explicit ns abbreviation
# - tag without explicit namespace (gets obspy default ns)
# - tag with explicit namespace and namespace abbreviation
my_extra = AttribDict(
{'public': {'value': False,
'namespace': 'http://some-page.de/xmlns/1.0',
'attrib': {'some_attrib': 'some_value',
'another_attrib': 'another_value'}},
'custom': {'value': 'True',
'namespace': 'http://test.org/xmlns/0.1'},
'new_tag': {'value': 1234,
'namespace': 'http://test.org/xmlns/0.1'},
'tX': {'value': UTCDateTime('2013-01-02T13:12:14.600000Z'),
'namespace': 'http://test.org/xmlns/0.1'},
'dataid': {'namespace': 'http://anss.org/xmlns/catalog/0.1',
'type': 'attribute', 'value': '00999999'},
# some nested tags :
'quantity': {'namespace': 'http://some-page.de/xmlns/1.0',
'attrib': {'attrib1': 'attrib_value1',
'attrib2': 'attrib_value2'},
'value': {
'my_nested_tag1': {
'namespace': 'http://some-page.de/xmlns/1.0',
'value': 1.23E10},
'my_nested_tag2': {
'namespace': 'http://some-page.de/xmlns/1.0',
'value': False}}}})
nsmap = {'ns0': 'http://test.org/xmlns/0.1',
'catalog': 'http://anss.org/xmlns/catalog/0.1'}
cat[0].extra = my_extra.copy()
# insert a pick with an extra field
p = Pick()
p.extra = {'weight': {'value': 2,
'namespace': 'http://test.org/xmlns/0.1'}}
cat[0].picks.append(p)
with NamedTemporaryFile() as tf:
tmpfile = tf.name
# write file
cat.write(tmpfile, format='QUAKEML', nsmap=nsmap)
# check contents
with open(tmpfile, 'rb') as fh:
# enforce reproducible attribute orders through write_c14n
obj = etree.fromstring(fh.read()).getroottree()
buf = io.BytesIO()
obj.write_c14n(buf)
buf.seek(0, 0)
content = buf.read()
# check namespace definitions in root element
expected = [b'<q:quakeml',
b'xmlns:catalog="http://anss.org/xmlns/catalog/0.1"',
b'xmlns:ns0="http://test.org/xmlns/0.1"',
b'xmlns:ns1="http://some-page.de/xmlns/1.0"',
b'xmlns:q="http://quakeml.org/xmlns/quakeml/1.2"',
b'xmlns="http://quakeml.org/xmlns/bed/1.2"']
for line in expected:
self.assertIn(line, content)
# check additional tags
expected = [
b'<ns0:custom>True</ns0:custom>',
b'<ns0:new_tag>1234</ns0:new_tag>',
b'<ns0:tX>2013-01-02T13:12:14.600000Z</ns0:tX>',
b'<ns1:public '
b'another_attrib="another_value" '
b'some_attrib="some_value">false</ns1:public>'
]
for line in expected:
self.assertIn(line, content)
# now, read again to test if it's parsed correctly..
cat = _read_quakeml(tmpfile)
# when reading..
# - namespace abbreviations should be disregarded
# - we always end up with a namespace definition, even if it was
# omitted when originally setting the custom tag
            # - custom namespace abbreviations should be attached to the Catalog
self.assertTrue(hasattr(cat[0], 'extra'))
def _tostr(x):
if isinstance(x, bool):
if x:
return str('true')
else:
return str('false')
elif isinstance(x, AttribDict):
for key, value in x.items():
x[key].value = _tostr(value['value'])
#.........some code omitted here.........
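Stripped of the test scaffolding, the pattern Example 4 exercises is: attach an extra AttribDict (each value paired with a namespace) to an event or pick, then pass nsmap when writing. A minimal sketch; the namespace and file name are arbitrary:

from obspy import read_events
from obspy.core.event import Catalog, Event, Pick
from obspy.core.util import AttribDict

ns = "http://test.org/xmlns/0.1"
event = Event()
pick = Pick()
pick.extra = AttribDict({"weight": {"value": 2, "namespace": ns}})
event.picks.append(pick)
Catalog(events=[event]).write("extra_tags.xml", format="QUAKEML",
                              nsmap={"ns0": ns})
# Reading it back preserves the extra field; scalar values return as strings.
cat = read_events("extra_tags.xml")
print(cat[0].picks[0].extra)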
Example 5: _read_single_hypocenter
#.........some code omitted here.........
msg = ("Negative value in XX value of covariance matrix, not "
"setting longitude error (epicentral uncertainties will "
"still be set in origin uncertainty).")
warnings.warn(msg)
else:
raise
o.latitude = y
try:
o.latitude_errors.uncertainty = kilometer2degrees(sqrt(covariance_yy))
except ValueError:
if covariance_yy < 0:
msg = ("Negative value in YY value of covariance matrix, not "
"setting longitude error (epicentral uncertainties will "
"still be set in origin uncertainty).")
warnings.warn(msg)
else:
raise
o.depth = z * 1e3 # meters!
o.depth_errors.uncertainty = sqrt(covariance_zz) * 1e3 # meters!
o.depth_errors.confidence_level = 68
o.depth_type = str("from location")
o.time = time
ou.horizontal_uncertainty = hor_unc
ou.min_horizontal_uncertainty = min_hor_unc
ou.max_horizontal_uncertainty = max_hor_unc
# values of -1 seem to be used for unset values, set to None
for field in ("horizontal_uncertainty", "min_horizontal_uncertainty",
"max_horizontal_uncertainty"):
if ou.get(field, -1) == -1:
ou[field] = None
else:
ou[field] *= 1e3 # meters!
ou.azimuth_max_horizontal_uncertainty = hor_unc_azim
ou.preferred_description = str("uncertainty ellipse")
ou.confidence_level = 68 # NonLinLoc in general uses 1-sigma (68%) level
oq.standard_error = stderr
oq.azimuthal_gap = az_gap
oq.secondary_azimuthal_gap = sec_az_gap
oq.used_phase_count = used_phase_count
oq.used_station_count = used_station_count
oq.associated_phase_count = assoc_phase_count
oq.associated_station_count = assoc_station_count
oq.depth_phase_count = depth_phase_count
oq.ground_truth_level = gt_level
oq.minimum_distance = kilometer2degrees(min_dist)
oq.maximum_distance = kilometer2degrees(max_dist)
oq.median_distance = kilometer2degrees(med_dist)
# go through all phase info lines
for line in phases_lines:
line = line.split()
arrival = Arrival()
o.arrivals.append(arrival)
station = str(line[0])
phase = str(line[4])
arrival.phase = phase
arrival.distance = kilometer2degrees(float(line[21]))
arrival.azimuth = float(line[23])
arrival.takeoff_angle = float(line[24])
arrival.time_residual = float(line[16])
arrival.time_weight = float(line[17])
pick = Pick()
        # network codes are not used by NonLinLoc, so they cannot be known
        # when reading the .hyp file. To conform with the QuakeML standard,
        # set an empty network code.
wid = WaveformStreamID(network_code="", station_code=station)
# have to split this into ints for overflow to work correctly
date, hourmin, sec = map(str, line[6:9])
ymd = [int(date[:4]), int(date[4:6]), int(date[6:8])]
hm = [int(hourmin[:2]), int(hourmin[2:4])]
t = UTCDateTime(*(ymd + hm), strict=False) + float(sec)
pick.waveform_id = wid
pick.time = t
pick.time_errors.uncertainty = float(line[10])
pick.phase_hint = phase
pick.onset = ONSETS.get(line[3].lower(), None)
pick.polarity = POLARITIES.get(line[5].lower(), None)
# try to determine original pick for each arrival
for pick_ in original_picks:
wid = pick_.waveform_id
if station == wid.station_code and phase == pick_.phase_hint:
pick = pick_
break
else:
# warn if original picks were specified and we could not associate
# the arrival correctly
if original_picks:
msg = ("Could not determine corresponding original pick for "
"arrival. "
"Falling back to pick information in NonLinLoc "
"hypocenter-phase file.")
warnings.warn(msg)
event.picks.append(pick)
arrival.pick_id = pick.resource_id
event.scope_resource_ids()
return event
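One detail in Example 5 worth isolating: pick times are built from integer date/hour/minute fields with strict=False, and the (possibly out-of-range) seconds are added afterwards as a float, so values such as hour 24 survive the conversion. A sketch of the same trick:

from obspy import UTCDateTime

ymd = [2006, 7, 11]
hm = [24, 0]   # hour 24 would raise in a strict datetime constructor
sec = 12.25
t = UTCDateTime(*(ymd + hm), strict=False) + sec
print(t)  # 2006-07-12T00:00:12.250000Z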
Example 6: _read_picks
def _read_picks(f, new_event):
"""
Internal pick reader. Use read_nordic instead.
    :type f: file
    :param f: File open in read mode.
    :type new_event: :class:`~obspy.core.event.event.Event`
    :param new_event: Event to associate picks with.
:returns: :class:`~obspy.core.event.event.Event`
"""
f.seek(0)
evtime = new_event.origins[0].time
pickline = []
# Set a default, ignored later unless overwritten
snr = None
for lineno, line in enumerate(f):
if line[79] == '7':
header = line
break
for lineno, line in enumerate(f):
if len(line.rstrip('\n').rstrip('\r')) in [80, 79] and \
line[79] in ' 4\n':
pickline += [line]
for line in pickline:
        if line[18:28].strip() == '':  # If the line is empty, skip it
continue
weight = line[14]
if weight == '_':
phase = line[10:17]
weight = 0
polarity = ''
else:
phase = line[10:14].strip()
polarity = line[16]
if weight == ' ':
weight = 0
polarity_maps = {"": "undecidable", "C": "positive", "D": "negative"}
try:
polarity = polarity_maps[polarity]
except KeyError:
polarity = "undecidable"
# It is valid nordic for the origin to be hour 23 and picks to be hour
# 00 or 24: this signifies a pick over a day boundary.
if int(line[18:20]) == 0 and evtime.hour == 23:
day_add = 86400
pick_hour = 0
elif int(line[18:20]) == 24:
day_add = 86400
pick_hour = 0
else:
day_add = 0
pick_hour = int(line[18:20])
try:
time = UTCDateTime(evtime.year, evtime.month, evtime.day,
pick_hour, int(line[20:22]),
float(line[23:28])) + day_add
except ValueError:
time = UTCDateTime(evtime.year, evtime.month, evtime.day,
int(line[18:20]), pick_hour,
float("0." + line[23:38].split('.')[1])) +\
60 + day_add
            # Add 60 seconds to the time; this copes with the s-file
            # convention of writing seconds as 1-60 rather than the 0-59
            # that datetime objects accept.
if header[57:60] == 'AIN':
ain = _float_conv(line[57:60])
warnings.warn('AIN: %s in header, currently unsupported' % ain)
elif header[57:60] == 'SNR':
snr = _float_conv(line[57:60])
else:
warnings.warn('%s is not currently supported' % header[57:60])
# finalweight = _int_conv(line[68:70])
# Create a new obspy.event.Pick class for this pick
_waveform_id = WaveformStreamID(station_code=line[1:6].strip(),
channel_code=line[6:8].strip(),
network_code='NA')
pick = Pick(waveform_id=_waveform_id, phase_hint=phase,
polarity=polarity, time=time)
try:
pick.onset = onsets[line[9]]
except KeyError:
pass
if line[15] == 'A':
pick.evaluation_mode = 'automatic'
else:
pick.evaluation_mode = 'manual'
# Note these two are not always filled - velocity conversion not yet
# implemented, needs to be converted from km/s to s/deg
# if not velocity == 999.0:
# new_event.picks[pick_index].horizontal_slowness = 1.0 / velocity
if _float_conv(line[46:51]) is not None:
pick.backazimuth = _float_conv(line[46:51])
# Create new obspy.event.Amplitude class which references above Pick
# only if there is an amplitude picked.
if _float_conv(line[33:40]) is not None:
_amplitude = Amplitude(generic_amplitude=_float_conv(line[33:40]),
period=_float_conv(line[41:45]),
#.........some code omitted here.........
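The trickiest part of Example 6 is the day-boundary rule: a Nordic origin at hour 23 with picks stamped hour 00 (or any pick stamped hour 24) belongs to the next day. The rule in isolation, a sketch using the same 86400-second shift:

from obspy import UTCDateTime

def nordic_pick_time(evtime, pick_hour, pick_minute, pick_seconds):
    # Picks at hour 00 after an hour-23 origin, or at hour 24, roll over.
    day_add = 0
    if (pick_hour == 0 and evtime.hour == 23) or pick_hour == 24:
        day_add = 86400
        pick_hour = 0
    return UTCDateTime(evtime.year, evtime.month, evtime.day,
                       pick_hour, pick_minute, pick_seconds) + day_add

origin = UTCDateTime(2021, 5, 3, 23, 59, 10)
print(nordic_pick_time(origin, 0, 0, 5.0))  # 2021-05-04T00:00:05.000000Z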
Example 7: test_write_with_extra_tags_and_read
def test_write_with_extra_tags_and_read(self):
"""
Tests that a QuakeML file with additional custom "extra" tags gets
written correctly and that when reading it again the extra tags are
parsed correctly.
"""
filename = os.path.join(self.path, "quakeml_1.2_origin.xml")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
cat = readQuakeML(filename)
self.assertEqual(len(w), 0)
# add some custom tags to first event:
# - tag with explicit namespace but no explicit ns abbreviation
# - tag without explicit namespace (gets obspy default ns)
# - tag with explicit namespace and namespace abbreviation
my_extra = AttribDict(
{'public': {'value': False,
'namespace': r"http://some-page.de/xmlns/1.0",
'attrib': {u"some_attrib": u"some_value",
u"another_attrib": u"another_value"}},
'custom': {'value': u"True",
'namespace': r'http://test.org/xmlns/0.1'},
'new_tag': {'value': 1234,
'namespace': r"http://test.org/xmlns/0.1"},
'tX': {'value': UTCDateTime('2013-01-02T13:12:14.600000Z'),
'namespace': r'http://test.org/xmlns/0.1'},
'dataid': {'namespace': r'http://anss.org/xmlns/catalog/0.1',
'type': 'attribute', 'value': '00999999'}})
nsmap = {"ns0": r"http://test.org/xmlns/0.1",
"catalog": r'http://anss.org/xmlns/catalog/0.1'}
cat[0].extra = my_extra.copy()
# insert a pick with an extra field
p = Pick()
p.extra = {'weight': {'value': 2,
'namespace': r"http://test.org/xmlns/0.1"}}
cat[0].picks.append(p)
with NamedTemporaryFile() as tf:
tmpfile = tf.name
# write file
cat.write(tmpfile, format="QUAKEML", nsmap=nsmap)
# check contents
with open(tmpfile, "r") as fh:
content = fh.read()
# check namespace definitions in root element
expected = ['<q:quakeml',
'xmlns:catalog="http://anss.org/xmlns/catalog/0.1"',
'xmlns:ns0="http://test.org/xmlns/0.1"',
'xmlns:ns1="http://some-page.de/xmlns/1.0"',
'xmlns:q="http://quakeml.org/xmlns/quakeml/1.2"',
'xmlns="http://quakeml.org/xmlns/bed/1.2"']
for line in expected:
self.assertTrue(line in content)
# check additional tags
expected = [
'<ns0:custom>True</ns0:custom>',
'<ns0:new_tag>1234</ns0:new_tag>',
'<ns0:tX>2013-01-02T13:12:14.600000Z</ns0:tX>',
'<ns1:public '
'another_attrib="another_value" '
'some_attrib="some_value">false</ns1:public>'
]
            for line in expected:
                self.assertTrue(line in content)
            # now, read again to test if it's parsed correctly..
cat = readQuakeML(tmpfile)
# when reading..
# - namespace abbreviations should be disregarded
# - we always end up with a namespace definition, even if it was
# omitted when originally setting the custom tag
            # - custom namespace abbreviations should be attached to the Catalog
self.assertTrue(hasattr(cat[0], "extra"))
def _tostr(x):
if isinstance(x, bool):
if x:
return str("true")
else:
return str("false")
return str(x)
for key, value in my_extra.items():
my_extra[key]['value'] = _tostr(value['value'])
self.assertEqual(cat[0].extra, my_extra)
self.assertTrue(hasattr(cat[0].picks[0], "extra"))
self.assertEqual(
cat[0].picks[0].extra,
{'weight': {'value': '2',
'namespace': r'http://test.org/xmlns/0.1'}})
self.assertTrue(hasattr(cat, "nsmap"))
self.assertTrue(getattr(cat, "nsmap")['ns0'] == nsmap['ns0'])
Example 8: read_nlloc_hyp
def read_nlloc_hyp(filename, coordinate_converter=None, picks=None, **kwargs):
"""
Reads a NonLinLoc Hypocenter-Phase file to a
:class:`~obspy.core.event.Catalog` object.
.. note::
Coordinate conversion from coordinate frame of NonLinLoc model files /
location run to WGS84 has to be specified explicitly by the user if
necessary.
.. note::
An example can be found on the :mod:`~obspy.nlloc` submodule front
page in the documentation pages.
:param filename: File or file-like object in text mode.
:type coordinate_converter: func
:param coordinate_converter: Function to convert (x, y, z)
coordinates of NonLinLoc output to geographical coordinates and depth
in meters (longitude, latitude, depth in kilometers).
If left `None` NonLinLoc (x, y, z) output is left unchanged (e.g. if
it is in geographical coordinates already like for NonLinLoc in
global mode).
The function should accept three arguments x, y, z and return a
tuple of three values (lon, lat, depth in kilometers).
:type picks: list of :class:`~obspy.core.event.Pick`
:param picks: Original picks used to generate the NonLinLoc location.
If provided, the output event will include the original picks and the
arrivals in the output origin will link to them correctly (with their
`pick_id` attribute). If not provided, the output event will include
(the rather basic) pick information that can be reconstructed from the
NonLinLoc hypocenter-phase file.
:rtype: :class:`~obspy.core.event.Catalog`
"""
if not hasattr(filename, "read"):
        # Check if it exists, otherwise assume it's a string.
try:
with open(filename, "rt") as fh:
data = fh.read()
        except Exception:
try:
data = filename.decode()
            except Exception:
data = str(filename)
data = data.strip()
else:
data = filename.read()
if hasattr(data, "decode"):
data = data.decode()
lines = data.splitlines()
# remember picks originally used in location, if provided
original_picks = picks
if original_picks is None:
original_picks = []
# determine indices of block start/end of the NLLOC output file
indices_hyp = [None, None]
indices_phases = [None, None]
for i, line in enumerate(lines):
if line.startswith("NLLOC "):
indices_hyp[0] = i
elif line.startswith("END_NLLOC"):
indices_hyp[1] = i
elif line.startswith("PHASE "):
indices_phases[0] = i
elif line.startswith("END_PHASE"):
indices_phases[1] = i
if any([i is None for i in indices_hyp]):
msg = ("NLLOC HYP file seems corrupt,"
" could not detect 'NLLOC' and 'END_NLLOC' lines.")
raise RuntimeError(msg)
# strip any other lines around NLLOC block
lines = lines[indices_hyp[0]:indices_hyp[1]]
# extract PHASES lines (if any)
if any(indices_phases):
if not all(indices_phases):
msg = ("NLLOC HYP file seems corrupt, 'PHASE' block is corrupt.")
raise RuntimeError(msg)
i1, i2 = indices_phases
lines, phases_lines = lines[:i1] + lines[i2 + 1:], lines[i1 + 1:i2]
else:
phases_lines = []
lines = dict([line.split(None, 1) for line in lines])
line = lines["SIGNATURE"]
line = line.rstrip().split('"')[1]
signature, version, date, time = line.rsplit(" ", 3)
creation_time = UTCDateTime().strptime(date + time, str("%d%b%Y%Hh%Mm%S"))
# maximum likelihood origin location info line
line = lines["HYPOCENTER"]
x, y, z = map(float, line.split()[1:7:2])
if coordinate_converter:
#.........some code omitted here.........
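A hedged usage sketch for the reader above. The file name is arbitrary, and the linear converter is a stand-in for whatever transform the NonLinLoc location run actually used; in current ObsPy the function lives in obspy.io.nlloc.core and is also reachable via read_events with format="NLLOC_HYP":

from obspy import read_events

def simple_converter(x, y, z):
    # Hypothetical SIMPLE transform centred on (10.0 E, 45.0 N);
    # a real run would reproduce the projection of the location run.
    lon = 10.0 + x / 78.8   # approx. km per degree of longitude at 45 N
    lat = 45.0 + y / 111.2  # approx. km per degree of latitude
    return lon, lat, z      # depth is kept in kilometers

cat = read_events("last.hyp", format="NLLOC_HYP",
                  coordinate_converter=simple_converter)
print(cat[0].origins[0])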
Example 9: __toPick
def __toPick(parser, pick_el, evaluation_mode):
"""
"""
pick = Pick()
pick.resource_id = ResourceIdentifier(prefix="/".join([RESOURCE_ROOT, "pick"]))
    # Raise an exception if the pick has a phase delay.
phase_delay = parser.xpath2obj("phase_delay", pick_el, float)
if phase_delay is not None:
msg = "The pick has a phase_delay!"
raise Exception(msg)
waveform = pick_el.xpath("waveform")[0]
network = waveform.get("networkCode")
station = fix_station_name(waveform.get("stationCode"))
# Map some station names.
if station in STATION_DICT:
station = STATION_DICT[station]
if not network:
network = NETWORK_DICT[station]
location = waveform.get("locationCode") or ""
channel = waveform.get("channelCode") or ""
pick.waveform_id = WaveformStreamID(
network_code=network,
station_code=station,
channel_code=channel,
location_code=location)
pick.time, pick.time_errors = __toTimeQuantity(parser, pick_el, "time")
# Picks without time are not quakeml conform
if pick.time is None:
print "Pick has no time and is ignored: %s" % station
return None
pick.phase_hint = parser.xpath2obj('phaseHint', pick_el, str)
onset = parser.xpath2obj('onset', pick_el)
    # Fix an old typo in the onset value ...
if onset == "implusive":
onset = "impulsive"
if onset:
pick.onset = onset.lower()
# Evaluation mode of a pick is global in the SeisHub Event file format.
#pick.evaluation_mode = evaluation_mode
# The polarity needs to be mapped.
polarity = parser.xpath2obj('polarity', pick_el)
pol_map_dict = {'up': 'positive', 'positive': 'positive',
'forward': 'positive',
'forwards': 'positive',
'right': 'positive',
'backward': 'negative',
'backwards': 'negative',
'left': 'negative',
'down': 'negative', 'negative': 'negative',
'undecidable': 'undecidable',
'poorup': 'positive',
'poordown': 'negative'}
if polarity:
if polarity.lower() in pol_map_dict:
pick.polarity = pol_map_dict[polarity.lower()]
else:
pick.polarity = polarity.lower()
pick_weight = parser.xpath2obj('weight', pick_el, int)
if pick_weight is not None:
pick.extra = AttribDict()
pick.extra.weight = {'value': pick_weight, 'namespace': NAMESPACE}
return pick
Example 10: _parse_arrivals
def _parse_arrivals(self, event, origin, origin_res_id):
# Skip header of arrivals
next(self.lines)
# Stop the loop after 2 empty lines (according to the standard).
previous_line_empty = False
for line in self.lines:
line_empty = not line or line.isspace()
            if not self.event_point_separator:
                # Events are separated by two empty lines
                if line_empty and previous_line_empty:
                    break
            else:
                # Events are separated by '.'
                if line.startswith('.'):
                    break
previous_line_empty = line_empty
            if line_empty:
                # Skip empty lines when the loop is terminated by a
                # point rather than by empty lines
                continue
magnitude_types = []
magnitude_values = []
fields = self.fields['arrival']
station = line[fields['sta']].strip()
distance = line[fields['dist']].strip()
event_azimuth = line[fields['ev_az']].strip()
evaluation_mode = line[fields['picktype']].strip()
direction = line[fields['direction']].strip()
onset = line[fields['detchar']].strip()
phase = line[fields['phase']].strip()
time = line[fields['time']].strip().replace('/', '-')
time_residual = line[fields['t_res']].strip()
arrival_azimuth = line[fields['azim']].strip()
azimuth_residual = line[fields['az_res']].strip()
slowness = line[fields['slow']].strip()
slowness_residual = line[fields['s_res']].strip()
time_defining_flag = line[fields['t_def']].strip()
azimuth_defining_flag = line[fields['a_def']].strip()
slowness_defining_flag = line[fields['s_def']].strip()
snr = line[fields['snr']].strip()
amplitude_value = line[fields['amp']].strip()
period = line[fields['per']].strip()
magnitude_types.append(line[fields['mag_type_1']].strip())
magnitude_values.append(line[fields['mag_1']].strip())
magnitude_types.append(line[fields['mag_type_2']].strip())
magnitude_values.append(line[fields['mag_2']].strip())
line_id = line[fields['id']].strip()
# Don't take pick and arrival with wrong time residual
if '*' in time_residual:
continue
try:
pick = Pick()
pick.creation_info = self._get_creation_info()
pick.waveform_id = WaveformStreamID()
pick.waveform_id.station_code = station
pick.time = UTCDateTime(time)
network_code = self.default_network_code
location_code = self.default_location_code
channel_code = self.default_channel_code
try:
network_code, channel = self._get_channel(station,
pick.time)
if channel:
channel_code = channel.code
location_code = channel.location_code
except TypeError:
pass
pick.waveform_id.network_code = network_code
pick.waveform_id.channel_code = channel_code
if location_code:
pick.waveform_id.location_code = location_code
try:
ev_mode = EVALUATION_MODES[evaluation_mode]
pick.evaluation_mode = ev_mode
except KeyError:
pass
try:
pick.polarity = PICK_POLARITIES[direction]
except KeyError:
pass
try:
pick.onset = PICK_ONSETS[onset]
except KeyError:
pass
pick.phase_hint = phase
try:
#.........some code omitted here.........
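The repeated try/except KeyError blocks in Example 10 implement optional enum mapping: an unknown code simply leaves the attribute at its default. Since an unset attribute and None are equivalent for these event fields, dict.get gives the same result more compactly; the mapping tables below are illustrative, not the module's own:

from obspy.core.event import Pick

PICK_ONSETS = {"e": "emergent", "i": "impulsive", "q": "questionable"}
PICK_POLARITIES = {"c": "positive", "d": "negative"}

pick = Pick()
pick.onset = PICK_ONSETS.get("i")         # "impulsive"
pick.polarity = PICK_POLARITIES.get("x")  # unknown code -> None
print(pick.onset, pick.polarity)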
Example 11: _parse_record_s
def _parse_record_s(self, line, event, p_pick, p_arrival):
"""
Parses the 'secondary phases' record S
Secondary phases are following phases of the reading,
and can be P-type or S-type.
"""
arrivals = []
phase = line[7:15].strip()
arrival_time = line[15:24]
if phase:
arrivals.append((phase, arrival_time))
phase = line[25:33].strip()
arrival_time = line[33:42]
if phase:
arrivals.append((phase, arrival_time))
phase = line[43:51].strip()
arrival_time = line[51:60]
if phase:
arrivals.append((phase, arrival_time))
evid = event.resource_id.id.split('/')[-1]
station_string = \
p_pick.waveform_id.get_seed_string()\
.replace(' ', '-').replace('.', '_').lower()
origin = event.origins[0]
for phase, arrival_time in arrivals:
if phase[0:2] == 'D=':
# unused: depth = self._float(phase[2:7])
try:
depth_usage_flag = phase[7]
except IndexError:
# usage flag is not defined
depth_usage_flag = None
# FIXME: I'm not sure that 'X' actually
# means 'used'
if depth_usage_flag == 'X':
# FIXME: is this enough to say that
# the event is constrained by depth phases?
origin.depth_type = 'constrained by depth phases'
origin.quality.depth_phase_count += 1
else:
pick = Pick()
prefix = '/'.join((res_id_prefix, 'pick',
evid, station_string))
pick.resource_id = ResourceIdentifier(prefix=prefix)
date = origin.time.strftime('%Y%m%d')
pick.time = UTCDateTime(date + arrival_time)
# Check if pick is on the next day:
if pick.time < origin.time:
pick.time += timedelta(days=1)
pick.waveform_id = p_pick.waveform_id
pick.backazimuth = p_pick.backazimuth
onset = phase[0]
if onset == 'e':
pick.onset = 'emergent'
phase = phase[1:]
elif onset == 'i':
pick.onset = 'impulsive'
phase = phase[1:]
elif onset == 'q':
pick.onset = 'questionable'
phase = phase[1:]
pick.phase_hint = phase.strip()
event.picks.append(pick)
arrival = Arrival()
prefix = '/'.join((res_id_prefix, 'arrival',
evid, station_string))
arrival.resource_id = ResourceIdentifier(prefix=prefix)
arrival.pick_id = pick.resource_id
arrival.phase = pick.phase_hint
arrival.azimuth = p_arrival.azimuth
arrival.distance = p_arrival.distance
origin.quality.associated_phase_count += 1
origin.arrivals.append(arrival)
Example 12: _parse_record_p
def _parse_record_p(self, line, event):
"""
Parses the 'primary phase record' P
The primary phase is the first phase of the reading,
regardless its type.
"""
station = line[2:7].strip()
phase = line[7:15]
arrival_time = line[15:24]
residual = self._float(line[25:30])
# unused: residual_flag = line[30]
distance = self._float(line[32:38]) # degrees
azimuth = self._float(line[39:44])
backazimuth = round(azimuth % -360 + 180, 1)
mb_period = self._float(line[44:48])
mb_amplitude = self._float(line[48:55]) # nanometers
mb_magnitude = self._float(line[56:59])
# unused: mb_usage_flag = line[59]
origin = event.origins[0]
evid = event.resource_id.id.split('/')[-1]
waveform_id = WaveformStreamID()
waveform_id.station_code = station
# network_code is required for QuakeML validation
waveform_id.network_code = ' '
station_string = \
waveform_id.get_seed_string()\
.replace(' ', '-').replace('.', '_').lower()
prefix = '/'.join((res_id_prefix, 'waveformstream',
evid, station_string))
waveform_id.resource_uri = ResourceIdentifier(prefix=prefix)
pick = Pick()
prefix = '/'.join((res_id_prefix, 'pick', evid, station_string))
pick.resource_id = ResourceIdentifier(prefix=prefix)
date = origin.time.strftime('%Y%m%d')
pick.time = UTCDateTime(date + arrival_time)
# Check if pick is on the next day:
if pick.time < origin.time:
pick.time += timedelta(days=1)
pick.waveform_id = waveform_id
pick.backazimuth = backazimuth
onset = phase[0]
if onset == 'e':
pick.onset = 'emergent'
phase = phase[1:]
elif onset == 'i':
pick.onset = 'impulsive'
phase = phase[1:]
elif onset == 'q':
pick.onset = 'questionable'
phase = phase[1:]
pick.phase_hint = phase.strip()
event.picks.append(pick)
if mb_amplitude is not None:
amplitude = Amplitude()
prefix = '/'.join((res_id_prefix, 'amp', evid, station_string))
amplitude.resource_id = ResourceIdentifier(prefix=prefix)
amplitude.generic_amplitude = mb_amplitude * 1E-9
amplitude.unit = 'm'
amplitude.period = mb_period
amplitude.type = 'AB'
amplitude.magnitude_hint = 'Mb'
amplitude.pick_id = pick.resource_id
amplitude.waveform_id = pick.waveform_id
event.amplitudes.append(amplitude)
station_magnitude = StationMagnitude()
            prefix = '/'.join((res_id_prefix, 'stationmagnitude',
evid, station_string))
station_magnitude.resource_id = ResourceIdentifier(prefix=prefix)
station_magnitude.origin_id = origin.resource_id
station_magnitude.mag = mb_magnitude
# station_magnitude.mag_errors['uncertainty'] = 0.0
station_magnitude.station_magnitude_type = 'Mb'
station_magnitude.amplitude_id = amplitude.resource_id
station_magnitude.waveform_id = pick.waveform_id
res_id = '/'.join(
(res_id_prefix, 'magnitude/generic/body_wave_magnitude'))
station_magnitude.method_id = \
ResourceIdentifier(id=res_id)
event.station_magnitudes.append(station_magnitude)
arrival = Arrival()
prefix = '/'.join((res_id_prefix, 'arrival', evid, station_string))
arrival.resource_id = ResourceIdentifier(prefix=prefix)
arrival.pick_id = pick.resource_id
arrival.phase = pick.phase_hint
arrival.azimuth = azimuth
arrival.distance = distance
arrival.time_residual = residual
res_id = '/'.join((res_id_prefix, 'earthmodel/ak135'))
arrival.earth_model_id = ResourceIdentifier(id=res_id)
origin.arrivals.append(arrival)
        origin.quality.minimum_distance = min(
            d for d in (arrival.distance, origin.quality.minimum_distance)
            if d is not None)
        origin.quality.maximum_distance = max(
            d for d in (arrival.distance, origin.quality.maximum_distance)
            if d is not None)
origin.quality.associated_phase_count += 1
return pick, arrival
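The backazimuth expression in Example 12, azimuth % -360 + 180, lands in the range (-180, 180] rather than [0, 360), but it points the same way as the conventional (azimuth + 180) % 360. A quick check of the arithmetic:

for azimuth in (0.0, 10.0, 90.0, 200.0, 350.0):
    record_p_style = azimuth % -360 + 180
    conventional = (azimuth + 180.0) % 360.0
    # Same direction modulo 360 degrees.
    assert (record_p_style - conventional) % 360.0 == 0.0
    print(azimuth, record_p_style, conventional)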
Example 13: outputOBSPY
def outputOBSPY(hp, event=None, only_fm_picks=False):
"""
Make an Event which includes the current focal mechanism information from HASH
Use the 'only_fm_picks' flag to only include the picks HASH used for the FocalMechanism.
This flag will replace the 'picks' and 'arrivals' lists of existing events with new ones.
Inputs
-------
hp : hashpy.HashPype instance
event : obspy.core.event.Event
only_fm_picks : bool of whether to overwrite the picks/arrivals lists
Returns
-------
obspy.core.event.Event
Event will be new if no event was input, FocalMech added to existing event
"""
# Returns new (or updates existing) Event with HASH solution
n = hp.npol
if event is None:
event = Event(focal_mechanisms=[], picks=[], origins=[])
origin = Origin(arrivals=[])
origin.time = UTCDateTime(hp.tstamp)
origin.latitude = hp.qlat
origin.longitude = hp.qlon
origin.depth = hp.qdep
origin.creation_info = CreationInfo(version=hp.icusp)
origin.resource_id = ResourceIdentifier('smi:hash/Origin/{0}'.format(hp.icusp))
for _i in range(n):
p = Pick()
p.creation_info = CreationInfo(version=hp.arid[_i])
p.resource_id = ResourceIdentifier('smi:hash/Pick/{0}'.format(p.creation_info.version))
p.waveform_id = WaveformStreamID(network_code=hp.snet[_i], station_code=hp.sname[_i], channel_code=hp.scomp[_i])
if hp.p_pol[_i] > 0:
p.polarity = 'positive'
else:
p.polarity = 'negative'
a = Arrival()
a.creation_info = CreationInfo(version=hp.arid[_i])
a.resource_id = ResourceIdentifier('smi:hash/Arrival/{0}'.format(p.creation_info.version))
a.azimuth = hp.p_azi_mc[_i,0]
a.takeoff_angle = 180. - hp.p_the_mc[_i,0]
a.pick_id = p.resource_id
origin.arrivals.append(a)
event.picks.append(p)
event.origins.append(origin)
event.preferred_origin_id = str(origin.resource_id)
else: # just update the changes
origin = event.preferred_origin()
picks = []
arrivals = []
for _i in range(n):
ind = hp.p_index[_i]
a = origin.arrivals[ind]
p = a.pick_id.getReferredObject()
a.takeoff_angle = hp.p_the_mc[_i,0]
picks.append(p)
arrivals.append(a)
if only_fm_picks:
origin.arrivals = arrivals
event.picks = picks
    # Use the DoubleCouple calculator and populate planes/axes etc.
x = hp._best_quality_index
# Put all the mechanisms into the 'focal_mechanisms' list, mark "best" as preferred
for s in range(hp.nmult):
dc = DoubleCouple([hp.str_avg[s], hp.dip_avg[s], hp.rak_avg[s]])
ax = dc.axis
focal_mech = FocalMechanism()
focal_mech.creation_info = CreationInfo(creation_time=UTCDateTime(), author=hp.author)
focal_mech.triggering_origin_id = origin.resource_id
focal_mech.resource_id = ResourceIdentifier('smi:hash/FocalMechanism/{0}/{1}'.format(hp.icusp, s+1))
focal_mech.method_id = ResourceIdentifier('HASH')
focal_mech.nodal_planes = NodalPlanes()
focal_mech.nodal_planes.nodal_plane_1 = NodalPlane(*dc.plane1)
focal_mech.nodal_planes.nodal_plane_2 = NodalPlane(*dc.plane2)
focal_mech.principal_axes = PrincipalAxes()
focal_mech.principal_axes.t_axis = Axis(azimuth=ax['T']['azimuth'], plunge=ax['T']['dip'])
focal_mech.principal_axes.p_axis = Axis(azimuth=ax['P']['azimuth'], plunge=ax['P']['dip'])
focal_mech.station_polarity_count = n
focal_mech.azimuthal_gap = hp.magap
focal_mech.misfit = hp.mfrac[s]
focal_mech.station_distribution_ratio = hp.stdr[s]
focal_mech.comments.append(
Comment(hp.qual[s], resource_id=ResourceIdentifier(str(focal_mech.resource_id) + '/comment/quality'))
)
#----------------------------------------
event.focal_mechanisms.append(focal_mech)
if s == x:
event.preferred_focal_mechanism_id = str(focal_mech.resource_id)
return event
Example 14: test_write_with_extra_tags_and_read
def test_write_with_extra_tags_and_read(self):
"""
Tests that a QuakeML file with additional custom "extra" tags gets
written correctly and that when reading it again the extra tags are
parsed correctly.
"""
filename = os.path.join(self.path, "quakeml_1.2_origin.xml")
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
cat = _read_quakeml(filename)
self.assertEqual(len(w), 0)
# add some custom tags to first event:
# - tag with explicit namespace but no explicit ns abbreviation
# - tag without explicit namespace (gets obspy default ns)
# - tag with explicit namespace and namespace abbreviation
my_extra = AttribDict(
{
"public": {
"value": False,
"namespace": "http://some-page.de/xmlns/1.0",
"attrib": {"some_attrib": "some_value", "another_attrib": "another_value"},
},
"custom": {"value": "True", "namespace": "http://test.org/xmlns/0.1"},
"new_tag": {"value": 1234, "namespace": "http://test.org/xmlns/0.1"},
"tX": {"value": UTCDateTime("2013-01-02T13:12:14.600000Z"), "namespace": "http://test.org/xmlns/0.1"},
"dataid": {"namespace": "http://anss.org/xmlns/catalog/0.1", "type": "attribute", "value": "00999999"},
# some nested tags :
"quantity": {
"namespace": "http://some-page.de/xmlns/1.0",
"attrib": {"attrib1": "attrib_value1", "attrib2": "attrib_value2"},
"value": {
"my_nested_tag1": {"namespace": "http://some-page.de/xmlns/1.0", "value": 1.23e10},
"my_nested_tag2": {"namespace": "http://some-page.de/xmlns/1.0", "value": False},
},
},
}
)
nsmap = {"ns0": "http://test.org/xmlns/0.1", "catalog": "http://anss.org/xmlns/catalog/0.1"}
cat[0].extra = my_extra.copy()
# insert a pick with an extra field
p = Pick()
p.extra = {"weight": {"value": 2, "namespace": "http://test.org/xmlns/0.1"}}
cat[0].picks.append(p)
with NamedTemporaryFile() as tf:
tmpfile = tf.name
# write file
cat.write(tmpfile, format="QUAKEML", nsmap=nsmap)
# check contents
with open(tmpfile, "rb") as fh:
# enforce reproducible attribute orders through write_c14n
obj = etree.fromstring(fh.read()).getroottree()
buf = io.BytesIO()
obj.write_c14n(buf)
buf.seek(0, 0)
content = buf.read()
# check namespace definitions in root element
expected = [
b"<q:quakeml",
b'xmlns:catalog="http://anss.org/xmlns/catalog/0.1"',
b'xmlns:ns0="http://test.org/xmlns/0.1"',
b'xmlns:ns1="http://some-page.de/xmlns/1.0"',
b'xmlns:q="http://quakeml.org/xmlns/quakeml/1.2"',
b'xmlns="http://quakeml.org/xmlns/bed/1.2"',
]
for line in expected:
self.assertIn(line, content)
# check additional tags
expected = [
b"<ns0:custom>True</ns0:custom>",
b"<ns0:new_tag>1234</ns0:new_tag>",
b"<ns0:tX>2013-01-02T13:12:14.600000Z</ns0:tX>",
b"<ns1:public " b'another_attrib="another_value" ' b'some_attrib="some_value">false</ns1:public>',
]
for line in expected:
self.assertIn(line, content)
# now, read again to test if it's parsed correctly..
cat = _read_quakeml(tmpfile)
# when reading..
# - namespace abbreviations should be disregarded
# - we always end up with a namespace definition, even if it was
# omitted when originally setting the custom tag
        # - custom namespace abbreviations should be attached to the Catalog
self.assertTrue(hasattr(cat[0], "extra"))
def _tostr(x):
if isinstance(x, bool):
if x:
return str("true")
else:
return str("false")
elif isinstance(x, AttribDict):
for key, value in x.items():
x[key].value = _tostr(value["value"])
return x
else:
return str(x)
#.........some code omitted here.........
Example 15: _map_join2phase
def _map_join2phase(self, db):
"""
Return an obspy Arrival and Pick from an dict of CSS key/values
corresponding to one record. See the 'Join' section for the implied
database table join expected.
Inputs
======
db : dict of key/values of CSS fields related to the phases (see Join)
Returns
=======
obspy.core.event.Pick, obspy.core.event.Arrival
Notes
=====
Any object that supports the dict 'get' method can be passed as
input, e.g. OrderedDict, custom classes, etc.
Join
----
assoc <- arrival <- affiliation (outer) <- schanloc [sta chan] (outer)
"""
p = Pick()
p.time = _utc(db.get('time'))
def_net = self.agency[:2].upper()
css_sta = db.get('sta')
css_chan = db.get('chan')
p.waveform_id = WaveformStreamID(
station_code = db.get('fsta') or css_sta,
channel_code = db.get('fchan') or css_chan,
network_code = db.get('snet') or def_net,
location_code = db.get('loc'),
)
p.horizontal_slowness = db.get('slow')
#p.horizontal_slowness_errors = self._create_dict(db, 'delslo')
p.backazimuth = db.get('azimuth')
#p.backazimuth_errors = self._create_dict(db, 'delaz')
on_qual = _str(db.get('qual')).lower()
if 'i' in on_qual:
p.onset = "impulsive"
elif 'e' in on_qual:
p.onset = "emergent"
elif 'w' in on_qual:
p.onset = "questionable"
else:
p.onset = None
p.phase_hint = db.get('iphase')
pol = _str(db.get('fm')).lower()
if 'c' in pol or 'u' in pol:
p.polarity = "positive"
elif 'd' in pol or 'r' in pol:
p.polarity = "negative"
elif '.' in pol:
p.polarity = "undecidable"
else:
p.polarity = None
p.evaluation_mode = "automatic"
if 'orbassoc' not in _str(db.get('auth')):
p.evaluation_mode = "manual"
p.evaluation_status = "preliminary"
        if p.evaluation_mode == "manual":
p.evaluation_status = "reviewed"
p.creation_info = CreationInfo(
version = db.get('arid'),
creation_time = _utc(db.get('arrival.lddate')),
agency_id = self.agency,
author = db.get('auth'),
)
p.resource_id = self._rid(p)
a = Arrival()
a.pick_id = ResourceIdentifier(str(p.resource_id), referred_object=p)
a.phase = db.get('phase')
a.azimuth = db.get('esaz')
a.distance = db.get('delta')
a.takeoff_angle = db.get('ema')
#a.takeoff_angle_errors = self._create_dict(db, 'emares')
a.time_residual = db.get('timeres')
a.horizontal_slowness_residual = db.get('slores')
a.time_weight = db.get('wgt')
a.earth_model_id = ResourceIdentifier(self._prefix+'/VelocityModel/'+_str(db.get('vmodel')))
a.creation_info = CreationInfo(
version = db.get('arid'),
creation_time = _utc(db.get('lddate')),
agency_id = self.agency,
)
a.extra = {}
a.extra['timedef'] = {
'value': _str(db.get('timedef')),
'namespace': CSS_NAMESPACE
}
#.........some code omitted here.........
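The onset and polarity logic in Example 15 is substring matching on CSS quality and first-motion codes. The same mapping, pulled out as standalone helpers; the code values shown are illustrative:

from obspy.core.event import Pick

def css_onset(qual):
    """Map a CSS onset-quality code to a QuakeML onset."""
    qual = (qual or "").lower()
    if "i" in qual:
        return "impulsive"
    if "e" in qual:
        return "emergent"
    if "w" in qual:
        return "questionable"
    return None

def css_polarity(fm):
    """Map a CSS first-motion code to a QuakeML polarity."""
    fm = (fm or "").lower()
    if "c" in fm or "u" in fm:
        return "positive"
    if "d" in fm or "r" in fm:
        return "negative"
    if "." in fm:
        return "undecidable"
    return None

p = Pick(onset=css_onset("i"), polarity=css_polarity("c."))
print(p.onset, p.polarity)  # impulsive positive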