本文整理汇总了Python中pyasdf.ASDFDataSet类的典型用法代码示例。如果您正苦于以下问题:Python ASDFDataSet类的具体用法?Python ASDFDataSet怎么用?Python ASDFDataSet使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了ASDFDataSet类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: write_new_synt_asdf
def write_new_synt_asdf(self, file_prefix):
    """Write the sorted new synthetic waveforms to one ASDF file per tag.

    :param file_prefix: prefix of the output files; each tag produces
        ``"<file_prefix>.<tag>.h5"``. An existing file is removed first.
    """
    new_synt_dict = self._sort_new_synt()
    # dict.items() instead of .iteritems(): the latter was removed in
    # Python 3.
    for tag, win_array in new_synt_dict.items():
        filename = "%s.%s.h5" % (file_prefix, tag)
        if os.path.exists(filename):
            os.remove(filename)
            logger.info("Output file exists, removed: %s" % filename)
        else:
            logger.info("Output new synt asdf: %s" % filename)

        ds = ASDFDataSet(filename, mode='w')
        # Set instead of list: O(1) duplicate checks.
        added_ids = set()
        for window in win_array:
            synt_id = window.datalist['new_synt'].id
            # skip duplicate obsd location id.
            # for example, II.AAK.00.BHZ and II.AAK.10.BHZ will
            # be treated as different traces. But the synt and
            # new synt will be the same. So we only add one
            if synt_id in added_ids:
                continue
            added_ids.add(synt_id)
            ds.add_waveforms(window.datalist['new_synt'], tag=tag)
        # add stationxml copied over from the synthetic asdf file
        _staxml_asdf = self._asdf_file_dict['synt']
        ds_sta = ASDFDataSet(_staxml_asdf)
        self.__add_staxml_from_other_asdf(ds, ds_sta)
        ds.flush()
示例2: test_adding_arbitrary_files
def test_adding_arbitrary_files(tmpdir):
    """
    Tests that adding arbitrary files works.
    """
    # First write a small JSON payload to disk.
    test_filename = os.path.join(tmpdir.strpath, "temp.json")
    test_dict = {"a": 1, "b": 2}
    with open(test_filename, "wt") as fh:
        json.dump(test_dict, fh, sort_keys=True)

    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_set = ASDFDataSet(asdf_filename)
    data_set.add_auxiliary_data_file(
        test_filename, tag="test_file", parameters={"1": 1})
    # Explicitly close the HDF5 handle before reopening the file.
    data_set.__del__()
    del data_set

    new_data_set = ASDFDataSet(asdf_filename)
    # Extraction works the same as always, but now has a special attribute,
    # that returns the data as a BytesIO.
    aux_data = new_data_set.auxiliary_data.File.test_file
    assert aux_data.parameters == {"1": 1}
    assert aux_data.tag == "test_file"

    # Round-trip the JSON payload through the auxiliary data file handle.
    round_tripped = json.loads(aux_data.file.read().decode())
    assert test_dict == round_tripped

    # The raw bytes must also match the file on disk.
    aux_data.file.seek(0, 0)
    with open(test_filename, "rb") as fh:
        assert fh.read() == aux_data.file.read()
示例3: test_get_provenance_document_for_id
def test_get_provenance_document_for_id(tmpdir):
    """
    Provenance documents can be looked up by a fully qualified id; bogus
    or unqualified ids raise ASDFValueError.
    """
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_set = ASDFDataSet(asdf_filename)
    doc = prov.read(os.path.join(
        data_dir, "example_schematic_processing_chain.xml"))
    data_set.provenance["test_provenance"] = doc

    expected = {"name": "test_provenance", "document": doc}
    assert expected == data_set.provenance.get_provenance_document_for_id(
        '{http://seisprov.org/seis_prov/0.1/#}sp002_dt_f87sf7sf78')
    assert expected == data_set.provenance.get_provenance_document_for_id(
        '{http://seisprov.org/seis_prov/0.1/#}sp004_lp_f87sf7sf78')

    # Id not found.
    with pytest.raises(ASDFValueError) as err:
        data_set.provenance.get_provenance_document_for_id(
            '{http://seisprov.org/seis_prov/0.1/#}bogus_id')
    assert err.value.args[0] == (
        "Document containing id "
        "'{http://seisprov.org/seis_prov/0.1/#}bogus_id'"
        " not found in the data set.")

    # Not a qualified id.
    with pytest.raises(ASDFValueError) as err:
        data_set.provenance.get_provenance_document_for_id("bla")
    assert err.value.args[0] == ("Not a valid qualified name.")

    data_set.__del__()
示例4: test_tag_iterator
def test_tag_iterator(example_data_set):
    """
    Tests the tag iterator.
    """
    data_set = ASDFDataSet(example_data_set.filename)

    remaining_ids = ["AE.113A..BHE", "AE.113A..BHN", "AE.113A..BHZ",
                     "TA.POKR..BHE", "TA.POKR..BHN", "TA.POKR..BHZ"]
    for st, inv in data_set.itertag("raw_recording"):
        for tr in st:
            # Each trace must appear exactly once.
            assert tr.id in remaining_ids
            remaining_ids.remove(tr.id)
            # The paired inventory must contain the trace's channel.
            matching = inv.select(
                network=tr.stats.network,
                station=tr.stats.station,
                channel=tr.stats.channel,
                location=tr.stats.location)
            assert bool(matching.networks)
    assert remaining_ids == []

    # It will only return matching tags.
    assert sum(1 for _ in data_set.itertag("random")) == 0
示例5: test_detailed_event_association_is_persistent_through_processing
def test_detailed_event_association_is_persistent_through_processing(
        example_data_set):
    """
    Processing a file with an associated event and storing it again should
    keep the association for all the possible event tags..
    """
    data_set = ASDFDataSet(example_data_set.filename)

    # Store a new waveform.
    event = data_set.events[0]
    origin = event.origins[0]
    magnitude = event.magnitudes[0]
    focmec = event.focal_mechanisms[0]

    trace = obspy.read()[0]
    trace.stats.network = "BW"
    trace.stats.station = "RJOB"

    data_set.add_waveforms(trace, tag="random", event_id=event,
                           origin_id=origin, focal_mechanism_id=focmec,
                           magnitude_id=magnitude)

    # Process the stored stream and write it back under a new tag.
    stream = data_set.waveforms.BW_RJOB.random
    stream.taper(max_percentage=0.05, type="cosine")
    data_set.add_waveforms(stream, tag="processed")

    # All four associations must survive the round trip.
    stats = data_set.waveforms.BW_RJOB.processed[0].stats.asdf
    assert event.resource_id == stats.event_id
    assert origin.resource_id == stats.origin_id
    assert magnitude.resource_id == stats.magnitude_id
    assert focmec.resource_id == stats.focal_mechanism_id
示例6: write_new_syn_file
def write_new_syn_file(self, file_format="sac", outputdir=".",
                       eventname=None, suffix=None):
    """
    Write out new synthetic file based on new cmtsolution

    :param file_format: output format, "sac" or "asdf" (case-insensitive)
    :param outputdir: directory for the output files; created if missing
    :param eventname: optional event name embedded in the output paths
    :param suffix: optional extra filename component (ASDF output only)
    :return:
    :raises NotImplementedError: for any other file format
    """
    if not os.path.exists(outputdir):
        os.makedirs(outputdir)

    # sort the new synthetic data by synthetic tag
    new_synt_dict = {}
    for window in self.window:
        tag = window.tag['synt']
        # setdefault replaces the "if tag not in dict.keys()" dance.
        new_synt_dict.setdefault(tag, []).append(window)

    if file_format.upper() == "SAC":
        # dict.items() instead of .iteritems(): the latter was removed
        # in Python 3.
        for tag, win_array in new_synt_dict.items():
            if eventname is None:
                targetdir = os.path.join(outputdir, tag)
            else:
                targetdir = os.path.join(outputdir, "%s_%s"
                                         % (eventname, tag))
            if not os.path.exists(targetdir):
                os.makedirs(targetdir)
            for window in win_array:
                sta = window.station
                nw = window.network
                component = window.component
                location = window.location
                filename = "%s.%s.%s.%s.sac" \
                    % (sta, nw, location, component)
                outputfn = os.path.join(targetdir, filename)
                new_synt = window.datalist['new_synt']
                new_synt.write(outputfn, format='SAC')
    elif file_format.upper() == "ASDF":
        for tag, win_array in new_synt_dict.items():
            if eventname is None:
                outputfn = os.path.join(outputdir, "new_synt.%s.h5" % tag)
            else:
                if suffix is None:
                    outputfn = os.path.join(outputdir, "%s.new_synt.%s.h5"
                                            % (eventname, tag))
                else:
                    outputfn = os.path.join(
                        outputdir, "%s.%s.new_synt.%s.h5"
                        % (eventname, suffix, tag))
            if os.path.exists(outputfn):
                os.remove(outputfn)
            ds = ASDFDataSet(outputfn)
            for window in win_array:
                ds.add_waveforms(window.datalist['new_synt'], tag=tag)
            # add stationxml
    else:
        raise NotImplementedError
示例7: _core
def _core(self, path, param):
    """
    Core function that handles one pair of asdf file(observed and
    synthetic), windows and configuration for adjoint source

    :param path: path information, path of observed asdf, synthetic
    asdf, windows files, observed tag, synthetic tag, output adjoint
    file, figure mode and figure directory
    :type path: dict
    :param param: parameter information for constructing adjoint source
    :type param: dict
    :return:
    """
    adjoint_param = param["adjoint_config"]

    # Unpack all required paths from the path dict.
    obsd_file = path["obsd_asdf"]
    synt_file = path["synt_asdf"]
    obsd_tag = path["obsd_tag"]
    synt_tag = path["synt_tag"]
    window_file = path["window_file"]
    output_filename = path["output_file"]

    # Validate inputs exist and the output location is usable.
    self.check_input_file(obsd_file)
    self.check_input_file(synt_file)
    self.check_input_file(window_file)
    self.check_output_file(output_filename)

    obsd_ds = self.load_asdf(obsd_file, mode="r")
    synt_ds = self.load_asdf(synt_file, mode="r")
    windows = self.load_windows(window_file)

    # The adjoint source type is consumed separately, so remove it;
    # the remaining dict only holds config values for load_adjoint_config.
    adj_src_type = adjoint_param["adj_src_type"]
    adjoint_param.pop("adj_src_type", None)

    config = load_adjoint_config(adjoint_param, adj_src_type)

    # In MPI mode, only rank 0 pre-creates the output file (serially,
    # mpi=False) before the parallel processing phase.
    if self.mpi_mode and self.rank == 0:
        output_ds = ASDFDataSet(output_filename, mpi=False)
        # NOTE(review): this checks the freshly created output file's
        # events before copying from the observed data set -- presumably
        # the intent is to seed the output with obsd events; confirm
        # whether the condition should test obsd_ds.events instead.
        if output_ds.events:
            output_ds.events = obsd_ds.events
        del output_ds
    if self.mpi_mode:
        # All ranks wait here until rank 0 has created the output file.
        self.comm.barrier()

    measure_adj_func = \
        partial(measure_adjoint_wrapper, config=config,
                obsd_tag=obsd_tag, synt_tag=synt_tag,
                windows=windows,
                adj_src_type=adj_src_type)

    results = obsd_ds.process_two_files(synt_ds, measure_adj_func)

    # Only rank 0 writes out the gathered measurement results.
    if self.rank == 0:
        print("output filename: %s" % output_filename)
        write_measurements(results, output_filename)
示例8: test_str_method_provenance_documents
def test_str_method_provenance_documents(tmpdir):
    """The string representation lists the stored provenance documents."""
    data_set = ASDFDataSet(os.path.join(tmpdir.strpath, "test.h5"))
    data_set.add_provenance_document(
        os.path.join(data_dir, "example_schematic_processing_chain.xml"),
        name="test_provenance")
    expected = "1 Provenance Document(s):\n\ttest_provenance"
    assert str(data_set.provenance) == expected
示例9: test_provenance_list_command
def test_provenance_list_command(tmpdir):
    """provenance.list() returns the names of the stored documents."""
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_set = ASDFDataSet(asdf_filename)
    xml_path = os.path.join(data_dir,
                            "example_schematic_processing_chain.xml")
    # Add it as a document.
    document = prov.read(xml_path, format="xml")
    data_set.add_provenance_document(document, name="test_provenance")
    assert ["test_provenance"] == data_set.provenance.list()
示例10: test_coordinate_extraction_but_no_stationxml
def test_coordinate_extraction_but_no_stationxml(tmpdir):
    """
    Tests what happens if no stationxml is defined for a station.
    """
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    data_path = os.path.join(data_dir, "small_sample_data_set")
    data_set = ASDFDataSet(asdf_filename)

    # Add only the waveforms, no station metadata.
    for mseed_file in glob.glob(os.path.join(data_path, "*.mseed")):
        data_set.add_waveforms(mseed_file, tag="raw_recording")

    # If not stationxml exists it should just return an empty dictionary.
    assert {} == data_set.get_all_coordinates()
示例11: test_event_association_is_persistent_through_processing
def test_event_association_is_persistent_through_processing(example_data_set):
    """
    Processing a file with an associated event and storing it again should
    keep the association.
    """
    data_set = ASDFDataSet(example_data_set.filename)
    stream = data_set.waveforms.TA_POKR.raw_recording
    original_event_id = stream[0].stats.asdf.event_id

    # Process and store under a new tag.
    stream.taper(max_percentage=0.05, type="cosine")
    data_set.add_waveforms(stream, tag="processed")

    # The event association must survive the round trip.
    reread = data_set.waveforms.TA_POKR.processed
    assert original_event_id == reread[0].stats.asdf.event_id
示例12: test_waveform_accessor_printing
def test_waveform_accessor_printing(example_data_set):
    """
    Pretty printing of the waveform accessor proxy objects.
    """
    data_set = ASDFDataSet(example_data_set.filename)
    expected = (
        "Contents of the data set for station AE.113A:\n"
        " - Has a StationXML file\n"
        " - 1 Waveform Tag(s):\n"
        " raw_recording")
    assert data_set.waveforms.AE_113A.__str__() == expected
    # Explicitly release the HDF5 file handle.
    data_set.__del__()
    del data_set
示例13: test_accessing_non_existent_tag_raises
def test_accessing_non_existent_tag_raises(example_data_set):
    """
    Accessing a non-existing station should raise.
    """
    data_set = ASDFDataSet(example_data_set.filename)
    try:
        with pytest.raises(WaveformNotInFileException) as excinfo:
            data_set.waveforms.AE_113A.asdfasdf
        expected_msg = ("Tag 'asdfasdf' not part of the data "
                        "set for station 'AE.113A'.")
        assert excinfo.value.args[0] == expected_msg
    finally:
        # Always close the file handle, even if an assertion fails.
        data_set.__del__()
示例14: test_extract_all_coordinates
def test_extract_all_coordinates(example_data_set):
    """
    Tests the extraction of all coordinates.
    """
    data_set = ASDFDataSet(example_data_set.filename)
    expected = {
        "AE.113A": {"latitude": 32.7683,
                    "longitude": -113.7667,
                    "elevation_in_m": 118.0},
        "TA.POKR": {"latitude": 65.1171,
                    "longitude": -147.4335,
                    "elevation_in_m": 501.0},
    }
    assert expected == data_set.get_all_coordinates()
示例15: test_adding_same_event_twice_raises
def test_adding_same_event_twice_raises(tmpdir):
    """
    Adding the same event twice raises.
    """
    asdf_filename = os.path.join(tmpdir.strpath, "test.h5")
    quakeml_file = os.path.join(data_dir, "small_sample_data_set",
                                "quake.xml")
    data_set = ASDFDataSet(asdf_filename)
    # Add once, all good.
    data_set.add_quakeml(quakeml_file)
    assert len(data_set.events) == 1
    # Adding again should raise an error.
    with pytest.raises(ValueError):
        data_set.add_quakeml(quakeml_file)