本文整理汇总了Python中gwpy.segments.DataQualityFlag.read方法的典型用法代码示例。如果您正苦于以下问题:Python DataQualityFlag.read方法的具体用法?Python DataQualityFlag.read怎么用?Python DataQualityFlag.read使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类gwpy.segments.DataQualityFlag的用法示例。
在下文中一共展示了DataQualityFlag.read方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_read_ligolw
# 需要导入模块: from gwpy.segments import DataQualityFlag [as 别名]
# 或者: from gwpy.segments.DataQualityFlag import read [as 别名]
def test_read_ligolw(self):
    """Test reading a `DataQualityFlag` from a LIGO_LW XML file.

    Checks that both the ``active`` and ``known`` segment lists match
    the reference fixtures.
    """
    flag = DataQualityFlag.read(SEGXML, FLAG1, coalesce=False)
    # fix: failure messages previously misspelt the format as 'ligol';
    # assertEqual gives the same pass/fail semantics as
    # assertTrue(a == b) with a richer default diff
    self.assertEqual(
        flag.active, ACTIVE,
        'DataQualityFlag.read(ligolw) mismatch:\n\n%s\n\n%s'
        % (ACTIVE, flag.active))
    self.assertEqual(
        flag.known, KNOWN,
        'DataQualityFlag.read(ligolw) mismatch:\n\n%s\n\n%s'
        % (KNOWN, flag.known))
示例2: _read_segments
# 需要导入模块: from gwpy.segments import DataQualityFlag [as 别名]
# 或者: from gwpy.segments.DataQualityFlag import read [as 别名]
def _read_segments(self, filename):
    """Read segments for this flag from ``filename`` and merge them
    into ``self.known`` and ``self.active``.

    Returns `self` to allow chaining.
    """
    new = DataQualityFlag.read(filename, self.definition)
    # XXX HACK around malformed segment files with no
    # segment_summary table: treat the active segments as known
    if new.active and not new.known:
        new.known = type(new.active)(new.active)
    if not self.known:
        # first file read: adopt its segments wholesale
        self.known = new.known
        self.active = new.active
    else:
        # subsequent files: intersect with what we already know
        self.known = self.known & new.known
        self.active = self.known & new.active
    return self
示例3: test_read_hdf5
# 需要导入模块: from gwpy.segments import DataQualityFlag [as 别名]
# 或者: from gwpy.segments.DataQualityFlag import read [as 别名]
def test_read_hdf5(self):
    """Round-trip a `DataQualityFlag` through HDF5 and verify it.

    Skips if HDF5 support is unavailable (ImportError from the write
    helper).
    """
    try:
        path = self.test_write_hdf5(delete=False)
    except ImportError as exc:  # h5py (or similar) not installed
        self.skipTest(str(exc))
    else:
        flag = DataQualityFlag.read(path)
        os.remove(path)
        for expected, found in [(ACTIVE, flag.active),
                                (KNOWN, flag.known)]:
            self.assertTrue(
                found == expected,
                'DataQualityFlag.read(hdf5) mismatch:\n\n%s\n\n%s'
                % (expected, found))
示例4: process
# 需要导入模块: from gwpy.segments import DataQualityFlag [as 别名]
# 或者: from gwpy.segments.DataQualityFlag import read [as 别名]
def process(self, *args, **kwargs):
    """Read the segment and cache files for this tab, then run the
    standard processing.

    Processing is aborted early (with a warning) when the segment
    file is missing, no segments were analysed, or the inspiral
    cache file is missing.  A missing template-bank cache file only
    produces a warning and an empty cache.
    """
    # read the segment file
    if os.path.isfile(self.segmentfile):
        segs = DataQualityFlag.read(self.segmentfile, coalesce=False)
        self.states[0].known = segs.known
        self.states[0].active = segs.active
        self.states[0].ready = True
    else:
        warn('Segment file %s not found.' % self.segmentfile)
        return
    if len(self.states[0].active) == 0:
        warn('No segments analysed by daily ahope.')
        return
    # read the cache files
    if os.path.isfile(self.inspiralcachefile):
        self.inspiralcache = self._read_cache(self.inspiralcachefile)
    else:
        warn("Cache file %s not found." % self.inspiralcachefile)
        return
    if os.path.isfile(self.tmpltbankcachefile):
        self.tmpltbankcache = self._read_cache(self.tmpltbankcachefile)
    else:
        warn("Cache file %s not found." % self.tmpltbankcachefile)
        self.tmpltbankcache = Cache()
    # only process if the cache file was found
    super(DailyAhopeTab, self).process(*args, **kwargs)

def _read_cache(self, cachefile):
    """Read a `Cache` from ``cachefile``, sieved to this tab's span.

    A "could not convert '\\n' to CacheEntry" parse error (seen with
    malformed cache files containing blank lines) yields an empty
    `Cache`; any other `ValueError` propagates.
    """
    with open(cachefile, 'r') as fobj:
        try:
            return Cache.fromfile(fobj).sieve(segment=self.span)
        except ValueError as e:
            # bug fix: the template-bank branch previously used a bare
            # ``except ValueError:`` but still referenced ``e``, which
            # raised NameError instead of the intended handling
            if "could not convert '\\n' to CacheEntry" in str(e):
                return Cache()
            raise
示例5: test_read_segwizard
# 需要导入模块: from gwpy.segments import DataQualityFlag [as 别名]
# 或者: from gwpy.segments.DataQualityFlag import read [as 别名]
def test_read_segwizard(self):
    """Test reading a `DataQualityFlag` from a segwizard-format file."""
    flag = DataQualityFlag.read(SEGWIZ, FLAG1, coalesce=False)
    failure = ('DataQualityFlag.read(segwizard) mismatch:\n\n%s\n\n%s'
               % (ACTIVE, flag.active))
    self.assertTrue(flag.active == ACTIVE, failure)
    # segwizard carries no separate known-segments table, so the
    # reader sets known == active
    self.assertTrue(flag.known == flag.active)
示例6: get_segments
# 需要导入模块: from gwpy.segments import DataQualityFlag [as 别名]
# 或者: from gwpy.segments.DataQualityFlag import read [as 别名]
def get_segments(flag, validity=None, config=ConfigParser(), cache=None,
query=True, return_=True, coalesce=True, padding=None,
segdb_error='raise', url=None):
"""Retrieve the segments for a given flag
Segments will be loaded from global memory if already defined,
otherwise they will be loaded from the given
:class:`~glue.lal.Cache`, or finally from the segment database
Parameters
----------
flag : `str`, `list`
either the name of one flag, or a list of names
validity : `~gwpy.segments.SegmentList`
the segments over which to search for other segments
query : `bool`, optional, default: `True`
actually execute a read/query operation (if needed), otherwise
just retrieve segments that have already been cached
config : `~configparser.ConfigParser`, optional
the configuration for your analysis, if you have one. If
present the ``[segment-database]`` section will be queried
for the following options
- ``gps-start-time``, and ``gps-end-time``, if ``validity`` is
not given
- ``url`` (the remote hostname for the segment database) if
the ``url`` keyword is not given
cache : :class:`glue.lal.Cache`, optional
a cache of files from which to read segments, otherwise segments
will be downloaded from the segment database
coalesce : `bool`, optional, default: `True`
coalesce all segmentlists before returning, otherwise just return
segments as they were downloaded/read
padding : `tuple`, or `dict` of `tuples`, optional
`(start, end)` padding with which to pad segments that are
downloaded/read
segdb_error : `str`, optional, default: ``'raise'``
how to handle errors returned from the segment database, one of
- ``'raise'`` (default) : raise the exception as normal
- ``'warn'`` : print the exception as a warning, but return no
segments
- ``'ignore'`` : silently ignore the error and return no segments
url : `str`, optional
the remote hostname for the target segment database
return_ : `bool`, optional, default: `True`
internal flag to enable (True) or disable (False) actually returning
anything. This is useful if you want to download/read segments now
but not use them until later (e.g. plotting)
Returns
-------
flag : `~gwpy.segments.DataQualityFlag`
the flag object representing segments for the given single flag, OR
flagdict : `~gwpy.segments.DataQualityDict`
the dict of `~gwpy.segments.DataQualityFlag` objects for multiple
flags, if ``flag`` is given as a `list`, OR
None
if ``return_=False``
"""
if isinstance(flag, str):
flags = flag.split(',')
else:
flags = flag
allflags = set([f for cf in flags for f in
re_flagdiv.split(str(cf))[::2] if f])
if padding is None and isinstance(flag, DataQualityFlag):
padding = {flag: flag.padding}
elif padding is None:
padding = dict((flag,
isinstance(flag, DataQualityFlag) and
flag.padding or None) for flag in flags)
# check validity
if validity is None:
start = config.get(DEFAULTSECT, 'gps-start-time')
end = config.get(DEFAULTSECT, 'gps-end-time')
span = SegmentList([Segment(start, end)])
elif isinstance(validity, DataQualityFlag):
validity = validity.active
try:
span = SegmentList([validity.extent()])
except ValueError:
span = SegmentList()
else:
try:
span = SegmentList([SegmentList(validity).extent()])
except ValueError:
#.........这里部分代码省略.........
示例7: read_data_archive
# 需要导入模块: from gwpy.segments import DataQualityFlag [as 别名]
# 或者: from gwpy.segments.DataQualityFlag import read [as 别名]
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source.

    This method reads all found data into the data containers defined
    by the `gwsumm.globalv` module, then returns nothing.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File
    with File(sourcefile, 'r') as h5file:
        # -- channels ---------------------------
        # restore per-channel metadata recorded in the 'channels' table
        try:
            ctable = Table.read(h5file['channels'])
        except KeyError:  # no channels table written
            pass
        else:
            for row in ctable:
                chan = get_channel(row['name'])
                # first column is the name; the rest are attributes to
                # set back on the channel (only truthy values restored)
                for p in ctable.colnames[1:]:
                    if row[p]:
                        setattr(chan, p, row[p])
        # -- timeseries -------------------------
        for dataset in h5file.get('timeseries', {}).values():
            ts = TimeSeries.read(dataset, format='hdf5')
            # tag trend channels by suffix: 1 Hz data are second-trends,
            # other matching suffixes are minute-trends
            if (re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name) and
                    ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif re.search(r'\.(rms|min|mean|max|n)\Z', ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                # overlap errors are only recoverable in day mode
                if mode.get_mode() != mode.Mode.day:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time of the stored data, drop the clashing
                # series, and append only the non-overlapping tail
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)
        # -- statevector ------------------------
        for dataset in h5file.get('statevector', {}).values():
            sv = StateVector.read(dataset, format='hdf5')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)
        # -- spectrogram ------------------------
        # two groups, each routed to its own adder
        for tag, add_ in zip(
                ['spectrogram', 'coherence-components'],
                [add_spectrogram, add_coherence_component_spectrogram]):
            for key, dataset in h5file.get(tag, {}).items():
                # presumably the trailing ',<fftparams>' suffix is
                # stripped to recover the archive key — TODO confirm
                key = key.rsplit(',', 1)[0]
                spec = Spectrogram.read(dataset, format='hdf5')
                spec.channel = get_channel(spec.channel)
                add_(spec, key=key)
        # -- segments ---------------------------
        for name, dataset in h5file.get('segments', {}).items():
            dqflag = DataQualityFlag.read(h5file, path=dataset.name,
                                          format='hdf5')
            globalv.SEGMENTS += {name: dqflag}
        # -- triggers ---------------------------
        for dataset in h5file.get('triggers', {}).values():
            load_table(dataset)
示例8: read_data_archive
# 需要导入模块: from gwpy.segments import DataQualityFlag [as 别名]
# 或者: from gwpy.segments.DataQualityFlag import read [as 别名]
def read_data_archive(sourcefile):
    """Read archived data from an HDF5 archive source.

    All data found in the archive are loaded into the global data
    containers (`globalv`); nothing is returned.

    Parameters
    ----------
    sourcefile : `str`
        path to source HDF5 file
    """
    from h5py import File

    # trend-channel suffix matcher, compiled once; fix: the original
    # used non-raw strings ('\.' is an invalid escape sequence and
    # warns on modern CPython)
    trend = re.compile(r'\.(rms|min|mean|max|n)\Z')

    def _group(h5file, name):
        # return the named group, or an empty dict when absent, so
        # callers can iterate unconditionally
        try:
            return h5file[name]
        except KeyError:
            return dict()

    with File(sourcefile, 'r') as h5file:
        # read all time-series data
        for dataset in _group(h5file, 'timeseries').itervalues():
            ts = TimeSeries.read(dataset, format='hdf')
            # 1 Hz trend-suffixed data are second-trends; other
            # trend-suffixed data are minute-trends
            if (trend.search(ts.channel.name) and
                    ts.sample_rate.value == 1.0):
                ts.channel.type = 's-trend'
            elif trend.search(ts.channel.name):
                ts.channel.type = 'm-trend'
            ts.channel = get_channel(ts.channel)
            try:
                add_timeseries(ts, key=ts.channel.ndsname)
            except ValueError:
                # NOTE(review): raises when the mode IS day, whereas
                # the newer variant of this function raises when it is
                # NOT day — confirm which polarity is intended here
                if mode.get_mode() == mode.SUMMARY_MODE_DAY:
                    raise
                warnings.warn('Caught ValueError in combining daily archives')
                # get end time of stored data, drop the clashing
                # series, and append only the non-overlapping tail
                globalv.DATA[ts.channel.ndsname].pop(-1)
                t = globalv.DATA[ts.channel.ndsname][-1].span[-1]
                add_timeseries(ts.crop(start=t), key=ts.channel.ndsname)
        # read all state-vector data
        for dataset in _group(h5file, 'statevector').itervalues():
            sv = StateVector.read(dataset, format='hdf')
            sv.channel = get_channel(sv.channel)
            add_timeseries(sv, key=sv.channel.ndsname)
        # read all spectrogram data
        for key, dataset in _group(h5file, 'spectrogram').iteritems():
            # presumably strips a trailing ',<params>' suffix to
            # recover the archive key — TODO confirm against writer
            key = key.rsplit(',', 1)[0]
            spec = Spectrogram.read(dataset, format='hdf')
            spec.channel = get_channel(spec.channel)
            add_spectrogram(spec, key=key)
        # read all segment data
        for name, dataset in _group(h5file, 'segments').iteritems():
            dqflag = DataQualityFlag.read(dataset, format='hdf')
            globalv.SEGMENTS += {name: dqflag}
示例9: get_segments
# 需要导入模块: from gwpy.segments import DataQualityFlag [as 别名]
# 或者: from gwpy.segments.DataQualityFlag import read [as 别名]
def get_segments(flag, validity=None, config=ConfigParser(), cache=None,
query=True, return_=True, coalesce=True, padding=None,
segdb_error='raise', url=None):
"""Retrieve the segments for a given flag
Segments will be loaded from global memory if already defined,
otherwise they will be loaded from the given
:class:`~glue.lal.Cache`, or finally from the segment database
Parameters
----------
FIXME
Returns
-------
FIXME
"""
if isinstance(flag, (unicode, str)):
flags = flag.split(',')
else:
flags = flag
allflags = set([f for cf in flags for f in
re_flagdiv.split(str(cf))[::2] if f])
if padding is None and isinstance(flag, DataQualityFlag):
padding = {flag: flag.padding}
elif padding is None:
padding = dict((flag, isinstance(flag, DataQualityFlag) and
flag.padding or None) for flag in flags)
# check validity
if validity is None:
start = config.get(DEAFULTSECT, 'gps-start-time')
end = config.get(DEFAULTSECT, 'gps-end-time')
span = SegmentList([Segment(start, end)])
elif isinstance(validity, DataQualityFlag):
validity = validity.active
try:
span = SegmentList([validity.extent()])
except ValueError:
span = SegmentList()
else:
try:
span = SegmentList([SegmentList(validity).extent()])
except ValueError:
span = SegmentList()
validity = SegmentList(validity)
# generate output object
out = DataQualityDict()
for f in flags:
out[f] = DataQualityFlag(f, known=validity, active=validity)
for f in allflags:
globalv.SEGMENTS.setdefault(f, DataQualityFlag(f))
# read segments from global memory and get the union of needed times
try:
old = reduce(operator.and_, (globalv.SEGMENTS.get(
f, DataQualityFlag(f)).known
for f in flags))
except TypeError:
old = SegmentList()
newsegs = validity - old
# load new segments
query &= abs(newsegs) != 0
query &= len(allflags) > 0
if cache is not None:
query &= len(cache) != 0
if query:
if cache is not None:
try:
new = DataQualityDict.read(cache, list(allflags))
except IORegistryError as e:
# can remove when astropy >= 1.2 is required
if type(e) is not IORegistryError:
raise
if len(allflags) == 1:
f = list(allflags)[0]
new = DataQualityDict()
new[f] = DataQualityFlag.read(cache, f, coalesce=False)
for f in new:
new[f].known &= newsegs
new[f].active &= newsegs
if coalesce:
new[f].coalesce()
vprint(" Read %d segments for %s (%.2f%% coverage).\n"
% (len(new[f].active), f,
float(abs(new[f].known))/float(abs(newsegs))*100))
else:
if len(newsegs) >= 10:
qsegs = span
else:
qsegs = newsegs
# parse configuration for query
kwargs = {}
if url is not None:
kwargs['url'] = url
else:
try:
kwargs['url'] = config.get('segment-database', 'url')
#.........这里部分代码省略.........