This page collects typical usage examples of the Python method glue.lal.Cache.sort. If you are wondering what Cache.sort does and how to use it, the curated code examples below may help. You can also look at the containing class, glue.lal.Cache, for more context.
The following shows 11 code examples of Cache.sort, ordered by popularity by default.
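Before the examples, a minimal sketch of the method itself: Cache is a list subclass holding CacheEntry objects, so sort accepts the usual key argument of list.sort. The file names below are hypothetical, but follow the T050017 naming convention (IFO-TAG-GPSSTART-DURATION.ext) that CacheEntry.from_T050017 parses.

from glue.lal import Cache, CacheEntry

cache = Cache(CacheEntry.from_T050017(f) for f in [
    "/data/H1-KW_TRIGGERS-1126250064-64.xml",
    "/data/H1-KW_TRIGGERS-1126250000-64.xml",
])
cache.sort(key=lambda e: e.path)        # lexical order by file path
cache.sort(key=lambda e: e.segment[0])  # chronological order by GPS start time

Sorting happens in place and returns None, exactly like list.sort; the examples below all rely on this.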
Example 1: find_trigger_urls
# Required import: from glue.lal import Cache [as alias]
# Or: from glue.lal.Cache import sort [as alias]
def find_trigger_urls(channel, etg, gpsstart, gpsend, verbose=False):
    """Find the paths of trigger files that represent the given
    observatory, channel, and ETG (event trigger generator) for a given
    GPS [start, end) segment.
    """
    if etg.lower() == 'omicron':
        etg = '?micron'
    # construct search
    span = Segment(gpsstart, gpsend)
    ifo, channel = channel.split(':', 1)
    trigtype = "%s_%s" % (channel, etg.lower())
    epoch = '*'
    searchbase = os.path.join(TRIGFIND_BASE_PATH, epoch, ifo, trigtype)
    gpsdirs = range(int(str(gpsstart)[:5]), int(str(gpsend)[:5])+1)
    trigform = ('%s-%s_%s-%s-*.xml*'
                % (ifo, re_dash.sub('_', channel), etg.lower(), '[0-9]'*10))
    # perform and cache results
    out = Cache()
    for gpsdir in gpsdirs:
        gpssearchpath = os.path.join(searchbase, str(gpsdir), trigform)
        if verbose:
            gprint("Searching %s..." % os.path.split(gpssearchpath)[0],
                   end=' ')
        gpscache = Cache(map(CacheEntry.from_T050017,
                             glob.glob(os.path.join(searchbase, str(gpsdir),
                                                    trigform))))
        out.extend(gpscache.sieve(segment=span))
        if verbose:
            gprint("%d found" % len(gpscache.sieve(segment=span)))
    out.sort(key=lambda e: e.path)
    return out
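Here Cache.sieve(segment=span) keeps only entries whose [start, end) file segment intersects the requested span, and the final sort(key=lambda e: e.path) puts the surviving entries in path order. A hypothetical call, assuming the module-level names used above (TRIGFIND_BASE_PATH, Segment, re_dash, gprint) are defined in the surrounding module; the channel name and GPS times are placeholders:

cache = find_trigger_urls('L1:GDS-CALIB_STRAIN', 'omicron',
                          1126259446, 1126259478, verbose=True)
print(len(cache))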
Example 2: find_kw
# Required import: from glue.lal import Cache [as alias]
# Or: from glue.lal.Cache import sort [as alias]
def find_kw(channel, start, end, base=None):
    """Find KW trigger XML files
    """
    span = Segment(to_gps(start), to_gps(end))
    channel = get_channel(channel)
    ifo = channel.ifo
    if base is None and channel.name.split(':', 1)[-1] == 'GDS-CALIB_STRAIN':
        tag = '%s-KW_HOFT' % ifo[0].upper()
        base = '/gds-%s/dmt/triggers/%s' % (ifo.lower(), tag)
    elif base is None:
        tag = '%s-KW_TRIGGERS' % ifo[0].upper()
        base = '/gds-%s/dmt/triggers/%s' % (ifo.lower(), tag)
    gps5 = int('%.5s' % start)
    end5 = int('%.5s' % end)
    out = Cache()
    append = out.append
    while gps5 <= end5:
        trigglob = os.path.join(
            base, '%s-%d' % (tag, gps5), '%s-*-*.xml' % tag)
        found = glob.glob(trigglob)
        for f in found:
            ce = CacheEntry.from_T050017(f)
            if ce.segment.intersects(span):
                append(ce)
        gps5 += 1
    out.sort(key=lambda e: e.path)
    vprint(" Found %d files for %s (KW)\n"
           % (len(out), channel.ndsname))
    return out
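The gps5/end5 loop walks the per-100000-second directories that the trigger writer produces, using the first five digits of the GPS time as the directory suffix. A quick sketch of the prefix arithmetic (the values are illustrative):

start, end = 1126250000, 1126400000
gps5 = int('%.5s' % start)   # 11262
end5 = int('%.5s' % end)     # 11264
# so the loop visits .../<tag>-11262, .../<tag>-11263 and .../<tag>-11264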
Example 3: find_dmt_omega
# Required import: from glue.lal import Cache [as alias]
# Or: from glue.lal.Cache import sort [as alias]
def find_dmt_omega(channel, start, end, base=None):
    """Find DMT-Omega trigger XML files
    """
    span = Segment(to_gps(start), to_gps(end))
    channel = get_channel(channel)
    ifo = channel.ifo
    if base is None and channel.name.split(':', 1)[-1] == 'GDS-CALIB_STRAIN':
        base = '/gds-%s/dmt/triggers/%s-HOFT_Omega' % (
            ifo.lower(), ifo[0].upper())
    elif base is None:
        raise NotImplementedError("This method doesn't know how to locate DMT "
                                  "Omega trigger files for %r" % str(channel))
    gps5 = int('%.5s' % start)
    end5 = int('%.5s' % end)
    out = Cache()
    append = out.append
    while gps5 <= end5:
        trigglob = os.path.join(
            base, str(gps5),
            '%s-%s_%s_%s_OmegaC-*-*.xml' % (
                ifo, channel.system, channel.subsystem, channel.signal))
        found = glob.glob(trigglob)
        for f in found:
            ce = CacheEntry.from_T050017(f)
            if ce.segment.intersects(span):
                append(ce)
        gps5 += 1
    out.sort(key=lambda e: e.path)
    vprint(" Found %d files for %s (DMT-Omega)\n"
           % (len(out), channel.ndsname))
    return out
Example 4: find_online_cache
# Required import: from glue.lal import Cache [as alias]
# Or: from glue.lal.Cache import sort [as alias]
def find_online_cache(start, end, channel, **kwargs):
    """Find ExcessPower files from the online GSTLAL analysis
    for the given span

    @param start
        GPS start time for search
    @param end
        GPS end time for search
    @param channel UNDOCUMENTED
    @param kwargs UNDOCUMENTED
        'ifo' observatory for search
        'clustering'
            tag for clustering stage to search, default: unclustered
        'check_files'
            check that the returned files can be read on disk, default False
    """
    out = Cache()
    # set base directory
    directory = kwargs.pop("directory", ER3_RUN_DIRECTORY)
    ifo, channel = channel.split(":", 1)
    channel_dir = os.path.join(directory, ifo, "%s_excesspower" % channel)
    glob_query = "%s-%s_excesspower-*.xml" % (ifo, channel.replace("-", "_"))
    span = Segment(start, end)
    # optimise
    append = out.append
    splitext = os.path.splitext
    isfile = os.path.isfile
    pjoin = os.path.join
    intersects = span.intersects
    from_T050017 = CacheEntry.from_T050017
    # loop over days gathering files
    t = start // 1e4 * 1e4
    while t < end:
        gps_dir = os.path.join(channel_dir, "%.6s" % t)
        if os.path.isdir(gps_dir):
            file_list = glob(os.path.join(gps_dir, glob_query))
            for f in file_list:
                e = from_T050017(f)
                if intersects(e.segment):
                    append(e)
        t += 1e4
    out.sort(key=lambda e: e.segment[0])
    return out
Example 5: find_trigger_urls
# Required import: from glue.lal import Cache [as alias]
# Or: from glue.lal.Cache import sort [as alias]
def find_trigger_urls(channel, etg, gpsstart, gpsend, verbose=False):
    """Find the paths of trigger files that represent the given
    observatory, channel, and ETG (event trigger generator) for a given
    GPS [start, end) segment.
    """
    if etg.lower().startswith('omicron'):
        etg = '?' + etg[1:]
    # construct search
    gpsstart = to_gps(gpsstart).seconds
    gpsend = to_gps(gpsend).seconds
    span = Segment(gpsstart, gpsend)
    ifo, channel = channel.split(':', 1)
    trigtype = "%s_%s" % (channel, etg.lower())
    epoch = '*'
    searchbase = os.path.join(TRIGFIND_BASE_PATH, epoch, ifo, trigtype)
    gpsdirs = range(int(str(gpsstart)[:5]), int(str(gpsend)[:5])+1)
    trigform = ('%s-%s_%s-%s-*.xml*'
                % (ifo, re_dash.sub('_', channel), etg.lower(), '[0-9]'*10))
    # test for channel-level directory
    if not glob.glob(searchbase):
        raise ValueError("No channel-level directory found at %s. Either the "
                         "channel name or ETG names are wrong, or this "
                         "channel is not configured for this ETG."
                         % searchbase)
    # perform and cache results
    out = Cache()
    append = out.append
    for gpsdir in gpsdirs:
        gpssearchpath = os.path.join(searchbase, str(gpsdir), trigform)
        if verbose:
            gprint("Searching %s..." % os.path.split(gpssearchpath)[0],
                   end=' ')
        found = set(map(
            os.path.realpath,
            glob.glob(os.path.join(searchbase, str(gpsdir), trigform))))
        n = 0
        for f in found:
            ce = CacheEntry.from_T050017(f)
            if ce.segment.intersects(span):
                append(ce)
                n += 1
        if verbose:
            gprint("%d found" % n)
    out.sort(key=lambda e: e.path)
    return out
Example 6: find_trigger_urls
# Required import: from glue.lal import Cache [as alias]
# Or: from glue.lal.Cache import sort [as alias]
def find_trigger_urls(channel, etg, gpsstart, gpsend, verbose=False, **kwargs):
    """Find the paths of trigger files that represent the given
    observatory, channel, and ETG (event trigger generator) for a given
    GPS [start, end) segment.
    """
    # special case for KW
    if etg.lower() in ['kw', 'kleinewelle']:
        from .kw import find_dmt_cache
        ifo = channel.split(':')[0]
        kwargs.setdefault('extension', 'xml')
        kwargs.setdefault('check_files', True)
        return find_dmt_cache(gpsstart, gpsend, ifo, **kwargs)
    elif etg.lower() == 'omega':
        from .omega import find_dmt_cache
        ifo = channel.split(':')[0]
        kwargs.setdefault('check_files', True)
        return find_dmt_cache(gpsstart, gpsend, ifo, **kwargs)
    elif etg.lower() == 'omicron':
        etg = '?micron'
    # construct search
    span = segments.segment(gpsstart, gpsend)
    ifo, channel = channel.split(':', 1)
    trigtype = "%s_%s" % (channel, etg.lower())
    epoch = '*'
    searchbase = os.path.join(TRIGFIND_BASE_PATH, epoch, ifo, trigtype)
    gpsdirs = numpy.arange(int(str(gpsstart)[:5]), int(str(gpsend)[:5])+1)
    trigform = ('%s-%s_%s-%s-*.xml*'
                % (ifo, re.sub('-', '_', channel), etg.lower(), '[0-9]'*10))
    # perform and cache results
    out = Cache()
    for gpsdir in gpsdirs:
        gpssearchpath = os.path.join(searchbase, str(gpsdir), trigform)
        if verbose:
            sys.stdout.write("Searching %s..."
                             % os.path.split(gpssearchpath)[0])
            sys.stdout.flush()
        gpscache = Cache(map(CacheEntry.from_T050017,
                             glob.glob(os.path.join(searchbase, str(gpsdir),
                                                    trigform))))
        out.extend(gpscache.sieve(segment=span))
        if verbose:
            sys.stdout.write(" %d found\n" % len(gpscache.sieve(segment=span)))
    out.sort(key=lambda e: e.path)
    return out
Example 7: find_daily_cache
# Required import: from glue.lal import Cache [as alias]
# Or: from glue.lal.Cache import sort [as alias]
def find_daily_cache(start, end, ifo, clustering=None, check_files=False,
                     **kwargs):
    """Find Daily ihope files from the daily runs for the given span

    @param start
        GPS start time for search
    @param end
        GPS end time for search
    @param ifo
        observatory for search
    @param clustering
        tag for clustering stage to search, default: unclustered
    @param check_files
        check that the returned files can be read on disk, default False
    @param kwargs UNDOCUMENTED
    """
    out = Cache()
    # set clustering tag
    if clustering is None or clustering.upper() == 'UNCLUSTERED':
        file_tag = 'INSPIRAL_UNCLUSTERED'
    elif clustering.upper() in ["100MS", "100MILLISEC"]:
        file_tag = 'INSPIRAL_100MILLISEC_CLUSTERED'
    elif clustering.upper() in ["30MS", "30MILLISEC"]:
        file_tag = 'INSPIRAL_30MILLISEC_CLUSTERED'
    elif clustering.upper() in ["16S", "16SECOND"]:
        file_tag = 'INSPIRAL_16SEC_CLUSTERED'
    # set base directory
    directory = kwargs.pop("directory", os.path.expanduser("~cbc/ihope_daily"))
    # work out days
    span = Segment(start, end)
    start = int(start)
    start_d = lal.UTCToGPS(datetime(*lal.GPSToUTC(start)[:6]).replace(
        hour=0, minute=0, second=0).timetuple())
    days = []
    day = start_d
    while day <= end:
        days.append(day)
        day += 86400
    # optimise
    append = out.append
    splitext = os.path.splitext
    isfile = os.path.isfile
    pjoin = os.path.join
    intersects = span.intersects
    from_T050017 = CacheEntry.from_T050017
    # loop over days gathering files
    for day in days:
        utc = datetime(*lal.GPSToUTC(day)[:6])
        day_path = pjoin(directory, utc.strftime("%Y%m"),
                         utc.strftime("%Y%m%d"))
        day_cache = os.path.join(day_path, "%s-%s.cache" % (ifo, file_tag))
        if isfile(day_cache):
            with open(day_cache, "r") as f:
                filenames = Cache.fromfile(f).pfnlist()
        else:
            filenames = glob(os.path.join(day_path,
                                          ("%s-%s-*.xml.gz"
                                           % (ifo, file_tag))))
        for filename in filenames:
            e = from_T050017(filename)
            if intersects(e.segment):
                append(e)
    out.sort(key=lambda e: e.path)
    return out
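The day_cache branch relies on two other glue.lal helpers: Cache.fromfile parses an existing LAL-format cache file from an open file object, and pfnlist returns the plain list of file paths. A minimal sketch (the cache path is hypothetical):

from glue.lal import Cache

with open("/path/to/H1-INSPIRAL_UNCLUSTERED.cache") as f:
    day = Cache.fromfile(f)
paths = day.pfnlist()  # plain list of file paths, one per entry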
Example 8: find_dmt_cache
# Required import: from glue.lal import Cache [as alias]
# Or: from glue.lal.Cache import sort [as alias]
def find_dmt_cache(start, end, ifo, check_files=False, **kwargs):
    """Find DMTOmega files for the given GPS period.

    @param start
        GPS start time for search
    @param end
        GPS end time for search
    @param ifo
        observatory for search
    @param check_files
        check that the returned files can be read on disk, default False
    @param kwargs UNDOCUMENTED
    """
    out = Cache()
    # verify host
    host = {'G1': 'atlas', 'H1': 'ligo-wa', 'H2': 'ligo-wa', 'L1': 'ligo-la'}
    if (not kwargs.has_key('directory') and not
            re.search(host[ifo], getfqdn())):
        sys.stderr.write("WARNING: Omega online files are not available for "
                         "IFO=%s on this host." % ifo)
        sys.stderr.flush()
        return out
    span = segments.segment(start, end)
    # set known epochs
    known_epochs = {1031340854: 55, 1041657635: 55, 1041669472: 55,
                    1041682187: 55, 1044093810: 38, 1044111232: 38,
                    1044111282: 38, 1044112180: 38, 1057700030: 38,
                    1057722672: 38}
    # get parameters
    epoch = kwargs.pop("epoch", sorted(known_epochs.keys()))
    dt = kwargs.pop("duration", 55)
    try:
        iter(epoch)
    except TypeError:
        epoch = [epoch]
    overlap = kwargs.pop("overlap", 0)
    directory = kwargs.pop("directory",
                           "/gds-%s/dmt/triggers/%s-Omega_Triggers"
                           % (ifo.lower(), ifo[0].upper()))
    # optimise
    append = out.append
    splitext = os.path.splitext
    isfile = os.path.isfile
    intersects = span.intersects
    segment = segments.segment
    from_T050017 = CacheEntry.from_T050017
    # get times
    epoch_idx = bisect.bisect_right(epoch, start)-1
    try:
        dt = known_epochs[epoch[epoch_idx]]
    except KeyError:
        dt = 38
    next_epoch = len(epoch) >= epoch_idx+2 and epoch[epoch_idx+1] or 0
    start_time = int(start-numpy.mod(start-epoch[epoch_idx], dt-overlap))
    t = start_time

    def _omega_file(gps, ifo, deltaT):
        return ("%s/%s-OMEGA_TRIGGERS_CLUSTER-%.5s/"
                "%s-OMEGA_TRIGGERS_CLUSTER-%.10d-%d.xml"
                % (directory, ifo.upper(), gps, ifo.upper(), gps, deltaT))
    # loop over time segments constructing file paths
    while t < end:
        fp = _omega_file(t, ifo, dt)
        if (intersects(segment(t, t+dt)) and
                (not check_files or isfile(fp))):
            append(from_T050017(fp))
        t += dt - overlap
        if next_epoch and t > next_epoch:
            try:
                dt = known_epochs[next_epoch]
            except KeyError:
                dt = 55
            t = next_epoch
            epoch_idx += 1
            next_epoch = len(epoch) >= epoch_idx+2 and epoch[epoch_idx+1] or 0
    out.sort(key=lambda e: e.path)
    return out
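The start_time line snaps the requested start time onto the stride grid anchored at the selected epoch, so the constructed file names line up with what the online writer actually produced. A small numeric illustration using the same numpy.mod expression (the values are made up):

import numpy

epoch, dt, overlap = 1031340854, 55, 0
start = 1031341000
start_time = int(start - numpy.mod(start - epoch, dt - overlap))
# start - epoch = 146 and 146 % 55 = 36, so start_time = 1031340964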
Example 9: find_online_cache
# Required import: from glue.lal import Cache [as alias]
# Or: from glue.lal.Cache import sort [as alias]
def find_online_cache(start, end, ifo, mask='DOWNSELECT',
                      check_files=False, **kwargs):
    """Find Omega Online files for the given GPS period.

    @param start
        GPS start time for search
    @param end
        GPS end time for search
    @param ifo
        observatory for search
    @param mask
        description tag of Omega ASCII to search
    @param check_files
        check that the returned files can be read on disk, default False
    @param kwargs UNDOCUMENTED
    """
    out = Cache()
    # verify host
    host = {'G1': 'atlas', 'H1': 'ligo-wa', 'H2': 'ligo-wa', 'L1': 'ligo-la'}
    if (not kwargs.has_key('directory') and not
            re.search(host[ifo], getfqdn())):
        sys.stderr.write("WARNING: Omega online files are not available for "
                         "IFO=%s on this host." % ifo)
        sys.stderr.flush()
        return out
    span = segments.segment(start, end)
    # get parameters
    dt = kwargs.pop("duration", 64)
    overlap = kwargs.pop("overlap", 8)
    if ifo == "G1":
        directory = kwargs.pop("directory", "/home/omega/online/G1/segments")
        epoch = kwargs.pop("epoch", 983669456)
    else:
        directory = kwargs.pop("directory",
                               "/home/omega/online/%s/archive/S6/segments"
                               % ifo)
        epoch = kwargs.pop("epoch", 931211808)
    # optimise
    append = out.append
    splitext = os.path.splitext
    isfile = os.path.isfile
    intersects = span.intersects
    segment = segments.segment
    from_T050017 = CacheEntry.from_T050017
    # get times
    start_time = int(start-numpy.mod(start-epoch, dt-overlap))
    t = start_time
    if ifo == "G1":
        def _omega_file(gps, ifo):
            return ("%s/%.5d/%.10d-%.10d/%s-OMEGA_TRIGGERS_%s-%.10d-%d.txt"
                    % (directory, gps/100000, gps, gps+dt, ifo, mask, gps, dt))
    else:
        def _omega_file(gps, ifo):
            return ("%s/%s-%s/%s-OMEGA_TRIGGERS_%s-%s-%s.txt"
                    % (directory, gps, gps+dt, ifo, mask, gps, dt))
    # loop over time segments constructing file paths
    while t < end:
        fp = _omega_file(t, ifo)
        if (intersects(segment(t, t+dt)) and
                (not check_files or isfile(fp))):
            append(from_T050017(fp))
        t += dt - overlap
    out.sort(key=lambda e: e.path)
    return out
Example 10: find_dmt_cache
# Required import: from glue.lal import Cache [as alias]
# Or: from glue.lal.Cache import sort [as alias]
def find_dmt_cache(start, end, ifo, extension="xml", check_files=False,
                   **kwargs):
    """Find DMT KW files for the given GPS period.

    @param start
        GPS start time for search
    @param end
        GPS end time for search
    @param ifo
        observatory for search
    @param extension UNDOCUMENTED
    @param check_files
        check that the returned files can be read on disk, default False
    @param kwargs UNDOCUMENTED
    """
    out = Cache()
    # verify host
    host = {'G1': 'atlas', 'H1': 'ligo-wa', 'H2': 'ligo-wa', 'L1': 'ligo-la'}
    if (not kwargs.has_key('directory') and not
            re.search(host[ifo], getfqdn())):
        sys.stderr.write("WARNING: KW online files are not available for "
                         "IFO=%s on this host." % ifo)
        sys.stderr.flush()
        return out
    span = segments.segment(start, end)
    # set known epochs
    known_epochs = [1026263104]
    # get parameters
    dt = int(kwargs.pop("duration", 64))
    epoch = kwargs.pop("epoch", known_epochs)
    filetag = kwargs.pop("filetag", "KW_TRIGGERS")
    dirtag = filetag.endswith("_TRENDS") and filetag[:-7] or filetag
    try:
        iter(epoch)
    except TypeError:
        epoch = [int(epoch)]
    overlap = int(kwargs.pop("overlap", 0))
    directory = kwargs.pop("directory",
                           "/gds-%s/dmt/triggers/%s-%s"
                           % (ifo.lower(), ifo[0].upper(), dirtag))
    # optimise
    append = out.append
    splitext = os.path.splitext
    isfile = os.path.isfile
    intersects = span.intersects
    segment = segments.segment
    from_T050017 = CacheEntry.from_T050017
    # get times
    epoch_idx = bisect.bisect_right(epoch, start)-1
    next_epoch = len(epoch) >= epoch_idx+2 and epoch[epoch_idx+1] or 0
    start_time = int(start-numpy.mod(start-epoch[epoch_idx], dt-overlap))
    t = start_time

    def _kw_file(gps, ifo):
        return ("%s/%s-%s-%.5s/"
                "%s-%s-%.10d-%d.%s"
                % (directory, ifo.upper()[0], dirtag, gps,
                   ifo.upper()[0], filetag, gps, dt, extension))
    # loop over time segments constructing file paths
    while t < end:
        fp = _kw_file(t, ifo)
        if (intersects(segment(t, t+dt)) and
                (not check_files or isfile(fp))):
            append(from_T050017(fp))
        t += dt - overlap
        if next_epoch and t > next_epoch:
            t = next_epoch
            epoch_idx += 1
            next_epoch = len(epoch) >= epoch_idx+2 and epoch[epoch_idx+1] or 0
    out.sort(key=lambda e: e.path)
    return out
Example 11: get_cache
# Required import: from glue.lal import Cache [as alias]
# Or: from glue.lal.Cache import sort [as alias]
def get_cache(start, end, ifo, channel, mask='DOWNSELECT', checkfilesexist=False,
              **kwargs):
    """
    Returns a glue.lal.Cache containing CacheEntries for all omega online
    trigger files between the given start and end time for the given ifo.
    """
    cache = Cache()
    # verify host
    host = {'G1': 'atlas', 'H1': 'ligo-wa', 'H2': 'ligo-wa', 'L1': 'ligo-la'}
    if (not kwargs.has_key('directory') and not re.search(host[ifo], getfqdn())):
        sys.stderr.write("warning: Omega online files are not available for "
                         "IFO=%s on this host." % ifo)
        sys.stderr.flush()
        return cache
    span = segments.segment(start, end)
    if ifo == 'G1':
        if channel:
            kwargs.setdefault('directory',
                              '/home/omega/online/%s/segments'
                              % channel.replace(':', '_'))
        else:
            kwargs.setdefault('directory', '/home/omega/online/G1/segments')
        kwargs.setdefault('epoch', 0)
    else:
        kwargs.setdefault('directory',
                          '/home/omega/online/%s/archive/S6/segments' % ifo)
        kwargs.setdefault('epoch', 931211808)
    kwargs.setdefault('duration', 64)
    kwargs.setdefault('overlap', 8)
    # optimise
    append = cache.append
    splitext = os.path.splitext
    isfile = os.path.isfile
    intersects = span.intersects
    segment = segments.segment
    from_T050017 = CacheEntry.from_T050017
    basedir = kwargs['directory']
    basetime = kwargs['epoch']
    triglength = kwargs['duration']
    overlap = kwargs['overlap']
    # get times
    start_time = int(start-numpy.mod(start-basetime, triglength-overlap))
    t = start_time
    # loop over time segments constructing file paths and appending to the cache
    while t < end:
        if ifo == 'G1':
            trigfile = '%s/%.5d/%.10d-%.10d/%s-OMEGA_TRIGGERS_%s-%.10d-%d.txt' \
                % (basedir, t/100000, t, t+triglength, ifo, mask, t, triglength)
        else:
            trigfile = '%s/%.10d-%.10d/%s-OMEGA_TRIGGERS_%s-%.10d-%d.txt' \
                % (basedir, t, t+triglength, ifo, mask, t, triglength)
        if intersects(segment(t, t+triglength)) \
                and (not checkfilesexist or isfile(trigfile)):
            append(from_T050017(trigfile))
        t += triglength - overlap
    cache.sort(key=lambda e: e.path)
    return cache
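A hypothetical call of this last example, assuming it runs on a host that passes the hostname check and that the S6 archive layout exists; the GPS interval is illustrative:

cache = get_cache(961545600, 961549200, 'L1', None, checkfilesexist=True)
print(len(cache), cache[0].path if cache else None)

As in the other examples, cache.sort(key=lambda e: e.path) orders the entries in place before the Cache is returned.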