

Python Cache.fromfile Method Code Examples

This article collects typical usage examples of the Python method glue.lal.Cache.fromfile. If you have been wondering what exactly Cache.fromfile does, how to call it, or what real-world uses look like, the hand-picked examples below should help. You can also explore further usage examples of glue.lal.Cache, the class this method belongs to.


The following shows 10 code examples of the Cache.fromfile method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
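
Before the individual examples, here is a minimal sketch of the pattern they all share: open a LAL-format cache file, parse it with Cache.fromfile, and optionally sieve the entries down to a GPS segment of interest. The file name example.cache and the GPS times below are illustrative placeholders, not values taken from any of the examples.

# A minimal usage sketch; 'example.cache' and the GPS span are assumed values
from glue.lal import Cache
from glue.segments import segment

# parse the LAL-format cache file into a glue.lal.Cache object
with open('example.cache', 'r') as fobj:
    cache = Cache.fromfile(fobj)

# keep only the entries that overlap a GPS span of interest
span = segment(1126259446, 1126259478)
cache = cache.sieve(segment=span)

print("%d matching entries" % len(cache))
print(cache.pfnlist())  # the corresponding file paths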

Example 1: open_cache

# Required import: from glue.lal import Cache [as alias]
# or: from glue.lal.Cache import fromfile [as alias]
def open_cache(lcf):
    """Read a LAL-format cache file into memory as a
    :class:`glue.lal.Cache`.
    """
    if isinstance(lcf, file):
        return Cache.fromfile(lcf)
    else:
        with open(lcf, 'r') as f:
            return Cache.fromfile(f)
Author: paulaltin, Project: gwpy, Lines: 11, Source: cache.py

Example 2: process

# Required import: from glue.lal import Cache [as alias]
# or: from glue.lal.Cache import fromfile [as alias]
    def process(self, *args, **kwargs):
        error = None
        # read the cache files
        if isinstance(self.cache, str) and os.path.isfile(self.cache):
            with open(self.cache, 'r') as fobj:
                try:
                    self.cache = Cache.fromfile(fobj).sieve(
                        segment=self.span)
                except ValueError as e:
                    if "could not convert \'\\n\' to CacheEntry" in str(e):
                        error = 'could not parse event cache file'
                    else:
                        raise
        elif isinstance(self.cache, str):
            error = 'could not locate event cache file'
            warn("Cache file %s not found." % self.cache)
        elif self.cache is not None and not isinstance(self.cache, Cache):
            raise ValueError("Cannot parse EventTriggerTab.cache of type %r"
                             % type(self.cache))
        # push error to all states for HTML writing
        if error:
            for state in self.states:
                self.error[state] = (
                    'danger', 'This analysis seems to have failed: %s.' % error)
        # only process if the cache file was found
        if kwargs.get('trigcache', None) is None:
            kwargs['trigcache'] = self.cache
        try:
            super(EventTriggerTab, self).process(*args, **kwargs)
        except IOError as e:
            warn('Caught %s: %s' % (type(e).__name__, str(e)))
            msg = "GWSumm failed to process these data.<pre>%s</pre>" % str(e)
            for state in self.states:
                self.error[state] = ('danger', msg)
Author: pvasired, Project: gwsumm, Lines: 36, Source: etg.py

Example 3: process

# Required import: from glue.lal import Cache [as alias]
# or: from glue.lal.Cache import fromfile [as alias]
    def process(self, *args, **kwargs):
        # read the segment files
        if os.path.isfile(self.segmentfile):
            segs = DataQualityFlag.read(self.segmentfile, coalesce=False)
            self.states[0].known = segs.known
            self.states[0].active = segs.active
            self.states[0].ready = True
        else:
            warn('Segment file %s not found.' % self.segmentfile)
            return
        if len(self.states[0].active) == 0:
            warn('No segments analysed by daily ahope.')
            return
        # read the cache files
        if os.path.isfile(self.inspiralcachefile):
            with open(self.inspiralcachefile, 'r') as fobj:
                try:
                    self.inspiralcache = Cache.fromfile(fobj).sieve(
                                             segment=self.span)
                except ValueError as e:
                    if "could not convert \'\\n\' to CacheEntry" in str(e):
                        self.inspiralcache = Cache()
                    else:
                        raise
        else:
            warn("Cache file %s not found." % self.inspiralcachefile)
            return
        if os.path.isfile(self.tmpltbankcachefile):
            with open(self.tmpltbankcachefile, 'r') as fobj:
                try:
                    self.tmpltbankcache = Cache.fromfile(fobj).sieve(
                                              segment=self.span)
                except ValueError as e:
                    if "could not convert \'\\n\' to CacheEntry" in str(e):
                        self.tmpltbankcache = Cache()
                    else:
                        raise
        else:
            warn("Cache file %s not found." % self.tmpltbankcachefile)
            self.tmpltbankcache = Cache()

        # only process if the cache files were found
        super(DailyAhopeTab, self).process(*args, **kwargs)
Author: berkowitze, Project: gwsumm, Lines: 45, Source: ihope.py

Example 4: get_omicron_triggers

# Required import: from glue.lal import Cache [as alias]
# or: from glue.lal.Cache import fromfile [as alias]
def get_omicron_triggers(channel, ifo, segments, cachefile):
  print "Reading channel: %s\n" % channel
  with open(cachefile, 'r') as f:
    mycache = Cache.fromfile(f)
  # Let's try to catch failed reads
  try:
    triggers = get_triggers(ifo + ':' + channel, 'sngl_burst', segments,
                            cache=mycache)
  except Exception:
    print "No Omicron triggers read for channel %s" % channel
    return None

  return triggers
Author: WanduiAlbert, Project: SummerResearch, Lines: 15, Source: VETplotter.py

Example 5: load_omic_trigs

# Required import: from glue.lal import Cache [as alias]
# or: from glue.lal.Cache import fromfile [as alias]
def load_omic_trigs(omicroncachefile, segs):
  # Read in the Omicron triggers
  with open(omicroncachefile, 'r') as cachefile:
    cache = Cache.fromfile(cachefile)

  omic_trigs = SnglBurstTable.read(cache, verbose=True,
      filt=lambda x: x.snr < 100 and x.peak_frequency < 100)

  # Check if Omicron triggers have been read in successfully
  if not omic_trigs:
    sys.exit("ERROR: No triggers for Omicron channel: %s"
             % omicroncachefile.split('.')[0])
  else:
    print "%d Omicron triggers read" % len(omic_trigs)

  # Get the Omicron triggers that lie within the valid segment list
  omic_trigs = omic_trigs.vetoed(segs)
  return omic_trigs
Author: WanduiAlbert, Project: SummerResearch, Lines: 19, Source: offsets.py

Example 6: run_command

# Required import: from glue.lal import Cache [as alias]
# or: from glue.lal.Cache import fromfile [as alias]
    def run_command(self, options={}, args=[]):
        if len(args) not in [1]:
            self.parser.error("cachefile is required.")

        config = getLarsConfig()
        if not config:
            print "This analysis does not appear to have a reservation. (no %s)" % INI_NAME
            print "If a reservation has been lost, try 'lars info [--repair]'"
            print "to try to recover your '%s'" % INI_NAME
            return

        id = config.get('lars','id')

        cachefilename = args[0]
        with open(cachefilename, "r") as cachefile:
            cache = Cache.fromfile(cachefile)
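        # to_segmentlistdict() maps each observatory to the segments
        # covered by its files in the cache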
        segdir = cache.to_segmentlistdict()
        extent = segdir.extent_all()
        gpsStart = int(extent[0])
        gpsEnd = int(extent[1])
        ifos = mkIfos(segdir.keys())

        duration = gpsEnd - gpsStart

        url = makeNiceUrl(os.getcwd())

        if options.dry_run:
            print "Dry run.  Results not saved"
            print "gpsStart:  ", gpsStart
            print "gpsEnd:    ", gpsEnd
            print "duration:  ", duration
            print "IFOs:      ", ifos
            print "Cachefile: ", cachefilename
            print "Location:  ", url
            return

        server = serviceProxy(config.get('lars', 'serviceUrl'))
        rv = server.publish(id, ifos, gpsStart, duration, url, makeNiceUrl(cachefilename))
        rv = objectify(rv)
        print "Published:", rv.uid
Author: GeraintPratten, Project: lalsuite, Lines: 43, Source: __init__.py

Example 7: find_daily_cache

# Required import: from glue.lal import Cache [as alias]
# or: from glue.lal.Cache import fromfile [as alias]
def find_daily_cache(start, end, ifo, clustering=None, check_files=False,
                     **kwargs):
    """Find Daily ihope files from the daily runs for the given span

    @param start
        GPS start time for search
    @param end
        GPS end time for search
    @param ifo
        observatory for search
    @param clustering
        tag for clustering stage to search, default: unclustered
    @param check_files
        check that the returned files can be read on disk, default False
    @param kwargs UNDOCUMENTED
    """
    out = Cache()

    # set clustering tag
    if clustering is None or clustering.upper() == 'UNCLUSTERED':
        file_tag = 'INSPIRAL_UNCLUSTERED'
    elif clustering.upper() in ["100MS", "100MILLISEC"]:
        file_tag = 'INSPIRAL_100MILLISEC_CLUSTERED'
    elif clustering.upper() in ["30MS", "30MILLISEC"]:
        file_tag = 'INSPIRAL_30MILLISEC_CLUSTERED'
    elif clustering.upper() in ["16S", "16SECOND"]:
        file_tag = 'INSPIRAL_16SEC_CLUSTERED'
    else:
        # an unrecognised tag would otherwise leave file_tag undefined
        raise ValueError("unrecognised clustering tag %r" % clustering)

    # set base directory
    directory = kwargs.pop("directory", os.path.expanduser("~cbc/ihope_daily"))

    # work out days
    span = Segment(start, end)
    start = int(start)
    start_d = lal.UTCToGPS(datetime(*lal.GPSToUTC(start)[:6]).replace(
                                       hour=0, minute=0, second=0).timetuple())
    days = []
    day = start_d
    while day <= end:
        days.append(day)
        day += 86400

    # optimise
    append = out.append
    splitext = os.path.splitext
    isfile = os.path.isfile
    pjoin = os.path.join
    intersects = span.intersects
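    # CacheEntry.from_T050017 parses file names that follow the LIGO
    # T050017 convention (IFO-TAG-GPSSTART-DURATION.extension)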
    from_T050017 = CacheEntry.from_T050017

    # loop over days gathering files
    for day in days:
        utc = datetime(*lal.GPSToUTC(day)[:6])
        day_path = pjoin(directory, utc.strftime("%Y%m"),
                         utc.strftime("%Y%m%d"))
        day_cache = os.path.join(day_path, "%s-%s.cache" % (ifo, file_tag))
        if isfile(day_cache):
            with open(day_cache, "r") as f:
                filenames = Cache.fromfile(f).pfnlist()
        else:
            filenames = glob(os.path.join(day_path,
                                               ("%s-%s-*.xml.gz"
                                                % (ifo, file_tag))))
        for filename in filenames:
            e = from_T050017(filename)
            if intersects(e.segment):
                append(e)

    out.sort(key=lambda e: e.path)
    return out
Author: GeraintPratten, Project: lalsuite, Lines: 72, Source: daily_ihope.py

Example 8: map

# Required import: from glue.lal import Cache [as alias]
# or: from glue.lal.Cache import fromfile [as alias]
#
conditions = map(parse_specification, opts.specifier)

#
# Put the conditions together
#
channel_cond = defaultdict(list)
for inst, channel_name, op, threshold in conditions:
	channel = "%s:%s" % (inst, channel_name)
	channel_cond[channel].append((op, threshold))

#
# Read the data and such
#
ifos = list(set([c[:2] for c in channel_cond.keys()]))
cache = Cache.fromfile(open(opts.frame_cache))
seg = cache.to_segmentlistdict()[ifos[0][0]][0]
if opts.verbose:
	print "Loaded %s, total coverage time: %f" % (opts.frame_cache, abs(seg))

#
# Set up the XML document
#
xmldoc = ligolw.Document()
xmldoc.appendChild(ligolw.LIGO_LW())
# Append the process information
procrow = utils.process.append_process(xmldoc, program=sys.argv[0])
utils.process.append_process_params(xmldoc, procrow, process.process_params_from_dict(opts.__dict__))

#
# Segment storage
Author: GeraintPratten, Project: lalsuite, Lines: 33, Source: laldetchar-segments-from-data.py

Example 9: process_options

# Required import: from glue.lal import Cache [as alias]
# or: from glue.lal.Cache import fromfile [as alias]
def process_options():
	"""
	Process options and check for required values.
	"""
	opt = OptionParser()
	opt.add_option( "-c", "--input-cache", help="Read triggers from the files in this cache." )
	opt.add_option( "-v", "--verbose", action="store_true", help="Be verbose." )
	veto_settings = OptionGroup( opt, "HVeto settings" )
	veto_settings.add_option( "-i", "--instrument", help="Instrument against which to veto. Required." )
	veto_settings.add_option( "-r", "--reference-channel", help="Channel against which to veto. Required." )
	veto_settings.add_option( "-t", "--reference-triggers", help="File path to load reference triggers. Required." )
	veto_settings.add_option( "-s", "--significance-threshold", type=float, default=15, help="Significance below which to terminate the rounds. Default is 15." )
	veto_settings.add_option( "--snr-thresh", action="append", help="Add an SNR threshold to use in veto round. Can be given multiple times for different values. WARNING: This will override the default settings, *not* append to them." )
	veto_settings.add_option( "--time-window", action="append", help="Add a time window to use in veto round. Can be given multiple times for different values. WARNING: This will override the default settings, *not* append to them." )
	veto_settings.add_option( "-S", "--min-ref-snr", type=float, default=8, help="Minimum SNR threshold to load a trigger in the reference channel." )
	# FIXME: Strictly speaking the ignore list is required because I'm not
	# sure what the function will do without one?
	veto_settings.add_option( "-I", "--ignore-list", help="Text file, one channel per line with a list of channels to ignore when loading triggers." )
	# FIXME:
	#veto_settings.add_option( "-C", "--ignore-channel", action="append", help="Ignore these channels. Given several times, will ignore several channels. Do not prepend instrument. E.g. -C LSC-DARM_CTRL." )
	veto_settings.add_option( "--write-coinc", action="store_true", default=False, help="If set, output table will include coinc tables indicating which triggers were coincided in the process of execution." )
	opt.add_option_group( veto_settings )

	livetime_settings = OptionGroup( opt, "livetime settings" )
	# FIXME:
	#livetime_settings.add_option( "-L", "--livetime-definer", action="append", help="Name of segment definer entry from which to draw live segments. See '-l' option. If none is indicated, use all segments. Provide several times for several different segment definers." )
	livetime_settings.add_option( "-l", "--livetime-segments", help="File from which to parse livetime segments. Will assume, at first, a LIGOLW XML file with valid segment definer and segment tables. If this fails, will try segwizard format. Required." )
	livetime_settings.add_option( "--segment-definer", help="In tandem with --livetime-segments will retrieve segments with this definer. If none is provided, all segments will be used. Note: this option is REQUIRED if querying a databse. Example: H1:DMT-SCIENCE:1 (version is required)" )
	livetime_settings.add_option( "--segment-database", help="Query this URL for segments. Takes precedence over providing a file." )
	livetime_settings.add_option( "--gps-start", type=int, help="GPS start of analysis." )
	livetime_settings.add_option( "--gps-end", type=int, help="GPS end of analysis." )
	opt.add_option_group( livetime_settings )

	opts, args = opt.parse_args()
	if opts.instrument is None:
		print >>sys.stderr, "Instrument must be indicated."
		exit()
	if opts.reference_channel is None:
		print >>sys.stderr, "Reference channel must be indicated."
		exit()
	if opts.reference_triggers is None:
		print >>sys.stderr, "Reference triggers must be present."
		exit()
	if opts.livetime_segments is None and opts.segment_database is None:
		print >>sys.stderr, "Must provide livetime segments file or segment database location."
		exit()
	if opts.segment_database and (opts.segment_definer is None):
		print >>sys.stderr, "Must provide definer for segment database querying."
		exit()
	if len(args) == 0 and opts.input_cache is None:
		print >>sys.stderr, "Must provide input arguments or set --input-cache."
		exit()
	if opts.input_cache is not None:
		with open(opts.input_cache) as cache:
			c = Cache.fromfile(cache)
			args.extend( c.pfnlist() )
	if opts.ignore_list is None:
		print >>sys.stderr, "Must provide a channel ignore list."
		exit()

	return opts, args
Author: GeraintPratten, Project: lalsuite, Lines: 63, Source: laldetchar-hveto.py

Example 10: list

# Required import: from glue.lal import Cache [as alias]
# or: from glue.lal.Cache import fromfile [as alias]
########## apply segment list (of good times)

segments = SegmentList.read('L1_ER7_segments.txt')

for cachefile in cachelist:
  ### open trigger cache
  # Make a tag for the saving the plot as well as the title
  # The tag is the name of the channel extracted from the path
  tag = cachefile.split('/')[-1]
  tag = tag.split('.')[0]
  tag = tag.replace('_Omicron', '')

  print ('\n\nReading file: %s now ...\n' % tag)
  with open(cachefile, 'r') as fobj:
      cache = Cache.fromfile(fobj)


  ### read triggers
  # filter to select for triggers with frequency < 100
  #trigs = SnglBurstTable.read(cache, verbose=True, filt=lambda t: t.peak_frequency < 100)

  # filter to select for triggers with frequency < 100 and snr < 100
  trigs = get_triggers('L1:'+tag, 'sngl_burst', segments, cache=cache)


  ### check triggers read successfully
  if not trigs:
    print("    WARNING: No triggers for channel '%s'." % channel,
                file=sys.stderr)
  else:
Author: WanduiAlbert, Project: SummerResearch, Lines: 32, Source: plot_time_snr.py


Note: The glue.lal.Cache.fromfile examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's License; do not reproduce without permission.