

Python Log.info Method Code Examples

This article collects typical usage examples of the logger.Log.info method in Python. If you are wondering what Log.info does, how to call it, or what real-world uses look like, the curated code examples here should help. You can also explore further usage examples of the enclosing class, logger.Log.


The following presents 8 code examples of the Log.info method, sorted by popularity by default.
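As a primer, here is a minimal sketch of the pattern the examples share. The constructor signature Log(name, level=...) is inferred from the snippets below, so treat it as an assumption rather than the library's documented API:

from logger import Log

log = Log(__name__, level='INFO')  # assumed: wrapper around a named logger
log.info('service started on port {0}'.format(8080))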

Example 1: run_listener

# Required import: from logger import Log [as alias]
# Or: from logger.Log import info [as alias]
import signal
import sys

# AMQPWorker, WorkerThread and WorkerTasks come from the project's own
# test-helper modules (not shown in this snippet).
def run_listener(q):
    log = Log(__name__, level='INFO')
    log.info('Run AMQP listener until ctrl-c input\n {0}'.format(q))
    def thread_func(worker, id):
        worker.start()
    def signal_handler(signum, stack):
        sys.exit(0)
    signal.signal(signal.SIGINT, signal_handler)  # exit cleanly on ctrl-c
    worker = AMQPWorker(queue=q)
    task = WorkerThread(worker, 'amqp')
    tasks = WorkerTasks(tasks=[task], func=thread_func)
    tasks.run()
    tasks.wait_for_completion(timeout_sec=-1)  # block indefinitely
Author: BillyAbildgaard, Project: RackHD, Lines: 15, Source: amqp.py
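A hedged invocation sketch for the example above; 'on.events' is a placeholder queue name, not one taken from RackHD, and run_listener blocks until ctrl-c:

if __name__ == '__main__':
    run_listener('on.events')  # hypothetical queue name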

Example 2: __init__

# Required import: from logger import Log [as alias]
# Or: from logger.Log import info [as alias]
  def __init__(self, gse, merge_cols=True, percentile=.75):
    """Initialize filter. Requires a populated gse.

    Args:
      gse: GSE instance associated with row_iter
      merge_cols: bool, whether to merge columns when possible
      percentile: float in (0, 1], fraction of top rows (by std) to keep
    """
    # 1. Require that GSE is populated and is of the correct type.
    # ==========
    if not gse.populated:
      raise geo.NotPopulatedError("%s must be populated to filter rows." % gse)
    if gse.type != "eQTL":
      raise geo.StudyTypeMismatch("%s must be type 'eQTL', not '%s'." %
        (gse, gse.type))

    # 2. Set Attributes.
    # ==========
    self.gse = gse
    self.col_titles = self.gse.col_titles[:]
    self.col_map = None
    self.rows_filtered = []
    self.rows_per_gene = {}
    self.row_stats = {}
    self.merge_cols = merge_cols
    self.percentile = percentile
    
    # 3. Get column map for column merging.
    # ==========
    n_samples = len(self.gse.samples)
    n_uniques = len(self.gse.subject_gsms)

    # If there are more samples than unique subjects, then create column map.
    if self.merge_cols and n_samples > n_uniques:
      self.col_map = self._make_col_map()
      rx_str = self.gse.parameters['rx_gsm_subject_str']
      Log.info(("Created column merge map for %s (%d samples to %d subjects)" +\
        " with rx '%s'") % \
        (self.gse, n_samples, n_uniques, rx_str))
      # Verify that column merge map is reasonable (num uniques + 1 for ID column)
      if len(self.col_map) != n_uniques + 1:
        Log.warning("Column merge map has %d classes, expected %d in %s." % \
                    (len(self.col_map), n_uniques, self))
        
    # No column merging scheme can exist. Do not create a col_map.
    else:
      # Retrieve the regular expression used
      rx_str = self.gse.parameters['rx_gsm_subject_str']
      Log.info("No column merge map created for %s using rx '%s'. Merge_cols flag is %s" % \
        (self.gse, rx_str, self.merge_cols))
Author: andrewdyates, Project: geo_api, Lines: 52, Source: filter.py
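A hedged instantiation sketch. The snippet does not show the enclosing class name, so RowFilter below is a placeholder, and gse is assumed to be an already-populated geo.GSE of study type 'eQTL':

# RowFilter stands in for the (unnamed) class defined in filter.py.
filt = RowFilter(gse, merge_cols=True, percentile=0.75)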

Example 3: read

# Required import: from logger import Log [as alias]
# Or: from logger.Log import info [as alias]
  def read(self):
    """Return a file-pointer-like object to this resource.
    
    Returns:
      iter: file-pointer-like str line iterator (uncompressed)
    """
    # Attempt to retrieve from cache if possible.
    if self.read_cache:
      fp = self._fetch_from_cache()
    else:
      fp = None
    if fp:
      Log.info("Fetched %s from cache." % self.url)
      return fp
    else:
      Log.info("Downloading %s from network." % self.url)
    
    # From HTTP, Fetch request and populate self with response.
    http_fp = self.fetch()
    # If compressed, wrap http handle in a gzip decompressor.
    if self.headers and "content-encoding" in self.headers and \
        self.headers["content-encoding"] == "gzip":
      zip_fp = gzip.GzipFile(fileobj=http_fp)
      fp = zip_fp
    else:
      fp = http_fp
      
    # Return download iterator from decompressed HTTP handle.
    if self.write_cache:
      cache = self.cache_name
    else:
      cache = None

    # Get expected download size in bytes.
    if self.headers and 'content-length' in self.headers:
      try:
        size = int(self.headers['content-length'])
      except (ValueError, TypeError):
        size = None
    else:
      size = None
      
    return DownloadIter(fp, cache=cache, size=size,
                        report=self.report_status, finalize=self.finalize)
Author: andrewdyates, Project: geo_api, Lines: 46, Source: cached_download.py
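A hedged sketch of consuming the returned iterator; resource stands in for an instance of the cached-download class above, and handle() is a placeholder:

fp = resource.read()   # DownloadIter yielding decompressed lines
for line in fp:
    handle(line)       # placeholder per-line processing
fp.close()             # see Example 8 for close() semantics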

Example 4: run_server

# Required import: from logger import Log [as alias]
# Or: from logger.Log import info [as alias]
import signal
import thread  # Python 2 stdlib; use _thread on Python 3

def run_server(addr, port):
    global task
    log = Log(__name__, level='INFO')
    log.info('Run httpd server until ctrl-c input')
    def shutdown(task):
        task.worker.stop()
        task.running = False
    def start(httpd, id):
        httpd.start()
    def signal_handler(signum, stack):
        log.info('Sending shutdown to httpd server')
        # Stop the server from a separate thread so the handler returns fast.
        thread.start_new_thread(shutdown, (task,))
    signal.signal(signal.SIGINT, signal_handler)
    server = Httpd(port=int(port), address=addr)
    task = WorkerThread(server, 'httpd')
    worker = WorkerTasks(tasks=[task], func=start)
    worker.run()
    worker.wait_for_completion(timeout_sec=-1)  # run forever
Author: BillyAbildgaard, Project: RackHD, Lines: 20, Source: httpd.py
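A hedged invocation sketch; the bind address and port below are placeholders:

run_server('0.0.0.0', 9080)  # hypothetical address and port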

Example 5: get_rows

# Required import: from logger import Log [as alias]
# Or: from logger.Log import info [as alias]
  def get_rows(self):
    """Return filtered row iterator.

    TODO: Clean this up; it may be best to break this into multiple filters.
    TODO: Fix to return [str].

    Returns:
      *[str]: filtered rows of data, split by columns
    """
    Log.info("Initiated filter %s for rows of %s" % (self, self.gse))
    if self.col_map:
      Log.info("self.col_map exists. Merge %d to %d columns for %s" % \
               (len(self.col_titles), len(self.col_map), self))
    else:
      Log.info("No col_map. Will not merge %d columns for %s." % \
               (len(self.col_titles), self))

    # 0. Determine best gene name column in case GENE_SYMBOL does not exist.
    # ==========
    gene_symbol_name = None
    # Traverse column names in preferred order.
    for name in geo.GPL.EQTL_GENE_NAME_LIST:
      # Skip columns without assignments. Continue
      if self.gse.platform.special_cols[name] is None:
        continue
      # Choose the first column that has an acceptable assignment. Break.
      else:
        actual_column_name = self.gse.platform.special_cols[name]
        gene_symbol_name = name
        break
    # Verify that a column was chosen to identify the row.
    if gene_symbol_name:
      Log.info("Selected column '%s=>%s' to best represent gene name for %s." %\
        (gene_symbol_name, actual_column_name, self.gse.platform))
    else:
      raise MalformedFilterError("Cannot select gene symbol column from %s" %
        (self.gse.platform))
    
    # 1. Update column titles accounting for merged columns.
    # ==========
    if self.col_map:
      self.col_titles = self._merge_cols(self.col_titles, merge_titles)
      
    # Insert generated column titles (AFTER merging columns)
    # self.col_titles[0] should always be "ID_REF"
    col_titles_prefix = ["ID_REF", gene_symbol_name, "NUM_VALUES", "MEAN", "STD"]
    self.col_titles = col_titles_prefix + self.col_titles[1:]
    Log.info("Added %s, NUM_VALUES, MEAN, STD to col titles for %s." %\
             (gene_symbol_name, self))
             
    # Open new temporary file. XXX RENAME
    filepath = temp_file_name("%s.rowmerge" % self.gse.id)
    fp_out = open(filepath, "w")

    # 2: @DATAPASS 1: Merge columns, add gene symbol, filter non-genes.
    # ==========
    Log.info(("Started filter 1 in %s for %s: find and add gene, merge cols. " +
             "(This may take a while.)") % (self, self.gse))
      
    num_rows = 0
    for row in self.gse.get_rows():
      # TODO: Add status reporting to console
      num_rows += 1

      # Determine gene symbol for this row. Filter if no gene symbol exists.
      row_id = row[0] # Row ID should always be the first entry in a row.
      gene_sym = self.gse.platform.get_column(row_id, gene_symbol_name)
      if not gene_sym:
        self.rows_filtered.append(row_id)
        continue # skip this row
      else:
        self.rows_per_gene.setdefault(gene_sym, set()).add(row_id)
      
      # Merge columns using column mapping of series matrix columns.
      # Also, transform row into "floats" and None
      if self.col_map:
        # XXX_merge_cols is slow, perhaps due to float conversions.
        row = self._merge_cols(row, merge_floats)
      else:
        row = map(get_float, row)

      # Compute mean and standard deviation of all non-ID columns
      # check for None specifically since a valid value could be 0
      filtered_row = filter(lambda x: x is not None, row[1:])
      std = calc_std(filtered_row)
      mean = calc_mean(filtered_row)
      num_values = len(filtered_row)
      # Store row statistics
      self.row_stats[row_id] = \
        {'num_values': num_values, 'mean': mean, 'std': std}

      # Insert (gene_sym, size, mean, std) into second column
      row = [row_id, gene_sym, num_values, mean, std] + row[1:]

      # Write row to temporary file.
      # TODO: I may want to compress my row by converting it to a pickle.
      # pickling a list of floats uses 2/3 space and takes 1/2 compute time.
      fp_out.write("\t".join(map(str, row)))
      fp_out.write("\n")
    fp_out.close()
#......... (remainder of this method omitted) .........
Author: andrewdyates, Project: geo_api, Lines: 103, Source: filter.py
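The per-row statistics step above deliberately drops only None entries (a value of 0.0 is kept) before computing the mean and standard deviation. A standalone sketch of that idiom, using plain arithmetic in place of the project's calc_mean/calc_std helpers (which may use a different std definition):

row = [1.0, None, 2.5, None, 4.0]            # hypothetical data row, ID stripped
values = [x for x in row if x is not None]   # keep 0.0; drop only None
n = len(values)
mean = sum(values) / float(n)
std = (sum((x - mean) ** 2 for x in values) / float(n)) ** 0.5  # population std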

Example 6: int

# Required import: from logger import Log [as alias]
# Or: from logger.Log import info [as alias]
    """信号处理,退出程序
    """
    tornado.ioloop.IOLoop.instance().stop()
    logger.info('Msg-delivery stopped!')

signal.signal(signal.SIGTERM, quit_app)
signal.signal(signal.SIGINT,  quit_app)
if __name__ == "__main__":
    # init
    port = 8776
    includes = None
    opts, argvs = getopt.getopt(sys.argv[1:], "c:p:h")
    for op, value in opts:
        if op == '-c':
            includes = value
        elif op == '-p':
            port = int(value)
        elif op == '-h':
            Usage()
    if not includes:
        Usage()
    confs = init_application(includes)
    logger.info("Msg-delivery initialized!")

    # main
    timer = timer_procedure.msgTimer()
    application = tornado.web.Application(
        [(r"^/([^\.|]*)(?!\.\w+)$", MainHandler, dict(timer=timer))],
        log_function=log_request)
    application.listen(port)
    logger.info("Msg-delivery start to Loop!")
    tornado.ioloop.IOLoop.instance().start()
Author: dragonflylxp, Project: msg-delivery, Lines: 32, Source: main.py

Example 7: timeout_callback

# Required import: from logger import Log [as alias]
# Or: from logger.Log import info [as alias]
        if not self.exist_t(evt.t):
            # Persist the timestamp.
            self.save_t(evt.t)
            logger.info("Save a event-timer![ TIME:%s ]" % evt.t)

            # Schedule the timed task.
            try:
                callback = functools.partial(self.timeout_callback, evt.t)
                tornado.ioloop.IOLoop.instance().add_timeout(int(evt.t), callback)
            except Exception as e:
                logger.error("Tornado add timeout error! [ ERROR:%s ]" % e)
            logger.info("Set event-timer's callback![ TIME:%s ]" % evt.t)

        # Persist the event (with an expiration time).
        self.save_evt(evt)
        logger.info("Save a event-list![ KEY:%s ]" % (REDIS_EVT_LST_PREFIX + evt.t))

    def timeout_callback(self, t):
        logger.debug("Event-timer callback![ TIMER:%s NOW:%s ]" % (t, time.time()))
        # Process all events scheduled for time t.
        key = REDIS_EVT_LST_PREFIX + t
        n_events = self.R.llen(key)  # renamed from `len` to avoid shadowing the builtin
        logger.debug("Scan event list![ KEY:%s TOTAL:%d ]" % (key, n_events))
        for i in range(n_events):
            raw = self.R.lpop(key)  # renamed from `str` to avoid shadowing the builtin
            logger.debug("Pop a event from list![ KEY:%s EVT:%s ]" % (key, raw))
            if not raw:
                continue
            dct = ujson.loads(raw)
            evt = msgEvent().from_dict(dct)
            msg_procedure.process(evt)
Author: dragonflylxp, Project: msg-delivery, Lines: 32, Source: timer_procedure.py
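The pattern above schedules one IOLoop timeout per distinct timestamp, then drains the Redis list for that timestamp inside the callback. A compressed, hedged sketch of the scheduling half (Tornado 4-era API; evt_t is assumed to be a unix-time string):

import functools
import tornado.ioloop

def schedule(evt_t, callback_fn):
    # One timeout per distinct timestamp; add_timeout accepts an
    # absolute unix timestamp as its deadline.
    cb = functools.partial(callback_fn, evt_t)
    tornado.ioloop.IOLoop.instance().add_timeout(int(evt_t), cb)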

Example 8: close

# Required import: from logger import Log [as alias]
# Or: from logger.Log import info [as alias]
  def close(self):
    """Close any open file pointers, close and finalize cache file.
    """
    # Ignore repeated calls to close()
    if self.closed:
      Log.info("Redundant call to close(), Ignored for %s." % self)
      return
    else:
      Log.info("Closing %s..." % self)

    # Handle finalize requests to complete download to buffer.
    if self.finalize:
      if not self.completed and self.cache:
        Log.info("Finalizing download of %s." % self)
        # Read remaining buffer unconditionally. Use iterator if reporting.
        if self.report:
          while True:
            try:
              self.next()
            except StopIteration:
              break
        else:
          self.read()
        # If not closed in previous read(), try another read().
        if not self.closed:
          # This closes self since the previous read flushed the buffer.
          self.read()
        if not self.closed:
          Log.warning("Close sequence not completed as expected for %s." % self)
        # Exit: prior reads in the finalize process already closed self.
        return

    # self.buffer.close() causes bugs with FTP. Python sockets clean up after
    # themselves during garbage collection, so just drop the reference instead
    # of calling self.buffer.close().
    self.buffer = None
    self.fp_out.close()

    if self.completed:
      Log.info("Download complete. %d bytes read." % (self.bytes_read))
      # Finalize cache.
      if self.cache:
        os.rename(self.tmp_filepath, self.dest_filepath)
        Log.info("Cache finalized as '%s'." % (self.dest_filepath))
    else:
      Log.info("Download closed before completion. %d bytes read." % \
               (self.bytes_read))
      # Flush cache.
      if self.cache:
        os.remove(self.tmp_filepath)
        Log.info("Incomplete cache '%s' deleted." % (self.tmp_filepath))
        
    # Flag self as closed to prevent redundant .close() calls.
    self.closed = True
Author: andrewdyates, Project: geo_api, Lines: 56, Source: cached_download.py
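close() is deliberately idempotent, which matters when both a finally block and the finalize path may reach it. A hedged sketch, with `it` standing in for the DownloadIter returned by read() in Example 3:

try:
    data = it.read()   # drain the remaining buffer
finally:
    it.close()         # finalizes (or discards) the cache file
it.close()             # safe: a repeated close() is logged and ignored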


Note: The logger.Log.info method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Refer to each project's License before distributing or using the code; do not reproduce without permission.