

Python Log.warning Method Code Examples

This article collects typical usage examples of the Python method pyLibrary.debugs.logs.Log.warning. If you are wondering what Log.warning does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the containing class, pyLibrary.debugs.logs.Log.


The following shows 15 code examples of the Log.warning method, sorted by popularity by default.
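
Before diving into the examples, here is a minimal sketch of the calling pattern that recurs throughout them: Log.warning takes a message template with {{name}} placeholders, keyword arguments to fill those placeholders, and an optional cause= to chain the exception that triggered the warning. The do_work helper and the "some_key" value are hypothetical stand-ins for illustration, not part of pyLibrary.

# A minimal usage sketch, assuming pyLibrary is importable (Python 2, as in the excerpts below).
# do_work() and "some_key" are hypothetical placeholders, not pyLibrary APIs.
from pyLibrary.debugs.logs import Log


def do_work(key):
    # stand-in for real work that may fail
    raise ValueError("can not process " + key)


try:
    do_work("some_key")
except Exception, e:
    # {{key}} is filled from the keyword argument of the same name;
    # cause=e attaches the original exception to the logged warning
    Log.warning("Problem with {{key}}", key="some_key", cause=e)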

Example 1: monitor

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import warning [as alias]
    def monitor(self, please_stop):
        please_stop.on_go(lambda: self.todo.add(Thread.STOP))
        while not please_stop:
            try:
                if not self.todo:
                    with self.columns.locker:
                        old_columns = filter(
                            lambda c: (c.last_updated == None or c.last_updated < Date.now()-TOO_OLD) and c.type not in ["object", "nested"],
                            self.columns
                        )
                        if old_columns:
                            self.todo.extend(old_columns)
                            # TEST CONSISTENCY
                            for c, d in product(list(self.todo.queue), list(self.todo.queue)):
                                if c.abs_name==d.abs_name and c.table==d.table and c!=d:
                                    Log.error("")


                        else:
                            Log.note("no more metadata to update")

                column = self.todo.pop(timeout=10*MINUTE)
                if column:
                    if column.type in ["object", "nested"]:
                        continue
                    elif column.last_updated >= Date.now()-TOO_OLD:
                        continue
                    try:
                        self._update_cardinality(column)
                        Log.note("updated {{column.name}}", column=column)
                    except Exception, e:
                        Log.warning("problem getting cardinality for  {{column.name}}", column=column, cause=e)
            except Exception, e:
                Log.warning("problem in cardinality monitor", cause=e)
Developer: klahnakoski, Project: MoDevETL, Lines: 36, Source: meta.py

Example 2: _worker

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import warning [as alias]
    def _worker(self, please_stop):
        curr = "0.0"
        acc = []
        last_count_written = -1
        next_write = Date.now()

        while not please_stop:
            d = self.temp_queue.pop(timeout=MINUTE)
            if d == None:
                if not acc:
                    continue
                # WRITE THE INCOMPLETE DATA TO S3, BUT NOT TOO OFTEN
                next_write = Date.now() + MINUTE
                try:
                    if last_count_written != len(acc):
                        if DEBUG:
                            Log.note("write incomplete data ({{num}} lines) to {{uid}} in S3 next (time = {{next_write}})", uid=curr, next_write=next_write, num=len(acc))
                        self.bucket.write_lines(curr, (convert.value2json(a) for a in acc))
                        last_count_written = len(acc)
                except Exception, e:
                    Log.note("Problem with write to S3", cause=e)
            elif d[UID_PATH] != curr:
                # WRITE acc TO S3 IF WE ARE MOVING TO A NEW KEY
                try:
                    if acc:
                        if DEBUG:
                            Log.note("write complete data ({{num}} lines) to {{curr}} in S3", num=len(acc), curr=curr)
                        self.bucket.write_lines(curr, (convert.value2json(a) for a in acc))
                        last_count_written = 0
                    curr = d[UID_PATH]
                    acc = [d]
                except Exception, e:
                    Log.warning("Can not store data", cause=e)
                    Thread.sleep(30*MINUTE)
Developer: klahnakoski, Project: MoDataSubmission, Lines: 36, Source: storage.py

Example 3: event_loop

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import warning [as alias]
    def event_loop(self, please_stop):
        got_stop_message = False
        while not please_stop.is_go():
            with Timer("get more work", debug=DEBUG):
                request = self.in_queue.pop()
            if request == Thread.STOP:
                if DEBUG:
                    Log.note("{{name}} got a stop message", name=self.name)
                got_stop_message = True
                if self.in_queue:
                    Log.warning(
                        "programmer error, queue not empty. {{num}} requests lost:\n{{requests}}",
                        num=len(self.in_queue.queue),
                        requests=list(self.in_queue.queue)[:5:] + list(self.in_queue.queue)[-5::],
                    )
                break
            if please_stop.is_go():
                break

            with Timer("run {{function}}", {"function": get_function_name(self.function)}, debug=DEBUG):
                try:
                    result = self.function(**request)
                    if self.out_queue != None:
                        self.out_queue.add({"response": result})
                except Exception, e:
                    Log.warning("Can not execute with params={{params}}", params=request, cause=e)
                    if self.out_queue != None:
                        self.out_queue.add({"exception": e})
                finally:
                    self.num_runs += 1
Developer: klahnakoski, Project: MoDataSubmission, Lines: 32, Source: multithread.py

Example 4: _insert_loop

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import warning [as alias]
    def _insert_loop(self, please_stop=None):
        bad_count = 0
        while not please_stop:
            try:
                Thread.sleep(seconds=1)
                messages = wrap(self.queue.pop_all())
                if not messages:
                    continue

                for g, mm in jx.groupby(messages, size=self.batch_size):
                    scrubbed = []
                    try:
                        for i, message in enumerate(mm):
                            if message is Thread.STOP:
                                please_stop.go()
                                return
                            scrubbed.append(_deep_json_to_string(message, depth=3))
                    finally:
                        self.es.extend(scrubbed)
                    bad_count = 0
            except Exception, e:
                Log.warning("Problem inserting logs into ES", cause=e)
                bad_count += 1
                if bad_count > MAX_BAD_COUNT:
                    Log.warning("Given up trying to write debug logs to ES index {{index}}", index=self.es.settings.index)
                Thread.sleep(seconds=30)
Developer: klahnakoski, Project: TestFailures, Lines: 28, Source: log_usingElasticSearch.py

Example 5: replacer

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import warning [as alias]
    def replacer(found):
        ops = found.group(1).split("|")

        path = ops[0]
        var = path.lstrip(".")
        depth = min(len(seq), max(1, len(path) - len(var)))
        try:
            val = seq[-depth]
            if var:
                val = val[var]
            for func_name in ops[1:]:
                parts = func_name.split('(')
                if len(parts) > 1:
                    val = eval(parts[0] + "(val, " + ("(".join(parts[1::])))
                else:
                    val = globals()[func_name](val)
            val = toString(val)
            return val
        except Exception, e:
            try:
                if "is not JSON serializable" in e.message:
                    # WORK HARDER
                    val = toString(val)
                    return val
            except Exception, f:
                if not _Log:
                    _late_import()

                _Log.warning(
                    "Can not expand " + "|".join(ops) + " in template: {{template_|json}}",
                    template_=template,
                    cause=e
                )
Developer: klahnakoski, Project: MoDevETL, Lines: 35, Source: strings.py

Example 6: daemon

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import warning [as alias]
    def daemon(cls, please_stop):
        global next_ping

        Till.enabled = True
        try:
            while not please_stop:
                now = _time()
                with Till.locker:
                    if next_ping > now:
                        _sleep(min(next_ping - now, INTERVAL))
                        continue

                    next_ping = now + INTERVAL
                    work = None
                    if Till.all_timers:
                        Till.all_timers.sort(key=lambda r: r[0])
                        for i, (t, s) in enumerate(Till.all_timers):
                            if now < t:
                                work, Till.all_timers[:i] = Till.all_timers[:i], []
                                next_ping = min(next_ping, Till.all_timers[0][0])
                                break
                        else:
                            work, Till.all_timers = Till.all_timers, []

                if work:
                    for t, s in work:
                        s.go()

        except Exception, e:
            from pyLibrary.debugs.logs import Log

            Log.warning("timer shutdown", cause=e)
Developer: klahnakoski, Project: TestFailures, Lines: 34, Source: till.py

Example 7: main

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import warning [as alias]
def main():
    """
    CLEAR OUT KEYS FROM BUCKET BY RANGE, OR BY FILE
    """
    settings = startup.read_settings(defs=[
        {
            "name": ["--bucket"],
            "help": "bucket to scan",
            "type": str,
            "dest": "bucket",
            "required": True
        }
    ])
    Log.start(settings.debug)

    source = Connection(settings.aws).get_bucket(settings.args.bucket)

    for k in qb.sort(source.keys()):
        try:
            data = source.read_bytes(k)
            if convert.ascii2unicode(data).find("2e2834fa7ecd8d3bb1ad49ec981fdb89eb4df95e18") >= 0:
                Log.note("Found at {{key}}", key=k)
        except Exception, e:
            Log.warning("Problem with {{key}}", key=k, cause=e)
        finally:
            pass  # the remainder of this loop body is truncated in the listing
Developer: klahnakoski, Project: Activedata-ETL, Lines: 27, Source: find_in_s3.py

Example 8: go

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import warning [as alias]
    def go(self):
        """
        ACTIVATE SIGNAL (DOES NOTHING IF SIGNAL IS ALREADY ACTIVATED)
        """
        if DEBUG:
            if not _Log:
                _late_import()
            _Log.note("GO! {{name|quote}}", name=self.name)

        with self.lock:
            if DEBUG:
                _Log.note("internal GO! {{name|quote}}", name=self.name)
            if self._go:
                return
            self._go = True
            jobs, self.job_queue = self.job_queue, []
            threads, self.waiting_threads = self.waiting_threads, []

        for t in threads:
            if DEBUG:
                _Log.note("Release")
            t.release()

        for j in jobs:
            try:
                j()
            except Exception, e:
                if not _Log:
                    _late_import()
                _Log.warning("Trigger on Signal.go() failed!", cause=e)
Developer: klahnakoski, Project: TestFailures, Lines: 32, Source: signal.py

Example 9: main

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import warning [as alias]
def main():
    try:
        settings = startup.read_settings()
        Log.start(settings.debug)
        constants.set(settings.constants)

        with startup.SingleInstance(flavor_id=settings.args.filename):
            with aws.s3.Bucket(settings.destination) as bucket:

                if settings.param.debug:
                    if settings.source.durable:
                        Log.error("Can not run in debug mode with a durable queue")
                    synch = SynchState(bucket.get_key(SYNCHRONIZATION_KEY, must_exist=False))
                else:
                    synch = SynchState(bucket.get_key(SYNCHRONIZATION_KEY, must_exist=False))
                    if settings.source.durable:
                        synch.startup()

                queue = PersistentQueue(settings.param.queue_file)
                if queue:
                    last_item = queue[len(queue) - 1]
                    synch.source_key = last_item._meta.count + 1

                with pulse.Consumer(settings=settings.source, target=None, target_queue=queue, start=synch.source_key):
                    Thread.run("pulse log loop", log_loop, settings, synch, queue, bucket)
                    Thread.wait_for_shutdown_signal(allow_exit=True)
                    Log.warning("starting shutdown")

                queue.close()
                Log.note("write shutdown state to S3")
                synch.shutdown()

    except Exception, e:
        Log.error("Problem with etl", e)
Developer: klahnakoski, Project: Activedata-ETL, Lines: 36, Source: pulse_logger.py

Example 10: _worker

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import warning [as alias]
    def _worker(self, please_stop):
        while not please_stop:
            try:
                self.pulse.listen()
            except Exception, e:
                if not please_stop:
                    Log.warning("pulse had problem", e)
Developer: klahnakoski, Project: intermittents, Lines: 9, Source: pulse.py

Example 11: etl_one

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import warning [as alias]
def etl_one(settings):
    queue = Queue("temp work queue")
    queue.__setattr__(b"commit", Null)
    queue.__setattr__(b"rollback", Null)

    settings.param.wait_forever = False
    already_in_queue = set()
    for w in settings.workers:
        source = get_container(w.source)
        # source.settings.fast_forward = True
        if id(source) in already_in_queue:
            continue
        try:
            for i in parse_id_argument(settings.args.id):
                data = source.get_key(i)
                if data != None:
                    already_in_queue.add(id(source))
                    queue.add(Dict(
                        bucket=w.source.bucket,
                        key=i
                    ))
        except Exception, e:
            if "Key {{key}} does not exist" in e:
                already_in_queue.add(id(source))
                queue.add(Dict(
                    bucket=w.source.bucket,
                    key=settings.args.id
                ))
            Log.warning("Problem", cause=e)
Developer: klahnakoski, Project: Activedata-ETL, Lines: 31, Source: etl.py

Example 12: write_lines

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import warning [as alias]
    def write_lines(self, key, lines):
        self._verify_key_format(key)
        storage = self.bucket.new_key(key + ".json.gz")

        buff = TemporaryFile()
        archive = gzip.GzipFile(fileobj=buff, mode='w')
        count = 0
        for l in lines:
            if hasattr(l, "__iter__"):
                for ll in l:
                    archive.write(ll.encode("utf8"))
                    archive.write(b"\n")
                    count += 1
            else:
                archive.write(l.encode("utf8"))
                archive.write(b"\n")
                count += 1
        archive.close()
        file_length = buff.tell()

        retry = 3
        while retry:
            try:
                with Timer("Sending {{count}} lines in {{file_length|comma}} bytes", {"file_length": file_length, "count": count}, debug=self.settings.debug):
                    buff.seek(0)
                    storage.set_contents_from_file(buff)
                break
            except Exception, e:
                Log.warning("could not push data to s3", cause=e)
                retry -= 1
Developer: klahnakoski, Project: MoTreeherder, Lines: 32, Source: s3.py

Example 13: _get_from_elasticsearch

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import warning [as alias]
    def _get_from_elasticsearch(self, revision, locale=None):
        rev = revision.changeset.id
        query = {
            "query": {"filtered": {
                "query": {"match_all": {}},
                "filter": {"and": [
                    {"prefix": {"changeset.id": rev[0:12]}},
                    {"term": {"branch.name": revision.branch.name}},
                    {"term": {"branch.locale": coalesce(locale, revision.branch.locale, DEFAULT_LOCALE)}}
                ]}
            }},
            "size": 2000,
        }
        try:
            docs = self.es.search(query, timeout=120).hits.hits
            if len(docs) > 1:
                for d in docs:
                    if d._id.endswith(d._source.branch.locale):
                        return d._source
                Log.warning("expecting no more than one document")

            return docs[0]._source
        except Exception, e:
            Log.warning("Bad ES call", e)
            return None
Developer: klahnakoski, Project: MoHg, Lines: 27, Source: hg_mozilla_org.py

Example 14: loop

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import warning [as alias]
    def loop(self, please_stop):
        with self.work_queue:
            while not please_stop:
                if self.settings.wait_forever:
                    todo = None
                    while not please_stop and not todo:
                        if isinstance(self.work_queue, aws.Queue):
                            todo = self.work_queue.pop(wait=EXTRA_WAIT_TIME)
                        else:
                            todo = self.work_queue.pop()
                else:
                    if isinstance(self.work_queue, aws.Queue):
                        todo = self.work_queue.pop()
                    else:
                        todo = self.work_queue.pop(till=Date.now())
                    if todo == None:
                        please_stop.go()
                        return

                try:
                    is_ok = self._dispatch_work(todo)
                    if is_ok:
                        self.work_queue.commit()
                    else:
                        self.work_queue.rollback()
                except Exception, e:
                    self.work_queue.rollback()
                    Log.warning("could not processs {{key}}.  Returned back to work queue.", key=todo.key, cause=e)
Developer: klahnakoski, Project: Activedata-ETL, Lines: 30, Source: etl.py

Example 15: process

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import warning [as alias]
def process(source_key, source, destination, resources, please_stop=None):
    lines = source.read_lines()

    etl_header = convert.json2value(lines[0])
    if etl_header.etl:
        start = 0
    elif etl_header.locale or etl_header._meta:
        start = 0
    else:
        start = 1

    keys = []
    records = []
    stats = Dict()
    for i, line in enumerate(lines[start:]):
        pulse_record = Null
        try:
            pulse_record = scrub_pulse_record(source_key, i, line, stats)
            if not pulse_record:
                continue

            with Profiler("transform_buildbot"):
                record = transform_buildbot(pulse_record.payload, resources=resources)
                record.etl = {
                    "id": i,
                    "source": pulse_record.etl,
                    "type": "join",
                    "revision": get_git_revision()
                }
            key = etl2key(record.etl)
            keys.append(key)
            records.append({"id": key, "value": record})
        except Exception, e:
            Log.warning("Problem with pulse payload {{pulse|json}}", pulse=pulse_record.payload, cause=e)
Developer: klahnakoski, Project: Activedata-ETL, Lines: 36, Source: pulse_block_to_es.py


Note: The pyLibrary.debugs.logs.Log.warning examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors; refer to each project's license before redistributing or using it. Do not reproduce this article without permission.