

Python Log.alert Method Code Examples

This article collects typical usage examples of the Python method pyLibrary.debugs.logs.Log.alert. If you are wondering what Log.alert does, how to call it, or what idiomatic usage looks like, the curated examples below should help. You can also explore other usage examples from the same class, pyLibrary.debugs.logs.Log.


The following presents 11 code examples of the Log.alert method, sorted by popularity.
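Before the individual examples, here is a minimal sketch of the call pattern they all share. It assumes only what the examples below demonstrate: Log.alert takes a template string containing moustache-style {{name}} placeholders, plus keyword arguments that fill them. The variable values used here are hypothetical.

# MINIMAL USAGE SKETCH OF Log.alert; VARIABLE VALUES ARE HYPOTHETICAL
from pyLibrary.debugs.logs import Log

table = "test_results"  # HYPOTHETICAL TABLE NAME
pending = ["m1", "m2"]  # HYPOTHETICAL LIST OF QUEUE MESSAGES

# EACH {{name}} PLACEHOLDER IS FILLED FROM THE MATCHING KEYWORD ARGUMENT
Log.alert("Table {{table}} exists in Redshift", table=table)
Log.alert("{{num}} messages returned to queue", num=len(pending))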

Example 1: __init__

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import alert [as alias]
    def __init__(
        self,
        index,  # NAME OF THE INDEX, EITHER ALIAS NAME OR FULL VERSION NAME
        type,  # SCHEMA NAME
        alias=None,
        explore_metadata=True,  # PROBING THE CLUSTER FOR METADATA IS ALLOWED
        timeout=None,  # NUMBER OF SECONDS TO WAIT FOR RESPONSE, OR SECONDS TO WAIT FOR DOWNLOAD (PASSED TO requests)
        debug=False,  # IF True, SHOW DEBUG STATEMENTS
        settings=None
    ):
        if index==None or type==None:
            Log.error("not allowed")
        if index == alias:
            Log.error("must have a unique index name")

        self.cluster_state = None
        self.cluster_metadata = None
        self.debug = debug
        if self.debug:
            Log.alert("elasticsearch debugging for index {{index}} is on",  index= settings.index)

        self.settings = settings
        self.cluster = Cluster(settings)

        try:
            index = self.get_index(index)
            if index and alias==None:
                settings.alias = settings.index
                settings.index = index
            if index==None:
                Log.error("not allowed")
        except Exception, e:
            # EXPLORING (get_metadata()) IS NOT ALLOWED ON THE PUBLIC CLUSTER
            pass
Developer: klahnakoski, Project: intermittents, Lines: 36, Source: elasticsearch.py

Example 2: __init__

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import alert [as alias]
    def __init__(
        self,
        host,
        user,
        password,
        table,
        meta,       # REDSHIFT COPY COMMAND REQUIRES A BUCKET TO HOLD PARAMETERS
        database=None,
        port=5439,
        settings=None
    ):
        self.settings = settings
        self.db = Redshift(settings)
        INDEX_CACHE[settings.table] = wrap({"name":settings.table})  # HACK TO GET parse_columns TO WORK
        columns = parse_columns(settings.table, settings.mapping.test_result.properties)
        nested = [c.name for c in columns if c.type == "nested"]
        self.columns = wrap([c for c in columns if c.type not in ["object"] and not any(c.name.startswith(n+".") for n in nested)])

        try:
            self.db.execute("""
                CREATE TABLE {{table_name}} (
                    "_id" character varying UNIQUE,
                    {{columns}}
                )""", {
                "table_name": self.db.quote_column(settings.table),
                "columns": SQL(",\n".join(self.db.quote_column(c.name) + " " + self.db.es_type2pg_type(c.type) for c in self.columns))
            }, retry=False)
        except Exception, e:
            if "already exists" in e:
                Log.alert("Table {{table}} exists in Redshift",  table= settings.table)
            else:
                Log.error("Could not make table", e)
Developer: klahnakoski, Project: Activedata-ETL, Lines: 34, Source: redshift.py

Example 3: _monitor

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import alert [as alias]
    def _monitor(self, please_stop):
        self.service.wait()
        if DEBUG:
            Log.alert(
                "{{name}} stopped with returncode={{returncode}}", name=self.name, returncode=self.service.returncode
            )
        self.stdin.add(Thread.STOP)
        self.service_stopped.go()
Developer: klahnakoski, Project: MoDevETL, Lines: 10, Source: multiprocess.py

Example 4: rollback

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import alert [as alias]
    def rollback(self):
        if self.pending:
            pending, self.pending = self.pending, []

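            # WRITE A COPY OF EACH PENDING MESSAGE BODY BACK TO THE QUEUE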
            for p in pending:
                m = Message()
                m.set_body(p.get_body())
                self.queue.write(m)

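            # THEN DELETE THE ORIGINAL MESSAGES, NOW THAT COPIES ARE RE-QUEUED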
            for p in pending:
                self.queue.delete_message(p)

            if self.settings.debug:
                Log.alert("{{num}} messages returned to queue", num=len(pending))
Developer: klahnakoski, Project: TestFailures, Lines: 16, Source: __init__.py

Example 5: main

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import alert [as alias]
def main():

    try:
        settings = startup.read_settings()
        constants.set(settings.constants)
        Log.start(settings.debug)

        branches = _get_branches_from_hg(settings.hg)

        es = elasticsearch.Cluster(settings=settings.hg.branches).get_or_create_index(settings=settings.hg.branches)
        es.add_alias()
        es.extend({"id": b.name + " " + b.locale, "value": b} for b in branches)
        Log.alert("DONE!")
    except Exception, e:
        Log.error("Problem with etl", e)
Developer: klahnakoski, Project: MoTreeherder, Lines: 17, Source: hg_branches.py

Example 6: __init__

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import alert [as alias]
    def __init__(
        self,
        bucket,  # NAME OF THE BUCKET
        aws_access_key_id=None,  # CREDENTIAL
        aws_secret_access_key=None,  # CREDENTIAL
        region=None,  # NAME OF AWS REGION, REQUIRED FOR SOME BUCKETS
        public=False,
        debug=False,
        settings=None
    ):
        self.uid = None
        self.bucket = s3.Bucket(settings=settings)
        Log.alert("Using {{bucket}} for S3 storage", bucket=self.bucket.name)
        self.temp_queue = PersistentQueue(bucket + "_queue.txt")
        self._figure_out_start_point()
        self.push_to_s3 = Thread.run("pushing to " + bucket, self._worker)
Developer: klahnakoski, Project: MoDataSubmission, Lines: 18, Source: storage.py

Example 7: __init__

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import alert [as alias]
    def __init__(
        self,
        alias,  # NAME OF THE ALIAS
        type=None,  # SCHEMA NAME, WILL HUNT FOR ONE IF None
        explore_metadata=True,  # IF PROBING THE CLUSTER FOR METADATA IS ALLOWED
        debug=False,
        timeout=None,  # NUMBER OF SECONDS TO WAIT FOR RESPONSE, OR SECONDS TO WAIT FOR DOWNLOAD (PASSED TO requests)
        settings=None
    ):
        self.debug = debug
        if self.debug:
            Log.alert("Elasticsearch debugging on {{index|quote}} is on",  index= settings.index)
        if alias == None:
            Log.error("Alias can not be None")
        self.settings = settings
        self.cluster = Cluster(settings)

        if type == None:
            if not explore_metadata:
                Log.error("Alias() was given no `type` (aka schema) and not allowed to explore metadata.  Do not know what to do now.")

            indices = self.cluster.get_metadata().indices
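            # RESOLVE THE ALIAS TO ITS FULL INDEX NAME, THEN FETCH THAT INDEX'S MAPPING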
            if not self.settings.alias or self.settings.alias==self.settings.index:
                alias_list = self.cluster.get("/_alias/"+self.settings.index)
                candidates = [(name, i) for name, i in alias_list.items() if self.settings.index in i.aliases.keys()]
                full_name = jx.sort(candidates, 0).last()[0]
                index = self.cluster.get("/" + full_name + "/_mapping")[full_name]
            else:
                index = self.cluster.get("/"+self.settings.index+"/_mapping")[self.settings.index]

            # FIND MAPPING WITH MOST PROPERTIES (AND ASSUME THAT IS THE CANONICAL TYPE)
            max_prop = -1
            for _type, mapping in index.mappings.items():
                if _type == "_default_":
                    continue
                num_prop = len(mapping.properties.keys())
                if max_prop < num_prop:
                    max_prop = num_prop
                    self.settings.type = _type
                    type = _type

            if type == None:
                Log.error("Can not find schema type for index {{index}}", index=coalesce(self.settings.alias, self.settings.index))

        self.path = "/" + alias + "/" + type
Developer: klahnakoski, Project: MoDataSubmission, Lines: 47, Source: elasticsearch.py

Example 8: not_monitor

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import alert [as alias]
    def not_monitor(self, please_stop):
        Log.alert("metadata scan has been disabled")
        please_stop.on_go(lambda: self.todo.add(Thread.STOP))
        while not please_stop:
            c = self.todo.pop()
            if c == Thread.STOP:
                break

            if not c.last_updated or c.last_updated >= Date.now()-TOO_OLD:
                continue

            with self.meta.columns.locker:
                self.meta.columns.update({
                    "set": {
                        "last_updated": Date.now()
                    },
                    "clear":[
                        "count",
                        "cardinality",
                        "partitions",
                    ],
                    "where": {"eq": {"es_index": c.es_index, "es_column": c.es_column}}
                })
            Log.note("Could not get {{col.es_index}}.{{col.es_column}} info", col=c)
Developer: klahnakoski, Project: esReplicate, Lines: 26, Source: meta.py

Example 9: parse_short_desc

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import alert [as alias]
def parse_short_desc(bug):
    parts = bug.short_desc.split("|")
    if len(parts) in [2, 3]:
        bug.result.test = parts[0].strip()
        bug.result.message = parts[1].strip()
    elif any(map(parts[0].strip().endswith, [".html", ".py", ".js", ".xul"])) and len(parts)>2:
        bug.result.test = parts[0].strip()
        bug.result.message = parts[1].strip()
    elif len(parts) >= 4:
        set_default(bug.result, parse_status(parts[0]))
        bug.result.test = parts[1].strip()
        bug.result.message = parts[3].strip()
    elif any(black in bug.short_desc for black in blacklist):
        Log.note("IGNORED {{line}}", line=bug.short_desc)
    elif bug.bug_id in [1165765]:
        Log.note("IGNORED {{line}}", line=bug.short_desc)
    elif "###" in bug.short_desc:
        bug.short_desc = bug.short_desc.replace("###", " | ")
        parse_short_desc(bug)
    else:
        Log.alert("can not handle {{bug_id}}: {{line}}", line=bug.short_desc, bug_id=bug.bug_id)

    if bug.result.test.lower().startswith("intermittent "):
        bug.result.test = bug.result.test[13:]
Developer: klahnakoski, Project: intermittents, Lines: 26, Source: get_data.py

Example 10: _dispatch_work

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import alert [as alias]
    def _dispatch_work(self, source_block):
        """
        source_block POINTS TO THE bucket AND key TO PROCESS
        :return: False IF THERE IS NOTHING LEFT TO DO
        """
        source_keys = listwrap(coalesce(source_block.key, source_block.keys))

        if not isinstance(source_block.bucket, basestring):  # FIX MISTAKE
            source_block.bucket = source_block.bucket.bucket
        bucket = source_block.bucket
        work_actions = [w for w in self.settings.workers if w.source.bucket == bucket]

        if not work_actions:
            Log.note("No worker defined for records from {{bucket}}, {{action}}.\n{{message|indent}}",
                bucket= source_block.bucket,
                message= source_block,
                action= "skipping" if self.settings.keep_unknown_on_queue else "deleting")
            return not self.settings.keep_unknown_on_queue

        for action in work_actions:
            try:
                source_key = unicode(source_keys[0])
                if len(source_keys) > 1:
                    multi_source = action._source
                    source = ConcatSources([multi_source.get_key(k) for k in source_keys])
                    source_key = MIN(source_keys)  # REPRESENT THE WHOLE BLOCK BY ITS SMALLEST KEY
                else:
                    source = action._source.get_key(source_key)
                    source_key = source.key

                Log.note("Execute {{action}} on bucket={{source}} key={{key}}",
                    action= action.name,
                    source= source_block.bucket,
                    key= source_key)

                if action.transform_type == "bulk":
                    old_keys = set()
                else:
                    old_keys = action._destination.keys(prefix=source_block.key)

                new_keys = set(action._transformer(source_key, source, action._destination, resources=self.resources, please_stop=self.please_stop))

                #VERIFY KEYS
                if len(new_keys) == 1 and list(new_keys)[0] == source_key:
                    pass  # ok
                else:
                    etls = map(key2etl, new_keys)
                    etls = qb.sort(etls, "id")
                    for i, e in enumerate(etls):
                        if i != e.id:
                            Log.error("expecting keys to have dense order: {{ids}}", ids=etls.id)
                    #VERIFY KEYS EXIST
                    if hasattr(action._destination, "get_key"):
                        for k in new_keys:
                            action._destination.get_key(k)

                for n in action._notify:
                    for k in new_keys:
                        n.add(k)

                if action.transform_type == "bulk":
                    continue

                # DUE TO BUGS THIS INVARIANT IS NOW BROKEN
                # TODO: FIGURE OUT HOW TO FIX THIS (CHANGE NAME OF THE SOURCE BLOCK KEY?)
                # for n in new_keys:
                #     if not n.startswith(source_key):
                #         Log.error("Expecting new keys ({{new_key}}) to start with source key ({{source_key}})",  new_key= n,  source_key= source_key)

                if not new_keys and old_keys:
                    Log.alert("Expecting some new keys after etl of {{source_key}}, especially since there were old ones\n{{old_keys}}",
                        old_keys= old_keys,
                        source_key= source_key)
                    continue
                elif not new_keys:
                    Log.alert("Expecting some new keys after processing {{source_key}}",
                        old_keys= old_keys,
                        source_key= source_key)
                    continue

                for k in new_keys:
                    if len(k.split(".")) == 3 and action.destination.type!="test_result":
                        Log.error("two dots have not been needed yet, this is a consitency check")

                delete_me = old_keys - new_keys
                if delete_me:
                    if action.destination.bucket == "ekyle-test-result":
                        for k in delete_me:
                            action._destination.delete_key(k)
                    else:
                        Log.note("delete keys?\n{{list}}",  list= sorted(delete_me))
                        # for k in delete_me:
                # WE DO NOT PUT KEYS ON WORK QUEUE IF ALREADY NOTIFYING SOME OTHER
                # AND NOT GOING TO AN S3 BUCKET
                if not action._notify and isinstance(action._destination, (aws.s3.Bucket, S3Bucket)):
                    for k in old_keys | new_keys:
                        self.work_queue.add(Dict(
                            bucket=action.destination.bucket,
                            key=k
                        ))
#......... some code omitted here .........
Developer: klahnakoski, Project: Activedata-ETL, Lines: 103, Source: etl.py

Example 11: _update_cardinality

# Required import: from pyLibrary.debugs.logs import Log [as alias]
# Or: from pyLibrary.debugs.logs.Log import alert [as alias]

#......... some code omitted here .........
                            "last_updated": Date.now()
                        },
                        "where": {"eq": {"table": c.table, "name": c.name}}
                    })
                return

            es_index = c.table.split(".")[0]
            result = self.default_es.post("/"+es_index+"/_search", data={
                "aggs": {c.name: _counting_query(c)},
                "size": 0
            })
            r = result.aggregations.values()[0]
            count = result.hits.total
            cardinality = coalesce(r.value, r._nested.value)
            if cardinality == None:
                Log.error("logic error")

            query = Dict(size=0)
            if c.type in ["object", "nested"]:
                Log.note("{{field}} has {{num}} parts", field=c.name, num=cardinality)
                with self.columns.locker:
                    self.columns.update({
                        "set": {
                            "count": count,
                            "cardinality": cardinality,
                            "last_updated": Date.now()
                        },
                        "clear": ["partitions"],
                        "where": {"eq": {"table": c.table, "name": c.name}}
                    })
                return
            elif cardinality > 1000 or (count >= 30 and cardinality == count) or (count >= 1000 and cardinality / count > 0.99):
                Log.note("{{field}} has {{num}} parts", field=c.name, num=cardinality)
                with self.columns.locker:
                    self.columns.update({
                        "set": {
                            "count": count,
                            "cardinality": cardinality,
                            "last_updated": Date.now()
                        },
                        "clear": ["partitions"],
                        "where": {"eq": {"table": c.table, "name": c.name}}
                    })
                return
            elif c.type in _elasticsearch.ES_NUMERIC_TYPES and cardinality > 30:
                Log.note("{{field}} has {{num}} parts", field=c.name, num=cardinality)
                with self.columns.locker:
                    self.columns.update({
                        "set": {
                            "count": count,
                            "cardinality": cardinality,
                            "last_updated": Date.now()
                        },
                        "clear": ["partitions"],
                        "where": {"eq": {"table": c.table, "name": c.name}}
                    })
                return
            elif c.nested_path:
                query.aggs[literal_field(c.name)] = {
                    "nested": {"path": listwrap(c.nested_path)[0]},
                    "aggs": {"_nested": {"terms": {"field": c.abs_name, "size": 0}}}
                }
            else:
                query.aggs[literal_field(c.name)] = {"terms": {"field": c.abs_name, "size": 0}}

            result = self.default_es.post("/"+es_index+"/_search", data=query)

            aggs = result.aggregations.values()[0]
            if aggs._nested:
                parts = qb.sort(aggs._nested.buckets.key)
            else:
                parts = qb.sort(aggs.buckets.key)

            Log.note("{{field}} has {{parts}}", field=c.name, parts=parts)
            with self.columns.locker:
                self.columns.update({
                    "set": {
                        "count": count,
                        "cardinality": cardinality,
                        "partitions": parts,
                        "last_updated": Date.now()
                    },
                    "where": {"eq": {"table": c.table, "abs_name": c.abs_name}}
                })
        except Exception, e:
            if "IndexMissingException" in e and c.table.startswith("testing"):
                Log.alert("{{col.table}} does not exist", col=c)
            else:
                self.columns.update({
                    "set": {
                        "last_updated": Date.now()
                    },
                    "clear":[
                        "count",
                        "cardinality",
                        "partitions",
                    ],
                    "where": {"eq": {"table": c.table, "abs_name": c.abs_name}}
                })
                Log.warning("Could not get {{col.table}}.{{col.abs_name}} info", col=c, cause=e)
Developer: klahnakoski, Project: MoDevETL, Lines: 104, Source: meta.py


Note: The pyLibrary.debugs.logs.Log.alert examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by various programmers; copyright of the source code remains with the original authors, and redistribution or use should follow each project's License. Do not reproduce this article without permission.