

Python Thread.run Method Code Examples

This article collects typical usage examples of the pyLibrary.thread.threads.Thread.run method in Python. If you are unsure what Thread.run does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore further usage examples of pyLibrary.thread.threads.Thread, the class that defines this method.


The following presents 15 code examples of the Thread.run method, sorted by popularity by default.
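All of the examples share one calling convention: Thread.run(name, target, *args, **kwargs) starts target on a new thread immediately, injects a please_stop Signal that the target can poll for cooperative shutdown, and returns a Thread object that can be joined. The snippet below is a minimal sketch assembled from the patterns visible on this page; the exact Thread.run signature and the import location of Signal are assumptions, so verify them against your version of pyLibrary.

# Illustrative sketch only: assumes Thread.run passes a please_stop Signal to the target
from pyLibrary.thread.threads import Thread, Signal

def worker(message, please_stop):
    # Poll please_stop so the thread can be shut down cooperatively
    while not please_stop:
        print(message)
        Thread.sleep(seconds=1, please_stop=please_stop)  # interruptible sleep

please_stop = Signal()
thread = Thread.run("demo worker", worker, "hello", please_stop=please_stop)  # starts immediately

Thread.sleep(seconds=3)   # let the worker run for a few iterations
please_stop.go()          # signal the worker to stop
thread.join()             # wait for the worker thread to exit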

Example 1: __init__

# Required module import: from pyLibrary.thread.threads import Thread [as alias]
# Or: from pyLibrary.thread.threads.Thread import run [as alias]
    def __init__(self, host, index, alias=None, name=None, port=9200, settings=None):
        global _elasticsearch
        if hasattr(self, "settings"):
            return

        from pyLibrary.queries.containers.lists import ListContainer
        from pyLibrary.env import elasticsearch as _elasticsearch

        self.settings = settings
        self.default_name = coalesce(name, alias, index)
        self.default_es = _elasticsearch.Cluster(settings=settings)
        self.todo = Queue("refresh metadata", max=100000, unique=True)

        self.meta=Dict()
        table_columns = metadata_tables()
        column_columns = metadata_columns()
        self.meta.tables = ListContainer("meta.tables", [], wrap({c.name: c for c in table_columns}))
        self.meta.columns = ListContainer("meta.columns", [], wrap({c.name: c for c in column_columns}))
        self.meta.columns.insert(column_columns)
        self.meta.columns.insert(table_columns)
        # TODO: fix monitor so it does not bring down ES
        if ENABLE_META_SCAN:
            self.worker = Thread.run("refresh metadata", self.monitor)
        else:
            self.worker = Thread.run("refresh metadata", self.not_monitor)
        return
Developer ID: klahnakoski, Project: esReplicate, Lines of code: 28, Source file: meta.py

Example 2: main

# Required module import: from pyLibrary.thread.threads import Thread [as alias]
# Or: from pyLibrary.thread.threads.Thread import run [as alias]
def main():
    try:
        settings = startup.read_settings()
        Log.start(settings.debug)
        constants.set(settings.constants)

        with startup.SingleInstance(flavor_id=settings.args.filename):
            with aws.s3.Bucket(settings.destination) as bucket:

                if settings.param.debug:
                    if settings.source.durable:
                        Log.error("Can not run in debug mode with a durable queue")
                    synch = SynchState(bucket.get_key(SYNCHRONIZATION_KEY, must_exist=False))
                else:
                    synch = SynchState(bucket.get_key(SYNCHRONIZATION_KEY, must_exist=False))
                    if settings.source.durable:
                        synch.startup()

                queue = PersistentQueue(settings.param.queue_file)
                if queue:
                    last_item = queue[len(queue) - 1]
                    synch.source_key = last_item._meta.count + 1

                with pulse.Consumer(settings=settings.source, target=None, target_queue=queue, start=synch.source_key):
                    Thread.run("pulse log loop", log_loop, settings, synch, queue, bucket)
                    Thread.wait_for_shutdown_signal(allow_exit=True)
                    Log.warning("starting shutdown")

                queue.close()
                Log.note("write shutdown state to S3")
                synch.shutdown()

    except Exception, e:
        Log.error("Problem with etl", e)
Developer ID: klahnakoski, Project: Activedata-ETL, Lines of code: 36, Source file: pulse_logger.py

Example 3: worker

# Required module import: from pyLibrary.thread.threads import Thread [as alias]
# Or: from pyLibrary.thread.threads.Thread import run [as alias]
    def worker(please_stop):
        pending = Queue("pending ids", max=BATCH_SIZE*3, silent=False)

        pending_thread = Thread.run(
            "get pending",
            get_pending,
            source=source,
            since=last_updated,
            pending_bugs=pending,
            please_stop=please_stop
        )
        diff_thread = Thread.run(
            "diff",
            diff,
            source,
            destination,
            pending,
            please_stop=please_stop
        )
        replication_thread = Thread.run(
            "replication",
            replicate,
            source,
            destination,
            pending,
            config.fix,
            please_stop=please_stop
        )
        pending_thread.join()
        diff_thread.join()
        pending.add(Thread.STOP)
        replication_thread.join()
        done.go()
        please_stop.go()
Developer ID: klahnakoski, Project: esReplicate, Lines of code: 36, Source file: replicate.py

Example 4: __init__

# Required module import: from pyLibrary.thread.threads import Thread [as alias]
# Or: from pyLibrary.thread.threads.Thread import run [as alias]
 def __init__(self, host, index, type="log", max_size=1000, batch_size=100, settings=None):
     """
     settings ARE FOR THE ELASTICSEARCH INDEX
     """
     self.es = Cluster(settings).get_or_create_index(
         schema=convert.json2value(convert.value2json(SCHEMA), leaves=True),
         limit_replicas=True,
         tjson=True,
         settings=settings
     )
     self.batch_size=batch_size
     self.es.add_alias("debug")
     self.queue = Queue("debug logs to es", max=max_size, silent=True)
     Thread.run("add debug logs to es", self._insert_loop)
Developer ID: mozilla, Project: ChangeDetector, Lines of code: 16, Source file: log_usingElasticSearch.py

Example 5: main

# Required module import: from pyLibrary.thread.threads import Thread [as alias]
# Or: from pyLibrary.thread.threads.Thread import run [as alias]
def main():
    global config

    try:
        config = startup.read_settings()
        with startup.SingleInstance(flavor_id=config.args.filename):
            constants.set(config.constants)
            Log.start(config.debug)

            es = elasticsearch.Cluster(config.destination).get_or_create_index(config.destination)

            please_stop = Signal()
            Thread.run("aggregator", loop_all_days, es, please_stop=please_stop)
            Thread.wait_for_shutdown_signal(please_stop=please_stop, allow_exit=True)
    except Exception, e:
        Log.error("Serious problem with Test Failure Aggregator service!  Shutdown completed!", cause=e)
Developer ID: klahnakoski, Project: TestFailures, Lines of code: 18, Source file: app.py

Example 6: _get_columns

# Required module import: from pyLibrary.thread.threads import Thread [as alias]
# Or: from pyLibrary.thread.threads.Thread import run [as alias]
    def _get_columns(self, table=None, metadata=None):
        # TODO: HANDLE MORE THAN ONE ES, MAP TABLE SHORT_NAME TO ES INSTANCE
        if not metadata:
            metadata = self.default_es.get_metadata(force=True)

        def parse_all(please_stop):
            for abs_index, meta in jx.sort(metadata.indices.items(), {"value": 0, "sort": -1}):
                if meta.index != abs_index:
                    continue

                for _, properties in meta.mappings.items():
                    if please_stop:
                        return
                    self._parse_properties(abs_index, properties, meta)

        if table:
            for abs_index, meta in jx.sort(metadata.indices.items(), {"value": 0, "sort": -1}):
                if table == meta.index:
                    for _, properties in meta.mappings.items():
                        self._parse_properties(abs_index, properties, meta)
                    return
                if table == abs_index:
                    self._get_columns(table=meta.index, metadata=metadata)
                    return
        else:
            self.parser = Thread.run("parse properties", parse_all)
Developer ID: klahnakoski, Project: esReplicate, Lines of code: 28, Source file: meta.py

Example 7: __init__

# Required module import: from pyLibrary.thread.threads import Thread [as alias]
# Or: from pyLibrary.thread.threads.Thread import run [as alias]
 def __init__(self, db=None):
     """
     :param db:  Optional, wrap a sqlite db in a thread
     :return: Multithread save database
     """
     self.db = None
     self.queue = Queue("sql commands")   # HOLD (command, result, signal) PAIRS
     self.worker = Thread.run("sqlite db thread", self._worker)
     self.get_trace = DEBUG
Developer ID: klahnakoski, Project: esReplicate, Lines of code: 11, Source file: sqlite.py

Example 8: startup

# Required module import: from pyLibrary.thread.threads import Thread [as alias]
# Or: from pyLibrary.thread.threads.Thread import run [as alias]
    def startup(self):
        try:
            json = self.synch.read()
            if not json:
                Log.note("{{synchro_key}} does not exist.  Starting.",  synchro_key= SYNCHRONIZATION_KEY)
                return

            last_run = convert.json2value(json)
            self.next_key = last_run.next_key
            self.source_key = last_run.source_key
            if last_run.action == "shutdown":
                Log.note("{{synchro_key}} shutdown detected.  Starting at {{num}}",
                    synchro_key= SYNCHRONIZATION_KEY,
                    num= self.next_key)
            else:
                resume_time = Date(last_run.timestamp) + WAIT_FOR_ACTIVITY
                Log.note("Shutdown not detected, waiting until {{time}} to see if existing pulse_logger is running...",  time= resume_time)
                while resume_time > Date.now():
                    Thread.sleep(seconds=10)
                    json = self.synch.read()
                    if json is None:
                        Log.note("{{synchro_key}} disappeared!  Starting over.",  synchro_key= SYNCHRONIZATION_KEY)
                        self._start()
                        self.pinger_thread = Thread.run("synch pinger", self._pinger)
                        return

                    self.next_key = last_run.next_key
                    self.source_key = last_run.source_key
                    if last_run.action == "shutdown":
                        Log.note("Shutdown detected!  Resuming...")
                        self._start()
                        self.pinger_thread = Thread.run("synch pinger", self._pinger)
                        return

                    if last_run.timestamp > self.ping_time:
                        Log.error("Another instance of pulse_logger is running!")
                    Log.note("No activity, still waiting...")
                Log.note("No activity detected!  Resuming...")
        except Exception, e:
            Log.error("Can not start", e)
Developer ID: klahnakoski, Project: Activedata-ETL, Lines of code: 42, Source file: synchro.py

Example 9: __init__

# Required module import: from pyLibrary.thread.threads import Thread [as alias]
# Or: from pyLibrary.thread.threads.Thread import run [as alias]
 def __init__(
     self,
     bucket,  # NAME OF THE BUCKET
     aws_access_key_id=None,  # CREDENTIAL
     aws_secret_access_key=None,  # CREDENTIAL
     region=None,  # NAME OF AWS REGION, REQUIRED FOR SOME BUCKETS
     public=False,
     debug=False,
     settings=None
 ):
     self.uid = None
     self.bucket = s3.Bucket(settings=settings)
     Log.alert("Using {{bucket}} for S3 storage", bucket=self.bucket.name)
     self.temp_queue = PersistentQueue(bucket + "_queue.txt")
     self._figure_out_start_point()
     self.push_to_s3 = Thread.run("pushing to " + bucket, self._worker)
Developer ID: klahnakoski, Project: MoDataSubmission, Lines of code: 18, Source file: storage.py

Example 10: es_deepop

# Required module import: from pyLibrary.thread.threads import Thread [as alias]
# Or: from pyLibrary.thread.threads.Thread import run [as alias]

#......... part of the code is omitted here .........
                new_select.append({
                    "name": s.name,
                    "value": s.value.var,
                    "pull": "_id",
                    "put": {"name": s.name, "index": i, "child": "."}
                })
                i += 1
            else:
                column = columns[(s.value.var,)]
                parent = column.es_column+"."
                prefix = len(parent)
                net_columns = [c for c in columns if c.es_column.startswith(parent) and c.type not in ["object", "nested"]]
                if not net_columns:
                    pull = get_pull(column)
                    if not column.nested_path:
                        es_query.fields += [column.es_column]
                    new_select.append({
                        "name": s.name,
                        "pull": pull,
                        "nested_path": listwrap(column.nested_path)[0],
                        "put": {"name": s.name, "index": i, "child": "."}
                    })
                else:
                    done = set()
                    for n in net_columns:
                        # THE COLUMNS CAN HAVE DUPLICATE REFERENCES TO THE SAME ES_COLUMN
                        if n.es_column in done:
                            continue
                        done.add(n.es_column)

                        pull = get_pull(n)
                        if not n.nested_path:
                            es_query.fields += [n.es_column]
                        new_select.append({
                            "name": s.name,
                            "pull": pull,
                            "nested_path": listwrap(n.nested_path)[0],
                            "put": {"name": s.name, "index": i, "child": n.es_column[prefix:]}
                        })
                i += 1
        else:
            expr = s.value
            for v in expr.vars():
                for n in columns:
                    if n.name == v:
                        if not n.nested_path:
                            es_query.fields += [n.es_column]

            pull = EXPRESSION_PREFIX + s.name
            post_expressions[pull] = compile_expression(expr.map(map_to_local).to_python())

            new_select.append({
                "name": s.name if is_list else ".",
                "pull": pull,
                "value": expr.to_dict(),
                "put": {"name": s.name, "index": i, "child": "."}
            })
            i += 1

    # <COMPLICATED> ES needs two calls to get all documents
    more = []
    def get_more(please_stop):
        more.append(es09.util.post(
            es,
            Dict(
                filter=more_filter,
                fields=es_query.fields
            ),
            query.limit
        ))
    if more_filter:
        need_more = Thread.run("get more", target=get_more)

    with Timer("call to ES") as call_timer:
        data = es09.util.post(es, es_query, query.limit)

    # EACH A HIT IS RETURNED MULTIPLE TIMES FOR EACH INNER HIT, WITH INNER HIT INCLUDED
    def inners():
        for t in data.hits.hits:
            for i in t.inner_hits[literal_field(query_path)].hits.hits:
                t._inner = i._source
                for k, e in post_expressions.items():
                    t[k] = e(t)
                yield t
        if more_filter:
            Thread.join(need_more)
            for t in more[0].hits.hits:
                yield t
    #</COMPLICATED>

    try:
        formatter, groupby_formatter, mime_type = format_dispatch[query.format]

        output = formatter(inners(), new_select, query)
        output.meta.timing.es = call_timer.duration
        output.meta.content_type = mime_type
        output.meta.es_query = es_query
        return output
    except Exception, e:
        Log.error("problem formatting", e)
Developer ID: klahnakoski, Project: MoDataSubmission, Lines of code: 104, Source file: deep.py

Example 11: File

# Required module import: from pyLibrary.thread.threads import Thread [as alias]
# Or: from pyLibrary.thread.threads.Thread import run [as alias]
    }
        for f, d, in acc.stats.iteritems()
    ]
    stats_file = File(profile_settings.filename, suffix=convert.datetime2string(datetime.now(), "_%Y%m%d_%H%M%S"))
    stats_file.write(convert.list2tab(stats))


# GET THE MACHINE METADATA
machine_metadata = wrap({
    "python": platform.python_implementation(),
    "os": (platform.system() + platform.release()).strip(),
    "name": platform.node()
})


# GET FROM AWS, IF WE CAN
def _get_metadata_from_from_aws(please_stop):
    with suppress_exception:
        from pyLibrary import aws

        ec2 = aws.get_instance_metadata()
        if ec2:
            machine_metadata.aws_instance_type = ec2.instance_type
            machine_metadata.name = ec2.instance_id

Thread.run("get aws machine metadata", _get_metadata_from_from_aws)

if not Log.main_log:
    Log.main_log = TextLog_usingStream(sys.stdout)

Developer ID: klahnakoski, Project: MoTreeherder, Lines of code: 31, Source file: logs.py

Example 12: range

# Required module import: from pyLibrary.thread.threads import Thread [as alias]
# Or: from pyLibrary.thread.threads.Thread import run [as alias]
            for b in queue:
                if please_stop:
                    return
                try:
                    url = b.url + "json-info?node=" + revision
                    response = http.get(url, timeout=30)
                    if response.status_code == 200:
                        with locker:
                            output.append(b)
                        Log.note("{{revision}} found at {{url}}", url=url, revision=revision)
                except Exception, f:
                    problems.append(f)

        threads = []
        for i in range(20):
            threads.append(Thread.run("find changeset " + unicode(i), _find, please_stop=please_stop))

        for t in threads:
            try:
                t.join()
            except Exception, e:
                Log.error("Not expected", cause=e)

        if problems:
            Log.error("Could not scan for {{revision}}", revision=revision, cause=problems[0])

        return output

    def _extract_bug_id(self, description):
        """
        LOOK INTO description to FIND bug_id
Developer ID: klahnakoski, Project: MoHg, Lines of code: 33, Source file: hg_mozilla_org.py

Example 13: worker

# Required module import: from pyLibrary.thread.threads import Thread [as alias]
# Or: from pyLibrary.thread.threads.Thread import run [as alias]
    WILL SIGNAL please_stop WHEN THIS AWS INSTANCE IS DUE FOR SHUTDOWN
    """

    def worker(please_stop):
        while not please_stop:
            try:
                response = requests.get("http://169.254.169.254/latest/meta-data/spot/termination-time")
                if response.status_code != 400:
                    please_stop.go()
                    return
            except Exception, e:
                pass  # BE QUIET
                Thread.sleep(seconds=61, please_stop=please_stop)
            Thread.sleep(seconds=11, please_stop=please_stop)

    Thread.run("listen for termination", worker)


def get_instance_metadata(timeout=None):
    if not isinstance(timeout, (int, float)):
        timeout = Duration(timeout).seconds

    output = wrap({k.replace("-", "_"): v for k, v in boto_utils.get_instance_metadata(timeout=5, num_retries=2).items()})
    return output


def aws_retry(func):
    def output(*args, **kwargs):
        while True:
            try:
                return func(*args, **kwargs)
Developer ID: klahnakoski, Project: MoDataSubmission, Lines of code: 33, Source file: __init__.py

Example 14: UniqueIndex

# Required module import: from pyLibrary.thread.threads import Thread [as alias]
# Or: from pyLibrary.thread.threads.Thread import run [as alias]
        query = {
            "query": {"match_all": {}},
            "size": 20000
        }

        docs = es.search(query).hits.hits._source
        for d in docs:
            d.name = d.name.lower()
        try:
            return UniqueIndex(["name", "locale"], data=docs, fail_on_dup=False)
        except Exception, e:
            Log.error("Bad branch in ES index", cause=e)


    def find_changeset(self, revision):
        def _find(b, please_stop):
            try:
                url = b.url + "rev/" + revision
                response = http.get(url)
                if response.status_code == 200:
                    Log.note("{{revision}} found at {{url}}", url=url, revision=revision)
            except Exception, e:
                pass

        threads = []
        for b in self.branches:
            threads.append(Thread.run("find changeset", _find, b))
        for t in threads:
            t.join()
        pass
Developer ID: klahnakoski, Project: Activedata-ETL, Lines of code: 32, Source file: hg_mozilla_org.py

Example 15: get_instance_metadata

# Required module import: from pyLibrary.thread.threads import Thread [as alias]
# Or: from pyLibrary.thread.threads.Thread import run [as alias]
                response = requests.get("http://169.254.169.254/latest/meta-data/spot/termination-time")
                if response.status_code not in [400, 404]:
                    Log.warning("Shutdown AWS Spot Node {{name}} {{type}}", name=machine_metadata.name, type=machine_metadata.aws_instance_type)
                    please_stop.go()
                    return
            except Exception, e:
                e = Except.wrap(e)
                if "Failed to establish a new connection: [Errno 10060]" in e or "A socket operation was attempted to an unreachable network" in e:
                    Log.warning("AWS Spot Detection has shutdown, probably not a spot node, (http://169.254.169.254 is unreachable)")
                    return
                else:
                    Log.warning("AWS shutdown detection has problems", cause=e)
                Thread.sleep(seconds=61, please_stop=please_stop)
            Thread.sleep(seconds=11, please_stop=please_stop)

    Thread.run("listen for termination", worker, please_stop=please_stop)


def get_instance_metadata(timeout=None):
    if not isinstance(timeout, (int, float)):
        timeout = Duration(timeout).seconds

    output = wrap({k.replace("-", "_"): v for k, v in boto_utils.get_instance_metadata(timeout=coalesce(timeout, 5), num_retries=2).items()})
    return output


def aws_retry(func):
    def output(*args, **kwargs):
        while True:
            try:
                return func(*args, **kwargs)
Developer ID: klahnakoski, Project: TestFailures, Lines of code: 33, Source file: __init__.py


Note: The pyLibrary.thread.threads.Thread.run examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please refer to each project's License before distributing or using the code; do not reproduce without permission.