本文整理汇总了Python中mo_threads.Thread.run方法的典型用法代码示例。如果您正苦于以下问题:Python Thread.run方法的具体用法?Python Thread.run怎么用?Python Thread.run使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类mo_threads.Thread
的用法示例。
在下文中一共展示了Thread.run方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from mo_threads import Thread [as 别名]
# 或者: from mo_threads.Thread import run [as 别名]
def __init__(self, host, index, alias=None, name=None, port=9200, kwargs=None):
    """
    Build the metadata manager for an Elasticsearch cluster and start the
    background refresh thread.

    :param host: ES host (carried in kwargs; not read directly here)
    :param index: default index name (used as fallback for default_name)
    :param alias: preferred name, before index
    :param name: preferred name, before alias and index
    :param port: ES port (carried in kwargs; not read directly here)
    :param kwargs: full settings bundle, stored as self.settings
    """
    global _elasticsearch
    # Singleton guard: a second __init__ on an already-configured instance is a no-op
    if hasattr(self, "settings"):
        return
    # Late imports — presumably to avoid circular imports at module load; confirm
    from pyLibrary.queries.containers.list_usingPythonList import ListContainer
    from pyLibrary.env import elasticsearch as _elasticsearch
    self.settings = kwargs
    self.default_name = coalesce(name, alias, index)
    self.default_es = _elasticsearch.Cluster(kwargs=kwargs)
    # unique=True: the same metadata-refresh work item is queued at most once
    self.todo = Queue("refresh metadata", max=100000, unique=True)
    self.es_metadata = Null
    # Backdated so the first refresh treats the metadata as stale
    self.last_es_metadata = Date.now()-OLD_METADATA
    self.meta=Data()
    table_columns = metadata_tables()
    column_columns = metadata_columns()
    self.meta.tables = ListContainer("meta.tables", [], wrap({c.names["."]: c for c in table_columns}))
    self.meta.columns = ColumnList()
    self.meta.columns.insert(column_columns)
    self.meta.columns.insert(table_columns)
    # TODO: fix monitor so it does not bring down ES
    if ENABLE_META_SCAN:
        self.worker = Thread.run("refresh metadata", self.monitor)
    else:
        self.worker = Thread.run("refresh metadata", self.not_monitor)
    return
示例2: capture_termination_signal
# 需要导入模块: from mo_threads import Thread [as 别名]
# 或者: from mo_threads.Thread import run [as 别名]
def capture_termination_signal(please_stop):
    """
    WILL SIGNAL please_stop WHEN THIS AWS INSTANCE IS DUE FOR SHUTDOWN

    Polls the EC2 instance-metadata spot termination endpoint in a background
    thread; any status other than 400/404 is treated as a termination notice.
    """
    def worker(please_stop):
        seen_problem = False
        while not please_stop:
            request_time = (time.time() - timer.START)/60  # MINUTES
            try:
                # Spot termination notice endpoint: 404 (or 400) means "no
                # termination scheduled"; anything else signals shutdown
                response = requests.get("http://169.254.169.254/latest/meta-data/spot/termination-time")
                seen_problem = False
                if response.status_code not in [400, 404]:
                    Log.alert("Shutdown AWS Spot Node {{name}} {{type}}", name=machine_metadata.name, type=machine_metadata.aws_instance_type)
                    please_stop.go()
            except Exception as e:
                e = Except.wrap(e)
                if "Failed to establish a new connection: [Errno 10060]" in e or "A socket operation was attempted to an unreachable network" in e:
                    # Metadata service unreachable: not running on EC2, stop polling
                    Log.note("AWS Spot Detection has shutdown, probably not a spot node, (http://169.254.169.254 is unreachable)")
                    return
                elif seen_problem:
                    # IGNORE THE FIRST PROBLEM
                    Log.warning("AWS shutdown detection has more than one consecutive problem: (last request {{time|round(1)}} minutes since startup)", time=request_time, cause=e)
                seen_problem = True
                # Back off longer after an error before retrying
                (Till(seconds=61) | please_stop).wait()
            # Normal polling interval
            (Till(seconds=11) | please_stop).wait()
    Thread.run("listen for termination", worker)
示例3: __init__
# 需要导入模块: from mo_threads import Thread [as 别名]
# 或者: from mo_threads.Thread import run [as 别名]
def __init__(self, rate=None, amortization_period=None, source=None, database=None, kwargs=None):
    """
    Rate-limited, Sqlite-backed cache in front of an hg.mozilla.org-style service.

    :param rate: max outbound requests per second (default HG_REQUEST_PER_SECOND)
    :param amortization_period: window over which the request rate is averaged
    :param source: object whose .url is the upstream service
    :param database: Sqlite database used as the persistent cache
    :param kwargs: full settings bundle (not read directly here)
    """
    self.amortization_period = coalesce(amortization_period, AMORTIZATION_PERIOD)
    self.rate = coalesce(rate, HG_REQUEST_PER_SECOND)
    self.cache_locker = Lock()
    self.cache = {}  # MAP FROM url TO (ready, headers, response, timestamp) PAIR
    self.no_cache = {}  # VERY SHORT TERM CACHE
    self.workers = []
    self.todo = Queue(APP_NAME+" todo")
    # Bounded so at most one amortization period's worth of requests can queue up
    self.requests = Queue(APP_NAME + " requests", max=int(self.rate * self.amortization_period.seconds))
    self.url = URL(source.url)
    self.db = Sqlite(database)
    self.inbound_rate = RateLogger("Inbound")
    self.outbound_rate = RateLogger("hg.mo")
    # First run: create the cache table if the database has no tables yet
    if not self.db.query("SELECT name FROM sqlite_master WHERE type='table'").data:
        with self.db.transaction() as t:
            t.execute(
                "CREATE TABLE cache ("
                " path TEXT PRIMARY KEY, "
                " headers TEXT, "
                " response TEXT, "
                " timestamp REAL "
                ")"
            )
    # CONCURRENCY workers drain the request queue; limiter enforces the outbound
    # rate; cleaner presumably evicts old cache entries — confirm in _cache_cleaner
    self.threads = [
        Thread.run(APP_NAME+" worker" + text_type(i), self._worker)
        for i in range(CONCURRENCY)
    ]
    self.limiter = Thread.run(APP_NAME+" limiter", self._rate_limiter)
    self.cleaner = Thread.run(APP_NAME+" cleaner", self._cache_cleaner)
示例4: __init__
# 需要导入模块: from mo_threads import Thread [as 别名]
# 或者: from mo_threads.Thread import run [as 别名]
def __init__(self, name):
    """
    Start a background daemon that reports the observed request rate.

    :param name: label used when reporting the measured rate
    """
    # Rate state; the daemon thread reads these under self.lock
    self.request_rate = 0.0
    self.last_request = Date.now()
    self.lock = Lock("rate locker")
    self.name = name
    # Fire-and-forget: the daemon lives for the life of the process
    Thread.run("rate logger", self._daemon)
示例5: queue_consumer
# 需要导入模块: from mo_threads import Thread [as 别名]
# 或者: from mo_threads.Thread import run [as 别名]
def queue_consumer(pull_queue, please_stop=None):
    """
    Pull recorded requests off an AWS queue and replay each in its own thread,
    reproducing the original inter-request timing.

    :param pull_queue: settings for the aws.Queue to consume
    :param please_stop: Signal that stops the consumer loop
    """
    queue = aws.Queue(pull_queue)
    time_offset = None  # local-clock minus recorded request_time of first request
    request_count = 0
    while not please_stop:
        request = queue.pop(till=please_stop)
        if please_stop:
            break
        if not request:
            Log.note("Nothing in queue, pausing for 5 seconds...")
            (please_stop | Till(seconds=5)).wait()
            continue
        if SKIP_TRY_REQUESTS and 'try' in request.where['and'].eq.branch:
            Log.note("Skipping try revision.")
            queue.commit()
            continue
        now = Date.now().unix
        if time_offset is None:
            # Anchor the replay clock to the first request seen
            time_offset = now - request.meta.request_time
        # Scheduled replay moment for this request, in local time
        next_request = request.meta.request_time + time_offset
        if next_request > now:
            Log.note("Next request in {{wait_time}}", wait_time=Duration(seconds=next_request - now))
            Till(till=next_request).wait()
        # One thread per request; commit only after the thread is launched
        Thread.run("request "+text_type(request_count), one_request, request)
        request_count += 1
        queue.commit()
示例6: __init__
# 需要导入模块: from mo_threads import Thread [as 别名]
# 或者: from mo_threads.Thread import run [as 别名]
def __init__(self):
    """
    Initialize service statistics and launch one monitoring daemon per
    statistics family (percent-complete, threads, memory, requests).
    """
    self.out_of_memory_restart = False

    # totals
    self.total_locker = Lock()
    self.total_files_requested = 0
    self.total_tuids_mapped = 0

    # thread accounting
    self.threads_locker = Lock()
    self.waiting = 0
    self.threads_waiting = 0

    # request accounting — all counters start at zero
    self.requests_locker = Lock()
    for counter in (
        "requests_total",
        "requests_complete",
        "requests_incomplete",
        "requests_passed",
        "requests_failed",
    ):
        setattr(self, counter, 0)

    # memory accounting
    self.prev_mem = 0
    self.curr_mem = 0
    self.initial_growth = {}

    # one daemon per statistics family
    for daemon_name, daemon in (
        ("pc-daemon", self.run_pc_daemon),
        ("threads-daemon", self.run_threads_daemon),
        ("memory-daemon", self.run_memory_daemon),
        ("requests-daemon", self.run_requests_daemon),
    ):
        Thread.run(daemon_name, daemon)
示例7: __init__
# 需要导入模块: from mo_threads import Thread [as 别名]
# 或者: from mo_threads.Thread import run [as 别名]
def __init__(self, name):
    """
    Column-metadata table persisted to a per-name sqlite file, with a
    background thread applying queued changes to the database.

    :param name: suffix for the "metadata.<name>.sqlite" database file
    """
    Table.__init__(self, "meta.columns")
    self.db_file = File("metadata." + name + ".sqlite")
    self.data = {}  # MAP FROM ES_INDEX TO (abs_column_name to COLUMNS)
    self.locker = Lock()
    self._schema = None
    # check_same_thread=False: the _db_worker thread below also uses this
    # connection; isolation_level=None puts sqlite into autocommit mode
    self.db = sqlite3.connect(
        database=self.db_file.abspath, check_same_thread=False, isolation_level=None
    )
    self.last_load = Null
    self.todo = Queue(
        "update columns to db"
    )  # HOLD (action, column) PAIR, WHERE action in ['insert', 'update']
    self._db_load()
    # Background thread drains self.todo into the sqlite db
    Thread.run("update " + name, self._db_worker)
示例8: __init__
# 需要导入模块: from mo_threads import Thread [as 别名]
# 或者: from mo_threads.Thread import run [as 别名]
def __init__(self, name, config):
    """
    Spawn a python_worker.py subprocess and start threads to service it.

    :param name: name for the child Process
    :param config: worker configuration; configuring logging here is an error
    """
    config = wrap(config)
    if config.debug.logs:
        Log.error("not allowed to configure logging on other process")
    self.process = Process(name, [PYTHON, "mo_threads" + os.sep + "python_worker.py"], shell=True)
    # First message to the worker's stdin is its configuration (tracing forced on)
    self.process.stdin.add(value2json(set_default({"debug": {"trace": True}}, config)))
    self.lock = Lock("wait for response from "+name)
    # Current round-trip state, guarded by self.lock
    self.current_task = None
    self.current_response = None
    self.current_error = None
    # NOTE(review): both threads get an empty name — consider naming them
    # for easier debugging; confirm nothing relies on the empty name
    self.daemon = Thread.run("", self._daemon)
    self.errors = Thread.run("", self._stderr)
示例9: _find_revision
# 需要导入模块: from mo_threads import Thread [as 别名]
# 或者: from mo_threads.Thread import run [as 别名]
def _find_revision(self, revision):
    """
    Search the major branches, three worker threads in parallel, for the
    given revision.

    :param revision: changeset id to look for
    :return: list of Revision objects found
    """
    please_stop = False  # NOTE(review): never set True; workers run to queue exhaustion
    locker = Lock()
    output = []
    queue = Queue("branches", max=2000)
    queue.extend(b for b in self.branches if b.locale == DEFAULT_LOCALE and b.name in ["try", "mozilla-inbound", "autoland"])
    # Sentinel so each worker's iteration over the queue terminates
    queue.add(THREAD_STOP)
    problems = []
    def _find(please_stop):
        for b in queue:
            if please_stop:
                return
            try:
                url = b.url + "json-info?node=" + revision
                rev = self.get_revision(Revision(branch=b, changeset={"id": revision}))
                with locker:
                    output.append(rev)
                Log.note("Revision found at {{url}}", url=url)
            except Exception as f:
                # NOTE(review): collected but never inspected — lookup failures
                # are effectively ignored
                problems.append(f)
    threads = []
    for i in range(3):
        threads.append(Thread.run("find changeset " + text_type(i), _find, please_stop=please_stop))
    for t in threads:
        with assert_no_exception:
            t.join()
    return output
示例10: setup
# 需要导入模块: from mo_threads import Thread [as 别名]
# 或者: from mo_threads.Thread import run [as 别名]
def setup(
    self,
    instance,  # THE boto INSTANCE OBJECT FOR THE MACHINE TO SETUP
    utility  # THE utility OBJECT FOUND IN CONFIG
):
    """
    Install the ETL stack on a new EC2 instance, bounded by the configured
    setup_timeout; raises (via Log.error) if setup does not finish in time.
    """
    with self.locker:
        if not self.settings.setup_timeout:
            Log.error("expecting instance.setup_timeout to prevent setup from locking")
        def worker(please_stop):
            cpu_count = int(round(utility.cpu))
            # hide('output'): suppress fabric command output for these steps
            with hide('output'):
                Log.note("setup {{instance}}", instance=instance.id)
                self._config_fabric(instance)
                Log.note("update packages on {{instance}} ip={{ip}}", instance=instance.id, ip=instance.ip_address)
                try:
                    self._update_ubuntu_packages()
                except Exception as e:
                    # Package update failure aborts setup for this instance
                    Log.warning("Can not setup {{instance}}, type={{type}}", instance=instance.id, type=instance.instance_type, cause=e)
                    return
                Log.note("setup etl on {{instance}}", instance=instance.id)
                self._setup_etl_code()
                Log.note("setup grcov on {{instance}}", instance=instance.id)
                self._setup_grcov()
                Log.note("add config file on {{instance}}", instance=instance.id)
                self._add_private_file()
                Log.note("setup supervisor on {{instance}}", instance=instance.id)
                self._setup_etl_supervisor(cpu_count)
                Log.note("setup done {{instance}}", instance=instance.id)
        # NOTE(review): unicode() is Python 2 only
        worker_thread = Thread.run("etl setup started at "+unicode(Date.now().format()), worker)
        # Wait until the worker stops or the timeout elapses, whichever is first
        (Till(timeout=Duration(self.settings.setup_timeout).seconds) | worker_thread.stopped).wait()
        if not worker_thread.stopped:
            Log.error("critical failure in thread {{name|quote}}", name=worker_thread.name)
        worker_thread.join()
示例11: __init__
# 需要导入模块: from mo_threads import Thread [as 别名]
# 或者: from mo_threads.Thread import run [as 别名]
def __init__(self, host, index, port=9200, type="log", max_size=1000, batch_size=100, kwargs=None):
    """
    settings ARE FOR THE ELASTICSEARCH INDEX

    Log handler that batches debug logs through a bounded queue into ES;
    a background thread (_insert_loop) drains the queue.
    """
    self.es = Cluster(kwargs).get_or_create_index(
        schema=mo_json.json2value(value2json(SCHEMA), leaves=True),
        limit_replicas=True,
        tjson=True,
        kwargs=kwargs
    )
    self.batch_size = batch_size
    self.es.add_alias(coalesce(kwargs.alias, kwargs.index))
    # silent=True — presumably so queue problems do not themselves emit logs
    self.queue = Queue("debug logs to es", max=max_size, silent=True)
    # Default retry policy: 3 attempts, one minute between them
    self.es.settings.retry.times = coalesce(self.es.settings.retry.times, 3)
    self.es.settings.retry.sleep = Duration(coalesce(self.es.settings.retry.sleep, MINUTE))
    Thread.run("add debug logs to es", self._insert_loop)
示例12: __init__
# 需要导入模块: from mo_threads import Thread [as 别名]
# 或者: from mo_threads.Thread import run [as 别名]
def __init__(
    self,
    host,
    index,
    port=9200,
    type="log",
    queue_size=1000,
    batch_size=100,
    kwargs=None,
):
    """
    settings ARE FOR THE ELASTICSEARCH INDEX

    Log handler that batches debug logs through a bounded queue into ES;
    self.worker (_insert_loop) drains the queue.
    """
    # Normalize timeout/retry settings, filling in defaults
    kwargs.timeout = Duration(coalesce(kwargs.timeout, "30second")).seconds
    kwargs.retry.times = coalesce(kwargs.retry.times, 3)
    kwargs.retry.sleep = Duration(coalesce(kwargs.retry.sleep, MINUTE)).seconds
    # Pick one host at random when several are configured
    kwargs.host = Random.sample(listwrap(host), 1)[0]
    schema = json2value(value2json(SCHEMA), leaves=True)
    # Mark the "~N~" property as nested for the typed-encoding mapping
    schema.mappings[type].properties["~N~"].type = "nested"
    self.es = Cluster(kwargs).get_or_create_index(
        schema=schema,
        limit_replicas=True,
        typed=True,
        kwargs=kwargs,
    )
    self.batch_size = batch_size
    self.es.add_alias(coalesce(kwargs.alias, kwargs.index))
    # silent=True — presumably so queue problems do not themselves emit logs
    self.queue = Queue("debug logs to es", max=queue_size, silent=True)
    self.worker = Thread.run("add debug logs to es", self._insert_loop)
示例13: __init__
# 需要导入模块: from mo_threads import Thread [as 别名]
# 或者: from mo_threads.Thread import run [as 别名]
def __init__(self, host, index, sql_file='metadata.sqlite', alias=None, name=None, port=9200, kwargs=None):
    """
    Metadata manager for an ES cluster (singleton-style) with a background
    refresh thread.

    :param sql_file: NOTE(review): accepted but never used in this body
    :param kwargs: full settings bundle, stored as self.settings
    """
    # Singleton guard: a second __init__ on a configured instance is a no-op
    if hasattr(self, "settings"):
        return
    self.too_old = TOO_OLD
    self.settings = kwargs
    self.default_name = coalesce(name, alias, index)
    self.es_cluster = elasticsearch.Cluster(kwargs=kwargs)
    self.index_does_not_exist = set()
    # unique=True: the same refresh work item is queued at most once
    self.todo = Queue("refresh metadata", max=100000, unique=True)
    self.index_to_alias = Relation_usingList()
    self.es_metadata = Null
    # Backdated so the first refresh treats metadata as stale
    self.metadata_last_updated = Date.now() - OLD_METADATA
    self.meta = Data()
    self.meta.columns = ColumnList()
    # Query paths for the built-in meta tables, known before any scan
    self.alias_to_query_paths = {
        "meta.columns": [['.']],
        "meta.tables": [['.']]
    }
    self.alias_last_updated = {
        "meta.columns": Date.now(),
        "meta.tables": Date.now()
    }
    table_columns = metadata_tables()
    self.meta.tables = ListContainer(
        "meta.tables",
        [
            # TableDesc("meta.columns", None, ".", Date.now()),
            # TableDesc("meta.tables", None, ".", Date.now())
        ],
        jx_base.Schema(".", table_columns)
    )
    self.meta.columns.extend(table_columns)
    # TODO: fix monitor so it does not bring down ES
    if ENABLE_META_SCAN:
        self.worker = Thread.run("refresh metadata", self.monitor)
    else:
        self.worker = Thread.run("refresh metadata", self.not_monitor)
    return
示例14: __init__
# 需要导入模块: from mo_threads import Thread [as 别名]
# 或者: from mo_threads.Thread import run [as 别名]
def __init__(
    self,
    hg=None,  # CONNECT TO hg
    repo=None,  # CONNECTION INFO FOR ES CACHE
    branches=None,  # CONNECTION INFO FOR ES CACHE
    use_cache=False,  # True IF WE WILL USE THE ES FOR DOWNLOADING BRANCHES
    timeout=30 * SECOND,
    kwargs=None
):
    """
    hg.mozilla.org client with an optional ES-backed cache and a background
    daemon draining self.todo.
    """
    if not _hg_branches:
        _late_imports()
    self.es_locker = Lock()
    self.todo = mo_threads.Queue("todo for hg daemon", max=DAEMON_QUEUE_SIZE)
    self.settings = kwargs
    self.timeout = Duration(timeout)
    # VERIFY CONNECTIVITY
    with Explanation("Test connect with hg"):
        response = http.head(self.settings.hg.url)
    # NOTE: "== None" (not "is None") — presumably deliberate so mo_dots Null
    # also matches; confirm before "fixing"
    if branches == None:
        self.branches = _hg_branches.get_branches(kwargs=kwargs)
        self.es = None
        return
    self.last_cache_miss = Date.now()
    set_default(repo, {"schema": revision_schema})
    self.es = elasticsearch.Cluster(kwargs=repo).get_or_create_index(kwargs=repo)
    def setup_es(please_stop):
        # Best-effort index tuning; failures are deliberately suppressed
        with suppress_exception:
            self.es.add_alias()
        with suppress_exception:
            self.es.set_refresh_interval(seconds=1)
    Thread.run("setup_es", setup_es)
    self.branches = _hg_branches.get_branches(kwargs=kwargs)
    # NOTE(review): overwrites the Duration(timeout) assigned above with the
    # raw value — confirm which type downstream code expects
    self.timeout = timeout
    Thread.run("hg daemon", self._daemon)
示例15: _setup
# 需要导入模块: from mo_threads import Thread [as 别名]
# 或者: from mo_threads.Thread import run [as 别名]
def _setup():
    """
    Build the shared test fixture: one Sqlite db plus, for each of two worker
    names, four begin/done Signal pairs and a thread running _work.

    :return: (db, threads, signals) triple
    """
    db = Sqlite()
    db.query("CREATE TABLE my_table (value TEXT)")
    threads = Data()
    signals = Data()
    for worker_name in ("a", "b"):
        pairs = []
        for _ in range(4):
            pairs.append({"begin": Signal(), "done": Signal()})
        signals[worker_name] = pairs
        # Each worker gets the shared db and its own signal pairs
        threads[worker_name] = Thread.run(worker_name, _work, worker_name, db, signals[worker_name])
    return db, threads, signals