本文整理汇总了Python中mo_threads.Thread类的典型用法代码示例。如果您正苦于以下问题:Python Thread类的具体用法?Python Thread怎么用?Python Thread使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Thread类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
def __init__(self, name):
    """A named rate tracker; spawns a daemon thread that logs the rate."""
    self.name = name
    self.lock = Lock("rate locker")
    # rolling request statistics, guarded by self.lock
    self.request_rate = 0.0
    self.last_request = Date.now()
    # background reporter; _daemon is expected to loop until told to stop
    Thread.run("rate logger", self._daemon)
示例2: __init__
def __init__(self, rate=None, amortization_period=None, source=None, database=None, kwargs=None):
    """
    Caching proxy for an hg.mozilla.org-like source.

    :param rate: max outbound requests per second (default HG_REQUEST_PER_SECOND)
    :param amortization_period: window over which the rate is amortized
    :param source: object with a .url for the upstream service
    :param database: sqlite database settings for the persistent cache
    """
    self.amortization_period = coalesce(amortization_period, AMORTIZATION_PERIOD)
    self.rate = coalesce(rate, HG_REQUEST_PER_SECOND)

    # in-memory caches; cache maps url -> (ready, headers, response, timestamp)
    self.cache_locker = Lock()
    self.cache = {}
    self.no_cache = {}  # very short term cache

    self.workers = []
    self.todo = Queue(APP_NAME+" todo")
    # bounded queue: capacity is the number of requests allowed in one window
    capacity = int(self.rate * self.amortization_period.seconds)
    self.requests = Queue(APP_NAME + " requests", max=capacity)

    self.url = URL(source.url)
    self.db = Sqlite(database)
    self.inbound_rate = RateLogger("Inbound")
    self.outbound_rate = RateLogger("hg.mo")

    # first run: create the persistent cache table
    if not self.db.query("SELECT name FROM sqlite_master WHERE type='table'").data:
        with self.db.transaction() as t:
            t.execute(
                "CREATE TABLE cache ("
                " path TEXT PRIMARY KEY, "
                " headers TEXT, "
                " response TEXT, "
                " timestamp REAL "
                ")"
            )

    # worker pool plus rate-limiter and cache-cleaner daemons
    self.threads = [
        Thread.run(APP_NAME+" worker" + text_type(i), self._worker)
        for i in range(CONCURRENCY)
    ]
    self.limiter = Thread.run(APP_NAME+" limiter", self._rate_limiter)
    self.cleaner = Thread.run(APP_NAME+" cleaner", self._cache_cleaner)
示例3: __init__
def __init__(self, host, index, alias=None, name=None, port=9200, kwargs=None):
    """Metadata manager over an Elasticsearch cluster (singleton-style: re-init is a no-op)."""
    global _elasticsearch
    if hasattr(self, "settings"):
        # already initialized; this object is shared
        return

    from pyLibrary.queries.containers.list_usingPythonList import ListContainer
    from pyLibrary.env import elasticsearch as _elasticsearch

    self.settings = kwargs
    self.default_name = coalesce(name, alias, index)
    self.default_es = _elasticsearch.Cluster(kwargs=kwargs)
    self.todo = Queue("refresh metadata", max=100000, unique=True)

    self.es_metadata = Null
    # force a metadata refresh on first use by pretending it is already old
    self.last_es_metadata = Date.now()-OLD_METADATA

    self.meta=Data()
    table_columns = metadata_tables()
    column_columns = metadata_columns()
    self.meta.tables = ListContainer("meta.tables", [], wrap({c.names["."]: c for c in table_columns}))
    self.meta.columns = ColumnList()
    self.meta.columns.insert(column_columns)
    self.meta.columns.insert(table_columns)

    # TODO: fix monitor so it does not bring down ES
    monitor_fn = self.monitor if ENABLE_META_SCAN else self.not_monitor
    self.worker = Thread.run("refresh metadata", monitor_fn)
示例4: capture_termination_signal
def capture_termination_signal(please_stop):
    """
    WILL SIGNAL please_stop WHEN THIS AWS INSTANCE IS DUE FOR SHUTDOWN
    """
    def worker(please_stop):
        seen_problem = False
        while not please_stop:
            request_time = (time.time() - timer.START)/60  # MINUTES
            try:
                # AWS metadata endpoint answers 200 when a spot termination is scheduled
                response = requests.get("http://169.254.169.254/latest/meta-data/spot/termination-time")
                seen_problem = False
                if response.status_code not in [400, 404]:
                    Log.alert("Shutdown AWS Spot Node {{name}} {{type}}", name=machine_metadata.name, type=machine_metadata.aws_instance_type)
                    please_stop.go()
            except Exception as e:
                e = Except.wrap(e)
                if "Failed to establish a new connection: [Errno 10060]" in e or "A socket operation was attempted to an unreachable network" in e:
                    # NOT AN AWS NODE AT ALL; GIVE UP LISTENING
                    Log.note("AWS Spot Detection has shutdown, probably not a spot node, (http://169.254.169.254 is unreachable)")
                    return
                elif seen_problem:
                    # IGNORE THE FIRST PROBLEM; COMPLAIN ON REPEATS
                    Log.warning("AWS shutdown detection has more than one consecutive problem: (last request {{time|round(1)}} minutes since startup)", time=request_time, cause=e)
                seen_problem = True
                # NOTE(review): back-off placement reconstructed — longer wait after an error
                (Till(seconds=61) | please_stop).wait()
            (Till(seconds=11) | please_stop).wait()

    Thread.run("listen for termination", worker)
示例5: queue_consumer
def queue_consumer(pull_queue, please_stop=None):
    """Replay requests from an AWS queue, pacing them by their original timestamps."""
    source = aws.Queue(pull_queue)
    time_offset = None  # difference between now and the recorded request times
    request_count = 0

    while not please_stop:
        request = source.pop(till=please_stop)
        if please_stop:
            break

        if not request:
            Log.note("Nothing in queue, pausing for 5 seconds...")
            (please_stop | Till(seconds=5)).wait()
            continue

        if SKIP_TRY_REQUESTS and 'try' in request.where['and'].eq.branch:
            Log.note("Skipping try revision.")
            source.commit()
            continue

        now = Date.now().unix
        if time_offset is None:
            # anchor the replay clock on the first request seen
            time_offset = now - request.meta.request_time

        # reproduce the original inter-request spacing
        next_request = request.meta.request_time + time_offset
        if next_request > now:
            Log.note("Next request in {{wait_time}}", wait_time=Duration(seconds=next_request - now))
            Till(till=next_request).wait()

        Thread.run("request "+text_type(request_count), one_request, request)
        request_count += 1
        source.commit()
示例6: inners
def inners():
    # yield each inner hit, decorated with its parent and the post expressions
    for hit in data.hits.hits:
        for inner in hit.inner_hits[literal_field(query_path)].hits.hits:
            hit._inner = inner._source
            for field, expr in post_expressions.items():
                hit[field] = expr(hit)
            yield hit
    # if a supplementary query was issued, wait for it and yield its hits too
    if more_filter:
        Thread.join(need_more)
        for hit in more[0].hits.hits:
            yield hit
示例7: __exit__
def __exit__(self, exc_type, exc_val, exc_tb):
    """Shut down the pulse listener: stop the loop, close the queue, disconnect."""
    Log.note("clean pulse exit")
    self.please_stop.go()
    # closing the queue may race with producers; swallow any error
    with suppress_exception:
        self.target_queue.close()
        Log.note("stop put into queue")
    try:
        self.pulse.disconnect()
    except Exception as e:
        # disconnect failure is non-fatal during teardown
        Log.warning("Can not disconnect during pulse exit, ignoring", e)
    Thread.__exit__(self, exc_type, exc_val, exc_tb)
示例8: StructuredLogger_usingThreadedStream
class StructuredLogger_usingThreadedStream(StructuredLogger):
    # stream CAN BE AN OBJECT WITH write() METHOD, OR A STRING
    # WHICH WILL eval() TO ONE

    def __init__(self, stream):
        assert stream
        if isinstance(stream, text_type):
            name = stream
            # SECURITY NOTE: eval() of a configured string — only safe for trusted config
            stream = self.stream = eval(stream)
            if name.startswith("sys.") and PY3:
                # sys.* streams want str on py3; decode the utf8 bytes before writing
                self.stream = Data(write=lambda d: stream.write(d.decode('utf8')))
        else:
            name = "stream"
            self.stream = stream

        # WRITE TO STREAMS CAN BE *REALLY* SLOW, WE WILL USE A THREAD
        from mo_threads import Queue

        def encode_and_write(value):
            if isinstance(value, text_type):
                value = value.encode('utf8')
            self.stream.write(value)

        self.queue = Queue("queue for " + self.__class__.__name__ + "(" + name + ")", max=10000, silent=True)
        self.thread = Thread("log to " + self.__class__.__name__ + "(" + name + ")", time_delta_pusher, appender=encode_and_write, queue=self.queue, interval=0.3)
        self.thread.parent.remove_child(self.thread)  # LOGGING WILL BE RESPONSIBLE FOR THREAD stop()
        self.thread.start()

    def write(self, template, params):
        try:
            self.queue.add({"template": template, "params": params})
            return self
        except Exception as e:
            raise e  # OH NO!

    def stop(self):
        try:
            # BE PATIENT, LET REST OF MESSAGE BE SENT
            self.queue.add(THREAD_STOP)
            self.thread.join()
        except Exception as e:
            if DEBUG_LOGGING:
                raise e
        try:
            self.queue.close()
        except Exception as f:
            if DEBUG_LOGGING:
                raise f
示例9: __init__
def __init__(self):
    """Service statistics; each counter group has its own lock, plus reporting daemons."""
    self.out_of_memory_restart = False

    # totals
    self.total_locker = Lock()
    self.total_files_requested = 0
    self.total_tuids_mapped = 0

    # thread occupancy
    self.threads_locker = Lock()
    self.waiting = 0
    self.threads_waiting = 0

    # request outcomes
    self.requests_locker = Lock()
    self.requests_total = 0
    self.requests_complete = 0
    self.requests_incomplete = 0
    self.requests_passed = 0
    self.requests_failed = 0

    # memory tracking
    self.prev_mem = 0
    self.curr_mem = 0
    self.initial_growth = {}

    # one reporting daemon per counter group
    for daemon_name, daemon in [
        ("pc-daemon", self.run_pc_daemon),
        ("threads-daemon", self.run_threads_daemon),
        ("memory-daemon", self.run_memory_daemon),
        ("requests-daemon", self.run_requests_daemon),
    ]:
        Thread.run(daemon_name, daemon)
示例10: StructuredLogger_usingThread
class StructuredLogger_usingThread(StructuredLogger):
    """Decorator that moves another StructuredLogger's writes onto a worker thread."""

    def __init__(self, logger):
        if not isinstance(logger, StructuredLogger):
            Log.error("Expecting a StructuredLogger")

        self.queue = Queue("Queue for " + self.__class__.__name__, max=10000, silent=True, allow_add_after_close=True)
        self.logger = logger

        def worker(logger, please_stop):
            try:
                while not please_stop:
                    batch = self.queue.pop_all()
                    if not batch:
                        (Till(seconds=1) | please_stop).wait()
                        continue
                    for entry in batch:
                        if entry is THREAD_STOP:
                            please_stop.go()
                        else:
                            logger.write(**entry)
            except Exception as e:
                # logging itself failed; fall back to stdout
                print("problem in " + StructuredLogger_usingThread.__name__ + ": " + str(e))
            finally:
                Log.note("stop the child")
                logger.stop()

        self.thread = Thread("Thread for " + self.__class__.__name__, worker, logger)
        self.thread.parent.remove_child(self.thread)  # LOGGING WILL BE RESPONSIBLE FOR THREAD stop()
        self.thread.start()

    def write(self, template, params):
        try:
            self.queue.add({"template": template, "params": params})
            return self
        except Exception as e:
            e = Except.wrap(e)
            raise e  # OH NO!

    def stop(self):
        try:
            # BE PATIENT, LET REST OF MESSAGE BE SENT
            self.queue.add(THREAD_STOP)
            self.thread.join()
            Log.note("joined on thread")
        except Exception as e:
            Log.note("problem in threaded logger" + str(e))
        with suppress_exception:
            self.queue.close()
示例11: __init__
def __init__(self, name):
    """Column metadata table backed by a per-name sqlite file."""
    Table.__init__(self, "meta.columns")
    self.db_file = File("metadata." + name + ".sqlite")
    self.data = {}  # MAP FROM ES_INDEX TO (abs_column_name to COLUMNS)
    self.locker = Lock()
    self._schema = None
    # check_same_thread=False: the db is shared with the _db_worker thread;
    # isolation_level=None: autocommit, transactions are managed explicitly
    self.db = sqlite3.connect(
        database=self.db_file.abspath, check_same_thread=False, isolation_level=None
    )
    self.last_load = Null
    # HOLD (action, column) PAIR, WHERE action in ['insert', 'update']
    self.todo = Queue("update columns to db")
    self._db_load()
    Thread.run("update " + name, self._db_worker)
示例12: __init__
def __init__(self, name, config):
    """Launch a python_worker subprocess and the threads that talk to it."""
    config = wrap(config)
    if config.debug.logs:
        # the child's logging is owned by this parent; refuse conflicting config
        Log.error("not allowed to configure logging on other process")

    self.process = Process(name, [PYTHON, "mo_threads" + os.sep + "python_worker.py"], shell=True)
    # hand the child its config (tracing forced on) over stdin as json
    self.process.stdin.add(value2json(set_default({"debug": {"trace": True}}, config)))

    self.lock = Lock("wait for response from "+name)
    self.current_task = None
    self.current_response = None
    self.current_error = None

    # pumps for the child's stdout and stderr
    self.daemon = Thread.run("", self._daemon)
    self.errors = Thread.run("", self._stderr)
示例13: __init__
def __init__(self, stream):
    """Accept a writable object, or a string that eval()s to one, and log to it via a thread."""
    assert stream
    if isinstance(stream, text_type):
        name = stream
        # SECURITY NOTE: eval() of a configured string — only safe for trusted config
        stream = self.stream = eval(stream)
        if name.startswith("sys.") and PY3:
            # sys.* streams want str on py3; decode the utf8 bytes before writing
            self.stream = Data(write=lambda d: stream.write(d.decode('utf8')))
    else:
        name = "stream"
        self.stream = stream

    # WRITE TO STREAMS CAN BE *REALLY* SLOW, WE WILL USE A THREAD
    from mo_threads import Queue

    def encode_and_write(value):
        if isinstance(value, text_type):
            value = value.encode('utf8')
        self.stream.write(value)

    self.queue = Queue("queue for " + self.__class__.__name__ + "(" + name + ")", max=10000, silent=True)
    self.thread = Thread("log to " + self.__class__.__name__ + "(" + name + ")", time_delta_pusher, appender=encode_and_write, queue=self.queue, interval=0.3)
    self.thread.parent.remove_child(self.thread)  # LOGGING WILL BE RESPONSIBLE FOR THREAD stop()
    self.thread.start()
示例14: __init__
def __init__(self, host, index, port=9200, type="log", queue_size=1000, batch_size=100, kwargs=None):
    """
    settings ARE FOR THE ELASTICSEARCH INDEX
    """
    # normalize settings before creating the index
    kwargs.timeout = Duration(coalesce(kwargs.timeout, "30second")).seconds
    kwargs.retry.times = coalesce(kwargs.retry.times, 3)
    kwargs.retry.sleep = Duration(coalesce(kwargs.retry.sleep, MINUTE)).seconds
    # pick one host at random when several are configured
    kwargs.host = Random.sample(listwrap(host), 1)[0]

    # mark the nested property in the schema before use
    schema = json2value(value2json(SCHEMA), leaves=True)
    schema.mappings[type].properties["~N~"].type = "nested"

    self.es = Cluster(kwargs).get_or_create_index(
        schema=schema,
        limit_replicas=True,
        typed=True,
        kwargs=kwargs,
    )
    self.batch_size = batch_size
    self.es.add_alias(coalesce(kwargs.alias, kwargs.index))
    self.queue = Queue("debug logs to es", max=queue_size, silent=True)
    self.worker = Thread.run("add debug logs to es", self._insert_loop)
示例15: __init__
def __init__(self, host, index, port=9200, type="log", max_size=1000, batch_size=100, kwargs=None):
    """
    settings ARE FOR THE ELASTICSEARCH INDEX

    :param max_size: bound on the in-memory log queue
    :param batch_size: number of log entries inserted per bulk request
    """
    self.es = Cluster(kwargs).get_or_create_index(
        schema=mo_json.json2value(value2json(SCHEMA), leaves=True),
        limit_replicas=True,
        tjson=True,
        kwargs=kwargs
    )
    self.batch_size = batch_size
    self.es.add_alias(coalesce(kwargs.alias, kwargs.index))
    self.queue = Queue("debug logs to es", max=max_size, silent=True)
    self.es.settings.retry.times = coalesce(self.es.settings.retry.times, 3)
    # NOTE(review): sleep is kept as a Duration here, while the sibling typed
    # logger stores .seconds — confirm what the retry consumer expects
    self.es.settings.retry.sleep = Duration(coalesce(self.es.settings.retry.sleep, MINUTE))
    # keep the thread handle (was discarded) so callers can join/stop the
    # inserter, matching the sibling logger's self.worker
    self.worker = Thread.run("add debug logs to es", self._insert_loop)