本文整理汇总了Python中pyLibrary.thread.threads.Queue.extend方法的典型用法代码示例。如果您正苦于以下问题:Python Queue.extend方法的具体用法?Python Queue.extend怎么用?Python Queue.extend使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pyLibrary.thread.threads.Queue的用法示例。
在下文中一共展示了Queue.extend方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: find_changeset
# 需要导入模块: from pyLibrary.thread.threads import Queue [as 别名]
# 或者: from pyLibrary.thread.threads.Queue import extend [as 别名]
def find_changeset(self, revision, please_stop=False):
    """
    Search all known branches for `revision`.

    Fills a shared queue with the branches, then (in `_find` workers)
    probes each branch's `json-info?node=<revision>` endpoint and
    collects the branches that answer HTTP 200 into `output`.

    NOTE(review): this scraped example ends after defining `_find`; the
    original presumably spawns threads running `_find` and returns
    `output` — confirm against the full source.
    """
    locker = Lock()  # guards `output`, which workers append to concurrently
    output = []      # branches where the revision was found
    queue = Queue("branches", max=2000)
    queue.extend(self.branches)
    queue.add(Thread.STOP)  # sentinel so workers stop when the queue drains
    problems = []    # exceptions collected from workers (best-effort probe)

    def _find(please_stop):
        for b in queue:
            if please_stop:
                return
            try:
                url = b.url + "json-info?node=" + revision
                response = http.get(url, timeout=30)
                if response.status_code == 200:
                    with locker:
                        output.append(b)
                    Log.note("{{revision}} found at {{url}}", url=url, revision=revision)
            except Exception as f:  # fixed: was Python-2-only `except Exception, f:`
                problems.append(f)
示例2: FromESMetadata
# 需要导入模块: from pyLibrary.thread.threads import Queue [as 别名]
# 或者: from pyLibrary.thread.threads.Queue import extend [as 别名]
class FromESMetadata(Schema):
"""
QUERY THE METADATA
"""
def __new__(cls, *args, **kwargs):
    """Singleton constructor: every instantiation returns the same object."""
    global singlton
    if not singlton:
        # first call: allocate the one shared instance
        singlton = object.__new__(cls)
    return singlton
@use_settings
def __init__(self, host, index, alias=None, name=None, port=9200, settings=None):
    """Build the metadata containers and start the background refresh worker."""
    global _elasticsearch
    if hasattr(self, "settings"):
        # singleton: a second __init__ call must not re-run setup
        return
    from pyLibrary.queries.containers.list_usingPythonList import ListContainer
    from pyLibrary.env import elasticsearch as _elasticsearch

    self.settings = settings
    self.default_name = coalesce(name, alias, index)
    self.default_es = _elasticsearch.Cluster(settings=settings)
    self.todo = Queue("refresh metadata", max=100000, unique=True)
    self.es_metadata = Null
    self.last_es_metadata = Date.now() - OLD_METADATA  # force first refresh to look stale

    self.meta = Dict()
    table_columns = metadata_tables()
    column_columns = metadata_columns()
    self.meta.tables = ListContainer("meta.tables", [], wrap({c.name: c for c in table_columns}))
    self.meta.columns = ColumnList()
    self.meta.columns.insert(column_columns)
    self.meta.columns.insert(table_columns)

    # TODO: fix monitor so it does not bring down ES
    worker_target = self.monitor if ENABLE_META_SCAN else self.not_monitor
    self.worker = Thread.run("refresh metadata", worker_target)
@property
def query_path(self):
    """This container is not nested; there is no query path."""
    return None
@property
def url(self):
    """Cluster path joined with the default name, dots turned into slashes."""
    name_as_path = self.default_name.replace(".", "/")
    return self.default_es.path + "/" + name_as_path
def get_table(self, table_name):
    """Return (wrapped) all known table records whose name equals `table_name`."""
    with self.meta.tables.locker:
        matches = [rec for rec in self.meta.tables.data if rec.name == table_name]
        return wrap(matches)
def _upsert_column(self, c):
# Insert column `c` if unknown, otherwise merge it into the existing
# (canonical) record; either way the affected record is queued on
# self.todo for the background metadata scanner.
# NOTE(review): indentation was lost in this scrape; the `if ENABLE_META_SCAN:`
# branch below presumably extends through `self.todo.extend(cols)`, but
# confirm the exact extent against the original source.
# ASSUMING THE self.meta.columns.locker IS HAD
existing_columns = self.meta.columns.find(c.table, c.name)
if not existing_columns:
self.meta.columns.add(c)
self.todo.add(c)
if ENABLE_META_SCAN:
Log.note("todo: {{table}}::{{column}}", table=c.table, column=c.es_column)
# MARK meta.columns AS DIRTY TOO
cols = self.meta.columns.find("meta.columns", None)
# invalidate cached stats so the scanner recomputes them
for cc in cols:
cc.partitions = cc.cardinality = None
cc.last_updated = Date.now()
self.todo.extend(cols)
else:
canonical = existing_columns[0]
if canonical.relative and not c.relative:
return  # RELATIVE COLUMNS WILL SHADOW ABSOLUTE COLUMNS
# copy every slot of the incoming column onto the canonical record
for key in Column.__slots__:
canonical[key] = c[key]
Log.note("todo: {{table}}::{{column}}", table=canonical.table, column=canonical.es_column)
self.todo.add(canonical)
def _get_columns(self, table=None):
    """Refresh cached cluster metadata if needed, then parse `table`'s mappings."""
    # TODO: HANDLE MORE THEN ONE ES, MAP TABLE SHORT_NAME TO ES INSTANCE
    meta = self.es_metadata.indices[table]
    if not meta or self.last_es_metadata < Date.now() - OLD_METADATA:
        # unknown index, or the cached cluster metadata is too old: re-pull it
        self.es_metadata = self.default_es.get_metadata(force=True)
        meta = self.es_metadata.indices[table]
    for _, properties in meta.mappings.items():
        self._parse_properties(meta.index, properties, meta)
def _parse_properties(self, abs_index, properties, meta):
abs_columns = _elasticsearch.parse_properties(abs_index, None, properties.properties)
abs_columns = abs_columns.filter( # TODO: REMOVE WHEN jobs PROPERTY EXPLOSION IS CONTAINED
lambda r: not r.es_column.startswith("other.") and
not r.es_column.startswith("previous_values.cf_") and
not r.es_index.startswith("debug") and
r.es_column.find("=")==-1 and
r.es_column.find(" ")==-1
)
#.........这里部分代码省略.........
示例3: FromESMetadata
# 需要导入模块: from pyLibrary.thread.threads import Queue [as 别名]
# 或者: from pyLibrary.thread.threads.Queue import extend [as 别名]
class FromESMetadata(Schema):
"""
QUERY THE METADATA
"""
def __new__(cls, *args, **kwargs):
    """Enforce the singleton: allocate once, then hand back the same instance."""
    global singlton
    if singlton:
        return singlton
    # lazily create the single shared instance
    singlton = object.__new__(cls)
    return singlton
@use_settings
def __init__(self, host, index, alias=None, name=None, port=9200, settings=None):
    """Set up metadata containers and launch the background refresh thread."""
    global _elasticsearch
    # singleton: skip setup when __init__ runs on the already-built instance
    if hasattr(self, "settings"):
        return
    from pyLibrary.queries.containers.lists import ListContainer
    from pyLibrary.env import elasticsearch as _elasticsearch

    self.settings = settings
    self.default_name = coalesce(name, alias, index)
    self.default_es = _elasticsearch.Cluster(settings=settings)
    self.todo = Queue("refresh metadata", max=100000, unique=True)

    self.meta = Dict()
    table_columns = metadata_tables()
    column_columns = metadata_columns()
    self.meta.tables = ListContainer("meta.tables", [], wrap({c.name: c for c in table_columns}))
    self.meta.columns = ListContainer("meta.columns", [], wrap({c.name: c for c in column_columns}))
    self.meta.columns.insert(column_columns)
    self.meta.columns.insert(table_columns)

    # TODO: fix monitor so it does not bring down ES
    monitor_fn = self.monitor if ENABLE_META_SCAN else self.not_monitor
    self.worker = Thread.run("refresh metadata", monitor_fn)
@property
def query_path(self):
    """No query path for this metadata container."""
    return None
@property
def url(self):
    """Base URL: cluster path plus the default name rendered as a path."""
    suffix = self.default_name.replace(".", "/")
    return self.default_es.path + "/" + suffix
def get_table(self, table_name):
    """Query the meta.tables container for records named `table_name`."""
    with self.meta.tables.locker:
        name_filter = {"where": {"eq": {"name": table_name}}}
        return self.meta.tables.query(name_filter)
def _upsert_column(self, c):
    """Add column `c`, or merge it into the matching record, and queue it for refresh."""
    # ASSUMING THE self.meta.columns.locker IS HAD
    matches = [r for r in self.meta.columns.data if r.table == c.table and r.name == c.name]
    if matches:
        canonical = matches[0]
        if canonical.relative and not c.relative:
            return  # RELATIVE COLUMNS WILL SHADOW ABSOLUTE COLUMNS
        # overwrite every slot of the canonical record with the incoming values
        for key in Column.__slots__:
            canonical[key] = c[key]
        Log.note("todo: {{table}}.{{column}}", table=canonical.table, column=canonical.es_column)
        self.todo.add(canonical)
    else:
        self.meta.columns.add(c)
        Log.note("todo: {{table}}.{{column}}", table=c.table, column=c.es_column)
        self.todo.add(c)
        # MARK meta.columns AS DIRTY TOO
        dirty = [r for r in self.meta.columns.data if r.table == "meta.columns"]
        for cc in dirty:
            # drop cached stats so the scanner recomputes them
            cc.partitions = cc.cardinality = None
            cc.last_updated = Date.now()
        self.todo.extend(dirty)
def _get_columns(self, table=None, metadata=None):
# TODO: HANDLE MORE THEN ONE ES, MAP TABLE SHORT_NAME TO ES INSTANCE
if not metadata:
metadata = self.default_es.get_metadata(force=True)
def parse_all(please_stop):
for abs_index, meta in jx.sort(metadata.indices.items(), {"value": 0, "sort": -1}):
if meta.index != abs_index:
continue
for _, properties in meta.mappings.items():
if please_stop:
return
self._parse_properties(abs_index, properties, meta)
if table:
for abs_index, meta in jx.sort(metadata.indices.items(), {"value": 0, "sort": -1}):
if table == meta.index:
for _, properties in meta.mappings.items():
self._parse_properties(abs_index, properties, meta)
return
if table == abs_index:
self._get_columns(table=meta.index, metadata=metadata)
#.........这里部分代码省略.........
示例4: FromESMetadata
# 需要导入模块: from pyLibrary.thread.threads import Queue [as 别名]
# 或者: from pyLibrary.thread.threads.Queue import extend [as 别名]
class FromESMetadata(object):
"""
QUERY THE METADATA
"""
def __new__(cls, *args, **kwargs):
    """Return the process-wide singleton, creating it on first use."""
    global singlton
    if singlton is None or not singlton:
        singlton = object.__new__(cls)
    return singlton
@use_settings
def __init__(self, host, index, alias=None, name=None, port=9200, settings=None):
    """Wire up the table/column containers and start the metadata worker."""
    global _elasticsearch
    if hasattr(self, "settings"):
        return  # singleton already configured
    from pyLibrary.queries.containers.lists import ListContainer
    from pyLibrary.env import elasticsearch as _elasticsearch

    self.settings = settings
    self.default_name = coalesce(name, alias, index)
    self.default_es = _elasticsearch.Cluster(settings=settings)
    self.todo = Queue("refresh metadata", max=100000, unique=True)

    table_columns = metadata_tables()
    column_columns = metadata_columns()
    self.tables = ListContainer("meta.tables", [], wrap({c.name: c for c in table_columns}))
    self.columns = ListContainer("meta.columns", [], wrap({c.name: c for c in column_columns}))
    self.columns.insert(column_columns)
    self.columns.insert(table_columns)

    # TODO: fix monitor so it does not bring down ES
    runner = self.monitor if ENABLE_META_SCAN else self.not_monitor
    self.worker = Thread.run("refresh metadata", runner)
@property
def query_path(self):
    """The metadata container is flat: no query path to report."""
    return None
@property
def url(self):
    """Join the cluster path with the default name (dots become slashes)."""
    path_name = self.default_name.replace(".", "/")
    return self.default_es.path + "/" + path_name
def get_table(self, table_name):
    """Return the table records matching `table_name` (holds the container lock)."""
    with self.tables.locker:
        query = {"where": {"eq": {"name": table_name}}}
        return self.tables.query(query)
def upsert_column(self, c):
# Insert column `c` into self.columns, or merge it into the first matching
# record; schedule the affected record(s) on self.todo for refresh.
# NOTE(review): `filter(...)` returns a list on Python 2 only; on Python 3
# a filter object is always truthy, which would break `if not existing_columns`.
existing_columns = filter(lambda r: r.table == c.table and r.abs_name == c.abs_name, self.columns.data)
if not existing_columns:
self.columns.add(c)
# a new column invalidates the cached stats of the meta.columns records
cols = filter(lambda r: r.table == "meta.columns", self.columns.data)
for cc in cols:
cc.partitions = cc.cardinality = cc.last_updated = None
self.todo.add(c)
self.todo.extend(cols)
else:
# merge the incoming properties over the first (canonical) match
set_default(existing_columns[0], c)
self.todo.add(existing_columns[0])
# TEST CONSISTENCY
# NOTE(review): O(n^2) duplicate scan over the todo queue, and
# Log.error("") raises with an empty message — presumably debug-only.
# The stripped indentation makes it unclear whether this check sits inside
# the else-branch or runs on every call; confirm against the original.
for c, d in product(list(self.todo.queue), list(self.todo.queue)):
if c.abs_name==d.abs_name and c.table==d.table and c!=d:
Log.error("")
def _get_columns(self, table=None):
# Fetch the ES mapping for the index named by `table` and upsert a Column
# record for every property found: once under the index name, and once per
# alias (only the first/latest index seen for each alias).
# TODO: HANDLE MORE THEN ONE ES, MAP TABLE SHORT_NAME TO ES INSTANCE
alias_done = set()
index = split_field(table)[0]
query_path = split_field(table)[1:]
metadata = self.default_es.get_metadata(index=index)
# iterate indices newest-first (descending sort on the index name)
for index, meta in qb.sort(metadata.indices.items(), {"value": 0, "sort": -1}):
for _, properties in meta.mappings.items():
columns = _elasticsearch.parse_properties(index, None, properties.properties)
columns = columns.filter(lambda r: not r.abs_name.startswith("other.") and not r.abs_name.startswith("previous_values.cf_")) # TODO: REMOVE WHEN jobs PROPERTY EXPLOSION IS CONTAINED
with Timer("upserting {{num}} columns", {"num": len(columns)}, debug=DEBUG):
with self.columns.locker:
for c in columns:
# ABSOLUTE
c.table = join_field([index]+query_path)
self.upsert_column(c)
# NOTE(review): indentation was stripped from this scrape; the alias loop
# below appears to be nested inside the per-column loop (it reads `c` and
# copies it), but confirm the exact nesting against the original source.
for alias in meta.aliases:
# ONLY THE LATEST ALIAS IS CHOSEN TO GET COLUMNS
if alias in alias_done:
continue
alias_done.add(alias)
c = copy(c)
c.table = join_field([alias]+query_path)
self.upsert_column(c)
def query(self, _query):
return self.columns.query(Query(set_default(
{
#.........这里部分代码省略.........