This article collects typical usage examples of the OR attribute from the Python module google.appengine.ext.ndb. If you are wondering what ndb.OR does, how to use it, or where it shows up in real code, the curated examples below may help. You can also read further about the module it belongs to, google.appengine.ext.ndb.
The following presents 12 code examples of ndb.OR, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
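Before the collected examples, here is a minimal sketch of what ndb.OR does: it combines several filter nodes into a single disjunction that can be passed to Model.query(). The sketch reuses the Article model that appears in Examples 3-5 below, but its property definitions and the helper name are assumed for illustration rather than taken from the original projects.

from google.appengine.ext import ndb

class Article(ndb.Model):
    # Assumed definition of the Article model used in Examples 3-5.
    title = ndb.StringProperty()
    tags = ndb.StringProperty(repeated=True)

def query_python_or_ruby():
    # Match articles tagged either 'python' or 'ruby'.
    return Article.query(ndb.OR(Article.tags == 'python',
                                Article.tags == 'ruby')).fetch()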
Example 1: CalculateInstallerState
# Required import: from google.appengine.ext import ndb [as alias]
# Or: from google.appengine.ext.ndb import OR [as alias]
def CalculateInstallerState(self):
    """Returns the blockable's installer state as prescribed by Upvote.

    NOTE: Due to the ancestor query, this method will not reflect changes
    within uncommitted transactions.

    Returns:
        The current installer state prescribed by Upvote.
    """
    # pylint: disable=g-explicit-bool-comparison, singleton-comparison
    query = rule_models.Bit9Rule.query(
        rule_models.Bit9Rule.in_effect == True,
        ndb.OR(
            rule_models.Bit9Rule.policy == _POLICY.FORCE_INSTALLER,
            rule_models.Bit9Rule.policy == _POLICY.FORCE_NOT_INSTALLER),
        ancestor=self.key
    ).order(-rule_models.Bit9Rule.updated_dt)
    # pylint: enable=g-explicit-bool-comparison, singleton-comparison

    installer_rule = query.get()
    if installer_rule is None:
        return self.detected_installer
    else:
        return installer_rule.policy == _POLICY.FORCE_INSTALLER
Example 2: query_display
# Required import: from google.appengine.ext import ndb [as alias]
# Or: from google.appengine.ext.ndb import OR [as alias]
def query_display(cls, user_id, delta_minutes=60):
    """
    Query all jobs whose state is scheduled, queued or sent (but not done),
    OR that are done and were scheduled no more than delta_minutes ago.
    """
    shortly_ago = datetime.datetime.utcnow() - datetime.timedelta(
        minutes=delta_minutes)
    # Query jobs that are still in flight, or done but recent enough to display.
    return cls.query(ndb.OR(cls.state.IN(['scheduled', 'queued', 'sent']),
                            ndb.AND(cls.scheduled_at >= shortly_ago,
                                    cls.state == 'done')),
                     cls.user_id == user_id)
Example 3: query_article_inequality_explicit
# Required import: from google.appengine.ext import ndb [as alias]
# Or: from google.appengine.ext.ndb import OR [as alias]
def query_article_inequality_explicit():
    query = Article.query(ndb.OR(Article.tags < 'perl',
                                 Article.tags > 'perl'))
    return query
Example 4: query_article_in_equivalent
# Required import: from google.appengine.ext import ndb [as alias]
# Or: from google.appengine.ext.ndb import OR [as alias]
def query_article_in_equivalent():
    query = Article.query(ndb.OR(Article.tags == 'python',
                                 Article.tags == 'ruby',
                                 Article.tags == 'php'))
    return query
Example 5: query_article_nested
# Required import: from google.appengine.ext import ndb [as alias]
# Or: from google.appengine.ext.ndb import OR [as alias]
def query_article_nested():
    query = Article.query(ndb.AND(Article.tags == 'python',
                                  ndb.OR(Article.tags.IN(['ruby', 'jruby']),
                                         ndb.AND(Article.tags == 'php',
                                                 Article.tags != 'perl'))))
    return query
Example 6: task_bq_run
# Required import: from google.appengine.ext import ndb [as alias]
# Or: from google.appengine.ext.ndb import OR [as alias]
def task_bq_run(start, end):
    """Sends TaskRunResult to BigQuery swarming.task_results_run table.

    Multiple queries are run one after the other. This is because ndb.OR()
    cannot be used when the subqueries are inequalities on different fields.
    """
    def _convert(e):
        """Returns a tuple (bq_key, row)."""
        out = swarming_pb2.TaskResult()
        e.to_proto(out)
        return (e.task_id, out)

    failed = 0
    total = 0
    seen = set()

    # Completed
    q = TaskRunResult.query(
        TaskRunResult.completed_ts >= start,
        TaskRunResult.completed_ts <= end,
        # Disable cache for consistency.
        default_options=ndb.QueryOptions(use_cache=False, use_memcache=False))
    cursor = None
    more = True
    while more:
        entities, cursor, more = q.fetch_page(500, start_cursor=cursor)
        rows = [_convert(e) for e in entities]
        seen.update(e.task_id for e in entities)
        total += len(rows)
        failed += bq_state.send_to_bq('task_results_run', rows)
    return total, failed
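The docstrings of Examples 6 and 7 point at a Datastore restriction worth spelling out: a single query may only apply inequality filters to one property, so ndb.OR() cannot merge inequality subqueries over different fields, and the original code instead runs several queries one after the other. Below is a rough, hypothetical sketch of that split-and-merge pattern; the Event model, its properties, and events_in_window() are invented for illustration and are not part of the Swarming code.

from google.appengine.ext import ndb

class Event(ndb.Model):
    # Hypothetical model with two timestamp properties that each need an
    # inequality filter.
    started_ts = ndb.DateTimeProperty()
    completed_ts = ndb.DateTimeProperty()

def events_in_window(start, end):
    # started_ts and completed_ts are different properties, so their inequality
    # filters cannot be combined with ndb.OR() in a single query. Run one query
    # per property and de-duplicate the results by key instead.
    merged = {}
    for q in (Event.query(Event.started_ts >= start, Event.started_ts <= end),
              Event.query(Event.completed_ts >= start, Event.completed_ts <= end)):
        for e in q.fetch():
            merged[e.key] = e
    return list(merged.values())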
Example 7: task_bq_summary
# Required import: from google.appengine.ext import ndb [as alias]
# Or: from google.appengine.ext.ndb import OR [as alias]
def task_bq_summary(start, end):
    """Sends TaskResultSummary to BigQuery swarming.task_results_summary table.

    Multiple queries are run one after the other. This is because ndb.OR()
    cannot be used when the subqueries are inequalities on different fields.
    """
    def _convert(e):
        """Returns a tuple (bq_key, row)."""
        out = swarming_pb2.TaskResult()
        e.to_proto(out)
        if not out.HasField('end_time'):
            logging.warning('crbug.com/1064833: task %s does not have end_time %s',
                            e.task_id, out)
        return (e.task_id, out)

    failed = 0
    total = 0
    seen = set()

    # Completed
    q = TaskResultSummary.query(
        TaskResultSummary.completed_ts >= start,
        TaskResultSummary.completed_ts <= end,
        # Disable cache for consistency.
        default_options=ndb.QueryOptions(use_cache=False, use_memcache=False))
    cursor = None
    more = True
    while more:
        entities, cursor, more = q.fetch_page(500, start_cursor=cursor)
        rows = [_convert(e) for e in entities]
        seen.update(e.task_id for e in entities)
        total += len(rows)
        failed += bq_state.send_to_bq('task_results_summary', rows)
    return total, failed
Example 8: top_lovers_and_lovees
# Required import: from google.appengine.ext import ndb [as alias]
# Or: from google.appengine.ext.ndb import OR [as alias]
def top_lovers_and_lovees(utc_week_start, dept=None, limit=20):
    """Synchronously return a list of (employee key, sent love count) and a list
    of (employee key, received love count), each sorted in descending order of
    love sent or received.
    """
    sent_query = LoveCount.query(LoveCount.week_start == utc_week_start)
    if dept:
        sent_query = sent_query.filter(
            ndb.OR(LoveCount.meta_department == dept, LoveCount.department == dept))
    sent = sent_query.order(-LoveCount.sent_count).fetch()

    lovers = []
    for c in sent:
        if len(lovers) == limit:
            break
        if c.sent_count == 0:
            continue
        employee_key = c.key.parent()
        lovers.append((employee_key, c.sent_count))

    received = sorted(sent, key=lambda c: c.received_count, reverse=True)
    lovees = []
    for c in received:
        if len(lovees) == limit:
            break
        if c.received_count == 0:
            continue
        employee_key = c.key.parent()
        lovees.append((employee_key, c.received_count))

    return (lovers, lovees)
Example 9: get
# Required import: from google.appengine.ext import ndb [as alias]
# Or: from google.appengine.ext.ndb import OR [as alias]
def get(self, blockable_id):
    blockable_id = blockable_id.lower()
    blockable = binary_models.Blockable.get_by_id(blockable_id)
    if not blockable:
        self.abort(httplib.NOT_FOUND, explanation='Blockable not found.')

    platform = blockable.GetPlatformName()
    if platform != constants.PLATFORM.WINDOWS:
        self.respond_json(False)
        return

    # Get uncommitted Rules for this blockable that are relevant to the user.
    # Relevant Rules are either global Rules or local Rules that the user was
    # responsible for creating.
    # pylint: disable=g-explicit-bool-comparison, singleton-comparison
    pending_rule_query = rule_models.Bit9Rule.query(
        rule_models.Bit9Rule.in_effect == True,
        rule_models.Bit9Rule.is_committed == False,
        rule_models.Bit9Rule.policy.IN(constants.RULE_POLICY.SET_EXECUTION),
        ndb.OR(
            rule_models.Bit9Rule.host_id == '',  # Global rule
            rule_models.Bit9Rule.user_key == self.user.key),  # User's rule
        ancestor=blockable.key)
    # pylint: enable=g-explicit-bool-comparison, singleton-comparison
    has_pending_rules = bool(pending_rule_query.count(limit=1))

    self.respond_json(has_pending_rules)
Example 10: _ChangeModeForGroup
# Required import: from google.appengine.ext import ndb [as alias]
# Or: from google.appengine.ext.ndb import OR [as alias]
def _ChangeModeForGroup(self, mode, group, honor_lock=True):
    """Loads all users in the group and sets the client_mode for their hosts.

    This will make sure that hosts are in the right mode if they are members of
    a group, but will not change mode for non-members. Users can be left out of
    groups to be manually managed.

    Args:
        mode: The new client_mode to set.
        group: The group of users whose hosts should have a mode change.
        honor_lock: bool, whether the client_mode_lock will be honored.
    """
    logging.info('Changing mode to %s for %s', mode, group)

    group_client = group_utils.GroupManager()
    roster = group_client.AllMembers(group)
    logging.info('Fetched %d user(s) from group %s', len(roster), group)

    # Generate the NDB Keys for all users in the roster.
    user_keys = [
        ndb.Key(user_models.User, email) for email in roster if email]

    # ndb.OR falls over if it gets an empty iterable...
    if not user_keys:
        return

    for user_key_group in iter_utils.Grouper(user_keys, BATCH_SIZE):
        user_key_group = filter(None, user_key_group)
        deferred.defer(
            _ChangeModeForHosts, mode, user_key_group, honor_lock,
            _queue=constants.TASK_QUEUE.DEFAULT)
Example 11: _ChangeModeForHosts
# Required import: from google.appengine.ext import ndb [as alias]
# Or: from google.appengine.ext.ndb import OR [as alias]
def _ChangeModeForHosts(mode, user_keys, honor_lock=True):
    """Performs a client mode change for the specified users' hosts.

    Args:
        mode: The new client_mode to set.
        user_keys: The users whose host modes are to be changed.
        honor_lock: bool, whether the client_mode_lock property will be honored.
    """
    predicates = [
        host_models.SantaHost.primary_user == user_utils.EmailToUsername(key.id())
        for key in user_keys]
    query = host_models.SantaHost.query(ndb.OR(*predicates))
    hosts = query.fetch()
    updated_hosts = []

    for host in hosts:
        # If lock is honored, skip locked users.
        if honor_lock and host.client_mode_lock:
            continue
        # Ignore non-changes also.
        if host.client_mode == mode:
            continue
        # Proceed with the mode change.
        host.client_mode = mode
        host.client_mode_lock = False
        updated_hosts.append(host)

    ndb.put_multi(updated_hosts)
    logging.info(
        'Client mode changed to %s for %d host(s)', mode, len(updated_hosts))
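Examples 10 and 11 together show the dynamic-predicate pattern: build a list of equality filters, then expand it with ndb.OR(*predicates). As the comment in Example 10 notes, ndb.OR falls over when given no arguments, so the empty case has to be guarded before the call. A small hypothetical sketch of that guard follows; the Host model and hosts_for_users() are invented for illustration.

from google.appengine.ext import ndb

class Host(ndb.Model):
    # Hypothetical model for illustration.
    primary_user = ndb.StringProperty()

def hosts_for_users(usernames):
    # One equality predicate per user; skip empty names.
    predicates = [Host.primary_user == u for u in usernames if u]
    # ndb.OR needs at least one filter node, so bail out early when there is
    # nothing to query for (see the comment in Example 10).
    if not predicates:
        return []
    return Host.query(ndb.OR(*predicates)).fetch()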
Example 12: cancel
# Required import: from google.appengine.ext import ndb [as alias]
# Or: from google.appengine.ext.ndb import OR [as alias]
def cancel(self, request):
    """Cancel a subset of pending tasks based on the tags.

    Cancellation happens asynchronously, so when this call returns,
    cancellations will not have completed yet.
    """
    logging.debug('request %s', request)
    if not request.tags:
        # Prevent accidental cancellation of everything.
        raise endpoints.BadRequestException(
            'You must specify tags when cancelling multiple tasks.')

    # Check permission.
    # If the caller has global permission, it can access all tasks.
    # Otherwise, it requires a pool tag to check ACL.
    realms.check_tasks_cancel_acl(request.tags)

    now = utils.utcnow()
    cond = task_result.TaskResultSummary.state == task_result.State.PENDING
    if request.kill_running:
        cond = ndb.OR(
            cond,
            task_result.TaskResultSummary.state == task_result.State.RUNNING)
    q = task_result.TaskResultSummary.query(cond).order(
        task_result.TaskResultSummary.key)
    for tag in request.tags:
        q = q.filter(task_result.TaskResultSummary.tags == tag)

    tasks, cursor = datastore_utils.fetch_page(q, request.limit, request.cursor)
    if tasks:
        payload = json.dumps({
            'tasks': [t.task_id for t in tasks],
            'kill_running': request.kill_running or False,
        })
        ok = utils.enqueue_task(
            '/internal/taskqueue/important/tasks/cancel', 'cancel-tasks',
            payload=payload)
        if not ok:
            raise endpoints.InternalServerErrorException(
                'Could not enqueue cancel request, try again later')
    else:
        logging.info('No tasks to cancel.')

    return swarming_rpcs.TasksCancelResponse(
        cursor=cursor,
        matched=len(tasks),
        now=now)