This article collects typical usage examples of the Python method sqlalchemy.sql.functions.count. If you have been wondering what functions.count does, how to call it, or where to find working examples, the curated code samples below should help. You can also explore the containing module, sqlalchemy.sql.functions, for related usage.
The following 15 code examples of functions.count are presented, sorted by popularity by default.
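All of the examples below share one basic pattern: import count, wrap a column in it, and execute the query. Here is a minimal, self-contained sketch of that pattern; the User model and the in-memory SQLite engine are illustrative assumptions, not part of the examples that follow (SQLAlchemy 1.4+ is assumed for the imports):

# Minimal sketch of sqlalchemy.sql.functions.count (hypothetical User model,
# in-memory SQLite; assumes SQLAlchemy 1.4+).
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base
from sqlalchemy.sql.functions import count

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add_all([User(name='a'), User(name='b')])
    # count(User.id) renders as COUNT(users.id); scalar() returns a plain int.
    print(session.query(count(User.id)).scalar())  # 2
    session.commit()  # persist the rows so later sketches can reuse them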
Example 1: test_logging_in_creates_user_no_https
# Required import: from sqlalchemy.sql import functions [as alias]
# Or: from sqlalchemy.sql.functions import count [as alias]
def test_logging_in_creates_user_no_https(self):
    dokomoforms.handlers.demo.options.https = False
    no_user = (
        self.session
        .query(count(Administrator.id))
        .filter_by(name='demo_user')
        .scalar()
    )
    self.assertEqual(no_user, 0)
    self.fetch('/demo/login', _logged_in_user=None)
    user = (
        self.session
        .query(count(Administrator.id))
        .filter_by(name='demo_user')
        .scalar()
    )
    self.assertEqual(user, 1)
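Note the pattern used in both assertions above: selecting count(Administrator.id) as the sole query entity and finishing with .scalar() returns a plain Python int (the value of the single row and column), so it can be compared directly with assertEqual.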
Example 2: test_logging_in_creates_user_https
# Required import: from sqlalchemy.sql import functions [as alias]
# Or: from sqlalchemy.sql.functions import count [as alias]
def test_logging_in_creates_user_https(self):
    dokomoforms.handlers.demo.options.https = True
    no_user = (
        self.session
        .query(count(Administrator.id))
        .filter_by(name='demo_user')
        .scalar()
    )
    self.assertEqual(no_user, 0)
    self.fetch('/demo/login', _logged_in_user=None)
    user = (
        self.session
        .query(count(Administrator.id))
        .filter_by(name='demo_user')
        .scalar()
    )
    self.assertEqual(user, 1)
Example 3: test_get_logged_in_admin
# Required import: from sqlalchemy.sql import functions [as alias]
# Or: from sqlalchemy.sql.functions import count [as alias]
def test_get_logged_in_admin(self):
    num_surveys = (
        self.session
        .query(count(models.Survey.id))
        .filter_by(creator_id='b7becd02-1a3f-4c1d-a0e1-286ba121aef4')
        .scalar()
    )
    response = self.fetch('/', method='GET')
    response_soup = BeautifulSoup(response.body, 'html.parser')
    links = response_soup.select('a.btn-login.btn-large')
    self.assertEqual(len(links), 0, msg=response.body)
    self.assertIn(
        'Account Overview', response.body.decode(), msg=response.body
    )
    survey_dropdown = (
        response_soup.find('ul', {'aria-labelledby': 'SurveysDropdown'})
    )
    self.assertEqual(
        len(survey_dropdown.findAll('li')),
        min(num_surveys, BaseHandler.num_surveys_for_menu),
        msg=survey_dropdown
    )
Example 4: test_login_success
# Required import: from sqlalchemy.sql import functions [as alias]
# Or: from sqlalchemy.sql.functions import count [as alias]
def test_login_success(self):
    dokomoforms.handlers.auth.options.https = False
    with patch.object(handlers.Login, '_async_post') as p:
        dummy = lambda: None
        dummy.body = json_encode(
            {'status': 'okay', 'email': 'test_creator@fixtures.com'}
        )
        p.return_value = tornado.gen.Task(
            lambda callback=None: callback(dummy)
        )
        response = self.fetch(
            '/user/login?assertion=woah', method='POST', body='',
            _logged_in_user=None
        )
        self.assertEqual(response.code, 200, msg=response.body)
        self.assertEqual(
            response.headers['Set-Cookie'].lower().count('secure'),
            1
        )
Example 5: tag_counts
# Required import: from sqlalchemy.sql import functions [as alias]
# Or: from sqlalchemy.sql.functions import count [as alias]
def tag_counts(self):
    """Return (tag, count, display size) tuples for this project's tags,
    e.g. [('arcade', 2, 16), ('opengl', 1, 14)].
    """
    tags = [t.value for t in self.tags]
    cnt = count(Tags.value)
    tag_counts = (inspect(self).session
                  .query(Tags.value, cnt)
                  .group_by(Tags.value)
                  .filter(Tags.value.in_(tags))
                  .order_by(cnt.desc())).all()
    return [(tag, cnt, (int(10 + min(24, sqrt(cnt) * 24 / 5))))
            for tag, cnt in tag_counts]
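Binding the count expression to a variable (cnt = count(Tags.value)) lets the exact same SQL expression appear both in the SELECT list and in order_by(cnt.desc()). A reduced sketch of the same group-and-order pattern, reusing the hypothetical User model and engine from the intro sketch above:

# Group-by-and-order-by-count sketch, continuing the intro example above.
from sqlalchemy.sql.functions import count

cnt = count(User.id)
with Session(engine) as session:
    rows = (session.query(User.name, cnt)
            .group_by(User.name)
            .order_by(cnt.desc())
            .all())
    # rows is a list of (name, count) tuples, most frequent name first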
Example 6: _delete_time_range
# Required import: from sqlalchemy.sql import functions [as alias]
# Or: from sqlalchemy.sql.functions import count [as alias]
def _delete_time_range(self, s, start, finish):
    composite_ids = s.query(Composite.id). \
        join(StatisticJournal, Composite.id == StatisticJournal.source_id). \
        join(StatisticName, StatisticJournal.statistic_name_id == StatisticName.id). \
        filter(StatisticName.owner == self.owner_out)
    if start:
        composite_ids = composite_ids.filter(StatisticJournal.time >= start)
    if finish:
        composite_ids = composite_ids.filter(StatisticJournal.time <= finish)
    log.debug(f'Delete query: {composite_ids}')
    n = s.query(count(Source.id)). \
        filter(Source.id.in_(composite_ids)). \
        scalar()
    if n:
        log.warning(f'Deleting {n} Composite sources ({start} - {finish})')
        s.query(Source). \
            filter(Source.id.in_(composite_ids)). \
            delete(synchronize_session=False)
        s.commit()
Example 7: _delete
# Required import: from sqlalchemy.sql import functions [as alias]
# Or: from sqlalchemy.sql.functions import count [as alias]
def _delete(self, s):
    start, finish = self._start_finish(type=local_time_to_time)
    s.commit()  # so that we don't have any risk of having something in the session that can be deleted
    statistic_names = s.query(StatisticName.id).filter(StatisticName.owner == self.owner_out)
    activity_journals = self._delimit_query(s.query(self._journal_type.id))
    statistic_journals = s.query(StatisticJournal.id). \
        filter(StatisticJournal.statistic_name_id.in_(statistic_names.cte()),
               StatisticJournal.source_id.in_(activity_journals))
    for repeat in range(2):
        if repeat:
            s.query(StatisticJournal).filter(StatisticJournal.id.in_(statistic_journals.cte())). \
                delete(synchronize_session=False)
            Timestamp.clear_keys(s, activity_journals.cte(), self.owner_out, constraint=None)
        else:
            n = s.query(count(StatisticJournal.id)). \
                filter(StatisticJournal.id.in_(statistic_journals.cte())).scalar()
            if n:
                log.warning(f'Deleting {n} statistics for {long_cls(self.owner_out)} from {start} to {finish}')
            else:
                log.warning(f'No statistics to delete for {long_cls(self.owner_out)} from {start} to {finish}')
    s.commit()
Example 8: _delete_from
# Required import: from sqlalchemy.sql import functions [as alias]
# Or: from sqlalchemy.sql.functions import count [as alias]
def _delete_from(self, s, start=None):
    composite_ids = s.query(Composite.id). \
        join(StatisticJournal, Composite.id == StatisticJournal.source_id). \
        join(StatisticName, StatisticJournal.statistic_name_id == StatisticName.id). \
        filter(StatisticName.owner == self.owner_out)
    if start:
        composite_ids = composite_ids.filter(StatisticJournal.time >= start)
    log.debug(f'Delete query: {composite_ids}')
    n = s.query(count(Source.id)). \
        filter(Source.id.in_(composite_ids)). \
        scalar()
    if n:
        log.warning(f'Deleting {n} Composite sources ({start} onwards)')
        s.query(Source). \
            filter(Source.id.in_(composite_ids)). \
            delete(synchronize_session=False)
        s.commit()
    Composite.clean(s)
Example 9: __missing_sources
# Required import: from sqlalchemy.sql import functions [as alias]
# Or: from sqlalchemy.sql.functions import count [as alias]
def __missing_sources(self, s):
    log.debug('Searching for missing sources')
    available = s.query(count(distinct(Source.id))). \
        join(StatisticJournal). \
        join(StatisticName). \
        filter(StatisticName.name == self.prefix + SPACE + N.HR_IMPULSE_10)
    used = s.query(count(distinct(Source.id))). \
        join(CompositeComponent, CompositeComponent.input_source_id == Source.id). \
        join(Composite, Composite.id == CompositeComponent.output_source_id). \
        join(StatisticJournal, StatisticJournal.source_id == Composite.id). \
        join(StatisticName). \
        filter(StatisticName.owner == self.owner_out,
               Source.type == SourceType.ACTIVITY)
    n_available = available.scalar()
    n_used = used.scalar()
    log.debug(f'Using {n_used} of {n_available} sources')
    return n_used != n_available
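Both queries above wrap the column in distinct() before counting, so the generated SQL is COUNT(DISTINCT source.id); this matters because the joins can yield several rows per source. A reduced sketch of the same idea, again continuing the intro example (the duplicate name stands in for join fan-out):

# COUNT(DISTINCT ...) sketch, continuing the intro example above.
from sqlalchemy import distinct
from sqlalchemy.sql.functions import count

with Session(engine) as session:
    session.add(User(name='a'))  # a second 'a' row
    n_rows = session.query(count(User.name)).scalar()             # 3
    n_names = session.query(count(distinct(User.name))).scalar()  # 2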
Example 10: run_pipeline
# Required import: from sqlalchemy.sql import functions [as alias]
# Or: from sqlalchemy.sql.functions import count [as alias]
def run_pipeline(data, type, like=tuple(), unlike=tuple(), id=None, progress=None, **extra_kargs):
    with data.db.session_context() as s:
        if id is None:  # don't run for each worker
            if type in (PipelineType.CALCULATE, PipelineType.READ_ACTIVITY, PipelineType.READ_MONITOR):
                Interval.clean(s)
        local_progress = ProgressTree(Pipeline.count(s, type, like=like, unlike=unlike, id=id), parent=progress)
        for pipeline in Pipeline.all(s, type, like=like, unlike=unlike, id=id):
            kargs = dict(pipeline.kargs)
            kargs.update(extra_kargs)
            msg = f'Ran {short_cls(pipeline.cls)}'
            if 'activity_group' in kargs: msg += f' ({kargs["activity_group"]})'
            log.debug(f'Running {pipeline.cls}({pipeline.args}, {kargs})')
            with timing(msg):
                before = None if id else count_statistics(s)
                pipeline.cls(data, *pipeline.args, id=pipeline.id, progress=local_progress, **kargs).run()
                after = None if id else count_statistics(s)
            if before or after:
                log.info(f'{msg}: statistic count {before} -> {after} (change of {after - before})')
Example 11: _read_data
# Required import: from sqlalchemy.sql import functions [as alias]
# Or: from sqlalchemy.sql.functions import count [as alias]
def _read_data(self, s, file_scan):
    records = self.parse_records(read_fit(file_scan.path))
    first_timestamp = self.read_first_timestamp(file_scan.path, records)
    last_timestamp = self.read_last_timestamp(file_scan.path, records)
    if first_timestamp == last_timestamp:
        log.warning('File %s is empty (no timespan)' % file_scan)
        raise AbortImportButMarkScanned()
    if not first_timestamp:
        raise Exception('Missing timestamp in %s' % file_scan)
    log.info(f'Importing monitor data from {file_scan} '
             f'for {format_time(first_timestamp)} - {format_time(last_timestamp)}')
    if self.force:
        log.debug('Deleting previous entry')
        s.query(MonitorJournal).filter(MonitorJournal.file_hash == file_scan.file_hash).delete()
    else:
        if s.query(MonitorJournal).filter(MonitorJournal.file_hash == file_scan.file_hash).count():
            raise Exception(f'Duplicate for {file_scan.path}')  # should never happen
    # adding 0.1s to the end time makes the intervals semi-open, which simplifies cleanup later
    mjournal = add(s, MonitorJournal(start=first_timestamp,
                                     finish=last_timestamp + dt.timedelta(seconds=0.1),
                                     file_hash_id=file_scan.file_hash.id))
    return mjournal, (first_timestamp, last_timestamp, mjournal, records)
Example 12: _fix_pair
# Required import: from sqlalchemy.sql import functions [as alias]
# Or: from sqlalchemy.sql.functions import count [as alias]
def _fix_pair(self, s, a, b):
    # a starts before b (from query)
    if b.finish <= a.finish:
        # b completely enclosed in a
        log.warning('Deleting monitor journal entry that completely overlaps another')
        log.debug(f'{a.start} - {a.finish} ({a.id}) encloses {b.start} - {b.finish} ({b.id})')
        # be careful to delete superclass...
        s.query(Source).filter(Source.id == b.id).delete()
    else:
        # otherwise, shorten a so it finishes where b starts
        q = s.query(StatisticJournal). \
            filter(StatisticJournal.source == a,
                   StatisticJournal.time >= b.start)
        count = q.count()
        if count:
            # not really a warning because we expect this
            log.debug(f'Shifting edge of overlapping monitor journals ({count} statistic values)')
            log.debug(f'{a.start} - {a.finish} ({a.id}) overlaps {b.start} - {b.finish} ({b.id})')
            q.delete()
        # update monitor whether statistics were changed or not
        log.debug(f'Shift monitor finish back from {a.finish} to {b.start}')
        a.finish = b.start
    s.flush()  # not sure this is needed
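Two details in this example are easy to miss. First, q.count() is the Query.count() method, which wraps the whole query in SELECT count(*) FROM (...); it is not the imported functions.count construct, even though this page's import header suggests it. Second, the assignment count = q.count() rebinds the name count to a local int, shadowing the imported function for the rest of the method.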
Example 13: _write_diff
# Required import: from sqlalchemy.sql import functions [as alias]
# Or: from sqlalchemy.sql.functions import count [as alias]
def _write_diff(self, s, df):
    steps = StatisticName.add_if_missing(s, T.STEPS, StatisticJournalType.INTEGER, Units.STEPS_UNITS,
                                         None, self.owner_out, description=STEPS_DESCRIPTION)
    times = df.loc[(df[NEW_STEPS] != df[N.STEPS]) & ~df[N.STEPS].isna()].index.astype(np.int64) / 1e9
    if len(times):
        n = s.query(func.count(StatisticJournal.id)). \
            filter(StatisticJournal.time.in_(times),
                   StatisticJournal.statistic_name == steps).scalar()
        log.warning(f'Deleting {n} {N.STEPS} entries')
        s.query(StatisticJournal.id). \
            filter(StatisticJournal.time.in_(times),
                   StatisticJournal.statistic_name == steps).delete(synchronize_session=False)
    loader = self._get_loader(s, owner=self.owner_out, add_serial=False)
    for time, row in df.loc[(df[NEW_STEPS] != df[N.STEPS]) & ~df[NEW_STEPS].isna()].iterrows():
        loader.add(T.STEPS, Units.STEPS_UNITS, None, row[N.SOURCE], int(row[NEW_STEPS]),
                   time, StatisticJournalInteger, description=STEPS_DESCRIPTION)
    loader.load()
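Here the aggregate is spelled func.count(...) rather than the bare count(...) used elsewhere on this page; the two are interchangeable, since sqlalchemy.func is a generic namespace that resolves known names such as count to the same function constructs defined in sqlalchemy.sql.functions.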
Example 14: missing_dates
# Required import: from sqlalchemy.sql import functions [as alias]
# Or: from sqlalchemy.sql.functions import count [as alias]
def missing_dates(cls, s, expected, schedule, interval_owner, statistic_owner=None, start=None, finish=None):
    '''
    Previous approach was way too complicated and not thread-safe. Instead, just enumerate intervals and test.
    '''
    stats_start_time, stats_finish_time = cls._raw_statistics_time_range(s, statistic_owner)
    stats_start = time_to_local_date(stats_start_time)
    stats_finish = time_to_local_date(stats_finish_time)
    log.debug('Statistics (in general) exist %s - %s' % (stats_start, stats_finish))
    start = schedule.start_of_frame(start if start else stats_start)
    finish = finish if finish else schedule.next_frame(stats_finish)
    while start < finish:
        next = schedule.next_frame(start)
        existing = s.query(Interval). \
            filter(Interval.start == start,
                   Interval.schedule == schedule,
                   Interval.owner == interval_owner).count()
        if existing != expected:
            yield start, next
        start = next
Example 15: clean
# Required import: from sqlalchemy.sql import functions [as alias]
# Or: from sqlalchemy.sql.functions import count [as alias]
def clean(cls, s):
    log.debug('Searching for invalid composites')
    # see test_recursive
    q_input_counts = s.query(Composite.id,
                             count(CompositeComponent.input_source_id).label('count')). \
        outerjoin(CompositeComponent, CompositeComponent.output_source_id == Composite.id). \
        group_by(Composite.id).cte()
    q_bad_nodes = s.query(Composite.id). \
        join(q_input_counts, q_input_counts.c.id == Composite.id). \
        filter(Composite.n_components != q_input_counts.c.count)
    q_count = s.query(count(Composite.id)).filter(Composite.id.in_(q_bad_nodes))
    log.debug(q_count)
    if q_count.scalar():
        log.warning('Need to clean expired composite sources (may take some time)')
        q_bad_nodes = q_bad_nodes.cte(recursive=True)
        q_all_nodes = q_bad_nodes. \
            union_all(s.query(Composite.id).
                      join(CompositeComponent,
                           CompositeComponent.output_source_id == Composite.id).
                      join(q_bad_nodes,
                           CompositeComponent.input_source_id == q_bad_nodes.c.id)).select()
        log.debug(f'Executing {q_all_nodes}')
        s.flush()
        with timing('GC of composite sources'):
            s.query(Source).filter(Source.id.in_(q_all_nodes)).delete(synchronize_session=False)
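Labeling the aggregate (count(...).label('count')) gives the CTE a stable, named column that the outer query can reference as q_input_counts.c.count; without the label, SQLAlchemy generates an anonymous column name that is awkward to address from the joined query.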