本文整理汇总了Python中socorrolib.lib.datetimeutil.utc_now函数的典型用法代码示例。如果您正苦于以下问题:Python utc_now函数的具体用法?Python utc_now怎么用?Python utc_now使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了utc_now函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_no_new_crashes
def test_no_new_crashes(self):
    """A fresh index yields no crashes; an indexed crash under a
    different product/version still yields none."""
    source = ESNewCrashSource(self.config)
    self.health_check()

    results = source.new_crashes(
        utc_now() - datetime.timedelta(days=1),
        'Firefox',
        ['43.0.1']
    )
    eq_(list(results), [])

    self.index_crash(
        a_processed_crash,
        raw_crash=a_raw_crash,
        crash_id=a_processed_crash['uuid']
    )
    self.refresh_index()

    # Same test now that there is a processed crash in there
    # but notably under a different name and version.
    results = source.new_crashes(
        utc_now() - datetime.timedelta(days=1),
        'Firefox',
        ['43.0.1']
    )
    eq_(list(results), [])
示例2: _normal_jobs_iter
def _normal_jobs_iter(self):
    """
    Yields a list of job tuples pulled from the 'jobs' table for which the
    owner is this process and the started datetime is null. This iterator
    is perpetual - it never raises the StopIteration exception
    """
    get_normal_job_sql = (
        "select"
        " j.id,"
        " j.uuid,"
        " priority "
        "from"
        " jobs j "
        "where"
        " j.owner = %d"
        " and j.starteddatetime is null "
        "order by queueddatetime"
        " limit %d" % (self.processor_id,
                       self.config.batchJobLimit)
    )
    pending_jobs = []
    fetched_at = utc_now()
    while True:
        # Only hit the database again once the local batch is drained
        # and the polling interval has elapsed since the last query.
        refresh_due = utc_now() - self.config.pollingInterval
        if not pending_jobs and fetched_at < refresh_due:
            pending_jobs = self.transaction(
                execute_query_fetchall,
                get_normal_job_sql
            )
            fetched_at = utc_now()
        if not pending_jobs:
            # nothing available right now; signal the caller to idle
            yield None
        else:
            while pending_jobs:
                yield pending_jobs.pop(0)
示例3: run
def run(self):
    """Fetch bugs changed since the last successful run and store them.

    Each bug from the query is handled in its own transaction, so a
    single failure does not abort the whole run.
    """
    if self.config.days_into_past:
        # if this is non-zero, we use it.
        delta = datetime.timedelta(days=self.config.days_into_past)
        last_run = utc_now() - delta
    else:
        try:
            # KeyError if it's never run successfully
            # TypeError if self.job_information is None
            last_run = self.job_information["last_success"]
        except (KeyError, TypeError):
            # basically, the "virgin run" of this job
            last_run = utc_now()
    # bugzilla runs on PST, so we need to communicate in its time zone
    pacific = tz.gettz("PST8PDT")
    last_run_formatted = last_run.astimezone(pacific).strftime("%Y-%m-%d")
    query = self.config.query % last_run_formatted
    for bug_id, status, resolution, short_desc, signature_set in \
            self._iterator(query):
        try:
            # each run of this loop is a transaction
            self.database_transaction_executor(
                self.inner_transaction,
                bug_id,
                status,
                resolution,
                short_desc,
                signature_set
            )
        except NothingUsefulHappened:
            pass
示例4: test_delete_old_indices
def test_delete_old_indices(self):
    """Old indices (plain and aliased) are deleted; recent ones are kept."""
    # Create old indices to be deleted.
    for stale in ('socorro200142', 'socorro200000'):
        self.index_client.create(stale, {})
        self.indices.append(stale)
    # Create an old aliased index.
    self.index_client.create('socorro200201_20030101', {})
    self.indices.append('socorro200201_20030101')
    self.index_client.put_alias(
        index='socorro200201_20030101',
        name='socorro200201',
    )
    # Create a recent aliased index.
    last_week_index = self.get_index_for_date(
        utc_now() - datetime.timedelta(weeks=1)
    )
    self.index_client.create('socorro_some_aliased_index', {})
    self.indices.append('socorro_some_aliased_index')
    self.index_client.put_alias(
        index='socorro_some_aliased_index',
        name=last_week_index,
    )
    # Create a recent index that should not be deleted.
    now_index = self.get_index_for_date(utc_now())
    self.index_client.create(now_index, {})
    self.indices.append(now_index)
    # These will raise an error if an index was not correctly created.
    for created in ('socorro200142', 'socorro200000', 'socorro200201',
                    now_index, last_week_index):
        assert self.index_client.exists(created)

    IndexCleaner(self.config).delete_old_indices()

    # Verify the recent indices are still there.
    ok_(self.index_client.exists(now_index))
    ok_(self.index_client.exists(last_week_index))
    # Verify the old indices are gone.
    for stale in ('socorro200142', 'socorro200000', 'socorro200201'):
        ok_(not self.index_client.exists(stale))
示例5: test_create_release
def test_create_release(self):
    """POSTing a well-formed release to the middleware returns True."""
    self._insert_release_channels()
    self._insert_products()
    config_manager = self._setup_config_manager()
    with config_manager.context() as config:
        app = middleware_app.MiddlewareApp(config)
        app.main()
        server = middleware_app.application
        build_id = datetimeutil.utc_now().strftime('%Y%m%d%H%M')
        payload = {
            'product': 'Firefox',
            'version': '1.0',
            'update_channel': 'beta',
            'build_id': build_id,
            'platform': 'Windows',
            'beta_number': '1',
            'release_channel': 'Beta',
            'throttle': '1',
        }
        response = self.post(server, '/releases/release/', payload)
        eq_(response.data, True)
示例6: test_basic_run
def test_basic_run(self):
    """The clean-raw-adi-logs job removes rows older than days_to_keep."""
    cursor = self.conn.cursor()
    # Seed one row from yesterday (to be cleaned) and one from today.
    today = utc_now().date()
    yesterday = today - datetime.timedelta(days=1)
    cursor.execute(
        """
        INSERT INTO raw_adi_logs
        (report_date, product_name, count) VALUES
        (%(first)s, 'WinterFox', 11),
        (%(second)s, 'WinterFox', 23)
        """,
        {'first': yesterday, 'second': today}
    )
    self.conn.commit()
    # Run the crontabber job to remove the old row.
    config_manager = self._setup_config_manager(days_to_keep=1)
    with config_manager.context() as config:
        CronTabber(config).run_all()
    # Basic assertion test of stored procedure.
    information = self._load_structure()
    assert information['clean-raw-adi-logs']
    assert not information['clean-raw-adi-logs']['last_error']
    assert information['clean-raw-adi-logs']['last_success']
    # Exactly one row should remain: today's.
    cursor.execute("""
        SELECT report_date FROM raw_adi_logs
    """)
    (row,) = cursor.fetchall()
    eq_(row[0], today)
示例7: POST
def POST(self, *args):
    """Accept a submitted crash, assign or reuse its crash_id, persist
    it via crash storage, and return the CrashID acknowledgement body."""
    raw_crash, dumps = self._get_raw_crash_from_form()
    current_timestamp = utc_now()
    raw_crash.submitted_timestamp = current_timestamp.isoformat()
    # legacy - ought to be removed someday
    raw_crash.timestamp = time.time()
    reuse_id = (
        self.config.accept_submitted_crash_id
        and 'crash_id' in raw_crash
    )
    if reuse_id:
        crash_id = raw_crash.crash_id
        self.logger.info('%s received with existing crash_id:', crash_id)
    else:
        crash_id = createNewOoid(current_timestamp)
        raw_crash.crash_id = crash_id
        self.logger.info('%s received', crash_id)
    raw_crash.type_tag = self.type_tag
    self.crash_storage.save_raw_crash(raw_crash, dumps, crash_id)
    self.logger.info('%s accepted', crash_id)
    return "CrashID=%s%s\n" % (self.type_tag, crash_id)
示例8: test_basic_run
def test_basic_run(self):
    """The clean-missing-symbols job removes rows older than days_to_keep."""
    cursor = self.conn.cursor()
    # Seed one row from yesterday (to be cleaned) and one from today.
    today = utc_now().date()
    yesterday = today - datetime.timedelta(days=1)
    cursor.execute(
        """
        INSERT INTO missing_symbols
        (date_processed, debug_file, debug_id, code_file, code_id)
        VALUES
        (%(first)s, 'foo.pdb', '0420', 'foo.py', '123'),
        (%(second)s, 'bar.pdb', '65EA9', 'bar.py', null)
        """,
        {'first': yesterday, 'second': today}
    )
    self.conn.commit()
    # Run the crontabber job to remove the old row.
    config_manager = self._setup_config_manager(days_to_keep=1)
    with config_manager.context() as config:
        CronTabber(config).run_all()
    # Basic assertion test of stored procedure.
    information = self._load_structure()
    assert information['clean-missing-symbols']
    assert not information['clean-missing-symbols']['last_error']
    assert information['clean-missing-symbols']['last_success']
    # Exactly one row should remain: today's.
    cursor.execute("""
        SELECT date_processed FROM missing_symbols
    """)
    (row,) = cursor.fetchall()
    eq_(row[0], today)
示例9: test_mapping
def test_mapping(self, mapping):
    """Verify that a mapping is correct.

    This function does so by first creating a new, temporary index in
    elasticsearch using the mapping. It then takes some recent crash
    reports that are in elasticsearch and tries to insert them in the
    temporary index. Any failure in any of those steps will raise an
    exception. If any is raised, that means the mapping is incorrect in
    some way (either it doesn't validate against elasticsearch's rules,
    or is not compatible with the data we currently store).

    If no exception is raised, the mapping is likely correct.

    This function is to be used in any place that can change the
    `storage_mapping` field in any Super Search Field.
    Methods `create_field` and `update_field` use it, see above.
    """
    temp_index = 'socorro_mapping_test'
    es_connection = self.get_connection()
    # Import at runtime to avoid dependency circle.
    from socorro.external.es.index_creator import IndexCreator
    index_creator = IndexCreator(self.config)
    try:
        index_creator.create_index(
            temp_index,
            mapping,
        )
        now = datetimeutil.utc_now()
        week_ago = now - datetime.timedelta(days=7)
        recent_indices = self.generate_list_of_indexes(week_ago, now)
        doc_type = self.config.elasticsearch.elasticsearch_doctype
        crashes_sample = es_connection.search(
            index=recent_indices,
            doc_type=doc_type,
            size=self.config.elasticsearch.mapping_test_crash_number,
        )
        # Re-index each sampled crash document under the new mapping.
        for hit in crashes_sample['hits']['hits']:
            es_connection.index(
                index=temp_index,
                doc_type=doc_type,
                body=hit['_source'],
            )
    except elasticsearch.exceptions.ElasticsearchException as e:
        raise BadArgumentError(
            'storage_mapping',
            msg='Indexing existing data in Elasticsearch failed with the '
            'new mapping. Error is: %s' % str(e),
        )
    finally:
        try:
            index_creator.get_index_client().delete(temp_index)
        except elasticsearch.exceptions.NotFoundError:
            # If the index does not exist (if the index creation failed
            # for example), we don't need to do anything.
            pass
示例10: get_signatures
def get_signatures(self, **kwargs):
    """Return top crashers by signatures.
    See http://socorro.readthedocs.org/en/latest/middleware.html#tcbs
    """
    filters = [
        ("product", None, "str"),
        ("version", None, "str"),
        ("crash_type", "all", "str"),
        ("to_date", datetimeutil.utc_now(), "datetime"),
        ("duration", datetime.timedelta(7), "timedelta"),
        ("os", None, "str"),
        ("limit", 100, "int"),
        ("date_range_type", None, "str"),
    ]
    params = external_common.parse_arguments(filters, kwargs)
    params.logger = logger
    # what the twoPeriodTopCrasherComparison() function does is that it
    # makes a start date from taking the to_date - duration
    max_duration = datetime.timedelta(30)
    if params.duration > max_duration:
        raise BadArgumentError('Duration too long. Max 30 days.')
    with self.get_connection() as connection:
        return tcbs.twoPeriodTopCrasherComparison(connection, params)
示例11: test_create_release_with_beta_number_null
def test_create_release_with_beta_number_null(self):
    """beta_number=None is accepted, but a falsy non-None value (0)
    raises MissingArgumentError."""
    self._insert_release_channels()
    service = Releases(config=self.config)
    build_id = datetimeutil.utc_now().strftime('%Y%m%d%H%M')
    params = {
        'product': 'Firefox',
        'version': '1.0',
        'update_channel': 'beta',
        'build_id': build_id,
        'platform': 'Windows',
        'beta_number': None,
        'release_channel': 'Beta',
        'throttle': 1,
    }
    ok_(service.create_release(**params))
    # but...
    params['beta_number'] = 0
    assert_raises(
        MissingArgumentError,
        service.create_release,
        **params
    )
示例12: test_utc_now
def test_utc_now():
    """utc_now() must return a timezone-aware UTC datetime."""
    now = datetimeutil.utc_now()
    ok_(now.tzinfo)
    eq_(now.strftime('%Z'), 'UTC')
    eq_(now.strftime('%z'), '+0000')
示例13: test_get_parameters_date_defaults
def test_get_parameters_date_defaults(self):
    """The 'date' parameter is always normalized to an [upper, lower]
    pair of bounds, filling in whichever side was not supplied."""
    with _get_config_manager().context() as config:
        search = SearchBaseWithFields(config=config)
        now = datetimeutil.utc_now()

        # Test default values when nothing is passed
        params = search.get_parameters()
        ok_('date' in params)
        eq_(len(params['date']), 2)

        # Pass only the high value
        params = search.get_parameters(
            date='<%s' % datetimeutil.date_to_string(now)
        )
        ok_('date' in params)
        eq_(len(params['date']), 2)
        upper, lower = params['date']
        eq_(upper.operator, '<')
        eq_(lower.operator, '>=')
        eq_(upper.value.date(), now.date())
        # the missing low bound defaults to a week before the high bound
        eq_(lower.value.date(), now.date() - datetime.timedelta(days=7))

        # Pass only the low value
        pasttime = now - datetime.timedelta(days=10)
        params = search.get_parameters(
            date='>=%s' % datetimeutil.date_to_string(pasttime)
        )
        ok_('date' in params)
        eq_(len(params['date']), 2)
        upper, lower = params['date']
        eq_(upper.operator, '<=')
        eq_(lower.operator, '>=')
        eq_(upper.value.date(), now.date())
        eq_(lower.value.date(), pasttime.date())

        # Pass the two values
        pasttime = now - datetime.timedelta(days=10)
        params = search.get_parameters(date=[
            '<%s' % datetimeutil.date_to_string(now),
            '>%s' % datetimeutil.date_to_string(pasttime),
        ])
        ok_('date' in params)
        eq_(len(params['date']), 2)
        upper, lower = params['date']
        eq_(upper.operator, '<')
        eq_(lower.operator, '>')
        eq_(upper.value.date(), now.date())
        eq_(lower.value.date(), pasttime.date())
示例14: test_new_crashes
def test_new_crashes(self):
    """Only the matching-product crash processed after the cutoff date
    is yielded; the day-old duplicate is excluded."""
    new_crash_source = ESNewCrashSource(self.config)
    self.index_crash(
        a_processed_crash,
        raw_crash=a_raw_crash,
        crash_id=a_processed_crash['uuid']
    )
    self.index_crash(
        a_firefox_processed_crash,
        raw_crash=a_raw_crash,
        crash_id=a_firefox_processed_crash['uuid']
    )
    # A clone with a distinct uuid, back-dated a full day.
    stale_crash = deepcopy(a_firefox_processed_crash)
    stale_crash['uuid'] = stale_crash['uuid'].replace('a', 'e')
    stale_crash['date_processed'] = (
        utc_now() - datetime.timedelta(days=1)
    )
    self.index_crash(
        stale_crash,
        raw_crash=a_raw_crash,
        crash_id=stale_crash['uuid']
    )
    self.refresh_index()
    # sanity: both original crashes made it into the index
    for uuid in (a_processed_crash['uuid'],
                 a_firefox_processed_crash['uuid']):
        assert self.connection.get(
            index=self.config.elasticsearch.elasticsearch_index,
            id=uuid
        )
    # same test now that there is a processed crash in there
    generator = new_crash_source.new_crashes(
        utc_now() - datetime.timedelta(days=1),
        'Firefox',
        ['43.0.1']
    )
    eq_(list(generator), [a_firefox_processed_crash['uuid']])
示例15: _get_base
def _get_base(self, crash_id):
    """Return the daily root path components for crash_id.

    Overrides the base method's YYYYMMDD daily directory name with a
    simple DD (day-of-month) form. This is the mechanism of directory
    recycling: on the first day of a new month we return to the same
    directory structures that were created on the first day of the
    previous month. Falls back to the current UTC time when no date
    can be derived from the crash_id.
    """
    date = dateFromOoid(crash_id) or utc_now()
    return [self.config.fs_root, "%02d" % (date.day,)]