This article collects typical usage examples of datetime.datetime.max in Python. If you are wondering what datetime.max is, how to use it, or want to see it in real code, the curated examples below may help; you can also read up on the enclosing class, datetime.datetime, for more background. Note that max is a class attribute (the latest representable datetime), not a method.
The following 15 code examples show datetime.max (and the closely related timedelta.max) in use, ordered roughly by popularity.
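Before going through the project examples, here is a minimal, self-contained sketch (written for this article, not taken from any project below) showing what datetime.max and datetime.min are and how they are typically used as "unbounded" sentinel values:

from datetime import datetime, timedelta

# datetime.max is the latest representable datetime (year 9999),
# datetime.min is the earliest (year 1).
print(datetime.max)   # 9999-12-31 23:59:59.999999
print(datetime.min)   # 0001-01-01 00:00:00

# A common pattern: use them as sentinels when searching for the
# earliest/latest timestamp in a collection.
timestamps = [datetime(2020, 5, 1), datetime(2019, 1, 15), datetime(2021, 3, 9)]
earliest, latest = datetime.max, datetime.min
for ts in timestamps:
    earliest = min(earliest, ts)
    latest = max(latest, ts)
print(earliest, latest)

# Be careful near the boundaries: arithmetic past them overflows.
try:
    datetime.max + timedelta(days=1)
except OverflowError as exc:
    print("overflow:", exc)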
Example 1: time_coverage
# Required import: from datetime import datetime [as alias]
# Or: from datetime.datetime import max [as alias]
def time_coverage(self, value):
    if self.single_file:
        if value is None:
            # The default for single file filesets:
            self._time_coverage = [
                datetime.min,
                datetime.max
            ]
        else:
            self._time_coverage = [
                to_datetime(value[0]),
                to_datetime(value[1]),
            ]
    elif value is not None:
        self._time_coverage = to_timedelta(value)
    else:
        self._time_coverage = None

    # Reset the info cache because some file information may have changed now
    self.info_cache = {}
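In this setter (from typhon's FileSet class), the pair [datetime.min, datetime.max] serves as an "unbounded" default time coverage: every timestamp falls inside it. The following standard-library-only sketch illustrates that idea; the coverage tuple and covers helper are illustrative names, not part of typhon's API:

from datetime import datetime

# Default coverage for a single-file fileset: effectively unbounded.
coverage = (datetime.min, datetime.max)

def covers(coverage, timestamp):
    """Return True if the timestamp lies within the coverage interval."""
    start, end = coverage
    return start <= timestamp <= end

# Any datetime value satisfies the unbounded default.
assert covers(coverage, datetime(1850, 1, 1))
assert covers(coverage, datetime(2024, 6, 1))
assert covers(coverage, datetime.max)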
Example 2: test_extreme_ordinals
# Required import: from datetime import datetime [as alias]
# Or: from datetime.datetime import max [as alias]
def test_extreme_ordinals(self):
    a = self.theclass.min
    a = self.theclass(a.year, a.month, a.day)  # get rid of time parts
    aord = a.toordinal()
    b = a.fromordinal(aord)
    self.assertEqual(a, b)

    self.assertRaises(ValueError, lambda: a.fromordinal(aord - 1))

    b = a + timedelta(days=1)
    self.assertEqual(b.toordinal(), aord + 1)
    self.assertEqual(b, self.theclass.fromordinal(aord + 1))

    a = self.theclass.max
    a = self.theclass(a.year, a.month, a.day)  # get rid of time parts
    aord = a.toordinal()
    b = a.fromordinal(aord)
    self.assertEqual(a, b)

    self.assertRaises(ValueError, lambda: a.fromordinal(aord + 1))

    b = a - timedelta(days=1)
    self.assertEqual(b.toordinal(), aord - 1)
    self.assertEqual(b, self.theclass.fromordinal(aord - 1))
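Example 2 probes the ordinal boundaries of the date/datetime classes. As a quick reference, here is a small standard-library sketch (not part of CPython's test suite) of the values those boundaries correspond to:

from datetime import date, datetime

# Ordinal 1 corresponds to 0001-01-01, the smallest representable date.
print(datetime.min.toordinal())            # 1
print(date.fromordinal(1))                 # 0001-01-01

# datetime.max maps to the largest valid ordinal; one past it is rejected.
max_ord = datetime.max.toordinal()
print(max_ord, date.fromordinal(max_ord))  # 3652059 9999-12-31
try:
    date.fromordinal(max_ord + 1)
except ValueError as exc:
    print("out of range:", exc)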
Example 3: test_max_runs_when_no_files
# Required import: from datetime import datetime [as alias]
# Or: from datetime.datetime import max [as alias]
def test_max_runs_when_no_files(self):
    child_pipe, parent_pipe = multiprocessing.Pipe()

    with TemporaryDirectory(prefix="empty-airflow-dags-") as dags_folder:
        async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
        manager = DagFileProcessorManager(
            dag_directory=dags_folder,
            max_runs=1,
            processor_factory=FakeDagFileProcessorRunner._fake_dag_processor_factory,
            processor_timeout=timedelta.max,
            signal_conn=child_pipe,
            dag_ids=[],
            pickle_dags=False,
            async_mode=async_mode)

        self.run_processor_manager_one_loop(manager, parent_pipe)
    child_pipe.close()
    parent_pipe.close()
Example 4: test_set_file_paths_when_processor_file_path_not_in_new_file_paths
# Required import: from datetime import datetime [as alias]
# Or: from datetime.datetime import max [as alias]
def test_set_file_paths_when_processor_file_path_not_in_new_file_paths(self):
    manager = DagFileProcessorManager(
        dag_directory='directory',
        max_runs=1,
        processor_factory=MagicMock().return_value,
        processor_timeout=timedelta.max,
        signal_conn=MagicMock(),
        dag_ids=[],
        pickle_dags=False,
        async_mode=True)

    mock_processor = MagicMock()
    mock_processor.stop.side_effect = AttributeError(
        'DagFileProcessor object has no attribute stop')
    mock_processor.terminate.side_effect = None

    manager._processors['missing_file.txt'] = mock_processor
    manager._file_stats['missing_file.txt'] = DagFileStat(0, 0, None, None, 0)

    manager.set_file_paths(['abc.txt'])
    self.assertDictEqual(manager._processors, {})
Example 5: test_set_file_paths_when_processor_file_path_is_in_new_file_paths
# Required import: from datetime import datetime [as alias]
# Or: from datetime.datetime import max [as alias]
def test_set_file_paths_when_processor_file_path_is_in_new_file_paths(self):
    manager = DagFileProcessorManager(
        dag_directory='directory',
        max_runs=1,
        processor_factory=MagicMock().return_value,
        processor_timeout=timedelta.max,
        signal_conn=MagicMock(),
        dag_ids=[],
        pickle_dags=False,
        async_mode=True)

    mock_processor = MagicMock()
    mock_processor.stop.side_effect = AttributeError(
        'DagFileProcessor object has no attribute stop')
    mock_processor.terminate.side_effect = None

    manager._processors['abc.txt'] = mock_processor

    manager.set_file_paths(['abc.txt'])
    self.assertDictEqual(manager._processors, {'abc.txt': mock_processor})
Example 6: test_parse_once
# Required import: from datetime import datetime [as alias]
# Or: from datetime.datetime import max [as alias]
def test_parse_once(self):
    test_dag_path = os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py')
    async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
    processor_agent = DagFileProcessorAgent(test_dag_path,
                                            1,
                                            type(self)._processor_factory,
                                            timedelta.max,
                                            [],
                                            False,
                                            async_mode)
    processor_agent.start()
    parsing_result = []
    if not async_mode:
        processor_agent.run_single_parsing_loop()
    while not processor_agent.done:
        if not async_mode:
            processor_agent.wait_until_finished()
        parsing_result.extend(processor_agent.harvest_simple_dags())

    dag_ids = [result.dag_id for result in parsing_result]
    self.assertEqual(dag_ids.count('test_start_date_scheduling'), 1)
Example 7: test_launch_process
# Required import: from datetime import datetime [as alias]
# Or: from datetime.datetime import max [as alias]
def test_launch_process(self):
    test_dag_path = os.path.join(TEST_DAG_FOLDER, 'test_scheduler_dags.py')
    async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')

    log_file_loc = conf.get('logging', 'DAG_PROCESSOR_MANAGER_LOG_LOCATION')
    try:
        os.remove(log_file_loc)
    except OSError:
        pass

    # Starting dag processing with 0 max_runs to avoid redundant operations.
    processor_agent = DagFileProcessorAgent(test_dag_path,
                                            0,
                                            type(self)._processor_factory,
                                            timedelta.max,
                                            [],
                                            False,
                                            async_mode)
    processor_agent.start()
    if not async_mode:
        processor_agent.run_single_parsing_loop()

    processor_agent._process.join()

    self.assertTrue(os.path.isfile(log_file_loc))
Example 8: test_overflow
# Required import: from datetime import datetime [as alias]
# Or: from datetime.datetime import max [as alias]
def test_overflow(self):
    tiny = timedelta.resolution

    td = timedelta.min + tiny
    td -= tiny  # no problem
    self.assertRaises(OverflowError, td.__sub__, tiny)
    self.assertRaises(OverflowError, td.__add__, -tiny)

    td = timedelta.max - tiny
    td += tiny  # no problem
    self.assertRaises(OverflowError, td.__add__, tiny)
    self.assertRaises(OverflowError, td.__sub__, -tiny)

    self.assertRaises(OverflowError, lambda: -timedelta.max)

    day = timedelta(1)
    self.assertRaises(OverflowError, day.__mul__, 10**9)
    self.assertRaises(OverflowError, day.__mul__, 1e9)
    self.assertRaises(OverflowError, day.__truediv__, 1e-20)
    self.assertRaises(OverflowError, day.__truediv__, 1e-10)
    self.assertRaises(OverflowError, day.__truediv__, 9e-10)
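The overflow test above relies on the documented bounds of timedelta. For reference, a short standard-library sketch of those bounds and the resolution (the values in the comments are their printed forms):

from datetime import timedelta

# The documented extremes and the smallest representable step:
print(timedelta.max)          # 999999999 days, 23:59:59.999999
print(timedelta.min)          # -999999999 days, 0:00:00
print(timedelta.resolution)   # 0:00:00.000001 (one microsecond)

# Stepping past either bound raises OverflowError, as the test above checks.
try:
    timedelta.max + timedelta.resolution
except OverflowError as exc:
    print("overflow:", exc)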
Example 9: find_max_value
# Required import: from datetime import datetime [as alias]
# Or: from datetime.datetime import max [as alias]
def find_max_value(test_func, initial_value):
    """
    Starting from an initial number (integer or float), find the maximum value
    for which the test function does not yet fail, and return that maximum
    value.
    """
    assert isinstance(initial_value, int) and initial_value > 0

    fails = FailsArray(test_func)
    value = initial_value

    # Advance the value exponentially beyond the max value
    while fails[value] == 0:
        value *= 2

    # Search for the exact max value in the previous range. We search for the
    # boundary where the fails array goes from 0 to 1.
    boundary = 0.5
    value = binary_search(fails, boundary, value // 2, value)
    max_value = value - 1

    # Verify that we found exactly the maximum:
    assert fails[max_value] == 0 and fails[max_value + 1] == 1, \
        "max_value={}, fails[+-2]: {}, {}, {}, {}, {}".\
        format(max_value, fails[max_value - 2], fails[max_value - 1],
               fails[max_value], fails[max_value + 1], fails[max_value + 2])

    return max_value
Example 10: x_test_print_datetime_max
# Required import: from datetime import datetime [as alias]
# Or: from datetime.datetime import max [as alias]
def x_test_print_datetime_max(self):
    """Print datetime.max."""
    print("\nMax value for Python datetime (datetime.max): {!r}".
          format(datetime.max))
    sys.stdout.flush()
Example 11: test_datetime_max
# Required import: from datetime import datetime [as alias]
# Or: from datetime.datetime import max [as alias]
def test_datetime_max(self):
    """Test timestamp_from_datetime() with datetime.max."""
    # The test is that it does not raise an exception:
    timestamp_from_datetime(datetime.max)
Example 12: __init__
# Required import: from datetime import datetime [as alias]
# Or: from datetime.datetime import max [as alias]
def __init__(self):
    self.start = datetime.max
    self.end = datetime.min
    self.packets = 0
    self.bytes = 0
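Example 12 initialises start to datetime.max and end to datetime.min so that the first observed timestamp always replaces both sentinels. The sketch below illustrates how such a record might be updated; the FlowStats name and update method are illustrative and not taken from the original project:

from datetime import datetime

class FlowStats:
    """Track the time span and volume of an observed traffic flow."""

    def __init__(self):
        self.start = datetime.max   # sentinel: later than any real timestamp
        self.end = datetime.min     # sentinel: earlier than any real timestamp
        self.packets = 0
        self.bytes = 0

    def update(self, timestamp, size):
        # min/max against the sentinels works on the very first packet too.
        self.start = min(self.start, timestamp)
        self.end = max(self.end, timestamp)
        self.packets += 1
        self.bytes += size

stats = FlowStats()
stats.update(datetime(2021, 1, 1, 12, 0, 0), 1500)
stats.update(datetime(2021, 1, 1, 12, 0, 5), 600)
print(stats.start, stats.end, stats.packets, stats.bytes)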
Example 13: generate_auth_token
# Required import: from datetime import datetime [as alias]
# Or: from datetime.datetime import max [as alias]
def generate_auth_token(user_id):
    host_list = request.host.rsplit(':')
    if len(host_list) == 1:
        host = ':'.join(host_list)
    else:
        host = ':'.join(host_list[0:-1])

    if host.startswith('127.') or host.lower() == 'localhost' or host.startswith('[::ffff:7f'):
        warning = _('Please access Calibre-Web from a non-localhost address to get a valid api_endpoint for your Kobo device')
        return render_title_template(
            "generate_kobo_auth_url.html",
            title=_(u"Kobo Setup"),
            warning=warning
        )
    else:
        # Invalidate any previously generated Kobo Auth token for this user.
        auth_token = ub.session.query(ub.RemoteAuthToken).filter(
            ub.RemoteAuthToken.user_id == user_id
        ).filter(ub.RemoteAuthToken.token_type == 1).first()

        if not auth_token:
            auth_token = ub.RemoteAuthToken()
            auth_token.user_id = user_id

        auth_token.expiration = datetime.max
        auth_token.auth_token = (hexlify(urandom(16))).decode("utf-8")
        auth_token.token_type = 1

        ub.session.add(auth_token)
        ub.session.commit()

        return render_title_template(
            "generate_kobo_auth_url.html",
            title=_(u"Kobo Setup"),
            kobo_auth_url=url_for(
                "kobo.TopLevelEndpoint", auth_token=auth_token.auth_token, _external=True
            ),
            warning=False
        )
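Here datetime.max is stored as the token's expiration so that a routine "has it expired yet?" comparison never invalidates it. A minimal sketch of that check (the is_expired helper is illustrative, not part of Calibre-Web):

from datetime import datetime

def is_expired(expiration):
    """A token whose expiration is set to datetime.max effectively never expires."""
    return datetime.now() > expiration

print(is_expired(datetime.max))          # False: still valid
print(is_expired(datetime(2020, 1, 1)))  # True: expired in the past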
Example 14: _get_common_time_period
# Required import: from datetime import datetime [as alias]
# Or: from datetime.datetime import max [as alias]
def _get_common_time_period(
        primary, secondary, max_interval, start, end):
    max_interval = pd.Timedelta(max_interval)

    # We want to select a common time window from both datasets,
    # aligned to the primary's time coverage. Because xarray has a
    # very annoying bug in time retrieving
    # (https://github.com/pydata/xarray/issues/1240), this is a
    # little bit cumbersome:
    common_start = max(
        start,
        pd.Timestamp(primary.time.min().item(0)) - max_interval,
        pd.Timestamp(secondary.time.min().item(0)) - max_interval
    )
    common_end = min(
        end,
        pd.Timestamp(primary.time.max().item(0)) + max_interval,
        pd.Timestamp(secondary.time.max().item(0)) + max_interval
    )

    primary_period = primary.time.where(
        (primary.time.values >= np.datetime64(common_start))
        & (primary.time.values <= np.datetime64(common_end))
    ).dropna(primary.time.dims[0])

    secondary_period = secondary.time.where(
        (secondary.time.values >= np.datetime64(common_start))
        & (secondary.time.values <= np.datetime64(common_end))
    ).dropna(secondary.time.dims[0])

    return primary_period, secondary_period
Example 15: _bin_pairs
# Required import: from datetime import datetime [as alias]
# Or: from datetime.datetime import max [as alias]
def _bin_pairs(chunk1_start, chunk1, primary, secondary, max_interval):
    """"""
    chunk2_start = chunk1_start - max_interval
    chunk2_end = chunk1.index.max() + max_interval
    offset1 = primary.index.searchsorted(chunk1_start)
    offset2 = secondary.index.searchsorted(chunk2_start)
    chunk2 = secondary.loc[chunk2_start:chunk2_end]
    return offset1, chunk1, offset2, chunk2