This article collects typical usage examples of the unix_time_millis function from the Python module moto.core.utils. If you are wondering what unix_time_millis is for and how it is called in practice, the selected examples below should help.
A total of 11 code examples of unix_time_millis are shown below, sorted by popularity by default.
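For reference, unix_time_millis converts a UTC datetime into milliseconds since the Unix epoch. The sketch below approximates that behavior for readers following the examples; it is not moto's exact implementation, which may differ between versions:

import datetime

EPOCH = datetime.datetime(1970, 1, 1)

def unix_time_millis_sketch(dt=None):
    # Approximate behavior: milliseconds since the Unix epoch for the
    # given UTC datetime (defaults to "now").
    dt = dt or datetime.datetime.utcnow()
    return (dt - EPOCH).total_seconds() * 1000.0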
Example 1: __init__
def __init__(self, region, name, tags):
    self.name = name
    self.region = region
    self.arn = "arn:aws:logs:{region}:1:log-group:{log_group}".format(
        region=region, log_group=name)
    self.creationTime = unix_time_millis()
    self.tags = tags
    self.streams = dict()  # {name: LogStream}
Example 2: __init__
def __init__(self, region, name, tags):
    self.name = name
    self.region = region
    self.arn = "arn:aws:logs:{region}:1:log-group:{log_group}".format(
        region=region, log_group=name)
    self.creationTime = unix_time_millis()
    self.tags = tags
    self.streams = dict()  # {name: LogStream}
    self.retentionInDays = None  # AWS defaults to Never Expire for log group retention
Example 3: put_log_events
def put_log_events(self, log_group_name, log_stream_name, log_events, sequence_token):
    # TODO: ensure sequence_token
    # TODO: to be thread safe this would need a lock
    self.lastIngestionTime = unix_time_millis()
    # TODO: make this match AWS if possible
    self.storedBytes += sum([len(log_event["message"]) for log_event in log_events])
    self.events += [LogEvent(self.lastIngestionTime, log_event) for log_event in log_events]
    self.uploadSequenceToken += 1
    return self.uploadSequenceToken
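A hedged usage sketch of the method above, assuming a LogStream constructed as in Example 4 (see below) and that LogEvent and the class-level _log_ids counter are available as in moto:

from moto.core.utils import unix_time_millis

stream = LogStream(region="us-east-1", log_group="my-group", name="my-stream")
token = stream.put_log_events(
    log_group_name="my-group",
    log_stream_name="my-stream",
    log_events=[{"timestamp": unix_time_millis(), "message": "hello"}],
    sequence_token=None,
)
# token is the incremented uploadSequenceToken; lastIngestionTime is now set.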
Example 4: __init__
def __init__(self, region, log_group, name):
    self.region = region
    self.arn = "arn:aws:logs:{region}:{id}:log-group:{log_group}:log-stream:{log_stream}".format(
        region=region, id=self.__class__._log_ids, log_group=log_group, log_stream=name)
    self.creationTime = unix_time_millis()
    self.firstEventTimestamp = None
    self.lastEventTimestamp = None
    self.lastIngestionTime = None
    self.logStreamName = name
    self.storedBytes = 0
    self.uploadSequenceToken = 0  # I'm guessing this is the token needed for sequenceToken by put_events
    self.events = []
    self.__class__._log_ids += 1
Example 5: mark_received
def mark_received(self, visibility_timeout=None):
    """
    When a message is received we will set the first receive timestamp,
    bump the ``approximate_receive_count`` and update the ``visible_at`` time.
    """
    if visibility_timeout:
        visibility_timeout = int(visibility_timeout)
    else:
        visibility_timeout = 0

    if not self.approximate_first_receive_timestamp:
        self.approximate_first_receive_timestamp = unix_time_millis()

    self.approximate_receive_count += 1

    # Make the message visible again in the future unless it's
    # destroyed.
    if visibility_timeout:
        self.change_visibility(visibility_timeout)

    self.receipt_handle = generate_receipt_handle()
Example 6: delayed
def delayed(self):
    current_time = unix_time_millis()
    if current_time < self.delayed_until:
        return True
    return False
Example 7: visible
def visible(self):
    current_time = unix_time_millis()
    if current_time > self.visible_at:
        return True
    return False
Example 8: delay
def delay(self, delay_seconds):
    delay_msec = int(delay_seconds) * 1000
    self.delayed_until = unix_time_millis() + delay_msec
Example 9: change_visibility
def change_visibility(self, visibility_timeout):
    # We're dealing with milliseconds internally
    visibility_timeout_msec = int(visibility_timeout) * 1000
    self.visible_at = unix_time_millis() + visibility_timeout_msec
Example 10: mark_sent
def mark_sent(self, delay_seconds=None):
    self.sent_timestamp = unix_time_millis()
    if delay_seconds:
        self.delay(delay_seconds=delay_seconds)
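The SQS-style helpers in Examples 6-10 all compare epoch milliseconds. The following standalone sketch spells out the arithmetic they share, using only unix_time_millis itself:

from moto.core.utils import unix_time_millis

now_ms = unix_time_millis()

delay_seconds = 30
delayed_until = now_ms + int(delay_seconds) * 1000       # as in Example 8
still_delayed = unix_time_millis() < delayed_until        # as in Example 6

visibility_timeout = 60
visible_at = now_ms + int(visibility_timeout) * 1000      # as in Example 9
visible_again = unix_time_millis() > visible_at           # as in Example 7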
Example 11: _invoke_lambda
def _invoke_lambda(self, code, event=None, context=None):
    # TODO: context not yet implemented
    if event is None:
        event = dict()
    if context is None:
        context = {}

    try:
        # TODO: I believe we can keep the container running and feed events as needed
        #       also need to hook it up to the other services so it can make kws/s3 etc calls
        #       Should get invoke_id /RequestId from invocation
        env_vars = {
            "AWS_LAMBDA_FUNCTION_TIMEOUT": self.timeout,
            "AWS_LAMBDA_FUNCTION_NAME": self.function_name,
            "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": self.memory_size,
            "AWS_LAMBDA_FUNCTION_VERSION": self.version,
            "AWS_REGION": self.region,
        }
        env_vars.update(self.environment_vars)

        container = output = exit_code = None
        with _DockerDataVolumeContext(self) as data_vol:
            try:
                run_kwargs = dict(links={'motoserver': 'motoserver'}) if settings.TEST_SERVER_MODE else {}
                container = self.docker_client.containers.run(
                    "lambci/lambda:{}".format(self.run_time),
                    [self.handler, json.dumps(event)], remove=False,
                    mem_limit="{}m".format(self.memory_size),
                    volumes=["{}:/var/task".format(data_vol.name)], environment=env_vars, detach=True, **run_kwargs)
            finally:
                if container:
                    try:
                        exit_code = container.wait(timeout=300)
                    except requests.exceptions.ReadTimeout:
                        exit_code = -1
                        container.stop()
                        container.kill()
                    else:
                        if docker_3:
                            exit_code = exit_code['StatusCode']

                    output = container.logs(stdout=False, stderr=True)
                    output += container.logs(stdout=True, stderr=False)
                    container.remove()

        output = output.decode('utf-8')

        # Send output to "logs" backend
        invoke_id = uuid.uuid4().hex
        log_stream_name = "{date.year}/{date.month:02d}/{date.day:02d}/[{version}]{invoke_id}".format(
            date=datetime.datetime.utcnow(), version=self.version, invoke_id=invoke_id
        )
        self.logs_backend.create_log_stream(self.logs_group_name, log_stream_name)

        log_events = [{'timestamp': unix_time_millis(), "message": line}
                      for line in output.splitlines()]
        self.logs_backend.put_log_events(self.logs_group_name, log_stream_name, log_events, None)

        if exit_code != 0:
            raise Exception(
                'lambda invoke failed output: {}'.format(output))

        # strip out RequestId lines
        output = os.linesep.join([line for line in self.convert(output).splitlines() if not _stderr_regex.match(line)])
        return output, False
    except BaseException as e:
        traceback.print_exc()
        return "error running lambda: {}".format(e), True