This article collects typical usage examples of the Python class inbox.api.kellogs.APIEncoder. If you are wondering what the APIEncoder class is for, how to use it, or what working examples look like, the curated class examples below should help.
The following shows 13 code examples of the APIEncoder class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
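Before the examples, here is a minimal sketch of the two encoder methods the examples below rely on: cereal (serialize an object to a JSON string) and jsonify (wrap an object as a Flask JSON response). The constructor arguments shown are inferred from the examples on this page, not from separate documentation.

from inbox.api.kellogs import APIEncoder

encoder = APIEncoder()                              # default encoder
raw_json = encoder.cereal({'object': 'message'})    # serialize to a JSON string
# Inside a Flask request handler the encoder can also build the response itself:
#     return encoder.jsonify(some_api_object)
# Several examples pass a namespace public id and an "expanded view" flag, e.g.
#     APIEncoder(g.namespace.public_id, args['view'] == 'expanded')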
Example 1: message_read_api
def message_read_api(public_id):
    g.parser.add_argument('view', type=view, location='args')
    args = strict_parse_args(g.parser, request.args)
    encoder = APIEncoder(g.namespace.public_id, args['view'] == 'expanded')
    try:
        valid_public_id(public_id)
        message = g.db_session.query(Message).filter(
            Message.public_id == public_id,
            Message.namespace_id == g.namespace.id).one()
    except NoResultFound:
        raise NotFoundError("Couldn't find message {0} ".format(public_id))
    if request.headers.get('Accept', None) == 'message/rfc822':
        if message.full_body is not None:
            return Response(message.full_body.data,
                            mimetype='message/rfc822')
        else:
            g.log.error("Message without full_body attribute: id='{0}'"
                        .format(message.id))
            raise NotFoundError(
                "Couldn't find raw contents for message `{0}` "
                .format(public_id))
    return encoder.jsonify(message)
Example 2: ns_all
def ns_all():
    """ Return all namespaces """
    # We do this outside the blueprint to support the case of an empty
    # public_id. However, this means the before_request isn't run, so we need
    # to make our own session
    with global_session_scope() as db_session:
        parser = reqparse.RequestParser(argument_class=ValidatableArgument)
        parser.add_argument('limit', default=DEFAULT_LIMIT, type=limit,
                            location='args')
        parser.add_argument('offset', default=0, type=int, location='args')
        parser.add_argument('email_address', type=bounded_str, location='args')
        args = strict_parse_args(parser, request.args)
        query = db_session.query(Namespace)
        if args['email_address']:
            query = query.join(Account)
            query = query.filter_by(email_address=args['email_address'])
        query = query.limit(args['limit'])
        if args['offset']:
            query = query.offset(args['offset'])
        namespaces = query.all()
        encoder = APIEncoder(legacy_nsid=request.path.startswith('/n'))
        return encoder.jsonify(namespaces)
Example 3: format_output
def format_output(public_snapshot, include_body):
    # Because we're using a snapshot of the message API representation in the
    # transaction log, we can just return that directly (without the 'body'
    # field if include_body is False).
    encoder = APIEncoder()
    return encoder.cereal({k: v for k, v in public_snapshot.iteritems()
                           if k != 'body' or include_body})
Example 4: streaming_change_generator
def streaming_change_generator(namespace_id, poll_interval, timeout,
                               transaction_pointer, exclude_types=None):
    """
    Poll the transaction log for the given `namespace_id` until `timeout`
    expires, and yield each time new entries are detected.

    Arguments
    ---------
    namespace_id: int
        Id of the namespace for which to check changes.
    poll_interval: float
        How often to check for changes.
    timeout: float
        How many seconds to allow the connection to remain open.
    transaction_pointer: int, optional
        Yield transaction rows starting after the transaction with id equal to
        `transaction_pointer`.
    """
    encoder = APIEncoder()
    start_time = time.time()
    while time.time() - start_time < timeout:
        with session_scope() as db_session:
            deltas, new_pointer = format_transactions_after_pointer(
                namespace_id, transaction_pointer, db_session, 100,
                _format_transaction_for_delta_sync, exclude_types)
        if new_pointer is not None and new_pointer != transaction_pointer:
            transaction_pointer = new_pointer
            for delta in deltas:
                yield encoder.cereal(delta) + '\n'
        else:
            gevent.sleep(poll_interval)
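The generator in Example 4 yields newline-delimited JSON strings. As a hypothetical illustration (the endpoint name, mimetype, and parameter values below are assumptions, not taken from the source), it could be wired into a long-lived streaming Flask response like this:

from flask import Response

def delta_stream_endpoint(namespace_id, cursor):
    # Hypothetical wiring: each serialized delta is flushed to the client as
    # it is produced, and the connection stays open for up to `timeout` seconds.
    generator = streaming_change_generator(
        namespace_id, poll_interval=1.0, timeout=3600,
        transaction_pointer=cursor)
    return Response(generator, mimetype='text/event-stream')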
Example 5: ns_all
def ns_all():
    """ Return all namespaces """
    # We do this outside the blueprint to support the case of an empty
    # public_id. However, this means the before_request isn't run, so we need
    # to make our own session
    with session_scope() as db_session:
        namespaces = db_session.query(Namespace).all()
        encoder = APIEncoder()
        return encoder.jsonify(namespaces)
Example 6: streaming_change_generator
def streaming_change_generator(
    namespace,
    poll_interval,
    timeout,
    transaction_pointer,
    exclude_types=None,
    include_types=None,
    exclude_folders=True,
    exclude_metadata=True,
    exclude_account=True,
    expand=False,
    is_n1=False,
):
    """
    Poll the transaction log for the given `namespace_id` until `timeout`
    expires, and yield each time new entries are detected.

    Arguments
    ---------
    namespace_id: int
        Id of the namespace for which to check changes.
    poll_interval: float
        How often to check for changes.
    timeout: float
        How many seconds to allow the connection to remain open.
    transaction_pointer: int, optional
        Yield transaction rows starting after the transaction with id equal to
        `transaction_pointer`.
    """
    encoder = APIEncoder(is_n1=is_n1)
    start_time = time.time()
    while time.time() - start_time < timeout:
        with session_scope(namespace.id) as db_session:
            deltas, new_pointer = format_transactions_after_pointer(
                namespace,
                transaction_pointer,
                db_session,
                100,
                exclude_types,
                include_types,
                exclude_folders,
                exclude_metadata,
                exclude_account,
                expand=expand,
                is_n1=is_n1,
            )
        if new_pointer is not None and new_pointer != transaction_pointer:
            transaction_pointer = new_pointer
            for delta in deltas:
                yield encoder.cereal(delta) + "\n"
        else:
            yield "\n"
            gevent.sleep(poll_interval)
Example 7: message_query_api
def message_query_api():
    g.parser.add_argument('subject', type=bounded_str, location='args')
    g.parser.add_argument('to', type=bounded_str, location='args')
    g.parser.add_argument('from', type=bounded_str, location='args')
    g.parser.add_argument('cc', type=bounded_str, location='args')
    g.parser.add_argument('bcc', type=bounded_str, location='args')
    g.parser.add_argument('any_email', type=bounded_str, location='args')
    g.parser.add_argument('started_before', type=timestamp, location='args')
    g.parser.add_argument('started_after', type=timestamp, location='args')
    g.parser.add_argument('last_message_before', type=timestamp,
                          location='args')
    g.parser.add_argument('last_message_after', type=timestamp,
                          location='args')
    g.parser.add_argument('filename', type=bounded_str, location='args')
    g.parser.add_argument('in', type=bounded_str, location='args')
    g.parser.add_argument('thread_id', type=valid_public_id, location='args')
    g.parser.add_argument('unread', type=strict_bool, location='args')
    g.parser.add_argument('starred', type=strict_bool, location='args')
    g.parser.add_argument('view', type=view, location='args')
    # For backwards-compatibility -- remove after deprecating tags API.
    g.parser.add_argument('tag', type=bounded_str, location='args')
    args = strict_parse_args(g.parser, request.args)
    # For backwards-compatibility -- remove after deprecating tags API.
    in_ = args['in'] or args['tag']
    messages = filtering.messages_or_drafts(
        namespace_id=g.namespace.id,
        drafts=False,
        subject=args['subject'],
        thread_public_id=args['thread_id'],
        to_addr=args['to'],
        from_addr=args['from'],
        cc_addr=args['cc'],
        bcc_addr=args['bcc'],
        any_email=args['any_email'],
        started_before=args['started_before'],
        started_after=args['started_after'],
        last_message_before=args['last_message_before'],
        last_message_after=args['last_message_after'],
        filename=args['filename'],
        in_=in_,
        unread=args['unread'],
        starred=args['starred'],
        limit=args['limit'],
        offset=args['offset'],
        view=args['view'],
        db_session=g.db_session)
    # Use a new encoder object with the expand parameter set.
    encoder = APIEncoder(g.namespace.public_id, args['view'] == 'expanded')
    return encoder.jsonify(messages)
Example 8: g
def g():
    encoder = APIEncoder()
    with session_scope(self.account_id) as db_session:
        for imap_uids in self._search(db_session, search_query):
            query = db_session.query(Message) \
                .join(ImapUid) \
                .filter(ImapUid.account_id == self.account_id,
                        ImapUid.msg_uid.in_(imap_uids)) \
                .order_by(desc(Message.received_date))
            yield encoder.cereal(query.all()) + '\n'
Example 9: thread_api
def thread_api(public_id):
    g.parser.add_argument('view', type=view, location='args')
    args = strict_parse_args(g.parser, request.args)
    # Use a new encoder object with the expand parameter set.
    encoder = APIEncoder(g.namespace.public_id, args['view'] == 'expanded')
    try:
        valid_public_id(public_id)
        thread = g.db_session.query(Thread).filter(
            Thread.public_id == public_id,
            Thread.namespace_id == g.namespace.id).one()
        return encoder.jsonify(thread)
    except NoResultFound:
        raise NotFoundError("Couldn't find thread `{0}`".format(public_id))
Example 10: thread_query_api
def thread_query_api():
    g.parser.add_argument('subject', type=bounded_str, location='args')
    g.parser.add_argument('to', type=bounded_str, location='args')
    g.parser.add_argument('from', type=bounded_str, location='args')
    g.parser.add_argument('cc', type=bounded_str, location='args')
    g.parser.add_argument('bcc', type=bounded_str, location='args')
    g.parser.add_argument('any_email', type=bounded_str, location='args')
    g.parser.add_argument('started_before', type=timestamp, location='args')
    g.parser.add_argument('started_after', type=timestamp, location='args')
    g.parser.add_argument('last_message_before', type=timestamp,
                          location='args')
    g.parser.add_argument('last_message_after', type=timestamp,
                          location='args')
    g.parser.add_argument('filename', type=bounded_str, location='args')
    g.parser.add_argument('thread_id', type=valid_public_id, location='args')
    g.parser.add_argument('tag', type=bounded_str, location='args')
    g.parser.add_argument('view', type=view, location='args')
    args = strict_parse_args(g.parser, request.args)
    threads = filtering.threads(
        namespace_id=g.namespace.id,
        subject=args['subject'],
        thread_public_id=args['thread_id'],
        to_addr=args['to'],
        from_addr=args['from'],
        cc_addr=args['cc'],
        bcc_addr=args['bcc'],
        any_email=args['any_email'],
        started_before=args['started_before'],
        started_after=args['started_after'],
        last_message_before=args['last_message_before'],
        last_message_after=args['last_message_after'],
        filename=args['filename'],
        tag=args['tag'],
        limit=args['limit'],
        offset=args['offset'],
        view=args['view'],
        db_session=g.db_session)
    # Use a new encoder object with the expand parameter set.
    encoder = APIEncoder(g.namespace.public_id, args['view'] == 'expanded')
    return encoder.jsonify(threads)
Example 11: __init__
def __init__(self, poll_interval=1, chunk_size=22):
    self.workers = defaultdict(set)
    self.log = get_logger()
    self.poll_interval = poll_interval
    self.chunk_size = chunk_size
    self.minimum_id = -1
    self.poller = None
    self.polling = False
    self.encoder = APIEncoder()
    self._on_startup()
Example 12: g
def g():
    encoder = APIEncoder()
    with session_scope(self.account_id) as db_session:
        yield encoder.cereal(self.search_threads(db_session, search_query)) + '\n'
Example 13: WebhookService
class WebhookService():
    """Asynchronously consumes the transaction log and executes registered
    webhooks."""

    def __init__(self, poll_interval=1, chunk_size=22):
        self.workers = defaultdict(set)
        self.log = get_logger()
        self.poll_interval = poll_interval
        self.chunk_size = chunk_size
        self.minimum_id = -1
        self.poller = None
        self.polling = False
        self.encoder = APIEncoder()
        self._on_startup()

    @property
    def all_active_workers(self):
        worker_sets = self.workers.values()
        if not worker_sets:
            return set()
        return set.union(*worker_sets)

    def register_hook(self, namespace_id, parameters):
        """Register a new webhook.

        Parameters
        ----------
        namespace_id: int
            ID for the namespace to apply the webhook on.
        parameters: dictionary
            Dictionary of the hook parameters.
        """
        # TODO(emfree) do more meaningful parameter validation here
        # (or in the calling code in the API)
        if urlparse.urlparse(parameters.get('callback_url')).scheme != 'https':
            raise ValueError('callback_url MUST be https!')
        with session_scope() as db_session:
            lens = Lens(
                namespace_id=namespace_id,
                subject=parameters.get('subject'),
                thread_public_id=parameters.get('thread'),
                to_addr=parameters.get('to'),
                from_addr=parameters.get('from'),
                cc_addr=parameters.get('cc'),
                bcc_addr=parameters.get('bcc'),
                any_email=parameters.get('any_email'),
                started_before=parameters.get('started_before'),
                started_after=parameters.get('started_after'),
                last_message_before=parameters.get('last_message_before'),
                last_message_after=parameters.get('last_message_after'),
                filename=parameters.get('filename'))
            hook = Webhook(
                namespace_id=namespace_id,
                lens=lens,
                callback_url=parameters.get('callback_url'),
                failure_notify_url=parameters.get('failure_notify_url'),
                include_body=parameters.get('include_body', False),
                active=parameters.get('active', True),
                min_processed_id=self.minimum_id)
            db_session.add(hook)
            db_session.add(lens)
            db_session.commit()
            if hook.active:
                self._start_hook(hook, db_session)
            return self.encoder.cereal(hook, pretty=True)

    def start_hook(self, hook_public_id):
        with session_scope() as db_session:
            hook = db_session.query(Webhook). \
                filter_by(public_id=hook_public_id).one()
            self._start_hook(hook, db_session)

    def _start_hook(self, hook, db_session):
        self.log.info('Starting hook with public id {}'.format(hook.public_id))
        if any(worker.id == hook.id for worker in self.all_active_workers):
            # Hook already has a worker
            return 'OK hook already running'
        hook.min_processed_id = self.minimum_id
        hook.active = True
        namespace_id = hook.namespace_id
        worker = WebhookWorker(hook)
        self.workers[namespace_id].add(worker)
        if not worker.started:
            worker.start()
        db_session.commit()
        if not self.polling:
            self._start_polling()
        return 'OK hook started'

    def stop_hook(self, hook_public_id):
        self.log.info('Stopping hook with public id {}'.format(hook_public_id))
        with session_scope() as db_session:
            hook = db_session.query(Webhook). \
                filter_by(public_id=hook_public_id).one()
            hook.active = False
#......... remaining code omitted here .........
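The class above is truncated, but the methods shown are enough to sketch a hypothetical caller. The parameter values below are illustrative; only the keys read in register_hook come from the source, and callback_url must use https.

service = WebhookService(poll_interval=2)
hook_json = service.register_hook(namespace_id=1, parameters={
    'callback_url': 'https://example.com/inbox-hook',   # must be https
    'filename': 'report.pdf',                           # optional Lens filter
    'include_body': True,
})
# register_hook returns the hook serialized via APIEncoder.cereal(..., pretty=True).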