This article collects typical usage examples of the rx.Observable class in Python. If you are wondering how to use rx.Observable, what it is for, or what real-world code that uses it looks like, the hand-picked examples below may help. You can also explore further usage examples from the rx module itself.
Below are 12 code examples of rx.Observable, sorted by popularity by default.
Example 1: stream_from_datetime
# Required imports: import rx [as alias]
# Or: from rx import Observable [as alias]
def stream_from_datetime(cls, start_time: datetime.date, stream_from: Union[Iterable, AnyStr]):
    # Read from the class-level Kafka stream when requested, otherwise from the given iterable.
    stream = cls.STREAMS if stream_from == 'kafka' else stream_from
    return Observable \
        .from_(stream) \
        .filter(lambda value: datetime.strptime(value['ts'], '%Y-%m-%d %H:%M:%S') > start_time)
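
For context, here is a minimal sketch of how the stream returned above might be consumed, assuming RxPY 1.x (the same API style as the example) and a small list of dicts standing in for the Kafka records; the sample data and the print callback are illustrative only:

from datetime import datetime

from rx import Observable

records = [
    {'ts': '2024-01-01 10:00:00', 'value': 1},  # before the cutoff, filtered out
    {'ts': '2024-01-01 12:00:00', 'value': 2},  # after the cutoff, emitted
]
start_time = datetime(2024, 1, 1, 11, 0, 0)

Observable.from_(records) \
    .filter(lambda value: datetime.strptime(value['ts'], '%Y-%m-%d %H:%M:%S') > start_time) \
    .subscribe(lambda value: print('recent record:', value))
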
Example 2: stream_from_start
# Required imports: import rx [as alias]
# Or: from rx import Observable [as alias]
def stream_from_start(cls, stream_from: Union[Iterable, AnyStr]):
    # Emit every record from the beginning of the stream.
    stream = cls.STREAMS if stream_from == 'kafka' else stream_from
    return Observable \
        .from_(stream)
Example 3: stream_from_offset
# Required imports: import rx [as alias]
# Or: from rx import Observable [as alias]
def stream_from_offset(cls, offset, stream_from: Union[Iterable, AnyStr]):
    # Emit records until their timestamp comes within 5 seconds of the current time.
    stream = cls.STREAMS if stream_from == 'kafka' else stream_from
    return Observable \
        .from_(stream) \
        .take_while(lambda value: datetime.now() -
                    datetime.strptime(value['ts'], '%Y-%m-%d %H:%M:%S') > timedelta(seconds=5))
Example 4: execute
# Required imports: import rx [as alias]
# Or: from rx import Observable [as alias]
def execute(self, *args, **kwargs):
    operation_ast = get_operation_ast(args[0])
    if operation_ast and operation_ast.operation == "subscription":
        result = subscribe(self.schema, *args, **kwargs)
        if isinstance(result, Observable):
            # Drain the synchronous Observable and keep its last emission, if any.
            a = []
            result.subscribe(lambda x: a.append(x))
            if len(a) > 0:
                result = a[-1]
        return result
    return execute(self.schema, *args, **kwargs)
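
As an aside, draining an Observable into a list as done above only works for cold, synchronous sources, where all values are delivered before subscribe() returns; a small standalone illustration (RxPY 1.x, names are illustrative):

from rx import Observable

emitted = []
Observable.from_([1, 2, 3]).subscribe(lambda x: emitted.append(x))

# For a synchronous source the subscription has already completed here.
last = emitted[-1] if emitted else None
print(last)  # 3
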
Example 5: _instantiate_chaincode
# Required imports: import rx [as alias]
# Or: from rx import Observable [as alias]
def _instantiate_chaincode(chain, cc_instantiation_request, scheduler=None):
    """Instantiate chaincode.

    :param chain: chain instance
    :param cc_instantiation_request: see TransactionProposalRequest
    :param scheduler: see rx.Scheduler, defaults to None
    :return: An rx.Observable of the instantiation response
    """
    if len(chain.peers) < 1:
        return rx.Observable.just(ValueError(
            "Missing peer objects on this chain"
        ))

    # Validate any explicitly requested target peers; otherwise fall back to the chain's peers.
    peers = {}
    if cc_instantiation_request and cc_instantiation_request.targets:
        peers = cc_instantiation_request.targets
        for peer in peers:
            if not chain.is_valid_peer(peer):
                return rx.Observable.just(ValueError(
                    'Request targets peer object {} not in chain'.format(peer)
                ))
    if len(peers) < 1:
        peers = chain.peers

    return rx.Observable \
        .just(cc_instantiation_request) \
        .map(check_tran_prop_request) \
        .map(lambda req, idx: _create_instantiation_proposal(req, chain))
    # .flatmap(lambda proposal, idx:
    #          send_transaction_proposal(proposal, peers, scheduler))
Example 6: _invoke_chaincode
# Required imports: import rx [as alias]
# Or: from rx import Observable [as alias]
def _invoke_chaincode(chain, cc_invocation_request, scheduler=None):
    """Invoke chaincode.

    :param chain: chain instance
    :param cc_invocation_request: see TransactionProposalRequest
    :param scheduler: see rx.Scheduler, defaults to None
    :return: An rx.Observable of the invocation response
    """
    if len(chain.peers) < 1:
        return rx.Observable.just(ValueError(
            "Missing peer objects on this chain"
        ))

    # Validate any explicitly requested target peers; otherwise fall back to the chain's peers.
    peers = {}
    if cc_invocation_request and cc_invocation_request.targets:
        peers = cc_invocation_request.targets
        for peer in peers:
            if not chain.is_valid_peer(peer):
                return rx.Observable.just(ValueError(
                    'Request targets peer object {} not in chain'.format(peer)
                ))
    if len(peers) < 1:
        peers = chain.peers

    return rx.Observable \
        .just(cc_invocation_request) \
        .map(check_tran_prop_request) \
        .map(lambda req, idx: _create_invocation_proposal(req, chain))
    # .flatmap(lambda proposal, idx:
    #          send_transaction_proposal(proposal, peers, scheduler))
Example 7: _write_batching
# Required imports: import rx [as alias]
# Or: from rx import Observable [as alias]
def _write_batching(self, bucket, org, data,
                    precision=DEFAULT_WRITE_PRECISION,
                    **kwargs):
    # Dispatch on the payload type and recurse until everything is reduced to bytes
    # pushed onto the internal batching subject.
    if isinstance(data, bytes):
        _key = _BatchItemKey(bucket, org, precision)
        self._subject.on_next(_BatchItem(key=_key, data=data))
    elif isinstance(data, str):
        self._write_batching(bucket, org, data.encode("utf-8"),
                             precision, **kwargs)
    elif isinstance(data, Point):
        self._write_batching(bucket, org, data.to_line_protocol(), data.write_precision, **kwargs)
    elif isinstance(data, dict):
        self._write_batching(bucket, org, Point.from_dict(data, write_precision=precision),
                             precision, **kwargs)
    elif 'DataFrame' in type(data).__name__:
        self._write_batching(bucket, org, self._data_frame_to_list_of_points(data, precision, **kwargs),
                             precision, **kwargs)
    elif isinstance(data, list):
        for item in data:
            self._write_batching(bucket, org, item, precision, **kwargs)
    elif isinstance(data, Observable):
        data.subscribe(lambda it: self._write_batching(bucket, org, it, precision, **kwargs))
    return None
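
The last branch above simply re-dispatches every item the Observable emits through the same method; a tiny standalone sketch of that pattern (RxPY 1.x style as in the earlier examples, with print standing in for the recursive call):

from rx import Observable

def dispatch(item):
    # Stand-in for the recursive self._write_batching(...) call above.
    print('batched:', item)

Observable.from_(['weather,location=HK temperature=24.3',
                  'weather,location=SF temperature=18.1']).subscribe(dispatch)
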
Example 8: graphql
# Required imports: import rx [as alias]
# Or: from rx import Observable [as alias]
def graphql(*args, **kwargs):
    # type: (*Any, **Any) -> Union[ExecutionResult, Observable, Promise[ExecutionResult]]
    return_promise = kwargs.get("return_promise", False)
    if return_promise:
        return execute_graphql_as_promise(*args, **kwargs)
    else:
        return execute_graphql(*args, **kwargs)
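
For orientation, a hedged usage sketch of the public graphql() entry point, assuming graphql-core 2.x (which these snippets appear to be taken from), where it returns an ExecutionResult for plain queries and an Observable for subscriptions; the schema and resolver below are illustrative:

from graphql import (GraphQLField, GraphQLObjectType, GraphQLSchema,
                     GraphQLString, graphql)

schema = GraphQLSchema(
    query=GraphQLObjectType(
        name='Query',
        fields={'hello': GraphQLField(GraphQLString, resolver=lambda root, info: 'world')},
    )
)

result = graphql(schema, '{ hello }')
print(result.data)  # {'hello': 'world'}
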
Example 9: execute_graphql
# Required imports: import rx [as alias]
# Or: from rx import Observable [as alias]
def execute_graphql(
    schema,  # type: GraphQLSchema
    request_string="",  # type: Union[Document, str]
    root_value=None,  # type: Any
    context_value=None,  # type: Optional[Any]
    variable_values=None,  # type: Optional[Any]
    operation_name=None,  # type: Optional[Any]
    middleware=None,  # type: Optional[Any]
    backend=None,  # type: Optional[Any]
    **execute_options  # type: Any
):
    # type: (...) -> Union[ExecutionResult, Observable, Promise[ExecutionResult]]
    try:
        if backend is None:
            backend = get_default_backend()
        document = backend.document_from_string(schema, request_string)
        return document.execute(
            root_value,
            context_value,
            operation_name=operation_name,
            variable_values=variable_values,
            middleware=middleware,
            **execute_options
        )
    except Exception as e:
        # Execution errors are returned on the result rather than raised.
        return ExecutionResult(errors=[e], invalid=True)
Example 10: execute_and_validate
# Required imports: import rx [as alias]
# Or: from rx import Observable [as alias]
def execute_and_validate(
    schema,  # type: GraphQLSchema
    document_ast,  # type: Document
    *args,  # type: Any
    **kwargs  # type: Any
):
    # type: (...) -> Union[ExecutionResult, Observable]
    do_validation = kwargs.get("validate", True)
    if do_validation:
        validation_errors = validate(schema, document_ast)
        if validation_errors:
            return ExecutionResult(errors=validation_errors, invalid=True)
    return execute(schema, document_ast, *args, **kwargs)
Example 11: on_start
# Required imports: import rx [as alias]
# Or: from rx import Observable [as alias]
def on_start(self, connection_context, op_id, params):
    try:
        execution_result = self.execute(
            connection_context.request_context, params)
        assert isinstance(execution_result, Observable), \
            "A subscription must return an observable"
        # Forward every emission of the subscription to the connected client.
        execution_result.subscribe(SubscriptionObserver(
            connection_context,
            op_id,
            self.send_execution_result,
            self.send_error,
            self.on_close
        ))
    except Exception as e:
        self.send_error(connection_context, op_id, str(e))
Example 12: write
# Required imports: import rx [as alias]
# Or: from rx import Observable [as alias]
def write(self, bucket: str, org: str = None,
          record: Union[
              str, List['str'], Point, List['Point'], dict, List['dict'], bytes, List['bytes'], Observable] = None,
          write_precision: WritePrecision = DEFAULT_WRITE_PRECISION, **kwargs) -> Any:
    """
    Write time-series data into InfluxDB.

    :param str org: the destination organization for writes; accepts either the ID or the name; if both orgID and org are specified, org takes precedence (required)
    :param str bucket: the destination bucket for writes (required)
    :param WritePrecision write_precision: the precision of the unix timestamps within the body line protocol; a precision specified on a Point takes precedence and is used for that write
    :param record: Points, line protocol, Pandas DataFrame, or RxPY Observable to write
    :param data_frame_measurement_name: name of the measurement when writing a Pandas DataFrame
    :param data_frame_tag_columns: list of DataFrame columns which are tags; the remaining columns will be fields
    """
    if org is None:
        org = self._influxdb_client.org

    if self._point_settings.defaultTags and record is not None:
        for key, val in self._point_settings.defaultTags.items():
            self._append_default_tag(key, val, record)

    if self._write_options.write_type is WriteType.batching:
        return self._write_batching(bucket, org, record,
                                    write_precision, **kwargs)

    payloads = defaultdict(list)
    self._serialize(record, write_precision, payloads, **kwargs)

    _async_req = True if self._write_options.write_type == WriteType.asynchronous else False

    def write_payload(payload):
        final_string = b'\n'.join(payload[1])
        return self._post_write(_async_req, bucket, org, final_string, payload[0])

    results = list(map(write_payload, payloads.items()))
    if not _async_req:
        return None
    elif len(results) == 1:
        return results[0]
    return results
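
Finally, a rough usage sketch tying Examples 7 and 12 together: handing an RxPY Observable to write() with a batching write API. It assumes the influxdb_client package plus the RxPY 1.x construction style used on this page (newer RxPY releases create sources with rx.from_ instead); the URL, token, bucket, and org values are placeholders:

from influxdb_client import InfluxDBClient
from influxdb_client.client.write_api import WriteOptions
from rx import Observable

client = InfluxDBClient(url='http://localhost:8086', token='my-token', org='my-org')
write_api = client.write_api(write_options=WriteOptions(batch_size=500, flush_interval=1_000))

# Each emitted line-protocol string is re-dispatched through _write_batching
# (Example 7) and pushed onto the internal batching subject.
lines = Observable.from_([
    'weather,location=HK temperature=24.3',
    'weather,location=SF temperature=18.1',
])
write_api.write(bucket='my-bucket', org='my-org', record=lines)

write_api.close()  # flush any pending batches
client.close()
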