This article collects typical usage examples of contextlib.nullcontext in Python. If you have been wondering what contextlib.nullcontext is for, how it is used, or what real code that calls it looks like, the curated examples below may help. You can also browse further usage examples for the module it belongs to, contextlib.
A total of 15 code examples of contextlib.nullcontext are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
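Before the project-specific examples, here is a minimal stand-alone sketch (not taken from any of the projects below; the file name is purely illustrative) of the most common pattern: wrapping an optional resource in contextlib.nullcontext so a single with-block works whether or not the function opened the resource itself.

import contextlib

def dump(data, fileobj=None):
    # Open a file only when the caller did not supply one; otherwise wrap the
    # caller's object in a no-op context manager so this function never
    # closes a stream it does not own.
    ctx = open("out.txt", "w") if fileobj is None else contextlib.nullcontext(fileobj)
    with ctx as f:
        f.write(data)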
Example 1: __init__

# Required import: import contextlib [as alias]
# Or: from contextlib import nullcontext [as alias]
def __init__(self, fileobj, filename=None):
    super(tzfile, self).__init__()

    file_opened_here = False
    if isinstance(fileobj, string_types):
        self._filename = fileobj
        fileobj = open(fileobj, 'rb')
        file_opened_here = True
    elif filename is not None:
        self._filename = filename
    elif hasattr(fileobj, "name"):
        self._filename = fileobj.name
    else:
        self._filename = repr(fileobj)

    if fileobj is not None:
        if not file_opened_here:
            # Wrap an externally supplied file object in a null context so
            # the with-block below does not close it.
            fileobj = _nullcontext(fileobj)

        with fileobj as file_stream:
            tzobj = self._read_tzfile(file_stream)

        self._set_tzdata(tzobj)
Example 2: check_model_compatibility

# Required import: import contextlib [as alias]
# Or: from contextlib import nullcontext [as alias]
def check_model_compatibility(self, backend_config, save, load):
    C = self.n_channels // self.groups
    old_model = {
        'avg_mean': numpy.random.uniform(
            -1, 1, (C,)).astype(self.dtype),
        'avg_projection': numpy.random.uniform(
            0.5, 1, (C, C)).astype(self.dtype),
        'N': numpy.array(0)
    }
    save(self.temp_file_path, old_model)

    model = links.DecorrelatedBatchNormalization(
        self.n_channels, groups=self.groups, dtype=self.dtype)
    model.to_device(backend_config.device)
    # Loading an old grouped model is expected to warn; otherwise nothing
    # should be raised, hence the no-op nullcontext() branch.
    with (
            testing.assert_warns(UserWarning) if self.groups != 1
            else nullcontext()):
        load(self.temp_file_path, model)

    x = numpy.random.rand(5, self.n_channels, 2).astype(self.dtype)
    x = backend_config.get_array(x)
    with chainer.using_config('train', False):
        model(x)
        model(x)
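The conditional `with (expected_warning if ... else nullcontext())` expression above is an instance of a widespread testing idiom. As a stand-alone, hedged sketch of the same idea (pytest-based, not taken from the project above):

import contextlib
import pytest

@pytest.mark.parametrize("value, expectation", [
    (2, contextlib.nullcontext()),          # valid input: expect nothing
    (0, pytest.raises(ZeroDivisionError)),  # invalid input: expect an error
])
def test_reciprocal(value, expectation):
    # The test body is identical; only the surrounding expectation changes.
    with expectation:
        assert (1 / value) * value == 1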
Example 3: __init__

# Required import: import contextlib [as alias]
# Or: from contextlib import nullcontext [as alias]
def __init__(
        self,
        mode: int,
        ctx: moderngl.Context,
        prog: moderngl.Program,
        dtype: np.dtype,
        draw_context: ContextManager = nullcontext(),
        capacity: int = 256,
        index_capacity: int = 512):
    self.mode = mode
    self.ctx = ctx
    self.prog = prog
    self.dtype = dtype_to_moderngl(dtype)
    self.allocs: Dict[int, Tuple[slice, np.ndarray]] = {}
    self.verts = MemoryBackedBuffer(ctx, capacity, dtype)
    self.indexes = IndexBuffer(ctx)
    self.draw_context = draw_context
    self.dirty = False
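Using nullcontext() as a default argument, as above, relies on the fact that a nullcontext instance keeps no state and can be entered any number of times. A small stand-alone sketch of that property (not project code):

import contextlib

# A single nullcontext() instance can safely be shared and re-entered,
# because its __enter__ and __exit__ do nothing stateful.
shared = contextlib.nullcontext()
with shared:
    pass
with shared:  # entering the same instance again works fine
    pass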
Example 4: iteration

# Required import: import contextlib [as alias]
# Or: from contextlib import nullcontext [as alias]
def iteration(self,
              data: Tuple[torch.Tensor, torch.Tensor]) -> Mapping[str, torch.Tensor]:
    input, target = data
    # Both autocast and nullcontext are callables returning a context
    # manager, so the chosen factory can be called uniformly below.
    context = torch.cuda.amp.autocast if self._use_amp else contextlib.nullcontext
    with context():
        output = self.model(input)
        loss = self.loss_f(output, target)
    if self.is_train:
        self.optimizer.zero_grad()
        if self._use_amp:
            self.scaler.scale(loss).backward()
            self.scaler.step(self.optimizer)
            self.scaler.update()
        else:
            loss.backward()
            self.optimizer.step()
    return TensorMap(loss=loss, output=output)
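The key detail above is that a context-manager factory is selected and only called inside the with statement, so a fresh context manager is created on every iteration. A stdlib-only sketch of the same selection pattern (cProfile.Profile merely stands in for autocast here and is usable as a context manager on Python 3.8+; it is not part of the example above):

import contextlib
import cProfile

def run_step(profile: bool) -> None:
    # Pick a factory, then call it: each call to run_step gets its own
    # context manager, whether profiling or the no-op branch is taken.
    factory = cProfile.Profile if profile else contextlib.nullcontext
    with factory():
        sum(range(1000))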
Example 5: get_psl_context_manager

# Required import: import contextlib [as alias]
# Or: from contextlib import nullcontext [as alias]
def get_psl_context_manager(self, side_effect_parameter):
    if side_effect_parameter is None:
        return nullcontext()

    if callable(side_effect_parameter):
        side_effect = side_effect_parameter
    else:
        side_effect = partial(
            self._mock_get_public_suffix,
            public_suffixes=[side_effect_parameter]
            if not isinstance(side_effect_parameter, list)
            else list(side_effect_parameter)
        )

    return mock.patch.object(psl, 'get_public_suffix', side_effect=side_effect)
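The helper returns either a real mock.patch.object patcher or a do-nothing nullcontext(), so callers can always use a single with statement. A simplified stand-alone sketch of that shape (the names are placeholders, not from the project above):

import contextlib
from unittest import mock

def maybe_patch(target, attribute, replacement=None):
    # Return a real patcher when a replacement is given, otherwise a no-op
    # context manager; either way the caller writes `with maybe_patch(...)`.
    if replacement is None:
        return contextlib.nullcontext()
    return mock.patch.object(target, attribute, new=replacement)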
Example 6: get_distribution_scope

# Required import: import contextlib [as alias]
# Or: from contextlib import nullcontext [as alias]
def get_distribution_scope(batch_size):
    if num_gpus() > 1:
        strategy = tf.distribute.MirroredStrategy()
        assert (
            batch_size % strategy.num_replicas_in_sync == 0
        ), f"Batch size {batch_size} cannot be divided onto {num_gpus()} GPUs"
        distribution_scope = strategy.scope
    else:
        if sys.version_info >= (3, 7):
            distribution_scope = contextlib.nullcontext
        else:
            # contextlib.suppress() with no arguments is a do-nothing context
            # manager, which makes it a usable stand-in before Python 3.7.
            distribution_scope = contextlib.suppress

    return distribution_scope()
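The fallback works because suppress() called with no exception types suppresses nothing at all. A small stand-alone sketch of that version check (not project code):

import contextlib
import sys

# Both branches produce a context manager that does nothing on enter or exit.
null_cm = (contextlib.nullcontext() if sys.version_info >= (3, 7)
           else contextlib.suppress())
with null_cm:
    print("the body runs identically on either branch")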
Example 7: _null_context

# Required import: import contextlib [as alias]
# Or: from contextlib import nullcontext [as alias]
@contextlib.contextmanager
def _null_context() -> Iterator[None]:
    # TODO(b/154533346)
    # This should move to `contextlib.nullcontext` once TFF's minimum
    # Python version moves up to 3.7.
    yield None
Example 8: forward

# Required import: import contextlib [as alias]
# Or: from contextlib import nullcontext [as alias]
def forward(self, link, inputs, backend_config):
    x, = inputs
    with chainer.using_config('train', not self.test):
        y = link(x)
    return y,

# TODO(kataoka) Use `contextlib.nullcontext` if Python 3.7 or higher is assumed
Example 9: nullcontext

# Required import: import contextlib [as alias]
# Or: from contextlib import nullcontext [as alias]
@contextlib.contextmanager
def nullcontext():
    yield
Example 10: nullcontext

# Required import: import contextlib [as alias]
# Or: from contextlib import nullcontext [as alias]
@contextlib.contextmanager
def nullcontext():
    yield
Example 11: __init__

# Required import: import contextlib [as alias]
# Or: from contextlib import nullcontext [as alias]
def __init__(self, length, multithread_safe=False, do_pickle=False):
    self._multithread_safe = multithread_safe
    self.length = length
    assert self.length > 0

    if self._multithread_safe:
        self.lock = threading.Lock()
    else:
        # Use contextlib.nullcontext() when Python 3.6 is dropped.
        self.lock = contextlib.suppress()

    self.data = [None for _ in range(self.length)]
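Whichever branch is taken, self.lock supports the with statement, so the rest of the class never needs to know whether locking is real. A stand-alone sketch of that shape, assuming Python 3.7+ so nullcontext can be used directly (this class is illustrative, not the project's):

import contextlib
import threading

class TinyCache:
    def __init__(self, length, multithread_safe=False):
        # Real lock or no-op context manager; the call sites look the same.
        self.lock = (threading.Lock() if multithread_safe
                     else contextlib.nullcontext())
        self.data = [None] * length

    def put(self, index, value):
        with self.lock:
            self.data[index] = value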
Example 12: nullcontext

# Required import: import contextlib [as alias]
# Or: from contextlib import nullcontext [as alias]
@contextlib.contextmanager
def nullcontext(obj):
    # Backport variant: yields the given object as the `as` target.
    yield obj
Example 13: nullcontext

# Required import: import contextlib [as alias]
# Or: from contextlib import nullcontext [as alias]
@contextlib.contextmanager
def nullcontext():
    yield None
Example 14: nullcontext

# Required import: import contextlib [as alias]
# Or: from contextlib import nullcontext [as alias]
@contextlib.contextmanager
def nullcontext(enter_result=None):
    """Backport of the Python >=3.7 trivial context manager.

    See `the Python documentation
    <https://docs.python.org/3/library/contextlib.html#contextlib.nullcontext>`_
    for details.
    """
    try:
        yield enter_result
    finally:
        pass
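A hedged usage sketch of such a backport (the function name and file handling here are illustrative, not from the project above): the classic "write to a path or to an already-open stream" helper, where nullcontext keeps the borrowed stream from being closed.

import sys

def write_report(text, path=None):
    # Open (and later close) a file only when a path is given; otherwise
    # borrow sys.stdout without closing it.
    cm = open(path, "w") if path is not None else nullcontext(sys.stdout)
    with cm as stream:
        stream.write(text)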
Example 15: read_until_prompt

# Required import: import contextlib [as alias]
# Or: from contextlib import nullcontext [as alias]
def read_until_prompt(
    self,
    prompt: typing.Optional[ConvenientSearchString] = None,
    timeout: typing.Optional[float] = None,
) -> str:
    """
    Read until prompt is detected.

    Read from the channel until the configured prompt string is detected.
    All data captured up until the prompt is returned, decoded as UTF-8.

    If ``prompt`` is ``None``, the prompt which was set using
    :py:meth:`tbot.machine.channel.Channel.with_prompt` is used.

    :param ConvenientSearchString prompt: The prompt to read up to.  It
        must appear as the very last readable data in the channel's data
        stream.  See :ref:`channel_search_string` for more info about which
        types can be passed for this parameter.
    :param float timeout: Optional timeout.  If ``timeout`` is set and
        expires before the prompt was detected, ``read_until_prompt``
        raises an exception.
    :rtype: str
    :returns: UTF-8 decoded string of all bytes read up to the prompt.
    """
    ctx: typing.ContextManager[typing.Any]
    if prompt is not None:
        ctx = self.with_prompt(prompt)
    else:
        # contextlib.nullcontext() would be a better fit here but sadly it
        # is only available in 3.7+
        ctx = contextlib.ExitStack()

    buf = bytearray()
    with ctx:
        for new in self.read_iter(timeout=timeout):
            buf += new

            if isinstance(self.prompt, bytes):
                if buf.endswith(self.prompt):
                    return (
                        buf[: -len(self.prompt)]
                        .decode("utf-8", errors="replace")
                        .replace("\r\n", "\n")
                        .replace("\n\r", "\n")
                    )
            elif isinstance(self.prompt, BoundedPattern):
                match = self.prompt.pattern.search(buf)
                if match is not None:
                    return (
                        buf[: match.span()[0]]
                        .decode("utf-8", errors="replace")
                        .replace("\r\n", "\n")
                        .replace("\n\r", "\n")
                    )

    raise RuntimeError("unreachable")