This article collects typical usage examples of the Python method tensorflow.python.ops.data_flow_ops.StagingArea. If you have been wondering what data_flow_ops.StagingArea does and how to use it, the curated code examples below may help. You can also explore further usage examples from the module this method belongs to, tensorflow.python.ops.data_flow_ops.
The following shows 15 code examples of data_flow_ops.StagingArea, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
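Before the individual examples, here is a minimal, self-contained sketch of the basic put/get pattern that most of the snippets below build on. It assumes TensorFlow 1.x graph mode (under TF 2.x the same calls live behind tf.compat.v1), and the tensor names are purely illustrative:

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

features = tf.placeholder(tf.float32, shape=[2])
labels = tf.placeholder(tf.int32, shape=[])
area = data_flow_ops.StagingArea(dtypes=[tf.float32, tf.int32],
                                 shapes=[[2], []])
put_op = area.put([features, labels])        # stage the currently fed values
staged_features, staged_labels = area.get()  # retrieve a previously staged pair
loss = tf.reduce_sum(staged_features) + tf.cast(staged_labels, tf.float32)

with tf.Session() as sess:
    # Warm-up: stage one element so the first get() has something to return.
    sess.run(put_op, feed_dict={features: [1.0, 2.0], labels: 7})
    # Steady state: each run consumes the previously staged pair while
    # staging the next one -- the double-buffering pattern used below.
    out, _ = sess.run([loss, put_op], feed_dict={features: [3.0, 4.0], labels: 9})
    print(out)  # 10.0, computed from the pair staged during warm-up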
Example 1: _defer_tensor
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import StagingArea [as alias]
def _defer_tensor(tensor):
  """Defers the retrieval of a tensor.

  The tensor is put into a StagingArea, and the return value is the
  retrieval of the tensor from the StagingArea. The effect is that the
  tensor returned from this function is the tensor that was put in the
  StagingArea for the previous Session.run() call.

  Args:
    tensor: The tensor to defer for one step.

  Returns:
    deferred_tensor: The tensor deferred for one step.
    put_op: An op to put `tensor` in the StagingArea. Must be run every step
      that `deferred_tensor` is run.
    warmup_op: A warmup op that should be called before the first step. Puts
      a zero tensor into the StagingArea.
  """
  tensor_stage = data_flow_ops.StagingArea([tensor.dtype], [tensor.shape])
  put_op = tensor_stage.put([tensor])
  warmup_op = tensor_stage.put([tf.zeros(tensor.shape, dtype=tensor.dtype)])

  # Fetch the next tensor to use.
  (tensor,) = tensor_stage.get()
  return tensor, put_op, warmup_op
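A hypothetical driver loop for _defer_tensor might look like the sketch below (not part of the benchmark code; it assumes TF 1.x graph mode and uses a random tensor as a stand-in for a gradient): run warmup_op once, then run put_op alongside every fetch of the deferred tensor.

grad = tf.random_normal([4])                      # stand-in for a real gradient
deferred_grad, put_op, warmup_op = _defer_tensor(grad)

with tf.Session() as sess:
    sess.run(warmup_op)                           # step 0 will see a zero tensor
    for step in range(3):
        # Each step consumes the tensor staged in the previous step while
        # staging the current one, so `g` lags the producer by one step.
        g, _ = sess.run([deferred_grad, put_op])
        print(step, g)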
Example 2: defer_single_device_tensors
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import StagingArea [as alias]
def defer_single_device_tensors(device_tensors):
  """Defers tensors (gradients in this case) from a single device.

  Arguments:
    device_tensors: A list of gradient tensors from a single device to defer.

  Returns:
    deferred_tensors: A list of tensors deferred for one step.
    put_ops: A list of ops that put `device_tensors` in the StagingAreas. Must
      be run every step that `deferred_tensors` is run.
    warmup_ops: Warmup ops that should be called before the first step. Put
      zero tensors into the StagingAreas.
  """
  put_ops = []
  warmup_ops = []
  deferred_tensors = []
  for tensor in device_tensors:
    deferred_tensor, put_op, warmup_op = _defer_tensor(tensor)
    deferred_tensors.append(deferred_tensor)
    put_ops.append(put_op)
    warmup_ops.append(warmup_op)
  return deferred_tensors, put_ops, warmup_ops
Example 3: assign_sub
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import StagingArea [as alias]
def assign_sub(self, delta, name=None):
  """Mimic the updates to the variable.

  Args:
    delta: is pushed into a staging buffer and will be pumped later.
    name: currently ignored; names of ops and the StagingArea are
      computed without using this passed name.

  Returns:
    The actual updates. The colocation constraint will be reapplied.
  """
  # This parameter is ignored: the StagingArea only supports setting
  # the shared name, not the names of individual ops it uses.
  del name

  # colocate_with(None, True) clears the colocation constraints.
  # Push the delta into a staging buffer.
  with ops.colocate_with(None, True), tf.device(self.var_stage_get.device):
    delta_staging_area = data_flow_ops.StagingArea(
        [self.var_stage_get.dtype], shapes=[self.var_stage_get.shape])
    delta_put_op = delta_staging_area.put([delta])
    self.variable_mgr.staging_delta_ops.append(delta_put_op)
    delta_get_op = delta_staging_area.get()[0]

  # Return the actual updates. The colocation constraint will be reapplied.
  return self.real_var.assign_sub(delta_get_op)
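The class context (variable_mgr, var_stage_get, real_var) is not shown above, so here is a standalone sketch of the same idea with hypothetical names, assuming TF 1.x and a TensorFlow version where StagingArea.get() returns a list (as the snippet above relies on): the delta is staged on the variable's device and only applied one step later, when the get side runs.

var = tf.Variable([1.0, 1.0])
delta = tf.placeholder(tf.float32, shape=[2])
with tf.device(var.device):
    stage = data_flow_ops.StagingArea([tf.float32], shapes=[[2]])
    put_delta = stage.put([delta])
    staged_delta = stage.get()[0]
update = var.assign_sub(staged_delta)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(put_delta, feed_dict={delta: [0.5, 0.5]})   # stage the first delta
    # This step applies the previously staged [0.5, 0.5] while staging [0.1, 0.1].
    _, new_val = sess.run([put_delta, update], feed_dict={delta: [0.1, 0.1]})
    print(new_val)   # [0.5, 0.5]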
Example 4: _build_image_processing
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import StagingArea [as alias]
def _build_image_processing(self, shift_ratio=0):
  """Build the image (pre)processing portion of the model graph."""
  with tf.device(self.cpu_device):
    if self.params.eval:
      subset = 'validation'
    else:
      subset = 'train'
    image_producer_ops = []
    image_producer_stages = []
    images_splits, labels_splits = self.image_preprocessor.minibatch(
        self.dataset, subset=subset, use_datasets=self.params.use_datasets,
        cache_data=self.params.cache_data, shift_ratio=shift_ratio)
    images_shape = images_splits[0].get_shape()
    labels_shape = labels_splits[0].get_shape()
    for device_num in range(len(self.devices)):
      image_producer_stages.append(data_flow_ops.StagingArea(
          [images_splits[0].dtype, labels_splits[0].dtype],
          shapes=[images_shape, labels_shape]))
      for group_index in xrange(self.batch_group_size):
        if not self.use_synthetic_gpu_images:
          batch_index = group_index + device_num * self.batch_group_size
          put_op = image_producer_stages[device_num].put(
              [images_splits[batch_index], labels_splits[batch_index]])
          image_producer_ops.append(put_op)
  return (image_producer_ops, image_producer_stages)
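The consumer side is not part of this snippet, so the following self-contained sketch (hypothetical shapes and devices, assuming TF 1.x) shows the intent: each tower gets its own StagingArea, the put ops are run every step by the image producer, and each tower dequeues its own (images, labels) pair.

devices = ['/cpu:0', '/cpu:0']                 # stand-ins for the tower devices
images = tf.random_uniform([4, 8, 8, 3])       # stand-in for one preprocessed split
labels = tf.random_uniform([4], maxval=10, dtype=tf.int32)

image_producer_ops, image_producer_stages = [], []
for _ in devices:
    stage = data_flow_ops.StagingArea([images.dtype, labels.dtype],
                                      shapes=[images.shape, labels.shape])
    image_producer_stages.append(stage)
    image_producer_ops.append(stage.put([images, labels]))

tower_outputs = []
for dev, stage in zip(devices, image_producer_stages):
    with tf.device(dev):
        tower_images, tower_labels = stage.get()
        tower_outputs.append(tf.reduce_mean(tower_images))

with tf.Session() as sess:
    sess.run(image_producer_ops)                              # warm-up fill
    means, _ = sess.run([tower_outputs, image_producer_ops])  # consume + refill
    print(means)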
Example 5: _build_image_processing
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import StagingArea [as alias]
def _build_image_processing(self, shift_ratio=0):
  """Build the image (pre)processing portion of the model graph."""
  with tf.device(self.cpu_device):
    if self.params.eval:
      subset = 'validation'
    else:
      subset = 'train'
    image_producer_ops = []
    image_producer_stages = []
    images_splits, labels_splits = self.image_preprocessor.minibatch(
        self.dataset, subset=subset, use_datasets=self.params.use_datasets,
        cache_data=self.params.cache_data, shift_ratio=shift_ratio)
    images_shape = images_splits[0].get_shape()
    labels_shape = labels_splits[0].get_shape()
    for device_num in range(len(self.devices)):
      image_producer_stages.append(data_flow_ops.StagingArea(
          [images_splits[0].dtype, labels_splits[0].dtype],
          shapes=[images_shape, labels_shape]))
      for group_index in xrange(self.batch_group_size):
        if not self.use_synthetic_gpu_images:
          batch_index = group_index + device_num * self.batch_group_size
          put_op = image_producer_stages[device_num].put(
              [images_splits[batch_index], labels_splits[batch_index]])
          image_producer_ops.append(put_op)
  return (image_producer_ops, image_producer_stages)
Example 6: __init__
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import StagingArea [as alias]
def __init__(self, input, nr_stage=1, device=None):
    """
    Args:
        input (FeedfreeInput):
        nr_stage (int): number of elements to prefetch into each StagingArea, at the beginning.
            Since enqueue and dequeue are synchronized, prefetching 1 element should be sufficient.
        device (str or None): if not None, place the StagingArea on a specific device, e.g. '/cpu:0'.
            Otherwise, they are placed under where `get_input_tensors`
            gets called, which could be unspecified in case of simple trainers.
    """
    if not isinstance(input, FeedfreeInput):
        raise ValueError("StagingInput takes a FeedfreeInput! Got {}".format(input))
    if isinstance(input, StagingInput):
        raise ValueError("StagingInput cannot be nested!")

    self._input = input
    self._nr_stage = nr_stage
    self._areas = []
    self._stage_ops = []
    self._unstage_ops = []
    self._device = device
Example 7: assign_sub
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import StagingArea [as alias]
def assign_sub(self, delta, name=None, read_value=True):
  """Mimic the updates to the variable.

  Args:
    delta: is pushed into a staging buffer and will be pumped later.
    name: currently ignored; names of ops and the StagingArea are
      computed without using this passed name.
    read_value: if True, will return something which evaluates to the new
      value of the variable; if False will return the assign op.

  Returns:
    The actual updates. The colocation constraint will be reapplied.
  """
  # This parameter is ignored: the StagingArea only supports setting
  # the shared name, not the names of individual ops it uses.
  del name

  # colocate_with(None, True) clears the colocation constraints.
  # Push the delta into a staging buffer.
  with ops.colocate_with(None, True), tf.device(self.var_stage_get.device):
    delta_staging_area = data_flow_ops.StagingArea(
        [self.var_stage_get.dtype], shapes=[self.var_stage_get.shape])
    delta_put_op = delta_staging_area.put([delta])
    self.variable_mgr.staging_delta_ops.append(delta_put_op)
    delta_get_op = delta_staging_area.get()[0]

  # Return the actual updates. The colocation constraint will be reapplied.
  return self.real_var.assign_sub(delta_get_op, read_value=read_value)
Example 8: __call__
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import StagingArea [as alias]
def __call__(self, getter, name, *args, **kwargs):
  staging_ops = self.variable_mgr.staging_vars_on_devices[self.device_num]
  if name in staging_ops:
    put_op, get_op = staging_ops[name]
    return get_op
  real_var = getter(name, *args, **kwargs)
  shape = kwargs['shape']
  dtype = kwargs['dtype']
  trainable = kwargs['trainable']
  if self.cpu_device:
    with tf.device(self.cpu_device):
      # This helps copying the weights from the parameter to this server only
      # once.
      if name in self.variable_mgr.staged_vars_on_cpu:
        cpu_var = self.variable_mgr.staged_vars_on_cpu[name]
      else:
        cpu_var = tf.identity(real_var)
        self.variable_mgr.staged_vars_on_cpu[name] = cpu_var
      var_to_stage = cpu_var
  else:
    var_to_stage = tf.identity(real_var)  # de-reference the variable.

  with tf.device(self.devices[self.device_num]):
    staging_area = data_flow_ops.StagingArea([dtype], shapes=[shape])
    put_op = staging_area.put([var_to_stage])
    get_op = staging_area.get()[0]
    staging_ops[name] = (put_op, get_op)
  if trainable:
    # For trainable variables, they are managed separately through
    # apply_gradients.
    return get_op
  else:
    # For other shadow variables, the access is decoupled through a wrapper
    # class.
    return StagedModelVariable(real_var, get_op, self.variable_mgr)
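For context, a custom getter like this is normally installed through tf.variable_scope(..., custom_getter=...). The simplified sketch below is hypothetical (it drops the variable_mgr bookkeeping and the per-device logic) and, like the snippet above, assumes a TF 1.x version where StagingArea.get() returns a list; the put ops collected here would have to be run every step before the staged values are read.

def staging_getter(getter, name, *args, **kwargs):
    # Create the real variable, then serve reads through a StagingArea.
    real_var = getter(name, *args, **kwargs)
    staging_area = data_flow_ops.StagingArea([kwargs['dtype']],
                                             shapes=[kwargs['shape']])
    put_op = staging_area.put([tf.identity(real_var)])
    tf.add_to_collection('staging_puts', put_op)   # run these once per step
    return staging_area.get()[0]

with tf.variable_scope('model', custom_getter=staging_getter):
    w = tf.get_variable('w', shape=[3], dtype=tf.float32)  # reads go via staging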
Example 9: _build_image_processing
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import StagingArea [as alias]
def _build_image_processing(self, shift_ratio=0):
  """Build the image (pre)processing portion of the model graph."""
  if self.use_synthetic_gpu_images:
    return (None, None)

  with tf.device('/cpu:0'):
    if self.params.eval:
      subset = 'validation'
    else:
      subset = 'train'
    image_producer_ops = []
    images_splits, labels_splits = self.image_preprocessor.minibatch(
        self.dataset,
        subset=subset,
        use_datasets=self.params.use_datasets,
        cache_data=self.params.cache_data,
        shift_ratio=shift_ratio)
    images_shape = images_splits[0].get_shape()
    labels_shape = labels_splits[0].get_shape()
    with tf.device('/gpu:0'):
      if self.params.eval:
        image_producer_stage = data_flow_ops.StagingArea(
            [images_splits[0].dtype, labels_splits[0].dtype],
            shapes=[images_shape, labels_shape],
            capacity=1)
      else:
        image_producer_stage = data_flow_ops.StagingArea(
            [images_splits[0].dtype, labels_splits[0].dtype],
            shapes=[images_shape, labels_shape],
            capacity=self.batch_group_size)
      put_op = image_producer_stage.put(
          [images_splits[0], labels_splits[0]])
      image_producer_ops.append(put_op)
  return (image_producer_ops, image_producer_stage)
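The capacity argument used here bounds how far the image producer can run ahead of the consumer: capacity=1 during eval keeps at most one batch staged, while capacity=batch_group_size lets several batches queue up during training. A small sketch of the semantics (hypothetical numbers, assuming TF 1.x):

area = data_flow_ops.StagingArea([tf.float32], shapes=[[4]], capacity=2)
x = tf.random_uniform([4])
put_op = area.put([x])
size_op = area.size()            # number of elements currently staged

with tf.Session() as sess:
    sess.run(put_op)
    sess.run(put_op)
    print(sess.run(size_op))     # 2 -- a third put would block until a get()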
Example 10: _prefill
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import StagingArea [as alias]
def _prefill(self, sess):
    logger.info("Pre-filling StagingArea ...")
    for k in range(self.nr_stage):
        self.stage_op.run(session=sess)
    logger.info("{} element{} put into StagingArea on each tower.".format(
        self.nr_stage, "s were" if self.nr_stage > 1 else " was"))
Example 11: _get_input_tensors
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import StagingArea [as alias]
def _get_input_tensors(self):
    inputs = self._input.get_input_tensors()
    with self._device_ctx():
        with self.cached_name_scope():
            # Putting variables to stagingarea will cause trouble
            dtypes = []
            for idx in range(len(inputs)):
                dtype = inputs[idx].dtype
                if dtype.base_dtype != dtype:     # is reference type
                    inputs[idx] = tf.identity(inputs[idx])
                dtypes.append(dtype.base_dtype)

            # TODO tensorflow/benchmarks use static shapes here,
            # though it doesn't seem to help. We can use it when it's known.
            # Setting capacity to 1 to potentially save some memory, because we should
            # expect the consumers to run slower than the producer.
            stage = StagingArea(dtypes, shapes=None, capacity=1)

            # put & get automatically inherit the name scope from the area
            self._stage_ops.append(stage.put(inputs))
            self._areas.append(stage)
            outputs = stage.get()
            if isinstance(outputs, tf.Tensor):  # when size=1, TF doesn't return a list
                outputs = [outputs]
            for vin, vout in zip(inputs, outputs):
                vout.set_shape(vin.get_shape())
            self._unstage_ops.append(outputs)
            # self._size_ops.append(stage.size())
            return outputs
Example 12: stage
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import StagingArea [as alias]
def stage(tensors):
  """Stages the given tensors in a StagingArea for asynchronous put/get."""
  stage_area = data_flow_ops.StagingArea(
      dtypes=[tensor.dtype for tensor in tensors],
      shapes=[tensor.get_shape() for tensor in tensors])
  put_op = stage_area.put(tensors)
  get_tensors = stage_area.get()
  tf.add_to_collection('STAGING_AREA_PUTS', put_op)
  return put_op, get_tensors
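A hypothetical way to use this stage() helper (not from the original project, assuming TF 1.x graph mode): stage a preprocessed batch once as a warm-up, then run the put op together with the training fetches on every step.

images = tf.random_uniform([8, 32, 32, 3])     # stand-in for a preprocessed batch
labels = tf.random_uniform([8], maxval=10, dtype=tf.int32)
put_op, (staged_images, staged_labels) = stage([images, labels])
loss = tf.reduce_mean(staged_images)           # stand-in for a real model/loss

with tf.Session() as sess:
    sess.run(put_op)                           # warm-up fill
    for _ in range(3):
        _, loss_val = sess.run([put_op, loss])
        print(loss_val)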
Example 13: _prefill
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import StagingArea [as alias]
def _prefill(self, sess):
    logger.info("Pre-filling StagingArea ...")
    for _ in range(self.nr_stage):
        self.stage_op.run(session=sess)
    logger.info("{} element{} put into StagingArea on each tower.".format(
        self.nr_stage, "s were" if self.nr_stage > 1 else " was"))
Example 14: stage
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import StagingArea [as alias]
def stage(tensors):
  """Stages the given tensors in a StagingArea for asynchronous put/get."""
  stage_area = data_flow_ops.StagingArea(
      dtypes=[tensor.dtype for tensor in tensors],
      shapes=[tensor.get_shape() for tensor in tensors])
  put_op = stage_area.put(tensors)
  get_tensors = stage_area.get()

  get_tensors = [tf.reshape(gt, t.get_shape())
                 for (gt, t) in zip(get_tensors, tensors)]
  return put_op, get_tensors
Example 15: _build_image_processing
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import StagingArea [as alias]
def _build_image_processing(self, shift_ratio=0):
  """Build the image (pre)processing portion of the model graph."""
  with tf.device(self.cpu_device):
    if self.params.eval:
      subset = 'validation'
    else:
      subset = 'train'
    image_producer_ops = []
    image_producer_stages = []
    images_splits, labels_splits = self.image_preprocessor.minibatch(
        self.dataset,
        subset=subset,
        use_datasets=self.params.use_datasets,
        cache_data=self.params.cache_data,
        shift_ratio=shift_ratio)
    images_shape = images_splits[0].get_shape()
    labels_shape = labels_splits[0].get_shape()
    for device_num in range(len(self.devices)):
      image_producer_stages.append(
          data_flow_ops.StagingArea(
              [images_splits[0].dtype, labels_splits[0].dtype],
              shapes=[images_shape, labels_shape]))
      for group_index in xrange(self.batch_group_size):
        if not self.use_synthetic_gpu_images:
          batch_index = group_index + device_num * self.batch_group_size
          put_op = image_producer_stages[device_num].put(
              [images_splits[batch_index], labels_splits[batch_index]])
          image_producer_ops.append(put_op)
  return (image_producer_ops, image_producer_stages)