

Python ops.device Method Code Examples

This article collects typical usage examples of the ops.device method from the Python module tensorflow.python.framework.ops. If you are unsure what ops.device does, how to call it, or want concrete examples, the curated code samples below may help. You can also explore other usage examples from the same module, tensorflow.python.framework.ops.


The sections below present 15 code examples of ops.device, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
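
Before the individual examples, a minimal sketch of what ops.device does may help: it is the graph-level primitive behind the public tf.device API, and ops created inside its context manager are pinned to the requested device. The sketch below assumes TF1-style graph mode (tf.compat.v1 with eager execution disabled):

import tensorflow.compat.v1 as tf
from tensorflow.python.framework import ops

tf.disable_eager_execution()

# Ops created inside the scope are assigned to the requested device.
with ops.device('/cpu:0'):
  c = tf.constant(1.0)

print(c.device)  # e.g. '/device:CPU:0'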

Example 1: build_all_reduce_device_prefixes

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import device [as alias]
def build_all_reduce_device_prefixes(job_name, num_tasks):
  """Build list of device prefix names for all_reduce.

  Args:
    job_name: 'worker', 'ps' or 'localhost'.
    num_tasks: number of jobs across which device names should be generated.

  Returns:
     A list of device name prefix strings. Each element spells out the full
     host name without adding the device.
     e.g. '/job:worker/task:0'
  """
  if job_name != 'localhost':
    return ['/job:%s/task:%d' % (job_name, d) for d in range(0, num_tasks)]
  else:
    assert num_tasks == 1
    return ['/job:%s' % job_name] 
Developer: tensorflow, Project: benchmarks, Lines: 19, Source: allreduce.py
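
Given the branch above, the helper's output is easy to verify by hand; a quick usage sketch:

build_all_reduce_device_prefixes('worker', 2)
# -> ['/job:worker/task:0', '/job:worker/task:1']

build_all_reduce_device_prefixes('localhost', 1)
# -> ['/job:localhost']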

Example 2: collective_group_key

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import device [as alias]
def collective_group_key(devices):
  """Returns a group key for the set of devices.

  Args:
    devices: list of strings naming devices in a collective group.

  Returns:
    int key uniquely identifying the set of device names.
  """
  global _group_key
  global _group_key_table
  parsed = [pydev.DeviceSpec.from_string(d) for d in devices]
  names = sorted(['%s:%d' % (d.device_type, d.device_index) for d in parsed])
  concat = ','.join(names)
  if concat not in _group_key_table.keys():
    new_key = _group_key
    _group_key += 1
    _group_key_table[concat] = new_key
  rv = _group_key_table[concat]
  return rv 
Developer: tensorflow, Project: benchmarks, Lines: 22, Source: allreduce.py
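
Because the parsed names are sorted before the table lookup, the key depends only on the set of devices, not their order. A short sketch (this assumes the module-level _group_key counter and _group_key_table dict defined elsewhere in allreduce.py):

key_a = collective_group_key(['/job:worker/task:0/device:GPU:0',
                              '/job:worker/task:0/device:GPU:1'])
key_b = collective_group_key(['/job:worker/task:0/device:GPU:1',
                              '/job:worker/task:0/device:GPU:0'])
assert key_a == key_b  # same device set, same group key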

Example 3: unpack_grad_tuple

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import device [as alias]
def unpack_grad_tuple(gv, gpt):
  """Unpack a previously packed collection of gradient tensors.

  Args:
    gv: A (grad, var) pair to be unpacked.
    gpt: A GradPackTuple describing the packing operation that produced gv.

  Returns:
    A list of (grad, var) pairs corresponding to the values that were
     originally packed into gv, maybe following subsequent operations like
     reduction.
  """
  elt_widths = [x.num_elements() for x in gpt.shapes]
  with tf.device(gv[0][0].device):
    with tf.name_scope('unpack'):
      splits = tf.split(gv[0], elt_widths)
      unpacked_gv = []
      for idx, s in enumerate(splits):
        unpacked_gv.append((tf.reshape(s, gpt.shapes[idx]), gpt.vars[idx]))
  return unpacked_gv 
Developer: tensorflow, Project: benchmarks, Lines: 22, Source: allreduce.py
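
For context, gpt is a GradPackTuple, a namedtuple defined elsewhere in allreduce.py that records the variables and shapes captured when the gradients were packed; the field names below are an assumption based on how this function reads them. A hand-rolled round-trip sketch (TF2 eager, with the packing step improvised for illustration):

import collections
import tensorflow as tf

# Assumed field layout; unpack_grad_tuple only touches .vars and .shapes.
GradPackTuple = collections.namedtuple('GradPackTuple', 'indices vars shapes')

v1 = tf.Variable(tf.zeros([2, 3]))
v2 = tf.Variable(tf.zeros([4]))
g1 = tf.ones([2, 3])
g2 = tf.ones([4])

# Pack both gradients into one flat tensor (improvised packing step).
packed = tf.concat([tf.reshape(g1, [-1]), tf.reshape(g2, [-1])], axis=0)
gpt = GradPackTuple(indices=[0, 1], vars=[v1, v2], shapes=[v1.shape, v2.shape])

unpacked_gv = unpack_grad_tuple((packed, None), gpt)
# -> [(2x3 gradient, v1), (4-element gradient, v2)]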

Example 4: value

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import device [as alias]
def value(self):
    """Returns the last snapshot of this variable.

    You usually do not need to call this method as all ops that need the value
    of the variable call it automatically through a `convert_to_tensor()` call.

    Returns a `Tensor` which holds the value of the variable.  You can not
    assign a new value to this tensor as it is not a reference to the variable.

    To avoid copies, if the consumer of the returned value is on the same device
    as the variable, this actually returns the live value of the variable, not
    a copy.  Updates to the variable are seen by the consumer.  If the consumer
    is on a different device it will get a copy of the variable.

    Returns:
      A `Tensor` containing the value of the variable.
    """
    return self._snapshot 
Developer: ryfeus, Project: lambda-packs, Lines: 20, Source: variables.py
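
A short TF1-style sketch (assuming tf.compat.v1 with eager execution disabled) of the snapshot semantics described above: each run re-executes the snapshot tensor, so same-device reads track the variable.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

v = tf.Variable(3.0)
snap = v.value()  # the snapshot tensor returned above

with tf.Session() as sess:
  sess.run(v.initializer)
  print(sess.run(snap))   # 3.0
  sess.run(v.assign(5.0))
  print(sess.run(snap))   # 5.0 -- each run re-reads the variable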

Example 5: read_value

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import device [as alias]
def read_value(self):
    """Constructs an op which reads the value of this variable.

    Should be used when there are multiple reads, or when it is desirable to
    read the value only after some condition is true.

    Returns:
     the read operation.
    """
    with ops.name_scope("Read"):
      with ops.device(self._handle.device):
        value = gen_resource_variable_ops.read_variable_op(
            self._handle, dtype=self._dtype)
    # Return an identity so it can get placed on whatever device the context
    # specifies instead of the device where the variable is.
    return array_ops.identity(value) 
Developer: ryfeus, Project: lambda-packs, Lines: 18, Source: resource_variable_ops.py
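
In modern TF the same method is available on tf.Variable; a minimal eager-mode sketch:

import tensorflow as tf

v = tf.Variable(1.0)
v.assign_add(1.0)
print(v.read_value().numpy())  # 2.0 -- an explicit read op placed with the variable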

Example 6: _get_handle_mover

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import device [as alias]
def _get_handle_mover(graph, feeder, handle):
  """Return a move subgraph for this pair of feeder and handle."""
  dtype = _get_handle_feeder(graph, feeder)
  if dtype is None:
    return None
  handle_device = TensorHandle._get_device_name(handle)
  if feeder.op.device == handle_device:
    return None
  # Now we know we have to move the tensor.
  graph_key = TensorHandle._get_mover_key(feeder, handle)
  result = graph._handle_movers.get(graph_key)
  if result is None:
    # Create mover if we haven't done it.
    holder, reader = _get_handle_reader(graph, handle, dtype)
    with graph.as_default(), graph.device(feeder.op.device):
      mover = gen_data_flow_ops._get_session_handle(reader)  # pylint: disable=protected-access
    result = (holder, mover)
    graph._handle_movers[graph_key] = result
  return result 
Developer: ryfeus, Project: lambda-packs, Lines: 21, Source: session_ops.py
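
This helper supports TF1 session handles: a tensor persisted in the session on one device can later be fed to a graph that consumes it on another device, and the mover subgraph performs the copy. A basic round trip through the public tf.compat.v1 API (no cross-device move needed in this small case):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

a = tf.constant(10.0)
handle_op = tf.get_session_handle(a)  # persist the tensor inside the session

with tf.Session() as sess:
  h = sess.run(handle_op)
  holder, value = tf.get_session_tensor(h.handle, tf.float32)
  doubled = value * 2.0
  print(sess.run(doubled, feed_dict={holder: h.handle}))  # 20.0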

Example 7: _apply_dense

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import device [as alias]
def _apply_dense(self, grad, var):
    g_acc = self.get_slot(var, "gradient_accumulator")
    gg_acc = self.get_slot(var, "gradient_squared_accumulator")
    # Performance optimization so that worker creates a copy of the global step
    # to avoid overloading the parameter server holding the global step.
    with ops.device(grad[0].device):
      global_step = array_ops.identity(self._global_step) + 1
    return training_ops.apply_adagrad_da(
        var,
        g_acc,
        gg_acc,
        grad,
        math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
        math_ops.cast(self._l1_regularization_strength, var.dtype.base_dtype),
        math_ops.cast(self._l2_regularization_strength, var.dtype.base_dtype),
        global_step,
        use_locking=self._use_locking) 
Developer: ryfeus, Project: lambda-packs, Lines: 19, Source: adagrad_da.py

Example 8: _resource_apply_dense

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import device [as alias]
def _resource_apply_dense(self, grad, var):
    g_acc = self.get_slot(var, "gradient_accumulator")
    gg_acc = self.get_slot(var, "gradient_squared_accumulator")
    # Performance optimization so that worker creates a copy of the global step
    # to avoid overloading the parameter server holding the global step.
    with ops.device(grad[0].device):
      global_step = array_ops.identity(self._global_step) + 1
    return training_ops.resource_apply_adagrad_da(
        var.handle,
        g_acc.handle,
        gg_acc.handle,
        grad,
        math_ops.cast(self._learning_rate_tensor, grad.dtype.base_dtype),
        math_ops.cast(self._l1_regularization_strength, grad.dtype.base_dtype),
        math_ops.cast(self._l2_regularization_strength, grad.dtype.base_dtype),
        global_step,
        use_locking=self._use_locking) 
Developer: ryfeus, Project: lambda-packs, Lines: 19, Source: adagrad_da.py

Example 9: _resource_apply_sparse

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import device [as alias]
def _resource_apply_sparse(self, grad, var, indices):
    g_acc = self.get_slot(var, "gradient_accumulator")
    gg_acc = self.get_slot(var, "gradient_squared_accumulator")
    # Performance optimization so that worker creates a copy of the global step
    # to avoid overloading the parameter server holding the global step.
    with ops.device(grad[0].device):
      global_step = array_ops.identity(self._global_step) + 1
    return training_ops.resource_sparse_apply_adagrad_da(
        var.handle,
        g_acc.handle,
        gg_acc.handle,
        grad,
        indices,
        math_ops.cast(self._learning_rate_tensor, grad.dtype),
        math_ops.cast(self._l1_regularization_strength, grad.dtype),
        math_ops.cast(self._l2_regularization_strength, grad.dtype),
        global_step,
        use_locking=self._use_locking) 
Developer: ryfeus, Project: lambda-packs, Lines: 20, Source: adagrad_da.py
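
Examples 7-9 all back the public AdagradDA optimizer, and each first copies the global step onto the gradient's device before invoking the fused kernel, sparing the parameter server that owns the step. Typical TF1-style usage of the optimizer itself (a sketch, assuming tf.compat.v1):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

global_step = tf.train.get_or_create_global_step()
opt = tf.train.AdagradDAOptimizer(
    learning_rate=0.1,
    global_step=global_step,
    l1_regularization_strength=0.01)

w = tf.Variable([1.0, 2.0])
loss = tf.reduce_sum(tf.square(w))
train_op = opt.minimize(loss, global_step=global_step)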

Example 10: call

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import device [as alias]
def call(self, inputs, state):
    """Run the cell on embedded inputs."""
    with ops.device("/cpu:0"):
      if self._initializer:
        initializer = self._initializer
      elif vs.get_variable_scope().initializer:
        initializer = vs.get_variable_scope().initializer
      else:
        # Default initializer for embeddings should have variance=1.
        sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
        initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

      if isinstance(state, tuple):
        data_type = state[0].dtype
      else:
        data_type = state.dtype

      embedding = vs.get_variable(
          "embedding", [self._embedding_classes, self._embedding_size],
          initializer=initializer,
          dtype=data_type)
      embedded = embedding_ops.embedding_lookup(embedding,
                                                array_ops.reshape(inputs, [-1]))

      return self._cell(embedded, state) 
Developer: ryfeus, Project: lambda-packs, Lines: 27, Source: core_rnn_cell.py
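
This call method belongs to EmbeddingWrapper in tf.contrib.rnn (TF1): integer token ids are looked up in a CPU-resident embedding table before the wrapped cell runs. A hedged TF1-only usage sketch:

import tensorflow as tf  # TF 1.x; tf.contrib was removed in TF2

cell = tf.contrib.rnn.EmbeddingWrapper(
    tf.contrib.rnn.BasicLSTMCell(64),
    embedding_classes=10000,  # vocabulary size
    embedding_size=128)

inputs = tf.placeholder(tf.int32, [32, 1])  # one token id per batch element
state = cell.zero_state(32, tf.float32)
output, new_state = cell(inputs, state)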

Example 11: inference_graph

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import device [as alias]
def inference_graph(self, data):
    with ops.device(self.device_assigner):
      # Compute activations for the neural network.
      nn_activations = [layers.fully_connected(data, self.params.layer_size)]

      for _ in range(1, self.params.num_layers):
        # pylint: disable=W0106
        nn_activations.append(
            layers.fully_connected(
                nn_activations[-1],
                self.params.layer_size))

      nn_activations_tensor = array_ops.concat(
          nn_activations, 1, name="flattened_nn_activations")

      return nn_activations_tensor 
Developer: ryfeus, Project: lambda-packs, Lines: 18, Source: fully_connected.py

Example 12: inference_graph

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import device [as alias]
def inference_graph(self, data):
    with ops.device(self.device_assigner):
      routing_probabilities = gen_training_ops.k_feature_routing_function(
          data,
          self.tree_parameters,
          self.tree_thresholds,
          max_nodes=self.params.num_nodes,
          num_features_per_node=self.params.num_features_per_node,
          layer_num=0,
          random_seed=self.params.base_random_seed)

      output = array_ops.slice(
          routing_probabilities,
          [0, self.params.num_nodes - self.params.num_leaves - 1],
          [-1, self.params.num_leaves])

      return output 
Developer: ryfeus, Project: lambda-packs, Lines: 19, Source: decisions_to_data.py

Example 13: add_remote_device

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import device [as alias]
def add_remote_device(self, remote_device):
    """Requests that fed values are sent to `remote_device`."""
    local_value = self.get_fed_tensors()

    self._num_remote_feeds += 1

    with ops.device(None):  # Bypass any existing device() calls
      with ops.device(remote_device):
        remote_q = data_flow_ops.FIFOQueue(capacity=self._capacity,
                                           dtypes=self._dtypes,
                                           shapes=self._shapes,
                                           name=self._shared_name,
                                           shared_name=self._shared_name)
        remote_enq_op = remote_q.enqueue(local_value)

    # Add a remote queue runner to feed the remote queue.
    self._add_remote_queue_runner(remote_q, [remote_enq_op]) 
Developer: ryfeus, Project: lambda-packs, Lines: 19, Source: feeder.py
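
The nested ops.device(None) is the key trick: passing None discards any device scope already in effect, so the inner ops.device(remote_device) wins even when the caller wrapped this code in its own device context. A minimal sketch of the pattern:

import tensorflow.compat.v1 as tf
from tensorflow.python.framework import ops

tf.disable_eager_execution()

with ops.device('/job:ps/task:0'):      # caller's enclosing placement
  with ops.device(None):                # bypass it, as in the code above
    with ops.device('/job:worker/task:1'):
      q = tf.FIFOQueue(10, tf.float32)

print(q.queue_ref.device)  # e.g. '/job:worker/task:1'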

Example 14: _apply_all_reduce

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import device [as alias]
def _apply_all_reduce(reduction_op, tensors):
  if not tensors:
    raise ValueError('Must pass >0 tensors to all reduce operations')
  shared_name = _get_shared_name()
  res = []
  for t in tensors:
    if not device.canonical_name(t.device):
      raise ValueError('Device assignment required for nccl collective ops')
    with ops.device(t.device):
      res.append(
          gen_nccl_ops.nccl_all_reduce(
              t,
              reduction=reduction_op,
              num_devices=len(tensors),
              shared_name=shared_name))
  return res 
Developer: ryfeus, Project: lambda-packs, Lines: 18, Source: nccl_ops.py
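
This private helper underlies the public TF1 wrappers such as tf.contrib.nccl.all_sum; the explicit device check matters because NCCL needs to know which GPU hosts each replica of the tensor. A hedged sketch assuming a machine with 2 GPUs:

import tensorflow as tf  # TF 1.x; tf.contrib was removed in TF2

per_gpu = []
for i in range(2):
  with tf.device('/gpu:%d' % i):
    per_gpu.append(tf.constant([1.0, 2.0]) * (i + 1))

summed = tf.contrib.nccl.all_sum(per_gpu)  # one reduced copy per input device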

Example 15: __init__

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import device [as alias]
def __init__(self,
               num_tasks=0,
               job_name='ps',
               device_type='CPU',
               device_index=0,
               replica=None):
    """Initialize VariableDeviceChooser.

    Usage:
      To use with 2 parameter servers:
        VariableDeviceChooser(2)

      To use without parameter servers:
        VariableDeviceChooser()
        VariableDeviceChooser(device_type='GPU') # For GPU placement

    Args:
      num_tasks: number of tasks.
      job_name: String, a name for the parameter server job.
      device_type: Optional device type string (e.g. "CPU" or "GPU")
      device_index: int.  Optional device index.  If left unspecified, device
        represents 'any' device_index.
      replica: Optional replica index.
    """
    self._job_name = job_name
    self._device_type = device_type
    self._device_index = device_index
    self._replica = replica
    self._num_tasks = num_tasks
    self._next_task_id = 0 
Developer: taehoonlee, Project: tensornets, Lines: 31, Source: variables.py
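
Instances of VariableDeviceChooser are callable device functions (op in, device string out) that round-robin variables across the parameter-server tasks. A hedged sketch based on how tf.contrib.framework wires it up in TF1 (variables.variable accepts a device callable):

import tensorflow as tf  # TF 1.x
from tensorflow.contrib.framework.python.ops import variables

chooser = variables.VariableDeviceChooser(num_tasks=2)
with tf.contrib.framework.arg_scope([variables.variable], device=chooser):
  w = variables.variable('w', shape=[10])  # e.g. /job:ps/task:0/device:CPU:0
  b = variables.variable('b', shape=[10])  # e.g. /job:ps/task:1/device:CPU:0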


Note: The tensorflow.python.framework.ops.device method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not republish without permission.