

Python v1.TensorShape Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.TensorShape method in Python. If you are wondering what exactly v1.TensorShape does, how it is used, and what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the tensorflow.compat.v1 module that the method belongs to.


The following presents 15 code examples of the v1.TensorShape method, sorted by popularity by default.
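
Before diving into the project-specific examples, here is a minimal, self-contained sketch (written for this article, not taken from any of the projects below) of how tf.TensorShape objects are constructed and queried; the examples that follow combine these same calls in larger contexts.

import tensorflow.compat.v1 as tf

# A fully defined shape built from a list of ints.
s = tf.TensorShape([4, 3])
print(s.num_elements())    # 12
print(s.as_list())         # [4, 3]

# A partially defined shape; None marks an unknown dimension.
p = tf.TensorShape([None, 2])
print(p.ndims)             # 2
print(p.num_elements())    # None, because one dimension is unknown

# Shapes can be concatenated, e.g. to prepend an unknown batch dimension.
batched = tf.TensorShape([None]).concatenate([3, 3])
print(batched.as_list())   # [None, 3, 3]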

Example 1: __call__

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import TensorShape [as alias]
def __call__(self, getter, *args, **kwargs):
    size = tf.TensorShape(kwargs['shape']).num_elements()
    if size < self.small_variable_size_threshold:
      device_name = self.device_for_small_variables
    else:
      device_index, _ = min(enumerate(self.sizes), key=operator.itemgetter(1))
      device_name = self.devices[device_index]
      self.sizes[device_index] += size

    kwargs['caching_device'] = device_name
    var = getter(*args, **kwargs)
    return var


# To be used with custom_getter on tf.get_variable. Ensures the created variable
# is in the LOCAL_VARIABLES and not the GLOBAL_VARIABLES collection.
Developer: tensorflow, Project: benchmarks, Lines: 18, Source: variable_mgr_util.py
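
As the trailing comment hints, getters like the one above are meant to be plugged into tf.variable_scope via custom_getter. Below is a minimal sketch of that wiring with a trivial stand-in getter; the stand-in, the scope name, and the shapes are invented for illustration and are not how the benchmarks project wires things up. It only demonstrates how the shape keyword that __call__ inspects reaches the getter.

import tensorflow.compat.v1 as tf

def logging_getter(getter, *args, **kwargs):
  # Stand-in for the device-placing getter above: custom getters receive the
  # requested variable's shape as a keyword argument.
  size = tf.TensorShape(kwargs['shape']).num_elements()
  print('creating %s with %d elements' % (kwargs.get('name'), size))
  return getter(*args, **kwargs)

with tf.Graph().as_default():
  with tf.variable_scope('model', custom_getter=logging_getter):
    w = tf.get_variable('w', shape=[1024, 1024])
    b = tf.get_variable('b', shape=[8])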

Example 2: sparse_random_indices

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import TensorShape [as alias]
def sparse_random_indices(ratio, shape):
  """DOC."""
  assert 0 < ratio and ratio <= 1.0
  n = round_to_int(tf.TensorShape(shape).num_elements()*ratio)
  # There are two implementations. The first generates random indices
  # and wastes computation due to collisions, and the second wastes
  # memory.
  if ratio < 0.25:
    indices = {}
    if isinstance(shape, tf.TensorShape):
      shape = shape.as_list()
    while len(indices) < n:
      index = _random_index(shape)
      indices[index] = True
    return indices.keys()
  else:
    indices = _all_indices(shape)
    random.shuffle(indices)
    return indices[:n] 
Developer: deepmind, Project: lamb, Lines: 21, Source: utils.py

Example 3: testDatasetPacking

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import TensorShape [as alias]
def testDatasetPacking(self):
    dataset = tf.data.Dataset.from_generator(
        example_generator,
        output_types={"inputs": tf.int64, "targets": tf.int64},
        output_shapes={"inputs": tf.TensorShape((None,)),
                       "targets": tf.TensorShape((None,))}
    )
    dataset = generator_utils.pack_dataset(
        dataset, length=5, keys=("inputs", "targets"), use_custom_ops=False)

    with tf.Session().as_default() as sess:
      batch = dataset.make_one_shot_iterator().get_next()
      for reference in reference_packing():
        example = sess.run(batch)
        self.assertAllEqual(set(example.keys()), set(reference.keys()))
        for k in reference:
          self.assertAllEqual(example[k], reference[k]) 
Developer: tensorflow, Project: tensor2tensor, Lines: 19, Source: generator_utils_test.py

Example 4: _transpose_batch_time

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import TensorShape [as alias]
def _transpose_batch_time(x):
  """Transposes the batch and time dimensions of a Tensor.

  If the input tensor has rank < 2 it returns the original tensor. Retains as
  much of the static shape information as possible.

  Args:
    x: A Tensor.

  Returns:
    x transposed along the first two dimensions.
  """
  x_static_shape = x.get_shape()
  if x_static_shape.rank is not None and x_static_shape.rank < 2:
    return x

  x_rank = tf.rank(x)
  x_t = tf.transpose(
      x, tf.concat(([1, 0], tf.range(2, x_rank)), axis=0))
  x_t.set_shape(
      tf.TensorShape(
          [x_static_shape[1], x_static_shape[0]]).concatenate(
              x_static_shape[2:]))
  return x_t 
Developer: magenta, Project: magenta, Lines: 26, Source: seq2seq.py
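
A quick illustration of how the helper above preserves static shape information. This sketch is not from the magenta source; it assumes the _transpose_batch_time function defined in this example and TF1-style graph mode:

with tf.Graph().as_default():
  x = tf.placeholder(tf.float32, shape=[None, 10, 32])  # [batch, time, depth]
  y = _transpose_batch_time(x)
  print(y.shape.as_list())  # [10, None, 32]: batch and time swapped, depth kept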

Example 5: __init__

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import TensorShape [as alias]
def __init__(self, initialize_fn, sample_fn, next_inputs_fn,
               sample_ids_shape=None, sample_ids_dtype=None):
    """Initializer.

    Args:
      initialize_fn: callable that returns `(finished, next_inputs)`
        for the first iteration.
      sample_fn: callable that takes `(time, outputs, state)`
        and emits tensor `sample_ids`.
      next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
        and emits `(finished, next_inputs, next_state)`.
      sample_ids_shape: Either a list of integers, or a 1-D Tensor of type
        `int32`, the shape of each value in the `sample_ids` batch. Defaults to
        a scalar.
      sample_ids_dtype: The dtype of the `sample_ids` tensor. Defaults to int32.
    """
    self._initialize_fn = initialize_fn
    self._sample_fn = sample_fn
    self._next_inputs_fn = next_inputs_fn
    self._batch_size = None
    self._sample_ids_shape = tf.TensorShape(sample_ids_shape or [])
    self._sample_ids_dtype = sample_ids_dtype or tf.int32 
Developer: magenta, Project: magenta, Lines: 24, Source: seq2seq.py

Example 6: _rnn_output_size

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import TensorShape [as alias]
def _rnn_output_size(self):
    """Compute output size of RNN."""
    size = self._cell.output_size
    if self._output_layer is None:
      return size
    else:
      # To use layer's compute_output_shape, we need to convert the
      # RNNCell's output_size entries into shapes with an unknown
      # batch size.  We then pass this through the layer's
      # compute_output_shape and read off all but the first (batch)
      # dimensions to get the output size of the rnn with the layer
      # applied to the top.
      output_shape_with_unknown_batch = tf.nest.map_structure(
          lambda s: tf.TensorShape([None]).concatenate(s), size)
      layer_output_shape = self._output_layer.compute_output_shape(
          output_shape_with_unknown_batch)
      return tf.nest.map_structure(lambda s: s[1:], layer_output_shape) 
Developer: magenta, Project: magenta, Lines: 19, Source: seq2seq.py
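
The TensorShape manipulation the comment describes can also be seen in isolation. In this standalone sketch the sizes 32 and 16 are arbitrary placeholders, not values from the magenta code:

import tensorflow.compat.v1 as tf

size = tf.TensorShape([32])                      # stands in for an RNNCell's output_size entry
with_unknown_batch = tf.TensorShape([None]).concatenate(size)
print(with_unknown_batch.as_list())              # [None, 32]: unknown batch dim prepended
# After the layer's compute_output_shape, the code strips the batch dim again:
layer_output_shape = tf.TensorShape([None, 16])  # what a 16-unit layer would report
print(layer_output_shape[1:].as_list())          # [16]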

Example 7: build

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import TensorShape [as alias]
def build(self, input_shape):
    channel_axis = self._channel_axis()
    input_shape = tf.TensorShape(input_shape)
    num_channels = input_shape.as_list()[channel_axis]
    if num_channels is None:
      raise ValueError("The channel dimension of the inputs to `GDN` "
                       "must be defined.")
    self._input_rank = input_shape.ndims
    self.input_spec = tf.keras.layers.InputSpec(
        ndim=input_shape.ndims, axes={channel_axis: num_channels})

    # Sorry, lint, but these objects really are callable ...
    # pylint:disable=not-callable
    self._beta = self.beta_parameterizer(
        name="beta", shape=[num_channels], dtype=self.dtype,
        getter=self.add_weight, initializer=tf.initializers.ones())

    self._gamma = self.gamma_parameterizer(
        name="gamma", shape=[num_channels, num_channels], dtype=self.dtype,
        getter=self.add_weight,
        initializer=tf.initializers.identity(gain=self._gamma_init))
    # pylint:enable=not-callable

    super().build(input_shape) 
Developer: tensorflow, Project: compression, Lines: 26, Source: gdn.py

Example 8: testShapeInference

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import TensorShape [as alias]
def testShapeInference(self, multi_actions, normalise_entropy, gae_lambda):
    sequence_length = 4
    batch_size = 2
    self._setUp_a2c_loss(multi_actions, normalise_entropy, gae_lambda,
                         sequence_length=sequence_length, batch_size=batch_size)

    sequence_batch_shape = tf.TensorShape([sequence_length, batch_size])
    batch_shape = tf.TensorShape(batch_size)
    self.assertEqual(self._extra.discounted_returns.get_shape(),
                     sequence_batch_shape)
    self.assertEqual(self._extra.advantages.get_shape(), sequence_batch_shape)
    self.assertEqual(self._extra.policy_gradient_loss.get_shape(), batch_shape)
    self.assertEqual(self._extra.baseline_loss.get_shape(), batch_shape)
    self.assertEqual(self._extra.entropy.get_shape(), batch_shape)
    self.assertEqual(self._extra.entropy_loss.get_shape(), batch_shape)
    self.assertEqual(self._loss.get_shape(), batch_shape) 
Developer: deepmind, Project: trfl, Lines: 18, Source: policy_gradient_ops_test.py

Example 9: testEntropy

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import TensorShape [as alias]
def testEntropy(self, is_multi_actions):
    with self.test_session() as sess:
      # Large values check numerical stability through the logs
      policy_logits_np = np.array([[0, 1], [1, 2], [0, 2], [1, 1], [0, -1000],
                                   [0, 1000]])
      if is_multi_actions:
        num_action_components = 3
        policy_logits_nest = [tf.constant(policy_logits_np, dtype=tf.float32)
                              for _ in xrange(num_action_components)]
      else:
        num_action_components = 1
        policy_logits_nest = tf.constant(policy_logits_np, dtype=tf.float32)

      entropy_op = pg_ops.discrete_policy_entropy_loss(policy_logits_nest)
      entropy = entropy_op.extra.entropy
      self.assertEqual(entropy.get_shape(), tf.TensorShape(6))
      # Get these reference values in Torch with:
      #   c = nnd.EntropyCriterion()
      #   s = nn.LogSoftMax()
      #   result = c:forward(s:forward(logits))
      expected_entropy = num_action_components * np.array(
          [0.58220309, 0.58220309, 0.36533386, 0.69314718, 0, 0])
      self.assertAllClose(sess.run(entropy),
                          expected_entropy,
                          atol=1e-4) 
Developer: deepmind, Project: trfl, Lines: 27, Source: discrete_policy_gradient_ops_test.py

Example 10: testPolicyGradients

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import TensorShape [as alias]
def testPolicyGradients(self, is_multi_actions):
    if is_multi_actions:
      loss = self.multi_op.extra.policy_gradient_loss
      policy_logits_nest = self.multi_policy_logits
    else:
      loss = self.op.extra.policy_gradient_loss
      policy_logits_nest = self.policy_logits

    grad_policy_list = [
        tf.gradients(loss, policy_logits)[0] * self.num_actions
        for policy_logits in nest.flatten(policy_logits_nest)]

    for grad_policy in grad_policy_list:
      self.assertEqual(grad_policy.get_shape(), tf.TensorShape([2, 1, 3]))

    self.assertAllEqual(tf.gradients(loss, self.baseline_values), [None])
    self.assertAllEqual(tf.gradients(loss, self.invalid_grad_inputs),
                        self.invalid_grad_outputs) 
Developer: deepmind, Project: trfl, Lines: 20, Source: discrete_policy_gradient_ops_test.py

Example 11: testEntropyGradients

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import TensorShape [as alias]
def testEntropyGradients(self, is_multi_actions):
    if is_multi_actions:
      loss = self.multi_op.extra.entropy_loss
      policy_logits_nest = self.multi_policy_logits
    else:
      loss = self.op.extra.entropy_loss
      policy_logits_nest = self.policy_logits

    grad_policy_list = [
        tf.gradients(loss, policy_logits)[0] * self.num_actions
        for policy_logits in nest.flatten(policy_logits_nest)]

    for grad_policy in grad_policy_list:
      self.assertEqual(grad_policy.get_shape(), tf.TensorShape([2, 1, 3]))

    self.assertAllEqual(tf.gradients(loss, self.baseline_values), [None])
    self.assertAllEqual(tf.gradients(loss, self.invalid_grad_inputs),
                        self.invalid_grad_outputs) 
Developer: deepmind, Project: trfl, Lines: 20, Source: discrete_policy_gradient_ops_test.py

Example 12: test_tensor_array_write_read

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import TensorShape [as alias]
def test_tensor_array_write_read():
    def run(dtype_str, infer_shape, element_shape):
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            np_data = np.array([[1.0, 2.0], [3.0, 4.0]]).astype(dtype_str)
            in_data = [np_data, np_data]
            t1 = tf.constant(np_data, dtype=dtype)
            t2 = tf.constant(np_data, dtype=dtype)
            ta1 = tf.TensorArray(dtype=dtype, size=2, infer_shape=infer_shape,
                                 element_shape=element_shape)
            ta2 = ta1.write(0, t1)
            ta3 = ta2.write(1, t2)
            out = ta3.read(0)
            g = tf.get_default_graph()
            compare_tf_with_tvm([], [], 'TensorArrayReadV3:0', mode='vm')

    for dtype in ["float32", "int8"]:
        run(dtype, False, None)
        run(dtype, False, tf.TensorShape([None, 2]))
        run(dtype, True, None) 
Developer: apache, Project: incubator-tvm, Lines: 22, Source: test_forward.py

Example 13: test_tensor_array_scatter

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import TensorShape [as alias]
def test_tensor_array_scatter():
    def run(dtype_str, infer_shape):
        with tf.Graph().as_default():
            dtype = tf_dtypes[dtype_str]
            if infer_shape:
                element_shape = tf.TensorShape([tf.Dimension(None)])
            else:
                element_shape = None
            t = tf.constant(np.array([[1.0], [2.0], [3.0]]).astype(dtype_str), dtype=dtype)
            indices = tf.constant([2, 1, 0])
            ta1 = tf.TensorArray(dtype=dtype, size=3,
                                 infer_shape=infer_shape,
                                 element_shape=element_shape)
            ta2 = ta1.scatter(indices, t)
            out0 = ta2.read(0)
            out1 = ta2.read(1)
            out2 = ta2.read(2)
            g = tf.get_default_graph()
            compare_tf_with_tvm([], [], ['TensorArrayReadV3:0'], mode='vm')
            compare_tf_with_tvm([], [], ['TensorArrayReadV3_1:0'], mode='vm')
            compare_tf_with_tvm([], [], ['TensorArrayReadV3_2:0'], mode='vm')
    for dtype in ["float32", "int8"]:
        run(dtype, False)
        run(dtype, True) 
Developer: apache, Project: incubator-tvm, Lines: 26, Source: test_forward.py

Example 14: testPackRange

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import TensorShape [as alias]
def testPackRange(self):
    packing = {}
    t0 = tf.constant([0, 1, 2, 3], dtype=tf.float32)
    t1 = tf.constant([4, 5, 6, 7], dtype=tf.float32)

    gv = [(t0, 'v0'), (t1, 'v1')]
    new_t = allreduce.pack_range('0:0', packing, gv, [0, 1])
    self.assertEqual(1, new_t.shape.ndims)
    self.assertEqual(8, new_t.shape.dims[0])
    self.assertEqual(
        packing, {
            '0:0':
                allreduce.GradPackTuple(
                    indices=range(2),
                    vars=['v0', 'v1'],
                    shapes=[tf.TensorShape([4]),
                            tf.TensorShape([4])])
        })

    t2 = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=tf.float32)
    t3 = tf.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]], dtype=tf.float32)
    gv = [(t0, 'v0'), (t1, 'v1'), (t2, 'v2'), (t3, 'v3')]
    packing = {}
    new_t = allreduce.pack_range('1:0', packing, gv, [0, 3])
    self.assertEqual(1, new_t.shape.ndims)
    self.assertEqual(26, new_t.shape.dims[0])
    self.assertEqual(
        packing, {
            '1:0':
                allreduce.GradPackTuple(
                    indices=range(4),
                    vars=['v0', 'v1', 'v2', 'v3'],
                    shapes=[
                        tf.TensorShape([4]),
                        tf.TensorShape([4]),
                        tf.TensorShape([3, 3]),
                        tf.TensorShape([3, 3])
                    ])
        }) 
Developer: tensorflow, Project: benchmarks, Lines: 41, Source: allreduce_test.py
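
As a side note on the arithmetic asserted above: the packed lengths 8 and 26 are simply the summed element counts of the listed shapes, which TensorShape exposes directly. A small standalone check written for this article, not part of the benchmarks test:

import tensorflow.compat.v1 as tf

shapes = [tf.TensorShape([4]), tf.TensorShape([4]),
          tf.TensorShape([3, 3]), tf.TensorShape([3, 3])]
print(sum(s.num_elements() for s in shapes[:2]))  # 8 = 4 + 4, the first packed length
print(sum(s.num_elements() for s in shapes))      # 26 = 4 + 4 + 9 + 9, the second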

Example 15: testUnpackGradTuple

# Required import: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import TensorShape [as alias]
def testUnpackGradTuple(self):
    packing = {
        '0:0':
            allreduce.GradPackTuple(
                indices=range(4),
                vars=['v0', 'v1', 'v2', 'v3'],
                shapes=[
                    tf.TensorShape([4]),
                    tf.TensorShape([4]),
                    tf.TensorShape([3, 3]),
                    tf.TensorShape([3, 3])
                ])
    }
    tc = tf.constant([0, 1, 2, 3, 4, 5, 6, 7,
                      0, 1, 2, 3, 4, 5, 6, 7, 8,
                      0, 1, 2, 3, 4, 5, 6, 7, 8], dtype=tf.float32)
    packed_gv = [tc, 'packing_var_placeholder']
    gv = allreduce.unpack_grad_tuple(packed_gv, packing['0:0'])
    self.assertLen(gv, 4)
    self.assertEqual('v0', gv[0][1])
    self.assertEqual('v1', gv[1][1])
    self.assertEqual('v2', gv[2][1])
    self.assertEqual('v3', gv[3][1])
    self.assertEqual(1, gv[0][0].shape.ndims)
    self.assertEqual(4, gv[0][0].shape.dims[0])
    self.assertEqual(1, gv[1][0].shape.ndims)
    self.assertEqual(4, gv[1][0].shape.dims[0])
    self.assertEqual(2, gv[2][0].shape.ndims)
    self.assertEqual(3, gv[2][0].shape.dims[0])
    self.assertEqual(3, gv[2][0].shape.dims[1]) 
Developer: tensorflow, Project: benchmarks, Lines: 32, Source: allreduce_test.py


Note: The tensorflow.compat.v1.TensorShape method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and distribution or reuse should follow each project's License. Please do not reproduce this article without permission.