

Python tensorflow.while Method Code Examples

This article collects typical usage examples of the tensorflow.while method in Python. If you are wondering what tensorflow.while does, how to use it, or what it looks like in real code, the curated method examples below may help. You can also explore further usage examples from the tensorflow package itself.


The following presents 3 code examples of the tensorflow.while method, sorted by popularity by default.
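Despite the page title, there is no callable literally named tensorflow.while (while is a Python keyword); all three examples below use the graph-level loop construct tf.while_loop. As a quick orientation, here is a minimal sketch of the basic pattern, assuming TensorFlow 1.x-style graph mode via tf.compat.v1 (the counter loop and values are illustrative only, not taken from the examples):

# A minimal sketch (not from the examples below): tf.while_loop with a
# counter condition and a body that squares x on each iteration.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

i = tf.constant(0)
x = tf.constant(2.0)

cond = lambda i, x: tf.less(i, 5)          # keep looping while i < 5
body = lambda i, x: (tf.add(i, 1), x * x)  # one iteration: bump i, square x

final_i, final_x = tf.while_loop(cond, body, [i, x])

with tf.Session() as sess:
  print(sess.run([final_i, final_x]))  # [5, 4294967296.0], i.e. 2**32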

Example 1: testUseWithinWhileLoop

# Required import: import tensorflow [as alias]
# Or: from tensorflow import while [as alias]
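# Context from the original test file (not shown on this page): `hub` is
# tensorflow_hub, `tf_v1` is tensorflow.compat.v1, and `double_module_fn`
# builds a module that multiplies its input by a trainable variable v
# (2.0, judging by the expected outputs below).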
def testUseWithinWhileLoop(self):
    with tf.Graph().as_default():
      spec = hub.create_module_spec(double_module_fn)
      m = hub.Module(spec)
      i = tf.constant(0)
      x = tf.constant(10.0)
      p = tf_v1.placeholder(dtype=tf.int32)
      c = lambda i, x: tf.less(i, p)
      b = lambda i, x: (tf.add(i, 1), m(x))
      oi, ox = tf.while_loop(c, b, [i, x])  # ox = v**p * x
      v = m.variables[0]
      dodv = tf.gradients(ox, v)[0]  # d ox / dv = p*v**(p-1) * x
      dodx = tf.gradients(ox, x)[0]  # d ox / dx = v**p
      with tf_v1.Session() as sess:
        sess.run(tf_v1.global_variables_initializer())
        self.assertAllEqual(sess.run([oi, ox], feed_dict={p: 1}), [1, 20])
        self.assertAllEqual(sess.run([oi, ox], feed_dict={p: 2}), [2, 40])
        self.assertAllEqual(sess.run([oi, ox], feed_dict={p: 4}), [4, 160])
        # Gradients also use the control flow structures setup earlier.
        # Also check they are working properly.
        self.assertAllEqual(sess.run([dodv, dodx], feed_dict={p: 1}), [10, 2])
        self.assertAllEqual(sess.run([dodv, dodx], feed_dict={p: 2}), [40, 4])
        self.assertAllEqual(sess.run([dodv, dodx], feed_dict={p: 4}), [320, 16])

  # tf.map_fn() is merely a wrapper around tf.while(), but just to be sure... 
Developer ID: tensorflow, Project: hub, Lines of code: 27, Source: native_module_test.py
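The point of the test above is that tf.gradients can differentiate through the control-flow structure built by tf.while_loop, even when the loop body calls a hub.Module. Here is a stripped-down sketch of the same gradient check without the hub dependency (the variable v and the multiply-by-v body stand in for the module; values assume v = 2.0 and x = 10.0 as in the test):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

p = tf.placeholder(dtype=tf.int32)
v = tf.get_variable('v', initializer=2.0)
x = tf.constant(10.0)

# Multiply x by v, p times: ox == v**p * x.
_, ox = tf.while_loop(lambda i, x: tf.less(i, p),
                      lambda i, x: (tf.add(i, 1), v * x),
                      [tf.constant(0), x])

dodv = tf.gradients(ox, v)[0]  # p * v**(p-1) * x
dodx = tf.gradients(ox, x)[0]  # v**p

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run([ox, dodv, dodx], feed_dict={p: 3}))  # [80.0, 120.0, 8.0]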

Example 2: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import while [as alias]
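# Context from the original Fold source (not shown on this page): `six` is the
# six compatibility library, `create_variable_scope` is a helper defined
# elsewhere in the Fold codebase, and this __init__ belongs to Fold's Layer
# class (its base class is not shown here).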
def __init__(self, input_type=None, output_type=None, name_or_scope=None):
    """Creates the layer.

    Args:
      input_type: A type.
      output_type: A type.
      name_or_scope: A string or variable scope. If a string, a new variable
        scope will be created by calling
        [`create_variable_scope`](#create_variable_scope), with defaults
        inherited from the current variable scope. If no caching device is set,
        it will be set to `lambda op: op.device`. This is because `tf.while` can
        be very inefficient if the variables it uses are not cached locally.
    """
    if name_or_scope is None: name_or_scope = type(self).__name__
    if isinstance(name_or_scope, tf.VariableScope):
      self._vscope = name_or_scope
      name = str(self._vscope.name)
    elif isinstance(name_or_scope, six.string_types):
      self._vscope = create_variable_scope(name_or_scope)
      name = name_or_scope
    else:
      raise TypeError('name_or_scope must be a tf.VariableScope or a string: '
                      '%s' % (name_or_scope,))
    if self._vscope.caching_device is None:
      self._vscope.set_caching_device(lambda op: op.device)
    super(Layer, self).__init__(input_type, output_type, name)

    if not hasattr(self, '_constructor_name'):
      self._constructor_name = '__.%s' % self.__class__.__name__
    if not hasattr(self, '_constructor_args'):
      self._constructor_args = None
    if not hasattr(self, '_constructor_kwargs'):
      self._constructor_kwargs = None 
Developer ID: tensorflow, Project: fold, Lines of code: 35, Source: layers.py
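The caching_device handling above addresses a concrete performance issue: inside tf.while_loop, every read of an uncached variable may fetch it again from its parameter device on each iteration. A minimal sketch of the same idea with a plain tf.compat.v1 variable scope (names and shapes are illustrative; create_variable_scope itself is a Fold helper not shown on this page):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

# Cache variable reads on the device of the consuming op, so the loop body
# does not re-fetch w from the parameter device on every iteration.
with tf.variable_scope('layer', caching_device=lambda op: op.device):
  w = tf.get_variable('w', shape=[4, 4])

def body(i, x):
  return i + 1, tf.matmul(x, w)  # reads the locally cached copy of w

_, y = tf.while_loop(lambda i, x: tf.less(i, 10), body,
                     [tf.constant(0), tf.ones([1, 4])])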

Example 3: forward

# Required import: import tensorflow [as alias]
# Or: from tensorflow import while [as alias]
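# Context from the original elpips source (not shown on this page): `numbers`
# is the standard-library module, and `as_tuple`, `for_each`, `apply_ensemble`
# and `self.sample_ensemble` are elpips helpers for sampling and applying the
# random input transformations mentioned in the docstring.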
def forward(self, image, reference):
		'''Evaluates distances between images in 'image' and 'reference' (data in NHWC order).
		   Returns an N-element distance vector.
		   
		   If 'image' is a tuple, evaluates all the images in the tuple with the same input transformations
		   and dropout as 'reference'. A different set of input transformations for each would result in
		   unnecessary uncertainty in determining which of the images is closest to the reference. The
		   returned value is a tuple of N-element distance vectors.'''
		  
		if isinstance(image, list):
			raise Exception('Parameter \'image\' must be a tensor or a tuple of tensors.')
		
		image_in = as_tuple(image)
		
		def cond(i, loss_sum):
			return tf.less(i, tf.cast(self.config.average_over, tf.int32))
		
		def body(i, loss_sum):
			ensemble = self.sample_ensemble(self.config)
			
			ensemble_X = for_each(image_in, lambda X: apply_ensemble(self.config, ensemble, X))
			ensemble_X = for_each(ensemble_X, lambda X: 2.0 * X - 1.0)

			ensemble_R = apply_ensemble(self.config, ensemble, reference)			
			ensemble_R = 2.0 * ensemble_R - 1.0
			
			loss = self.network.forward(ensemble_X, ensemble_R)
			loss_sum += tf.stack(loss, axis=0)
			
			loss_sum.set_shape([len(image_in), self.config.batch_size])
			
			return i+1, loss_sum

		if isinstance(self.config.average_over, numbers.Number) and self.config.average_over == 1:
			# Skip tf.while for trivial single iterations.
			_, loss_sum = body(0, tf.zeros([len(image_in), self.config.batch_size], dtype=self.config.dtype))
		else:
			# Run multiple times for any other average_over count.
			_, loss_sum = tf.while_loop(cond, body, (0, tf.zeros([len(image_in), self.config.batch_size], dtype=self.config.dtype)), back_prop=self.back_prop)
			loss_sum /= tf.cast(self.config.average_over, self.config.dtype)

		
		if isinstance(image, tuple):
			return tuple((loss_sum[i, :] for i in range(len(image))))
		else:
			return tf.reshape(loss_sum, [self.config.batch_size]) 
Developer ID: mkettune, Project: elpips, Lines of code: 48, Source: elpips.py
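Example 3 uses tf.while_loop purely as an averaging device: each iteration of body draws a fresh random ensemble, adds its loss to a running sum, and the sum is divided by average_over at the end (for average_over == 1 the loop is skipped, and dividing by 1 would be a no-op anyway). A generic sketch of that accumulate-then-average pattern (the random loss below is a placeholder for the ensemble evaluation; names are illustrative):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

batch_size, average_over = 8, 16

def cond(i, loss_sum):
  return tf.less(i, average_over)

def body(i, loss_sum):
  # Stand-in for one stochastic evaluation; random ops inside the body are
  # re-sampled on every loop iteration.
  loss = tf.random.uniform([batch_size])
  return i + 1, loss_sum + loss

_, loss_sum = tf.while_loop(cond, body,
                            [tf.constant(0), tf.zeros([batch_size])],
                            back_prop=False)
loss_mean = loss_sum / float(average_over)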


Note: The tensorflow.while method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and distribution and use should follow each project's license. Please do not reproduce without permission.