

Python utils.smart_cond Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.layers.python.layers.utils.smart_cond. If you have been wondering what exactly utils.smart_cond does, how to use it, or where to find examples of it, the hand-picked code examples below may help. You can also explore further usage examples from the module the method belongs to, tensorflow.contrib.layers.python.layers.utils.


The following presents 9 code examples of utils.smart_cond, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
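Before the examples, here is a minimal sketch of the idea behind utils.smart_cond (a simplified illustration only, not the actual contrib implementation, and it only handles Python bools and Tensors): when the predicate's value can be resolved at graph-construction time, the chosen branch function is called directly in Python; otherwise a tf.cond op is built and the branch is selected at run time.

import tensorflow as tf
from tensorflow.python.framework import tensor_util

def smart_cond_sketch(pred, fn1, fn2):
    # Try to resolve the predicate statically: a plain Python bool, or a
    # Tensor whose value is known while the graph is being built.
    if isinstance(pred, bool):
        static_value = pred
    else:
        static_value = tensor_util.constant_value(pred)  # None if unknown
    if static_value is not None:
        # Predicate is known now: call the selected function directly,
        # so no tf.cond node is added to the graph.
        return fn1() if static_value else fn2()
    # Predicate is only known at run time: fall back to tf.cond.
    return tf.cond(pred, fn1, fn2)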

Example 1: test_value

# Required module import: from tensorflow.contrib.layers.python.layers import utils [as alias]
# Or: from tensorflow.contrib.layers.python.layers.utils import smart_cond [as alias]
def test_value(self):
    fn1 = lambda: 'fn1'
    fn2 = lambda: 'fn2'
    expected = lambda v: 'fn1' if v else 'fn2'
    for v in [True, False, 1, 0]:
      o = utils.smart_cond(tf.constant(v), fn1, fn2)
      self.assertEqual(o, expected(v)) 
Author: tobegit3hub, Project: deep_image_model, Lines of code: 9, Source: utils_test.py

Example 2: test_constant

# Required module import: from tensorflow.contrib.layers.python.layers import utils [as alias]
# Or: from tensorflow.contrib.layers.python.layers.utils import smart_cond [as alias]
def test_constant(self):
    fn1 = lambda: tf.constant('fn1')
    fn2 = lambda: tf.constant('fn2')
    expected = lambda v: b'fn1' if v else b'fn2'
    for v in [True, False, 1, 0]:
      o = utils.smart_cond(tf.constant(v), fn1, fn2)
      with self.test_session():
        self.assertEqual(o.eval(), expected(v)) 
Author: tobegit3hub, Project: deep_image_model, Lines of code: 10, Source: utils_test.py

Example 3: test_variable

# Required module import: from tensorflow.contrib.layers.python.layers import utils [as alias]
# Or: from tensorflow.contrib.layers.python.layers.utils import smart_cond [as alias]
def test_variable(self):
    fn1 = lambda: tf.Variable('fn1')
    fn2 = lambda: tf.Variable('fn2')
    expected = lambda v: b'fn1' if v else b'fn2'
    for v in [True, False, 1, 0]:
      o = utils.smart_cond(tf.constant(v), fn1, fn2)
      with self.test_session() as sess:
        sess.run(tf.global_variables_initializer())
        self.assertEqual(o.eval(), expected(v)) 
Author: tobegit3hub, Project: deep_image_model, Lines of code: 11, Source: utils_test.py

Example 4: test_tensors

# Required module import: from tensorflow.contrib.layers.python.layers import utils [as alias]
# Or: from tensorflow.contrib.layers.python.layers.utils import smart_cond [as alias]
def test_tensors(self):
    fn1 = lambda: tf.constant(0) - tf.constant(1)
    fn2 = lambda: tf.constant(0) - tf.constant(2)
    expected = lambda v: -1 if v else -2
    for v in [True, False, 1, 0]:
      o = utils.smart_cond(tf.constant(v), fn1, fn2)
      with self.test_session():
        self.assertEqual(o.eval(), expected(v)) 
Author: tobegit3hub, Project: deep_image_model, Lines of code: 10, Source: utils_test.py

Example 5: dropout

# Required module import: from tensorflow.contrib.layers.python.layers import utils [as alias]
# Or: from tensorflow.contrib.layers.python.layers.utils import smart_cond [as alias]
def dropout(inputs,
            keep_prob=0.5,
            noise_shape=None,
            is_training=True,
            outputs_collections=None,
            scope=None):
  """Returns a dropout op applied to the input.

  With probability `keep_prob`, outputs the input element scaled up by
  `1 / keep_prob`, otherwise outputs `0`.  The scaling is so that the expected
  sum is unchanged.

  Args:
    inputs: the tensor to pass to the nn.dropout op.
    keep_prob: A scalar `Tensor` with the same type as x. The probability
      that each element is kept.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    is_training: A bool `Tensor` indicating whether or not the model
      is in training mode. If so, dropout is applied and values scaled.
      Otherwise, inputs is returned.
    outputs_collections: collection to add the outputs.
    scope: Optional scope for name_scope.

  Returns:
    a tensor representing the output of the operation.
  """
  with ops.name_scope(scope, 'Dropout', [inputs]) as sc:
    inputs = ops.convert_to_tensor(inputs)
    dropout_fn = lambda: nn.dropout(inputs, keep_prob, noise_shape)
    id_fn = lambda: array_ops.identity(inputs)
    outputs = utils.smart_cond(is_training, dropout_fn, id_fn)
    return utils.collect_named_outputs(outputs_collections, sc, outputs) 
Author: tobegit3hub, Project: deep_image_model, Lines of code: 35, Source: layers.py
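As a usage sketch of the pattern above (TF 1.x graph mode assumed; the tensor and names below are made up for illustration): when the predicate passed to smart_cond is a plain Python bool, the branch is resolved while the graph is built, so an inference graph contains no dropout op at all; when the predicate is a tensor such as a placeholder, a tf.cond node is created and the branch is chosen at run time.

import tensorflow as tf
from tensorflow.contrib.layers.python.layers import utils

x = tf.ones([4, 10])

# Static predicate: branch chosen at graph-construction time, no tf.cond node.
train_out = utils.smart_cond(True,
                             lambda: tf.nn.dropout(x, keep_prob=0.5),
                             lambda: tf.identity(x))

# Dynamic predicate: a tf.cond is built and evaluated when the graph runs.
is_training = tf.placeholder(tf.bool, shape=[], name="is_training")
maybe_out = utils.smart_cond(is_training,
                             lambda: tf.nn.dropout(x, keep_prob=0.5),
                             lambda: tf.identity(x))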

Example 6: _build_update_ops_variance

# Required module import: from tensorflow.contrib.layers.python.layers import utils [as alias]
# Or: from tensorflow.contrib.layers.python.layers.utils import smart_cond [as alias]
def _build_update_ops_variance(self, mean, variance, is_training):
        def build_update_ops():
            update_mean_op = moving_averages.assign_moving_average(
              variable=self._moving_mean,
              value=mean,
              decay=self._decay_rate,
              name="update_moving_mean").op

            update_variance_op = moving_averages.assign_moving_average(
              variable=self._moving_variance,
              value=variance,
              decay=self._decay_rate,
              name="update_moving_variance").op

            return update_mean_op, update_variance_op

        def build_no_ops():
            return (tf.no_op(), tf.no_op())

        # Only make the ops if we know that `is_training=True`, or the
        # value of `is_training` is unknown.
        is_training_const = utils.constant_value(is_training)
        if is_training_const is None or is_training_const:
            update_mean_op, update_variance_op = utils.smart_cond(
                is_training,
                build_update_ops,
                build_no_ops,
            )

            # Every new connection creates a new op which adds its
            # contribution to the running average when run.
            tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mean_op)
            tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_variance_op)
Author: lightingghost, Project: chemopt, Lines of code: 35, Source: batch_norm.py
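The ops added to tf.GraphKeys.UPDATE_OPS above do not run by themselves; the training loop has to execute them. A common TF 1.x pattern (sketch only; the variable, loss, and optimizer here are stand-ins for illustration, not part of the original project) is to make the train op depend on the collected update ops:

import tensorflow as tf

w = tf.Variable(1.0)
loss = tf.square(w)  # stand-in loss for illustration
optimizer = tf.train.GradientDescentOptimizer(0.1)

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    # The moving-average updates now run every time train_op is executed.
    train_op = optimizer.minimize(loss)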

Example 7: _build_update_ops_second_moment

# Required module import: from tensorflow.contrib.layers.python.layers import utils [as alias]
# Or: from tensorflow.contrib.layers.python.layers.utils import smart_cond [as alias]
def _build_update_ops_second_moment(self, mean, second_moment, is_training):
        def build_update_ops():
            update_mean_op = moving_averages.assign_moving_average(
              variable=self._moving_mean,
              value=mean,
              decay=self._decay_rate,
              name="update_moving_mean").op

            update_second_moment_op = moving_averages.assign_moving_average(
              variable=self._moving_second_moment,
              value=second_moment,
              decay=self._decay_rate,
              name="update_moving_second_moment").op

            return update_mean_op, update_second_moment_op

        def build_no_ops():
            return (tf.no_op(), tf.no_op())

        # Only make the ops if we know that `is_training=True`, or the
        # value of `is_training` is unknown.
        is_training_const = utils.constant_value(is_training)
        if is_training_const is None or is_training_const:
            update_mean_op, update_second_moment_op = utils.smart_cond(
                is_training,
                build_update_ops,
                build_no_ops,
            )

            tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_mean_op)
            tf.add_to_collection(tf.GraphKeys.UPDATE_OPS,
                                 update_second_moment_op)
Author: lightingghost, Project: chemopt, Lines of code: 31, Source: batch_norm.py

Example 8: _build_statistics_variance

# Required module import: from tensorflow.contrib.layers.python.layers import utils [as alias]
# Or: from tensorflow.contrib.layers.python.layers.utils import smart_cond [as alias]
def _build_statistics_variance(self, input_batch, reduction_indices,
                               use_batch_stats):
        self._moving_mean = tf.get_variable(
            "moving_mean",
            shape=self._mean_shape,
            collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                         tf.GraphKeys.VARIABLES],
            initializer=tf.zeros_initializer,
            trainable=False)

        self._moving_variance = tf.get_variable(
            "moving_variance",
            shape=self._mean_shape,
            collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                         tf.GraphKeys.VARIABLES],
            initializer=tf.ones_initializer(),
            trainable=False)

        def build_batch_stats():
            """Builds the batch statistics calculation ops."""
            shift = tf.add(self._moving_mean, 0)
            counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(
                input_batch,
                reduction_indices,
                keep_dims=True,
                shift=shift,
                name="batch_norm_ss")

            mean, variance = tf.nn.normalize_moments(counts,
                                               shifted_sum_x,
                                               shifted_sum_x2,
                                               shift,
                                               name="normalize_moments")

            return mean, variance

        def build_moving_stats():
            return (
                tf.identity(self._moving_mean),
                tf.identity(self._moving_variance),)

        mean, variance = utils.smart_cond(
            use_batch_stats,
            build_batch_stats,
            build_moving_stats,
        )

        return mean, variance 
Author: lightingghost, Project: chemopt, Lines of code: 50, Source: batch_norm.py

Example 9: _build_statistics_second_moment

# Required module import: from tensorflow.contrib.layers.python.layers import utils [as alias]
# Or: from tensorflow.contrib.layers.python.layers.utils import smart_cond [as alias]
def _build_statistics_second_moment(self, input_batch, reduction_indices,
                                    use_batch_stats):
        self._moving_mean = tf.get_variable(
            "moving_mean",
            shape=self._mean_shape,
            collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                         tf.GraphKeys.VARIABLES],
            initializer=tf.zeros_initializer,
            trainable=False)

        self._moving_second_moment = tf.get_variable(
            "moving_second_moment",
            shape=self._mean_shape,
            collections=[tf.GraphKeys.MOVING_AVERAGE_VARIABLES,
                         tf.GraphKeys.VARIABLES],
            initializer=tf.ones_initializer(),
            trainable=False)

        self._moving_variance = tf.sub(self._moving_second_moment,
                                       tf.square(self._moving_mean),
                                       name="moving_variance")

        def build_batch_stats():
            shift = tf.add(self._moving_mean, 0)
            counts, shifted_sum_x, shifted_sum_x2, _ = tf.nn.sufficient_statistics(
                input_batch,
                reduction_indices,
                keep_dims=True,
                shift=shift,
                name="batch_norm_ss")

            mean, variance = tf.nn.normalize_moments(counts,
                                               shifted_sum_x,
                                               shifted_sum_x2,
                                               shift,
                                               name="normalize_moments")
            second_moment = variance + tf.square(mean)

            return mean, variance, second_moment

        def build_moving_stats():
            return (
                tf.identity(self._moving_mean),
                tf.identity(self._moving_variance),
                tf.identity(self._moving_second_moment),
            )

        mean, variance, second_moment = utils.smart_cond(
            use_batch_stats,
            build_batch_stats,
            build_moving_stats,
        )

        return mean, variance, second_moment 
Author: lightingghost, Project: chemopt, Lines of code: 56, Source: batch_norm.py
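Examples 8 and 9 rely on the identity second_moment = variance + mean**2 (equivalently Var[x] = E[x**2] - E[x]**2): the second-moment variant recovers a moving variance from the moving mean and moving second moment, and build_batch_stats derives the batch second moment from the batch variance. A quick numerical check of this identity (sketch, TF 1.x session assumed):

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0, 4.0])
mean, variance = tf.nn.moments(x, axes=[0])
second_moment = tf.reduce_mean(tf.square(x))  # E[x^2]

with tf.Session() as sess:
    m, v, sm = sess.run([mean, variance, second_moment])
    print(abs(sm - (v + m ** 2)) < 1e-6)  # True: E[x^2] = Var[x] + E[x]^2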


Note: the tensorflow.contrib.layers.python.layers.utils.smart_cond method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.