

Python framework.ops Code Examples

This article collects typical usage examples of the tensorflow.python.framework.ops module in Python. If you are wondering what framework.ops is for, how to use it, or what real-world examples look like, the curated code samples below may help. You can also explore further usage examples from its containing package, tensorflow.python.framework.


Thirteen code examples using framework.ops are shown below, sorted by popularity by default.

Example 1: meta_minimize

# Required import: from tensorflow.python import framework [as alias]
# Or: from tensorflow.python.framework import ops [as alias]
def meta_minimize(self, make_loss, len_unroll, learning_rate=0.01, **kwargs):
    """Returns an operator minimizing the meta-loss.

    Args:
      make_loss: Callable which returns the optimizee loss; note that this
          should create its ops in the default graph.
      len_unroll: Number of steps to unroll.
      learning_rate: Learning rate for the Adam optimizer.
      **kwargs: keyword arguments forwarded to meta_loss.

    Returns:
      namedtuple containing (step, update, reset, fx, x)
    """
    info = self.meta_loss(make_loss, len_unroll, **kwargs)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    step = optimizer.minimize(info.loss)
    return MetaStep(step, *info[1:]) 
Developer: deepmind, Project: learning-to-learn, Lines: 19, Source: meta.py
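For context, a minimal usage sketch. This assumes `optimizer` is an already-constructed MetaOptimizer instance from the learning-to-learn project, running under a TF1 session; the quadratic optimizee below is hypothetical:

import tensorflow as tf

def make_loss():
    # Hypothetical optimizee: minimize ||x||^2 in the default graph.
    x = tf.get_variable("x", shape=[2],
                        initializer=tf.random_normal_initializer())
    return tf.reduce_sum(tf.square(x))

step, update, reset, fx, x = optimizer.meta_minimize(make_loss, len_unroll=20)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(reset)  # presumably reinitializes the optimizee variables
    for _ in range(100):
        sess.run([step, update])  # one meta-training iteration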

Example 2: _add_variable_collection

# Required import: from tensorflow.python import framework [as alias]
# Or: from tensorflow.python.framework import ops [as alias]
def _add_variable_collection(weight_collections):
  if weight_collections:
    weight_collections = list(
        set(list(weight_collections) + [ops.GraphKeys.GLOBAL_VARIABLES]))
  return weight_collections


# TODO(jamieas): remove the following logic once all FeatureColumn types are
# supported for sequences.
# pylint: disable=protected-access 
Developer: ryfeus, Project: lambda-packs, Lines: 12, Source: feature_column_ops.py
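To clarify what this helper does, a small sketch (note that `_add_variable_collection` is private; this only illustrates its behavior):

from tensorflow.python.framework import ops

print(_add_variable_collection(None))          # None: falsy input passes through
print(_add_variable_collection(['my_vars']))   # e.g. ['my_vars', 'variables']
# ops.GraphKeys.GLOBAL_VARIABLES == 'variables'; the set() round-trip
# deduplicates but leaves the list order unspecified.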

Example 3: forward_train

# Required import: from tensorflow.python import framework [as alias]
# Or: from tensorflow.python.framework import ops [as alias]
def forward_train(self, train_input):

        batch_norm_params = {'epsilon': 1e-5,
                             'scale': True,
                             'is_training': True,
                             'updates_collections': ops.GraphKeys.UPDATE_OPS}

        with slim.arg_scope([layers.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.conv2d],
                                weights_initializer=he_normal_fanout(),
                                weights_regularizer=slim.l2_regularizer(self.cfg['NET']['weight_l2_scale'])):
                final_logit = self._forward(train_input)

        return final_logit 
Developer: yuanyuanli85, Project: tf-hrnet, Lines: 16, Source: model.py

Example 4: forward_eval

# Required import: from tensorflow.python import framework [as alias]
# Or: from tensorflow.python.framework import ops [as alias]
def forward_eval(self, eval_input):

        batch_norm_params = {'epsilon': 1e-5,
                             'scale': True,
                             'is_training': False,
                             'updates_collections': ops.GraphKeys.UPDATE_OPS}

        with slim.arg_scope([layers.batch_norm], **batch_norm_params):
            with slim.arg_scope([slim.conv2d],
                                weights_regularizer=slim.l2_regularizer(self.cfg['NET']['weight_l2_scale'])):
                final_logit = self._forward(eval_input)

        return final_logit 
Developer: yuanyuanli85, Project: tf-hrnet, Lines: 15, Source: model.py
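Because both forward_train and forward_eval register the batch-norm moving-average updates under ops.GraphKeys.UPDATE_OPS, a training loop must run those update ops alongside the train step. A common TF1 pattern for this (a sketch, not code from this project):

import tensorflow as tf

# `loss` is assumed to be built from forward_train()'s logits.
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)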

Example 5: model_summary

# Required import: from tensorflow.python import framework [as alias]
# Or: from tensorflow.python.framework import ops [as alias]
def model_summary(self):

        cnt = Counter()
        ops = ['ResizeNearestNeighbor', 'Relu', 'Conv2D']

        for op in tf.get_default_graph().get_operations():
            if op.type in ops:
                cnt[op.type] += 1

        print(cnt) 
Developer: yuanyuanli85, Project: tf-hrnet, Lines: 12, Source: model.py
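The same counting idea works on any default graph; for illustration, a standalone sketch:

import tensorflow as tf
from collections import Counter

tf.reset_default_graph()
x = tf.placeholder(tf.float32, [None, 8])
y = tf.nn.relu(tf.layers.dense(x, 4))

cnt = Counter(op.type for op in tf.get_default_graph().get_operations())
print(cnt['Relu'], cnt['MatMul'])  # 1 1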

Example 6: _reduced_kernel_size_for_small_input

# Required import: from tensorflow.python import framework [as alias]
# Or: from tensorflow.python.framework import ops [as alias]
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

If the shape of the input images is unknown at graph construction time, this
function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  TODO(jrru): Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.slim.ops._two_element_tuple cannot
  handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return tf.stack([tf.minimum(shape[1], kernel_size[0]),
                        tf.minimum(shape[2], kernel_size[1])])

  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [
        min(shape[1], kernel_size[0]), min(shape[2], kernel_size[1])
    ]
  return kernel_size_out 
Developer: mlperf, Project: training_results_v0.5, Lines: 32, Source: inception_v2_tpu_model.py
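A quick sketch of the intended behavior in a TF1 graph: when the spatial dimensions are statically known and smaller than the requested kernel, the kernel shrinks to fit.

import tensorflow as tf

small = tf.placeholder(tf.float32, [None, 5, 5, 1024])
large = tf.placeholder(tf.float32, [None, 224, 224, 3])

print(_reduced_kernel_size_for_small_input(small, [7, 7]))  # [5, 5]
print(_reduced_kernel_size_for_small_input(large, [7, 7]))  # [7, 7]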

Example 7: _reduced_kernel_size_for_small_input

# Required import: from tensorflow.python import framework [as alias]
# Or: from tensorflow.python.framework import ops [as alias]
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
  """Define kernel size which is automatically reduced for small input.

If the shape of the input images is unknown at graph construction time, this
function assumes that the input images are large enough.

  Args:
    input_tensor: input tensor of size [batch_size, height, width, channels].
    kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]

  Returns:
    a tensor with the kernel size.

  Make this function work with unknown shapes. Theoretically, this
  can be done with the code below. Problems are two-fold: (1) If the shape was
  known, it will be lost. (2) inception.tf.contrib.slim.ops._two_element_tuple
  cannot handle tensors that define the kernel size.
      shape = tf.shape(input_tensor)
      return tf.stack([tf.minimum(shape[1], kernel_size[0]),
                        tf.minimum(shape[2], kernel_size[1])])

  """
  shape = input_tensor.get_shape().as_list()
  if shape[1] is None or shape[2] is None:
    kernel_size_out = kernel_size
  else:
    kernel_size_out = [
        min(shape[1], kernel_size[0]), min(shape[2], kernel_size[1])
    ]
  return kernel_size_out 
Developer: google-research, Project: tf-slim, Lines: 33, Source: inception_v3.py

Example 8: _input_from_feature_columns

# Required import: from tensorflow.python import framework [as alias]
# Or: from tensorflow.python.framework import ops [as alias]
def _input_from_feature_columns(columns_to_tensors,
                                feature_columns,
                                weight_collections,
                                trainable,
                                scope,
                                output_rank,
                                default_name):
  """Implementation of `input_from(_sequence)_feature_columns`."""
  columns_to_tensors = columns_to_tensors.copy()
  check_feature_columns(feature_columns)
  with variable_scope.variable_scope(scope,
                                     default_name=default_name,
                                     values=columns_to_tensors.values()):
    output_tensors = []
    transformer = _Transformer(columns_to_tensors)
    if weight_collections:
      weight_collections = list(set(list(weight_collections) +
                                    [ops.GraphKeys.GLOBAL_VARIABLES]))

    for column in sorted(set(feature_columns), key=lambda x: x.key):
      with variable_scope.variable_scope(None,
                                         default_name=column.name,
                                         values=columns_to_tensors.values()):
        transformed_tensor = transformer.transform(column)
        if output_rank == 3:
          transformed_tensor = nest.map_structure(
              functools.partial(
                  _maybe_reshape_input_tensor,
                  column_name=column.name,
                  output_rank=output_rank), transformed_tensor)
        try:
          # pylint: disable=protected-access
          arguments = column._deep_embedding_lookup_arguments(
              transformed_tensor)
          output_tensors.append(
              fc._embeddings_from_arguments(  # pylint: disable=protected-access
                  column,
                  arguments,
                  weight_collections,
                  trainable,
                  output_rank=output_rank))

        except NotImplementedError as ee:
          try:
            # pylint: disable=protected-access
            output_tensors.append(column._to_dnn_input_layer(
                transformed_tensor,
                weight_collections,
                trainable,
                output_rank=output_rank))
          except ValueError as e:
            raise ValueError('Error creating input layer for column: {}.\n'
                             '{}, {}'.format(column.name, e, ee))
    return array_ops.concat(output_tensors, output_rank - 1) 
Developer: ryfeus, Project: lambda-packs, Lines: 56, Source: feature_column_ops.py
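This private helper backs the public tf.contrib.layers.input_from_feature_columns. A minimal sketch of the public entry point (assuming TF 1.x with tf.contrib available):

import tensorflow as tf

price = tf.contrib.layers.real_valued_column('price')
features = {'price': tf.constant([[1.0], [2.0]])}

# Builds the dense input layer; internally this routes through
# _input_from_feature_columns.
net = tf.contrib.layers.input_from_feature_columns(
    columns_to_tensors=features,
    feature_columns=[price])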

Example 9: inception_v2_arg_scope

# Required import: from tensorflow.python import framework [as alias]
# Or: from tensorflow.python.framework import ops [as alias]
def inception_v2_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars',
                           batch_norm_decay=0.9997,
                           batch_norm_epsilon=0.001,
                           updates_collections=ops.GraphKeys.UPDATE_OPS,
                           use_fused_batchnorm=True):
  """Defines the default InceptionV2 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.
    batch_norm_decay: Decay for batch norm moving average
    batch_norm_epsilon: Small float added to variance to avoid division by zero
    updates_collections: Collections for the update ops of the layer
    use_fused_batchnorm: Enable fused batchnorm.

  Returns:
    An `arg_scope` to use for the inception v2 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      # collection containing update_ops.
      'updates_collections': updates_collections,
      # Enable fused batchnorm.
      'fused': use_fused_batchnorm,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }

  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc 
Developer: mlperf, Project: training_results_v0.5, Lines: 51, Source: inception_v2_tpu_model.py
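The arg scopes in this example and the two that follow are consumed the same way: wrap network construction in the returned scope so every conv/FC layer picks up the regularizer, initializer, and batch-norm settings. A minimal sketch (assuming an importable inception_v2 network definition alongside this arg scope):

import tensorflow as tf
slim = tf.contrib.slim

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(inception_v2_arg_scope(weight_decay=4e-5)):
    logits, end_points = inception_v2(images, num_classes=1001,
                                      is_training=True)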

Example 10: inception_v4_arg_scope

# Required import: from tensorflow.python import framework [as alias]
# Or: from tensorflow.python.framework import ops [as alias]
def inception_v4_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars',
                           batch_norm_decay=0.9997,
                           batch_norm_epsilon=0.001,
                           updates_collections=ops.GraphKeys.UPDATE_OPS,
                           use_fused_batchnorm=True,
                           activation_fn=nn_ops.relu):
  """Defines the default InceptionV3 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.
    batch_norm_decay: Decay for batch norm moving average
    batch_norm_epsilon: Small float added to variance to avoid division by zero
    updates_collections: Collections for the update ops of the layer
    use_fused_batchnorm: Enable fused batchnorm.
    activation_fn: Activation function for conv2d.

  Returns:
    An `arg_scope` to use for the inception v4 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      # collection containing update_ops.
      'updates_collections': updates_collections,
      # Use fused batch norm if possible.
      'fused': use_fused_batchnorm,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }

  normalizer_fn = slim.batch_norm
  normalizer_params = batch_norm_params
  # Set weight_decay for weights in Conv and FC layers.
  with slim.arg_scope(
      [slim.conv2d, slim.fully_connected],
      weights_regularizer=slim.l2_regularizer(weight_decay)):
    with slim.arg_scope(
        [slim.conv2d],
        weights_initializer=slim.variance_scaling_initializer(),
        activation_fn=activation_fn,
        normalizer_fn=normalizer_fn,
        normalizer_params=normalizer_params) as sc:
      return sc 
Developer: mlperf, Project: training_results_v0.5, Lines: 55, Source: inception_v4_model.py

Example 11: inception_v3_arg_scope

# Required import: from tensorflow.python import framework [as alias]
# Or: from tensorflow.python.framework import ops [as alias]
def inception_v3_arg_scope(weight_decay=0.00004,
                           batch_norm_var_collection='moving_vars',
                           batch_norm_decay=0.9997,
                           batch_norm_epsilon=0.001,
                           updates_collections=ops.GraphKeys.UPDATE_OPS,
                           use_fused_batchnorm=True):
  """Defines the default InceptionV3 arg scope.

  Args:
    weight_decay: The weight decay to use for regularizing the model.
    batch_norm_var_collection: The name of the collection for the batch norm
      variables.
    batch_norm_decay: Decay for batch norm moving average
    batch_norm_epsilon: Small float added to variance to avoid division by zero
    updates_collections: Collections for the update ops of the layer
    use_fused_batchnorm: Enable fused batchnorm.

  Returns:
    An `arg_scope` to use for the inception v3 model.
  """
  batch_norm_params = {
      # Decay for the moving averages.
      'decay': batch_norm_decay,
      # epsilon to prevent 0s in variance.
      'epsilon': batch_norm_epsilon,
      # collection containing update_ops.
      'updates_collections': updates_collections,
      # Use fused batch norm if possible.
      'fused': use_fused_batchnorm,
      # collection containing the moving mean and moving variance.
      'variables_collections': {
          'beta': None,
          'gamma': None,
          'moving_mean': [batch_norm_var_collection],
          'moving_variance': [batch_norm_var_collection],
      }
  }

  # Set weight_decay for weights in Conv and FC layers.
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      weights_regularizer=regularizers.l2_regularizer(weight_decay)):
    with arg_scope(
        [layers.conv2d],
        weights_initializer=initializers.variance_scaling_initializer(),
        activation_fn=nn_ops.relu,
        normalizer_fn=layers_lib.batch_norm,
        normalizer_params=batch_norm_params) as sc:
      return sc 
Developer: google-research, Project: tf-slim, Lines: 51, Source: inception_v3.py

Example 12: custom_op

# Required import: from tensorflow.python import framework [as alias]
# Or: from tensorflow.python.framework import ops [as alias]
def custom_op(op: Union[CustomOp, CompilableOp, TFCompiledOp], stateful=True, name=None,
              use_autodiff=False, compile_only=False, return_handle=False):
    """
        Registers a custom Tensorflow operator from `CustomOp`, 
        `CompilableOp`, or `TFCompiledOp` objects.
        @param op The custom operator. If numpy is not used, automatic 
                    differentiation via Tensorflow applies.
        @param stateful True if the operation is not a pure function (enables
                        sub-expression elimination optimizations if False).
        @param name Specify a custom name for this operation.
        @param use_autodiff If true, uses tensorflow tensors, otherwise 
                            assumes numpy arrays.
        @param compile_only If true, returns a TFCompiledOp instead of an instantiated op
        @param return_handle (for C++ ops) If true, also returns a direct handle
                             to the operator object and library as a 3-tuple:
                             (operator, library, handle).
        @return A tf.Operation object (or a function) that calls the custom operator.
    """
    if isinstance(op, CompilableOp):
        result = _custom_cpp_op(op, stateful, name)
        if compile_only:
            return result
        else:
            op = result
    if isinstance(op, TFCompiledOp):
        result = _create_op_handle(op)
        if return_handle:
            return result
        else:
            return result[0]
    elif isinstance(op, CustomOp):
        if use_autodiff:
            return op.forward

        def _fwd(*inputs):
            return op.forward(*inputs)
        def _bwd(tfop, *grads):
            def _actual_bwd(*args):
                return op.backward(args[:len(grads)], 
                                     args[len(grads):(len(grads)+len(tfop.inputs))], 
                                     args[(len(grads)+len(tfop.inputs)):])
            return tf.py_func(_actual_bwd, 
                              (list(grads) + list(tfop.inputs) + list(tfop.outputs)), 
                              [inp.dtype for inp in op.input_descriptors], 
                              stateful=stateful)

        # Gradient replacement adapted from https://gist.github.com/harpone/3453185b41d8d985356cbe5e57d67342

        # Generate a unique name to avoid duplicates
        rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
        tf.RegisterGradient(rnd_name)(_bwd)

        def result(*inputs):
            g = tf.get_default_graph()
            with g.gradient_override_map({"PyFunc": rnd_name, "PyFuncStateless": rnd_name}):
                return tf.py_func(_fwd, inputs, 
                                  [out.dtype for out in op.output_descriptors],
                                  stateful=stateful, name=name)
        return result 
Developer: deep500, Project: deep500, Lines: 61, Source: tf.py
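The key technique here, independent of the deep500 wrapper types, is attaching a custom gradient to tf.py_func via gradient_override_map. A standalone TF1 sketch of just that pattern (a hypothetical square op, not deep500 code):

import numpy as np
import tensorflow as tf

def _fwd(x):
    return np.square(x)

def _bwd(op, grad):
    # d/dx x^2 = 2x * upstream gradient, computed in numpy
    return tf.py_func(lambda x, g: (2.0 * x * g).astype(np.float32),
                      [op.inputs[0], grad], tf.float32, stateful=False)

# Unique gradient name to avoid collisions, as in the snippet above
rnd_name = 'PyFuncGrad' + str(np.random.randint(0, int(1e8)))
tf.RegisterGradient(rnd_name)(_bwd)

x = tf.constant([1.0, 2.0, 3.0])
g = tf.get_default_graph()
with g.gradient_override_map({'PyFunc': rnd_name, 'PyFuncStateless': rnd_name}):
    y = tf.py_func(_fwd, [x], tf.float32, stateful=False)
grad_x, = tf.gradients(y, x)

with tf.Session() as sess:
    print(sess.run(grad_x))  # [2. 4. 6.]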

Example 13: batch_norm_new

# Required import: from tensorflow.python import framework [as alias]
# Or: from tensorflow.python.framework import ops [as alias]
def batch_norm_new(name, input_var, is_train, decay=0.999, epsilon=1e-5):
    """Batch normalization modified from BatchNormLayer in Tensorlayer.
    Source: <https://github.com/zsdonghao/tensorlayer/blob/master/tensorlayer/layers.py#L2190>
    """

    inputs_shape = input_var.get_shape()
    axis = list(range(len(inputs_shape) - 1))
    params_shape = inputs_shape[-1:]

    with tf.variable_scope(name) as scope:
        # Trainable beta and gamma variables
        beta = tf.get_variable('beta',
                                shape=params_shape,
                                initializer=tf.zeros_initializer)
        gamma = tf.get_variable('gamma',
                                shape=params_shape,
                                initializer=tf.random_normal_initializer(mean=1.0, stddev=0.002))
        
        # Moving mean and variance updated during training
        moving_mean = tf.get_variable('moving_mean',
                                      params_shape,
                                      initializer=tf.zeros_initializer,
                                      trainable=False)
        moving_variance = tf.get_variable('moving_variance',
                                          params_shape,
                                          initializer=tf.constant_initializer(1.),
                                          trainable=False)
        
        # Compute mean and variance along axis
        batch_mean, batch_variance = tf.nn.moments(input_var, axis, name='moments')

        # Define ops to update moving_mean and moving_variance
        update_moving_mean = moving_averages.assign_moving_average(moving_mean, batch_mean, decay, zero_debias=False)
        update_moving_variance = moving_averages.assign_moving_average(moving_variance, batch_variance, decay, zero_debias=False)

        # Define a function that:
        # 1. Updates moving_mean & moving_variance with batch_mean & batch_variance
        # 2. Returns batch_mean & batch_variance
        def mean_var_with_update():
            with tf.control_dependencies([update_moving_mean, update_moving_variance]):
                return tf.identity(batch_mean), tf.identity(batch_variance)

        # Perform different ops for training and testing
        if is_train:
            mean, variance = mean_var_with_update()
            normed = tf.nn.batch_normalization(input_var, mean, variance, beta, gamma, epsilon)
        
        else:
            normed = tf.nn.batch_normalization(input_var, moving_mean, moving_variance, beta, gamma, epsilon)
        # mean, variance = tf.cond(
        #     is_train,
        #     mean_var_with_update, # Training
        #     lambda: (moving_mean, moving_variance) # Testing - it will use the moving_mean and moving_variance (fixed during test) that are computed during training
        # )
        # normed = tf.nn.batch_normalization(input_var, mean, variance, beta, gamma, epsilon)

        return normed 
Developer: akaraspt, Project: deepsleepnet, Lines: 59, Source: nn.py
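A minimal sketch of wiring this layer up for training and evaluation (assuming the module-level imports from the source file, notably moving_averages from tensorflow.python.training):

import numpy as np
import tensorflow as tf
from tensorflow.python.training import moving_averages

x = tf.placeholder(tf.float32, [None, 32, 32, 64])
train_out = batch_norm_new('bn1', x, is_train=True)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    eval_out = batch_norm_new('bn1', x, is_train=False)  # shares beta/gamma/moving stats

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.randn(8, 32, 32, 64).astype(np.float32)
    sess.run(train_out, feed_dict={x: batch})  # normalizes and updates moving stats
    sess.run(eval_out, feed_dict={x: batch})   # normalizes with the moving stats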


Note: The tensorflow.python.framework.ops examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Observe the corresponding project's license when redistributing or using the code; do not reproduce this compilation without permission.