
Python tensorflow.add_to_collection Function Code Examples

This article collects typical usage examples of the Python function tensorflow.add_to_collection. If you are unsure what add_to_collection does, how to call it, or what real-world usage looks like, the curated examples below should help.


The following presents 15 code examples of the add_to_collection function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
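
Before the examples, here is a minimal self-contained sketch (not taken from any of the projects below, and assuming TensorFlow 1.x, where add_to_collection lives at the top level) of the core pattern: tf.add_to_collection stores a tensor under a string key on the default graph, and tf.get_collection retrieves everything stored under that key.

import tensorflow as tf

# Register two scalar tensors under the same collection key.
a = tf.constant(1.0, name='loss_a')
b = tf.constant(2.0, name='loss_b')
tf.add_to_collection('losses', a)
tf.add_to_collection('losses', b)

# Retrieve everything under the key and sum it, the idiom most
# examples below use to assemble a total loss.
total = tf.add_n(tf.get_collection('losses'), name='total_loss')

with tf.Session() as sess:
    print(sess.run(total))  # 3.0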

Example 1: add_gradients_summary

def add_gradients_summary(grads, name_prefix="", name_suffix="",
                          collection_key=None):
    """ add_gradients_summary.

    Add histogram summary for given gradients.

    Arguments:
        grads: A list of `Tensor`. The gradients to summarize.
        name_prefix: `str`. A prefix to add to summary scope.
        name_suffix: `str`. A suffix to add to summary scope.
        collection_key: `str`. A collection to store the summaries.

    Returns:
        The list of created gradient summaries.

    """

    # Add histograms for gradients.
    summ = []
    for grad, var in grads:
        if grad is not None:
            summ_name = format_scope_name(var.op.name, name_prefix,
                                          "Gradients/" + name_suffix)
            summ_exists = summary_exists(summ_name)
            if summ_exists is not None:
                tf.add_to_collection(collection_key, summ_exists)
                summ.append(summ_exists)
            else:
                summ.append(get_summary("histogram", summ_name, grad,
                                        collection_key))
    return summ
Developer: rickyall, Project: tflearn, Lines: 31, Source: summaries.py
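
For readers without the tflearn helpers (format_scope_name, summary_exists, get_summary), here is a self-contained sketch of the underlying idea in plain TensorFlow 1.x, collecting one histogram summary per gradient; the collection key 'grad_summaries' is illustrative:

import tensorflow as tf

x = tf.get_variable('x', [4])
loss = tf.reduce_sum(tf.square(x))
grads = tf.train.GradientDescentOptimizer(0.1).compute_gradients(loss)

for grad, var in grads:
    if grad is not None:
        summ = tf.summary.histogram(var.op.name + '/Gradients', grad)
        tf.add_to_collection('grad_summaries', summ)  # illustrative key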

Example 2: _variable_with_weight_decay

def _variable_with_weight_decay(name, shape, stddev, wd):
    var = _variable_on_cpu(name, shape,
                           tf.truncated_normal_initializer(stddev=stddev))
    if wd is not None:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Developer: amoliu, Project: Renju-AI, Lines: 7, Source: AI_multi_GPU_rollout_v3.py
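
The 'losses' collection populated here is only half the pattern; a hedged, self-contained sketch of the full life cycle (variable creation registers the decay term, and training code later sums the collection; names and shapes are illustrative):

import tensorflow as tf

def weights_with_decay(name, shape, wd):
    var = tf.get_variable(name, shape,
                          initializer=tf.truncated_normal_initializer(stddev=0.1))
    if wd is not None:
        tf.add_to_collection('losses',
                             tf.multiply(tf.nn.l2_loss(var), wd, name=name + '_decay'))
    return var

w1 = weights_with_decay('w1', [784, 256], wd=5e-4)
w2 = weights_with_decay('w2', [256, 10], wd=5e-4)
reg_loss = tf.add_n(tf.get_collection('losses'), name='reg_loss')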

Example 3: _variable_with_weight_decay

def _variable_with_weight_decay(shape, stddev, wd):
    """Helper to create an initialized Variable with weight decay.

    Note that the Variable is initialized with a truncated normal
    distribution.
    A weight decay is added only if one is specified.

    Args:
      shape: list of ints
      stddev: standard deviation of a truncated Gaussian
      wd: add L2Loss weight decay multiplied by this float. If None, weight
          decay is not added for this Variable.

    Returns:
      Variable Tensor
    """
    
    initializer = tf.truncated_normal_initializer(stddev=stddev)
    var = tf.get_variable('weights', shape=shape,
                          initializer=initializer)
    # var = tf.get_variable(name="weights", shape=shape, 
    #                       initializer=tf.contrib.layers.xavier_initializer())

    if wd and (not tf.get_variable_scope().reuse):
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Developer: ruyi345, Project: FCN-TensorFlow, Lines: 28, Source: inception_resnet_v2_fcn_8s.py

Example 4: batch_norm

# `create_var` is a project helper (it likely wraps tf.get_variable);
# `moving_averages` requires the import below.
from tensorflow.python.training import moving_averages

def batch_norm(x, decay=0.999, epsilon=1e-03, is_training=True,
               scope="scope"):
    x_shape = x.get_shape()
    num_inputs = x_shape[-1]
    reduce_dims = list(range(len(x_shape) - 1))
    with tf.variable_scope(scope):
        beta = create_var("beta", [num_inputs,],
                               initializer=tf.zeros_initializer())
        gamma = create_var("gamma", [num_inputs,],
                                initializer=tf.ones_initializer())
        # for inference
        moving_mean = create_var("moving_mean", [num_inputs,],
                                 initializer=tf.zeros_initializer(),
                                 trainable=False)
        moving_variance = create_var("moving_variance", [num_inputs],
                                     initializer=tf.ones_initializer(),
                                     trainable=False)
    if is_training:
        mean, variance = tf.nn.moments(x, axes=reduce_dims)
        update_move_mean = moving_averages.assign_moving_average(moving_mean,
                                                mean, decay=decay)
        update_move_variance = moving_averages.assign_moving_average(moving_variance,
                                                variance, decay=decay)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_move_mean)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, update_move_variance)
    else:
        mean, variance = moving_mean, moving_variance
    return tf.nn.batch_normalization(x, mean, variance, beta, gamma, epsilon)
Developer: kaka7, Project: DeepLearning_tutorials, Lines: 28, Source: ResNet50.py
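
The moving-average updates registered in tf.GraphKeys.UPDATE_OPS do not run by themselves; a common consumption pattern (a sketch under TF 1.x assumptions, not from the ResNet50 source) gates the train op on that collection:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 8])
y = tf.layers.batch_normalization(x, training=True)  # registers its updates in UPDATE_OPS
loss = tf.reduce_mean(tf.square(y))

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    # The moving means/variances now update on every training step.
    train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)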

Example 5: activation

def activation(incoming, activation='linear', name='activation'):

    """ Activation.

    Apply given activation to incoming tensor.

    Arguments:
        incoming: A `Tensor`. The incoming tensor.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'linear'.

    """

    if isinstance(activation, str):
        x = activations.get(activation)(incoming)
    elif hasattr(activation, '__call__'):
        x = activation(incoming)
    else:
        raise ValueError('Unknown activation type.')

    # Track output tensor.
    tf.add_to_collection(tf.GraphKeys.LAYER_TENSOR + '/' + name, x)

    return x
Developer: igormq, Project: tflearn, Lines: 25, Source: core.py

Example 6: func_wrapper

def func_wrapper(weights):
    # `regularizer` is captured from the enclosing wrapper function.
    if weights.dtype.base_dtype == tf.float16:
        # Defer regularization for fp16 variables: store the
        # (weights, fn) pair and disable the inner regularizer.
        tf.add_to_collection('REGULARIZATION_FUNCTIONS', (weights, regularizer))
        return None
    else:
        return regularizer(weights)
Developer: fotwo, Project: OpenSeq2Seq, Lines: 7, Source: mp_wrapper.py
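
The 'REGULARIZATION_FUNCTIONS' key is specific to OpenSeq2Seq; elsewhere that project retrieves the stored (weights, regularizer) pairs and applies them in float32. A hedged sketch of how such deferred pairs could be consumed (variable names here are illustrative, not the project's):

import tensorflow as tf

reg_terms = []
for weights, regularizer in tf.get_collection('REGULARIZATION_FUNCTIONS'):
    # Apply each deferred regularizer to a float32 cast of its fp16 weights.
    reg_terms.append(regularizer(tf.cast(weights, tf.float32)))
if reg_terms:
    reg_loss = tf.add_n(reg_terms, name='regularization_loss')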

Example 7: loss

def loss(H, logits, labels):
    """Calculates the loss from the logits and the labels.

    Args:
      H: hyperparameter dict; H['arch']['num_classes'] gives the class count.
      logits: Logits tensor, float - [batch_size, NUM_CLASSES].
      labels: Labels tensor, int32 - [batch_size].

    Returns:
      loss: Loss tensor of type float.
    """
    # Convert from sparse integer labels in the range [0, NUM_CLASSES)
    # to 1-hot dense float vectors (that is we will have batch_size vectors,
    # each with NUM_CLASSES values, all of which are 0.0 except there will
    # be a 1.0 in the entry corresponding to the label).
    with tf.name_scope('loss'):
        batch_size = tf.size(labels)
        labels = tf.expand_dims(labels, 1)
        indices = tf.expand_dims(tf.range(0, batch_size), 1)
        concated = tf.concat([indices, labels], axis=1)
        onehot_labels = tf.sparse_to_dense(
            concated, tf.stack([batch_size, H['arch']['num_classes']]), 1.0, 0.0)
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
            logits=logits, labels=onehot_labels, name='xentropy')
        cross_entropy_mean = tf.reduce_mean(
            cross_entropy, name='xentropy_mean')
        tf.add_to_collection('losses', cross_entropy_mean)

        loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
    return loss
Developer: TensorVision, Project: MediSeg, Lines: 30, Source: VGG8.py

Example 8: loss

def loss(logits, labels):
  """Add L2Loss to all the trainable variables.

  Add summary for "Loss" and "Loss/avg".
  Args:
    logits: Logits from inference().
    labels: Labels from distorted_inputs or inputs(). 1-D tensor
            of shape [batch_size]

  Returns:
    Loss tensor of type float.
  """
  # Reshape the labels into a dense Tensor of
  # shape [batch_size, NUM_CLASSES].
  sparse_labels = tf.reshape(labels, [FLAGS.batch_size, 1])
  indices = tf.reshape(tf.range(0, FLAGS.batch_size, 1), [FLAGS.batch_size, 1])
  concated = tf.concat([indices, sparse_labels], axis=1)
  dense_labels = tf.sparse_to_dense(concated,
                                    [FLAGS.batch_size, NUM_CLASSES],
                                    1.0, 0.0)

  # Calculate the average cross entropy loss across the batch.
  cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
      logits=logits, labels=dense_labels, name='cross_entropy_per_example')
  cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
  tf.add_to_collection('losses', cross_entropy_mean)

  # The total loss is defined as the cross entropy loss plus all of the weight
  # decay terms (L2 loss).
  return tf.add_n(tf.get_collection('losses'), name='total_loss')
Developer: bicimsiz, Project: tensorflow, Lines: 30, Source: cifar10.py

Example 9: _variable_with_weight_decay

def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  var = _variable_on_cpu(
      name,
      shape,
      tf.truncated_normal_initializer(stddev=stddev, dtype=dtype))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Developer: 2020zyc, Project: tensorflow, Lines: 25, Source: cifar10.py

Example 10: weight_variable

def weight_variable(shape, initializer=None, init_val=None, wd=None, name=None, trainable=True):
    """Initialize weights.

    Args:
        shape: shape of the weights, list of int
        initializer: optional initializer; defaults to a truncated normal
        init_val: optional explicit initial value; overrides `initializer`
        wd: weight decay
        name: variable name
        trainable: whether the variable is trainable
    """
    log = logger.get()

    if initializer is None:
        # initializer = tf.truncated_normal(shape, stddev=0.01)
        initializer = tf.truncated_normal_initializer(stddev=0.01)
    if init_val is None:
        var = tf.Variable(initializer(shape), name=name, trainable=trainable)
    else:
        var = tf.Variable(init_val, name=name, trainable=trainable)

    # log.info(var.name)
    # if init_val is not None:
    #     if hasattr(init_val, 'shape'):
    #         log.info('Initialized with array shape {}'.format(init_val.shape))
    #     else:
    #         log.info('Initialized with {}'.format(init_val))

    if wd:
        weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
        tf.add_to_collection('losses', weight_decay)
    return var
Developer: ziyu-zhang, Project: ins-seg-public, Lines: 28, Source: nnlib.py

Example 11: _variable

def _variable(name, shape, initializer, wd=None):
  var = tf.get_variable(name, shape, initializer=initializer)

  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var
Developer: adhsu, Project: deepaccent, Lines: 7, Source: utils.py

Example 12: _construct

    def _construct(self):
        """
        Construct the model; main part of it goes here
        """
        # our query = m_u + e_i
        query = (self._cur_user, self._cur_item)
        neg_query = (self._cur_user, self._cur_item_negative)

        # Positive
        neighbor = self._mem_layer(query,
                                   self.user_memory(self.input_neighborhoods),
                                   self.user_output(self.input_neighborhoods),
                                   self.input_neighborhood_lengths,
                                   self.config.max_neighbors)[-1].output
        self.score = self._output_module(tf.concat([self._cur_user * self._cur_item,
                                                    neighbor], axis=1))

        # Negative
        neighbor_negative = self._mem_layer(neg_query,
                                            self.user_memory(self.input_neighborhoods_negative),
                                            self.user_output(self.input_neighborhoods_negative),
                                            self.input_neighborhood_lengths_negative,
                                            self.config.max_neighbors)[-1].output
        negative_output = self._output_module(tf.concat(
            [self._cur_user * self._cur_item_negative, neighbor_negative], axis=1))

        # Loss and Optimizer
        self.loss = LossLayer()(self.score, negative_output)
        self._optimizer = OptimizerLayer(self.config.optimizer, clip=self.config.grad_clip,
                                         params=self.config.optimizer_params)
        self.train = self._optimizer(self.loss)

        tf.add_to_collection(GraphKeys.PREDICTION, self.score)
Developer: dotrado, Project: CollaborativeMemoryNetwork, Lines: 33, Source: cmn.py
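
Registering the score under a custom key such as GraphKeys.PREDICTION (a project-specific constant, not a TensorFlow built-in) pays off at serving time: after restoring the graph, the tensor can be recovered by key alone. A hedged sketch; the checkpoint paths and the literal key 'prediction' are illustrative assumptions:

import tensorflow as tf

with tf.Session() as sess:
    # Rebuild the graph from the saved meta file, then restore weights.
    saver = tf.train.import_meta_graph('model.ckpt.meta')  # hypothetical path
    saver.restore(sess, 'model.ckpt')
    score = tf.get_collection('prediction')[0]  # hypothetical key value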

Example 13: weighted_loss

def weighted_loss(logits, labels, num_classes, head=None):
    """ median-frequency re-weighting """
    with tf.name_scope('loss'):

        logits = tf.reshape(logits, (-1, num_classes))

        epsilon = tf.constant(value=1e-10)

        logits = logits + epsilon

        # construct one-hot label array
        label_flat = tf.reshape(labels, (-1, 1))

        # should be [batch ,num_classes]
        labels = tf.reshape(tf.one_hot(label_flat, depth=num_classes), (-1, num_classes))

        softmax = tf.nn.softmax(logits)

        cross_entropy = -tf.reduce_sum(tf.multiply(labels * tf.log(softmax + epsilon), head), axis=[1])

        cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')

        tf.add_to_collection('losses', cross_entropy_mean)

        loss = tf.add_n(tf.get_collection('losses'), name='total_loss')

    return loss
Developer: Ray-Leung, Project: Tensorflow-SegNet, Lines: 27, Source: model.py

Example 14: add_trainable_vars_summary

def add_trainable_vars_summary(variables, name_prefix="", name_suffix="",
                               collection_key=None):
    """ add_trainable_vars_summary.

    Add histogram summary for given variables weights.

    Arguments:
        variables: A list of `Variable`. The variables to summarize.
        name_prefix: `str`. A prefix to add to summary scope.
        name_suffix: `str`. A suffix to add to summary scope.
        collection_key: `str`. A collection to store the summaries.

    Returns:
        The list of created weights summaries.

    """

    # Add histograms for trainable variables.
    summ = []
    for var in variables:
        summ_name = format_scope_name(var.op.name, name_prefix, name_suffix)
        summ_exists = summary_exists(summ_name)
        if summ_exists is not None:
            tf.add_to_collection(collection_key, summ_exists)
            summ.append(summ_exists)
        else:
            summ.append(get_summary("histogram", summ_name, var, collection_key))
    return summ
Developer: rickyall, Project: tflearn, Lines: 28, Source: summaries.py
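
Summaries gathered under collection_key are typically merged and written together; a minimal self-contained sketch of that consumption step, under TF 1.x assumptions (the key 'my_summaries' and the log directory are illustrative):

import tensorflow as tf

v = tf.get_variable('v', [10])
tf.add_to_collection('my_summaries', tf.summary.histogram('v_hist', v))

merged = tf.summary.merge(tf.get_collection('my_summaries'))
writer = tf.summary.FileWriter('/tmp/logs', tf.get_default_graph())
# sess.run(merged) would then be passed to writer.add_summary(...).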

Example 15: inference

def inference(input_tensor, train, regularizer):
    # Layer 1: convolution
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable("weight",
                [CONV1_SIZE,CONV1_SIZE,NUM_CHANNELS,CONV1_DEEP],
                initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable("biases",[CONV1_DEEP],
                 initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor,conv1_weights,
                             strides=[1,1,1,1],padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1,conv1_biases))
    # Layer 2: max pooling
    with tf.name_scope('layer2-pool1'):
        pool1 = tf.nn.max_pool(relu1,ksize=[1,2,2,1],
                               strides=[1,2,2,1],padding='SAME')
    # Layer 3: convolution
    with tf.variable_scope('layer3-conv2'):
        conv2_weights = tf.get_variable("weight",
                [CONV2_SIZE,CONV2_SIZE,CONV1_DEEP,CONV2_DEEP],
                initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable("biases",[CONV2_DEEP],
                 initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1,conv2_weights,
                             strides=[1,1,1,1],padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2,conv2_biases))    
        
    # Layer 4: max pooling
    with tf.name_scope('layer4-pool2'):
        pool2 = tf.nn.max_pool(relu2,ksize=[1,2,2,1],
                               strides=[1,2,2,1],padding='SAME')
        
    pool_shape = pool2.get_shape().as_list()
    nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
    
    reshaped = tf.reshape(pool2,[pool_shape[0],nodes])
    
    # Layer 5: fully connected
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable("weight",[nodes,FC_SIZE],
                initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected layers' weights are regularized.
        if regularizer is not None:
            tf.add_to_collection('losses',regularizer(fc1_weights))
        fc1_biases = tf.get_variable("bias",[FC_SIZE],
                initializer=tf.constant_initializer(0.1))
        fc1 = tf.nn.relu(tf.matmul(reshaped,fc1_weights) + fc1_biases)
        if train: fc1 = tf.nn.dropout(fc1,0.5)

    # Layer 6: fully connected
    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable("weight",[FC_SIZE,NUM_LABELS],
                initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected layers' weights are regularized.
        if regularizer is not None:
            tf.add_to_collection('losses',regularizer(fc2_weights))
        fc2_biases = tf.get_variable("bias",[NUM_LABELS],
                initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1,fc2_weights) + fc2_biases

    return logit
Developer: yyzahuopu, Project: Deep-learning, Lines: 60, Source: mnist_inferenceCNN.py
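
In a companion training script for an inference function like this one (a hedged sketch, not the original project's training code), the regularization terms registered in 'losses' are combined with the data loss:

import tensorflow as tf

logits = tf.placeholder(tf.float32, [None, 10])  # stand-in for inference(...)
labels = tf.placeholder(tf.int64, [None])

cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=labels)
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)

# Total loss = cross entropy + every weight-decay term added by inference().
loss = tf.add_n(tf.get_collection('losses'), name='total_loss')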


Note: The tensorflow.add_to_collection examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any use or distribution should follow the corresponding project's license. Please do not repost without permission.