

Python tensorflow.nn Method Code Examples

This article collects typical usage examples of the tensorflow.nn module in Python. If you have been wondering what tensorflow.nn does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the tensorflow package it belongs to.


Fifteen code examples of the tensorflow.nn method are shown below, sorted by popularity by default.

Example 1: embedding_lookup

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nn [as alias]
def embedding_lookup(embedding_matrix, indices, ids, weights, size):
  """Performs a weighted embedding lookup.

  Args:
    embedding_matrix: float Tensor from which to do the lookup.
    indices: int Tensor for the output rows of the looked up vectors.
    ids: int Tensor vectors to look up in the embedding_matrix.
    weights: float Tensor weights to apply to the looked up vectors.
    size: int number of output rows. Needed since some output rows may be
        empty.

  Returns:
    Weighted embedding vectors.
  """
  embeddings = tf.nn.embedding_lookup([embedding_matrix], ids)
  # TODO(googleuser): allow skipping weights.
  broadcast_weights_shape = tf.concat([tf.shape(weights), [1]], 0)
  embeddings *= tf.reshape(weights, broadcast_weights_shape)
  embeddings = tf.unsorted_segment_sum(embeddings, indices, size)
  return embeddings 
Author: ringringyi, Project: DOTA_models, Lines: 22, Source: network_units.py
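
A minimal usage sketch for the function above, assuming TensorFlow 1.x; the toy tensors and values here are illustrative only:

import tensorflow as tf

# Toy setup: 4 embeddings of dimension 3, gathered into 2 output rows.
embedding_matrix = tf.constant([[0., 0., 1.],
                                [0., 1., 0.],
                                [1., 0., 0.],
                                [1., 1., 1.]])
ids = tf.constant([0, 3, 1])           # rows of the matrix to fetch
indices = tf.constant([0, 0, 1])       # output row for each fetched vector
weights = tf.constant([0.5, 0.5, 2.])  # per-vector weights
result = embedding_lookup(embedding_matrix, indices, ids, weights, size=2)

with tf.Session() as sess:
    print(sess.run(result))  # row 0: 0.5*e0 + 0.5*e3; row 1: 2.0*e1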

Example 2: get_rnn_cell_trainable_variables

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nn [as alias]
def get_rnn_cell_trainable_variables(cell):
    """Returns the list of trainable variables of an RNN cell.

    Args:
        cell: an instance of :tf_main:`RNNCell <nn/rnn_cell/RNNCell>`.

    Returns:
        list: trainable variables of the cell.
    """
    cell_ = cell
    while True:
        try:
            return cell_.trainable_variables
        except AttributeError:
            # Cell wrappers (e.g., `DropoutWrapper`) cannot directly access
            # `trainable_variables` because they don't initialize their
            # superclass (tf==v1.3), so reach through to the wrapped cell.
            cell_ = cell_._cell  # pylint: disable=protected-access
Author: qkaren, Project: Counterfactual-StoryRW, Lines: 20, Source: layers.py
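
A short usage sketch, assuming TensorFlow 1.x; DropoutWrapper is exactly the kind of wrapper the unwrapping loop is there for:

import tensorflow as tf

cell = tf.nn.rnn_cell.DropoutWrapper(
    tf.nn.rnn_cell.LSTMCell(num_units=8), output_keep_prob=0.9)

# Variables are created lazily, so run the cell once before querying them.
inputs = tf.zeros([2, 4])
state = cell.zero_state(batch_size=2, dtype=tf.float32)
_ = cell(inputs, state)

print(get_rnn_cell_trainable_variables(cell))  # the LSTM's kernel and bias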

Example 3: conv_1d

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nn [as alias]
def conv_1d(inputs, weights, biases,
            stride=1, padding='SAME',
            activation='relu', norm=None,
            dropout=False, dropout_rate=None,
            is_training=True):
    hidden = tf.nn.conv1d(tf.cast(inputs, tf.float32), weights, stride=stride,
                          padding=padding) + biases

    if norm is not None:
        if norm == 'batch':
            hidden = tf.layers.batch_normalization(
                hidden,
                training=is_training
            )
        elif norm == 'layer':
            hidden = tf.contrib.layers.layer_norm(hidden)

    if activation:
        hidden = getattr(tf.nn, activation)(hidden)

    if dropout and dropout_rate is not None:
        hidden = tf.layers.dropout(hidden, rate=dropout_rate,
                                   training=is_training)

    return hidden 
Author: uber, Project: ludwig, Lines: 27, Source: convolutional_modules.py
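
A usage sketch with made-up shapes, assuming TensorFlow 1.x; for tf.nn.conv1d the filter shape is [filter_width, in_channels, out_channels]:

import tensorflow as tf

inputs = tf.random_normal([2, 10, 5])  # batch of 2 sequences, length 10, 5 channels
weights = tf.get_variable('w1', shape=[3, 5, 8])
biases = tf.get_variable('b1', shape=[8], initializer=tf.zeros_initializer())

hidden = conv_1d(inputs, weights, biases,
                 stride=1, padding='SAME',
                 activation='relu', norm='batch',
                 dropout=True, dropout_rate=0.5,
                 is_training=True)  # shape [2, 10, 8]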

Example 4: conv_2d

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nn [as alias]
def conv_2d(inputs, weights, biases,
            stride=1, padding='SAME',
            activation='relu', norm=None,
            dropout=False, dropout_rate=None,
            is_training=True):
    hidden = tf.nn.conv2d(inputs, weights, strides=[1, stride, stride, 1],
                          padding=padding) + biases

    if norm is not None:
        if norm == 'batch':
            hidden = tf.layers.batch_normalization(
                hidden,
                training=is_training
            )
        elif norm == 'layer':
            hidden = tf.contrib.layers.layer_norm(hidden)

    if activation:
        hidden = getattr(tf.nn, activation)(hidden)

    if dropout and dropout_rate is not None:
        hidden = tf.layers.dropout(hidden, rate=dropout_rate,
                                   training=is_training)

    return hidden 
Author: uber, Project: ludwig, Lines: 27, Source: convolutional_modules.py
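
The 2-D variant follows the same pattern with NHWC inputs and [height, width, in_channels, out_channels] filters; note that the activation string is resolved dynamically via getattr(tf.nn, activation), so any tf.nn activation name ('relu', 'tanh', 'elu', ...) works. A sketch with illustrative shapes, assuming TensorFlow 1.x:

import tensorflow as tf

inputs = tf.random_normal([2, 32, 32, 3])  # batch of 2 RGB images, 32x32
weights = tf.get_variable('w2', shape=[3, 3, 3, 16])
biases = tf.get_variable('b2', shape=[16], initializer=tf.zeros_initializer())

# stride=2 with SAME padding halves the spatial dimensions.
hidden = conv_2d(inputs, weights, biases, stride=2,
                 activation='tanh')  # shape [2, 16, 16, 16]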

Example 5: build_output

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nn [as alias]
def build_output(self, inputs, inferences):
    scores = tf.nn.softmax(inferences, name='scores')
    tf.add_to_collection('outputs', scores)

    with tf.name_scope('labels'):
      label_indices = tf.argmax(inferences, 1, name='arg_max')
      labels = self.classification.output_labels(label_indices)
      tf.add_to_collection('outputs', labels)

    keys = self.classification.keys(inputs)
    if keys:
      # Key feature, if it exists, is a passthrough to the output.
      # The use of identity is to name the tensor and correspondingly the output field.
      keys = tf.identity(keys, name='key')
      tf.add_to_collection('outputs', keys)

    return {
      'label': labels,
      'score': scores
    } 
Author: TensorLab, Project: tensorfx, Lines: 22, Source: _ff.py
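
build_output depends on the surrounding model class (self.classification); a stripped-down sketch of the same softmax-plus-argmax pattern on a bare logits tensor, assuming TensorFlow 1.x:

import tensorflow as tf

logits = tf.random_normal([4, 10])  # stand-in for the model's inferences

scores = tf.nn.softmax(logits, name='scores')
tf.add_to_collection('outputs', scores)

with tf.name_scope('labels'):
    label_indices = tf.argmax(logits, 1, name='arg_max')
    tf.add_to_collection('outputs', label_indices)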

Example 6: efficientnet

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nn [as alias]
def efficientnet(width_coefficient=None,
                 depth_coefficient=None,
                 dropout_rate=0.2,
                 survival_prob=0.8):
  """Creates a efficientnet model."""
  global_params = efficientnet_model.GlobalParams(
      blocks_args=_DEFAULT_BLOCKS_ARGS,
      batch_norm_momentum=0.99,
      batch_norm_epsilon=1e-3,
      dropout_rate=dropout_rate,
      survival_prob=survival_prob,
      data_format='channels_last',
      num_classes=1000,
      width_coefficient=width_coefficient,
      depth_coefficient=depth_coefficient,
      depth_divisor=8,
      min_depth=None,
      relu_fn=tf.nn.swish,
      # The default is TPU-specific batch norm.
      # The alternative is tf.layers.BatchNormalization.
      batch_norm=utils.BatchNormalization,  # TPU-specific requirement.
      use_se=True,
      clip_projection_output=False)
  return global_params 
Author: Thinklab-SJTU, Project: R3Det_Tensorflow, Lines: 26, Source: efficientnet_builder.py
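
The relu_fn here is tf.nn.swish (available in recent TensorFlow 1.x releases), which computes x * sigmoid(x); a quick sanity check:

import tensorflow as tf

x = tf.constant([-2.0, 0.0, 2.0])
swish = tf.nn.swish(x)
manual = x * tf.sigmoid(x)

with tf.Session() as sess:
    print(sess.run([swish, manual]))  # both print the same values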

Example 7: build

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nn [as alias]
def build(self, img):
        """Constructs the generative network's layers. Normally called after initialization.
        
        Args:
            img: 4D tensor representation of image batch
        """

        self.padded = self._pad(img, 40)

        self.conv1 = self._conv_block(self.padded, maps_shape=[9, 9, 3, 32], stride=1, name='conv1')
        self.conv2 = self._conv_block(self.conv1, maps_shape=[2, 2, 32, 64], stride=2, name='conv2')
        self.conv3 = self._conv_block(self.conv2, maps_shape=[2, 2, 64, 128], stride=2, name='conv3')

        self.resid1 = self._residual_block(self.conv3, maps_shape=[3, 3, 128, 128], stride=1, name='resid1')
        self.resid2 = self._residual_block(self.resid1, maps_shape=[3, 3, 128, 128], stride=1, name='resid2')
        self.resid3 = self._residual_block(self.resid2, maps_shape=[3, 3, 128, 128], stride=1, name='resid3')
        self.resid4 = self._residual_block(self.resid3, maps_shape=[3, 3, 128, 128], stride=1, name='resid4')
        self.resid5 = self._residual_block(self.resid4, maps_shape=[3, 3, 128, 128], stride=1, name='resid5')

        self.conv4 = self._upsample_block(self.resid5, maps_shape=[2, 2, 64, 128], stride=2, name='conv4')
        self.conv5 = self._upsample_block(self.conv4, maps_shape=[2, 2, 32, 64], stride=2, name='conv5')
        self.conv6 = self._conv_block(self.conv5, maps_shape=[9, 9, 32, 3], stride=1, name='conv6', activation=None)

        self.output = tf.nn.sigmoid(self.conv6) 
Author: mohamedkeid, Project: Feed-Forward-Style-Transfer, Lines: 26, Source: generator.py

Example 8: _instance_normalize

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nn [as alias]
def _instance_normalize(inputs):
        """Instance normalize inputs to reduce covariate shift and reduce dependency on input contrast to improve results.
        
        Args:
            inputs: 4D tensor representing image layer encodings
            
        Returns:
            maps: 4D tensor of batch normalized inputs
        """

        with tf.variable_scope('instance_normalization'):
            batch, height, width, channels = [_.value for _ in inputs.get_shape()]
            mu, sigma_sq = tf.nn.moments(inputs, [1, 2], keep_dims=True)

            shift = tf.Variable(tf.constant(.1, shape=[channels]))
            scale = tf.Variable(tf.ones([channels]))
            normalized = (inputs - mu) / (sigma_sq + EPSILON) ** .5
            maps = scale * normalized + shift
            return maps 
Author: mohamedkeid, Project: Feed-Forward-Style-Transfer, Lines: 21, Source: generator.py
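
A usage sketch, assuming TensorFlow 1.x and that the module defines the EPSILON constant the function references (a small value such as 1e-5):

import tensorflow as tf

EPSILON = 1e-5  # assumed module-level constant

inputs = tf.random_normal([2, 32, 32, 3])
maps = _instance_normalize(inputs)

# Each (image, channel) pair is normalized to roughly zero mean and unit
# variance before the learned scale and shift are applied.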

Example 9: variable_summaries

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nn [as alias]
def variable_summaries(var, name, collection_key):
    """Attach a lot of summaries to a Tensor (for TensorBoard visualization).

    Args:
        - var: Tensor for variable from which we want to log.
        - name: Variable name.
        - collection_key: Collection to save the summary to, can be any key of
          `VAR_LOG_LEVELS`.
    """
    if collection_key not in VAR_LOG_LEVELS:
        raise ValueError('"{}" not in `VAR_LOG_LEVELS`'.format(collection_key))
    collections = VAR_LOG_LEVELS[collection_key]

    with tf.name_scope(name):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean, collections)
        num_params = tf.reduce_prod(tf.shape(var))
        tf.summary.scalar('num_params', num_params, collections)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev, collections)
        tf.summary.scalar('max', tf.reduce_max(var), collections)
        tf.summary.scalar('min', tf.reduce_min(var), collections)
        tf.summary.histogram('histogram', var, collections)
        tf.summary.scalar('sparsity', tf.nn.zero_fraction(var), collections) 
Author: Sargunan, Project: Table-Detection-using-Deep-learning, Lines: 27, Source: vars.py
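
variable_summaries assumes a module-level VAR_LOG_LEVELS dict mapping a log-level key to the list of summary collections to write into; a minimal sketch with hypothetical level names, assuming TensorFlow 1.x:

import tensorflow as tf

# Hypothetical log levels; each key maps to the collections it writes to.
VAR_LOG_LEVELS = {
    'full': ['full'],
    'reduced': ['reduced', 'full'],
}

weights = tf.get_variable('weights', shape=[128, 64])
variable_summaries(weights, 'weights', 'reduced')

# Merge and write only the summaries in a given collection:
merged = tf.summary.merge(tf.get_collection('reduced'))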

Example 10: _batch_conv_block

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nn [as alias]
def _batch_conv_block(self, x, scope, x_dim, y_dim, in_channels, out_channels, conv_size=(3, 3), pooling=False, pool_size=(2, 2)):
        with tf.variable_scope(scope):

            x = tf.reshape(x, [self._batch_size, x_dim, y_dim, in_channels])
            h1 = tf.contrib.layers.conv2d(x, out_channels,
                                          conv_size,
                                          activation_fn=None,
                                          weights_regularizer=None)
            h2 = tf.contrib.layers.batch_norm(h1,
                                              center=True, scale=True,
                                              is_training=self._phase)

            h3 = self._activation_fn(h2, name='activation_fn')

            if pooling:
                h3 = tf.contrib.layers.max_pool2d(h3, pool_size)

            return h3
Author: euclidjda, Project: deep-quant, Lines: 26, Source: deep_cnn_model.py

Example 11: multilayer_perceptron

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nn [as alias]
def multilayer_perceptron(final_output, weights, biases):
  """MLP over output with attention over enc outputs
  Args:
     final_output: [batch_size x 2*size]
  Returns:
     logit:  [batch_size x target_label_size]
  """
  
  # Layer 1
  layer_1 = tf.add(tf.matmul(final_output, weights["h1"]), biases["b1"])
  layer_1 = tf.nn.relu(layer_1)

  # Layer 2
  layer_2 = tf.add(tf.matmul(layer_1, weights["h2"]), biases["b2"])
  layer_2 = tf.nn.relu(layer_2)

  # output layer
  layer_out = tf.add(tf.matmul(layer_2, weights["out"]), biases["out"])
  
  return layer_out 
Author: shashiongithub, Project: sidenet, Lines: 22, Source: model_utils.py
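
A usage sketch with illustrative sizes, assuming TensorFlow 1.x; the weight and bias dicts just need the keys the function indexes:

import tensorflow as tf

size = 128                # hidden size; 2*size matches the docstring's input
target_label_size = 2

weights = {
    'h1': tf.get_variable('w_h1', [2 * size, size]),
    'h2': tf.get_variable('w_h2', [size, size]),
    'out': tf.get_variable('w_out', [size, target_label_size]),
}
biases = {
    'b1': tf.get_variable('b_1', [size]),
    'b2': tf.get_variable('b_2', [size]),
    'out': tf.get_variable('b_out', [target_label_size]),
}

final_output = tf.random_normal([32, 2 * size])
logits = multilayer_perceptron(final_output, weights, biases)  # [32, 2]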

Example 12: simple_rnn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nn [as alias]
def simple_rnn(rnn_input, initial_state=None):
  """Implements Simple RNN
  Args:
  rnn_input: List of tensors of sizes [-1, sentembed_size]
  Returns:
  encoder_outputs, encoder_state
  """     
  # Setup cell
  cell_enc = get_lstm_cell()
  
  # Setup RNNs
  dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
  rnn_outputs, rnn_state = tf.nn.rnn(cell_enc, rnn_input, dtype=dtype, initial_state=initial_state)
  # print(rnn_outputs)
  # print(rnn_state)
  
  return rnn_outputs, rnn_state 
Author: shashiongithub, Project: sidenet, Lines: 19, Source: model_utils.py
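
A rough driver for the function above, assuming TensorFlow 1.x and substituting a concrete LSTM cell for the project's get_lstm_cell and FLAGS; tf.nn.static_rnn is the later name for the same op:

import tensorflow as tf

sentembed_size = 64
cell = tf.nn.rnn_cell.BasicLSTMCell(128)

# A list of per-timestep tensors, as rnn_input expects.
rnn_input = [tf.random_normal([8, sentembed_size]) for _ in range(5)]
rnn_outputs, rnn_state = tf.nn.static_rnn(cell, rnn_input, dtype=tf.float32)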

Example 13: _loop_function

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nn [as alias]
def _loop_function(current_inp, ext_logits, gold_logits):
  """ Update current input wrt previous logits
  Args:
  current_inp: [batch_size x sentence_embedding_size]
  ext_logits: [batch_size x target_label_size] [1, 0]
  gold_logits: [batch_size x target_label_size]
  Returns:
  updated_inp: [batch_size x sentence_embedding_size]
  """

  prev_logits = gold_logits
  if not FLAGS.authorise_gold_label:
    prev_logits = ext_logits
    prev_logits = tf.nn.softmax(prev_logits) # [batch_size x target_label_size]

  # Note: pre-1.0 TensorFlow APIs; tf.split(axis, num, value) became
  # tf.split(value, num, axis) and tf.mul became tf.multiply in TF 1.0.
  prev_logits = tf.split(1, FLAGS.target_label_size, prev_logits)  # [[batch_size], [batch_size], ...]
  prev_weight_one = prev_logits[0]

  updated_inp = tf.mul(current_inp, prev_weight_one)

  return updated_inp


### SoftMax and Predictions 
Author: shashiongithub, Project: sidenet, Lines: 27, Source: model_utils.py

Example 14: normalize

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nn [as alias]
def normalize(self, inputs):
    """Apply normalization to input.

    The shape must match the declared shape in the constructor.
    [This is copied from tf.contrib.rnn.LayerNormBasicLSTMCell.]

    Args:
      inputs: Input tensor

    Returns:
      Normalized version of input tensor.

    Raises:
      ValueError: if inputs has undefined rank.
    """
    inputs_shape = inputs.get_shape()
    inputs_rank = inputs_shape.ndims
    if inputs_rank is None:
      raise ValueError('Inputs %s has undefined rank.' % inputs.name)
    axis = list(range(1, inputs_rank))  # normalize over all non-batch axes

    beta = self._component.get_variable('beta_%s' % self._name)
    gamma = self._component.get_variable('gamma_%s' % self._name)

    with tf.variable_scope('layer_norm_%s' % self._name):
      # Calculate the moments on the last axis (layer activations).
      mean, variance = nn.moments(inputs, axis, keep_dims=True)

      # Compute layer normalization using the batch_normalization function.
      variance_epsilon = 1E-12
      outputs = nn.batch_normalization(
          inputs, mean, variance, beta, gamma, variance_epsilon)
      outputs.set_shape(inputs_shape)
      return outputs 
Author: ringringyi, Project: DOTA_models, Lines: 36, Source: network_units.py
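
Stripped of the component plumbing, the core of this layer normalization is just nn.moments followed by nn.batch_normalization; a self-contained sketch, assuming TensorFlow 1.x:

import tensorflow as tf

def layer_norm(inputs, beta, gamma, epsilon=1e-12):
    # Normalize over every axis except the batch axis.
    axis = list(range(1, inputs.get_shape().ndims))
    mean, variance = tf.nn.moments(inputs, axis, keep_dims=True)
    return tf.nn.batch_normalization(inputs, mean, variance,
                                     beta, gamma, epsilon)

inputs = tf.random_normal([4, 16])
beta = tf.get_variable('beta', [16], initializer=tf.zeros_initializer())
gamma = tf.get_variable('gamma', [16], initializer=tf.ones_initializer())
outputs = layer_norm(inputs, beta, gamma)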

Example 15: attention

# Required import: import tensorflow [as alias]
# Or: from tensorflow import nn [as alias]
def attention(self, last_layer, attention_tensor):
    """Compute the attention term for the network unit."""
    h_tensor = attention_tensor

    # Compute the attentions.
    # Using feed-forward net to map the two inputs into the same dimension
    focus_tensor = tf.nn.tanh(
        tf.matmul(
            h_tensor,
            self._component.get_variable('attention_weights_pm_0'),
            name='h_x_pm') + self._component.get_variable('attention_bias_0'))

    context_tensor = tf.nn.tanh(
        tf.matmul(
            last_layer,
            self._component.get_variable('attention_weights_hm_0'),
            name='l_x_hm') + self._component.get_variable('attention_bias_1'))
    # The tf.multiply in the following expression broadcasts along the 0 dim:
    z_vec = tf.reduce_sum(tf.multiply(focus_tensor, context_tensor), 1)
    p_vec = tf.nn.softmax(tf.reshape(z_vec, [1, -1]))
    # The tf.multiply in the following expression broadcasts along the 1 dim:
    r_vec = tf.expand_dims(
        tf.reduce_sum(
            tf.multiply(
                h_tensor, tf.reshape(p_vec, [-1, 1]), name='time_together2'),
            0),
        0)
    return tf.matmul(
        r_vec,
        self._component.get_variable('attention_weights_pu'),
        name='time_together3') 
Author: ringringyi, Project: DOTA_models, Lines: 33, Source: network_units.py
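
With concrete variables standing in for self._component.get_variable (and the biases omitted for brevity), the core attention computation reduces to the following sketch, assuming TensorFlow 1.x:

import tensorflow as tf

hidden_dim = 32
h_tensor = tf.random_normal([10, hidden_dim])   # attention memory, one row per step
last_layer = tf.random_normal([1, hidden_dim])  # current state

w_pm = tf.get_variable('attention_weights_pm_0', [hidden_dim, hidden_dim])
w_hm = tf.get_variable('attention_weights_hm_0', [hidden_dim, hidden_dim])

focus = tf.nn.tanh(tf.matmul(h_tensor, w_pm))
context = tf.nn.tanh(tf.matmul(last_layer, w_hm))

z_vec = tf.reduce_sum(tf.multiply(focus, context), 1)  # one score per position
p_vec = tf.nn.softmax(tf.reshape(z_vec, [1, -1]))      # attention distribution
r_vec = tf.reduce_sum(h_tensor * tf.reshape(p_vec, [-1, 1]), 0,
                      keep_dims=True)                  # weighted sum of memory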


Note: The tensorflow.nn examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their original authors; copyright remains with those authors, and any use or redistribution is subject to each project's license. Do not reproduce without permission.