

Python nn.relu Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.nn.relu. If you are wondering what exactly nn.relu does, how to call it, and what real uses look like, the curated code examples below should help. You can also explore further usage examples from the containing module, tensorflow.python.ops.nn.


The following presents 15 code examples of the nn.relu method, sorted by popularity by default.

Example 1: relu

# Required import: from tensorflow.python.ops import nn [as alias]
# Or: from tensorflow.python.ops.nn import relu [as alias]
# (This snippet also relies on clip_ops from tensorflow.python.ops and on the
#  private _to_tensor helper defined in the same backend.py.)
def relu(x, alpha=0., max_value=None):
  """Rectified linear unit.

  With default values, it returns element-wise `max(x, 0)`.

  Arguments:
      x: A tensor or variable.
      alpha: A scalar, slope of negative section (default=`0.`).
      max_value: Saturation threshold.

  Returns:
      A tensor.
  """
  if alpha != 0.:
    negative_part = nn.relu(-x)
  x = nn.relu(x)
  if max_value is not None:
    max_value = _to_tensor(max_value, x.dtype.base_dtype)
    zero = _to_tensor(0., x.dtype.base_dtype)
    x = clip_ops.clip_by_value(x, zero, max_value)
  if alpha != 0.:
    alpha = _to_tensor(alpha, x.dtype.base_dtype)
    x -= alpha * negative_part
  return x 
Author: ryfeus, Project: lambda-packs, Lines: 26, Source: backend.py
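
A quick check of the semantics above (a minimal sketch, assuming a TF 1.x runtime; tf.keras.backend.relu exposes this backend function):

import tensorflow as tf

x = tf.constant([-2.0, -0.5, 0.0, 3.0, 8.0])
# Leaky slope 0.1 below zero, saturation at 6 above.
y = tf.keras.backend.relu(x, alpha=0.1, max_value=6.0)

with tf.Session() as sess:
    print(sess.run(y))  # [-0.2  -0.05  0.    3.    6.  ]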

Example 2: convolution1d

# Required import: from tensorflow.python.ops import nn [as alias]
# Or: from tensorflow.python.ops.nn import relu [as alias]
def convolution1d(inputs,
                  num_outputs,
                  kernel_size,
                  stride=1,
                  padding='SAME',
                  data_format=None,
                  rate=1,
                  activation_fn=nn.relu,
                  normalizer_fn=None,
                  normalizer_params=None,
                  weights_initializer=initializers.xavier_initializer(),
                  weights_regularizer=None,
                  biases_initializer=init_ops.zeros_initializer(),
                  biases_regularizer=None,
                  reuse=None,
                  variables_collections=None,
                  outputs_collections=None,
                  trainable=True,
                  scope=None):
  return convolution(
      inputs,
      num_outputs,
      kernel_size,
      stride,
      padding,
      data_format,
      rate,
      activation_fn,
      normalizer_fn,
      normalizer_params,
      weights_initializer,
      weights_regularizer,
      biases_initializer,
      biases_regularizer,
      reuse,
      variables_collections,
      outputs_collections,
      trainable,
      scope,
      conv_dims=1) 
Author: taehoonlee, Project: tensornets, Lines: 42, Source: layers.py
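
A hypothetical call on a batch of 1-D signals (TF 1.x assumed; this wrapper matches tf.contrib.layers.conv1d, and nn.relu is the default activation_fn):

import tensorflow as tf

signals = tf.placeholder(tf.float32, [None, 100, 8])  # [batch, time, channels]
net = tf.contrib.layers.conv1d(signals, num_outputs=16, kernel_size=5)
# 'SAME' padding and stride 1 preserve length: net is [None, 100, 16].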

Example 3: convolution2d

# Required import: from tensorflow.python.ops import nn [as alias]
# Or: from tensorflow.python.ops.nn import relu [as alias]
def convolution2d(inputs,
                  num_outputs,
                  kernel_size,
                  stride=1,
                  padding='SAME',
                  data_format=None,
                  rate=1,
                  activation_fn=nn.relu,
                  normalizer_fn=None,
                  normalizer_params=None,
                  weights_initializer=initializers.xavier_initializer(),
                  weights_regularizer=None,
                  biases_initializer=init_ops.zeros_initializer(),
                  biases_regularizer=None,
                  reuse=None,
                  variables_collections=None,
                  outputs_collections=None,
                  trainable=True,
                  scope=None):
  return convolution(
      inputs,
      num_outputs,
      kernel_size,
      stride,
      padding,
      data_format,
      rate,
      activation_fn,
      normalizer_fn,
      normalizer_params,
      weights_initializer,
      weights_regularizer,
      biases_initializer,
      biases_regularizer,
      reuse,
      variables_collections,
      outputs_collections,
      trainable,
      scope,
      conv_dims=2) 
Author: taehoonlee, Project: tensornets, Lines: 42, Source: layers.py

Example 4: convolution3d

# Required import: from tensorflow.python.ops import nn [as alias]
# Or: from tensorflow.python.ops.nn import relu [as alias]
def convolution3d(inputs,
                  num_outputs,
                  kernel_size,
                  stride=1,
                  padding='SAME',
                  data_format=None,
                  rate=1,
                  activation_fn=nn.relu,
                  normalizer_fn=None,
                  normalizer_params=None,
                  weights_initializer=initializers.xavier_initializer(),
                  weights_regularizer=None,
                  biases_initializer=init_ops.zeros_initializer(),
                  biases_regularizer=None,
                  reuse=None,
                  variables_collections=None,
                  outputs_collections=None,
                  trainable=True,
                  scope=None):
  return convolution(
      inputs,
      num_outputs,
      kernel_size,
      stride,
      padding,
      data_format,
      rate,
      activation_fn,
      normalizer_fn,
      normalizer_params,
      weights_initializer,
      weights_regularizer,
      biases_initializer,
      biases_regularizer,
      reuse,
      variables_collections,
      outputs_collections,
      trainable,
      scope,
      conv_dims=3) 
Author: taehoonlee, Project: tensornets, Lines: 42, Source: layers.py
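
Examples 2 through 4 are identical apart from the conv_dims value they forward to convolution; a minimal 2-D sketch using the equivalent tf.contrib.layers.conv2d (TF 1.x assumed):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 28, 28, 3])
net = tf.contrib.layers.conv2d(images, num_outputs=32, kernel_size=3,
                               stride=2)  # nn.relu is the default activation_fn
# 'SAME' padding with stride 2 gives net a shape of [None, 14, 14, 32];
# convolution3d behaves the same way on 5-D inputs.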

Example 5: log_relu

# Required import: from tensorflow.python.ops import nn [as alias]
# Or: from tensorflow.python.ops.nn import relu [as alias]
def log_relu(t):
  return tf.log(1 + tf.nn.relu(t))
Author: tdozat, Project: Parser-v3, Lines: 4, Source: nonlin.py
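
log_relu maps all non-positive inputs to log(1) = 0 and grows only logarithmically for positives; a tiny check (TF 1.x, where tf.log is still the name):

import math
import tensorflow as tf

t = tf.constant([-5.0, 0.0, math.e - 1.0])
y = tf.log(1 + tf.nn.relu(t))
with tf.Session() as sess:
    print(sess.run(y))  # [0. 0. 1.]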

Example 6: flatten_fully_connected_v2

# Required import: from tensorflow.python.ops import nn [as alias]
# Or: from tensorflow.python.ops.nn import relu [as alias]
def flatten_fully_connected_v2(inputs,
                               num_outputs,
                               activation_fn=nn.relu,
                               normalizer_fn=None,
                               normalizer_params=None,
                               weights_normalizer_fn=None,
                               weights_normalizer_params=None,
                               weights_initializer=initializers.xavier_initializer(),
                               weights_regularizer=None,
                               biases_initializer=init_ops.zeros_initializer(),
                               biases_regularizer=None,
                               reuse=None,
                               variables_collections=None,
                               outputs_collections=None,
                               trainable=True,
                               scope=None):
    with variable_scope.variable_scope(scope, 'flatten_fully_connected_v2'):
        if inputs.shape.ndims > 2:
            inputs = layers.flatten(inputs)
        return fully_connected(inputs=inputs,
                               num_outputs=num_outputs,
                               activation_fn=activation_fn,
                               normalizer_fn=normalizer_fn,
                               normalizer_params=normalizer_params,
                               weights_normalizer_fn=weights_normalizer_fn,
                               weights_normalizer_params=weights_normalizer_params,
                               weights_initializer=weights_initializer,
                               weights_regularizer=weights_regularizer,
                               biases_initializer=biases_initializer,
                               biases_regularizer=biases_regularizer,
                               reuse=reuse,
                               variables_collections=variables_collections,
                               outputs_collections=outputs_collections,
                               trainable=trainable,
                               scope=scope) 
Author: csmliu, Project: STGAN, Lines: 37, Source: layers.py

Example 7: flatten_fully_connected_v1

# Required import: from tensorflow.python.ops import nn [as alias]
# Or: from tensorflow.python.ops.nn import relu [as alias]
def flatten_fully_connected_v1(inputs,
                               num_outputs,
                               activation_fn=tf.nn.relu,
                               normalizer_fn=None,
                               normalizer_params=None,
                               weights_initializer=slim.xavier_initializer(),
                               weights_regularizer=None,
                               biases_initializer=tf.zeros_initializer(),
                               biases_regularizer=None,
                               reuse=None,
                               variables_collections=None,
                               outputs_collections=None,
                               trainable=True,
                               scope=None):
    with tf.variable_scope(scope, 'flatten_fully_connected_v1'):
        if inputs.shape.ndims > 2:
            inputs = slim.flatten(inputs)
        return slim.fully_connected(inputs,
                                    num_outputs,
                                    activation_fn,
                                    normalizer_fn,
                                    normalizer_params,
                                    weights_initializer,
                                    weights_regularizer,
                                    biases_initializer,
                                    biases_regularizer,
                                    reuse,
                                    variables_collections,
                                    outputs_collections,
                                    trainable,
                                    scope) 
Author: csmliu, Project: STGAN, Lines: 33, Source: layers.py

Example 8: flatten_fully_connected

# Required import: from tensorflow.python.ops import nn [as alias]
# Or: from tensorflow.python.ops.nn import relu [as alias]
def flatten_fully_connected(inputs,
                            num_outputs,
                            activation_fn=tf.nn.relu,
                            normalizer_fn=None,
                            normalizer_params=None,
                            weights_initializer=slim.xavier_initializer(),
                            weights_regularizer=None,
                            biases_initializer=tf.zeros_initializer(),
                            biases_regularizer=None,
                            reuse=None,
                            variables_collections=None,
                            outputs_collections=None,
                            trainable=True,
                            scope=None):
    with tf.variable_scope(scope, 'flatten_fully_connected', [inputs]):
        if inputs.shape.ndims > 2:
            inputs = slim.flatten(inputs)
        return slim.fully_connected(inputs,
                                    num_outputs,
                                    activation_fn,
                                    normalizer_fn,
                                    normalizer_params,
                                    weights_initializer,
                                    weights_regularizer,
                                    biases_initializer,
                                    biases_regularizer,
                                    reuse,
                                    variables_collections,
                                    outputs_collections,
                                    trainable,
                                    scope) 
Author: csmliu, Project: STGAN, Lines: 33, Source: layers.py
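
All three flatten_fully_connected variants first flatten any input with rank above 2, then delegate to the fully connected layer; a hypothetical call (TF 1.x + slim assumed):

import tensorflow as tf

feature_maps = tf.placeholder(tf.float32, [None, 7, 7, 64])
logits = flatten_fully_connected(feature_maps, num_outputs=10,
                                 activation_fn=None)
# feature_maps is flattened to [None, 3136] before the dense layer.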

Example 9: preact_conv2d

# Required import: from tensorflow.python.ops import nn [as alias]
# Or: from tensorflow.python.ops.nn import relu [as alias]
def preact_conv2d(
        inputs,
        num_outputs,
        kernel_size,
        stride=1,
        padding='SAME',
        activation_fn=nn.relu,
        normalizer_fn=None,
        normalizer_params=None,
        weights_initializer=initializers.xavier_initializer(),
        weights_regularizer=None,
        reuse=None,
        variables_collections=None,
        outputs_collections=None,
        trainable=True,
        scope=None):
    """Adds a 2D convolution preceded by batch normalization and activation.
    """
    with variable_scope.variable_scope(scope, 'Conv', values=[inputs], reuse=reuse) as sc:
        inputs = ops.convert_to_tensor(inputs)
        dtype = inputs.dtype.base_dtype
        if normalizer_fn:
            normalizer_params = normalizer_params or {}
            inputs = normalizer_fn(inputs, activation_fn=activation_fn, **normalizer_params)
        kernel_h, kernel_w = utils.two_element_tuple(kernel_size)
        stride_h, stride_w = utils.two_element_tuple(stride)
        num_filters_in = utils.last_dimension(inputs.get_shape(), min_rank=4)
        weights_shape = [kernel_h, kernel_w, num_filters_in, num_outputs]
        weights_collections = utils.get_variable_collections(variables_collections, 'weights')
        weights = variables.model_variable('weights',
                                           shape=weights_shape,
                                           dtype=dtype,
                                           initializer=weights_initializer,
                                           regularizer=weights_regularizer,
                                           collections=weights_collections,
                                           trainable=trainable)
        outputs = nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1], padding=padding)
        return utils.collect_named_outputs(outputs_collections, sc.name, outputs) 
Author: rwightman, Project: tensorflow-litterbox, Lines: 40, Source: preact_conv.py
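
Note the pre-activation ordering: when a normalizer_fn is given, it runs on the inputs (with activation_fn folded into it) before the convolution, and no bias variable is created since the normalizer would absorb it. A hypothetical call (TF 1.x assumed):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 32, 32, 16])
net = preact_conv2d(images, num_outputs=64, kernel_size=3,
                    normalizer_fn=tf.contrib.layers.batch_norm,
                    activation_fn=tf.nn.relu)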

Example 10: __init__

# Required import: from tensorflow.python.ops import nn [as alias]
# Or: from tensorflow.python.ops.nn import relu [as alias]
def __init__(self, hidden_units=(256,), batch_size=64, n_epochs=5,
                 keep_prob=1.0, activation=nn.relu,
                 random_state=None, solver=tf.train.AdamOptimizer,
                 solver_kwargs=None, transform_layer_index=None):
        self.hidden_units = hidden_units
        self.batch_size = batch_size
        self.n_epochs = n_epochs
        self.keep_prob = keep_prob
        self.activation = activation
        self.random_state = random_state
        self.solver = solver
        self.solver_kwargs = solver_kwargs
        self.transform_layer_index = transform_layer_index 
Author: civisanalytics, Project: muffnn, Lines: 15, Source: mlp_classifier.py
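
This constructor follows the scikit-learn estimator convention, so a hypothetical round trip looks like the following (the MLPClassifier import and the X/y arrays are assumptions for illustration):

from muffnn import MLPClassifier
from tensorflow.python.ops import nn

clf = MLPClassifier(hidden_units=(256, 128), activation=nn.relu,
                    n_epochs=10, keep_prob=0.8)
# clf.fit(X_train, y_train)
# predictions = clf.predict(X_test)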

Example 11: __init__

# Required import: from tensorflow.python.ops import nn [as alias]
# Or: from tensorflow.python.ops.nn import relu [as alias]
def __init__(self, hidden_units=(256,), batch_size=64, n_epochs=5,
                 keep_prob=1.0, activation=nn.relu,
                 random_state=None, monitor=None,
                 solver=tf.train.AdamOptimizer, solver_kwargs=None,
                 transform_layer_index=None):
        self.hidden_units = hidden_units
        self.batch_size = batch_size
        self.n_epochs = n_epochs
        self.keep_prob = keep_prob
        self.activation = activation
        self.random_state = random_state
        self.monitor = monitor
        self.solver = solver
        self.solver_kwargs = solver_kwargs
        self.transform_layer_index = transform_layer_index 
Author: civisanalytics, Project: muffnn, Lines: 17, Source: test_base.py

Example 12: __init__

# Required import: from tensorflow.python.ops import nn [as alias]
# Or: from tensorflow.python.ops.nn import relu [as alias]
def __init__(self,
               num_label_columns,
               hidden_units,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               num_ps_replicas=0,
               scope=None):
    """Initializes DNNComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      hidden_units: List of hidden units per layer. All layers are fully
        connected.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use an FTRL optimizer.
      activation_fn: Activation function applied to each layer. If `None`,
        will use `tf.nn.relu`.
      dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Optional scope for variables created in this model. If no scope
        is supplied, one is generated.
    """
    scope = "dnn" if not scope else scope
    super(DNNComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope)
    self._hidden_units = hidden_units
    self._activation_fn = activation_fn
    self._dropout = dropout 
Author: tobegit3hub, Project: deep_image_model, Lines: 40, Source: composable_model.py
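
A hypothetical instantiation, based solely on the constructor signature above (DNNComposableModel is a tf.contrib.learn internal, TF 1.x):

from tensorflow.python.ops import nn

model = DNNComposableModel(num_label_columns=10,
                           hidden_units=[256, 128],
                           activation_fn=nn.relu,
                           dropout=0.5)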

Example 13: call

# Required import: from tensorflow.python.ops import nn [as alias]
# Or: from tensorflow.python.ops.nn import relu [as alias]
def call(self, inputs):
    inputs = ops.convert_to_tensor(inputs, dtype=self.dtype)
    ndim = self._input_rank

    if self.rectify:
      inputs = nn.relu(inputs)

    # Compute normalization pool.
    if ndim == 2:
      norm_pool = math_ops.matmul(math_ops.square(inputs), self.gamma)
      norm_pool = nn.bias_add(norm_pool, self.beta)
    elif self.data_format == "channels_last" and ndim <= 5:
      shape = self.gamma.shape.as_list()
      gamma = array_ops.reshape(self.gamma, (ndim - 2) * [1] + shape)
      norm_pool = nn.convolution(math_ops.square(inputs), gamma, "VALID")
      norm_pool = nn.bias_add(norm_pool, self.beta)
    else:  # generic implementation
      # This puts channels in the last dimension regardless of input.
      norm_pool = math_ops.tensordot(
          math_ops.square(inputs), self.gamma, [[self._channel_axis()], [0]])
      norm_pool += self.beta
      if self.data_format == "channels_first":
        # Return to channels_first format if necessary.
        axes = list(range(ndim - 1))
        axes.insert(1, ndim - 1)
        norm_pool = array_ops.transpose(norm_pool, axes)

    if self.inverse:
      norm_pool = math_ops.sqrt(norm_pool)
    else:
      norm_pool = math_ops.rsqrt(norm_pool)
    outputs = inputs * norm_pool

    if not context.executing_eagerly():
      outputs.set_shape(self.compute_output_shape(inputs.shape))
    return outputs 
Author: mauriceqch, Project: pcc_geo_cnn, Lines: 38, Source: gdn.py
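
For orientation, the call above implements generalized divisive normalization (GDN, per the source file gdn.py); a minimal NumPy sketch of the ndim == 2 branch, with beta and gamma standing for the layer's learned parameters:

import numpy as np

def gdn_reference(x, beta, gamma, inverse=False):
    # x: [batch, channels]; gamma: [channels, channels]; beta: [channels].
    norm_pool = np.square(x) @ gamma + beta
    # inverse=True multiplies by the pool's sqrt (iGDN); otherwise divide.
    return x * np.sqrt(norm_pool) if inverse else x / np.sqrt(norm_pool)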

Example 14: fractal_conv2d

# Required import: from tensorflow.python.ops import nn [as alias]
# Or: from tensorflow.python.ops.nn import relu [as alias]
def fractal_conv2d(inputs,
                   num_columns,
                   num_outputs,
                   kernel_size,
                   joined=True,
                   stride=1,
                   padding='SAME',
                   # rate=1,
                   activation_fn=nn.relu,
                   normalizer_fn=slim.batch_norm,
                   normalizer_params=None,
                   weights_initializer=initializers.xavier_initializer(),
                   weights_regularizer=None,
                   biases_initializer=None,
                   biases_regularizer=None,
                   reuse=None,
                   variables_collections=None,
                   outputs_collections=None,
                   is_training=True,
                   trainable=True,
                   scope=None):
  """Builds a fractal block with slim.conv2d.
  The fractal will have `num_columns` columns.
  Args:
    inputs: a 4-D tensor `[batch_size, height, width, channels]`.
    num_columns: integer, the number of columns in the fractal.
  """
  locs = locals()
  fractal_args = ['inputs', 'num_columns', 'joined', 'is_training']
  asc_fn = lambda: slim.arg_scope([slim.conv2d],
                                  **{arg: val for (arg, val) in locs.items()
                                     if arg not in fractal_args})
  return fractal_template(inputs, num_columns, slim.conv2d, asc_fn,
                          joined, is_training, reuse, scope) 
Author: tensorpro, Project: FractalNet, Lines: 36, Source: fractal_block.py
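
A hypothetical invocation building a joined three-column fractal block (TF 1.x + slim assumed, with fractal_template available from the same fractal_block.py):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 32, 32, 3])
net = fractal_conv2d(images, num_columns=3, num_outputs=64, kernel_size=3,
                     is_training=True)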

Example 15: __init__

# Required import: from tensorflow.python.ops import nn [as alias]
# Or: from tensorflow.python.ops.nn import relu [as alias]
def __init__(self,
               num_label_columns,
               hidden_units,
               optimizer=None,
               activation_fn=nn.relu,
               dropout=None,
               gradient_clip_norm=None,
               num_ps_replicas=0,
               scope=None,
               trainable=True):
    """Initializes DNNComposableModel objects.

    Args:
      num_label_columns: The number of label columns.
      hidden_units: List of hidden units per layer. All layers are fully
        connected.
      optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the model. If `None`, will use an FTRL optimizer.
      activation_fn: Activation function applied to each layer. If `None`,
        will use `tf.nn.relu`.
      dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      num_ps_replicas: The number of parameter server replicas.
      scope: Optional scope for variables created in this model. If no scope
        is supplied, one is generated.
      trainable: True if this model contains variables that can be trained.
        False otherwise (in cases where the variables are used strictly for
        transforming input labels for training).
    """
    scope = "dnn" if not scope else scope
    super(DNNComposableModel, self).__init__(
        num_label_columns=num_label_columns,
        optimizer=optimizer,
        gradient_clip_norm=gradient_clip_norm,
        num_ps_replicas=num_ps_replicas,
        scope=scope,
        trainable=trainable)
    self._hidden_units = hidden_units
    self._activation_fn = activation_fn
    self._dropout = dropout 
Author: ryfeus, Project: lambda-packs, Lines: 45, Source: composable_model.py


Note: The tensorflow.python.ops.nn.relu examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Please do not repost without permission.