Python tensorflow.relu Method Code Examples

This article collects typical usage examples of the tensorflow.relu method in Python. If you are wondering what tensorflow.relu does, how to call it, or how it is used in practice, the curated examples below may help. (Strictly speaking, the public API entry point is tf.nn.relu, which is what most of the examples actually call.) You can also explore further usage examples from the tensorflow module where this method lives.


The following presents 14 code examples of the tensorflow.relu method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
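Before the examples, a minimal sketch of the activation itself (assumptions: TensorFlow 2.x in eager mode; the public entry point is tf.nn.relu):

import tensorflow as tf

# ReLU zeroes out negative entries and passes positive entries through.
x = tf.constant([-2.0, -0.5, 0.0, 1.5])
print(tf.nn.relu(x).numpy())  # [0.  0.  0.  1.5]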

Example 1: double_discriminator

# Required import: import tensorflow [as alias]
# Or: from tensorflow import relu [as alias]
def double_discriminator(x, filters1=128, filters2=None,
                         kernel_size=8, strides=4, pure_mean=False):
  """A convolutional discriminator with 2 layers and concatenated output."""
  if filters2 is None:
    filters2 = 4 * filters1
  with tf.variable_scope("discriminator"):
    net = layers().Conv2D(
        filters1, kernel_size, strides=strides, padding="SAME", name="conv1")(x)
    if pure_mean:
      net1 = tf.reduce_mean(net, [1, 2])
    else:
      net1 = mean_with_attention(net, "mean_with_attention1")
    net = tf.nn.relu(net)
    # Apply conv2 to the ReLU of conv1's output, not to the raw input x;
    # otherwise the first layer and its activation would be dead code.
    net = layers().Conv2D(
        filters2, kernel_size, strides=strides, padding="SAME", name="conv2")(net)
    if pure_mean:
      net2 = tf.reduce_mean(net, [1, 2])
    else:
      net2 = mean_with_attention(net, "mean_with_attention2")
    return tf.concat([net1, net2], axis=-1) 
Author: yyht | Project: BERT | Lines: 24 | Source: common_layers.py

Example 2: double_discriminator

# Required import: import tensorflow [as alias]
# Or: from tensorflow import relu [as alias]
def double_discriminator(x, filters1=128, filters2=None,
                         kernel_size=8, strides=4, pure_mean=False):
  """A convolutional discriminator with 2 layers and concatenated output."""
  if filters2 is None:
    filters2 = 4 * filters1
  with tf.variable_scope("discriminator"):
    net = tf.layers.conv2d(
        x, filters1, kernel_size, strides=strides, padding="SAME", name="conv1")
    if pure_mean:
      net1 = tf.reduce_mean(net, [1, 2])
    else:
      net1 = mean_with_attention(net, "mean_with_attention1")
    net = tf.nn.relu(net)
    # Apply conv2 to the ReLU of conv1's output, not to the raw input x;
    # otherwise the first layer and its activation would be dead code.
    net = tf.layers.conv2d(
        net, filters2, kernel_size, strides=strides, padding="SAME", name="conv2")
    if pure_mean:
      net2 = tf.reduce_mean(net, [1, 2])
    else:
      net2 = mean_with_attention(net, "mean_with_attention2")
    return tf.concat([net1, net2], axis=-1) 
Author: mlperf | Project: training_results_v0.5 | Lines: 24 | Source: common_layers.py
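Examples 1 and 2 build the same discriminator against the Keras-layers and tf.layers APIs respectively. For reference, a minimal self-contained sketch of the pure_mean path (assumptions: TensorFlow 2.x; mean_with_attention and shape_list are omitted; all names are illustrative):

import tensorflow as tf

def double_discriminator_sketch(x, filters1=128, filters2=None,
                                kernel_size=8, strides=4):
  if filters2 is None:
    filters2 = 4 * filters1
  net = tf.keras.layers.Conv2D(filters1, kernel_size, strides=strides,
                               padding="same", name="conv1")(x)
  net1 = tf.reduce_mean(net, [1, 2])   # global mean over height and width
  net = tf.nn.relu(net)
  net = tf.keras.layers.Conv2D(filters2, kernel_size, strides=strides,
                               padding="same", name="conv2")(net)
  net2 = tf.reduce_mean(net, [1, 2])
  return tf.concat([net1, net2], axis=-1)   # [batch, filters1 + filters2]

x = tf.random.normal([2, 32, 32, 3])
print(double_discriminator_sketch(x).shape)  # (2, 640)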

Example 3: conv_stride2_multistep

# Required import: import tensorflow [as alias]
# Or: from tensorflow import relu [as alias]
def conv_stride2_multistep(x, nbr_steps, output_filters, name=None, reuse=None):
  """Use a strided convolution to downsample x by 2, `nbr_steps` times.

  We use stride and filter size 2 to avoid the checkerboard problem of deconvs.
  As detailed in http://distill.pub/2016/deconv-checkerboard/.

  Args:
    x: a `Tensor` with shape `[batch, spatial, depth]` or
     `[batch, spatial_1, spatial_2, depth]`
    nbr_steps: number of halving downsample rounds to apply
    output_filters: an int specifying the filter count for the convolutions
    name: a string
    reuse: a boolean

  Returns:
    a `Tensor` with shape `[batch, spatial / (2**nbr_steps), output_filters]` or
     `[batch, spatial_1 / (2**nbr_steps), spatial_2 / (2**nbr_steps),
       output_filters]`
  """
  with tf.variable_scope(
      name, default_name="conv_stride2_multistep", values=[x], reuse=reuse):
    if nbr_steps == 0:
      out = conv(x, output_filters, (1, 1))
      return out, [out]
    hidden_layers = [x]
    for i in range(nbr_steps):
      hidden_layers.append(
          conv(
              hidden_layers[-1],
              output_filters, (2, 2),
              strides=2,
              activation=tf.nn.relu,
              name="conv" + str(i)))
    return hidden_layers[-1], hidden_layers 
Author: akzaidi | Project: fine-lm | Lines: 36 | Source: common_layers.py
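Here conv is a module-level helper; a self-contained rendering of the same halving loop (assumptions: TensorFlow 2.x, 4-D input; names are illustrative):

import tensorflow as tf

def downsample_multistep(x, nbr_steps, output_filters):
  hidden_layers = [x]
  for i in range(nbr_steps):
    # Kernel size 2 with stride 2 halves each spatial dimension, avoiding
    # the checkerboard artifacts mentioned in the docstring.
    hidden_layers.append(
        tf.keras.layers.Conv2D(output_filters, 2, strides=2,
                               activation=tf.nn.relu,
                               name="conv" + str(i))(hidden_layers[-1]))
  return hidden_layers[-1], hidden_layers

x = tf.zeros([1, 64, 64, 3])
out, _ = downsample_multistep(x, nbr_steps=3, output_filters=16)
print(out.shape)  # (1, 8, 8, 16)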

Example 4: hard_sigmoid

# Required import: import tensorflow [as alias]
# Or: from tensorflow import relu [as alias]
def hard_sigmoid(x, saturation_limit=0.9):
  saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
  x_shifted = 0.5 * x + 0.5
  return tf.minimum(1.0, tf.nn.relu(x_shifted)), saturation_cost 
Author: akzaidi | Project: fine-lm | Lines: 6 | Source: common_layers.py
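A quick usage check (assumption: TensorFlow 2.x eager mode; hard_sigmoid as defined above):

import tensorflow as tf

y, cost = hard_sigmoid(tf.constant([-2.0, 0.0, 2.0]))
print(y.numpy())     # [0.  0.5 1. ] -- a piecewise-linear sigmoid
print(cost.numpy())  # ~0.7333 = mean(relu(|x| - 0.9)) = mean([1.1, 0.0, 1.1])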

Example 5: hard_tanh

# Required import: import tensorflow [as alias]
# Or: from tensorflow import relu [as alias]
def hard_tanh(x, saturation_limit=0.9):
  saturation_cost = tf.reduce_mean(tf.nn.relu(tf.abs(x) - saturation_limit))
  return tf.minimum(1.0, tf.maximum(x, -1.0)), saturation_cost 
Author: akzaidi | Project: fine-lm | Lines: 5 | Source: common_layers.py
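The matching check for hard_tanh (assumption: TensorFlow 2.x eager mode):

import tensorflow as tf

y, cost = hard_tanh(tf.constant([-2.0, 0.0, 2.0]))
print(y.numpy())     # [-1.  0.  1.] -- x clipped to [-1, 1]
print(cost.numpy())  # ~0.7333, the same saturation penalty as hard_sigmoid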

Example 6: relu_density_logit

# Required import: import tensorflow [as alias]
# Or: from tensorflow import relu [as alias]
def relu_density_logit(x, reduce_dims):
  """logit(density(x)).

  Useful for histograms.

  Args:
    x: a Tensor, typically the output of tf.relu
    reduce_dims: a list of dimensions

  Returns:
    a Tensor
  """
  frac = tf.reduce_mean(tf.to_float(x > 0.0), reduce_dims)
  scaled = tf.log(frac + math.exp(-10)) - tf.log((1.0 - frac) + math.exp(-10))
  return scaled 
Author: akzaidi | Project: fine-lm | Lines: 17 | Source: common_layers.py
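A usage sketch (assumptions: TensorFlow 1.x, where tf.to_float, tf.log and tf.random_normal exist; import math comes from the module context):

import math
import tensorflow as tf

x = tf.nn.relu(tf.random_normal([8, 16]))  # roughly half the entries become 0
density_logit = relu_density_logit(x, [0, 1])
# frac is the fraction of positive activations (about 0.5 here), so the
# smoothed logit log(frac) - log(1 - frac) lands near 0.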

Example 7: sepconv_relu_sepconv

# Required import: import tensorflow [as alias]
# Or: from tensorflow import relu [as alias]
def sepconv_relu_sepconv(inputs,
                         filter_size,
                         output_size,
                         first_kernel_size=(1, 1),
                         second_kernel_size=(1, 1),
                         padding="LEFT",
                         nonpadding_mask=None,
                         dropout=0.0,
                         name=None):
  """Hidden layer with RELU activation followed by linear projection."""
  with tf.variable_scope(name, "sepconv_relu_sepconv", [inputs]):
    inputs = maybe_zero_out_padding(inputs, first_kernel_size, nonpadding_mask)
    if inputs.get_shape().ndims == 3:
      is_3d = True
      inputs = tf.expand_dims(inputs, 2)
    else:
      is_3d = False
    h = separable_conv(
        inputs,
        filter_size,
        first_kernel_size,
        activation=tf.nn.relu,
        padding=padding,
        name="conv1")
    if dropout != 0.0:
      h = tf.nn.dropout(h, 1.0 - dropout)
    h = maybe_zero_out_padding(h, second_kernel_size, nonpadding_mask)
    ret = separable_conv(
        h, output_size, second_kernel_size, padding=padding, name="conv2")
    if is_3d:
      ret = tf.squeeze(ret, 2)
    return ret


# DEPRECATED - use dense_relu_dense, conv_relu_conv, sepconv_relu_sepconv 
Author: akzaidi | Project: fine-lm | Lines: 37 | Source: common_layers.py
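separable_conv and maybe_zero_out_padding are module helpers; a self-contained sketch of the sepconv -> ReLU -> sepconv pattern (assumptions: TensorFlow 2.x; names are illustrative):

import tensorflow as tf

def sepconv_relu_sepconv_sketch(inputs, filter_size, output_size,
                                kernel_size=(3, 3), dropout=0.0):
  # Depthwise-separable conv with ReLU, then a linear separable projection.
  h = tf.keras.layers.SeparableConv2D(filter_size, kernel_size, padding="same",
                                      activation=tf.nn.relu, name="conv1")(inputs)
  if dropout != 0.0:
    h = tf.nn.dropout(h, rate=dropout)
  return tf.keras.layers.SeparableConv2D(output_size, kernel_size,
                                         padding="same", name="conv2")(h)

x = tf.random.normal([2, 10, 10, 32])
print(sepconv_relu_sepconv_sketch(x, 64, 32).shape)  # (2, 10, 10, 32)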

Example 8: conv_hidden_relu

# Required import: import tensorflow [as alias]
# Or: from tensorflow import relu [as alias]
def conv_hidden_relu(inputs,
                     hidden_size,
                     output_size,
                     kernel_size=(1, 1),
                     second_kernel_size=(1, 1),
                     dropout=0.0,
                     **kwargs):
  """Hidden layer with RELU activation followed by linear projection."""
  name = kwargs.pop("name") if "name" in kwargs else None
  with tf.variable_scope(name, "conv_hidden_relu", [inputs]):
    if inputs.get_shape().ndims == 3:
      is_3d = True
      inputs = tf.expand_dims(inputs, 2)
    else:
      is_3d = False
    conv_f1 = conv if kernel_size == (1, 1) else separable_conv
    h = conv_f1(
        inputs,
        hidden_size,
        kernel_size,
        activation=tf.nn.relu,
        name="conv1",
        **kwargs)
    if dropout != 0.0:
      h = tf.nn.dropout(h, 1.0 - dropout)
    conv_f2 = conv if second_kernel_size == (1, 1) else separable_conv
    ret = conv_f2(h, output_size, second_kernel_size, name="conv2", **kwargs)
    if is_3d:
      ret = tf.squeeze(ret, 2)
    return ret 
Author: akzaidi | Project: fine-lm | Lines: 32 | Source: common_layers.py
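With the default kernel_size=(1, 1), the 1x1 convolutions act position-wise, so the block reduces to a dense -> ReLU -> dense feed-forward; a minimal equivalent (assumptions: TensorFlow 2.x; names are illustrative):

import tensorflow as tf

def dense_relu_dense_sketch(inputs, hidden_size, output_size, dropout=0.0):
  h = tf.keras.layers.Dense(hidden_size, activation=tf.nn.relu)(inputs)
  if dropout != 0.0:
    h = tf.nn.dropout(h, rate=dropout)
  return tf.keras.layers.Dense(output_size)(h)

x = tf.random.normal([2, 7, 512])                   # [batch, length, depth]
print(dense_relu_dense_sketch(x, 2048, 512).shape)  # (2, 7, 512)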

Example 9: brelu

# Required import: import tensorflow [as alias]
# Or: from tensorflow import relu [as alias]
def brelu(x):
  """Bipolar ReLU as in https://arxiv.org/abs/1709.04054."""
  x_shape = shape_list(x)
  x1, x2 = tf.split(tf.reshape(x, x_shape[:-1] + [-1, 2]), 2, axis=-1)
  y1 = tf.nn.relu(x1)
  y2 = -tf.nn.relu(-x2)
  return tf.reshape(tf.concat([y1, y2], axis=-1), x_shape) 
Author: akzaidi | Project: fine-lm | Lines: 9 | Source: common_layers.py
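brelu splits the last dimension into pairs and applies ReLU with opposite signs to the two halves; a usage check (assumptions: TensorFlow 2.x eager mode; shape_list is the module helper returning the shape as a list):

import tensorflow as tf

x = tf.constant([1.0, 2.0, -3.0, -4.0])
print(brelu(x).numpy())
# [ 1.  0.  0. -4.]: within each pair the first slot keeps positive values
# and the second slot keeps negative values.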

Example 10: relu_density_logit

# Required import: import tensorflow [as alias]
# Or: from tensorflow import relu [as alias]
def relu_density_logit(x, reduce_dims):
  """logit(density(x)).

  Useful for histograms.

  Args:
    x: a Tensor, typically the output of tf.relu
    reduce_dims: a list of dimensions

  Returns:
    a Tensor
  """
  frac = tf.reduce_mean(to_float(x > 0.0), reduce_dims)
  scaled = tf.log(frac + math.exp(-10)) - tf.log((1.0 - frac) + math.exp(-10))
  return scaled 
Author: yyht | Project: BERT | Lines: 17 | Source: common_layers.py

Example 11: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import relu [as alias]
def __init__(self, num_parameters, D=20, boundary=3.0, conv=False, init_fcn=None, kernel='gaussian', **kwargs):
        self.num_parameters = num_parameters
        self.D = D
        self.boundary = boundary
        self.init_fcn = init_fcn
        self.conv = conv
        if self.conv:
            self.unsqueeze_dim = 4
        else:
            self.unsqueeze_dim = 2
        self.kernel = kernel
        if not (kernel in ['gaussian', 'relu', 'softplus']):
            raise ValueError('Kernel not recognized (must be {gaussian, relu, softplus})')
        super().__init__(**kwargs) 
Author: ispamm | Project: kernel-activation-functions | Lines: 16 | Source: kafnets.py
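This constructor belongs to the kernel activation function (KAF) Keras layer from the kafnets project; a hypothetical instantiation (the class name KAF and its Keras-layer usage are assumptions taken from the source project):

# D=20 dictionary points on [-boundary, boundary]; kernel must be one of
# 'gaussian', 'relu' or 'softplus', otherwise __init__ raises ValueError.
kaf = KAF(num_parameters=64, D=20, boundary=3.0, kernel='relu')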

Example 12: relu_kernel

# Required import: import tensorflow [as alias]
# Or: from tensorflow import relu [as alias]
def relu_kernel(self, x):
        # tf.nn.relu is the public API name (a bare tf.relu is not part of
        # TensorFlow's API and would raise AttributeError).
        return tf.nn.relu(tf.expand_dims(x, axis=self.unsqueeze_dim) - self.dict) 
Author: ispamm | Project: kernel-activation-functions | Lines: 4 | Source: kafnets.py
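The kernel shifts the input by each dictionary point and applies ReLU, yielding one basis function per point; a standalone sketch (assumptions: TensorFlow 2.x; dict_points stands in for the layer's self.dict, and unsqueeze_dim is 2 for the non-conv case):

import tensorflow as tf

D = 5
dict_points = tf.linspace(-3.0, 3.0, D)  # the dictionary grid
x = tf.constant([[0.5, -1.0]])           # [batch, features]
kernel = tf.nn.relu(tf.expand_dims(x, axis=2) - dict_points)
print(kernel.shape)  # (1, 2, 5): one shifted ReLU basis per dictionary point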

Example 13: conv_stride2_multistep

# Required import: import tensorflow [as alias]
# Or: from tensorflow import relu [as alias]
def conv_stride2_multistep(x, nbr_steps, output_filters, name=None, reuse=None):
  """Use a strided convolution to downsample x by 2, `nbr_steps` times.

  We use stride and filter size 2 to avoid the checkerboard problem of deconvs.
  As detailed in http://distill.pub/2016/deconv-checkerboard/.

  Args:
    x: a `Tensor` with shape `[batch, spatial, depth]` or
     `[batch, spatial_1, spatial_2, depth]`
    nbr_steps: number of halving downsample rounds to apply
    output_filters: an int specifying the filter count for the convolutions
    name: a string
    reuse: a boolean

  Returns:
    a `Tensor` with shape `[batch, spatial / (2**nbr_steps), output_filters]` or
     `[batch, spatial_1 / (2**nbr_steps), spatial_2 / (2**nbr_steps),
       output_filters]`
  """
  with tf.variable_scope(
      name, default_name="conv_stride2_multistep", values=[x], reuse=reuse):
    if nbr_steps == 0:
      out = conv(x, output_filters, (1, 1))
      return out, [out]
    hidden_layers = [x]
    for i in xrange(nbr_steps):  # Python 2 xrange; on Python 3 use range or six.moves.xrange
      hidden_layers.append(
          conv(
              hidden_layers[-1],
              output_filters, (2, 2),
              strides=2,
              activation=tf.nn.relu,
              name="conv" + str(i)))
    return hidden_layers[-1], hidden_layers 
Author: ZhenYangIACAS | Project: NMT_GAN | Lines: 36 | Source: common_layers.py

Example 14: conv_hidden_relu

# Required import: import tensorflow [as alias]
# Or: from tensorflow import relu [as alias]
def conv_hidden_relu(inputs,
                     hidden_size,
                     output_size,
                     kernel_size=(1, 1),
                     summaries=True,
                     dropout=0.0,
                     **kwargs):
  """Hidden layer with RELU activation followed by linear projection."""
  name = kwargs.pop("name") if "name" in kwargs else None
  with tf.variable_scope(name, "conv_hidden_relu", [inputs]):
    if inputs.get_shape().ndims == 3:
      is_3d = True
      inputs = tf.expand_dims(inputs, 2)
    else:
      is_3d = False
    h = conv(
        inputs,
        hidden_size,
        kernel_size,
        activation=tf.nn.relu,
        name="conv1",
        **kwargs)
    if dropout != 0.0:
      h = tf.nn.dropout(h, 1.0 - dropout)
    if summaries and not tf.get_variable_scope().reuse:
      tf.summary.histogram("hidden_density_logit",
                           relu_density_logit(
                               h, list(range(inputs.shape.ndims - 1))))
    ret = conv(h, output_size, (1, 1), name="conv2", **kwargs)
    if is_3d:
      ret = tf.squeeze(ret, 2)
    return ret 
Author: ZhenYangIACAS | Project: NMT_GAN | Lines: 34 | Source: common_layers.py


Note: The tensorflow.relu method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors, and any distribution or use must follow the corresponding project's license. Please do not reproduce without permission.