

Python tensorflow.erf Method Code Examples

This article collects typical usage examples of the Python method tensorflow.erf. If you are unsure what tensorflow.erf does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the tensorflow package it belongs to.


The following presents 15 code examples of tensorflow.erf, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.

Example 1: _normal_distribution_cdf

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import erf [as alias]
def _normal_distribution_cdf(x, stddev):
  """Evaluates the CDF of the normal distribution.

  Normal distribution with mean 0 and standard deviation stddev,
  evaluated at x.

  Input and output `Tensor`s have matching shapes.

  Args:
    x: a `Tensor`
    stddev: a `Tensor` with the same shape as `x`.

  Returns:
    a `Tensor` with the same shape as `x`.

  """
  return 0.5 * (1.0 + tf.erf(x / (math.sqrt(2) * stddev + 1e-20))) 
Author: akzaidi | Project: fine-lm | Lines: 19 | Source: expert_utils.py
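
A minimal usage sketch of the helper above (assumes the TensorFlow 1.x session API, where tf.erf is available at the top level; the input values are illustrative):

import math
import tensorflow as tf

def _normal_distribution_cdf(x, stddev):
    return 0.5 * (1.0 + tf.erf(x / (math.sqrt(2) * stddev + 1e-20)))

with tf.Session() as sess:
    x = tf.constant([-1.0, 0.0, 1.0])
    stddev = tf.ones_like(x)
    # Prints approximately [0.159 0.5 0.841], the standard normal CDF at x
    print(sess.run(_normal_distribution_cdf(x, stddev)))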

Example 2: fully_variance_dense

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import erf [as alias]
def fully_variance_dense(input_tensor, num_inputs, num_outputs, mean_initializer, name, stochastic=True, reuse=False):
    with tf.variable_scope(name) as scope:
        W = tf.get_variable('W', [num_inputs, num_outputs], initializer=mean_initializer, dtype=tf.float32,
                            trainable=False)
        log_sigma2 = tf.get_variable('log_sigma2', [num_inputs, num_outputs],
                                     initializer=tf.constant_initializer(-3.0),
                                     dtype=tf.float32, trainable=True)
        mu = tf.matmul(input_tensor, W)
        si = tf.sqrt(tf.matmul(input_tensor * input_tensor, tf.exp(log_sigma2)) + 1e-16)
        output = mu
        if stochastic:
            output += tf.random_normal(mu.shape, mean=0, stddev=1) * si

        # summaries
        if not reuse:
            error = 0.5*(1.0+tf.erf((-mu)/tf.sqrt(2.0)/si))
            tf.summary.scalar('error', tf.reduce_sum(error))
            #tf.summary.histogram('log_sigma2', log_sigma2)
    return output 
Author: da-molchanov | Project: variance-networks | Lines: 21 | Source: layers.py
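
The error summary above is the probability that the Gaussian pre-activation N(mu, si^2) falls below zero: P(output < 0) = Phi(-mu/si) = 0.5 * (1 + erf(-mu / (sqrt(2) * si))). A minimal sketch of that identity, verified with scipy (scipy is used here only for illustration and is not a dependency of the original repository):

import numpy as np
from scipy.special import erf
from scipy.stats import norm

mu, si = 0.7, 1.3
p_erf = 0.5 * (1.0 + erf(-mu / np.sqrt(2.0) / si))  # the erf form used in the layer
p_cdf = norm.cdf(0.0, loc=mu, scale=si)             # the same probability via the CDF
assert np.isclose(p_erf, p_cdf)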

Example 3: get_activation

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import erf [as alias]
def get_activation(activation_fun: Optional[str]):
    if activation_fun is None:
        return None
    activation_fun = activation_fun.lower()
    if activation_fun == 'linear':
        return None
    if activation_fun == 'tanh':
        return tf.tanh
    if activation_fun == 'relu':
        return tf.nn.relu
    if activation_fun == 'leaky_relu':
        return tf.nn.leaky_relu
    if activation_fun == 'elu':
        return tf.nn.elu
    if activation_fun == 'selu':
        return tf.nn.selu
    if activation_fun == 'gelu':
        def gelu(input_tensor):
            cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
            return input_tensor * cdf
        return gelu
    else:
        raise ValueError("Unknown activation function '%s'!" % activation_fun) 
Author: microsoft | Project: tf-gnn-samples | Lines: 25 | Source: utils.py
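
A hypothetical usage sketch for the helper above (assumes get_activation is in scope; the input tensor is made up for illustration):

import tensorflow as tf

act = get_activation('gelu')          # returns the inner gelu closure
x = tf.constant([-1.0, 0.0, 1.0])
y = act(x)                            # erf-based GELU applied elementwise
# get_activation('linear') and get_activation(None) both return None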

Example 4: _NormalDistributionCDF

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import erf [as alias]
def _NormalDistributionCDF(x, stddev):
  """Evaluates the CDF of the normal distribution.

  Normal distribution with mean 0 and standard deviation stddev,
  evaluated at x.

  Input and output `Tensor`s have matching shapes.

  Args:
    x: a `Tensor`
    stddev: a `Tensor` with the same shape as `x`.

  Returns:
    a `Tensor` with the same shape as `x`.

  """
  return 0.5 * (1.0 + tf.erf(x / (math.sqrt(2) * stddev + 1e-20))) 
Author: ZhenYangIACAS | Project: NMT_GAN | Lines: 19 | Source: expert_utils.py

Example 5: gelu

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import erf [as alias]
def gelu(inputs, scope='gelu', reuse=None):
    """Gaussian Error Linear Unit.
    
    This is a smoother version of the ReLU.
    Paper: https://arxiv.org/abs/1606.08415

    Args:
        - inputs: float Tensor
        - scope: scope name
        - reuse: whether to reuse

    Returns:
        `inputs` with the gelu activation applied.
    """
    with tf.variable_scope(scope, reuse=reuse):
        alpha = 0.5 * (1.0 + tf.erf(inputs / tf.sqrt(2.0)))
        return inputs * alpha 
Author: SeanLee97 | Project: clfzoo | Lines: 19 | Source: __init__.py
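
Note that every gelu in this article implements the same exact, erf-based definition, with Phi the standard normal CDF:

    GELU(x) = x * Phi(x) = 0.5 * x * (1 + erf(x / sqrt(2)))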

Example 6: get_activation

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import erf [as alias]
def get_activation(activation_fun: Optional[str]) -> Optional[Callable]:
    if activation_fun is None:
        return None
    activation_fun = activation_fun.lower()
    if activation_fun == 'linear':
        return None
    if activation_fun == 'tanh':
        return tf.tanh
    if activation_fun == 'relu':
        return tf.nn.relu
    if activation_fun == 'leaky_relu':
        return tf.nn.leaky_relu
    if activation_fun == 'elu':
        return tf.nn.elu
    if activation_fun == 'selu':
        return tf.nn.selu
    if activation_fun == 'gelu':
        def gelu(input_tensor):
            cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
            return input_tensor * cdf
        return gelu
    else:
        raise ValueError("Unknown activation function '%s'!" % activation_fun) 
Author: microsoft | Project: dpu-utils | Lines: 25 | Source: activation.py

Example 7: __init__

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import erf [as alias]
def __init__(self, config):
        self.config = config
        self.n_steps = 10
        self.n_input, self.n_hidden =  4, 2
        self.state = tf.Variable(tf.random_normal(shape=[1, 4]))
        self.lstm = tf.contrib.rnn.BasicLSTMCell(self.n_hidden, forget_bias=1.0, state_is_tuple=False)
        self.Wc, self.bc = self.init_controller_vars()
        self.Wv, self.bv = self.init_value_vars()

        # Other functions used in the paper
        # self.full_list_unary = {1:lambda x:x ,2:lambda x: -x, 3: tf.abs, 4:lambda x : tf.pow(x,2),5:lambda x : tf.pow(x,3),
        #   6:tf.sqrt,7:lambda x: tf.Variable(tf.truncated_normal([1], stddev=0.08))*x,
        #   8:lambda x : x + tf.Variable(tf.truncated_normal([1], stddev=0.08)),9:lambda x: tf.log(tf.abs(x)+10e-8),
        #   10:tf.exp,11:tf.sin,12:tf.sinh,13:tf.cosh,14:tf.tanh,15:tf.asinh,16:tf.atan,17:lambda x: tf.sin(x)/x,
        #   18:lambda x : tf.maximum(x,0),19:lambda x : tf.minimum(x,0),20:tf.sigmoid,21:lambda x:tf.log(1+tf.exp(x)),
        #   22:lambda x:tf.exp(-tf.pow(x,2)),23:tf.erf,24:lambda x: tf.Variable(tf.truncated_normal([1], stddev=0.08))}
        #
        # self.full_list_binary = {1:lambda x,y: x+y,2:lambda x,y:x*y,3:lambda x,y:x-y,4:lambda x,y:x/(y+10e-8),
        # 5:lambda x,y:tf.maximum(x,y),6:lambda x,y: tf.sigmoid(x)*y,7:lambda x,y:tf.exp(-tf.Variable(tf.truncated_normal([1], stddev=0.08))*tf.pow(x-y,2)),
        # 8:lambda x,y:tf.exp(-tf.Variable(tf.truncated_normal([1], stddev=0.08))*tf.abs(x-y)),
        # 9:lambda x,y: tf.Variable(tf.truncated_normal([1], stddev=0.08))*x + (1-tf.Variable(tf.truncated_normal([1], stddev=0.08)))*y}
        #
        # self.unary = {1:lambda x:x ,2:lambda x: -x, 3: lambda x: tf.maximum(x,0), 4:lambda x : tf.pow(x,2),5:tf.tanh}
        # binary = {1:lambda x,y: x+y,2:lambda x,y:x*y,3:lambda x,y:x-y,4:lambda x,y:tf.maximum(x,y),5:lambda x,y: tf.sigmoid(x)*y}
        # inputs = {1:lambda x:x , 2:lambda x:0, 3: lambda x:3.14159265,4: lambda x : 1, 5: lambda x: 1.61803399} 
Author: Neoanarika | Project: Searching-for-activation-functions | Lines: 27 | Source: rnn_controller.py

Example 8: test_forward_unary

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import erf [as alias]
def test_forward_unary():
    def _test_forward_unary(op, a_min=1, a_max=5, dtype=np.float32):
        """test unary operators"""
        np_data = np.random.uniform(a_min, a_max, size=(2, 3, 5)).astype(dtype)
        tf.reset_default_graph()
        with tf.Graph().as_default():
            in_data = tf.placeholder(dtype, (2, 3, 5), name="in_data")
            out = op(in_data)
            compare_tf_with_tvm([np_data], ['in_data:0'], out.name)

    _test_forward_unary(tf.acos, -1, 1)
    _test_forward_unary(tf.asin, -1, 1)
    _test_forward_unary(tf.atanh, -1, 1)
    _test_forward_unary(tf.sinh)
    _test_forward_unary(tf.cosh)
    _test_forward_unary(tf.acosh)
    _test_forward_unary(tf.asinh)
    _test_forward_unary(tf.atan)
    _test_forward_unary(tf.sin)
    _test_forward_unary(tf.cos)
    _test_forward_unary(tf.tan)
    _test_forward_unary(tf.tanh)
    _test_forward_unary(tf.erf)
    _test_forward_unary(tf.log)
    _test_forward_unary(tf.log1p) 
Author: apache | Project: incubator-tvm | Lines: 27 | Source: test_forward.py
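
tf.erf is defined for all real inputs and saturates quickly toward ±1. A quick check of representative values (again assuming the TensorFlow 1.x session API):

import tensorflow as tf

with tf.Session() as sess:
    # Prints approximately [-0.99998 0. 0.99998]
    print(sess.run(tf.erf(tf.constant([-3.0, 0.0, 3.0]))))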

Example 9: gelu

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import erf [as alias]
def gelu(input_tensor):
    """Gaussian Error Linear Unit.

    This is a smoother version of the RELU.
    Original paper: https://arxiv.org/abs/1606.08415

    Args:
      input_tensor: float Tensor to perform activation.

    Returns:
      `input_tensor` with the GELU activation applied.
    """
    cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
    return input_tensor * cdf 
Author: Socialbird-AILab | Project: BERT-Classification-Tutorial | Lines: 16 | Source: modeling.py
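
For comparison, many BERT codebases use the tanh approximation of GELU from the same paper instead of the exact erf form; a sketch of that variant (not taken from this repository):

import numpy as np
import tensorflow as tf

def gelu_tanh_approx(x):
    # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3)))
    return 0.5 * x * (1.0 + tf.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * tf.pow(x, 3))))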

Example 10: gelu

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import erf [as alias]
def gelu(input_tensor):
  """Gaussian Error Linear Unit.

  This is a smoother version of the RELU.
  Original paper: https://arxiv.org/abs/1606.08415

  Args:
    input_tensor: float Tensor to perform activation.

  Returns:
    `input_tensor` with the GELU activation applied.
  """
  cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
  return input_tensor * cdf 
Author: fennuDetudou | Project: tudouNLP | Lines: 16 | Source: modeling.py

Example 11: gelu

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import erf [as alias]
def gelu(input_tensor):
    """Gaussian Error Linear Unit.

    This is a smoother version of the RELU.
    Original paper: https://arxiv.org/abs/1606.08415

    Args:
        input_tensor: float Tensor to perform activation.

    Returns:
        `input_tensor` with the GELU activation applied.
    """
    cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
    return input_tensor * cdf 
Author: CoNLL-UD-2018 | Project: UDPipe-Future | Lines: 16 | Source: bert_wrapper.py

Example 12: fully_variance_conv_2d

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import erf [as alias]
def fully_variance_conv_2d(input_tensor, filter_shape, input_channels, output_channels, mean_initializer, padding,
                           name, stochastic=True, reuse=False):
    with tf.variable_scope(name) as scope:
        kernel = tf.get_variable('kernel',
                                 [filter_shape[0], filter_shape[1], input_channels, output_channels],
                                 initializer=mean_initializer, dtype=tf.float32, trainable=False)
        log_sigma2 = tf.get_variable('log_sigma2', [filter_shape[0], filter_shape[1], input_channels, output_channels],
                                     initializer=tf.constant_initializer(-3.0),
                                     dtype=tf.float32, trainable=True)
        conved_mu = tf.nn.conv2d(input_tensor, kernel, [1, 1, 1, 1], padding=padding)
        conved_si = tf.sqrt(tf.nn.conv2d(input_tensor * input_tensor,
                                         tf.exp(log_sigma2), [1, 1, 1, 1],
                                         padding=padding)+1e-16)
        output = conved_mu
        if stochastic:
            output += tf.random_normal(conved_mu.shape, mean=0, stddev=1) * conved_si

        # summaries
        if not reuse:
            error = 0.5*(1.0+tf.erf((-conved_mu)/tf.sqrt(2.0)/conved_si))
            tf.summary.scalar('error', tf.reduce_sum(error))
            #tf.summary.histogram('log_sigma2', log_sigma2)
    return output


# PHASE TRANSITION LAYERS 
Author: da-molchanov | Project: variance-networks | Lines: 28 | Source: layers.py

Example 13: pt_dense

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import erf [as alias]
def pt_dense(input_tensor, num_inputs, num_outputs, name, stochastic=True, with_bias=True, reuse=False):
    with tf.variable_scope(name) as scope:
        W = tf.get_variable('W', [num_inputs, num_outputs], initializer=tf.truncated_normal_initializer(1e-2),
                            dtype=tf.float32, trainable=True)
        log_alpha = tf.get_variable('log_alpha', [], initializer=tf.constant_initializer(-10.0), dtype=tf.float32,
                                    trainable=True)
        log_alpha = tf.clip_by_value(log_alpha, -20.0, 20.0)

        if not reuse:
            # computing reg
            k1, k2, k3 = 0.63576, 1.8732, 1.48695
            C = -k1
            mdkl = k1 * tf.nn.sigmoid(k2 + k3 * log_alpha) - 0.5 * tf.log1p(tf.exp(-log_alpha)) + C
            kl = -tf.reduce_sum(mdkl) * tf.reduce_prod(tf.cast(W.get_shape(), tf.float32))
            tf.add_to_collection('kl_loss', kl)

        # computing output
        mu = tf.matmul(input_tensor, W)
        si = tf.sqrt(tf.matmul(input_tensor * input_tensor, tf.exp(log_alpha) * W * W)   + 1e-16)
        output = mu
        if stochastic:
            output += tf.random_normal(mu.shape, mean=0, stddev=1) * si
        if with_bias:
            biases = tf.get_variable('biases', num_outputs, tf.float32, tf.constant_initializer(0.0))
            output = tf.nn.bias_add(output, biases)

        # summaries
        if not reuse:
            if with_bias:
                error = 0.5*(1.0+tf.erf((-mu-biases)/tf.sqrt(2.0)/si))
            else:
                error = 0.5*(1.0+tf.erf((-mu)/tf.sqrt(2.0)/si))
            tf.summary.scalar('error', tf.reduce_sum(error))
            tf.summary.scalar('log_alpha', log_alpha)
            tf.add_to_collection('log_alpha', log_alpha)
    return output 
Author: da-molchanov | Project: variance-networks | Lines: 38 | Source: layers.py
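
The constants k1, k2, k3 = 0.63576, 1.8732, 1.48695 in pt_dense (and pt_conv_2d below) appear to come from "Variational Dropout Sparsifies Deep Neural Networks" (Molchanov et al., 2017), where they parameterize a tight approximation of the negative KL divergence used as the regularizer; the mdkl expression computes exactly

    -KL ≈ k1 * sigmoid(k2 + k3 * log_alpha) - 0.5 * log(1 + exp(-log_alpha)) + C,  with C = -k1.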

Example 14: pt_conv_2d

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import erf [as alias]
def pt_conv_2d(input_tensor, filter_shape, input_channels, output_channels, padding, name, stochastic=True,
               with_bias=True, reuse=False):
    with tf.variable_scope(name) as scope:
        kernel = tf.get_variable('kernel', [filter_shape[0], filter_shape[1], input_channels, output_channels],
                                 initializer=tf.contrib.layers.xavier_initializer(seed=322), dtype=tf.float32,
                                 trainable=True)
        log_alpha = tf.get_variable('log_alpha', [], initializer=tf.constant_initializer(-10.0), dtype=tf.float32,
                                    trainable=True)
        log_alpha = tf.clip_by_value(log_alpha, -20.0, 20.0)

        if not reuse:
            # computing reg
            k1, k2, k3 = 0.63576, 1.8732, 1.48695
            C = -k1
            mdkl = k1 * tf.nn.sigmoid(k2 + k3 * log_alpha) - 0.5 * tf.log1p(tf.exp(-log_alpha)) + C
            kl = -tf.reduce_sum(mdkl) * tf.reduce_prod(tf.cast(kernel.get_shape(), tf.float32))
            tf.add_to_collection('kl_loss', kl)

        # computing output
        conved_mu = tf.nn.conv2d(input_tensor, kernel, [1, 1, 1, 1], padding=padding)
        conved_si = tf.sqrt(tf.nn.conv2d(input_tensor * input_tensor,
                                         tf.exp(log_alpha) * kernel * kernel,
                                         [1, 1, 1, 1], padding=padding)+1e-16)
        output = conved_mu
        if stochastic:
            output += tf.random_normal(conved_mu.shape, mean=0, stddev=1) * conved_si
        if with_bias:
            biases = tf.get_variable('biases', output_channels, tf.float32, tf.constant_initializer(0.0))
            output = tf.nn.bias_add(output, biases)

        # summaries
        if not reuse:
            if with_bias:
                error = 0.5*(1.0+tf.erf((-conved_mu-biases)/tf.sqrt(2.0)/conved_si))
            else:
                error = 0.5*(1.0+tf.erf((-conved_mu)/tf.sqrt(2.0)/conved_si))
            tf.summary.scalar('error', tf.reduce_sum(error))
            tf.summary.scalar('log_alpha', log_alpha)
            tf.add_to_collection('log_alpha', log_alpha)

    return output 
Author: da-molchanov | Project: variance-networks | Lines: 43 | Source: layers.py

Example 15: gelu

# Required import: import tensorflow [as alias]
# Alternatively: from tensorflow import erf [as alias]
def gelu(input_tensor):
	"""Gaussian Error Linear Unit.

	This is a smoother version of the RELU.
	Original paper: https://arxiv.org/abs/1606.08415

	Args:
		input_tensor: float Tensor to perform activation.

	Returns:
		`input_tensor` with the GELU activation applied.
	"""
	cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
	return input_tensor * cdf 
Author: yyht | Project: BERT | Lines: 16 | Source: bert_adapter_modules.py


Note: The tensorflow.erf examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or distribution should follow the corresponding project's license. Please do not reproduce without permission.