

Python numpy.tanh Method Code Examples

This article collects typical usage examples of the Python method autograd.numpy.tanh. If you are unsure what numpy.tanh does, how to call it, or what real code using it looks like, the curated examples below should help. You can also explore further usage examples from the enclosing autograd.numpy module.


The 15 code examples of numpy.tanh below are ordered by popularity by default.
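Before diving in: autograd.numpy is a drop-in wrapper around numpy that records operations so autograd can differentiate through them, and np.tanh is one such traced primitive. A minimal sketch of what this buys you:

import autograd.numpy as np
from autograd import grad

# d/dx tanh(x) = 1 - tanh(x)**2 = 1 / cosh(x)**2
d_tanh = grad(np.tanh)
print(d_tanh(1.0))             # 0.41997...
print(1.0 / np.cosh(1.0)**2)   # matches the analytic derivative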

Example 1: rnn_predict

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import tanh [as alias]
def rnn_predict(params, inputs):
    def update_rnn(input, hiddens):
        return np.tanh(concat_and_multiply(params['change'], input, hiddens))

    def hiddens_to_output_probs(hiddens):
        output = concat_and_multiply(params['predict'], hiddens)
        return output - logsumexp(output, axis=1, keepdims=True)     # Normalize log-probs.

    num_sequences = inputs.shape[1]
    hiddens = np.repeat(params['init hiddens'], num_sequences, axis=0)
    output = [hiddens_to_output_probs(hiddens)]

    for input in inputs:  # Iterate over time steps.
        hiddens = update_rnn(input, hiddens)
        output.append(hiddens_to_output_probs(hiddens))
    return output 
Developer: HIPS, Project: autograd, Lines: 18, Source: rnn.py
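The snippet calls a concat_and_multiply helper that is defined elsewhere in rnn.py. A minimal sketch consistent with how it is used above (the exact upstream definition may differ):

def concat_and_multiply(weights, *args):
    # Stack the inputs side by side, append a ones column for the bias,
    # and apply the combined weight matrix in one matmul.
    cat_state = np.hstack(args + (np.ones((args[0].shape[0], 1)),))
    return np.dot(cat_state, weights)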

Example 2: lstm_predict

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import tanh [as alias]
def lstm_predict(params, inputs):
    def update_lstm(input, hiddens, cells):
        change  = np.tanh(concat_and_multiply(params['change'], input, hiddens))
        forget  = sigmoid(concat_and_multiply(params['forget'], input, hiddens))
        ingate  = sigmoid(concat_and_multiply(params['ingate'], input, hiddens))
        outgate = sigmoid(concat_and_multiply(params['outgate'], input, hiddens))
        cells   = cells * forget + ingate * change
        hiddens = outgate * np.tanh(cells)
        return hiddens, cells

    def hiddens_to_output_probs(hiddens):
        output = concat_and_multiply(params['predict'], hiddens)
        return output - logsumexp(output, axis=1, keepdims=True) # Normalize log-probs.

    num_sequences = inputs.shape[1]
    hiddens = np.repeat(params['init hiddens'], num_sequences, axis=0)
    cells   = np.repeat(params['init cells'],   num_sequences, axis=0)

    output = [hiddens_to_output_probs(hiddens)]
    for input in inputs:  # Iterate over time steps.
        hiddens, cells = update_lstm(input, hiddens, cells)
        output.append(hiddens_to_output_probs(hiddens))
    return output 
Developer: HIPS, Project: autograd, Lines: 25, Source: lstm.py
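For reference, update_lstm is the standard LSTM cell update. Writing each concat_and_multiply as an affine map over [x_t, h_{t-1}, 1], with sigma for the sigmoid helper and ⊙ for the elementwise product:

    g_t = tanh(W_change   · [x_t, h_{t-1}, 1])
    f_t = sigma(W_forget  · [x_t, h_{t-1}, 1])
    i_t = sigma(W_ingate  · [x_t, h_{t-1}, 1])
    o_t = sigma(W_outgate · [x_t, h_{t-1}, 1])
    c_t = f_t ⊙ c_{t-1} + i_t ⊙ g_t
    h_t = o_t ⊙ tanh(c_t)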

Example 3: setup

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import tanh [as alias]
def setup(self):
        self.batch_size = 16
        self.dtype = "float32"
        self.D = 2**10
        self.x = 0.01 * np.random.randn(self.batch_size,self.D).astype(self.dtype)
        self.W1 = 0.01 * np.random.randn(self.D,self.D).astype(self.dtype)
        self.b1 = 0.01 * np.random.randn(self.D).astype(self.dtype)
        self.Wout = 0.01 * np.random.randn(self.D,1).astype(self.dtype)
        self.bout = 0.01 * np.random.randn(1).astype(self.dtype)
        self.l = (np.random.rand(self.batch_size,1) > 0.5).astype(self.dtype)
        self.n = 50

        def autograd_rnn(params, x, label, n):
            W, b, Wout, bout = params
            h1 = x
            for i in range(n):
                h1 = np.tanh(np.dot(h1, W) + b)
            logit = np.dot(h1, Wout) + bout
            loss = -np.sum(label * logit - (
                    logit + np.log(1 + np.exp(-logit))))
            return loss

        self.fn = autograd_rnn
        self.grad_fn = grad(self.fn) 
Developer: HIPS, Project: autograd, Lines: 26, Source: bench_rnn.py
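A hypothetical driver for this benchmark (the class name RNNBench is an assumption; only fn and grad_fn come from the snippet above):

bench = RNNBench()  # hypothetical container class for setup()
bench.setup()
params = (bench.W1, bench.b1, bench.Wout, bench.bout)
loss = bench.fn(params, bench.x, bench.l, bench.n)
grads = bench.grad_fn(params, bench.x, bench.l, bench.n)  # tuple of gradients, one per array in params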

Example 4: test_grad_and_aux

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import tanh [as alias]
def test_grad_and_aux():
    A = npr.randn(5, 4)
    x = npr.randn(4)

    f = lambda x: (np.sum(np.dot(A, x)), x**2)
    g = lambda x: np.sum(np.dot(A, x))

    assert len(grad_and_aux(f)(x)) == 2

    check_equivalent(grad_and_aux(f)(x)[0], grad(g)(x))
    check_equivalent(grad_and_aux(f)(x)[1], x**2)

## This behavior is no longer supported
# def test_make_ggnvp_broadcasting():
#   A = npr.randn(4, 5)
#   x = npr.randn(10, 4)
#   v = npr.randn(10, 4)

#   fun = lambda x: np.tanh(np.dot(x, A))
#   res1 = np.stack([_make_explicit_ggnvp(fun)(xi)(vi) for xi, vi in zip(x, v)])
#   res2 = make_ggnvp(fun)(x)(v)
#   check_equivalent(res1, res2) 
Developer: HIPS, Project: autograd, Lines: 24, Source: test_wrappers.py
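grad_and_aux differentiates the first element of the returned pair and passes the second element through untouched, which is what the assertions above verify. A minimal sketch:

x = npr.randn(3)
f = lambda x: (np.sum(x**2), x + 1)  # (scalar loss, auxiliary output)
g, aux = grad_and_aux(f)(x)
# g   == 2 * x   (gradient of the first output)
# aux == x + 1   (second output, returned unchanged)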

Example 5: forward_pass

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import tanh [as alias]
def forward_pass(self, X):
        self.last_input = X
        n_samples, n_timesteps, input_shape = X.shape
        states = np.zeros((n_samples, n_timesteps + 1, self.hidden_dim))
        states[:, -1, :] = self.hprev.copy()
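        # The extra final time slot holds hprev, so states[:, i - 1, :]
        # wraps around to the previous hidden state when i == 0.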
        p = self._params

        for i in range(n_timesteps):
            states[:, i, :] = np.tanh(np.dot(X[:, i, :], p["W"]) + np.dot(states[:, i - 1, :], p["U"]) + p["b"])

        self.states = states
        self.hprev = states[:, n_timesteps - 1, :].copy()
        if self.return_sequences:
            return states[:, 0:-1, :]
        else:
            return states[:, -2, :] 
Developer: rushter, Project: MLAlgorithms, Lines: 18, Source: rnn.py

Example 6: build_mlp

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import tanh [as alias]
def build_mlp(layer_sizes, activation=np.tanh, output_activation=lambda x: x):
    """Constructor for multilayer perceptron.

    @param layer_sizes: list of integers
                        list of layer sizes in the perceptron.
    @param activation: function (default: np.tanh)
                       what activation to use after first N - 1 layers.
    @param output_activation: function (default: linear)
                              what activation to use after last layer.
    @return predict: function
                     used to predict y_hat
    @return log_likelihood: function
                            used to compute log likelihood
    @return parser: WeightsParser object
                    object to organize weights
    """
    parser = WeightsParser()
    for i, shape in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
        parser.add_shape(('weights', i), shape)
        parser.add_shape(('biases', i), (1, shape[1]))

    def predict(weights, X):
        cur_X = copy(X.T)
        for layer in range(len(layer_sizes) - 1):
            cur_W = parser.get(weights, ('weights', layer))
            cur_B = parser.get(weights, ('biases', layer))
            cur_Z = np.dot(cur_X, cur_W) + cur_B
            cur_X = activation(cur_Z)
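        # cur_Z still holds the last layer's pre-activation here, so the
        # final activation(cur_Z) result is discarded and output_activation
        # is applied to the output layer instead.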
        return output_activation(cur_Z.T)

    def log_likelihood(weights, X, y):
        y_hat = predict(weights, X)
        return mse(y.T, y_hat.T)

    return predict, log_likelihood, parser 
Developer: dtak, Project: tree-regularization-public, Lines: 37, Source: model.py

Example 7: sigmoid

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import tanh [as alias]
def sigmoid(x):
    return 0.5 * (np.tanh(x) + 1) 
Developer: dtak, Project: tree-regularization-public, Lines: 4, Source: model.py

Example 8: sigmoid

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import tanh [as alias]
def sigmoid(x):
    return 0.5*(np.tanh(x) + 1.0)   # Output ranges from 0 to 1. 
Developer: HIPS, Project: autograd, Lines: 4, Source: rnn.py
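Both sigmoid helpers above (and the one in Example 11 below) rely on the identity

    0.5 * (tanh(x) + 1) = 1 / (1 + exp(-2x)) = sigma(2x),

i.e. a logistic curve compressed by a factor of two along x. The output still lies in (0, 1), and tanh avoids the overflow that a naive 1 / (1 + np.exp(-x)) can hit for large negative x.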

Example 9: nonlinearity

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import tanh [as alias]
def nonlinearity(self, x):
        return np.tanh(x) 
Developer: HIPS, Project: autograd, Lines: 4, Source: convnet.py

Example 10: neural_net_predict

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import tanh [as alias]
def neural_net_predict(params, inputs):
    """Implements a deep neural network for classification.
       params is a list of (weights, bias) tuples.
       inputs is an (N x D) matrix.
       returns normalized class log-probabilities."""
    for W, b in params:
        outputs = np.dot(inputs, W) + b
        inputs = np.tanh(outputs)
    return outputs - logsumexp(outputs, axis=1, keepdims=True) 
Developer: HIPS, Project: autograd, Lines: 11, Source: neural_net.py
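A hypothetical way to construct params for neural_net_predict (the layer sizes and init scale are assumptions):

import autograd.numpy.random as npr

layer_sizes = [784, 128, 10]  # assumed architecture
params = [(0.1 * npr.randn(m, n),   # weight matrix W, shape (m, n)
           0.1 * npr.randn(n))      # bias vector b, shape (n,)
          for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]
log_probs = neural_net_predict(params, inputs)  # inputs: an (N x 784) matrix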

Example 11: sigmoid

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import tanh [as alias]
def sigmoid(x):
    return 0.5 * (np.tanh(x) + 1.0)
Developer: HIPS, Project: autograd, Lines: 3, Source: generative_adversarial_net.py

Example 12: nn_predict

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import tanh [as alias]
def nn_predict(params, inputs, nonlinearity=np.tanh):
    for W, b in params:
        outputs = np.dot(inputs, W) + b
        inputs = nonlinearity(outputs)
    return outputs 
Developer: HIPS, Project: autograd, Lines: 7, Source: neural_net_regression.py

Example 13: make_nn_funs

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import tanh [as alias]
def make_nn_funs(layer_sizes, L2_reg, noise_variance, nonlinearity=np.tanh):
    """These functions implement a standard multi-layer perceptron,
    vectorized over both training examples and weight samples."""
    shapes = list(zip(layer_sizes[:-1], layer_sizes[1:]))
    num_weights = sum((m+1)*n for m, n in shapes)

    def unpack_layers(weights):
        num_weight_sets = len(weights)
        for m, n in shapes:
            yield weights[:, :m*n]     .reshape((num_weight_sets, m, n)),\
                  weights[:, m*n:m*n+n].reshape((num_weight_sets, 1, n))
            weights = weights[:, (m+1)*n:]

    def predictions(weights, inputs):
        """weights is shape (num_weight_samples x num_weights)
           inputs  is shape (num_datapoints x D)"""
        inputs = np.expand_dims(inputs, 0)
        for W, b in unpack_layers(weights):
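            # 'mnd,mdo->mno': batched matmul over m weight samples,
            # with n datapoints, d input units, and o output units.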
            outputs = np.einsum('mnd,mdo->mno', inputs, W) + b
            inputs = nonlinearity(outputs)
        return outputs

    def logprob(weights, inputs, targets):
        log_prior = -L2_reg * np.sum(weights**2, axis=1)
        preds = predictions(weights, inputs)
        log_lik = -np.sum((preds - targets)**2, axis=1)[:, 0] / noise_variance
        return log_prior + log_lik

    return num_weights, predictions, logprob 
Developer: HIPS, Project: autograd, Lines: 31, Source: bayesian_neural_net.py

Example 14: test_make_jvp

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import tanh [as alias]
def test_make_jvp():
    A = npr.randn(3, 5)
    x = npr.randn(5)
    v = npr.randn(5)
    fun = lambda x: np.tanh(np.dot(A, x))

    jvp_explicit = lambda x: lambda v: np.dot(jacobian(fun)(x), v)
    jvp = make_jvp(fun)

    check_equivalent(jvp_explicit(x)(v), jvp(x)(v)[1]) 
Developer: HIPS, Project: autograd, Lines: 12, Source: test_wrappers.py

Example 15: test_make_ggnvp

# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import tanh [as alias]
def test_make_ggnvp():
    A = npr.randn(5, 4)
    x = npr.randn(4)
    v = npr.randn(4)

    fun = lambda x: np.dot(A, x)
    check_equivalent(make_ggnvp(fun)(x)(v), _make_explicit_ggnvp(fun)(x)(v))

    fun2 = lambda x: np.tanh(np.dot(A, x))
    check_equivalent(make_ggnvp(fun2)(x)(v), _make_explicit_ggnvp(fun2)(x)(v)) 
Developer: HIPS, Project: autograd, Lines: 12, Source: test_wrappers.py
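For context, make_ggnvp(f)(x)(v) computes a generalized Gauss-Newton vector product. Assuming the default inner objective is a half sum of squares (so its Hessian is the identity), the product reduces to

    J_f(x)^T J_f(x) v,

where J_f(x) is the Jacobian of f at x; _make_explicit_ggnvp builds this same quantity from the explicit Jacobian for comparison.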


Note: The autograd.numpy.tanh examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors, and any distribution or reuse should follow the corresponding project's license. Please do not reproduce without permission.