

Python tensor.tanh Method Code Examples

This article collects typical usage examples of the theano.tensor.tanh method in Python. If you are wondering what tensor.tanh does, how to call it, or what idiomatic uses look like, the curated examples below may help. You can also explore further usage examples of the enclosing theano.tensor module.


The following presents 15 code examples of the tensor.tanh method, ordered by popularity by default.
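Before diving into the examples, here is a minimal, self-contained usage sketch of T.tanh itself (it assumes a working Theano installation; the variable names are purely illustrative):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')            # symbolic input matrix
y = T.tanh(x)                # elementwise hyperbolic tangent
f = theano.function([x], y)  # compile the graph into a callable

data = np.array([[0.0, 1.0], [-2.0, 3.0]], dtype=theano.config.floatX)
print(f(data))               # every entry is squashed into (-1, 1)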

Example 1: __init__

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tanh [as alias]
def __init__(self, input_dim, output_dim, with_batch=True,
             activation='tanh', inner_activation='hard_sigmoid',
             name='LSTM_normal'):
        """
        Initialize neural network.
        """
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.with_batch = with_batch
        self.name = name
        self.inner_activation = activations.get(inner_activation)
        self.forget_bias_init = initializations.get('one')
        self.activation = activations.get(activation)
        self.build()
Developer: lingluodlut, Project: Att-ChemdNER, Lines: 18, Source: nn.py
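The activations.get and initializations.get calls above are presumably the Keras-style lookup helpers shipped with the Att-ChemdNER repository, which map a name such as 'tanh' or 'hard_sigmoid' to the corresponding tensor operation.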

Example 2: __init__

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tanh [as alias]
def __init__(self, input_size, output_size, hidden_sizes=[], bias_shift=0.0, name=None, hidden_activation=T.tanh, activation=identity, dropout_keep=1, dropout_input=True, dropout_output=False):
        self.input_size = input_size
        self.output_size = output_size
        self.name = name if name is not None else get_unique_name(type(self))

        self.dropout_keep = dropout_keep
        self.dropout_output = dropout_output

        self.layers = []
        for i, isize, osize in zip(itertools.count(),
                                    [input_size]+hidden_sizes,
                                    hidden_sizes+[output_size]):
            cur_dropout_keep = 1 if (i==0 and not dropout_input) else dropout_keep
            if i == len(hidden_sizes):
                # Last layer
                self.layers.append(Layer(isize, osize, bias_shift=bias_shift, name="{}[output]".format(self.name), activation=activation, dropout_keep=cur_dropout_keep))
            else:
                self.layers.append(Layer(isize, osize, name="{}[hidden{}]".format(self.name,i), activation=hidden_activation, dropout_keep=cur_dropout_keep)) 
Developer: hexahedria, Project: gated-graph-transformer-network, Lines: 20, Source: layer.py

Example 3: get_output

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tanh [as alias]
def get_output(self, train=False):
        X = self.get_input(train)
        padded_mask = self.get_padded_shuffled_mask(train, X, pad=1)
        X = X.dimshuffle((1, 0, 2))

        x_z = T.dot(X, self.W_z) + self.b_z
        x_r = T.dot(X, self.W_r) + self.b_r
        x_h = T.tanh(T.dot(X, self.Pmat)) + self.b_h
        outputs, updates = theano.scan(
            self._step,
            sequences=[x_z, x_r, x_h, padded_mask],
            outputs_info=T.unbroadcast(alloc_zeros_matrix(X.shape[1], self.output_dim), 1),
            non_sequences=[self.U_r, self.U_h],
            truncate_gradient=self.truncate_gradient)
        if self.return_sequences:
            return outputs.dimshuffle((1, 0, 2))
        return outputs[-1] 
Developer: lllcho, Project: CAPTCHA-breaking, Lines: 19, Source: recurrent.py

Example 4: __init__

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tanh [as alias]
def __init__(self):
        super(M, self).__init__()

        x = T.matrix('x') # input, target
        self.w = module.Member(T.matrix('w')) # weights
        self.a = module.Member(T.vector('a')) # hid bias
        self.b = module.Member(T.vector('b')) # output bias

        self.hid = T.tanh(T.dot(x, self.w) + self.a)
        hid = self.hid

        self.out = T.tanh(T.dot(hid, self.w.T) + self.b)
        out = self.out

        self.err = 0.5 * T.sum((out - x)**2)
        err = self.err

        params = [self.w, self.a, self.b]

        gparams = T.grad(err, params)

        updates = [(p, p - 0.01 * gp) for p, gp in zip(params, gparams)]

        self.step = module.Method([x], err, updates=dict(updates)) 
Developer: muhanzhang, Project: D-VAE, Lines: 26, Source: aa.py
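This is a tied-weight autoencoder built on Theano's old module API: the decoder reuses the transpose of the encoder weights (self.w.T), and step performs a single plain SGD update with a fixed learning rate of 0.01.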

Example 5: encode

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tanh [as alias]
def encode(self, x):
        """Helper function to compute the encoding of a datapoint to z"""
        h = np.zeros((self.hidden_units_encoder,1))

        W_xhe = self.params["W_xhe"].get_value()
        b_xhe = self.params["b_xhe"].get_value()
        W_hhe = self.params["W_hhe"].get_value()
        b_hhe = self.params["b_hhe"].get_value()
        W_hmu = self.params["W_hmu"].get_value()
        b_hmu = self.params["b_hmu"].get_value()
        W_hsigma = self.params["W_hsigma"].get_value()
        b_hsigma = self.params["b_hsigma"].get_value()

        for t in range(x.shape[0]):  # xrange in the original Python 2 source
            h = np.tanh(W_xhe.dot(x[t,:,np.newaxis]) + b_xhe + W_hhe.dot(h) + b_hhe)

        mu_encoder = W_hmu.dot(h) + b_hmu
        log_sigma_encoder = W_hsigma.dot(h) + b_hsigma

        z = np.random.normal(mu_encoder,np.exp(log_sigma_encoder))

        return z, mu_encoder, log_sigma_encoder 
Developer: y0ast, Project: Variational-Recurrent-Autoencoder, Lines: 24, Source: VRAE.py
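Note that np.random.normal takes the standard deviation, not the variance, as its second argument, so exp(log_sigma_encoder) is used directly as sigma when sampling z.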

Example 6: rnn_layer

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tanh [as alias]
def rnn_layer(x, h_, c_, m_):
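    # `x` is assumed to already hold the precomputed gate pre-activations for
    # this timestep, concatenated as [input, forget, output, candidate] blocks
    # of width prm.dim_proj; `m_` is the per-sample sequence mask.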
    
    if prm.encoder.lower() == 'lstm':
        i = tensor.nnet.sigmoid(_slice(x, 0, prm.dim_proj))
        f = tensor.nnet.sigmoid(_slice(x, 1, prm.dim_proj))
        o = tensor.nnet.sigmoid(_slice(x, 2, prm.dim_proj))
        c = tensor.tanh(_slice(x, 3, prm.dim_proj))
    
        c = f * c_ + i * c
        c = m_[:, None] * c + (1. - m_)[:, None] * c_
    
        h = o * tensor.tanh(c)
        h = m_[:, None] * h + (1. - m_)[:, None] * h_
    else:
        c = c_
        h = tensor.tanh(x) * m_[:, None]
    return h, c 
Developer: nyu-dl, Project: dl4ir-webnav, Lines: 19, Source: neuagent.py

Example 7: tensor_constrain

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tanh [as alias]
def tensor_constrain(u, min_bounds, max_bounds):
    """Constrains a control vector tensor variable between given bounds through
    a squashing function.

    This is implemented with Theano, so as to be auto-differentiable.

    Args:
        u: Control vector tensor variable [action_size].
        min_bounds: Minimum control bounds [action_size].
        max_bounds: Maximum control bounds [action_size].

    Returns:
        Constrained control vector tensor variable [action_size].
    """
    diff = (max_bounds - min_bounds) / 2.0
    mean = (max_bounds + min_bounds) / 2.0
    return diff * T.tanh(u) + mean 
Developer: anassinator, Project: ilqr, Lines: 19, Source: dynamics.py
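As a hedged usage sketch (the bounds and test input below are illustrative and not taken from the ilqr project), the squashing can be compiled and evaluated like this:

import numpy as np
import theano
import theano.tensor as T

u = T.vector('u')                    # unconstrained control variable
min_bounds = np.array([-1.0, 0.0])   # assumed example bounds
max_bounds = np.array([1.0, 2.0])
squash = theano.function([u], tensor_constrain(u, min_bounds, max_bounds))
print(squash(np.array([10.0, -10.0], dtype=theano.config.floatX)))
# tanh saturates, so the output approaches [1.0, 0.0]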

Example 8: step

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tanh [as alias]
def step(self, word, index, energy_tm1, h_tm1, c_tm1, x):
        # attention
        H = x
        if self.attendedMode == "concat":
            M_X = T.dot(x, self.W_A_X)          # + self.b_A_X
            M_state = T.dot(self.W_A_h, c_tm1)  # + self.b_A_h
            M = T.tanh(M_X + M_state)
            _energy = T.dot(M, self.W_A.T)      # + self.b_A
        elif self.attendedMode == "dot":
            _energy = None
            assert 0, "not implemented"
        elif self.attendedMode == "general":
            M_X = T.dot(x, self.W_A_X)          # + self.b_A_X
            M_state = T.dot(self.W_A_h, c_tm1)  # + self.b_A_h
            M = T.tanh(M_X * M_state)
            _energy = T.dot(M, self.W_A.T)      # + self.b_A
        # mask out future positions, keeping only steps up to the current index
        mask = T.zeros((1, x.shape[0]), dtype=theano.config.floatX)
        energy = T.nnet.softmax(_energy[:index + 1])
        masked_energy = T.set_subtensor(mask[0, :index + 1], energy.flatten())
        glimpsed = (masked_energy.T * H).sum(axis=0)
        # combine the glimpsed context with the word input
        if self.wordInput_dim == 0:
            combined = glimpsed
        else:
            combined = K.concatenate([glimpsed, word])
        # original LSTM step
        h_t, c_t = super(AttentionLSTM3, self).step(combined, h_tm1, c_tm1)
        return masked_energy.flatten(), h_t, c_t
Developer: lingluodlut, Project: Att-ChemdNER, Lines: 34, Source: nn.py

Example 9: tanh

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tanh [as alias]
def tanh(x):
    return T.tanh(x) 
Developer: lingluodlut, Project: Att-ChemdNER, Lines: 4, Source: theano_backend.py

Example 10: process

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tanh [as alias]
def process(self, gstate, dropout_masks=Ellipsis):
        """
        Convert the graph state to a representation vector, using softmax attention to scale representations

        Params:
            gstate: A GraphState giving the current state

        Returns: A representation vector of shape (n_batch, representation_width)
        """
        if dropout_masks is Ellipsis:
            dropout_masks = None
            append_masks = False
        else:
            append_masks = True

        flat_obs = T.concatenate([
                        gstate.node_ids.reshape([-1, self._graph_spec.num_node_ids]),
                        gstate.node_states.reshape([-1, self._graph_spec.node_state_size])], 1)
        flat_activations, dropout_masks = self._representation_stack.process(flat_obs, dropout_masks)
        activations = flat_activations.reshape([gstate.n_batch, gstate.n_nodes, self._representation_width+1])

        activation_strengths = activations[:,:,0]
        existence_penalty = T.log(gstate.node_strengths + EPSILON) # TODO: consider removing epsilon here
        selector = T.shape_padright(T.nnet.softmax(activation_strengths + existence_penalty))
        representations = T.tanh(activations[:,:,1:])

        result = T.sum(selector * representations, 1)
        if append_masks:
            return result, dropout_masks
        else:
            return result 
Developer: hexahedria, Project: gated-graph-transformer-network, Lines: 33, Source: aggregate_representation_softmax.py
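Note the effect of the existence penalty: since softmax(a + log s)_i is proportional to s_i * exp(a_i), adding T.log(gstate.node_strengths + EPSILON) to the activation strengths scales each node's unnormalized attention weight by that node's strength, so nodes that barely exist receive almost no attention.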

Example 11: process

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tanh [as alias]
def process(self, gstate, dropout_masks=Ellipsis):
        """
        Convert the graph state to a representation vector, using sigmoid attention to scale representations

        Params:
            gstate: A GraphState giving the current state

        Returns: A representation vector of shape (n_batch, representation_width)
        """
        if dropout_masks is Ellipsis:
            dropout_masks = None
            append_masks = False
        else:
            append_masks = True

        flat_obs = T.concatenate([
                        gstate.node_ids.reshape([-1, self._graph_spec.num_node_ids]),
                        gstate.node_states.reshape([-1, self._graph_spec.node_state_size])], 1)
        flat_activations, dropout_masks = self._representation_stack.process(flat_obs, dropout_masks)
        activations = flat_activations.reshape([gstate.n_batch, gstate.n_nodes, self._representation_width+1])

        activation_strengths = activations[:,:,0]
        selector = T.shape_padright(T.nnet.sigmoid(activation_strengths) * gstate.node_strengths)
        representations = T.tanh(activations[:,:,1:])

        result = T.tanh(T.sum(selector * representations, 1))
        if append_masks:
            return result, dropout_masks
        else:
            return result 
Developer: hexahedria, Project: gated-graph-transformer-network, Lines: 32, Source: aggregate_representation.py

Example 12: step

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tanh [as alias]
def step(self, ipt, state, state_strength, dropout_masks=None):
        """
        Perform a single step of the network

        Params:
            ipt: The current input. Should be an int tensor of shape (n_batch, self.input_width)
            state: The previous state. Should be a float tensor of shape (n_batch, self.output_width)
            state_strength: Strength of the previous state. Should be a float tensor of shape
                (n_batch)
            dropout_masks: Masks from get_dropout_masks

        Returns: The next output state, and the next output strength
        """
        if dropout_masks is not None:
            ipt_masks, state_masks = dropout_masks
            ipt = ipt*ipt_masks
            state = state*state_masks

        obs_state = state * T.shape_padright(state_strength)
        cat_ipt_state = T.concatenate([ipt, obs_state], 1)
        reset = do_layer( T.nnet.sigmoid, cat_ipt_state,
                            self._reset_W, self._reset_b )
        update = do_layer( T.nnet.sigmoid, cat_ipt_state,
                            self._update_W, self._update_b )
        update_state = update[:,:-1]
        update_strength = update[:,-1]

        cat_reset_ipt_state = T.concatenate([ipt, (reset * obs_state)], 1)
        candidate_act = do_layer( T.tanh, cat_reset_ipt_state,
                            self._activation_W, self._activation_b )
        candidate_strength = do_layer( T.nnet.sigmoid, cat_reset_ipt_state,
                            self._strength_W, self._strength_b ).reshape(state_strength.shape)

        newstate = update_state * state + (1-update_state) * candidate_act
        newstrength = update_strength * state_strength + (1-update_strength) * candidate_strength

        return newstate, newstrength 
Developer: hexahedria, Project: gated-graph-transformer-network, Lines: 39, Source: strength_weighted_gru.py

Example 13: step

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tanh [as alias]
def step(self, ipt, state, dropout_masks=Ellipsis):
        """
        Perform a single step of the network

        Params:
            ipt: The current input. Should be an int tensor of shape (n_batch, self.input_width)
            state: The previous state. Should be a float tensor of shape (n_batch, self.output_width)
            dropout_masks: Masks from get_dropout_masks

        Returns: The next output state
        """
        if dropout_masks is Ellipsis:
            dropout_masks = None
            append_masks = False
        else:
            append_masks = True

        if self._dropout_keep != 1 and self._dropout_input and dropout_masks is not None:
            ipt_masks = dropout_masks[0]
            ipt = apply_dropout(ipt, ipt_masks)
            dropout_masks = dropout_masks[1:]

        cat_ipt_state = T.concatenate([ipt, state], 1)
        reset = do_layer( T.nnet.sigmoid, cat_ipt_state,
                            self._reset_W, self._reset_b )
        update = do_layer( T.nnet.sigmoid, cat_ipt_state,
                            self._update_W, self._update_b )
        candidate_act = do_layer( T.tanh, T.concatenate([ipt, (reset * state)], 1),
                            self._activation_W, self._activation_b )

        newstate = update * state + (1-update) * candidate_act

        if self._dropout_keep != 1 and self._dropout_output and dropout_masks is not None:
            newstate_masks = dropout_masks[0]
            newstate = apply_dropout(newstate, newstate_masks)
            dropout_masks = dropout_masks[1:]

        if append_masks:
            return newstate, dropout_masks
        else:
            return newstate 
Developer: hexahedria, Project: gated-graph-transformer-network, Lines: 43, Source: base_gru.py

Example 14: LSTMLayer

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tanh [as alias]
def LSTMLayer(lstm_prev, inp, inp_dim, full_memory_dim, vs, name="lstm", initializer=None):
    assert full_memory_dim % 2 == 0, "Input is concatenated (h, c); dim must be even."
    hidden_dim = full_memory_dim // 2  # integer division; the memory holds (h, c) halves

    # gate biases
    # TODO(jgauthier): support excluding params from regularization
    b = vs.add_param("%s_b" % name, (hidden_dim * 4,),
                     initializer=LSTMBiasInitializer())

    def slice_gate(gate_data, i):
        return gate_data[:, i * hidden_dim:(i + 1) * hidden_dim]

    # Decompose previous LSTM value into hidden and cell value
    h_prev = lstm_prev[:, :hidden_dim]
    c_prev = lstm_prev[:, hidden_dim:]

    # Compute and slice gate values
    # input -> hidden mapping
    gates = Linear(inp, inp_dim, hidden_dim * 4, vs,
                   name="%s/inp/linear" % name,
                   initializer=initializer, use_bias=False)
    # hidden -> hidden mapping
    gates += Linear(h_prev, hidden_dim, hidden_dim * 4, vs,
                    name="%s/hid/linear" % name,
                    initializer=initializer, use_bias=False)
    gates += b
    i_gate, f_gate, o_gate, cell_inp = [slice_gate(gates, i) for i in range(4)]

    # Apply nonlinearities
    i_gate = T.nnet.sigmoid(i_gate)
    f_gate = T.nnet.sigmoid(f_gate)
    o_gate = T.nnet.sigmoid(o_gate)
    cell_inp = T.tanh(cell_inp)

    # Compute new cell and hidden value
    c_t = f_gate * c_prev + i_gate * cell_inp
    h_t = o_gate * T.tanh(c_t)

    return T.concatenate([h_t, c_t], axis=1) 
Developer: stanfordnlp, Project: spinn, Lines: 41, Source: blocks.py

Example 15: GRULayer

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import tanh [as alias]
def GRULayer(h_prev, inp, inp_dim, full_memory_dim, vs, name="gru", initializer=None):
    hidden_dim = full_memory_dim

    # gate biases
    # TODO(mrdrozdov): use b (bias) same as is done in LSTM
    # b = vs.add_param("%s_b" % name, (hidden_dim * 3,),
    #                  initializer=GRUBiasInitializer())

    def slice_gate(gate_data, i):
        return gate_data[:, i * hidden_dim:(i + 1) * hidden_dim]

    # Compute and slice gate values
    # input -> hidden mapping
    i2h = Linear(inp, inp_dim, hidden_dim * 3, vs,
                   name="%s/inp/linear" % name,
                   initializer=initializer, use_bias=False)
    # hidden -> hidden mapping
    h2h = Linear(h_prev, hidden_dim, hidden_dim * 3, vs,
                    name="%s/hid/linear" % name,
                    initializer=initializer, use_bias=False)

    gates = i2h[:, 0:2*hidden_dim] + h2h[:, 0:2*hidden_dim]

    z_gate = gates[:, 0:hidden_dim]
    r_gate = gates[:, hidden_dim:2*hidden_dim]

    # Apply nonlinearities
    z_gate = T.nnet.sigmoid(z_gate)
    r_gate = T.nnet.sigmoid(r_gate)

    i2h_gate = i2h[:, 2*hidden_dim:3*hidden_dim]
    h2h_gate = h2h[:, 2*hidden_dim:3*hidden_dim]

    h_t = T.tanh(i2h_gate + r_gate * h2h_gate)
    h_next = h_prev + z_gate * (h_t - h_prev)

    return T.concatenate([h_next], axis=1) 
Developer: stanfordnlp, Project: spinn, Lines: 39, Source: blocks.py
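Note that the update h_next = h_prev + z_gate * (h_t - h_prev) is algebraically identical to the conventional GRU form (1 - z) * h_prev + z * h_t, and the final T.concatenate([h_next], axis=1) is a no-op wrapper kept for interface symmetry with LSTMLayer above.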


Note: The theano.tensor.tanh examples above were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before redistributing or reusing the code. Do not reproduce without permission.