Python lstm.LSTM Attribute Code Examples

This article collects typical usage examples of the lstm.LSTM attribute in Python. If you are wondering what lstm.LSTM does, how to use it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the lstm module that the attribute belongs to.


Below are 15 code examples of the lstm.LSTM attribute, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
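
As a quick orientation before the examples: the snippets below use one of two import styles. A minimal sketch of both (note that lstm here is a project-local module in these repositories, not a PyPI package):

# Minimal sketch of the two import patterns used in the examples below.
# lstm is a project-local module in these repositories, not a PyPI package.
import lstm
net = lstm.LSTM()          # instantiate via the module attribute

from lstm import LSTM      # or import the class directly
net = LSTM()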

Example 1: forward_instance

# Required import: import lstm [as alias]
# Or: from lstm import LSTM [as alias]
def forward_instance(self, instance_node, current_depth, max_depth, sequence_function=SEQUENCE_FUNCTIONS):
        if instance_node.get_number_of_children() == 0:
            return -100 * np.ones(self.hidden_layer_sizes[current_depth]) # no children signifier vector
        input_sequence = np.array([])
        children_sequence = get_sequence(instance_node.get_children(), sequence_function[current_depth])
        for item in children_sequence:
            feature_vector = item.get_feature_vector()
            """ If we are not at the very bottom we need to get input from LSTM at the next level"""
            LSTM_output_from_below = np.array([])
            if current_depth < max_depth:
                 LSTM_output_from_below = self.forward_instance(item, current_depth + 1, max_depth).reshape(self.hidden_layer_sizes[current_depth +1]) # recursive call
            full_feature_vector = np.concatenate((LSTM_output_from_below, feature_vector)) # concatenate feature vector and input from LSTM output below
            # concatenate current feature vector to input sequence for the LSTM
            input_sequence = np.concatenate((input_sequence,full_feature_vector))
        # forward the input sequence to this depth's LSTM
        input_sequence = input_sequence.reshape(instance_node.get_number_of_children(), 1, len(full_feature_vector))
        _, _, Y, cache = self.lstm_stack[current_depth]._forward(input_sequence)
        instance_node.cache = cache
        instance_node.children_sequence = children_sequence
        return softmax(Y) 
Author: ML-on-structures, Project: blockchain-lstm, Lines: 22, Source file: multi_layer_LSTM.py
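
Example 1 returns softmax(Y), but the helper itself is not shown. A standard numerically stable implementation, as a reference sketch (the repository's own helper may differ, for instance in which axis it reduces over):

import numpy as np

def softmax(x):
    # Subtract the max before exponentiating for numerical stability.
    shifted = x - np.max(x, axis=-1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=-1, keepdims=True)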

Example 2: _compute_LSTM_updates

# Required import: import lstm [as alias]
# Or: from lstm import LSTM [as alias]
def _compute_LSTM_updates(self, instance_node, current_depth):
        """Computes the update to the LSTM coefficients, recurrently down
        the tree of nodes."""
        # First, computes the update for the current node.
        method = self.learning_method_vector[current_depth]
        if method == "steady_rate":
            self._compute_update_LSTM_weights_steady_rate(instance_node, current_depth)
        elif method == "momentum":
            self._compute_update_LSTM_weights_with_momentum(instance_node, current_depth)
        elif method == "adadelta":
            self._compute_update_LSTM_weights_adadelta(instance_node, current_depth)
        else:
            raise UnknownLearningMethod(method)
        # Then, recurses down the tree.
        if current_depth == self.max_depth - 1:
            return
        for item in instance_node.children_sequence:
            self._compute_LSTM_updates(item, current_depth + 1) 
Author: ML-on-structures, Project: graph-lstm, Lines: 20, Source file: multi_level_lstm.py
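
The if/elif dispatch in Example 2 can also be written as a dictionary lookup, which keeps the set of supported learning methods in one place. A sketch of that alternative body (the method names come from the example; everything else is illustrative):

def _compute_LSTM_updates(self, instance_node, current_depth):
    # Map method names to their update routines; unknown names still raise.
    dispatch = {
        "steady_rate": self._compute_update_LSTM_weights_steady_rate,
        "momentum": self._compute_update_LSTM_weights_with_momentum,
        "adadelta": self._compute_update_LSTM_weights_adadelta,
    }
    method = self.learning_method_vector[current_depth]
    try:
        update = dispatch[method]
    except KeyError:
        raise UnknownLearningMethod(method)
    update(instance_node, current_depth)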

Example 3: train

# Required import: import lstm [as alias]
# Or: from lstm import LSTM [as alias]
def train(model_name, train_data, dev_data):

    training_data = np.load(train_data)
    validation_data = np.load(dev_data)

    assert training_data["vocabulary"] == validation_data["vocabulary"]
    assert training_data["batch_size"] == validation_data["batch_size"]
    assert training_data["punctuations"] == validation_data["punctuations"]

    net = lstm.LSTM()
    net.initialize(projection_size=conf.PROJECTION_SIZE,
                   hidden_size=conf.HIDDEN_SIZE,
                   in_vocabulary=training_data["vocabulary"],
                   out_vocabulary=training_data["punctuations"],
                   batch_size=training_data["batch_size"],
                   hidden_activation=conf.HIDDEN_ACTIVATION,
                   gate_activation=conf.GATE_ACTIVATION,
                   bptt_steps=conf.BPTT_STEPS)

    _train(net, training_data, validation_data, model_name, conf.LEARN_RATE,
           conf.MAX_EPOCHS, conf.MIN_IMPROVEMENT) 
Author: kaituoxu, Project: python-lstm-punctuation, Lines: 23, Source file: trainer.py
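
Example 3 reads its hyperparameters from a conf module that is not shown. A minimal stand-in with illustrative values (the constant names appear in the example; the values and comments below are assumptions, not the project's real configuration):

# conf.py -- illustrative stand-in; values are examples only.
PROJECTION_SIZE = 100        # word projection (embedding) size
HIDDEN_SIZE = 100            # LSTM hidden state size
HIDDEN_ACTIVATION = "tanh"   # activation for the hidden layer
GATE_ACTIVATION = "sigmoid"  # activation for the LSTM gates
BPTT_STEPS = 25              # truncated backprop-through-time window
LEARN_RATE = 0.1
MAX_EPOCHS = 50
MIN_IMPROVEMENT = 1.003      # early-stopping threshold on validation error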

Example 4: forward_propagation

# Required import: import lstm [as alias]
# Or: from lstm import LSTM [as alias]
def forward_propagation(self, instance_node, instance_depth=0):
        """Performs forward propagation through the multi-level LSTM structure.
         The node instance_node at depth instance_depth is propagated.
         The node should be an object of class InstanceNode."""
        # Prepares for back-propagation.
        self._reset_learning_parameters()
        input_sequence = np.array([])
        children_sequence = list(instance_node.get_children())
        if len(children_sequence) == 0:
            # FIXME We should really have a feature that describes the number of children.
            # This loses any data that might be associated with the node itself.
            return -100 * np.ones(self.output_sizes[instance_depth]) # no children signifier vector
        if instance_depth in self.shuffle_levels:
            # Shuffles children order if required.
            random.shuffle(children_sequence)
        for child_node in children_sequence:
            child_node_feature_vector = child_node.get_feature_vector()
            assert len(child_node_feature_vector) == self.node_feature_sizes[instance_depth]
            # If we are not at the very bottom we need to get input from LSTM at the next level.
            LSTM_output_from_below = np.array([])
            if instance_depth < self.max_depth - 1:
                LSTM_output_from_below = self.forward_propagation(child_node, instance_depth=instance_depth + 1).reshape(
                    self.output_sizes[instance_depth + 1])  # recursive call
            # concatenate feature vector and input from LSTM output below
            full_feature_vector = np.concatenate((LSTM_output_from_below, child_node_feature_vector))
            # concatenate current feature vector to input sequence for the LSTM
            # TODO: This is very confusing; can you change this to use row and column stacking?
            input_sequence = np.concatenate((input_sequence, full_feature_vector))
        # forward the input sequence to this depth's LSTM
        input_sequence = input_sequence.reshape(len(children_sequence), 1, len(full_feature_vector))
        _, _, Y, cache = self.lstm_stack[instance_depth]._forward(input_sequence)
        # We store the state of the LSTM, so we can use it for back-propagation.
        instance_node.cache.lstm_cache = cache
        # we also need to save the sequence in the same order we used it.
        instance_node.children_sequence = children_sequence
        return Y 
Author: ML-on-structures, Project: graph-lstm, Lines: 38, Source file: multi_level_lstm.py
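
The TODO in Example 4 asks for the repeated 1-D np.concatenate to be replaced by row stacking. One way is to collect the per-child vectors in a list and stack once at the end; a self-contained sketch of just that step (names illustrative, not the repository's code):

import numpy as np

def stack_child_vectors(child_vectors):
    # Stack per-child feature vectors into the (n_children, 1, feature_size)
    # shape expected by the LSTM's _forward, replacing repeated concatenation.
    return np.stack(child_vectors)[:, np.newaxis, :]

# Tiny usage example with three children and feature size 4:
vectors = [np.arange(4.0) + i for i in range(3)]
assert stack_child_vectors(vectors).shape == (3, 1, 4)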

Example 5: backward_propagation

# Required import: import lstm [as alias]
# Or: from lstm import LSTM [as alias]
def backward_propagation(self, instance_node, derivative, instance_depth=0):
        """Performs backward propagation, given a loss derivative for the outputs."""
        # First, we backpropagate through the layers the backward gradient.
        self._compute_backward_gradients(instance_node, derivative, instance_depth)
        # Second, we compute (but we do not apply) the update at all layers
        # of the MLSL.  We don't apply it because at every layer, there are in
        # general multiple instances of an LSTM, and we will have to add all the
        # updates for an LSTM at the same level before applying them.
        self._compute_LSTM_updates(instance_node, instance_depth)
        # Finally, once the updates have been computed, it applies them
        # to all the levels of the LSTM.
        self._apply_LSTM_updates() 
Author: ML-on-structures, Project: graph-lstm, Lines: 14, Source file: multi_level_lstm.py

Example 6: _compute_update_LSTM_weights_steady_rate

# Required import: import lstm [as alias]
# Or: from lstm import LSTM [as alias]
def _compute_update_LSTM_weights_steady_rate(self, instance_node, current_depth):
        """Computes the LSTM weight update at steady rate."""
        if instance_node.cache is not None:
            dW = - self.learning_rate_vector[current_depth] * instance_node.cache.weight_gradient
            self.sum_of_dWs[current_depth] += dW
            self.number_of_nodes_per_level[current_depth] += 1 
Author: ML-on-structures, Project: graph-lstm, Lines: 8, Source file: multi_level_lstm.py

Example 7: _compute_update_LSTM_weights_with_momentum

# Required import: import lstm [as alias]
# Or: from lstm import LSTM [as alias]
def _compute_update_LSTM_weights_with_momentum(self, instance_node, current_depth):
        """Computes the LSTM weight update using momentum."""
        if instance_node.cache is not None:
            if self.lstm_stack[current_depth].momentum_dW is None: # initialize momentum of LSTM to zero
                self.lstm_stack[current_depth].momentum_dW = np.zeros(self.lstm_stack[current_depth].WLSTM.shape)
            dW = (- self.learning_rate_vector[current_depth] * instance_node.cache.weight_gradient
                  + self.momentum_vector[current_depth] * self.lstm_stack[current_depth].momentum_dW)
            self.lstm_stack[current_depth].WLSTM += dW
            self.sum_of_dWs[current_depth] += dW
            self.number_of_nodes_per_level[current_depth] += 1 
Author: ML-on-structures, Project: graph-lstm, Lines: 12, Source file: multi_level_lstm.py
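
Stripped of the tree bookkeeping, the update rule in Example 7 is classical momentum: each step is a gradient step plus a fraction of the previous step. In isolation (plain numpy; names and values illustrative):

import numpy as np

def momentum_step(W, grad, velocity, lr=0.01, mu=0.9):
    # New step = gradient-descent step plus a fraction of the previous step.
    velocity = -lr * grad + mu * velocity
    return W + velocity, velocity

W = np.zeros(3)
v = np.zeros(3)
W, v = momentum_step(W, np.array([1.0, -2.0, 0.5]), v)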

Example 8: _apply_LSTM_updates

# Required import: import lstm [as alias]
# Or: from lstm import LSTM [as alias]
def _apply_LSTM_updates(self):
        """Applies the updates that have been computed to the LSTM."""
        for d in range(self.max_depth):
            self.lstm_stack[d].WLSTM += self.sum_of_dWs[d] / self.number_of_nodes_per_level[d]
            self.lstm_stack[d].momentum_dW = self.sum_of_dWs[d] / self.number_of_nodes_per_level[d]
            self.lstm_stack[d].tot_gradient_weight = self.sum_tot_delta_weight[d] / self.number_of_nodes_per_level[d]
            self.lstm_stack[d].tot_sq_gradient = self.sum_tot_sq_gradient[d] / self.number_of_nodes_per_level[d]
            self.lstm_stack[d].tot_delta_weight = self.sum_tot_delta_weight[d] / self.number_of_nodes_per_level[d]
            self.lstm_stack[d].tot_sq_delta = self.sum_tot_sq_delta[d] / self.number_of_nodes_per_level[d]



# The following class represents nodes of the unfoldings.
# The MLSL module can train and test on tree instances that are encoded as objects of this class.
Author: ML-on-structures, Project: graph-lstm, Lines: 16, Source file: multi_level_lstm.py
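
Example 8 divides each accumulated sum of updates by the number of nodes seen at that level, so levels with many LSTM instances do not take proportionally larger steps. The averaging in isolation (plain numpy; names illustrative):

import numpy as np

def averaged_update(sum_of_dWs, n_nodes):
    # Average the per-node updates accumulated at one level.
    return sum_of_dWs / n_nodes

# Three nodes each contributed an update; apply their mean once.
dWs = [np.ones(2), 2 * np.ones(2), 3 * np.ones(2)]
mean_dW = averaged_update(sum(dWs), len(dWs))  # -> array([2., 2.])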

Example 9: __init__

# Required import: import lstm [as alias]
# Or: from lstm import LSTM [as alias]
def __init__(self, feature_vector=None, label=None, id=None):
        self.id = id
        self.feature_vector = feature_vector
        self.label = label
        self.children = []
        self.children_sequence = [] # Stores the specific order by which the items were fed into the LSTM to update weights correctly
        # The gradient backpropagated at this node will be left here.
        # It can be used for further back-propagation as needed.
        self.gradient = None
        # Here we store intermediate values useful for the processing.
        self.cache = Storage() 
Author: ML-on-structures, Project: graph-lstm, Lines: 13, Source file: multi_level_lstm.py
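
A tiny tree built from Example 9's node class, as the forward pass in Example 4 would consume it. This assumes the class is named InstanceNode, which Example 4's docstring confirms; the feature values are purely illustrative:

import numpy as np

root = InstanceNode(feature_vector=np.array([1.0, 0.0]), label=1, id=0)
child = InstanceNode(feature_vector=np.array([0.0, 1.0]), label=0, id=1)
root.children.append(child)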

Example 10: __init__

# Required import: import lstm [as alias]
# Or: from lstm import LSTM [as alias]
def __init__(self, max_depth, hidden_layer_sizes, input_sizes):
        self.lstm_stack = [lstm.LSTM() for l in range(max_depth)]
        for l in range(max_depth):
            self.lstm_stack[l].initialize(input_sizes[l] + (0 if l == max_depth - 1 else hidden_layer_sizes[l + 1]), hidden_layer_sizes[l])
        self.hidden_layer_sizes = hidden_layer_sizes
        self.input_sizes = input_sizes 
Author: ML-on-structures, Project: blockchain-lstm, Lines: 8, Source file: multi_layer_LSTM.py
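
The initializer in Example 10 wires each level's LSTM input size to its own feature size plus the hidden size of the level below; the bottom level receives nothing from below. The effective sizes can be computed as in this sketch (function name illustrative):

def effective_input_sizes(input_sizes, hidden_layer_sizes):
    # Input size at level l = own features + hidden size of level l+1,
    # except at the bottom level, which has no level below it.
    max_depth = len(input_sizes)
    return [input_sizes[l] + (0 if l == max_depth - 1 else hidden_layer_sizes[l + 1])
            for l in range(max_depth)]

# Two levels: the top sees its 5 features plus the 8 hidden units from below.
assert effective_input_sizes([5, 3], [10, 8]) == [13, 3]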

Example 11: sgd_train_multilayer

# Required import: import lstm [as alias]
# Or: from lstm import LSTM [as alias]
def sgd_train_multilayer(self, root, target, max_depth, objective_function, learning_rate_vector):
        # First do one forward pass from the root so that all internal LSTM
        # states are calculated and stored in the "cache" field.
        Y = self.forward_instance(root, current_depth=0, max_depth=max_depth)
        deriv = getDerivative(output=Y, target=target, objective=objective_function)
        self.calculate_backward_gradients(root, deriv, 0, max_depth)
        self.update_LSTM_weights(root, 0, max_depth, learning_rate_vector=learning_rate_vector)
Author: ML-on-structures, Project: blockchain-lstm, Lines: 9, Source file: multi_layer_LSTM.py

Example 12: setUp

# Required import: import lstm [as alias]
# Or: from lstm import LSTM [as alias]
def setUp(self):
        global l, in_vocabulary, out_vocabulary
        l = LSTM()
        in_vocabulary = {'hello': 0, 'world': 1, 'dog': 2, 'cat': 3}
        out_vocabulary = {' ': 0, ',': 1, '.': 2} 
Author: kaituoxu, Project: python-lstm-punctuation, Lines: 7, Source file: unittest_lstm.py

Example 13: load_model

# Required import: import lstm [as alias]
# Or: from lstm import LSTM [as alias]
def load_model(file_path):
    import lstm
    import numpy as np
    model = np.load(file_path)
    net = lstm.LSTM()

    net.load(model)

    return net 
Author: kaituoxu, Project: python-lstm-punctuation, Lines: 11, Source file: utils.py
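
Typical usage of Example 13's loader (the file name here is hypothetical and stands for a model previously written by the same project's save routine):

# "model.npz" is a placeholder for a file saved by the project's training code.
net = load_model("model.npz")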

Example 14: build

# Required import: import lstm [as alias]
# Or: from lstm import LSTM [as alias]
def build(self):
        print('\t building rnn cell...')
        if self.cell == 'gru':
            hidden_layer=GRU(self.rng,
                             self.n_input,self.n_hidden,self.n_batch,
                             self.x,self.E,self.x_mask,
                             self.is_train,self.p)
        else:
            hidden_layer=LSTM(self.rng,
                              self.n_input,self.n_hidden,self.n_batch,
                              self.x,self.E,self.x_mask,
                              self.is_train,self.p)
        print('\t building softmax output layer...')
        softmax_shape=(self.n_hidden,self.n_output)
        output_layer=H_Softmax(softmax_shape,hidden_layer.activation,self.y,self.y_mask)
        self.params=[self.E,]
        self.params+=hidden_layer.params
        self.params+=output_layer.params

        cost=output_layer.activation
        lr=T.scalar("lr")
        gparams=[T.clip(T.grad(cost,p),-10,10) for p in self.params]
        updates=sgd(self.params,gparams,lr)

        self.train=theano.function(inputs=[self.x,self.x_mask,self.y,self.y_mask,self.n_batch,lr],
                                   outputs=cost,
                                   updates=updates,
                                   givens={self.is_train:np.cast['int32'](1)})
        '''
        self.predict=theano.function(inputs=[self.x,self.x_mask,self.n_batch],
                                     outputs=output_layer.prediction,
                                     givens={self.is_train:np.cast['int32'](0)})
        ''' 
Author: jiangnanhugo, Project: lmkit, Lines: 35, Source file: rnnlm.py

Example 15: build

# Required import: import lstm [as alias]
# Or: from lstm import LSTM [as alias]
def build(self):
        print('building rnn cell...')
        hidden_layer = LSTM(self.rng,
                            self.n_input, self.n_hidden, self.n_batch,
                            self.x, self.E, self.x_mask,
                            self.is_train, self.p)
        print('building softmax output layer...')
        if self.use_adaptive_softmax:
            cutoff = [2000, self.n_output]
            softmax_inputs = hidden_layer.activation
            logit_shape = softmax_inputs.shape
            softmax_inputs = softmax_inputs.reshape([logit_shape[0]*logit_shape[1], logit_shape[2]])
            labels = self.y.flatten()
            y_mask = self.y_mask.flatten()
            output_layer = adaptive_softmax(softmax_inputs, labels, y_mask,
                                            self.n_hidden,
                                            cutoff)
            #cost = T.sum
            training_loss = output_layer.training_losses
            cost = output_layer.loss
        else:
            output_layer = softmax(self.n_hidden, self.n_output, hidden_layer.activation)
            cost = self.categorical_crossentropy(output_layer.activation, self.y)
        self.params = [self.E, ]
        self.params += hidden_layer.params
        self.params += output_layer.params

        lr = T.scalar("lr")
        gparams = [T.clip(T.grad(cost, p), -1, 1) for p in self.params]
        updates = self.optimizer(self.params, gparams, lr)

        self.train = theano.function(inputs=[self.x, self.x_mask, self.y, self.y_mask, lr],
                                     outputs=[cost,hidden_layer.activation, output_layer.head_loss, output_layer.tail_loss],
                                     updates=updates,
                                     givens={self.is_train: np.cast['int32'](1)})

        self.test = theano.function(inputs=[self.x, self.x_mask,self.y, self.y_mask],
                                    outputs=cost,
                                    givens={self.is_train: np.cast['int32'](0)}) 
Author: OlivierShi, Project: adaptiveSM, Lines: 41, Source file: rnnlm.py
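
The cutoff = [2000, self.n_output] in Example 15 splits the vocabulary into a frequent head (ids below 2000) scored by the full softmax and a rare tail routed through a smaller cluster. A conceptual sketch of that partition in plain Python (the repository's adaptive_softmax does this inside Theano; names here are illustrative):

def split_head_tail(label_ids, cutoff=2000):
    # Head: frequent word ids, scored directly by the main softmax.
    # Tail: rare word ids, routed through a separate, cheaper cluster.
    head = [i for i in label_ids if i < cutoff]
    tail = [i for i in label_ids if i >= cutoff]
    return head, tail

head, tail = split_head_tail([5, 1999, 2000, 30000])
# head == [5, 1999]; tail == [2000, 30000]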


Note: The lstm.LSTM attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors; for redistribution and use, please refer to the license of the corresponding project. Do not repost without permission.