

Python rnn.BasicRNNCell Method Code Examples

This article collects typical usage examples of the tensorflow.contrib.rnn.BasicRNNCell method in Python. If you are wondering what rnn.BasicRNNCell does and how it is used in practice, the curated examples below may help. You can also explore the surrounding tensorflow.contrib.rnn module for related usage examples.


The sections below present 8 code examples of the rnn.BasicRNNCell method, sorted by popularity by default.
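
Before the individual examples, here is a minimal sketch of the pattern most of them share (a sketch assuming TensorFlow 1.x, where tf.contrib is still available; the shapes and tensor names are illustrative and not taken from any example below):

import tensorflow as tf
from tensorflow.contrib import rnn

# Illustrative shapes: a batch of 4 sequences, 10 time steps, 8 features.
inputs = tf.placeholder(tf.float32, [4, 10, 8])

# BasicRNNCell(num_units) builds a vanilla RNN cell; its activation
# defaults to tanh unless overridden via the `activation` argument.
cell = rnn.BasicRNNCell(num_units=16)

# dynamic_rnn unrolls the cell over the time axis; when no initial
# state is supplied, dtype must be given and the state starts at zeros.
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
# outputs: [4, 10, 16]; final_state: [4, 16] (for BasicRNNCell the
# final state equals the last output).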

Example 1: __init__

# Required module import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import BasicRNNCell [as alias]
def __init__(self,
               num_units,
               tied=False,
               non_recurrent_fn=None,
               state_is_tuple=True,
               output_is_tuple=True):
    super(Grid2BasicRNNCell, self).__init__(
        num_units=num_units,
        num_dims=2,
        input_dims=0,
        output_dims=0,
        priority_dims=0,
        tied=tied,
        non_recurrent_dims=None if non_recurrent_fn is None else 0,
        cell_fn=lambda n: rnn.BasicRNNCell(num_units=n),
        non_recurrent_fn=non_recurrent_fn,
        state_is_tuple=state_is_tuple,
        output_is_tuple=output_is_tuple) 
Author: ryfeus, Project: lambda-packs, Lines: 20, Source: grid_rnn_cell.py
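
The cell_fn argument is where BasicRNNCell plugs in here: the GridRNN wrapper calls it to construct the vanilla RNN cell used along each recurrent dimension, while Grid2BasicRNNCell itself only configures the 2-D grid wiring (dimensions, priorities, and optional non-recurrent dimensions).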

Example 2: build_cell

# Required module import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import BasicRNNCell [as alias]
def build_cell(self, name=None):
        if self.hparams.cell_type == 'linear':
            cell = BasicRNNCell(self.hparams.hidden_units,
                                activation=tf.identity, name=name)
        elif self.hparams.cell_type == 'tanh':
            cell = BasicRNNCell(self.hparams.hidden_units,
                                activation=tf.tanh, name=name)
        elif self.hparams.cell_type == 'relu':
            cell = BasicRNNCell(self.hparams.hidden_units,
                                activation=tf.nn.relu, name=name)
        elif self.hparams.cell_type == 'gru':
            cell = GRUCell(self.hparams.hidden_units, name=name)
        elif self.hparams.cell_type == 'lstm':
            cell = LSTMCell(self.hparams.hidden_units, name=name, state_is_tuple=False)
        else:
            raise ValueError('Provided cell type not supported.')
        return cell 
Author: microsoft, Project: icecaps, Lines: 19, Source: abstract_recurrent_estimator.py
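
Note the 'linear' branch: BasicRNNCell applies its activation to the recurrent pre-activation, so passing activation=tf.identity yields a purely linear recurrence, while the 'tanh' branch is equivalent to the cell's default behavior (BasicRNNCell uses tanh when no activation is given).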

Example 3: RNN

# Required module import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import BasicRNNCell [as alias]
def RNN(x, weights, biases, max_time, num_hidden):

    # Prepare data shape to match `rnn` function requirements
    # Current data input shape: (batch_size, timesteps, n_input)
    # Required shape: 'timesteps' tensors list of shape (batch_size, n_input)

    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, n_input)
    x = tf.unstack(x, max_time, 1)

    # Define an RNN cell with TensorFlow
    # rnn_cell = rnn.BasicRNNCell(num_hidden)
    # Define an LSTM cell with TensorFlow
    lstm_cell = rnn.BasicLSTMCell(num_hidden)

    # Get lstm cell output
    # If no initial_state is provided, dtype must be specified
    # If no initial cell state is provided, it will be initialized to zeros
    # states_series, current_state = rnn.static_rnn(rnn_cell, x, dtype=tf.float32)
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation, using rnn inner loop last output
    # return tf.matmul(current_state, weights) + biases
    return tf.matmul(outputs[-1], weights) + biases 
Author: easy-tensorflow, Project: easy-tensorflow, Lines: 25, Source: ops.py
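
The commented-out lines show the BasicRNNCell variant: static_rnn consumes a Python list of per-timestep [batch_size, n_input] tensors, so the same unstacked x works for either cell, and swapping lstm_cell for rnn_cell is a one-line change.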

Example 4: __init__

# Required module import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import BasicRNNCell [as alias]
def __init__(self, num_units):
    super(Grid1BasicRNNCell, self).__init__(
        num_units=num_units, num_dims=1,
        input_dims=0, output_dims=0, priority_dims=0, tied=False,
        cell_fn=lambda n, i: rnn.BasicRNNCell(num_units=n, input_size=i)) 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 7, Source: grid_rnn_cell.py
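
This variant appears to target an older TensorFlow release in which RNN cells still accepted an input_size argument, hence the two-parameter cell_fn signature; in later 1.x versions input_size was deprecated, and the one-argument form of Example 1 is used instead.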

Example 5: cell_create

# Required module import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import BasicRNNCell [as alias]
def cell_create(self, scope_name):
    with tf.variable_scope(scope_name):
        if self.cell_type == 'tanh':
            cells = rnn.MultiRNNCell([rnn.BasicRNNCell(self.n_hidden[i]) for i in range(self.n_layers)], state_is_tuple=True)
        elif self.cell_type == 'LSTM':
            cells = rnn.MultiRNNCell([rnn.BasicLSTMCell(self.n_hidden[i]) for i in range(self.n_layers)], state_is_tuple=True)
        elif self.cell_type == 'GRU':
            cells = rnn.MultiRNNCell([rnn.GRUCell(self.n_hidden[i]) for i in range(self.n_layers)], state_is_tuple=True)
        elif self.cell_type == 'LSTMP':
            cells = rnn.MultiRNNCell([rnn.LSTMCell(self.n_hidden[i]) for i in range(self.n_layers)], state_is_tuple=True)
        else:
            # Guard against silently returning an undefined `cells`.
            raise ValueError('Provided cell type not supported.')
        cells = rnn.DropoutWrapper(cells, input_keep_prob=self.dropout_ph, output_keep_prob=self.dropout_ph)
    return cells
Author: CarlSouthall, Project: ADTLib, Lines: 14, Source: __init__.py
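
A design note on the dropout placement: because the DropoutWrapper wraps the whole MultiRNNCell, input and output dropout are applied only at the bottom and top of the stack; wrapping each layer's cell individually would instead apply dropout between layers as well.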

Example 6: _inference

# Required module import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import BasicRNNCell [as alias]
def _inference(self):
        logging.info('...create inference')

        fw_state_tuple = self.unstack_fw_states(self.fw_state)

        fw_cells   = list()
        for i in range(0, self.num_layers):
            if (self.cell_type == 'lstm'):
                cell = rnn.LSTMCell(num_units=self.cell_sizes[i], state_is_tuple=True)
            elif (self.cell_type == 'gru'):
                # change to GRU
                cell = rnn.GRUCell(num_units=self.cell_sizes[i])
            else:
                cell = rnn.BasicRNNCell(num_units=self.cell_sizes[i])

            cell = rnn.DropoutWrapper(cell, output_keep_prob=self.keep_prob)
            fw_cells.append(cell)
        self.fw_cells = rnn.MultiRNNCell(fw_cells, state_is_tuple=True)

        rnn_outputs, states = tf.nn.dynamic_rnn(
            self.fw_cells, 
            self.inputs, 
            initial_state=fw_state_tuple,
            sequence_length=self.seq_lengths,
            dtype=tf.float32, time_major=True)

        # Project the RNN output size down to the number of label classes.
        # Sometimes it is worth adding an extra layer here.
        self.projection = lambda x: layers.linear(x, 
            num_outputs=self.label_classes, activation_fn=tf.nn.sigmoid)

        self.logits = tf.map_fn(self.projection, rnn_outputs, name="logits")
        self.probs  = tf.nn.softmax(self.logits, name="probs")
        self.states = states

        tf.add_to_collection('probs',  self.probs) 
Author: pucktada, Project: cutkum, Lines: 38, Source: ck_model.py
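
Two details worth noting: time_major=True means self.inputs is laid out as [max_time, batch_size, depth] rather than batch-major, and the BasicRNNCell fallback in the else branch slots into the same MultiRNNCell/DropoutWrapper pipeline as the LSTM and GRU variants, since all three implement the same RNNCell interface.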

Example 7: __init__

# Required module import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import BasicRNNCell [as alias]
def __init__(self, args, infer=False):
        self.args = args
        if infer:
            args.batch_size = 1
            args.seq_length = 1

        additional_cell_args = {}
        if args.model == 'rnn':
            cell_fn = rnn_cell.BasicRNNCell
        elif args.model == 'gru':
            cell_fn = rnn_cell.GRUCell
        elif args.model == 'lstm':
            cell_fn = rnn_cell.BasicLSTMCell
        elif args.model == 'gridlstm':
            cell_fn = grid_rnn.Grid2LSTMCell
            additional_cell_args.update({'use_peepholes': True, 'forget_bias': 1.0})
        elif args.model == 'gridgru':
            cell_fn = grid_rnn.Grid2GRUCell
        else:
            raise Exception("model type not supported: {}".format(args.model))

        cell = cell_fn(args.rnn_size, **additional_cell_args)

        self.cell = cell = rnn_cell.MultiRNNCell([cell] * args.num_layers)

        self.input_data = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
        self.targets = tf.placeholder(tf.int32, [args.batch_size, args.seq_length])
        self.initial_state = cell.zero_state(args.batch_size, tf.float32)

        with tf.variable_scope('rnnlm'):
            softmax_w = tf.get_variable("softmax_w", [args.rnn_size, args.vocab_size])
            softmax_b = tf.get_variable("softmax_b", [args.vocab_size])
            with tf.device("/cpu:0"):
                embedding = tf.get_variable("embedding", [args.vocab_size, args.rnn_size])
                inputs = tf.split(axis=1, num_or_size_splits=args.seq_length,
                                  value=tf.nn.embedding_lookup(embedding, self.input_data))
                inputs = [tf.squeeze(input_, [1]) for input_ in inputs]

        def loop(prev, _):
            prev = tf.nn.xw_plus_b(prev, softmax_w, softmax_b)
            prev_symbol = tf.stop_gradient(tf.argmax(prev, 1))
            return tf.nn.embedding_lookup(embedding, prev_symbol)

        outputs, last_state = seq2seq.rnn_decoder(inputs, self.initial_state, cell,
                                                  loop_function=loop if infer else None, scope='rnnlm')
        # output = tf.reshape(tf.concat(1, outputs), [-1, args.rnn_size])
        output = tf.reshape(tf.concat(axis=1, values=outputs), [-1, args.rnn_size])
        self.logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
        self.probs = tf.nn.softmax(self.logits)
        loss = seq2seq.sequence_loss_by_example([self.logits],
                                                [tf.reshape(self.targets, [-1])],
                                                [tf.ones([args.batch_size * args.seq_length])],
                                                args.vocab_size)
        self.cost = tf.reduce_sum(loss) / args.batch_size / args.seq_length
        self.final_state = last_state
        self.lr = tf.Variable(0.0, trainable=False)
        tvars = tf.trainable_variables()
        grads, _ = tf.clip_by_global_norm(tf.gradients(self.cost, tvars),
                                          args.grad_clip)
        optimizer = tf.train.AdamOptimizer(self.lr)
        self.train_op = optimizer.apply_gradients(zip(grads, tvars)) 
Author: philipperemy, Project: tensorflow-grid-lstm, Lines: 63, Source: model.py
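
One caveat for readers reusing this code: rnn_cell.MultiRNNCell([cell] * args.num_layers) passes the same cell object for every layer. That pattern worked in the early TensorFlow releases this project targets, but later 1.x releases complain about reusing a cell across layers; the usual fix is a list comprehension that constructs a fresh cell per layer, as in Example 5.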

Example 8: main

# Required module import: from tensorflow.contrib import rnn [as alias]
# Or: from tensorflow.contrib.rnn import BasicRNNCell [as alias]
def main():
    args = parser.parse_args()
    input_size = args.input_size
    batch_size = args.batch_size
    hidden_size = args.hidden_size

    # Placeholders for inputs.
    x = tf.placeholder(tf.float32, [batch_size, args.ponder, 1+input_size])
    y = tf.placeholder(tf.float32, [batch_size, 1])
    zeros = tf.zeros([batch_size, 1])

    rnn = BasicRNNCell(args.hidden_size)
    outputs, final_state = tf.nn.dynamic_rnn(rnn, x, dtype=tf.float32)

    softmax_w = tf.get_variable("softmax_w", [hidden_size, 1])
    softmax_b = tf.get_variable("softmax_b", [1])
    logits = tf.matmul(final_state, softmax_w) + softmax_b

    loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=y, logits=logits)

    loss = tf.reduce_mean(loss)

    train_step = tf.train.AdamOptimizer(args.lr).minimize(loss)

    correct_prediction = tf.equal(tf.cast(tf.greater(logits, zeros), tf.float32), y)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar('Accuracy', accuracy)
    tf.summary.scalar('Loss', loss)

    merged = tf.summary.merge_all()
    logdir = './logs/parity_test/LR={}_Len={}_Pond={}'.format(args.lr, args.input_size, args.ponder)
    while os.path.isdir(logdir):
        logdir += '_'
    if args.log:
        writer = tf.summary.FileWriter(logdir)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.vram_fraction)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(tf.global_variables_initializer())
        loop = trange(args.steps)
        for i in loop:
            batch = generate(args)

            if i % args.log_interval == 0:
                summary, step_accuracy, step_loss = sess.run([merged, accuracy, loss], feed_dict={x: batch[0],
                                                                                                  y: batch[1]})

                if args.print_results:
                    loop.set_postfix(Loss='{:0.3f}'.format(step_loss),
                                     Accuracy='{:0.3f}'.format(step_accuracy))
                if args.log:
                    writer.add_summary(summary, i)
            train_step.run(feed_dict={x: batch[0], y: batch[1]}) 
Author: imatge-upc, Project: danifojo-2018-repeatrnn, Lines: 55, Source: parity-repeat.py
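
Here final_state rather than outputs feeds the classifier: for BasicRNNCell the state returned by dynamic_rnn is identical to the last output, a [batch_size, hidden_size] tensor, so it can be multiplied directly against softmax_w.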


Note: The tensorflow.contrib.rnn.BasicRNNCell examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Please do not republish without permission.