

Python utils.load_model Method Code Examples

This article collects typical usage examples of the Python method utils.load_model. If you are unsure what utils.load_model does, how to call it, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples from the utils module where the method is defined.


The sections below present 15 code examples of the utils.load_model method, ordered by popularity by default.
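
Note that utils is a project-local module in every example below, so the signature of load_model varies from project to project. As a point of reference only, a minimal Keras-style helper consistent with the single-argument calls in Examples 1, 2, and 15 might look like the following sketch; the file name generator.h5 and the directory layout are assumptions, not taken from any of the projects shown here.

import os
from keras.models import load_model as keras_load_model

def load_model(out_dir):
    # Hypothetical helper: restore a trained Keras generator from a run
    # directory. Assumes training saved it as '<out_dir>/generator.h5'.
    return keras_load_model(os.path.join(out_dir, "generator.h5"))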

Example 1: infer

# Required import: import utils [as alias]
# Alternatively: from utils import load_model [as alias]
def infer(data_filepath='data/flowers.hdf5', z_dim=128, out_dir='gan',
          n_steps=10):

    G = load_model(out_dir)
    val_data = get_data(data_filepath, 'train')
    val_data = next(iterate_minibatches(val_data, 2))
    emb_source, emb_target = val_data[1]
    txts = val_data[2]

    z = np.random.uniform(-1, 1, size=(1, z_dim))

    G.trainable = False
    for i in range(n_steps+1):
        p = i/float(n_steps)
        emb = emb_source * (1-p) + emb_target * p
        emb = emb[None, :]
        fake_image = G.predict([z, emb])[0]
        img = ((fake_image + 1)*0.5)
        plt.imsave("{}/fake_text_interpolation_i{}".format(out_dir, i), img)
        print(i, str(txts[int(round(p))]).strip(),
              file=open("{}/fake_text_interpolation.txt".format(out_dir), "a")) 
Developer ID: PacktPublishing, Project: Hands-On-Generative-Adversarial-Networks-with-Keras, Lines of code: 23, Source file: interpolation_in_text.py

Example 2: infer

# Required import: import utils [as alias]
# Alternatively: from utils import load_model [as alias]
def infer(data_filepath='data/flowers.hdf5', z_dim=128, out_dir='gan',
          n_samples=5):

    G = load_model(out_dir)
    val_data = get_data(data_filepath, 'train')
    val_data = next(iterate_minibatches(val_data, n_samples))    
    emb, txts = val_data[1], val_data[2]

    # sample z vector for inference
    z = np.random.uniform(-1, 1, size=(n_samples, z_dim))

    G.trainable = False
    fake_images = G.predict([z, emb])
    for i in range(n_samples):
        img = ((fake_images[i] + 1)*0.5)
        plt.imsave("{}/fake_{}".format(out_dir, i), img)
        print(i, str(txts[i]).strip(),
              file=open("{}/fake_text.txt".format(out_dir), "a")) 
Developer ID: PacktPublishing, Project: Hands-On-Generative-Adversarial-Networks-with-Keras, Lines of code: 20, Source file: inference.py

Example 3: main

# Required import: import utils [as alias]
# Alternatively: from utils import load_model [as alias]
def main(test_desc_file, train_desc_file, load_dir):
    # Prepare the data generator
    datagen = DataGenerator()
    # Load the JSON file that contains the dataset
    datagen.load_test_data(test_desc_file)
    datagen.load_train_data(train_desc_file)
    # Use a few samples from the dataset, to calculate the means and variance
    # of the features, so that we can center our inputs to the network
    datagen.fit_train(100)

    # Compile a Recurrent Network with 1 1D convolution layer, GRU units
    # and 1 fully connected layer
    model = load_model(load_dir)

    # Compile the testing function
    test_fn = compile_test_fn(model)

    # Test the model
    test_loss = test(model, test_fn, datagen)
    print ("Test loss: {}".format(test_loss)) 
Developer ID: baidu-research, Project: ba-dls-deepspeech, Lines of code: 22, Source file: test.py

Example 4: main

# Required import: import utils [as alias]
# Alternatively: from utils import load_model [as alias]
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('test_file', type=str,
                        help='Path to an audio file')
    parser.add_argument('train_desc_file', type=str,
                        help='Path to the training JSON-line file. This will '
                             'be used to extract feature means/variance')
    parser.add_argument('load_dir', type=str,
                        help='Directory where a trained model is stored.')
    parser.add_argument('--weights_file', type=str, default=None,
                        help='Path to a model weights file')
    args = parser.parse_args()

    print ("Loading model")
    model = load_model(args.load_dir, args.weights_file)
    visualize(model, args.test_file, args.train_desc_file) 
Developer ID: baidu-research, Project: ba-dls-deepspeech, Lines of code: 18, Source file: visualize.py
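
Examples 3 and 4 use a two-argument variant, load_model(load_dir, weights_file), that rebuilds the network and then loads trained weights. A minimal sketch of such a helper, assuming the architecture was serialized to JSON alongside the weights (the file names model_config.json and model_weights.h5 are assumptions, not the project's actual layout):

import os
from keras.models import model_from_json

def load_model(load_dir, weights_file=None):
    # Hypothetical helper: file names below are assumptions.
    # Rebuild the architecture from a saved JSON description...
    with open(os.path.join(load_dir, 'model_config.json')) as f:
        model = model_from_json(f.read())
    # ...then restore the trained weights.
    if weights_file is None:
        weights_file = os.path.join(load_dir, 'model_weights.h5')
    model.load_weights(weights_file)
    return model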

Example 5: main

# Required import: import utils [as alias]
# Alternatively: from utils import load_model [as alias]
def main(args):
    # MODEL
    num_features = [args.features*i for i in range(1, args.levels+1)] if args.feature_growth == "add" else \
                   [args.features*2**i for i in range(0, args.levels)]
    target_outputs = int(args.output_size * args.sr)
    model = Waveunet(args.channels, num_features, args.channels, args.instruments, kernel_size=args.kernel_size,
                     target_output_size=target_outputs, depth=args.depth, strides=args.strides,
                     conv_type=args.conv_type, res=args.res, separate=args.separate)

    if args.cuda:
        model = utils.DataParallel(model)
        print("move model to gpu")
        model.cuda()

    print("Loading model from checkpoint " + str(args.load_model))
    state = utils.load_model(model, None, args.load_model)
    print('Step', state['step'])

    preds = predict_song(args, args.input, model)

    output_folder = os.path.dirname(args.input) if args.output is None else args.output
    for inst in preds.keys():
        utils.write_wav(os.path.join(output_folder, os.path.basename(args.input) + "_" + inst + ".wav"), preds[inst], args.sr) 
Developer ID: f90, Project: Wave-U-Net-Pytorch, Lines of code: 25, Source file: predict.py
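
In Example 5, load_model follows the common PyTorch checkpoint pattern: it restores weights into an already-constructed model (and optionally an optimizer) and returns the saved training state, which is why the caller can read state['step']. A minimal sketch of such a helper, assuming the checkpoint was written with torch.save as a dict; the key names 'model_state_dict' and 'optimizer_state_dict' are assumptions:

import torch

def load_model(model, optimizer, path):
    # Hypothetical checkpoint loader; the key names are assumptions.
    checkpoint = torch.load(path, map_location='cpu')
    model.load_state_dict(checkpoint['model_state_dict'])
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
    # Return the full checkpoint so callers can read metadata such as 'step'.
    return checkpoint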

Example 6: main

# Required import: import utils [as alias]
# Alternatively: from utils import load_model [as alias]
def main():
  model = utils.load_model(args)
  new_model = fc_decomposition(model, args)
  new_model.save(args.save_model) 
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 6, Source file: acc_fc.py

Example 7: main

# Required import: import utils [as alias]
# Alternatively: from utils import load_model [as alias]
def main():
  model = utils.load_model(args)
  new_model = conv_vh_decomposition(model, args)
  new_model.save(args.save_model) 
Developer ID: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 6, Source file: acc_conv.py

Example 8: load

# Required import: import utils [as alias]
# Alternatively: from utils import load_model [as alias]
def load(self, model):
        self.t_lstm = load_model(model["t_lstm_file_name"])
        self.in_vocabulary = self.t_lstm.in_vocabulary
        super(TA_LSTM, self).load(model) 
Developer ID: ottokart, Project: punctuator, Lines of code: 6, Source file: models.py

Example 9: run

# Required import: import utils [as alias]
# Alternatively: from utils import load_model [as alias]
def run(args):
    pprint(args)
    logging.basicConfig(level=logging.INFO)

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    seed(args.seed)

    dataset, ontology, vocab, Eword = load_dataset()

    model = load_model(args.model, args, ontology, vocab)
    model.save_config()
    model.load_emb(Eword)

    model = model.to(model.device)
    if not args.test:
        logging.info('Starting train')
        model.run_train(dataset['train'], dataset['dev'], args)
    if args.resume:
        model.load_best_save(directory=args.resume)
    else:
        model.load_best_save(directory=args.dout)
    model = model.to(model.device)
    logging.info('Running dev evaluation')
    dev_out = model.run_eval(dataset['dev'], args)
    pprint(dev_out) 
Developer ID: salesforce, Project: glad, Lines of code: 28, Source file: train.py

Example 10: __call__

# Required import: import utils [as alias]
# Alternatively: from utils import load_model [as alias]
def __call__(self, sess, epoch, iteration, model, loss, processed):
        if epoch == self.at_epoch:
            print("Loading model...")
            model = load_model(sess, self.path + "latest/") 
Developer ID: uclnlp, Project: pycodesuggest, Lines of code: 6, Source file: hooks.py

Example 11: _train

# Required import: import utils [as alias]
# Alternatively: from utils import load_model [as alias]
def _train(net, training_data, validation_data, model_name, learning_rate, max_epochs, min_improvement):
    min_learning_rate = 1e-6
    best_validation_ppl = np.inf
    divide = False

    for epoch in range(1, max_epochs+1):
        
        epoch_start = time()
        
        print "\n======= EPOCH %s =======" % epoch
        print "\tLearning rate is %s" % learning_rate

        train_ppl = _process_corpus(net, training_data, mode='train', learning_rate=learning_rate) 
        print "\tTrain PPL is %.3f" % train_ppl

        validation_ppl = _process_corpus(net, validation_data, mode='test')
        print "\tValidation PPL is %.3f" % validation_ppl

        print "\tTime taken: %ds" % (time() - epoch_start)

        if np.log(validation_ppl) * min_improvement > np.log(best_validation_ppl): # Mikolovs recipe
            if not divide:
                divide = True
                print "\tStarting to reduce the learning rate..."
                if validation_ppl > best_validation_ppl:
                    print "\tLoading best model."
                    net = utils.load_model("../out/" + model_name)
            else:
                if validation_ppl < best_validation_ppl:
                    print "\tSaving model."
                    net.save("../out/" + model_name, final=True)
                break
        else:
            print "\tNew best model! Saving..."
            best_validation_ppl = validation_ppl
            final = learning_rate / 2. < min_learning_rate or epoch == max_epochs
            net.save("../out/" + model_name, final)

        if divide:
            learning_rate /= 2.
        
        if learning_rate < min_learning_rate:
            break
            
    print "-"*30
    print "Finished training."
    print "Best validation PPL is %.3f\n\n" % best_validation_ppl 
Developer ID: ottokart, Project: punctuator, Lines of code: 49, Source file: trainer.py

Example 12: main

# Required import: import utils [as alias]
# Alternatively: from utils import load_model [as alias]
def main():
    """
    The main executable function
    """
    parser = make_argument_parser()
    args = parser.parse_args()

    input_dir = args.inputdir
    model_dir = args.modeldir
    tf = args.factor
    bed_file = args.bed
    output_file = args.outputfile

    print('Loading genome')
    genome = utils.load_genome()
    print('Loading model')
    model_tfs, model_bigwig_names, features, model = utils.load_model(model_dir)
    L = model.input_shape[0][1]
    utils.L = L
    assert tf in model_tfs
    assert 'bigwig' in features
    use_meta = 'meta' in features
    use_gencode = 'gencode' in features
    print('Loading test data')
    is_sorted = True
    bigwig_names, meta_names, datagen_bed, nonblacklist_bools = utils.load_beddata(genome, bed_file, use_meta, use_gencode, input_dir, is_sorted)
    assert bigwig_names == model_bigwig_names
    if use_meta:
        model_meta_file = model_dir + '/meta.txt'
        assert os.path.isfile(model_meta_file)
        model_meta_names = np.loadtxt(model_meta_file, dtype=str)
        if len(model_meta_names.shape) == 0:
            model_meta_names = [str(model_meta_names)]
        else:
            model_meta_names = list(model_meta_names)
        assert meta_names == model_meta_names
    print('Generating predictions')
    model_tf_index = model_tfs.index(tf)
    model_predicts = model.predict_generator(datagen_bed, val_samples=len(datagen_bed), pickle_safe=True)
    if len(model_tfs) > 1:
        model_tf_predicts = model_predicts[:, model_tf_index]
    else:
        model_tf_predicts = model_predicts
    final_scores = np.zeros(len(nonblacklist_bools))
    final_scores[nonblacklist_bools] = model_tf_predicts
    print('Saving predictions')
    df = pandas.read_csv(bed_file, sep='\t', header=None)
    df[3] = final_scores
    df.to_csv(output_file, sep='\t', compression='gzip', float_format='%.3e', header=False, index=False) 
Developer ID: uci-cbcl, Project: FactorNet, Lines of code: 51, Source file: predict.py

Example 13: main

# Required import: import utils [as alias]
# Alternatively: from utils import load_model [as alias]
def main():
    """
    The main executable function
    """
    parser = make_argument_parser()
    args = parser.parse_args()

    input_dir = args.inputdir
    model_dir = args.modeldir
    bed_file = args.bed
    chrom = args.chrom

    if args.outputdir is None:
        clobber = True
        output_dir = args.outputdirc
    else:
        clobber = False
        output_dir = args.outputdir

    try:  # adapted from dreme.py by T. Bailey
        os.makedirs(output_dir)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            if not clobber:
                print >> sys.stderr, ('output directory (%s) already exists '
                                      'but you specified not to clobber it') % output_dir
                sys.exit(1)
            else:
                print >> sys.stderr, ('output directory (%s) already exists '
                                      'so it will be clobbered') % output_dir

    print('Loading genome')
    genome = utils.load_genome()
    print('Loading model')
    model_tfs, model_bigwig_names, features, model = utils.load_model(model_dir)
    L = model.input_shape[0][1]
    utils.L = L
    use_meta = 'meta' in features
    use_gencode = 'gencode' in features
    print('Loading BED data')
    is_sorted = False
    bigwig_names, meta_names, datagen_bed, nonblacklist_bools = utils.load_beddata(genome, bed_file, use_meta, use_gencode, input_dir, is_sorted, chrom)
    assert bigwig_names == model_bigwig_names
    if use_meta:
        model_meta_file = model_dir + '/meta.txt'
        assert os.path.isfile(model_meta_file)
        model_meta_names = np.loadtxt(model_meta_file, dtype=str)
        if len(model_meta_names.shape) == 0:
            model_meta_names = [str(model_meta_names)]
        else:
            model_meta_names = list(model_meta_names)
        assert meta_names == model_meta_names
    output_results(bigwig_names, datagen_bed, model, output_dir) 
Developer ID: uci-cbcl, Project: FactorNet, Lines of code: 55, Source file: visualize.py

Example 14: train

# Required import: import utils [as alias]
# Alternatively: from utils import load_model [as alias]
def train(**kwargs):
    config = Config()
    config.update(**kwargs)
    print('Current settings:\n', config)
    if config.use_cuda:
        torch.cuda.set_device(config.gpu)
    print('loading corpus')
    vocab = load_vocab(config.vocab)
    label_dic = load_vocab(config.label_file)
    tagset_size = len(label_dic)
    train_data = read_corpus(config.train_file, max_length=config.max_length, label_dic=label_dic, vocab=vocab)
    dev_data = read_corpus(config.dev_file, max_length=config.max_length, label_dic=label_dic, vocab=vocab)

    train_ids = torch.LongTensor([temp.input_id for temp in train_data])
    train_masks = torch.LongTensor([temp.input_mask for temp in train_data])
    train_tags = torch.LongTensor([temp.label_id for temp in train_data])

    train_dataset = TensorDataset(train_ids, train_masks, train_tags)
    train_loader = DataLoader(train_dataset, shuffle=True, batch_size=config.batch_size)

    dev_ids = torch.LongTensor([temp.input_id for temp in dev_data])
    dev_masks = torch.LongTensor([temp.input_mask for temp in dev_data])
    dev_tags = torch.LongTensor([temp.label_id for temp in dev_data])

    dev_dataset = TensorDataset(dev_ids, dev_masks, dev_tags)
    dev_loader = DataLoader(dev_dataset, shuffle=True, batch_size=config.batch_size)
    model = BERT_LSTM_CRF(config.bert_path, tagset_size, config.bert_embedding, config.rnn_hidden, config.rnn_layer, dropout_ratio=config.dropout_ratio, dropout1=config.dropout1, use_cuda=config.use_cuda)
    if config.load_model:
        assert config.load_path is not None
        model = load_model(model, name=config.load_path)
    if config.use_cuda:
        model.cuda()
    model.train()
    optimizer = getattr(optim, config.optim)
    optimizer = optimizer(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)
    eval_loss = 10000
    for epoch in range(config.base_epoch):
        step = 0
        for i, batch in enumerate(train_loader):
            step += 1
            model.zero_grad()
            inputs, masks, tags = batch
            inputs, masks, tags = Variable(inputs), Variable(masks), Variable(tags)
            if config.use_cuda:
                inputs, masks, tags = inputs.cuda(), masks.cuda(), tags.cuda()
            feats = model(inputs, masks)
            loss = model.loss(feats, masks, tags)
            loss.backward()
            optimizer.step()
            if step % 50 == 0:
                print('step: {} |  epoch: {}|  loss: {}'.format(step, epoch, loss.item()))
        loss_temp = dev(model, dev_loader, epoch, config)
        if loss_temp < eval_loss:
            eval_loss = loss_temp
            save_model(model, epoch)
Developer ID: chenxiaoyouyou, Project: Bert-BiLSTM-CRF-pytorch, Lines of code: 56, Source file: main.py
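
Example 14 pairs load_model(model, name=path) with a save_model(model, epoch) helper; both plausibly operate on the model's state_dict. A minimal sketch consistent with those two calls; the checkpoint naming scheme is an assumption:

import torch

def load_model(model, name):
    # Hypothetical: restore weights saved by save_model below.
    model.load_state_dict(torch.load(name, map_location='cpu'))
    return model

def save_model(model, epoch, prefix='checkpoint'):
    # Hypothetical one-file-per-epoch naming scheme.
    torch.save(model.state_dict(), '{}_epoch_{}.pt'.format(prefix, epoch))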

Example 15: infer

# Required import: import utils [as alias]
# Alternatively: from utils import load_model [as alias]
def infer(data_filepath='data/flowers.hdf5', z_dim=128, out_dir='gan',
          n_steps=10):

    G = load_model(out_dir)
    val_data = get_data(data_filepath, 'train')
    val_data = next(iterate_minibatches(val_data, 2))
    emb_a, emb_b = val_data[1]
    txts = val_data[2]

    # add batch dimension
    emb_a, emb_b = emb_a[None, :], emb_b[None, :]

    # sample z vector for inference
    z = np.random.uniform(-1, 1, size=(1, z_dim))

    G.trainable = False

    # add and subtract
    emb_add = (emb_a + emb_b)
    emb_a_sub_b = (emb_a - emb_b)
    emb_b_sub_a = (emb_b - emb_a)

    # generate images
    fake_a = G.predict([z, emb_a])[0]
    fake_b = G.predict([z, emb_b])[0]
    fake_add = G.predict([z, emb_add])[0]
    fake_a_sub_b = G.predict([z, emb_a_sub_b])[0]
    fake_b_sub_a = G.predict([z, emb_b_sub_a])[0]

    fake_a = ((fake_a + 1)*0.5)
    fake_b = ((fake_b + 1)*0.5)
    fake_add = ((fake_add + 1)*0.5)
    fake_a_sub_b = ((fake_a_sub_b + 1)*0.5)
    fake_b_sub_a = ((fake_b_sub_a + 1)*0.5)

    plt.imsave("{}/fake_text_arithmetic_a".format(out_dir), fake_a)
    plt.imsave("{}/fake_text_arithmetic_b".format(out_dir), fake_b)
    plt.imsave("{}/fake_text_arithmetic_add".format(out_dir), fake_add)
    plt.imsave("{}/fake_text_arithmetic_a_sub_b".format(out_dir), fake_a_sub_b)
    plt.imsave("{}/fake_text_arithmetic_b_sub_a".format(out_dir), fake_b_sub_a)
    print(str(txts[0]), str(txts[1]),
          file=open("{}/fake_text_arithmetic.txt".format(out_dir), "a")) 
Developer ID: PacktPublishing, Project: Hands-On-Generative-Adversarial-Networks-with-Keras, Lines of code: 47, Source file: arithmetic_in_text.py


Note: The utils.load_model examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. For distribution and use, refer to the license of the corresponding project. Please do not reproduce without permission.