

Python utils.load_model Method Code Examples

This article collects typical code examples of the Python utils.load_model method. If you have been wondering what utils.load_model does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples from the utils module each snippet belongs to.


The following presents 15 code examples of the utils.load_model method, sorted by popularity by default. Each snippet is excerpted from an open-source project, with attribution at the end.
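A note before the examples: utils here is a project-local helper module, not a standard library, so each project defines its own load_model. For orientation, a minimal Keras-style variant might look like the sketch below; the model.json/weights.h5 file layout is purely an assumption for illustration and is not taken from any of the projects that follow.

# Hypothetical minimal load_model; file names are illustrative assumptions.
import os
from tensorflow.keras.models import model_from_json

def load_model(model_dir):
    # Rebuild the architecture from its serialized JSON description...
    with open(os.path.join(model_dir, "model.json")) as f:
        model = model_from_json(f.read())
    # ...then restore the trained weights alongside it.
    model.load_weights(os.path.join(model_dir, "weights.h5"))
    return model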

Example 1: infer

# Required import: import utils [as alias]
# Or: from utils import load_model [as alias]
def infer(data_filepath='data/flowers.hdf5', z_dim=128, out_dir='gan',
          n_steps=10):

    G = load_model(out_dir)
    val_data = get_data(data_filepath, 'train')
    val_data = next(iterate_minibatches(val_data, 2))
    emb_source, emb_target = val_data[1]
    txts = val_data[2]

    z = np.random.uniform(-1, 1, size=(1, z_dim))

    G.trainable = False
    for i in range(n_steps+1):
        p = i/float(n_steps)
        emb = emb_source * (1-p) + emb_target * p
        emb = emb[None, :]
        fake_image = G.predict([z, emb])[0]
        img = ((fake_image + 1)*0.5)
        plt.imsave("{}/fake_text_interpolation_i{}".format(out_dir, i), img)
        print(i, str(txts[int(round(p))]).strip(),
              file=open("{}/fake_text_interpolation.txt".format(out_dir), "a")) 
Developer: PacktPublishing, Project: Hands-On-Generative-Adversarial-Networks-with-Keras, Source: interpolation_in_text.py
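Examples 1, 2, and 15 also depend on two helpers that are not shown, get_data and iterate_minibatches. A plausible minimal sketch is given below; the HDF5 dataset names ('images', 'embeddings', 'texts') are assumptions for illustration, not the actual layout of data/flowers.hdf5.

# Hypothetical data helpers; dataset names are illustrative assumptions.
import h5py

def get_data(filepath, split):
    with h5py.File(filepath, "r") as f:
        group = f[split]
        return group["images"][:], group["embeddings"][:], group["texts"][:]

def iterate_minibatches(data, batch_size):
    images, embeddings, texts = data
    for start in range(0, len(images), batch_size):
        s = slice(start, start + batch_size)
        # Matches the indexing in the examples:
        # batch[1] -> embeddings, batch[2] -> texts.
        yield images[s], embeddings[s], texts[s]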

Example 2: infer

# Required import: import utils [as alias]
# Or: from utils import load_model [as alias]
def infer(data_filepath='data/flowers.hdf5', z_dim=128, out_dir='gan',
          n_samples=5):

    G = load_model(out_dir)
    val_data = get_data(data_filepath, 'train')
    val_data = next(iterate_minibatches(val_data, n_samples))    
    emb, txts = val_data[1], val_data[2]

    # sample z vector for inference
    z = np.random.uniform(-1, 1, size=(n_samples, z_dim))

    G.trainable = False
    fake_images = G.predict([z, emb])
    for i in range(n_samples):
        img = ((fake_images[i] + 1)*0.5)
        plt.imsave("{}/fake_{}".format(out_dir, i), img)
        print(i, str(txts[i]).strip(),
              file=open("{}/fake_text.txt".format(out_dir), "a")) 
Developer: PacktPublishing, Project: Hands-On-Generative-Adversarial-Networks-with-Keras, Source: inference.py

Example 3: main

# Required import: import utils [as alias]
# Or: from utils import load_model [as alias]
def main(test_desc_file, train_desc_file, load_dir):
    # Prepare the data generator
    datagen = DataGenerator()
    # Load the JSON file that contains the dataset
    datagen.load_test_data(test_desc_file)
    datagen.load_train_data(train_desc_file)
    # Use a few samples from the dataset, to calculate the means and variance
    # of the features, so that we can center our inputs to the network
    datagen.fit_train(100)

    # Compile a Recurrent Network with 1 1D convolution layer, GRU units
    # and 1 fully connected layer
    model = load_model(load_dir)

    # Compile the testing function
    test_fn = compile_test_fn(model)

    # Test the model
    test_loss = test(model, test_fn, datagen)
    print ("Test loss: {}".format(test_loss)) 
Developer: baidu-research, Project: ba-dls-deepspeech, Source: test.py

Example 4: main

# Required import: import utils [as alias]
# Or: from utils import load_model [as alias]
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('test_file', type=str,
                        help='Path to an audio file')
    parser.add_argument('train_desc_file', type=str,
                        help='Path to the training JSON-line file. This will '
                             'be used to extract feature means/variance')
    parser.add_argument('load_dir', type=str,
                        help='Directory where a trained model is stored.')
    parser.add_argument('--weights_file', type=str, default=None,
                        help='Path to a model weights file')
    args = parser.parse_args()

    print ("Loading model")
    model = load_model(args.load_dir, args.weights_file)
    visualize(model, args.test_file, args.train_desc_file) 
Developer: baidu-research, Project: ba-dls-deepspeech, Source: visualize.py

Example 5: main

# Required import: import utils [as alias]
# Or: from utils import load_model [as alias]
def main(args):
    # MODEL
    num_features = [args.features*i for i in range(1, args.levels+1)] if args.feature_growth == "add" else \
                   [args.features*2**i for i in range(0, args.levels)]
    target_outputs = int(args.output_size * args.sr)
    model = Waveunet(args.channels, num_features, args.channels, args.instruments, kernel_size=args.kernel_size,
                     target_output_size=target_outputs, depth=args.depth, strides=args.strides,
                     conv_type=args.conv_type, res=args.res, separate=args.separate)

    if args.cuda:
        model = utils.DataParallel(model)
        print("move model to gpu")
        model.cuda()

    print("Loading model from checkpoint " + str(args.load_model))
    state = utils.load_model(model, None, args.load_model)
    print('Step', state['step'])

    preds = predict_song(args, args.input, model)

    output_folder = os.path.dirname(args.input) if args.output is None else args.output
    for inst in preds.keys():
        utils.write_wav(os.path.join(output_folder, os.path.basename(args.input) + "_" + inst + ".wav"), preds[inst], args.sr) 
Developer: f90, Project: Wave-U-Net-Pytorch, Source: predict.py
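In this project, load_model receives the model, an optional optimizer, and a checkpoint path, and returns the stored training state (the example reads state['step']). A typical PyTorch loader with that shape might look like the sketch below; the checkpoint key names are assumptions for illustration, not necessarily the project's actual keys.

# Hypothetical checkpoint loader; key names are illustrative assumptions.
import torch

def load_model(model, optimizer, path):
    checkpoint = torch.load(path, map_location="cpu")
    model.load_state_dict(checkpoint["model_state_dict"])
    if optimizer is not None:
        optimizer.load_state_dict(checkpoint["optimizer_state_dict"])
    # Return the full state so callers can read e.g. checkpoint["step"].
    return checkpoint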

Example 6: main

# Required import: import utils [as alias]
# Or: from utils import load_model [as alias]
def main():
  model = utils.load_model(args)
  new_model = fc_decomposition(model, args)
  new_model.save(args.save_model) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Source: acc_fc.py

Example 7: main

# Required import: import utils [as alias]
# Or: from utils import load_model [as alias]
def main():
  model = utils.load_model(args)
  new_model = conv_vh_decomposition(model, args)
  new_model.save(args.save_model) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Source: acc_conv.py
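Examples 6 and 7 come from an MXNet model-acceleration tool, where load_model(args) reconstructs a trained network before decomposing its layers. A sketch assuming the classic MXNet FeedForward checkpoint API is shown below; the args.model prefix and args.load_epoch parameter names are assumptions for illustration.

# Hypothetical MXNet loader; argument names are illustrative assumptions.
import mxnet as mx

def load_model(args):
    # Loads a checkpoint saved as <prefix>-symbol.json / <prefix>-<epoch>.params.
    return mx.model.FeedForward.load(args.model, args.load_epoch)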

Example 8: load

# Required import: import utils [as alias]
# Or: from utils import load_model [as alias]
def load(self, model):
        self.t_lstm = load_model(model["t_lstm_file_name"])
        self.in_vocabulary = self.t_lstm.in_vocabulary
        super(TA_LSTM, self).load(model) 
Developer: ottokart, Project: punctuator, Source: models.py

Example 9: run

# Required import: import utils [as alias]
# Or: from utils import load_model [as alias]
def run(args):
    pprint(args)
    logging.basicConfig(level=logging.INFO)

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    seed(args.seed)

    dataset, ontology, vocab, Eword = load_dataset()

    model = load_model(args.model, args, ontology, vocab)
    model.save_config()
    model.load_emb(Eword)

    model = model.to(model.device)
    if not args.test:
        logging.info('Starting train')
        model.run_train(dataset['train'], dataset['dev'], args)
    if args.resume:
        model.load_best_save(directory=args.resume)
    else:
        model.load_best_save(directory=args.dout)
    model = model.to(model.device)
    logging.info('Running dev evaluation')
    dev_out = model.run_eval(dataset['dev'], args)
    pprint(dev_out) 
Developer: salesforce, Project: glad, Source: train.py
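Here load_model resolves a model class by name and instantiates it with the dataset ontology and vocabulary. A registry-style sketch of that pattern is below; the models.<name> module path and Model class name are assumptions for illustration.

# Hypothetical registry-style loader; module/class names are assumptions.
import importlib

def load_model(model_name, args, ontology, vocab):
    # Resolve "models.<model_name>" and instantiate its Model class.
    module = importlib.import_module("models.{}".format(model_name))
    return module.Model(args, ontology, vocab)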

Example 10: __call__

# Required import: import utils [as alias]
# Or: from utils import load_model [as alias]
def __call__(self, sess, epoch, iteration, model, loss, processed):
        if epoch == self.at_epoch:
            print("Loading model...")
            model = load_model(sess, self.path + "latest/") 
Developer: uclnlp, Project: pycodesuggest, Source: hooks.py

Example 11: _train (Python 2)

# Required import: import utils [as alias]
# Or: from utils import load_model [as alias]
def _train(net, training_data, validation_data, model_name, learning_rate, max_epochs, min_improvement):
    min_learning_rate = 1e-6
    best_validation_ppl = np.inf
    divide = False

    for epoch in range(1, max_epochs+1):
        
        epoch_start = time()
        
        print "\n======= EPOCH %s =======" % epoch
        print "\tLearning rate is %s" % learning_rate

        train_ppl = _process_corpus(net, training_data, mode='train', learning_rate=learning_rate) 
        print "\tTrain PPL is %.3f" % train_ppl

        validation_ppl = _process_corpus(net, validation_data, mode='test')
        print "\tValidation PPL is %.3f" % validation_ppl

        print "\tTime taken: %ds" % (time() - epoch_start)

        if np.log(validation_ppl) * min_improvement > np.log(best_validation_ppl): # Mikolov's recipe
            if not divide:
                divide = True
                print "\tStarting to reduce the learning rate..."
                if validation_ppl > best_validation_ppl:
                    print "\tLoading best model."
                    net = utils.load_model("../out/" + model_name)
            else:
                if validation_ppl < best_validation_ppl:
                    print "\tSaving model."
                    net.save("../out/" + model_name, final=True)
                break
        else:
            print "\tNew best model! Saving..."
            best_validation_ppl = validation_ppl
            final = learning_rate / 2. < min_learning_rate or epoch == max_epochs
            net.save("../out/" + model_name, final)

        if divide:
            learning_rate /= 2.
        
        if learning_rate < min_learning_rate:
            break
            
    print "-"*30
    print "Finished training."
    print "Best validation PPL is %.3f\n\n" % best_validation_ppl 
Developer: ottokart, Project: punctuator, Source: trainer.py
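The test np.log(validation_ppl) * min_improvement > np.log(best_validation_ppl) implements Mikolov's RNNLM learning-rate recipe: once log-perplexity stops improving by at least the min_improvement factor, the learning rate starts halving. A small self-contained check, with purely illustrative numbers:

# Illustrative numbers only: best PPL so far is 120, threshold 1.003.
import numpy as np

min_improvement = 1.003
best_validation_ppl = 120.0

for validation_ppl in (110.0, 119.9):
    stalled = np.log(validation_ppl) * min_improvement > np.log(best_validation_ppl)
    # 110.0 is a clear improvement -> keep the learning rate;
    # 119.9 improves by less than the threshold -> start halving.
    print(validation_ppl, "halve LR" if stalled else "keep LR")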

Example 12: main (Python 2)

# Required import: import utils [as alias]
# Or: from utils import load_model [as alias]
def main():
    """
    The main executable function
    """
    parser = make_argument_parser()
    args = parser.parse_args()

    input_dir = args.inputdir
    model_dir = args.modeldir
    tf = args.factor
    bed_file = args.bed
    output_file = args.outputfile

    print 'Loading genome'
    genome = utils.load_genome()
    print 'Loading model'
    model_tfs, model_bigwig_names, features, model = utils.load_model(model_dir)
    L = model.input_shape[0][1]
    utils.L = L
    assert tf in model_tfs
    assert 'bigwig' in features
    use_meta = 'meta' in features
    use_gencode = 'gencode' in features
    print 'Loading test data'
    is_sorted = True
    bigwig_names, meta_names, datagen_bed, nonblacklist_bools = utils.load_beddata(genome, bed_file, use_meta, use_gencode, input_dir, is_sorted)
    assert bigwig_names == model_bigwig_names
    if use_meta:
        model_meta_file = model_dir + '/meta.txt'
        assert os.path.isfile(model_meta_file)
        model_meta_names = np.loadtxt(model_meta_file, dtype=str)
        if len(model_meta_names.shape) == 0:
            model_meta_names = [str(model_meta_names)]
        else:
            model_meta_names = list(model_meta_names)
        assert meta_names == model_meta_names
    print 'Generating predictions'
    model_tf_index = model_tfs.index(tf)
    model_predicts = model.predict_generator(datagen_bed, val_samples=len(datagen_bed), pickle_safe=True)
    if len(model_tfs) > 1:
        model_tf_predicts = model_predicts[:, model_tf_index]
    else:
        model_tf_predicts = model_predicts
    final_scores = np.zeros(len(nonblacklist_bools))
    final_scores[nonblacklist_bools] = model_tf_predicts
    print 'Saving predictions'
    df = pandas.read_csv(bed_file, sep='\t', header=None)
    df[3] = final_scores
    df.to_csv(output_file, sep='\t', compression='gzip', float_format='%.3e', header=False, index=False) 
Developer: uci-cbcl, Project: FactorNet, Source: predict.py
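The line final_scores[nonblacklist_bools] = model_tf_predicts scatters per-interval predictions back into a full-length score vector, leaving blacklisted intervals at zero. The pattern in isolation, with made-up values:

# Illustrative values: 5 BED intervals, the 3rd is blacklisted.
import numpy as np

nonblacklist_bools = np.array([True, True, False, True, True])
predictions = np.array([0.9, 0.2, 0.7, 0.1])  # one score per kept interval

final_scores = np.zeros(len(nonblacklist_bools))
final_scores[nonblacklist_bools] = predictions
print(final_scores)  # [0.9 0.2 0.  0.7 0.1]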

Example 13: main (Python 2)

# Required import: import utils [as alias]
# Or: from utils import load_model [as alias]
def main():
    """
    The main executable function
    """
    parser = make_argument_parser()
    args = parser.parse_args()

    input_dir = args.inputdir
    model_dir = args.modeldir
    bed_file = args.bed
    chrom = args.chrom

    if args.outputdir is None:
        clobber = True
        output_dir = args.outputdirc
    else:
        clobber = False
        output_dir = args.outputdir

    try:  # adapted from dreme.py by T. Bailey
        os.makedirs(output_dir)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            if not clobber:
                print >> sys.stderr, ('output directory (%s) already exists '
                                      'but you specified not to clobber it') % output_dir
                sys.exit(1)
            else:
                print >> sys.stderr, ('output directory (%s) already exists '
                                      'so it will be clobbered') % output_dir

    print 'Loading genome'
    genome = utils.load_genome()
    print 'Loading model'
    model_tfs, model_bigwig_names, features, model = utils.load_model(model_dir)
    L = model.input_shape[0][1]
    utils.L = L
    use_meta = 'meta' in features
    use_gencode = 'gencode' in features
    print 'Loading BED data'
    is_sorted = False
    bigwig_names, meta_names, datagen_bed, nonblacklist_bools = utils.load_beddata(genome, bed_file, use_meta, use_gencode, input_dir, is_sorted, chrom)
    assert bigwig_names == model_bigwig_names
    if use_meta:
        model_meta_file = model_dir + '/meta.txt'
        assert os.path.isfile(model_meta_file)
        model_meta_names = np.loadtxt(model_meta_file, dtype=str)
        if len(model_meta_names.shape) == 0:
            model_meta_names = [str(model_meta_names)]
        else:
            model_meta_names = list(model_meta_names)
        assert meta_names == model_meta_names
    output_results(bigwig_names, datagen_bed, model, output_dir) 
Developer: uci-cbcl, Project: FactorNet, Source: visualize.py

Example 14: train

# Required import: import utils [as alias]
# Or: from utils import load_model [as alias]
def train(**kwargs):
    config = Config()
    config.update(**kwargs)
    print('Current settings:\n', config)
    if config.use_cuda:
        torch.cuda.set_device(config.gpu)
    print('loading corpus')
    vocab = load_vocab(config.vocab)
    label_dic = load_vocab(config.label_file)
    tagset_size = len(label_dic)
    train_data = read_corpus(config.train_file, max_length=config.max_length, label_dic=label_dic, vocab=vocab)
    dev_data = read_corpus(config.dev_file, max_length=config.max_length, label_dic=label_dic, vocab=vocab)

    train_ids = torch.LongTensor([temp.input_id for temp in train_data])
    train_masks = torch.LongTensor([temp.input_mask for temp in train_data])
    train_tags = torch.LongTensor([temp.label_id for temp in train_data])

    train_dataset = TensorDataset(train_ids, train_masks, train_tags)
    train_loader = DataLoader(train_dataset, shuffle=True, batch_size=config.batch_size)

    dev_ids = torch.LongTensor([temp.input_id for temp in dev_data])
    dev_masks = torch.LongTensor([temp.input_mask for temp in dev_data])
    dev_tags = torch.LongTensor([temp.label_id for temp in dev_data])

    dev_dataset = TensorDataset(dev_ids, dev_masks, dev_tags)
    dev_loader = DataLoader(dev_dataset, shuffle=True, batch_size=config.batch_size)
    model = BERT_LSTM_CRF(config.bert_path, tagset_size, config.bert_embedding, config.rnn_hidden, config.rnn_layer, dropout_ratio=config.dropout_ratio, dropout1=config.dropout1, use_cuda=config.use_cuda)
    if config.load_model:
        assert config.load_path is not None
        model = load_model(model, name=config.load_path)
    if config.use_cuda:
        model.cuda()
    model.train()
    optimizer = getattr(optim, config.optim)
    optimizer = optimizer(model.parameters(), lr=config.lr, weight_decay=config.weight_decay)
    eval_loss = 10000
    for epoch in range(config.base_epoch):
        step = 0
        for i, batch in enumerate(train_loader):
            step += 1
            model.zero_grad()
            inputs, masks, tags = batch
            inputs, masks, tags = Variable(inputs), Variable(masks), Variable(tags)
            if config.use_cuda:
                inputs, masks, tags = inputs.cuda(), masks.cuda(), tags.cuda()
            feats = model(inputs, masks)
            loss = model.loss(feats, masks, tags)
            loss.backward()
            optimizer.step()
            if step % 50 == 0:
                print('step: {} |  epoch: {}|  loss: {}'.format(step, epoch, loss.item()))
        loss_temp = dev(model, dev_loader, epoch, config)
        if loss_temp < eval_loss:
            eval_loss = loss_temp  # track the best dev loss so far
            save_model(model, epoch) 
Developer: chenxiaoyouyou, Project: Bert-BiLSTM-CRF-pytorch, Source: main.py
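In this project, load_model takes an already-constructed model plus a checkpoint path and returns the model with its weights restored. A minimal sketch of such a helper, assuming the checkpoint is a plain PyTorch state dict (the real project may store more):

# Hypothetical helper; assumes the file is a bare state dict.
import torch

def load_model(model, name):
    model.load_state_dict(torch.load(name, map_location="cpu"))
    return model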

Example 15: infer

# Required import: import utils [as alias]
# Or: from utils import load_model [as alias]
def infer(data_filepath='data/flowers.hdf5', z_dim=128, out_dir='gan',
          n_steps=10):

    G = load_model(out_dir)
    val_data = get_data(data_filepath, 'train')
    val_data = next(iterate_minibatches(val_data, 2))
    emb_a, emb_b = val_data[1]
    txts = val_data[2]

    # add batch dimension
    emb_a, emb_b = emb_a[None, :], emb_b[None, :]

    # sample z vector for inference
    z = np.random.uniform(-1, 1, size=(1, z_dim))

    G.trainable = False

    # add and subtract
    emb_add = (emb_a + emb_b)
    emb_a_sub_b = (emb_a - emb_b)
    emb_b_sub_a = (emb_b - emb_a)

    # generate images
    fake_a = G.predict([z, emb_a])[0]
    fake_b = G.predict([z, emb_b])[0]
    fake_add = G.predict([z, emb_add])[0]
    fake_a_sub_b = G.predict([z, emb_a_sub_b])[0]
    fake_b_sub_a = G.predict([z, emb_b_sub_a])[0]

    fake_a = ((fake_a + 1)*0.5)
    fake_b = ((fake_b + 1)*0.5)
    fake_add = ((fake_add + 1)*0.5)
    fake_a_sub_b = ((fake_a_sub_b + 1)*0.5)
    fake_b_sub_a = ((fake_b_sub_a + 1)*0.5)

    plt.imsave("{}/fake_text_arithmetic_a".format(out_dir), fake_a)
    plt.imsave("{}/fake_text_arithmetic_b".format(out_dir), fake_b)
    plt.imsave("{}/fake_text_arithmetic_add".format(out_dir), fake_add)
    plt.imsave("{}/fake_text_arithmetic_a_sub_b".format(out_dir), fake_a_sub_b)
    plt.imsave("{}/fake_text_arithmetic_b_sub_a".format(out_dir), fake_b_sub_a)
    print(str(txts[0]), str(txts[1]),
          file=open("{}/fake_text_arithmetic.txt".format(out_dir), "a")) 
Developer: PacktPublishing, Project: Hands-On-Generative-Adversarial-Networks-with-Keras, Source: arithmetic_in_text.py


Note: The utils.load_model method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. For distribution and use, please follow the corresponding project's license; do not reproduce without permission.