

Python dataset.Dataset Method Code Examples

This article collects typical usage examples of the Python method utils.dataset.Dataset. If you have been wondering what exactly utils.dataset.Dataset does, how to call it, or how it is used in practice, the curated code examples below should help. You can also explore further usage examples from the utils.dataset module.


The following presents 9 code examples of the dataset.Dataset method, collected from open-source projects and sorted by popularity by default.
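
Note that these examples come from several unrelated projects, so utils.dataset.Dataset names different classes with different interfaces: a face-image index (examples 1, 2, 6, 8, 9), a video-feature dataset (examples 3, 7), and a plain array wrapper (examples 4, 5). For orientation only, here is a minimal, hypothetical sketch of the face-image flavor, inferred purely from how the examples below use it; it is not any project's actual implementation:

import pandas as pd

class Dataset:
    def __init__(self, path, prefix=None):
        # assume an index file with at least 'path' and 'label' columns
        self.frame = pd.read_csv(path)
        root = prefix + '/' if prefix else ''
        self.frame['abspath'] = root + self.frame['path']
        self.images = self.frame['abspath'].values
        self.num_classes = self.frame['label'].nunique()

    def __getitem__(self, key):
        # column access such as dataset['abspath']
        return self.frame[key]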

Example 1: test

# Required imports: from utils import dataset [as alias]
# Or: from utils.dataset import Dataset [as alias]
def test(network, config, log_dir, step):

    # Initialize testing
    if not hasattr(test, 'images'):
        testset = Dataset(config.test_dataset_path, prefix=config.data_prefix)
        random_indices = np.random.permutation(np.where(testset.is_photo)[0])[:64]
        test.images = testset.images[random_indices].astype(object)  # np.object was removed in NumPy 1.24
        test.images = preprocess(test.images, config, is_training=False)

    output_dir = os.path.join(log_dir, 'samples')
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    # scales = np.indices((8,8), dtype=np.float32)[1] * 5
    scales = np.ones((8,8))
    scales = scales.flatten()
    test_results = network.generate_BA(test.images, scales, config.batch_size)
    utils.save_manifold(test_results, os.path.join(output_dir, '{}.jpg'.format(step))) 
Developer: seasonSH, Project: WarpGAN, Lines of code: 20, Source file: train.py
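
A side note on example 1: it caches the preprocessed test images as an attribute on the test function itself, so the dataset is read only once no matter how often test() is called during training. The pattern in isolation, with a stand-in for the expensive load:

def get_samples():
    # State lives on the function object, so the load runs only once.
    if not hasattr(get_samples, 'cache'):
        get_samples.cache = list(range(1000))  # stand-in for an expensive load
    return get_samples.cache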

Example 2: main

# Required imports: from utils import dataset [as alias]
# Or: from utils.dataset import Dataset [as alias]
def main(args):


    paths = Dataset(args.dataset_path)['abspath']
    print('%d images to load.' % len(paths))
    assert len(paths) > 0

    # Load model files and config file
    network = Network()
    network.load_model(args.model_dir) 
    images = preprocess(paths, network.config, False)

    # Run forward pass to calculate embeddings
    mu, sigma_sq = network.extract_feature(images, args.batch_size, verbose=True)
    feat_pfe = np.concatenate([mu, sigma_sq], axis=1)
    
    lfwtest = LFWTest(paths)
    lfwtest.init_standard_proto(args.protocol_path)

    accuracy, threshold = lfwtest.test_standard_proto(mu, utils.pair_euc_score)
    print('Euclidean (cosine) accuracy: %.5f threshold: %.5f' % (accuracy, threshold))
    accuracy, threshold = lfwtest.test_standard_proto(feat_pfe, utils.pair_MLS_score)
    print('MLS accuracy: %.5f threshold: %.5f' % (accuracy, threshold)) 
Developer: seasonSH, Project: Probabilistic-Face-Embeddings, Lines of code: 25, Source file: eval_lfw.py
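
Scripts like example 2 are usually driven from the command line; the argparse sketch below uses flag names inferred from the attributes main() reads (model_dir, dataset_path, protocol_path, batch_size) and may differ from the project's actual CLI:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_dir', required=True)
    parser.add_argument('--dataset_path', required=True)
    parser.add_argument('--protocol_path', required=True)
    parser.add_argument('--batch_size', type=int, default=128)
    main(parser.parse_args())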

Example 3: train

# Required imports: from utils import dataset [as alias]
# Or: from utils.dataset import Dataset [as alias]
def train(label2index, index2label):
    # list of train videos
    with open('data/split1.train', 'r') as f:
        video_list = f.read().split('\n')[0:-1]
    # read train set
    print('read data...')
    dataset = Dataset('data', video_list, label2index)
    print('done')
    # train the network
    trainer = Trainer(dataset)
    trainer.train(batch_size = 512, n_epochs = 2, learning_rate = 0.1)
    trainer.save_model('results/net.model')
    # estimate prior, loss-based lengths, and monte-carlo grammar
    prior = estimate_prior(dataset)
    mean_lengths = loss_based_lengths(dataset)
    grammar = monte_carlo_grammar(dataset, mean_lengths, index2label)
    np.savetxt('results/prior', prior)
    np.savetxt('results/mean_lengths', mean_lengths, fmt='%.3f')
    with open('results/grammar', 'w') as f:
        f.write('\n'.join(grammar) + '\n')


################################################################################
### INFERENCE                                                                ###
################################################################################ 
Developer: alexanderrichard, Project: action-sets, Lines of code: 27, Source file: main.py
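
train() expects data/split1.train to hold one video name per line with a trailing newline; the slice [0:-1] drops the empty string left after the final newline. A hypothetical file (names are illustrative) might look like:

video_0001
video_0002
video_0003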

Example 4: load_FREY

# Required imports: from utils import dataset [as alias]
# Or: from utils.dataset import Dataset [as alias]
def load_FREY():
    data_path = '../data/frey_rawface.mat'
    mat = loadmat(data_path)
    data = mat['ff']
    data = np.transpose(data) # [num_images, dimension]
    data = np.array(data, dtype=np.float32)
    for i in range(data.shape[0]):
        min_value = np.min(data[i,:])
        max_value = np.max(data[i,:])
        num = (data[i,:] - min_value)
        den = (max_value - min_value)
        data[i,:] = num/den

    data_dim = data.shape[1]
    num_images = data.shape[0]
    train_size = int(num_images*0.8)
    valid_size = int(num_images*0.1)
    test_size = num_images - train_size - valid_size

    x_train = data[:train_size]
    x_valid = data[train_size:(train_size+valid_size)]
    x_test = data[(train_size+valid_size):]

    x_train = np.reshape(x_train, [-1, 28, 20, 1])
    x_valid = np.reshape(x_valid, [-1, 28, 20, 1])
    x_test = np.reshape(x_test, [-1, 28, 20, 1])

    x_train_labels = np.zeros(x_train.shape[0])
    x_valid_labels = np.zeros(x_valid.shape[0])
    x_test_labels = np.zeros(x_test.shape[0])

    train_dataset = Dataset(x_train, x_train_labels)
    valid_dataset = Dataset(x_valid, x_valid_labels)
    test_dataset = Dataset(x_test, x_test_labels)

    print('Train Data: ', train_dataset.x.shape)
    print('Valid Data: ', valid_dataset.x.shape)
    print('Test Data: ', test_dataset.x.shape)

    return train_dataset, valid_dataset, test_dataset 
Developer: psanch21, Project: VAE-GMVAE, Lines of code: 42, Source file: utils.py
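
The per-image min-max loop in load_FREY can be replaced by a vectorized equivalent; a sketch assuming the same [num_images, dimension] layout as above:

mins = data.min(axis=1, keepdims=True)
maxs = data.max(axis=1, keepdims=True)
data = (data - mins) / (maxs - mins)  # same result as the loop, in one pass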

Example 5: load_MNIST

# Required imports: from utils import dataset [as alias]
# Or: from utils.dataset import Dataset [as alias]
def load_MNIST():
    data_path = '../data/MNIST_data'
    data = input_data.read_data_sets(data_path, one_hot=False)
    x_train_aux = data.train.images
    x_test = data.test.images
    data_dim = data.train.images.shape[1]
    n_train = data.train.images.shape[0]

    train_size = int(n_train * 0.8)
    valid_size = n_train - train_size
    x_valid, x_train = merge_datasets(x_train_aux, data_dim, train_size, valid_size)
    print('Data loaded. ', time.localtime().tm_hour,
          ':', time.localtime().tm_min, 'h')
    # logs.write('\tData loaded ' + str(time.localtime().tm_hour) +':' + str(time.localtime().tm_min) + 'h\n')

    x_train = np.reshape(x_train, [-1, 28, 28, 1])
    x_valid = np.reshape(x_valid, [-1, 28, 28, 1])
    x_test = np.reshape(x_test, [-1, 28, 28, 1])


    # NOTE: as in the original source, the train/valid label arrays below are
    # the full, unsplit training labels and do not line up with x_train/x_valid;
    # slice them to match the split if you need correct labels here.
    train_dataset = Dataset(x_train, data.train.labels)
    valid_dataset = Dataset(x_valid, data.train.labels)
    test_dataset = Dataset(x_test, data.test.labels)

    print('Train Data: ', train_dataset.x.shape)
    print('Valid Data: ', valid_dataset.x.shape)
    print('Test Data: ', test_dataset.x.shape)

    return train_dataset, valid_dataset, test_dataset 
Developer: psanch21, Project: VAE-GMVAE, Lines of code: 31, Source file: utils.py

Example 6: main

# Required imports: from utils import dataset [as alias]
# Or: from utils.dataset import Dataset [as alias]
def main(args):

    network = Network()
    network.load_model(args.model_dir)
    proc_func = lambda x: preprocess(x, network.config, False)

    testset = Dataset(args.dataset_path)
    if args.protocol == 'ijba':
        tester = IJBATest(testset['abspath'].values)
        tester.init_proto(args.protocol_path)
    elif args.protocol == 'ijbc':
        tester = IJBCTest(testset['abspath'].values)
        tester.init_proto(args.protocol_path)
    else:
        raise ValueError('Unknown protocol. Only "ijba" and "ijbc" are accepted.')


    mu, sigma_sq = network.extract_feature(tester.image_paths, args.batch_size, proc_func=proc_func, verbose=True)
    features = np.concatenate([mu, sigma_sq], axis=1)

    print('---- Average pooling')
    aggregate_templates(tester.verification_templates, features, 'mean')
    TARs, std, FARs = tester.test_verification(force_compare(utils.pair_euc_score))
    for i in range(len(TARs)):
        print('TAR: {:.5} +- {:.5} FAR: {:.5}'.format(TARs[i], std[i], FARs[i]))

    print('---- Uncertainty pooling')
    aggregate_templates(tester.verification_templates, features, 'PFE_fuse')
    TARs, std, FARs = tester.test_verification(force_compare(utils.pair_euc_score))
    for i in range(len(TARs)):
        print('TAR: {:.5} +- {:.5} FAR: {:.5}'.format(TARs[i], std[i], FARs[i]))


    print('---- MLS comparison')
    aggregate_templates(tester.verification_templates, features, 'PFE_fuse_match')
    TARs, std, FARs = tester.test_verification(force_compare(utils.pair_MLS_score))
    for i in range(len(TARs)):
        print('TAR: {:.5} +- {:.5} FAR: {:.5}'.format(TARs[i], std[i], FARs[i])) 
Developer: seasonSH, Project: Probabilistic-Face-Embeddings, Lines of code: 40, Source file: eval_ijb.py
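
The three reporting loops in example 6 index TARs, std, and FARs in parallel; zip expresses the same thing without the index variable:

for tar, s, far in zip(TARs, std, FARs):
    print('TAR: {:.5} +- {:.5} FAR: {:.5}'.format(tar, s, far))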

Example 7: infer

# Required imports: from utils import dataset [as alias]
# Or: from utils.dataset import Dataset [as alias]
def infer(label2index, index2label, n_threads):
    # load models
    log_prior = np.log( np.loadtxt('results/prior') )
    grammar = PathGrammar('results/grammar', label2index)
    length_model = PoissonModel('results/mean_lengths', max_length = 2000)
    forwarder = Forwarder('results/net.model')
    # Viterbi decoder (max_hypotheses = n: at each time step, prune all hypotheses worse than the top n)
    viterbi_decoder = Viterbi(grammar, length_model, frame_sampling = 30, max_hypotheses = 50000 )
    # create list of test videos
    with open('data/split1.test', 'r') as f:
        video_list = f.read().split('\n')[0:-1]
    # forward each video
    log_probs = dict()
    queue = mp.Queue()
    for video in video_list:
        queue.put(video)
        dataset = Dataset('data', [video], label2index)
        log_probs[video] = forwarder.forward(dataset) - log_prior
        log_probs[video] = log_probs[video] - np.max(log_probs[video])
    # Viterbi decoding
    procs = []
    for i in range(n_threads):
        p = mp.Process(target = decode, args = (queue, log_probs, viterbi_decoder, index2label) )
        procs.append(p)
        p.start()
    for p in procs:
        p.join()


### helper function for parallelized Viterbi decoding ########################## 
Developer: alexanderrichard, Project: action-sets, Lines of code: 32, Source file: main.py
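
infer() hands the shared queue to n_threads worker processes running decode, a helper defined elsewhere in the project's main.py. A minimal sketch of what such a worker might look like; the decoding call itself is hypothetical and the actual Viterbi interface is project-specific:

import queue as std_queue

def decode(q, log_probs, viterbi_decoder, index2label):
    # Drain the shared queue; each worker decodes whole videos independently.
    while True:
        try:
            video = q.get(timeout=3)
        except std_queue.Empty:
            return
        # hypothetical call; see the project's Viterbi class for the real API
        result = viterbi_decoder.decode(log_probs[video])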

Example 8: main

# Required imports: from utils import dataset [as alias]
# Or: from utils.dataset import Dataset [as alias]
def main(args):

    # I/O
    config_file = args.config_file
    config = imp.load_source('config', config_file)
    if args.name:
        config.name = args.name

    trainset = Dataset(config.train_dataset_path, prefix=config.data_prefix)

    network = WarpGAN()
    network.initialize(config, trainset.num_classes)

    # Initialization for running
    if config.save_model:
        log_dir = utils.create_log_dir(config, config_file)
        summary_writer = tf.summary.FileWriter(log_dir, network.graph)
    if config.restore_model:
        network.restore_model(config.restore_model, config.restore_scopes)

    proc_func = lambda images: preprocess(images, config, True)
    trainset.start_batch_queue(config.batch_size, proc_func=proc_func)


    # Main Loop
    print('\nStart Training\nname: {}\n# epochs: {}\nepoch_size: {}\nbatch_size: {}\n'.format(
            config.name, config.num_epochs, config.epoch_size, config.batch_size))
    global_step = 0
    start_time = time.time()
    for epoch in range(config.num_epochs):

        if epoch == 0: test(network, config, log_dir, global_step)

        # Training
        for step in range(config.epoch_size):
            # Prepare input
            learning_rate = utils.get_updated_learning_rate(global_step, config)
            batch = trainset.pop_batch_queue()

            wl, sm, global_step = network.train(batch['images'], batch['labels'], batch['is_photo'], learning_rate, config.keep_prob)

            wl['lr'] = learning_rate

            # Display
            if step % config.summary_interval == 0:
                duration = time.time() - start_time
                start_time = time.time()
                utils.display_info(epoch, step, duration, wl)
                if config.save_model:
                    summary_writer.add_summary(sm, global_step=global_step)

        # Testing
        test(network, config, log_dir, global_step)

        # Save the model
        if config.save_model:
            network.save_model(log_dir, global_step) 
Developer: seasonSH, Project: WarpGAN, Lines of code: 59, Source file: train.py
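
The script loads its configuration as a plain Python module via imp.load_source. A hypothetical config.py covering only the fields this main() and test() read; the names match the code above, the values are illustrative, not project defaults:

name = 'warpgan_default'
train_dataset_path = 'data/train_index.txt'
test_dataset_path = 'data/test_index.txt'
data_prefix = '/path/to/images'
save_model = True
restore_model = None           # path to a checkpoint, or None
restore_scopes = None
batch_size = 64
num_epochs = 100
epoch_size = 1000
summary_interval = 100
keep_prob = 1.0
# utils.get_updated_learning_rate reads further, project-specific fields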

Example 9: main

# Required imports: from utils import dataset [as alias]
# Or: from utils.dataset import Dataset [as alias]
def main(args):

    # I/O
    config_file = args.config_file
    config = imp.load_source('config', config_file)
    if args.name:
        config.name = args.name

    trainset = Dataset(config.train_dataset_path)

    network = Network()
    network.initialize(config, trainset.num_classes)

    # Initialization for running
    log_dir = utils.create_log_dir(config, config_file)
    summary_writer = tf.summary.FileWriter(log_dir, network.graph)
    if config.restore_model:
        network.restore_model(config.restore_model, config.restore_scopes)

    proc_func = lambda images: preprocess(images, config, True)
    trainset.start_batch_queue(config.batch_format, proc_func=proc_func)


    # Main Loop
    print('\nStart Training\nname: {}\n# epochs: {}\nepoch_size: {}\nbatch_size: {}\n'.format(
            config.name, config.num_epochs, config.epoch_size, config.batch_format['size']))
    global_step = 0
    start_time = time.time()
    for epoch in range(config.num_epochs):

        # Training
        for step in range(config.epoch_size):
            # Prepare input
            learning_rate = utils.get_updated_learning_rate(global_step, config)
            batch = trainset.pop_batch_queue()

            wl, sm, global_step = network.train(batch['image'], batch['label'], learning_rate, config.keep_prob)

            wl['lr'] = learning_rate

            # Display
            if step % config.summary_interval == 0:
                duration = time.time() - start_time
                start_time = time.time()
                utils.display_info(epoch, step, duration, wl)
                summary_writer.add_summary(sm, global_step=global_step)

        # Save the model
        network.save_model(log_dir, global_step) 
Developer: seasonSH, Project: Probabilistic-Face-Embeddings, Lines of code: 51, Source file: train.py
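
Unlike example 8, this variant passes a batch_format dict to start_batch_queue instead of a plain batch size; from the print statement it contains at least a 'size' key. A hypothetical value:

config.batch_format = {'size': 128}  # other keys (e.g. a sampling strategy) are project-specific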


Note: The utils.dataset.Dataset examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects and copyright remains with their original authors; consult the corresponding project's license before distributing or reusing them. Please do not republish without permission.