

Python dataset.Dataset Method Code Examples

This article collects typical usage examples of the Dataset method from the Python module utils.dataset. If you are wondering how dataset.Dataset is used in practice, or are looking for concrete examples of it, the selected code examples below may help. You can also explore further usage examples from the utils.dataset module itself.


The following shows 9 code examples of the dataset.Dataset method, sorted by popularity by default.
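Before the individual examples, here is a minimal orientation sketch pieced together from the examples below. Only the module path utils.dataset comes from the source projects; the data paths, batch size, and dictionary keys are placeholders, and the constructor signature differs between projects (some take a dataset path with an optional prefix, others take in-memory arrays or a video list), so treat this as a sketch rather than the definitive API.

# Minimal usage sketch (hypothetical paths and batch size; see the examples
# below for each project's actual signature).
from utils.dataset import Dataset

# Index a training set from an image dataset path (WarpGAN / PFE style).
trainset = Dataset('/path/to/train_data', prefix='/path/to/image_root')
print(trainset.num_classes)         # number of classes discovered
print(trainset['abspath'][:3])      # absolute paths of the first few images

# Queue-based batching, as used in the training scripts below.
trainset.start_batch_queue(64, proc_func=None)
batch = trainset.pop_batch_queue()  # dict with keys such as 'images' and 'labels'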

Example 1: test

# Required import: from utils import dataset [as alias]
# Or: from utils.dataset import Dataset [as alias]
def test(network, config, log_dir, step):

    # Initialize testing
    if not hasattr(test, 'images'):
        testset = Dataset(config.test_dataset_path, prefix=config.data_prefix)
        random_indices = np.random.permutation(np.where(testset.is_photo)[0])[:64]
        test.images = testset.images[random_indices].astype(object)  # np.object is removed in recent NumPy; plain object keeps the same behavior
        test.images = preprocess(test.images, config, is_training=False)

    output_dir = os.path.join(log_dir, 'samples')
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    # scales = np.indices((8,8), dtype=np.float32)[1] * 5
    scales = np.ones((8,8))
    scales = scales.flatten()
    test_results = network.generate_BA(test.images, scales, config.batch_size)
    utils.save_manifold(test_results, os.path.join(output_dir, '{}.jpg'.format(step))) 
Author: seasonSH, Project: WarpGAN, Lines: 20, Source: train.py

Example 2: main

# Required import: from utils import dataset [as alias]
# Or: from utils.dataset import Dataset [as alias]
def main(args):


    paths = Dataset(args.dataset_path)['abspath']
    print('%d images to load.' % len(paths))
    assert(len(paths)>0)

    # Load model files and config file
    network = Network()
    network.load_model(args.model_dir) 
    images = preprocess(paths, network.config, False)

    # Run forward pass to calculate embeddings
    mu, sigma_sq = network.extract_feature(images, args.batch_size, verbose=True)
    feat_pfe = np.concatenate([mu, sigma_sq], axis=1)
    
    lfwtest = LFWTest(paths)
    lfwtest.init_standard_proto(args.protocol_path)

    accuracy, threshold = lfwtest.test_standard_proto(mu, utils.pair_euc_score)
    print('Euclidean (cosine) accuracy: %.5f threshold: %.5f' % (accuracy, threshold))
    accuracy, threshold = lfwtest.test_standard_proto(feat_pfe, utils.pair_MLS_score)
    print('MLS accuracy: %.5f threshold: %.5f' % (accuracy, threshold)) 
Author: seasonSH, Project: Probabilistic-Face-Embeddings, Lines: 25, Source: eval_lfw.py

Example 3: train

# Required import: from utils import dataset [as alias]
# Or: from utils.dataset import Dataset [as alias]
def train(label2index, index2label):
    # list of train videos
    with open('data/split1.train', 'r') as f:
        video_list = f.read().split('\n')[0:-1]
    # read train set
    print('read data...')
    dataset = Dataset('data', video_list, label2index)
    print('done')
    # train the network
    trainer = Trainer(dataset)
    trainer.train(batch_size = 512, n_epochs = 2, learning_rate = 0.1)
    trainer.save_model('results/net.model')
    # estimate prior, loss-based lengths, and monte-carlo grammar
    prior = estimate_prior(dataset)
    mean_lengths = loss_based_lengths(dataset)
    grammar = monte_carlo_grammar(dataset, mean_lengths, index2label)
    np.savetxt('results/prior', prior)
    np.savetxt('results/mean_lengths', mean_lengths, fmt='%.3f')
    with open('results/grammar', 'w') as f:
        f.write('\n'.join(grammar) + '\n')


################################################################################
### INFERENCE                                                                ###
################################################################################ 
Author: alexanderrichard, Project: action-sets, Lines: 27, Source: main.py

Example 4: load_FREY

# Required import: from utils import dataset [as alias]
# Or: from utils.dataset import Dataset [as alias]
def load_FREY():
    data_path = '../data/frey_rawface.mat'
    mat = loadmat(data_path)
    data = mat['ff']
    data = np.transpose(data) # [num_images, dimension]
    data = np.array(data, dtype=np.float32)
    for i in range(data.shape[0]):
        min_value = np.min(data[i,:])
        max_value = np.max(data[i,:])
        num = (data[i,:] - min_value)
        den = (max_value - min_value)
        data[i,:] = num/den

    data_dim = data.shape[1]
    num_images = data.shape[0]
    train_size = int(num_images*0.8)
    valid_size = int(num_images*0.1)
    test_size = num_images - train_size - valid_size

    x_train = data[:train_size]
    x_valid = data[train_size:(train_size+valid_size)]
    x_test = data[(train_size+valid_size):]

    x_train = np.reshape(x_train, [-1, 28, 20, 1])
    x_valid = np.reshape(x_valid, [-1, 28, 20, 1])
    x_test = np.reshape(x_test, [-1, 28, 20, 1])

    x_train_labels = np.zeros(x_train.shape[0])
    x_valid_labels = np.zeros(x_valid.shape[0])
    x_test_labels = np.zeros(x_test.shape[0])

    train_dataset = Dataset(x_train, x_train_labels)
    valid_dataset = Dataset(x_valid, x_valid_labels)
    test_dataset = Dataset(x_test, x_test_labels)

    print('Train Data: ', train_dataset.x.shape)
    print('Valid Data: ', valid_dataset.x.shape)
    print('Test Data: ', test_dataset.x.shape)

    return train_dataset, valid_dataset, test_dataset 
Author: psanch21, Project: VAE-GMVAE, Lines: 42, Source: utils.py

Example 5: load_MNIST

# Required import: from utils import dataset [as alias]
# Or: from utils.dataset import Dataset [as alias]
def load_MNIST():
    data_path = '../data/MNIST_data'
    data = input_data.read_data_sets(data_path, one_hot=False)
    x_train_aux = data.train.images
    x_test = data.test.images
    data_dim = data.train.images.shape[1]
    n_train = data.train.images.shape[0]

    train_size = int(n_train * 0.8)
    valid_size = n_train - train_size
    x_valid, x_train = merge_datasets(x_train_aux, data_dim, train_size, valid_size)
    print('Data loaded. ', time.localtime().tm_hour,
          ':', time.localtime().tm_min, 'h')
    # logs.write('\tData loaded ' + str(time.localtime().tm_hour) +':' + str(time.localtime().tm_min) + 'h\n')

    x_train = np.reshape(x_train, [-1, 28, 28, 1])
    x_valid = np.reshape(x_valid, [-1, 28, 28, 1])
    x_test = np.reshape(x_test, [-1, 28, 28, 1])


    train_dataset = Dataset(x_train, data.train.labels)
    valid_dataset = Dataset(x_valid, data.train.labels)
    test_dataset = Dataset(x_test, data.test.labels)

    print('Train Data: ', train_dataset.x.shape)
    print('Valid Data: ', valid_dataset.x.shape)
    print('Test Data: ', test_dataset.x.shape)

    return train_dataset, valid_dataset, test_dataset 
Author: psanch21, Project: VAE-GMVAE, Lines: 31, Source: utils.py

Example 6: main

# Required import: from utils import dataset [as alias]
# Or: from utils.dataset import Dataset [as alias]
def main(args):

    network = Network()
    network.load_model(args.model_dir)
    proc_func = lambda x: preprocess(x, network.config, False)

    testset = Dataset(args.dataset_path)
    if args.protocol == 'ijba':
        tester = IJBATest(testset['abspath'].values)
        tester.init_proto(args.protocol_path)
    elif args.protocol == 'ijbc':
        tester = IJBCTest(testset['abspath'].values)
        tester.init_proto(args.protocol_path)
    else:
        raise ValueError('Unknown protocol. Only accept "ijba" or "ijbc".')


    mu, sigma_sq = network.extract_feature(tester.image_paths, args.batch_size, proc_func=proc_func, verbose=True)
    features = np.concatenate([mu, sigma_sq], axis=1)

    print('---- Average pooling')
    aggregate_templates(tester.verification_templates, features, 'mean')
    TARs, std, FARs = tester.test_verification(force_compare(utils.pair_euc_score))
    for i in range(len(TARs)):
        print('TAR: {:.5} +- {:.5} FAR: {:.5}'.format(TARs[i], std[i], FARs[i]))

    print('---- Uncertainty pooling')
    aggregate_templates(tester.verification_templates, features, 'PFE_fuse')
    TARs, std, FARs = tester.test_verification(force_compare(utils.pair_euc_score))
    for i in range(len(TARs)):
        print('TAR: {:.5} +- {:.5} FAR: {:.5}'.format(TARs[i], std[i], FARs[i]))


    print('---- MLS comparison')
    aggregate_templates(tester.verification_templates, features, 'PFE_fuse_match')
    TARs, std, FARs = tester.test_verification(force_compare(utils.pair_MLS_score))
    for i in range(len(TARs)):
        print('TAR: {:.5} +- {:.5} FAR: {:.5}'.format(TARs[i], std[i], FARs[i])) 
Author: seasonSH, Project: Probabilistic-Face-Embeddings, Lines: 40, Source: eval_ijb.py

Example 7: infer

# Required import: from utils import dataset [as alias]
# Or: from utils.dataset import Dataset [as alias]
def infer(label2index, index2label, n_threads):
    # load models
    log_prior = np.log( np.loadtxt('results/prior') )
    grammar = PathGrammar('results/grammar', label2index)
    length_model = PoissonModel('results/mean_lengths', max_length = 2000)
    forwarder = Forwarder('results/net.model')
    # Viterbi decoder (max_hypotheses = n: at each time step, prune all hypotheses worse than the top n)
    viterbi_decoder = Viterbi(grammar, length_model, frame_sampling = 30, max_hypotheses = 50000 )
    # create list of test videos
    with open('data/split1.test', 'r') as f:
        video_list = f.read().split('\n')[0:-1]
    # forward each video
    log_probs = dict()
    queue = mp.Queue()
    for video in video_list:
        queue.put(video)
        dataset = Dataset('data', [video], label2index)
        log_probs[video] = forwarder.forward(dataset) - log_prior
        log_probs[video] = log_probs[video] - np.max(log_probs[video])
    # Viterbi decoding
    procs = []
    for i in range(n_threads):
        p = mp.Process(target = decode, args = (queue, log_probs, viterbi_decoder, index2label) )
        procs.append(p)
        p.start()
    for p in procs:
        p.join()


### helper function for parallelized Viterbi decoding ########################## 
Author: alexanderrichard, Project: action-sets, Lines: 32, Source: main.py

Example 8: main

# Required import: from utils import dataset [as alias]
# Or: from utils.dataset import Dataset [as alias]
def main(args):

    # I/O
    config_file = args.config_file
    config = imp.load_source('config', config_file)
    if args.name:
        config.name = args.name

    trainset = Dataset(config.train_dataset_path, prefix=config.data_prefix)

    network = WarpGAN()
    network.initialize(config, trainset.num_classes)

    # Initialization for running
    if config.save_model:
        log_dir = utils.create_log_dir(config, config_file)
        summary_writer = tf.summary.FileWriter(log_dir, network.graph)
    if config.restore_model:
        network.restore_model(config.restore_model, config.restore_scopes)

    proc_func = lambda images: preprocess(images, config, True)
    trainset.start_batch_queue(config.batch_size, proc_func=proc_func)


    # Main Loop
    print('\nStart Training\nname: {}\n# epochs: {}\nepoch_size: {}\nbatch_size: {}\n'.format(
            config.name, config.num_epochs, config.epoch_size, config.batch_size))
    global_step = 0
    start_time = time.time()
    for epoch in range(config.num_epochs):

        if epoch == 0: test(network, config, log_dir, global_step)

        # Training
        for step in range(config.epoch_size):
            # Prepare input
            learning_rate = utils.get_updated_learning_rate(global_step, config)
            batch = trainset.pop_batch_queue()

            wl, sm, global_step = network.train(batch['images'], batch['labels'], batch['is_photo'], learning_rate, config.keep_prob)

            wl['lr'] = learning_rate

            # Display
            if step % config.summary_interval == 0:
                duration = time.time() - start_time
                start_time = time.time()
                utils.display_info(epoch, step, duration, wl)
                if config.save_model:
                    summary_writer.add_summary(sm, global_step=global_step)

        # Testing
        test(network, config, log_dir, global_step)

        # Save the model
        if config.save_model:
            network.save_model(log_dir, global_step) 
Author: seasonSH, Project: WarpGAN, Lines: 59, Source: train.py

Example 9: main

# Required import: from utils import dataset [as alias]
# Or: from utils.dataset import Dataset [as alias]
def main(args):

    # I/O
    config_file = args.config_file
    config = imp.load_source('config', config_file)
    if args.name:
        config.name = args.name

    trainset = Dataset(config.train_dataset_path)

    network = Network()
    network.initialize(config, trainset.num_classes)

    # Initialization for running
    log_dir = utils.create_log_dir(config, config_file)
    summary_writer = tf.summary.FileWriter(log_dir, network.graph)
    if config.restore_model:
        network.restore_model(config.restore_model, config.restore_scopes)

    proc_func = lambda images: preprocess(images, config, True)
    trainset.start_batch_queue(config.batch_format, proc_func=proc_func)


    # Main Loop
    print('\nStart Training\nname: {}\n# epochs: {}\nepoch_size: {}\nbatch_size: {}\n'.format(
            config.name, config.num_epochs, config.epoch_size, config.batch_format['size']))
    global_step = 0
    start_time = time.time()
    for epoch in range(config.num_epochs):

        # Training
        for step in range(config.epoch_size):
            # Prepare input
            learning_rate = utils.get_updated_learning_rate(global_step, config)
            batch = trainset.pop_batch_queue()

            wl, sm, global_step = network.train(batch['image'], batch['label'], learning_rate, config.keep_prob)

            wl['lr'] = learning_rate

            # Display
            if step % config.summary_interval == 0:
                duration = time.time() - start_time
                start_time = time.time()
                utils.display_info(epoch, step, duration, wl)
                summary_writer.add_summary(sm, global_step=global_step)

        # Save the model
        network.save_model(log_dir, global_step) 
Author: seasonSH, Project: Probabilistic-Face-Embeddings, Lines: 51, Source: train.py


Note: The utils.dataset.Dataset examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please follow the corresponding project's License when distributing or using this code; do not reproduce without permission.