

Python data.Dataset Method Code Examples

This article collects typical usage examples of the data.Dataset method in Python. If you are wondering how data.Dataset is used in practice, or what working data.Dataset code looks like, the curated examples below may help. You can also explore further usage examples from the containing data module.


Seven code examples of the data.Dataset method are shown below, sorted by popularity by default. You can upvote the examples you find useful; your ratings help the system recommend better Python code examples.

Example 1: get_featurs

# Required import: import data [as alias]
# or: from data import Dataset [as alias]
import numpy as np
import torch
from torch.utils import data  # inferred from the data.DataLoader call below
from tqdm import tqdm

from data import Dataset


def get_featurs(model, test_list):
    device = torch.device("cuda")

    pbar = tqdm(total=len(test_list))
    for idx, img_path in enumerate(test_list):
        pbar.update(1)

        dataset = Dataset(root=img_path,
                          phase='test',
                          input_shape=(1, 112, 112))

        trainloader = data.DataLoader(dataset, batch_size=1)
        for img in trainloader:
            img = img.to(device)
            feature = model(img).detach().cpu().numpy()
            if idx == 0:
                features = feature
            else:
                features = np.concatenate((features, feature), axis=0)
    pbar.close()
    return features
Developer ID: LcenArthas, Project: CCF-BDCI2019-Multi-person-Face-Recognition-Competition-Baseline, Lines of code: 27, Source file: test_ccf.py
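
A minimal usage sketch under stated assumptions: the stand-in network below replaces the repo's trained face model, and the image paths are hypothetical; it only illustrates the calling convention and the (num_images, feat_dim) shape of the returned array.

import torch
import torch.nn as nn

# Stand-in embedding network; the real repo loads a trained face-recognition
# model here instead. Input shape matches Dataset's (1, 112, 112) images.
model = nn.Sequential(nn.Flatten(), nn.Linear(112 * 112, 512)).to("cuda")
model.eval()

test_list = ['data/test/a.jpg', 'data/test/b.jpg']  # hypothetical paths
with torch.no_grad():
    features = get_featurs(model, test_list)        # shape: (2, 512)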

Example 2: start_data_loader

# Required import: import data [as alias]
# or: from data import Dataset [as alias]
import threading

import tensorflow as tf


def start_data_loader(sess, enqueue_op, queue_placeholders, model, dataset, config):
    """ Starts a data loader thread coordinated by a tf.train.Coordinator()

    Args:
        sess: tf.Session
        enqueue_op: tf.FIFOQueue.enqueue
        queue_placeholders: dict
        model: FCPN
        dataset: Dataset
        config: dict, session configuration parameters

    Returns:
        coord: tf.train.Coordinator
        loader_thread: Thread

    """

    coord = tf.train.Coordinator()
    loader_thread = threading.Thread(target=load_data_into_queue, args=(
        sess, enqueue_op, queue_placeholders, coord, model, dataset, config))
    loader_thread.daemon = True
    loader_thread.start()
    return coord, loader_thread 
Developer ID: drethage, Project: fully-convolutional-point-network, Lines of code: 25, Source file: training.py
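
A hedged sketch of how the returned coordinator and thread are typically shut down once training finishes; run_training is a hypothetical stand-in for the caller's training loop, while request_stop/join are the standard tf.train.Coordinator calls:

coord, loader_thread = start_data_loader(
    sess, enqueue_op, queue_placeholders, model, dataset, config)
try:
    run_training(sess)           # hypothetical training loop
finally:
    coord.request_stop()         # makes coord.should_stop() return True in the loader
    coord.join([loader_thread])  # wait for the loader thread to exit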

Example 3: batch_generator

# Required import: import data [as alias]
# or: from data import Dataset [as alias]
from data import Dataset
def batch_generator(args):
    print('-' * 70)
    dataset = Dataset(args.dataset_path, verbose=True)
    print(dataset)
    return dataset.batches(args.batch_size, args.window_size, args.stride_size) 
Developer ID: djosix, Project: Performance-RNN-PyTorch, Lines of code: 7, Source file: adversarial.py
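
A usage sketch, assuming only the argparse field names that batch_generator reads (the values themselves are hypothetical):

from argparse import Namespace

args = Namespace(dataset_path='dataset/processed/',  # hypothetical path
                 batch_size=64, window_size=200, stride_size=10)
batches = batch_generator(args)
first_batch = next(batches)  # assumes dataset.batches() yields batches lazily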

Example 4: load_dataset

# Required import: import data [as alias]
# or: from data import Dataset [as alias]
from data import Dataset
def load_dataset():
    global data_path
    dataset = Dataset(data_path, verbose=True)
    dataset_size = len(dataset.samples)
    assert dataset_size > 0
    return dataset 
Developer ID: djosix, Project: Performance-RNN-PyTorch, Lines of code: 8, Source file: train.py
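
Note that load_dataset reads the module-level data_path global rather than taking an argument; in train.py that global is normally populated from command-line options first. A hedged sketch with a hypothetical path:

data_path = 'dataset/processed/'  # hypothetical; normally set from CLI options
dataset = load_dataset()
print(len(dataset.samples), 'samples loaded')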

Example 5: main

# Required import: import data [as alias]
# or: from data import Dataset [as alias]
import tensorflow as tf

import data
import model  # get_parser() and setup() are defined elsewhere in main.py
def main():
    parser = get_parser()
    args = parser.parse_args()
    setup(args)
    dataset = data.Dataset(args)

    tf.reset_default_graph()
    if args.model_type == "student":
        teacher_model = None
        if args.load_teacher_from_checkpoint:
            teacher_model = model.BigModel(args, "teacher")
            teacher_model.start_session()
            teacher_model.load_model_from_file(args.load_teacher_checkpoint_dir)
            print("Verify Teacher State before Training Student")
            teacher_model.run_inference(dataset)
        student_model = model.SmallModel(args, "student")
        student_model.start_session()
        student_model.train(dataset, teacher_model)

        # Evaluate the student model using the best validation-set checkpoint
        student_model.load_model_from_file(args.checkpoint_dir)
        student_model.run_inference(dataset)

        if args.load_teacher_from_checkpoint:
            print("Verify Teacher State After Training student Model")
            teacher_model.run_inference(dataset)
            teacher_model.close_session()
        student_model.close_session()
    else:
        teacher_model = model.BigModel(args, "teacher")
        teacher_model.start_session()
        teacher_model.train(dataset)

        # Evaluate the teacher model using the best validation-set checkpoint
        teacher_model.load_model_from_file(args.checkpoint_dir)
        teacher_model.run_inference(dataset)
        teacher_model.close_session() 
Developer ID: DushyantaDhyani, Project: kdtf, Lines of code: 39, Source file: main.py
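
For context, the student/teacher split above is a knowledge-distillation setup. A generic sketch of the loss such a student typically minimizes, written in the same TF1 style; this is the standard Hinton-style formulation, not necessarily kdtf's exact one:

import tensorflow as tf

def distillation_loss(student_logits, teacher_logits, onehot_labels, T=2.0, alpha=0.5):
    # Hard term: ordinary cross-entropy against the true labels.
    hard = tf.losses.softmax_cross_entropy(onehot_labels, student_logits)
    # Soft term: cross-entropy against the teacher's temperature-softened
    # outputs, scaled by T^2 so its gradients match the hard term's magnitude.
    soft = tf.losses.softmax_cross_entropy(
        tf.nn.softmax(teacher_logits / T), student_logits / T)
    return alpha * hard + (1.0 - alpha) * (T ** 2) * soft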

Example 6: main

# Required import: import data [as alias]
# or: from data import Dataset [as alias]
import os

import tensorflow as tf

import data
import models
import optimizers  # define_and_process_args(), get_log_dir(), train() live elsewhere in train_and_summarize.py
def main():
    """ Run training and export summaries to data_dir/logs for a single test
    setup and a single set of parameters. Summaries include a) TensorBoard
    summaries, b) the latest train/test accuracies and raw edit distances
    (status.txt), c) the latest test predictions along with test ground-truth
    labels (test_label_seqs.pkl, test_prediction_seqs.pkl), d) visualizations
    as training progresses (test_visualizations_######.png)."""

    args = define_and_process_args()
    print('\n', 'ARGUMENTS', '\n\n', args, '\n')

    log_dir = get_log_dir(args)
    print('\n', 'LOG DIRECTORY', '\n\n', log_dir, '\n')

    standardized_data_path = os.path.join(args.data_dir, args.data_filename)
    if not os.path.exists(standardized_data_path):
        message = '%s does not exist.' % standardized_data_path
        raise ValueError(message)

    dataset = data.Dataset(standardized_data_path)
    train_raw_seqs, test_raw_seqs = dataset.get_splits(args.test_users)
    train_triplets = [data.prepare_raw_seq(seq) for seq in train_raw_seqs]
    test_triplets = [data.prepare_raw_seq(seq) for seq in test_raw_seqs]

    train_input_seqs, train_reset_seqs, train_label_seqs = zip(*train_triplets)
    test_input_seqs, test_reset_seqs, test_label_seqs = zip(*test_triplets)

    Model = eval('models.' + args.model_type + 'Model')
    input_size = dataset.input_size
    target_size = dataset.num_classes

    # This is just to satisfy a low-CPU requirement on our cluster
    # when using GPUs.
    if 'CUDA_VISIBLE_DEVICES' in os.environ:
        config = tf.ConfigProto(intra_op_parallelism_threads=2,
                                inter_op_parallelism_threads=2)
    else:
        config = None

    with tf.Session(config=config) as sess:
        model = Model(input_size, target_size, args.num_layers,
                      args.hidden_layer_size, args.init_scale,
                      args.dropout_keep_prob)
        optimizer = optimizers.Optimizer(
            model.loss, args.num_train_sweeps, args.initial_learning_rate,
            args.num_initial_sweeps, args.num_sweeps_per_decay,
            args.decay_factor, args.max_global_grad_norm)
        train(sess, model, optimizer, log_dir, args.batch_size,
              args.num_sweeps_per_summary, args.num_sweeps_per_save,
              train_input_seqs, train_reset_seqs, train_label_seqs,
              test_input_seqs, test_reset_seqs, test_label_seqs) 
Developer ID: rdipietro, Project: miccai-2016-surgical-activity-rec, Lines of code: 53, Source file: train_and_summarize.py
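
One aside on Model = eval('models.' + args.model_type + 'Model'): the same lookup works without eval, which avoids executing an arbitrary command-line string. A drop-in alternative, assuming only that the class lives in the models module:

# Equivalent lookup without eval; raises AttributeError for unknown model types.
Model = getattr(models, args.model_type + 'Model')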

Example 7: load_data_into_queue

# Required import: import data [as alias]
# or: from data import Dataset [as alias]
import numpy as np


def load_data_into_queue(sess, enqueue_op, queue_placeholders, coord, model, dataset, config):
    """ Fills a FIFO queue with one epoch of training samples followed by one
    epoch of validation samples, alternating for config['training']['max_epochs'] epochs.

    Args:
        sess: tf.Session
        enqueue_op: tf.FIFOQueue.enqueue
        queue_placeholders: dict
        coord: tf.train.Coordinator()
        model: FCPN
        dataset: Dataset
        config: dict, session configuration parameters

    """

    sample_generators = {
        'train': dataset.sample_generator('train', config['dataset']['training_samples']['num_points'], config['training']['data_augmentation']),
        'val': dataset.sample_generator('val', config['dataset']['training_samples']['num_points'])
    }

    pointnet_locations = model.get_pointnet_locations()
    point_features = np.ones(config['dataset']['training_samples']['num_points'])
    pointnet_features = np.zeros(config['model']['pointnet']['num'])

    constant_features = np.expand_dims(np.concatenate([point_features, pointnet_features]), axis=1)

    for _ in range(config['training']['max_epochs']):
        for s in ['train', 'val']:
            num_enqueued_samples = 0

            for sample_i in range(dataset.get_num_samples(s)):

                if coord.should_stop():
                    return

                input_points_xyz, output_voxelgrid = next(sample_generators[s])
                output_voxelvector = output_voxelgrid.reshape(-1)

                points_xyz_and_pointnet_locations = np.concatenate(
                    (input_points_xyz, pointnet_locations), axis=0)
                voxel_weights = dataset.get_voxel_weights(output_voxelvector)

                feed_dict = {queue_placeholders['input_points_pl']: points_xyz_and_pointnet_locations,
                             queue_placeholders['input_features_pl']: constant_features,
                             queue_placeholders['output_voxels_pl']: output_voxelvector,
                             queue_placeholders['output_voxel_weights_pl']: voxel_weights}

                sess.run(enqueue_op, feed_dict=feed_dict)
                num_enqueued_samples += 1

                # After enqueuing the last sample of the epoch, repeat it to
                # pad out the final (partial) batch
                if num_enqueued_samples == dataset.get_num_samples(s):
                    num_duplicate_samples = dataset.get_num_batches(s, config['training']['batch_size']) * config['training']['batch_size'] - num_enqueued_samples
                    for _ in range(num_duplicate_samples):
                        sess.run(enqueue_op, feed_dict=feed_dict) 
Developer ID: drethage, Project: fully-convolutional-point-network, Lines of code: 57, Source file: training.py
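
For orientation, a hedged sketch of the queue plumbing this loader expects on the other end. The real shapes and capacity are defined by the FCPN model class, so every shape and constant below is an assumption that matches only the feed_dict keys used above:

import tensorflow as tf

queue_placeholders = {
    'input_points_pl': tf.placeholder(tf.float32, shape=(None, 3)),        # xyz points + pointnet locations
    'input_features_pl': tf.placeholder(tf.float32, shape=(None, 1)),      # constant_features column vector
    'output_voxels_pl': tf.placeholder(tf.float32, shape=(None,)),         # flattened voxel grid
    'output_voxel_weights_pl': tf.placeholder(tf.float32, shape=(None,)),  # per-voxel loss weights
}
queue = tf.FIFOQueue(capacity=32, dtypes=[tf.float32] * 4)
enqueue_op = queue.enqueue(list(queue_placeholders.values()))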


Note: the data.Dataset method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Consult each project's License before distributing or reusing the code, and do not repost this article without permission.