

Python data_loader.DataLoader Method Code Examples

This article collects typical usage examples of the Python data_loader.DataLoader method. If you are wondering what data_loader.DataLoader does, how to call it, or what real-world usage looks like, the curated code samples below may help. You can also explore further usage examples from the data_loader module it belongs to.


The following presents 10 code examples of the data_loader.DataLoader method, sorted by popularity by default.
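Note that each project below defines its own data_loader module, so the DataLoader constructor signature differs from example to example. As a rough orientation only, here is a minimal, hypothetical sketch of the kind of interface these projects implement; the names and signature are illustrative and not taken from any specific project:

# Hypothetical sketch only; each project below defines its own DataLoader.
class DataLoader:
    def __init__(self, batch_size, shuffle=True):
        self.batch_size = batch_size
        self.shuffle = shuffle

    def load_data(self):
        # Return dataset metadata such as image dimensions and split sizes.
        raise NotImplementedError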

Example 1: __init__

# Required import: import data_loader [as alias]
# Or: from data_loader import DataLoader [as alias]
def __init__(self, use_cuda=USECUDA, lr=LR):

        if use_cuda:
            torch.cuda.manual_seed(1234)
        else:
            torch.manual_seed(1234)

        self.kl_targ = 0.02
        self.lr_multiplier = 1.
        self.use_cuda = use_cuda

        self.net = Net()
        self.eval_net = Net()
        if use_cuda:
            self.net = self.net.cuda()
            self.eval_net = self.eval_net.cuda()

        self.dl = DataLoader(use_cuda, MINIBATCH)
        self.sample_data = deque(maxlen=TRAINLEN)
        self.gen_optim(lr)
        self.entropy = AlphaEntropy() 
Developer: ne7ermore, Project: torch-light, Lines of code: 23, Source file: train.py

Example 2: build_pose_test_graph

# Required import: import data_loader [as alias]
# Or: from data_loader import DataLoader [as alias]
def build_pose_test_graph(self, input_uint8):
        input_mc = self.select_tensor_or_placeholder_input(input_uint8)
        loader = DataLoader()
        tgt_image, src_image_stack = \
            loader.batch_unpack_image_sequence(
                input_mc, self.img_height, self.img_width, self.num_source)
        with tf.name_scope("pose_prediction"):
            pred_poses, _ = pose_net(tgt_image, src_image_stack, is_training=False)
            self.pred_poses = pred_poses 
Developer: hlzz, Project: DeepMatchVO, Lines of code: 11, Source file: deep_slam.py

Example 3: get_data_loaders

# Required import: import data_loader [as alias]
# Or: from data_loader import DataLoader [as alias]
def get_data_loaders(region_feature_dim, tok2idx):
    test_loader = DataLoader(args, region_feature_dim, 'test', tok2idx)

    if args.test:
        return test_loader, None, None

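    # Track the longest phrase across all splits so every loader pads to the same length.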
    max_length = test_loader.max_length
    train_loader = DataLoader(args, region_feature_dim, 'train', tok2idx)
    max_length = max(max_length, train_loader.max_length)
    val_loader = DataLoader(args, region_feature_dim, 'val', tok2idx, set(train_loader.phrases))
    max_length = max(max_length, val_loader.max_length)
    test_loader.set_max_length(max_length)
    train_loader.set_max_length(max_length)
    val_loader.set_max_length(max_length)
    return test_loader, train_loader, val_loader 
Developer: BryanPlummer, Project: cite, Lines of code: 17, Source file: main.py

Example 4: test

# Required import: import data_loader [as alias]
# Or: from data_loader import DataLoader [as alias]
def test(cfg):
    entity2id = load_dict(cfg['data_folder'] + cfg['entity2id'])
    word2id = load_dict(cfg['data_folder'] + cfg['word2id'])
    relation2id = load_dict(cfg['data_folder'] + cfg['relation2id'])

    test_documents = load_documents(cfg['data_folder'] + cfg['test_documents'])
    test_document_entity_indices, test_document_texts = index_document_entities(test_documents, word2id, entity2id, cfg['max_document_word'])
    test_data = DataLoader(cfg['data_folder'] + cfg['test_data'], test_documents, test_document_entity_indices, test_document_texts, word2id, relation2id, entity2id, cfg['max_query_word'], cfg['max_document_word'], cfg['use_kb'], cfg['use_doc'], cfg['use_inverse_relation'])

    my_model = get_model(cfg, test_data.num_kb_relation, len(entity2id), len(word2id))
    test_acc = inference(my_model, test_data, entity2id, cfg, log_info=True)
    return test_acc 
Developer: OceanskySun, Project: GraftNet, Lines of code: 14, Source file: main.py

Example 5: main

# Required import: import data_loader [as alias]
# Or: from data_loader import DataLoader [as alias]
def main():
    args = parse_args()

    mp.set_start_method('spawn')  # Use the 'spawn' start method for the worker processes.
    _logger = log.get_logger(__name__, args)
    _logger.info(print_args(args))

    loaders = []
    file_list = os.listdir(args.train_file)
    random.shuffle(file_list)
    for i in range(args.worker):
        loader = data_loader.DataLoader(
            args.train_file,
            args.dict_file,
            separate_conj_stmt=args.direction,
            binary=args.binary,
            part_no=i,
            part_total=args.worker,
            file_list=file_list,
            norename=args.norename,
            filter_abelian=args.fabelian,
            compatible=args.compatible)
        loaders.append(loader)
        loader.start_reader()

    net, mid_net, loss_fn = create_models(args, loaders[0], allow_resume=True)
    # Use fake modules to replace the real ones
    net = FakeModule(net)
    if mid_net is not None:
        mid_net = FakeModule(mid_net)
    for i in range(len(loss_fn)):
        loss_fn[i] = FakeModule(loss_fn[i])

    opt = get_opt(net, mid_net, loss_fn, args)

    inqueues = []
    outqueues = []

    plist = []
    for i in range(args.worker):
        recv_p, send_p = Pipe(False)
        recv_p2, send_p2 = Pipe(False)
        inqueues.append(send_p)
        outqueues.append(recv_p2)
        plist.append(
            Process(target=worker, args=(recv_p, send_p2, loaders[i], args, i)))
        plist[-1].start()

    _logger.warning('Training begins')
    train(inqueues, outqueues, net, mid_net, loss_fn, opt, loaders, args, _logger)
    for p in plist:
        p.terminate()
    for loader in loaders:
        loader.destruct()
    _logger.warning('Training ends') 
Developer: princeton-vl, Project: FormulaNet, Lines of code: 58, Source file: batch_train.py

Example 6: __init__

# Required import: import data_loader [as alias]
# Or: from data_loader import DataLoader [as alias]
def __init__(self):
        # Input shape
        self.img_rows = 32
        self.img_cols = 32
        self.channels = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)
        self.num_classes = 10

        # Configure MNIST and MNIST-M data loader
        self.data_loader = DataLoader(img_res=(self.img_rows, self.img_cols))

        # Loss weights
        lambda_adv = 10
        lambda_clf = 1

        # Calculate output shape of D (PatchGAN)
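        # For this model's 32x32 inputs, patch = int(32 / 2**4) = 2, i.e. a 2x2x1 validity map.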
        patch = int(self.img_rows / 2**4)
        self.disc_patch = (patch, patch, 1)

        # Number of residual blocks in the generator
        self.residual_blocks = 6

        optimizer = Adam(0.0002, 0.5)

        # Number of filters in first layer of discriminator and classifier
        self.df = 64
        self.cf = 64

        # Build and compile the discriminators
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='mse',
            optimizer=optimizer,
            metrics=['accuracy'])

        # Build the generator
        self.generator = self.build_generator()

        # Build the task (classification) network
        self.clf = self.build_classifier()

        # Input images from both domains
        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)

        # Translate images from domain A to domain B
        fake_B = self.generator(img_A)

        # Classify the translated image
        class_pred = self.clf(fake_B)

        # For the combined model we will only train the generator and classifier
        self.discriminator.trainable = False

        # Discriminator determines validity of translated images
        valid = self.discriminator(fake_B)

        self.combined = Model(img_A, [valid, class_pred])
        self.combined.compile(loss=['mse', 'categorical_crossentropy'],
                                    loss_weights=[lambda_adv, lambda_clf],
                                    optimizer=optimizer,
                                    metrics=['accuracy']) 
Developer: eriklindernoren, Project: Keras-GAN, Lines of code: 63, Source file: pixelda.py

Example 7: __init__

# Required import: import data_loader [as alias]
# Or: from data_loader import DataLoader [as alias]
def __init__(self):
        # Input shape
        self.img_rows = 256
        self.img_cols = 256
        self.channels = 3
        self.img_shape = (self.img_rows, self.img_cols, self.channels)

        # Configure data loader
        self.dataset_name = 'facades'
        self.data_loader = DataLoader(dataset_name=self.dataset_name,
                                      img_res=(self.img_rows, self.img_cols))


        # Calculate output shape of D (PatchGAN)
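        # For 256x256 inputs, patch = int(256 / 2**4) = 16, i.e. a 16x16x1 validity map.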
        patch = int(self.img_rows / 2**4)
        self.disc_patch = (patch, patch, 1)

        # Number of filters in the first layer of G and D
        self.gf = 64
        self.df = 64

        optimizer = Adam(0.0002, 0.5)

        # Build and compile the discriminator
        self.discriminator = self.build_discriminator()
        self.discriminator.compile(loss='mse',
            optimizer=optimizer,
            metrics=['accuracy'])

        #-------------------------
        # Construct Computational
        #   Graph of Generator
        #-------------------------

        # Build the generator
        self.generator = self.build_generator()

        # Input images and their conditioning images
        img_A = Input(shape=self.img_shape)
        img_B = Input(shape=self.img_shape)

        # By conditioning on B generate a fake version of A
        fake_A = self.generator(img_B)

        # For the combined model we will only train the generator
        self.discriminator.trainable = False

        # Discriminator determines validity of translated image / condition pairs
        valid = self.discriminator([fake_A, img_B])

        self.combined = Model(inputs=[img_A, img_B], outputs=[valid, fake_A])
        self.combined.compile(loss=['mse', 'mae'],
                              loss_weights=[1, 100],
                              optimizer=optimizer) 
Developer: eriklindernoren, Project: Keras-GAN, Lines of code: 56, Source file: pix2pix.py

Example 8: main

# Required import: import data_loader [as alias]
# Or: from data_loader import DataLoader [as alias]
def main():
    # Parse the JSON arguments
    try:
        config_args = parse_args()
    except Exception:
        print("Add a config file using '--config file_name.json'")
        exit(1)

    # Create the experiment directories
    _, config_args.summary_dir, config_args.checkpoint_dir = create_experiment_dirs(config_args.experiment_dir)

    # Reset the default Tensorflow graph
    tf.reset_default_graph()

    # Tensorflow specific configuration
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    # Data loading
    data = DataLoader(config_args.batch_size, config_args.shuffle)
    print("Loading Data...")
    config_args.img_height, config_args.img_width, config_args.num_channels, \
    config_args.train_data_size, config_args.test_data_size = data.load_data()
    print("Data loaded\n\n")

    # Model creation
    print("Building the model...")
    model = MobileNet(config_args)
    print("Model is built successfully\n\n")

    # Summarizer creation
    summarizer = Summarizer(sess, config_args.summary_dir)
    # Train class
    trainer = Train(sess, model, data, summarizer)

    if config_args.to_train:
        try:
            print("Training...")
            trainer.train()
            print("Training Finished\n\n")
        except KeyboardInterrupt:
            trainer.save_model()

    if config_args.to_test:
        print("Final test!")
        trainer.test('val')
        print("Testing Finished\n\n") 
Developer: MG2033, Project: MobileNet, Lines of code: 50, Source file: main.py

Example 9: main

# Required import: import data_loader [as alias]
# Or: from data_loader import DataLoader [as alias]
def main():
    # get input images
    if not os.path.isdir(FLAGS.output_dir):
        os.makedirs(FLAGS.output_dir)
    concat_img_dir = os.path.join(FLAGS.concat_img_dir, '%.2d' % FLAGS.test_seq)
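    # The target frame sits in the middle of each sequence, with max_src_offset source frames on each side.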
    max_src_offset = int((FLAGS.seq_length - 1)/2)
    N = len(glob(concat_img_dir + '/*.jpg')) + 2*max_src_offset
    test_frames = ['%.2d %.6d' % (FLAGS.test_seq, n) for n in range(N)]

    with open(FLAGS.dataset_dir + 'sequences/%.2d/times.txt' % FLAGS.test_seq, 'r') as f:
        times = f.readlines()
    times = np.array([float(s[:-1]) for s in times])

    with tf.Session() as sess:
        # setup input tensor
        loader = DataLoader(FLAGS.concat_img_dir, FLAGS.batch_size, FLAGS.img_height, FLAGS.img_width, FLAGS.seq_length-1)
        image_sequence_names, tgt_inds = load_kitti_image_sequence_names(FLAGS.concat_img_dir, test_frames, FLAGS.seq_length)
        image_sequence_names = complete_batch_size(image_sequence_names, FLAGS.batch_size)
        tgt_inds = complete_batch_size(tgt_inds, FLAGS.batch_size)
        assert len(tgt_inds) == len(image_sequence_names)
        batch_sample = loader.load_test_batch(image_sequence_names)
        sess.run(batch_sample.initializer)
        input_batch = batch_sample.get_next()
        input_batch.set_shape([FLAGS.batch_size, FLAGS.img_height, FLAGS.img_width * FLAGS.seq_length, 3])

        # init system
        system = DeepSlam()
        system.setup_inference(FLAGS.img_height, FLAGS.img_width,
                               'pose', FLAGS.seq_length, FLAGS.batch_size, input_batch)
        saver = tf.train.Saver(tf.trainable_variables())
        saver.restore(sess, FLAGS.ckpt_file)

        round_num = len(image_sequence_names) // FLAGS.batch_size
        for i in range(round_num):
            pred = system.inference(sess, mode='pose')
            for j in range(FLAGS.batch_size):
                tgt_idx = tgt_inds[i * FLAGS.batch_size + j]
                pred_poses = pred['pose'][j]
                # Insert the target pose [0, 0, 0, 0, 0, 0] to the middle
                pred_poses = np.insert(pred_poses, max_src_offset, np.zeros((1,6)), axis=0)
                curr_times = times[tgt_idx-max_src_offset : tgt_idx+max_src_offset+1]
                out_file = FLAGS.output_dir + '%.6d.txt' % (tgt_idx - max_src_offset)
                dump_pose_seq_TUM(out_file, pred_poses, curr_times) 
Developer: hlzz, Project: DeepMatchVO, Lines of code: 45, Source file: test_kitti_pose.py

Example 10: main

# Required import: import data_loader [as alias]
# Or: from data_loader import DataLoader [as alias]
def main():
    # Parse the JSON arguments
    config_args = parse_args()

    # Create the experiment directories
    _, config_args.summary_dir, config_args.checkpoint_dir = create_experiment_dirs(config_args.experiment_dir)

    # Reset the default Tensorflow graph
    tf.reset_default_graph()

    # Tensorflow specific configuration
    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)

    # Data loading
    # The batch size is set to 1 at test time to simulate real single-sample inference.
    data_batch_size = config_args.batch_size if config_args.train_or_test == "train" else 1
    data = DataLoader(data_batch_size, config_args.shuffle)
    print("Loading Data...")
    config_args.img_height, config_args.img_width, config_args.num_channels, \
    config_args.train_data_size, config_args.test_data_size = data.load_data()
    print("Data loaded\n\n")

    # Model creation
    print("Building the model...")
    model = ShuffleNet(config_args)
    print("Model is built successfully\n\n")

    # Parameters visualization
    show_parameters()

    # Summarizer creation
    summarizer = Summarizer(sess, config_args.summary_dir)
    # Train class
    trainer = Train(sess, model, data, summarizer)

    if config_args.train_or_test == 'train':
        try:
            # print("FLOPs for batch size = " + str(config_args.batch_size) + "\n")
            # calculate_flops()
            print("Training...")
            trainer.train()
            print("Training Finished\n\n")
        except KeyboardInterrupt:
            trainer.save_model()

    elif config_args.train_or_test == 'test':
        # print("FLOPs for single inference \n")
        # calculate_flops()
        # This can be 'val' or 'test' or even 'train' according to the needs.
        print("Testing...")
        trainer.test('val')
        print("Testing Finished\n\n")

    else:
        raise ValueError("Train or Test options only are allowed") 
Developer: MG2033, Project: ShuffleNet, Lines of code: 59, Source file: main.py


Note: The data_loader.DataLoader method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.