

Python data_loader.load_data Method Code Examples

This article collects typical usage examples of the data_loader.load_data method in Python. If you have been wondering what data_loader.load_data does, how to call it, or what its real-world usage looks like, the curated examples below should help. You can also explore further usage examples from the data_loader module.


The following presents 8 code examples of data_loader.load_data, sorted by popularity by default.

Example 1: load_data

# Required import: import data_loader [as alias]
# Or alternatively: from data_loader import load_data [as alias]
def load_data(src, tar, root_dir):
    folder_src = root_dir + src + '/images/'
    folder_tar = root_dir + tar + '/images/'
    source_loader = data_loader.load_data(
        folder_src, CFG['batch_size'], True, CFG['kwargs'])
    target_train_loader = data_loader.load_data(
        folder_tar, CFG['batch_size'], True, CFG['kwargs'])
    target_test_loader = data_loader.load_data(
        folder_tar, CFG['batch_size'], False, CFG['kwargs'])
    return source_loader, target_train_loader, target_test_loader 
Developer: jindongwang | Project: transferlearning | Lines: 12 | Source: main.py
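A minimal usage sketch for the helper above, assuming a hypothetical CFG dict and an Office-31-style directory layout (both are assumptions; the excerpt only shows that CFG carries 'batch_size' and loader keyword arguments under 'kwargs'):

CFG = {'batch_size': 32, 'kwargs': {'num_workers': 4, 'pin_memory': True}}

# 'amazon' and 'webcam' are illustrative domain folder names; any two
# subfolders of root_dir containing an images/ directory would work.
src, tgt_train, tgt_test = load_data('amazon', 'webcam', '/data/office31/')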

Example 2: test

# Required import: import data_loader [as alias]
# Or alternatively: from data_loader import load_data [as alias]
def test(self):
        """Test Function."""
        print("Testing the results")

        self.inputs = data_loader.load_data(
            self._dataset_name, self._size_before_crop,
            False, self._do_flipping)

        self.model_setup()
        saver = tf.train.Saver()
        init = tf.global_variables_initializer()

        with tf.Session() as sess:
            sess.run(init)

            chkpt_fname = tf.train.latest_checkpoint(self._checkpoint_dir)
            saver.restore(sess, chkpt_fname)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)

            self._num_imgs_to_save = cyclegan_datasets.DATASET_TO_SIZES[
                self._dataset_name]
            self.save_images_bis(sess, sess.run(self.global_step))

            coord.request_stop()
            coord.join(threads) 
Developer: AlamiMejjati | Project: Unsupervised-Attention-guided-Image-to-Image-Translation | Lines: 29 | Source: main.py
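This example targets TensorFlow 1.x APIs (Session, Saver, Coordinator, queue runners). A minimal sketch for running it unmodified on TensorFlow 2.x via the official v1 compatibility layer:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restores Session and queue-runner semantics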

Example 3: __init__

# Required import: import data_loader [as alias]
# Or alternatively: from data_loader import load_data [as alias]
def __init__(self, n=0, train=True, transform=None, expanded=False):
        self.n = n
        self.transform = transform
        td, vd, ts = data_loader.load_data(n, expanded=expanded)
        if train: self.data = td
        else: self.data = vd 
Developer: mnielsen | Project: rmnist | Lines: 8 | Source: conv.py
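The constructor above reads like the __init__ of a torch-style Dataset wrapper around rmnist. A sketch of the two methods such a class would also need (hypothetical: they are not part of the excerpt, and the (features, labels) layout of self.data is an assumption):

    def __len__(self):
        return len(self.data[0])

    def __getitem__(self, idx):
        # Assumed layout: self.data = (features, labels)
        x, y = self.data[0][idx], self.data[1][idx]
        if self.transform:
            x = self.transform(x)
        return x, y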

Example 4: transfer

# Required import: import data_loader [as alias]
# Or alternatively: from data_loader import load_data [as alias]
def transfer(n):
    td, vd, ts = data_loader.load_data(n, abstract=True, expanded=expanded)
    classifiers = [
        #sklearn.svm.SVC(),
        #sklearn.svm.SVC(kernel="linear", C=0.1),
        #sklearn.neighbors.KNeighborsClassifier(1),
        #sklearn.tree.DecisionTreeClassifier(),
        #sklearn.ensemble.RandomForestClassifier(max_depth=10, n_estimators=500, max_features=1),
        sklearn.neural_network.MLPClassifier(alpha=1.0, hidden_layer_sizes=(300,), max_iter=500)
    ]
    for clf in classifiers:
        clf.fit(td[0], td[1])
        print "\n{}: {}".format(type(clf).__name__, round(clf.score(vd[0], vd[1])*100, 2)) 
Developer: mnielsen | Project: rmnist | Lines: 15 | Source: transfer.py
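Note that expanded is read from the enclosing module's namespace rather than passed in. A hypothetical invocation (the flag value and the meaning of n as examples-per-class are assumptions):

expanded = False  # module-level flag referenced inside transfer()
transfer(10)      # e.g. fit and score with 10 examples per class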

Example 5: baselines

# Required import: import data_loader [as alias]
# Or alternatively: from data_loader import load_data [as alias]
def baselines(n):
    td, vd, ts = data_loader.load_data(n)
    classifiers = [
        sklearn.svm.SVC(C=1000),
        sklearn.svm.SVC(kernel="linear", C=0.1),
        sklearn.neighbors.KNeighborsClassifier(1),
        sklearn.tree.DecisionTreeClassifier(),
        sklearn.ensemble.RandomForestClassifier(max_depth=10, n_estimators=500, max_features=1),
        sklearn.neural_network.MLPClassifier(alpha=1, hidden_layer_sizes=(500, 100))
    ]
    for clf in classifiers:
        clf.fit(td[0], td[1])
        print "\n{}: {}".format(type(clf).__name__, round(clf.score(vd[0], vd[1])*100, 2)) 
Developer: mnielsen | Project: rmnist | Lines: 15 | Source: baselines.py
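The clf.fit(td[0], td[1]) / clf.score(vd[0], vd[1]) calls imply that each split is a (features, labels) pair. A sketch of the assumed layout and a hypothetical call:

# Assumed layout, inferred from the fit/score calls above:
#   td = (X_train, y_train), vd = (X_valid, y_valid), ts = (X_test, y_test)
baselines(5)  # hypothetical: n selects which rmnist subset to load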

Example 6: train

# Required import: import data_loader [as alias]
# Or alternatively: from data_loader import load_data [as alias]
def train(epochs=HYPERPARAMS.epochs, random_state=HYPERPARAMS.random_state,
          kernel=HYPERPARAMS.kernel, decision_function=HYPERPARAMS.decision_function,
          gamma=HYPERPARAMS.gamma, train_model=True):

    print("loading dataset " + DATASET.name + "...")
    if train_model:
        data, validation = load_data(validation=True)
    else:
        data, validation, test = load_data(validation=True, test=True)

    if train_model:
        # Training phase
        print("building model...")
        model = SVC(random_state=random_state, max_iter=epochs, kernel=kernel,
                    decision_function_shape=decision_function, gamma=gamma)

        print("start training...")
        print("--")
        print("kernel: {}".format(kernel))
        print("decision function: {}".format(decision_function))
        print("max epochs: {}".format(epochs))
        print("gamma: {}".format(gamma))
        print("--")
        print("Training samples: {}".format(len(data['Y'])))
        print("Validation samples: {}".format(len(validation['Y'])))
        print("--")
        start_time = time.time()
        model.fit(data['X'], data['Y'])
        training_time = time.time() - start_time
        print("training time = {0:.1f} sec".format(training_time))

        if TRAINING.save_model:
            print("saving model...")
            # cPickle is Python 2's C pickle; on Python 3 use `import pickle as cPickle`
            with open(TRAINING.save_model_path, 'wb') as f:
                cPickle.dump(model, f)

        print("evaluating...")
        validation_accuracy = evaluate(model, validation['X'], validation['Y'])
        print("  - validation accuracy = {0:.1f}".format(validation_accuracy*100))
        return validation_accuracy
    else:
        # Testing phase: load the saved model and evaluate it on the test dataset
        print("start evaluation...")
        print("loading pretrained model...")
        if os.path.isfile(TRAINING.save_model_path):
            with open(TRAINING.save_model_path, 'rb') as f:
                model = cPickle.load(f)
        else:
            print("Error: file '{}' not found".format(TRAINING.save_model_path))
            exit()

        print("--")
        print("Validation samples: {}".format(len(validation['Y'])))
        print("Test samples: {}".format(len(test['Y'])))
        print("--")
        print("evaluating...")
        start_time = time.time()
        validation_accuracy = evaluate(model, validation['X'], validation['Y'])
        print("  - validation accuracy = {0:.1f}".format(validation_accuracy*100))
        test_accuracy = evaluate(model, test['X'], test['Y'])
        print("  - test accuracy = {0:.1f}".format(test_accuracy*100))
        print("  - evaluation time = {0:.1f} sec".format(time.time() - start_time))
        return test_accuracy
Developer: amineHorseman | Project: facial-expression-recognition-svm | Lines: 63 | Source: train.py
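The snippet calls an evaluate() helper that is not shown in the excerpt. A minimal stand-in, assuming plain classification accuracy as the metric (an assumption, not the project's actual helper):

from sklearn.metrics import accuracy_score

def evaluate(model, X, Y):
    # Hypothetical stand-in: fraction of correctly classified samples.
    return accuracy_score(Y, model.predict(X))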

Example 7: eval

# Required import: import data_loader [as alias]
# Or alternatively: from data_loader import load_data [as alias]
def eval():
	g = Graph(is_training = False)
	print("MSG : Graph loaded!")

	X, Sources, Targets = load_data('test')
	en2idx, idx2en = load_vocab('en.vocab.tsv')
	de2idx, idx2de = load_vocab('de.vocab.tsv')

	with g.graph.as_default():
		sv = tf.train.Supervisor()
		with sv.managed_session(config = tf.ConfigProto(allow_soft_placement = True)) as sess:
			# load the pre-trained model
			sv.saver.restore(sess, tf.train.latest_checkpoint(pm.checkpoint))
			print("MSG : Restore Model!")

			mname = open(pm.checkpoint + '/checkpoint', 'r').read().split('"')[1]

			if not os.path.exists('Results'):
				os.mkdir('Results')
			with codecs.open("Results/" + mname, 'w', 'utf-8') as f:
				list_of_refs, predict = [], []
				# Get a batch
				for i in range(len(X) // pm.batch_size):
					x = X[i * pm.batch_size: (i + 1) * pm.batch_size]
					sources = Sources[i * pm.batch_size: (i + 1) * pm.batch_size]
					targets = Targets[i * pm.batch_size: (i + 1) * pm.batch_size]

					# Autoregressive inference
					preds = np.zeros((pm.batch_size, pm.maxlen), dtype = np.int32)
					for j in range(pm.maxlen):
						_preds = sess.run(g.preds, feed_dict = {g.inpt: x, g.outpt: preds})
						preds[:, j] = _preds[:, j]

					for source, target, pred in zip(sources, targets, preds):
						got = " ".join(idx2de[idx] for idx in pred).split("<EOS>")[0].strip()
						f.write("- Source: {}\n".format(source))
						f.write("- Ground Truth: {}\n".format(target))
						f.write("- Predict: {}\n\n".format(got))
						f.flush()

						# Bleu Score
						ref = target.split()
						prediction = got.split()
						if len(ref) > pm.word_limit_lower and len(prediction) > pm.word_limit_lower:
							list_of_refs.append([ref])
							predict.append(prediction)

				score = corpus_bleu(list_of_refs, predict)
				f.write("Bleu Score = " + str(100 * score)) 
Developer: EternalFeather | Project: Transformer-in-generating-dialogue | Lines: 51 | Source: eval.py
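The code leans on a project-level hyperparameter module pm and a BLEU implementation such as NLTK's corpus_bleu (assumed; the imports are not shown in the excerpt). A hypothetical minimal setup for those names, with illustrative values:

from nltk.translate.bleu_score import corpus_bleu

class pm:  # hypothetical stand-in for the project's params module
    batch_size = 32
    maxlen = 25
    word_limit_lower = 3
    checkpoint = './checkpoint'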

Example 8: main

# Required import: import data_loader [as alias]
# Or alternatively: from data_loader import load_data [as alias]
def main():
    # parse the command line arguments
    parser = NeonArgparser(__doc__)
    parser.add_argument('--output_path', required=True,
                        help='Output path used when training model')
    parser.add_argument('--w2v_path', required=False, default=None,
                        help='Path to GoogleNews w2v file for vocab expansion.')
    parser.add_argument('--eval_data_path', required=False, default='./SICK_data',
                        help='Path to the SICK dataset for evaluating semantic relatedness')
    parser.add_argument('--max_vocab_size', required=False, default=1000000,
                        help='Limit the vocabulary expansion to fit in GPU memory')
    parser.add_argument('--subset_pct', required=False, default=100,
                        help='subset of training dataset to use (use to retrieve \
                        preprocessed data from training)')
    args = parser.parse_args(gen_be=True)

    # load vocab file from training
    _, vocab_file = load_data(args.data_dir, output_path=args.output_path,
                              subset_pct=float(args.subset_pct))
    vocab, _, _ = load_obj(vocab_file)

    vocab_size = len(vocab)
    neon_logger.display("\nVocab size from the dataset is: {}".format(vocab_size))

    index_from = 2  # 0: padding 1: oov
    vocab_size_layer = vocab_size + index_from
    max_len = 30

    # load trained model
    model_dict = load_obj(args.model_file)

    # Vocabulary expansion trick needs to pass the correct vocab set to evaluate (for tokenization)
    if args.w2v_path:
        neon_logger.display("Performing Vocabulary Expansion... Loading W2V...")
        w2v_vocab, w2v_vocab_size = get_w2v_vocab(args.w2v_path,
                                                  int(args.max_vocab_size), cache=True)

        vocab_size_layer = w2v_vocab_size + index_from
        model = load_sent_encoder(model_dict, expand_vocab=True, orig_vocab=vocab,
                                  w2v_vocab=w2v_vocab, w2v_path=args.w2v_path, use_recur_last=True)
        vocab = w2v_vocab
    else:
        # otherwise stick with original vocab size used to train the model
        model = load_sent_encoder(model_dict, use_recur_last=True)

    model.initialize(dataset=(max_len, 1))

    evaluate(model, vocab=vocab, data_path=args.eval_data_path, evaltest=True,
             vocab_size_layer=vocab_size_layer) 
Developer: NervanaSystems | Project: neon | Lines: 51 | Source: eval_sick.py
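A hypothetical invocation (paths are illustrative; --data_dir and --model_file are read via args.data_dir and args.model_file, so they are assumed to come from NeonArgparser's built-in options):

python eval_sick.py --data_dir ./data --output_path ./output \
    --model_file ./sent_encoder.prm --eval_data_path ./SICK_data \
    --w2v_path ./GoogleNews-vectors-negative300.bin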


Note: The data_loader.load_data examples in this article were compiled by 纯净天空 from open-source code hosted on platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.