

Python utils.load_mnist Method Code Examples

This article collects typical usage examples of the Python method utils.load_mnist. If you are wondering how utils.load_mnist is called in practice, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the utils module in which the method lives.


Eight code examples of the utils.load_mnist method are shown below, sorted by popularity by default.
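Note that utils is a project-specific module, so the exact behavior of load_mnist differs between the examples below (some return four flat arrays, others return (train, test) tuples or take a dataset name). As a rough, hypothetical sketch only, a minimal load_mnist in the spirit of Examples 1 and 2 might look like this (the Keras dataset loader is an assumption for the sketch, not part of any of the projects):

import numpy as np
from tensorflow.keras.datasets import mnist  # assumed available; not used by the original projects


def load_mnist():
    """Return MNIST as flat float32 images in [0, 1] plus integer labels."""
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    X_train = X_train.reshape(-1, 784).astype(np.float32) / 255.0
    X_test = X_test.reshape(-1, 784).astype(np.float32) / 255.0
    return X_train, y_train, X_test, y_test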

Example 1: plot_pca

# Required import: import utils [as alias]
# Or: from utils import load_mnist [as alias]
def plot_pca():
    print('loading data')
    X_train, y_train, X_test, y_test = utils.load_mnist()
    pca = PCA(n_components=2)

    print('transforming training data')
    Z_train = pca.fit_transform(X_train)

    print('transforming test data')
    Z_test = pca.transform(X_test)

    plot(Z_train, y_train, Z_test, y_test,
         filename='pca.png', title='projected onto principal components')
Developer: hjweide | Project: adversarial-autoencoder | Lines: 15 | Source: plot.py
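Examples 1 and 2 both call a plot() helper that is not shown in the snippets. The following is only a plausible stand-in: its signature is inferred from the calls above, and the matplotlib details are an assumption.

import matplotlib.pyplot as plt


def plot(Z_train, y_train, Z_test, y_test, filename='out.png', title=''):
    """Scatter 2-D codes for the train and test splits, colored by digit label."""
    fig, (ax_tr, ax_te) = plt.subplots(1, 2, figsize=(12, 5), sharex=True, sharey=True)
    sc = ax_tr.scatter(Z_train[:, 0], Z_train[:, 1], c=y_train, cmap='tab10', s=2)
    ax_te.scatter(Z_test[:, 0], Z_test[:, 1], c=y_test, cmap='tab10', s=2)
    ax_tr.set_title('train')
    ax_te.set_title('test')
    fig.suptitle(title)
    fig.colorbar(sc, ax=[ax_tr, ax_te], label='digit')
    fig.savefig(filename, dpi=150)
    plt.close(fig)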

Example 2: plot_autoencoder

# Required import: import utils [as alias]
# Or: from utils import load_mnist [as alias]
def plot_autoencoder(weightsfile):
    print('building model')
    layers = model.build_model()

    batch_size = 128

    print('compiling theano function')
    encoder_func = theano_funcs.create_encoder_func(layers)

    print('loading weights from %s' % (weightsfile))
    model.load_weights([
        layers['l_decoder_out'],
        layers['l_discriminator_out'],
    ], weightsfile)

    print('loading data')
    X_train, y_train, X_test, y_test = utils.load_mnist()

    train_datapoints = []
    print('transforming training data')
    for train_idx in get_batch_idx(X_train.shape[0], batch_size):
        X_train_batch = X_train[train_idx]
        train_batch_codes = encoder_func(X_train_batch)
        train_datapoints.append(train_batch_codes)

    test_datapoints = []
    print('transforming test data')
    for test_idx in get_batch_idx(X_test.shape[0], batch_size):
        X_test_batch = X_test[test_idx]
        test_batch_codes = encoder_func(X_test_batch)
        test_datapoints.append(test_batch_codes)

    Z_train = np.vstack(train_datapoints)
    Z_test = np.vstack(test_datapoints)

    plot(Z_train, y_train, Z_test, y_test,
         filename='adversarial_train_val.png',
         title='projected onto latent space of autoencoder') 
Developer: hjweide | Project: adversarial-autoencoder | Lines: 40 | Source: plot.py

Example 3: create_inputs

# Required import: import utils [as alias]
# Or: from utils import load_mnist [as alias]
def create_inputs():
    trX, trY = load_mnist(cfg.dataset, cfg.is_training)

    num_pre_threads = cfg.thread_per_gpu*cfg.num_gpu
    data_queue = tf.train.slice_input_producer([trX, trY], capacity=64*num_pre_threads)
    X, Y = tf.train.shuffle_batch(data_queue, num_threads=num_pre_threads,
                                  batch_size=cfg.batch_size_per_gpu*cfg.num_gpu,
                                  capacity=cfg.batch_size_per_gpu*cfg.num_gpu * 64,
                                  min_after_dequeue=cfg.batch_size_per_gpu*cfg.num_gpu * 32,
                                  allow_smaller_final_batch=False)

    return (X, Y) 
Developer: bourdakos1 | Project: capsule-networks | Lines: 14 | Source: distributed_train.py
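The (X, Y) tensors returned by create_inputs() are backed by TF 1.x input queues, so they only yield data once queue runners are started. A hypothetical consumption sketch, not part of the original distributed_train.py:

import tensorflow as tf

X, Y = create_inputs()
with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        # One shuffled batch of size batch_size_per_gpu * num_gpu
        images, labels = sess.run([X, Y])
        print(images.shape, labels.shape)
    finally:
        coord.request_stop()
        coord.join(threads)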

Example 4: run_mnist

# Required import: import utils [as alias]
# Or: from utils import load_mnist [as alias]
def run_mnist(n_train=None, n_test=None, model_type="normal"):
    datasize = {"n_train": n_train, "n_test": n_test}
    transformer_params = {
        "image_shape": 28,
        "filter_shape_l1": 5, "step_shape_l1": 1, "n_l1_output": 8,
        "filter_shape_l2": 5, "step_shape_l2": 1, "n_l2_output": 4,
        "filter_shape_pooling": 5, "step_shape_pooling": 5
    }
    ensemble_params = {
        "n_estimators" : 40,
        "sampling_ratio" : 0.03,
        "n_jobs" : -1
    }
    dataset = utils.load_mnist()
    run(dataset, datasize, transformer_params, ensemble_params, model_type) 
Developer: IshitaTakeshi | Project: PCANet | Lines: 17 | Source: evaluation.py
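A hypothetical invocation of the function above; run() and the rest of the PCANet pipeline are defined elsewhere in the project, so this is a usage sketch only:

# Assumed semantics: n_train/n_test subsample the splits, None means "use the full split".
run_mnist(n_train=10000, n_test=1000, model_type="normal")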

Example 5: main

# Required import: import utils [as alias]
# Or: from utils import load_mnist [as alias]
def main(_):
    # Load Graph
    capsNet = CapsNet(is_training=False)
    print('[+] Graph is constructed')

    # Load test data
    teX, teY = load_mnist(conf.dataset, is_training=False)

    # Start session
    with capsNet.graph.as_default():
        sv = tf.train.Supervisor(logdir=conf.logdir)
        with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            # Restore parameters
            checkpoint_path = tf.train.latest_checkpoint(conf.logdir)
            sv.saver.restore(sess, checkpoint_path)
            print('[+] Graph is restored from ' + checkpoint_path)

            # Make results directory
            if not os.path.exists('results'):
                os.mkdir('results')

            reconstruction_err = []
            classification_acc = []
            for i in range(10000 // conf.batch_size):
                start = i * conf.batch_size
                end = start + conf.batch_size

                # Reconstruction
                recon_imgs = sess.run(capsNet.decoded, {capsNet.x: teX[start:end]})
                recon_imgs = np.reshape(recon_imgs, (conf.batch_size, -1))
                orgin_imgs = np.reshape(teX[start:end], (conf.batch_size, -1))
                squared = np.square(recon_imgs - orgin_imgs)
                reconstruction_err.append(np.mean(squared))
                if i % 5 == 0:
                    imgs = np.reshape(recon_imgs, (conf.batch_size, 28, 28, 1))
                    size = 6
                    save_images(imgs[0:size * size, :], [size, size], 'results/test_%03d.png' % i)

                # Classification
                cls_result = sess.run(capsNet.preds, {capsNet.x: teX[start:end]})
                cls_answer = teY[start:end]
                cls_acc = np.mean(np.equal(cls_result, cls_answer).astype(np.float32))
                classification_acc.append(cls_acc)

            # Print classification accuracy & reconstruction error
            print('reconstruction_err : ' + str(np.mean(reconstruction_err)))
            print('classification_acc : ' + str(np.mean(classification_acc) * 100)) 
Developer: JunYeopLee | Project: capsule-networks | Lines: 49 | Source: eval.py

Example 6: __init__

# Required import: import utils [as alias]
# Or: from utils import load_mnist [as alias]
def __init__(self, args):
        # parameters
        self.epoch = args.epoch
        self.sample_num = 100
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type

        # networks init
        self.G = generator(self.dataset)
        self.D = discriminator(self.dataset)
        self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            self.BCE_loss = nn.BCELoss().cuda()
        else:
            self.BCE_loss = nn.BCELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')

        # load mnist
        self.data_X, self.data_Y = utils.load_mnist(args.dataset)
        self.z_dim = 62
        self.y_dim = 10

        # fixed noise & condition
        self.sample_z_ = torch.zeros((self.sample_num, self.z_dim))
        for i in range(10):
            self.sample_z_[i*self.y_dim] = torch.rand(1, self.z_dim)
            for j in range(1, self.y_dim):
                self.sample_z_[i*self.y_dim + j] = self.sample_z_[i*self.y_dim]

        temp = torch.zeros((10, 1))
        for i in range(self.y_dim):
            temp[i, 0] = i

        temp_y = torch.zeros((self.sample_num, 1))
        for i in range(10):
            temp_y[i*self.y_dim: (i+1)*self.y_dim] = temp

        self.sample_y_ = torch.zeros((self.sample_num, self.y_dim))
        self.sample_y_.scatter_(1, temp_y.type(torch.LongTensor), 1)
        if self.gpu_mode:
            self.sample_z_, self.sample_y_ = Variable(self.sample_z_.cuda(), volatile=True), Variable(self.sample_y_.cuda(), volatile=True)
        else:
            self.sample_z_, self.sample_y_ = Variable(self.sample_z_, volatile=True), Variable(self.sample_y_, volatile=True) 
Developer: tangzhenyu | Project: Generative_Model_Zoo | Lines: 58 | Source: CGAN.py
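The scatter_ call near the end of the constructor turns a column of class ids into the one-hot condition vectors sample_y_. A standalone illustration of the same idiom, not taken from the CGAN project:

import torch

sample_num, y_dim = 100, 10
class_ids = torch.arange(sample_num).remainder(y_dim).view(-1, 1)  # 0..9 repeating, shape (100, 1)
one_hot = torch.zeros(sample_num, y_dim)
one_hot.scatter_(1, class_ids.long(), 1)  # each row gets a single 1 at the column given by its class id
assert one_hot.sum().item() == sample_num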

Example 7: __init__

# Required import: import utils [as alias]
# Or: from utils import load_mnist [as alias]
def __init__(self, args):
        # parameters
        self.epoch = args.epoch
        self.sample_num = 100
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type

        # networks init
        self.G = generator(self.dataset)
        self.D = discriminator(self.dataset)
        self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            self.BCE_loss = nn.BCELoss().cuda()
            self.CE_loss = nn.CrossEntropyLoss().cuda()
        else:
            self.BCE_loss = nn.BCELoss()
            self.CE_loss = nn.CrossEntropyLoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')

        # load mnist
        self.data_X, self.data_Y = utils.load_mnist(args.dataset)
        self.z_dim = 62
        self.y_dim = 10

        # fixed noise & condition
        self.sample_z_ = torch.zeros((self.sample_num, self.z_dim))
        for i in range(10):
            self.sample_z_[i*self.y_dim] = torch.rand(1, self.z_dim)
            for j in range(1, self.y_dim):
                self.sample_z_[i*self.y_dim + j] = self.sample_z_[i*self.y_dim]

        temp = torch.zeros((10, 1))
        for i in range(self.y_dim):
            temp[i, 0] = i

        temp_y = torch.zeros((self.sample_num, 1))
        for i in range(10):
            temp_y[i*self.y_dim: (i+1)*self.y_dim] = temp

        self.sample_y_ = torch.zeros((self.sample_num, self.y_dim))
        self.sample_y_.scatter_(1, temp_y.type(torch.LongTensor), 1)
        if self.gpu_mode:
            self.sample_z_, self.sample_y_ = Variable(self.sample_z_.cuda(), volatile=True), Variable(self.sample_y_.cuda(), volatile=True)
        else:
            self.sample_z_, self.sample_y_ = Variable(self.sample_z_, volatile=True), Variable(self.sample_y_, volatile=True) 
Developer: tangzhenyu | Project: Generative_Model_Zoo | Lines: 60 | Source: ACGAN.py

Example 8: __init__

# Required import: import utils [as alias]
# Or: from utils import load_mnist [as alias]
def __init__(self, seq_len, batch_size, dataset='mnist', set='train',
                 rng=None, infinite=True, digits=None):

        if dataset == 'fashion_mnist':
            (x_train, y_train), (x_test, y_test) = utils.load_fashion_mnist()
            if set == 'train':
                self.x = x_train
                self.y = y_train
            else:
                self.x = x_test
                self.y = y_test
        elif dataset == 'mnist':
            (x_train, y_train), (x_test, y_test) = utils.load_mnist()
            if set == 'train':
                self.x = x_train
                self.y = y_train
            elif set == 'test':
                self.x = x_test
                self.y = y_test
        elif dataset == 'cifar10':
            self.x, self.y = utils.load_cifar('data/cifar', subset=set)
            self.x = np.transpose(self.x, (0, 2, 3, 1))  # (N,3,32,32) -> (N,32,32,3)
            self.x = np.float32(self.x)
            self.img_shape = self.x.shape[1:]
            self.input_dim = np.prod(self.img_shape)
        else:
            raise ValueError('wrong dataset name')

        if dataset == 'mnist' or dataset == 'fashion_mnist':
            self.input_dim = self.x.shape[-1]
            self.img_shape = (int(np.sqrt(self.input_dim)), int(np.sqrt(self.input_dim)), 1)
            self.x = np.reshape(self.x, (self.x.shape[0],) + self.img_shape)
            self.x = np.float32(self.x)

        self.classes = np.unique(self.y)
        self.n_classes = len(self.classes)
        self.y2idxs = {}
        self.nsamples = 0
        for i in list(self.classes):
            self.y2idxs[i] = np.where(self.y == i)[0]
            self.nsamples += len(self.y2idxs[i])

        self.batch_size = batch_size
        self.seq_len = seq_len
        self.rng = np.random.RandomState(42) if not rng else rng
        self.infinite = infinite
        self.digits = digits if digits is not None else np.arange(self.n_classes)

        print(set, 'dataset size:', self.x.shape)
        print(set, 'N classes', self.n_classes)
        print(set, 'min, max', np.min(self.x), np.max(self.x))
        print(set, 'nsamples', self.nsamples)
        print(set, 'digits', self.digits)
        print('--------------') 
Developer: IraKorshunova | Project: bruno | Lines: 56 | Source: data_iter.py


Note: The utils.load_mnist examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and distribution and use should follow each project's license. Do not republish without permission.