

Python utils.load_mnist Method Code Examples

This article collects typical usage examples of the Python utils.load_mnist method. If you are wondering what utils.load_mnist does, how to call it, or what real-world uses look like, the hand-picked examples below should help. You can also explore further usage examples from the utils module these methods belong to.


Eight code examples of the utils.load_mnist method are shown below, sorted by popularity by default.
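
Before the examples, here is a minimal sketch of what a load_mnist helper of this kind might look like. This sketch is an assumption for illustration only: each project below ships its own version, and the signatures differ (some return a 4-tuple of arrays, others nested (train, test) pairs or a single split selected by a flag).

import gzip
import os
import numpy as np

def load_mnist(path='data/mnist'):
    # Hypothetical helper: read the standard MNIST IDX files from `path` and
    # return flattened float32 images in [0, 1] plus integer labels.
    def _images(fname, n):
        with gzip.open(os.path.join(path, fname), 'rb') as f:
            data = np.frombuffer(f.read(), np.uint8, offset=16)
        return (data.reshape(n, 784) / 255.0).astype(np.float32)

    def _labels(fname):
        with gzip.open(os.path.join(path, fname), 'rb') as f:
            return np.frombuffer(f.read(), np.uint8, offset=8).astype(np.int64)

    X_train = _images('train-images-idx3-ubyte.gz', 60000)
    y_train = _labels('train-labels-idx1-ubyte.gz')
    X_test = _images('t10k-images-idx3-ubyte.gz', 10000)
    y_test = _labels('t10k-labels-idx1-ubyte.gz')
    return X_train, y_train, X_test, y_test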

Example 1: plot_pca

# Required module: import utils [as alias]
# Or: from utils import load_mnist [as alias]
def plot_pca():
    print('loading data')
    X_train, y_train, X_test, y_test = utils.load_mnist()
    pca = PCA(n_components=2)

    print('transforming training data')
    Z_train = pca.fit_transform(X_train)

    print('transforming test data')
    Z_test = pca.transform(X_test)

    plot(Z_train, y_train, Z_test, y_test,
         filename='pca.png', title='projected onto principal components') 
Developer: hjweide, Project: adversarial-autoencoder, Lines of code: 15, Source file: plot.py

Example 2: plot_autoencoder

# Required module: import utils [as alias]
# Or: from utils import load_mnist [as alias]
def plot_autoencoder(weightsfile):
    print('building model')
    layers = model.build_model()

    batch_size = 128

    print('compiling theano function')
    encoder_func = theano_funcs.create_encoder_func(layers)

    print('loading weights from %s' % (weightsfile))
    model.load_weights([
        layers['l_decoder_out'],
        layers['l_discriminator_out'],
    ], weightsfile)

    print('loading data')
    X_train, y_train, X_test, y_test = utils.load_mnist()

    train_datapoints = []
    print('transforming training data')
    for train_idx in get_batch_idx(X_train.shape[0], batch_size):
        X_train_batch = X_train[train_idx]
        train_batch_codes = encoder_func(X_train_batch)
        train_datapoints.append(train_batch_codes)

    test_datapoints = []
    print('transforming test data')
    for test_idx in get_batch_idx(X_test.shape[0], batch_size):
        X_test_batch = X_test[test_idx]
        test_batch_codes = encoder_func(X_test_batch)
        test_datapoints.append(test_batch_codes)

    Z_train = np.vstack(train_datapoints)
    Z_test = np.vstack(test_datapoints)

    plot(Z_train, y_train, Z_test, y_test,
         filename='adversarial_train_val.png',
         title='projected onto latent space of autoencoder') 
Developer: hjweide, Project: adversarial-autoencoder, Lines of code: 40, Source file: plot.py
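
The get_batch_idx helper used in Example 2 is not shown in the snippet. A plausible minimal version, assumed here and not necessarily the project's actual implementation, simply yields contiguous index ranges:

def get_batch_idx(n, batch_size):
    # Yield slices covering indices 0..n-1 in contiguous chunks of batch_size;
    # the final chunk may be smaller than batch_size.
    for start in range(0, n, batch_size):
        yield slice(start, min(start + batch_size, n))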

Example 3: create_inputs

# Required module: import utils [as alias]
# Or: from utils import load_mnist [as alias]
def create_inputs():
    trX, trY = load_mnist(cfg.dataset, cfg.is_training)

    num_pre_threads = cfg.thread_per_gpu*cfg.num_gpu
    data_queue = tf.train.slice_input_producer([trX, trY], capacity=64*num_pre_threads)
    X, Y = tf.train.shuffle_batch(data_queue, num_threads=num_pre_threads,
                                  batch_size=cfg.batch_size_per_gpu*cfg.num_gpu,
                                  capacity=cfg.batch_size_per_gpu*cfg.num_gpu * 64,
                                  min_after_dequeue=cfg.batch_size_per_gpu*cfg.num_gpu * 32,
                                  allow_smaller_final_batch=False)

    return (X, Y) 
Developer: bourdakos1, Project: capsule-networks, Lines of code: 14, Source file: distributed_train.py
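
Example 3 uses the TensorFlow 1.x queue-based input pipeline (slice_input_producer and shuffle_batch). For reference only, a roughly equivalent shuffled, fixed-size batching pipeline written against the tf.data API might look like the sketch below; it is not part of the original project.

import tensorflow as tf

def create_inputs_tfdata(trX, trY, batch_size, shuffle_buffer=1024):
    # Build an infinitely repeating, shuffled dataset of (image, label) pairs and
    # emit fixed-size batches (drop_remainder mirrors allow_smaller_final_batch=False).
    ds = tf.data.Dataset.from_tensor_slices((trX, trY))
    ds = ds.shuffle(shuffle_buffer).repeat()
    ds = ds.batch(batch_size, drop_remainder=True)
    return ds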

Example 4: run_mnist

# Required module: import utils [as alias]
# Or: from utils import load_mnist [as alias]
def run_mnist(n_train=None, n_test=None, model_type="normal"):
    datasize = {"n_train": n_train, "n_test": n_test}
    transformer_params = {
        "image_shape": 28,
        "filter_shape_l1": 5, "step_shape_l1": 1, "n_l1_output": 8,
        "filter_shape_l2": 5, "step_shape_l2": 1, "n_l2_output": 4,
        "filter_shape_pooling": 5, "step_shape_pooling": 5
    }
    ensemble_params = {
        "n_estimators" : 40,
        "sampling_ratio" : 0.03,
        "n_jobs" : -1
    }
    dataset = utils.load_mnist()
    run(dataset, datasize, transformer_params, ensemble_params, model_type) 
Developer: IshitaTakeshi, Project: PCANet, Lines of code: 17, Source file: evaluation.py

Example 5: main

# Required module: import utils [as alias]
# Or: from utils import load_mnist [as alias]
def main(_):
    # Load Graph
    capsNet = CapsNet(is_training=False)
    print('[+] Graph is constructed')

    # Load test data
    teX, teY = load_mnist(conf.dataset, is_training=False)

    # Start session
    with capsNet.graph.as_default():
        sv = tf.train.Supervisor(logdir=conf.logdir)
        with sv.managed_session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
            # Restore parameters
            checkpoint_path = tf.train.latest_checkpoint(conf.logdir)
            sv.saver.restore(sess, checkpoint_path)
            print('[+] Graph is restored from ' + checkpoint_path)

            # Make results directory
            if not os.path.exists('results'):
                os.mkdir('results')

            reconstruction_err = []
            classification_acc = []
            for i in range(10000 // conf.batch_size):
                start = i * conf.batch_size
                end = start + conf.batch_size

                # Reconstruction
                recon_imgs = sess.run(capsNet.decoded, {capsNet.x: teX[start:end]})
                recon_imgs = np.reshape(recon_imgs, (conf.batch_size, -1))
                origin_imgs = np.reshape(teX[start:end], (conf.batch_size, -1))
                squared = np.square(recon_imgs - origin_imgs)
                reconstruction_err.append(np.mean(squared))
                if i % 5 == 0:
                    imgs = np.reshape(recon_imgs, (conf.batch_size, 28, 28, 1))
                    size = 6
                    save_images(imgs[0:size * size, :], [size, size], 'results/test_%03d.png' % i)

                # Classification
                cls_result = sess.run(capsNet.preds, {capsNet.x: teX[start:end]})
                cls_answer = teY[start:end]
                cls_acc = np.mean(np.equal(cls_result, cls_answer).astype(np.float32))
                classification_acc.append(cls_acc)

            # Print classification accuracy & reconstruction error
            print('reconstruction_err : ' + str(np.mean(reconstruction_err)))
            print('classification_acc : ' + str(np.mean(classification_acc) * 100)) 
Developer: JunYeopLee, Project: capsule-networks, Lines of code: 49, Source file: eval.py

Example 6: __init__

# Required module: import utils [as alias]
# Or: from utils import load_mnist [as alias]
def __init__(self, args):
        # parameters
        self.epoch = args.epoch
        self.sample_num = 100
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type

        # networks init
        self.G = generator(self.dataset)
        self.D = discriminator(self.dataset)
        self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            self.BCE_loss = nn.BCELoss().cuda()
        else:
            self.BCE_loss = nn.BCELoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')

        # load mnist
        self.data_X, self.data_Y = utils.load_mnist(args.dataset)
        self.z_dim = 62
        self.y_dim = 10

        # fixed noise & condition
        self.sample_z_ = torch.zeros((self.sample_num, self.z_dim))
        for i in range(10):
            self.sample_z_[i*self.y_dim] = torch.rand(1, self.z_dim)
            for j in range(1, self.y_dim):
                self.sample_z_[i*self.y_dim + j] = self.sample_z_[i*self.y_dim]

        temp = torch.zeros((10, 1))
        for i in range(self.y_dim):
            temp[i, 0] = i

        temp_y = torch.zeros((self.sample_num, 1))
        for i in range(10):
            temp_y[i*self.y_dim: (i+1)*self.y_dim] = temp

        self.sample_y_ = torch.zeros((self.sample_num, self.y_dim))
        self.sample_y_.scatter_(1, temp_y.type(torch.LongTensor), 1)
        if self.gpu_mode:
            self.sample_z_, self.sample_y_ = Variable(self.sample_z_.cuda(), volatile=True), Variable(self.sample_y_.cuda(), volatile=True)
        else:
            self.sample_z_, self.sample_y_ = Variable(self.sample_z_, volatile=True), Variable(self.sample_y_, volatile=True) 
Developer: tangzhenyu, Project: Generative_Model_Zoo, Lines of code: 58, Source file: CGAN.py
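
The fixed-noise and condition setup in Example 6 (repeated in Example 7 below) builds a 10x10 sample grid in which each row shares one noise vector and each column corresponds to one digit class. Assuming that same layout, the tensors can also be built more compactly, for example with torch.nn.functional.one_hot; this is only an illustrative alternative, not the project's code.

import torch
import torch.nn.functional as F

sample_num, z_dim, y_dim = 100, 62, 10
# One noise vector per row of the grid, repeated across the 10 columns.
sample_z = torch.rand(10, 1, z_dim).expand(10, y_dim, z_dim).reshape(sample_num, z_dim)
# Column index = digit class, tiled over the rows, then one-hot encoded.
labels = torch.arange(y_dim).repeat(10)
sample_y = F.one_hot(labels, num_classes=y_dim).float()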

Example 7: __init__

# Required module: import utils [as alias]
# Or: from utils import load_mnist [as alias]
def __init__(self, args):
        # parameters
        self.epoch = args.epoch
        self.sample_num = 100
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.result_dir = args.result_dir
        self.dataset = args.dataset
        self.log_dir = args.log_dir
        self.gpu_mode = args.gpu_mode
        self.model_name = args.gan_type

        # networks init
        self.G = generator(self.dataset)
        self.D = discriminator(self.dataset)
        self.G_optimizer = optim.Adam(self.G.parameters(), lr=args.lrG, betas=(args.beta1, args.beta2))
        self.D_optimizer = optim.Adam(self.D.parameters(), lr=args.lrD, betas=(args.beta1, args.beta2))

        if self.gpu_mode:
            self.G.cuda()
            self.D.cuda()
            self.BCE_loss = nn.BCELoss().cuda()
            self.CE_loss = nn.CrossEntropyLoss().cuda()
        else:
            self.BCE_loss = nn.BCELoss()
            self.CE_loss = nn.CrossEntropyLoss()

        print('---------- Networks architecture -------------')
        utils.print_network(self.G)
        utils.print_network(self.D)
        print('-----------------------------------------------')

        # load mnist
        self.data_X, self.data_Y = utils.load_mnist(args.dataset)
        self.z_dim = 62
        self.y_dim = 10

        # fixed noise & condition
        self.sample_z_ = torch.zeros((self.sample_num, self.z_dim))
        for i in range(10):
            self.sample_z_[i*self.y_dim] = torch.rand(1, self.z_dim)
            for j in range(1, self.y_dim):
                self.sample_z_[i*self.y_dim + j] = self.sample_z_[i*self.y_dim]

        temp = torch.zeros((10, 1))
        for i in range(self.y_dim):
            temp[i, 0] = i

        temp_y = torch.zeros((self.sample_num, 1))
        for i in range(10):
            temp_y[i*self.y_dim: (i+1)*self.y_dim] = temp

        self.sample_y_ = torch.zeros((self.sample_num, self.y_dim))
        self.sample_y_.scatter_(1, temp_y.type(torch.LongTensor), 1)
        if self.gpu_mode:
            self.sample_z_, self.sample_y_ = Variable(self.sample_z_.cuda(), volatile=True), Variable(self.sample_y_.cuda(), volatile=True)
        else:
            self.sample_z_, self.sample_y_ = Variable(self.sample_z_, volatile=True), Variable(self.sample_y_, volatile=True) 
Developer: tangzhenyu, Project: Generative_Model_Zoo, Lines of code: 60, Source file: ACGAN.py

Example 8: __init__

# Required module: import utils [as alias]
# Or: from utils import load_mnist [as alias]
def __init__(self, seq_len, batch_size, dataset='mnist', set='train',
                 rng=None, infinite=True, digits=None):

        if dataset == 'fashion_mnist':
            (x_train, y_train), (x_test, y_test) = utils.load_fashion_mnist()
            if set == 'train':
                self.x = x_train
                self.y = y_train
            else:
                self.x = x_test
                self.y = y_test
        elif dataset == 'mnist':
            (x_train, y_train), (x_test, y_test) = utils.load_mnist()
            if set == 'train':
                self.x = x_train
                self.y = y_train
            elif set == 'test':
                self.x = x_test
                self.y = y_test
        elif dataset == 'cifar10':
            self.x, self.y = utils.load_cifar('data/cifar', subset=set)
            self.x = np.transpose(self.x, (0, 2, 3, 1))  # (N,3,32,32) -> (N,32,32,3)
            self.x = np.float32(self.x)
            self.img_shape = self.x.shape[1:]
            self.input_dim = np.prod(self.img_shape)
        else:
            raise ValueError('wrong dataset name')

        if dataset == 'mnist' or dataset == 'fashion_mnist':
            self.input_dim = self.x.shape[-1]
            self.img_shape = (int(np.sqrt(self.input_dim)), int(np.sqrt(self.input_dim)), 1)
            self.x = np.reshape(self.x, (self.x.shape[0],) + self.img_shape)
            self.x = np.float32(self.x)

        self.classes = np.unique(self.y)
        self.n_classes = len(self.classes)
        self.y2idxs = {}
        self.nsamples = 0
        for i in list(self.classes):
            self.y2idxs[i] = np.where(self.y == i)[0]
            self.nsamples += len(self.y2idxs[i])

        self.batch_size = batch_size
        self.seq_len = seq_len
        self.rng = np.random.RandomState(42) if not rng else rng
        self.infinite = infinite
        self.digits = digits if digits is not None else np.arange(self.n_classes)

        print(set, 'dataset size:', self.x.shape)
        print(set, 'N classes', self.n_classes)
        print(set, 'min, max', np.min(self.x), np.max(self.x))
        print(set, 'nsamples', self.nsamples)
        print(set, 'digits', self.digits)
        print('--------------') 
Developer: IraKorshunova, Project: bruno, Lines of code: 56, Source file: data_iter.py


Note: the utils.load_mnist examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, who retain copyright of the source code; please consult each project's License before using or redistributing the code. Do not reproduce this article without permission.