

Python cifar10.load_data Method Code Examples

This article collects typical usage examples of the keras.datasets.cifar10.load_data method in Python. If you are wondering how exactly cifar10.load_data is used, how to call it, or where to find working examples, the curated code samples below may help. You can also explore further usage examples from the keras.datasets.cifar10 module that this method belongs to.


A total of 15 code examples of the cifar10.load_data method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
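Before the examples, here is a minimal sketch of the basic call, assuming Keras (or tf.keras) is installed and can download the dataset; the shapes shown are the standard CIFAR-10 train/test splits.

# Minimal sketch: load CIFAR-10 and inspect the returned arrays.
from keras.datasets import cifar10

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

print(x_train.shape)  # (50000, 32, 32, 3), uint8 pixel values in [0, 255]
print(y_train.shape)  # (50000, 1), integer class labels 0-9
print(x_test.shape)   # (10000, 32, 32, 3)
print(y_test.shape)   # (10000, 1)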

Example 1: load_and_preprocess_data_3

# Required import: from keras.datasets import cifar10 [as alias]
# Or: from keras.datasets.cifar10 import load_data [as alias]
def load_and_preprocess_data_3():
    # The data, shuffled and split between train and test sets:
    (X_train, y_train), (x_test, y_test) = cifar10.load_data()
    logging.debug('X_train shape: {}'.format(X_train.shape))
    logging.debug('train samples: {}'.format(X_train.shape[0]))
    logging.debug('test samples: {}'.format(x_test.shape[0]))

    # Convert class vectors to binary class matrices (num_classes is assumed to be defined at module level in the source file).
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    X_train = X_train.astype('float32')
    x_test = x_test.astype('float32')
    X_train /= 255
    x_test /= 255

    input_shape = X_train[0].shape
    logging.debug('input_shape {}'.format(input_shape))
    input_shape = X_train.shape[1:]
    logging.debug('input_shape {}'.format(input_shape))

    return X_train, x_test, y_train, y_test, input_shape 
Author: abhishekrana, Project: DeepFashion, Lines: 24, Source: cnn.py

Example 2: test_cifar

# Required import: from keras.datasets import cifar10 [as alias]
# Or: from keras.datasets.cifar10 import load_data [as alias]
def test_cifar(self):
        print('cifar10')
        (X_train, y_train), (X_test, y_test) = cifar10.load_data()
        print(X_train.shape)
        print(X_test.shape)
        print(y_train.shape)
        print(y_test.shape)

        print('cifar100 fine')
        (X_train, y_train), (X_test, y_test) = cifar100.load_data('fine')
        print(X_train.shape)
        print(X_test.shape)
        print(y_train.shape)
        print(y_test.shape)

        print('cifar100 coarse')
        (X_train, y_train), (X_test, y_test) = cifar100.load_data('coarse')
        print(X_train.shape)
        print(X_test.shape)
        print(y_train.shape)
        print(y_test.shape) 
Author: lllcho, Project: CAPTCHA-breaking, Lines: 23, Source: test_datasets.py

Example 3: test_imdb

# Required import: from keras.datasets import cifar10 [as alias]
# Or: from keras.datasets.cifar10 import load_data [as alias]
def test_imdb(self):
        print('imdb')
        (X_train, y_train), (X_test, y_test) = imdb.load_data() 
Author: lllcho, Project: CAPTCHA-breaking, Lines: 5, Source: test_datasets.py

Example 4: get_cifar10

# Required import: from keras.datasets import cifar10 [as alias]
# Or: from keras.datasets.cifar10 import load_data [as alias]
def get_cifar10():
    """Retrieve the CIFAR dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 64
    input_shape = (3072,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = x_train.reshape(50000, 3072)
    x_test = x_test.reshape(10000, 3072)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test) 
Author: harvitronix, Project: super-simple-distributed-keras, Lines: 23, Source: datasets.py

Example 5: get_mnist

# Required import: from keras.datasets import cifar10 [as alias]
# Or: from keras.datasets.cifar10 import load_data [as alias]
def get_mnist():
    """Retrieve the MNIST dataset and process the data."""
    # Set defaults.
    nb_classes = 10
    batch_size = 128
    input_shape = (784,)

    # Get the data.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(60000, 784)
    x_test = x_test.reshape(10000, 784)
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = to_categorical(y_train, nb_classes)
    y_test = to_categorical(y_test, nb_classes)

    return (nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test) 
Author: harvitronix, Project: super-simple-distributed-keras, Lines: 23, Source: datasets.py
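Example 4 and Example 5 above return everything a caller needs to build a matching dense classifier. The following is a hypothetical usage sketch; the model architecture and training settings are illustrative and not taken from the original project.

# Hypothetical usage sketch (not part of the original project): feed the tuple
# returned by get_cifar10() into a small fully connected classifier.
from keras.models import Sequential
from keras.layers import Dense

nb_classes, batch_size, input_shape, x_train, x_test, y_train, y_test = get_cifar10()

model = Sequential([
    Dense(512, activation='relu', input_shape=input_shape),
    Dense(nb_classes, activation='softmax'),
])
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(x_train, y_train, batch_size=batch_size, epochs=5,
          validation_data=(x_test, y_test))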

Example 6: load_dataset

# Required import: from keras.datasets import cifar10 [as alias]
# Or: from keras.datasets.cifar10 import load_data [as alias]
def load_dataset():
    # Load the dataset from Keras
    from keras.datasets import cifar10
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()

    # Preprocessing the dataset
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train = preprocess_input(x_train)
    x_test = preprocess_input(x_test)
    x_train = x_train.reshape(-1, 32, 32, 3).astype('float32') 
    x_test = x_test.reshape(-1, 32, 32, 3).astype('float32')
    y_train = to_categorical(y_train.astype('float32'))
    y_test = to_categorical(y_test.astype('float32'))

    return (x_train, y_train), (x_test, y_test) 
Author: ssrp, Project: Multi-level-DCNet, Lines: 18, Source: 3leveldcnet.py

Example 7: load_mnist

# Required import: from keras.datasets import cifar10 [as alias]
# Or: from keras.datasets.cifar10 import load_data [as alias]
def load_mnist(size=64):
    (train_data, train_labels), (test_data, test_labels) = mnist.load_data()
    train_data = normalize(train_data)
    test_data = normalize(test_data)

    x = np.concatenate((train_data, test_data), axis=0)
    # y = np.concatenate((train_labels, test_labels), axis=0).astype(np.int)

    seed = 777
    np.random.seed(seed)
    np.random.shuffle(x)
    # np.random.seed(seed)
    # np.random.shuffle(y)
    # x = np.expand_dims(x, axis=-1)

    # Note: scipy.misc.imresize was removed in SciPy 1.3; newer code typically resizes with PIL or skimage instead.
    x = np.asarray([scipy.misc.imresize(x_img, [size, size]) for x_img in x])
    x = np.expand_dims(x, axis=-1)
    return x 
Author: taki0112, Project: Self-Attention-GAN-Tensorflow, Lines: 20, Source: utils.py

Example 8: load_cifar10

# Required import: from keras.datasets import cifar10 [as alias]
# Or: from keras.datasets.cifar10 import load_data [as alias]
def load_cifar10(size=64) :
    (train_data, train_labels), (test_data, test_labels) = cifar10.load_data()
    train_data = normalize(train_data)
    test_data = normalize(test_data)

    x = np.concatenate((train_data, test_data), axis=0)
    # y = np.concatenate((train_labels, test_labels), axis=0).astype(np.int)

    seed = 777
    np.random.seed(seed)
    np.random.shuffle(x)
    # np.random.seed(seed)
    # np.random.shuffle(y)

    # Note: scipy.misc.imresize was removed in SciPy 1.3; newer code typically resizes with PIL or skimage instead.
    x = np.asarray([scipy.misc.imresize(x_img, [size, size]) for x_img in x])

    return x 
Author: taki0112, Project: Self-Attention-GAN-Tensorflow, Lines: 19, Source: utils.py

Example 9: __init__

# Required import: from keras.datasets import cifar10 [as alias]
# Or: from keras.datasets.cifar10 import load_data [as alias]
def __init__(self, model, n_labeled_sample, batch_size):
        self.n_labeled_sample = n_labeled_sample
        self.batch_size = batch_size
        self.model = model
        self.n_classes = 10
        # Create the labeled/unlabeled split
        (X_train, y_train), (self.X_test, self.y_test) = cifar10.load_data()
        indices = np.arange(X_train.shape[0])
        np.random.shuffle(indices)
        self.X_train_labeled = X_train[indices[:n_labeled_sample]]
        self.y_train_labeled = y_train[indices[:n_labeled_sample]]
        self.X_train_unlabeled = X_train[indices[n_labeled_sample:]]
        self.y_train_unlabeled_groundtruth = y_train[indices[n_labeled_sample:]]
        # Predicted labels for the unlabeled samples (randomly initialized here)
        self.y_train_unlabeled_prediction = np.random.randint(
            10, size=(self.y_train_unlabeled_groundtruth.shape[0], 1))
        # steps_per_epoch
        self.train_steps_per_epoch = X_train.shape[0] // batch_size
        self.test_stepes_per_epoch = self.X_test.shape[0] // batch_size
        # Weight of the unlabeled (pseudo-label) loss term
        self.alpha_t = 0.0
        # History of labeled/unlabeled accuracy
        self.unlabeled_accuracy = []
        self.labeled_accuracy = [] 
Author: koshian2, Project: Pseudo-Label-Keras, Lines: 26, Source: mobilenet_pseudo_cifar.py

Example 10: __init__

# Required import: from keras.datasets import cifar10 [as alias]
# Or: from keras.datasets.cifar10 import load_data [as alias]
def __init__(self, model, n_labeled_sample, batch_size):
        self.n_labeled_sample = n_labeled_sample
        self.batch_size = batch_size
        self.model = model
        self.n_classes = 10
        # Create the labeled/unlabeled split
        (X_train, y_train), (self.X_test, self.y_test) = cifar10.load_data()
        indices = np.arange(X_train.shape[0])
        np.random.shuffle(indices)
        self.X_train_labeled = X_train[indices[:n_labeled_sample]]
        self.y_train_labeled = y_train[indices[:n_labeled_sample]]
        self.X_train_unlabeled = X_train[indices[n_labeled_sample:]]
        self.y_train_unlabeled_groundtruth = y_train[indices[n_labeled_sample:]]
        # Predicted labels for the unlabeled samples (randomly initialized here)
        self.y_train_unlabeled_prediction = np.random.randint(
            10, size=(self.y_train_unlabeled_groundtruth.shape[0], 1))
        # steps_per_epoch
        self.train_steps_per_epoch = X_train.shape[0] // batch_size
        self.test_stepes_per_epoch = self.X_test.shape[0] // batch_size
        # Weight of the unlabeled (pseudo-label) loss term
        self.alpha_t = 0.05
        # History of labeled/unlabeled accuracy
        self.unlabeled_accuracy = []
        self.labeled_accuracy = [] 
Author: koshian2, Project: Pseudo-Label-Keras, Lines: 26, Source: pseudo_pretrain_cifar.py

Example 11: generate_training_data

# Required import: from keras.datasets import cifar10 [as alias]
# Or: from keras.datasets.cifar10 import load_data [as alias]
def generate_training_data(data='mnist'):
    if data == 'mnist':
        (X_train, _), (_, _) = mnist.load_data()
        X_train = np.expand_dims(X_train, -1) / 255.
    elif data == 'cifar':
        (X_train, _), (_, _) = cifar10.load_data()
        X_train = X_train / 255.
    else:
        raise ValueError('data should be "mnist" or "cifar", got '
                         '"%s".' % data)

    # Downsamples by averaging adjacent pixels.
    X_low_dim = mean_bins(X_train)

    return X_low_dim, X_train 
Author: codekansas, Project: gandlf, Lines: 17, Source: upsample_gan.py

Example 12: test_reuters

# Required import: from keras.datasets import cifar10 [as alias]
# Or: from keras.datasets.cifar10 import load_data [as alias]
def test_reuters(self):
        print('reuters')
        (X_train, y_train), (X_test, y_test) = reuters.load_data() 
Author: lllcho, Project: CAPTCHA-breaking, Lines: 5, Source: test_datasets.py

Example 13: test_mnist

# Required import: from keras.datasets import cifar10 [as alias]
# Or: from keras.datasets.cifar10 import load_data [as alias]
def test_mnist(self):
        print('mnist')
        (X_train, y_train), (X_test, y_test) = mnist.load_data()
        print(X_train.shape)
        print(X_test.shape)
        print(y_train.shape)
        print(y_test.shape) 
Author: lllcho, Project: CAPTCHA-breaking, Lines: 9, Source: test_datasets.py

Example 14: usps_to_mnist

# Required import: from keras.datasets import cifar10 [as alias]
# Or: from keras.datasets.cifar10 import load_data [as alias]
def usps_to_mnist():
    from DatasetLoad import usps_digit_dataload
    source_traindata, source_trainlabel, source_testdata, source_testlabel = usps_digit_dataload()
    # Shift labels down by one so classes are 0-indexed.
    source_trainlabel = source_trainlabel - 1
    source_testlabel = source_testlabel - 1

    # 2d to 3d for CNN
    source_traindata = source_traindata.reshape(-1, 16, 16,1)
    source_testdata = source_testdata.reshape(-1,16, 16,1)

    from preprocess import zero_mean_unitvarince, resize_data

    source_traindata = zero_mean_unitvarince(source_traindata, scaling=True)
    source_testdata = zero_mean_unitvarince(source_testdata, scaling=True)

    
    #
    from keras.datasets import mnist
    (target_traindata, target_trainlabel), (target_testdata, target_testlabel) = mnist.load_data()
    target_size = target_traindata.shape
    
    resize = True
    resize_size =16

    if resize == True:
       target_traindata = resize_data(target_traindata, resize_size=resize_size)
       target_testdata = resize_data(target_testdata, resize_size=resize_size)
    
    target_size = target_traindata.shape
    
    target_traindata = zero_mean_unitvarince(target_traindata,scaling=True)
    target_testdata = zero_mean_unitvarince(target_testdata,scaling=True)
    
    
    target_traindata = target_traindata.reshape(-1,target_size[1],target_size[2],1)
    target_testdata =target_testdata.reshape(-1,target_size[1],target_size[2],1)
    
    return (source_traindata, source_trainlabel, source_testdata, source_testlabel), (target_traindata, target_trainlabel, target_testdata, target_testlabel)
    

#%% MNIST MNISTM 
Author: bbdamodaran, Project: deepJDOT, Lines: 43, Source: da_dataload.py

Example 15: mnist_to_mnistm

# Required import: from keras.datasets import cifar10 [as alias]
# Or: from keras.datasets.cifar10 import load_data [as alias]
def mnist_to_mnistm():
    from keras.datasets import mnist
    (source_traindata, source_trainlabel), (source_testdata, source_testlabel) = mnist.load_data()
    
    source_size = source_traindata.shape
    resize = False
    resize_size =32
    from preprocess import zero_mean_unitvarince,resize_data
    if resize == True:
       source_traindata = resize_data(source_traindata, resize_size=resize_size)
       source_testdata = resize_data(source_testdata, resize_size=resize_size)
    
    source_size = source_traindata.shape
    
    source_traindata = zero_mean_unitvarince(source_traindata,scaling=True)
    source_testdata = zero_mean_unitvarince(source_testdata,scaling=True)
    
    convert_rgb=1
    if convert_rgb:
        source_traindata = np.stack((source_traindata,source_traindata,source_traindata), axis=3)
        source_testdata = np.stack((source_testdata,source_testdata,source_testdata), axis=3)
        
    from DatasetLoad import mnist_m_dataload
    from skimage.color import rgb2gray
    target_traindata, target_trainlabel, target_testdata, target_testlabel= mnist_m_dataload()
    target_size = target_traindata.shape
    resize = False
    resize_size =28
    
    if resize == True:
       target_traindata = resize_data(target_traindata, resize_size=resize_size)
       target_testdata = resize_data(target_testdata, resize_size=resize_size)
    
    target_size = target_traindata.shape
    
    target_traindata = zero_mean_unitvarince(target_traindata,scaling=True)
    target_testdata = zero_mean_unitvarince(target_testdata,scaling=True)
    
    return (source_traindata, source_trainlabel, source_testdata, source_testlabel), (target_traindata, target_trainlabel, target_testdata, target_testlabel)
    
#%% 
Author: bbdamodaran, Project: deepJDOT, Lines: 43, Source: da_dataload.py


Note: The keras.datasets.cifar10.load_data examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. For distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.