本文整理汇总了Python中keras.datasets.imdb.load_data方法的典型用法代码示例。如果您正苦于以下问题:Python imdb.load_data方法的具体用法?Python imdb.load_data怎么用?Python imdb.load_data使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras.datasets.imdb
的用法示例。
在下文中一共展示了imdb.load_data方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_cifar
# 需要导入模块: from keras.datasets import imdb [as 别名]
# 或者: from keras.datasets.imdb import load_data [as 别名]
def test_cifar(self):
    """Smoke-test the CIFAR-10/CIFAR-100 loaders and print each split's shape."""
    loaders = [
        ('cifar10', lambda: cifar10.load_data()),
        ('cifar100 fine', lambda: cifar100.load_data('fine')),
        ('cifar100 coarse', lambda: cifar100.load_data('coarse')),
    ]
    for label, loader in loaders:
        print(label)
        (X_train, y_train), (X_test, y_test) = loader()
        # Same report order as before: train X, test X, train y, test y.
        for arr in (X_train, X_test, y_train, y_test):
            print(arr.shape)
示例2: test_imdb
# 需要导入模块: from keras.datasets import imdb [as 别名]
# 或者: from keras.datasets.imdb import load_data [as 别名]
def test_imdb(self):
    """Smoke-test that the IMDB sentiment dataset downloads and unpacks."""
    print('imdb')
    (x_tr, y_tr), (x_te, y_te) = imdb.load_data()
示例3: setup_cifar
# 需要导入模块: from keras.datasets import imdb [as 别名]
# 或者: from keras.datasets.imdb import load_data [as 别名]
def setup_cifar(train, epoch_size):
    """Prepare CIFAR inputs for a benchmark run.

    Returns (x_train, y_train); y_train is None in inference mode.
    """
    if not train:
        # Inference: tile a tiny bundled 16-image fixture up to epoch_size rows.
        this_dir = os.path.dirname(os.path.abspath(__file__))
        fixture = np.load(os.path.join(this_dir, 'cifar16.npy'))
        return fixture.repeat(1 + epoch_size // 16, axis=0)[:epoch_size], None

    # Training: fetch the real dataset and one-hot encode the labels.
    from keras.datasets import cifar10
    from keras.utils.np_utils import to_categorical
    click.echo('Loading CIFAR data')
    (x_train, label_cats), (_, _) = cifar10.load_data()
    x_train = x_train[:epoch_size]
    # num_classes=1000 widens the one-hot vectors far beyond CIFAR-10's 10
    # classes — presumably to mimic an ImageNet-sized output layer; confirm.
    y_train = to_categorical(label_cats[:epoch_size], num_classes=1000)
    return x_train, y_train
示例4: setup_imdb
# 需要导入模块: from keras.datasets import imdb [as 别名]
# 或者: from keras.datasets.imdb import load_data [as 别名]
def setup_imdb(train, epoch_size):
    """Prepare IMDB inputs for a benchmark run.

    Returns (x_train, y_train); y_train is None in inference mode.
    """
    if not train:
        # Inference: tile a tiny bundled 16-row fixture up to epoch_size rows.
        this_dir = os.path.dirname(os.path.abspath(__file__))
        fixture = np.load(os.path.join(this_dir, 'imdb16.npy'))
        return fixture.repeat(1 + epoch_size // 16, axis=0)[:epoch_size], None

    # Training: fetch the real dataset and pad every review to a fixed length.
    from keras.datasets import imdb
    from keras.preprocessing import sequence
    click.echo('Loading IMDB data')
    # imdb_max_features / imdb_max_length are module-level settings defined elsewhere.
    (x_train, y_train), (_, _) = imdb.load_data(num_words=imdb_max_features)
    x_train = sequence.pad_sequences(x_train, maxlen=imdb_max_length)[:epoch_size]
    return x_train, y_train[:epoch_size]
示例5: data
# 需要导入模块: from keras.datasets import imdb [as 别名]
# 或者: from keras.datasets.imdb import load_data [as 别名]
def data():
    """Load the IMDB dataset and pad all sequences to a fixed length.

    Returns:
        (X_train, X_test, y_train, y_test, max_features, maxlen)
    """
    maxlen = 100
    max_features = 20000
    print('Loading data...')
    # `num_words` replaces the Keras 1.x keyword `nb_words`, which was removed
    # in Keras 2 and raises a TypeError there; this also matches the other
    # examples in this file.
    (X_train, y_train), (X_test, y_test) = imdb.load_data(num_words=max_features)
    print(len(X_train), 'train sequences')
    print(len(X_test), 'test sequences')
    print("Pad sequences (samples x time)")
    X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
    X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
    print('X_train shape:', X_train.shape)
    print('X_test shape:', X_test.shape)
    return X_train, X_test, y_train, y_test, max_features, maxlen
示例6: data
# 需要导入模块: from keras.datasets import imdb [as 别名]
# 或者: from keras.datasets.imdb import load_data [as 别名]
def data(max_features=5000, maxlen=400):
    """Load a small IMDB subset (1000 train / 100 test), padded to maxlen."""
    print('Loading data...')
    (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
    # Keep only a small slice of each split to make downstream runs fast.
    x_train, y_train = x_train[:1000], y_train[:1000]
    x_test, y_test = x_test[:100], y_test[:100]
    print(len(x_train), 'train sequences')
    print(len(x_test), 'test sequences')
    print('Pad sequences (samples x time)')
    x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
    x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
    print('x_train shape:', x_train.shape)
    print('x_test shape:', x_test.shape)
    # The extra list is dummy payload returned alongside the training data.
    return (x_train, y_train, [1, 2, 3, "dummy_data"]), (x_test, y_test)
示例7: load_retures_keras
# 需要导入模块: from keras.datasets import imdb [as 别名]
# 或者: from keras.datasets.imdb import load_data [as 别名]
def load_retures_keras():
    """Return the whole Reuters corpus as a binary bag-of-words matrix + labels.

    NOTE(review): 'retures' is a typo for 'reuters'; kept so callers don't break.
    """
    from keras.datasets import reuters
    from keras.preprocessing.text import Tokenizer

    max_words = 1000
    print('Loading data...')
    # test_split=0. keeps every sample in the training split.
    (x, y), (_, _) = reuters.load_data(num_words=max_words, test_split=0.)
    print(len(x), 'train sequences')
    print(np.max(y) + 1, 'classes')
    print('Vectorizing sequence data...')
    x = Tokenizer(num_words=max_words).sequences_to_matrix(x, mode='binary')
    print('x_train shape:', x.shape)
    return x.astype(float), y
示例8: load_imdb
# 需要导入模块: from keras.datasets import imdb [as 别名]
# 或者: from keras.datasets.imdb import load_data [as 别名]
def load_imdb():
    """Return train+test IMDB combined as a binary bag-of-words matrix + labels."""
    from keras.datasets import imdb
    from keras.preprocessing.text import Tokenizer

    max_words = 1000
    print('Loading data...')
    (x1, y1), (x2, y2) = imdb.load_data(num_words=max_words)
    # Merge both splits into one unlabeled-for-supervision corpus.
    x = np.concatenate((x1, x2))
    y = np.concatenate((y1, y2))
    print(len(x), 'train sequences')
    print(np.max(y) + 1, 'classes')
    print('Vectorizing sequence data...')
    x = Tokenizer(num_words=max_words).sequences_to_matrix(x, mode='binary')
    print('x_train shape:', x.shape)
    return x.astype(float), y
示例9: main
# 需要导入模块: from keras.datasets import imdb [as 别名]
# 或者: from keras.datasets.imdb import load_data [as 别名]
def main():
    """Train a sentiment model end-to-end, save it, and report test metrics."""
    # load_data / pad_sequences / build_network / create_callbacks are
    # defined elsewhere in this module.
    data = load_data(20000)
    data = pad_sequences(data)
    net = build_network(vocab_size=data["vocab_size"],
                        embedding_dim=100,
                        sequence_length=data["sequence_length"])
    net.fit(x=data["X_train"], y=data["y_train"],
            batch_size=32,
            epochs=10,
            validation_data=(data["X_test"], data["y_test"]),
            callbacks=create_callbacks("sentiment"))
    net.save("sentiment.h5")
    loss, accuracy = net.evaluate(data["X_test"], data["y_test"], batch_size=32)
    print('Test loss:', loss)
    print('Test accuracy:', accuracy)
示例10: test_reuters
# 需要导入模块: from keras.datasets import imdb [as 别名]
# 或者: from keras.datasets.imdb import load_data [as 别名]
def test_reuters(self):
    """Smoke-test that the Reuters newswire dataset downloads and unpacks."""
    print('reuters')
    (x_tr, y_tr), (x_te, y_te) = reuters.load_data()
示例11: test_mnist
# 需要导入模块: from keras.datasets import imdb [as 别名]
# 或者: from keras.datasets.imdb import load_data [as 别名]
def test_mnist(self):
    """Download MNIST and print the shape of each split."""
    print('mnist')
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    # Same report order as before: train X, test X, train y, test y.
    for part in (X_train, X_test, y_train, y_test):
        print(part.shape)
示例12: test_cifar
# 需要导入模块: from keras.datasets import imdb [as 别名]
# 或者: from keras.datasets.imdb import load_data [as 别名]
def test_cifar():
    """Occasionally (~20% of runs) download CIFAR and sanity-check split sizes."""
    # Only run data-download tests 20% of the time to speed up frequent testing.
    random.seed(time.time())
    if random.random() <= 0.8:
        return
    for load in (lambda: cifar10.load_data(),
                 lambda: cifar100.load_data('fine'),
                 lambda: cifar100.load_data('coarse')):
        (x_train, y_train), (x_test, y_test) = load()
        assert len(x_train) == len(y_train) == 50000
        assert len(x_test) == len(y_test) == 10000
示例13: test_reuters
# 需要导入模块: from keras.datasets import imdb [as 别名]
# 或者: from keras.datasets.imdb import load_data [as 别名]
def test_reuters():
    """Occasionally (~20% of runs) download Reuters and sanity-check it."""
    # Only run data-download tests 20% of the time to speed up frequent testing.
    random.seed(time.time())
    if random.random() <= 0.8:
        return
    (x_train, y_train), (x_test, y_test) = reuters.load_data()
    assert len(x_train) == len(y_train)
    assert len(x_test) == len(y_test)
    # 11228 is the total corpus size the loader is expected to produce.
    assert len(x_train) + len(x_test) == 11228
    (x_train, y_train), (x_test, y_test) = reuters.load_data(maxlen=10)
    assert len(x_train) == len(y_train)
    assert len(x_test) == len(y_test)
    assert isinstance(reuters.get_word_index(), dict)
示例14: test_mnist
# 需要导入模块: from keras.datasets import imdb [as 别名]
# 或者: from keras.datasets.imdb import load_data [as 别名]
def test_mnist():
    """Occasionally (~20% of runs) download MNIST and sanity-check split sizes."""
    # Only run data-download tests 20% of the time to speed up frequent testing.
    random.seed(time.time())
    if random.random() <= 0.8:
        return
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    assert len(x_train) == len(y_train) == 60000
    assert len(x_test) == len(y_test) == 10000
示例15: test_imdb
# 需要导入模块: from keras.datasets import imdb [as 别名]
# 或者: from keras.datasets.imdb import load_data [as 别名]
def test_imdb():
    """Occasionally (~20% of runs) download IMDB and sanity-check it."""
    # Only run data-download tests 20% of the time to speed up frequent testing.
    random.seed(time.time())
    if random.random() <= 0.8:
        return
    imdb.load_data()  # full corpus: just verify it loads and unpacks
    (x_train, y_train), (x_test, y_test) = imdb.load_data(maxlen=40)
    assert len(x_train) == len(y_train)
    assert len(x_test) == len(y_test)
    assert isinstance(imdb.get_word_index(), dict)