This article collects and summarizes typical usage examples of the Python method tensorflow.keras.utils.to_categorical. If you have been wondering what exactly utils.to_categorical does and how to call it, the curated code examples below may help. You can also explore the module the method belongs to, tensorflow.keras.utils, for related utilities.
The following 15 code examples of utils.to_categorical are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
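Before diving into the examples, here is a minimal, self-contained sketch of what to_categorical does: it converts a vector of integer class labels into a one-hot encoded matrix. The label values and num_classes below are purely illustrative.

import numpy as np
from tensorflow.keras.utils import to_categorical

labels = np.array([0, 2, 1, 2])                  # integer class labels (illustrative)
one_hot = to_categorical(labels, num_classes=3)  # one row per label, one column per class
print(one_hot)
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]
#  [0. 0. 1.]]

If num_classes is omitted, it is inferred from the data as max(label) + 1.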
Example 1: segment_objects
# Required import: from tensorflow.keras import utils [as alias]
# Or: from tensorflow.keras.utils import to_categorical [as alias]
def segment_objects(self, image, normalized=True):
    """Run segmentation prediction for a given image

    Arguments:
        image (tensor): Image loaded in a numpy tensor.
            RGB components range is [0.0, 1.0]
        normalized (Bool): Use normalized=True for
            pixel-wise categorical prediction. False if
            segmentation will be displayed in RGB
            image format.
    """
    from tensorflow.keras.utils import to_categorical
    # add a batch dimension, predict, then drop the batch dimension again
    image = np.expand_dims(image, axis=0)
    segmentation = self.fcn.predict(image)
    segmentation = np.squeeze(segmentation, axis=0)
    # per-pixel class index, converted back to a one-hot segmentation map
    segmentation = np.argmax(segmentation, axis=-1)
    segmentation = to_categorical(segmentation,
                                  num_classes=self.n_classes)
    if not normalized:
        # scale to [0, 255] and convert to uint8 for display as an RGB image
        segmentation = segmentation * 255
        segmentation = segmentation.astype('uint8')
    return segmentation
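If the argmax-then-to_categorical round trip above is unfamiliar, here is a minimal sketch using a made-up 2x2 prediction map with 3 classes (shapes and values are illustrative, not part of the original model):

import numpy as np
from tensorflow.keras.utils import to_categorical

# dummy per-pixel softmax output with shape (height, width, n_classes)
pred = np.array([[[0.7, 0.2, 0.1], [0.1, 0.8, 0.1]],
                 [[0.2, 0.3, 0.5], [0.6, 0.3, 0.1]]])
class_map = np.argmax(pred, axis=-1)                    # shape (2, 2): one class index per pixel
one_hot_map = to_categorical(class_map, num_classes=3)  # shape (2, 2, 3): one-hot vector per pixel
print(class_map)          # [[0 1]
                          #  [2 0]]
print(one_hot_map.shape)  # (2, 2, 3)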
Example 2: load_eval_dataset
# Required import: from tensorflow.keras import utils [as alias]
# Or: from tensorflow.keras.utils import to_categorical [as alias]
def load_eval_dataset(self):
    """Pre-load test data for evaluation
    """
    (_, _), (x_test, self.y_test) = \
        self.args.dataset.load_data()
    image_size = x_test.shape[1]
    x_test = np.reshape(x_test,
                        [-1, image_size, image_size, 1])
    x_test = x_test.astype('float32') / 255
    x_eval = np.zeros([x_test.shape[0],
                       *self.train_gen.input_shape])
    for i in range(x_eval.shape[0]):
        x_eval[i] = center_crop(x_test[i])
    self.y_test = to_categorical(self.y_test)
    self.x_test = x_eval
Example 3: test_svm_score_samples
# Required import: from tensorflow.keras import utils [as alias]
# Or: from tensorflow.keras.utils import to_categorical [as alias]
def test_svm_score_samples():
    iris = datasets.load_iris()
    x = iris.data
    y = iris.target
    supervision_metric = 'categorical_hinge'
    ivis_iris = Ivis(k=15, batch_size=16, epochs=5,
                     supervision_metric=supervision_metric)

    # Correctly formatted one-hot labels train successfully
    y = to_categorical(y)
    embeddings = ivis_iris.fit_transform(x, y)
    y_pred = ivis_iris.score_samples(x)

    loss_name = ivis_iris.model_.loss['supervised'].__name__
    assert losses.get(loss_name).__name__ == losses.get(supervision_metric).__name__
    assert ivis_iris.model_.layers[-1].activation.__name__ == 'linear'
    assert ivis_iris.model_.layers[-1].kernel_regularizer is not None
    assert ivis_iris.model_.layers[-1].output_shape[-1] == y.shape[-1]
Example 4: _preprocess_labels
# Required import: from tensorflow.keras import utils [as alias]
# Or: from tensorflow.keras.utils import to_categorical [as alias]
def _preprocess_labels(self, y):
    self.classes_ = unique_labels(y)
    n_labels = len(self.classes_)
    if n_labels == 1:
        raise ValueError("Classifier can't train when only one class "
                         "is present.")
    if self.classes_.dtype in [numpy.int32, numpy.int64]:
        self.label_to_ind_ = {int(lab): ind
                              for ind, lab in enumerate(self.classes_)}
    else:
        self.label_to_ind_ = {lab: ind
                              for ind, lab in enumerate(self.classes_)}
    y_ind = numpy.array(
        [self.label_to_ind_[lab] for lab in y]
    )
    y_ = to_categorical(y_ind)
    if n_labels == 2:
        y_ = y_[:, 1:]  # Keep only indicator of positive class
    return y_
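As a quick illustration of the binary branch at the end of the method above: to_categorical on a two-class label vector returns two columns, and keeping only the second column leaves the indicator of the positive class. The labels below are made up for illustration.

import numpy as np
from tensorflow.keras.utils import to_categorical

y_ind = np.array([0, 1, 1, 0])   # two classes, already mapped to indices 0/1
y_ = to_categorical(y_ind)       # shape (4, 2): one column per class
y_ = y_[:, 1:]                   # shape (4, 1): indicator of the positive class
print(y_.ravel())                # [0. 1. 1. 0.]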
Example 5: test_PREPROCESSOR_patchwisecrop_skipBlanks
# Required import: from tensorflow.keras import utils [as alias]
# Or: from tensorflow.keras.utils import to_categorical [as alias]
def test_PREPROCESSOR_patchwisecrop_skipBlanks(self):
    sample_list = self.data_io3D.get_indiceslist()
    pp = Preprocessor(self.data_io3D, data_aug=None, batch_size=1,
                      analysis="patchwise-crop", patch_shape=(4,4,4))
    pp.patchwise_skip_blanks = True
    batches = pp.run(sample_list[0:3], training=True, validation=False)
    sample = self.data_io3D.sample_loader(sample_list[0], load_seg=True)
    sample.seg_data = to_categorical(sample.seg_data,
                                     num_classes=sample.classes)
    ready_data = pp.analysis_patchwise_crop(sample, data_aug=False)
    self.assertEqual(len(ready_data), 1)
    self.assertEqual(ready_data[0][0].shape, (4,4,4,1))
    self.assertEqual(ready_data[0][1].shape, (4,4,4,3))
Example 6: test_PREPROCESSOR_patchwisegrid_2D
# Required import: from tensorflow.keras import utils [as alias]
# Or: from tensorflow.keras.utils import to_categorical [as alias]
def test_PREPROCESSOR_patchwisegrid_2D(self):
    sample_list = self.data_io2D.get_indiceslist()
    pp = Preprocessor(self.data_io2D, data_aug=None, batch_size=1,
                      analysis="patchwise-grid", patch_shape=(4,4))
    batches = pp.run(sample_list[0:1], training=False, validation=False)
    self.assertEqual(len(batches), 16)
    sample = self.data_io2D.sample_loader(sample_list[0], load_seg=True)
    sample.seg_data = to_categorical(sample.seg_data,
                                     num_classes=sample.classes)
    pp = Preprocessor(self.data_io2D, data_aug=None, batch_size=1,
                      analysis="patchwise-grid", patch_shape=(5,5))
    ready_data = pp.analysis_patchwise_grid(sample, data_aug=False,
                                            training=True)
    self.assertEqual(len(ready_data), 16)
    self.assertEqual(ready_data[0][0].shape, (5,5,1))
    self.assertEqual(ready_data[0][1].shape, (5,5,3))
Example 7: test_PREPROCESSOR_patchwisegrid_3D
# Required import: from tensorflow.keras import utils [as alias]
# Or: from tensorflow.keras.utils import to_categorical [as alias]
def test_PREPROCESSOR_patchwisegrid_3D(self):
    sample_list = self.data_io3D.get_indiceslist()
    pp = Preprocessor(self.data_io3D, data_aug=None, batch_size=1,
                      analysis="patchwise-grid", patch_shape=(4,4,4))
    batches = pp.run(sample_list[0:1], training=False, validation=False)
    self.assertEqual(len(batches), 64)
    sample = self.data_io3D.sample_loader(sample_list[0], load_seg=True)
    sample.seg_data = to_categorical(sample.seg_data,
                                     num_classes=sample.classes)
    pp = Preprocessor(self.data_io3D, data_aug=None, batch_size=1,
                      analysis="patchwise-grid", patch_shape=(5,5,5))
    ready_data = pp.analysis_patchwise_grid(sample, data_aug=False,
                                            training=True)
    self.assertEqual(len(ready_data), 64)
    self.assertEqual(ready_data[0][0].shape, (5,5,5,1))
    self.assertEqual(ready_data[0][1].shape, (5,5,5,3))
Example 8: test_PREPROCESSOR_fullimage_2D
# Required import: from tensorflow.keras import utils [as alias]
# Or: from tensorflow.keras.utils import to_categorical [as alias]
def test_PREPROCESSOR_fullimage_2D(self):
    sample_list = self.data_io2D.get_indiceslist()
    pp = Preprocessor(self.data_io2D, data_aug=None, batch_size=2,
                      analysis="fullimage")
    batches = pp.run(sample_list[0:3], training=True, validation=False)
    self.assertEqual(len(batches), 2)
    batches = pp.run(sample_list[0:1], training=False, validation=False)
    self.assertEqual(len(batches), 1)
    sample = self.data_io2D.sample_loader(sample_list[0], load_seg=True)
    sample.seg_data = to_categorical(sample.seg_data,
                                     num_classes=sample.classes)
    ready_data = pp.analysis_fullimage(sample, data_aug=False,
                                       training=True)
    self.assertEqual(len(ready_data), 1)
    self.assertEqual(ready_data[0][0].shape, (16,16,1))
    self.assertEqual(ready_data[0][1].shape, (16,16,3))
Example 9: test_PREPROCESSOR_fullimage_3D
# Required import: from tensorflow.keras import utils [as alias]
# Or: from tensorflow.keras.utils import to_categorical [as alias]
def test_PREPROCESSOR_fullimage_3D(self):
    sample_list = self.data_io3D.get_indiceslist()
    pp = Preprocessor(self.data_io3D, data_aug=None, batch_size=2,
                      analysis="fullimage")
    batches = pp.run(sample_list[0:3], training=True, validation=False)
    self.assertEqual(len(batches), 2)
    batches = pp.run(sample_list[0:1], training=False, validation=False)
    self.assertEqual(len(batches), 1)
    sample = self.data_io3D.sample_loader(sample_list[0], load_seg=True)
    sample.seg_data = to_categorical(sample.seg_data,
                                     num_classes=sample.classes)
    ready_data = pp.analysis_fullimage(sample, data_aug=False,
                                       training=True)
    self.assertEqual(len(ready_data), 1)
    self.assertEqual(ready_data[0][0].shape, (16,16,16,1))
    self.assertEqual(ready_data[0][1].shape, (16,16,16,3))
Example 10: setUpClass
# Required import: from tensorflow.keras import utils [as alias]
# Or: from tensorflow.keras.utils import to_categorical [as alias]
def setUpClass(self):
    np.random.seed(1234)
    # Create 2D data
    img2D = np.random.rand(1, 16, 16, 1) * 255
    self.img2D = img2D.astype(int)
    seg2D = np.random.rand(1, 16, 16, 1) * 3
    self.seg2D = seg2D.astype(int)
    self.seg2D = to_categorical(self.seg2D, num_classes=3)
    # Create 3D data
    img3D = np.random.rand(1, 16, 16, 16, 1) * 255
    self.img3D = img3D.astype(int)
    seg3D = np.random.rand(1, 16, 16, 16, 1) * 3
    self.seg3D = seg3D.astype(int)
    self.seg3D = to_categorical(self.seg3D, num_classes=3)
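Note that the random segmentation arrays above have a trailing channel dimension of size 1 (for example shape (1, 16, 16, 1)), yet the shape assertions in the preprocessor tests expect the class axis to take its place (for example (16, 16, 3)). That works because to_categorical squeezes a trailing dimension of size 1 before appending the one-hot axis, at least in current tensorflow.keras; the check below is a sketch worth verifying against your installed version.

import numpy as np
from tensorflow.keras.utils import to_categorical

seg = np.random.randint(0, 3, size=(1, 16, 16, 1))  # labels 0..2 with a trailing channel of 1
one_hot = to_categorical(seg, num_classes=3)
print(one_hot.shape)  # (1, 16, 16, 3): the trailing 1 is replaced by the class axis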
Example 11: test_DATAGENERATOR_consistency
# Required import: from tensorflow.keras import utils [as alias]
# Or: from tensorflow.keras.utils import to_categorical [as alias]
def test_DATAGENERATOR_consistency(self):
    pp_fi = Preprocessor(self.data_io, batch_size=1, data_aug=None,
                         prepare_subfunctions=False, prepare_batches=False,
                         analysis="fullimage")
    data_gen = DataGenerator(self.sample_list, pp_fi,
                             training=True, shuffle=False, iterations=None)
    i = 0
    for batch in data_gen:
        sample = self.data_io.sample_loader(self.sample_list[i],
                                            load_seg=True)
        self.assertTrue(np.array_equal(batch[0][0], sample.img_data))
        seg = to_categorical(sample.seg_data, num_classes=3)
        self.assertTrue(np.array_equal(batch[1][0], seg))
        i += 1
Example 12: fashion_mnist_dataset
# Required import: from tensorflow.keras import utils [as alias]
# Or: from tensorflow.keras.utils import to_categorical [as alias]
def fashion_mnist_dataset():
    """
    Load and prepare Fashion MNIST dataset.
    """
    (x_train, y_train), (x_test, y_test) = keras.datasets.fashion_mnist.load_data()
    x_train = x_train.astype('float32') / 255
    x_train = np.reshape(x_train, x_train.shape + (1,))
    y_train = to_categorical(y_train)

    return {
        'X_train': x_train,
        'y_train': y_train,
        'X_test': x_test,
        'y_test': y_test,
        'preprocessor': None,
        'metadata': {'name': 'fashion_mnist'},
    }
Example 13: tf_keras_iris
# Required import: from tensorflow.keras import utils [as alias]
# Or: from tensorflow.keras.utils import to_categorical [as alias]
def tf_keras_iris(tf_keras_iris_model, tf_keras_iris_ae):
    X, y = load_iris(return_X_y=True)
    X = (X - X.mean(axis=0)) / X.std(axis=0)  # scale dataset
    idx = 145
    X_train, y_train = X[:idx, :], y[:idx]
    # y_train = to_categorical(y_train) # TODO: fine to leave as is?

    # set random seed
    np.random.seed(1)
    tf.set_random_seed(1)

    model = tf_keras_iris_model
    model.fit(X_train, y_train, batch_size=128, epochs=500, verbose=0)
    ae, enc, _ = tf_keras_iris_ae
    ae.fit(X_train, X_train, batch_size=32, epochs=100, verbose=0)

    return X_train, model, ae, enc
Example 14: load_data_from_scratch
# Required import: from tensorflow.keras import utils [as alias]
# Or: from tensorflow.keras.utils import to_categorical [as alias]
def load_data_from_scratch(self, test_size=0.2, max_len=100):
    assert self.train_file_path is not None, 'file must not be none'
    stopwords = load_en_stopwords()
    with open(self.train_file_path, 'r', encoding='utf-8') as file:
        lines = file.readlines()
        lines = [line.strip() for line in lines]
        lines = [line.split('##') for line in lines]
    x = [line[0] for line in lines]
    x = [line.split() for line in x]
    data = [word for xx in x for word in xx]
    y = [line[1] for line in lines]  # label is the second '##'-separated field
    counter = Counter(data)
    vocab = [k for k, v in counter.items() if v >= 5]
    word_index = {k: v for v, k in enumerate(vocab)}
    max_sentence_length = max([len(words) for words in x])
    max_len = max_len if max_sentence_length > max_len else max_sentence_length
    x_data = [[word_index[word] for word in words if word in word_index.keys() and word not in stopwords]
              for words in x]
    x_data = pad_sequences(x_data, maxlen=max_len)
    y_data = to_categorical(y)
    x_train, x_test, y_train, y_test = train_test_split(x_data, y_data, test_size=test_size)
    return x_train, y_train, x_test, y_test, word_index
Example 15: get_cifar_data
# Required import: from tensorflow.keras import utils [as alias]
# Or: from tensorflow.keras.utils import to_categorical [as alias]
def get_cifar_data(num_classes=10):
    """Load CIFAR-10 data, normalize the images, and one-hot encode the labels."""
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
    x_train = x_train.astype(np.float32) / 255.
    x_test = x_test.astype(np.float32) / 255.
    y_train_cat = to_categorical(y_train, num_classes=num_classes).astype(np.float32)
    y_test_cat = to_categorical(y_test, num_classes=num_classes).astype(np.float32)
    return x_train, y_train, x_test, y_test, y_train_cat, y_test_cat