This article collects typical usage examples of the Python method tensorflow.python.keras.utils.to_categorical. If you are wondering what utils.to_categorical does, how to call it, or what real usages look like, the curated code samples below may help. You can also read further about the enclosing module, tensorflow.python.keras.utils.
The following shows 4 code examples of utils.to_categorical, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
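Before the examples, here is a minimal, self-contained sketch of what to_categorical does: it turns integer class ids into one-hot vectors. The array values below are illustrative, and the import path follows the one used throughout this page (newer TensorFlow releases expose the same function as tensorflow.keras.utils.to_categorical).

# Minimal illustration of to_categorical: integer class ids -> one-hot rows.
import numpy as np
from tensorflow.python.keras.utils import to_categorical

labels = np.array([0, 2, 1, 2])              # illustrative class ids
onehot = to_categorical(labels, num_classes=3)
print(onehot.shape)                          # (4, 3)
print(onehot[1])                             # [0. 0. 1.] -> class 2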
Example 1: train
# Required import: from tensorflow.python.keras import utils [as alias]
# Or: from tensorflow.python.keras.utils import to_categorical [as alias]
def train(full_model, en_seq, fr_seq, batch_size, n_epochs=10):
    """ Training the model """
    for ep in range(n_epochs):
        losses = []
        for bi in range(0, en_seq.shape[0] - batch_size, batch_size):
            en_onehot_seq = to_categorical(en_seq[bi:bi + batch_size, :], num_classes=en_vsize)
            fr_onehot_seq = to_categorical(fr_seq[bi:bi + batch_size, :], num_classes=fr_vsize)

            full_model.train_on_batch([en_onehot_seq, fr_onehot_seq[:, :-1, :]], fr_onehot_seq[:, 1:, :])

            l = full_model.evaluate([en_onehot_seq, fr_onehot_seq[:, :-1, :]], fr_onehot_seq[:, 1:, :],
                                    batch_size=batch_size, verbose=0)

            losses.append(l)
        if (ep + 1) % 1 == 0:
            logger.info("Loss in epoch {}: {}".format(ep + 1, np.mean(losses)))
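The slicing above implements teacher forcing: the decoder is fed the target one-hot sequence shifted by one timestep relative to the loss target (en_vsize, fr_vsize and logger are module-level globals in the original project and are not shown here). A hedged sketch of just that slicing, with a hypothetical vocabulary size and one made-up padded sentence:

# Illustrative check of the teacher-forcing slices used in train() above.
import numpy as np
from tensorflow.python.keras.utils import to_categorical

fr_vsize = 5                                  # hypothetical vocabulary size
fr_seq = np.array([[1, 3, 4, 2, 0]])          # one padded sentence of word ids
fr_onehot_seq = to_categorical(fr_seq, num_classes=fr_vsize)   # (1, 5, 5)

decoder_input = fr_onehot_seq[:, :-1, :]      # drop the last timestep  -> (1, 4, 5)
decoder_target = fr_onehot_seq[:, 1:, :]      # drop the first timestep -> (1, 4, 5)
print(decoder_input.shape, decoder_target.shape)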
Example 2: process_y_dataset
# Required import: from tensorflow.python.keras import utils [as alias]
# Or: from tensorflow.python.keras.utils import to_categorical [as alias]
def process_y_dataset(self,
                      data: List[str],
                      max_len: Optional[int] = None,
                      subset: Optional[List[int]] = None) -> np.ndarray:
    from tensorflow.python.keras.utils import to_categorical
    if subset is not None:
        target = get_list_subset(data, subset)
    else:
        target = data
    if self.multi_label:
        return self.multi_label_binarizer.fit_transform(target)
    else:
        numerized_samples = self.numerize_label_sequences(target)
        return to_categorical(numerized_samples, len(self.label2idx))
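In the single-label branch, numerize_label_sequences maps each string label to an integer id, and to_categorical turns those ids into one-hot rows sized by the label vocabulary. A minimal sketch of that last step, with a hypothetical label2idx mapping standing in for the class attribute:

# Illustrative stand-in for the single-label branch above.
from tensorflow.python.keras.utils import to_categorical

label2idx = {'negative': 0, 'neutral': 1, 'positive': 2}   # hypothetical mapping
numerized_samples = [2, 0, 1]                              # ids for ['positive', 'negative', 'neutral']
y = to_categorical(numerized_samples, len(label2idx))
print(y.shape)                                             # (3, 3)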
Example 3: return_format_keras
# Required import: from tensorflow.python.keras import utils [as alias]
# Or: from tensorflow.python.keras.utils import to_categorical [as alias]
def return_format_keras(_dict, _mapping, num_classes):
    ret_ = list()
    for speaker, files in _dict.items():
        for file in files:
            spk = _mapping.index(speaker)
            feat = file
            ret_.append([feat, to_categorical(spk, num_classes)])
    return map(list, zip(*ret_))
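Here to_categorical is called with a single integer (the speaker's position in _mapping), which yields one one-hot label vector per file. A tiny sketch with made-up speaker names:

# Illustrative one-hot target for a single speaker index.
from tensorflow.python.keras.utils import to_categorical

_mapping = ['spk_a', 'spk_b', 'spk_c']       # hypothetical speaker list
spk = _mapping.index('spk_b')                # 1
print(to_categorical(spk, len(_mapping)))    # [0. 1. 0.]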
Example 4: infer_nmt
# Required import: from tensorflow.python.keras import utils [as alias]
# Or: from tensorflow.python.keras.utils import to_categorical [as alias]
def infer_nmt(encoder_model, decoder_model, test_en_seq, en_vsize, fr_vsize):
    """
    Infer logic
    :param encoder_model: keras.Model
    :param decoder_model: keras.Model
    :param test_en_seq: sequence of word ids
    :param en_vsize: int
    :param fr_vsize: int
    :return:
    """
    test_fr_seq = sents2sequences(fr_tokenizer, ['sos'], fr_vsize)

    test_en_onehot_seq = to_categorical(test_en_seq, num_classes=en_vsize)
    test_fr_onehot_seq = np.expand_dims(to_categorical(test_fr_seq, num_classes=fr_vsize), 1)

    enc_outs, enc_last_state = encoder_model.predict(test_en_onehot_seq)
    dec_state = enc_last_state
    attention_weights = []
    fr_text = ''
    for i in range(20):
        dec_out, attention, dec_state = decoder_model.predict([enc_outs, dec_state, test_fr_onehot_seq])
        dec_ind = np.argmax(dec_out, axis=-1)[0, 0]

        if dec_ind == 0:
            break

        test_fr_seq = sents2sequences(fr_tokenizer, [fr_index2word[dec_ind]], fr_vsize)
        test_fr_onehot_seq = np.expand_dims(to_categorical(test_fr_seq, num_classes=fr_vsize), 1)

        attention_weights.append((dec_ind, attention))
        fr_text += fr_index2word[dec_ind] + ' '

    return fr_text, attention_weights
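At every decoding step the previously predicted word is re-encoded as a one-hot decoder input of shape (batch, 1, vocabulary). A hedged sketch of that shape handling, assuming test_fr_seq holds one word id per sentence (sents2sequences, fr_tokenizer and fr_index2word are helpers from the original project and are not reproduced here):

# Illustrative shape check for the per-step decoder input (not project code).
import numpy as np
from tensorflow.python.keras.utils import to_categorical

fr_vsize = 6                                                     # hypothetical vocabulary size
test_fr_seq = np.array([3])                                      # a single word id, e.g. 'sos'
step_onehot = to_categorical(test_fr_seq, num_classes=fr_vsize)  # (1, 6)
step_input = np.expand_dims(step_onehot, 1)                      # (1, 1, 6): batch, timestep, vocab
print(step_input.shape)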