This article collects typical usage examples of the Python utils.xprint function. If you are unsure what xprint does, how to call it, or what real uses of it look like, the selected code examples below may help.
Fifteen code examples of the xprint function are shown below, ordered by popularity by default.
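The real utils.xprint is not reproduced on this page; in the examples it behaves like print while also recording output, and it is often paired with a dim helper that summarizes the shape of an array-like value. The following is only a minimal sketch of what such helpers might look like, included as an assumption to make the examples easier to read (LOG_PATH and both function bodies are hypothetical, not the project's actual code):

import numpy as np

LOG_PATH = 'xprint.log'  # hypothetical log destination

def xprint(*args, sep=' ', end='\n'):
    # Assumed behaviour: print to stdout and append the same text to a log file.
    text = sep.join(str(a) for a in args)
    print(text, end=end)
    with open(LOG_PATH, 'at') as f:
        f.write(text + end)

def dim(x):
    # Assumed behaviour: return a compact description of x's shape or length.
    if x is None:
        return 'None'
    if hasattr(x, 'shape'):
        return str(tuple(x.shape))
    return str(len(x))

xprint('stats=%s' % dim(np.zeros((3, 6))))  # prints and logs: stats=(3, 6)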
Example 1: build_lstm9
def build_lstm9(embeddings, shape, settings):
    """2 layer LSTM
    """
    model = Sequential()
    model.add(
        Embedding(
            embeddings.shape[0],
            embeddings.shape[1],
            input_length=shape['max_length'],
            trainable=False,
            weights=[embeddings],
            mask_zero=False
        )
    )
    model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False), name='td9a'))
    model.add(Bidirectional(LSTM(shape['n_hidden'], return_sequences=True,
                                 recurrent_dropout=settings['dropout'],
                                 dropout=settings['dropout']), name='bidi9a'))
    # model.add(GlobalMaxPool1D())
    # model.add(BatchNormalization())
    # model.add(Dropout(settings['dropout'] / 2.0))
    # model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False), name='td9b'))
    model.add(Bidirectional(LSTM(shape['n_hidden'], return_sequences=True,
                                 recurrent_dropout=settings['dropout'],
                                 dropout=settings['dropout']), name='bidi9b'))
    model.add(GlobalMaxPool1D(name='mp9'))
    model.add(BatchNormalization(name='bn9'))
    model.add(Dropout(settings['dropout'] / 2.0, name='drop9b'))
    model.add(Dense(shape['n_class'], activation='sigmoid', name='den9b'))
    xprint('build_lstm9: embeddings=%s shape=%s' % (dim(embeddings), shape))
    return model
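A usage sketch for build_lstm9 follows. The embedding matrix, shape and settings values are invented for illustration, and the snippet assumes the Keras layers imported by the original module plus the xprint/dim helpers sketched above:

import numpy as np

embeddings = np.random.normal(size=(5000, 300)).astype(np.float32)  # toy vocab x dim matrix
shape = {'max_length': 100, 'n_hidden': 64, 'n_class': 6}            # illustrative sizes
settings = {'dropout': 0.3, 'lr': 0.001}

model = build_lstm9(embeddings, shape, settings)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()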
Example 2: build_lstm4
def build_lstm4(embeddings, shape, settings):
    model = Sequential()
    model.add(
        Embedding(
            embeddings.shape[0],
            embeddings.shape[1],
            input_length=shape['max_length'],
            trainable=False,
            weights=[embeddings],
            mask_zero=False,
            name='eembed'
        )
    )
    model.add(TimeDistributed(Dense(shape['n_hidden'], use_bias=False, name='td4')))
    model.add(Bidirectional(LSTM(shape['n_hidden'], return_sequences=True,
                                 recurrent_dropout=settings['dropout'],
                                 dropout=settings['dropout'])))
    model.add(Flatten(name='flaaten'))
    model.add(BatchNormalization())
    n_dense = int(math.ceil(math.sqrt(shape['n_hidden'] * shape['n_class'])))
    model.add(Dense(n_dense, activation='relu'))
    # model.add(BatchNormalization())
    # x = Dropout(dropout)(x)
    model.add(Dense(shape['n_class'], activation='sigmoid'))
    xprint('build_lstm4: embeddings=%s shape=%s' % (dim(embeddings), shape))
    return model
Example 3: fit
def fit(self, train, test_size=0.1):
    model_dir = get_model_dir(self.model_name, 0)
    # RocAucEvaluation saves the trainable part of the model
    model_path = os.path.join(model_dir, 'model')
    os.makedirs(model_dir, exist_ok=True)
    xprint('ClfCharLstm.fit: model_dir=%s' % model_dir)

    y_train = train[LABEL_COLS].values
    X_train = df_to_sentences(train)
    X_val, y_val = None, None
    if test_size > 0.0:
        X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=test_size)

    lstm_shape = {'n_hidden': self.n_hidden,
                  'max_length': self.max_length,
                  'n_class': len(LABEL_COLS)}
    lstm_settings = {'dropout': self.dropout,
                     'lr': self.learn_rate}
    lstm, self.best_epochs = do_train(X_train, y_train, X_val, y_val, lstm_shape, lstm_settings, {},
                                      epochs=self.epochs, batch_size=self.batch_size, frozen=self.frozen,
                                      lstm_type=self.lstm_type, model_path=model_path)

    with open(os.path.join(model_dir, 'config.json'), 'wt') as f:
        f.write(lstm.to_json())

    print('****: best_epochs=%s - %s' % (self.best_epochs, self.description))
Example 4: describe
def describe(y):
    """Print a table of min, mean and max values for each label column of y
    """
    MEASURES = ['min', 'mean', 'max']
    stats = np.zeros((3, len(LABEL_COLS)), dtype=np.float64)
    xprint('stats=%s' % dim(stats))
    for j, col in enumerate(LABEL_COLS):
        stats[0, j] = y[:, j].min()
        stats[1, j] = y[:, j].mean()
        stats[2, j] = y[:, j].max()

    def draw(name, vals, sep='|'):
        vals = ['%12s' % v for v in ([name] + vals)]
        xprint((' %s ' % sep).join(vals))

    def draw_bar():
        bar = '-' * 12
        draw(bar, [bar] * len(LABEL_COLS), sep='+')

    draw_bar()
    draw('', LABEL_COLS)
    draw_bar()
    for i, measure in enumerate(MEASURES):
        draw(measure, ['%10.4f' % z for z in stats[i, :]])
    draw_bar()
Example 5: evaluate
def evaluate(self, get_clf):
    auc = np.zeros((self.n, len(LABEL_COLS)), dtype=np.float64)
    for i in range(self.n):
        ok, auc[i, :] = self._evaluate(get_clf, i)
        if not ok:
            return ok, auc
        show_auc(auc[:i + 1, :])
    xprint('program=%s train=%s' % (sys.argv[0], dim(self.train)))
    return True, auc
Example 6: do_train
def do_train(train_texts, train_labels, dev_texts, dev_labels,
             lstm_shape, lstm_settings, lstm_optimizer, batch_size=100, epochs=5, by_sentence=True,
             frozen=False, lstm_type=1, model_path=None):
    """Train a Keras model on the sentences in `train_texts`.
       All the sentences in a text are given the text's label.
    """
    print('do_train: train_texts=%s dev_texts=%s' % (dim(train_texts), dim(dev_texts)))

    embeddings, char_index, _ = get_char_embeddings()
    n_train_sents = count_sentences(char_index, train_texts, batch_size, 'train')
    X_train, y_train = make_char_sentences(char_index, lstm_shape['max_length'], batch_size,
                                           train_texts, train_labels, 'train', n_train_sents)
    validation_data = None
    if dev_texts is not None:
        n_dev_sents = count_sentences(char_index, dev_texts, batch_size, 'dev')
        X_val, y_val = make_char_sentences(char_index, lstm_shape['max_length'], batch_size,
                                           dev_texts, dev_labels, 'dev', n_dev_sents)
        validation_data = (X_val, y_val)
    sentence_cache.flush()

    model = build_lstm[lstm_type](embeddings, lstm_shape, lstm_settings)
    compile_lstm(model, lstm_settings['lr'])

    callback_list = None
    if validation_data is not None:
        ra_val = RocAucEvaluation(validation_data=validation_data, interval=1, frozen=frozen,
                                  model_path=model_path)
        early = EarlyStopping(monitor='val_auc', mode='max', patience=1, verbose=1)
        callback_list = [ra_val, early]

    model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,
              validation_data=validation_data, callbacks=callback_list, verbose=1)

    # ra_val only exists when there is validation data; fall back to -1 otherwise.
    best_epoch_frozen = ra_val.best_epoch if validation_data is not None else -1
    best_epoch_unfrozen = -1
    if not frozen:
        xprint("Unfreezing")
        for layer in model.layers:
            layer.trainable = True
        compile_lstm(model, lstm_settings['lr'] / 10)
        if validation_data is not None:
            # Reload the best model so far
            ra_val.best_epoch = -1
            lstm_weights = [embeddings] + ra_val.top_weights
            model.set_weights(lstm_weights)
            # Reset early stopping
            early = EarlyStopping(monitor='val_auc', mode='max', patience=1, verbose=1)
            callback_list = [ra_val, early]
        model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs,
                  validation_data=validation_data, callbacks=callback_list, verbose=1)
        if validation_data is not None:
            best_epoch_unfrozen = ra_val.best_epoch

    return model, (best_epoch_frozen, best_epoch_unfrozen)
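The frozen/unfrozen split in do_train is the standard two-phase fine-tuning pattern: train with the embedding layer frozen, then unfreeze everything, recompile at a lower learning rate, and keep training. A self-contained Keras sketch of just that pattern, with dummy data and made-up sizes (not the project's actual pipeline), might look like this:

import numpy as np
from keras.models import Sequential
from keras.layers import Embedding, LSTM, Dense
from keras.optimizers import Adam

X = np.random.randint(0, 1000, size=(256, 50))  # toy token ids
y = np.random.randint(0, 2, size=(256, 1))      # toy binary labels

model = Sequential([
    Embedding(1000, 32, input_length=50, trainable=False),  # phase 1: embeddings frozen
    LSTM(16),
    Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy')
model.fit(X, y, epochs=1, batch_size=32, verbose=0)

# Phase 2: unfreeze every layer, recompile with a ~10x smaller learning rate, train again.
# (Adam(lr=...) is the Keras 2 style argument used in these examples.)
for layer in model.layers:
    layer.trainable = True
model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy')
model.fit(X, y, epochs=1, batch_size=32, verbose=0)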
Example 7: load
def load(cls, path, char_index, max_length, frozen):
    xprint('SentimentAnalyser.load: path=%s max_length=%d' % (path, max_length))
    with open(os.path.join(path, 'config.json'), 'rt') as f:
        model = model_from_json(f.read())
    with open(os.path.join(path, 'model'), 'rb') as f:
        lstm_weights = pickle.load(f)
    if frozen:
        embeddings, char_index, index_char = get_char_embeddings()
        lstm_weights = [embeddings] + lstm_weights
    model.set_weights(lstm_weights)
    return cls(char_index, model, max_length=max_length)
Example 8: split_data
def split_data(df, indexes, frac):
    show_values('df', df)
    n = int(len(df) * frac)
    train = df.loc[indexes[:n]]
    test = df.loc[indexes[n:]]
    show_values('train', train)
    show_values('test', test)
    xprint('split_data: %.2f of %d: train=%d test=%d' % (frac, len(df), len(train), len(test)))
    return train, test
Example 9: make_submission_reductions
def make_submission_reductions(get_clf, submission_name, predict_methods):
    seed_random()
    os.makedirs(SUBMISSION_DIR, exist_ok=True)

    train, test, subm = load_data()
    clf = get_clf()
    clf.fit(train, test_size=0.0)
    reductions = clf.predict_reductions(test, predict_methods)
    ok = True
    for method in predict_methods:
        submission_path = join(SUBMISSION_DIR, '%s.%s.%s.csv' % (
            submission_name, get_n_samples_str(), method))
        if os.path.exists(submission_path):
            xprint('make_submission_reductions: submission_path=%s already exists' % submission_path)
            ok = False
            break
        xprint('make_submission_reductions: method=%s' % method)
        pred = reductions[method]
        describe(pred)

        # Create the submission file.
        submid = pd.DataFrame({'id': subm['id']})
        submission = pd.concat([submid, pd.DataFrame(pred, columns=LABEL_COLS)], axis=1)
        submission.to_csv(submission_path, index=False)
        xprint('make_submission_reductions: Saved in %s' % submission_path)

    xprint('program=%s train=%s test=%s submission=%s' % (sys.argv[0], dim(train), dim(test),
                                                          dim(submission)))
    if clf is not None:
        del clf
    return ok
Example 10: process_summary
def process_summary(path, n_rank):
    print('=' * 100)
    print('path=%s' % path)
    completed_tests = load_json(path)
    xprint('run_summary_path=%s' % path)
    best = {}
    try:
        best = display_results(completed_tests, do_max, n_rank)
        # display_results(completed_tests, True)
    except Exception as e:
        print('Bad summary: %s' % e)
    print('&' * 100)
    return best
Example 11: show_auc
def show_auc(auc):
    n = auc.shape[0]
    mean_auc = auc.mean(axis=0)
    auc_mean = auc.mean(axis=1)

    xprint('-' * 110, 'n=%d' % n)
    for i in range(n):
        xprint('%5d: auc=%.3f %s' % (i, auc[i, :].mean(), label_score(auc[i, :])))
    xprint('%5s: auc=%.3f %s' % ('Mean', mean_auc.mean(), label_score(mean_auc)))
    xprint('-' * 110)
    xprint('auc=%.3f +- %.3f (%.0f%%) range=%.3f (%.0f%%)' % (
        auc_mean.mean(), auc_mean.std(),
        100.0 * auc_mean.std() / auc_mean.mean(),
        auc_mean.max() - auc_mean.min(),
        100.0 * (auc_mean.max() - auc_mean.min()) / auc_mean.mean()
    ))
Example 12: fit
def fit(self, train, test_size=0.1):
    print('ClfSpacy.fit', '-' * 80)
    (model1_path, config1_path), (model2_path, config2_path), epoch_path = self._get_paths(True)
    if not self.force_fit:
        if self.frozen:
            if (os.path.exists(model1_path) and os.path.exists(config1_path) and
                    SaveAllEpochs.epoch_dict(epoch_path)['epoch1'] == self.epochs):
                xprint('model1_path already exists. re-using')
                return
        else:
            if (os.path.exists(model2_path) and os.path.exists(config2_path) and
                    SaveAllEpochs.epoch_dict(epoch_path)['epoch2'] == self.epochs2):
                xprint('model2_path already exists. re-using')
                return

    do_fit1 = (not (os.path.exists(model1_path) and os.path.exists(config1_path)) or
               SaveAllEpochs.epoch_dict(epoch_path)['epoch1'] < self.epochs)
    do_fit2 = (not self.frozen and (not (os.path.exists(model2_path) and
                                         os.path.exists(config2_path)) or
                                    SaveAllEpochs.epoch_dict(epoch_path)['epoch2'] < self.epochs2))

    y_train = train[LABEL_COLS].values
    X_train = df_to_sentences(train)
    X_val, y_val = None, None
    if test_size > 0.0:
        X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=test_size)

    lstm_shape = {'n_hidden': self.n_hidden,
                  'max_length': self.max_length,
                  'n_class': len(LABEL_COLS)}
    lstm_settings = {'dropout': self.dropout,
                     'lr': self.learn_rate}
    lstm, self.best_epochs = do_train(X_train, y_train, X_val, y_val, lstm_shape, lstm_settings,
        {}, batch_size=self.batch_size, lstm_type=self.lstm_type,
        do_fit1=do_fit1, epochs1=self.epochs, model1_path=model1_path, config1_path=config1_path,
        do_fit2=do_fit2, epochs2=self.epochs2, model2_path=model2_path, config2_path=config2_path,
        epoch_path=epoch_path)

    assert do_fit1
    if do_fit1:
        assert os.path.exists(model1_path), model1_path
        assert os.path.exists(config1_path), config1_path
    if do_fit2:
        assert os.path.exists(model2_path), model2_path
        assert os.path.exists(config2_path), config2_path

    print('****: best_epochs=%s - %s Add 1 to these' % (self.best_epochs, self.description))
    del lstm
Example 13: show_scores
def show_scores(scores, force=False):
    global scores_t0, scores_len

    if not force:
        if not scores or len(scores) == scores_len:
            return
        if time.clock() < scores_t0 + 60.0:
            return

    scores_t0 = time.clock()
    scores_len = len(scores)
    scores.sort(key=lambda x: (-x[0], x[2]))

    xprint('!' * 80)
    with open('all.results3.txt', 'wt') as f:
        for i, (score, col_scores, params, desc) in enumerate(scores):
            if i < 10:
                xprint('%4d: auc=%.3f %s %s %s' % (i, score, col_scores, params, desc))
            print('%4d: auc=%.3f %s %s %s' % (i, score, col_scores, params, desc), file=f)
Example 14: _get_paths
def _get_paths(self, create_dir):
    model_dir = get_model_dir(self.model_name, 0)
    if create_dir:
        os.makedirs(model_dir, exist_ok=True)
    # RocAucEvaluation saves the trainable part of the model
    model1_path = os.path.join(model_dir, 'model')
    config1_path = os.path.join(model_dir, 'config.json')
    model2_path = os.path.join(model_dir, 'model2')
    config2_path = os.path.join(model_dir, 'config2.json')
    epoch_path = os.path.join(model_dir, 'epochs.json')
    if not self._shown_paths:
        xprint('model1_path=%s exists=%s' % (model1_path, os.path.exists(model1_path)))
        xprint('config1_path=%s exists=%s' % (config1_path, os.path.exists(config1_path)))
        xprint('model2_path=%s exists=%s' % (model2_path, os.path.exists(model2_path)))
        xprint('config2_path=%s exists=%s' % (config2_path, os.path.exists(config2_path)))
        xprint('epoch_path=%s exists=%s' % (epoch_path, os.path.exists(epoch_path)))
        self._shown_paths = True

    return (model1_path, config1_path), (model2_path, config2_path), epoch_path
Example 15: evaluate_reductions
def evaluate_reductions(self, get_clf, predict_methods):
    predict_methods_all = predict_methods + ['BEST']
    auc_reductions = {method: np.zeros((self.n, len(LABEL_COLS)), dtype=np.float64)
                      for method in predict_methods_all}
    best_methods = []
    for i in range(self.n):
        ok, reductions, best = self._evaluate_reductions(get_clf, i, predict_methods)
        best_methods.append(best)
        if not ok:
            return ok, {}, best_methods
        for method in predict_methods_all:
            auc = auc_reductions[method]
            auci = reductions[method]
            auc[i, :] = auci
            print('evaluate_reductions: method=%s' % method)
            show_auc(auc[:i + 1, :])
    xprint('program=%s train=%s' % (sys.argv[0], dim(self.train)))
    return True, auc_reductions, best_methods