This article collects typical usage examples of the Progbar class from keras.utils.generic_utils in Python. If you have been wondering what Progbar does and how to use it, the curated class examples below should help.
Below, 15 code examples of the Progbar class are shown, sorted by popularity by default.
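Before the examples, here is a minimal sketch of the Progbar API that all of these snippets rely on (written against the old keras.utils.generic_utils module; the loop body and the loss value are stand-ins for real work, not taken from any example below):

from keras.utils.generic_utils import Progbar

n_samples, batch_size = 256, 32
bar = Progbar(n_samples)  # target: the total number of items to process
for start in range(0, n_samples, batch_size):
    loss = 1.0 / (start + 1)  # stand-in for a real training metric
    # add() advances the bar by a step; update() jumps to an absolute position;
    # both display running averages of the (name, value) pairs passed in values
    bar.add(batch_size, values=[('loss', loss)])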
Example 1: train_model_embed
def train_model_embed(train, dev, glove, model, model_dir = 'models/curr_model', nb_epochs = 20, batch_size = 64, hs=True, ci = True):
X_dev_p, X_dev_h, y_dev = load_data.prepare_split_vec_dataset(dev, glove=glove)
word_index = load_data.WordIndex(glove)
if not os.path.exists(model_dir):
os.makedirs(model_dir)
for e in range(nb_epochs):
print "Epoch ", e
mb = load_data.get_minibatches_idx(len(train), batch_size, shuffle=True)
p = Progbar(len(train))
for i, train_index in mb:
if len(train_index) != batch_size:
continue
X_train_p, X_train_h , y_train = load_data.prepare_split_vec_dataset([train[k] for k in train_index], word_index.index)
padded_p = load_data.pad_sequences(X_train_p, maxlen = PREM_LEN, dim = -1, padding = 'pre')
padded_h = load_data.pad_sequences(X_train_h, maxlen = HYPO_LEN, dim = -1, padding = 'post')
data = {'premise_input': padded_p, 'embed_input': np.expand_dims(np.array(train_index), axis=1), 'output' : padded_h}
if ci:
data['class_input'] = y_train
if hs:
data['train_input'] = padded_h
data['output'] = np.ones((batch_size, HYPO_LEN, 1))
#sw = (padded_h != 0).astype(float)
#train_loss = float(model.train_on_batch(data, sample_weight={'output':sw})[0])
train_loss = float(model.train_on_batch(data)[0])
p.add(len(train_index), [('train_loss', train_loss)])
sys.stdout.write('\n')
model.save_weights(model_dir + '/model~' + str(e))
Example 2: run_epoch
def run_epoch(self, split, train=False, batch_size=128, return_pred=False):
total = total_loss = 0
func = self.model.train_on_batch if train else self.model.test_on_batch
ids, preds, targs = [], [], []
prog = Progbar(split.num_examples)
for idx, X, Y, types in split.batches(batch_size):
X.update({k: np.concatenate([v, types], axis=1) for k, v in Y.items()})
batch_end = time()
loss = func(X)
prob = self.model.predict(X, verbose=0)['p_relation']
prob *= self.typechecker.get_valid_cpu(types[:, 0], types[:, 1])
pred = prob.argmax(axis=1)
targ = Y['p_relation'].argmax(axis=1)
ids.append(idx)
targs.append(targ)
preds.append(pred)
total_loss += loss
total += 1
prog.add(idx.size, values=[('loss', loss), ('acc', np.mean(pred==targ))])
preds = np.concatenate(preds).astype('int32')
targs = np.concatenate(targs).astype('int32')
ids = np.concatenate(ids).astype('int32')
ret = {
'f1': f1_score(targs, preds, average='micro', labels=self.labels),
'precision': precision_score(targs, preds, average='micro', labels=self.labels),
'recall': recall_score(targs, preds, average='micro', labels=self.labels),
'accuracy': accuracy_score(targs, preds),
'loss': total_loss / float(total),
}
if return_pred:
ret.update({'ids': ids.tolist(), 'preds': preds.tolist(), 'targs': targs.tolist()})
return ret
Example 3: play
def play(self, env, epoch=1, batch_size=1, visualize=None, verbose=1):
print("Free play started!")
frames = np.zeros((0, ) + env.observe_image().shape[1:])
frames = frames.transpose(0, 2, 3, 1)
rewards = 0
progbar = Progbar(epoch)
for e in range(epoch):
env.reset()
game_over = False
loss = 0
# get initial observation, start game
obs_t = env.observe()
while not game_over:
obs_tm1 = obs_t
# get next action
action = self.policy(obs_tm1, train=False)
# apply action, get reward and new state
obs_t, reward, game_over = env.update(action)
rewards += reward
frame_t = env.observe_image().transpose(0, 2, 3, 1)
frames = np.concatenate([frames, frame_t], axis=0)
if verbose == 1:
progbar.add(1, values=[("loss", loss), ("rewards", rewards)])
if visualize:
print("Making gif!")
frames = np.repeat(frames, 3, axis=-1)
make_gif(frames[:-visualize['n_frames']],
filepath=visualize['filepath'], gray=visualize['gray'])
print("See your gif at {}".format(visualize['filepath']))
Example 4: learn
def learn(self, env, epoch=1, batch_size=1, exp_batch_size=0,
gamma=0.9, reset_memory=False, verbose=1, callbacks=None):
"""Train Agent to play Enviroment env
Parameters
----------
env : :obj:`Enviroment`
The environment the agent learns to play
epoch : int
number of complete episodes to play
batch_size : int
number of experiences to replay per step
exp_batch_size : int
number of experiences to replay from the consolidated
:attr:`ExperienceReplay.experience`.
gamma : float
discount factor
reset_memory : bool
if we should restart :attr:`ExperienceReplay.memory` before
starting the game.
verbose : int
controls how much we print
callbacks : list of callables
TODO: Add callback support
"""
print("Learning started!")
print("[Environment]: {}".format(env.description))
print("[Model]: {}".format(self.model.description))
print("[Memory]: {}".format(self.memory.description))
if reset_memory:
self.reset()
progbar = Progbar(epoch)
rewards = 0
for e in range(epoch):
# reset environment
env.reset()
game_over = False
loss = 0
# get initial observation, start game
obs_t = env.observe()
# Run an episode
while not game_over:
obs_tm1 = obs_t
action = self.policy(obs_tm1)
# apply action, get rewards and new state
obs_t, reward, game_over = env.update(action)
rewards += reward
# store experience
self.remember(obs_tm1, action, reward, obs_t, game_over)
# adapt model
loss += self.update(batch_size=batch_size,
exp_batch_size=exp_batch_size,
gamma=gamma)
if verbose == 1:
progbar.add(1, values=[("loss", loss), ("rewards", rewards)])
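For context, a hypothetical invocation of learn() based on the docstring above might look like this (the Agent and Enviroment constructors and the model/memory objects are illustrative assumptions, not part of this source):

# hypothetical usage sketch; Agent(...) and Enviroment(...) are assumed names
env = Enviroment()
agent = Agent(model=model, memory=memory)
agent.learn(env, epoch=1000, batch_size=32, exp_batch_size=0,
            gamma=0.9, reset_memory=False, verbose=1)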
Example 5: validate
def validate(dev, gen_test, beam_size, hypo_len, samples, noise_size, glove, cmodel = None, adverse = False,
diverse = False):
vgen = val_generator(dev, gen_test, beam_size, hypo_len, noise_size)
p = Progbar(samples)
batchez = []
while p.seen_so_far < samples:
batch = next(vgen)
perplexity = np.mean(np.power(2, batch[2]))
loss = np.mean(batch[2])
losses = [('hypo_loss', loss), ('perplexity', perplexity)]
if cmodel is not None:
ceval = cmodel.evaluate([batch[0], batch[1]], batch[4], verbose = 0)
losses += [('class_loss', ceval[0]), ('class_acc', ceval[1])]
probs = cmodel.predict([batch[0], batch[1]], verbose = 0)
losses += [('class_entropy', np.mean(-np.sum(probs * np.log(probs), axis=1)))]
p.add(len(batch[0]), losses)
batchez.append(batch)
batchez = merge_result_batches(batchez)
res = {}
if adverse:
val_loss = adverse_validation(dev, batchez, glove)
print('adverse_loss:', val_loss)
res['adverse_loss'] = val_loss
if diverse:
div, _, _, _ = diversity(dev, gen_test, beam_size, hypo_len, noise_size, 64, 32)
res['diversity'] = div
print()
# average each tracked metric out of the Progbar's internal accumulators
for val in p.unique_values:
arr = p.sum_values[val]
res[val] = arr[0] / arr[1]
return res
Example 6: fit_model
def fit_model(self, X, y):
"""
fits a model to some data
"""
for e in range(self.nb_epoch):
print('Epoch: ', e, ' of ', self.nb_epoch)
progbar = Progbar(target=X.shape[0], verbose=True)
# batch train with realtime data augmentation
total_accuracy = 0
total_loss = 0
current = 0
for X_batch, y_batch in self.datagen.flow(X, y, self.batch_size):
# prepare the batch with random augmentations
X_batch, y_batch = self.batch_warp(X_batch, y_batch)
# train on the batch
loss, accuracy = self.model.train(X_batch, y_batch, accuracy = True)
# update the progress bar
total_loss += loss * self.batch_size
total_accuracy += accuracy * self.batch_size
current += self.batch_size
if current > X.shape[0]:
current = self.X.shape[0]
else:
progbar.update(current, [('loss', loss), ('acc.', accuracy)])
progbar.update(current, [('loss', total_loss/current), ('acc.', total_accuracy/current)])
# checkpoints between epochs
self.model.save_weights(self.save_weights_file, overwrite = True)
Example 7: make_predictions
def make_predictions(conf,shot_list,loader,custom_path=None):
feature_extractor = FeatureExtractor(loader)
save_prepath = feature_extractor.get_save_prepath()
if custom_path is None:
model_path = conf['paths']['model_save_path'] + model_filename#save_prepath + model_filename
else:
model_path = custom_path
model = joblib.load(model_path)
#shot_list = shot_list.random_sublist(10)
y_prime = []
y_gold = []
disruptive = []
pbar = Progbar(len(shot_list))
fn = partial(predict_single_shot,model=model,feature_extractor=feature_extractor)
pool = mp.Pool()
print('predicting in parallel on {} processes'.format(pool._processes))
#for (y_p,y,disr) in map(fn,shot_list):
for (y_p,y,disr) in pool.imap(fn,shot_list):
#y_p,y,disr = predict_single_shot(model,feature_extractor,shot)
y_prime += [np.expand_dims(y_p,axis=1)]
y_gold += [np.expand_dims(y,axis=1)]
disruptive += [disr]
pbar.add(1.0)
pool.close()
pool.join()
return y_prime,y_gold,disruptive
Example 8: adverse_generate2
def adverse_generate2(gen_model, ad_model, cmodel, train, word_index, glove, threshold = 0.95, batch_size = 64, ci = False):
mb = load_data.get_minibatches_idx(len(train), batch_size, shuffle=True)
p = Progbar(len(train))
results = []
for i, train_index in mb:
if len(train_index) != batch_size:
continue
orig_batch = [train[k] for k in train_index]
class_indices = [load_data.LABEL_LIST.index(train[k][2]) for k in train_index]
probs = generation.generation_predict_embed(gen_model, word_index.index, orig_batch,
np.random.random_integers(0, len(train), len(orig_batch)), class_indices = class_indices)
gen_batch = generation.get_classes(probs)
ad_preds = ad_model.predict_on_batch(gen_batch)[0].flatten()
X = []
# use j here so the minibatch index i (printed below) is not clobbered
for j in range(len(orig_batch)):
concat = orig_batch[j][0] + ["--"] + word_index.get_seq(gen_batch[j])
X.append(load_data.load_word_vecs(concat, glove))
X = np.array(X)
X_padded = load_data.pad_sequences(X, dim = len(X[0][0]))
cpreds = cmodel.predict_on_batch(X_padded)[0][np.arange(len(X_padded)), class_indices]
pred_seq = [word_index.print_seq(gen) for gen in gen_batch]
premises = [" ".join(ex[0]) for ex in orig_batch]
classes = np.array(load_data.LABEL_LIST)[class_indices]
# materialize zip() once: in Python 3 it is a single-use iterator
zipped = list(zip(cpreds, ad_preds, premises, pred_seq, classes))
added = [el for el in zipped if el[0] * el[1] > threshold]
results += added
p.add(len(train_index), [('added', float(len(added)))])
if len(results) > 200:
print((i + 1) * batch_size)
return results
return results
Example 9: _predict_loop
def _predict_loop(self, f, ins, batch_size=128, verbose=0):
'''
Abstract method to loop over some data in batches.
'''
nb_sample = len(ins[0])
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(*ins_batch)
if not isinstance(batch_outs, list):
batch_outs = [batch_outs]
if batch_index == 0:
for batch_out in batch_outs:
shape = (nb_sample,) + batch_out.shape[1:]
outs.append(np.zeros(shape))
for i, batch_out in enumerate(batch_outs):
outs[i][batch_start:batch_end] = batch_out
if verbose == 1:
progbar.update(batch_end)
return outs
Example 10: _test_loop
def _test_loop(self, f, ins, batch_size=128, verbose=0):
'''
Abstract method to loop over some data in batches.
'''
nb_sample = len(ins[0])
outs = []
if verbose == 1:
progbar = Progbar(target=nb_sample)
batches = make_batches(nb_sample, batch_size)
index_array = np.arange(nb_sample)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
ins_batch = slice_X(ins, batch_ids)
batch_outs = f(*ins_batch)
if isinstance(batch_outs, list):
if batch_index == 0:
for _ in batch_outs:
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
outs[i] += batch_out * len(batch_ids)
else:
if batch_index == 0:
outs.append(0.)
outs[0] += batch_outs * len(batch_ids)
if verbose == 1:
progbar.update(batch_end)
for i, out in enumerate(outs):
outs[i] /= nb_sample
return outs
Example 11: test_progbar
def test_progbar():
n = 2
input_arr = np.random.random((n, n, n))
bar = Progbar(n)
for i, arr in enumerate(input_arr):
bar.update(i, list(arr))  # each length-2 row serves as a (name, value) pair
Example 12: TrainIntervalLogger
class TrainIntervalLogger(Callback):
def __init__(self, interval=10000):
self.interval = interval
self.step = 0
self.reset()
def reset(self):
self.interval_start = timeit.default_timer()
self.progbar = Progbar(target=self.interval)
self.metrics = []
self.infos = []
self.info_names = None
self.episode_rewards = []
def on_train_begin(self, logs):
self.train_start = timeit.default_timer()
self.metrics_names = self.model.metrics_names
print('Training for {} steps ...'.format(self.params['nb_steps']))
def on_train_end(self, logs):
duration = timeit.default_timer() - self.train_start
print('done, took {:.3f} seconds'.format(duration))
def on_step_begin(self, step, logs):
if self.step % self.interval == 0:
if len(self.episode_rewards) > 0:
metrics = np.array(self.metrics)
assert metrics.shape == (self.interval, len(self.metrics_names))
formatted_metrics = ''
if not np.isnan(metrics).all():  # only compute means if at least one value is not NaN
means = np.nanmean(self.metrics, axis=0)
assert means.shape == (len(self.metrics_names),)
for name, mean in zip(self.metrics_names, means):
formatted_metrics += ' - {}: {:.3f}'.format(name, mean)
formatted_infos = ''
if len(self.infos) > 0:
infos = np.array(self.infos)
if not np.isnan(infos).all():  # only compute means if at least one value is not NaN
means = np.nanmean(self.infos, axis=0)
assert means.shape == (len(self.info_names),)
for name, mean in zip(self.info_names, means):
formatted_infos += ' - {}: {:.3f}'.format(name, mean)
print('{} episodes - episode_reward: {:.3f} [{:.3f}, {:.3f}]{}{}'.format(len(self.episode_rewards), np.mean(self.episode_rewards), np.min(self.episode_rewards), np.max(self.episode_rewards), formatted_metrics, formatted_infos))
print('')
self.reset()
print('Interval {} ({} steps performed)'.format(self.step // self.interval + 1, self.step))
def on_step_end(self, step, logs):
if self.info_names is None:
self.info_names = list(logs['info'].keys())
values = [('reward', logs['reward'])]
self.progbar.update((self.step % self.interval) + 1, values=values, force=True)
self.step += 1
self.metrics.append(logs['metrics'])
if len(self.info_names) > 0:
self.infos.append([logs['info'][k] for k in self.info_names])
def on_episode_end(self, episode, logs):
self.episode_rewards.append(logs['episode_reward'])
Example 13: preprocess
def preprocess(X):
progbar = Progbar(X.shape[0]) # progress bar for pre-processing status tracking
for i in range(X.shape[0]):
for j in range(X.shape[1]):
X[i, j] = denoise_tv_chambolle(X[i, j], weight=0.1, multichannel=False)
progbar.add(1)
return X
Example 14: generation_test
def generation_test(train, glove, model, batch_size = 64, prem_len = 22, hypo_len = 12):
mb = load_data.get_minibatches_idx(len(train), batch_size, shuffle=True)
p = Progbar(len(train))
for i, train_index in mb:
X_prem, X_hypo, _ = load_data.prepare_split_vec_dataset([train[k] for k in train_index], glove)
X_p = load_data.pad_sequences(X_prem, maxlen = prem_len, dim = 50)
X_h = load_data.pad_sequences(X_hypo, maxlen = hypo_len, dim = 50)
train_loss = model.train_on_batch(X_p, X_h)[0]
p.add(len(X_p), [('train_loss', train_loss)])
Example 15: rotation_augmentation
def rotation_augmentation(X, angle_range):
progbar = Progbar(X.shape[0])
X_rot = np.copy(X)
for i in range(len(X)):
angle = np.random.randint(-angle_range, angle_range)
for j in range(X.shape[1]):
X_rot[i, j] = ndimage.rotate(X[i, j], angle, reshape=False, order=1)
progbar.add(1)
return X_rot