This article collects typical usage examples of the Python method keras.utils.generic_utils.Progbar.update. If you have been wondering what Progbar.update does, how to call it, or what idiomatic uses look like, the curated examples below should help. You can also read further about the enclosing class, keras.utils.generic_utils.Progbar.
The following shows 15 code examples of Progbar.update, sorted by popularity by default.
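Before the examples, here is a minimal, self-contained sketch of the basic calling convention (the sample count, the sleep, and the fake loss metric are made up for illustration): Progbar.update takes the current step index plus an optional values list of (name, value) pairs, which the bar displays as running averages.

import time
import numpy as np
from keras.utils.generic_utils import Progbar

nb_samples = 100
progbar = Progbar(target=nb_samples)
for i in range(nb_samples):
    time.sleep(0.01)           # stand-in for real per-step work
    loss = np.random.random()  # made-up metric, purely for illustration
    # advance the bar to step i + 1 and report the metric
    progbar.update(i + 1, values=[('loss', loss)])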
Example 1: TrainIntervalLogger
# Required import: from keras.utils.generic_utils import Progbar
# Method demonstrated: Progbar.update
# (The excerpt also assumes import timeit, import numpy as np, and a keras-rl style Callback base class.)
class TrainIntervalLogger(Callback):
    def __init__(self, interval=10000):
        self.interval = interval
        self.step = 0
        self.reset()

    def reset(self):
        self.interval_start = timeit.default_timer()
        self.progbar = Progbar(target=self.interval)
        self.metrics = []
        self.infos = []
        self.info_names = None
        self.episode_rewards = []

    def on_train_begin(self, logs):
        self.train_start = timeit.default_timer()
        self.metrics_names = self.model.metrics_names
        print('Training for {} steps ...'.format(self.params['nb_steps']))

    def on_train_end(self, logs):
        duration = timeit.default_timer() - self.train_start
        print('done, took {:.3f} seconds'.format(duration))

    def on_step_begin(self, step, logs):
        if self.step % self.interval == 0:
            if len(self.episode_rewards) > 0:
                metrics = np.array(self.metrics)
                assert metrics.shape == (self.interval, len(self.metrics_names))
                formatted_metrics = ''
                if not np.isnan(metrics).all():  # unless all values are NaN
                    means = np.nanmean(self.metrics, axis=0)
                    assert means.shape == (len(self.metrics_names),)
                    for name, mean in zip(self.metrics_names, means):
                        formatted_metrics += ' - {}: {:.3f}'.format(name, mean)
                formatted_infos = ''
                if len(self.infos) > 0:
                    infos = np.array(self.infos)
                    if not np.isnan(infos).all():  # unless all values are NaN
                        means = np.nanmean(self.infos, axis=0)
                        assert means.shape == (len(self.info_names),)
                        for name, mean in zip(self.info_names, means):
                            formatted_infos += ' - {}: {:.3f}'.format(name, mean)
                print('{} episodes - episode_reward: {:.3f} [{:.3f}, {:.3f}]{}{}'.format(
                    len(self.episode_rewards), np.mean(self.episode_rewards),
                    np.min(self.episode_rewards), np.max(self.episode_rewards),
                    formatted_metrics, formatted_infos))
                print('')
            self.reset()
            print('Interval {} ({} steps performed)'.format(self.step // self.interval + 1, self.step))

    def on_step_end(self, step, logs):
        if self.info_names is None:
            self.info_names = logs['info'].keys()
        values = [('reward', logs['reward'])]
        self.progbar.update((self.step % self.interval) + 1, values=values, force=True)
        self.step += 1
        self.metrics.append(logs['metrics'])
        if len(self.info_names) > 0:
            self.infos.append([logs['info'][k] for k in self.info_names])

    def on_episode_end(self, episode, logs):
        self.episode_rewards.append(logs['episode_reward'])
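A callback like this is not called directly; in keras-rl it would typically be handed to an agent's fit loop. A minimal sketch of the wiring (the agent, the env, and the step count below are assumptions for illustration, not part of the example above):

# assumes a configured keras-rl agent and a Gym-style env already exist
logger = TrainIntervalLogger(interval=10000)
agent.fit(env, nb_steps=50000, callbacks=[logger], verbose=0)  # verbose=0 so keras-rl's default logger does not duplicate output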
Example 2: _test_loop
# Required import: from keras.utils.generic_utils import Progbar
# Method demonstrated: Progbar.update
# (Also assumes numpy as np and the old Keras helpers make_batches and slice_X.)
def _test_loop(self, f, ins, batch_size=128, verbose=0):
    '''
    Abstract method to loop over some data in batches.
    '''
    nb_sample = len(ins[0])
    outs = []
    if verbose == 1:
        progbar = Progbar(target=nb_sample)
    batches = make_batches(nb_sample, batch_size)
    index_array = np.arange(nb_sample)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
        batch_ids = index_array[batch_start:batch_end]
        ins_batch = slice_X(ins, batch_ids)
        batch_outs = f(*ins_batch)
        if type(batch_outs) == list:
            if batch_index == 0:
                for _ in batch_outs:
                    outs.append(0.)
            for i, batch_out in enumerate(batch_outs):
                outs[i] += batch_out * len(batch_ids)
        else:
            if batch_index == 0:
                outs.append(0.)
            outs[0] += batch_outs * len(batch_ids)
        if verbose == 1:
            progbar.update(batch_end)
    for i, out in enumerate(outs):
        outs[i] /= nb_sample
    return outs
Example 3: test_progbar
# Required import: from keras.utils.generic_utils import Progbar
# Method demonstrated: Progbar.update
# (Also assumes import numpy as np.)
def test_progbar():
    n = 2
    input_arr = np.random.random((n, n, n))
    bar = Progbar(n)
    for i, arr in enumerate(input_arr):
        bar.update(i, list(arr))
Example 4: fit_model
# Required import: from keras.utils.generic_utils import Progbar
# Method demonstrated: Progbar.update
# (Excerpt from a class providing self.datagen, self.model, self.batch_warp, and self.save_weights_file.)
def fit_model(self, X, y):
    """
    Fits a model to some data.
    """
    for e in range(self.nb_epoch):
        print('Epoch: ', e, ' of ', self.nb_epoch)
        progbar = Progbar(target=X.shape[0], verbose=True)
        # batch train with realtime data augmentation
        total_accuracy = 0
        total_loss = 0
        current = 0
        for X_batch, y_batch in self.datagen.flow(X, y, self.batch_size):
            # prepare the batch with random augmentations
            X_batch, y_batch = self.batch_warp(X_batch, y_batch)
            # train on the batch
            loss, accuracy = self.model.train(X_batch, y_batch, accuracy=True)
            # update the progress bar
            total_loss += loss * self.batch_size
            total_accuracy += accuracy * self.batch_size
            current += self.batch_size
            if current > X.shape[0]:
                current = X.shape[0]
            else:
                progbar.update(current, [('loss', loss), ('acc.', accuracy)])
        progbar.update(current, [('loss', total_loss / current), ('acc.', total_accuracy / current)])
        # checkpoint between epochs
        self.model.save_weights(self.save_weights_file, overwrite=True)
Example 5: _predict_loop
# Required import: from keras.utils.generic_utils import Progbar
# Method demonstrated: Progbar.update
# (Also assumes numpy as np and the old Keras helpers make_batches and slice_X.)
def _predict_loop(self, f, ins, batch_size=128, verbose=0):
    '''
    Abstract method to loop over some data in batches.
    '''
    nb_sample = len(ins[0])
    outs = []
    if verbose == 1:
        progbar = Progbar(target=nb_sample)
    batches = make_batches(nb_sample, batch_size)
    index_array = np.arange(nb_sample)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
        batch_ids = index_array[batch_start:batch_end]
        ins_batch = slice_X(ins, batch_ids)
        batch_outs = f(*ins_batch)
        if type(batch_outs) != list:
            batch_outs = [batch_outs]
        if batch_index == 0:
            for batch_out in batch_outs:
                shape = (nb_sample,) + batch_out.shape[1:]
                outs.append(np.zeros(shape))
        for i, batch_out in enumerate(batch_outs):
            outs[i][batch_start:batch_end] = batch_out
        if verbose == 1:
            progbar.update(batch_end)
    return outs
Example 6: test_progbar
# Required import: from keras.utils.generic_utils import Progbar
# Method demonstrated: Progbar.update
def test_progbar():
    values_s = [None,
                [['key1', 1], ['key2', 1e-4]],
                [['key3', 1], ['key2', 1e-4]]]
    for target in (len(values_s) - 1, None):
        for verbose in (0, 1, 2):
            bar = Progbar(target, width=30, verbose=verbose, interval=0.05)
            for current, values in enumerate(values_s):
                bar.update(current, values=values)
Example 7: TrainIntervalLogger
# Required import: from keras.utils.generic_utils import Progbar
# Method demonstrated: Progbar.update
# (The excerpt also assumes import timeit, import numpy as np, and a keras-rl style Callback base class.)
class TrainIntervalLogger(Callback):
    def __init__(self, interval=10000):
        self.interval = interval
        self.step = 0
        self.reset()

    def reset(self):
        """ Reset statistics """
        self.interval_start = timeit.default_timer()
        self.progbar = Progbar(target=self.interval)
        self.metrics = []
        self.infos = []
        self.info_names = None
        self.episode_rewards = []

    def on_train_begin(self, logs):
        """ Initialize training statistics at beginning of training """
        self.train_start = timeit.default_timer()
        self.metrics_names = self.model.metrics_names
        print('Training for {} steps ...'.format(self.params['nb_steps']))

    def on_train_end(self, logs):
        """ Print training duration at end of training """
        duration = timeit.default_timer() - self.train_start
        print('done, took {:.3f} seconds'.format(duration))

    def on_step_begin(self, step, logs):
        """ Print metrics if interval is over """
        if self.step % self.interval == 0:
            if len(self.episode_rewards) > 0:
                metrics = np.array(self.metrics)
                assert metrics.shape == (self.interval, len(self.metrics_names))
                formatted_metrics = ''
                if not np.isnan(metrics).all():  # unless all values are NaN
                    means = np.nanmean(self.metrics, axis=0)
                    assert means.shape == (len(self.metrics_names),)
                    for name, mean in zip(self.metrics_names, means):
                        formatted_metrics += ' - {}: {:.3f}'.format(name, mean)
                formatted_infos = ''
                print('{} episodes - episode_reward: {:.3f} [{:.3f}, {:.3f}]{}{}'.format(
                    len(self.episode_rewards), np.mean(self.episode_rewards),
                    np.min(self.episode_rewards), np.max(self.episode_rewards),
                    formatted_metrics, formatted_infos))
                print('')
            self.reset()
            print('Interval {} ({} steps performed)'.format(self.step // self.interval + 1, self.step))

    def on_step_end(self, step, logs):
        """ Update progress bar at the end of each step """
        values = [('reward', logs['reward'])]
        self.progbar.update((self.step % self.interval) + 1, values=values)
        self.step += 1
        self.metrics.append(logs['metrics'])

    def on_episode_end(self, episode, logs):
        """ Record the reward at the end of each episode """
        self.episode_rewards.append(logs['episode_reward'])
Example 8: predict_general_
# Required import: from keras.utils.generic_utils import Progbar
# Method demonstrated: Progbar.update
# (Python 2 excerpt; also assumes Queue, threading, time, and numpy as np.)
def predict_general_(self, model, X, size, load_func):
    queue = Queue.Queue()
    # generate the progress bar
    if self.verbose > 0:
        progbar = Progbar(size, width=80, verbose=self.verbose)
    batch_idx = range(min(size, self.memory_batch_size))
    self.matrix_load_into_queue(X, batch_idx, queue, load_func)
    X_batch, _, _ = queue.get()
    p = []
    samples = 0
    last_update = time.time() - 1000
    for _, i in enumerate(xrange(0, size, self.memory_batch_size)):
        next_start = i + len(batch_idx)
        next_end = min(size, next_start + self.memory_batch_size)
        if next_end > next_start:
            # spin up the loader thread for the next batch
            batch_idx_next = range(next_start, next_end)
            thread = threading.Thread(target=self.matrix_load_into_queue,
                                      args=(X, batch_idx_next, queue, load_func))
            thread.start()
        else:
            batch_idx_next = None
            thread = None
        # predict the values
        if X_batch.shape[0] > 0:
            p_curr = model.predict(X_batch, batch_size=self.batch_size, verbose=0)
            p.append(p_curr)
        # increment the counter
        samples += len(batch_idx)
        curr_update = time.time()
        if self.verbose > 0 and (curr_update - last_update >= 0.5 or samples >= size):
            progbar.update(samples, [])
            last_update = curr_update
        # wait for the next load to finish
        if thread is not None:
            thread.join()
            X_batch, _, _ = queue.get()
        # now move on to the next batch
        batch_idx = batch_idx_next
    p = np.vstack(p)
    return p
Example 9: positive
# Required import: from keras.utils.generic_utils import Progbar
# Method demonstrated: Progbar.update
# (Python 2 excerpt; TT, csv2np, and read_all_files come from the surrounding project.)
def positive(self, set_positive=1.):
    """
    Create a list of positive data points, expanding each to a square disk of radius `self.radius` pixels.
    :param set_positive: Positive label becomes max(set_positive, p), where p is the annotated probability
    :return: Dictionary(key: filename, value: [x, y, (p, 1-p)])
    """
    if self.positives is not None:  # If already calculated, return it.
        return self.positives, self.positives_count
    if self.files is None:  # Curate list of files if not done already.
        self.files = read_all_files(self.path)
    bar = Progbar(len(self.files))  # Create instance of progress bar.
    if self.verbose:  # Verbose output for debugging.
        print TT.info("> Collecting positive samples from dataset...")
        bar.update(0)
    index = 0  # File index - to update state of progress bar.
    count = 0  # Holds total number of positive samples.
    expanded = {}  # Files and positive pixels in the flattened image, with mitosis probability.
    normal = {}  # Files and positive pixels (y, x) along with class probabilities
    #              (0: Mitotic, 1: Non-Mitotic).
    total = 0
    for data_image, target_csv in self.files:
        labels = csv2np(os.path.join(self.path, target_csv))  # Load CSV annotations into a numpy array.
        expanded[data_image] = {}  # Initialize list for this file.
        normal[data_image] = []
        total += len(labels)
        for (y, x, p) in labels:  # Iterate over annotated pixel values.
            x = int(x)
            y = int(y)
            p = max(set_positive, float(p))
            # Image position: horizontal -> y, vertical -> x
            # Image size: (y, x)
            # @see http://www.scipy-lectures.org/advanced/image_processing/#basic-manipulations
            range_x = xrange(max(0, x - self.radius), min(x + self.radius, self.image_size[1]))
            range_y = xrange(max(0, y - self.radius), min(y + self.radius, self.image_size[0]))
            for i in range_x:
                for j in range_y:
                    expanded[data_image][i * self.image_size[0] + j] = p  # TODO: Verify this. `x * width + y`
                    normal[data_image].append([i, j, p])  # (x, y) => (row, column)
                    count += 1
        index += 1
        if self.verbose:
            bar.update(index)
    self.positives = normal
    self.positives_sorted = expanded
    self.positives_count = count
    TT.success("> Total", count, "positive pixels from", total, "annotations.")
    return normal, count
Example 10: TrainIntervalLogger
# Required import: from keras.utils.generic_utils import Progbar
# Method demonstrated: Progbar.update
# (The excerpt also assumes import timeit, import numpy as np, and a keras-rl style Callback base class.)
class TrainIntervalLogger(Callback):
    def __init__(self, interval=10000):
        self.interval = interval
        self.step = 0
        self.reset()

    def reset(self):
        self.interval_start = timeit.default_timer()
        self.progbar = Progbar(target=self.interval)
        self.metrics = []

    def on_train_begin(self, logs):
        self.train_start = timeit.default_timer()
        self.metrics_names = self.model.metrics_names
        print('Training for {} steps ...'.format(self.params['nb_steps']))

    def on_train_end(self, logs):
        duration = timeit.default_timer() - self.train_start
        print('done, took {:.3f} seconds'.format(duration))

    def on_step_begin(self, step, logs):
        if self.step % self.interval == 0:
            self.reset()
            print('Interval {} ({} steps performed)'.format(self.step // self.interval + 1, self.step))

    def on_step_end(self, step, logs):
        # TODO: work around NaNs in metrics. This isn't really great yet and probably not 100% accurate.
        filtered_metrics = []
        means = None
        for idx, value in enumerate(logs['metrics']):
            if not np.isnan(value):
                filtered_metrics.append(value)
            else:
                mean = np.nan
                if len(self.metrics) > 0 and not np.isnan(self.metrics).all():
                    if means is None:
                        means = np.nanmean(self.metrics, axis=0)
                        assert means.shape == (len(self.metrics_names),)
                    mean = means[idx]
                filtered_metrics.append(mean)
        values = [('reward', logs['reward'])]
        if not np.isnan(filtered_metrics).any():
            values += list(zip(self.metrics_names, filtered_metrics))
        self.progbar.update((self.step % self.interval) + 1, values=values, force=True)
        self.step += 1
        self.metrics.append(logs['metrics'])
Example 11: run
# Required import: from keras.utils.generic_utils import Progbar
# Method demonstrated: Progbar.update
# (Also assumes os, matplotlib.pyplot as plt, and the project helpers plot_anitaliasing, generator, and DistributionHDF5Dataset.)
def run(tag_dist, output_fname, force, nb_samples):
    os.makedirs(os.path.dirname(output_fname), exist_ok=True)
    if os.path.exists(output_fname) and force:
        print("Deleted {}".format(output_fname))
        os.remove(output_fname)
    else:
        assert not os.path.exists(output_fname), \
            "File {} already exists. Use --force to override it".format(output_fname)
    basename, _ = os.path.splitext(output_fname)
    anit_name = basename + "_anti_{}.png"
    hist_name = basename + "_hist_{}.png"
    plot_anitaliasing(tag_dist, anit_name, 1)
    plot_anitaliasing(tag_dist, anit_name, 2)
    plot_anitaliasing(tag_dist, anit_name, 4)
    plot_anitaliasing(tag_dist, anit_name, 8)
    labels, masks, _ = next(generator(tag_dist, 10000, antialiasing=2))
    for key in labels.dtype.names:
        m = labels[key].mean()
        s = labels[key].std()
        print("{}: {:.3f}, {:.3f}".format(key, m, s))
        assert abs(m) <= 0.03
    for label_name in sorted(set(labels.dtype.names) - set(['bits'])):
        x = labels[label_name]
        plt.hist(x.flatten(), bins=40, normed=True)
        plt.savefig(hist_name.format(label_name))
        plt.clf()
    dset = DistributionHDF5Dataset(output_fname, distribution=tag_dist,
                                   nb_samples=nb_samples, mode='w')
    progbar = Progbar(nb_samples)
    batch_size = min(25000, nb_samples)
    for labels, tags, depth_map in generator(tag_dist, batch_size, antialiasing=4):
        pos = dset.append(labels=labels, tag3d=tags, depth_map=depth_map)
        progbar.update(pos)
        if pos == nb_samples:
            break
    print("Saved tag 3d dataset to: {}".format(output_fname))
    dist_fname = basename + "_distribution.json"
    with open(dist_fname, "w+") as dist_f:
        dist_f.write(tag_dist.to_json())
    print("Saved distribution to: {}".format(dist_fname))
Example 12: sample
# Required import: from keras.utils.generic_utils import Progbar
# Method demonstrated: Progbar.update
# (Python 2 excerpt; also assumes random, and TT and read_all_files from the surrounding project.)
def sample(self, batch_size=100):
    if self.sampled is not None and self.batch == batch_size:
        return self.sampled, batch_size
    self.batch = batch_size
    self.positive()
    if self.verbose:
        TT.info("> Creating a random dataset...")
    if self.files is None:
        self.files = read_all_files(self.path)
    TT.info("> Sampling from", len(self), "pixels.")
    indices = xrange(len(self))
    sampled = {}
    bar = Progbar(self.batch)
    count = 0
    positives = 0
    if self.verbose:
        bar.update(count)
    for index in random.sample(indices, self.batch):
        file_id = index // self.pixels_per_image
        image, csv = self.files[file_id]
        if image not in sampled:
            sampled[image] = []
        pixel = index % self.pixels_per_image
        if image in self.positives_sorted and pixel in self.positives_sorted[image]:
            p = 1.
            positives += 1
        else:
            p = 0.
        (x, y) = self.pixel_to_xy(pixel)
        sampled[image].append([x, y, p])
        count += 1
        if self.verbose:
            bar.update(count)
    self.sampled = sampled
    if positives > 0:
        TT.warn("> Out of", batch_size, "sampled pixels,", positives, "pixels are positive.")
    return sampled, count
Example 13: fit
# Required import: from keras.utils.generic_utils import Progbar
# Method demonstrated: Progbar.update
# (Python 2 excerpt; also assumes import numpy and the helper standardize_y.)
def fit(self, X, y, M, batch_size=128, nb_epoch=100, verbose=1,
        validation_split=0., lr=None, shuffle=True):
    y = standardize_y(y)
    # If a validation split size is given (e.g. validation_split=0.2),
    # then split X into smaller X and X_val,
    # and split y into smaller y and y_val.
    do_validation = False
    if validation_split > 0 and validation_split < 1:
        do_validation = True
        split_at = int(len(X) * (1 - validation_split))
        (X, X_val) = (X[0:split_at], X[split_at:])
        (y, y_val) = (y[0:split_at], y[split_at:])
        (M, M_val) = (M[0:split_at], M[split_at:])
        if verbose:
            print "Train on %d samples, validate on %d samples" % (len(y), len(y_val))
    index_array = numpy.arange(len(X))
    for epoch in range(nb_epoch):
        if verbose:
            print 'Epoch', epoch
        if shuffle:
            numpy.random.shuffle(index_array)
        nb_batch = int(numpy.ceil(len(X) / float(batch_size)))
        progbar = Progbar(target=len(X))
        for batch_index in range(0, nb_batch):
            batch_start = batch_index * batch_size
            batch_end = min(len(X), (batch_index + 1) * batch_size)
            batch_ids = index_array[batch_start:batch_end]
            X_batch = X[batch_ids]
            y_batch = y[batch_ids]
            M_batch = M[batch_ids]
            loss = self._train(X_batch, y_batch, M_batch, lr)
            if verbose:
                is_last_batch = (batch_index == nb_batch - 1)
                if not is_last_batch or not do_validation:
                    progbar.update(batch_end, [('loss', loss)])
                else:
                    progbar.update(batch_end, [('loss', loss),
                                               ('val. loss', self.test(X_val, y_val, M_val))])
Example 14: float
# Required import: from keras.utils.generic_utils import Progbar
# Method demonstrated: Progbar.update
# (Python 2 script excerpt; np, datagen, graph, and the di package come from the surrounding script.)
# Train ConvNet
from keras.utils.generic_utils import Progbar

batch_losses = np.zeros(epoch_size // batch_size)
loss_history = []
mean_loss = float("inf")
for epoch_id in xrange(n_epochs):
    dataflow = datagen.flow(batch_size=batch_size,
                            epoch_size=epoch_size)
    print "\nEpoch ", 1 + epoch_id
    progbar = Progbar(epoch_size)
    batch_id = 0
    for (X_batch, Y_batch) in dataflow:
        loss = di.learning.train_on_batch(graph, X_batch,
                                          Y_batch, Q, js, offsets)
        batch_losses[batch_id] = loss[0]
        progbar.update(batch_id * batch_size)
        batch_id += 1
    if np.mean(batch_losses) < mean_loss:
        mean_loss = np.mean(batch_losses)
        std_loss = np.std(batch_losses)
    else:
        break
print "\nTraining loss = ", mean_loss, " +/- ", std_loss

# Measure test accuracies
class_probs = di.learning.predict(graph, X_test, Q, js, offsets)
y_predicted = np.argmax(class_probs, axis=1)
chunk_accuracies = di.singlelabel.chunk_accuracies(y_predicted, y_test)
file_accuracies = di.singlelabel.file_accuracies(test_paths,
                                                 class_probs, y_test,
                                                 method="geometric_mean")
mean_file_accuracy = np.mean(file_accuracies)
Example 15: enumerate
# Required import: from keras.utils.generic_utils import Progbar
# Method demonstrated: Progbar.update
# (Script excerpt; progbar, samples_seen, tokenizer, model, sampling_table, and evaluator are initialized earlier in the source script.)
losses = []
batch_loss = []
for i, seq in enumerate(tokenizer.texts_to_sequences_generator(text_generator())):
    # get skipgram couples for one text in the dataset
    couples, labels = skipgrams_l2c_fast(seq, vocab_size, num_senses=num_senses,
                                         window_size=4, negative_samples=1.,
                                         sampling_table=sampling_table)
    if couples:
        # one gradient update per sentence (one sentence = a few 1000s of word couples)
        X = np.array(couples, dtype="int32")
        labels = np.array(labels, dtype="int32")
        loss = model.train_on_batch(X, labels)
        losses.append(loss)
        batch_loss.append(loss)
        if len(losses) % 10 == 0:
            print('\nBatch Loss: ' + str(np.mean(batch_loss)))
            progbar.update(i, values=[("loss", np.mean(losses))])
            batch_loss = []
        samples_seen += len(labels)
    if i and i % 10000 == 0:
        global_weights, sense_weights = model.layers[0].get_weights()[0]
        avgSim, avgSimC = evaluator.get_scores(global_weights, sense_weights)
        print("scores after %d epochs:" % i)
        print("\t avg-sim: %5.3f" % avgSim)
        print("\t global-sim: %5.3f" % avgSimC)
print('Samples seen:', samples_seen)