This article collects typical usage examples of the Python method tensorflow.contrib.eager.Iterator. If you have been wondering what eager.Iterator does, how to call it, or how it is used in practice, the curated examples below should help. You can also look further into the containing module, tensorflow.contrib.eager, for related usage.
The following shows 15 code examples of eager.Iterator, sorted by popularity by default.
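As a quick orientation before the examples, here is a minimal self-contained sketch of the basic pattern: enable eager execution, build a tf.data.Dataset, and loop over it with tfe.Iterator. This sketch assumes TensorFlow 1.x with tf.contrib available; the toy dataset is made up for illustration.

import tensorflow as tf
import tensorflow.contrib.eager as tfe

tf.enable_eager_execution()  # must be called before any other TensorFlow op

# A made-up dataset, just to have something to iterate over.
dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3, 4]).batch(2)
for batch in tfe.Iterator(dataset):
    # Each batch is an EagerTensor that can be inspected immediately.
    print(batch.numpy())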
Example 1: main
# Required import: from tensorflow.contrib import eager [as alias]
# Or: from tensorflow.contrib.eager import Iterator [as alias]
def main():
    dataset = tf.data.Dataset.from_generator(gen, (tf.int32, tf.int32),
                                             (tf.TensorShape([BATCH_SIZE]),
                                              tf.TensorShape([BATCH_SIZE, 1])))
    optimizer = tf.compat.v1.train.GradientDescentOptimizer(LEARNING_RATE)
    model = Word2Vec(vocab_size=VOCAB_SIZE, embed_size=EMBED_SIZE)
    grad_fn = tfe.implicit_value_and_gradients(model.compute_loss)
    total_loss = 0.0
    num_train_steps = 0
    while num_train_steps < NUM_TRAIN_STEPS:
        for center_words, target_words in tfe.Iterator(dataset):
            if num_train_steps >= NUM_TRAIN_STEPS:
                break
            loss_batch, grads = grad_fn(center_words, target_words)
            total_loss += loss_batch
            optimizer.apply_gradients(grads)
            if (num_train_steps + 1) % SKIP_STEP == 0:
                print('Average loss at step {}: {:5.1f}'.format(
                    num_train_steps, total_loss / SKIP_STEP))
                total_loss = 0.0
            num_train_steps += 1
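Examples 1 (above) and 6 (below) lean on tfe.implicit_value_and_gradients, which wraps a loss function and returns both its value and a list of (gradient, variable) pairs that optimizer.apply_gradients accepts directly. A toy sketch of that contract, with a made-up variable and loss (Example 1 uses model.compute_loss instead):

import tensorflow as tf
import tensorflow.contrib.eager as tfe

# Assumes eager execution is already enabled, as in the opening sketch.
w = tfe.Variable(3.0, name='w')

def toy_loss(x):
    return w * x  # d(loss)/dw == x

grad_fn = tfe.implicit_value_and_gradients(toy_loss)
value, grads_and_vars = grad_fn(2.0)  # value == 6.0; gradient w.r.t. w == 2.0
# grads_and_vars is a list of (gradient, variable) pairs, which is exactly
# the structure apply_gradients expects:
tf.train.GradientDescentOptimizer(0.1).apply_gradients(grads_and_vars)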
Example 2: train_one_epoch
# Required import: from tensorflow.contrib import eager [as alias]
# Or: from tensorflow.contrib.eager import Iterator [as alias]
def train_one_epoch(model, optimizer, dataset, log_interval=None):
    """Trains model on `dataset` using `optimizer`."""
    tf.train.get_or_create_global_step()

    def model_loss(labels, images):
        prediction = model(images, training=True)
        loss_value = loss(prediction, labels)
        tf.contrib.summary.scalar('loss', loss_value)
        tf.contrib.summary.scalar('accuracy',
                                  compute_accuracy(prediction, labels))
        return loss_value

    for (batch, (images, labels)) in enumerate(tfe.Iterator(dataset)):
        with tf.contrib.summary.record_summaries_every_n_global_steps(10):
            batch_model_loss = functools.partial(model_loss, labels, images)
            optimizer.minimize(
                batch_model_loss, global_step=tf.train.get_global_step())
            if log_interval and batch % log_interval == 0:
                print('Batch #%d\tLoss: %.6f' % (batch, batch_model_loss()))
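The key detail in Example 2 is that, in eager mode, Optimizer.minimize accepts a zero-argument callable that recomputes the loss, which is why model_loss is wrapped with functools.partial. A tiny sketch of that behavior, with a made-up variable:

import tensorflow as tf
import tensorflow.contrib.eager as tfe

# Assumes eager execution is already enabled, as in the opening sketch.
w = tfe.Variable(5.0)
opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
opt.minimize(lambda: w * w)  # one gradient step on the recomputed loss
print(w.numpy())  # 4.0: gradient of w**2 at 5.0 is 10.0, step is 0.1 * 10.0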
Example 3: _bulid
# Required import: from tensorflow.contrib import eager [as alias]
# Or: from tensorflow.contrib.eager import Iterator [as alias]
def _bulid(self, dataset, sess=None):
    self._dataset = dataset

    if self._is_eager:
        self._eager_iterator = tfe.Iterator(dataset)
    else:
        self._iterator = dataset.make_initializable_iterator()
        self._batch_op = self._iterator.get_next()
        if sess:
            self._sess = sess
        else:
            self._sess = session()

    try:
        self.reset()
    except:
        pass
Example 4: train
# Required import: from tensorflow.contrib import eager [as alias]
# Or: from tensorflow.contrib.eager import Iterator [as alias]
def train(model, optimizer, dataset, step_counter, log_interval=None):
    """Trains model on `dataset` using `optimizer`."""
    start = time.time()
    for (batch, (images, labels)) in enumerate(tfe.Iterator(dataset)):
        with tf.contrib.summary.record_summaries_every_n_global_steps(
                10, global_step=step_counter):
            # Record the operations used to compute the loss given the input,
            # so that the gradient of the loss with respect to the variables
            # can be computed.
            with tf.GradientTape() as tape:
                logits = model(images, training=True)
                loss_value = loss(logits, labels)
                tf.contrib.summary.scalar('loss', loss_value)
                tf.contrib.summary.scalar('accuracy',
                                          compute_accuracy(logits, labels))
            grads = tape.gradient(loss_value, model.variables)
            optimizer.apply_gradients(
                zip(grads, model.variables), global_step=step_counter)
            if log_interval and batch % log_interval == 0:
                rate = log_interval / (time.time() - start)
                print('Step #%d\tLoss: %.6f (%d steps/sec)' %
                      (batch, loss_value, rate))
                start = time.time()
Example 5: test
# Required import: from tensorflow.contrib import eager [as alias]
# Or: from tensorflow.contrib.eager import Iterator [as alias]
def test(model, dataset):
    """Perform an evaluation of `model` on the examples from `dataset`."""
    avg_loss = tfe.metrics.Mean('loss')
    accuracy = tfe.metrics.Accuracy('accuracy')

    for (images, labels) in tfe.Iterator(dataset):
        logits = model(images, training=False)
        avg_loss(loss(logits, labels))
        accuracy(
            tf.argmax(logits, axis=1, output_type=tf.int64),
            tf.cast(labels, tf.int64))
    print('Test set: Average loss: %.4f, Accuracy: %4f%%\n' %
          (avg_loss.result(), 100 * accuracy.result()))
    with tf.contrib.summary.always_record_summaries():
        tf.contrib.summary.scalar('loss', avg_loss.result())
        tf.contrib.summary.scalar('accuracy', accuracy.result())
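Example 5 uses the tfe.metrics objects as callables: each call folds a new value into the running aggregate, and .result() reads it out. A tiny sketch of that contract:

import tensorflow.contrib.eager as tfe

# Assumes eager execution is already enabled, as in the opening sketch.
avg = tfe.metrics.Mean('loss')
avg(2.0)
avg(4.0)
print(avg.result().numpy())  # 3.0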
Example 6: main
# Required import: from tensorflow.contrib import eager [as alias]
# Or: from tensorflow.contrib.eager import Iterator [as alias]
def main():
    dataset = tf.data.Dataset.from_generator(gen, (tf.int32, tf.int32),
                                             (tf.TensorShape([BATCH_SIZE]),
                                              tf.TensorShape([BATCH_SIZE, 1])))
    optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)
    model = Word2Vec(vocab_size=VOCAB_SIZE, embed_size=EMBED_SIZE)
    grad_fn = tfe.implicit_value_and_gradients(model.compute_loss)
    total_loss = 0.0  # for average loss in the last SKIP_STEP steps
    num_train_steps = 0
    while num_train_steps < NUM_TRAIN_STEPS:
        for center_words, target_words in tfe.Iterator(dataset):
            if num_train_steps >= NUM_TRAIN_STEPS:
                break
            loss_batch, grads = grad_fn(center_words, target_words)
            total_loss += loss_batch
            optimizer.apply_gradients(grads)
            if (num_train_steps + 1) % SKIP_STEP == 0:
                print('Average loss at step {}: {:5.1f}'.format(
                    num_train_steps, total_loss / SKIP_STEP))
                total_loss = 0.0
            num_train_steps += 1
Example 7: test
# Required import: from tensorflow.contrib import eager [as alias]
# Or: from tensorflow.contrib.eager import Iterator [as alias]
def test(model, dataset):
    """Perform an evaluation of `model` on the examples from `dataset`."""
    avg_loss = tfe.metrics.Mean('loss')
    accuracy = tfe.metrics.Accuracy('accuracy')

    for (images, labels) in tfe.Iterator(dataset):
        predictions = model(images, training=False)
        avg_loss(loss(predictions, labels))
        accuracy(tf.argmax(predictions, axis=1, output_type=tf.int64),
                 tf.argmax(labels, axis=1, output_type=tf.int64))
    print('Test set: Average loss: %.4f, Accuracy: %4f%%\n' %
          (avg_loss.result(), 100 * accuracy.result()))
    with tf.contrib.summary.always_record_summaries():
        tf.contrib.summary.scalar('loss', avg_loss.result())
        tf.contrib.summary.scalar('accuracy', accuracy.result())
Example 8: reset
# Required import: from tensorflow.contrib import eager [as alias]
# Or: from tensorflow.contrib.eager import Iterator [as alias]
def reset(self, feed_dict={}):
    if self._is_eager:
        self._eager_iterator = tfe.Iterator(self._dataset)
    else:
        self._sess.run(self._iterator.initializer, feed_dict=feed_dict)
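Examples 3 and 8 appear to come from the same wrapper class, which hides the eager/graph split behind one interface: tfe.Iterator in eager mode, an initializable iterator plus a session in graph mode. Below is a minimal self-contained sketch of that pattern; the class name, the batch() method, and the eager-mode detection are illustrative assumptions, not the original repo's code.

import tensorflow as tf
import tensorflow.contrib.eager as tfe

class DatasetWrapper:  # hypothetical name; the original class is larger
    def __init__(self, dataset, sess=None):
        self._is_eager = tf.executing_eagerly()
        self._dataset = dataset
        if self._is_eager:
            self._eager_iterator = tfe.Iterator(dataset)
        else:
            self._iterator = dataset.make_initializable_iterator()
            self._batch_op = self._iterator.get_next()
            self._sess = sess if sess else tf.Session()
            self._sess.run(self._iterator.initializer)

    def batch(self):
        # Eager mode: advance the Python-style iterator directly.
        # Graph mode: run the get_next() op inside the session.
        if self._is_eager:
            return next(self._eager_iterator)
        return self._sess.run(self._batch_op)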
Example 9: test
# Required import: from tensorflow.contrib import eager [as alias]
# Or: from tensorflow.contrib.eager import Iterator [as alias]
def test(self, mode):
    """
    Testing procedure
    Args:
        mode: string, 'validation' or 'test',
            choose which set to test
    """
    test_examples = self.testset.dataset_size
    total_top1_accuracy = 0.
    total_topk_accuracy = 0.
    for (ex_i, (images, label)) in enumerate(tfe.Iterator(self.testset.dataset)):
        top_1_a = self.top_1_accuracy(images, label)
        top_k_a = self.top_k_accuracy(images, label)
        total_top1_accuracy += top_1_a
        total_topk_accuracy += top_k_a
        if (ex_i % self.cfg.DISPLAY_STEP) == 0:
            print('Examples done: {:5d}/{} ---- Top-1: {:.4f} -- Top-{}: {:.4f}'.format(
                ex_i + 1, test_examples, total_top1_accuracy / (ex_i + 1),
                self.cfg.TOP_K, total_topk_accuracy / (ex_i + 1)))
    print('---- Final accuracy ----')
    print('Top-1: {:.4f} -- Top-{}: {:.4f}'.format(
        total_top1_accuracy / test_examples, self.cfg.TOP_K,
        total_topk_accuracy / test_examples))
    print('Top-1 error rate: {:.4f} -- Top-{} error rate: {:.4f}'.format(
        1 - (total_top1_accuracy / test_examples), self.cfg.TOP_K,
        1 - (total_topk_accuracy / test_examples)))
Example 10: train
# Required import: from tensorflow.contrib import eager [as alias]
# Or: from tensorflow.contrib.eager import Iterator [as alias]
def train(self):
    """
    Training procedure
    """
    start_time = time.time()
    step_time = 0.0

    with self.writer.as_default():
        with tf.contrib.summary.record_summaries_every_n_global_steps(self.cfg.DISPLAY_STEP):
            for e in range(self.epoch.numpy(), self.cfg.EPOCHS):
                tf.assign(self.epoch, e)
                for (batch_i, (images, labels)) in enumerate(tfe.Iterator(self.trainingset.dataset)):
                    self.global_step = tf.train.get_global_step()
                    step = self.global_step.numpy() + 1
                    step_start_time = int(round(time.time() * 1000))
                    self.optimizer.minimize(lambda: self.loss('train', images, labels),
                                            global_step=self.global_step)
                    step_end_time = int(round(time.time() * 1000))
                    step_time += step_end_time - step_start_time
                    if (step % self.cfg.DISPLAY_STEP) == 0:
                        l = self.loss('train', images, labels)
                        a = self.accuracy('train', images, labels).numpy()
                        print('Epoch: {:03d} Step/Batch: {:09d} Step mean time: {:04d}ms \n'
                              'Loss: {:.7f} Training accuracy: {:.4f}'.format(
                                  e, step, int(step_time / step), l, a))
                    if (step % self.cfg.VALIDATION_STEP) == 0:
                        val_images, val_labels = tfe.Iterator(self.valset.dataset).next()
                        l = self.loss('val', val_images, val_labels)
                        a = self.accuracy('val', val_images, val_labels).numpy()
                        int_time = time.time() - start_time
                        print('Elapsed time: {} --- Loss: {:.7f} Validation accuracy: {:.4f}'.format(
                            ut.format_time(int_time), l, a))
                    if (step % self.cfg.SAVE_STEP) == 0:
                        tfe.Saver(self.all_variables).save(
                            os.path.join(self.cfg.CKPT_PATH, 'net.ckpt'),
                            global_step=self.global_step)
                        print('Variables saved')
Example 11: run_train_epoch
# Required import: from tensorflow.contrib import eager [as alias]
# Or: from tensorflow.contrib.eager import Iterator [as alias]
def run_train_epoch(self, dataset):
    num_correct_total = 0
    for (x, y) in tfe.Iterator(dataset):
        self.run_train_step(x, y)
        num_correct_total += self.num_correct
    return num_correct_total
Example 12: train
# Required import: from tensorflow.contrib import eager [as alias]
# Or: from tensorflow.contrib.eager import Iterator [as alias]
def train(loss_fn):
    """Train a regression model evaluated using `loss_fn`."""
    print('Training; loss function: ' + loss_fn.__name__)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)

    # Define the function through which to differentiate.
    def loss_for_example(x, y):
        return loss_fn(y, prediction(x))

    # `grad_fn(x_i, y_i)` returns (1) the value of `loss_for_example`
    # evaluated at `x_i`, `y_i` and (2) the gradients of any variables used in
    # calculating it.
    grad_fn = tfe.implicit_value_and_gradients(loss_for_example)

    start = time.time()
    for epoch in range(100):
        total_loss = 0.0
        for x_i, y_i in tfe.Iterator(dataset):
            loss, gradients = grad_fn(x_i, y_i)
            # Take an optimization step and update variables.
            optimizer.apply_gradients(gradients)
            total_loss += loss
        if epoch % 10 == 0:
            print('Epoch {0}: {1}'.format(epoch, total_loss / n_samples))
    print('Took: %f seconds' % (time.time() - start))
    print('Eager execution exhibits significant overhead per operation. '
          'As you increase your batch size, the impact of the overhead will '
          'become less noticeable. Eager execution is under active development: '
          'expect performance to increase substantially in the near future!')
Example 13: train
# Required import: from tensorflow.contrib import eager [as alias]
# Or: from tensorflow.contrib.eager import Iterator [as alias]
def train(loss_fn):
    """Train a regression model evaluated using `loss_fn`."""
    # Exercise version of this function; Example 12 shows the completed
    # implementation with the TO DO blocks filled in.
    print('Training; loss function: ' + loss_fn.__name__)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)

    # Define the function through which to differentiate.
    #############################
    ########## TO DO ############
    #############################
    def loss_for_example(x, y):
        pass

    # Obtain a gradients function using `tfe.implicit_value_and_gradients`.
    #############################
    ########## TO DO ############
    #############################
    grad_fn = None

    start = time.time()
    for epoch in range(100):
        total_loss = 0.0
        for x_i, y_i in tfe.Iterator(dataset):
            # Compute the loss and gradient, and take an optimization step.
            #############################
            ########## TO DO ############
            #############################
            optimizer.apply_gradients(gradients)
            total_loss += loss
        if epoch % 10 == 0:
            print('Epoch {0}: {1}'.format(epoch, total_loss / n_samples))
    print('Took: %f seconds' % (time.time() - start))
    print('Eager execution exhibits significant overhead per operation. '
          'As you increase your batch size, the impact of the overhead will '
          'become less noticeable. Eager execution is under active development: '
          'expect performance to increase substantially in the near future!')
Example 14: main
# Required import: from tensorflow.contrib import eager [as alias]
# Or: from tensorflow.contrib.eager import Iterator [as alias]
def main():
    # Exercise version of this function; Examples 1 and 6 show the completed
    # implementation with the TO DO blocks filled in.
    dataset = tf.data.Dataset.from_generator(gen, (tf.int32, tf.int32),
                                             (tf.TensorShape([BATCH_SIZE]),
                                              tf.TensorShape([BATCH_SIZE, 1])))
    optimizer = tf.train.GradientDescentOptimizer(LEARNING_RATE)

    # Create the model
    #############################
    ########## TO DO ############
    #############################
    model = None

    # Create the gradients function, using `tfe.implicit_value_and_gradients`
    #############################
    ########## TO DO ############
    #############################
    grad_fn = None

    total_loss = 0.0  # for average loss in the last SKIP_STEP steps
    num_train_steps = 0
    while num_train_steps < NUM_TRAIN_STEPS:
        for center_words, target_words in tfe.Iterator(dataset):
            if num_train_steps >= NUM_TRAIN_STEPS:
                break
            # Compute the loss and gradients, and take an optimization step.
            #############################
            ########## TO DO ############
            #############################
            if (num_train_steps + 1) % SKIP_STEP == 0:
                print('Average loss at step {}: {:5.1f}'.format(
                    num_train_steps, total_loss / SKIP_STEP))
                total_loss = 0.0
            num_train_steps += 1
Example 15: _dataset_iterator
# Required import: from tensorflow.contrib import eager [as alias]
# Or: from tensorflow.contrib.eager import Iterator [as alias]
def _dataset_iterator(self, group_by_samples_per_pixel, source_samples_per_pixel_list):
    directory = os.path.join(self.tfrecords_creator.base_tfrecords_directory,
                             self.tfrecords_creator.name)
    if group_by_samples_per_pixel:
        assert len(source_samples_per_pixel_list) == 1
        directory = os.path.join(directory, str(source_samples_per_pixel_list[0]))
    files = tf.data.Dataset.list_files(directory + '/*')
    threads = multiprocessing.cpu_count()
    dataset = tf.data.TFRecordDataset(files, compression_type='GZIP',
                                      buffer_size=None, num_parallel_reads=threads)

    def _feature_parser(serialized_example):
        features = {}
        for samples_per_pixel in source_samples_per_pixel_list:
            for source_index in range(self.tfrecords_creator.number_of_sources_per_example):
                for source_render_pass in self.tfrecords_creator.source_render_passes_usage.render_passes():
                    indexed_source_feature_name = Naming.source_feature_name(
                        source_render_pass, samples_per_pixel=samples_per_pixel,
                        index=source_index)
                    features[indexed_source_feature_name] = tf.FixedLenFeature([], tf.string)
        for target_render_pass in self.tfrecords_creator.target_render_passes_usage.render_passes():
            features[Naming.target_feature_name(target_render_pass)] = tf.FixedLenFeature([], tf.string)

        parsed_features = tf.parse_single_example(serialized_example, features)

        source_features = {}
        for samples_per_pixel in source_samples_per_pixel_list:
            for source_index in range(self.tfrecords_creator.number_of_sources_per_example):
                for source_render_pass in self.tfrecords_creator.source_render_passes_usage.render_passes():
                    indexed_source_feature_name = Naming.source_feature_name(
                        source_render_pass, samples_per_pixel=samples_per_pixel,
                        index=source_index)
                    source_feature = tf.decode_raw(
                        parsed_features[indexed_source_feature_name], tf.float32)
                    number_of_channels = RenderPasses.number_of_channels(source_render_pass)
                    source_feature = tf.reshape(
                        source_feature,
                        [self.tfrecords_creator.tiles_height_width,
                         self.tfrecords_creator.tiles_height_width,
                         number_of_channels])
                    source_features[indexed_source_feature_name] = source_feature

        target_features = {}
        for target_render_pass in self.tfrecords_creator.target_render_passes_usage.render_passes():
            target_feature = tf.decode_raw(
                parsed_features[Naming.target_feature_name(target_render_pass)], tf.float32)
            number_of_channels = RenderPasses.number_of_channels(target_render_pass)
            target_feature = tf.reshape(
                target_feature,
                [self.tfrecords_creator.tiles_height_width,
                 self.tfrecords_creator.tiles_height_width,
                 number_of_channels])
            target_features[Naming.target_feature_name(target_render_pass)] = target_feature

        return source_features, target_features

    dataset = dataset.map(map_func=_feature_parser, num_parallel_calls=threads)
    iterator = tfe.Iterator(dataset)
    return iterator
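The iterator returned by Example 15 can then be consumed like any other eager iterator. A hedged usage sketch follows; the call site, the argument values, and train_step are assumptions for illustration, not from the original repo:

# Hypothetical call site for Example 15's _dataset_iterator.
iterator = self._dataset_iterator(group_by_samples_per_pixel=False,
                                  source_samples_per_pixel_list=[16, 64])
for source_features, target_features in iterator:
    # Each element is a pair of dicts mapping feature names to
    # [height, width, channels] float32 tensors.
    train_step(source_features, target_features)  # train_step is hypothetical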