This page collects typical usage examples of the Python method my.tensorflow.average_gradients. If you are unsure what tensorflow.average_gradients does or how to call it, the curated examples below should help. You can also explore further usage examples from the my.tensorflow module.
Four code examples of the tensorflow.average_gradients method are shown below, sorted by popularity by default.
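The my.tensorflow module is project-specific and its source is not shown on this page. As a point of reference, here is a minimal sketch of what an average_gradients helper conventionally looks like in the classic TensorFlow 1.x multi-GPU ("tower") pattern; the actual implementation in my.tensorflow may differ:

import tensorflow as tf

def average_gradients(tower_grads):
    """Average per-tower gradients.

    tower_grads is a list where tower_grads[i] is the list of
    (gradient, variable) pairs returned by compute_gradients() on tower i.
    All towers share the same variables, so the gradients for each variable
    are averaged element-wise.
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # grad_and_vars is ((grad_gpu0, var), (grad_gpu1, var), ...) for one variable.
        # (Gradients of None, for unused variables, are not handled in this sketch.)
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        grad = tf.reduce_mean(tf.concat(grads, axis=0), axis=0)
        average_grads.append((grad, grad_and_vars[0][1]))
    return average_grads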
Example 1: __init__
# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import average_gradients [as alias]
def __init__(self, config, models):
    model = models[0]
    assert isinstance(model, Model)
    self.config = config
    self.model = model
    self.opt = tf.train.AdadeltaOptimizer(config.init_lr)
    self.var_list = model.get_var_list()
    self.global_step = model.get_global_step()
    self.summary = model.summary
    self.models = models
    losses = []
    grads_list = []
    # Each model replica computes its loss and gradients on its own GPU.
    for gpu_idx, model in enumerate(models):
        with tf.name_scope("grads_{}".format(gpu_idx)), tf.device("/gpu:{}".format(gpu_idx)):
            loss = model.get_loss()
            grads = self.opt.compute_gradients(loss, var_list=self.var_list)
            losses.append(loss)
            grads_list.append(grads)
    self.loss = tf.add_n(losses) / len(losses)
    # Average the per-GPU gradients and apply them as a single update.
    self.grads = average_gradients(grads_list)
    self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
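Examples 2 and 3 below follow the same structure; they differ only in the optimizer (Adam vs. Adadelta) and in reading the device type from config.device_type instead of hard-coding "/gpu:N". A minimal usage sketch, assuming a hypothetical MultiGPUTrainer class wrapping this __init__, a Model class exposing get_loss, get_var_list, get_global_step, and summary, and leaving variable sharing between replicas up to the Model class:

models = [Model(config) for _ in range(config.num_gpus)]  # one replica per device
trainer = MultiGPUTrainer(config, models)
# sess.run(trainer.train_op, feed_dict=...) then performs one synchronized step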
Example 2: __init__
# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import average_gradients [as alias]
def __init__(self, config, models):
    model = models[0]
    assert isinstance(model, Model)
    self.config = config
    self.model = model
    self.opt = tf.train.AdamOptimizer(config.init_lr)
    self.var_list = model.get_var_list()
    self.global_step = model.get_global_step()
    self.summary = model.summary
    self.models = models
    losses = []
    grads_list = []
    # Same pattern as Example 1, but the device type (e.g. "gpu" or "cpu")
    # comes from the config instead of being hard-coded.
    for gpu_idx, model in enumerate(models):
        with tf.name_scope("grads_{}".format(gpu_idx)), tf.device("/{}:{}".format(config.device_type, gpu_idx)):
            loss = model.get_loss()
            grads = self.opt.compute_gradients(loss, var_list=self.var_list)
            losses.append(loss)
            grads_list.append(grads)
    self.loss = tf.add_n(losses) / len(losses)
    self.grads = average_gradients(grads_list)
    self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
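Parameterizing the device string through config.device_type (Examples 2-4) is a small but useful design choice: with device_type set to "cpu" and a single model replica, the same trainer code path can run on machines without GPUs, while "gpu" reproduces the per-GPU placement of Example 1.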
Example 3: __init__
# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import average_gradients [as alias]
def __init__(self, config, models):
    model = models[0]
    assert isinstance(model, Model)
    self.config = config
    self.model = model
    # Identical to Example 2 except that Adadelta is used instead of Adam.
    self.opt = tf.train.AdadeltaOptimizer(config.init_lr)
    self.var_list = model.get_var_list()
    self.global_step = model.get_global_step()
    self.summary = model.summary
    self.models = models
    losses = []
    grads_list = []
    for gpu_idx, model in enumerate(models):
        with tf.name_scope("grads_{}".format(gpu_idx)), tf.device("/{}:{}".format(config.device_type, gpu_idx)):
            loss = model.get_loss()
            grads = self.opt.compute_gradients(loss, var_list=self.var_list)
            losses.append(loss)
            grads_list.append(grads)
    self.loss = tf.add_n(losses) / len(losses)
    self.grads = average_gradients(grads_list)
    self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
Example 4: __init__
# Required import: from my import tensorflow [as alias]
# Or: from my.tensorflow import average_gradients [as alias]
def __init__(self, config, models):
    model = models[0]
    assert isinstance(model, Model)
    self.config = config
    self.model = model
    self.global_step = model.get_global_step()
    if config.optimizer == 'adam':
        self.opt = tf.train.AdamOptimizer(config.init_lr)
    elif config.optimizer == 'gd':
        # Decay the learning rate by 10x every num_steps/3 steps,
        # i.e. twice over the course of training.
        lr = tf.train.exponential_decay(config.init_lr,
                                        self.global_step,
                                        tf.to_int32(config.num_steps / 3),
                                        0.1,
                                        staircase=True)
        self.opt = tf.train.GradientDescentOptimizer(lr)
    else:
        raise ValueError('Unsupported optimizer')
    self.var_list = model.get_var_list()
    self.summary = model.summary
    self.models = models
    losses = []
    grads_list = []
    for gpu_idx, _model in enumerate(models):
        with tf.name_scope("grads_{}".format(gpu_idx)), tf.device("/{}:{}".format(config.device_type, gpu_idx)):
            loss = _model.get_loss()
            grads = self.opt.compute_gradients(loss, var_list=self.var_list)
            losses.append(loss)
            grads_list.append(grads)
    self.loss = tf.add_n(losses) / len(losses)
    self.grads = average_gradients(grads_list)
    if config.freeze_mode:
        # Mask out gradients for already-zero weights so that pruned
        # weights stay frozen (see the sketch after this example).
        self.grads = zerout_gradients_for_zero_weights(self.grads, mode=config.freeze_mode)
    self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
    if model.get_sparsity_op():
        # Run the model's sparsity op after each training step.
        with tf.control_dependencies([self.train_op]):
            self.train_op = tf.group(self.train_op, model.get_sparsity_op())
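zerout_gradients_for_zero_weights is another project-specific helper whose source is not shown here. Its name and call site suggest that it zeroes out the gradient entries for weights whose value is already zero, so that pruned weights are never updated again. A hypothetical sketch under that assumption (the mode argument is assumed to select the masking granularity; only element-wise masking is shown, and sparse IndexedSlices gradients are not handled):

def zerout_gradients_for_zero_weights(grads_and_vars, mode='element'):
    """Hypothetical: zero each gradient where the matching weight is zero."""
    masked_grads_and_vars = []
    for grad, var in grads_and_vars:
        if grad is None:
            masked_grads_and_vars.append((grad, var))
            continue
        # 1.0 where the weight is nonzero, 0.0 where it has been pruned.
        mask = tf.cast(tf.not_equal(var, 0.0), grad.dtype)
        masked_grads_and_vars.append((grad * mask, var))
    return masked_grads_and_vars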