This article collects typical usage examples of the Python method my.tensorflow.average_gradients. If you are wondering what tensorflow.average_gradients does, how to call it, or how it is used in practice, the curated method examples below may help. You can also explore the other members of the my.tensorflow module.
The following shows 4 code examples of tensorflow.average_gradients, sorted by popularity by default. You can vote up the examples you like or find useful; your votes help the system recommend better Python code examples.
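Note that my here is a project-local package, not TensorFlow itself: average_gradients is a helper defined inside the project whose source these examples come from. As orientation before the examples, here is a minimal sketch of what such a helper conventionally looks like, following the classic TF1 multi-tower pattern; the name and signature match the examples below, but the body is an assumption rather than the project's actual code.

import tensorflow as tf

def average_gradients(grads_list):
    """Average (gradient, variable) pairs across replicas.

    grads_list holds one list of (grad, var) tuples per replica, as
    returned by Optimizer.compute_gradients.  All replicas share the
    same variables, so the averaged gradient is paired with the
    variable from the first replica.  (None gradients are not handled
    in this sketch.)
    """
    average_grads = []
    for grad_and_vars in zip(*grads_list):
        # grad_and_vars is ((grad0_gpu0, var0), (grad0_gpu1, var0), ...)
        grads = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        mean_grad = tf.reduce_mean(tf.concat(grads, axis=0), axis=0)
        average_grads.append((mean_grad, grad_and_vars[0][1]))
    return average_grads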
Example 1: __init__

# Required imports: from my import tensorflow [as alias]
# Or: from my.tensorflow import average_gradients [as alias]
import tensorflow as tf
from my.tensorflow import average_gradients

def __init__(self, config, models):
    model = models[0]
    assert isinstance(model, Model)  # Model is defined elsewhere in the project
    self.config = config
    self.model = model
    self.opt = tf.train.AdadeltaOptimizer(config.init_lr)
    self.var_list = model.get_var_list()
    self.global_step = model.get_global_step()
    self.summary = model.summary
    self.models = models
    losses = []
    grads_list = []
    # Compute the loss and gradients for each model replica on its own GPU.
    for gpu_idx, model in enumerate(models):
        with tf.name_scope("grads_{}".format(gpu_idx)), tf.device("/gpu:{}".format(gpu_idx)):
            loss = model.get_loss()
            grads = self.opt.compute_gradients(loss, var_list=self.var_list)
            losses.append(loss)
            grads_list.append(grads)
    # Average the per-GPU losses and gradients, then apply a single update.
    self.loss = tf.add_n(losses) / len(losses)
    self.grads = average_gradients(grads_list)
    self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
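All four examples follow the same data-parallel recipe: every replica computes its loss and gradients on its own device, average_gradients merges the per-replica (gradient, variable) lists, and a single apply_gradients call updates the shared weights. For this to work, the replicas in models must share their variables. A hedged sketch of how such a replica list is typically built (the Model constructor arguments are illustrative, not taken from the original source):

def get_multi_gpu_models(config):
    models = []
    with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
        for gpu_idx in range(config.num_gpus):
            with tf.name_scope("model_{}".format(gpu_idx)), \
                    tf.device("/{}:{}".format(config.device_type, gpu_idx)):
                # Every replica reuses the same variables, so the averaged
                # gradients apply to a single set of weights.
                models.append(Model(config))
    return models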
Example 2: __init__

# Required imports: from my import tensorflow [as alias]
# Or: from my.tensorflow import average_gradients [as alias]
import tensorflow as tf
from my.tensorflow import average_gradients

def __init__(self, config, models):
    model = models[0]
    assert isinstance(model, Model)
    self.config = config
    self.model = model
    self.opt = tf.train.AdamOptimizer(config.init_lr)
    self.var_list = model.get_var_list()
    self.global_step = model.get_global_step()
    self.summary = model.summary
    self.models = models
    losses = []
    grads_list = []
    for gpu_idx, model in enumerate(models):
        with tf.name_scope("grads_{}".format(gpu_idx)), tf.device("/{}:{}".format(config.device_type, gpu_idx)):
            loss = model.get_loss()
            grads = self.opt.compute_gradients(loss, var_list=self.var_list)
            losses.append(loss)
            grads_list.append(grads)
    self.loss = tf.add_n(losses) / len(losses)
    self.grads = average_gradients(grads_list)
    self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
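Example 2 differs from Example 1 in two ways: it swaps Adadelta for Adam, and it builds the device string from config.device_type instead of hard-coding "/gpu:", so the same trainer can be pinned to CPU (device_type set to 'cpu') for debugging on machines without GPUs.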
Example 3: __init__

# Required imports: from my import tensorflow [as alias]
# Or: from my.tensorflow import average_gradients [as alias]
import tensorflow as tf
from my.tensorflow import average_gradients

def __init__(self, config, models):
    model = models[0]
    assert isinstance(model, Model)
    self.config = config
    self.model = model
    self.opt = tf.train.AdadeltaOptimizer(config.init_lr)
    self.var_list = model.get_var_list()
    self.global_step = model.get_global_step()
    self.summary = model.summary
    self.models = models
    losses = []
    grads_list = []
    for gpu_idx, model in enumerate(models):
        with tf.name_scope("grads_{}".format(gpu_idx)), tf.device("/{}:{}".format(config.device_type, gpu_idx)):
            loss = model.get_loss()
            grads = self.opt.compute_gradients(loss, var_list=self.var_list)
            losses.append(loss)
            grads_list.append(grads)
    self.loss = tf.add_n(losses) / len(losses)
    self.grads = average_gradients(grads_list)
    self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
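Example 3 combines the Adadelta optimizer of Example 1 with the configurable device placement of Example 2; the gradient-averaging flow is otherwise unchanged.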
Example 4: __init__

# Required imports: from my import tensorflow [as alias]
# Or: from my.tensorflow import average_gradients [as alias]
import tensorflow as tf
from my.tensorflow import average_gradients
# zerout_gradients_for_zero_weights is assumed to come from the same
# project-local module.
from my.tensorflow import zerout_gradients_for_zero_weights

def __init__(self, config, models):
    model = models[0]
    assert isinstance(model, Model)
    self.config = config
    self.model = model
    self.global_step = model.get_global_step()
    if config.optimizer == 'adam':
        self.opt = tf.train.AdamOptimizer(config.init_lr)
    elif config.optimizer == 'gd':
        # Decay the learning rate by 10x every third of training.
        lr = tf.train.exponential_decay(config.init_lr,
                                        self.global_step,
                                        tf.to_int32(config.num_steps / 3),
                                        0.1,
                                        staircase=True)
        self.opt = tf.train.GradientDescentOptimizer(lr)
    else:
        raise ValueError('Unsupported optimizer')
    self.var_list = model.get_var_list()
    self.summary = model.summary
    self.models = models
    losses = []
    grads_list = []
    for gpu_idx, _model in enumerate(models):
        with tf.name_scope("grads_{}".format(gpu_idx)), tf.device("/{}:{}".format(config.device_type, gpu_idx)):
            loss = _model.get_loss()
            grads = self.opt.compute_gradients(loss, var_list=self.var_list)
            losses.append(loss)
            grads_list.append(grads)
    self.loss = tf.add_n(losses) / len(losses)
    self.grads = average_gradients(grads_list)
    if config.freeze_mode:
        # Zero the gradients of weights that should stay frozen.
        self.grads = zerout_gradients_for_zero_weights(self.grads, mode=config.freeze_mode)
    self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
    if model.get_sparsity_op():
        # Run the model's sparsity op after every training step.
        with tf.control_dependencies([self.train_op]):
            self.train_op = tf.group(self.train_op, model.get_sparsity_op())
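zerout_gradients_for_zero_weights is another project-local helper whose body is not shown here. A hypothetical sketch of the masking it presumably performs for a pruning-style freeze_mode (dense gradients assumed; the mode argument's handling is omitted, and the real helper may differ):

def zerout_gradients_for_zero_weights(grads_and_vars, mode='weight'):
    masked = []
    for grad, var in grads_and_vars:
        if grad is None:
            masked.append((grad, var))
            continue
        # Keep pruned weights pruned: the gradient is zeroed wherever
        # the corresponding weight is already exactly zero.
        mask = tf.cast(tf.not_equal(var, 0.0), grad.dtype)
        masked.append((grad * mask, var))
    return masked

The final tf.group then ties the model's sparsity op to each training step, so any re-pruning runs immediately after the weight update.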