当前位置: 首页>>代码示例>>Python>>正文


Python tensorflow.average_gradients方法代码示例

本文整理汇总了Python中my.tensorflow.average_gradients方法的典型用法代码示例。如果您正苦于以下问题:Python tensorflow.average_gradients方法的具体用法?Python tensorflow.average_gradients怎么用?Python tensorflow.average_gradients使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在my.tensorflow的用法示例。


在下文中一共展示了tensorflow.average_gradients方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: __init__

# 需要导入模块: from my import tensorflow [as 别名]
# 或者: from my.tensorflow import average_gradients [as 别名]
def __init__(self, config, models):
    """Build a multi-GPU training graph for an Adadelta-optimized model.

    Args:
        config: configuration object; ``init_lr`` seeds the optimizer.
        models: one Model replica per GPU; the first replica supplies the
            shared variable list, global step and summary op.
    """
    primary = models[0]
    assert isinstance(primary, Model)
    self.config = config
    self.model = primary
    self.opt = tf.train.AdadeltaOptimizer(config.init_lr)
    self.var_list = primary.get_var_list()
    self.global_step = primary.get_global_step()
    self.summary = primary.summary
    self.models = models

    losses, grads_list = [], []
    for idx, tower in enumerate(models):
        # Each replica computes its own loss and gradients on its own GPU.
        with tf.name_scope("grads_{}".format(idx)), tf.device("/gpu:{}".format(idx)):
            tower_loss = tower.get_loss()
            losses.append(tower_loss)
            grads_list.append(self.opt.compute_gradients(tower_loss, var_list=self.var_list))

    # Average the per-tower losses/gradients, then apply one shared update.
    self.loss = tf.add_n(losses) / len(losses)
    self.grads = average_gradients(grads_list)
    self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
开发者ID:IsaacChanghau,项目名称:AmusingPythonCodes,代码行数:24,代码来源:trainer.py

示例2: __init__

# 需要导入模块: from my import tensorflow [as 别名]
# 或者: from my.tensorflow import average_gradients [as 别名]
def __init__(self, config, models):
    """Build a multi-device training graph for an Adam-optimized model.

    Args:
        config: configuration object; reads ``init_lr`` for the optimizer
            and ``device_type`` (e.g. "gpu" or "cpu") for tower placement.
        models: one Model replica per device; the first replica supplies
            the shared variable list, global step and summary op.
    """
    primary = models[0]
    assert isinstance(primary, Model)
    self.config = config
    self.model = primary
    self.opt = tf.train.AdamOptimizer(config.init_lr)
    self.var_list = primary.get_var_list()
    self.global_step = primary.get_global_step()
    self.summary = primary.summary
    self.models = models

    losses, grads_list = [], []
    for idx, tower in enumerate(models):
        # Pin each replica's loss/gradient computation to its own device.
        with tf.name_scope("grads_{}".format(idx)), tf.device("/{}:{}".format(config.device_type, idx)):
            tower_loss = tower.get_loss()
            losses.append(tower_loss)
            grads_list.append(self.opt.compute_gradients(tower_loss, var_list=self.var_list))

    # Average the per-tower losses/gradients, then apply one shared update.
    self.loss = tf.add_n(losses) / len(losses)
    self.grads = average_gradients(grads_list)
    self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
开发者ID:IsaacChanghau,项目名称:AmusingPythonCodes,代码行数:24,代码来源:trainer.py

示例3: __init__

# 需要导入模块: from my import tensorflow [as 别名]
# 或者: from my.tensorflow import average_gradients [as 别名]
def __init__(self, config, models):
    """Build a multi-device training graph for an Adadelta-optimized model.

    Args:
        config: configuration object; reads ``init_lr`` for the optimizer
            and ``device_type`` (e.g. "gpu" or "cpu") for tower placement.
        models: one Model replica per device; the first replica supplies
            the shared variable list, global step and summary op.
    """
    lead = models[0]
    assert isinstance(lead, Model)
    self.config = config
    self.model = lead
    self.opt = tf.train.AdadeltaOptimizer(config.init_lr)
    self.var_list = lead.get_var_list()
    self.global_step = lead.get_global_step()
    self.summary = lead.summary
    self.models = models

    losses = []
    grads_list = []
    for i, replica in enumerate(models):
        # Compute each replica's loss and gradients on its assigned device.
        device = "/{}:{}".format(config.device_type, i)
        with tf.name_scope("grads_{}".format(i)), tf.device(device):
            replica_loss = replica.get_loss()
            replica_grads = self.opt.compute_gradients(replica_loss, var_list=self.var_list)
            losses.append(replica_loss)
            grads_list.append(replica_grads)

    # Mean loss across replicas; averaged gradients drive a single update.
    self.loss = tf.add_n(losses) / len(losses)
    self.grads = average_gradients(grads_list)
    self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
开发者ID:sld,项目名称:convai-bot-1337,代码行数:24,代码来源:trainer.py

示例4: __init__

# 需要导入模块: from my import tensorflow [as 别名]
# 或者: from my.tensorflow import average_gradients [as 别名]
def __init__(self, config, models):
    """Build a multi-device training graph with a configurable optimizer.

    Args:
        config: configuration object; reads ``optimizer`` ('adam' or 'gd'),
            ``init_lr``, ``num_steps``, ``device_type`` and ``freeze_mode``.
        models: one Model replica per device; the first replica supplies
            the shared variable list, global step and summary op.

    Raises:
        ValueError: if ``config.optimizer`` is neither 'adam' nor 'gd'.
    """
    primary = models[0]
    assert isinstance(primary, Model)
    self.config = config
    self.model = primary
    self.global_step = primary.get_global_step()

    # Choose the optimizer; 'gd' uses a staircase schedule that decays the
    # learning rate by 10x every num_steps/3 steps.
    if config.optimizer == 'adam':
        self.opt = tf.train.AdamOptimizer(config.init_lr)
    elif config.optimizer == 'gd':
        decayed_lr = tf.train.exponential_decay(config.init_lr,
                                                self.global_step,
                                                tf.to_int32(config.num_steps/3),
                                                0.1,
                                                staircase=True)
        self.opt = tf.train.GradientDescentOptimizer(decayed_lr)
    else:
        raise ValueError('Unsupported optimizer')

    self.var_list = primary.get_var_list()
    self.summary = primary.summary
    self.models = models

    losses, grads_list = [], []
    for idx, tower in enumerate(models):
        # Each replica computes loss/gradients on its assigned device.
        with tf.name_scope("grads_{}".format(idx)), tf.device("/{}:{}".format(config.device_type, idx)):
            tower_loss = tower.get_loss()
            losses.append(tower_loss)
            grads_list.append(self.opt.compute_gradients(tower_loss, var_list=self.var_list))

    self.loss = tf.add_n(losses) / len(losses)
    self.grads = average_gradients(grads_list)
    if config.freeze_mode:
        # Keep pruned (zero) weights frozen by zeroing their gradients.
        self.grads = zerout_gradients_for_zero_weights(self.grads, mode=config.freeze_mode)
    self.train_op = self.opt.apply_gradients(self.grads, global_step=self.global_step)
    if primary.get_sparsity_op():
        # Chain the sparsity op after the weight update so it observes
        # freshly updated weights.
        with tf.control_dependencies([self.train_op]):
            self.train_op = tf.group(self.train_op, primary.get_sparsity_op())
开发者ID:wenwei202,项目名称:iss-rnns,代码行数:39,代码来源:trainer.py


注:本文中的my.tensorflow.average_gradients方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。