

Python ops.init_scope method code examples

This article collects typical usage examples of the tensorflow.python.framework.ops.init_scope method in Python. If you are unsure what ops.init_scope does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples from the tensorflow.python.framework.ops module.


The following presents 15 code examples of the ops.init_scope method, sorted by popularity by default.
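Before diving into the examples, note what ops.init_scope actually does: it is a context manager that lifts operations out of any surrounding control-flow scopes and function-building graphs into the outermost context, which is why the optimizers below wrap slot and accumulator creation in it. The following is a minimal sketch of that shared pattern, assuming the TensorFlow 1.x-style APIs used throughout this article; the class name SlotDemoOptimizer and the slot name "extra_state" are illustrative and not taken from any of the cited projects.

import tensorflow as tf
from tensorflow.python.framework import ops

class SlotDemoOptimizer(tf.train.GradientDescentOptimizer):
    """Hypothetical wrapper that adds one extra per-variable slot."""

    def _create_slots(self, var_list):
        super(SlotDemoOptimizer, self)._create_slots(var_list)
        # init_scope lifts these ops out of any surrounding control-flow or
        # function-building scope, so the slot variables are created once in
        # the outermost graph instead of being re-created on every call.
        with ops.init_scope():
            for v in var_list:
                self._zeros_slot(v, "extra_state", self._name)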

Example 1: apply_gradients

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import init_scope [as alias]
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    all_vars = [ v for _,v in grads_and_vars]
    d_vars = []
    g_vars = []
    all_grads = [ g for g, _ in grads_and_vars ]
    for grad,var in grads_and_vars:
        if var in self.gan.d_vars():
            d_vars += [var]
        elif var in self.gan.g_vars():
            g_vars += [var]
        else:
            raise("Couldn't find var in g_vars or d_vars")

    with ops.init_scope():
        self.optimizer._create_slots([v for g,v in grads_and_vars])
    self._prepare()

    d_grads = all_grads[:len(d_vars)]
    g_grads = all_grads[len(d_vars):]
    return self.finite_differences(grads_and_vars, global_step, name, d_vars, g_vars, d_grads, g_grads) 
Author: HyperGAN, Project: HyperGAN, Lines: 22, Source: local_nash_optimizer.py

Example 2: apply_gradients

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import init_scope [as alias]
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    d_vars = []
    g_vars = []
    d_grads = []
    g_grads = []
    for grad,var in grads_and_vars:
        if var in self.gan.d_vars():
            d_vars += [var]
            d_grads += [grad]
        elif var in self.gan.g_vars():
            g_vars += [var]
            g_grads += [grad]
        else:
            raise ValidationException("Couldn't find var in g_vars or d_vars " + var.name)
    grad_list = d_grads + g_grads
    var_list = d_vars + g_vars

    with ops.init_scope():
        nms = [self._get_or_make_slot(v, tf.zeros_like(v), "nm", self._name) for v in var_list]
    self._prepare()

    nms = [self.get_slot(v, "nm") for v in var_list]
    momentum = []
    for grad, nm, w in zip(grad_list, nms, var_list):
        momentum += [-self._decay * nm]

    newgrads = [g + m for g, m in zip(grad_list, momentum)]

    new_grads_and_vars = list(zip(newgrads, var_list)).copy()

    op2 = self.optimizer.apply_gradients(new_grads_and_vars, global_step=global_step, name=name)
    with tf.get_default_graph().control_dependencies([op2]):
        save = tf.group(*[tf.assign(nm, (self.config.alpha or 0.666) * grad + (1 - (self.config.beta or 0.5)) * nm) for nm, grad in zip(nms, grad_list)])
        with tf.get_default_graph().control_dependencies([save]):
            return tf.no_op() 
Author: HyperGAN, Project: HyperGAN, Lines: 37, Source: negative_momentum_optimizer.py

Example 3: apply_gradients

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import init_scope [as alias]
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    d_vars = []
    g_vars = []
    for grad,var in grads_and_vars:
        if var in self.gan.d_vars():
            d_vars += [var]
        elif var in self.gan.g_vars():
            g_vars += [var]
        else:
            raise("Couldn't find var in g_vars or d_vars")

    if self.config.apply_on == "discriminator":
        ema_vars = d_vars
    else:
        ema_vars = d_vars + g_vars
    with ops.init_scope():
        [self._get_or_make_slot(v, v, "ema", self._name) for v in ema_vars]
        self.optimizer._create_slots([v for g,v in grads_and_vars])
        for name in self.optimizer.get_slot_names():
            for var in self.optimizer.variables():
                self._zeros_slot(var, "ema", self.name)

    self._prepare()
    ema_slots = [self.get_slot(v, "ema") for v in ema_vars]
    for name in self.optimizer.get_slot_names():
        for var in self.optimizer.variables():
            ema_vars += [var]
            ema_slots += [self._zeros_slot(var, "ema", self.name)]

    def calculate_ema(_v1,_v2):
        return self._decay *_v1 + (1-self._decay)*_v2
    op1 = tf.group(*[tf.assign(w, v) for w,v in zip(ema_slots, ema_vars)]) # store variables
    with tf.get_default_graph().control_dependencies([op1]):
        op2 = self.optimizer.apply_gradients(grads_and_vars, global_step=global_step, name=name)
        with tf.get_default_graph().control_dependencies([op2]):
            calculated_ema = [calculate_ema(v1, v2) for v1,v2 in zip(ema_slots, ema_vars)] # store variables
            op3 = tf.group(*[tf.assign(w, v) for w,v in zip(ema_vars, calculated_ema)])
            with tf.get_default_graph().control_dependencies([op3]):
                return tf.no_op() 
Author: HyperGAN, Project: HyperGAN, Lines: 41, Source: ema_optimizer.py

Example 4: _get_beta_accumulators

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import init_scope [as alias]
def _get_beta_accumulators(self):
		with ops.init_scope():
			# if context.executing_eagerly():
			# 	graph = None
			# else:
			graph = ops.get_default_graph()
			return (self._get_non_slot_variable("step", graph=graph),
					self._get_non_slot_variable("beta1_power", graph=graph),
					self._get_non_slot_variable("beta2_power", graph=graph)) 
Author: yyht, Project: BERT, Lines: 11, Source: lamb_utils.py

Example 5: _get_beta_accumulators

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import init_scope [as alias]
def _get_beta_accumulators(self):
        with ops.init_scope():
            # if context.executing_eagerly():
            #     graph = None
            # else:
            graph = ops.get_default_graph()
            return (self._get_non_slot_variable("step", graph=graph),
                    self._get_non_slot_variable("beta1_power", graph=graph),
                    self._get_non_slot_variable("beta2_power", graph=graph)) 
Author: yyht, Project: BERT, Lines: 11, Source: radam_utils.py
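Examples 4 and 5 only read the non-slot accumulators back; for context, here is a hedged sketch of how an Adam-style TF 1.x optimizer typically creates them. This is not the actual creation code from the BERT project, only the standard tf.train.Optimizer pattern that the _get_beta_accumulators methods above rely on; the class name AdamLikeOptimizer is hypothetical.

import tensorflow as tf


class AdamLikeOptimizer(tf.train.Optimizer):
    """Hypothetical skeleton showing where the accumulators come from."""

    def __init__(self, beta1=0.9, beta2=0.999, use_locking=False, name="AdamLike"):
        super(AdamLikeOptimizer, self).__init__(use_locking, name)
        self._beta1 = beta1
        self._beta2 = beta2

    def _create_slots(self, var_list):
        first_var = min(var_list, key=lambda x: x.name)
        # Non-slot variables later returned by _get_beta_accumulators().
        self._create_non_slot_variable(initial_value=0.0,
                                       name="step",
                                       colocate_with=first_var)
        self._create_non_slot_variable(initial_value=self._beta1,
                                       name="beta1_power",
                                       colocate_with=first_var)
        self._create_non_slot_variable(initial_value=self._beta2,
                                       name="beta2_power",
                                       colocate_with=first_var)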

Example 6: _get_custom_getter

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import init_scope [as alias]
def _get_custom_getter():
  """Returns a custom getter that this class's methods must be called under.

  All methods of this class must be called under a variable scope that was
  passed this custom getter. Example:

  ```python
  network = ConvNetBuilder(...)
  with tf.variable_scope('cg', custom_getter=network.get_custom_getter()):
    network.conv(...)
    # Call more methods of network here
  ```

  Currently, this custom getter only does anything if self.use_tf_layers is
  True. In that case, it causes variables to be stored as dtype
  self.variable_type, then casted to the requested dtype, instead of directly
  storing the variable as the requested dtype.
  """

  def inner_custom_getter(getter, *args, **kwargs):
    """Custom getter that forces variables to have type self.variable_type."""
    cast_to_float16 = False
    requested_dtype = kwargs["dtype"]
    if requested_dtype == tf.float16:
      # Only change the variable dtype if doing so does not decrease variable
      # precision.
      kwargs["dtype"] = tf.float32
      cast_to_float16 = True
    var = getter(*args, **kwargs)
    with tf_ops.init_scope():
      # This if statement is needed to guard the cast, because batch norm
      # assigns directly to the return value of this custom getter. The cast
      # makes the return value not a variable so it cannot be assigned. Batch
      # norm variables are always in fp32 so this if statement is never
      # triggered for them.
      if cast_to_float16:
        var = tf.cast(var, tf.float16)
    return var

  return inner_custom_getter 
Author: mlperf, Project: training_results_v0.5, Lines: 42, Source: estimator.py

Example 7: _get_beta_weights

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import init_scope [as alias]
def _get_beta_weights(self):
        with ops.init_scope():
            if context.executing_eagerly():
                graph = None
            else:
                graph = ops.get_default_graph()
        return (
            self._get_non_slot_variable("beta1_weight", graph=graph),
            self._get_non_slot_variable("beta2_weight", graph=graph),
        ) 
Author: facebookresearch, Project: qhoptim, Lines: 12, Source: qhadam.py

Example 8: __init__

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import init_scope [as alias]
def __init__(self, name):
    super(_SpanMetricsBase, self).__init__(name=name)
    with ops.init_scope():
      self.true_positive = self.add_weight(
          "true_positive",
          initializer=init_ops.zeros_initializer,
          dtype=dtypes.float32)
      self.false_positive = self.add_weight(
          "false_positive",
          initializer=init_ops.zeros_initializer,
          dtype=dtypes.float32)
      self.false_negative = self.add_weight(
          "false_negative",
          initializer=init_ops.zeros_initializer,
          dtype=dtypes.float32) 
Author: tensorflow, Project: text, Lines: 17, Source: span_metrics.py

Example 9: _get_beta_accumulators

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import init_scope [as alias]
def _get_beta_accumulators(self):
        with ops.init_scope():
            if context.executing_eagerly():
                graph = None
            else:
                graph = ops.get_default_graph()
            return (self._get_non_slot_variable("step", graph=graph),
                    self._get_non_slot_variable("beta1_power", graph=graph),
                    self._get_non_slot_variable("beta2_power", graph=graph)) 
Author: kerlomz, Project: captcha_trainer, Lines: 11, Source: RAdam.py

Example 10: _get_la_step_accumulators

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import init_scope [as alias]
def _get_la_step_accumulators(self):
        with ops.init_scope():
            if context.executing_eagerly():
                graph = None
            else:
                graph = ops.get_default_graph()
            return self._get_non_slot_variable("la_step", graph=graph) 
Author: michaelrzhang, Project: lookahead, Lines: 9, Source: lookahead_tensorflow.py

Example 11: _get_beta_accumulators

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import init_scope [as alias]
def _get_beta_accumulators(self):
    with ops.init_scope():
      if context.executing_eagerly():
        graph = None
      else:
        graph = ops.get_default_graph()
      return (self._get_non_slot_variable("beta1_power", graph=graph),
              self._get_non_slot_variable("beta2_power", graph=graph)) 
Author: mlperf, Project: training, Lines: 10, Source: lamb_optimizer_v1.py

Example 12: apply_gradients

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import init_scope [as alias]
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    var_list = [ v for _,v in grads_and_vars]
    with ops.init_scope():
        zt = [self._get_or_make_slot(v, v, "zt", self._name) for _,v in grads_and_vars]
        slots_list = []
        for name in self.optimizer.get_slot_names():
            for var in self.optimizer.variables():
                self._get_or_make_slot(var, var, "zt", "zt")
    self._prepare()

    def _name(post, s):
        ss = s.split(":")
        return ss[0] + "_" + post + "_dontsave"
    zt = [self.get_slot(v, "zt") for _,v in grads_and_vars]
    xt = [tf.Variable(v, name=_name("gigaxt",v.name)) for _,v in grads_and_vars]
    tmp = [tf.Variable(v, name=_name("gigatmp",v.name)) for _,v in grads_and_vars]
    xslots_list = []
    zslots_list = []
    tmpslots_list = []
    slots_vars = []
    for name in self.optimizer.get_slot_names():
        for var in self.optimizer.variables():
            slots_vars += [var]
            xslots_list.append(tf.Variable(var))
            zslots_list.append(self._get_or_make_slot(var, var, "zt", "zt"))
            tmpslots_list.append(tf.Variable(var, name=_name("gigaslottmp", var.name)))


    restored_vars = var_list + slots_vars
    zt_vars = zt + zslots_list
    xt_vars = xt + xslots_list
    tmp_vars = tmp + tmpslots_list
    all_grads = [ g for g, _ in grads_and_vars ]
    # store variables for resetting

    op1 = tf.group(*[tf.assign(w, v) for w,v in zip(tmp_vars, restored_vars)]) # store tmp_vars

    with tf.get_default_graph().control_dependencies([op1]):
        op2 = self.optimizer.apply_gradients(grads_and_vars.copy(), global_step=global_step, name=name)
        with tf.get_default_graph().control_dependencies([op2]):
            op3 = tf.group(*[tf.assign(w, v) for w,v in zip(xt_vars, restored_vars)]) # store xt^+1 in xt_vars
            with tf.get_default_graph().control_dependencies([op3]):
                op4 = tf.group(*[tf.assign(w, v) for w,v in zip(restored_vars, zt_vars)]) # restore vars to zt (different weights)
                with tf.get_default_graph().control_dependencies([op4]):
                    op5 = self.optimizer2.apply_gradients(grads_and_vars.copy(), global_step=global_step, name=name) # zt+1
                    with tf.get_default_graph().control_dependencies([op5]):
                        zt1_xt1 = [_restored_vars - _xt1_vars for _restored_vars, _xt1_vars in zip(restored_vars, xt_vars)]
                        St1 = [tf.minimum(1.0, tf.norm(_zt1_vars-_zt_vars) / tf.norm(_zt1_xt1)) for _zt1_vars, _zt_vars, _zt1_xt1 in zip(restored_vars, zt_vars, zt1_xt1)]
                        self.gan.add_metric('st1',tf.reduce_mean(tf.add_n(St1)/len(St1)))
                        #self.gan.add_metric('xzt1',tf.norm(xt_vars[0]-zt_vars[0]))
                        nextw = [_xt_t1 + _St1 * _zt1_xt1 for _xt_t1, _St1, _zt1_xt1 in zip(xt_vars, St1, zt1_xt1)]
                        op6 = tf.group(*[tf.assign(w, v) for w,v in zip(zt_vars, restored_vars)]) # set zt+1
                        with tf.get_default_graph().control_dependencies([op6]):
                            op7 = tf.group(*[tf.assign(w, v) for w,v in zip(restored_vars, nextw)]) # set xt+1
                            with tf.get_default_graph().control_dependencies([op7]):
                                return tf.no_op() 
Author: HyperGAN, Project: HyperGAN, Lines: 58, Source: giga_wolf_optimizer.py

Example 13: apply_gradients

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import init_scope [as alias]
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    var_list = [ v for _,v in grads_and_vars]
    d_vars = []
    g_vars = []
    for grad,var in grads_and_vars:
        if var in self.gan.d_vars():
            d_vars += [var]
        elif var in self.gan.g_vars():
            g_vars += [var]
        else:
            raise("Couldn't find var in g_vars or d_vars")

    with ops.init_scope():
        v1 = [self._zeros_slot(v, "v1", self._name) for _,v in grads_and_vars]
        if self.config.include_slots:
            for name in self.optimizer.get_slot_names():
                for var in self.optimizer.variables():
                    self._zeros_slot(var, "pm", "pm")
    self._prepare()

    v1 = [self.get_slot(v, "v1") for _,v in grads_and_vars]
    slots_list = []
    slots_vars = []
    if self.config.include_slots:
        for name in self.optimizer.get_slot_names():
            for var in self.optimizer.variables():
                slots_vars += [var]
                slots_list.append(self._zeros_slot(var, "pm", "pm"))


    current_vars = var_list + slots_vars
    tmp_vars = v1 + slots_list
    all_grads = [ g for g, _ in grads_and_vars ]

    op1 = tf.group(*[tf.assign(w, v) for w,v in zip(tmp_vars, current_vars)]) # store variables

    with tf.get_default_graph().control_dependencies([op1]):
        # store g2
        #op3 = tf.group(*[tf.assign_sub(v, self._lr_t*grad) for grad,v in grads_and_vars])
        op3 = self.optimizer.apply_gradients(grads_and_vars.copy(), global_step=global_step, name=name)
        with tf.get_default_graph().control_dependencies([op3]):

            def pmcombine(_v1,_v2):
                return _v2 + (_v2 - _v1)

            combined = [pmcombine(_v1, _v2) for _v1, _v2 in zip(tmp_vars, current_vars)]
            # restore v1, slots
            op5 = tf.group(*[ tf.assign(w,v) for w,v in zip(current_vars, combined)])
            with tf.get_default_graph().control_dependencies([op5]):
                return tf.no_op() 
Author: HyperGAN, Project: HyperGAN, Lines: 52, Source: predictive_method_optimizer.py

Example 14: apply_gradients

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import init_scope [as alias]
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    d_vars = []
    g_vars = []
    for grad,var in grads_and_vars:
        if var in self.gan.d_vars():
            d_vars += [var]
        elif var in self.gan.g_vars():
            g_vars += [var]
        else:
            raise Exception("Couldn't find var in g_vars or d_vars")

    if self.config.apply_on == "discriminator":
        depth_vars = d_vars
    else:
        depth_vars = d_vars + g_vars
    with ops.init_scope():
        [self._get_or_make_slot(v, v, "depth", self.name) for v in depth_vars]
        self.optimizer._create_slots([v for g,v in grads_and_vars])
        for name in self.optimizer.get_slot_names():
            for var in self.optimizer.variables():
                self._zeros_slot(var, "depth", self.name)

    self._prepare()
    depth_slots = [self.get_slot(v, "depth") for v in depth_vars]
    for name in self.optimizer.get_slot_names():
        for var in self.optimizer.variables():
            depth_vars += [var]
            depth_slots += [self._zeros_slot(var, "depth", self.name)]

    def calculate_depth(grads_and_vars_k,k=0):
        if(k == 0):
            return tf.group(*[tf.assign(v,nv) for v,nv in zip(depth_vars, depth_slots)])

        op2 = self.optimizer.apply_gradients(grads_and_vars_k, global_step=global_step, name=name)
        with tf.get_default_graph().control_dependencies([op2]):
            w_k_combined = [self._decay *w_k_1 + (1.-self._decay)*w_hat for w_hat, w_k_1 in zip(depth_slots, depth_vars)]
            op3 = tf.group(*[tf.assign(w, v) for w,v in zip(depth_slots, w_k_combined)]) # store variables
            with tf.get_default_graph().control_dependencies([op3]):
                d_loss, g_loss = self.gan.loss.sample
                d_grads = tf.gradients(d_loss, d_vars)
                g_grads = tf.gradients(g_loss, g_vars)
                grads_k_1 = d_grads + g_grads
                grads_and_vars_k_1 = list(zip(grads_k_1,depth_vars)).copy()
                return calculate_depth(grads_and_vars_k_1,k-1)

    op1 = tf.group(*[tf.assign(w, v) for w,v in zip(depth_slots, depth_vars)]) # store variables
    with tf.get_default_graph().control_dependencies([op1]):
        opd = calculate_depth(grads_and_vars, self._depth)
        with tf.get_default_graph().control_dependencies([opd]):
            return tf.no_op() 
Author: HyperGAN, Project: HyperGAN, Lines: 52, Source: depth_optimizer.py

Example 15: apply_gradients

# Required import: from tensorflow.python.framework import ops [as alias]
# Or: from tensorflow.python.framework.ops import init_scope [as alias]
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    all_vars = [ v for _,v in grads_and_vars]
    d_vars = []
    g_vars = []
    all_grads = [ g for g, _ in grads_and_vars ]
    for grad,var in grads_and_vars:
        if var in self.gan.d_vars():
            d_vars += [var]
        elif var in self.gan.g_vars():
            g_vars += [var]
        else:
            raise("Couldn't find var in g_vars or d_vars")

    with ops.init_scope():
        self.optimizer._create_slots([v for g,v in grads_and_vars])
    self._prepare()

    d_grads = all_grads[:len(d_vars)]
    g_grads = all_grads[len(d_vars):]
    if self.config.finite_differences:
        return self.finite_differences(grads_and_vars, global_step, name, d_vars, g_vars, d_grads, g_grads)
    dc_grads = sum([tf.reduce_sum(tf.square(d)) for d in d_grads])
    gc_grads = sum([tf.reduce_sum(tf.square(g)) for g in g_grads])
    gamma12 = tf.gradients(gc_grads, d_vars) + [tf.zeros_like(g) for g in g_vars]
    gamma21 = [tf.zeros_like(d) for d in d_vars] + tf.gradients(dc_grads, g_vars)

    gamma12 = [ tf.zeros_like(ddg) if _dg is None else _dg for ddg, _dg in zip(all_vars, gamma12) ]
    gamma21 = [ tf.zeros_like(ddg) if _dg is None else _dg for ddg, _dg in zip(all_vars, gamma21) ]
    __gamma12 = [ tf.reduce_sum(_gamma12) for _gamma12 in gamma12 ]
    __gamma21 = [ tf.reduce_sum(_gamma21) for _gamma21 in gamma21 ]
    #gamma12_metric = self.gan.ops.squash(sum(gamma12))
    gamma12_metric = self.gan.ops.squash(sum(__gamma12))
    self.gan.add_metric('gamma12', gamma12_metric)
    gamma21_metric = self.gan.ops.squash(sum(__gamma21))
    self.gan.add_metric('gamma21', gamma21_metric)
   
    new_grads = []
    for _gamma12, _gamma21, _grads in zip(gamma12, gamma21, all_grads):
        Eo = _grads - \
             0.5*self._alpha*_gamma21 +\
             0.5*self._alpha*_gamma12
        new_grads += [ Eo ]

    new_grads_and_vars = list(zip(new_grads, all_vars)).copy()

    return self.optimizer.apply_gradients(new_grads_and_vars, global_step=global_step, name=name) 
Author: HyperGAN, Project: HyperGAN, Lines: 48, Source: jr_optimizer.py


Note: The tensorflow.python.framework.ops.init_scope method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Please consult each project's license before distributing or using the code. Do not reproduce this article without permission.