

Python utils.GetTensorOpName Method Code Examples

This article collects typical usage examples of the Python method differential_privacy.dp_sgd.dp_optimizer.utils.GetTensorOpName. If you are unsure what utils.GetTensorOpName does or how to call it, the curated examples below should help. You can also explore other usage examples from the same module, differential_privacy.dp_sgd.dp_optimizer.utils.


The following presents 4 code examples of the utils.GetTensorOpName method, sorted by popularity by default.
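Before the examples, here is a minimal sketch of what GetTensorOpName presumably does (this is not the library's actual source): it strips the trailing ":<output_index>" from a tensor's name so the result can be reused as a TensorFlow op or variable name, since ':' is not permitted there. The _FakeTensor stand-in exists only so the sketch runs without TensorFlow.

# Minimal sketch, not the library's actual implementation.
def get_tensor_op_name(x):
    """Return the name of the op that produced tensor `x`, e.g. 'dense/kernel:0' -> 'dense/kernel'."""
    name = x.name
    return name.rsplit(":", 1)[0] if ":" in name else name


class _FakeTensor:
    # Stand-in with a .name attribute so the sketch runs without TensorFlow.
    def __init__(self, name):
        self.name = name


print(get_tensor_op_name(_FakeTensor("dense/kernel:0")))  # -> dense/kernel
print(get_tensor_op_name(_FakeTensor("global_step")))     # -> global_step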

Example 1: __init__

# Required import: from differential_privacy.dp_sgd.dp_optimizer import utils [as alias]
# Or: from differential_privacy.dp_sgd.dp_optimizer.utils import GetTensorOpName [as alias]
def __init__(self, learning_rate, eps_delta, sanitizer,
               sigma=None, use_locking=False, name="DPGradientDescent",
               batches_per_lot=1):
    """Construct a differentially private gradient descent optimizer.

    The optimizer uses a fixed privacy budget for each batch of training.

    Args:
      learning_rate: for GradientDescentOptimizer.
      eps_delta: EpsDelta pair for each epoch.
      sanitizer: for sanitizing the gradient.
      sigma: noise sigma. If None, use eps_delta pair to compute sigma;
        otherwise use supplied sigma directly.
      use_locking: use locking.
      name: name for the object.
      batches_per_lot: Number of batches in a lot.
    """

    super(DPGradientDescentOptimizer, self).__init__(learning_rate,
                                                     use_locking, name)

    # Also, if needed, define the gradient accumulators
    self._batches_per_lot = batches_per_lot
    self._grad_accum_dict = {}
    if batches_per_lot > 1:
      self._batch_count = tf.Variable(1, dtype=tf.int32, trainable=False,
                                      name="batch_count")
      var_list = tf.trainable_variables()
      with tf.variable_scope("grad_acc_for"):
        for var in var_list:
          v_grad_accum = tf.Variable(tf.zeros_like(var),
                                     trainable=False,
                                     name=utils.GetTensorOpName(var))
          self._grad_accum_dict[var.name] = v_grad_accum

    self._eps_delta = eps_delta
    self._sanitizer = sanitizer
    self._sigma = sigma 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 40, Source file: dp_optimizer.py
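A usage note on Example 1: when batches_per_lot > 1, the constructor creates one zero-initialized accumulator per trainable variable, named with GetTensorOpName under the "grad_acc_for" scope and keyed by the variable's full tensor name. A small illustrative sketch follows (TensorFlow 1.x-style API assumed via tf.compat.v1; the variable name is hypothetical):

import tensorflow.compat.v1 as tf  # TF 1.x-style graph mode assumed
tf.disable_eager_execution()

w = tf.get_variable("layer0/w", shape=[4, 2])
print(w.name)     # 'layer0/w:0' -- tensor name, contains ':0', used as the dict key
print(w.op.name)  # 'layer0/w'   -- what GetTensorOpName returns, usable as a variable name

# Inside the optimizer, the accumulator for `w` would therefore be created as
# roughly "grad_acc_for/layer0/w" and looked up via self._grad_accum_dict[w.name].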

Example 2: compute_sanitized_gradients

# Required import: from differential_privacy.dp_sgd.dp_optimizer import utils [as alias]
# Or: from differential_privacy.dp_sgd.dp_optimizer.utils import GetTensorOpName [as alias]
def compute_sanitized_gradients(self, loss, var_list=None,
                                  add_noise=True):
    """Compute the sanitized gradients.

    Args:
      loss: the loss tensor.
      var_list: the optional variables.
      add_noise: if True, add noise; gradients are always clipped.
    Returns:
      a list of sanitized gradients.
    Raises:
      TypeError: if var_list contains non-variable.
    """

    self._assert_valid_dtypes([loss])

    xs = [tf.convert_to_tensor(x) for x in var_list]
    px_grads = per_example_gradients.PerExampleGradients(loss, xs)
    sanitized_grads = []
    for px_grad, v in zip(px_grads, var_list):
      tensor_name = utils.GetTensorOpName(v)
      sanitized_grad = self._sanitizer.sanitize(
          px_grad, self._eps_delta, sigma=self._sigma,
          tensor_name=tensor_name, add_noise=add_noise,
          num_examples=self._batches_per_lot * tf.slice(
              tf.shape(px_grad), [0], [1]))
      sanitized_grads.append(sanitized_grad)

    return sanitized_grads 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 32, Source file: dp_optimizer.py
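One detail worth spelling out in Example 2: the num_examples argument passed to the sanitizer is batches_per_lot multiplied by the leading (per-example) dimension of px_grad, which tf.slice(tf.shape(px_grad), [0], [1]) extracts as a length-1 tensor. A small illustration with hypothetical shapes (TensorFlow 2.x eager mode used here only so the snippet prints directly):

import tensorflow as tf

px_grad = tf.zeros([32, 10, 5])                     # 32 per-example gradients (hypothetical shape)
batch_size = tf.slice(tf.shape(px_grad), [0], [1])  # -> [32], the leading dimension
batches_per_lot = 4

print(batch_size.numpy())                           # [32]
print((batches_per_lot * batch_size).numpy())       # [128] examples per lot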

Example 3: __init__

# Required import: from differential_privacy.dp_sgd.dp_optimizer import utils [as alias]
# Or: from differential_privacy.dp_sgd.dp_optimizer.utils import GetTensorOpName [as alias]
def __init__(self, learning_rate, eps_delta, sanitizer,
               sigma=None, use_locking=False, name="DPGradientDescent",
               batches_per_lot=1):
    """Construct a differentially private gradient descent optimizer.

    The optimizer uses a fixed privacy budget for each batch of training.

    Args:
      learning_rate: for GradientDescentOptimizer.
      eps_delta: EpsDelta pair for each epoch.
      sanitizer: for sanitizing the gradient.
      sigma: noise sigma. If None, use eps_delta pair to compute sigma;
        otherwise use supplied sigma directly.
      use_locking: use locking.
      name: name for the object.
      batches_per_lot: Number of batches in a lot.
    """

    super(DPGradientDescentOptimizer, self).__init__(learning_rate,
                                                     use_locking, name)
    # Also, if needed, define the gradient accumulators
    self._batches_per_lot = batches_per_lot
    self._grad_accum_dict = {}
    if batches_per_lot > 1:
      self._batch_count = tf.Variable(1, dtype=tf.int32, trainable=False,
                                      name="batch_count")
      var_list = tf.trainable_variables()
      with tf.variable_scope("grad_acc_for"):
        for var in var_list:
          v_grad_accum = tf.Variable(tf.zeros_like(var),
                                     trainable=False,
                                     name=utils.GetTensorOpName(var))
          self._grad_accum_dict[var.name] = v_grad_accum

    self._eps_delta = eps_delta
    self._sanitizer = sanitizer
    self._sigma = sigma 
Developer ID: ratschlab, Project: RGAN, Lines of code: 39, Source file: dp_optimizer.py

Example 4: compute_sanitized_gradients

# Required import: from differential_privacy.dp_sgd.dp_optimizer import utils [as alias]
# Or: from differential_privacy.dp_sgd.dp_optimizer.utils import GetTensorOpName [as alias]
def compute_sanitized_gradients(self, loss, var_list=None,
                                  add_noise=True):
    """Compute the sanitized gradients.

    Args:
      loss: the loss tensor.
      var_list: the optional variables.
      add_noise: if True, add noise; gradients are always clipped.
    Returns:
      a list of sanitized gradients.
    Raises:
      TypeError: if var_list contains non-variable.
    """

    self._assert_valid_dtypes([loss])

    xs = [tf.convert_to_tensor(x) for x in var_list]
    # Per-example gradients: unstack the per-example loss and differentiate each
    # element separately. This replaces the original call to
    # per_example_gradients.PerExampleGradients(loss, xs); the earlier workaround of
    # adding a dummy batch dimension with tf.expand_dims is dropped here.
    loss_list = tf.unstack(loss, axis=0)
    px_grads_byexample = [tf.gradients(l, xs) for l in loss_list]
    # Regroup so that px_grads[v] is the list of per-example gradients for xs[v].
    px_grads = [[x[v] for x in px_grads_byexample] for v in range(len(xs))]
    sanitized_grads = []
    for px_grad, v in zip(px_grads, var_list):
      tensor_name = utils.GetTensorOpName(v)
      sanitized_grad = self._sanitizer.sanitize(
          px_grad, self._eps_delta, sigma=self._sigma,
          tensor_name=tensor_name, add_noise=add_noise,
          num_examples=self._batches_per_lot * tf.slice(
              tf.shape(px_grad), [0], [1]))
      sanitized_grads.append(sanitized_grad)

    return sanitized_grads 
Developer ID: ratschlab, Project: RGAN, Lines of code: 39, Source file: dp_optimizer.py
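Example 4 (from the RGAN project) differs from Example 2 in how per-example gradients are obtained: instead of per_example_gradients.PerExampleGradients, the per-example loss vector is unstacked and each element is differentiated separately. A condensed sketch of that pattern, with hypothetical shapes and a hypothetical variable, assuming the TensorFlow 1.x graph-mode API via tf.compat.v1:

import tensorflow.compat.v1 as tf  # TF 1.x-style graph mode assumed
tf.disable_eager_execution()

x = tf.placeholder(tf.float32, [3, 4])                     # batch of 3 examples
w = tf.get_variable("w", shape=[4, 1])
per_example_loss = tf.reduce_sum(tf.matmul(x, w), axis=1)  # shape [3], one loss per example

loss_list = tf.unstack(per_example_loss, axis=0)           # 3 scalar losses
grads_by_example = [tf.gradients(l, [w])[0] for l in loss_list]
px_grads_w = tf.stack(grads_by_example)                    # shape [3, 4, 1]: per-example gradients of w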


Note: The differential_privacy.dp_sgd.dp_optimizer.utils.GetTensorOpName examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets come from open-source projects contributed by their respective authors, who retain the copyright; consult each project's license before redistributing or using the code, and do not reproduce this article without permission.