This article collects typical usage examples of the Python method differential_privacy.dp_sgd.dp_optimizer.utils.GetTensorOpName. If you are wondering what utils.GetTensorOpName does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore the module that contains this method, differential_privacy.dp_sgd.dp_optimizer.utils, for further usage examples.
Four code examples of utils.GetTensorOpName are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
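For context, GetTensorOpName maps a tensor or variable to the name of the op that produced it: TensorFlow tensor names carry a trailing ':<output index>' (for example 'hidden/weights:0'), and that suffix is not legal inside an op or variable name, so the helper strips it. The following is a minimal sketch of the expected behavior, not necessarily the repository's exact implementation:

# Hedged sketch of GetTensorOpName's behavior; the real helper lives in
# differential_privacy/dp_sgd/dp_optimizer/utils.py.
def GetTensorOpName(x):
  """Return the op-level name of tensor/variable x, i.e. x.name with any
  trailing ':<output index>' removed, so it can be reused in op names."""
  parts = x.name.rsplit(":", 1)
  return parts[0] if len(parts) > 1 else x.name

For a variable named 'hidden/weights:0' this yields 'hidden/weights', which the examples below reuse to name gradient accumulators and sanitized gradients.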
Example 1: __init__
# Required import: from differential_privacy.dp_sgd.dp_optimizer import utils [as alias]
# Or: from differential_privacy.dp_sgd.dp_optimizer.utils import GetTensorOpName [as alias]
def __init__(self, learning_rate, eps_delta, sanitizer,
             sigma=None, use_locking=False, name="DPGradientDescent",
             batches_per_lot=1):
  """Construct a differentially private gradient descent optimizer.

  The optimizer uses a fixed privacy budget for each batch of training.

  Args:
    learning_rate: for GradientDescentOptimizer.
    eps_delta: EpsDelta pair for each epoch.
    sanitizer: for sanitizing the gradient.
    sigma: noise sigma. If None, use the eps_delta pair to compute sigma;
      otherwise use the supplied sigma directly.
    use_locking: use locking.
    name: name for the object.
    batches_per_lot: number of batches in a lot.
  """
  super(DPGradientDescentOptimizer, self).__init__(learning_rate,
                                                   use_locking, name)

  # Also, if needed, define the gradient accumulators.
  self._batches_per_lot = batches_per_lot
  self._grad_accum_dict = {}
  if batches_per_lot > 1:
    self._batch_count = tf.Variable(1, dtype=tf.int32, trainable=False,
                                    name="batch_count")
    var_list = tf.trainable_variables()
    with tf.variable_scope("grad_acc_for"):
      for var in var_list:
        # Name each accumulator after the op name of its variable; this is
        # where GetTensorOpName is needed, since ":" is not allowed in names.
        v_grad_accum = tf.Variable(tf.zeros_like(var),
                                   trainable=False,
                                   name=utils.GetTensorOpName(var))
        self._grad_accum_dict[var.name] = v_grad_accum

  self._eps_delta = eps_delta
  self._sanitizer = sanitizer
  self._sigma = sigma
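A hedged usage sketch of this constructor follows. The sanitizer, accountant, and EpsDelta helpers are taken from the same repository, but their exact signatures here are assumptions, and the dataset size, clipping bound, and learning rate are illustrative placeholders; the per-example loss shape follows Examples 2 and 4.

import tensorflow as tf
from differential_privacy.dp_sgd.dp_optimizer import dp_optimizer, sanitizers, utils
from differential_privacy.privacy_accountant.tf import accountant

NUM_TRAINING_EXAMPLES = 60000   # illustrative dataset size
L2NORM_BOUND = 4.0              # per-example gradient clipping bound

priv_accountant = accountant.GaussianMomentsAccountant(NUM_TRAINING_EXAMPLES)
gaussian_sanitizer = sanitizers.AmortizedGaussianSanitizer(
    priv_accountant, [L2NORM_BOUND, True])

dp_opt = dp_optimizer.DPGradientDescentOptimizer(
    learning_rate=0.05,
    eps_delta=utils.EpsDelta(1.0, 1e-5),  # per-epoch privacy budget
    sanitizer=gaussian_sanitizer,
    sigma=4.0,            # if set, overrides the sigma derived from eps_delta
    batches_per_lot=1)

# per_example_loss is a [batch_size] tensor of per-example losses:
# train_op = dp_opt.minimize(per_example_loss, global_step=global_step)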
Example 2: compute_sanitized_gradients
# Required import: from differential_privacy.dp_sgd.dp_optimizer import utils [as alias]
# Or: from differential_privacy.dp_sgd.dp_optimizer.utils import GetTensorOpName [as alias]
def compute_sanitized_gradients(self, loss, var_list=None,
                                add_noise=True):
  """Compute the sanitized gradients.

  Args:
    loss: the loss tensor.
    var_list: the optional variables.
    add_noise: if true, then add noise. Always clip.
  Returns:
    A list of sanitized gradients, one per variable in var_list.
  Raises:
    TypeError: if var_list contains a non-variable.
  """
  self._assert_valid_dtypes([loss])

  xs = [tf.convert_to_tensor(x) for x in var_list]
  px_grads = per_example_gradients.PerExampleGradients(loss, xs)
  sanitized_grads = []
  for px_grad, v in zip(px_grads, var_list):
    # Use the variable's op name so the sanitized gradient is tracked under
    # a well-formed name.
    tensor_name = utils.GetTensorOpName(v)
    sanitized_grad = self._sanitizer.sanitize(
        px_grad, self._eps_delta, sigma=self._sigma,
        tensor_name=tensor_name, add_noise=add_noise,
        num_examples=self._batches_per_lot * tf.slice(
            tf.shape(px_grad), [0], [1]))
    sanitized_grads.append(sanitized_grad)

  return sanitized_grads
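Most of the privacy machinery sits inside self._sanitizer.sanitize(...): conceptually it clips each per-example gradient to a fixed L2 bound, sums the clipped gradients, adds Gaussian noise proportional to sigma and the bound, and averages over the lot. The standalone sketch below illustrates that mechanism for one variable; it is not the repository's sanitizer, and the bound and sigma values are placeholders.

import tensorflow as tf

def sanitize_sketch(px_grad, l2norm_bound=4.0, sigma=4.0):
  """Illustrative Gaussian-mechanism sanitizer for one variable's
  per-example gradients px_grad of shape [num_examples, ...]."""
  num_examples = tf.cast(tf.shape(px_grad)[0], tf.float32)
  # Clip each example's gradient to L2 norm <= l2norm_bound.
  flat = tf.reshape(px_grad, [tf.shape(px_grad)[0], -1])
  clipped = tf.clip_by_norm(flat, l2norm_bound, axes=[1])
  # Sum over examples, add calibrated Gaussian noise, and average.
  summed = tf.reduce_sum(clipped, axis=0)
  noised = summed + tf.random_normal(tf.shape(summed),
                                     stddev=l2norm_bound * sigma)
  return tf.reshape(noised / num_examples, tf.shape(px_grad)[1:])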
Example 3: __init__
# Required import: from differential_privacy.dp_sgd.dp_optimizer import utils [as alias]
# Or: from differential_privacy.dp_sgd.dp_optimizer.utils import GetTensorOpName [as alias]
def __init__(self, learning_rate, eps_delta, sanitizer,
             sigma=None, use_locking=False, name="DPGradientDescent",
             batches_per_lot=1):
  """Construct a differentially private gradient descent optimizer.

  The optimizer uses a fixed privacy budget for each batch of training.

  Args:
    learning_rate: for GradientDescentOptimizer.
    eps_delta: EpsDelta pair for each epoch.
    sanitizer: for sanitizing the gradient.
    sigma: noise sigma. If None, use the eps_delta pair to compute sigma;
      otherwise use the supplied sigma directly.
    use_locking: use locking.
    name: name for the object.
    batches_per_lot: number of batches in a lot.
  """
  super(DPGradientDescentOptimizer, self).__init__(learning_rate,
                                                   use_locking, name)

  # Also, if needed, define the gradient accumulators.
  self._batches_per_lot = batches_per_lot
  self._grad_accum_dict = {}
  if batches_per_lot > 1:
    self._batch_count = tf.Variable(1, dtype=tf.int32, trainable=False,
                                    name="batch_count")
    var_list = tf.trainable_variables()
    with tf.variable_scope("grad_acc_for"):
      for var in var_list:
        # Name each accumulator after the op name of its variable; this is
        # where GetTensorOpName is needed, since ":" is not allowed in names.
        v_grad_accum = tf.Variable(tf.zeros_like(var),
                                   trainable=False,
                                   name=utils.GetTensorOpName(var))
        self._grad_accum_dict[var.name] = v_grad_accum

  self._eps_delta = eps_delta
  self._sanitizer = sanitizer
  self._sigma = sigma
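Example 3 shows the same constructor as Example 1; what is worth spelling out is how the accumulators it names with utils.GetTensorOpName are used when batches_per_lot > 1: sanitized gradients are summed over a lot of batches and the variables are only updated once per lot. The sketch below is a hedged illustration of that accumulate-then-apply pattern, not the repository's apply_gradients (which does the batch counting in-graph with self._batch_count and tf.cond); here the choice between the two ops is left to the training loop, and apply_fn stands in for the parent optimizer's apply_gradients.

import tensorflow as tf

def build_lot_ops(self, sanitized_grads, var_list, apply_fn):
  """Return (accum_op, apply_op): run accum_op on every batch, and apply_op
  once every self._batches_per_lot batches."""
  # Add this batch's sanitized gradients to the per-variable accumulators
  # that __init__ named with utils.GetTensorOpName.
  accum_op = tf.group(*[
      tf.assign_add(self._grad_accum_dict[v.name], g)
      for g, v in zip(sanitized_grads, var_list)])

  # Average the accumulated gradients over the lot, apply them, then reset
  # the accumulators for the next lot.
  avg_grads_and_vars = [
      (self._grad_accum_dict[v.name] / float(self._batches_per_lot), v)
      for v in var_list]
  with tf.control_dependencies([apply_fn(avg_grads_and_vars)]):
    apply_op = tf.group(*[
        tf.assign(self._grad_accum_dict[v.name], tf.zeros_like(v))
        for v in var_list])
  return accum_op, apply_op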
Example 4: compute_sanitized_gradients
# Required import: from differential_privacy.dp_sgd.dp_optimizer import utils [as alias]
# Or: from differential_privacy.dp_sgd.dp_optimizer.utils import GetTensorOpName [as alias]
def compute_sanitized_gradients(self, loss, var_list=None,
                                add_noise=True):
  """Compute the sanitized gradients.

  Args:
    loss: the per-example loss tensor, shape [batch_size].
    var_list: the optional variables.
    add_noise: if true, then add noise. Always clip.
  Returns:
    A list of sanitized gradients, one per variable in var_list.
  Raises:
    TypeError: if var_list contains a non-variable.
  """
  self._assert_valid_dtypes([loss])

  xs = [tf.convert_to_tensor(x) for x in var_list]
  # TODO: check this change.
  # Compute per-example gradients manually: unstack the per-example loss
  # along the batch dimension, differentiate each element separately, then
  # regroup the results per variable.
  loss_list = tf.unstack(loss, axis=0)
  px_grads_byexample = [tf.gradients(l, xs) for l in loss_list]
  px_grads = [[x[v] for x in px_grads_byexample] for v in range(len(xs))]
  # Earlier alternatives kept for reference:
  # px_grads = tf.gradients(loss, xs)
  # px_grads = [tf.expand_dims(x, 0) for x in px_grads]  # dummy batch dim of 1
  # px_grads = per_example_gradients.PerExampleGradients(loss, xs)

  sanitized_grads = []
  for px_grad, v in zip(px_grads, var_list):
    tensor_name = utils.GetTensorOpName(v)
    sanitized_grad = self._sanitizer.sanitize(
        px_grad, self._eps_delta, sigma=self._sigma,
        tensor_name=tensor_name, add_noise=add_noise,
        num_examples=self._batches_per_lot * tf.slice(
            tf.shape(px_grad), [0], [1]))
    sanitized_grads.append(sanitized_grad)

  return sanitized_grads
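Example 4 swaps per_example_gradients.PerExampleGradients for a manual loop: the per-example loss is unstacked along the batch dimension, tf.gradients is called once per example, and the results are regrouped per variable before being handed to the sanitizer. The toy snippet below demonstrates that pattern on an assumed linear model (all names and shapes are illustrative); it stacks the regrouped gradients explicitly, which Example 4 leaves implicit.

import tensorflow as tf

# Toy per-example-gradient computation in the style of Example 4.
batch_size, dim = 4, 3
x = tf.placeholder(tf.float32, [batch_size, dim])
y = tf.placeholder(tf.float32, [batch_size])
w = tf.get_variable("w", [dim], initializer=tf.zeros_initializer())

# Per-example squared error, shape [batch_size].
per_example_loss = tf.square(tf.tensordot(x, w, axes=1) - y)

xs = [w]
loss_list = tf.unstack(per_example_loss, axis=0)            # one scalar per example
px_grads_byexample = [tf.gradients(l, xs) for l in loss_list]
# Regroup per variable: stack each variable's per-example gradients into a
# [batch_size, ...] tensor, the layout the sanitizer expects.
px_grads = [tf.stack([g[v] for g in px_grads_byexample])
            for v in range(len(xs))]

This loop-based variant is slower than PerExampleGradients but works for ops that the per-example-gradient registry does not cover.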