本文整理汇总了Python中cleverhans.utils_tf.batch_eval方法的典型用法代码示例。如果您正苦于以下问题：Python utils_tf.batch_eval方法的具体用法？Python utils_tf.batch_eval怎么用？Python utils_tf.batch_eval使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块cleverhans.utils_tf的用法示例。
在下文中一共展示了utils_tf.batch_eval方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: fast_gradient_sign_method
# 需要导入模块: from cleverhans import utils_tf [as 别名]
# 或者: from cleverhans.utils_tf import batch_eval [as 别名]
def fast_gradient_sign_method(sess, model, X, Y, eps, clip_min=None,
                              clip_max=None, batch_size=256):
    """
    Craft adversarial examples for X with a single FGSM step.

    :param sess: TF session used for evaluation.
    :param model: callable mapping an input placeholder to the model's
        predictions (assumed post-softmax — confirm against caller).
    :param X: input samples (numpy array, batch on axis 0).
    :param Y: labels for X, same leading dimension as X.
    :param eps: perturbation magnitude passed to fgsm.
    :param clip_min: optional lower bound applied to adversarial values.
    :param clip_max: optional upper bound applied to adversarial values.
    :param batch_size: number of samples evaluated per batch.
    :return: numpy array of adversarial examples, same shape as X.
    """
    # Placeholders shaped like one sample, batch axis left open.
    input_ph = tf.placeholder(tf.float32, shape=(None,) + X.shape[1:])
    label_ph = tf.placeholder(tf.float32, shape=(None,) + Y.shape[1:])

    # Symbolic FGSM perturbation of the input.
    adv_op = fgsm(input_ph, model(input_ph), eps=eps,
                  clip_min=clip_min, clip_max=clip_max, y=label_ph)

    # Evaluate in batches with Keras forced into inference mode.
    adv_samples, = batch_eval(sess, [input_ph, label_ph], [adv_op],
                              [X, Y], feed={K.learning_phase(): 0},
                              args={'batch_size': batch_size})
    return adv_samples
示例2: calculate_signed_gradient_x
# 需要导入模块: from cleverhans import utils_tf [as 别名]
# 或者: from cleverhans.utils_tf import batch_eval [as 别名]
def calculate_signed_gradient_x(sess, x, predictions, X_test):
    """
    Evaluate the sign of the loss gradient w.r.t. the input over X_test.

    :param sess: TF session used for evaluation.
    :param x: input placeholder feeding the model.
    :param predictions: symbolic model output built from x.
    :param X_test: numpy array of samples to evaluate.
    :return: numpy array of signed gradients, one per sample in X_test.
    """
    # Symbolic sign(d loss / d x), evaluated batch-wise below.
    sign_op = get_gradient_sign_tf(x, predictions)
    signed_grads, = batch_eval(sess, [x], [sign_op], [X_test])
    return signed_grads
示例3: basic_iterative_method
# 需要导入模块: from cleverhans import utils_tf [as 别名]
# 或者: from cleverhans.utils_tf import batch_eval [as 别名]
def basic_iterative_method(sess, model, X, Y, eps, eps_iter, nb_iter=50,
                           clip_min=None, clip_max=None, batch_size=256):
    """
    Craft adversarial examples with the Basic Iterative Method (BIM):
    repeated FGSM steps of size eps_iter, projected back into an
    eps-ball around the original samples after every step.

    :param sess: TF session used for evaluation.
    :param model: callable mapping an input placeholder to the model's
        predictions (assumed post-softmax — confirm against caller);
        must also expose predict_classes() for misclassification checks.
    :param X: input samples (numpy array, batch on axis 0).
    :param Y: one-hot labels for X (argmax over axis 1 is used below).
    :param eps: radius of the overall perturbation ball around X.
    :param eps_iter: per-iteration FGSM step size.
    :param nb_iter: number of BIM iterations.
    :param clip_min: optional lower bound applied to adversarial values.
    :param clip_max: optional upper bound applied to adversarial values.
    :param batch_size: number of samples evaluated per batch.
    :return: tuple (its, results) where its maps sample index -> first
        iteration at which it became misclassified (default nb_iter-1),
        and results has shape (nb_iter,) + X.shape holding the
        adversarial inputs after each iteration.
    """
    # Define TF placeholders for the input and output.
    x = tf.placeholder(tf.float32, shape=(None,) + X.shape[1:])
    y = tf.placeholder(tf.float32, shape=(None,) + Y.shape[1:])

    # Build the FGSM step graph ONCE, outside the loop: the op is
    # loop-invariant, and constructing it per iteration would keep
    # adding nodes to the TF1 default graph (graph bloat, slowdown).
    adv_x = fgsm(
        x, model(x), eps=eps_iter,
        clip_min=clip_min, clip_max=clip_max, y=y
    )

    # results holds the adversarial inputs at each iteration of BIM;
    # shape (nb_iter, n_samples, n_rows, n_cols, n_channels).
    results = np.zeros((nb_iter, X.shape[0]) + X.shape[1:])

    # Start from the clean samples and fix the eps-ball bounds.
    X_adv = X
    X_min = X_adv - eps
    X_max = X_adv + eps

    # its maps each sample index to the iteration at which it first
    # became misclassified; default is the very last iteration.
    its = defaultdict(lambda: nb_iter - 1)
    # out tracks which samples have already been misclassified.
    out = set()

    print('Running BIM iterations...')
    for i in tqdm(range(nb_iter)):
        # One FGSM step, with Keras forced into inference mode.
        X_adv, = batch_eval(
            sess, [x, y], [adv_x],
            [X_adv, Y], feed={K.learning_phase(): 0},
            args={'batch_size': batch_size}
        )
        # Project back into the eps-ball around the original X.
        X_adv = np.clip(X_adv, X_min, X_max)
        results[i] = X_adv

        # Record the first iteration at which each sample flips.
        predictions = model.predict_classes(X_adv, batch_size=512,
                                            verbose=0)
        misclassifieds = np.where(predictions != Y.argmax(axis=1))[0]
        for elt in misclassifieds:
            if elt not in out:
                its[elt] = i
                out.add(elt)
    return its, results