This article collects typical usage examples of the Python method cleverhans.utils_tf.batch_eval. If you are wondering how to use utils_tf.batch_eval in Python, or what it looks like in practice, the curated examples below may help. You can also explore further usage of the module it belongs to, cleverhans.utils_tf.
The following presents 3 code examples of utils_tf.batch_eval, sorted by popularity by default.
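All three examples share the same calling convention for batch_eval. As a point of reference, here is a minimal sketch of that convention (the names x, logits, and X_test are illustrative; the signature shown is the cleverhans v1-era one used by these examples):

# batch_eval(sess, tf_inputs, tf_outputs, numpy_inputs, feed=None, args=None)
# runs each tensor in tf_outputs over numpy_inputs in batches of
# args['batch_size'] and concatenates the per-batch results.
outputs, = batch_eval(sess, [x], [logits], [X_test],
                      feed={K.learning_phase(): 0},
                      args={'batch_size': 256})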
Example 1: fast_gradient_sign_method
# Required import: from cleverhans import utils_tf [as alias]
# Or: from cleverhans.utils_tf import batch_eval [as alias]
# This snippet also assumes the following imports (cleverhans v1-era API;
# the exact home of fgsm varies across versions):
#   import tensorflow as tf
#   import keras.backend as K
#   from cleverhans.attacks_tf import fgsm
#   from cleverhans.utils_tf import batch_eval
def fast_gradient_sign_method(sess, model, X, Y, eps, clip_min=None,
                              clip_max=None, batch_size=256):
    """
    Craft adversarial examples for X using the Fast Gradient Sign Method.
    :param sess: TF session in which the model's graph lives
    :param model: Keras model wrapper; model(x) must return the
        post-softmax predictions tensor
    :param X: numpy array of input samples
    :param Y: numpy array of one-hot labels for X
    :param eps: magnitude of the FGSM perturbation
    :param clip_min: optional lower bound for the adversarial samples
    :param clip_max: optional upper bound for the adversarial samples
    :param batch_size: batch size used by batch_eval
    :return: numpy array of adversarial examples with the same shape as X
    """
    # Define TF placeholders for the input and output
    x = tf.placeholder(tf.float32, shape=(None,) + X.shape[1:])
    y = tf.placeholder(tf.float32, shape=(None,) + Y.shape[1:])
    # Symbolic adversarial examples: a single FGSM step of size eps
    adv_x = fgsm(
        x, model(x), eps=eps,
        clip_min=clip_min,
        clip_max=clip_max, y=y
    )
    # Evaluate adv_x over X, Y in batches, with Keras in inference mode
    X_adv, = batch_eval(
        sess, [x, y], [adv_x],
        [X, Y], feed={K.learning_phase(): 0},
        args={'batch_size': batch_size}
    )
    return X_adv
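A minimal usage sketch; sess, keras_model, X_test, and Y_test are hypothetical names for your own session, model, and data:

# Hypothetical usage; keras_model, X_test, Y_test are your own objects.
sess = K.get_session()
X_test_adv = fast_gradient_sign_method(sess, keras_model, X_test, Y_test,
                                       eps=0.3, clip_min=0., clip_max=1.,
                                       batch_size=256)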
Example 2: calculate_signed_gradient_x
# Required import: from cleverhans import utils_tf [as alias]
# Or: from cleverhans.utils_tf import batch_eval [as alias]
# get_gradient_sign_tf is assumed to be a helper defined alongside this
# snippet that returns tf.sign of the loss gradient w.r.t. the input x.
def calculate_signed_gradient_x(sess, x, predictions, X_test):
    # Symbolic sign of the gradient of the loss w.r.t. the input x
    signed_gradient = get_gradient_sign_tf(x, predictions)
    # Evaluate the signed gradient over X_test in batches
    X_test_signed_gradient, = batch_eval(sess, [x], [signed_gradient], [X_test])
    return X_test_signed_gradient
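Since this helper only returns the signed gradient, one FGSM step can then be composed by hand. A sketch, assuming x is the input placeholder and keras_model is your own model (both hypothetical names):

# Hypothetical usage; x, keras_model, X_test are your own objects.
preds = keras_model(x)
grad_sign = calculate_signed_gradient_x(sess, x, preds, X_test)
# One manual FGSM step of size 0.3, clipped to the valid input range
X_test_adv = np.clip(X_test + 0.3 * grad_sign, 0., 1.)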
Example 3: basic_iterative_method
# Required import: from cleverhans import utils_tf [as alias]
# Or: from cleverhans.utils_tf import batch_eval [as alias]
# This snippet additionally assumes:
#   import numpy as np
#   from collections import defaultdict
#   from tqdm import tqdm
def basic_iterative_method(sess, model, X, Y, eps, eps_iter, nb_iter=50,
                           clip_min=None, clip_max=None, batch_size=256):
    """
    Craft adversarial examples with the Basic Iterative Method (BIM):
    repeated FGSM steps of size eps_iter, projected after every step onto
    the L-infinity ball of radius eps around X.
    :param sess: TF session in which the model's graph lives
    :param model: Keras model wrapper; model(x) must return the
        post-softmax predictions tensor
    :param X: numpy array of input samples
    :param Y: numpy array of one-hot labels for X
    :param eps: maximum total perturbation (radius of the L-inf ball)
    :param eps_iter: FGSM step size per iteration
    :param nb_iter: number of BIM iterations
    :param clip_min: optional lower bound for the adversarial samples
    :param clip_max: optional upper bound for the adversarial samples
    :param batch_size: batch size used by batch_eval
    :return: (its, results), where its maps each sample index to the first
        iteration at which that sample became misclassified, and results
        holds the adversarial inputs produced at every iteration
    """
    # Define TF placeholders for the input and output
    x = tf.placeholder(tf.float32, shape=(None,) + X.shape[1:])
    y = tf.placeholder(tf.float32, shape=(None,) + Y.shape[1:])
    # Build the one-step FGSM graph once, outside the loop, so the TF
    # graph does not grow with every iteration
    adv_x = fgsm(
        x, model(x), eps=eps_iter,
        clip_min=clip_min, clip_max=clip_max, y=y
    )
    # results will hold the adversarial inputs at each iteration of BIM;
    # thus it will have shape (nb_iter, n_samples, n_rows, n_cols, n_channels)
    results = np.zeros((nb_iter, X.shape[0],) + X.shape[1:])
    # Initialize adversarial samples as the original samples, and set the
    # upper and lower bounds of the eps-ball around X
    X_adv = X
    X_min = X_adv - eps
    X_max = X_adv + eps
    print('Running BIM iterations...')
    # "its" keeps track of the iteration at which each sample becomes
    # misclassified. The default value is (nb_iter-1), the very last
    # iteration.
    its = defaultdict(lambda: nb_iter - 1)
    # "out" keeps track of which samples have already been misclassified
    out = set()
    for i in tqdm(range(nb_iter)):
        # One FGSM step from the current adversarial samples
        X_adv, = batch_eval(
            sess, [x, y], [adv_x],
            [X_adv, Y], feed={K.learning_phase(): 0},
            args={'batch_size': batch_size}
        )
        # Project back onto the eps-ball around the original samples
        X_adv = np.maximum(np.minimum(X_adv, X_max), X_min)
        results[i] = X_adv
        # Record the first iteration at which each sample is misclassified
        predictions = model.predict_classes(X_adv, batch_size=512, verbose=0)
        misclassifieds = np.where(predictions != Y.argmax(axis=1))[0]
        for elt in misclassifieds:
            if elt not in out:
                its[elt] = i
                out.add(elt)
    return its, results
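A sketch of how the two return values might be consumed; keras_model, X_test, and Y_test are again hypothetical names:

# Hypothetical usage; keras_model, X_test, Y_test are your own objects.
its, results = basic_iterative_method(sess, keras_model, X_test, Y_test,
                                      eps=0.3, eps_iter=0.03, nb_iter=50,
                                      clip_min=0., clip_max=1.)
# For each sample, take the adversarial version from the iteration at
# which it first fooled the model (or from the last iteration otherwise)
X_test_adv = np.asarray([results[its[i]][i] for i in range(len(X_test))])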