This article collects typical usage examples of the Python method cifar10_input.CIFAR10Data. If you are wondering how to use cifar10_input.CIFAR10Data, what it is for, or what calling it looks like in practice, the curated code samples here may help. You can also explore further usage examples from the module cifar10_input in which this class is defined.
Two code examples of cifar10_input.CIFAR10Data are shown below, sorted by popularity by default.
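Before the full examples, here is a minimal sketch of the interface both examples below rely on: CIFAR10Data is constructed from a directory containing the CIFAR-10 data and exposes the evaluation split through eval_data.xs (raw pixel values in [0, 255]) and eval_data.ys (integer labels). The data directory name here is hypothetical.

import numpy as np
import cifar10_input

# Hypothetical data directory; pass wherever the CIFAR-10 batches actually live.
cifar = cifar10_input.CIFAR10Data('cifar10_data')

# Both examples below read the held-out split through eval_data.xs / eval_data.ys.
images = cifar.eval_data.xs.astype(np.float32) / 255.0  # raw pixels scaled to [0, 1]
labels = cifar.eval_data.ys
print(images.shape, labels.shape)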
Example 1: evaluate_ch
# Required import: import cifar10_input [as alias]
# Or: from cifar10_input import CIFAR10Data [as alias]
# From its body, this snippet also assumes: numpy as np, tensorflow as tf, logging,
# the cleverhans attacks/utilities CarliniWagnerL2, ElasticNetMethod, set_log_level
# and batch_eval, plus a one_hot(labels, num_classes) helper.
def evaluate_ch(model, config, sess, norm='l1', bound=None, verbose=True):
    dataset = config['data']
    num_eval_examples = config['num_eval_examples']
    eval_batch_size = config['eval_batch_size']

    # Load the evaluation split of the requested dataset.
    if dataset == "mnist":
        from tensorflow.examples.tutorials.mnist import input_data
        mnist = input_data.read_data_sets('MNIST_data', one_hot=False)
        X = mnist.test.images[0:num_eval_examples, :].reshape(-1, 28, 28, 1)
        Y = mnist.test.labels[0:num_eval_examples]
        x_image = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
    else:
        import cifar10_input
        data_path = config["data_path"]
        cifar = cifar10_input.CIFAR10Data(data_path)
        X = cifar.eval_data.xs[0:num_eval_examples, :].astype(np.float32) / 255.0
        Y = cifar.eval_data.ys[0:num_eval_examples]
        x_image = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])

    # Only the l1 (elastic-net) attack is exercised here; the l2 branch is kept but unreachable.
    assert norm == 'l1'
    if norm == 'l2':
        attack = CarliniWagnerL2(model, sess)
        params = {'batch_size': eval_batch_size, 'binary_search_steps': 9}
    else:
        attack = ElasticNetMethod(model, sess, clip_min=0.0, clip_max=1.0)
        params = {'beta': 1e-2,
                  'decision_rule': 'L1',
                  'batch_size': eval_batch_size,
                  'learning_rate': 1e-2,
                  'max_iterations': 1000}

    if verbose:
        set_log_level(logging.DEBUG, name="cleverhans")

    # Build the attack graph and evaluate natural / adversarial predictions in batches.
    y = tf.placeholder(tf.int64, shape=[None, 10])
    params['y'] = y
    adv_x = attack.generate(x_image, **params)
    preds_adv = model.get_predicted_class(adv_x)
    preds_nat = model.get_predicted_class(x_image)
    all_preds, all_preds_adv, all_adv_x = batch_eval(
        sess, [x_image, y], [preds_nat, preds_adv, adv_x],
        [X, one_hot(Y, 10)], batch_size=eval_batch_size)
    print('acc nat', np.mean(all_preds == Y))
    print('acc adv', np.mean(all_preds_adv == Y))

    # Report perturbation sizes in the original pixel scale.
    if dataset == "cifar10":
        X *= 255.0
        all_adv_x *= 255.0
    if norm == 'l2':
        lps = np.sqrt(np.sum(np.square(all_adv_x - X), axis=(1, 2, 3)))
    else:
        lps = np.sum(np.abs(all_adv_x - X), axis=(1, 2, 3))
    print('mean lp: ', np.mean(lps))

    # Accuracy under a perturbation budget: a sample counts as robust at bound b if it is
    # still classified correctly or its perturbation exceeds b.
    for b in [bound, bound / 2.0, bound / 4.0, bound / 8.0]:
        print('lp={}, acc={}'.format(b, np.mean((all_preds_adv == Y) | (lps > b))))

    all_corr_adv = (all_preds_adv == Y)
    all_corr_nat = (all_preds == Y)
    return all_corr_nat, all_corr_adv, lps
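A possible way to call evaluate_ch, assuming an existing TF session and a model object that exposes get_predicted_class; the config keys mirror the ones read inside the function, and all concrete values are illustrative rather than taken from the original.

# Illustrative call only; paths, sizes and the bound are assumptions.
config = {
    'data': 'cifar10',
    'data_path': 'cifar10_data',   # hypothetical CIFAR-10 directory
    'num_eval_examples': 1000,
    'eval_batch_size': 100,
}
with tf.Session() as sess:
    # `model` must already be built (and its weights restored) and expose get_predicted_class().
    all_corr_nat, all_corr_adv, lps = evaluate_ch(
        model, config, sess, norm='l1', bound=10.0)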
Example 2: run_attack
# Required import: import cifar10_input [as alias]
# Or: from cifar10_input import CIFAR10Data [as alias]
# From its body, this snippet also assumes: numpy as np, tensorflow as tf, math,
# a module-level data_path pointing at the CIFAR-10 data, and a Model class exposing
# x_input, y_input, num_correct and predictions.
def run_attack(checkpoint, x_adv, epsilon):
    cifar = cifar10_input.CIFAR10Data(data_path)

    model = Model(mode='eval')
    saver = tf.train.Saver()

    num_eval_examples = 10000
    eval_batch_size = 100
    num_batches = int(math.ceil(num_eval_examples / eval_batch_size))

    total_corr = 0

    x_nat = cifar.eval_data.xs
    l_inf = np.amax(np.abs(x_nat - x_adv))
    if l_inf > epsilon + 0.0001:
        print('maximum perturbation found: {}'.format(l_inf))
        print('maximum perturbation allowed: {}'.format(epsilon))
        return

    y_pred = []  # label accumulator

    with tf.Session() as sess:
        # Restore the checkpoint
        saver.restore(sess, checkpoint)

        # Iterate over the samples batch-by-batch
        for ibatch in range(num_batches):
            bstart = ibatch * eval_batch_size
            bend = min(bstart + eval_batch_size, num_eval_examples)

            x_batch = x_adv[bstart:bend, :]
            y_batch = cifar.eval_data.ys[bstart:bend]

            dict_adv = {model.x_input: x_batch,
                        model.y_input: y_batch}
            cur_corr, y_pred_batch = sess.run([model.num_correct, model.predictions],
                                              feed_dict=dict_adv)

            total_corr += cur_corr
            y_pred.append(y_pred_batch)

    accuracy = total_corr / num_eval_examples

    print('Accuracy: {:.2f}%'.format(100.0 * accuracy))
    y_pred = np.concatenate(y_pred, axis=0)
    np.save('pred.npy', y_pred)
    print('Output saved at pred.npy')
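A possible driver for run_attack, assuming the adversarial examples were saved to an .npy file with the same shape and pixel scale as cifar.eval_data.xs; the file name, checkpoint directory and epsilon are assumptions, not part of the original snippet.

# Illustrative driver only; file names, checkpoint directory and epsilon are assumptions.
if __name__ == '__main__':
    x_adv = np.load('attack.npy')  # expected shape (10000, 32, 32, 3), pixel values in [0, 255]
    checkpoint = tf.train.latest_checkpoint('models/adv_trained')
    if checkpoint is None:
        print('No checkpoint found')
    else:
        run_attack(checkpoint, x_adv, epsilon=8.0)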