

Python attacks.SPSA Attribute Code Examples

This article collects typical usage examples of the attacks.SPSA attribute from the Python cleverhans package. If you are wondering how attacks.SPSA is used in practice, or what working examples look like, the curated examples below should help. You can also explore further usage examples from the cleverhans.attacks module in which this attribute is defined.


The following presents 10 code examples of the attacks.SPSA attribute, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the site recommend better Python code examples.
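All ten examples below follow the same basic pattern: wrap the target model in a cleverhans Model (or one of the callable wrappers), construct SPSA with a tf.Session, and call generate on a batch-of-one input placeholder; SPSA is a black-box attack, so only forward passes through the model are needed. The following is a minimal sketch of that pattern, assuming a TF1 environment and a toy two-class linear model defined purely for illustration; the hyperparameter values are placeholders and are not taken from any of the projects below.

import numpy as np
import tensorflow as tf
from cleverhans.attacks import SPSA
from cleverhans.model import CallableModelWrapper

# Toy fixed linear "model" so the sketch has no trainable variables to initialize.
W = tf.constant([[1.0, -1.0], [-1.0, 1.0]])
def logits_fn(x):
    return tf.matmul(x, W)

sess = tf.Session()
model = CallableModelWrapper(logits_fn, output_layer='logits')

# SPSA attacks one example at a time, hence the batch-of-one placeholders.
x_input = tf.placeholder(tf.float32, shape=(1, 2))
y_label = tf.placeholder(tf.int32, shape=(1,))

attack = SPSA(model, sess=sess)
x_adv = attack.generate(x_input, y=y_label,
                        epsilon=0.5,      # L-inf perturbation budget
                        num_steps=100,    # optimization steps
                        spsa_samples=32,  # samples per gradient estimate
                        spsa_iters=1)

adv = sess.run(x_adv, feed_dict={x_input: np.array([[0.3, 0.7]], np.float32),
                                 y_label: np.array([1], np.int32)})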

Example 1: __init__

# Required import: from cleverhans import attacks [as alias]
# Or: from cleverhans.attacks import SPSA [as alias]
def __init__(self, model, image_shape_hwc, epsilon=(16. / 255),
               num_steps=200, batch_size=32, is_debug=False):
    self.graph = tf.Graph()

    with self.graph.as_default():
      self.sess = tf.Session(graph=self.graph)

      self.x_input = tf.placeholder(tf.float32, shape=(1,) + image_shape_hwc)
      self.y_label = tf.placeholder(tf.int32, shape=(1,))

      self.model = model
      attack = SPSA(CleverhansPyfuncModelWrapper(self.model), sess=self.sess)
      self.x_adv = attack.generate(
        self.x_input,
        y=self.y_label,
        epsilon=epsilon,
        num_steps=num_steps,
        early_stop_loss_threshold=-1.,
        batch_size=batch_size,
        is_debug=is_debug)

    self.graph.finalize() 
Developer: google, Project: unrestricted-adversarial-examples, Lines: 24, Source: attacks.py
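
Example 1 builds the attack graph once inside its own tf.Graph and then finalizes it. A hedged sketch of how such an attacker object might be driven afterwards is shown below; the attribute names (sess, x_input, y_label, x_adv) follow the constructor above, while the helper function itself is hypothetical and not part of the original project.

import numpy as np

# Hypothetical driver (illustration only): feed one HWC image and its integer
# label through the finalized graph and return the adversarial image.
def run_attack(attacker, image_hwc, label):
    x = np.expand_dims(image_hwc.astype(np.float32), axis=0)  # shape (1, H, W, C)
    y = np.array([label], dtype=np.int32)                     # shape (1,)
    adv = attacker.sess.run(attacker.x_adv,
                            feed_dict={attacker.x_input: x,
                                       attacker.y_label: y})
    return adv[0]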

Example 2: setUp

# Required import: from cleverhans import attacks [as alias]
# Or: from cleverhans.attacks import SPSA [as alias]
def setUp(self):
        super(TestSPSA, self).setUp()

        self.sess = tf.Session()
        self.model = SimpleModel()
        self.attack = SPSA(self.model, sess=self.sess) 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 8, Source: test_attacks.py

Example 3: test_attack_strength

# Required import: from cleverhans import attacks [as alias]
# Or: from cleverhans.attacks import SPSA [as alias]
def test_attack_strength(self):
        # This uses the existing input structure for SPSA. Tom tried for ~40
        # minutes to get generate_np to work correctly but could not.

        n_samples = 10
        x_val = np.random.rand(n_samples, 2)
        x_val = np.array(x_val, dtype=np.float32)

        # The SPSA attack currently uses non-one-hot labels
        # TODO: change this to use standard cleverhans label conventions
        feed_labs = np.random.randint(0, 2, n_samples)

        x_input = tf.placeholder(tf.float32, shape=(1, 2))
        y_label = tf.placeholder(tf.int32, shape=(1,))

        x_adv_op = self.attack.generate(
            x_input, y=y_label,
            epsilon=.5, num_steps=100, batch_size=64, spsa_iters=1,
        )

        all_x_adv = []
        for i in range(n_samples):
            x_adv_np = self.sess.run(x_adv_op, feed_dict={
                x_input: np.expand_dims(x_val[i], axis=0),
                y_label: np.expand_dims(feed_labs[i], axis=0),
            })
            all_x_adv.append(x_adv_np[0])

        x_adv = np.vstack(all_x_adv)
        new_labs = np.argmax(self.sess.run(self.model(x_adv)), axis=1)
        self.assertTrue(np.mean(feed_labs == new_labs) < 0.1) 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 33, Source: test_attacks.py

Example 4: spsa_attack

# Required import: from cleverhans import attacks [as alias]
# Or: from cleverhans.attacks import SPSA [as alias]
def spsa_attack():
    # Use tf for evaluation on adversarial data
    sess = tf.Session()
    x_op = tf.placeholder(tf.float32, shape=(None, 3, 32, 32,))
    y_op = tf.placeholder(tf.float32, shape=(1,))

    # Convert pytorch model to a tf_model and wrap it in cleverhans
    tf_model_fn = convert_pytorch_model_to_tf(menet_model)
    cleverhans_model = CallableModelWrapper(tf_model_fn, output_layer='logits')

    # Create an SPSA attack
    spsa = SPSA(cleverhans_model, sess=sess)
    spsa_params = {
        'eps': config['epsilon'],
        'nb_iter': config['num_steps'],
        'clip_min': 0.,
        'clip_max': 1.,
        'spsa_samples': args.spsa_sample,  # in this case, the batch_size is equal to spsa_samples
        'spsa_iters': 1,
    }

    adv_x_op = spsa.generate(x_op, y_op, **spsa_params)
    adv_preds_op = tf_model_fn(adv_x_op)

    # Evaluation against SPSA attacks
    correct = 0
    total = 0
    for batch_idx, (inputs, targets) in enumerate(test_loader):
        adv_preds = sess.run(adv_preds_op, feed_dict={x_op: inputs, y_op: targets})
        correct += (np.argmax(adv_preds, axis=1) == targets).sum().float()
        total += len(inputs)

        sys.stdout.write(
            "\rBlack-box SPSA attack... Acc: %.3f%% (%d/%d)" % (100. * correct / total, correct, total))
        sys.stdout.flush()

    print('Accuracy under SPSA attack: %.3f%%' % (100. * correct / total)) 
Developer: YyzHarry, Project: ME-Net, Lines: 39, Source: attack_blackbox.py
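
Example 4 relies on several names defined elsewhere in the ME-Net codebase (menet_model, test_loader, config, args). The sketch below shows the kind of configuration the function expects; the values are placeholders chosen for illustration, not the project's actual settings.

# Illustrative stand-ins for the external names Example 4 assumes; the real
# values come from ME-Net's own scripts, so treat these numbers as placeholders.
config = {
    'epsilon': 8.0 / 255,   # L-inf budget, passed to SPSA as 'eps'
    'num_steps': 100,       # passed to SPSA as 'nb_iter'
}

class _Args:                 # mimics the argparse namespace used by the script
    spsa_sample = 32         # SPSA gradient-estimation samples (batch size here)

args = _Args()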

Example 5: setUp

# Required import: from cleverhans import attacks [as alias]
# Or: from cleverhans.attacks import SPSA [as alias]
def setUp(self):
    super(TestSPSA, self).setUp()

    self.sess = tf.Session()
    self.model = SimpleModel()
    self.attack = SPSA(self.model, sess=self.sess) 
Developer: tensorflow, Project: cleverhans, Lines: 8, Source: test_attacks.py

Example 6: test_attack_strength

# Required import: from cleverhans import attacks [as alias]
# Or: from cleverhans.attacks import SPSA [as alias]
def test_attack_strength(self):
    n_samples = 10
    x_val = np.random.rand(n_samples, 2)
    x_val = np.array(x_val, dtype=np.float32)

    # The SPSA attack currently uses non-one-hot labels
    # TODO: change this to use standard cleverhans label conventions
    feed_labs = np.random.randint(0, 2, n_samples)

    x_input = tf.placeholder(tf.float32, shape=(1, 2))
    y_label = tf.placeholder(tf.int32, shape=(1,))

    x_adv_op = self.attack.generate(
        x_input, y=y_label,
        epsilon=.5, num_steps=100, batch_size=64, spsa_iters=1,
        clip_min=0., clip_max=1.
    )

    all_x_adv = []
    for i in range(n_samples):
      x_adv_np = self.sess.run(x_adv_op, feed_dict={
          x_input: np.expand_dims(x_val[i], axis=0),
          y_label: np.expand_dims(feed_labs[i], axis=0),
      })
      all_x_adv.append(x_adv_np[0])

    x_adv = np.vstack(all_x_adv)
    new_labs = np.argmax(self.sess.run(self.model.get_logits(x_adv)), axis=1)
    self.assertTrue(np.mean(feed_labs == new_labs) < 0.1) 
Developer: tensorflow, Project: cleverhans, Lines: 31, Source: test_attacks.py

Example 7: test_attack_bounds

# Required import: from cleverhans import attacks [as alias]
# Or: from cleverhans.attacks import SPSA [as alias]
def test_attack_bounds(self):
        """Check SPSA respects perturbation limits."""
        epsilon = 4. / 255
        input_dir = FLAGS.input_image_dir
        metadata_file_path = FLAGS.metadata_file_path
        num_images = 8
        batch_shape = (num_images, 299, 299, 3)
        images, labels = load_images(
            input_dir, metadata_file_path, batch_shape)
        num_classes = 1001

        tf.logging.set_verbosity(tf.logging.INFO)
        with tf.Graph().as_default():
            # Prepare graph
            x_input = tf.placeholder(tf.float32, shape=(1,) + batch_shape[1:])
            y_label = tf.placeholder(tf.int32, shape=(1,))
            model = InceptionModel(num_classes)

            attack = SPSA(model)
            x_adv = attack.generate(
                x_input, y=y_label, epsilon=epsilon, num_steps=10,
                early_stop_loss_threshold=-1., spsa_samples=32, spsa_iters=1,
                is_debug=True)

            # Run computation
            saver = tf.train.Saver(slim.get_model_variables())
            session_creator = tf.train.ChiefSessionCreator(
                scaffold=tf.train.Scaffold(saver=saver),
                checkpoint_filename_with_path=FLAGS.checkpoint_path,
                master=FLAGS.master)

            with tf.train.MonitoredSession(
                    session_creator=session_creator) as sess:
                for i in xrange(num_images):
                    adv_image = sess.run(x_adv, feed_dict={
                        x_input: np.expand_dims(images[i], axis=0),
                        y_label: np.expand_dims(labels[i], axis=0),
                    })
                    diff = adv_image - images[i]
                    assert np.max(np.abs(diff)) < epsilon + 1e-4
                    # all pixels must remain within the valid image range
                    assert np.max(adv_image) < 1. + 1e-4
                    assert np.min(adv_image) > -1e-4
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 44, Source: test_imagenet_attacks.py

Example 8: test_attack_success

# Required import: from cleverhans import attacks [as alias]
# Or: from cleverhans.attacks import SPSA [as alias]
def test_attack_success(self):
        """Check SPSA creates misclassified images."""
        epsilon = 4. / 255
        input_dir = FLAGS.input_image_dir
        metadata_file_path = FLAGS.metadata_file_path
        num_images = 8
        batch_shape = (num_images, 299, 299, 3)
        images, labels = load_images(
            input_dir, metadata_file_path, batch_shape)
        num_classes = 1001

        tf.logging.set_verbosity(tf.logging.INFO)
        with tf.Graph().as_default():
            # Prepare graph
            x_input = tf.placeholder(tf.float32, shape=(1,) + batch_shape[1:])
            y_label = tf.placeholder(tf.int32, shape=(1,))
            model = InceptionModel(num_classes)

            attack = SPSA(model)
            x_adv = attack.generate(
                x_input, y=y_label, epsilon=epsilon, num_steps=30,
                early_stop_loss_threshold=-1., spsa_samples=32, spsa_iters=16,
                is_debug=True)

            logits = model.get_logits(x_adv)
            acc = _top_1_accuracy(logits, y_label)

            # Run computation
            saver = tf.train.Saver(slim.get_model_variables())
            session_creator = tf.train.ChiefSessionCreator(
                scaffold=tf.train.Scaffold(saver=saver),
                checkpoint_filename_with_path=FLAGS.checkpoint_path,
                master=FLAGS.master)

            num_correct = 0.
            with tf.train.MonitoredSession(
                    session_creator=session_creator) as sess:
                for i in xrange(num_images):
                    acc_val = sess.run(acc, feed_dict={
                        x_input: np.expand_dims(images[i], axis=0),
                        y_label: np.expand_dims(labels[i], axis=0),
                    })
                    tf.logging.info('Accuracy: %s', acc_val)
                    num_correct += acc_val
                assert (num_correct / num_images) < 0.1 
Developer: StephanZheng, Project: neural-fingerprinting, Lines: 47, Source: test_imagenet_attacks.py
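
Examples 8 and 10 call a _top_1_accuracy helper defined elsewhere in test_imagenet_attacks.py. A plausible minimal implementation, assuming the helper simply reports whether the top-scoring logit matches the integer label, is sketched below; this is an assumption, not the repository's actual code.

import tensorflow as tf

# Plausible stand-in for _top_1_accuracy: fraction of examples whose
# highest-scoring logit equals the integer label.
def _top_1_accuracy(logits, labels):
    return tf.reduce_mean(
        tf.cast(tf.nn.in_top_k(logits, labels, k=1), tf.float32))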

Example 9: test_attack_bounds

# Required import: from cleverhans import attacks [as alias]
# Or: from cleverhans.attacks import SPSA [as alias]
def test_attack_bounds(self):
    """Check SPSA respects perturbation limits."""
    epsilon = 4. / 255
    input_dir = FLAGS.input_image_dir
    metadata_file_path = FLAGS.metadata_file_path
    num_images = 8
    batch_shape = (num_images, 299, 299, 3)
    images, labels = load_images(
        input_dir, metadata_file_path, batch_shape)
    nb_classes = 1001

    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
      # Prepare graph
      x_input = tf.placeholder(tf.float32, shape=(1,) + batch_shape[1:])
      y_label = tf.placeholder(tf.int32, shape=(1,))
      model = InceptionModel(nb_classes)

      attack = SPSA(model)
      x_adv = attack.generate(
          x_input, y=y_label, epsilon=epsilon, num_steps=10,
          early_stop_loss_threshold=-1., spsa_samples=32, spsa_iters=1,
          is_debug=True)

      # Run computation
      saver = tf.train.Saver(slim.get_model_variables())
      session_creator = tf.train.ChiefSessionCreator(
          scaffold=tf.train.Scaffold(saver=saver),
          checkpoint_filename_with_path=FLAGS.checkpoint_path,
          master=FLAGS.master)

      with tf.train.MonitoredSession(session_creator=session_creator) as sess:
        for i in xrange(num_images):
          x_expanded = np.expand_dims(images[i], axis=0)
          y_expanded = np.expand_dims(labels[i], axis=0)

          adv_image = sess.run(x_adv, feed_dict={x_input: x_expanded,
                                                 y_label: y_expanded})
          diff = adv_image - images[i]
          assert np.max(np.abs(diff)) < epsilon + 1e-4
          # all pixels must remain within the valid image range
          assert np.max(adv_image) < 1. + 1e-4
          assert np.min(adv_image) > -1e-4
Developer: tensorflow, Project: cleverhans, Lines: 44, Source: test_imagenet_attacks.py

Example 10: test_attack_success

# Required import: from cleverhans import attacks [as alias]
# Or: from cleverhans.attacks import SPSA [as alias]
def test_attack_success(self):
    """Check SPSA creates misclassified images."""
    epsilon = 4. / 255
    input_dir = FLAGS.input_image_dir
    metadata_file_path = FLAGS.metadata_file_path
    num_images = 8
    batch_shape = (num_images, 299, 299, 3)
    images, labels = load_images(
        input_dir, metadata_file_path, batch_shape)
    nb_classes = 1001

    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
      # Prepare graph
      x_input = tf.placeholder(tf.float32, shape=(1,) + batch_shape[1:])
      y_label = tf.placeholder(tf.int32, shape=(1,))
      model = InceptionModel(nb_classes)

      attack = SPSA(model)
      x_adv = attack.generate(
          x_input, y=y_label, epsilon=epsilon, num_steps=30,
          early_stop_loss_threshold=-1., spsa_samples=32, spsa_iters=16,
          is_debug=True)

      logits = model.get_logits(x_adv)
      acc = _top_1_accuracy(logits, y_label)

      # Run computation
      saver = tf.train.Saver(slim.get_model_variables())
      session_creator = tf.train.ChiefSessionCreator(
          scaffold=tf.train.Scaffold(saver=saver),
          checkpoint_filename_with_path=FLAGS.checkpoint_path,
          master=FLAGS.master)

      num_correct = 0.
      with tf.train.MonitoredSession(session_creator=session_creator) as sess:
        for i in xrange(num_images):
          feed_dict_i = {x_input: np.expand_dims(images[i], axis=0),
                         y_label: np.expand_dims(labels[i], axis=0)}
          acc_val = sess.run(acc, feed_dict=feed_dict_i)
          tf.logging.info('Accuracy: %s', acc_val)
          num_correct += acc_val
        assert (num_correct / num_images) < 0.1 
Developer: tensorflow, Project: cleverhans, Lines: 45, Source: test_imagenet_attacks.py


Note: The cleverhans.attacks.SPSA examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code belongs to the original authors. Please refer to each project's License before distributing or using the code; do not reproduce this article without permission.