From 31aae9bcf49909b963edc3e1aa9578ec81f6cfae Mon Sep 17 00:00:00 2001
From: Tzu-Wei Sung
Date: Wed, 3 Jul 2019 17:44:43 +0800
Subject: [PATCH 1/3] add unit tests for fgm

---
 .../tf2/attacks/fast_gradient_method.py      |  26 +++-
 cleverhans/future/tf2/tests/test_attacks.py  | 138 ++++++++++++++++++
 2 files changed, 159 insertions(+), 5 deletions(-)
 create mode 100644 cleverhans/future/tf2/tests/test_attacks.py

diff --git a/cleverhans/future/tf2/attacks/fast_gradient_method.py b/cleverhans/future/tf2/attacks/fast_gradient_method.py
index 3f68201fa..fca9071ef 100644
--- a/cleverhans/future/tf2/attacks/fast_gradient_method.py
+++ b/cleverhans/future/tf2/attacks/fast_gradient_method.py
@@ -27,7 +27,18 @@ def fast_gradient_method(model_fn, x, eps, norm, clip_min=None, clip_max=None, y
   :return: a tensor for the adversarial example
   """
   if norm not in [np.inf, 1, 2]:
-    raise ValueError("Norm order must be either np.inf, 1, or 2.")
+    raise ValueError(
+        "Norm order must be either np.inf, 1, or 2, got {} instead.".format(norm))
+  if eps < 0:
+    raise ValueError(
+        "eps must be greater than or equal to 0, got {} instead.".format(eps))
+  if eps == 0:
+    return x
+  if clip_min is not None and clip_max is not None:
+    if clip_min > clip_max:
+      raise ValueError(
+          "clip_min must be less than or equal to clip_max, got clip_min={} and clip_max={}.".format(
+              clip_min, clip_max))
 
   asserts = []
 
@@ -51,7 +62,9 @@ def fast_gradient_method(model_fn, x, eps, norm, clip_min=None, clip_max=None, y
   # If clipping is needed, reset all values outside of [clip_min, clip_max]
   if (clip_min is not None) or (clip_max is not None):
     # We don't currently support one-sided clipping
-    assert clip_min is not None and clip_max is not None
+    if clip_min is None or clip_max is None:
+      raise ValueError(
+          "One of clip_min and clip_max is None but we don't currently support one-sided clipping.")
     adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)
 
   if sanity_checks:
@@ -114,14 +127,17 @@ def optimize_linear(grad, eps, norm=np.inf):
     abs_grad = tf.abs(grad)
     sign = tf.sign(grad)
     max_abs_grad = tf.reduce_max(abs_grad, axis, keepdims=True)
-    tied_for_max = tf.dtypes.cast(tf.equal(abs_grad, max_abs_grad), dtype=tf.float32)
+    tied_for_max = tf.dtypes.cast(
+        tf.equal(abs_grad, max_abs_grad), dtype=tf.float32)
     num_ties = tf.reduce_sum(tied_for_max, axis, keepdims=True)
     optimal_perturbation = sign * tied_for_max / num_ties
   elif norm == 2:
-    square = tf.maximum(avoid_zero_div, tf.reduce_sum(tf.square(grad), axis, keepdims=True))
+    square = tf.maximum(avoid_zero_div, tf.reduce_sum(
+        tf.square(grad), axis, keepdims=True))
     optimal_perturbation = grad / tf.sqrt(square)
   else:
-    raise NotImplementedError("Only L-inf, L1 and L2 norms are currently implemented.")
+    raise NotImplementedError(
+        "Only L-inf, L1 and L2 norms are currently implemented.")
 
   # Scale perturbation to be the solution for the norm=eps rather than norm=1 problem
   scaled_perturbation = tf.multiply(eps, optimal_perturbation)
diff --git a/cleverhans/future/tf2/tests/test_attacks.py b/cleverhans/future/tf2/tests/test_attacks.py
new file mode 100644
index 000000000..c915a8f22
--- /dev/null
+++ b/cleverhans/future/tf2/tests/test_attacks.py
@@ -0,0 +1,138 @@
+"""Tests for attacks."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import numpy as np
+import tensorflow as tf
+
+from cleverhans.future.tf2.attacks.fast_gradient_method import fast_gradient_method
+
+
+class SimpleModel(tf.keras.Model):
+  def __init__(self):
+    super(SimpleModel, self).__init__()
+    self.w1 = tf.constant([[1.5, .3], [-2, .3]])
+    self.w2 = tf.constant([[-2.4, 1.2], [.5, -2.3]])
+
+  def call(self, x):
+    x = tf.linalg.matmul(x, self.w1)
+    x = tf.math.sigmoid(x)
+    x = tf.linalg.matmul(x, self.w2)
+    return x
+
+
+class CommonAttackProperties(tf.test.TestCase):
+
+  def setUp(self):
+    super(CommonAttackProperties, self).setUp()
+    self.model = SimpleModel()
+    self.x = tf.random.uniform((100, 2))
+    self.normalized_x = tf.random.uniform((100, 2))  # values in [0, 1)
+    self.red_ind = list(range(1, self.x.shape[0]))
+    self.ord_list = [1, 2, np.inf]
+
+  def help_adv_examples_success_rate(self, **kwargs):
+    x_adv = self.attack(model_fn=self.model, x=self.normalized_x, **kwargs)
+    ori_label = tf.math.argmax(self.model(self.normalized_x), -1)
+    adv_label = tf.math.argmax(self.model(x_adv), -1)
+    adv_acc = tf.math.reduce_mean(
+        tf.cast(tf.math.equal(adv_label, ori_label), tf.float32))
+    self.assertLess(adv_acc, .5)
+
+  def help_targeted_adv_examples_success_rate(self, **kwargs):
+    y_target = tf.random.uniform(shape=(self.normalized_x.shape[0],),
+                                 minval=0, maxval=2, dtype=tf.int64)
+    x_adv = self.attack(model_fn=self.model, x=self.normalized_x,
+                        y=y_target, targeted=True, **kwargs)
+    adv_label = tf.math.argmax(self.model(x_adv), -1)
+    adv_success = tf.math.reduce_mean(
+        tf.cast(tf.math.equal(adv_label, y_target), tf.float32))
+    self.assertGreater(adv_success, .7)
+
+
+class TestFastGradientMethod(CommonAttackProperties):
+
+  def setUp(self):
+    super(TestFastGradientMethod, self).setUp()
+    self.attack = fast_gradient_method
+    self.eps_list = [0, .1, .3, 1., 3]
+    self.attack_param = {
+        'eps': .5,
+        'clip_min': -5,
+        'clip_max': 5
+    }
+
+  def test_invalid_input(self):
+    x = tf.constant([[-2., 3.]])
+    for norm in self.ord_list:
+      with self.assertRaises(AssertionError):
+        self.attack(model_fn=self.model, x=x, eps=.1, norm=norm,
+                    clip_min=-1., clip_max=1., sanity_checks=True)
+
+  def test_invalid_eps(self):
+    for norm in self.ord_list:
+      with self.assertRaises(ValueError):
+        self.attack(model_fn=self.model, x=self.x, eps=-.1, norm=norm)
+
+  def test_eps_equals_zero(self):
+    for norm in self.ord_list:
+      self.assertAllClose(self.attack(model_fn=self.model, x=self.x, eps=0, norm=norm),
+                          self.x)
+
+  def test_eps(self):
+    # test if the attack respects the norm constraint
+    # NOTE this has been tested with the optimize_linear function in
+    # test_utils, so duplicate tests are not needed here.
+    # However, if the FGM implementation ever switches to a function
+    # other than optimize_linear, this test should be added.
+    raise self.skipTest("TODO")
+
+  def test_clips(self):
+    clip_min = -1.
+    clip_max = 1.
+    for norm in self.ord_list:
+      x_adv = self.attack(
+          model_fn=self.model, x=self.normalized_x, eps=.3, norm=norm,
+          clip_min=clip_min, clip_max=clip_max)
+      self.assertAllLessEqual(x_adv, clip_max)
+      self.assertAllGreaterEqual(x_adv, clip_min)
+
+  def test_invalid_clips(self):
+    clip_min = .5
+    clip_max = -.5
+    for norm in self.ord_list:
+      with self.assertRaises(ValueError):
+        self.attack(model_fn=self.model, x=self.x, eps=.1, norm=norm,
+                    clip_min=clip_min, clip_max=clip_max)
+
+  def test_adv_example_success_rate_linf(self):
+    # use normalized_x to make sure the same eps gives uniformly high attack
+    # success rate across randomized tests
+    self.help_adv_examples_success_rate(
+        norm=np.inf, **self.attack_param)
+
+  def test_targeted_adv_example_success_rate_linf(self):
+    self.help_targeted_adv_examples_success_rate(
+        norm=np.inf, **self.attack_param)
+
+  def test_adv_example_success_rate_l1(self):
+    self.help_adv_examples_success_rate(
+        norm=1, **self.attack_param)
+
+  def test_targeted_adv_example_success_rate_l1(self):
+    self.help_targeted_adv_examples_success_rate(
+        norm=1, **self.attack_param)
+
+  def test_adv_example_success_rate_l2(self):
+    self.help_adv_examples_success_rate(
+        norm=2, **self.attack_param)
+
+  def test_targeted_adv_example_success_rate_l2(self):
+    self.help_targeted_adv_examples_success_rate(
+        norm=2, **self.attack_param)
+
+
+if __name__ == "__main__":
+  tf.test.main()

From 4c7a61c461d5470f7f8914ce9ba28e1f7a63b5dd Mon Sep 17 00:00:00 2001
From: Tzu-Wei Sung
Date: Wed, 3 Jul 2019 17:46:20 +0800
Subject: [PATCH 2/3] remove main

---
 cleverhans/future/tf2/tests/test_attacks.py | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/cleverhans/future/tf2/tests/test_attacks.py b/cleverhans/future/tf2/tests/test_attacks.py
index c915a8f22..3d348805d 100644
--- a/cleverhans/future/tf2/tests/test_attacks.py
+++ b/cleverhans/future/tf2/tests/test_attacks.py
@@ -132,7 +132,3 @@ def test_adv_example_success_rate_l2(self):
 
   def test_targeted_adv_example_success_rate_l2(self):
     self.help_targeted_adv_examples_success_rate(
         norm=2, **self.attack_param)
-
-
-if __name__ == "__main__":
-  tf.test.main()

From 8fb909c907c68f176d2b7c0c93de170d36b346e9 Mon Sep 17 00:00:00 2001
From: Tzu-Wei Sung
Date: Wed, 3 Jul 2019 17:53:58 +0800
Subject: [PATCH 3/3] remove unused raise

---
 cleverhans/future/tf2/tests/test_attacks.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cleverhans/future/tf2/tests/test_attacks.py b/cleverhans/future/tf2/tests/test_attacks.py
index 3d348805d..2456bae07 100644
--- a/cleverhans/future/tf2/tests/test_attacks.py
+++ b/cleverhans/future/tf2/tests/test_attacks.py
@@ -87,7 +87,7 @@ def test_eps(self):
     # test_utils, so duplicate tests are not needed here.
     # However, if the FGM implementation ever switches to a function
     # other than optimize_linear, this test should be added.
-    raise self.skipTest("TODO")
+    self.skipTest("TODO")
 
   def test_clips(self):
     clip_min = -1.
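
A note on the skipped test_eps: the patch leaves the norm-constraint check as a TODO because optimize_linear is already covered by the tests in test_utils. For reference, a minimal sketch of what such a check could look like is given below. It is not part of the patch series; it assumes the attributes set up in CommonAttackProperties and TestFastGradientMethod.setUp (self.attack, self.model, self.normalized_x, self.ord_list), and the method name and numerical tolerance are illustrative assumptions only.

  def test_eps_respects_norm_constraint(self):
    # Sketch only (not in the patches): check that the FGM perturbation
    # stays within the eps ball for every supported norm.
    eps = .3
    for norm in self.ord_list:
      x_adv = self.attack(model_fn=self.model, x=self.normalized_x,
                          eps=eps, norm=norm)
      diff = x_adv - self.normalized_x
      if norm == np.inf:
        perturbation_size = tf.reduce_max(tf.abs(diff), axis=-1)
      elif norm == 1:
        perturbation_size = tf.reduce_sum(tf.abs(diff), axis=-1)
      else:
        perturbation_size = tf.sqrt(tf.reduce_sum(tf.square(diff), axis=-1))
      # Allow a small numerical slack over eps (the tolerance is an assumption).
      self.assertAllLessEqual(perturbation_size, eps + 1e-6)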