Generating Adversarial Traffic-Sign Samples with the IBM ART Library

Goal: generate adversarial samples by perturbing the images so that a sign originally recognized as "Stop" is classified as "No entry":

 

The code is below. Note that the pretrained CNN model I found expects grayscale input, so producing color adversarial samples would still require code changes (a rough sketch of a color variant is given at the end of this post):

import os

import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import load_model, Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from art.estimators.classification import TensorFlowV2Classifier
from art.attacks.evasion import FastGradientMethod


def getCalssName(classNo):
    if   classNo == 0: return 'Speed Limit 20 km/h'
    elif classNo == 1: return 'Speed Limit 30 km/h'
    elif classNo == 2: return 'Speed Limit 50 km/h'
    elif classNo == 3: return 'Speed Limit 60 km/h'
    elif classNo == 4: return 'Speed Limit 70 km/h'
    elif classNo == 5: return 'Speed Limit 80 km/h'
    elif classNo == 6: return 'End of Speed Limit 80 km/h'
    elif classNo == 7: return 'Speed Limit 100 km/h'
    elif classNo == 8: return 'Speed Limit 120 km/h'
    elif classNo == 9: return 'No passing'
    elif classNo == 10: return 'No passing for vehicles over 3.5 metric tons'
    elif classNo == 11: return 'Right-of-way at the next intersection'
    elif classNo == 12: return 'Priority road'
    elif classNo == 13: return 'Yield'
    elif classNo == 14: return 'Stop'
    elif classNo == 15: return 'No vehicles'
    elif classNo == 16: return 'Vehicles over 3.5 metric tons prohibited'
    elif classNo == 17: return 'No entry'
    elif classNo == 18: return 'General caution'
    elif classNo == 19: return 'Dangerous curve to the left'
    elif classNo == 20: return 'Dangerous curve to the right'
    elif classNo == 21: return 'Double curve'
    elif classNo == 22: return 'Bumpy road'
    elif classNo == 23: return 'Slippery road'
    elif classNo == 24: return 'Road narrows on the right'
    elif classNo == 25: return 'Road work'
    elif classNo == 26: return 'Traffic signals'
    elif classNo == 27: return 'Pedestrians'
    elif classNo == 28: return 'Children crossing'
    elif classNo == 29: return 'Bicycles crossing'
    elif classNo == 30: return 'Beware of ice/snow'
    elif classNo == 31: return 'Wild animals crossing'
    elif classNo == 32: return 'End of all speed and passing limits'
    elif classNo == 33: return 'Turn right ahead'
    elif classNo == 34: return 'Turn left ahead'
    elif classNo == 35: return 'Ahead only'
    elif classNo == 36: return 'Go straight or right'
    elif classNo == 37: return 'Go straight or left'
    elif classNo == 38: return 'Keep right'
    elif classNo == 39: return 'Keep left'
    elif classNo == 40: return 'Roundabout mandatory'
    elif classNo == 41: return 'End of no passing'
    elif classNo == 42: return 'End of no passing by vehicles over 3.5 metric tons'


def grayscale(img):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    return img

def equalize(img):
    img = cv2.equalizeHist(img)
    return img

def preprocessing(img):
    # img = grayscale(img)
    img = equalize(img)
    img = img/255
    return img

def read_imgs(image_dir, label=0):
    # Read every image in the directory and attach the given label
    image_files = os.listdir(image_dir)
    images = []
    labels = []
    for image_file in image_files:
        image_path = os.path.join(image_dir, image_file)
        image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)  # read the image in grayscale mode
        image = cv2.resize(image, (30, 30)) 
        img = preprocessing(image)
        images.append(img)
        labels.append(label)
    return images, labels

def test_predict(model):
    # Read the test images
    image_dir = 'D:/welink/STOP/before'
    # images = read_imgs(image_dir)
    image_files = os.listdir(image_dir)
    images = []
    for image_file in image_files:
        image_path = os.path.join(image_dir, image_file)
        image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)  # read the image in grayscale mode
        image = cv2.resize(image, (30, 30)) 
        img = preprocessing(image)
        img = img.reshape(1, 30, 30, 1)
        images.append(img)
        # Predict the class of the image
        predictions = model.predict(img)
        classIndex = np.argmax(predictions)
        probabilityValue = np.amax(predictions)
        print("img path:", image_file, " ==> ", str(classIndex)+" "+str(getCalssName(classIndex)))
        print(str(round(probabilityValue*100,2) )+"%")

def refact_model(model):
    base_model = model
    # Remove the original 43-class classification layer
    base_model = Model(inputs=base_model.input, outputs=base_model.layers[-2].output)
    # Add a new 2-class classification head
    output = Dense(2, activation='softmax', name='new_dense')(base_model.output)
    model = Model(inputs=base_model.input, outputs=output)
    # Compile the rebuilt model
    model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    return model

def retrain_with2label(model):
    image_dir1 = 'D:/welink/STOP/14'
    image_dir2 = 'D:/welink/STOP/17'
    images1, labels1 = read_imgs(image_dir1, 0)
    images2, labels2 = read_imgs(image_dir2, 1)

    # Merge the images and labels from both classes
    images = images1 + images2
    labels = labels1 + labels2

    images = np.array(images, dtype='float32')
    # If the model expects single-channel input (30, 30, 1), add a channel dimension
    if model.input_shape[-1] == 1:
        images = np.expand_dims(images, axis=-1)

    labels = np.array(labels)
    labels = to_categorical(labels, num_classes=2)

    # Split into training and test sets
    train_images, test_images, train_labels, test_labels = train_test_split(images, labels, test_size=0.2)
    # Fine-tune the model
    model.fit(train_images, train_labels, validation_data=(test_images, test_labels), epochs=10)

def test_predict2(model):
    # Load the Stop-sign images (before perturbation)
    images, _ = read_imgs('D:/welink/STOP/before')
    images = np.array(images, dtype='float32')
    if model.input_shape[-1] == 1:
        images = np.expand_dims(images, axis=-1)
    preds = model.predict(images)
    print('Predicted before:', preds.argmax(axis=1))
    return images

def run_art(model, images):
    # Build the target labels (we want class 0 "Stop" to be classified as class 1 "No entry")
    target_label = to_categorical(1, num_classes=2)
    target_label = np.tile(target_label, (len(images), 1))

    # Wrap the Keras model in an ART classifier
    classifier = TensorFlowV2Classifier(
        model=model,
        nb_classes=2,
        input_shape=(30, 30, 1),
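        # Note: the refactored model ends in a softmax layer, so from_logits=True below does
        # not strictly match its output; TensorFlow detects this and prints the "_get_logits"
        # warning seen in the run log further down. from_logits=False would be the matching
        # setting.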
        loss_object=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
        clip_values=(0, 1)
    )
    # Create a targeted FGSM attack instance
    attack = FastGradientMethod(estimator=classifier, targeted=True)

    # Initialize the adversarial images as copies of the originals
    adv_images = np.copy(images)

    for i in range(100):  # iterate at most 100 times
        # Compute the perturbation proposed by FGSM for the current adversarial images
        perturbations = attack.generate(x=adv_images, y=target_label) - adv_images

        # Average the perturbation over all samples
        avg_perturbation = np.mean(perturbations, axis=0)

        # Add the average perturbation to every adversarial image
        adv_images += avg_perturbation

        # Check the model's predictions on the current adversarial images
        preds = model.predict(adv_images)
        print('Iteration:', i, 'Predicted after:', preds.argmax(axis=1))

        # Stop once every sample is predicted as class 1 ("No entry")
        if np.all(preds.argmax(axis=1) == 1):
            break

    # Save the adversarial samples
    for i in range(len(adv_images)):
        # Clip to [0, 1], rescale to [0, 255] and convert to uint8
        img = (np.clip(adv_images[i], 0, 1) * 255).astype(np.uint8)
        # Save the image
        cv2.imwrite(f'traffic_adv_image_{i}.png', img)

    # Normalize the average perturbation and save it as an image
    avg_perturbation = (avg_perturbation - np.min(avg_perturbation)) / (np.max(avg_perturbation) - np.min(avg_perturbation))
    # Rescale to [0, 255] and convert to uint8
    avg_perturbation = (avg_perturbation * 255).astype(np.uint8)
    # Convert the single-channel perturbation to a 3-channel image
    avg_perturbation_rgb = cv2.cvtColor(avg_perturbation, cv2.COLOR_GRAY2RGB)
    # Save the perturbation image
    cv2.imwrite('traffic_avg_perturbation.png', avg_perturbation_rgb)


if __name__ == "__main__":
    # 找到一个训练好的,识别交通信号牌的模型: https://github.com/Daulettulegenov/TSR_CNN
    model = load_model(r'D:\source\competition\TSR_CNN-main\CNN_model_3.h5')
    # 预测原始的输出类型,可以看到并不能正确的分类,因为是中文字幕 停!!!!而不是 STOP
    test_predict(model)
    # 因此,需要迁移训练,让其识别中文的“停”
    model = refact_model(model)
    # 测试是否可以识别中文的停
    retrain_with2label(model)
    # 预测新的输出类型,可以看到能正确的分类,即便是中文的停!!!
    images = test_predict2(model)
    # 生成扰动图像,让其扰动,识别为no entry,保存扰动图像
    run_art(images)

  

Results:

Predicted before: [0 0 0 0 0 0 0 0 0 0]
  output, from_logits = _get_logits(....
1/1 [==============================] - 0s 38ms/step
Iteration: 0 Predicted after: [1 1 0 1 0 0 1 0 1 0]
1/1 [==============================] - 0s 46ms/step
Iteration: 1 Predicted after: [1 1 1 1 1 1 1 1 1 1]
 
The generated adversarial samples:

The "停" character is still faintly visible in them.

The perturbation image:

 

 
Code walkthrough: the main purpose of this code is to recognize traffic signs with a deep-learning model and then use an adversarial attack to generate samples that mislead that model.
The main parts are:

1. getCalssName(classNo): returns the name of the traffic sign for a given class index.

2. grayscale(img), equalize(img), preprocessing(img): image preprocessing helpers for grayscale conversion, histogram equalization, and normalization to [0, 1].

3. read_imgs(image_dir, label=0): reads all images in a directory and returns the image data together with the given label.

4. test_predict(model): reads the images in a directory, runs the model on them, and prints the predictions.

5. refact_model(model): rebuilds the model by removing the original classification layer and adding a new two-class head (a variant that freezes the pretrained layers is sketched after this list).

6. retrain_with2label(model): reads the images from two directories and fine-tunes the model on them.

7. test_predict2(model): reads the images in a directory, prints the model's predictions, and returns the image data.

8. run_art(model, images): generates adversarial samples with a targeted FGSM attack and saves both the samples and the average perturbation (a more conventional single-call FGSM usage is also sketched below).
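
As a side note on step 5: the script fine-tunes every layer of the network on the new two-class data. With a small dataset it is often more stable to freeze the pretrained convolutional layers and train only the new head, at least for the first epochs. A minimal sketch of that variant (not part of the original code; refact_model_frozen is a hypothetical name):

from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam

def refact_model_frozen(model):
    # Keep everything except the original classification layer
    base_model = Model(inputs=model.input, outputs=model.layers[-2].output)
    # Freeze the pretrained feature extractor so only the new head is updated
    for layer in base_model.layers:
        layer.trainable = False
    # New 2-class head, same as in refact_model
    output = Dense(2, activation='softmax', name='new_dense')(base_model.output)
    new_model = Model(inputs=base_model.input, outputs=output)
    # Compile after setting the trainable flags so they take effect
    new_model.compile(optimizer=Adam(), loss='categorical_crossentropy', metrics=['accuracy'])
    return new_model

After the new head converges, the base layers can be unfrozen and trained for a few more epochs with a small learning rate.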
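
On step 8: run_art drives FGSM in a custom loop, repeatedly adding the batch's average perturbation until every sample flips to class 1. For comparison, the more conventional ART usage is a single targeted generate call with an explicit per-pixel budget eps. A minimal sketch (the eps value and the function name are assumptions, not taken from the original experiment):

import numpy as np
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
from art.estimators.classification import TensorFlowV2Classifier
from art.attacks.evasion import FastGradientMethod

def run_fgsm_once(model, images, eps=0.05):
    # Wrap the fine-tuned two-class Keras model for ART
    classifier = TensorFlowV2Classifier(
        model=model,
        nb_classes=2,
        input_shape=(30, 30, 1),
        loss_object=tf.keras.losses.CategoricalCrossentropy(),
        clip_values=(0, 1),
    )
    # Targeted FGSM: push every sample towards class 1 ("No entry")
    attack = FastGradientMethod(estimator=classifier, eps=eps, targeted=True)
    target = to_categorical(np.ones(len(images)), num_classes=2)
    adv_images = attack.generate(x=images, y=target)
    print('Predicted after FGSM:', model.predict(adv_images).argmax(axis=1))
    return adv_images

The single call keeps each pixel within eps of the original, whereas the averaging loop in run_art lets the total perturbation grow with every iteration.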

In the if __name__ == "__main__" block, the code first loads a pretrained model and uses it to predict on the original traffic-sign images. It then rebuilds the model's classification head and fine-tunes it on the new two-class data. Finally, it re-runs the prediction with the fine-tuned model and uses the targeted attack to generate the adversarial samples.
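
To double-check that the attack survives being written to disk (the PNGs are quantized to uint8), the saved samples can be reloaded and fed back to the fine-tuned model. A hedged sketch; verify_adversarial is a hypothetical helper, and the file pattern matches the names written by run_art:

import glob
import cv2
import numpy as np

def verify_adversarial(model, pattern='traffic_adv_image_*.png'):
    # The saved PNGs are already preprocessed (grayscale, 30x30, equalized),
    # so they only need to be rescaled back to [0, 1] before prediction.
    files = sorted(glob.glob(pattern))
    images = []
    for f in files:
        img = cv2.imread(f, cv2.IMREAD_GRAYSCALE).astype('float32') / 255.0
        images.append(img.reshape(30, 30, 1))
    preds = model.predict(np.array(images)).argmax(axis=1)
    for f, p in zip(files, preds):
        print(f, '->', 'No entry' if p == 1 else 'Stop')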
 
Dataset download:
Traffic signs (GTSRB): https://www.kaggle.com/datasets/meowmeowmeowmeowmeow/gtsrb-german-traffic-sign
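
Finally, on the note at the top of the post about color images: the pretrained CNN used here takes single-channel input, so a color version needs a model with a (30, 30, 3) input plus a matching read path. A rough sketch of the parts that would change, assuming such a 3-channel model (here called color_model) is available:

import os
import cv2
import numpy as np

def read_imgs_color(image_dir, label=0):
    # Same as read_imgs, but keeps all three BGR channels; histogram equalization
    # is skipped here (for color it would typically be applied per channel or on
    # the luminance channel only).
    images, labels = [], []
    for image_file in os.listdir(image_dir):
        image = cv2.imread(os.path.join(image_dir, image_file), cv2.IMREAD_COLOR)
        image = cv2.resize(image, (30, 30)).astype('float32') / 255.0
        images.append(image)
        labels.append(label)
    return images, labels

# The ART wrapper would then declare the 3-channel shape:
#   classifier = TensorFlowV2Classifier(model=color_model, nb_classes=2,
#                                       input_shape=(30, 30, 3),
#                                       loss_object=tf.keras.losses.CategoricalCrossentropy(),
#                                       clip_values=(0, 1))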
 

 
