文章來(lái)源地址:http://www.zghlxwxcb.cn/news/detail-440283.html
(一)白盒攻擊

(二)黑盒攻擊
https://zhuanlan.zhihu.com/p/493333024
源碼鏈接:https://pan.baidu.com/s/1RIduv6ngpCM3jx63D3PQGA 提取碼:aid6
import os
import torch
import torch.nn as nn
from torchvision.models import mobilenet_v2
from advertorch.utils import predict_from_logits
from advertorch.utils import NormalizeByChannelMeanStd
from advertorch.attacks import LinfPGDAttack
from advertorch_examples.utils import ImageNetClassNameLookup
from advertorch_examples.utils import bhwc2bchw
from advertorch_examples.utils import bchw2bhwc
# Run on the GPU when one is available, otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = "cuda"
else:
    device = "cpu"
### Load the sample image
def get_image():
    """Load ``./images/school_bus.png`` as a float image scaled to [0, 1].

    Returns:
        An H x W x C numpy array with pixel values in [0, 1].

    Raises:
        FileNotFoundError: if the sample image is missing. (The original
        version silently returned ``None`` in that case, which then crashed
        later in ``bhwc2bchw`` with a confusing error.)
    """
    img_path = os.path.join("./images", "school_bus.png")
    if not os.path.exists(img_path):
        raise FileNotFoundError("sample image not found: {}".format(img_path))
    # Lazy import so the module can be loaded without scikit-image installed.
    from skimage.io import imread
    return imread(img_path) / 255.
def tensor2npimg(tensor):
    """Convert a 1 x C x H x W tensor into an H x W x C numpy image."""
    first = tensor[0].cpu()
    return bchw2bhwc(first.numpy())
### Visualize the attack result
def show_images(model, img, advimg, enhance=127):
    """Plot the clean image, the enhanced perturbation and the adversarial image.

    Args:
        model: classifier mapping a 1 x C x H x W tensor to logits.
        img: clean input tensor, 1 x C x H x W, values in [0, 1].
        advimg: adversarial input with the same shape as ``img``.
        enhance: multiplier applied to the perturbation so it becomes visible
            (raw L-inf perturbations of 8/255 are nearly invisible).
    """
    # Bug fix: derive the displayed clean image from the ``img`` argument
    # instead of silently reading the module-level ``np_img`` global, so the
    # function works for any image it is called with.
    np_img = tensor2npimg(img)
    np_advimg = tensor2npimg(advimg)
    np_perturb = tensor2npimg(advimg - img)
    pred = imagenet_label2classname(predict_from_logits(model(img)))
    advpred = imagenet_label2classname(predict_from_logits(model(advimg)))
    # Lazy import: matplotlib is only needed when plotting is requested.
    import matplotlib.pyplot as plt
    plt.figure(figsize=(10, 5))
    plt.subplot(1, 3, 1)
    plt.imshow(np_img)
    plt.axis("off")
    plt.title("original image\n prediction: {}".format(pred))
    plt.subplot(1, 3, 2)
    # Shift by 0.5 so the signed perturbation is centered in displayable range.
    plt.imshow(np_perturb * enhance + 0.5)
    plt.axis("off")
    plt.title("the perturbation,\n enhanced {} times".format(enhance))
    plt.subplot(1, 3, 3)
    plt.imshow(np_advimg)
    plt.axis("off")
    plt.title("perturbed image\n prediction: {}".format(advpred))
    plt.show()
# ImageNet channel-wise normalization, folded into the model below so the
# attack can operate directly in [0, 1] pixel space.
normalize = NormalizeByChannelMeanStd(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
### Load a standard pretrained classifier
model = mobilenet_v2(pretrained=True)
model.eval()
model = nn.Sequential(normalize, model)
model = model.to(device)
### Data preprocessing: HWC numpy image in [0, 1] -> 1 x C x H x W float tensor
np_img = get_image()
img = torch.tensor(bhwc2bchw(np_img))[None, :, :, :].float().to(device)
imagenet_label2classname = ImageNetClassNameLookup()
### Sanity-check the model's prediction on the clean image
pred = imagenet_label2classname(predict_from_logits(model(img)))
print("test output:", pred)
### Clean-image label, passed to the attack below
pred_label = predict_from_logits(model(img))
### Adversarial attack: PGD within an L-inf ball of radius 8/255,
### step size 2/255, 80 iterations, random start.
adversary = LinfPGDAttack(
model, eps=8 / 255, eps_iter=2 / 255, nb_iter=80,
rand_init=True)
### Run the attack to produce the adversarial example.
# NOTE(review): the clean label is passed as the second argument; with
# advertorch this presumably drives an untargeted attack (maximize loss on
# the true label) — confirm against LinfPGDAttack's `targeted` default.
advimg = adversary.perturb(img, pred_label)
### Show the clean image, the perturbation and the adversarial image
### together with the model's predictions on each.
show_images(model, img, advimg)
輸出結(jié)果:校車誤判為縫紉機(jī)
文章來(lái)源:http://www.zghlxwxcb.cn/news/detail-440283.html
到了這里,關(guān)于AidLux智慧交通AI安全之對(duì)抗攻擊算法的文章就介紹完了。如果您還想了解更多內(nèi)容,請(qǐng)?jiān)谟疑辖撬阉鱐OY模板網(wǎng)以前的文章或繼續(xù)瀏覽下面的相關(guān)文章,希望大家以后多多支持TOY模板網(wǎng)!