update 116

freeneuro 2021-09-12 22:34:04 +08:00
parent ad700c92e8
commit 7cf9b058f3
28 changed files with 67860 additions and 0 deletions

@@ -0,0 +1,74 @@
# 3ds Max Wavefront OBJ Exporter v0.97b - (c)2007 guruware
# File Created: 27.06.2020 15:26:49
newmtl MI_CarLightGlass_Etron
Ns 43.2193
Ni 1.5000
d 0.0000
Tr 1.0000
Tf 0.0000 0.0000 0.0000
illum 2
Ka 0.0000 0.0000 0.0000
Kd 0.7159 0.7159 0.7159
Ks 0.2000 0.2000 0.2000
Ke 0.0000 0.0000 0.0000
newmtl MI_Interior_Etron
Ns 43.2193
Ni 1.5000
d 1.0000
Tr 0.0000
Tf 1.0000 1.0000 1.0000
illum 2
Ka 0.0000 0.0000 0.0000
Kd 0.8000 0.8000 0.8000
Ks 0.2000 0.2000 0.2000
Ke 0.0000 0.0000 0.0000
newmtl MI_CarExterior_Etron
Ns 0.0000
Ni 1.5000
d 1.0000
Tr 0.0000
Tf 1.0000 1.0000 1.0000
illum 2
Ka 0.0000 0.0000 0.0000
Kd 1.0000 1.0000 1.0000
Ks 0.0000 0.0000 0.0000
Ke 0.0000 0.0000 0.0000
newmtl M_LicensePlate
Ns 43.2193
Ni 1.5000
d 1.0000
Tr 0.0000
Tf 1.0000 1.0000 1.0000
illum 2
Ka 0.0000 0.0000 0.0000
Kd 0.8000 0.8000 0.8000
Ks 0.2000 0.2000 0.2000
Ke 0.0000 0.0000 0.0000
newmtl MI_Wheels_Etron
Ns 43.2193
Ni 1.5000
d 1.0000
Tr 0.0000
Tf 1.0000 1.0000 1.0000
illum 2
Ka 0.0000 0.0000 0.0000
Kd 0.2500 0.2500 0.2500
Ks 0.2000 0.2000 0.2000
Ke 0.0000 0.0000 0.0000
newmtl MI_CarGlass_Etron
Ns 43.2193
Ni 1.5000
d 0.0000
Tr 1.0000
Tf 0.0000 0.0000 0.0000
illum 2
Ka 0.0000 0.0000 0.0000
Kd 0.2159 0.2159 0.2159
Ks 0.2000 0.2000 0.2000
Ke 0.0000 0.0000 0.0000
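
Note on the material fields above: `d` (dissolve) and its complement `Tr` mark `MI_CarLightGlass_Etron` and `MI_CarGlass_Etron` as fully transparent (`d 0.0000`, `Tr 1.0000`), while the exterior, interior, plate, and wheel materials stay opaque. A minimal sketch of reading those fields (not part of the commit; the `.mtl` path is assumed to sit next to the `.obj` below):

```python
# Hypothetical helper: list the transparent materials in the exported .mtl
# by reading the `d` (dissolve) field shown above. The path is an assumption.
def transparent_materials(mtl_path):
    name, transparent = None, []
    with open(mtl_path) as f:
        for line in f:
            parts = line.split()
            if not parts or line.startswith('#'):
                continue
            if parts[0] == 'newmtl':
                name = parts[1]
            elif parts[0] == 'd' and float(parts[1]) == 0.0:
                transparent.append(name)  # d 0.0 = fully dissolved (transparent)
    return transparent

print(transparent_materials('src/car_assets/audi_et_te.mtl'))
# expected: ['MI_CarLightGlass_Etron', 'MI_CarGlass_Etron']
```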

60942
src/car_assets/audi_et_te.obj Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large

BIN  src/data/mask/data12501.png  Normal file  (1.9 KiB)
BIN  src/data/mask/data12502.png  Normal file  (2.1 KiB)
BIN  src/data/mask/data12503.png  Normal file  (1.9 KiB)
BIN  src/data/mask/data12504.png  Normal file  (1.9 KiB)
BIN  src/data/mask/data12505.png  Normal file  (2.1 KiB)
BIN  src/data/mask/data12506.png  Normal file  (2.1 KiB)
BIN  src/data/mask/data12507.png  Normal file  (2.0 KiB)
BIN  src/data/mask/data12508.png  Normal file  (2.0 KiB)
BIN  src/data/mask/data12509.png  Normal file  (2.0 KiB)
BIN  src/data/mask/data12510.png  Normal file  (2.3 KiB)

BIN  src/data/test/data12501.npz  Normal file
BIN  src/data/test/data12502.npz  Normal file
BIN  src/data/test/data12503.npz  Normal file
BIN  src/data/test/data12504.npz  Normal file
BIN  src/data/test/data12505.npz  Normal file
BIN  src/data/test/data12506.npz  Normal file
BIN  src/data/test/data12507.npz  Normal file
BIN  src/data/test/data12508.npz  Normal file
BIN  src/data/test/data12509.npz  Normal file
BIN  src/data/test/data12510.npz  Normal file
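
The `.npz` archives added above are the test samples consumed by `data_loader.py` below; each is expected to hold the scene image plus the CARLA camera and vehicle transforms. A quick inspection sketch (the key names are taken from the loader code):

```python
# Inspect one of the new test samples; data_loader.py reads these three keys.
import numpy as np

data = np.load('src/data/test/data12501.npz', allow_pickle=True)
print(data.files)         # expected: 'img', 'veh_trans', 'cam_trans'
print(data['img'].shape)  # the scene image the render is composited onto
```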

109
src/data_loader.py Normal file
@@ -0,0 +1,109 @@
import os
import sys
import warnings

import cv2
import numpy as np
import cupy as cp
import chainer
import torch
from PIL import Image
from torch.utils.data import Dataset, DataLoader

warnings.filterwarnings("ignore")
sys.path.append("./neural_renderer/")

import nmr_test as nmr
import neural_renderer


class MyDatasetTestAdv(Dataset):
    def __init__(self, data_dir, img_size, texture_size, faces, vertices, distance=None, mask_dir='', ret_mask=False):
        self.data_dir = data_dir
        self.files = []
        files = os.listdir(data_dir)
        for file in files:
            if distance is None:
                self.files.append(file)
            else:
                # Keep only samples whose (squared) camera-to-vehicle distance is within range.
                data = np.load(os.path.join(self.data_dir, file))
                veh_trans = data['veh_trans']
                cam_trans = data['cam_trans']
                dis = (cam_trans - veh_trans)[0, :]
                dis = np.sum(dis ** 2)
                if dis <= distance:
                    self.files.append(file)
        print(len(self.files))
        self.img_size = img_size
        textures = np.ones((1, faces.shape[0], texture_size, texture_size, texture_size, 3), 'float32')
        self.textures_adv = torch.from_numpy(textures).cuda(device=0)
        self.faces_var = faces[None, :, :]
        self.vertices_var = vertices[None, :, :]
        self.mask_renderer = nmr.NeuralRenderer(img_size=img_size).cuda()
        self.mask_dir = mask_dir
        self.ret_mask = ret_mask

    def set_textures(self, textures_adv):
        self.textures_adv = textures_adv

    def __getitem__(self, index):
        file = os.path.join(self.data_dir, self.files[index])
        data = np.load(file, allow_pickle=True)
        img = data['img']
        veh_trans, cam_trans = data['veh_trans'], data['cam_trans']

        # Aim the renderer's camera at the vehicle for this frame.
        eye, camera_direction, camera_up = nmr.get_params(cam_trans, veh_trans)
        self.mask_renderer.renderer.renderer.eye = eye
        self.mask_renderer.renderer.renderer.camera_direction = camera_direction
        self.mask_renderer.renderer.renderer.camera_up = camera_up

        imgs_pred = self.mask_renderer.forward(self.vertices_var, self.faces_var, self.textures_adv)

        img = img[:, :, ::-1]  # flip channel order (BGR <-> RGB)
        img = cv2.resize(img, (self.img_size, self.img_size))
        img = np.transpose(img, (2, 0, 1))[np.newaxis, ...]  # HWC -> 1xCxHxW
        img = torch.from_numpy(img.copy()).cuda(device=0)
        imgs_pred = imgs_pred / torch.max(imgs_pred)

        if self.ret_mask:
            mask_file = os.path.join(self.mask_dir, "%s.png" % self.files[index][:-4])
            mask = cv2.imread(mask_file)
            mask = cv2.resize(mask, (self.img_size, self.img_size))
            # np.logical_or only takes two input arrays (a third positional
            # argument is treated as `out`), so reduce the channels explicitly.
            mask = np.logical_or(np.logical_or(mask[:, :, 0], mask[:, :, 1]), mask[:, :, 2])
            mask = torch.from_numpy(mask.astype('float32')).cuda()
            # Composite: background where mask == 0, rendered car where mask == 1.
            total_img = (1 - mask) * img + (255 * imgs_pred) * mask
            return index, total_img.squeeze(0), imgs_pred.squeeze(0), mask, self.files[index]
        total_img = img + 255 * imgs_pred
        return index, total_img.squeeze(0), imgs_pred.squeeze(0), self.files[index]

    def __len__(self):
        return len(self.files)


if __name__ == '__main__':
    obj_file = 'audi_et_te.obj'
    vertices, faces, textures = neural_renderer.load_obj(filename_obj=obj_file, load_texture=True)

    # Keep unbatched torch copies of the mesh for the dataset demo below.
    faces_t = torch.from_numpy(faces).cuda(device=0)
    vertices_t = torch.from_numpy(vertices).cuda(device=0)

    # Sanity-check the mesh with a plain chainer render first.
    renderer = neural_renderer.Renderer()
    vertices = np.expand_dims(vertices, axis=0)
    faces = np.expand_dims(faces, axis=0)
    textures = np.expand_dims(textures, axis=0)
    faces = chainer.Variable(chainer.cuda.to_gpu(faces, 0))
    vertices = chainer.Variable(chainer.cuda.to_gpu(vertices, 0))
    textures = chainer.Variable(chainer.cuda.to_gpu(textures, 0))
    image = renderer.render(vertices, faces, textures)
    image = image.data[0]
    image = (np.clip(cp.asnumpy(image), 0, 1) * 255).astype(np.uint8)
    image = Image.fromarray(np.transpose(image, (1, 2, 0)))
    image.show()

    dataset = MyDatasetTestAdv('../data/phy_attack/train/', 608, 4, faces_t, vertices_t)
    loader = DataLoader(
        dataset=dataset,
        batch_size=3,
        shuffle=True,
        # num_workers=2,
    )
    for index, total_img, imgs_pred, filename in loader:
        print(total_img.size(), imgs_pred.size())
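
# --- Editor's addition (illustrative sketch; not part of the original commit) ---
def _compositing_sketch():
    # The mask compositing from __getitem__ above, on dummy tensors: background
    # where mask == 0, rendered car (scaled to [0, 255]) where mask == 1.
    # Shapes and values here are illustrative only.
    H = W = 8
    img = torch.full((1, 3, H, W), 128.0)  # background photo, range [0, 255]
    imgs_pred = torch.rand(1, 3, H, W)     # normalized neural render, range [0, 1]
    mask = torch.zeros(H, W)
    mask[2:6, 2:6] = 1.0                   # car silhouette from the mask PNG
    total_img = (1 - mask) * img + (255 * imgs_pred) * mask
    return total_img                       # shape: (1, 3, 8, 8)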

105
src/generated_and_test.py Normal file
@@ -0,0 +1,105 @@
import os
import sys
import argparse

import numpy as np
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from PIL import Image

from data_loader import MyDatasetTestAdv

sys.path.append("./neural_renderer/")
import neural_renderer

parser = argparse.ArgumentParser()
parser.add_argument("--batchsize", type=int, default=1)
parser.add_argument("--obj", type=str, default='car_assets/audi_et_te.obj')
parser.add_argument("--faces", type=str, default='car_assets/exterior_face.txt')  # exterior_face or all_faces
parser.add_argument("--textures", type=str, default='textures/texture_camouflage.npy')
parser.add_argument("--datapath", type=str, default="../data/")
args = parser.parse_args()

BATCH_SIZE = args.batchsize
mask_dir = os.path.join(args.datapath, 'masks/')
obj_file = args.obj
texture_size = 6

vertices, faces, textures = neural_renderer.load_obj(
    filename_obj=obj_file, texture_size=texture_size, load_texture=True)

# Camouflage textures: the adversarial texture tensor produced by the attack.
texture_content_adv = torch.from_numpy(np.load(args.textures)).cuda(device=0)
texture_origin = textures[None, :, :, :, :, :].cuda(device=0)

# Per-face mask selecting the faces the camouflage is painted on.
texture_mask = np.zeros((faces.shape[0], texture_size, texture_size, texture_size, 3), 'int8')
with open(args.faces, 'r') as f:
    face_ids = f.readlines()
    for face_id in face_ids:
        if face_id != '\n':
            texture_mask[int(face_id) - 1, :, :, :, :] = 1
texture_mask = torch.from_numpy(texture_mask).cuda(device=0).unsqueeze(0)


def cal_texture(texture_content, CONTENT=False):
    # Squash the unconstrained texture values into [0, 1], then paste the
    # adversarial texture onto the masked faces of the original texture.
    if CONTENT:
        textures = 0.5 * (torch.nn.Tanh()(texture_content) + 1)
    else:
        # Expects a global `texture_param` (defined by the training script).
        textures = 0.5 * (torch.nn.Tanh()(texture_param) + 1)
    return texture_origin * (1 - texture_mask) + texture_mask * textures


@torch.no_grad()
def run_cam(data_dir, batch_size=BATCH_SIZE):
    print(data_dir)
    dataset = MyDatasetTestAdv(data_dir, input_size, texture_size, faces, vertices,
                               distance=None, mask_dir=mask_dir, ret_mask=True)
    loader = DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=False,
        # num_workers=2,
    )
    print(len(dataset))
    tqdm_loader = tqdm(loader)

    textures_adv = cal_texture(texture_content_adv, CONTENT=True)
    dataset.set_textures(textures_adv)

    for i, (index, total_img, texture_img, _, filename) in enumerate(tqdm_loader):
        index = int(index[0])
        texture_img_np = total_img.data.cpu().numpy()[0]
        texture_img_np = Image.fromarray(np.transpose(texture_img_np, (1, 2, 0)).astype('uint8'))
        filename = filename[0].split('.')[0]
        texture_img_np.save(fr'savedImage/{filename}.png')

        # YOLOv5 detection on the composited image
        results = net(texture_img_np)
        results.show()
        results.save(fr'savedImage/{filename}_pred.png')


if __name__ == "__main__":
    data_dir = "../data/test/"
    batch_size = 1
    input_size = 800

    net = torch.hub.load('ultralytics/yolov5', 'yolov5x')  # or yolov5s, yolov5m, custom
    net.eval()
    if torch.cuda.is_available():
        net = net.cuda()
    run_cam(data_dir)
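
# --- Editor's addition (illustrative sketch; not part of the original commit) ---
def _cal_texture_sketch():
    # cal_texture's blend above, on dummy tensors: tanh squashes the
    # unconstrained adversarial values into [0, 1]; the face mask keeps the
    # original texture on unpainted faces. Shapes are illustrative (4 faces, T = 2).
    F_, T = 4, 2
    origin = torch.rand(1, F_, T, T, T, 3)    # texture loaded from the .obj
    content = torch.randn(1, F_, T, T, T, 3)  # unconstrained parameters
    mask = torch.zeros(1, F_, T, T, T, 3)
    mask[:, :2] = 1                           # paint faces 0 and 1 only
    adv = 0.5 * (torch.tanh(content) + 1)     # now in [0, 1]
    return origin * (1 - mask) + mask * adv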

244
src/nmr_test.py Normal file
@@ -0,0 +1,244 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import os
from glob import glob

import numpy as np
import imageio  # scipy.misc.imsave was removed in SciPy 1.2; imageio replaces it below
import torch

import neural_renderer


#############
### Utils ###
#############

def convert_as(src, trg):
    src = src.type_as(trg)
    if src.is_cuda:
        src = src.cuda(device=trg.get_device())
    return src
def get_params(carlaTcam, carlaTveh):
    """Convert CARLA camera/vehicle transforms, given as ((x, y, z),
    (pitch, yaw, roll)) pairs, into the eye, camera_direction and camera_up
    vectors of the renderer's 'look' camera."""
    scale = 0.40
    eye = [0, 0, 0]
    for i in range(0, 3):
        eye[i] = carlaTcam[0][i] * scale

    # Camera direction and up vector from the camera's Euler angles.
    pitch = math.radians(carlaTcam[1][0])
    yaw = math.radians(carlaTcam[1][1])
    roll = math.radians(carlaTcam[1][2])
    cam_direct = [math.cos(pitch) * math.cos(yaw), math.cos(pitch) * math.sin(yaw), math.sin(pitch)]
    cam_up = [math.cos(math.pi / 2 + pitch) * math.cos(yaw), math.cos(math.pi / 2 + pitch) * math.sin(yaw),
              math.sin(math.pi / 2 + pitch)]

    # Rotate the eye, direction and up points about the z-axis by the
    # vehicle's yaw, expressing everything in a vehicle-aligned frame.
    p_cam = eye
    p_dir = [eye[0] + cam_direct[0], eye[1] + cam_direct[1], eye[2] + cam_direct[2]]
    p_up = [eye[0] + cam_up[0], eye[1] + cam_up[1], eye[2] + cam_up[2]]
    p_l = [p_cam, p_dir, p_up]
    trans_p = []
    for p in p_l:
        if math.sqrt(p[0] ** 2 + p[1] ** 2) == 0:
            cosfi = 0
            sinfi = 0
        else:
            cosfi = p[0] / math.sqrt(p[0] ** 2 + p[1] ** 2)
            sinfi = p[1] / math.sqrt(p[0] ** 2 + p[1] ** 2)
        cossum = cosfi * math.cos(math.radians(carlaTveh[1][1])) + sinfi * math.sin(math.radians(carlaTveh[1][1]))
        sinsum = math.cos(math.radians(carlaTveh[1][1])) * sinfi - math.sin(math.radians(carlaTveh[1][1])) * cosfi
        trans_p.append([math.sqrt(p[0] ** 2 + p[1] ** 2) * cossum, math.sqrt(p[0] ** 2 + p[1] ** 2) * sinsum, p[2]])

    return trans_p[0], \
        [trans_p[1][0] - trans_p[0][0], trans_p[1][1] - trans_p[0][1], trans_p[1][2] - trans_p[0][2]], \
        [trans_p[2][0] - trans_p[0][0], trans_p[2][1] - trans_p[0][1], trans_p[2][2] - trans_p[0][2]]
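
# --- Editor's addition (illustrative example; not part of the original commit) ---
def _example_get_params():
    # Same test pose as NeuralRenderer.__init__ uses below; CARLA transforms
    # are ((x, y, z), (pitch, yaw, roll)) pairs.
    eye, camera_direction, camera_up = get_params(
        ((-25, 16, 20), (-45, 180, 0)),  # camera transform
        ((-45, 3, 0.8), (0, 0, 0)))      # vehicle transform
    return eye, camera_direction, camera_up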
########################################################################
############ Wrapper class for the chainer Neural Renderer #############
##### All functions must only use numpy arrays as inputs/outputs #######
########################################################################

class NMR(object):
    def __init__(self):
        # setup renderer
        renderer = neural_renderer.Renderer(camera_mode='look')
        self.renderer = renderer

    def to_gpu(self, device=0):
        self.cuda_device = device

    def forward_mask(self, vertices, faces):
        ''' Renders silhouette masks.
        Args:
            vertices: B X N X 3 numpy array
            faces: B X F X 3 numpy array
        Returns:
            masks: B X 256 X 256 numpy array
        '''
        self.faces = torch.autograd.Variable(faces.cuda())
        self.vertices = torch.autograd.Variable(vertices.cuda())
        self.masks = self.renderer.render_silhouettes(self.vertices, self.faces)
        masks = self.masks.data.get()
        return masks

    def forward_img(self, vertices, faces, textures):
        ''' Renders textured images.
        Args:
            vertices: B X N X 3 numpy array
            faces: B X F X 3 numpy array
            textures: B X F X T X T X T X 3 numpy array
        Returns:
            images: B X 3 X 256 X 256 numpy array
        '''
        self.faces = faces
        self.vertices = vertices
        self.textures = textures
        self.images, _, _ = self.renderer.render(self.vertices, self.faces, self.textures)
        return self.images

########################################################################
############### Wrapper class for a rendering PythonOp ################
##### All functions must only use torch Tensors as inputs/outputs ######
########################################################################

class Render(torch.autograd.Function):
    # Legacy-style autograd Function: instance state plus a direct .forward()
    # call, rather than the modern staticmethod/ctx idiom.
    # TODO(Shubham): Make sure the outputs/gradients are on the GPU
    def __init__(self, renderer):
        super(Render, self).__init__()
        self.renderer = renderer

    def forward(self, vertices, faces, textures=None):
        # B x N x 3
        # Flip the y-axis here to align with the image coordinate system!
        vs = vertices
        vs[:, :, 1] *= -1
        fs = faces
        if textures is None:
            self.mask_only = True
            masks = self.renderer.forward_mask(vs, fs)
            return masks
        else:
            self.mask_only = False
            ts = textures
            imgs = self.renderer.forward_img(vs, fs, ts)
            return imgs
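
# --- Editor's addition (illustrative sketch; not part of the original commit) ---
# The Render class above uses the legacy autograd.Function style. A modern
# equivalent would use static methods and a ctx object, roughly like this
# sketch (textured path only; gradients would additionally need a backward):
class _RenderFnSketch(torch.autograd.Function):
    @staticmethod
    def forward(ctx, vertices, faces, textures, renderer):
        ctx.renderer = renderer  # non-tensor state goes on ctx
        vs = vertices.clone()    # avoid mutating the caller's tensor
        vs[:, :, 1] *= -1        # same y-flip as Render.forward above
        return renderer.forward_img(vs, faces, textures)
# usage: imgs = _RenderFnSketch.apply(vertices, faces, textures, nmr)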
########################################################################
############## Wrapper torch module for Neural Renderer ################
########################################################################

class NeuralRenderer(torch.nn.Module):
    """
    This is the core pytorch module to call.
    Every torch NMR has a chainer NMR.
    Only fwd/bwd once per iteration.
    """

    def __init__(self, img_size=720):
        super(NeuralRenderer, self).__init__()
        self.renderer = NMR()

        # rendering
        self.renderer.renderer.image_size = img_size

        # camera
        self.renderer.renderer.camera_mode = 'look'
        self.renderer.renderer.viewing_angle = 45
        # test example
        eye, camera_direction, camera_up = get_params(((-25, 16, 20), (-45, 180, 0)), ((-45, 3, 0.8), (0, 0, 0)))
        self.renderer.renderer.eye = eye
        self.renderer.renderer.camera_direction = camera_direction
        self.renderer.renderer.camera_up = camera_up

        # light
        self.renderer.renderer.light_intensity_ambient = 0.5
        self.renderer.renderer.light_intensity_directional = 0.5
        self.renderer.renderer.light_color_ambient = [1, 1, 1]  # white
        self.renderer.renderer.light_color_directional = [1, 1, 1]  # white
        self.renderer.renderer.light_direction = [0, 0, 1]  # up-to-down

        self.renderer.to_gpu()
        self.proj_fn = None
        self.offset_z = 5.

        self.RenderFunc = Render(self.renderer)

    def ambient_light_only(self):
        # Make light only ambient.
        self.renderer.renderer.light_intensity_ambient = 1
        self.renderer.renderer.light_intensity_directional = 0

    def set_bgcolor(self, color):
        self.renderer.renderer.background_color = color

    def project_points(self, verts, cams):
        proj = self.proj_fn(verts, cams)
        return proj[:, :, :2]

    def forward(self, vertices, faces, textures=None):
        if textures is not None:
            return self.RenderFunc.forward(vertices, faces, textures)
        else:
            return self.RenderFunc.forward(vertices, faces)
def example():
    obj_file = 'audi_et_te.obj'
    data_path = '../data/phy_attack/train/'
    img_save_dir = '../data/phy_attack/render_test_res'
    os.makedirs(img_save_dir, exist_ok=True)

    vertices, faces = neural_renderer.load_obj(obj_file)

    texture_mask = np.zeros((faces.shape[0], 2, 2, 2, 3), 'int8')
    with open('./all_faces.txt', 'r') as f:
        face_ids = f.readlines()
        for face_id in face_ids:
            texture_mask[int(face_id) - 1, :, :, :, :] = 1
    texture_mask = torch.from_numpy(texture_mask).cuda(device=0).unsqueeze(0)
    print(texture_mask.size())

    mask_renderer = NeuralRenderer()
    faces_var = torch.autograd.Variable(torch.from_numpy(faces[None, :, :]).cuda(device=0))
    vertices_var = torch.from_numpy(vertices[None, :, :]).cuda(device=0)

    # Textures: start from all-ones, then keep only the masked faces.
    texture_size = 2
    textures = np.ones((1, faces.shape[0], texture_size, texture_size, texture_size, 3), 'float32')
    textures = torch.from_numpy(textures).cuda(device=0)
    print(textures.size())
    textures = textures * texture_mask

    # The samples are .npz archives (see src/data/test), not plain .npy files.
    data_list = glob(os.path.join(data_path, "*.npz"))
    for data_file in data_list:
        data = np.load(data_file)
        img = data['img']
        veh_trans = data['veh_trans']
        cam_trans = data['cam_trans']

        eye, camera_direction, camera_up = get_params(cam_trans, veh_trans)
        mask_renderer.renderer.renderer.eye = eye
        mask_renderer.renderer.renderer.camera_direction = camera_direction
        mask_renderer.renderer.renderer.camera_up = camera_up

        imgs_pred = mask_renderer.forward(vertices_var, faces_var, textures)
        im_rendered = imgs_pred.data.cpu().numpy()[0]
        im_rendered = np.transpose(im_rendered, (1, 2, 0))
        print(im_rendered.shape)
        print(np.max(im_rendered), np.max(img))

        # scipy.misc.imsave is gone from modern SciPy; write explicit uint8
        # images with imageio instead.
        imageio.imwrite(os.path.join(img_save_dir, 'test_render.png'),
                        (np.clip(im_rendered, 0, 1) * 255).astype(np.uint8))
        imageio.imwrite(os.path.join(img_save_dir, 'test_origin.png'), img.astype(np.uint8))
        imageio.imwrite(os.path.join(img_save_dir, 'test_total.png'),
                        np.clip(img + 255 * im_rendered, 0, 255).astype(np.uint8))

11
src/readme.md Normal file
@@ -0,0 +1,11 @@
## Code for FCA: multi-view physical adversarial attack

### Requirements

> neural_renderer
>
> [neural renderer]: https://github.com/daniilidis-group/neural_renderer.git "neural renderer"
>
> Before running the code, you must install the `neural_renderer` Python package (the PyTorch port linked above).

If you have any questions, please do not hesitate to contact us.
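
A quick import check (an editor's sketch, assuming the PyTorch port above is installed; the scripts in `src/` also assume a CUDA device):

```python
# Smoke test: the attack scripts need these imports and a CUDA device.
import torch
import neural_renderer

print(torch.cuda.is_available())  # should print True for the renderer to work
```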
