ADD file via upload

This commit is contained in:
p74035216 2023-11-25 16:16:53 +08:00
parent 4201f20d35
commit 7c54729011
1 changed file with 257 additions and 0 deletions

DevNet_v1_0.py Normal file

@@ -0,0 +1,257 @@
import torch
import torchvision
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms,datasets
from torchvision.transforms import ToTensor
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
from PIL import Image
device=torch.device("cpu")
cropbox=(0,0,316,316)
learning_rate = 1e-3
batch_size = 50
epochs = 12
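# Data layout assumed by readimg() below: each of the "train" and "val" folders holds the
# image files plus a data.txt whose lines look like "<image filename> <label>", where
# label 0 marks a normal sample and any other value marks an anomaly.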
def readimg(path):
txt=open(path+"\\data.txt",'r')
lines=txt.readlines()
txt.close()
imgs=[]
labels=[]
for line in lines:
line = line.rstrip()
word = line.split(' ')
img = Image.open(path+'\\'+word[0])
img_2=img.crop(cropbox)
img_ten=transforms.ToTensor()(np.array(img_2))
imgs.append(img_ten)
if word[1]=='0':
labels.append(torch.zeros(1))
else:
labels.append(torch.ones(1))
return imgs,labels
class CustomedDataSet(Dataset):
def __init__(self, train=True, train_x = None, train_y = None, test_x = None, test_y = None, val = False, transform = None):
self.train = train
self.val = val
self.transform = transform
if self.train:
self.dataset=train_x
self.labels=train_y
elif val:
self.dataset=test_x
self.labels=test_y
else:
self.dataset= test_x
def __getitem__(self, index):
if self.train:
return torch.Tensor(self.dataset[index]).to(device), torch.Tensor(self.labels[index]).to(device)
elif self.val:
return torch.Tensor(self.dataset[index]).to(device), torch.Tensor(self.labels[index]).to(device)
else:
return torch.Tensor(self.dataset[index]).to(device)
def __len__(self):
return len(self.dataset)
class CNN(nn.Module):
def __init__(self):
super().__init__()
self.features=nn.Sequential(
nn.Conv2d(3,6, kernel_size=5),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2,stride=2),
nn.Conv2d(6,12, kernel_size=5),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2,stride=2),
nn.Conv2d(12,24,kernel_size=5,stride=1),
nn.ReLU(inplace=True),
nn.Conv2d(24,24,kernel_size=5,stride=1),
nn.ReLU(inplace=True),
nn.Conv2d(24,48,kernel_size=5,stride=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2,stride=2),
)
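# Feature-map size check (inputs are cropped to 316x316):
# 316 -> conv5 -> 312 -> pool/2 -> 156 -> conv5 -> 152 -> pool/2 -> 76
#     -> conv5 -> 72 -> conv5 -> 68 -> conv5 -> 64 -> pool/2 -> 32,
# so each sample flattens to 48 * 32 * 32 features, matching the first Linear layer below.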
# Copy self.fc1 from the earlier Network1 here:
# self.fc1 = nn.Linear(in_features=XXX, out_features=120)
self.classifier=nn.Sequential(
nn.Linear(32*32*48,2048),
nn.ReLU(inplace=True),
#nn.Dropout(0.5),
nn.Linear(2048,1024),
nn.ReLU(inplace=True),
#nn.Dropout(0.5),
nn.Linear(1024,1),
)
def forward(self, t):
t=self.features(t)
# Note: before the output of the last convolutional layer flows into the fully
# connected layers, it must be reshaped into a 2-D tensor, where dim0 indexes the
# samples in the batch and dim1 is the flattened activation vector of each sample.
# Complete the code below.
# dim0: number of samples in the batch
# dim1: length of each sample's flattened activation vector
# t = t.reshape(XXX,XXX)
t = t.reshape(t.size(0), 32*32*48)  # t.size(0) also works when a batch is not full
t = self.classifier(t)
return t
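# The network outputs one raw score per sample; DeviationLoss below interprets it as an
# anomaly score (kept near 0 for normal samples, pushed towards the margin for anomalies).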
class DeviationLoss(nn.Module):
def __init__(self):
super().__init__()
def forward(self, y_pred, y_true):
confidence_margin = 5.
#ref = torch.normal(mean=0., std=torch.full([5000], 1.)).cuda()
#dev = (y_pred - torch.mean(ref)) / torch.std(ref)
dev = y_pred
inlier_loss = torch.abs(dev)
#outlier_loss = torch.abs((confidence_margin - dev).clamp_(min=0.))
outlier_loss = torch.abs((confidence_margin - torch.abs(dev)))
dev_loss = (1 - y_true) * inlier_loss + y_true * outlier_loss
#print("y_pred:",y_pred.shape,'y_true:',y_true.shape,'mean_loss:',torch.mean(dev_loss).item())
return torch.mean(dev_loss*(1+10*y_true))
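# How the loss behaves: dev is simply the raw network output (the Gaussian reference
# scoring is commented out). For normal samples (y_true = 0) the term |dev| pulls the
# score towards 0; for anomalies (y_true = 1) the term |confidence_margin - |dev|| pushes
# the score's magnitude towards the margin of 5. The (1 + 10*y_true) factor weights
# anomaly samples about 11x more heavily, presumably to offset class imbalance.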
def pred_to_label(preds):
a = torch.abs(preds)
# zero/one tensors with the same shape as a
zero = torch.zeros_like(a)
one = torch.ones_like(a)
# scores with |pred| >= 5 are labelled 1 (anomaly), everything else 0 (normal)
a = torch.where(a >= 5.0, one, zero)
return a
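# The decision threshold of 5.0 matches confidence_margin in DeviationLoss: scores driven
# to the margin are labelled anomalous, scores kept near 0 are labelled normal.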
def get_tp(preds,labels):
a=pred_to_label(preds)
two=2.0*torch.ones_like(labels)
b=torch.where(labels==0.0,two,labels)
return a.eq(b).sum().item()
def get_tn(preds,labels):
a=pred_to_label(preds)
two=2.0*torch.ones_like(labels)
b=torch.where(labels==1.0,two,labels)
return a.eq(b).sum().item()
def get_fp(preds,labels):
a=pred_to_label(preds)
two=2.0*torch.ones_like(labels)
one=torch.ones_like(labels)
b=torch.where(labels==0.0,one,two)
return a.eq(b).sum().item()
def get_fn(preds,labels):
a=pred_to_label(preds)
two=2.0*torch.ones_like(labels)
zero=torch.zeros_like(labels)
b=torch.where(labels==1.0,zero,two)
return a.eq(b).sum().item()
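# The four helpers above count confusion-matrix entries by remapping one class of the
# ground-truth labels to a sentinel value (2.0) that a 0/1 prediction can never equal,
# so eq() only counts matches for the class of interest (e.g. get_tp compares prediction 1
# against label 1, with label 0 remapped to 2).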
if __name__ == '__main__':
network=CNN()
#network = torchvision.models.alexnet(pretrained=False)
new_train_x,new_train_y=readimg("train")
train_set = CustomedDataSet(train_x = new_train_x, train_y = new_train_y)
new_val_x,new_val_y=readimg('val')
val_set = CustomedDataSet(test_x = new_val_x, test_y = new_val_y, train=False, val = True)
train_loader = DataLoader(dataset=train_set,batch_size=batch_size,shuffle=True,drop_last=True)
val_loader = DataLoader(dataset=val_set,batch_size=batch_size,shuffle=False,drop_last=True)
optimizer = torch.optim.SGD(network.parameters(), lr=learning_rate)
loss_func=DeviationLoss()
#loss_func=nn.BCEWithLogitsLoss()
for epoch in range(epochs): # loop over training epochs
total_loss = 0
total_tp = 0
total_tn = 0
total_fp = 0
total_fn = 0
count = 0
for batch in train_loader: # get a batch from the dataloader
# Unpack the batch, run the forward pass, and compute the loss
##################### please finish the code ########################
# images, labels = XXX
# preds = XXX
# loss = XXX
images, labels = batch
preds = network(images)
loss = loss_func(preds, labels)
################################ end ################################
# The next line is very important: it tells the optimizer to reset the weight
# gradients to zero. Without it, the gradients computed in the next backward pass
# would accumulate on top of the previous ones and corrupt the update.
optimizer.zero_grad()
# Run backpropagation and update the parameters
##################### please finish the code ########################
# backward pass (one line)
loss.backward()
# parameter update (one line)
optimizer.step()
################################ end ################################
total_loss += loss.item()
count += 1
total_tp += get_tp(preds,labels)
total_tn += get_tn(preds,labels)
total_fp += get_fp(preds,labels)
total_fn += get_fn(preds,labels)
Precision_ratio=total_tp/(total_tp+total_fp+1e-5)
Recall_ratio=total_tp/(total_tp+total_fn+1e-5)
F_Measure=2*(Precision_ratio*Recall_ratio)/(Precision_ratio+Recall_ratio+1e-5)
print("epoch:", epoch,
"\ncorrect times:", (total_tp+total_tn), f"training accuracy:", "%.3f" %((total_tp+total_tn)/(total_tp+total_tn+total_fp+total_fn+1e-5)*100), "%",
"\ntrue positive times:", (total_tp), f"Precision ratio:", "%.3f" %(Precision_ratio*100), "%",
f"Recall ratio:", "%.3f" %(Recall_ratio*100), "%",
f"F-Measure:", "%.3f" %(F_Measure*100), "%",
"\naverage_loss:", "%.3f" %(total_loss/count))
# Re-score the last training batch and print (raw score, coded outcome) pairs, where
# prediction - 0.5*label encodes 0.5 = TP, 0.0 = TN, 1.0 = FP, -0.5 = FN.
preds = network(images)
pred_list=(pred_to_label(preds)-0.5*labels).tolist()
Preds=preds.tolist()
for i in range(len(Preds)):
print("({:.5f},{})".format(Preds[i][0],pred_list[i][0]),end='')
print('\n')
total_loss = 0
total_tp = 0
total_tn = 0
total_fp = 0
total_fn = 0
count = 0
for batch in val_loader:
images, labels = batch
preds = network(images)
loss = loss_func(preds, labels)
total_loss += loss.item()
count += 1
total_tp += get_tp(preds,labels)
total_tn += get_tn(preds,labels)
total_fp += get_fp(preds,labels)
total_fn += get_fn(preds,labels)
print("\nOn val:",
"\ncorrect times:", (total_tp+total_tn), f"training accuracy:", "%.3f" %((total_tp+total_tn)/(total_tp+total_tn+total_fp+total_fn+1e-5)*100), "%",
"\ntrue positive times:", (total_tp), f"Precision ratio:", "%.3f" %(total_tp/(total_tp+total_fp+1e-5)*100), "%",
f"Recall ratio:", "%.3f" %(total_tp/(total_tp+total_fn+1e-5)*100), "%",
f"F-Measure:", "%.3f" %(F_Measure*100), "%",
"\naverage_loss:", "%.3f" %(total_loss/count))
# Score the last validation batch again and report the spread of the raw outputs.
preds=network(images)
print("output avg:",torch.mean(preds).item(),"output std:",torch.std(preds).item())
torch.save(network, "Dev_netv1.0_CNN.pth") # consider defining a PATH variable instead of hard-coding the filename
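# A minimal reload sketch for later inference (assumes the CNN class definition is
# importable, since torch.save above pickles the whole module object):
# model = torch.load("Dev_netv1.0_CNN.pth", map_location=device)
# model.eval()
# with torch.no_grad():
#     scores = model(images)            # raw anomaly scores
#     labels = pred_to_label(scores)    # 1 = anomaly, 0 = normal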