update

parent 352fbc18b8
commit 93619ce4a7
@@ -25,7 +25,6 @@
storage.googleapis.com
runs/*
data/*
!data/images/zidane.jpg
!data/images/bus.jpg
!data/coco.names
@@ -0,0 +1,55 @@
# Global Wheat 2020 dataset http://www.global-wheat.com/
# Train command: python train.py --data GlobalWheat2020.yaml
# Default dataset location is next to YOLOv3:
#   /parent_folder
#     /datasets/GlobalWheat2020
#     /yolov3


# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: # 3422 images
  - ../datasets/GlobalWheat2020/images/arvalis_1
  - ../datasets/GlobalWheat2020/images/arvalis_2
  - ../datasets/GlobalWheat2020/images/arvalis_3
  - ../datasets/GlobalWheat2020/images/ethz_1
  - ../datasets/GlobalWheat2020/images/rres_1
  - ../datasets/GlobalWheat2020/images/inrae_1
  - ../datasets/GlobalWheat2020/images/usask_1

val: # 748 images (WARNING: train set contains ethz_1)
  - ../datasets/GlobalWheat2020/images/ethz_1

test: # 1276 images
  - ../datasets/GlobalWheat2020/images/utokyo_1
  - ../datasets/GlobalWheat2020/images/utokyo_2
  - ../datasets/GlobalWheat2020/images/nau_1
  - ../datasets/GlobalWheat2020/images/uq_1

# number of classes
nc: 1

# class names
names: [ 'wheat_head' ]


# download command/URL (optional) --------------------------------------------------------------------------------------
download: |
  from utils.general import download, Path

  # Download
  dir = Path('../datasets/GlobalWheat2020') # dataset directory
  urls = ['https://zenodo.org/record/4298502/files/global-wheat-codalab-official.zip',
          'https://github.com/ultralytics/yolov5/releases/download/v1.0/GlobalWheat2020_labels.zip']
  download(urls, dir=dir)

  # Make Directories
  for p in 'annotations', 'images', 'labels':
      (dir / p).mkdir(parents=True, exist_ok=True)

  # Move
  for p in 'arvalis_1', 'arvalis_2', 'arvalis_3', 'ethz_1', 'rres_1', 'inrae_1', 'usask_1', \
           'utokyo_1', 'utokyo_2', 'nau_1', 'uq_1':
      (dir / p).rename(dir / 'images' / p) # move to /images
      f = (dir / p).with_suffix('.json') # json file
      if f.exists():
          f.rename((dir / 'annotations' / p).with_suffix('.json')) # move to /annotations
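Note on the 'download:' field above: it is a YAML block scalar holding plain Python, not a shell command. Below is a minimal sketch of how a dataset loader might consume such a field; the assumption that multi-line strings are exec()'d, and the helper name run_dataset_download, are illustrative and not this repository's actual API.

import yaml

def run_dataset_download(data_yaml):
    # Parse the dataset YAML (train/val/test paths, nc, names, optional download field)
    with open(data_yaml) as f:
        data = yaml.safe_load(f)
    s = data.get('download')
    if not s:
        return
    if '\n' in s:   # multi-line string -> embedded Python snippet, as in the file above
        exec(s)     # runs the download / convert code
    else:           # single line -> shell command or plain URL, left to the caller
        print(f'Run manually: {s}')

# Example (hypothetical path): run_dataset_download('data/GlobalWheat2020.yaml')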
@@ -0,0 +1,52 @@
# SKU-110K retail items dataset https://github.com/eg4000/SKU110K_CVPR19
# Train command: python train.py --data SKU-110K.yaml
# Default dataset location is next to YOLOv3:
#   /parent_folder
#     /datasets/SKU-110K
#     /yolov3


# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ../datasets/SKU-110K/train.txt # 8219 images
val: ../datasets/SKU-110K/val.txt # 588 images
test: ../datasets/SKU-110K/test.txt # 2936 images

# number of classes
nc: 1

# class names
names: [ 'object' ]


# download command/URL (optional) --------------------------------------------------------------------------------------
download: |
  import shutil
  from tqdm import tqdm
  from utils.general import np, pd, Path, download, xyxy2xywh

  # Download
  datasets = Path('../datasets') # download directory
  urls = ['http://trax-geometry.s3.amazonaws.com/cvpr_challenge/SKU110K_fixed.tar.gz']
  download(urls, dir=datasets, delete=False)

  # Rename directories
  dir = (datasets / 'SKU-110K')
  if dir.exists():
      shutil.rmtree(dir)
  (datasets / 'SKU110K_fixed').rename(dir) # rename dir
  (dir / 'labels').mkdir(parents=True, exist_ok=True) # create labels dir

  # Convert labels
  names = 'image', 'x1', 'y1', 'x2', 'y2', 'class', 'image_width', 'image_height' # column names
  for d in 'annotations_train.csv', 'annotations_val.csv', 'annotations_test.csv':
      x = pd.read_csv(dir / 'annotations' / d, names=names).values # annotations
      images, unique_images = x[:, 0], np.unique(x[:, 0])
      with open((dir / d).with_suffix('.txt').__str__().replace('annotations_', ''), 'w') as f:
          f.writelines(f'./images/{s}\n' for s in unique_images)
      for im in tqdm(unique_images, desc=f'Converting {dir / d}'):
          cls = 0 # single-class dataset
          with open((dir / 'labels' / im).with_suffix('.txt'), 'a') as f:
              for r in x[images == im]:
                  w, h = r[6], r[7] # image width, height
                  xywh = xyxy2xywh(np.array([[r[1] / w, r[2] / h, r[3] / w, r[4] / h]]))[0] # instance
                  f.write(f"{cls} {xywh[0]:.5f} {xywh[1]:.5f} {xywh[2]:.5f} {xywh[3]:.5f}\n") # write label
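The conversion above relies on utils.general.xyxy2xywh. The snippet below is a hedged stand-in, inferred from the name and the way it is called here rather than copied from this repository, showing the intended corner-to-center transform and the resulting label-line format.

import numpy as np

def xyxy2xywh_sketch(x):
    # [x1, y1, x2, y2] corner boxes -> [x center, y center, width, height]
    y = np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2  # x center
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2  # y center
    y[:, 2] = x[:, 2] - x[:, 0]        # width
    y[:, 3] = x[:, 3] - x[:, 1]        # height
    return y

# A made-up SKU-110K row: x1, y1, x2, y2 = 100, 200, 300, 400 in a 1000x800 image.
# Dividing the corners by image size first (as the script does) yields a normalized label line.
xywh = xyxy2xywh_sketch(np.array([[100 / 1000, 200 / 800, 300 / 1000, 400 / 800]]))[0]
print('0 ' + ' '.join(f'{v:.5f}' for v in xywh))  # -> "0 0.20000 0.37500 0.20000 0.25000"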
@@ -0,0 +1,61 @@
# VisDrone2019-DET dataset https://github.com/VisDrone/VisDrone-Dataset
# Train command: python train.py --data VisDrone.yaml
# Default dataset location is next to YOLOv3:
#   /parent_folder
#     /VisDrone
#     /yolov3


# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ../VisDrone/VisDrone2019-DET-train/images # 6471 images
val: ../VisDrone/VisDrone2019-DET-val/images # 548 images
test: ../VisDrone/VisDrone2019-DET-test-dev/images # 1610 images

# number of classes
nc: 10

# class names
names: [ 'pedestrian', 'people', 'bicycle', 'car', 'van', 'truck', 'tricycle', 'awning-tricycle', 'bus', 'motor' ]


# download command/URL (optional) --------------------------------------------------------------------------------------
download: |
  from utils.general import download, os, Path

  def visdrone2yolo(dir):
      from PIL import Image
      from tqdm import tqdm

      def convert_box(size, box):
          # Convert VisDrone box to YOLO xywh box
          dw = 1. / size[0]
          dh = 1. / size[1]
          return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh

      (dir / 'labels').mkdir(parents=True, exist_ok=True) # make labels directory
      pbar = tqdm((dir / 'annotations').glob('*.txt'), desc=f'Converting {dir}')
      for f in pbar:
          img_size = Image.open((dir / 'images' / f.name).with_suffix('.jpg')).size
          lines = []
          with open(f, 'r') as file: # read annotation.txt
              for row in [x.split(',') for x in file.read().strip().splitlines()]:
                  if row[4] == '0': # VisDrone 'ignored regions' class 0
                      continue
                  cls = int(row[5]) - 1
                  box = convert_box(img_size, tuple(map(int, row[:4])))
                  lines.append(f"{cls} {' '.join(f'{x:.6f}' for x in box)}\n")
          with open(str(f).replace(os.sep + 'annotations' + os.sep, os.sep + 'labels' + os.sep), 'w') as fl:
              fl.writelines(lines) # write label.txt


  # Download
  dir = Path('../VisDrone') # dataset directory
  urls = ['https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-train.zip',
          'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-val.zip',
          'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-dev.zip',
          'https://github.com/ultralytics/yolov5/releases/download/v1.0/VisDrone2019-DET-test-challenge.zip']
  download(urls, dir=dir)

  # Convert
  for d in 'VisDrone2019-DET-train', 'VisDrone2019-DET-val', 'VisDrone2019-DET-test-dev':
      visdrone2yolo(dir / d) # convert VisDrone annotations to YOLO labels
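For reference, a worked check of convert_box above with made-up numbers: VisDrone annotations store top-left x, y, width, height in pixels, and the output is the normalized center-x, center-y, width, height that YOLO labels expect.

def convert_box(size, box):  # same math as in the download snippet above
    dw, dh = 1. / size[0], 1. / size[1]
    return (box[0] + box[2] / 2) * dw, (box[1] + box[3] / 2) * dh, box[2] * dw, box[3] * dh

x, y, w, h = convert_box((1920, 1080), (100, 200, 50, 80))  # illustrative image and box sizes
print(f'1 {x:.6f} {y:.6f} {w:.6f} {h:.6f}')  # -> "1 0.065104 0.222222 0.026042 0.074074"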
@@ -0,0 +1,21 @@
# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/
# Train command: python train.py --data argoverse_hd.yaml
# Default dataset location is next to YOLOv3:
#   /parent_folder
#     /argoverse
#     /yolov3


# download command/URL (optional)
download: bash data/scripts/get_argoverse_hd.sh

# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ../argoverse/Argoverse-1.1/images/train/ # 39384 images
val: ../argoverse/Argoverse-1.1/images/val/ # 15062 images
test: ../argoverse/Argoverse-1.1/images/test/ # Submit to: https://eval.ai/web/challenges/challenge-page/800/overview

# number of classes
nc: 8

# class names
names: [ 'person', 'bicycle', 'car', 'motorcycle', 'bus', 'truck', 'traffic_light', 'stop_sign' ]
@@ -0,0 +1,26 @@
# COCO 2017 dataset http://cocodataset.org - first 128 training images
# Train command: python train.py --data coco128.yaml
# Default dataset location is next to YOLOv5:
#   /parent
#     /datasets/coco128
#     /yolov5


# Train/val/test sets as 1) dir: path/to/imgs, 2) file: path/to/imgs.txt, or 3) list: [path/to/imgs1, path/to/imgs2, ..]
path: F:/PythonPro/DualAttentionAttack/data/phy_attack # dataset root dir
train: train_new # train_new train_label_new
val: train_new # val images (relative to 'path') 128 images
test: # test images (optional)

# Classes
nc: 80 # number of classes
names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
         'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
         'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
         'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
         'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
         'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
         'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
         'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
         'hair drier', 'toothbrush' ] # class names

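Unlike the other dataset files in this commit, the file above gives a 'path:' root with 'train:' and 'val:' relative to it. A hedged sketch of how such entries are typically resolved follows; the exact resolution logic lives in the data loader, which is not part of this diff.

from pathlib import Path

root = Path('F:/PythonPro/DualAttentionAttack/data/phy_attack')  # 'path' from the YAML above
train = root / 'train_new'  # relative entries are joined onto the dataset root
val = root / 'train_new'    # train and val here point at the same 128-image folder
print(train, val)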
@@ -0,0 +1,35 @@
# COCO 2017 dataset http://cocodataset.org
# Train command: python train.py --data coco.yaml
# Default dataset location is next to YOLOv3:
#   /parent_folder
#     /coco
#     /yolov3


# download command/URL (optional)
download: bash data/scripts/get_coco.sh

# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ../coco/train2017.txt # 118287 images
val: ../coco/val2017.txt # 5000 images
test: ../coco/test-dev2017.txt # 20288 of 40670 images, submit to https://competitions.codalab.org/competitions/20794

# number of classes
nc: 80

# class names
names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
         'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
         'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
         'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
         'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
         'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
         'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
         'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
         'hair drier', 'toothbrush' ]

# Print classes
# with open('data/coco.yaml') as f:
#   d = yaml.safe_load(f) # dict
#   for i, x in enumerate(d['names']):
#     print(i, x)
@@ -0,0 +1,28 @@
# COCO 2017 dataset http://cocodataset.org - first 128 training images
# Train command: python train.py --data coco128.yaml
# Default dataset location is next to YOLOv3:
#   /parent_folder
#     /coco128
#     /yolov3


# download command/URL (optional)
download: https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip

# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: F:/DataSource/coco128/images/train2017/ # 128 images
val: F:/DataSource/coco128/images/train2017/ # 128 images

# number of classes
nc: 80

# class names
names: [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
         'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
         'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
         'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
         'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
         'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
         'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
         'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
         'hair drier', 'toothbrush' ]
@@ -0,0 +1,38 @@
# Hyperparameters for VOC finetuning
# python train.py --batch 64 --weights yolov5m.pt --data voc.yaml --img 512 --epochs 50
# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials


# Hyperparameter Evolution Results
# Generations: 306
#                   P         R     mAP.5 mAP.5:.95       box       obj       cls
# Metrics:        0.6     0.936     0.896     0.684    0.0115   0.00805   0.00146

lr0: 0.0032
lrf: 0.12
momentum: 0.843
weight_decay: 0.00036
warmup_epochs: 2.0
warmup_momentum: 0.5
warmup_bias_lr: 0.05
box: 0.0296
cls: 0.243
cls_pw: 0.631
obj: 0.301
obj_pw: 0.911
iou_t: 0.2
anchor_t: 2.91
# anchors: 3.63
fl_gamma: 0.0
hsv_h: 0.0138
hsv_s: 0.664
hsv_v: 0.464
degrees: 0.373
translate: 0.245
scale: 0.898
shear: 0.602
perspective: 0.0
flipud: 0.00856
fliplr: 0.5
mosaic: 1.0
mixup: 0.243
@@ -0,0 +1,28 @@
lr0: 0.00258
lrf: 0.17
momentum: 0.779
weight_decay: 0.00058
warmup_epochs: 1.33
warmup_momentum: 0.86
warmup_bias_lr: 0.0711
box: 0.0539
cls: 0.299
cls_pw: 0.825
obj: 0.632
obj_pw: 1.0
iou_t: 0.2
anchor_t: 3.44
anchors: 3.2
fl_gamma: 0.0
hsv_h: 0.0188
hsv_s: 0.704
hsv_v: 0.36
degrees: 0.0
translate: 0.0902
scale: 0.491
shear: 0.0
perspective: 0.0
flipud: 0.0
fliplr: 0.5
mosaic: 1.0
mixup: 0.0
@@ -0,0 +1,33 @@
# Hyperparameters for COCO training from scratch
# python train.py --batch 40 --cfg yolov5m.yaml --weights '' --data coco.yaml --img 640 --epochs 300
# See tutorials for hyperparameter evolution https://github.com/ultralytics/yolov5#tutorials


lr0: 0.01 # initial learning rate (SGD=1E-2, Adam=1E-3)
lrf: 0.2 # final OneCycleLR learning rate (lr0 * lrf)
momentum: 0.937 # SGD momentum/Adam beta1
weight_decay: 0.0005 # optimizer weight decay 5e-4
warmup_epochs: 3.0 # warmup epochs (fractions ok)
warmup_momentum: 0.8 # warmup initial momentum
warmup_bias_lr: 0.1 # warmup initial bias lr
box: 0.05 # box loss gain
cls: 0.5 # cls loss gain
cls_pw: 1.0 # cls BCELoss positive_weight
obj: 1.0 # obj loss gain (scale with pixels)
obj_pw: 1.0 # obj BCELoss positive_weight
iou_t: 0.20 # IoU training threshold
anchor_t: 4.0 # anchor-multiple threshold
# anchors: 3 # anchors per output layer (0 to ignore)
fl_gamma: 0.0 # focal loss gamma (efficientDet default gamma=1.5)
hsv_h: 0.015 # image HSV-Hue augmentation (fraction)
hsv_s: 0.7 # image HSV-Saturation augmentation (fraction)
hsv_v: 0.4 # image HSV-Value augmentation (fraction)
degrees: 0.0 # image rotation (+/- deg)
translate: 0.1 # image translation (+/- fraction)
scale: 0.5 # image scale (+/- gain)
shear: 0.0 # image shear (+/- deg)
perspective: 0.0 # image perspective (+/- fraction), range 0-0.001
flipud: 0.0 # image flip up-down (probability)
fliplr: 0.5 # image flip left-right (probability)
mosaic: 1.0 # image mosaic (probability)
mixup: 0.0 # image mixup (probability)
Binary file not shown. After: 476 KiB
Binary file not shown. After: 165 KiB
@@ -0,0 +1,102 @@
# Objects365 dataset https://www.objects365.org/
# Train command: python train.py --data objects365.yaml
# Default dataset location is next to YOLOv3:
#   /parent_folder
#     /datasets/objects365
#     /yolov3

# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ../datasets/objects365/images/train # 1742289 images
val: ../datasets/objects365/images/val # 5570 images

# number of classes
nc: 365

# class names
names: [ 'Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses', 'Bottle', 'Desk', 'Cup',
         'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book',
         'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag',
         'Pillow', 'Boots', 'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt', 'Monitor/TV',
         'Backpack', 'Umbrella', 'Traffic Light', 'Speaker', 'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle',
         'Stool', 'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Basket', 'Drum', 'Pen/Pencil', 'Bus', 'Wild Bird',
         'High Heels', 'Motorcycle', 'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck',
         'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy', 'Candle', 'Sailboat', 'Laptop', 'Awning',
         'Bed', 'Faucet', 'Tent', 'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner', 'Knife',
         'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork', 'Traffic Sign', 'Balloon', 'Tripod', 'Dog', 'Spoon', 'Clock',
         'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger', 'Blackboard/Whiteboard', 'Napkin', 'Other Fish',
         'Orange/Tangerine', 'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle', 'Fan',
         'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane', 'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard',
         'Luggage', 'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone', 'Sports Car', 'Stop Sign',
         'Dessert', 'Scooter', 'Stroller', 'Crane', 'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat',
         'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza', 'Elephant', 'Skateboard', 'Surfboard',
         'Gun', 'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot', 'Toilet', 'Kite', 'Strawberry',
         'Other Balls', 'Shovel', 'Pepper', 'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks',
         'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board', 'Coffee Table', 'Side Table', 'Scissors',
         'Marker', 'Pie', 'Ladder', 'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball', 'Zebra', 'Grape',
         'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin', 'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck',
         'Billiards', 'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase', 'Cucumber', 'Cigar/Cigarette',
         'Paint Brush', 'Pear', 'Heavy Truck', 'Hamburger', 'Extractor', 'Extension Cord', 'Tong', 'Tennis Racket',
         'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis', 'Ship', 'Swing', 'Coffee Machine',
         'Slide', 'Carriage', 'Onion', 'Green beans', 'Projector', 'Frisbee', 'Washing Machine/Drying Machine',
         'Chicken', 'Printer', 'Watermelon', 'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hot-air balloon',
         'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog', 'Blender', 'Peach', 'Rice', 'Wallet/Purse',
         'Volleyball', 'Deer', 'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple', 'Golf Ball',
         'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle', 'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin',
         'Megaphone', 'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion', 'Sandwich', 'Nuts',
         'Speed Limit Sign', 'Induction Cooker', 'Broom', 'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit',
         'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese', 'Notepaper', 'Cherry', 'Pliers', 'CD',
         'Pasta', 'Hammer', 'Cue', 'Avocado', 'Hamimelon', 'Flask', 'Mushroom', 'Screwdriver', 'Soap', 'Recorder',
         'Bear', 'Eggplant', 'Board Eraser', 'Coconut', 'Tape Measure/Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips',
         'Steak', 'Crosswalk Sign', 'Stapler', 'Camel', 'Formula 1', 'Pomegranate', 'Dishwasher', 'Crab',
         'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba', 'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal',
         'Butterfly', 'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill', 'Hair Dryer', 'Egg tart',
         'Jellyfish', 'Treadmill', 'Lighter', 'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target', 'French',
         'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak', 'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell',
         'Scallop', 'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Tennis paddle', 'Cosmetics Brush/Eyeliner Pencil',
         'Chainsaw', 'Eraser', 'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis' ]


# download command/URL (optional) --------------------------------------------------------------------------------------
download: |
  from pycocotools.coco import COCO
  from tqdm import tqdm

  from utils.general import download, Path

  # Make Directories
  dir = Path('../datasets/objects365') # dataset directory
  for p in 'images', 'labels':
      (dir / p).mkdir(parents=True, exist_ok=True)
      for q in 'train', 'val':
          (dir / p / q).mkdir(parents=True, exist_ok=True)

  # Download
  url = "https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/train/"
  download([url + 'zhiyuan_objv2_train.tar.gz'], dir=dir, delete=False) # annotations json
  download([url + f for f in [f'patch{i}.tar.gz' for i in range(51)]], dir=dir / 'images' / 'train',
           curl=True, delete=False, threads=8)

  # Move
  train = dir / 'images' / 'train'
  for f in tqdm(train.rglob('*.jpg'), desc=f'Moving images'):
      f.rename(train / f.name) # move to /images/train

  # Labels
  coco = COCO(dir / 'zhiyuan_objv2_train.json')
  names = [x["name"] for x in coco.loadCats(coco.getCatIds())]
  for cid, cat in enumerate(names):
      catIds = coco.getCatIds(catNms=[cat])
      imgIds = coco.getImgIds(catIds=catIds)
      for im in tqdm(coco.loadImgs(imgIds), desc=f'Class {cid + 1}/{len(names)} {cat}'):
          width, height = im["width"], im["height"]
          path = Path(im["file_name"]) # image filename
          try:
              with open(dir / 'labels' / 'train' / path.with_suffix('.txt').name, 'a') as file:
                  annIds = coco.getAnnIds(imgIds=im["id"], catIds=catIds, iscrowd=None)
                  for a in coco.loadAnns(annIds):
                      x, y, w, h = a['bbox'] # bounding box in xywh (xy top-left corner)
                      x, y = x + w / 2, y + h / 2 # xy to center
                      file.write(f"{cid} {x / width:.5f} {y / height:.5f} {w / width:.5f} {h / height:.5f}\n")

          except Exception as e:
              print(e)
@@ -0,0 +1,61 @@
#!/bin/bash
# Argoverse-HD dataset (ring-front-center camera) http://www.cs.cmu.edu/~mengtial/proj/streaming/
# Download command: bash data/scripts/get_argoverse_hd.sh
# Train command: python train.py --data argoverse_hd.yaml
# Default dataset location is next to YOLOv3:
#   /parent_folder
#     /argoverse
#     /yolov3

# Download/unzip images
d='../argoverse/' # unzip directory
mkdir $d
url=https://argoverse-hd.s3.us-east-2.amazonaws.com/
f=Argoverse-HD-Full.zip
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
wait # finish background tasks

cd ../argoverse/Argoverse-1.1/
ln -s tracking images

cd ../Argoverse-HD/annotations/

python3 - "$@" <<END
import json
from pathlib import Path

annotation_files = ["train.json", "val.json"]
print("Converting annotations to YOLOv3 format...")

for val in annotation_files:
    a = json.load(open(val, "rb"))

    label_dict = {}
    for annot in a['annotations']:
        img_id = annot['image_id']
        img_name = a['images'][img_id]['name']
        img_label_name = img_name[:-3] + "txt"

        cls = annot['category_id'] # instance class id
        x_center, y_center, width, height = annot['bbox'] # COCO-style bbox: top-left x, y, width, height
        x_center = (x_center + width / 2) / 1920. # offset and scale
        y_center = (y_center + height / 2) / 1200. # offset and scale
        width /= 1920. # scale
        height /= 1200. # scale

        img_dir = "./labels/" + a['seq_dirs'][a['images'][annot['image_id']]['sid']]

        Path(img_dir).mkdir(parents=True, exist_ok=True)
        if img_dir + "/" + img_label_name not in label_dict:
            label_dict[img_dir + "/" + img_label_name] = []

        label_dict[img_dir + "/" + img_label_name].append(f"{cls} {x_center} {y_center} {width} {height}\n")

    for filename in label_dict:
        with open(filename, "w") as file:
            for string in label_dict[filename]:
                file.write(string)

END

mv ./labels ../../Argoverse-1.1/
@@ -0,0 +1,27 @@
#!/bin/bash
# COCO 2017 dataset http://cocodataset.org
# Download command: bash data/scripts/get_coco.sh
# Train command: python train.py --data coco.yaml
# Default dataset location is next to YOLOv3:
#   /parent_folder
#     /coco
#     /yolov3

# Download/unzip labels
d='../' # unzip directory
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
f='coco2017labels.zip' # or 'coco2017labels-segments.zip', 68 MB
echo 'Downloading' $url$f ' ...'
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background

# Download/unzip images
d='../coco/images' # unzip directory
url=http://images.cocodataset.org/zips/
f1='train2017.zip' # 19G, 118k images
f2='val2017.zip' # 1G, 5k images
f3='test2017.zip' # 7G, 41k images (optional)
for f in $f1 $f2; do
  echo 'Downloading' $url$f '...'
  curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
done
wait # finish background tasks
@@ -0,0 +1,17 @@
#!/bin/bash
# COCO128 dataset https://www.kaggle.com/ultralytics/coco128
# Download command: bash data/scripts/get_coco128.sh
# Train command: python train.py --data coco128.yaml
# Default dataset location is next to YOLOv3:
#   /parent_folder
#     /coco128
#     /yolov3

# Download/unzip images and labels
d='../' # unzip directory
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
f='coco128.zip' # or 'coco2017labels-segments.zip', 68 MB
echo 'Downloading' $url$f ' ...'
curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background

wait # finish background tasks
@@ -0,0 +1,116 @@
#!/bin/bash
# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/
# Download command: bash data/scripts/get_voc.sh
# Train command: python train.py --data voc.yaml
# Default dataset location is next to YOLOv3:
#   /parent_folder
#     /VOC
#     /yolov3

start=$(date +%s)
mkdir -p ../tmp
cd ../tmp/

# Download/unzip images and labels
d='.' # unzip directory
url=https://github.com/ultralytics/yolov5/releases/download/v1.0/
f1=VOCtrainval_06-Nov-2007.zip # 446MB, 5012 images
f2=VOCtest_06-Nov-2007.zip # 438MB, 4953 images
f3=VOCtrainval_11-May-2012.zip # 1.95GB, 17126 images
for f in $f3 $f2 $f1; do
  echo 'Downloading' $url$f '...'
  curl -L $url$f -o $f && unzip -q $f -d $d && rm $f & # download, unzip, remove in background
done
wait # finish background tasks

end=$(date +%s)
runtime=$((end - start))
echo "Completed in" $runtime "seconds"

echo "Splitting dataset..."
python3 - "$@" <<END
import os
import xml.etree.ElementTree as ET
from os import getcwd

sets = [('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test')]

classes = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog",
           "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]


def convert_box(size, box):
    dw = 1. / (size[0])
    dh = 1. / (size[1])
    x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2]
    return x * dw, y * dh, w * dw, h * dh


def convert_annotation(year, image_id):
    in_file = open('VOCdevkit/VOC%s/Annotations/%s.xml' % (year, image_id))
    out_file = open('VOCdevkit/VOC%s/labels/%s.txt' % (year, image_id), 'w')
    tree = ET.parse(in_file)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)

    for obj in root.iter('object'):
        difficult = obj.find('difficult').text
        cls = obj.find('name').text
        if cls not in classes or int(difficult) == 1:
            continue
        cls_id = classes.index(cls)
        xmlbox = obj.find('bndbox')
        b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text), float(xmlbox.find('ymin').text),
             float(xmlbox.find('ymax').text))
        bb = convert_box((w, h), b)
        out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')


cwd = getcwd()
for year, image_set in sets:
    if not os.path.exists('VOCdevkit/VOC%s/labels/' % year):
        os.makedirs('VOCdevkit/VOC%s/labels/' % year)
    image_ids = open('VOCdevkit/VOC%s/ImageSets/Main/%s.txt' % (year, image_set)).read().strip().split()
    list_file = open('%s_%s.txt' % (year, image_set), 'w')
    for image_id in image_ids:
        list_file.write('%s/VOCdevkit/VOC%s/JPEGImages/%s.jpg\n' % (cwd, year, image_id))
        convert_annotation(year, image_id)
    list_file.close()
END

cat 2007_train.txt 2007_val.txt 2012_train.txt 2012_val.txt >train.txt
cat 2007_train.txt 2007_val.txt 2007_test.txt 2012_train.txt 2012_val.txt >train.all.txt

mkdir ../VOC ../VOC/images ../VOC/images/train ../VOC/images/val
mkdir ../VOC/labels ../VOC/labels/train ../VOC/labels/val

python3 - "$@" <<END
import os

print(os.path.exists('../tmp/train.txt'))
with open('../tmp/train.txt', 'r') as f:
    for line in f.readlines():
        line = "/".join(line.split('/')[-5:]).strip()
        if os.path.exists("../" + line):
            os.system("cp ../" + line + " ../VOC/images/train")

        line = line.replace('JPEGImages', 'labels').replace('jpg', 'txt')
        if os.path.exists("../" + line):
            os.system("cp ../" + line + " ../VOC/labels/train")

print(os.path.exists('../tmp/2007_test.txt'))
with open('../tmp/2007_test.txt', 'r') as f:
    for line in f.readlines():
        line = "/".join(line.split('/')[-5:]).strip()
        if os.path.exists("../" + line):
            os.system("cp ../" + line + " ../VOC/images/val")

        line = line.replace('JPEGImages', 'labels').replace('jpg', 'txt')
        if os.path.exists("../" + line):
            os.system("cp ../" + line + " ../VOC/labels/val")
END

rm -rf ../tmp # remove temporary directory
echo "VOC download done."
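One detail worth noting in the VOC script above: convert_box takes the box as (xmin, xmax, ymin, ymax), the order in which b is assembled from the XML, not the more common (xmin, ymin, xmax, ymax). A quick check with made-up numbers:

def convert_box(size, box):  # box = (xmin, xmax, ymin, ymax), as built in the script above
    dw, dh = 1. / size[0], 1. / size[1]
    x, y = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1
    w, h = box[1] - box[0], box[3] - box[2]
    return x * dw, y * dh, w * dw, h * dh

print(convert_box((500, 375), (48, 195, 240, 371)))  # illustrative 500x375 image
# -> (0.241, 0.812, 0.294, 0.34933...) i.e. normalized center x, center y, width, height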
@@ -0,0 +1,21 @@
# PASCAL VOC dataset http://host.robots.ox.ac.uk/pascal/VOC/
# Train command: python train.py --data voc.yaml
# Default dataset location is next to YOLOv3:
#   /parent_folder
#     /VOC
#     /yolov3


# download command/URL (optional)
download: bash data/scripts/get_voc.sh

# train and val data as 1) directory: path/images/, 2) file: path/images.txt, or 3) list: [path1/images/, path2/images/]
train: ../VOC/images/train/ # 16551 images
val: ../VOC/images/val/ # 4952 images

# number of classes
nc: 20

# class names
names: [ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
         'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' ]