fatigue-detection

This project is built on the PaddleDetection object-detection development kit. The 1.3M ultra-lightweight PP-YOLO Tiny model is chosen for development, and the result is deployed on Windows.


PP-YOLO Tiny

  For detection that has to run directly on embedded devices such as FPGAs or the K210, or inside a mobile app, the model often needs to be 6M or even smaller. After quantization and compression with PaddleSlim, PP-YOLO Tiny comes in at only 1.3M!


   Optimizations in PP-YOLO Tiny

  PP-YOLO Tiny inherits nearly 10 of PP-YOLO's optimization strategies, such as SPP, IoU loss, drop block, mixup and sync BN, and adds the following changes for mobile deployment:

  1. A backbone better suited to mobile devices

    PP-YOLO Tiny uses MobileNetV3 as its backbone, which performs better on mobile hardware.

  2. A detection head better suited to mobile devices

    PP-YOLO Tiny replaces the regular convolutions in the PP-YOLO head with depthwise separable convolutions (Depthwise Separable Convolution), making the head smaller and faster (see the first sketch after this list).

  3. Removal of optimizations that cost too much model size or speed

     Of the roughly 10 tricks used in PP-YOLO, not every one suits a lightweight mobile network; for example, iou aware and matrix nms slow prediction down noticeably, so these tricks are dropped.

  4. Smaller input sizes

    PP-YOLO Tiny uses the smaller input sizes of 320 and 416. PaddleDetection 2.0 provides tools/anchor_cluster.py to re-cluster anchors for these sizes (for example for COCO at 320*320), and during training the image size of each batch is randomly chosen between 192 and 512.

  5. Recall optimization

    With a smaller input size the objects shrink as well and are more likely to be missed, so two strategies are used to keep recall up (see the second sketch after this list):

    a. Originally a ground-truth box is assigned only to its best-matching anchor; after the change it is also assigned to every anchor whose IoU with the box is at least 0.25, which increases the number of positive samples.

    b. Originally every anchor whose IoU with the ground truth is below 0.7 is treated as a negative sample; this threshold is lowered to 0.5, which reduces the proportion of negatives.

    More positives and fewer negatives together improve recall.

  6. A larger batch size

     A larger batch size usually makes training more stable and gives better results. For PP-YOLO Tiny the per-card batch size is raised from 24 to 32, so the total batch size over 8 cards is 8*32=256; the resulting COCO model is only 4.3M.

  7. Quantization and compression

     Finally, using the quantization support in Paddle Inference and Paddle Lite, the model is compressed down to 1.3M; on Paddle Lite, int8 inference runs even faster than float32.

   The resulting 1.3M ultra-lightweight PP-YOLO Tiny model can be deployed with Paddle Lite and achieves real-time prediction on ARM chips such as the Kirin 990.
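To make point 2 concrete, the sketch below builds a depthwise separable convolution with the Paddle API and compares its parameter count against a regular convolution. This is only an illustration of the idea, not PP-YOLO Tiny's actual head code; the channel sizes and input shape are made up.

```python
import paddle
import paddle.nn as nn

class DepthwiseSeparableConv(nn.Layer):
    """Depthwise conv (groups=in_channels) followed by a 1x1 pointwise conv."""
    def __init__(self, in_channels, out_channels, kernel_size=3):
        super().__init__()
        self.depthwise = nn.Conv2D(
            in_channels, in_channels, kernel_size,
            padding=kernel_size // 2, groups=in_channels)
        self.pointwise = nn.Conv2D(in_channels, out_channels, 1)

    def forward(self, x):
        return self.pointwise(self.depthwise(x))

def n_params(layer):
    return sum(p.numpy().size for p in layer.parameters())

regular = nn.Conv2D(128, 256, 3, padding=1)       # ordinary 3x3 convolution
separable = DepthwiseSeparableConv(128, 256, 3)   # depthwise + pointwise

x = paddle.randn([1, 128, 20, 20])                # dummy feature map
assert regular(x).shape == separable(x).shape     # same output shape
print('regular conv params:  ', n_params(regular))    # ~295k
print('separable conv params:', n_params(separable))  # ~34k
```

The separable version produces the same output shape with roughly a tenth of the parameters, which is why it is preferred in the mobile head.

The recall tweak in point 5 can also be shown in a few lines of NumPy. This is a simplified illustration, not PaddleDetection's actual target-assignment code (which works per grid cell and applies the negative/ignore threshold to predictions); the anchor sizes and the ground-truth box size are invented.

```python
import numpy as np

def wh_iou(wh, anchors):
    """IoU between one (w, h) box and an array of (w, h) anchors, ignoring positions."""
    inter = np.minimum(wh[0], anchors[:, 0]) * np.minimum(wh[1], anchors[:, 1])
    union = wh[0] * wh[1] + anchors[:, 0] * anchors[:, 1] - inter
    return inter / union

# hypothetical anchor sizes and one ground-truth box size
anchors = np.array([[10, 15], [24, 36], [72, 42], [35, 59], [117, 95], [218, 160]], float)
gt_wh = np.array([30.0, 45.0])
iou = wh_iou(gt_wh, anchors)

# original scheme: one positive (best match), negatives below IoU 0.7
pos_old = np.array([int(iou.argmax())])
neg_old = np.setdiff1d(np.where(iou < 0.7)[0], pos_old)

# optimized scheme: every anchor with IoU >= 0.25 is positive, negative threshold lowered to 0.5
pos_new = np.where(iou >= 0.25)[0]
neg_new = np.setdiff1d(np.where(iou < 0.5)[0], pos_new)

print('IoU per anchor       :', iou.round(2))
print('positives old -> new :', pos_old.tolist(), '->', pos_new.tolist())
print('negatives old -> new :', neg_old.tolist(), '->', neg_new.tolist())
```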
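Running the snippet shows the number of positive anchors growing and the number of negatives shrinking for the same ground-truth box, which is exactly the mechanism that lifts recall.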

# Download PaddleDetection from Gitee
!git clone https://gitee.com/paddlepaddle/PaddleDetection.git

  1. Dataset preparation: convert the dataset into the COCO format required by PaddleDetection
# Unzip the dataset
!unzip -oq /home/aistudio/data/data85880/fdd-dataset.zip
# Rewrite the <path> node in each annotation XML so it points at the JPEG files
import xml.dom.minidom
import os

path = r'dataset/Annotations'      # original XML files
sv_path = r'dataset/Annotations1'  # rewritten XML files are saved here
os.makedirs(sv_path, exist_ok=True)  # make sure the output directory exists
files = os.listdir(path)
cnt = 1

for xmlFile in files:
    dom = xml.dom.minidom.parse(os.path.join(path, xmlFile))  # parse the XML into a DOM
    root = dom.documentElement  # root element
    item = root.getElementsByTagName('path')  # all <path> nodes
    for i in item:
        i.firstChild.data = 'dataset/JPEGImages/' + str(cnt).zfill(6) + '.jpg'  # point at the renumbered JPEG

    with open(os.path.join(sv_path, xmlFile), 'w') as fh:
        dom.writexml(fh)
    cnt += 1

# Replace the original Annotations directory with the rewritten one
%cd dataset/
!rm -rf Annotations
!mv Annotations1 Annotations
%cd ..

Remove images that have no matching annotation file:

import os,shutil

jpeg = 'dataset/JPEGImages'
jpeg_list = os.listdir(jpeg)

anno = 'dataset/Annotations'
anno_list = os.listdir(anno)

for pic in jpeg_list:
    name = pic.split('.')[0]
    anno_name = name + '.xml'
    print(anno_name)
    if anno_name not in anno_list:
        os.remove(os.path.join(jpeg, pic))  # delete images that have no annotation file
Install PaddleX and paddle2onnx:
!pip install paddlex
!pip install paddle2onnx
Split the dataset with a ratio of 8:1:1, giving 2332 training samples and 291 validation samples:
!paddlex --split_dataset --format VOC --dataset_dir dataset --val_value 0.1 --test_value 0.1
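After the split, paddlex writes train_list.txt, val_list.txt, test_list.txt and labels.txt into the dataset directory (the same files the PaddleX training script further below relies on). An optional sanity check of the resulting sample counts:

```python
import os

# count the samples that paddlex wrote into each split list
for split in ('train_list.txt', 'val_list.txt', 'test_list.txt'):
    path = os.path.join('dataset', split)
    with open(path) as f:
        n = sum(1 for line in f if line.strip())
    print(f'{split}: {n} samples')
```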
The VOC-to-COCO conversion built into PaddleDetection had a bug at the time, so, following a PaddleDetection GitHub issue, the x2coco.py script is used instead.
Convert the VOC annotations into the COCO format needed by the PaddleDetection PP-YOLO Tiny config:
!python x2coco.py --dataset_type voc --voc_anno_dir /home/aistudio/dataset/Annotations/ --voc_anno_list /home/aistudio/dataset/ImageSets/Main/train.txt --voc_label_list /home/aistudio/dataset/labels.txt --voc_out_name voc_test.json
!python x2coco.py --dataset_type voc --voc_anno_dir /home/aistudio/dataset/Annotations/ --voc_anno_list /home/aistudio/dataset/ImageSets/Main/val.txt --voc_label_list /home/aistudio/dataset/labels.txt --voc_out_name voc_val.json
!python x2coco.py --dataset_type voc --voc_anno_dir /home/aistudio/dataset/Annotations/ --voc_anno_list /home/aistudio/dataset/ImageSets/Main/test.txt --voc_label_list /home/aistudio/dataset/labels.txt --voc_out_name voc_train.json
!mv voc_train.json dataset/
!mv voc_test.json dataset/
!mv voc_val.json dataset/
Start converting !
100%|██████████| 1631/1631 [00:00<00:00, 12327.36it/s]
Start converting !
100%|██████████| 583/583 [00:00<00:00, 12504.37it/s]
Start converting !
100%|██████████| 1283/1283 [00:00<00:00, 12609.82it/s]
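Before training, it is worth opening the generated COCO JSON files to confirm that the image/annotation counts and category names look right. A minimal check with the standard json module:

```python
import json
import os

# inspect each converted COCO annotation file
for name in ('voc_train.json', 'voc_val.json', 'voc_test.json'):
    with open(os.path.join('dataset', name)) as f:
        coco = json.load(f)
    cats = [c['name'] for c in coco['categories']]
    print(f"{name}: {len(coco['images'])} images, "
          f"{len(coco['annotations'])} annotations, categories={cats}")
```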

The training config used below is ./PaddleDetection/configs/ppyolo/ppyolo_tiny_650e_coco.yml; modify its dataset paths to match the converted dataset.

For YOLO-series models, tools/anchor_cluster.py can be used to cluster anchors that fit the dataset:

!python tools/anchor_cluster.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml -n 9 -s 608 -m v2 -i 1000
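For intuition, anchor clustering is essentially k-means over the ground-truth box widths and heights with an IoU-based distance, so the chosen anchors cover the box shapes that actually occur in the dataset. The sketch below is a simplified NumPy version of that idea, not the tools/anchor_cluster.py implementation; the random box sizes are placeholders for real annotation sizes.

```python
import numpy as np

def wh_iou(boxes, anchors):
    """IoU between (N, 2) box sizes and (K, 2) anchor sizes, ignoring positions."""
    inter = (np.minimum(boxes[:, None, 0], anchors[None, :, 0]) *
             np.minimum(boxes[:, None, 1], anchors[None, :, 1]))
    union = (boxes[:, 0] * boxes[:, 1])[:, None] + (anchors[:, 0] * anchors[:, 1])[None, :] - inter
    return inter / union

def cluster_anchors(boxes, k=9, iters=100, seed=0):
    rng = np.random.default_rng(seed)
    anchors = boxes[rng.choice(len(boxes), k, replace=False)].astype(float)
    for _ in range(iters):
        assign = wh_iou(boxes, anchors).argmax(axis=1)              # nearest anchor by IoU
        for j in range(k):
            if np.any(assign == j):
                anchors[j] = np.median(boxes[assign == j], axis=0)  # update cluster centre
    return anchors[np.argsort(anchors.prod(axis=1))]                # sort anchors by area

# placeholder box sizes; in practice these come from the VOC/COCO annotations
boxes = np.abs(np.random.default_rng(1).normal(loc=[80, 120], scale=[40, 60], size=(500, 2))) + 4
print(cluster_anchors(boxes, k=9).round(1))
```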

Note: write the clustered anchors into the TrainReader, EvalReader and TestReader sections of the yaml config, then start training with PaddleDetection:

%cd PaddleDetection/
!pip install -r requirements.txt
!python -u tools/train.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml \
              -o pretrain_weights=https://paddledet.bj.bcebos.com/models/pretrained/MobileNetV3_large_x0_5_pretrained.pdparams \
              --eval \
              -r output/ppyolo_tiny_650e_coco/1200 \
              --vdl_log_dir vdl_log_dir/scalar

  For deployment, export the trained weights as an inference model. PaddlePaddle's inference format keeps only the tensors and kernels needed for forward prediction, and the exported model consists of __model__, __params__ and model.yml.

# Export the trained model for inference
!python tools/export_model.py -c configs/ppyolo/ppyolo_tiny_650e_coco.yml -o weights=output/ppyolo_tiny_650e_coco/best_model

PaddleDetection provides a Python deployment script, deploy/python/infer.py; its arguments are as follows:

| Argument | Required | Meaning |
|:---------|:---------|:--------|
| --model_dir | Yes | Path to the exported model |
| --image_file | Option | Image to predict |
| --image_dir | Option | Directory of images to predict |
| --video_file | Option | Video to predict |
| --camera_id | Option | Camera ID used for prediction, default -1 (camera disabled; can be set to 0 - (number of cameras - 1)); press q in the visualization window to quit, and the result is saved to output/output.mp4 |
| --use_gpu | No | Whether to use the GPU, default False |
| --run_mode | No | When using the GPU, default fluid; options: fluid/trt_fp32/trt_fp16/trt_int8 |
| --batch_size | No | Prediction batch size, effective when --image_dir is set |
| --threshold | No | Score threshold for predictions, default 0.5 |
| --output_dir | No | Root directory for saving visualized results, default output/ |
| --run_benchmark | No | Whether to run a benchmark; requires --image_file or --image_dir |
| --enable_mkldnn | No | Whether to enable MKLDNN acceleration for CPU prediction |
| --cpu_threads | No | Number of CPU threads, default 1 |
!python deploy/python/infer.py --model_dir=/path/to/models --image_file=/path/to/image --use_gpu=(False/True)

Alternatively, the model can be trained with PaddleX:

# Select the GPU card to use
# https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html#gpu
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

from paddlex.det import transforms
import paddlex as pdx

# Define the training and evaluation transforms
# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/transforms/det_transforms.html
train_transforms = transforms.Compose([
    transforms.MixupImage(mixup_epoch=250), transforms.RandomDistort(),
    transforms.RandomExpand(), transforms.RandomCrop(), transforms.Resize(
        target_size=608, interp='RANDOM'), transforms.RandomHorizontalFlip(),
    transforms.Normalize()
])

eval_transforms = transforms.Compose([
    transforms.Resize(
        target_size=608, interp='CUBIC'), transforms.Normalize()
])

# Define the training and evaluation datasets
# API reference: https://paddlex.readthedocs.io/zh_CN/develop/apis/datasets.html#paddlex-datasets-vocdetection
train_dataset = pdx.datasets.VOCDetection(
    data_dir='dataset',
    file_list='dataset/train_list.txt',
    label_list='dataset/labels.txt',
    transforms=train_transforms,
    shuffle=True)
eval_dataset = pdx.datasets.VOCDetection(
    data_dir='dataset',
    file_list='dataset/val_list.txt',
    label_list='dataset/labels.txt',
    transforms=eval_transforms)

# Initialize the model and train it
# Training metrics can be viewed with VisualDL: https://paddlex.readthedocs.io/zh_CN/develop/train/visualdl.html
num_classes = len(train_dataset.labels)

# API: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#paddlex-det-ppyolo
model = pdx.det.PPYOLO(num_classes=num_classes)

# API: https://paddlex.readthedocs.io/zh_CN/develop/apis/models/detection.html#train
# https://paddlex.readthedocs.io/zh_CN/develop/appendix/parameters.html
model.train(
    num_epochs=270,
    train_dataset=train_dataset,
    train_batch_size=8,
    eval_dataset=eval_dataset,
    learning_rate=0.000125,
    lr_decay_epochs=[210, 240],
    save_dir='output/ppyolo',
    save_interval_epochs=1,
    use_vdl=True)
!paddlex --export_inference --model_dir=output/ppyolo/best_model --save_dir=./inference_model
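After training, the best PaddleX checkpoint can be loaded for a quick prediction check. A minimal sketch, assuming the PaddleX 1.x API used above (pdx.load_model / predict / det.visualize); 'test.jpg' is a placeholder for any image from the dataset:

```python
import paddlex as pdx

# load the best checkpoint saved by model.train()
model = pdx.load_model('output/ppyolo/best_model')

# run detection on a placeholder test image
result = model.predict('test.jpg')

# draw boxes above the score threshold and save the visualization
pdx.det.visualize('test.jpg', result, threshold=0.5, save_dir='./output/ppyolo')
```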

For more detailed Windows deployment instructions, please refer to the PaddleX deployment documentation.