Framework agnostic sliced/tiled inference + interactive ui + error analysis plots
MIT License
Bot releases are visible (Hide)
Published by fcakyon over 3 years ago
sliced/standard inference
over yolov5/mmdetection models
and visualize the incorrect prediction over the fiftyone ui
Published by fcakyon over 3 years ago
# perform standard or sliced prediction
result = get_prediction(image, detection_model)
result = get_sliced_prediction(image, detection_model)
# export prediction visuals to "demo_data/"
result.export_visuals(export_dir="demo_data/")
# convert predictions to coco annotations
result.to_coco_annotations()
# convert predictions to coco predictions
result.to_coco_predictions(image_id=1)
# convert predictions to [imantics](https://github.com/jsbroks/imantics) annotation format
result.to_imantics_annotations()
# convert predictions to [fiftyone](https://github.com/voxel51/fiftyone) detection format
result.to_fiftyone_detections()
YOLOv5
+ SAHI
demo:
MMDetection
+ SAHI
demo:
Published by fcakyon over 3 years ago
from sahi.utils.fiftyone import launch_fiftyone_app
# launch fiftyone app:
session = launch_fiftyone_app(coco_image_dir, coco_json_path)
# close fiftyone app:
session.close()
from sahi import get_sliced_prediction
# perform sliced prediction
result = get_sliced_prediction(
image,
detection_model,
slice_height = 256,
slice_width = 256,
overlap_height_ratio = 0.2,
overlap_width_ratio = 0.2
)
# convert first object into fiftyone detection format
object_prediction = result["object_prediction_list"][0]
fiftyone_detection = object_prediction.to_fiftyone_detection(image_height=720, image_width=1280)
Published by fcakyon over 3 years ago
from sahi.utils.mot import MotAnnotation, MotFrame, MotVideo
mot_video = MotVideo(name="sequence_name")
mot_frame = MotFrame()
mot_frame.add_annotation(
MotAnnotation(bbox=[x_min, y_min, width, height])
)
mot_frame.add_annotation(
MotAnnotation(bbox=[x_min, y_min, width, height])
)
mot_video.add_frame(mot_frame)
mot_video.export(export_dir="mot_gt", type="gt")
mot_gt/sequence_name/
tracker_params = {
'max_distance_between_points': 30,
'min_detection_threshold': 0,
'hit_inertia_min': 10,
'hit_inertia_max': 12,
'point_transience': 4,
}
# for details: https://github.com/tryolabs/norfair/tree/master/docs#arguments
mot_video = MotVideo(tracker_kwargs=tracker_params)
# create annotations with track ids:
mot_frame.add_annotation(
MotAnnotation(bbox=[x_min, y_min, width, height], track_id=1)
)
mot_frame.add_annotation(
MotAnnotation(bbox=[x_min, y_min, width, height], track_id=2)
)
# add frame to video:
mot_video.add_frame(mot_frame)
# export in MOT challenge format without automatic track id generation:
mot_video.export(export_dir="mot_gt", type="gt", use_tracker=False)
exist_ok=True
mot_video.export(export_dir="mot_gt", type="gt", exist_ok=True)
from sahi.utils.mot import MotAnnotation, MotFrame, MotVideo
mot_video = MotVideo(name="sequence_name")
mot_frame = MotFrame()
mot_frame.add_annotation(
MotAnnotation(bbox=[x_min, y_min, width, height], track_id=1)
)
mot_frame.add_annotation(
MotAnnotation(bbox=[x_min, y_min, width, height], track_id=2)
)
mot_video.add_frame(mot_frame)
mot_video.export(export_dir="mot_test", type="test")
mot_test/sequence_name.txt
# add object detector outputs:
mot_frame.add_annotation(
MotAnnotation(bbox=[x_min, y_min, width, height])
)
mot_frame.add_annotation(
MotAnnotation(bbox=[x_min, y_min, width, height])
)
# add frame to video:
mot_video.add_frame(mot_frame)
# export in MOT challenge format by applying a kalman based tracker:
mot_video.export(export_dir="mot_gt", type="gt", use_tracker=True)
tracker_params = {
'max_distance_between_points': 30,
'min_detection_threshold': 0,
'hit_inertia_min': 10,
'hit_inertia_max': 12,
'point_transience': 4,
}
# for details: https://github.com/tryolabs/norfair/tree/master/docs#arguments
mot_video = MotVideo(tracker_kwargs=tracker_params)
exist_ok=True
mot_video.export(export_dir="mot_gt", type="gt", exist_ok=True)
Check YOLOv5
+ SAHI
demo:
Check MMDetection
+ SAHI
demo:
Published by fcakyon over 3 years ago
Published by fcakyon over 3 years ago
Published by fcakyon over 3 years ago
--postprocess_type UNIONMERGE
or --postprocess_type NMS
to be applied over sliced predictions.
--match_metric IOS
for intersection over smaller area, or --match_metric IOU
for intersection over union.
--match_thresh 0.5
--class_agnostic
argument to ignore category ids of the predictions during postprocess (merging/nms).
visuals_with_gt
folder when coco_file_path
is provided.
from_coco_annotation_dict
classmethod to ObjectAnnotation
Published by fcakyon over 3 years ago
CLI usage:
python scripts/predict.py --model_type yolov5 --source image/file/or/folder --model_path path/to/model
Published by fcakyon over 3 years ago
Published by fcakyon over 3 years ago
from sahi.utils.coco import Coco
# set ignore_negative_samples as False if you want images without annotations present in json and yolov5 exports
coco = Coco.from_coco_dict_or_path("coco.json", ignore_negative_samples=True)
Published by fcakyon over 3 years ago
from sahi.utils.coco import Coco
# init Coco object
coco = Coco.from_coco_dict_or_path("coco.json")
# get dataset stats
coco.stats
{
'num_images': 6471,
'num_annotations': 343204,
'num_categories': 2,
'num_negative_images': 0,
'num_images_per_category': {'human': 5684, 'vehicle': 6323},
'num_annotations_per_category': {'human': 106396, 'vehicle': 236808},
'min_num_annotations_in_image': 1,
'max_num_annotations_in_image': 902,
'avg_num_annotations_in_image': 53.037243084530985,
'min_annotation_area': 3,
'max_annotation_area': 328640,
'avg_annotation_area': 2448.405738278109,
'min_annotation_area_per_category': {'human': 3, 'vehicle': 3},
'max_annotation_area_per_category': {'human': 72670, 'vehicle': 328640},
}
# filter out images with separate area intervals per category
intervals_per_category = {
"human": {"min": 20, "max": 10000},
"vehicle": {"min": 50, "max": 15000},
}
area_filtered_coco = coco.get_area_filtered_coco(intervals_per_category=intervals_per_category)
Published by fcakyon over 3 years ago
from sahi.utils.coco import Coco
from sahi.utils.file import save_json
# init Coco objects by specifying coco dataset paths and image folder directories
coco = Coco.from_coco_dict_or_path("coco.json")
# filter out images that contain annotations with smaller area than 50
area_filtered_coco = coco.get_area_filtered_coco(min=50)
# filter out images that contain annotations with smaller area than 50 and larger area than 10000
area_filtered_coco = coco.get_area_filtered_coco(min=50, max=10000)
# export filtered COCO dataset
save_json(area_filtered_coco.json, "area_filtered_coco.json")
# multiprocess support
if __name__ == "__main__":
coco = Coco.from_coco_dict_or_path(
"coco.json",
image_dir="coco_images/",
mp=True
)
coco.export_as_yolov5(
output_dir="output/folder/dir",
train_split_rate=0.85,
mp=True
)
Published by fcakyon over 3 years ago
from sahi.utils.coco import Coco
# init Coco object
coco = Coco.from_coco_dict_or_path("coco.json")
# get dataset stats
coco.stats
{
'avg_annotation_area': 2448.405738278109,
'avg_num_annotations_in_image': 53.037243084530985,
'max_annotation_area': 328640,
'max_num_annotations_in_image': 902,
'min_annotation_area': 3,
'min_num_annotations_in_image': 1,
'num_annotations': 343204,
'num_annotations_per_category': {
'human': 106396,
'vehicle': 236808
},
'num_categories': 2,
'num_images': 6471,
'num_images_per_category': {
'human': 5684,
'vehicle': 6323
}
}
Published by fcakyon over 3 years ago
Published by fcakyon over 3 years ago
Published by fcakyon over 3 years ago
Published by fcakyon over 3 years ago
Published by fcakyon over 3 years ago
Published by fcakyon over 3 years ago
Published by fcakyon over 3 years ago