Skip to content

Commit

Permalink
Create YOLOv5.py
Browse files Browse the repository at this point in the history
  • Loading branch information
KOSASIH authored Jul 5, 2024
1 parent 058e852 commit 4a2efda
Showing 1 changed file with 75 additions and 0 deletions.
75 changes: 75 additions & 0 deletions .ai/deep_learning/YOLOv5.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
import argparse
import random
import time

import cv2
import torch

from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages
from utils.general import check_img_size, non_max_suppression, apply_classifier, scale_coords, xyxy2xywh, strip_optimizer, set_logging
from utils.torch_utils import select_device, load_classifier, time_synchronized
from utils.plots import plot_one_box

def detect(save_img=False, save_txt=False, imgsz=(640, 640), conf_thres=0.4, iou_thres=0.5, max_det=1000,
           save_conf=False, view_img=False,
           weights='yolov5s.pt',
           source='https://www.youtube.com/watch?v=dQw4w9WgXcQ'):
    """Run YOLOv5 inference on a video stream and optionally save labels/drawn boxes.

    Args:
        save_img: if True, draw detection boxes onto each frame.
        save_txt: if True, append one YOLO-format label line per detection to
            ``<frame path>.txt`` (class x y w h, normalized).
        imgsz: inference size; an int or an (h, w) tuple (only the first value
            is used, matching the square warm-up tensor below).
        conf_thres: confidence threshold for non-max suppression.
        iou_thres: IoU threshold for non-max suppression.
        max_det: maximum detections kept per image.
        save_conf: if True, also write the confidence in each saved label line.
            (Was referenced but undefined in the original; now a parameter.)
        view_img: if True, draw boxes even when save_img is False.
            (Was referenced but undefined in the original; now a parameter.)
        weights: model checkpoint path (previously hard-coded).
        source: stream URL or path (previously hard-coded).

    Side effects: loads the model, opens the stream, may write label files,
    and prints a timing summary. Returns None.
    """
    # Initialize
    device = select_device('')
    half = device.type != 'cpu'  # half precision only supported on CUDA

    # Load model
    model = attempt_load(weights, map_location=device)  # load FP32 model
    stride = int(model.stride.max())  # model stride
    # BUG FIX: imgsz defaulted to a tuple but was used as a scalar in
    # torch.zeros(1, 3, imgsz, imgsz); normalize to a single int first.
    if isinstance(imgsz, (tuple, list)):
        imgsz = imgsz[0]
    imgsz = check_img_size(imgsz, s=stride)  # check image size
    if half:
        # BUG FIX: the input is cast to fp16 below but the model was never
        # converted, which raises a dtype mismatch on CUDA.
        model.half()

    # Get names and colors
    # BUG FIX: original tested hasattr(model, 'odule') (typo), so the
    # DataParallel branch could never be taken.
    names = model.module.names if hasattr(model, 'module') else model.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]

    # Warm-up: run the model once on a dummy image.
    # (The original did this twice, at lines "run once" x2; once suffices.)
    if device.type != 'cpu':
        model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters())))
    t0 = time.time()

    # Set Dataloader
    dataset = LoadStreams(source, img_size=imgsz, stride=stride)

    # Run inference
    for path, img, im0s, vid_cap in dataset:
        img = torch.from_numpy(img).to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        if img.ndimension() == 3:
            img = img.unsqueeze(0)

        # Inference
        pred = model(img, augment=False)[0]

        # Apply NMS
        pred = non_max_suppression(pred, conf_thres, iou_thres, max_det=max_det)

        # Process detections
        for i, det in enumerate(pred):  # detections per image
            # BUG FIX: LoadStreams yields a *list* of frames (one per stream);
            # the original indexed none of them and used the list as an image.
            im0 = im0s[i].copy() if isinstance(im0s, list) else im0s
            txt_path = str(path[i] if isinstance(path, (list, tuple)) else path)
            # Normalization gain (w, h, w, h) for xywh label output.
            # BUG FIX: gn was referenced but never defined in the original.
            gn = torch.tensor(im0.shape)[[1, 0, 1, 0]]
            s = ''
            if len(det):
                # Rescale boxes from img_size to im0 size
                det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

                # Summarize results per class.
                for c in det[:, -1].unique():
                    n = (det[:, -1] == c).sum()  # detections per class
                    # BUG FIX: original overwrote s each iteration; accumulate.
                    s += f"{n} {names[int(c)]}{'s' * (n > 1)}, "

                # Write results
                for *xyxy, conf, cls in reversed(det):
                    if save_txt:  # Write to file
                        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
                        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
                        with open(txt_path + '.txt', 'a') as f:
                            # BUG FIX: original read ('%g ' len(line)) — a
                            # SyntaxError; the repetition operator '*' was missing.
                            f.write(('%g ' * len(line)).rstrip() % line + '\n')

                    if save_img or view_img:  # Add bbox to image
                        label = f'{names[int(cls)]} {conf:.2f}'
                        plot_one_box(xyxy, im0, label=label, color=colors[int(cls)], line_thickness=3)

    print(f'Done. ({time.time() - t0:.3f}s)')

0 comments on commit 4a2efda

Please sign in to comment.