- # Real-time detection and tracking module; detection/tracking results are saved to the database
- # -*- coding: utf-8 -*-
- import argparse
- import os
- os.environ["OMP_NUM_THREADS"] = "1"
- os.environ["OPENBLAS_NUM_THREADS"] = "1"
- os.environ["MKL_NUM_THREADS"] = "1"
- os.environ["VECLIB_MAXIMUM_THREADS"] = "1"
- os.environ["NUMEXPR_NUM_THREADS"] = "1"
- import sys
- import platform
- import numpy as np
- from pathlib import Path
- import torch
- import torch.backends.cudnn as cudnn
- from shapely.geometry import Polygon
- import matplotlib.pyplot as plt
- import pymysql
- import time
- from datetime import datetime
- import json
- FILE = Path(__file__).resolve()
- ROOT = FILE.parents[0] # yolov5 strongsort root directory
- WEIGHTS = ROOT / 'weights'
- if str(ROOT) not in sys.path:
- sys.path.append(str(ROOT)) # add ROOT to PATH
- if str(ROOT / 'yolov5') not in sys.path:
- sys.path.append(str(ROOT / 'yolov5')) # add yolov5 ROOT to PATH
- if str(ROOT / 'trackers' / 'strongsort') not in sys.path:
- sys.path.append(str(ROOT / 'trackers' / 'strongsort')) # add strong_sort ROOT to PATH
- ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
- from yolov5.models.common import DetectMultiBackend
- from yolov5.utils.dataloaders import VID_FORMATS, LoadImages, LoadStreams
- from yolov5.utils.general import (LOGGER, Profile, check_img_size, non_max_suppression, scale_boxes, check_requirements, cv2,
- check_imshow, xyxy2xywh, increment_path, strip_optimizer, colorstr, print_args, check_file)
- from yolov5.utils.torch_utils import select_device, time_sync
- from yolov5.utils.plots import Annotator, colors, save_one_box
- from trackers.multi_tracker_zoo import create_tracker
- def get_connection():
- """创建并返回一个新的数据库连接。"""
- # 数据库连接信息
- host = 'localhost'
- user = 'root'
- password = '123456'
- database = 'video_streaming_database'
- return pymysql.connect(host=host, user=user, password=password, database=database)
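- # Hypothetical alternative (an assumption, not part of the original post): read the credentials from
- # environment variables instead of hard-coding them, falling back to the values used above.
- def get_connection_from_env():
-     """Create a connection using DB_HOST / DB_USER / DB_PASSWORD / DB_NAME if they are set."""
-     return pymysql.connect(
-         host=os.getenv('DB_HOST', 'localhost'),
-         user=os.getenv('DB_USER', 'root'),
-         password=os.getenv('DB_PASSWORD', '123456'),
-         database=os.getenv('DB_NAME', 'video_streaming_database'),
-     )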
- def ensure_connection(connection):
- """确保连接有效。如果连接无效,则重新建立连接。"""
- if connection is None or not connection.open:
- print("Connection is invalid or closed. Reconnecting...")
- return get_connection()
- return connection
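- # Hypothetical helper (not in the original post): the script expects the table
- # new_detection_tracking_results_1 to exist, but its schema is never shown. This sketch creates a
- # table matching the columns inserted further below; the column types are assumptions.
- def ensure_table(connection):
-     """Create the assumed results table if it does not exist (column types are guesses)."""
-     with connection.cursor() as cursor:
-         cursor.execute("""
-             CREATE TABLE IF NOT EXISTS new_detection_tracking_results_1 (
-                 id              BIGINT AUTO_INCREMENT PRIMARY KEY,
-                 camera_ip       VARCHAR(64)  NOT NULL,
-                 frame_number    INT          NOT NULL,
-                 tracking_id     INT          NOT NULL,
-                 crop_image_path VARCHAR(512) NOT NULL,
-                 event_datetime  DATETIME     NOT NULL
-             )
-         """)
-     connection.commit()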
- @torch.no_grad()
- def run(
- source='0',
- yolo_weights=WEIGHTS / 'yolov5m.pt', # model.pt path(s),
- reid_weights=WEIGHTS / 'osnet_x0_25_msmt17.pt', # model.pt path,
- tracking_method='strongsort',
- tracking_config=None,
- imgsz=(640, 640), # inference size (height, width)
- cam_ip = '192.168.31.97',
- conf_thres=0.25, # confidence threshold
- iou_thres=0.45, # NMS IOU threshold
- max_det=1000, # maximum detections per image
- device='0', # cuda device, i.e. 0 or 0,1,2,3 or cpu
- show_vid=False, # show results
- save_txt=False, # save results to *.txt
- save_conf=False, # save confidences in --save-txt labels
- save_crop=False, # save cropped prediction boxes
- save_trajectories=False, # save trajectories for each track
- save_vid=True, # save video tracking results
- nosave=False, # do not save images/videos
- classes=None, # filter by class: --class 0, or --class 0 2 3
- agnostic_nms=False, # class-agnostic NMS
- augment=False, # augmented inference
- visualize=False, # visualize features
- update=False, # update all models
- project=ROOT / 'runs' / 'track', # save results to project/name
- name='exp', # save results to project/name
- exist_ok=False, # existing project/name ok, do not increment
- line_thickness=2, # bounding box thickness (pixels)
- hide_labels=False, # hide labels
- hide_conf=False, # hide confidences
- hide_class=False, # hide IDs
- half=False, # use FP16 half-precision inference
- dnn=False, # use OpenCV DNN for ONNX inference
- vid_stride=1, # video frame-rate stride
- retina_masks=False,
- ):
- source = str(source)
- is_file = Path(source).suffix[1:] in (VID_FORMATS)
- is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://'))
- webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file)
- if is_url and is_file:
- source = check_file(source)
- if not isinstance(yolo_weights, list): # single yolo model
- exp_name = yolo_weights.stem
- elif type(yolo_weights) is list and len(yolo_weights) == 1: # single models after --yolo_weights
- exp_name = Path(yolo_weights[0]).stem
- else: # multiple models after --yolo_weights
- exp_name = 'ensemble'
- # Output path: "<source directory>/<source name>_det"
- project = os.path.join(os.path.dirname(source), os.path.splitext(os.path.basename(source))[0]) + "_det"
- save_dir = increment_path(Path(project), exist_ok=exist_ok) # increment run
- (save_dir / 'tracks' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
- # Load the detection model
- device = select_device(device)
- model = DetectMultiBackend(yolo_weights, device=device, dnn=dnn, data=None, fp16=half)
- stride, names, pt = model.stride, model.names, model.pt
- imgsz = check_img_size(imgsz, s=stride) # check image size
- if webcam:
- show_vid = check_imshow()
- dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt, vid_stride=vid_stride)
- nr_sources = len(dataset)
- else:
- dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt)
- nr_sources = 1
- tracker_list = []
- for i in range(nr_sources):
- tracker = create_tracker(tracking_method, tracking_config, reid_weights, device, half)
- tracker_list.append(tracker, )
- if hasattr(tracker_list[i], 'model'):
- if hasattr(tracker_list[i].model, 'warmup'):
- tracker_list[i].model.warmup()
- outputs = [None] * nr_sources
- # Run tracking
- seen, windows, dt = 0, [], (Profile(), Profile(), Profile(), Profile())
- curr_frames, prev_frames = [None] * nr_sources, [None] * nr_sources
-
- # Open the database connection (settings are defined in get_connection above)
- connection = get_connection()
- data = []
- for frame_idx, (path, im, im0s, vid_cap, s) in enumerate(dataset):
- start_time = time.time()
- im_Original = im0s
- # Process every other frame; in practice, frame skipping has a large impact on tracking and counting
- if frame_idx % 2 != 0:
- im_Original_resize = cv2.resize(im_Original, dsize=None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)
- cv2.imwrite(os.path.join(str(save_dir), cam_ip + "_" + str(frame_idx + 1).zfill(8) + ".jpg"), im_Original_resize)
- continue
- with dt[0]:
- im = torch.from_numpy(im).to(device)
- im = im.half() if half else im.float() # uint8 to fp16/32
- im /= 255.0 # 0 - 255 to 0.0 - 1.0
- if len(im.shape) == 3:
- im = im[None] # expand for batch dim
- with dt[1]:
- pred = model(im, augment=augment, visualize=visualize)
- with dt[2]:
- pred = non_max_suppression(pred, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)
- # Process detection results
- for i, det in enumerate(pred): # detections per image
- seen += 1
- if webcam: # nr_sources >= 1
- p, im0, _ = path[i], im0s[i].copy(), dataset.count
- p = Path(p) # to Path
- s += f'{i}: '
- txt_file_name = p.name
- save_path = str(save_dir / p.name) # im.jpg, vid.mp4, ...
- else:
- p, im0, _ = path, im0s.copy(), getattr(dataset, 'frame', 0)
- p = Path(p) # to Path
- # video file
- if source.endswith(VID_FORMATS):
- txt_file_name = p.stem
- save_path = str(save_dir / p.name) # im.jpg, vid.mp4, ...
- # folder with imgs
- else:
- txt_file_name = p.parent.name # get folder name containing current img
- save_path = str(save_dir / p.parent.name) # im.jpg, vid.mp4, ...
- curr_frames[i] = im0
- s += '%gx%g ' % im.shape[2:] # print string
- annotator = Annotator(im0, line_width=line_thickness, example=str(names))
- if hasattr(tracker_list[i], 'tracker') and hasattr(tracker_list[i].tracker, 'camera_update'):
- if prev_frames[i] is not None and curr_frames[i] is not None: # camera motion compensation
- tracker_list[i].tracker.camera_update(prev_frames[i], curr_frames[i])
- if det is not None and len(det):
- det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round() # rescale boxes to im0 size
- for c in det[:, 5].unique():
- n = (det[:, 5] == c).sum() # detections per class
- s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string
- with dt[3]:
- outputs[i] = tracker_list[i].update(det.cpu(), im0)
- # Process tracking results
- if len(outputs[i]) > 0:
- for j, (output) in enumerate(outputs[i]):
- bbox = output[0:4]
- id = output[4]
- cls = output[5]
- conf = output[6]
- bbox_x = int((output[0] + output[2]) / 2)
- bbox_y = int((output[1] + output[3]) / 2)
- bbox_w = int(output[2] - output[0])
- bbox_h = int(output[3] - output[1])
-
- if save_vid or save_crop or show_vid: # Add bbox to image
- c = int(cls) # integer class
- id = int(id) # integer id
- label = None if hide_labels else (f'{id} {names[c]}' if hide_conf else \
- (f'{id} {conf:.2f}' if hide_class else f'{id} {names[c]} {conf:.2f}'))
- color = colors(c, True)
- annotator.box_label(bbox, label, color=color)
- if save_trajectories and tracking_method == 'strongsort':
- q = output[7]
- tracker_list[i].trajectory(im0, q, color=color)
- if save_crop:
- bbox = np.array(bbox)
- if frame_idx % 12 == 0:
- save_one_box(bbox.astype(np.int16), im_Original, file = save_dir / f'{id}' /
- (cam_ip + "_"
- + str(frame_idx + 1).zfill(8) + "_"
- + str(id).zfill(4) + "_"
- + str(int(bbox_x)).zfill(4) + "_"
- + str(int(bbox_y)).zfill(4) + "_"
- + str(int(bbox_w)).zfill(4) + "_"
- + str(int(bbox_h)).zfill(4) + "_"
- + str(int(float(conf) * 10000))
- + f'.jpg'), BGR=True)
- # Save intermediate detection/tracking results to the database
- connection = ensure_connection(connection) # make sure the connection is still alive
- # Current date and time
- current_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
- try:
- with connection.cursor() as cursor:
- # SQL statement for inserting a row
- insert_sql = """
- INSERT INTO new_detection_tracking_results_1 (camera_ip, frame_number, tracking_id, crop_image_path, event_datetime)
- VALUES (%s, %s, %s, %s, %s);
- """
- # Row to insert; crop_image_path mirrors the filename used by save_one_box above
- data = [
- (cam_ip,
- int(frame_idx+1),
- int(id),
- str(save_dir / f'{id}' /
- (cam_ip + "_"
- + str(frame_idx + 1).zfill(8) + "_"
- + str(id).zfill(4) + "_"
- + str(int(bbox_x)).zfill(4) + "_"
- + str(int(bbox_y)).zfill(4) + "_"
- + str(int(bbox_w)).zfill(4) + "_"
- + str(int(bbox_h)).zfill(4) + "_"
- + str(int(float(conf) * 10000))
- + f'.jpg')),  # cast the Path to str so pymysql can escape it
- current_time)
- ]
- # Execute the insert
- cursor.executemany(insert_sql, data)
- connection.commit()
- except pymysql.MySQLError as exc:
- # Do not let a failed insert stop the tracking loop
- print(f"Database insert failed: {exc}")
- connection.rollback()
- else:
- pass
- # Save a half-resolution copy of the original frame (the annotated frame / database variant is not used here)
- im_Original_resize = cv2.resize(im_Original, dsize=None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)
- cv2.imwrite(os.path.join(str(save_dir), cam_ip + "_" + str(frame_idx + 1).zfill(8) + ".jpg"), im_Original_resize)
- # Save this frame's tracking results to a JSON file
- if outputs[0] is None:
- track_outputs = []
- else:
- track_outputs = [
- [float(x[0] / 2), float(x[1] / 2), float(x[2] / 2), float(x[3] / 2), int(x[4]), float(x[6]), ""]
- for x in outputs[0]
- ]
- data_dict = {}
- for row in track_outputs:
- key = int(row[4])
- value = row
- data_dict[key] = value
- json_output_path = os.path.join(str(save_dir), cam_ip + "_" + str(frame_idx + 1).zfill(8) + "_track.json")
- with open(json_output_path, 'w') as json_file:
- json.dump(data_dict, json_file, indent=4)
- # Record the end time and pace the loop to roughly 12 FPS (1/12 s per processed frame)
- end_time = time.time()
- elapsed = end_time - start_time
- print(f"Frame {frame_idx}: processing time {elapsed:.3f} s")
- if elapsed < 1 / 12:
- time.sleep(1 / 12 - elapsed)
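- # Hypothetical helper (not called by this script): read back one of the per-frame *_track.json files
- # written above. Each file maps a track id to [x1, y1, x2, y2, id, conf, ""], with coordinates already
- # halved to match the 0.5x resized frame saved next to it.
- def load_frame_tracks(json_path):
-     """Return {track_id: [x1, y1, x2, y2, track_id, conf, ""]} for one saved frame."""
-     with open(json_path, 'r') as f:
-         data = json.load(f)
-     # JSON object keys are strings; convert them back to integer track ids
-     return {int(track_id): row for track_id, row in data.items()}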
- def parse_opt():
- parser = argparse.ArgumentParser()
- parser.add_argument('--yolo-weights', nargs='+', type=Path, default=R'/home/hitsz/yk_workspace/Yolov5_track/weights/train_citys_bdd_4S_crowdhuman_coco_labs_liucl_1215_no_freeze_no_freeze_yolov5m3/weights/v5m_861.pt', help='model.pt path(s)')
- parser.add_argument('--reid-weights', type=Path, default=R'weights\osnet_x1_0_msmt17.pt')
- parser.add_argument('--tracking-method', type=str, default='bytetrack', help='strongsort, ocsort, bytetrack')
- parser.add_argument('--tracking-config', type=Path, default=None)
- parser.add_argument('--source', type=str, default=R"02_output_0.mp4", help='file/dir/URL/glob, 0 for webcam')
- # Example --source value for an RTSP camera stream:
- # parser.add_argument('--source', type=str, default=R'rtsp://admin:1234qwer@192.168.1.64:554/Streaming/Channels/101', help='file/dir/URL/glob, 0 for webcam')
- parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w')
- parser.add_argument('--cam_ip', type=str, default='192.168.31.97')
- parser.add_argument('--conf-thres', type=float, default=0.45, help='confidence threshold')
- parser.add_argument('--iou-thres', type=float, default=0.25, help='NMS IoU threshold')
- parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image')
- parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
- parser.add_argument('--show-vid', default=False, action='store_true' , help='display tracking video results')
- # Note: the save options below combine default=True with store_true, so they are always on and cannot be disabled from the command line
- parser.add_argument('--save-txt', default=True, action='store_true', help='save results to *.txt')
- parser.add_argument('--save-conf', default=True, action='store_true', help='save confidences in --save-txt labels')
- parser.add_argument('--save-crop', default=True, action='store_true', help='save cropped prediction boxes')
- parser.add_argument('--save-trajectories', default=True, action='store_true', help='save trajectories for each track')
- parser.add_argument('--save-vid', default=True, action='store_true', help='save video tracking results')
- parser.add_argument('--nosave', default=False, action='store_true', help='do not save images/videos')
- parser.add_argument('--classes', nargs='+', type=int, help='filter by class: --classes 0, or --classes 0 2 3')
- parser.add_argument('--agnostic-nms', action='store_true', help='class-agnostic NMS')
- parser.add_argument('--augment', action='store_true', help='augmented inference')
- parser.add_argument('--visualize', action='store_true', help='visualize features')
- parser.add_argument('--update', action='store_true', help='update all models')
- parser.add_argument('--project', default=R"/home/hitsz/yk_web/Yolov5_track/results/test_save_results1", help='save results to project/name')
- parser.add_argument('--name', default='test', help='save results to project/name')
- parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
- parser.add_argument('--line-thickness', default=2, type=int, help='bounding box thickness (pixels)')
- parser.add_argument('--hide-labels', default=False, action='store_true', help='hide labels')
- parser.add_argument('--hide-conf', default=False, action='store_true', help='hide confidences')
- parser.add_argument('--hide-class', default=False, action='store_true', help='hide IDs')
- parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
- parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference')
- parser.add_argument('--vid-stride', type=int, default=1, help='video frame-rate stride')
- parser.add_argument('--retina-masks', action='store_true', help='whether to plot masks in native resolution')
- opt = parser.parse_args()
- opt.imgsz *= 2 if len(opt.imgsz) == 1 else 1 # expand
- opt.tracking_config = ROOT / 'trackers' / opt.tracking_method / 'configs' / (opt.tracking_method + '.yaml')
- print_args(vars(opt))
- return opt
- def main(opt):
- check_requirements(requirements=ROOT / 'requirements.txt', exclude=('tensorboard', 'thop'))
- run(**vars(opt))
- if __name__ == "__main__":
- opt = parse_opt()
- main(opt)
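- # Example usage (the script's filename is not given in the post; "track.py" below is a placeholder):
- #   python track.py                       # run with the defaults defined in parse_opt()
- #   python track.py --source "rtsp://admin:1234qwer@192.168.1.64:554/Streaming/Channels/101" --cam_ip 192.168.1.64 --device 0
- #
- # What follows is the Vue application entry file (typically main.js) of the accompanying web front end.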
- import { createApp } from 'vue';
- import App from './App.vue';
- import "@/assets/less/index.less";
- import router from "@/router";
- import ElementPlus from 'element-plus'
- import 'element-plus/dist/index.css'
- import * as ElementPlusIconsVue from '@element-plus/icons-vue'
- import {createPinia} from 'pinia'
- import "video.js/dist/video-js.css";
- import "@/api/mock.js";
- import api from '@/api/api'
- import {useALLDataStore} from "@/stores"
- function isRoute(to){
- const routes = router.getRoutes();
- // Check whether any registered route matches the target path
- return routes.some(route => {
- // Handle dynamic path segments such as /user/:id
- const regex = new RegExp(`^${route.path.replace(/:\w+/g, '\\w+')}$`);
- return regex.test(to.path);
- });
- }
- const pinia = createPinia();
- const app = createApp(App);
- app.config.globalProperties.$api = api;
- for (const [key, component] of Object.entries(ElementPlusIconsVue)) {
- app.component(key, component)
- }
- app.use(pinia)
- const store = useALLDataStore();
- app.use(ElementPlus)
- store.addMenu(router,"refresh")
- app.use(router).mount("#app");