Przeglądaj źródła

chore: Add initial project configuration files and ignore unnecessary directories, finish welding and platform

dxc 8 miesięcy temu
commit
2a500d9105
13 zmienionych plików z 2167 dodań i 0 usunięć
  1. 6 0
      .gitignore
  2. 109 0
      app1.py
  3. 164 0
      app2.py
  4. 109 0
      app3.py
  5. 161 0
      app4.py
  6. 103 0
      config.py
  7. 75 0
      globals.py
  8. 267 0
      platform_remove_detect.py
  9. 297 0
      platform_setup_detect.py
  10. 130 0
      platform_wearing_detect.py
  11. 374 0
      welding_exam_detect.py
  12. 252 0
      welding_reset_detect.py
  13. 120 0
      welding_wearing_detect.py

+ 6 - 0
.gitignore

@@ -0,0 +1,6 @@
+__pycache__
+old
+post_demo
+static
+utils
+weights

+ 109 - 0
app1.py

@@ -0,0 +1,109 @@
+import threading
+import time
+from flask import Flask, jsonify,send_from_directory
+from welding_wearing_detect import start_wearing_detection,init_wearing_detection
+from globals import inference_thread, stop_event,lock,redis_client
+
+#焊接考核的穿戴
+app = Flask(__name__)
+
+
+# Define the /wearing_detection endpoint
+@app.route('/wearing_detection', methods=['GET'])
+def wearing_detection():
+    # Start the PPE ("wearing") detection worker for the welding exam.
+    # Replies SUCCESS once every detection thread has signalled readiness,
+    # or ALREADY_RUNNING when a worker thread is still alive.
+    global inference_thread#'global' is required because the name is reassigned below
+
+    if inference_thread is None or not inference_thread.is_alive():
+        stop_event.clear()#no 'global' needed: we only call methods on it, never rebind it
+        
+        start_events = []#one event per detection thread so we know when each has started
+        inference_thread = threading.Thread(target=start_wearing_detection,args=(start_events,))
+        inference_thread.start()
+        init_wearing_detection()
+        # NOTE(review): init runs after the worker starts, and start_events is
+        # appended to by the worker thread; if it has not appended yet, the
+        # loop below may iterate an empty list and return early -- TODO confirm.
+
+        # Wait until every YOLO thread has begun detecting before replying SUCCESS
+        for event in start_events:
+            event.wait()
+
+        app.logger.info('start_wearing_detection')
+        return jsonify({"status": "SUCCESS"}), 200
+    
+    else:
+        app.logger.info("start_wearing_detection already running")   
+        return jsonify({"status": "ALREADY_RUNNING"}), 200
+    
+
+@app.route('/human_postion_status', methods=['GET']) 
+def human_postion_status():#开始登录时,检测是否需要复位,若需要,则发送复位信息,否则开始焊接检测
+    #global inference_thread
+    if redis_client.get("welding_wearing_human_in_postion")=='False':
+        app.logger.info('NOT_IN_POSTION')
+        return jsonify({"status": "NOT_IN_POSTION"}), 200
+    else:
+        app.logger.info('IN_POSTION')
+        return jsonify({"status": "IN_POSTION"}), 200
+
+@app.route('/wearing_status', methods=['GET']) 
+def wearing_status():
+    # Report the PPE detection result: a count per clothing item plus the
+    # annotated snapshot stored in Redis.
+    #global inference_thread
+    with lock:   
+        #TODO send FAIL if an exception occurs.
+        # Ask the worker to publish its current detection image and counts.
+        redis_client.set("welding_wearing_detection_img_flag",'True')
+        time.sleep(1)  # give the worker a moment to write the result keys
+        if not redis_client.exists("welding_wearing_items_nums") or not redis_client.exists("welding_wearing_detection_img"):
+            return jsonify({"status": "NONE"}), 200##the wearing-detection worker has not finished yet
+        
+        wearing_items_nums = redis_client.lrange("welding_wearing_items_nums", 0, -1)
+        # NOTE(review): counts are paired with item names purely by position,
+        # so the worker must push them in exactly this order -- confirm.
+        wearing_items_list = ['pants', 'jacket', 'helmet', 'gloves', 'shoes']
+        json_array = []
+        for num, item in zip(wearing_items_nums, wearing_items_list):
+            json_object = {"name": item, "number": num}
+            json_array.append(json_object)
+
+        app.logger.info(json_array)
+        image=redis_client.get("welding_wearing_detection_img")
+        app.logger.info(image)
+
+        return jsonify({"status": "SUCCESS","data":json_array,"image":image}), 200
+
+               
+@app.route('/end_wearing_exam', methods=['GET'])
+def end_wearing_exam():
+    init_wearing_detection()
+    return jsonify({"status": "SUCCESS"}), 200
+
+    
+
+def stop_inference_internal():
+    global inference_thread
+    if inference_thread is not None and inference_thread.is_alive():
+        stop_event.set()  # 设置停止事件标志,通知推理线程停止运行
+        inference_thread.join()  # 等待推理线程结束
+        inference_thread = None  # 释放线程资源
+        app.logger.info('detection stopped')
+        return True
+    else:
+        app.logger.info('No inference stopped')
+        return False
+
+@app.route('/stop_detection', methods=['GET'])
+def stop_inference():
+    #global inference_thread
+    if stop_inference_internal():
+        app.logger.info('detection stopped')
+        return jsonify({"status": "DETECTION_STOPPED"}), 200
+    else:
+        app.logger.info('No_detection_running')
+        return jsonify({"status": "No_detection_running"}), 200
+
+@app.route('/images/<filename>')
+def get_image(filename):
+    app.logger.info('get_image'+filename)
+    #pdb.set_trace()
+    return send_from_directory('static/images', filename)
+
+
+if __name__ == '__main__':
+
+    # Start the Flask server.
+    # NOTE(review): host and port are hard-coded; consider reading them from config.
+    app.run(debug=False, host='172.16.20.163', port=5001)

+ 164 - 0
app2.py

@@ -0,0 +1,164 @@
+import re
+import threading
+import time
+from flask import Flask, jsonify,send_from_directory
+
+from welding_exam_detect import start_welding_detection,init_welding_detection
+from welding_reset_detect import start_reset_detection,init_rest_detection
+from globals import inference_thread, stop_event,lock,redis_client
+
+
+app = Flask(__name__)
+#app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # 16 MB
+
+# Define the /wearing_detection endpoint
+
+@app.route('/reset_detection', methods=['GET'])
+def reset_detection():
+    # Start the reset check when the AI service is switched on: launches a
+    # worker that verifies every piece of equipment is back in place.
+    global inference_thread#'global' is required because the name is reassigned below
+
+    if inference_thread is None or not inference_thread.is_alive():#guard against starting the service twice
+        #redis_client.set("log_in_flag",'False')
+
+        stop_event.clear()
+
+        start_events = []#one event per detection thread so we know when each has started
+        inference_thread = threading.Thread(target=start_reset_detection,args=(start_events,))
+        inference_thread.start()
+        
+            
+        app.logger.info('start_reset_detection')
+    
+        init_rest_detection()
+        #init_rest()  (would set the reset-snapshot-saved flag to False)
+        #redis_client.set("log_in_flag",'True')  (would mark logged-in and enter the image-saving phase)
+        #time.sleep(8)  (would wait so reset_post_path has data before replying)
+
+        # Wait until every YOLO thread has begun detecting.
+        # NOTE(review): start_events is filled by the worker thread; if it has
+        # not appended yet this loop may see an empty list -- TODO confirm.
+        for event in start_events:
+            event.wait()
+
+        return jsonify({"status": "SUCCESS"}), 200
+    
+    else:
+        app.logger.info("reset_detection already running")   
+        return jsonify({"status": "ALREADY_RUNNING"}), 200    
+
+@app.route('/reset_status', methods=['GET']) 
+def reset_status():#获取复位检测状态
+    if redis_client.get("welding_reset_flag")=='0':#表明不需要复位,welding_reset_flag是要复位的个数
+        app.logger.info('reset_all is true')
+        #此时复位的检测还在进行,需要停止复位检测
+        stop_inference_internal()
+
+        return jsonify({"status": "RESET_ALL"}), 200
+    
+    if redis_client.get("welding_reset_flag")>'0':#表面需要复位,并设置log_in_flag为True
+        app.logger.info('reset_all is false')
+
+        #发送需要复位的信息
+        reset_post_path = redis_client.lrange("welding_reset_post_path", 0, -1)
+
+        json_array = []
+        for value in reset_post_path:
+            
+            match = re.search(r'resetStep(\d+)', value)
+            step_number = match.group(1)
+            json_object = {"resetStep": step_number, "image": value}
+            json_array.append(json_object)
+
+        init_rest_detection()
+        app.logger.info(reset_post_path)
+        return jsonify({"status": "NOT_RESET_ALL","data":json_array}), 200
+
+
+@app.route('/welding_detection', methods=['GET']) 
+def welding_detection():
+    # Start the welding-exam detection worker (one thread per camera/model).
+    global inference_thread
+
+    if inference_thread is None or not inference_thread.is_alive():#guard against starting the service twice
+
+        stop_event.clear()#no 'global' needed: we only call methods on it, never rebind it
+
+        start_events = []#one event per detection thread so we know when each has started
+        inference_thread = threading.Thread(target=start_welding_detection,args=(start_events,))
+        inference_thread.start()
+
+        init_welding_detection()
+        
+        # Wait until every YOLO thread has begun detecting.
+        # NOTE(review): start_events is filled by the worker thread; if it has
+        # not appended yet this loop may see an empty list -- TODO confirm.
+        for event in start_events:
+            event.wait()
+
+        return jsonify({"status": "SUCCESS"}), 200
+    else:
+        app.logger.info("welding_detection already running")   
+        return jsonify({"status": "ALREADY_RUNNING"}), 200
+
+            
+
+@app.route('/welding_status', methods=['GET']) 
+def welding_status():#开始登录时,检测是否需要复位,若需要,则发送复位信息,否则开始焊接检测
+    #global inference_thread
+    #with lock:   
+    #TODO 若出现异常再发送FAIL.
+    if redis_client.llen("welding_post_path") == 0:
+        return jsonify({"status": "NONE"}), 200##表示还没有检测到任何一个焊接步骤
+    
+    welding_post_path = redis_client.lrange("welding_post_path", 0, -1)
+
+    json_array = []
+    for value in welding_post_path:
+        match = re.search(r'step(\d+)', value)
+        step_number = match.group(1)
+        json_object = {"step": step_number, "image": value}
+        json_array.append(json_object)
+
+    #init_rest()
+    app.logger.info(welding_post_path)
+    return jsonify({"status": "SUCCESS","data":json_array}), 200
+
+@app.route('/end_welding_exam', methods=['GET'])#"end exam" button: stop detection, then start the reset check
+def end_welding_exam():
+    stop_inference_internal()
+    time.sleep(1)  # give the worker a moment to wind down before restarting
+    return reset_detection()
+               
+
+# def return_post_path():
+#     app.logger.info("List elements:", redis_client.lrange("reset_post_path", 0, -1))
+    
+def stop_inference_internal():
+    global inference_thread
+    if inference_thread is not None and inference_thread.is_alive():
+        stop_event.set()  # 设置停止事件标志,通知推理线程停止运行
+        inference_thread.join()  # 等待推理线程结束
+        inference_thread = None  # 释放线程资源
+        app.logger.info('detection stopped')
+        return True
+    else:
+        app.logger.info('No inference stopped')
+        return False
+
+@app.route('/stop_detection', methods=['GET'])
+def stop_inference():
+    #global inference_thread
+    if stop_inference_internal():
+        app.logger.info('detection stopped')
+        return jsonify({"status": "DETECTION_STOPPED"}), 200
+    else:
+        app.logger.info('No_detection_running')
+        return jsonify({"status": "No_detection_running"}), 200
+
+
+@app.route('/images/<filename>')
+def get_image(filename):
+    app.logger.info('get_image'+filename)
+    #pdb.set_trace()
+    return send_from_directory('static/images', filename)
+
+
+if __name__ == '__main__':
+
+    # Start the Flask server.
+    # NOTE(review): host and port are hard-coded; consider reading them from config.
+    app.run(debug=False, host='172.16.20.163', port=5002)

+ 109 - 0
app3.py

@@ -0,0 +1,109 @@
+import threading
+import time
+from flask import Flask, jsonify,send_from_directory
+from platform_wearing_detect import start_wearing_detection,init_wearing_detection
+from globals import inference_thread, stop_event,lock,redis_client
+
+
+app = Flask(__name__)
+#app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024  # 16 MB
+
+# Define the /wearing_detection endpoint
+@app.route('/wearing_detection', methods=['GET'])
+def wearing_detection():
+    # Start the PPE ("wearing") detection worker for the platform exam.
+    # Replies SUCCESS once every detection thread has signalled readiness,
+    # or ALREADY_RUNNING when a worker thread is still alive.
+    global inference_thread#'global' is required because the name is reassigned below
+
+    if inference_thread is None or not inference_thread.is_alive():
+        stop_event.clear()#no 'global' needed: we only call methods on it, never rebind it
+        
+        start_events = []#one event per detection thread so we know when each has started
+        inference_thread = threading.Thread(target=start_wearing_detection,args=(start_events,))
+        inference_thread.start()
+        init_wearing_detection()
+        # NOTE(review): init runs after the worker starts, and start_events is
+        # appended to by the worker thread; if it has not appended yet, the
+        # loop below may iterate an empty list and return early -- TODO confirm.
+        # Wait until every YOLO thread has begun detecting before replying SUCCESS
+        for event in start_events:
+            event.wait()
+
+        app.logger.info('start_wearing_detection')
+        return jsonify({"status": "SUCCESS"}), 200
+    
+    else:
+        app.logger.info("start_wearing_detection already running")   
+        return jsonify({"status": "ALREADY_RUNNING"}), 200
+    
+
+@app.route('/human_postion_status', methods=['GET']) 
+def human_postion_status():#开始登录时,检测是否需要复位,若需要,则发送复位信息,否则开始焊接检测
+    #global inference_thread
+    if redis_client.get("platform_wearing_human_in_postion")=='False':
+        app.logger.info('NOT_IN_POSTION')
+        return jsonify({"status": "NOT_IN_POSTION"}), 200
+    else:
+        app.logger.info('IN_POSTION')
+        return jsonify({"status": "IN_POSTION"}), 200
+
+@app.route('/wearing_status', methods=['GET']) 
+def wearing_status():
+    # Report the platform PPE detection result: a count per item plus the
+    # annotated snapshot stored in Redis.
+    #global inference_thread
+    with lock:   
+        #TODO send FAIL if an exception occurs.
+        # Ask the worker to publish its current detection image and counts.
+        redis_client.set("platform_wearing_detection_img_flag",'True')
+        time.sleep(1)  # give the worker a moment to write the result keys
+        if not redis_client.exists("platform_wearing_items_nums") or not redis_client.exists("platform_wearing_detection_img"):
+            return jsonify({"status": "NONE"}), 200##the wearing-detection worker has not finished yet
+        
+        wearing_items_nums = redis_client.lrange("platform_wearing_items_nums", 0, -1)
+        # NOTE(review): counts are paired with item names purely by position,
+        # so the worker must push them in exactly this order -- confirm.
+        wearing_items_list = ['belt', 'helmet',  'shoe']
+        json_array = []
+        for num, item in zip(wearing_items_nums, wearing_items_list):
+            json_object = {"name": item, "number": num}
+            json_array.append(json_object)
+
+        app.logger.info(json_array)
+        image=redis_client.get("platform_wearing_detection_img")
+        app.logger.info(image)
+
+        return jsonify({"status": "SUCCESS","data":json_array,"image":image}), 200
+
+               
+@app.route('/end_wearing_exam', methods=['GET'])
+def end_wearing_exam():
+    init_wearing_detection()
+    return jsonify({"status": "SUCCESS"}), 200
+
+    
+
+def stop_inference_internal():
+    global inference_thread
+    if inference_thread is not None and inference_thread.is_alive():
+        stop_event.set()  # 设置停止事件标志,通知推理线程停止运行
+        inference_thread.join()  # 等待推理线程结束
+        inference_thread = None  # 释放线程资源
+        app.logger.info('detection stopped')
+        return True
+    else:
+        app.logger.info('No inference stopped')
+        return False
+
+@app.route('/stop_detection', methods=['GET'])
+def stop_inference():
+    #global inference_thread
+    if stop_inference_internal():
+        app.logger.info('detection stopped')
+        return jsonify({"status": "DETECTION_STOPPED"}), 200
+    else:
+        app.logger.info('No_detection_running')
+        return jsonify({"status": "No_detection_running"}), 200
+
+@app.route('/images/<filename>')
+def get_image(filename):
+    app.logger.info('get_image'+filename)
+    #pdb.set_trace()
+    return send_from_directory('static/images', filename)
+
+
+if __name__ == '__main__':
+
+    # Start the Flask server.
+    # NOTE(review): host and port are hard-coded; consider reading them from config.
+    app.run(debug=False, host='172.16.20.163', port=5003)

+ 161 - 0
app4.py

@@ -0,0 +1,161 @@
+import re
+import threading
+import time
+from flask import Flask, jsonify,send_from_directory
+
+from platform_setup_detect import start_platform_setup_detection,init_platform_setup_detection
+from platform_remove_detect import start_platform_remove_detection,init_platform_remove_detection
+from globals import inference_thread, stop_event,lock,redis_client
+
+
+app = Flask(__name__)
+
+
+# Define the /wearing_detection endpoint
+
+@app.route('/platform_setup_detection', methods=['GET'])
+def platform_setup_detection():#开启平台搭设检测
+    global inference_thread#当全局变量需要重新赋值时,需要用global关键字声明
+
+    if inference_thread is None or not inference_thread.is_alive():#防止重复开启检测服务
+        #redis_client.set("log_in_flag",'False')
+
+        stop_event.clear()
+
+        start_events = []#给每个线程一个事件,让我知道某个线程是否开始检测
+        inference_thread = threading.Thread(target=start_platform_setup_detection,args=(start_events,))
+        inference_thread.start()
+        
+            
+        app.logger.info('start_platform_setup_detection')
+        init_platform_setup_detection()
+
+
+        # 等待所有YOLO线程开始检测
+        for event in start_events:
+            event.wait()
+
+        return jsonify({"status": "SUCCESS"}), 200
+    
+    else:
+        app.logger.info("reset_detection already running")   
+        return jsonify({"status": "ALREADY_RUNNING"}), 200    
+
+@app.route('/platform_setup_status', methods=['GET']) 
+def platform_setup_status():#获取平台搭设状态状态
+    if not redis_client.exists('platform_setup_order'):#平台搭设步骤还没有一个完成
+        app.logger.info('platform_setup_order is none')
+
+        return jsonify({"status": "NONE"}), 200
+    
+    else:
+
+        platform_setup_order = redis_client.lrange("platform_setup_order", 0, -1)
+
+        json_array = []
+        for value in platform_setup_order:
+            match = re.search(r'platform_setup_(\d+)', value)
+            step_number = match.group(1)
+            json_object = {"step": step_number, "image": redis_client.get(f"platform_setup_{step_number}_img"),'number':redis_client.get(f"platform_setup_{step_number}")}
+            json_array.append(json_object) 
+
+        return jsonify({"status": "SUCCESS","data":json_array}), 200
+
+
+@app.route('/platform_setup_finish', methods=['GET']) 
+def platform_setup_finish():#开始登录时,检测是否需要复位,若需要,则发送复位信息,否则开始焊接检测
+
+    stop_inference_internal()
+    app.logger.info('platform_setup_finish')
+    return jsonify({"status": "SUCCESS"}), 200
+
+            
+
+@app.route('/platform_remove_detection', methods=['GET']) 
+def platform_remove_detection():#开始登录时,检测是否需要复位,若需要,则发送复位信息,否则开始焊接检测
+    global inference_thread#当全局变量需要重新赋值时,需要用global关键字声明
+
+    if inference_thread is None or not inference_thread.is_alive():#防止重复开启检测服务
+        #redis_client.set("log_in_flag",'False')
+
+        stop_event.clear()
+
+        start_events = []#给每个线程一个事件,让我知道某个线程是否开始检测
+        inference_thread = threading.Thread(target=start_platform_remove_detection,args=(start_events,))
+        inference_thread.start()
+        
+            
+        app.logger.info('start_platform_remove_detection')
+        init_platform_remove_detection()
+
+
+        # 等待所有YOLO线程开始检测
+        for event in start_events:
+            event.wait()
+
+        return jsonify({"status": "SUCCESS"}), 200
+    
+    else:
+        app.logger.info("reset_detection already running")   
+        return jsonify({"status": "ALREADY_RUNNING"}), 200    
+    
+@app.route('/platform_remove_status', methods=['GET']) 
+def platform_remove_status():#开始登录时,检测是否需要复位,若需要,则发送复位信息,否则开始焊接检测
+    if not redis_client.exists('platform_remove_order'):#平台搭设步骤还没有一个完成
+        app.logger.info('platform_remove_order is none')
+
+        return jsonify({"status": "NONE"}), 200
+    
+    else:
+
+        platform_setup_order = redis_client.lrange("platform_remove_order", 0, -1)
+
+        json_array = []
+        for num in platform_setup_order:
+
+            json_object = {"step": num, "image": redis_client.get(f"platform_remove_{num}_img")}
+            json_array.append(json_object) 
+
+        return jsonify({"status": "SUCCESS","data":json_array}), 200
+
+@app.route('/platform_remove_finish', methods=['GET'])#点击考试结束按钮,停止检测,并复位
+def platform_remove_finish():
+    stop_inference_internal()
+    app.logger.info('platform_remove_finish')
+    return jsonify({"status": "SUCCESS"}), 200
+
+
+    
+def stop_inference_internal():
+    global inference_thread
+    if inference_thread is not None and inference_thread.is_alive():
+        stop_event.set()  # 设置停止事件标志,通知推理线程停止运行
+        inference_thread.join()  # 等待推理线程结束
+        inference_thread = None  # 释放线程资源
+        app.logger.info('detection stopped')
+        return True
+    else:
+        app.logger.info('No inference stopped')
+        return False
+
+@app.route('/stop_detection', methods=['GET'])
+def stop_inference():
+    #global inference_thread
+    if stop_inference_internal():
+        app.logger.info('detection stopped')
+        return jsonify({"status": "DETECTION_STOPPED"}), 200
+    else:
+        app.logger.info('No_detection_running')
+        return jsonify({"status": "No_detection_running"}), 200
+
+
+@app.route('/images/<filename>')
+def get_image(filename):
+    app.logger.info('get_image'+filename)
+    return send_from_directory('static/images', filename)
+
+
+if __name__ == '__main__':
+
+    # Start the Flask server.
+    # NOTE(review): host and port are hard-coded; consider reading them from config.
+    app.run(debug=False, host='172.16.20.163', port=5004)

+ 103 - 0
config.py

@@ -0,0 +1,103 @@
+import numpy as np
+
+
+#CLIENT_URL = 'http://172.16.20.23:8081/'
+
+SAVE_IMG_PATH = '/mnt/xcd/code/ai_test/static/images'  # actual on-server directory where snapshots are written
+
+POST_IMG_PATH1 = 'http://172.16.20.163:5001/images'  # URL reachable through port mapping -- welding exam, subject 1
+POST_IMG_PATH2 = 'http://172.16.20.163:5002/images' #welding exam, subject 2
+POST_IMG_PATH3 = 'http://172.16.20.163:5003/images' #platform exam, subject 1 (PPE wearing)
+POST_IMG_PATH4 = 'http://172.16.20.163:5004/images' #platform exam, subject 2 (setup and removal)
+
+#Welding exam video streams
+# Define paths to RTSP streams
+# NOTE(review): RTSP URLs embed plaintext credentials; consider moving them
+# to environment variables or a secrets store.
+WELDING_CH1_RTSP = 'rtsp://admin:yaoan1234@172.16.22.230/cam/realmonitor?channel=1&subtype=0'#welder power-switch camera
+WELDING_CH2_RTSP = 'rtsp://admin:yaoan1234@172.16.22.231/cam/realmonitor?channel=1&subtype=0'#welding bench camera
+WELDING_CH3_RTSP = 'rtsp://admin:yaoan1234@172.16.22.233/cam/realmonitor?channel=1&subtype=0'#oil-barrel check camera
+WELDING_CH4_RTSP = 'rtsp://admin:yaoan1234@172.16.22.232/cam/realmonitor?channel=1&subtype=0'#main-switch check camera
+WELDING_CH5_RTSP = 'rtsp://admin:yaoan1234@172.16.22.234/cam/realmonitor?channel=1&subtype=0'#mask/gloves/ground-wire check camera
+WELDING_CH6_RTSP = 'rtsp://admin:yaoan1234@172.16.22.235/cam/realmonitor?channel=1&subtype=0'#PPE wearing camera
+
+WELDING_CH1_MODEL="/mnt/xcd/code/ai_test/weights/ch1_welding_switch_813.pt"
+WELDING_CH2_MODEL="/mnt/xcd/code/ai_test/weights/ch2_welding_desk_cls_813.pt"
+WELDING_CH3_MODEL="/mnt/xcd/code/ai_test/weights/ch3_oil_barrel_detect_813.pt"
+WELDING_CH4_MODEL="/mnt/xcd/code/ai_test/weights/ch4_main_switch_cls_813.pt"
+WELDING_CH5_MODEL="/mnt/xcd/code/ai_test/weights/ch5_mask_gloves_wire_detect_813.pt"
+WELDING_CH6_MODEL='/mnt/xcd/code/ai_test/weights/ch6_wearing_detect_813.pt'
+
+HUMAN_DETECTION_MODEL="/mnt/xcd/code/ai_test/weights/yolov8n.pt"#human-detection model
+
+# Define paths to models (index-aligned with WELDING_VIDEO_SOURCES below)
+WELDING_MODEL_PATHS = [
+    WELDING_CH1_MODEL,
+    WELDING_CH2_MODEL,
+    WELDING_CH3_MODEL,
+    WELDING_CH4_MODEL,
+    WELDING_CH5_MODEL
+]
+
+WELDING_VIDEO_SOURCES = [
+    WELDING_CH1_RTSP,
+    WELDING_CH2_RTSP,
+    WELDING_CH3_RTSP,
+    WELDING_CH4_RTSP,
+    WELDING_CH5_RTSP
+]
+
+
+
+WELDING_WEARING_MODEL=[
+    HUMAN_DETECTION_MODEL,
+    WELDING_CH6_MODEL
+]
+
+WELDING_WEARING_VIDEO_SOURCES= WELDING_CH6_RTSP
+
+#WEAR_DETECTION_VIDEO_SOURCES= "/home/dxc/special_test/ch1.mp4"
+
+# PPE detection: region of interest (xmin, ymin, xmax, ymax)
+WEAR_DETECTION_AREA = (350, 0, 1400, 1080)
+
+
+# Helmet detection region (xmin, ymin, xmax, ymax)
+
+WELDING_REGION1=(1499,339,1839,723)
+# Oil-barrel danger zone (polygon)
+
+WELDING_REGION2 = np.array([[607, 555], [454, 0], [2560, 0], [2560, 1440], [430, 1440]], np.int32)
+
+# Ground-clamp attachment area on the welding bench (polygon)
+
+WELDING_REGION3 = np.array([[1613, 627], [1601, 658], [1697, 987], [1710, 962]], np.int32)
+
+
+####Platform exam video streams
+PLATFORM_CH1_RTSP='rtsp://admin:yaoan1234@172.16.22.241/cam/realmonitor?channel=1&subtype=0'#PPE wearing check
+PLATFORM_CH2_RTSP='rtsp://admin:yaoan1234@172.16.22.240/cam/realmonitor?channel=1&subtype=0'#scaffold assembly
+
+PLATFORM_CH3_RTSP='rtsp://admin:yaoan1234@172.16.22.243/cam/realmonitor?channel=1&subtype=0'#scaffold assembly
+
+PLATFORM_CH1_MODEL='/mnt/xcd/code/ai_test/weights/platform_ch1_wearing.pt'
+PLATFORM_CH2_MODEL='/mnt/xcd/code/ai_test/weights/high_work_obb_final.pt'
+
+# Define paths to input videos
+
+
+#Welding PPE detection parameters
+
+#################Platform setup detection parameters
+PLATFORM_WEARING_MODEL=[
+    HUMAN_DETECTION_MODEL,
+    PLATFORM_CH1_MODEL    
+]
+
+PLATFORM_WEARING_VIDEO_SOURCES=PLATFORM_CH1_RTSP
+PLATFORM_SETUP_VIDEO_SOURCES=PLATFORM_CH2_RTSP
+PLATFORM_SETUP_MODEL=PLATFORM_CH2_MODEL
+
+# Is the workpiece on the bench -- bench polygon
+#REGION4 = np.array([[1614, 630], [1712, 961], [1963, 953], [1841, 643]], np.int32)
+
+# task1_finish=0  (PPE task completion flag; 3 means the task is done)
+# task1_sava_img=False  (PPE task image-saved flag)

+ 75 - 0
globals.py

@@ -0,0 +1,75 @@
+import threading
+# Global state shared by the detection worker threads and the Flask apps.
+import redis
+import time
+# Connect to the Redis server (decode_responses=True -> values come back as str, not bytes)
+redis_client = redis.StrictRedis(host='localhost', port=5050, db=0,decode_responses=True)
+
+inference_thread = None          # currently running detection thread, if any
+stop_event = threading.Event()   # set to ask the worker loop to exit
+lock=threading.Lock()            # guards multi-step Redis read/write sections
+
+#condition = threading.Condition()
+###############Welding exam
+#True means the step is finished and its snapshot should be saved/posted.
+step1=False #hazard source removed
+step2=False
+step3=False
+step4=False
+step5=False
+step6=False #hazard source removed
+step7=False
+step8=False
+step9=False
+step10=False
+step11=False
+step12=False
+step13=False
+
+steps = [False] * 13  # same step flags in list form; NOTE(review): redundant with step1..step13 -- confirm which the workers use
+
+oil_barrel=None
+main_switch=None
+grounding_wire=None
+welding_machine_switch=None
+welding_components=None
+mask=None
+welding=None
+gloves=None
+sweep=None
+
+sweep_detect_num=0
+welding_detect_num=0
+###############
+
+
+
+
+###########Have the items been returned to their home positions (reset check)
+oil_barrel_flag=False
+main_switch_flag=False
+ground_wire_flag=False
+welding_components_flag=False
+welding_machine_switch_flag=False
+
+oil_barrel_save_img=False
+main_switch_save_img=False
+ground_wire_save_img=False
+welding_components_save_img=False
+welding_machine_switch_save_img=False
+
+
+reset_all=None
+log_in_flag=False#login flag: while the frontend is not logged in, images must not be saved/posted
+###############################
+###############Platform setup exam
+platform_setup_steps_detect_num=[0]*14
+platform_setup_final_result=[0]*14
+platform_setup_steps_img=[False]*14
+################Platform removal exam
+platform_remove_steps_detect_num=[0]*14
+platform_remove_final_result=[0]*14
+platform_remove_steps_img=[False]*14
+
+remove_detection_timers = [time.time()] * 14  # per-step timestamp of the last detection
+remove_detection_status = [False]*14 # per-step "timed out / removed" flag

+ 267 - 0
platform_remove_detect.py

@@ -0,0 +1,267 @@
+import cv2
+import torch
+import time
+from shapely.geometry import box, Polygon
+import threading
+import numpy as np
+from datetime import datetime
+from ultralytics import YOLO
+
+from utils.tool import IoU
+from globals import stop_event,redis_client
+from config import SAVE_IMG_PATH,POST_IMG_PATH4,PLATFORM_SETUP_MODEL,PLATFORM_SETUP_VIDEO_SOURCES
+from globals import platform_remove_steps_detect_num,platform_remove_final_result,platform_remove_steps_img,remove_detection_status,remove_detection_timers
+
+def update_detection_status(platform_remove_steps):
+    global remove_detection_status,remove_detection_timers
+    current_time = time.time()
+    
+    for i,nums in enumerate(platform_remove_steps):
+        if nums>0:
+            remove_detection_timers[i] = current_time  # 更新物体的最后检测时间
+            remove_detection_status[i] = False  # 重置状态为 False
+
+
+
+def check_timeout():
+    current_time = time.time()
+    
+    for i in range(14):
+        if current_time - remove_detection_timers[i] > 10:  # 如果超过10秒未检测到
+            remove_detection_status[i] = True  # 将状态置为 True
+    #         return True
+    # return False
+
+
+
+def init_platform_remove_detection():
+    global remove_detection_timers,remove_detection_status
+    remove_detection_timers = [time.time()] * 14  # 初始化计时器
+    remove_detection_status = [False]*14 # 初始化检
+    redis_client.delete("platform_remove_order")
+    for i in range(1, 14):
+        redis_client.delete(f"platform_remove_{i}_img")
+
+
+def start_platform_remove_detection(start_events):
+    # Launch the removal-detection worker thread and block until it finishes.
+    # NOTE(review): this joins the thread, so the calling thread stays busy for
+    # the entire detection run -- confirm that is intended.
+    threads = []
+    model_path = PLATFORM_SETUP_MODEL
+    video_source = PLATFORM_SETUP_VIDEO_SOURCES
+    event = threading.Event()
+    start_events.append(event)  # lets the caller wait until detection has begun
+    thread = threading.Thread(target=process_video, args=(model_path, video_source, event))
+    threads.append(thread)
+    thread.daemon = True
+    thread.start()
+
+    # Wait for the thread to complete
+    thread.join()
+    print("搭设线程运行结束")
+
+
+
+# Function to process video with YOLO model
+def process_video(model_path, video_source,start_event):
+    # Load YOLO model
+    model = YOLO(model_path)
+    
+    cap = cv2.VideoCapture(video_source)
+    
+    while cap.isOpened():
+        if stop_event.is_set():#控制停止推理
+        #del model
+            break
+    # Read a frame from the video
+        success, frame = cap.read()
+
+        if success:
+            # Run YOLOv8 inference on the frame
+            if cap.get(cv2.CAP_PROP_POS_FRAMES) % 25 != 0:
+                continue
+            
+            results = model.predict(frame,conf=0.6,verbose=False,task='obb')
+            LEVLEL0_REGION = np.array([[1167, 908], [931, 1153], [1962, 1187], [2034, 936]], np.int32)
+            LEVLEL1_REGION = np.array([[1163, 574], [859, 818], [1969, 828], [2060, 588]], np.int32)
+            LEVLEL2_REGION = np.array([[1147, 263], [778, 438], [1953, 442], [2044, 248]], np.int32)
+            LEVLEL3_REGION = np.array([[1142, 34], [793, 163], [1945, 112], [2050, 17]], np.int32)
+
+
+            DIAGONAL_REGION = np.array([[838, 147], [845, 1145], [1935, 1147], [1943, 147]], np.int32)
+            global platform_remove_steps_detect_num,remove_detection_status,platform_remove_steps_img
+            
+            # 每十秒归零 platform_remove_steps_detect_num
+            # current_time = time.time()
+            # if not hasattr(process_video, 'last_reset_time'):
+            #     process_video.last_reset_time = current_time
+            
+            # if current_time - process_video.last_reset_time >= 10:
+            #     platform_remove_steps_detect_num = [0] * 14
+            #     process_video.last_reset_time = current_time
+
+
+            for r in results:
+                boxes=r.obb.xyxyxyxy
+                confidences=r.obb.conf
+                classes=r.obb.cls
+                
+                # 0: montant
+                # 1: diagonal
+                # 2: wheel
+                # 3: vertical_bar
+                # 4: horizontal_bar
+                # 5: ladder
+                # 6: toe_board
+                # 7: scaffold
+
+
+                # 1=轮子
+                # 2=立杆
+                # 3=纵向扫地杆
+                # 4=横向扫地杆
+                # 5=纵向水平杆1
+                # 6=横向水平杆1
+                # 7=纵向水平杆2
+                # 8=横向水平杆2
+                # 9=斜撑
+                # 10=爬梯
+                # 11=脚手板
+                # 12=挡脚板
+                # 13=纵向水平杆3
+                # 14=横向水平杆3
+                platform_remove_steps = [0] * 14
+                for i in range(len(boxes)):
+                    confidence = confidences[i].item()
+                    cls = int(classes[i].item())
+                    label = model.names[cls]
+
+                    #print(boxes[i].tolist())
+                    box_coords = boxes[i].tolist()
+                    x_center = (box_coords[0][0] + box_coords[1][0]+box_coords[2][0]+box_coords[3][0]) / 4
+                    y_center=(box_coords[0][1] + box_coords[1][1]+box_coords[2][1]+box_coords[3][1]) / 4
+                    center_point = (int(x_center), int(y_center))
+                    if label=="wheel":#轮子
+                        platform_remove_steps_detect_num[0]+=1
+                        if platform_remove_steps_detect_num[0]>3:
+                            platform_remove_steps[0]+=1
+
+                    elif label=="montant":#立杆
+                        platform_remove_steps_detect_num[1]+=1
+                        if platform_remove_steps_detect_num[1]>3:
+                            platform_remove_steps[1]+=1
+                    elif label=="vertical_bar":#水平横杆,纵杆
+                        
+
+                        is_inside0 = cv2.pointPolygonTest(LEVLEL0_REGION.reshape((-1, 1, 2)), center_point, False)
+                        is_inside1 = cv2.pointPolygonTest(LEVLEL1_REGION.reshape((-1, 1, 2)), center_point, False)
+                        is_inside2 = cv2.pointPolygonTest(LEVLEL2_REGION.reshape((-1, 1, 2)), center_point, False)
+                        is_inside3 = cv2.pointPolygonTest(LEVLEL3_REGION.reshape((-1, 1, 2)), center_point, False)
+                        #print(is_inside)
+                        if is_inside0>=0 :
+                            platform_remove_steps_detect_num[2]+=1
+                            if platform_remove_steps_detect_num[2]>3:
+                                platform_remove_steps[2]+=1 #表示纵向扫地杆
+                        elif is_inside1>=0:
+                            platform_remove_steps_detect_num[4]+=1
+                            if platform_remove_steps_detect_num[4]>3:
+                                platform_remove_steps[4]+=1#5=纵向水平杆1
+                        elif is_inside2>=0:
+                            platform_remove_steps_detect_num[6]+=1
+                            if platform_remove_steps_detect_num[6]>3:
+                                platform_remove_steps[6]+=1#7=纵向水平杆2
+                        elif is_inside3>=0:
+                            platform_remove_steps_detect_num[12]+=1
+                            if platform_remove_steps_detect_num[12]>3:
+                                platform_remove_steps[12]+=1#13=纵向水平杆3
+                        
+
+                    elif label=="horizontal_bar":#水平纵杆
+
+                        is_inside0 = cv2.pointPolygonTest(LEVLEL0_REGION.reshape((-1, 1, 2)), center_point, False)
+                        is_inside1 = cv2.pointPolygonTest(LEVLEL1_REGION.reshape((-1, 1, 2)), center_point, False)
+                        is_inside2 = cv2.pointPolygonTest(LEVLEL2_REGION.reshape((-1, 1, 2)), center_point, False)
+                        is_inside3 = cv2.pointPolygonTest(LEVLEL3_REGION.reshape((-1, 1, 2)), center_point, False)
+                        #print(is_inside)
+                        if is_inside0>=0 :
+                            platform_remove_steps_detect_num[3]+=1
+                            if platform_remove_steps_detect_num[3]>3:
+                                platform_remove_steps[3]+=1#4=横向扫地杆
+                        elif is_inside1>=0:
+                            platform_remove_steps_detect_num[5]+=1
+                            if platform_remove_steps_detect_num[5]>3:
+                                platform_remove_steps[5]+=1#6=横向水平杆1
+                        elif is_inside2>=0:
+                            platform_remove_steps_detect_num[7]+=1
+                            if platform_remove_steps_detect_num[7]>3:
+                                platform_remove_steps[7]+=1# 8=横向水平杆2
+                        elif is_inside3>=0:
+                            platform_remove_steps_detect_num[13]+=1
+                            if platform_remove_steps_detect_num[13]>3:
+                                platform_remove_steps[13]+=1#14=横向水平杆3
+
+                    elif label=="diagonal":#斜撑
+
+                        is_inside = cv2.pointPolygonTest(DIAGONAL_REGION.reshape((-1, 1, 2)), center_point, False)
+                        if is_inside>=0:
+                            platform_remove_steps_detect_num[8]+=1
+                            if platform_remove_steps_detect_num[8]>3:
+                                platform_remove_steps[8]+=1# 9=斜撑
+                    
+
+                    elif label=="ladder":#梯子
+                        #10=爬梯
+                        platform_remove_steps_detect_num[9]+=1
+                        if platform_remove_steps_detect_num[9]>3:
+                            platform_remove_steps[9]+=1
+                        
+                    elif label=="scaffold":#脚手板
+                        #11=脚手板
+                        platform_remove_steps_detect_num[10]+=1
+                        if platform_remove_steps_detect_num[10]>3:
+                            platform_remove_steps[10]+=1
+
+
+                    elif label=="toe_board":#档脚板
+                        #12=挡脚板
+                        platform_remove_steps_detect_num[11]+=1
+                        if platform_remove_steps_detect_num[11]>3:
+                            platform_remove_steps[11]+=1
+
+
+                update_detection_status(platform_remove_steps)
+                check_timeout()#返回True表示10秒内未检测到某物体,表示该物体拆除完成
+
+                for i, status in reversed(list(enumerate(remove_detection_status))):
+                    if status and platform_remove_steps_img[i]==False:
+                        redis_client.rpush("platform_remove_order", i+1)
+                        platform_remove_steps_img[i]=True
+                        save_time=datetime.now().strftime('%Y%m%d_%H%M')
+                        imgpath = f"{SAVE_IMG_PATH}/platform_remove{i+1}_{save_time}.jpg"
+                        post_path= f"{POST_IMG_PATH4}/platform_remove{i+1}_{save_time}.jpg"
+                        annotated_frame = results[0].plot()
+                        cv2.imwrite(imgpath, annotated_frame)
+                        redis_client.set(f"platform_remove_{i+1}_img",post_path)
+                        #redis_client.set(f"platform_remove_{14-i-1}", "1")
+
+
+
+
+
+
+                start_event.set()          
+
+
+        else:
+            # Break the loop if the end of the video is reached
+            break
+
+    # Release the video capture object and close the display window
+    cap.release()
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()
+    del model
+
+
+
+        
+
+    

+ 297 - 0
platform_setup_detect.py

@@ -0,0 +1,297 @@
+import cv2
+import torch
+from shapely.geometry import box, Polygon
+import threading
+import numpy as np
+from datetime import datetime
+from ultralytics import YOLO
+
+from utils.tool import IoU
+from globals import stop_event,redis_client
+from config import SAVE_IMG_PATH,POST_IMG_PATH4,PLATFORM_SETUP_MODEL,PLATFORM_SETUP_VIDEO_SOURCES
+from globals import platform_setup_steps_detect_num,platform_setup_final_result,platform_setup_steps_img
+
+
def init_platform_setup_detection():
    """Reset all in-memory and Redis state for a new platform-setup detection run.

    Clears the module-global step counters/flags and deletes every
    ``platform_setup_{n}`` / ``platform_setup_{n}_img`` key plus the
    ``platform_setup_order`` list so a fresh exam starts from zero.
    """
    global platform_setup_steps_detect_num,platform_setup_final_result,platform_setup_steps_img
    platform_setup_final_result=[0]*14
    platform_setup_steps_detect_num=[0]*14
    platform_setup_steps_img=[False]*14
    redis_client.delete("platform_setup_order")
    # There are 14 setup steps (the detector writes platform_setup_1 .. platform_setup_14),
    # so cleanup must cover index 14 as well.  The previous range(1, 14) left
    # platform_setup_14 and platform_setup_14_img stale across runs.
    for i in range(1, 15):
        redis_client.delete(f"platform_setup_{i}")
        redis_client.delete(f"platform_setup_{i}_img")
+
def start_platform_setup_detection(start_events):
    """Run the single platform-setup detection worker and wait for it to finish.

    A fresh threading.Event is appended to *start_events* so the caller can
    observe the moment the worker actually begins producing detections.
    """
    ready = threading.Event()
    start_events.append(ready)
    worker = threading.Thread(
        target=process_video,
        args=(PLATFORM_SETUP_MODEL, PLATFORM_SETUP_VIDEO_SOURCES, ready),
        daemon=True,
    )
    worker.start()
    # Block until the video stream is exhausted or stop_event ends the worker.
    worker.join()
    print("搭设线程运行结束")
+
+
+
+# Function to process video with YOLO model
def process_video(model_path, video_source,start_event):
    """Run OBB inference on the platform-setup stream and publish step progress.

    Per-class sightings are debounced through the module-global counters in
    ``platform_setup_steps_detect_num`` (a class/region must be seen more than
    3 times in total before its step counts).  Confirmed steps are written to
    Redis as ``platform_setup_{n}`` keys, appended to ``platform_setup_order``,
    and annotated snapshots are saved for finished steps.  Runs until the
    stream ends or the global ``stop_event`` is set.
    """
    # Load YOLO model
    model = YOLO(model_path)
    
    cap = cv2.VideoCapture(video_source)
    
    while cap.isOpened():
        if stop_event.is_set():# controller requested shutdown -> stop inference
        #del model
            break
    # Read a frame from the video
        success, frame = cap.read()

        if success:
            # Run YOLOv8 inference on the frame
            # Analyse only every 25th frame (~1 fps on a 25 fps stream).
            if cap.get(cv2.CAP_PROP_POS_FRAMES) % 25 != 0:
                continue
            
            results = model.predict(frame,conf=0.6,verbose=False,task='obb')
            # Horizontal bands of the scaffold in pixel space, bottom (level 0)
            # to top (level 3).  NOTE: "LEVLEL" is a historic typo kept as-is.
            LEVLEL0_REGION = np.array([[1167, 908], [931, 1153], [1962, 1187], [2034, 936]], np.int32)
            LEVLEL1_REGION = np.array([[1163, 574], [859, 818], [1969, 828], [2060, 588]], np.int32)
            LEVLEL2_REGION = np.array([[1147, 263], [778, 438], [1953, 442], [2044, 248]], np.int32)
            LEVLEL3_REGION = np.array([[1142, 34], [793, 163], [1945, 112], [2050, 17]], np.int32)


            # Region in which a diagonal brace counts as installed.
            DIAGONAL_REGION = np.array([[838, 147], [845, 1145], [1935, 1147], [1943, 147]], np.int32)
            global platform_setup_steps_detect_num,platform_setup_final_result
            
            # (disabled) reset platform_setup_steps_detect_num every ten seconds
            # current_time = time.time()
            # if not hasattr(process_video, 'last_reset_time'):
            #     process_video.last_reset_time = current_time
            
            # if current_time - process_video.last_reset_time >= 10:
            #     platform_setup_steps_detect_num = [0] * 14
            #     process_video.last_reset_time = current_time


            for r in results:
                boxes=r.obb.xyxyxyxy
                confidences=r.obb.conf
                classes=r.obb.cls
                
                # Model class indices:
                # 0: montant
                # 1: diagonal
                # 2: wheel
                # 3: vertical_bar
                # 4: horizontal_bar
                # 5: ladder
                # 6: toe_board
                # 7: scaffold


                # Step numbering (list index + 1):
                # 1  = wheel
                # 2  = upright pole (montant)
                # 3  = longitudinal sweeping bar
                # 4  = transverse sweeping bar
                # 5  = longitudinal horizontal bar 1
                # 6  = transverse horizontal bar 1
                # 7  = longitudinal horizontal bar 2
                # 8  = transverse horizontal bar 2
                # 9  = diagonal brace
                # 10 = ladder
                # 11 = scaffold board
                # 12 = toe board
                # 13 = longitudinal horizontal bar 3
                # 14 = transverse horizontal bar 3
                platform_setup_steps = [0] * 14
                for i in range(len(boxes)):
                    confidence = confidences[i].item()
                    cls = int(classes[i].item())
                    label = model.names[cls]

                    #print(boxes[i].tolist())
                    box_coords = boxes[i].tolist()
                    # Centroid of the oriented box = mean of its four corners.
                    x_center = (box_coords[0][0] + box_coords[1][0]+box_coords[2][0]+box_coords[3][0]) / 4
                    y_center=(box_coords[0][1] + box_coords[1][1]+box_coords[2][1]+box_coords[3][1]) / 4
                    center_point = (int(x_center), int(y_center))
                    if label=="wheel":# step 1: wheel
                        platform_setup_steps_detect_num[0]+=1
                        if platform_setup_steps_detect_num[0]>3:
                            platform_setup_steps[0]+=1

                    elif label=="montant":# step 2: upright pole
                        platform_setup_steps_detect_num[1]+=1
                        if platform_setup_steps_detect_num[1]>3:
                            platform_setup_steps[1]+=1
                    elif label=="vertical_bar":# longitudinal bar: step depends on the height band
                        

                        is_inside0 = cv2.pointPolygonTest(LEVLEL0_REGION.reshape((-1, 1, 2)), center_point, False)
                        is_inside1 = cv2.pointPolygonTest(LEVLEL1_REGION.reshape((-1, 1, 2)), center_point, False)
                        is_inside2 = cv2.pointPolygonTest(LEVLEL2_REGION.reshape((-1, 1, 2)), center_point, False)
                        is_inside3 = cv2.pointPolygonTest(LEVLEL3_REGION.reshape((-1, 1, 2)), center_point, False)
                        #print(is_inside)
                        if is_inside0>=0 :
                            platform_setup_steps_detect_num[2]+=1
                            if platform_setup_steps_detect_num[2]>3:
                                platform_setup_steps[2]+=1 # step 3: longitudinal sweeping bar
                        elif is_inside1>=0:
                            platform_setup_steps_detect_num[4]+=1
                            if platform_setup_steps_detect_num[4]>3:
                                platform_setup_steps[4]+=1# step 5: longitudinal horizontal bar 1
                        elif is_inside2>=0:
                            platform_setup_steps_detect_num[6]+=1
                            if platform_setup_steps_detect_num[6]>3:
                                platform_setup_steps[6]+=1# step 7: longitudinal horizontal bar 2
                        elif is_inside3>=0:
                            platform_setup_steps_detect_num[12]+=1
                            if platform_setup_steps_detect_num[12]>3:
                                platform_setup_steps[12]+=1# step 13: longitudinal horizontal bar 3
                        

                    elif label=="horizontal_bar":# transverse bar: step depends on the height band

                        is_inside0 = cv2.pointPolygonTest(LEVLEL0_REGION.reshape((-1, 1, 2)), center_point, False)
                        is_inside1 = cv2.pointPolygonTest(LEVLEL1_REGION.reshape((-1, 1, 2)), center_point, False)
                        is_inside2 = cv2.pointPolygonTest(LEVLEL2_REGION.reshape((-1, 1, 2)), center_point, False)
                        is_inside3 = cv2.pointPolygonTest(LEVLEL3_REGION.reshape((-1, 1, 2)), center_point, False)
                        #print(is_inside)
                        if is_inside0>=0 :
                            platform_setup_steps_detect_num[3]+=1
                            if platform_setup_steps_detect_num[3]>3:
                                platform_setup_steps[3]+=1# step 4: transverse sweeping bar
                        elif is_inside1>=0:
                            platform_setup_steps_detect_num[5]+=1
                            if platform_setup_steps_detect_num[5]>3:
                                platform_setup_steps[5]+=1# step 6: transverse horizontal bar 1
                        elif is_inside2>=0:
                            platform_setup_steps_detect_num[7]+=1
                            if platform_setup_steps_detect_num[7]>3:
                                platform_setup_steps[7]+=1# step 8: transverse horizontal bar 2
                        elif is_inside3>=0:
                            platform_setup_steps_detect_num[13]+=1
                            if platform_setup_steps_detect_num[13]>3:
                                platform_setup_steps[13]+=1# step 14: transverse horizontal bar 3

                    elif label=="diagonal":# diagonal brace

                        is_inside = cv2.pointPolygonTest(DIAGONAL_REGION.reshape((-1, 1, 2)), center_point, False)
                        if is_inside>=0:
                            platform_setup_steps_detect_num[8]+=1
                            if platform_setup_steps_detect_num[8]>3:
                                platform_setup_steps[8]+=1# step 9: diagonal brace
                    

                    elif label=="ladder":# ladder
                        # step 10: ladder
                        platform_setup_steps_detect_num[9]+=1
                        if platform_setup_steps_detect_num[9]>3:
                            platform_setup_steps[9]+=1
                        
                    elif label=="scaffold":# scaffold board
                        # step 11: scaffold board
                        platform_setup_steps_detect_num[10]+=1
                        if platform_setup_steps_detect_num[10]>3:
                            platform_setup_steps[10]+=1


                    elif label=="toe_board":# toe board
                        # step 12: toe board
                        platform_setup_steps_detect_num[11]+=1
                        if platform_setup_steps_detect_num[11]>3:
                            platform_setup_steps[11]+=1

                #print(platform_setup_steps)
                #print('----------------------')
                # Publish per-step counts to Redis while enforcing sequential order:
                # a step is only (re)recorded while no LATER step key exists yet.
                for i, nums in enumerate(platform_setup_steps):
                    label = ""
                    if nums>0:
                        if all(not redis_client.exists(f"platform_setup_{j}") for j in range(i+2, len(platform_setup_steps))):
                            if redis_client.exists(f"platform_setup_{i+1}"):
                                # Step already known: refresh its count only.
                                redis_client.set(f"platform_setup_{i+1}", str(nums))
                                platform_setup_final_result[i] = nums
                            else:
                                # First sighting: record the count and the completion order.
                                redis_client.set(f"platform_setup_{i+1}", str(nums))
                                platform_setup_final_result[i] = nums
                                redis_client.rpush("platform_setup_order", f"platform_setup_{i+1}")
                            if i==13:
                                # Final step (14): always save an annotated snapshot.
                                save_time=datetime.now().strftime('%Y%m%d_%H%M')
                                imgpath = f"{SAVE_IMG_PATH}/platform_setup{i+1}_{save_time}.jpg"
                                post_path= f"{POST_IMG_PATH4}/platform_setup{i+1}_{save_time}.jpg"
                                annotated_frame = results[0].plot()
                                cv2.imwrite(imgpath, annotated_frame)
                                redis_client.set(f"platform_setup_{i+1}_img",post_path)

                        elif not platform_setup_steps_img[i]:
                                # A later step already exists, so this step is done:
                                # save its snapshot exactly once.
                                save_time=datetime.now().strftime('%Y%m%d_%H%M')
                                imgpath = f"{SAVE_IMG_PATH}/platform_setup{i+1}_{save_time}.jpg"
                                post_path= f"{POST_IMG_PATH4}/platform_setup{i+1}_{save_time}.jpg"
                                annotated_frame = results[0].plot()
                                cv2.imwrite(imgpath, annotated_frame)
                                redis_client.set(f"platform_setup_{i+1}_img",post_path)
                                platform_setup_steps_img[i]=True

                    # (disabled) earlier ordering logic, kept for reference:
                    # if nums > 0 and not redis_client.exists(f"platform_setup_{i+2}"):
                    #     if redis_client.exists(f"platform_setup_{i+1}"):
                    #         if all(platform_setup_final_result[j] == 0 for j in range(i+1, len(platform_setup_final_result))):# current step seen and no later step detected yet
                    #             redis_client.set(f"platform_setup_{i+1}", str(nums))
                    #             platform_setup_final_result[i] = nums
                    #         else:
                    #             save_time=datetime.now().strftime('%Y%m%d_%H%M')
                    #             imgpath = f"{SAVE_IMG_PATH}/platform_setup{i}_{save_time}.jpg"
                    #             post_path= f"{POST_IMG_PATH4}/platform_setup{i}_{save_time}.jpg"
                    #             annotated_frame = results[0].plot()
                    #             cv2.imwrite(imgpath, annotated_frame)
                    #             redis_client.set(f"platform_setup_{i}_img",post_path)
                    #         if i==13:
                    #             save_time=datetime.now().strftime('%Y%m%d_%H%M')
                    #             imgpath = f"{SAVE_IMG_PATH}/platform_setup{i}_{save_time}.jpg"
                    #             post_path= f"{POST_IMG_PATH4}/platform_setup{i}_{save_time}.jpg"
                    #             annotated_frame = results[0].plot()
                    #             cv2.imwrite(imgpath, annotated_frame)
                    #             redis_client.set(f"platform_setup_{i}_img",post_path)
                    #     else:
                    #         redis_client.set(f"platform_setup_{i+1}", str(nums))
                    #         platform_setup_final_result[i] = nums
                    #         redis_client.rpush("platform_setup_order", f"platform_setup_{i+1}")
                    #         if i != 0:                            
                    #             save_time=datetime.now().strftime('%Y%m%d_%H%M')
                    #             imgpath = f"{SAVE_IMG_PATH}/platform_setup{i}_{save_time}.jpg"
                    #             post_path= f"{POST_IMG_PATH4}/platform_setup{i}_{save_time}.jpg"
                    #             annotated_frame = results[0].plot()
                    #             # Draw the polygon on the frame
                    #             # cv2.polylines(annotated_frame, [LEVLEL3_REGION], True, (0, 255, 0), 5)
                    #             # cv2.polylines(annotated_frame, [LEVLEL2_REGION], True, (0, 255, 0), 5)
                    #             # cv2.polylines(annotated_frame, [LEVLEL1_REGION], True, (0, 255, 0), 5)
                    #             # cv2.polylines(annotated_frame, [LEVLEL0_REGION], True, (0, 255, 0), 5)

                    #             cv2.imwrite(imgpath, annotated_frame)
                    #             redis_client.set(f"platform_setup_{i}_img",post_path)





                # Signal the caller that detection has started (set once per frame).
                start_event.set()          


        else:
            # Break the loop if the end of the video is reached
            break

    # Release the video capture object and close the display window
    cap.release()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    del model
+
+
+
+        
+
+    

+ 130 - 0
platform_wearing_detect.py

@@ -0,0 +1,130 @@
+import time
+import torch
+import cv2
+import threading
+from datetime import datetime
+from ultralytics import YOLO
+from globals import stop_event,redis_client
+from config import SAVE_IMG_PATH,POST_IMG_PATH3,PLATFORM_WEARING_MODEL,PLATFORM_WEARING_VIDEO_SOURCES
+
+
+
def init_wearing_detection():
    """Reset the Redis state used by the platform wearing-detection pass."""
    # Drop any results left over from a previous run.
    redis_client.delete("platform_wearing_items_nums")
    redis_client.delete("platform_wearing_detection_img")
    # String flags start out as 'False' until the detector flips them.
    redis_client.set("platform_wearing_human_in_postion",'False')
    redis_client.set("platform_wearing_detection_img_flag",'False')
+
def start_wearing_detection(start_events):
    """Spawn one daemon worker per wearing-detection model and wait for all.

    Each worker receives its own threading.Event, appended to *start_events*
    so the caller can tell when detection has actually begun.
    """
    workers = []
    for weights in PLATFORM_WEARING_MODEL:
        ready = threading.Event()
        start_events.append(ready)
        worker = threading.Thread(
            target=process_video,
            args=(weights, PLATFORM_WEARING_VIDEO_SOURCES, ready),
            daemon=True,
        )
        workers.append(worker)
        worker.start()

    # Block until every detection thread has run to completion.
    for worker in workers:
        worker.join()
+
def process_video(model_path, video_source, start_event):
    """Run one wearing-detection model over the video stream and report via Redis.

    Two models share this entry point: PLATFORM_WEARING_MODEL[0] detects people
    (sets ``platform_wearing_human_in_postion``), PLATFORM_WEARING_MODEL[1] is
    an OBB model that counts belt/helmet/shoes items and optionally saves an
    annotated snapshot.  Runs until the stream ends or ``stop_event`` is set.
    """
    
    model = YOLO(model_path)
    cap = cv2.VideoCapture(video_source)
    while cap.isOpened():
    # Read a frame from the video
        success, frame = cap.read()
        
        if stop_event.is_set():# controller requested shutdown -> stop inference
            break

        if success:
            
            if cap.get(cv2.CAP_PROP_POS_FRAMES) % 10 != 0:# frame skipping: analyse every 10th frame
                continue

            x, y, w, h = 650, 0, 980, 1440# ROI that crops the central region of the frame

            # Crop the frame to the ROI
            frame = frame[y:y+h, x:x+w]
            # Run YOLOv8 inference on the frame
            if model_path==PLATFORM_WEARING_MODEL[0]:# yolov8n, used solely for person detection
                #model.classes = [0]# restrict to the person class
                results = model.predict(frame,conf=0.6,verbose=False,classes=[0])# classes=[0] keeps only persons; results is a generator

                for r in results:

                    ## the following are all tensors
                    boxes = r.boxes.xyxy  # bounding-box coordinates of all detections
                    confidences = r.boxes.conf  # confidence of each detection
                    classes = r.boxes.cls  # class index of each detection

                    
                    for i in range(len(boxes)):
                        confidence = confidences[i].item()
                        cls = int(classes[i].item())
                        label = model.names[cls]
                        
                        # NOTE(review): comparing get() to a str assumes the Redis
                        # client was created with decode_responses=True -- confirm.
                        if label=="person" and redis_client.get("platform_wearing_human_in_postion")=='False':
                            redis_client.set("platform_wearing_human_in_postion",'True')

                    start_event.set()  


            if model_path==PLATFORM_WEARING_MODEL[1]:
                results = model.predict(frame,conf=0.6,verbose=False)
                for r in results:
                    boxes=r.obb.xyxyxyxy
                    confidences=r.obb.conf
                    classes=r.obb.cls
                
                    # Raw per-frame counts of each wearable item class.
                    wearing_items={"belt" :0,
                            'helmet': 0,
                            'shoes': 0
                    }

                    for i in range(len(boxes)):
                        confidence = confidences[i].item()
                        cls = int(classes[i].item())
                        label = model.names[cls]

                        wearing_items[label] += 1

                    
                    # The safety harness shows up as four separate labels, so it is
                    # collapsed to a single worn/not-worn flag once enough are seen.
                    # NOTE(review): the original comment said "two or more" but the
                    # code requires MORE than 2 (i.e. >=3) -- confirm the threshold.
                    wearing_items["belt"] = 1 if wearing_items["belt"] > 2 else 0

                    # Publish [belt, helmet, shoes] counts as a fresh Redis list.
                    wearing_items_nums = [wearing_items["belt"],  wearing_items["helmet"], wearing_items["shoes"]]
                    if redis_client.exists("platform_wearing_items_nums"):
                        redis_client.delete("platform_wearing_items_nums")
                    redis_client.rpush("platform_wearing_items_nums", *wearing_items_nums)


                    # Save one annotated snapshot when requested and not yet taken.
                    if redis_client.get("platform_wearing_detection_img_flag")=='True' and not redis_client.exists("platform_wearing_detection_img"):
                        save_time=datetime.now().strftime('%Y%m%d_%H%M')
                        imgpath = f"{SAVE_IMG_PATH}/platform_wearing_detection_{save_time}.jpg"
                        post_path= f"{POST_IMG_PATH3}/platform_wearing_detection_{save_time}.jpg"
                        annotated_frame = results[0].plot()
                        cv2.imwrite(imgpath, annotated_frame)
                        redis_client.set("platform_wearing_detection_img",post_path)


                    start_event.set()    


        else:
            # Break the loop if the end of the video is reached
            break

        # Release the video capture object and close the display window
    cap.release()    
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    del model
+
+

+ 374 - 0
welding_exam_detect.py

@@ -0,0 +1,374 @@
+import cv2
+import torch
+from shapely.geometry import box, Polygon
+import threading
+from datetime import datetime
+from ultralytics import YOLO
+
+from config import WELDING_MODEL_PATHS,WELDING_VIDEO_SOURCES
+from utils.tool import IoU
+from globals import stop_event,redis_client,lock
+from config import WELDING_CH1_RTSP,WELDING_CH2_RTSP,WELDING_CH3_RTSP,WELDING_CH4_RTSP,WELDING_CH5_RTSP
+from config import  SAVE_IMG_PATH,POST_IMG_PATH2,WELDING_REGION1,WELDING_REGION2,WELDING_REGION3
+from globals import steps
+from globals import oil_barrel,main_switch,grounding_wire,welding_machine_switch,welding_components,mask,welding,gloves,sweep,sweep_detect_num,welding_detect_num
+
+
def init_welding_detection():
    """Reset welding-exam progress: clear all 13 step flags and stale Redis paths."""
    global steps
    # One boolean per exam step, all pending at the start of a run.
    steps = [False for _ in range(13)]
    redis_client.delete("welding_post_path")
+
def start_welding_detection(start_events):
    """Launch one daemon worker per (model, stream) pair and wait for all of them.

    Every worker gets its own threading.Event, appended to *start_events* so
    the caller can wait until each stream has begun producing detections.
    """
    workers = []
    for weights, stream in zip(WELDING_MODEL_PATHS, WELDING_VIDEO_SOURCES):
        ready = threading.Event()
        start_events.append(ready)
        worker = threading.Thread(target=process_video, args=(weights, stream, ready), daemon=True)
        workers.append(worker)
        worker.start()

    for worker in workers:
        worker.join()
        print("焊接子线程运行结束")  # logged once per finished welding worker
+
+
+
+# Function to process video with YOLO model
+def process_video(model_path, video_source,start_event):
+    # Load YOLO model
+    model = YOLO(model_path)
+    
+    cap = cv2.VideoCapture(video_source)
+    
+    while cap.isOpened():
+        if stop_event.is_set():#控制停止推理
+        #del model
+            break
+    # Read a frame from the video
+        success, frame = cap.read()
+
+        if success:
+            # Run YOLOv8 inference on the frame
+            if cap.get(cv2.CAP_PROP_POS_FRAMES) % 25 != 0:
+                continue
+            
+            if video_source == WELDING_CH2_RTSP or video_source == WELDING_CH4_RTSP:#这两个视频流用的分类模型,因为分类模型预处理较慢,需要手动resize
+                frame=cv2.resize(frame,(640,640))
+            
+            results = model.predict(frame,verbose=False,conf=0.4)
+
+            global steps
+            global oil_barrel,main_switch,grounding_wire,welding_machine_switch,welding_components,mask,welding,gloves,sweep
+            global sweep_detect_num,welding_detect_num
+
+            for r in results:
+
+                if video_source == WELDING_CH2_RTSP:#焊台
+                    if r.probs.top1conf>0.8:
+                        label=model.names[r.probs.top1]
+                        if label=='component':
+                            welding_components='in_position'#在焊台上
+                        if label=='empty':
+                            welding_components='not_in_position'#不在焊台上
+                        if label=='welding':
+                            if welding_detect_num<3:
+                                welding_detect_num+=1
+                            else:
+                                welding=True#表示有焊接
+                        if label=='sweep':
+                            if sweep_detect_num<3:
+                                sweep_detect_num+=1
+                            else:
+                                sweep=True#表示有打扫
+                    else:
+                        continue
+
+
+                #if video_source == WELDING_CH3_RTSP:#油桶
+                if video_source == WELDING_CH4_RTSP:#总开关
+                    if r.probs.top1conf>0.8:
+                        label=model.names[r.probs.top1]#获取最大概率的类别的label
+                        main_switch = "open" if label == "open" else "close"
+                    else:
+                        continue   
+
+
+                
+                if video_source == WELDING_CH1_RTSP or video_source==WELDING_CH3_RTSP or video_source==WELDING_CH5_RTSP:#焊接操作,进行目标检测
+                    ##下面这些都是tensor类型
+                    boxes = r.boxes.xyxy  # 提取所有检测到的边界框坐标
+                    confidences = r.boxes.conf  # 提取所有检测到的置信度
+                    classes = r.boxes.cls  # 提取所有检测到的类别索引
+
+
+                    # if video_source==WELDING_CH5_RTSP:
+                    #     grounding_wire=="disconnect"##单独每次设置为false,是为了防止没有检测到
+                        #welding_components=False
+
+                    if video_source==WELDING_CH3_RTSP:#当画面没有油桶时,给个初值为安全
+                        oil_barrel="safe"
+
+                    for i in range(len(boxes)):
+                        x1, y1, x2, y2 = boxes[i].tolist()
+                        confidence = confidences[i].item()
+                        cls = int(classes[i].item())
+                        label = model.names[cls]
+                        
+                        if label=="dump":#检测油桶
+                            x_center = (x1 + x2) / 2
+                            y_center = (y1 + y2) / 2
+                            center_point = (int(x_center), int(y_center))
+                            is_inside = cv2.pointPolygonTest(WELDING_REGION2.reshape((-1, 1, 2)), center_point, False)
+                            #print(is_inside)
+                            if is_inside>=0 :
+                                oil_barrel='danger' #表示油桶在危险区域
+                            else:
+                                oil_barrel='safe' 
+
+                        if label== "open" or "close":#检测焊机开关
+                            welding_machine_switch = label
+
+                        if label=="grounding_wire" :
+                            if confidence<0.6:
+                                continue
+                            rect_shapely = box(x1,y1, x2, y2)#使用shapely库创建的矩形
+                            WELDING_REGION3_shapely = Polygon(WELDING_REGION3.tolist()) #shapely计算矩形检测框和多边形的iou使用
+                            intersection = rect_shapely.intersection(WELDING_REGION3_shapely)
+                            # 计算并集
+                            union = rect_shapely.union(WELDING_REGION3_shapely)
+                            # 计算 IoU
+                            iou = intersection.area / union.area
+
+                            grounding_wire="connect" if iou>0 else "disconnect" #表示搭铁线连接在焊台上    
+
+                        # if label=="welding_components" :
+                        #     welding_components_xyxy=boxes[i].tolist()#实时检测焊件的位置
+                        #     # 计算检测框的中心点
+                        #     x_center = (x1 + x2) / 2
+                        #     y_center = (y1 + y2) / 2
+                        #     center_point = (int(x_center), int(y_center))
+                        #     # 检查中心点是否在多边形内
+                        #     is_inside = cv2.pointPolygonTest(REGION4.reshape((-1, 1, 2)), center_point, False)
+                        #     welding_components=True if is_inside>=0 else False #表示在焊料在焊台上
+
+                        
+                        if label=="mask":
+                            #mask=True #表示戴面罩
+                            iou=IoU(boxes[i].tolist(),WELDING_REGION1)
+                            mask=True if iou>0 else False #表示戴面罩
+
+
+                        if label=="gloves":
+                            #gloves_xyxy=boxes[i].tolist()#实时检测手套的位置
+                            if confidence>0.5:
+                                gloves=True#表示戴手套
+                            else:
+                                gloves=False
+
+                        # if label=="welding" :
+                        #     iou1=IoU(gloves_xyxy,boxes[i].tolist())#自定义的矩形iou方法,焊枪跟手套进行iou计算
+                        #     iou2=IoU(welding_components_xyxy,boxes[i].tolist())#自定义的矩形iou方法,焊枪跟焊件进行iou计算
+                        #     if iou1>0 and iou2>0:
+                        #         gloves=True#表示有手套焊接
+                        #     if iou1<=0 and iou2>0:
+                        #         welding=True#表示无手套焊接
+                        
+                        # if label=="sweep" :
+                        #     # 计算检测框的中心点
+                        #     x_center = (x1 + x2) / 2
+                        #     y_center = (y1 + y2) / 2
+                        #     center_point = (int(x_center), int(y_center))
+                        #     # 检查中心点是否在多边形内
+                        #     is_inside = cv2.pointPolygonTest(REGION4.reshape((-1, 1, 2)), center_point, False)
+                        #     sweep=True if is_inside>=0 else False #表示是否打扫
+                
+                if video_source ==WELDING_CH3_RTSP:
+                    if oil_barrel=="safe" and steps[0]==False:#排除危险源
+                        steps[0]=True
+                        save_time=datetime.now().strftime('%Y%m%d_%H%M')
+                        #save_time=datetime.now().strftime('%Y%m%d_%H')
+                        img_path = f"{SAVE_IMG_PATH}/step1_{save_time}.jpg"
+                        post_path = f"{POST_IMG_PATH2}/step1_{save_time}.jpg"
+                        redis_client.rpush("welding_post_path",post_path)
+                        annotated_frame = results[0].plot()
+                        cv2.imwrite(img_path, annotated_frame)
+                        print("step1完成")
+                        #post("1",post_path)
+                    
+                if video_source==WELDING_CH4_RTSP:
+                    if main_switch=="open" and steps[1]==False:
+                        steps[1]=True
+                        save_time=datetime.now().strftime('%Y%m%d_%H%M')
+                        #save_time=datetime.now().strftime('%Y%m%d_%H')
+                        img_path = f"{SAVE_IMG_PATH}/step2_{save_time}.jpg"
+                        post_path = f"{POST_IMG_PATH2}/step2_{save_time}.jpg"
+                        redis_client.rpush("welding_post_path",post_path)
+                        annotated_frame = results[0].plot()
+                        cv2.imwrite(img_path, annotated_frame)
+                        print("step2完成")
+                        #post("2",post_path)
+
+                    if main_switch=="close" and steps[12]==False and steps[1]:
+                        steps[12]=True
+                        save_time=datetime.now().strftime('%Y%m%d_%H%M')
+                        #save_time=datetime.now().strftime('%Y%m%d_%H')
+                        img_path = f"{SAVE_IMG_PATH}/step13_{save_time}.jpg"
+                        post_path = f"{POST_IMG_PATH2}/step13_{save_time}.jpg"
+                        redis_client.rpush("welding_post_path",post_path)
+                        annotated_frame = results[0].plot()
+                        cv2.imwrite(img_path, annotated_frame)
+                        print("step13完成")
+                        #post("13",post_path)
+                    
+                if video_source==WELDING_CH1_RTSP:
+                    if welding_machine_switch=="open" and steps[4]==False:
+                        steps[4]=True
+                        save_time=datetime.now().strftime('%Y%m%d_%H%M')
+                        #save_time=datetime.now().strftime('%Y%m%d_%H')
+                        img_path = f"{SAVE_IMG_PATH}/step5_{save_time}.jpg"
+                        post_path = f"{POST_IMG_PATH2}/step5_{save_time}.jpg"
+                        redis_client.rpush("welding_post_path",post_path)
+                        annotated_frame = results[0].plot()
+                        cv2.imwrite(img_path, annotated_frame)
+                        print("step5完成")
+                        #post("5",post_path)
+                    
+                    if welding_machine_switch=="close" and steps[8]==False and steps[4]:
+                        steps[8]=True
+                        save_time=datetime.now().strftime('%Y%m%d_%H%M')
+                        #save_time=datetime.now().strftime('%Y%m%d_%H')
+                        img_path = f"{SAVE_IMG_PATH}/step9_{save_time}.jpg"
+                        post_path = f"{POST_IMG_PATH2}/step9_{save_time}.jpg"
+                        redis_client.rpush("welding_post_path",post_path)
+                        annotated_frame = results[0].plot()
+                        cv2.imwrite(img_path, annotated_frame)
+                        print("step9完成")
+                        #post("9",post_path)
+                    
+                if video_source==WELDING_CH2_RTSP:
+                    if sweep==True and steps[11]==False:#打扫
+                        steps[11]=True
+                        save_time=datetime.now().strftime('%Y%m%d_%H%M')
+                        #save_time=datetime.now().strftime('%Y%m%d_%H')
+                        img_path = f"{SAVE_IMG_PATH}/step12_{save_time}.jpg"
+                        post_path = f"{POST_IMG_PATH2}/step12_{save_time}.jpg"
+                        redis_client.rpush("welding_post_path",post_path)
+                        annotated_frame = results[0].plot()
+                        cv2.imwrite(img_path, annotated_frame)
+                        print("step12完成")
+                        #post("12",post_path)
+                    
+                    if welding_components=='in_position' and steps[3]==False:
+                        steps[3]=True
+                        save_time=datetime.now().strftime('%Y%m%d_%H%M')
+                        #save_time=datetime.now().strftime('%Y%m%d_%H')
+                        img_path = f"{SAVE_IMG_PATH}/step4_{save_time}.jpg"
+                        post_path = f"{POST_IMG_PATH2}/step4_{save_time}.jpg"
+                        redis_client.rpush("welding_post_path",post_path)
+                        annotated_frame = results[0].plot()
+                        cv2.imwrite(img_path, annotated_frame)
+                        print("step4完成")
+                        #post("4",post_path)
+
+                    if welding_components=='not_in_position' and steps[10]==False and steps[3]:
+                        steps[10]=True
+                        save_time=datetime.now().strftime('%Y%m%d_%H%M')
+                        #save_time=datetime.now().strftime('%Y%m%d_%H')
+                        img_path = f"{SAVE_IMG_PATH}/step11_{save_time}.jpg"
+                        post_path = f"{POST_IMG_PATH2}/step11_{save_time}.jpg"
+                        redis_client.rpush("welding_post_path",post_path)
+                        annotated_frame = results[0].plot()
+                        cv2.imwrite(img_path, annotated_frame)
+                        print("step11完成")
+                        #post("11",post_path)
+                    
+                    if welding==True and steps[6]==False:
+                        steps[6]=True
+                        save_time=datetime.now().strftime('%Y%m%d_%H%M')
+                        #save_time=datetime.now().strftime('%Y%m%d_%H')
+                        img_path = f"{SAVE_IMG_PATH}/step7_{save_time}.jpg"
+                        post_path = f"{POST_IMG_PATH2}/step7_{save_time}.jpg"
+                        redis_client.rpush("welding_post_path",post_path)
+                        annotated_frame = results[0].plot()
+                        cv2.imwrite(img_path, annotated_frame)
+                        print("step7完成")
+                        #post("8",post_path)
+
+
+                if video_source==WELDING_CH5_RTSP:
+                    if gloves==True and steps[7]==False:
+                        steps[7]=True
+                        save_time=datetime.now().strftime('%Y%m%d_%H%M')
+                        #save_time=datetime.now().strftime('%Y%m%d_%H')
+                        img_path = f"{SAVE_IMG_PATH}/step8_{save_time}.jpg"
+                        post_path = f"{POST_IMG_PATH2}/step8_{save_time}.jpg"
+                        redis_client.rpush("welding_post_path",post_path)
+                        annotated_frame = results[0].plot()
+                        cv2.imwrite(img_path, annotated_frame)
+                        print("step8完成")
+                        #post("7",post_path)
+
+                    if grounding_wire=="connect" and steps[2]==False:
+                        steps[2]=True
+                        save_time=datetime.now().strftime('%Y%m%d_%H%M')
+                        #save_time=datetime.now().strftime('%Y%m%d_%H')
+                        img_path = f"{SAVE_IMG_PATH}/step3_{save_time}.jpg"
+                        post_path = f"{POST_IMG_PATH2}/step3_{save_time}.jpg"
+                        redis_client.rpush("welding_post_path",post_path)
+                        annotated_frame = results[0].plot()
+                        cv2.imwrite(img_path, annotated_frame)
+                        print("step3完成")
+                        #post("3",post_path)
+                            
+                    if grounding_wire=="disconnect" and steps[9]==False and steps[2]:
+                        steps[9]=True
+                        save_time=datetime.now().strftime('%Y%m%d_%H%M')
+                        #save_time=datetime.now().strftime('%Y%m%d_%H')
+                        img_path = f"{SAVE_IMG_PATH}/step10_{save_time}.jpg"
+                        post_path = f"{POST_IMG_PATH2}/step10_{save_time}.jpg"
+                        redis_client.rpush("welding_post_path",post_path)
+                        annotated_frame = results[0].plot()
+                        cv2.imwrite(img_path, annotated_frame)
+                        print("step10完成")
+                        #post("10",post_path)
+                    
+                            
+                    if mask==True and steps[5]==False:
+                        steps[5]=True
+                        save_time=datetime.now().strftime('%Y%m%d_%H%M')
+                        #save_time=datetime.now().strftime('%Y%m%d_%H')
+                        img_path = f"{SAVE_IMG_PATH}/step6_{save_time}.jpg"
+                        post_path = f"{POST_IMG_PATH2}/step6_{save_time}.jpg"
+                        redis_client.rpush("welding_post_path",post_path)
+                        annotated_frame = results[0].plot()
+                        cv2.imwrite(img_path, annotated_frame)
+                        print("step6完成")
+                        #post("6",post_path)
+            
+            
+                start_event.set()          
+                # Display the annotated frame
+            # cv2.imshow("YOLOv8 Inference", results[0].plot())
+
+            # # Break the loop if 'q' is pressed
+            # if cv2.waitKey(1) & 0xFF == ord("q"):
+            #     break
+
+        else:
+            # Break the loop if the end of the video is reached
+            break
+
+    # Release the video capture object and close the display window
+    cap.release()
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()
+    del model
+
+
+
+        
+
+    

+ 252 - 0
welding_reset_detect.py

@@ -0,0 +1,252 @@
+import cv2
+import torch
+import threading
+from shapely.geometry import box, Polygon
+
+from datetime import datetime
+from ultralytics import YOLO
+
+from config import WELDING_MODEL_PATHS,WELDING_VIDEO_SOURCES
+from config import WELDING_CH1_RTSP,WELDING_CH2_RTSP,WELDING_CH3_RTSP,WELDING_CH4_RTSP,WELDING_CH5_RTSP
+from config import SAVE_IMG_PATH,POST_IMG_PATH2,WELDING_REGION2,WELDING_REGION3
+from globals import oil_barrel_flag,main_switch_flag,ground_wire_flag,welding_components_flag,welding_machine_switch_flag
+from globals import lock,redis_client,stop_event
+
+
def init_rest_detection():
    """Reset the Redis state used by the welding reset-detection pass.

    Clears the queued image-post paths from any previous run and marks
    every per-component "image already saved" flag as 'False' so each
    non-reset component can be captured exactly once in the next run.
    """
    # Drop the pending list of image paths left over from a previous run.
    redis_client.delete("welding_reset_post_path")

    save_img_keys = (
        "welding_main_switch_save_img",
        "welding_oil_barrel_save_img",
        "welding_ground_wire_save_img",
        "welding_components_save_img",
        "welding_machine_switch_save_img",
    )
    for key in save_img_keys:
        redis_client.set(key, 'False')
+
+
def start_reset_detection(start_events):
    """Spawn one daemon detection worker per (model, video source) pair.

    For each pair from ``WELDING_MODEL_PATHS`` / ``WELDING_VIDEO_SOURCES``
    a fresh :class:`threading.Event` is created and appended to
    *start_events*; the worker sets it once its first inference pass has
    completed, letting the caller know detection is live.  This function
    blocks until every worker thread has exited.
    """
    threads = []
    for model_path, video_source in zip(WELDING_MODEL_PATHS, WELDING_VIDEO_SOURCES):
        event = threading.Event()
        start_events.append(event)
        thread = threading.Thread(target=process_video, args=(model_path, video_source, event))
        threads.append(thread)
        thread.daemon = True
        thread.start()

    # Wait for all threads to complete.
    for thread in threads:
        thread.join()
    # Bug fix: the completion message used to sit inside the join loop and
    # printed once per thread; report completion exactly once instead.
    print("复位检测子线程结束")
+
# Worker: run YOLO inference on one video stream and update reset-state flags.
def process_video(model_path, video_source, start_event):
    """Consume frames from *video_source* and track welding-reset state.

    Depending on which RTSP channel this worker handles it uses either a
    classification model (CH2: welding component on the bench, CH4: main
    switch) or a detection model (CH1: welding-machine switch, CH3: oil
    barrel, CH5: grounding wire).  Module-level flags record each
    component's state; the count of non-reset components is published to
    Redis under ``welding_reset_flag``, and one annotated frame per
    non-reset component is saved to disk and queued for upload.

    Sets *start_event* after each completed inference pass so the caller
    knows this worker is live.  Exits when the global ``stop_event`` is
    set or the stream ends.
    """
    # Load YOLO model
    model = YOLO(model_path)
    #results = model.predict(video_source,stream=True,verbose=False,conf=0.4,device='0')# results here would be a generator
    cap = cv2.VideoCapture(video_source)
    # Loop through the video frames
    while cap.isOpened():
        if stop_event.is_set():# external request to stop inference
            break
        # Read a frame from the video
        success, frame = cap.read()

        if success:
            if cap.get(cv2.CAP_PROP_POS_FRAMES) % 25 != 0:# frame skipping: only analyse every 25th frame
                continue

            if video_source == WELDING_CH2_RTSP or video_source == WELDING_CH4_RTSP:# these two streams use classification models whose preprocessing is slow, so resize manually
                frame=cv2.resize(frame,(640,640))

            # Run YOLOv8 inference on the frame
            results = model.predict(frame,verbose=False,conf=0.4)

            # NOTE(review): these names were imported via `from globals import ...`;
            # `global` rebinds this module's copies only, so the `globals` module
            # itself never sees the updates — confirm that is intended.
            global oil_barrel_flag,main_switch_flag,ground_wire_flag,welding_components_flag,welding_machine_switch_flag

    #with lock:
            for r in results:


                # The grounding wire is hard to detect, so ground_wire_flag
                # defaults to False whenever it is not seen in the frame.
                if video_source == WELDING_CH2_RTSP:
                    # annotated_frame = results[0].plot()
                    # cv2.namedWindow('main_switch', cv2.WINDOW_NORMAL)
                    # cv2.imshow('main_switch', annotated_frame)
                    # if cv2.waitKey(1) & 0xFF == ord("q"):
                    #     break  

                    if r.probs.top1conf>0.6:
                        label=model.names[r.probs.top1]
                        
                        welding_components_flag=True if label == "component" else False
                    else:
                        continue

                if video_source == WELDING_CH4_RTSP:
                    if r.probs.top1conf>0.6:
                        label=model.names[r.probs.top1]# label of the top-probability class
                        
                        main_switch_flag = True if label == "open" else False
                    else:
                        continue

                if video_source == WELDING_CH5_RTSP or video_source == WELDING_CH3_RTSP or video_source == WELDING_CH1_RTSP:
                    ## All of the following are tensors.
                    boxes = r.boxes.xyxy  # bounding-box coordinates of every detection
                    confidences = r.boxes.conf  # confidence of every detection
                    classes = r.boxes.cls  # class index of every detection

                    if video_source == WELDING_CH5_RTSP:
                        ground_wire_flag=False
                        #welding_components_flag=False
                    if video_source == WELDING_CH3_RTSP:
                        oil_barrel_flag=True

                    for i in range(len(boxes)):
                        x1, y1, x2, y2 = boxes[i].tolist()
                        confidence = confidences[i].item()
                        cls = int(classes[i].item())
                        label = model.names[cls]

                        if label=="dump":# oil-barrel detection
                            x_center = (x1 + x2) / 2
                            y_center = (y1 + y2) / 2
                            center_point = (int(x_center), int(y_center))
                            # >=0 means the barrel centre lies inside the danger polygon.
                            is_inside = cv2.pointPolygonTest(WELDING_REGION2.reshape((-1, 1, 2)), center_point, False)
                            #print(is_inside)
                            
                            if is_inside>=0 :
                                oil_barrel_flag=False # barrel is inside the danger zone
                            else:
                                oil_barrel_flag=True 

                        
                        if label== "open":# welding-machine switch detected as open
                            welding_machine_switch_flag = True



                        if label=="close":# welding-machine switch detected as closed
                            welding_machine_switch_flag = False


                        if label=="grounding_wire" :
                            rect_shapely = box(x1,y1, x2, y2)# detection box as a shapely rectangle
                            WELDING_REGION3_shapely = Polygon(WELDING_REGION3.tolist()) # bench polygon, for box-vs-polygon IoU
                            intersection = rect_shapely.intersection(WELDING_REGION3_shapely)
                                    # union area
                            union = rect_shapely.union(WELDING_REGION3_shapely)
                                    # IoU of the box and the polygon
                            iou = intersection.area / union.area


                            if iou>0 :
                                ground_wire_flag=True # grounding wire attached to the welding bench
                            else:
                                ground_wire_flag=False # not attached

                # Count of components currently flagged as NOT reset; published to Redis.
                flag_count = sum([oil_barrel_flag, main_switch_flag, ground_wire_flag, welding_components_flag, welding_machine_switch_flag])
                redis_client.set("welding_reset_flag",flag_count)
                #print("main switch",main_switch_flag)

                if video_source == WELDING_CH4_RTSP :# main-switch stream
                        if main_switch_flag and redis_client.get("welding_main_switch_save_img")=='False':
                            print("当前总开关没有复位")## pre-weld check image is saved only once
                            redis_client.set("welding_main_switch_save_img",'True')
                            #save_time=datetime.now().strftime('%Y%m%d_%H%M')
                            save_time=datetime.now().strftime('%Y%m%d_%H')
                            imgpath = f"{SAVE_IMG_PATH}/welding_resetStep2_{save_time}.jpg"
                            post_path = f"{POST_IMG_PATH2}/welding_resetStep2_{save_time}.jpg"
                            redis_client.rpush("welding_reset_post_path",post_path)# list of image paths awaiting upload; rpush appends on the right
                            annotated_frame = results[0].plot()
                            cv2.imwrite(imgpath, annotated_frame)



                                
                if video_source == WELDING_CH3_RTSP :## oil barrel {0: 'dump'}: save one image while it sits in the safe area
                    if oil_barrel_flag and redis_client.get("welding_oil_barrel_save_img")=='False':
                        print("当前油桶没有复位")
                        redis_client.set("welding_oil_barrel_save_img",'True')
                        #save_time=datetime.now().strftime('%Y%m%d_%H%M')
                        save_time=datetime.now().strftime('%Y%m%d_%H')
                        imgpath = f"{SAVE_IMG_PATH}/welding_resetStep1_{save_time}.jpg"
                        post_path = f"{POST_IMG_PATH2}/welding_resetStep1_{save_time}.jpg"
                        redis_client.rpush("welding_reset_post_path",post_path)
                        annotated_frame = results[0].plot()
                        cv2.imwrite(imgpath, annotated_frame)
                        #reset_post(welding_resetStep='1',path=post_path)
                        #post(step='2',path=post_path)

                if video_source == WELDING_CH1_RTSP:# welding-machine-switch stream
                        if welding_machine_switch_flag and redis_client.get("welding_machine_switch_save_img")=='False':
                            print("当前焊机开关没有复位")## pre-weld check image is saved only once
                            redis_client.set("welding_machine_switch_save_img",'True')
                            #save_time=datetime.now().strftime('%Y%m%d_%H%M')
                            save_time=datetime.now().strftime('%Y%m%d_%H')
                            imgpath = f"{SAVE_IMG_PATH}/welding_resetStep5_{save_time}.jpg"
                            post_path = f"{POST_IMG_PATH2}/welding_resetStep5_{save_time}.jpg"
                            redis_client.rpush("welding_reset_post_path",post_path)
                            #cv2.imwrite(imgpath, annotator.result())
                            annotated_frame = results[0].plot()
                            cv2.imwrite(imgpath, annotated_frame)
                            #reset_post(welding_resetStep='5',path=post_path)
                            #post(step='2',path=post_path)
            

                if video_source == WELDING_CH5_RTSP:
                    if ground_wire_flag and redis_client.get("welding_ground_wire_save_img")=='False':
                        print("搭铁线没有复位")
                        redis_client.set("welding_ground_wire_save_img",'True')
                        #save_time=datetime.now().strftime('%Y%m%d_%H%M')  
                        save_time=datetime.now().strftime('%Y%m%d_%H')
                        imgpath = f"{SAVE_IMG_PATH}/welding_resetStep3_{save_time}.jpg"
                        post_path = f"{POST_IMG_PATH2}/welding_resetStep3_{save_time}.jpg"
                        redis_client.rpush("welding_reset_post_path",post_path)
                        # result_image = annotator.result()
                        # cv2.polylines(result_image, [WELDING_REGION3.reshape(-1, 1, 2)], isClosed=True, color=(0, 255, 0), thickness=4)
                        # cv2.imwrite(imgpath, result_image)
                        annotated_frame = results[0].plot()
                        cv2.imwrite(imgpath, annotated_frame)
                        #reset_post(welding_resetStep="3",path=post_path)
                        #time.sleep(1)
                        #post(step='4',path=post_path)

                if video_source == WELDING_CH2_RTSP:
                    if welding_components_flag and redis_client.get("welding_components_save_img")=='False':
                        print("焊件没有复位")
                        redis_client.set("welding_components_save_img",'True')
                        #save_time=datetime.now().strftime('%Y%m%d_%H%M')  
                        save_time=datetime.now().strftime('%Y%m%d_%H')
                        imgpath = f"{SAVE_IMG_PATH}/welding_resetStep4_{save_time}.jpg"
                        post_path = f"{POST_IMG_PATH2}/welding_resetStep4_{save_time}.jpg"
                        redis_client.rpush("welding_reset_post_path",post_path)
                        # result_image = annotator.result()
                        # cv2.polylines(result_image, [REGION4.reshape(-1, 1, 2)], isClosed=True, color=(0, 255, 0), thickness=4)
                        # cv2.imwrite(imgpath, result_image)
                        annotated_frame = results[0].plot()
                        cv2.imwrite(imgpath, annotated_frame)

                        #reset_post(welding_resetStep='4',path=post_path)
                        #post(step='4',path=post_path)

                # Reaching here means this worker has finished one inference pass.
                start_event.set()
            


        else:
            # Break the loop if the end of the video is reached
            break

    # Release the video capture object and close the display window
    cap.release()
    # Release GPU memory held by the model (if CUDA is in use).
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    del model
    #cv2.destroyAllWindows()
+
+

+ 120 - 0
welding_wearing_detect.py

@@ -0,0 +1,120 @@
+import torch
+import cv2
+import threading
+from datetime import datetime
+from ultralytics import YOLO
+from globals import stop_event,redis_client
+from config import SAVE_IMG_PATH,POST_IMG_PATH1,WELDING_WEARING_MODEL,WELDING_WEARING_VIDEO_SOURCES
+
+
def init_wearing_detection():
    """Reset the Redis keys used by PPE (wearing) detection before a run."""
    # Person-presence flag: nobody has been detected yet.
    redis_client.set("welding_wearing_human_in_postion", 'False')
    # Remove stale per-item counts and any previously saved snapshot path.
    for stale_key in ("welding_wearing_items_nums", "welding_wearing_detection_img"):
        redis_client.delete(stale_key)
    # No snapshot has been requested or taken yet.
    redis_client.set("welding_wearing_detection_img_flag", 'False')
+
def start_wearing_detection(start_events):
    """Run one detection worker per wearing-detection model and wait for all.

    Every worker shares the same video source; each receives a fresh
    :class:`threading.Event` (appended to *start_events*) that it sets
    after its first completed inference pass.
    """
    workers = []
    for weights_path in WELDING_WEARING_MODEL:
        ready = threading.Event()
        start_events.append(ready)
        worker = threading.Thread(
            target=process_video,
            args=(weights_path, WELDING_WEARING_VIDEO_SOURCES, ready),
            daemon=True,
        )
        workers.append(worker)
        worker.start()

    # Block until every worker has exited.
    for worker in workers:
        worker.join()
+
def process_video(model_path, video_source, start_event):
    """Run one wearing-detection model over the shared video stream.

    Two model roles are distinguished by *model_path*:
    ``WELDING_WEARING_MODEL[0]`` (a person detector) only flags whether a
    person is present in the cropped region, while the second model counts
    PPE items (pants/jacket/helmet/gloves/shoes) per frame and publishes
    the counts to Redis.  When the ``welding_wearing_detection_img_flag``
    key is 'True' and no snapshot exists yet, one annotated frame is
    saved and its upload path stored in Redis.

    Sets *start_event* after each completed inference pass; exits when
    the global ``stop_event`` is set or the stream ends.
    """
    
    model = YOLO(model_path)
    cap = cv2.VideoCapture(video_source)
    while cap.isOpened():
    # Read a frame from the video
        success, frame = cap.read()
        if stop_event.is_set():# external request to stop inference
            break
        if success:
            
            if cap.get(cv2.CAP_PROP_POS_FRAMES) % 10 != 0:# frame skipping: only analyse every 10th frame
                continue

            x, y, w, h = 786, 0, 385, 1000# crop to the centre region of the frame

            # Crop the frame to the ROI
            cropped_frame = frame[y:y+h, x:x+w]
            # Run YOLOv8 inference on the frame
            if model_path==WELDING_WEARING_MODEL[0]:# yolov8s, dedicated to detecting people
                #model.classes = [0]# restrict detection to the single "person" class
                results = model.predict(cropped_frame,conf=0.6,verbose=False,classes=[0])# results here is a generator
            else:
                results = model.predict(cropped_frame,conf=0.6,verbose=False)
            #while not stop_event.is_set():

            for r in results:

                ## All of the following are tensors.
                boxes = r.boxes.xyxy  # bounding-box coordinates of every detection
                confidences = r.boxes.conf  # confidence of every detection
                classes = r.boxes.cls  # class index of every detection
                ### PPE counters; defined per frame (not outside the loop) because they must be reset every frame
                wearing_items={"pants" :0,
                        'jacket': 0,
                        'helmet': 0,
                        'gloves': 0,
                        'shoes': 0
                }
                
                for i in range(len(boxes)):
                    x1, y1, x2, y2 = boxes[i].tolist()
                    confidence = confidences[i].item()
                    cls = int(classes[i].item())
                    label = model.names[cls]

                    
                    # if x1 < WEAR_DETECTION_AREA[0] or y1 < WEAR_DETECTION_AREA[1] or x2 > WEAR_DETECTION_AREA[2] or y2 > WEAR_DETECTION_AREA[3]:
                    #     continue  # skip detection boxes outside the area
                    
                    if model_path==WELDING_WEARING_MODEL[0]:# yolov8s, dedicated to detecting people
                        if label=="person" and redis_client.get("welding_wearing_human_in_postion")=='False':
                            redis_client.set("welding_wearing_human_in_postion",'True')
                    else:
                        # NOTE(review): assumes the PPE model only emits the five
                        # keys above — any other label would raise KeyError; confirm.
                        wearing_items[label] += 1


                if model_path==WELDING_WEARING_MODEL[1]:
                    # Publish the per-item counts as a fixed-order Redis list.
                    welding_wearing_items_nums = [wearing_items["pants"], wearing_items["jacket"], wearing_items["helmet"], wearing_items["gloves"], wearing_items["shoes"]]
                    if redis_client.exists("welding_wearing_items_nums"):
                        redis_client.delete("welding_wearing_items_nums")
                    redis_client.rpush("welding_wearing_items_nums", *welding_wearing_items_nums)


                    # Save one annotated snapshot when requested and none exists yet.
                    if redis_client.get("welding_wearing_detection_img_flag")=='True' and not redis_client.exists("welding_wearing_detection_img"):
                        save_time=datetime.now().strftime('%Y%m%d_%H%M')
                        imgpath = f"{SAVE_IMG_PATH}/welding_wearing_detection_{save_time}.jpg"
                        post_path= f"{POST_IMG_PATH1}/welding_wearing_detection_{save_time}.jpg"
                        annotated_frame = results[0].plot()
                        cv2.imwrite(imgpath, annotated_frame)
                        redis_client.set("welding_wearing_detection_img",post_path)


                start_event.set()    


        else:
            # Break the loop if the end of the video is reached
            break

        # Release the video capture object and close the display window
    cap.release()
    # Release GPU memory held by the model (if CUDA is in use).
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
    del model
+
+