
Improve the code

leon 4 weeks ago
parent
commit
8aa29ae33a

+ 20 - 0
src/graph/graph.cpp

@@ -0,0 +1,20 @@
+#include "graph/graph.hpp"
+#include <fstream>
+
+namespace Graph
+{
+
+using json = nlohmann::json;
+
+void read_models()
+{
+    // TODO: load shared model definitions (stub for now)
+}
+
+void Graph::create_from_json(const std::string& json_path)
+{
+    nlohmann::json config;
+    std::ifstream file(json_path);
+}
+
+}   // namespace Graph
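create_from_json is still a stub. A minimal sketch of where it seems headed, based on the removed header implementation and the new workspace/demo.json schema below; the string-to-ModelType mapping, the error handling, and the use of the load() dispatcher from src/main.cpp are assumptions, and only "YOLO11" is mapped:

    void Graph::create_from_json(const std::string& json_path)
    {
        std::ifstream file(json_path);
        if (!file.is_open())
            throw std::runtime_error("Failed to open JSON file: " + json_path);

        nlohmann::json config = nlohmann::json::parse(file);

        // One shared Infer per model id; Inference nodes reference these via "model_id".
        for (auto& [model_id, m] : config.value("models", nlohmann::json::object()).items())
        {
            ModelType type = ModelType::YOLO11;  // assumed: the only mapping handled so far
            auto names = m.value("names", std::vector<std::string>{});
            shared_models_[model_id] = load(m.value("model_path", ""),
                                            type,
                                            names,
                                            m.value("gpu_id", 0),
                                            m.value("confidence_threshold", 0.25f),
                                            m.value("nms_threshold", 0.45f));
        }
        // TODO: build and link the per-pipeline nodes from config["pipelines"].
    }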

+ 10 - 428
src/graph/graph.hpp

@@ -4,6 +4,7 @@
 #include <iostream>
 #include <vector>
 #include <string>
+#include <unordered_map>
 #include "common/json.hpp"
 #include "nodes/base/base.hpp"
 #include "nodes/stream/streamNode.hpp"
@@ -18,205 +19,18 @@
 namespace Graph
 {
 
-// json format
-/*
-JSON design principles
-1. Infer nodes within the same pipeline share one model
-2. Infer nodes in different pipelines do not share one model
-3. In effect every pipeline is parallel; they only share the same model pointer at the infer nodes
-
-
-1. Every pipeline is parallel; there is no crossover between them
-2. The nodes within each pipeline are connected in order
-3. Pipelines can share a model pointer; infer nodes share a model by having the model pointer set on them
-4. Below is my design sketch for reference. Please help me revise and improve it; some parts feel off, but I cannot think of a better approach
-{
-  // ---------------------------------------------------------------------------
-  // Global resources: resources shared across pipelines, such as models
-  // ---------------------------------------------------------------------------
-  "shared_resources": {
-    "models": {
-      // Define a model and give it a unique ID, e.g. "yolo_model_main"
-      "yolo_model_main": {
-        "model_path": "model/yolo11s.engine", // path to the model file
-        "model_type": "YOLO11",             // model type
-        "names": ["person", "car", "close", "open"], // class names
-        // Note: the GPU ID is usually tied to the inference instance rather than the model itself.
-        // If model loading is hard-bound to a GPU it can live here, but specifying it on the inference-node instance is more common.
-        "gpu_id": 0,
-        "confidence_threshold": 0.25,
-        "nms_threshold": 0.45
-      }
-      // further models can be added here
-      // "another_model": { ... }
-    }
-    // other shared resources could be added, e.g. tracker configuration templates
-    // "tracker_configs": { ... }
-  },
-
-  // ---------------------------------------------------------------------------
-  // Pipelines: an array containing every independent pipeline
-  // ---------------------------------------------------------------------------
-  "pipelines": [
-    // --- Pipeline 0 ---
-    {
-      "pipeline_id": "pipeline_0", // unique identifier for this pipeline
-      "description": "Process the video stream from camera 0", // optional description
-      // a node array makes the execution order explicit
-      "nodes": [
-        {
-          "node_id": "src_0",          // unique ID of this node instance
-          "node_type": "Source",       // node type (a more generic name)
-          "params": {                  // node-specific parameters
-            "stream_url": "rtsp://xxxx_cam0", // the concrete URL
-            "gpu_id": 0,               // GPU used for decoding
-            "decode_type": "GPU",
-            "skip_frame": 10
-          }
-        },
-        {
-          "node_id": "infer_0",        // unique ID of this node instance
-          "node_type": "Inference",    // node type
-          "params": {
-            // reference the shared model defined above via model_id
-            "model_id": "yolo_model_main"
-          }
-        },
-        {
-          "node_id": "track_0",        // unique ID of this node instance
-          "node_type": "Tracker",      // node type
-          "params": {
-            "track_name": "person",    // target class to track
-            "track_frame": 30,         // frame-count parameter (exact meaning depends on the implementation; likely max lost frames)
-            "track_distance": 30       // distance parameter (meaning and unit depend on the implementation)
-          }
-        },
-        {
-          "node_id": "analyze_0",      // unique ID of this node instance
-          "node_type": "Analyzer",     // node type
-          "params": {
-            // analyzer-specific parameters (if needed)
-          }
-        },
-        {
-          "node_id": "draw_0",         // unique ID of this node instance
-          "node_type": "Drawer",       // node type
-          "params": {
-            // drawer-specific parameters (if needed)
-          }
-        },
-        {
-          "node_id": "record_0",       // unique ID of this node instance
-          "node_type": "Recorder",     // node type
-          "params": {
-            // a distinct output path per pipeline is recommended
-            "record_path": "result/result_pipeline0.mp4"
-          }
-        }
-      ] // end of nodes for pipeline_0
-    }, // end of pipeline_0
-
-    // --- Pipeline 1 ---
-    {
-      "pipeline_id": "pipeline_1", // unique identifier
-      "description": "Process the video stream from camera 1",
-      "nodes": [
-        {
-          "node_id": "src_1",          // unique node ID
-          "node_type": "Source",
-          "params": {
-            "stream_url": "rtsp://xxxx_cam1", // a different URL
-            "gpu_id": 0,               // may be the same GPU or a different one
-            "decode_type": "GPU",
-            "skip_frame": 10
-          }
-        },
-        {
-          "node_id": "infer_1",        // unique node ID
-          "node_type": "Inference",
-          "params": {
-            // reference the same shared model
-            "model_id": "yolo_model_main"
-
-          }
-        },
-        {
-          "node_id": "track_1",        // unique node ID
-          "node_type": "Tracker",
-          // even with identical parameters this is an independent tracker instance handling data from src_1
-          "params": {
-            "track_name": "person",
-            "track_frame": 30,
-            "track_distance": 30
-          }
-        },
-        {
-          "node_id": "analyze_1",      // unique node ID
-          "node_type": "Analyzer",
-          "params": {}
-        },
-        {
-          "node_id": "draw_1",         // unique node ID
-          "node_type": "Drawer",
-          "params": {}
-        },
-        {
-          "node_id": "record_1",       // unique node ID
-          "node_type": "Recorder",
-          "params": {
-            "record_path": "result/result_pipeline1.mp4" // a different output path
-          }
-        }
-      ] // end of nodes for pipeline_1
-    } // end of pipeline_1
-
-    // More pipeline definitions can be added as needed ...
-
-  ] // end of pipelines
-}
-
-void load_from_json(const std::string& json_path)
-    {
-        std::ifstream ifs(json_path);
-        auto json_data = nlohmann::json::parse(ifs);
-        auto shared_resources = json_data["shared_resources"];
-        auto models = shared_resources["models"];
-        for (auto& model : models.items())
-        {
-            std::string model_id = model.key();
-            std::string model_path = model.value()["model_path"];
-            std::string model_type = model.value()["model_type"];
-            std::vector<std::string> names = model.value()["names"];
-            int gpu_id = model.value()["gpu_id"];
-            float confidence_threshold = model.value()["confidence_threshold"];
-            float nms_threshold = model.value()["nms_threshold"];
-            std::shared_ptr<Infer> infer = load(model_path, model_type, names, gpu_id, confidence_threshold, nms_threshold);
-            shared_models_[model_id] = infer;
-        }
-
-        auto pipelines = json_data["pipelines"];
-
-    }
-
-*/
-
-namespace Graph
-{
-    // --- Internal structure storing a single configured pipeline ---
-    struct PipelineInstance {
-        std::string pipeline_id;
-        std::string description;
-        std::vector<std::shared_ptr<GNode::BaseNode>> nodes; // only this pipeline's nodes, in order
-    };
+struct PipelineInstance {
+    std::string pipeline_id;
+    std::string description;
+    std::vector<std::shared_ptr<GNode::BaseNode>> nodes; // only this pipeline's nodes, in order
+};
 
 class Graph
 {
+
 public:
     Graph() = default;
-    ~Graph() {
-        // add cleanup logic here if needed, e.g. stopping all nodes
-        std::cout << "Graph destructor called. Consider adding node stopping logic here." << std::endl;
-    }
+    ~Graph() { }
 
 
     // Accessor for the configured pipelines (read-only)
@@ -230,243 +44,11 @@ public:
     }
 
 
-    void load_from_json(const std::string& json_path)
-    {
-        std::cout << "Loading graph configuration from: " << json_path << std::endl;
-        std::ifstream ifs(json_path);
-        if (!ifs.is_open()) {
-            throw std::runtime_error("Failed to open JSON file: " + json_path);
-        }
-
-        nlohmann::json json_data;
-        try {
-            json_data = nlohmann::json::parse(ifs);
-        } catch (const nlohmann::json::parse_error& e) {
-            throw std::runtime_error("Failed to parse JSON: " + std::string(e.what()));
-        }
-
-        // --- 1. Load shared models ---
-        std::cout << "\n--- Loading Shared Models ---" << std::endl;
-        shared_models_.clear(); // drop any previously loaded models
-        try {
-            // access the JSON structure safely, with defaults
-            const auto models_json = json_data.value("shared_resources", nlohmann::json::object()).value("models", nlohmann::json::object());
-
-            for (auto& [model_id, model_config] : models_json.items())
-            {
-                std::cout << "Processing shared model definition: " << model_id << std::endl;
-                std::string model_path = model_config.value("model_path", "");
-                std::string model_type_str = model_config.value("model_type", ""); // the model-type string from the JSON
-                std::vector<std::string> names;
-                 if(model_config.contains("names") && model_config["names"].is_array()){
-                     names = model_config["names"].get<std::vector<std::string>>();
-                 } else {
-                      std::cerr << "Warning: Model '" << model_id << "' missing 'names' array." << std::endl;
-                 }
-                int gpu_id = model_config.value("gpu_id", 0);
-                float confidence_threshold = model_config.value("confidence_threshold", 0.25f);
-                float nms_threshold = model_config.value("nms_threshold", 0.45f);
-
-                if (model_path.empty() || model_type_str.empty()) {
-                    std::cerr << "Warning: Skipping model '" << model_id << "' due to missing 'model_path' or 'model_type'." << std::endl;
-                    shared_models_[model_id] = nullptr;
-                    continue;
-                }
-
-                // call the model-loading function (global, or a static/member)
-                std::shared_ptr<GNode::Infer> infer_ptr = ::load(model_path, model_type_str, names, gpu_id, confidence_threshold, nms_threshold);
-                shared_models_[model_id] = infer_ptr;
-
-                if (!infer_ptr) {
-                    std::cerr << "ERROR: Model '" << model_id << "' failed to load." << std::endl;
-                    // decide here whether to throw and abort, as needed
-                    // throw std::runtime_error("Failed to load critical model: " + model_id);
-                } else {
-                    std::cout << "Successfully loaded shared model: " << model_id << std::endl;
-                }
-            }
-        } catch (const std::exception& e) {
-             std::cerr << "ERROR during shared model loading: " << e.what() << std::endl;
-             throw; // rethrow
-        }
-
-        // --- 2. Load and build the pipelines ---
-        std::cout << "\n--- Configuring Pipelines ---" << std::endl;
-        configured_pipelines_.clear(); // drop any previously configured pipelines
-
-        try {
-            const auto pipelines_json = json_data.value("pipelines", nlohmann::json::array());
-
-            for (const auto& pipeline_config : pipelines_json) {
-                std::string pipeline_id = pipeline_config.value("pipeline_id", "unknown_pipeline");
-                std::cout << "\nConfiguring pipeline: " << pipeline_id << std::endl;
-
-                PipelineInstance current_pipeline; // instance struct for the current pipeline
-                current_pipeline.pipeline_id = pipeline_id;
-                current_pipeline.description = pipeline_config.value("description", "");
-                // current_pipeline.nodes is filled in below
-
-                const auto nodes_json = pipeline_config.value("nodes", nlohmann::json::array());
-                if (nodes_json.empty()) {
-                     std::cerr << "Warning: Pipeline '" << pipeline_id << "' has no nodes defined." << std::endl;
-                     continue;
-                }
-
-                // --- 2.1 Create every node instance of this pipeline ---
-                std::vector<std::shared_ptr<GNode::BaseNode>> temp_nodes_for_this_pipeline; // temporary list holding this pipeline's nodes
-                for (const auto& node_config : nodes_json) {
-                    std::string node_id = node_config.value("node_id", "unknown_node");
-                    std::string node_type = node_config.value("node_type", "");
-                    const auto params = node_config.value("params", nlohmann::json::object());
-
-                    std::shared_ptr<GNode::BaseNode> node_ptr = nullptr;
-
-                    if (node_type.empty()) {
-                         std::cerr << "ERROR: Node in pipeline '" << pipeline_id << "' is missing 'node_type'. Skipping node." << std::endl;
-                         continue;
-                    }
-
-                    std::cout << "  Creating node: " << node_id << " (Type: " << node_type << ")" << std::endl;
-
-                    // create the node by type and append it to the *temporary* list
-                    if (node_type == "Source") {
-                        std::string stream_url = params.value("stream_url", "");
-                        int gpu_id = params.value("gpu_id", 0);
-                        std::string decode_type = params.value("decode_type", "GPU");
-                        int skip_frame = params.value("skip_frame", 0);
-                        if (stream_url.empty()) throw std::runtime_error("Source node '" + node_id + "' missing 'stream_url'.");
-
-                        auto src_node = std::make_shared<GNode::StreamNode>(node_id, stream_url, gpu_id, decode_type);
-                        src_node->set_skip_frame(skip_frame);
-                        node_ptr = src_node;
-                    }
-                    else if (node_type == "Inference") {
-                        std::string model_id = params.value("model_id", "");
-                        if (model_id.empty()) throw std::runtime_error("Inference node '" + node_id + "' missing 'model_id'.");
-
-                        auto it = shared_models_.find(model_id);
-                        if (it == shared_models_.end()) throw std::runtime_error("Node '" + node_id + "' references unknown model_id '" + model_id + "'.");
-                        if (!it->second) throw std::runtime_error("Node '" + node_id + "' references model '" + model_id + "' which failed to load or is null.");
-
-                        auto infer_node = std::make_shared<GNode::InferNode>(node_id);
-
-                        // --- Model-type conversion ---
-                        // fetch the model-type string from shared_resources
-                        std::string model_type_str_from_shared = json_data["shared_resources"]["models"][model_id].value("model_type", "UNKNOWN");
-                        GNode::ModelType model_enum_type = GNode::ModelType::YOLO11; // default
-                        if (model_type_str_from_shared == "YOLO11") { // map the string to the enum here
-                            model_enum_type = GNode::ModelType::YOLO11;
-                        } // else if (model_type_str == "ANOTHER_TYPE") { model_enum_type = GNode::ModelType::ANOTHER_ENUM; }
-                        else {
-                             std::cerr << "Warning: Unknown or unhandled model_type string '" << model_type_str_from_shared
-                                       << "' for model_id '" << model_id << "'. Using default." << std::endl;
-                        }
-                        // --- End model-type conversion ---
-
-                        infer_node->set_model_instance(it->second, model_enum_type);
-                        node_ptr = infer_node;
-                    }
-                    else if (node_type == "Tracker") {
-                        std::string track_name = params.value("track_name", "person");
-                        int track_frame = params.value("track_frame", 30);
-                        int track_distance = params.value("track_distance", 30);
-                        node_ptr = std::make_shared<GNode::TrackNode>(node_id, track_name, track_frame, track_distance);
-                    }
-                    else if (node_type == "Analyzer") {
-                        node_ptr = std::make_shared<GNode::AnalyzeNode>(node_id);
-                        // parse Analyzer-specific parameters (if defined in params)
-                        // example: int threshold = params.value("threshold", 10);
-                    }
-                    else if (node_type == "Drawer") {
-                        node_ptr = std::make_shared<GNode::DrawNode>(node_id);
-                        // parse Drawer-specific parameters
-                    }
-                    else if (node_type == "Recorder") {
-                        std::string record_path = params.value("record_path", "");
-                        if (record_path.empty()) throw std::runtime_error("Recorder node '" + node_id + "' missing 'record_path'.");
-                        auto record_node = std::make_shared<GNode::RecordNode>(node_id);
-                        record_node->set_record_path(record_path);
-                        node_ptr = record_node;
-                    }
-                    // else if (node_type == "HttpPush") { // if an HttpPush node is needed
-                    //     std::string url = params.value("url", "");
-                    //     if (url.empty()) throw std::runtime_error("HttpPush node '" + node_id + "' missing 'url'.");
-                    //     node_ptr = std::make_shared<GNode::HttpPushNode>(node_id, url); // assuming this constructor signature
-                    // }
-                     else {
-                         std::cerr << "ERROR: Unknown node_type '" << node_type << "' for node '" << node_id << "'. Skipping." << std::endl;
-                         continue; // skip unknown types
-                     }
-
-                    // append the successfully created node to this pipeline's temporary list
-                    temp_nodes_for_this_pipeline.push_back(node_ptr);
-                    std::cout << "    Added node '" << node_id << "' to temporary list for pipeline '" << pipeline_id << "'" << std::endl;
-
-                } // --- end of the node-creation loop for this pipeline ---
-
-
-                // --- 2.2 Link the nodes within this pipeline ---
-                std::cout << "  Linking nodes for pipeline: " << pipeline_id << std::endl;
-                if (temp_nodes_for_this_pipeline.size() > 1) {
-                    for (size_t i = 0; i < temp_nodes_for_this_pipeline.size() - 1; ++i) {
-                        // link consecutive nodes in the temporary list with GNode::LinkNode
-                        GNode::LinkNode(temp_nodes_for_this_pipeline[i], temp_nodes_for_this_pipeline[i + 1]);
-                    }
-                } else {
-                     std::cout << "  Pipeline '" << pipeline_id << "' has " << temp_nodes_for_this_pipeline.size() << " node(s), linking skipped/not needed." << std::endl;
-                 }
-
-                // --- 2.3 Store the node list in a PipelineInstance and keep it ---
-                if (!temp_nodes_for_this_pipeline.empty()) {
-                    current_pipeline.nodes = std::move(temp_nodes_for_this_pipeline); // move the node list
-                    configured_pipelines_.push_back(std::move(current_pipeline)); // keep the configured pipeline instance
-                    std::cout << "Successfully configured and stored pipeline: " << pipeline_id << std::endl;
-                } else {
-                     std::cerr << "Warning: Pipeline '" << pipeline_id << "' ended up with no valid nodes. Skipping storage." << std::endl;
-                }
-
-            } // --- end of the configuration loop over all pipelines ---
-
-        } catch (const std::exception& e) {
-             std::cerr << "ERROR during pipeline configuration: " << e.what() << std::endl;
-             // reset state
-             configured_pipelines_.clear();
-             shared_models_.clear();
-             throw; // rethrow
-        }
-
-        // --- 3. Start the nodes of every pipeline ---
-        std::cout << "\n--- Starting All Configured Pipelines ---" << std::endl;
-        if (configured_pipelines_.empty()) {
-            std::cout << "No pipelines were configured to start." << std::endl;
-            return;
-        }
-
-        for (auto& pipeline : configured_pipelines_) {
-            std::cout << "Starting nodes for pipeline: " << pipeline.pipeline_id << std::endl;
-            // start this pipeline's nodes from back to front
-            for (int i = static_cast<int>(pipeline.nodes.size()) - 1; i >= 0; --i) {
-                if (pipeline.nodes[i]) {
-                    // assumes GNode::BaseNode provides a start() method
-                    pipeline.nodes[i]->start();
-                }
-            }
-        }
-
-        std::cout << "\nGraph configuration loading and pipeline starting finished." << std::endl;
-        std::cout << "Total shared models loaded (or attempted): " << shared_models_.size() << std::endl;
-        std::cout << "Total pipelines configured and started: " << configured_pipelines_.size() << std::endl;
-    }
+    void create_from_json(const std::string& json_path);
 
 private:
-    // shared model pointers: <model_id, model_instance_ptr>
-    std::map<std::string, std::shared_ptr<GNode::Infer>> shared_models_;
-
-    // all configured pipeline instances
+    std::unordered_map<std::string, std::shared_ptr<Infer>> shared_models_;
     std::vector<PipelineInstance> configured_pipelines_;
-
-    // this global nodes_ list no longer fits the multi-pipeline design; remove it or comment it out
-    // std::vector<std::shared_ptr<GNode::BaseNode>> nodes_;
 };
 
 }   // namespace Graph
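With the implementation moving out of the header, driving the graph from main reduces to something like this minimal sketch (assuming create_from_json will also link and start the nodes, as the removed header code did):

    #include "graph/graph.hpp"

    int main()
    {
        Graph::Graph graph;
        graph.create_from_json("workspace/demo.json");
        // Pipelines run on their own node threads; block here until shutdown.
        return 0;
    }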

+ 13 - 0
src/infer/infer.hpp

@@ -23,6 +23,19 @@ public:
     virtual Result forward(const tensor::Image &image, int slice_width, int slice_height, float overlap_width_ratio, float overlap_height_ratio, void *stream = nullptr) = 0;
     virtual Result forward(const tensor::Image &image, void *stream = nullptr) = 0;
 
+
+    virtual int get_gpu_id() = 0;
+
+    // No-op defaults: models without these thresholds (e.g. the depth model)
+    // can ignore them; YoloModelImpl overrides both under its mutex.
+    virtual void set_nms_threshold(float nms_threshold)
+    {
+    }
+
+    virtual void set_conf_threshold(float conf_threshold)
+    {
+    }
+
 protected:
     std::mutex mutex_;
 };
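These hooks let callers re-tune a shared model at runtime. A short usage sketch (the model path and values are illustrative; the YOLO overrides shown further below take mutex_, so this is safe while pipelines are running):

    std::shared_ptr<Infer> model = load("model/yolo11s.engine", ModelType::YOLO11,
                                        {"person", "car", "close", "open"}, 0, 0.25f, 0.45f);
    model->set_conf_threshold(0.4f);   // tighten detections on the fly
    model->set_nms_threshold(0.5f);    // no-op for models that don't override it
    int gpu = model->get_gpu_id();     // lets nodes pin their thread to the load-time device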

+ 5 - 4
src/infer/trt/depth/depth.cu

@@ -9,9 +9,10 @@ namespace depth
 {
 
 
-bool DepthModelImpl::load(const std::string &engine_file)
+bool DepthModelImpl::load(const std::string &engine_file, int gpu_id)
 {
     trt_ = TensorRT::load(engine_file);
+    device_id_ = gpu_id;
     if (trt_ == nullptr) return false;
 
     trt_->print();
@@ -155,10 +156,10 @@ Result DepthModelImpl::forward(const tensor::Image &image, void *stream)
 }
 
 
-Infer *loadraw(const std::string &engine_file) 
+Infer *loadraw(const std::string &engine_file, int gpu_id) 
 {
     DepthModelImpl *impl = new DepthModelImpl();
-    if (!impl->load(engine_file)) 
+    if (!impl->load(engine_file, gpu_id)) 
     {
         delete impl;
         impl = nullptr;
@@ -169,7 +170,7 @@ Infer *loadraw(const std::string &engine_file)
 std::shared_ptr<Infer> load_depth(const std::string &engine_file, int gpu_id) 
 {
     checkRuntime(cudaSetDevice(gpu_id));
-    return std::shared_ptr<DepthModelImpl>((DepthModelImpl *)loadraw(engine_file));
+    return std::shared_ptr<DepthModelImpl>((DepthModelImpl *)loadraw(engine_file, gpu_id));
 }
 
 

+ 7 - 1
src/infer/trt/depth/depth.hpp

@@ -38,6 +38,8 @@ namespace depth
         int network_input_width_, network_input_height_;
         affine::Norm normalize_;
 
+        int device_id_ = 0;
+
         bool isdynamic_model_ = false;
     
         DepthModelImpl() = default;
@@ -50,6 +52,10 @@ namespace depth
             depth_map_buffer_.cpu(width * height);
         }
 
+        virtual int get_gpu_id() override
+        {
+            return device_id_;
+        }
 
         void adjust_memory() 
         {
@@ -71,7 +77,7 @@ namespace depth
         void postprocess(int width, int height, void *stream = nullptr);
         
     
-        bool load(const std::string &engine_file);
+        bool load(const std::string &engine_file, int gpu_id);
     
         virtual Result forward(const tensor::Image &image, void *stream = nullptr);
         virtual Result forward(const tensor::Image &image, int slice_width, int slice_height, float overlap_width_ratio, float overlap_height_ratio, void *stream = nullptr);

+ 4 - 3
src/infer/trt/yolo/yolo.cu

@@ -541,9 +541,10 @@ std::shared_ptr<data::InstanceSegmentMap> YoloModelImpl::decode_segment(int imem
     
 }
 
-bool YoloModelImpl::load(const std::string &engine_file, ModelType model_type, const std::vector<std::string>& names, float confidence_threshold, float nms_threshold)
+bool YoloModelImpl::load(const std::string &engine_file, ModelType model_type, const std::vector<std::string>& names, float confidence_threshold, float nms_threshold, int gpu_id)
 {
     trt_ = TensorRT::load(engine_file);
+    device_id_ = gpu_id;
     if (trt_ == nullptr) return false;
 
     trt_->print();
@@ -774,7 +775,7 @@ Result YoloModelImpl::forwards(void *stream)
 }
 
 Infer *loadraw(const std::string &engine_file, ModelType model_type, const std::vector<std::string>& names, float confidence_threshold,
-               float nms_threshold) 
+               float nms_threshold, int gpu_id) 
 {
     YoloModelImpl *impl = new YoloModelImpl();
-    if (!impl->load(engine_file, model_type, names, confidence_threshold, nms_threshold)) 
+    if (!impl->load(engine_file, model_type, names, confidence_threshold, nms_threshold, gpu_id)) 
@@ -791,7 +792,7 @@ std::shared_ptr<Infer> load_yolo(const std::string &engine_file, ModelType model
     try
     {
         checkRuntime(cudaSetDevice(gpu_id));
-        return std::shared_ptr<YoloModelImpl>((YoloModelImpl *)loadraw(engine_file, model_type, names, confidence_threshold, nms_threshold));
+        return std::shared_ptr<YoloModelImpl>((YoloModelImpl *)loadraw(engine_file, model_type, names, confidence_threshold, nms_threshold, gpu_id));
     }
     catch (const std::exception& ex) 
     {

+ 18 - 1
src/infer/trt/yolo/yolo.hpp

@@ -61,6 +61,23 @@ namespace yolo
         YoloModelImpl() = default;
     
         virtual ~YoloModelImpl() = default;
+
+        void set_nms_threshold(float nms_threshold) override
+        {
+            std::lock_guard<std::mutex> lock(mutex_);
+            nms_threshold_ = nms_threshold;
+        }
+
+        void set_conf_threshold(float conf_threshold) override
+        {
+            std::lock_guard<std::mutex> lock(mutex_);
+            confidence_threshold_ = conf_threshold;
+        }
+
+        virtual int get_gpu_id() override
+        {
+            return device_id_;
+        }
     
         void adjust_memory(int batch_size);
     
@@ -80,7 +97,7 @@ namespace yolo
 };
 
 Infer *loadraw(const std::string &engine_file, ModelType model_type, const std::vector<std::string>& names, float confidence_threshold,
-    float nms_threshold);
+    float nms_threshold, int gpu_id);
 
 std::shared_ptr<Infer> load_yolo(const std::string &engine_file, ModelType model_type, const std::vector<std::string>& names, int gpu_id, float confidence_threshold, float nms_threshold);
 

+ 4 - 4
src/main.cpp

@@ -22,7 +22,7 @@ void test_depth()
 
     std::shared_ptr<Infer> depth_model = load("model/depth.engine", ModelType::DEPTH_ANYTHING, {}, 0, 0.25, 0.45);
     std::shared_ptr<GNode::InferNode> infer_node   = std::make_shared<GNode::InferNode>("depth");
-    infer_node->set_model_instance(depth_model, ModelType::DEPTH_ANYTHING);
+    infer_node->set_model_instance(depth_model, depth_model->get_gpu_id());
 
     std::shared_ptr<GNode::DrawNode> draw_node     = std::make_shared<GNode::DrawNode>("draw");
     std::shared_ptr<GNode::HttpPushNode> push_node = std::make_shared<GNode::HttpPushNode>("push", "172.16.20.168", 8080, "/push");
@@ -79,7 +79,7 @@ void test_yolo()
 
     std::shared_ptr<Infer> yolo_model = load("model/model1.engine", ModelType::YOLO11SEG, names, 1, 0.25, 0.45);
     std::shared_ptr<GNode::InferNode> infer_node   = std::make_shared<GNode::InferNode>("seg");
-    infer_node->set_model_instance(yolo_model, ModelType::YOLO11SEG, 1);
+    infer_node->set_model_instance(yolo_model, yolo_model->get_gpu_id());
 
     std::shared_ptr<GNode::TrackNode> track_node     = std::make_shared<GNode::TrackNode>("tracker", "person", 30, 30);
 
@@ -120,11 +120,11 @@ void test_multi()
 
     std::shared_ptr<Infer> yolo_model = load("model/yolo11s.engine", ModelType::YOLO11, names, 1, 0.25, 0.45);
     std::shared_ptr<GNode::InferNode> infer_node1   = std::make_shared<GNode::InferNode>("yolo11");
-    infer_node1->set_model_instance(yolo_model, ModelType::YOLO11, 1);
+    infer_node1->set_model_instance(yolo_model, yolo_model->get_gpu_id());
 
     std::shared_ptr<Infer> depth_model = load("model/depth.engine", ModelType::DEPTH_ANYTHING, {}, 1, 0.25, 0.45);
     std::shared_ptr<GNode::InferNode> infer_node2   = std::make_shared<GNode::InferNode>("depth");
-    infer_node2->set_model_instance(depth_model, ModelType::DEPTH_ANYTHING, 1);
+    infer_node2->set_model_instance(depth_model, depth_model->get_gpu_id());
 
     std::shared_ptr<GNode::DrawNode> draw_node     = std::make_shared<GNode::DrawNode>("draw");
     std::shared_ptr<GNode::HttpPushNode> push_node = std::make_shared<GNode::HttpPushNode>("push", "172.16.20.168", 8080, "/push");

+ 2 - 1
src/nodes/base/base.cpp

@@ -57,13 +57,14 @@ void BaseNode::work()
             has_data = true;
             Timer timer(name_);
             handle_data(meta_data);
+            timer.stop_print();
             send_output_data(meta_data);          
         }
         if (!has_data)
         {
             std::unique_lock<std::mutex> lock(mutex_);
             cond_var_->wait_for(lock, std::chrono::milliseconds(100), [this] {
-                return !running_;  // check the exit condition while waiting
+                return !running_;
             });
         }
     }
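timer.stop_print() now reports the handle_data latency explicitly rather than relying on the Timer destructor. The Timer class itself is not part of this diff; a hypothetical minimal shape, for orientation only (presumes <chrono>, <string>, and the plog header already used elsewhere):

    // Hypothetical sketch (Timer's real implementation is not in this commit):
    // measures the named scope and prints elapsed milliseconds on stop_print().
    class Timer {
    public:
        explicit Timer(const std::string& name)
            : name_(name), start_(std::chrono::steady_clock::now()) {}

        void stop_print() {
            auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
                          std::chrono::steady_clock::now() - start_).count();
            PLOGI.printf("[%s] handle_data took %lld ms", name_.c_str(), static_cast<long long>(ms));
        }

    private:
        std::string name_;
        std::chrono::steady_clock::time_point start_;
    };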

+ 1 - 3
src/nodes/infer/inferNode.hpp

@@ -19,18 +19,16 @@ public:
     InferNode(const std::string& name) : BaseNode(name, NODE_TYPE::MID_NODE) {}
     virtual ~InferNode()  { stop(); };
 
-    void set_model_instance(std::shared_ptr<Infer> model, ModelType model_type, int device_id=0)
+    void set_model_instance(std::shared_ptr<Infer> model, int device_id=0)
     {
         model_ = model;
         device_id_ = device_id;
-        model_type_ = model_type;
     }
 
     void handle_data(std::shared_ptr<meta::MetaData>& meta_data) override;
 
 private:
     std::shared_ptr<Infer> model_ = nullptr;
-    ModelType model_type_;
     int device_id_;
 };
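With model_type_ removed, the node only tracks the device id, and main.cpp now derives it from the model itself via get_gpu_id(). A sketch of how handle_data might use it; handle_data's body is not in this diff, and the MetaData fields are assumptions:

    // Assumed shape: pin the worker thread to the model's GPU before inference.
    void InferNode::handle_data(std::shared_ptr<meta::MetaData>& meta_data)
    {
        checkRuntime(cudaSetDevice(device_id_));      // device recorded in set_model_instance()
        meta_data->result = model_->forward(meta_data->image, nullptr);  // hypothetical fields
    }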
 

+ 1 - 1
src/nodes/stream/streamNode.cpp

@@ -43,7 +43,7 @@ bool StreamNode::open_stream() {
             status_ = StreamStatus::OPEN_FAILED;
             return false;
         }
-        printf("StreamNode [%s]: GPU Demuxer and Decoder created successfully.", name_.c_str());
+        PLOGI.printf("StreamNode [%s]: GPU Demuxer and Decoder created successfully.", name_.c_str());
         status_ = StreamStatus::OPENED;
     }
     else

+ 122 - 0
workspace/demo.json

@@ -0,0 +1,122 @@
+{
+    "models": {
+        "yolo_model_main": {
+            "model_path": "model/yolo11s.engine",
+            "model_type": "YOLO11",
+            "names": [
+                "person",
+                "car",
+                "close",
+                "open"
+            ],
+            "gpu_id": 0,
+            "confidence_threshold": 0.25,
+            "nms_threshold": 0.45
+        }
+    },
+    "pipelines": [
+        {
+            "pipeline_id": "pipeline_0",
+            "description": "Process the video stream from camera 0",
+            "nodes": [
+                {
+                    "node_id": "src_0",
+                    "node_type": "Source",
+                    "params": {
+                        "stream_url": "rtsp://xxxx_cam0",
+                        "gpu_id": 0,
+                        "decode_type": "GPU",
+                        "skip_frame": 10
+                    }
+                },
+                {
+                    "node_id": "infer_0",
+                    "node_type": "Inference",
+                    "params": {
+                        "model_id": "yolo_model_main"
+                    }
+                },
+                {
+                    "node_id": "track_0",
+                    "node_type": "Tracker",
+                    "params": {
+                        "track_name": "person",
+                        "track_frame": 30,
+                        "track_distance": 30
+                    }
+                },
+                {
+                    "node_id": "analyze_0",
+                    "node_type": "Analyzer",
+                    "params": {}
+                },
+                {
+                    "node_id": "draw_0",
+                    "node_type": "Drawer",
+                    "params": {}
+                },
+                {
+                    "node_id": "push_0",
+                    "node_type": "Push",
+                    "params": {}
+                },
+                {
+                    "node_id": "record_0",
+                    "node_type": "Recorder",
+                    "params": {
+                        "record_path": "result/result_pipeline0.mp4"
+                    }
+                }
+            ]
+        },
+        {
+            "pipeline_id": "pipeline_1",
+            "description": "Process the video stream from camera 1",
+            "nodes": [
+                {
+                    "node_id": "src_1",
+                    "node_type": "Source",
+                    "params": {
+                        "stream_url": "rtsp://xxxx_cam1",
+                        "gpu_id": 0,
+                        "decode_type": "GPU",
+                        "skip_frame": 10
+                    }
+                },
+                {
+                    "node_id": "infer_1",
+                    "node_type": "Inference",
+                    "params": {
+                        "model_id": "yolo_model_main"
+                    }
+                },
+                {
+                    "node_id": "track_1",
+                    "node_type": "Tracker",
+                    "params": {
+                        "track_name": "person",
+                        "track_frame": 30,
+                        "track_distance": 30
+                    }
+                },
+                {
+                    "node_id": "analyze_1",
+                    "node_type": "Analyzer",
+                    "params": {}
+                },
+                {
+                    "node_id": "draw_1",
+                    "node_type": "Drawer",
+                    "params": {}
+                },
+                {
+                    "node_id": "record_1",
+                    "node_type": "Recorder",
+                    "params": {
+                        "record_path": "result/result_pipeline1.mp4"
+                    }
+                }
+            ]
+        }
+    ]
+}
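A quick standalone check that this file parses and has the expected shape before create_from_json consumes it (a sketch using the bundled nlohmann header):

    #include <fstream>
    #include <iostream>
    #include "common/json.hpp"

    int main()
    {
        std::ifstream f("workspace/demo.json");
        auto cfg = nlohmann::json::parse(f);  // throws json::parse_error on malformed input

        std::cout << cfg["models"].size() << " shared model(s)\n";
        for (const auto& p : cfg["pipelines"])
            std::cout << p["pipeline_id"].get<std::string>()
                      << ": " << p["nodes"].size() << " node(s)\n";
        return 0;
    }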