|
@@ -0,0 +1,474 @@
|
|
|
#ifndef GRAPH_HPP__
#define GRAPH_HPP__

// C++ standard library
#include <fstream>   // std::ifstream (load_from_json reads the config file)
#include <iostream>
#include <map>       // std::map (shared model registry)
#include <memory>    // std::shared_ptr
#include <stdexcept> // std::runtime_error
#include <string>
#include <vector>

// project headers
#include "common/json.hpp"
#include "nodes/base/base.hpp"
#include "nodes/stream/streamNode.hpp"
#include "nodes/infer/inferNode.hpp"
#include "nodes/analyze/analyzeNode.hpp"
#include "nodes/draw/drawNode.hpp"
#include "nodes/record/recordNode.hpp"
#include "nodes/httpPush/httpPush.hpp"
#include "nodes/track/trackNode.hpp"
|
|
|
+
|
|
|
+
|
|
|
+namespace Graph
|
|
|
+{
|
|
|
+
|
|
|
+// json format
|
|
|
+/*
|
|
|
+json 设计原则
|
|
|
+1. 同一条pipeline下的infer节点公用一个模型
|
|
|
+2. 不同条pipeline下的infer节点不公用一个模型
|
|
|
+3. 相当于每条pipeline都是平行的,只是在infer节点上共用了同一个模型指针
|
|
|
+
|
|
|
+
|
|
|
+1. 每个pipeline都是平行的,没有交叉
|
|
|
+2. 每个pipeline的节点按顺序连接
|
|
|
+3. 每个pipeline可以公用一个模型指针,infer节点通过设置模型指针来共用模型
|
|
|
+4. 下面是我的设计思路,可以参考。请帮我修改完善,我觉得有些地方不太可以,但是我想不出更好的办法
|
|
|
+
|
|
|
+{
|
|
|
+ // ---------------------------------------------------------------------------
|
|
|
+ // 全局资源定义区:定义可被多个Pipeline共享的资源,例如模型
|
|
|
+ // ---------------------------------------------------------------------------
|
|
|
+ "shared_resources": {
|
|
|
+ "models": {
|
|
|
+ // 定义一个模型,给它一个唯一的ID,例如 "yolo_model_main"
|
|
|
+ "yolo_model_main": {
|
|
|
+ "model_path": "model/yolo11s.engine", // 模型文件路径
|
|
|
+ "model_type": "YOLO11", // 模型类型
|
|
|
+ "names": ["person", "car", "close", "open"], // 类别名称
|
|
|
+ // 注意:GPU ID通常与推理实例相关,而不是模型本身。
|
|
|
+ // 如果模型加载强绑定GPU,可以放在这里,但更常见的是在推理节点实例中指定。
|
|
|
+ "gpu_id": 0,
|
|
|
+ "confidence_threshold": 0.25,
|
|
|
+ "nms_threshold": 0.45
|
|
|
+ }
|
|
|
+ // 如果有其他模型,可以在这里继续添加
|
|
|
+ // "another_model": { ... }
|
|
|
+ }
|
|
|
+ // 可以扩展添加其他共享资源,如特定的追踪器配置模板等
|
|
|
+ // "tracker_configs": { ... }
|
|
|
+ },
|
|
|
+
|
|
|
+ // ---------------------------------------------------------------------------
|
|
|
+ // Pipeline 定义区:一个包含所有独立Pipeline的数组
|
|
|
+ // ---------------------------------------------------------------------------
|
|
|
+ "pipelines": [
|
|
|
+ // --- Pipeline 0 ---
|
|
|
+ {
|
|
|
+ "pipeline_id": "pipeline_0", // 给Pipeline一个唯一标识符
|
|
|
+ "description": "处理摄像头0的视频流", // 可选的描述信息
|
|
|
+ // 使用一个节点数组来明确定义执行顺序
|
|
|
+ "nodes": [
|
|
|
+ {
|
|
|
+ "node_id": "src_0", // 节点实例的唯一ID
|
|
|
+ "node_type": "Source", // 节点类型(更通用的名称)
|
|
|
+ "params": { // 节点的具体参数
|
|
|
+ "stream_url": "rtsp://xxxx_cam0", // 使用具体的URL
|
|
|
+ "gpu_id": 0, // 指定解码使用的GPU
|
|
|
+ "decode_type": "GPU",
|
|
|
+ "skip_frame": 10
|
|
|
+ }
|
|
|
+ },
|
|
|
+ {
|
|
|
+ "node_id": "infer_0", // 节点实例的唯一ID
|
|
|
+ "node_type": "Inference", // 节点类型
|
|
|
+ "params": {
|
|
|
+ // 通过 model_id 引用上面定义的共享模型
|
|
|
+ "model_id": "yolo_model_main",
|
|
|
+ }
|
|
|
+ },
|
|
|
+ {
|
|
|
+ "node_id": "track_0", // 节点实例的唯一ID
|
|
|
+ "node_type": "Tracker", // 节点类型
|
|
|
+ "params": {
|
|
|
+ "track_name": "person", // 需要追踪的目标类别
|
|
|
+ "track_frame": 30, // 追踪相关的帧数参数(具体含义需根据实现确定,可能是最大丢失帧数)
|
|
|
+ "track_distance": 30 // 追踪相关的距离参数(具体含义和单位需根据实现确定)
|
|
|
+ }
|
|
|
+ },
|
|
|
+ {
|
|
|
+ "node_id": "analyze_0", // 节点实例的唯一ID
|
|
|
+ "node_type": "Analyzer", // 节点类型
|
|
|
+ "params": {
|
|
|
+ // 分析节点的特定参数(如果需要)
|
|
|
+ }
|
|
|
+ },
|
|
|
+ {
|
|
|
+ "node_id": "draw_0", // 节点实例的唯一ID
|
|
|
+ "node_type": "Drawer", // 节点类型
|
|
|
+ "params": {
|
|
|
+ // 绘制节点的特定参数(如果需要)
|
|
|
+ }
|
|
|
+ },
|
|
|
+ {
|
|
|
+ "node_id": "record_0", // 节点实例的唯一ID
|
|
|
+ "node_type": "Recorder", // 节点类型
|
|
|
+ "params": {
|
|
|
+ // 建议为每个pipeline使用不同的输出文件路径
|
|
|
+ "record_path": "result/result_pipeline0.mp4"
|
|
|
+ }
|
|
|
+ }
|
|
|
+ ] // end of nodes for pipeline_0
|
|
|
+ }, // end of pipeline_0
|
|
|
+
|
|
|
+ // --- Pipeline 1 ---
|
|
|
+ {
|
|
|
+ "pipeline_id": "pipeline_1", // 唯一标识符
|
|
|
+ "description": "处理摄像头1的视频流",
|
|
|
+ "nodes": [
|
|
|
+ {
|
|
|
+ "node_id": "src_1", // 唯一节点ID
|
|
|
+ "node_type": "Source",
|
|
|
+ "params": {
|
|
|
+ "stream_url": "rtsp://xxxx_cam1", // 不同的URL
|
|
|
+ "gpu_id": 0, // 可以是同一个GPU,也可以是不同的
|
|
|
+ "decode_type": "GPU",
|
|
|
+ "skip_frame": 10
|
|
|
+ }
|
|
|
+ },
|
|
|
+ {
|
|
|
+ "node_id": "infer_1", // 唯一节点ID
|
|
|
+ "node_type": "Inference",
|
|
|
+ "params": {
|
|
|
+ // 引用同一个共享模型
|
|
|
+ "model_id": "yolo_model_main",
|
|
|
+
|
|
|
+ }
|
|
|
+ },
|
|
|
+ {
|
|
|
+ "node_id": "track_1", // 唯一节点ID
|
|
|
+ "node_type": "Tracker",
|
|
|
+ // 即使参数相同,这也是一个独立的追踪器实例,处理来自 src_1 的数据
|
|
|
+ "params": {
|
|
|
+ "track_name": "person",
|
|
|
+ "track_frame": 30,
|
|
|
+ "track_distance": 30
|
|
|
+ }
|
|
|
+ },
|
|
|
+ {
|
|
|
+ "node_id": "analyze_1", // 唯一节点ID
|
|
|
+ "node_type": "Analyzer",
|
|
|
+ "params": {}
|
|
|
+ },
|
|
|
+ {
|
|
|
+ "node_id": "draw_1", // 唯一节点ID
|
|
|
+ "node_type": "Drawer",
|
|
|
+ "params": {}
|
|
|
+ },
|
|
|
+ {
|
|
|
+ "node_id": "record_1", // 唯一节点ID
|
|
|
+ "node_type": "Recorder",
|
|
|
+ "params": {
|
|
|
+ "record_path": "result/result_pipeline1.mp4" // 不同的输出路径
|
|
|
+ }
|
|
|
+ }
|
|
|
+ ] // end of nodes for pipeline_1
|
|
|
+ } // end of pipeline_1
|
|
|
+
|
|
|
+ // 可以根据需要添加更多的 Pipeline 定义 ...
|
|
|
+
|
|
|
+ ] // end of pipelines
|
|
|
+}
|
|
|
+
|
|
|
+void load_from_json(const std::string& json_path)
|
|
|
+ {
|
|
|
+ auto json_data = nlohmann::json::parse(json_path);
|
|
|
+ auto shared_resources = json_data["shared_resources"];
|
|
|
+ auto models = shared_resources["models"];
|
|
|
+ for (auto& model : models.items())
|
|
|
+ {
|
|
|
+ std::string model_id = model.key();
|
|
|
+ std::string model_path = model.value()["model_path"];
|
|
|
+ std::string model_type = model.value()["model_type"];
|
|
|
+ std::vector<std::string> names = model.value()["names"];
|
|
|
+ int gpu_id = model.value()["gpu_id"];
|
|
|
+ float confidence_threshold = model.value()["confidence_threshold"];
|
|
|
+ float nms_threshold = model.value()["nms_threshold"];
|
|
|
+ std::shared_ptr<Infer> infer = load(model_path, model_type, names, gpu_id, confidence_threshold, nms_threshold);
|
|
|
+ shared_models_[model_id] = infer;
|
|
|
+ }
|
|
|
+
|
|
|
+ auto pipelines = json_data["pipelines"];
|
|
|
+
|
|
|
+ }
|
|
|
+
|
|
|
+*/
|
|
|
+
|
|
|
+namespace Graph
|
|
|
+{
|
|
|
    // --- Internal structure: stores one fully configured pipeline ---
    struct PipelineInstance {
        std::string pipeline_id;  // unique pipeline identifier from the JSON config
        std::string description;  // optional human-readable description
        // Nodes belonging to this pipeline only, stored in execution order
        // (the order they were declared in the JSON "nodes" array).
        std::vector<std::shared_ptr<GNode::BaseNode>> nodes;
    };
|
|
|
+
|
|
|
+class Graph
|
|
|
+{
|
|
|
+public:
|
|
|
+ Graph() = default;
|
|
|
+ ~Graph() {
|
|
|
+ // 如果需要,在这里添加清理逻辑,比如停止所有节点
|
|
|
+ std::cout << "Graph destructor called. Consider adding node stopping logic here." << std::endl;
|
|
|
+ }
|
|
|
+
|
|
|
+
|
|
|
+ // 获取配置好的 pipelines (只读)
|
|
|
+ const std::vector<PipelineInstance>& getPipelines() const {
|
|
|
+ return configured_pipelines_;
|
|
|
+ }
|
|
|
+
|
|
|
+ // 获取加载的共享模型 (只读)
|
|
|
+ const std::map<std::string, std::shared_ptr<GNode::Infer>>& getSharedModels() const {
|
|
|
+ return shared_models_;
|
|
|
+ }
|
|
|
+
|
|
|
+
|
|
|
+ void load_from_json(const std::string& json_path)
|
|
|
+ {
|
|
|
+ std::cout << "Loading graph configuration from: " << json_path << std::endl;
|
|
|
+ std::ifstream ifs(json_path);
|
|
|
+ if (!ifs.is_open()) {
|
|
|
+ throw std::runtime_error("Failed to open JSON file: " + json_path);
|
|
|
+ }
|
|
|
+
|
|
|
+ nlohmann::json json_data;
|
|
|
+ try {
|
|
|
+ json_data = nlohmann::json::parse(ifs);
|
|
|
+ } catch (const nlohmann::json::parse_error& e) {
|
|
|
+ throw std::runtime_error("Failed to parse JSON: " + std::string(e.what()));
|
|
|
+ }
|
|
|
+
|
|
|
+ // --- 1. 加载共享模型 ---
|
|
|
+ std::cout << "\n--- Loading Shared Models ---" << std::endl;
|
|
|
+ shared_models_.clear(); // 清空旧模型
|
|
|
+ try {
|
|
|
+ // 安全地访问 JSON 结构
|
|
|
+ const auto models_json = json_data.value("shared_resources", nlohmann::json::object()).value("models", nlohmann::json::object());
|
|
|
+
|
|
|
+ for (auto& [model_id, model_config] : models_json.items())
|
|
|
+ {
|
|
|
+ std::cout << "Processing shared model definition: " << model_id << std::endl;
|
|
|
+ std::string model_path = model_config.value("model_path", "");
|
|
|
+ std::string model_type_str = model_config.value("model_type", ""); // JSON中的模型类型字符串
|
|
|
+ std::vector<std::string> names;
|
|
|
+ if(model_config.contains("names") && model_config["names"].is_array()){
|
|
|
+ names = model_config["names"].get<std::vector<std::string>>();
|
|
|
+ } else {
|
|
|
+ std::cerr << "Warning: Model '" << model_id << "' missing 'names' array." << std::endl;
|
|
|
+ }
|
|
|
+ int gpu_id = model_config.value("gpu_id", 0);
|
|
|
+ float confidence_threshold = model_config.value("confidence_threshold", 0.25f);
|
|
|
+ float nms_threshold = model_config.value("nms_threshold", 0.45f);
|
|
|
+
|
|
|
+ if (model_path.empty() || model_type_str.empty()) {
|
|
|
+ std::cerr << "Warning: Skipping model '" << model_id << "' due to missing 'model_path' or 'model_type'." << std::endl;
|
|
|
+ shared_models_[model_id] = nullptr;
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+
|
|
|
+ // 调用模型加载函数 (全局或静态/成员)
|
|
|
+ std::shared_ptr<GNode::Infer> infer_ptr = ::load(model_path, model_type_str, names, gpu_id, confidence_threshold, nms_threshold);
|
|
|
+ shared_models_[model_id] = infer_ptr;
|
|
|
+
|
|
|
+ if (!infer_ptr) {
|
|
|
+ std::cerr << "ERROR: Model '" << model_id << "' failed to load." << std::endl;
|
|
|
+ // 根据需要决定是否抛出异常终止
|
|
|
+ // throw std::runtime_error("Failed to load critical model: " + model_id);
|
|
|
+ } else {
|
|
|
+ std::cout << "Successfully loaded shared model: " << model_id << std::endl;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ } catch (const std::exception& e) {
|
|
|
+ std::cerr << "ERROR during shared model loading: " << e.what() << std::endl;
|
|
|
+ throw; // 重新抛出
|
|
|
+ }
|
|
|
+
|
|
|
+ // --- 2. 加载并构建 Pipelines ---
|
|
|
+ std::cout << "\n--- Configuring Pipelines ---" << std::endl;
|
|
|
+ configured_pipelines_.clear(); // 清空旧的 pipelines
|
|
|
+
|
|
|
+ try {
|
|
|
+ const auto pipelines_json = json_data.value("pipelines", nlohmann::json::array());
|
|
|
+
|
|
|
+ for (const auto& pipeline_config : pipelines_json) {
|
|
|
+ std::string pipeline_id = pipeline_config.value("pipeline_id", "unknown_pipeline");
|
|
|
+ std::cout << "\nConfiguring pipeline: " << pipeline_id << std::endl;
|
|
|
+
|
|
|
+ PipelineInstance current_pipeline; // 创建当前 pipeline 的实例结构
|
|
|
+ current_pipeline.pipeline_id = pipeline_id;
|
|
|
+ current_pipeline.description = pipeline_config.value("description", "");
|
|
|
+ // current_pipeline.nodes 会在下面填充
|
|
|
+
|
|
|
+ const auto nodes_json = pipeline_config.value("nodes", nlohmann::json::array());
|
|
|
+ if (nodes_json.empty()) {
|
|
|
+ std::cerr << "Warning: Pipeline '" << pipeline_id << "' has no nodes defined." << std::endl;
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+
|
|
|
+ // --- 2.1 创建当前 pipeline 的所有节点实例 ---
|
|
|
+ std::vector<std::shared_ptr<GNode::BaseNode>> temp_nodes_for_this_pipeline; // 临时存储当前pipeline的节点
|
|
|
+ for (const auto& node_config : nodes_json) {
|
|
|
+ std::string node_id = node_config.value("node_id", "unknown_node");
|
|
|
+ std::string node_type = node_config.value("node_type", "");
|
|
|
+ const auto params = node_config.value("params", nlohmann::json::object());
|
|
|
+
|
|
|
+ std::shared_ptr<GNode::BaseNode> node_ptr = nullptr;
|
|
|
+
|
|
|
+ if (node_type.empty()) {
|
|
|
+ std::cerr << "ERROR: Node in pipeline '" << pipeline_id << "' is missing 'node_type'. Skipping node." << std::endl;
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+
|
|
|
+ std::cout << " Creating node: " << node_id << " (Type: " << node_type << ")" << std::endl;
|
|
|
+
|
|
|
+ // 根据类型创建节点,并添加到 *临时* 列表
|
|
|
+ if (node_type == "Source") {
|
|
|
+ std::string stream_url = params.value("stream_url", "");
|
|
|
+ int gpu_id = params.value("gpu_id", 0);
|
|
|
+ std::string decode_type = params.value("decode_type", "GPU");
|
|
|
+ int skip_frame = params.value("skip_frame", 0);
|
|
|
+ if (stream_url.empty()) throw std::runtime_error("Source node '" + node_id + "' missing 'stream_url'.");
|
|
|
+
|
|
|
+ auto src_node = std::make_shared<GNode::StreamNode>(node_id, stream_url, gpu_id, decode_type);
|
|
|
+ src_node->set_skip_frame(skip_frame);
|
|
|
+ node_ptr = src_node;
|
|
|
+ }
|
|
|
+ else if (node_type == "Inference") {
|
|
|
+ std::string model_id = params.value("model_id", "");
|
|
|
+ if (model_id.empty()) throw std::runtime_error("Inference node '" + node_id + "' missing 'model_id'.");
|
|
|
+
|
|
|
+ auto it = shared_models_.find(model_id);
|
|
|
+ if (it == shared_models_.end()) throw std::runtime_error("Node '" + node_id + "' references unknown model_id '" + model_id + "'.");
|
|
|
+ if (!it->second) throw std::runtime_error("Node '" + node_id + "' references model '" + model_id + "' which failed to load or is null.");
|
|
|
+
|
|
|
+ auto infer_node = std::make_shared<GNode::InferNode>(node_id);
|
|
|
+
|
|
|
+ // --- 模型类型转换 ---
|
|
|
+ // 从 shared_resources 中获取模型类型字符串
|
|
|
+ std::string model_type_str_from_shared = json_data["shared_resources"]["models"][model_id].value("model_type", "UNKNOWN");
|
|
|
+ GNode::ModelType model_enum_type = GNode::ModelType::YOLO11; // 默认值
|
|
|
+ if (model_type_str_from_shared == "YOLO11") { // 这里进行字符串到枚举的映射
|
|
|
+ model_enum_type = GNode::ModelType::YOLO11;
|
|
|
+ } // else if (model_type_str == "ANOTHER_TYPE") { model_enum_type = GNode::ModelType::ANOTHER_ENUM; }
|
|
|
+ else {
|
|
|
+ std::cerr << "Warning: Unknown or unhandled model_type string '" << model_type_str_from_shared
|
|
|
+ << "' for model_id '" << model_id << "'. Using default." << std::endl;
|
|
|
+ }
|
|
|
+ // --- 结束模型类型转换 ---
|
|
|
+
|
|
|
+ infer_node->set_model_instance(it->second, model_enum_type);
|
|
|
+ node_ptr = infer_node;
|
|
|
+ }
|
|
|
+ else if (node_type == "Tracker") {
|
|
|
+ std::string track_name = params.value("track_name", "person");
|
|
|
+ int track_frame = params.value("track_frame", 30);
|
|
|
+ int track_distance = params.value("track_distance", 30);
|
|
|
+ node_ptr = std::make_shared<GNode::TrackNode>(node_id, track_name, track_frame, track_distance);
|
|
|
+ }
|
|
|
+ else if (node_type == "Analyzer") {
|
|
|
+ node_ptr = std::make_shared<GNode::AnalyzeNode>(node_id);
|
|
|
+ // 解析 Analyzer 特有参数 (如果 params 中有定义)
|
|
|
+ // example: int threshold = params.value("threshold", 10);
|
|
|
+ }
|
|
|
+ else if (node_type == "Drawer") {
|
|
|
+ node_ptr = std::make_shared<GNode::DrawNode>(node_id);
|
|
|
+ // 解析 Drawer 特有参数
|
|
|
+ }
|
|
|
+ else if (node_type == "Recorder") {
|
|
|
+ std::string record_path = params.value("record_path", "");
|
|
|
+ if (record_path.empty()) throw std::runtime_error("Recorder node '" + node_id + "' missing 'record_path'.");
|
|
|
+ auto record_node = std::make_shared<GNode::RecordNode>(node_id);
|
|
|
+ record_node->set_record_path(record_path);
|
|
|
+ node_ptr = record_node;
|
|
|
+ }
|
|
|
+ // else if (node_type == "HttpPush") { // 如果需要 HttpPush 节点
|
|
|
+ // std::string url = params.value("url", "");
|
|
|
+ // if (url.empty()) throw std::runtime_error("HttpPush node '" + node_id + "' missing 'url'.");
|
|
|
+ // node_ptr = std::make_shared<GNode::HttpPushNode>(node_id, url); // 假设构造函数是这样
|
|
|
+ // }
|
|
|
+ else {
|
|
|
+ std::cerr << "ERROR: Unknown node_type '" << node_type << "' for node '" << node_id << "'. Skipping." << std::endl;
|
|
|
+ continue; // 跳过未知类型
|
|
|
+ }
|
|
|
+
|
|
|
+ // 将成功创建的节点添加到当前 pipeline 的临时列表中
|
|
|
+ temp_nodes_for_this_pipeline.push_back(node_ptr);
|
|
|
+ std::cout << " Added node '" << node_id << "' to temporary list for pipeline '" << pipeline_id << "'" << std::endl;
|
|
|
+
|
|
|
+ } // --- 结束当前 pipeline 的节点创建循环 ---
|
|
|
+
|
|
|
+
|
|
|
+ // --- 2.2 连接当前 pipeline 内部的节点 ---
|
|
|
+ std::cout << " Linking nodes for pipeline: " << pipeline_id << std::endl;
|
|
|
+ if (temp_nodes_for_this_pipeline.size() > 1) {
|
|
|
+ for (size_t i = 0; i < temp_nodes_for_this_pipeline.size() - 1; ++i) {
|
|
|
+ // 使用 GNode::LinkNode 连接临时列表中的节点
|
|
|
+ GNode::LinkNode(temp_nodes_for_this_pipeline[i], temp_nodes_for_this_pipeline[i + 1]);
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ std::cout << " Pipeline '" << pipeline_id << "' has " << temp_nodes_for_this_pipeline.size() << " node(s), linking skipped/not needed." << std::endl;
|
|
|
+ }
|
|
|
+
|
|
|
+ // --- 2.3 将节点列表存入 PipelineInstance 并保存 ---
|
|
|
+ if (!temp_nodes_for_this_pipeline.empty()) {
|
|
|
+ current_pipeline.nodes = std::move(temp_nodes_for_this_pipeline); // 移动节点列表
|
|
|
+ configured_pipelines_.push_back(std::move(current_pipeline)); // 保存配置好的 pipeline 实例
|
|
|
+ std::cout << "Successfully configured and stored pipeline: " << pipeline_id << std::endl;
|
|
|
+ } else {
|
|
|
+ std::cerr << "Warning: Pipeline '" << pipeline_id << "' ended up with no valid nodes. Skipping storage." << std::endl;
|
|
|
+ }
|
|
|
+
|
|
|
+ } // --- 结束所有 pipeline 的配置循环 ---
|
|
|
+
|
|
|
+ } catch (const std::exception& e) {
|
|
|
+ std::cerr << "ERROR during pipeline configuration: " << e.what() << std::endl;
|
|
|
+ // 清理状态
|
|
|
+ configured_pipelines_.clear();
|
|
|
+ shared_models_.clear();
|
|
|
+ throw; // 重新抛出
|
|
|
+ }
|
|
|
+
|
|
|
+ // --- 3. 启动所有 Pipeline 的节点 ---
|
|
|
+ std::cout << "\n--- Starting All Configured Pipelines ---" << std::endl;
|
|
|
+ if (configured_pipelines_.empty()) {
|
|
|
+ std::cout << "No pipelines were configured to start." << std::endl;
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
+ for (auto& pipeline : configured_pipelines_) {
|
|
|
+ std::cout << "Starting nodes for pipeline: " << pipeline.pipeline_id << std::endl;
|
|
|
+ // 从后往前启动当前 pipeline 的节点
|
|
|
+ for (int i = static_cast<int>(pipeline.nodes.size()) - 1; i >= 0; --i) {
|
|
|
+ if (pipeline.nodes[i]) {
|
|
|
+ // 假设 GNode::BaseNode 有 start() 方法
|
|
|
+ pipeline.nodes[i]->start();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ std::cout << "\nGraph configuration loading and pipeline starting finished." << std::endl;
|
|
|
+ std::cout << "Total shared models loaded (or attempted): " << shared_models_.size() << std::endl;
|
|
|
+ std::cout << "Total pipelines configured and started: " << configured_pipelines_.size() << std::endl;
|
|
|
+ }
|
|
|
+
|
|
|
+private:
|
|
|
+ // 存储共享模型指针 <model_id, model_instance_ptr>
|
|
|
+ std::map<std::string, std::shared_ptr<GNode::Infer>> shared_models_;
|
|
|
+
|
|
|
+ // 存储所有配置好的 Pipeline 实例
|
|
|
+ std::vector<PipelineInstance> configured_pipelines_;
|
|
|
+
|
|
|
+ // 这个全局 nodes_ 列表不再符合多 pipeline 的设计,应移除或注释掉
|
|
|
+ // std::vector<std::shared_ptr<GNode::BaseNode>> nodes_;
|
|
|
+};
|
|
|
+
|
|
|
+} // namespace Graph
|
|
|
+
|
|
|
+#endif // GRAPH_HPP__
|