|
@@ -9,6 +9,14 @@
|
|
|
#include "common/data.hpp"
|
|
|
#include "infer/trt/affine.hpp"
|
|
|
|
|
|
+#ifdef TRT10
|
|
|
+#include "common/tensorrt.hpp"
|
|
|
+namespace TensorRT = TensorRT10;
|
|
|
+#else
|
|
|
+#include "common/tensorrt8.hpp"
|
|
|
+namespace TensorRT = TensorRT8;
|
|
|
+#endif
|
|
|
+
|
|
|
namespace depth
|
|
|
{
|
|
|
|
|
@@ -46,17 +54,17 @@ namespace depth
|
|
|
{
|
|
|
// the inference batch_size
|
|
|
size_t input_numel = network_input_width_ * network_input_height_ * 3;
|
|
|
- input_buffer_.gpu(batch_size * input_numel);
|
|
|
+ input_buffer_.gpu(input_numel);
|
|
|
|
|
|
- output_buffer_.gpu(batch_size * input_numel / 3);
|
|
|
- output_buffer_.cpu(batch_size * input_numel / 3);
|
|
|
+ output_buffer_.gpu(input_numel / 3);
|
|
|
+ output_buffer_.cpu(input_numel / 3);
|
|
|
}
|
|
|
|
|
|
void preprocess(const tensor::Image &image, affine::LetterBoxMatrix &affine, void *stream = nullptr);
|
|
|
void postprocess(int width, int height, void *stream = nullptr);
|
|
|
|
|
|
|
|
|
- bool load(const std::string &engine_file, ModelType model_type, const std::vector<std::string>& names, float confidence_threshold, float nms_threshold);
|
|
|
+ bool load(const std::string &engine_file);
|
|
|
|
|
|
virtual cv::Mat forward(const tensor::Image &image, void *stream = nullptr);
|
|
|
|
|
@@ -65,7 +73,7 @@ namespace depth
|
|
|
Infer *loadraw(const std::string &engine_file, ModelType model_type, const std::vector<std::string>& names, float confidence_threshold,
|
|
|
float nms_threshold);
|
|
|
|
|
|
-std::shared_ptr<Infer> load_depth(const std::string &engine_file, ModelType model_type, const std::vector<std::string>& names, int gpu_id, float confidence_threshold, float nms_threshold);
|
|
|
+std::shared_ptr<Infer> load_depth(const std::string &engine_file, int gpu_id);
|
|
|
|
|
|
} // namespace depth
|
|
|
|