```cpp
#ifndef DEPTH_HPP__
#define DEPTH_HPP__

#include <iomanip>
#include <memory>
#include <string>
#include <vector>

#include <opencv2/opencv.hpp>  // cv::Mat returned by forward()

#include "common/memory.hpp"
#include "common/image.hpp"
#include "common/data.hpp"
#include "infer/infer.hpp"
#include "infer/trt/affine.hpp"

namespace depth
{

class DepthModelImpl : public Infer
{
public:
    ModelType model_type_;

    std::shared_ptr<TensorRT::Engine> trt_;
    std::string engine_file_;

    // Scratch buffer used by preprocessing (letterbox + normalization).
    tensor::Memory<unsigned char> preprocess_buffer_;

    tensor::Memory<float> affine_matrix_;
    tensor::Memory<float> input_buffer_, output_buffer_;
    tensor::Memory<float> depth_map_buffer_;

    int network_input_width_, network_input_height_;
    affine::Norm normalize_;
    bool isdynamic_model_ = false;

    DepthModelImpl() = default;
    virtual ~DepthModelImpl() = default;

    // Resize the depth-map buffer to the original image resolution.
    void adjust_memory(int width, int height)
    {
        depth_map_buffer_.gpu(width * height);
        depth_map_buffer_.cpu(width * height);
    }

    // Resize the network input/output buffers for the given inference batch size.
    void adjust_memory(int batch_size)
    {
        size_t input_numel = network_input_width_ * network_input_height_ * 3;
        input_buffer_.gpu(batch_size * input_numel);
        // The depth output is single-channel: one value per input pixel.
        output_buffer_.gpu(batch_size * input_numel / 3);
        output_buffer_.cpu(batch_size * input_numel / 3);
    }

    void preprocess(const tensor::Image &image, affine::LetterBoxMatrix &affine, void *stream = nullptr);
    void postprocess(int width, int height, void *stream = nullptr);

    bool load(const std::string &engine_file, ModelType model_type, const std::vector<std::string> &names,
              float confidence_threshold, float nms_threshold);

    virtual cv::Mat forward(const tensor::Image &image, void *stream = nullptr);
};

Infer *loadraw(const std::string &engine_file, ModelType model_type, const std::vector<std::string> &names,
               float confidence_threshold, float nms_threshold);

std::shared_ptr<Infer> load_depth(const std::string &engine_file, ModelType model_type,
                                  const std::vector<std::string> &names, int gpu_id,
                                  float confidence_threshold, float nms_threshold);

}  // namespace depth

#endif  // DEPTH_HPP__
```
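For context, here is a minimal sketch of how this interface might be driven from application code. It only relies on the `load_depth` and `forward` declarations above; everything else is assumed: the engine path, the `ModelType::DEPTH_ANYTHING` enumerator, the `tensor::Image(data, width, height)` constructor, and the idea that `Infer` exposes the same `forward` signature. The class names and thresholds are passed as placeholders since a depth model has no detection classes or NMS.

```cpp
#include <opencv2/opencv.hpp>
#include "depth.hpp"

int main()
{
    // Hypothetical call: engine path and ModelType enumerator are placeholders,
    // names/thresholds are unused for a pure depth model.
    auto model = depth::load_depth("depth.engine", ModelType::DEPTH_ANYTHING,
                                   /*names=*/{}, /*gpu_id=*/0,
                                   /*confidence_threshold=*/0.25f, /*nms_threshold=*/0.45f);
    if (model == nullptr) return -1;

    cv::Mat frame = cv::imread("input.jpg");

    // Assumed tensor::Image constructor: BGR pointer plus width/height.
    // forward() returns a single-channel float depth map at the original resolution.
    cv::Mat depth_map = model->forward(tensor::Image(frame.data, frame.cols, frame.rows));

    // Normalize to 8-bit and colorize purely for visualization.
    cv::Mat vis;
    cv::normalize(depth_map, vis, 0, 255, cv::NORM_MINMAX, CV_8U);
    cv::applyColorMap(vis, vis, cv::COLORMAP_INFERNO);
    cv::imwrite("depth_vis.jpg", vis);
    return 0;
}
```

The split between `adjust_memory(int batch_size)` and `adjust_memory(int width, int height)` mirrors the two resolutions involved: the network buffers are sized for the fixed network input, while the depth-map buffer tracks each incoming image so `postprocess` can resample the prediction back to the original size.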