// depth.hpp — TensorRT-based monocular depth estimation model interface.
#ifndef DEPTH_HPP__  // NOTE(review): identifiers containing "__" are reserved; consider renaming to DEPTH_HPP
#define DEPTH_HPP__

// C++ standard library
#include <iomanip>
#include <memory>   // std::shared_ptr (was missing — used by trt_ and load_depth)
#include <string>   // std::string   (was missing — used by engine_file_ and factories)
#include <vector>

// Project headers
#include "common/data.hpp"
#include "common/image.hpp"
#include "common/memory.hpp"
#include "infer/infer.hpp"
#include "infer/trt/affine.hpp"
  10. namespace depth
  11. {
  12. class DepthModelImpl : public Infer
  13. {
  14. public:
  15. ModelType model_type_;
  16. std::shared_ptr<TensorRT::Engine> trt_;
  17. std::string engine_file_;
  18. tensor::Memory<unsigned char> preprocess_buffer_;
  19. tensor::Memory<float> affine_matrix_;
  20. tensor::Memory<float> input_buffer_, output_buffer_;
  21. tensor::Memory<float> depth_map_buffer_;
  22. int network_input_width_, network_input_height_;
  23. affine::Norm normalize_;
  24. bool isdynamic_model_ = false;
  25. DepthModelImpl() = default;
  26. virtual ~DepthModelImpl() = default;
  27. void adjust_memory(int width, int height)
  28. {
  29. depth_map_buffer_.gpu(width * height);
  30. depth_map_buffer_.cpu(width * height);
  31. }
  32. void adjust_memory()
  33. {
  34. // the inference batch_size
  35. size_t input_numel = network_input_width_ * network_input_height_ * 3;
  36. input_buffer_.gpu(batch_size * input_numel);
  37. output_buffer_.gpu(batch_size * input_numel / 3);
  38. output_buffer_.cpu(batch_size * input_numel / 3);
  39. }
  40. void preprocess(const tensor::Image &image, affine::LetterBoxMatrix &affine, void *stream = nullptr);
  41. void postprocess(int width, int height, void *stream = nullptr)
  42. bool load(const std::string &engine_file, ModelType model_type, const std::vector<std::string>& names, float confidence_threshold, float nms_threshold);
  43. virtual cv::Mat forward(const tensor::Image &image, void *stream = nullptr);
  44. };
  45. Infer *loadraw(const std::string &engine_file, ModelType model_type, const std::vector<std::string>& names, float confidence_threshold,
  46. float nms_threshold);
  47. std::shared_ptr<Infer> load_depth(const std::string &engine_file, ModelType model_type, const std::vector<std::string>& names, int gpu_id, float confidence_threshold, float nms_threshold);
  48. } // namespace depth
  49. #endif