depth.hpp

#ifndef DEPTH_HPP
#define DEPTH_HPP

#include <memory>
#include <string>
#include <vector>
#include <iomanip>

#include "common/memory.hpp"
#include "infer/infer.hpp"
#include "common/image.hpp"
#include "common/data.hpp"
#include "infer/trt/affine.hpp"

// Select the TensorRT wrapper at build time: compile with -DTRT10 for the
// TensorRT 10 wrapper, otherwise the TensorRT 8 wrapper is used.
#ifdef TRT10
#include "common/tensorrt.hpp"
namespace TensorRT = TensorRT10;
#else
#include "common/tensorrt8.hpp"
namespace TensorRT = TensorRT8;
#endif
namespace depth
{

class DepthModelImpl : public Infer
{
public:
    ModelType model_type_;
    std::shared_ptr<TensorRT::Engine> trt_;
    std::string engine_file_;

    // Host/device buffers for preprocessing, network I/O, and the final depth map.
    tensor::Memory<unsigned char> preprocess_buffer_;
    tensor::Memory<float> affine_matrix_;
    tensor::Memory<float> invert_affine_matrix_;
    tensor::Memory<float> input_buffer_, output_buffer_;
    tensor::Memory<float> depth_map_buffer_;

    int network_input_width_, network_input_height_;
    affine::Norm normalize_;
    bool isdynamic_model_ = false;

    DepthModelImpl() = default;
    virtual ~DepthModelImpl() = default;

    // Size the depth-map buffer for the original image resolution.
    void adjust_memory(int width, int height)
    {
        depth_map_buffer_.gpu(width * height);
        depth_map_buffer_.cpu(width * height);
    }

    // Size the per-inference buffers: a 3-channel network input, a 1-channel
    // depth output, and the 2x3 letterbox affine matrix plus its inverse.
    void adjust_memory()
    {
        size_t input_numel = network_input_width_ * network_input_height_ * 3;
        input_buffer_.gpu(input_numel);
        output_buffer_.gpu(input_numel / 3);
        output_buffer_.cpu(input_numel / 3);

        affine_matrix_.gpu(6);
        affine_matrix_.cpu(6);
        invert_affine_matrix_.gpu(6);
        invert_affine_matrix_.cpu(6);
    }

    void preprocess(const tensor::Image &image, affine::LetterBoxMatrix &affine, void *stream = nullptr);
    void postprocess(int width, int height, void *stream = nullptr);
    bool load(const std::string &engine_file);

    // Whole-image inference, and a sliced variant that tiles the image with
    // the given slice size and overlap ratios.
    virtual Result forward(const tensor::Image &image, void *stream = nullptr);
    virtual Result forward(const tensor::Image &image, int slice_width, int slice_height,
                           float overlap_width_ratio, float overlap_height_ratio,
                           void *stream = nullptr);
};
Infer *loadraw(const std::string &engine_file);
std::shared_ptr<Infer> load_depth(const std::string &engine_file, int gpu_id);

}  // namespace depth

#endif  // DEPTH_HPP
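
A minimal usage sketch of the factory API above. It assumes the `Infer` base class exposes the same `forward` overloads declared here, that `tensor::Image` can be constructed from raw BGR pixel data (the constructor shown is hypothetical), and that the engine path and slice parameters are placeholders:

// Sketch only, not part of the header. The tensor::Image constructor and the
// OpenCV-based image loading are assumptions about the surrounding project.
#include <opencv2/opencv.hpp>
#include "depth.hpp"

int main()
{
    // Deserialize the TensorRT engine on GPU 0; expected to yield an empty
    // pointer if the engine file cannot be loaded.
    auto model = depth::load_depth("depth.engine", 0);
    if (model == nullptr)
        return -1;

    cv::Mat frame = cv::imread("input.jpg");
    tensor::Image image(frame.data, frame.cols, frame.rows);  // hypothetical ctor

    // Whole-image inference on the default CUDA stream (stream = nullptr).
    auto result = model->forward(image);

    // Sliced inference: 640x640 tiles with 25% overlap in each direction.
    auto sliced = model->forward(image, 640, 640, 0.25f, 0.25f);
    return 0;
}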