depth.hpp

#ifndef DEPTH_HPP__
#define DEPTH_HPP__

#include <memory>
#include <string>
#include <vector>
#include <iomanip>

#include "common/memory.hpp"
#include "infer/infer.hpp"
#include "common/image.hpp"
#include "common/data.hpp"
#include "infer/trt/affine.hpp"

// Select the TensorRT wrapper matching the installed TensorRT major version.
#ifdef TRT10
#include "common/tensorrt.hpp"
namespace TensorRT = TensorRT10;
#else
#include "common/tensorrt8.hpp"
namespace TensorRT = TensorRT8;
#endif
namespace depth
{

class DepthModelImpl : public Infer
{
public:
    ModelType model_type_;
    std::shared_ptr<TensorRT::Engine> trt_;
    std::string engine_file_;

    tensor::Memory<unsigned char> preprocess_buffer_;
    tensor::Memory<float> affine_matrix_;   // 2x3 letterbox affine matrix (6 floats)
    tensor::Memory<float> input_buffer_, output_buffer_;
    tensor::Memory<float> depth_map_buffer_;

    int network_input_width_, network_input_height_;
    affine::Norm normalize_;
    bool isdynamic_model_ = false;   // true when the engine was built with a dynamic input shape

    DepthModelImpl() = default;
    virtual ~DepthModelImpl() = default;
    // Size the depth-map buffers for the original image resolution (one float per pixel).
    void adjust_memory(int width, int height)
    {
        depth_map_buffer_.gpu(width * height);
        depth_map_buffer_.cpu(width * height);
    }

    // Size the network-side buffers: the input is a single H x W x 3 float tensor and
    // the raw output holds one depth value per input pixel (input_numel / 3).
    void adjust_memory()
    {
        size_t input_numel = network_input_width_ * network_input_height_ * 3;
        input_buffer_.gpu(input_numel);
        output_buffer_.gpu(input_numel / 3);
        output_buffer_.cpu(input_numel / 3);
        affine_matrix_.gpu(6);
        affine_matrix_.cpu(6);
    }
    // preprocess letterboxes the image to the network input size; postprocess maps the
    // raw output back to a depth map at the original width x height.
    void preprocess(const tensor::Image &image, affine::LetterBoxMatrix &affine, void *stream = nullptr);
    void postprocess(int width, int height, void *stream = nullptr);
    bool load(const std::string &engine_file);
    virtual Result forward(const tensor::Image &image, void *stream = nullptr);
    // Sliced inference over overlapping tiles of a large input image.
    virtual Result forward(const tensor::Image &image, int slice_width, int slice_height,
                           float overlap_width_ratio, float overlap_height_ratio,
                           void *stream = nullptr);
};
// Factory helpers that build a depth model from a serialized TensorRT engine file.
Infer *loadraw(const std::string &engine_file);
std::shared_ptr<Infer> load_depth(const std::string &engine_file, int gpu_id);

}  // namespace depth

#endif  // DEPTH_HPP__
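
A minimal usage sketch of the entry points declared above, kept outside the header itself. Only depth::load_depth and Infer::forward are taken from the declarations; the engine file name, the include path, and how the tensor::Image is produced and the Result consumed are assumptions about the surrounding project.

#include "infer/trt/depth.hpp"   // include path is an assumption about the project layout

int run_depth_once(const tensor::Image &image)
{
    // Build the model from a serialized TensorRT engine on GPU 0 (file name is illustrative).
    auto model = depth::load_depth("depth_anything.engine", /*gpu_id=*/0);
    if (!model)
        return -1;

    // Run depth estimation; passing nullptr uses the default CUDA stream.
    auto result = model->forward(image, /*stream=*/nullptr);
    // ... consume the depth map carried by `result` here ...
    return 0;
}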