@@ -118,14 +118,14 @@ Result DepthModelImpl::forward(const tensor::Image &image, void *stream)
 #ifdef TRT10
     if (!trt_->forward(std::unordered_map<std::string, const void *>{
                            { "input", input_buffer_.gpu() },
-                           { "output", depth_map_buffer_.gpu() }
+                           { "output", output_buffer_.gpu() }
                        }, stream_))
     {
-        printf("Failed to tensorRT forward.");
-        return {};
+        printf("Failed to tensorRT forward.\n");
+        return cv::Mat();
     }
 #else
-    std::vector<void *> bindings{input_buffer_.gpu(), depth_map_buffer_.gpu()};
+    std::vector<void *> bindings{input_buffer_.gpu(), output_buffer_.gpu()};
     if (!trt_->forward(bindings, stream))
     {
         printf("Failed to tensorRT forward.");
@@ -133,6 +133,8 @@ Result DepthModelImpl::forward(const tensor::Image &image, void *stream)
     }
 #endif
 
+    adjust_memory(image.width, image.height);
+
     postprocess(image.width, image.height, stream);
 
     checkRuntime(cudaMemcpyAsync(depth_map_buffer_.cpu(), depth_map_buffer_.gpu(),