diff --git a/app/SandboxSetup/beamerlocationgui.cpp b/app/SandboxSetup/beamerlocationgui.cpp
index 51592d686cd30bcad43de3d0919d3ed0291d672d..3b2c2b80da75e4c1aa20a1a67d7299b9d1240b4e 100644
--- a/app/SandboxSetup/beamerlocationgui.cpp
+++ b/app/SandboxSetup/beamerlocationgui.cpp
@@ -133,7 +133,7 @@ void BeamerLocationGui::userValidePoint(){
 
     if(!circle.empty()){
 
-        capturedPoints.push_back( beamer->deprojectPixel(circle.at(0), &depth, camera) );
+        capturedPoints.push_back( beamer->deprojectPixel(cv::Point2i(circle.at(0).x, circle.at(0).y), &depth, camera) );
         updateLabelSteps();
 
         // enough points to perform linear regression and move into next step
diff --git a/inc/beamer.h b/inc/beamer.h
index 08d9f6fec7e3a9ee26a43236ce7d6e8940237193..12c05bdc89a8af77b236f01bc4ec818b4a62a5bf 100644
--- a/inc/beamer.h
+++ b/inc/beamer.h
@@ -84,7 +84,7 @@ class Beamer{
         int findBeamerFrom(Camera *camera);
         cv::Mat editContrast(cv::Mat image, double contrast, int brightness);
-        cv::Point3f deprojectPixel(cv::Point3i circle, cv::Mat *depth, Camera *camera);
+        cv::Point3f deprojectPixel(cv::Point2i circle, cv::Mat *depth, Camera *camera);
         std::vector<cv::Point2i> getCrossList();
         cv::Mat getCrossFrame(cv::Point2i projectedCross, int step, int max, bool circlesFound);
         cv::Point3d approximatePosition(std::vector<cv::Point3d> *bases, std::vector<cv::Point3d> *directions);
diff --git a/inc/camera.h b/inc/camera.h
index 067f0c8c2af0bb95eabf80fa5241e29734b99bfc..3e0ad99f2dff9189a6f8548890d5a8387cdde5d1 100644
--- a/inc/camera.h
+++ b/inc/camera.h
@@ -25,6 +25,7 @@ class Camera{
         Camera();
         ~Camera();
 
+        // return a float matrix of depth in meters
         cv::Mat getDepthFrame(){
             cv::Mat meters;
             cv::Mat values = cv::Mat(cv::Size(depth_frame->get_width(), depth_frame->get_height()), CV_16UC1, (void *)depth_frame->get_data(), cv::Mat::AUTO_STEP);
diff --git a/inc/sandbox.h b/inc/sandbox.h
index 2e69baeff0020af7a2a1c2dea7354e11a35989a7..b6b749701229adb4865bd1005b79f1a447dc680f 100644
--- a/inc/sandbox.h
+++ b/inc/sandbox.h
@@ -21,6 +21,7 @@ class Sandbox{
     public:
        Sandbox();
+       void init();
        cv::Mat getColorFrame();
        cv::Mat getDepthFrame();
        cv::Mat adjustProjection(cv::Mat frame);
diff --git a/src/components/beamer.cpp b/src/components/beamer.cpp
index 055151d03c841b1d561a6e6ef46ce3d63663ded2..e68745c5486652f237bc273eb62fc0a7ab032fc4 100644
--- a/src/components/beamer.cpp
+++ b/src/components/beamer.cpp
@@ -64,7 +64,7 @@ int Beamer::findBeamerFrom(Camera *camera){
         } else if (keyCode == ' '){
             if(!circles.empty()){
-                capturedPoints.push_back( deprojectPixel(circles.at(0), &depth, camera) );
+                capturedPoints.push_back( deprojectPixel( cv::Point2i(circles.at(0).x, circles.at(0).y ), &depth, camera) );
             }
         }
     }
@@ -81,12 +81,10 @@ int Beamer::findBeamerFrom(Camera *camera){
     return 0;
 }
 
-cv::Point3f Beamer::deprojectPixel(cv::Point3i circle, cv::Mat *depth, Camera *camera){
+cv::Point3f Beamer::deprojectPixel(cv::Point2i circle, cv::Mat *depth, Camera *camera){
     float coord[2] = {(float)circle.x, (float)circle.y};
-    float z = static_cast<float>(depth->at<uint16_t>(circle.y, circle.x));
-    // rs2.get_depth_frame().get_units()
-    float depth_unit = 1 / 1000.0;
-    return camera->deprojectPixelToPoint(coord, z * depth_unit);
+    float z = depth->at<float>(circle.y, circle.x);
+    return camera->deprojectPixelToPoint(coord, z);
 }
 
 void Beamer::findLinearLineFrom(std::vector<cv::Point3f> *capturedPoints, std::vector<cv::Point3d> *bases, std::vector<cv::Point3d> *directions){
diff --git a/src/components/beamerProjection.cpp b/src/components/beamerProjection.cpp
index 8513b54f8fd49aaae793bc1e6ccad4084bf667f2..180c2caecf1e6c2abf6049b981fc1251b375e89a 100644
--- a/src/components/beamerProjection.cpp
+++ b/src/components/beamerProjection.cpp
@@ -25,12 +25,14 @@ void BeamerProjection::adjustFrame(cv::Mat depth, cv::Mat src, cv::Mat &dst, Cam
     // src.size = n * camera.depth.size , where n is uint > 0
     cv::resize(src, src, getMatchingSize(src, mask));
     cv::resize(dst, dst, src.size());
-    //src.copyTo(dst);
+
+    cv::Mat pixelsDeprojectMap = cv::Mat_<cv::Point2i>(mask.height, mask.width, cv::Point2i(-1,-1));
+    cv::Mat pixelsDeprojectHighestMap = cv::Mat_<cv::Point2i>(mask.height, mask.width, cv::Point2i(-1,-1));
 
     // Browse the depth frame matching the cropping mask
     // while adapting pixels's position to the beamer's position
-    for (int j = 0; j < mask.height; j++){
-        for (int i = 0; i < mask.width; i++){
+    for (int j = 0; j < depth.rows; j++){
+        for (int i = 0; i < depth.cols; i++){
 
             // coordinates of the pixel relative to the orginial image taken from the camera
             int base_x = mask.x+i;
@@ -40,17 +42,46 @@ void BeamerProjection::adjustFrame(cv::Mat depth, cv::Mat src, cv::Mat &dst, Cam
             // pixels based on the original depth frame taken from the camera
            cv::Point2i pixel = findMatchingPixel( base_x, base_y, z, camera, beamer_pos );
 
-            // pixel relative to the cropping mask
+            // pixel relative to the cropping mask (the area where the frame is projected)
            pixel.x -= mask.x;
            pixel.y -= mask.y;
 
-            if( (0<=pixel.x && pixel.x<mask.width) && (0<=pixel.y && pixel.y<mask.height) ){
+            pixelsDeprojectMap.at<cv::Point2i>(j,i) = pixel;
+        }
+    }
+
+    for (int j = 0; j < pixelsDeprojectMap.rows; j++){
+        for (int i = 0; i < pixelsDeprojectMap.cols; i++){
+
+            cv::Point2i pixel = pixelsDeprojectMap.at<cv::Point2i>(j,i);
+
+            if(pixel.x != -1 && pixel.y != -1){
+                // check and keep the highest point at the location pointed by pixel
+                cv::Point2i defaultPoint = pixelsDeprojectHighestMap.at<cv::Point2i>(j,i);
+                if(defaultPoint.x != -1 && defaultPoint.y != -1){
+                    if(depth.at<float>(defaultPoint) <= depth.at<float>(pixel))
+                        pixel = defaultPoint;
+                }
+                pixelsDeprojectHighestMap.at<cv::Point2i>(j,i) = pixel;
+            }
+        }
+    }
+
+
+    for (int j = 0; j < pixelsDeprojectHighestMap.rows; j++){
+        for (int i = 0; i < pixelsDeprojectHighestMap.cols; i++){
+
+            cv::Point2i pixel = pixelsDeprojectHighestMap.at<cv::Point2i>(j,i);
+
+            if( (0<=pixel.x && pixel.x<depth.cols) && (0<=pixel.y && pixel.y<depth.rows) ){
                 // src and dst must be of same size
                 copyPixelsInto(pixel, dst, cv::Point2i(i,j), src, mask);
             }
         }
     }
+
+    cv::resize(src, src, dst_size);
 
 
     cv::warpAffine(dst, dst, adjustingMatrix, dst.size());
 }
diff --git a/src/components/camera.cpp b/src/components/camera.cpp
index 13970cd71be043b79572b7d5b73b7ca0832cfca0..5bbc05581fa45cb89dde392aca4a231d79724fec 100644
--- a/src/components/camera.cpp
+++ b/src/components/camera.cpp
@@ -54,6 +54,8 @@ int Camera::start(){
     // Doc presets : https://dev.intelrealsense.com/docs/d400-series-visual-presets
     sensor.set_option(RS2_OPTION_VISUAL_PRESET, RS2_RS400_VISUAL_PRESET_HIGH_DENSITY);
+    //sensor.set_option(RS2_OPTION_VISUAL_PRESET, RS2_RS400_VISUAL_PRESET_DEFAULT);
+
     // 5 = range mapped to unlimited
     spatFilter->set_option(RS2_OPTION_HOLES_FILL, 5);
diff --git a/src/lib/sandbox.cpp b/src/lib/sandbox.cpp
index e19e74418b58d67aaa377ad0946aff946500aff2..2bda963e1f3fdc6bf644aff6f1a7c2b9f928f97f 100644
--- a/src/lib/sandbox.cpp
+++ b/src/lib/sandbox.cpp
@@ -6,7 +6,7 @@
  */
 
 Sandbox::Sandbox(){
-    camera.start();
+
 }
 
 
@@ -14,6 +14,10 @@ Sandbox::Sandbox(){
  * PUBLIC
  */
 
+void Sandbox::init(){
+    camera.start();
+}
+
 cv::Mat Sandbox::getColorFrame(){
     camera.capture();
     return camera.getColorFrame()(camera.getCroppingMask());