diff --git a/README.md b/README.md index 7171b156a8d5ca87ac0bc4ab457e113a22594e03..f08af1dd7c80b421e80b36925985c46a882c9839 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,17 @@ # Library Sandbox +## Dependencies + - OS : Ubuntu 18.04 + - C++ : 11.0 + - g++ : 7.5.0 + - OpenCV : 3.2.0 + - realsense 2 : 2.35.2 + - yaml-cpp : https://github.com/jbeder/yaml-cpp @commit : ... + - Qt : 5.9.5 + +## Makefiles +Flags for the linker available through the variable DEP_SANDBOX in file dep.mk + ## Before launching your application - LD_LIBRARY_PATH must contain the path to the directory containing libsandbox.so (which is generated in /build) - Enter this command in the terminal executing your applications @@ -9,4 +21,4 @@ export LD_LIBRARY_PATH=$(pwd)/$RELATIVE_PATH_TO_SO ``` - Where $REALTIVE_PATH_TO_SO is the relative path to the generated file libsandbox.so from the current path of your terminal - - Don't forget that LD_LIBRARY_PATH is local to each terminal \ No newline at end of file + - Don't forget that LD_LIBRARY_PATH is local to each terminal diff --git a/app/SandboxSetup/beamerlocationgui.cpp b/app/SandboxSetup/beamerlocationgui.cpp index 51de4f524028709c1d874336b626bd3141c9e86a..89baca4926de71632fcae85c5887f04cca4f3056 100644 --- a/app/SandboxSetup/beamerlocationgui.cpp +++ b/app/SandboxSetup/beamerlocationgui.cpp @@ -1,3 +1,15 @@ +/* +* BeamerLocationGui +* +* Step of the calibration which allows us to approximate the position of the beamer. +* The position is approximated by checking the position of the circle target each frame (with a timer), +* and it updates the validated positions once the user "locked" a position (on the button click). +* When the click is done, we check if there are enough points, and if there are, +* we find the line passing through the points, and if we have enough lines, +* we end the routine and the position is approximated when the user validates the step. 
+* The frame process is executed in another thread to not block the application and uses the profil we established in CameraFocusGui. +*/ + #include "beamerlocationgui.h" #include "ui_beamerlocationgui.h" @@ -113,6 +125,7 @@ void BeamerLocationGui::routineFrame(){ (int)profil->getHoughAccThreshold(), minRadius, maxRadius ); + // thread safe save mut->lock(); circles = tmp_circles; mut->unlock(); @@ -141,6 +154,7 @@ void BeamerLocationGui::routineFrame(){ void BeamerLocationGui::userValidePoint(){ std::vector<cv::Point3i> tmp_circles; + // thread safe check mut->lock(); if(!circles.empty()){ tmp_circles = circles; diff --git a/app/SandboxSetup/camerafocusgui.cpp b/app/SandboxSetup/camerafocusgui.cpp index fb3c5ae146869e13999c588b31586edebe0decb4..a8a954ffe399ae5245741799978a00788d419369 100644 --- a/app/SandboxSetup/camerafocusgui.cpp +++ b/app/SandboxSetup/camerafocusgui.cpp @@ -1,8 +1,18 @@ +/* +* CameraFocusGui +* +* Step of the calibration which allows the user to set the parameters to have a better detecting of the circle target, +* since the next step need to detect easily the circle and is based on HoughCircle from OpenCV. +* The frame process is executed in another thread to not block the main application. 
+*/ + #include "camerafocusgui.h" #include "ui_camerafocusgui.h" - +/* +* Thread executing the frame process to not block the application +*/ CameraFocusGui::RefreshFrame::RefreshFrame(CameraFocusGui *_camera) : QThread() { camera = _camera; } @@ -51,7 +61,7 @@ void CameraFocusGui::showEvent(QShowEvent *event){ } -// TODO : Fixe bug timer +// TODO : Fixe bug timer (sometimes can't stop) void CameraFocusGui::closeEvent(QCloseEvent *event){ @@ -74,6 +84,14 @@ void CameraFocusGui::cancelRoutine(){ endSuccess = false; } + + + + +/* +* Frame process +*/ + void CameraFocusGui::startCapture(){ myThread->start(); } @@ -88,7 +106,8 @@ void CameraFocusGui::refreshFrame(){ std::vector<cv::Point3i> crc; if(profil->getCannyEdgeThreshold() > 0 && profil->getHoughAccThreshold() > 0){ - + + // Detect the circles in the frame double minDist = rgb.cols*(double)profil->getMinDistance()/100; double minRadius = (profil->getMinRadius()>0) ? rgb.cols*(double)profil->getMinRadius()/100 : 0; double maxRadius = (profil->getMaxRadius()>0) ? 
rgb.cols*(double)profil->getMaxRadius()/100 : 0; @@ -102,7 +121,8 @@ void CameraFocusGui::refreshFrame(){ maxRadius ); } - // Preview image for the user + // Preview image for the user, one with only the contrast and brightness edited, + // the other with the canny in addition cv::cvtColor(rgb, gray, CV_BGR2GRAY); cv::GaussianBlur(gray, gray, cv::Size(9, 9), 2, 2); gray = setup->getBeamer()->editContrast(gray, (double)profil->getContrast(), (double)profil->getBrightness()); @@ -128,6 +148,13 @@ void CameraFocusGui::refreshFrame(){ } + + + +/* +* Camera Profil +*/ + void CameraFocusGui::initCameraParams(){ FrameProcessProfil *profil = setup->getBeamer()->getProfil(); @@ -174,6 +201,14 @@ void CameraFocusGui::loadProfil(FrameProcessProfil *profilLoaded, FrameProcessPr } + + + + +/* +* Inputs GUI +*/ + void CameraFocusGui::on_btnReset_clicked() { loadProfil(&defaultProfil, setup->getBeamer()-> getProfil()); @@ -279,6 +314,9 @@ void CameraFocusGui::on_ckbxTargetSize_clicked(bool checked) } +/* +* Update the range of the inputs depending on the restriction +*/ void CameraFocusGui::switchMode(bool isRestricted){ if(isRestricted){ diff --git a/app/SandboxSetup/croppingmaskgui.cpp b/app/SandboxSetup/croppingmaskgui.cpp index d9be67f168501a4de7f405decc93ee56f7bf0dab..9da7fe4b868cb44e68adb2c9861773b3b7840887 100644 --- a/app/SandboxSetup/croppingmaskgui.cpp +++ b/app/SandboxSetup/croppingmaskgui.cpp @@ -1,3 +1,12 @@ +/* +* ProjectionGui +* +* Step of the calibration which display point of view of the video camera and allows to delimit the border of the projection. +* Since there is no drawing event, we have to add a widget (MaskEdit) which is generated with the frame and the borders drawn. +* Each time we want to modify the borders, we will edit the variable containing the borders and trigger paint event of the widget +* showing the camera with our projection and delimitations. 
+*/ + #include "croppingmaskgui.h" #include "ui_croppingmaskgui.h" @@ -66,6 +75,9 @@ void CroppingMaskGui::closeEvent(QCloseEvent *event){ blueScreen->close(); } +/* +* Initialize the frame displayed with the cropping mask by default or the one available in the configuration +*/ void CroppingMaskGui::init(){ setup->getCamera()->capture(); @@ -104,6 +116,9 @@ void CroppingMaskGui::refreshFrame(){ maskEdit->updateFrame(&cameraColoredFrame); } +/* +* checks if the mask from the configuration is valid in the frame from the camera +*/ bool CroppingMaskGui::maskValideInFrame(cv::Mat_<cv::Vec3b> *rgb){ if(rectPoints.empty()) @@ -120,7 +135,9 @@ bool CroppingMaskGui::maskValideInFrame(cv::Mat_<cv::Vec3b> *rgb){ return true; } - +/* +* Set the mask with the default values +*/ void CroppingMaskGui::on_btnClear_clicked() { if(cameraColoredFrame.size().height != 0 && cameraColoredFrame.size().width != 0){ @@ -131,6 +148,9 @@ void CroppingMaskGui::on_btnClear_clicked() } } +/* +* Set the mask with the one from the configuration (if any) +*/ void CroppingMaskGui::on_btnReset_clicked() { if(!loadedMask.empty()){ diff --git a/app/SandboxSetup/initcameragui.cpp b/app/SandboxSetup/initcameragui.cpp index 14a434dd037f4ef073ec1deadf53c9bb269c9c80..5b56aadf10565abbc04bbae56109742eb62f3706 100644 --- a/app/SandboxSetup/initcameragui.cpp +++ b/app/SandboxSetup/initcameragui.cpp @@ -1,3 +1,10 @@ +/* +* InitCameraGui +* +* Step of the calibration which checks if the camera is available and initializes it. +* The initialization is done in another thread to not block the application. 
+*/ + #include "initcameragui.h" #include "ui_initcameragui.h" diff --git a/app/SandboxSetup/main.cpp b/app/SandboxSetup/main.cpp index 7f6520d97a7f75701d1d060a0170bcfa685259b8..38abefd3fcad5b2de122c29af4dc5840f0ba4955 100644 --- a/app/SandboxSetup/main.cpp +++ b/app/SandboxSetup/main.cpp @@ -1,3 +1,6 @@ +/* +* Main executing the setup application +*/ #include <QApplication> #include "mainwindow.h" diff --git a/app/SandboxSetup/mainwindow.cpp b/app/SandboxSetup/mainwindow.cpp index 2fa7223066ece3d6e30090ee60743c416dfbfedb..1c7b0255358bdd097798d933bb2e98cd29f394ea 100644 --- a/app/SandboxSetup/mainwindow.cpp +++ b/app/SandboxSetup/mainwindow.cpp @@ -1,3 +1,9 @@ +/* +* MainWindow +* +* Controller which opens the corresponding step of the calibration by following the setup process. +*/ + #include "mainwindow.h" #include "ui_mainwindow.h" @@ -60,6 +66,9 @@ void MainWindow::closeEvent(QCloseEvent *event){ QWidget::closeEvent(event); } +/* +* Allows the step apps to send a notification to the controller with a status(int) +*/ void MainWindow::receiveNotif(int state){ // active app : InitCamera diff --git a/app/SandboxSetup/maskedit.cpp b/app/SandboxSetup/maskedit.cpp index 26568e6cf3535ded83a8107e73851f1fe17fcd8b..45c7523291340315290d6378846e64dccf241c7d 100644 --- a/app/SandboxSetup/maskedit.cpp +++ b/app/SandboxSetup/maskedit.cpp @@ -1,3 +1,11 @@ +/* +* MaskEdit +* +* This widget allows us to draw the widget each time the paintevent is triggered. +* The widget display an image with a rectangle based on the points contained in +* its variables defining the corners. 
+*/ + #include "maskedit.h" #include "ui_maskedit.h" diff --git a/app/SandboxSetup/monitorgui.cpp b/app/SandboxSetup/monitorgui.cpp index d2dbd2a6aae63cfec673a886e1a93a49aecab71c..24f8a5be8efa826a468110e8f3d732bb43c06535 100644 --- a/app/SandboxSetup/monitorgui.cpp +++ b/app/SandboxSetup/monitorgui.cpp @@ -1,3 +1,12 @@ +/* +* MonitorGui +* +* Step of the calibration which allows to choose the output matching the beamer +* and to select the current active resolution of the beamer +* (doesn't change the active resolution of the beamer). +* This step parses the output of the xrandr command and show the result in the GUI. +*/ + #include "monitorgui.h" #include "ui_monitorgui.h" diff --git a/app/SandboxSetup/projectiongui.cpp b/app/SandboxSetup/projectiongui.cpp index 92422001da0db6a0f36a83f76c96704dde26b954..6aed1ae848f31908d5d4629ed0e556177d51f72a 100644 --- a/app/SandboxSetup/projectiongui.cpp +++ b/app/SandboxSetup/projectiongui.cpp @@ -1,3 +1,9 @@ +/* +* ProjectionGui +* +* Step of the calibration which display an image in fullscreen and show the camera point of view +*/ + #include "projectiongui.h" #include "ui_projectiongui.h" diff --git a/app/SandboxSetup/qtfullscreen.cpp b/app/SandboxSetup/qtfullscreen.cpp index e51a3c4f156d8855a887d570892615aab8720b83..036e6cd063e70d4060f7c4891b5bf83b1c0f2ee2 100644 --- a/app/SandboxSetup/qtfullscreen.cpp +++ b/app/SandboxSetup/qtfullscreen.cpp @@ -1,3 +1,9 @@ +/* +* QtFullScreen +* +* Window which allows to display an image in fullscreen. 
+*/ + #include "qtfullscreen.h" QtFullScreen::QtFullScreen(QRect reso, bool _isBorderless, QWidget *parent) : QDialog(parent) diff --git a/app/SandboxSetup/saveconfiggui.cpp b/app/SandboxSetup/saveconfiggui.cpp index 16f341cd087d0cc08ba2b04d03364eee0a90f860..c1fb6ea3028d8ade7e0e6f2a21bfd4f0c76f9e97 100644 --- a/app/SandboxSetup/saveconfiggui.cpp +++ b/app/SandboxSetup/saveconfiggui.cpp @@ -1,3 +1,9 @@ +/* +* SaveConfigGui +* +* Step of the calibration which tries to save the configuration in a file. +*/ + #include "saveconfiggui.h" #include "ui_saveconfiggui.h" diff --git a/app/SandboxSetup/subapp.cpp b/app/SandboxSetup/subapp.cpp index 87cfe32a89f3bc0f3cb49d8571a4fc7528dce18e..811397f30d4355b0cff7ea646820d923d1db5df1 100644 --- a/app/SandboxSetup/subapp.cpp +++ b/app/SandboxSetup/subapp.cpp @@ -1,3 +1,9 @@ +/* +* SubApp +* +* Widget which is a step in the calibration process. +*/ + #include "subapp.h" SubApp::SubApp(std::string _title, std::string msg, QWidget *parent) : QWidget(parent) @@ -15,7 +21,13 @@ bool SubApp::checkRoutine(){ return true; } +/* +* Must be triggered when a step is completed and it needs to move to the next step +*/ void SubApp::valideRoutine(){ } +/* +* Must be triggered when a step is canceled and it needs to move to the previous step +*/ void SubApp::cancelRoutine(){ } diff --git a/inc/camera.h b/inc/camera.h index ba76e9d07261b66e628374abb89da24aa5dcc831..6bd66a3868983ab140bafd42dbe74ea7621c2165 100644 --- a/inc/camera.h +++ b/inc/camera.h @@ -1,5 +1,5 @@ -#ifndef CAMERA_H -#define CAMERA_H +#ifndef SANDBOX_CAMERA_H +#define SANDBOX_CAMERA_H #include <librealsense2/rs.hpp> #include <librealsense2/rsutil.h> @@ -7,17 +7,28 @@ class Camera{ private: + // spatial filter, improve depth data accuracy rs2::spatial_filter *spatFilter; + // temporal filter, improve depth data stability rs2::temporal_filter *tempFilter; + // config can manage RealSense cameras and streams rs2::config *cfg; + // manage the streams of cameras rs2::pipeline *pipe; + // 
manage the stream's of color and depth frames based on the depth point of view rs2::align *align_to_depth; - + + // frame objects from the camera rs2::video_frame *color_frame; rs2::depth_frame *depth_frame; + // intrisic profil of the camera based on the objective and its configuration + // (this won't be the same profil for the video camera or the depth camera + // and depends on the configuration of the stream(resolution,...) too) rs2_intrinsics intr_profile; + // factor to normalize the datas from depth frame to distances in meter float depth_scale; + // Cropping mask (config parameter determined by the calibration), allows to set the ROI(range of interest) in an OpenCV Matrix cv::Rect croppingMask; void warmUpDepthLens(); @@ -27,20 +38,23 @@ class Camera{ Camera(); ~Camera(); - // return values from depth matrix to real world (matrix of floats in meter) + // Return values from depth matrix to real world (matrix of floats in meter) + // the depths are the Z values from a 3D orthonormed system, where (0,0,0) is defined by the position of the camera + // so it's not the distance from the camera to the point, it's its Z value cv::Mat getDepthFrame(); cv::Mat getColorFrame(); void setCroppingMask(cv::Rect mask){ croppingMask = mask; }; cv::Rect getCroppingMask(){ return croppingMask; }; int start(); + void capture(); void stop(); cv::Point3f deprojectPixelToPoint(float coord[], float z); cv::Point3f deprojectPixelToPoint(float coord[], float z, cv::Point2f fxy, cv::Point2f ppxy); cv::Point2i projectPointToPixel(cv::Point3f point3D); cv::Point2i projectPointToPixel(cv::Point3f point3D, cv::Point2f fxy, cv::Point2f ppxy); std::vector<cv::Point2f> getAdaptedIntrinsics(cv::Mat &projection); - void capture(); + void printCroppingMask(); }; diff --git a/inc/frameProcessProfil.h b/inc/frameProcessProfil.h index 5a0bfe6b77b95cdfd92d68b98226e8e74b23653f..6282b5c892e8473ed2c036ff370b31acc52d7ead 100644 --- a/inc/frameProcessProfil.h +++ b/inc/frameProcessProfil.h @@ -1,6 
+1,13 @@ +/* +* FrameProcessProfil +* +* Profil used for image processing in the setup routines. +*/ + #ifndef SANDBOX_FRAME_PROCESS_PROFIL_H #define SANDBOX_FRAME_PROCESS_PROFIL_H + class FrameProcessProfil{ private: // Profil to process on gray scaled images diff --git a/inc/projection.h b/inc/projection.h index bfa8670f90f2904a3dda0d2212b7a48b7c19cb5d..1af63617c1a34c595d4dd9f94bb3ef74ddd3044d 100644 --- a/inc/projection.h +++ b/inc/projection.h @@ -21,7 +21,7 @@ class Projection{ // Buffer indicating from where to get the pixels in the source frame to build the output cv::Mat_<cv::Point2i> frameMap; - // intrinsics parameters for deprojection, which are adapted to projection's resolution + // intrinsics parameters for deprojection, which are adapted to the projection's resolution cv::Point2f fxy; cv::Point2f ppxy; @@ -42,7 +42,7 @@ class Projection{ float getDistanceTopSandbox(){ return distanceTopSandbox; }; cv::Point2i rotatePixel(cv::Point2i center, double angle, cv::Point2i pixel); - cv::Point2i revertRotatePixel(cv::Point2i center, double angle, cv::Point2i pixel); + cv::Point2i revertRotatePixel(cv::Point2i center, double angle, cv::Point2i rotatedPixel); void adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst, Camera *camera, cv::Point3f beamer_pos); void printAdjustingMatrix(); diff --git a/src/components/beamer.cpp b/src/components/beamer.cpp index d5a4f5cc9e882a159a5f096c94d7b8c67c6a656e..f8a9c5a2042a907b2b21a688bfbfe571fdfadac3 100644 --- a/src/components/beamer.cpp +++ b/src/components/beamer.cpp @@ -1,5 +1,15 @@ +/* +* Beamer +* +* Contains all the functions needed to approximate the position of the beamer. 
+* Contains also the position of the beamer, his resolution and the profil for the image process +* which allows us to easily detect the target in the beamer's location approximation routine +*/ + #include "../../inc/beamer.h" + + /* * Main */ @@ -23,6 +33,11 @@ Beamer::~Beamer(){ */ + +/* +* Return the list of the crosses which will be projected in the sandbox +* They are needed because the user need to match the position of those crosses to approximate the position of the beamer +*/ std::vector<cv::Point2i> Beamer::getCrossList(){ std::vector<cv::Point2i> points; @@ -35,8 +50,16 @@ std::vector<cv::Point2i> Beamer::getCrossList(){ } +/* +* Edit the contrast and brightness in an image +* +* image : gray scaled image to update +* Return a new image based on the parameter +*/ +// TODO : changer cv::Mat en cv::Mat_<float> cv::Mat Beamer::editContrast(cv::Mat image, double contrast, int brightness){ + // TODO : modifier l'image directement, ne pas créer une copie cv::Mat new_image = cv::Mat::zeros( image.size(), image.type() ); double alpha = contrast; int beta = brightness; @@ -51,7 +74,12 @@ cv::Mat Beamer::editContrast(cv::Mat image, double contrast, int brightness){ return new_image; } + +/* +* Find the circles detected in an image with HoughCircles (parameters are describe in the function) +*/ // TODO : modifier pour avoir une matrice gray_scale en param à la place de rgb +// + effectuer le filtre gaussien et editContrast en dehors de la fonction std::vector<cv::Point3i> Beamer::findCircles(cv::Mat &rgb, double contrast, int brightness, double centersMinDist, int cannyEdgeThreshold, int houghAccThreshold, double minRadius, double maxRadius){ cv::Mat src_gray; @@ -59,12 +87,15 @@ std::vector<cv::Point3i> Beamer::findCircles(cv::Mat &rgb, double contrast, int circles.clear(); cv::cvtColor(rgb, src_gray, CV_BGR2GRAY); - /// Reduce the noise so we avoid false circle detection + // Reduce the noise so we avoid false circle detection cv::GaussianBlur(src_gray, src_gray, 
cv::Size(9, 9), 2, 2); src_gray = editContrast(src_gray, (double)contrast, (double)brightness); - /// Apply the Hough Transform to find the circles - // source, output, method, inverse ratio of resolution, Minimum distance between detected centers, threeshold canny, threeshold center, min radius, max radius + + // Apply the Hough Transform to find the circles + // Parameters : source, output, method, dp, min_dist, param_1, param_2, min radius, max radius + + // method : Only CV_HOUGH_GRADIENT is availible // dp : Inverse resolution for the accumulator matrixe => image_resolution * dp = acc_resolution // min_dist : Minimal distance between the detected centers // param_1 : Upper threshold of the canny edge detector, determines if a pixel is an edge @@ -73,6 +104,7 @@ std::vector<cv::Point3i> Beamer::findCircles(cv::Mat &rgb, double contrast, int // max_radius : Max radius of the circles drawn on the accumulator cv::HoughCircles(src_gray, circles, CV_HOUGH_GRADIENT, 1, centersMinDist, (double)cannyEdgeThreshold, (double)houghAccThreshold, minRadius, maxRadius); + // Point with (x,y) and radius of the circle std::vector<cv::Point3i> result; if (!circles.empty()) { @@ -84,12 +116,21 @@ std::vector<cv::Point3i> Beamer::findCircles(cv::Mat &rgb, double contrast, int } +/* +* Build the frame to project in the sandbox, the frame has the cross drawn on it, +* the current step the user is at, and a square indicating if a target's been detected +* +* projectedCross : point indicating the coordinates of the cross in an image +* Return a BGR image +*/ +// TODO : changer cv::Point par cv::Point2i cv::Mat Beamer::buildCrossFrame(cv::Point projectedCross, int step, int max, bool circlesFound){ cv::Mat frameImage(resolution, CV_8UC3, cv::Scalar(0, 0, 0)); cv::Scalar red = cv::Scalar(0, 0, 255); cv::Scalar color = (circlesFound) ? 
cv::Scalar(0, 180, 0) : cv::Scalar(0, 0, 255); + // draw the cross cv::line(frameImage, cv::Point(projectedCross.x, 0), cv::Point(projectedCross.x, frameImage.rows - 1), red, 4); cv::line(frameImage, cv::Point(0, projectedCross.y), cv::Point(frameImage.cols - 1, projectedCross.y), red, 4); @@ -111,6 +152,11 @@ cv::Mat Beamer::buildCrossFrame(cv::Point projectedCross, int step, int max, boo } +/* +* Find the best-fit line passing through the captured 3D points, +* the line is described by a point on this line and a vector describing its direction, +* the point is saved in bases and the direction in directions +*/ void Beamer::findLinearLine(std::vector<cv::Point3f> *capturedPoints, std::vector<cv::Point3d> *bases, std::vector<cv::Point3d> *directions){ cv::Vec6f line; @@ -189,7 +235,8 @@ int Beamer::LineLineIntersect( /* -* Approxime la position du beamer en 3D en ce basant sur 3 droites +* Approximate the position of the beamer with the 3 lines (described by their base and direction) +* Find the 3D points closest to each line and return the average (which is the approximated position) */ cv::Point3d Beamer::approximatePosition(std::vector<cv::Point3d> *bases, std::vector<cv::Point3d> *directions){ @@ -197,6 +244,7 @@ cv::Point3d Beamer::approximatePosition(std::vector<cv::Point3d> *bases, std::ve double mua; double mub; std::vector<cv::Point3d> beamerPoints; + // pairs up the 3 lines with one another int selected[3][2] = { {0,1}, {0,2}, {1,2} }; for(int i=0; i < 3; i++){ @@ -219,6 +267,12 @@ cv::Point3d Beamer::approximatePosition(std::vector<cv::Point3d> *bases, std::ve } +/* +* Return the 3D point matching the center of the detected circle in the image from the camera +* +* circle : Coordinates (x,y) of the detected circle +* depth : Depth matrix retrieved from the depth camera +*/ cv::Point3f Beamer::deprojectPixel(cv::Point2i circle, cv::Mat *depth, Camera *camera){ float coord[2] = {(float)circle.x, (float)circle.y}; float z = 
depth->at<float>(circle.y, circle.x); diff --git a/src/components/camera.cpp b/src/components/camera.cpp index 4dc25d8abbfa30128a9c74dc42c5e651fef46880..321fe80236b4ed0546021411edda5001cbfd4e6d 100644 --- a/src/components/camera.cpp +++ b/src/components/camera.cpp @@ -1,3 +1,10 @@ +/* +* Camera +* +* Interface for the camera interactions, based on librealsense +* Contains also the croppingMask which determines the zone of the projection in the frames of the camera +*/ + #include "../../inc/camera.h" @@ -25,11 +32,15 @@ Camera::~Camera(){ } + /* * Public */ +/* +* Get a depth frame with values in meter +*/ cv::Mat Camera::getDepthFrame(){ static cv::Mat values = cv::Mat(depth_frame->get_height(), depth_frame->get_width(), CV_16UC1); static cv::Mat meters = cv::Mat(depth_frame->get_height(), depth_frame->get_width(), CV_32FC1); @@ -41,6 +52,10 @@ cv::Mat Camera::getDepthFrame(){ return meters.clone(); }; + +/* +* Get a frame from the color camera +*/ cv::Mat Camera::getColorFrame(){ static cv::Mat colors = cv::Mat(color_frame->get_height(), color_frame->get_width(), CV_8UC3); colors.data = (uchar*)color_frame->get_data(); @@ -48,7 +63,9 @@ cv::Mat Camera::getColorFrame(){ }; - +/* +* Start the camera and indicates if it's availible or not +*/ int Camera::start(){ // check for a device available @@ -90,13 +107,20 @@ int Camera::start(){ } +/* +* Stop the camera +*/ void Camera::stop(){ pipe->stop(); } +/* +* Take a picture and update the buffer of the video and depth frames +*/ void Camera::capture(){ + // Warning : Blocking instruction ! 
auto frameset = pipe->wait_for_frames(); // Trying to get frames from the depth perspective (aligned on depth camera) @@ -107,14 +131,16 @@ void Camera::capture(){ if(depthFrame && colorFrame){ filterDepthFrame(depthFrame); - - // Values relative to camera (not in meter) - depth_frame->swap(depthFrame); + // TODO : vérifier que le temp_filter fonctionne correctement même avec un swap des buffers et qu'il n'y ait pas de memory leak + depth_frame->swap(depthFrame); // depth values not in meter ! color_frame->swap(colorFrame); } } -// Get the coordinates of the pixel matching the point relative to the real world + +/* +* Get the coordinates of the pixel matching the point relative to the real world and to the profil of the camera +*/ cv::Point2i Camera::projectPointToPixel(cv::Point3f point3D){ float point[3] = {point3D.x, point3D.y, point3D.z}; @@ -124,7 +150,9 @@ cv::Point2i Camera::projectPointToPixel(cv::Point3f point3D){ } -// Get the point relative to the real world matching the coordinates of the pixel +/* +* Get the point relative to the real world matching the coordinates of the pixel based on the profil of the camera +*/ cv::Point3f Camera::deprojectPixelToPoint(float coord[], float z){ float p[3]; @@ -133,14 +161,36 @@ cv::Point3f Camera::deprojectPixelToPoint(float coord[], float z){ } + + + /* - Custom function based on librealsense, take f and pp as args, +* Custom functions based on librealsense +*/ + + +/* + Note on project Point To Pixel (and vice-versa) : + + Take f and pp as args and used to project a point to pixel and vice-versa, they are related to the camera's profil, but adapted to what we want to display, - the limits of the pixels match the limits of the camera's frame and + the limits of the pixels match the limits of the camera's frame in 3D and the 3D projection match the camera's too Works in our case, because our camera's profil is RS2_DISTORTION_BROWN_CONRADY - (profil describing what kind of distoration is applied on the frame to 
adjust on the lens) + (profil describing what kind of distoration is applied on the frame to adjust on the lens forme) +*/ + + +/* +* Project a 3D point to the corresponding pixel of the image taken by the depth camera, +* but the resolution determined by f and the center of the matrix determined by pp are +* variables depending on the image which point comes from +* +* point : 3D coordinates of a point +* z : z distance of a depth matrix +* f : Functions x and y describing the distance of pixels matching the real world coordinates +* pp : Coordinates x and y of the pixel matching the center of the camera Point Of View (coordinates (0,0,z) in the real world matching this pixel) */ cv::Point2i Camera::projectPointToPixel(cv::Point3f point, cv::Point2f f, cv::Point2f pp){ @@ -149,6 +199,17 @@ cv::Point2i Camera::projectPointToPixel(cv::Point3f point, cv::Point2f f, cv::Po return cv::Point2i( x*f.x+pp.x, y*f.y+pp.y); } + +/* +* Project a pixel to the corresponding 3D point based on the 3D point of view of the image taken by the depth camera, +* but the resolution determined by f and the center of the matrix determined by pp are +* variables depending on the image which point comes from and match the depth matrix in reality +* +* pixel : 2D coordinates of a pixel +* z : z distance of a depth matrix +* f : Functions x and y describing the distance of pixels matching the real world +* pp : Coordinates x and y of the pixel matching the center of the camera Point Of View (coordinates (0,0,z) in the real world matching this pixel) +*/ cv::Point3f Camera::deprojectPixelToPoint(float pixel[], float z, cv::Point2f f, cv::Point2f pp){ float x = (pixel[0] - pp.x) / f.x; @@ -156,6 +217,13 @@ cv::Point3f Camera::deprojectPixelToPoint(float pixel[], float z, cv::Point2f f, return cv::Point3f(z*x, z*y, z); } + +/* +* Get the intrincis parameters (f and pp) adapted from the camera's profil +* to the image matching the depth frame but with a different resolution. 
+* +* projection : Image which needs to match the depth frame 3D projection +*/ std::vector<cv::Point2f> Camera::getAdaptedIntrinsics(cv::Mat &projection){ float fx = static_cast<float>(intr_profile.fx * projection.size().width) / croppingMask.width; @@ -170,6 +238,17 @@ std::vector<cv::Point2f> Camera::getAdaptedIntrinsics(cv::Mat &projection){ } + + + + + + +/* +* Debug +*/ + + void Camera::printCroppingMask(){ cv::Rect mask = getCroppingMask(); std::cout << "(" << mask.x << "," << mask.y << ") + " << mask.width << "x" << mask.height << std::endl; @@ -178,11 +257,19 @@ void Camera::printCroppingMask(){ + + + /* * Private */ -// Capture 30 frames to give autoexposure, etc. a chance to settle + + + +/* +* Capture 30 frames to give autoexposure, etc. a chance to settle +*/ void Camera::warmUpDepthLens() { for (int i = 0; i < 30; ++i) @@ -193,6 +280,10 @@ void Camera::warmUpDepthLens() } } + +/* +* Apply filters to get a better depth frame +*/ void Camera::filterDepthFrame(rs2::depth_frame &frameDepth) { frameDepth = spatFilter->process(frameDepth); diff --git a/src/components/projection.cpp b/src/components/projection.cpp index 21988dceefd35af79144234043139f1caf1f4a14..7d3f76aedcf2cb0fa125879a60eb51b4ef1fa8e7 100644 --- a/src/components/projection.cpp +++ b/src/components/projection.cpp @@ -1,5 +1,12 @@ +/* +* Projection +* +* Class which adapts a projected image to the topology of the sandbox and the beamer point of view. +* Contains the matrix to compensate for the rotation of the beamer +* and the distance to the plane at the top of the sandbox. 
+*/ + #include "../../inc/projection.h" -#include <chrono> /* * MAIN @@ -10,27 +17,15 @@ Projection::Projection(){ distanceTopSandbox = 1.0f; } -cv::Point2i Projection::rotatePixel(cv::Point2i center, double angle, cv::Point2i pixel){ - - cv::Mat_<float> matRotation = cv::getRotationMatrix2D(center, angle, 1); - cv::Mat tmp = (cv::Mat_<cv::Vec2f>(1, 1) << cv::Vec2f(pixel.x, pixel.y)); - cv::transform(tmp, tmp, matRotation); - return cv::Point2i(tmp.at<cv::Vec2f>(0, 0)); -} - -cv::Point2i Projection::revertRotatePixel(cv::Point2i center, double angle, cv::Point2i pixel){ - - cv::Mat_<float> matRotation = cv::getRotationMatrix2D(center, angle, 1); - cv::Mat tmp = (cv::Mat_<cv::Vec2f>(1, 1) << cv::Vec2f(pixel.x, pixel.y)); - cv::Mat invMat; - cv::invertAffineTransform(matRotation, invMat); - cv::transform(tmp, tmp, invMat); - return cv::Point2i(tmp.at<cv::Vec2f>(0, 0)); -} - /* - Adjust the projected frame with the topology from the camera to the beamer POV +* Adjust the projected frame with the topology from the camera to the beamer POV (point of view) +* +* depth : Topology of the sandbox under the projection +* src : Image source projected to adjust to the topology +* dst : Output which will contain the adapted image +* camera : Active camera +* beamer_pos : 3D position of the beamer relative to the camera */ void Projection::adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst, Camera *camera, cv::Point3f beamer_pos){ @@ -56,39 +51,31 @@ void Projection::adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, c cv::resize(src, resized_src, dst.size()); cv::resize(depth, resized_depth, dst.size()); - // 75 ms - //std::chrono::milliseconds start_ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() ); deprojectPixelsFromDepth(resized_depth, camera, camera->getCroppingMask() , beamer_pos, deprojectMap, fxy, ppxy); - //std::chrono::milliseconds now_ms = 
std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() ); - //std::cout << "Deproject : " << (now_ms - start_ms).count() << std::endl; - - // 18-19 ms - //start_ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() ); filterLowestDeprojectedPoints(resized_depth, deprojectMap, frameMap); - //now_ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() ); - //std::cout << "Filter : " << (now_ms - start_ms).count() << std::endl; - - // 14-15 ms - //start_ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() ); buildFrame(resized_depth, frameMap, resized_src, dst); - //now_ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() ); - //std::cout << "Build : " << (now_ms - start_ms).count() << std::endl; - holeFilling(dst, frameMap); - - cv::warpAffine(dst, dst, adjustingMatrix, dst.size()); + cv::warpAffine(dst, dst, adjustingMatrix, dst.size()); // apply the rotation on the image } /* - * PRIVATE + * Private */ /* - Deproject pixels in 3D, then adapt to Beamer's POV, and go back to 2D - This gives us the location od pixels adapted to the Beamer projection +* Deproject pixels in 3D, then adapt to Beamer's POV, and go back to 2D +* This gives us the location of pixels adapted to the Beamer projection +* +* depth : Topology of the sandbox under the projection +* camera : Active camera +* mask : ROI (Range Of Interest) delimiting the zone of the projection from the camera POV +* beamer_pos : 3D position of the beamer relative to the camera +* deprojectMap : Indicates for each pixel of src, where it'll be displayed +* fxy : function x and y adapted to the projection matching the original depth matrix(without ROI) of the camera +* ppxy : coordinates x and y of the central pixel adapted to 
the projection matching the original depth matrix(without ROI) of the camera */ void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap, cv::Point2f fxy, cv::Point2f ppxy){ @@ -116,11 +103,12 @@ void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera } /* - Save the highest points in deprojectMap into frameMap, - because some points can be deprojected at the same location - - frameMap indicates for each pixel of dst, where it should get the value from in src - deprojectMap indicates for each pixel, where it'll be displayed +* Save the highest points in deprojectMap into frameMap, +* because some points can be deprojected at the same location +* +* depth : Topology of the sandbox matching the projection +* deprojectMap : Indicates for each pixel of src, where it'll be displayed +* frameMap : Indicates for each pixel of dst, where it should get the value from in src */ void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap){ @@ -150,10 +138,15 @@ void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_< /* - Build the frame using frameMap, - where each pixel describes in which pixel of the source it should take the value from - dst[i] = src[frameMap[i]] +* Build the frame using frameMap, we assume that all the buffers have the same size +* where each pixel describes in which pixel of the source it should take the value from +* dst[i] = src[frameMap[i]] +* +* frameMap : The map describing where are the source pixels for each pixel in our output image +* src : Image source to adapt +* dst : Output image adapted from src to project */ +// TODO : enlever depth en paramètre et vérifier que le pixel soit dans la range d'un des autres buffer void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> 
&src, cv::Mat_<cv::Vec3b> &dst){ for (int i = 0; i < frameMap.rows; i++){ @@ -170,10 +163,14 @@ void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frame } } + /* - fill with value of the 1st non null neighbour - Fixe only the holes formed by the deprojection round up coordinates (because deproject goes from 3D floats values to 2D uint), - not the big deprojection (like a hand moving above the sand). +* Fixe holes formed by the deprojection due to round up coordinates +* (because deproject goes from 3D floats values to 2D uint), +* by filling with value of the 1st non null neighbour +* +* dst : The output image to project +* frameMap : The map describing where are the source pixels for each pixel in our output image */ void Projection::holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap){ @@ -211,17 +208,28 @@ void Projection::holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &fr } } + /* - C : Camera position - B : Beamer position - P : Point computed by camera depth - V : Point adjusted to plan - A : Point of the right-angle triangle PAB - E : Point of the right-angle triangle VEP - - Where - CP : distance from camera to point (value of depth_frame) - CB : distance from camera to beamer (beamer's position is relative to the camera) +* C : Camera position +* B : Beamer position +* P : Point computed by camera depth +* V : Point at the plane height adjusted to the beamer POV (Point Of View) +* E : Point of the right-angle triangle VEB at the plane height +* A : Point of the right-angle triangle PAB at the point P height +* +* Where +* CP : distance from camera to point (value of depth_frame) +* CB : distance from camera to beamer (beamer's position is relative to the camera) +* +* i : y coordinate of the pixel to adjust +* j : x coordinate of the pixel to adjust +* z : Z coordinate of the pixel from the depth matrix +* camera : Camera active +* CB : Position of the beamer relative to the camera (vector 
camera-beamer) +* fxy : function x and y adapted to the projection matching the original depth matrix(without ROI) of the camera +* ppxy : coordinates x and y of the central pixel adapted to the projection matching the original depth matrix(without ROI) of the camera +* +* Return the coordinates of the pixel source adapted to the beamer POV */ cv::Point2i Projection::findMatchingPixel(int i, int j, float z, Camera *camera, cv::Point3f CB, cv::Point2f fxy, cv::Point2f ppxy){ @@ -241,6 +249,46 @@ cv::Point2i Projection::findMatchingPixel(int i, int j, float z, Camera *camera, } + + + + +/* +* Sandbox Setup purpose +*/ + +// TODO : move rotatePixel and revertRotatePixel in SandboxSetup (they don't depend on Projection anymore) + + +/* +* Rotate a pixel to compensate for the rotate of the beamer +*/ +cv::Point2i Projection::rotatePixel(cv::Point2i center, double angle, cv::Point2i pixel){ + + cv::Mat_<float> matRotation = cv::getRotationMatrix2D(center, angle, 1); + cv::Mat tmp = (cv::Mat_<cv::Vec2f>(1, 1) << cv::Vec2f(pixel.x, pixel.y)); + cv::transform(tmp, tmp, matRotation); + return cv::Point2i(tmp.at<cv::Vec2f>(0, 0)); +} + +/* +* Rotate back a rotated pixel to match the projection of the beamer +*/ +cv::Point2i Projection::revertRotatePixel(cv::Point2i center, double angle, cv::Point2i rotatedPixel){ + + cv::Mat_<float> matRotation = cv::getRotationMatrix2D(center, angle, 1); + cv::Mat tmp = (cv::Mat_<cv::Vec2f>(1, 1) << cv::Vec2f(pixel.x, pixel.y)); + cv::Mat invMat; + cv::invertAffineTransform(matRotation, invMat); + cv::transform(tmp, tmp, invMat); + return cv::Point2i(tmp.at<cv::Vec2f>(0, 0)); +} + + + + + + /* * Debug */ diff --git a/src/lib/sandbox.cpp b/src/lib/sandbox.cpp index bd551812e9b8881235a20ef84329eefb12154717..0dd37126a98b2b00d70e643c15a0b37efece9454 100644 --- a/src/lib/sandbox.cpp +++ b/src/lib/sandbox.cpp @@ -1,3 +1,9 @@ +/* +* Sandbox +* +* Interface for the devs to create applications using the sandbox +*/ + #include "../../inc/sandbox.h" 
diff --git a/src/lib/sandboxSetup.cpp b/src/lib/sandboxSetup.cpp index f0596ca6360483fb6ea07c12fc5ea3378d803108..0305b5c6b061eb47d942b9d457ca2b5f3ac64707 100644 --- a/src/lib/sandboxSetup.cpp +++ b/src/lib/sandboxSetup.cpp @@ -1,3 +1,9 @@ +/* +* Sandbox +* +* Interface for the devs to create other setup applications +*/ + #include "../../inc/sandboxSetup.h" @@ -13,6 +19,7 @@ SandboxSetup::~SandboxSetup(){ delete projection; } + // // PUBLIC // @@ -74,6 +81,7 @@ int SandboxSetup::loadFrameProcessProfil(){ * * center : center of the rotation in the projected frame */ +// TODO : retourner la valeur sans set les variables void SandboxSetup::setupAdjustMatrix(std::vector<cv::Point2i> rectPoints, cv::Point2i center){ // Set adjusting matrix for the projection @@ -90,6 +98,7 @@ void SandboxSetup::setupAdjustMatrix(std::vector<cv::Point2i> rectPoints, cv::Po * pts[2] : bottom right * pts[3] : top right */ +// TODO : retourner la valeur sans set les variables void SandboxSetup::setupCroppingMask(std::vector<cv::Point2i> rectPoints, cv::Point2i center){ // Set cropping mask diff --git a/src/tools/sandboxConfig.cpp b/src/tools/sandboxConfig.cpp index 93cbfe0da7f73afdd019d767d0dcdacfc7de8acb..d306fb5489038578f5bec16481dd0435544fd805 100644 --- a/src/tools/sandboxConfig.cpp +++ b/src/tools/sandboxConfig.cpp @@ -1,8 +1,24 @@ +/* +* SandboxConfig +* +* Static class which allows to read and write the variables needed for the +* Sandbox usage and the SandboxSetup calibration routines in a YAML file. 
+*/ + #include "../../inc/sandboxConfig.h" static int saveConfigIn(char *path, YAML::Node config); + + + +/* +* Write in file functions +*/ + + + int SandboxConfig::saveAdjustingMatrixInto(char *path, cv::Mat_<float> matrix, double angle){ // convert matrix into a one layer vector @@ -142,6 +158,10 @@ int SandboxConfig::saveFrameProcessProfilInto(char *path, FrameProcessProfil pro +/* +* Read in file functions +*/ + @@ -353,6 +373,9 @@ int SandboxConfig::loadFrameProcessProfilFrom(char *path, FrameProcessProfil *pr return 0; } +/* +* Private +*/ static int saveConfigIn(char *path, YAML::Node config){