diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..2388c533cd377d1d6e8ff08e6d2d493fe784b889
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+.idea/
+build/
\ No newline at end of file
diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..82c488707ece7f69c355d8e0055d23b9f73e08a3
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,68 @@
+cmake_minimum_required(VERSION 3.10)
+project(libsandbox)
+
+# Library version definition
+set(LIBNAME "sandbox")
+set(LIB_MINOR_VERS "0.0")
+set(LIB_MAJOR_VERS "1")
+set(LIB_VERS "${LIB_MAJOR_VERS}.${LIB_MINOR_VERS}")
+set(LIB_FULL_NAME "${LIBNAME}.so.${LIB_VERS}")
+
+# Define the output directory for shared libraries
+set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+
+# Ensure the directory exists (not strictly necessary, but good practice)
+file(MAKE_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})
+
+# Find OpenCV (and the other required libraries)
+find_package(OpenCV REQUIRED)
+include_directories(inc ${OpenCV_INCLUDE_DIRS})
+
+find_package(realsense2 REQUIRED)
+include_directories(inc ${realsense2_INCLUDE_DIR})
+
+find_package(yaml-cpp REQUIRED)
+include_directories(inc ${YAML_CPP_INCLUDE_DIRS})
+
+# Include directories (headers)
+include_directories(inc)
+
+# Collect all source files from src/ and its subdirectories
+file(GLOB_RECURSE SOURCES
+    src/components/*.cpp
+    src/lib/*.cpp
+    src/tools/*.cpp
+)
+
+# Build the shared library
+add_library(${LIBNAME} SHARED ${SOURCES})
+set_target_properties(${LIBNAME} PROPERTIES OUTPUT_NAME ${LIBNAME} VERSION ${LIB_VERS} SOVERSION ${LIB_MAJOR_VERS})
+
+target_link_libraries(${LIBNAME} ${OpenCV_LIBS})
+target_link_libraries(${LIBNAME} realsense2)
+target_link_libraries(${LIBNAME} ${YAML_CPP_LIBRARIES})
+
+# Compilation options
+target_compile_options(${LIBNAME} PRIVATE
+    -std=c++11   # use C++11
+    -Wall        # standard warnings
+    -Wextra      # extra warnings
+    -g           # debug symbols
+    -fPIC        # Position Independent Code (required for shared libraries)
+)
+
+# Create the versioned symbolic links after the build
+add_custom_command(TARGET ${LIBNAME} POST_BUILD
+    COMMAND ln -sf lib${LIB_FULL_NAME} lib${LIBNAME}.so.${LIB_MAJOR_VERS}
+    COMMAND ln -sf lib${LIBNAME}.so.${LIB_MAJOR_VERS} lib${LIBNAME}.so
+    WORKING_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
+    COMMENT "Creating symbolic links for ${LIBNAME}"
+)
+
+# Install the library and the header files
+install(TARGETS ${LIBNAME} DESTINATION lib)
+install(DIRECTORY inc/ DESTINATION include)
+
+# Install the symbolic links
+install(CODE "execute_process(COMMAND ln -sf lib${LIB_FULL_NAME} ${CMAKE_INSTALL_PREFIX}/lib/lib${LIBNAME}.so.${LIB_MAJOR_VERS})")
+install(CODE "execute_process(COMMAND ln -sf lib${LIB_FULL_NAME} ${CMAKE_INSTALL_PREFIX}/lib/lib${LIBNAME}.so)")
diff --git a/README.md b/README.md
index 2f320f42fbdc44217ee043b30457b7cd7968f89f..a5d90b0926a2444bcee21cf63ae7902979967d52 100644
--- a/README.md
+++ b/README.md
@@ -5,31 +5,25 @@
  - The camera must be pluged with an usb 3.0 extension (otherwise the resolution of the frames will be lowered)
 
 ## Depedencies
- - OS : Ubuntu 18.04
+ - OS : Arch (Endeavour), Ubuntu
  - C++ : 11.0
  - g++ : 7.5.0
- - OpenCV : 3.2.0
- - librealsense 2 : 2.35.2
- - yaml-cpp : https://github.com/jbeder/yaml-cpp @commit : 9fb51534877d16597cfd94c18890d87af0879d65
+ - OpenCV : 4.x.x
+ - librealsense 2 : 2.x.x
+ - yaml-cpp : https://github.com/jbeder/yaml-cpp (latest)
  - Qt : 5.9.5
 
-## Makefile
- - make lib : build the librairy
- - make apps : build the setup application
- - make clean : clean all generated files
-
 ## Makefile for applications
 Examples can be found in the project ar_sandbox_app
 
-### Compilation
-You need to specify :
- - The path (can be relative) to the headers of the classes in the project with the parameter -I (this is an upper case i)
-
-### Linker
-You need to specify :
- - The path (can be relative) to the generated shared object library (file .so) with the parameter -L
- - The flag of the generated library (-lsandbox in our case)
- - The flags of the libraries used in the project for the linker, which are availible throught the variable DEP_SANDBOX in file dep.mk
+## Installation
+ - Run `mkdir build` if the build directory does not exist, and `cd` into it.
+ - In the build directory, run `cmake ..`
+ - In the build directory, run `cmake --build .`
+ - The library is built in the lib directory of the build directory.
+ - If a problem arises, check that all dependencies are met.
+ - To clean the project, run `cmake --build . --target clean`
+ - To uninstall completely, delete the build directory.
 
 ## Before launching your application
  - LD_LIBRARY_PATH must contain the path to the directory containing libsandbox.so (which is generated in /build)
diff --git a/dep.mk b/dep.mk
index 6988cb59534cd75f315b3eaa5cca74e3d3ae258e..1df8847e5cf789705823eeea6ec38d91433105b4 100644
--- a/dep.mk
+++ b/dep.mk
@@ -1,5 +1,8 @@
-OPENCVFLAG=`pkg-config --libs --cflags opencv`
+OPENCVFLAG=`pkg-config --libs --cflags opencv3`
 CAMERAFLAG=-lrealsense2
-YAMLFLAG=-lyaml-cpp
+YAMLFLAG=-I/usr/local/include -L/usr/local/lib -lyaml-cpp
 
 DEP_SANDBOX=$(OPENCVFLAG) $(CAMERAFLAG) $(YAMLFLAG)
+
+CCP=g++
+CFLAGS=-std=c++11 -Wall -Wextra -g -Ilib -fPIC -I/opt/opencv3/include -I/usr/local/include -L/usr/local/lib -lyaml-cpp -lpthread
diff --git a/inc/projection.h b/inc/projection.h
index 1af63617c1a34c595d4dd9f94bb3ef74ddd3044d..219e6885effec2d9f6d453fa6403fe7e6d5f3e91 100644
--- a/inc/projection.h
+++ b/inc/projection.h
@@ -25,7 +25,12 @@ class Projection{
         cv::Point2f fxy;
         cv::Point2f ppxy;
 
-        void deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap, cv::Point2f fxy, cv::Point2f ppxy);
+        void deprojectPixelsFromDepthThread(int start_x, int start_y, int partition_len, cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos);
+        void filterLowestDeprojectedPointsThread(int start_x, int start_y, int partition_len, cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap);
+        void buildFrameThread(int start_x, int start_y, int partition_len, cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst);
+        void holeFillingThread(int start_x, int start_y, int partition_len, cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap);
+
+        void deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos);
         void filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap);
         void buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst);
         void holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap);
diff --git a/inc/projection.h.old b/inc/projection.h.old
new file mode 100644
index
0000000000000000000000000000000000000000..3dc495defb907383146ecc0310b0ef010e3acf08 --- /dev/null +++ b/inc/projection.h.old @@ -0,0 +1,50 @@ +#ifndef SANDBOX_PROJECTION_H +#define SANDBOX_PROJECTION_H + +#include <opencv2/opencv.hpp> +#include "beamer.h" +#include "camera.h" + +class Projection{ + private: + cv::Mat_<float> adjustingMatrix; + // based angle to find adjustingMatrix + double angleRotation = 0.0; + float distanceTopSandbox; + + // resized depth frame to dst resolution + cv::Mat_<float> resized_depth; + // resized src to project to dst resolution + cv::Mat_<cv::Vec3b> resized_src; + // Buffer containing the pixels's new location when deprojected to beamer's POV + cv::Mat_<cv::Point2i> deprojectMap; + // Buffer indicating from where to get the pixels in the source frame to build the output + cv::Mat_<cv::Point2i> frameMap; + + // intrinsics parameters for deprojection, which are adapted to the projection's resolution + cv::Point2f fxy; + cv::Point2f ppxy; + + void deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap, cv::Point2f fxy, cv::Point2f ppxy); + void filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap); + void buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst); + void holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap); + cv::Point2i findMatchingPixel(int i, int j, float z, Camera *camera, cv::Point3f beamer_pos, cv::Point2f fxy, cv::Point2f ppxy); + + public: + Projection(); + + void setAdjustingMatrix(cv::Mat_<float> matrix){ adjustingMatrix = matrix; }; + cv::Mat_<float> getAdjustingMatrix(){ return adjustingMatrix; }; + void setAngleRotation(double angle){ angleRotation = angle; }; + double getAngleRotation(){ return angleRotation; }; + void setDistanceTopSandbox(float dist){ distanceTopSandbox = dist; }; + float getDistanceTopSandbox(){ return distanceTopSandbox; }; + + cv::Point2i rotatePixel(cv::Point2i center, double angle, cv::Point2i pixel); + cv::Point2i revertRotatePixel(cv::Point2i center, double angle, cv::Point2i rotatedPixel); + void adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst, Camera *camera, cv::Point3f beamer_pos); + void printAdjustingMatrix(); + +}; +#endif diff --git a/inc/sandboxConfig.h b/inc/sandboxConfig.h index 0fc9b233e774272c7bceb33d553b6263e745730d..53797079d730a77063887885c3ec3deb1605fa2e 100644 --- a/inc/sandboxConfig.h +++ b/inc/sandboxConfig.h @@ -1,6 +1,7 @@ #ifndef SANDBOX_CONFIG_H #define SANDBOX_CONFIG_H +#include <fstream> #include <yaml-cpp/yaml.h> #include <opencv2/opencv.hpp> #include "camera.h" diff --git a/src/common.mk b/src/common.mk index 6f95981371b428ef5068397631c4ba270443949e..68b62e124da5bc8b7c953cf83513eb14942e9ddb 100644 --- a/src/common.mk +++ b/src/common.mk @@ -1,2 +1,2 @@ CCP=g++ -CFLAGS=-std=c++11 -Wall -Wextra -g -Ilib -fPIC +CFLAGS=-std=c++11 -Wall -Wextra -g -Ilib -fPIC -I/opt/opencv3/include -I/usr/local/include -L/usr/local/lib -lyaml-cpp -lpthread diff --git a/src/components/beamer.cpp b/src/components/beamer.cpp index f8a9c5a2042a907b2b21a688bfbfe571fdfadac3..4155c19810f88774c2dc6aa141497c95c4577914 100644 --- a/src/components/beamer.cpp +++ b/src/components/beamer.cpp @@ -86,7 +86,7 @@ std::vector<cv::Point3i> Beamer::findCircles(cv::Mat &rgb, double contrast, int std::vector<cv::Vec3f> circles; circles.clear(); - 
cv::cvtColor(rgb, src_gray, CV_BGR2GRAY); + cv::cvtColor(rgb, src_gray, cv::COLOR_BGR2GRAY); // Reduce the noise so we avoid false circle detection cv::GaussianBlur(src_gray, src_gray, cv::Size(9, 9), 2, 2); src_gray = editContrast(src_gray, (double)contrast, (double)brightness); @@ -102,7 +102,7 @@ std::vector<cv::Point3i> Beamer::findCircles(cv::Mat &rgb, double contrast, int // param_2 : Threshold for the center detection, after the accumulator is complet, threshold to keep only the possible centers // min_radius : Min radius of the circles drawn on the accumulator // max_radius : Max radius of the circles drawn on the accumulator - cv::HoughCircles(src_gray, circles, CV_HOUGH_GRADIENT, 1, centersMinDist, (double)cannyEdgeThreshold, (double)houghAccThreshold, minRadius, maxRadius); + cv::HoughCircles(src_gray, circles, cv::HOUGH_GRADIENT, 1, centersMinDist, (double)cannyEdgeThreshold, (double)houghAccThreshold, minRadius, maxRadius); // Point with (x,y) and radius of the circle std::vector<cv::Point3i> result; @@ -160,7 +160,7 @@ cv::Mat Beamer::buildCrossFrame(cv::Point projectedCross, int step, int max, boo void Beamer::findLinearLine(std::vector<cv::Point3f> *capturedPoints, std::vector<cv::Point3d> *bases, std::vector<cv::Point3d> *directions){ cv::Vec6f line; - cv::fitLine(*capturedPoints, line, CV_DIST_L2, 0, 0.01, 0.01); + cv::fitLine(*capturedPoints, line, cv::DIST_L2, 0, 0.01, 0.01); cv::Point3d direction = cv::Point3d(line[0], line[1], line[2]); cv::Point3d base = cv::Point3d(line[3], line[4], line[5]); diff --git a/src/components/projection.cpp b/src/components/projection.cpp index d0c30c0dfa4921999c3703cdbcabfc641683bbc1..c2e539b70b6962f0ee77c5922f20a06d87badb76 100644 --- a/src/components/projection.cpp +++ b/src/components/projection.cpp @@ -5,7 +5,7 @@ * Contains the matrix to compensate for the rotation of the beamer * and the distance to the plane at the top of the sandbox. 
*/ - +#include <thread> #include "../../inc/projection.h" /* @@ -36,6 +36,7 @@ void Projection::adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, c fxy = profil.at(0); ppxy = profil.at(1); } + // do nothing if contains matrix with same size // otherwise => release => allocate deprojectMap.create(dst.rows, dst.cols); @@ -51,20 +52,25 @@ void Projection::adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, c cv::resize(src, resized_src, dst.size()); cv::resize(depth, resized_depth, dst.size()); - deprojectPixelsFromDepth(resized_depth, camera, camera->getCroppingMask() , beamer_pos, deprojectMap, fxy, ppxy); + // 130ms EXEC -> ~50ms + deprojectPixelsFromDepth(resized_depth, camera, camera->getCroppingMask() , beamer_pos); + + // 40ms EXEC -> ~20ms filterLowestDeprojectedPoints(resized_depth, deprojectMap, frameMap); + + // 30ms EXEC -> ~10ms buildFrame(resized_depth, frameMap, resized_src, dst); + + // 46ms EXEC -> 20ms holeFilling(dst, frameMap); + cv::warpAffine(dst, dst, adjustingMatrix, dst.size()); // apply the rotation on the image } - - /* * Private */ - /* * Deproject pixels in 3D, then adapt to Beamer's POV, and go back to 2D * This gives us the location of pixels adapted to the Beamer projection @@ -77,28 +83,62 @@ void Projection::adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, c * fxy : function x and y adapted to the projection matching the original depth matrix(without ROI) of the camera * ppxy : coordinates x and y of the central pixel adapted to the projection matching the original depth matrix(without ROI) of the camera */ -void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap, cv::Point2f fxy, cv::Point2f ppxy){ - +void Projection::deprojectPixelsFromDepthThread(int start_x, int start_y, int partition_len, cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos){ // scale coord of the mask with the new resolution of the depth frame float mask_scale_x = mask.x * depth.cols / mask.width; float mask_scale_y = mask.y * depth.rows / mask.height; - for (int i = 0; i < depth.rows; i++){ - for (int j = 0; j < depth.cols; j++){ - // coordinates of the pixel relative to the orginial image taken from the camera - int base_x = mask_scale_x+j; - int base_y = mask_scale_y+i; - float z = depth.at<float>(i,j); - + // coordinates of the pixel relative to the orginial image taken from the camera + for(int y = start_y; y < start_y + partition_len; y++){ + for(int x = start_x; x < start_x + partition_len; x++){ + if( 0 <= x && x < depth.cols && 0 <= y && y < depth.rows ){ + // Check boundaries + int base_x = mask_scale_x+x; + int base_y = mask_scale_y+y; + float z = depth.at<float>(y,x); + // pixels based on the original depth frame taken from the camera + cv::Point2i pixel = findMatchingPixel( base_y, base_x, z, camera, beamer_pos, fxy, ppxy ); - + // pixel relative to the cropping mask (the area where the frame is projected) pixel.x -= mask_scale_x; pixel.y -= mask_scale_y; - deprojectMap.at<cv::Point2i>(i,j) = pixel; + // No need to lock ressource, we are sure to write at a different point each time + deprojectMap.at<cv::Point2i>(y,x) = pixel; } + } + } +// int g = x+y; +} + + +/* +* Deproject pixels in 3D, then adapt to Beamer's POV, and go back to 2D +* This gives us the location of pixels adapted to the Beamer projection +* +* depth : Topology of the sandbox under the projection +* camera : Active camera +* mask : ROI (Range Of 
Interest) delimiting the zone of the projection from the camera POV +* beamer_pos : 3D position of the beamer relative to the camera +* deprojectMap : Indicates for each pixel of src, where it'll be displayed +* fxy : function x and y adapted to the projection matching the original depth matrix(without ROI) of the camera +* ppxy : coordinates x and y of the central pixel adapted to the projection matching the original depth matrix(without ROI) of the camera +*/ +void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos){ + std::vector<std::thread> threads; + // Creation of n submatrix of size 100x100 + // Each matrix will be managed by a different thread + int partition_len = 100; + for (int start_y = 0; start_y < depth.rows; start_y += partition_len){ + for(int start_x = 0; start_x < depth.cols; start_x += partition_len){ + threads.emplace_back(std::thread(&Projection::deprojectPixelsFromDepthThread, this, start_x, start_y, partition_len, std::ref(depth), camera, mask, beamer_pos)); + } + } + + for(auto& th: threads){ + th.join(); } } @@ -108,34 +148,59 @@ void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera * * depth : Topology of the sandbox matching the projection * deprojectMap : Indicates for each pixel of src, where it'll be displayed -* frameMap : Indicates for each pixel of dst, where it should get the value from in src +* frameMap : Indicates for each pixel of dst, where it should get the value from in src */ -void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap){ - - for (int i = 0; i < deprojectMap.rows; i++){ - for (int j = 0; j < deprojectMap.cols; j++){ +void Projection::filterLowestDeprojectedPointsThread(int start_x, int start_y, int partition_len, cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap){ + for(int y = start_y; y < start_y + partition_len; y++){ + for(int x = start_x; x < start_x + partition_len; x++){ + if( 0 <= x && x < depth.cols && 0 <= y && y < depth.rows ){ // coords of the new pixel - cv::Point2i deprojectedPixel = deprojectMap.at<cv::Point2i>(i,j); - cv::Point2i highestDepthPixel = cv::Point2i(j,i); + cv::Point2i deprojectedPixel = deprojectMap.at<cv::Point2i>(y,x); + cv::Point2i highestDepthPixel = cv::Point2i(x,y); if( (0 <= deprojectedPixel.x && deprojectedPixel.x < depth.cols) && (0 <= deprojectedPixel.y && deprojectedPixel.y < depth.rows) ){ - + // check and keep the highest point at the location pointed by pixel cv::Point2i currentDepthPixel = frameMap.at<cv::Point2i>(deprojectedPixel); if( (0 <= currentDepthPixel.x && currentDepthPixel.x < depth.cols) && (0 <= currentDepthPixel.y && currentDepthPixel.y < depth.rows) ){ - if(depth.at<float>(currentDepthPixel) <= depth.at<float>(i,j)){ - highestDepthPixel = currentDepthPixel; - } - } + if(depth.at<float>(currentDepthPixel) <= depth.at<float>(y,x)){ + highestDepthPixel = currentDepthPixel; + } + } frameMap.at<cv::Point2i>(deprojectedPixel) = highestDepthPixel; + } } } } } +/* +* Save the highest points in deprojectMap into frameMap, +* because some points can be deprojected at the same location +* +* depth : Topology of the sandbox matching the projection +* deprojectMap : Indicates for each pixel of src, where it'll be displayed +* frameMap : Indicates for each pixel of dst, where it should get the value from in src +*/ +void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> 
&depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap){ + std::vector<std::thread> threads; + // Creation of n submatrix of size 100x100 + // Each matrix will be managed by a different thread + int partition_len = 100; + for (int start_y = 0; start_y < depth.rows; start_y += partition_len){ + for(int start_x = 0; start_x < depth.cols; start_x += partition_len){ + threads.emplace_back(std::thread(&Projection::filterLowestDeprojectedPointsThread, this, start_x, start_y, partition_len, std::ref(depth), std::ref(deprojectMap), std::ref(frameMap))); + } + } + + for(auto& th: threads){ + th.join(); + } +} + /* * Build the frame using frameMap, we assume that all the buffers have the same size @@ -147,20 +212,48 @@ void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_< * dst : Output image adapted from src to project */ // TODO : enlever depth en paramètre et vérifier que le pixel soit dans la range d'un des autres buffer -void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst){ - - for (int i = 0; i < frameMap.rows; i++){ - for (int j = 0; j < frameMap.cols; j++){ +void Projection::buildFrameThread(int start_x, int start_y, int partition_len, cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst){ + for(int y = start_y; y < start_y + partition_len; y++){ + for(int x = start_x; x < start_x + partition_len; x++){ + if( 0 <= x && x < depth.cols && 0 <= y && y < depth.rows ){ - cv::Point2i pixel_src = frameMap.at<cv::Point2i>(i,j); - cv::Point2i pixel_dst = cv::Point2i(j,i); + cv::Point2i pixel_src = frameMap.at<cv::Point2i>(y,x); + cv::Point2i pixel_dst = cv::Point2i(x,y); if( (0<=pixel_src.x && pixel_src.x<depth.cols) && (0<=pixel_src.y && pixel_src.y<depth.rows) ){ // src and dst must be of same size dst.at<cv::Vec3b>(pixel_dst) = src.at<cv::Vec3b>(pixel_src); - } + } + } + } + } +} + +/* +* Build the frame using frameMap, we assume that all the buffers have the same size +* where each pixel describes in which pixel of the source it should take the value from +* dst[i] = src[frameMap[i]] +* +* frameMap : The map describing where are the source pixels for each pixel in our output image +* src : Image source to adapt +* dst : Output image adapted from src to project +*/ +// TODO : enlever depth en paramètre et vérifier que le pixel soit dans la range d'un des autres buffer +void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst){ + std::vector<std::thread> threads; + // Creation of n submatrix of size 100x100 + // Each matrix will be managed by a different thread + int partition_len = 100; + for (int start_y = 0; start_y < depth.rows; start_y += partition_len){ + for(int start_x = 0; start_x < depth.cols; start_x += partition_len){ + threads.emplace_back(std::thread(&Projection::buildFrameThread, this, start_x, start_y, partition_len, std::ref(depth), std::ref(frameMap), std::ref(src), std::ref(dst))); } } + + + for(auto& th: threads){ + th.join(); + } } @@ -172,25 +265,25 @@ void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frame * dst : The output image to project * frameMap : The map describing where are the source pixels for each pixel in our output image */ -void Projection::holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap){ +void Projection::holeFillingThread(int start_x, int start_y, 
int partition_len, cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap){ + for(int y = start_y; y < start_y + partition_len; y++){ + for(int x = start_x; x < start_x + partition_len; x++){ + if( 0 <= x && x < frameMap.cols && 0 <= y && y < frameMap.rows ){ - for (int i = 0; i < dst.rows; i++){ - for (int j = 0; j < dst.cols; j++){ - - if(frameMap.at<cv::Point2i>(i,j) == cv::Point2i(-1,-1)){ + if(frameMap.at<cv::Point2i>(y,x) == cv::Point2i(-1,-1)){ cv::Vec3b color; bool found = false; for(int d_y = -1; d_y <= 1; d_y++){ for(int d_x = -1; d_x <= 1; d_x++){ - + if(!(d_x == 0 && d_y == 0)){ - - int y = i+d_y; - int x = j+d_x; - if( ((0 <= x && x < dst.cols) && (0 <= y && y < dst.rows)) && (frameMap.at<cv::Point2i>(y,x) != cv::Point2i(-1,-1)) ){ - color = dst.at<cv::Vec3b>(y,x); + + int offset_y = y+d_y; + int offset_x = x+d_x; + if( ((0 <= offset_x && offset_x < dst.cols) && (0 <= offset_y && offset_y < dst.rows)) && (frameMap.at<cv::Point2i>(offset_y,offset_x) != cv::Point2i(-1,-1)) ){ + color = dst.at<cv::Vec3b>(offset_y,offset_x); found = true; break; } @@ -201,7 +294,8 @@ void Projection::holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &fr } if(found){ - dst.at<cv::Vec3b>(i,j) = color; + dst.at<cv::Vec3b>(y,x) = color; + } } } } @@ -209,6 +303,31 @@ void Projection::holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &fr } +/* +* Fixe holes formed by the deprojection due to round up coordinates +* (because deproject goes from 3D floats values to 2D uint), +* by filling with value of the 1st non null neighbour +* +* dst : The output image to project +* frameMap : The map describing where are the source pixels for each pixel in our output image +*/ +void Projection::holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap){ + std::vector<std::thread> threads; + // Creation of n submatrix of size 100x100 + // Each matrix will be managed by a different thread + int partition_len = 100; + for (int start_y = 0; start_y < frameMap.rows; start_y += partition_len){ + for(int start_x = 0; start_x < frameMap.cols; start_x += partition_len){ + threads.emplace_back(std::thread(&Projection::holeFillingThread, this, start_x, start_y, partition_len, std::ref(dst), std::ref(frameMap))); + } + } + + for(auto& th: threads){ + th.join(); + } +} + + /* * C : Camera position * B : Beamer position diff --git a/src/components/projection.cpp.old b/src/components/projection.cpp.old new file mode 100644 index 0000000000000000000000000000000000000000..c6b143696d767d8196b867d8a711210f1d8a6450 --- /dev/null +++ b/src/components/projection.cpp.old @@ -0,0 +1,305 @@ +/* +* Projection +* +* Class which adapts a projected image to the topology of the sandbox and the beamer point of view. +* Contains the matrix to compensate for the rotation of the beamer +* and the distance to the plane at the top of the sandbox. 
+*/ + +#include "../../inc/projection.h" + +/* + * MAIN + */ + +Projection::Projection(){ + adjustingMatrix = cv::getRotationMatrix2D(cv::Point(0, 0), 0, 1); + distanceTopSandbox = 1.0f; +} + + +/* +* Adjust the projected frame with the topology from the camera to the beamer POV (point of view) +* +* depth : Topology of the sandbox under the projection +* src : Image source projected to adjust to the topology +* dst : Output which will contain the adapted image +* camera : Active camera +* beamer_pos : 3D position of the beamer relative to the camera +*/ +void Projection::adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst, Camera *camera, cv::Point3f beamer_pos){ + + // update if dst.size changed, since for all buffers : + // buff.size == dst.size + if(deprojectMap.size() != dst.size()){ + std::vector<cv::Point2f> profil = camera->getAdaptedIntrinsics(dst); + fxy = profil.at(0); + ppxy = profil.at(1); + } + // do nothing if contains matrix with same size + // otherwise => release => allocate + deprojectMap.create(dst.rows, dst.cols); + frameMap.create(dst.rows, dst.cols); + resized_depth.create(dst.rows, dst.cols); + resized_src.create(dst.rows, dst.cols); + + deprojectMap = cv::Point2i(-1,-1); + frameMap = cv::Point2i(-1,-1); + + // resize to match 1:1 ratio with dst, since we'll do later: + // dst[i] = src[i] + cv::resize(src, resized_src, dst.size()); + cv::resize(depth, resized_depth, dst.size()); + + deprojectPixelsFromDepth(resized_depth, camera, camera->getCroppingMask() , beamer_pos, deprojectMap, fxy, ppxy); + filterLowestDeprojectedPoints(resized_depth, deprojectMap, frameMap); + buildFrame(resized_depth, frameMap, resized_src, dst); + holeFilling(dst, frameMap); + cv::warpAffine(dst, dst, adjustingMatrix, dst.size()); // apply the rotation on the image +} + + + +/* + * Private + */ + + +/* +* Deproject pixels in 3D, then adapt to Beamer's POV, and go back to 2D +* This gives us the location of pixels adapted to the Beamer projection +* +* depth : Topology of the sandbox under the projection +* camera : Active camera +* mask : ROI (Range Of Interest) delimiting the zone of the projection from the camera POV +* beamer_pos : 3D position of the beamer relative to the camera +* deprojectMap : Indicates for each pixel of src, where it'll be displayed +* fxy : function x and y adapted to the projection matching the original depth matrix(without ROI) of the camera +* ppxy : coordinates x and y of the central pixel adapted to the projection matching the original depth matrix(without ROI) of the camera +*/ +void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap, cv::Point2f fxy, cv::Point2f ppxy){ + + // scale coord of the mask with the new resolution of the depth frame + float mask_scale_x = mask.x * depth.cols / mask.width; + float mask_scale_y = mask.y * depth.rows / mask.height; + + for (int i = 0; i < depth.rows; i++){ + for (int j = 0; j < depth.cols; j++){ + // coordinates of the pixel relative to the orginial image taken from the camera + int base_x = mask_scale_x+j; + int base_y = mask_scale_y+i; + float z = depth.at<float>(i,j); + + // pixels based on the original depth frame taken from the camera + cv::Point2i pixel = findMatchingPixel( base_y, base_x, z, camera, beamer_pos, fxy, ppxy ); + + // pixel relative to the cropping mask (the area where the frame is projected) + pixel.x -= mask_scale_x; + pixel.y -= mask_scale_y; + + 
deprojectMap.at<cv::Point2i>(i,j) = pixel; + } + } +} + +/* +* Save the highest points in deprojectMap into frameMap, +* because some points can be deprojected at the same location +* +* depth : Topology of the sandbox matching the projection +* deprojectMap : Indicates for each pixel of src, where it'll be displayed +* frameMap : Indicates for each pixel of dst, where it should get the value from in src +*/ +void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap){ + + for (int i = 0; i < deprojectMap.rows; i++){ + for (int j = 0; j < deprojectMap.cols; j++){ + + // coords of the new pixel + cv::Point2i deprojectedPixel = deprojectMap.at<cv::Point2i>(i,j); + cv::Point2i highestDepthPixel = cv::Point2i(j,i); + + if( (0 <= deprojectedPixel.x && deprojectedPixel.x < depth.cols) && + (0 <= deprojectedPixel.y && deprojectedPixel.y < depth.rows) ){ + + // check and keep the highest point at the location pointed by pixel + cv::Point2i currentDepthPixel = frameMap.at<cv::Point2i>(deprojectedPixel); + if( (0 <= currentDepthPixel.x && currentDepthPixel.x < depth.cols) && + (0 <= currentDepthPixel.y && currentDepthPixel.y < depth.rows) ){ + if(depth.at<float>(currentDepthPixel) <= depth.at<float>(i,j)){ + highestDepthPixel = currentDepthPixel; + } + } + frameMap.at<cv::Point2i>(deprojectedPixel) = highestDepthPixel; + } + } + } +} + + +/* +* Build the frame using frameMap, we assume that all the buffers have the same size +* where each pixel describes in which pixel of the source it should take the value from +* dst[i] = src[frameMap[i]] +* +* frameMap : The map describing where are the source pixels for each pixel in our output image +* src : Image source to adapt +* dst : Output image adapted from src to project +*/ +// TODO : enlever depth en paramètre et vérifier que le pixel soit dans la range d'un des autres buffer +void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst){ + + for (int i = 0; i < frameMap.rows; i++){ + for (int j = 0; j < frameMap.cols; j++){ + + cv::Point2i pixel_src = frameMap.at<cv::Point2i>(i,j); + cv::Point2i pixel_dst = cv::Point2i(j,i); + + if( (0<=pixel_src.x && pixel_src.x<depth.cols) && (0<=pixel_src.y && pixel_src.y<depth.rows) ){ + // src and dst must be of same size + dst.at<cv::Vec3b>(pixel_dst) = src.at<cv::Vec3b>(pixel_src); + } + } + } +} + + +/* +* Fixe holes formed by the deprojection due to round up coordinates +* (because deproject goes from 3D floats values to 2D uint), +* by filling with value of the 1st non null neighbour +* +* dst : The output image to project +* frameMap : The map describing where are the source pixels for each pixel in our output image +*/ +void Projection::holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap){ + + for (int i = 0; i < dst.rows; i++){ + for (int j = 0; j < dst.cols; j++){ + + if(frameMap.at<cv::Point2i>(i,j) == cv::Point2i(-1,-1)){ + + cv::Vec3b color; + bool found = false; + + for(int d_y = -1; d_y <= 1; d_y++){ + for(int d_x = -1; d_x <= 1; d_x++){ + + if(!(d_x == 0 && d_y == 0)){ + + int y = i+d_y; + int x = j+d_x; + if( ((0 <= x && x < dst.cols) && (0 <= y && y < dst.rows)) && (frameMap.at<cv::Point2i>(y,x) != cv::Point2i(-1,-1)) ){ + color = dst.at<cv::Vec3b>(y,x); + found = true; + break; + } + } + } + if(found) + break; + } + + if(found){ + dst.at<cv::Vec3b>(i,j) = color; + } + } + } + } +} + + +/* +* C : Camera position +* B : 
Beamer position +* P : Point computed by camera depth +* V : Point at the plane height adjusted to the beamer POV (Point Of View) +* E : Point of the right-angle triangle VEB at the plane height +* A : Point of the right-angle triangle PAB at the point P height +* +* Where +* CP : distance from camera to point (value of depth_frame) +* CB : distance from camera to beamer (beamer's position is relative to the camera) +* +* i : y coordinate of the pixel to adjust +* j : x coordinate of the pixel to adjust +* z : Z coordinate of the pixel from the depth matrix +* camera : Camera active +* CB : Position of the beamer relative to the camera (vector camera-beamer) +* fxy : function x and y adapted to the projection matching the original depth matrix(without ROI) of the camera +* ppxy : coordinates x and y of the central pixel adapted to the projection matching the original depth matrix(without ROI) of the camera +* +* Return the coordinates of the pixel source adapted to the beamer POV +*/ +cv::Point2i Projection::findMatchingPixel(int i, int j, float z, Camera *camera, cv::Point3f CB, cv::Point2f fxy, cv::Point2f ppxy){ + + float pixel[2] = {static_cast<float>(j), static_cast<float>(i)}; + + cv::Point3f CP = camera->deprojectPixelToPoint(pixel, z, fxy, ppxy); + cv::Point3f BP = CP - CB; + + const float BEz = distanceTopSandbox - CB.z; + float BAz = BP.z; + float alpha = BEz / BAz; + + cv::Point3f BV = (alpha * BP); + cv::Point3f CV = CB + BV; + + return camera->projectPointToPixel(CV, fxy, ppxy); +} + + + + + + +/* +* Sandbox Setup purpose +*/ + +// TODO : move rotatePixel and revertRotatePixel in SandboxSetup (they don't depend on Projection anymore) + + +/* +* Rotate a pixel to compensate for the rotate of the beamer +*/ +cv::Point2i Projection::rotatePixel(cv::Point2i center, double angle, cv::Point2i pixel){ + + cv::Mat_<float> matRotation = cv::getRotationMatrix2D(center, angle, 1); + cv::Mat tmp = (cv::Mat_<cv::Vec2f>(1, 1) << cv::Vec2f(pixel.x, pixel.y)); + cv::transform(tmp, tmp, matRotation); + return cv::Point2i(tmp.at<cv::Vec2f>(0, 0)); +} + +/* +* Rotate back a rotated pixel to match the projection of the beamer +*/ +cv::Point2i Projection::revertRotatePixel(cv::Point2i center, double angle, cv::Point2i rotatedPixel){ + + cv::Mat_<float> matRotation = cv::getRotationMatrix2D(center, angle, 1); + cv::Mat tmp = (cv::Mat_<cv::Vec2f>(1, 1) << cv::Vec2f(rotatedPixel.x, rotatedPixel.y)); + cv::Mat invMat; + cv::invertAffineTransform(matRotation, invMat); + cv::transform(tmp, tmp, invMat); + return cv::Point2i(tmp.at<cv::Vec2f>(0, 0)); +} + + + + + + +/* +* Debug +*/ + +void Projection::printAdjustingMatrix(){ + + cv::Mat matrix = getAdjustingMatrix(); + for (int y = 0; y < matrix.rows; y++){ + for (int x = 0; x < matrix.cols; x++){ + std::cout << (float)matrix.at<double>(y, x) << " "; + } + std::cout << std::endl; + } +}
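
The four new `*Thread` methods in `projection.cpp` all share the same structure: the frame is cut into fixed 100x100 tiles, one `std::thread` is spawned per tile, each worker re-checks the matrix bounds (border tiles may run past the last row or column), and the caller joins every thread before moving on. Below is a minimal, self-contained sketch of that pattern; it is not part of the diff, and `forEachPixelTiled` and its callback are hypothetical names used only to illustrate the idea.

```cpp
#include <cstdio>
#include <functional>
#include <thread>
#include <vector>

// Runs process(y, x) once for every pixel of a rows x cols grid,
// one thread per partition_len x partition_len tile (100x100 as in the diff).
void forEachPixelTiled(int rows, int cols,
                       const std::function<void(int, int)> &process,
                       int partition_len = 100)
{
    std::vector<std::thread> threads;

    for (int start_y = 0; start_y < rows; start_y += partition_len){
        for (int start_x = 0; start_x < cols; start_x += partition_len){
            threads.emplace_back([=, &process](){
                for (int y = start_y; y < start_y + partition_len; y++){
                    for (int x = start_x; x < start_x + partition_len; x++){
                        // Border tiles can overshoot the grid, so bounds are
                        // re-checked, exactly as the *Thread methods do.
                        if (0 <= x && x < cols && 0 <= y && y < rows){
                            process(y, x);
                        }
                    }
                }
            });
        }
    }

    for (auto &th : threads){
        th.join();
    }
}

int main(){
    const int rows = 480, cols = 640;
    std::vector<int> out(rows * cols, 0);

    // Every (y, x) is written by exactly one thread, so no locking is needed,
    // which is the same argument made in deprojectPixelsFromDepthThread.
    forEachPixelTiled(rows, cols, [&](int y, int x){
        out[y * cols + x] = y + x;
    });

    std::printf("out[last] = %d\n", out[rows * cols - 1]);
    return 0;
}
```

For a 640x480 frame this launches 7 x 5 = 35 short-lived threads per call, four times per adjusted frame; the timing comments in `adjustFrame` suggest that is already enough to roughly halve each stage, but reusing a thread pool (or OpenCV's `cv::parallel_for_`) could avoid the per-frame thread creation if it ever shows up in profiling.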