Skip to content
Snippets Groups Projects
Commit 4e1de7ea authored by Adrien Lescourt's avatar Adrien Lescourt
Browse files

Merge branch 'QL-devel' into 'master'

Ql devel

See merge request !4
parents f1df7949 463bf38c
Branches master
No related tags found
1 merge request!4Ql devel
.idea/
build/
\ No newline at end of file
cmake_minimum_required(VERSION 3.10)
project(libsandbox)

# Library name and version
set(LIBNAME "sandbox")
set(LIB_MINOR_VERS "0.0")
set(LIB_MAJOR_VERS "1")
set(LIB_VERS "${LIB_MAJOR_VERS}.${LIB_MINOR_VERS}")
# Actual on-disk file name: CMake prepends "lib" to shared library targets,
# so the built file is libsandbox.so.1.0.0 (the previous value was missing
# the "lib" prefix, which made the install-time symlinks dangle)
set(LIB_FULL_NAME "lib${LIBNAME}.so.${LIB_VERS}")

# Output directory for shared libraries
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
# Ensure the directory exists (not strictly necessary, but good practice)
file(MAKE_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY})

# Dependencies: OpenCV, librealsense2 and yaml-cpp
find_package(OpenCV REQUIRED)
find_package(realsense2 REQUIRED)
find_package(yaml-cpp REQUIRED)

# Include directories (project headers + dependency headers)
# NOTE: the realsense2 package exports realsense2_INCLUDE_DIR / realsense2_LIBRARY
# (the previous "realsense_*" spellings expanded to nothing)
include_directories(inc ${OpenCV_INCLUDE_DIRS} ${realsense2_INCLUDE_DIR} ${YAML_CPP_INCLUDE_DIRS})

# Collect every source file under src/ and its sub-directories
file(GLOB_RECURSE SOURCES
    src/components/*.cpp
    src/lib/*.cpp
    src/tools/*.cpp
)

# Shared library target.
# VERSION/SOVERSION make CMake create the symlink chain itself in the build tree:
# libsandbox.so -> libsandbox.so.1 -> libsandbox.so.1.0.0
# (this replaces the previous broken add_custom_command whose "ln -sf" calls
# were missing their source argument and produced self-referential links)
add_library(${LIBNAME} SHARED ${SOURCES})
set_target_properties(${LIBNAME} PROPERTIES OUTPUT_NAME ${LIBNAME} VERSION ${LIB_VERS} SOVERSION ${LIB_MAJOR_VERS})
target_link_libraries(${LIBNAME} ${OpenCV_LIBS})
target_link_libraries(${LIBNAME} ${realsense2_LIBRARY})
target_link_libraries(${LIBNAME} ${YAML_CPP_LIBRARIES})

# Compile options
target_compile_options(${LIBNAME} PRIVATE
    -std=c++11  # C++11
    -Wall       # classic warnings
    -Wextra     # additional warnings
    -g          # debug symbols
    -fPIC       # position-independent code (required for shared libraries)
)

# Install the library and its headers
install(TARGETS ${LIBNAME} DESTINATION lib)
install(DIRECTORY inc/ DESTINATION include)
# Install the versioned symlinks (names now carry the "lib" prefix so they
# resolve to the file actually installed above)
install(CODE "execute_process(COMMAND ln -sf ${LIB_FULL_NAME} ${CMAKE_INSTALL_PREFIX}/lib/lib${LIBNAME}.so.${LIB_MAJOR_VERS})")
install(CODE "execute_process(COMMAND ln -sf ${LIB_FULL_NAME} ${CMAKE_INSTALL_PREFIX}/lib/lib${LIBNAME}.so)")
...@@ -5,31 +5,25 @@ ...@@ -5,31 +5,25 @@
- The camera must be plugged in with a USB 3.0 extension (otherwise the resolution of the frames will be lowered) - The camera must be plugged in with a USB 3.0 extension (otherwise the resolution of the frames will be lowered)
## Dependencies ## Dependencies
- OS : Ubuntu 18.04 - OS : Arch (Endeavour), Ubuntu
- C++ : 11.0 - C++ : 11.0
- g++ : 7.5.0 - g++ : 7.5.0
- OpenCV : 3.2.0 - OpenCV : 4.x.x
- librealsense 2 : 2.35.2 - librealsense 2 : 2.x.x
- yaml-cpp : https://github.com/jbeder/yaml-cpp @commit : 9fb51534877d16597cfd94c18890d87af0879d65 - yaml-cpp : https://github.com/jbeder/yaml-cpp (latest)
- Qt : 5.9.5 - Qt : 5.9.5
## Makefile
- make lib : build the librairy
- make apps : build the setup application
- make clean : clean all generated files
## Makefile for applications ## Makefile for applications
Examples can be found in the project ar_sandbox_app Examples can be found in the project ar_sandbox_app
### Compilation ## Installation
You need to specify : - run `mkdir build` if build directory does not exist and `cd` into it.
- The path (can be relative) to the headers of the classes in the project with the parameter -I (this is an upper case i) - in build directory, run `cmake ..`
- in build directory run `cmake --build .`
### Linker - Lib should be compiled in lib directory of the build directory.
You need to specify : - If a problem arises, check if all dependencies are met.
- The path (can be relative) to the generated shared object library (file .so) with the parameter -L - to clean project run `cmake --build . --target clean`
- The flag of the generated library (-lsandbox in our case) - to uninstall completely, delete the build directory.
- The flags of the libraries used in the project for the linker, which are available through the variable DEP_SANDBOX in file dep.mk
## Before launching your application ## Before launching your application
- LD_LIBRARY_PATH must contain the path to the directory containing libsandbox.so (which is generated in /build) - LD_LIBRARY_PATH must contain the path to the directory containing libsandbox.so (which is generated in /build)
......
OPENCVFLAG=`pkg-config --libs --cflags opencv` OPENCVFLAG=`pkg-config --libs --cflags opencv3`
CAMERAFLAG=-lrealsense2 CAMERAFLAG=-lrealsense2
YAMLFLAG=-lyaml-cpp YAMLFLAG=-I/usr/local/include -L/usr/local/lib -lyaml-cpp
DEP_SANDBOX=$(OPENCVFLAG) $(CAMERAFLAG) $(YAMLFLAG) DEP_SANDBOX=$(OPENCVFLAG) $(CAMERAFLAG) $(YAMLFLAG)
CCP=g++
CFLAGS=-std=c++11 -Wall -Wextra -g -Ilib -fPIC -I/opt/opencv3/include -I/usr/local/include -L/usr/local/lib -lyaml-cpp -lpthread
...@@ -25,7 +25,12 @@ class Projection{ ...@@ -25,7 +25,12 @@ class Projection{
cv::Point2f fxy; cv::Point2f fxy;
cv::Point2f ppxy; cv::Point2f ppxy;
void deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap, cv::Point2f fxy, cv::Point2f ppxy); void deprojectPixelsFromDepthThread(int start_x, int start_y, int partition_len, cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos);
void filterLowestDeprojectedPointsThread(int start_x, int start_y, int partition_len, cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap);
void buildFrameThread(int start_x, int start_y, int partition_len, cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst);
void holeFillingThread(int startx, int start_y, int partition_len, cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap);
void deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos);
void filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap); void filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap);
void buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst); void buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst);
void holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap); void holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap);
......
#ifndef SANDBOX_PROJECTION_H
#define SANDBOX_PROJECTION_H
#include <opencv2/opencv.hpp>
#include "beamer.h"
#include "camera.h"
/*
 * Projection
 *
 * Adapts a projected image to the topology of the sandbox and the beamer
 * point of view. Holds the matrix compensating for the rotation of the
 * beamer and the distance to the plane at the top of the sandbox.
 */
class Projection{
private:
// 2x3 affine matrix applied to the output frame to compensate for the beamer rotation
cv::Mat_<float> adjustingMatrix;
// based angle to find adjustingMatrix
double angleRotation = 0.0;
// distance to the plane at the top of the sandbox
float distanceTopSandbox;
// resized depth frame to dst resolution
cv::Mat_<float> resized_depth;
// resized src to project to dst resolution
cv::Mat_<cv::Vec3b> resized_src;
// Buffer containing the pixels's new location when deprojected to beamer's POV
cv::Mat_<cv::Point2i> deprojectMap;
// Buffer indicating from where to get the pixels in the source frame to build the output
cv::Mat_<cv::Point2i> frameMap;
// intrinsics parameters for deprojection, which are adapted to the projection's resolution
cv::Point2f fxy;
cv::Point2f ppxy;
// Fills deprojectMap: for each depth pixel, where it lands once seen from the beamer's POV
void deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap, cv::Point2f fxy, cv::Point2f ppxy);
// Keeps only the highest point when several pixels deproject to the same location
void filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap);
// Builds dst from src through frameMap: dst[i] = src[frameMap[i]]
void buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst);
// Fills holes left by coordinate rounding during deprojection with a neighbour's color
void holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap);
// Maps the depth pixel (i,j) at depth z to the 2D pixel seen from the beamer position
cv::Point2i findMatchingPixel(int i, int j, float z, Camera *camera, cv::Point3f beamer_pos, cv::Point2f fxy, cv::Point2f ppxy);
public:
Projection();
// Rotation-compensation matrix accessors
void setAdjustingMatrix(cv::Mat_<float> matrix){ adjustingMatrix = matrix; };
cv::Mat_<float> getAdjustingMatrix(){ return adjustingMatrix; };
// Base angle (used to build adjustingMatrix) accessors
void setAngleRotation(double angle){ angleRotation = angle; };
double getAngleRotation(){ return angleRotation; };
// Distance to the top of the sandbox accessors
void setDistanceTopSandbox(float dist){ distanceTopSandbox = dist; };
float getDistanceTopSandbox(){ return distanceTopSandbox; };
// Rotate a pixel around center by angle, and the inverse operation
cv::Point2i rotatePixel(cv::Point2i center, double angle, cv::Point2i pixel);
cv::Point2i revertRotatePixel(cv::Point2i center, double angle, cv::Point2i rotatedPixel);
// Adapts src to the sandbox topology (depth) and beamer POV; writes the result into dst
void adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst, Camera *camera, cv::Point3f beamer_pos);
void printAdjustingMatrix();
};
#endif
#ifndef SANDBOX_CONFIG_H #ifndef SANDBOX_CONFIG_H
#define SANDBOX_CONFIG_H #define SANDBOX_CONFIG_H
#include <fstream>
#include <yaml-cpp/yaml.h> #include <yaml-cpp/yaml.h>
#include <opencv2/opencv.hpp> #include <opencv2/opencv.hpp>
#include "camera.h" #include "camera.h"
......
CCP=g++ CCP=g++
CFLAGS=-std=c++11 -Wall -Wextra -g -Ilib -fPIC CFLAGS=-std=c++11 -Wall -Wextra -g -Ilib -fPIC -I/opt/opencv3/include -I/usr/local/include -L/usr/local/lib -lyaml-cpp -lpthread
...@@ -86,7 +86,7 @@ std::vector<cv::Point3i> Beamer::findCircles(cv::Mat &rgb, double contrast, int ...@@ -86,7 +86,7 @@ std::vector<cv::Point3i> Beamer::findCircles(cv::Mat &rgb, double contrast, int
std::vector<cv::Vec3f> circles; std::vector<cv::Vec3f> circles;
circles.clear(); circles.clear();
cv::cvtColor(rgb, src_gray, CV_BGR2GRAY); cv::cvtColor(rgb, src_gray, cv::COLOR_BGR2GRAY);
// Reduce the noise so we avoid false circle detection // Reduce the noise so we avoid false circle detection
cv::GaussianBlur(src_gray, src_gray, cv::Size(9, 9), 2, 2); cv::GaussianBlur(src_gray, src_gray, cv::Size(9, 9), 2, 2);
src_gray = editContrast(src_gray, (double)contrast, (double)brightness); src_gray = editContrast(src_gray, (double)contrast, (double)brightness);
...@@ -102,7 +102,7 @@ std::vector<cv::Point3i> Beamer::findCircles(cv::Mat &rgb, double contrast, int ...@@ -102,7 +102,7 @@ std::vector<cv::Point3i> Beamer::findCircles(cv::Mat &rgb, double contrast, int
// param_2 : Threshold for the center detection, after the accumulator is complet, threshold to keep only the possible centers // param_2 : Threshold for the center detection, after the accumulator is complet, threshold to keep only the possible centers
// min_radius : Min radius of the circles drawn on the accumulator // min_radius : Min radius of the circles drawn on the accumulator
// max_radius : Max radius of the circles drawn on the accumulator // max_radius : Max radius of the circles drawn on the accumulator
cv::HoughCircles(src_gray, circles, CV_HOUGH_GRADIENT, 1, centersMinDist, (double)cannyEdgeThreshold, (double)houghAccThreshold, minRadius, maxRadius); cv::HoughCircles(src_gray, circles, cv::HOUGH_GRADIENT, 1, centersMinDist, (double)cannyEdgeThreshold, (double)houghAccThreshold, minRadius, maxRadius);
// Point with (x,y) and radius of the circle // Point with (x,y) and radius of the circle
std::vector<cv::Point3i> result; std::vector<cv::Point3i> result;
...@@ -160,7 +160,7 @@ cv::Mat Beamer::buildCrossFrame(cv::Point projectedCross, int step, int max, boo ...@@ -160,7 +160,7 @@ cv::Mat Beamer::buildCrossFrame(cv::Point projectedCross, int step, int max, boo
void Beamer::findLinearLine(std::vector<cv::Point3f> *capturedPoints, std::vector<cv::Point3d> *bases, std::vector<cv::Point3d> *directions){ void Beamer::findLinearLine(std::vector<cv::Point3f> *capturedPoints, std::vector<cv::Point3d> *bases, std::vector<cv::Point3d> *directions){
cv::Vec6f line; cv::Vec6f line;
cv::fitLine(*capturedPoints, line, CV_DIST_L2, 0, 0.01, 0.01); cv::fitLine(*capturedPoints, line, cv::DIST_L2, 0, 0.01, 0.01);
cv::Point3d direction = cv::Point3d(line[0], line[1], line[2]); cv::Point3d direction = cv::Point3d(line[0], line[1], line[2]);
cv::Point3d base = cv::Point3d(line[3], line[4], line[5]); cv::Point3d base = cv::Point3d(line[3], line[4], line[5]);
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
* Contains the matrix to compensate for the rotation of the beamer * Contains the matrix to compensate for the rotation of the beamer
* and the distance to the plane at the top of the sandbox. * and the distance to the plane at the top of the sandbox.
*/ */
#include <thread>
#include "../../inc/projection.h" #include "../../inc/projection.h"
/* /*
...@@ -36,6 +36,7 @@ void Projection::adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, c ...@@ -36,6 +36,7 @@ void Projection::adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, c
fxy = profil.at(0); fxy = profil.at(0);
ppxy = profil.at(1); ppxy = profil.at(1);
} }
// do nothing if contains matrix with same size // do nothing if contains matrix with same size
// otherwise => release => allocate // otherwise => release => allocate
deprojectMap.create(dst.rows, dst.cols); deprojectMap.create(dst.rows, dst.cols);
...@@ -51,20 +52,25 @@ void Projection::adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, c ...@@ -51,20 +52,25 @@ void Projection::adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, c
cv::resize(src, resized_src, dst.size()); cv::resize(src, resized_src, dst.size());
cv::resize(depth, resized_depth, dst.size()); cv::resize(depth, resized_depth, dst.size());
deprojectPixelsFromDepth(resized_depth, camera, camera->getCroppingMask() , beamer_pos, deprojectMap, fxy, ppxy); // 130ms EXEC -> ~50ms
deprojectPixelsFromDepth(resized_depth, camera, camera->getCroppingMask() , beamer_pos);
// 40ms EXEC -> ~20ms
filterLowestDeprojectedPoints(resized_depth, deprojectMap, frameMap); filterLowestDeprojectedPoints(resized_depth, deprojectMap, frameMap);
// 30ms EXEC -> ~10ms
buildFrame(resized_depth, frameMap, resized_src, dst); buildFrame(resized_depth, frameMap, resized_src, dst);
// 46ms EXEC -> 20ms
holeFilling(dst, frameMap); holeFilling(dst, frameMap);
cv::warpAffine(dst, dst, adjustingMatrix, dst.size()); // apply the rotation on the image cv::warpAffine(dst, dst, adjustingMatrix, dst.size()); // apply the rotation on the image
} }
/* /*
* Private * Private
*/ */
/* /*
* Deproject pixels in 3D, then adapt to Beamer's POV, and go back to 2D * Deproject pixels in 3D, then adapt to Beamer's POV, and go back to 2D
* This gives us the location of pixels adapted to the Beamer projection * This gives us the location of pixels adapted to the Beamer projection
...@@ -77,28 +83,62 @@ void Projection::adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, c ...@@ -77,28 +83,62 @@ void Projection::adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, c
* fxy : function x and y adapted to the projection matching the original depth matrix(without ROI) of the camera * fxy : function x and y adapted to the projection matching the original depth matrix(without ROI) of the camera
* ppxy : coordinates x and y of the central pixel adapted to the projection matching the original depth matrix(without ROI) of the camera * ppxy : coordinates x and y of the central pixel adapted to the projection matching the original depth matrix(without ROI) of the camera
*/ */
void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap, cv::Point2f fxy, cv::Point2f ppxy){ void Projection::deprojectPixelsFromDepthThread(int start_x, int start_y, int partition_len, cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos){
// scale coord of the mask with the new resolution of the depth frame // scale coord of the mask with the new resolution of the depth frame
float mask_scale_x = mask.x * depth.cols / mask.width; float mask_scale_x = mask.x * depth.cols / mask.width;
float mask_scale_y = mask.y * depth.rows / mask.height; float mask_scale_y = mask.y * depth.rows / mask.height;
for (int i = 0; i < depth.rows; i++){ // coordinates of the pixel relative to the orginial image taken from the camera
for (int j = 0; j < depth.cols; j++){ for(int y = start_y; y < start_y + partition_len; y++){
// coordinates of the pixel relative to the orginial image taken from the camera for(int x = start_x; x < start_x + partition_len; x++){
int base_x = mask_scale_x+j; if( 0 <= x && x < depth.cols && 0 <= y && y < depth.rows ){
int base_y = mask_scale_y+i; // Check boundaries
float z = depth.at<float>(i,j); int base_x = mask_scale_x+x;
int base_y = mask_scale_y+y;
float z = depth.at<float>(y,x);
// pixels based on the original depth frame taken from the camera // pixels based on the original depth frame taken from the camera
cv::Point2i pixel = findMatchingPixel( base_y, base_x, z, camera, beamer_pos, fxy, ppxy ); cv::Point2i pixel = findMatchingPixel( base_y, base_x, z, camera, beamer_pos, fxy, ppxy );
// pixel relative to the cropping mask (the area where the frame is projected) // pixel relative to the cropping mask (the area where the frame is projected)
pixel.x -= mask_scale_x; pixel.x -= mask_scale_x;
pixel.y -= mask_scale_y; pixel.y -= mask_scale_y;
deprojectMap.at<cv::Point2i>(i,j) = pixel; // No need to lock ressource, we are sure to write at a different point each time
deprojectMap.at<cv::Point2i>(y,x) = pixel;
} }
}
}
// int g = x+y;
}
/*
* Deproject pixels in 3D, then adapt to Beamer's POV, and go back to 2D
* This gives us the location of pixels adapted to the Beamer projection
*
* depth : Topology of the sandbox under the projection
* camera : Active camera
* mask : ROI (Range Of Interest) delimiting the zone of the projection from the camera POV
* beamer_pos : 3D position of the beamer relative to the camera
* deprojectMap : Indicates for each pixel of src, where it'll be displayed
* fxy : function x and y adapted to the projection matching the original depth matrix(without ROI) of the camera
* ppxy : coordinates x and y of the central pixel adapted to the projection matching the original depth matrix(without ROI) of the camera
*/
void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos){
std::vector<std::thread> threads;
// Creation of n submatrix of size 100x100
// Each matrix will be managed by a different thread
int partition_len = 100;
for (int start_y = 0; start_y < depth.rows; start_y += partition_len){
for(int start_x = 0; start_x < depth.cols; start_x += partition_len){
threads.emplace_back(std::thread(&Projection::deprojectPixelsFromDepthThread, this, start_x, start_y, partition_len, std::ref(depth), camera, mask, beamer_pos));
}
}
for(auto& th: threads){
th.join();
} }
} }
...@@ -108,34 +148,59 @@ void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera ...@@ -108,34 +148,59 @@ void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera
* *
* depth : Topology of the sandbox matching the projection * depth : Topology of the sandbox matching the projection
* deprojectMap : Indicates for each pixel of src, where it'll be displayed * deprojectMap : Indicates for each pixel of src, where it'll be displayed
* frameMap : Indicates for each pixel of dst, where it should get the value from in src * frameMap : Indicates for each pixel of dst, where it should get the value from in src
*/ */
void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap){ void Projection::filterLowestDeprojectedPointsThread(int start_x, int start_y, int partition_len, cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap){
for(int y = start_y; y < start_y + partition_len; y++){
for (int i = 0; i < deprojectMap.rows; i++){ for(int x = start_x; x < start_x + partition_len; x++){
for (int j = 0; j < deprojectMap.cols; j++){ if( 0 <= x && x < depth.cols && 0 <= y && y < depth.rows ){
// coords of the new pixel // coords of the new pixel
cv::Point2i deprojectedPixel = deprojectMap.at<cv::Point2i>(i,j); cv::Point2i deprojectedPixel = deprojectMap.at<cv::Point2i>(y,x);
cv::Point2i highestDepthPixel = cv::Point2i(j,i); cv::Point2i highestDepthPixel = cv::Point2i(x,y);
if( (0 <= deprojectedPixel.x && deprojectedPixel.x < depth.cols) && if( (0 <= deprojectedPixel.x && deprojectedPixel.x < depth.cols) &&
(0 <= deprojectedPixel.y && deprojectedPixel.y < depth.rows) ){ (0 <= deprojectedPixel.y && deprojectedPixel.y < depth.rows) ){
// check and keep the highest point at the location pointed by pixel // check and keep the highest point at the location pointed by pixel
cv::Point2i currentDepthPixel = frameMap.at<cv::Point2i>(deprojectedPixel); cv::Point2i currentDepthPixel = frameMap.at<cv::Point2i>(deprojectedPixel);
if( (0 <= currentDepthPixel.x && currentDepthPixel.x < depth.cols) && if( (0 <= currentDepthPixel.x && currentDepthPixel.x < depth.cols) &&
(0 <= currentDepthPixel.y && currentDepthPixel.y < depth.rows) ){ (0 <= currentDepthPixel.y && currentDepthPixel.y < depth.rows) ){
if(depth.at<float>(currentDepthPixel) <= depth.at<float>(i,j)){ if(depth.at<float>(currentDepthPixel) <= depth.at<float>(y,x)){
highestDepthPixel = currentDepthPixel; highestDepthPixel = currentDepthPixel;
} }
} }
frameMap.at<cv::Point2i>(deprojectedPixel) = highestDepthPixel; frameMap.at<cv::Point2i>(deprojectedPixel) = highestDepthPixel;
}
} }
} }
} }
} }
/*
* Save the highest points in deprojectMap into frameMap,
* because some points can be deprojected at the same location
*
* depth : Topology of the sandbox matching the projection
* deprojectMap : Indicates for each pixel of src, where it'll be displayed
* frameMap : Indicates for each pixel of dst, where it should get the value from in src
*/
void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap){
std::vector<std::thread> threads;
// Creation of n submatrix of size 100x100
// Each matrix will be managed by a different thread
int partition_len = 100;
for (int start_y = 0; start_y < depth.rows; start_y += partition_len){
for(int start_x = 0; start_x < depth.cols; start_x += partition_len){
threads.emplace_back(std::thread(&Projection::filterLowestDeprojectedPointsThread, this, start_x, start_y, partition_len, std::ref(depth), std::ref(deprojectMap), std::ref(frameMap)));
}
}
for(auto& th: threads){
th.join();
}
}
/* /*
* Build the frame using frameMap, we assume that all the buffers have the same size * Build the frame using frameMap, we assume that all the buffers have the same size
...@@ -147,20 +212,48 @@ void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_< ...@@ -147,20 +212,48 @@ void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<
* dst : Output image adapted from src to project * dst : Output image adapted from src to project
*/ */
// TODO : enlever depth en paramètre et vérifier que le pixel soit dans la range d'un des autres buffer // TODO : enlever depth en paramètre et vérifier que le pixel soit dans la range d'un des autres buffer
void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst){ void Projection::buildFrameThread(int start_x, int start_y, int partition_len, cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst){
for(int y = start_y; y < start_y + partition_len; y++){
for (int i = 0; i < frameMap.rows; i++){ for(int x = start_x; x < start_x + partition_len; x++){
for (int j = 0; j < frameMap.cols; j++){ if( 0 <= x && x < depth.cols && 0 <= y && y < depth.rows ){
cv::Point2i pixel_src = frameMap.at<cv::Point2i>(i,j); cv::Point2i pixel_src = frameMap.at<cv::Point2i>(y,x);
cv::Point2i pixel_dst = cv::Point2i(j,i); cv::Point2i pixel_dst = cv::Point2i(x,y);
if( (0<=pixel_src.x && pixel_src.x<depth.cols) && (0<=pixel_src.y && pixel_src.y<depth.rows) ){ if( (0<=pixel_src.x && pixel_src.x<depth.cols) && (0<=pixel_src.y && pixel_src.y<depth.rows) ){
// src and dst must be of same size // src and dst must be of same size
dst.at<cv::Vec3b>(pixel_dst) = src.at<cv::Vec3b>(pixel_src); dst.at<cv::Vec3b>(pixel_dst) = src.at<cv::Vec3b>(pixel_src);
} }
}
}
}
}
/*
* Build the frame using frameMap, we assume that all the buffers have the same size
* where each pixel describes in which pixel of the source it should take the value from
* dst[i] = src[frameMap[i]]
*
* frameMap : The map describing where are the source pixels for each pixel in our output image
* src : Image source to adapt
* dst : Output image adapted from src to project
*/
// TODO : enlever depth en paramètre et vérifier que le pixel soit dans la range d'un des autres buffer
void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst){
std::vector<std::thread> threads;
// Creation of n submatrix of size 100x100
// Each matrix will be managed by a different thread
int partition_len = 100;
for (int start_y = 0; start_y < depth.rows; start_y += partition_len){
for(int start_x = 0; start_x < depth.cols; start_x += partition_len){
threads.emplace_back(std::thread(&Projection::buildFrameThread, this, start_x, start_y, partition_len, std::ref(depth), std::ref(frameMap), std::ref(src), std::ref(dst)));
} }
} }
for(auto& th: threads){
th.join();
}
} }
...@@ -172,25 +265,25 @@ void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frame ...@@ -172,25 +265,25 @@ void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frame
* dst : The output image to project * dst : The output image to project
* frameMap : The map describing where are the source pixels for each pixel in our output image * frameMap : The map describing where are the source pixels for each pixel in our output image
*/ */
void Projection::holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap){ void Projection::holeFillingThread(int start_x, int start_y, int partition_len, cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap){
for(int y = start_y; y < start_y + partition_len; y++){
for(int x = start_x; x < start_x + partition_len; x++){
if( 0 <= x && x < frameMap.cols && 0 <= y && y < frameMap.rows ){
for (int i = 0; i < dst.rows; i++){ if(frameMap.at<cv::Point2i>(y,x) == cv::Point2i(-1,-1)){
for (int j = 0; j < dst.cols; j++){
if(frameMap.at<cv::Point2i>(i,j) == cv::Point2i(-1,-1)){
cv::Vec3b color; cv::Vec3b color;
bool found = false; bool found = false;
for(int d_y = -1; d_y <= 1; d_y++){ for(int d_y = -1; d_y <= 1; d_y++){
for(int d_x = -1; d_x <= 1; d_x++){ for(int d_x = -1; d_x <= 1; d_x++){
if(!(d_x == 0 && d_y == 0)){ if(!(d_x == 0 && d_y == 0)){
int y = i+d_y; int offset_y = y+d_y;
int x = j+d_x; int offset_x = x+d_x;
if( ((0 <= x && x < dst.cols) && (0 <= y && y < dst.rows)) && (frameMap.at<cv::Point2i>(y,x) != cv::Point2i(-1,-1)) ){ if( ((0 <= offset_x && offset_x < dst.cols) && (0 <= offset_y && offset_y < dst.rows)) && (frameMap.at<cv::Point2i>(offset_y,offset_x) != cv::Point2i(-1,-1)) ){
color = dst.at<cv::Vec3b>(y,x); color = dst.at<cv::Vec3b>(offset_y,offset_x);
found = true; found = true;
break; break;
} }
...@@ -201,7 +294,8 @@ void Projection::holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &fr ...@@ -201,7 +294,8 @@ void Projection::holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &fr
} }
if(found){ if(found){
dst.at<cv::Vec3b>(i,j) = color; dst.at<cv::Vec3b>(y,x) = color;
}
} }
} }
} }
...@@ -209,6 +303,31 @@ void Projection::holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &fr ...@@ -209,6 +303,31 @@ void Projection::holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &fr
} }
/*
* Fixe holes formed by the deprojection due to round up coordinates
* (because deproject goes from 3D floats values to 2D uint),
* by filling with value of the 1st non null neighbour
*
* dst : The output image to project
* frameMap : The map describing where are the source pixels for each pixel in our output image
*/
void Projection::holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap){
std::vector<std::thread> threads;
// Creation of n submatrix of size 100x100
// Each matrix will be managed by a different thread
int partition_len = 100;
for (int start_y = 0; start_y < frameMap.rows; start_y += partition_len){
for(int start_x = 0; start_x < frameMap.cols; start_x += partition_len){
threads.emplace_back(std::thread(&Projection::holeFillingThread, this, start_x, start_y, partition_len, std::ref(dst), std::ref(frameMap)));
}
}
for(auto& th: threads){
th.join();
}
}
/* /*
* C : Camera position * C : Camera position
* B : Beamer position * B : Beamer position
......
/*
* Projection
*
* Class which adapts a projected image to the topology of the sandbox and the beamer point of view.
* Contains the matrix to compensate for the rotation of the beamer
* and the distance to the plane at the top of the sandbox.
*/
#include "../../inc/projection.h"
/*
* MAIN
*/
/*
 * Default-constructs the projection: a neutral adjusting matrix
 * (rotation of 0 degrees around the origin, scale 1) and an initial
 * top-of-sandbox distance of 1.0f.
 */
Projection::Projection(){
    distanceTopSandbox = 1.0f;
    // 2x3 affine identity produced by a zero-angle rotation around (0,0)
    adjustingMatrix = cv::getRotationMatrix2D(cv::Point(0, 0), 0, 1);
}
/*
 * Adjust the projected frame with the topology from the camera to the beamer POV (point of view)
 *
 * Pipeline: deproject depth -> keep highest points -> build output frame
 * -> fill rounding holes -> compensate beamer rotation.
 *
 * depth : Topology of the sandbox under the projection
 * src : Image source projected to adjust to the topology
 * dst : Output which will contain the adapted image
 * camera : Active camera
 * beamer_pos : 3D position of the beamer relative to the camera
 */
void Projection::adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst, Camera *camera, cv::Point3f beamer_pos){
// update if dst.size changed, since for all buffers :
// buff.size == dst.size
// (re-fetch intrinsics adapted to the new output resolution)
if(deprojectMap.size() != dst.size()){
std::vector<cv::Point2f> profil = camera->getAdaptedIntrinsics(dst);
fxy = profil.at(0);
ppxy = profil.at(1);
}
// do nothing if contains matrix with same size
// otherwise => release => allocate
deprojectMap.create(dst.rows, dst.cols);
frameMap.create(dst.rows, dst.cols);
resized_depth.create(dst.rows, dst.cols);
resized_src.create(dst.rows, dst.cols);
// reset both maps: (-1,-1) marks "no source pixel assigned yet"
deprojectMap = cv::Point2i(-1,-1);
frameMap = cv::Point2i(-1,-1);
// resize to match 1:1 ratio with dst, since we'll do later:
// dst[i] = src[i]
cv::resize(src, resized_src, dst.size());
cv::resize(depth, resized_depth, dst.size());
// step 1: where does each depth pixel land from the beamer's POV?
deprojectPixelsFromDepth(resized_depth, camera, camera->getCroppingMask() , beamer_pos, deprojectMap, fxy, ppxy);
// step 2: when several pixels land on the same spot, keep the highest one
filterLowestDeprojectedPoints(resized_depth, deprojectMap, frameMap);
// step 3: assemble the output image through frameMap
buildFrame(resized_depth, frameMap, resized_src, dst);
// step 4: patch pixels left unassigned by coordinate rounding
holeFilling(dst, frameMap);
cv::warpAffine(dst, dst, adjustingMatrix, dst.size()); // apply the rotation on the image
}
/*
* Private
*/
/*
* Deproject pixels in 3D, then adapt to Beamer's POV, and go back to 2D
* This gives us the location of pixels adapted to the Beamer projection
*
* depth : Topology of the sandbox under the projection
* camera : Active camera
* mask : ROI (Range Of Interest) delimiting the zone of the projection from the camera POV
* beamer_pos : 3D position of the beamer relative to the camera
* deprojectMap : Indicates for each pixel of src, where it'll be displayed
* fxy : function x and y adapted to the projection matching the original depth matrix(without ROI) of the camera
* ppxy : coordinates x and y of the central pixel adapted to the projection matching the original depth matrix(without ROI) of the camera
*/
void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap, cv::Point2f fxy, cv::Point2f ppxy){
    // Origin of the cropping mask rescaled to the resolution of this depth
    // frame (kept as an integer ratio, like the original computation)
    float offset_x = mask.x * depth.cols / mask.width;
    float offset_y = mask.y * depth.rows / mask.height;
    for (int row = 0; row < depth.rows; row++){
        for (int col = 0; col < depth.cols; col++){
            // coordinates of the pixel relative to the original image taken by the camera
            int full_x = offset_x + col;
            int full_y = offset_y + row;
            float z = depth(row, col);
            // where this 3D point lands in the full camera frame, seen from the beamer
            cv::Point2i target = findMatchingPixel( full_y, full_x, z, camera, beamer_pos, fxy, ppxy );
            // bring the coordinates back relative to the cropping mask
            // (the area where the frame is projected)
            target.x -= offset_x;
            target.y -= offset_y;
            deprojectMap(row, col) = target;
        }
    }
}
/*
* Save the highest points in deprojectMap into frameMap,
* because some points can be deprojected at the same location
*
* depth : Topology of the sandbox matching the projection
* deprojectMap : Indicates for each pixel of src, where it'll be displayed
* frameMap : Indicates for each pixel of dst, where it should get the value from in src
*/
void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap){
    // Several source pixels may deproject onto the same destination pixel;
    // for each destination, keep the source with the smallest depth value
    // (i.e. the highest point of the sandbox, closest to the camera).
    auto inBounds = [&depth](const cv::Point2i &p){
        return 0 <= p.x && p.x < depth.cols && 0 <= p.y && p.y < depth.rows;
    };
    for (int y = 0; y < deprojectMap.rows; y++){
        for (int x = 0; x < deprojectMap.cols; x++){
            cv::Point2i dstPixel = deprojectMap(y, x);
            if( !inBounds(dstPixel) )
                continue; // deprojected outside of the frame : ignore it
            cv::Point2i candidate = cv::Point2i(x, y);
            cv::Point2i previous = frameMap(dstPixel);
            // a valid entry already exists : keep the higher of the two points
            if( inBounds(previous) && depth(previous) <= depth(y, x) ){
                candidate = previous;
            }
            frameMap(dstPixel) = candidate;
        }
    }
}
/*
* Build the frame using frameMap, we assume that all the buffers have the same size
* where each pixel describes in which pixel of the source it should take the value from
* dst[i] = src[frameMap[i]]
*
* frameMap : The map describing where are the source pixels for each pixel in our output image
* src : Image source to adapt
* dst : Output image adapted from src to project
*/
// TODO : enlever depth en paramètre et vérifier que le pixel soit dans la range d'un des autres buffer
void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst){
    // For every output pixel, fetch the source pixel designated by frameMap :
    // dst[i] = src[frameMap[i]]   (all buffers are assumed to share dst's size)
    for (int y = 0; y < frameMap.rows; y++){
        for (int x = 0; x < frameMap.cols; x++){
            const cv::Point2i from = frameMap(y, x);
            // skip unset entries ((-1,-1)) and anything out of range
            const bool valid = (0 <= from.x && from.x < depth.cols) &&
                               (0 <= from.y && from.y < depth.rows);
            if(valid){
                dst(y, x) = src(from);
            }
        }
    }
}
/*
 * Fix holes formed by the deprojection due to rounded coordinates
 * (because deprojection goes from 3D float values to 2D uint),
* by filling with value of the 1st non null neighbour
*
* dst : The output image to project
* frameMap : The map describing where are the source pixels for each pixel in our output image
*/
void Projection::holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap){
    // (-1,-1) in frameMap marks a pixel that no source pixel was mapped to.
    // Fill it in place with the color of the first mapped neighbour
    // (8-connectivity, scanned top-left to bottom-right).
    const cv::Point2i unset(-1, -1);
    for (int y = 0; y < dst.rows; y++){
        for (int x = 0; x < dst.cols; x++){
            if( frameMap(y, x) != unset )
                continue;
            bool filled = false;
            for(int dy = -1; dy <= 1 && !filled; dy++){
                for(int dx = -1; dx <= 1 && !filled; dx++){
                    if(dy == 0 && dx == 0)
                        continue; // the hole itself
                    const int ny = y + dy;
                    const int nx = x + dx;
                    const bool inside = (0 <= nx && nx < dst.cols) && (0 <= ny && ny < dst.rows);
                    if( inside && frameMap(ny, nx) != unset ){
                        dst(y, x) = dst(ny, nx);
                        filled = true;
                    }
                }
            }
        }
    }
}
/*
* C : Camera position
* B : Beamer position
* P : Point computed by camera depth
* V : Point at the plane height adjusted to the beamer POV (Point Of View)
* E : Point of the right-angle triangle VEB at the plane height
* A : Point of the right-angle triangle PAB at the point P height
*
* Where
* CP : distance from camera to point (value of depth_frame)
* CB : distance from camera to beamer (beamer's position is relative to the camera)
*
* i : y coordinate of the pixel to adjust
* j : x coordinate of the pixel to adjust
* z : Z coordinate of the pixel from the depth matrix
* camera : Camera active
* CB : Position of the beamer relative to the camera (vector camera-beamer)
* fxy : function x and y adapted to the projection matching the original depth matrix(without ROI) of the camera
* ppxy : coordinates x and y of the central pixel adapted to the projection matching the original depth matrix(without ROI) of the camera
*
* Return the coordinates of the pixel source adapted to the beamer POV
*/
cv::Point2i Projection::findMatchingPixel(int i, int j, float z, Camera *camera, cv::Point3f CB, cv::Point2f fxy, cv::Point2f ppxy){
    // Deproject pixel (j,i) at depth z into a 3D point seen from the camera,
    // express it relative to the beamer, scale it down to the reference plane
    // of the sandbox, then project that plane point back into 2D.
    float coords[2] = {static_cast<float>(j), static_cast<float>(i)};
    const cv::Point3f camToPoint = camera->deprojectPixelToPoint(coords, z, fxy, ppxy);   // CP
    const cv::Point3f beamerToPoint = camToPoint - CB;                                    // BP
    const float planeFromBeamer = distanceTopSandbox - CB.z;                              // BEz
    const float scale = planeFromBeamer / beamerToPoint.z;                                // alpha = BEz / BAz
    // V expressed back in camera space : CV = CB + alpha * BP
    const cv::Point3f camToPlanePoint = CB + (scale * beamerToPoint);
    return camera->projectPointToPixel(camToPlanePoint, fxy, ppxy);
}
/*
* Sandbox Setup purpose
*/
// TODO : move rotatePixel and revertRotatePixel in SandboxSetup (they don't depend on Projection anymore)
/*
 * Rotate a pixel to compensate for the rotation of the beamer
*/
cv::Point2i Projection::rotatePixel(cv::Point2i center, double angle, cv::Point2i pixel){
    // Build the 2D rotation of `angle` degrees around `center`, apply it to
    // the single pixel through cv::transform, and round back to integers.
    cv::Mat_<float> rotation = cv::getRotationMatrix2D(center, angle, 1);
    cv::Mat point = (cv::Mat_<cv::Vec2f>(1, 1) << cv::Vec2f(pixel.x, pixel.y));
    cv::transform(point, point, rotation);
    return cv::Point2i(point.at<cv::Vec2f>(0, 0));
}
/*
* Rotate back a rotated pixel to match the projection of the beamer
*/
cv::Point2i Projection::revertRotatePixel(cv::Point2i center, double angle, cv::Point2i rotatedPixel){
    // Invert the affine rotation used by rotatePixel and apply it to the
    // already-rotated pixel to recover its original position.
    cv::Mat_<float> rotation = cv::getRotationMatrix2D(center, angle, 1);
    cv::Mat inverse;
    cv::invertAffineTransform(rotation, inverse);
    cv::Mat point = (cv::Mat_<cv::Vec2f>(1, 1) << cv::Vec2f(rotatedPixel.x, rotatedPixel.y));
    cv::transform(point, point, inverse);
    return cv::Point2i(point.at<cv::Vec2f>(0, 0));
}
/*
* Debug
*/
void Projection::printAdjustingMatrix(){
cv::Mat matrix = getAdjustingMatrix();
for (int y = 0; y < matrix.rows; y++){
for (int x = 0; x < matrix.cols; x++){
std::cout << (float)matrix.at<double>(y, x) << " ";
}
std::cout << std::endl;
}
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment