Commit dff9a095 authored by simon.fanetti

add deprojection with custom resolution

parent 0c4ff5dc
Related merge request: !3 Custom realsense
@@ -35,8 +35,11 @@ class Camera{
     int start();
     void stop();
-    cv::Point3f deprojectPixelToPoint(float coord[], float z1);
+    cv::Point3f deprojectPixelToPoint(float coord[], float z);
+    cv::Point3f deprojectPixelToPoint(float coord[], float z, cv::Point2f fxy, cv::Point2f ppxy);
     cv::Point2i projectPointToPixel(cv::Point3f point3D);
+    cv::Point2i projectPointToPixel(cv::Point3f point3D, cv::Point2f fxy, cv::Point2f ppxy);
+    std::vector<cv::Point2f> getAdaptedIntrinsics(cv::Mat &projection);
     void capture();
     void printCroppingMask();
......
@@ -16,12 +16,13 @@ class Projection{
     // Buffer indicating from where to get the pixels in the source frame
     cv::Mat_<cv::Point2i> frameMap;
-    void deprojectPixelsFromDepth(cv::Mat_<float> &depth, cv::Rect mask, Camera *camera, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap);
+    //void deprojectPixelsFromDepth(cv::Mat_<float> &depth, cv::Rect mask, Camera *camera, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap);
+    void deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap, cv::Point2f fxy, cv::Point2f ppxy);
     void filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap);
     void buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst);
-    cv::Point2i findMatchingPixel(int i, int j, float z, Camera *camera, cv::Point3f beamer_pos);
+    cv::Point2i findMatchingPixel(int i, int j, float z, Camera *camera, cv::Point3f beamer_pos, cv::Point2f fxy, cv::Point2f ppxy);
-    void copyPixelsInto(cv::Point2i pixel_dst, cv::Mat_<cv::Vec3b> &dst, cv::Point2i pixel_src, cv::Mat_<cv::Vec3b> &src, cv::Mat_<float> &depth);
+    //void copyPixelsInto(cv::Point2i pixel_dst, cv::Mat_<cv::Vec3b> &dst, cv::Point2i pixel_src, cv::Mat_<cv::Vec3b> &src, cv::Mat_<float> &depth);
-    cv::Size getMatchingSize(cv::Mat &src, cv::Mat &base);
+    //cv::Size getMatchingSize(cv::Mat &src, cv::Mat &base);
 public:
     Projection();
......
@@ -119,6 +119,7 @@ cv::Point2i Camera::projectPointToPixel(cv::Point3f point3D){
     return cv::Point2i(pixel[0], pixel[1]);
 }
 
 // Get the point relative to the real world matching the coordinates of the pixel
 cv::Point3f Camera::deprojectPixelToPoint(float coord[], float z){
@@ -128,6 +129,40 @@ cv::Point3f Camera::deprojectPixelToPoint(float coord[], float z){
 }
+
+/*
+    Custom functions based on librealsense that take f and pp as arguments.
+    They are derived from the camera's profile but adapted to what we want to display:
+    the pixel limits match the limits of the camera's frame and
+    the 3D projection matches the camera's too.
+*/
+cv::Point2i Camera::projectPointToPixel(cv::Point3f point, cv::Point2f f, cv::Point2f pp){
+    float x = point.x / point.z;
+    float y = point.y / point.z;
+    return cv::Point2i( x*f.x+pp.x, y*f.y+pp.y );
+}
+
+cv::Point3f Camera::deprojectPixelToPoint(float pixel[], float z, cv::Point2f f, cv::Point2f pp){
+    float x = (pixel[0] - pp.x) / f.x;
+    float y = (pixel[1] - pp.y) / f.y;
+    return cv::Point3f(z*x, z*y, z);
+}
+
+std::vector<cv::Point2f> Camera::getAdaptedIntrinsics(cv::Mat &projection){
+    float fx = projection.size().width * intr_profile.fx / croppingMask.width;
+    float fy = projection.size().height * intr_profile.fy / croppingMask.height;
+    cv::Point2f f = cv::Point2f(fx, fy);
+
+    float ppx = projection.size().width * (intr_profile.ppx - croppingMask.x) / croppingMask.width;
+    float ppy = projection.size().height * (intr_profile.ppy - croppingMask.y) / croppingMask.height;
+    cv::Point2f pp = cv::Point2f(ppx, ppy);
+
+    return std::vector<cv::Point2f> {f, pp};
+}
 
 void Camera::printCroppingMask(){
     cv::Rect mask = getCroppingMask();
     std::cout << "(" << mask.x << "," << mask.y << ") + " << mask.width << "x" << mask.height << std::endl;
......
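Note: the two overloads added above implement the standard pinhole camera model with the focal lengths f and principal point pp passed in explicitly, and getAdaptedIntrinsics rescales the RealSense profile from the cropped camera frame to the resolution of the projected image. A minimal standalone sketch of the same math, with made-up intrinsics and cropping values (FakeIntrinsics, adaptIntrinsics and all the numbers are illustrative, not part of this project):

#include <opencv2/core.hpp>
#include <vector>

// Illustrative intrinsics; in the project these come from the RealSense profile (intr_profile).
struct FakeIntrinsics { float fx, fy, ppx, ppy; };

// Pinhole projection: 3D point -> pixel, given focal lengths f and principal point pp.
static cv::Point2i projectPointToPixel(cv::Point3f p, cv::Point2f f, cv::Point2f pp){
    return cv::Point2i(p.x / p.z * f.x + pp.x, p.y / p.z * f.y + pp.y);
}

// Inverse: pixel plus depth z -> 3D point.
static cv::Point3f deprojectPixelToPoint(cv::Point2f px, float z, cv::Point2f f, cv::Point2f pp){
    return cv::Point3f(z * (px.x - pp.x) / f.x, z * (px.y - pp.y) / f.y, z);
}

// Rescale intrinsics from the cropped camera frame to the output (projection) resolution,
// mirroring what Camera::getAdaptedIntrinsics does above.
static std::vector<cv::Point2f> adaptIntrinsics(FakeIntrinsics in, cv::Rect crop, cv::Size out){
    cv::Point2f f(out.width * in.fx / crop.width,
                  out.height * in.fy / crop.height);
    cv::Point2f pp(out.width * (in.ppx - crop.x) / crop.width,
                   out.height * (in.ppy - crop.y) / crop.height);
    return {f, pp};
}

int main(){
    FakeIntrinsics intr{615.f, 615.f, 320.f, 240.f};   // made-up camera profile
    cv::Rect crop(80, 60, 480, 360);                   // made-up cropping mask
    cv::Size out(1024, 768);                           // projection resolution
    std::vector<cv::Point2f> fp = adaptIntrinsics(intr, crop, out);
    cv::Point3f P = deprojectPixelToPoint(cv::Point2f(512.f, 384.f), 1.0f, fp[0], fp[1]);
    cv::Point2i back = projectPointToPixel(P, fp[0], fp[1]);  // round-trips to (512, 384)
    return (back.x == 512 && back.y == 384) ? 0 : 1;
}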
@@ -38,9 +38,9 @@ void Projection::adjustFrame(cv::Mat_<float> depth, cv::Mat_<cv::Vec3b> src, cv:
         frameMap.release();
         resized_dst.release();
     }
-    deprojectMap.create(depth.rows, depth.cols);
-    frameMap.create(depth.rows, depth.cols);
-    resized_dst.create(getMatchingSize(dst, depth));
+    deprojectMap.create(dst.rows, dst.cols);
+    frameMap.create(dst.rows, dst.cols);
+    resized_dst.create(dst.rows, dst.cols);
 }
 
 deprojectMap = cv::Point2i(-1,-1);
@@ -49,9 +49,14 @@ void Projection::adjustFrame(cv::Mat_<float> depth, cv::Mat_<cv::Vec3b> src, cv:
     // resize to match 1:1 ratio with resized_dst, since we'll do later:
     // resized_dst[i] = src[i]
-    cv::resize(src, src, resized_dst.size());
+    cv::resize(src, src, dst.size());
+    cv::resize(depth, depth, dst.size());
 
-    deprojectPixelsFromDepth(depth, camera->getCroppingMask(), camera, beamer_pos, deprojectMap);
+    std::vector<cv::Point2f> profil = camera->getAdaptedIntrinsics(dst);
+    cv::Point2f fxy = profil.at(0);
+    cv::Point2f ppxy = profil.at(1);
+
+    deprojectPixelsFromDepth(depth, camera, beamer_pos, deprojectMap, fxy, ppxy);
     filterLowestDeprojectedPoints(depth, deprojectMap, frameMap);
     buildFrame(depth, frameMap, src, resized_dst);
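Taken together, the new adjustFrame flow works at the destination resolution throughout. A condensed summary of the steps above (comments only, not additional code from the commit):

// 1) bring src and depth to the destination resolution
// 2) rescale the camera intrinsics to that same resolution (getAdaptedIntrinsics)
// 3) deproject every depth pixel, shift it to the beamer's point of view, and reproject (deprojectPixelsFromDepth)
// 4) when several pixels land on the same output location, keep the highest point (filterLowestDeprojectedPoints)
// 5) copy the source colors to their new positions (buildFrame)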
@@ -70,6 +75,7 @@ void Projection::adjustFrame(cv::Mat_<float> depth, cv::Mat_<cv::Vec3b> src, cv:
     Deproject pixels in 3D, then adapt to the beamer's POV, and go back to 2D.
     This gives us the location of pixels adapted to the beamer projection.
 */
+/*
 void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, cv::Rect mask, Camera *camera, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap){
 
     // Browse the depth frame matching the cropping mask
@@ -94,6 +100,23 @@ void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, cv::Rect mask,
     }
 }
+*/
+
+void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap, cv::Point2f fxy, cv::Point2f ppxy){
+
+    // Browse the depth frame matching the cropping mask
+    // while adapting each pixel's position to the beamer's position
+    for (int j = 0; j < depth.rows; j++){
+        for (int i = 0; i < depth.cols; i++){
+            // pixels based on the original depth frame taken from the camera
+            //cv::Point2i pixel = findMatchingPixel( i, j, depth.at<float>(j,i), camera, beamer_pos );
+            deprojectMap.at<cv::Point2i>(j,i) = findMatchingPixel( i, j, depth.at<float>(j,i), camera, beamer_pos, fxy, ppxy );
+        }
+    }
+}
+
 /*
     Save the highest points in deprojectMap into frameMap,
     because some points can be deprojected at the same location
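Since several source pixels can end up deprojected onto the same output pixel, this filter keeps, for each destination, the candidate with the smallest depth (the highest point of the sandbox). A possible shape of that step, assuming frameMap is reset to (-1,-1) like deprojectMap; this sketch only illustrates the idea described in the comment above and is not necessarily the project's exact implementation:

// Hypothetical sketch of the "keep the highest point" filtering step.
void filterLowestDeprojectedPointsSketch(const cv::Mat_<float> &depth,
                                         const cv::Mat_<cv::Point2i> &deprojectMap,
                                         cv::Mat_<cv::Point2i> &frameMap){
    for (int j = 0; j < deprojectMap.rows; j++){
        for (int i = 0; i < deprojectMap.cols; i++){
            cv::Point2i dst = deprojectMap(j, i);            // where source pixel (i,j) lands
            if (dst.x < 0 || dst.x >= frameMap.cols || dst.y < 0 || dst.y >= frameMap.rows)
                continue;                                    // reprojected outside the frame
            cv::Point2i prev = frameMap(dst.y, dst.x);       // previously stored source pixel
            // keep the candidate with the smallest depth (closest to the camera, i.e. the highest point)
            if (prev.x < 0 || depth(j, i) < depth(prev.y, prev.x))
                frameMap(dst.y, dst.x) = cv::Point2i(i, j);
        }
    }
}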
@@ -143,7 +166,8 @@ void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frame
             if( (0<=pixel_src.x && pixel_src.x<depth.cols) && (0<=pixel_src.y && pixel_src.y<depth.rows) ){
                 // src and dst must be of same size
-                copyPixelsInto(pixel_dst, dst, pixel_src, src, depth);
+                //copyPixelsInto(pixel_dst, dst, pixel_src, src, depth);
+                dst.at<cv::Vec3b>(pixel_dst) = src.at<cv::Vec3b>(pixel_src);
             }
         }
     }
@@ -154,17 +178,19 @@ void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frame
     resize the frames to be a multiple of the base size:
     src.size = n * base.size, where n is uint > 0
 */
+/*
 cv::Size Projection::getMatchingSize(cv::Mat &src, cv::Mat &base){
     cv::Size bigSize;
     bigSize.width = (src.size().width % base.size().width == 0) ? src.size().width : src.size().width - (src.size().width % base.size().width) + base.size().width;
     bigSize.height = (src.size().height % base.size().height == 0) ? src.size().height : src.size().height - (src.size().height % base.size().height) + base.size().height;
     return bigSize;
 }
+*/
 
 /*
     pixel coordinates are relative to the camera depth frame
 */
+/*
 void Projection::copyPixelsInto(cv::Point2i pixel_dst, cv::Mat_<cv::Vec3b> &dst, cv::Point2i pixel_src, cv::Mat_<cv::Vec3b> &src, cv::Mat_<float> &depth){
 
     if( src.size().width == dst.size().width && src.size().height == dst.size().height ){
@@ -187,7 +213,7 @@ void Projection::copyPixelsInto(cv::Point2i pixel_dst, cv::Mat_<cv::Vec3b> &dst,
         }
     }
 }
+*/
 
 /*
     C : Camera position
@@ -201,19 +227,19 @@ void Projection::copyPixelsInto(cv::Point2i pixel_dst, cv::Mat_<cv::Vec3b> &dst,
     CP : distance from camera to point (value of depth_frame)
     CB : distance from camera to beamer (beamer's position is relative to the camera)
 */
-cv::Point2i Projection::findMatchingPixel(int i, int j, float z, Camera *camera, cv::Point3f CB){
+cv::Point2i Projection::findMatchingPixel(int i, int j, float z, Camera *camera, cv::Point3f CB, cv::Point2f fxy, cv::Point2f ppxy){
 
     float pixel[2] = {static_cast<float>(i), static_cast<float>(j)};
     const float BEz = distanceTopSandbox - CB.z;
-    cv::Point3f CP = camera->deprojectPixelToPoint(pixel, z);
+    cv::Point3f CP = camera->deprojectPixelToPoint(pixel, z, fxy, ppxy);
     cv::Point3f BP = CP - CB;
     float BAz = BP.z;
     float alpha = BEz / BAz;
     cv::Point3f BV = (alpha * BP);
     cv::Point3f CV = CB + BV;
-    return camera->projectPointToPixel(CV);
+    return camera->projectPointToPixel(CV, fxy, ppxy);
 }
......
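The geometry in findMatchingPixel is a similar-triangles construction: the point seen by the camera is re-expressed relative to the beamer, scaled along the beamer's ray until it hits the plane of the sandbox top (at distanceTopSandbox from the camera), and then projected back to pixels with the same adapted intrinsics. A tiny standalone example with made-up numbers (the intrinsics, beamer offset and distanceTopSandbox below are purely illustrative):

#include <opencv2/core.hpp>
#include <cstdio>

int main(){
    // Made-up adapted intrinsics and geometry.
    cv::Point2f f(1300.f, 1300.f), pp(512.f, 384.f);
    cv::Point3f CB(0.20f, 0.00f, 0.25f);       // beamer position relative to the camera (m)
    const float distanceTopSandbox = 1.00f;    // camera-to-sandbox-top distance (m)
    const float BEz = distanceTopSandbox - CB.z;

    // One pixel of the depth frame and its measured depth.
    float pixel[2] = {600.f, 400.f};
    float z = 0.90f;

    // Same steps as Projection::findMatchingPixel:
    cv::Point3f CP((pixel[0] - pp.x) / f.x * z,
                   (pixel[1] - pp.y) / f.y * z, z);  // deproject with the adapted intrinsics
    cv::Point3f BP = CP - CB;                        // point relative to the beamer
    float alpha = BEz / BP.z;                        // scale onto the sandbox-top plane
    cv::Point3f CV = CB + alpha * BP;                // back into camera coordinates
    cv::Point2i out(CV.x / CV.z * f.x + pp.x,        // reproject with the same intrinsics
                    CV.y / CV.z * f.y + pp.y);
    std::printf("beamer-adjusted pixel: (%d, %d)\n", out.x, out.y);
    return 0;
}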