Skip to content
Snippets Groups Projects
Commit 30819645 authored by simon.fanetti's avatar simon.fanetti
Browse files

fix scaled params for camera projection

parent dff9a095
Branches
No related tags found
1 merge request!3Custom realsense
......@@ -16,13 +16,13 @@ class Projection{
// Buffer indicating from where to get the pixels in the source frame
cv::Mat_<cv::Point2i> frameMap;
//void deprojectPixelsFromDepth(cv::Mat_<float> &depth, cv::Rect mask, Camera *camera, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap);
cv::Point2f fxy;
cv::Point2f ppxy;
void deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap, cv::Point2f fxy, cv::Point2f ppxy);
void filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap);
void buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst);
cv::Point2i findMatchingPixel(int i, int j, float z, Camera *camera, cv::Point3f beamer_pos, cv::Point2f fxy, cv::Point2f ppxy);
//void copyPixelsInto(cv::Point2i pixel_dst, cv::Mat_<cv::Vec3b> &dst, cv::Point2i pixel_src, cv::Mat_<cv::Vec3b> &src, cv::Mat_<float> &depth);
//cv::Size getMatchingSize(cv::Mat &src, cv::Mat &base);
public:
Projection();
......
......@@ -82,6 +82,51 @@ int Camera::start(){
capture();
intr_profile = depth_frame->get_profile().as<rs2::video_stream_profile>().get_intrinsics();
// Debug
/*
cv::Mat projection(768, 1024, CV_16UC1);
//cv::Mat projection(croppingMask.height, croppingMask.width, CV_16UC1);
std::vector<cv::Point2f> prof = getAdaptedIntrinsics(projection);
float mask_scale_x = croppingMask.x * projection.cols / croppingMask.width;
float mask_scale_y = croppingMask.y * projection.rows / croppingMask.height;
float pxc[] = { 0 + croppingMask.x ,0 + croppingMask.y,
croppingMask.width + croppingMask.x, 0 + croppingMask.y,
croppingMask.width/2.0f + croppingMask.x, croppingMask.height/2.0f + croppingMask.y,
0 + croppingMask.x, croppingMask.height + croppingMask.y,
croppingMask.width + croppingMask.x, croppingMask.height + croppingMask.y};
float pxp[] = { 0 + mask_scale_x, 0 + mask_scale_y,
projection.size().width + mask_scale_x, 0 + mask_scale_y,
projection.size().width/2.0f + mask_scale_x, projection.size().height/2.0f + mask_scale_y,
0 + mask_scale_x, projection.size().height + mask_scale_y,
projection.size().width + mask_scale_x, projection.size().height + mask_scale_y};
std::cout << "Camera " << intr_profile.width << "," << intr_profile.height << std::endl << intr_profile.fx << "," << intr_profile.fy << std::endl << intr_profile.ppx << "," << intr_profile.ppy << std::endl;
std::cout << "Proj " << projection.size().width << "," << projection.size().height << std::endl << prof.at(0).x << "," << prof.at(0).y << std::endl << prof.at(1).x << "," << prof.at(1).y << std::endl;
for(int i=0; i<5; i++){
float c[] = {pxc[0+i], pxc[1+i]};
float px[] = {pxp[0+i], pxp[1+i]};
cv::Point3f pcam = deprojectPixelToPoint(c, 2.0f);
cv::Point3f ppro = deprojectPixelToPoint(px, 2.0f, prof.at(0), prof.at(1));
std::cout << "Center" << std::endl;
std::cout << "Camera : " << pcam.x << "," << pcam.y << "," << pcam.z << std::endl;
std::cout << "Projection : " << ppro.x << "," << ppro.y << "," << ppro.z << std::endl;
}
*/
return 0;
}
......@@ -151,15 +196,30 @@ cv::Point3f Camera::deprojectPixelToPoint(float pixel[], float z, cv::Point2f f,
/*
    Scale the camera intrinsics from the cropped camera frame onto the
    projection frame's resolution, so that deprojection can be done in the
    projection's pixel space.

    projection : frame whose size defines the target resolution
    returns    : {fxy, ppxy} — scaled focal lengths and scaled principal point
*/
std::vector<cv::Point2f> Camera::getAdaptedIntrinsics(cv::Mat &projection){

    // Focal lengths scaled by the ratio projection size / cropping-mask size
    float fx = static_cast<float>(intr_profile.fx * projection.size().width) / croppingMask.width;
    float fy = static_cast<float>(intr_profile.fy * projection.size().height) / croppingMask.height;
    cv::Point2f fxy = cv::Point2f(fx, fy);

    // Principal point scaled with the same ratio
    // NOTE(review): unlike the focal lengths, the principal point is scaled
    // without subtracting the cropping-mask offset first — confirm this is the
    // intended fix (see the commented-out center-relative derivation below).
    float ppx = static_cast<float>(intr_profile.ppx * projection.size().width) / croppingMask.width;
    float ppy = static_cast<float>(intr_profile.ppy * projection.size().height) / croppingMask.height;

    /*
    // Alternative derivation: principal point expressed relative to the
    // cropping-mask center, then mapped onto the projection's center.
    float cam_center_x = croppingMask.width/2.0f;
    float ppx_cam_relative_to_croppingMask_center = intr_profile.ppx-(croppingMask.x+cam_center_x);
    float proj_center_x = projection.size().width/2.0f;
    float ppx_proj_relative_to_proj_center = proj_center_x * ppx_cam_relative_to_croppingMask_center / cam_center_x;
    float ppx_proj = ppx_proj_relative_to_proj_center + proj_center_x;

    float cam_center_y = croppingMask.height/2.0f;
    float ppy_cam_relative_to_croppingMask_center = intr_profile.ppy-(croppingMask.y+cam_center_y);
    float proj_center_y = projection.size().height/2.0f;
    float ppy_proj_relative_to_proj_center = proj_center_y * ppy_cam_relative_to_croppingMask_center / cam_center_y;
    float ppy_proj = ppy_proj_relative_to_proj_center + proj_center_y;
    */

    cv::Point2f ppxy = cv::Point2f(ppx, ppy);

    return std::vector<cv::Point2f> {fxy, ppxy};
}
......
......@@ -41,6 +41,10 @@ void Projection::adjustFrame(cv::Mat_<float> depth, cv::Mat_<cv::Vec3b> src, cv:
deprojectMap.create(dst.rows, dst.cols);
frameMap.create(dst.rows, dst.cols);
resized_dst.create(dst.rows, dst.cols);
std::vector<cv::Point2f> profil = camera->getAdaptedIntrinsics(dst);
fxy = profil.at(0);
ppxy = profil.at(1);
}
deprojectMap = cv::Point2i(-1,-1);
......@@ -50,11 +54,7 @@ void Projection::adjustFrame(cv::Mat_<float> depth, cv::Mat_<cv::Vec3b> src, cv:
// resize to match 1:1 ratio with resized_dst, since we'll do later:
// resized_dst[i] = src[i]
cv::resize(src, src, dst.size());
cv::resize(depth, depth, dst.size());
std::vector<cv::Point2f> profil = camera->getAdaptedIntrinsics(dst);
cv::Point2f fxy = profil.at(0);
cv::Point2f ppxy = profil.at(1);
cv::resize(depth, depth, dst.size());
deprojectPixelsFromDepth(depth, camera, beamer_pos, deprojectMap, fxy, ppxy);
filterLowestDeprojectedPoints(depth, deprojectMap, frameMap);
......@@ -75,48 +75,34 @@ void Projection::adjustFrame(cv::Mat_<float> depth, cv::Mat_<cv::Vec3b> src, cv:
Deproject pixels in 3D, then adapt to Beamer's POV, and go back to 2D
This gives us the location of pixels adapted to the Beamer projection
*/
/*
void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, cv::Rect mask, Camera *camera, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap){
void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap, cv::Point2f fxy, cv::Point2f ppxy){
// scale coord of the mask with the new resolution of the depth frame
cv::Rect mask = camera->getCroppingMask();
float mask_scale_x = mask.x * depth.cols / mask.width;
float mask_scale_y = mask.y * depth.rows / mask.height;
// Browse the depth frame matching the cropping mask
// while adapting pixels's position to the beamer's position
for (int j = 0; j < depth.rows; j++){
for (int i = 0; i < depth.cols; i++){
// coordinates of the pixel relative to the original image taken from the camera
int base_x = mask.x+i;
int base_y = mask.y+j;
int base_x = mask_scale_x+i;
int base_y = mask_scale_y+j;
float z = depth.at<float>(j,i);
// pixels based on the original depth frame taken from the camera
cv::Point2i pixel = findMatchingPixel( base_x, base_y, z, camera, beamer_pos );
cv::Point2i pixel = findMatchingPixel( base_x, base_y, z, camera, beamer_pos, fxy, ppxy );
// pixel relative to the cropping mask (the area where the frame is projected)
pixel.x -= mask.x;
pixel.y -= mask.y;
pixel.x -= mask_scale_x;
pixel.y -= mask_scale_y;
deprojectMap.at<cv::Point2i>(j,i) = pixel;
//deprojectMap.at<cv::Point2i>(j,i) = findMatchingPixel( i, j, depth.at<float>(j,i), camera, beamer_pos, fxy, ppxy );
}
}
}
*/
void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap, cv::Point2f fxy, cv::Point2f ppxy){
    // Walk every pixel of the depth frame and record, in deprojectMap, the
    // position that pixel maps to once adapted to the beamer's point of view.
    for (int row = 0; row < depth.rows; ++row){
        for (int col = 0; col < depth.cols; ++col){
            // depth value of the pixel in the original frame taken from the camera
            const float z = depth.at<float>(row, col);
            deprojectMap.at<cv::Point2i>(row, col) = findMatchingPixel(col, row, z, camera, beamer_pos, fxy, ppxy);
        }
    }
}
/*
Save the highest points in deprojectMap into frameMap,
because some points can be deprojected at the same location
......@@ -173,48 +159,6 @@ void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frame
}
}
/*
resize the frames to be a multiple of the base size:
src.size = n * base.size, where n is uint > 0
*/
/*
cv::Size Projection::getMatchingSize(cv::Mat &src, cv::Mat &base){
cv::Size bigSize;
bigSize.width = (src.size().width % base.size().width == 0) ? src.size().width : src.size().width - (src.size().width % base.size().width) + base.size().width;
bigSize.height = (src.size().height % base.size().height == 0) ? src.size().height : src.size().height - (src.size().height % base.size().height) + base.size().height;
return bigSize;
}
*/
/*
pixels coordinates are relative to the camera depth frame
*/
/*
void Projection::copyPixelsInto(cv::Point2i pixel_dst, cv::Mat_<cv::Vec3b> &dst, cv::Point2i pixel_src, cv::Mat_<cv::Vec3b> &src, cv::Mat_<float> &depth){
if( src.size().width == dst.size().width && src.size().height == dst.size().height ){
// determines the size of the square of pixels to copy from the source matching the position of the pixel of the depth frame
// (since src and dst are multiple of the depth frame's size)
int ratio_w = src.size().width / depth.size().width;
int ratio_h = src.size().height / depth.size().height;
for(int j=0; j<ratio_h; j++){
for (int i=0; i<ratio_w; i++){
int x_dst = pixel_dst.x * ratio_w + i;
int y_dst = pixel_dst.y * ratio_h + j;
int x_src = pixel_src.x * ratio_w + i;
int y_src = pixel_src.y * ratio_h + j;
dst.at<cv::Vec3b>(y_dst, x_dst) = src.at<cv::Vec3b>(y_src, x_src);
}
}
}
}
*/
/*
C : Camera position
B : Beamer position
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment