Skip to content
Snippets Groups Projects
Commit 221e5fe9 authored by simon.fanetti's avatar simon.fanetti
Browse files

cleaning camera + fix inaccuracy

parent 10e5c7ff
No related branches found
No related tags found
1 merge request!1Camera
......@@ -10,12 +10,7 @@ CameraStartThread::CameraStartThread(Camera *c) : QThread() {
void CameraStartThread::run() {
    // Worker-thread entry point: perform the blocking camera startup.
    // start() returns 0 on success, non-zero when no device could be
    // resolved; warm-up is handled inside Camera::start() itself, so a
    // single setupReady(err) signal is enough to report the outcome.
    // NOTE(review): the diff residue showed a removed deviceFound(err)
    // emit and a direct warmUpDepthLens() call — both belong to the old
    // revision (warmUpDepthLens is now private to Camera).
    int err = camera->start();
    emit setupReady(err);
}
......@@ -27,9 +22,9 @@ InitCamera::InitCamera(SandboxSetup *_setup, QWidget *parent) :
{
setup = _setup;
ui->setupUi(this);
ui->label->setText(QString("Initializing device..."));
workerThread = new CameraStartThread(setup->getCamera());
connect(workerThread, &CameraStartThread::deviceFound, this, &InitCamera::deviceFound);
connect(workerThread, &CameraStartThread::setupReady, this, &InitCamera::setupReady);
connect(workerThread, &CameraStartThread::finished, workerThread, &QObject::deleteLater);
}
......@@ -53,16 +48,12 @@ void InitCamera::setupCamera(){
workerThread->start();
}
void InitCamera::deviceFound(int err){
QString msg = (err) ? "No device found" : "Initializing device...";
void InitCamera::setupReady(int err){
if(err){
QString msg = "No device found";
ui->label->setText(msg);
if(err)
emit sendNotif(err);
}
void InitCamera::setupReady(int err){
emit sendNotif(err);
}
......
......@@ -21,8 +21,7 @@ public:
void run();
signals:
void setupReady(int state);
void deviceFound(int err);
void setupReady(int err);
private:
Camera *camera;
......@@ -47,8 +46,7 @@ private:
CameraStartThread *workerThread;
void setupCamera();
void setupReady(int state);
void deviceFound(int err);
void setupReady(int err);
};
#endif // INITCAMERA_H
......@@ -9,16 +9,16 @@ class BeamerProjection{
private:
cv::Mat adjustingMatrix;
cv::Point2i adjustPixel(int i, int j, float z, Camera camera, cv::Point3f beamer);
cv::Point2i adjustPixel(int i, int j, float z, Camera *camera, cv::Point3f beamer_pos);
public:
BeamerProjection();
void setAdjustingMatrix(cv::Mat matrix){ matrix.clone(); }
void setAdjustingMatrix(cv::Mat matrix){ adjustingMatrix.setTo(matrix); }
cv::Mat getAdjustingMatrix(){ return adjustingMatrix.clone(); }
cv::Point2i rotatePixel(cv::Point2i pixel);
void adjustFrame(cv::Mat &depth, cv::Mat &src, cv::Mat &dst, Camera camera, cv::Point3f beamer);
void adjustFrame(cv::Mat &depth, cv::Mat &src, cv::Mat &dst, Camera *camera, cv::Point3f beamer_pos);
void printAdjustingMatrix();
};
......
......@@ -5,33 +5,32 @@
#include <librealsense2/rsutil.h>
#include <opencv2/opencv.hpp>
// camera resolution : 640x480
class Camera{
private:
rs2::spatial_filter spatFilter;
rs2::temporal_filter tempFilter;
rs2::config cfg;
rs2::pipeline pipe;
rs2::pipeline_profile profile;
rs2_intrinsics intrinsics;
cv::Mat matDepth;
cv::Mat matRGB;
rs2::spatial_filter *spatFilter;
rs2::temporal_filter *tempFilter;
rs2::config *cfg;
rs2::pipeline *pipe;
rs2::align *align_to_depth;
rs2::video_frame *color_frame;
rs2::depth_frame *depth_frame;
cv::Rect croppingMask;
void warmUpDepthLens();
void filterDepthFrame(rs2::depth_frame &frameDepth);
public:
Camera();
~Camera();
cv::Mat getDepthFrame(){ return matDepth.clone(); };
cv::Mat getColorFrame(){ return matRGB.clone(); };
cv::Mat getDepthFrame(){ return cv::Mat(cv::Size(depth_frame->get_width(), depth_frame->get_height()), CV_16UC1, (void *)depth_frame->get_data(), cv::Mat::AUTO_STEP); };
cv::Mat getColorFrame(){ return cv::Mat(cv::Size(color_frame->get_width(), color_frame->get_height()), CV_8UC3, (void *)color_frame->get_data(), cv::Mat::AUTO_STEP); };
void setCroppingMask(cv::Rect mask){ croppingMask = mask; };
cv::Rect getCroppingMask(){ return croppingMask; };
int start();
void warmUpDepthLens();
void stop();
cv::Point3f deprojectPixelToPoint(float coord[], float z1);
cv::Point2i projectPointToPixel(cv::Point3f point3D);
......
......@@ -23,8 +23,8 @@ class Sandbox{
cv::Mat getColorFrame();
cv::Mat getDepthFrame();
cv::Mat* adjustProjection(cv::Mat* frame);
void showImage(cv::Mat* image);
void adjustProjection(cv::Mat &frame);
void showImage(cv::Mat &image);
int loadConfig();
int loadConfigFrom(char *path);
void initWindowsFullScreen();
......
......@@ -14,23 +14,23 @@ cv::Point2i BeamerProjection::rotatePixel(cv::Point2i pixel){
}
cv::Point2i BeamerProjection::adjustPixel(int i, int j, float z, Camera *camera, cv::Point3f beamer_pos){
    // Deproject the depth pixel (col j, row i) with raw depth z (millimeters,
    // hence the /1000.0) into a 3D point in camera space.
    float coord[2] = {static_cast<float>(j), static_cast<float>(i)};
    cv::Point3f p = camera->deprojectPixelToPoint(coord, z / 1000.0);

    // Direction vector from the 3D point towards the beamer position.
    cv::Vec3f dir(beamer_pos.x - p.x, beamer_pos.y - p.y, beamer_pos.z - p.z);

    // Slide the point along that ray onto the plane z = 0.99 m,
    // i.e. where the beamer's light would actually hit.
    float prop = (0.99 - p.z) / dir[2];
    p.x += dir[0] * prop;
    p.y += dir[1] * prop;
    p.z += dir[2] * prop;

    // Project the displaced 3D point back into depth-image pixel coordinates.
    return camera->projectPointToPixel(p);
}
void BeamerProjection::adjustFrame(cv::Mat &depth, cv::Mat &src, cv::Mat &dst, Camera camera, cv::Point3f beamer){
void BeamerProjection::adjustFrame(cv::Mat &depth, cv::Mat &src, cv::Mat &dst, Camera *camera, cv::Point3f beamer_pos){
int nbChannel = src.channels();
//transformation on all pixel
......@@ -40,7 +40,8 @@ void BeamerProjection::adjustFrame(cv::Mat &depth, cv::Mat &src, cv::Mat &dst, C
for (int j = 0; j < src.cols; j++)
{
cv::Point pixelIJ(j, i);
cv::Point pixel = adjustPixel(i, j, static_cast<float>(depth.at<uint16_t>(pixelIJ)), camera, beamer);
cv::Point pixel = adjustPixel(i, j, static_cast<float>(depth.at<uint16_t>(pixelIJ)), camera, beamer_pos);
if (pixel.x < dst.cols && pixel.y < dst.rows && pixel.x >= 0 && pixel.y >= 0)
{
if (nbChannel == 1)
......
......@@ -3,9 +3,26 @@
Camera::Camera(){
    // RealSense streaming objects (heap-allocated, released in ~Camera()).
    pipe = new rs2::pipeline();
    cfg = new rs2::config();
    align_to_depth = new rs2::align(RS2_STREAM_DEPTH);

    // Depth post-processing filters applied by filterDepthFrame().
    spatFilter = new rs2::spatial_filter();
    tempFilter = new rs2::temporal_filter();

    // Last captured frames; constructed empty until capture() swaps in data.
    color_frame = new rs2::video_frame(rs2::frame());
    depth_frame = new rs2::depth_frame(rs2::frame());
}
Camera::~Camera(){
    // Release everything allocated in the constructor. The objects are
    // independent, so destruction order is not significant.
    delete color_frame;
    delete depth_frame;
    delete tempFilter;
    delete spatFilter;
    delete align_to_depth;
    delete pipe;
    delete cfg;
}
/*
* Public
......@@ -15,86 +32,86 @@ Camera::Camera() {
int Camera::start(){
// check for a device availible
if(!cfg.can_resolve(pipe)){
if(!cfg->can_resolve(*pipe)){
//std::cout << "Error: No device found" << std::endl;
return 1;
}
// Logs
std::remove("./camera.logs");
rs2::log_to_file(RS2_LOG_SEVERITY_DEBUG, "./camera.logs");
//rs2::log_to_console(RS2_LOG_SEVERITY_DEBUG);//RS2_LOG_SEVERITY_ERROR);
spatFilter.set_option(RS2_OPTION_HOLES_FILL, 5);
profile = pipe.start(cfg);
auto sensor = profile.get_device().first<rs2::depth_sensor>();
// Settings
// default camera resolution : 640x480
cfg->enable_stream(RS2_STREAM_COLOR);
cfg->enable_stream(RS2_STREAM_DEPTH);
/*
cfg->enable_stream(RS2_STREAM_COLOR, 1280, 720, RS2_FORMAT_RGB8, 30);
cfg->enable_stream(RS2_STREAM_DEPTH, 1280, 720, RS2_FORMAT_Y16, 30);
*/
// TODO: At the moment the SDK does not offer a closed enum for D400 visual presets
// We do this to reduce the number of black pixels
// The hardware can perform hole-filling much better and much more power efficient then our software
auto range = sensor.get_option_range(RS2_OPTION_VISUAL_PRESET);
for (auto i = range.min; i < range.max; i += range.step)
if (std::string(sensor.get_option_value_description(RS2_OPTION_VISUAL_PRESET, i)) == "High Density")
sensor.set_option(RS2_OPTION_VISUAL_PRESET, i);
rs2::depth_sensor sensor = pipe->start(*cfg).get_device().first<rs2::depth_sensor>();
return 0;
}
// Doc presets : https://dev.intelrealsense.com/docs/d400-series-visual-presets
sensor.set_option(RS2_OPTION_VISUAL_PRESET, RS2_RS400_VISUAL_PRESET_HIGH_DENSITY);
// 5 = range mapped to unlimited
spatFilter->set_option(RS2_OPTION_HOLES_FILL, 5);
// Capture 30 frames to give autoexposure, etc. a chance to settle
void Camera::warmUpDepthLens()
{
for (int i = 0; i < 30; ++i)
{
rs2::frameset data = pipe.wait_for_frames();
auto frameDepth = data.get_depth_frame();
frameDepth = spatFilter.process(frameDepth);
frameDepth = tempFilter.process(frameDepth);
}
warmUpDepthLens();
return 0;
}
void Camera::stop(){
    // Stop the streaming pipeline; `pipe` is a pointer member (see ctor),
    // so the leftover `pipe.stop();` from the old revision is removed.
    pipe->stop();
}
void Camera::capture(){
rs2::align align(RS2_STREAM_DEPTH);
auto frameset = pipe.wait_for_frames();
//Get processed aligned frame
frameset = align.process(frameset);
// Trying to get color and aligned depth frames
rs2::video_frame color_frame = frameset.get_color_frame();
rs2::depth_frame depth_frame = frameset.get_depth_frame();
filterDepthFrame(depth_frame);
// intrinsics.model : Brown Conrady
matDepth = cv::Mat(cv::Size(depth_frame.get_width(), depth_frame.get_height()), CV_16UC1, (void *)depth_frame.get_data(), cv::Mat::AUTO_STEP);
// depth_frame.get_distance(x,y) == ((float)matDepth.at<uint16_t>(y, x) * depth_frame.get_units())
cv::Mat(cv::Size(color_frame.get_width(), color_frame.get_height()), CV_8UC3, (void *)color_frame.get_data(), cv::Mat::AUTO_STEP).copyTo(matRGB);
}
auto frameset = pipe->wait_for_frames();
// Trying to get frames from the depth perspective (aligned on depth camera)
frameset = align_to_depth->process(frameset);
rs2::video_frame colorFrame = frameset.get_color_frame();
rs2::depth_frame depthFrame = frameset.get_depth_frame();
if(depthFrame && colorFrame){
filterDepthFrame(depthFrame);
// Values from depth matrix to real world
// depth_frame.get_distance(x,y) == ((float)cv::Mat.at<uint16_t>(y, x) * depth_frame.get_units())
color_frame->swap(colorFrame);
depth_frame->swap(depthFrame);
}
}
// Project a 3D point (camera space, meters) back onto the depth image,
// returning the corresponding pixel coordinates.
cv::Point2i Camera::projectPointToPixel(cv::Point3f point3D){
    float point[3] = {point3D.x, point3D.y, point3D.z};
    float pixel[2];
    // Use the intrinsics of the current depth stream profile instead of a
    // cached member (the old `&intrinsics` call is stale diff residue).
    rs2_intrinsics intr = depth_frame->get_profile().as<rs2::video_stream_profile>().get_intrinsics();
    rs2_project_point_to_pixel(pixel, &intr, point);
    return cv::Point2i(pixel[0], pixel[1]);
}
// Deproject a depth-image pixel (coord = {x, y}) at distance z1 (meters)
// into a 3D point in camera space.
cv::Point3f Camera::deprojectPixelToPoint(float coord[], float z1){
    float p[3];
    // Query the intrinsics of the current depth stream profile per call
    // (replaces the removed cached `intrinsics` member from the old revision).
    rs2_intrinsics intr = depth_frame->get_profile().as<rs2::video_stream_profile>().get_intrinsics();
    rs2_deproject_pixel_to_point(p, &intr, coord, z1);
    return cv::Point3f(p[0], p[1], p[2]);
}
void Camera::printCroppingMask(){
    // Debug helper: print the mask as "(x,y) + widthxheight".
    // The duplicate cout line using "," as the size separator was the old
    // revision; "x" reads as a dimension and is kept.
    cv::Rect mask = getCroppingMask();
    std::cout << "(" << mask.x << "," << mask.y << ") + " << mask.width << "x" << mask.height << std::endl;
}
......@@ -104,10 +121,19 @@ void Camera::printCroppingMask(){
* Private
*/
// Capture 30 frames to give autoexposure, etc. a chance to settle
void Camera::warmUpDepthLens()
{
for (int i = 0; i < 30; ++i)
{
rs2::frameset data = pipe->wait_for_frames();
auto frameDepth = data.get_depth_frame();
filterDepthFrame(frameDepth);
}
}
void Camera::filterDepthFrame(rs2::depth_frame &frameDepth)
{
    // Apply spatial then temporal filtering in place. The filters are
    // pointer members (see ctor), so the old dot-syntax lines and the
    // removed `intrinsics` caching from the previous revision are dropped.
    frameDepth = spatFilter->process(frameDepth);
    frameDepth = tempFilter->process(frameDepth);
}
......@@ -16,31 +16,30 @@ Sandbox::Sandbox(){
cv::Mat Sandbox::getColorFrame(){
    // Capture a fresh frameset and return the full color image.
    // The second (unreachable) return applying the cropping mask was the
    // old revision — cropping now happens later, in adjustProjection().
    camera.capture();
    return camera.getColorFrame();
}
cv::Mat Sandbox::getDepthFrame(){
    // Capture a fresh frameset and return the full depth image.
    // The second (unreachable) return applying the cropping mask was the
    // old revision — cropping now happens later, in adjustProjection().
    camera.capture();
    return camera.getDepthFrame();
}
cv::Mat* Sandbox::adjustProjection(cv::Mat* frame){
void Sandbox::adjustProjection(cv::Mat &frame){
static cv::Mat frameBeamer(cv::Size(beamer.getWidth(), beamer.getHeight()), CV_8UC3);
static cv::Mat frameBeamer = cv::Mat(cv::Size(beamer.getWidth(), beamer.getHeight()), CV_8UC3);
cv::Mat depth = getDepthFrame();
cv::Mat imageCalibrate(depth.size(), CV_8UC3, cv::Scalar(0, 0, 0));
cv::Mat imageCalibrate = cv::Mat(depth.size(), CV_8UC3, cv::Scalar(0, 0, 0));
cv::resize(*frame, *frame, depth.size());
projection.adjustFrame(depth, *frame, imageCalibrate, camera, beamer.getPosition());
cv::resize(frame, frame, depth.size());
projection.adjustFrame(depth, frame, imageCalibrate, &camera, beamer.getPosition());
//flip to align frame with beamer
//cv::flip(imageCalibrate, imageCalibrate, 1);
//cv::flip(imageCalibrate, imageCalibrate, 0);
cv::resize(imageCalibrate, frameBeamer, frameBeamer.size());
frameBeamer.copyTo(*frame);
return frame;
cv::resize(imageCalibrate(camera.getCroppingMask()), frameBeamer, frameBeamer.size());
frameBeamer.copyTo(frame);
/*
cv::Mat image = *frame;
......@@ -57,11 +56,11 @@ cv::Mat* Sandbox::adjustProjection(cv::Mat* frame){
*/
}
void Sandbox::showImage(cv::Mat &image){
    // Display `image` full-screen through the beamer after warping it onto
    // the sandbox surface. Reference-based version; the interleaved old
    // pointer signature and `*image` dereference are diff residue.
    initWindowsFullScreen(defaultWindowsName);
    adjustProjection(image);
    cv::imshow(defaultWindowsName, image);
}
int Sandbox::loadConfigFrom(char *path){
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment