diff --git a/app/SandboxSetup/initcamera.cpp b/app/SandboxSetup/initcamera.cpp
index 3985fe0fe9d6c3adeac8893d6b89685da3e26a28..79ee3a69372a9415d6cfa12a684f0603662a1b14 100644
--- a/app/SandboxSetup/initcamera.cpp
+++ b/app/SandboxSetup/initcamera.cpp
@@ -10,12 +10,7 @@ CameraStartThread::CameraStartThread(Camera *c) : QThread() {
 void CameraStartThread::run() {
 
     int err = camera->start();
-    emit deviceFound(err);
-
-    if(!err){
-        camera->warmUpDepthLens();
-        emit setupReady(0);
-    }
+    emit setupReady(err);
 }
 
 
@@ -27,9 +22,9 @@ InitCamera::InitCamera(SandboxSetup *_setup, QWidget *parent) :
 {
     setup = _setup;
     ui->setupUi(this);
+    ui->label->setText(QString("Initializing device..."));
 
     workerThread = new CameraStartThread(setup->getCamera());
-    connect(workerThread, &CameraStartThread::deviceFound, this, &InitCamera::deviceFound);
     connect(workerThread, &CameraStartThread::setupReady, this, &InitCamera::setupReady);
     connect(workerThread, &CameraStartThread::finished, workerThread, &QObject::deleteLater);
 }
@@ -53,15 +48,11 @@ void InitCamera::setupCamera(){
     workerThread->start();
 }
 
-void InitCamera::deviceFound(int err){
-    QString msg = (err) ? "No device found" : "Initializing device...";
-    ui->label->setText(msg);
-
-    if(err)
-        emit sendNotif(err);
-}
-
 void InitCamera::setupReady(int err){
+    if(err){
+        QString msg = "No device found";
+        ui->label->setText(msg);
+    }
 
     emit sendNotif(err);
 }
diff --git a/app/SandboxSetup/initcamera.h b/app/SandboxSetup/initcamera.h
index 86862835777611064cec771398bc4d630e523e6e..1ffe6d7dc780a3e8fedec9792723bd34ef8f17be 100644
--- a/app/SandboxSetup/initcamera.h
+++ b/app/SandboxSetup/initcamera.h
@@ -21,8 +21,7 @@ public:
     void run();
 
 signals:
-    void setupReady(int state);
-    void deviceFound(int err);
+    void setupReady(int err);
 
 private:
     Camera *camera;
@@ -47,8 +46,7 @@ private:
     CameraStartThread *workerThread;
 
     void setupCamera();
-    void setupReady(int state);
-    void deviceFound(int err);
+    void setupReady(int err);
 };
 
 #endif // INITCAMERA_H
diff --git a/inc/beamerProjection.h b/inc/beamerProjection.h
index 5f756ad8d6a54b864ed5a3a35ec7e01ca73cfcee..5989fd7a630515bd97033f36bc0fc68db1812edb 100644
--- a/inc/beamerProjection.h
+++ b/inc/beamerProjection.h
@@ -9,16 +9,16 @@ class BeamerProjection{
     private:
         cv::Mat adjustingMatrix;
 
-        cv::Point2i adjustPixel(int i, int j, float z, Camera camera, cv::Point3f beamer);
+        cv::Point2i adjustPixel(int i, int j, float z, Camera *camera, cv::Point3f beamer_pos);
 
     public:
         BeamerProjection();
 
-        void setAdjustingMatrix(cv::Mat matrix){ matrix.clone(); }
+        void setAdjustingMatrix(cv::Mat matrix){ adjustingMatrix = matrix.clone(); }
         cv::Mat getAdjustingMatrix(){ return adjustingMatrix.clone(); }
 
         cv::Point2i rotatePixel(cv::Point2i pixel);
-        void adjustFrame(cv::Mat &depth, cv::Mat &src, cv::Mat &dst, Camera camera, cv::Point3f beamer);
+        void adjustFrame(cv::Mat &depth, cv::Mat &src, cv::Mat &dst, Camera *camera, cv::Point3f beamer_pos);
         void printAdjustingMatrix();
 
 };
diff --git a/inc/camera.h b/inc/camera.h
index a1dd321a72ada8541daaa3e147c5e222ddb4f24c..71c9490cc8553cb5fafe7e7add4ccc8892500fb8 100644
--- a/inc/camera.h
+++ b/inc/camera.h
@@ -5,33 +5,34 @@
 #include <librealsense2/rsutil.h>
 #include <opencv2/opencv.hpp>
 
-// camera resolution : 640x480
-
 class Camera{
     private:
-        rs2::spatial_filter spatFilter;
-        rs2::temporal_filter tempFilter;
-        rs2::config cfg;
-        rs2::pipeline pipe;
-        rs2::pipeline_profile profile;
-        rs2_intrinsics intrinsics;
-
-        cv::Mat matDepth;
-        cv::Mat matRGB;
+        rs2::spatial_filter *spatFilter;
+        rs2::temporal_filter *tempFilter;
+        rs2::config *cfg;
+        rs2::pipeline *pipe;
+        rs2::align *align_to_depth;
+
+        rs2::video_frame *color_frame;
+        rs2::depth_frame *depth_frame;
+    
         cv::Rect croppingMask;
 
+        void warmUpDepthLens();
         void filterDepthFrame(rs2::depth_frame &frameDepth);
 
     public:
         Camera();
+        ~Camera();
 
-        cv::Mat getDepthFrame(){ return matDepth.clone(); };
-        cv::Mat getColorFrame(){ return matRGB.clone(); };
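+        // NOTE: these Mats wrap the frame buffers directly (no copy), so they stay
+        // valid only until the next capture() replaces the underlying frames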
+        cv::Mat getDepthFrame(){ return cv::Mat(cv::Size(depth_frame->get_width(), depth_frame->get_height()), CV_16UC1, (void *)depth_frame->get_data(), cv::Mat::AUTO_STEP); };
+        cv::Mat getColorFrame(){ return cv::Mat(cv::Size(color_frame->get_width(), color_frame->get_height()), CV_8UC3, (void *)color_frame->get_data(), cv::Mat::AUTO_STEP); };
         void setCroppingMask(cv::Rect mask){ croppingMask = mask; };
         cv::Rect getCroppingMask(){ return croppingMask; };
         
         int start();
-        void warmUpDepthLens();
         void stop();
         cv::Point3f deprojectPixelToPoint(float coord[], float z1);
         cv::Point2i projectPointToPixel(cv::Point3f point3D);
diff --git a/inc/sandbox.h b/inc/sandbox.h
index 2588e5a37b089d1d306ab318d66d4f3caf236303..99e39976b47e94eca98faf36a70c65aadbac7f55 100644
--- a/inc/sandbox.h
+++ b/inc/sandbox.h
@@ -23,8 +23,8 @@ class Sandbox{
 
         cv::Mat getColorFrame();
         cv::Mat getDepthFrame();
-        cv::Mat* adjustProjection(cv::Mat* frame);
-        void showImage(cv::Mat* image);
+        void adjustProjection(cv::Mat &frame);
+        void showImage(cv::Mat &image);
         int loadConfig();
         int loadConfigFrom(char *path);
         void initWindowsFullScreen();
diff --git a/src/components/beamerProjection.cpp b/src/components/beamerProjection.cpp
index b07d6515727b43c70abe21fd4d0f3b9664c1d186..aaf9ea1fcb7fb75b2dc6efce34a03d82518a7d15 100644
--- a/src/components/beamerProjection.cpp
+++ b/src/components/beamerProjection.cpp
@@ -14,23 +14,24 @@ cv::Point2i BeamerProjection::rotatePixel(cv::Point2i pixel){
 }
 
 
-cv::Point2i BeamerProjection::adjustPixel(int i, int j, float z, Camera camera, cv::Point3f beamer){
+cv::Point2i BeamerProjection::adjustPixel(int i, int j, float z, Camera *camera, cv::Point3f beamer_pos){
 
     //pixel to point 3d
     float coord[2] = {static_cast<float>(j), static_cast<float>(i)};
-    cv::Point3f p = camera.deprojectPixelToPoint(coord, z / 1000.0);
+    cv::Point3f p = camera->deprojectPixelToPoint(coord, z / 1000.0);
     //calcul vector point to beamer
-    cv::Vec3f dir(beamer.x - p.x, beamer.y - p.y, beamer.z - p.z);
+    cv::Vec3f dir(beamer_pos.x - p.x, beamer_pos.y - p.y, beamer_pos.z - p.z);
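+    // slide the point along that vector onto the plane z = 0.99 m (presumably the reference projection plane)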
     float prop = (0.99 - p.z) / dir[2];
     p.x += dir[0] * prop;
     p.y += dir[1] * prop;
     p.z += dir[2] * prop;
     //point 3d to pixel
-    return camera.projectPointToPixel(p);
+    return camera->projectPointToPixel(p);
 }
 
 
-void BeamerProjection::adjustFrame(cv::Mat &depth, cv::Mat &src, cv::Mat &dst, Camera camera, cv::Point3f beamer){
+void BeamerProjection::adjustFrame(cv::Mat &depth, cv::Mat &src, cv::Mat &dst, Camera *camera, cv::Point3f beamer_pos){
 
     int nbChannel = src.channels();
     //transformation on all pixel
@@ -40,7 +40,8 @@ void BeamerProjection::adjustFrame(cv::Mat &depth, cv::Mat &src, cv::Mat &dst, C
         for (int j = 0; j < src.cols; j++)
         {
             cv::Point pixelIJ(j, i);
-            cv::Point pixel = adjustPixel(i, j, static_cast<float>(depth.at<uint16_t>(pixelIJ)), camera, beamer);
+            
+            cv::Point pixel = adjustPixel(i, j, static_cast<float>(depth.at<uint16_t>(pixelIJ)), camera, beamer_pos);
             if (pixel.x < dst.cols && pixel.y < dst.rows && pixel.x >= 0 && pixel.y >= 0)
             {
                 if (nbChannel == 1)
diff --git a/src/components/camera.cpp b/src/components/camera.cpp
index 05100752411533bd57f3d64840e9f1522fe9cc3e..7fcbb1073ae67dd9b89a379ef1d916cfd9f4194b 100644
--- a/src/components/camera.cpp
+++ b/src/components/camera.cpp
@@ -1,11 +1,29 @@
 #include "../../inc/camera.h"
 
 
-Camera::Camera() {
-    
+Camera::Camera(){
+
+    cfg = new rs2::config();
+    pipe = new rs2::pipeline();
+    align_to_depth = new rs2::align(RS2_STREAM_DEPTH);
+    spatFilter = new rs2::spatial_filter();
+    tempFilter = new rs2::temporal_filter();
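+    // start with empty (null) frames; they are replaced at the first capture()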
+    color_frame = new rs2::video_frame(rs2::frame());
+    depth_frame = new rs2::depth_frame(rs2::frame());
 }
 
 
+Camera::~Camera(){
+    delete align_to_depth;
+    delete tempFilter;
+    delete spatFilter;
+    delete pipe;
+    delete cfg;
+    delete depth_frame;
+    delete color_frame;
+}
+
 
 /*
 *   Public
@@ -15,86 +32,87 @@ Camera::Camera() {
 int Camera::start(){
     
     // check for a device availible
-    if(!cfg.can_resolve(pipe)){
+    if(!cfg->can_resolve(*pipe)){
         //std::cout << "Error: No device found" << std::endl;
         return 1;
     }
 
+    // Logs
     std::remove("./camera.logs");
     rs2::log_to_file(RS2_LOG_SEVERITY_DEBUG, "./camera.logs");
-    //rs2::log_to_console(RS2_LOG_SEVERITY_DEBUG);//RS2_LOG_SEVERITY_ERROR);
 
-    spatFilter.set_option(RS2_OPTION_HOLES_FILL, 5);
-    profile = pipe.start(cfg);
-    auto sensor = profile.get_device().first<rs2::depth_sensor>();
+    // Settings
+    // default camera resolution : 640x480
+    cfg->enable_stream(RS2_STREAM_COLOR);
+    cfg->enable_stream(RS2_STREAM_DEPTH);
+    /*
+    cfg->enable_stream(RS2_STREAM_COLOR, 1280, 720, RS2_FORMAT_RGB8, 30);
+    cfg->enable_stream(RS2_STREAM_DEPTH, 1280, 720, RS2_FORMAT_Y16, 30);
+    */
+   
+    rs2::depth_sensor sensor = pipe->start(*cfg).get_device().first<rs2::depth_sensor>();
 
-    // TODO: At the moment the SDK does not offer a closed enum for D400 visual presets
-    // We do this to reduce the number of black pixels
-    // The hardware can perform hole-filling much better and much more power efficient then our software
-    auto range = sensor.get_option_range(RS2_OPTION_VISUAL_PRESET);
-    for (auto i = range.min; i < range.max; i += range.step)
-        if (std::string(sensor.get_option_value_description(RS2_OPTION_VISUAL_PRESET, i)) == "High Density")
-            sensor.set_option(RS2_OPTION_VISUAL_PRESET, i);
+    // Doc presets : https://dev.intelrealsense.com/docs/d400-series-visual-presets
+    sensor.set_option(RS2_OPTION_VISUAL_PRESET, RS2_RS400_VISUAL_PRESET_HIGH_DENSITY);
 
-    return 0;
-}
+    // hole-filling mode 5 = unlimited fill radius
+    spatFilter->set_option(RS2_OPTION_HOLES_FILL, 5);
 
+    warmUpDepthLens();
 
-// Capture 30 frames to give autoexposure, etc. a chance to settle
-void Camera::warmUpDepthLens()
-{
-    for (int i = 0; i < 30; ++i)
-    {
-        rs2::frameset data = pipe.wait_for_frames();
-        auto frameDepth = data.get_depth_frame();
-        frameDepth = spatFilter.process(frameDepth);
-        frameDepth = tempFilter.process(frameDepth);
-    }
+    return 0;
 }
 
 
 void Camera::stop(){
-    pipe.stop();
+    pipe->stop();
 }
 
 
 void Camera::capture(){
 
-    rs2::align align(RS2_STREAM_DEPTH);
-    auto frameset = pipe.wait_for_frames();
-    //Get processed aligned frame
-    frameset = align.process(frameset);
-    // Trying to get color and aligned depth frames
-    rs2::video_frame color_frame = frameset.get_color_frame();
-    rs2::depth_frame depth_frame = frameset.get_depth_frame();
-    filterDepthFrame(depth_frame);
-    // intrinsics.model : Brown Conrady
-    matDepth = cv::Mat(cv::Size(depth_frame.get_width(), depth_frame.get_height()), CV_16UC1, (void *)depth_frame.get_data(), cv::Mat::AUTO_STEP);
-    // depth_frame.get_distance(x,y) == ((float)matDepth.at<uint16_t>(y, x) * depth_frame.get_units())
-    cv::Mat(cv::Size(color_frame.get_width(), color_frame.get_height()), CV_8UC3, (void *)color_frame.get_data(), cv::Mat::AUTO_STEP).copyTo(matRGB);
-}
+    auto frameset = pipe->wait_for_frames();
 
+    // Align the frameset to the depth camera's viewpoint
+    frameset = align_to_depth->process(frameset);
+    rs2::video_frame colorFrame = frameset.get_color_frame();
+    rs2::depth_frame depthFrame = frameset.get_depth_frame();
 
+    if(depthFrame && colorFrame){
+
+        filterDepthFrame(depthFrame);
+
+        // Raw depth values convert to meters via the depth units:
+        // depth_frame->get_distance(x,y) == (float)getDepthFrame().at<uint16_t>(y, x) * depth_frame->get_units()
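+        // keep the latest frames in the members so the zero-copy getters can wrap them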
+        color_frame->swap(colorFrame);
+        depth_frame->swap(depthFrame);
+    }
+}
+
+// project a 3D point (camera space, in meters) to a pixel of the depth frame
 cv::Point2i Camera::projectPointToPixel(cv::Point3f point3D){
 
     float point[3] = {point3D.x, point3D.y, point3D.z};
     float pixel[2];
-    rs2_project_point_to_pixel(pixel, &intrinsics, point);
+    rs2_intrinsics intr = depth_frame->get_profile().as<rs2::video_stream_profile>().get_intrinsics();
+    rs2_project_point_to_pixel(pixel, &intr, point);
     return cv::Point2i(pixel[0], pixel[1]);
 }
 
-
+// deproject a depth-frame pixel (with its depth z1 in meters) to a 3D point
 cv::Point3f Camera::deprojectPixelToPoint(float coord[], float z1){
 
     float p[3];
-    rs2_deproject_pixel_to_point(p, &intrinsics, coord, z1);
+    rs2_intrinsics intr = depth_frame->get_profile().as<rs2::video_stream_profile>().get_intrinsics();
+    rs2_deproject_pixel_to_point(p, &intr, coord, z1);
     return cv::Point3f(p[0], p[1], p[2]);
 }
 
 
 void Camera::printCroppingMask(){
     cv::Rect mask = getCroppingMask();
-    std::cout << "(" << mask.x << "," << mask.y << ") + " << mask.width << "," << mask.height << std::endl;
+    std::cout << "(" << mask.x << "," << mask.y << ") + " << mask.width << "x" << mask.height << std::endl;
 }
 
 
@@ -104,10 +121,19 @@ void Camera::printCroppingMask(){
 *   Private
 */
 
+// Capture 30 frames to give autoexposure, etc. a chance to settle
+void Camera::warmUpDepthLens()
+{
+    for (int i = 0; i < 30; ++i)
+    {
+        rs2::frameset data = pipe->wait_for_frames();
+        auto frameDepth = data.get_depth_frame();
+        filterDepthFrame(frameDepth);
+    }
+}
 
 void Camera::filterDepthFrame(rs2::depth_frame &frameDepth)
 {
-    frameDepth = spatFilter.process(frameDepth);
-    frameDepth = tempFilter.process(frameDepth);
-    intrinsics = frameDepth.get_profile().as<rs2::video_stream_profile>().get_intrinsics();
+    frameDepth = spatFilter->process(frameDepth);
+    frameDepth = tempFilter->process(frameDepth);
 }
diff --git a/src/lib/sandbox.cpp b/src/lib/sandbox.cpp
index 0284015eb43ecc415e9024655c26697f50b8fdf6..2ebfa1a1dd1f6133ea458cb3d9d3c49206abe3fd 100644
--- a/src/lib/sandbox.cpp
+++ b/src/lib/sandbox.cpp
@@ -16,31 +16,31 @@ Sandbox::Sandbox(){
 
 cv::Mat Sandbox::getColorFrame(){
     camera.capture();
-    return camera.getColorFrame()(camera.getCroppingMask());
+    return camera.getColorFrame();
 }
 
 cv::Mat Sandbox::getDepthFrame(){
     camera.capture();
-    return camera.getDepthFrame()(camera.getCroppingMask());
+    return camera.getDepthFrame();
 }
 
 
-cv::Mat* Sandbox::adjustProjection(cv::Mat* frame){
+void Sandbox::adjustProjection(cv::Mat &frame){
 
-    static cv::Mat frameBeamer(cv::Size(beamer.getWidth(), beamer.getHeight()), CV_8UC3);
+    static cv::Mat frameBeamer = cv::Mat(cv::Size(beamer.getWidth(), beamer.getHeight()), CV_8UC3);
     
     cv::Mat depth = getDepthFrame();
-    cv::Mat imageCalibrate(depth.size(), CV_8UC3, cv::Scalar(0, 0, 0));
+    cv::Mat imageCalibrate = cv::Mat(depth.size(), CV_8UC3, cv::Scalar(0, 0, 0));
     
-    cv::resize(*frame, *frame, depth.size());
-    projection.adjustFrame(depth, *frame, imageCalibrate, camera, beamer.getPosition());
+    cv::resize(frame, frame, depth.size());
+
+    projection.adjustFrame(depth, frame, imageCalibrate, &camera, beamer.getPosition());
     //flip to align frame with beamer
     //cv::flip(imageCalibrate, imageCalibrate, 1);
     //cv::flip(imageCalibrate, imageCalibrate, 0);
-
-    cv::resize(imageCalibrate, frameBeamer, frameBeamer.size());
-    frameBeamer.copyTo(*frame);
-    return frame;
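+    // crop to the sandbox area, then scale up to the beamer resolution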
+    cv::resize(imageCalibrate(camera.getCroppingMask()), frameBeamer, frameBeamer.size());
+    frameBeamer.copyTo(frame);
 
 /*
     cv::Mat image = *frame;
@@ -57,11 +56,11 @@ cv::Mat* Sandbox::adjustProjection(cv::Mat* frame){
 */
 }
 
-void Sandbox::showImage(cv::Mat* image){
+void Sandbox::showImage(cv::Mat &image){
 
     initWindowsFullScreen(defaultWindowsName);
     adjustProjection(image);
-    cv::imshow(defaultWindowsName, *image);
+    cv::imshow(defaultWindowsName, image);
 }
 
 int Sandbox::loadConfigFrom(char *path){