diff --git a/app/SandboxSetupApp.cpp b/app/SandboxSetupApp.cpp
index d5458085d23999555c270419b3b514afd082d810..d8ed820e94e1d334f1d465bbc8b05a876c947823 100644
--- a/app/SandboxSetupApp.cpp
+++ b/app/SandboxSetupApp.cpp
@@ -4,8 +4,25 @@
 int main(){
     SandboxSetup setup;
     
-    setup.setupBeamerResolution();
-    setup.setupProjection();
-    setup.setupBeamerLocation();
-    setup.saveConfig();
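+    // Each setup step returns a non-zero value on failure or when the user cancels.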
+    if(setup.setupBeamerResolution()){
+        std::cout << "Invalide resolution" << std::endl;
+        return 1;
+    }
+    
+    if(setup.setupProjection()){
+        std::cout << "Cancel calibration" << std::endl;
+        return 1;
+    }
+    
+    if(setup.setupBeamerLocation()){
+        std::cout << "Cancel calibration" << std::endl;
+        return 1;
+    }
+
+    if(setup.saveConfig()){
+        std::cout << "Failed to save configuration" << std::endl;
+        return 1;
+    }
+    
+    return 0;
 }
diff --git a/inc/beamer.h b/inc/beamer.h
index 68fd4adeb09b4979d1144e90bf11b3eaf4f4918e..ab68548270ab845b2c550aee6cdb4838f045387a 100644
--- a/inc/beamer.h
+++ b/inc/beamer.h
@@ -9,22 +9,24 @@ class Beamer
 {
 private:
     const char *BEAMER_POSITION_FILE = "./beamer.dat";
-    float solveD(cv::Vec3f v, cv::Point3f p);
+    cv::Point3f beamerPosition;
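+    // default beamer resolution, overwritten by setWidth()/setHeight() during setup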
+    cv::Size resolution = cv::Size(256,144);
+
     cv::Point3f intersection(cv::Vec3f v1, cv::Point3f p1, cv::Vec3f v2, cv::Point3f p2, cv::Vec3f v3, cv::Point3f p3, bool &isFound);
     std::vector<int> findCercleZ(cv::Mat &rgb);
-    cv::Point3f beamerPosition;
-    cv::Size resolution = cv::Size(1,1);
+    cv::Point3d approximatePosition(std::vector<cv::Point3d> points1, std::vector<cv::Point3d> points2);
 
 public:
     Beamer();
+
     cv::Point3f getPosition(){ return beamerPosition; };
     void setPosition(cv::Point3f pos){ beamerPosition = pos; };
     void setWidth(int w){ resolution.width = w; };
     int getWidth(){ return resolution.width; };
     void setHeight(int h){ resolution.height = h; };
     int getHeight(){ return resolution.height; };
-    void findBeamerFrom(Camera camera);
 
+    int findBeamerFrom(Camera camera);
     void printPosition();
 };
 #endif
diff --git a/inc/beamerProjection.h b/inc/beamerProjection.h
index 366b2483aa8826d7e27add5a2a6d9a9658283caf..bd28fd0520a5e95ec7e8555ae37270bbe0c404c5 100644
--- a/inc/beamerProjection.h
+++ b/inc/beamerProjection.h
@@ -1,5 +1,6 @@
 #ifndef BEAMERPROJECTION_H
 #define BEAMERPROJECTION_H
+
 #include <opencv2/opencv.hpp>
 #include "beamer.h"
 #include "camera.h"
@@ -14,7 +15,6 @@ private:
 public:
     BeamerProjection();
     cv::Point2i rotatePixel(cv::Point2i pixel);
-    void adjustFrame(cv::Mat &src, cv::Mat &dst, Camera camera, cv::Point3f beamer);
     void adjustFrame(cv::Mat &depth, cv::Mat &src, cv::Mat &dst, Camera camera, cv::Point3f beamer);
     void setAdjustingMatrix(cv::Mat matrix){ matrix.clone(); }
     cv::Mat getAdjustingMatrix(){ return adjustingMatrix.clone(); }
diff --git a/inc/borderedit.h b/inc/borderedit.h
index 0eabb739319ae93a17f76314ba8154bad42cdc9a..dc996005e52a7d0cba973194597e897c0bd28602 100644
--- a/inc/borderedit.h
+++ b/inc/borderedit.h
@@ -1,18 +1,23 @@
 #ifndef BORDEREDIT_H
 #define BORDEREDIT_H
+
 #include <opencv2/opencv.hpp>
-class BorderEdit
-{
-private:
-    static const int margeClick = 10;
-    static constexpr char *wndname = (char *)"Sandbox Border Finder";
-    static cv::Mat frameImage;
-    // OPENCV - MANUEL RECT CHANGE
-    static void drawSquare(cv::Point *p, int n);
-    static int findPoints(int x, int y, std::vector<cv::Point> &posSandbox);
-    static void mouseHandler(int event, int x, int y, int, void *param);
 
-public:
-    static void edit(cv::Mat frame, std::vector<cv::Point> *posSandbox);
+#define ESCAPE_CHAR 27
+
+class BorderEdit{
+    private:
+        static const int margeClick = 10;
+        static constexpr char *wndname = (char *)"Sandbox Border Finder";
+        static cv::Mat frameImage;
+
+        // OPENCV - MANUAL RECT CHANGE
+        static void drawSquare(cv::Point *p, int n);
+        static int findPoints(int x, int y, std::vector<cv::Point> &posSandbox);
+        static void mouseHandler(int event, int x, int y, int, void *param);
+
+    public:
+        static int edit(cv::Mat frame, std::vector<cv::Point> *posSandbox);
+        
 };
 #endif
\ No newline at end of file
diff --git a/inc/camera.h b/inc/camera.h
index 0cc36ca4b4b669fe24ead233dea9a6e8c9ccbb4f..9c617e774d47198d3caea975f0cbac24e584daea 100644
--- a/inc/camera.h
+++ b/inc/camera.h
@@ -5,42 +5,42 @@
 #include <librealsense2/rsutil.h>
 #include <opencv2/opencv.hpp>
 
-class Camera
-{
-private:
-    rs2::spatial_filter spatFilter;
-    rs2::temporal_filter tempFilter;
-    rs2::decimation_filter decFilter;
-    rs2::config cfg;
-    rs2::pipeline pipe;
-    rs2::pipeline_profile profile;
-    rs2_intrinsics intrinsics;
-    void flipMat(cv::Mat &m);
-    void filterDepthFrame(rs2::depth_frame &frameDepth);
-
-    cv::Mat matDepth;
-    cv::Mat matRGB;
-    cv::Rect croppingMask;
-
-public:
-    Camera();
-    void start();
-    void stop();
-    // Capture 30 frames to give autoexposure, etc. a chance to settle
-    void warmingUp();
-    cv::Mat captureFrame();
-    cv::Mat getAverageFrame(int numberFrame);
-    static cv::Mat coloredFrame(cv::Mat frameDepth);
-    cv::Point3f deprojectPixelToPoint(float coord[], float z1);
-    cv::Point2i projectPointToPixel(cv::Point3f point3D);
-
-    void captureFramesAlign();
-    void startAlign();
-    cv::Mat getDepthFrameAlign(){ return matDepth; };
-    cv::Mat getRGBFrameAlign(){ return matRGB.clone(); };
-    void setCroppingMask(cv::Rect mask){ croppingMask = mask; };
-    cv::Rect getCroppingMask(){ return croppingMask; };
-
-    void printCroppingMask();
+// camera resolution: 640x480
+
+class Camera{
+    private:
+        rs2::spatial_filter spatFilter;
+        rs2::temporal_filter tempFilter;
+        rs2::decimation_filter decFilter;
+        rs2::config cfg;
+        rs2::pipeline pipe;
+        rs2::pipeline_profile profile;
+        rs2_intrinsics intrinsics;
+
+        cv::Mat matDepth;
+        cv::Mat matRGB;
+        cv::Rect croppingMask;
+
+        void flipMat(cv::Mat &m);
+        void filterDepthFrame(rs2::depth_frame &frameDepth);
+        cv::Mat captureFrame();
+        cv::Mat getAverageFrame(int numberFrame);
+
+    public:
+        Camera();
+
+        cv::Mat getDepthFrameAlign(){ return matDepth; };
+        cv::Mat getRGBFrameAlign(){ return matRGB.clone(); };
+        void setCroppingMask(cv::Rect mask){ croppingMask = mask; };
+        cv::Rect getCroppingMask(){ return croppingMask; };
+        
+        void start();
+        void warmingUp();
+        void stop();
+        cv::Point3f deprojectPixelToPoint(float coord[], float z1);
+        cv::Point2i projectPointToPixel(cv::Point3f point3D);
+        void captureFramesAlign();
+        void printCroppingMask();
+
 };
 #endif
\ No newline at end of file
diff --git a/inc/sandbox.h b/inc/sandbox.h
index c308469f6d827835a6fa2f599e610fcd57490683..bf0a9074d559096af46c0cac751047ade7368c62 100644
--- a/inc/sandbox.h
+++ b/inc/sandbox.h
@@ -8,21 +8,23 @@
 #include "borderedit.h"
 #include "sandboxConfig.h"
 
-class Sandbox
-{
+class Sandbox{
     private:
         char *defaultConfigFilePath = (char *)"./sandbox_conf.yaml";
         char *defaultWindowsName = (char*) "ShowApp";
         BeamerProjection projection;
         Camera camera;
         Beamer beamer;
+
         void initWindowsFullScreen(char *windowName);
         void showImage(cv::Mat* image, char *windowName);
 
     public:
         Sandbox();
+
         cv::Mat getRGBFrame();
         cv::Mat getDepthFrame();
+
         cv::Mat* adjustProjection(cv::Mat* frame);
         void showImage(cv::Mat* image);
         int loadConfig();
diff --git a/inc/sandboxSetup.h b/inc/sandboxSetup.h
index c67ecd56dee816ad912fef9ae3873055c2232f75..5e4dd59899a4d60f43a3b9035d4d9003ad51dc12 100644
--- a/inc/sandboxSetup.h
+++ b/inc/sandboxSetup.h
@@ -15,16 +15,18 @@ class SandboxSetup{
         BeamerProjection projection;
         Camera camera;
         Beamer beamer;
+
         double toDegrees(double radians);
         void initWindowsFullScreen(char *windowName);
 
     public:
         SandboxSetup();
-        void saveConfigFrom(char *path);
-        void saveConfig();
-        void setupProjection();
-        void setupBeamerResolution();
-        void setupBeamerLocation();
+        
+        int saveConfigFrom(char *path);
+        int saveConfig();
+        int setupProjection();
+        int setupBeamerResolution();
+        int setupBeamerLocation();
 };
 
 #endif
diff --git a/sandbox.h b/sandbox.h
index c308469f6d827835a6fa2f599e610fcd57490683..bf0a9074d559096af46c0cac751047ade7368c62 100644
--- a/sandbox.h
+++ b/sandbox.h
@@ -8,21 +8,23 @@
 #include "borderedit.h"
 #include "sandboxConfig.h"
 
-class Sandbox
-{
+class Sandbox{
     private:
         char *defaultConfigFilePath = (char *)"./sandbox_conf.yaml";
         char *defaultWindowsName = (char*) "ShowApp";
         BeamerProjection projection;
         Camera camera;
         Beamer beamer;
+
         void initWindowsFullScreen(char *windowName);
         void showImage(cv::Mat* image, char *windowName);
 
     public:
         Sandbox();
+
         cv::Mat getRGBFrame();
         cv::Mat getDepthFrame();
+
         cv::Mat* adjustProjection(cv::Mat* frame);
         void showImage(cv::Mat* image);
         int loadConfig();
diff --git a/src/components/beamer.cpp b/src/components/beamer.cpp
index f163152e807cf1977b3820113e5669c17f510ac7..0d8da8645990d3ca2abf8d9b1dfde4178827ad67 100644
--- a/src/components/beamer.cpp
+++ b/src/components/beamer.cpp
@@ -1,5 +1,123 @@
 #include "../../inc/beamer.h"
 
+
+
+Beamer::Beamer(){
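+    // default beamer position, refined later by findBeamerFrom()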
+    beamerPosition = cv::Point3f(0.0f, 0.265f, -0.205f);
+}
+
+
+
+/*
+*   Public
+*/
+
+
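+// Beamer position calibration: three crosses are projected one after the other;
+// for each cross the user validates three 3D points of the circle target (space bar),
+// a 3D line is fitted through them, and the beamer position is approximated from the
+// mutual intersections of the three lines. Returns 1 if the user aborts with ESC.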
+int Beamer::findBeamerFrom(Camera camera)
+{
+    char wname[] = "FindBeamer";
+    cv::namedWindow(wname, CV_WINDOW_NORMAL);
+    cv::setWindowProperty(wname, CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
+    cv::Mat depth;
+    cv::Mat rgb;
+    cv::Mat frameImage(resolution, CV_8UC3, cv::Scalar(0, 0, 0));
+
+    unsigned int nbPoint = 3; //number of points used to calculate 1 vector
+    std::vector<cv::Point3d> directions;  //vector calculated for each point
+    std::vector<cv::Point3d> bases;  //1 point for each vector (to calculate the constant d)
+    double fact = -20.0;
+
+    // Setup cross for calibration
+    std::vector<cv::Point> points;
+    points.push_back( cv::Point( getWidth() * 5/14, getHeight() * 5/14));
+    points.push_back( cv::Point( getWidth() * 5/7 , getHeight() * 3/14));
+    points.push_back( cv::Point( getWidth() * 3/14, getHeight() * 4/7 ));
+
+
+    camera.start();
+
+    for (int i = 0; i < (int)points.size(); i++)
+    {
+        std::vector<cv::Point3f> capturedPoints;
+        cv::Point projCross = points[i];
+        
+        while (capturedPoints.size() < nbPoint){
+
+            // capture frame
+            camera.captureFramesAlign();
+            depth = camera.getDepthFrameAlign();
+            rgb = camera.getRGBFrameAlign();
+
+            // Look for the circle target
+            std::vector<int> crc = findCercleZ(rgb);
+            cv::Scalar color = (!crc.empty()) ? cv::Scalar(0, 255, 0) : cv::Scalar(0, 0, 255);
+            
+            // Show black screen with cross
+            cv::line(frameImage, cv::Point(projCross.x, 0), cv::Point(projCross.x, frameImage.rows - 1), color, 4);
+            cv::line(frameImage, cv::Point(0, projCross.y), cv::Point(frameImage.cols - 1, projCross.y), color, 4);
+            cv::putText( frameImage,
+                         std::to_string(capturedPoints.size() + 1) + "/" + std::to_string(nbPoint),
+                         cv::Point( getWidth()/2, getHeight()/2 ),
+                         cv::FONT_HERSHEY_SIMPLEX,
+                         1,
+                         cv::Scalar(255, 255, 255));
+            cv::imshow(wname, frameImage);
+
+            // Wait for interaction
+            char keyCode = cv::waitKey(500);
+            if (keyCode == ESCAPE_CHAR){
+                return 1;
+            }
+            else if (keyCode == ' ' && !crc.empty()){
+                // Point validated
+                float coord[2] = {(float)crc[0], (float)crc[1]};
+                float z = static_cast<float>(depth.at<uint16_t>(crc[1], crc[0]));
+                cv::Point3f p = camera.deprojectPixelToPoint(coord, z / 1000.0);
+                capturedPoints.push_back(p);
+            }
+            
+            // clear frame
+            frameImage.setTo(cv::Scalar(0, 0, 0));
+        }
+
+        // check for the end
+        if (capturedPoints.size() == nbPoint){
+
+            cv::Vec3f dir(capturedPoints[0].x - capturedPoints[1].x, capturedPoints[0].y - capturedPoints[1].y, capturedPoints[0].z - capturedPoints[1].z);
+            cv::Vec6f line;
+            cv::fitLine(capturedPoints, line, CV_DIST_L2, 0, 0.01, 0.01);
+            directions.push_back(cv::Point3d(line[3] * fact, line[4] * fact, line[5] * fact));
+            bases.push_back(cv::Point3d(line[0], line[1], line[2]));
+        }
+    }
+
+    camera.stop();
+    cv::destroyAllWindows();
+
+
+    cv::Point3d beamerPoint = approximatePosition(directions, bases);
+
+    //set beamer position
+    beamerPosition.x = (float)beamerPoint.x;
+    beamerPosition.y = (float)beamerPoint.y;
+    beamerPosition.z = (float)beamerPoint.z;
+
+    return 0;
+}
+
+
+void Beamer::printPosition(){
+    cv::Point3f pos = getPosition();
+    std::cout << "(" << pos.x << "," << pos.y << "," << pos.z << ")" << std::endl;
+}
+
+
+
+/*
+*   Private
+*/
+
+
 /*
    Calculate the line segment PaPb that is the shortest route between
    two lines P1P2 and P3P4. Calculate also the values of mua and mub where
@@ -62,11 +180,6 @@ int LineLineIntersect(
     return (true);
 }
 
-Beamer::Beamer(){
-    //position par défaut
-    beamerPosition = cv::Point3f(0.0f, 0.265f, -0.205f);
-
-}
 
 std::vector<int> Beamer::findCercleZ(cv::Mat &rgb)
 {
@@ -92,94 +205,21 @@ std::vector<int> Beamer::findCercleZ(cv::Mat &rgb)
     return result;
 }
 
-void Beamer::findBeamerFrom(Camera camera)
-{
-    char wname[] = "FindBeamer";
-    cv::namedWindow(wname, CV_WINDOW_NORMAL);
-    cv::setWindowProperty(wname, CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
-    cv::Mat depth;
-    cv::Mat rgb;
-    cv::Mat frameImage(resolution, CV_8UC3, cv::Scalar(0, 0, 0));
-
-    // Setup cross for calibration
-    std::vector<cv::Point> points;
-    points.push_back( cv::Point( getWidth() * 5/14, getHeight() * 5/14));
-    points.push_back( cv::Point( getWidth() * 5/7 , getHeight() * 3/14));
-    points.push_back( cv::Point( getWidth() * 3/14, getHeight() * 4/7 ));
-
-    unsigned int nbPoint = 3; //number of point to calculate 1 vector
-    std::vector<cv::Point3d> points1;  //vectors calculate for each point
-    std::vector<cv::Point3d> points2;  //1 point for each vector (to calculate constante d)
-    double fact = -20.0;
-
-
-    camera.startAlign();
-
-    for (int i = 0; i < (int)points.size(); i++)
-    {
-        std::vector<cv::Point3f> capturedPoints;
-        cv::Point p = points[i];
-        while (1)
-        {
-            camera.captureFramesAlign();
-            depth = camera.getDepthFrameAlign();
-            rgb = camera.getRGBFrameAlign();
-            cv::Scalar color;
-            std::vector<int> crc = findCercleZ(rgb);
-            if (!crc.empty())
-                color = cv::Scalar(0, 255, 0);
-            else
-                color = cv::Scalar(0, 0, 255);
-
-            cv::line(frameImage, cv::Point(p.x, 0), cv::Point(p.x, frameImage.rows - 1), color, 4);
-            cv::line(frameImage, cv::Point(0, p.y), cv::Point(frameImage.cols - 1, p.y), color, 4);
-            cv::putText( frameImage,
-                     std::to_string(capturedPoints.size() + 1) + "/" + std::to_string(nbPoint),
-                     cv::Point( getWidth()/2, getHeight()/2 ),
-                     cv::FONT_HERSHEY_SIMPLEX,
-                     1,
-                     cv::Scalar(255, 255, 255));
-            cv::imshow(wname, frameImage);
-            char keyCode = cv::waitKey(500);
-            if (keyCode == ESCAPE_CHAR)
-                exit(0);
-            else if (keyCode == ' ' && !crc.empty())
-            {
-                float coord[2] = {(float)crc[0], (float)crc[1]};
-                float z = static_cast<float>(depth.at<uint16_t>(crc[1], crc[0]));
-                cv::Point3f p = camera.deprojectPixelToPoint(coord, z / 1000.0);
-                capturedPoints.push_back(p);
-            }
-
-            if (capturedPoints.size() == nbPoint)
-            {
-                cv::Vec3f dir(capturedPoints[0].x - capturedPoints[1].x, capturedPoints[0].y - capturedPoints[1].y, capturedPoints[0].z - capturedPoints[1].z);
-                cv::Vec6f line;
-                cv::fitLine(capturedPoints, line, CV_DIST_L2, 0, 0.01, 0.01);
-                points1.push_back(cv::Point3d(line[3] * fact, line[4] * fact, line[5] * fact));
-                points2.push_back(cv::Point3d(line[0], line[1], line[2]));
-                frameImage.setTo(cv::Scalar(0, 0, 0));
-                break;
-            }
-            frameImage.setTo(cv::Scalar(0, 0, 0));
-        }
-    }
-
-    camera.stop();
 
+cv::Point3d Beamer::approximatePosition(std::vector<cv::Point3d> directions, std::vector<cv::Point3d> bases){
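+    // The three fitted lines are intersected pairwise; LineLineIntersect yields the two
+    // closest points for each pair, and the beamer position is the mean of the six points.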
 
     // Linear regression
     cv::Point3d pa, pb;
     double mua;
     double mub;
     std::vector<cv::Point3d> beamerPoints;
-    LineLineIntersect(points1[0], points2[0], points1[1], points2[1], &pa, &pb, &mua, &mub);
+    LineLineIntersect(directions[0], bases[0], directions[1], bases[1], &pa, &pb, &mua, &mub);
     beamerPoints.push_back(pa);
     beamerPoints.push_back(pb);
-    LineLineIntersect(points1[0], points2[0], points1[2], points2[2], &pa, &pb, &mua, &mub);
+    LineLineIntersect(directions[0], bases[0], directions[2], bases[2], &pa, &pb, &mua, &mub);
     beamerPoints.push_back(pa);
     beamerPoints.push_back(pb);
-    LineLineIntersect(points1[1], points2[1], points1[2], points2[2], &pa, &pb, &mua, &mub);
+    LineLineIntersect(directions[1], bases[1], directions[2], bases[2], &pa, &pb, &mua, &mub);
     beamerPoints.push_back(pa);
     beamerPoints.push_back(pb);
 
@@ -189,18 +229,6 @@ void Beamer::findBeamerFrom(Camera camera)
         beamerPoint += beamerPoints[i];
     }
     beamerPoint /= 6.0;
-
-
-    //set beamer position
-    beamerPosition.x = (float)beamerPoint.x;
-    beamerPosition.y = (float)beamerPoint.y;
-    beamerPosition.z = (float)beamerPoint.z;
-  
-    cv::destroyAllWindows();
-}
-
-
-void Beamer::printPosition(){
-    cv::Point3f pos = getPosition();
-    std::cout << "(" << pos.x << "," << pos.y << "," << pos.z << ")" << std::endl;
-}
+    
+    return beamerPoint;
+}
\ No newline at end of file
diff --git a/src/components/beamerProjection.cpp b/src/components/beamerProjection.cpp
index 6cabc7d1f3ef0acc63af66f67711c414dd9fd5a1..b07d6515727b43c70abe21fd4d0f3b9664c1d186 100644
--- a/src/components/beamerProjection.cpp
+++ b/src/components/beamerProjection.cpp
@@ -1,17 +1,19 @@
 #include "../../inc/beamerProjection.h"
 
-BeamerProjection::BeamerProjection()
-{
+
+BeamerProjection::BeamerProjection(){
     adjustingMatrix = cv::getRotationMatrix2D(cv::Point(0, 0), 0, 1);
 }
 
-cv::Point2i BeamerProjection::rotatePixel(cv::Point2i pixel)
-{
+
+cv::Point2i BeamerProjection::rotatePixel(cv::Point2i pixel){
+
     cv::Mat tmp = (cv::Mat_<cv::Vec2f>(1, 1) << cv::Vec2f(pixel.x, pixel.y));
     cv::transform(tmp, tmp, adjustingMatrix);
     return cv::Point2i(tmp.at<cv::Vec2f>(0, 0));
 }
 
+
 cv::Point2i BeamerProjection::adjustPixel(int i, int j, float z, Camera camera, cv::Point3f beamer){
 
     //pixel to point 3d
@@ -27,25 +29,6 @@ cv::Point2i BeamerProjection::adjustPixel(int i, int j, float z, Camera camera,
     return camera.projectPointToPixel(p);
 }
 
-void BeamerProjection::adjustFrame(cv::Mat &src, cv::Mat &dst, Camera camera, cv::Point3f beamer)
-{
-    //int64_t t1 = getTickCount();
-    //transformation on all pixel
-    for (int i = 0; i < src.rows; i++)
-    {
-        for (int j = 0; j < src.cols; j++)
-        {
-            cv::Point pixelIJ(j, i);
-            cv::Point pixel = adjustPixel(i, j, static_cast<float>(src.at<uint16_t>(pixelIJ)), camera, beamer);
-            if (pixel.x < dst.cols && pixel.y < dst.rows && pixel.x >= 0 && pixel.y >= 0)
-                dst.at<uint16_t>(pixel) = src.at<uint16_t>(pixelIJ);
-        }
-    }
-    //cout << "temps de calcul: " << (getTickCount() - t1) / getTickFrequency() << endl;
-
-    cv::warpAffine(dst, dst, adjustingMatrix, dst.size());
-    // medianBlur(dst, dst, 3);
-}
 
 void BeamerProjection::adjustFrame(cv::Mat &depth, cv::Mat &src, cv::Mat &dst, Camera camera, cv::Point3f beamer){
 
@@ -75,6 +58,7 @@ void BeamerProjection::adjustFrame(cv::Mat &depth, cv::Mat &src, cv::Mat &dst, C
 
 
 void BeamerProjection::printAdjustingMatrix(){
+
     cv::Mat matrix = getAdjustingMatrix();
     for (int y = 0; y < matrix.rows; y++){
         for (int x = 0; x < matrix.cols; x++){
diff --git a/src/components/camera.cpp b/src/components/camera.cpp
index 133ad5c034a40fd7ba8a1e0879cdbcff5acaf795..d1772b0169a361550567b468fee36917c379ab06 100644
--- a/src/components/camera.cpp
+++ b/src/components/camera.cpp
@@ -1,9 +1,35 @@
 #include "../../inc/camera.h"
 
+
 Camera::Camera() {
     
 }
 
+
+
+/*
+*   Public
+*/
+
+
+void Camera::start(){
+
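+    // Configure the RealSense pipeline, select the "High Density" visual preset, and warm up the sensor.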
+    rs2::log_to_console(RS2_LOG_SEVERITY_ERROR);
+    spatFilter.set_option(RS2_OPTION_HOLES_FILL, 5);
+    //cfg.enable_device_from_file("../input/flux/flux5.bag");
+    profile = pipe.start(cfg);
+    auto sensor = profile.get_device().first<rs2::depth_sensor>();
+    // TODO: At the moment the SDK does not offer a closed enum for D400 visual presets
+    // We do this to reduce the number of black pixels
+    // The hardware can perform hole-filling much better and much more power efficiently than our software
+    auto range = sensor.get_option_range(RS2_OPTION_VISUAL_PRESET);
+    for (auto i = range.min; i < range.max; i += range.step)
+        if (std::string(sensor.get_option_value_description(RS2_OPTION_VISUAL_PRESET, i)) == "High Density")
+            sensor.set_option(RS2_OPTION_VISUAL_PRESET, i);
+    warmingUp();
+}
+
+
 // Capture 30 frames to give autoexposure, etc. a chance to settle
 void Camera::warmingUp()
 {
@@ -17,55 +43,14 @@ void Camera::warmingUp()
     }
 }
 
-cv::Mat Camera::coloredFrame(cv::Mat frameDepth)
-{
-    cv::Mat depthFrameColored(frameDepth.size(), CV_8U);
-    int width = frameDepth.cols, height = frameDepth.rows;
-    static uint32_t histogram[0x10000];
-    memset(histogram, 0, sizeof(histogram));
 
-    for (int i = 0; i < height; ++i)
-    {
-        for (int j = 0; j < width; ++j)
-        {
-            ++histogram[frameDepth.at<ushort>(i, j)];
-        }
-    }
+void Camera::stop(){
+    pipe.stop();
+}
 
-    for (int i = 2; i < 0x10000; ++i)
-        histogram[i] += histogram[i - 1]; // Build a cumulative histogram for the indices in [1,0xFFFF]
 
-    for (int i = 0; i < height; ++i)
-    {
-        for (int j = 0; j < width; ++j)
-        {
-            if (uint16_t d = frameDepth.at<ushort>(i, j))
-            {
-                int f = histogram[d] * 255 / histogram[0xFFFF]; // 0-255 based on histogram location
-                depthFrameColored.at<uchar>(i, j) = static_cast<uchar>(f);
-            }
-            else
-            {
-                depthFrameColored.at<uchar>(i, j) = 0;
-            }
-        }
-    }
-    cv::bitwise_not(depthFrameColored, depthFrameColored); //reverse colormap
-    cv::applyColorMap(depthFrameColored, depthFrameColored, cv::COLORMAP_JET);
-    depthFrameColored.setTo(cv::Scalar(0, 0, 0), (frameDepth == 0));
-    return depthFrameColored;
-}
+void Camera::captureFramesAlign(){
 
-cv::Mat Camera::captureFrame()
-{
-    auto frame = pipe.wait_for_frames();
-    auto frameDepth = frame.get_depth_frame();
-    filterDepthFrame(frameDepth);
-    cv::Mat matFrame(cv::Size(frameDepth.get_width(), frameDepth.get_height()), CV_16UC1, (void *)frameDepth.get_data(), cv::Mat::AUTO_STEP);
-    return matFrame;
-}
-void Camera::captureFramesAlign()
-{
     rs2::align align(RS2_STREAM_DEPTH);
     auto frameset = pipe.wait_for_frames();
     //Get processed aligned frame
@@ -78,6 +63,47 @@ void Camera::captureFramesAlign()
     cv::Mat(cv::Size(color_frame.get_width(), color_frame.get_height()), CV_8UC3, (void *)color_frame.get_data(), cv::Mat::AUTO_STEP).copyTo(matRGB);
 }
 
+
+cv::Point2i Camera::projectPointToPixel(cv::Point3f point3D){
+
+    float point[3] = {point3D.x, point3D.y, point3D.z};
+    float pixel[2];
+    rs2_project_point_to_pixel(pixel, &intrinsics, point);
+    return cv::Point2i(pixel[0], pixel[1]);
+}
+
+
+cv::Point3f Camera::deprojectPixelToPoint(float coord[], float z1){
+
+    float p[3];
+    rs2_deproject_pixel_to_point(p, &intrinsics, coord, z1);
+    return cv::Point3f(p[0], p[1], p[2]);
+}
+
+
+void Camera::printCroppingMask(){
+    cv::Rect mask = getCroppingMask();
+    std::cout << "(" << mask.x << "," << mask.y << ") + " << mask.width << "," << mask.height << std::endl;
+}
+
+
+
+
+/*
+*   Private
+*/
+
+
+cv::Mat Camera::captureFrame(){
+    
+    auto frame = pipe.wait_for_frames();
+    auto frameDepth = frame.get_depth_frame();
+    filterDepthFrame(frameDepth);
+    cv::Mat matFrame(cv::Size(frameDepth.get_width(), frameDepth.get_height()), CV_16UC1, (void *)frameDepth.get_data(), cv::Mat::AUTO_STEP);
+    return matFrame;
+}
+
+
 void Camera::filterDepthFrame(rs2::depth_frame &frameDepth)
 {
     // frameDepth = decFilter.process(frameDepth);
@@ -85,8 +111,10 @@ void Camera::filterDepthFrame(rs2::depth_frame &frameDepth)
     frameDepth = tempFilter.process(frameDepth);
     intrinsics = frameDepth.get_profile().as<rs2::video_stream_profile>().get_intrinsics();
 }
-cv::Mat Camera::getAverageFrame(int numberFrame)
-{
+
+
+cv::Mat Camera::getAverageFrame(int numberFrame){
+
     cv::Mat averageFrame;
     Camera::captureFrame().copyTo(averageFrame);
     for (int i = 1; i <= numberFrame; i++)
@@ -98,61 +126,3 @@ cv::Mat Camera::getAverageFrame(int numberFrame)
     return averageFrame;
 }
 
-cv::Point3f Camera::deprojectPixelToPoint(float coord[], float z1)
-{
-    float p[3];
-    rs2_deproject_pixel_to_point(p, &intrinsics, coord, z1);
-    return cv::Point3f(p[0], p[1], p[2]);
-}
-
-cv::Point2i Camera::projectPointToPixel(cv::Point3f point3D)
-{
-    float point[3] = {point3D.x, point3D.y, point3D.z};
-    float pixel[2];
-    rs2_project_point_to_pixel(pixel, &intrinsics, point);
-    return cv::Point2i(pixel[0], pixel[1]);
-}
-
-void Camera::start()
-{
-    rs2::log_to_console(RS2_LOG_SEVERITY_ERROR);
-    //spatFilter.set_option(RS2_OPTION_HOLES_FILL, 5);
-    profile = pipe.start(cfg);
-    auto sensor = profile.get_device().first<rs2::depth_sensor>();
-    // TODO: At the moment the SDK does not offer a closed enum for D400 visual presets
-    // We do this to reduce the number of black pixels
-    // The hardware can perform hole-filling much better and much more power efficient then our software
-    auto range = sensor.get_option_range(RS2_OPTION_VISUAL_PRESET);
-    for (auto i = range.min; i < range.max; i += range.step)
-        if (std::string(sensor.get_option_value_description(RS2_OPTION_VISUAL_PRESET, i)) == "High Density")
-            sensor.set_option(RS2_OPTION_VISUAL_PRESET, i);
-    warmingUp();
-}
-
-void Camera::startAlign()
-{
-    rs2::log_to_console(RS2_LOG_SEVERITY_ERROR);
-    spatFilter.set_option(RS2_OPTION_HOLES_FILL, 5);
-    //cfg.enable_device_from_file("../input/flux/flux5.bag");
-    profile = pipe.start(cfg);
-    auto sensor = profile.get_device().first<rs2::depth_sensor>();
-    // TODO: At the moment the SDK does not offer a closed enum for D400 visual presets
-    // We do this to reduce the number of black pixels
-    // The hardware can perform hole-filling much better and much more power efficient then our software
-    auto range = sensor.get_option_range(RS2_OPTION_VISUAL_PRESET);
-    for (auto i = range.min; i < range.max; i += range.step)
-        if (std::string(sensor.get_option_value_description(RS2_OPTION_VISUAL_PRESET, i)) == "High Density")
-            sensor.set_option(RS2_OPTION_VISUAL_PRESET, i);
-    warmingUp();
-}
-
-void Camera::stop()
-{
-    pipe.stop();
-}
-
-
-void Camera::printCroppingMask(){
-    cv::Rect mask = getCroppingMask();
-    std::cout << "(" << mask.x << "," << mask.y << ") + " << mask.width << "," << mask.height << std::endl;
-}
\ No newline at end of file
diff --git a/src/lib/sandbox.cpp b/src/lib/sandbox.cpp
index 7c9b6779f2a9d95718f74e3ac600e4db77359f8c..334b3af16fb0ec01326d9658fe2680e12039168a 100644
--- a/src/lib/sandbox.cpp
+++ b/src/lib/sandbox.cpp
@@ -6,7 +6,7 @@
  */
 
 Sandbox::Sandbox(){
-    camera.startAlign();
+    camera.start();
 }
 
 
@@ -35,12 +35,26 @@ cv::Mat* Sandbox::adjustProjection(cv::Mat* frame){
     cv::resize(*frame, *frame, depth.size());
     projection.adjustFrame(depth, *frame, imageCalibrate, camera, beamer.getPosition());
     //flip to align frame with beamer
-    cv::flip(imageCalibrate, imageCalibrate, 1);
-    cv::flip(imageCalibrate, imageCalibrate, 0);
+    //cv::flip(imageCalibrate, imageCalibrate, 1);
+    //cv::flip(imageCalibrate, imageCalibrate, 0);
 
     cv::resize(imageCalibrate, frameBeamer, frameBeamer.size());
     frameBeamer.copyTo(*frame);
     return frame;
+
+/*
+    cv::Mat image = *frame;
+    cv::Mat depth = getDepthFrame();
+    cv::Size beamerResolution = cv::Size(beamer.getWidth(), beamer.getHeight());
+    cv::Mat imageCalibrate(beamerResolution, CV_8UC3, cv::Scalar(0, 0, 0));
+    
+    cv::resize(depth, depth, beamerResolution);
+    cv::resize(image, image, beamerResolution);
+    projection.adjustFrame(depth, image, imageCalibrate, camera, beamer.getPosition());
+
+    imageCalibrate.copyTo(*frame);
+    return frame;
+*/
 }
 
 void Sandbox::showImage(cv::Mat* image){
diff --git a/src/lib/sandboxSetup.cpp b/src/lib/sandboxSetup.cpp
index a69cece94c132de10d0c5e34aeab9f3dd35dd302..f2d60b6045e601ab91c37938107df4b515c81ae9 100644
--- a/src/lib/sandboxSetup.cpp
+++ b/src/lib/sandboxSetup.cpp
@@ -6,39 +6,59 @@ SandboxSetup::SandboxSetup(){
 }
 
 
-void SandboxSetup::saveConfigFrom(char *path){
-    SandboxConfig::saveAdjustingMatrixInto(path, projection.getAdjustingMatrix());
-    SandboxConfig::saveCroppingMaskInto(path, camera.getCroppingMask());
-    SandboxConfig::saveBeamerResolutionInto(path, cv::Size(beamer.getWidth(), beamer.getHeight()));
-    SandboxConfig::saveBeamerPositionInto(path, beamer.getPosition());
+// return 1 when the config can't be saved to the file
+int SandboxSetup::saveConfigFrom(char *path){
+    if(SandboxConfig::saveAdjustingMatrixInto(path, projection.getAdjustingMatrix()))
+        return 1;
+
+    if(SandboxConfig::saveCroppingMaskInto(path, camera.getCroppingMask()))
+        return 1;
+
+    if(SandboxConfig::saveBeamerResolutionInto(path, cv::Size(beamer.getWidth(), beamer.getHeight())))
+        return 1;
+
+    if(SandboxConfig::saveBeamerPositionInto(path, beamer.getPosition()))
+        return 1;
+
+    return 0;
 }
 
 
-void SandboxSetup::saveConfig(){
-    saveConfigFrom(defaultConfigFilePath);
+int SandboxSetup::saveConfig(){
+    return saveConfigFrom(defaultConfigFilePath);
 }
 
 
-void SandboxSetup::setupBeamerResolution(){
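+// return 1 when the entered width or height is not a valid number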
+int SandboxSetup::setupBeamerResolution(){
     int width = 0;
     int height = 0;
 
     std::cout << "Enter width of the beamer's resolution :" << std::endl;
     std::cin >> width;
+    if(std::cin.fail()){
+        return 1;
+    }
     std::cout << "Enter height of the beamer's resolution :" << std::endl;
     std::cin >> height;
+    if(std::cin.fail()){
+        return 1;
+    }
 
     beamer.setHeight(height);
     beamer.setWidth(width);
+
+    return 0;
 }
 
 
-void SandboxSetup::setupBeamerLocation(){
-    beamer.findBeamerFrom(camera);
+// return 1 when the user cancels the calibration
+int SandboxSetup::setupBeamerLocation(){
+    return beamer.findBeamerFrom(camera);
 }
 
 
-void SandboxSetup::setupProjection(){   
+// return 1 when the user cancels the calibration
+int SandboxSetup::setupProjection(){
 
     // Blue screen 
     char windowName[] = "border";
@@ -48,7 +68,7 @@ void SandboxSetup::setupProjection(){
     cv::waitKey(100);
 
     // Take picture
-    camera.startAlign(); // 1 seconde of warming up
+    camera.start(); // 1 second of warming up
     camera.captureFramesAlign();
     cv::Mat frameData = camera.getDepthFrameAlign();
     cv::Mat coloredFrame = camera.getRGBFrameAlign();
@@ -61,7 +81,10 @@ void SandboxSetup::setupProjection(){
     float y = coloredFrame.size().height;
     float x = coloredFrame.size().width;
     std::vector<cv::Point> rectPoints{ cv::Point(1.0/4*x, 1.0/4*y), cv::Point(1.0/4*x, 3.0/4*y), cv::Point(3.0/4*x, 3.0/4*y), cv::Point(3.0/4*x, 1.0/4*y) };
-    BorderEdit::edit(coloredFrame, &rectPoints); // edit projected frame
+    if(BorderEdit::edit(coloredFrame, &rectPoints)){
+        return 1;
+    }
+    cv::destroyAllWindows();
 
     // Set adjusting matrix for the projection
     int widthTop = rectPoints[3].x - rectPoints[0].x;
@@ -73,7 +96,8 @@ void SandboxSetup::setupProjection(){
     cv::Size rectSize = cv::Size(widthTop, cvRound(widthTop / 1.33333) + 5);
     cv::Point p = projection.rotatePixel(rectPoints[0]);
     camera.setCroppingMask(cv::Rect(p, rectSize)); // croppingMask
-    cv::destroyAllWindows();
+
+    return 0;
 }
 
 
diff --git a/src/tools/borderedit.cpp b/src/tools/borderedit.cpp
index 2d93ca6ace855f158c75b40fab2d2a93530487fa..ae54b2249f840c23ec587ed8662cd265d99fd365 100644
--- a/src/tools/borderedit.cpp
+++ b/src/tools/borderedit.cpp
@@ -1,26 +1,25 @@
 #include "../../inc/borderedit.h"
 
+
 cv::Mat BorderEdit::frameImage;
 
-void BorderEdit::drawSquare(cv::Point *p, int n)
-{
-    cv::Mat imageCopy = frameImage.clone();
-    polylines(imageCopy, &p, &n, 1, true, cv::Scalar(0, 255, 0), 1, cv::LINE_AA);
-    imshow(wndname, imageCopy);
-}
-int BorderEdit::findPoints(int x, int y, std::vector<cv::Point> &posSandbox)
-{
-    for (int i = 0; i < (int)posSandbox.size(); i++)
-    {
-        //check if the point is clicked
-        if ((x >= (posSandbox[i].x - margeClick)) && (x <= (posSandbox[i].x + margeClick)) && (y >= (posSandbox[i].y - margeClick)) && (y <= (posSandbox[i].y + margeClick)))
-            return i;
+
+int BorderEdit::edit(cv::Mat frame, std::vector<cv::Point> *posSandbox){
+
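+    // Display the frame, let the user drag the corner points with the mouse,
+    // and return 1 if the edit is aborted with ESC (0 otherwise).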
+    cv::namedWindow(wndname, CV_WINDOW_AUTOSIZE);
+    frame.copyTo(frameImage);
+    cv::setMouseCallback(wndname, BorderEdit::mouseHandler, (void *)posSandbox);
+    BorderEdit::drawSquare(&posSandbox->at(0), (int)posSandbox->size());
+    char keyCode = cv::waitKey(0);
+    if (keyCode == ESCAPE_CHAR){
+        return 1;
     }
-    return -1; //not found
+    return 0;
 }
 
-void BorderEdit::mouseHandler(int event, int x, int y, int, void *param)
-{
+
+void BorderEdit::mouseHandler(int event, int x, int y, int, void *param){
+
     static int selectedPoint = -1;
     std::vector<cv::Point> &posSandbox = *((std::vector<cv::Point> *)param);
     switch (event)
@@ -49,11 +48,23 @@ void BorderEdit::mouseHandler(int event, int x, int y, int, void *param)
     }
 }
 
-void BorderEdit::edit(cv::Mat frame, std::vector<cv::Point> *posSandbox)
-{
-    cv::namedWindow(wndname, CV_WINDOW_AUTOSIZE);
-    frame.copyTo(frameImage);
-    cv::setMouseCallback(wndname, BorderEdit::mouseHandler, (void *)posSandbox);
-    BorderEdit::drawSquare(&posSandbox->at(0), (int)posSandbox->size());
-    cv::waitKey(0);
-}
\ No newline at end of file
+
+void BorderEdit::drawSquare(cv::Point *p, int n){
+
+    cv::Mat imageCopy = frameImage.clone();
+    polylines(imageCopy, &p, &n, 1, true, cv::Scalar(0, 255, 0), 1, cv::LINE_AA);
+    imshow(wndname, imageCopy);
+}
+
+
+int BorderEdit::findPoints(int x, int y, std::vector<cv::Point> &posSandbox){
+
+    for (int i = 0; i < (int)posSandbox.size(); i++)
+    {
+        //check if the point is clicked
+        if ((x >= (posSandbox[i].x - margeClick)) && (x <= (posSandbox[i].x + margeClick)) && (y >= (posSandbox[i].y - margeClick)) && (y <= (posSandbox[i].y + margeClick)))
+            return i;
+    }
+    return -1; //not found
+}
+