diff --git a/app/SandboxSetup/croppingmask.cpp b/app/SandboxSetup/croppingmask.cpp
index 21fcbba34a54901c02a67844e0d7af539e7c07a3..11f0af135a1bf16fa175efef0e5d3122c4472231 100644
--- a/app/SandboxSetup/croppingmask.cpp
+++ b/app/SandboxSetup/croppingmask.cpp
@@ -31,13 +31,14 @@ void CroppingMask::valideRoutine(){
     timer->stop();
     setup->getCamera()->capture();
     std::vector<cv::Point> rectPoints = getRectPoints();
-    cv::Point2i centerProjection = setup->getCenterOfQuadrilateral(rectPoints);
     cv::Mat depthFrame = setup->getCamera()->getDepthFrame();
+    
+    setup->setupCroppingMask(rectPoints);
+    cv::Rect mask = setup->getCamera()->getCroppingMask();
 
+    cv::Point2i centerProjection = cv::Point2i(mask.width/2, mask.height/2);
     setup->setupAdjustMatrix(rectPoints, centerProjection);
-    setup->setupCroppingMask(rectPoints);
     setup->getProjection()->setDistanceTopSandbox(depthFrame.at<float>(centerProjection));
-
     endSuccess = true;
 }
 
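Note on the reordering above: the centre used for the adjust matrix and the sandbox-top distance is now the middle of the cropping mask rather than the intersection of the quadrilateral's diagonals. A minimal sketch of the coordinate bookkeeping, under the assumption (not shown in this diff) that getCroppingMask() is expressed in full-frame pixels while getDepthFrame() is uncropped:

    #include <opencv2/core.hpp>

    int main() {
        // Hypothetical mask, standing in for getCamera()->getCroppingMask();
        // assumed here to be expressed in full-frame pixel coordinates.
        cv::Rect mask(120, 80, 400, 300);

        // Centre as computed in valideRoutine(), relative to the cropped area.
        cv::Point2i centerInMask(mask.width / 2, mask.height / 2);

        // The same point expressed in the uncropped depth frame.
        cv::Point2i centerInFrame(mask.x + centerInMask.x, mask.y + centerInMask.y);

        // depthFrame.at<float>(p) with a cv::Point reads row p.y, column p.x,
        // so which of the two centres is the right lookup depends on whether
        // the depth frame handed to valideRoutine() is already cropped to the mask.
        (void)centerInFrame;
        return 0;
    }
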
diff --git a/inc/projection.h b/inc/projection.h
index 5390b06abbcbfa78dcb4329b3dd7de5425e11a2e..8e02ba3e83ff168904efe37050b6f5f6ed7ae302 100644
--- a/inc/projection.h
+++ b/inc/projection.h
@@ -23,6 +23,7 @@ class Projection{
         void deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap, cv::Point2f fxy, cv::Point2f ppxy);
         void filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap);
         void buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst);
+        void holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap);
         cv::Point2i findMatchingPixel(int i, int j, float z, Camera *camera, cv::Point3f beamer_pos, cv::Point2f fxy, cv::Point2f ppxy);
 
     public:
diff --git a/src/components/camera.cpp b/src/components/camera.cpp
index 5c84c7ffa05e8f9b42bffb5dc0d4d55bc3f4143a..11b5984b6a97117fb8a641749efba5fe89a6ac34 100644
--- a/src/components/camera.cpp
+++ b/src/components/camera.cpp
@@ -35,11 +35,13 @@ cv::Mat Camera::getDepthFrame(){
     static cv::Mat meters = cv::Mat(depth_frame->get_height(), depth_frame->get_width(), CV_32FC1);
     values.data = (uchar*)depth_frame->get_data();
     values.convertTo(meters, CV_32FC1, depth_scale);
-    return meters;
+    return meters.clone();
 };
 
 cv::Mat Camera::getColorFrame(){
-    return cv::Mat(color_frame->get_height(), color_frame->get_width(), CV_8UC3, (void *)color_frame->get_data());
+    static cv::Mat colors = cv::Mat(color_frame->get_height(), color_frame->get_width(), CV_8UC3);
+    colors.data = (uchar*)color_frame->get_data();
+    return colors.clone();
 };
         
 
@@ -57,13 +59,10 @@ int Camera::start(){
     //rs2::log_to_file(RS2_LOG_SEVERITY_DEBUG, "./camera.logs");
 
     // Settings
-    // default camera resolution : 640x480
-    cfg->enable_stream(RS2_STREAM_COLOR);
-    cfg->enable_stream(RS2_STREAM_DEPTH);
-    /*
-    cfg->enable_stream(RS2_STREAM_COLOR, 1280, 720, RS2_FORMAT_RGB8, 30);
-    cfg->enable_stream(RS2_STREAM_DEPTH, 1280, 720, RS2_FORMAT_Y16, 30);
-    */
+    //cfg->enable_stream(rs2_stream::RS2_STREAM_DEPTH, 1280, 720, rs2_format::RS2_FORMAT_Z16);
+    //cfg->enable_stream(rs2_stream::RS2_STREAM_COLOR, 1280, 720, rs2_format::RS2_FORMAT_RGB8);
+    cfg->enable_stream(rs2_stream::RS2_STREAM_DEPTH);
+    cfg->enable_stream(rs2_stream::RS2_STREAM_COLOR);
    
     rs2::depth_sensor sensor = pipe->start(*cfg).get_device().first<rs2::depth_sensor>();
     depth_scale = sensor.get_depth_scale();
@@ -81,6 +80,7 @@ int Camera::start(){
     // and we use it only in one precise case
     capture();
     intr_profile = depth_frame->get_profile().as<rs2::video_stream_profile>().get_intrinsics();
+    //std::cout << intr_profile.width << "x" << intr_profile.height << std::endl;
 
 
     return 0;
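
getDepthFrame and getColorFrame now fill a static cv::Mat header and return clone(), so the caller receives data that later captures (or other callers) cannot overwrite. A minimal sketch of the wrap-then-clone pattern on a plain buffer; the resolution and the 0.001 scale are placeholders, not values from this codebase:

    #include <opencv2/core.hpp>
    #include <cstdint>
    #include <vector>

    int main() {
        // Stand-in for the driver-owned buffer returned by depth_frame->get_data().
        std::vector<uint16_t> buffer(480 * 640, 1234);

        // Header over external memory: no copy, the pixels still live in 'buffer'.
        cv::Mat values(480, 640, CV_16UC1, buffer.data());

        cv::Mat meters;
        values.convertTo(meters, CV_32FC1, 0.001);   // placeholder depth_scale

        // Returning the shared/static Mat directly would let a later call
        // overwrite what the caller holds; clone() hands back independent data.
        cv::Mat independent = meters.clone();
        (void)independent;
        return 0;
    }
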
diff --git a/src/components/projection.cpp b/src/components/projection.cpp
index 483e696aec11eb6ebfcd23830ac17ee79c88e945..95dad2c73d65cc7dcccb0fb94f6cc5fce67bb5f3 100644
--- a/src/components/projection.cpp
+++ b/src/components/projection.cpp
@@ -39,7 +39,6 @@ void Projection::adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, c
             resized_depth.release();
             resized_src.release();
         }
-        std::cout << "Create" << std::endl;
         deprojectMap.create(dst.rows, dst.cols);
         frameMap.create(dst.rows, dst.cols);
         resized_depth.create(dst.rows, dst.cols);
@@ -76,7 +75,7 @@ void Projection::adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, c
     //now_ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() );
     //std::cout << "Build : " << (now_ms - start_ms).count() << std::endl;
     
-    
+    holeFilling(dst, frameMap);
 
     cv::warpAffine(dst, dst, adjustingMatrix, dst.size());
 }
@@ -98,23 +97,21 @@ void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera
     float mask_scale_x = mask.x * depth.cols / mask.width;
     float mask_scale_y = mask.y * depth.rows / mask.height;
 
-    for (int j = 0; j < depth.rows; j++){
-        for (int i = 0; i < depth.cols; i++){
+    for (int i = 0; i < depth.rows; i++){
+        for (int j = 0; j < depth.cols; j++){
             // coordinates of the pixel relative to the original image taken from the camera
-            int base_x = mask_scale_x+i;
-            int base_y = mask_scale_y+j;
-            float z = depth.at<float>(j,i);
+            int base_x = mask_scale_x+j;
+            int base_y = mask_scale_y+i;
+            float z = depth.at<float>(i,j);
             
             // pixels based on the original depth frame taken from the camera
-            cv::Point2i pixel = findMatchingPixel( base_x, base_y, z, camera, beamer_pos, fxy, ppxy );
+            cv::Point2i pixel = findMatchingPixel( base_y, base_x, z, camera, beamer_pos, fxy, ppxy );
             
             // pixel relative to the cropping mask (the area where the frame is projected)
             pixel.x -= mask_scale_x;
             pixel.y -= mask_scale_y;
 
-            deprojectMap.at<cv::Point2i>(j,i) = pixel;
-
-            //deprojectMap.at<cv::Point2i>(j,i) = findMatchingPixel( i, j, depth.at<float>(j,i), camera, beamer_pos, fxy, ppxy );
+            deprojectMap.at<cv::Point2i>(i,j) = pixel;
         }
     }
 }
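
The swapped loop variables above follow the OpenCV convention that at(i, j) means (row, column) while cv::Point2i carries (x, y); findMatchingPixel is accordingly called with (base_y, base_x). A tiny standalone check of that convention (the (-1, -1) sentinel mirrors what holeFilling later assumes for unmapped pixels):

    #include <opencv2/core.hpp>
    #include <cassert>

    int main() {
        // 2 rows, 3 columns, every element initialised to the "unmapped" sentinel.
        cv::Mat_<cv::Point2i> frameMap(2, 3, cv::Point2i(-1, -1));

        int i = 1, j = 2;                    // i = row (y), j = column (x)
        frameMap(i, j) = cv::Point2i(j, i);  // stored value is (x, y)

        // Indexing with a Point uses (x, y) and lands on the same element.
        assert(frameMap(cv::Point2i(j, i)) == cv::Point2i(j, i));
        return 0;
    }
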
@@ -128,12 +125,12 @@ void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera
 */
 void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap){
 
-    for (int j = 0; j < deprojectMap.rows; j++){
-        for (int i = 0; i < deprojectMap.cols; i++){
+    for (int i = 0; i < deprojectMap.rows; i++){
+        for (int j = 0; j < deprojectMap.cols; j++){
 
             // coords of the new pixel
-            cv::Point2i deprojectedPixel = deprojectMap.at<cv::Point2i>(j,i);
-            cv::Point2i highestDepthPixel = cv::Point2i(i,j);
+            cv::Point2i deprojectedPixel = deprojectMap.at<cv::Point2i>(i,j);
+            cv::Point2i highestDepthPixel = cv::Point2i(j,i);
 
             if( (0 <= deprojectedPixel.x && deprojectedPixel.x < depth.cols) &&
                 (0 <= deprojectedPixel.y && deprojectedPixel.y < depth.rows) ){
@@ -142,7 +139,7 @@ void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<
                 cv::Point2i currentDepthPixel = frameMap.at<cv::Point2i>(deprojectedPixel);
                 if( (0 <= currentDepthPixel.x && currentDepthPixel.x < depth.cols) &&
                     (0 <= currentDepthPixel.y && currentDepthPixel.y < depth.rows) ){
-                        if(depth.at<float>(currentDepthPixel) <= depth.at<float>(j,i)){
+                        if(depth.at<float>(currentDepthPixel) <= depth.at<float>(i,j)){
                             highestDepthPixel = currentDepthPixel;
                         }
                 }
@@ -160,21 +157,64 @@ void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<
 */
 void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst){
     
-    for (int j = 0; j < frameMap.rows; j++){
-        for (int i = 0; i < frameMap.cols; i++){
+    for (int i = 0; i < frameMap.rows; i++){
+        for (int j = 0; j < frameMap.cols; j++){
 
-            cv::Point2i pixel_src = frameMap.at<cv::Point2i>(j,i);
-            cv::Point2i pixel_dst = cv::Point2i(i,j);
+            cv::Point2i pixel_src = frameMap.at<cv::Point2i>(i,j);
+            cv::Point2i pixel_dst = cv::Point2i(j,i);
 
             if( (0<=pixel_src.x && pixel_src.x<depth.cols) && (0<=pixel_src.y && pixel_src.y<depth.rows) ){
                 // src and dst must be of same size
-                //copyPixelsInto(pixel_dst, dst, pixel_src, src, depth);
                 dst.at<cv::Vec3b>(pixel_dst) = src.at<cv::Vec3b>(pixel_src);
             }
         }
     }
 }
 
+/*
+    fill each unmapped destination pixel with the average of its non-null neighbours
+*/
+void Projection::holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap){
+
+    for (int i = 0; i < dst.rows; i++){
+        for (int j = 0; j < dst.cols; j++){
+            
+            if(frameMap.at<cv::Point2i>(i,j) == cv::Point2i(-1,-1)){
+
+                int cpt = 0;
+                int r = 0;
+                int g = 0;
+                int b = 0;
+
+                for(int d_y = -1; d_y <= 1; d_y++){
+                    for(int d_x = -1; d_x <= 1; d_x++){
+                
+                        if(!(d_x == 0 && d_y == 0)){
+                            
+                            int y = i+d_y;
+                            int x = j+d_x;
+                            if( ((0 <= x && x < dst.cols) && (0 <= y && y < dst.rows)) && (frameMap.at<cv::Point2i>(y,x) != cv::Point2i(-1,-1)) ){
+                                cv::Vec3b color = dst.at<cv::Vec3b>(y,x);
+                                r += color[0];
+                                g += color[1];
+                                b += color[2];
+                                cpt++;
+                            }
+                        }
+                    }
+                }
+
+                if(cpt > 0){
+                    r /= cpt;
+                    g /= cpt;
+                    b /= cpt;
+                    dst.at<cv::Vec3b>(i,j) = cv::Vec3b(r,g,b);
+                }
+            }
+        }
+    }
+}
+
 /*
     C : Camera position
     B : Beamer position
@@ -189,13 +229,15 @@ void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frame
 */
 cv::Point2i Projection::findMatchingPixel(int i, int j, float z, Camera *camera, cv::Point3f CB, cv::Point2f fxy, cv::Point2f ppxy){
    
-    float pixel[2] = {static_cast<float>(i), static_cast<float>(j)};
-    const float BEz = distanceTopSandbox - CB.z;
+    float pixel[2] = {static_cast<float>(j), static_cast<float>(i)};
 
     cv::Point3f CP = camera->deprojectPixelToPoint(pixel, z, fxy, ppxy);
     cv::Point3f BP = CP - CB;
+
+    const float BEz = distanceTopSandbox - CB.z;
     float BAz = BP.z;
     float alpha = BEz / BAz;
+
     cv::Point3f BV = (alpha * BP);
     cv::Point3f CV = CB + BV;
 
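For the hunk above: findMatchingPixel scales the beamer-to-point vector until it reaches the plane of the sandbox top, using the distance stored by setDistanceTopSandbox. A worked numeric example of that similar-triangles step, with made-up positions:

    #include <opencv2/core.hpp>
    #include <iostream>

    int main() {
        cv::Point3f CB(0.20f, 0.00f, -0.30f);  // beamer position relative to the camera (made up)
        cv::Point3f CP(0.05f, 0.10f, 1.00f);   // deprojected pixel in metres, camera frame (made up)
        float distanceTopSandbox = 1.20f;      // as set from the depth at the projection centre

        cv::Point3f BP = CP - CB;              // beamer -> point
        float BEz = distanceTopSandbox - CB.z; // beamer -> sandbox-top plane, along z
        float alpha = BEz / BP.z;              // similar-triangle ratio
        cv::Point3f CV = CB + alpha * BP;      // where the beamer ray meets the plane

        std::cout << CV << std::endl;          // z component is 1.2 = distanceTopSandbox by construction
        return 0;
    }
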
diff --git a/src/lib/sandboxSetup.cpp b/src/lib/sandboxSetup.cpp
index e6226e593857e129459e54b8a8494ac6b8b6d3cb..6c0ef6a8ff2dc42866cd1e5f7148d7f8481a0609 100644
--- a/src/lib/sandboxSetup.cpp
+++ b/src/lib/sandboxSetup.cpp
@@ -80,8 +80,8 @@ cv::Point2i SandboxSetup::getCenterOfQuadrilateral(std::vector<cv::Point> rectPo
     double mua;
     double mub;
     beamer->LineLineIntersect( cv::Point3d(centroids.at(0).x, centroids.at(0).y, 0),
-                               cv::Point3d(centroids.at(1).x, centroids.at(1).y, 0),
                                cv::Point3d(centroids.at(2).x, centroids.at(2).y, 0),
+                               cv::Point3d(centroids.at(1).x, centroids.at(1).y, 0),
                                cv::Point3d(centroids.at(3).x, centroids.at(3).y, 0),
                                &pa, &pb, &mua, &mub );
 
@@ -106,8 +106,8 @@ void SandboxSetup::setupAdjustMatrix(std::vector<cv::Point> rectPoints, cv::Poin
 
     // Set adjusting matrix for the projection
     int widthTop = rectPoints[3].x - rectPoints[0].x;
-    double angle1 = atan((double)(rectPoints[3].y - rectPoints[0].y) / widthTop);
-    cv::Mat matRotation = cv::getRotationMatrix2D(center, toDegrees(angle1), 1); // adjustingMatrix
+    double angle = atan((double)(rectPoints[3].y - rectPoints[0].y) / widthTop);
+    cv::Mat_<float> matRotation = cv::getRotationMatrix2D(center, toDegrees(angle), 1); // adjustingMatrix
     projection->setAdjustingMatrix(matRotation);
 }
 
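With the reordered arguments, LineLineIntersect now pairs centroid 0 with 2 and 1 with 3, i.e. it intersects the quadrilateral's diagonals instead of two adjacent sides. A 2D-only sketch of the same diagonal intersection (not the Beamer::LineLineIntersect implementation, which takes 3D points):

    #include <opencv2/core.hpp>
    #include <iostream>
    #include <vector>

    // Intersection of segments c[0]-c[2] and c[1]-c[3] (the diagonals), parametric form.
    // Returns false if the diagonals are parallel.
    static bool diagonalIntersection(const std::vector<cv::Point2d> &c, cv::Point2d &out) {
        cv::Point2d r = c[2] - c[0];            // direction of diagonal 0 -> 2
        cv::Point2d s = c[3] - c[1];            // direction of diagonal 1 -> 3
        double denom = r.x * s.y - r.y * s.x;   // 2D cross product
        if (denom == 0.0) return false;
        double t = ((c[1].x - c[0].x) * s.y - (c[1].y - c[0].y) * s.x) / denom;
        out = c[0] + t * r;
        return true;
    }

    int main() {
        std::vector<cv::Point2d> centroids = { {0, 0}, {4, 0}, {4, 3}, {0, 3} }; // example quad
        cv::Point2d center;
        if (diagonalIntersection(centroids, center))
            std::cout << center << std::endl;   // (2, 1.5) for this rectangle
        return 0;
    }
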
diff --git a/src/tools/sandboxConfig.cpp b/src/tools/sandboxConfig.cpp
index 001a6337f21260e2ac7250de6331de56609e4fd1..557d7ba23b0bc4dfd54a86a26976a84a44318670 100644
--- a/src/tools/sandboxConfig.cpp
+++ b/src/tools/sandboxConfig.cpp
@@ -9,7 +9,7 @@ int SandboxConfig::saveAdjustingMatrixInto(char *path, cv::Mat matrix){
     YAML::Node vec = YAML::Load("[]");
     for (int y = 0; y < matrix.rows; y++){
         for (int x = 0; x < matrix.cols; x++){
-            vec.push_back((float)matrix.at<double>(y, x));
+            vec.push_back((float)matrix.at<float>(y, x));
         }
     }
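
cv::Mat::at<T> performs no conversion, so T must match the matrix's actual depth; this change follows the switch to a cv::Mat_<float> adjusting matrix earlier in the diff. A small illustration of the pitfall: getRotationMatrix2D itself returns CV_64F, and it is the assignment to cv::Mat_<float> that converts the data to CV_32F, after which at<float> is the right accessor.

    #include <opencv2/imgproc.hpp>
    #include <iostream>

    int main() {
        // getRotationMatrix2D returns a 2x3 CV_64F (double) matrix...
        cv::Mat rot64 = cv::getRotationMatrix2D(cv::Point2f(320, 240), 5.0, 1.0);
        CV_Assert(rot64.type() == CV_64F);

        // ...and assigning it to Mat_<float> converts it to CV_32F, which is
        // why the YAML writer now reads it back with at<float>().
        cv::Mat_<float> rot32 = rot64;
        std::cout << rot32.at<float>(0, 2) << std::endl;   // x translation term
        return 0;
    }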