diff --git a/app/SandboxSetup/croppingmask.cpp b/app/SandboxSetup/croppingmask.cpp
index 21fcbba34a54901c02a67844e0d7af539e7c07a3..71b138c27c8016245e9cf4d6c27030b0b80117dc 100644
--- a/app/SandboxSetup/croppingmask.cpp
+++ b/app/SandboxSetup/croppingmask.cpp
@@ -31,13 +31,14 @@ void CroppingMask::valideRoutine(){
     timer->stop();
     setup->getCamera()->capture();
     std::vector<cv::Point> rectPoints = getRectPoints();
-    cv::Point2i centerProjection = setup->getCenterOfQuadrilateral(rectPoints);
     cv::Mat depthFrame = setup->getCamera()->getDepthFrame();
+    
+    setup->setupCroppingMask(rectPoints);
+    cv::Rect mask = setup->getCamera()->getCroppingMask();
 
+    cv::Point2i centerProjection = cv::Point2i(mask.width/2, mask.height/2);
     setup->setupAdjustMatrix(rectPoints, centerProjection);
-    setup->setupCroppingMask(rectPoints);
     setup->getProjection()->setDistanceTopSandbox(depthFrame.at<float>(centerProjection));
-
     endSuccess = true;
 }
 
@@ -86,7 +87,7 @@ void CroppingMask::init(){
     }
 
     // no config found
-    if(rectPoints.empty() || !maskValideInFrame(&cameraColoredFrame)){
+    if(!maskValideInFrame(&cameraColoredFrame)){
             float y = cameraColoredFrame.size().height;
             float x = cameraColoredFrame.size().width;
             rectPoints = std::vector<cv::Point2i>{ cv::Point2i(1.0/4*x, 1.0/4*y), cv::Point2i(1.0/4*x, 3.0/4*y), cv::Point2i(3.0/4*x, 3.0/4*y), cv::Point2i(3.0/4*x, 1.0/4*y) };
diff --git a/inc/camera.h b/inc/camera.h
index 58491ffc3e5cee352fb8c6f6fd26ea7baa0ddd77..ba76e9d07261b66e628374abb89da24aa5dcc831 100644
--- a/inc/camera.h
+++ b/inc/camera.h
@@ -35,8 +35,11 @@ class Camera{
         
         int start();
         void stop();
-        cv::Point3f deprojectPixelToPoint(float coord[], float z1);
+        cv::Point3f deprojectPixelToPoint(float coord[], float z);
+        cv::Point3f deprojectPixelToPoint(float coord[], float z, cv::Point2f fxy, cv::Point2f ppxy);
         cv::Point2i projectPointToPixel(cv::Point3f point3D);
+        cv::Point2i projectPointToPixel(cv::Point3f point3D, cv::Point2f fxy, cv::Point2f ppxy);
+        std::vector<cv::Point2f> getAdaptedIntrinsics(cv::Mat &projection);
         void capture();
         void printCroppingMask();
 
diff --git a/inc/projection.h b/inc/projection.h
index c85b20d56f725c1123f8eef62ec1e730db54df00..f088b89e5b7b7815f9ec4f0879970d2abaaf1d8d 100644
--- a/inc/projection.h
+++ b/inc/projection.h
@@ -7,33 +7,39 @@
 
 class Projection{
     private:
-        cv::Mat adjustingMatrix;
+        cv::Mat_<float> adjustingMatrix;
         float distanceTopSandbox;
-        // Buffer for the builded virtual frame, which is scaled to n * depth_frame.size for the building process
-        cv::Mat_<cv::Vec3b> resized_dst;
+
+        // resized depth frame to dst resolution
+        cv::Mat_<float> resized_depth;
+        // resized src to project to dst resolution
+        cv::Mat_<cv::Vec3b> resized_src;
         // Buffer containing the pixels's new location when deprojected to beamer's POV
         cv::Mat_<cv::Point2i> deprojectMap;
-        // Buffer indicating from where to get the pixels in the source frame
+        // Buffer indicating from where to get the pixels in the source frame to build the output
         cv::Mat_<cv::Point2i> frameMap;
 
-        void deprojectPixelsFromDepth(cv::Mat_<float> &depth, cv::Rect mask, Camera *camera, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap);
+        // intrinsic parameters for deprojection, adapted to the projection's resolution
+        cv::Point2f fxy;
+        cv::Point2f ppxy;
+
+        void deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap, cv::Point2f fxy, cv::Point2f ppxy);
         void filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap);
         void buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst);
-        cv::Point2i findMatchingPixel(int i, int j, float z, Camera *camera, cv::Point3f beamer_pos);
-        void copyPixelsInto(cv::Point2i pixel_dst, cv::Mat_<cv::Vec3b> &dst, cv::Point2i pixel_src, cv::Mat_<cv::Vec3b> &src, cv::Mat_<float> &depth);
-        cv::Size getMatchingSize(cv::Mat &src, cv::Mat &base);
+        void holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap);
+        cv::Point2i findMatchingPixel(int i, int j, float z, Camera *camera, cv::Point3f beamer_pos, cv::Point2f fxy, cv::Point2f ppxy);
 
     public:
         Projection();
 
-        void setAdjustingMatrix(cv::Mat matrix){ adjustingMatrix = matrix; }
+        void setAdjustingMatrix(cv::Mat_<float> matrix){ adjustingMatrix = matrix; }
         cv::Mat getAdjustingMatrix(){ return adjustingMatrix; }
         void setDistanceTopSandbox(float dist){ distanceTopSandbox = dist; };
         float getDistanceTopSandbox(){ return distanceTopSandbox; };
 
         cv::Point2i rotatePixel(cv::Point2i pixel);
         cv::Point2i revertRotatePixel(cv::Point2i pixel);
-        void adjustFrame(cv::Mat_<float> depth, cv::Mat_<cv::Vec3b> src, cv::Mat_<cv::Vec3b> &dst, Camera *camera, cv::Point3f beamer_pos);
+        void adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst, Camera *camera, cv::Point3f beamer_pos);
         void printAdjustingMatrix();
 
 };
diff --git a/inc/sandbox.h b/inc/sandbox.h
index 02828dbd5304e80478c9db5215c0ae625c75f01f..bb45cae46c256a29dc8419ae40d2200f345de457 100644
--- a/inc/sandbox.h
+++ b/inc/sandbox.h
@@ -13,6 +13,7 @@ class Sandbox{
         Projection *projection;
         Camera *camera;
         Beamer *beamer;
+        cv::Mat_<cv::Vec3b> projectedFrame;
 
     public:
         Sandbox();
@@ -26,8 +27,8 @@ class Sandbox{
         void captureFrame();
         cv::Mat_<cv::Vec3b> getColorFrame();
         cv::Mat_<float> getDepthFrame();
-        cv::Mat_<cv::Vec3b> adjustProjection(cv::Mat_<cv::Vec3b> frame);
-        cv::Mat_<cv::Vec3b> adjustProjection(cv::Mat_<cv::Vec3b> frame, cv::Mat_<float> depth);
+        cv::Mat_<cv::Vec3b> adjustProjection(cv::Mat_<cv::Vec3b> &frame);
+        cv::Mat_<cv::Vec3b> adjustProjection(cv::Mat_<cv::Vec3b> &frame, cv::Mat_<float> &depth);
         int loadConfig();
         int loadConfigFrom(char *path);
     
diff --git a/inc/sandboxSetup.h b/inc/sandboxSetup.h
index 967a75c40c2d30d3c984282db89a86774bab9608..94cbf17a797db29dd46f9c1a5962bba5c7f8dc22 100644
--- a/inc/sandboxSetup.h
+++ b/inc/sandboxSetup.h
@@ -14,8 +14,6 @@ class SandboxSetup{
         Camera *camera;
         Beamer *beamer;
 
-        std::vector<std::vector<cv::Point2i>> getTriangles(std::vector<cv::Point2i> rectPoints);
-        std::vector<cv::Point2i> getCentroids(std::vector<std::vector<cv::Point2i>> triangles);
         double toDegrees(double radians);
 
     public:
@@ -37,7 +35,6 @@ class SandboxSetup{
         int loadCroppingMask();
 
         // edit variables of config => not persistant
-        cv::Point2i getCenterOfQuadrilateral(std::vector<cv::Point> rectPoints);
         void setupAdjustMatrix(std::vector<cv::Point> rectPoints, cv::Point center);
         void setupCroppingMask(std::vector<cv::Point> rectPoints);
 };
diff --git a/src/components/camera.cpp b/src/components/camera.cpp
index e786cbae4094ab9284fa6e142eabb7a985ce4c9d..11b5984b6a97117fb8a641749efba5fe89a6ac34 100644
--- a/src/components/camera.cpp
+++ b/src/components/camera.cpp
@@ -35,11 +35,13 @@ cv::Mat Camera::getDepthFrame(){
     static cv::Mat meters = cv::Mat(depth_frame->get_height(), depth_frame->get_width(), CV_32FC1);
     values.data = (uchar*)depth_frame->get_data();
     values.convertTo(meters, CV_32FC1, depth_scale);
-    return meters;
+    return meters.clone();
 };
 
 cv::Mat Camera::getColorFrame(){
-    return cv::Mat(color_frame->get_height(), color_frame->get_width(), CV_8UC3, (void *)color_frame->get_data());
+    static cv::Mat colors = cv::Mat(color_frame->get_height(), color_frame->get_width(), CV_8UC3);
+    colors.data = (uchar*)color_frame->get_data();
+    return colors.clone();
 };
         
 
@@ -57,13 +59,10 @@ int Camera::start(){
     //rs2::log_to_file(RS2_LOG_SEVERITY_DEBUG, "./camera.logs");
 
     // Settings
-    // default camera resolution : 640x480
-    cfg->enable_stream(RS2_STREAM_COLOR);
-    cfg->enable_stream(RS2_STREAM_DEPTH);
-    /*
-    cfg->enable_stream(RS2_STREAM_COLOR, 1280, 720, RS2_FORMAT_RGB8, 30);
-    cfg->enable_stream(RS2_STREAM_DEPTH, 1280, 720, RS2_FORMAT_Y16, 30);
-    */
+    //cfg->enable_stream(rs2_stream::RS2_STREAM_DEPTH, 1280, 720, rs2_format::RS2_FORMAT_Z16);
+    //cfg->enable_stream(rs2_stream::RS2_STREAM_COLOR, 1280, 720, rs2_format::RS2_FORMAT_RGB8);
+    cfg->enable_stream(rs2_stream::RS2_STREAM_DEPTH);
+    cfg->enable_stream(rs2_stream::RS2_STREAM_COLOR);
    
     rs2::depth_sensor sensor = pipe->start(*cfg).get_device().first<rs2::depth_sensor>();
     depth_scale = sensor.get_depth_scale();
@@ -81,6 +80,8 @@ int Camera::start(){
     // and we use it only in one precise case
     capture();
     intr_profile = depth_frame->get_profile().as<rs2::video_stream_profile>().get_intrinsics();
+    //std::cout << intr_profile.width << "x" << intr_profile.height << std::endl;
+
 
     return 0;
 }
@@ -119,6 +120,7 @@ cv::Point2i Camera::projectPointToPixel(cv::Point3f point3D){
     return cv::Point2i(pixel[0], pixel[1]);
 }
 
+
 // Get the point relative to the real world matching the coordinates of the pixel
 cv::Point3f Camera::deprojectPixelToPoint(float coord[], float z){
 
@@ -128,6 +130,43 @@ cv::Point3f Camera::deprojectPixelToPoint(float coord[], float z){
 }
 
 
+/*
+    Custom function based on librealsense, taking f and pp as args;
+    they are related to the camera's profile, but adapted to what we want to display:
+    the limits of the pixels match the limits of the camera's frame and
+    the 3D projection matches the camera's too.
+
+    Works in our case, because our camera's profile is RS2_DISTORTION_BROWN_CONRADY
+    (a profile describing what kind of distortion is applied on the frame to adjust for the lens)
+*/
+cv::Point2i Camera::projectPointToPixel(cv::Point3f point, cv::Point2f f, cv::Point2f pp){
+    
+    float x = point.x / point.z;
+    float y = point.y / point.z;
+    return cv::Point2i( x*f.x+pp.x, y*f.y+pp.y);
+}
+
+cv::Point3f Camera::deprojectPixelToPoint(float pixel[], float z, cv::Point2f f, cv::Point2f pp){
+    
+    float x = (pixel[0] - pp.x) / f.x;
+    float y = (pixel[1] - pp.y) / f.y;
+    return cv::Point3f(z*x, z*y, z);
+}
+
+std::vector<cv::Point2f> Camera::getAdaptedIntrinsics(cv::Mat &projection){
+    
+    float fx = static_cast<float>(intr_profile.fx * projection.size().width) / croppingMask.width;
+    float fy = static_cast<float>(intr_profile.fy * projection.size().height) / croppingMask.height;
+    cv::Point2f fxy = cv::Point2f(fx, fy);
+
+    float ppx = static_cast<float>(intr_profile.ppx * projection.size().width) / croppingMask.width;
+    float ppy = static_cast<float>(intr_profile.ppy * projection.size().height) / croppingMask.height;
+    cv::Point2f ppxy = cv::Point2f(ppx, ppy);
+
+    return std::vector<cv::Point2f> {fxy, ppxy};
+}
+
+
 void Camera::printCroppingMask(){
     cv::Rect mask = getCroppingMask();
     std::cout << "(" << mask.x << "," << mask.y << ") + " << mask.width << "x" << mask.height << std::endl;
diff --git a/src/components/projection.cpp b/src/components/projection.cpp
index f445ee6b06395ce17c50d1d3329d0dc5e98cac01..196dd55fcd4c7f0706376326067bca892b7bee1c 100644
--- a/src/components/projection.cpp
+++ b/src/components/projection.cpp
@@ -1,5 +1,5 @@
 #include "../../inc/projection.h"
-
+#include <chrono>
 
 /*
  *   MAIN
@@ -30,32 +30,53 @@ cv::Point2i Projection::revertRotatePixel(cv::Point2i pixel){
 /*
     Adjust the projected frame with the topology from the camera to the beamer POV
 */
-void Projection::adjustFrame(cv::Mat_<float> depth, cv::Mat_<cv::Vec3b> src, cv::Mat_<cv::Vec3b> &dst, Camera *camera, cv::Point3f beamer_pos){
+void Projection::adjustFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst, Camera *camera, cv::Point3f beamer_pos){
 
-    if(deprojectMap.empty() || deprojectMap.size() != depth.size()){
+    if(deprojectMap.empty() || deprojectMap.size() != dst.size()){
         if(!deprojectMap.empty()){
             deprojectMap.release();
             frameMap.release();
-            resized_dst.release();
+            resized_depth.release();
+            resized_src.release();
         }
-        deprojectMap.create(depth.rows, depth.cols);
-        frameMap.create(depth.rows, depth.cols);
-        resized_dst.create(getMatchingSize(dst, depth));
+        deprojectMap.create(dst.rows, dst.cols);
+        frameMap.create(dst.rows, dst.cols);
+        resized_depth.create(dst.rows, dst.cols);
+        resized_src.create(dst.rows, dst.cols);
+
+        std::vector<cv::Point2f> profil = camera->getAdaptedIntrinsics(dst);
+        fxy = profil.at(0);
+        ppxy = profil.at(1);
     }
 
     deprojectMap = cv::Point2i(-1,-1);
     frameMap = cv::Point2i(-1,-1);
-    resized_dst = cv::Vec3b(0,0,0);
-
-    // resize to match 1:1 ratio with resized_dst, since we'll do later:
-    //      resized_dst[i] = src[i]
-    cv::resize(src, src, resized_dst.size());
 
-    deprojectPixelsFromDepth(depth, camera->getCroppingMask(), camera, beamer_pos, deprojectMap);
-    filterLowestDeprojectedPoints(depth, deprojectMap, frameMap);
-    buildFrame(depth, frameMap, src, resized_dst);
+    // resize to match 1:1 ratio with dst, since we'll do later:
+    //      dst[i] = src[i]
+    cv::resize(src, resized_src, dst.size());
+    cv::resize(depth, resized_depth, dst.size());    
+
+    // 75 ms
+    //std::chrono::milliseconds start_ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() );
+    deprojectPixelsFromDepth(resized_depth, camera, camera->getCroppingMask() , beamer_pos, deprojectMap, fxy, ppxy);
+    //std::chrono::milliseconds now_ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() );
+    //std::cout << "Deproject : " << (now_ms - start_ms).count() << std::endl;
+
+    // 18-19 ms
+    //start_ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() );
+    filterLowestDeprojectedPoints(resized_depth, deprojectMap, frameMap);
+    //now_ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() );
+    //std::cout << "Filter : " << (now_ms - start_ms).count() << std::endl;
+    
+    // 14-15 ms
+    //start_ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() );
+    buildFrame(resized_depth, frameMap, resized_src, dst);
+    //now_ms = std::chrono::duration_cast< std::chrono::milliseconds >( std::chrono::system_clock::now().time_since_epoch() );
+    //std::cout << "Build : " << (now_ms - start_ms).count() << std::endl;
+    
+    holeFilling(dst, frameMap);
 
-    cv::resize(resized_dst, dst, dst.size());
     cv::warpAffine(dst, dst, adjustingMatrix, dst.size());
 }
 
@@ -70,26 +91,27 @@ void Projection::adjustFrame(cv::Mat_<float> depth, cv::Mat_<cv::Vec3b> src, cv:
     Deproject pixels in 3D, then adapt to Beamer's POV, and go back to 2D
     This gives us the location od pixels adapted to the Beamer projection
 */
-void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, cv::Rect mask, Camera *camera, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap){
-
-    // Browse the depth frame matching the cropping mask
-    // while adapting pixels's position to the beamer's position
-    for (int j = 0; j < depth.rows; j++){
-        for (int i = 0; i < depth.cols; i++){
+void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, Camera *camera, cv::Rect mask, cv::Point3f beamer_pos, cv::Mat_<cv::Point2i> &deprojectMap, cv::Point2f fxy, cv::Point2f ppxy){
+    
+    // scale the mask's coordinates to the new resolution of the depth frame
+    float mask_scale_x = mask.x * depth.cols / mask.width;
+    float mask_scale_y = mask.y * depth.rows / mask.height;
 
+    for (int i = 0; i < depth.rows; i++){
+        for (int j = 0; j < depth.cols; j++){
             // coordinates of the pixel relative to the orginial image taken from the camera
-            int base_x = mask.x+i;
-            int base_y = mask.y+j;
-            float z = depth.at<float>(j,i);
+            int base_x = mask_scale_x+j;
+            int base_y = mask_scale_y+i;
+            float z = depth.at<float>(i,j);
             
             // pixels based on the original depth frame taken from the camera
-            cv::Point2i pixel = findMatchingPixel( base_x, base_y, z, camera, beamer_pos );
+            cv::Point2i pixel = findMatchingPixel( base_y, base_x, z, camera, beamer_pos, fxy, ppxy );
             
             // pixel relative to the cropping mask (the area where the frame is projected)
-            pixel.x -= mask.x;
-            pixel.y -= mask.y;
+            pixel.x -= mask_scale_x;
+            pixel.y -= mask_scale_y;
 
-            deprojectMap.at<cv::Point2i>(j,i) = pixel;
+            deprojectMap.at<cv::Point2i>(i,j) = pixel;
         }
     }
 }
@@ -103,12 +125,12 @@ void Projection::deprojectPixelsFromDepth(cv::Mat_<float> &depth, cv::Rect mask,
 */
 void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &deprojectMap, cv::Mat_<cv::Point2i> &frameMap){
 
-    for (int j = 0; j < deprojectMap.rows; j++){
-        for (int i = 0; i < deprojectMap.cols; i++){
+    for (int i = 0; i < deprojectMap.rows; i++){
+        for (int j = 0; j < deprojectMap.cols; j++){
 
             // coords of the new pixel
-            cv::Point2i deprojectedPixel = deprojectMap.at<cv::Point2i>(j,i);
-            cv::Point2i highestDepthPixel = cv::Point2i(i,j);
+            cv::Point2i deprojectedPixel = deprojectMap.at<cv::Point2i>(i,j);
+            cv::Point2i highestDepthPixel = cv::Point2i(j,i);
 
             if( (0 <= deprojectedPixel.x && deprojectedPixel.x < depth.cols) &&
                 (0 <= deprojectedPixel.y && deprojectedPixel.y < depth.rows) ){
@@ -117,7 +139,7 @@ void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<
                 cv::Point2i currentDepthPixel = frameMap.at<cv::Point2i>(deprojectedPixel);
                 if( (0 <= currentDepthPixel.x && currentDepthPixel.x < depth.cols) &&
                     (0 <= currentDepthPixel.y && currentDepthPixel.y < depth.rows) ){
-                        if(depth.at<float>(currentDepthPixel) <= depth.at<float>(j,i)){
+                        if(depth.at<float>(currentDepthPixel) <= depth.at<float>(i,j)){
                             highestDepthPixel = currentDepthPixel;
                         }
                 }
@@ -135,60 +157,56 @@ void Projection::filterLowestDeprojectedPoints(cv::Mat_<float> &depth, cv::Mat_<
 */
 void Projection::buildFrame(cv::Mat_<float> &depth, cv::Mat_<cv::Point2i> &frameMap, cv::Mat_<cv::Vec3b> &src, cv::Mat_<cv::Vec3b> &dst){
     
-    for (int j = 0; j < frameMap.rows; j++){
-        for (int i = 0; i < frameMap.cols; i++){
+    for (int i = 0; i < frameMap.rows; i++){
+        for (int j = 0; j < frameMap.cols; j++){
 
-            cv::Point2i pixel_src = frameMap.at<cv::Point2i>(j,i);
-            cv::Point2i pixel_dst = cv::Point2i(i,j);
+            cv::Point2i pixel_src = frameMap.at<cv::Point2i>(i,j);
+            cv::Point2i pixel_dst = cv::Point2i(j,i);
 
             if( (0<=pixel_src.x && pixel_src.x<depth.cols) && (0<=pixel_src.y && pixel_src.y<depth.rows) ){
                 // src and dst must be of same size
-                copyPixelsInto(pixel_dst, dst, pixel_src, src, depth);
+                dst.at<cv::Vec3b>(pixel_dst) = src.at<cv::Vec3b>(pixel_src);
             }
         }
     }
 }
 
-
-/*
-    resize the frames to be a multiple of the base size:
-        src.size = n * base.size, where n is uint > 0
-*/
-cv::Size Projection::getMatchingSize(cv::Mat &src, cv::Mat &base){
-    cv::Size bigSize;
-    bigSize.width = (src.size().width % base.size().width == 0) ? src.size().width : src.size().width - (src.size().width % base.size().width) + base.size().width;
-    bigSize.height = (src.size().height % base.size().height == 0) ? src.size().height : src.size().height - (src.size().height % base.size().height) + base.size().height;
-    return bigSize;
-}
-
-
 /*
-    pixels coordinates are relative to the camera depth frame
+    fill each hole with the value of the first non-null neighbour
 */
-void Projection::copyPixelsInto(cv::Point2i pixel_dst, cv::Mat_<cv::Vec3b> &dst, cv::Point2i pixel_src, cv::Mat_<cv::Vec3b> &src, cv::Mat_<float> &depth){
+void Projection::holeFilling(cv::Mat_<cv::Vec3b> &dst, cv::Mat_<cv::Point2i> &frameMap){
 
-    if( src.size().width == dst.size().width && src.size().height == dst.size().height ){
-
-        // determines the size of the square of pixels to copy from the source matching the position of the pixel of the depth frame
-        // (since src and dst are multiple of the depth frame's size)
-        int ratio_w = src.size().width / depth.size().width;
-        int ratio_h = src.size().height / depth.size().height;
+    for (int i = 0; i < dst.rows; i++){
+        for (int j = 0; j < dst.cols; j++){
+            
+            if(frameMap.at<cv::Point2i>(i,j) == cv::Point2i(-1,-1)){
 
-        for(int j=0; j<ratio_h; j++){
-            for (int i=0; i<ratio_w; i++){
+                cv::Vec3b color;
+                bool found = false;
 
-                int x_dst = pixel_dst.x * ratio_w + i;
-                int y_dst = pixel_dst.y * ratio_h + j;
-                int x_src = pixel_src.x * ratio_w + i;
-                int y_src = pixel_src.y * ratio_h + j;
+                for(int d_y = -1; d_y <= 1 && !found; d_y++){
+                    for(int d_x = -1; d_x <= 1 && !found; d_x++){
+                
+                        if(!(d_x == 0 && d_y == 0)){
+                            
+                            int y = i+d_y;
+                            int x = j+d_x;
+                            if( ((0 <= x && x < dst.cols) && (0 <= y && y < dst.rows)) && (frameMap.at<cv::Point2i>(y,x) != cv::Point2i(-1,-1)) ){
+                                color = dst.at<cv::Vec3b>(y,x);
+                                found = true;
+                            }
+                        }
+                    }
+                }
 
-                dst.at<cv::Vec3b>(y_dst, x_dst) = src.at<cv::Vec3b>(y_src, x_src);
+                if(found){
+                    dst.at<cv::Vec3b>(i,j) = color;
+                }
             }
         }
     }
 }
 
-
 /*
     C : Camera position
     B : Beamer position
@@ -201,19 +219,21 @@ void Projection::copyPixelsInto(cv::Point2i pixel_dst, cv::Mat_<cv::Vec3b> &dst,
         CP : distance from camera to point (value of depth_frame)
         CB : distance from camera to beamer (beamer's position is relative to the camera)
 */
-cv::Point2i Projection::findMatchingPixel(int i, int j, float z, Camera *camera, cv::Point3f CB){
+cv::Point2i Projection::findMatchingPixel(int i, int j, float z, Camera *camera, cv::Point3f CB, cv::Point2f fxy, cv::Point2f ppxy){
    
-    float pixel[2] = {static_cast<float>(i), static_cast<float>(j)};
-    const float BEz = distanceTopSandbox - CB.z;
+    float pixel[2] = {static_cast<float>(j), static_cast<float>(i)};
 
-    cv::Point3f CP = camera->deprojectPixelToPoint(pixel, z);
+    cv::Point3f CP = camera->deprojectPixelToPoint(pixel, z, fxy, ppxy);
     cv::Point3f BP = CP - CB;
+
+    const float BEz = distanceTopSandbox - CB.z;
     float BAz = BP.z;
     float alpha = BEz / BAz;
+
     cv::Point3f BV = (alpha * BP);
     cv::Point3f CV = CB + BV;
 
-    return camera->projectPointToPixel(CV);
+    return camera->projectPointToPixel(CV, fxy, ppxy);
 }
 
 
diff --git a/src/lib/sandbox.cpp b/src/lib/sandbox.cpp
index f5747f0a20b99ede034b607ec5ae2d97fa79eba1..aa5fd148d0690edaac54439e84c472045ace9d22 100644
--- a/src/lib/sandbox.cpp
+++ b/src/lib/sandbox.cpp
@@ -38,20 +38,23 @@ cv::Mat_<float> Sandbox::getDepthFrame(){
     return camera->getDepthFrame()(camera->getCroppingMask());
 }
 
-cv::Mat_<cv::Vec3b> Sandbox::adjustProjection(cv::Mat_<cv::Vec3b> frame, cv::Mat_<float> depth){
+cv::Mat_<cv::Vec3b> Sandbox::adjustProjection(cv::Mat_<cv::Vec3b> &frame, cv::Mat_<float> &depth){
     
-    static cv::Mat_<cv::Vec3b> imageCalibrate = cv::Mat_<cv::Vec3b>(cv::Size(beamer->getWidth(), beamer->getHeight()));
-    imageCalibrate = cv::Vec3b(0, 0, 0);
-    projection->adjustFrame(depth, frame, imageCalibrate, camera, beamer->getPosition());
+    if(projectedFrame.empty()){
+        projectedFrame.create(beamer->getHeight(), beamer->getWidth());
+    }
+
+    projectedFrame = cv::Vec3b(0, 0, 0);
+    projection->adjustFrame(depth, frame, projectedFrame, camera, beamer->getPosition());
 
     // frame after process
-    //cv::dilate(imageCalibrate, imageCalibrate, cv::Mat(), cv::Point(-1, -1), 2, 1, 1);
-    //cv::erode(imageCalibrate, imageCalibrate, cv::Mat(), cv::Point(-1, -1), 2, 1, 1);
+    //cv::dilate(projectedFrame, projectedFrame, cv::Mat(), cv::Point(-1, -1), 2, 1, 1);
+    //cv::erode(projectedFrame, projectedFrame, cv::Mat(), cv::Point(-1, -1), 2, 1, 1);
     
-    return imageCalibrate;
+    return projectedFrame;
 }
 
-cv::Mat_<cv::Vec3b> Sandbox::adjustProjection(cv::Mat_<cv::Vec3b> frame){
+cv::Mat_<cv::Vec3b> Sandbox::adjustProjection(cv::Mat_<cv::Vec3b> &frame){
     
     captureFrame();
     cv::Mat_<float> depth = getDepthFrame();
diff --git a/src/lib/sandboxSetup.cpp b/src/lib/sandboxSetup.cpp
index e6226e593857e129459e54b8a8494ac6b8b6d3cb..13d020506ede192420d37c631820cda715ab81f5 100644
--- a/src/lib/sandboxSetup.cpp
+++ b/src/lib/sandboxSetup.cpp
@@ -62,37 +62,6 @@ int SandboxSetup::loadFrameProcessProfil(){
     return loadFrameProcessProfilFrom(defaultConfigFilePath);
 }
 
-/*
-    Get the centroid of a quadrilateral
-    source : http://jwilson.coe.uga.edu/EMT668/EMT668.Folders.F97/Patterson/EMT%20669/centroid%20of%20quad/Centroid.html
-*/
-cv::Point2i SandboxSetup::getCenterOfQuadrilateral(std::vector<cv::Point> rectPoints){
-    
-    std::vector<std::vector<cv::Point2i>> triangles = getTriangles(rectPoints);
-    std::vector<cv::Point2i> centroids = getCentroids(triangles);
-
-    /*
-        Pa = P1 + mua (P2 - P1)
-        Pb = P3 + mub (P4 - P3)
-    */
-    cv::Point3d pa;
-    cv::Point3d pb;
-    double mua;
-    double mub;
-    beamer->LineLineIntersect( cv::Point3d(centroids.at(0).x, centroids.at(0).y, 0),
-                               cv::Point3d(centroids.at(1).x, centroids.at(1).y, 0),
-                               cv::Point3d(centroids.at(2).x, centroids.at(2).y, 0),
-                               cv::Point3d(centroids.at(3).x, centroids.at(3).y, 0),
-                               &pa, &pb, &mua, &mub );
-
-    // pa and pb should be the same
-    cv::Point2i center;
-    center.x = (pa.x + pb.x) / 2;
-    center.y = (pa.y + pb.y) / 2;
-
-    return center;
-}
-
 
 /*  Assuming points positions are :
  *      pts[0] : top left
@@ -106,8 +75,8 @@ void SandboxSetup::setupAdjustMatrix(std::vector<cv::Point> rectPoints, cv::Poin
 
     // Set adjusting matrix for the projection
     int widthTop = rectPoints[3].x - rectPoints[0].x;
-    double angle1 = atan((double)(rectPoints[3].y - rectPoints[0].y) / widthTop);
-    cv::Mat matRotation = cv::getRotationMatrix2D(center, toDegrees(angle1), 1); // adjustingMatrix
+    double angle = atan((double)(rectPoints[3].y - rectPoints[0].y) / widthTop);
+    cv::Mat_<float> matRotation = cv::getRotationMatrix2D(center, toDegrees(angle), 1);
     projection->setAdjustingMatrix(matRotation);
 }
 
@@ -138,47 +107,6 @@ void SandboxSetup::setupCroppingMask(std::vector<cv::Point2i> rectPoints){
 //
 
 
-/*
-    Get the 4 triangles in the quadrilateral
-*/
-std::vector<std::vector<cv::Point2i>> SandboxSetup::getTriangles(std::vector<cv::Point2i> rectPoints){
-    
-    std::vector<std::vector<cv::Point2i>> triangles;
-    std::vector<cv::Point2i> A, B, C, D;
-    std::vector<cv::Point2i> lst[4] = {A,B,C,D};
-
-    // 4 triangles in the quadrilateral
-    for (int i=0; i<4; i++){
-        // corners in the triangle
-        for(int j=0; j<3; j++){
-            lst[i].push_back(rectPoints.at( (i+j)%rectPoints.size() ));
-        }
-        triangles.push_back(lst[i]);
-    }
-
-    return triangles;
-}
-
-/*
-    Get the centroid of each of the 4 triangles
-    source : https://www.khanacademy.org/math/geometry-home/triangle-properties/medians-centroids/v/triangle-medians-and-centroids
-*/
-std::vector<cv::Point2i> SandboxSetup::getCentroids(std::vector<std::vector<cv::Point2i>> triangles){
-    
-    std::vector<cv::Point2i> centroids;
-
-    // the centroid is the average of the 3 coordinates
-    for(int i=0; i<(int)triangles.size(); i++){
-        std::vector<cv::Point2i> tr = triangles.at(i);
-        cv::Point2i center;
-        center.x = (tr.at(0).x + tr.at(1).x + tr.at(2).x) / 3;
-        center.y = (tr.at(0).y + tr.at(1).y + tr.at(2).y) / 3;
-        centroids.push_back(center);
-    }
-
-    return centroids;
-}
-
 double SandboxSetup::toDegrees(double radians){
     return radians * (180.0 / M_PI);
 }
diff --git a/src/tools/sandboxConfig.cpp b/src/tools/sandboxConfig.cpp
index 001a6337f21260e2ac7250de6331de56609e4fd1..557d7ba23b0bc4dfd54a86a26976a84a44318670 100644
--- a/src/tools/sandboxConfig.cpp
+++ b/src/tools/sandboxConfig.cpp
@@ -9,7 +9,7 @@ int SandboxConfig::saveAdjustingMatrixInto(char *path, cv::Mat matrix){
     YAML::Node vec = YAML::Load("[]");
     for (int y = 0; y < matrix.rows; y++){
         for (int x = 0; x < matrix.cols; x++){
-            vec.push_back((float)matrix.at<double>(y, x));
+            vec.push_back((float)matrix.at<float>(y, x));
         }
     }