diff --git a/env b/env
new file mode 100644
index 0000000000000000000000000000000000000000..18d6458a7847c26e155b173c82627be43f9bd3c4
--- /dev/null
+++ b/env
@@ -0,0 +1,13 @@
+# Rename this file to ".env"
+# Replace the value of these variables with your own container registry
+
+# ACR
+# CONTAINER_REGISTRY_ADDRESS="<username>.azurecr.io"
+# CONTAINER_REGISTRY_USERNAME="<username>"
+# CONTAINER_REGISTRY_PASSWORD="KpJ1e****************"
+
+# Docker Hub
+CONTAINER_REGISTRY_ADDRESS="<Docker ID>/<registry name>"
+CONTAINER_REGISTRY_ADDRESS_EDGE="https://index.docker.io/v1/"
+CONTAINER_REGISTRY_USERNAME="<Docker ID>"
+CONTAINER_REGISTRY_PASSWORD="3f123***************"
\ No newline at end of file
diff --git a/modules/CameraCapture/app/AnnotationParser.py b/modules/CameraCapture/app/AnnotationParser.py
new file mode 100644
index 0000000000000000000000000000000000000000..674505e962bbfadacb016158ad9b199a38e25e4f
--- /dev/null
+++ b/modules/CameraCapture/app/AnnotationParser.py
@@ -0,0 +1,71 @@
+# To make python 2 and python 3 compatible code
+from __future__ import absolute_import
+
+# Returns rectangle boundaries in the CV2 format (topLeftX, topLeftY, bottomRightX, bottomRightY) given by a processing service
+
+
+class AnnotationParser:
+    def getCV2RectanglesFromProcessingService1(self, response):
+        try:
+            listOfCV2Rectangles = []
+            for item in response["regions"]:
+                for decoration in item:
+                    if "box" in decoration.lower():
+                        rectList = item[decoration].split(",")
+                        top = int(rectList[0])
+                        left = int(rectList[1])
+                        width = int(rectList[2])
+                        height = int(rectList[3])
+                        for decorationProperty in item[decoration]:
+                            if "top" in decorationProperty.lower():
+                                top = int(item[decoration][decorationProperty])
+                            if "left" in decorationProperty.lower():
+                                left = int(item[decoration]
+                                           [decorationProperty])
+                            if "width" in decorationProperty.lower():
+                                width = int(item[decoration]
+                                            [decorationProperty])
+                            if "height" in decorationProperty.lower():
+                                height = int(item[decoration]
+                                             [decorationProperty])
+                        if top is not None and left is not None and width is not None and height is not None:
+                            topLeftX = left
+                            topLeftY = top
+                            bottomRightX = left + width
+                            bottomRightY = top + height
+                            listOfCV2Rectangles.append(
+                                [topLeftX, topLeftY, bottomRightX, bottomRightY])
+            return listOfCV2Rectangles
+        except:
+            # Ignoring exceptions for now so that video can be read and analyzed without post-processing in case of errors
+            pass
+
+    def getCV2RectanglesFromProcessingService2(self, response):
+        try:
+            listOfCV2Rectangles = []
+            for item in response:
+                for decoration in item:
+                    if "rect" in decoration.lower():
+                        for decorationProperty in item[decoration]:
+                            if "top" in decorationProperty.lower():
+                                top = int(item[decoration][decorationProperty])
+                            if "left" in decorationProperty.lower():
+                                left = int(item[decoration]
+                                           [decorationProperty])
+                            if "width" in decorationProperty.lower():
+                                width = int(item[decoration]
+                                            [decorationProperty])
+                            if "height" in decorationProperty.lower():
+                                height = int(item[decoration]
+                                             [decorationProperty])
+                        if top is not None and left is not None and width is not None and height is not None:
+                            topLeftX = left
+                            topLeftY = top
+                            bottomRightX = left + width
+                            bottomRightY = top + height
+                            listOfCV2Rectangles.append(
+                                [topLeftX, topLeftY, bottomRightX, bottomRightY])
+            return listOfCV2Rectangles
+        except:
+            # Ignoring exceptions for now so that video can be read and analyzed without post-processing in case of errors
+            pass
diff --git a/modules/CameraCapture/app/CameraCapture.py b/modules/CameraCapture/app/CameraCapture.py
new file mode 100644
index 0000000000000000000000000000000000000000..71429f0e1ad9bf55fc9f5f7060633a9021d8cea4
--- /dev/null
+++ b/modules/CameraCapture/app/CameraCapture.py
@@ -0,0 +1,303 @@
+#To make python 2 and python 3 compatible code
+from __future__ import division
+from __future__ import absolute_import
+
+#Imports
+import sys
+if sys.version_info[0] < 3:#e.g python version <3
+    import cv2
+else:
+    import cv2
+    from cv2 import cv2
+# pylint: disable=E1101
+# pylint: disable=E0401
+# Disabling linting that is not supported by Pylint for C extensions such as OpenCV. See issue https://github.com/PyCQA/pylint/issues/1955 
+import numpy
+import requests
+import json
+import time
+
+import os
+
+import VideoStream
+from VideoStream import VideoStream
+import AnnotationParser
+from AnnotationParser import AnnotationParser
+import ImageServer
+from ImageServer import ImageServer
+
+class CameraCapture(object):
+
+    def __IsInt(self,string):
+        try: 
+            int(string)
+            return True
+        except ValueError:
+            return False
+
+    def __init__(
+            self,
+            videoPath,
+            imageProcessingEndpoint = "",
+            imageUploadingEndpoint = "",
+            imageProcessingParams = "", 
+            showVideo = False, 
+            verbose = False,
+            loopVideo = True,
+            convertToGray = False,
+            resizeWidth = 0,
+            resizeHeight = 0,
+            annotate = False,
+            sendToHubCallback = None):
+        self.videoPath = videoPath
+        if self.__IsInt(videoPath):
+            #case of a usb camera (usually mounted at /dev/video* where * is an int)
+            self.isWebcam = True
+        else:
+            #case of a video file
+            self.isWebcam = False
+        self.imageProcessingEndpoint = imageProcessingEndpoint
+        self.imageUploadingEndpoint = imageUploadingEndpoint
+        if imageProcessingParams == "":
+            self.imageProcessingParams = "" 
+        else:
+            self.imageProcessingParams = json.loads(imageProcessingParams)
+        self.showVideo = showVideo
+        self.verbose = verbose
+        self.loopVideo = loopVideo
+        self.convertToGray = convertToGray
+        self.resizeWidth = resizeWidth
+        self.resizeHeight = resizeHeight
+        self.annotate = (self.imageProcessingEndpoint != "") and self.showVideo and annotate
+        self.nbOfPreprocessingSteps = 0
+        self.autoRotate = False
+        self.sendToHubCallback = sendToHubCallback
+        self.vs = None
+
+        if self.convertToGray:
+            self.nbOfPreprocessingSteps +=1
+        if self.resizeWidth != 0 or self.resizeHeight != 0:
+            self.nbOfPreprocessingSteps +=1
+        if self.verbose:
+            print("Initialising the camera capture with the following parameters: ")
+            print("   - Video path: " + self.videoPath)
+            print("   - Image processing endpoint: " + self.imageProcessingEndpoint)
+            print("   - Image processing params: " + json.dumps(self.imageProcessingParams))
+            print("   - Show video: " + str(self.showVideo))
+            print("   - Loop video: " + str(self.loopVideo))
+            print("   - Convert to gray: " + str(self.convertToGray))
+            print("   - Resize width: " + str(self.resizeWidth))
+            print("   - Resize height: " + str(self.resizeHeight))
+            print("   - Annotate: " + str(self.annotate))
+            print("   - Send processing results to hub: " + str(self.sendToHubCallback is not None))
+            print()
+        
+        self.displayFrame = None
+        if self.showVideo:
+            self.imageServer = ImageServer(5012, self)
+            self.imageServer.start()
+
+    def __annotate(self, frame, response):
+        AnnotationParserInstance = AnnotationParser()
+        #TODO: Make the choice of the service configurable
+        listOfRectanglesToDisplay = AnnotationParserInstance.getCV2RectanglesFromProcessingService1(response)
+        for rectangle in listOfRectanglesToDisplay:
+            cv2.rectangle(frame, (rectangle[0], rectangle[1]), (rectangle[2], rectangle[3]), (0,0,255),4)
+        return
+
+    def __sendFrameForProcessing(self, frame):
+        headers = {'Content-Type': 'application/octet-stream'}
+        try:
+            response = requests.post(self.imageProcessingEndpoint, headers = headers, params = self.imageProcessingParams, data = frame)
+        except Exception as e:
+            print('__sendFrameForProcessing Exception -' + str(e))
+            return "[]"
+
+        if self.verbose:
+            try:
+                print("Response from external processing service: (" + str(response.status_code) + ") " + json.dumps(response.json()))
+            except Exception:
+                print("Response from external processing service (status code): " + str(response.status_code))
+        return json.dumps(response.json())
+    
+    def __sendFrameForUploading(self, frame, uri):
+        headers = {'Content-Type': 'application/octet-stream'}
+        endpoint = self.imageUploadingEndpoint + uri
+        try:
+            response = requests.post(endpoint, headers = headers, params = self.imageProcessingParams, data = frame)
+        except Exception as e:
+            print('__sendFrameForUploading Exception -' + str(e))
+            return "[]"
+
+        if self.verbose:
+            try:
+                print("Response from external Uploading service: (" + str(response.status_code) + ") " + json.dumps(response.json()))
+            except Exception:
+                print("Response from external Uploading service (status code): " + str(response.status_code))
+        return response #json.dumps(response.json())
+
+    def __displayTimeDifferenceInMs(self, endTime, startTime):
+        return str(int((endTime-startTime) * 1000)) + " ms"
+
+    def __enter__(self):
+        if self.isWebcam:
+            #The VideoStream class always gives us the latest frame from the webcam. It uses another thread to read the frames.
+            self.vs = VideoStream(int(self.videoPath)).start()
+            time.sleep(1.0)#needed to load at least one frame into the VideoStream class
+            #self.capture = cv2.VideoCapture(int(self.videoPath))
+        else:
+            #In the case of a video file, we want to analyze all the frames of the video thus are not using VideoStream class
+            self.capture = cv2.VideoCapture(self.videoPath)
+        return self
+
+    def get_display_frame(self):
+        return self.displayFrame
+
+    def start(self):
+        frameCounter = 0
+        perfForOneFrameInMs = None
+        cpt_img = 0
+        while True:
+            if self.showVideo or self.verbose:
+                startOverall = time.time()
+            if self.verbose:
+                startCapture = time.time()
+
+            frameCounter +=1
+            if self.isWebcam:
+                frame = self.vs.read()
+            else:
+                frame = self.capture.read()[1]
+                if frameCounter == 1:
+                    if self.capture.get(cv2.CAP_PROP_FRAME_WIDTH) < self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT):
+                        self.autoRotate = True
+                if self.autoRotate:
+                    frame = cv2.rotate(frame, cv2.ROTATE_90_COUNTERCLOCKWISE) #The counterclockwise is random...It coudl well be clockwise. Is there a way to auto detect it?
+            if self.verbose:
+                if frameCounter == 1:
+                    if not self.isWebcam:
+                        print("Original frame size: " + str(int(self.capture.get(cv2.CAP_PROP_FRAME_WIDTH))) + "x" + str(int(self.capture.get(cv2.CAP_PROP_FRAME_HEIGHT))))
+                        print("Frame rate (FPS): " + str(int(self.capture.get(cv2.CAP_PROP_FPS))))
+                print("Frame number: " + str(frameCounter))
+                print("Time to capture (+ straighten up) a frame: " + self.__displayTimeDifferenceInMs(time.time(), startCapture))
+                startPreProcessing = time.time()
+            
+            #Loop video
+            if not self.isWebcam:             
+                if frameCounter == self.capture.get(cv2.CAP_PROP_FRAME_COUNT):
+                    if self.loopVideo: 
+                        frameCounter = 0
+                        self.capture.set(cv2.CAP_PROP_POS_FRAMES, 0)
+                    else:
+                        break
+
+            #Pre-process locally
+            if self.nbOfPreprocessingSteps == 1 and self.convertToGray:
+                preprocessedFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+            
+            if self.nbOfPreprocessingSteps == 1 and (self.resizeWidth != 0 or self.resizeHeight != 0):
+                preprocessedFrame = cv2.resize(frame, (self.resizeWidth, self.resizeHeight))
+
+            if self.nbOfPreprocessingSteps > 1:
+                preprocessedFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+                preprocessedFrame = cv2.resize(preprocessedFrame, (self.resizeWidth,self.resizeHeight))
+            
+            if self.verbose:
+                print("Time to pre-process a frame: " + self.__displayTimeDifferenceInMs(time.time(), startPreProcessing))
+                startEncodingForProcessing = time.time()
+
+            color_green = True
+            #Process externally
+            if self.imageProcessingEndpoint != "":
+
+                #Encode frame to send over HTTP
+                if self.nbOfPreprocessingSteps == 0:
+                    encodedFrame = cv2.imencode(".jpg", frame)[1].tobytes()
+                else:
+                    encodedFrame = cv2.imencode(".jpg", preprocessedFrame)[1].tobytes()
+
+                if self.verbose:
+                    print("Time to encode a frame for processing: " + self.__displayTimeDifferenceInMs(time.time(), startEncodingForProcessing))
+                    startProcessingExternally = time.time()
+
+                #Send over HTTP for processing
+                response = self.__sendFrameForProcessing(encodedFrame)
+                if self.verbose:
+                    print("Time to process frame externally: " + self.__displayTimeDifferenceInMs(time.time(), startProcessingExternally))
+                    startSendingToEdgeHub = time.time()
+
+                #forwarding outcome of external processing to the EdgeHub
+                if response != "[]" and self.sendToHubCallback is not None:
+                    # self.sendToHubCallback(response)
+                    print("abc {}".format(response))
+                    #accuracy = float(json.loads(response)['accuracy'])
+                    accuracy = float(json.loads(response)['predictions'][0]['probability'])
+                    acc_threshold = int(os.environ['THRESHOLD'])/100
+                    if accuracy < acc_threshold :
+                        color_green = False
+                        if self.verbose:
+                            print("abcd {}".format(accuracy))
+                        cpt_img +=1
+                        if(cpt_img > 4):
+                            cpt_img = 0
+                            # send image to storage blob
+                            response1 = self.__sendFrameForUploading(encodedFrame, "/data?ext=jpg")
+                            response2 = self.__sendFrameForUploading(response, "/data?ext=txt")
+                            if self.verbose and response1 != "[]" :
+                                print("Upload file : {}".format(response1.text))
+                        
+                    if self.verbose:
+                        print("Time to message from processing service to edgeHub: " + self.__displayTimeDifferenceInMs(time.time(), startSendingToEdgeHub))
+                        startDisplaying = time.time()
+
+            #Display frames
+            if self.showVideo:
+                try:
+                    if self.nbOfPreprocessingSteps == 0:
+                        if self.verbose and (perfForOneFrameInMs is not None):
+                            cv2.putText(frame, "FPS " + str(round(1000/perfForOneFrameInMs, 2)),(10, 35),cv2.FONT_HERSHEY_SIMPLEX,1.0,(0,0,255), 2)
+                            if response != "[]":
+                                if color_green:
+                                    cv2.putText(frame, "label  " + str(json.loads(response)['predictions'][0]['tagName']),(10, 70),cv2.FONT_HERSHEY_SIMPLEX,1.0,(0,0,255), 2)
+                                    cv2.putText(frame, "proba " + str(round(float(json.loads(response)['predictions'][0]['probability'])*100,1)) + "%",(10, 105),cv2.FONT_HERSHEY_SIMPLEX,1.0,(0,255,0), 2)
+                                else:
+                                    cv2.putText(frame, "label  " + str(json.loads(response)['predictions'][0]['tagName']),(10, 70),cv2.FONT_HERSHEY_SIMPLEX,1.0,(0,0,255), 2)
+                                    cv2.putText(frame, "proba " + str(round(float(json.loads(response)['predictions'][0]['probability'])*100,1)) + "%",(10, 105),cv2.FONT_HERSHEY_SIMPLEX,1.0,(0,0,255), 2)
+                        if self.annotate:
+                            #TODO: fix bug with annotate function
+                            self.__annotate(frame, response)
+                        self.displayFrame = cv2.imencode('.jpg', frame)[1].tobytes()
+                    else:
+                        if self.verbose and (perfForOneFrameInMs is not None):
+                            cv2.putText(preprocessedFrame, "FPS " + str(round(1000/perfForOneFrameInMs, 2)),(10, 35),cv2.FONT_HERSHEY_SIMPLEX,1.0,(0,0,255), 2)
+                        if self.annotate:
+                            #TODO: fix bug with annotate function
+                            self.__annotate(preprocessedFrame, response)
+                        self.displayFrame = cv2.imencode('.jpg', preprocessedFrame)[1].tobytes()
+                except Exception as e:
+                    print("Could not display the video to a web browser.") 
+                    print('Exception -' + str(e))
+                if self.verbose:
+                    if 'startDisplaying' in locals():
+                        print("Time to display frame: " + self.__displayTimeDifferenceInMs(time.time(), startDisplaying))
+                    elif 'startSendingToEdgeHub' in locals():
+                        print("Time to display frame: " + self.__displayTimeDifferenceInMs(time.time(), startSendingToEdgeHub))
+                    else:
+                        print("Time to display frame: " + self.__displayTimeDifferenceInMs(time.time(), startEncodingForProcessing))
+                perfForOneFrameInMs = int((time.time()-startOverall) * 1000)
+                if not self.isWebcam:
+                    waitTimeBetweenFrames = max(int(1000 / self.capture.get(cv2.CAP_PROP_FPS))-perfForOneFrameInMs, 1)
+                    print("Wait time between frames :" + str(waitTimeBetweenFrames))
+                    if cv2.waitKey(waitTimeBetweenFrames) & 0xFF == ord('q'):
+                        break
+
+            if self.verbose:
+                perfForOneFrameInMs = int((time.time()-startOverall) * 1000)
+                print("Total time for one frame: " + self.__displayTimeDifferenceInMs(time.time(), startOverall))
+
+    def __exit__(self, exception_type, exception_value, traceback):
+        if not self.isWebcam:
+            self.capture.release()
+        if self.showVideo:
+            self.imageServer.close()
+            cv2.destroyAllWindows()
\ No newline at end of file
diff --git a/modules/CameraCapture/app/FileUpload.py b/modules/CameraCapture/app/FileUpload.py
new file mode 100644
index 0000000000000000000000000000000000000000..41509ff908f793f1ba58ae799097589a7f6f52f5
--- /dev/null
+++ b/modules/CameraCapture/app/FileUpload.py
@@ -0,0 +1,180 @@
+import os
+import asyncio
+from azure.iot.device.aio import IoTHubDeviceClient
+from azure.core.exceptions import AzureError
+from azure.storage.blob import BlobClient
+import sys
+
+#CONNECTION_STRING = "[Device Connection String]"
+#PATH_TO_FILE = r"[Full path to local file]"
+CONNECTION_STRING = "HostName=<your-iot-hub>.azure-devices.net;DeviceId=<device-id>;SharedAccessKey=<redacted - do not commit secrets>"
+PATH_TO_FILE = r"./templates/index.html"
+#/home/harrond/Documents/azure/MNIST/modules/CameraCapture/app/templates/index.html
+
+class FileUpload(object):
+    def __init__(self):
+        print ( "IoT Hub file upload init")
+        conn_str = CONNECTION_STRING
+
+        device_client = IoTHubDeviceClient.create_from_connection_string(conn_str)
+
+        # Connect the client
+        #await device_client.connect()
+
+    async def store_blob(blob_info, file_name):
+        try:
+            sas_url = "https://{}/{}/{}{}".format(
+                blob_info["hostName"],
+                blob_info["containerName"],
+                blob_info["blobName"],
+                blob_info["sasToken"]
+            )
+            print("\nUploading file: {} to Azure Storage as blob: {} in container {}\n".format(file_name, blob_info["blobName"], blob_info["containerName"]))
+
+            # Upload the specified file
+            with BlobClient.from_blob_url(sas_url) as blob_client:
+                with open(file_name, "rb") as f:
+                    result = blob_client.upload_blob(f, overwrite=True)
+                    return (True, result)
+
+        except FileNotFoundError as ex:
+            # catch file not found and add an HTTP status code to return in notification to IoT Hub
+            ex.status_code = 404
+            return (False, ex)
+
+        except AzureError as ex:
+            # catch Azure errors that might result from the upload operation
+            return (False, ex)
+
+    async def upload(path_to_file):
+        try:
+            file_name = path_to_file
+            blob_name = os.path.basename(file_name)
+            # Get the storage info for the blob
+            storage_info = await device_client.get_storage_info_for_blob(blob_name)
+
+            # Upload to blob
+            success, result = await store_blob(storage_info, file_name)
+
+            if success == True:
+                print("Upload succeeded. Result is: \n") 
+                print(result)
+                print()
+
+                await device_client.notify_blob_upload_status(
+                    storage_info["correlationId"], True, 200, "OK: {}".format(file_name)
+                )
+
+            else :
+                # If the upload was not successful, the result is the exception object
+                print("Upload failed. Exception is: \n") 
+                print(result)
+                print()
+
+                await device_client.notify_blob_upload_status(
+                    storage_info["correlationId"], False, result.status_code, str(result)
+                )
+
+        except Exception as ex:
+            print("\nException:")
+            print(ex)
+
+        finally:
+            # Finally, disconnect the client
+            await device_client.disconnect()
+
+
+
+async def store_blob(blob_info, file_name):
+    try:
+        sas_url = "https://{}/{}/{}{}".format(
+            blob_info["hostName"],
+            blob_info["containerName"],
+            blob_info["blobName"],
+            blob_info["sasToken"]
+        )
+
+        print("\nUploading file: {} to Azure Storage as blob: {} in container {}\n".format(file_name, blob_info["blobName"], blob_info["containerName"]))
+
+        # Upload the specified file
+        with BlobClient.from_blob_url(sas_url) as blob_client:
+            with open(file_name, "rb") as f:
+                result = blob_client.upload_blob(f, overwrite=True)
+                return (True, result)
+
+    except FileNotFoundError as ex:
+        # catch file not found and add an HTTP status code to return in notification to IoT Hub
+        ex.status_code = 404
+        return (False, ex)
+
+    except AzureError as ex:
+        # catch Azure errors that might result from the upload operation
+        return (False, ex)
+
+async def main():
+    try:
+        print ( "IoT Hub file upload sample, press Ctrl-C to exit" )
+
+        conn_str = CONNECTION_STRING
+        file_name = PATH_TO_FILE
+        blob_name = os.path.basename(file_name)
+
+        device_client = IoTHubDeviceClient.create_from_connection_string(conn_str)
+
+        # Connect the client
+        await device_client.connect()
+
+        # Get the storage info for the blob
+        storage_info = await device_client.get_storage_info_for_blob(blob_name)
+
+        # Upload to blob
+        success, result = await store_blob(storage_info, file_name)
+
+        if success == True:
+            print("Upload succeeded. Result is: \n") 
+            print(result)
+            print()
+
+            await device_client.notify_blob_upload_status(
+                storage_info["correlationId"], True, 200, "OK: {}".format(file_name)
+            )
+
+        else :
+            # If the upload was not successful, the result is the exception object
+            print("Upload failed. Exception is: \n") 
+            print(result)
+            print()
+
+            await device_client.notify_blob_upload_status(
+                storage_info["correlationId"], False, result.status_code, str(result)
+            )
+
+    except Exception as ex:
+        print("\nException:")
+        print(ex)
+
+    except KeyboardInterrupt:
+        print ( "\nIoTHubDeviceClient sample stopped" )
+
+    finally:
+        # Finally, disconnect the client
+        await device_client.disconnect()
+
+
+if __name__ == "__main__":
+    #asyncio.run(main())
+    print("Python version")
+    print (sys.version)
+    loop = asyncio.get_event_loop()
+    loop.run_until_complete(main())
+    loop.close()
+
+    #blob = BlobClient.from_connection_string(conn_str="DefaultEndpointsProtocol=https;AccountName=<account-name>;AccountKey=<redacted - do not commit secrets>;EndpointSuffix=core.windows.net", container_name="imagesfail", blob_name="my_blob")
+    #with open("./templates/index.html", "rb") as data:
+    #    blob.upload_blob(data)
+
+
+    #print("Python version")
+    #print (sys.version)
+    #print("Version info.")
+    #print (sys.version_info)
diff --git a/modules/CameraCapture/app/ImageServer.py b/modules/CameraCapture/app/ImageServer.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a9b0a82b2b0ae5c105eae98e8ed1021559272d4
--- /dev/null
+++ b/modules/CameraCapture/app/ImageServer.py
@@ -0,0 +1,62 @@
+# Base on work from https://github.com/Bronkoknorb/PyImageStream
+#import trollius as asyncio
+import asyncio
+import tornado.ioloop
+import tornado.web
+import tornado.websocket
+import threading
+import base64
+import os
+
+
+class ImageStreamHandler(tornado.websocket.WebSocketHandler):
+    def initialize(self, camera):
+        self.clients = []
+        self.camera = camera
+
+    def check_origin(self, origin):
+        return True
+
+    def open(self):
+        self.clients.append(self)
+        print("Image Server Connection::opened")
+
+    def on_message(self, msg):
+        if msg == 'next':
+            frame = self.camera.get_display_frame()
+            if frame is not None:
+                encoded = base64.b64encode(frame)
+                self.write_message(encoded, binary=False)
+
+    def on_close(self):
+        self.clients.remove(self)
+        print("Image Server Connection::closed")
+
+
+class ImageServer(threading.Thread):
+
+    def __init__(self, port, cameraObj):
+        threading.Thread.__init__(self)
+        self.daemon = True
+        self.port = port
+        self.camera = cameraObj
+
+    def run(self):
+        try:
+            asyncio.set_event_loop(asyncio.new_event_loop())
+
+            indexPath = os.path.join(os.path.dirname(
+                os.path.realpath(__file__)), 'templates')
+            app = tornado.web.Application([
+                (r"/stream", ImageStreamHandler, {'camera': self.camera}),
+                (r"/(.*)", tornado.web.StaticFileHandler,
+                 {'path': indexPath, 'default_filename': 'index.html'})
+            ])
+            app.listen(self.port)
+            print('ImageServer::Started.')
+            tornado.ioloop.IOLoop.current().start()
+        except Exception as e:
+            print('ImageServer::exited run loop. Exception - ' + str(e))
+
+    def close(self):
+        print('ImageServer::Closed.')
diff --git a/modules/CameraCapture/app/VideoStream.py b/modules/CameraCapture/app/VideoStream.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c0647e6e2af9daf9a89bc305cd8f424c14d2122
--- /dev/null
+++ b/modules/CameraCapture/app/VideoStream.py
@@ -0,0 +1,72 @@
+# To make python 2 and python 3 compatible code
+from __future__ import absolute_import
+
+from threading import Thread
+import sys
+if sys.version_info[0] < 3:  # e.g python version <3
+    import cv2
+else:
+    import cv2
+    from cv2 import cv2
+# pylint: disable=E1101
+# pylint: disable=E0401
+# Disabling linting that is not supported by Pylint for C extensions such as OpenCV. See issue https://github.com/PyCQA/pylint/issues/1955
+
+
+# import the Queue class from Python 3
+if sys.version_info >= (3, 0):
+    from queue import Queue
+# otherwise, import the Queue class for Python 2.7
+else:
+    from Queue import Queue
+
+# This class reads all the video frames in a separate thread and always has the keeps only the latest frame in its queue to be grabbed by another thread
+
+
+class VideoStream(object):
+    def __init__(self, path, queueSize=3):
+        self.stream = cv2.VideoCapture(path)
+        self.stopped = False
+        self.Q = Queue(maxsize=queueSize)
+
+    def start(self):
+        # start a thread to read frames from the video stream
+        t = Thread(target=self.update, args=())
+        t.daemon = True
+        t.start()
+        return self
+
+    def update(self):
+        try:
+            while True:
+                if self.stopped:
+                    return
+
+                if not self.Q.full():
+                    (grabbed, frame) = self.stream.read()
+
+                    # if the `grabbed` boolean is `False`, then we have
+                    # reached the end of the video file
+                    if not grabbed:
+                        self.stop()
+                        return
+
+                    self.Q.put(frame)
+
+                    # Clean the queue to keep only the latest frame
+                    while self.Q.qsize() > 1:
+                        self.Q.get()
+        except Exception as e:
+            print("got error: "+str(e))
+
+    def read(self):
+        return self.Q.get()
+
+    def more(self):
+        return self.Q.qsize() > 0
+
+    def stop(self):
+        self.stopped = True
+
+    def __exit__(self, exception_type, exception_value, traceback):
+        self.stream.release()
diff --git a/modules/CameraCapture/app/main.py b/modules/CameraCapture/app/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b62b71e3c08ef43185d924bc71699e7a5b9041b
--- /dev/null
+++ b/modules/CameraCapture/app/main.py
@@ -0,0 +1,141 @@
+# Copyright (c) Microsoft. All rights reserved.
+# Licensed under the MIT license. See LICENSE file in the project root for
+# full license information.
+
+import os
+import random
+import sys
+import time
+
+#import iothub_client
+# pylint: disable=E0611
+# Disabling linting that is not supported by Pylint for C extensions such as iothub_client. See issue https://github.com/PyCQA/pylint/issues/1955
+#from iothub_client import (IoTHubModuleClient, IoTHubClientError, IoTHubError,
+#                           IoTHubMessage, IoTHubMessageDispositionResult,
+#                           IoTHubTransportProvider)
+from azure.iot.device import IoTHubModuleClient 
+
+import CameraCapture
+from CameraCapture import CameraCapture
+
+
+# global counters
+SEND_CALLBACKS = 0
+
+
def send_to_Hub_callback(strMessage):
    # Forward a message produced by the camera pipeline to the Edge Hub.
    # Sending is currently disabled (stubbed out); the legacy iothub_client
    # calls are kept below for reference.
    #message = IoTHubMessage(bytearray(strMessage, 'utf8'))
    #hubManager.send_event_to_output("output1", message, 0)
    pass
+
+# Callback received when the message that we're forwarding is processed.
+
+
def send_confirmation_callback(message, result, user_context):
    # Count forwarded messages; intended to be invoked by the IoT Hub client
    # once a queued message has been processed.
    global SEND_CALLBACKS
    SEND_CALLBACKS += 1
+
+
class HubManager(object):
    # Thin wrapper around the IoT Hub module client used to send events to an
    # Edge Hub output queue.

    def __init__(
            self,
            messageTimeout,
            protocol,
            verbose):
        '''
        Communicate with the Edge Hub

        :param int messageTimeout: the maximum time in milliseconds until a message times out. The timeout period starts at IoTHubClient.send_event_async. By default, messages do not expire.
        :param IoTHubTransportProvider protocol: Choose HTTP, AMQP or MQTT as transport protocol.  Currently only MQTT is supported.
        :param bool verbose: set to true to get detailed logs on messages
        '''
        self.messageTimeout = messageTimeout
        self.client_protocol = protocol
        # NOTE(review): this mirrors the legacy iothub_client API. The
        # azure-iot-device SDK imported above creates clients via factory
        # classmethods (e.g. IoTHubModuleClient.create_from_edge_environment())
        # rather than `IoTHubModuleClient()` + create_from_environment/
        # set_option -- verify these calls against the installed SDK version.
        self.client = IoTHubModuleClient()
        self.client.create_from_environment(protocol)
        self.client.set_option("messageTimeout", self.messageTimeout)
        self.client.set_option("product_info", "edge-camera-capture")
        if verbose:
            self.client.set_option("logtrace", 1)  # enables MQTT logging

    def send_event_to_output(self, outputQueueName, event, send_context):
        # NOTE(review): reads the module-level VERBOSE set in __main__, not the
        # `verbose` flag passed to __init__ -- confirm this is intentional.
        if VERBOSE:
            print("send message")
        self.client.send_event_async(
            outputQueueName, event, send_confirmation_callback, send_context)
+
+
def main(
        videoPath,
        imageProcessingEndpoint="",
        imageUploadingEndpoint="",
        imageProcessingParams="",
        showVideo=False,
        verbose=False,
        loopVideo=True,
        convertToGray=False,
        resizeWidth=0,
        resizeHeight=0,
        annotate=False
):
    '''
    Capture a camera feed, send it to processing and forward outputs to EdgeHub

    :param int videoPath: camera device path such as /dev/video0 or a test video file such as /TestAssets/myvideo.avi. Mandatory.
    :param str imageProcessingEndpoint: service endpoint to send the frames to for processing. Example: "http://face-detect-service:8080". Leave empty when no external processing is needed (Default). Optional.
    :param str imageUploadingEndpoint: service endpoint to upload captured images to. Empty by default. Optional.
    :param str imageProcessingParams: query parameters to send to the processing service. Example: "'returnLabels': 'true'". Empty by default. Optional.
    :param bool showVideo: show the video in a window. False by default. Optional.
    :param bool verbose: show detailed logs and perf timers. False by default. Optional.
    :param bool loopVideo: when reading from a video file, it will loop this video. True by default. Optional.
    :param bool convertToGray: convert to gray before sending to external service for processing. False by default. Optional.
    :param int resizeWidth: resize frame width before sending to external service for processing. Does not resize by default (0). Optional.
    :param int resizeHeight: resize frame height before sending to external service for processing. Does not resize by default (0). Optional.
    :param bool annotate: when showing the video in a window, it will annotate the frames with rectangles given by the image processing service. False by default. Optional. Rectangles should be passed in a json blob with a key containing the string rectangle, and a top left corner + bottom right corner or top left corner with width and height.
    '''
    try:
        print("\nPython %s\n" % sys.version)
        print("Camera Capture Azure IoT Edge Module. Press Ctrl-C to exit.")
        try:
            global hubManager
            # Hub connection is currently disabled; re-enable when the IoT Hub
            # client is wired back in.
            #hubManager = HubManager(
            #    10000, IoTHubTransportProvider.MQTT, verbose)
        except Exception as iothub_error:
            # Bug fix: the original `except error` referenced an undefined
            # name, which would itself raise NameError if anything failed here.
            print("Unexpected error %s from IoTHub" % iothub_error)
            return
        with CameraCapture(videoPath, imageProcessingEndpoint, imageUploadingEndpoint, imageProcessingParams, showVideo, verbose, loopVideo, convertToGray, resizeWidth, resizeHeight, annotate, send_to_Hub_callback) as cameraCapture:
            cameraCapture.start()
    except KeyboardInterrupt:
        print("Camera capture module stopped")
+
+
def __convertStringToBool(env):
    """Map a configuration string to a boolean, raising on unknown values."""
    truthy = {'True', 'TRUE', '1', 'y', 'YES', 'Y', 'Yes'}
    falsy = {'False', 'FALSE', '0', 'n', 'NO', 'N', 'No'}
    if env in truthy:
        return True
    if env in falsy:
        return False
    raise ValueError('Could not convert string to bool.')
+
+
if __name__ == '__main__':
    try:
        # VIDEO_PATH has no default: if the variable is unset, os.environ
        # raises KeyError, which is NOT caught by the ValueError handler
        # below and will surface as a traceback.
        VIDEO_PATH = os.environ['VIDEO_PATH']
        IMAGE_PROCESSING_ENDPOINT = os.getenv('IMAGE_PROCESSING_ENDPOINT', "")
        IMAGE_UPLOADING_ENDPOINT = os.getenv('IMAGE_UPLOADING_ENDPOINT', "")
        IMAGE_PROCESSING_PARAMS = os.getenv('IMAGE_PROCESSING_PARAMS', "")
        SHOW_VIDEO = __convertStringToBool(os.getenv('SHOW_VIDEO', 'False'))
        VERBOSE = __convertStringToBool(os.getenv('VERBOSE', 'False'))
        LOOP_VIDEO = __convertStringToBool(os.getenv('LOOP_VIDEO', 'True'))
        CONVERT_TO_GRAY = __convertStringToBool(
            os.getenv('CONVERT_TO_GRAY', 'False'))
        RESIZE_WIDTH = int(os.getenv('RESIZE_WIDTH', 0))
        RESIZE_HEIGHT = int(os.getenv('RESIZE_HEIGHT', 0))
        ANNOTATE = __convertStringToBool(os.getenv('ANNOTATE', 'False'))

    except ValueError as error:
        # Raised by __convertStringToBool (or int()) on malformed values.
        print(error)
        sys.exit(1)

    main(VIDEO_PATH, IMAGE_PROCESSING_ENDPOINT, IMAGE_UPLOADING_ENDPOINT, IMAGE_PROCESSING_PARAMS, SHOW_VIDEO,
         VERBOSE, LOOP_VIDEO, CONVERT_TO_GRAY, RESIZE_WIDTH, RESIZE_HEIGHT, ANNOTATE)
diff --git a/modules/CameraCapture/app/templates/index.html b/modules/CameraCapture/app/templates/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..2f27674cc56e38b04093eef997ec10e1ee40147a
--- /dev/null
+++ b/modules/CameraCapture/app/templates/index.html
@@ -0,0 +1,27 @@
<html>
  <head>
    <title>Video Stream</title>
  </head>
  <body style="background-color:#222;">
    <h1>Video Stream</h1>
    <img id="currentImage" style="border:2px solid teal;height:700px;">
    <script>
      // Pull-based streaming: the page requests frames one at a time over a
      // WebSocket and shows each as a base64-encoded image in the <img> tag.

      var img = document.getElementById("currentImage");
      var ws = new WebSocket("ws://" + location.host + "/stream");

      ws.onopen = function() {
          console.log("connection was established");
          ws.send("next");
      };

      ws.onmessage = function(msg) {
          img.src = 'data:image/png;base64, ' + msg.data;
      };

      // Ask for the next frame only after the current one has rendered,
      // which throttles the stream to the client's display rate.
      img.onload = function() {
        ws.send("next");
      }
    </script>
  </body>
</html>
diff --git a/modules/CameraCapture/arm32v7.Dockerfile b/modules/CameraCapture/arm32v7.Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..21dee5d3cbba1e07c2440b28247557b5162ef25d
--- /dev/null
+++ b/modules/CameraCapture/arm32v7.Dockerfile
@@ -0,0 +1,57 @@
FROM balenalib/raspberrypi4-64-python:3.9
# Balena base image (Raspberry Pi 4, 64-bit) with Python 3.9 preinstalled.
# Raspbian Stretch required for piwheels support. https://downloads.raspberrypi.org/raspbian/images/raspbian-2019-04-09/

# Enforces cross-compilation through QEMU
RUN [ "cross-build-start" ]

RUN echo "BUILD MODULE: CameraCapture"

# Update package index and install dependencies
#RUN install_packages \
#    python3 \
#    python3-pip \
#    python3-dev \
#    build-essential \

# Bug fix: plain `apt upgrade` prompts for confirmation and aborts in a
# non-interactive docker build; use apt-get with -y.
RUN apt-get update && apt-get upgrade -y
# Required for OpenCV
RUN install_packages \
    # Hierarchical Data Format
    libhdf5-dev libhdf5-serial-dev \
    # for image files
    #libjpeg-dev libtiff5-dev libjasper-dev libpng-dev \
    # for video files
    libavcodec-dev libavformat-dev libswscale-dev libv4l-dev 
    # for gui
    #libqt4-test libqtgui4 libqtwebkit4 libgtk2.0-dev \
    # high def image processing
    #libilmbase-dev libopenexr-dev
#RUN apt install libgl1
#RUN apt-get install libilmbase-dev libopenexr-dev
RUN install_packages libgl-dev

# Install Python packages
RUN pip install --upgrade pip
RUN pip install --upgrade wheel
RUN pip install --upgrade setuptools
RUN pip install --upgrade requests
#RUN install_packages python-requests
COPY /build/arm32v7-requirements.txt ./
#RUN pip3 install --upgrade setuptools
#RUN pip install --index-url=https://www.piwheels.org/simple -r arm32v7-requirements.txt
RUN pip install -r arm32v7-requirements.txt
#RUN pip install iothub-client

# Cleanup
RUN rm -rf /var/lib/apt/lists/* \
    && apt-get -y autoremove

RUN [ "cross-build-end" ]

ADD /app/ .

# Expose the port
EXPOSE 5012

ENTRYPOINT [ "python3", "-u", "./main.py" ]
diff --git a/modules/CameraCapture/arm32v7.Dockerfile.old b/modules/CameraCapture/arm32v7.Dockerfile.old
new file mode 100644
index 0000000000000000000000000000000000000000..38e08ed2846aaeb906ce767b69dbb81dc5b192f4
--- /dev/null
+++ b/modules/CameraCapture/arm32v7.Dockerfile.old
@@ -0,0 +1,54 @@
FROM balenalib/raspberrypi3:stretch
# The balena base image for building apps on Raspberry Pi 3. 
# Raspbian Stretch required for piwheels support. https://downloads.raspberrypi.org/raspbian/images/raspbian-2019-04-09/

# Enforces cross-compilation through QEMU
RUN [ "cross-build-start" ]

RUN echo "BUILD MODULE: CameraCapture"

# Update package index and install dependencies
RUN install_packages \
    python3 \
    python3-pip \
    python3-dev \
    build-essential \
    libopenjp2-7-dev \
    zlib1g-dev \
    libatlas-base-dev \
    wget \
    libboost-python1.62.0 \
    curl \
    libcurl4-openssl-dev

# Required for OpenCV
RUN install_packages \
    # Hierarchical Data Format
    libhdf5-dev libhdf5-serial-dev \
    # for image files
    libjpeg-dev libtiff5-dev libjasper-dev libpng-dev \
    # for video files
    libavcodec-dev libavformat-dev libswscale-dev libv4l-dev \
    # for gui
    libqt4-test libqtgui4 libqtwebkit4 libgtk2.0-dev \
    # high def image processing
    libilmbase-dev libopenexr-dev

# Install Python packages
COPY /build/arm32v7-requirements.txt ./
RUN pip3 install --upgrade pip
RUN pip3 install --upgrade setuptools
RUN pip3 install --index-url=https://www.piwheels.org/simple -r arm32v7-requirements.txt

# Cleanup
RUN rm -rf /var/lib/apt/lists/* \
    && apt-get -y autoremove

RUN [ "cross-build-end" ]  

ADD /app/ .

# Expose the port
EXPOSE 5012

ENTRYPOINT [ "python3", "-u", "./main.py" ]
diff --git a/modules/CameraCapture/build/amd64-requirements.txt b/modules/CameraCapture/build/amd64-requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..786658d7104174e580596f79a1dff86ec0a901d4
--- /dev/null
+++ b/modules/CameraCapture/build/amd64-requirements.txt
@@ -0,0 +1,6 @@
+azure-iothub-device-client
+numpy
+opencv-contrib-python
+requests
+trollius
+tornado==4.5.3
\ No newline at end of file
diff --git a/modules/CameraCapture/build/arm32v7-requirements.txt b/modules/CameraCapture/build/arm32v7-requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9c997ff3d63dc43bd8ac451eda34e2d48d5b4574
--- /dev/null
+++ b/modules/CameraCapture/build/arm32v7-requirements.txt
@@ -0,0 +1,10 @@
+#azure-iot-deviceupdate
+#azure-iothub-device-client
+numpy
+opencv-contrib-python==4.5.5.64
+requests
+trollius
+tornado==4.5.3
+azure-iot-device
+azure.storage.blob
+asyncio
\ No newline at end of file
diff --git a/modules/CameraCapture/module.json b/modules/CameraCapture/module.json
new file mode 100644
index 0000000000000000000000000000000000000000..5296bfb76a5b7e310eb1d41dd9fa440d9f6624d4
--- /dev/null
+++ b/modules/CameraCapture/module.json
@@ -0,0 +1,16 @@
+{
+    "$schema-version": "0.0.1",
+    "description": "",
+    "image": {
+        "repository": "$CONTAINER_REGISTRY_ADDRESS",
+        "tag": {
+            "version": "cameracapture_2022.2.10",
+            "platforms": {
+                "arm32v7": "./arm32v7.Dockerfile"
+            }
+        },
+        "buildOptions": []
+    },
+    
+    "language": "python"
+}
\ No newline at end of file
diff --git a/modules/CoralVision/app/app.py b/modules/CoralVision/app/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..af1e78505e8e180d76302729666b11ebd9c740f3
--- /dev/null
+++ b/modules/CoralVision/app/app.py
@@ -0,0 +1,82 @@
+
+import json
+import os
+import io
+
+# Imports for the REST API
+from flask import Flask, request, jsonify
+
+# Imports for image procesing
+from PIL import Image
+
+# Imports for prediction
+from predict import initialize, predict_image, predict_url
+
# Flask application hosting the prediction REST API.
app = Flask(__name__)

# 4MB Max image size limit
app.config['MAX_CONTENT_LENGTH'] = 4 * 1024 * 1024 
+
# Default route just shows simple text
@app.route('/')
def index():
    # Simple landing/liveness response; no prediction logic here.
    return 'CustomVision.ai model host harness'
+
# Like the CustomVision.ai Prediction service /image route handles either
#     - octet-stream image file 
#     - a multipart/form-data with files in the imageData parameter
@app.route('/image', methods=['POST'])
@app.route('/<project>/image', methods=['POST'])
@app.route('/<project>/image/nostore', methods=['POST'])
@app.route('/<project>/classify/iterations/<publishedName>/image', methods=['POST'])
@app.route('/<project>/classify/iterations/<publishedName>/image/nostore', methods=['POST'])
@app.route('/<project>/detect/iterations/<publishedName>/image', methods=['POST'])
@app.route('/<project>/detect/iterations/<publishedName>/image/nostore', methods=['POST'])
def predict_image_handler(project=None, publishedName=None):
    # The <project>/<publishedName> URL segments are accepted for
    # CustomVision API compatibility but are not used by the prediction call.
    try:
        imageData = None
        # Accept the image from a file upload, a form field, or the raw body.
        if ('imageData' in request.files):
            imageData = request.files['imageData']
        elif ('imageData' in request.form):
            imageData = request.form['imageData']
        else:
            imageData = io.BytesIO(request.get_data())

        img = Image.open(imageData)
        results = predict_image(img)
        return jsonify(results)
    except Exception as e:
        print('EXCEPTION:', str(e))
        return 'Error processing image', 500
+
+
# Like the CustomVision.ai Prediction service /url route handles URLs
# in the body of the request of the form:
#     { 'url': '<http url>'}
@app.route('/url', methods=['POST'])
@app.route('/<project>/url', methods=['POST'])
@app.route('/<project>/url/nostore', methods=['POST'])
@app.route('/<project>/classify/iterations/<publishedName>/url', methods=['POST'])
@app.route('/<project>/classify/iterations/<publishedName>/url/nostore', methods=['POST'])
@app.route('/<project>/detect/iterations/<publishedName>/url', methods=['POST'])
@app.route('/<project>/detect/iterations/<publishedName>/url/nostore', methods=['POST'])
def predict_url_handler(project=None, publishedName=None):
    # The <project>/<publishedName> URL segments are accepted for
    # CustomVision API compatibility but are not used by the prediction call.
    try:
        # Request body is JSON of the form {"url": "<http url>"}.
        image_url = json.loads(request.get_data().decode('utf-8'))['url']
        results = predict_url(image_url)
        return jsonify(results)
    except Exception as e:
        print('EXCEPTION:', str(e))
        # Consistency fix: return HTTP 500 like the /image handler, instead
        # of a 200 response carrying an error body.
        return 'Error processing image', 500
+
if __name__ == '__main__':
    # Load and initialize the model; ML_MODEL == "0" selects the MNIST model.
    mnist_model = os.environ['ML_MODEL'] == "0"
    initialize(mnist_model)

    # Run the server
    app.run(host='0.0.0.0', port=80)
+
diff --git a/modules/CoralVision/app/common.py b/modules/CoralVision/app/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9b446458f15598c7563bc71004f461668e2b103
--- /dev/null
+++ b/modules/CoralVision/app/common.py
@@ -0,0 +1,51 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Common utilities."""
+import numpy as np
+import tflite_runtime.interpreter as tflite
+
+EDGETPU_SHARED_LIB = 'libedgetpu.so.1'
+#EDGETPU_SHARED_LIB = 'delegate.so'
+
def make_interpreter(model_file):
    """Create a TFLite interpreter that uses the Edge TPU delegate.

    ``model_file`` may carry an optional device suffix separated by ``@``
    (e.g. ``model.tflite@usb:0``); the part after ``@`` is passed to the
    delegate as its ``device`` option.
    """
    model_file, *device = model_file.split('@')
    return tflite.Interpreter(
      model_path=model_file,
      experimental_delegates=[
          tflite.load_delegate(EDGETPU_SHARED_LIB,
                               {'device': device[0]} if device else {})
      ])
+
def input_image_size(interpreter):
    """Returns input image size as (width, height, channels) tuple."""
    shape = interpreter.get_input_details()[0]['shape']
    _, height, width, channels = shape
    return width, height, channels
+
def input_tensor(interpreter):
    """Returns input tensor view as numpy array of shape (height, width, 3)."""
    index = interpreter.get_input_details()[0]['index']
    return interpreter.tensor(index)()[0]
+
def output_tensor(interpreter, i):
    """Returns dequantized output tensor if quantized before."""
    details = interpreter.get_output_details()[i]
    data = np.squeeze(interpreter.tensor(details['index'])())
    if 'quantization' not in details:
        # No quantization metadata: hand back the raw values.
        return data
    scale, zero_point = details['quantization']
    # scale == 0 marks an unquantized tensor; only shift by the zero point.
    return data - zero_point if scale == 0 else scale * (data - zero_point)
+
diff --git a/modules/CoralVision/app/labels.txt b/modules/CoralVision/app/labels.txt
new file mode 100644
index 0000000000000000000000000000000000000000..9696fbb42b81bf32986160f1319d3b040c42f70e
--- /dev/null
+++ b/modules/CoralVision/app/labels.txt
@@ -0,0 +1,1001 @@
+background
+tench, Tinca tinca
+goldfish, Carassius auratus
+great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias
+tiger shark, Galeocerdo cuvieri
+hammerhead, hammerhead shark
+electric ray, crampfish, numbfish, torpedo
+stingray
+cock
+hen
+ostrich, Struthio camelus
+brambling, Fringilla montifringilla
+goldfinch, Carduelis carduelis
+house finch, linnet, Carpodacus mexicanus
+junco, snowbird
+indigo bunting, indigo finch, indigo bird, Passerina cyanea
+robin, American robin, Turdus migratorius
+bulbul
+jay
+magpie
+chickadee
+water ouzel, dipper
+kite
+bald eagle, American eagle, Haliaeetus leucocephalus
+vulture
+great grey owl, great gray owl, Strix nebulosa
+European fire salamander, Salamandra salamandra
+common newt, Triturus vulgaris
+eft
+spotted salamander, Ambystoma maculatum
+axolotl, mud puppy, Ambystoma mexicanum
+bullfrog, Rana catesbeiana
+tree frog, tree-frog
+tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui
+loggerhead, loggerhead turtle, Caretta caretta
+leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea
+mud turtle
+terrapin
+box turtle, box tortoise
+banded gecko
+common iguana, iguana, Iguana iguana
+American chameleon, anole, Anolis carolinensis
+whiptail, whiptail lizard
+agama
+frilled lizard, Chlamydosaurus kingi
+alligator lizard
+Gila monster, Heloderma suspectum
+green lizard, Lacerta viridis
+African chameleon, Chamaeleo chamaeleon
+Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis
+African crocodile, Nile crocodile, Crocodylus niloticus
+American alligator, Alligator mississipiensis
+triceratops
+thunder snake, worm snake, Carphophis amoenus
+ringneck snake, ring-necked snake, ring snake
+hognose snake, puff adder, sand viper
+green snake, grass snake
+king snake, kingsnake
+garter snake, grass snake
+water snake
+vine snake
+night snake, Hypsiglena torquata
+boa constrictor, Constrictor constrictor
+rock python, rock snake, Python sebae
+Indian cobra, Naja naja
+green mamba
+sea snake
+horned viper, cerastes, sand viper, horned asp, Cerastes cornutus
+diamondback, diamondback rattlesnake, Crotalus adamanteus
+sidewinder, horned rattlesnake, Crotalus cerastes
+trilobite
+harvestman, daddy longlegs, Phalangium opilio
+scorpion
+black and gold garden spider, Argiope aurantia
+barn spider, Araneus cavaticus
+garden spider, Aranea diademata
+black widow, Latrodectus mactans
+tarantula
+wolf spider, hunting spider
+tick
+centipede
+black grouse
+ptarmigan
+ruffed grouse, partridge, Bonasa umbellus
+prairie chicken, prairie grouse, prairie fowl
+peacock
+quail
+partridge
+African grey, African gray, Psittacus erithacus
+macaw
+sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita
+lorikeet
+coucal
+bee eater
+hornbill
+hummingbird
+jacamar
+toucan
+drake
+red-breasted merganser, Mergus serrator
+goose
+black swan, Cygnus atratus
+tusker
+echidna, spiny anteater, anteater
+platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus
+wallaby, brush kangaroo
+koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus
+wombat
+jellyfish
+sea anemone, anemone
+brain coral
+flatworm, platyhelminth
+nematode, nematode worm, roundworm
+conch
+snail
+slug
+sea slug, nudibranch
+chiton, coat-of-mail shell, sea cradle, polyplacophore
+chambered nautilus, pearly nautilus, nautilus
+Dungeness crab, Cancer magister
+rock crab, Cancer irroratus
+fiddler crab
+king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica
+American lobster, Northern lobster, Maine lobster, Homarus americanus
+spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish
+crayfish, crawfish, crawdad, crawdaddy
+hermit crab
+isopod
+white stork, Ciconia ciconia
+black stork, Ciconia nigra
+spoonbill
+flamingo
+little blue heron, Egretta caerulea
+American egret, great white heron, Egretta albus
+bittern
+crane
+limpkin, Aramus pictus
+European gallinule, Porphyrio porphyrio
+American coot, marsh hen, mud hen, water hen, Fulica americana
+bustard
+ruddy turnstone, Arenaria interpres
+red-backed sandpiper, dunlin, Erolia alpina
+redshank, Tringa totanus
+dowitcher
+oystercatcher, oyster catcher
+pelican
+king penguin, Aptenodytes patagonica
+albatross, mollymawk
+grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus
+killer whale, killer, orca, grampus, sea wolf, Orcinus orca
+dugong, Dugong dugon
+sea lion
+Chihuahua
+Japanese spaniel
+Maltese dog, Maltese terrier, Maltese
+Pekinese, Pekingese, Peke
+Shih-Tzu
+Blenheim spaniel
+papillon
+toy terrier
+Rhodesian ridgeback
+Afghan hound, Afghan
+basset, basset hound
+beagle
+bloodhound, sleuthhound
+bluetick
+black-and-tan coonhound
+Walker hound, Walker foxhound
+English foxhound
+redbone
+borzoi, Russian wolfhound
+Irish wolfhound
+Italian greyhound
+whippet
+Ibizan hound, Ibizan Podenco
+Norwegian elkhound, elkhound
+otterhound, otter hound
+Saluki, gazelle hound
+Scottish deerhound, deerhound
+Weimaraner
+Staffordshire bullterrier, Staffordshire bull terrier
+American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier
+Bedlington terrier
+Border terrier
+Kerry blue terrier
+Irish terrier
+Norfolk terrier
+Norwich terrier
+Yorkshire terrier
+wire-haired fox terrier
+Lakeland terrier
+Sealyham terrier, Sealyham
+Airedale, Airedale terrier
+cairn, cairn terrier
+Australian terrier
+Dandie Dinmont, Dandie Dinmont terrier
+Boston bull, Boston terrier
+miniature schnauzer
+giant schnauzer
+standard schnauzer
+Scotch terrier, Scottish terrier, Scottie
+Tibetan terrier, chrysanthemum dog
+silky terrier, Sydney silky
+soft-coated wheaten terrier
+West Highland white terrier
+Lhasa, Lhasa apso
+flat-coated retriever
+curly-coated retriever
+golden retriever
+Labrador retriever
+Chesapeake Bay retriever
+German short-haired pointer
+vizsla, Hungarian pointer
+English setter
+Irish setter, red setter
+Gordon setter
+Brittany spaniel
+clumber, clumber spaniel
+English springer, English springer spaniel
+Welsh springer spaniel
+cocker spaniel, English cocker spaniel, cocker
+Sussex spaniel
+Irish water spaniel
+kuvasz
+schipperke
+groenendael
+malinois
+briard
+kelpie
+komondor
+Old English sheepdog, bobtail
+Shetland sheepdog, Shetland sheep dog, Shetland
+collie
+Border collie
+Bouvier des Flandres, Bouviers des Flandres
+Rottweiler
+German shepherd, German shepherd dog, German police dog, alsatian
+Doberman, Doberman pinscher
+miniature pinscher
+Greater Swiss Mountain dog
+Bernese mountain dog
+Appenzeller
+EntleBucher
+boxer
+bull mastiff
+Tibetan mastiff
+French bulldog
+Great Dane
+Saint Bernard, St Bernard
+Eskimo dog, husky
+malamute, malemute, Alaskan malamute
+Siberian husky
+dalmatian, coach dog, carriage dog
+affenpinscher, monkey pinscher, monkey dog
+basenji
+pug, pug-dog
+Leonberg
+Newfoundland, Newfoundland dog
+Great Pyrenees
+Samoyed, Samoyede
+Pomeranian
+chow, chow chow
+keeshond
+Brabancon griffon
+Pembroke, Pembroke Welsh corgi
+Cardigan, Cardigan Welsh corgi
+toy poodle
+miniature poodle
+standard poodle
+Mexican hairless
+timber wolf, grey wolf, gray wolf, Canis lupus
+white wolf, Arctic wolf, Canis lupus tundrarum
+red wolf, maned wolf, Canis rufus, Canis niger
+coyote, prairie wolf, brush wolf, Canis latrans
+dingo, warrigal, warragal, Canis dingo
+dhole, Cuon alpinus
+African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus
+hyena, hyaena
+red fox, Vulpes vulpes
+kit fox, Vulpes macrotis
+Arctic fox, white fox, Alopex lagopus
+grey fox, gray fox, Urocyon cinereoargenteus
+tabby, tabby cat
+tiger cat
+Persian cat
+Siamese cat, Siamese
+Egyptian cat
+cougar, puma, catamount, mountain lion, painter, panther, Felis concolor
+lynx, catamount
+leopard, Panthera pardus
+snow leopard, ounce, Panthera uncia
+jaguar, panther, Panthera onca, Felis onca
+lion, king of beasts, Panthera leo
+tiger, Panthera tigris
+cheetah, chetah, Acinonyx jubatus
+brown bear, bruin, Ursus arctos
+American black bear, black bear, Ursus americanus, Euarctos americanus
+ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus
+sloth bear, Melursus ursinus, Ursus ursinus
+mongoose
+meerkat, mierkat
+tiger beetle
+ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle
+ground beetle, carabid beetle
+long-horned beetle, longicorn, longicorn beetle
+leaf beetle, chrysomelid
+dung beetle
+rhinoceros beetle
+weevil
+fly
+bee
+ant, emmet, pismire
+grasshopper, hopper
+cricket
+walking stick, walkingstick, stick insect
+cockroach, roach
+mantis, mantid
+cicada, cicala
+leafhopper
+lacewing, lacewing fly
+dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk
+damselfly
+admiral
+ringlet, ringlet butterfly
+monarch, monarch butterfly, milkweed butterfly, Danaus plexippus
+cabbage butterfly
+sulphur butterfly, sulfur butterfly
+lycaenid, lycaenid butterfly
+starfish, sea star
+sea urchin
+sea cucumber, holothurian
+wood rabbit, cottontail, cottontail rabbit
+hare
+Angora, Angora rabbit
+hamster
+porcupine, hedgehog
+fox squirrel, eastern fox squirrel, Sciurus niger
+marmot
+beaver
+guinea pig, Cavia cobaya
+sorrel
+zebra
+hog, pig, grunter, squealer, Sus scrofa
+wild boar, boar, Sus scrofa
+warthog
+hippopotamus, hippo, river horse, Hippopotamus amphibius
+ox
+water buffalo, water ox, Asiatic buffalo, Bubalus bubalis
+bison
+ram, tup
+bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis
+ibex, Capra ibex
+hartebeest
+impala, Aepyceros melampus
+gazelle
+Arabian camel, dromedary, Camelus dromedarius
+llama
+weasel
+mink
+polecat, fitch, foulmart, foumart, Mustela putorius
+black-footed ferret, ferret, Mustela nigripes
+otter
+skunk, polecat, wood pussy
+badger
+armadillo
+three-toed sloth, ai, Bradypus tridactylus
+orangutan, orang, orangutang, Pongo pygmaeus
+gorilla, Gorilla gorilla
+chimpanzee, chimp, Pan troglodytes
+gibbon, Hylobates lar
+siamang, Hylobates syndactylus, Symphalangus syndactylus
+guenon, guenon monkey
+patas, hussar monkey, Erythrocebus patas
+baboon
+macaque
+langur
+colobus, colobus monkey
+proboscis monkey, Nasalis larvatus
+marmoset
+capuchin, ringtail, Cebus capucinus
+howler monkey, howler
+titi, titi monkey
+spider monkey, Ateles geoffroyi
+squirrel monkey, Saimiri sciureus
+Madagascar cat, ring-tailed lemur, Lemur catta
+indri, indris, Indri indri, Indri brevicaudatus
+Indian elephant, Elephas maximus
+African elephant, Loxodonta africana
+lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens
+giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca
+barracouta, snoek
+eel
+coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch
+rock beauty, Holocanthus tricolor
+anemone fish
+sturgeon
+gar, garfish, garpike, billfish, Lepisosteus osseus
+lionfish
+puffer, pufferfish, blowfish, globefish
+abacus
+abaya
+academic gown, academic robe, judge's robe
+accordion, piano accordion, squeeze box
+acoustic guitar
+aircraft carrier, carrier, flattop, attack aircraft carrier
+airliner
+airship, dirigible
+altar
+ambulance
+amphibian, amphibious vehicle
+analog clock
+apiary, bee house
+apron
+ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin
+assault rifle, assault gun
+backpack, back pack, knapsack, packsack, rucksack, haversack
+bakery, bakeshop, bakehouse
+balance beam, beam
+balloon
+ballpoint, ballpoint pen, ballpen, Biro
+Band Aid
+banjo
+bannister, banister, balustrade, balusters, handrail
+barbell
+barber chair
+barbershop
+barn
+barometer
+barrel, cask
+barrow, garden cart, lawn cart, wheelbarrow
+baseball
+basketball
+bassinet
+bassoon
+bathing cap, swimming cap
+bath towel
+bathtub, bathing tub, bath, tub
+beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon
+beacon, lighthouse, beacon light, pharos
+beaker
+bearskin, busby, shako
+beer bottle
+beer glass
+bell cote, bell cot
+bib
+bicycle-built-for-two, tandem bicycle, tandem
+bikini, two-piece
+binder, ring-binder
+binoculars, field glasses, opera glasses
+birdhouse
+boathouse
+bobsled, bobsleigh, bob
+bolo tie, bolo, bola tie, bola
+bonnet, poke bonnet
+bookcase
+bookshop, bookstore, bookstall
+bottlecap
+bow
+bow tie, bow-tie, bowtie
+brass, memorial tablet, plaque
+brassiere, bra, bandeau
+breakwater, groin, groyne, mole, bulwark, seawall, jetty
+breastplate, aegis, egis
+broom
+bucket, pail
+buckle
+bulletproof vest
+bullet train, bullet
+butcher shop, meat market
+cab, hack, taxi, taxicab
+caldron, cauldron
+candle, taper, wax light
+cannon
+canoe
+can opener, tin opener
+cardigan
+car mirror
+carousel, carrousel, merry-go-round, roundabout, whirligig
+carpenter's kit, tool kit
+carton
+car wheel
+cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM
+cassette
+cassette player
+castle
+catamaran
+CD player
+cello, violoncello
+cellular telephone, cellular phone, cellphone, cell, mobile phone
+chain
+chainlink fence
+chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour
+chain saw, chainsaw
+chest
+chiffonier, commode
+chime, bell, gong
+china cabinet, china closet
+Christmas stocking
+church, church building
+cinema, movie theater, movie theatre, movie house, picture palace
+cleaver, meat cleaver, chopper
+cliff dwelling
+cloak
+clog, geta, patten, sabot
+cocktail shaker
+coffee mug
+coffeepot
+coil, spiral, volute, whorl, helix
+combination lock
+computer keyboard, keypad
+confectionery, confectionary, candy store
+container ship, containership, container vessel
+convertible
+corkscrew, bottle screw
+cornet, horn, trumpet, trump
+cowboy boot
+cowboy hat, ten-gallon hat
+cradle
+crane
+crash helmet
+crate
+crib, cot
+Crock Pot
+croquet ball
+crutch
+cuirass
+dam, dike, dyke
+desk
+desktop computer
+dial telephone, dial phone
+diaper, nappy, napkin
+digital clock
+digital watch
+dining table, board
+dishrag, dishcloth
+dishwasher, dish washer, dishwashing machine
+disk brake, disc brake
+dock, dockage, docking facility
+dogsled, dog sled, dog sleigh
+dome
+doormat, welcome mat
+drilling platform, offshore rig
+drum, membranophone, tympan
+drumstick
+dumbbell
+Dutch oven
+electric fan, blower
+electric guitar
+electric locomotive
+entertainment center
+envelope
+espresso maker
+face powder
+feather boa, boa
+file, file cabinet, filing cabinet
+fireboat
+fire engine, fire truck
+fire screen, fireguard
+flagpole, flagstaff
+flute, transverse flute
+folding chair
+football helmet
+forklift
+fountain
+fountain pen
+four-poster
+freight car
+French horn, horn
+frying pan, frypan, skillet
+fur coat
+garbage truck, dustcart
+gasmask, respirator, gas helmet
+gas pump, gasoline pump, petrol pump, island dispenser
+goblet
+go-kart
+golf ball
+golfcart, golf cart
+gondola
+gong, tam-tam
+gown
+grand piano, grand
+greenhouse, nursery, glasshouse
+grille, radiator grille
+grocery store, grocery, food market, market
+guillotine
+hair slide
+hair spray
+half track
+hammer
+hamper
+hand blower, blow dryer, blow drier, hair dryer, hair drier
+hand-held computer, hand-held microcomputer
+handkerchief, hankie, hanky, hankey
+hard disc, hard disk, fixed disk
+harmonica, mouth organ, harp, mouth harp
+harp
+harvester, reaper
+hatchet
+holster
+home theater, home theatre
+honeycomb
+hook, claw
+hoopskirt, crinoline
+horizontal bar, high bar
+horse cart, horse-cart
+hourglass
+iPod
+iron, smoothing iron
+jack-o'-lantern
+jean, blue jean, denim
+jeep, landrover
+jersey, T-shirt, tee shirt
+jigsaw puzzle
+jinrikisha, ricksha, rickshaw
+joystick
+kimono
+knee pad
+knot
+lab coat, laboratory coat
+ladle
+lampshade, lamp shade
+laptop, laptop computer
+lawn mower, mower
+lens cap, lens cover
+letter opener, paper knife, paperknife
+library
+lifeboat
+lighter, light, igniter, ignitor
+limousine, limo
+liner, ocean liner
+lipstick, lip rouge
+Loafer
+lotion
+loudspeaker, speaker, speaker unit, loudspeaker system, speaker system
+loupe, jeweler's loupe
+lumbermill, sawmill
+magnetic compass
+mailbag, postbag
+mailbox, letter box
+maillot
+maillot, tank suit
+manhole cover
+maraca
+marimba, xylophone
+mask
+matchstick
+maypole
+maze, labyrinth
+measuring cup
+medicine chest, medicine cabinet
+megalith, megalithic structure
+microphone, mike
+microwave, microwave oven
+military uniform
+milk can
+minibus
+miniskirt, mini
+minivan
+missile
+mitten
+mixing bowl
+mobile home, manufactured home
+Model T
+modem
+monastery
+monitor
+moped
+mortar
+mortarboard
+mosque
+mosquito net
+motor scooter, scooter
+mountain bike, all-terrain bike, off-roader
+mountain tent
+mouse, computer mouse
+mousetrap
+moving van
+muzzle
+nail
+neck brace
+necklace
+nipple
+notebook, notebook computer
+obelisk
+oboe, hautboy, hautbois
+ocarina, sweet potato
+odometer, hodometer, mileometer, milometer
+oil filter
+organ, pipe organ
+oscilloscope, scope, cathode-ray oscilloscope, CRO
+overskirt
+oxcart
+oxygen mask
+packet
+paddle, boat paddle
+paddlewheel, paddle wheel
+padlock
+paintbrush
+pajama, pyjama, pj's, jammies
+palace
+panpipe, pandean pipe, syrinx
+paper towel
+parachute, chute
+parallel bars, bars
+park bench
+parking meter
+passenger car, coach, carriage
+patio, terrace
+pay-phone, pay-station
+pedestal, plinth, footstall
+pencil box, pencil case
+pencil sharpener
+perfume, essence
+Petri dish
+photocopier
+pick, plectrum, plectron
+pickelhaube
+picket fence, paling
+pickup, pickup truck
+pier
+piggy bank, penny bank
+pill bottle
+pillow
+ping-pong ball
+pinwheel
+pirate, pirate ship
+pitcher, ewer
+plane, carpenter's plane, woodworking plane
+planetarium
+plastic bag
+plate rack
+plow, plough
+plunger, plumber's helper
+Polaroid camera, Polaroid Land camera
+pole
+police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria
+poncho
+pool table, billiard table, snooker table
+pop bottle, soda bottle
+pot, flowerpot
+potter's wheel
+power drill
+prayer rug, prayer mat
+printer
+prison, prison house
+projectile, missile
+projector
+puck, hockey puck
+punching bag, punch bag, punching ball, punchball
+purse
+quill, quill pen
+quilt, comforter, comfort, puff
+racer, race car, racing car
+racket, racquet
+radiator
+radio, wireless
+radio telescope, radio reflector
+rain barrel
+recreational vehicle, RV, R.V.
+reel
+reflex camera
+refrigerator, icebox
+remote control, remote
+restaurant, eating house, eating place, eatery
+revolver, six-gun, six-shooter
+rifle
+rocking chair, rocker
+rotisserie
+rubber eraser, rubber, pencil eraser
+rugby ball
+rule, ruler
+running shoe
+safe
+safety pin
+saltshaker, salt shaker
+sandal
+sarong
+sax, saxophone
+scabbard
+scale, weighing machine
+school bus
+schooner
+scoreboard
+screen, CRT screen
+screw
+screwdriver
+seat belt, seatbelt
+sewing machine
+shield, buckler
+shoe shop, shoe-shop, shoe store
+shoji
+shopping basket
+shopping cart
+shovel
+shower cap
+shower curtain
+ski
+ski mask
+sleeping bag
+slide rule, slipstick
+sliding door
+slot, one-armed bandit
+snorkel
+snowmobile
+snowplow, snowplough
+soap dispenser
+soccer ball
+sock
+solar dish, solar collector, solar furnace
+sombrero
+soup bowl
+space bar
+space heater
+space shuttle
+spatula
+speedboat
+spider web, spider's web
+spindle
+sports car, sport car
+spotlight, spot
+stage
+steam locomotive
+steel arch bridge
+steel drum
+stethoscope
+stole
+stone wall
+stopwatch, stop watch
+stove
+strainer
+streetcar, tram, tramcar, trolley, trolley car
+stretcher
+studio couch, day bed
+stupa, tope
+submarine, pigboat, sub, U-boat
+suit, suit of clothes
+sundial
+sunglass
+sunglasses, dark glasses, shades
+sunscreen, sunblock, sun blocker
+suspension bridge
+swab, swob, mop
+sweatshirt
+swimming trunks, bathing trunks
+swing
+switch, electric switch, electrical switch
+syringe
+table lamp
+tank, army tank, armored combat vehicle, armoured combat vehicle
+tape player
+teapot
+teddy, teddy bear
+television, television system
+tennis ball
+thatch, thatched roof
+theater curtain, theatre curtain
+thimble
+thresher, thrasher, threshing machine
+throne
+tile roof
+toaster
+tobacco shop, tobacconist shop, tobacconist
+toilet seat
+torch
+totem pole
+tow truck, tow car, wrecker
+toyshop
+tractor
+trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi
+tray
+trench coat
+tricycle, trike, velocipede
+trimaran
+tripod
+triumphal arch
+trolleybus, trolley coach, trackless trolley
+trombone
+tub, vat
+turnstile
+typewriter keyboard
+umbrella
+unicycle, monocycle
+upright, upright piano
+vacuum, vacuum cleaner
+vase
+vault
+velvet
+vending machine
+vestment
+viaduct
+violin, fiddle
+volleyball
+waffle iron
+wall clock
+wallet, billfold, notecase, pocketbook
+wardrobe, closet, press
+warplane, military plane
+washbasin, handbasin, washbowl, lavabo, wash-hand basin
+washer, automatic washer, washing machine
+water bottle
+water jug
+water tower
+whiskey jug
+whistle
+wig
+window screen
+window shade
+Windsor tie
+wine bottle
+wing
+wok
+wooden spoon
+wool, woolen, woollen
+worm fence, snake fence, snake-rail fence, Virginia fence
+wreck
+yawl
+yurt
+web site, website, internet site, site
+comic book
+crossword puzzle, crossword
+street sign
+traffic light, traffic signal, stoplight
+book jacket, dust cover, dust jacket, dust wrapper
+menu
+plate
+guacamole
+consomme
+hot pot, hotpot
+trifle
+ice cream, icecream
+ice lolly, lolly, lollipop, popsicle
+French loaf
+bagel, beigel
+pretzel
+cheeseburger
+hotdog, hot dog, red hot
+mashed potato
+head cabbage
+broccoli
+cauliflower
+zucchini, courgette
+spaghetti squash
+acorn squash
+butternut squash
+cucumber, cuke
+artichoke, globe artichoke
+bell pepper
+cardoon
+mushroom
+Granny Smith
+strawberry
+orange
+lemon
+fig
+pineapple, ananas
+banana
+jackfruit, jak, jack
+custard apple
+pomegranate
+hay
+carbonara
+chocolate sauce, chocolate syrup
+dough
+meat loaf, meatloaf
+pizza, pizza pie
+potpie
+burrito
+red wine
+espresso
+cup
+eggnog
+alp
+bubble
+cliff, drop, drop-off
+coral reef
+geyser
+lakeside, lakeshore
+promontory, headland, head, foreland
+sandbar, sand bar
+seashore, coast, seacoast, sea-coast
+valley, vale
+volcano
+ballplayer, baseball player
+groom, bridegroom
+scuba diver
+rapeseed
+daisy
+yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum
+corn
+acorn
+hip, rose hip, rosehip
+buckeye, horse chestnut, conker
+coral fungus
+agaric
+gyromitra
+stinkhorn, carrion fungus
+earthstar
+hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa
+bolete
+ear, spike, capitulum
+toilet tissue, toilet paper, bathroom tissue
diff --git a/modules/CoralVision/app/mnist_labels.txt b/modules/CoralVision/app/mnist_labels.txt
new file mode 100644
index 0000000000000000000000000000000000000000..aaf8637e1537ca397dd08f078cc62aa9ef1e07da
--- /dev/null
+++ b/modules/CoralVision/app/mnist_labels.txt
@@ -0,0 +1,10 @@
+eight
+five
+four
+nine
+one
+seven
+six
+three
+two
+zero
\ No newline at end of file
diff --git a/modules/CoralVision/app/mnist_model.tflite b/modules/CoralVision/app/mnist_model.tflite
new file mode 100644
index 0000000000000000000000000000000000000000..ac3a142c0cbd9e64c5b23a1d17eaf9bd79a98d6d
Binary files /dev/null and b/modules/CoralVision/app/mnist_model.tflite differ
diff --git a/modules/CoralVision/app/model.tflite b/modules/CoralVision/app/model.tflite
new file mode 100644
index 0000000000000000000000000000000000000000..9b78e04db00702d2995b65a05b872e981e0041a9
Binary files /dev/null and b/modules/CoralVision/app/model.tflite differ
diff --git a/modules/CoralVision/app/predict.py b/modules/CoralVision/app/predict.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4947ea2bb0597961f8311bd56a60a43f23bd9db
--- /dev/null
+++ b/modules/CoralVision/app/predict.py
@@ -0,0 +1,195 @@
+
+# from edgetpu.classification.engine import ClassificationEngine
+#from edgetpu.utils import dataset_utils
+from pycoral.utils.dataset import read_label_file
+from PIL import Image
+import collections
+from collections import deque
+import common
+import io
+import numpy as np
+import operator
+import tflite_runtime.interpreter as tflite
+import time
+
+from urllib.request import urlopen
+from datetime import datetime
+import sys
+
+
# Module-level state populated by initialize(): the label map and the TFLite
# interpreter used by predict_image().
# NOTE(review): `global` at module scope is a no-op — these three statements
# have no effect; the names are actually created by assignment in initialize().
global labels
global interpreter
global new_model


# (category id, confidence score) pair produced by get_output().
Category = collections.namedtuple('Category', ['id', 'score'])
+
def input_tensor(interpreter):
    """Return a view of the model's input tensor as a numpy array of shape (height, width, 3)."""
    index = interpreter.get_input_details()[0]['index']
    tensor = interpreter.tensor(index)()
    return tensor[0]
+
def get_output(interpreter, top_k, score_threshold):
    """Return at most top_k Category(id, score) entries with score >= score_threshold, best first."""
    scores = common.output_tensor(interpreter, 0)
    # argpartition puts the top_k highest scores in the last top_k slots.
    top_indices = np.argpartition(scores, -top_k)[-top_k:]
    categories = []
    for idx in top_indices:
        if scores[idx] >= score_threshold:
            categories.append(Category(idx, scores[idx]))
    categories.sort(key=operator.itemgetter(1), reverse=True)
    return categories
+
+
def initialize(mnist=True):
    """Load labels and a TFLite model into the module-level `labels` and `interpreter`.

    mnist: when True (default) load the bundled MNIST model; otherwise load
    the exported Custom Vision model (model.tflite / labels.txt).
    """
    global labels, interpreter
    print('Loading model...')
    if mnist:
        label_filename, model_filename = 'mnist_labels.txt', 'mnist_model.tflite'
    else:
        label_filename, model_filename = 'labels.txt', 'model.tflite'
    labels = read_label_file(label_filename)
    interpreter = common.make_interpreter(model_filename)
    interpreter.allocate_tensors()
+
+
def log_msg(msg):
    """Print msg prefixed with the current timestamp."""
    print(f"{datetime.now()}: {msg}")
+
+
def extract_and_resize_to_256_square(image):
    """Resize a numpy image (H x W x C) to 256x256.

    NOTE(review): `use_opencv`, `cv2` and `extract_and_resize` are not defined
    anywhere in this module, so calling this function raises NameError — it
    appears carried over from another code base. It is currently unused (the
    call in predict_image is commented out). TODO: remove or restore the
    missing helpers before relying on it.
    """
    h, w = image.shape[:2]
    log_msg("crop_center: " + str(w) + "x" + str(h) +" and resize to " + str(256) + "x" + str(256))
    if use_opencv:
        return cv2.resize(image, (256, 256), interpolation = cv2.INTER_LINEAR)
    else:
        return extract_and_resize(image, (256, 256))
+
+
def crop_center(img, cropx, cropy):
    """Return the central cropx-by-cropy region of img (numpy array, rows x cols).

    If the requested crop exceeds the image size, the start offsets clamp to 0
    and the returned slice is simply smaller than requested.
    """
    h, w = img.shape[:2]
    x0 = max(0, w // 2 - cropx // 2)
    y0 = max(0, h // 2 - cropy // 2)
    log_msg("crop_center: " + str(w) + "x" + str(h) + " to " + str(cropx) + "x" + str(cropy))
    return img[y0:y0 + cropy, x0:x0 + cropx]
+
+
def resize_down_to_1600_max_dim(image):
    """Downscale a PIL image so its largest dimension is at most 1600 px.

    Aspect ratio is preserved. Images already within 1600x1600 are returned
    unchanged; otherwise a resized PIL image is returned.
    """
    w, h = image.size
    if h < 1600 and w < 1600:
        return image

    # Scale the longer side down to exactly 1600, keeping the aspect ratio.
    new_size = (1600 * w // h, 1600) if (h > w) else (1600, 1600 * h // w)
    log_msg("resize: " + str(w) + "x" + str(h) + " to " + str(new_size[0]) + "x" + str(new_size[1]))

    # Bug fix: the original branched on `use_opencv`, a name never defined in
    # this module (and cv2 is not imported), so every image larger than 1600px
    # raised NameError. Always take the PIL path.
    # BILINEAR is adequate for mild shrinks; BICUBIC for aggressive ones.
    if max(new_size) / max(image.size) >= 0.5:
        method = Image.BILINEAR
    else:
        method = Image.BICUBIC
    return image.resize(new_size, method)
+
+
def convert_to_nparray(image):
    """Convert a PIL image to a numpy array with channels reordered RGB -> BGR."""
    log_msg("Convert to numpy array")
    arr = np.array(image)
    # Index (2, 1, 0) reverses the channel axis, swapping R and B.
    return arr[:, :, (2, 1, 0)]
+
+
def update_orientation(image):
    """Apply the EXIF Orientation tag (0x0112) to a PIL image, if present.

    Normalises camera rotation by transposing/flipping so later processing
    can ignore EXIF metadata. Images without EXIF data (or without the tag)
    are returned unchanged.
    """
    EXIF_ORIENTATION_TAG = 0x0112
    if not hasattr(image, '_getexif'):
        return image
    exif = image._getexif()
    if exif is None or EXIF_ORIENTATION_TAG not in exif:
        return image

    orientation = exif.get(EXIF_ORIENTATION_TAG, 1)
    log_msg('Image has EXIF Orientation: ' + str(orientation))
    # Shift the 1-based EXIF value to 0-based, then decompose it into an
    # optional transpose plus vertical/horizontal flips.
    orientation -= 1
    if orientation >= 4:
        image = image.transpose(Image.TRANSPOSE)
    if orientation in (2, 3, 6, 7):
        image = image.transpose(Image.FLIP_TOP_BOTTOM)
    if orientation in (1, 2, 5, 6):
        image = image.transpose(Image.FLIP_LEFT_RIGHT)
    return image
+
+
def predict_url(imageUrl):
    """Download the image at imageUrl and return predict_image()'s response for it."""
    log_msg("Predicting from url: " + imageUrl)
    with urlopen(imageUrl) as testImage:
        return predict_image(Image.open(testImage))
+
+
def predict_image(image):
    """Run the loaded TFLite classifier on a PIL image.

    Pipeline: apply EXIF orientation, cap the largest dimension at 1600px,
    convert to a BGR numpy array, centre-crop to the model's input size, run
    inference, and return a CustomVision-style response dict whose
    'predictions' list holds tagName/probability entries for the top 3 scores.

    Requires initialize() to have populated the module-level `interpreter`
    and `labels` first.
    """
    global interpreter
    global labels

    log_msg('Predicting image')
    w,h = image.size
    log_msg("Image size: " + str(w) + "x" + str(h))
    # Model input geometry comes from the interpreter itself.
    width, height, channels = common.input_image_size(interpreter)
    # print(width, height, channels)
    # Update orientation based on EXIF tags
    image = update_orientation(image)

    # If the image has either w or h greater than 1600 we resize it down respecting
    # aspect ratio such that the largest dimension is 1600
    image = resize_down_to_1600_max_dim(image)

    # Convert image to numpy array
    image = convert_to_nparray(image)

    # Crop the center square and resize that square down to 256x256
    # (the 256x256 step is currently disabled; the raw array is used as-is)
    resized_image = image # extract_and_resize_to_256_square(image)

    # Crop the center for the specified network_input_Size
    cropped_image = crop_center(resized_image, width, height)

    # Copy the crop into the interpreter's input tensor and run inference.
    # NOTE(review): assumes the cropped array has exactly width*height*channels
    # elements — a smaller source image would make this reshape fail.
    common.input_tensor(interpreter)[:,:] = np.reshape(cropped_image, (common.input_image_size(interpreter)))
    interpreter.invoke()
    results = get_output(interpreter, top_k=3, score_threshold=0)
    annotate_text = ''
    result_rep = []
    # Build both a human-readable summary and the structured prediction list.
    for result in results:
        annotate_text += '\n{:.0f}% {}'.format(100*result[1], labels[result[0]])
        result_rep.append({
                        'tagName': "{}".format(labels[result[0]]),
                        'probability': "{}".format(result[1]),
                        'tagId': '',
                        'boundingBox': None })
    print(annotate_text)

    #result = []
    #for p, label in zip(predictions, labels):
    #    truncated_probablity = np.float64(round(p,8))
    #    if truncated_probablity > 1e-8:
    #        result.append({
    #                    'tagName': label,
    #                    'probability': truncated_probablity,
    #                    'tagId': '',
    #                    'boundingBox': None })

    # CustomVision-compatible response envelope; id/project/iteration are
    # intentionally left blank for this offline harness.
    response = { 
        'id': '',
        'project': '',
        'iteration': '',
        'created': datetime.utcnow().isoformat(),
        'predictions': result_rep
        #'accuracy' : "{}".format(results[0][1])
    }

    log_msg("Results: " + str(response))
    return response
+
+
diff --git a/modules/CoralVision/arm32v7.Dockerfile b/modules/CoralVision/arm32v7.Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..df0973d8ce8e40b83cdb1242f3961d77e2191810
--- /dev/null
+++ b/modules/CoralVision/arm32v7.Dockerfile
@@ -0,0 +1,35 @@
+#FROM balenalib/raspberrypi3-debian-python:3.7
+FROM balenalib/raspberrypi4-64-python:3.9
+
+# Enforces cross-compilation through Quemu
+RUN [ "cross-build-start" ]
+
+RUN echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list
+RUN curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+RUN sudo apt-get update
+RUN sudo apt dist-upgrade 
+#RUN sudo apt-get install libedgetpu1-legacy-std python3-edgetpu
+#RUN sudo apt install python3-pycoral
+RUN install_packages python3-pycoral
+RUN install_packages libedgetpu1-std
+
+#RUN pip install https://dl.google.com/coral/python/tflite_runtime-2.1.0.post1-cp37-cp37m-linux_armv7l.whl
+RUN pip install numpy
+RUN pip install flask 
+#pillow --index-url 'https://www.piwheels.org/simple'
+RUN pip install --upgrade pip
+RUN pip install pillow
+
+
+COPY app /app
+
+# Expose the port
+EXPOSE 80
+
+# Set the working directory
+WORKDIR /app
+
+RUN [ "cross-build-end" ]
+
+# Run the flask server for the endpoints
+CMD python -u app.py
\ No newline at end of file
diff --git a/modules/CoralVision/cvexport.manifest b/modules/CoralVision/cvexport.manifest
new file mode 100644
index 0000000000000000000000000000000000000000..a6362d21c7aaeadc6e522f881fe82ca01fce31b3
--- /dev/null
+++ b/modules/CoralVision/cvexport.manifest
@@ -0,0 +1,12 @@
+{
+  "DomainType": "Classification",
+  "Platform": "TensorFlow",
+  "Flavor": "TensorFlowLite",
+  "ExporterVersion": "2.0",
+  "ExportedDate": "2020-03-31T16:18:34.3848291Z",
+  "IterationId": "6e942ec6-a67f-4e5d-a49c-41d79fb9e753",
+  "ModelFileName": "model.tflite",
+  "LabelFileName": "labels.txt",
+  "ModelFileSHA1": "5ee7dda434b9166bfbcda659830f324723750839",
+  "SchemaVersion": "1.0"
+}
\ No newline at end of file
diff --git a/modules/CoralVision/module.json b/modules/CoralVision/module.json
new file mode 100644
index 0000000000000000000000000000000000000000..c5f7cd3b17cbe9e2a6f55f3ee64c8a83428bdaae
--- /dev/null
+++ b/modules/CoralVision/module.json
@@ -0,0 +1,16 @@
+{
+    "$schema-version": "0.0.1",
+    "description": "",
+    "image": {
+        "repository": "$CONTAINER_REGISTRY_ADDRESS",
+        "tag": {
+            "version": "coralvision_2022.2.11",
+            "platforms": {
+                "arm32v7": "./arm32v7.Dockerfile"
+            }
+        },
+        "buildOptions": []
+    },
+    
+    "language": "python"
+}
\ No newline at end of file
diff --git a/modules/FileUpload/app/main.py b/modules/FileUpload/app/main.py
new file mode 100644
index 0000000000000000000000000000000000000000..4613494f2c01dba79e491b7b79d38d6c2f6ccb87
--- /dev/null
+++ b/modules/FileUpload/app/main.py
@@ -0,0 +1,252 @@
+import os
+import asyncio
+from azure.iot.device.aio import IoTHubDeviceClient
+from azure.core.exceptions import AzureError
+from azure.storage.blob import BlobClient
+
+# Imports for the REST API
+from flask import Flask, request, jsonify
+import json
+import io
+import datetime
+import yaml
+import tomli
+
# Flask application serving the upload endpoints.
app = Flask(__name__)

# 4MB Max image size limit — Flask rejects larger request bodies outright.
app.config['MAX_CONTENT_LENGTH'] = 4 * 1024 * 1024
+
# Default route just shows simple text
@app.route('/')
def index():
    # Acts as a simple liveness/landing page for the service.
    return 'CustomVision.ai model host harness'
+
# Like the CustomVision.ai Prediction service /image route handles either
#     - octet-stream image file 
#     - a multipart/form-data with files in the imageData parameter
@app.route('/image', methods=['POST'])
def upload_image_handler():
    """Accept a POSTed image and upload it to Azure blob storage via IoT Hub.

    The payload may arrive as a multipart file or form field named
    'imageData', or as the raw request body. The blob is named after the
    current timestamp with a .jpg extension. Returns ('Upload Success', 200)
    on success; on failure notifies IoT Hub and returns an error tuple
    (unexpected exceptions yield a 500).
    """
    try:
        if 'imageData' in request.files:
            imageData = request.files['imageData']
        elif 'imageData' in request.form:
            imageData = request.form['imageData']
        else:
            # Raw octet-stream body.
            imageData = io.BytesIO(request.get_data())

        # Timestamp-based blob name keeps successive uploads distinct.
        file_name = "{}.jpg".format(datetime.datetime.now())
        blob_name = os.path.basename(file_name)
        storage_info = asyncio.run(device_client.get_storage_info_for_blob(blob_name))

        success, result = asyncio.run(store_image(imageData, storage_info, file_name))
        if success == True:
            asyncio.run(device_client.notify_blob_upload_status(
                storage_info["correlationId"], True, 200, "OK: {}".format(file_name)
            ))
            print(result)
            print(datetime.datetime.now())
            return 'Upload Success', 200
        else:
            # Bug fix: this branch previously called `devicejson_client`, an
            # undefined name, so every failed upload raised NameError instead
            # of notifying IoT Hub (cf. the identical branch in
            # upload_data_handler, which uses device_client).
            asyncio.run(device_client.notify_blob_upload_status(
                storage_info["correlationId"], False, result.status_code, str(result)
            ))
            # NOTE(review): `result` here is an exception object, not an HTTP
            # status code — Flask will treat this as a server error; confirm
            # the intended response shape.
            return 'Error upload image', result

    except Exception as e:
        print('EXCEPTION:', str(e))
        return 'Error upload image', 500
+
+
@app.route('/data', methods=['POST'])
def upload_data_handler():
    """Accept a POSTed data file and upload it to Azure blob storage via IoT Hub.

    The payload may arrive as a multipart file or form field named
    'imageData', or as the raw request body; the blob's extension comes from
    the required 'ext' query argument.
    """
    try:
        if 'imageData' in request.files:
            payload = request.files['imageData']
        elif 'imageData' in request.form:
            payload = request.form['imageData']
        else:
            # Raw octet-stream body.
            payload = io.BytesIO(request.get_data())

        # Timestamped blob name with the caller-supplied extension.
        extension = request.args['ext']
        file_name = "{}.{}".format(datetime.datetime.now(), extension)
        blob_name = os.path.basename(file_name)
        storage_info = asyncio.run(device_client.get_storage_info_for_blob(blob_name))

        success, result = asyncio.run(store_image(payload, storage_info, file_name))
        if not success:
            # `result` is the exception from store_image; report it to the hub.
            asyncio.run(device_client.notify_blob_upload_status(
                storage_info["correlationId"], False, result.status_code, str(result)
            ))
            return 'Error upload image', result

        asyncio.run(device_client.notify_blob_upload_status(
            storage_info["correlationId"], True, 200, "OK: {}".format(file_name)
        ))
        print(result)
        print(datetime.datetime.now())
        return 'Upload Success', 200

    except Exception as e:
        print('EXCEPTION:', str(e))
        return 'Error upload image', 500
+
+
# CONNECTION_STRING = "HostName=Mon-hub-IoT.azure-devices.net;DeviceId=;SharedAccessKey="
# Local file uploaded by the sample main() flow.
PATH_TO_FILE = r"./requirements.txt"
# NOTE(review): `global` at module scope is a no-op — device_client is
# actually created by assignment inside initialize().
global device_client
#global storage_info
+
async def store_blob(blob_info, file_name):
    """Upload the local file `file_name` to the blob described by blob_info.

    blob_info is the dict returned by get_storage_info_for_blob (hostName,
    containerName, blobName, sasToken, ...). Returns a (success, result)
    tuple instead of raising; on failure, result is the exception.
    """
    try:
        sas_url = "https://{}/{}/{}{}".format(
            blob_info["hostName"],
            blob_info["containerName"],
            blob_info["blobName"],
            blob_info["sasToken"]
        )

        print("\nUploading file: {} to Azure Storage as blob: {} in container {}\n".format(file_name, blob_info["blobName"], blob_info["containerName"]))

        # Upload the specified file through a SAS-authenticated client.
        with BlobClient.from_blob_url(sas_url) as blob_client, open(file_name, "rb") as f:
            return (True, blob_client.upload_blob(f, overwrite=True))

    except FileNotFoundError as ex:
        # Attach an HTTP status code so the caller can report it to IoT Hub.
        ex.status_code = 404
        return (False, ex)

    except AzureError as ex:
        # Storage-side failures raised by the upload operation.
        return (False, ex)
+
async def main():
    """Sample flow: connect a device client, upload PATH_TO_FILE through the
    IoT Hub file-upload mechanism, and report the outcome back to the hub.

    Bug fixes vs. the original:
    - It referenced CONNECTION_STRING, whose definition is commented out
      above, so calling main() raised NameError; the connection string is now
      read from the environment (the alternative the original had commented).
    - The `finally` block dereferenced device_client even when client creation
      failed, turning any early error into UnboundLocalError.
    """
    device_client = None
    try:
        print("IoT Hub file upload sample, press Ctrl-C to exit")

        conn_str = os.getenv("IOTHUB_DEVICE_CONNECTION_STRING")
        file_name = PATH_TO_FILE
        blob_name = os.path.basename(file_name)

        device_client = IoTHubDeviceClient.create_from_connection_string(conn_str)

        # Connect the client
        await device_client.connect()

        # Get the storage info for the blob
        storage_info = await device_client.get_storage_info_for_blob(blob_name)

        # Upload to blob
        success, result = await store_blob(storage_info, file_name)

        if success:
            print("Upload succeeded. Result is: \n")
            print(result)
            print()

            await device_client.notify_blob_upload_status(
                storage_info["correlationId"], True, 200, "OK: {}".format(file_name)
            )
        else:
            # If the upload was not successful, the result is the exception object
            print("Upload failed. Exception is: \n")
            print(result)
            print()

            await device_client.notify_blob_upload_status(
                storage_info["correlationId"], False, result.status_code, str(result)
            )

    except KeyboardInterrupt:
        print("\nIoTHubDeviceClient sample stopped")

    except Exception as ex:
        print("\nException:")
        print(ex)

    finally:
        # Disconnect only if the client was actually created.
        if device_client is not None:
            await device_client.disconnect()
+
+
async def initialize():
    """Create and connect the module-level IoTHubDeviceClient.

    Reads the device connection string from the IoT Edge configuration file
    (/etc/aziot/config.toml). Exceptions are printed and swallowed so the
    Flask server can still start; uploads will then fail per-request.

    Cleanup vs. original: removed the unused file_name/blob_name locals left
    over from the commented-out get_storage_info_for_blob call.
    """
    global device_client
    try:
        print("IoT Hub file upload")

        # NOTE(review): assumes the TOML exposes provisioning.connection_string
        # — verify against the actual device config schema.
        with open(r'/etc/aziot/config.toml', mode='rb') as file:
            config = tomli.load(file)

        conn_str = config['provisioning']['connection_string']

        device_client = IoTHubDeviceClient.create_from_connection_string(conn_str)

        # Connect the client
        await device_client.connect()

    except Exception as ex:
        print("\nException:")
        print(ex)
+
async def store_image(image, blob_info, file_name):
    """Upload an in-memory image/file object to the blob described by blob_info.

    file_name is used only for logging; the destination comes from blob_info
    (hostName, containerName, blobName, sasToken). Returns (True, result) on
    success or (False, exception) on failure instead of raising.
    """
    try:
        sas_url = "https://{}/{}/{}{}".format(
            blob_info["hostName"],
            blob_info["containerName"],
            blob_info["blobName"],
            blob_info["sasToken"]
        )

        print("\nUploading file: {} to Azure Storage as blob: {} in container {}\n".format(file_name, blob_info["blobName"], blob_info["containerName"]))

        # Stream the payload straight to blob storage via a SAS URL.
        with BlobClient.from_blob_url(sas_url) as blob_client:
            return (True, blob_client.upload_blob(image, overwrite=True))

    except FileNotFoundError as ex:
        # Attach an HTTP status code so the caller can report it to IoT Hub.
        ex.status_code = 404
        return (False, ex)

    except AzureError as ex:
        # Storage-side failures raised by the upload operation.
        return (False, ex)
+
if __name__ == "__main__":
    # Connect the IoT Hub device client once at startup; errors are swallowed
    # inside initialize(), so the server starts regardless.
    asyncio.run(initialize())

    # Run the server
    app.run(host='0.0.0.0', port=80)

    #loop = asyncio.get_event_loop()
    #loop.run_until_complete(main())
    #loop.close()
\ No newline at end of file
diff --git a/modules/FileUpload/arm32v7.Dockerfile b/modules/FileUpload/arm32v7.Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..f7970ea775ed486f206723f83e37216e7724a418
--- /dev/null
+++ b/modules/FileUpload/arm32v7.Dockerfile
@@ -0,0 +1,23 @@
FROM balenalib/raspberrypi3-debian-python:3.7

# Enforces cross-compilation through QEMU
RUN [ "cross-build-start" ]

RUN echo "BUILD MODULE: File Upload"

# Install Python packages (piwheels serves prebuilt ARM wheels)
COPY /build/requirements.txt ./
RUN pip3 install --upgrade pip
RUN pip3 install --upgrade setuptools
RUN pip3 install --index-url=https://www.piwheels.org/simple -r requirements.txt

# presumably a native dependency of the azure SDK stack — TODO confirm
RUN install_packages libffi6

RUN [ "cross-build-end" ]  

ADD /app/ .

# Expose the port
# EXPOSE 5012

ENTRYPOINT [ "python3", "-u", "./main.py" ]
diff --git a/modules/FileUpload/build/requirements.txt b/modules/FileUpload/build/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..e22c13f0e16fad6c697d7a961540cb2b0a6413ec
--- /dev/null
+++ b/modules/FileUpload/build/requirements.txt
@@ -0,0 +1,5 @@
+azure-iot-device
+azure.storage.blob
+Flask
+pyyaml
+tomli
\ No newline at end of file
diff --git a/modules/FileUpload/module.json b/modules/FileUpload/module.json
new file mode 100644
index 0000000000000000000000000000000000000000..1bac740db76385aa5f33335612bb4c35b987f24d
--- /dev/null
+++ b/modules/FileUpload/module.json
@@ -0,0 +1,16 @@
+{
+    "$schema-version": "0.0.1",
+    "description": "",
+    "image": {
+        "repository": "$CONTAINER_REGISTRY_ADDRESS",
+        "tag": {
+            "version": "fileupload_2022.2.10",
+            "platforms": {
+                "arm32v7": "./arm32v7.Dockerfile"
+            }
+        },
+        "buildOptions": []
+    },
+    
+    "language": "python"
+}
\ No newline at end of file