diff --git a/Color Detection and Segmentation/README.md b/Color Detection and Segmentation/README.md
new file mode 100644
index 00000000..be585ec0
--- /dev/null
+++ b/Color Detection and Segmentation/README.md
@@ -0,0 +1,40 @@
+# How does this algorithm work ?
+
+
+The algorithm is very similar in principle to green screening. But unlike green screening, where we remove the background, in this application we remove the foreground! Since everyone is a fan of the Harry Potter world and Hogwarts, today I am going to show you the magic of Harry's Invisibility Cloak with Computer Vision.
+
+
+
+
+
+The project comprises 5 simple steps:
+
+
+ - Importing the required libraries.
+ - Capture and store the background frame [This will be done for some seconds]
+ - Detect the red colored cloth using color detection and segmentation algorithm.
+ - Segment out the red colored cloth by generating a mask. [used in code]
+ - Generate the final augmented output to create a magical effect. [output.avi]
+
+
+Step 1: Importing the required libraries.
+We have to import all the necessary libraries into the code. “cv2” is for OpenCV, “time” is for time-related operations, and “numpy” is for numerical purposes. We write the video using the XVID fourcc codec and save the output as output.avi.
+
+
+
+Step 2: Capture and store the background frame.
+In order to create an invisible effect, we first capture and store a static background frame; later, the red-colored pixels in each frame will be replaced with the corresponding background pixels. For capturing a frame we use cap.read(). The variable ‘ret’ is a boolean that indicates whether the frame was read successfully.
+
+
+
+Step 3: Detect the red colored cloth using color detection and segmentation algorithm.
+Here we will convert the frame from RGB (Red-Green-Blue) to HSV (Hue-Saturation-Value) color space, because RGB values are highly sensitive to changes in illumination, which makes reliable color detection difficult.
+
+
+
+Step 4: Segment out the red colored cloth by generating a mask.
+
+
+
+Step 5: Generate the final augmented output to create a magical effect.
+The final step, in which the red-colored pixels are replaced with the corresponding background pixels, giving us the desired output.
\ No newline at end of file
diff --git a/Color Detection and Segmentation/colorDetectionAndSegmentation.ipynb b/Color Detection and Segmentation/colorDetectionAndSegmentation.ipynb
new file mode 100644
index 00000000..7b83f994
--- /dev/null
+++ b/Color Detection and Segmentation/colorDetectionAndSegmentation.ipynb
@@ -0,0 +1,108 @@
+{
+ "metadata": {
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.5-final"
+ },
+ "orig_nbformat": 2,
+ "kernelspec": {
+ "name": "python3",
+ "display_name": "Python 3.8.5 32-bit",
+ "metadata": {
+ "interpreter": {
+ "hash": "7ee70af6370f0e7ac52364848d783fba7ea9f9b1187f891824a5c73a15f2031e"
+ }
+ }
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2,
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "##Import opencv(cv2), Numpy array(numpy) \r\n",
+ "import cv2\r\n",
+ "import time\r\n",
+ "import numpy as np\r\n",
+ "\r\n",
+        "## Preparation for writing the output video\r\n",
+ "fourcc = cv2.VideoWriter_fourcc(*'XVID')\r\n",
+ "out = cv2.VideoWriter('output.avi',fourcc,20.0, (640,480))\r\n",
+ "\r\n",
+ "##reading from the webcam \r\n",
+ "cap = cv2.VideoCapture(0)\r\n",
+ "\r\n",
+ "## Allow the system to sleep for 3 seconds before the webcam starts\r\n",
+ "time.sleep(3)\r\n",
+ "count = 0\r\n",
+ "background = 0\r\n",
+ "\r\n",
+ "## Capture the background in range of 60\r\n",
+ "for i in range(60):\r\n",
+ " ret,background = cap.read()\r\n",
+ "background = np.flip(background,axis=1)\r\n",
+ "\r\n",
+ "\r\n",
+ "## Read every frame from the webcam, until the camera is open\r\n",
+ "while(cap.isOpened()):\r\n",
+ " ret, img = cap.read()\r\n",
+ " if not ret:\r\n",
+ " break\r\n",
+ " count+=1\r\n",
+ " img = np.flip(img,axis=1)\r\n",
+ " \r\n",
+ " ## Convert the color space from BGR to HSV\r\n",
+ " hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n",
+ "\r\n",
+        "    ## Generat masks to detect red color\r\n".replace("Generat", "Generate") if False else "    ## Generate masks to detect red color\r\n",
+ " lower_red = np.array([0,120,50])\r\n",
+ " upper_red = np.array([10,255,255])\r\n",
+ " mask1 = cv2.inRange(hsv,lower_red,upper_red)\r\n",
+ "\r\n",
+ " lower_red = np.array([170,120,70])\r\n",
+ " upper_red = np.array([180,255,255])\r\n",
+ " mask2 = cv2.inRange(hsv,lower_red,upper_red)\r\n",
+ "\r\n",
+ " mask1 = mask1+mask2\r\n",
+ "\r\n",
+ " ## Open and Dilate the mask image\r\n",
+ " mask1 = cv2.morphologyEx(mask1, cv2.MORPH_OPEN, np.ones((3,3),np.uint8))\r\n",
+ " mask1 = cv2.morphologyEx(mask1, cv2.MORPH_DILATE, np.ones((3,3),np.uint8))\r\n",
+ " \r\n",
+ " \r\n",
+ " ## Create an inverted mask to segment out the red color from the frame\r\n",
+ " mask2 = cv2.bitwise_not(mask1)\r\n",
+ " \r\n",
+ " \r\n",
+ " ## Segment the red color part out of the frame using bitwise and with the inverted mask\r\n",
+ " res1 = cv2.bitwise_and(img,img,mask=mask2)\r\n",
+ "\r\n",
+ " ## Create image showing static background frame pixels only for the masked region\r\n",
+ " res2 = cv2.bitwise_and(background, background, mask = mask1)\r\n",
+ " \r\n",
+ " \r\n",
+ " ## Generating the final output and writing\r\n",
+ " finalOutput = cv2.addWeighted(res1,1,res2,1,0)\r\n",
+ " out.write(finalOutput)\r\n",
+ " cv2.imshow(\"magic\",finalOutput)\r\n",
+ " cv2.waitKey(1)\r\n",
+ "\r\n",
+ "cap.release()\r\n",
+ "out.release()\r\n",
+ "cv2.destroyAllWindows()"
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/Color Detection and Segmentation/invisible.gif b/Color Detection and Segmentation/invisible.gif
new file mode 100644
index 00000000..89a54353
Binary files /dev/null and b/Color Detection and Segmentation/invisible.gif differ