diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..c531222
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+
+.vscode/
diff --git a/biden.jpg b/biden.jpg
new file mode 100644
index 0000000..3a0bdc9
Binary files /dev/null and b/biden.jpg differ
diff --git a/face_detectio.py b/face_detectio.py
new file mode 100644
index 0000000..8a0b3a8
--- /dev/null
+++ b/face_detectio.py
@@ -0,0 +1,52 @@
+import face_recognition
+import cv2
+
+# This is a demo of blurring faces in video.
+
+# PLEASE NOTE: This example requires OpenCV (the `cv2` library) to be installed only to read the video
+# stream. OpenCV is *not* required to use the face_recognition library. It's only required if you want to run
+# this specific demo. If you have trouble installing it, try any of the other demos that don't require it instead.
+
+# Open the MJPEG stream served by the network camera (pass 0 instead to use the default local webcam)
+video_capture = cv2.VideoCapture("http://192.168.178.53:8000/stream.mjpg")
+
+# Initialize some variables
+face_locations = []
+
+while True:
+    # Grab a single frame of video
+    ret, frame = video_capture.read()
+
+    # Stop if the stream ended or the frame could not be read
+    if not ret:
+        break
+
+    # Resize frame of video to 1/4 size for faster face detection processing
+    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)
+
+    # Convert from OpenCV's BGR channel order to the RGB order that face_recognition expects
+    rgb_small_frame = cv2.cvtColor(small_frame, cv2.COLOR_BGR2RGB)
+
+    # Find all the faces in the current frame of video
+    face_locations = face_recognition.face_locations(rgb_small_frame, model="cnn")
+
+    # Display the results
+    for top, right, bottom, left in face_locations:
+        # Scale back up face locations since the frame we detected in was scaled to 1/4 size
+        top *= 4
+        right *= 4
+        bottom *= 4
+        left *= 4
+
+        # Extract the region of the image that contains the face
+        face_image = frame[top:bottom, left:right]
+
+        # Blur the face image (a large 99x99 kernel with sigma 30 makes faces unrecognizable)
+        face_image = cv2.GaussianBlur(face_image, (99, 99), 30)
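+
+        # (Hedged sketch, not part of the original demo: pixelation is a common
+        # alternative to Gaussian blur. Shrink the face region, then scale it
+        # back up with nearest-neighbour interpolation; the 16x16 grid size is
+        # an arbitrary choice.)
+        #   small = cv2.resize(face_image, (16, 16), interpolation=cv2.INTER_LINEAR)
+        #   face_image = cv2.resize(small, (right - left, bottom - top), interpolation=cv2.INTER_NEAREST)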
+
+        # Put the blurred face region back into the frame image
+        frame[top:bottom, left:right] = face_image
+
+    # Display the resulting image
+    cv2.imshow('Video', frame)
+
+    # Hit 'q' on the keyboard to quit!
+    # (masking waitKey's return value with 0xFF keeps only the low byte, so the
+    # comparison with ord('q') behaves consistently across platforms)
+    if cv2.waitKey(1) & 0xFF == ord('q'):
+        break
+
+# Release handle to the video stream
+video_capture.release()
+cv2.destroyAllWindows()
\ No newline at end of file
diff --git a/motion_detector.py b/motion_detector.py
new file mode 100644
index 0000000..a20cc76
--- /dev/null
+++ b/motion_detector.py
@@ -0,0 +1,132 @@
+# USAGE
+# python motion_detector.py
+# python motion_detector.py --video videos/example_01.mp4
+
+# import the necessary packages
+import argparse
+import datetime
+import imutils
+import time
+import cv2
+
+
+def increase_brightness(img, value=30):
+    # shift the V (brightness) channel up by `value`, clamping at 255
+    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
+    h, s, v = cv2.split(hsv)
+
+    lim = 255 - value
+    v[v > lim] = 255
+    v[v <= lim] += value
+
+    final_hsv = cv2.merge((h, s, v))
+    img = cv2.cvtColor(final_hsv, cv2.COLOR_HSV2BGR)
+
+    return img
+
+# construct the argument parser and parse the arguments
+ap = argparse.ArgumentParser()
+ap.add_argument("-v", "--video", help="path to the video file")
+ap.add_argument("-amin", "--min-area", type=int, default=3000, help="minimum area size")
+ap.add_argument("-amax", "--max-area", type=int, default=10000, help="maximum area size")
+args = vars(ap.parse_args())
+
+# give the camera a few seconds to warm up
+time.sleep(5)
+
+# if no video file was given, fall back to the network camera stream
+if args.get("video", None) is None:
+    args["video"] = "http://192.168.178.53:8000/stream.mjpg"
+    #args["video"] = "./videos/example_02.mp4"
+vs = cv2.VideoCapture(args["video"])
+counter = 0
+threshold = 50
+delay = 2
+framerate = 30
+
+# initialize the first frame in the video stream
+firstFrame = None
+
+# loop over the frames of the video
+while True:
+    # grab the current frame and initialize the occupied/unoccupied text
+    ret, frame = vs.read()
+    text = "Unoccupied"
+
+    # if the frame could not be grabbed, then we have reached the end
+    # of the video
+    if frame is None:
+        break
+
+    # resize the frame, convert it to grayscale, and blur it
+    frame = imutils.resize(frame, width=500)
+    #frame = increase_brightness(frame, value=50)
+    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+    gray = cv2.GaussianBlur(gray, (31, 31), 0)
+
+    # if the first frame is None, initialize it
+    if firstFrame is None:
+        firstFrame = gray
+        continue
+
+    # compute the absolute difference between the current frame and
+    # first frame
+    frameDelta = cv2.absdiff(gray, firstFrame)
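+
+    # (Hedged sketch, not part of the original script: differencing against a
+    # fixed firstFrame breaks down as lighting drifts. A running-average
+    # background via cv2.accumulateWeighted would adapt over time; the 0.05
+    # update weight is an arbitrary choice.)
+    #   avg = gray.copy().astype("float")        # once, in place of firstFrame
+    #   cv2.accumulateWeighted(gray, avg, 0.05)  # blend each new frame in
+    #   frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(avg))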
+
+    # threshold the delta image to keep only the regions that changed significantly
+    thresh = cv2.threshold(frameDelta, threshold, 255, cv2.THRESH_BINARY)[1]
+
+    # dilate the thresholded image to fill in holes, then find contours
+    # on the thresholded image
+    thresh = cv2.dilate(thresh, None, iterations=2)
+    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+    cnts = imutils.grab_contours(cnts)
+
+    # loop over the contours
+    for c in cnts:
+        # ignore contours that are too small (sensor noise) or too large
+        # (e.g. a global lighting change)
+        if cv2.contourArea(c) < args["min_area"]:
+            continue
+        if cv2.contourArea(c) > args["max_area"]:
+            continue
+
+        # compute the bounding box for the contour, draw it on the frame,
+        # and update the text
+        print(cv2.contourArea(c))
+
+        (x, y, w, h) = cv2.boundingRect(c)
+        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
+        text = "Occupied"
+
+    # draw the text and timestamp on the frame
+    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
+        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
+    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
+        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
+
+    # show the frame and record if the user presses a key
+    cv2.imshow("Security Feed", frame)
+    cv2.imshow("Thresh", thresh)
+    cv2.imshow("Frame Delta", frameDelta)
+
+    # equalize the histogram of the Y (luma) channel for a contrast-enhanced view
+    img_yuv = cv2.cvtColor(frame, cv2.COLOR_BGR2YUV)
+    img_yuv[:,:,0] = cv2.equalizeHist(img_yuv[:,:,0])
+    # convert the YUV image back to BGR format
+    img_output = cv2.cvtColor(img_yuv, cv2.COLOR_YUV2BGR)
+
+    cv2.imshow("equalized", img_output)
+
+    key = cv2.waitKey(1) & 0xFF
+
+    # if the `q` key is pressed, break from the loop
+    if key == ord("q"):
+        break
+
+    counter += 1
+    # optionally reset the reference frame every `delay` seconds
+    #if counter % (framerate * delay) == 0:
+    #    firstFrame = gray
+
+# cleanup the camera and close any open windows
+vs.release()
+cv2.destroyAllWindows()
\ No newline at end of file
diff --git a/obama.jpg b/obama.jpg
new file mode 100644
index 0000000..1ce4b84
Binary files /dev/null and b/obama.jpg differ
diff --git a/videos/example_01.mp4 b/videos/example_01.mp4
new file mode 100644
index 0000000..486f294
Binary files /dev/null and b/videos/example_01.mp4 differ
diff --git a/videos/example_02.mp4 b/videos/example_02.mp4
new file mode 100644
index 0000000..a4cb161
Binary files /dev/null and b/videos/example_02.mp4 differ