Week #22 6/25 - 7/2
- rms126
- Jun 27, 2024
- 3 min read
Updated: Jul 20, 2024
Nick:
I spent the week working on the project report:
imported chapters from proposal report
updated tables and diagrams with the latest from the website
created the structure of the report following the rubric
Ryan:
Continued troubleshooting the code provided by Nick. Revised the code and added functionality so that the COCO object-detection model is used as the means of opening the door rather than the Haar cascade image classifier.
Code:
import cv2
from gpiozero import Motor
from time import sleep
from tkinter import *
from tkinter import Tk, font
import datetime
import RPi.GPIO as GPIO
# GPIO setup
GPIO.setmode(GPIO.BCM)  # Broadcom pin numbering
GPIO.setup(17, GPIO.IN)  # proximity sensor input on GPIO17 (read in rolldown)
# Door motor via gpiozero: pin 24 drives forward (up), pin 23 backward (down).
doormotor = Motor(24, 23)
#door roll up function
#door roll up function
def rollup():
    """Roll the door fully up: run the motor upward for a fixed travel time."""
    travel_seconds = 7
    doormotor.forward()
    sleep(travel_seconds)
    doormotor.stop()
#door roll down function with prox sensor added
def rolldown():
i = 0
while i < 7:
i = i+1
doormotor.backward()
sleep(1)
if not GPIO.input(17):
doormotor.forward()
sleep(2)
i = i-2
doormotor.stop()
#Full opening sequence function
def opensequence():
log_list.insert(END, datetime.datetime.now().strftime("%D - %H:%M:%S"))
rollup()
sleep(2)
rolldown()
#COCO object recognition code
classNames = []
classFile = "coco.names"
with open(classFile,"rt") as f:
classNames = f.read().rstrip("\n").split("\n")
configPath = "ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt"
weightsPath = "frozen_inference_graph.pb"
net = cv2.dnn_DetectionModel(weightsPath,configPath)
net.setInputSize(320,320)
net.setInputScale(1.0/ 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)
def getObjects(img, thres, nms, draw=True, objects=("dog",)):
    """Run the COCO detector on *img* and return the annotated frame.

    Parameters:
        img: BGR frame as returned by cv2.VideoCapture.read().
        thres: confidence threshold forwarded to net.detect().
        nms: non-maximum-suppression threshold forwarded to net.detect().
        draw: when True, draw box, label and confidence onto *img*.
        objects: class names to keep; an empty sequence means all classes.
            (Tuple default replaces the original mutable-list default,
            which is a classic Python pitfall.)

    Returns:
        (img, objectInfo) where objectInfo is a list of [box, className]
        entries for each detection whose class is in *objects*.
    """
    classIds, confs, bbox = net.detect(img, confThreshold=thres, nmsThreshold=nms)
    if len(objects) == 0:
        objects = classNames
    objectInfo = []
    if len(classIds) != 0:
        for classId, confidence, box in zip(classIds.flatten(), confs.flatten(), bbox):
            # COCO class ids are 1-based; the names list is 0-based.
            className = classNames[classId - 1]
            if className in objects:
                objectInfo.append([box, className])
                if draw:
                    cv2.rectangle(img, box, color=(0, 255, 0), thickness=2)
                    cv2.putText(img, className.upper(), (box[0] + 10, box[1] + 30),
                                cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
                    cv2.putText(img, str(round(confidence * 100, 2)), (box[0] + 200, box[1] + 30),
                                cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    return img, objectInfo
# Camera setup
cam1 = cv2.VideoCapture(0)
cam1.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
cam1.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)
cam2 = cv2.VideoCapture(2)
cam2.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
cam2.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)
#function to determine if the current time is within the time window set by the sliders
def is_within_time_window(current_time):
start_time = datetime.time(tw_start_hour_slider.get(), tw_start_minute_slider.get())
end_time = datetime.time(tw_end_hour_slider.get(), tw_end_minute_slider.get())
return start_time <= current_time <= end_time
#scan function to determine if a dog is in the frame
def scan():
ret1, frame1 = cam1.read()
ret2, frame2 = cam2.read()
frame1_with_dogs, dogs_info1 = getObjects(frame1, 0.55, 0.2, objects=["dog"])
frame2_with_dogs, dogs_info2 = getObjects(frame2, 0.55, 0.2, objects=["dog"])
if dogs_info1 and is_within_time_window(datetime.datetime.now().time()):
opensequence()
if dogs_info2:
opensequence()
# GUI setup
main_window = Tk()
main_window.configure(background="SkyBlue4")
main_window.geometry("800x480")
main_window.attributes("-fullscreen", True)
main_window.title("Smart Doggy Door")
main_window.option_add("*font", "SegoeUI 24")
status = str("Locked")
tw_start = Label(main_window, bg='yellow', text='Time Window Start: ')
tw_start.grid(row=2, column=0)
tw_end = Label(main_window, bg='yellow', text='Time Window End: ')
tw_end.grid(row=3, column=0)
tw_start_hour_slider = Scale(main_window, label="HH", orient=HORIZONTAL, from_=0, to=23, background='SteelBlue3', font=('SegoeUI', 20))
tw_start_hour_slider.grid(row=2, column=1, sticky=W, padx=10, pady=10)
tw_start_minute_slider = Scale(main_window, label="MM", orient=HORIZONTAL, from_=0, to=59, background='SteelBlue3', font=('SegoeUI', 20))
tw_start_minute_slider.grid(row=2, column=2, sticky=W, padx=10, pady=10)
tw_end_hour_slider = Scale(main_window, label="HH", orient=HORIZONTAL, from_=0, to=23, background='SteelBlue3', font=('SegoeUI', 20))
tw_end_hour_slider.grid(row=3, column=1, sticky=W, padx=10, pady=10)
tw_end_minute_slider = Scale(main_window, label="MM", orient=HORIZONTAL, from_=0, to=59, background='SteelBlue3', font=('SegoeUI', 20))
tw_end_minute_slider.grid(row=3, column=2, sticky=W, padx=10, pady=10)
manual_button = Button(main_window, text="Manual Open", bg="Navy", fg="White", command=opensequence, font=('SegoeUI', 26))
manual_button.grid(row=4, column=0, padx=10, pady=10)
log_list = Listbox(main_window, fg='blue', bg='SeaShell3', width=25, height=6, borderwidth=3, font=('SegoeUI', 13))
log_list.grid(row=6, column=0, padx=10, pady=10)
history_label = Label(main_window, bg='LightCyan3', text='Access History', font=('SegoeUI', 15))
history_label.grid(row=5, column=0, padx=10, pady=10, sticky=W)
status_label = Label(main_window, bg='DodgerBlue', text='Current Status: ', font=('SegoeUI', 18))
status_label.grid(row=0, column=2, padx=10, pady=10, sticky=E)
current_status_label = Label(main_window, bg='Salmon', text=status, font=('SegoeUI', 18))
current_status_label.grid(row=0, column=3, padx=10, pady=10, sticky=W)
current_time = Label(main_window, font=('SegoeUI', 15))
current_time.grid(row=0, column=0, columnspan=2, padx=10, pady=10)
def clock():
    """Periodic 200 ms tick: refresh the time display and lock status,
    run one camera detection pass, then reschedule itself via after()."""
    time_text = datetime.datetime.now().strftime("Current Time: %H:%M:%S")
    current_time.config(text=time_text)
    in_window = is_within_time_window(datetime.datetime.now().time())
    current_status_label.config(text="Unlocked" if in_window else "Locked")
    scan()
    main_window.after(200, clock)
clock()
mainloop()
Once functioning, the code was tested using the method previously described: showing the camera various images of dogs. After it was determined that the code was working as per the engineering requirements, testing was conducted using a new dog, Butch. Video of the results is shown below: