import warnings
import threading
import cv2
import mediapipe as mp
import numpy as np
from tensorflow.keras.models import load_model
from tkinter import Tk, Canvas, Button, Label, LEFT, RIGHT, NW
from PIL import Image, ImageTk
warnings.filterwarnings("ignore", category=UserWarning, message='SymbolDatabase.GetPrototype() is deprecated')
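
# Real-time hand-gesture recognition demo: webcam frames are run through
# MediaPipe Hands, a Keras CNN classifies the gesture image, landmark
# heuristics count raised fingers and detect the "OK" sign, and Tkinter
# windows show the video feed and result popups.
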
hands = None
mp_draw = mp.solutions.drawing_utils
cap = None
keep_running = False
paused = False
popup_open = False  # marks whether a popup window is currently open
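# The Keras model below is loaded from a machine-specific path; predict_gesture()
# assumes it takes 150x150 RGB inputs scaled to [0, 1] and outputs one score per
# label in gesture_classes.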
model_path = 'D:/hand/hand_gesture_model.h5'
model = load_model(model_path)
gesture_classes = ['00', '01', '02', '03', '04', '05', '06', '07', '08', '09']
def start_recognition(callback=None):
    """Open the webcam and MediaPipe Hands (if needed) and start the
    recognition loop on a background thread."""
    global keep_running, cap, hands
    if cap is None or not cap.isOpened():
        cap = cv2.VideoCapture(0)
    if hands is None:
        hands = mp.solutions.hands.Hands(static_image_mode=False, max_num_hands=2,
                                         model_complexity=1, min_detection_confidence=0.5,
                                         min_tracking_confidence=0.5)
    keep_running = True
    threading.Thread(target=run_recognition, args=(callback,)).start()

def run_recognition(callback=None):
    """Worker loop: grab frames, mirror them, run hand detection while not
    paused, and hand each annotated frame to the display callback."""
    global keep_running, paused
    while keep_running and cap.isOpened():
        ret, img = cap.read()
        if not ret:
            break
        img = cv2.flip(img, 1)
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

        if not paused:
            results = hands.process(img_rgb)
            total_raised_fingers = 0
            if results.multi_hand_landmarks:
                for handLms in results.multi_hand_landmarks:
                    mp_draw.draw_landmarks(img, handLms, mp.solutions.hands.HAND_CONNECTIONS)
                    gesture, raised_fingers = detect_gesture_and_fingers(handLms)
                    total_raised_fingers += raised_fingers
                    if gesture == "OK":
                        handle_ok_gesture()

                if total_raised_fingers > 0:
                    handle_finger_detection(total_raised_fingers)

            cv2.putText(img, f'Total Raised Fingers: {total_raised_fingers}', (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 2, cv2.LINE_AA)
        if callback:
            callback(img)

def stop_recognition():
    global keep_running, cap
    keep_running = False
    if cap is not None and cap.isOpened():
        cap.release()
        cap = None
    cv2.destroyAllWindows()

def release_camera():
    global cap
    if cap is not None and cap.isOpened():
        cap.release()
        cap = None

def detect_gesture_and_fingers(hand_landmarks):
    """Classify the gesture with the CNN, count raised fingers from the
    landmarks, and override the label with "OK" when that shape is detected."""
    gesture_image = get_hand_image(hand_landmarks)
    gesture = predict_gesture(gesture_image)
    raised_fingers = count_raised_fingers(hand_landmarks)

    if is_ok_gesture(hand_landmarks):
        gesture = "OK"
    return gesture, raised_fingers

def get_hand_image(hand_landmarks):
    # Placeholder: returns a blank 150x150 image instead of a crop of the hand,
    # so predict_gesture() currently always sees the same input.
    img = np.zeros((150, 150, 3), dtype=np.uint8)
    return img

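# A minimal sketch of what get_hand_image() could do instead: crop the hand's
# bounding box (from the normalized landmark coordinates) out of the camera
# frame. get_hand_image_from_frame() is a hypothetical helper, not part of the
# original pipeline, and would need the current frame passed down to it.
def get_hand_image_from_frame(frame, hand_landmarks, margin=20):
    h, w = frame.shape[:2]
    xs = [lm.x * w for lm in hand_landmarks.landmark]
    ys = [lm.y * h for lm in hand_landmarks.landmark]
    x1, y1 = max(int(min(xs)) - margin, 0), max(int(min(ys)) - margin, 0)
    x2, y2 = min(int(max(xs)) + margin, w), min(int(max(ys)) + margin, h)
    crop = frame[y1:y2, x1:x2]
    if crop.size == 0:  # hand partly outside the frame
        return np.zeros((150, 150, 3), dtype=np.uint8)
    return cv2.resize(crop, (150, 150))
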
def predict_gesture(img):
    """Run the CNN on a 150x150 image scaled to [0, 1] and return the label
    with the highest score."""
    img = cv2.resize(img, (150, 150))
    img_array = np.expand_dims(img, axis=0) / 255.0
    predictions = model.predict(img_array)
    predicted_class = gesture_classes[np.argmax(predictions)]
    return predicted_class

def count_raised_fingers(hand_landmarks):
    """Count extended fingers: a finger is treated as raised when the joint
    angle along it (CMC-MCP-TIP for the thumb, MCP-PIP-TIP for the others)
    is close to a straight line (> 160 degrees)."""
    fingers_status = [0, 0, 0, 0, 0]
    thumb_tip = hand_landmarks.landmark[mp.solutions.hands.HandLandmark.THUMB_TIP]
    thumb_ip = hand_landmarks.landmark[mp.solutions.hands.HandLandmark.THUMB_IP]
    thumb_mcp = hand_landmarks.landmark[mp.solutions.hands.HandLandmark.THUMB_MCP]
    thumb_cmc = hand_landmarks.landmark[mp.solutions.hands.HandLandmark.THUMB_CMC]
    angle_thumb = calculate_angle(thumb_cmc, thumb_mcp, thumb_tip)

    if angle_thumb > 160:
        fingers_status[0] = 1
    for i, finger_tip_id in enumerate([mp.solutions.hands.HandLandmark.INDEX_FINGER_TIP,
                                       mp.solutions.hands.HandLandmark.MIDDLE_FINGER_TIP,
                                       mp.solutions.hands.HandLandmark.RING_FINGER_TIP,
                                       mp.solutions.hands.HandLandmark.PINKY_TIP]):
        finger_tip = hand_landmarks.landmark[finger_tip_id]
        finger_pip = hand_landmarks.landmark[finger_tip_id - 2]
        finger_mcp = hand_landmarks.landmark[finger_tip_id - 3]
        angle_finger = calculate_angle(finger_mcp, finger_pip, finger_tip)

        if angle_finger > 160:
            fingers_status[i + 1] = 1
    return sum(fingers_status)

def calculate_angle(point1, point2, point3):
    """Angle at point2 (in degrees, 0-180) formed by the segments
    point2->point1 and point2->point3."""
    angle = np.arctan2(point3.y - point2.y, point3.x - point2.x) - np.arctan2(point1.y - point2.y, point1.x - point2.x)
    angle = np.abs(angle)
    if angle > np.pi:
        angle = 2 * np.pi - angle
    return angle * 180 / np.pi

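# Worked example of the 160-degree threshold used above: for a fully extended
# index finger the MCP, PIP and TIP landmarks are nearly collinear, so the
# angle at the PIP approaches 180 degrees; for a curled finger the tip folds
# back towards the palm and the angle drops well below 160, so that finger is
# not counted as raised.
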
def is_ok_gesture(hand_landmarks):
    """Detect the "OK" sign: thumb and index fingertips touching while the
    middle, ring and pinky fingers are bent."""
    thumb_tip = hand_landmarks.landmark[mp.solutions.hands.HandLandmark.THUMB_TIP]
    index_tip = hand_landmarks.landmark[mp.solutions.hands.HandLandmark.INDEX_FINGER_TIP]
    distance = np.linalg.norm(np.array([thumb_tip.x, thumb_tip.y]) - np.array([index_tip.x, index_tip.y]))
    # Check that the remaining fingers are bent (tip below its PIP joint).
    middle_tip = hand_landmarks.landmark[mp.solutions.hands.HandLandmark.MIDDLE_FINGER_TIP]
    ring_tip = hand_landmarks.landmark[mp.solutions.hands.HandLandmark.RING_FINGER_TIP]
    pinky_tip = hand_landmarks.landmark[mp.solutions.hands.HandLandmark.PINKY_TIP]
    middle_pip = hand_landmarks.landmark[mp.solutions.hands.HandLandmark.MIDDLE_FINGER_PIP]
    ring_pip = hand_landmarks.landmark[mp.solutions.hands.HandLandmark.RING_FINGER_PIP]
    pinky_pip = hand_landmarks.landmark[mp.solutions.hands.HandLandmark.PINKY_PIP]
    middle_finger_bent = middle_tip.y > middle_pip.y
    ring_finger_bent = ring_tip.y > ring_pip.y
    pinky_finger_bent = pinky_tip.y > pinky_pip.y
    return distance < 0.05 and middle_finger_bent and ring_finger_bent and pinky_finger_bent  # adjust this threshold to suit your setup
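
# Popup flow used by the handlers below: when a gesture is recognised the
# worker loop sets paused and popup_open and opens a Tk window; pressing
# "Continue" (or closing the window) clears both flags and restarts
# recognition via start_recognition(show_frame). popup_open keeps a second
# popup from opening while one is already on screen.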
def handle_ok_gesture():
    global paused, popup_open
    if not popup_open:
        paused = True
        popup_open = True
        show_ok_window()

def show_ok_window():
    def on_continue():
        global paused, popup_open
        paused = False
        popup_open = False  # clear the flag once the popup is closed
        ok_window.destroy()
        start_recognition(show_frame)
    ok_window = Tk()
    ok_window.title("Gesture Detection")
    label = Label(ok_window, text="OK gesture detected", font=('Helvetica', 24, 'bold'))
    label.pack(pady=20)
    continue_button = Button(ok_window, text="Continue", command=on_continue)
    continue_button.pack(pady=10)
    ok_window.protocol("WM_DELETE_WINDOW", on_continue)
    ok_window.mainloop()

def handle_finger_detection(finger_count):
    global paused, popup_open
    # Only react when no popup is already open; five fingers asks whether to
    # stop, any other count from 1 to 10 just reports the number.
    if popup_open:
        return
    number_words = {1: "one", 2: "two", 3: "three", 4: "four", 6: "six",
                    7: "seven", 8: "eight", 9: "nine", 10: "ten"}
    if finger_count == 5:
        paused = True
        popup_open = True
        show_stop_recognition_window()
    elif finger_count in number_words:
        paused = True
        popup_open = True
        plural = "" if finger_count == 1 else "s"
        show_finger_window(f"You raised {number_words[finger_count]} finger{plural}")

def show_finger_window(message):
    def on_continue():
        global paused, popup_open
        paused = False
        popup_open = False  # clear the flag once the popup is closed
        finger_window.destroy()
        start_recognition(show_frame)
    finger_window = Tk()
    finger_window.title("Finger Detection")
    label = Label(finger_window, text=message, font=('Helvetica', 24, 'bold'))
    label.pack(pady=20)
    continue_button = Button(finger_window, text="Continue", command=on_continue)
    continue_button.pack(pady=10)
    finger_window.protocol("WM_DELETE_WINDOW", on_continue)
    finger_window.mainloop()

def show_stop_recognition_window():
    def on_continue():
        global paused, popup_open
        paused = False
        popup_open = False  # clear the flag once the popup is closed
        stop_window.destroy()
        start_recognition(show_frame)
    def on_stop():
        global popup_open
        stop_recognition()
        popup_open = False  # clear the flag once the popup is closed
        stop_window.destroy()
    stop_window = Tk()
    stop_window.title("Stop Recognition")
    label = Label(stop_window, text="You raised five fingers. Stop recognition?", font=('Helvetica', 24, 'bold'))
    label.pack(pady=20)
    continue_button = Button(stop_window, text="Continue", command=on_continue)
    continue_button.pack(side=LEFT, padx=10, pady=10)
    stop_button = Button(stop_window, text="Stop", command=on_stop)
    stop_button.pack(side=RIGHT, padx=10, pady=10)
    stop_window.protocol("WM_DELETE_WINDOW", on_continue)
    stop_window.mainloop()

def show_frame(img=None):
    """Draw a frame onto the Tk canvas: either the annotated frame handed in
    by the recognition thread or a freshly captured one, then reschedule
    itself while recognition is not paused."""
    global paused
    if keep_running and cap.isOpened():
        if img is not None:
            frame = img
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        else:
            ret, frame = cap.read()
            if not ret:
                return
            frame = cv2.flip(frame, 1)
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        img = Image.fromarray(frame_rgb)
        imgtk = ImageTk.PhotoImage(image=img)
        canvas.create_image(0, 0, anchor=NW, image=imgtk)
        canvas.image = imgtk
        if not paused:
            root.after(10, show_frame)
        else:
            root.update_idletasks()
            root.update()

if __name__ == "__main__":
    root = Tk()
    root.title("Gesture Recognition")
    canvas = Canvas(root, width=640, height=480)
    canvas.pack()
    start_button = Button(root, text="Start Recognition", command=lambda: start_recognition(show_frame))
    start_button.pack(side=LEFT, padx=10, pady=10)
    stop_button = Button(root, text="Stop Recognition", command=stop_recognition)
    stop_button.pack(side=RIGHT, padx=10, pady=10)
    root.mainloop()