import pyautogui
import time
from tkinter import *

import cv2
import sendinput

from calculate_model import Capture_Point
from region_detection import Region_Detector


def show_capture(win_name, roi_image):
    """Show an inference result frame in a resizable 640x360 OpenCV window
    pinned to the top-left corner of the screen.

    Args:
        win_name: Title of the OpenCV window (also the captured game window's name).
        roi_image: BGR image (numpy array) returned by the inference step.
    """
    cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
    cv2.resizeWindow(win_name, 640, 360)
    cv2.imshow(win_name, roi_image)
    cv2.moveWindow(win_name, 0, 0)
    cv2.getWindowImageRect(win_name)


def main():
    """Build the Tk window-picker UI.

    Lists every open desktop window reported by Region_Detector.getButtonInfo()
    as a button; clicking a button starts the capture/aim loop for that window.
    Blocks in Tk's mainloop until the UI is closed.
    """
    user_interface = Tk()  # UI start
    user_interface.title("辅助瞄准")
    user_interface.geometry("480x640")
    user_interface.resizable(0, 0)
    Label(user_interface,
          text='以下是你打开的窗口,请选择要使用的窗口',
          font=(None, 15), fg='#66CCFF', bg="black").pack()
    # presumably a mapping of window handle -> window title; verify against Region_Detector
    ht = Region_Detector.getButtonInfo()
    for hwnd, title in ht.items():
        if title != "":
            # Bind `title` as a default argument so each button captures its
            # own window title instead of the loop's last value (late binding).
            Button(user_interface, text=title, width=50, height=1,
                   command=lambda title=title: runhelper(title),
                   activeforeground="red", activebackground="black").pack()
    mainloop()  # UI end


def runhelper(wn):
    """Run the capture / aim-assist loop against the window titled `wn`.

    Repeatedly grabs the window, extracts the ROI, runs inference via
    Capture_Point, and — when a target is found under key "1" — moves the
    mouse toward it and fires several left clicks through sendinput.
    Press 'q' in the OpenCV preview window to exit the loop.

    Args:
        wn: Title of the window to capture (as shown in the picker UI).
    """
    win_name = wn
    print("现在捕获的窗口是:", end="")
    print(win_name)
    win_size = [1920, 1080]   # window size
    win_index = [0, 0]        # window placement position
    win_more = 0              # extra window area, i.e. the title bar

    RD = Region_Detector(win_name, win_more)  # create the window-handling object
    hwdn = RD.getWindowHandle()               # get the window handle
    RD.setWindows(hwdn, win_size, win_index)  # apply window parameters

    while True:
        MouseX, MouseY = pyautogui.position()
        start_time = time.time()  # frame start time

        img, Screen = RD.getWindowsRGB(hwdn)                        # grab window screenshot
        img_roi, Screen_c, [x0, y0, x1, y1] = RD.getRegion(img, Screen)  # crop ROI from screenshot
        # Run inference on the ROI; returns annotated image plus detected coordinates.
        roi_image, my_result = Capture_Point(img_roi, confidence=0.9)
        print("捕获结果:", my_result)

        # Guard against ZeroDivisionError: on Windows time.time() has coarse
        # resolution, so a fast frame can yield elapsed == 0.
        elapsed = time.time() - start_time
        fps = int(1 / elapsed) if elapsed > 0 else 0
        print("处理帧数:{}".format(fps))

        show_capture(win_name, roi_image)  # display result image in the OpenCV window
        if cv2.waitKey(1) & 0xFF == ord("q"):  # quit hotkey
            break
        if my_result == {}:  # nothing detected this frame
            continue

        x = int(my_result["1"][0])
        y = int(my_result["1"][1])
        # Offset from ROI-center (640, 360) to the target, scaled by 1.1;
        # relative move applied via sendinput.
        currentMouseX = (640 + x - MouseX) * 1.1
        currentMouseY = (360 + y - MouseY) * 1.1
        sendinput.Mouse.move(int(currentMouseX), int(currentMouseY))
        time.sleep(0.001)
        sendinput.Mouse.leftClick()
        sendinput.Mouse.leftClick()
        sendinput.Mouse.leftClick()
        sendinput.Mouse.leftClick()

    cv2.destroyAllWindows()