diff --git a/src/Screenshot/Audio_record.py b/src/Screenshot/Audio_record.py
new file mode 100644
index 0000000..89f3561
--- /dev/null
+++ b/src/Screenshot/Audio_record.py
@@ -0,0 +1,133 @@
+import time
+import wave
+from pathlib import Path
+from threading import Thread
+
+from pyaudio import PyAudio, paInt16, paContinue, paComplete
+
+
+class AudioRecord(PyAudio):
+
+    def __init__(self, channels=2):
+        super().__init__()
+        self.chunk = 1024  # frames per buffer
+        self.format_sample = paInt16  # sample format (16-bit)
+        self.channels = channels  # channels: 1 = mono, 2 = stereo
+        self.fps = 44100  # sampling rate
+        self.input_dict = None
+        self.output_dict = None
+        self.stream = None
+        self.filename = '~test.wav'
+        self.wf = None
+        self.stop_flag = False
+        self.kill = False
+
+    def callback_input(self, in_data, frame_count, time_info, status):
+        """Recording callback."""
+        self.wf.writeframes(in_data)
+        if not self.stop_flag:
+            return (in_data, paContinue)
+        else:
+            return (in_data, paComplete)
+
+    def callback_output(self, in_data, frame_count, time_info, status):
+        """Playback callback."""
+        data = self.wf.readframes(frame_count)
+        return (data, paContinue)
+
+    def open_stream(self, name):
+        """Open the recording stream."""
+        input_device_index = self.get_device_index(name, True) if name else None
+        return self.open(format=self.format_sample,
+                         channels=self.channels,
+                         rate=self.fps,
+                         frames_per_buffer=self.chunk,
+                         input=True,
+                         input_device_index=input_device_index,
+                         stream_callback=self.callback_input
+                         )
+
+    def audio_record_run(self, name=None):
+        """Record audio until stop_flag is set."""
+        self.wf = self.save_audio_file(self.filename)
+        self.stream = self.open_stream(name)
+        self.stream.start_stream()
+        while self.stream.is_active():
+            time.sleep(0.1)
+        self.wf.close()
+        if self.kill:
+            Path(self.filename).unlink()
+        self.duration = self.get_duration()
+        self.terminate_run()
+
+    def run(self, filename=None, name=None, record=True):
+        """Start recording (or playback) in a background thread."""
+        if record:
+            if filename:
+                self.filename = filename
+            thread_1 = Thread(target=self.audio_record_run, args=(name,))
+        else:
+            if not filename:
+                raise Exception('No audio filename was given, cannot play back. Please provide one and try again!')
+            thread_1 = Thread(target=self.read_audio, args=(filename, name,))
+        thread_1.start()
+
+    def read_audio(self, filename, name=None):
+        """Play back an audio file."""
+        output_device_index = self.get_device_index(name, False) if name else None
+        with wave.open(filename, 'rb') as self.wf:
+            # get the audio duration
+            self.duration = self.get_duration()
+            self.stream = self.open(format=self.get_format_from_width(self.wf.getsampwidth()),
+                                    channels=self.wf.getnchannels(),
+                                    rate=self.wf.getframerate(),
+                                    output=True,
+                                    output_device_index=output_device_index,
+                                    stream_callback=self.callback_output
+                                    )
+            self.stream.start_stream()
+            while self.stream.is_active():
+                time.sleep(0.1)
+            print(self.duration)
+        self.terminate_run()
+
+    def get_duration(self):
+        """Return the audio duration in seconds."""
+        return round(self.wf.getnframes() / self.wf.getframerate(), 2)
+
+    def get_in_out_devices(self):
+        """Collect the system's input and output devices."""
+        self.input_dict = {}
+        self.output_dict = {}
+        for i in range(self.get_device_count()):
+            devinfo = self.get_device_info_by_index(i)
+            if not devinfo['hostApi'] and int(devinfo['defaultSampleRate']) == self.fps \
+                    and '映射器' not in devinfo['name']:
+                if devinfo['maxInputChannels']:
+                    self.input_dict[devinfo['name'].split(' ')[0]] = i
+                elif devinfo['maxOutputChannels']:
+                    self.output_dict[devinfo['name'].split(' ')[0]] = i
+
+    def get_device_index(self, name, inp=True):
+        """Return the index of the selected device."""
+        if inp and self.input_dict:
+            return self.input_dict.get(name, -1)
+        elif not inp and self.output_dict:
+            return self.output_dict.get(name, -1)
+
+    def save_audio_file(self, filename):
+        """Create the WAV file that recorded frames are written to."""
+        wf = wave.open(filename, 'wb')
+        wf.setnchannels(self.channels)
+        wf.setsampwidth(self.get_sample_size(self.format_sample))
+        wf.setframerate(self.fps)
+        return wf
+
+    def terminate_run(self):
+        """Close the recording/playback stream and release PyAudio."""
+        if self.stream is not None:
+            self.stream.stop_stream()
+            self.stream.close()
+        self.stream = None
+        self.wf = None
+        self.terminate()
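A minimal usage sketch for the class above — record a few seconds from the default input device and read back the duration. The `demo.wav` name and the sleep durations are my own choices, not part of this diff:

```python
import time

from Audio_record import AudioRecord

audio = AudioRecord(channels=2)
audio.get_in_out_devices()        # fills input_dict / output_dict with usable devices
print(audio.input_dict)           # e.g. {'麦克风': 1} on a Windows machine

audio.run(filename='demo.wav')    # recording starts in a background thread
time.sleep(5)                     # capture roughly five seconds
audio.stop_flag = True            # the callback returns paComplete and the stream ends
time.sleep(0.5)                   # give audio_record_run a moment to close the file
print(audio.duration)             # duration of demo.wav in seconds
```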
diff --git a/src/Screenshot/PyAudio-0.2.11-cp37-cp37m-win_amd64.whl b/src/Screenshot/PyAudio-0.2.11-cp37-cp37m-win_amd64.whl
new file mode 100644
index 0000000..72fbfc8
Binary files /dev/null and b/src/Screenshot/PyAudio-0.2.11-cp37-cp37m-win_amd64.whl differ
diff --git a/src/Screenshot/README.md b/src/Screenshot/README.md
new file mode 100644
index 0000000..4aa3f83
--- /dev/null
+++ b/src/Screenshot/README.md
@@ -0,0 +1,39 @@
+# Build a screen recorder with Python
+
+*Updated: 2022-04-20*
+> Windows 10
+> Python 3.7
+
+- Screenshot_Gui.exe [58.5M] [download](https://github.com/lk-itween/FunnyCodeRepository/releases/download/Screenshot_Gui/Screenshot_Gui.exe)
+ffmpeg.exe is first compressed to 7z and bundled into the exe, which is built with pyinstaller, so the resulting file is fairly large
+
+- Screenshot_Gui.py
+The PyQt5 screen-recording window; the entry point of the GUI program
+
+- Screenshot_record.py
+Records the screen by driving the ffmpeg command line from Python
+
+- Audio_record.py
+Uses pyaudio to enumerate recordable audio devices and record from the selected one
+
+- Screenshot_record_with_cv2.py
+Records the screen with opencv-python and saves the video
+
+- Screenshot_test.py
+Window-less test that drives screen recording and audio recording together, so both start and stop at the same time
+
+- resource
+Programs and images needed while the program runs
+
+- requirements.txt
+All required packages are listed in requirements.txt; install them with pip install -r requirements.txt.
+
+- PyAudio-0.2.11-cp37-cp37m-win_amd64.whl
+PyAudio wheel for a Python 3.7 environment
+
+
+**ps:**
+*1. Download a suitable ffmpeg build from https://www.gyan.dev/ffmpeg/builds/#release-builds , unpack it and move bin/ffmpeg.exe into the resource folder*
+*2. If PyAudio cannot be installed online with pip, on Python 3.7 download PyAudio-0.2.11-cp37-cp37m-win_amd64.whl, open a cmd window in that directory and run pip install on the whl file*
+*3. For any other Python version, install the matching wheel from https://www.lfd.uci.edu/~gohlke/pythonlibs/#pyaudio*
+
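In short, the environment needs ffmpeg.exe under resource/ and an importable PyAudio before anything can be recorded. A small sanity-check sketch (the check itself is mine, not part of the repository; it only mirrors notes 1–3 above):

```python
from pathlib import Path


def check_prerequisites():
    """Verify the two external prerequisites described in the README notes."""
    ok = True
    if not (Path('resource/ffmpeg.exe').exists() or Path('resource/ffmpeg.7z').exists()):
        print('resource/ffmpeg.exe (or ffmpeg.7z) is missing - see note 1')
        ok = False
    try:
        import pyaudio  # noqa: F401  (wheel install, see notes 2 and 3)
    except ImportError:
        print('PyAudio is not installed - see notes 2 and 3')
        ok = False
    return ok


if __name__ == '__main__':
    print('ready to record' if check_prerequisites() else 'environment incomplete')
```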
diff --git a/src/Screenshot/Screenshot_Gui.py b/src/Screenshot/Screenshot_Gui.py
new file mode 100644
index 0000000..0025695
--- /dev/null
+++ b/src/Screenshot/Screenshot_Gui.py
@@ -0,0 +1,360 @@
+# -*- coding: utf-8 -*-
+
+# Form implementation generated from reading ui file 'Screenshot_GUI.ui'
+#
+# Created by: PyQt5 UI code generator 5.15.4
+#
+# WARNING: Any manual changes made to this file will be lost when pyuic5 is
+# run again. Do not edit this file unless you know what you are doing.
+import shutil
+import sys
+
+from datetime import datetime
+from pathlib import Path
+
+from PIL import ImageGrab
+from PyQt5.QtCore import QRect, Qt, QPoint, QMetaObject, QThread
+from PyQt5.QtGui import QIcon, QFont, QColor, QImage, QPixmap, QPen, QPainter
+from PyQt5.QtWidgets import (QMainWindow, QApplication, QDesktopWidget, QWidget, QFrame,
+                             QLabel, QPushButton, QComboBox, QCheckBox, QSpinBox, QDialog, QFileDialog, QMessageBox)
+from py7zr import pack_7zarchive, unpack_7zarchive
+
+from Screenshot.Audio_record import AudioRecord
+from Screenshot.Screenshot_record import Screenshot, file_path
+
+
+def unpack_7zip_to_exe(filename):
+    file_basename = Path(file_path(f'{filename}.exe'))
+    if file_basename.exists():
+        return True
+    elif file_basename.with_suffix('.7z').exists():
+        shutil.unpack_archive(file_basename.with_suffix('.7z'), file_basename.parent)
+        return unpack_7zip_to_exe(file_basename.stem)
+    else:
+        return False
+
+
+class Thread_screenshot(QThread):
+
+    def __init__(self):
+        super().__init__()
+        self.offset_x = 0
+        self.offset_y = 0
+        self.draw_mouse = 1
+        self.filename = ''
+        self.screen = Screenshot()
+
+    def config_modify(self, **kwargs):
+        self.screen.width = kwargs.get('width', self.screen.width)
+        self.screen.height = kwargs.get('height', self.screen.height)
+        self.screen.fps = kwargs.get('fps', self.screen.fps)
+        self.offset_x = kwargs.get('offset_x', self.offset_x)
+        self.offset_y = kwargs.get('offset_y', self.offset_y)
+        self.draw_mouse = kwargs.get('draw_mouse', self.draw_mouse)
+        self.filename = kwargs.get('filename', self.filename)
+
+    def compose(self):
+        video = self.filename + '.mp4'
+        audio = self.filename + '.mp3'
+        self.screen.compose_audio(video, audio, video.replace('缓存', ''))
+        QThread.sleep(1)
+        self.stop()
+        Path(audio).unlink()
+        Path(video).unlink()
+
+    def run(self):
+        if not self.filename:
+            raise Exception('filename is not set!')
+        filename = self.filename + '.mp4'
+        self.screen.record(filename, self.offset_x, self.offset_y, self.draw_mouse)
+
+    def stop(self):
+        self.screen.terminate()
+
+
+class Ui_MainWindow1(QMainWindow):
+    """Main window."""
+
+    def __init__(self):
+        super().__init__()
+        self.setObjectName("MainWindow")
+        self.setWindowTitle('屏幕录制')
+        windowico = file_path('gui_svg.svg')
+        self.setWindowIcon(QIcon(windowico))
+        self.resize(512, 352)
+        self.setFixedSize(512, 352)
+        self._dir = '.'
+        self.fname = ''
+        self.offset_x, self.offset_y = 0, 0
+        self.screen_x, self.screen_y = ImageGrab.grab().size
+        self.audio = AudioRecord()
+        self.audio.get_in_out_devices()
+        self.device_name = None
+        self.record_audio = True if self.audio.input_dict else False
+        self.hotkey_start = False
+        self.screenshot = Thread_screenshot()
+        self.setupUi()
+        ffmpeg_exists = unpack_7zip_to_exe('ffmpeg')
+        if not ffmpeg_exists:
+            QMessageBox.warning(self, '请下载ffmpeg!', '请将ffmpeg.exe放在resource目录下再录制!')
+        self.show()
+
+    def setupUi(self):
+        self.centralwidget = QWidget(self)
+        self.centralwidget.setObjectName("centralwidget")
+        self.label0 = QLabel(self.centralwidget)
+        self.label0.setGeometry(QRect(0, 0, 421, 351))
+        self.label0.setObjectName("scrollArea")
+        # add a frame border
+        self.label0.setFrameShape(QFrame.Box)
+        self.label0.setFrameShadow(QFrame.Sunken)
+        self.get_screen_grab((0, 0, self.screen_x, self.screen_y))
+        self.label0.setAlignment(Qt.AlignCenter)
+        self.label_1 = QLabel(self.centralwidget)
+        font = QFont()
+        font.setBold(True)
+        self.label_1.setGeometry(QRect(430, 0, 85, 31))
+        self.label_1.setTextFormat(Qt.AutoText)
+        self.label_1.setFont(font)
+        self.label_1.setScaledContents(True)
+        self.label_1.setObjectName("屏幕录制器")
+        self.label_6 = QLabel(self.centralwidget)
+        self.label_6.setGeometry(QRect(430, 20, 85, 31))
+        self.label_6.setTextFormat(Qt.AutoText)
+        self.label_6.setScaledContents(True)
+        self.label_6.setObjectName("注释1")
+        self.checkbox = QCheckBox(self.centralwidget)
+        self.checkbox.setGeometry(QRect(430, 100, 85, 20))
+        self.checkbox.setChecked(True)
+        self.checkbox.stateChanged.connect(self.mouse_draw)
+        self.checkbox.setObjectName("录制鼠标")
+        self.comboBox = QComboBox(self.centralwidget)
+        self.comboBox.setGeometry(QRect(430, 200, 85, 20))
+        self.comboBox.setEditable(False)
+        self.comboBox.addItems(self.audio.input_dict.keys())
+        self.comboBox.addItems(['无'])
+        self.comboBox.setMaxVisibleItems(9)
+        self.comboBox.currentTextChanged.connect(self.change_combox)
+        self.comboBox.setObjectName("音频来源")
+        self.comboBox_3 = QComboBox(self.centralwidget)
+        self.comboBox_3.setGeometry(QRect(430, 260, 80, 20))
+        self.comboBox_3.addItems(['双声道', '单声道'])
+        self.comboBox_3.currentIndexChanged.connect(self.change_combox)
+        self.comboBox_3.setObjectName("声道设置")
+        self.pushButton = QPushButton(self.centralwidget)
+        self.pushButton.setGeometry(QRect(470, 320, 41, 31))
+        self.pushButton.clicked.connect(self.stop)
+        self.pushButton.setObjectName("停止")
+        self.pushButton_2 = QPushButton(self.centralwidget)
+        self.pushButton_2.setGeometry(QRect(430, 320, 41, 31))
+        self.pushButton_2.setObjectName("开始")
+        self.pushButton_2.clicked.connect(self.start)
+        self.pushButton_3 = QPushButton(self.centralwidget)
+        self.pushButton_3.setGeometry(QRect(430, 290, 80, 23))
+        self.pushButton_3.setObjectName("保存目录")
+        self.pushButton_3.clicked.connect(self.open_dirpath)
+        self.pushButton_4 = QPushButton(self.centralwidget)
+        self.pushButton_4.setGeometry(QRect(430, 70, 80, 23))
+        self.pushButton_4.setCheckable(True)
+        self.pushButton_4.setObjectName("Size")
+        self.pushButton_4.clicked.connect(self.get_screen_area)
+        self.spinBox = QSpinBox(self.centralwidget)
+        self.spinBox.setValue(15)
+        self.spinBox.setRange(10, 60)
+        self.spinBox.setGeometry(QRect(430, 145, 80, 20))
+        self.spinBox.valueChanged.connect(self.screen_fps_modify)
+        self.spinBox.setObjectName("spinBox")
+        self.label_2 = QLabel(self.centralwidget)
+        self.label_2.setGeometry(QRect(430, 50, 54, 16))
+        self.label_2.setTextFormat(Qt.AutoText)
+        self.label_2.setObjectName("选择区域")
+        self.label_3 = QLabel(self.centralwidget)
+        self.label_3.setGeometry(QRect(430, 125, 54, 16))
+        self.label_3.setObjectName("帧率")
+        self.label_4 = QLabel(self.centralwidget)
+        self.label_4.setGeometry(QRect(430, 240, 54, 16))
+        self.label_4.setObjectName("声道设置")
+        self.label_5 = QLabel(self.centralwidget)
+        self.label_5.setGeometry(QRect(430, 180, 54, 16))
+        self.label_5.setObjectName("音频来源")
+        self.setCentralWidget(self.centralwidget)
+
+        self.retranslateUi()
+        QMetaObject.connectSlotsByName(self)
+
+    def retranslateUi(self):
+        self.pushButton.setText("停止")
+        self.pushButton_2.setText("开始")
+        self.pushButton_3.setText("保存目录")
+        self.pushButton_4.setText("Size*Size")
+        self.checkbox.setText("录制鼠标")
+        self.label_1.setText("屏幕录制器")
+        self.label_2.setText("选择区域:")
+        self.label_3.setText("帧率:")
+        self.label_4.setText("声道设置:")
+        self.label_5.setText("音频来源:")
+        self.label_6.setText("F7:开始/停止")
+
+    def keyPressEvent(self, event):
+        """Watch for F7: stop if recording has already started, otherwise start recording."""
+        if event.key() == Qt.Key_F7:
+            if not self.hotkey_start:
+                self.start()
+            else:
+                self.stop()
+
+    def change_combox(self, event):
+        sendername = self.sender().objectName()
+        if sendername == '声道设置':
+            self.channels = 2 if event == 0 else 1
+        elif sendername == '音频来源':
+            if event == '无':
+                self.record_audio = False
+            else:
+                self.record_audio = True
+                self.device_name = event
+
+    def get_screen_grab(self, crop_size):
+        # grab the screen and show it scaled inside self.label0
+        screen = ImageGrab.grab().convert('RGBA')
+        screen = screen.crop(box=crop_size)  # box=(left, upper, right, lower)
+        data = screen.tobytes("raw", "RGBA")
+        qim = QImage(data, screen.size[0], screen.size[1], QImage.Format_RGBA8888)
+        pix = QPixmap.fromImage(qim).scaled(self.label0.size(), aspectRatioMode=Qt.KeepAspectRatio)
+        pix.detach()  # give the pixmap its own copy of the image data so it stays valid when touched from another thread, avoiding crashes until the QPixmap is destroyed
+        self.label0.setPixmap(pix)
+        self.label0.repaint()
+        return screen.size
+
+    def get_screen_area(self):
+        # let the user drag out the capture region
+        SizeScreen = MousePaint()
+        SizeScreen.exec_()
+        self.offset_x, self.offset_y = SizeScreen.lastpoint
+        end_x, end_y = SizeScreen.endpoint
+        self.screen_x, self.screen_y = self.get_screen_grab((self.offset_x, self.offset_y, end_x, end_y))
+        self.screenshot.config_modify(offset_x=self.offset_x, offset_y=self.offset_y,
+                                      width=self.screen_x, height=self.screen_y)
+        self.pushButton_4.setText(f'{self.screen_x}*{self.screen_y}')
+
+    def screen_fps_modify(self, fps):
+        self.screenshot.config_modify(fps=fps)
+
+    def mouse_draw(self, checked):
+        draw_mouse = 1 if checked else 0
+        self.screenshot.config_modify(draw_mouse=draw_mouse)
+
+    def open_dirpath(self):
+        fdir = QFileDialog.getExistingDirectory(self, '选择目录', self._dir)
+        if not fdir:
+            return None
+        self._dir = fdir
+        self.set_filename()
+
+    def set_filename(self):
+        self.fname = f'{self._dir}/屏幕录制缓存_{int(datetime.now().replace(microsecond=0).timestamp())}'
+        self.screenshot.config_modify(filename=self.fname)
+        self.audio = AudioRecord()
+        self.setWindowTitle(f'屏幕录制 {self.fname}')
+
+    def start(self):
+        if self.hotkey_start:
+            return
+        if not self.fname:
+            QMessageBox.warning(self, '请选择保存目录!', '请选择保存目录!')
+            return
+        self.screenshot.start()
+        if self.record_audio:
+            self.audio.stop_flag = False
+            self.audio.run(filename=self.fname + '.mp3', name=self.device_name)
+        self.hotkey_start = True
+
+    def stop(self):
+        if not self.hotkey_start:
+            return
+        self.audio.stop_flag = True
+        self.hotkey_start = False
+        self.screenshot.stop()
+        if self.record_audio:
+            self.screenshot.compose()
+        else:
+            filename = self.fname + '.mp4'
+            target = filename.replace('缓存', '')
+            Path(filename).replace(target)
+        self.set_filename()
+
+
+class MousePaint(QDialog):
+    """Drag the mouse to pick the screen-capture region."""
+
+    def __init__(self):
+        super().__init__()
+        self.setMouseTracking(True)
+        # fill the whole screen
+        self.showFullScreen()
+        # frameless, always-on-top window
+        self.setWindowFlags(Qt.FramelessWindowHint)
+        # semi-transparent so the desktop stays visible
+        self.setWindowOpacity(0.5)
+        self.initUI()
+        self.setFocus()
+
+    def initUI(self):
+        self.setGeometry(*(QDesktopWidget().screenGeometry()).getRect())
+        self.pix = QPixmap()
+        self.lastpoint = QPoint()
+        self.endpoint = QPoint()
+        self.pos = None
+        self.bline = 0
+
+    def mousePressEvent(self, event):
+        # mouse pressed: remember the start corner
+        if event.button() == Qt.LeftButton:
+            self.lastpoint = event.x(), event.y()
+            self.bline = 1
+        elif event.button() == Qt.RightButton:
+            self.close()
+        event.accept()
+
+    def mouseReleaseEvent(self, event):
+        # mouse released: remember the end corner and close the overlay
+        self.endpoint = event.x(), event.y()
+        self.bline = 0
+        event.accept()
+        self.close()
+
+    def mouseMoveEvent(self, event):
+        # mouse moved: track the current corner while dragging
+        if self.bline == 1:
+            self.pos = event.x(), event.y()
+            event.accept()
+            self.update()
+
+    def paintEvent(self, event):
+        # draw the selection rectangle
+        if self.bline == 1:
+            pp = QPainter(self)
+            pen = QPen()  # pen style object
+            pen.setWidth(5)  # pen width
+            pen.setColor(QColor(255, 0, 0))
+            pp.setPen(pen)
+            lpx, lpy = self.lastpoint
+            pp.drawRect(lpx, lpy, self.pos[0] - lpx, self.pos[1] - lpy)
+        event.accept()
+
+
+def main():
+    """Program entry point."""
+    # register the 7zip format with shutil
+    shutil.register_archive_format('7zip', pack_7zarchive, description='7zip archive')
+    shutil.register_unpack_format('7zip', ['.7z'], unpack_7zarchive, description='7zip archive')
+    app = QApplication(sys.argv)
+    app.setAttribute(Qt.AA_UseHighDpiPixmaps)
+    ui = Ui_MainWindow1()
+    sys.exit(app.exec_())
+
+
+if __name__ == '__main__':
+    main()
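For reference, the same start/stop/compose sequence the buttons trigger can be driven without the window. A rough headless sketch (the 1280x720 region, the 10-second duration and the `屏幕录制缓存_demo` base name are my own placeholders; it assumes the modules are importable as `Screenshot.*`, as in the imports above):

```python
import time

from PyQt5.QtWidgets import QApplication

from Screenshot.Audio_record import AudioRecord
from Screenshot.Screenshot_Gui import Thread_screenshot

app = QApplication([])                    # Qt wants an application object around its threads

base = './屏幕录制缓存_demo'               # compose() strips '缓存' for the final file name

video = Thread_screenshot()
video.config_modify(width=1280, height=720, offset_x=0, offset_y=0,
                    fps=15, draw_mouse=1, filename=base)

audio = AudioRecord()

video.start()                             # QThread.run() -> Screenshot.record(base + '.mp4')
audio.run(filename=base + '.mp3')         # wave data, despite the .mp3 suffix the GUI uses

time.sleep(10)                            # capture roughly ten seconds

audio.stop_flag = True                    # audio callback returns paComplete
video.stop()                              # sends 'q' to the ffmpeg child process
video.compose()                           # mux into './屏幕录制_demo.mp4' and drop the temp files
```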
diff --git a/src/Screenshot/Screenshot_record.py b/src/Screenshot/Screenshot_record.py
new file mode 100644
index 0000000..a19e253
--- /dev/null
+++ b/src/Screenshot/Screenshot_record.py
@@ -0,0 +1,68 @@
+"""
+Record the screen with ffmpeg and merge it with the audio captured by PyAudio
+into a video file with sound.
+"""
+import sys
+from pathlib import Path
+from time import sleep
+
+import ffmpeg
+
+
+def file_path(filename):
+    relative_path = Path('resource').joinpath(filename)
+    if getattr(sys, 'frozen', False):
+        base_path = Path(sys._MEIPASS)
+    else:
+        base_path = Path('.').absolute()
+    return Path(base_path).joinpath(relative_path).as_posix()
+
+
+class Screenshot:
+
+    def __init__(self, width=1920, height=1080, fps=15):
+        self.width = width
+        self.height = height
+        self.fps = fps
+        self.process = None
+        self.ffmpeg_path = file_path('ffmpeg.exe')
+
+    def __call__(self, width, height, fps=None):
+        self.width = width
+        self.height = height
+        self.fps = fps if fps else self.fps
+
+    @staticmethod
+    def unlink(filename):
+        Path(filename).unlink()
+
+    def record(self, filename, offset_x=0, offset_y=0, draw_mouse=0):
+        self.process = (
+            ffmpeg.output(
+                ffmpeg.input(
+                    filename='desktop', format='gdigrab', framerate=self.fps, offset_x=offset_x, offset_y=offset_y,
+                    draw_mouse=draw_mouse, s=f'{self.width}x{self.height}'),
+                filename=filename, pix_fmt='yuv420p'
+            ).overwrite_output()
+        )
+        self.ffmpeg_async()
+
+    def compose_audio(self, video_path, audio_path, output_path):
+        self.process = (
+            ffmpeg.output(
+                ffmpeg.input(filename=video_path),
+                ffmpeg.input(filename=audio_path),
+                filename=output_path, vcodec='copy', acodec='aac', strict='experimental', pix_fmt='yuv420p'
+            ).overwrite_output()
+        )
+        sleep(1)
+        self.ffmpeg_async()
+
+    def ffmpeg_async(self):
+        self.process = self.process.run_async(cmd=self.ffmpeg_path, pipe_stdin=True, pipe_stdout=False,
+                                              pipe_stderr=False)
+
+    def terminate(self):
+        if self.process is not None:
+            self.process.communicate(str.encode("q"))
+            self.process.terminate()
+            self.process = None
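Since the whole recorder is just an ffmpeg invocation, it can help to look at the exact arguments record() generates; ffmpeg-python can compile the stream without running it. A quick inspection sketch (the `capture.mp4` name and the literal ffmpeg path are placeholders of mine):

```python
import ffmpeg

fps, width, height = 15, 1920, 1080
stream = (
    ffmpeg
    .input(filename='desktop', format='gdigrab', framerate=fps,
           offset_x=0, offset_y=0, draw_mouse=1, s=f'{width}x{height}')
    .output('capture.mp4', pix_fmt='yuv420p')
    .overwrite_output()
)
# compile() returns the argument list that run_async() would execute,
# e.g. ['resource/ffmpeg.exe', '-f', 'gdigrab', '-framerate', '15', ..., 'capture.mp4', '-y']
print(stream.compile(cmd='resource/ffmpeg.exe'))
```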
diff --git a/src/Screenshot/Screenshot_record_with_cv2.py b/src/Screenshot/Screenshot_record_with_cv2.py
new file mode 100644
index 0000000..8f6a136
--- /dev/null
+++ b/src/Screenshot/Screenshot_record_with_cv2.py
@@ -0,0 +1,151 @@
+import time
+from pathlib import Path
+from threading import Thread
+
+import cv2
+import numpy as np
+from PIL import ImageGrab
+from numba import jit
+from pynput import keyboard
+
+
+@jit(nopython=True)
+def average_n(x, y):
+    """Blend two frames (weighted towards y) with NumPy."""
+    return ((x + y + y) // 3).astype(x.dtype)
+
+
+class ScreenshotVideo(Thread):
+
+    def __init__(self, width, high, path='', fps=15):
+        """Initialise recording parameters."""
+        super().__init__()
+        self.save_file = path
+        self.best_fps = fps
+        self.fps = fps
+        self.width = width
+        self.high = high
+        self.spend_time = 1
+        self.flag = False
+        self.kill = False
+        self.video = None
+
+    def __call__(self, path):
+        """Reset the output path so the instance can be reused."""
+        self.save_file = Path(path)
+        self.video = self.init_videowriter(self.save_file)
+
+    @staticmethod
+    def screenshot():
+        """Grab the screen and convert it to an np.array."""
+        return np.array(ImageGrab.grab())
+
+    @staticmethod
+    def get_fourcc(name):
+        """Map the file suffix to a FourCC codec string."""
+        fourcc_maps = {'.avi': 'I420',
+                       '.m4v': 'mp4v',
+                       '.mp4': 'avc1',
+                       '.ogv': 'THEO',
+                       '.flv': 'FLV1',
+                       }
+        return fourcc_maps.get(name)
+
+    def init_videowriter(self, path):
+        """Look up the codec and create a new video file."""
+        if not path:
+            raise Exception('Video path not set. Set it with\nvideo = ScreenshotVideo(width, high, fps=fps)\nvideo = video(video_path)')
+        path = Path(path) if isinstance(path, str) else path
+        fourcc = cv2.VideoWriter_fourcc(*self.get_fourcc(path.suffix))
+        return cv2.VideoWriter(path.as_posix(), fourcc, self.fps, (self.width, self.high))
+
+    def video_record_doing(self, img):
+        """Swap the colour channel order (RGB <-> BGR) and write the frame to the video."""
+        im_cv = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+        self.video.write(im_cv)
+
+    def video_record_end(self):
+        """Finish recording and decide whether the file is kept."""
+        self.video.release()
+        cv2.destroyAllWindows()
+        if self.save_file and self.kill:
+            Path(self.save_file).unlink()
+
+    def video_best_fps(self, path):
+        """Work out the best recording frame rate this machine can achieve."""
+        video = cv2.VideoCapture(path)
+        fps = video.get(cv2.CAP_PROP_FPS)
+        count = video.get(cv2.CAP_PROP_FRAME_COUNT)
+        self.best_fps = int(fps * ((int(count) / fps) / self.spend_time))
+        video.release()
+
+    def pre_video_record(self):
+        """Do a short test recording to determine the best fps."""
+        self.video = self.init_videowriter('test.mp4')
+        start_time = time.time()
+        for _ in range(10):
+            im = self.screenshot()
+            self.video_record_doing(im)
+        self.spend_time = round(time.time() - start_time, 4)
+        self.video_record_end()
+        time.sleep(2)
+        self.video_best_fps('test.mp4')
+        Path('test.mp4').unlink()
+
+    def insert_frame_array(self, frame_list):
+        """Interpolate extra frames with NumPy to reach the target fps."""
+        fps_n = round(self.fps / self.best_fps)
+        if fps_n <= 0:
+            return frame_list
+        times = int(np.log2(fps_n))  # number of doubling passes
+        for _ in range(times):
+            frame_list2 = map(average_n, [frame_list[0]] + frame_list[:-1], frame_list)
+            frame_list = [[x, y] for x, y in zip(frame_list2, frame_list)]
+            frame_list = [j for i in frame_list for j in i]
+        return frame_list
+
+    def frame2video_run(self):
+        """Turn the stream of screenshots into a video with OpenCV."""
+        self.video = self.init_videowriter(self.save_file)
+        start_time = time.time()
+        frame_list = []
+        while True:
+            frame_list.append(self.screenshot())
+            if self.flag:
+                break
+        self.spend_time = round(time.time() - start_time, 4)
+        if not self.kill:  # if recording was not aborted, process the frames one by one
+            frame_list = self.insert_frame_array(frame_list)
+            for im in frame_list:
+                self.video_record_doing(im)
+        self.video_record_end()
+
+    def hotkey(self):
+        """Hotkey listener."""
+        with keyboard.Listener(on_press=self.on_press) as listener:
+            listener.join()
+
+    def on_press(self, key):
+        try:
+            if key.char == 't':  # 't': stop recording and save the video
+                self.flag = True
+            elif key.char == 'k':  # 'k': abort recording and delete the file
+                self.flag = True
+                self.kill = True
+        except Exception as e:
+            print(e)
+
+    def run(self):
+        # start the hotkey listener as a daemon thread
+        Thread(target=self.hotkey, daemon=True).start()
+        # then run the capture loop
+        self.frame2video_run()
+
+
+screen = ImageGrab.grab()
+width, high = screen.size
+video = ScreenshotVideo(width, high, fps=60)
+video.pre_video_record()  # test run to find the best fps
+video('test1.mp4')
+video.run()
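insert_frame_array above doubles the frame count log2(round(fps / best_fps)) times, filling each gap with a 1/3–2/3 blend of the neighbouring frames via average_n. A toy illustration of a single doubling pass on synthetic 2x2 "frames" (the example data is mine):

```python
import numpy as np


def average_n(x, y):
    """Same blend as above, without the numba decoration."""
    return ((x + y + y) // 3).astype(x.dtype)


a = np.full((2, 2), 30, dtype=np.uint8)   # frame at time t
b = np.full((2, 2), 90, dtype=np.uint8)   # frame at time t+1
frames = [a, b]

# one doubling pass, exactly as in insert_frame_array
blended = map(average_n, [frames[0]] + frames[:-1], frames)
doubled = [f for pair in zip(blended, frames) for f in pair]

print(len(doubled))      # 4 frames instead of 2
print(doubled[2][0, 0])  # 70 == (30 + 90 + 90) // 3, the interpolated frame
```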
diff --git a/src/Screenshot/Screenshot_test.py b/src/Screenshot/Screenshot_test.py
new file mode 100644
index 0000000..0f8276f
--- /dev/null
+++ b/src/Screenshot/Screenshot_test.py
@@ -0,0 +1,32 @@
+from threading import Thread
+
+from pynput import keyboard  # pip install pynput
+
+from Audio_record import AudioRecord
+from Screenshot_record import Screenshot
+
+
+def hotkey():
+    """Hotkey listener."""
+    with keyboard.Listener(on_press=on_press) as listener:
+        listener.join()
+
+
+def on_press(key):
+    try:
+        if key.char == 't':  # 't': stop recording and keep the audio/video files
+            video.terminate()
+            audio.stop_flag = True
+        elif key.char == 'k':  # 'k': abort recording and delete the files
+            video.terminate()
+            audio.stop_flag = True
+            audio.kill = True
+            video.unlink('test.mp4')
+    except Exception as e:
+        print(e)
+
+
+key_thread = Thread(target=hotkey, daemon=True)
+audio = AudioRecord()
+video = Screenshot()
+key_thread.start()
+audio.run()  # start audio capture alongside the screen recording (default '~test.wav')
+video.record('test.mp4')
diff --git a/src/Screenshot/__pycache__/Audio_record.cpython-39.pyc b/src/Screenshot/__pycache__/Audio_record.cpython-39.pyc
new file mode 100644
index 0000000..b172c7d
Binary files /dev/null and b/src/Screenshot/__pycache__/Audio_record.cpython-39.pyc differ
diff --git a/src/Screenshot/__pycache__/Screenshot_Gui.cpython-39.pyc b/src/Screenshot/__pycache__/Screenshot_Gui.cpython-39.pyc
new file mode 100644
index 0000000..a922dc6
Binary files /dev/null and b/src/Screenshot/__pycache__/Screenshot_Gui.cpython-39.pyc differ
diff --git a/src/Screenshot/__pycache__/Screenshot_record.cpython-39.pyc b/src/Screenshot/__pycache__/Screenshot_record.cpython-39.pyc
new file mode 100644
index 0000000..826f23e
Binary files /dev/null and b/src/Screenshot/__pycache__/Screenshot_record.cpython-39.pyc differ
diff --git a/src/Screenshot/requirements.txt b/src/Screenshot/requirements.txt
new file mode 100644
index 0000000..e36e104
--- /dev/null
+++ b/src/Screenshot/requirements.txt
@@ -0,0 +1,9 @@
+ffmpeg-python==0.2.0
+numba==0.54.1
+numpy==1.20.3
+opencv-python==4.5.4.60
+Pillow==8.4.0
+PyAudio
+pynput==1.7.5
+PyQt5==5.15.4
+py7zr==0.18.4
diff --git a/src/Screenshot/resource/ffmpeg.7z b/src/Screenshot/resource/ffmpeg.7z
new file mode 100644
index 0000000..80bad7c
Binary files /dev/null and b/src/Screenshot/resource/ffmpeg.7z differ
diff --git a/src/Screenshot/resource/ffmpeg.exe b/src/Screenshot/resource/ffmpeg.exe
new file mode 100644
index 0000000..71b5352
Binary files /dev/null and b/src/Screenshot/resource/ffmpeg.exe differ
diff --git a/src/Screenshot/resource/gui_ico.ico b/src/Screenshot/resource/gui_ico.ico
new file mode 100644
index 0000000..4a5fb69
Binary files /dev/null and b/src/Screenshot/resource/gui_ico.ico differ
diff --git a/src/Screenshot/resource/gui_svg.svg b/src/Screenshot/resource/gui_svg.svg
new file mode 100644
index 0000000..37d2ebc
--- /dev/null
+++ b/src/Screenshot/resource/gui_svg.svg
@@ -0,0 +1,4 @@
+
+
\ No newline at end of file